From cc73504950eb7b5dff2dded9bedd67bc36d64641 Mon Sep 17 00:00:00 2001 From: dim Date: Sun, 19 Aug 2012 10:33:04 +0000 Subject: Vendor import of clang trunk r162107: http://llvm.org/svn/llvm-project/cfe/trunk@162107 --- docs/LanguageExtensions.html | 162 ++++++++ docs/ReleaseNotes.html | 19 + docs/analyzer/IPA.txt | 96 +++++ include/clang/AST/ASTContext.h | 20 +- include/clang/AST/Attr.h | 3 +- include/clang/AST/CommentCommandTraits.h | 6 +- include/clang/AST/DeclBase.h | 4 +- include/clang/AST/DeclCXX.h | 13 +- include/clang/AST/DeclGroup.h | 6 +- include/clang/AST/DeclLookups.h | 4 +- include/clang/AST/PrettyPrinter.h | 26 +- include/clang/AST/RawCommentList.h | 30 +- include/clang/AST/Stmt.h | 32 +- include/clang/AST/TemplateBase.h | 20 +- include/clang/AST/TypeLoc.h | 9 - include/clang/ASTMatchers/ASTMatchers.h | 148 ++++++++ include/clang/Basic/Attr.td | 21 ++ include/clang/Basic/Builtins.def | 6 +- include/clang/Basic/DiagnosticGroups.td | 8 +- include/clang/Basic/DiagnosticParseKinds.td | 4 + include/clang/Basic/DiagnosticSemaKinds.td | 36 +- include/clang/Lex/PTHManager.h | 2 +- include/clang/Parse/Parser.h | 4 + include/clang/Sema/AttributeList.h | 107 +++++- include/clang/Sema/Sema.h | 43 ++- .../StaticAnalyzer/Core/BugReporter/BugReporter.h | 51 ++- .../Core/BugReporter/PathDiagnostic.h | 22 +- .../StaticAnalyzer/Core/PathDiagnosticConsumers.h | 30 +- .../Core/PathSensitive/AnalysisManager.h | 27 +- .../StaticAnalyzer/Core/PathSensitive/CallEvent.h | 6 +- .../StaticAnalyzer/Core/PathSensitive/MemRegion.h | 4 +- .../Core/PathSensitive/ProgramState.h | 12 +- lib/AST/APValue.cpp | 5 +- lib/AST/ASTContext.cpp | 68 +++- lib/AST/ASTDiagnostic.cpp | 2 +- lib/AST/CommentCommandTraits.cpp | 10 +- lib/AST/DeclCXX.cpp | 13 +- lib/AST/DeclPrinter.cpp | 56 +-- lib/AST/DeclTemplate.cpp | 3 +- lib/AST/DumpXML.cpp | 5 + lib/AST/NestedNameSpecifier.cpp | 9 +- lib/AST/RawCommentList.cpp | 21 +- lib/AST/Stmt.cpp | 23 +- lib/AST/StmtPrinter.cpp | 32 +- lib/AST/TemplateBase.cpp | 6 +- lib/Basic/Diagnostic.cpp | 6 +- lib/Basic/Targets.cpp | 3 +- lib/CodeGen/CGBuiltin.cpp | 69 +++- lib/CodeGen/CGDebugInfo.cpp | 41 +- lib/CodeGen/CGExpr.cpp | 80 ++++ lib/CodeGen/CGExprCXX.cpp | 9 +- lib/CodeGen/CGStmt.cpp | 24 +- lib/CodeGen/CGValue.h | 2 +- lib/Driver/Tools.cpp | 3 + lib/Frontend/ASTConsumers.cpp | 2 +- lib/Frontend/CacheTokens.cpp | 2 +- lib/Lex/PTHLexer.cpp | 6 +- lib/Parse/ParseDecl.cpp | 73 +++- lib/Parse/ParseStmt.cpp | 181 ++++----- lib/Rewrite/RewriteModernObjC.cpp | 9 +- lib/Rewrite/RewriteObjC.cpp | 10 +- lib/Sema/AttributeList.cpp | 2 + lib/Sema/SemaCast.cpp | 20 +- lib/Sema/SemaChecking.cpp | 420 ++++++++++++++++++++- lib/Sema/SemaCodeComplete.cpp | 1 - lib/Sema/SemaDecl.cpp | 72 +++- lib/Sema/SemaDeclAttr.cpp | 163 ++++++-- lib/Sema/SemaDeclCXX.cpp | 2 +- lib/Sema/SemaExceptionSpec.cpp | 3 +- lib/Sema/SemaExpr.cpp | 3 +- lib/Sema/SemaExprMember.cpp | 2 +- lib/Sema/SemaOverload.cpp | 2 +- lib/Sema/SemaStmt.cpp | 329 ++++++++++------ lib/Sema/SemaTemplate.cpp | 7 + lib/Sema/SemaTemplateInstantiate.cpp | 3 +- lib/Sema/SemaType.cpp | 46 ++- lib/Sema/TreeTransform.h | 19 +- lib/Serialization/ASTReader.cpp | 1 - lib/Serialization/ASTWriter.cpp | 1 - .../Checkers/CallAndMessageChecker.cpp | 12 +- .../Checkers/DynamicTypePropagation.cpp | 87 ++++- lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp | 151 +------- lib/StaticAnalyzer/Core/AnalysisManager.cpp | 45 +-- lib/StaticAnalyzer/Core/BugReporter.cpp | 189 ++++------ lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 2 +- 
lib/StaticAnalyzer/Core/CallEvent.cpp | 86 +++-- .../Core/ExprEngineCallAndReturn.cpp | 42 ++- lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 48 +-- lib/StaticAnalyzer/Core/PathDiagnostic.cpp | 18 +- lib/StaticAnalyzer/Core/PlistDiagnostics.cpp | 92 +++-- lib/StaticAnalyzer/Core/ProgramState.cpp | 7 +- lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp | 12 +- lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 74 +++- test/Analysis/CFNumber.c | 6 +- test/Analysis/CheckNSError.m | 4 +- test/Analysis/array-struct.c | 2 +- test/Analysis/ctor-inlining.mm | 48 +++ test/Analysis/dtor.cpp | 54 +++ test/Analysis/func.c | 14 +- test/Analysis/html-diags.c | 6 +- test/Analysis/inline.cpp | 27 ++ test/Analysis/inlining/DynDispatchBifurcate.m | 10 +- test/Analysis/inlining/InlineObjCClassMethod.m | 30 ++ test/Analysis/inlining/dyn-dispatch-bifurcate.cpp | 17 + test/Analysis/keychainAPI.m | 6 +- test/Analysis/malloc-annotations.c | 4 +- test/Analysis/malloc.c | 6 +- test/Analysis/method-call-path-notes.cpp | 85 +++++ test/Analysis/method-call.cpp | 20 +- test/Analysis/misc-ps-region-store.m | 2 +- ...il-receiver-undefined-larger-than-voidptr-ret.m | 4 +- test/Analysis/ptr-arith.c | 6 +- test/Analysis/reinterpret-cast.cpp | 20 + test/Analysis/security-syntax-checks.m | 8 +- test/Analysis/sizeofpointer.c | 2 +- test/Analysis/stack-addr-ps.cpp | 2 +- test/Analysis/stream.c | 16 +- test/Analysis/variadic-method-types.m | 2 +- test/CodeCompletion/objc-expr.m | 2 +- test/CodeGen/align-global-large.c | 18 + test/CodeGen/alignment.c | 3 +- test/CodeGen/arm-neon-misc.c | 34 ++ test/CodeGen/complex-builtints.c | 71 ++++ test/CodeGen/ms-inline-asm.c | 86 ++++- .../devirtualize-virtual-function-calls.cpp | 17 + test/CodeGenObjC/instance-method-metadata.m | 10 +- test/CodeGenObjC/ns_consume_null_check.m | 8 +- test/CodeGenOpenCL/vectorLoadStore.cl | 9 + test/Driver/Xlinker-args.c | 11 +- test/Index/complete-enums.cpp | 4 +- test/Index/complete-exprs.m | 4 +- test/Index/complete-preamble.cpp | 8 + test/Index/complete-preamble.h | 6 + test/Parser/ms-inline-asm.c | 6 +- test/Sema/128bitint.c | 19 + test/Sema/arm-asm.c | 7 + test/Sema/builtins-decl.c | 5 + test/Sema/callingconv.c | 8 + test/Sema/static-array.c | 46 ++- test/Sema/tentative-decls.c | 5 +- test/Sema/warn-documentation.cpp | 60 ++- test/Sema/warn-type-safety-mpi-hdf5.c | 307 +++++++++++++++ test/Sema/warn-type-safety.c | 152 ++++++++ test/Sema/warn-type-safety.cpp | 71 ++++ test/SemaCXX/convert-to-bool.cpp | 3 +- test/SemaCXX/pragma-pack.cpp | 23 ++ test/SemaCXX/references.cpp | 2 +- test/SemaCXX/uninitialized.cpp | 81 ++++ test/SemaCXX/warn-thread-safety-parsing.cpp | 10 + test/SemaObjC/warn-cast-of-sel-expr.m | 21 ++ test/SemaObjCXX/abstract-class-type-ivar.mm | 29 ++ test/Tooling/clang-check-ast-dump.cpp | 5 + unittests/ASTMatchers/ASTMatchersTest.cpp | 257 +++++++++++++ unittests/Tooling/RecursiveASTVisitorTest.cpp | 7 + utils/TableGen/ClangAttrEmitter.cpp | 9 +- utils/analyzer/CmpRuns.py | 4 +- www/comparison.html | 6 +- 157 files changed, 4423 insertions(+), 1127 deletions(-) create mode 100644 docs/analyzer/IPA.txt create mode 100644 test/Analysis/inlining/dyn-dispatch-bifurcate.cpp create mode 100644 test/Analysis/reinterpret-cast.cpp create mode 100644 test/CodeGen/align-global-large.c create mode 100644 test/CodeGen/arm-neon-misc.c create mode 100644 test/CodeGen/complex-builtints.c create mode 100644 test/CodeGenOpenCL/vectorLoadStore.cl create mode 100644 test/Index/complete-preamble.cpp create mode 100644 test/Index/complete-preamble.h create mode 
100644 test/Sema/arm-asm.c create mode 100644 test/Sema/warn-type-safety-mpi-hdf5.c create mode 100644 test/Sema/warn-type-safety.c create mode 100644 test/Sema/warn-type-safety.cpp create mode 100644 test/SemaObjC/warn-cast-of-sel-expr.m create mode 100644 test/SemaObjCXX/abstract-class-type-ivar.mm diff --git a/docs/LanguageExtensions.html b/docs/LanguageExtensions.html index eac3c69..40477b8 100644 --- a/docs/LanguageExtensions.html +++ b/docs/LanguageExtensions.html @@ -142,6 +142,13 @@
  • shared_locks_required(...)
  • +
  • Type Safety Checking + +
@@ -1913,6 +1920,161 @@ declaration to specify that the function must be called while holding the listed shared locks. Arguments must be of lockable type, and there must be at least one argument.

    + +

    Type Safety Checking

    + + +

Clang supports additional attributes to enable checking type safety +properties that can't be enforced by the C type system. Use cases include:

    + + +

    You can detect support for these attributes with __has_attribute(). For +example:

    + +
    +
    +#if defined(__has_attribute)
    +#  if __has_attribute(argument_with_type_tag) && \
    +      __has_attribute(pointer_with_type_tag) && \
    +      __has_attribute(type_tag_for_datatype)
    +#    define ATTR_MPI_PWT(buffer_idx, type_idx) __attribute__((pointer_with_type_tag(mpi,buffer_idx,type_idx)))
    +/* ... other macros ... */
    +#  endif
    +#endif
    +
    +#if !defined(ATTR_MPI_PWT)
    +#define ATTR_MPI_PWT(buffer_idx, type_idx)
    +#endif
    +
    +int MPI_Send(void *buf, int count, MPI_Datatype datatype /*, other args omitted */)
    +    ATTR_MPI_PWT(1,3);
    +
    +
    + +

    argument_with_type_tag(...)

    + +

    Use __attribute__((argument_with_type_tag(arg_kind, arg_idx, +type_tag_idx))) on a function declaration to specify that the function +accepts a type tag that determines the type of some other argument. +arg_kind is an identifier that should be used when annotating all +applicable type tags.

    + +

This attribute is primarily useful for checking arguments of variadic +functions (pointer_with_type_tag can be used in most non-variadic +cases).

    + +

    For example:

    +
    +
    +int fcntl(int fd, int cmd, ...)
    +      __attribute__(( argument_with_type_tag(fcntl,3,2) ));
    +
    +
    + +

    pointer_with_type_tag(...)

    + +

Use __attribute__((pointer_with_type_tag(ptr_kind, ptr_idx, +type_tag_idx))) on a function declaration to specify that the +function accepts a type tag that determines the pointee type of some other pointer +argument.

    + +

    For example:

    +
    +
    +int MPI_Send(void *buf, int count, MPI_Datatype datatype /*, other args omitted */)
    +    __attribute__(( pointer_with_type_tag(mpi,1,3) ));
    +
    +
    + +
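(A sketch of what this checking looks like at a call site; this example is not part of the patch. It assumes MPI_INT expands to a pointer to a constant annotated with type_tag_for_datatype(mpi, int), as sketched in the next section, and reuses the MPI_Send declaration above.)

void send_ints(void) {
  int ints[64];
  double doubles[64];

  MPI_Send(ints, 64, MPI_INT);     /* OK: an 'int' buffer matches the 'int' type tag */
  MPI_Send(doubles, 64, MPI_INT);  /* expected -Wtype-safety warning: a 'double *'
                                      argument does not match the 'mpi' type tag,
                                      which requires 'int' */
}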

    type_tag_for_datatype(...)

    + +

    Clang supports annotating type tags of two forms.

    + + + +
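(A hedged sketch, not text from this patch, of one way a library can provide such a tag, modeled on MPI-style usage; the names mpi_predefined_type, mpi_int_tag and the MPI_INT macro below are illustrative only.)

typedef struct mpi_predefined_type *MPI_Datatype;   /* stand-in for the library's own typedef */

extern struct mpi_predefined_type mpi_int_tag
    __attribute__(( type_tag_for_datatype(mpi, int) ));

#define MPI_INT ((MPI_Datatype) &mpi_int_tag)

A pointer argument checked against an 'mpi' type tag (see pointer_with_type_tag above) is then expected to point to the C type named in the attribute, 'int' in this case.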

    The attribute also accepts an optional third argument that determines how +the expression is compared to the type tag. There are two supported flags:

+ layout_compatible - the argument is checked for layout compatibility with the type named in the tag, rather than for an exact type match. + must_be_null - the pointer argument corresponding to this tag must be a null pointer. + diff --git a/docs/ReleaseNotes.html b/docs/ReleaseNotes.html index 2108909..7a6a508 100644 --- a/docs/ReleaseNotes.html +++ b/docs/ReleaseNotes.html @@ -186,6 +186,25 @@ supported by the target, or if the compiler determines that a more specific model can be used.

    +

    Type safety attributes

    +

Clang now supports type safety attributes that allow checking at compile +time that 'void *' function arguments and arguments for variadic functions are +of a particular type that is determined by some other argument to the same +function call.

    + +

Use cases include:

    + + +

See the entries for the argument_with_type_tag, +pointer_with_type_tag and type_tag_for_datatype +attributes in the Clang language extensions documentation.

    +

    New Compiler Flags

    diff --git a/docs/analyzer/IPA.txt b/docs/analyzer/IPA.txt new file mode 100644 index 0000000..5691977 --- /dev/null +++ b/docs/analyzer/IPA.txt @@ -0,0 +1,96 @@ +Inlining +======== + +Inlining Modes +----------------------- +-analyzer-ipa=none - All inlining is disabled. +-analyzer-ipa=inlining - Turns on inlining when we can confidently find the function/method body corresponding to the call. (C functions, static functions, devirtualized C++ methods, ObjC class methods, ObjC instance methods when we are confident about the dynamic type of the instance). +-analyzer-ipa=dynamic - Inline instance methods for which the type is determined at runtime and we are not 100% sure that our type info is correct. For virtual calls, inline the most plausible definition. +-analyzer-ipa=dynamic-bifurcate - Same as -analyzer-ipa=dynamic, but the path is split. We inline on one branch and do not inline on the other. This mode does not drop the coverage in cases when the parent class has code that is only exercised when some of its methods are overriden. + +Currently, -analyzer-ipa=inlining is the default mode. + +Basics of Implementation +----------------------- + +The low-level mechanism of inlining a function is handled in ExprEngine::inlineCall and ExprEngine::processCallExit. If the conditions are right for inlining, a CallEnter node is created and added to the analysis work list. The CallEnter node marks the change to a new LocationContext representing the called function, and its state includes the contents of the new stack frame. When the CallEnter node is actually processed, its single successor will be a edge to the first CFG block in the function. + +Exiting an inlined function is a bit more work, fortunately broken up into reasonable steps: +1. The CoreEngine realizes we're at the end of an inlined call and generates a CallExitBegin node. +2. ExprEngine takes over (in processCallExit) and finds the return value of the function, if it has one. This is bound to the expression that triggered the call. (In the case of calls without origin expressions, such as destructors, this step is skipped.) +3. Dead symbols and bindings are cleaned out from the state, including any local bindings. +4. A CallExitEnd node is generated, which marks the transition back to the caller's LocationContext. +5. Custom post-call checks are processed and the final nodes are pushed back onto the work list, so that evaluation of the caller can continue. + +Retry Without Inlining +----------------------- + +In some cases, we would like to retry analyzes without inlining the particular call. Currently, we use this technique to recover the coverage in case we stop analyzing a path due to exceeding the maximum block count inside an inlined function. When this situation is detected, we walk up the path to find the first node before inlining was started and enqueue it on the WorkList with a special ReplayWithoutInlining bit added to it (ExprEngine::replayWithoutInlining). + +Deciding when to inline +----------------------- +In general, we try to inline as much as possible, since it provides a better summary of what actually happens in the program. 
However, there are some cases where we choose not to inline: +- if there is no definition available (of course) +- if we can't create a CFG or compute variable liveness for the function +- if we reach a cutoff of maximum stack depth (to avoid infinite recursion) +- if the function is variadic +- in C++, we don't inline constructors unless we know the destructor will be inlined as well +- in C++, we don't inline allocators (custom operator new implementations), since we don't properly handle deallocators (at the time of this writing) +- "Dynamic" calls are handled specially; see below. +- the engine's FunctionSummaries map stores additional information about declarations, some of which is collected at runtime based on previous analyses of the function. We do not inline functions which were not profitable to inline in a different context (for example, if the maximum block count was exceeded, see Retry Without Inlining). + + +Dynamic calls and devirtualization +---------------------------------- +"Dynamic" calls are those that are resolved at runtime, such as C++ virtual method calls and Objective-C message sends. Due to the path-sensitive nature of the analyzer, we may be able to figure out the dynamic type of the object whose method is being called and thus "devirtualize" the call, i.e. find the actual method that will be called at runtime. (Obviously this is not always possible.) This is handled by CallEvent's getRuntimeDefinition method. + +Type information is tracked as DynamicTypeInfo, stored within the program state. If no DynamicTypeInfo has been explicitly set for a region, it will be inferred from the region's type or associated symbol. Information from symbolic regions is weaker than from true typed regions; a C++ object declared "A obj" is known to have the class 'A', but a reference "A &ref" may dynamically be a subclass of 'A'. The DynamicTypePropagation checker gathers and propagates the type information. + +(Warning: not all of the existing analyzer code has been retrofitted to use DynamicTypeInfo, nor is it universally appropriate. In particular, DynamicTypeInfo always applies to a region with all casts stripped off, but sometimes the information provided by casts can be useful.) + +When asked to provide a definition, the CallEvents for dynamic calls will use the type info in their state to provide the best definition of the method to be called. In some cases this devirtualization can be perfect or near-perfect, and we can inline the definition as usual. In others we can make a guess, but report that our guess may not be the method actually called at runtime. + +The -analyzer-ipa option has four different modes: none, inlining, dynamic, and dynamic-bifurcate. Under -analyzer-ipa=dynamic, all dynamic calls are inlined, whether we are certain or not that this will actually be the definition used at runtime. Under -analyzer-ipa=inlining, only "near-perfect" devirtualized calls are inlined*, and other dynamic calls are evaluated conservatively (as if no definition were available). + +* Currently, Objective-C messages are not inlined under -analyzer-ipa=inlining, even if we are reasonably confident of the type of the receiver. We plan to enable this once we have tested our heuristics more thoroughly. + +The last option, -analyzer-ipa=dynamic-bifurcate, behaves similarly to "dynamic", but performs a conservative invalidation in the general virtual case in /addition/ to inlining. The details of this are discussed below.
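(A small illustration, not part of IPA.txt, of the distinction drawn above between an object whose dynamic type is known exactly and a reference that may really be a subclass; the class names are hypothetical.)

struct Shape {
  virtual double area() const { return 0.0; }
};

struct Circle : Shape {
  double radius;
  virtual double area() const { return 3.14159 * radius * radius; }
};

double knownType() {
  Shape s;           // DynamicTypeInfo: this region is known to be exactly 'Shape'
  return s.area();   // devirtualized with full confidence; inlined even under -analyzer-ipa=inlining
}

double maybeSubclass(const Shape &s) {
  // 's' may dynamically be a Circle or any other subclass. Under
  // -analyzer-ipa=dynamic the most plausible definition (Shape::area) is
  // inlined anyway; under -analyzer-ipa=dynamic-bifurcate the path is split
  // into an inlined branch and a conservatively evaluated branch.
  return s.area();
}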
+ + +Bifurcation +----------- +ExprEngine::BifurcateCall implements the -analyzer-ipa=dynamic-bifurcate mode. When a call is made on a region with dynamic type information, we bifurcate the path and add the region's processing mode to the GDM. Currently, there are 2 modes: DynamicDispatchModeInlined and DynamicDispatchModeConservative. Going forward, we consult the state of the region to make decisions on whether the calls should be inlined or not, which ensures that we have at most one split per region. The modes model the cases when the dynamic type information is perfectly correct and when the info is not correct (i.e. where the region is a subclass of the type we store in DynamicTypeInfo). + +Bifurcation mode allows for increased coverage in cases where the parent method contains code which is only executed when the class is subclassed. The disadvantages of this mode are a (considerable?) performance hit and the possibility of false positives on the path where the conservative mode is used. + + +Objective-C Message Heuristics +------------------------------ +We rely on a set of heuristics to partition the set of ObjC method calls into ones that require bifurcation and ones that do not (can or cannot be a subclass). Below are the cases when we consider that the dynamic type of the object is precise (cannot be a subclass): + - If the object was created with +alloc or +new and initialized with an -init method. + - If the calls are property accesses using dot syntax. This is based on the assumption that children rarely override properties, or do so in an essentially compatible way. + - If the class interface is declared inside the main source file. In this case it is unlikely that it will be subclassed. + - If the method is not declared outside of main source file, either by the receiver's class or by any superclasses. + + +C++ Inlining Caveats +-------------------- +C++11 [class.cdtor]p4 describes how the vtable of an object is modified as it is being constructed or destructed; that is, the type of the object depends on which base constructors have been completed. This is tracked using dynamic type info in the DynamicTypePropagation checker. + +Temporaries are poorly modelled right now because we're not confident in the placement + +'new' is poorly modelled due to some nasty CFG/design issues (elaborated in PR12014). 'delete' is essentially not modelled at all. + +Arrays of objects are modeled very poorly right now. We run only the first constructor and first destructor. Because of this, we don't inline any constructors or destructors for arrays. + + +CallEvent +========= + +A CallEvent represents a specific call to a function, method, or other body of code. It is path-sensitive, containing both the current state (ProgramStateRef) and stack space (LocationContext), and provides uniform access to the argument values and return type of a call, no matter how the call is written in the source or what sort of code body is being invoked. + +(For those familiar with Cocoa, CallEvent is roughly equivalent to NSInvocation.) + +CallEvent should be used whenever there is logic dealing with function calls that does not care how the call occurred. Examples include checking that arguments satisfy preconditions (such as __attribute__((nonnull))), and attempting to inline a call. + +CallEvents are reference-counted objects managed by a CallEventManager. 
While there is no inherent issue with persisting them (say, in the state's GDM), they are intended for short-lived use, and can be recreated from CFGElements or StackFrameContexts fairly easily. diff --git a/include/clang/AST/ASTContext.h b/include/clang/AST/ASTContext.h index 8fd7d6e..cad3ad2 100644 --- a/include/clang/AST/ASTContext.h +++ b/include/clang/AST/ASTContext.h @@ -474,8 +474,17 @@ public: Data.setPointer(RC); } + const Decl *getOriginalDecl() const LLVM_READONLY { + return OriginalDecl; + } + + void setOriginalDecl(const Decl *Orig) { + OriginalDecl = Orig; + } + private: llvm::PointerIntPair Data; + const Decl *OriginalDecl; }; /// \brief Mapping from declarations to comments attached to any @@ -485,6 +494,10 @@ public: /// lazily. mutable llvm::DenseMap RedeclComments; + /// \brief Mapping from declarations to parsed comments attached to any + /// redeclaration. + mutable llvm::DenseMap ParsedComments; + /// \brief Return the documentation comment attached to a given declaration, /// without looking into cache. RawComment *getRawCommentForDeclNoCache(const Decl *D) const; @@ -500,7 +513,12 @@ public: /// \brief Return the documentation comment attached to a given declaration. /// Returns NULL if no comment is attached. - const RawComment *getRawCommentForAnyRedecl(const Decl *D) const; + /// + /// \param OriginalDecl if not NULL, is set to declaration AST node that had + /// the comment, if the comment we found comes from a redeclaration. + const RawComment *getRawCommentForAnyRedecl( + const Decl *D, + const Decl **OriginalDecl = NULL) const; /// Return parsed documentation comment attached to a given declaration. /// Returns NULL if no comment is attached. diff --git a/include/clang/AST/Attr.h b/include/clang/AST/Attr.h index 27b44d4..b17bd48 100644 --- a/include/clang/AST/Attr.h +++ b/include/clang/AST/Attr.h @@ -105,7 +105,8 @@ public: virtual bool isLateParsed() const { return false; } // Pretty print this attribute. - virtual void printPretty(llvm::raw_ostream &OS, ASTContext &C) const = 0; + virtual void printPretty(llvm::raw_ostream &OS, + const PrintingPolicy &Policy) const = 0; // Implement isa/cast/dyncast/etc. static bool classof(const Attr *) { return true; } diff --git a/include/clang/AST/CommentCommandTraits.h b/include/clang/AST/CommentCommandTraits.h index f188375..5f0269a 100644 --- a/include/clang/AST/CommentCommandTraits.h +++ b/include/clang/AST/CommentCommandTraits.h @@ -35,14 +35,14 @@ public: /// A verbatim-like block command eats every character (except line starting /// decorations) until matching end command is seen or comment end is hit. /// - /// \param BeginName name of the command that starts the verbatim block. + /// \param StartName name of the command that starts the verbatim block. /// \param [out] EndName name of the command that ends the verbatim block. /// /// \returns true if a given command is a verbatim block command. bool isVerbatimBlockCommand(StringRef StartName, StringRef &EndName) const; /// \brief Register a new verbatim block command. - void addVerbatimBlockCommand(StringRef BeginName, StringRef EndName); + void addVerbatimBlockCommand(StringRef StartName, StringRef EndName); /// \brief Check if a given command is a verbatim line command. 
/// @@ -90,7 +90,7 @@ public: private: struct VerbatimBlockCommand { - StringRef BeginName; + StringRef StartName; StringRef EndName; }; diff --git a/include/clang/AST/DeclBase.h b/include/clang/AST/DeclBase.h index ac2cc9e..0f59609 100644 --- a/include/clang/AST/DeclBase.h +++ b/include/clang/AST/DeclBase.h @@ -858,10 +858,10 @@ public: raw_ostream &Out, const PrintingPolicy &Policy, unsigned Indentation = 0); // Debuggers don't usually respect default arguments. - LLVM_ATTRIBUTE_USED void dump() const { dump(llvm::errs()); } + LLVM_ATTRIBUTE_USED void dump() const; void dump(raw_ostream &Out) const; // Debuggers don't usually respect default arguments. - LLVM_ATTRIBUTE_USED void dumpXML() const { dumpXML(llvm::errs()); } + LLVM_ATTRIBUTE_USED void dumpXML() const; void dumpXML(raw_ostream &OS) const; private: diff --git a/include/clang/AST/DeclCXX.h b/include/clang/AST/DeclCXX.h index 851e310..2d95f03 100644 --- a/include/clang/AST/DeclCXX.h +++ b/include/clang/AST/DeclCXX.h @@ -1646,14 +1646,17 @@ public: /// \brief Find the method in RD that corresponds to this one. /// /// Find if RD or one of the classes it inherits from override this method. - /// If so, return it. RD is assumed to be a base class of the class defining - /// this method (or be the class itself). + /// If so, return it. RD is assumed to be a subclass of the class defining + /// this method (or be the class itself), unless MayBeBase is set to true. CXXMethodDecl * - getCorrespondingMethodInClass(const CXXRecordDecl *RD); + getCorrespondingMethodInClass(const CXXRecordDecl *RD, + bool MayBeBase = false); const CXXMethodDecl * - getCorrespondingMethodInClass(const CXXRecordDecl *RD) const { - return const_cast(this)->getCorrespondingMethodInClass(RD); + getCorrespondingMethodInClass(const CXXRecordDecl *RD, + bool MayBeBase = false) const { + return const_cast(this) + ->getCorrespondingMethodInClass(RD, MayBeBase); } // Implement isa/cast/dyncast/etc. diff --git a/include/clang/AST/DeclGroup.h b/include/clang/AST/DeclGroup.h index 63cdac5..cda6ae5 100644 --- a/include/clang/AST/DeclGroup.h +++ b/include/clang/AST/DeclGroup.h @@ -26,7 +26,11 @@ class DeclGroupIterator; class DeclGroup { // FIXME: Include a TypeSpecifier object. 
- unsigned NumDecls; + union { + unsigned NumDecls; + + Decl *Aligner; + }; private: DeclGroup() : NumDecls(0) {} diff --git a/include/clang/AST/DeclLookups.h b/include/clang/AST/DeclLookups.h index b8abe97..867b465 100644 --- a/include/clang/AST/DeclLookups.h +++ b/include/clang/AST/DeclLookups.h @@ -67,7 +67,7 @@ public: DeclContext::all_lookups_iterator DeclContext::lookups_begin() const { DeclContext *Primary = const_cast(this)->getPrimaryContext(); - if (hasExternalVisibleStorage()) + if (Primary->hasExternalVisibleStorage()) getParentASTContext().getExternalSource()->completeVisibleDeclsMap(Primary); if (StoredDeclsMap *Map = Primary->buildLookup()) return all_lookups_iterator(Map->begin(), Map->end()); @@ -76,7 +76,7 @@ DeclContext::all_lookups_iterator DeclContext::lookups_begin() const { DeclContext::all_lookups_iterator DeclContext::lookups_end() const { DeclContext *Primary = const_cast(this)->getPrimaryContext(); - if (hasExternalVisibleStorage()) + if (Primary->hasExternalVisibleStorage()) getParentASTContext().getExternalSource()->completeVisibleDeclsMap(Primary); if (StoredDeclsMap *Map = Primary->buildLookup()) return all_lookups_iterator(Map->end(), Map->end()); diff --git a/include/clang/AST/PrettyPrinter.h b/include/clang/AST/PrettyPrinter.h index 2e34dc8..f2c015f 100644 --- a/include/clang/AST/PrettyPrinter.h +++ b/include/clang/AST/PrettyPrinter.h @@ -34,19 +34,19 @@ public: struct PrintingPolicy { /// \brief Create a default printing policy for C. PrintingPolicy(const LangOptions &LO) - : Indentation(2), LangOpts(LO), SuppressSpecifiers(false), + : LangOpts(LO), Indentation(2), SuppressSpecifiers(false), SuppressTagKeyword(false), SuppressTag(false), SuppressScope(false), SuppressUnwrittenScope(false), SuppressInitializers(false), - Dump(false), ConstantArraySizeAsWritten(false), - AnonymousTagLocations(true), SuppressStrongLifetime(false), - Bool(LO.Bool) { } - - /// \brief The number of spaces to use to indent each line. - unsigned Indentation : 8; + ConstantArraySizeAsWritten(false), AnonymousTagLocations(true), + SuppressStrongLifetime(false), Bool(LO.Bool), + DumpSourceManager(0) { } /// \brief What language we're printing. LangOptions LangOpts; + /// \brief The number of spaces to use to indent each line. + unsigned Indentation : 8; + /// \brief Whether we should suppress printing of the actual specifiers for /// the given type or declaration. /// @@ -103,12 +103,6 @@ struct PrintingPolicy { /// internal initializer constructed for x will not be printed. bool SuppressInitializers : 1; - /// \brief True when we are "dumping" rather than "pretty-printing", - /// where dumping involves printing the internal details of the AST - /// and pretty-printing involves printing something similar to - /// source code. - bool Dump : 1; - /// \brief Whether we should print the sizes of constant array expressions /// as written in the sources. /// @@ -139,6 +133,12 @@ struct PrintingPolicy { /// \brief Whether we can use 'bool' rather than '_Bool', even if the language /// doesn't actually have 'bool' (because, e.g., it is defined as a macro). unsigned Bool : 1; + + /// \brief If we are "dumping" rather than "pretty-printing", this points to + /// a SourceManager which will be used to dump SourceLocations. Dumping + /// involves printing the internal details of the AST and pretty-printing + /// involves printing something similar to source code. 
+ SourceManager *DumpSourceManager; }; } // end namespace clang diff --git a/include/clang/AST/RawCommentList.h b/include/clang/AST/RawCommentList.h index 4901d07..630626b 100644 --- a/include/clang/AST/RawCommentList.h +++ b/include/clang/AST/RawCommentList.h @@ -55,16 +55,11 @@ public: /// Is this comment attached to any declaration? bool isAttached() const LLVM_READONLY { - return !DeclOrParsedComment.isNull(); + return IsAttached; } - /// Return the declaration that this comment is attached to. - const Decl *getDecl() const; - - /// Set the declaration that this comment is attached to. - void setDecl(const Decl *D) { - assert(DeclOrParsedComment.isNull()); - DeclOrParsedComment = D; + void setAttached() { + IsAttached = true; } /// Returns true if it is a comment that should be put after a member: @@ -118,28 +113,23 @@ public: return extractBriefText(Context); } - /// Returns a \c FullComment AST node, parsing the comment if needed. - comments::FullComment *getParsed(const ASTContext &Context) const { - if (comments::FullComment *FC = - DeclOrParsedComment.dyn_cast()) - return FC; - - return parse(Context); - } + /// Parse the comment, assuming it is attached to decl \c D. + comments::FullComment *parse(const ASTContext &Context, const Decl *D) const; private: SourceRange Range; mutable StringRef RawText; mutable const char *BriefText; - mutable llvm::PointerUnion - DeclOrParsedComment; mutable bool RawTextValid : 1; ///< True if RawText is valid mutable bool BriefTextValid : 1; ///< True if BriefText is valid unsigned Kind : 3; + /// True if comment is attached to a declaration in ASTContext. + bool IsAttached : 1; + bool IsTrailingComment : 1; bool IsAlmostTrailingComment : 1; @@ -152,7 +142,7 @@ private: RawComment(SourceRange SR, CommentKind K, bool IsTrailingComment, bool IsAlmostTrailingComment) : Range(SR), RawTextValid(false), BriefTextValid(false), Kind(K), - IsTrailingComment(IsTrailingComment), + IsAttached(false), IsTrailingComment(IsTrailingComment), IsAlmostTrailingComment(IsAlmostTrailingComment), BeginLineValid(false), EndLineValid(false) { } @@ -161,8 +151,6 @@ private: const char *extractBriefText(const ASTContext &Context) const; - comments::FullComment *parse(const ASTContext &Context) const; - friend class ASTReader; }; diff --git a/include/clang/AST/Stmt.h b/include/clang/AST/Stmt.h index 79e1920..35fb693 100644 --- a/include/clang/AST/Stmt.h +++ b/include/clang/AST/Stmt.h @@ -373,15 +373,9 @@ public: /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST /// back to its original source language syntax. - void dumpPretty(ASTContext& Context) const; + void dumpPretty(ASTContext &Context) const; void printPretty(raw_ostream &OS, PrinterHelper *Helper, const PrintingPolicy &Policy, - unsigned Indentation = 0) const { - printPretty(OS, *(ASTContext*)0, Helper, Policy, Indentation); - } - void printPretty(raw_ostream &OS, ASTContext &Context, - PrinterHelper *Helper, - const PrintingPolicy &Policy, unsigned Indentation = 0) const; /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only @@ -1620,36 +1614,40 @@ public: /// MSAsmStmt - This represents a MS inline-assembly statement extension. 
/// class MSAsmStmt : public Stmt { - SourceLocation AsmLoc, EndLoc; + SourceLocation AsmLoc, LBraceLoc, EndLoc; std::string AsmStr; bool IsSimple; bool IsVolatile; unsigned NumAsmToks; - unsigned NumLineEnds; + unsigned NumInputs; + unsigned NumOutputs; unsigned NumClobbers; Token *AsmToks; - unsigned *LineEnds; + IdentifierInfo **Names; Stmt **Exprs; StringRef *Clobbers; public: - MSAsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple, - bool isvolatile, ArrayRef asmtoks, - ArrayRef lineends, StringRef asmstr, - ArrayRef clobbers, SourceLocation endloc); + MSAsmStmt(ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, + bool issimple, bool isvolatile, ArrayRef asmtoks, + ArrayRef inputs, ArrayRef outputs, + StringRef asmstr, ArrayRef clobbers, + SourceLocation endloc); SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } + SourceLocation getLBraceLoc() const { return LBraceLoc; } + void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } + bool hasBraces() const { return LBraceLoc.isValid(); } + unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } - unsigned getNumLineEnds() { return NumLineEnds; } - unsigned *getLineEnds() { return LineEnds; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } @@ -1665,7 +1663,7 @@ public: //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } - StringRef getClobber(unsigned i) { return Clobbers[i]; } + StringRef getClobber(unsigned i) const { return Clobbers[i]; } SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(AsmLoc, EndLoc); diff --git a/include/clang/AST/TemplateBase.h b/include/clang/AST/TemplateBase.h index 54c9f2c..5047028 100644 --- a/include/clang/AST/TemplateBase.h +++ b/include/clang/AST/TemplateBase.h @@ -510,17 +510,23 @@ public: /// This is safe to be used inside an AST node, in contrast with /// TemplateArgumentListInfo. struct ASTTemplateArgumentListInfo { - /// \brief The source location of the left angle bracket ('<'); + /// \brief The source location of the left angle bracket ('<'). SourceLocation LAngleLoc; - /// \brief The source location of the right angle bracket ('>'); + /// \brief The source location of the right angle bracket ('>'). SourceLocation RAngleLoc; - /// \brief The number of template arguments in TemplateArgs. - /// The actual template arguments (if any) are stored after the - /// ExplicitTemplateArgumentList structure. - unsigned NumTemplateArgs; - + union { + /// \brief The number of template arguments in TemplateArgs. + /// The actual template arguments (if any) are stored after the + /// ExplicitTemplateArgumentList structure. + unsigned NumTemplateArgs; + + /// Force ASTTemplateArgumentListInfo to the right alignment + /// for the following array of TemplateArgumentLocs. + void *Aligner; + }; + /// \brief Retrieve the template arguments TemplateArgumentLoc *getTemplateArgs() { return reinterpret_cast (this + 1); diff --git a/include/clang/AST/TypeLoc.h b/include/clang/AST/TypeLoc.h index 1d1c1d1..11a878d 100644 --- a/include/clang/AST/TypeLoc.h +++ b/include/clang/AST/TypeLoc.h @@ -1061,7 +1061,6 @@ public: struct FunctionLocInfo { SourceLocation LocalRangeBegin; SourceLocation LocalRangeEnd; - bool TrailingReturn; }; /// \brief Wrapper for source info for functions. 
@@ -1084,13 +1083,6 @@ public: getLocalData()->LocalRangeEnd = L; } - bool getTrailingReturn() const { - return getLocalData()->TrailingReturn; - } - void setTrailingReturn(bool Trailing) { - getLocalData()->TrailingReturn = Trailing; - } - ArrayRef getParams() const { return ArrayRef(getParmArray(), getNumArgs()); } @@ -1119,7 +1111,6 @@ public: void initializeLocal(ASTContext &Context, SourceLocation Loc) { setLocalRangeBegin(Loc); setLocalRangeEnd(Loc); - setTrailingReturn(false); for (unsigned i = 0, e = getNumArgs(); i != e; ++i) setArg(i, NULL); } diff --git a/include/clang/ASTMatchers/ASTMatchers.h b/include/clang/ASTMatchers/ASTMatchers.h index 37e82e8..33ef3dc 100644 --- a/include/clang/ASTMatchers/ASTMatchers.h +++ b/include/clang/ASTMatchers/ASTMatchers.h @@ -50,6 +50,7 @@ #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/Regex.h" +#include namespace clang { namespace ast_matchers { @@ -195,6 +196,75 @@ AST_MATCHER_P(ClassTemplateSpecializationDecl, hasAnyTemplateArgument, return false; } +/// \brief Matches expressions that match InnerMatcher after any implicit casts +/// are stripped off. +/// +/// Parentheses and explicit casts are not discarded. +/// Given +/// int arr[5]; +/// int a = 0; +/// char b = 0; +/// const int c = a; +/// int *d = arr; +/// long e = (long) 0l; +/// The matchers +/// variable(hasInitializer(ignoringImpCasts(integerLiteral()))) +/// variable(hasInitializer(ignoringImpCasts(declarationReference()))) +/// would match the declarations for a, b, c, and d, but not e. +/// while +/// variable(hasInitializer(integerLiteral())) +/// variable(hasInitializer(declarationReference())) +/// only match the declarations for b, c, and d. +AST_MATCHER_P(Expr, ignoringImpCasts, + internal::Matcher, InnerMatcher) { + return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); +} + +/// \brief Matches expressions that match InnerMatcher after parentheses and +/// casts are stripped off. +/// +/// Implicit and non-C Style casts are also discarded. +/// Given +/// int a = 0; +/// char b = (0); +/// void* c = reinterpret_cast(0); +/// char d = char(0); +/// The matcher +/// variable(hasInitializer(ignoringParenCasts(integerLiteral()))) +/// would match the declarations for a, b, c, and d. +/// while +/// variable(hasInitializer(integerLiteral())) +/// only match the declaration for a. +AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher, InnerMatcher) { + return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); +} + +/// \brief Matches expressions that match InnerMatcher after implicit casts and +/// parentheses are stripped off. +/// +/// Explicit casts are not discarded. +/// Given +/// int arr[5]; +/// int a = 0; +/// char b = (0); +/// const int c = a; +/// int *d = (arr); +/// long e = ((long) 0l); +/// The matchers +/// variable(hasInitializer(ignoringParenImpCasts( +/// integerLiteral()))) +/// variable(hasInitializer(ignoringParenImpCasts( +/// declarationReference()))) +/// would match the declarations for a, b, c, and d, but not e. +/// while +/// variable(hasInitializer(integerLiteral())) +/// variable(hasInitializer(declarationReference())) +/// would only match the declaration for a. +AST_MATCHER_P(Expr, ignoringParenImpCasts, + internal::Matcher, InnerMatcher) { + return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); +} + /// \brief Matches classTemplateSpecializations where the n'th TemplateArgument /// matches the given Matcher. 
/// @@ -691,6 +761,19 @@ const internal::VariadicDynCastAllOfMatcher< Expr, ImplicitCastExpr> implicitCast; +/// \brief Matches any cast nodes of Clang's AST. +/// +/// Example: castExpr() matches each of the following: +/// (int) 3; +/// const_cast(SubExpr); +/// char c = 0; +/// but does not match +/// int i = (0); +/// int k = 0; +const internal::VariadicDynCastAllOfMatcher< + Expr, + CastExpr> castExpr; + /// \brief Matches functional cast expressions /// /// Example: Matches Foo(bar); @@ -1193,6 +1276,21 @@ AST_MATCHER_P(DeclRefExpr, throughUsingDecl, return false; } +/// \brief Matches the Decl of a DeclStmt which has a single declaration. +/// +/// Given +/// int a, b; +/// int c; +/// declarationStatement(hasSingleDecl(anything())) +/// matches 'int c;' but not 'int a, b;'. +AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher, InnerMatcher) { + if (Node.isSingleDecl()) { + const Decl *FoundDecl = Node.getSingleDecl(); + return InnerMatcher.matches(*FoundDecl, Finder, Builder); + } + return false; +} + /// \brief Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// @@ -1238,6 +1336,44 @@ AST_POLYMORPHIC_MATCHER_P2( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } +/// \brief Matches declaration statements that contain a specific number of +/// declarations. +/// +/// Example: Given +/// int a, b; +/// int c; +/// int d = 2, e; +/// declCountIs(2) +/// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. +AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { + return std::distance(Node.decl_begin(), Node.decl_end()) == N; +} + +/// \brief Matches the n'th declaration of a declaration statement. +/// +/// Note that this does not work for global declarations because the AST +/// breaks up multiple-declaration DeclStmt's into multiple single-declaration +/// DeclStmt's. +/// Example: Given non-global declarations +/// int a, b = 0; +/// int c; +/// int d = 2, e; +/// declarationStatement(containsDeclaration( +/// 0, variable(hasInitializer(anything())))) +/// matches only 'int d = 2, e;', and +/// declarationStatement(containsDeclaration(1, variable())) +/// matches 'int a, b = 0' as well as 'int d = 2, e;' +/// but 'int c;' is not matched. +AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, + internal::Matcher, InnerMatcher) { + const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); + if (N >= NumDecls) + return false; + DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); + std::advance(Iterator, N); + return InnerMatcher.matches(**Iterator, Finder, Builder); +} + /// \brief Matches a constructor initializer. /// /// Given @@ -1385,6 +1521,18 @@ AST_MATCHER_P(FunctionDecl, returns, internal::Matcher, Matcher) { return Matcher.matches(Node.getResultType(), Finder, Builder); } +/// \brief Matches extern "C" function declarations. +/// +/// Given: +/// extern "C" void f() {} +/// extern "C" { void g() {} } +/// void h() {} +/// function(isExternC()) +/// matches the declaration of f and g, but not the declaration h +AST_MATCHER(FunctionDecl, isExternC) { + return Node.isExternC(); +} + /// \brief Matches the condition expression of an if statement, for loop, /// or conditional operator. 
/// diff --git a/include/clang/Basic/Attr.td b/include/clang/Basic/Attr.td index 99180e4..fade83e 100644 --- a/include/clang/Basic/Attr.td +++ b/include/clang/Basic/Attr.td @@ -826,6 +826,27 @@ def SharedLocksRequired : InheritableAttr { let TemplateDependent = 1; } +// Type safety attributes for `void *' pointers and type tags. + +def ArgumentWithTypeTag : InheritableAttr { + let Spellings = [GNU<"argument_with_type_tag">, + GNU<"pointer_with_type_tag">]; + let Args = [IdentifierArgument<"ArgumentKind">, + UnsignedArgument<"ArgumentIdx">, + UnsignedArgument<"TypeTagIdx">, + BoolArgument<"IsPointer">]; + let Subjects = [Function]; +} + +def TypeTagForDatatype : InheritableAttr { + let Spellings = [GNU<"type_tag_for_datatype">]; + let Args = [IdentifierArgument<"ArgumentKind">, + TypeArgument<"MatchingCType">, + BoolArgument<"LayoutCompatible">, + BoolArgument<"MustBeNull">]; + let Subjects = [Var]; +} + // Microsoft-related attributes def MsStruct : InheritableAttr { diff --git a/include/clang/Basic/Builtins.def b/include/clang/Basic/Builtins.def index 1b060a5..84b2881 100644 --- a/include/clang/Basic/Builtins.def +++ b/include/clang/Basic/Builtins.def @@ -376,9 +376,9 @@ BUILTIN(__builtin_ctz , "iUi" , "nc") BUILTIN(__builtin_ctzl , "iULi" , "nc") BUILTIN(__builtin_ctzll, "iULLi", "nc") // TODO: int ctzimax(uintmax_t) -BUILTIN(__builtin_ffs , "iUi" , "nc") -BUILTIN(__builtin_ffsl , "iULi" , "nc") -BUILTIN(__builtin_ffsll, "iULLi", "nc") +BUILTIN(__builtin_ffs , "ii" , "nc") +BUILTIN(__builtin_ffsl , "iLi" , "nc") +BUILTIN(__builtin_ffsll, "iLLi", "nc") BUILTIN(__builtin_parity , "iUi" , "nc") BUILTIN(__builtin_parityl , "iULi" , "nc") BUILTIN(__builtin_parityll, "iULLi", "nc") diff --git a/include/clang/Basic/DiagnosticGroups.td b/include/clang/Basic/DiagnosticGroups.td index b95a90b..d8632dd 100644 --- a/include/clang/Basic/DiagnosticGroups.td +++ b/include/clang/Basic/DiagnosticGroups.td @@ -155,6 +155,8 @@ def MethodAccess : DiagGroup<"objc-method-access">; def ObjCReceiver : DiagGroup<"receiver-expr">; def OverlengthStrings : DiagGroup<"overlength-strings">; def OverloadedVirtual : DiagGroup<"overloaded-virtual">; +def PrivateExtern : DiagGroup<"private-extern">; +def SelTypeCast : DiagGroup<"cast-of-sel-type">; def ObjCPropertyImpl : DiagGroup<"objc-property-implementation">; def ObjCPropertyNoAttribute : DiagGroup<"objc-property-no-attribute">; def ObjCMissingSuperCalls : DiagGroup<"objc-missing-super-calls">; @@ -341,6 +343,8 @@ def FormatNonLiteral : DiagGroup<"format-nonliteral", [FormatSecurity]>; def Format2 : DiagGroup<"format=2", [FormatNonLiteral, FormatSecurity, FormatY2K]>; +def TypeSafety : DiagGroup<"type-safety">; + def Extra : DiagGroup<"extra", [ MissingFieldInitializers, IgnoredQualifiers, @@ -371,7 +375,9 @@ def Most : DiagGroup<"most", [ Unused, VolatileRegisterVar, ObjCMissingSuperCalls, - OverloadedVirtual + OverloadedVirtual, + PrivateExtern, + SelTypeCast ]>; // Thread Safety warnings diff --git a/include/clang/Basic/DiagnosticParseKinds.td b/include/clang/Basic/DiagnosticParseKinds.td index 8cb82fd..b1c16fa 100644 --- a/include/clang/Basic/DiagnosticParseKinds.td +++ b/include/clang/Basic/DiagnosticParseKinds.td @@ -677,6 +677,10 @@ def warn_availability_and_unavailable : Warning< "'unavailable' availability overrides all other availability information">, InGroup; +// Type safety attributes +def err_type_safety_unknown_flag : Error< + "invalid comparison flag %0; use 'layout_compatible' or 'must_be_null'">; + // Language specific pragmas // - Generic warnings 
def warn_pragma_expected_lparen : Warning< diff --git a/include/clang/Basic/DiagnosticSemaKinds.td b/include/clang/Basic/DiagnosticSemaKinds.td index 6d7b52e..2d63dc4 100644 --- a/include/clang/Basic/DiagnosticSemaKinds.td +++ b/include/clang/Basic/DiagnosticSemaKinds.td @@ -815,7 +815,7 @@ def err_friend_def_in_local_class : Error< "friend function cannot be defined in a local class">; def err_abstract_type_in_decl : Error< - "%select{return|parameter|variable|field}0 type %1 is an abstract class">; + "%select{return|parameter|variable|field|ivar}0 type %1 is an abstract class">; def err_allocation_of_abstract_type : Error< "allocating an object of abstract class type %0">; def err_throw_abstract_type : Error< @@ -1489,6 +1489,11 @@ def note_non_literal_user_provided_dtor : Note< "%0 is not literal because it has a user-provided destructor">; def note_non_literal_nontrivial_dtor : Note< "%0 is not literal because it has a non-trivial destructor">; +def warn_private_extern : Warning< + "Use of __private_extern__ on tentative definition has unexpected" + " behaviour - use __attribute__((visibility(\"hidden\"))) on extern" + " declaration or definition instead">, + InGroup, DefaultIgnore; // C++11 char16_t/char32_t def warn_cxx98_compat_unicode_type : Warning< @@ -1542,6 +1547,8 @@ def err_attribute_argument_n_not_int : Error< "'%0' attribute requires parameter %1 to be an integer constant">; def err_attribute_argument_n_not_string : Error< "'%0' attribute requires parameter %1 to be a string">; +def err_attribute_argument_n_not_identifier : Error< + "'%0' attribute requires parameter %1 to be an identifier">; def err_attribute_argument_out_of_bounds : Error< "'%0' attribute parameter %1 is out of bounds">; def err_attribute_requires_objc_interface : Error< @@ -1550,6 +1557,8 @@ def err_attribute_uuid_malformed_guid : Error< "uuid attribute contains a malformed GUID">; def warn_nonnull_pointers_only : Warning< "nonnull attribute only applies to pointer arguments">; +def err_attribute_pointers_only : Error< + "'%0' attribute only applies to pointer arguments">; def err_attribute_invalid_implicit_this_argument : Error< "'%0' attribute is invalid for the implicit this argument">; def err_ownership_type : Error< @@ -1765,7 +1774,6 @@ def err_attribute_can_be_applied_only_to_value_decl : Error< def warn_attribute_not_on_decl : Error< "%0 attribute ignored when parsing type">; - // Availability attribute def warn_availability_unknown_platform : Warning< "unknown platform %0 in availability macro">, InGroup; @@ -3677,6 +3685,10 @@ def err_illegal_decl_array_of_references : Error< "'%0' declared as array of references of type %1">; def err_decl_negative_array_size : Error< "'%0' declared as an array with a negative size">; +def err_array_static_outside_prototype : Error< + "%0 used in array declarator outside of function prototype">; +def err_array_static_not_outermost : Error< + "%0 used in non-outermost array type derivation">; def err_array_star_outside_prototype : Error< "star modifier used outside of function prototype">; def err_illegal_decl_pointer_to_reference : Error< @@ -4953,6 +4965,9 @@ def err_typecheck_cast_to_union_no_type : Error< "cast to union type from type %0 not present in union">; def err_cast_pointer_from_non_pointer_int : Error< "operand of type %0 cannot be cast to a pointer type">; +def warn_cast_pointer_from_sel : Warning< + "cast of type %0 to %1 is deprecated; use sel_getName instead">, + InGroup; def err_cast_pointer_to_non_pointer_int : Error< "pointer cannot be cast 
to type %0">; def err_typecheck_expect_scalar_operand : Error< @@ -5467,6 +5482,23 @@ def warn_identity_field_assign : Warning< "assigning %select{field|instance variable}0 to itself">, InGroup; +// Type safety attributes +def err_type_tag_for_datatype_not_ice : Error< + "'type_tag_for_datatype' attribute requires the initializer to be " + "an %select{integer|integral}0 constant expression">; +def err_type_tag_for_datatype_too_large : Error< + "'type_tag_for_datatype' attribute requires the initializer to be " + "an %select{integer|integral}0 constant expression " + "that can be represented by a 64 bit integer">; +def warn_type_tag_for_datatype_wrong_kind : Warning< + "this type tag was not designed to be used with this function">, + InGroup; +def warn_type_safety_type_mismatch : Warning< + "argument type %0 doesn't match specified '%1' type tag " + "%select{that requires %3|}2">, InGroup; +def warn_type_safety_null_pointer_required : Warning< + "specified %0 type tag requires a null pointer">, InGroup; + // Generic selections. def err_assoc_type_incomplete : Error< "type %0 in generic association incomplete">; diff --git a/include/clang/Lex/PTHManager.h b/include/clang/Lex/PTHManager.h index 25a4903..44f9ab3 100644 --- a/include/clang/Lex/PTHManager.h +++ b/include/clang/Lex/PTHManager.h @@ -101,7 +101,7 @@ class PTHManager : public IdentifierInfoLookup { public: // The current PTH version. - enum { Version = 9 }; + enum { Version = 10 }; ~PTHManager(); diff --git a/include/clang/Parse/Parser.h b/include/clang/Parse/Parser.h index 353b59e..4ef92f7 100644 --- a/include/clang/Parse/Parser.h +++ b/include/clang/Parse/Parser.h @@ -1834,6 +1834,10 @@ private: ParsedAttributes &Attrs, SourceLocation *EndLoc); + void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, + SourceLocation AttrNameLoc, + ParsedAttributes &Attrs, + SourceLocation *EndLoc); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); diff --git a/include/clang/Sema/AttributeList.h b/include/clang/Sema/AttributeList.h index 5239044..bf35886 100644 --- a/include/clang/Sema/AttributeList.h +++ b/include/clang/Sema/AttributeList.h @@ -19,6 +19,7 @@ #include "llvm/ADT/SmallVector.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/VersionTuple.h" +#include "clang/Sema/Ownership.h" #include namespace clang { @@ -87,6 +88,10 @@ private: /// availability attribute. unsigned IsAvailability : 1; + /// True if this has extra information associated with a + /// type_tag_for_datatype attribute. + unsigned IsTypeTagForDatatype : 1; + unsigned AttrKind : 8; /// \brief The location of the 'unavailable' keyword in an @@ -119,6 +124,22 @@ private: return reinterpret_cast(this+1)[index]; } +public: + struct TypeTagForDatatypeData { + ParsedType *MatchingCType; + unsigned LayoutCompatible : 1; + unsigned MustBeNull : 1; + }; + +private: + TypeTagForDatatypeData &getTypeTagForDatatypeDataSlot() { + return *reinterpret_cast(this + 1); + } + + const TypeTagForDatatypeData &getTypeTagForDatatypeDataSlot() const { + return *reinterpret_cast(this + 1); + } + AttributeList(const AttributeList &); // DO NOT IMPLEMENT void operator=(const AttributeList &); // DO NOT IMPLEMENT void operator delete(void *); // DO NOT IMPLEMENT @@ -126,6 +147,7 @@ private: size_t allocated_size() const; + /// Constructor for attributes with expression arguments. 
AttributeList(IdentifierInfo *attrName, SourceRange attrRange, IdentifierInfo *scopeName, SourceLocation scopeLoc, IdentifierInfo *parmName, SourceLocation parmLoc, @@ -134,12 +156,13 @@ private: : AttrName(attrName), ScopeName(scopeName), ParmName(parmName), AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(parmLoc), NumArgs(numArgs), SyntaxUsed(syntaxUsed), Invalid(false), - UsedAsTypeAttr(false), IsAvailability(false), - NextInPosition(0), NextInPool(0) { + UsedAsTypeAttr(false), IsAvailability(false), + IsTypeTagForDatatype(false), NextInPosition(0), NextInPool(0) { if (numArgs) memcpy(getArgsBuffer(), args, numArgs * sizeof(Expr*)); AttrKind = getKind(getName(), getScopeName(), syntaxUsed); } + /// Constructor for availability attributes. AttributeList(IdentifierInfo *attrName, SourceRange attrRange, IdentifierInfo *scopeName, SourceLocation scopeLoc, IdentifierInfo *parmName, SourceLocation parmLoc, @@ -153,6 +176,7 @@ private: AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(parmLoc), NumArgs(0), SyntaxUsed(syntaxUsed), Invalid(false), UsedAsTypeAttr(false), IsAvailability(true), + IsTypeTagForDatatype(false), UnavailableLoc(unavailable), MessageExpr(messageExpr), NextInPosition(0), NextInPool(0) { new (&getAvailabilitySlot(IntroducedSlot)) AvailabilityChange(introduced); @@ -161,6 +185,25 @@ private: AttrKind = getKind(getName(), getScopeName(), syntaxUsed); } + /// Constructor for type_tag_for_datatype attribute. + AttributeList(IdentifierInfo *attrName, SourceRange attrRange, + IdentifierInfo *scopeName, SourceLocation scopeLoc, + IdentifierInfo *argumentKindName, + SourceLocation argumentKindLoc, + ParsedType matchingCType, bool layoutCompatible, + bool mustBeNull, Syntax syntaxUsed) + : AttrName(attrName), ScopeName(scopeName), ParmName(argumentKindName), + AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(argumentKindLoc), + NumArgs(0), SyntaxUsed(syntaxUsed), + Invalid(false), UsedAsTypeAttr(false), IsAvailability(false), + IsTypeTagForDatatype(true), NextInPosition(NULL), NextInPool(NULL) { + TypeTagForDatatypeData &ExtraData = getTypeTagForDatatypeDataSlot(); + new (&ExtraData.MatchingCType) ParsedType(matchingCType); + ExtraData.LayoutCompatible = layoutCompatible; + ExtraData.MustBeNull = mustBeNull; + AttrKind = getKind(getName(), getScopeName(), syntaxUsed); + } + friend class AttributePool; friend class AttributeFactory; @@ -279,6 +322,24 @@ public: assert(getKind() == AT_Availability && "Not an availability attribute"); return MessageExpr; } + + const ParsedType &getMatchingCType() const { + assert(getKind() == AT_TypeTagForDatatype && + "Not a type_tag_for_datatype attribute"); + return *getTypeTagForDatatypeDataSlot().MatchingCType; + } + + bool getLayoutCompatible() const { + assert(getKind() == AT_TypeTagForDatatype && + "Not a type_tag_for_datatype attribute"); + return getTypeTagForDatatypeDataSlot().LayoutCompatible; + } + + bool getMustBeNull() const { + assert(getKind() == AT_TypeTagForDatatype && + "Not a type_tag_for_datatype attribute"); + return getTypeTagForDatatypeDataSlot().MustBeNull; + } }; /// A factory, from which one makes pools, from which one creates @@ -294,7 +355,11 @@ public: AvailabilityAllocSize = sizeof(AttributeList) + ((3 * sizeof(AvailabilityChange) + sizeof(void*) - 1) - / sizeof(void*) * sizeof(void*)) + / sizeof(void*) * sizeof(void*)), + TypeTagForDatatypeAllocSize = + sizeof(AttributeList) + + (sizeof(AttributeList::TypeTagForDatatypeData) + sizeof(void *) - 1) + / sizeof(void*) * sizeof(void*) }; private: @@ -411,6 +476,21 @@ 
public: AttributeList *createIntegerAttribute(ASTContext &C, IdentifierInfo *Name, SourceLocation TokLoc, int Arg); + + AttributeList *createTypeTagForDatatype( + IdentifierInfo *attrName, SourceRange attrRange, + IdentifierInfo *scopeName, SourceLocation scopeLoc, + IdentifierInfo *argumentKindName, + SourceLocation argumentKindLoc, + ParsedType matchingCType, bool layoutCompatible, + bool mustBeNull, AttributeList::Syntax syntax) { + void *memory = allocate(AttributeFactory::TypeTagForDatatypeAllocSize); + return add(new (memory) AttributeList(attrName, attrRange, + scopeName, scopeLoc, + argumentKindName, argumentKindLoc, + matchingCType, layoutCompatible, + mustBeNull, syntax)); + } }; /// addAttributeLists - Add two AttributeLists together @@ -503,7 +583,7 @@ public: /// dependencies on this method, it may not be long-lived. AttributeList *&getListRef() { return list; } - + /// Add attribute with expression arguments. AttributeList *addNew(IdentifierInfo *attrName, SourceRange attrRange, IdentifierInfo *scopeName, SourceLocation scopeLoc, IdentifierInfo *parmName, SourceLocation parmLoc, @@ -516,6 +596,7 @@ public: return attr; } + /// Add availability attribute. AttributeList *addNew(IdentifierInfo *attrName, SourceRange attrRange, IdentifierInfo *scopeName, SourceLocation scopeLoc, IdentifierInfo *parmName, SourceLocation parmLoc, @@ -533,6 +614,24 @@ public: return attr; } + /// Add type_tag_for_datatype attribute. + AttributeList *addNewTypeTagForDatatype( + IdentifierInfo *attrName, SourceRange attrRange, + IdentifierInfo *scopeName, SourceLocation scopeLoc, + IdentifierInfo *argumentKindName, + SourceLocation argumentKindLoc, + ParsedType matchingCType, bool layoutCompatible, + bool mustBeNull, AttributeList::Syntax syntax) { + AttributeList *attr = + pool.createTypeTagForDatatype(attrName, attrRange, + scopeName, scopeLoc, + argumentKindName, argumentKindLoc, + matchingCType, layoutCompatible, + mustBeNull, syntax); + add(attr); + return attr; + } + AttributeList *addNewInteger(ASTContext &C, IdentifierInfo *name, SourceLocation loc, int arg) { AttributeList *attr = diff --git a/include/clang/Sema/Sema.h b/include/clang/Sema/Sema.h index 058e0d9..acc88c0 100644 --- a/include/clang/Sema/Sema.h +++ b/include/clang/Sema/Sema.h @@ -2498,13 +2498,11 @@ public: ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, - SourceLocation LParenLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); - StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, - SourceLocation LParenLoc, Stmt *LoopVar, + StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, @@ -2542,8 +2540,8 @@ public: bool MSAsm = false); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, + SourceLocation LBraceLoc, ArrayRef AsmToks, - ArrayRef LineEnds, SourceLocation EndLoc); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, @@ -4463,6 +4461,7 @@ public: AbstractParamType, AbstractVariableType, AbstractFieldType, + AbstractIvarType, AbstractArrayType }; @@ -7146,6 +7145,42 @@ private: void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); +public: + /// \brief Register a magic integral constant to be used as a type tag. 
+ void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, + uint64_t MagicValue, QualType Type, + bool LayoutCompatible, bool MustBeNull); + + struct TypeTagData { + TypeTagData() {} + + TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : + Type(Type), LayoutCompatible(LayoutCompatible), + MustBeNull(MustBeNull) + {} + + QualType Type; + + /// If true, \c Type should be compared with other expression's types for + /// layout-compatibility. + unsigned LayoutCompatible : 1; + unsigned MustBeNull : 1; + }; + + /// A pair of ArgumentKind identifier and magic value. This uniquely + /// identifies the magic value. + typedef std::pair TypeTagMagicValue; + +private: + /// \brief A map from magic value to type information. + OwningPtr > + TypeTagForDatatypeMagicValues; + + /// \brief Peform checks on a call of a function with argument_with_type_tag + /// or pointer_with_type_tag attributes. + void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, + const Expr * const *ExprArgs); + /// \brief The parser's current scope. /// /// The parser maintains this state here. diff --git a/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h index 5ee52cc..48393a3 100644 --- a/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h +++ b/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h @@ -81,15 +81,19 @@ protected: typedef llvm::DenseSet Symbols; typedef llvm::DenseSet Regions; - /// A set of symbols that are registered with this report as being + /// A (stack of) a set of symbols that are registered with this + /// report as being "interesting", and thus used to help decide which + /// diagnostics to include when constructing the final path diagnostic. + /// The stack is largely used by BugReporter when generating PathDiagnostics + /// for multiple PathDiagnosticConsumers. + llvm::SmallVector interestingSymbols; + + /// A (stack of) set of regions that are registered with this report as being /// "interesting", and thus used to help decide which diagnostics /// to include when constructing the final path diagnostic. - Symbols interestingSymbols; - - /// A set of regions that are registered with this report as being - /// "interesting", and thus used to help decide which diagnostics - /// to include when constructing the final path diagnostic. - Regions interestingRegions; + /// The stack is largely used by BugReporter when generating PathDiagnostics + /// for multiple PathDiagnosticConsumers. + llvm::SmallVector interestingRegions; /// A set of custom visitors which generate "event" diagnostics at /// interesting points in the path. @@ -107,6 +111,15 @@ protected: /// when reporting an issue. bool DoNotPrunePath; +private: + // Used internally by BugReporter. 
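The checks declared here are driven by user-level annotations. Below is a minimal sketch of the intended usage, modeled on an MPI-like API; the struct, function, and macro names are illustrative, and the argument indices in pointer_with_type_tag are the assumed 1-based positions of the buffer and the type-tag argument in the declaration that follows.

  typedef struct mpi_datatype *MPI_Datatype;

  /* Registers the address of mpi_datatype_int as the magic value for the
     'mpi' tag kind whose matching C type is 'int'. */
  extern struct mpi_datatype mpi_datatype_int
      __attribute__(( type_tag_for_datatype(mpi, int) ));
  #define MPI_INT ((MPI_Datatype) &mpi_datatype_int)

  /* Argument 1 is the buffer; its pointee type must match the type tag
     passed as argument 3. */
  int MPI_Send(void *buf, int count, MPI_Datatype datatype)
      __attribute__(( pointer_with_type_tag(mpi, 1, 3) ));

  void use(void) {
    int    i = 0;
    double d = 0.0;
    MPI_Send(&i, 1, MPI_INT);   /* OK: buffer type matches the tag           */
    MPI_Send(&d, 1, MPI_INT);   /* warn_type_safety_type_mismatch fires here */
  }

In this sketch, CheckArgumentWithTypeTag would look up the magic value (the address of mpi_datatype_int) in TypeTagForDatatypeMagicValues and compare the recorded TypeTagData::Type against the pointee type of the buffer argument.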
+ Symbols &getInterestingSymbols(); + Regions &getInterestingRegions(); + + void lazyInitializeInterestingSets(); + void pushInterestingSymbolsAndRegions(); + void popInterestingSymbolsAndRegions(); + public: BugReport(BugType& bt, StringRef desc, const ExplodedNode *errornode) : BT(bt), DeclWithIssue(0), Description(desc), ErrorNode(errornode), @@ -160,9 +173,9 @@ public: void markInteresting(const MemRegion *R); void markInteresting(SVal V); - bool isInteresting(SymbolRef sym) const; - bool isInteresting(const MemRegion *R) const; - bool isInteresting(SVal V) const; + bool isInteresting(SymbolRef sym); + bool isInteresting(const MemRegion *R); + bool isInteresting(SVal V); unsigned getConfigurationChangeToken() const { return ConfigurationChangeToken; @@ -295,7 +308,7 @@ class BugReporterData { public: virtual ~BugReporterData(); virtual DiagnosticsEngine& getDiagnostic() = 0; - virtual PathDiagnosticConsumer* getPathDiagnosticConsumer() = 0; + virtual ArrayRef getPathDiagnosticConsumers() = 0; virtual ASTContext &getASTContext() = 0; virtual SourceManager& getSourceManager() = 0; }; @@ -318,6 +331,12 @@ private: /// Generate and flush the diagnostics for the given bug report. void FlushReport(BugReportEquivClass& EQ); + /// Generate and flush the diagnostics for the given bug report + /// and PathDiagnosticConsumer. + void FlushReport(BugReport *exampleReport, + PathDiagnosticConsumer &PD, + ArrayRef BugReports); + /// The set of bug reports tracked by the BugReporter. llvm::FoldingSet EQClasses; /// A vector of BugReports for tracking the allocated pointers and cleanup. @@ -341,8 +360,8 @@ public: return D.getDiagnostic(); } - PathDiagnosticConsumer* getPathDiagnosticConsumer() { - return D.getPathDiagnosticConsumer(); + ArrayRef getPathDiagnosticConsumers() { + return D.getPathDiagnosticConsumers(); } /// \brief Iterator over the set of BugTypes tracked by the BugReporter. @@ -360,7 +379,8 @@ public: SourceManager& getSourceManager() { return D.getSourceManager(); } virtual void GeneratePathDiagnostic(PathDiagnostic& pathDiagnostic, - SmallVectorImpl &bugReports) {} + PathDiagnosticConsumer &PC, + ArrayRef &bugReports) {} void Register(BugType *BT); @@ -421,7 +441,8 @@ public: ProgramStateManager &getStateManager(); virtual void GeneratePathDiagnostic(PathDiagnostic &pathDiagnostic, - SmallVectorImpl &bugReports); + PathDiagnosticConsumer &PC, + ArrayRef &bugReports); /// classof - Used by isa<>, cast<>, and dyn_cast<>. 
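With getPathDiagnosticConsumers() now returning every registered consumer and the new private FlushReport overload taking a specific consumer, the flush logic becomes roughly the loop sketched below. It is written as a BugReporter member, and FindReportInEquivalenceClass is assumed to be the existing report-selection helper; the local names are illustrative.

  void BugReporter::FlushReport(BugReportEquivClass &EQ) {
    SmallVector<BugReport *, 10> bugReports;
    BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports);
    if (!exampleReport)
      return;

    // Emit the same equivalence class once per consumer; each consumer can
    // ask for a different PathGenerationScheme (None, Minimal, Extensive).
    ArrayRef<PathDiagnosticConsumer *> Consumers = getPathDiagnosticConsumers();
    for (ArrayRef<PathDiagnosticConsumer *>::iterator I = Consumers.begin(),
                                                      E = Consumers.end();
         I != E; ++I)
      FlushReport(exampleReport, **I, bugReports);
  }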
static bool classof(const BugReporter* R) { diff --git a/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h b/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h index 2e7abfa..973cfb1 100644 --- a/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h +++ b/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h @@ -51,22 +51,25 @@ typedef const SymExpr* SymbolRef; class PathDiagnostic; class PathDiagnosticConsumer { +public: + typedef std::vector > FilesMade; + +private: virtual void anchor(); public: PathDiagnosticConsumer() : flushed(false) {} virtual ~PathDiagnosticConsumer(); - void FlushDiagnostics(SmallVectorImpl *FilesMade); + void FlushDiagnostics(FilesMade *FilesMade); virtual void FlushDiagnosticsImpl(std::vector &Diags, - SmallVectorImpl *FilesMade) - = 0; + FilesMade *filesMade) = 0; virtual StringRef getName() const = 0; void HandlePathDiagnostic(PathDiagnostic *D); - enum PathGenerationScheme { Minimal, Extensive }; + enum PathGenerationScheme { None, Minimal, Extensive }; virtual PathGenerationScheme getGenerationScheme() const { return Minimal; } virtual bool supportsLogicalOpControlFlow() const { return false; } virtual bool supportsAllBlockEdges() const { return false; } @@ -332,15 +335,8 @@ public: ranges.push_back(SourceRange(B,E)); } - typedef const SourceRange* range_iterator; - - range_iterator ranges_begin() const { - return ranges.empty() ? NULL : &ranges[0]; - } - - range_iterator ranges_end() const { - return ranges_begin() + ranges.size(); - } + /// Return the SourceRanges associated with this PathDiagnosticPiece. + ArrayRef getRanges() const { return ranges; } static inline bool classof(const PathDiagnosticPiece *P) { return true; diff --git a/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h b/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h index 65be3a4..3aab648 100644 --- a/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h +++ b/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h @@ -15,6 +15,7 @@ #define LLVM_CLANG_GR_PATH_DIAGNOSTIC_CLIENTS_H #include +#include namespace clang { @@ -23,24 +24,25 @@ class Preprocessor; namespace ento { class PathDiagnosticConsumer; +typedef std::vector PathDiagnosticConsumers; -PathDiagnosticConsumer* -createHTMLDiagnosticConsumer(const std::string& prefix, const Preprocessor &PP); +void createHTMLDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string& prefix, + const Preprocessor &PP); -PathDiagnosticConsumer* -createPlistDiagnosticConsumer(const std::string& prefix, const Preprocessor &PP, - PathDiagnosticConsumer *SubPD = 0); +void createPlistDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string& prefix, + const Preprocessor &PP); -PathDiagnosticConsumer* -createPlistMultiFileDiagnosticConsumer(const std::string& prefix, - const Preprocessor &PP); +void createPlistMultiFileDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string& prefix, + const Preprocessor &PP); -PathDiagnosticConsumer* -createTextPathDiagnosticConsumer(const std::string& prefix, - const Preprocessor &PP); +void createTextPathDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string& prefix, + const Preprocessor &PP); -} // end GR namespace - -} // end clang namespace +} // end 'ento' namespace +} // end 'clang' namespace #endif diff --git a/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h b/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h index 1cc53d4..876196b 100644 --- 
a/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h +++ b/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h @@ -19,6 +19,7 @@ #include "clang/Frontend/AnalyzerOptions.h" #include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" #include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h" namespace clang { @@ -32,8 +33,7 @@ class AnalysisManager : public BugReporterData { ASTContext &Ctx; DiagnosticsEngine &Diags; const LangOptions &LangOpts; - - OwningPtr PD; + PathDiagnosticConsumers PathConsumers; // Configurable components creators. StoreManagerCreator CreateStoreMgr; @@ -82,8 +82,9 @@ public: bool NoRetryExhausted; public: - AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags, - const LangOptions &lang, PathDiagnosticConsumer *pd, + AnalysisManager(ASTContext &ctx,DiagnosticsEngine &diags, + const LangOptions &lang, + const PathDiagnosticConsumers &Consumers, StoreManagerCreator storemgr, ConstraintManagerCreator constraintmgr, CheckerManager *checkerMgr, @@ -99,12 +100,7 @@ public: AnalysisInliningMode inliningMode, bool NoRetry); - /// Construct a clone of the given AnalysisManager with the given ASTContext - /// and DiagnosticsEngine. - AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags, - AnalysisManager &ParentAM); - - ~AnalysisManager() { FlushDiagnostics(); } + ~AnalysisManager(); void ClearContexts() { AnaCtxMgr.clear(); @@ -140,15 +136,12 @@ public: return LangOpts; } - virtual PathDiagnosticConsumer *getPathDiagnosticConsumer() { - return PD.get(); - } - - void FlushDiagnostics() { - if (PD.get()) - PD->FlushDiagnostics(0); + ArrayRef getPathDiagnosticConsumers() { + return PathConsumers; } + void FlushDiagnostics(); + unsigned getMaxNodes() const { return MaxNodes; } unsigned getMaxVisit() const { return MaxVisit; } diff --git a/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h b/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h index 47edfe9..f6c5830 100644 --- a/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h +++ b/include/clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h @@ -328,7 +328,7 @@ public: // For debugging purposes only void dump(raw_ostream &Out) const; - LLVM_ATTRIBUTE_USED void dump() const { dump(llvm::errs()); } + LLVM_ATTRIBUTE_USED void dump() const; static bool classof(const CallEvent *) { return true; } }; @@ -804,8 +804,12 @@ public: return getOriginExpr()->getReceiverInterface(); } + /// Returns how the message was written in the source (property access, + /// subscript, or explicit message send). ObjCMessageKind getMessageKind() const; + /// Returns true if this property access or subscript is a setter (has the + /// form of an assignment). bool isSetter() const { switch (getMessageKind()) { case OCM_Message: diff --git a/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h index 01218ef..8044ed8 100644 --- a/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h +++ b/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h @@ -52,7 +52,9 @@ class RegionOffset { int64_t Offset; public: - enum { Symbolic = INT64_MAX }; + // We're using a const instead of an enumeration due to the size required; + // Visual Studio will only create enumerations of size int, not long long. 
+ static const int64_t Symbolic = INT64_MAX; RegionOffset() : R(0) {} RegionOffset(const MemRegion *r, int64_t off) : R(r), Offset(off) {} diff --git a/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h index d36aa1b..b0c51dd 100644 --- a/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h +++ b/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h @@ -65,10 +65,14 @@ class DynamicTypeInfo { public: DynamicTypeInfo() : T(QualType()) {} - DynamicTypeInfo(QualType WithType, bool CanBeSub = true): - T(WithType), CanBeASubClass(CanBeSub) {} - QualType getType() { return T; } - bool canBeASubClass() { return CanBeASubClass; } + DynamicTypeInfo(QualType WithType, bool CanBeSub = true) + : T(WithType), CanBeASubClass(CanBeSub) {} + + bool isValid() const { return !T.isNull(); } + + QualType getType() const { return T; } + bool canBeASubClass() const { return CanBeASubClass; } + void Profile(llvm::FoldingSetNodeID &ID) const { T.Profile(ID); ID.AddInteger((unsigned)CanBeASubClass); diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp index a74ef14..2d7c9bd 100644 --- a/lib/AST/APValue.cpp +++ b/lib/AST/APValue.cpp @@ -367,8 +367,7 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{ if (const ValueDecl *VD = Base.dyn_cast()) Out << *VD; else - Base.get()->printPretty(Out, Ctx, 0, - Ctx.getPrintingPolicy()); + Base.get()->printPretty(Out, 0, Ctx.getPrintingPolicy()); if (!O.isZero()) { Out << " + " << (O / S); if (IsReference) @@ -389,7 +388,7 @@ void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{ ElemTy = VD->getType(); } else { const Expr *E = Base.get(); - E->printPretty(Out, Ctx, 0,Ctx.getPrintingPolicy()); + E->printPretty(Out, 0, Ctx.getPrintingPolicy()); ElemTy = E->getType(); } diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp index ad48dff..c021323 100644 --- a/lib/AST/ASTContext.cpp +++ b/lib/AST/ASTContext.cpp @@ -66,6 +66,12 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { if (D->isImplicit()) return NULL; + // User can not attach documentation to implicit instantiations. + if (const FunctionDecl *FD = dyn_cast(D)) { + if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) + return NULL; + } + // TODO: handle comments for function parameters properly. if (isa(D)) return NULL; @@ -145,7 +151,6 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { SourceMgr.getLineNumber(DeclLocDecomp.first, DeclLocDecomp.second) == SourceMgr.getLineNumber(CommentBeginDecomp.first, CommentBeginDecomp.second)) { - (*Comment)->setDecl(D); return *Comment; } } @@ -185,13 +190,13 @@ RawComment *ASTContext::getRawCommentForDeclNoCache(const Decl *D) const { if (Text.find_first_of(",;{}#@") != StringRef::npos) return NULL; - (*Comment)->setDecl(D); return *Comment; } -const RawComment *ASTContext::getRawCommentForAnyRedecl(const Decl *D) const { - // If we have a 'templated' declaration for a template, adjust 'D' to - // refer to the actual template. +namespace { +/// If we have a 'templated' declaration for a template, adjust 'D' to +/// refer to the actual template. +const Decl *adjustDeclToTemplate(const Decl *D) { if (const FunctionDecl *FD = dyn_cast(D)) { if (const FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate()) D = FTD; @@ -200,6 +205,14 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(const Decl *D) const { D = CTD; } // FIXME: Alias templates? 
+ return D; +} +} // unnamed namespace + +const RawComment *ASTContext::getRawCommentForAnyRedecl( + const Decl *D, + const Decl **OriginalDecl) const { + D = adjustDeclToTemplate(D); // Check whether we have cached a comment for this declaration already. { @@ -207,13 +220,17 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(const Decl *D) const { RedeclComments.find(D); if (Pos != RedeclComments.end()) { const RawCommentAndCacheFlags &Raw = Pos->second; - if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) + if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) { + if (OriginalDecl) + *OriginalDecl = Raw.getOriginalDecl(); return Raw.getRaw(); + } } } // Search for comments attached to declarations in the redeclaration chain. const RawComment *RC = NULL; + const Decl *OriginalDeclForRC = NULL; for (Decl::redecl_iterator I = D->redecls_begin(), E = D->redecls_end(); I != E; ++I) { @@ -223,16 +240,19 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(const Decl *D) const { const RawCommentAndCacheFlags &Raw = Pos->second; if (Raw.getKind() != RawCommentAndCacheFlags::NoCommentInDecl) { RC = Raw.getRaw(); + OriginalDeclForRC = Raw.getOriginalDecl(); break; } } else { RC = getRawCommentForDeclNoCache(*I); + OriginalDeclForRC = *I; RawCommentAndCacheFlags Raw; if (RC) { Raw.setRaw(RC); Raw.setKind(RawCommentAndCacheFlags::FromDecl); } else Raw.setKind(RawCommentAndCacheFlags::NoCommentInDecl); + Raw.setOriginalDecl(*I); RedeclComments[*I] = Raw; if (RC) break; @@ -242,10 +262,14 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(const Decl *D) const { // If we found a comment, it should be a documentation comment. assert(!RC || RC->isDocumentation()); + if (OriginalDecl) + *OriginalDecl = OriginalDeclForRC; + // Update cache for every declaration in the redeclaration chain. 
RawCommentAndCacheFlags Raw; Raw.setRaw(RC); Raw.setKind(RawCommentAndCacheFlags::FromRedecl); + Raw.setOriginalDecl(OriginalDeclForRC); for (Decl::redecl_iterator I = D->redecls_begin(), E = D->redecls_end(); @@ -259,11 +283,24 @@ const RawComment *ASTContext::getRawCommentForAnyRedecl(const Decl *D) const { } comments::FullComment *ASTContext::getCommentForDecl(const Decl *D) const { - const RawComment *RC = getRawCommentForAnyRedecl(D); + D = adjustDeclToTemplate(D); + const Decl *Canonical = D->getCanonicalDecl(); + llvm::DenseMap::iterator Pos = + ParsedComments.find(Canonical); + if (Pos != ParsedComments.end()) + return Pos->second; + + const Decl *OriginalDecl; + const RawComment *RC = getRawCommentForAnyRedecl(D, &OriginalDecl); if (!RC) return NULL; - return RC->getParsed(*this); + if (D != OriginalDecl) + return getCommentForDecl(OriginalDecl); + + comments::FullComment *FC = RC->parse(*this, D); + ParsedComments[Canonical] = FC; + return FC; } void @@ -5437,7 +5474,8 @@ ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS, QualifiedTemplateName *QTN = QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos); if (!QTN) { - QTN = new (*this,4) QualifiedTemplateName(NNS, TemplateKeyword, Template); + QTN = new (*this, llvm::alignOf()) + QualifiedTemplateName(NNS, TemplateKeyword, Template); QualifiedTemplateNames.InsertNode(QTN, InsertPos); } @@ -5464,10 +5502,12 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); if (CanonNNS == NNS) { - QTN = new (*this,4) DependentTemplateName(NNS, Name); + QTN = new (*this, llvm::alignOf()) + DependentTemplateName(NNS, Name); } else { TemplateName Canon = getDependentTemplateName(CanonNNS, Name); - QTN = new (*this,4) DependentTemplateName(NNS, Name, Canon); + QTN = new (*this, llvm::alignOf()) + DependentTemplateName(NNS, Name, Canon); DependentTemplateName *CheckQTN = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); assert(!CheckQTN && "Dependent type name canonicalization broken"); @@ -5498,10 +5538,12 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS, NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS); if (CanonNNS == NNS) { - QTN = new (*this,4) DependentTemplateName(NNS, Operator); + QTN = new (*this, llvm::alignOf()) + DependentTemplateName(NNS, Operator); } else { TemplateName Canon = getDependentTemplateName(CanonNNS, Operator); - QTN = new (*this,4) DependentTemplateName(NNS, Operator, Canon); + QTN = new (*this, llvm::alignOf()) + DependentTemplateName(NNS, Operator, Canon); DependentTemplateName *CheckQTN = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos); diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp index 35fcd41..a605f1a 100644 --- a/lib/AST/ASTDiagnostic.cpp +++ b/lib/AST/ASTDiagnostic.cpp @@ -1149,7 +1149,7 @@ class TemplateDiff { if (!E) OS << "(no argument)"; else - E->printPretty(OS, Context, 0, Policy); return; + E->printPretty(OS, 0, Policy); return; } /// PrintTemplateTemplate - Handles printing of template template arguments, diff --git a/lib/AST/CommentCommandTraits.cpp b/lib/AST/CommentCommandTraits.cpp index d8ce1f3..dc7a0bd 100644 --- a/lib/AST/CommentCommandTraits.cpp +++ b/lib/AST/CommentCommandTraits.cpp @@ -15,9 +15,9 @@ namespace comments { // TODO: tablegen -bool CommandTraits::isVerbatimBlockCommand(StringRef BeginName, +bool CommandTraits::isVerbatimBlockCommand(StringRef StartName, StringRef &EndName) const { - const char *Result = 
llvm::StringSwitch(BeginName) + const char *Result = llvm::StringSwitch(StartName) .Case("code", "endcode") .Case("verbatim", "endverbatim") .Case("htmlonly", "endhtmlonly") @@ -44,7 +44,7 @@ bool CommandTraits::isVerbatimBlockCommand(StringRef BeginName, I = VerbatimBlockCommands.begin(), E = VerbatimBlockCommands.end(); I != E; ++I) - if (I->BeginName == BeginName) { + if (I->StartName == StartName) { EndName = I->EndName; return true; } @@ -115,10 +115,10 @@ bool CommandTraits::isDeclarationCommand(StringRef Name) const { .Default(false); } -void CommandTraits::addVerbatimBlockCommand(StringRef BeginName, +void CommandTraits::addVerbatimBlockCommand(StringRef StartName, StringRef EndName) { VerbatimBlockCommand VBC; - VBC.BeginName = BeginName; + VBC.StartName = StartName; VBC.EndName = EndName; VerbatimBlockCommands.push_back(VBC); } diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp index eec2e9d..2f21e4c 100644 --- a/lib/AST/DeclCXX.cpp +++ b/lib/AST/DeclCXX.cpp @@ -1294,15 +1294,20 @@ static bool recursivelyOverrides(const CXXMethodDecl *DerivedMD, } CXXMethodDecl * -CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD) { +CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD, + bool MayBeBase) { if (this->getParent()->getCanonicalDecl() == RD->getCanonicalDecl()) return this; // Lookup doesn't work for destructors, so handle them separately. if (isa(this)) { CXXMethodDecl *MD = RD->getDestructor(); - if (MD && recursivelyOverrides(MD, this)) - return MD; + if (MD) { + if (recursivelyOverrides(MD, this)) + return MD; + if (MayBeBase && recursivelyOverrides(this, MD)) + return MD; + } return NULL; } @@ -1313,6 +1318,8 @@ CXXMethodDecl::getCorrespondingMethodInClass(const CXXRecordDecl *RD) { continue; if (recursivelyOverrides(MD, this)) return MD; + if (MayBeBase && recursivelyOverrides(this, MD)) + return MD; } for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(), diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp index aad0ca1..7f47604 100644 --- a/lib/AST/DeclPrinter.cpp +++ b/lib/AST/DeclPrinter.cpp @@ -26,7 +26,6 @@ using namespace clang; namespace { class DeclPrinter : public DeclVisitor { raw_ostream &Out; - ASTContext &Context; PrintingPolicy Policy; unsigned Indentation; bool PrintInstantiation; @@ -38,11 +37,9 @@ namespace { void Print(AccessSpecifier AS); public: - DeclPrinter(raw_ostream &Out, ASTContext &Context, - const PrintingPolicy &Policy, - unsigned Indentation = 0, - bool PrintInstantiation = false) - : Out(Out), Context(Context), Policy(Policy), Indentation(Indentation), + DeclPrinter(raw_ostream &Out, const PrintingPolicy &Policy, + unsigned Indentation = 0, bool PrintInstantiation = false) + : Out(Out), Policy(Policy), Indentation(Indentation), PrintInstantiation(PrintInstantiation) { } void VisitDeclContext(DeclContext *DC, bool Indent = true); @@ -96,7 +93,7 @@ void Decl::print(raw_ostream &Out, unsigned Indentation, void Decl::print(raw_ostream &Out, const PrintingPolicy &Policy, unsigned Indentation, bool PrintInstantiation) const { - DeclPrinter Printer(Out, getASTContext(), Policy, Indentation, PrintInstantiation); + DeclPrinter Printer(Out, Policy, Indentation, PrintInstantiation); Printer.Visit(const_cast(this)); } @@ -171,13 +168,17 @@ void DeclContext::dumpDeclContext() const { DC = DC->getParent(); ASTContext &Ctx = cast(DC)->getASTContext(); - DeclPrinter Printer(llvm::errs(), Ctx, Ctx.getPrintingPolicy(), 0); + DeclPrinter Printer(llvm::errs(), Ctx.getPrintingPolicy(), 0); 
Printer.VisitDeclContext(const_cast(this), /*Indent=*/false); } +void Decl::dump() const { + dump(llvm::errs()); +} + void Decl::dump(raw_ostream &Out) const { PrintingPolicy Policy = getASTContext().getPrintingPolicy(); - Policy.Dump = true; + Policy.DumpSourceManager = &getASTContext().getSourceManager(); print(Out, Policy, /*Indentation*/ 0, /*PrintInstantiation*/ true); } @@ -191,8 +192,8 @@ void DeclPrinter::prettyPrintAttributes(Decl *D) { if (D->hasAttrs()) { AttrVec &Attrs = D->getAttrs(); for (AttrVec::const_iterator i=Attrs.begin(), e=Attrs.end(); i!=e; ++i) { - Attr *A = *i; - A->printPretty(Out, Context); + Attr *A = *i; + A->printPretty(Out, Policy); } } } @@ -231,7 +232,7 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) { if (isa(*D)) continue; - if (!Policy.Dump) { + if (!Policy.DumpSourceManager) { // Skip over implicit declarations in pretty-printing mode. if (D->isImplicit()) continue; // FIXME: Ugly hack so we don't pretty-print the builtin declaration @@ -381,7 +382,7 @@ void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) { Out << *D; if (Expr *Init = D->getInitExpr()) { Out << " = "; - Init->printPretty(Out, Context, 0, Policy, Indentation); + Init->printPretty(Out, 0, Policy, Indentation); } } @@ -420,7 +421,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { Proto += "("; if (FT) { llvm::raw_string_ostream POut(Proto); - DeclPrinter ParamPrinter(POut, Context, SubPolicy, Indentation); + DeclPrinter ParamPrinter(POut, SubPolicy, Indentation); for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) { if (i) POut << ", "; ParamPrinter.VisitParmVarDecl(D->getParamDecl(i)); @@ -466,7 +467,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { if (FT->getExceptionSpecType() == EST_ComputedNoexcept) { Proto += "("; llvm::raw_string_ostream EOut(Proto); - FT->getNoexceptExpr()->printPretty(EOut, Context, 0, SubPolicy, + FT->getNoexceptExpr()->printPretty(EOut, 0, SubPolicy, Indentation); EOut.flush(); Proto += EOut.str(); @@ -522,7 +523,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { SimpleInit = Init; if (SimpleInit) - SimpleInit->printPretty(Out, Context, 0, Policy, Indentation); + SimpleInit->printPretty(Out, 0, Policy, Indentation); else { for (unsigned I = 0; I != NumArgs; ++I) { if (isa(Args[I])) @@ -530,7 +531,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { if (I) Out << ", "; - Args[I]->printPretty(Out, Context, 0, Policy, Indentation); + Args[I]->printPretty(Out, 0, Policy, Indentation); } } } @@ -554,7 +555,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { // This is a K&R function definition, so we need to print the // parameters. 
Out << '\n'; - DeclPrinter ParamPrinter(Out, Context, SubPolicy, Indentation); + DeclPrinter ParamPrinter(Out, SubPolicy, Indentation); Indentation += Policy.Indentation; for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) { Indent(); @@ -565,7 +566,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) { } else Out << ' '; - D->getBody()->printPretty(Out, Context, 0, SubPolicy, Indentation); + D->getBody()->printPretty(Out, 0, SubPolicy, Indentation); Out << '\n'; } } @@ -580,7 +581,7 @@ void DeclPrinter::VisitFieldDecl(FieldDecl *D) { if (D->isBitField()) { Out << " : "; - D->getBitWidth()->printPretty(Out, Context, 0, Policy, Indentation); + D->getBitWidth()->printPretty(Out, 0, Policy, Indentation); } Expr *Init = D->getInClassInitializer(); @@ -589,7 +590,7 @@ void DeclPrinter::VisitFieldDecl(FieldDecl *D) { Out << " "; else Out << " = "; - Init->printPretty(Out, Context, 0, Policy, Indentation); + Init->printPretty(Out, 0, Policy, Indentation); } prettyPrintAttributes(D); } @@ -625,7 +626,7 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) { else if (D->getInitStyle() == VarDecl::CInit) { Out << " = "; } - Init->printPretty(Out, Context, 0, Policy, Indentation); + Init->printPretty(Out, 0, Policy, Indentation); if (D->getInitStyle() == VarDecl::CallInit) Out << ")"; } @@ -639,7 +640,7 @@ void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) { void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) { Out << "__asm ("; - D->getAsmString()->printPretty(Out, Context, 0, Policy, Indentation); + D->getAsmString()->printPretty(Out, 0, Policy, Indentation); Out << ")"; } @@ -650,9 +651,9 @@ void DeclPrinter::VisitImportDecl(ImportDecl *D) { void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) { Out << "static_assert("; - D->getAssertExpr()->printPretty(Out, Context, 0, Policy, Indentation); + D->getAssertExpr()->printPretty(Out, 0, Policy, Indentation); Out << ", "; - D->getMessage()->printPretty(Out, Context, 0, Policy, Indentation); + D->getMessage()->printPretty(Out, 0, Policy, Indentation); Out << ")"; } @@ -786,8 +787,7 @@ void DeclPrinter::PrintTemplateParameters( Args->get(i).print(Policy, Out); } else if (NTTP->hasDefaultArgument()) { Out << " = "; - NTTP->getDefaultArgument()->printPretty(Out, Context, 0, Policy, - Indentation); + NTTP->getDefaultArgument()->printPretty(Out, 0, Policy, Indentation); } } else if (const TemplateTemplateParmDecl *TTPD = dyn_cast(Param)) { @@ -871,7 +871,7 @@ void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) { if (OMD->getBody()) { Out << ' '; - OMD->getBody()->printPretty(Out, Context, 0, Policy); + OMD->getBody()->printPretty(Out, 0, Policy); Out << '\n'; } } diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp index 5aebc2b..a7e8999 100644 --- a/lib/AST/DeclTemplate.cpp +++ b/lib/AST/DeclTemplate.cpp @@ -43,7 +43,8 @@ TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc, unsigned NumParams, SourceLocation RAngleLoc) { unsigned Size = sizeof(TemplateParameterList) + sizeof(NamedDecl *) * NumParams; - unsigned Align = llvm::AlignOf::Alignment; + unsigned Align = std::max(llvm::alignOf(), + llvm::alignOf()); void *Mem = C.Allocate(Size, Align); return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params, NumParams, RAngleLoc); diff --git a/lib/AST/DumpXML.cpp b/lib/AST/DumpXML.cpp index c1432b5..84f3fc4 100644 --- a/lib/AST/DumpXML.cpp +++ b/lib/AST/DumpXML.cpp @@ -1022,12 +1022,17 @@ struct XMLDumper : public XMLDeclVisitor, }; } +void Decl::dumpXML() const { + dump(llvm::errs()); +} 
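A minimal sketch of the adjusted printing entry points, assuming Ctx, D, and E are an ASTContext, a Decl*, and an Expr* already in hand; the function name is illustrative.

  #include "clang/AST/ASTContext.h"
  #include "clang/AST/Decl.h"
  #include "clang/AST/Expr.h"
  #include "clang/AST/PrettyPrinter.h"
  #include "llvm/Support/raw_ostream.h"

  void printBoth(clang::ASTContext &Ctx, clang::Decl *D, clang::Expr *E) {
    clang::PrintingPolicy Policy(Ctx.getLangOpts());
    // Statements and expressions no longer take an ASTContext here.
    E->printPretty(llvm::errs(), /*Helper=*/0, Policy);
    // Decl::dump() now exists without arguments, defaults to llvm::errs(),
    // and passes the SourceManager via PrintingPolicy::DumpSourceManager.
    D->dump();
  }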
+ void Decl::dumpXML(raw_ostream &out) const { XMLDumper(out, getASTContext()).dispatch(const_cast(this)); } #else /* ifndef NDEBUG */ +void Decl::dumpXML() const {} void Decl::dumpXML(raw_ostream &out) const {} #endif diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp index dbf267b..49b119b 100644 --- a/lib/AST/NestedNameSpecifier.cpp +++ b/lib/AST/NestedNameSpecifier.cpp @@ -18,6 +18,7 @@ #include "clang/AST/PrettyPrinter.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" +#include "llvm/Support/AlignOf.h" #include "llvm/Support/raw_ostream.h" #include @@ -33,7 +34,8 @@ NestedNameSpecifier::FindOrInsert(const ASTContext &Context, NestedNameSpecifier *NNS = Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos); if (!NNS) { - NNS = new (Context, 4) NestedNameSpecifier(Mockup); + NNS = new (Context, llvm::alignOf()) + NestedNameSpecifier(Mockup); Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos); } @@ -107,7 +109,9 @@ NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) { NestedNameSpecifier * NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) { if (!Context.GlobalNestedNameSpecifier) - Context.GlobalNestedNameSpecifier = new (Context, 4) NestedNameSpecifier(); + Context.GlobalNestedNameSpecifier = + new (Context, llvm::alignOf()) + NestedNameSpecifier(); return Context.GlobalNestedNameSpecifier; } @@ -630,4 +634,3 @@ NestedNameSpecifierLocBuilder::getWithLocInContext(ASTContext &Context) const { memcpy(Mem, Buffer, BufferSize); return NestedNameSpecifierLoc(Representation, Mem); } - diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp index c704cab..a5a3287 100644 --- a/lib/AST/RawCommentList.cpp +++ b/lib/AST/RawCommentList.cpp @@ -65,7 +65,7 @@ bool mergedCommentIsTrailingComment(StringRef Comment) { RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR, bool Merged) : Range(SR), RawTextValid(false), BriefTextValid(false), - IsAlmostTrailingComment(false), + IsAttached(false), IsAlmostTrailingComment(false), BeginLineValid(false), EndLineValid(false) { // Extract raw comment text, if possible. if (SR.getBegin() == SR.getEnd() || getRawText(SourceMgr).empty()) { @@ -87,16 +87,6 @@ RawComment::RawComment(const SourceManager &SourceMgr, SourceRange SR, } } -const Decl *RawComment::getDecl() const { - if (DeclOrParsedComment.isNull()) - return NULL; - - if (const Decl *D = DeclOrParsedComment.dyn_cast()) - return D; - - return DeclOrParsedComment.get()->getDecl(); -} - unsigned RawComment::getBeginLine(const SourceManager &SM) const { if (BeginLineValid) return BeginLine; @@ -169,7 +159,8 @@ const char *RawComment::extractBriefText(const ASTContext &Context) const { return BriefTextPtr; } -comments::FullComment *RawComment::parse(const ASTContext &Context) const { +comments::FullComment *RawComment::parse(const ASTContext &Context, + const Decl *D) const { // Make sure that RawText is valid. 
getRawText(Context.getSourceManager()); @@ -179,13 +170,11 @@ comments::FullComment *RawComment::parse(const ASTContext &Context) const { RawText.begin(), RawText.end()); comments::Sema S(Context.getAllocator(), Context.getSourceManager(), Context.getDiagnostics(), Traits); - S.setDecl(getDecl()); + S.setDecl(D); comments::Parser P(L, S, Context.getAllocator(), Context.getSourceManager(), Context.getDiagnostics(), Traits); - comments::FullComment *FC = P.parseFullComment(); - DeclOrParsedComment = FC; - return FC; + return P.parseFullComment(); } namespace { diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp index d877c3f..77452c9 100644 --- a/lib/AST/Stmt.cpp +++ b/lib/AST/Stmt.cpp @@ -584,22 +584,27 @@ AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple, } MSAsmStmt::MSAsmStmt(ASTContext &C, SourceLocation asmloc, - bool issimple, bool isvolatile, ArrayRef asmtoks, - ArrayRef lineends, StringRef asmstr, + SourceLocation lbraceloc, bool issimple, bool isvolatile, + ArrayRef asmtoks, ArrayRef inputs, + ArrayRef outputs, StringRef asmstr, ArrayRef clobbers, SourceLocation endloc) - : Stmt(MSAsmStmtClass), AsmLoc(asmloc), EndLoc(endloc), + : Stmt(MSAsmStmtClass), AsmLoc(asmloc), LBraceLoc(lbraceloc), EndLoc(endloc), AsmStr(asmstr.str()), IsSimple(issimple), IsVolatile(isvolatile), - NumAsmToks(asmtoks.size()), NumLineEnds(lineends.size()), - NumClobbers(clobbers.size()) { + NumAsmToks(asmtoks.size()), NumInputs(inputs.size()), + NumOutputs(outputs.size()), NumClobbers(clobbers.size()) { + + unsigned NumExprs = NumOutputs + NumInputs; + + Names = new (C) IdentifierInfo*[NumExprs]; + for (unsigned i = 0, e = NumOutputs; i != e; ++i) + Names[i] = outputs[i]; + for (unsigned i = NumOutputs, e = NumExprs; i != e; ++i) + Names[i] = inputs[i]; AsmToks = new (C) Token[NumAsmToks]; for (unsigned i = 0, e = NumAsmToks; i != e; ++i) AsmToks[i] = asmtoks[i]; - LineEnds = new (C) unsigned[NumLineEnds]; - for (unsigned i = 0, e = NumLineEnds; i != e; ++i) - LineEnds[i] = lineends[i]; - Clobbers = new (C) StringRef[NumClobbers]; for (unsigned i = 0, e = NumClobbers; i != e; ++i) { // FIXME: Avoid the allocation/copy if at all possible. diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp index 2f7cb55..c0960ce 100644 --- a/lib/AST/StmtPrinter.cpp +++ b/lib/AST/StmtPrinter.cpp @@ -30,17 +30,15 @@ using namespace clang; namespace { class StmtPrinter : public StmtVisitor { raw_ostream &OS; - ASTContext &Context; unsigned IndentLevel; clang::PrinterHelper* Helper; PrintingPolicy Policy; public: - StmtPrinter(raw_ostream &os, ASTContext &C, PrinterHelper* helper, + StmtPrinter(raw_ostream &os, PrinterHelper* helper, const PrintingPolicy &Policy, unsigned Indentation = 0) - : OS(os), Context(C), IndentLevel(Indentation), Helper(helper), - Policy(Policy) {} + : OS(os), IndentLevel(Indentation), Helper(helper), Policy(Policy) {} void PrintStmt(Stmt *S) { PrintStmt(S, Policy.Indentation); @@ -181,7 +179,7 @@ void StmtPrinter::VisitAttributedStmt(AttributedStmt *Node) { first = false; } // TODO: check this - (*it)->printPretty(OS, Context); + (*it)->printPretty(OS, Policy); } OS << "]] "; PrintStmt(Node->getSubStmt(), 0); @@ -432,7 +430,12 @@ void StmtPrinter::VisitAsmStmt(AsmStmt *Node) { void StmtPrinter::VisitMSAsmStmt(MSAsmStmt *Node) { // FIXME: Implement MS style inline asm statement printer. 
- Indent() << "asm ()"; + Indent() << "__asm "; + if (Node->hasBraces()) + OS << "{\n"; + OS << *(Node->getAsmString()) << "\n"; + if (Node->hasBraces()) + Indent() << "}\n"; } void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) { @@ -1390,7 +1393,7 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) { std::string TypeS; if (Expr *Size = E->getArraySize()) { llvm::raw_string_ostream s(TypeS); - Size->printPretty(s, Context, Helper, Policy); + Size->printPretty(s, Helper, Policy); s.flush(); TypeS = "[" + TypeS + "]"; } @@ -1799,13 +1802,12 @@ void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) { // Stmt method implementations //===----------------------------------------------------------------------===// -void Stmt::dumpPretty(ASTContext& Context) const { - printPretty(llvm::errs(), Context, 0, - PrintingPolicy(Context.getLangOpts())); +void Stmt::dumpPretty(ASTContext &Context) const { + printPretty(llvm::errs(), 0, PrintingPolicy(Context.getLangOpts())); } -void Stmt::printPretty(raw_ostream &OS, ASTContext& Context, - PrinterHelper* Helper, +void Stmt::printPretty(raw_ostream &OS, + PrinterHelper *Helper, const PrintingPolicy &Policy, unsigned Indentation) const { if (this == 0) { @@ -1813,12 +1815,12 @@ void Stmt::printPretty(raw_ostream &OS, ASTContext& Context, return; } - if (Policy.Dump && &Context) { - dump(OS, Context.getSourceManager()); + if (Policy.DumpSourceManager) { + dump(OS, *Policy.DumpSourceManager); return; } - StmtPrinter P(OS, Context, Helper, Policy, Indentation); + StmtPrinter P(OS, Helper, Policy, Indentation); P.Visit(const_cast(this)); } diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp index f8dd396..95ff4ed 100644 --- a/lib/AST/TemplateBase.cpp +++ b/lib/AST/TemplateBase.cpp @@ -556,8 +556,7 @@ const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB, const ASTTemplateArgumentListInfo * ASTTemplateArgumentListInfo::Create(ASTContext &C, const TemplateArgumentListInfo &List) { - std::size_t size = sizeof(CXXDependentScopeMemberExpr) + - ASTTemplateArgumentListInfo::sizeFor(List.size()); + std::size_t size = ASTTemplateArgumentListInfo::sizeFor(List.size()); void *Mem = C.Allocate(size, llvm::alignOf()); ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo(); TAI->initializeFrom(List); @@ -642,6 +641,7 @@ ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) { std::size_t ASTTemplateKWAndArgsInfo::sizeFor(unsigned NumTemplateArgs) { // Add space for the template keyword location. + // FIXME: There's room for this in the padding before the template args in + // 64-bit builds. 
return Base::sizeFor(NumTemplateArgs) + sizeof(SourceLocation); } - diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp index e689502..8065b2d 100644 --- a/lib/Basic/Diagnostic.cpp +++ b/lib/Basic/Diagnostic.cpp @@ -145,6 +145,9 @@ DiagnosticsEngine::GetDiagStatePointForLoc(SourceLocation L) const { assert(DiagStatePoints.front().Loc.isInvalid() && "Should have created a DiagStatePoint for command-line"); + if (!SourceMgr) + return DiagStatePoints.end() - 1; + FullSourceLoc Loc(L, *SourceMgr); if (Loc.isInvalid()) return DiagStatePoints.end() - 1; @@ -167,8 +170,9 @@ void DiagnosticsEngine::setDiagnosticMapping(diag::kind Diag, diag::Mapping Map, (Map == diag::MAP_FATAL || Map == diag::MAP_ERROR)) && "Cannot map errors into warnings!"); assert(!DiagStatePoints.empty()); + assert((L.isInvalid() || SourceMgr) && "No SourceMgr for valid location"); - FullSourceLoc Loc(L, *SourceMgr); + FullSourceLoc Loc = SourceMgr? FullSourceLoc(L, *SourceMgr) : FullSourceLoc(); FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc; // Don't allow a mapping to a warning override an error/fatal mapping. if (Map == diag::MAP_WARNING) { diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp index 883864f..1d495f1 100644 --- a/lib/Basic/Targets.cpp +++ b/lib/Basic/Targets.cpp @@ -3083,9 +3083,8 @@ public: unsigned &NumAliases) const; virtual bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const { - // FIXME: Check if this is complete switch (*Name) { - default: + default: break; case 'l': // r0-r7 case 'h': // r8-r15 case 'w': // VFP Floating point register single precision diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp index 65c782e..59ed313 100644 --- a/lib/CodeGen/CGBuiltin.cpp +++ b/lib/CodeGen/CGBuiltin.cpp @@ -229,6 +229,35 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD, return RValue::get(Result); } + + case Builtin::BI__builtin_conj: + case Builtin::BI__builtin_conjf: + case Builtin::BI__builtin_conjl: { + ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); + Value *Real = ComplexVal.first; + Value *Imag = ComplexVal.second; + Value *Zero = + Imag->getType()->isFPOrFPVectorTy() + ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType()) + : llvm::Constant::getNullValue(Imag->getType()); + + Imag = Builder.CreateFSub(Zero, Imag, "sub"); + return RValue::getComplex(std::make_pair(Real, Imag)); + } + case Builtin::BI__builtin_creal: + case Builtin::BI__builtin_crealf: + case Builtin::BI__builtin_creall: { + ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); + return RValue::get(ComplexVal.first); + } + + case Builtin::BI__builtin_cimag: + case Builtin::BI__builtin_cimagf: + case Builtin::BI__builtin_cimagl: { + ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0)); + return RValue::get(ComplexVal.second); + } + case Builtin::BI__builtin_ctzs: case Builtin::BI__builtin_ctz: case Builtin::BI__builtin_ctzl: @@ -1720,8 +1749,29 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, Ops.push_back(GetPointeeAlignmentValue(E->getArg(0))); return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty), Ops, "vld1"); - case ARM::BI__builtin_neon_vld1_lane_v: - case ARM::BI__builtin_neon_vld1q_lane_v: { + case ARM::BI__builtin_neon_vld1q_lane_v: + // Handle 64-bit integer elements as a special case. Use shuffles of + // one-element vectors to avoid poor code for i64 in the backend. + if (VTy->getElementType()->isIntegerTy(64)) { + // Extract the other lane. 
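A minimal C-level sketch of the builtins the new CGBuiltin cases lower directly to complex-pair operations (clang also accepts _Complex in C++ as a GNU extension):

  double _Complex my_conj(double _Complex z) { return __builtin_conj(z);  }
  double          my_real(double _Complex z) { return __builtin_creal(z); }
  double          my_imag(double _Complex z) { return __builtin_cimag(z); }
  /* The f/l suffixed forms (__builtin_conjf, __builtin_creall, ...) take the
     float and long double _Complex variants and hit the same cases above. */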
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + int Lane = cast(Ops[2])->getZExtValue(); + Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane)); + Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); + // Load the value as a one-element vector. + Ty = llvm::VectorType::get(VTy->getElementType(), 1); + Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty); + Value *Ld = Builder.CreateCall2(F, Ops[0], + GetPointeeAlignmentValue(E->getArg(0))); + // Combine them. + SmallVector Indices; + Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane)); + Indices.push_back(ConstantInt::get(Int32Ty, Lane)); + SV = llvm::ConstantVector::get(Indices); + return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane"); + } + // fall through + case ARM::BI__builtin_neon_vld1_lane_v: { Ops[1] = Builder.CreateBitCast(Ops[1], Ty); Ty = llvm::PointerType::getUnqual(VTy->getElementType()); Ops[0] = Builder.CreateBitCast(Ops[0], Ty); @@ -2086,8 +2136,19 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID, Ops.push_back(GetPointeeAlignmentValue(E->getArg(0))); return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty), Ops, ""); - case ARM::BI__builtin_neon_vst1_lane_v: - case ARM::BI__builtin_neon_vst1q_lane_v: { + case ARM::BI__builtin_neon_vst1q_lane_v: + // Handle 64-bit integer elements as a special case. Use a shuffle to get + // a one-element vector and avoid poor code for i64 in the backend. + if (VTy->getElementType()->isIntegerTy(64)) { + Ops[1] = Builder.CreateBitCast(Ops[1], Ty); + Value *SV = llvm::ConstantVector::get(cast(Ops[2])); + Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV); + Ops[2] = GetPointeeAlignmentValue(E->getArg(0)); + return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, + Ops[1]->getType()), Ops); + } + // fall through + case ARM::BI__builtin_neon_vst1_lane_v: { Ops[1] = Builder.CreateBitCast(Ops[1], Ty); Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]); Ty = llvm::PointerType::getUnqual(Ops[1]->getType()); diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp index 00127ac..fd1c7a3 100644 --- a/lib/CodeGen/CGDebugInfo.cpp +++ b/lib/CodeGen/CGDebugInfo.cpp @@ -94,8 +94,10 @@ llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) { llvm::DenseMap::iterator I = RegionMap.find(Context); - if (I != RegionMap.end()) - return llvm::DIDescriptor(dyn_cast_or_null(&*I->second)); + if (I != RegionMap.end()) { + llvm::Value *V = I->second; + return llvm::DIDescriptor(dyn_cast_or_null(V)); + } // Check namespace. if (const NamespaceDecl *NSDecl = dyn_cast(Context)) @@ -227,8 +229,8 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) { if (it != DIFileCache.end()) { // Verify that the information still exists. - if (&*it->second) - return llvm::DIFile(cast(it->second)); + if (llvm::Value *V = it->second) + return llvm::DIFile(cast(V)); } llvm::DIFile F = DBuilder.createFile(PLoc.getFilename(), getCurrentDirname()); @@ -525,8 +527,10 @@ llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) { // See if we already have the parent. llvm::DenseMap::iterator I = RegionMap.find(Context); - if (I != RegionMap.end()) - return llvm::DIDescriptor(dyn_cast_or_null(&*I->second)); + if (I != RegionMap.end()) { + llvm::Value *V = I->second; + return llvm::DIDescriptor(dyn_cast_or_null(V)); + } // Check namespace. 
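A minimal sketch of an intrinsic call that takes the new 64-bit-element path; it assumes an ARM target with NEON and the usual <arm_neon.h> mapping of vld1q_lane_u64 onto __builtin_neon_vld1q_lane_v.

  #include <arm_neon.h>

  uint64x2_t load_lane1(const uint64_t *p, uint64x2_t v) {
    /* Loads lane 1; codegen now goes through a one-element vector load plus
       shuffles instead of an i64 extract/insert pair. */
    return vld1q_lane_u64(p, v, 1);
  }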
if (const NamespaceDecl *NSDecl = dyn_cast(Context)) @@ -1660,8 +1664,8 @@ llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) { TypeCache.find(Ty.getAsOpaquePtr()); if (it != TypeCache.end()) { // Verify that the debug info still exists. - if (&*it->second) - return llvm::DIType(cast(it->second)); + if (llvm::Value *V = it->second) + return llvm::DIType(cast(V)); } return llvm::DIType(); @@ -1679,8 +1683,8 @@ llvm::DIType CGDebugInfo::getCompletedTypeOrNull(QualType Ty) { CompletedTypeCache.find(Ty.getAsOpaquePtr()); if (it != CompletedTypeCache.end()) { // Verify that the debug info still exists. - if (&*it->second) - return llvm::DIType(cast(it->second)); + if (llvm::Value *V = it->second) + return llvm::DIType(cast(V)); } return llvm::DIType(); @@ -1942,7 +1946,8 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) { llvm::DenseMap::iterator MI = SPCache.find(FD->getCanonicalDecl()); if (MI != SPCache.end()) { - llvm::DISubprogram SP(dyn_cast_or_null(&*MI->second)); + llvm::Value *V = MI->second; + llvm::DISubprogram SP(dyn_cast_or_null(V)); if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition()) return SP; } @@ -1953,7 +1958,8 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) { llvm::DenseMap::iterator MI = SPCache.find(NextFD->getCanonicalDecl()); if (MI != SPCache.end()) { - llvm::DISubprogram SP(dyn_cast_or_null(&*MI->second)); + llvm::Value *V = MI->second; + llvm::DISubprogram SP(dyn_cast_or_null(V)); if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition()) return SP; } @@ -2013,7 +2019,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType, llvm::DenseMap::iterator FI = SPCache.find(FD->getCanonicalDecl()); if (FI != SPCache.end()) { - llvm::DIDescriptor SP(dyn_cast_or_null(&*FI->second)); + llvm::Value *V = FI->second; + llvm::DIDescriptor SP(dyn_cast_or_null(V)); if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) { llvm::MDNode *SPN = SP; LexicalBlockStack.push_back(SPN); @@ -2701,15 +2708,15 @@ void CGDebugInfo::finalize(void) { = ReplaceMap.begin(), VE = ReplaceMap.end(); VI != VE; ++VI) { llvm::DIType Ty, RepTy; // Verify that the debug info still exists. - if (&*VI->second) - Ty = llvm::DIType(cast(VI->second)); + if (llvm::Value *V = VI->second) + Ty = llvm::DIType(cast(V)); llvm::DenseMap::iterator it = TypeCache.find(VI->first); if (it != TypeCache.end()) { // Verify that the debug info still exists. - if (&*it->second) - RepTy = llvm::DIType(cast(it->second)); + if (llvm::Value *V = it->second) + RepTy = llvm::DIType(cast(V)); } if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify()) { diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp index ecee7b4..1fe4c18 100644 --- a/lib/CodeGen/CGExpr.cpp +++ b/lib/CodeGen/CGExpr.cpp @@ -938,6 +938,50 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, unsigned Alignment, QualType Ty, llvm::MDNode *TBAAInfo) { + + // For better performance, handle vector loads differently. + if (Ty->isVectorType()) { + llvm::Value *V; + const llvm::Type *EltTy = + cast(Addr->getType())->getElementType(); + + const llvm::VectorType *VTy = cast(EltTy); + + // Handle vectors of size 3, like size 4 for better performance. + if (VTy->getNumElements() == 3) { + + // Bitcast to vec4 type. 
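A minimal sketch of source that exercises this vec3 path; the ext_vector_type typedef stands in for an OpenCL float3, and the function name is illustrative.

  typedef float float3 __attribute__((ext_vector_type(3)));

  void copy3(float3 *dst, const float3 *src) {
    /* The load is emitted as a <4 x float> load followed by a shuffle down to
       <3 x float>; the store does the reverse widening shuffle. */
    *dst = *src;
  }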
+ llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(), + 4); + llvm::PointerType *ptVec4Ty = + llvm::PointerType::get(vec4Ty, + (cast( + Addr->getType()))->getAddressSpace()); + llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty, + "castToVec4"); + // Now load value. + llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4"); + + // Shuffle vector to get vec3. + llvm::SmallVector Mask; + Mask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(getLLVMContext()), + 0)); + Mask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(getLLVMContext()), + 1)); + Mask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(getLLVMContext()), + 2)); + + llvm::Value *MaskV = llvm::ConstantVector::get(Mask); + V = Builder.CreateShuffleVector(LoadVal, + llvm::UndefValue::get(vec4Ty), + MaskV, "extractVec"); + return EmitFromMemory(V, Ty); + } + } + llvm::LoadInst *Load = Builder.CreateLoad(Addr); if (Volatile) Load->setVolatile(true); @@ -984,6 +1028,42 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, QualType Ty, llvm::MDNode *TBAAInfo, bool isInit) { + + // Handle vectors differently to get better performance. + if (Ty->isVectorType()) { + llvm::Type *SrcTy = Value->getType(); + llvm::VectorType *VecTy = cast(SrcTy); + // Handle vec3 special. + if (VecTy->getNumElements() == 3) { + llvm::LLVMContext &VMContext = getLLVMContext(); + + // Our source is a vec3, do a shuffle vector to make it a vec4. + llvm::SmallVector Mask; + Mask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), + 0)); + Mask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), + 1)); + Mask.push_back(llvm::ConstantInt::get( + llvm::Type::getInt32Ty(VMContext), + 2)); + Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext))); + + llvm::Value *MaskV = llvm::ConstantVector::get(Mask); + Value = Builder.CreateShuffleVector(Value, + llvm::UndefValue::get(VecTy), + MaskV, "extractVec"); + SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4); + } + llvm::PointerType *DstPtr = cast(Addr->getType()); + if (DstPtr->getElementType() != SrcTy) { + llvm::Type *MemTy = + llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace()); + Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp"); + } + } + Value = EmitToMemory(Value, Ty); llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile); diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp index 7c2c9f1..31ea1b5 100644 --- a/lib/CodeGen/CGExprCXX.cpp +++ b/lib/CodeGen/CGExprCXX.cpp @@ -123,7 +123,14 @@ static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context, return false; } - + + // We can devirtualize calls on an object accessed by a class member access + // expression, since by C++11 [basic.life]p6 we know that it can't refer to + // a derived class object constructed in the same location. + if (const MemberExpr *ME = dyn_cast(Base)) + if (const ValueDecl *VD = dyn_cast(ME->getMemberDecl())) + return VD->getType()->isRecordType(); + // We can always devirtualize calls on temporary object expressions. 
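A minimal sketch of the call shape this new case devirtualizes: the object is named by a class member access, so C++11 [basic.life]p6 rules out its being a derived-class object.

  struct A { virtual void f(); };
  struct B { A a; };

  void g(B &b) {
    b.a.f();   // now emitted as a direct call to A::f, with no vtable load
  }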
if (isa(Base)) return true; diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp index 467c779..d78908d 100644 --- a/lib/CodeGen/CGStmt.cpp +++ b/lib/CodeGen/CGStmt.cpp @@ -1691,14 +1691,36 @@ void CodeGenFunction::EmitMSAsmStmt(const MSAsmStmt &S) { std::vector Args; std::vector ArgTypes; + std::string Constraints; + + // Clobbers + for (unsigned i = 0, e = S.getNumClobbers(); i != e; ++i) { + StringRef Clobber = S.getClobber(i); + + if (Clobber != "memory" && Clobber != "cc") + Clobber = Target.getNormalizedGCCRegisterName(Clobber); + + if (i != 0) + Constraints += ','; + + Constraints += "~{"; + Constraints += Clobber; + Constraints += '}'; + } + // Add machine specific clobbers std::string MachineClobbers = Target.getClobbers(); + if (!MachineClobbers.empty()) { + if (!Constraints.empty()) + Constraints += ','; + Constraints += MachineClobbers; + } llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, ArgTypes, false); llvm::InlineAsm *IA = - llvm::InlineAsm::get(FTy, *S.getAsmString(), MachineClobbers, true); + llvm::InlineAsm::get(FTy, *S.getAsmString(), Constraints, true); llvm::CallInst *Result = Builder.CreateCall(IA, Args); Result->addAttribute(~0, llvm::Attribute::NoUnwind); Result->addAttribute(~0, llvm::Attribute::IANSDialect); diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h index a46f313..c2b8e4d 100644 --- a/lib/CodeGen/CGValue.h +++ b/lib/CodeGen/CGValue.h @@ -128,7 +128,7 @@ class LValue { // The alignment to use when accessing this lvalue. (For vector elements, // this is the alignment of the whole vector.) - unsigned short Alignment; + int64_t Alignment; // objective-c's ivar bool Ivar:1; diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp index b4234cf..ed67f7b 100644 --- a/lib/Driver/Tools.cpp +++ b/lib/Driver/Tools.cpp @@ -5723,6 +5723,9 @@ void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA, CmdArgs.push_back(Args.MakeArgString(Plugin)); } + if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) + CmdArgs.push_back("--no-demangle"); + AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs); if (D.CCCIsCXX && diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp index bb1a4e6..0f0d835 100644 --- a/lib/Frontend/ASTConsumers.cpp +++ b/lib/Frontend/ASTConsumers.cpp @@ -89,7 +89,7 @@ namespace { class ASTDeclNodeLister : public ASTConsumer, public RecursiveASTVisitor { - typedef RecursiveASTVisitor base; + typedef RecursiveASTVisitor base; public: ASTDeclNodeLister(raw_ostream *Out = NULL) diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp index 58a6b8d..3e66613 100644 --- a/lib/Frontend/CacheTokens.cpp +++ b/lib/Frontend/CacheTokens.cpp @@ -447,7 +447,7 @@ Offset PTHWriter::EmitCachedSpellings() { void PTHWriter::GeneratePTH(const std::string &MainFile) { // Generate the prologue. - Out << "cfe-pth"; + Out << "cfe-pth" << '\0'; Emit32(PTHManager::Version); // Leave 4 words for the prologue. diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp index f104f96..67738e9 100644 --- a/lib/Lex/PTHLexer.cpp +++ b/lib/Lex/PTHLexer.cpp @@ -452,14 +452,14 @@ PTHManager *PTHManager::Create(const std::string &file, const unsigned char *BufEnd = (unsigned char*)File->getBufferEnd(); // Check the prologue of the file. 
- if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 3 + 4) || - memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth") - 1) != 0) { + if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 4 + 4) || + memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth")) != 0) { Diags.Report(diag::err_invalid_pth_file) << file; return 0; } // Read the PTH version. - const unsigned char *p = BufBeg + (sizeof("cfe-pth") - 1); + const unsigned char *p = BufBeg + (sizeof("cfe-pth")); unsigned Version = ReadLE32(p); if (Version < PTHManager::Version) { diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp index b830d9c..cb865cc 100644 --- a/lib/Parse/ParseDecl.cpp +++ b/lib/Parse/ParseDecl.cpp @@ -68,7 +68,6 @@ static bool isAttributeLateParsed(const IdentifierInfo &II) { .Default(false); } - /// ParseGNUAttributes - Parse a non-empty attributes list. /// /// [GNU] attributes: @@ -193,6 +192,11 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName, ParseThreadSafetyAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc); return; } + // Type safety attributes have their own grammar. + if (AttrName->isStr("type_tag_for_datatype")) { + ParseTypeTagForDatatypeAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc); + return; + } ConsumeParen(); // ignore the left paren loc for now @@ -866,7 +870,8 @@ void Parser::ParseLexedAttributes(ParsingClass &Class) { void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition) { for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) { - LAs[i]->addDecl(D); + if (D) + LAs[i]->addDecl(D); ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition); delete LAs[i]; } @@ -1019,6 +1024,70 @@ void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName, *EndLoc = T.getCloseLocation(); } +void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, + SourceLocation AttrNameLoc, + ParsedAttributes &Attrs, + SourceLocation *EndLoc) { + assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('"); + + BalancedDelimiterTracker T(*this, tok::l_paren); + T.consumeOpen(); + + if (Tok.isNot(tok::identifier)) { + Diag(Tok, diag::err_expected_ident); + T.skipToEnd(); + return; + } + IdentifierInfo *ArgumentKind = Tok.getIdentifierInfo(); + SourceLocation ArgumentKindLoc = ConsumeToken(); + + if (Tok.isNot(tok::comma)) { + Diag(Tok, diag::err_expected_comma); + T.skipToEnd(); + return; + } + ConsumeToken(); + + SourceRange MatchingCTypeRange; + TypeResult MatchingCType = ParseTypeName(&MatchingCTypeRange); + if (MatchingCType.isInvalid()) { + T.skipToEnd(); + return; + } + + bool LayoutCompatible = false; + bool MustBeNull = false; + while (Tok.is(tok::comma)) { + ConsumeToken(); + if (Tok.isNot(tok::identifier)) { + Diag(Tok, diag::err_expected_ident); + T.skipToEnd(); + return; + } + IdentifierInfo *Flag = Tok.getIdentifierInfo(); + if (Flag->isStr("layout_compatible")) + LayoutCompatible = true; + else if (Flag->isStr("must_be_null")) + MustBeNull = true; + else { + Diag(Tok, diag::err_type_safety_unknown_flag) << Flag; + T.skipToEnd(); + return; + } + ConsumeToken(); // consume flag + } + + if (!T.consumeClose()) { + Attrs.addNewTypeTagForDatatype(&AttrName, AttrNameLoc, 0, AttrNameLoc, + ArgumentKind, ArgumentKindLoc, + MatchingCType.release(), LayoutCompatible, + MustBeNull, AttributeList::AS_GNU); + } + + if (EndLoc) + *EndLoc = T.getCloseLocation(); +} + /// DiagnoseProhibitedCXX11Attribute - We have found the opening square brackets /// of a C++11 attribute-specifier in a location where an attribute is not /// permitted. 
By C++11 [dcl.attr.grammar]p6, this is ill-formed. Diagnose this diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp index d2e4309..df9b996 100644 --- a/lib/Parse/ParseStmt.cpp +++ b/lib/Parse/ParseStmt.cpp @@ -1495,8 +1495,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) { StmtResult ForEachStmt; if (ForRange) { - ForRangeStmt = Actions.ActOnCXXForRangeStmt(ForLoc, T.getOpenLocation(), - FirstPart.take(), + ForRangeStmt = Actions.ActOnCXXForRangeStmt(ForLoc, FirstPart.take(), ForRangeInit.ColonLoc, ForRangeInit.RangeExpr.get(), T.getCloseLocation()); @@ -1505,7 +1504,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) { // Similarly, we need to do the semantic analysis for a for-range // statement immediately in order to close over temporaries correctly. } else if (ForEach) { - ForEachStmt = Actions.ActOnObjCForCollectionStmt(ForLoc, T.getOpenLocation(), + ForEachStmt = Actions.ActOnObjCForCollectionStmt(ForLoc, FirstPart.take(), Collection.take(), T.getCloseLocation()); @@ -1657,112 +1656,98 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) { SourceManager &SrcMgr = PP.getSourceManager(); SourceLocation EndLoc = AsmLoc; SmallVector AsmToks; - SmallVector LineEnds; - do { - bool InBraces = false; - unsigned short savedBraceCount = 0; - bool InAsmComment = false; - FileID FID; - unsigned LineNo = 0; - unsigned NumTokensRead = 0; - SourceLocation LBraceLoc; - - if (Tok.is(tok::l_brace)) { - // Braced inline asm: consume the opening brace. - InBraces = true; - savedBraceCount = BraceCount; - EndLoc = LBraceLoc = ConsumeBrace(); - ++NumTokensRead; - } else { - // Single-line inline asm; compute which line it is on. - std::pair ExpAsmLoc = - SrcMgr.getDecomposedExpansionLoc(EndLoc); - FID = ExpAsmLoc.first; - LineNo = SrcMgr.getLineNumber(FID, ExpAsmLoc.second); - } - SourceLocation TokLoc = Tok.getLocation(); - do { - // If we hit EOF, we're done, period. - if (Tok.is(tok::eof)) - break; + bool InBraces = false; + unsigned short savedBraceCount = 0; + bool InAsmComment = false; + FileID FID; + unsigned LineNo = 0; + unsigned NumTokensRead = 0; + SourceLocation LBraceLoc; + + if (Tok.is(tok::l_brace)) { + // Braced inline asm: consume the opening brace. + InBraces = true; + savedBraceCount = BraceCount; + EndLoc = LBraceLoc = ConsumeBrace(); + ++NumTokensRead; + } else { + // Single-line inline asm; compute which line it is on. + std::pair ExpAsmLoc = + SrcMgr.getDecomposedExpansionLoc(EndLoc); + FID = ExpAsmLoc.first; + LineNo = SrcMgr.getLineNumber(FID, ExpAsmLoc.second); + } - // The asm keyword is a statement separator, so multiple asm statements - // are allowed. - if (!InAsmComment && Tok.is(tok::kw_asm)) - break; + SourceLocation TokLoc = Tok.getLocation(); + do { + // If we hit EOF, we're done, period. + if (Tok.is(tok::eof)) + break; - if (!InAsmComment && Tok.is(tok::semi)) { - // A semicolon in an asm is the start of a comment. - InAsmComment = true; - if (InBraces) { - // Compute which line the comment is on. - std::pair ExpSemiLoc = - SrcMgr.getDecomposedExpansionLoc(TokLoc); - FID = ExpSemiLoc.first; - LineNo = SrcMgr.getLineNumber(FID, ExpSemiLoc.second); - } - } else if (!InBraces || InAsmComment) { - // If end-of-line is significant, check whether this token is on a - // new line. - std::pair ExpLoc = - SrcMgr.getDecomposedExpansionLoc(TokLoc); - if (ExpLoc.first != FID || - SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second) != LineNo) { - // If this is a single-line __asm, we're done. 
- if (!InBraces) - break; - // We're no longer in a comment. - InAsmComment = false; - } else if (!InAsmComment && Tok.is(tok::r_brace)) { - // Single-line asm always ends when a closing brace is seen. - // FIXME: This is compatible with Apple gcc's -fasm-blocks; what - // does MSVC do here? - break; - } + if (!InAsmComment && Tok.is(tok::semi)) { + // A semicolon in an asm is the start of a comment. + InAsmComment = true; + if (InBraces) { + // Compute which line the comment is on. + std::pair ExpSemiLoc = + SrcMgr.getDecomposedExpansionLoc(TokLoc); + FID = ExpSemiLoc.first; + LineNo = SrcMgr.getLineNumber(FID, ExpSemiLoc.second); } - if (!InAsmComment && InBraces && Tok.is(tok::r_brace) && - BraceCount == (savedBraceCount + 1)) { - // Consume the closing brace, and finish - EndLoc = ConsumeBrace(); + } else if (!InBraces || InAsmComment) { + // If end-of-line is significant, check whether this token is on a + // new line. + std::pair ExpLoc = + SrcMgr.getDecomposedExpansionLoc(TokLoc); + if (ExpLoc.first != FID || + SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second) != LineNo) { + // If this is a single-line __asm, we're done. + if (!InBraces) + break; + // We're no longer in a comment. + InAsmComment = false; + } else if (!InAsmComment && Tok.is(tok::r_brace)) { + // Single-line asm always ends when a closing brace is seen. + // FIXME: This is compatible with Apple gcc's -fasm-blocks; what + // does MSVC do here? break; } - - // Consume the next token; make sure we don't modify the brace count etc. - // if we are in a comment. - EndLoc = TokLoc; - if (InAsmComment) - PP.Lex(Tok); - else { - AsmToks.push_back(Tok); - ConsumeAnyToken(); - } - TokLoc = Tok.getLocation(); - ++NumTokensRead; - } while (1); - - LineEnds.push_back(AsmToks.size()); - - if (InBraces && BraceCount != savedBraceCount) { - // __asm without closing brace (this can happen at EOF). - Diag(Tok, diag::err_expected_rbrace); - Diag(LBraceLoc, diag::note_matching) << "{"; - return StmtError(); - } else if (NumTokensRead == 0) { - // Empty __asm. - Diag(Tok, diag::err_expected_lbrace); - return StmtError(); } - // Multiple adjacent asm's form together into a single asm statement - // in the AST. - if (!Tok.is(tok::kw_asm)) + if (!InAsmComment && InBraces && Tok.is(tok::r_brace) && + BraceCount == (savedBraceCount + 1)) { + // Consume the closing brace, and finish + EndLoc = ConsumeBrace(); break; - EndLoc = ConsumeToken(); + } + + // Consume the next token; make sure we don't modify the brace count etc. + // if we are in a comment. + EndLoc = TokLoc; + if (InAsmComment) + PP.Lex(Tok); + else { + AsmToks.push_back(Tok); + ConsumeAnyToken(); + } + TokLoc = Tok.getLocation(); + ++NumTokensRead; } while (1); + if (InBraces && BraceCount != savedBraceCount) { + // __asm without closing brace (this can happen at EOF). + Diag(Tok, diag::err_expected_rbrace); + Diag(LBraceLoc, diag::note_matching) << "{"; + return StmtError(); + } else if (NumTokensRead == 0) { + // Empty __asm. + Diag(Tok, diag::err_expected_lbrace); + return StmtError(); + } + // FIXME: We should be passing source locations for better diagnostics. - return Actions.ActOnMSAsmStmt(AsmLoc, llvm::makeArrayRef(AsmToks), - llvm::makeArrayRef(LineEnds), EndLoc); + return Actions.ActOnMSAsmStmt(AsmLoc, LBraceLoc, + llvm::makeArrayRef(AsmToks), EndLoc); } /// ParseAsmStatement - Parse a GNU extended asm statement. 
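(Aside: the rewritten ParseMicrosoftAsmStatement above collects tokens for both the braced and the single-line forms in one pass; roughly, the statements it accepts look like the following, assuming an x86 target and -fms-extensions:)

    void ms_asm_forms() {
      __asm mov eax, 2        // single-line form: the statement ends at end of line
      __asm {
        mov ebx, eax          ; a semicolon starts a comment running to end of line
        mov ecx, ebx
      }                       // braced form: the statement ends at the matching '}'
    }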
diff --git a/lib/Rewrite/RewriteModernObjC.cpp b/lib/Rewrite/RewriteModernObjC.cpp index 9f42fca..dcd003f 100644 --- a/lib/Rewrite/RewriteModernObjC.cpp +++ b/lib/Rewrite/RewriteModernObjC.cpp @@ -241,7 +241,7 @@ namespace { // Get the new text. std::string SStr; llvm::raw_string_ostream S(SStr); - New->printPretty(S, *Context, 0, PrintingPolicy(LangOpts)); + New->printPretty(S, 0, PrintingPolicy(LangOpts)); const std::string &Str = S.str(); // If replacement succeeded or warning disabled return with no warning. @@ -2549,8 +2549,7 @@ Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) { // The pretty printer for StringLiteral handles escape characters properly. std::string prettyBufS; llvm::raw_string_ostream prettyBuf(prettyBufS); - Exp->getString()->printPretty(prettyBuf, *Context, 0, - PrintingPolicy(LangOpts)); + Exp->getString()->printPretty(prettyBuf, 0, PrintingPolicy(LangOpts)); Preamble += prettyBuf.str(); Preamble += ","; Preamble += utostr(Exp->getString()->getByteLength()) + "};\n"; @@ -4341,7 +4340,7 @@ void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart, std::string SStr; llvm::raw_string_ostream constructorExprBuf(SStr); - GlobalConstructionExp->printPretty(constructorExprBuf, *Context, 0, + GlobalConstructionExp->printPretty(constructorExprBuf, 0, PrintingPolicy(LangOpts)); globalBuf += constructorExprBuf.str(); globalBuf += ";\n"; @@ -5610,7 +5609,7 @@ Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) { // Get the new text. std::string SStr; llvm::raw_string_ostream Buf(SStr); - Replacement->printPretty(Buf, *Context); + Replacement->printPretty(Buf); const std::string &Str = Buf.str(); printf("CAST = %s\n", &Str[0]); diff --git a/lib/Rewrite/RewriteObjC.cpp b/lib/Rewrite/RewriteObjC.cpp index 425cd77..37c17e6 100644 --- a/lib/Rewrite/RewriteObjC.cpp +++ b/lib/Rewrite/RewriteObjC.cpp @@ -227,7 +227,7 @@ namespace { // Get the new text. std::string SStr; llvm::raw_string_ostream S(SStr); - New->printPretty(S, *Context, 0, PrintingPolicy(LangOpts)); + New->printPretty(S, 0, PrintingPolicy(LangOpts)); const std::string &Str = S.str(); // If replacement succeeded or warning disabled return with no warning. @@ -1720,8 +1720,7 @@ Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) { CK, syncExpr); std::string syncExprBufS; llvm::raw_string_ostream syncExprBuf(syncExprBufS); - syncExpr->printPretty(syncExprBuf, *Context, 0, - PrintingPolicy(LangOpts)); + syncExpr->printPretty(syncExprBuf, 0, PrintingPolicy(LangOpts)); syncBuf += syncExprBuf.str(); syncBuf += ");"; @@ -2553,8 +2552,7 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) { // The pretty printer for StringLiteral handles escape characters properly. std::string prettyBufS; llvm::raw_string_ostream prettyBuf(prettyBufS); - Exp->getString()->printPretty(prettyBuf, *Context, 0, - PrintingPolicy(LangOpts)); + Exp->getString()->printPretty(prettyBuf, 0, PrintingPolicy(LangOpts)); Preamble += prettyBuf.str(); Preamble += ","; Preamble += utostr(Exp->getString()->getByteLength()) + "};\n"; @@ -4885,7 +4883,7 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) { // Get the new text. 
std::string SStr; llvm::raw_string_ostream Buf(SStr); - Replacement->printPretty(Buf, *Context); + Replacement->printPretty(Buf); const std::string &Str = Buf.str(); printf("CAST = %s\n", &Str[0]); diff --git a/lib/Sema/AttributeList.cpp b/lib/Sema/AttributeList.cpp index 0f209fd..7c79879 100644 --- a/lib/Sema/AttributeList.cpp +++ b/lib/Sema/AttributeList.cpp @@ -21,6 +21,8 @@ using namespace clang; size_t AttributeList::allocated_size() const { if (IsAvailability) return AttributeFactory::AvailabilityAllocSize; + else if (IsTypeTagForDatatype) + return AttributeFactory::TypeTagForDatatypeAllocSize; return (sizeof(AttributeList) + NumArgs * sizeof(Expr*)); } diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp index 8199751..d8d51e7 100644 --- a/lib/Sema/SemaCast.cpp +++ b/lib/Sema/SemaCast.cpp @@ -1477,6 +1477,21 @@ void Sema::CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, Diag(Range.getBegin(), DiagID) << SrcType << DestType << Range; } +static void DiagnoseCastOfObjCSEL(Sema &Self, const ExprResult &SrcExpr, + QualType DestType) { + QualType SrcType = SrcExpr.get()->getType(); + if (const PointerType *SrcPtrTy = SrcType->getAs()) + if (SrcPtrTy->isObjCSelType()) { + QualType DT = DestType; + if (isa(DestType)) + DT = DestType->getPointeeType(); + if (!DT.getUnqualifiedType()->isVoidType()) + Self.Diag(SrcExpr.get()->getExprLoc(), + diag::warn_cast_pointer_from_sel) + << SrcType << DestType << SrcExpr.get()->getSourceRange(); + } +} + static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, bool CStyle, const SourceRange &OpRange, @@ -1721,7 +1736,9 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr, if (CStyle && DestType->isObjCObjectPointerType()) { return TC_Success; } - + if (CStyle) + DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType); + // Not casting away constness, so the only remaining check is for compatible // pointer categories. @@ -2058,6 +2075,7 @@ void CastOperation::CheckCStyleCast() { return; } } + DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType); Kind = Self.PrepareScalarCast(SrcExpr, DestType); if (SrcExpr.isInvalid()) diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp index 2594648..2559f00 100644 --- a/lib/Sema/SemaChecking.cpp +++ b/lib/Sema/SemaChecking.cpp @@ -513,6 +513,13 @@ void Sema::checkCall(NamedDecl *FDecl, Expr **Args, I = FDecl->specific_attr_begin(), E = FDecl->specific_attr_end(); I != E; ++I) CheckNonNullArguments(*I, Args, Loc); + + // Type safety checking. 
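(Aside: the type-safety check wired into checkCall here is driven by the two new attributes added elsewhere in this patch; their source-level form is roughly the following, with the MPI-style names being purely illustrative:)

    // A type tag constant: "mpi" is the tag kind, 'int' is the C type it
    // stands for, and the initializer is the tag's magic value. The second
    // tag shows one of the optional flags accepted by the parser.
    static const int mpi_datatype_int
        __attribute__((type_tag_for_datatype(mpi, int))) = 1;
    static const int mpi_datatype_null
        __attribute__((type_tag_for_datatype(mpi, void, must_be_null))) = 0;

    // Argument 1 is a pointer whose pointee type must match the type tag
    // passed as argument 3.
    int MPI_Send(void *buf, int count, int datatype)
        __attribute__((pointer_with_type_tag(mpi, 1, 3)));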
+ for (specific_attr_iterator + i = FDecl->specific_attr_begin(), + e = FDecl->specific_attr_end(); i != e; ++i) { + CheckArgumentWithTypeTag(*i, Args); + } } /// CheckConstructorCall - Check a constructor call for correctness and safety @@ -3170,7 +3177,7 @@ void Sema::CheckStrlcpycatArguments(const CallExpr *Call, SmallString<128> sizeString; llvm::raw_svector_ostream OS(sizeString); OS << "sizeof("; - DstArg->printPretty(OS, Context, 0, getPrintingPolicy()); + DstArg->printPretty(OS, 0, getPrintingPolicy()); OS << ")"; Diag(OriginalSizeArg->getLocStart(), diag::note_strlcpycat_wrong_size) @@ -3267,10 +3274,10 @@ void Sema::CheckStrncatArguments(const CallExpr *CE, SmallString<128> sizeString; llvm::raw_svector_ostream OS(sizeString); OS << "sizeof("; - DstArg->printPretty(OS, Context, 0, getPrintingPolicy()); + DstArg->printPretty(OS, 0, getPrintingPolicy()); OS << ") - "; OS << "strlen("; - DstArg->printPretty(OS, Context, 0, getPrintingPolicy()); + DstArg->printPretty(OS, 0, getPrintingPolicy()); OS << ") - 1"; Diag(SL, diag::note_strncat_wrong_size) @@ -5468,3 +5475,410 @@ void Sema::DiagnoseEmptyLoopBody(const Stmt *S, Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); } } + +//===--- Layout compatibility ----------------------------------------------// + +namespace { + +bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); + +/// \brief Check if two enumeration types are layout-compatible. +bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { + // C++11 [dcl.enum] p8: + // Two enumeration types are layout-compatible if they have the same + // underlying type. + return ED1->isComplete() && ED2->isComplete() && + C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); +} + +/// \brief Check if two fields are layout-compatible. +bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, FieldDecl *Field2) { + if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) + return false; + + if (Field1->isBitField() != Field2->isBitField()) + return false; + + if (Field1->isBitField()) { + // Make sure that the bit-fields are the same length. + unsigned Bits1 = Field1->getBitWidthValue(C); + unsigned Bits2 = Field2->getBitWidthValue(C); + + if (Bits1 != Bits2) + return false; + } + + return true; +} + +/// \brief Check if two standard-layout structs are layout-compatible. +/// (C++11 [class.mem] p17) +bool isLayoutCompatibleStruct(ASTContext &C, + RecordDecl *RD1, + RecordDecl *RD2) { + // If both records are C++ classes, check that base classes match. + if (const CXXRecordDecl *D1CXX = dyn_cast(RD1)) { + // If one of records is a CXXRecordDecl we are in C++ mode, + // thus the other one is a CXXRecordDecl, too. + const CXXRecordDecl *D2CXX = cast(RD2); + // Check number of base classes. + if (D1CXX->getNumBases() != D2CXX->getNumBases()) + return false; + + // Check the base classes. + for (CXXRecordDecl::base_class_const_iterator + Base1 = D1CXX->bases_begin(), + BaseEnd1 = D1CXX->bases_end(), + Base2 = D2CXX->bases_begin(); + Base1 != BaseEnd1; + ++Base1, ++Base2) { + if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) + return false; + } + } else if (const CXXRecordDecl *D2CXX = dyn_cast(RD2)) { + // If only RD2 is a C++ class, it should have zero base classes. + if (D2CXX->getNumBases() > 0) + return false; + } + + // Check the fields. 
+ RecordDecl::field_iterator Field2 = RD2->field_begin(), + Field2End = RD2->field_end(), + Field1 = RD1->field_begin(), + Field1End = RD1->field_end(); + for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { + if (!isLayoutCompatible(C, *Field1, *Field2)) + return false; + } + if (Field1 != Field1End || Field2 != Field2End) + return false; + + return true; +} + +/// \brief Check if two standard-layout unions are layout-compatible. +/// (C++11 [class.mem] p18) +bool isLayoutCompatibleUnion(ASTContext &C, + RecordDecl *RD1, + RecordDecl *RD2) { + llvm::SmallPtrSet UnmatchedFields; + for (RecordDecl::field_iterator Field2 = RD2->field_begin(), + Field2End = RD2->field_end(); + Field2 != Field2End; ++Field2) { + UnmatchedFields.insert(*Field2); + } + + for (RecordDecl::field_iterator Field1 = RD1->field_begin(), + Field1End = RD1->field_end(); + Field1 != Field1End; ++Field1) { + llvm::SmallPtrSet::iterator + I = UnmatchedFields.begin(), + E = UnmatchedFields.end(); + + for ( ; I != E; ++I) { + if (isLayoutCompatible(C, *Field1, *I)) { + bool Result = UnmatchedFields.erase(*I); + (void) Result; + assert(Result); + break; + } + } + if (I == E) + return false; + } + + return UnmatchedFields.empty(); +} + +bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, RecordDecl *RD2) { + if (RD1->isUnion() != RD2->isUnion()) + return false; + + if (RD1->isUnion()) + return isLayoutCompatibleUnion(C, RD1, RD2); + else + return isLayoutCompatibleStruct(C, RD1, RD2); +} + +/// \brief Check if two types are layout-compatible in C++11 sense. +bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { + if (T1.isNull() || T2.isNull()) + return false; + + // C++11 [basic.types] p11: + // If two types T1 and T2 are the same type, then T1 and T2 are + // layout-compatible types. + if (C.hasSameType(T1, T2)) + return true; + + T1 = T1.getCanonicalType().getUnqualifiedType(); + T2 = T2.getCanonicalType().getUnqualifiedType(); + + const Type::TypeClass TC1 = T1->getTypeClass(); + const Type::TypeClass TC2 = T2->getTypeClass(); + + if (TC1 != TC2) + return false; + + if (TC1 == Type::Enum) { + return isLayoutCompatible(C, + cast(T1)->getDecl(), + cast(T2)->getDecl()); + } else if (TC1 == Type::Record) { + if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) + return false; + + return isLayoutCompatible(C, + cast(T1)->getDecl(), + cast(T2)->getDecl()); + } + + return false; +} +} + +//===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// + +namespace { +/// \brief Given a type tag expression find the type tag itself. +/// +/// \param TypeExpr Type tag expression, as it appears in user's code. +/// +/// \param VD Declaration of an identifier that appears in a type tag. +/// +/// \param MagicValue Type tag magic value. 
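(Aside: the isLayoutCompatible helpers above implement the C++11 layout-compatibility rules; informally, for example:)

    enum E1 : int { A };           // layout-compatible with E2: same underlying type
    enum E2 : int { B };

    struct S1 { int i; char c; };  // standard-layout, corresponding members compatible
    struct S2 { int x; char y; };  // => S1 and S2 are layout-compatible

    struct S3 { int i; unsigned u; };  // second member differs => not compatible with S1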
+bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, + const ValueDecl **VD, uint64_t *MagicValue) { + while(true) { + if (!TypeExpr) + return false; + + TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); + + switch (TypeExpr->getStmtClass()) { + case Stmt::UnaryOperatorClass: { + const UnaryOperator *UO = cast(TypeExpr); + if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { + TypeExpr = UO->getSubExpr(); + continue; + } + return false; + } + + case Stmt::DeclRefExprClass: { + const DeclRefExpr *DRE = cast(TypeExpr); + *VD = DRE->getDecl(); + return true; + } + + case Stmt::IntegerLiteralClass: { + const IntegerLiteral *IL = cast(TypeExpr); + llvm::APInt MagicValueAPInt = IL->getValue(); + if (MagicValueAPInt.getActiveBits() <= 64) { + *MagicValue = MagicValueAPInt.getZExtValue(); + return true; + } else + return false; + } + + case Stmt::BinaryConditionalOperatorClass: + case Stmt::ConditionalOperatorClass: { + const AbstractConditionalOperator *ACO = + cast(TypeExpr); + bool Result; + if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) { + if (Result) + TypeExpr = ACO->getTrueExpr(); + else + TypeExpr = ACO->getFalseExpr(); + continue; + } + return false; + } + + case Stmt::BinaryOperatorClass: { + const BinaryOperator *BO = cast(TypeExpr); + if (BO->getOpcode() == BO_Comma) { + TypeExpr = BO->getRHS(); + continue; + } + return false; + } + + default: + return false; + } + } +} + +/// \brief Retrieve the C type corresponding to type tag TypeExpr. +/// +/// \param TypeExpr Expression that specifies a type tag. +/// +/// \param MagicValues Registered magic values. +/// +/// \param FoundWrongKind Set to true if a type tag was found, but of a wrong +/// kind. +/// +/// \param TypeInfo Information about the corresponding C type. +/// +/// \returns true if the corresponding C type was found. +bool GetMatchingCType( + const IdentifierInfo *ArgumentKind, + const Expr *TypeExpr, const ASTContext &Ctx, + const llvm::DenseMap *MagicValues, + bool &FoundWrongKind, + Sema::TypeTagData &TypeInfo) { + FoundWrongKind = false; + + // Variable declaration that has type_tag_for_datatype attribute. 
+ const ValueDecl *VD = NULL; + + uint64_t MagicValue; + + if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue)) + return false; + + if (VD) { + for (specific_attr_iterator + I = VD->specific_attr_begin(), + E = VD->specific_attr_end(); + I != E; ++I) { + if (I->getArgumentKind() != ArgumentKind) { + FoundWrongKind = true; + return false; + } + TypeInfo.Type = I->getMatchingCType(); + TypeInfo.LayoutCompatible = I->getLayoutCompatible(); + TypeInfo.MustBeNull = I->getMustBeNull(); + return true; + } + return false; + } + + if (!MagicValues) + return false; + + llvm::DenseMap::const_iterator I = + MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); + if (I == MagicValues->end()) + return false; + + TypeInfo = I->second; + return true; +} +} // unnamed namespace + +void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, + uint64_t MagicValue, QualType Type, + bool LayoutCompatible, + bool MustBeNull) { + if (!TypeTagForDatatypeMagicValues) + TypeTagForDatatypeMagicValues.reset( + new llvm::DenseMap); + + TypeTagMagicValue Magic(ArgumentKind, MagicValue); + (*TypeTagForDatatypeMagicValues)[Magic] = + TypeTagData(Type, LayoutCompatible, MustBeNull); +} + +namespace { +bool IsSameCharType(QualType T1, QualType T2) { + const BuiltinType *BT1 = T1->getAs(); + if (!BT1) + return false; + + const BuiltinType *BT2 = T2->getAs(); + if (!BT2) + return false; + + BuiltinType::Kind T1Kind = BT1->getKind(); + BuiltinType::Kind T2Kind = BT2->getKind(); + + return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || + (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || + (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || + (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); +} +} // unnamed namespace + +void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, + const Expr * const *ExprArgs) { + const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); + bool IsPointerAttr = Attr->getIsPointer(); + + const Expr *TypeTagExpr = ExprArgs[Attr->getTypeTagIdx()]; + bool FoundWrongKind; + TypeTagData TypeInfo; + if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, + TypeTagForDatatypeMagicValues.get(), + FoundWrongKind, TypeInfo)) { + if (FoundWrongKind) + Diag(TypeTagExpr->getExprLoc(), + diag::warn_type_tag_for_datatype_wrong_kind) + << TypeTagExpr->getSourceRange(); + return; + } + + const Expr *ArgumentExpr = ExprArgs[Attr->getArgumentIdx()]; + if (IsPointerAttr) { + // Skip implicit cast of pointer to `void *' (as a function argument). + if (const ImplicitCastExpr *ICE = dyn_cast(ArgumentExpr)) + if (ICE->getType()->isVoidPointerType()) + ArgumentExpr = ICE->getSubExpr(); + } + QualType ArgumentType = ArgumentExpr->getType(); + + // Passing a `void*' pointer shouldn't trigger a warning. + if (IsPointerAttr && ArgumentType->isVoidPointerType()) + return; + + if (TypeInfo.MustBeNull) { + // Type tag with matching void type requires a null pointer. 
+ if (!ArgumentExpr->isNullPointerConstant(Context, + Expr::NPC_ValueDependentIsNotNull)) { + Diag(ArgumentExpr->getExprLoc(), + diag::warn_type_safety_null_pointer_required) + << ArgumentKind->getName() + << ArgumentExpr->getSourceRange() + << TypeTagExpr->getSourceRange(); + } + return; + } + + QualType RequiredType = TypeInfo.Type; + if (IsPointerAttr) + RequiredType = Context.getPointerType(RequiredType); + + bool mismatch = false; + if (!TypeInfo.LayoutCompatible) { + mismatch = !Context.hasSameType(ArgumentType, RequiredType); + + // C++11 [basic.fundamental] p1: + // Plain char, signed char, and unsigned char are three distinct types. + // + // But we treat plain `char' as equivalent to `signed char' or `unsigned + // char' depending on the current char signedness mode. + if (mismatch) + if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), + RequiredType->getPointeeType())) || + (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) + mismatch = false; + } else + if (IsPointerAttr) + mismatch = !isLayoutCompatible(Context, + ArgumentType->getPointeeType(), + RequiredType->getPointeeType()); + else + mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); + + if (mismatch) + Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) + << ArgumentType << ArgumentKind->getName() + << TypeInfo.LayoutCompatible << RequiredType + << ArgumentExpr->getSourceRange() + << TypeTagExpr->getSourceRange(); +} + diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp index 9fa757d..adf1327 100644 --- a/lib/Sema/SemaCodeComplete.cpp +++ b/lib/Sema/SemaCodeComplete.cpp @@ -4476,7 +4476,6 @@ static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) { Builder.AddResultTypeChunk("NSDictionary *"); Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,"{")); Builder.AddPlaceholderChunk("key"); - Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace); Builder.AddChunk(CodeCompletionString::CK_Colon); Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace); Builder.AddPlaceholderChunk("object, ..."); diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp index 3aae99a..ea181de 100644 --- a/lib/Sema/SemaDecl.cpp +++ b/lib/Sema/SemaDecl.cpp @@ -6174,6 +6174,7 @@ namespace { Decl *OrigDecl; bool isRecordType; bool isPODType; + bool isReferenceType; public: typedef EvaluatedExprVisitor Inherited; @@ -6182,9 +6183,11 @@ namespace { S(S), OrigDecl(OrigDecl) { isPODType = false; isRecordType = false; + isReferenceType = false; if (ValueDecl *VD = dyn_cast(OrigDecl)) { isPODType = VD->getType().isPODType(S.Context); isRecordType = VD->getType()->isRecordType(); + isReferenceType = VD->getType()->isReferenceType(); } } @@ -6192,9 +6195,9 @@ namespace { // to determine which DeclRefExpr's to check. Assume that the casts // are present and continue visiting the expression. void HandleExpr(Expr *E) { - // Skip checking T a = a where T is not a record type. Doing so is a - // way to silence uninitialized warnings. - if (isRecordType) + // Skip checking T a = a where T is not a record or reference type. + // Doing so is a way to silence uninitialized warnings. + if (isRecordType || isReferenceType) if (DeclRefExpr *DRE = dyn_cast(E)) HandleDeclRefExpr(DRE); @@ -6309,11 +6312,11 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, } // Check for self-references within variable initializers. - // Variables declared within a function/method body are handled - // by a dataflow analysis. 
+ // Variables declared within a function/method body (except for references) + // are handled by a dataflow analysis. // Record types initialized by initializer list are handled here. // Initialization by constructors are handled in TryConstructorInitialization. - if (!VDecl->hasLocalStorage() && !VDecl->isStaticLocal() && + if ((!VDecl->hasLocalStorage() || VDecl->getType()->isReferenceType()) && (isa(Init) || !VDecl->getType()->isRecordType())) CheckSelfReference(RealDecl, Init); @@ -6754,6 +6757,10 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl, diag::err_abstract_type_in_decl, AbstractVariableType)) Var->setInvalidDecl(); + if (!Type->isDependentType() && !Var->isInvalidDecl() && + Var->getStorageClass() == SC_PrivateExtern) + Diag(Var->getLocation(), diag::warn_private_extern); + return; case VarDecl::TentativeDefinition: @@ -7027,6 +7034,42 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) { // Note that we are no longer parsing the initializer for this declaration. ParsingInitForAutoVars.erase(ThisDecl); + + // Now we have parsed the initializer and can update the table of magic + // tag values. + if (ThisDecl && ThisDecl->hasAttr()) { + const VarDecl *VD = dyn_cast(ThisDecl); + if (VD && VD->getType()->isIntegralOrEnumerationType()) { + for (specific_attr_iterator + I = ThisDecl->specific_attr_begin(), + E = ThisDecl->specific_attr_end(); + I != E; ++I) { + const Expr *MagicValueExpr = VD->getInit(); + if (!MagicValueExpr) { + continue; + } + llvm::APSInt MagicValueInt; + if (!MagicValueExpr->isIntegerConstantExpr(MagicValueInt, Context)) { + Diag(I->getRange().getBegin(), + diag::err_type_tag_for_datatype_not_ice) + << LangOpts.CPlusPlus << MagicValueExpr->getSourceRange(); + continue; + } + if (MagicValueInt.getActiveBits() > 64) { + Diag(I->getRange().getBegin(), + diag::err_type_tag_for_datatype_too_large) + << LangOpts.CPlusPlus << MagicValueExpr->getSourceRange(); + continue; + } + uint64_t MagicValue = MagicValueInt.getZExtValue(); + RegisterTypeTagForDatatype(I->getArgumentKind(), + MagicValue, + I->getMatchingCType(), + I->getLayoutCompatible(), + I->getMustBeNull()); + } + } + } } Sema::DeclGroupPtrTy @@ -7623,7 +7666,9 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) { << FD->getName() << "dllimport"; } } - ActOnDocumentableDecl(FD); + // We want to attach documentation to original Decl (which might be + // a function template). + ActOnDocumentableDecl(D); return FD; } @@ -7750,7 +7795,8 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, // Verify that gotos and switch cases don't jump into scopes illegally. 
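(Aside: the FinalizeDeclaration change above records integral type-tag constants as "magic values"; combined with the attributes sketched earlier, a mismatched call is then diagnosed roughly as below. The diagnostic text is abbreviated and the declarations are illustrative only.)

    static const int mpi_datatype_float
        __attribute__((type_tag_for_datatype(mpi, float))) = 2;
    #define MPI_FLOAT ((int) 2)

    int MPI_Send(void *buf, int count, int datatype)
        __attribute__((pointer_with_type_tag(mpi, 1, 3)));

    void use(int *p, int n) {
      MPI_Send(p, n, MPI_FLOAT);  // warning: argument type 'int *' does not match
                                  // the 'mpi' type tag, which requires 'float *'
    }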
if (getCurFunction()->NeedsScopeChecking() && !dcl->isInvalidDecl() && - !hasAnyUnrecoverableErrorsInThisFunction()) + !hasAnyUnrecoverableErrorsInThisFunction() && + !PP.isCodeCompletionEnabled()) DiagnoseInvalidJumps(Body); if (CXXDestructorDecl *Destructor = dyn_cast(dcl)) { @@ -9907,6 +9953,13 @@ void Sema::ActOnFields(Scope* S, } } } + if (isa(EnclosingDecl) && + RequireNonAbstractType(FD->getLocation(), FD->getType(), + diag::err_abstract_type_in_decl, + AbstractIvarType)) { + // Ivars can not have abstract class types + FD->setInvalidDecl(); + } if (Record && FDTTy->getDecl()->hasObjectMember()) Record->setHasObjectMember(true); } else if (FDTy->isObjCObjectType()) { @@ -9915,8 +9968,7 @@ void Sema::ActOnFields(Scope* S, << FixItHint::CreateInsertion(FD->getLocation(), "*"); QualType T = Context.getObjCObjectPointerType(FD->getType()); FD->setType(T); - } - else if (!getLangOpts().CPlusPlus) { + } else if (!getLangOpts().CPlusPlus) { if (getLangOpts().ObjCAutoRefCount && Record && !ARCErrReported) { // It's an error in ARC if a field has lifetime. // We don't want to report this in a system header, though, diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp index 22bff86..caa7b2f 100644 --- a/lib/Sema/SemaDeclAttr.cpp +++ b/lib/Sema/SemaDeclAttr.cpp @@ -221,6 +221,53 @@ static bool checkAttributeAtLeastNumArgs(Sema &S, const AttributeList &Attr, return true; } +/// \brief Check if IdxExpr is a valid argument index for a function or +/// instance method D. May output an error. +/// +/// \returns true if IdxExpr is a valid index. +static bool checkFunctionOrMethodArgumentIndex(Sema &S, const Decl *D, + StringRef AttrName, + SourceLocation AttrLoc, + unsigned AttrArgNum, + const Expr *IdxExpr, + uint64_t &Idx) +{ + assert(isFunctionOrMethod(D) && hasFunctionProto(D)); + + // In C++ the implicit 'this' function parameter also counts. + // Parameters are counted from one. + const bool HasImplicitThisParam = isInstanceMethod(D); + const unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam; + const unsigned FirstIdx = 1; + + llvm::APSInt IdxInt; + if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() || + !IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) { + S.Diag(AttrLoc, diag::err_attribute_argument_n_not_int) + << AttrName << AttrArgNum << IdxExpr->getSourceRange(); + return false; + } + + Idx = IdxInt.getLimitedValue(); + if (Idx < FirstIdx || (!isFunctionOrMethodVariadic(D) && Idx > NumArgs)) { + S.Diag(AttrLoc, diag::err_attribute_argument_out_of_bounds) + << AttrName << AttrArgNum << IdxExpr->getSourceRange(); + return false; + } + Idx--; // Convert to zero-based. 
+ if (HasImplicitThisParam) { + if (Idx == 0) { + S.Diag(AttrLoc, + diag::err_attribute_invalid_implicit_this_argument) + << AttrName << IdxExpr->getSourceRange(); + return false; + } + --Idx; + } + + return true; +} + /// /// \brief Check if passed in Decl is a field or potentially shared global var /// \return true if the Decl is a field or potentially shared global variable @@ -3523,25 +3570,16 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) { D->addAttr(::new (S.Context) PascalAttr(Attr.getRange(), S.Context)); return; case AttributeList::AT_Pcs: { - Expr *Arg = Attr.getArg(0); - StringLiteral *Str = dyn_cast(Arg); - if (!Str || !Str->isAscii()) { - S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string) - << "pcs" << 1; - Attr.setInvalid(); - return; - } - - StringRef StrRef = Str->getString(); PcsAttr::PCSType PCS; - if (StrRef == "aapcs") + switch (CC) { + case CC_AAPCS: PCS = PcsAttr::AAPCS; - else if (StrRef == "aapcs-vfp") + break; + case CC_AAPCS_VFP: PCS = PcsAttr::AAPCS_VFP; - else { - S.Diag(Attr.getLoc(), diag::err_invalid_pcs); - Attr.setInvalid(); - return; + break; + default: + llvm_unreachable("unexpected calling convention in pcs attribute"); } D->addAttr(::new (S.Context) PcsAttr(Attr.getRange(), S.Context, PCS)); @@ -3560,10 +3598,9 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) { if (attr.isInvalid()) return true; - if ((attr.getNumArgs() != 0 && - !(attr.getKind() == AttributeList::AT_Pcs && attr.getNumArgs() == 1)) || - attr.getParameterName()) { - Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0; + unsigned ReqArgs = attr.getKind() == AttributeList::AT_Pcs ? 1 : 0; + if (attr.getNumArgs() != ReqArgs || attr.getParameterName()) { + Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << ReqArgs; attr.setInvalid(); return true; } @@ -3594,7 +3631,10 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) { CC = CC_AAPCS_VFP; break; } - // FALLS THROUGH + + attr.setInvalid(); + Diag(attr.getLoc(), diag::err_invalid_pcs); + return true; } default: llvm_unreachable("unexpected attribute kind"); } @@ -3703,6 +3743,79 @@ static void handleLaunchBoundsAttr(Sema &S, Decl *D, const AttributeList &Attr){ } } +static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D, + const AttributeList &Attr) { + StringRef AttrName = Attr.getName()->getName(); + if (!Attr.getParameterName()) { + S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_identifier) + << Attr.getName() << /* arg num = */ 1; + return; + } + + if (Attr.getNumArgs() != 2) { + S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) + << /* required args = */ 3; + return; + } + + IdentifierInfo *ArgumentKind = Attr.getParameterName(); + + if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) { + S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type) + << Attr.getName() << ExpectedFunctionOrMethod; + return; + } + + uint64_t ArgumentIdx; + if (!checkFunctionOrMethodArgumentIndex(S, D, AttrName, + Attr.getLoc(), 2, + Attr.getArg(0), ArgumentIdx)) + return; + + uint64_t TypeTagIdx; + if (!checkFunctionOrMethodArgumentIndex(S, D, AttrName, + Attr.getLoc(), 3, + Attr.getArg(1), TypeTagIdx)) + return; + + bool IsPointer = (AttrName == "pointer_with_type_tag"); + if (IsPointer) { + // Ensure that buffer has a pointer type. 
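(Aside: checkFunctionOrMethodArgumentIndex above counts arguments from one and, for C++ instance methods, includes the implicit 'this' parameter, so an annotated member function would be written roughly like this; the class and member names are made up:)

    struct Channel {
      // With the implicit 'this' counted as argument 1, 'buf' is argument 2
      // and 'datatype' is argument 4; writing index 1 here would be rejected
      // because it names the implicit 'this'.
      void send(void *buf, int count, int datatype)
          __attribute__((pointer_with_type_tag(mpi, 2, 4)));
    };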
+ QualType BufferTy = getFunctionOrMethodArgType(D, ArgumentIdx); + if (!BufferTy->isPointerType()) { + S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only) + << AttrName; + } + } + + D->addAttr(::new (S.Context) ArgumentWithTypeTagAttr(Attr.getRange(), + S.Context, + ArgumentKind, + ArgumentIdx, + TypeTagIdx, + IsPointer)); +} + +static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D, + const AttributeList &Attr) { + IdentifierInfo *PointerKind = Attr.getParameterName(); + if (!PointerKind) { + S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_identifier) + << "type_tag_for_datatype" << 1; + return; + } + + QualType MatchingCType = S.GetTypeFromParser(Attr.getMatchingCType(), NULL); + + D->addAttr(::new (S.Context) TypeTagForDatatypeAttr( + Attr.getRange(), + S.Context, + PointerKind, + MatchingCType, + Attr.getLayoutCompatible(), + Attr.getMustBeNull())); +} + //===----------------------------------------------------------------------===// // Checker-specific attribute handlers. //===----------------------------------------------------------------------===// @@ -4333,6 +4446,14 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D, handleAcquiredAfterAttr(S, D, Attr); break; + // Type safety attributes. + case AttributeList::AT_ArgumentWithTypeTag: + handleArgumentWithTypeTagAttr(S, D, Attr); + break; + case AttributeList::AT_TypeTagForDatatype: + handleTypeTagForDatatypeAttr(S, D, Attr); + break; + default: // Ask target about the attribute. const TargetAttributesSema &TargetAttrs = S.getTargetAttributesSema(); diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp index 1d45a68..eeac9b8 100644 --- a/lib/Sema/SemaDeclCXX.cpp +++ b/lib/Sema/SemaDeclCXX.cpp @@ -9807,7 +9807,7 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, if (!Failed && !Cond) { llvm::SmallString<256> MsgBuffer; llvm::raw_svector_ostream Msg(MsgBuffer); - AssertMessage->printPretty(Msg, Context, 0, getPrintingPolicy()); + AssertMessage->printPretty(Msg, 0, getPrintingPolicy()); Diag(StaticAssertLoc, diag::err_static_assert_failed) << Msg.str() << AssertExpr->getSourceRange(); Failed = true; diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp index 63bfa9d..e6266fb 100644 --- a/lib/Sema/SemaExceptionSpec.cpp +++ b/lib/Sema/SemaExceptionSpec.cpp @@ -242,8 +242,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) { case EST_ComputedNoexcept: OS << "noexcept("; - OldProto->getNoexceptExpr()->printPretty(OS, Context, 0, - getPrintingPolicy()); + OldProto->getNoexceptExpr()->printPretty(OS, 0, getPrintingPolicy()); OS << ")"; break; diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp index 6a503ee..3875ba1 100644 --- a/lib/Sema/SemaExpr.cpp +++ b/lib/Sema/SemaExpr.cpp @@ -9461,7 +9461,8 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc, // If needed, diagnose invalid gotos and switches in the block. if (getCurFunction()->NeedsScopeChecking() && - !hasAnyUnrecoverableErrorsInThisFunction()) + !hasAnyUnrecoverableErrorsInThisFunction() && + !PP.isCodeCompletionEnabled()) DiagnoseInvalidJumps(cast(Body)); BSI->TheDecl->setBody(cast(Body)); diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp index 53f22f6..8f445e2 100644 --- a/lib/Sema/SemaExprMember.cpp +++ b/lib/Sema/SemaExprMember.cpp @@ -1137,7 +1137,7 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr, goto fail; // There's an implicit 'isa' ivar on all objects. 
// But we only actually find it this way on objects of type 'id', - // apparently.ghjg + // apparently. if (OTy->isObjCId() && Member->isStr("isa")) { Diag(MemberLoc, diag::warn_objc_isa_use); return Owned(new (Context) ObjCIsaExpr(BaseExpr.take(), IsArrow, MemberLoc, diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp index a874489..9382f7d 100644 --- a/lib/Sema/SemaOverload.cpp +++ b/lib/Sema/SemaOverload.cpp @@ -57,7 +57,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType, StandardConversionSequence &SCS, bool CStyle, bool AllowObjCWritebackConversion); - + static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From, QualType &ToType, bool InOverloadResolution, diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp index d22deb2..86884b7 100644 --- a/lib/Sema/SemaStmt.cpp +++ b/lib/Sema/SemaStmt.cpp @@ -28,6 +28,7 @@ #include "clang/Lex/Preprocessor.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/BitVector.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" @@ -35,11 +36,15 @@ #include "llvm/ADT/Triple.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstPrinter.h" +#include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCObjectFileInfo.h" #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCTargetAsmParser.h" +#include "llvm/MC/MCParser/MCAsmLexer.h" #include "llvm/MC/MCParser/MCAsmParser.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/TargetRegistry.h" @@ -1516,7 +1521,6 @@ Sema::CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) { StmtResult Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc, - SourceLocation LParenLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc) { @@ -1698,9 +1702,9 @@ static bool ObjCEnumerationCollection(Expr *Collection) { && Collection->getType()->getAs() != 0; } -/// ActOnCXXForRangeStmt - Check and build a C++0x for-range statement. +/// ActOnCXXForRangeStmt - Check and build a C++11 for-range statement. /// -/// C++0x [stmt.ranged]: +/// C++11 [stmt.ranged]: /// A range-based for statement is equivalent to /// /// { @@ -1717,15 +1721,14 @@ static bool ObjCEnumerationCollection(Expr *Collection) { /// The body of the loop is not available yet, since it cannot be analysed until /// we have determined the type of the for-range-declaration. StmtResult -Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc, SourceLocation LParenLoc, +Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *First, SourceLocation ColonLoc, Expr *Range, SourceLocation RParenLoc) { if (!First || !Range) return StmtError(); if (ObjCEnumerationCollection(Range)) - return ActOnObjCForCollectionStmt(ForLoc, LParenLoc, First, Range, - RParenLoc); + return ActOnObjCForCollectionStmt(ForLoc, First, Range, RParenLoc); DeclStmt *DS = dyn_cast(First); assert(DS && "first part of for range not a decl stmt"); @@ -2759,165 +2762,225 @@ StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple, return Owned(NS); } -// needSpaceAsmToken - This function handles whitespace around asm punctuation. -// Returns true if a space should be emitted. -static inline bool needSpaceAsmToken(Token currTok) { - static Token prevTok; - - // No need for space after prevToken. 
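(Aside: ActOnCXXForRangeStmt above performs the C++11 [stmt.ranged] rewrite; informally, a range-based for is checked as if it were the expansion sketched in the comment below:)

    #include <vector>

    int sum(const std::vector<int> &v) {
      int total = 0;
      for (int x : v)       // range-based for
        total += x;
      // The loop above behaves approximately like:
      //   { auto && __range = v;
      //     for (auto __begin = __range.begin(), __end = __range.end();
      //          __begin != __end; ++__begin)
      //       { int x = *__begin; total += x; } }
      return total;
    }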
- switch(prevTok.getKind()) { - default: - break; - case tok::l_square: - case tok::r_square: - case tok::l_brace: - case tok::r_brace: - case tok::colon: - prevTok = currTok; - return false; - } +// isMSAsmKeyword - Return true if this is an MS-style inline asm keyword. These +// require special handling. +static bool isMSAsmKeyword(StringRef Name) { + bool Ret = llvm::StringSwitch(Name) + .Cases("EVEN", "ALIGN", true) // Alignment directives. + .Cases("LENGTH", "SIZE", "TYPE", true) // Type and variable sizes. + .Case("_emit", true) // _emit Pseudoinstruction. + .Default(false); + return Ret; +} - // No need for a space before currToken. - switch(currTok.getKind()) { - default: - break; - case tok::l_square: - case tok::r_square: - case tok::l_brace: - case tok::r_brace: - case tok::comma: - case tok::colon: - prevTok = currTok; - return false; - } - prevTok = currTok; - return true; +static StringRef getSpelling(Sema &SemaRef, Token AsmTok) { + StringRef Asm; + SmallString<512> TokenBuf; + TokenBuf.resize(512); + bool StringInvalid = false; + Asm = SemaRef.PP.getSpelling(AsmTok, TokenBuf, &StringInvalid); + assert (!StringInvalid && "Expected valid string!"); + return Asm; } static void patchMSAsmStrings(Sema &SemaRef, bool &IsSimple, SourceLocation AsmLoc, ArrayRef AsmToks, - ArrayRef LineEnds, const TargetInfo &TI, + std::vector &AsmRegs, + std::vector &AsmNames, std::vector &AsmStrings) { assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!"); - // Assume simple asm stmt until we parse a non-register identifer. + // Assume simple asm stmt until we parse a non-register identifer (or we just + // need to bail gracefully). IsSimple = true; - for (unsigned i = 0, e = LineEnds.size(); i != e; ++i) { - SmallString<512> Asm; + SmallString<512> Asm; + unsigned NumAsmStrings = 0; + for (unsigned i = 0, e = AsmToks.size(); i != e; ++i) { + + // Determine if this should be considered a new asm. + bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() || + AsmToks[i].is(tok::kw_asm); + + // Emit the previous asm string. + if (i && isNewAsm) { + AsmStrings[NumAsmStrings++] = Asm.c_str(); + if (AsmToks[i].is(tok::kw_asm)) { + ++i; // Skip __asm + assert (i != e && "Expected another token."); + } + } - // Check the operands. - for (unsigned j = (i == 0) ? 0 : LineEnds[i-1], e = LineEnds[i]; j != e; ++j) { + // Start a new asm string with the opcode. + if (isNewAsm) { + AsmRegs[NumAsmStrings].resize(AsmToks.size()); + AsmNames[NumAsmStrings].resize(AsmToks.size()); - IdentifierInfo *II; - if (j == 0 || (i > 0 && j == LineEnds[i-1])) { - II = AsmToks[j].getIdentifierInfo(); - Asm = II->getName().str(); - continue; + StringRef Piece = AsmToks[i].getIdentifierInfo()->getName(); + // MS-style inline asm keywords require special handling. + if (isMSAsmKeyword(Piece)) + IsSimple = false; + + // TODO: Verify this is a valid opcode. + Asm = Piece; + continue; + } + + if (i && AsmToks[i].hasLeadingSpace()) + Asm += ' '; + + // Check the operand(s). 
+ switch (AsmToks[i].getKind()) { + default: + IsSimple = false; + Asm += getSpelling(SemaRef, AsmToks[i]); + break; + case tok::comma: Asm += ","; break; + case tok::colon: Asm += ":"; break; + case tok::l_square: Asm += "["; break; + case tok::r_square: Asm += "]"; break; + case tok::l_brace: Asm += "{"; break; + case tok::r_brace: Asm += "}"; break; + case tok::numeric_constant: + Asm += getSpelling(SemaRef, AsmToks[i]); + break; + case tok::identifier: { + IdentifierInfo *II = AsmToks[i].getIdentifierInfo(); + StringRef Name = II->getName(); + + // Valid register? + if (TI.isValidGCCRegisterName(Name)) { + AsmRegs[NumAsmStrings].set(i); + Asm += Name; + break; } - if (needSpaceAsmToken(AsmToks[j])) - Asm += " "; + IsSimple = false; - switch (AsmToks[j].getKind()) { - default: - //llvm_unreachable("Unknown token."); + // MS-style inline asm keywords require special handling. + if (isMSAsmKeyword(Name)) { + IsSimple = false; + Asm += Name; break; - case tok::comma: Asm += ","; break; - case tok::colon: Asm += ":"; break; - case tok::l_square: Asm += "["; break; - case tok::r_square: Asm += "]"; break; - case tok::l_brace: Asm += "{"; break; - case tok::r_brace: Asm += "}"; break; - case tok::numeric_constant: { - SmallString<32> TokenBuf; - TokenBuf.resize(32); - bool StringInvalid = false; - Asm += SemaRef.PP.getSpelling(AsmToks[j], TokenBuf, &StringInvalid); - assert (!StringInvalid && "Expected valid string!"); + } + + // FIXME: Why are we missing this segment register? + if (Name == "fs") { + Asm += Name; break; } - case tok::identifier: { - II = AsmToks[j].getIdentifierInfo(); - StringRef Name = II->getName(); - // Valid registers don't need modification. - if (TI.isValidGCCRegisterName(Name)) { - Asm += Name; - break; - } + // Lookup the identifier. + // TODO: Someone with more experience with clang should verify this the + // proper way of doing a symbol lookup. + DeclarationName DeclName(II); + Scope *CurScope = SemaRef.getCurScope(); + LookupResult R(SemaRef, DeclName, AsmLoc, Sema::LookupOrdinaryName); + if (!SemaRef.LookupName(R, CurScope, false/*AllowBuiltinCreation*/)) + break; - // TODO: Lookup the identifier. - IsSimple = false; + assert (R.isSingleResult() && "Expected a single result?!"); + NamedDecl *Decl = R.getFoundDecl(); + switch (Decl->getKind()) { + default: + assert(0 && "Unknown decl kind."); + break; + case Decl::Var: { + case Decl::ParmVar: + AsmNames[NumAsmStrings].set(i); + + VarDecl *Var = cast(Decl); + QualType Ty = Var->getType(); + (void)Ty; // Avoid warning. + // TODO: Patch identifier with valid operand. One potential idea is to + // probe the backend with type information to guess the possible + // operand. + break; + } } - } // AsmToks[i].getKind() + break; + } } - AsmStrings[i] = Asm.c_str(); } + + // Emit the final (and possibly only) asm string. + AsmStrings[NumAsmStrings] = Asm.c_str(); } // Build the unmodified MSAsmString. 
static std::string buildMSAsmString(Sema &SemaRef, ArrayRef AsmToks, - ArrayRef LineEnds) { + unsigned &NumAsmStrings) { assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!"); + NumAsmStrings = 0; + SmallString<512> Asm; - SmallString<512> TokenBuf; - TokenBuf.resize(512); - unsigned AsmLineNum = 0; for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) { - const char *ThisTokBuf = &TokenBuf[0]; - bool StringInvalid = false; - unsigned ThisTokLen = - Lexer::getSpelling(AsmToks[i], ThisTokBuf, SemaRef.getSourceManager(), - SemaRef.getLangOpts(), &StringInvalid); - if (i && (!AsmLineNum || i != LineEnds[AsmLineNum-1]) && - needSpaceAsmToken(AsmToks[i])) - Asm += ' '; - Asm += StringRef(ThisTokBuf, ThisTokLen); - if (i + 1 == LineEnds[AsmLineNum] && i + 1 != AsmToks.size()) { - Asm += '\n'; - ++AsmLineNum; + bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() || + AsmToks[i].is(tok::kw_asm); + + if (isNewAsm) { + ++NumAsmStrings; + if (i) + Asm += '\n'; + if (AsmToks[i].is(tok::kw_asm)) { + i++; // Skip __asm + assert (i != e && "Expected another token"); + } } + + if (i && AsmToks[i].hasLeadingSpace() && !isNewAsm) + Asm += ' '; + + Asm += getSpelling(SemaRef, AsmToks[i]); } return Asm.c_str(); } StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, + SourceLocation LBraceLoc, ArrayRef AsmToks, - ArrayRef LineEnds, SourceLocation EndLoc) { // MS-style inline assembly is not fully supported, so emit a warning. Diag(AsmLoc, diag::warn_unsupported_msasm); SmallVector Clobbers; + std::set ClobberRegs; + SmallVector Inputs; + SmallVector Outputs; // Empty asm statements don't need to instantiate the AsmParser, etc. if (AsmToks.empty()) { StringRef AsmString; MSAsmStmt *NS = - new (Context) MSAsmStmt(Context, AsmLoc, /* IsSimple */ true, - /* IsVolatile */ true, AsmToks, LineEnds, + new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true, + /*IsVolatile*/ true, AsmToks, Inputs, Outputs, AsmString, Clobbers, EndLoc); return Owned(NS); } - std::string AsmString = buildMSAsmString(*this, AsmToks, LineEnds); + unsigned NumAsmStrings; + std::string AsmString = buildMSAsmString(*this, AsmToks, NumAsmStrings); bool IsSimple; + std::vector Regs; + std::vector Names; std::vector PatchedAsmStrings; - PatchedAsmStrings.resize(LineEnds.size()); + + Regs.resize(NumAsmStrings); + Names.resize(NumAsmStrings); + PatchedAsmStrings.resize(NumAsmStrings); // Rewrite operands to appease the AsmParser. - patchMSAsmStrings(*this, IsSimple, AsmLoc, AsmToks, LineEnds, - Context.getTargetInfo(), PatchedAsmStrings); + patchMSAsmStrings(*this, IsSimple, AsmLoc, AsmToks, + Context.getTargetInfo(), Regs, Names, PatchedAsmStrings); // patchMSAsmStrings doesn't correctly patch non-simple asm statements. if (!IsSimple) { MSAsmStmt *NS = - new (Context) MSAsmStmt(Context, AsmLoc, /* IsSimple */ true, - /* IsVolatile */ true, AsmToks, LineEnds, + new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true, + /*IsVolatile*/ true, AsmToks, Inputs, Outputs, AsmString, Clobbers, EndLoc); return Owned(NS); } @@ -2947,7 +3010,7 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, // Tell SrcMgr about this buffer, which is what the parser will pick up. 
SrcMgr.AddNewSourceBuffer(Buffer, llvm::SMLoc()); - OwningPtr Str; + OwningPtr Str(createNullStreamer(Ctx)); OwningPtr Parser(createMCAsmParser(SrcMgr, Ctx, *Str.get(), *MAI)); OwningPtr @@ -2956,13 +3019,63 @@ StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, Parser->setAssemblerDialect(1); Parser->setTargetParser(*TargetParser.get()); - // TODO: Start parsing. + // Prime the lexer. + Parser->Lex(); + + // Parse the opcode. + StringRef IDVal; + Parser->ParseIdentifier(IDVal); + + // Canonicalize the opcode to lower case. + SmallString<128> Opcode; + for (unsigned i = 0, e = IDVal.size(); i != e; ++i) + Opcode.push_back(tolower(IDVal[i])); + + // Parse the operands. + llvm::SMLoc IDLoc; + SmallVector Operands; + bool HadError = TargetParser->ParseInstruction(Opcode.str(), IDLoc, + Operands); + assert (!HadError && "Unexpected error parsing instruction"); + + // Match the MCInstr. + SmallVector Instrs; + HadError = TargetParser->MatchInstruction(IDLoc, Operands, Instrs); + assert (!HadError && "Unexpected error matching instruction"); + assert ((Instrs.size() == 1) && "Expected only a single instruction."); + + // Get the instruction descriptor. + llvm::MCInst Inst = Instrs[0]; + const llvm::MCInstrInfo *MII = TheTarget->createMCInstrInfo(); + const llvm::MCInstrDesc &Desc = MII->get(Inst.getOpcode()); + llvm::MCInstPrinter *IP = + TheTarget->createMCInstPrinter(1, *MAI, *MII, *MRI, *STI); + + // Build the list of clobbers. + for (unsigned i = 0, e = Desc.getNumDefs(); i != e; ++i) { + const llvm::MCOperand &Op = Inst.getOperand(i); + if (!Op.isReg()) + continue; + + std::string Reg; + llvm::raw_string_ostream OS(Reg); + IP->printRegName(OS, Op.getReg()); + + StringRef Clobber(OS.str()); + if (!Context.getTargetInfo().isValidClobber(Clobber)) + return StmtError(Diag(AsmLoc, diag::err_asm_unknown_register_name) << + Clobber); + ClobberRegs.insert(Reg); + } } + for (std::set::iterator I = ClobberRegs.begin(), + E = ClobberRegs.end(); I != E; ++I) + Clobbers.push_back(*I); MSAsmStmt *NS = - new (Context) MSAsmStmt(Context, AsmLoc, IsSimple, /* IsVolatile */ true, - AsmToks, LineEnds, AsmString, Clobbers, EndLoc); - + new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple, + /*IsVolatile*/ true, AsmToks, Inputs, Outputs, + AsmString, Clobbers, EndLoc); return Owned(NS); } diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp index c8e4501..4dbf3e4 100644 --- a/lib/Sema/SemaTemplate.cpp +++ b/lib/Sema/SemaTemplate.cpp @@ -5518,6 +5518,13 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, if (Attr) ProcessDeclAttributeList(S, Specialization, Attr); + // Add alignment attributes if necessary; these attributes are checked when + // the ASTContext lays out the structure. + if (TUK == TUK_Definition) { + AddAlignmentAttributesForRecord(Specialization); + AddMsStructLayoutForRecord(Specialization); + } + if (ModulePrivateLoc.isValid()) Diag(Specialization->getLocation(), diag::err_module_private_specialization) << (isPartialSpecialization? 1 : 0) diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp index c7cbc41..20e755f 100644 --- a/lib/Sema/SemaTemplateInstantiate.cpp +++ b/lib/Sema/SemaTemplateInstantiate.cpp @@ -988,12 +988,11 @@ TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc, SourceLocation TagLocation = KeywordLoc; - // FIXME: type might be anonymous. IdentifierInfo *Id = TD->getIdentifier(); // TODO: should we even warn on struct/class mismatches for this? 
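(Aside: the ActOnClassTemplateSpecialization change above makes explicit specializations pick up pragma-based layout attributes at their point of definition; a small illustration, assuming the usual #pragma pack semantics and a target where int is four bytes:)

    #pragma pack(push, 1)
    template <typename T> struct Packed { T t; };
    template <> struct Packed<int> { char c; int i; };  // laid out with packing 1
    #pragma pack(pop)

    // With the fix, the explicit specialization honors the active #pragma pack:
    static_assert(sizeof(Packed<int>) == sizeof(char) + sizeof(int),
                  "explicit specialization is packed");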
Seems // like it's likely to produce a lot of spurious errors. - if (Keyword != ETK_None && Keyword != ETK_Typename) { + if (Id && Keyword != ETK_None && Keyword != ETK_Typename) { TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword); if (!SemaRef.isAcceptableTagRedeclaration(TD, Kind, /*isDefinition*/false, TagLocation, *Id)) { diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp index 20fe036..54f8dba 100644 --- a/lib/Sema/SemaType.cpp +++ b/lib/Sema/SemaType.cpp @@ -2258,6 +2258,51 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, ASM = ArrayType::Normal; D.setInvalidType(true); } + + // C99 6.7.5.2p1: The optional type qualifiers and the keyword static + // shall appear only in a declaration of a function parameter with an + // array type, ... + if (ASM == ArrayType::Static || ATI.TypeQuals) { + if (!(D.isPrototypeContext() || + D.getContext() == Declarator::KNRTypeListContext)) { + S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) << + (ASM == ArrayType::Static ? "'static'" : "type qualifier"); + // Remove the 'static' and the type qualifiers. + if (ASM == ArrayType::Static) + ASM = ArrayType::Normal; + ATI.TypeQuals = 0; + D.setInvalidType(true); + } + + // C99 6.7.5.2p1: ... and then only in the outermost array type + // derivation. + unsigned x = chunkIndex; + while (x != 0) { + // Walk outwards along the declarator chunks. + x--; + const DeclaratorChunk &DC = D.getTypeObject(x); + switch (DC.Kind) { + case DeclaratorChunk::Paren: + continue; + case DeclaratorChunk::Array: + case DeclaratorChunk::Pointer: + case DeclaratorChunk::Reference: + case DeclaratorChunk::MemberPointer: + S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) << + (ASM == ArrayType::Static ? "'static'" : "type qualifier"); + if (ASM == ArrayType::Static) + ASM = ArrayType::Normal; + ATI.TypeQuals = 0; + D.setInvalidType(true); + break; + case DeclaratorChunk::Function: + case DeclaratorChunk::BlockPointer: + // These are invalid anyway, so just ignore. + break; + } + } + } + T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals, SourceRange(DeclType.Loc, DeclType.EndLoc), Name); break; @@ -3224,7 +3269,6 @@ namespace { assert(Chunk.Kind == DeclaratorChunk::Function); TL.setLocalRangeBegin(Chunk.Loc); TL.setLocalRangeEnd(Chunk.EndLoc); - TL.setTrailingReturn(Chunk.Fun.hasTrailingReturnType()); const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun; for (unsigned i = 0, e = TL.getNumArgs(), tpi = 0; i != e; ++i) { diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h index 90d5840..619ad33 100644 --- a/lib/Sema/TreeTransform.h +++ b/lib/Sema/TreeTransform.h @@ -1185,10 +1185,10 @@ public: /// By default, performs semantic analysis to build the new statement. /// Subclasses may override this routine to provide different behavior. StmtResult RebuildMSAsmStmt(SourceLocation AsmLoc, + SourceLocation LBraceLoc, ArrayRef AsmToks, - ArrayRef LineEnds, SourceLocation EndLoc) { - return getSema().ActOnMSAsmStmt(AsmLoc, AsmToks, LineEnds, EndLoc); + return getSema().ActOnMSAsmStmt(AsmLoc, LBraceLoc, AsmToks, EndLoc); } /// \brief Build a new Objective-C \@try statement. @@ -1277,12 +1277,11 @@ public: /// By default, performs semantic analysis to build the new statement. /// Subclasses may override this routine to provide different behavior. 
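Note: the new SemaType check enforces C99 6.7.5.2p1 in two parts: 'static' and type qualifiers inside array brackets are only accepted in a function-parameter (prototype or K&R) declarator, and only on the outermost array derivation. The code walks the declarator chunks outward from the array, skipping parentheses, and diagnoses if any pointer, array, reference, or member-pointer chunk encloses it. A minimal sketch of that outward walk; ChunkKind and the index ordering are simplified stand-ins for the real DeclaratorChunk machinery.

    #include <iostream>
    #include <vector>

    // Simplified declarator-chunk kinds. In this sketch, index 0 is the
    // derivation applied last (the outermost one), so decrementing the index
    // walks outward, matching the direction of the walk in the patch.
    enum class ChunkKind { Paren, Array, Pointer, Reference, MemberPointer,
                           Function, BlockPointer };

    // Returns true if the array chunk at 'ArrayIdx' is the outermost array
    // derivation, i.e. only parentheses may appear outside it.
    static bool isOutermostArrayDerivation(const std::vector<ChunkKind> &Chunks,
                                           unsigned ArrayIdx) {
      for (unsigned x = ArrayIdx; x != 0;) {
        --x;                                 // walk outwards
        switch (Chunks[x]) {
        case ChunkKind::Paren:
          continue;                          // parentheses are transparent
        case ChunkKind::Array:
        case ChunkKind::Pointer:
        case ChunkKind::Reference:
        case ChunkKind::MemberPointer:
          return false;                      // something encloses the array
        case ChunkKind::Function:
        case ChunkKind::BlockPointer:
          break;                             // invalid anyway; ignore here
        }
      }
      return true;
    }

    int main() {
      // void f(int a[static 10]);     chunks: {Array}
      std::vector<ChunkKind> Direct = {ChunkKind::Array};
      // void f(int (*p)[static 10]);  a Pointer chunk encloses the array
      std::vector<ChunkKind> ThroughPointer = {ChunkKind::Pointer,
                                               ChunkKind::Paren,
                                               ChunkKind::Array};
      std::cout << isOutermostArrayDerivation(Direct, 0) << '\n';          // 1
      std::cout << isOutermostArrayDerivation(ThroughPointer, 2) << '\n';  // 0
    }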
StmtResult RebuildObjCForCollectionStmt(SourceLocation ForLoc, - SourceLocation LParenLoc, Stmt *Element, Expr *Collection, SourceLocation RParenLoc, Stmt *Body) { - StmtResult ForEachStmt = getSema().ActOnObjCForCollectionStmt(ForLoc, LParenLoc, + StmtResult ForEachStmt = getSema().ActOnObjCForCollectionStmt(ForLoc, Element, Collection, RParenLoc); @@ -4205,7 +4204,7 @@ TreeTransform::TransformFunctionProtoType(TypeLocBuilder &TLB, QualType ResultType; - if (TL.getTrailingReturn()) { + if (T->hasTrailingReturn()) { if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(), TL.getParmArray(), TL.getNumArgs(), @@ -4262,7 +4261,6 @@ TreeTransform::TransformFunctionProtoType(TypeLocBuilder &TLB, FunctionProtoTypeLoc NewTL = TLB.push(Result); NewTL.setLocalRangeBegin(TL.getLocalRangeBegin()); NewTL.setLocalRangeEnd(TL.getLocalRangeEnd()); - NewTL.setTrailingReturn(TL.getTrailingReturn()); for (unsigned i = 0, e = NewTL.getNumArgs(); i != e; ++i) NewTL.setArg(i, ParamDecls[i]); @@ -4286,7 +4284,6 @@ QualType TreeTransform::TransformFunctionNoProtoType( FunctionNoProtoTypeLoc NewTL = TLB.push(Result); NewTL.setLocalRangeBegin(TL.getLocalRangeBegin()); NewTL.setLocalRangeEnd(TL.getLocalRangeEnd()); - NewTL.setTrailingReturn(false); return Result; } @@ -5612,12 +5609,9 @@ StmtResult TreeTransform::TransformMSAsmStmt(MSAsmStmt *S) { ArrayRef AsmToks = llvm::makeArrayRef(S->getAsmToks(), S->getNumAsmToks()); - ArrayRef LineEnds = - llvm::makeArrayRef(S->getLineEnds(), S->getNumLineEnds()); - // No need to transform the asm string literal. - return getDerived().RebuildMSAsmStmt(S->getAsmLoc(), AsmToks, LineEnds, - S->getEndLoc()); + return getDerived().RebuildMSAsmStmt(S->getAsmLoc(), S->getLBraceLoc(), + AsmToks, S->getEndLoc()); } template @@ -5808,7 +5802,6 @@ TreeTransform::TransformObjCForCollectionStmt( // Build a new statement. 
return getDerived().RebuildObjCForCollectionStmt(S->getForLoc(), - /*FIXME:*/S->getForLoc(), Element.get(), Collection.get(), S->getRParenLoc(), diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp index beef338..3adbc57 100644 --- a/lib/Serialization/ASTReader.cpp +++ b/lib/Serialization/ASTReader.cpp @@ -4259,7 +4259,6 @@ void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) { void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) { TL.setLocalRangeBegin(ReadSourceLocation(Record, Idx)); TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx)); - TL.setTrailingReturn(Record[Idx++]); for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) { TL.setArg(i, ReadDeclAs(Record, Idx)); } diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp index b7718c4..425d2e3 100644 --- a/lib/Serialization/ASTWriter.cpp +++ b/lib/Serialization/ASTWriter.cpp @@ -486,7 +486,6 @@ void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) { void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) { Writer.AddSourceLocation(TL.getLocalRangeBegin(), Record); Writer.AddSourceLocation(TL.getLocalRangeEnd(), Record); - Record.push_back(TL.getTrailingReturn()); for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) Writer.AddDeclRef(TL.getArg(i), Record); } diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp index 30f45c7..5edcf09 100644 --- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp @@ -235,17 +235,20 @@ void CallAndMessageChecker::checkPreStmt(const CallExpr *CE, ProgramStateRef StNonNull, StNull; llvm::tie(StNonNull, StNull) = State->assume(cast(L)); - // FIXME: Do we want to record the non-null assumption here? if (StNull && !StNonNull) { if (!BT_call_null) BT_call_null.reset( new BuiltinBug("Called function pointer is null (null dereference)")); emitBadCall(BT_call_null.get(), C, Callee); } + + C.addTransition(StNonNull); } void CallAndMessageChecker::checkPreCall(const CallEvent &Call, CheckerContext &C) const { + ProgramStateRef State = C.getState(); + // If this is a call to a C++ method, check if the callee is null or // undefined. if (const CXXInstanceCall *CC = dyn_cast(&Call)) { @@ -258,11 +261,9 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call, return; } - ProgramStateRef State = C.getState(); ProgramStateRef StNonNull, StNull; llvm::tie(StNonNull, StNull) = State->assume(cast(V)); - // FIXME: Do we want to record the non-null assumption here? if (StNull && !StNonNull) { if (!BT_cxx_call_null) BT_cxx_call_null.reset(new BuiltinBug("Called C++ object pointer " @@ -270,6 +271,8 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call, emitBadCall(BT_cxx_call_null.get(), C, CC->getCXXThisExpr()); return; } + + State = StNonNull; } // Don't check for uninitialized field values in arguments if the @@ -291,6 +294,9 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call, Call.getArgExpr(i), /*IsFirstArgument=*/i == 0, checkUninitFields, Call, *BT)) return; + + // If we make it here, record our assumptions about the callee. 
+ C.addTransition(State); } void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg, diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp index fea5733..b636efb 100644 --- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp +++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp @@ -25,7 +25,8 @@ using namespace ento; namespace { class DynamicTypePropagation: - public Checker< check::PostCall, + public Checker< check::PreCall, + check::PostCall, check::PostStmt > { const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE, CheckerContext &C) const; @@ -34,11 +35,70 @@ class DynamicTypePropagation: const ObjCObjectPointerType *getBetterObjCType(const Expr *CastE, CheckerContext &C) const; public: + void checkPreCall(const CallEvent &Call, CheckerContext &C) const; void checkPostCall(const CallEvent &Call, CheckerContext &C) const; void checkPostStmt(const ImplicitCastExpr *CastE, CheckerContext &C) const; }; } +static void recordFixedType(const MemRegion *Region, const CXXMethodDecl *MD, + CheckerContext &C) { + assert(Region); + assert(MD); + + ASTContext &Ctx = C.getASTContext(); + QualType Ty = Ctx.getPointerType(Ctx.getRecordType(MD->getParent())); + + ProgramStateRef State = C.getState(); + State = State->setDynamicTypeInfo(Region, Ty, /*CanBeSubclass=*/false); + C.addTransition(State); + return; +} + +void DynamicTypePropagation::checkPreCall(const CallEvent &Call, + CheckerContext &C) const { + if (const CXXConstructorCall *Ctor = dyn_cast(&Call)) { + // C++11 [class.cdtor]p4: When a virtual function is called directly or + // indirectly from a constructor or from a destructor, including during + // the construction or destruction of the class’s non-static data members, + // and the object to which the call applies is the object under + // construction or destruction, the function called is the final overrider + // in the constructor's or destructor's class and not one overriding it in + // a more-derived class. + + switch (Ctor->getOriginExpr()->getConstructionKind()) { + case CXXConstructExpr::CK_Complete: + case CXXConstructExpr::CK_Delegating: + // No additional type info necessary. + return; + case CXXConstructExpr::CK_NonVirtualBase: + case CXXConstructExpr::CK_VirtualBase: + if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion()) + recordFixedType(Target, Ctor->getDecl(), C); + return; + } + + return; + } + + if (const CXXDestructorCall *Dtor = dyn_cast(&Call)) { + // C++11 [class.cdtor]p4 (see above) + + const MemRegion *Target = Dtor->getCXXThisVal().getAsRegion(); + if (!Target) + return; + + // FIXME: getRuntimeDefinition() can be expensive. It would be better to do + // this when we are entering the stack frame for the destructor. + const Decl *D = Dtor->getRuntimeDefinition().getDecl(); + if (!D) + return; + + recordFixedType(Target, cast(D), C); + return; + } +} + void DynamicTypePropagation::checkPostCall(const CallEvent &Call, CheckerContext &C) const { // We can obtain perfect type info for return values from some calls. @@ -82,6 +142,31 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call, break; } } + + return; + } + + if (const CXXConstructorCall *Ctor = dyn_cast(&Call)) { + // We may need to undo the effects of our pre-call check. + switch (Ctor->getOriginExpr()->getConstructionKind()) { + case CXXConstructExpr::CK_Complete: + case CXXConstructExpr::CK_Delegating: + // No additional work necessary. 
+ // Note: This will leave behind the actual type of the object for + // complete constructors, but arguably that's a good thing, since it + // means the dynamic type info will be correct even for objects + // constructed with operator new. + return; + case CXXConstructExpr::CK_NonVirtualBase: + case CXXConstructExpr::CK_VirtualBase: + if (const MemRegion *Target = Ctor->getCXXThisVal().getAsRegion()) { + // We just finished a base constructor. Now we can use the subclass's + // type when resolving virtual calls. + const Decl *D = C.getLocationContext()->getDecl(); + recordFixedType(Target, cast(D), C); + } + return; + } } } diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp index 5503b23..3c00d99 100644 --- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp @@ -1107,18 +1107,6 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) { if (S) break; - // Enable this code once the semantics of NSDeallocateObject are resolved - // for GC. -#if 0 - // Handle: NSDeallocateObject(id anObject); - // This method does allow 'nil' (although we don't check it now). - if (strcmp(FName, "NSDeallocateObject") == 0) { - return RetTy == Ctx.VoidTy - ? getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, Dealloc) - : getPersistentStopSummary(); - } -#endif - if (RetTy->isPointerType()) { // For CoreFoundation ('CF') types. if (cocoa::isRefType(RetTy, "CF", FName)) { @@ -1591,28 +1579,12 @@ void RetainSummaryManager::InitializeMethodSummaries() { addClassMethSummary("NSWindow", "alloc", NoTrackYet); -#if 0 - addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect", - "styleMask", "backing", "defer", NULL); - - addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect", - "styleMask", "backing", "defer", "screen", NULL); -#endif - // For NSPanel (which subclasses NSWindow), allocated objects are not // self-owned. // FIXME: For now we don't track NSPanels. object for the same reason // as for NSWindow objects. addClassMethSummary("NSPanel", "alloc", NoTrackYet); -#if 0 - addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect", - "styleMask", "backing", "defer", NULL); - - addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect", - "styleMask", "backing", "defer", "screen", NULL); -#endif - // Don't track allocated autorelease pools yet, as it is okay to prematurely // exit a method. addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet); @@ -1636,62 +1608,6 @@ void RetainSummaryManager::InitializeMethodSummaries() { } //===----------------------------------------------------------------------===// -// AutoreleaseBindings - State used to track objects in autorelease pools. -//===----------------------------------------------------------------------===// -#define AUTORELEASE_POOL_MODELING (0) -// We do not currently have complete modeling of autorelease pools. 
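Note: the checkPreCall/checkPostCall pair added to DynamicTypePropagation models C++11 [class.cdtor]p4: while a base-class constructor or destructor runs, virtual calls on the object dispatch to that class's override, not to the most-derived one, and once the base subobject is finished the derived type applies again. The language rule itself can be seen in a plain, self-contained program; this mirrors the ctor-inlining.mm test added later in this patch.

    #include <iostream>

    struct A {
      virtual int get() { return 1; }
      A(int &out) { out = get(); }      // runs while the object is still an A
      virtual ~A() {}
    };

    struct B : A {
      virtual int get() { return 2; }
      B(int &o1, int &o2) : A(o1) { o2 = get(); }  // now the object is a B
    };

    struct C : B {
      virtual int get() { return 3; }
      C(int &o1, int &o2, int &o3) : B(o1, o2) { o3 = get(); }
    };

    int main() {
      int a = 0, b = 0, c = 0;
      C obj(a, b, c);
      // Each constructor's virtual call resolves to its own class's override.
      std::cout << a << ' ' << b << ' ' << c << '\n';   // prints: 1 2 3
      std::cout << obj.get() << '\n';                   // prints: 3
    }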
- -#if AUTORELEASE_POOL_MODELING - -typedef llvm::ImmutableMap ARCounts; -typedef llvm::ImmutableMap ARPoolContents; -typedef llvm::ImmutableList ARStack; - -static int AutoRCIndex = 0; -static int AutoRBIndex = 0; - -namespace { class AutoreleasePoolContents {}; } -namespace { class AutoreleaseStack {}; } - -namespace clang { -namespace ento { -template<> struct ProgramStateTrait - : public ProgramStatePartialTrait { - static inline void *GDMIndex() { return &AutoRBIndex; } -}; - -template<> struct ProgramStateTrait - : public ProgramStatePartialTrait { - static inline void *GDMIndex() { return &AutoRCIndex; } -}; -} // end GR namespace -} // end clang namespace - -static SymbolRef GetCurrentAutoreleasePool(ProgramStateRef state) { - ARStack stack = state->get(); - return stack.isEmpty() ? SymbolRef() : stack.getHead(); -} - -static ProgramStateRef -SendAutorelease(ProgramStateRef state, - ARCounts::Factory &F, - SymbolRef sym) { - SymbolRef pool = GetCurrentAutoreleasePool(state); - const ARCounts *cnts = state->get(pool); - ARCounts newCnts(0); - - if (cnts) { - const unsigned *cnt = (*cnts).lookup(sym); - newCnts = F.add(*cnts, sym, cnt ? *cnt + 1 : 1); - } - else - newCnts = F.add(F.getEmptyMap(), sym, 1); - - return state->set(pool, newCnts); -} -#endif - -//===----------------------------------------------------------------------===// // Error reporting. //===----------------------------------------------------------------------===// namespace { @@ -2431,11 +2347,6 @@ class RetainCountChecker mutable OwningPtr Summaries; mutable OwningPtr SummariesGC; - -#if AUTORELEASE_POOL_MODELING - mutable ARCounts::Factory ARCountFactory; -#endif - mutable SummaryLogTy SummaryLog; mutable bool ShouldResetSummaryLog; @@ -2892,15 +2803,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ, ResultTy)); // FIXME: Add a flag to the checker where allocations are assumed to - // *not* fail. (The code below is out-of-date, though.) -#if 0 - if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) { - bool isFeasible; - state = state.assume(loc::SymbolVal(Sym), true, isFeasible); - assert(isFeasible && "Cannot assume fresh symbol is non-null."); - } -#endif - + // *not* fail. break; } @@ -3011,9 +2914,6 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, case NewAutoreleasePool: assert(!C.isObjCGCEnabled()); -#if AUTORELEASE_POOL_MODELING - state = state->add(sym); -#endif return state; case MayEscape: @@ -3030,13 +2930,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, case Autorelease: if (C.isObjCGCEnabled()) return state; - // Update the autorelease counts. - // TODO: AutoreleasePoolContents are not currently used. We will need to - // call SendAutorelease after it's wired up. -#if AUTORELEASE_POOL_MODELING - state = SendAutorelease(state, ARCountFactory, sym); -#endif V = V.autorelease(); break; @@ -3719,35 +3613,6 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper, C.addTransition(state, Pred); } -//===----------------------------------------------------------------------===// -// Debug printing of refcount bindings and autorelease pools. -//===----------------------------------------------------------------------===// - -#if AUTORELEASE_POOL_MODELING -static void PrintPool(raw_ostream &Out, SymbolRef Sym, - ProgramStateRef State) { - Out << ' '; - if (Sym) - Sym->dumpToStream(Out); - else - Out << ""; - Out << ":{"; - - // Get the contents of the pool. 
- if (const ARCounts *Cnts = State->get(Sym)) - for (ARCounts::iterator I = Cnts->begin(), E = Cnts->end(); I != E; ++I) - Out << '(' << I.getKey() << ',' << I.getData() << ')'; - Out << '}'; -} - -static bool UsesAutorelease(ProgramStateRef state) { - // A state uses autorelease if it allocated an autorelease pool or if it has - // objects in the caller's autorelease pool. - return !state->get().isEmpty() || - state->get(SymbolRef()); -} -#endif - void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State, const char *NL, const char *Sep) const { @@ -3761,20 +3626,6 @@ void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State, I->second.print(Out); Out << NL; } - -#if AUTORELEASE_POOL_MODELING - // Print the autorelease stack. - if (UsesAutorelease(State)) { - Out << Sep << NL << "AR pool stack:"; - ARStack Stack = State->get(); - - PrintPool(Out, SymbolRef(), State); // Print the caller's pool. - for (ARStack::iterator I = Stack.begin(), E = Stack.end(); I != E; ++I) - PrintPool(Out, *I, State); - - Out << NL; - } -#endif } //===----------------------------------------------------------------------===// diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp index 5aac640..efeba17 100644 --- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp +++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp @@ -16,7 +16,7 @@ void AnalysisManager::anchor() { } AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags, const LangOptions &lang, - PathDiagnosticConsumer *pd, + const PathDiagnosticConsumers &PDC, StoreManagerCreator storemgr, ConstraintManagerCreator constraintmgr, CheckerManager *checkerMgr, @@ -33,7 +33,8 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags, AnalysisInliningMode IMode, bool NoRetry) : AnaCtxMgr(useUnoptimizedCFG, addImplicitDtors, /*addInitializers=*/true), - Ctx(ctx), Diags(diags), LangOpts(lang), PD(pd), + Ctx(ctx), Diags(diags), LangOpts(lang), + PathConsumers(PDC), CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr), CheckerMgr(checkerMgr), MaxNodes(maxnodes), MaxVisit(maxvisit), @@ -49,29 +50,19 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags, AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd(); } -AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags, - AnalysisManager &ParentAM) - : AnaCtxMgr(ParentAM.AnaCtxMgr.getUseUnoptimizedCFG(), - ParentAM.AnaCtxMgr.getCFGBuildOptions().AddImplicitDtors, - ParentAM.AnaCtxMgr.getCFGBuildOptions().AddInitializers), - Ctx(ctx), Diags(diags), - LangOpts(ParentAM.LangOpts), PD(ParentAM.getPathDiagnosticConsumer()), - CreateStoreMgr(ParentAM.CreateStoreMgr), - CreateConstraintMgr(ParentAM.CreateConstraintMgr), - CheckerMgr(ParentAM.CheckerMgr), - MaxNodes(ParentAM.MaxNodes), - MaxVisit(ParentAM.MaxVisit), - VisualizeEGDot(ParentAM.VisualizeEGDot), - VisualizeEGUbi(ParentAM.VisualizeEGUbi), - PurgeDead(ParentAM.PurgeDead), - EagerlyAssume(ParentAM.EagerlyAssume), - TrimGraph(ParentAM.TrimGraph), - EagerlyTrimEGraph(ParentAM.EagerlyTrimEGraph), - IPAMode(ParentAM.IPAMode), - InlineMaxStackDepth(ParentAM.InlineMaxStackDepth), - InlineMaxFunctionSize(ParentAM.InlineMaxFunctionSize), - InliningMode(ParentAM.InliningMode), - NoRetryExhausted(ParentAM.NoRetryExhausted) -{ - AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd(); +AnalysisManager::~AnalysisManager() { + FlushDiagnostics(); + for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(), + E = 
PathConsumers.end(); I != E; ++I) { + delete *I; + } +} + +void AnalysisManager::FlushDiagnostics() { + PathDiagnosticConsumer::FilesMade filesMade; + for (PathDiagnosticConsumers::iterator I = PathConsumers.begin(), + E = PathConsumers.end(); + I != E; ++I) { + (*I)->FlushDiagnostics(&filesMade); + } } diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp index 7ba2fa7..571baec 100644 --- a/lib/StaticAnalyzer/Core/BugReporter.cpp +++ b/lib/StaticAnalyzer/Core/BugReporter.cpp @@ -1345,6 +1345,9 @@ BugReport::~BugReport() { for (visitor_iterator I = visitor_begin(), E = visitor_end(); I != E; ++I) { delete *I; } + while (!interestingSymbols.empty()) { + popInterestingSymbolsAndRegions(); + } } const Decl *BugReport::getDeclWithIssue() const { @@ -1386,11 +1389,11 @@ void BugReport::markInteresting(SymbolRef sym) { return; // If the symbol wasn't already in our set, note a configuration change. - if (interestingSymbols.insert(sym).second) + if (getInterestingSymbols().insert(sym).second) ++ConfigurationChangeToken; if (const SymbolMetadata *meta = dyn_cast(sym)) - interestingRegions.insert(meta->getRegion()); + getInterestingRegions().insert(meta->getRegion()); } void BugReport::markInteresting(const MemRegion *R) { @@ -1399,11 +1402,11 @@ void BugReport::markInteresting(const MemRegion *R) { // If the base region wasn't already in our set, note a configuration change. R = R->getBaseRegion(); - if (interestingRegions.insert(R).second) + if (getInterestingRegions().insert(R).second) ++ConfigurationChangeToken; if (const SymbolicRegion *SR = dyn_cast(R)) - interestingSymbols.insert(SR->getSymbol()); + getInterestingSymbols().insert(SR->getSymbol()); } void BugReport::markInteresting(SVal V) { @@ -1411,30 +1414,58 @@ void BugReport::markInteresting(SVal V) { markInteresting(V.getAsSymbol()); } -bool BugReport::isInteresting(SVal V) const { +bool BugReport::isInteresting(SVal V) { return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol()); } -bool BugReport::isInteresting(SymbolRef sym) const { +bool BugReport::isInteresting(SymbolRef sym) { if (!sym) return false; // We don't currently consider metadata symbols to be interesting // even if we know their region is interesting. Is that correct behavior? 
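Note: AnalysisManager now receives the whole list of PathDiagnosticConsumers instead of a single consumer, flushes each of them, and deletes them in its destructor. A minimal sketch of that own-flush-free pattern follows; the class and member names are generic placeholders, not the actual clang types, and the raw owning pointers mirror the style of the 2012 code base.

    #include <iostream>
    #include <utility>
    #include <vector>

    // Stand-in for PathDiagnosticConsumer.
    struct Consumer {
      virtual ~Consumer() {}
      virtual void flush() = 0;
    };

    struct PrintingConsumer : Consumer {
      const char *Name;
      explicit PrintingConsumer(const char *N) : Name(N) {}
      void flush() override { std::cout << Name << ": flushed\n"; }
    };

    // Owns its consumers: flushes them all, then frees them, on destruction.
    class Manager {
      std::vector<Consumer *> Consumers;
    public:
      explicit Manager(std::vector<Consumer *> C) : Consumers(std::move(C)) {}
      ~Manager() {
        flushAll();
        for (Consumer *C : Consumers)
          delete C;
      }
      void flushAll() {
        for (Consumer *C : Consumers)
          C->flush();
      }
    };

    int main() {
      Manager M({new PrintingConsumer("html"), new PrintingConsumer("plist")});
    }   // destructor flushes both consumers, then deletes them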
- return interestingSymbols.count(sym); + return getInterestingSymbols().count(sym); } -bool BugReport::isInteresting(const MemRegion *R) const { +bool BugReport::isInteresting(const MemRegion *R) { if (!R) return false; R = R->getBaseRegion(); - bool b = interestingRegions.count(R); + bool b = getInterestingRegions().count(R); if (b) return true; if (const SymbolicRegion *SR = dyn_cast(R)) - return interestingSymbols.count(SR->getSymbol()); + return getInterestingSymbols().count(SR->getSymbol()); return false; } - + +void BugReport::lazyInitializeInterestingSets() { + if (interestingSymbols.empty()) { + interestingSymbols.push_back(new Symbols()); + interestingRegions.push_back(new Regions()); + } +} + +BugReport::Symbols &BugReport::getInterestingSymbols() { + lazyInitializeInterestingSets(); + return *interestingSymbols.back(); +} + +BugReport::Regions &BugReport::getInterestingRegions() { + lazyInitializeInterestingSets(); + return *interestingRegions.back(); +} + +void BugReport::pushInterestingSymbolsAndRegions() { + interestingSymbols.push_back(new Symbols(getInterestingSymbols())); + interestingRegions.push_back(new Regions(getInterestingRegions())); +} + +void BugReport::popInterestingSymbolsAndRegions() { + delete interestingSymbols.back(); + interestingSymbols.pop_back(); + delete interestingRegions.back(); + interestingRegions.pop_back(); +} const Stmt *BugReport::getStmt() const { if (!ErrorNode) @@ -1793,12 +1824,13 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) { } void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD, - SmallVectorImpl &bugReports) { + PathDiagnosticConsumer &PC, + ArrayRef &bugReports) { assert(!bugReports.empty()); SmallVector errorNodes; - for (SmallVectorImpl::iterator I = bugReports.begin(), - E = bugReports.end(); I != E; ++I) { + for (ArrayRef::iterator I = bugReports.begin(), + E = bugReports.end(); I != E; ++I) { errorNodes.push_back((*I)->getErrorNode()); } @@ -1818,8 +1850,7 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD, const ExplodedNode *N = GPair.second.first; // Start building the path diagnostic... - PathDiagnosticBuilder PDB(*this, R, BackMap.get(), - getPathDiagnosticConsumer()); + PathDiagnosticBuilder PDB(*this, R, BackMap.get(), &PC); // Register additional node visitors. R->addVisitor(new NilReceiverBRVisitor()); @@ -1867,6 +1898,8 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD, case PathDiagnosticConsumer::Minimal: GenerateMinimalPathDiagnostic(PD, PDB, N, visitors); break; + case PathDiagnosticConsumer::None: + llvm_unreachable("PathDiagnosticConsumer::None should never appear here"); } // Clean up the visitors we used. @@ -2022,53 +2055,21 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ, return exampleReport; } -//===----------------------------------------------------------------------===// -// DiagnosticCache. This is a hack to cache analyzer diagnostics. It -// uses global state, which eventually should go elsewhere. 
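Note: BugReport's interesting-symbol and interesting-region sets become stacks of sets: the first access lazily creates the bottom entry, pushInterestingSymbolsAndRegions pushes a copy of the current top, and the pop path (also run from the destructor) deletes entries so nothing leaks. A sketch of that lazily-initialized, copy-on-push stack; a plain std::set<int> stands in for the symbol and region sets.

    #include <cassert>
    #include <iostream>
    #include <set>
    #include <vector>

    class InterestingSet {
      typedef std::set<int> Items;
      std::vector<Items *> Stack;           // owned; mirrors the raw-pointer style

      Items &top() {
        if (Stack.empty())                  // lazy initialization on first use
          Stack.push_back(new Items());
        return *Stack.back();
      }

    public:
      ~InterestingSet() {
        while (!Stack.empty())
          pop();
      }
      void mark(int Item) { top().insert(Item); }
      bool isInteresting(int Item) { return top().count(Item) != 0; }
      void push() { Stack.push_back(new Items(top())); }  // copy of current state
      void pop() {
        assert(!Stack.empty() && "popping an empty stack");
        delete Stack.back();
        Stack.pop_back();
      }
    };

    int main() {
      InterestingSet S;
      S.mark(1);
      S.push();                                 // snapshot: {1}
      S.mark(2);                                // top is now {1, 2}
      std::cout << S.isInteresting(2) << '\n';  // 1
      S.pop();                                  // back to the snapshot
      std::cout << S.isInteresting(2) << '\n';  // 0
    }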
-//===----------------------------------------------------------------------===// -namespace { -class DiagCacheItem : public llvm::FoldingSetNode { - llvm::FoldingSetNodeID ID; -public: - DiagCacheItem(BugReport *R, PathDiagnostic *PD) { - R->Profile(ID); - PD->Profile(ID); - } - - void Profile(llvm::FoldingSetNodeID &id) { - id = ID; - } - - llvm::FoldingSetNodeID &getID() { return ID; } -}; -} - -static bool IsCachedDiagnostic(BugReport *R, PathDiagnostic *PD) { - // FIXME: Eventually this diagnostic cache should reside in something - // like AnalysisManager instead of being a static variable. This is - // really unsafe in the long term. - typedef llvm::FoldingSet DiagnosticCache; - static DiagnosticCache DC; - - void *InsertPos; - DiagCacheItem *Item = new DiagCacheItem(R, PD); - - if (DC.FindNodeOrInsertPos(Item->getID(), InsertPos)) { - delete Item; - return true; - } - - DC.InsertNode(Item, InsertPos); - return false; -} - void BugReporter::FlushReport(BugReportEquivClass& EQ) { SmallVector bugReports; BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports); - if (!exampleReport) - return; - - PathDiagnosticConsumer* PD = getPathDiagnosticConsumer(); + if (exampleReport) { + const PathDiagnosticConsumers &C = getPathDiagnosticConsumers(); + for (PathDiagnosticConsumers::const_iterator I=C.begin(), + E=C.end(); I != E; ++I) { + FlushReport(exampleReport, **I, bugReports); + } + } +} + +void BugReporter::FlushReport(BugReport *exampleReport, + PathDiagnosticConsumer &PD, + ArrayRef bugReports) { // FIXME: Make sure we use the 'R' for the path that was actually used. // Probably doesn't make a difference in practice. @@ -2077,65 +2078,39 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) { OwningPtr D(new PathDiagnostic(exampleReport->getDeclWithIssue(), exampleReport->getBugType().getName(), - !PD || PD->useVerboseDescription() + PD.useVerboseDescription() ? exampleReport->getDescription() : exampleReport->getShortDescription(), BT.getCategory())); - if (!bugReports.empty()) - GeneratePathDiagnostic(*D.get(), bugReports); - - // Get the meta data. - const BugReport::ExtraTextList &Meta = - exampleReport->getExtraText(); - for (BugReport::ExtraTextList::const_iterator i = Meta.begin(), - e = Meta.end(); i != e; ++i) { - D->addMeta(*i); - } - - // Emit a summary diagnostic to the regular Diagnostics engine. - BugReport::ranges_iterator Beg, End; - llvm::tie(Beg, End) = exampleReport->getRanges(); - DiagnosticsEngine &Diag = getDiagnostic(); - - if (!IsCachedDiagnostic(exampleReport, D.get())) { - // Search the description for '%', as that will be interpretted as a - // format character by FormatDiagnostics. - StringRef desc = exampleReport->getShortDescription(); - - SmallString<512> TmpStr; - llvm::raw_svector_ostream Out(TmpStr); - for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) { - if (*I == '%') - Out << "%%"; - else - Out << *I; - } - - Out.flush(); - unsigned ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning, TmpStr); - - DiagnosticBuilder diagBuilder = Diag.Report( - exampleReport->getLocation(getSourceManager()).asLocation(), ErrorDiag); - for (BugReport::ranges_iterator I = Beg; I != End; ++I) - diagBuilder << *I; + // Generate the full path diagnostic, using the generation scheme + // specified by the PathDiagnosticConsumer. 
+ if (PD.getGenerationScheme() != PathDiagnosticConsumer::None) { + if (!bugReports.empty()) + GeneratePathDiagnostic(*D.get(), PD, bugReports); } - // Emit a full diagnostic for the path if we have a PathDiagnosticConsumer. - if (!PD) - return; - + // If the path is empty, generate a single step path with the location + // of the issue. if (D->path.empty()) { - PathDiagnosticPiece *piece = new PathDiagnosticEventPiece( - exampleReport->getLocation(getSourceManager()), - exampleReport->getDescription()); + PathDiagnosticLocation L = exampleReport->getLocation(getSourceManager()); + PathDiagnosticPiece *piece = + new PathDiagnosticEventPiece(L, exampleReport->getDescription()); + BugReport::ranges_iterator Beg, End; + llvm::tie(Beg, End) = exampleReport->getRanges(); for ( ; Beg != End; ++Beg) piece->addRange(*Beg); - D->getActivePath().push_back(piece); } - PD->HandlePathDiagnostic(D.take()); + // Get the meta data. + const BugReport::ExtraTextList &Meta = exampleReport->getExtraText(); + for (BugReport::ExtraTextList::const_iterator i = Meta.begin(), + e = Meta.end(); i != e; ++i) { + D->addMeta(*i); + } + + PD.HandlePathDiagnostic(D.take()); } void BugReporter::EmitBasicReport(const Decl *DeclWithIssue, diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp index 46aa9e2..e729587 100644 --- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp +++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp @@ -323,7 +323,7 @@ void bugreporter::addTrackNullOrUndefValueVisitor(const ExplodedNode *N, // Walk through lvalue-to-rvalue conversions. const Expr *Ex = dyn_cast(S); if (Ex) { - Ex = Ex->IgnoreParenLValueCasts(); + Ex = Ex->IgnoreParenCasts(); if (const DeclRefExpr *DR = dyn_cast(Ex)) { if (const VarDecl *VD = dyn_cast(DR->getDecl())) { const VarRegion *R = diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp index e3f4c61..5345bd5 100644 --- a/lib/StaticAnalyzer/Core/CallEvent.cpp +++ b/lib/StaticAnalyzer/Core/CallEvent.cpp @@ -207,10 +207,14 @@ SourceRange CallEvent::getArgSourceRange(unsigned Index) const { return ArgE->getSourceRange(); } +void CallEvent::dump() const { + dump(llvm::errs()); +} + void CallEvent::dump(raw_ostream &Out) const { ASTContext &Ctx = getState()->getStateManager().getContext(); if (const Expr *E = getOriginExpr()) { - E->printPretty(Out, Ctx, 0, Ctx.getPrintingPolicy()); + E->printPretty(Out, 0, Ctx.getPrintingPolicy()); Out << "\n"; return; } @@ -372,47 +376,49 @@ void CXXInstanceCall::getExtraInvalidatedRegions(RegionList &Regions) const { Regions.push_back(R); } -static const CXXMethodDecl *devirtualize(const CXXMethodDecl *MD, SVal ThisVal){ - const MemRegion *R = ThisVal.getAsRegion(); - if (!R) - return 0; - - const TypedValueRegion *TR = dyn_cast(R->StripCasts()); - if (!TR) - return 0; - - const CXXRecordDecl *RD = TR->getValueType()->getAsCXXRecordDecl(); - if (!RD) - return 0; - - const CXXMethodDecl *Result = MD->getCorrespondingMethodInClass(RD); - const FunctionDecl *Definition; - if (!Result->hasBody(Definition)) - return 0; - - return cast(Definition); -} - RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const { + // Do we have a decl at all? const Decl *D = getDecl(); if (!D) return RuntimeDefinition(); + // If the method is non-virtual, we know we can inline it. 
const CXXMethodDecl *MD = cast(D); if (!MD->isVirtual()) return AnyFunctionCall::getRuntimeDefinition(); - // If the method is virtual, see if we can find the actual implementation - // based on context-sensitivity. - // FIXME: Virtual method calls behave differently when an object is being - // constructed or destructed. It's not as simple as "no devirtualization" - // because a /partially/ constructed object can be referred to through a - // base pointer. We'll eventually want to use DynamicTypeInfo here. - if (const CXXMethodDecl *Devirtualized = devirtualize(MD, getCXXThisVal())) - return RuntimeDefinition(Devirtualized); + // Do we know the implicit 'this' object being called? + const MemRegion *R = getCXXThisVal().getAsRegion(); + if (!R) + return RuntimeDefinition(); - return RuntimeDefinition(); + // Do we know anything about the type of 'this'? + DynamicTypeInfo DynType = getState()->getDynamicTypeInfo(R); + if (!DynType.isValid()) + return RuntimeDefinition(); + + // Is the type a C++ class? (This is mostly a defensive check.) + QualType RegionType = DynType.getType()->getPointeeType(); + const CXXRecordDecl *RD = RegionType->getAsCXXRecordDecl(); + if (!RD || !RD->hasDefinition()) + return RuntimeDefinition(); + + // Find the decl for this method in that class. + const CXXMethodDecl *Result = MD->getCorrespondingMethodInClass(RD, true); + assert(Result && "At the very least the static decl should show up."); + + // Does the decl that we found have an implementation? + const FunctionDecl *Definition; + if (!Result->hasBody(Definition)) + return RuntimeDefinition(); + + // We found a definition. If we're not sure that this devirtualization is + // actually what will happen at runtime, make sure to provide the region so + // that ExprEngine can decide what to do with it. + if (DynType.canBeASubClass()) + return RuntimeDefinition(Definition, R->StripCasts()); + return RuntimeDefinition(Definition, /*DispatchRegion=*/0); } void CXXInstanceCall::getInitialStackFrameContents( @@ -421,16 +427,17 @@ void CXXInstanceCall::getInitialStackFrameContents( AnyFunctionCall::getInitialStackFrameContents(CalleeCtx, Bindings); // Handle the binding of 'this' in the new stack frame. - // We need to make sure we have the proper layering of CXXBaseObjectRegions. SVal ThisVal = getCXXThisVal(); if (!ThisVal.isUnknown()) { ProgramStateManager &StateMgr = getState()->getStateManager(); SValBuilder &SVB = StateMgr.getSValBuilder(); - + const CXXMethodDecl *MD = cast(CalleeCtx->getDecl()); Loc ThisLoc = SVB.getCXXThis(MD, CalleeCtx); - if (const MemRegion *ThisReg = ThisVal.getAsRegion()) { + // If we devirtualized to a different member function, we need to make sure + // we have the proper layering of CXXBaseObjectRegions. + if (MD->getCanonicalDecl() != getDecl()->getCanonicalDecl()) { ASTContext &Ctx = SVB.getContext(); const CXXRecordDecl *Class = MD->getParent(); QualType Ty = Ctx.getPointerType(Ctx.getRecordType(Class)); @@ -439,13 +446,10 @@ void CXXInstanceCall::getInitialStackFrameContents( bool Failed; ThisVal = StateMgr.getStoreManager().evalDynamicCast(ThisVal, Ty, Failed); assert(!Failed && "Calling an incorrectly devirtualized method"); - - // If we couldn't build the correct cast, just strip off all casts. 
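Note: CXXInstanceCall::getRuntimeDefinition now consults the analyzer's dynamic type information for the 'this' region: if a dynamic type is known, the virtual method is resolved to the corresponding override in that class; if the type could still be a subclass, the definition is returned together with the dispatch region so the engine can bifurcate. A toy sketch of that lookup follows; the tables and struct fields are hypothetical stand-ins for CXXRecordDecl, DynamicTypeInfo, and RuntimeDefinition.

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    struct DynamicTypeInfo {
      std::string Type;        // best known type of the object, "" if unknown
      bool CanBeSubclass;      // true if the real type may be more derived
    };

    struct RuntimeDefinition {
      std::string Definition;  // resolved method body, "" if unknown
      bool MayHaveOtherDefinitions;
    };

    // Toy override table: (class, method) -> concrete definition.
    typedef std::map<std::pair<std::string, std::string>, std::string> Overrides;

    static RuntimeDefinition resolveVirtualCall(const Overrides &Table,
                                                const DynamicTypeInfo &DynType,
                                                const std::string &Method) {
      if (DynType.Type.empty())
        return {"", false};                       // nothing known about 'this'
      Overrides::const_iterator I =
          Table.find(std::make_pair(DynType.Type, Method));
      if (I == Table.end())
        return {"", false};                       // no body available
      // Sound only if the dynamic type is exact; otherwise flag it so the
      // caller can explore both the devirtualized and the conservative path.
      return {I->second, DynType.CanBeSubclass};
    }

    int main() {
      Overrides Table = {{{"Base", "get"}, "Base::get"},
                         {{"Derived", "get"}, "Derived::get"}};
      RuntimeDefinition R1 =
          resolveVirtualCall(Table, {"Derived", /*CanBeSubclass=*/false}, "get");
      RuntimeDefinition R2 =
          resolveVirtualCall(Table, {"Base", /*CanBeSubclass=*/true}, "get");
      std::cout << R1.Definition << " exact=" << !R1.MayHaveOtherDefinitions << '\n';
      std::cout << R2.Definition << " exact=" << !R2.MayHaveOtherDefinitions << '\n';
    }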
- if (ThisVal.isUnknown()) - ThisVal = loc::MemRegionVal(ThisReg->StripCasts()); } - Bindings.push_back(std::make_pair(ThisLoc, ThisVal)); + if (!ThisVal.isUnknown()) + Bindings.push_back(std::make_pair(ThisLoc, ThisVal)); } } @@ -666,6 +670,9 @@ bool ObjCMethodCall::canBeOverridenInSubclass(ObjCInterfaceDecl *IDecl, if (InterfLoc.isValid() && SM.isFromMainFile(InterfLoc)) return false; + // Assume that property accessors are not overridden. + if (getMessageKind() == OCM_PropertyAccess) + return false; // We assume that if the method is public (declared outside of main file) or // has a parent which publicly declares the method, the method could be @@ -853,4 +860,3 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx, return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(), State, CallerCtx); } - diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp index 8ee6723..3b2e4ec 100644 --- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp +++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp @@ -154,6 +154,11 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) { } } + // Generate a CallEvent /before/ cleaning the state, so that we can get the + // correct value for 'this' (if necessary). + CallEventManager &CEMgr = getStateManager().getCallEventManager(); + CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state); + // Step 3: BindedRetNode -> CleanedNodes // If we can find a statement and a block in the inlined function, run remove // dead bindings before returning from the call. This is important to ensure @@ -203,21 +208,21 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) { &Ctx); SaveAndRestore CBISave(currentStmtIdx, calleeCtx->getIndex()); - CallEventManager &CEMgr = getStateManager().getCallEventManager(); - CallEventRef<> Call = CEMgr.getCaller(calleeCtx, CEEState); + CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState); ExplodedNodeSet DstPostCall; - getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode, *Call, - *this, true); + getCheckerManager().runCheckersForPostCall(DstPostCall, CEENode, + *UpdatedCall, *this, + /*WasInlined=*/true); ExplodedNodeSet Dst; - if (isa(Call)) { - getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, - cast(*Call), - *this, true); + if (const ObjCMethodCall *Msg = dyn_cast(Call)) { + getCheckerManager().runCheckersForPostObjCMessage(Dst, DstPostCall, *Msg, + *this, + /*WasInlined=*/true); } else if (CE) { getCheckerManager().runCheckersForPostStmt(Dst, DstPostCall, CE, - *this, true); + *this, /*WasInlined=*/true); } else { Dst.insert(DstPostCall); } @@ -555,12 +560,20 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred, RuntimeDefinition RD = Call->getRuntimeDefinition(); const Decl *D = RD.getDecl(); if (D) { - // Explore with and without inlining the call. - if (RD.mayHaveOtherDefinitions() && - getAnalysisManager().IPAMode == DynamicDispatchBifurcate) { - BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred); - return; + if (RD.mayHaveOtherDefinitions()) { + // Explore with and without inlining the call. + if (getAnalysisManager().IPAMode == DynamicDispatchBifurcate) { + BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred); + return; + } + + // Don't inline if we're not in any dynamic dispatch mode. 
+ if (getAnalysisManager().IPAMode != DynamicDispatch) { + conservativeEvalCall(*Call, Bldr, Pred, State); + return; + } } + // We are not bifurcating and we do have a Decl, so just inline. if (inlineCall(*Call, D, Bldr, Pred, State)) return; @@ -575,6 +588,7 @@ void ExprEngine::BifurcateCall(const MemRegion *BifurReg, const CallEvent &Call, const Decl *D, NodeBuilder &Bldr, ExplodedNode *Pred) { assert(BifurReg); + BifurReg = BifurReg->StripCasts(); // Check if we've performed the split already - note, we only want // to split the path once per memory region. diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp index 0152e32..982bcbf 100644 --- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp +++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp @@ -45,7 +45,7 @@ public: virtual ~HTMLDiagnostics() { FlushDiagnostics(NULL); } virtual void FlushDiagnosticsImpl(std::vector &Diags, - SmallVectorImpl *FilesMade); + FilesMade *filesMade); virtual StringRef getName() const { return "HTMLDiagnostics"; @@ -63,7 +63,7 @@ public: const char *HighlightEnd = ""); void ReportDiag(const PathDiagnostic& D, - SmallVectorImpl *FilesMade); + FilesMade *filesMade); }; } // end anonymous namespace @@ -76,10 +76,10 @@ HTMLDiagnostics::HTMLDiagnostics(const std::string& prefix, FilePrefix.appendComponent("report"); } -PathDiagnosticConsumer* -ento::createHTMLDiagnosticConsumer(const std::string& prefix, - const Preprocessor &PP) { - return new HTMLDiagnostics(prefix, PP); +void ento::createHTMLDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string& prefix, + const Preprocessor &PP) { + C.push_back(new HTMLDiagnostics(prefix, PP)); } //===----------------------------------------------------------------------===// @@ -88,15 +88,15 @@ ento::createHTMLDiagnosticConsumer(const std::string& prefix, void HTMLDiagnostics::FlushDiagnosticsImpl( std::vector &Diags, - SmallVectorImpl *FilesMade) { + FilesMade *filesMade) { for (std::vector::iterator it = Diags.begin(), et = Diags.end(); it != et; ++it) { - ReportDiag(**it, FilesMade); + ReportDiag(**it, filesMade); } } void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, - SmallVectorImpl *FilesMade) { + FilesMade *filesMade) { // Create the HTML directory if it is missing. if (!createdDir) { @@ -266,8 +266,10 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, return; } - if (FilesMade) - FilesMade->push_back(llvm::sys::path::filename(H.str())); + if (filesMade) { + filesMade->push_back(std::make_pair(StringRef(getName()), + llvm::sys::path::filename(H.str()))); + } // Emit the HTML to disk. for (RewriteBuffer::iterator I = Buf->begin(), E = Buf->end(); I!=E; ++I) @@ -480,29 +482,11 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID, R.InsertTextBefore(Loc, os.str()); // Now highlight the ranges. - for (const SourceRange *I = P.ranges_begin(), *E = P.ranges_end(); - I != E; ++I) + ArrayRef Ranges = P.getRanges(); + for (ArrayRef::iterator I = Ranges.begin(), + E = Ranges.end(); I != E; ++I) { HighlightRange(R, LPosInfo.first, *I); - -#if 0 - // If there is a code insertion hint, insert that code. - // FIXME: This code is disabled because it seems to mangle the HTML - // output. I'm leaving it here because it's generally the right idea, - // but needs some help from someone more familiar with the rewriter. 
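Note: with the reorganized defaultEvalCall, a call whose runtime definition may have other definitions is handled according to the interprocedural mode: bifurcate (explore both the inlined and the conservative path) under DynamicDispatchBifurcate, evaluate conservatively unless full DynamicDispatch is enabled, and inline otherwise. A small sketch of that three-way policy; DynamicDispatch and DynamicDispatchBifurcate appear in the patch, while the first enumerator and the surrounding types here are placeholders.

    #include <iostream>

    enum IPAMode { Inlining, DynamicDispatch, DynamicDispatchBifurcate };
    enum CallAction { Inline, Conservative, Bifurcate };

    // Decide how to evaluate a call once a runtime definition has been found.
    static CallAction decideCall(bool MayHaveOtherDefinitions, IPAMode Mode) {
      if (MayHaveOtherDefinitions) {
        if (Mode == DynamicDispatchBifurcate)
          return Bifurcate;            // explore inlined and non-inlined paths
        if (Mode != DynamicDispatch)
          return Conservative;         // not allowed to trust the guess
      }
      return Inline;                   // exact definition, or aggressive mode
    }

    int main() {
      std::cout << decideCall(true, DynamicDispatchBifurcate) << '\n';  // 2
      std::cout << decideCall(true, Inlining) << '\n';                  // 1
      std::cout << decideCall(false, Inlining) << '\n';                 // 0
    }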
- for (const FixItHint *Hint = P.fixit_begin(), *HintEnd = P.fixit_end(); - Hint != HintEnd; ++Hint) { - if (Hint->RemoveRange.isValid()) { - HighlightRange(R, LPosInfo.first, Hint->RemoveRange, - "", ""); - } - if (Hint->InsertionLoc.isValid()) { - std::string EscapedCode = html::EscapeText(Hint->CodeToInsert, true); - EscapedCode = "" + EscapedCode - + ""; - R.InsertTextBefore(Hint->InsertionLoc, EscapedCode); - } } -#endif } static void EmitAlphaCounter(raw_ostream &os, unsigned n) { diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp index 7d52aac..c849778 100644 --- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp +++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp @@ -157,13 +157,13 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) { return; // FIXME: Emit a warning? // Check the source ranges. - for (PathDiagnosticPiece::range_iterator RI = piece->ranges_begin(), - RE = piece->ranges_end(); - RI != RE; ++RI) { - SourceLocation L = SMgr.getExpansionLoc(RI->getBegin()); + ArrayRef Ranges = piece->getRanges(); + for (ArrayRef::iterator I = Ranges.begin(), + E = Ranges.end(); I != E; ++I) { + SourceLocation L = SMgr.getExpansionLoc(I->getBegin()); if (!L.isFileID() || SMgr.getFileID(L) != FID) return; // FIXME: Emit a warning? - L = SMgr.getExpansionLoc(RI->getEnd()); + L = SMgr.getExpansionLoc(I->getEnd()); if (!L.isFileID() || SMgr.getFileID(L) != FID) return; // FIXME: Emit a warning? } @@ -240,8 +240,8 @@ struct CompareDiagnostics { }; } -void -PathDiagnosticConsumer::FlushDiagnostics(SmallVectorImpl *Files) { +void PathDiagnosticConsumer::FlushDiagnostics( + PathDiagnosticConsumer::FilesMade *Files) { if (flushed) return; @@ -718,7 +718,9 @@ void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const { ID.AddString(str); // FIXME: Add profiling support for code hints. 
ID.AddInteger((unsigned) getDisplayHint()); - for (range_iterator I = ranges_begin(), E = ranges_end(); I != E; ++I) { + ArrayRef Ranges = getRanges(); + for (ArrayRef::iterator I = Ranges.begin(), E = Ranges.end(); + I != E; ++I) { ID.AddInteger(I->getBegin().getRawEncoding()); ID.AddInteger(I->getEnd().getRawEncoding()); } diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp index 58a4bba..d5fdd9d 100644 --- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp +++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp @@ -30,23 +30,21 @@ namespace { class PlistDiagnostics : public PathDiagnosticConsumer { const std::string OutputFile; const LangOptions &LangOpts; - OwningPtr SubPD; const bool SupportsCrossFileDiagnostics; public: PlistDiagnostics(const std::string& prefix, const LangOptions &LangOpts, - bool supportsMultipleFiles, - PathDiagnosticConsumer *subPD); + bool supportsMultipleFiles); virtual ~PlistDiagnostics() {} void FlushDiagnosticsImpl(std::vector &Diags, - SmallVectorImpl *FilesMade); + FilesMade *filesMade); virtual StringRef getName() const { return "PlistDiagnostics"; } - PathGenerationScheme getGenerationScheme() const; + PathGenerationScheme getGenerationScheme() const { return Extensive; } bool supportsLogicalOpControlFlow() const { return true; } bool supportsAllBlockEdges() const { return true; } virtual bool useVerboseDescription() const { return false; } @@ -58,29 +56,20 @@ namespace { PlistDiagnostics::PlistDiagnostics(const std::string& output, const LangOptions &LO, - bool supportsMultipleFiles, - PathDiagnosticConsumer *subPD) - : OutputFile(output), LangOpts(LO), SubPD(subPD), + bool supportsMultipleFiles) + : OutputFile(output), LangOpts(LO), SupportsCrossFileDiagnostics(supportsMultipleFiles) {} -PathDiagnosticConsumer* -ento::createPlistDiagnosticConsumer(const std::string& s, const Preprocessor &PP, - PathDiagnosticConsumer *subPD) { - return new PlistDiagnostics(s, PP.getLangOpts(), false, subPD); +void ento::createPlistDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string& s, + const Preprocessor &PP) { + C.push_back(new PlistDiagnostics(s, PP.getLangOpts(), false)); } -PathDiagnosticConsumer* -ento::createPlistMultiFileDiagnosticConsumer(const std::string &s, - const Preprocessor &PP) { - return new PlistDiagnostics(s, PP.getLangOpts(), true, 0); -} - -PathDiagnosticConsumer::PathGenerationScheme -PlistDiagnostics::getGenerationScheme() const { - if (const PathDiagnosticConsumer *PD = SubPD.get()) - return PD->getGenerationScheme(); - - return Extensive; +void ento::createPlistMultiFileDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string &s, + const Preprocessor &PP) { + C.push_back(new PlistDiagnostics(s, PP.getLangOpts(), true)); } static void AddFID(FIDMap &FIDs, SmallVectorImpl &V, @@ -231,15 +220,16 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P, EmitLocation(o, SM, LangOpts, L, FM, indent); // Output the ranges (if any). 
- PathDiagnosticPiece::range_iterator RI = P.ranges_begin(), - RE = P.ranges_end(); + ArrayRef Ranges = P.getRanges(); - if (RI != RE) { + if (!Ranges.empty()) { Indent(o, indent) << "ranges\n"; Indent(o, indent) << "\n"; ++indent; - for (; RI != RE; ++RI) - EmitRange(o, SM, LangOpts, *RI, FM, indent+1); + for (ArrayRef::iterator I = Ranges.begin(), E = Ranges.end(); + I != E; ++I) { + EmitRange(o, SM, LangOpts, *I, FM, indent+1); + } --indent; Indent(o, indent) << "\n"; } @@ -353,7 +343,7 @@ static void ReportPiece(raw_ostream &o, void PlistDiagnostics::FlushDiagnosticsImpl( std::vector &Diags, - SmallVectorImpl *FilesMade) { + FilesMade *filesMade) { // Build up a set of FIDs that we use by scanning the locations and // ranges of the diagnostics. FIDMap FM; @@ -380,11 +370,11 @@ void PlistDiagnostics::FlushDiagnosticsImpl( I!=E; ++I) { const PathDiagnosticPiece *piece = I->getPtr(); AddFID(FM, Fids, SM, piece->getLocation().asLocation()); - - for (PathDiagnosticPiece::range_iterator RI = piece->ranges_begin(), - RE= piece->ranges_end(); RI != RE; ++RI) { - AddFID(FM, Fids, SM, RI->getBegin()); - AddFID(FM, Fids, SM, RI->getEnd()); + ArrayRef Ranges = piece->getRanges(); + for (ArrayRef::iterator I = Ranges.begin(), + E = Ranges.end(); I != E; ++I) { + AddFID(FM, Fids, SM, I->getBegin()); + AddFID(FM, Fids, SM, I->getEnd()); } if (const PathDiagnosticCallPiece *call = @@ -507,19 +497,21 @@ void PlistDiagnostics::FlushDiagnosticsImpl( EmitLocation(o, *SM, LangOpts, D->getLocation(), FM, 2); // Output the diagnostic to the sub-diagnostic client, if any. - if (SubPD) { - std::vector SubDiags; - SubDiags.push_back(D); - SmallVector SubFilesMade; - SubPD->FlushDiagnosticsImpl(SubDiags, &SubFilesMade); - - if (!SubFilesMade.empty()) { - o << " " << SubPD->getName() << "_files\n"; - o << " \n"; - for (size_t i = 0, n = SubFilesMade.size(); i < n ; ++i) - o << " " << SubFilesMade[i] << "\n"; - o << " \n"; + if (!filesMade->empty()) { + StringRef lastName; + for (FilesMade::iterator I = filesMade->begin(), E = filesMade->end(); + I != E; ++I) { + StringRef newName = I->first; + if (newName != lastName) { + if (!lastName.empty()) + o << " \n"; + lastName = newName; + o << " " << lastName << "_files\n"; + o << " \n"; + } + o << " " << I->second << "\n"; } + o << " \n"; } // Close up the entry. @@ -531,6 +523,8 @@ void PlistDiagnostics::FlushDiagnosticsImpl( // Finish. o << "\n"; - if (FilesMade) - FilesMade->push_back(OutputFile); + if (filesMade) { + StringRef Name(getName()); + filesMade->push_back(std::make_pair(Name, OutputFile)); + } } diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp index dc988cc..2000338 100644 --- a/lib/StaticAnalyzer/Core/ProgramState.cpp +++ b/lib/StaticAnalyzer/Core/ProgramState.cpp @@ -745,14 +745,16 @@ template<> struct ProgramStateTrait }} DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const { + Reg = Reg->StripCasts(); + // Look up the dynamic type in the GDM. const DynamicTypeInfo *GDMType = get(Reg); if (GDMType) return *GDMType; // Otherwise, fall back to what we know about the region. 
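Note: FilesMade entries are now (consumer name, file) pairs, and the plist writer emits them grouped, opening a new "<name>_files" section whenever the consumer name changes; entries for one consumer are assumed to arrive contiguously. A standalone sketch of that grouping pass; the markup strings are simplified placeholders for the real plist output.

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    typedef std::vector<std::pair<std::string, std::string> > FilesMade;

    // Emit one group per consumer, assuming a consumer's entries are contiguous.
    static void emitGrouped(const FilesMade &Files) {
      std::string LastName;
      for (FilesMade::const_iterator I = Files.begin(), E = Files.end();
           I != E; ++I) {
        if (I->first != LastName) {
          if (!LastName.empty())
            std::cout << "  </array>\n";
          LastName = I->first;
          std::cout << "  <key>" << LastName << "_files</key>\n  <array>\n";
        }
        std::cout << "    <string>" << I->second << "</string>\n";
      }
      if (!LastName.empty())
        std::cout << "  </array>\n";
    }

    int main() {
      FilesMade Files;
      Files.push_back(std::make_pair("HTMLDiagnostics", "report-1.html"));
      Files.push_back(std::make_pair("HTMLDiagnostics", "report-2.html"));
      Files.push_back(std::make_pair("PlistDiagnostics", "out.plist"));
      emitGrouped(Files);
    }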
- if (const TypedValueRegion *TR = dyn_cast(Reg)) - return DynamicTypeInfo(TR->getValueType()); + if (const TypedRegion *TR = dyn_cast(Reg)) + return DynamicTypeInfo(TR->getLocationType(), /*CanBeSubclass=*/false); if (const SymbolicRegion *SR = dyn_cast(Reg)) { SymbolRef Sym = SR->getSymbol(); @@ -764,6 +766,7 @@ DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const { ProgramStateRef ProgramState::setDynamicTypeInfo(const MemRegion *Reg, DynamicTypeInfo NewTy) const { + Reg = Reg->StripCasts(); ProgramStateRef NewState = set(Reg, NewTy); assert(NewState); return NewState; diff --git a/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp index e5b8553..66bf4bb 100644 --- a/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp +++ b/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp @@ -32,7 +32,7 @@ public: : OutputFile(output), Diag(diag) {} void FlushDiagnosticsImpl(std::vector &Diags, - SmallVectorImpl *FilesMade); + FilesMade *filesMade); virtual StringRef getName() const { return "TextPathDiagnostics"; @@ -47,15 +47,15 @@ public: } // end anonymous namespace -PathDiagnosticConsumer* -ento::createTextPathDiagnosticConsumer(const std::string& out, - const Preprocessor &PP) { - return new TextPathDiagnostics(out, PP.getDiagnostics()); +void ento::createTextPathDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string& out, + const Preprocessor &PP) { + C.push_back(new TextPathDiagnostics(out, PP.getDiagnostics())); } void TextPathDiagnostics::FlushDiagnosticsImpl( std::vector &Diags, - SmallVectorImpl *FilesMade) { + FilesMade *) { for (std::vector::iterator it = Diags.begin(), et = Diags.end(); it != et; ++it) { const PathDiagnostic *D = *it; diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp index fcdaaea..34b5266 100644 --- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp +++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp @@ -64,14 +64,55 @@ STATISTIC(MaxCFGSize, "The maximum number of basic blocks in a function."); // Special PathDiagnosticConsumers. 
//===----------------------------------------------------------------------===// -static PathDiagnosticConsumer* -createPlistHTMLDiagnosticConsumer(const std::string& prefix, - const Preprocessor &PP) { - PathDiagnosticConsumer *PD = - createHTMLDiagnosticConsumer(llvm::sys::path::parent_path(prefix), PP); - return createPlistDiagnosticConsumer(prefix, PP, PD); +static void createPlistHTMLDiagnosticConsumer(PathDiagnosticConsumers &C, + const std::string &prefix, + const Preprocessor &PP) { + createHTMLDiagnosticConsumer(C, llvm::sys::path::parent_path(prefix), PP); + createPlistDiagnosticConsumer(C, prefix, PP); } +namespace { +class ClangDiagPathDiagConsumer : public PathDiagnosticConsumer { + DiagnosticsEngine &Diag; +public: + ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag) : Diag(Diag) {} + virtual ~ClangDiagPathDiagConsumer() {} + virtual StringRef getName() const { return "ClangDiags"; } + virtual bool useVerboseDescription() const { return false; } + virtual PathGenerationScheme getGenerationScheme() const { return None; } + + void FlushDiagnosticsImpl(std::vector &Diags, + FilesMade *filesMade) { + for (std::vector::iterator I = Diags.begin(), + E = Diags.end(); I != E; ++I) { + const PathDiagnostic *PD = *I; + StringRef desc = PD->getDescription(); + SmallString<512> TmpStr; + llvm::raw_svector_ostream Out(TmpStr); + for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) { + if (*I == '%') + Out << "%%"; + else + Out << *I; + } + Out.flush(); + unsigned ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning, + TmpStr); + SourceLocation L = PD->getLocation().asLocation(); + DiagnosticBuilder diagBuilder = Diag.Report(L, ErrorDiag); + + // Get the ranges from the last point in the path. + ArrayRef Ranges = PD->path.back()->getRanges(); + + for (ArrayRef::iterator I = Ranges.begin(), + E = Ranges.end(); I != E; ++I) { + diagBuilder << *I; + } + } + } +}; +} // end anonymous namespace + //===----------------------------------------------------------------------===// // AnalysisConsumer declaration. //===----------------------------------------------------------------------===// @@ -105,8 +146,8 @@ public: /// working with a PCH file. SetOfDecls LocalTUDecls; - // PD is owned by AnalysisManager. - PathDiagnosticConsumer *PD; + // Set of PathDiagnosticConsumers. Owned by AnalysisManager. + PathDiagnosticConsumers PathConsumers; StoreManagerCreator CreateStoreMgr; ConstraintManagerCreator CreateConstraintMgr; @@ -126,7 +167,7 @@ public: const AnalyzerOptions& opts, ArrayRef plugins) : RecVisitorMode(ANALYSIS_ALL), RecVisitorBR(0), - Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins), PD(0) { + Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins) { DigestAnalyzerOptions(); if (Opts.PrintStats) { llvm::EnableStatistics(); @@ -141,17 +182,19 @@ public: void DigestAnalyzerOptions() { // Create the PathDiagnosticConsumer. + PathConsumers.push_back(new ClangDiagPathDiagConsumer(PP.getDiagnostics())); + if (!OutDir.empty()) { switch (Opts.AnalysisDiagOpt) { default: #define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE) \ - case PD_##NAME: PD = CREATEFN(OutDir, PP); break; + case PD_##NAME: CREATEFN(PathConsumers, OutDir, PP); break; #include "clang/Frontend/Analyses.def" } } else if (Opts.AnalysisDiagOpt == PD_TEXT) { // Create the text client even without a specified output file since // it just uses diagnostic notes. 
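Note: the new ClangDiagPathDiagConsumer routes analyzer reports through the normal DiagnosticsEngine; because the report text becomes a custom diagnostic format string, every '%' is doubled first so it is not interpreted as a format specifier. A tiny sketch of that escaping step, written against plain std::string rather than the clang diagnostic APIs.

    #include <iostream>
    #include <string>

    // Double '%' so the text can safely be used as a diagnostic format string.
    static std::string escapePercent(const std::string &Desc) {
      std::string Out;
      Out.reserve(Desc.size());
      for (std::string::const_iterator I = Desc.begin(), E = Desc.end();
           I != E; ++I) {
        if (*I == '%')
          Out += "%%";
        else
          Out += *I;
      }
      return Out;
    }

    int main() {
      std::cout << escapePercent("Result of '%' is undefined") << '\n';
      // prints: Result of '%%' is undefined
    }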
- PD = createTextPathDiagnosticConsumer("", PP); + createTextPathDiagnosticConsumer(PathConsumers, "", PP); } // Create the analyzer component creators. @@ -205,9 +248,12 @@ public: Ctx = &Context; checkerMgr.reset(createCheckerManager(Opts, PP.getLangOpts(), Plugins, PP.getDiagnostics())); - Mgr.reset(new AnalysisManager(*Ctx, PP.getDiagnostics(), - PP.getLangOpts(), PD, - CreateStoreMgr, CreateConstraintMgr, + Mgr.reset(new AnalysisManager(*Ctx, + PP.getDiagnostics(), + PP.getLangOpts(), + PathConsumers, + CreateStoreMgr, + CreateConstraintMgr, checkerMgr.get(), Opts.MaxNodes, Opts.MaxLoop, Opts.VisualizeEGDot, Opts.VisualizeEGUbi, diff --git a/test/Analysis/CFNumber.c b/test/Analysis/CFNumber.c index fbbe4d1..537e497 100644 --- a/test/Analysis/CFNumber.c +++ b/test/Analysis/CFNumber.c @@ -17,11 +17,11 @@ typedef const struct __CFNumber * CFNumberRef; extern CFNumberRef CFNumberCreate(CFAllocatorRef allocator, CFNumberType theType, const void *valuePtr); CFNumberRef f1(unsigned char x) { - return CFNumberCreate(0, kCFNumberSInt16Type, &x); // expected-warning{{An 8 bit integer is used to initialize a CFNumber object that represents a 16 bit integer. 8 bits of the CFNumber value will be garbage.}} + return CFNumberCreate(0, kCFNumberSInt16Type, &x); // expected-warning{{An 8 bit integer is used to initialize a CFNumber object that represents a 16 bit integer. 8 bits of the CFNumber value will be garbage}} } __attribute__((cf_returns_retained)) CFNumberRef f2(unsigned short x) { - return CFNumberCreate(0, kCFNumberSInt8Type, &x); // expected-warning{{A 16 bit integer is used to initialize a CFNumber object that represents an 8 bit integer. 8 bits of the input integer will be lost.}} + return CFNumberCreate(0, kCFNumberSInt8Type, &x); // expected-warning{{A 16 bit integer is used to initialize a CFNumber object that represents an 8 bit integer. 8 bits of the input integer will be lost}} } // test that the attribute overrides the naming convention. 
@@ -30,5 +30,5 @@ __attribute__((cf_returns_not_retained)) CFNumberRef CreateNum(unsigned char x) } CFNumberRef f3(unsigned i) { - return CFNumberCreate(0, kCFNumberLongType, &i); // expected-warning{{A 32 bit integer is used to initialize a CFNumber object that represents a 64 bit integer.}} + return CFNumberCreate(0, kCFNumberLongType, &i); // expected-warning{{A 32 bit integer is used to initialize a CFNumber object that represents a 64 bit integer}} } diff --git a/test/Analysis/CheckNSError.m b/test/Analysis/CheckNSError.m index d35b686..cdec1d5 100644 --- a/test/Analysis/CheckNSError.m +++ b/test/Analysis/CheckNSError.m @@ -23,7 +23,7 @@ extern NSString * const NSXMLParserErrorDomain ; @implementation A - (void)myMethodWhichMayFail:(NSError **)error { // expected-warning {{Method accepting NSError** should have a non-void return value to indicate whether or not an error occurred}} - *error = [NSError errorWithDomain:@"domain" code:1 userInfo:0]; // expected-warning {{Potential null dereference.}} + *error = [NSError errorWithDomain:@"domain" code:1 userInfo:0]; // expected-warning {{Potential null dereference}} } - (BOOL)myMethodWhichMayFail2:(NSError **)error { // no-warning @@ -36,7 +36,7 @@ struct __CFError {}; typedef struct __CFError* CFErrorRef; void foo(CFErrorRef* error) { // expected-warning {{Function accepting CFErrorRef* should have a non-void return value to indicate whether or not an error occurred}} - *error = 0; // expected-warning {{Potential null dereference.}} + *error = 0; // expected-warning {{Potential null dereference}} } int f1(CFErrorRef* error) { diff --git a/test/Analysis/array-struct.c b/test/Analysis/array-struct.c index c5bdb86..1b36190 100644 --- a/test/Analysis/array-struct.c +++ b/test/Analysis/array-struct.c @@ -151,7 +151,7 @@ struct s3 p[1]; // an ElementRegion of type 'char'. Then load a nonloc::SymbolVal from it and // assigns to 'a'. void f16(struct s3 *p) { - struct s3 a = *((struct s3*) ((char*) &p[0])); // expected-warning{{Casting a non-structure type to a structure type and accessing a field can lead to memory access errors or data corruption.}} + struct s3 a = *((struct s3*) ((char*) &p[0])); // expected-warning{{Casting a non-structure type to a structure type and accessing a field can lead to memory access errors or data corruption}} } void inv(struct s1 *); diff --git a/test/Analysis/ctor-inlining.mm b/test/Analysis/ctor-inlining.mm index 54d51b4..fe4781c 100644 --- a/test/Analysis/ctor-inlining.mm +++ b/test/Analysis/ctor-inlining.mm @@ -39,3 +39,51 @@ void testNonPODCopyConstructor() { clang_analyzer_eval(b.x == 42); // expected-warning{{TRUE}} } + +namespace ConstructorVirtualCalls { + class A { + public: + int *out1, *out2, *out3; + + virtual int get() { return 1; } + + A(int *out1) { + *out1 = get(); + } + }; + + class B : public A { + public: + virtual int get() { return 2; } + + B(int *out1, int *out2) : A(out1) { + *out2 = get(); + } + }; + + class C : public B { + public: + virtual int get() { return 3; } + + C(int *out1, int *out2, int *out3) : B(out1, out2) { + *out3 = get(); + } + }; + + void test() { + int a, b, c; + + C obj(&a, &b, &c); + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(b == 2); // expected-warning{{TRUE}} + clang_analyzer_eval(c == 3); // expected-warning{{TRUE}} + + clang_analyzer_eval(obj.get() == 3); // expected-warning{{TRUE}} + + // Sanity check for devirtualization. 
+ A *base = &obj; + clang_analyzer_eval(base->get() == 3); // expected-warning{{TRUE}} + } +} + + diff --git a/test/Analysis/dtor.cpp b/test/Analysis/dtor.cpp index 6209948..1f45925 100644 --- a/test/Analysis/dtor.cpp +++ b/test/Analysis/dtor.cpp @@ -173,3 +173,57 @@ void testDefaultArg() { // Force a bug to be emitted. *(char *)0 = 1; // expected-warning{{Dereference of null pointer}} } + + +namespace DestructorVirtualCalls { + class A { + public: + int *out1, *out2, *out3; + + virtual int get() { return 1; } + + ~A() { + *out1 = get(); + } + }; + + class B : public A { + public: + virtual int get() { return 2; } + + ~B() { + *out2 = get(); + } + }; + + class C : public B { + public: + virtual int get() { return 3; } + + ~C() { + *out3 = get(); + } + }; + + void test() { + int a, b, c; + + // New scope for the C object. + { + C obj; + clang_analyzer_eval(obj.get() == 3); // expected-warning{{TRUE}} + + // Sanity check for devirtualization. + A *base = &obj; + clang_analyzer_eval(base->get() == 3); // expected-warning{{TRUE}} + + obj.out1 = &a; + obj.out2 = &b; + obj.out3 = &c; + } + + clang_analyzer_eval(a == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(b == 2); // expected-warning{{TRUE}} + clang_analyzer_eval(c == 3); // expected-warning{{TRUE}} + } +} diff --git a/test/Analysis/func.c b/test/Analysis/func.c index b6cebde8..709ebf7 100644 --- a/test/Analysis/func.c +++ b/test/Analysis/func.c @@ -1,4 +1,6 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core,debug.ExprInspection -analyzer-store=region -verify %s + +void clang_analyzer_eval(int); void f(void) { void (*p)(void); @@ -13,3 +15,13 @@ void g(void (*fp)(void)); void f2() { g(f); } + +void f3(void (*f)(void), void (*g)(void)) { + clang_analyzer_eval(!f); // expected-warning{{UNKNOWN}} + f(); + clang_analyzer_eval(!f); // expected-warning{{FALSE}} + + clang_analyzer_eval(!g); // expected-warning{{UNKNOWN}} + (*g)(); + clang_analyzer_eval(!g); // expected-warning{{FALSE}} +} diff --git a/test/Analysis/html-diags.c b/test/Analysis/html-diags.c index b9361f7..7c15df6 100644 --- a/test/Analysis/html-diags.c +++ b/test/Analysis/html-diags.c @@ -1,6 +1,6 @@ -// RUN: mkdir %t.dir -// RUN: %clang_cc1 -analyze -analyzer-output=html -analyzer-checker=core -o %t.dir %s -// RUN: rm -fR %t.dir +// RUN: mkdir %T/dir +// RUN: %clang_cc1 -analyze -analyzer-output=html -analyzer-checker=core -o %T/dir %s +// RUN: rm -fR %T/dir // Currently this test mainly checks that the HTML diagnostics doesn't crash // when handling macros will calls with macros. We should actually validate diff --git a/test/Analysis/inline.cpp b/test/Analysis/inline.cpp index 4eaed9f..6b9a885 100644 --- a/test/Analysis/inline.cpp +++ b/test/Analysis/inline.cpp @@ -166,3 +166,30 @@ namespace PR13569_virtual { x.interface(); } } + +namespace Invalidation { + struct X { + void touch(int &x) const { + x = 0; + } + + void touch2(int &x) const; + + virtual void touchV(int &x) const { + x = 0; + } + + virtual void touchV2(int &x) const; + + int test() const { + // We were accidentally not invalidating under -analyzer-ipa=inlining + // at one point for virtual methods with visible definitions. 
+ int a, b, c, d; + touch(a); + touch2(b); + touchV(c); + touchV2(d); + return a + b + c + d; // no-warning + } + }; +} diff --git a/test/Analysis/inlining/DynDispatchBifurcate.m b/test/Analysis/inlining/DynDispatchBifurcate.m index e78b90b..6637dfd 100644 --- a/test/Analysis/inlining/DynDispatchBifurcate.m +++ b/test/Analysis/inlining/DynDispatchBifurcate.m @@ -160,17 +160,17 @@ int testCallToPublicAPICat(PublicSubClass *p) { // whether they are "public" or private. int testPublicProperty(PublicClass *p) { int x = 0; - [p setValue3:0]; - if ([p value3] != 0) - return 5/x; // expected-warning {{Division by zero}} // TODO: no warning, we should always inline the property. - return 5/[p value3];// expected-warning {{Division by zero}} + p.value3 = 0; + if (p.value3 != 0) + return 5/x; + return 5/p.value3;// expected-warning {{Division by zero}} } int testExtension(PublicClass *p) { int x = 0; [p setValue2:0]; if ([p value2] != 0) - return 5/x; // expected-warning {{Division by zero}} // TODO: no warning, we should always inline the property. + return 5/x; // expected-warning {{Division by zero}} return 5/[p value2]; // expected-warning {{Division by zero}} } diff --git a/test/Analysis/inlining/InlineObjCClassMethod.m b/test/Analysis/inlining/InlineObjCClassMethod.m index 7e8b51f..814d437 100644 --- a/test/Analysis/inlining/InlineObjCClassMethod.m +++ b/test/Analysis/inlining/InlineObjCClassMethod.m @@ -179,3 +179,33 @@ int foo2() { int y = [MyParentSelf testSelf]; return 5/y; // Should warn here. } + +// TODO: We do not inline 'getNum' in the following case, where the value of +// 'self' in call '[self getNum]' is available and evaluates to +// 'SelfUsedInParentChild' if it's called from fooA. +// The self region should be created before we call foo, and the call to super +// should keep it live.
+@interface SelfUsedInParent : NSObject ++ (int)getNum; ++ (int)foo; +@end +@implementation SelfUsedInParent ++ (int)getNum {return 5;} ++ (int)foo { + return [self getNum]; +} +@end +@interface SelfUsedInParentChild : SelfUsedInParent ++ (int)getNum; ++ (int)fooA; +@end +@implementation SelfUsedInParentChild ++ (int)getNum {return 0;} ++ (int)fooA { + return [super foo]; +} +@end +int checkSelfUsedInparentClassMethod() { + return 5/[SelfUsedInParentChild fooA]; +} + diff --git a/test/Analysis/inlining/dyn-dispatch-bifurcate.cpp b/test/Analysis/inlining/dyn-dispatch-bifurcate.cpp new file mode 100644 index 0000000..fa473ae --- /dev/null +++ b/test/Analysis/inlining/dyn-dispatch-bifurcate.cpp @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=dynamic-bifurcate -verify %s + +void clang_analyzer_eval(bool); + +class A { +public: + virtual int get() { return 0; } +}; + +void testBifurcation(A *a) { + clang_analyzer_eval(a->get() == 0); // expected-warning{{TRUE}} expected-warning{{UNKNOWN}} +} + +void testKnown() { + A a; + clang_analyzer_eval(a.get() == 0); // expected-warning{{TRUE}} +} diff --git a/test/Analysis/keychainAPI.m b/test/Analysis/keychainAPI.m index cb4f72c..585a32d 100644 --- a/test/Analysis/keychainAPI.m +++ b/test/Analysis/keychainAPI.m @@ -76,7 +76,7 @@ void errRetVal() { UInt32 length; void *outData; st = SecKeychainItemCopyContent(2, ptr, ptr, &length, &outData); - if (st == GenericError) // expected-warning{{Allocated data is not released: missing a call to 'SecKeychainItemFreeContent'.}} + if (st == GenericError) // expected-warning{{Allocated data is not released: missing a call to 'SecKeychainItemFreeContent'}} SecKeychainItemFreeContent(ptr, outData); // expected-warning{{Only call free if a valid (non-NULL) buffer was returned}} } @@ -220,7 +220,7 @@ int foo(CFTypeRef keychainOrArray, SecProtocolType protocol, if (st == noErr) SecKeychainItemFreeContent(ptr, outData[3]); } - if (length) { // expected-warning{{Allocated data is not released: missing a call to 'SecKeychainItemFreeContent'.}} + if (length) { // expected-warning{{Allocated data is not released: missing a call to 'SecKeychainItemFreeContent'}} length++; } return 0; @@ -318,7 +318,7 @@ void radar10508828_2() { UInt32 pwdLen = 0; void* pwdBytes = 0; OSStatus rc = SecKeychainFindGenericPassword(0, 3, "foo", 3, "bar", &pwdLen, &pwdBytes, 0); - SecKeychainItemFreeContent(0, pwdBytes); // expected-warning {{Only call free if a valid (non-NULL) buffer was returned.}} + SecKeychainItemFreeContent(0, pwdBytes); // expected-warning {{Only call free if a valid (non-NULL) buffer was returned}} } //Example from bug 10797. 
diff --git a/test/Analysis/malloc-annotations.c b/test/Analysis/malloc-annotations.c index 1dc0f78..9c040b6 100644 --- a/test/Analysis/malloc-annotations.c +++ b/test/Analysis/malloc-annotations.c @@ -208,11 +208,11 @@ void f7_realloc() { } void PR6123() { - int *x = malloc(11); // expected-warning{{Cast a region whose size is not a multiple of the destination type size.}} + int *x = malloc(11); // expected-warning{{Cast a region whose size is not a multiple of the destination type size}} } void PR7217() { - int *buf = malloc(2); // expected-warning{{Cast a region whose size is not a multiple of the destination type size.}} + int *buf = malloc(2); // expected-warning{{Cast a region whose size is not a multiple of the destination type size}} buf[1] = 'c'; // not crash } diff --git a/test/Analysis/malloc.c b/test/Analysis/malloc.c index f60271f..e3d92d9 100644 --- a/test/Analysis/malloc.c +++ b/test/Analysis/malloc.c @@ -260,11 +260,11 @@ void f7_realloc() { } void PR6123() { - int *x = malloc(11); // expected-warning{{Cast a region whose size is not a multiple of the destination type size.}} + int *x = malloc(11); // expected-warning{{Cast a region whose size is not a multiple of the destination type size}} } void PR7217() { - int *buf = malloc(2); // expected-warning{{Cast a region whose size is not a multiple of the destination type size.}} + int *buf = malloc(2); // expected-warning{{Cast a region whose size is not a multiple of the destination type size}} buf[1] = 'c'; // not crash } @@ -389,7 +389,7 @@ void mallocEscapeMalloc() { void mallocMalloc() { int *p = malloc(12); - p = malloc(12); // expected-warning 2 {{Memory is never released; potential leak}} + p = malloc(12); // expected-warning {{Memory is never released; potential leak}} } void mallocFreeMalloc() { diff --git a/test/Analysis/method-call-path-notes.cpp b/test/Analysis/method-call-path-notes.cpp index fbf0cae..17034b9 100644 --- a/test/Analysis/method-call-path-notes.cpp +++ b/test/Analysis/method-call-path-notes.cpp @@ -36,6 +36,11 @@ void test_ic_member_ptr() { (p->*bar)(); // expected-warning {{Called C++ object pointer is null}} expected-note{{Called C++ object pointer is null}} } +void test_cast(const TestInstanceCall *p) { + if (!p) // expected-note {{Assuming pointer value is null}} expected-note {{Taking true branch}} + const_cast(p)->foo(); // expected-warning {{Called C++ object pointer is null}} expected-note {{Called C++ object pointer is null}} +} + // CHECK: // CHECK: // CHECK: @@ -659,6 +664,86 @@ void test_ic_member_ptr() { // CHECK: file0 // CHECK: // CHECK: +// CHECK: +// CHECK: path +// CHECK: +// CHECK: +// CHECK: kindcontrol +// CHECK: edges +// CHECK: +// CHECK: +// CHECK: start +// CHECK: +// CHECK: +// CHECK: line40 +// CHECK: col3 +// CHECK: file0 +// CHECK: +// CHECK: +// CHECK: line40 +// CHECK: col4 +// CHECK: file0 +// CHECK: +// CHECK: +// CHECK: end +// CHECK: +// CHECK: +// CHECK: line41 +// CHECK: col5 +// CHECK: file0 +// CHECK: +// CHECK: +// CHECK: line41 +// CHECK: col14 +// CHECK: file0 +// CHECK: +// CHECK: +// CHECK: +// CHECK: +// CHECK: +// CHECK: +// CHECK: kindevent +// CHECK: location +// CHECK: +// CHECK: line41 +// CHECK: col5 +// CHECK: file0 +// CHECK: +// CHECK: ranges +// CHECK: +// CHECK: +// CHECK: +// CHECK: line41 +// CHECK: col5 +// CHECK: file0 +// CHECK: +// CHECK: +// CHECK: line41 +// CHECK: col37 +// CHECK: file0 +// CHECK: +// CHECK: +// CHECK: +// CHECK: depth0 +// CHECK: extended_message +// CHECK: Called C++ object pointer is null +// CHECK: message +// 
CHECK: Called C++ object pointer is null +// CHECK: +// CHECK: +// CHECK: descriptionCalled C++ object pointer is null +// CHECK: categoryLogic error +// CHECK: typeCalled C++ object pointer is null +// CHECK: issue_context_kindfunction +// CHECK: issue_contexttest_cast +// CHECK: issue_hash2 +// CHECK: location +// CHECK: +// CHECK: line41 +// CHECK: col5 +// CHECK: file0 +// CHECK: +// CHECK: // CHECK: // CHECK: // CHECK: diff --git a/test/Analysis/method-call.cpp b/test/Analysis/method-call.cpp index 91da532..9120627 100644 --- a/test/Analysis/method-call.cpp +++ b/test/Analysis/method-call.cpp @@ -1,25 +1,37 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -analyzer-store region -verify %s -// XFAIL: * void clang_analyzer_eval(bool); + struct A { int x; A(int a) { x = a; } int getx() const { return x; } }; +void testNullObject(A *a) { + clang_analyzer_eval(a); // expected-warning{{UNKNOWN}} + (void)a->getx(); // assume we know what we're doing + clang_analyzer_eval(a); // expected-warning{{TRUE}} +} + + +// FIXME: These require constructor inlining to be enabled. + void f1() { A x(3); - clang_analyzer_eval(x.getx() == 3); // expected-warning{{TRUE}} + // should be TRUE + clang_analyzer_eval(x.getx() == 3); // expected-warning{{UNKNOWN}} } void f2() { const A &x = A(3); - clang_analyzer_eval(x.getx() == 3); // expected-warning{{TRUE}} + // should be TRUE + clang_analyzer_eval(x.getx() == 3); // expected-warning{{UNKNOWN}} } void f3() { const A &x = (A)3; - clang_analyzer_eval(x.getx() == 3); // expected-warning{{TRUE}} + // should be TRUE + clang_analyzer_eval(x.getx() == 3); // expected-warning{{UNKNOWN}} } diff --git a/test/Analysis/misc-ps-region-store.m b/test/Analysis/misc-ps-region-store.m index 88860bb..2b481c4 100644 --- a/test/Analysis/misc-ps-region-store.m +++ b/test/Analysis/misc-ps-region-store.m @@ -299,7 +299,7 @@ void test_handle_array_wrapper_helper(); int test_handle_array_wrapper() { struct ArrayWrapper x; test_handle_array_wrapper_helper(&x); - struct WrappedStruct *p = (struct WrappedStruct*) x.y; // expected-warning{{Casting a non-structure type to a structure type and accessing a field can lead to memory access errors or data corruption.}} + struct WrappedStruct *p = (struct WrappedStruct*) x.y; // expected-warning{{Casting a non-structure type to a structure type and accessing a field can lead to memory access errors or data corruption}} return p->z; // no-warning } diff --git a/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret.m b/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret.m index e4d5aaf..7cf2aee 100644 --- a/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret.m +++ b/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret.m @@ -80,11 +80,11 @@ int handleVoidInComma() { int marker(void) { // control reaches end of non-void function } +// CHECK-darwin8: warning: The receiver of message 'longDoubleM' is nil and returns a value of type 'long double' that will be garbage // CHECK-darwin8: warning: The receiver of message 'longlongM' is nil and returns a value of type 'long long' that will be garbage -// CHECK-darwin8: warning: The receiver of message 'unsignedLongLongM' is nil and returns a value of type 'unsigned long long' that will be garbage // CHECK-darwin8: warning: The receiver of message 'doubleM' is nil and returns a value of type 'double' that will be garbage +// CHECK-darwin8: warning: The receiver of message 'unsignedLongLongM' is nil and returns a value of type 
'unsigned long long' that will be garbage // CHECK-darwin8: warning: The receiver of message 'longlongM' is nil and returns a value of type 'long long' that will be garbage -// CHECK-darwin8: warning: The receiver of message 'longDoubleM' is nil and returns a value of type 'long double' that will be garbage // CHECK-darwin9-NOT: warning: The receiver of message 'longlongM' is nil and returns a value of type 'long long' that will be garbage // CHECK-darwin9-NOT: warning: The receiver of message 'unsignedLongLongM' is nil and returns a value of type 'unsigned long long' that will be garbage diff --git a/test/Analysis/ptr-arith.c b/test/Analysis/ptr-arith.c index 6567000..884ae5b 100644 --- a/test/Analysis/ptr-arith.c +++ b/test/Analysis/ptr-arith.c @@ -36,7 +36,7 @@ domain_port (const char *domain_b, const char *domain_e, void f3() { int x, y; - int d = &y - &x; // expected-warning{{Subtraction of two pointers that do not point to the same memory chunk may cause incorrect result.}} + int d = &y - &x; // expected-warning{{Subtraction of two pointers that do not point to the same memory chunk may cause incorrect result}} int a[10]; int *p = &a[2]; @@ -46,13 +46,13 @@ void f3() { void f4() { int *p; - p = (int*) 0x10000; // expected-warning{{Using a fixed address is not portable because that address will probably not be valid in all environments or platforms.}} + p = (int*) 0x10000; // expected-warning{{Using a fixed address is not portable because that address will probably not be valid in all environments or platforms}} } void f5() { int x, y; int *p; - p = &x + 1; // expected-warning{{Pointer arithmetic done on non-array variables means reliance on memory layout, which is dangerous.}} + p = &x + 1; // expected-warning{{Pointer arithmetic done on non-array variables means reliance on memory layout, which is dangerous}} int a[10]; p = a + 1; // no-warning diff --git a/test/Analysis/reinterpret-cast.cpp b/test/Analysis/reinterpret-cast.cpp new file mode 100644 index 0000000..73f2e2d --- /dev/null +++ b/test/Analysis/reinterpret-cast.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -verify %s + +void clang_analyzer_eval(bool); + +typedef struct Opaque *Data; +struct IntWrapper { + int x; +}; + +struct Child : public IntWrapper { + void set() { x = 42; } +}; + +void test(Data data) { + Child *wrapper = reinterpret_cast(data); + // Don't crash when upcasting here. + // We don't actually know if 'data' is a Child. + wrapper->set(); + clang_analyzer_eval(wrapper->x == 42); // expected-warning{{TRUE}} +} diff --git a/test/Analysis/security-syntax-checks.m b/test/Analysis/security-syntax-checks.m index f4ccefe..1df8a40 100644 --- a/test/Analysis/security-syntax-checks.m +++ b/test/Analysis/security-syntax-checks.m @@ -46,7 +46,7 @@ int getpw(unsigned int uid, char *buf); void test_getpw() { char buff[1024]; - getpw(2, buff); // expected-warning{{The getpw() function is dangerous as it may overflow the provided buffer. It is obsoleted by getpwuid().}} + getpw(2, buff); // expected-warning{{The getpw() function is dangerous as it may overflow the provided buffer. It is obsoleted by getpwuid()}} } // CWE-273: Failure to Check Whether Privileges Were @@ -138,7 +138,7 @@ void test_strcpy() { char x[4]; char *y; - strcpy(x, y); //expected-warning{{Call to function 'strcpy' is insecure as it does not provide bounding of the memory buffer. 
Replace unbounded copy functions with analogous functions that support length arguments such as 'strlcpy'. CWE-119.}} + strcpy(x, y); //expected-warning{{Call to function 'strcpy' is insecure as it does not provide bounding of the memory buffer. Replace unbounded copy functions with analogous functions that support length arguments such as 'strlcpy'. CWE-119}} } //===----------------------------------------------------------------------=== @@ -162,7 +162,7 @@ void test_strcat() { char x[4]; char *y; - strcat(x, y); //expected-warning{{Call to function 'strcat' is insecure as it does not provide bounding of the memory buffer. Replace unbounded copy functions with analogous functions that support length arguments such as 'strlcat'. CWE-119.}} + strcat(x, y); //expected-warning{{Call to function 'strcat' is insecure as it does not provide bounding of the memory buffer. Replace unbounded copy functions with analogous functions that support length arguments such as 'strlcat'. CWE-119}} } //===----------------------------------------------------------------------=== @@ -173,7 +173,7 @@ typedef __int32_t pid_t; pid_t vfork(void); void test_vfork() { - vfork(); //expected-warning{{Call to function 'vfork' is insecure as it can lead to denial of service situations in the parent process.}} + vfork(); //expected-warning{{Call to function 'vfork' is insecure as it can lead to denial of service situations in the parent process}} } //===----------------------------------------------------------------------=== diff --git a/test/Analysis/sizeofpointer.c b/test/Analysis/sizeofpointer.c index 0c86de8..aa85fc0 100644 --- a/test/Analysis/sizeofpointer.c +++ b/test/Analysis/sizeofpointer.c @@ -4,5 +4,5 @@ struct s { }; int f(struct s *p) { - return sizeof(p); // expected-warning{{The code calls sizeof() on a pointer type. This can produce an unexpected result.}} + return sizeof(p); // expected-warning{{The code calls sizeof() on a pointer type. 
This can produce an unexpected result}} } diff --git a/test/Analysis/stack-addr-ps.cpp b/test/Analysis/stack-addr-ps.cpp index b21a03d..cbdb143 100644 --- a/test/Analysis/stack-addr-ps.cpp +++ b/test/Analysis/stack-addr-ps.cpp @@ -87,6 +87,6 @@ struct TS { // rdar://11345441 int* f5() { - int& i = i; // expected-warning {{Assigned value is garbage or undefined}} expected-note {{binding reference variable 'i' here}} + int& i = i; // expected-warning {{Assigned value is garbage or undefined}} expected-note {{binding reference variable 'i' here}} expected-warning{{variable 'i' is uninitialized when used within its own initialization}} return &i; // expected-warning {{address of stack memory associated with local variable 'i' returned}} } diff --git a/test/Analysis/stream.c b/test/Analysis/stream.c index e68835e..4a095cf 100644 --- a/test/Analysis/stream.c +++ b/test/Analysis/stream.c @@ -16,25 +16,25 @@ extern void rewind (FILE *__stream); void f1(void) { FILE *p = fopen("foo", "r"); char buf[1024]; - fread(buf, 1, 1, p); // expected-warning {{Stream pointer might be NULL.}} + fread(buf, 1, 1, p); // expected-warning {{Stream pointer might be NULL}} fclose(p); } void f2(void) { FILE *p = fopen("foo", "r"); - fseek(p, 1, SEEK_SET); // expected-warning {{Stream pointer might be NULL.}} + fseek(p, 1, SEEK_SET); // expected-warning {{Stream pointer might be NULL}} fclose(p); } void f3(void) { FILE *p = fopen("foo", "r"); - ftell(p); // expected-warning {{Stream pointer might be NULL.}} + ftell(p); // expected-warning {{Stream pointer might be NULL}} fclose(p); } void f4(void) { FILE *p = fopen("foo", "r"); - rewind(p); // expected-warning {{Stream pointer might be NULL.}} + rewind(p); // expected-warning {{Stream pointer might be NULL}} fclose(p); } @@ -43,26 +43,26 @@ void f5(void) { if (!p) return; fseek(p, 1, SEEK_SET); // no-warning - fseek(p, 1, 3); // expected-warning {{The whence argument to fseek() should be SEEK_SET, SEEK_END, or SEEK_CUR.}} + fseek(p, 1, 3); // expected-warning {{The whence argument to fseek() should be SEEK_SET, SEEK_END, or SEEK_CUR}} fclose(p); } void f6(void) { FILE *p = fopen("foo", "r"); fclose(p); - fclose(p); // expected-warning {{Try to close a file Descriptor already closed. Cause undefined behaviour.}} + fclose(p); // expected-warning {{Try to close a file Descriptor already closed. Cause undefined behaviour}} } void f7(void) { FILE *p = tmpfile(); - ftell(p); // expected-warning {{Stream pointer might be NULL.}} + ftell(p); // expected-warning {{Stream pointer might be NULL}} fclose(p); } void f8(int c) { FILE *p = fopen("foo.c", "r"); if(c) - return; // expected-warning {{Opened File never closed. Potential Resource leak.}} + return; // expected-warning {{Opened File never closed. Potential Resource leak}} fclose(p); } diff --git a/test/Analysis/variadic-method-types.m b/test/Analysis/variadic-method-types.m index 4d0f6bc..9f90e5f 100644 --- a/test/Analysis/variadic-method-types.m +++ b/test/Analysis/variadic-method-types.m @@ -74,7 +74,7 @@ void f(id a, id

<P> b, C* c, C<P>
    *d, FooType fooType, BarType barType) { [NSArray arrayWithObjects:@"Hello", a, b, c, d, nil]; [NSArray arrayWithObjects:@"Foo", ^{}, nil]; - [NSArray arrayWithObjects:@"Foo", "Bar", "Baz", nil]; // expected-warning 2 {{Argument to 'NSArray' method 'arrayWithObjects:' should be an Objective-C pointer type, not 'char *'}} + [NSArray arrayWithObjects:@"Foo", "Bar", "Baz", nil]; // expected-warning {{Argument to 'NSArray' method 'arrayWithObjects:' should be an Objective-C pointer type, not 'char *'}} [NSDictionary dictionaryWithObjectsAndKeys:@"Foo", "Bar", nil]; // expected-warning {{Argument to 'NSDictionary' method 'dictionaryWithObjectsAndKeys:' should be an Objective-C pointer type, not 'char *'}} [NSSet setWithObjects:@"Foo", "Bar", nil]; // expected-warning {{Argument to 'NSSet' method 'setWithObjects:' should be an Objective-C pointer type, not 'char *'}} [NSOrderedSet orderedSetWithObjects:@"Foo", "Bar", nil]; // expected-warning {{Argument to 'NSOrderedSet' method 'orderedSetWithObjects:' should be an Objective-C pointer type, not 'char *'}} diff --git a/test/CodeCompletion/objc-expr.m b/test/CodeCompletion/objc-expr.m index b59586a..d3c95a6 100644 --- a/test/CodeCompletion/objc-expr.m +++ b/test/CodeCompletion/objc-expr.m @@ -11,7 +11,7 @@ id testCompleteAfterAtSign() { // CHECK-AT: COMPLETION: Pattern : [#char[]#]encode(<#type-name#>) // CHECK-AT: COMPLETION: Pattern : [#Protocol *#]protocol(<#protocol-name#>) // CHECK-AT: COMPLETION: Pattern : [#SEL#]selector(<#selector#>) -// CHECK-AT: COMPLETION: Pattern : [#NSDictionary *#]{<#key#> : <#object, ...#>} +// CHECK-AT: COMPLETION: Pattern : [#NSDictionary *#]{<#key#>: <#object, ...#>} // RUN: %clang_cc1 -fsyntax-only -code-completion-patterns -code-completion-at=%s:4:11 %s -fconst-strings -o - | FileCheck -check-prefix=CONST-STRINGS %s // CHECK-CONST-STRINGS: COMPLETION: Pattern : [#const char[]#]encode(<#type-name#>) diff --git a/test/CodeGen/align-global-large.c b/test/CodeGen/align-global-large.c new file mode 100644 index 0000000..fcbe758 --- /dev/null +++ b/test/CodeGen/align-global-large.c @@ -0,0 +1,18 @@ +// PR13606 - Clang crashes with large alignment attribute +// RUN: %clang -S -emit-llvm %s -o - | FileCheck %s + +// CHECK: x +// CHECK: align +// CHECK: 1048576 +volatile char x[4000] __attribute__((aligned(0x100000))); + +int +main (int argc, char ** argv) { + // CHECK: y + // CHECK: align + // CHECK: 1048576 + volatile char y[4000] __attribute__((aligned(0x100000))); + + return y[argc]; +} + diff --git a/test/CodeGen/alignment.c b/test/CodeGen/alignment.c index 8882c91..98ea01b 100644 --- a/test/CodeGen/alignment.c +++ b/test/CodeGen/alignment.c @@ -43,7 +43,8 @@ void test3(packedfloat3 *p) { *p = (packedfloat3) { 3.2f, 2.3f, 0.1f }; } // CHECK: @test3( -// CHECK: store <3 x float> {{.*}}, align 4 +// CHECK: %{{.*}} = bitcast <3 x float>* %{{.*}} to <4 x float>* +// CHECK: store <4 x float> {{.*}}, align 4 // CHECK: ret void diff --git a/test/CodeGen/arm-neon-misc.c b/test/CodeGen/arm-neon-misc.c new file mode 100644 index 0000000..56ce316 --- /dev/null +++ b/test/CodeGen/arm-neon-misc.c @@ -0,0 +1,34 @@ +// REQUIRES: arm-registered-target +// RUN: %clang_cc1 -triple thumbv7-apple-darwin \ +// RUN: -target-abi apcs-gnu \ +// RUN: -target-cpu cortex-a8 \ +// RUN: -mfloat-abi soft \ +// RUN: -target-feature +soft-float-abi \ +// RUN: -ffreestanding \ +// RUN: -emit-llvm -w -o - %s | FileCheck %s + +#include + +// Radar 11998303: Avoid using i64 types for vld1q_lane and vst1q_lane Neon +// intrinsics with <2 x i64> 
vectors to avoid poor code for i64 in the backend. +void t1(uint64_t *src, uint8_t *dst) { +// CHECK: @t1 + uint64x2_t q = vld1q_u64(src); +// CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64 + vst1q_lane_u64(dst, q, 1); +// CHECK: bitcast <16 x i8> %{{.*}} to <2 x i64> +// CHECK: shufflevector <2 x i64> +// CHECK: call void @llvm.arm.neon.vst1.v1i64 +} + +void t2(uint64_t *src1, uint8_t *src2, uint64x2_t *dst) { +// CHECK: @t2 + uint64x2_t q = vld1q_u64(src1); +// CHECK: call <2 x i64> @llvm.arm.neon.vld1.v2i64 + q = vld1q_lane_u64(src2, q, 0); +// CHECK: shufflevector <2 x i64> +// CHECK: call <1 x i64> @llvm.arm.neon.vld1.v1i64 +// CHECK: shufflevector <1 x i64> + *dst = q; +// CHECK: store <2 x i64> +} diff --git a/test/CodeGen/complex-builtints.c b/test/CodeGen/complex-builtints.c new file mode 100644 index 0000000..09219cf --- /dev/null +++ b/test/CodeGen/complex-builtints.c @@ -0,0 +1,71 @@ +// RUN: %clang_cc1 %s -O1 -emit-llvm -o - | FileCheck %s +// rdar://8315199 + +/* Test for builtin conj, creal, cimag. */ +/* Origin: Joseph Myers */ + +extern float _Complex conjf (float _Complex); +extern double _Complex conj (double _Complex); +extern long double _Complex conjl (long double _Complex); + +extern float crealf (float _Complex); +extern double creal (double _Complex); +extern long double creall (long double _Complex); + +extern float cimagf (float _Complex); +extern double cimag (double _Complex); +extern long double cimagl (long double _Complex); + +extern void abort (void); +extern void link_error (void); + +int +main () +{ + /* For each type, test both runtime and compile time (constant folding) + optimization. */ + volatile float _Complex fc = 1.0F + 2.0iF; + volatile double _Complex dc = 1.0 + 2.0i; + volatile long double _Complex ldc = 1.0L + 2.0iL; + /* Test floats. */ + if (__builtin_conjf (fc) != 1.0F - 2.0iF) + abort (); + if (__builtin_conjf (1.0F + 2.0iF) != 1.0F - 2.0iF) + link_error (); + if (__builtin_crealf (fc) != 1.0F) + abort (); + if (__builtin_crealf (1.0F + 2.0iF) != 1.0F) + link_error (); + if (__builtin_cimagf (fc) != 2.0F) + abort (); + if (__builtin_cimagf (1.0F + 2.0iF) != 2.0F) + link_error (); + /* Test doubles. */ + if (__builtin_conj (dc) != 1.0 - 2.0i) + abort (); + if (__builtin_conj (1.0 + 2.0i) != 1.0 - 2.0i) + link_error (); + if (__builtin_creal (dc) != 1.0) + abort (); + if (__builtin_creal (1.0 + 2.0i) != 1.0) + link_error (); + if (__builtin_cimag (dc) != 2.0) + abort (); + if (__builtin_cimag (1.0 + 2.0i) != 2.0) + link_error (); + /* Test long doubles. 
*/ + if (__builtin_conjl (ldc) != 1.0L - 2.0iL) + abort (); + if (__builtin_conjl (1.0L + 2.0iL) != 1.0L - 2.0iL) + link_error (); + if (__builtin_creall (ldc) != 1.0L) + abort (); + if (__builtin_creall (1.0L + 2.0iL) != 1.0L) + link_error (); + if (__builtin_cimagl (ldc) != 2.0L) + abort (); + if (__builtin_cimagl (1.0L + 2.0iL) != 2.0L) + link_error (); +} + +// CHECK-NOT: link_error diff --git a/test/CodeGen/ms-inline-asm.c b/test/CodeGen/ms-inline-asm.c index 8c3e5f7..c140d60 100644 --- a/test/CodeGen/ms-inline-asm.c +++ b/test/CodeGen/ms-inline-asm.c @@ -9,7 +9,9 @@ void t1() { void t2() { // CHECK: @t2 -// CHECK: call void asm sideeffect "nop\0Anop\0Anop", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect // CHECK: ret void __asm nop __asm nop @@ -25,7 +27,8 @@ void t3() { void t4(void) { // CHECK: @t4 -// CHECK: call void asm sideeffect "mov ebx, eax\0Amov ecx, ebx", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "mov ebx, eax", "~{ebx},~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "mov ecx, ebx", "~{ecx},~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect // CHECK: ret void __asm mov ebx, eax __asm mov ecx, ebx @@ -33,8 +36,85 @@ void t4(void) { void t5(void) { // CHECK: @t5 -// CHECK: call void asm sideeffect "mov ebx, eax\0Amov ecx, ebx", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "mov ebx, eax\0Amov ecx, ebx", "~{ebx},~{ecx},~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect // CHECK: ret void __asm mov ebx, eax __asm mov ecx, ebx } +void t6(void) { + __asm int 0x2c +// CHECK: t6 +// CHECK: call void asm sideeffect "int 0x2c", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +} + +void* t7(void) { + __asm mov eax, fs:[0x10] +// CHECK: t7 +// CHECK: call void asm sideeffect "mov eax, fs:[0x10]", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +} + +void t8() { + __asm { + int 0x2c ; } asm comments are fun! 
}{ + } + __asm {} +// CHECK: t8 +// CHECK: call void asm sideeffect "int 0x2c", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +} +int t9() { + __asm int 3 ; } comments for single-line asm + __asm {} + __asm int 4 + return 10; +// CHECK: t9 +// CHECK: call void asm sideeffect "int 3", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect "int 4", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: ret i32 10 +} +void t10() { + __asm { + push ebx + mov ebx, 0x07 + pop ebx + } +// CHECK: t10 +// CHECK: call void asm sideeffect "push ebx\0Amov ebx, 0x07\0Apop ebx", "~{ebx},~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +} + +unsigned t11(void) { + unsigned i = 1, j; + __asm { + mov eax, i + mov j, eax + } + return j; +// CHECK: t11 +// CHECK: [[I:%[a-zA-Z0-9]+]] = alloca i32, align 4 +// CHECK: [[J:%[a-zA-Z0-9]+]] = alloca i32, align 4 +// CHECK: store i32 1, i32* [[I]], align 4 +// CHECK: call void asm sideeffect "mov eax, i\0Amov j, eax", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: [[RET:%[a-zA-Z0-9]+]] = load i32* [[J]], align 4 +// CHECK: ret i32 [[RET]] +} + +void t12(void) { + __asm EVEN + __asm ALIGN +} + +void t13(void) { + __asm { + _emit 0x4A + _emit 0x43 + _emit 0x4B + } +} + +void t14(void) { + unsigned arr[10]; + __asm LENGTH arr ; sizeof(arr)/sizeof(arr[0]) + __asm SIZE arr ; sizeof(arr) + __asm TYPE arr ; sizeof(arr[0]) +} diff --git a/test/CodeGenCXX/devirtualize-virtual-function-calls.cpp b/test/CodeGenCXX/devirtualize-virtual-function-calls.cpp index c5a4094..7ef4864 100644 --- a/test/CodeGenCXX/devirtualize-virtual-function-calls.cpp +++ b/test/CodeGenCXX/devirtualize-virtual-function-calls.cpp @@ -83,3 +83,20 @@ namespace test3 { d.B::~B(); } } + +namespace test4 { + struct Animal { + virtual void eat(); + }; + struct Fish : Animal { + virtual void eat(); + }; + struct Wrapper { + Fish fish; + }; + extern Wrapper *p; + void test() { + // CHECK: call void @_ZN5test44Fish3eatEv + p->fish.eat(); + } +} diff --git a/test/CodeGenObjC/instance-method-metadata.m b/test/CodeGenObjC/instance-method-metadata.m index ec24c28..0d8c0e0 100644 --- a/test/CodeGenObjC/instance-method-metadata.m +++ b/test/CodeGenObjC/instance-method-metadata.m @@ -28,8 +28,8 @@ @end // CHECK: l_OBJC_$_INSTANCE_METHODS_Bar: -// CHECK-NEXT .long 24 -// CHECK-NEXT .long 2 -// CHECK-NEXT .quad L_OBJC_METH_VAR_NAME_ -// CHECK-NEXT .quad L_OBJC_METH_VAR_TYPE_ -// CHECK-NEXT .quad "-[Bar prop]" +// CHECK-NEXT: .long 24 +// CHECK-NEXT: .long 2 +// CHECK-NEXT: .quad L_OBJC_METH_VAR_NAME_ +// CHECK-NEXT: .quad L_OBJC_METH_VAR_TYPE_ +// CHECK-NEXT: .quad "-[Bar prop]" diff --git a/test/CodeGenObjC/ns_consume_null_check.m b/test/CodeGenObjC/ns_consume_null_check.m index e3b6075..a8e5acd 100644 --- a/test/CodeGenObjC/ns_consume_null_check.m +++ b/test/CodeGenObjC/ns_consume_null_check.m @@ -17,7 +17,7 @@ void foo() [x isEqual : obj]; } -// CHECK: [[TMP:%.*]] = alloca i8 +// CHECK: [[TMP:%.*]] = alloca i8{{$}} // CHECK: [[FIVE:%.*]] = call i8* @objc_retain // CHECK-NEXT: [[SIX:%.*]] = bitcast // CHECK-NEXT: [[SEVEN:%.*]] = icmp eq i8* [[SIX]], null @@ -25,8 +25,8 @@ void foo() // CHECK: [[FN:%.*]] = load i8** getelementptr inbounds // CHECK-NEXT: [[EIGHT:%.*]] = bitcast i8* [[FN]] // CHECK-NEXT: [[CALL:%.*]] = call signext i8 [[EIGHT]] -// CHECK-NEXT 
store i8 [[CALL]], i8* [[TMP]] -// CHECK-NEXT br label [[CONT:%.*]] +// CHECK-NEXT: store i8 [[CALL]], i8* [[TMP]] +// CHECK-NEXT: br label [[CONT:%.*]] // CHECK: call void @objc_release(i8* [[FIVE]]) nounwind // CHECK-NEXT: call void @llvm.memset -// CHECK-NEXT br label [[CONT]] +// CHECK-NEXT: br label [[CONT]] diff --git a/test/CodeGenOpenCL/vectorLoadStore.cl b/test/CodeGenOpenCL/vectorLoadStore.cl new file mode 100644 index 0000000..44bc7bd --- /dev/null +++ b/test/CodeGenOpenCL/vectorLoadStore.cl @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 %s -emit-llvm -O0 -o - | FileCheck %s + +typedef char char3 __attribute((ext_vector_type(3)));; + +// Check for optimized vec3 load/store which treats vec3 as vec4. +void foo(char3 *P, char3 *Q) { + *P = *Q; + // CHECK: %{{.*}} = shufflevector <4 x i8> %{{.*}}, <4 x i8> undef, <3 x i32> +} diff --git a/test/Driver/Xlinker-args.c b/test/Driver/Xlinker-args.c index b4e5a9b..d89d5ba 100644 --- a/test/Driver/Xlinker-args.c +++ b/test/Driver/Xlinker-args.c @@ -4,6 +4,13 @@ // RUN: %clang -target i386-apple-darwin9 -### \ // RUN: -Xlinker one -Xlinker --no-demangle \ // RUN: -Wl,two,--no-demangle,three -Xlinker four %s 2> %t -// RUN: FileCheck < %t %s +// RUN: FileCheck -check-prefix=DARWIN < %t %s // -// CHECK: "one" "two" "three" "four" +// RUN: %clang -target x86_64-pc-linux-gnu -### \ +// RUN: -Xlinker one -Xlinker --no-demangle \ +// RUN: -Wl,two,--no-demangle,three -Xlinker four %s 2> %t +// RUN: FileCheck -check-prefix=LINUX < %t %s +// +// DARWIN-NOT: --no-demangle +// DARWIN: "one" "two" "three" "four" +// LINUX: "--no-demangle" "one" "two" "three" "four" diff --git a/test/Index/complete-enums.cpp b/test/Index/complete-enums.cpp index 49a8932..23c60ac 100644 --- a/test/Index/complete-enums.cpp +++ b/test/Index/complete-enums.cpp @@ -1,6 +1,6 @@ // Note: the run lines follow their respective tests, since line/column // matter in this test. 
- +struct X { X(); ~X(); }; enum class Color { Red = 17, Green, @@ -9,7 +9,7 @@ enum class Color { int Greeby(); void f(Color color) { switch (color) { - case Color::Green: + case Color::Green: { X x; } case Color::Red; } } diff --git a/test/Index/complete-exprs.m b/test/Index/complete-exprs.m index 3fb1540..16eeda9 100644 --- a/test/Index/complete-exprs.m +++ b/test/Index/complete-exprs.m @@ -21,7 +21,7 @@ __strong id global; // CHECK-CC1: NotImplemented:{ResultType NSString *}{TypedText @"}{Placeholder string}{Text "} (40) // CHECK-CC1: NotImplemented:{ResultType id}{TypedText @(}{Placeholder expression}{RightParen )} (40) // CHECK-CC1: NotImplemented:{ResultType NSArray *}{TypedText @[}{Placeholder objects, ...}{RightBracket ]} (40) -// CHECK-CC1: NotImplemented:{ResultType NSDictionary *}{TypedText @{}{Placeholder key}{HorizontalSpace }{Colon :}{HorizontalSpace }{Placeholder object, ...}{RightBrace }} (40) +// CHECK-CC1: NotImplemented:{ResultType NSDictionary *}{TypedText @{}{Placeholder key}{Colon :}{HorizontalSpace }{Placeholder object, ...}{RightBrace }} (40) // CHECK-CC1: NotImplemented:{ResultType SEL}{TypedText _cmd} (80) // CHECK-CC1: TypedefDecl:{TypedText BOOL} (50) // CHECK-CC1: macro definition:{TypedText bool} (51) @@ -43,7 +43,7 @@ __strong id global; // RUN: c-index-test -code-completion-at=%s:16:5 %s | FileCheck -check-prefix=CHECK-CC4 %s // RUN: c-index-test -code-completion-at=%s:16:14 %s | FileCheck -check-prefix=CHECK-CC4 %s // CHECK-CC4: NotImplemented:{ResultType NSArray *}{TypedText @[}{Placeholder objects, ...}{RightBracket ]} (40) -// CHECK-CC4: NotImplemented:{ResultType NSDictionary *}{TypedText @{}{Placeholder key}{HorizontalSpace }{Colon :}{HorizontalSpace }{Placeholder object, ...}{RightBrace }} (40) +// CHECK-CC4: NotImplemented:{ResultType NSDictionary *}{TypedText @{}{Placeholder key}{Colon :}{HorizontalSpace }{Placeholder object, ...}{RightBrace }} (40) // CHECK-CC4: NotImplemented:{ResultType SEL}{TypedText _cmd} (80) // CHECK-CC4: macro definition:{TypedText bool} (51) // CHECK-CC4: macro definition:{TypedText NO} (65) diff --git a/test/Index/complete-preamble.cpp b/test/Index/complete-preamble.cpp new file mode 100644 index 0000000..8f48105 --- /dev/null +++ b/test/Index/complete-preamble.cpp @@ -0,0 +1,8 @@ +#include "complete-preamble.h" +void f() { + std:: +} + +// RUN: env CINDEXTEST_EDITING=1 c-index-test -code-completion-at=%s:3:8 %s -o - | FileCheck -check-prefix=CC1 %s +// CHECK-CC1: {ResultType void}{TypedText wibble}{LeftParen (}{RightParen )} (50) (parent: Namespace 'std') + diff --git a/test/Index/complete-preamble.h b/test/Index/complete-preamble.h new file mode 100644 index 0000000..e696284 --- /dev/null +++ b/test/Index/complete-preamble.h @@ -0,0 +1,6 @@ +namespace std { + void wibble(); +} + +namespace std { +} diff --git a/test/Parser/ms-inline-asm.c b/test/Parser/ms-inline-asm.c index 2d18195..5326ce4 100644 --- a/test/Parser/ms-inline-asm.c +++ b/test/Parser/ms-inline-asm.c @@ -11,13 +11,13 @@ void t5() { __asm { // expected-warning {{MS-style inline assembly is not supported}} int 0x2c ; } asm comments are fun! 
}{ } - __asm {} // no warning as this gets merged with the previous inline asm + __asm {} // expected-warning {{MS-style inline assembly is not supported}} } int t6() { __asm int 3 ; } comments for single-line asm // expected-warning {{MS-style inline assembly is not supported}} - __asm {} // no warning as this gets merged with the previous inline asm + __asm {} // expected-warning {{MS-style inline assembly is not supported}} - __asm int 4 // no warning as this gets merged with the previous inline asm + __asm int 4 // expected-warning {{MS-style inline assembly is not supported}} return 10; } int t7() { diff --git a/test/Sema/128bitint.c b/test/Sema/128bitint.c index ddad835..600c25a 100644 --- a/test/Sema/128bitint.c +++ b/test/Sema/128bitint.c @@ -18,3 +18,22 @@ long long Signed64 = 123456789012345678901234567890i128; // expected-warning {{i unsigned long long UnsignedTooBig = 123456789012345678901234567890; // expected-warning {{integer constant is too large for its type}} __uint128_t Unsigned128 = 123456789012345678901234567890Ui128; unsigned long long Unsigned64 = 123456789012345678901234567890Ui128; // expected-warning {{implicit conversion from 'unsigned __int128' to 'unsigned long long' changes value from 123456789012345678901234567890 to 14083847773837265618}} + +// Ensure we don't crash when user passes 128-bit values to type safety +// attributes. +void pointer_with_type_tag_arg_num_1(void *buf, int datatype) + __attribute__(( pointer_with_type_tag(mpi,0x10000000000000001i128,1) )); // expected-error {{attribute parameter 2 is out of bounds}} + +void pointer_with_type_tag_arg_num_2(void *buf, int datatype) + __attribute__(( pointer_with_type_tag(mpi,1,0x10000000000000001i128) )); // expected-error {{attribute parameter 3 is out of bounds}} + +void MPI_Send(void *buf, int datatype) __attribute__(( pointer_with_type_tag(mpi,1,2) )); + +static const __uint128_t mpi_int_wrong __attribute__(( type_tag_for_datatype(mpi,int) )) = 0x10000000000000001i128; // expected-error {{'type_tag_for_datatype' attribute requires the initializer to be an integer constant expression that can be represented by a 64 bit integer}} +static const int mpi_int __attribute__(( type_tag_for_datatype(mpi,int) )) = 10; + +void test(int *buf) +{ + MPI_Send(buf, 0x10000000000000001i128); // expected-warning {{implicit conversion from '__int128' to 'int' changes value}} +} + diff --git a/test/Sema/arm-asm.c b/test/Sema/arm-asm.c new file mode 100644 index 0000000..3fc0eeb --- /dev/null +++ b/test/Sema/arm-asm.c @@ -0,0 +1,7 @@ +// RUN: %clang_cc1 %s -triple armv7-apple-darwin -verify -fsyntax-only + +void f (void) { + int Val; + asm volatile ("lw (r1), %0[val]": "=&b"(Val)); // expected-error {{invalid output constraint '=&b' in asm}} + return; +} diff --git a/test/Sema/builtins-decl.c b/test/Sema/builtins-decl.c index 19bdb84..d6b004a 100644 --- a/test/Sema/builtins-decl.c +++ b/test/Sema/builtins-decl.c @@ -6,3 +6,8 @@ extern unsigned int __builtin_ia32_crc32qi (unsigned int, unsigned char); extern unsigned int __builtin_ia32_crc32hi (unsigned int, unsigned short); extern unsigned int __builtin_ia32_crc32si (unsigned int, unsigned int); + +// GCC documents these as unsigned, but they are defined with a signed argument. 
+extern int __builtin_ffs(int); +extern int __builtin_ffsl(long); +extern int __builtin_ffsll(long long); diff --git a/test/Sema/callingconv.c b/test/Sema/callingconv.c index 25669f0..6c844a3 100644 --- a/test/Sema/callingconv.c +++ b/test/Sema/callingconv.c @@ -36,6 +36,14 @@ void (__attribute__((cdecl)) *pctest2)() = ctest2; typedef void (__attribute__((fastcall)) *Handler) (float *); Handler H = foo; +int __attribute__((pcs("aapcs", "aapcs"))) pcs1(void); // expected-error {{attribute takes one argument}} +int __attribute__((pcs())) pcs2(void); // expected-error {{attribute takes one argument}} +int __attribute__((pcs(pcs1))) pcs3(void); // expected-error {{attribute takes one argument}} +int __attribute__((pcs(0))) pcs4(void); // expected-error {{'pcs' attribute requires parameter 1 to be a string}} +int __attribute__((pcs("aapcs"))) pcs5(void); // no-error +int __attribute__((pcs("aapcs-vfp"))) pcs6(void); // no-error +int __attribute__((pcs("foo"))) pcs7(void); // expected-error {{Invalid PCS type}} + // PR6361 void ctest3(); void __attribute__((cdecl)) ctest3() {} diff --git a/test/Sema/static-array.c b/test/Sema/static-array.c index 2d4b968..5ca693b 100644 --- a/test/Sema/static-array.c +++ b/test/Sema/static-array.c @@ -1,12 +1,9 @@ -// RUN: %clang_cc1 -fsyntax-only -verify %s +// RUN: %clang_cc1 -fsyntax-only -fblocks -verify %s void cat0(int a[static 0]) {} // expected-warning {{'static' has no effect on zero-length arrays}} void cat(int a[static 3]) {} // expected-note 2 {{callee declares array parameter as static here}} -typedef int i3[static 3]; -void tcat(i3 a) {} - void vat(int i, int a[static i]) {} // expected-note {{callee declares array parameter as static here}} void f(int *p) { @@ -20,12 +17,41 @@ void f(int *p) { cat(c); cat(p); - tcat(0); // expected-warning {{null passed to a callee which requires a non-null argument}} - tcat(a); // expected-warning {{array argument is too small; contains 2 elements, callee requires at least 3}} - tcat(b); - tcat(c); - tcat(p); - vat(1, 0); // expected-warning {{null passed to a callee which requires a non-null argument}} vat(3, b); } + + +typedef int td[static 3]; // expected-error {{'static' used in array declarator outside of function prototype}} +typedef void(*fp)(int[static 42]); // no-warning + +void g(void) { + int a[static 42]; // expected-error {{'static' used in array declarator outside of function prototype}} + + int b[const 10]; // expected-error {{type qualifier used in array declarator outside of function prototype}} + int c[volatile 10]; // expected-error {{type qualifier used in array declarator outside of function prototype}} + int d[restrict 10]; // expected-error {{type qualifier used in array declarator outside of function prototype}} + + int e[static restrict 1]; // expected-error {{'static' used in array declarator outside of function prototype}} +} + +void h(int [static const 10][42]); // no-warning + +void i(int [10] + [static 42]); // expected-error {{'static' used in non-outermost array type derivation}} + +void j(int [10] + [const 42]); // expected-error {{type qualifier used in non-outermost array type derivation}} + +void k(int (*x)[static 10]); // expected-error {{'static' used in non-outermost array type derivation}} +void l(int (x)[static 10]); // no-warning +void m(int *x[static 10]); // no-warning +void n(int *(x)[static 10]); // no-warning + +void o(int (x[static 10])(void)); // expected-error{{'x' declared as array of functions of type 'int (void)'}} +void p(int (^x)[static 10]); // 
expected-error{{block pointer to non-function type is invalid}} +void q(int (^x[static 10])()); // no-warning + +void r(x) + int x[restrict]; // no-warning +{} diff --git a/test/Sema/tentative-decls.c b/test/Sema/tentative-decls.c index b15537b..e14540b 100644 --- a/test/Sema/tentative-decls.c +++ b/test/Sema/tentative-decls.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -fsyntax-only -verify +// RUN: %clang_cc1 %s -fsyntax-only -Wprivate-extern -verify // PR3310 struct a x1; // expected-note 2{{forward declaration of 'struct a'}} @@ -32,7 +32,8 @@ int i2 = 3; // expected-error{{non-static declaration of 'i2' follows static dec static int i3 = 5; extern int i3; -__private_extern__ int pExtern; +// rdar://7703982 +__private_extern__ int pExtern; // expected-warning {{Use of __private_extern__ on tentative definition has unexpected behaviour}} int pExtern = 0; int i4; diff --git a/test/Sema/warn-documentation.cpp b/test/Sema/warn-documentation.cpp index 16ba429..d99520b 100644 --- a/test/Sema/warn-documentation.cpp +++ b/test/Sema/warn-documentation.cpp @@ -624,13 +624,9 @@ class test_attach37 { /// \brief\author Aaa /// \tparam T Aaa void test_attach38(int aaa, int bbb); -}; -// expected-warning@+1 {{empty paragraph passed to '\brief' command}} -/// \brief\author Aaa -/// \tparam T Aaa -template -void test_attach37::test_attach38(int aaa, int bbb) {} + void test_attach39(int aaa, int bbb); +}; // expected-warning@+2 {{empty paragraph passed to '\brief' command}} // expected-warning@+2 {{template parameter 'T' not found in the template declaration}} @@ -639,6 +635,29 @@ void test_attach37::test_attach38(int aaa, int bbb) {} template<> void test_attach37::test_attach38(int aaa, int bbb) {} +// expected-warning@+1 {{empty paragraph passed to '\brief' command}} +/// \brief\author Aaa +/// \tparam T Aaa +template +void test_attach37::test_attach39(int aaa, int bbb) {} + +// We used to emit warning that parameter 'a' is not found because we parsed +// the comment in context of the redeclaration which does not have parameter +// names. +template +struct test_attach38 { + /*! + \param a First param + \param b Second param + */ + template + void test_attach39(T a, B b); +}; + +template <> +template +void test_attach38::test_attach39(int, B); + // PR13411, reduced. We used to crash on this. /** @@ -652,7 +671,7 @@ void test_nocrash1(int); /// \param\brief void test_nocrash2(int); -// PR13593 +// PR13593, example 1 and 2 /** * Bla. @@ -668,3 +687,30 @@ template void test_nocrash3() { } + +// PR13593, example 3 + +/** + * aaa + */ +template +inline T test_nocrash5(T a1) +{ + return a1; +} + +/// +//, + +inline void test_nocrash6() +{ + test_nocrash5(1); +} + +// We used to crash on this. + +/*! + Blah. 
+*/ +typedef const struct test_nocrash7 * test_nocrash8; + diff --git a/test/Sema/warn-type-safety-mpi-hdf5.c b/test/Sema/warn-type-safety-mpi-hdf5.c new file mode 100644 index 0000000..9c2ee96 --- /dev/null +++ b/test/Sema/warn-type-safety-mpi-hdf5.c @@ -0,0 +1,307 @@ +// RUN: %clang_cc1 -std=c99 -DOPEN_MPI -fsyntax-only -verify %s +// RUN: %clang_cc1 -std=c99 -DMPICH -fsyntax-only -verify %s +// RUN: %clang_cc1 -x c++ -std=c++98 -DOPEN_MPI -fsyntax-only -verify %s +// RUN: %clang_cc1 -x c++ -std=c++98 -DMPICH -fsyntax-only -verify %s +// +// RUN: %clang_cc1 -std=c99 -DOPEN_MPI -fno-signed-char -fsyntax-only -verify %s +// RUN: %clang_cc1 -std=c99 -DMPICH -fno-signed-char -fsyntax-only -verify %s + +//===--- limits.h mock ----------------------------------------------------===// + +#ifdef __CHAR_UNSIGNED__ +#define CHAR_MIN 0 +#define CHAR_MAX (__SCHAR_MAX__*2 +1) +#else +#define CHAR_MIN (-__SCHAR_MAX__-1) +#define CHAR_MAX __SCHAR_MAX__ +#endif + +//===--- mpi.h mock -------------------------------------------------------===// + +#define NULL ((void *)0) + +#ifdef OPEN_MPI +typedef struct ompi_datatype_t *MPI_Datatype; +#endif + +#ifdef MPICH +typedef int MPI_Datatype; +#endif + +int MPI_Send(void *buf, int count, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,1,3) )); + +int MPI_Gather(void *sendbuf, int sendcount, MPI_Datatype sendtype, + void *recvbuf, int recvcount, MPI_Datatype recvtype) + __attribute__(( pointer_with_type_tag(mpi,1,3), pointer_with_type_tag(mpi,4,6) )); + +#ifdef OPEN_MPI +// OpenMPI and LAM/MPI-style datatype definitions + +#define OMPI_PREDEFINED_GLOBAL(type, global) ((type) &(global)) + +#define MPI_DATATYPE_NULL OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_datatype_null) +#define MPI_FLOAT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_float) +#define MPI_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_int) +#define MPI_LONG OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_long) +#define MPI_LONG_LONG_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_long_long_int) +#define MPI_CHAR OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_char) + +#define MPI_FLOAT_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_float_int) +#define MPI_2INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_2int) + +#define MPI_IN_PLACE ((void *) 1) + +extern struct ompi_predefined_datatype_t ompi_mpi_datatype_null __attribute__(( type_tag_for_datatype(mpi,void,must_be_null) )); +extern struct ompi_predefined_datatype_t ompi_mpi_float __attribute__(( type_tag_for_datatype(mpi,float) )); +extern struct ompi_predefined_datatype_t ompi_mpi_int __attribute__(( type_tag_for_datatype(mpi,int) )); +extern struct ompi_predefined_datatype_t ompi_mpi_long __attribute__(( type_tag_for_datatype(mpi,long) )); +extern struct ompi_predefined_datatype_t ompi_mpi_long_long_int __attribute__(( type_tag_for_datatype(mpi,long long int) )); +extern struct ompi_predefined_datatype_t ompi_mpi_char __attribute__(( type_tag_for_datatype(mpi,char) )); + +struct ompi_struct_mpi_float_int {float f; int i;}; +extern struct ompi_predefined_datatype_t ompi_mpi_float_int __attribute__(( type_tag_for_datatype(mpi, struct ompi_struct_mpi_float_int, layout_compatible) )); + +struct ompi_struct_mpi_2int {int i1; int i2;}; +extern struct ompi_predefined_datatype_t ompi_mpi_2int __attribute__(( type_tag_for_datatype(mpi, struct ompi_struct_mpi_2int, layout_compatible) )); +#endif + +#ifdef MPICH +// MPICH2 and MVAPICH2-style datatype definitions + +#define MPI_COMM_WORLD ((MPI_Comm) 0x44000000) + +#define 
MPI_DATATYPE_NULL ((MPI_Datatype) 0xa0000000) +#define MPI_FLOAT ((MPI_Datatype) 0xa0000001) +#define MPI_INT ((MPI_Datatype) 0xa0000002) +#define MPI_LONG ((MPI_Datatype) 0xa0000003) +#define MPI_LONG_LONG_INT ((MPI_Datatype) 0xa0000004) +#define MPI_CHAR ((MPI_Datatype) 0xa0000005) + +#define MPI_FLOAT_INT ((MPI_Datatype) 0xa0000006) +#define MPI_2INT ((MPI_Datatype) 0xa0000007) + +#define MPI_IN_PLACE (void *) -1 + +static const MPI_Datatype mpich_mpi_datatype_null __attribute__(( type_tag_for_datatype(mpi,void,must_be_null) )) = 0xa0000000; +static const MPI_Datatype mpich_mpi_float __attribute__(( type_tag_for_datatype(mpi,float) )) = 0xa0000001; +static const MPI_Datatype mpich_mpi_int __attribute__(( type_tag_for_datatype(mpi,int) )) = 0xa0000002; +static const MPI_Datatype mpich_mpi_long __attribute__(( type_tag_for_datatype(mpi,long) )) = 0xa0000003; +static const MPI_Datatype mpich_mpi_long_long_int __attribute__(( type_tag_for_datatype(mpi,long long int) )) = 0xa0000004; +static const MPI_Datatype mpich_mpi_char __attribute__(( type_tag_for_datatype(mpi,char) )) = 0xa0000005; + +struct mpich_struct_mpi_float_int { float f; int i; }; +struct mpich_struct_mpi_2int { int i1; int i2; }; +static const MPI_Datatype mpich_mpi_float_int __attribute__(( type_tag_for_datatype(mpi, struct mpich_struct_mpi_float_int, layout_compatible) )) = 0xa0000006; +static const MPI_Datatype mpich_mpi_2int __attribute__(( type_tag_for_datatype(mpi, struct mpich_struct_mpi_2int, layout_compatible) )) = 0xa0000007; +#endif + +//===--- HDF5 headers mock ------------------------------------------------===// + +typedef int hid_t; +void H5open(void); + +#ifndef HDF_PRIVATE +#define H5OPEN H5open(), +#else +#define H5OPEN +#endif + +#define H5T_NATIVE_CHAR (CHAR_MIN?H5T_NATIVE_SCHAR:H5T_NATIVE_UCHAR) +#define H5T_NATIVE_SCHAR (H5OPEN H5T_NATIVE_SCHAR_g) +#define H5T_NATIVE_UCHAR (H5OPEN H5T_NATIVE_UCHAR_g) +#define H5T_NATIVE_INT (H5OPEN H5T_NATIVE_INT_g) +#define H5T_NATIVE_LONG (H5OPEN H5T_NATIVE_LONG_g) + +hid_t H5T_NATIVE_SCHAR_g __attribute__(( type_tag_for_datatype(hdf5,signed char) )); +hid_t H5T_NATIVE_UCHAR_g __attribute__(( type_tag_for_datatype(hdf5,unsigned char) )); +hid_t H5T_NATIVE_INT_g __attribute__(( type_tag_for_datatype(hdf5,int) )); +hid_t H5T_NATIVE_LONG_g __attribute__(( type_tag_for_datatype(hdf5,long) )); + +void H5Dwrite(hid_t mem_type_id, const void *buf) __attribute__(( pointer_with_type_tag(hdf5,2,1) )); + +//===--- Tests ------------------------------------------------------------===// + +//===--- MPI + +struct pair_float_int +{ + float f; int i; +}; + +struct pair_int_int +{ + int i1; int i2; +}; + +void test_mpi_predefined_types( + int *int_buf, + long *long_buf1, + long *long_buf2, + void *void_buf, + struct pair_float_int *pfi, + struct pair_int_int *pii) +{ + char char_buf[255]; + + // Layout-compatible scalar types. + MPI_Send(int_buf, 1, MPI_INT); // no-warning + + // Layout-compatible class types. + MPI_Send(pfi, 1, MPI_FLOAT_INT); // no-warning + MPI_Send(pii, 1, MPI_2INT); // no-warning + + // Layout-incompatible scalar types. + MPI_Send(long_buf1, 1, MPI_INT); // expected-warning {{argument type 'long *' doesn't match specified 'mpi' type tag that requires 'int *'}} + + // Layout-incompatible class types. 
+ MPI_Send(pii, 1, MPI_FLOAT_INT); // expected-warning {{argument type 'struct pair_int_int *' doesn't match specified 'mpi' type tag}} + MPI_Send(pfi, 1, MPI_2INT); // expected-warning {{argument type 'struct pair_float_int *' doesn't match specified 'mpi' type tag}} + + // Layout-incompatible class-scalar types. + MPI_Send(long_buf1, 1, MPI_2INT); // expected-warning {{argument type 'long *' doesn't match specified 'mpi' type tag}} + + // Function with two buffers. + MPI_Gather(long_buf1, 1, MPI_INT, // expected-warning {{argument type 'long *' doesn't match specified 'mpi' type tag that requires 'int *'}} + long_buf2, 1, MPI_INT); // expected-warning {{argument type 'long *' doesn't match specified 'mpi' type tag that requires 'int *'}} + + // Array buffers should work like pointer buffers. + MPI_Send(char_buf, 255, MPI_CHAR); // no-warning + + // Explicit casts should not be dropped. + MPI_Send((int *) char_buf, 255, MPI_INT); // no-warning + MPI_Send((int *) char_buf, 255, MPI_CHAR); // expected-warning {{argument type 'int *' doesn't match specified 'mpi' type tag that requires 'char *'}} + + // `void*' buffer should never warn. + MPI_Send(void_buf, 255, MPI_CHAR); // no-warning + + // We expect that MPI_IN_PLACE is `void*', shouldn't warn. + MPI_Gather(MPI_IN_PLACE, 0, MPI_INT, + int_buf, 1, MPI_INT); + + // Special handling for MPI_DATATYPE_NULL: buffer pointer should be either + // a `void*' pointer or a null pointer constant. + MPI_Gather(NULL, 0, MPI_DATATYPE_NULL, // no-warning + int_buf, 1, MPI_INT); + + MPI_Gather(int_buf, 0, MPI_DATATYPE_NULL, // expected-warning {{specified mpi type tag requires a null pointer}} + int_buf, 1, MPI_INT); +} + +MPI_Datatype my_int_datatype __attribute__(( type_tag_for_datatype(mpi,int) )); + +struct S1 { int a; int b; }; +MPI_Datatype my_s1_datatype __attribute__(( type_tag_for_datatype(mpi,struct S1) )); + +// Layout-compatible to S1, but should be treated as a different type. +struct S2 { int a; int b; }; +MPI_Datatype my_s2_datatype __attribute__(( type_tag_for_datatype(mpi,struct S2) )); + +void test_user_types(int *int_buf, + long *long_buf, + struct S1 *s1_buf, + struct S2 *s2_buf) +{ + MPI_Send(int_buf, 1, my_int_datatype); // no-warning + MPI_Send(long_buf, 1, my_int_datatype); // expected-warning {{argument type 'long *' doesn't match specified 'mpi' type tag that requires 'int *'}} + + MPI_Send(s1_buf, 1, my_s1_datatype); // no-warning + MPI_Send(s1_buf, 1, my_s2_datatype); // expected-warning {{argument type 'struct S1 *' doesn't match specified 'mpi' type tag that requires 'struct S2 *'}} + + MPI_Send(long_buf, 1, my_s1_datatype); // expected-warning {{argument type 'long *' doesn't match specified 'mpi' type tag that requires 'struct S1 *'}} + MPI_Send(s1_buf, 1, MPI_INT); // expected-warning {{argument type 'struct S1 *' doesn't match specified 'mpi' type tag that requires 'int *'}} +} + +MPI_Datatype my_unknown_datatype; + +void test_not_annotated(int *int_buf, + long *long_buf, + MPI_Datatype type) +{ + // Using 'MPI_Datatype's without attributes should not produce warnings. 
+ MPI_Send(long_buf, 1, my_unknown_datatype); // no-warning + MPI_Send(int_buf, 1, type); // no-warning +} + +struct S1_compat { int a; int b; }; +MPI_Datatype my_s1_compat_datatype + __attribute__(( type_tag_for_datatype(mpi, struct S1_compat, layout_compatible) )); + +struct S3 { int a; long b; double c; double d; struct S1 s1; }; +struct S3_compat { int a; long b; double c; double d; struct S2 s2; }; +MPI_Datatype my_s3_compat_datatype + __attribute__(( type_tag_for_datatype(mpi, struct S3_compat, layout_compatible) )); + +struct S4 { char c; }; +struct S4_compat { signed char c; }; +MPI_Datatype my_s4_compat_datatype + __attribute__(( type_tag_for_datatype(mpi, struct S4_compat, layout_compatible) )); + +union U1 { int a; long b; double c; double d; struct S1 s1; }; +union U1_compat { long b; double c; struct S2 s; int a; double d; }; +MPI_Datatype my_u1_compat_datatype + __attribute__(( type_tag_for_datatype(mpi, union U1_compat, layout_compatible) )); + +union U2 { int a; long b; double c; struct S1 s1; }; +MPI_Datatype my_u2_datatype + __attribute__(( type_tag_for_datatype(mpi, union U2, layout_compatible) )); + +void test_layout_compatibility(struct S1 *s1_buf, struct S3 *s3_buf, + struct S4 *s4_buf, + union U1 *u1_buf, union U2 *u2_buf) +{ + MPI_Send(s1_buf, 1, my_s1_compat_datatype); // no-warning + MPI_Send(s3_buf, 1, my_s3_compat_datatype); // no-warning + MPI_Send(s1_buf, 1, my_s3_compat_datatype); // expected-warning {{argument type 'struct S1 *' doesn't match specified 'mpi' type tag}} + MPI_Send(s4_buf, 1, my_s4_compat_datatype); // expected-warning {{argument type 'struct S4 *' doesn't match specified 'mpi' type tag}} + MPI_Send(u1_buf, 1, my_u1_compat_datatype); // no-warning + MPI_Send(u1_buf, 1, my_u2_datatype); // expected-warning {{argument type 'union U1 *' doesn't match specified 'mpi' type tag}} + MPI_Send(u2_buf, 1, my_u1_compat_datatype); // expected-warning {{argument type 'union U2 *' doesn't match specified 'mpi' type tag}} +} + +// There is an MPI_REAL predefined in MPI, but some existing MPI programs do +// this. 
+typedef float real; +#define MPI_REAL MPI_FLOAT + +void test_mpi_real_user_type(real *real_buf, float *float_buf) +{ + MPI_Send(real_buf, 1, MPI_REAL); // no-warning + MPI_Send(real_buf, 1, MPI_FLOAT); // no-warning + MPI_Send(float_buf, 1, MPI_REAL); // no-warning + MPI_Send(float_buf, 1, MPI_FLOAT); // no-warning +} + +//===--- HDF5 + +void test_hdf5(char *char_buf, + signed char *schar_buf, + unsigned char *uchar_buf, + int *int_buf, + long *long_buf) +{ + H5Dwrite(H5T_NATIVE_CHAR, char_buf); // no-warning +#ifdef __CHAR_UNSIGNED__ + H5Dwrite(H5T_NATIVE_CHAR, schar_buf); // expected-warning {{argument type 'signed char *' doesn't match specified 'hdf5' type tag that requires 'unsigned char *'}} + H5Dwrite(H5T_NATIVE_CHAR, uchar_buf); // no-warning +#else + H5Dwrite(H5T_NATIVE_CHAR, schar_buf); // no-warning + H5Dwrite(H5T_NATIVE_CHAR, uchar_buf); // expected-warning {{argument type 'unsigned char *' doesn't match specified 'hdf5' type tag that requires 'signed char *'}} +#endif + H5Dwrite(H5T_NATIVE_SCHAR, schar_buf); // no-warning + H5Dwrite(H5T_NATIVE_UCHAR, uchar_buf); // no-warning + H5Dwrite(H5T_NATIVE_INT, int_buf); // no-warning + H5Dwrite(H5T_NATIVE_LONG, long_buf); // no-warning + +#ifdef __CHAR_UNSIGNED__ + H5Dwrite(H5T_NATIVE_CHAR, int_buf); // expected-warning {{argument type 'int *' doesn't match specified 'hdf5' type tag that requires 'unsigned char *'}} +#else + H5Dwrite(H5T_NATIVE_CHAR, int_buf); // expected-warning {{argument type 'int *' doesn't match specified 'hdf5' type tag that requires 'signed char *'}} +#endif + H5Dwrite(H5T_NATIVE_INT, long_buf); // expected-warning {{argument type 'long *' doesn't match specified 'hdf5' type tag that requires 'int *'}} + + // FIXME: we should warn here, but it will cause false positives because + // different kinds may use same magic values. 
+ //H5Dwrite(MPI_INT, int_buf); +} + diff --git a/test/Sema/warn-type-safety.c b/test/Sema/warn-type-safety.c new file mode 100644 index 0000000..6f548aa --- /dev/null +++ b/test/Sema/warn-type-safety.c @@ -0,0 +1,152 @@ +// RUN: %clang_cc1 -std=c99 -fsyntax-only -verify %s +// RUN: %clang_cc1 -x c++ -std=c++98 -fsyntax-only -verify %s +// RUN: %clang_cc1 -std=c99 -fno-signed-char -fsyntax-only -verify %s + +struct A {}; + +typedef struct A *MPI_Datatype; + +int wrong1(void *buf, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag )); // expected-error {{attribute requires parameter 1 to be an identifier}} + +int wrong2(void *buf, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,0,7) )); // expected-error {{attribute parameter 2 is out of bounds}} + +int wrong3(void *buf, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,3,7) )); // expected-error {{attribute parameter 2 is out of bounds}} + +int wrong4(void *buf, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,1,0) )); // expected-error {{attribute parameter 3 is out of bounds}} + +int wrong5(void *buf, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,1,3) )); // expected-error {{attribute parameter 3 is out of bounds}} + +int wrong6(void *buf, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,0x8000000000000001ULL,1) )); // expected-error {{attribute parameter 2 is out of bounds}} + +extern int x; + +int wrong7(void *buf, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,x,2) )); // expected-error {{attribute requires parameter 2 to be an integer constant}} + +int wrong8(void *buf, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,1,x) )); // expected-error {{attribute requires parameter 3 to be an integer constant}} + +int wrong9 __attribute__(( pointer_with_type_tag(mpi,1,2) )); // expected-error {{attribute only applies to functions and methods}} + +int wrong10(double buf, MPI_Datatype type) + __attribute__(( pointer_with_type_tag(mpi,1,2) )); // expected-error {{'pointer_with_type_tag' attribute only applies to pointer arguments}} + + +extern struct A datatype_wrong1 + __attribute__(( type_tag_for_datatype )); // expected-error {{attribute requires parameter 1 to be an identifier}} + +extern struct A datatype_wrong2 + __attribute__(( type_tag_for_datatype(mpi,1,2) )); // expected-error {{expected a type}} + +extern struct A datatype_wrong3 + __attribute__(( type_tag_for_datatype(mpi,not_a_type) )); // expected-error {{unknown type name 'not_a_type'}} + +extern struct A datatype_wrong4 + __attribute__(( type_tag_for_datatype(mpi,int,int) )); // expected-error {{expected identifier}} + +extern struct A datatype_wrong5 + __attribute__(( type_tag_for_datatype(mpi,int,not_a_flag) )); // expected-error {{invalid comparison flag 'not_a_flag'}} + +extern struct A datatype_wrong6 + __attribute__(( type_tag_for_datatype(mpi,int,layout_compatible,not_a_flag) )); // expected-error {{invalid comparison flag 'not_a_flag'}} + + +// Using a tag with kind A in a place where the function requires kind B should +// warn. 
+ +void A_func(void *ptr, void *tag) __attribute__(( pointer_with_type_tag(a,1,2) )); + +extern struct A A_tag __attribute__(( type_tag_for_datatype(a,int) )); +extern struct A B_tag __attribute__(( type_tag_for_datatype(b,int) )); + +void C_func(void *ptr, int tag) __attribute__(( pointer_with_type_tag(c,1,2) )); + +static const int C_tag __attribute__(( type_tag_for_datatype(c,int) )) = 10; +static const int D_tag __attribute__(( type_tag_for_datatype(d,int) )) = 20; + +void test_tag_mismatch(int *ptr) +{ + A_func(ptr, &A_tag); // no-warning + A_func(ptr, &B_tag); // expected-warning {{this type tag was not designed to be used with this function}} + C_func(ptr, C_tag); // no-warning + C_func(ptr, D_tag); // expected-warning {{this type tag was not designed to be used with this function}} + C_func(ptr, 10); // no-warning + C_func(ptr, 20); // should warn, but may cause false positives +} + +// Check that we look through typedefs in the special case of allowing 'char' +// to be matched with 'signed char' or 'unsigned char'. +void E_func(void *ptr, int tag) __attribute__(( pointer_with_type_tag(e,1,2) )); + +typedef char E_char; +typedef char E_char_2; +typedef signed char E_char_signed; +typedef unsigned char E_char_unsigned; + +static const int E_tag __attribute__(( type_tag_for_datatype(e,E_char) )) = 10; + +void test_char_typedef(char *char_buf, + E_char_2 *e_char_buf, + E_char_signed *e_char_signed_buf, + E_char_unsigned *e_char_unsigned_buf) +{ + E_func(char_buf, E_tag); + E_func(e_char_buf, E_tag); +#ifdef __CHAR_UNSIGNED__ + E_func(e_char_signed_buf, E_tag); // expected-warning {{argument type 'E_char_signed *' (aka 'signed char *') doesn't match specified 'e' type tag that requires 'E_char *' (aka 'char *')}} + E_func(e_char_unsigned_buf, E_tag); +#else + E_func(e_char_signed_buf, E_tag); + E_func(e_char_unsigned_buf, E_tag); // expected-warning {{argument type 'E_char_unsigned *' (aka 'unsigned char *') doesn't match specified 'e' type tag that requires 'E_char *' (aka 'char *')}} +#endif +} + +// Tests for argument_with_type_tag. + +#define F_DUPFD 10 +#define F_SETLK 20 + +struct flock { }; + +static const int F_DUPFD_tag __attribute__(( type_tag_for_datatype(fcntl,int) )) = F_DUPFD; +static const int F_SETLK_tag __attribute__(( type_tag_for_datatype(fcntl,struct flock *) )) = F_SETLK; + +int fcntl(int fd, int cmd, ...) __attribute__(( argument_with_type_tag(fcntl,3,2) )); + +void test_argument_with_type_tag(struct flock *f) +{ + fcntl(0, F_DUPFD, 10); // no-warning + fcntl(0, F_SETLK, f); // no-warning + + fcntl(0, F_SETLK, 10); // expected-warning {{argument type 'int' doesn't match specified 'fcntl' type tag that requires 'struct flock *'}} + fcntl(0, F_DUPFD, f); // expected-warning {{argument type 'struct flock *' doesn't match specified 'fcntl' type tag that requires 'int'}} +} + +void test_tag_expresssion(int b) { + fcntl(0, b ? F_DUPFD : F_SETLK, 10); // no-warning + fcntl(0, b + F_DUPFD, 10); // no-warning + fcntl(0, (b, F_DUPFD), 10); // expected-warning {{expression result unused}} +} + +// Check that using 64-bit magic values as tags works and tag values do not +// overflow internally. 
+void F_func(void *ptr, unsigned long long tag) __attribute__((pointer_with_type_tag(f,1,2) )); + +static const unsigned long long F_tag1 __attribute__(( type_tag_for_datatype(f,int) )) = 0xFFFFFFFFFFFFFFFFULL; +static const unsigned long long F_tag2 __attribute__(( type_tag_for_datatype(f,float) )) = 0xFFFFFFFFULL; + +void test_64bit_magic(int *int_ptr, float *float_ptr) +{ + F_func(int_ptr, 0xFFFFFFFFFFFFFFFFULL); + F_func(int_ptr, 0xFFFFFFFFULL); // expected-warning {{argument type 'int *' doesn't match specified 'f' type tag that requires 'float *'}} + F_func(float_ptr, 0xFFFFFFFFFFFFFFFFULL); // expected-warning {{argument type 'float *' doesn't match specified 'f' type tag that requires 'int *'}} + F_func(float_ptr, 0xFFFFFFFFULL); +} + + diff --git a/test/Sema/warn-type-safety.cpp b/test/Sema/warn-type-safety.cpp new file mode 100644 index 0000000..d053fba --- /dev/null +++ b/test/Sema/warn-type-safety.cpp @@ -0,0 +1,71 @@ +// RUN: %clang_cc1 -fsyntax-only -verify %s + +typedef struct ompi_datatype_t *MPI_Datatype; + +#define OMPI_PREDEFINED_GLOBAL(type, global) ((type) &(global)) + +#define MPI_FLOAT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_float) +#define MPI_INT OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_int) +#define MPI_NULL OMPI_PREDEFINED_GLOBAL(MPI_Datatype, ompi_mpi_null) + +extern struct ompi_predefined_datatype_t ompi_mpi_float __attribute__(( type_tag_for_datatype(mpi,float) )); +extern struct ompi_predefined_datatype_t ompi_mpi_int __attribute__(( type_tag_for_datatype(mpi,int) )); +extern struct ompi_predefined_datatype_t ompi_mpi_null __attribute__(( type_tag_for_datatype(mpi,void,must_be_null) )); + +int f(int x) { return x; } +static const int wrong_init __attribute__(( type_tag_for_datatype(zzz,int) )) = f(100); // expected-error {{'type_tag_for_datatype' attribute requires the initializer to be an integral constant expression}} + +//===--- Tests ------------------------------------------------------------===// +// Check that hidden 'this' is handled correctly. + +class C +{ +public: + void f1(void *buf, int count, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,5,6) )); // expected-error {{attribute parameter 2 is out of bounds}} + + void f2(void *buf, int count, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,2,5) )); // expected-error {{attribute parameter 3 is out of bounds}} + + void f3(void *buf, int count, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,1,5) )); // expected-error {{attribute is invalid for the implicit this argument}} + + void f4(void *buf, int count, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,2,1) )); // expected-error {{attribute is invalid for the implicit this argument}} + + void MPI_Send(void *buf, int count, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,2,4) )); // no-error +}; + +// Check that we don't crash on type and value dependent expressions. 
+template +void value_dep(void *buf, int count, MPI_Datatype datatype) + __attribute__(( pointer_with_type_tag(mpi,a,5) )); // expected-error {{attribute requires parameter 2 to be an integer constant}} + +class OperatorIntStar +{ +public: + operator int*(); +}; + +void test1(C *c, int *int_buf) +{ + c->MPI_Send(int_buf, 1, MPI_INT); // no-warning + c->MPI_Send(int_buf, 1, MPI_FLOAT); // expected-warning {{argument type 'int *' doesn't match specified 'mpi' type tag that requires 'float *'}} + + OperatorIntStar i; + c->MPI_Send(i, 1, MPI_INT); // no-warning + c->MPI_Send(i, 1, MPI_FLOAT); // expected-warning {{argument type 'int *' doesn't match specified 'mpi' type tag that requires 'float *'}} +} + +template +void test2(C *c, int *int_buf, T tag) +{ + c->MPI_Send(int_buf, 1, tag); // no-warning +} + +void test3(C *c, int *int_buf) { + test2(c, int_buf, MPI_INT); + test2(c, int_buf, MPI_NULL); +} + diff --git a/test/SemaCXX/convert-to-bool.cpp b/test/SemaCXX/convert-to-bool.cpp index c9a3555..b52f11c 100644 --- a/test/SemaCXX/convert-to-bool.cpp +++ b/test/SemaCXX/convert-to-bool.cpp @@ -62,6 +62,5 @@ struct C { void test_copy_init_conversions(C c) { A &a = c; // expected-error{{no viable conversion from 'C' to 'A'}} - B &b = b; // okay + B &b = c; // okay } - diff --git a/test/SemaCXX/pragma-pack.cpp b/test/SemaCXX/pragma-pack.cpp index 1bc738b..5c1d5c6 100644 --- a/test/SemaCXX/pragma-pack.cpp +++ b/test/SemaCXX/pragma-pack.cpp @@ -32,3 +32,26 @@ struct Sub : virtual Base { int check[sizeof(Sub) == 13 ? 1 : -1]; } + +namespace llvm_support_endian { + +template struct X; + +#pragma pack(push) +#pragma pack(1) +template struct X { + T t; +}; +#pragma pack(pop) + +#pragma pack(push) +#pragma pack(2) +template<> struct X { + long double c; +}; +#pragma pack(pop) + +int check1[__alignof(X) == 1 ? 1 : -1]; +int check2[__alignof(X) == 2 ? 1 : -1]; + +} diff --git a/test/SemaCXX/references.cpp b/test/SemaCXX/references.cpp index 70d3799..028c690 100644 --- a/test/SemaCXX/references.cpp +++ b/test/SemaCXX/references.cpp @@ -136,4 +136,4 @@ namespace PR8608 { } // The following crashed trying to recursively evaluate the LValue. -const int &do_not_crash = do_not_crash; +const int &do_not_crash = do_not_crash; // expected-warning{{variable 'do_not_crash' is uninitialized when used within its own initialization}} diff --git a/test/SemaCXX/uninitialized.cpp b/test/SemaCXX/uninitialized.cpp index 13d287bf..385548b 100644 --- a/test/SemaCXX/uninitialized.cpp +++ b/test/SemaCXX/uninitialized.cpp @@ -316,3 +316,84 @@ namespace { G(char (*)[8]) : f3(new F(f3->*ptr)) {} // expected-warning {{field is uninitialized when used here}} }; } + +namespace statics { + static int a = a; // no-warning: used to signal intended lack of initialization. + static int b = b + 1; // expected-warning {{variable 'b' is uninitialized when used within its own initialization}} + static int c = (c + c); // expected-warning 2{{variable 'c' is uninitialized when used within its own initialization}} + static int e = static_cast(e) + 1; // expected-warning {{variable 'e' is uninitialized when used within its own initialization}} + static int f = foo(f); // expected-warning {{variable 'f' is uninitialized when used within its own initialization}} + + // Thes don't warn as they don't require the value. + static int g = sizeof(g); + int gg = g; // Silence unneeded warning + static void* ptr = &ptr; + static int h = bar(&h); + static int i = boo(i); + static int j = far(j); + static int k = __alignof__(k); + + static int l = k ? 
l : l; // expected-warning 2{{variable 'l' is uninitialized when used within its own initialization}} + static int m = 1 + (k ? m : m); // expected-warning 2{{variable 'm' is uninitialized when used within its own initialization}} + static int n = -n; // expected-warning {{variable 'n' is uninitialized when used within its own initialization}} + + void test() { + static int a = a; // no-warning: used to signal intended lack of initialization. + static int b = b + 1; // expected-warning {{variable 'b' is uninitialized when used within its own initialization}} + static int c = (c + c); // expected-warning 2{{variable 'c' is uninitialized when used within its own initialization}} + static int d = ({ d + d ;}); // expected-warning 2{{variable 'd' is uninitialized when used within its own initialization}} + static int e = static_cast(e) + 1; // expected-warning {{variable 'e' is uninitialized when used within its own initialization}} + static int f = foo(f); // expected-warning {{variable 'f' is uninitialized when used within its own initialization}} + + // Thes don't warn as they don't require the value. + static int g = sizeof(g); + static void* ptr = &ptr; + static int h = bar(&h); + static int i = boo(i); + static int j = far(j); + static int k = __alignof__(k); + + static int l = k ? l : l; // expected-warning 2{{variable 'l' is uninitialized when used within its own initialization}} + static int m = 1 + (k ? m : m); // expected-warning 2{{variable 'm' is uninitialized when used within its own initialization}} + static int n = -n; // expected-warning {{variable 'n' is uninitialized when used within its own initialization}} + for (;;) { + static int a = a; // no-warning: used to signal intended lack of initialization. + static int b = b + 1; // expected-warning {{variable 'b' is uninitialized when used within its own initialization}} + static int c = (c + c); // expected-warning 2{{variable 'c' is uninitialized when used within its own initialization}} + static int d = ({ d + d ;}); // expected-warning 2{{variable 'd' is uninitialized when used within its own initialization}} + static int e = static_cast(e) + 1; // expected-warning {{variable 'e' is uninitialized when used within its own initialization}} + static int f = foo(f); // expected-warning {{variable 'f' is uninitialized when used within its own initialization}} + + // Thes don't warn as they don't require the value. + static int g = sizeof(g); + static void* ptr = &ptr; + static int h = bar(&h); + static int i = boo(i); + static int j = far(j); + static int k = __alignof__(k); + + static int l = k ? l : l; // expected-warning 2{{variable 'l' is uninitialized when used within its own initialization}} + static int m = 1 + (k ? m : m); // expected-warning 2{{variable 'm' is uninitialized when used within its own initialization}} + static int n = -n; // expected-warning {{variable 'n' is uninitialized when used within its own initialization}} + } + } +} + +namespace references { + int &a = a; // expected-warning{{variable 'a' is uninitialized when used within its own initialization}} + + struct S { + S() : a(a) {} // expected-warning{{field is uninitialized when used here}} + int &a; + }; + + void f() { + int &a = a; // expected-warning{{variable 'a' is uninitialized when used within its own initialization}} + } + + struct T { + T() : a(b), b(a) {} // FIXME: Warn here. + int &a, &b; + int &c = c; // FIXME: Warn here. 
+ }; +} diff --git a/test/SemaCXX/warn-thread-safety-parsing.cpp b/test/SemaCXX/warn-thread-safety-parsing.cpp index 3f8a735..8aa6a91 100644 --- a/test/SemaCXX/warn-thread-safety-parsing.cpp +++ b/test/SemaCXX/warn-thread-safety-parsing.cpp @@ -1429,4 +1429,14 @@ class Foo { } +namespace InvalidDeclTest { + +class Foo { }; +namespace { +void Foo::bar(Mutex* mu) LOCKS_EXCLUDED(mu) { } // \ + // expected-error {{cannot define or redeclare 'bar' here because namespace '' does not enclose namespace 'Foo'}} \ + // expected-warning {{attribute locks_excluded ignored, because it is not attached to a declaration}} +} + +} // end namespace InvalidDeclTest diff --git a/test/SemaObjC/warn-cast-of-sel-expr.m b/test/SemaObjC/warn-cast-of-sel-expr.m new file mode 100644 index 0000000..97915a0 --- /dev/null +++ b/test/SemaObjC/warn-cast-of-sel-expr.m @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -Wno-unused-value %s +// RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify -Wcast-of-sel-type -Wno-unused-value %s +// rdar://12107381 + +SEL s; + +SEL sel_registerName(const char *); + +int main() { +(char *)s; // expected-warning {{cast of type 'SEL' to 'char *' is deprecated; use sel_getName instead}} +(void *)s; // ok +(const char *)sel_registerName("foo"); // expected-warning {{cast of type 'SEL' to 'const char *' is deprecated; use sel_getName instead}} + +(const void *)sel_registerName("foo"); // ok + +(void) s; // ok + +(void *const)s; // ok + +(const void *const)s; // ok +} diff --git a/test/SemaObjCXX/abstract-class-type-ivar.mm b/test/SemaObjCXX/abstract-class-type-ivar.mm new file mode 100644 index 0000000..823e9c1 --- /dev/null +++ b/test/SemaObjCXX/abstract-class-type-ivar.mm @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// rdar://12095239 + +class CppAbstractBase { +public: + virtual void testA() = 0; + virtual void testB() = 0; // expected-note {{unimplemented pure virtual method 'testB' in 'CppConcreteSub}} + int a; +}; + +class CppConcreteSub : public CppAbstractBase { + virtual void testA() { } +}; + +@interface Objc { + CppConcreteSub _concrete; // expected-error{{ivar type 'CppConcreteSub' is an abstract class}} +} +- (CppAbstractBase*)abstract; +@end +@implementation Objc +- (CppAbstractBase*)abstract { + return &_concrete; +} +@end + +class Cpp { +public: + CppConcreteSub sub; // expected-error {{field type 'CppConcreteSub' is an abstract class}} +}; diff --git a/test/Tooling/clang-check-ast-dump.cpp b/test/Tooling/clang-check-ast-dump.cpp index 86533af..e29072b 100644 --- a/test/Tooling/clang-check-ast-dump.cpp +++ b/test/Tooling/clang-check-ast-dump.cpp @@ -22,6 +22,10 @@ // CHECK-LIST-NEXT: test_namespace::TheClass // CHECK-LIST-NEXT: test_namespace::TheClass::theMethod // CHECK-LIST-NEXT: x +// +// RUN: clang-check -ast-dump -ast-dump-filter test_namespace::TheClass::n "%s" -- 2>&1 | FileCheck -check-prefix CHECK-ATTR %s +// CHECK-ATTR: test_namespace +// CHECK-ATTR-NEXT: int n __attribute__((aligned((BinaryOperator namespace test_namespace { @@ -30,6 +34,7 @@ public: int theMethod(int x) { return x + x; } + int n __attribute__((aligned(1+1))); }; } diff --git a/unittests/ASTMatchers/ASTMatchersTest.cpp b/unittests/ASTMatchers/ASTMatchersTest.cpp index cf37c7d..adf0e94 100644 --- a/unittests/ASTMatchers/ASTMatchersTest.cpp +++ b/unittests/ASTMatchers/ASTMatchersTest.cpp @@ -1099,6 +1099,12 @@ TEST(Returns, MatchesReturnTypes) { function(returns(hasDeclaration(record(hasName("Y"))))))); } +TEST(IsExternC, 
MatchesExternCFunctionDeclarations) { + EXPECT_TRUE(matches("extern \"C\" void f() {}", function(isExternC()))); + EXPECT_TRUE(matches("extern \"C\" { void f() {} }", function(isExternC()))); + EXPECT_TRUE(notMatches("void f() {}", function(isExternC()))); +} + TEST(HasAnyParameter, DoesntMatchIfInnerMatcherDoesntMatch) { EXPECT_TRUE(notMatches("class Y {}; class X { void x(int) {} };", method(hasAnyParameter(hasType(record(hasName("X"))))))); @@ -2023,6 +2029,28 @@ TEST(IsConstQualified, DoesNotMatchInappropriately) { variable(hasType(isConstQualified())))); } +TEST(CastExpression, MatchesExplicitCasts) { + EXPECT_TRUE(matches("char *p = reinterpret_cast(&p);", + expression(castExpr()))); + EXPECT_TRUE(matches("void *p = (void *)(&p);", expression(castExpr()))); + EXPECT_TRUE(matches("char q, *p = const_cast(&q);", + expression(castExpr()))); + EXPECT_TRUE(matches("char c = char(0);", expression(castExpr()))); +} +TEST(CastExpression, MatchesImplicitCasts) { + // This test creates an implicit cast from int to char. + EXPECT_TRUE(matches("char c = 0;", expression(castExpr()))); + // This test creates an implicit cast from lvalue to rvalue. + EXPECT_TRUE(matches("char c = 0, d = c;", expression(castExpr()))); +} + +TEST(CastExpression, DoesNotMatchNonCasts) { + EXPECT_TRUE(notMatches("char c = '0';", expression(castExpr()))); + EXPECT_TRUE(notMatches("char c, &q = c;", expression(castExpr()))); + EXPECT_TRUE(notMatches("int i = (0);", expression(castExpr()))); + EXPECT_TRUE(notMatches("int i = 0;", expression(castExpr()))); +} + TEST(ReinterpretCast, MatchesSimpleCase) { EXPECT_TRUE(matches("char* p = reinterpret_cast(&p);", expression(reinterpretCast()))); @@ -2089,6 +2117,201 @@ TEST(HasDestinationType, MatchesSimpleCase) { pointsTo(TypeMatcher(anything()))))))); } +TEST(HasImplicitDestinationType, MatchesSimpleCase) { + // This test creates an implicit const cast. + EXPECT_TRUE(matches("int x; const int i = x;", + expression(implicitCast( + hasImplicitDestinationType(isInteger()))))); + // This test creates an implicit array-to-pointer cast. + EXPECT_TRUE(matches("int arr[3]; int *p = arr;", + expression(implicitCast(hasImplicitDestinationType( + pointsTo(TypeMatcher(anything()))))))); +} + +TEST(HasImplicitDestinationType, DoesNotMatchIncorrectly) { + // This test creates an implicit cast from int to char. + EXPECT_TRUE(notMatches("char c = 0;", + expression(implicitCast(hasImplicitDestinationType( + unless(anything())))))); + // This test creates an implicit array-to-pointer cast. + EXPECT_TRUE(notMatches("int arr[3]; int *p = arr;", + expression(implicitCast(hasImplicitDestinationType( + unless(anything())))))); +} + +TEST(ImplicitCast, MatchesSimpleCase) { + // This test creates an implicit const cast. + EXPECT_TRUE(matches("int x = 0; const int y = x;", + variable(hasInitializer(implicitCast())))); + // This test creates an implicit cast from int to char. + EXPECT_TRUE(matches("char c = 0;", + variable(hasInitializer(implicitCast())))); + // This test creates an implicit array-to-pointer cast. + EXPECT_TRUE(matches("int arr[6]; int *p = arr;", + variable(hasInitializer(implicitCast())))); +} + +TEST(ImplicitCast, DoesNotMatchIncorrectly) { + // This test verifies that implicitCast() matches exactly when implicit casts + // are present, and that it ignores explicit and paren casts. + + // These two test cases have no casts. 
+ EXPECT_TRUE(notMatches("int x = 0;", + variable(hasInitializer(implicitCast())))); + EXPECT_TRUE(notMatches("int x = 0, &y = x;", + variable(hasInitializer(implicitCast())))); + + EXPECT_TRUE(notMatches("int x = 0; double d = (double) x;", + variable(hasInitializer(implicitCast())))); + EXPECT_TRUE(notMatches("const int *p; int *q = const_cast(p);", + variable(hasInitializer(implicitCast())))); + + EXPECT_TRUE(notMatches("int x = (0);", + variable(hasInitializer(implicitCast())))); +} + +TEST(IgnoringImpCasts, MatchesImpCasts) { + // This test checks that ignoringImpCasts matches when implicit casts are + // present and its inner matcher alone does not match. + // Note that this test creates an implicit const cast. + EXPECT_TRUE(matches("int x = 0; const int y = x;", + variable(hasInitializer(ignoringImpCasts( + declarationReference(to(variable(hasName("x"))))))))); + // This test creates an implict cast from int to char. + EXPECT_TRUE(matches("char x = 0;", + variable(hasInitializer(ignoringImpCasts( + integerLiteral(equals(0))))))); +} + +TEST(IgnoringImpCasts, DoesNotMatchIncorrectly) { + // These tests verify that ignoringImpCasts does not match if the inner + // matcher does not match. + // Note that the first test creates an implicit const cast. + EXPECT_TRUE(notMatches("int x; const int y = x;", + variable(hasInitializer(ignoringImpCasts( + unless(anything())))))); + EXPECT_TRUE(notMatches("int x; int y = x;", + variable(hasInitializer(ignoringImpCasts( + unless(anything())))))); + + // These tests verify that ignoringImplictCasts does not look through explicit + // casts or parentheses. + EXPECT_TRUE(notMatches("char* p = static_cast(0);", + variable(hasInitializer(ignoringImpCasts( + integerLiteral()))))); + EXPECT_TRUE(notMatches("int i = (0);", + variable(hasInitializer(ignoringImpCasts( + integerLiteral()))))); + EXPECT_TRUE(notMatches("float i = (float)0;", + variable(hasInitializer(ignoringImpCasts( + integerLiteral()))))); + EXPECT_TRUE(notMatches("float i = float(0);", + variable(hasInitializer(ignoringImpCasts( + integerLiteral()))))); +} + +TEST(IgnoringImpCasts, MatchesWithoutImpCasts) { + // This test verifies that expressions that do not have implicit casts + // still match the inner matcher. + EXPECT_TRUE(matches("int x = 0; int &y = x;", + variable(hasInitializer(ignoringImpCasts( + declarationReference(to(variable(hasName("x"))))))))); +} + +TEST(IgnoringParenCasts, MatchesParenCasts) { + // This test checks that ignoringParenCasts matches when parentheses and/or + // casts are present and its inner matcher alone does not match. + EXPECT_TRUE(matches("int x = (0);", + variable(hasInitializer(ignoringParenCasts( + integerLiteral(equals(0))))))); + EXPECT_TRUE(matches("int x = (((((0)))));", + variable(hasInitializer(ignoringParenCasts( + integerLiteral(equals(0))))))); + + // This test creates an implict cast from int to char in addition to the + // parentheses. + EXPECT_TRUE(matches("char x = (0);", + variable(hasInitializer(ignoringParenCasts( + integerLiteral(equals(0))))))); + + EXPECT_TRUE(matches("char x = (char)0;", + variable(hasInitializer(ignoringParenCasts( + integerLiteral(equals(0))))))); + EXPECT_TRUE(matches("char* p = static_cast(0);", + variable(hasInitializer(ignoringParenCasts( + integerLiteral(equals(0))))))); +} + +TEST(IgnoringParenCasts, MatchesWithoutParenCasts) { + // This test verifies that expressions that do not have any casts still match. 
+ EXPECT_TRUE(matches("int x = 0;", + variable(hasInitializer(ignoringParenCasts( + integerLiteral(equals(0))))))); +} + +TEST(IgnoringParenCasts, DoesNotMatchIncorrectly) { + // These tests verify that ignoringImpCasts does not match if the inner + // matcher does not match. + EXPECT_TRUE(notMatches("int x = ((0));", + variable(hasInitializer(ignoringParenCasts( + unless(anything())))))); + + // This test creates an implicit cast from int to char in addition to the + // parentheses. + EXPECT_TRUE(notMatches("char x = ((0));", + variable(hasInitializer(ignoringParenCasts( + unless(anything())))))); + + EXPECT_TRUE(notMatches("char *x = static_cast((0));", + variable(hasInitializer(ignoringParenCasts( + unless(anything())))))); +} + +TEST(IgnoringParenAndImpCasts, MatchesParenImpCasts) { + // This test checks that ignoringParenAndImpCasts matches when + // parentheses and/or implicit casts are present and its inner matcher alone + // does not match. + // Note that this test creates an implicit const cast. + EXPECT_TRUE(matches("int x = 0; const int y = x;", + variable(hasInitializer(ignoringParenImpCasts( + declarationReference(to(variable(hasName("x"))))))))); + // This test creates an implicit cast from int to char. + EXPECT_TRUE(matches("const char x = (0);", + variable(hasInitializer(ignoringParenImpCasts( + integerLiteral(equals(0))))))); +} + +TEST(IgnoringParenAndImpCasts, MatchesWithoutParenImpCasts) { + // This test verifies that expressions that do not have parentheses or + // implicit casts still match. + EXPECT_TRUE(matches("int x = 0; int &y = x;", + variable(hasInitializer(ignoringParenImpCasts( + declarationReference(to(variable(hasName("x"))))))))); + EXPECT_TRUE(matches("int x = 0;", + variable(hasInitializer(ignoringParenImpCasts( + integerLiteral(equals(0))))))); +} + +TEST(IgnoringParenAndImpCasts, DoesNotMatchIncorrectly) { + // These tests verify that ignoringParenImpCasts does not match if + // the inner matcher does not match. + // This test creates an implicit cast. + EXPECT_TRUE(notMatches("char c = ((3));", + variable(hasInitializer(ignoringParenImpCasts( + unless(anything())))))); + // These tests verify that ignoringParenAndImplictCasts does not look + // through explicit casts. 
+ EXPECT_TRUE(notMatches("float y = (float(0));", + variable(hasInitializer(ignoringParenImpCasts( + integerLiteral()))))); + EXPECT_TRUE(notMatches("float y = (float)0;", + variable(hasInitializer(ignoringParenImpCasts( + integerLiteral()))))); + EXPECT_TRUE(notMatches("char* p = static_cast(0);", + variable(hasInitializer(ignoringParenImpCasts( + integerLiteral()))))); +} + TEST(HasSourceExpression, MatchesImplicitCasts) { EXPECT_TRUE(matches("class string {}; class URL { public: URL(string s); };" "void r() {string a_string; URL url = a_string; }", @@ -2154,6 +2377,40 @@ TEST(UsingDeclaration, ThroughUsingDeclaration) { declarationReference(throughUsingDecl(anything())))); } +TEST(SingleDecl, IsSingleDecl) { + StatementMatcher SingleDeclStmt = + declarationStatement(hasSingleDecl(variable(hasInitializer(anything())))); + EXPECT_TRUE(matches("void f() {int a = 4;}", SingleDeclStmt)); + EXPECT_TRUE(notMatches("void f() {int a;}", SingleDeclStmt)); + EXPECT_TRUE(notMatches("void f() {int a = 4, b = 3;}", + SingleDeclStmt)); +} + +TEST(DeclStmt, ContainsDeclaration) { + DeclarationMatcher MatchesInit = variable(hasInitializer(anything())); + + EXPECT_TRUE(matches("void f() {int a = 4;}", + declarationStatement(containsDeclaration(0, + MatchesInit)))); + EXPECT_TRUE(matches("void f() {int a = 4, b = 3;}", + declarationStatement(containsDeclaration(0, MatchesInit), + containsDeclaration(1, + MatchesInit)))); + unsigned WrongIndex = 42; + EXPECT_TRUE(notMatches("void f() {int a = 4, b = 3;}", + declarationStatement(containsDeclaration(WrongIndex, + MatchesInit)))); +} + +TEST(DeclCount, DeclCountIsCorrect) { + EXPECT_TRUE(matches("void f() {int i,j;}", + declarationStatement(declCountIs(2)))); + EXPECT_TRUE(notMatches("void f() {int i,j; int k;}", + declarationStatement(declCountIs(3)))); + EXPECT_TRUE(notMatches("void f() {int i,j, k, l;}", + declarationStatement(declCountIs(3)))); +} + TEST(While, MatchesWhileLoops) { EXPECT_TRUE(notMatches("void x() {}", whileStmt())); EXPECT_TRUE(matches("void x() { while(true); }", whileStmt())); diff --git a/unittests/Tooling/RecursiveASTVisitorTest.cpp b/unittests/Tooling/RecursiveASTVisitorTest.cpp index f3ba646..4b53906 100644 --- a/unittests/Tooling/RecursiveASTVisitorTest.cpp +++ b/unittests/Tooling/RecursiveASTVisitorTest.cpp @@ -385,4 +385,11 @@ TEST(RecursiveASTVisitor, VisitsImplicitCopyConstructors) { "int main() { Simple s; Simple t(s); }\n")); } +TEST(RecursiveASTVisitor, VisitsExtension) { + DeclRefExprVisitor Visitor; + Visitor.ExpectMatch("s", 1, 24); + EXPECT_TRUE(Visitor.runOver( + "int s = __extension__ (s);\n")); +} + } // end namespace clang diff --git a/utils/TableGen/ClangAttrEmitter.cpp b/utils/TableGen/ClangAttrEmitter.cpp index 1b1a478..ef1ad3e 100644 --- a/utils/TableGen/ClangAttrEmitter.cpp +++ b/utils/TableGen/ClangAttrEmitter.cpp @@ -349,7 +349,9 @@ namespace { << "Type(), Record);\n"; } void writeValue(raw_ostream &OS) const { - OS << "\" << get" << getUpperName() << "(Ctx) << \""; + OS << "\";\n" + << " " << getLowerName() << "Expr->printPretty(OS, 0, Policy);\n" + << " OS << \""; } }; @@ -728,7 +730,8 @@ void EmitClangAttrClass(RecordKeeper &Records, raw_ostream &OS) { OS << " }\n\n"; OS << " virtual " << R.getName() << "Attr *clone (ASTContext &C) const;\n"; - OS << " virtual void printPretty(llvm::raw_ostream &OS, ASTContext &Ctx) const;\n"; + OS << " virtual void printPretty(llvm::raw_ostream &OS," + << " const PrintingPolicy &Policy) const;\n"; for (ai = Args.begin(); ai != ae; ++ai) { (*ai)->writeAccessors(OS); @@ 
-786,7 +789,7 @@ void EmitClangAttrImpl(RecordKeeper &Records, raw_ostream &OS) { OS << ");\n}\n\n"; OS << "void " << R.getName() << "Attr::printPretty(" - << "llvm::raw_ostream &OS, ASTContext &Ctx) const {\n"; + << "llvm::raw_ostream &OS, const PrintingPolicy &Policy) const {\n"; if (Spellings.begin() != Spellings.end()) { std::string Spelling = (*Spellings.begin())->getValueAsString("Name"); OS << " OS << \" __attribute__((" << Spelling; diff --git a/utils/analyzer/CmpRuns.py b/utils/analyzer/CmpRuns.py index f2961cf..c8f05cb 100755 --- a/utils/analyzer/CmpRuns.py +++ b/utils/analyzer/CmpRuns.py @@ -146,7 +146,9 @@ def loadResults(path, opts, root = "", deleteEmpty=True): for d in data['diagnostics']: # FIXME: Why is this named files, when does it have multiple # files? - assert len(d['HTMLDiagnostics_files']) == 1 + # TODO: Add the assert back in after we fix the + # plist-html output. + # assert len(d['HTMLDiagnostics_files']) == 1 htmlFiles.append(d.pop('HTMLDiagnostics_files')[0]) else: htmlFiles = [None] * len(data['diagnostics']) diff --git a/www/comparison.html b/www/comparison.html index 58c4b31..01b8aea 100644 --- a/www/comparison.html +++ b/www/comparison.html @@ -103,9 +103,9 @@ sometimes acceptable, but are often confusing and it does not support expressive diagnostics. Clang also preserves typedefs in diagnostics consistently, showing macro expansions and many other features. -

-    <li>GCC is licensed under the GPL license. clang uses a BSD license, which
-    allows it to be used by projects that do not themselves want to be
-    GPL.</li>
+    <li>GCC is licensed under the GPL license.
+    clang uses a BSD license, which allows it to be embedded in
+    software that is not GPL-licensed.</li>
  • Clang inherits a number of features from its use of LLVM as a backend, including support for a bytecode representation for intermediate code, pluggable optimizers, link-time optimization support, Just-In-Time -- cgit v1.1 From 6de2c08bc400b4aca9fb46684e8bdb56eed9b09f Mon Sep 17 00:00:00 2001 From: dim Date: Sun, 2 Dec 2012 13:10:19 +0000 Subject: Vendor import of llvm release_32 branch r168974 (effectively, 3.2 RC2): http://llvm.org/svn/llvm-project/llvm/branches/release_32@168974 --- .gitignore | 2 + CMakeLists.txt | 31 +- CREDITS.TXT | 16 +- Makefile | 10 +- Makefile.config.in | 1 + Makefile.rules | 38 +- autoconf/configure.ac | 73 +- .../ocaml/executionengine/executionengine_ocaml.c | 3 + .../ocaml/executionengine/llvm_executionengine.ml | 2 +- .../ocaml/executionengine/llvm_executionengine.mli | 2 +- bindings/ocaml/llvm/llvm.mli | 2 +- bindings/ocaml/llvm/llvm_ocaml.c | 3 + bindings/ocaml/target/llvm_target.ml | 26 +- bindings/ocaml/target/llvm_target.mli | 56 +- bindings/ocaml/target/target_ocaml.c | 30 +- cmake/config-ix.cmake | 23 +- cmake/modules/AddLLVM.cmake | 25 +- cmake/modules/LLVMProcessSources.cmake | 2 +- cmake/platforms/Android.cmake | 21 + configure | 369 +- docs/AliasAnalysis.rst | 2 +- docs/BitCodeFormat.rst | 49 +- docs/CMake.rst | 5 - docs/CodeGenerator.rst | 33 +- docs/CodingStandards.rst | 253 +- docs/CommandGuide/FileCheck.rst | 10 +- docs/CommandGuide/lit.rst | 13 + docs/CompilerWriterInfo.html | 267 -- docs/CompilerWriterInfo.rst | 118 + docs/DebuggingJITedCode.html | 184 - docs/DebuggingJITedCode.rst | 147 + docs/DeveloperPolicy.rst | 5 +- docs/ExtendingLLVM.html | 379 -- docs/ExtendingLLVM.rst | 306 ++ docs/GarbageCollection.html | 6 +- docs/GettingStarted.html | 1760 --------- docs/GettingStarted.rst | 1304 +++++++ docs/GoldPlugin.html | 227 -- docs/GoldPlugin.rst | 186 + docs/HowToAddABuilder.html | 142 - docs/HowToAddABuilder.rst | 90 + docs/HowToBuildOnARM.rst | 47 + docs/HowToSetUpLLVMStyleRTTI.rst | 332 ++ docs/HowToSubmitABug.html | 345 -- docs/HowToSubmitABug.rst | 233 ++ docs/HowToUseInstrMappings.rst | 179 + docs/LangRef.html | 107 +- docs/Lexicon.rst | 13 +- docs/LinkTimeOptimization.rst | 2 + docs/Makefile.sphinx | 4 + docs/MarkedUpDisassembly.rst | 88 + docs/Passes.html | 39 +- docs/Phabricator.rst | 100 + docs/ProgrammersManual.html | 29 +- docs/README.txt | 2 +- docs/ReleaseNotes.html | 101 +- docs/SourceLevelDebugging.html | 8 +- docs/SphinxQuickstartTemplate.rst | 125 + docs/TestingGuide.html | 23 +- docs/WritingAnLLVMBackend.html | 36 +- docs/_themes/llvm-theme/layout.html | 23 + docs/_themes/llvm-theme/static/contents.png | Bin 0 -> 202 bytes docs/_themes/llvm-theme/static/llvm-theme.css | 371 ++ docs/_themes/llvm-theme/static/logo.png | Bin 0 -> 9864 bytes docs/_themes/llvm-theme/static/navigation.png | Bin 0 -> 218 bytes docs/_themes/llvm-theme/theme.conf | 4 + docs/conf.py | 21 +- docs/index.rst | 46 +- docs/llvm-theme/layout.html | 23 - docs/llvm-theme/static/contents.png | Bin 202 -> 0 bytes docs/llvm-theme/static/llvm-theme.css | 374 -- docs/llvm-theme/static/logo.png | Bin 9864 -> 0 bytes docs/llvm-theme/static/navigation.png | Bin 218 -> 0 bytes docs/llvm-theme/theme.conf | 4 - docs/programming.rst | 19 +- docs/subsystems.rst | 19 +- docs/tutorial/LangImpl4.html | 8 +- docs/tutorial/LangImpl5.html | 6 +- docs/tutorial/LangImpl6.html | 6 +- docs/tutorial/LangImpl7.html | 8 +- docs/tutorial/OCamlLangImpl4.html | 6 +- docs/tutorial/OCamlLangImpl5.html | 4 +- docs/tutorial/OCamlLangImpl6.html | 4 +- docs/tutorial/OCamlLangImpl7.html | 
6 +- docs/userguides.rst | 23 +- docs/yaml2obj.rst | 4 +- examples/ExceptionDemo/ExceptionDemo.cpp | 1181 +++--- examples/Fibonacci/fibonacci.cpp | 2 +- examples/Kaleidoscope/Chapter4/toy.cpp | 4 +- examples/Kaleidoscope/Chapter5/toy.cpp | 4 +- examples/Kaleidoscope/Chapter6/toy.cpp | 4 +- examples/Kaleidoscope/Chapter7/toy.cpp | 4 +- examples/OCaml-Kaleidoscope/Chapter4/toy.ml | 2 +- examples/OCaml-Kaleidoscope/Chapter5/toy.ml | 2 +- examples/OCaml-Kaleidoscope/Chapter6/toy.ml | 2 +- examples/OCaml-Kaleidoscope/Chapter7/toy.ml | 2 +- include/llvm-c/Core.h | 39 +- include/llvm-c/Disassembler.h | 9 + include/llvm-c/Target.h | 46 +- include/llvm-c/TargetMachine.h | 2 +- include/llvm-c/Transforms/Vectorize.h | 3 + include/llvm/ADT/APFloat.h | 11 +- include/llvm/ADT/APInt.h | 29 +- include/llvm/ADT/ArrayRef.h | 13 +- include/llvm/ADT/BitVector.h | 86 +- include/llvm/ADT/DAGDeltaAlgorithm.h | 15 +- include/llvm/ADT/DeltaAlgorithm.h | 14 +- include/llvm/ADT/DenseMap.h | 11 +- include/llvm/ADT/DenseMapInfo.h | 6 +- include/llvm/ADT/EquivalenceClasses.h | 2 + include/llvm/ADT/FoldingSet.h | 9 + include/llvm/ADT/Hashing.h | 3 +- include/llvm/ADT/ImmutableList.h | 5 +- include/llvm/ADT/ImmutableMap.h | 4 +- include/llvm/ADT/ImmutableSet.h | 81 +- include/llvm/ADT/MapVector.h | 90 + include/llvm/ADT/Optional.h | 9 + include/llvm/ADT/OwningPtr.h | 27 +- include/llvm/ADT/PackedVector.h | 27 +- include/llvm/ADT/PointerIntPair.h | 4 +- include/llvm/ADT/ScopedHashTable.h | 4 +- include/llvm/ADT/SetVector.h | 92 +- include/llvm/ADT/SmallBitVector.h | 30 + include/llvm/ADT/SmallPtrSet.h | 7 +- include/llvm/ADT/SmallString.h | 100 +- include/llvm/ADT/SmallVector.h | 156 +- include/llvm/ADT/SparseBitVector.h | 18 +- include/llvm/ADT/SparseSet.h | 10 +- include/llvm/ADT/StringExtras.h | 23 +- include/llvm/ADT/StringRef.h | 163 +- include/llvm/ADT/StringSet.h | 9 +- include/llvm/ADT/Trie.h | 334 -- include/llvm/ADT/Triple.h | 34 +- include/llvm/ADT/Twine.h | 28 +- include/llvm/ADT/ValueMap.h | 4 +- include/llvm/ADT/ilist.h | 5 +- include/llvm/AddressingMode.h | 41 + include/llvm/Analysis/AliasAnalysis.h | 26 +- include/llvm/Analysis/AliasSetTracker.h | 5 +- include/llvm/Analysis/BranchProbabilityInfo.h | 38 +- include/llvm/Analysis/CallGraph.h | 6 +- include/llvm/Analysis/CaptureTracking.h | 2 +- include/llvm/Analysis/CodeMetrics.h | 8 +- include/llvm/Analysis/ConstantFolding.h | 16 +- include/llvm/Analysis/DependenceAnalysis.h | 885 +++++ include/llvm/Analysis/Dominators.h | 2 +- include/llvm/Analysis/IVUsers.h | 4 +- include/llvm/Analysis/InlineCost.h | 11 +- include/llvm/Analysis/InstructionSimplify.h | 54 +- include/llvm/Analysis/IntervalPartition.h | 4 +- include/llvm/Analysis/LazyValueInfo.h | 8 +- include/llvm/Analysis/Loads.h | 4 +- include/llvm/Analysis/LoopDependenceAnalysis.h | 124 - include/llvm/Analysis/LoopInfo.h | 15 +- include/llvm/Analysis/LoopInfoImpl.h | 1 - include/llvm/Analysis/MemoryBuiltins.h | 70 +- include/llvm/Analysis/MemoryDependenceAnalysis.h | 6 +- include/llvm/Analysis/PHITransAddr.h | 6 +- include/llvm/Analysis/Passes.h | 23 +- include/llvm/Analysis/ProfileDataLoader.h | 139 + include/llvm/Analysis/ProfileDataTypes.h | 39 + include/llvm/Analysis/ProfileInfoTypes.h | 10 +- include/llvm/Analysis/RegionInfo.h | 39 +- include/llvm/Analysis/ScalarEvolution.h | 10 +- include/llvm/Analysis/ScalarEvolutionExpressions.h | 14 - include/llvm/Analysis/SparsePropagation.h | 6 +- include/llvm/Analysis/ValueTracking.h | 26 +- include/llvm/Argument.h | 5 +- include/llvm/Attributes.h | 553 +-- 
include/llvm/BasicBlock.h | 5 +- include/llvm/Bitcode/Archive.h | 14 +- include/llvm/Bitcode/BitstreamReader.h | 8 +- include/llvm/Bitcode/BitstreamWriter.h | 4 +- include/llvm/Bitcode/LLVMBitCodes.h | 7 +- include/llvm/CallingConv.h | 24 +- include/llvm/CodeGen/AsmPrinter.h | 10 +- include/llvm/CodeGen/CallingConvLower.h | 2 + include/llvm/CodeGen/CommandFlags.h | 228 ++ include/llvm/CodeGen/FastISel.h | 4 +- include/llvm/CodeGen/GCMetadata.h | 5 + include/llvm/CodeGen/GCMetadataPrinter.h | 7 +- include/llvm/CodeGen/ISDOpcodes.h | 4 + include/llvm/CodeGen/IntrinsicLowering.h | 6 +- include/llvm/CodeGen/LiveInterval.h | 35 +- include/llvm/CodeGen/LiveIntervalAnalysis.h | 49 +- include/llvm/CodeGen/LiveVariables.h | 6 - include/llvm/CodeGen/MachineBasicBlock.h | 26 +- .../llvm/CodeGen/MachineBranchProbabilityInfo.h | 9 +- include/llvm/CodeGen/MachineConstantPool.h | 6 +- include/llvm/CodeGen/MachineFrameInfo.h | 27 +- include/llvm/CodeGen/MachineFunction.h | 12 +- include/llvm/CodeGen/MachineInstr.h | 83 +- include/llvm/CodeGen/MachineInstrBuilder.h | 17 +- include/llvm/CodeGen/MachineInstrBundle.h | 40 +- include/llvm/CodeGen/MachineJumpTableInfo.h | 6 +- include/llvm/CodeGen/MachineLoopInfo.h | 4 +- include/llvm/CodeGen/MachineMemOperand.h | 9 + include/llvm/CodeGen/MachineModuleInfoImpls.h | 4 +- include/llvm/CodeGen/MachineOperand.h | 49 +- include/llvm/CodeGen/MachinePostDominators.h | 87 + include/llvm/CodeGen/MachineRegisterInfo.h | 80 +- include/llvm/CodeGen/MachineSSAUpdater.h | 6 +- include/llvm/CodeGen/MachineScheduler.h | 237 ++ include/llvm/CodeGen/PBQP/Graph.h | 21 +- include/llvm/CodeGen/PBQP/HeuristicBase.h | 5 +- include/llvm/CodeGen/Passes.h | 4 + include/llvm/CodeGen/PseudoSourceValue.h | 4 - include/llvm/CodeGen/RegAllocPBQP.h | 4 +- include/llvm/CodeGen/RegisterClassInfo.h | 19 - include/llvm/CodeGen/RegisterPressure.h | 3 +- include/llvm/CodeGen/RegisterScavenging.h | 9 +- include/llvm/CodeGen/ScheduleDAG.h | 99 +- include/llvm/CodeGen/ScheduleDAGILP.h | 86 + include/llvm/CodeGen/ScheduleDAGInstrs.h | 114 +- include/llvm/CodeGen/SchedulerRegistry.h | 5 + include/llvm/CodeGen/SelectionDAG.h | 16 +- include/llvm/CodeGen/SelectionDAGNodes.h | 65 +- include/llvm/CodeGen/TargetSchedule.h | 167 + include/llvm/CodeGen/ValueTypes.h | 128 +- include/llvm/CodeGen/ValueTypes.td | 62 +- include/llvm/Config/AsmParsers.def.in | 42 +- include/llvm/Config/AsmPrinters.def.in | 42 +- include/llvm/Config/Disassemblers.def.in | 42 +- include/llvm/Config/config.h.cmake | 9 +- include/llvm/Config/config.h.in | 3 + include/llvm/Constant.h | 9 +- include/llvm/Constants.h | 56 +- include/llvm/DIBuilder.h | 43 +- include/llvm/DataLayout.h | 429 +++ include/llvm/DebugInfo.h | 12 +- include/llvm/DebugInfo/DIContext.h | 36 +- include/llvm/DefaultPasses.h | 2 +- include/llvm/DerivedTypes.h | 35 +- include/llvm/ExecutionEngine/ExecutionEngine.h | 19 +- .../llvm/ExecutionEngine/IntelJITEventsWrapper.h | 102 - include/llvm/ExecutionEngine/JITEventListener.h | 15 + include/llvm/ExecutionEngine/JITMemoryManager.h | 31 +- include/llvm/ExecutionEngine/ObjectBuffer.h | 80 + include/llvm/ExecutionEngine/ObjectImage.h | 61 + include/llvm/ExecutionEngine/RuntimeDyld.h | 47 +- include/llvm/Function.h | 85 +- include/llvm/GlobalAlias.h | 5 +- include/llvm/GlobalValue.h | 32 +- include/llvm/GlobalVariable.h | 7 +- include/llvm/IRBuilder.h | 69 +- include/llvm/InitializePasses.h | 13 +- include/llvm/InlineAsm.h | 30 +- include/llvm/InstrTypes.h | 29 +- include/llvm/Instruction.h | 5 +- include/llvm/Instructions.h 
| 437 ++- include/llvm/IntrinsicInst.h | 58 +- include/llvm/Intrinsics.h | 4 +- include/llvm/Intrinsics.td | 12 +- include/llvm/IntrinsicsARM.td | 437 ++- include/llvm/IntrinsicsMips.td | 125 + include/llvm/IntrinsicsX86.td | 82 +- include/llvm/LLVMContext.h | 10 +- include/llvm/LinkAllPasses.h | 8 +- include/llvm/MC/MCAsmBackend.h | 24 +- include/llvm/MC/MCAsmInfo.h | 20 +- include/llvm/MC/MCAssembler.h | 38 +- include/llvm/MC/MCCodeEmitter.h | 10 +- include/llvm/MC/MCContext.h | 5 +- include/llvm/MC/MCDwarf.h | 11 +- include/llvm/MC/MCELFObjectWriter.h | 9 +- include/llvm/MC/MCExpr.h | 21 +- include/llvm/MC/MCInst.h | 2 +- include/llvm/MC/MCInstPrinter.h | 13 +- include/llvm/MC/MCInstrDesc.h | 2 +- include/llvm/MC/MCLabel.h | 8 +- include/llvm/MC/MCMachObjectWriter.h | 6 +- include/llvm/MC/MCObjectFileInfo.h | 3 +- include/llvm/MC/MCObjectStreamer.h | 10 + include/llvm/MC/MCObjectWriter.h | 5 +- include/llvm/MC/MCParser/AsmLexer.h | 4 +- include/llvm/MC/MCParser/MCAsmLexer.h | 14 +- include/llvm/MC/MCParser/MCAsmParser.h | 38 +- include/llvm/MC/MCParser/MCAsmParserExtension.h | 8 +- include/llvm/MC/MCParser/MCParsedAsmOperand.h | 54 + include/llvm/MC/MCRegisterInfo.h | 9 +- include/llvm/MC/MCSchedule.h | 141 +- include/llvm/MC/MCSection.h | 8 +- include/llvm/MC/MCSectionCOFF.h | 1 - include/llvm/MC/MCSectionELF.h | 1 - include/llvm/MC/MCSectionMachO.h | 1 - include/llvm/MC/MCStreamer.h | 24 +- include/llvm/MC/MCSubtargetInfo.h | 65 +- include/llvm/MC/MCSymbol.h | 11 +- include/llvm/MC/MCTargetAsmLexer.h | 10 +- include/llvm/MC/MCTargetAsmParser.h | 77 +- include/llvm/MC/MCValue.h | 2 +- include/llvm/MC/SubtargetFeature.h | 6 +- include/llvm/MDBuilder.h | 21 + include/llvm/Metadata.h | 10 +- include/llvm/Object/Archive.h | 1 - include/llvm/Object/Binary.h | 5 +- include/llvm/Object/COFF.h | 3 +- include/llvm/Object/ELF.h | 370 +- include/llvm/Object/MachO.h | 3 +- include/llvm/Object/MachOFormat.h | 10 +- include/llvm/Object/ObjectFile.h | 43 +- include/llvm/Object/RelocVisitor.h | 131 + include/llvm/Operator.h | 50 +- include/llvm/Pass.h | 5 +- include/llvm/PassAnalysisSupport.h | 2 +- include/llvm/PassSupport.h | 4 +- include/llvm/Support/AlignOf.h | 65 +- include/llvm/Support/Allocator.h | 8 +- include/llvm/Support/CallSite.h | 36 +- include/llvm/Support/Casting.h | 16 +- include/llvm/Support/CommandLine.h | 17 +- include/llvm/Support/Compiler.h | 35 +- include/llvm/Support/DataExtractor.h | 5 +- include/llvm/Support/ELF.h | 60 +- include/llvm/Support/FileOutputBuffer.h | 7 +- include/llvm/Support/FileSystem.h | 78 +- include/llvm/Support/Format.h | 40 +- include/llvm/Support/FormattedStream.h | 11 +- include/llvm/Support/GCOV.h | 32 +- include/llvm/Support/InstVisitor.h | 6 + include/llvm/Support/IntegersSubset.h | 8 +- include/llvm/Support/IntegersSubsetMapping.h | 18 +- include/llvm/Support/LEB128.h | 2 +- include/llvm/Support/LockFileManager.h | 4 +- include/llvm/Support/MathExtras.h | 31 +- include/llvm/Support/Memory.h | 65 + include/llvm/Support/MemoryBuffer.h | 5 +- include/llvm/Support/Mutex.h | 5 +- include/llvm/Support/MutexGuard.h | 4 +- include/llvm/Support/PathV1.h | 4 +- include/llvm/Support/PathV2.h | 113 +- include/llvm/Support/PrettyStackTrace.h | 10 +- include/llvm/Support/Program.h | 4 +- include/llvm/Support/RWMutex.h | 5 +- include/llvm/Support/Regex.h | 8 +- include/llvm/Support/Registry.h | 7 +- include/llvm/Support/SourceMgr.h | 8 +- include/llvm/Support/StreamableMemoryObject.h | 20 +- include/llvm/Support/TargetFolder.h | 15 +- 
include/llvm/Support/TargetRegistry.h | 44 +- include/llvm/Support/Threading.h | 4 +- include/llvm/Support/TimeValue.h | 7 +- include/llvm/Support/Timer.h | 7 +- include/llvm/Support/ValueHandle.h | 4 +- include/llvm/Support/YAMLParser.h | 13 +- include/llvm/Support/circular_raw_ostream.h | 4 +- include/llvm/Support/raw_os_ostream.h | 10 +- include/llvm/Support/raw_ostream.h | 64 +- include/llvm/Support/system_error.h | 8 +- include/llvm/Support/type_traits.h | 10 +- include/llvm/SymbolTableListTraits.h | 1 - include/llvm/TableGen/Error.h | 19 +- include/llvm/TableGen/Main.h | 9 +- include/llvm/TableGen/Record.h | 486 ++- include/llvm/TableGen/TableGenAction.h | 35 - include/llvm/Target/Mangler.h | 9 +- include/llvm/Target/Target.td | 104 +- include/llvm/Target/TargetCallingConv.h | 27 +- include/llvm/Target/TargetData.h | 363 -- include/llvm/Target/TargetELFWriterInfo.h | 121 - include/llvm/Target/TargetInstrInfo.h | 37 +- include/llvm/Target/TargetIntrinsicInfo.h | 5 +- include/llvm/Target/TargetLibraryInfo.h | 103 + include/llvm/Target/TargetLowering.h | 131 +- include/llvm/Target/TargetLoweringObjectFile.h | 9 +- include/llvm/Target/TargetMachine.h | 20 +- include/llvm/Target/TargetOpcodes.h | 6 +- include/llvm/Target/TargetOptions.h | 4 + include/llvm/Target/TargetRegisterInfo.h | 62 +- include/llvm/Target/TargetSchedule.td | 340 +- include/llvm/Target/TargetSelectionDAG.td | 4 +- include/llvm/Target/TargetSelectionDAGInfo.h | 10 +- include/llvm/Target/TargetSubtargetInfo.h | 23 +- include/llvm/Target/TargetTransformImpl.h | 98 + include/llvm/TargetTransformInfo.h | 204 ++ include/llvm/Transforms/IPO.h | 25 +- include/llvm/Transforms/IPO/InlinerPass.h | 2 +- include/llvm/Transforms/IPO/PassManagerBuilder.h | 1 + include/llvm/Transforms/Instrumentation.h | 2 +- include/llvm/Transforms/Scalar.h | 6 + include/llvm/Transforms/Utils/AddrModeMatcher.h | 3 +- include/llvm/Transforms/Utils/BasicBlockUtils.h | 28 +- include/llvm/Transforms/Utils/BuildLibCalls.h | 32 +- include/llvm/Transforms/Utils/BypassSlowDivision.h | 33 + include/llvm/Transforms/Utils/Cloning.h | 15 +- include/llvm/Transforms/Utils/IntegerDivision.h | 48 + include/llvm/Transforms/Utils/Local.h | 30 +- include/llvm/Transforms/Utils/SSAUpdater.h | 4 +- include/llvm/Transforms/Utils/SimplifyIndVar.h | 2 - include/llvm/Transforms/Utils/SimplifyLibCalls.h | 52 + include/llvm/Transforms/Utils/ValueMapper.h | 2 +- include/llvm/Transforms/Vectorize.h | 6 + include/llvm/Type.h | 23 +- include/llvm/Use.h | 3 +- include/llvm/User.h | 46 +- include/llvm/Value.h | 12 +- lib/Analysis/AliasAnalysis.cpp | 10 +- lib/Analysis/AliasSetTracker.cpp | 6 +- lib/Analysis/Analysis.cpp | 4 +- lib/Analysis/BasicAliasAnalysis.cpp | 170 +- lib/Analysis/BranchProbabilityInfo.cpp | 134 +- lib/Analysis/CMakeLists.txt | 5 +- lib/Analysis/CaptureTracking.cpp | 4 +- lib/Analysis/CodeMetrics.cpp | 10 +- lib/Analysis/ConstantFolding.cpp | 265 +- lib/Analysis/CostModel.cpp | 193 + lib/Analysis/DependenceAnalysis.cpp | 3786 ++++++++++++++++++++ lib/Analysis/DominanceFrontier.cpp | 2 + lib/Analysis/IPA/CallGraph.cpp | 13 +- lib/Analysis/IPA/GlobalsModRef.cpp | 6 +- lib/Analysis/IVUsers.cpp | 6 +- lib/Analysis/InlineCost.cpp | 95 +- lib/Analysis/InstructionSimplify.cpp | 89 +- lib/Analysis/LazyValueInfo.cpp | 52 +- lib/Analysis/Lint.cpp | 56 +- lib/Analysis/Loads.cpp | 8 +- lib/Analysis/LoopDependenceAnalysis.cpp | 362 -- lib/Analysis/LoopInfo.cpp | 11 +- lib/Analysis/MemoryBuiltins.cpp | 184 +- lib/Analysis/MemoryDependenceAnalysis.cpp | 30 +- 
lib/Analysis/NoAliasAnalysis.cpp | 4 +- lib/Analysis/PHITransAddr.cpp | 2 + lib/Analysis/ProfileDataLoader.cpp | 155 + lib/Analysis/ProfileDataLoaderPass.cpp | 188 + lib/Analysis/ProfileEstimatorPass.cpp | 2 +- lib/Analysis/ProfileInfo.cpp | 26 - lib/Analysis/RegionInfo.cpp | 26 +- lib/Analysis/RegionPass.cpp | 5 +- lib/Analysis/ScalarEvolution.cpp | 116 +- lib/Analysis/ScalarEvolutionExpander.cpp | 23 +- lib/Analysis/Trace.cpp | 2 + lib/Analysis/ValueTracking.cpp | 61 +- lib/Archive/ArchiveInternals.h | 2 +- lib/Archive/ArchiveReader.cpp | 4 +- lib/AsmParser/LLLexer.cpp | 9 +- lib/AsmParser/LLParser.cpp | 257 +- lib/AsmParser/LLParser.h | 2 +- lib/AsmParser/LLToken.h | 11 +- lib/Bitcode/Reader/BitcodeReader.cpp | 159 +- lib/Bitcode/Reader/BitcodeReader.h | 67 +- lib/Bitcode/Writer/BitcodeWriter.cpp | 130 +- lib/Bitcode/Writer/ValueEnumerator.h | 6 +- lib/CodeGen/AggressiveAntiDepBreaker.cpp | 4 +- lib/CodeGen/AllocationOrder.cpp | 5 +- lib/CodeGen/Analysis.cpp | 18 +- lib/CodeGen/AsmPrinter/ARMException.cpp | 2 +- lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 171 +- lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp | 4 +- lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp | 234 +- lib/CodeGen/AsmPrinter/DIE.cpp | 18 +- lib/CodeGen/AsmPrinter/DIE.h | 8 - lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp | 4 +- lib/CodeGen/AsmPrinter/DwarfAccelTable.h | 4 +- lib/CodeGen/AsmPrinter/DwarfCFIException.cpp | 2 +- lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp | 67 +- lib/CodeGen/AsmPrinter/DwarfCompileUnit.h | 7 +- lib/CodeGen/AsmPrinter/DwarfDebug.cpp | 203 +- lib/CodeGen/AsmPrinter/DwarfDebug.h | 27 +- lib/CodeGen/AsmPrinter/DwarfException.cpp | 4 +- lib/CodeGen/AsmPrinter/DwarfException.h | 40 +- lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp | 4 +- lib/CodeGen/AsmPrinter/Win64Exception.cpp | 2 +- lib/CodeGen/BranchFolding.cpp | 25 +- lib/CodeGen/CMakeLists.txt | 3 + lib/CodeGen/CalcSpillWeights.cpp | 6 +- lib/CodeGen/CallingConvLower.cpp | 4 +- lib/CodeGen/CodeGen.cpp | 2 + lib/CodeGen/CodePlacementOpt.cpp | 2 +- lib/CodeGen/CriticalAntiDepBreaker.cpp | 2 +- lib/CodeGen/DeadMachineInstructionElim.cpp | 8 +- lib/CodeGen/EarlyIfConversion.cpp | 10 +- lib/CodeGen/ExecutionDepsFix.cpp | 11 +- lib/CodeGen/ExpandPostRAPseudos.cpp | 4 +- lib/CodeGen/GCStrategy.cpp | 14 +- lib/CodeGen/IfConversion.cpp | 6 +- lib/CodeGen/InlineSpiller.cpp | 6 +- lib/CodeGen/IntrinsicLowering.cpp | 8 +- lib/CodeGen/LLVMTargetMachine.cpp | 6 +- lib/CodeGen/LiveDebugVariables.cpp | 3 +- lib/CodeGen/LiveInterval.cpp | 97 +- lib/CodeGen/LiveIntervalAnalysis.cpp | 831 ++--- lib/CodeGen/LiveIntervalUnion.h | 4 +- lib/CodeGen/LiveRangeCalc.cpp | 6 +- lib/CodeGen/LiveRangeEdit.cpp | 11 +- lib/CodeGen/LiveRegMatrix.cpp | 4 +- lib/CodeGen/LiveRegMatrix.h | 2 +- lib/CodeGen/LiveStackAnalysis.cpp | 5 +- lib/CodeGen/LiveVariables.cpp | 42 +- lib/CodeGen/MachineBasicBlock.cpp | 90 +- lib/CodeGen/MachineBlockPlacement.cpp | 6 +- lib/CodeGen/MachineBranchProbabilityInfo.cpp | 20 +- lib/CodeGen/MachineCSE.cpp | 70 +- lib/CodeGen/MachineCopyPropagation.cpp | 13 +- lib/CodeGen/MachineFunction.cpp | 49 +- lib/CodeGen/MachineFunctionPrinterPass.cpp | 2 +- lib/CodeGen/MachineInstr.cpp | 337 +- lib/CodeGen/MachineInstrBundle.cpp | 62 +- lib/CodeGen/MachineLICM.cpp | 2 +- lib/CodeGen/MachineLoopInfo.cpp | 2 + lib/CodeGen/MachineModuleInfo.cpp | 2 +- lib/CodeGen/MachineModuleInfoImpls.cpp | 4 +- lib/CodeGen/MachinePostDominators.cpp | 55 + lib/CodeGen/MachineRegisterInfo.cpp | 18 +- lib/CodeGen/MachineScheduler.cpp | 1458 +++++--- lib/CodeGen/MachineSink.cpp | 2 - 
lib/CodeGen/MachineTraceMetrics.cpp | 74 +- lib/CodeGen/MachineTraceMetrics.h | 13 +- lib/CodeGen/MachineVerifier.cpp | 157 +- lib/CodeGen/Passes.cpp | 17 +- lib/CodeGen/PeepholeOptimizer.cpp | 5 + lib/CodeGen/PostRASchedulerList.cpp | 9 +- lib/CodeGen/ProcessImplicitDefs.cpp | 3 +- lib/CodeGen/PrologEpilogInserter.cpp | 8 +- lib/CodeGen/RegAllocBasic.cpp | 3 +- lib/CodeGen/RegAllocFast.cpp | 73 +- lib/CodeGen/RegAllocGreedy.cpp | 8 +- lib/CodeGen/RegAllocPBQP.cpp | 17 +- lib/CodeGen/RegisterClassInfo.cpp | 10 +- lib/CodeGen/RegisterCoalescer.cpp | 1077 ++++-- lib/CodeGen/RegisterCoalescer.h | 7 + lib/CodeGen/RegisterPressure.cpp | 35 +- lib/CodeGen/RegisterScavenging.cpp | 7 +- lib/CodeGen/ScheduleDAG.cpp | 2 + lib/CodeGen/ScheduleDAGInstrs.cpp | 346 +- lib/CodeGen/ScheduleDAGPrinter.cpp | 3 +- lib/CodeGen/ScoreboardHazardRecognizer.cpp | 2 + lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 1190 +++++- lib/CodeGen/SelectionDAG/FastISel.cpp | 4 +- lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp | 8 +- lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 55 +- lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 147 +- lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp | 50 +- lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp | 43 +- lib/CodeGen/SelectionDAG/LegalizeTypes.cpp | 2 +- lib/CodeGen/SelectionDAG/LegalizeTypes.h | 3 +- lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp | 48 +- lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 73 +- lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp | 27 +- lib/CodeGen/SelectionDAG/SDNodeOrdering.h | 4 +- lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp | 182 +- lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp | 44 +- lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp | 18 +- lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h | 9 +- lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp | 2 +- lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 284 +- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 372 +- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h | 19 +- lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp | 11 +- lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 31 +- lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp | 7 +- lib/CodeGen/SelectionDAG/TargetLowering.cpp | 43 +- .../SelectionDAG/TargetSelectionDAGInfo.cpp | 2 +- lib/CodeGen/ShrinkWrapping.cpp | 22 +- lib/CodeGen/SjLjEHPrepare.cpp | 86 +- lib/CodeGen/SlotIndexes.cpp | 4 + lib/CodeGen/SplitKit.cpp | 2 + lib/CodeGen/StackColoring.cpp | 783 ++++ lib/CodeGen/StackProtector.cpp | 69 +- lib/CodeGen/StackSlotColoring.cpp | 6 +- lib/CodeGen/StrongPHIElimination.cpp | 11 +- lib/CodeGen/TailDuplication.cpp | 3 +- lib/CodeGen/TargetInstrInfoImpl.cpp | 120 +- lib/CodeGen/TargetLoweringObjectFileImpl.cpp | 12 +- lib/CodeGen/TargetSchedule.cpp | 306 ++ lib/CodeGen/TwoAddressInstructionPass.cpp | 499 +-- lib/CodeGen/VirtRegMap.cpp | 16 +- lib/CodeGen/VirtRegMap.h | 4 +- lib/DebugInfo/CMakeLists.txt | 1 + lib/DebugInfo/DIContext.cpp | 7 +- lib/DebugInfo/DWARFCompileUnit.cpp | 48 +- lib/DebugInfo/DWARFCompileUnit.h | 16 +- lib/DebugInfo/DWARFContext.cpp | 220 +- lib/DebugInfo/DWARFContext.h | 40 +- lib/DebugInfo/DWARFDebugAranges.cpp | 1 - lib/DebugInfo/DWARFDebugInfoEntry.cpp | 184 +- lib/DebugInfo/DWARFDebugInfoEntry.h | 54 +- lib/DebugInfo/DWARFDebugLine.cpp | 27 + lib/DebugInfo/DWARFDebugLine.h | 8 + lib/DebugInfo/DWARFDebugRangeList.cpp | 67 + lib/DebugInfo/DWARFDebugRangeList.h | 78 + lib/DebugInfo/DWARFFormValue.cpp | 76 +- lib/DebugInfo/DWARFFormValue.h | 2 +- lib/ExecutionEngine/ExecutionEngine.cpp | 43 +- lib/ExecutionEngine/ExecutionEngineBindings.cpp 
| 2 +- lib/ExecutionEngine/IntelJITEvents/CMakeLists.txt | 9 +- .../IntelJITEvents/IntelJITEventListener.cpp | 32 +- .../IntelJITEvents/IntelJITEventsWrapper.h | 102 + lib/ExecutionEngine/IntelJITEvents/Makefile | 5 +- .../IntelJITEvents/ittnotify_config.h | 454 +++ .../IntelJITEvents/ittnotify_types.h | 70 + lib/ExecutionEngine/IntelJITEvents/jitprofiling.c | 481 +++ lib/ExecutionEngine/IntelJITEvents/jitprofiling.h | 259 ++ .../Interpreter/ExternalFunctions.cpp | 8 +- lib/ExecutionEngine/Interpreter/Interpreter.cpp | 2 +- lib/ExecutionEngine/Interpreter/Interpreter.h | 4 +- lib/ExecutionEngine/JIT/JIT.cpp | 14 +- lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp | 4 +- lib/ExecutionEngine/JIT/JITDwarfEmitter.h | 4 +- lib/ExecutionEngine/JIT/JITEmitter.cpp | 36 +- lib/ExecutionEngine/MCJIT/CMakeLists.txt | 1 - lib/ExecutionEngine/MCJIT/MCJIT.cpp | 113 +- lib/ExecutionEngine/MCJIT/MCJIT.h | 26 +- lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp | 14 - lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h | 50 - lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp | 8 +- lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h | 6 +- lib/ExecutionEngine/RuntimeDyld/ObjectImage.h | 59 - .../RuntimeDyld/ObjectImageCommon.h | 76 + lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp | 145 +- lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp | 555 ++- lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h | 56 +- lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h | 96 +- .../RuntimeDyld/RuntimeDyldMachO.cpp | 28 +- lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h | 6 +- lib/ExecutionEngine/TargetSelect.cpp | 9 +- lib/MC/ELFObjectWriter.cpp | 28 +- lib/MC/MCAsmBackend.cpp | 7 +- lib/MC/MCAsmInfo.cpp | 2 +- lib/MC/MCAsmInfoCOFF.cpp | 4 +- lib/MC/MCAsmInfoDarwin.cpp | 1 + lib/MC/MCAsmStreamer.cpp | 27 +- lib/MC/MCAssembler.cpp | 15 +- lib/MC/MCContext.cpp | 6 + lib/MC/MCDisassembler/Disassembler.cpp | 14 + lib/MC/MCDisassembler/EDDisassembler.cpp | 3 +- lib/MC/MCDwarf.cpp | 2 + lib/MC/MCELFObjectTargetWriter.cpp | 8 + lib/MC/MCELFStreamer.cpp | 52 +- lib/MC/MCExpr.cpp | 11 +- lib/MC/MCInst.cpp | 4 + lib/MC/MCInstPrinter.cpp | 14 + lib/MC/MCLabel.cpp | 2 + lib/MC/MCMachOStreamer.cpp | 59 +- lib/MC/MCObjectFileInfo.cpp | 33 +- lib/MC/MCObjectStreamer.cpp | 45 +- lib/MC/MCParser/AsmLexer.cpp | 13 +- lib/MC/MCParser/AsmParser.cpp | 578 ++- lib/MC/MCParser/ELFAsmParser.cpp | 2 +- lib/MC/MCParser/MCAsmLexer.cpp | 3 +- lib/MC/MCParser/MCAsmParser.cpp | 2 + lib/MC/MCParser/MCTargetAsmParser.cpp | 2 +- lib/MC/MCRegisterInfo.cpp | 3 + lib/MC/MCStreamer.cpp | 4 + lib/MC/MCSubtargetInfo.cpp | 57 +- lib/MC/MCSymbol.cpp | 4 +- lib/MC/MCValue.cpp | 2 + lib/MC/MachObjectWriter.cpp | 53 +- lib/MC/SubtargetFeature.cpp | 35 +- lib/MC/WinCOFFStreamer.cpp | 42 - lib/Object/COFFObjectFile.cpp | 14 +- lib/Object/MachOObjectFile.cpp | 21 +- lib/Support/APFloat.cpp | 230 +- lib/Support/Atomic.cpp | 14 +- lib/Support/CMakeLists.txt | 6 - lib/Support/CommandLine.cpp | 26 +- lib/Support/DAGDeltaAlgorithm.cpp | 10 +- lib/Support/DataExtractor.cpp | 6 +- lib/Support/DataStream.cpp | 2 +- lib/Support/DynamicLibrary.cpp | 2 +- lib/Support/Errno.cpp | 15 +- lib/Support/FoldingSet.cpp | 18 + lib/Support/Host.cpp | 4 + lib/Support/LockFileManager.cpp | 2 +- lib/Support/Makefile | 3 - lib/Support/Memory.cpp | 56 - lib/Support/MemoryBuffer.cpp | 58 +- lib/Support/SmallVector.cpp | 6 +- lib/Support/StreamableMemoryObject.cpp | 24 +- lib/Support/StringMap.cpp | 9 +- lib/Support/StringRef.cpp | 4 +- lib/Support/Triple.cpp | 59 +- lib/Support/Unix/Memory.inc | 206 +- 
lib/Support/Unix/Path.inc | 17 +- lib/Support/Unix/Signals.inc | 38 +- lib/Support/Windows/Memory.inc | 165 +- lib/Support/Windows/PathV2.inc | 2 +- lib/Support/YAMLParser.cpp | 7 + lib/Support/raw_ostream.cpp | 12 +- lib/Support/regexec.c | 2 +- lib/Support/system_error.cpp | 10 +- lib/TableGen/CMakeLists.txt | 5 - lib/TableGen/Error.cpp | 35 +- lib/TableGen/Main.cpp | 138 +- lib/TableGen/Makefile | 4 - lib/TableGen/Record.cpp | 615 ++-- lib/TableGen/TGParser.cpp | 94 +- lib/TableGen/TGParser.h | 23 +- lib/TableGen/TableGenAction.cpp | 15 - lib/Target/ARM/ARM.h | 1 + lib/Target/ARM/ARM.td | 31 +- lib/Target/ARM/ARMAsmPrinter.cpp | 98 +- lib/Target/ARM/ARMAsmPrinter.h | 40 +- lib/Target/ARM/ARMBaseInstrInfo.cpp | 943 ++++- lib/Target/ARM/ARMBaseInstrInfo.h | 16 +- lib/Target/ARM/ARMBaseRegisterInfo.cpp | 173 +- lib/Target/ARM/ARMBaseRegisterInfo.h | 13 +- lib/Target/ARM/ARMCallingConv.td | 2 + lib/Target/ARM/ARMCodeEmitter.cpp | 8 +- lib/Target/ARM/ARMConstantIslandPass.cpp | 9 +- lib/Target/ARM/ARMConstantPoolValue.h | 5 - lib/Target/ARM/ARMELFWriterInfo.cpp | 78 - lib/Target/ARM/ARMELFWriterInfo.h | 59 - lib/Target/ARM/ARMExpandPseudoInsts.cpp | 16 +- lib/Target/ARM/ARMFastISel.cpp | 233 +- lib/Target/ARM/ARMFrameLowering.cpp | 41 +- lib/Target/ARM/ARMHazardRecognizer.cpp | 2 +- lib/Target/ARM/ARMISelDAGToDAG.cpp | 174 +- lib/Target/ARM/ARMISelLowering.cpp | 679 +++- lib/Target/ARM/ARMISelLowering.h | 19 +- lib/Target/ARM/ARMInstrFormats.td | 21 + lib/Target/ARM/ARMInstrInfo.cpp | 62 + lib/Target/ARM/ARMInstrInfo.td | 212 +- lib/Target/ARM/ARMInstrNEON.td | 125 +- lib/Target/ARM/ARMInstrThumb.td | 6 +- lib/Target/ARM/ARMInstrThumb2.td | 138 +- lib/Target/ARM/ARMInstrVFP.td | 12 +- lib/Target/ARM/ARMJITInfo.cpp | 2 +- lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 6 +- lib/Target/ARM/ARMMachineFunctionInfo.h | 12 +- lib/Target/ARM/ARMRegisterInfo.td | 23 +- lib/Target/ARM/ARMSchedule.td | 2 + lib/Target/ARM/ARMScheduleA9.td | 597 ++- lib/Target/ARM/ARMScheduleSwift.td | 1085 ++++++ lib/Target/ARM/ARMSelectionDAGInfo.cpp | 2 +- lib/Target/ARM/ARMSubtarget.cpp | 9 +- lib/Target/ARM/ARMSubtarget.h | 14 +- lib/Target/ARM/ARMTargetMachine.cpp | 19 +- lib/Target/ARM/ARMTargetMachine.h | 35 +- lib/Target/ARM/AsmParser/ARMAsmParser.cpp | 280 +- lib/Target/ARM/CMakeLists.txt | 1 - lib/Target/ARM/Disassembler/ARMDisassembler.cpp | 92 +- lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp | 584 +-- lib/Target/ARM/InstPrinter/ARMInstPrinter.h | 3 +- lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp | 15 +- lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp | 7 + lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp | 1 - lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp | 10 +- lib/Target/ARM/MCTargetDesc/ARMMCExpr.h | 5 +- lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp | 8 + lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h | 2 +- .../ARM/MCTargetDesc/ARMMachObjectWriter.cpp | 51 +- lib/Target/ARM/MLxExpansionPass.cpp | 74 +- lib/Target/CMakeLists.txt | 3 +- lib/Target/CellSPU/SPUAsmPrinter.cpp | 6 +- lib/Target/CellSPU/SPUFrameLowering.cpp | 2 +- lib/Target/CellSPU/SPUISelDAGToDAG.cpp | 43 +- lib/Target/CellSPU/SPUSubtarget.h | 4 +- lib/Target/CellSPU/SPUTargetMachine.cpp | 5 +- lib/Target/CellSPU/SPUTargetMachine.h | 17 +- lib/Target/CppBackend/CPPBackend.cpp | 35 +- lib/Target/CppBackend/CPPTargetMachine.h | 4 +- lib/Target/Hexagon/CMakeLists.txt | 1 + lib/Target/Hexagon/HexagonAsmPrinter.cpp | 2 +- lib/Target/Hexagon/HexagonCallingConvLower.cpp | 2 +- lib/Target/Hexagon/HexagonISelLowering.cpp | 2 + 
lib/Target/Hexagon/HexagonInstrFormats.td | 10 + lib/Target/Hexagon/HexagonInstrInfo.cpp | 28 +- lib/Target/Hexagon/HexagonInstrInfo.td | 312 +- lib/Target/Hexagon/HexagonMachineScheduler.cpp | 681 ++++ lib/Target/Hexagon/HexagonMachineScheduler.h | 244 ++ lib/Target/Hexagon/HexagonNewValueJump.cpp | 2 +- lib/Target/Hexagon/HexagonPeephole.cpp | 35 + lib/Target/Hexagon/HexagonRegisterInfo.cpp | 52 + lib/Target/Hexagon/HexagonRegisterInfo.h | 5 + lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp | 2 +- lib/Target/Hexagon/HexagonSchedule.td | 1 + lib/Target/Hexagon/HexagonScheduleV4.td | 1 + lib/Target/Hexagon/HexagonSubtarget.cpp | 33 +- lib/Target/Hexagon/HexagonTargetMachine.cpp | 28 +- lib/Target/Hexagon/HexagonTargetMachine.h | 17 +- lib/Target/Hexagon/HexagonTargetObjectFile.cpp | 4 +- lib/Target/Hexagon/HexagonVLIWPacketizer.cpp | 4 +- .../Hexagon/HexagonVarargsCallingConvention.h | 8 +- .../Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp | 2 +- lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp | 29 +- lib/Target/MBlaze/CMakeLists.txt | 1 - lib/Target/MBlaze/MBlazeAsmPrinter.cpp | 2 +- lib/Target/MBlaze/MBlazeELFWriterInfo.cpp | 107 - lib/Target/MBlaze/MBlazeELFWriterInfo.h | 59 - lib/Target/MBlaze/MBlazeFrameLowering.cpp | 2 +- lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp | 5 +- lib/Target/MBlaze/MBlazeRegisterInfo.cpp | 2 +- lib/Target/MBlaze/MBlazeTargetMachine.cpp | 7 +- lib/Target/MBlaze/MBlazeTargetMachine.h | 20 +- lib/Target/MBlaze/MBlazeTargetObjectFile.cpp | 4 +- .../MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp | 3 +- .../MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp | 4 +- .../MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h | 3 +- lib/Target/MSP430/MSP430FrameLowering.cpp | 16 +- lib/Target/MSP430/MSP430FrameLowering.h | 1 + lib/Target/MSP430/MSP430ISelDAGToDAG.cpp | 4 +- lib/Target/MSP430/MSP430ISelLowering.cpp | 4 +- lib/Target/MSP430/MSP430ISelLowering.h | 2 +- lib/Target/MSP430/MSP430RegisterInfo.cpp | 14 - lib/Target/MSP430/MSP430RegisterInfo.h | 2 - lib/Target/MSP430/MSP430TargetMachine.cpp | 6 +- lib/Target/MSP430/MSP430TargetMachine.h | 16 +- lib/Target/Mangler.cpp | 9 +- lib/Target/Mips/AsmParser/CMakeLists.txt | 1 + lib/Target/Mips/AsmParser/MipsAsmParser.cpp | 1289 ++++++- lib/Target/Mips/CMakeLists.txt | 2 + lib/Target/Mips/Disassembler/MipsDisassembler.cpp | 29 + lib/Target/Mips/MCTargetDesc/CMakeLists.txt | 1 + lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp | 16 +- lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h | 6 +- .../Mips/MCTargetDesc/MipsDirectObjLower.cpp | 81 + lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h | 28 + .../Mips/MCTargetDesc/MipsELFObjectWriter.cpp | 10 +- lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp | 60 +- lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h | 12 +- lib/Target/Mips/Makefile | 4 +- lib/Target/Mips/Mips.td | 19 +- lib/Target/Mips/Mips16FrameLowering.cpp | 56 +- lib/Target/Mips/Mips16FrameLowering.h | 5 + lib/Target/Mips/Mips16InstrInfo.cpp | 93 +- lib/Target/Mips/Mips16InstrInfo.h | 4 + lib/Target/Mips/Mips16InstrInfo.td | 1225 ++++++- lib/Target/Mips/Mips16RegisterInfo.cpp | 122 +- lib/Target/Mips/Mips16RegisterInfo.h | 5 +- lib/Target/Mips/Mips64InstrInfo.td | 95 +- lib/Target/Mips/MipsAnalyzeImmediate.cpp | 2 +- lib/Target/Mips/MipsAsmPrinter.cpp | 42 +- lib/Target/Mips/MipsAsmPrinter.h | 8 + lib/Target/Mips/MipsCallingConv.td | 12 - lib/Target/Mips/MipsCodeEmitter.cpp | 46 +- lib/Target/Mips/MipsDSPInstrFormats.td | 309 ++ lib/Target/Mips/MipsDSPInstrInfo.td | 1319 +++++++ lib/Target/Mips/MipsDelaySlotFiller.cpp | 11 +- 
lib/Target/Mips/MipsFrameLowering.cpp | 36 +- lib/Target/Mips/MipsFrameLowering.h | 3 + lib/Target/Mips/MipsISelDAGToDAG.cpp | 210 +- lib/Target/Mips/MipsISelLowering.cpp | 1204 ++++--- lib/Target/Mips/MipsISelLowering.h | 164 + lib/Target/Mips/MipsInstrFPU.td | 41 +- lib/Target/Mips/MipsInstrFormats.td | 29 + lib/Target/Mips/MipsInstrInfo.cpp | 55 +- lib/Target/Mips/MipsInstrInfo.h | 12 - lib/Target/Mips/MipsInstrInfo.td | 208 +- lib/Target/Mips/MipsLongBranch.cpp | 232 +- lib/Target/Mips/MipsMCInstLower.cpp | 29 - lib/Target/Mips/MipsMCInstLower.h | 3 +- lib/Target/Mips/MipsMachineFunction.cpp | 13 + lib/Target/Mips/MipsMachineFunction.h | 50 +- lib/Target/Mips/MipsRegisterInfo.cpp | 50 +- lib/Target/Mips/MipsRegisterInfo.h | 4 +- lib/Target/Mips/MipsRegisterInfo.td | 27 +- lib/Target/Mips/MipsSEFrameLowering.cpp | 16 +- lib/Target/Mips/MipsSEInstrInfo.cpp | 51 +- lib/Target/Mips/MipsSEInstrInfo.h | 8 +- lib/Target/Mips/MipsSERegisterInfo.cpp | 45 +- lib/Target/Mips/MipsSERegisterInfo.h | 9 +- lib/Target/Mips/MipsSubtarget.cpp | 9 +- lib/Target/Mips/MipsSubtarget.h | 11 +- lib/Target/Mips/MipsTargetMachine.cpp | 7 +- lib/Target/Mips/MipsTargetMachine.h | 18 +- lib/Target/Mips/MipsTargetObjectFile.cpp | 10 +- lib/Target/NVPTX/NVPTX.td | 34 +- lib/Target/NVPTX/NVPTXAllocaHoisting.h | 4 +- lib/Target/NVPTX/NVPTXAsmPrinter.cpp | 105 +- lib/Target/NVPTX/NVPTXISelLowering.cpp | 68 +- lib/Target/NVPTX/NVPTXISelLowering.h | 3 + lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp | 4 +- lib/Target/NVPTX/NVPTXLowerAggrCopies.h | 4 +- lib/Target/NVPTX/NVPTXSubtarget.cpp | 20 +- lib/Target/NVPTX/NVPTXSubtarget.h | 12 +- lib/Target/NVPTX/NVPTXTargetMachine.cpp | 7 +- lib/Target/NVPTX/NVPTXTargetMachine.h | 16 +- lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp | 8 +- lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp | 15 +- .../PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp | 92 +- lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h | 10 + lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp | 4 +- .../PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp | 56 +- .../PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp | 2 +- lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h | 2 +- lib/Target/PowerPC/PPC.td | 10 + lib/Target/PowerPC/PPCAsmPrinter.cpp | 101 +- lib/Target/PowerPC/PPCCallingConv.td | 7 + lib/Target/PowerPC/PPCFrameLowering.cpp | 266 +- lib/Target/PowerPC/PPCFrameLowering.h | 71 +- lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 156 +- lib/Target/PowerPC/PPCISelLowering.cpp | 1217 ++++++- lib/Target/PowerPC/PPCISelLowering.h | 60 +- lib/Target/PowerPC/PPCInstr64Bit.td | 115 +- lib/Target/PowerPC/PPCInstrAltivec.td | 32 + lib/Target/PowerPC/PPCInstrFormats.td | 41 +- lib/Target/PowerPC/PPCInstrInfo.cpp | 24 +- lib/Target/PowerPC/PPCInstrInfo.td | 200 +- lib/Target/PowerPC/PPCRegisterInfo.cpp | 36 +- lib/Target/PowerPC/PPCRegisterInfo.h | 5 +- lib/Target/PowerPC/PPCSchedule.td | 88 +- lib/Target/PowerPC/PPCSchedule440.td | 60 +- lib/Target/PowerPC/PPCScheduleA2.td | 81 +- lib/Target/PowerPC/PPCScheduleE500mc.td | 265 ++ lib/Target/PowerPC/PPCScheduleE5500.td | 309 ++ lib/Target/PowerPC/PPCScheduleG3.td | 7 +- lib/Target/PowerPC/PPCScheduleG4.td | 7 +- lib/Target/PowerPC/PPCScheduleG4Plus.td | 8 +- lib/Target/PowerPC/PPCScheduleG5.td | 10 +- lib/Target/PowerPC/PPCSubtarget.cpp | 15 +- lib/Target/PowerPC/PPCSubtarget.h | 57 +- lib/Target/PowerPC/PPCTargetMachine.cpp | 5 +- lib/Target/PowerPC/PPCTargetMachine.h | 15 +- lib/Target/README.txt | 7 +- lib/Target/Sparc/SparcFrameLowering.cpp | 2 +- lib/Target/Sparc/SparcISelLowering.cpp | 2 +- 
lib/Target/Sparc/SparcInstrInfo.td | 2 +- lib/Target/Sparc/SparcTargetMachine.cpp | 4 +- lib/Target/Sparc/SparcTargetMachine.h | 15 +- lib/Target/Target.cpp | 19 +- lib/Target/TargetData.cpp | 663 ---- lib/Target/TargetELFWriterInfo.cpp | 25 - lib/Target/TargetLibraryInfo.cpp | 75 +- lib/Target/TargetLoweringObjectFile.cpp | 4 +- lib/Target/TargetMachineC.cpp | 12 +- lib/Target/TargetRegisterInfo.cpp | 6 +- lib/Target/TargetTransformImpl.cpp | 353 ++ lib/Target/X86/AsmParser/X86AsmLexer.cpp | 43 +- lib/Target/X86/AsmParser/X86AsmParser.cpp | 445 ++- lib/Target/X86/CMakeLists.txt | 1 - lib/Target/X86/Disassembler/X86Disassembler.cpp | 12 +- lib/Target/X86/Disassembler/X86Disassembler.h | 2 +- .../X86/Disassembler/X86DisassemblerDecoder.c | 16 +- .../X86/Disassembler/X86DisassemblerDecoder.h | 12 +- .../Disassembler/X86DisassemblerDecoderCommon.h | 5 + lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp | 66 +- lib/Target/X86/InstPrinter/X86ATTInstPrinter.h | 3 +- lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp | 48 +- lib/Target/X86/InstPrinter/X86IntelInstPrinter.h | 5 +- lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp | 70 +- lib/Target/X86/MCTargetDesc/X86BaseInfo.h | 16 +- lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp | 25 +- lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp | 5 + lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp | 37 +- lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp | 115 +- lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h | 11 +- .../X86/MCTargetDesc/X86MachObjectWriter.cpp | 48 +- lib/Target/X86/README-SSE.txt | 12 + lib/Target/X86/X86.td | 14 +- lib/Target/X86/X86AsmPrinter.cpp | 113 +- lib/Target/X86/X86AsmPrinter.h | 37 +- lib/Target/X86/X86COFFMachineModuleInfo.h | 2 +- lib/Target/X86/X86CallingConv.td | 59 +- lib/Target/X86/X86CodeEmitter.cpp | 61 +- lib/Target/X86/X86ELFWriterInfo.cpp | 147 - lib/Target/X86/X86ELFWriterInfo.h | 59 - lib/Target/X86/X86FastISel.cpp | 56 +- lib/Target/X86/X86FloatingPoint.cpp | 6 +- lib/Target/X86/X86FrameLowering.cpp | 13 +- lib/Target/X86/X86ISelDAGToDAG.cpp | 609 ++-- lib/Target/X86/X86ISelLowering.cpp | 3120 +++++++++++----- lib/Target/X86/X86ISelLowering.h | 154 +- lib/Target/X86/X86InstrCompiler.td | 475 ++- lib/Target/X86/X86InstrControl.td | 7 +- lib/Target/X86/X86InstrFMA.td | 432 ++- lib/Target/X86/X86InstrFormats.td | 91 +- lib/Target/X86/X86InstrFragmentsSIMD.td | 31 +- lib/Target/X86/X86InstrInfo.cpp | 536 +-- lib/Target/X86/X86InstrInfo.h | 3 + lib/Target/X86/X86InstrInfo.td | 83 +- lib/Target/X86/X86InstrMMX.td | 75 +- lib/Target/X86/X86InstrSSE.td | 1488 +++++--- lib/Target/X86/X86InstrShiftRotate.td | 78 + lib/Target/X86/X86InstrTSX.td | 32 + lib/Target/X86/X86InstrXOP.td | 17 +- lib/Target/X86/X86JITInfo.cpp | 17 +- lib/Target/X86/X86MCInstLower.cpp | 54 +- lib/Target/X86/X86MCInstLower.h | 52 - lib/Target/X86/X86RegisterInfo.cpp | 66 +- lib/Target/X86/X86RegisterInfo.h | 9 +- lib/Target/X86/X86RegisterInfo.td | 496 +-- lib/Target/X86/X86SelectionDAGInfo.cpp | 2 +- lib/Target/X86/X86Subtarget.cpp | 27 +- lib/Target/X86/X86Subtarget.h | 31 +- lib/Target/X86/X86TargetMachine.cpp | 19 +- lib/Target/X86/X86TargetMachine.h | 32 +- lib/Target/X86/X86VZeroUpper.cpp | 12 +- lib/Target/XCore/XCoreAsmPrinter.cpp | 4 +- lib/Target/XCore/XCoreFrameLowering.cpp | 13 +- lib/Target/XCore/XCoreISelLowering.cpp | 16 +- lib/Target/XCore/XCoreInstrInfo.td | 4 +- lib/Target/XCore/XCoreRegisterInfo.cpp | 2 +- lib/Target/XCore/XCoreTargetMachine.cpp | 4 +- lib/Target/XCore/XCoreTargetMachine.h | 15 +- 
lib/Transforms/IPO/ArgumentPromotion.cpp | 64 +- lib/Transforms/IPO/BarrierNoopPass.cpp | 47 + lib/Transforms/IPO/CMakeLists.txt | 1 + lib/Transforms/IPO/ConstantMerge.cpp | 8 +- lib/Transforms/IPO/DeadArgumentElimination.cpp | 107 +- lib/Transforms/IPO/ExtractGV.cpp | 63 +- lib/Transforms/IPO/FunctionAttrs.cpp | 34 +- lib/Transforms/IPO/GlobalOpt.cpp | 93 +- lib/Transforms/IPO/IPO.cpp | 7 +- lib/Transforms/IPO/InlineAlways.cpp | 6 +- lib/Transforms/IPO/InlineSimple.cpp | 4 +- lib/Transforms/IPO/Inliner.cpp | 28 +- lib/Transforms/IPO/Internalize.cpp | 38 +- lib/Transforms/IPO/MergeFunctions.cpp | 26 +- lib/Transforms/IPO/PassManagerBuilder.cpp | 56 +- lib/Transforms/IPO/PruneEH.cpp | 10 +- lib/Transforms/InstCombine/InstCombine.h | 14 +- lib/Transforms/InstCombine/InstCombineAddSub.cpp | 2 +- lib/Transforms/InstCombine/InstCombineCalls.cpp | 172 +- lib/Transforms/InstCombine/InstCombineCasts.cpp | 25 +- lib/Transforms/InstCombine/InstCombineCompares.cpp | 70 +- .../InstCombine/InstCombineLoadStoreAlloca.cpp | 201 +- .../InstCombine/InstCombineMulDivRem.cpp | 17 +- lib/Transforms/InstCombine/InstCombinePHI.cpp | 2 +- lib/Transforms/InstCombine/InstCombineSelect.cpp | 30 +- lib/Transforms/InstCombine/InstCombineShifts.cpp | 2 +- .../InstCombine/InstCombineSimplifyDemanded.cpp | 2 +- .../InstCombine/InstCombineVectorOps.cpp | 7 +- lib/Transforms/InstCombine/InstCombineWorklist.h | 4 +- .../InstCombine/InstructionCombining.cpp | 401 ++- .../Instrumentation/AddressSanitizer.cpp | 437 ++- lib/Transforms/Instrumentation/BlackList.cpp | 105 + lib/Transforms/Instrumentation/BlackList.h | 57 + lib/Transforms/Instrumentation/BoundsChecking.cpp | 16 +- lib/Transforms/Instrumentation/CMakeLists.txt | 2 +- .../Instrumentation/FunctionBlackList.cpp | 79 - lib/Transforms/Instrumentation/FunctionBlackList.h | 37 - lib/Transforms/Instrumentation/GCOVProfiling.cpp | 73 +- .../Instrumentation/MaximumSpanningTree.h | 53 +- lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 137 +- lib/Transforms/Scalar/CMakeLists.txt | 1 + lib/Transforms/Scalar/CodeGenPrepare.cpp | 80 +- lib/Transforms/Scalar/ConstantProp.cpp | 4 +- .../Scalar/CorrelatedValuePropagation.cpp | 5 + lib/Transforms/Scalar/DCE.cpp | 17 +- lib/Transforms/Scalar/DeadStoreElimination.cpp | 203 +- lib/Transforms/Scalar/EarlyCSE.cpp | 120 +- lib/Transforms/Scalar/GVN.cpp | 99 +- lib/Transforms/Scalar/GlobalMerge.cpp | 10 +- lib/Transforms/Scalar/IndVarSimplify.cpp | 42 +- lib/Transforms/Scalar/JumpThreading.cpp | 8 +- lib/Transforms/Scalar/LICM.cpp | 45 +- lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 37 +- lib/Transforms/Scalar/LoopInstSimplify.cpp | 6 +- lib/Transforms/Scalar/LoopRotation.cpp | 65 +- lib/Transforms/Scalar/LoopStrengthReduce.cpp | 58 +- lib/Transforms/Scalar/LoopUnrollPass.cpp | 9 +- lib/Transforms/Scalar/LoopUnswitch.cpp | 13 +- lib/Transforms/Scalar/MemCpyOptimizer.cpp | 72 +- lib/Transforms/Scalar/ObjCARC.cpp | 150 +- lib/Transforms/Scalar/Reassociate.cpp | 105 +- lib/Transforms/Scalar/SCCP.cpp | 10 +- lib/Transforms/Scalar/SROA.cpp | 3697 +++++++++++++++++++ lib/Transforms/Scalar/Scalar.cpp | 3 +- lib/Transforms/Scalar/ScalarReplAggregates.cpp | 198 +- lib/Transforms/Scalar/SimplifyCFGPass.cpp | 62 +- lib/Transforms/Scalar/SimplifyLibCalls.cpp | 980 +---- lib/Transforms/Utils/AddrModeMatcher.cpp | 6 +- lib/Transforms/Utils/BasicBlockUtils.cpp | 45 +- lib/Transforms/Utils/BuildLibCalls.cpp | 154 +- lib/Transforms/Utils/BypassSlowDivision.cpp | 262 ++ lib/Transforms/Utils/CMakeLists.txt | 4 + lib/Transforms/Utils/CloneFunction.cpp 
| 14 +- lib/Transforms/Utils/CodeExtractor.cpp | 2 +- lib/Transforms/Utils/InlineFunction.cpp | 4 +- lib/Transforms/Utils/IntegerDivision.cpp | 420 +++ lib/Transforms/Utils/LCSSA.cpp | 15 +- lib/Transforms/Utils/Local.cpp | 82 +- lib/Transforms/Utils/LoopSimplify.cpp | 7 + lib/Transforms/Utils/MetaRenamer.cpp | 132 + lib/Transforms/Utils/PromoteMemoryToRegister.cpp | 18 +- lib/Transforms/Utils/SSAUpdater.cpp | 2 +- lib/Transforms/Utils/SimplifyCFG.cpp | 1507 ++++++-- lib/Transforms/Utils/SimplifyIndVar.cpp | 6 +- lib/Transforms/Utils/SimplifyInstructions.cpp | 6 +- lib/Transforms/Utils/SimplifyLibCalls.cpp | 1149 ++++++ lib/Transforms/Utils/Utils.cpp | 1 + lib/Transforms/Utils/ValueMapper.cpp | 2 +- lib/Transforms/Vectorize/BBVectorize.cpp | 1012 ++++-- lib/Transforms/Vectorize/CMakeLists.txt | 1 + lib/Transforms/Vectorize/LoopVectorize.cpp | 1941 ++++++++++ lib/Transforms/Vectorize/Vectorize.cpp | 8 +- lib/VMCore/AsmWriter.cpp | 129 +- lib/VMCore/Attributes.cpp | 518 ++- lib/VMCore/AttributesImpl.h | 71 + lib/VMCore/AutoUpgrade.cpp | 3 +- lib/VMCore/CMakeLists.txt | 6 +- lib/VMCore/ConstantFold.cpp | 28 +- lib/VMCore/Constants.cpp | 27 + lib/VMCore/ConstantsContext.h | 35 +- lib/VMCore/Core.cpp | 62 +- lib/VMCore/DIBuilder.cpp | 40 +- lib/VMCore/DataLayout.cpp | 749 ++++ lib/VMCore/DebugInfo.cpp | 10 + lib/VMCore/Dominators.cpp | 10 + lib/VMCore/Function.cpp | 21 +- lib/VMCore/GCOV.cpp | 30 +- lib/VMCore/IRBuilder.cpp | 6 +- lib/VMCore/InlineAsm.cpp | 13 +- lib/VMCore/Instructions.cpp | 63 +- lib/VMCore/LLVMContext.cpp | 5 + lib/VMCore/LLVMContextImpl.cpp | 18 +- lib/VMCore/LLVMContextImpl.h | 9 +- lib/VMCore/Makefile | 1 - lib/VMCore/PassManager.cpp | 2 +- lib/VMCore/TargetTransformInfo.cpp | 31 + lib/VMCore/Type.cpp | 41 +- lib/VMCore/User.cpp | 9 + lib/VMCore/Value.cpp | 2 +- lib/VMCore/ValueTypes.cpp | 30 +- lib/VMCore/Verifier.cpp | 181 +- projects/CMakeLists.txt | 7 + projects/sample/Makefile.llvm.rules | 4 +- projects/sample/autoconf/configure.ac | 4 +- projects/sample/configure | 4 +- runtime/libprofile/CMakeLists.txt | 3 +- runtime/libprofile/CommonProfiling.c | 22 + runtime/libprofile/Makefile | 9 +- runtime/libprofile/Profiling.h | 2 +- test/Analysis/BasicAA/noalias-geps.ll | 54 + test/Analysis/BasicAA/nocapture.ll | 21 + test/Analysis/BasicAA/phi-speculation.ll | 33 + test/Analysis/BranchProbabilityInfo/basic.ll | 27 + test/Analysis/CallGraph/do-nothing-intrinsic.ll | 13 + test/Analysis/CostModel/X86/arith.ll | 42 + test/Analysis/CostModel/X86/cast.ll | 69 + test/Analysis/CostModel/X86/cmp.ll | 42 + test/Analysis/CostModel/X86/i32.ll | 9 + .../CostModel/X86/insert-extract-at-zero.ll | 40 + test/Analysis/CostModel/X86/lit.local.cfg | 6 + test/Analysis/CostModel/X86/loop_v2.ll | 43 + test/Analysis/CostModel/X86/tiny.ll | 11 + test/Analysis/CostModel/X86/vectorized-loop.ll | 78 + test/Analysis/CostModel/lit.local.cfg | 1 + test/Analysis/CostModel/no_info.ll | 15 + test/Analysis/DependenceAnalysis/Banerjee.ll | 595 +++ test/Analysis/DependenceAnalysis/Coupled.ll | 509 +++ test/Analysis/DependenceAnalysis/ExactRDIV.ll | 508 +++ test/Analysis/DependenceAnalysis/ExactSIV.ll | 428 +++ test/Analysis/DependenceAnalysis/GCD.ll | 597 +++ test/Analysis/DependenceAnalysis/Preliminary.ll | 469 +++ test/Analysis/DependenceAnalysis/Propagating.ll | 467 +++ test/Analysis/DependenceAnalysis/Separability.ll | 267 ++ test/Analysis/DependenceAnalysis/StrongSIV.ll | 342 ++ test/Analysis/DependenceAnalysis/SymbolicRDIV.ll | 312 ++ test/Analysis/DependenceAnalysis/SymbolicSIV.ll | 330 ++ 
.../Analysis/DependenceAnalysis/WeakCrossingSIV.ll | 220 ++ test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll | 212 ++ test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll | 212 ++ test/Analysis/DependenceAnalysis/ZIV.ll | 53 + test/Analysis/DependenceAnalysis/lit.local.cfg | 1 + test/Analysis/LoopDependenceAnalysis/alias.ll | 44 - test/Analysis/LoopDependenceAnalysis/lit.local.cfg | 1 - test/Analysis/LoopDependenceAnalysis/siv-strong.ll | 110 - .../LoopDependenceAnalysis/siv-weak-crossing.ll | 118 - .../LoopDependenceAnalysis/siv-weak-zero.ll | 56 - test/Analysis/LoopDependenceAnalysis/ziv.ll | 63 - test/Analysis/Profiling/load-branch-weights-ifs.ll | 122 + .../Profiling/load-branch-weights-loops.ll | 188 + .../Profiling/load-branch-weights-switches.ll | 165 + test/Assembler/2008-09-02-FunctionNotes2.ll | 2 +- test/Assembler/global-addrspace-forwardref.ll | 8 + test/Assembler/invalid-fwdref1.ll | 4 + test/Bindings/Ocaml/ipo_opts.ml | 6 +- test/Bindings/Ocaml/scalar_opts.ml | 6 +- test/Bindings/Ocaml/target.ml | 6 +- test/Bindings/Ocaml/vmcore.ml | 38 +- test/Bitcode/blockaddress.ll | 15 + test/Bitcode/function-encoding-rel-operands.ll | 49 + test/BugPoint/crash-narrowfunctiontest.ll | 1 + test/BugPoint/metadata.ll | 1 + test/BugPoint/remove_arguments_test.ll | 1 + test/CMakeLists.txt | 15 +- test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll | 4 +- test/CodeGen/ARM/2010-12-07-PEIBug.ll | 2 +- test/CodeGen/ARM/2011-06-16-TailCallByVal.ll | 5 + test/CodeGen/ARM/2011-10-26-memset-with-neon.ll | 4 +- test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll | 135 +- test/CodeGen/ARM/2012-05-04-vmov.ll | 11 + test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll | 14 + test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll | 129 + test/CodeGen/ARM/2012-08-30-select.ll | 18 + test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll | 11 + .../ARM/2012-09-25-InlineAsmScalarToVectorConv.ll | 11 + .../ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll | 11 + test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll | 56 + test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll | 19 + test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll | 16 + .../ARM/2012-10-18-PR14099-ByvalFrameAddress.ll | 29 + test/CodeGen/ARM/a15-mla.ll | 12 + test/CodeGen/ARM/a15.ll | 6 + test/CodeGen/ARM/atomic-op.ll | 10 + test/CodeGen/ARM/atomicrmw_minmax.ll | 21 + test/CodeGen/ARM/avoid-cpsr-rmw.ll | 1 + test/CodeGen/ARM/call-noret-minsize.ll | 27 + test/CodeGen/ARM/call-noret.ll | 31 + test/CodeGen/ARM/carry.ll | 13 + test/CodeGen/ARM/coalesce-subregs.ll | 294 +- test/CodeGen/ARM/constants.ll | 12 +- test/CodeGen/ARM/crash-shufflevector.ll | 10 + test/CodeGen/ARM/darwin-section-order.ll | 21 + test/CodeGen/ARM/deps-fix.ll | 22 + test/CodeGen/ARM/div.ll | 17 +- test/CodeGen/ARM/divmod.ll | 46 +- test/CodeGen/ARM/domain-conv-vmovs.ll | 100 + test/CodeGen/ARM/fabss.ll | 4 +- test/CodeGen/ARM/fadds.ll | 8 +- test/CodeGen/ARM/fast-isel-pic.ll | 61 + test/CodeGen/ARM/fast-isel.ll | 66 + test/CodeGen/ARM/fdivs.ll | 8 +- test/CodeGen/ARM/fmuls.ll | 8 +- test/CodeGen/ARM/fp-fast.ll | 60 + test/CodeGen/ARM/fp_convert.ll | 4 +- test/CodeGen/ARM/fsubs.ll | 6 +- test/CodeGen/ARM/ifcvt1.ll | 12 +- test/CodeGen/ARM/ifcvt12.ll | 15 + test/CodeGen/ARM/ifcvt5.ll | 12 +- test/CodeGen/ARM/indirectbr-2.ll | 46 + test/CodeGen/ARM/integer_insertelement.ll | 35 + test/CodeGen/ARM/ldr_post.ll | 1 + test/CodeGen/ARM/ldr_pre.ll | 1 + test/CodeGen/ARM/longMAC.ll | 44 + test/CodeGen/ARM/mls.ll | 12 + test/CodeGen/ARM/neon-fma.ll | 22 + test/CodeGen/ARM/neon_ld2.ll | 37 +- 
test/CodeGen/ARM/opt-shuff-tstore.ll | 4 +- test/CodeGen/ARM/reg_sequence.ll | 11 +- test/CodeGen/ARM/select.ll | 2 +- test/CodeGen/ARM/select_xform.ll | 65 +- test/CodeGen/ARM/struct_byval.ll | 44 + test/CodeGen/ARM/sub-cmp-peephole.ll | 21 + test/CodeGen/ARM/sub.ll | 2 +- test/CodeGen/ARM/subreg-remat.ll | 4 +- test/CodeGen/ARM/trap.ll | 12 + test/CodeGen/ARM/twoaddrinstr.ll | 16 +- test/CodeGen/ARM/unaligned_load_store.ll | 18 +- test/CodeGen/ARM/unaligned_load_store_vector.ll | 487 +++ test/CodeGen/ARM/vbsl-constant.ll | 20 +- test/CodeGen/ARM/vbsl.ll | 97 + test/CodeGen/ARM/vdup.ll | 70 + test/CodeGen/ARM/vector-extend-narrow.ll | 11 + test/CodeGen/ARM/vext.ll | 33 + test/CodeGen/ARM/vget_lane.ll | 2 +- test/CodeGen/ARM/vselect_imax.ll | 12 + test/CodeGen/CellSPU/icmp16.ll | 4 +- test/CodeGen/Generic/MachineBranchProb.ll | 32 + test/CodeGen/Hexagon/args.ll | 4 +- test/CodeGen/Hexagon/newvaluestore.ll | 2 +- test/CodeGen/Hexagon/remove_lsr.ll | 80 + test/CodeGen/Hexagon/static.ll | 2 +- test/CodeGen/MSP430/fp.ll | 17 + test/CodeGen/Mips/alloca16.ll | 75 + test/CodeGen/Mips/atomic.ll | 5 +- test/CodeGen/Mips/atomicops.ll | 40 + test/CodeGen/Mips/brconeq.ll | 38 + test/CodeGen/Mips/brconeqk.ll | 22 + test/CodeGen/Mips/brconeqz.ll | 20 + test/CodeGen/Mips/brconge.ll | 37 + test/CodeGen/Mips/brcongt.ll | 25 + test/CodeGen/Mips/brconle.ll | 37 + test/CodeGen/Mips/brconlt.ll | 27 + test/CodeGen/Mips/brconne.ll | 26 + test/CodeGen/Mips/brconnek.ll | 25 + test/CodeGen/Mips/brconnez.ll | 24 + test/CodeGen/Mips/brdelayslot.ll | 34 +- test/CodeGen/Mips/brind.ll | 40 + test/CodeGen/Mips/check-noat.ll | 11 + test/CodeGen/Mips/div.ll | 18 + test/CodeGen/Mips/div_rem.ll | 21 + test/CodeGen/Mips/divu.ll | 18 + test/CodeGen/Mips/divu_remu.ll | 23 + test/CodeGen/Mips/dsp-r1.ll | 1241 +++++++ test/CodeGen/Mips/dsp-r2.ll | 568 +++ test/CodeGen/Mips/eh-dwarf-cfa.ll | 63 + test/CodeGen/Mips/helloworld.ll | 4 +- test/CodeGen/Mips/i32k.ll | 17 + test/CodeGen/Mips/init-array.ll | 14 + test/CodeGen/Mips/largeimm1.ll | 4 +- test/CodeGen/Mips/largeimmprinting.ll | 22 +- test/CodeGen/Mips/llcarry.ll | 51 + test/CodeGen/Mips/longbranch.ll | 10 +- test/CodeGen/Mips/mips64-sret.ll | 16 + test/CodeGen/Mips/misha.ll | 69 + test/CodeGen/Mips/mul.ll | 17 + test/CodeGen/Mips/mulll.ll | 21 + test/CodeGen/Mips/mulull.ll | 21 + test/CodeGen/Mips/null.ll | 2 +- test/CodeGen/Mips/o32_cc_byval.ll | 10 + test/CodeGen/Mips/rem.ll | 19 + test/CodeGen/Mips/remat-immed-load.ll | 51 + test/CodeGen/Mips/remu.ll | 18 + test/CodeGen/Mips/return-vector.ll | 244 ++ test/CodeGen/Mips/selpat.ll | 350 ++ test/CodeGen/Mips/seteq.ll | 21 + test/CodeGen/Mips/seteqz.ll | 24 + test/CodeGen/Mips/setge.ll | 27 + test/CodeGen/Mips/setgek.ll | 18 + test/CodeGen/Mips/setle.ll | 26 + test/CodeGen/Mips/setlt.ll | 21 + test/CodeGen/Mips/setltk.ll | 20 + test/CodeGen/Mips/setne.ll | 20 + test/CodeGen/Mips/setuge.ll | 26 + test/CodeGen/Mips/setugt.ll | 21 + test/CodeGen/Mips/setule.ll | 26 + test/CodeGen/Mips/setult.ll | 21 + test/CodeGen/Mips/setultk.ll | 20 + test/CodeGen/Mips/small-section-reserve-gp.ll | 12 + test/CodeGen/Mips/stchar.ll | 90 + test/CodeGen/Mips/stldst.ll | 41 + test/CodeGen/Mips/tailcall.ll | 245 ++ test/CodeGen/Mips/tls-alias.ll | 2 +- test/CodeGen/Mips/tls.ll | 12 +- test/CodeGen/Mips/tls16.ll | 13 + test/CodeGen/Mips/tls16_2.ll | 15 + test/CodeGen/Mips/uitofp.ll | 12 + test/CodeGen/Mips/ul1.ll | 15 + test/CodeGen/Mips/vector-load-store.ll | 27 + test/CodeGen/NVPTX/global-ordering.ll | 20 + test/CodeGen/NVPTX/param-align.ll | 25 + 
test/CodeGen/NVPTX/pr13291-i1-store.ll | 26 + test/CodeGen/NVPTX/ptx-version-30.ll | 6 + test/CodeGen/NVPTX/ptx-version-31.ll | 6 + test/CodeGen/NVPTX/sm-version-10.ll | 6 + test/CodeGen/NVPTX/sm-version-11.ll | 6 + test/CodeGen/NVPTX/sm-version-12.ll | 6 + test/CodeGen/NVPTX/sm-version-13.ll | 6 + test/CodeGen/NVPTX/sm-version-20.ll | 6 + test/CodeGen/NVPTX/sm-version-21.ll | 6 + test/CodeGen/NVPTX/sm-version-30.ll | 6 + test/CodeGen/NVPTX/sm-version-35.ll | 6 + test/CodeGen/PowerPC/2010-03-09-indirect-call.ll | 5 +- test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll | 27 + test/CodeGen/PowerPC/2012-10-11-dynalloc.ll | 18 + test/CodeGen/PowerPC/2012-10-12-bitcast.ll | 20 + test/CodeGen/PowerPC/asm-Zy.ll | 14 + test/CodeGen/PowerPC/big-endian-formal-args.ll | 4 +- test/CodeGen/PowerPC/bl8_elf_nop.ll | 16 - test/CodeGen/PowerPC/coalesce-ext.ll | 3 +- test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll | 26 + test/CodeGen/PowerPC/crsave.ll | 49 + test/CodeGen/PowerPC/emptystruct.ll | 51 + test/CodeGen/PowerPC/floatPSA.ll | 97 + test/CodeGen/PowerPC/fsl-e500mc.ll | 22 + test/CodeGen/PowerPC/fsl-e5500.ll | 22 + test/CodeGen/PowerPC/i64_fp_round.ll | 27 + test/CodeGen/PowerPC/inlineasm-copy.ll | 9 +- test/CodeGen/PowerPC/int-fp-conv-1.ll | 3 +- test/CodeGen/PowerPC/jaggedstructs.ll | 48 + test/CodeGen/PowerPC/misched.ll | 45 + test/CodeGen/PowerPC/novrsave.ll | 15 + test/CodeGen/PowerPC/ppc64-abi-extend.ll | 97 + test/CodeGen/PowerPC/ppc64-align-long-double.ll | 26 + test/CodeGen/PowerPC/ppc64-calls.ll | 63 + test/CodeGen/PowerPC/ppc64-ind-call.ll | 16 - test/CodeGen/PowerPC/ppc64-linux-func-size.ll | 1 + test/CodeGen/PowerPC/ppc64-toc.ll | 68 + test/CodeGen/PowerPC/ppc64-zext.ll | 11 + test/CodeGen/PowerPC/pr12757.ll | 14 + test/CodeGen/PowerPC/pr13641.ll | 11 + test/CodeGen/PowerPC/pr13891.ll | 27 + test/CodeGen/PowerPC/remat-imm.ll | 16 + test/CodeGen/PowerPC/structsinmem.ll | 227 ++ test/CodeGen/PowerPC/structsinregs.ll | 213 ++ test/CodeGen/PowerPC/varargs-struct-float.ll | 23 + test/CodeGen/PowerPC/vec_cmp.ll | 527 +++ test/CodeGen/PowerPC/vec_conv.ll | 57 + test/CodeGen/PowerPC/vec_extload.ll | 155 + test/CodeGen/PowerPC/vec_sqrt.ll | 71 + test/CodeGen/PowerPC/vrspill.ll | 19 + test/CodeGen/SPARC/2011-01-11-CC.ll | 2 +- test/CodeGen/Thumb2/buildvector-crash.ll | 4 +- test/CodeGen/Thumb2/carry.ll | 13 + test/CodeGen/Thumb2/cortex-fp.ll | 6 +- test/CodeGen/Thumb2/div.ll | 10 + test/CodeGen/Thumb2/longMACt.ll | 44 + test/CodeGen/Thumb2/thumb2-mla.ll | 7 + test/CodeGen/Thumb2/thumb2-select_xform.ll | 4 +- test/CodeGen/Thumb2/thumb2-smla.ll | 4 + test/CodeGen/Thumb2/thumb2-uxtb.ll | 4 +- test/CodeGen/X86/2010-01-08-Atomic64Bug.ll | 19 +- test/CodeGen/X86/2012-01-18-vbitcast.ll | 4 +- test/CodeGen/X86/2012-03-15-build_vector_wl.ll | 2 +- test/CodeGen/X86/2012-04-26-sdglue.ll | 2 +- test/CodeGen/X86/2012-07-10-extload64.ll | 4 +- test/CodeGen/X86/2012-08-16-setcc.ll | 45 + test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll | 20 + test/CodeGen/X86/2012-09-13-dagco-fneg.ll | 21 + test/CodeGen/X86/2012-09-28-CGPBug.ll | 53 + test/CodeGen/X86/2012-10-02-DAGCycle.ll | 52 + test/CodeGen/X86/2012-10-03-DAGCycle.ll | 31 + test/CodeGen/X86/2012-10-18-crash-dagco.ll | 61 + test/CodeGen/X86/MergeConsecutiveStores.ll | 305 ++ test/CodeGen/X86/StackColoring-dbg.ll | 30 + test/CodeGen/X86/StackColoring.ll | 410 +++ test/CodeGen/X86/add-of-carry.ll | 13 + test/CodeGen/X86/atom-bypass-slow-division.ll | 112 + test/CodeGen/X86/atom-shuf.ll | 9 + test/CodeGen/X86/atomic-minmax-i6432.ll | 67 + 
test/CodeGen/X86/atomic-pointer.ll | 22 + test/CodeGen/X86/atomic16.ll | 250 ++ test/CodeGen/X86/atomic32.ll | 250 ++ test/CodeGen/X86/atomic64.ll | 216 ++ test/CodeGen/X86/atomic6432.ll | 208 ++ test/CodeGen/X86/atomic8.ll | 250 ++ test/CodeGen/X86/atomic_add.ll | 3 +- test/CodeGen/X86/atomic_op.ll | 11 +- test/CodeGen/X86/avx-basic.ll | 4 +- test/CodeGen/X86/avx-intel-ocl.ll | 107 + test/CodeGen/X86/avx-intrinsics-x86.ll | 52 +- test/CodeGen/X86/avx-shuffle.ll | 10 +- test/CodeGen/X86/avx-vextractf128.ll | 88 +- test/CodeGen/X86/avx2-shuffle.ll | 34 + test/CodeGen/X86/bitcast-i256.ll | 11 + test/CodeGen/X86/bool-simplify.ll | 18 +- test/CodeGen/X86/buildvec-insertvec.ll | 15 + test/CodeGen/X86/cmov-fp.ll | 451 +++ test/CodeGen/X86/crash.ll | 147 + test/CodeGen/X86/cvtv2f32.ll | 25 + test/CodeGen/X86/early-ifcvt-crash.ll | 32 + test/CodeGen/X86/early-ifcvt.ll | 77 +- test/CodeGen/X86/extract-concat.ll | 17 + test/CodeGen/X86/fast-cc-callee-pops.ll | 4 +- test/CodeGen/X86/fast-cc-merge-stack-adj.ll | 2 +- test/CodeGen/X86/fast-cc-pass-in-regs.ll | 4 +- test/CodeGen/X86/fast-isel-x86-64.ll | 21 +- test/CodeGen/X86/fma.ll | 16 +- test/CodeGen/X86/fma3-intrinsics.ll | 4 +- test/CodeGen/X86/fma4-intrinsics-x86_64.ll | 1 + test/CodeGen/X86/fma_patterns.ll | 103 +- test/CodeGen/X86/fold-load.ll | 4 +- test/CodeGen/X86/fp-fast.ll | 57 + test/CodeGen/X86/fp-load-trunc.ll | 61 + test/CodeGen/X86/fp-trunc.ll | 53 +- test/CodeGen/X86/handle-move.ll | 74 + test/CodeGen/X86/inline-asm-tied.ll | 9 + test/CodeGen/X86/inline-asm.ll | 7 + test/CodeGen/X86/inlineasm-sched-bug.ll | 13 + test/CodeGen/X86/jump_sign.ll | 56 +- test/CodeGen/X86/misched-balance.ll | 230 ++ test/CodeGen/X86/misched-ilp.ll | 25 + test/CodeGen/X86/misched-new.ll | 28 +- test/CodeGen/X86/mmx-builtins.ll | 14 + test/CodeGen/X86/ms-inline-asm.ll | 63 + test/CodeGen/X86/mulx32.ll | 22 + test/CodeGen/X86/mulx64.ll | 22 + test/CodeGen/X86/phys_subreg_coalesce-3.ll | 6 +- test/CodeGen/X86/pic_jumptable.ll | 12 + test/CodeGen/X86/pmovext.ll | 22 + test/CodeGen/X86/pointer-vector.ll | 5 +- test/CodeGen/X86/pr11334.ll | 8 + test/CodeGen/X86/pr11985.ll | 19 + test/CodeGen/X86/pr12312.ll | 155 + test/CodeGen/X86/pr12359.ll | 10 + test/CodeGen/X86/pr13458.ll | 14 + test/CodeGen/X86/pr13859.ll | 28 + test/CodeGen/X86/pr13899.ll | 58 + test/CodeGen/X86/pr14088.ll | 25 + test/CodeGen/X86/pr14090.ll | 76 + test/CodeGen/X86/pr14098.ll | 23 + test/CodeGen/X86/pr14161.ll | 38 + test/CodeGen/X86/pr14204.ll | 15 + test/CodeGen/X86/pr14314.ll | 13 + test/CodeGen/X86/pr14333.ll | 12 + test/CodeGen/X86/pr5145.ll | 35 + test/CodeGen/X86/promote.ll | 2 +- test/CodeGen/X86/ptr-rotate.ll | 2 +- test/CodeGen/X86/red-zone2.ll | 7 +- test/CodeGen/X86/rot32.ll | 29 +- test/CodeGen/X86/rot64.ll | 31 +- test/CodeGen/X86/rotate2.ll | 2 +- test/CodeGen/X86/rtm.ll | 30 + test/CodeGen/X86/select.ll | 13 + test/CodeGen/X86/select_const.ll | 16 + test/CodeGen/X86/shift-bmi2.ll | 178 + test/CodeGen/X86/sincos.ll | 13 + test/CodeGen/X86/sjlj.ll | 60 + test/CodeGen/X86/smul-with-overflow.ll | 14 + test/CodeGen/X86/sse-intel-ocl.ll | 93 + test/CodeGen/X86/sse-minmax.ll | 80 +- test/CodeGen/X86/sse_partial_update.ll | 36 + test/CodeGen/X86/tailcall-64.ll | 67 +- test/CodeGen/X86/targetLoweringGeneric.ll | 2 +- test/CodeGen/X86/tls-pic.ll | 12 +- test/CodeGen/X86/trunc-ext-ld-st.ll | 15 +- test/CodeGen/X86/vec_compare-2.ll | 3 +- test/CodeGen/X86/vec_fabs.ll | 38 + test/CodeGen/X86/vec_floor.ll | 38 + test/CodeGen/X86/vec_fpext.ll | 32 +- test/CodeGen/X86/vec_shuffle-26.ll | 45 
+- test/CodeGen/X86/vec_shuffle-30.ll | 14 +- test/CodeGen/X86/widen_cast-1.ll | 2 +- test/CodeGen/X86/widen_load-1.ll | 13 +- test/CodeGen/X86/widen_load-2.ll | 2 +- test/CodeGen/X86/xmulo.ll | 50 + test/DebugInfo/2010-04-13-PubType.ll | 47 - .../DebugInfo/Inputs/dwarfdump-inl-test.elf-x86-64 | Bin 0 -> 7468 bytes test/DebugInfo/X86/2010-04-13-PubType.ll | 47 + test/DebugInfo/X86/DW_AT_byte_size.ll | 3 +- test/DebugInfo/X86/DW_AT_object_pointer.ll | 79 + test/DebugInfo/X86/concrete_out_of_line.ll | 7 +- test/DebugInfo/X86/elf-names.ll | 109 + test/DebugInfo/X86/enum-fwd-decl.ll | 16 +- test/DebugInfo/X86/linkage-name.ll | 56 + test/DebugInfo/X86/pr13303.ll | 28 - test/DebugInfo/X86/prologue-stack.ll | 35 + test/DebugInfo/X86/stringpool.ll | 4 +- test/DebugInfo/bug_null_debuginfo.ll | 3 +- test/DebugInfo/dwarfdump-inlining.test | 28 + test/DebugInfo/dwarfdump-test.test | 10 + test/ExecutionEngine/2002-12-16-ArgTest.ll | 1 + test/ExecutionEngine/MCJIT/2002-12-16-ArgTest.ll | 2 +- .../MCJIT/2003-01-04-ArgumentBug.ll | 2 +- test/ExecutionEngine/MCJIT/2003-01-04-LoopTest.ll | 2 +- test/ExecutionEngine/MCJIT/2003-01-04-PhiTest.ll | 2 +- test/ExecutionEngine/MCJIT/2003-01-09-SARTest.ll | 2 +- test/ExecutionEngine/MCJIT/2003-01-10-FUCOM.ll | 2 +- .../MCJIT/2003-01-15-AlignmentTest.ll | 2 +- .../MCJIT/2003-05-06-LivenessClobber.ll | 2 +- .../MCJIT/2003-05-07-ArgumentTest.ll | 2 +- .../MCJIT/2003-05-11-PHIRegAllocBug.ll | 2 +- test/ExecutionEngine/MCJIT/2003-06-04-bzip2-bug.ll | 2 +- test/ExecutionEngine/MCJIT/2003-06-05-PHIBug.ll | 2 +- .../MCJIT/2003-08-15-AllocaAssertion.ll | 2 +- .../MCJIT/2003-08-21-EnvironmentTest.ll | 2 +- .../MCJIT/2003-08-23-RegisterAllocatePhysReg.ll | 2 +- ...-10-18-PHINode-ConstantExpr-CondCode-Failure.ll | 2 +- .../MCJIT/2005-12-02-TailCallBug.ll | 2 +- .../MCJIT/2007-12-10-APIntLoadStore.ll | 2 +- .../MCJIT/2008-06-05-APInt-OverAShr.ll | 2 +- .../ExecutionEngine/MCJIT/2010-01-15-UndefValue.ll | 2 +- test/ExecutionEngine/MCJIT/fpbitcast.ll | 2 +- test/ExecutionEngine/MCJIT/hello.ll | 2 +- test/ExecutionEngine/MCJIT/hello2.ll | 2 +- test/ExecutionEngine/MCJIT/lit.local.cfg | 10 +- test/ExecutionEngine/MCJIT/pr13727.ll | 88 + test/ExecutionEngine/MCJIT/simplesttest.ll | 2 +- test/ExecutionEngine/MCJIT/simpletest.ll | 2 +- test/ExecutionEngine/MCJIT/stubs.ll | 2 +- test/ExecutionEngine/MCJIT/test-arith.ll | 2 +- test/ExecutionEngine/MCJIT/test-branch.ll | 2 +- .../MCJIT/test-call-no-external-funcs.ll | 2 +- test/ExecutionEngine/MCJIT/test-call.ll | 2 +- test/ExecutionEngine/MCJIT/test-cast.ll | 2 +- .../MCJIT/test-common-symbols-alignment.ll | 32 + test/ExecutionEngine/MCJIT/test-common-symbols.ll | 2 +- test/ExecutionEngine/MCJIT/test-constantexpr.ll | 2 +- test/ExecutionEngine/MCJIT/test-data-align.ll | 15 + .../MCJIT/test-fp-no-external-funcs.ll | 2 +- test/ExecutionEngine/MCJIT/test-fp.ll | 2 +- .../MCJIT/test-global-init-nonzero.ll | 2 +- test/ExecutionEngine/MCJIT/test-global.ll | 2 +- test/ExecutionEngine/MCJIT/test-loadstore.ll | 2 +- test/ExecutionEngine/MCJIT/test-local.ll | 2 +- test/ExecutionEngine/MCJIT/test-logical.ll | 2 +- test/ExecutionEngine/MCJIT/test-loop.ll | 2 +- test/ExecutionEngine/MCJIT/test-phi.ll | 2 +- test/ExecutionEngine/MCJIT/test-ptr-reloc.ll | 16 + test/ExecutionEngine/MCJIT/test-ret.ll | 2 +- test/ExecutionEngine/MCJIT/test-return.ll | 2 +- test/ExecutionEngine/MCJIT/test-setcond-fp.ll | 2 +- test/ExecutionEngine/MCJIT/test-setcond-int.ll | 2 +- test/ExecutionEngine/MCJIT/test-shift.ll | 2 +- test/ExecutionEngine/lit.local.cfg | 11 + 
test/ExecutionEngine/test-fp-no-external-funcs.ll | 1 + test/ExecutionEngine/test-fp.ll | 1 + test/Feature/linker_private_linkages.ll | 1 - test/Feature/minsize_attr.ll | 7 + test/Instrumentation/AddressSanitizer/basic.ll | 20 + .../do-not-instrument-internal-globals.ll | 19 + .../AddressSanitizer/instrument_global.ll | 2 +- .../instrument_initializer_metadata.ll | 36 + test/Instrumentation/ThreadSanitizer/atomic.ll | 1712 ++++++++- test/MC/ARM/arm-arithmetic-aliases.s | 4 + test/MC/ARM/arm-shift-encoding.s | 119 + test/MC/ARM/basic-thumb-instructions.s | 4 +- test/MC/ARM/diagnostics.s | 50 + test/MC/ARM/elf-jump24-fixup.s | 9 + test/MC/ARM/thumb-shift-encoding.s | 45 + test/MC/ARM/thumb2-b.w-encodingT4.s | 12 + test/MC/AsmParser/bad-macro.s | 9 + test/MC/AsmParser/directive_lcomm.s | 11 +- test/MC/AsmParser/labels.s | 2 +- test/MC/AsmParser/macro-args.s | 12 +- test/MC/AsmParser/macro-rept-err1.s | 2 +- test/MC/AsmParser/macros-darwin.s | 9 + test/MC/AsmParser/macros.s | 64 +- test/MC/COFF/comm.ll | 13 + test/MC/COFF/global_ctors.ll | 28 - test/MC/COFF/global_ctors_dtors.ll | 39 + .../Disassembler/ARM/invalid-VLD1DUPq8_UPD-arm.txt | 4 +- .../ARM/invalid-VLD1LNd32_UPD-thumb.txt | 4 + .../ARM/invalid-VLD4DUPd32_UPD-thumb.txt | 4 + .../ARM/invalid-VLD4LNd32_UPD-thumb.txt | 4 + .../ARM/invalid-VST1LNd32_UPD-thumb.txt | 4 + .../ARM/invalid-VST4LNd32_UPD-thumb.txt | 4 + test/MC/Disassembler/ARM/marked-up-thumb.txt | 7 + test/MC/Disassembler/ARM/neont-VLD-reencoding.txt | 77 + test/MC/Disassembler/ARM/neont-VST-reencoding.txt | 77 + test/MC/Disassembler/ARM/thumb-printf.txt | 6 +- test/MC/Disassembler/ARM/thumb-tests.txt | 2 +- test/MC/Disassembler/ARM/thumb1.txt | 2 + test/MC/Disassembler/ARM/thumb2.txt | 3 + test/MC/Disassembler/Mips/mips64.txt | 12 +- test/MC/Disassembler/Mips/mips64_le.txt | 12 +- test/MC/Disassembler/Mips/mips64r2.txt | 12 +- test/MC/Disassembler/Mips/mips64r2_le.txt | 12 +- test/MC/Disassembler/X86/marked-up.txt | 6 + test/MC/ELF/cfi-reg.s | 18 + test/MC/ELF/lcomm.s | 21 + .../MachO/ARM/long-call-branch-island-relocation.s | 43 + test/MC/MachO/absolute.s | 158 + test/MC/MachO/gen-dwarf-cpp.s | 22 + test/MC/MachO/gen-dwarf-macro-cpp.s | 17 + test/MC/MachO/i386-large-relocations.s | 36 + test/MC/MachO/lit.local.cfg | 2 +- test/MC/MachO/x86-data-in-code.ll | 108 + test/MC/Markup/basic-markup.mc | 16 + test/MC/Markup/lit.local.cfg | 2 + test/MC/Mips/do_switch.ll | 39 + test/MC/Mips/elf-N64.ll | 2 +- test/MC/Mips/higher_highest.ll | 7 +- test/MC/Mips/mips-alu-instructions.s | 100 + test/MC/Mips/mips-coprocessor-encodings.s | 37 + test/MC/Mips/mips-expansions.s | 27 + test/MC/Mips/mips-fpu-instructions.s | 178 + test/MC/Mips/mips-jump-instructions.s | 72 + test/MC/Mips/mips-memory-instructions.s | 45 + test/MC/Mips/mips-register-names.s | 71 + test/MC/Mips/mips-relocations.s | 41 + test/MC/Mips/mips64-register-names.s | 70 + test/MC/Mips/mips64extins.ll | 57 + test/MC/Mips/mips64shift.ll | 5 +- test/MC/Mips/mips_directives.s | 16 + test/MC/Mips/multi-64bit-func.ll | 4 +- test/MC/Mips/sext_64_32.ll | 4 +- test/MC/PowerPC/lit.local.cfg | 5 + test/MC/PowerPC/ppc64-initial-cfa.ll | 41 + test/MC/PowerPC/ppc64-relocs-01.ll | 66 + test/MC/PowerPC/ppc64-tls-relocs-01.ll | 28 + test/MC/X86/intel-syntax-2.s | 6 +- test/MC/X86/x86-32-ms-inline-asm.s | 60 + test/MC/X86/x86-64.s | 4 + test/MC/X86/x86_64-rtm-encoding.s | 13 + test/MC/X86/x86_nop.s | 13 + test/Makefile | 18 +- test/Object/Inputs/dext-test.elf-mips64r2 | Bin 0 -> 802 bytes test/Object/Inputs/relocations.elf-x86-64 | Bin 0 -> 
1032 bytes test/Object/Mips/feature.test | 11 + test/Object/Mips/lit.local.cfg | 5 + test/Object/nm-shared-object.test | 26 +- test/Object/objdump-relocations.test | 13 + test/Object/objdump-symbol-table.test | 8 + test/Other/FileCheck-space.txt | 9 + test/Other/Inputs/llvm-cov.gcda | Bin 0 -> 296 bytes test/Other/Inputs/llvm-cov.gcno | Bin 0 -> 984 bytes test/Other/ResponseFile.ll | 9 + test/Other/extract-alias.ll | 49 + test/Other/extract-weak-odr.ll | 23 + test/Other/extract.ll | 9 +- test/Other/link-opts.ll | 13 + test/Other/lint.ll | 25 +- test/Other/lit.local.cfg | 2 +- test/Other/llvm-cov.test | 3 + test/Other/llvm-nm-without-aliases.ll | 25 + test/Other/spir_cc.ll | 13 + test/TableGen/if.td | 46 +- test/TableGen/list-element-bitref.td | 15 + test/TableGen/pr8330.td | 29 + test/Transforms/BBVectorize/X86/cmp-types.ll | 16 + test/Transforms/BBVectorize/X86/loop1.ll | 53 + test/Transforms/BBVectorize/X86/sh-rec.ll | 54 + test/Transforms/BBVectorize/X86/sh-rec2.ll | 85 + test/Transforms/BBVectorize/X86/sh-rec3.ll | 170 + test/Transforms/BBVectorize/X86/sh-types.ll | 25 + test/Transforms/BBVectorize/X86/simple-ldstr.ll | 29 + test/Transforms/BBVectorize/X86/simple.ll | 103 + test/Transforms/BBVectorize/X86/vs-cast.ll | 12 + test/Transforms/BBVectorize/cycle.ll | 2 +- test/Transforms/BBVectorize/lit.local.cfg | 5 + test/Transforms/BBVectorize/loop1.ll | 2 +- test/Transforms/BBVectorize/search-limit.ll | 2 +- test/Transforms/BBVectorize/simple-int.ll | 6 +- test/Transforms/BBVectorize/simple-ldstr-ptrs.ll | 53 + test/Transforms/BBVectorize/simple-ldstr.ll | 64 +- test/Transforms/BBVectorize/simple-sel.ll | 4 +- test/Transforms/BBVectorize/simple.ll | 66 +- test/Transforms/ConstProp/loads.ll | 132 +- .../Transforms/CorrelatedValuePropagation/crash.ll | 25 + test/Transforms/DeadArgElim/dbginfo.ll | 64 + test/Transforms/DeadStoreElimination/libcalls.ll | 70 + test/Transforms/DeadStoreElimination/simple.ll | 14 + test/Transforms/EarlyCSE/commute.ll | 66 + test/Transforms/GVN/crash.ll | 36 + test/Transforms/GVN/malloc-load-removal.ll | 31 + test/Transforms/GVN/pr14166.ll | 27 + test/Transforms/GVN/rle.ll | 8 +- test/Transforms/GlobalOpt/blockaddress.ll | 20 + test/Transforms/GlobalOpt/load-store-global.ll | 25 +- test/Transforms/GlobalOpt/tls.ll | 53 + .../IndVarSimplify/2004-04-05-InvokeCastCrash.ll | 4 +- .../2012-10-19-congruent-constant.ll | 27 + test/Transforms/IndVarSimplify/crash.ll | 44 + .../IndVarSimplify/eliminate-comparison.ll | 103 + test/Transforms/IndVarSimplify/no-iv-rewrite.ll | 1 - test/Transforms/IndVarSimplify/verify-scev.ll | 421 +++ test/Transforms/Inline/recursive.ll | 38 + test/Transforms/InstCombine/2012-07-25-LoadPart.ll | 10 +- .../Transforms/InstCombine/2012-08-28-udiv_ashl.ll | 57 + .../InstCombine/2012-09-17-ZeroSizedAlloca.ll | 24 + .../2012-09-24-MemcpyFromGlobalCrash.ll | 19 + .../InstCombine/2012-10-25-vector-of-pointers.ll | 51 + test/Transforms/InstCombine/align-addr.ll | 16 + test/Transforms/InstCombine/alloca.ll | 16 + test/Transforms/InstCombine/and-fcmp.ll | 2 +- test/Transforms/InstCombine/cast.ll | 206 ++ .../InstCombine/disable-simplify-libcalls.ll | 236 ++ test/Transforms/InstCombine/div-shift.ll | 14 + test/Transforms/InstCombine/fcmp.ll | 93 +- test/Transforms/InstCombine/fold-vector-select.ll | 153 +- test/Transforms/InstCombine/icmp.ll | 18 + test/Transforms/InstCombine/memcmp-1.ll | 72 + test/Transforms/InstCombine/memcmp-2.ll | 17 + test/Transforms/InstCombine/memcpy-1.ll | 17 + test/Transforms/InstCombine/memcpy-2.ll | 17 + 
test/Transforms/InstCombine/memcpy-from-global.ll | 136 + test/Transforms/InstCombine/memcpy_chk-1.ll | 60 + test/Transforms/InstCombine/memcpy_chk-2.ll | 24 + test/Transforms/InstCombine/memmove-1.ll | 17 + test/Transforms/InstCombine/memmove-2.ll | 17 + test/Transforms/InstCombine/memmove_chk-1.ll | 60 + test/Transforms/InstCombine/memmove_chk-2.ll | 24 + test/Transforms/InstCombine/memset-1.ll | 17 + test/Transforms/InstCombine/memset-2.ll | 17 + test/Transforms/InstCombine/memset_chk-1.ll | 61 + test/Transforms/InstCombine/memset_chk-2.ll | 20 + test/Transforms/InstCombine/memset_chk.ll | 18 - test/Transforms/InstCombine/obfuscated_splat.ll | 11 + test/Transforms/InstCombine/objsize.ll | 3 +- test/Transforms/InstCombine/select.ll | 34 + test/Transforms/InstCombine/stpcpy-1.ll | 46 + test/Transforms/InstCombine/stpcpy-2.ll | 22 + test/Transforms/InstCombine/stpcpy_chk-1.ll | 96 + test/Transforms/InstCombine/stpcpy_chk-2.ll | 21 + test/Transforms/InstCombine/strcat-1.ll | 38 + test/Transforms/InstCombine/strcat-2.ll | 32 + test/Transforms/InstCombine/strcat-3.ll | 22 + test/Transforms/InstCombine/strchr-1.ll | 54 + test/Transforms/InstCombine/strchr-2.ll | 21 + test/Transforms/InstCombine/strcmp-1.ll | 82 + test/Transforms/InstCombine/strcmp-2.ll | 20 + test/Transforms/InstCombine/strcpy-1.ll | 45 + test/Transforms/InstCombine/strcpy-2.ll | 22 + test/Transforms/InstCombine/strcpy_chk-1.ll | 94 + test/Transforms/InstCombine/strcpy_chk-2.ll | 21 + test/Transforms/InstCombine/strcpy_chk.ll | 13 - test/Transforms/InstCombine/strcspn-1.ll | 57 + test/Transforms/InstCombine/strcspn-2.ll | 21 + test/Transforms/InstCombine/strlen-1.ll | 97 + test/Transforms/InstCombine/strlen-2.ll | 18 + test/Transforms/InstCombine/strncat-1.ll | 37 + test/Transforms/InstCombine/strncat-2.ll | 53 + test/Transforms/InstCombine/strncat-3.ll | 22 + test/Transforms/InstCombine/strncmp-1.ll | 99 + test/Transforms/InstCombine/strncmp-2.ll | 20 + test/Transforms/InstCombine/strncpy-1.ll | 95 + test/Transforms/InstCombine/strncpy-2.ll | 22 + test/Transforms/InstCombine/strncpy_chk-1.ll | 66 + test/Transforms/InstCombine/strncpy_chk-2.ll | 21 + test/Transforms/InstCombine/strpbrk-1.ll | 68 + test/Transforms/InstCombine/strpbrk-2.ll | 23 + test/Transforms/InstCombine/strrchr-1.ll | 54 + test/Transforms/InstCombine/strrchr-2.ll | 21 + test/Transforms/InstCombine/strspn-1.ll | 56 + test/Transforms/InstCombine/strstr-1.ll | 65 + test/Transforms/InstCombine/strstr-2.ll | 18 + test/Transforms/InstCombine/strto-1.ll | 82 + test/Transforms/InstCombine/struct-assign-tbaa.ll | 44 + test/Transforms/InstCombine/udiv-simplify-bug-1.ll | 4 +- test/Transforms/InstCombine/vec_demanded_elts.ll | 2 +- test/Transforms/InstCombine/vec_shuffle.ll | 43 + test/Transforms/InstCombine/vector_gep2.ll | 11 + test/Transforms/InstCombine/weak-symbols.ll | 33 + test/Transforms/InstSimplify/compare.ll | 9 + .../Internalize/2008-05-09-AllButMain.ll | 58 +- .../Internalize/2009-01-05-InternalizeAliases.ll | 2 +- test/Transforms/JumpThreading/crash.ll | 53 + test/Transforms/JumpThreading/select.ll | 36 + test/Transforms/LICM/2003-12-11-SinkingToPHI.ll | 2 +- test/Transforms/LICM/hoisting.ll | 28 +- test/Transforms/LoopIdiom/basic.ll | 33 + test/Transforms/LoopIdiom/crash.ll | 25 + test/Transforms/LoopIdiom/non-canonical-loop.ll | 34 + test/Transforms/LoopIdiom/scev-invalidation.ll | 74 + test/Transforms/LoopRotate/multiple-exits.ll | 236 ++ .../LoopStrengthReduce/2011-10-03-CritEdgeMerge.ll | 44 +- test/Transforms/LoopUnroll/pr11361.ll | 2 +- 
test/Transforms/LoopUnroll/pr14167.ll | 44 + .../LoopUnswitch/2011-06-02-CritSwitch.ll | 2 +- .../Transforms/LoopVectorize/2012-10-20-infloop.ll | 27 + .../LoopVectorize/2012-10-22-isconsec.ll | 57 + test/Transforms/LoopVectorize/X86/avx1.ll | 49 + .../LoopVectorize/X86/conversion-cost.ll | 48 + test/Transforms/LoopVectorize/X86/cost-model.ll | 38 + test/Transforms/LoopVectorize/X86/gcc-examples.ll | 62 + test/Transforms/LoopVectorize/X86/lit.local.cfg | 6 + test/Transforms/LoopVectorize/cpp-new-array.ll | 46 + test/Transforms/LoopVectorize/flags.ll | 53 + test/Transforms/LoopVectorize/gcc-examples.ll | 650 ++++ test/Transforms/LoopVectorize/increment.ll | 66 + test/Transforms/LoopVectorize/induction_plus.ll | 30 + test/Transforms/LoopVectorize/lit.local.cfg | 1 + test/Transforms/LoopVectorize/non-const-n.ll | 38 + test/Transforms/LoopVectorize/read-only.ll | 32 + test/Transforms/LoopVectorize/reduction.ll | 232 ++ test/Transforms/LoopVectorize/runtime-check.ll | 36 + test/Transforms/LoopVectorize/scalar-select.ll | 37 + test/Transforms/LoopVectorize/small-loop.ll | 33 + test/Transforms/LoopVectorize/start-non-zero.ll | 35 + test/Transforms/LoopVectorize/write-only.ll | 26 + .../MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll | 4 +- test/Transforms/MemCpyOpt/align.ll | 24 +- test/Transforms/MemCpyOpt/form-memset.ll | 24 + test/Transforms/MetaRenamer/lit.local.cfg | 1 + test/Transforms/MetaRenamer/metarenamer.ll | 96 + test/Transforms/ObjCARC/basic.ll | 4 +- test/Transforms/ObjCARC/nested.ll | 85 +- test/Transforms/ObjCARC/path-overflow.ll | 329 ++ test/Transforms/PhaseOrdering/gdce.ll | 106 + test/Transforms/Reassociate/crash.ll | 28 + test/Transforms/SCCP/loadtest.ll | 5 +- test/Transforms/SROA/alignment.ll | 171 + test/Transforms/SROA/basictest.ll | 1136 ++++++ test/Transforms/SROA/big-endian.ll | 119 + test/Transforms/SROA/fca.ll | 49 + test/Transforms/SROA/lit.local.cfg | 1 + test/Transforms/SROA/phi-and-select.ll | 427 +++ test/Transforms/SROA/vector-promotion.ll | 267 ++ test/Transforms/ScalarRepl/memcpy-from-global.ll | 146 - test/Transforms/SimplifyCFG/SPARC/lit.local.cfg | 6 + .../SimplifyCFG/SPARC/switch_to_lookup_table.ll | 32 + test/Transforms/SimplifyCFG/X86/lit.local.cfg | 6 + .../SimplifyCFG/X86/switch_to_lookup_table.ll | 779 ++++ test/Transforms/SimplifyCFG/phi-undef-loadstore.ll | 28 + .../SimplifyCFG/preserve-branchweights-partial.ll | 37 + .../preserve-branchweights-switch-create.ll | 140 + .../SimplifyCFG/preserve-branchweights.ll | 230 +- test/Transforms/SimplifyCFG/sink-common-code.ll | 53 + .../SimplifyLibCalls/2009-02-12-StrTo.ll | 14 - test/Transforms/SimplifyLibCalls/FFS.ll | 15 +- test/Transforms/SimplifyLibCalls/StpCpy.ll | 43 - test/Transforms/SimplifyLibCalls/StrCat.ll | 33 - test/Transforms/SimplifyLibCalls/StrChr.ll | 26 - test/Transforms/SimplifyLibCalls/StrCmp.ll | 65 - test/Transforms/SimplifyLibCalls/StrCpy.ll | 37 - test/Transforms/SimplifyLibCalls/StrLen.ll | 62 - test/Transforms/SimplifyLibCalls/StrNCat.ll | 31 - test/Transforms/SimplifyLibCalls/StrNCmp.ll | 78 - test/Transforms/SimplifyLibCalls/StrNCpy.ll | 29 - test/Transforms/SimplifyLibCalls/StrPBrk.ll | 25 - test/Transforms/SimplifyLibCalls/StrRChr.ll | 23 - test/Transforms/SimplifyLibCalls/StrSpn.ll | 41 - test/Transforms/SimplifyLibCalls/StrStr.ll | 60 - .../SimplifyLibCalls/double-float-shrink.ll | 333 ++ .../SimplifyLibCalls/float-shrink-compare.ll | 179 + test/Transforms/SimplifyLibCalls/floor.ll | 23 + test/Transforms/SimplifyLibCalls/memcmp.ll | 35 - test/Transforms/SimplifyLibCalls/memmove.ll 
| 12 - test/Transforms/SimplifyLibCalls/memset-64.ll | 12 - test/Transforms/SimplifyLibCalls/memset.ll | 12 - test/Transforms/SimplifyLibCalls/weak-symbols.ll | 26 - test/Verifier/invoke.ll | 1 - test/lit.cfg | 24 +- test/lit.site.cfg.in | 1 + tools/CMakeLists.txt | 1 + tools/LLVMBuild.txt | 2 +- tools/Makefile | 2 +- tools/bugpoint/ExtractFunction.cpp | 2 +- tools/bugpoint/OptimizerDriver.cpp | 2 +- tools/gold/Makefile | 4 +- tools/gold/gold-plugin.cpp | 10 - tools/llc/llc.cpp | 219 +- tools/lli/CMakeLists.txt | 6 +- tools/lli/LLVMBuild.txt | 2 +- tools/lli/Makefile | 2 +- tools/lli/RecordingMemoryManager.cpp | 87 + tools/lli/RecordingMemoryManager.h | 78 + tools/lli/RemoteTarget.cpp | 61 + tools/lli/RemoteTarget.h | 101 + tools/lli/lli.cpp | 258 +- tools/llvm-ar/CMakeLists.txt | 1 - tools/llvm-ar/Makefile | 1 - tools/llvm-ar/llvm-ar.cpp | 203 +- tools/llvm-as/CMakeLists.txt | 1 - tools/llvm-bcanalyzer/CMakeLists.txt | 1 - tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp | 30 +- tools/llvm-config/Makefile | 2 +- tools/llvm-dis/CMakeLists.txt | 1 - tools/llvm-dwarfdump/llvm-dwarfdump.cpp | 106 +- tools/llvm-extract/llvm-extract.cpp | 51 +- tools/llvm-mc/llvm-mc.cpp | 15 +- tools/llvm-mcmarkup/CMakeLists.txt | 5 + tools/llvm-mcmarkup/LLVMBuild.txt | 22 + tools/llvm-mcmarkup/Makefile | 17 + tools/llvm-mcmarkup/llvm-mcmarkup.cpp | 225 ++ tools/llvm-nm/llvm-nm.cpp | 9 +- tools/llvm-objdump/llvm-objdump.cpp | 17 +- tools/llvm-ranlib/CMakeLists.txt | 1 - tools/llvm-ranlib/Makefile | 1 - tools/llvm-ranlib/llvm-ranlib.cpp | 59 +- tools/llvm-rtdyld/llvm-rtdyld.cpp | 8 +- tools/llvm-stress/llvm-stress.cpp | 4 + tools/lto/LTOCodeGenerator.cpp | 38 +- tools/lto/LTOModule.cpp | 37 +- tools/lto/Makefile | 7 + tools/lto/lto.exports | 1 + tools/opt/CMakeLists.txt | 2 +- tools/opt/LLVMBuild.txt | 2 +- tools/opt/Makefile | 2 +- tools/opt/opt.cpp | 98 +- unittests/ADT/APFloatTest.cpp | 59 + unittests/ADT/BitVectorTest.cpp | 52 + unittests/ADT/CMakeLists.txt | 16 +- unittests/ADT/DenseMapTest.cpp | 33 + unittests/ADT/DenseSetTest.cpp | 2 +- unittests/ADT/ImmutableMapTest.cpp | 50 + unittests/ADT/StringRefTest.cpp | 23 + unittests/ADT/TripleTest.cpp | 12 + unittests/Analysis/ScalarEvolutionTest.cpp | 16 +- unittests/ExecutionEngine/CMakeLists.txt | 1 + unittests/ExecutionEngine/JIT/CMakeLists.txt | 2 - .../JIT/IntelJITEventListenerTest.cpp | 7 +- unittests/ExecutionEngine/JIT/JITTest.cpp | 37 +- unittests/ExecutionEngine/JIT/Makefile | 7 + unittests/ExecutionEngine/JIT/MultiJITTest.cpp | 6 +- unittests/ExecutionEngine/MCJIT/CMakeLists.txt | 25 + unittests/ExecutionEngine/MCJIT/MCJITTest.cpp | 231 ++ unittests/ExecutionEngine/MCJIT/MCJITTestBase.h | 245 ++ unittests/ExecutionEngine/MCJIT/MCJITTests.def | 1 + unittests/ExecutionEngine/MCJIT/Makefile | 18 + .../ExecutionEngine/MCJIT/SectionMemoryManager.cpp | 143 + .../ExecutionEngine/MCJIT/SectionMemoryManager.h | 118 + unittests/ExecutionEngine/Makefile | 2 +- unittests/Support/AlignOfTest.cpp | 129 +- unittests/Support/CMakeLists.txt | 5 +- unittests/Support/Casting.cpp | 59 +- unittests/Support/DataExtractorTest.cpp | 9 + unittests/Support/MemoryBufferTest.cpp | 99 + unittests/Support/MemoryTest.cpp | 356 ++ unittests/Support/formatted_raw_ostream_test.cpp | 33 + unittests/Transforms/Utils/CMakeLists.txt | 1 + unittests/Transforms/Utils/IntegerDivision.cpp | 142 + unittests/VMCore/IRBuilderTest.cpp | 12 + unittests/VMCore/InstructionsTest.cpp | 41 +- unittests/VMCore/PassManagerTest.cpp | 24 +- utils/FileCheck/FileCheck.cpp | 6 +- 
utils/TableGen/AsmMatcherEmitter.cpp | 558 ++- utils/TableGen/AsmWriterEmitter.cpp | 73 +- utils/TableGen/AsmWriterInst.cpp | 23 +- utils/TableGen/CMakeLists.txt | 3 +- utils/TableGen/CallingConvEmitter.cpp | 11 +- utils/TableGen/CodeEmitterGen.cpp | 13 +- utils/TableGen/CodeGenDAGPatterns.cpp | 666 ++-- utils/TableGen/CodeGenDAGPatterns.h | 63 +- utils/TableGen/CodeGenInstruction.cpp | 148 +- utils/TableGen/CodeGenInstruction.h | 23 +- utils/TableGen/CodeGenMapTable.cpp | 606 ++++ utils/TableGen/CodeGenRegisters.cpp | 112 +- utils/TableGen/CodeGenRegisters.h | 14 +- utils/TableGen/CodeGenSchedule.cpp | 1664 ++++++++- utils/TableGen/CodeGenSchedule.h | 368 +- utils/TableGen/CodeGenTarget.cpp | 59 +- utils/TableGen/CodeGenTarget.h | 8 +- utils/TableGen/DAGISelMatcher.h | 2 - utils/TableGen/DAGISelMatcherEmitter.cpp | 11 +- utils/TableGen/DAGISelMatcherGen.cpp | 29 +- utils/TableGen/DFAPacketizerEmitter.cpp | 168 +- utils/TableGen/DisassemblerEmitter.cpp | 6 +- utils/TableGen/EDEmitter.cpp | 44 +- utils/TableGen/FastISelEmitter.cpp | 13 +- utils/TableGen/FixedLenDecoderEmitter.cpp | 19 +- utils/TableGen/InstrInfoEmitter.cpp | 17 +- utils/TableGen/IntrinsicEmitter.cpp | 79 +- utils/TableGen/Makefile | 2 - utils/TableGen/PseudoLoweringEmitter.cpp | 34 +- utils/TableGen/RegisterInfoEmitter.cpp | 166 +- utils/TableGen/SequenceToOffsetTable.h | 6 +- utils/TableGen/SetTheory.cpp | 140 +- utils/TableGen/SetTheory.h | 10 +- utils/TableGen/SubtargetEmitter.cpp | 688 +++- utils/TableGen/TGValueTypes.cpp | 26 +- utils/TableGen/TableGen.cpp | 159 +- utils/TableGen/TableGenBackends.h | 1 + utils/TableGen/X86DisassemblerTables.cpp | 21 +- utils/TableGen/X86ModRMFilters.h | 25 +- utils/TableGen/X86RecognizableInstr.cpp | 35 +- utils/TableGen/X86RecognizableInstr.h | 23 +- .../ExampleTests/LLVM.InTree/test/Bar/bar-test.ll | 3 - .../lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg | 2 +- .../ExampleTests/LLVM.OutOfTree/src/test/lit.cfg | 2 +- utils/lit/lit/ExampleTests/lit.cfg | 2 +- utils/lit/lit/ExampleTests/vg-fail.c | 4 + utils/lit/lit/ExampleTests/xfail-feature.c | 4 + utils/lit/lit/LitConfig.py | 3 - utils/lit/lit/TestRunner.py | 34 +- utils/lit/lit/TestingConfig.py | 10 +- utils/lit/lit/Util.py | 2 +- utils/lldbDataFormatters.py | 24 +- utils/llvm-lit/llvm-lit.in | 13 +- utils/llvm.grm | 1 - utils/unittest/googletest/gtest-port.cc | 4 + .../googletest/include/gtest/internal/gtest-port.h | 2 +- utils/vim/llvm.vim | 3 +- utils/yaml2obj/yaml2obj.cpp | 2 +- 2007 files changed, 114172 insertions(+), 31549 deletions(-) create mode 100644 cmake/platforms/Android.cmake delete mode 100644 docs/CompilerWriterInfo.html create mode 100644 docs/CompilerWriterInfo.rst delete mode 100644 docs/DebuggingJITedCode.html create mode 100644 docs/DebuggingJITedCode.rst delete mode 100644 docs/ExtendingLLVM.html create mode 100644 docs/ExtendingLLVM.rst delete mode 100644 docs/GettingStarted.html create mode 100644 docs/GettingStarted.rst delete mode 100644 docs/GoldPlugin.html create mode 100644 docs/GoldPlugin.rst delete mode 100644 docs/HowToAddABuilder.html create mode 100644 docs/HowToAddABuilder.rst create mode 100644 docs/HowToBuildOnARM.rst create mode 100644 docs/HowToSetUpLLVMStyleRTTI.rst delete mode 100644 docs/HowToSubmitABug.html create mode 100644 docs/HowToSubmitABug.rst create mode 100755 docs/HowToUseInstrMappings.rst create mode 100644 docs/MarkedUpDisassembly.rst create mode 100644 docs/Phabricator.rst create mode 100644 docs/SphinxQuickstartTemplate.rst create mode 100644 
docs/_themes/llvm-theme/layout.html create mode 100644 docs/_themes/llvm-theme/static/contents.png create mode 100644 docs/_themes/llvm-theme/static/llvm-theme.css create mode 100644 docs/_themes/llvm-theme/static/logo.png create mode 100644 docs/_themes/llvm-theme/static/navigation.png create mode 100644 docs/_themes/llvm-theme/theme.conf delete mode 100644 docs/llvm-theme/layout.html delete mode 100644 docs/llvm-theme/static/contents.png delete mode 100644 docs/llvm-theme/static/llvm-theme.css delete mode 100644 docs/llvm-theme/static/logo.png delete mode 100644 docs/llvm-theme/static/navigation.png delete mode 100644 docs/llvm-theme/theme.conf create mode 100644 include/llvm/ADT/MapVector.h delete mode 100644 include/llvm/ADT/Trie.h create mode 100644 include/llvm/AddressingMode.h create mode 100644 include/llvm/Analysis/DependenceAnalysis.h delete mode 100644 include/llvm/Analysis/LoopDependenceAnalysis.h create mode 100644 include/llvm/Analysis/ProfileDataLoader.h create mode 100644 include/llvm/Analysis/ProfileDataTypes.h create mode 100644 include/llvm/CodeGen/CommandFlags.h create mode 100644 include/llvm/CodeGen/MachinePostDominators.h create mode 100644 include/llvm/CodeGen/ScheduleDAGILP.h create mode 100644 include/llvm/CodeGen/TargetSchedule.h create mode 100644 include/llvm/DataLayout.h delete mode 100644 include/llvm/ExecutionEngine/IntelJITEventsWrapper.h create mode 100644 include/llvm/ExecutionEngine/ObjectBuffer.h create mode 100644 include/llvm/ExecutionEngine/ObjectImage.h create mode 100644 include/llvm/Object/RelocVisitor.h delete mode 100644 include/llvm/TableGen/TableGenAction.h delete mode 100644 include/llvm/Target/TargetData.h delete mode 100644 include/llvm/Target/TargetELFWriterInfo.h create mode 100644 include/llvm/Target/TargetTransformImpl.h create mode 100644 include/llvm/TargetTransformInfo.h create mode 100644 include/llvm/Transforms/Utils/BypassSlowDivision.h create mode 100644 include/llvm/Transforms/Utils/IntegerDivision.h create mode 100644 include/llvm/Transforms/Utils/SimplifyLibCalls.h create mode 100644 lib/Analysis/CostModel.cpp create mode 100644 lib/Analysis/DependenceAnalysis.cpp delete mode 100644 lib/Analysis/LoopDependenceAnalysis.cpp create mode 100644 lib/Analysis/ProfileDataLoader.cpp create mode 100644 lib/Analysis/ProfileDataLoaderPass.cpp create mode 100644 lib/CodeGen/MachinePostDominators.cpp create mode 100644 lib/CodeGen/StackColoring.cpp create mode 100644 lib/CodeGen/TargetSchedule.cpp create mode 100644 lib/DebugInfo/DWARFDebugRangeList.cpp create mode 100644 lib/DebugInfo/DWARFDebugRangeList.h create mode 100644 lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h create mode 100644 lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h create mode 100644 lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h create mode 100644 lib/ExecutionEngine/IntelJITEvents/jitprofiling.c create mode 100644 lib/ExecutionEngine/IntelJITEvents/jitprofiling.h delete mode 100644 lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp delete mode 100644 lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h delete mode 100644 lib/ExecutionEngine/RuntimeDyld/ObjectImage.h create mode 100644 lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h delete mode 100644 lib/TableGen/TableGenAction.cpp delete mode 100644 lib/Target/ARM/ARMELFWriterInfo.cpp delete mode 100644 lib/Target/ARM/ARMELFWriterInfo.h create mode 100644 lib/Target/ARM/ARMScheduleSwift.td create mode 100644 lib/Target/Hexagon/HexagonMachineScheduler.cpp create mode 100644 
lib/Target/Hexagon/HexagonMachineScheduler.h delete mode 100644 lib/Target/MBlaze/MBlazeELFWriterInfo.cpp delete mode 100644 lib/Target/MBlaze/MBlazeELFWriterInfo.h create mode 100644 lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp create mode 100644 lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h create mode 100644 lib/Target/Mips/MipsDSPInstrFormats.td create mode 100644 lib/Target/Mips/MipsDSPInstrInfo.td create mode 100644 lib/Target/PowerPC/PPCScheduleE500mc.td create mode 100644 lib/Target/PowerPC/PPCScheduleE5500.td delete mode 100644 lib/Target/TargetData.cpp delete mode 100644 lib/Target/TargetELFWriterInfo.cpp create mode 100644 lib/Target/TargetTransformImpl.cpp delete mode 100644 lib/Target/X86/X86ELFWriterInfo.cpp delete mode 100644 lib/Target/X86/X86ELFWriterInfo.h create mode 100644 lib/Target/X86/X86InstrTSX.td delete mode 100644 lib/Target/X86/X86MCInstLower.h create mode 100644 lib/Transforms/IPO/BarrierNoopPass.cpp create mode 100644 lib/Transforms/Instrumentation/BlackList.cpp create mode 100644 lib/Transforms/Instrumentation/BlackList.h delete mode 100644 lib/Transforms/Instrumentation/FunctionBlackList.cpp delete mode 100644 lib/Transforms/Instrumentation/FunctionBlackList.h create mode 100644 lib/Transforms/Scalar/SROA.cpp create mode 100644 lib/Transforms/Utils/BypassSlowDivision.cpp create mode 100644 lib/Transforms/Utils/IntegerDivision.cpp create mode 100644 lib/Transforms/Utils/MetaRenamer.cpp create mode 100644 lib/Transforms/Utils/SimplifyLibCalls.cpp create mode 100644 lib/Transforms/Vectorize/LoopVectorize.cpp create mode 100644 lib/VMCore/AttributesImpl.h create mode 100644 lib/VMCore/DataLayout.cpp create mode 100644 lib/VMCore/TargetTransformInfo.cpp create mode 100644 test/Analysis/BasicAA/noalias-geps.ll create mode 100644 test/Analysis/BasicAA/phi-speculation.ll create mode 100644 test/Analysis/CallGraph/do-nothing-intrinsic.ll create mode 100644 test/Analysis/CostModel/X86/arith.ll create mode 100644 test/Analysis/CostModel/X86/cast.ll create mode 100644 test/Analysis/CostModel/X86/cmp.ll create mode 100644 test/Analysis/CostModel/X86/i32.ll create mode 100644 test/Analysis/CostModel/X86/insert-extract-at-zero.ll create mode 100644 test/Analysis/CostModel/X86/lit.local.cfg create mode 100644 test/Analysis/CostModel/X86/loop_v2.ll create mode 100644 test/Analysis/CostModel/X86/tiny.ll create mode 100644 test/Analysis/CostModel/X86/vectorized-loop.ll create mode 100644 test/Analysis/CostModel/lit.local.cfg create mode 100644 test/Analysis/CostModel/no_info.ll create mode 100644 test/Analysis/DependenceAnalysis/Banerjee.ll create mode 100644 test/Analysis/DependenceAnalysis/Coupled.ll create mode 100644 test/Analysis/DependenceAnalysis/ExactRDIV.ll create mode 100644 test/Analysis/DependenceAnalysis/ExactSIV.ll create mode 100644 test/Analysis/DependenceAnalysis/GCD.ll create mode 100644 test/Analysis/DependenceAnalysis/Preliminary.ll create mode 100644 test/Analysis/DependenceAnalysis/Propagating.ll create mode 100644 test/Analysis/DependenceAnalysis/Separability.ll create mode 100644 test/Analysis/DependenceAnalysis/StrongSIV.ll create mode 100644 test/Analysis/DependenceAnalysis/SymbolicRDIV.ll create mode 100644 test/Analysis/DependenceAnalysis/SymbolicSIV.ll create mode 100644 test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll create mode 100644 test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll create mode 100644 test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll create mode 100644 test/Analysis/DependenceAnalysis/ZIV.ll create mode 100644 
test/Analysis/DependenceAnalysis/lit.local.cfg delete mode 100644 test/Analysis/LoopDependenceAnalysis/alias.ll delete mode 100644 test/Analysis/LoopDependenceAnalysis/lit.local.cfg delete mode 100644 test/Analysis/LoopDependenceAnalysis/siv-strong.ll delete mode 100644 test/Analysis/LoopDependenceAnalysis/siv-weak-crossing.ll delete mode 100644 test/Analysis/LoopDependenceAnalysis/siv-weak-zero.ll delete mode 100644 test/Analysis/LoopDependenceAnalysis/ziv.ll create mode 100644 test/Analysis/Profiling/load-branch-weights-ifs.ll create mode 100644 test/Analysis/Profiling/load-branch-weights-loops.ll create mode 100644 test/Analysis/Profiling/load-branch-weights-switches.ll create mode 100644 test/Assembler/global-addrspace-forwardref.ll create mode 100644 test/Assembler/invalid-fwdref1.ll create mode 100644 test/Bitcode/function-encoding-rel-operands.ll create mode 100644 test/CodeGen/ARM/2012-05-04-vmov.ll create mode 100644 test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll create mode 100644 test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll create mode 100644 test/CodeGen/ARM/2012-08-30-select.ll create mode 100644 test/CodeGen/ARM/2012-09-18-ARMv4ISelBug.ll create mode 100644 test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv.ll create mode 100644 test/CodeGen/ARM/2012-09-25-InlineAsmScalarToVectorConv2.ll create mode 100644 test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll create mode 100644 test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll create mode 100644 test/CodeGen/ARM/2012-10-04-LDRB_POST_IMM-Crash.ll create mode 100644 test/CodeGen/ARM/2012-10-18-PR14099-ByvalFrameAddress.ll create mode 100644 test/CodeGen/ARM/a15-mla.ll create mode 100644 test/CodeGen/ARM/a15.ll create mode 100644 test/CodeGen/ARM/atomicrmw_minmax.ll create mode 100644 test/CodeGen/ARM/call-noret-minsize.ll create mode 100644 test/CodeGen/ARM/call-noret.ll create mode 100644 test/CodeGen/ARM/crash-shufflevector.ll create mode 100644 test/CodeGen/ARM/darwin-section-order.ll create mode 100644 test/CodeGen/ARM/deps-fix.ll create mode 100644 test/CodeGen/ARM/domain-conv-vmovs.ll create mode 100644 test/CodeGen/ARM/fast-isel-pic.ll create mode 100644 test/CodeGen/ARM/fp-fast.ll create mode 100644 test/CodeGen/ARM/ifcvt12.ll create mode 100644 test/CodeGen/ARM/indirectbr-2.ll create mode 100644 test/CodeGen/ARM/integer_insertelement.ll create mode 100644 test/CodeGen/ARM/longMAC.ll create mode 100644 test/CodeGen/ARM/neon-fma.ll create mode 100644 test/CodeGen/ARM/unaligned_load_store_vector.ll create mode 100644 test/CodeGen/ARM/vselect_imax.ll create mode 100644 test/CodeGen/Generic/MachineBranchProb.ll create mode 100644 test/CodeGen/Hexagon/remove_lsr.ll create mode 100644 test/CodeGen/MSP430/fp.ll create mode 100644 test/CodeGen/Mips/alloca16.ll create mode 100644 test/CodeGen/Mips/atomicops.ll create mode 100644 test/CodeGen/Mips/brconeq.ll create mode 100644 test/CodeGen/Mips/brconeqk.ll create mode 100644 test/CodeGen/Mips/brconeqz.ll create mode 100644 test/CodeGen/Mips/brconge.ll create mode 100644 test/CodeGen/Mips/brcongt.ll create mode 100644 test/CodeGen/Mips/brconle.ll create mode 100644 test/CodeGen/Mips/brconlt.ll create mode 100644 test/CodeGen/Mips/brconne.ll create mode 100644 test/CodeGen/Mips/brconnek.ll create mode 100644 test/CodeGen/Mips/brconnez.ll create mode 100644 test/CodeGen/Mips/brind.ll create mode 100644 test/CodeGen/Mips/check-noat.ll create mode 100644 test/CodeGen/Mips/div.ll create mode 100644 test/CodeGen/Mips/div_rem.ll create mode 100644 test/CodeGen/Mips/divu.ll create 
mode 100644 test/CodeGen/Mips/divu_remu.ll create mode 100644 test/CodeGen/Mips/dsp-r1.ll create mode 100644 test/CodeGen/Mips/dsp-r2.ll create mode 100644 test/CodeGen/Mips/eh-dwarf-cfa.ll create mode 100644 test/CodeGen/Mips/i32k.ll create mode 100644 test/CodeGen/Mips/init-array.ll create mode 100644 test/CodeGen/Mips/llcarry.ll create mode 100644 test/CodeGen/Mips/mips64-sret.ll create mode 100644 test/CodeGen/Mips/misha.ll create mode 100644 test/CodeGen/Mips/mul.ll create mode 100644 test/CodeGen/Mips/mulll.ll create mode 100644 test/CodeGen/Mips/mulull.ll create mode 100644 test/CodeGen/Mips/rem.ll create mode 100644 test/CodeGen/Mips/remat-immed-load.ll create mode 100644 test/CodeGen/Mips/remu.ll create mode 100644 test/CodeGen/Mips/return-vector.ll create mode 100644 test/CodeGen/Mips/selpat.ll create mode 100644 test/CodeGen/Mips/seteq.ll create mode 100644 test/CodeGen/Mips/seteqz.ll create mode 100644 test/CodeGen/Mips/setge.ll create mode 100644 test/CodeGen/Mips/setgek.ll create mode 100644 test/CodeGen/Mips/setle.ll create mode 100644 test/CodeGen/Mips/setlt.ll create mode 100644 test/CodeGen/Mips/setltk.ll create mode 100644 test/CodeGen/Mips/setne.ll create mode 100644 test/CodeGen/Mips/setuge.ll create mode 100644 test/CodeGen/Mips/setugt.ll create mode 100644 test/CodeGen/Mips/setule.ll create mode 100644 test/CodeGen/Mips/setult.ll create mode 100644 test/CodeGen/Mips/setultk.ll create mode 100644 test/CodeGen/Mips/small-section-reserve-gp.ll create mode 100644 test/CodeGen/Mips/stchar.ll create mode 100644 test/CodeGen/Mips/stldst.ll create mode 100644 test/CodeGen/Mips/tailcall.ll create mode 100644 test/CodeGen/Mips/tls16.ll create mode 100644 test/CodeGen/Mips/tls16_2.ll create mode 100644 test/CodeGen/Mips/uitofp.ll create mode 100644 test/CodeGen/Mips/ul1.ll create mode 100644 test/CodeGen/Mips/vector-load-store.ll create mode 100644 test/CodeGen/NVPTX/global-ordering.ll create mode 100644 test/CodeGen/NVPTX/param-align.ll create mode 100644 test/CodeGen/NVPTX/pr13291-i1-store.ll create mode 100644 test/CodeGen/NVPTX/ptx-version-30.ll create mode 100644 test/CodeGen/NVPTX/ptx-version-31.ll create mode 100644 test/CodeGen/NVPTX/sm-version-10.ll create mode 100644 test/CodeGen/NVPTX/sm-version-11.ll create mode 100644 test/CodeGen/NVPTX/sm-version-12.ll create mode 100644 test/CodeGen/NVPTX/sm-version-13.ll create mode 100644 test/CodeGen/NVPTX/sm-version-20.ll create mode 100644 test/CodeGen/NVPTX/sm-version-21.ll create mode 100644 test/CodeGen/NVPTX/sm-version-30.ll create mode 100644 test/CodeGen/NVPTX/sm-version-35.ll create mode 100644 test/CodeGen/PowerPC/2012-09-16-TOC-entry-check.ll create mode 100644 test/CodeGen/PowerPC/2012-10-11-dynalloc.ll create mode 100644 test/CodeGen/PowerPC/2012-10-12-bitcast.ll create mode 100644 test/CodeGen/PowerPC/asm-Zy.ll delete mode 100644 test/CodeGen/PowerPC/bl8_elf_nop.ll create mode 100644 test/CodeGen/PowerPC/cr1eq-no-extra-moves.ll create mode 100644 test/CodeGen/PowerPC/crsave.ll create mode 100644 test/CodeGen/PowerPC/emptystruct.ll create mode 100644 test/CodeGen/PowerPC/floatPSA.ll create mode 100644 test/CodeGen/PowerPC/fsl-e500mc.ll create mode 100644 test/CodeGen/PowerPC/fsl-e5500.ll create mode 100644 test/CodeGen/PowerPC/i64_fp_round.ll create mode 100644 test/CodeGen/PowerPC/jaggedstructs.ll create mode 100644 test/CodeGen/PowerPC/misched.ll create mode 100644 test/CodeGen/PowerPC/novrsave.ll create mode 100644 test/CodeGen/PowerPC/ppc64-abi-extend.ll create mode 100644 
test/CodeGen/PowerPC/ppc64-align-long-double.ll create mode 100644 test/CodeGen/PowerPC/ppc64-calls.ll delete mode 100644 test/CodeGen/PowerPC/ppc64-ind-call.ll create mode 100644 test/CodeGen/PowerPC/ppc64-toc.ll create mode 100644 test/CodeGen/PowerPC/ppc64-zext.ll create mode 100644 test/CodeGen/PowerPC/pr12757.ll create mode 100644 test/CodeGen/PowerPC/pr13641.ll create mode 100644 test/CodeGen/PowerPC/pr13891.ll create mode 100644 test/CodeGen/PowerPC/remat-imm.ll create mode 100644 test/CodeGen/PowerPC/structsinmem.ll create mode 100644 test/CodeGen/PowerPC/structsinregs.ll create mode 100644 test/CodeGen/PowerPC/varargs-struct-float.ll create mode 100644 test/CodeGen/PowerPC/vec_cmp.ll create mode 100644 test/CodeGen/PowerPC/vec_conv.ll create mode 100644 test/CodeGen/PowerPC/vec_extload.ll create mode 100644 test/CodeGen/PowerPC/vec_sqrt.ll create mode 100644 test/CodeGen/PowerPC/vrspill.ll create mode 100644 test/CodeGen/Thumb2/longMACt.ll create mode 100644 test/CodeGen/X86/2012-08-16-setcc.ll create mode 100644 test/CodeGen/X86/2012-08-28-UnsafeMathCrash.ll create mode 100644 test/CodeGen/X86/2012-09-13-dagco-fneg.ll create mode 100644 test/CodeGen/X86/2012-09-28-CGPBug.ll create mode 100644 test/CodeGen/X86/2012-10-02-DAGCycle.ll create mode 100644 test/CodeGen/X86/2012-10-03-DAGCycle.ll create mode 100644 test/CodeGen/X86/2012-10-18-crash-dagco.ll create mode 100644 test/CodeGen/X86/MergeConsecutiveStores.ll create mode 100644 test/CodeGen/X86/StackColoring-dbg.ll create mode 100644 test/CodeGen/X86/StackColoring.ll create mode 100644 test/CodeGen/X86/atom-bypass-slow-division.ll create mode 100644 test/CodeGen/X86/atom-shuf.ll create mode 100644 test/CodeGen/X86/atomic-minmax-i6432.ll create mode 100644 test/CodeGen/X86/atomic-pointer.ll create mode 100644 test/CodeGen/X86/atomic16.ll create mode 100644 test/CodeGen/X86/atomic32.ll create mode 100644 test/CodeGen/X86/atomic64.ll create mode 100644 test/CodeGen/X86/atomic6432.ll create mode 100644 test/CodeGen/X86/atomic8.ll create mode 100644 test/CodeGen/X86/avx-intel-ocl.ll create mode 100644 test/CodeGen/X86/bitcast-i256.ll create mode 100644 test/CodeGen/X86/buildvec-insertvec.ll create mode 100644 test/CodeGen/X86/cmov-fp.ll create mode 100644 test/CodeGen/X86/cvtv2f32.ll create mode 100644 test/CodeGen/X86/early-ifcvt-crash.ll create mode 100644 test/CodeGen/X86/extract-concat.ll create mode 100644 test/CodeGen/X86/fp-fast.ll create mode 100644 test/CodeGen/X86/fp-load-trunc.ll create mode 100644 test/CodeGen/X86/handle-move.ll create mode 100644 test/CodeGen/X86/inlineasm-sched-bug.ll create mode 100644 test/CodeGen/X86/misched-balance.ll create mode 100644 test/CodeGen/X86/misched-ilp.ll create mode 100644 test/CodeGen/X86/ms-inline-asm.ll create mode 100644 test/CodeGen/X86/mulx32.ll create mode 100644 test/CodeGen/X86/mulx64.ll create mode 100644 test/CodeGen/X86/pmovext.ll create mode 100644 test/CodeGen/X86/pr11985.ll create mode 100644 test/CodeGen/X86/pr12312.ll create mode 100644 test/CodeGen/X86/pr12359.ll create mode 100644 test/CodeGen/X86/pr13458.ll create mode 100644 test/CodeGen/X86/pr13859.ll create mode 100644 test/CodeGen/X86/pr13899.ll create mode 100644 test/CodeGen/X86/pr14088.ll create mode 100644 test/CodeGen/X86/pr14090.ll create mode 100644 test/CodeGen/X86/pr14098.ll create mode 100644 test/CodeGen/X86/pr14161.ll create mode 100644 test/CodeGen/X86/pr14204.ll create mode 100644 test/CodeGen/X86/pr14314.ll create mode 100644 test/CodeGen/X86/pr14333.ll create mode 100644 
test/CodeGen/X86/pr5145.ll create mode 100644 test/CodeGen/X86/rtm.ll create mode 100644 test/CodeGen/X86/select_const.ll create mode 100644 test/CodeGen/X86/shift-bmi2.ll create mode 100644 test/CodeGen/X86/sjlj.ll create mode 100644 test/CodeGen/X86/sse-intel-ocl.ll create mode 100644 test/CodeGen/X86/sse_partial_update.ll create mode 100644 test/CodeGen/X86/vec_fabs.ll create mode 100644 test/CodeGen/X86/vec_floor.ll create mode 100644 test/CodeGen/X86/xmulo.ll delete mode 100644 test/DebugInfo/2010-04-13-PubType.ll create mode 100755 test/DebugInfo/Inputs/dwarfdump-inl-test.elf-x86-64 create mode 100644 test/DebugInfo/X86/2010-04-13-PubType.ll create mode 100644 test/DebugInfo/X86/DW_AT_object_pointer.ll create mode 100644 test/DebugInfo/X86/elf-names.ll create mode 100644 test/DebugInfo/X86/linkage-name.ll delete mode 100644 test/DebugInfo/X86/pr13303.ll create mode 100644 test/DebugInfo/X86/prologue-stack.ll create mode 100644 test/DebugInfo/dwarfdump-inlining.test create mode 100644 test/ExecutionEngine/MCJIT/pr13727.ll create mode 100644 test/ExecutionEngine/MCJIT/test-common-symbols-alignment.ll create mode 100644 test/ExecutionEngine/MCJIT/test-data-align.ll create mode 100644 test/ExecutionEngine/MCJIT/test-ptr-reloc.ll create mode 100644 test/Feature/minsize_attr.ll create mode 100644 test/Instrumentation/AddressSanitizer/do-not-instrument-internal-globals.ll create mode 100644 test/Instrumentation/AddressSanitizer/instrument_initializer_metadata.ll create mode 100644 test/MC/ARM/arm-shift-encoding.s create mode 100644 test/MC/ARM/elf-jump24-fixup.s create mode 100644 test/MC/ARM/thumb-shift-encoding.s create mode 100644 test/MC/ARM/thumb2-b.w-encodingT4.s create mode 100644 test/MC/AsmParser/bad-macro.s create mode 100644 test/MC/AsmParser/macros-darwin.s create mode 100644 test/MC/COFF/comm.ll delete mode 100644 test/MC/COFF/global_ctors.ll create mode 100644 test/MC/COFF/global_ctors_dtors.ll create mode 100644 test/MC/Disassembler/ARM/invalid-VLD1LNd32_UPD-thumb.txt create mode 100644 test/MC/Disassembler/ARM/invalid-VLD4DUPd32_UPD-thumb.txt create mode 100644 test/MC/Disassembler/ARM/invalid-VLD4LNd32_UPD-thumb.txt create mode 100644 test/MC/Disassembler/ARM/invalid-VST1LNd32_UPD-thumb.txt create mode 100644 test/MC/Disassembler/ARM/invalid-VST4LNd32_UPD-thumb.txt create mode 100644 test/MC/Disassembler/ARM/marked-up-thumb.txt create mode 100644 test/MC/Disassembler/ARM/neont-VLD-reencoding.txt create mode 100644 test/MC/Disassembler/ARM/neont-VST-reencoding.txt create mode 100644 test/MC/Disassembler/X86/marked-up.txt create mode 100644 test/MC/ELF/cfi-reg.s create mode 100644 test/MC/ELF/lcomm.s create mode 100644 test/MC/MachO/ARM/long-call-branch-island-relocation.s create mode 100644 test/MC/MachO/absolute.s create mode 100644 test/MC/MachO/gen-dwarf-cpp.s create mode 100644 test/MC/MachO/gen-dwarf-macro-cpp.s create mode 100644 test/MC/MachO/i386-large-relocations.s create mode 100644 test/MC/MachO/x86-data-in-code.ll create mode 100644 test/MC/Markup/basic-markup.mc create mode 100644 test/MC/Markup/lit.local.cfg create mode 100644 test/MC/Mips/do_switch.ll create mode 100644 test/MC/Mips/mips-alu-instructions.s create mode 100644 test/MC/Mips/mips-coprocessor-encodings.s create mode 100644 test/MC/Mips/mips-expansions.s create mode 100644 test/MC/Mips/mips-fpu-instructions.s create mode 100644 test/MC/Mips/mips-jump-instructions.s create mode 100644 test/MC/Mips/mips-memory-instructions.s create mode 100644 test/MC/Mips/mips-register-names.s create mode 100644 
test/MC/Mips/mips-relocations.s create mode 100644 test/MC/Mips/mips64-register-names.s create mode 100644 test/MC/Mips/mips64extins.ll create mode 100644 test/MC/Mips/mips_directives.s create mode 100644 test/MC/PowerPC/lit.local.cfg create mode 100644 test/MC/PowerPC/ppc64-initial-cfa.ll create mode 100644 test/MC/PowerPC/ppc64-relocs-01.ll create mode 100644 test/MC/PowerPC/ppc64-tls-relocs-01.ll create mode 100644 test/MC/X86/x86-32-ms-inline-asm.s create mode 100644 test/MC/X86/x86_64-rtm-encoding.s create mode 100644 test/MC/X86/x86_nop.s create mode 100644 test/Object/Inputs/dext-test.elf-mips64r2 create mode 100644 test/Object/Inputs/relocations.elf-x86-64 create mode 100644 test/Object/Mips/feature.test create mode 100644 test/Object/Mips/lit.local.cfg create mode 100644 test/Other/FileCheck-space.txt create mode 100644 test/Other/Inputs/llvm-cov.gcda create mode 100644 test/Other/Inputs/llvm-cov.gcno create mode 100644 test/Other/ResponseFile.ll create mode 100644 test/Other/extract-alias.ll create mode 100644 test/Other/extract-weak-odr.ll create mode 100644 test/Other/link-opts.ll create mode 100644 test/Other/llvm-cov.test create mode 100644 test/Other/llvm-nm-without-aliases.ll create mode 100644 test/Other/spir_cc.ll create mode 100644 test/TableGen/list-element-bitref.td create mode 100644 test/TableGen/pr8330.td create mode 100644 test/Transforms/BBVectorize/X86/cmp-types.ll create mode 100644 test/Transforms/BBVectorize/X86/loop1.ll create mode 100644 test/Transforms/BBVectorize/X86/sh-rec.ll create mode 100644 test/Transforms/BBVectorize/X86/sh-rec2.ll create mode 100644 test/Transforms/BBVectorize/X86/sh-rec3.ll create mode 100644 test/Transforms/BBVectorize/X86/sh-types.ll create mode 100644 test/Transforms/BBVectorize/X86/simple-ldstr.ll create mode 100644 test/Transforms/BBVectorize/X86/simple.ll create mode 100644 test/Transforms/BBVectorize/X86/vs-cast.ll create mode 100644 test/Transforms/DeadArgElim/dbginfo.ll create mode 100644 test/Transforms/DeadStoreElimination/libcalls.ll create mode 100644 test/Transforms/EarlyCSE/commute.ll create mode 100644 test/Transforms/GVN/malloc-load-removal.ll create mode 100644 test/Transforms/GVN/pr14166.ll create mode 100644 test/Transforms/GlobalOpt/blockaddress.ll create mode 100644 test/Transforms/GlobalOpt/tls.ll create mode 100644 test/Transforms/IndVarSimplify/2012-10-19-congruent-constant.ll create mode 100644 test/Transforms/IndVarSimplify/verify-scev.ll create mode 100644 test/Transforms/Inline/recursive.ll create mode 100644 test/Transforms/InstCombine/2012-08-28-udiv_ashl.ll create mode 100644 test/Transforms/InstCombine/2012-09-17-ZeroSizedAlloca.ll create mode 100644 test/Transforms/InstCombine/2012-09-24-MemcpyFromGlobalCrash.ll create mode 100644 test/Transforms/InstCombine/2012-10-25-vector-of-pointers.ll create mode 100644 test/Transforms/InstCombine/disable-simplify-libcalls.ll create mode 100644 test/Transforms/InstCombine/memcmp-1.ll create mode 100644 test/Transforms/InstCombine/memcmp-2.ll create mode 100644 test/Transforms/InstCombine/memcpy-1.ll create mode 100644 test/Transforms/InstCombine/memcpy-2.ll create mode 100644 test/Transforms/InstCombine/memcpy-from-global.ll create mode 100644 test/Transforms/InstCombine/memcpy_chk-1.ll create mode 100644 test/Transforms/InstCombine/memcpy_chk-2.ll create mode 100644 test/Transforms/InstCombine/memmove-1.ll create mode 100644 test/Transforms/InstCombine/memmove-2.ll create mode 100644 test/Transforms/InstCombine/memmove_chk-1.ll create mode 100644 
test/Transforms/InstCombine/memmove_chk-2.ll create mode 100644 test/Transforms/InstCombine/memset-1.ll create mode 100644 test/Transforms/InstCombine/memset-2.ll create mode 100644 test/Transforms/InstCombine/memset_chk-1.ll create mode 100644 test/Transforms/InstCombine/memset_chk-2.ll delete mode 100644 test/Transforms/InstCombine/memset_chk.ll create mode 100644 test/Transforms/InstCombine/obfuscated_splat.ll create mode 100644 test/Transforms/InstCombine/stpcpy-1.ll create mode 100644 test/Transforms/InstCombine/stpcpy-2.ll create mode 100644 test/Transforms/InstCombine/stpcpy_chk-1.ll create mode 100644 test/Transforms/InstCombine/stpcpy_chk-2.ll create mode 100644 test/Transforms/InstCombine/strcat-1.ll create mode 100644 test/Transforms/InstCombine/strcat-2.ll create mode 100644 test/Transforms/InstCombine/strcat-3.ll create mode 100644 test/Transforms/InstCombine/strchr-1.ll create mode 100644 test/Transforms/InstCombine/strchr-2.ll create mode 100644 test/Transforms/InstCombine/strcmp-1.ll create mode 100644 test/Transforms/InstCombine/strcmp-2.ll create mode 100644 test/Transforms/InstCombine/strcpy-1.ll create mode 100644 test/Transforms/InstCombine/strcpy-2.ll create mode 100644 test/Transforms/InstCombine/strcpy_chk-1.ll create mode 100644 test/Transforms/InstCombine/strcpy_chk-2.ll delete mode 100644 test/Transforms/InstCombine/strcpy_chk.ll create mode 100644 test/Transforms/InstCombine/strcspn-1.ll create mode 100644 test/Transforms/InstCombine/strcspn-2.ll create mode 100644 test/Transforms/InstCombine/strlen-1.ll create mode 100644 test/Transforms/InstCombine/strlen-2.ll create mode 100644 test/Transforms/InstCombine/strncat-1.ll create mode 100644 test/Transforms/InstCombine/strncat-2.ll create mode 100644 test/Transforms/InstCombine/strncat-3.ll create mode 100644 test/Transforms/InstCombine/strncmp-1.ll create mode 100644 test/Transforms/InstCombine/strncmp-2.ll create mode 100644 test/Transforms/InstCombine/strncpy-1.ll create mode 100644 test/Transforms/InstCombine/strncpy-2.ll create mode 100644 test/Transforms/InstCombine/strncpy_chk-1.ll create mode 100644 test/Transforms/InstCombine/strncpy_chk-2.ll create mode 100644 test/Transforms/InstCombine/strpbrk-1.ll create mode 100644 test/Transforms/InstCombine/strpbrk-2.ll create mode 100644 test/Transforms/InstCombine/strrchr-1.ll create mode 100644 test/Transforms/InstCombine/strrchr-2.ll create mode 100644 test/Transforms/InstCombine/strspn-1.ll create mode 100644 test/Transforms/InstCombine/strstr-1.ll create mode 100644 test/Transforms/InstCombine/strstr-2.ll create mode 100644 test/Transforms/InstCombine/strto-1.ll create mode 100644 test/Transforms/InstCombine/struct-assign-tbaa.ll create mode 100644 test/Transforms/InstCombine/vector_gep2.ll create mode 100644 test/Transforms/InstCombine/weak-symbols.ll create mode 100644 test/Transforms/LoopIdiom/crash.ll create mode 100644 test/Transforms/LoopIdiom/non-canonical-loop.ll create mode 100644 test/Transforms/LoopIdiom/scev-invalidation.ll create mode 100644 test/Transforms/LoopRotate/multiple-exits.ll create mode 100644 test/Transforms/LoopUnroll/pr14167.ll create mode 100644 test/Transforms/LoopVectorize/2012-10-20-infloop.ll create mode 100644 test/Transforms/LoopVectorize/2012-10-22-isconsec.ll create mode 100644 test/Transforms/LoopVectorize/X86/avx1.ll create mode 100644 test/Transforms/LoopVectorize/X86/conversion-cost.ll create mode 100644 test/Transforms/LoopVectorize/X86/cost-model.ll create mode 100644 test/Transforms/LoopVectorize/X86/gcc-examples.ll 
create mode 100644 test/Transforms/LoopVectorize/X86/lit.local.cfg create mode 100644 test/Transforms/LoopVectorize/cpp-new-array.ll create mode 100644 test/Transforms/LoopVectorize/flags.ll create mode 100644 test/Transforms/LoopVectorize/gcc-examples.ll create mode 100644 test/Transforms/LoopVectorize/increment.ll create mode 100644 test/Transforms/LoopVectorize/induction_plus.ll create mode 100644 test/Transforms/LoopVectorize/lit.local.cfg create mode 100644 test/Transforms/LoopVectorize/non-const-n.ll create mode 100644 test/Transforms/LoopVectorize/read-only.ll create mode 100644 test/Transforms/LoopVectorize/reduction.ll create mode 100644 test/Transforms/LoopVectorize/runtime-check.ll create mode 100644 test/Transforms/LoopVectorize/scalar-select.ll create mode 100644 test/Transforms/LoopVectorize/small-loop.ll create mode 100644 test/Transforms/LoopVectorize/start-non-zero.ll create mode 100644 test/Transforms/LoopVectorize/write-only.ll create mode 100644 test/Transforms/MetaRenamer/lit.local.cfg create mode 100644 test/Transforms/MetaRenamer/metarenamer.ll create mode 100644 test/Transforms/ObjCARC/path-overflow.ll create mode 100644 test/Transforms/PhaseOrdering/gdce.ll create mode 100644 test/Transforms/SROA/alignment.ll create mode 100644 test/Transforms/SROA/basictest.ll create mode 100644 test/Transforms/SROA/big-endian.ll create mode 100644 test/Transforms/SROA/fca.ll create mode 100644 test/Transforms/SROA/lit.local.cfg create mode 100644 test/Transforms/SROA/phi-and-select.ll create mode 100644 test/Transforms/SROA/vector-promotion.ll delete mode 100644 test/Transforms/ScalarRepl/memcpy-from-global.ll create mode 100644 test/Transforms/SimplifyCFG/SPARC/lit.local.cfg create mode 100644 test/Transforms/SimplifyCFG/SPARC/switch_to_lookup_table.ll create mode 100644 test/Transforms/SimplifyCFG/X86/lit.local.cfg create mode 100644 test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll create mode 100644 test/Transforms/SimplifyCFG/preserve-branchweights-partial.ll create mode 100644 test/Transforms/SimplifyCFG/preserve-branchweights-switch-create.ll create mode 100644 test/Transforms/SimplifyCFG/sink-common-code.ll delete mode 100644 test/Transforms/SimplifyLibCalls/2009-02-12-StrTo.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StpCpy.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrCat.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrChr.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrCmp.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrCpy.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrLen.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrNCat.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrNCmp.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrNCpy.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrPBrk.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrRChr.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrSpn.ll delete mode 100644 test/Transforms/SimplifyLibCalls/StrStr.ll create mode 100644 test/Transforms/SimplifyLibCalls/double-float-shrink.ll create mode 100644 test/Transforms/SimplifyLibCalls/float-shrink-compare.ll delete mode 100644 test/Transforms/SimplifyLibCalls/memcmp.ll delete mode 100644 test/Transforms/SimplifyLibCalls/memmove.ll delete mode 100644 test/Transforms/SimplifyLibCalls/memset-64.ll delete mode 100644 test/Transforms/SimplifyLibCalls/memset.ll delete mode 100644 test/Transforms/SimplifyLibCalls/weak-symbols.ll create mode 100644 
tools/lli/RecordingMemoryManager.cpp create mode 100644 tools/lli/RecordingMemoryManager.h create mode 100644 tools/lli/RemoteTarget.cpp create mode 100644 tools/lli/RemoteTarget.h create mode 100644 tools/llvm-mcmarkup/CMakeLists.txt create mode 100644 tools/llvm-mcmarkup/LLVMBuild.txt create mode 100644 tools/llvm-mcmarkup/Makefile create mode 100644 tools/llvm-mcmarkup/llvm-mcmarkup.cpp create mode 100644 unittests/ADT/ImmutableMapTest.cpp create mode 100644 unittests/ExecutionEngine/MCJIT/CMakeLists.txt create mode 100644 unittests/ExecutionEngine/MCJIT/MCJITTest.cpp create mode 100644 unittests/ExecutionEngine/MCJIT/MCJITTestBase.h create mode 100644 unittests/ExecutionEngine/MCJIT/MCJITTests.def create mode 100644 unittests/ExecutionEngine/MCJIT/Makefile create mode 100644 unittests/ExecutionEngine/MCJIT/SectionMemoryManager.cpp create mode 100644 unittests/ExecutionEngine/MCJIT/SectionMemoryManager.h create mode 100644 unittests/Support/MemoryBufferTest.cpp create mode 100644 unittests/Support/MemoryTest.cpp create mode 100644 unittests/Support/formatted_raw_ostream_test.cpp create mode 100644 unittests/Transforms/Utils/IntegerDivision.cpp create mode 100644 utils/TableGen/CodeGenMapTable.cpp delete mode 100644 utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll create mode 100644 utils/lit/lit/ExampleTests/vg-fail.c create mode 100644 utils/lit/lit/ExampleTests/xfail-feature.c diff --git a/.gitignore b/.gitignore index ecf2e3e..2462830 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,7 @@ *.pyc # vim swap files .*.swp +.sw? #==============================================================================# # Explicit files to ignore (only matches one). @@ -27,6 +28,7 @@ cscope.files cscope.out autoconf/aclocal.m4 autoconf/autom4te.cache +compile_commands.json #==============================================================================# # Directories to ignore (do not add trailing '/'s, they skip symlinks). diff --git a/CMakeLists.txt b/CMakeLists.txt index fbf8e2b..d3edc02 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -115,6 +115,11 @@ if(LLVM_ENABLE_TIMESTAMPS) set(ENABLE_TIMESTAMPS 1) endif() +option(LLVM_ENABLE_BACKTRACES "Enable embedding backtraces on crash." ON) +if(LLVM_ENABLE_BACKTRACES) + set(ENABLE_BACKTRACES 1) +endif() + option(LLVM_ENABLE_FFI "Use libffi to call external functions from the interpreter" OFF) set(FFI_LIBRARY_DIR "" CACHE PATH "Additional directory, where CMake should search for libffi.so") set(FFI_INCLUDE_DIR "" CACHE PATH "Additional directory, where CMake should search for ffi.h or ffi/ffi.h") @@ -172,23 +177,7 @@ option(LLVM_USE_INTEL_JITEVENTS if( LLVM_USE_INTEL_JITEVENTS ) # Verify we are on a supported platform - if( CMAKE_SYSTEM_NAME MATCHES "Windows" OR CMAKE_SYSTEM_NAME MATCHES "Linux" ) - # Directory where Intel Parallel Amplifier XE 2011 is installed. 
- if ( WIN32 ) - set(LLVM_INTEL_JITEVENTS_DIR $ENV{VTUNE_AMPLIFIER_XE_2011_DIR}) - else ( WIN32 ) - set(LLVM_INTEL_JITEVENTS_DIR "/opt/intel/vtune_amplifier_xe_2011") - endif ( WIN32 ) - - # Set include and library search paths for Intel JIT Events API - set(LLVM_INTEL_JITEVENTS_INCDIR "${LLVM_INTEL_JITEVENTS_DIR}/include") - - if ( CMAKE_SIZEOF_VOID_P EQUAL 8 ) - set(LLVM_INTEL_JITEVENTS_LIBDIR "${LLVM_INTEL_JITEVENTS_DIR}/lib64") - else ( CMAKE_SIZEOF_VOID_P EQUAL 8 ) - set(LLVM_INTEL_JITEVENTS_LIBDIR "${LLVM_INTEL_JITEVENTS_DIR}/lib32") - endif ( CMAKE_SIZEOF_VOID_P EQUAL 8 ) - else() + if( NOT CMAKE_SYSTEM_NAME MATCHES "Windows" AND NOT CMAKE_SYSTEM_NAME MATCHES "Linux" ) message(FATAL_ERROR "Intel JIT API support is available on Linux and Windows only.") endif() @@ -249,6 +238,14 @@ option(LLVM_INCLUDE_TESTS "Generate build targets for the LLVM unit tests." ON) # BEFORE this include, otherwise options will not be correctly set on # first cmake run include(config-ix) + +# By default, we target the host, but this can be overridden at CMake +# invocation time. +set(LLVM_DEFAULT_TARGET_TRIPLE "${LLVM_HOST_TRIPLE}" CACHE STRING + "Default target for which LLVM will generate code." ) +set(TARGET_TRIPLE "${LLVM_DEFAULT_TARGET_TRIPLE}" CACHE STRING + "Default target for which LLVM will generate code." ) + include(HandleLLVMOptions) # Verify that we can find a Python interpreter, diff --git a/CREDITS.TXT b/CREDITS.TXT index f090ad7..0257918 100644 --- a/CREDITS.TXT +++ b/CREDITS.TXT @@ -5,8 +5,8 @@ done! The list is sorted by surname and formatted to allow easy grepping and beautification by scripts. The fields are: name (N), email (E), web-address -(W), PGP key ID and fingerprint (P), description (D), and snail-mail address -(S). +(W), PGP key ID and fingerprint (P), description (D), snail-mail address +(S), and (I) IRC handle. 
N: Vikram Adve @@ -17,7 +17,7 @@ D: The Sparc64 backend, provider of much wisdom, and motivator for LLVM N: Owen Anderson E: resistor@mac.com D: LCSSA pass and related LoopUnswitch work -D: GVNPRE pass, TargetData refactoring, random improvements +D: GVNPRE pass, DataLayout refactoring, random improvements N: Henrik Bach D: MingW Win32 API portability layer @@ -328,10 +328,6 @@ D: LTO tool, PassManager rewrite, Loop Pass Manager, Loop Rotate D: GCC PCH Integration (llvm-gcc), llvm-gcc improvements D: Optimizer improvements, Loop Index Split -N: Sandeep Patel -E: deeppatel1987@gmail.com -D: ARM calling conventions rewrite, hard float support - N: Wesley Peck E: peckw@wesleypeck.com W: http://wesleypeck.com/ @@ -354,6 +350,11 @@ N: Xerxes Ranby E: xerxes@zafena.se D: Cmake dependency chain and various bug fixes +N: Alex Rosenberg +E: alexr@leftfield.org +I: arosenberg +D: ARM calling conventions rewrite, hard float support + N: Chad Rosier E: mcrosier@apple.com D: ARM fast-isel improvements @@ -369,6 +370,7 @@ D: MSIL backend N: Duncan Sands E: baldrick@free.fr +I: baldrick D: Ada support in llvm-gcc D: Dragonegg plugin D: Exception handling improvements diff --git a/Makefile b/Makefile index 604696a..1e5dae4 100644 --- a/Makefile +++ b/Makefile @@ -68,7 +68,8 @@ endif ifeq ($(MAKECMDGOALS),install-clang) DIRS := tools/clang/tools/driver tools/clang/lib/Headers \ - tools/clang/tools/libclang tools/clang/tools/c-index-test \ + tools/clang/tools/libclang \ + tools/clang/tools/c-index-test \ tools/clang/include/clang-c \ tools/clang/runtime tools/clang/docs \ tools/lto runtime @@ -111,15 +112,18 @@ cross-compile-build-tools: cd BuildTools ; \ unset CFLAGS ; \ unset CXXFLAGS ; \ + unset SDKROOT ; \ + unset UNIVERSAL_SDK_PATH ; \ $(PROJ_SRC_DIR)/configure --build=$(BUILD_TRIPLE) \ --host=$(BUILD_TRIPLE) --target=$(BUILD_TRIPLE) \ --disable-polly ; \ cd .. ; \ fi; \ - (unset SDKROOT; \ - $(MAKE) -C BuildTools \ + ($(MAKE) -C BuildTools \ BUILD_DIRS_ONLY=1 \ UNIVERSAL= \ + UNIVERSAL_SDK_PATH= \ + SDKROOT= \ TARGET_NATIVE_ARCH="$(TARGET_NATIVE_ARCH)" \ TARGETS_TO_BUILD="$(TARGETS_TO_BUILD)" \ ENABLE_OPTIMIZED=$(ENABLE_OPTIMIZED) \ diff --git a/Makefile.config.in b/Makefile.config.in index e3bd2a2..b4ecea6 100644 --- a/Makefile.config.in +++ b/Makefile.config.in @@ -61,6 +61,7 @@ endif prefix := @prefix@ PROJ_prefix := $(prefix) +program_prefix := @program_prefix@ PROJ_VERSION := $(LLVMVersion) else ifndef PROJ_SRC_ROOT diff --git a/Makefile.rules b/Makefile.rules index 289adc2..b2b02c2 100644 --- a/Makefile.rules +++ b/Makefile.rules @@ -571,7 +571,11 @@ endif #-------------------------------------------------------------------- ifeq ($(HOST_OS),Darwin) + ifdef MACOSX_DEPLOYMENT_TARGET + DARWIN_VERSION := $(MACOSX_DEPLOYMENT_TARGET) + else DARWIN_VERSION := `sw_vers -productVersion` + endif # Strip a number like 10.4.7 to 10.4 DARWIN_VERSION := $(shell echo $(DARWIN_VERSION)| sed -E 's/(10.[0-9]).*/\1/') # Get "4" out of 10.4 for later pieces in the makefile. 
@@ -631,19 +635,23 @@ endif # Adjust linker flags for building an executable ifneq ($(HOST_OS), $(filter $(HOST_OS), Cygwin MingW)) -ifneq ($(HOST_OS), Darwin) -ifdef TOOLNAME - LD.Flags += $(RPATH) -Wl,'$$ORIGIN/../lib' - ifdef EXAMPLE_TOOL - LD.Flags += $(RPATH) -Wl,$(ExmplDir) $(DynamicFlag) - else - LD.Flags += $(RPATH) -Wl,$(ToolDir) $(DynamicFlag) + ifneq ($(HOST_OS), Darwin) + ifdef TOOLNAME + LD.Flags += $(RPATH) -Wl,'$$ORIGIN/../lib' + ifdef EXAMPLE_TOOL + LD.Flags += $(RPATH) -Wl,$(ExmplDir) $(DynamicFlag) + else + LD.Flags += $(RPATH) -Wl,$(ToolDir) $(DynamicFlag) + endif endif -endif else -ifneq ($(DARWIN_MAJVERS),4) - LD.Flags += $(RPATH) -Wl,@executable_path/../lib -endif + ifneq ($(DARWIN_MAJVERS),4) + LD.Flags += $(RPATH) -Wl,@executable_path/../lib + endif + ifeq ($(RC_BUILDIT),YES) + TempFile := $(shell mkdir -p ${OBJROOT}/dSYMs ; mktemp ${OBJROOT}/dSYMs/llvm-lto.XXXXXX) + LD.Flags += -Wl,-object_path_lto -Wl,$(TempFile) + endif endif endif @@ -1524,7 +1532,7 @@ ifneq ($(strip $(ToolAliasBuildPath)),) $(ToolAliasBuildPath): $(ToolBuildPath) $(Echo) Creating $(BuildMode) Alias $(TOOLALIAS) $(StripWarnMsg) $(Verb) $(RM) -f $(ToolAliasBuildPath) - $(Verb) $(AliasTool) $(TOOLEXENAME) $(ToolAliasBuildPath) + $(Verb) $(AliasTool) $(notdir $(ToolBuildPath)) $(ToolAliasBuildPath) $(Echo) ======= Finished Creating $(BuildMode) Alias $(TOOLALIAS) \ $(StripWarnMsg) endif @@ -1541,7 +1549,7 @@ ToolBinDir = $(DESTDIR)$(PROJ_internal_prefix)/bin else ToolBinDir = $(DESTDIR)$(PROJ_bindir) endif -DestTool = $(ToolBinDir)/$(TOOLEXENAME) +DestTool = $(ToolBinDir)/$(program_prefix)$(TOOLEXENAME) install-local:: $(DestTool) @@ -1556,14 +1564,14 @@ uninstall-local:: # TOOLALIAS install. ifdef TOOLALIAS -DestToolAlias = $(ToolBinDir)/$(TOOLALIAS)$(EXEEXT) +DestToolAlias = $(ToolBinDir)/$(program_prefix)$(TOOLALIAS)$(EXEEXT) install-local:: $(DestToolAlias) $(DestToolAlias): $(DestTool) $(Echo) Installing $(BuildMode) $(DestToolAlias) $(Verb) $(RM) -f $(DestToolAlias) - $(Verb) $(AliasTool) $(TOOLEXENAME) $(DestToolAlias) + $(Verb) $(AliasTool) $(notdir $(DestTool)) $(DestToolAlias) uninstall-local:: $(Echo) Uninstalling $(BuildMode) $(DestToolAlias) diff --git a/autoconf/configure.ac b/autoconf/configure.ac index 7fa883e..7715531 100644 --- a/autoconf/configure.ac +++ b/autoconf/configure.ac @@ -363,8 +363,8 @@ AC_CACHE_CHECK([target architecture],[llvm_cv_target_arch], sparc*-*) llvm_cv_target_arch="Sparc" ;; powerpc*-*) llvm_cv_target_arch="PowerPC" ;; arm*-*) llvm_cv_target_arch="ARM" ;; - mips-*) llvm_cv_target_arch="Mips" ;; - mipsel-*) llvm_cv_target_arch="Mips" ;; + mips-* | mips64-*) llvm_cv_target_arch="Mips" ;; + mipsel-* | mips64el-*) llvm_cv_target_arch="Mips" ;; xcore-*) llvm_cv_target_arch="XCore" ;; msp430-*) llvm_cv_target_arch="MSP430" ;; hexagon-*) llvm_cv_target_arch="Hexagon" ;; @@ -396,8 +396,8 @@ case $host in sparc*-*) host_arch="Sparc" ;; powerpc*-*) host_arch="PowerPC" ;; arm*-*) host_arch="ARM" ;; - mips-*) host_arch="Mips" ;; - mipsel-*) host_arch="Mips" ;; + mips-* | mips64-*) host_arch="Mips" ;; + mipsel-* | mips64el-*) host_arch="Mips" ;; xcore-*) host_arch="XCore" ;; msp430-*) host_arch="MSP430" ;; hexagon-*) host_arch="Hexagon" ;; @@ -678,6 +678,21 @@ esac AC_DEFINE_UNQUOTED([ENABLE_TIMESTAMPS],$ENABLE_TIMESTAMPS, [Define if timestamp information (e.g., __DATE__) is allowed]) +dnl Enable embedding timestamp information into build. 
+ +AC_ARG_ENABLE(backtraces, + AS_HELP_STRING([--enable-backtraces], + [Enable embedding backtraces on crash (default is YES)]),, + enableval=default) +case "$enableval" in + yes) AC_SUBST(ENABLE_BACKTRACES,[1]) ;; + no) AC_SUBST(ENABLE_BACKTRACES,[0]) ;; + default) AC_SUBST(ENABLE_BACKTRACES,[1]) ;; + *) AC_MSG_ERROR([Invalid setting for --enable-backtraces. Use "yes" or "no"]) ;; +esac +AC_DEFINE_UNQUOTED([ENABLE_BACKTRACES],$ENABLE_BACKTRACES, + [Define if you want backtraces on crash]) + dnl Allow specific targets to be specified for building (or not) TARGETS_TO_BUILD="" AC_ARG_ENABLE([targets],AS_HELP_STRING([--enable-targets], @@ -699,6 +714,8 @@ case "$enableval" in arm) TARGETS_TO_BUILD="ARM $TARGETS_TO_BUILD" ;; mips) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; mipsel) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; + mips64) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; + mips64el) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; spu) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;; xcore) TARGETS_TO_BUILD="XCore $TARGETS_TO_BUILD" ;; msp430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;; @@ -1275,46 +1292,23 @@ AC_DEFINE_UNQUOTED([LLVM_USE_OPROFILE],$USE_OPROFILE, dnl Enable support for Intel JIT Events API. AC_ARG_WITH(intel-jitevents, - AS_HELP_STRING([--with-intel-jitevents=], - [Specify location of run-time support library for Intel JIT API (default=/opt/intel/vtune_amplifier_xe_2011)]), + AS_HELP_STRING([--with-intel-jitevents Notify Intel JIT profiling API of generated code]), [ + case "$withval" in + yes) AC_SUBST(USE_INTEL_JITEVENTS,[1]);; + no) AC_SUBST(USE_INTEL_JITEVENTS,[0]);; + *) AC_MSG_ERROR([Invalid setting for --with-intel-jitevents. Use "yes" or "no"]);; + esac + case $llvm_cv_os_type in Linux|Win32|Cygwin|MingW) ;; - *) - AC_MSG_ERROR([ - Intel JIT API support is available on Linux and Windows only."]) ;; + *) AC_MSG_ERROR([Intel JIT API support is available on Linux and Windows only.]);; esac - AC_SUBST(USE_INTEL_JITEVENTS, [1]) case "$llvm_cv_target_arch" in - x86) llvm_intel_jitevents_archdir="lib32";; - x86_64) llvm_intel_jitevents_archdir="lib64";; - *) echo "Target architecture $llvm_cv_target_arch does not support Intel JIT Events API" - exit -1;; - esac - INTEL_JITEVENTS_INCDIR="/opt/intel/vtune_amplifier_xe_2011/include" - INTEL_JITEVENTS_LIBDIR="/opt/intel/vtune_amplifier_xe_2011/$llvm_intel_jitevents_archdir" - case "$withval" in - /* | [[A-Za-z]]:[[\\/]]*) INTEL_JITEVENTS_INCDIR=$withval/include - INTEL_JITEVENTS_LIBDIR=$withval/$llvm_intel_jitevents_archdir ;; - *) ;; + x86|x86_64) ;; + *) AC_MSG_ERROR([Target architecture $llvm_cv_target_arch does not support Intel JIT Events API.]);; esac - - AC_SUBST(INTEL_JITEVENTS_INCDIR) - AC_SUBST(INTEL_JITEVENTS_LIBDIR) - - LIBS="$LIBS -L${INTEL_JITEVENTS_LIBDIR}" - CPPFLAGS="$CPPFLAGS -I$INTEL_JITEVENTS_INCDIR" - - AC_SEARCH_LIBS(iJIT_IsProfilingActive, jitprofiling, [], [ - echo "Error! Cannot find libjitprofiling.a. Please check path specified in flag --with-intel-jitevents" - exit -1 - ]) - AC_CHECK_HEADER([jitprofiling.h], [], [ - echo "Error! Cannot find jitprofiling.h. Please check path specified in flag --with-intel-jitevents" - exit -1 - ]) - ], [ AC_SUBST(USE_INTEL_JITEVENTS, [0]) @@ -1717,6 +1711,11 @@ fi dnl OCaml findlib META file AC_CONFIG_FILES([bindings/ocaml/llvm/META.llvm]) +dnl Add --program-prefix value to Makefile.rules. Already an ARG variable. 
+test "x$program_prefix" = "xNONE" && program_prefix="" +AC_SUBST([program_prefix]) + + dnl Do special configuration of Makefiles AC_CONFIG_COMMANDS([setup],,[llvm_src="${srcdir}"]) AC_CONFIG_MAKEFILE(Makefile) diff --git a/bindings/ocaml/executionengine/executionengine_ocaml.c b/bindings/ocaml/executionengine/executionengine_ocaml.c index 5b1e32e..02e0306 100644 --- a/bindings/ocaml/executionengine/executionengine_ocaml.c +++ b/bindings/ocaml/executionengine/executionengine_ocaml.c @@ -75,6 +75,9 @@ static struct custom_operations generic_value_ops = { custom_hash_default, custom_serialize_default, custom_deserialize_default +#ifdef custom_compare_ext_default + , custom_compare_ext_default +#endif }; static value alloc_generic_value(LLVMGenericValueRef Ref) { diff --git a/bindings/ocaml/executionengine/llvm_executionengine.ml b/bindings/ocaml/executionengine/llvm_executionengine.ml index a8535b2..ddb53bb 100644 --- a/bindings/ocaml/executionengine/llvm_executionengine.ml +++ b/bindings/ocaml/executionengine/llvm_executionengine.ml @@ -83,7 +83,7 @@ module ExecutionEngine = struct external free_machine_code: Llvm.llvalue -> t -> unit = "llvm_ee_free_machine_code" - external target_data: t -> Llvm_target.TargetData.t + external target_data: t -> Llvm_target.DataLayout.t = "LLVMGetExecutionEngineTargetData" (* The following are not bound. Patches are welcome. diff --git a/bindings/ocaml/executionengine/llvm_executionengine.mli b/bindings/ocaml/executionengine/llvm_executionengine.mli index 166b7bc..0b06078 100644 --- a/bindings/ocaml/executionengine/llvm_executionengine.mli +++ b/bindings/ocaml/executionengine/llvm_executionengine.mli @@ -155,7 +155,7 @@ module ExecutionEngine: sig (** [target_data ee] is the target data owned by the execution engine [ee]. *) - val target_data : t -> Llvm_target.TargetData.t + val target_data : t -> Llvm_target.DataLayout.t end diff --git a/bindings/ocaml/llvm/llvm.mli b/bindings/ocaml/llvm/llvm.mli index 96448cc..eb6c883 100644 --- a/bindings/ocaml/llvm/llvm.mli +++ b/bindings/ocaml/llvm/llvm.mli @@ -375,7 +375,7 @@ val module_context : llmodule -> llcontext val classify_type : lltype -> TypeKind.t (** [type_is_sized ty] returns whether the type has a size or not. - * If it doesn't then it is not safe to call the [TargetData::] methods on it. + * If it doesn't then it is not safe to call the [DataLayout::] methods on it. 
* *) val type_is_sized : lltype -> bool diff --git a/bindings/ocaml/llvm/llvm_ocaml.c b/bindings/ocaml/llvm/llvm_ocaml.c index a5985d9..c984bd1 100644 --- a/bindings/ocaml/llvm/llvm_ocaml.c +++ b/bindings/ocaml/llvm/llvm_ocaml.c @@ -1277,6 +1277,9 @@ static struct custom_operations builder_ops = { custom_hash_default, custom_serialize_default, custom_deserialize_default +#ifdef custom_compare_ext_default + , custom_compare_ext_default +#endif }; static value alloc_builder(LLVMBuilderRef B) { diff --git a/bindings/ocaml/target/llvm_target.ml b/bindings/ocaml/target/llvm_target.ml index 49940ee..f4891e2 100644 --- a/bindings/ocaml/target/llvm_target.ml +++ b/bindings/ocaml/target/llvm_target.ml @@ -13,7 +13,7 @@ module Endian = struct | Little end -module TargetData = struct +module DataLayout = struct type t external create : string -> t = "llvm_targetdata_create" @@ -23,20 +23,20 @@ module TargetData = struct external dispose : t -> unit = "llvm_targetdata_dispose" end -external byte_order : TargetData.t -> Endian.t = "llvm_byte_order" -external pointer_size : TargetData.t -> int = "llvm_pointer_size" -external intptr_type : TargetData.t -> Llvm.lltype = "LLVMIntPtrType" -external size_in_bits : TargetData.t -> Llvm.lltype -> Int64.t +external byte_order : DataLayout.t -> Endian.t = "llvm_byte_order" +external pointer_size : DataLayout.t -> int = "llvm_pointer_size" +external intptr_type : DataLayout.t -> Llvm.lltype = "LLVMIntPtrType" +external size_in_bits : DataLayout.t -> Llvm.lltype -> Int64.t = "llvm_size_in_bits" -external store_size : TargetData.t -> Llvm.lltype -> Int64.t = "llvm_store_size" -external abi_size : TargetData.t -> Llvm.lltype -> Int64.t = "llvm_abi_size" -external abi_align : TargetData.t -> Llvm.lltype -> int = "llvm_abi_align" -external stack_align : TargetData.t -> Llvm.lltype -> int = "llvm_stack_align" -external preferred_align : TargetData.t -> Llvm.lltype -> int +external store_size : DataLayout.t -> Llvm.lltype -> Int64.t = "llvm_store_size" +external abi_size : DataLayout.t -> Llvm.lltype -> Int64.t = "llvm_abi_size" +external abi_align : DataLayout.t -> Llvm.lltype -> int = "llvm_abi_align" +external stack_align : DataLayout.t -> Llvm.lltype -> int = "llvm_stack_align" +external preferred_align : DataLayout.t -> Llvm.lltype -> int = "llvm_preferred_align" -external preferred_align_of_global : TargetData.t -> Llvm.llvalue -> int +external preferred_align_of_global : DataLayout.t -> Llvm.llvalue -> int = "llvm_preferred_align_of_global" -external element_at_offset : TargetData.t -> Llvm.lltype -> Int64.t -> int +external element_at_offset : DataLayout.t -> Llvm.lltype -> Int64.t -> int = "llvm_element_at_offset" -external offset_of_element : TargetData.t -> Llvm.lltype -> int -> Int64.t +external offset_of_element : DataLayout.t -> Llvm.lltype -> int -> Int64.t = "llvm_offset_of_element" diff --git a/bindings/ocaml/target/llvm_target.mli b/bindings/ocaml/target/llvm_target.mli index c288b9a..ab9c5e4 100644 --- a/bindings/ocaml/target/llvm_target.mli +++ b/bindings/ocaml/target/llvm_target.mli @@ -18,11 +18,11 @@ module Endian : sig | Little end -module TargetData : sig +module DataLayout : sig type t - (** [TargetData.create rep] parses the target data string representation [rep]. - See the constructor llvm::TargetData::TargetData. *) + (** [DataLayout.create rep] parses the target data string representation [rep]. + See the constructor llvm::DataLayout::DataLayout. 
*) external create : string -> t = "llvm_targetdata_create" (** [add_target_data td pm] adds the target data [td] to the pass manager [pm]. @@ -32,64 +32,64 @@ module TargetData : sig = "llvm_targetdata_add" (** [as_string td] is the string representation of the target data [td]. - See the constructor llvm::TargetData::TargetData. *) + See the constructor llvm::DataLayout::DataLayout. *) external as_string : t -> string = "llvm_targetdata_as_string" - (** Deallocates a TargetData. - See the destructor llvm::TargetData::~TargetData. *) + (** Deallocates a DataLayout. + See the destructor llvm::DataLayout::~DataLayout. *) external dispose : t -> unit = "llvm_targetdata_dispose" end (** Returns the byte order of a target, either LLVMBigEndian or LLVMLittleEndian. - See the method llvm::TargetData::isLittleEndian. *) -external byte_order : TargetData.t -> Endian.t = "llvm_byte_order" + See the method llvm::DataLayout::isLittleEndian. *) +external byte_order : DataLayout.t -> Endian.t = "llvm_byte_order" (** Returns the pointer size in bytes for a target. - See the method llvm::TargetData::getPointerSize. *) -external pointer_size : TargetData.t -> int = "llvm_pointer_size" + See the method llvm::DataLayout::getPointerSize. *) +external pointer_size : DataLayout.t -> int = "llvm_pointer_size" (** Returns the integer type that is the same size as a pointer on a target. - See the method llvm::TargetData::getIntPtrType. *) -external intptr_type : TargetData.t -> Llvm.lltype = "LLVMIntPtrType" + See the method llvm::DataLayout::getIntPtrType. *) +external intptr_type : DataLayout.t -> Llvm.lltype = "LLVMIntPtrType" (** Computes the size of a type in bytes for a target. - See the method llvm::TargetData::getTypeSizeInBits. *) -external size_in_bits : TargetData.t -> Llvm.lltype -> Int64.t + See the method llvm::DataLayout::getTypeSizeInBits. *) +external size_in_bits : DataLayout.t -> Llvm.lltype -> Int64.t = "llvm_size_in_bits" (** Computes the storage size of a type in bytes for a target. - See the method llvm::TargetData::getTypeStoreSize. *) -external store_size : TargetData.t -> Llvm.lltype -> Int64.t = "llvm_store_size" + See the method llvm::DataLayout::getTypeStoreSize. *) +external store_size : DataLayout.t -> Llvm.lltype -> Int64.t = "llvm_store_size" (** Computes the ABI size of a type in bytes for a target. - See the method llvm::TargetData::getTypeAllocSize. *) -external abi_size : TargetData.t -> Llvm.lltype -> Int64.t = "llvm_abi_size" + See the method llvm::DataLayout::getTypeAllocSize. *) +external abi_size : DataLayout.t -> Llvm.lltype -> Int64.t = "llvm_abi_size" (** Computes the ABI alignment of a type in bytes for a target. - See the method llvm::TargetData::getTypeABISize. *) -external abi_align : TargetData.t -> Llvm.lltype -> int = "llvm_abi_align" + See the method llvm::DataLayout::getTypeABISize. *) +external abi_align : DataLayout.t -> Llvm.lltype -> int = "llvm_abi_align" (** Computes the call frame alignment of a type in bytes for a target. - See the method llvm::TargetData::getTypeABISize. *) -external stack_align : TargetData.t -> Llvm.lltype -> int = "llvm_stack_align" + See the method llvm::DataLayout::getTypeABISize. *) +external stack_align : DataLayout.t -> Llvm.lltype -> int = "llvm_stack_align" (** Computes the preferred alignment of a type in bytes for a target. - See the method llvm::TargetData::getTypeABISize. *) -external preferred_align : TargetData.t -> Llvm.lltype -> int + See the method llvm::DataLayout::getTypeABISize. 
*) +external preferred_align : DataLayout.t -> Llvm.lltype -> int = "llvm_preferred_align" (** Computes the preferred alignment of a global variable in bytes for a target. - See the method llvm::TargetData::getPreferredAlignment. *) -external preferred_align_of_global : TargetData.t -> Llvm.llvalue -> int + See the method llvm::DataLayout::getPreferredAlignment. *) +external preferred_align_of_global : DataLayout.t -> Llvm.llvalue -> int = "llvm_preferred_align_of_global" (** Computes the structure element that contains the byte offset for a target. See the method llvm::StructLayout::getElementContainingOffset. *) -external element_at_offset : TargetData.t -> Llvm.lltype -> Int64.t -> int +external element_at_offset : DataLayout.t -> Llvm.lltype -> Int64.t -> int = "llvm_element_at_offset" (** Computes the byte offset of the indexed struct element for a target. See the method llvm::StructLayout::getElementContainingOffset. *) -external offset_of_element : TargetData.t -> Llvm.lltype -> int -> Int64.t +external offset_of_element : DataLayout.t -> Llvm.lltype -> int -> Int64.t = "llvm_offset_of_element" diff --git a/bindings/ocaml/target/target_ocaml.c b/bindings/ocaml/target/target_ocaml.c index ca01e77..62fe789 100644 --- a/bindings/ocaml/target/target_ocaml.c +++ b/bindings/ocaml/target/target_ocaml.c @@ -18,18 +18,18 @@ #include "llvm-c/Target.h" #include "caml/alloc.h" -/* string -> TargetData.t */ +/* string -> DataLayout.t */ CAMLprim LLVMTargetDataRef llvm_targetdata_create(value StringRep) { return LLVMCreateTargetData(String_val(StringRep)); } -/* TargetData.t -> [ unit */ +/* DataLayout.t -> [ unit */ CAMLprim value llvm_targetdata_add(LLVMTargetDataRef TD, LLVMPassManagerRef PM){ LLVMAddTargetData(TD, PM); return Val_unit; } -/* TargetData.t -> string */ +/* DataLayout.t -> string */ CAMLprim value llvm_targetdata_as_string(LLVMTargetDataRef TD) { char *StringRep = LLVMCopyStringRepOfTargetData(TD); value Copy = copy_string(StringRep); @@ -37,65 +37,65 @@ CAMLprim value llvm_targetdata_as_string(LLVMTargetDataRef TD) { return Copy; } -/* TargetData.t -> unit */ +/* DataLayout.t -> unit */ CAMLprim value llvm_targetdata_dispose(LLVMTargetDataRef TD) { LLVMDisposeTargetData(TD); return Val_unit; } -/* TargetData.t -> Endian.t */ +/* DataLayout.t -> Endian.t */ CAMLprim value llvm_byte_order(LLVMTargetDataRef TD) { return Val_int(LLVMByteOrder(TD)); } -/* TargetData.t -> int */ +/* DataLayout.t -> int */ CAMLprim value llvm_pointer_size(LLVMTargetDataRef TD) { return Val_int(LLVMPointerSize(TD)); } -/* TargetData.t -> Llvm.lltype -> Int64.t */ +/* DataLayout.t -> Llvm.lltype -> Int64.t */ CAMLprim value llvm_size_in_bits(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return caml_copy_int64(LLVMSizeOfTypeInBits(TD, Ty)); } -/* TargetData.t -> Llvm.lltype -> Int64.t */ +/* DataLayout.t -> Llvm.lltype -> Int64.t */ CAMLprim value llvm_store_size(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return caml_copy_int64(LLVMStoreSizeOfType(TD, Ty)); } -/* TargetData.t -> Llvm.lltype -> Int64.t */ +/* DataLayout.t -> Llvm.lltype -> Int64.t */ CAMLprim value llvm_abi_size(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return caml_copy_int64(LLVMABISizeOfType(TD, Ty)); } -/* TargetData.t -> Llvm.lltype -> int */ +/* DataLayout.t -> Llvm.lltype -> int */ CAMLprim value llvm_abi_align(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return Val_int(LLVMABIAlignmentOfType(TD, Ty)); } -/* TargetData.t -> Llvm.lltype -> int */ +/* DataLayout.t -> Llvm.lltype -> int */ CAMLprim value llvm_stack_align(LLVMTargetDataRef TD, 
LLVMTypeRef Ty) { return Val_int(LLVMCallFrameAlignmentOfType(TD, Ty)); } -/* TargetData.t -> Llvm.lltype -> int */ +/* DataLayout.t -> Llvm.lltype -> int */ CAMLprim value llvm_preferred_align(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return Val_int(LLVMPreferredAlignmentOfType(TD, Ty)); } -/* TargetData.t -> Llvm.llvalue -> int */ +/* DataLayout.t -> Llvm.llvalue -> int */ CAMLprim value llvm_preferred_align_of_global(LLVMTargetDataRef TD, LLVMValueRef GlobalVar) { return Val_int(LLVMPreferredAlignmentOfGlobal(TD, GlobalVar)); } -/* TargetData.t -> Llvm.lltype -> Int64.t -> int */ +/* DataLayout.t -> Llvm.lltype -> Int64.t -> int */ CAMLprim value llvm_element_at_offset(LLVMTargetDataRef TD, LLVMTypeRef Ty, value Offset) { return Val_int(LLVMElementAtOffset(TD, Ty, Int_val(Offset))); } -/* TargetData.t -> Llvm.lltype -> int -> Int64.t */ +/* DataLayout.t -> Llvm.lltype -> int -> Int64.t */ CAMLprim value llvm_offset_of_element(LLVMTargetDataRef TD, LLVMTypeRef Ty, value Index) { return caml_copy_int64(LLVMOffsetOfElement(TD, Ty, Int_val(Index))); diff --git a/cmake/config-ix.cmake b/cmake/config-ix.cmake index 25d6211..fcd5dd5 100755 --- a/cmake/config-ix.cmake +++ b/cmake/config-ix.cmake @@ -85,13 +85,25 @@ check_include_file(mach-o/dyld.h HAVE_MACH_O_DYLD_H) # library checks if( NOT PURE_WINDOWS ) check_library_exists(pthread pthread_create "" HAVE_LIBPTHREAD) - check_library_exists(pthread pthread_getspecific "" HAVE_PTHREAD_GETSPECIFIC) - check_library_exists(pthread pthread_rwlock_init "" HAVE_PTHREAD_RWLOCK_INIT) + if (HAVE_LIBPTHREAD) + check_library_exists(pthread pthread_getspecific "" HAVE_PTHREAD_GETSPECIFIC) + check_library_exists(pthread pthread_rwlock_init "" HAVE_PTHREAD_RWLOCK_INIT) + check_library_exists(pthread pthread_mutex_lock "" HAVE_PTHREAD_MUTEX_LOCK) + else() + # this could be Android + check_library_exists(c pthread_create "" PTHREAD_IN_LIBC) + if (PTHREAD_IN_LIBC) + check_library_exists(c pthread_getspecific "" HAVE_PTHREAD_GETSPECIFIC) + check_library_exists(c pthread_rwlock_init "" HAVE_PTHREAD_RWLOCK_INIT) + check_library_exists(c pthread_mutex_lock "" HAVE_PTHREAD_MUTEX_LOCK) + endif() + endif() check_library_exists(dl dlopen "" HAVE_LIBDL) endif() # function checks check_symbol_exists(arc4random "stdlib.h" HAVE_ARC4RANDOM) +check_symbol_exists(backtrace "execinfo.h" HAVE_BACKTRACE) check_symbol_exists(getpagesize unistd.h HAVE_GETPAGESIZE) check_symbol_exists(getrusage sys/resource.h HAVE_GETRUSAGE) check_symbol_exists(setrlimit sys/resource.h HAVE_SETRLIMIT) @@ -134,9 +146,6 @@ check_symbol_exists(strchr string.h HAVE_STRCHR) check_symbol_exists(strcmp string.h HAVE_STRCMP) check_symbol_exists(strdup string.h HAVE_STRDUP) check_symbol_exists(strrchr string.h HAVE_STRRCHR) -if( NOT PURE_WINDOWS ) - check_symbol_exists(pthread_mutex_lock pthread.h HAVE_PTHREAD_MUTEX_LOCK) -endif() check_symbol_exists(sbrk unistd.h HAVE_SBRK) check_symbol_exists(srand48 stdlib.h HAVE_RAND48_SRAND48) if( HAVE_RAND48_SRAND48 ) @@ -294,9 +303,7 @@ get_host_triple(LLVM_HOST_TRIPLE) # By default, we target the host, but this can be overridden at CMake # invocation time. -set(LLVM_DEFAULT_TARGET_TRIPLE "${LLVM_HOST_TRIPLE}") set(LLVM_HOSTTRIPLE "${LLVM_HOST_TRIPLE}") -set(TARGET_TRIPLE "${LLVM_DEFAULT_TARGET_TRIPLE}") # Determine the native architecture. 
string(TOLOWER "${LLVM_TARGET_ARCH}" LLVM_NATIVE_ARCH) @@ -324,6 +331,8 @@ elseif (LLVM_NATIVE_ARCH MATCHES "xcore") set(LLVM_NATIVE_ARCH XCore) elseif (LLVM_NATIVE_ARCH MATCHES "msp430") set(LLVM_NATIVE_ARCH MSP430) +elseif (LLVM_NATIVE_ARCH MATCHES "hexagon") + set(LLVM_NATIVE_ARCH Hexagon) else () message(FATAL_ERROR "Unknown architecture ${LLVM_NATIVE_ARCH}") endif () diff --git a/cmake/modules/AddLLVM.cmake b/cmake/modules/AddLLVM.cmake index f44a27c..43ee9a0 100755 --- a/cmake/modules/AddLLVM.cmake +++ b/cmake/modules/AddLLVM.cmake @@ -135,16 +135,22 @@ endmacro(add_llvm_target) # lld, and Polly. This adds two options. One for the source directory of the # project, which defaults to ${CMAKE_CURRENT_SOURCE_DIR}/${name}. Another to # enable or disable building it with everthing else. +# Additional parameter can be specified as the name of directory. macro(add_llvm_external_project name) - string(TOUPPER ${name} nameUPPER) - set(LLVM_EXTERNAL_${nameUPPER}_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${name}" + set(add_llvm_external_dir "${ARGN}") + if("${add_llvm_external_dir}" STREQUAL "") + set(add_llvm_external_dir ${name}) + endif() + string(REPLACE "-" "_" nameUNDERSCORE ${name}) + string(TOUPPER ${nameUNDERSCORE} nameUPPER) + set(LLVM_EXTERNAL_${nameUPPER}_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${add_llvm_external_dir}" CACHE PATH "Path to ${name} source directory") if (NOT ${LLVM_EXTERNAL_${nameUPPER}_SOURCE_DIR} STREQUAL "" AND EXISTS ${LLVM_EXTERNAL_${nameUPPER}_SOURCE_DIR}/CMakeLists.txt) option(LLVM_EXTERNAL_${nameUPPER}_BUILD "Whether to build ${name} as part of LLVM" ON) if (LLVM_EXTERNAL_${nameUPPER}_BUILD) - add_subdirectory(${LLVM_EXTERNAL_${nameUPPER}_SOURCE_DIR} ${name}) + add_subdirectory(${LLVM_EXTERNAL_${nameUPPER}_SOURCE_DIR} ${add_llvm_external_dir}) endif() endif() endmacro(add_llvm_external_project) @@ -278,11 +284,14 @@ endfunction() function(add_lit_testsuite target comment) parse_arguments(ARG "PARAMS;DEPENDS;ARGS" "" ${ARGN}) - # Register the testsuites, params and depends for the global check rule. - set_property(GLOBAL APPEND PROPERTY LLVM_LIT_TESTSUITES ${ARG_DEFAULT_ARGS}) - set_property(GLOBAL APPEND PROPERTY LLVM_LIT_PARAMS ${ARG_PARAMS}) - set_property(GLOBAL APPEND PROPERTY LLVM_LIT_DEPENDS ${ARG_DEPENDS}) - set_property(GLOBAL APPEND PROPERTY LLVM_LIT_EXTRA_ARGS ${ARG_ARGS}) + # EXCLUDE_FROM_ALL excludes the test ${target} out of check-all. + if(NOT EXCLUDE_FROM_ALL) + # Register the testsuites, params and depends for the global check rule. + set_property(GLOBAL APPEND PROPERTY LLVM_LIT_TESTSUITES ${ARG_DEFAULT_ARGS}) + set_property(GLOBAL APPEND PROPERTY LLVM_LIT_PARAMS ${ARG_PARAMS}) + set_property(GLOBAL APPEND PROPERTY LLVM_LIT_DEPENDS ${ARG_DEPENDS}) + set_property(GLOBAL APPEND PROPERTY LLVM_LIT_EXTRA_ARGS ${ARG_ARGS}) + endif() # Produce a specific suffixed check rule. add_lit_target(${target} ${comment} diff --git a/cmake/modules/LLVMProcessSources.cmake b/cmake/modules/LLVMProcessSources.cmake index 0e410ed..2cef6cf 100644 --- a/cmake/modules/LLVMProcessSources.cmake +++ b/cmake/modules/LLVMProcessSources.cmake @@ -48,7 +48,7 @@ function(llvm_process_sources OUT_VAR) set( f ${CMAKE_CURRENT_SOURCE_DIR}/${s} ) add_file_dependencies( ${f} ${TABLEGEN_OUTPUT} ) endforeach(s) - if( MSVC_IDE ) + if( MSVC_IDE OR XCODE ) # This adds .td and .h files to the Visual Studio solution: # FIXME: Shall we handle *.def here? 
add_td_sources(sources) diff --git a/cmake/platforms/Android.cmake b/cmake/platforms/Android.cmake new file mode 100644 index 0000000..72849b1 --- /dev/null +++ b/cmake/platforms/Android.cmake @@ -0,0 +1,21 @@ +# Toolchain config for Android NDK. +# This is expected to be used with a standalone Android toolchain (see +# docs/STANDALONE-TOOLCHAIN.html in the NDK on how to get one). +# +# Usage: +# mkdir build; cd build +# cmake ..; make +# mkdir android; cd android +# cmake -DLLVM_ANDROID_TOOLCHAIN_DIR=/path/to/android/ndk \ +# -DCMAKE_TOOLCHAIN_FILE=../../cmake/platforms/Android.cmake ../.. +# make + +SET(CMAKE_SYSTEM_NAME Linux) +SET(CMAKE_C_COMPILER ${CMAKE_BINARY_DIR}/../bin/clang) +SET(CMAKE_CXX_COMPILER ${CMAKE_BINARY_DIR}/../bin/clang++) +SET(ANDROID "1" CACHE STRING "ANDROID" FORCE) + +SET(ANDROID_COMMON_FLAGS "-target arm-linux-androideabi --sysroot=${LLVM_ANDROID_TOOLCHAIN_DIR}/sysroot -B${LLVM_ANDROID_TOOLCHAIN_DIR} -mllvm -arm-enable-ehabi") +SET(CMAKE_C_FLAGS "${ANDROID_COMMON_FLAGS}" CACHE STRING "toolchain_cflags" FORCE) +SET(CMAKE_CXX_FLAGS "${ANDROID_COMMON_FLAGS}" CACHE STRING "toolchain_cxxflags" FORCE) +SET(CMAKE_LINK_FLAGS "${ANDROID_COMMON_FLAGS}" CACHE STRING "toolchain_linkflags" FORCE) diff --git a/configure b/configure index 6fbc47c..4fa0705 100755 --- a/configure +++ b/configure @@ -704,6 +704,7 @@ ENABLE_PIC ENABLE_SHARED ENABLE_EMBED_STDCXX ENABLE_TIMESTAMPS +ENABLE_BACKTRACES TARGETS_TO_BUILD LLVM_ENUM_TARGETS LLVM_ENUM_ASM_PRINTERS @@ -766,8 +767,6 @@ COVERED_SWITCH_DEFAULT USE_UDIS86 USE_OPROFILE USE_INTEL_JITEVENTS -INTEL_JITEVENTS_INCDIR -INTEL_JITEVENTS_LIBDIR XML2CONFIG LIBXML2_LIBS LIBXML2_INC @@ -792,6 +791,7 @@ OCAML_LIBDIR ENABLE_VISIBILITY_INLINES_HIDDEN RPATH RDYNAMIC +program_prefix LIBOBJS LTLIBOBJS' ac_subst_files='' @@ -1423,6 +1423,8 @@ Optional Features: Win32 DLL (default is NO) --enable-timestamps Enable embedding timestamp information in build (default is YES) + --enable-backtraces Enable embedding backtraces on crash (default is + YES) --enable-targets Build specific host targets: all or target1,target2,... 
Valid targets are: host, x86, x86_64, sparc, powerpc, arm, mips, spu, hexagon, @@ -1460,10 +1462,8 @@ Optional Packages: --with-udis86= Use udis86 external x86 disassembler library --with-oprofile= Tell OProfile >= 0.9.4 how to symbolize JIT output - --with-intel-jitevents= - Specify location of run-time support library for - Intel JIT API - (default=/opt/intel/vtune_amplifier_xe_2011) + --with-intel-jitevents Notify Intel JIT profiling API of generated code + Some influential environment variables: CC C compiler command @@ -3904,8 +3904,8 @@ else sparc*-*) llvm_cv_target_arch="Sparc" ;; powerpc*-*) llvm_cv_target_arch="PowerPC" ;; arm*-*) llvm_cv_target_arch="ARM" ;; - mips-*) llvm_cv_target_arch="Mips" ;; - mipsel-*) llvm_cv_target_arch="Mips" ;; + mips-* | mips64-*) llvm_cv_target_arch="Mips" ;; + mipsel-* | mips64el-*) llvm_cv_target_arch="Mips" ;; xcore-*) llvm_cv_target_arch="XCore" ;; msp430-*) llvm_cv_target_arch="MSP430" ;; hexagon-*) llvm_cv_target_arch="Hexagon" ;; @@ -3937,8 +3937,8 @@ case $host in sparc*-*) host_arch="Sparc" ;; powerpc*-*) host_arch="PowerPC" ;; arm*-*) host_arch="ARM" ;; - mips-*) host_arch="Mips" ;; - mipsel-*) host_arch="Mips" ;; + mips-* | mips64-*) host_arch="Mips" ;; + mipsel-* | mips64el-*) host_arch="Mips" ;; xcore-*) host_arch="XCore" ;; msp430-*) host_arch="MSP430" ;; hexagon-*) host_arch="Hexagon" ;; @@ -5382,6 +5382,31 @@ cat >>confdefs.h <<_ACEOF _ACEOF + +# Check whether --enable-backtraces was given. +if test "${enable_backtraces+set}" = set; then + enableval=$enable_backtraces; +else + enableval=default +fi + +case "$enableval" in + yes) ENABLE_BACKTRACES=1 + ;; + no) ENABLE_BACKTRACES=0 + ;; + default) ENABLE_BACKTRACES=1 + ;; + *) { { echo "$as_me:$LINENO: error: Invalid setting for --enable-backtraces. Use \"yes\" or \"no\"" >&5 +echo "$as_me: error: Invalid setting for --enable-backtraces. Use \"yes\" or \"no\"" >&2;} + { (exit 1); exit 1; }; } ;; +esac + +cat >>confdefs.h <<_ACEOF +#define ENABLE_BACKTRACES $ENABLE_BACKTRACES +_ACEOF + + TARGETS_TO_BUILD="" # Check whether --enable-targets was given. if test "${enable_targets+set}" = set; then @@ -5404,6 +5429,8 @@ case "$enableval" in arm) TARGETS_TO_BUILD="ARM $TARGETS_TO_BUILD" ;; mips) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; mipsel) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; + mips64) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; + mips64el) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;; spu) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;; xcore) TARGETS_TO_BUILD="XCore $TARGETS_TO_BUILD" ;; msp430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;; @@ -10289,7 +10316,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <&5 +echo "$as_me: error: Invalid setting for --with-intel-jitevents. Use \"yes\" or \"no\"" >&2;} + { (exit 1); exit 1; }; };; + esac + case $llvm_cv_os_type in Linux|Win32|Cygwin|MingW) ;; - *) - { { echo "$as_me:$LINENO: error: - Intel JIT API support is available on Linux and Windows only.\"" >&5 -echo "$as_me: error: - Intel JIT API support is available on Linux and Windows only.\"" >&2;} - { (exit 1); exit 1; }; } ;; + *) { { echo "$as_me:$LINENO: error: Intel JIT API support is available on Linux and Windows only." >&5 +echo "$as_me: error: Intel JIT API support is available on Linux and Windows only." 
>&2;} + { (exit 1); exit 1; }; };; esac - USE_INTEL_JITEVENTS=1 - case "$llvm_cv_target_arch" in - x86) llvm_intel_jitevents_archdir="lib32";; - x86_64) llvm_intel_jitevents_archdir="lib64";; - *) echo "Target architecture $llvm_cv_target_arch does not support Intel JIT Events API" - exit -1;; - esac - INTEL_JITEVENTS_INCDIR="/opt/intel/vtune_amplifier_xe_2011/include" - INTEL_JITEVENTS_LIBDIR="/opt/intel/vtune_amplifier_xe_2011/$llvm_intel_jitevents_archdir" - case "$withval" in - /* | [A-Za-z]:[\\/]*) INTEL_JITEVENTS_INCDIR=$withval/include - INTEL_JITEVENTS_LIBDIR=$withval/$llvm_intel_jitevents_archdir ;; - *) ;; + x86|x86_64) ;; + *) { { echo "$as_me:$LINENO: error: Target architecture $llvm_cv_target_arch does not support Intel JIT Events API." >&5 +echo "$as_me: error: Target architecture $llvm_cv_target_arch does not support Intel JIT Events API." >&2;} + { (exit 1); exit 1; }; };; esac - - - - LIBS="$LIBS -L${INTEL_JITEVENTS_LIBDIR}" - CPPFLAGS="$CPPFLAGS -I$INTEL_JITEVENTS_INCDIR" - - { echo "$as_me:$LINENO: checking for library containing iJIT_IsProfilingActive" >&5 -echo $ECHO_N "checking for library containing iJIT_IsProfilingActive... $ECHO_C" >&6; } -if test "${ac_cv_search_iJIT_IsProfilingActive+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_func_search_save_LIBS=$LIBS -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char iJIT_IsProfilingActive (); -int -main () -{ -return iJIT_IsProfilingActive (); - ; - return 0; -} -_ACEOF -for ac_lib in '' jitprofiling; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - rm -f conftest.$ac_objext conftest$ac_exeext -if { (ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_link") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' - { (case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; } && - { ac_try='test -s conftest$ac_exeext' - { (case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - (exit $ac_status); }; }; then - ac_cv_search_iJIT_IsProfilingActive=$ac_res -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - -fi - -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if test "${ac_cv_search_iJIT_IsProfilingActive+set}" = set; then - break -fi -done -if test "${ac_cv_search_iJIT_IsProfilingActive+set}" = set; then - : -else - ac_cv_search_iJIT_IsProfilingActive=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ echo "$as_me:$LINENO: result: $ac_cv_search_iJIT_IsProfilingActive" >&5 -echo "${ECHO_T}$ac_cv_search_iJIT_IsProfilingActive" >&6; } -ac_res=$ac_cv_search_iJIT_IsProfilingActive -if test "$ac_res" != no; then - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -else - - echo "Error! Cannot find libjitprofiling.a. Please check path specified in flag --with-intel-jitevents" - exit -1 - -fi - - if test "${ac_cv_header_jitprofiling_h+set}" = set; then - { echo "$as_me:$LINENO: checking for jitprofiling.h" >&5 -echo $ECHO_N "checking for jitprofiling.h... $ECHO_C" >&6; } -if test "${ac_cv_header_jitprofiling_h+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -fi -{ echo "$as_me:$LINENO: result: $ac_cv_header_jitprofiling_h" >&5 -echo "${ECHO_T}$ac_cv_header_jitprofiling_h" >&6; } -else - # Is the header compilable? -{ echo "$as_me:$LINENO: checking jitprofiling.h usability" >&5 -echo $ECHO_N "checking jitprofiling.h usability... $ECHO_C" >&6; } -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ -$ac_includes_default -#include -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && - { ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err' - { (case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; } && - { ac_try='test -s conftest.$ac_objext' - { (case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); }; }; then - ac_header_compiler=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_header_compiler=no -fi - -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 -echo "${ECHO_T}$ac_header_compiler" >&6; } - -# Is the header present? -{ echo "$as_me:$LINENO: checking jitprofiling.h presence" >&5 -echo $ECHO_N "checking jitprofiling.h presence... $ECHO_C" >&6; } -cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. 
*/ -#include -_ACEOF -if { (ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } >/dev/null; then - if test -s conftest.err; then - ac_cpp_err=$ac_c_preproc_warn_flag - ac_cpp_err=$ac_cpp_err$ac_c_werror_flag - else - ac_cpp_err= - fi -else - ac_cpp_err=yes -fi -if test -z "$ac_cpp_err"; then - ac_header_preproc=yes -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_header_preproc=no -fi - -rm -f conftest.err conftest.$ac_ext -{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 -echo "${ECHO_T}$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in - yes:no: ) - { echo "$as_me:$LINENO: WARNING: jitprofiling.h: accepted by the compiler, rejected by the preprocessor!" >&5 -echo "$as_me: WARNING: jitprofiling.h: accepted by the compiler, rejected by the preprocessor!" >&2;} - { echo "$as_me:$LINENO: WARNING: jitprofiling.h: proceeding with the compiler's result" >&5 -echo "$as_me: WARNING: jitprofiling.h: proceeding with the compiler's result" >&2;} - ac_header_preproc=yes - ;; - no:yes:* ) - { echo "$as_me:$LINENO: WARNING: jitprofiling.h: present but cannot be compiled" >&5 -echo "$as_me: WARNING: jitprofiling.h: present but cannot be compiled" >&2;} - { echo "$as_me:$LINENO: WARNING: jitprofiling.h: check for missing prerequisite headers?" >&5 -echo "$as_me: WARNING: jitprofiling.h: check for missing prerequisite headers?" >&2;} - { echo "$as_me:$LINENO: WARNING: jitprofiling.h: see the Autoconf documentation" >&5 -echo "$as_me: WARNING: jitprofiling.h: see the Autoconf documentation" >&2;} - { echo "$as_me:$LINENO: WARNING: jitprofiling.h: section \"Present But Cannot Be Compiled\"" >&5 -echo "$as_me: WARNING: jitprofiling.h: section \"Present But Cannot Be Compiled\"" >&2;} - { echo "$as_me:$LINENO: WARNING: jitprofiling.h: proceeding with the preprocessor's result" >&5 -echo "$as_me: WARNING: jitprofiling.h: proceeding with the preprocessor's result" >&2;} - { echo "$as_me:$LINENO: WARNING: jitprofiling.h: in the future, the compiler will take precedence" >&5 -echo "$as_me: WARNING: jitprofiling.h: in the future, the compiler will take precedence" >&2;} - ( cat <<\_ASBOX -## ------------------------------------ ## -## Report this to http://llvm.org/bugs/ ## -## ------------------------------------ ## -_ASBOX - ) | sed "s/^/$as_me: WARNING: /" >&2 - ;; -esac -{ echo "$as_me:$LINENO: checking for jitprofiling.h" >&5 -echo $ECHO_N "checking for jitprofiling.h... $ECHO_C" >&6; } -if test "${ac_cv_header_jitprofiling_h+set}" = set; then - echo $ECHO_N "(cached) $ECHO_C" >&6 -else - ac_cv_header_jitprofiling_h=$ac_header_preproc -fi -{ echo "$as_me:$LINENO: result: $ac_cv_header_jitprofiling_h" >&5 -echo "${ECHO_T}$ac_cv_header_jitprofiling_h" >&6; } - -fi -if test $ac_cv_header_jitprofiling_h = yes; then - : -else - - echo "Error! Cannot find jitprofiling.h. 
Please check path specified in flag --with-intel-jitevents" - exit -1 - -fi - - - - else USE_INTEL_JITEVENTS=0 @@ -21382,6 +21131,10 @@ fi ac_config_files="$ac_config_files bindings/ocaml/llvm/META.llvm" +test "x$program_prefix" = "xNONE" && program_prefix="" + + + ac_config_commands="$ac_config_commands setup" ac_config_commands="$ac_config_commands Makefile" @@ -22219,6 +21972,7 @@ ENABLE_PIC!$ENABLE_PIC$ac_delim ENABLE_SHARED!$ENABLE_SHARED$ac_delim ENABLE_EMBED_STDCXX!$ENABLE_EMBED_STDCXX$ac_delim ENABLE_TIMESTAMPS!$ENABLE_TIMESTAMPS$ac_delim +ENABLE_BACKTRACES!$ENABLE_BACKTRACES$ac_delim TARGETS_TO_BUILD!$TARGETS_TO_BUILD$ac_delim LLVM_ENUM_TARGETS!$LLVM_ENUM_TARGETS$ac_delim LLVM_ENUM_ASM_PRINTERS!$LLVM_ENUM_ASM_PRINTERS$ac_delim @@ -22281,8 +22035,6 @@ COVERED_SWITCH_DEFAULT!$COVERED_SWITCH_DEFAULT$ac_delim USE_UDIS86!$USE_UDIS86$ac_delim USE_OPROFILE!$USE_OPROFILE$ac_delim USE_INTEL_JITEVENTS!$USE_INTEL_JITEVENTS$ac_delim -INTEL_JITEVENTS_INCDIR!$INTEL_JITEVENTS_INCDIR$ac_delim -INTEL_JITEVENTS_LIBDIR!$INTEL_JITEVENTS_LIBDIR$ac_delim XML2CONFIG!$XML2CONFIG$ac_delim LIBXML2_LIBS!$LIBXML2_LIBS$ac_delim LIBXML2_INC!$LIBXML2_INC$ac_delim @@ -22307,6 +22059,7 @@ OCAML_LIBDIR!$OCAML_LIBDIR$ac_delim ENABLE_VISIBILITY_INLINES_HIDDEN!$ENABLE_VISIBILITY_INLINES_HIDDEN$ac_delim RPATH!$RPATH$ac_delim RDYNAMIC!$RDYNAMIC$ac_delim +program_prefix!$program_prefix$ac_delim LIBOBJS!$LIBOBJS$ac_delim LTLIBOBJS!$LTLIBOBJS$ac_delim _ACEOF diff --git a/docs/AliasAnalysis.rst b/docs/AliasAnalysis.rst index 2d4f291..fdaec89 100644 --- a/docs/AliasAnalysis.rst +++ b/docs/AliasAnalysis.rst @@ -230,7 +230,7 @@ any pass dependencies your pass has. Thus you should have something like this: .. code-block:: c++ - void getAnalysisUsage(AnalysisUsage &AU) const { + void getAnalysisUsage(AnalysisUsage &AU) const { AliasAnalysis::getAnalysisUsage(AU); // declare your dependencies here. } diff --git a/docs/BitCodeFormat.rst b/docs/BitCodeFormat.rst index d3995e7..bd26f7b 100644 --- a/docs/BitCodeFormat.rst +++ b/docs/BitCodeFormat.rst @@ -489,6 +489,8 @@ The magic number for LLVM IR files is: When combined with the bitcode magic number and viewed as bytes, this is ``"BC 0xC0DE"``. +.. _Signed VBRs: + Signed VBRs ^^^^^^^^^^^ @@ -507,6 +509,7 @@ As such, signed VBR values of a specific width are emitted as follows: With this encoding, small positive and small negative values can both be emitted efficiently. Signed VBR encoding is used in ``CST_CODE_INTEGER`` and ``CST_CODE_WIDE_INTEGER`` records within ``CONSTANTS_BLOCK`` blocks. +It is also used for phi instruction operands in `MODULE_CODE_VERSION`_ 1. LLVM IR Blocks ^^^^^^^^^^^^^^ @@ -553,13 +556,57 @@ block may contain the following sub-blocks: * `FUNCTION_BLOCK`_ * `METADATA_BLOCK`_ +.. _MODULE_CODE_VERSION: + MODULE_CODE_VERSION Record ^^^^^^^^^^^^^^^^^^^^^^^^^^ ``[VERSION, version#]`` The ``VERSION`` record (code 1) contains a single value indicating the format -version. Only version 0 is supported at this time. +version. Versions 0 and 1 are supported at this time. The difference between +version 0 and 1 is in the encoding of instruction operands in +each `FUNCTION_BLOCK`_. + +In version 0, each value defined by an instruction is assigned an ID +unique to the function. Function-level value IDs are assigned starting from +``NumModuleValues`` since they share the same namespace as module-level +values. The value enumerator resets after each function. When a value is +an operand of an instruction, the value ID is used to represent the operand. 
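A minimal sketch contrasting the absolute operand IDs just described for version 0 with the instruction-relative form that version 1 introduces below; the helper names and the small driver are invented for illustration and are not the actual bitcode writer code.

.. code-block:: c++

   #include <cstdint>
   #include <iostream>

   // Version 0: an operand is simply the (possibly large) absolute value ID.
   uint64_t encodeAbsolute(uint64_t OpID) { return OpID; }

   // Version 1: an operand is encoded relative to the instruction being
   // emitted, so "the value defined by the previous instruction" becomes 1.
   // A forward reference (OpID >= CurID) comes out negative, which is why
   // phi operands are written as signed VBRs.
   int64_t encodeRelative(uint64_t CurID, uint64_t OpID) {
     return static_cast<int64_t>(CurID) - static_cast<int64_t>(OpID);
   }

   int main() {
     std::cout << encodeAbsolute(999) << '\n';        // version 0: 999
     std::cout << encodeRelative(1000, 999) << '\n';  // version 1: 1
     std::cout << encodeRelative(1000, 1002) << '\n'; // forward ref: -2
     return 0;
   }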
+For large functions or large modules, these operand values can be large. + +The encoding in version 1 attempts to avoid large operand values +in common cases. Instead of using the value ID directly, operands are +encoded as relative to the current instruction. Thus, if an operand +is the value defined by the previous instruction, the operand +will be encoded as 1. + +For example, instead of + +.. code-block:: llvm + + #n = load #n-1 + #n+1 = icmp eq #n, #const0 + br #n+1, label #(bb1), label #(bb2) + +version 1 will encode the instructions as + +.. code-block:: llvm + + #n = load #1 + #n+1 = icmp eq #1, (#n+1)-#const0 + br #1, label #(bb1), label #(bb2) + +Note in the example that operands which are constants also use +the relative encoding, while operands like basic block labels +do not use the relative encoding. + +Forward references will result in a negative value. +This can be inefficient, as operands are normally encoded +as unsigned VBRs. However, forward references are rare, except in the +case of phi instructions. For phi instructions, operands are encoded as +`Signed VBRs`_ to deal with forward references. + MODULE_CODE_TRIPLE Record ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/CMake.rst b/docs/CMake.rst index e1761c5..7f0420c 100644 --- a/docs/CMake.rst +++ b/docs/CMake.rst @@ -273,11 +273,6 @@ LLVM-specific variables **LLVM_USE_INTEL_JITEVENTS**:BOOL Enable building support for Intel JIT Events API. Defaults to OFF -**LLVM_INTEL_JITEVENTS_DIR**:PATH - Path to installation of Intel(R) VTune(TM) Amplifier XE 2011, used to locate - the ``jitprofiling`` library. Default = ``%VTUNE_AMPLIFIER_XE_2011_DIR%`` - (Windows) | ``/opt/intel/vtune_amplifier_xe_2011`` (Linux) - Executing the test suite ======================== diff --git a/docs/CodeGenerator.rst b/docs/CodeGenerator.rst index d1d0231..5fab76e 100644 --- a/docs/CodeGenerator.rst +++ b/docs/CodeGenerator.rst @@ -81,7 +81,7 @@ Required components in the code generator The two pieces of the LLVM code generator are the high-level interface to the code generator and the set of reusable components that can be used to build target-specific backends. The two most important interfaces (:raw-html:`` -`TargetMachine`_ :raw-html:`` and :raw-html:`` `TargetData`_ +`TargetMachine`_ :raw-html:`` and :raw-html:`` `DataLayout`_ :raw-html:``) are the only ones that are required to be defined for a backend to fit into the LLVM system, but the others must be defined if the reusable code generator components are going to be used. @@ -197,7 +197,7 @@ any particular client. These classes are designed to capture the *abstract* properties of the target (such as the instructions and registers it has), and do not incorporate any particular pieces of code generation algorithms. -All of the target description classes (except the :raw-html:`` `TargetData`_ +All of the target description classes (except the :raw-html:`` `DataLayout`_ :raw-html:`` class) are designed to be subclassed by the concrete target implementation, and have virtual methods implemented. To get to these implementations, the :raw-html:`` `TargetMachine`_ :raw-html:`` class @@ -214,18 +214,18 @@ the ``get*Info`` methods (``getInstrInfo``, ``getRegisterInfo``, ``getFrameInfo``, etc.). This class is designed to be specialized by a concrete target implementation (e.g., ``X86TargetMachine``) which implements the various virtual methods. 
The only required target description class is the -:raw-html:`` `TargetData`_ :raw-html:`` class, but if the code +:raw-html:`` `DataLayout`_ :raw-html:`` class, but if the code generator components are to be used, the other interfaces should be implemented as well. -.. _TargetData: +.. _DataLayout: -The ``TargetData`` class +The ``DataLayout`` class ------------------------ -The ``TargetData`` class is the only required target description class, and it -is the only class that is not extensible (you cannot derived a new class from -it). ``TargetData`` specifies information about how the target lays out memory +The ``DataLayout`` class is the only required target description class, and it +is the only class that is not extensible (you cannot derive a new class from +it). ``DataLayout`` specifies information about how the target lays out memory for structures, the alignment requirements for various data types, the size of pointers in the target, and whether the target is little-endian or big-endian. @@ -248,7 +248,7 @@ operations. Among other things, this class indicates: * the type to use for shift amounts, and * various high-level characteristics, like whether it is profitable to turn - division by a constant into a multiplication sequence + division by a constant into a multiplication sequence. The ``TargetRegisterInfo`` class -------------------------------- @@ -256,10 +256,10 @@ The ``TargetRegisterInfo`` class The ``TargetRegisterInfo`` class is used to describe the register file of the target and any interactions between the registers. -Registers in the code generator are represented in the code generator by -unsigned integers. Physical registers (those that actually exist in the target -description) are unique small numbers, and virtual registers are generally -large. Note that register ``#0`` is reserved as a flag value. +Registers are represented in the code generator by unsigned integers. Physical +registers (those that actually exist in the target description) are unique +small numbers, and virtual registers are generally large. Note that +register ``#0`` is reserved as a flag value. Each register in the processor description has an associated ``TargetRegisterDesc`` entry, which provides a textual name for the register @@ -390,7 +390,7 @@ functions make it easy to build arbitrary machine instructions. Usage of the MachineInstr *MI = BuildMI(X86::MOV32ri, 1, DestReg).addImm(42); // Create the same instr, but insert it at the end of a basic block. - MachineBasicBlock &MBB = ... + MachineBasicBlock &MBB = ... BuildMI(MBB, X86::MOV32ri, 1, DestReg).addImm(42); // Create the same instr, but insert it before a specified iterator point. @@ -404,7 +404,7 @@ functions make it easy to build arbitrary machine instructions. Usage of the MI = BuildMI(X86::SAHF, 0); // Create a self looping branch instruction. - BuildMI(MBB, X86::JNE, 1).addMBB(&MBB); + BuildMI(MBB, X86::JNE, 1).addMBB(&MBB); The key thing to remember with the ``BuildMI`` functions is that you have to specify the number of operands that the machine instruction will take. This @@ -838,8 +838,7 @@ Initial SelectionDAG Construction ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The initial SelectionDAG is na\ :raw-html:`ï`\ vely peephole expanded from -the LLVM input by the ``SelectionDAGLowering`` class in the -``lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp`` file. The intent of this pass +the LLVM input by the ``SelectionDAGBuilder`` class. 
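The ``TargetData`` to ``DataLayout`` rename above also runs through the C API stubs behind the OCaml bindings. A minimal sketch of querying the same target properties from C++ through that C API, using the entry points wrapped in ``target_ocaml.c`` earlier in this patch plus ``LLVMInt32Type`` from the core C API; the layout string is an invented little-endian, 64-bit-pointer example.

.. code-block:: c++

   #include "llvm-c/Core.h"
   #include "llvm-c/Target.h"
   #include <cstdio>

   int main() {
     // Illustrative layout string: little-endian, 64-bit pointers.
     LLVMTargetDataRef TD = LLVMCreateTargetData("e-p:64:64:64");

     printf("pointer size: %u bytes\n", LLVMPointerSize(TD));
     printf("byte order:   %s\n",
            LLVMByteOrder(TD) == LLVMLittleEndian ? "little" : "big");
     printf("i32 ABI size: %llu bytes\n",
            (unsigned long long)LLVMABISizeOfType(TD, LLVMInt32Type()));

     LLVMDisposeTargetData(TD);
     return 0;
   }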
The intent of this pass is to expose as much low-level, target-specific details to the SelectionDAG as possible. This pass is mostly hard-coded (e.g. an LLVM ``add`` turns into an ``SDNode add`` while a ``getelementptr`` is expanded into the obvious diff --git a/docs/CodingStandards.rst b/docs/CodingStandards.rst index a416a1e..9083530 100644 --- a/docs/CodingStandards.rst +++ b/docs/CodingStandards.rst @@ -79,10 +79,11 @@ tree. The standard header looks like this: // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// - // - // This file contains the declaration of the Instruction class, which is the - // base class for all of the VM instructions. - // + /// + /// \file + /// \brief This file contains the declaration of the Instruction class, which is + /// the base class for all of the VM instructions. + /// //===----------------------------------------------------------------------===// A few things to note about this particular format: The "``-*- C++ -*-``" string @@ -100,10 +101,12 @@ The next section in the file is a concise note that defines the license that the file is released under. This makes it perfectly clear what terms the source code can be distributed under and should not be modified in any way. -The main body of the description does not have to be very long in most cases. -Here it's only two lines. If an algorithm is being implemented or something -tricky is going on, a reference to the paper where it is published should be -included, as well as any notes or *gotchas* in the code to watch out for. +The main body is a ``doxygen`` comment describing the purpose of the file. It +should have a ``\brief`` command that describes the file in one or two +sentences. Any additional information should be separated by a blank line. If +an algorithm is being implemented or something tricky is going on, a reference +to the paper where it is published should be included, as well as any notes or +*gotchas* in the code to watch out for. Class overviews """"""""""""""" @@ -143,6 +146,132 @@ useful to use C style (``/* */``) comments however: To comment out a large block of code, use ``#if 0`` and ``#endif``. These nest properly and are better behaved in general than C style comments. +Doxygen Use in Documentation Comments +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Use the ``\file`` command to turn the standard file header into a file-level +comment. + +Include descriptive ``\brief`` paragraphs for all public interfaces (public +classes, member and non-member functions). Explain API use and purpose in +``\brief`` paragraphs, don't just restate the information that can be inferred +from the API name. Put detailed discussion into separate paragraphs. + +To refer to parameter names inside a paragraph, use the ``\p name`` command. +Don't use the ``\arg name`` command since it starts a new paragraph that +contains documentation for the parameter. + +Wrap non-inline code examples in ``\code ... \endcode``. + +To document a function parameter, start a new paragraph with the +``\param name`` command. If the parameter is used as an out or an in/out +parameter, use the ``\param [out] name`` or ``\param [in,out] name`` command, +respectively. + +To describe function return value, start a new paragraph with the ``\returns`` +command. + +A minimal documentation comment: + +.. code-block:: c++ + + /// \brief Does foo and bar. + void fooBar(bool Baz); + +A documentation comment that uses all Doxygen features in a preferred way: + +.. 
code-block:: c++ + + /// \brief Does foo and bar. + /// + /// Does not do foo the usual way if \p Baz is true. + /// + /// Typical usage: + /// \code + /// fooBar(false, "quux", Res); + /// \endcode + /// + /// \param Quux kind of foo to do. + /// \param [out] Result filled with bar sequence on foo success. + /// + /// \returns true on success. + bool fooBar(bool Baz, StringRef Quux, std::vector &Result); + +Don't duplicate the documentation comment in the header file and in the +implementation file. Put the documentation comments for public APIs into the +header file. Documentation comments for private APIs can go to the +implementation file. In any case, implementation files can include additional +comments (not necessarily in Doxygen markup) to explain implementation details +as needed. + +Don't duplicate function or class name at the beginning of the comment. +For humans it is obvious which function or class is being documented; +automatic documentation processing tools are smart enough to bind the comment +to the correct declaration. + +Wrong: + +.. code-block:: c++ + + // In Something.h: + + /// Something - An abstraction for some complicated thing. + class Something { + public: + /// fooBar - Does foo and bar. + void fooBar(); + }; + + // In Something.cpp: + + /// fooBar - Does foo and bar. + void Something::fooBar() { ... } + +Correct: + +.. code-block:: c++ + + // In Something.h: + + /// \brief An abstraction for some complicated thing. + class Something { + public: + /// \brief Does foo and bar. + void fooBar(); + }; + + // In Something.cpp: + + // Builds a B-tree in order to do foo. See paper by... + void Something::fooBar() { ... } + +It is not required to use additional Doxygen features, but sometimes it might +be a good idea to do so. + +Consider: + +* adding comments to any narrow namespace containing a collection of + related functions or types; + +* using top-level groups to organize a collection of related functions at + namespace scope where the grouping is smaller than the namespace; + +* using member groups and additional comments attached to member + groups to organize within a class. + +For example: + +.. code-block:: c++ + + class Something { + /// \name Functions that do Foo. + /// @{ + void fooBar(); + void fooBaz(); + /// @} + ... + }; + ``#include`` Style ^^^^^^^^^^^^^^^^^^ @@ -421,9 +550,9 @@ exit from a function, consider this "bad" code: .. code-block:: c++ - Value *DoSomething(Instruction *I) { + Value *doSomething(Instruction *I) { if (!isa(I) && - I->hasOneUse() && SomeOtherThing(I)) { + I->hasOneUse() && doOtherThing(I)) { ... some long code .... } @@ -445,7 +574,7 @@ It is much preferred to format the code like this: .. code-block:: c++ - Value *DoSomething(Instruction *I) { + Value *doSomething(Instruction *I) { // Terminators never need 'something' done to them because ... if (isa(I)) return 0; @@ -456,7 +585,7 @@ It is much preferred to format the code like this: return 0; // This is really just here for example. - if (!SomeOtherThing(I)) + if (!doOtherThing(I)) return 0; ... some long code .... @@ -601,9 +730,8 @@ code to be structured like this: .. code-block:: c++ - /// ListContainsFoo - Return true if the specified list has an element that is - /// a foo. - static bool ListContainsFoo(const std::vector &List) { + /// \returns true if the specified list has an element that is a foo. 
+ static bool containsFoo(const std::vector &List) { for (unsigned i = 0, e = List.size(); i != e; ++i) if (List[i]->isFoo()) return true; @@ -611,7 +739,7 @@ code to be structured like this: } ... - if (ListContainsFoo(BarList)) { + if (containsFoo(BarList)) { ... } @@ -714,7 +842,7 @@ enforced, and hopefully what to do about it. Here is one complete example: .. code-block:: c++ inline Value *getOperand(unsigned i) { - assert(i < Operands.size() && "getOperand() out of range!"); + assert(i < Operands.size() && "getOperand() out of range!"); return Operands[i]; } @@ -734,23 +862,28 @@ Here are more examples: You get the idea. -Please be aware that, when adding assert statements, not all compilers are aware -of the semantics of the assert. In some places, asserts are used to indicate a -piece of code that should not be reached. These are typically of the form: +In the past, asserts were used to indicate a piece of code that should not be +reached. These were typically of the form: .. code-block:: c++ - assert(0 && "Some helpful error message"); + assert(0 && "Invalid radix for integer literal"); -When used in a function that returns a value, they should be followed with a -return statement and a comment indicating that this line is never reached. This -will prevent a compiler which is unable to deduce that the assert statement -never returns from generating a warning. +This has a few issues, the main one being that some compilers might not +understand the assertion, or warn about a missing return in builds where +assertions are compiled out. + +Today, we have something much better: ``llvm_unreachable``: .. code-block:: c++ - assert(0 && "Some helpful error message"); - return 0; + llvm_unreachable("Invalid radix for integer literal"); + +When assertions are enabled, this will print the message if it's ever reached +and then exit the program. When assertions are disabled (i.e. in release +builds), ``llvm_unreachable`` becomes a hint to compilers to skip generating +code for this branch. If the compiler does not support this, it will fall back +to the "abort" implementation. Another issue is that values used only by assertions will produce an "unused value" warning when assertions are disabled. For example, this code will warn: @@ -818,6 +951,52 @@ least one out-of-line virtual method in the class. Without this, the compiler will copy the vtable and RTTI into every ``.o`` file that ``#include``\s the header, bloating ``.o`` file sizes and increasing link times. +Don't use default labels in fully covered switches over enumerations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``-Wswitch`` warns if a switch, without a default label, over an enumeration +does not cover every enumeration value. If you write a default label on a fully +covered switch over an enumeration then the ``-Wswitch`` warning won't fire +when new elements are added to that enumeration. To help avoid adding these +kinds of defaults, Clang has the warning ``-Wcovered-switch-default`` which is +off by default but turned on when building LLVM with a version of Clang that +supports the warning. + +A knock-on effect of this stylistic requirement is that when building LLVM with +GCC you may get warnings related to "control may reach end of non-void function" +if you return from each case of a covered switch-over-enum because GCC assumes +that the enum expression may take any representable value, not just those of +individual enumerators. 
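A minimal sketch of the pattern this section describes: a switch with no ``default`` that covers every enumerator, followed (as recommended next) by ``llvm_unreachable`` to satisfy GCC's end-of-function analysis. The enumeration and function are invented for illustration.

.. code-block:: c++

   #include "llvm/Support/ErrorHandling.h"

   enum Color { Red, Green, Blue };

   static const char *getColorName(Color C) {
     switch (C) {          // no default: -Wswitch fires if Color grows
     case Red:   return "red";
     case Green: return "green";
     case Blue:  return "blue";
     }
     llvm_unreachable("Invalid Color");  // keeps GCC from warning about
                                         // falling off the end of the function
   }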
To suppress this warning, use ``llvm_unreachable`` after +the switch. + +Use ``LLVM_DELETED_FUNCTION`` to mark uncallable methods +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Prior to C++11, a common pattern to make a class uncopyable was to declare an +unimplemented copy constructor and copy assignment operator and make them +private. This would give a compiler error for accessing a private method or a +linker error because it wasn't implemented. + +With C++11, we can mark methods that won't be implemented with ``= delete``. +This will trigger a much better error message and tell the compiler that the +method will never be implemented. This enables other checks like +``-Wunused-private-field`` to run correctly on classes that contain these +methods. + +To maintain compatibility with C++03, ``LLVM_DELETED_FUNCTION`` should be used +which will expand to ``= delete`` if the compiler supports it. These methods +should still be declared private. Example of the uncopyable pattern: + +.. code-block:: c++ + + class DontCopy { + private: + DontCopy(const DontCopy&) LLVM_DELETED_FUNCTION; + DontCopy &operator =(const DontCopy&) LLVM_DELETED_FUNCTION; + public: + ... + }; + Don't evaluate ``end()`` every time through a loop ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1002,21 +1181,21 @@ If a namespace definition is small and *easily* fits on a screen (say, less than namespace llvm { namespace X86 { - /// RelocationType - An enum for the x86 relocation codes. Note that + /// \brief An enum for the x86 relocation codes. Note that /// the terminology here doesn't follow x86 convention - word means /// 32-bit and dword means 64-bit. enum RelocationType { - /// reloc_pcrel_word - PC relative relocation, add the relocated value to + /// \brief PC relative relocation, add the relocated value to /// the value already in memory, after we adjust it for where the PC is. reloc_pcrel_word = 0, - /// reloc_picrel_word - PIC base relative relocation, add the relocated - /// value to the value already in memory, after we adjust it for where the + /// \brief PIC base relative relocation, add the relocated value to + /// the value already in memory, after we adjust it for where the /// PIC base is. reloc_picrel_word = 1, - /// reloc_absolute_word, reloc_absolute_dword - Absolute relocation, just - /// add the relocated value to the value already in memory. + /// \brief Absolute relocation, just add the relocated value to the + /// value already in memory. reloc_absolute_word = 2, reloc_absolute_dword = 3 }; @@ -1035,7 +1214,7 @@ closed. For example: namespace llvm { namespace knowledge { - /// Grokable - This class represents things that Smith can have an intimate + /// This class represents things that Smith can have an intimate /// understanding of and contains the data associated with it. class Grokable { ... @@ -1092,7 +1271,7 @@ good: }; } // end anonymous namespace - static void Helper() { + static void runHelper() { ... } @@ -1112,7 +1291,7 @@ This is bad: bool operator<(const char *RHS) const; }; - void Helper() { + void runHelper() { ... } @@ -1122,7 +1301,7 @@ This is bad: } // end anonymous namespace -This is bad specifically because if you're looking at "``Helper``" in the middle +This is bad specifically because if you're looking at "``runHelper``" in the middle of a large C++ file, that you have no immediate way to tell if it is local to the file. When it is marked static explicitly, this is immediately obvious. 
Also, there is no reason to enclose the definition of "``operator<``" in the diff --git a/docs/CommandGuide/FileCheck.rst b/docs/CommandGuide/FileCheck.rst index 51a9bf6..1d7a462 100644 --- a/docs/CommandGuide/FileCheck.rst +++ b/docs/CommandGuide/FileCheck.rst @@ -45,6 +45,11 @@ OPTIONS +**--input-file** *filename* + + File to check (defaults to stdin). + + **--strict-whitespace** By default, FileCheck canonicalizes input horizontal whitespace (spaces and @@ -271,8 +276,9 @@ simple example: The first check line matches a regex (**%[a-z]+**) and captures it into the variable "REGISTER". The second line verifies that whatever is in REGISTER occurs later in the file after an "andw". FileCheck variable references are -always contained in **[[ ]]** pairs, are named, and their names can be -name, then it is a definition of the variable, if not, it is a use. +always contained in **[[ ]]** pairs, and their names can be formed with the +regex **[a-zA-Z][a-zA-Z0-9]***. If a colon follows the name, then it is a +definition of the variable; otherwise, it is a use. FileCheck variables can be defined multiple times, and uses always get the latest value. Note that variables are all read at the start of a "CHECK" line diff --git a/docs/CommandGuide/lit.rst b/docs/CommandGuide/lit.rst index 3eb0be9..9e96cd2 100644 --- a/docs/CommandGuide/lit.rst +++ b/docs/CommandGuide/lit.rst @@ -125,6 +125,10 @@ EXECUTION OPTIONS *--error-exitcode* argument for valgrind is used so that valgrind failures will cause the program to exit with a non-zero status. + When this option is enabled, **lit** will also automatically provide a + "valgrind" feature that can be used to conditionally disable (or expect failure + in) certain tests. + **--vg-arg**\ =\ *ARG* @@ -133,6 +137,15 @@ EXECUTION OPTIONS +**--vg-leak** + + When *--vg* is used, enable memory leak checks. When this option is enabled, + **lit** will also automatically provide a "vg_leak" feature that can be + used to conditionally disable (or expect failure in) certain tests. + + + + **--time-tests** Track the wall time individual tests take to execute and includes the results in diff --git a/docs/CompilerWriterInfo.html b/docs/CompilerWriterInfo.html deleted file mode 100644 index 67da783..0000000 --- a/docs/CompilerWriterInfo.html +++ /dev/null @@ -1,267 +0,0 @@ - - - - - Architecture/platform information for compiler writers - - - - - -

    - - - diff --git a/docs/CompilerWriterInfo.rst b/docs/CompilerWriterInfo.rst new file mode 100644 index 0000000..e41f5f9 --- /dev/null +++ b/docs/CompilerWriterInfo.rst @@ -0,0 +1,118 @@ +.. _compiler_writer_info: + +======================================================== +Architecture & Platform Information for Compiler Writers +======================================================== + +.. contents:: + :local: + +.. note:: + + This document is a work-in-progress. Additions and clarifications are + welcome. + + Compiled by `Misha Brukman `_. + +Hardware +======== + +ARM +--- + +* `ARM documentation `_ (`Processor Cores `_ Cores) + +* `ABI `_ + +Itanium (ia64) +-------------- + +* `Itanium documentation `_ + +MIPS +---- + +* `MIPS Processor Architecture `_ + +PowerPC +------- + +IBM - Official manuals and docs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* `PowerPC Architecture Book `_ + + * Book I: `PowerPC User Instruction Set Architecture `_ + + * Book II: `PowerPC Virtual Environment Architecture `_ + + * Book III: `PowerPC Operating Environment Architecture `_ + +* `PowerPC Compiler Writer's Guide `_ + +* `PowerPC Processor Manuals `_ + +* `Intro to PowerPC Architecture `_ + +* `IBM AIX/5L for POWER Assembly Reference `_ + +Other documents, collections, notes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* `PowerPC ABI documents `_ +* `PowerPC64 alignment of long doubles (from GCC) `_ +* `Long branch stubs for powerpc64-linux (from binutils) `_ + +SPARC +----- + +* `SPARC resources `_ +* `SPARC standards `_ + +X86 +--- + +AMD - Official manuals and docs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* `AMD processor manuals `_ +* `X86-64 ABI `_ + +Intel - Official manuals and docs +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* `IA-32 manuals `_ +* `Intel Itanium documentation `_ + +Other x86-specific information +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +* `Calling conventions for different C++ compilers and operating systems `_ + +Other relevant lists +-------------------- + +* `GCC reading list `_ + +ABI +=== + +Linux +----- + +* `PowerPC 64-bit ELF ABI Supplement `_ + +OS X +---- + +* `Mach-O Runtime Architecture `_ +* `Notes on Mach-O ABI `_ + +Miscellaneous Resources +======================= + +* `Executable File Format library `_ + +* `GCC prefetch project `_ page has a + good survey of the prefetching capabilities of a variety of modern + processors. diff --git a/docs/DebuggingJITedCode.html b/docs/DebuggingJITedCode.html deleted file mode 100644 index 652572c..0000000 --- a/docs/DebuggingJITedCode.html +++ /dev/null @@ -1,184 +0,0 @@ - - - - - Debugging JITed Code With GDB - - - - -

    - - diff --git a/docs/DebuggingJITedCode.rst b/docs/DebuggingJITedCode.rst new file mode 100644 index 0000000..eeb2f77 --- /dev/null +++ b/docs/DebuggingJITedCode.rst @@ -0,0 +1,147 @@ +.. _debugging-jited-code: + +============================== +Debugging JIT-ed Code With GDB +============================== + +.. sectionauthor:: Reid Kleckner and Eli Bendersky + +Background +========== + +Without special runtime support, debugging dynamically generated code with +GDB (as well as most debuggers) can be quite painful. Debuggers generally +read debug information from the object file of the code, but for JITed +code, there is no such file to look for. + +In order to communicate the necessary debug info to GDB, an interface for +registering JITed code with debuggers has been designed and implemented for +GDB and LLVM MCJIT. At a high level, whenever MCJIT generates new machine code, +it does so in an in-memory object file that contains the debug information in +DWARF format. MCJIT then adds this in-memory object file to a global list of +dynamically generated object files and calls a special function +(``__jit_debug_register_code``) marked noinline that GDB knows about. When +GDB attaches to a process, it puts a breakpoint in this function and loads all +of the object files in the global list. When MCJIT calls the registration +function, GDB catches the breakpoint signal, loads the new object file from +the inferior's memory, and resumes the execution. In this way, GDB can get the +necessary debug information. + +GDB Version +=========== + +In order to debug code JIT-ed by LLVM, you need GDB 7.0 or newer, which is +available on most modern distributions of Linux. The version of GDB that +Apple ships with Xcode has been frozen at 6.3 for a while. LLDB may be a +better option for debugging JIT-ed code on Mac OS X. + + +Debugging MCJIT-ed code +======================= + +The emerging MCJIT component of LLVM allows full debugging of JIT-ed code with +GDB. This is due to MCJIT's ability to use the MC emitter to provide full +DWARF debugging information to GDB. + +Note that lli has to be passed the ``-use-mcjit`` flag to JIT the code with +MCJIT instead of the old JIT. + +Example +------- + +Consider the following C code (with line numbers added to make the example +easier to follow): + +.. + FIXME: + Sphinx has the ability to automatically number these lines by adding + :linenos: on the line immediately following the `.. code-block:: c`, but + it looks like garbage; the line numbers don't even line up with the + lines. Is this a Sphinx bug, or is it a CSS problem? + +.. code-block:: c + + 1 int compute_factorial(int n) + 2 { + 3 if (n <= 1) + 4 return 1; + 5 + 6 int f = n; + 7 while (--n > 1) + 8 f *= n; + 9 return f; + 10 } + 11 + 12 + 13 int main(int argc, char** argv) + 14 { + 15 if (argc < 2) + 16 return -1; + 17 char firstletter = argv[1][0]; + 18 int result = compute_factorial(firstletter - '0'); + 19 + 20 // Returned result is clipped at 255... + 21 return result; + 22 } + +Here is a sample command line session that shows how to build and run this +code via ``lli`` inside GDB: + +.. code-block:: bash + + $ $BINPATH/clang -cc1 -O0 -g -emit-llvm showdebug.c + $ gdb --quiet --args $BINPATH/lli -use-mcjit showdebug.ll 5 + Reading symbols from $BINPATH/lli...done. + (gdb) b showdebug.c:6 + No source file named showdebug.c. + Make breakpoint pending on future shared library load? (y or [n]) y + Breakpoint 1 (showdebug.c:6) pending. 
+ (gdb) r + Starting program: $BINPATH/lli -use-mcjit showdebug.ll 5 + [Thread debugging using libthread_db enabled] + + Breakpoint 1, compute_factorial (n=5) at showdebug.c:6 + 6 int f = n; + (gdb) p n + $1 = 5 + (gdb) p f + $2 = 0 + (gdb) n + 7 while (--n > 1) + (gdb) p f + $3 = 5 + (gdb) b showdebug.c:9 + Breakpoint 2 at 0x7ffff7ed404c: file showdebug.c, line 9. + (gdb) c + Continuing. + + Breakpoint 2, compute_factorial (n=1) at showdebug.c:9 + 9 return f; + (gdb) p f + $4 = 120 + (gdb) bt + #0 compute_factorial (n=1) at showdebug.c:9 + #1 0x00007ffff7ed40a9 in main (argc=2, argv=0x16677e0) at showdebug.c:18 + #2 0x3500000001652748 in ?? () + #3 0x00000000016677e0 in ?? () + #4 0x0000000000000002 in ?? () + #5 0x0000000000d953b3 in llvm::MCJIT::runFunction (this=0x16151f0, F=0x1603020, ArgValues=...) at /home/ebenders_test/llvm_svn_rw/lib/ExecutionEngine/MCJIT/MCJIT.cpp:161 + #6 0x0000000000dc8872 in llvm::ExecutionEngine::runFunctionAsMain (this=0x16151f0, Fn=0x1603020, argv=..., envp=0x7fffffffe040) + at /home/ebenders_test/llvm_svn_rw/lib/ExecutionEngine/ExecutionEngine.cpp:397 + #7 0x000000000059c583 in main (argc=4, argv=0x7fffffffe018, envp=0x7fffffffe040) at /home/ebenders_test/llvm_svn_rw/tools/lli/lli.cpp:324 + (gdb) finish + Run till exit from #0 compute_factorial (n=1) at showdebug.c:9 + 0x00007ffff7ed40a9 in main (argc=2, argv=0x16677e0) at showdebug.c:18 + 18 int result = compute_factorial(firstletter - '0'); + Value returned is $5 = 120 + (gdb) p result + $6 = 23406408 + (gdb) n + 21 return result; + (gdb) p result + $7 = 120 + (gdb) c + Continuing. + + Program exited with code 0170. + (gdb) diff --git a/docs/DeveloperPolicy.rst b/docs/DeveloperPolicy.rst index cda281a..e35e729 100644 --- a/docs/DeveloperPolicy.rst +++ b/docs/DeveloperPolicy.rst @@ -137,6 +137,9 @@ reviewees. If someone is kind enough to review your code, you should return the favor for someone else. Note that anyone is welcome to review and give feedback on a patch, but only people with Subversion write access can approve it. +There is a web based code review tool that can optionally be used +for code reviews. See :doc:`Phabricator`. + Code Owners ----------- @@ -279,7 +282,7 @@ If you have recently been granted commit access, these policies apply: #. You are granted *commit-after-approval* to all parts of LLVM. To get approval, submit a `patch`_ to `llvm-commits `_. When approved - you may commit it yourself.
+ you may commit it yourself. #. You are allowed to commit patches without approval which you think are obvious. This is clearly a subjective decision --- we simply expect you to diff --git a/docs/ExtendingLLVM.html b/docs/ExtendingLLVM.html deleted file mode 100644 index 6782787..0000000 --- a/docs/ExtendingLLVM.html +++ /dev/null @@ -1,379 +0,0 @@

    - - - diff --git a/docs/ExtendingLLVM.rst b/docs/ExtendingLLVM.rst new file mode 100644 index 0000000..6df08ee --- /dev/null +++ b/docs/ExtendingLLVM.rst @@ -0,0 +1,306 @@ +.. _extending_llvm: + +============================================================ +Extending LLVM: Adding instructions, intrinsics, types, etc. +============================================================ + +Introduction and Warning +======================== + + +During the course of using LLVM, you may wish to customize it for your research +project or for experimentation. At this point, you may realize that you need to +add something to LLVM, whether it be a new fundamental type, a new intrinsic +function, or a whole new instruction. + +When you come to this realization, stop and think. Do you really need to extend +LLVM? Is it a new fundamental capability that LLVM does not support at its +current incarnation or can it be synthesized from already pre-existing LLVM +elements? If you are not sure, ask on the `LLVM-dev +`_ list. The reason is that +extending LLVM will get involved as you need to update all the different passes +that you intend to use with your extension, and there are ``many`` LLVM analyses +and transformations, so it may be quite a bit of work. + +Adding an `intrinsic function`_ is far easier than adding an +instruction, and is transparent to optimization passes. If your added +functionality can be expressed as a function call, an intrinsic function is the +method of choice for LLVM extension. + +Before you invest a significant amount of effort into a non-trivial extension, +**ask on the list** if what you are looking to do can be done with +already-existing infrastructure, or if maybe someone else is already working on +it. You will save yourself a lot of time and effort by doing so. + +.. _intrinsic function: + +Adding a new intrinsic function +=============================== + +Adding a new intrinsic function to LLVM is much easier than adding a new +instruction. Almost all extensions to LLVM should start as an intrinsic +function and then be turned into an instruction if warranted. + +#. ``llvm/docs/LangRef.html``: + + Document the intrinsic. Decide whether it is code generator specific and + what the restrictions are. Talk to other people about it so that you are + sure it's a good idea. + +#. ``llvm/include/llvm/Intrinsics*.td``: + + Add an entry for your intrinsic. Describe its memory access characteristics + for optimization (this controls whether it will be DCE'd, CSE'd, etc). Note + that any intrinsic using the ``llvm_int_ty`` type for an argument will + be deemed by ``tblgen`` as overloaded and the corresponding suffix will + be required on the intrinsic's name. + +#. ``llvm/lib/Analysis/ConstantFolding.cpp``: + + If it is possible to constant fold your intrinsic, add support to it in the + ``canConstantFoldCallTo`` and ``ConstantFoldCall`` functions. + +#. ``llvm/test/Regression/*``: + + Add test cases for your test cases to the test suite + +Once the intrinsic has been added to the system, you must add code generator +support for it. Generally you must do the following steps: + +Add support to the .td file for the target(s) of your choice in +``lib/Target/*/*.td``. + + This is usually a matter of adding a pattern to the .td file that matches the + intrinsic, though it may obviously require adding the instructions you want to + generate as well. There are lots of examples in the PowerPC and X86 backend + to follow. 
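+
+As an illustration, once a new intrinsic is defined, a pass or front end can
+emit calls to it through the usual intrinsic APIs. The sketch below assumes a
+hypothetical non-overloaded ``llvm.my.op`` intrinsic that takes and returns
+``i32``; ``Intrinsic::my_op`` is the enum value ``tblgen`` would generate for
+its ``.td`` entry:
+
+.. code-block:: c++
+
+  #include "llvm/IRBuilder.h"
+  #include "llvm/Intrinsics.h"
+  #include "llvm/Module.h"
+  using namespace llvm;
+
+  // Insert a call to the (hypothetical) llvm.my.op intrinsic before InsertPt.
+  static Value *emitMyOp(Module *M, Value *Arg, Instruction *InsertPt) {
+    Function *MyOp = Intrinsic::getDeclaration(M, Intrinsic::my_op);
+    IRBuilder<> Builder(InsertPt);
+    return Builder.CreateCall(MyOp, Arg, "myop");
+  }
+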
+ +Adding a new SelectionDAG node +============================== + +As with intrinsics, adding a new SelectionDAG node to LLVM is much easier than +adding a new instruction. New nodes are often added to help represent +instructions common to many targets. These nodes often map to an LLVM +instruction (add, sub) or intrinsic (byteswap, population count). In other +cases, new nodes have been added to allow many targets to perform a common task +(converting between floating point and integer representation) or capture more +complicated behavior in a single node (rotate). + +#. ``include/llvm/CodeGen/ISDOpcodes.h``: + + Add an enum value for the new SelectionDAG node. + +#. ``lib/CodeGen/SelectionDAG/SelectionDAG.cpp``: + + Add code to print the node to ``getOperationName``. If your new node can be + evaluated at compile time when given constant arguments (such as an add of a + constant with another constant), find the ``getNode`` method that takes the + appropriate number of arguments, and add a case for your node to the switch + statement that performs constant folding for nodes that take the same number + of arguments as your new node. + +#. ``lib/CodeGen/SelectionDAG/LegalizeDAG.cpp``: + + Add code to `legalize, promote, and expand + `_ the node as necessary. At a + minimum, you will need to add a case statement for your node in + ``LegalizeOp`` which calls LegalizeOp on the node's operands, and returns a + new node if any of the operands changed as a result of being legalized. It + is likely that not all targets supported by the SelectionDAG framework will + natively support the new node. In this case, you must also add code in your + node's case statement in ``LegalizeOp`` to Expand your node into simpler, + legal operations. The case for ``ISD::UREM`` for expanding a remainder into + a divide, multiply, and a subtract is a good example. + +#. ``lib/CodeGen/SelectionDAG/LegalizeDAG.cpp``: + + If targets may support the new node being added only at certain sizes, you + will also need to add code to your node's case statement in ``LegalizeOp`` + to Promote your node's operands to a larger size, and perform the correct + operation. You will also need to add code to ``PromoteOp`` to do this as + well. For a good example, see ``ISD::BSWAP``, which promotes its operand to + a wider size, performs the byteswap, and then shifts the correct bytes right + to emulate the narrower byteswap in the wider type. + +#. ``lib/CodeGen/SelectionDAG/LegalizeDAG.cpp``: + + Add a case for your node in ``ExpandOp`` to teach the legalizer how to + perform the action represented by the new node on a value that has been split + into high and low halves. This case will be used to support your node with a + 64 bit operand on a 32 bit target. + +#. ``lib/CodeGen/SelectionDAG/DAGCombiner.cpp``: + + If your node can be combined with itself, or other existing nodes in a + peephole-like fashion, add a visit function for it, and call that function + from. There are several good examples for simple combines you can do; + ``visitFABS`` and ``visitSRL`` are good starting places. + +#. ``lib/Target/PowerPC/PPCISelLowering.cpp``: + + Each target has an implementation of the ``TargetLowering`` class, usually in + its own file (although some targets include it in the same file as the + DAGToDAGISel). The default behavior for a target is to assume that your new + node is legal for all types that are legal for that target. 
If this target + does not natively support your node, then tell the target to either Promote + it (if it is supported at a larger type) or Expand it. This will cause the + code you wrote in ``LegalizeOp`` above to decompose your new node into other + legal nodes for this target. + +#. ``lib/Target/TargetSelectionDAG.td``: + + Most current targets supported by LLVM generate code using the DAGToDAG + method, where SelectionDAG nodes are pattern matched to target-specific + nodes, which represent individual instructions. In order for the targets to + match an instruction to your new node, you must add a def for that node to + the list in this file, with the appropriate type constraints. Look at + ``add``, ``bswap``, and ``fadd`` for examples. + +#. ``lib/Target/PowerPC/PPCInstrInfo.td``: + + Each target has a tablegen file that describes the target's instruction set. + For targets that use the DAGToDAG instruction selection framework, add a + pattern for your new node that uses one or more target nodes. Documentation + for this is a bit sparse right now, but there are several decent examples. + See the patterns for ``rotl`` in ``PPCInstrInfo.td``. + +#. TODO: document complex patterns. + +#. ``llvm/test/Regression/CodeGen/*``: + + Add test cases for your new node to the test suite. + ``llvm/test/Regression/CodeGen/X86/bswap.ll`` is a good example. + +Adding a new instruction +======================== + +.. warning:: + + Adding instructions changes the bitcode format, and it will take some effort + to maintain compatibility with the previous version. Only add an instruction + if it is absolutely necessary. + +#. ``llvm/include/llvm/Instruction.def``: + + add a number for your instruction and an enum name + +#. ``llvm/include/llvm/Instructions.h``: + + add a definition for the class that will represent your instruction + +#. ``llvm/include/llvm/Support/InstVisitor.h``: + + add a prototype for a visitor to your new instruction type + +#. ``llvm/lib/AsmParser/Lexer.l``: + + add a new token to parse your instruction from assembly text file + +#. ``llvm/lib/AsmParser/llvmAsmParser.y``: + + add the grammar on how your instruction can be read and what it will + construct as a result + +#. ``llvm/lib/Bitcode/Reader/Reader.cpp``: + + add a case for your instruction and how it will be parsed from bitcode + +#. ``llvm/lib/VMCore/Instruction.cpp``: + + add a case for how your instruction will be printed out to assembly + +#. ``llvm/lib/VMCore/Instructions.cpp``: + + implement the class you defined in ``llvm/include/llvm/Instructions.h`` + +#. Test your instruction + +#. ``llvm/lib/Target/*``: + + add support for your instruction to code generators, or add a lowering pass. + +#. ``llvm/test/Regression/*``: + + add your test cases to the test suite. + +Also, you need to implement (or modify) any analyses or passes that you want to +understand this new instruction. + +Adding a new type +================= + +.. warning:: + + Adding new types changes the bitcode format, and will break compatibility with + currently-existing LLVM installations. Only add new types if it is absolutely + necessary. + +Adding a fundamental type +------------------------- + +#. ``llvm/include/llvm/Type.h``: + + add enum for the new type; add static ``Type*`` for this type + +#. ``llvm/lib/VMCore/Type.cpp``: + + add mapping from ``TypeID`` => ``Type*``; initialize the static ``Type*`` + +#. ``llvm/lib/AsmReader/Lexer.l``: + + add ability to parse in the type from text assembly + +#. 
``llvm/lib/AsmReader/llvmAsmParser.y``: + + add a token for that type + +Adding a derived type +--------------------- + +#. ``llvm/include/llvm/Type.h``: + + add enum for the new type; add a forward declaration of the type also + +#. ``llvm/include/llvm/DerivedTypes.h``: + + add new class to represent new class in the hierarchy; add forward + declaration to the TypeMap value type + +#. ``llvm/lib/VMCore/Type.cpp``: + + add support for derived type to: + + .. code-block:: c++ + + std::string getTypeDescription(const Type &Ty, + std::vector &TypeStack) + bool TypesEqual(const Type *Ty, const Type *Ty2, + std::map &EqTypes) + + add necessary member functions for type, and factory methods + +#. ``llvm/lib/AsmReader/Lexer.l``: + + add ability to parse in the type from text assembly + +#. ``llvm/lib/BitCode/Writer/Writer.cpp``: + + modify ``void BitcodeWriter::outputType(const Type *T)`` to serialize your + type + +#. ``llvm/lib/BitCode/Reader/Reader.cpp``: + + modify ``const Type *BitcodeReader::ParseType()`` to read your data type + +#. ``llvm/lib/VMCore/AsmWriter.cpp``: + + modify + + .. code-block:: c++ + + void calcTypeName(const Type *Ty, + std::vector &TypeStack, + std::map &TypeNames, + std::string &Result) + + to output the new derived type diff --git a/docs/GarbageCollection.html b/docs/GarbageCollection.html index 0b8f588..e124851 100644 --- a/docs/GarbageCollection.html +++ b/docs/GarbageCollection.html @@ -1253,7 +1253,7 @@ methods. Here's a realistic example:

    >#include "llvm/CodeGen/AsmPrinter.h" #include "llvm/Function.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetAsmInfo.h" void MyGCPrinter::beginAssembly(std::ostream &OS, AsmPrinter &AP, @@ -1266,7 +1266,7 @@ void MyGCPrinter::finishAssembly(std::ostream &OS, AsmPrinter &AP, // Set up for emitting addresses. const char *AddressDirective; int AddressAlignLog; - if (AP.TM.getTargetData()->getPointerSize() == sizeof(int32_t)) { + if (AP.TM.getDataLayout()->getPointerSize() == sizeof(int32_t)) { AddressDirective = TAI.getData32bitsDirective(); AddressAlignLog = 2; } else { @@ -1382,7 +1382,7 @@ Fergus Henderson. International Symposium on Memory Management 2002.

    Chris Lattner
    LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-05-03 17:25:19 +0200 (Thu, 03 May 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/GettingStarted.html b/docs/GettingStarted.html deleted file mode 100644 index 61335af..0000000 --- a/docs/GettingStarted.html +++ /dev/null @@ -1,1760 +0,0 @@ - - - - - Getting Started with LLVM System - - - - -

    - Getting Started with the LLVM System -

    - - - -
    -

    Written by: - John Criswell, - Chris Lattner, - Misha Brukman, - Vikram Adve, and - Guochun Shi. -

    -
    - - - -

    - Overview -

    - - -
    - -

    Welcome to LLVM! In order to get started, you first need to know some -basic information.

    - -

    First, LLVM comes in three pieces. The first piece is the LLVM -suite. This contains all of the tools, libraries, and header files -needed to use LLVM. It contains an assembler, disassembler, bitcode -analyzer and bitcode optimizer. It also contains basic regression tests that -can be used to test the LLVM tools and the Clang front end.

    - -

    The second piece is the Clang front end. -This component compiles C, C++, Objective C, and Objective C++ code into LLVM -bitcode. Once compiled into LLVM bitcode, a program can be manipulated with the -LLVM tools from the LLVM suite. -

    - -

    -There is a third, optional piece called Test Suite. It is a suite of programs -with a testing harness that can be used to further test LLVM's functionality -and performance. -

    - -
    - - -

    - Getting Started Quickly (A Summary) -

    - - -
    - -

    The LLVM Getting Started documentation may be out of date. So, the Clang -Getting Started page might -also be a good place to start.

    - -

    Here's the short story for getting up and running quickly with LLVM:

    - -
      -
    1. Read the documentation.
    2. -
    3. Read the documentation.
    4. -
    5. Remember that you were warned twice about reading the documentation.
    6. - -
    7. Checkout LLVM: -
        -
      • cd where-you-want-llvm-to-live -
      • svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm
      • -
      -
    8. - -
    9. Checkout Clang: -
        -
      • cd where-you-want-llvm-to-live -
      • cd llvm/tools -
      • svn co http://llvm.org/svn/llvm-project/cfe/trunk clang
      • -
      -
    10. - -
    11. Checkout Compiler-RT: -
        -
      • cd where-you-want-llvm-to-live -
      • cd llvm/projects -
      • svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk - compiler-rt
      • -
      -
    12. - -
    13. Get the Test Suite Source Code [Optional] -
        -
      • cd where-you-want-llvm-to-live -
      • cd llvm/projects -
      • svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite
      • -
      -
    14. - -
    15. Configure and build LLVM and Clang: -
        -
      • cd where-you-want-to-build-llvm
      • -
      • mkdir build (for building without polluting the source dir)
      • -
      • cd build
      • -
      • ../llvm/configure [options] -
        Some common options: - -
          -
        • --prefix=directory - - Specify for directory the full pathname of where you - want the LLVM tools and libraries to be installed (default - /usr/local).
        • -
        - -
          -
        • --enable-optimized - - Compile with optimizations enabled (default is NO).
        • -
        - -
          -
        • --enable-assertions - - Compile with assertion checks enabled (default is YES).
        • -
        -
      • -
      • make [-j] - The -j specifies the number of jobs (commands) to run simultaneously. This builds both LLVM and Clang for Debug+Asserts mode. The --enable-optimized configure option is used to specify a Release build.
      • make check-all - This runs the regression tests to ensure everything is in working order.
      • make update - This command is used to update all the svn repositories at once, rather than having to cd into the individual repositories and running svn update.
      • It is also possible to use CMake instead of the makefiles. With CMake - it is also possible to generate project files for several IDEs: Eclipse - CDT4, CodeBlocks, Qt-Creator (use the CodeBlocks generator), KDevelop3.
      • -
      • If you get an "internal compiler error (ICE)" or test failures, see - below.
      • - -
      -
    16. - -
    - -

    Consult the Getting Started with LLVM section for -detailed information on configuring and compiling LLVM. See Setting Up Your Environment for tips that simplify -working with the Clang front end and LLVM tools. Go to Program -Layout to learn about the layout of the source code tree.

    - -
    - - -

    - Requirements -

    - - -
    - -

    Before you begin to use the LLVM system, review the requirements given below. -This may save you some trouble by knowing ahead of time what hardware and -software you will need.

    - - -

    - Hardware -

    - -
    - -

    LLVM is known to work on the following platforms:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    OS               Arch                   Compilers
    AuroraUX         x86 [1]                GCC
    Linux            x86 [1]                GCC
    Linux            amd64                  GCC
    Solaris          V9 (Ultrasparc)        GCC
    FreeBSD          x86 [1]                GCC
    FreeBSD          amd64                  GCC
    MacOS X [2]      PowerPC                GCC
    MacOS X [2,9]    x86                    GCC
    Cygwin/Win32     x86 [1,8,11]           GCC 3.4.X, binutils 2.20
    MinGW/Win32      x86 [1,6,8,10,11]      GCC 3.4.X, binutils 2.20
    - -

    LLVM has partial support for the following platforms:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    OS               Arch                   Compilers
    Windows          x86 [1]                Visual Studio 2008 or higher [4,5]
    AIX [3,4]        PowerPC                GCC
    Linux [3,5]      PowerPC                GCC
    Linux [7]        Alpha                  GCC
    Linux [7]        Itanium (IA-64)        GCC
    HP-UX [7]        Itanium (IA-64)        HP aCC
    Windows x64      x86-64                 mingw-w64's GCC-4.5.x [12]
    - -

    Notes:

    - - - -

    Note that you will need about 1-3 GB of space for a full LLVM build in Debug -mode, depending on the system (it is so large because of all the debugging -information and the fact that the libraries are statically linked into multiple -tools). If you do not need many of the tools and you are space-conscious, you -can pass ONLY_TOOLS="tools you need" to make. The Release build -requires considerably less space.

    - -

    The LLVM suite may compile on other platforms, but it is not -guaranteed to do so. If compilation is successful, the LLVM utilities should be -able to assemble, disassemble, analyze, and optimize LLVM bitcode. Code -generation should work as well, although the generated native code may not work -on your platform.

    - -
    - - -

    - Software -

    -
    -

    Compiling LLVM requires that you have several software packages - installed. The table below lists those required packages. The Package column - is the usual name for the software package that LLVM depends on. The Version - column provides "known to work" versions of the package. The Notes column - describes how LLVM uses the package and provides other details.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    Package          Version         Notes
    GNU Make         3.79, 3.79.1    Makefile/build processor
    GCC              3.4.2           C/C++ compiler [1]
    TeXinfo          4.5             For building the CFE
    SVN              ≥1.3            Subversion access to LLVM [2]
    DejaGnu          1.4.2           Automated test suite [3]
    tcl              8.3, 8.4        Automated test suite [3]
    expect           5.38.0          Automated test suite [3]
    perl             ≥5.6.0          Utilities
    GNU M4           1.4             Macro processor for configuration [4]
    GNU Autoconf     2.60            Configuration script builder [4]
    GNU Automake     1.9.6           aclocal macro generator [4]
    libtool          1.5.22          Shared library manager [4]
    - -

    Notes:

    - - -

    Additionally, your compilation host is expected to have the usual - plethora of Unix utilities. Specifically:

    -
      -
    • ar - archive library builder
    • -
    • bzip2* - bzip2 command for distribution generation
    • -
    • bunzip2* - bunzip2 command for distribution checking
    • -
    • chmod - change permissions on a file
    • -
    • cat - output concatenation utility
    • -
    • cp - copy files
    • -
    • date - print the current date/time
    • -
    • echo - print to standard output
    • -
    • egrep - extended regular expression search utility
    • -
    • find - find files/dirs in a file system
    • -
    • grep - regular expression search utility
    • -
    • gzip* - gzip command for distribution generation
    • -
    • gunzip* - gunzip command for distribution checking
    • -
    • install - install directories/files
    • -
    • mkdir - create a directory
    • -
    • mv - move (rename) files
    • -
    • ranlib - symbol table builder for archive libraries
    • -
    • rm - remove (delete) files and directories
    • -
    • sed - stream editor for transforming output
    • -
    • sh - Bourne shell for make build scripts
    • -
    • tar - tape archive for distribution generation
    • -
    • test - test things in file system
    • -
    • unzip* - unzip command for distribution checking
    • -
    • zip* - zip command for distribution generation
    • -
    -
    - - -

    - Broken versions of GCC and other tools -

    - -
    - -

    LLVM is very demanding of the host C++ compiler, and as such tends to expose -bugs in the compiler. In particular, several versions of GCC crash when trying -to compile LLVM. We routinely use GCC 4.2 (and higher) or Clang. -Other versions of GCC will probably work as well. GCC versions listed -here are known to not work. If you are using one of these versions, please try -to upgrade your GCC to something more recent. If you run into a problem with a -version of GCC not listed here, please let -us know. Please use the "gcc -v" command to find out which version -of GCC you are using. -

    - -

    GCC versions prior to 3.0: GCC 2.96.x and before had several -problems in the STL that effectively prevent it from compiling LLVM. -

    - -

    GCC 3.2.2 and 3.2.3: These versions of GCC fail to compile LLVM with a bogus template error. This was fixed in later GCCs.

    - -

    GCC 3.3.2: This version of GCC suffered from a serious bug which causes it to crash in -the "convert_from_eh_region_ranges_1" GCC function.

    - -

    Cygwin GCC 3.3.3: The version of GCC 3.3.3 commonly shipped with - Cygwin does not work.

    -

    SuSE GCC 3.3.3: The version of GCC 3.3.3 shipped with SuSE 9.1 (and - possibly others) does not compile LLVM correctly (it appears that exception - handling is broken in some cases). Please download the FSF 3.3.3 or upgrade - to a newer version of GCC.

    -

    GCC 3.4.0 on linux/x86 (32-bit): GCC miscompiles portions of the - code generator, causing an infinite loop in the llvm-gcc build when built - with optimizations enabled (i.e. a release build).

    -

    GCC 3.4.2 on linux/x86 (32-bit): GCC miscompiles portions of the - code generator at -O3, as with 3.4.0. However gcc 3.4.2 (unlike 3.4.0) - correctly compiles LLVM at -O2. A work around is to build release LLVM - builds with "make ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O2 ..."

GCC 3.4.x on X86-64/amd64: GCC miscompiles portions of LLVM.

GCC 3.4.4 (CodeSourcery ARM 2005q3-2): This compiler miscompiles LLVM when building with optimizations enabled. It appears to work with "make ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O1", or build a debug build instead.

IA-64 GCC 4.0.0: The IA-64 version of GCC 4.0.0 is known to miscompile LLVM.

Apple Xcode 2.3: GCC crashes when compiling LLVM at -O3 (which is the default with ENABLE_OPTIMIZED=1). To work around this, build with "ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O2".

GCC 4.1.1: GCC fails to build LLVM with template concept check errors compiling some files. At the time of this writing, GCC mainline (4.2) did not share the problem.

GCC 4.1.1 on X86-64/amd64: GCC miscompiles portions of LLVM when compiling llvm itself into 64-bit code. LLVM will appear to mostly work but will be buggy, e.g. failing portions of its testsuite.

GCC 4.1.2 on OpenSUSE: Seg faults during libstdc++ build, and on x86_64 platforms compiling md5.c gets a mangled constant.

GCC 4.1.2 (20061115 (prerelease) (Debian 4.1.1-21)) on Debian: Appears to miscompile parts of LLVM 2.4. One symptom is ValueSymbolTable complaining about symbols remaining in the table on destruction.

GCC 4.1.2 20071124 (Red Hat 4.1.2-42): Suffers from the same symptoms as the previous one. It appears to work with ENABLE_OPTIMIZED=0 (the default).

Cygwin GCC 4.3.2 20080827 (beta) 2: Users reported various problems related to link errors when using this GCC version.

Debian GCC 4.3.2 on X86: Crashes building some files in LLVM 2.6.

GCC 4.3.3 (Debian 4.3.3-10) on ARM: Miscompiles parts of LLVM 2.6 when optimizations are turned on. The symptom is an infinite loop in FoldingSetImpl::RemoveNode while running the code generator.

SUSE 11 GCC 4.3.4: Miscompiles LLVM, causing crashes in ValueHandle logic.

GCC 4.3.5 and GCC 4.4.5 on ARM: These can miscompile "value >> 1" even at -O0. A test failure in test/Assembler/alignstack.ll is one symptom of the problem.

GNU ld 2.16.X: Some 2.16.X versions of the ld linker will produce very long warning messages complaining that some ".gnu.linkonce.t.*" symbol was defined in a discarded section. You can safely ignore these messages as they are erroneous and the linkage is correct. These messages disappear using ld 2.17.

GNU binutils 2.17: Binutils 2.17 contains a bug which causes huge link times (minutes instead of seconds) when building LLVM. We recommend upgrading to a newer version (2.17.50.0.4 or later).

GNU Binutils 2.19.1 Gold: This version of Gold contained a bug which causes intermittent failures when building LLVM with position independent code. The symptom is an error about cyclic dependencies. We recommend upgrading to a newer version of Gold.

Getting Started with LLVM

The remainder of this guide is meant to get you up and running with LLVM and to give you some basic information about the LLVM environment.

The later sections of this guide describe the general layout of the LLVM source tree, a simple example using the LLVM tool chain, and links to find more information about LLVM or to get help via e-mail.

Terminology and Notation

Throughout this manual, the following names are used to denote paths specific to the local system and working environment. These are not environment variables you need to set but just strings used in the rest of this document below. In any of the examples below, simply replace each of these names with the appropriate pathname on your local system. All these paths are absolute:

SRC_ROOT
    This is the top level directory of the LLVM source tree.

OBJ_ROOT
    This is the top level directory of the LLVM object tree (i.e. the tree where object files and compiled programs will be placed; it can be the same as SRC_ROOT).

Setting Up Your Environment

In order to compile and use LLVM, you may need to set some environment variables.

LLVM_LIB_SEARCH_PATH=/path/to/your/bitcode/libs
    [Optional] This environment variable helps LLVM linking tools find the locations of your bitcode libraries. It is provided only as a convenience, since you can specify the paths using the -L options of the tools, and the C/C++ front-end will automatically use the bitcode files installed in its lib directory.
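For example, in a Bourne-compatible shell you might set the variable like this before running the LLVM linking tools (the path is a placeholder, as above):

% LLVM_LIB_SEARCH_PATH=/path/to/your/bitcode/libs
% export LLVM_LIB_SEARCH_PATH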

Unpacking the LLVM Archives

If you have the LLVM distribution, you will need to unpack it before you can begin to compile it. LLVM is distributed as a set of two files: the LLVM suite and the LLVM GCC front end compiled for your platform. There is an additional test suite that is optional. Each file is a TAR archive that is compressed with the gzip program.

The files are as follows, with x.y marking the version number:

llvm-x.y.tar.gz
    Source release for the LLVM libraries and tools.

llvm-test-x.y.tar.gz
    Source release for the LLVM test-suite.

llvm-gcc-4.2-x.y.source.tar.gz
    Source release of the llvm-gcc-4.2 front end. See README.LLVM in the root directory for build instructions.

llvm-gcc-4.2-x.y-platform.tar.gz
    Binary release of the llvm-gcc-4.2 front end for a specific platform.
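As a brief illustration (the archive name here is the generic one from the list above), each archive can be unpacked with gzip and tar:

% gzip -dc llvm-x.y.tar.gz | tar -xf -    # typically unpacks into an llvm-x.y directory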

Checkout LLVM from Subversion

If you have access to our Subversion repository, you can get a fresh copy of the entire source code. All you need to do is check it out from Subversion as follows:

    • cd where-you-want-llvm-to-live
    • Read-Only: svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm
    • Read-Write: svn co https://user@llvm.org/svn/llvm-project/llvm/trunk llvm

This will create an 'llvm' directory in the current directory and fully populate it with the LLVM source code, Makefiles, test directories, and local copies of documentation files.

If you want to get a specific release (as opposed to the most recent revision), you can check it out from the 'tags' directory (instead of 'trunk'). The following releases are located in the following subdirectories of the 'tags' directory:

    • Release 3.1: RELEASE_31/final
    • Release 3.0: RELEASE_30/final
    • Release 2.9: RELEASE_29/final
    • Release 2.8: RELEASE_28
    • Release 2.7: RELEASE_27
    • Release 2.6: RELEASE_26
    • Release 2.5: RELEASE_25
    • Release 2.4: RELEASE_24
    • Release 2.3: RELEASE_23
    • Release 2.2: RELEASE_22
    • Release 2.1: RELEASE_21
    • Release 2.0: RELEASE_20
    • Release 1.9: RELEASE_19
    • Release 1.8: RELEASE_18
    • Release 1.7: RELEASE_17
    • Release 1.6: RELEASE_16
    • Release 1.5: RELEASE_15
    • Release 1.4: RELEASE_14
    • Release 1.3: RELEASE_13
    • Release 1.2: RELEASE_12
    • Release 1.1: RELEASE_11
    • Release 1.0: RELEASE_1

If you would like to get the LLVM test suite (a separate package as of 1.4), you get it from the Subversion repository:

% cd llvm/projects
% svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite

By placing it in llvm/projects, it will be automatically configured by the LLVM configure script as well as automatically updated when you run svn update.

GIT mirror

GIT mirrors are available for a number of LLVM subprojects. These mirrors sync automatically with each Subversion commit and contain all necessary git-svn marks (so, you can recreate git-svn metadata locally). Note that right now mirrors reflect only trunk for each project. You can do the read-only GIT clone of LLVM via:

% git clone http://llvm.org/git/llvm.git

If you want to check out clang too, run:

% git clone http://llvm.org/git/llvm.git
% cd llvm/tools
% git clone http://llvm.org/git/clang.git

Since the upstream repository is in Subversion, you should use "git pull --rebase" instead of "git pull" to avoid generating a non-linear history in your clone. To configure "git pull" to pass --rebase by default on the master branch, run the following command:

% git config branch.master.rebase true

Sending patches with Git

Please read the Developer Policy, too.

Assume master points to the upstream and mybranch points to your working branch, and mybranch is rebased onto master. First you may check the sanity of whitespace:

% git diff --check master..mybranch

The easiest way to generate a patch is as below:

% git diff master..mybranch > /path/to/mybranch.diff

It is a little different from an svn-generated diff: a git-generated diff has prefixes like a/ and b/. Don't worry; most developers know it can be applied with patch -p1 -N.

You may also generate a patch set with git-format-patch, which produces one patch per commit. To generate patch files to attach to your message:

% git format-patch --no-attach master..mybranch -o /path/to/your/patchset

If you would like to send patches directly, you may use git-send-email or git-imap-send. Here is an example of generating the patchset in Gmail's [Drafts]:

% git format-patch --attach master..mybranch --stdout | git imap-send

Then your .git/config should have an [imap] section:

[imap]
        host = imaps://imap.gmail.com
        user = your.gmail.account@gmail.com
        pass = himitsu!
        port = 993
        sslverify = false
; in English
        folder = "[Gmail]/Drafts"
; example for Japanese, "Modified UTF-7" encoded.
        folder = "[Gmail]/&Tgtm+DBN-"
; example for Traditional Chinese
        folder = "[Gmail]/&g0l6Pw-"

For developers to work with git-svn

To set up a clone from which you can submit code using git-svn, run:

% git clone http://llvm.org/git/llvm.git
% cd llvm
% git svn init https://llvm.org/svn/llvm-project/llvm/trunk --username=<username>
% git config svn-remote.svn.fetch :refs/remotes/origin/master
% git svn rebase -l  # -l avoids fetching ahead of the git mirror.

# If you have clang too:
% cd tools
% git clone http://llvm.org/git/clang.git
% cd clang
% git svn init https://llvm.org/svn/llvm-project/cfe/trunk --username=<username>
% git config svn-remote.svn.fetch :refs/remotes/origin/master
% git svn rebase -l

To update this clone without generating git-svn tags that conflict with the upstream git repo, run:

% git fetch && (cd tools/clang && git fetch)  # Get matching revisions of both trees.
% git checkout master
% git svn rebase -l
% (cd tools/clang &&
   git checkout master &&
   git svn rebase -l)

This leaves your working directories on their master branches, so you'll need to checkout each working branch individually and rebase it on top of its parent branch. (Note: This script is intended for relative newbies to git. If you have more experience, you can likely improve on it.)

The git-svn metadata can get out of sync after you mess around with branches and dcommit. When that happens, git svn dcommit stops working, complaining about files with uncommitted changes. The fix is to rebuild the metadata:

% rm -rf .git/svn
% git svn rebase -l
Local LLVM Configuration

Once checked out from the Subversion repository, the LLVM suite source code must be configured via the configure script. This script sets variables in the various *.in files, most notably llvm/Makefile.config and llvm/include/Config/config.h. It also populates OBJ_ROOT with the Makefiles needed to begin building LLVM.

The following environment variables are used by the configure script to configure the build system:

Variable   Purpose
--------   -------
CC         Tells configure which C compiler to use. By default, configure will look for the first GCC C compiler in PATH. Use this variable to override configure's default behavior.
CXX        Tells configure which C++ compiler to use. By default, configure will look for the first GCC C++ compiler in PATH. Use this variable to override configure's default behavior.
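For instance, one way to point configure at specific compilers (the compiler names here are only illustrative) is to set these variables on the configure command line:

% cd OBJ_ROOT
% CC=gcc-4.2 CXX=g++-4.2 SRC_ROOT/configure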

The following options can be used to set or enable LLVM-specific options:

--enable-optimized
    Enables optimized compilation (debugging symbols are removed and GCC optimization flags are enabled). Note that this is the default setting if you are using the LLVM distribution. The default behavior of a Subversion checkout is to use an unoptimized build (also known as a debug build).

--enable-debug-runtime
    Enables debug symbols in the runtime libraries. The default is to strip debug symbols from the runtime libraries.

--enable-jit
    Compile the Just In Time (JIT) compiler functionality. This is not available on all platforms. The default is dependent on the platform, so it is best to explicitly enable it if you want it.

--enable-targets=target-option
    Controls which targets will be built and linked into llc. The default value for target-option is "all", which builds and links all available targets. The value "host-only" can be specified to build only a native compiler (no cross-compiler targets available); the "native" target is selected as the target of the build host. You can also specify a comma-separated list of target names that you want available in llc. The target names use all lower case. The current set of targets is:
    arm, cpp, hexagon, mblaze, mips, mipsel, msp430, powerpc, ptx, sparc, spu, x86, x86_64, xcore.

--enable-doxygen
    Look for the doxygen program and enable construction of doxygen-based documentation from the source code. This is disabled by default because generating the documentation can take a long time and produces hundreds of megabytes of output.

--with-udis86
    LLVM can use an external disassembler library for various purposes (currently it is used only for examining code produced by the JIT). This option enables use of the udis86 x86 (both 32- and 64-bit) disassembler library.

To configure LLVM, follow these steps:

1. Change directory into the object root directory:

   % cd OBJ_ROOT

2. Run the configure script located in the LLVM source tree:

   % SRC_ROOT/configure --prefix=/install/path [other options]
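As a concrete sketch that combines the options described above (the install prefix and target list are only examples), a typical optimized configuration might look like:

% cd OBJ_ROOT
% SRC_ROOT/configure --prefix=/usr/local/llvm --enable-optimized --enable-targets=x86,x86_64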

Compiling the LLVM Suite Source Code

Once you have configured LLVM, you can build it. There are three types of builds:

Debug Builds
    These builds are the default when one is using a Subversion checkout and types gmake (unless the --enable-optimized option was used during configuration). The build system will compile the tools and libraries with debugging information. To get a Debug Build using the LLVM distribution, the --disable-optimized option must be passed to configure.

Release (Optimized) Builds
    These builds are enabled with the --enable-optimized option to configure or by specifying ENABLE_OPTIMIZED=1 on the gmake command line. For these builds, the build system will compile the tools and libraries with GCC optimizations enabled and strip debugging information from the libraries and executables it generates. Note that Release Builds are the default when using an LLVM distribution.

Profile Builds
    These builds are for use with profiling. They compile profiling information into the code for use with programs like gprof. Profile builds must be started by specifying ENABLE_PROFILING=1 on the gmake command line.

Once you have LLVM configured, you can build it by entering the OBJ_ROOT directory and issuing the following command:

% gmake

If the build fails, please check the list of broken GCC versions above to see if you are using a version of GCC that is known not to compile LLVM.

If you have multiple processors in your machine, you may wish to use some of the parallel build options provided by GNU Make. For example, you could use the command:

% gmake -j2

There are several special targets which are useful when working with the LLVM source code:

gmake clean
    Removes all files generated by the build. This includes object files, generated C/C++ files, libraries, and executables.

gmake dist-clean
    Removes everything that gmake clean does, but also removes files generated by configure. It attempts to return the source tree to the original state in which it was shipped.

gmake install
    Installs LLVM header files, libraries, tools, and documentation in a hierarchy under $PREFIX, specified with ./configure --prefix=[dir], which defaults to /usr/local.

gmake -C runtime install-bytecode
    Assuming you built LLVM into $OBJDIR, when this command is run, it will install bitcode libraries into the GCC front end's bitcode library directory. If you need to update your bitcode libraries, this is the target to use once you've built them.

Please see the Makefile Guide for further details on these make targets and descriptions of other targets available.

It is also possible to override default values from configure by declaring variables on the command line. The following are some examples:

gmake ENABLE_OPTIMIZED=1
    Perform a Release (Optimized) build.

gmake ENABLE_OPTIMIZED=1 DISABLE_ASSERTIONS=1
    Perform a Release (Optimized) build without assertions enabled.

gmake ENABLE_OPTIMIZED=0
    Perform a Debug build.

gmake ENABLE_PROFILING=1
    Perform a Profiling build.

gmake VERBOSE=1
    Print what gmake is doing on standard output.

gmake TOOL_VERBOSE=1
    Ask each tool invoked by the makefiles to print out what it is doing on the standard output. This also implies VERBOSE=1.

Every directory in the LLVM object tree includes a Makefile to build it and any subdirectories that it contains. Entering any directory inside the LLVM object tree and typing gmake should rebuild anything in or below that directory that is out of date.
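For example, to rebuild just one library after editing its sources (the directory chosen here is only illustrative), you can run gmake from the corresponding object directory:

% cd OBJ_ROOT/lib/Support
% gmake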

    - Cross-Compiling LLVM -

    - -
    -

    It is possible to cross-compile LLVM itself. That is, you can create LLVM - executables and libraries to be hosted on a platform different from the - platform where they are build (a Canadian Cross build). To configure a - cross-compile, supply the configure script with --build and - --host options that are different. The values of these options must - be legal target triples that your GCC compiler supports.

    - -

    The result of such a build is executables that are not runnable on - on the build host (--build option) but can be executed on the compile host - (--host option).

    -
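A minimal sketch of such a configuration, assuming an x86-64 Linux build machine and an ARM Linux host (both triples are illustrative and must match what your cross toolchain actually supports):

% SRC_ROOT/configure --build=x86_64-unknown-linux-gnu --host=arm-unknown-linux-gnueabi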
The Location of LLVM Object Files

The LLVM build system is capable of sharing a single LLVM source tree among several LLVM builds. Hence, it is possible to build LLVM for several different platforms or configurations using the same source tree.

This is accomplished in the typical autoconf manner:

    • Change directory to where the LLVM object files should live:

      % cd OBJ_ROOT

    • Run the configure script found in the LLVM source directory:

      % SRC_ROOT/configure
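For instance, one illustrative way to keep a Debug build and a Release build of the same sources side by side (the directory names are examples only) is to configure each in its own object directory:

% mkdir llvm-debug && cd llvm-debug
% SRC_ROOT/configure
% cd ..
% mkdir llvm-release && cd llvm-release
% SRC_ROOT/configure --enable-optimized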

The LLVM build will place files underneath OBJ_ROOT in directories named after the build type:

Debug Builds with assertions enabled (the default)
    Tools:     OBJ_ROOT/Debug+Asserts/bin
    Libraries: OBJ_ROOT/Debug+Asserts/lib

Release Builds
    Tools:     OBJ_ROOT/Release/bin
    Libraries: OBJ_ROOT/Release/lib

Profile Builds
    Tools:     OBJ_ROOT/Profile/bin
    Libraries: OBJ_ROOT/Profile/lib
Optional Configuration Items

If you're running on a Linux system that supports the "binfmt_misc" module, and you have root access on the system, you can set your system up to execute LLVM bitcode files directly. To do this, use commands like this (the first command may not be required if you are already using the module):

$ mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
$ echo ':llvm:M::BC::/path/to/lli:' > /proc/sys/fs/binfmt_misc/register
$ chmod u+x hello.bc   (if needed)
$ ./hello.bc

This allows you to execute LLVM bitcode files directly. On Debian, you can also use this command instead of the 'echo' command above:

$ sudo update-binfmts --install llvm /path/to/lli --magic 'BC'
Program Layout

One useful source of information about the LLVM source base is the LLVM doxygen documentation available at http://llvm.org/doxygen/. The following is a brief introduction to the code layout:

llvm/examples

This directory contains some simple examples of how to use the LLVM IR and JIT.

llvm/include

This directory contains public header files exported from the LLVM library. The three main subdirectories of this directory are:

llvm/include/llvm
    This directory contains all of the LLVM-specific header files. This directory also has subdirectories for different portions of LLVM: Analysis, CodeGen, Target, Transforms, etc...

llvm/include/llvm/Support
    This directory contains generic support libraries that are provided with LLVM but not necessarily specific to LLVM. For example, some C++ STL utilities and a Command Line option processing library store their header files here.

llvm/include/llvm/Config
    This directory contains header files configured by the configure script. They wrap "standard" UNIX and C header files. Source code can include these header files which automatically take care of the conditional #includes that the configure script generates.

llvm/lib

This directory contains most of the source files of the LLVM system. In LLVM, almost all code exists in libraries, making it very easy to share code among the different tools.

llvm/lib/VMCore/
    This directory holds the core LLVM source files that implement core classes like Instruction and BasicBlock.

llvm/lib/AsmParser/
    This directory holds the source code for the LLVM assembly language parser library.

llvm/lib/BitCode/
    This directory holds code for reading and writing LLVM bitcode.

llvm/lib/Analysis/
    This directory contains a variety of different program analyses, such as Dominator Information, Call Graphs, Induction Variables, Interval Identification, Natural Loop Identification, etc.

llvm/lib/Transforms/
    This directory contains the source code for the LLVM to LLVM program transformations, such as Aggressive Dead Code Elimination, Sparse Conditional Constant Propagation, Inlining, Loop Invariant Code Motion, Dead Global Elimination, and many others.

llvm/lib/Target/
    This directory contains files that describe various target architectures for code generation. For example, the llvm/lib/Target/X86 directory holds the X86 machine description while llvm/lib/Target/ARM implements the ARM backend.

llvm/lib/CodeGen/
    This directory contains the major parts of the code generator: Instruction Selector, Instruction Scheduling, and Register Allocation.

llvm/lib/MC/
    (FIXME: T.B.D.)

llvm/lib/Debugger/
    This directory contains the source level debugger library that makes it possible to instrument LLVM programs so that a debugger could identify source code locations at which the program is executing.

llvm/lib/ExecutionEngine/
    This directory contains libraries for executing LLVM bitcode directly at runtime in both interpreted and JIT compiled fashions.

llvm/lib/Support/
    This directory contains the source code that corresponds to the header files located in llvm/include/ADT/ and llvm/include/Support/.
llvm/projects

This directory contains projects that are not strictly part of LLVM but are shipped with LLVM. This is also the directory where you should create your own LLVM-based projects. See llvm/projects/sample for an example of how to set up your own project.

llvm/runtime

This directory contains libraries which are compiled into LLVM bitcode and used when linking programs with the Clang front end. Most of these libraries are skeleton versions of real libraries; for example, libc is a stripped down version of glibc.

Unlike the rest of the LLVM suite, this directory needs the LLVM GCC front end to compile.

llvm/test

This directory contains feature and regression tests and other basic sanity checks on the LLVM infrastructure. These are intended to run quickly and cover a lot of territory without being exhaustive.

test-suite

This is not a directory in the normal llvm module; it is a separate Subversion module that must be checked out (usually to projects/test-suite). This module contains a comprehensive correctness, performance, and benchmarking test suite for LLVM. It is a separate Subversion module because not every LLVM user is interested in downloading or building such a comprehensive test suite. For further details on this test suite, please see the Testing Guide document.

llvm/tools

The tools directory contains the executables built out of the libraries above, which form the main part of the user interface. You can always get help for a tool by typing tool_name -help. The following is a brief introduction to the most important tools. More detailed information is in the Command Guide.

bugpoint
    bugpoint is used to debug optimization passes or code generation backends by narrowing down the given test case to the minimum number of passes and/or instructions that still cause a problem, whether it is a crash or miscompilation. See HowToSubmitABug.html for more information on using bugpoint.

llvm-ar
    The archiver produces an archive containing the given LLVM bitcode files, optionally with an index for faster lookup.

llvm-as
    The assembler transforms the human readable LLVM assembly to LLVM bitcode.

llvm-dis
    The disassembler transforms the LLVM bitcode to human readable LLVM assembly.

llvm-link
    llvm-link, not surprisingly, links multiple LLVM modules into a single program.

lli
    lli is the LLVM interpreter, which can directly execute LLVM bitcode (although very slowly...). For architectures that support it (currently x86, Sparc, and PowerPC), by default, lli will function as a Just-In-Time compiler (if the functionality was compiled in), and will execute the code much faster than the interpreter.

llc
    llc is the LLVM backend compiler, which translates LLVM bitcode to a native code assembly file or to C code (with the -march=c option).

llvm-gcc
    llvm-gcc is a GCC-based C frontend that has been retargeted to use LLVM as its backend instead of GCC's RTL backend. It can also emit LLVM bitcode or assembly (with the -emit-llvm option) instead of the usual machine code output. It works just like any other GCC compiler, taking the typical -c, -S, -E, -o options that GCC accepts. Additionally, the source code for llvm-gcc is available as a separate Subversion module.

opt
    opt reads LLVM bitcode, applies a series of LLVM to LLVM transformations (which are specified on the command line), and then outputs the resultant bitcode. The 'opt -help' command is a good way to get a list of the program transformations available in LLVM.
    opt can also be used to run a specific analysis on an input LLVM bitcode file and print out the results. It is primarily useful for debugging analyses, or familiarizing yourself with what an analysis does.
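As a small illustration of how these tools fit together (the file names and the mem2reg pass are only examples), a hand-written .ll file can be assembled, transformed, and lowered like this:

% llvm-as hello.ll -o hello.bc          # assemble textual IR into bitcode
% opt -mem2reg hello.bc -o hello.opt.bc # run one transformation pass
% llc hello.opt.bc -o hello.s           # generate native assembly
% llvm-dis < hello.opt.bc | less        # inspect the transformed IR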
llvm/utils

This directory contains utilities for working with LLVM source code, and some of the utilities are actually required as part of the build process because they are code generators for parts of the LLVM infrastructure.

codegen-diff
    codegen-diff is a script that finds differences between code that LLC generates and code that LLI generates. This is a useful tool if you are debugging one of them, assuming that the other generates correct output. For the full user manual, run `perldoc codegen-diff'.

emacs/
    The emacs directory contains syntax-highlighting files which will work with Emacs and XEmacs editors, providing syntax highlighting support for LLVM assembly files and TableGen description files. For information on how to use the syntax files, consult the README file in that directory.

getsrcs.sh
    The getsrcs.sh script finds and outputs all non-generated source files, which is useful if one wishes to do a lot of development across directories and does not want to individually find each file. One way to use it is to run, for example: xemacs `utils/getsrcs.sh` from the top of your LLVM source tree.

llvmgrep
    This little tool performs an "egrep -H -n" on each source file in LLVM and passes to it a regular expression provided on llvmgrep's command line. This is a very efficient way of searching the source base for a particular regular expression.

makellvm
    The makellvm script compiles all files in the current directory and then compiles and links the tool that is the first argument. For example, assuming you are in the directory llvm/lib/Target/Sparc, if makellvm is in your path, simply running makellvm llc will make a build of the current directory, switch to directory llvm/tools/llc and build it, causing a re-linking of LLC.

TableGen/
    The TableGen directory contains the tool used to generate register descriptions, instruction set descriptions, and even assemblers from common TableGen description files.

vim/
    The vim directory contains syntax-highlighting files which will work with the VIM editor, providing syntax highlighting support for LLVM assembly files and TableGen description files. For information on how to use the syntax files, consult the README file in that directory.
An Example Using the LLVM Tool Chain

This section gives an example of using LLVM with the Clang front end.

Example with clang
1. First, create a simple C file, name it 'hello.c':

   #include <stdio.h>

   int main() {
     printf("hello world\n");
     return 0;
   }

2. Next, compile the C file into a native executable:

   % clang hello.c -o hello

   Note that clang works just like GCC by default. The standard -S and -c arguments work as usual (producing a native .s or .o file, respectively).

3. Next, compile the C file into an LLVM bitcode file:

   % clang -O3 -emit-llvm hello.c -c -o hello.bc

   The -emit-llvm option can be used with the -S or -c options to emit an LLVM ".ll" or ".bc" file (respectively) for the code. This allows you to use the standard LLVM tools on the bitcode file.

4. Run the program in both forms. To run the program, use:

   % ./hello

   and

   % lli hello.bc

   The second example shows how to invoke the LLVM JIT, lli.

5. Use the llvm-dis utility to take a look at the LLVM assembly code:

   % llvm-dis < hello.bc | less

6. Compile the program to native assembly using the LLC code generator:

   % llc hello.bc -o hello.s

7. Assemble the native assembly language file into a program:

   Solaris: % /opt/SUNWspro/bin/cc -xarch=v9 hello.s -o hello.native

   Others:  % gcc hello.s -o hello.native

8. Execute the native code program:

   % ./hello.native

   Note that using clang to compile directly to native code (i.e. when the -emit-llvm option is not present) does steps 6/7/8 for you.
Common Problems

If you are having problems building or using LLVM, or if you have any other general questions about LLVM, please consult the Frequently Asked Questions page.

Links

This document is just an introduction on how to use LLVM to do some simple things... there are many more interesting and complicated things that you can do that aren't documented here (but we'll gladly accept a patch if you want to write something up!). For more information about LLVM, check out:

Chris Lattner
Reid Spencer
The LLVM Compiler Infrastructure
Last modified: $Date: 2012-07-23 10:51:15 +0200 (Mon, 23 Jul 2012) $
    - - diff --git a/docs/GettingStarted.rst b/docs/GettingStarted.rst new file mode 100644 index 0000000..6876892 --- /dev/null +++ b/docs/GettingStarted.rst @@ -0,0 +1,1304 @@ +.. _getting_started: + +==================================== +Getting Started with the LLVM System +==================================== + +Overview +======== + +Welcome to LLVM! In order to get started, you first need to know some basic +information. + +First, LLVM comes in three pieces. The first piece is the LLVM suite. This +contains all of the tools, libraries, and header files needed to use LLVM. It +contains an assembler, disassembler, bitcode analyzer and bitcode optimizer. It +also contains basic regression tests that can be used to test the LLVM tools and +the Clang front end. + +The second piece is the `Clang `_ front end. This +component compiles C, C++, Objective C, and Objective C++ code into LLVM +bitcode. Once compiled into LLVM bitcode, a program can be manipulated with the +LLVM tools from the LLVM suite. + +There is a third, optional piece called Test Suite. It is a suite of programs +with a testing harness that can be used to further test LLVM's functionality +and performance. + +Getting Started Quickly (A Summary) +=================================== + +The LLVM Getting Started documentation may be out of date. So, the `Clang +Getting Started `_ page might also be a +good place to start. + +Here's the short story for getting up and running quickly with LLVM: + +#. Read the documentation. +#. Read the documentation. +#. Remember that you were warned twice about reading the documentation. +#. Checkout LLVM: + + * ``cd where-you-want-llvm-to-live`` + * ``svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm`` + +#. Checkout Clang: + + * ``cd where-you-want-llvm-to-live`` + * ``cd llvm/tools`` + * ``svn co http://llvm.org/svn/llvm-project/cfe/trunk clang`` + +#. Checkout Compiler-RT: + + * ``cd where-you-want-llvm-to-live`` + * ``cd llvm/projects`` + * ``svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk compiler-rt`` + +#. Get the Test Suite Source Code **[Optional]** + + * ``cd where-you-want-llvm-to-live`` + * ``cd llvm/projects`` + * ``svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite`` + +#. Configure and build LLVM and Clang: + + * ``cd where-you-want-to-build-llvm`` + * ``mkdir build`` (for building without polluting the source dir) + * ``cd build`` + * ``../llvm/configure [options]`` + Some common options: + + * ``--prefix=directory`` --- + + Specify for *directory* the full pathname of where you want the LLVM + tools and libraries to be installed (default ``/usr/local``). + + * ``--enable-optimized`` --- + + Compile with optimizations enabled (default is NO). + + * ``--enable-assertions`` --- + + Compile with assertion checks enabled (default is YES). + + * ``make [-j]`` --- The ``-j`` specifies the number of jobs (commands) to run + simultaneously. This builds both LLVM and Clang for Debug+Asserts mode. + The --enabled-optimized configure option is used to specify a Release + build. + + * ``make check-all`` --- This run the regression tests to ensure everything + is in working order. + + * ``make update`` --- This command is used to update all the svn repositories + at once, rather then having to ``cd`` into the individual repositories and + running ``svn update``. + + * It is also possible to use CMake instead of the makefiles. 
With CMake it is + also possible to generate project files for several IDEs: Eclipse CDT4, + CodeBlocks, Qt-Creator (use the CodeBlocks generator), KDevelop3. + + * If you get an "internal compiler error (ICE)" or test failures, see + `below`. + +Consult the `Getting Started with LLVM`_ section for detailed information on +configuring and compiling LLVM. See `Setting Up Your Environment`_ for tips +that simplify working with the Clang front end and LLVM tools. Go to `Program +Layout`_ to learn about the layout of the source code tree. + +Requirements +============ + +Before you begin to use the LLVM system, review the requirements given below. +This may save you some trouble by knowing ahead of time what hardware and +software you will need. + +Hardware +-------- + +LLVM is known to work on the following platforms: + ++-----------------+----------------------+-------------------------+ +|OS | Arch | Compilers | ++=================+======================+=========================+ +|AuroraUX | x86\ :sup:`1` | GCC | ++-----------------+----------------------+-------------------------+ +|Linux | x86\ :sup:`1` | GCC | ++-----------------+----------------------+-------------------------+ +|Linux | amd64 | GCC | ++-----------------+----------------------+-------------------------+ +|Solaris | V9 (Ultrasparc) | GCC | ++-----------------+----------------------+-------------------------+ +|FreeBSD | x86\ :sup:`1` | GCC | ++-----------------+----------------------+-------------------------+ +|FreeBSD | amd64 | GCC | ++-----------------+----------------------+-------------------------+ +|MacOS X\ :sup:`2`| PowerPC | GCC | ++-----------------+----------------------+-------------------------+ +|MacOS X\ :sup:`9`| x86 | GCC | ++-----------------+----------------------+-------------------------+ +|Cygwin/Win32 | x86\ :sup:`1, 8, 11` | GCC 3.4.X, binutils 2.20| ++-----------------+----------------------+-------------------------+ + +LLVM has partial support for the following platforms: + ++-------------------+----------------------+-------------------------------------------+ +|OS | Arch | Compilers | ++===================+======================+===========================================+ +| Windows | x86\ :sup:`1` | Visual Studio 2000 or higher\ :sup:`4,5` | ++-------------------+----------------------+-------------------------------------------+ +| AIX\ :sup:`3,4` | PowerPC | GCC | ++-------------------+----------------------+-------------------------------------------+ +| Linux\ :sup:`3,5` | PowerPC | GCC | ++-------------------+----------------------+-------------------------------------------+ +| Linux\ :sup:`7` | Alpha | GCC | ++-------------------+----------------------+-------------------------------------------+ +| Linux\ :sup:`7` | Itanium (IA-64) | GCC | ++-------------------+----------------------+-------------------------------------------+ +| HP-UX\ :sup:`7` | Itanium (IA-64) | HP aCC | ++-------------------+----------------------+-------------------------------------------+ +| Windows x64 | x86-64 | mingw-w64's GCC-4.5.x\ :sup:`12` | ++-------------------+----------------------+-------------------------------------------+ + +.. note:: + + Code generation supported for Pentium processors and up + + #. Code generation supported for Pentium processors and up + #. Code generation supported for 32-bit ABI only + #. No native code generation + #. Build is not complete: one or more tools do not link or function + #. The GCC-based C/C++ frontend does not build + #. 
The port is done using the MSYS shell. + #. Native code generation exists but is not complete. + #. Binutils 2.20 or later is required to build the assembler generated by LLVM properly. + #. Xcode 2.5 and gcc 4.0.1 (Apple Build 5370) will trip internal LLVM assert + messages when compiled for Release at optimization levels greater than 0 + (i.e., ``-O1`` and higher). Add ``OPTIMIZE_OPTION="-O0"`` to the build + command line if compiling for LLVM Release or bootstrapping the LLVM + toolchain. + #. For MSYS/MinGW on Windows, be sure to install the MSYS version of the perl + package, and be sure it appears in your path before any Windows-based + versions such as Strawberry Perl and ActivePerl, as these have + Windows-specifics that will cause the build to fail. + #. To use LLVM modules on Win32-based system, you may configure LLVM + with ``--enable-shared``. + + #. To compile SPU backend, you need to add ``LDFLAGS=-Wl,--stack,16777216`` to + configure. + +Note that you will need about 1-3 GB of space for a full LLVM build in Debug +mode, depending on the system (it is so large because of all the debugging +information and the fact that the libraries are statically linked into multiple +tools). If you do not need many of the tools and you are space-conscious, you +can pass ``ONLY_TOOLS="tools you need"`` to make. The Release build requires +considerably less space. + +The LLVM suite *may* compile on other platforms, but it is not guaranteed to do +so. If compilation is successful, the LLVM utilities should be able to +assemble, disassemble, analyze, and optimize LLVM bitcode. Code generation +should work as well, although the generated native code may not work on your +platform. + +Software +-------- + +Compiling LLVM requires that you have several software packages installed. The +table below lists those required packages. The Package column is the usual name +for the software package that LLVM depends on. The Version column provides +"known to work" versions of the package. The Notes column describes how LLVM +uses the package and provides other details. 
+ ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| Package | Version | Notes | ++==============================================================+=================+=============================================+ +| `GNU Make `_ | 3.79, 3.79.1 | Makefile/build processor | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `GCC `_ | 3.4.2 | C/C++ compiler\ :sup:`1` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `TeXinfo `_ | 4.5 | For building the CFE | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `SVN `_ | >=1.3 | Subversion access to LLVM\ :sup:`2` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `DejaGnu `_ | 1.4.2 | Automated test suite\ :sup:`3` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `tcl `_ | 8.3, 8.4 | Automated test suite\ :sup:`3` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `expect `_ | 5.38.0 | Automated test suite\ :sup:`3` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `perl `_ | >=5.6.0 | Utilities | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `GNU M4 `_ | 1.4 | Macro processor for configuration\ :sup:`4` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `GNU Autoconf `_ | 2.60 | Configuration script builder\ :sup:`4` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `GNU Automake `_ | 1.9.6 | aclocal macro generator\ :sup:`4` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ +| `libtool `_ | 1.5.22 | Shared library manager\ :sup:`4` | ++--------------------------------------------------------------+-----------------+---------------------------------------------+ + +.. note:: + + #. Only the C and C++ languages are needed so there's no need to build the + other languages for LLVM's purposes. See `below` for specific version + info. + #. You only need Subversion if you intend to build from the latest LLVM + sources. If you're working from a release distribution, you don't need + Subversion. + #. Only needed if you want to run the automated test suite in the + ``llvm/test`` directory. + #. If you want to make changes to the configure scripts, you will need GNU + autoconf (2.60), and consequently, GNU M4 (version 1.4 or higher). You + will also need automake (1.9.6). We only use aclocal from that package. + +Additionally, your compilation host is expected to have the usual plethora of +Unix utilities. 
Specifically: + +* **ar** --- archive library builder +* **bzip2** --- bzip2 command for distribution generation +* **bunzip2** --- bunzip2 command for distribution checking +* **chmod** --- change permissions on a file +* **cat** --- output concatenation utility +* **cp** --- copy files +* **date** --- print the current date/time +* **echo** --- print to standard output +* **egrep** --- extended regular expression search utility +* **find** --- find files/dirs in a file system +* **grep** --- regular expression search utility +* **gzip** --- gzip command for distribution generation +* **gunzip** --- gunzip command for distribution checking +* **install** --- install directories/files +* **mkdir** --- create a directory +* **mv** --- move (rename) files +* **ranlib** --- symbol table builder for archive libraries +* **rm** --- remove (delete) files and directories +* **sed** --- stream editor for transforming output +* **sh** --- Bourne shell for make build scripts +* **tar** --- tape archive for distribution generation +* **test** --- test things in file system +* **unzip** --- unzip command for distribution checking +* **zip** --- zip command for distribution generation + +.. _below: +.. _check here: + +Broken versions of GCC and other tools +-------------------------------------- + +LLVM is very demanding of the host C++ compiler, and as such tends to expose +bugs in the compiler. In particular, several versions of GCC crash when trying +to compile LLVM. We routinely use GCC 4.2 (and higher) or Clang. Other +versions of GCC will probably work as well. GCC versions listed here are known +to not work. If you are using one of these versions, please try to upgrade your +GCC to something more recent. If you run into a problem with a version of GCC +not listed here, please `let us know `_. Please use +the "``gcc -v``" command to find out which version of GCC you are using. + +**GCC versions prior to 3.0**: GCC 2.96.x and before had several problems in the +STL that effectively prevent it from compiling LLVM. + +**GCC 3.2.2 and 3.2.3**: These versions of GCC fails to compile LLVM with a +bogus template error. This was fixed in later GCCs. + +**GCC 3.3.2**: This version of GCC suffered from a `serious bug +`_ which causes it to crash in the +"``convert_from_eh_region_ranges_1``" GCC function. + +**Cygwin GCC 3.3.3**: The version of GCC 3.3.3 commonly shipped with Cygwin does +not work. + +**SuSE GCC 3.3.3**: The version of GCC 3.3.3 shipped with SuSE 9.1 (and possibly +others) does not compile LLVM correctly (it appears that exception handling is +broken in some cases). Please download the FSF 3.3.3 or upgrade to a newer +version of GCC. + +**GCC 3.4.0 on linux/x86 (32-bit)**: GCC miscompiles portions of the code +generator, causing an infinite loop in the llvm-gcc build when built with +optimizations enabled (i.e. a release build). + +**GCC 3.4.2 on linux/x86 (32-bit)**: GCC miscompiles portions of the code +generator at -O3, as with 3.4.0. However gcc 3.4.2 (unlike 3.4.0) correctly +compiles LLVM at -O2. A work around is to build release LLVM builds with +"``make ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O2 ...``" + +**GCC 3.4.x on X86-64/amd64**: GCC `miscompiles portions of LLVM +`__. + +**GCC 3.4.4 (CodeSourcery ARM 2005q3-2)**: this compiler miscompiles LLVM when +building with optimizations enabled. It appears to work with "``make +ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O1``" or build a debug build. + +**IA-64 GCC 4.0.0**: The IA-64 version of GCC 4.0.0 is known to miscompile LLVM. 
+ +**Apple Xcode 2.3**: GCC crashes when compiling LLVM at -O3 (which is the +default with ENABLE_OPTIMIZED=1. To work around this, build with +"``ENABLE_OPTIMIZED=1 OPTIMIZE_OPTION=-O2``". + +**GCC 4.1.1**: GCC fails to build LLVM with template concept check errors +compiling some files. At the time of this writing, GCC mainline (4.2) did not +share the problem. + +**GCC 4.1.1 on X86-64/amd64**: GCC `miscompiles portions of LLVM +`__ when compiling llvm itself into 64-bit code. LLVM +will appear to mostly work but will be buggy, e.g. failing portions of its +testsuite. + +**GCC 4.1.2 on OpenSUSE**: Seg faults during libstdc++ build and on x86_64 +platforms compiling md5.c gets a mangled constant. + +**GCC 4.1.2 (20061115 (prerelease) (Debian 4.1.1-21)) on Debian**: Appears to +miscompile parts of LLVM 2.4. One symptom is ValueSymbolTable complaining about +symbols remaining in the table on destruction. + +**GCC 4.1.2 20071124 (Red Hat 4.1.2-42)**: Suffers from the same symptoms as the +previous one. It appears to work with ENABLE_OPTIMIZED=0 (the default). + +**Cygwin GCC 4.3.2 20080827 (beta) 2**: Users `reported +`_ various problems related with link errors when using +this GCC version. + +**Debian GCC 4.3.2 on X86**: Crashes building some files in LLVM 2.6. + +**GCC 4.3.3 (Debian 4.3.3-10) on ARM**: Miscompiles parts of LLVM 2.6 when +optimizations are turned on. The symptom is an infinite loop in +``FoldingSetImpl::RemoveNode`` while running the code generator. + +**SUSE 11 GCC 4.3.4**: Miscompiles LLVM, causing crashes in ValueHandle logic. + +**GCC 4.3.5 and GCC 4.4.5 on ARM**: These can miscompile ``value >> 1`` even at +``-O0``. A test failure in ``test/Assembler/alignstack.ll`` is one symptom of +the problem. + +**GNU ld 2.16.X**. Some 2.16.X versions of the ld linker will produce very long +warning messages complaining that some "``.gnu.linkonce.t.*``" symbol was +defined in a discarded section. You can safely ignore these messages as they are +erroneous and the linkage is correct. These messages disappear using ld 2.17. + +**GNU binutils 2.17**: Binutils 2.17 contains `a bug +`__ which causes huge link +times (minutes instead of seconds) when building LLVM. We recommend upgrading +to a newer version (2.17.50.0.4 or later). + +**GNU Binutils 2.19.1 Gold**: This version of Gold contained `a bug +`__ which causes +intermittent failures when building LLVM with position independent code. The +symptom is an error about cyclic dependencies. We recommend upgrading to a +newer version of Gold. + +.. _Getting Started with LLVM: + +Getting Started with LLVM +========================= + +The remainder of this guide is meant to get you up and running with LLVM and to +give you some basic information about the LLVM environment. + +The later sections of this guide describe the `general layout`_ of the LLVM +source tree, a `simple example`_ using the LLVM tool chain, and `links`_ to find +more information about LLVM or to get help via e-mail. + +Terminology and Notation +------------------------ + +Throughout this manual, the following names are used to denote paths specific to +the local system and working environment. *These are not environment variables +you need to set but just strings used in the rest of this document below*. In +any of the examples below, simply replace each of these names with the +appropriate pathname on your local system. All these paths are absolute: + +``SRC_ROOT`` + + This is the top level directory of the LLVM source tree. 
+ +``OBJ_ROOT`` + + This is the top level directory of the LLVM object tree (i.e. the tree where + object files and compiled programs will be placed. It can be the same as + SRC_ROOT). + +.. _Setting Up Your Environment: + +Setting Up Your Environment +--------------------------- + +In order to compile and use LLVM, you may need to set some environment +variables. + +``LLVM_LIB_SEARCH_PATH=/path/to/your/bitcode/libs`` + + [Optional] This environment variable helps LLVM linking tools find the + locations of your bitcode libraries. It is provided only as a convenience + since you can specify the paths using the -L options of the tools and the + C/C++ front-end will automatically use the bitcode files installed in its + ``lib`` directory. + +Unpacking the LLVM Archives +--------------------------- + +If you have the LLVM distribution, you will need to unpack it before you can +begin to compile it. LLVM is distributed as a set of two files: the LLVM suite +and the LLVM GCC front end compiled for your platform. There is an additional +test suite that is optional. Each file is a TAR archive that is compressed with +the gzip program. + +The files are as follows, with *x.y* marking the version number: + +``llvm-x.y.tar.gz`` + + Source release for the LLVM libraries and tools. + +``llvm-test-x.y.tar.gz`` + + Source release for the LLVM test-suite. + +``llvm-gcc-4.2-x.y.source.tar.gz`` + + Source release of the llvm-gcc-4.2 front end. See README.LLVM in the root + directory for build instructions. + +``llvm-gcc-4.2-x.y-platform.tar.gz`` + + Binary release of the llvm-gcc-4.2 front end for a specific platform. + +Checkout LLVM from Subversion +----------------------------- + +If you have access to our Subversion repository, you can get a fresh copy of the +entire source code. All you need to do is check it out from Subversion as +follows: + +* ``cd where-you-want-llvm-to-live`` +* Read-Only: ``svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm`` +* Read-Write:``svn co https://user@llvm.org/svn/llvm-project/llvm/trunk llvm`` + +This will create an '``llvm``' directory in the current directory and fully +populate it with the LLVM source code, Makefiles, test directories, and local +copies of documentation files. + +If you want to get a specific release (as opposed to the most recent revision), +you can checkout it from the '``tags``' directory (instead of '``trunk``'). The +following releases are located in the following subdirectories of the '``tags``' +directory: + +* Release 3.1: **RELEASE_31/final** +* Release 3.0: **RELEASE_30/final** +* Release 2.9: **RELEASE_29/final** +* Release 2.8: **RELEASE_28** +* Release 2.7: **RELEASE_27** +* Release 2.6: **RELEASE_26** +* Release 2.5: **RELEASE_25** +* Release 2.4: **RELEASE_24** +* Release 2.3: **RELEASE_23** +* Release 2.2: **RELEASE_22** +* Release 2.1: **RELEASE_21** +* Release 2.0: **RELEASE_20** +* Release 1.9: **RELEASE_19** +* Release 1.8: **RELEASE_18** +* Release 1.7: **RELEASE_17** +* Release 1.6: **RELEASE_16** +* Release 1.5: **RELEASE_15** +* Release 1.4: **RELEASE_14** +* Release 1.3: **RELEASE_13** +* Release 1.2: **RELEASE_12** +* Release 1.1: **RELEASE_11** +* Release 1.0: **RELEASE_1** + +If you would like to get the LLVM test suite (a separate package as of 1.4), you +get it from the Subversion repository: + +.. 
code-block:: bash + + % cd llvm/projects + % svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite + +By placing it in the ``llvm/projects``, it will be automatically configured by +the LLVM configure script as well as automatically updated when you run ``svn +update``. + +GIT mirror +---------- + +GIT mirrors are available for a number of LLVM subprojects. These mirrors sync +automatically with each Subversion commit and contain all necessary git-svn +marks (so, you can recreate git-svn metadata locally). Note that right now +mirrors reflect only ``trunk`` for each project. You can do the read-only GIT +clone of LLVM via: + +.. code-block:: bash + + % git clone http://llvm.org/git/llvm.git + +If you want to check out clang too, run: + +.. code-block:: bash + + % git clone http://llvm.org/git/llvm.git + % cd llvm/tools + % git clone http://llvm.org/git/clang.git + +Since the upstream repository is in Subversion, you should use ``git +pull --rebase`` instead of ``git pull`` to avoid generating a non-linear history +in your clone. To configure ``git pull`` to pass ``--rebase`` by default on the +master branch, run the following command: + +.. code-block:: bash + + % git config branch.master.rebase true + +Sending patches with Git +^^^^^^^^^^^^^^^^^^^^^^^^ + +Please read `Developer Policy `_, too. + +Assume ``master`` points the upstream and ``mybranch`` points your working +branch, and ``mybranch`` is rebased onto ``master``. At first you may check +sanity of whitespaces: + +.. code-block:: bash + + % git diff --check master..mybranch + +The easiest way to generate a patch is as below: + +.. code-block:: bash + + % git diff master..mybranch > /path/to/mybranch.diff + +It is a little different from svn-generated diff. git-diff-generated diff has +prefixes like ``a/`` and ``b/``. Don't worry, most developers might know it +could be accepted with ``patch -p1 -N``. + +But you may generate patchset with git-format-patch. It generates by-each-commit +patchset. To generate patch files to attach to your article: + +.. code-block:: bash + + % git format-patch --no-attach master..mybranch -o /path/to/your/patchset + +If you would like to send patches directly, you may use git-send-email or +git-imap-send. Here is an example to generate the patchset in Gmail's [Drafts]. + +.. code-block:: bash + + % git format-patch --attach master..mybranch --stdout | git imap-send + +Then, your .git/config should have [imap] sections. + +.. code-block:: bash + + [imap] + host = imaps://imap.gmail.com + user = your.gmail.account@gmail.com + pass = himitsu! + port = 993 + sslverify = false + ; in English + folder = "[Gmail]/Drafts" + ; example for Japanese, "Modified UTF-7" encoded. + folder = "[Gmail]/&Tgtm+DBN-" + ; example for Traditional Chinese + folder = "[Gmail]/&g0l6Pw-" + +For developers to work with git-svn +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To set up clone from which you can submit code using ``git-svn``, run: + +.. code-block:: bash + + % git clone http://llvm.org/git/llvm.git + % cd llvm + % git svn init https://llvm.org/svn/llvm-project/llvm/trunk --username= + % git config svn-remote.svn.fetch :refs/remotes/origin/master + % git svn rebase -l # -l avoids fetching ahead of the git mirror. 
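+ + # (Optional sanity check -- an addition to this guide, not part of the original + # recipe: after the rebase above, 'git svn info' should print the Subversion + # URL and revision that your local master now corresponds to.) + % git svn info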
+ + # If you have clang too: + % cd tools + % git clone http://llvm.org/git/clang.git + % cd clang + % git svn init https://llvm.org/svn/llvm-project/cfe/trunk --username= + % git config svn-remote.svn.fetch :refs/remotes/origin/master + % git svn rebase -l + +To update this clone without generating git-svn tags that conflict with the +upstream git repo, run: + +.. code-block:: bash + + % git fetch && (cd tools/clang && git fetch) # Get matching revisions of both trees. + % git checkout master + % git svn rebase -l + % (cd tools/clang && + git checkout master && + git svn rebase -l) + +This leaves your working directories on their master branches, so you'll need to +``checkout`` each working branch individually and ``rebase`` it on top of its +parent branch. (Note: This script is intended for relative newbies to git. If +you have more experience, you can likely improve on it.) + +The git-svn metadata can get out of sync after you mess around with branches and +``dcommit``. When that happens, ``git svn dcommit`` stops working, complaining +about files with uncommitted changes. The fix is to rebuild the metadata: + +.. code-block:: bash + + % rm -rf .git/svn + % git svn rebase -l + +Local LLVM Configuration +------------------------ + +Once checked out from the Subversion repository, the LLVM suite source code must +be configured via the ``configure`` script. This script sets variables in the +various ``*.in`` files, most notably ``llvm/Makefile.config`` and +``llvm/include/Config/config.h``. It also populates *OBJ_ROOT* with the +Makefiles needed to begin building LLVM. + +The following environment variables are used by the ``configure`` script to +configure the build system: + ++------------+-----------------------------------------------------------+ +| Variable | Purpose | ++============+===========================================================+ +| CC | Tells ``configure`` which C compiler to use. By default, | +| | ``configure`` will look for the first GCC C compiler in | +| | ``PATH``. Use this variable to override ``configure``\'s | +| | default behavior. | ++------------+-----------------------------------------------------------+ +| CXX | Tells ``configure`` which C++ compiler to use. By | +| | default, ``configure`` will look for the first GCC C++ | +| | compiler in ``PATH``. Use this variable to override | +| | ``configure``'s default behavior. | ++------------+-----------------------------------------------------------+ + +The following options can be used to set or enable LLVM specific options: + +``--enable-optimized`` + + Enables optimized compilation (debugging symbols are removed and GCC + optimization flags are enabled). Note that this is the default setting if you + are using the LLVM distribution. The default behavior of an Subversion + checkout is to use an unoptimized build (also known as a debug build). + +``--enable-debug-runtime`` + + Enables debug symbols in the runtime libraries. The default is to strip debug + symbols from the runtime libraries. + +``--enable-jit`` + + Compile the Just In Time (JIT) compiler functionality. This is not available + on all platforms. The default is dependent on platform, so it is best to + explicitly enable it if you want it. + +``--enable-targets=target-option`` + + Controls which targets will be built and linked into llc. The default value + for ``target_options`` is "all" which builds and links all available targets. + The value "host-only" can be specified to build only a native compiler (no + cross-compiler targets available). 
The "native" target is selected as the + target of the build host. You can also specify a comma separated list of + target names that you want available in llc. The target names use all lower + case. The current set of targets is: + + ``arm, cpp, hexagon, mblaze, mips, mipsel, msp430, powerpc, ptx, sparc, spu, + x86, x86_64, xcore``. + +``--enable-doxygen`` + + Look for the doxygen program and enable construction of doxygen based + documentation from the source code. This is disabled by default because + generating the documentation can take a long time and produces hundreds of + megabytes of output. + +``--with-udis86`` + + LLVM can use an external disassembler library for various purposes (currently it is + used only for examining code produced by the JIT). This option will enable usage of + the `udis86 `_ x86 (both 32 and 64 bit) + disassembler library. + +To configure LLVM, follow these steps: + +#. Change directory into the object root directory: + + .. code-block:: bash + + % cd OBJ_ROOT + +#. Run the ``configure`` script located in the LLVM source tree: + + .. code-block:: bash + + % SRC_ROOT/configure --prefix=/install/path [other options] + +Compiling the LLVM Suite Source Code +------------------------------------ + +Once you have configured LLVM, you can build it. There are three types of +builds: + +Debug Builds + + These builds are the default when one is using a Subversion checkout and + types ``gmake`` (unless the ``--enable-optimized`` option was used during + configuration). The build system will compile the tools and libraries with + debugging information. To get a Debug Build using the LLVM distribution, the + ``--disable-optimized`` option must be passed to ``configure``. + +Release (Optimized) Builds + + These builds are enabled with the ``--enable-optimized`` option to + ``configure`` or by specifying ``ENABLE_OPTIMIZED=1`` on the ``gmake`` command + line. For these builds, the build system will compile the tools and libraries + with GCC optimizations enabled and strip debugging information from the + libraries and executables it generates. Note that Release Builds are the default + when using an LLVM distribution. + +Profile Builds + + These builds are for use with profiling. They compile profiling information + into the code for use with programs like ``gprof``. Profile builds must be + started by specifying ``ENABLE_PROFILING=1`` on the ``gmake`` command line. + +Once you have LLVM configured, you can build it by entering the *OBJ_ROOT* +directory and issuing the following command: + +.. code-block:: bash + + % gmake + +If the build fails, please `check here`_ to see if you are using a version of +GCC that is known not to compile LLVM. + +If you have multiple processors in your machine, you may wish to use some of the +parallel build options provided by GNU Make. For example, you could use the +command: + +.. code-block:: bash + + % gmake -j2 + +There are several special targets which are useful when working with the LLVM +source code: + +``gmake clean`` + + Removes all files generated by the build. This includes object files, + generated C/C++ files, libraries, and executables. + +``gmake dist-clean`` + + Removes everything that ``gmake clean`` does, but also removes files generated + by ``configure``. It attempts to return the source tree to the original state + in which it was shipped. 
+ +``gmake install`` + + Installs LLVM header files, libraries, tools, and documentation in a hierarchy + under ``$PREFIX``, specified with ``./configure --prefix=[dir]``, which + defaults to ``/usr/local``. + +``gmake -C runtime install-bytecode`` + + Assuming you built LLVM into $OBJDIR, when this command is run, it will + install bitcode libraries into the GCC front end's bitcode library directory. + If you need to update your bitcode libraries, this is the target to use once + you've built them. + +Please see the `Makefile Guide `_ for further details on +these ``make`` targets and descriptions of other targets available. + +It is also possible to override default values from ``configure`` by declaring +variables on the command line. The following are some examples: + +``gmake ENABLE_OPTIMIZED=1`` + + Perform a Release (Optimized) build. + +``gmake ENABLE_OPTIMIZED=1 DISABLE_ASSERTIONS=1`` + + Perform a Release (Optimized) build without assertions enabled. + +``gmake ENABLE_OPTIMIZED=0`` + + Perform a Debug build. + +``gmake ENABLE_PROFILING=1`` + + Perform a Profiling build. + +``gmake VERBOSE=1`` + + Print what ``gmake`` is doing on standard output. + +``gmake TOOL_VERBOSE=1`` + + Ask each tool invoked by the makefiles to print out what it is doing on + the standard output. This also implies ``VERBOSE=1``. + +Every directory in the LLVM object tree includes a ``Makefile`` to build it and +any subdirectories that it contains. Entering any directory inside the LLVM +object tree and typing ``gmake`` should rebuild anything in or below that +directory that is out of date. + +Cross-Compiling LLVM +-------------------- + +It is possible to cross-compile LLVM itself. That is, you can create LLVM +executables and libraries to be hosted on a platform different from the platform +where they are build (a Canadian Cross build). To configure a cross-compile, +supply the configure script with ``--build`` and ``--host`` options that are +different. The values of these options must be legal target triples that your +GCC compiler supports. + +The result of such a build is executables that are not runnable on on the build +host (--build option) but can be executed on the compile host (--host option). + +The Location of LLVM Object Files +--------------------------------- + +The LLVM build system is capable of sharing a single LLVM source tree among +several LLVM builds. Hence, it is possible to build LLVM for several different +platforms or configurations using the same source tree. + +This is accomplished in the typical autoconf manner: + +* Change directory to where the LLVM object files should live: + + .. code-block:: bash + + % cd OBJ_ROOT + +* Run the ``configure`` script found in the LLVM source directory: + + .. code-block:: bash + + % SRC_ROOT/configure + +The LLVM build will place files underneath *OBJ_ROOT* in directories named after +the build type: + +Debug Builds with assertions enabled (the default) + + Tools + + ``OBJ_ROOT/Debug+Asserts/bin`` + + Libraries + + ``OBJ_ROOT/Debug+Asserts/lib`` + +Release Builds + + Tools + + ``OBJ_ROOT/Release/bin`` + + Libraries + + ``OBJ_ROOT/Release/lib`` + +Profile Builds + + Tools + + ``OBJ_ROOT/Profile/bin`` + + Libraries + + ``OBJ_ROOT/Profile/lib`` + +Optional Configuration Items +---------------------------- + +If you're running on a Linux system that supports the `binfmt_misc +`_ +module, and you have root access on the system, you can set your system up to +execute LLVM bitcode files directly. 
To do this, use commands like this (the +first command may not be required if you are already using the module): + +.. code-block:: bash + + % mount -t binfmt_misc none /proc/sys/fs/binfmt_misc + % echo ':llvm:M::BC::/path/to/lli:' > /proc/sys/fs/binfmt_misc/register + % chmod u+x hello.bc (if needed) + % ./hello.bc + +This allows you to execute LLVM bitcode files directly. On Debian, you can also +use this command instead of the 'echo' command above: + +.. code-block:: bash + + % sudo update-binfmts --install llvm /path/to/lli --magic 'BC' + +.. _Program Layout: +.. _general layout: + +Program Layout +============== + +One useful source of information about the LLVM source base is the LLVM `doxygen +`_ documentation available at +``_. The following is a brief introduction to code +layout: + +``llvm/examples`` +----------------- + +This directory contains some simple examples of how to use the LLVM IR and JIT. + +``llvm/include`` +---------------- + +This directory contains public header files exported from the LLVM library. The +three main subdirectories of this directory are: + +``llvm/include/llvm`` + + This directory contains all of the LLVM specific header files. This directory + also has subdirectories for different portions of LLVM: ``Analysis``, + ``CodeGen``, ``Target``, ``Transforms``, etc... + +``llvm/include/llvm/Support`` + + This directory contains generic support libraries that are provided with LLVM + but not necessarily specific to LLVM. For example, some C++ STL utilities and + a Command Line option processing library store their header files here. + +``llvm/include/llvm/Config`` + + This directory contains header files configured by the ``configure`` script. + They wrap "standard" UNIX and C header files. Source code can include these + header files which automatically take care of the conditional #includes that + the ``configure`` script generates. + +``llvm/lib`` +------------ + +This directory contains most of the source files of the LLVM system. In LLVM, +almost all code exists in libraries, making it very easy to share code among the +different `tools`_. + +``llvm/lib/VMCore/`` + + This directory holds the core LLVM source files that implement core classes + like Instruction and BasicBlock. + +``llvm/lib/AsmParser/`` + + This directory holds the source code for the LLVM assembly language parser + library. + +``llvm/lib/BitCode/`` + + This directory holds code for reading and write LLVM bitcode. + +``llvm/lib/Analysis/`` + + This directory contains a variety of different program analyses, such as + Dominator Information, Call Graphs, Induction Variables, Interval + Identification, Natural Loop Identification, etc. + +``llvm/lib/Transforms/`` + + This directory contains the source code for the LLVM to LLVM program + transformations, such as Aggressive Dead Code Elimination, Sparse Conditional + Constant Propagation, Inlining, Loop Invariant Code Motion, Dead Global + Elimination, and many others. + +``llvm/lib/Target/`` + + This directory contains files that describe various target architectures for + code generation. For example, the ``llvm/lib/Target/X86`` directory holds the + X86 machine description while ``llvm/lib/Target/ARM`` implements the ARM + backend. + +``llvm/lib/CodeGen/`` + + This directory contains the major parts of the code generator: Instruction + Selector, Instruction Scheduling, and Register Allocation. + +``llvm/lib/MC/`` + + (FIXME: T.B.D.) 
+ +``llvm/lib/Debugger/`` + + This directory contains the source level debugger library that makes it + possible to instrument LLVM programs so that a debugger could identify source + code locations at which the program is executing. + +``llvm/lib/ExecutionEngine/`` + + This directory contains libraries for executing LLVM bitcode directly at + runtime in both interpreted and JIT compiled fashions. + +``llvm/lib/Support/`` + + This directory contains the source code that corresponds to the header files + located in ``llvm/include/ADT/`` and ``llvm/include/Support/``. + +``llvm/projects`` +----------------- + +This directory contains projects that are not strictly part of LLVM but are +shipped with LLVM. This is also the directory where you should create your own +LLVM-based projects. See ``llvm/projects/sample`` for an example of how to set +up your own project. + +``llvm/runtime`` +---------------- + +This directory contains libraries which are compiled into LLVM bitcode and used +when linking programs with the Clang front end. Most of these libraries are +skeleton versions of real libraries; for example, libc is a stripped down +version of glibc. + +Unlike the rest of the LLVM suite, this directory needs the LLVM GCC front end +to compile. + +``llvm/test`` +------------- + +This directory contains feature and regression tests and other basic sanity +checks on the LLVM infrastructure. These are intended to run quickly and cover a +lot of territory without being exhaustive. + +``test-suite`` +-------------- + +This is not a directory in the normal llvm module; it is a separate Subversion +module that must be checked out (usually to ``projects/test-suite``). This +module contains a comprehensive correctness, performance, and benchmarking test +suite for LLVM. It is a separate Subversion module because not every LLVM user +is interested in downloading or building such a comprehensive test suite. For +further details on this test suite, please see the `Testing +Guide `_ document. + +.. _tools: + +``llvm/tools`` +-------------- + +The **tools** directory contains the executables built out of the libraries +above, which form the main part of the user interface. You can always get help +for a tool by typing ``tool_name -help``. The following is a brief introduction +to the most important tools. More detailed information is in +the `Command Guide `_. + +``bugpoint`` + + ``bugpoint`` is used to debug optimization passes or code generation backends + by narrowing down the given test case to the minimum number of passes and/or + instructions that still cause a problem, whether it is a crash or + miscompilation. See ``_ for more information on using + ``bugpoint``. + +``llvm-ar`` + + The archiver produces an archive containing the given LLVM bitcode files, + optionally with an index for faster lookup. + +``llvm-as`` + + The assembler transforms the human readable LLVM assembly to LLVM bitcode. + +``llvm-dis`` + + The disassembler transforms the LLVM bitcode to human readable LLVM assembly. + +``llvm-link`` + + ``llvm-link``, not surprisingly, links multiple LLVM modules into a single + program. + +``lli`` + + ``lli`` is the LLVM interpreter, which can directly execute LLVM bitcode + (although very slowly...). For architectures that support it (currently x86, + Sparc, and PowerPC), by default, ``lli`` will function as a Just-In-Time + compiler (if the functionality was compiled in), and will execute the code + *much* faster than the interpreter. 
+ +``llc`` + + ``llc`` is the LLVM backend compiler, which translates LLVM bitcode to a + native code assembly file or to C code (with the ``-march=c`` option). + +``opt`` + + ``opt`` reads LLVM bitcode, applies a series of LLVM to LLVM transformations + (which are specified on the command line), and then outputs the resultant + bitcode. The '``opt -help``' command is a good way to get a list of the + program transformations available in LLVM. + + ``opt`` can also be used to run a specific analysis on an input LLVM bitcode + file and print out the results. It is primarily useful for debugging + analyses, or familiarizing yourself with what an analysis does. + +``llvm/utils`` +-------------- + +This directory contains utilities for working with LLVM source code, and some of +the utilities are actually required as part of the build process because they +are code generators for parts of LLVM infrastructure. + + +``codegen-diff`` + + ``codegen-diff`` is a script that finds differences between code that LLC + generates and code that LLI generates. This is a useful tool if you are + debugging one of them, assuming that the other generates correct output. For + the full user manual, run ``perldoc codegen-diff``. + +``emacs/`` + + The ``emacs`` directory contains syntax-highlighting files which will work + with Emacs and XEmacs editors, providing syntax highlighting support for LLVM + assembly files and TableGen description files. For information on how to use + the syntax files, consult the ``README`` file in that directory. + +``getsrcs.sh`` + + The ``getsrcs.sh`` script finds and outputs all non-generated source files, + which is useful if one wishes to do a lot of development across directories + and does not want to individually find each file. One way to use it is to run, + for example: ``xemacs `utils/getsrcs.sh``` from the top of your LLVM source + tree. + +``llvmgrep`` + + This little tool performs an ``egrep -H -n`` on each source file in LLVM and + passes to it a regular expression provided on ``llvmgrep``'s command + line. This is a very efficient way of searching the source base for a + particular regular expression. + +``makellvm`` + + The ``makellvm`` script compiles all files in the current directory and then + compiles and links the tool that is the first argument. For example, assuming + you are in the directory ``llvm/lib/Target/Sparc``, if ``makellvm`` is in your + path, simply running ``makellvm llc`` will make a build of the current + directory, switch to directory ``llvm/tools/llc`` and build it, causing a + re-linking of LLC. + +``TableGen/`` + + The ``TableGen`` directory contains the tool used to generate register + descriptions, instruction set descriptions, and even assemblers from common + TableGen description files. + +``vim/`` + + The ``vim`` directory contains syntax-highlighting files which will work with + the VIM editor, providing syntax highlighting support for LLVM assembly files + and TableGen description files. For information on how to use the syntax + files, consult the ``README`` file in that directory. + +.. _simple example: + +An Example Using the LLVM Tool Chain +==================================== + +This section gives an example of using LLVM with the Clang front end. + +Example with clang +------------------ + +#. First, create a simple C file, name it 'hello.c': + + .. code-block:: c + + #include <stdio.h> + + int main() { + printf("hello world\n"); + return 0; + } + +#. Next, compile the C file into a native executable: + + ..
code-block:: bash + + % clang hello.c -o hello + + .. note:: + + Clang works just like GCC by default. The standard -S and -c arguments + work as usual (producing a native .s or .o file, respectively). + +#. Next, compile the C file into an LLVM bitcode file: + + .. code-block:: bash + + % clang -O3 -emit-llvm hello.c -c -o hello.bc + + The -emit-llvm option can be used with the -S or -c options to emit an LLVM + ``.ll`` or ``.bc`` file (respectively) for the code. This allows you to use + the `standard LLVM tools `_ on the bitcode file. + +#. Run the program in both forms. To run the program, use: + + .. code-block:: bash + + % ./hello + + and + + .. code-block:: bash + + % lli hello.bc + + The second example shows how to invoke the LLVM JIT, `lli + `_. + +#. Use the ``llvm-dis`` utility to take a look at the LLVM assembly code: + + .. code-block:: bash + + % llvm-dis < hello.bc | less + +#. Compile the program to native assembly using the LLC code generator: + + .. code-block:: bash + + % llc hello.bc -o hello.s + +#. Assemble the native assembly language file into a program: + + .. code-block:: bash + + **Solaris:** % /opt/SUNWspro/bin/cc -xarch=v9 hello.s -o hello.native + + **Others:** % gcc hello.s -o hello.native + +#. Execute the native code program: + + .. code-block:: bash + + % ./hello.native + + Note that using clang to compile directly to native code (i.e. when the + ``-emit-llvm`` option is not present) does steps 6/7/8 for you. + +Common Problems +=============== + +If you are having problems building or using LLVM, or if you have any other +general questions about LLVM, please consult the `Frequently Asked +Questions `_ page. + +.. _links: + +Links +===== + +This document is just an **introduction** on how to use LLVM to do some simple +things... there are many more interesting and complicated things that you can do +that aren't documented here (but we'll gladly accept a patch if you want to +write something up!). For more information about LLVM, check out: + +* `LLVM Homepage `_ +* `LLVM Doxygen Tree `_ +* `Starting a Project that Uses LLVM `_ diff --git a/docs/GoldPlugin.html b/docs/GoldPlugin.html deleted file mode 100644 index 1e99a5a..0000000 --- a/docs/GoldPlugin.html +++ /dev/null @@ -1,227 +0,0 @@ - - - - - LLVM gold plugin - - - - -

    LLVM gold plugin

    -
      -
    1. Introduction
    2. -
    3. How to build it
    4. -
    5. Usage -
    6. -
    7. Licensing
    8. -
    -
    Written by Nick Lewycky
    - - -

    Introduction

    - -
    -

    Building with link time optimization requires cooperation from the -system linker. LTO support on Linux systems requires that you use -the gold linker which supports -LTO via plugins. This is the same mechanism used by the -GCC LTO -project.

    -

    The LLVM gold plugin implements the -gold plugin interface -on top of -libLTO. -The same plugin can also be used by other tools such as ar and -nm. -

    - -

    How to build it

    - -
    -

    You need to have gold with plugin support and build the LLVMgold -plugin. Check whether you have gold running /usr/bin/ld -v. It will -report “GNU gold” or else “GNU ld” if not. If you have -gold, check for plugin support by running /usr/bin/ld -plugin. If it -complains “missing argument” then you have plugin support. If not, -such as an “unknown option” error then you will either need to -build gold or install a version with plugin support.

    -
      -
    • To build gold with plugin support: -
      -mkdir binutils
      -cd binutils
      -cvs -z 9 -d :pserver:anoncvs@sourceware.org:/cvs/src login
      -{enter "anoncvs" as the password}
      -cvs -z 9 -d :pserver:anoncvs@sourceware.org:/cvs/src co binutils
      -mkdir build
      -cd build
      -../src/configure --enable-gold --enable-plugins
      -make all-gold
      -
      - That should leave you with binutils/build/gold/ld-new which supports the -plugin option. It also built would have -binutils/build/binutils/ar and nm-new which support plugins -but don't have a visible -plugin option, instead relying on the gold plugin -being present in ../lib/bfd-plugins relative to where the binaries are -placed. -
    • Build the LLVMgold plugin: Configure LLVM with - --with-binutils-include=/path/to/binutils/src/include and run - make. -
    -
    - -

    Usage

    - -
    - -

    The linker takes a -plugin option that points to the path of - the plugin .so file. To find out what link command gcc - would run in a given situation, run gcc -v [...] and look - for the line where it runs collect2. Replace that with - ld-new -plugin /path/to/LLVMgold.so to test it out. Once you're - ready to switch to using gold, backup your existing /usr/bin/ld - then replace it with ld-new.

    - -

    You can produce bitcode files from clang using - -emit-llvm or -flto, or the -O4 flag which is - synonymous with -O3 -flto.

    - -

    Any of these flags will also cause clang to look for the - gold plugin in the lib directory under its prefix and pass the - -plugin option to ld. It will not look for an alternate - linker, which is why you need gold to be the installed system linker in - your path.

    - -

    If you want ar and nm to work seamlessly as well, install - LLVMgold.so to /usr/lib/bfd-plugins. If you built your - own gold, be sure to install the ar and nm-new you built to - /usr/bin.

    - - -

    - Example of link time optimization -

    - -
    -

    The following example shows a worked example of the gold plugin mixing - LLVM bitcode and native code. -

    ---- a.c ---
    -#include <stdio.h>
    -
    -extern void foo1(void);
    -extern void foo4(void);
    -
    -void foo2(void) {
    -  printf("Foo2\n");
    -}
    -
    -void foo3(void) {
    -  foo4();
    -}
    -
    -int main(void) {
    -  foo1();
    -}
    -
    ---- b.c ---
    -#include <stdio.h>
    -
    -extern void foo2(void);
    -
    -void foo1(void) {
    -  foo2();
    -}
    -
    -void foo4(void) {
    -  printf("Foo4");
    -}
    -
    ---- command lines ---
    -$ clang -flto a.c -c -o a.o      # <-- a.o is LLVM bitcode file
    -$ ar q a.a a.o                   # <-- a.a is an archive with LLVM bitcode
    -$ clang b.c -c -o b.o            # <-- b.o is native object file
    -$ clang -flto a.a b.o -o main    # <-- link with LLVMgold plugin
    -
    - -

    Gold informs the plugin that foo3 is never referenced outside the IR, - leading LLVM to delete that function. However, unlike in the - libLTO - example gold does not currently eliminate foo4.

    -
    - -
    - - -

    - - Quickstart for using LTO with autotooled projects - -

    - -
    -

    Once your system ld, ar, and nm all support LLVM - bitcode, everything is in place for an easy to use LTO build of autotooled - projects:

    - -
      -
    • Follow the instructions on how to build LLVMgold.so.
    • -
    • Install the newly built binutils to $PREFIX
    • -
    • Copy Release/lib/LLVMgold.so to - $PREFIX/lib/bfd-plugins/
    • -
    • Set environment variables ($PREFIX is where you installed clang and - binutils): -
      -export CC="$PREFIX/bin/clang -flto"
      -export CXX="$PREFIX/bin/clang++ -flto"
      -export AR="$PREFIX/bin/ar"
      -export NM="$PREFIX/bin/nm"
      -export RANLIB=/bin/true #ranlib is not needed, and doesn't support .bc files in .a
      -export CFLAGS="-O4"
      -
      -
    • -
    • Or you can just set your path: -
      -export PATH="$PREFIX/bin:$PATH"
      -export CC="clang -flto"
      -export CXX="clang++ -flto"
      -export RANLIB=/bin/true
      -export CFLAGS="-O4"
      -
    • -
    • Configure & build the project as usual: -
      -% ./configure && make && make check
      -
    • -
    - -

    The environment variable settings may work for non-autotooled projects - too, but you may need to set the LD environment variable as - well.

    -
    - - -

    Licensing

    - -
    -

    Gold is licensed under the GPLv3. LLVMgold uses the interface file -plugin-api.h from gold which means that the resulting LLVMgold.so -binary is also GPLv3. This can still be used to link non-GPLv3 programs just -as much as gold could without the plugin.

    -
    - - -
    -
    - Valid CSS - Valid HTML 4.01 - Nick Lewycky
    - The LLVM Compiler Infrastructure
    - Last modified: $Date: 2010-04-16 23:58:21 -0800 (Fri, 16 Apr 2010) $ -
    - - diff --git a/docs/GoldPlugin.rst b/docs/GoldPlugin.rst new file mode 100644 index 0000000..300aea9 --- /dev/null +++ b/docs/GoldPlugin.rst @@ -0,0 +1,186 @@ +.. _gold-plugin: + +==================== +The LLVM gold plugin +==================== + +.. sectionauthor:: Nick Lewycky + +Introduction +============ + +Building with link time optimization requires cooperation from +the system linker. LTO support on Linux systems requires that you use the +`gold linker`_ which supports LTO via plugins. This is the same mechanism +used by the `GCC LTO`_ project. + +The LLVM gold plugin implements the gold plugin interface on top of +:ref:`libLTO`. The same plugin can also be used by other tools such as +``ar`` and ``nm``. + +.. _`gold linker`: http://sourceware.org/binutils +.. _`GCC LTO`: http://gcc.gnu.org/wiki/LinkTimeOptimization +.. _`gold plugin interface`: http://gcc.gnu.org/wiki/whopr/driver + +.. _lto-how-to-build: + +How to build it +=============== + +You need to have gold with plugin support and build the LLVMgold plugin. +Check whether you have gold running ``/usr/bin/ld -v``. It will report "GNU +gold" or else "GNU ld" if not. If you have gold, check for plugin support +by running ``/usr/bin/ld -plugin``. If it complains "missing argument" then +you have plugin support. If not, such as an "unknown option" error then you +will either need to build gold or install a version with plugin support. + +* To build gold with plugin support: + + .. code-block:: bash + + $ mkdir binutils + $ cd binutils + $ cvs -z 9 -d :pserver:anoncvs@sourceware.org:/cvs/src login + {enter "anoncvs" as the password} + $ cvs -z 9 -d :pserver:anoncvs@sourceware.org:/cvs/src co binutils + $ mkdir build + $ cd build + $ ../src/configure --enable-gold --enable-plugins + $ make all-gold + + That should leave you with ``binutils/build/gold/ld-new`` which supports + the ``-plugin`` option. It also built would have + ``binutils/build/binutils/ar`` and ``nm-new`` which support plugins but + don't have a visible -plugin option, instead relying on the gold plugin + being present in ``../lib/bfd-plugins`` relative to where the binaries + are placed. + +* Build the LLVMgold plugin: Configure LLVM with + ``--with-binutils-include=/path/to/binutils/src/include`` and run + ``make``. + +Usage +===== + +The linker takes a ``-plugin`` option that points to the path of +the plugin ``.so`` file. To find out what link command ``gcc`` +would run in a given situation, run ``gcc -v [...]`` and +look for the line where it runs ``collect2``. Replace that with +``ld-new -plugin /path/to/LLVMgold.so`` to test it out. Once you're +ready to switch to using gold, backup your existing ``/usr/bin/ld`` +then replace it with ``ld-new``. + +You can produce bitcode files from ``clang`` using ``-emit-llvm`` or +``-flto``, or the ``-O4`` flag which is synonymous with ``-O3 -flto``. + +Any of these flags will also cause ``clang`` to look for the gold plugin in +the ``lib`` directory under its prefix and pass the ``-plugin`` option to +``ld``. It will not look for an alternate linker, which is why you need +gold to be the installed system linker in your path. + +If you want ``ar`` and ``nm`` to work seamlessly as well, install +``LLVMgold.so`` to ``/usr/lib/bfd-plugins``. If you built your own gold, be +sure to install the ``ar`` and ``nm-new`` you built to ``/usr/bin``. + + +Example of link time optimization +--------------------------------- + +The following example shows a worked example of the gold plugin mixing LLVM +bitcode and native code. 
+ +.. code-block:: c + + --- a.c --- + #include <stdio.h> + + extern void foo1(void); + extern void foo4(void); + + void foo2(void) { + printf("Foo2\n"); + } + + void foo3(void) { + foo4(); + } + + int main(void) { + foo1(); + } + + --- b.c --- + #include <stdio.h> + + extern void foo2(void); + + void foo1(void) { + foo2(); + } + + void foo4(void) { + printf("Foo4"); + } + +.. code-block:: bash + + --- command lines --- + $ clang -flto a.c -c -o a.o # <-- a.o is LLVM bitcode file + $ ar q a.a a.o # <-- a.a is an archive with LLVM bitcode + $ clang b.c -c -o b.o # <-- b.o is native object file + $ clang -flto a.a b.o -o main # <-- link with LLVMgold plugin + +Gold informs the plugin that foo3 is never referenced outside the IR, +leading LLVM to delete that function. However, unlike in the :ref:`libLTO +example ` gold does not currently eliminate foo4. + +Quickstart for using LTO with autotooled projects +================================================= + +Once your system ``ld``, ``ar``, and ``nm`` all support LLVM bitcode, +everything is in place for an easy to use LTO build of autotooled projects: + +* Follow the instructions :ref:`on how to build LLVMgold.so + `. + +* Install the newly built binutils to ``$PREFIX`` + +* Copy ``Release/lib/LLVMgold.so`` to ``$PREFIX/lib/bfd-plugins/`` + +* Set environment variables (``$PREFIX`` is where you installed clang and + binutils): + + .. code-block:: bash + + export CC="$PREFIX/bin/clang -flto" + export CXX="$PREFIX/bin/clang++ -flto" + export AR="$PREFIX/bin/ar" + export NM="$PREFIX/bin/nm" + export RANLIB=/bin/true #ranlib is not needed, and doesn't support .bc files in .a + export CFLAGS="-O4" + +* Or you can just set your path: + + .. code-block:: bash + + export PATH="$PREFIX/bin:$PATH" + export CC="clang -flto" + export CXX="clang++ -flto" + export RANLIB=/bin/true + export CFLAGS="-O4" + +* Configure and build the project as usual: + + .. code-block:: bash + + % ./configure && make && make check + +The environment variable settings may work for non-autotooled projects too, +but you may need to set the ``LD`` environment variable as well. + +Licensing +========= + +Gold is licensed under the GPLv3. LLVMgold uses the interface file +``plugin-api.h`` from gold which means that the resulting ``LLVMgold.so`` +binary is also GPLv3. This can still be used to link non-GPLv3 programs +just as much as gold could without the plugin. diff --git a/docs/HowToAddABuilder.html b/docs/HowToAddABuilder.html deleted file mode 100644 index 985b30e..0000000 --- a/docs/HowToAddABuilder.html +++ /dev/null @@ -1,142 +0,0 @@ - - - - - - How To Add Your Build Configuration To LLVM Buildbot Infrastructure - - - - - -

    How To Add Your Build Configuration To LLVM Buildbot Infrastructure

    -
      -
    1. Introduction
    2. -
    3. Steps To Add Builder To LLVM Buildbot
    4. -
    -
    -

    Written by Galina Kistanova

    -
    - - -

    Introduction

    - - -
    - -

    This document contains information about adding a build configuration and - buildslave to private slave builder to LLVM Buildbot Infrastructure - http://lab.llvm.org:8011

    - -
    - - -

    Steps To Add Builder To LLVM Buildbot

    - - -
    - -

    Volunteers can provide their build machines to work as build slaves to - public LLVM Buildbot.

    - -

    Here are the steps you can follow to do so:

    - -
      -
    1. Check the existing build configurations to make sure the one you are - interested in is not covered yet or gets built on your computer much - faster than on the existing one. We prefer faster builds so developers - will get feedback sooner after changes get committed.

    2. - -
    3. The computer you will be registering with the LLVM buildbot - infrastructure should have all dependencies installed and you can - actually build your configuration successfully. Please check what degree - of parallelism (-j param) would give the fastest build. - You can build multiple configurations on one computer.

    4. - -
    5. Install buildslave (currently we are using buildbot version 0.8.5). - Depending on the platform, buildslave could be available to download and - install with your packet manager, or you can download it directly from - http://trac.buildbot.net and - install it manually.

    6. - -
    7. Create a designated user account, your buildslave will be running - under, and set appropriate permissions.

    8. - -
    9. Choose the buildslave root directory (all builds will be placed under - it), buildslave access name and password the build master will be using - to authenticate your buildslave.

    10. - -
    11. Create a buildslave in context of that buildslave account. - Point it to the lab.llvm.org port 9990 (see - - Buildbot documentation, Creating a slave - for more details) by running the following command:

      - -
      -
      -$ buildslave create-slave buildslave-root-directory \
      -             lab.llvm.org:9990 \
      -             buildslave-access-name buildslave-access-password
      -
      -
    12. - -
    13. Fill the buildslave description and admin name/e-mail. - Here is an example of the buildslave description:

      - -
      -
      -Windows 7 x64
      -Core i7 (2.66GHz), 16GB of RAM
      -
      -g++.exe (TDM-1 mingw32) 4.4.0
      -GNU Binutils 2.19.1
      -cmake version 2.8.4
      -Microsoft(R) 32-bit C/C++ Optimizing Compiler Version 16.00.40219.01 for 80x86
      -
      -
    14. - -
    15. Make sure you can actually start the buildslave successfully. Then set - up your buildslave to start automatically at the start up time. - See the buildbot documentation for help. - You may want to restart your computer to see if it works.

    16. - -
    17. Send a patch which adds your build slave and your builder to zorg.

      -
        -
      • slaves are added to - buildbot/osuosl/master/config/slaves.py
      • -
      • builders are added to - buildbot/osuosl/master/config/builders.py
      • -
    18. - -
    19. Send the buildslave access name and the access password directly - to Galina Kistanova, and wait - till she will let you know that your changes are applied and buildmaster - is reconfigured.

      - -
    20. Check the status of your buildslave on the - Waterfall Display - to make sure it is connected, and - - http://lab.llvm.org:8011/buildslaves/<your-buildslave-name> - to see if administrator contact and slave information are correct.

      -
    21. - -
    22. Wait for the first build to succeed and enjoy.

    23. -
    - -
    - - -
    -
    - Valid CSS - Valid HTML 4.01 - The LLVM Compiler Infrastructure -
    - Last modified: $Date: 2011-10-31 12:50:0 -0700 (Mon, 31 Oct 2011) $ -
    - - diff --git a/docs/HowToAddABuilder.rst b/docs/HowToAddABuilder.rst new file mode 100644 index 0000000..b0cd290 --- /dev/null +++ b/docs/HowToAddABuilder.rst @@ -0,0 +1,90 @@ +.. _how_to_add_a_builder: + +=================================================================== +How To Add Your Build Configuration To LLVM Buildbot Infrastructure +=================================================================== + +.. sectionauthor:: Galina Kistanova + +Introduction +============ + +This document contains information about adding a build configuration and +buildslave to private slave builder to LLVM Buildbot Infrastructure +``_. + + +Steps To Add Builder To LLVM Buildbot +===================================== +Volunteers can provide their build machines to work as build slaves to +public LLVM Buildbot. + +Here are the steps you can follow to do so: + +#. Check the existing build configurations to make sure the one you are + interested in is not covered yet or gets built on your computer much + faster than on the existing one. We prefer faster builds so developers + will get feedback sooner after changes get committed. + +#. The computer you will be registering with the LLVM buildbot + infrastructure should have all dependencies installed and you can + actually build your configuration successfully. Please check what degree + of parallelism (-j param) would give the fastest build. You can build + multiple configurations on one computer. + +#. Install buildslave (currently we are using buildbot version 0.8.5). + Depending on the platform, buildslave could be available to download and + install with your packet manager, or you can download it directly from + ``_ and install it manually. + +#. Create a designated user account, your buildslave will be running under, + and set appropriate permissions. + +#. Choose the buildslave root directory (all builds will be placed under + it), buildslave access name and password the build master will be using + to authenticate your buildslave. + +#. Create a buildslave in context of that buildslave account. Point it to + the **lab.llvm.org** port **9990** (see `Buildbot documentation, + Creating a slave + `_ + for more details) by running the following command: + + .. code-block:: bash + + $ buildslave create-slave \ + lab.llvm.org:9990 \ + + +#. Fill the buildslave description and admin name/e-mail. Here is an + example of the buildslave description:: + + Windows 7 x64 + Core i7 (2.66GHz), 16GB of RAM + + g++.exe (TDM-1 mingw32) 4.4.0 + GNU Binutils 2.19.1 + cmake version 2.8.4 + Microsoft(R) 32-bit C/C++ Optimizing Compiler Version 16.00.40219.01 for 80x86 + +#. Make sure you can actually start the buildslave successfully. Then set + up your buildslave to start automatically at the start up time. See the + buildbot documentation for help. You may want to restart your computer + to see if it works. + +#. Send a patch which adds your build slave and your builder to zorg. + + * slaves are added to ``buildbot/osuosl/master/config/slaves.py`` + * builders are added to ``buildbot/osuosl/master/config/builders.py`` + +#. Send the buildslave access name and the access password directly to + `Galina Kistanova `_, and wait till she + will let you know that your changes are applied and buildmaster is + reconfigured. + +#. Check the status of your buildslave on the `Waterfall Display + `_ to make sure it is connected, and + ``http://lab.llvm.org:8011/buildslaves/`` to see + if administrator contact and slave information are correct. + +#. 
Wait for the first build to succeed and enjoy. diff --git a/docs/HowToBuildOnARM.rst b/docs/HowToBuildOnARM.rst new file mode 100644 index 0000000..d786a7d --- /dev/null +++ b/docs/HowToBuildOnARM.rst @@ -0,0 +1,47 @@ +.. _how_to_build_on_arm: + +=================================================================== +How To Build On ARM +=================================================================== + +.. sectionauthor:: Wei-Ren Chen (陳韋任) + +Introduction +============ + +This document contains information about building/testing LLVM and +Clang on ARM. + +Notes On Building LLVM/Clang on ARM +===================================== +Here are some notes on building/testing LLVM/Clang on ARM. Note that +ARM encompasses a wide variety of CPUs; this advice is primarily based +on the ARMv6 and ARMv7 architectures and may be inapplicable to older chips. + +#. If you are building LLVM/Clang on an ARM board with 1G of memory or less, + please use ``gold`` rather then GNU ``ld``. + Building LLVM/Clang with ``--enable-optimized`` + is prefered since it consumes less memory. Otherwise, the building + process will very likely fail due to insufficient memory. In any + case it is probably a good idea to set up a swap partition. + +#. If you want to run ``make + check-all`` after building LLVM/Clang, to avoid false alarms (eg, ARCMT + failure) please use at least the following configuration: + + .. code-block:: bash + + $ ../$LLVM_SRC_DIR/configure --with-abi=aapcs-vfp + +#. The most popular linaro/ubuntu OS's for ARM boards, eg, the + Pandaboard, have become hard-float platforms. The following set + of configuration options appears to be a good choice for this + platform: + + .. code-block:: bash + + ./configure --build=armv7l-unknown-linux-gnueabihf + --host=armv7l-unknown-linux-gnueabihf + --target=armv7l-unknown-linux-gnueabihf --with-cpu=cortex-a9 + --with-float=hard --with-abi=aapcs-vfp --with-fpu=neon + --enable-targets=arm --disable-optimized --enable-assertions diff --git a/docs/HowToSetUpLLVMStyleRTTI.rst b/docs/HowToSetUpLLVMStyleRTTI.rst new file mode 100644 index 0000000..aa1ad84 --- /dev/null +++ b/docs/HowToSetUpLLVMStyleRTTI.rst @@ -0,0 +1,332 @@ +.. _how-to-set-up-llvm-style-rtti: + +====================================================== +How to set up LLVM-style RTTI for your class hierarchy +====================================================== + +.. sectionauthor:: Sean Silva + +.. contents:: + +Background +========== + +LLVM avoids using C++'s built in RTTI. Instead, it pervasively uses its +own hand-rolled form of RTTI which is much more efficient and flexible, +although it requires a bit more work from you as a class author. + +A description of how to use LLVM-style RTTI from a client's perspective is +given in the `Programmer's Manual `_. This +document, in contrast, discusses the steps you need to take as a class +hierarchy author to make LLVM-style RTTI available to your clients. + +Before diving in, make sure that you are familiar with the Object Oriented +Programming concept of "`is-a`_". + +.. _is-a: http://en.wikipedia.org/wiki/Is-a + +Basic Setup +=========== + +This section describes how to set up the most basic form of LLVM-style RTTI +(which is sufficient for 99.9% of the cases). We will set up LLVM-style +RTTI for this class hierarchy: + +.. 
code-block:: c++ + + class Shape { + public: + Shape() {} + virtual double computeArea() = 0; + }; + + class Square : public Shape { + double SideLength; + public: + Square(double S) : SideLength(S) {} + double computeArea() /* override */; + }; + + class Circle : public Shape { + double Radius; + public: + Circle(double R) : Radius(R) {} + double computeArea() /* override */; + }; + +The most basic working setup for LLVM-style RTTI requires the following +steps: + +#. In the header where you declare ``Shape``, you will want to ``#include + "llvm/Support/Casting.h"``, which declares LLVM's RTTI templates. That + way your clients don't even have to think about it. + + .. code-block:: c++ + + #include "llvm/Support/Casting.h" + +#. In the base class, introduce an enum which discriminates all of the + different concrete classes in the hierarchy, and stash the enum value + somewhere in the base class. + + Here is the code after introducing this change: + + .. code-block:: c++ + + class Shape { + public: + + /// Discriminator for LLVM-style RTTI (dyn_cast<> et al.) + + enum ShapeKind { + + SK_Square, + + SK_Circle + + }; + +private: + + const ShapeKind Kind; + +public: + + ShapeKind getKind() const { return Kind; } + + + Shape() {} + virtual double computeArea() = 0; + }; + + You will usually want to keep the ``Kind`` member encapsulated and + private, but let the enum ``ShapeKind`` be public along with providing a + ``getKind()`` method. This is convenient for clients so that they can do + a ``switch`` over the enum. + + A common naming convention is that these enums are "kind"s, to avoid + ambiguity with the words "type" or "class" which have overloaded meanings + in many contexts within LLVM. Sometimes there will be a natural name for + it, like "opcode". Don't bikeshed over this; when in doubt use ``Kind``. + + You might wonder why the ``Kind`` enum doesn't have an entry for + ``Shape``. The reason for this is that since ``Shape`` is abstract + (``computeArea() = 0;``), you will never actually have non-derived + instances of exactly that class (only subclasses). See `Concrete Bases + and Deeper Hierarchies`_ for information on how to deal with + non-abstract bases. It's worth mentioning here that unlike + ``dynamic_cast<>``, LLVM-style RTTI can be used (and is often used) for + classes that don't have v-tables. + +#. Next, you need to make sure that the ``Kind`` gets initialized to the + value corresponding to the dynamic type of the class. Typically, you will + want to have it be an argument to the constructor of the base class, and + then pass in the respective ``XXXKind`` from subclass constructors. + + Here is the code after that change: + + .. code-block:: c++ + + class Shape { + public: + /// Discriminator for LLVM-style RTTI (dyn_cast<> et al.) + enum ShapeKind { + SK_Square, + SK_Circle + }; + private: + const ShapeKind Kind; + public: + ShapeKind getKind() const { return Kind; } + + - Shape() {} + + Shape(ShapeKind K) : Kind(K) {} + virtual double computeArea() = 0; + }; + + class Square : public Shape { + double SideLength; + public: + - Square(double S) : SideLength(S) {} + + Square(double S) : Shape(SK_Square), SideLength(S) {} + double computeArea() /* override */; + }; + + class Circle : public Shape { + double Radius; + public: + - Circle(double R) : Radius(R) {} + + Circle(double R) : Shape(SK_Circle), Radius(R) {} + double computeArea() /* override */; + }; + +#. Finally, you need to inform LLVM's RTTI templates how to dynamically + determine the type of a class (i.e. 
whether the ``isa<>``/``dyn_cast<>`` + should succeed). The default "99.9% of use cases" way to accomplish this + is through a small static member function ``classof``. In order to have + proper context for an explanation, we will display this code first, and + then below describe each part: + + .. code-block:: c++ + + class Shape { + public: + /// Discriminator for LLVM-style RTTI (dyn_cast<> et al.) + enum ShapeKind { + SK_Square, + SK_Circle + }; + private: + const ShapeKind Kind; + public: + ShapeKind getKind() const { return Kind; } + + Shape(ShapeKind K) : Kind(K) {} + virtual double computeArea() = 0; + }; + + class Square : public Shape { + double SideLength; + public: + Square(double S) : Shape(SK_Square), SideLength(S) {} + double computeArea() /* override */; + + + + static bool classof(const Shape *S) { + + return S->getKind() == SK_Square; + + } + }; + + class Circle : public Shape { + double Radius; + public: + Circle(double R) : Shape(SK_Circle), Radius(R) {} + double computeArea() /* override */; + + + + static bool classof(const Shape *S) { + + return S->getKind() == SK_Circle; + + } + }; + + The job of ``classof`` is to dynamically determine whether an object of + a base class is in fact of a particular derived class. In order to + downcast a type ``Base`` to a type ``Derived``, there needs to be a + ``classof`` in ``Derived`` which will accept an object of type ``Base``. + + To be concrete, consider the following code: + + .. code-block:: c++ + + Shape *S = ...; + if (isa(S)) { + /* do something ... */ + } + + The code of the ``isa<>`` test in this code will eventually boil + down---after template instantiation and some other machinery---to a + check roughly like ``Circle::classof(S)``. For more information, see + :ref:`classof-contract`. + + The argument to ``classof`` should always be an *ancestor* class because + the implementation has logic to allow and optimize away + upcasts/up-``isa<>``'s automatically. It is as though every class + ``Foo`` automatically has a ``classof`` like: + + .. code-block:: c++ + + class Foo { + [...] + template + static bool classof(const T *, + ::llvm::enable_if_c< + ::llvm::is_base_of::value + >::type* = 0) { return true; } + [...] + }; + + Note that this is the reason that we did not need to introduce a + ``classof`` into ``Shape``: all relevant classes derive from ``Shape``, + and ``Shape`` itself is abstract (has no entry in the ``Kind`` enum), + so this notional inferred ``classof`` is all we need. See `Concrete + Bases and Deeper Hierarchies`_ for more information about how to extend + this example to more general hierarchies. + +Although for this small example setting up LLVM-style RTTI seems like a lot +of "boilerplate", if your classes are doing anything interesting then this +will end up being a tiny fraction of the code. + +Concrete Bases and Deeper Hierarchies +===================================== + +For concrete bases (i.e. non-abstract interior nodes of the inheritance +tree), the ``Kind`` check inside ``classof`` needs to be a bit more +complicated. The situation differs from the example above in that + +* Since the class is concrete, it must itself have an entry in the ``Kind`` + enum because it is possible to have objects with this class as a dynamic + type. + +* Since the class has children, the check inside ``classof`` must take them + into account. + +Say that ``SpecialSquare`` and ``OtherSpecialSquare`` derive +from ``Square``, and so ``ShapeKind`` becomes: + +.. 
code-block:: c++ + + enum ShapeKind { + SK_Square, + + SK_SpecialSquare, + + SK_OtherSpecialSquare, + SK_Circle + } + +Then in ``Square``, we would need to modify the ``classof`` like so: + +.. code-block:: c++ + + - static bool classof(const Shape *S) { + - return S->getKind() == SK_Square; + - } + + static bool classof(const Shape *S) { + + return S->getKind() >= SK_Square && + + S->getKind() <= SK_OtherSpecialSquare; + + } + +The reason that we need to test a range like this instead of just equality +is that both ``SpecialSquare`` and ``OtherSpecialSquare`` "is-a" +``Square``, and so ``classof`` needs to return ``true`` for them. + +This approach can be made to scale to arbitrarily deep hierarchies. The +trick is that you arrange the enum values so that they correspond to a +preorder traversal of the class hierarchy tree. With that arrangement, all +subclass tests can be done with two comparisons as shown above. If you just +list the class hierarchy like a list of bullet points, you'll get the +ordering right:: + + | Shape + | Square + | SpecialSquare + | OtherSpecialSquare + | Circle + +.. _classof-contract: + +The Contract of ``classof`` +--------------------------- + +To be more precise, let ``classof`` be inside a class ``C``. Then the +contract for ``classof`` is "return ``true`` if the dynamic type of the +argument is-a ``C``". As long as your implementation fulfills this +contract, you can tweak and optimize it as much as you want. + +.. TODO:: + + Touch on some of the more advanced features, like ``isa_impl`` and + ``simplify_type``. However, those two need reference documentation in + the form of doxygen comments as well. We need the doxygen so that we can + say "for full details, see http://llvm.org/doxygen/..." + +Rules of Thumb +============== + +#. The ``Kind`` enum should have one entry per concrete class, ordered + according to a preorder traversal of the inheritance tree. +#. The argument to ``classof`` should be a ``const Base *``, where ``Base`` + is some ancestor in the inheritance hierarchy. The argument should + *never* be a derived class or the class itself: the template machinery + for ``isa<>`` already handles this case and optimizes it. +#. For each class in the hierarchy that has no children, implement a + ``classof`` that checks only against its ``Kind``. +#. For each class in the hierarchy that has children, implement a + ``classof`` that checks a range of the first child's ``Kind`` and the + last child's ``Kind``. diff --git a/docs/HowToSubmitABug.html b/docs/HowToSubmitABug.html deleted file mode 100644 index ef7cf9e..0000000 --- a/docs/HowToSubmitABug.html +++ /dev/null @@ -1,345 +0,0 @@ - - - - - How to submit an LLVM bug report - - - - -

    - - - diff --git a/docs/HowToSubmitABug.rst b/docs/HowToSubmitABug.rst new file mode 100644 index 0000000..ff2d649 --- /dev/null +++ b/docs/HowToSubmitABug.rst @@ -0,0 +1,233 @@ +.. _how-to-submit-a-bug-report: + +================================ +How to submit an LLVM bug report +================================ + +.. sectionauthor:: Chris Lattner and Misha Brukman + +Introduction - Got bugs? +======================== + + +If you're working with LLVM and run into a bug, we definitely want to know +about it. This document describes what you can do to increase the odds of +getting it fixed quickly. + +Basically you have to do two things at a minimum. First, decide whether +the bug `crashes the compiler`_ (or an LLVM pass), or if the +compiler is `miscompiling`_ the program (i.e., the +compiler successfully produces an executable, but it doesn't run right). +Based on what type of bug it is, follow the instructions in the linked +section to narrow down the bug so that the person who fixes it will be able +to find the problem more easily. + +Once you have a reduced test-case, go to `the LLVM Bug Tracking System +`_ and fill out the form with the +necessary details (note that you don't need to pick a category, just use +the "new-bugs" category if you're not sure). The bug description should +contain the following information: + +* All information necessary to reproduce the problem. +* The reduced test-case that triggers the bug. +* The location where you obtained LLVM (if not from our Subversion + repository). + +Thanks for helping us make LLVM better! + +.. _crashes the compiler: + +Crashing Bugs +============= + +More often than not, bugs in the compiler cause it to crash---often due to +an assertion failure of some sort. The most important piece of the puzzle +is to figure out if it is crashing in the GCC front-end or if it is one of +the LLVM libraries (e.g. the optimizer or code generator) that has +problems. + +To figure out which component is crashing (the front-end, optimizer or code +generator), run the ``llvm-gcc`` command line as you were when the crash +occurred, but with the following extra command line options: + +* ``-O0 -emit-llvm``: If ``llvm-gcc`` still crashes when passed these + options (which disable the optimizer and code generator), then the crash + is in the front-end. Jump ahead to the section on :ref:`front-end bugs + `. + +* ``-emit-llvm``: If ``llvm-gcc`` crashes with this option (which disables + the code generator), you found an optimizer bug. Jump ahead to + `compile-time optimization bugs`_. + +* Otherwise, you have a code generator crash. Jump ahead to `code + generator bugs`_. + +.. _front-end bug: +.. _front-end: + +Front-end bugs +-------------- + +If the problem is in the front-end, you should re-run the same ``llvm-gcc`` +command that resulted in the crash, but add the ``-save-temps`` option. +The compiler will crash again, but it will leave behind a ``foo.i`` file +(containing preprocessed C source code) and possibly ``foo.s`` for each +compiled ``foo.c`` file. Send us the ``foo.i`` file, along with the options +you passed to ``llvm-gcc``, and a brief description of the error it caused. + +The `delta `_ tool helps to reduce the +preprocessed file down to the smallest amount of code that still replicates +the problem. You're encouraged to use delta to reduce the code to make the +developers' lives easier. `This website +`_ has instructions +on the best way to use delta. + +.. 
_compile-time optimization bugs: + +Compile-time optimization bugs +------------------------------ + +If you find that a bug crashes in the optimizer, compile your test-case to a +``.bc`` file by passing "``-emit-llvm -O0 -c -o foo.bc``". +Then run: + +.. code-block:: bash + + opt -std-compile-opts -debug-pass=Arguments foo.bc -disable-output + +This command should do two things: it should print out a list of passes, and +then it should crash in the same way as llvm-gcc. If it doesn't crash, please +follow the instructions for a `front-end bug`_. + +If this does crash, then you should be able to debug this with the following +bugpoint command: + +.. code-block:: bash + + bugpoint foo.bc + +Please run this, then file a bug with the instructions and reduced .bc +files that bugpoint emits. If something goes wrong with bugpoint, please +submit the "foo.bc" file and the list of passes printed by ``opt``. + +.. _code generator bugs: + +Code generator bugs +------------------- + +If you find a bug that crashes llvm-gcc in the code generator, compile your +source file to a .bc file by passing "``-emit-llvm -c -o foo.bc``" to +llvm-gcc (in addition to the options you already pass). Once your have +foo.bc, one of the following commands should fail: + +#. ``llc foo.bc`` +#. ``llc foo.bc -relocation-model=pic`` +#. ``llc foo.bc -relocation-model=static`` + +If none of these crash, please follow the instructions for a `front-end +bug`_. If one of these do crash, you should be able to reduce this with +one of the following bugpoint command lines (use the one corresponding to +the command above that failed): + +#. ``bugpoint -run-llc foo.bc`` +#. ``bugpoint -run-llc foo.bc --tool-args -relocation-model=pic`` +#. ``bugpoint -run-llc foo.bc --tool-args -relocation-model=static`` + +Please run this, then file a bug with the instructions and reduced .bc file +that bugpoint emits. If something goes wrong with bugpoint, please submit +the "foo.bc" file and the option that llc crashes with. + +.. _miscompiling: + +Miscompilations +=============== + +If llvm-gcc successfully produces an executable, but that executable +doesn't run right, this is either a bug in the code or a bug in the +compiler. The first thing to check is to make sure it is not using +undefined behavior (e.g. reading a variable before it is defined). In +particular, check to see if the program `valgrind +`_'s clean, passes purify, or some other memory +checker tool. Many of the "LLVM bugs" that we have chased down ended up +being bugs in the program being compiled, not LLVM. + +Once you determine that the program itself is not buggy, you should choose +which code generator you wish to compile the program with (e.g. LLC or the JIT) +and optionally a series of LLVM passes to run. For example: + +.. code-block:: bash + + bugpoint -run-llc [... optzn passes ...] file-to-test.bc --args -- [program arguments] + +bugpoint will try to narrow down your list of passes to the one pass that +causes an error, and simplify the bitcode file as much as it can to assist +you. It will print a message letting you know how to reproduce the +resulting error. + +Incorrect code generation +========================= + +Similarly to debugging incorrect compilation by mis-behaving passes, you +can debug incorrect code generation by either LLC or the JIT, using +``bugpoint``. 
The process ``bugpoint`` follows in this case is to try to +narrow the code down to a function that is miscompiled by one or the other +method, but since for correctness, the entire program must be run, +``bugpoint`` will compile the code it deems to not be affected with the C +Backend, and then link in the shared object it generates. + +To debug the JIT: + +.. code-block:: bash + + bugpoint -run-jit -output=[correct output file] [bitcode file] \ + --tool-args -- [arguments to pass to lli] \ + --args -- [program arguments] + +Similarly, to debug the LLC, one would run: + +.. code-block:: bash + + bugpoint -run-llc -output=[correct output file] [bitcode file] \ + --tool-args -- [arguments to pass to llc] \ + --args -- [program arguments] + +**Special note:** if you are debugging MultiSource or SPEC tests that +already exist in the ``llvm/test`` hierarchy, there is an easier way to +debug the JIT, LLC, and CBE, using the pre-written Makefile targets, which +will pass the program options specified in the Makefiles: + +.. code-block:: bash + + cd llvm/test/../../program + make bugpoint-jit + +At the end of a successful ``bugpoint`` run, you will be presented +with two bitcode files: a *safe* file which can be compiled with the C +backend and the *test* file which either LLC or the JIT +mis-codegenerates, and thus causes the error. + +To reproduce the error that ``bugpoint`` found, it is sufficient to do +the following: + +#. Regenerate the shared object from the safe bitcode file: + + .. code-block:: bash + + llc -march=c safe.bc -o safe.c + gcc -shared safe.c -o safe.so + +#. If debugging LLC, compile test bitcode native and link with the shared + object: + + .. code-block:: bash + + llc test.bc -o test.s + gcc test.s safe.so -o test.llc + ./test.llc [program options] + +#. If debugging the JIT, load the shared object and supply the test + bitcode: + + .. code-block:: bash + + lli -load=safe.so test.bc [program options] diff --git a/docs/HowToUseInstrMappings.rst b/docs/HowToUseInstrMappings.rst new file mode 100755 index 0000000..b51e74e --- /dev/null +++ b/docs/HowToUseInstrMappings.rst @@ -0,0 +1,179 @@ +.. _how_to_use_instruction_mappings: + +=============================== +How To Use Instruction Mappings +=============================== + +.. sectionauthor:: Jyotsna Verma + +.. contents:: + :local: + +Introduction +============ + +This document contains information about adding instruction mapping support +for a target. The motivation behind this feature comes from the need to switch +between different instruction formats during various optimizations. One approach +could be to use switch cases which list all the instructions along with formats +they can transition to. However, it has large maintenance overhead +because of the hardcoded instruction names. Also, whenever a new instruction is +added in the .td files, all the relevant switch cases should be modified +accordingly. Instead, the same functionality could be achieved with TableGen and +some support from the .td files for a fraction of maintenance cost. + +``InstrMapping`` Class Overview +=============================== + +TableGen uses relationship models to map instructions with each other. These +models are described using ``InstrMapping`` class as a base. Each model sets +various fields of the ``InstrMapping`` class such that they can uniquely +describe all the instructions using that model. 
TableGen parses all the relation +models and uses the information to construct relation tables which relate +instructions with each other. These tables are emitted in the +``XXXInstrInfo.inc`` file along with the functions to query them. Following +is the definition of ``InstrMapping`` class definied in Target.td file: + +.. code-block:: llvm + + class InstrMapping { + // Used to reduce search space only to the instructions using this + // relation model. + string FilterClass; + + // List of fields/attributes that should be same for all the instructions in + // a row of the relation table. Think of this as a set of properties shared + // by all the instructions related by this relationship. + list RowFields = []; + + // List of fields/attributes that are same for all the instructions + // in a column of the relation table. + list ColFields = []; + + // Values for the fields/attributes listed in 'ColFields' corresponding to + // the key instruction. This is the instruction that will be transformed + // using this relation model. + list KeyCol = []; + + // List of values for the fields/attributes listed in 'ColFields', one for + // each column in the relation table. These are the instructions a key + // instruction will be transformed into. + list > ValueCols = []; + } + +Sample Example +-------------- + +Let's say that we want to have a function +``int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense)`` which +takes a non-predicated instruction and returns its predicated true or false form +depending on some input flag, ``inPredSense``. The first step in the process is +to define a relationship model that relates predicated instructions to their +non-predicated form by assigning appropriate values to the ``InstrMapping`` +fields. For this relationship, non-predicated instructions are treated as key +instruction since they are the one used to query the interface function. + +.. code-block:: llvm + + def getPredOpcode : InstrMapping { + // Choose a FilterClass that is used as a base class for all the + // instructions modeling this relationship. This is done to reduce the + // search space only to these set of instructions. + let FilterClass = "PredRel"; + + // Instructions with same values for all the fields in RowFields form a + // row in the resulting relation table. + // For example, if we want to relate 'ADD' (non-predicated) with 'Add_pt' + // (predicated true) and 'Add_pf' (predicated false), then all 3 + // instructions need to have same value for BaseOpcode field. It can be any + // unique value (Ex: XYZ) and should not be shared with any other + // instruction not related to 'add'. + let RowFields = ["BaseOpcode"]; + + // List of attributes that can be used to define key and column instructions + // for a relation. Key instruction is passed as an argument + // to the function used for querying relation tables. Column instructions + // are the instructions they (key) can transform into. + // + // Here, we choose 'PredSense' as ColFields since this is the unique + // attribute of the key (non-predicated) and column (true/false) + // instructions involved in this relationship model. + let ColFields = ["PredSense"]; + + // The key column contains non-predicated instructions. + let KeyCol = ["none"]; + + // Two value columns - first column contains instructions with + // PredSense=true while second column has instructions with PredSense=false. 
+ let ValueCols = [["true"], ["false"]]; + } + +TableGen uses the above relationship model to emit relation table that maps +non-predicated instructions with their predicated forms. It also outputs the +interface function +``int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense)`` to query +the table. Here, Function ``getPredOpcode`` takes two arguments, opcode of the +current instruction and PredSense of the desired instruction, and returns +predicated form of the instruction, if found in the relation table. +In order for an instruction to be added into the relation table, it needs +to include relevant information in its definition. For example, consider +following to be the current definitions of ADD, ADD_pt (true) and ADD_pf (false) +instructions: + +.. code-block::llvm + + def ADD : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$a, IntRegs:$b), + "$dst = add($a, $b)", + [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$a), + (i32 IntRegs:$b)))]>; + + def ADD_Pt : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$p, IntRegs:$a, IntRegs:$b), + "if ($p) $dst = add($a, $b)", + []>; + + def ADD_Pf : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$p, IntRegs:$a, IntRegs:$b), + "if (!$p) $dst = add($a, $b)", + []>; + +In this step, we modify these instructions to include the information +required by the relationship model, getPredOpcode, so that they can +be related. + +.. code-block::llvm + + def ADD : PredRel, ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$a, IntRegs:$b), + "$dst = add($a, $b)", + [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$a), + (i32 IntRegs:$b)))]> { + let BaseOpcode = "ADD"; + let PredSense = "none"; + } + + def ADD_Pt : PredRel, ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$p, IntRegs:$a, IntRegs:$b), + "if ($p) $dst = add($a, $b)", + []> { + let BaseOpcode = "ADD"; + let PredSense = "true"; + } + + def ADD_Pf : PredRel, ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$p, IntRegs:$a, IntRegs:$b), + "if (!$p) $dst = add($a, $b)", + []> { + let BaseOpcode = "ADD"; + let PredSense = "false"; + } + +Please note that all the above instructions use ``PredRel`` as a base class. +This is extremely important since TableGen uses it as a filter for selecting +instructions for ``getPredOpcode`` model. Any instruction not derived from +``PredRel`` is excluded from the analysis. ``BaseOpcode`` is another important +field. Since it's selected as a ``RowFields`` of the model, it is required +to have the same value for all 3 instructions in order to be related. Next, +``PredSense`` is used to determine their column positions by comparing its value +with ``KeyCol`` and ``ValueCols``. If an instruction sets its ``PredSense`` +value to something not used in the relation model, it will not be assigned +a column in the relation table. diff --git a/docs/LangRef.html b/docs/LangRef.html index 946380e..13daa65 100644 --- a/docs/LangRef.html +++ b/docs/LangRef.html @@ -25,7 +25,6 @@
  • 'private' Linkage
  • 'linker_private' Linkage
  • 'linker_private_weak' Linkage
  • -
  • 'linker_private_weak_def_auto' Linkage
  • 'internal' Linkage
  • 'available_externally' Linkage
  • 'linkonce' Linkage
  • @@ -34,6 +33,7 @@
  • 'appending' Linkage
  • 'extern_weak' Linkage
  • 'linkonce_odr' Linkage
  • +
  • 'linkonce_odr_auto_hide' Linkage
  • 'weak_odr' Linkage
  • 'external' Linkage
  • 'dllimport' Linkage
  • @@ -103,6 +103,7 @@
  • Metadata Nodes and Metadata Strings
    1. 'tbaa' Metadata
    2. +
    3. 'tbaa.struct' Metadata
    4. 'fpmath' Metadata
    5. 'range' Metadata
    @@ -576,15 +577,6 @@ define i32 @main() { ; i32()*   linker. The symbols are removed by the linker from the final linked image (executable or dynamic library). -
    linker_private_weak_def_auto
    -
    Similar to "linker_private_weak", but it's known that the address - of the object is not taken. For instance, functions that had an inline - definition, but the compiler decided not to inline it. Note, - unlike linker_private and linker_private_weak, - linker_private_weak_def_auto may have only default - visibility. The symbols are removed by the linker from the final linked - image (executable or dynamic library).
    -
    internal
    Similar to private, but the value shows as a local symbol (STB_LOCAL in the case of ELF) in the object file. This @@ -653,6 +645,14 @@ define i32 @main() { ; i32()*   be merged with equivalent globals. These linkage types are otherwise the same as their non-odr versions.
    +
    linkonce_odr_auto_hide
    +
    Similar to "linkonce_odr", but nothing in the translation unit + takes the address of this definition. For instance, functions that had an + inline definition, but the compiler decided not to inline it. + linkonce_odr_auto_hide may have only default visibility. + The symbols are removed by the linker from the final linked image + (executable or dynamic library).
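For illustration, a rough C++ sketch of the kind of definition this linkage is aimed at (the function names are made up): an inline function that is called but whose address is never taken.

.. code-block:: c++

    // square() has an inline definition and its address is never taken.
    // If the compiler decides not to inline a call, the out-of-line copy
    // it emits is the sort of symbol that can use linkonce_odr_auto_hide
    // rather than plain linkonce_odr.
    inline int square(int x) { return x * x; }

    int twice_squared(int v) { return square(v) + square(v); }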
    +
    external
    If none of the above identifiers are used, the global is externally visible, meaning that it participates in linkage and can be used to @@ -1107,9 +1107,9 @@ declare signext i8 @returns_signed_char()
    This indicates that the pointer parameter specifies the address of a structure that is the return value of the function in the source program. This pointer must be guaranteed by the caller to be valid: loads and - stores to the structure may be assumed by the callee to not to trap. This - may only be applied to the first parameter. This is not a valid attribute - for return values.
    + stores to the structure may be assumed by the callee to not to trap and + to be properly aligned. This may only be applied to the first parameter. + This is not a valid attribute for return values.
    noalias
    This indicates that pointer values @@ -1208,13 +1208,6 @@ define void @f() optsize { ... } may make calls to the function faster, at the cost of extra program startup time if the function is not called during program startup.
    -
    ia_nsdialect
    -
    This attribute indicates the associated inline assembly call is using a - non-standard assembly dialect. The standard dialect is ATT, which is - assumed when this attribute is not present. When present, the dialect - is assumed to be Intel. Currently, ATT and Intel are the only supported - dialects.
    -
    inlinehint
    This attribute indicates that the source code contained a hint that inlining this function is desirable (such as the "inline" keyword in C/C++). It @@ -1371,11 +1364,13 @@ target datalayout = "layout specification" 8-bits. If omitted, the natural stack alignment defaults to "unspecified", which does not prevent any alignment promotions.
    -
    p:size:abi:pref
    +
    p[n]:size:abi:pref
    This specifies the size of a pointer and its abi and - preferred alignments. All sizes are in bits. Specifying - the pref alignment is optional. If omitted, the - preceding : should be omitted too.
    + preferred alignments for address space n. All sizes are in + bits. Specifying the pref alignment is optional. If omitted, the + preceding : should be omitted too. The address space, + n is optional, and if not specified, denotes the default address + space 0. The value of n must be in the range [1,2^23).
    isize:abi:pref
    This specifies the alignment for an integer type of a given bit @@ -1416,6 +1411,10 @@ target datalayout = "layout specification"
    • E - big endian
    • p:64:64:64 - 64-bit pointers with 64-bit alignment
    • +
    • p1:32:32:32 - 32-bit pointers with 32-bit alignment for + address space 1
    • +
    • p2:16:32:32 - 16-bit pointers with 32-bit alignment for + address space 2
    • i1:8:8 - i1 is 8-bit (byte) aligned
    • i8:8:8 - i8 is 8-bit (byte) aligned
    • i16:16:16 - i16 is 16-bit aligned
    • @@ -2111,7 +2110,7 @@ in signal handlers).

      Structures may optionally be "packed" structures, which indicate that the alignment of the struct is one byte, and that there is no padding between the elements. In non-packed structs, padding between field types is inserted - as defined by the TargetData string in the module, which is required to match + as defined by the DataLayout string in the module, which is required to match what the underlying code generator expects.

      Structures can either be "literal" or "identified". A literal structure is @@ -2902,8 +2901,18 @@ call void asm sideeffect "eieio", ""() call void asm alignstack "eieio", ""() -

      If both keywords appear the 'sideeffect' keyword must come - first.

      +

      Inline asms also support using non-standard assembly dialects. The assumed + dialect is ATT. When the 'inteldialect' keyword is present, the + inline asm is using the Intel dialect. Currently, ATT and Intel are the + only supported dialects. An example is:

      + +
      +call void asm inteldialect "eieio", ""()
      +
      + +

      If multiple keywords appear the 'sideeffect' keyword must come + first, the 'alignstack' keyword second and the + 'inteldialect' keyword last.

      + 'tbaa.struct' Metadata +

      + +
      + +

      The llvm.memcpy is often used to implement +aggregate assignment operations in C and similar languages, however it is +defined to copy a contiguous region of memory, which is more than strictly +necessary for aggregate types which contain holes due to padding. Also, it +doesn't contain any TBAA information about the fields of the aggregate.

      + +

      !tbaa.struct metadata can describe which memory subregions in a memcpy +are padding and what the TBAA tags of the struct are.

      + +

      The current metadata format is very simple. !tbaa.struct metadata nodes + are a list of operands which are in conceptual groups of three. For each + group of three, the first operand gives the byte offset of a field in bytes, + the second gives its size in bytes, and the third gives its + tbaa tag. e.g.:

      + +
      +
      +!4 = metadata !{ i64 0, i64 4, metadata !1, i64 8, i64 4, metadata !2 }
      +
      +
      + +

      This describes a struct with two fields. The first is at offset 0 bytes + with size 4 bytes, and has tbaa tag !1. The second is at offset 8 bytes + and has size 4 bytes and has tbaa tag !2.

      + +

      Note that the fields need not be contiguous. In this example, there is a + 4 byte gap between the two fields. This gap represents padding which + does not carry useful data and need not be preserved.
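For reference, a C++ struct with exactly that layout might look like the sketch below (assuming a 4-byte int and C++11 alignas; the field names are illustrative). Copying such a struct is the kind of aggregate assignment that lowers to llvm.memcpy carrying a !tbaa.struct tag.

.. code-block:: c++

    struct S {
      int a;             // offset 0, size 4  -> the (i64 0, i64 4, !1) group
      // 4 bytes of padding here carry no useful data and need not be preserved
      alignas(8) int b;  // offset 8, size 4  -> the (i64 8, i64 4, !2) group
    };

    void copy(S &dst, const S &src) { dst = src; }  // may lower to llvm.memcpy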

      + +
      + + +

      'fpmath' Metadata

      @@ -5013,7 +5060,7 @@ IfUnequal:

      The optional constant align argument specifies the alignment of the operation (that is, the alignment of the memory address). A value of 0 or an - omitted align argument means that the operation has the preferential + omitted align argument means that the operation has the abi alignment for the target. It is the responsibility of the code emitter to ensure that the alignment information is correct. Overestimating the alignment results in undefined behavior. Underestimating the alignment may @@ -5094,7 +5141,7 @@ IfUnequal:

      The optional constant "align" argument specifies the alignment of the operation (that is, the alignment of the memory address). A value of 0 or an - omitted "align" argument means that the operation has the preferential + omitted "align" argument means that the operation has the abi alignment for the target. It is the responsibility of the code emitter to ensure that the alignment information is correct. Overestimating the alignment results in an undefined behavior. Underestimating the alignment may @@ -8722,7 +8769,7 @@ codegen.

      Chris Lattner
      The LLVM Compiler Infrastructure
      - Last modified: $Date: 2012-08-10 02:00:22 +0200 (Fri, 10 Aug 2012) $ + Last modified: $Date: 2012-10-29 15:12:44 +0100 (Mon, 29 Oct 2012) $ diff --git a/docs/Lexicon.rst b/docs/Lexicon.rst index 6ebe614..d568c0b 100644 --- a/docs/Lexicon.rst +++ b/docs/Lexicon.rst @@ -20,8 +20,10 @@ A B - -**BURS** +**BB Vectorization** + Basic Block Vectorization +**BURS** Bottom Up Rewriting System --- A method of instruction selection for code generation. An example is the `BURG `_ tool. @@ -156,7 +158,7 @@ R In garbage collection, a pointer variable lying outside of the `heap`_ from which the collector begins its reachability analysis. In the context of code generation, "root" almost always refers to a "stack root" --- a local or - temporary variable within an executing function.
    + temporary variable within an executing function. **RPO** Reverse postorder @@ -192,3 +194,10 @@ S **Stack Map** In garbage collection, metadata emitted by the code generator which identifies `roots`_ within the stack frame of an executing function. + +T +- + +**TBAA** + Type-Based Alias Analysis + diff --git a/docs/LinkTimeOptimization.rst b/docs/LinkTimeOptimization.rst index 53d673e..7eacf0b 100644 --- a/docs/LinkTimeOptimization.rst +++ b/docs/LinkTimeOptimization.rst @@ -29,6 +29,8 @@ bitcode files. This tight integration between the linker and LLVM optimizer helps to do optimizations that are not possible in other models. The linker input allows the optimizer to avoid relying on conservative escape analysis. +.. _libLTO-example: + Example of link time optimization --------------------------------- diff --git a/docs/Makefile.sphinx b/docs/Makefile.sphinx index 21f6648..81c13de 100644 --- a/docs/Makefile.sphinx +++ b/docs/Makefile.sphinx @@ -46,6 +46,10 @@ clean: html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo + @# FIXME: Remove this `cp` once HTML->Sphinx transition is completed. + @# Kind of a hack, but HTML-formatted docs are on the way out anyway. + @echo "Copying legacy HTML-formatted docs into $(BUILDDIR)/html" + @cp -a *.html tutorial $(BUILDDIR)/html @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: diff --git a/docs/MarkedUpDisassembly.rst b/docs/MarkedUpDisassembly.rst new file mode 100644 index 0000000..e1282e1 --- /dev/null +++ b/docs/MarkedUpDisassembly.rst @@ -0,0 +1,88 @@ +.. _marked_up_disassembly: + +======================================= +LLVM's Optional Rich Disassembly Output +======================================= + +.. contents:: + :local: + +Introduction +============ + +LLVM's default disassembly output is raw text. To allow consumers more ability +to introspect the instructions' textual representation or to reformat for a more +user friendly display there is an optional rich disassembly output. + +This optional output is sufficient to reference into individual portions of the +instruction text. This is intended for clients like disassemblers, list file +generators, and pretty-printers, which need more than the raw instructions and +the ability to print them. + +To provide this functionality the assembly text is marked up with annotations. +The markup is simple enough in syntax to be robust even in the case of version +mismatches between consumers and producers. That is, the syntax generally does +not carry semantics beyond "this text has an annotation," so consumers can +simply ignore annotations they do not understand or do not care about. + +After calling ``LLVMCreateDisasm()`` to create a disassembler context the +optional output is enable with this call: + +.. code-block:: c + + LLVMSetDisasmOptions(DC, LLVMDisassembler_Option_UseMarkup); + +Then subsequent calls to ``LLVMDisasmInstruction()`` will return output strings +with the marked up annotations. + +Instruction Annotations +======================= + +.. _contextual markups: + +Contextual markups +------------------ + +Annoated assembly display will supply contextual markup to help clients more +efficiently implement things like pretty printers. Most markup will be target +independent, so clients can effectively provide good display without any target +specific knowledge. + +Annotated assembly goes through the normal instruction printer, but optionally +includes contextual tags on portions of the instruction string. 
An annotation +is any '<' '>' delimited section of text(1). + +.. code-block:: bat + + annotation: '<' tag-name tag-modifier-list ':' annotated-text '>' + tag-name: identifier + tag-modifier-list: comma delimited identifier list + +The tag-name is an identifier which gives the type of the annotation. For the +first pass, this will be very simple, with memory references, registers, and +immediates having the tag names "mem", "reg", and "imm", respectively. + +The tag-modifier-list is typically additional target-specific context, such as +register class. + +Clients should accept and ignore any tag-names or tag-modifiers they do not +understand, allowing the annotations to grow in richness without breaking older +clients. + +For example, a possible annotation of an ARM load of a stack-relative location +might be annotated as: + +.. code-block:: nasm + + ldr , , ]> + + +1: For assembly dialects in which '<' and/or '>' are legal tokens, a literal token is escaped by following immediately with a repeat of the character. For example, a literal '<' character is output as '<<' in an annotated assembly string. + +C API Details +------------- + +The intended consumers of this information use the C API, therefore the new C +API function for the disassembler will be added to provide an option to produce +disassembled instructions with annotations, ``LLVMSetDisasmOptions()`` and the +``LLVMDisassembler_Option_UseMarkup`` option (see above). diff --git a/docs/Passes.html b/docs/Passes.html index e8048d5..16e8bd6 100644 --- a/docs/Passes.html +++ b/docs/Passes.html @@ -77,6 +77,7 @@ perl -e '$/ = undef; for (split(/\n/, <>)) { s:^ *///? ?::; print "

    \n" if ! -basicaaBasic Alias Analysis (stateless AA impl) -basiccgBasic CallGraph Construction -count-aaCount Alias Analysis Query Responses +-daDependence Analysis -debug-aaAA use debugger -domfrontierDominance Frontier Construction -domtreeDominator Tree Construction @@ -92,7 +93,6 @@ perl -e '$/ = undef; for (split(/\n/, <>)) { s:^ *///? ?::; print "

    \n" if ! -intervalsInterval Partition Construction -iv-usersInduction Variable Users -lazy-value-infoLazy Value Information Analysis --ldaLoop Dependence Analysis -libcall-aaLibCall Alias Analysis -lintStatically lint-checks LLVM IR -loopsNatural Loop Information @@ -182,7 +182,6 @@ perl -e '$/ = undef; for (split(/\n/, <>)) { s:^ *///? ?::; print "

    \n" if ! -strip-debug-declareStrip all llvm.dbg.declare intrinsics -strip-nondebugStrip all symbols, except dbg symbols, from a module -tailcallelimTail Call Elimination --tailduplicateTail Duplication UTILITY PASSES @@ -251,6 +250,15 @@ perl -e '$/ = undef; for (split(/\n/, <>)) { s:^ *///? ?::; print "

    \n" if !

    + -da: Dependence Analysis +

    +
    +

    Dependence analysis framework, which is used to detect dependences in + memory accesses.
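As a rough C++ illustration of what the framework reasons about (assuming the arrays do not alias), the first loop below carries a dependence from one iteration to the next, while the second does not:

.. code-block:: c++

    void carried(int *A, int n) {
      for (int i = 1; i < n; ++i)
        A[i] = A[i - 1] + 1;   // reads the value stored by the previous iteration
    }

    void independent(int *A, int *B, int n) {
      for (int i = 0; i < n; ++i)
        A[i] = B[i] + 1;       // iterations touch disjoint elements of A
    }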

    +
    + + +

    -debug-aa: AA use debugger

    @@ -433,15 +441,6 @@ perl -e '$/ = undef; for (split(/\n/, <>)) { s:^ *///? ?::; print "

    \n" if !

    - -lda: Loop Dependence Analysis -

    -
    -

    Loop dependence analysis framework, which is used to detect dependences in - memory accesses in loops.

    -
    - - -

    -libcall-aa: LibCall Alias Analysis

    @@ -1862,22 +1861,6 @@ if (X < 3) {
    - -

    - -tailduplicate: Tail Duplication -

    -
    -

    - This pass performs a limited form of tail duplication, intended to simplify - CFGs by removing some unconditional branches. This pass is necessary to - straighten out loops created by the C front-end, but also is capable of - making other code nicer. After this pass is run, the CFG simplify pass - should be run to clean up the mess. -

    -
    - -
    -

    Utility Passes

    @@ -2059,7 +2042,7 @@ if (X < 3) { Reid Spencer
    LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-07-26 00:01:31 +0200 (Thu, 26 Jul 2012) $ + Last modified: $Date: 2012-10-31 18:25:31 +0100 (Wed, 31 Oct 2012) $ diff --git a/docs/Phabricator.rst b/docs/Phabricator.rst new file mode 100644 index 0000000..b454497 --- /dev/null +++ b/docs/Phabricator.rst @@ -0,0 +1,100 @@ +============================= +Code Reviews with Phabricator +============================= + +.. contents:: + :local: + +If you prefer to use a web user interface for code reviews, +you can now submit your patches for Clang and LLVM at +`LLVM's Phabricator`_. + +Sign up +------- + +There are two options to get an account on Phabricator. You can sign up +immediately with one of the supported OAuth account types if you're comfortable +with OAuth, but you can also email chandlerc@gmail.com to request an account to +be created manually without using OAuth. We're working to get support in +Phabricator to directly create new accounts, but currently this is a manual +process. + +Note that if you use your Subversion user name as Phabricator user name, +Phabricator will automatically connect your submits to your Phabricator user in +the `Code Repository Browser`_. + + +Requesting a review via the command line +---------------------------------------- + +Phabricator has a tool called *Arcanist* to upload patches from +the command line. To get you set up, follow the +`Arcanist Quick Start`_ instructions. + +You can learn more about how to use arc to interact with +Phabricator in the `Arcanist User Guide`_. + +Requesting a review via the web interface +----------------------------------------- + +The tool to create and review patches in Phabricator is called +*Differential*. + +Note that you can upload patches created through various diff tools, +including git and svn. To make reviews easier, please always include +**as much context as possible** with your diff! Don't worry, Phabricator +will automatically send a diff with a smaller context in the review +email, but having the full file in the web interface will help the +reviewer understand your code. + +To get a full diff, use one of the following commands (or just use Arcanist +to upload your patch): + +* ``git diff -U999999 other-branch`` +* ``svn diff --diff-cmd=diff -x -U999999`` + +To upload a new patch: + +* Click *Differential*. +* Click *Create Revision*. +* Paste the text diff or upload the patch file. + Note that TODO +* Leave the drop down on *Create a new Revision...* and click *Continue*. +* Enter a descriptive title and summary; add reviewers and mailing + lists that you want to be included in the review. If your patch is + for LLVM, cc llvm-commits; if your patch is for Clang, cc cfe-commits. +* Click *Save*. + +To submit an updated patch: + +* Click *Differential*. +* Click *Create Revision*. +* Paste the updated diff. +* Select the review you want to from the *Attach To* dropdown and click + *Continue*. +* Click *Save*. + +Reviewing code with Phabricator +------------------------------- + +Phabricator allows you to add inline comments as well as overall comments +to a revision. To add an inline comment, select the lines of code you want +to comment on by clicking and dragging the line numbers in the diff pane. + +You can add overall comments or submit your comments at the bottom of the page. + +Phabricator has many useful features, for example allowing you to select +diffs between different versions of the patch as it was reviewed in the +*Revision Update History*. 
Most features are self descriptive - explore, and +if you have a question, drop by on #llvm in IRC to get help. + +Status +------ + +Currently, we're testing Phabricator for use with Clang/LLVM. Please let us +know whether you like it and what could be improved! + +.. _LLVM's Phabricator: http://llvm-reviews.chandlerc.com +.. _Code Repository Browser: http://llvm-reviews.chandlerc.com/diffusion/ +.. _Arcanist Quick Start: http://www.phabricator.com/docs/phabricator/article/Arcanist_Quick_Start.html +.. _Arcanist User Guide: http://www.phabricator.com/docs/phabricator/article/Arcanist_User_Guide.html diff --git a/docs/ProgrammersManual.html b/docs/ProgrammersManual.html index 5bf499b..7c2e6c8 100644 --- a/docs/ProgrammersManual.html +++ b/docs/ProgrammersManual.html @@ -98,6 +98,7 @@ option
  • "llvm/ADT/ValueMap.h"
  • "llvm/ADT/IntervalMap.h"
  • <map>
  • +
  • "llvm/ADT/MapVector.h"
  • "llvm/ADT/IntEqClasses.h"
  • "llvm/ADT/ImmutableMap.h"
  • Other Map-Like Container Options
  • @@ -432,10 +433,10 @@ if (AllocationInst *AI = dyn_cast<How to set up LLVM-style +RTTI for your class hierarchy . +

    @@ -1848,6 +1849,24 @@ another element takes place).

    + + +

    + "llvm/ADT/MapVector.h" +

    +
    + +

    MapVector<KeyT,ValueT> provides a subset of the DenseMap interface. + The main difference is that the iteration order is guaranteed to be + the insertion order, making it an easy (but somewhat expensive) solution + for non-deterministic iteration over maps of pointers.

    + +

    It is implemented by mapping from key to an index in a vector of key,value + pairs. This provides fast lookup and iteration, but has two main drawbacks: + The key is stored twice and it doesn't support removing elements.
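A minimal usage sketch, assuming the usual operator[] and iterator interface of the DenseMap subset described above:

.. code-block:: c++

    #include "llvm/ADT/MapVector.h"

    unsigned sumInInsertionOrder(int *A, int *B) {
      llvm::MapVector<int *, unsigned> Order;
      Order[B] = 1;   // inserted first
      Order[A] = 2;   // inserted second
      unsigned Sum = 0;
      // Visits B before A: iteration follows insertion order, not whatever
      // order the pointer keys happen to hash to.
      for (llvm::MapVector<int *, unsigned>::iterator I = Order.begin(),
                                                      E = Order.end();
           I != E; ++I)
        Sum += I->second;
      return Sum;
    }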

    + +
    +

    "llvm/ADT/IntEqClasses.h" @@ -4130,7 +4149,7 @@ arguments. An argument has a pointer to the parent Function.

    Dinakar Dhurjati and Chris Lattner
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-07-25 15:46:11 +0200 (Wed, 25 Jul 2012) $ + Last modified: $Date: 2012-10-07 02:56:09 +0200 (Sun, 07 Oct 2012) $ diff --git a/docs/README.txt b/docs/README.txt index 2fbbf98..5ddd599 100644 --- a/docs/README.txt +++ b/docs/README.txt @@ -6,7 +6,7 @@ The LLVM documentation is currently written in two formats: * Plain HTML documentation. * reStructured Text documentation using the Sphinx documentation generator. It - is currently tested with Sphinx 1.1.3. + is currently tested with Sphinx 1.1.3. For more information, see the "Sphinx Introduction for LLVM Developers" document. diff --git a/docs/ReleaseNotes.html b/docs/ReleaseNotes.html index 85448a5..a4b1d58 100644 --- a/docs/ReleaseNotes.html +++ b/docs/ReleaseNotes.html @@ -466,7 +466,45 @@ Release Notes.

    In addition to many minor performance tweaks and bug fixes, this release includes a few major enhancements and additions to the optimizers:

    +

    Loop Vectorizer - We've added a loop vectorizer and we are now able to + vectorize small loops. The loop vectorizer is disabled by default and + can be enabled using the -mllvm -vectorize-loops flag. + The SIMD vector width can be specified using the flag + -mllvm -force-vector-width=4. + The default value is 0 which means auto-select. +
    + We can now vectorize this function: + +

    +    unsigned sum_arrays(int *A, int *B, int start, int end) {
    +      unsigned sum = 0;
    +      for (int i = start; i < end; ++i)
    +        sum += A[i] + B[i] + i;
    +
    +      return sum;
    +    }
    +    
+ + We can vectorize loops that satisfy the following constraints: +
      +
• The innermost loops must have a single basic block.
    • +
• The number of iterations is known before the loop starts to execute.
    • +
• The loop counter needs to be incremented by one.
    • +
    • The loop trip count can be a variable.
    • +
    • Loops do not need to start at zero.
    • +
    • The induction variable can be used inside the loop.
    • +
    • Loop reductions are supported.
    • +
    • Arrays with affine access pattern do not need to be marked as 'noalias' and are checked at runtime.
    • +
    • ...
    • +
    + +

    + +

    SROA - We've re-written SROA to be significantly more powerful. +

    +
      +
• Branch weight metadata is preserved through more of the optimizer.
    • ...
    @@ -499,13 +537,14 @@ Release Notes.
    -

    We have changed the way that the Type Legalizer legalizes vectors. The type - legalizer now attempts to promote integer elements. This enabled the - implementation of vector-select. Additionally, we see a performance boost on - workloads which use vectors of chars and shorts, since they are now promoted - to 32-bit types, which are better supported by the SIMD instruction set. - Floating point types are still widened as before.

    +

Stack Coloring - We have implemented a new optimization pass + to merge stack objects which are used in disjoint areas of the code. + This optimization reduces the required stack space significantly, in cases + where it is clear to the optimizer that the stack slot is not shared. + We use the lifetime markers to tell the codegen that a certain alloca + is used within a region.
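A rough C++ sketch of the pattern this helps (fillAndConsume is a placeholder): two large locals that are live only in disjoint scopes can be given the same stack slot once their lifetime markers show they never overlap.

.. code-block:: c++

    void fillAndConsume(char *buf);  // placeholder for real work

    void process(bool first) {
      if (first) {
        char bufA[4096];   // live only inside this block
        fillAndConsume(bufA);
      } else {
        char bufB[4096];   // never live at the same time as bufA
        fillAndConsume(bufB);
      }
      // With stack coloring, bufA and bufB can share one 4096-byte slot,
      // roughly halving the frame space this function would otherwise use.
    }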

    +

    We now merge consecutive loads and stores.
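A rough example of the pattern this refers to (an assumption based on the one-line note): stores to adjacent fields can be combined into a single wider store.

.. code-block:: c++

    struct Pair { int x; int y; };  // two adjacent 32-bit fields

    void clear(Pair &P) {
      P.x = 0;  // two consecutive 4-byte stores ...
      P.y = 0;  // ... are candidates for a single 8-byte store
    }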

    We have put a significant amount of work into the code generator infrastructure, which allows us to implement more aggressive algorithms and @@ -608,6 +647,46 @@ Release Notes.

    +PowerPC Target Improvements +

    + +
    + +
      +

      Many fixes and changes across LLVM (and Clang) for better compliance with + the 64-bit PowerPC ELF Application Binary Interface, interoperability with + GCC, and overall 64-bit PowerPC support. Some highlights include:

      +
        +
      • MCJIT support added.
      • +
      • PPC64 relocation support and (small code model) TOC handling + added.
      • +
      • Parameter passing and return value fixes (alignment issues, + padding, varargs support, proper register usage, odd-sized + structure support, float support, extension of return values + for i32 return values).
      • +
      • Fixes in spill and reload code for vector registers.
      • +
      • C++ exception handling enabled.
      • +
      • Changes to remediate double-rounding compatibility issues with + respect to GCC behavior.
      • +
      • Refactoring to disentangle ppc64-elf-linux ABI from Darwin + ppc64 ABI support.
      • +
      • Assorted new test cases and test case fixes (endian and word + size issues).
      • +
      • Fixes for big-endian codegen bugs, instruction encodings, and + instruction constraints.
      • +
      • Implemented -integrated-as support.
      • +
      • Additional support for Altivec compare operations.
      • +
      • IBM long double support.
      • +
      +

      There have also been code generation improvements for both 32- and 64-bit + code. Instruction scheduling support for the Freescale e500mc and e5500 + cores has been added.

      +
    + +
    + + +

    Other Target Specific Improvements

    @@ -646,6 +725,14 @@ Release Notes.

    In addition, many APIs have changed in this release. Some of the major LLVM API changes are:

    +

    We've added a new interface for allowing IR-level passes to access + target-specific information. A new IR-level pass, called + "TargetTransformInfo" provides a number of low-level interfaces. + LSR and LowerInvoke already use the new interface.

    + +

    The TargetData structure has been renamed to DataLayout and moved to VMCore +to remove a dependency on Target.
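A hedged sketch of what the rename looks like from client code; the exact include paths and the set of methods carried over are assumptions based on the old TargetData interface.

.. code-block:: c++

    // Previously: #include "llvm/Target/TargetData.h" and the TargetData class.
    #include "llvm/DataLayout.h"
    #include "llvm/Type.h"

    uint64_t storeSizeOf(const llvm::DataLayout &DL, llvm::Type *Ty) {
      return DL.getTypeStoreSize(Ty);  // same query the old TargetData offered
    }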

    +
    • ...
    @@ -749,7 +836,7 @@ Release Notes. src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"> LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-07-13 14:44:23 +0200 (Fri, 13 Jul 2012) $ + Last modified: $Date: 2012-11-20 05:22:44 +0100 (Tue, 20 Nov 2012) $ diff --git a/docs/SourceLevelDebugging.html b/docs/SourceLevelDebugging.html index bb72bf3..1dcee54 100644 --- a/docs/SourceLevelDebugging.html +++ b/docs/SourceLevelDebugging.html @@ -2367,11 +2367,11 @@ bucket contents: | HEADER.header_data_len | uint32_t | HEADER_DATA | HeaderData |-------------------------| -| BUCKETS | uint32_t[n_buckets] // 32 bit hash indexes +| BUCKETS | uint32_t[bucket_count] // 32 bit hash indexes |-------------------------| -| HASHES | uint32_t[n_buckets] // 32 bit hash values +| HASHES | uint32_t[hashes_count] // 32 bit hash values |-------------------------| -| OFFSETS | uint32_t[n_buckets] // 32 bit offsets to hash value data +| OFFSETS | uint32_t[hashes_count] // 32 bit offsets to hash value data |-------------------------| | ALL HASH DATA | `-------------------------' @@ -2851,7 +2851,7 @@ int main () Chris Lattner
    LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-06-02 12:20:22 +0200 (Sat, 02 Jun 2012) $ + Last modified: $Date: 2012-10-09 01:54:10 +0200 (Tue, 09 Oct 2012) $ diff --git a/docs/SphinxQuickstartTemplate.rst b/docs/SphinxQuickstartTemplate.rst new file mode 100644 index 0000000..75d9163 --- /dev/null +++ b/docs/SphinxQuickstartTemplate.rst @@ -0,0 +1,125 @@ +========================== +Sphinx Quickstart Template +========================== + +.. sectionauthor:: Sean Silva + +Introduction and Quickstart +=========================== + +This document is meant to get you writing documentation as fast as possible +even if you have no previous experience with Sphinx. The goal is to take +someone in the state of "I want to write documentation and get it added to +LLVM's docs" and turn that into useful documentation mailed to llvm-commits +with as little nonsense as possible. + +You can find this document in ``docs/SphinxQuickstartTemplate.rst``. You +should copy it, open the new file in your text editor, write your docs, and +then send the new document to llvm-commits for review. + +Focus on *content*. It is easy to fix the Sphinx (reStructuredText) syntax +later if necessary, although reStructuredText tries to imitate common +plain-text conventions so it should be quite natural. A basic knowledge of +reStructuredText syntax is useful when writing the document, so the last +~half of this document (starting with `Example Section`_) gives examples +which should cover 99% of use cases. + +Let me say that again: focus on *content*. + +Once you have finished with the content, please send the ``.rst`` file to +llvm-commits for review. + +Guidelines +========== + +Try to answer the following questions in your first section: + +#. Why would I want to read this document? + +#. What should I know to be able to follow along with this document? + +#. What will I have learned by the end of this document? + +Common names for the first section are ``Introduction``, ``Overview``, or +``Background``. + +If possible, make your document a "how to". Give it a name ``HowTo*.rst`` +like the other "how to" documents. This format is usually the easiest +for another person to understand and also the most useful. + +You generally should not be writing documentation other than a "how to" +unless there is already a "how to" about your topic. The reason for this +is that without a "how to" document to read first, it is difficult for a +person to understand a more advanced document. + +Focus on content (yes, I had to say it again). + +The rest of this document shows example reStructuredText markup constructs +that are meant to be read by you in your text editor after you have copied +this file into a new file for the documentation you are about to write. + +Example Section +=============== + +Your text can be *emphasized*, **bold**, or ``monospace``. + +Use blank lines to separate paragraphs. + +Headings (like ``Example Section`` just above) give your document +structure. Use the same kind of adornments (e.g. ``======`` vs. ``------``) +as are used in this document. The adornment must be the same length as the +text above it. For Vim users, variations of ``yypVr=`` might be handy. + +Example Subsection +------------------ + +Make a link `like this `_. There is also a more +sophisticated syntax which `can be more readable`_ for longer links since +it disrupts the flow less. You can put the ``.. _`link text`: `` block +pretty much anywhere later in the document. + +.. 
_`can be more readable`: http://en.wikipedia.org/wiki/LLVM + +Lists can be made like this: + +#. A list starting with ``#.`` will be automatically numbered. + +#. This is a second list element. + + #. They nest too. + +You can also use unordered lists. + +* Stuff. + + + Deeper stuff. + +* More stuff. + +Example Subsubsection +^^^^^^^^^^^^^^^^^^^^^ + +You can make blocks of code like this: + +.. code-block:: c++ + + int main() { + return 0 + } + +For a shell session, use a ``bash`` code block: + +.. code-block:: bash + + $ echo "Goodbye cruel world!" + $ rm -rf / + +If you need to show LLVM IR use the ``llvm`` code block. + +Hopefully you won't need to be this deep +"""""""""""""""""""""""""""""""""""""""" + +If you need to do fancier things than what has been shown in this document, +you can mail the list or check Sphinx's `reStructuredText Primer`_. + +.. _`reStructuredText Primer`: http://sphinx.pocoo.org/rest.html diff --git a/docs/TestingGuide.html b/docs/TestingGuide.html index 804e929..c313083 100644 --- a/docs/TestingGuide.html +++ b/docs/TestingGuide.html @@ -218,11 +218,11 @@ you can run the LLVM and Clang tests simultaneously using:

 To run individual tests or subsets of tests, you can use the 'llvm-lit'
 script which is built as part of LLVM. For example, to run the
-'Integer/BitCast.ll' test by itself you can run:
+'Integer/BitPacked.ll' test by itself you can run:

    -% llvm-lit ~/llvm/test/Integer/BitCast.ll 
    +% llvm-lit ~/llvm/test/Integer/BitPacked.ll 
     
    @@ -798,14 +798,15 @@ define two separate CHECK lines that match on the same line.

 Sometimes it is necessary to mark a test case as "expected fail" or
 XFAIL. You can easily mark a test as XFAIL just by including
 XFAIL: on a line near the top of the file. This signals that the test case should succeed
-  if the test fails. Such test cases are counted separately by the testing tool. To
-  specify an expected fail, use the XFAIL keyword in the comments of the test
-  program followed by a colon and one or more regular expressions (separated by
-  a comma). The regular expressions allow you to XFAIL the test conditionally by
-  host platform. The regular expressions following the : are matched against the
-  target triplet for the host machine. If there is a match, the test is expected
-  to fail. If not, the test is expected to succeed. To XFAIL everywhere just
-  specify XFAIL: *. Here is an example of an XFAIL line:
+  if the test fails. Such test cases are counted separately by the testing
+  tool. To specify an expected fail, use the XFAIL keyword in the comments of
+  the test program followed by a colon and one or more failure patterns. Each
+  failure pattern can be either '*' (to specify fail everywhere), or a part of a
+  target triple (indicating the test should fail on that platform), or the name
+  of a configurable feature (for example, "loadable_module"). If there is a
+  match, the test is expected to fail. If not, the test is expected to
+  succeed. To XFAIL everywhere just specify XFAIL: *. Here is an
+  example of an XFAIL line:
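[Editor's aside, for illustration only; these lines are not part of the patch context, and the concrete patterns are invented. XFAIL lines near the top of an .ll test might look like:]

    ; XFAIL: *
    ; XFAIL: arm, loadable_module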

    @@ -909,7 +910,7 @@ the Test Suite Makefile Guide.

    John T. Criswell, Daniel Dunbar, Reid Spencer, and Tanya Lattner
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-05-08 20:26:07 +0200 (Tue, 08 May 2012) $ + Last modified: $Date: 2012-11-07 18:00:18 +0100 (Wed, 07 Nov 2012) $ diff --git a/docs/WritingAnLLVMBackend.html b/docs/WritingAnLLVMBackend.html index 11517c2..b7fdce4 100644 --- a/docs/WritingAnLLVMBackend.html +++ b/docs/WritingAnLLVMBackend.html @@ -32,6 +32,7 @@
  • Instruction Set
  • @@ -314,14 +315,14 @@ represent target components. These methods are named get*Info, and are intended to obtain the instruction set (getInstrInfo), register set (getRegisterInfo), stack frame layout (getFrameInfo), and similar information. XXXTargetMachine must also implement the -getTargetData method to access an object with target-specific data +getDataLayout method to access an object with target-specific data characteristics, such as data type size and alignment requirements.

    For instance, for the SPARC target, the header file SparcTargetMachine.h declares prototypes for several get*Info -and getTargetData methods that simply return a class member. +and getDataLayout methods that simply return a class member.

    @@ -331,7 +332,7 @@ namespace llvm { class Module; class SparcTargetMachine : public LLVMTargetMachine { - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DataLayout; // Calculates type size & alignment SparcSubtarget Subtarget; SparcInstrInfo InstrInfo; TargetFrameInfo FrameInfo; @@ -348,7 +349,7 @@ public: virtual const TargetRegisterInfo *getRegisterInfo() const { return &InstrInfo.getRegisterInfo(); } - virtual const TargetData *getTargetData() const { return &DataLayout; } + virtual const DataLayout *getDataLayout() const { return &DataLayout; } static unsigned getModuleMatchQuality(const Module &M); // Pass Pipeline Configuration @@ -364,7 +365,7 @@ public:
  • getInstrInfo()
  • getRegisterInfo()
  • getFrameInfo()
  • -
  • getTargetData()
  • +
  • getDataLayout()
  • getSubtargetImpl()
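[Editor's aside, a hedged sketch rather than code from the patch: what querying the "data type size and alignment requirements" exposed through getDataLayout() can look like in client code. The member functions named below reflect the 2012-era DataLayout interface and should be treated as assumptions.]

    #include <cstdint>
    #include "llvm/DataLayout.h"   // new header location used elsewhere in this patch
    #include "llvm/Type.h"

    // Query a few layout facts a backend typically needs for a given IR type.
    void showLayoutFacts(const llvm::DataLayout &DL, llvm::Type *Ty) {
      uint64_t SizeInBytes = DL.getTypeAllocSize(Ty);    // size in bytes, including padding
      unsigned ABIAlign    = DL.getABITypeAlignment(Ty); // required ABI alignment
      unsigned PtrBytes    = DL.getPointerSize();        // pointer width in bytes
      (void)SizeInBytes; (void)ABIAlign; (void)PtrBytes;
    }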
  • @@ -1259,6 +1260,29 @@ the rd, rs1, and rs2 fields respectively.

    + Instruction Relation Mapping +

    + +
    + +

+This TableGen feature is used to relate instructions with each other. It is
+particularly useful when you have multiple instruction formats and need to
+switch between them after instruction selection. This entire feature is driven
+by relation models which can be defined in XXXInstrInfo.td files
+according to the target-specific instruction set. Relation models are defined
+using InstrMapping class as a base. TableGen parses all the models
+and generates instruction relation maps using the specified information.
+Relation maps are emitted as tables in the XXXGenInstrInfo.inc file
+along with the functions to query them. For the detailed information on how to
+use this feature, please refer to
+How to add Instruction Mappings
+document.
+

    +
    + + +
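[Editor's aside, a hypothetical sketch only: neither the query function nor the XXX namespace below exists in the patch; they stand in for whatever TableGen emits into XXXGenInstrInfo.inc for a particular relation model. The sketch shows how target code might consume such a generated query to switch an instruction's format after selection.]

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetInstrInfo.h"

    // Assume TableGen emitted, for a relation model named "getPredicatedOpcode",
    // a lookup table plus this query function into XXXGenInstrInfo.inc:
    //   int getPredicatedOpcode(uint16_t Opcode);   // hypothetical; returns -1 if unrelated
    //
    // A target pass could then rewrite a selected instruction in place:
    void predicateIfPossible(llvm::MachineInstr &MI,
                             const llvm::TargetInstrInfo &TII) {
      int NewOpc = XXX::getPredicatedOpcode(MI.getOpcode()); // hypothetical generated fn
      if (NewOpc != -1)
        MI.setDesc(TII.get(NewOpc));                         // switch instruction format
    }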

    Implement a subclass of TargetInstrInfo

    @@ -2526,7 +2550,7 @@ with assembler. Mason Woo and Misha Brukman
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-04-19 22:20:34 +0200 (Thu, 19 Apr 2012) $ + Last modified: $Date: 2012-10-25 17:54:06 +0200 (Thu, 25 Oct 2012) $ diff --git a/docs/_themes/llvm-theme/layout.html b/docs/_themes/llvm-theme/layout.html new file mode 100644 index 0000000..746c2f5 --- /dev/null +++ b/docs/_themes/llvm-theme/layout.html @@ -0,0 +1,23 @@ +{# + sphinxdoc/layout.html + ~~~~~~~~~~~~~~~~~~~~~ + + Sphinx layout template for the sphinxdoc theme. + + :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. +#} +{% extends "basic/layout.html" %} + +{% block relbar1 %} + +{{ super() }} +{% endblock %} + +{# put the sidebar before the body #} +{% block sidebar1 %}{{ sidebar() }}{% endblock %} +{% block sidebar2 %}{% endblock %} diff --git a/docs/_themes/llvm-theme/static/contents.png b/docs/_themes/llvm-theme/static/contents.png new file mode 100644 index 0000000..7fb8215 Binary files /dev/null and b/docs/_themes/llvm-theme/static/contents.png differ diff --git a/docs/_themes/llvm-theme/static/llvm-theme.css b/docs/_themes/llvm-theme/static/llvm-theme.css new file mode 100644 index 0000000..beab2ca --- /dev/null +++ b/docs/_themes/llvm-theme/static/llvm-theme.css @@ -0,0 +1,371 @@ +/* + * sphinxdoc.css_t + * ~~~~~~~~~~~~~~~ + * + * Sphinx stylesheet -- sphinxdoc theme. Originally created by + * Armin Ronacher for Werkzeug. + * + * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', + 'Verdana', sans-serif; + font-size: 14px; + line-height: 150%; + text-align: center; + background-color: #BFD1D4; + color: black; + padding: 0; + border: 1px solid #aaa; + + margin: 0px 80px 0px 80px; + min-width: 740px; +} + +div.logo { + background-color: white; + text-align: left; + padding: 10px 10px 15px 15px; +} + +div.document { + background-color: white; + text-align: left; + background-image: url(contents.png); + background-repeat: repeat-x; +} + +div.bodywrapper { + margin: 0 240px 0 0; + border-right: 1px solid #ccc; +} + +div.body { + margin: 0; + padding: 0.5em 20px 20px 20px; +} + +div.related { + font-size: 1em; +} + +div.related ul { + background-image: url(navigation.png); + height: 2em; + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; +} + +div.related ul li { + margin: 0; + padding: 0; + height: 2em; + float: left; +} + +div.related ul li.right { + float: right; + margin-right: 5px; +} + +div.related ul li a { + margin: 0; + padding: 0 5px 0 5px; + line-height: 1.75em; + color: #EE9816; +} + +div.related ul li a:hover { + color: #3CA8E7; +} + +div.sphinxsidebarwrapper { + padding: 0; +} + +div.sphinxsidebar { + margin: 0; + padding: 0.5em 15px 15px 0; + width: 210px; + float: right; + font-size: 1em; + text-align: left; +} + +div.sphinxsidebar h3, div.sphinxsidebar h4 { + margin: 1em 0 0.5em 0; + font-size: 1em; + padding: 0.1em 0 0.1em 0.5em; + color: white; + border: 1px solid #86989B; + background-color: #AFC1C4; +} + +div.sphinxsidebar h3 a { + color: white; +} + +div.sphinxsidebar ul { + padding-left: 1.5em; + margin-top: 7px; + padding: 0; + line-height: 130%; +} + +div.sphinxsidebar ul ul { + margin-left: 20px; +} + +div.footer { + background-color: #E3EFF1; + color: #86989B; + padding: 3px 8px 3px 0; + clear: both; + font-size: 0.8em; + text-align: right; +} + +div.footer 
a { + color: #86989B; + text-decoration: underline; +} + +/* -- body styles ----------------------------------------------------------- */ + +p { + margin: 0.8em 0 0.5em 0; +} + +a { + color: #CA7900; + text-decoration: none; +} + +a:hover { + color: #2491CF; +} + +div.body p a{ + text-decoration: underline; +} + +h1 { + margin: 0; + padding: 0.7em 0 0.3em 0; + font-size: 1.5em; + color: #11557C; +} + +h2 { + margin: 1.3em 0 0.2em 0; + font-size: 1.35em; + padding: 0; +} + +h3 { + margin: 1em 0 -0.3em 0; + font-size: 1.2em; +} + +h3 a:hover { + text-decoration: underline; +} + +div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { + color: black!important; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + background-color: #f2f2f2; + font-weight: normal; + color: #20435c; + border-bottom: 1px solid #ccc; + margin: 20px -20px 10px -20px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 160%; } +div.body h3 { font-size: 140%; } +div.body h4 { font-size: 120%; } +div.body h5 { font-size: 110%; } +div.body h6 { font-size: 100%; } + +h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { + display: none; + margin: 0 0 0 0.3em; + padding: 0 0.2em 0 0.2em; + color: #aaa!important; +} + +h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, +h5:hover a.anchor, h6:hover a.anchor { + display: inline; +} + +h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, +h5 a.anchor:hover, h6 a.anchor:hover { + color: #777; + background-color: #eee; +} + +a.headerlink { + color: #c60f0f!important; + font-size: 1em; + margin-left: 6px; + padding: 0 4px 0 4px; + text-decoration: none!important; +} + +a.headerlink:hover { + background-color: #ccc; + color: white!important; +} + +cite, code, tt { + font-family: 'Consolas', 'Deja Vu Sans Mono', + 'Bitstream Vera Sans Mono', monospace; + font-size: 0.95em; +} + +:not(a.reference) > tt { + background-color: #f2f2f2; + border-bottom: 1px solid #ddd; + color: #333; +} + +tt.descname, tt.descclassname, tt.xref { + border: 0; +} + +hr { + border: 1px solid #abc; + margin: 2em; +} + +p a tt { + border: 0; + color: #CA7900; +} + +p a tt:hover { + color: #2491CF; +} + +a tt { + border: none; +} + +pre { + font-family: 'Consolas', 'Deja Vu Sans Mono', + 'Bitstream Vera Sans Mono', monospace; + font-size: 0.95em; + line-height: 120%; + padding: 0.5em; + border: 1px solid #ccc; + background-color: #f8f8f8; +} + +pre a { + color: inherit; + text-decoration: underline; +} + +td.linenos pre { + padding: 0.5em 0; +} + +div.quotebar { + background-color: #f8f8f8; + max-width: 250px; + float: right; + padding: 2px 7px; + border: 1px solid #ccc; +} + +div.topic { + background-color: #f8f8f8; +} + +table { + border-collapse: collapse; + margin: 0 -0.5em 0 -0.5em; +} + +table td, table th { + padding: 0.2em 0.5em 0.2em 0.5em; +} + +div.admonition, div.warning { + font-size: 0.9em; + margin: 1em 0 1em 0; + border: 1px solid #86989B; + background-color: #f7f7f7; + padding: 0; +} + +div.admonition p, div.warning p { + margin: 0.5em 1em 0.5em 1em; + padding: 0; +} + +div.admonition pre, div.warning pre { + margin: 0.4em 1em 0.4em 1em; +} + +div.admonition p.admonition-title, +div.warning p.admonition-title { + margin: 0; + padding: 0.1em 0 0.1em 0.5em; + color: white; + border-bottom: 1px solid #86989B; + font-weight: bold; + background-color: #AFC1C4; +} + +div.warning { + border: 1px 
solid #940000; +} + +div.warning p.admonition-title { + background-color: #CF0000; + border-bottom-color: #940000; +} + +div.admonition ul, div.admonition ol, +div.warning ul, div.warning ol { + margin: 0.1em 0.5em 0.5em 3em; + padding: 0; +} + +div.versioninfo { + margin: 1em 0 0 0; + border: 1px solid #ccc; + background-color: #DDEAF0; + padding: 8px; + line-height: 1.3em; + font-size: 0.9em; +} + +.viewcode-back { + font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', + 'Verdana', sans-serif; +} + +div.viewcode-block:target { + background-color: #f4debf; + border-top: 1px solid #ac9; + border-bottom: 1px solid #ac9; +} diff --git a/docs/_themes/llvm-theme/static/logo.png b/docs/_themes/llvm-theme/static/logo.png new file mode 100644 index 0000000..18d424c Binary files /dev/null and b/docs/_themes/llvm-theme/static/logo.png differ diff --git a/docs/_themes/llvm-theme/static/navigation.png b/docs/_themes/llvm-theme/static/navigation.png new file mode 100644 index 0000000..1081dc1 Binary files /dev/null and b/docs/_themes/llvm-theme/static/navigation.png differ diff --git a/docs/_themes/llvm-theme/theme.conf b/docs/_themes/llvm-theme/theme.conf new file mode 100644 index 0000000..573fd78 --- /dev/null +++ b/docs/_themes/llvm-theme/theme.conf @@ -0,0 +1,4 @@ +[theme] +inherit = basic +stylesheet = llvm-theme.css +pygments_style = friendly diff --git a/docs/conf.py b/docs/conf.py index de0585d..a1e9b5f 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -98,7 +98,7 @@ html_theme = 'llvm-theme' #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -html_theme_path = ["."] +html_theme_path = ["_themes"] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". @@ -134,18 +134,7 @@ html_sidebars = {'index': 'indexsidebar.html'} # Additional templates that should be rendered to pages, maps page names to # template names. -# -# We load all the old-school HTML documentation pages into Sphinx here. -basedir = os.path.dirname(__file__) -html_additional_pages = {} -for directory in ('', 'tutorial'): - for file in os.listdir(os.path.join(basedir, directory)): - if not file.endswith('.html'): - continue - - subpath = os.path.join(directory, file) - name,_ = os.path.splitext(subpath) - html_additional_pages[name] = subpath +#html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True @@ -226,6 +215,7 @@ man_pages = [] # Automatically derive the list of man pages from the contents of the command # guide subdirectory. +basedir = os.path.dirname(__file__) man_page_authors = "Maintained by The LLVM Team (http://llvm.org/)." command_guide_subpath = 'CommandGuide' command_guide_path = os.path.join(basedir, command_guide_subpath) @@ -237,9 +227,8 @@ for name in os.listdir(command_guide_path): # Otherwise, automatically extract the description. file_subpath = os.path.join(command_guide_subpath, name) with open(os.path.join(command_guide_path, name)) as f: - it = iter(f) - title = it.next()[:-1] - header = it.next()[:-1] + title = f.readline().rstrip('\n') + header = f.readline().rstrip('\n') if len(header) != len(title): print >>sys.stderr, ( diff --git a/docs/index.rst b/docs/index.rst index 53d3e7c..d406b52 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,43 +15,43 @@ research projects. 
Similarly, documentation is broken down into several high-level groupings targeted at different audiences: - * **Design & Overview** +* **Design & Overview** - Several introductory papers and presentations are available at - :ref:`design_and_overview`. + Several introductory papers and presentations are available at + :ref:`design_and_overview`. - * **Publications** +* **Publications** - The list of `publications `_ based on LLVM. + The list of `publications `_ based on LLVM. - * **User Guides** +* **User Guides** - Those new to the LLVM system should first vist the :ref:`userguides`. + Those new to the LLVM system should first visit the :ref:`userguides`. - NOTE: If you are a user who is only interested in using LLVM-based - compilers, you should look into `Clang `_ or - `DragonEgg `_ instead. The documentation here is - intended for users who have a need to work with the intermediate LLVM - representation. + NOTE: If you are a user who is only interested in using LLVM-based + compilers, you should look into `Clang `_ or + `DragonEgg `_ instead. The documentation here is + intended for users who have a need to work with the intermediate LLVM + representation. - * **API Clients** +* **API Clients** - Developers of applications which use LLVM as a library should visit the - :ref:`programming`. + Developers of applications which use LLVM as a library should visit the + :ref:`programming`. - * **Subsystems** +* **Subsystems** - API clients and LLVM developers may be interested in the - :ref:`subsystems` documentation. + API clients and LLVM developers may be interested in the + :ref:`subsystems` documentation. - * **Development Process** +* **Development Process** - Additional documentation on the LLVM project can be found at - :ref:`development_process`. + Additional documentation on the LLVM project can be found at + :ref:`development_process`. - * **Mailing Lists** +* **Mailing Lists** - For more information, consider consulting the LLVM :ref:`mailing_lists`. + For more information, consider consulting the LLVM :ref:`mailing_lists`. .. toctree:: :maxdepth: 2 diff --git a/docs/llvm-theme/layout.html b/docs/llvm-theme/layout.html deleted file mode 100644 index 746c2f5..0000000 --- a/docs/llvm-theme/layout.html +++ /dev/null @@ -1,23 +0,0 @@ -{# - sphinxdoc/layout.html - ~~~~~~~~~~~~~~~~~~~~~ - - Sphinx layout template for the sphinxdoc theme. - - :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. - :license: BSD, see LICENSE for details. -#} -{% extends "basic/layout.html" %} - -{% block relbar1 %} - -{{ super() }} -{% endblock %} - -{# put the sidebar before the body #} -{% block sidebar1 %}{{ sidebar() }}{% endblock %} -{% block sidebar2 %}{% endblock %} diff --git a/docs/llvm-theme/static/contents.png b/docs/llvm-theme/static/contents.png deleted file mode 100644 index 7fb8215..0000000 Binary files a/docs/llvm-theme/static/contents.png and /dev/null differ diff --git a/docs/llvm-theme/static/llvm-theme.css b/docs/llvm-theme/static/llvm-theme.css deleted file mode 100644 index f684d00..0000000 --- a/docs/llvm-theme/static/llvm-theme.css +++ /dev/null @@ -1,374 +0,0 @@ -/* - * sphinxdoc.css_t - * ~~~~~~~~~~~~~~~ - * - * Sphinx stylesheet -- sphinxdoc theme. Originally created by - * Armin Ronacher for Werkzeug. - * - * :copyright: Copyright 2007-2010 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -body { - font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; - font-size: 14px; - letter-spacing: -0.01em; - line-height: 150%; - text-align: center; - background-color: #BFD1D4; - color: black; - padding: 0; - border: 1px solid #aaa; - - margin: 0px 80px 0px 80px; - min-width: 740px; -} - -div.logo { - background-color: white; - text-align: left; - padding: 10px 10px 15px 15px; -} - -div.document { - background-color: white; - text-align: left; - background-image: url(contents.png); - background-repeat: repeat-x; -} - -div.bodywrapper { - margin: 0 240px 0 0; - border-right: 1px solid #ccc; -} - -div.body { - margin: 0; - padding: 0.5em 20px 20px 20px; -} - -div.related { - font-size: 1em; -} - -div.related ul { - background-image: url(navigation.png); - height: 2em; - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; -} - -div.related ul li { - margin: 0; - padding: 0; - height: 2em; - float: left; -} - -div.related ul li.right { - float: right; - margin-right: 5px; -} - -div.related ul li a { - margin: 0; - padding: 0 5px 0 5px; - line-height: 1.75em; - color: #EE9816; -} - -div.related ul li a:hover { - color: #3CA8E7; -} - -div.sphinxsidebarwrapper { - padding: 0; -} - -div.sphinxsidebar { - margin: 0; - padding: 0.5em 15px 15px 0; - width: 210px; - float: right; - font-size: 1em; - text-align: left; -} - -div.sphinxsidebar h3, div.sphinxsidebar h4 { - margin: 1em 0 0.5em 0; - font-size: 1em; - padding: 0.1em 0 0.1em 0.5em; - color: white; - border: 1px solid #86989B; - background-color: #AFC1C4; -} - -div.sphinxsidebar h3 a { - color: white; -} - -div.sphinxsidebar ul { - padding-left: 1.5em; - margin-top: 7px; - padding: 0; - line-height: 130%; -} - -div.sphinxsidebar ul ul { - margin-left: 20px; -} - -div.footer { - background-color: #E3EFF1; - color: #86989B; - padding: 3px 8px 3px 0; - clear: both; - font-size: 0.8em; - text-align: right; -} - -div.footer a { - color: #86989B; - text-decoration: underline; -} - -/* -- body styles ----------------------------------------------------------- */ - -p { - margin: 0.8em 0 0.5em 0; -} - -a { - color: #CA7900; - text-decoration: none; -} - -a:hover { - color: #2491CF; -} - -div.body p a{ - text-decoration: underline; -} - -h1 { - margin: 0; - padding: 0.7em 0 0.3em 0; - font-size: 1.5em; - color: #11557C; -} - -h2 { - margin: 1.3em 0 0.2em 0; - font-size: 1.35em; - padding: 0; -} - -h3 { - margin: 1em 0 -0.3em 0; - font-size: 1.2em; -} - -h3 a:hover { - text-decoration: underline; -} - -div.body h1 a, div.body h2 a, div.body h3 a, div.body h4 a, div.body h5 a, div.body h6 a { - color: black!important; -} - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - background-color: #f2f2f2; - font-weight: normal; - color: #20435c; - border-bottom: 1px solid #ccc; - margin: 20px -20px 10px -20px; - padding: 3px 0 3px 10px; -} - -div.body h1 { margin-top: 0; font-size: 200%; } -div.body h2 { font-size: 160%; } -div.body h3 { font-size: 140%; } -div.body h4 { font-size: 120%; } -div.body h5 { font-size: 110%; } -div.body h6 { font-size: 100%; } - -h1 a.anchor, h2 a.anchor, h3 a.anchor, h4 a.anchor, h5 a.anchor, h6 a.anchor { - display: none; - margin: 0 0 0 0.3em; - padding: 0 0.2em 0 0.2em; - color: #aaa!important; -} - -h1:hover a.anchor, h2:hover a.anchor, h3:hover a.anchor, h4:hover a.anchor, -h5:hover a.anchor, h6:hover a.anchor { - 
display: inline; -} - -h1 a.anchor:hover, h2 a.anchor:hover, h3 a.anchor:hover, h4 a.anchor:hover, -h5 a.anchor:hover, h6 a.anchor:hover { - color: #777; - background-color: #eee; -} - -a.headerlink { - color: #c60f0f!important; - font-size: 1em; - margin-left: 6px; - padding: 0 4px 0 4px; - text-decoration: none!important; -} - -a.headerlink:hover { - background-color: #ccc; - color: white!important; -} - -cite, code, tt { - font-family: 'Consolas', 'Deja Vu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 0.95em; - letter-spacing: 0.01em; -} - -:not(a.reference) > tt { - background-color: #f2f2f2; - border-bottom: 1px solid #ddd; - color: #333; -} - -tt.descname, tt.descclassname, tt.xref { - border: 0; -} - -hr { - border: 1px solid #abc; - margin: 2em; -} - -p a tt { - border: 0; - color: #CA7900; -} - -p a tt:hover { - color: #2491CF; -} - -a tt { - border: none; -} - -pre { - font-family: 'Consolas', 'Deja Vu Sans Mono', - 'Bitstream Vera Sans Mono', monospace; - font-size: 0.95em; - letter-spacing: 0.015em; - line-height: 120%; - padding: 0.5em; - border: 1px solid #ccc; - background-color: #f8f8f8; -} - -pre a { - color: inherit; - text-decoration: underline; -} - -td.linenos pre { - padding: 0.5em 0; -} - -div.quotebar { - background-color: #f8f8f8; - max-width: 250px; - float: right; - padding: 2px 7px; - border: 1px solid #ccc; -} - -div.topic { - background-color: #f8f8f8; -} - -table { - border-collapse: collapse; - margin: 0 -0.5em 0 -0.5em; -} - -table td, table th { - padding: 0.2em 0.5em 0.2em 0.5em; -} - -div.admonition, div.warning { - font-size: 0.9em; - margin: 1em 0 1em 0; - border: 1px solid #86989B; - background-color: #f7f7f7; - padding: 0; -} - -div.admonition p, div.warning p { - margin: 0.5em 1em 0.5em 1em; - padding: 0; -} - -div.admonition pre, div.warning pre { - margin: 0.4em 1em 0.4em 1em; -} - -div.admonition p.admonition-title, -div.warning p.admonition-title { - margin: 0; - padding: 0.1em 0 0.1em 0.5em; - color: white; - border-bottom: 1px solid #86989B; - font-weight: bold; - background-color: #AFC1C4; -} - -div.warning { - border: 1px solid #940000; -} - -div.warning p.admonition-title { - background-color: #CF0000; - border-bottom-color: #940000; -} - -div.admonition ul, div.admonition ol, -div.warning ul, div.warning ol { - margin: 0.1em 0.5em 0.5em 3em; - padding: 0; -} - -div.versioninfo { - margin: 1em 0 0 0; - border: 1px solid #ccc; - background-color: #DDEAF0; - padding: 8px; - line-height: 1.3em; - font-size: 0.9em; -} - -.viewcode-back { - font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', - 'Verdana', sans-serif; -} - -div.viewcode-block:target { - background-color: #f4debf; - border-top: 1px solid #ac9; - border-bottom: 1px solid #ac9; -} diff --git a/docs/llvm-theme/static/logo.png b/docs/llvm-theme/static/logo.png deleted file mode 100644 index 18d424c..0000000 Binary files a/docs/llvm-theme/static/logo.png and /dev/null differ diff --git a/docs/llvm-theme/static/navigation.png b/docs/llvm-theme/static/navigation.png deleted file mode 100644 index 1081dc1..0000000 Binary files a/docs/llvm-theme/static/navigation.png and /dev/null differ diff --git a/docs/llvm-theme/theme.conf b/docs/llvm-theme/theme.conf deleted file mode 100644 index 573fd78..0000000 --- a/docs/llvm-theme/theme.conf +++ /dev/null @@ -1,4 +0,0 @@ -[theme] -inherit = basic -stylesheet = llvm-theme.css -pygments_style = friendly diff --git a/docs/programming.rst b/docs/programming.rst index 27e4301..c4eec59 100644 --- a/docs/programming.rst +++ 
b/docs/programming.rst @@ -6,14 +6,22 @@ Programming Documentation .. toctree:: :hidden: + Atomics CodingStandards CommandLine + CompilerWriterInfo + ExtendingLLVM + HowToSetUpLLVMStyleRTTI * `LLVM Language Reference Manual `_ Defines the LLVM intermediate representation and the assembly form of the different nodes. +* :ref:`atomics` + + Information about LLVM's concurrency model. + * `The LLVM Programmers Manual `_ Introduction to the general layout of the LLVM sourcebase, important classes @@ -28,7 +36,12 @@ Programming Documentation Details the LLVM coding standards and provides useful information on writing efficient C++ code. -* `Extending LLVM `_ +* :doc:`HowToSetUpLLVMStyleRTTI` + + How to make ``isa<>``, ``dyn_cast<>``, etc. available for clients of your + class hierarchy. + +* :ref:`extending_llvm` Look here to see how to add instructions and intrinsics to LLVM. @@ -38,3 +51,7 @@ Programming Documentation (`tarball `_) * `ViewVC Repository Browser `_ + +* :ref:`compiler_writer_info` + + A list of helpful links for compiler writers. diff --git a/docs/subsystems.rst b/docs/subsystems.rst index be33295..80d0eed 100644 --- a/docs/subsystems.rst +++ b/docs/subsystems.rst @@ -15,6 +15,9 @@ Subsystem Documentation LinkTimeOptimization SegmentedStacks TableGenFundamentals + DebuggingJITedCode + GoldPlugin + MarkedUpDisassembly * `Writing an LLVM Pass `_ @@ -74,11 +77,11 @@ Subsystem Documentation This document describes the interface between LLVM intermodular optimizer and the linker and its design -* `The LLVM gold plugin `_ +* :ref:`gold-plugin` How to build your programs with link-time optimization on Linux. -* `The GDB JIT interface `_ +* :ref:`debugging-jited-code` How to debug JITed code with GDB. @@ -89,3 +92,15 @@ Subsystem Documentation * :ref:`segmented_stacks` This document describes segmented stacks and how they are used in LLVM. + +* `Howto: Implementing LLVM Integrated Assembler`_ + + A simple guide for how to implement an LLVM integrated assembler for an + architecture. + +.. _`Howto: Implementing LLVM Integrated Assembler`: http://www.embecosm.com/download/ean10.html + +* :ref:`marked_up_disassembly` + + This document describes the optional rich disassembly output syntax. + diff --git a/docs/tutorial/LangImpl4.html b/docs/tutorial/LangImpl4.html index 3f8d4a4..5e9c656 100644 --- a/docs/tutorial/LangImpl4.html +++ b/docs/tutorial/LangImpl4.html @@ -173,7 +173,7 @@ add a set of optimizations to run. The code looks like this:

    // Set up the optimizer pipeline. Start with registering info about how the // target lays out data structures. - OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData())); + OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout())); // Provide basic AliasAnalysis support for GVN. OurFPM.add(createBasicAliasAnalysisPass()); // Do simple "peephole" optimizations and bit-twiddling optzns. @@ -523,7 +523,7 @@ at runtime.

    #include "llvm/PassManager.h" #include "llvm/Analysis/Verifier.h" #include "llvm/Analysis/Passes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/TargetSelect.h" #include <cstdio> @@ -1103,7 +1103,7 @@ int main() { // Set up the optimizer pipeline. Start with registering info about how the // target lays out data structures. - OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData())); + OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout())); // Provide basic AliasAnalysis support for GVN. OurFPM.add(createBasicAliasAnalysisPass()); // Do simple "peephole" optimizations and bit-twiddling optzns. @@ -1146,7 +1146,7 @@ int main() { Chris Lattner
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-06-29 14:38:19 +0200 (Fri, 29 Jun 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/tutorial/LangImpl5.html b/docs/tutorial/LangImpl5.html index a7a3737..9a9fd8c 100644 --- a/docs/tutorial/LangImpl5.html +++ b/docs/tutorial/LangImpl5.html @@ -901,7 +901,7 @@ clang++ -g toy.cpp `llvm-config --cppflags --ldflags --libs core jit native` -O3 #include "llvm/PassManager.h" #include "llvm/Analysis/Verifier.h" #include "llvm/Analysis/Passes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/TargetSelect.h" #include <cstdio> @@ -1723,7 +1723,7 @@ int main() { // Set up the optimizer pipeline. Start with registering info about how the // target lays out data structures. - OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData())); + OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout())); // Provide basic AliasAnalysis support for GVN. OurFPM.add(createBasicAliasAnalysisPass()); // Do simple "peephole" optimizations and bit-twiddling optzns. @@ -1766,7 +1766,7 @@ int main() { Chris Lattner
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-06-29 14:38:19 +0200 (Fri, 29 Jun 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/tutorial/LangImpl6.html b/docs/tutorial/LangImpl6.html index 1128893..7cd87da 100644 --- a/docs/tutorial/LangImpl6.html +++ b/docs/tutorial/LangImpl6.html @@ -840,7 +840,7 @@ library, although doing that will cause problems on Windows.

    #include "llvm/PassManager.h" #include "llvm/Analysis/Verifier.h" #include "llvm/Analysis/Passes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/TargetSelect.h" #include <cstdio> @@ -1780,7 +1780,7 @@ int main() { // Set up the optimizer pipeline. Start with registering info about how the // target lays out data structures. - OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData())); + OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout())); // Provide basic AliasAnalysis support for GVN. OurFPM.add(createBasicAliasAnalysisPass()); // Do simple "peephole" optimizations and bit-twiddling optzns. @@ -1823,7 +1823,7 @@ int main() { Chris Lattner
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-07-31 09:05:57 +0200 (Tue, 31 Jul 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/tutorial/LangImpl7.html b/docs/tutorial/LangImpl7.html index f1fe404..4d5a4aa 100644 --- a/docs/tutorial/LangImpl7.html +++ b/docs/tutorial/LangImpl7.html @@ -524,7 +524,7 @@ good codegen once again:

         // Set up the optimizer pipeline.  Start with registering info about how the
         // target lays out data structures.
    -    OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData()));
    +    OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout()));
         // Promote allocas to registers.
         OurFPM.add(createPromoteMemoryToRegisterPass());
         // Do simple "peephole" optimizations and bit-twiddling optzns.
    @@ -1008,7 +1008,7 @@ clang++ -g toy.cpp `llvm-config --cppflags --ldflags --libs core jit native` -O3
     #include "llvm/PassManager.h"
     #include "llvm/Analysis/Verifier.h"
     #include "llvm/Analysis/Passes.h"
    -#include "llvm/Target/TargetData.h"
    +#include "llvm/DataLayout.h"
     #include "llvm/Transforms/Scalar.h"
     #include "llvm/Support/TargetSelect.h"
     #include <cstdio>
    @@ -2113,7 +2113,7 @@ int main() {
     
       // Set up the optimizer pipeline.  Start with registering info about how the
       // target lays out data structures.
    -  OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData()));
    +  OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout()));
       // Provide basic AliasAnalysis support for GVN.
       OurFPM.add(createBasicAliasAnalysisPass());
       // Promote allocas to registers.
    @@ -2158,7 +2158,7 @@ int main() {
     
       Chris Lattner
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-06-29 14:38:19 +0200 (Fri, 29 Jun 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/tutorial/OCamlLangImpl4.html b/docs/tutorial/OCamlLangImpl4.html index e3e2469..d3cfd3d 100644 --- a/docs/tutorial/OCamlLangImpl4.html +++ b/docs/tutorial/OCamlLangImpl4.html @@ -189,7 +189,7 @@ add a set of optimizations to run. The code looks like this:

    (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Do simple "peephole" optimizations and bit-twiddling optzn. *) add_instruction_combining the_fpm; @@ -965,7 +965,7 @@ let main () = (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Do simple "peephole" optimizations and bit-twiddling optzn. *) add_instruction_combination the_fpm; @@ -1020,7 +1020,7 @@ extern double putchard(double X) { Chris Lattner
    Erick Tryzelaar
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-05-03 00:46:36 +0200 (Thu, 03 May 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/tutorial/OCamlLangImpl5.html b/docs/tutorial/OCamlLangImpl5.html index 994957e..0a759ac 100644 --- a/docs/tutorial/OCamlLangImpl5.html +++ b/docs/tutorial/OCamlLangImpl5.html @@ -1498,7 +1498,7 @@ let main () = (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Do simple "peephole" optimizations and bit-twiddling optzn. *) add_instruction_combination the_fpm; @@ -1554,7 +1554,7 @@ operators Chris Lattner
    Erick Tryzelaar
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-05-03 00:46:36 +0200 (Thu, 03 May 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/tutorial/OCamlLangImpl6.html b/docs/tutorial/OCamlLangImpl6.html index cef3884..db25240 100644 --- a/docs/tutorial/OCamlLangImpl6.html +++ b/docs/tutorial/OCamlLangImpl6.html @@ -1506,7 +1506,7 @@ let main () = (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Do simple "peephole" optimizations and bit-twiddling optzn. *) add_instruction_combination the_fpm; @@ -1568,7 +1568,7 @@ SSA construction Chris Lattner
    Erick Tryzelaar
    The LLVM Compiler Infrastructure
    - Last modified: $Date: 2012-07-31 09:05:57 +0200 (Tue, 31 Jul 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/tutorial/OCamlLangImpl7.html b/docs/tutorial/OCamlLangImpl7.html index abe8913..aa30555 100644 --- a/docs/tutorial/OCamlLangImpl7.html +++ b/docs/tutorial/OCamlLangImpl7.html @@ -545,7 +545,7 @@ let main () = (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Promote allocas to registers. *) add_memory_to_register_promotion the_fpm; @@ -1834,7 +1834,7 @@ let main () = (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Promote allocas to registers. *) add_memory_to_register_promotion the_fpm; @@ -1898,7 +1898,7 @@ extern double printd(double X) { Chris Lattner
    The LLVM Compiler Infrastructure
    Erick Tryzelaar
    - Last modified: $Date: 2012-05-03 00:46:36 +0200 (Thu, 03 May 2012) $ + Last modified: $Date: 2012-10-08 18:39:34 +0200 (Mon, 08 Oct 2012) $ diff --git a/docs/userguides.rst b/docs/userguides.rst index 26a5a8c..8c1554d 100644 --- a/docs/userguides.rst +++ b/docs/userguides.rst @@ -7,14 +7,21 @@ User Guides :hidden: CMake + HowToBuildOnARM CommandGuide/index DeveloperPolicy + GettingStarted GettingStartedVS FAQ Lexicon Packaging + HowToAddABuilder + yaml2obj + HowToSubmitABug + SphinxQuickstartTemplate + Phabricator -* `The LLVM Getting Started Guide `_ +* :ref:`getting_started` Discusses how to get up and running quickly with the LLVM infrastructure. Everything from unpacking and compilation of the distribution to execution @@ -24,7 +31,11 @@ User Guides An addendum to the main Getting Started guide for those using the `CMake build system `_. - + +* :ref:`how_to_build_on_arm` + + Notes on building and testing LLVM/Clang on ARM. + * `Getting Started with the LLVM System using Microsoft Visual Studio `_ @@ -57,10 +68,14 @@ User Guides This describes new features, known bugs, and other limitations. -* `How to Submit A Bug Report `_ +* :ref:`how-to-submit-a-bug-report` Instructions for properly submitting information about any bugs you run into in the LLVM system. +* :doc:`SphinxQuickstartTemplate` + + A template + tutorial for writing new Sphinx documentation. It is meant + to be read in source form. * `LLVM Testing Infrastructure Guide `_ @@ -78,7 +93,7 @@ User Guides Definition of acronyms, terms and concepts used in LLVM. -* `How To Add Your Build Configuration To LLVM Buildbot Infrastructure `_ +* :ref:`how_to_add_a_builder` Instructions for adding new builder to LLVM buildbot master. diff --git a/docs/yaml2obj.rst b/docs/yaml2obj.rst index cb59162..d051e7e 100644 --- a/docs/yaml2obj.rst +++ b/docs/yaml2obj.rst @@ -6,9 +6,9 @@ yaml2obj yaml2obj takes a YAML description of an object file and converts it to a binary file. - $ yaml2py input-file + $ yaml2obj input-file -.. program:: yaml2py +.. program:: yaml2obj Outputs the binary to stdout. diff --git a/examples/ExceptionDemo/ExceptionDemo.cpp b/examples/ExceptionDemo/ExceptionDemo.cpp index 6dbd662..215cb4d 100644 --- a/examples/ExceptionDemo/ExceptionDemo.cpp +++ b/examples/ExceptionDemo/ExceptionDemo.cpp @@ -10,13 +10,13 @@ // Demo program which implements an example LLVM exception implementation, and // shows several test cases including the handling of foreign exceptions. // It is run with type info types arguments to throw. A test will -// be run for each given type info type. While type info types with the value +// be run for each given type info type. While type info types with the value // of -1 will trigger a foreign C++ exception to be thrown; type info types -// <= 6 and >= 1 will cause the associated generated exceptions to be thrown +// <= 6 and >= 1 will cause the associated generated exceptions to be thrown // and caught by generated test functions; and type info types > 6 // will result in exceptions which pass through to the test harness. All other // type info types are not supported and could cause a crash. In all cases, -// the "finally" blocks of every generated test functions will executed +// the "finally" blocks of every generated test functions will executed // regardless of whether or not that test function ignores or catches the // thrown exception. 
// @@ -25,25 +25,25 @@ // ExceptionDemo // // causes a usage to be printed to stderr -// +// // ExceptionDemo 2 3 7 -1 // // results in the following cases: -// - Value 2 causes an exception with a type info type of 2 to be +// - Value 2 causes an exception with a type info type of 2 to be // thrown and caught by an inner generated test function. -// - Value 3 causes an exception with a type info type of 3 to be +// - Value 3 causes an exception with a type info type of 3 to be // thrown and caught by an outer generated test function. -// - Value 7 causes an exception with a type info type of 7 to be +// - Value 7 causes an exception with a type info type of 7 to be // thrown and NOT be caught by any generated function. // - Value -1 causes a foreign C++ exception to be thrown and not be // caught by any generated function // // Cases -1 and 7 are caught by a C++ test harness where the validity of -// of a C++ catch(...) clause catching a generated exception with a -// type info type of 7 is explained by: example in rules 1.6.4 in +// of a C++ catch(...) clause catching a generated exception with a +// type info type of 7 is explained by: example in rules 1.6.4 in // http://sourcery.mentor.com/public/cxx-abi/abi-eh.html (v1.22) // -// This code uses code from the llvm compiler-rt project and the llvm +// This code uses code from the llvm compiler-rt project and the llvm // Kaleidoscope project. // //===----------------------------------------------------------------------===// @@ -57,18 +57,18 @@ #include "llvm/PassManager.h" #include "llvm/Intrinsics.h" #include "llvm/Analysis/Verifier.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/TargetSelect.h" -// FIXME: Although all systems tested with (Linux, OS X), do not need this -// header file included. A user on ubuntu reported, undefined symbols +// FIXME: Although all systems tested with (Linux, OS X), do not need this +// header file included. A user on ubuntu reported, undefined symbols // for stderr, and fprintf, and the addition of this include fixed the -// issue for them. Given that LLVM's best practices include the goal -// of reducing the number of redundant header files included, the -// correct solution would be to find out why these symbols are not +// issue for them. Given that LLVM's best practices include the goal +// of reducing the number of redundant header files included, the +// correct solution would be to find out why these symbols are not // defined for the system in question, and fix the issue by finding out // which LLVM header file, if any, would include these symbols. 
#include @@ -81,11 +81,11 @@ #define USE_GLOBAL_STR_CONSTS true #endif -// System C++ ABI unwind types from: +// System C++ ABI unwind types from: // http://sourcery.mentor.com/public/cxx-abi/abi-eh.html (v1.22) extern "C" { - + typedef enum { _URC_NO_REASON = 0, _URC_FOREIGN_EXCEPTION_CAUGHT = 1, @@ -97,7 +97,7 @@ extern "C" { _URC_INSTALL_CONTEXT = 7, _URC_CONTINUE_UNWIND = 8 } _Unwind_Reason_Code; - + typedef enum { _UA_SEARCH_PHASE = 1, _UA_CLEANUP_PHASE = 2, @@ -105,34 +105,34 @@ extern "C" { _UA_FORCE_UNWIND = 8, _UA_END_OF_STACK = 16 } _Unwind_Action; - + struct _Unwind_Exception; - + typedef void (*_Unwind_Exception_Cleanup_Fn) (_Unwind_Reason_Code, struct _Unwind_Exception *); - + struct _Unwind_Exception { uint64_t exception_class; _Unwind_Exception_Cleanup_Fn exception_cleanup; - - uintptr_t private_1; - uintptr_t private_2; - + + uintptr_t private_1; + uintptr_t private_2; + // @@@ The IA-64 ABI says that this structure must be double-word aligned. - // Taking that literally does not make much sense generically. Instead + // Taking that literally does not make much sense generically. Instead // we provide the maximum alignment required by any type for the machine. } __attribute__((__aligned__)); - + struct _Unwind_Context; typedef struct _Unwind_Context *_Unwind_Context_t; - + extern const uint8_t *_Unwind_GetLanguageSpecificData (_Unwind_Context_t c); extern uintptr_t _Unwind_GetGR (_Unwind_Context_t c, int i); extern void _Unwind_SetGR (_Unwind_Context_t c, int i, uintptr_t n); extern void _Unwind_SetIP (_Unwind_Context_t, uintptr_t new_value); extern uintptr_t _Unwind_GetIP (_Unwind_Context_t context); extern uintptr_t _Unwind_GetRegionStart (_Unwind_Context_t context); - + } // extern "C" // @@ -148,13 +148,13 @@ struct OurExceptionType_t { /// This is our Exception class which relies on a negative offset to calculate /// pointers to its instances from pointers to its unwindException member. -/// +/// /// Note: The above unwind.h defines struct _Unwind_Exception to be aligned /// on a double word boundary. This is necessary to match the standard: /// http://refspecs.freestandards.org/abi-eh-1.21.html struct OurBaseException_t { struct OurExceptionType_t type; - + // Note: This is properly aligned in unwind.h struct _Unwind_Exception unwindException; }; @@ -165,7 +165,7 @@ typedef struct OurBaseException_t OurException; typedef struct _Unwind_Exception OurUnwindException; // -// Various globals used to support typeinfo and generatted exceptions in +// Various globals used to support typeinfo and generatted exceptions in // general // @@ -173,7 +173,7 @@ static std::map namedValues; int64_t ourBaseFromUnwindOffset; -const unsigned char ourBaseExcpClassChars[] = +const unsigned char ourBaseExcpClassChars[] = {'o', 'b', 'j', '\0', 'b', 'a', 's', '\0'}; @@ -203,7 +203,7 @@ typedef std::vector ArgTypes; /// @param retType function return type /// @param theArgTypes function's ordered argument types /// @param theArgNames function's ordered arguments needed if use of this -/// function corresponds to a function definition. Use empty +/// function corresponds to a function definition. Use empty /// aggregate for function declarations. 
/// @param functName function name /// @param linkage function linkage @@ -224,17 +224,17 @@ llvm::Function *createFunction(llvm::Module &module, llvm::Function::Create(functType, linkage, functName, &module); if (!ret || declarationOnly) return(ret); - + namedValues.clear(); - unsigned i = 0; + unsigned i = 0; for (llvm::Function::arg_iterator argIndex = ret->arg_begin(); i != theArgNames.size(); ++argIndex, ++i) { - + argIndex->setName(theArgNames[i]); namedValues[theArgNames[i]] = argIndex; } - + return(ret); } @@ -250,13 +250,13 @@ static llvm::AllocaInst *createEntryBlockAlloca(llvm::Function &function, const std::string &varName, llvm::Type *type, llvm::Constant *initWith = 0) { - llvm::BasicBlock &block = function.getEntryBlock(); + llvm::BasicBlock &block = function.getEntryBlock(); llvm::IRBuilder<> tmp(&block, block.begin()); llvm::AllocaInst *ret = tmp.CreateAlloca(type, 0, varName.c_str()); - - if (initWith) + + if (initWith) tmp.CreateStore(initWith, ret); - + return(ret); } @@ -266,7 +266,7 @@ static llvm::AllocaInst *createEntryBlockAlloca(llvm::Function &function, // // -// Runtime C Library functions +// Runtime C Library functions // // Note: using an extern "C" block so that static functions can be used @@ -275,7 +275,7 @@ extern "C" { // Note: Better ways to decide on bit width // /// Prints a 32 bit number, according to the format, to stderr. -/// @param intToPrint integer to print +/// @param intToPrint integer to print /// @param format printf like format to use when printing void print32Int(int intToPrint, const char *format) { if (format) { @@ -292,7 +292,7 @@ void print32Int(int intToPrint, const char *format) { // Note: Better ways to decide on bit width // /// Prints a 64 bit number, according to the format, to stderr. -/// @param intToPrint integer to print +/// @param intToPrint integer to print /// @param format printf like format to use when printing void print64Int(long int intToPrint, const char *format) { if (format) { @@ -327,19 +327,19 @@ void deleteOurException(OurUnwindException *expToDelete) { fprintf(stderr, "deleteOurException(...).\n"); #endif - + if (expToDelete && (expToDelete->exception_class == ourBaseExceptionClass)) { - + free(((char*) expToDelete) + ourBaseFromUnwindOffset); } } -/// This function is the struct _Unwind_Exception API mandated delete function -/// used by foreign exception handlers when deleting our exception +/// This function is the struct _Unwind_Exception API mandated delete function +/// used by foreign exception handlers when deleting our exception /// (OurException), instances. 
-/// @param reason @link http://refspecs.freestandards.org/abi-eh-1.21.html +/// @param reason @link http://refspecs.freestandards.org/abi-eh-1.21.html /// @unlink /// @param expToDelete exception instance to delete void deleteFromUnwindOurException(_Unwind_Reason_Code reason, @@ -348,7 +348,7 @@ void deleteFromUnwindOurException(_Unwind_Reason_Code reason, fprintf(stderr, "deleteFromUnwindOurException(...).\n"); #endif - + deleteOurException(expToDelete); } @@ -362,13 +362,13 @@ OurUnwindException *createOurException(int type) { (ret->type).type = type; (ret->unwindException).exception_class = ourBaseExceptionClass; (ret->unwindException).exception_cleanup = deleteFromUnwindOurException; - + return(&(ret->unwindException)); } -/// Read a uleb128 encoded value and advance pointer -/// See Variable Length Data in: +/// Read a uleb128 encoded value and advance pointer +/// See Variable Length Data in: /// @link http://dwarfstd.org/Dwarf3.pdf @unlink /// @param data reference variable holding memory pointer to decode from /// @returns decoded value @@ -377,22 +377,22 @@ static uintptr_t readULEB128(const uint8_t **data) { uintptr_t shift = 0; unsigned char byte; const uint8_t *p = *data; - + do { byte = *p++; result |= (byte & 0x7f) << shift; shift += 7; - } + } while (byte & 0x80); - + *data = p; - + return result; } -/// Read a sleb128 encoded value and advance pointer -/// See Variable Length Data in: +/// Read a sleb128 encoded value and advance pointer +/// See Variable Length Data in: /// @link http://dwarfstd.org/Dwarf3.pdf @unlink /// @param data reference variable holding memory pointer to decode from /// @returns decoded value @@ -401,26 +401,26 @@ static uintptr_t readSLEB128(const uint8_t **data) { uintptr_t shift = 0; unsigned char byte; const uint8_t *p = *data; - + do { byte = *p++; result |= (byte & 0x7f) << shift; shift += 7; - } + } while (byte & 0x80); - + *data = p; - + if ((byte & 0x40) && (shift < (sizeof(result) << 3))) { result |= (~0 << shift); } - + return result; } -/// Read a pointer encoded value and advance pointer -/// See Variable Length Data in: +/// Read a pointer encoded value and advance pointer +/// See Variable Length Data in: /// @link http://dwarfstd.org/Dwarf3.pdf @unlink /// @param data reference variable holding memory pointer to decode from /// @param encoding dwarf encoding type @@ -428,11 +428,11 @@ static uintptr_t readSLEB128(const uint8_t **data) { static uintptr_t readEncodedPointer(const uint8_t **data, uint8_t encoding) { uintptr_t result = 0; const uint8_t *p = *data; - - if (encoding == llvm::dwarf::DW_EH_PE_omit) + + if (encoding == llvm::dwarf::DW_EH_PE_omit) return(result); - - // first get value + + // first get value switch (encoding & 0x0F) { case llvm::dwarf::DW_EH_PE_absptr: result = *((uintptr_t*)p); @@ -470,15 +470,15 @@ static uintptr_t readEncodedPointer(const uint8_t **data, uint8_t encoding) { p += sizeof(int64_t); break; default: - // not supported + // not supported abort(); break; } - - // then add relative offset + + // then add relative offset switch (encoding & 0x70) { case llvm::dwarf::DW_EH_PE_absptr: - // do nothing + // do nothing break; case llvm::dwarf::DW_EH_PE_pcrel: result += (uintptr_t)(*data); @@ -488,34 +488,34 @@ static uintptr_t readEncodedPointer(const uint8_t **data, uint8_t encoding) { case llvm::dwarf::DW_EH_PE_funcrel: case llvm::dwarf::DW_EH_PE_aligned: default: - // not supported + // not supported abort(); break; } - - // then apply indirection + + // then apply indirection if (encoding & 
llvm::dwarf::DW_EH_PE_indirect) { result = *((uintptr_t*)result); } - + *data = p; - + return result; } -/// Deals with Dwarf actions matching our type infos -/// (OurExceptionType_t instances). Returns whether or not a dwarf emitted -/// action matches the supplied exception type. If such a match succeeds, -/// the resultAction argument will be set with > 0 index value. Only -/// corresponding llvm.eh.selector type info arguments, cleanup arguments +/// Deals with Dwarf actions matching our type infos +/// (OurExceptionType_t instances). Returns whether or not a dwarf emitted +/// action matches the supplied exception type. If such a match succeeds, +/// the resultAction argument will be set with > 0 index value. Only +/// corresponding llvm.eh.selector type info arguments, cleanup arguments /// are supported. Filters are not supported. -/// See Variable Length Data in: +/// See Variable Length Data in: /// @link http://dwarfstd.org/Dwarf3.pdf @unlink /// Also see @link http://refspecs.freestandards.org/abi-eh-1.21.html @unlink /// @param resultAction reference variable which will be set with result /// @param classInfo our array of type info pointers (to globals) -/// @param actionEntry index into above type info array or 0 (clean up). +/// @param actionEntry index into above type info array or 0 (clean up). /// We do not support filters. /// @param exceptionClass exception class (_Unwind_Exception::exception_class) /// of thrown exception. @@ -523,22 +523,22 @@ static uintptr_t readEncodedPointer(const uint8_t **data, uint8_t encoding) { /// @returns whether or not a type info was found. False is returned if only /// a cleanup was found static bool handleActionValue(int64_t *resultAction, - struct OurExceptionType_t **classInfo, - uintptr_t actionEntry, - uint64_t exceptionClass, + struct OurExceptionType_t **classInfo, + uintptr_t actionEntry, + uint64_t exceptionClass, struct _Unwind_Exception *exceptionObject) { bool ret = false; - - if (!resultAction || - !exceptionObject || + + if (!resultAction || + !exceptionObject || (exceptionClass != ourBaseExceptionClass)) return(ret); - + struct OurBaseException_t *excp = (struct OurBaseException_t*) (((char*) exceptionObject) + ourBaseFromUnwindOffset); struct OurExceptionType_t *excpType = &(excp->type); int type = excpType->type; - + #ifdef DEBUG fprintf(stderr, "handleActionValue(...): exceptionObject = <%p>, " @@ -546,12 +546,12 @@ static bool handleActionValue(int64_t *resultAction, exceptionObject, excp); #endif - + const uint8_t *actionPos = (uint8_t*) actionEntry, *tempActionPos; int64_t typeOffset = 0, actionOffset; - + for (int i = 0; true; ++i) { // Each emitted dwarf action corresponds to a 2 tuple of // type info address offset, and action offset to the next @@ -559,7 +559,7 @@ static bool handleActionValue(int64_t *resultAction, typeOffset = readSLEB128(&actionPos); tempActionPos = actionPos; actionOffset = readSLEB128(&tempActionPos); - + #ifdef DEBUG fprintf(stderr, "handleActionValue(...):typeOffset: <%lld>, " @@ -567,9 +567,9 @@ static bool handleActionValue(int64_t *resultAction, typeOffset, actionOffset); #endif - assert((typeOffset >= 0) && + assert((typeOffset >= 0) && "handleActionValue(...):filters are not supported."); - + // Note: A typeOffset == 0 implies that a cleanup llvm.eh.selector // argument has been matched. 
if ((typeOffset > 0) && @@ -583,17 +583,17 @@ static bool handleActionValue(int64_t *resultAction, ret = true; break; } - + #ifdef DEBUG fprintf(stderr, "handleActionValue(...):actionValue not found.\n"); #endif if (!actionOffset) break; - + actionPos += actionOffset; } - + return(ret); } @@ -602,52 +602,52 @@ static bool handleActionValue(int64_t *resultAction, /// See @link http://refspecs.freestandards.org/abi-eh-1.21.html @unlink /// @param version unsupported (ignored), unwind version /// @param lsda language specific data area -/// @param _Unwind_Action actions minimally supported unwind stage +/// @param _Unwind_Action actions minimally supported unwind stage /// (forced specifically not supported) /// @param exceptionClass exception class (_Unwind_Exception::exception_class) /// of thrown exception. /// @param exceptionObject thrown _Unwind_Exception instance. /// @param context unwind system context -/// @returns minimally supported unwinding control indicator -static _Unwind_Reason_Code handleLsda(int version, +/// @returns minimally supported unwinding control indicator +static _Unwind_Reason_Code handleLsda(int version, const uint8_t *lsda, _Unwind_Action actions, - uint64_t exceptionClass, + uint64_t exceptionClass, struct _Unwind_Exception *exceptionObject, _Unwind_Context_t context) { _Unwind_Reason_Code ret = _URC_CONTINUE_UNWIND; - + if (!lsda) return(ret); - + #ifdef DEBUG - fprintf(stderr, + fprintf(stderr, "handleLsda(...):lsda is non-zero.\n"); #endif - + // Get the current instruction pointer and offset it before next // instruction in the current frame which threw the exception. uintptr_t pc = _Unwind_GetIP(context)-1; - - // Get beginning current frame's code (as defined by the + + // Get beginning current frame's code (as defined by the // emitted dwarf code) uintptr_t funcStart = _Unwind_GetRegionStart(context); uintptr_t pcOffset = pc - funcStart; struct OurExceptionType_t **classInfo = NULL; - + // Note: See JITDwarfEmitter::EmitExceptionTable(...) for corresponding // dwarf emission - + // Parse LSDA header. uint8_t lpStartEncoding = *lsda++; - + if (lpStartEncoding != llvm::dwarf::DW_EH_PE_omit) { - readEncodedPointer(&lsda, lpStartEncoding); + readEncodedPointer(&lsda, lpStartEncoding); } - + uint8_t ttypeEncoding = *lsda++; uintptr_t classInfoOffset; - + if (ttypeEncoding != llvm::dwarf::DW_EH_PE_omit) { // Calculate type info locations in emitted dwarf code which // were flagged by type info arguments to llvm.eh.selector @@ -655,47 +655,47 @@ static _Unwind_Reason_Code handleLsda(int version, classInfoOffset = readULEB128(&lsda); classInfo = (struct OurExceptionType_t**) (lsda + classInfoOffset); } - - // Walk call-site table looking for range that - // includes current PC. - + + // Walk call-site table looking for range that + // includes current PC. 
+ uint8_t callSiteEncoding = *lsda++; uint32_t callSiteTableLength = readULEB128(&lsda); const uint8_t *callSiteTableStart = lsda; - const uint8_t *callSiteTableEnd = callSiteTableStart + + const uint8_t *callSiteTableEnd = callSiteTableStart + callSiteTableLength; const uint8_t *actionTableStart = callSiteTableEnd; const uint8_t *callSitePtr = callSiteTableStart; - + bool foreignException = false; - + while (callSitePtr < callSiteTableEnd) { - uintptr_t start = readEncodedPointer(&callSitePtr, + uintptr_t start = readEncodedPointer(&callSitePtr, callSiteEncoding); - uintptr_t length = readEncodedPointer(&callSitePtr, + uintptr_t length = readEncodedPointer(&callSitePtr, callSiteEncoding); - uintptr_t landingPad = readEncodedPointer(&callSitePtr, + uintptr_t landingPad = readEncodedPointer(&callSitePtr, callSiteEncoding); - + // Note: Action value uintptr_t actionEntry = readULEB128(&callSitePtr); - + if (exceptionClass != ourBaseExceptionClass) { // We have been notified of a foreign exception being thrown, // and we therefore need to execute cleanup landing pads actionEntry = 0; foreignException = true; } - + if (landingPad == 0) { #ifdef DEBUG fprintf(stderr, "handleLsda(...): No landing pad found.\n"); #endif - + continue; // no landing pad for this entry } - + if (actionEntry) { actionEntry += ((uintptr_t) actionTableStart) - 1; } @@ -705,55 +705,55 @@ static _Unwind_Reason_Code handleLsda(int version, "handleLsda(...):No action table found.\n"); #endif } - + bool exceptionMatched = false; - + if ((start <= pcOffset) && (pcOffset < (start + length))) { #ifdef DEBUG fprintf(stderr, "handleLsda(...): Landing pad found.\n"); #endif int64_t actionValue = 0; - + if (actionEntry) { exceptionMatched = handleActionValue(&actionValue, - classInfo, - actionEntry, - exceptionClass, + classInfo, + actionEntry, + exceptionClass, exceptionObject); } - + if (!(actions & _UA_SEARCH_PHASE)) { #ifdef DEBUG fprintf(stderr, "handleLsda(...): installed landing pad " "context.\n"); #endif - + // Found landing pad for the PC. - // Set Instruction Pointer to so we re-enter function - // at landing pad. The landing pad is created by the + // Set Instruction Pointer to so we re-enter function + // at landing pad. The landing pad is created by the // compiler to take two parameters in registers. - _Unwind_SetGR(context, - __builtin_eh_return_data_regno(0), + _Unwind_SetGR(context, + __builtin_eh_return_data_regno(0), (uintptr_t)exceptionObject); - + // Note: this virtual register directly corresponds // to the return of the llvm.eh.selector intrinsic if (!actionEntry || !exceptionMatched) { // We indicate cleanup only - _Unwind_SetGR(context, - __builtin_eh_return_data_regno(1), + _Unwind_SetGR(context, + __builtin_eh_return_data_regno(1), 0); } else { // Matched type info index of llvm.eh.selector intrinsic // passed here. - _Unwind_SetGR(context, - __builtin_eh_return_data_regno(1), + _Unwind_SetGR(context, + __builtin_eh_return_data_regno(1), actionValue); } - + // To execute landing pad set here _Unwind_SetIP(context, funcStart + landingPad); ret = _URC_INSTALL_CONTEXT; @@ -767,19 +767,19 @@ static _Unwind_Reason_Code handleLsda(int version, } else { // Note: Only non-clean up handlers are marked as - // found. Otherwise the clean up handlers will be - // re-found and executed during the clean up + // found. Otherwise the clean up handlers will be + // re-found and executed during the clean up // phase. 
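The loop above walks the call-site table looking for the record whose [start, start+length) range covers the faulting PC offset. A self-contained C++ sketch of just that range test, with plain arrays standing in for the ULEB128-encoded table (the struct layout and sample values are illustrative, not the demo's real encoding):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct CallSite { uintptr_t start, length, landingPad; };

    static const CallSite *findCallSite(const CallSite *sites, size_t n,
                                        uintptr_t pcOffset) {
      for (size_t i = 0; i < n; ++i)
        if (sites[i].start <= pcOffset &&
            pcOffset < sites[i].start + sites[i].length)
          return &sites[i];          // same half-open interval test as the demo
      return 0;                      // no landing pad covers this PC offset
    }

    int main() {
      const CallSite table[] = { { 0x10, 0x20, 0x80 }, { 0x40, 0x08, 0x90 } };
      const CallSite *hit = findCallSite(table, 2, 0x24);
      printf("landing pad: %#lx\n", hit ? (unsigned long)hit->landingPad : 0ul);
      return 0;
    }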
#ifdef DEBUG fprintf(stderr, "handleLsda(...): cleanup handler found.\n"); #endif } - + break; } } - + return(ret); } @@ -788,23 +788,23 @@ static _Unwind_Reason_Code handleLsda(int version, /// dwarf unwind info block. Again see: JITDwarfEmitter.cpp. /// See @link http://refspecs.freestandards.org/abi-eh-1.21.html @unlink /// @param version unsupported (ignored), unwind version -/// @param _Unwind_Action actions minimally supported unwind stage +/// @param _Unwind_Action actions minimally supported unwind stage /// (forced specifically not supported) /// @param exceptionClass exception class (_Unwind_Exception::exception_class) /// of thrown exception. /// @param exceptionObject thrown _Unwind_Exception instance. /// @param context unwind system context -/// @returns minimally supported unwinding control indicator -_Unwind_Reason_Code ourPersonality(int version, +/// @returns minimally supported unwinding control indicator +_Unwind_Reason_Code ourPersonality(int version, _Unwind_Action actions, - uint64_t exceptionClass, + uint64_t exceptionClass, struct _Unwind_Exception *exceptionObject, _Unwind_Context_t context) { #ifdef DEBUG - fprintf(stderr, + fprintf(stderr, "We are in ourPersonality(...):actions is <%d>.\n", actions); - + if (actions & _UA_SEARCH_PHASE) { fprintf(stderr, "ourPersonality(...):In search phase.\n"); } @@ -812,15 +812,15 @@ _Unwind_Reason_Code ourPersonality(int version, fprintf(stderr, "ourPersonality(...):In non-search phase.\n"); } #endif - + const uint8_t *lsda = _Unwind_GetLanguageSpecificData(context); - + #ifdef DEBUG - fprintf(stderr, + fprintf(stderr, "ourPersonality(...):lsda = <%p>.\n", lsda); #endif - + // The real work of the personality function is captured here return(handleLsda(version, lsda, @@ -841,12 +841,12 @@ _Unwind_Reason_Code ourPersonality(int version, uint64_t genClass(const unsigned char classChars[], size_t classCharsSize) { uint64_t ret = classChars[0]; - + for (unsigned i = 1; i < classCharsSize; ++i) { ret <<= 8; ret += classChars[i]; } - + return(ret); } @@ -865,37 +865,37 @@ uint64_t genClass(const unsigned char classChars[], size_t classCharsSize) /// @param module code for module instance /// @param builder builder instance /// @param toPrint string to print -/// @param useGlobal A value of true (default) indicates a GlobalValue is -/// generated, and is used to hold the constant string. A value of -/// false indicates that the constant string will be stored on the +/// @param useGlobal A value of true (default) indicates a GlobalValue is +/// generated, and is used to hold the constant string. A value of +/// false indicates that the constant string will be stored on the /// stack. 
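genClass() above packs the identifying characters into the 64-bit _Unwind_Exception::exception_class value, first character in the most significant byte; this is how ourBaseExceptionClass is produced. A stand-alone sketch (the characters below are made up for illustration and are not the demo's ourBaseExcpClassChars):

    #include <cstdint>
    #include <cstdio>

    // Same big-endian byte packing that genClass() performs.
    static uint64_t packClass(const unsigned char chars[8]) {
      uint64_t ret = chars[0];
      for (unsigned i = 1; i < 8; ++i) {
        ret <<= 8;
        ret += chars[i];
      }
      return ret;
    }

    int main() {
      const unsigned char demo[8] = { 'o', 'b', 'j', '\0', 'd', 'e', 'm', 'o' };
      printf("0x%016llx\n", (unsigned long long)packClass(demo));
      return 0;
    }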
-void generateStringPrint(llvm::LLVMContext &context, +void generateStringPrint(llvm::LLVMContext &context, llvm::Module &module, - llvm::IRBuilder<> &builder, + llvm::IRBuilder<> &builder, std::string toPrint, bool useGlobal = true) { llvm::Function *printFunct = module.getFunction("printStr"); - + llvm::Value *stringVar; - llvm::Constant *stringConstant = + llvm::Constant *stringConstant = llvm::ConstantDataArray::getString(context, toPrint); - + if (useGlobal) { // Note: Does not work without allocation - stringVar = - new llvm::GlobalVariable(module, + stringVar = + new llvm::GlobalVariable(module, stringConstant->getType(), - true, - llvm::GlobalValue::LinkerPrivateLinkage, - stringConstant, + true, + llvm::GlobalValue::LinkerPrivateLinkage, + stringConstant, ""); } else { stringVar = builder.CreateAlloca(stringConstant->getType()); builder.CreateStore(stringConstant, stringVar); } - - llvm::Value *cast = builder.CreatePointerCast(stringVar, + + llvm::Value *cast = builder.CreatePointerCast(stringVar, builder.getInt8PtrTy()); builder.CreateCall(printFunct, cast); } @@ -909,49 +909,49 @@ void generateStringPrint(llvm::LLVMContext &context, /// @param printFunct function used to "print" integer /// @param toPrint string to print /// @param format printf like formating string for print -/// @param useGlobal A value of true (default) indicates a GlobalValue is -/// generated, and is used to hold the constant string. A value of -/// false indicates that the constant string will be stored on the +/// @param useGlobal A value of true (default) indicates a GlobalValue is +/// generated, and is used to hold the constant string. A value of +/// false indicates that the constant string will be stored on the /// stack. -void generateIntegerPrint(llvm::LLVMContext &context, +void generateIntegerPrint(llvm::LLVMContext &context, llvm::Module &module, - llvm::IRBuilder<> &builder, + llvm::IRBuilder<> &builder, llvm::Function &printFunct, llvm::Value &toPrint, - std::string format, + std::string format, bool useGlobal = true) { llvm::Constant *stringConstant = llvm::ConstantDataArray::getString(context, format); llvm::Value *stringVar; - + if (useGlobal) { // Note: Does not seem to work without allocation - stringVar = - new llvm::GlobalVariable(module, + stringVar = + new llvm::GlobalVariable(module, stringConstant->getType(), - true, - llvm::GlobalValue::LinkerPrivateLinkage, - stringConstant, + true, + llvm::GlobalValue::LinkerPrivateLinkage, + stringConstant, ""); } else { stringVar = builder.CreateAlloca(stringConstant->getType()); builder.CreateStore(stringConstant, stringVar); } - - llvm::Value *cast = builder.CreateBitCast(stringVar, + + llvm::Value *cast = builder.CreateBitCast(stringVar, builder.getInt8PtrTy()); builder.CreateCall2(&printFunct, &toPrint, cast); } -/// Generates code to handle finally block type semantics: always runs -/// regardless of whether a thrown exception is passing through or the -/// parent function is simply exiting. In addition to printing some state -/// to stderr, this code will resume the exception handling--runs the -/// unwind resume block, if the exception has not been previously caught -/// by a catch clause, and will otherwise execute the end block (terminator -/// block). In addition this function creates the corresponding function's +/// Generates code to handle finally block type semantics: always runs +/// regardless of whether a thrown exception is passing through or the +/// parent function is simply exiting. 
In addition to printing some state +/// to stderr, this code will resume the exception handling--runs the +/// unwind resume block, if the exception has not been previously caught +/// by a catch clause, and will otherwise execute the end block (terminator +/// block). In addition this function creates the corresponding function's /// stack storage for the exception pointer and catch flag status. /// @param context llvm context /// @param module code for module instance @@ -965,9 +965,9 @@ void generateIntegerPrint(llvm::LLVMContext &context, /// @param exceptionStorage reference to exception pointer storage /// @param caughtResultStorage reference to landingpad result storage /// @returns newly created block -static llvm::BasicBlock *createFinallyBlock(llvm::LLVMContext &context, - llvm::Module &module, - llvm::IRBuilder<> &builder, +static llvm::BasicBlock *createFinallyBlock(llvm::LLVMContext &context, + llvm::Module &module, + llvm::IRBuilder<> &builder, llvm::Function &toAddTo, std::string &blockName, std::string &functionId, @@ -976,21 +976,21 @@ static llvm::BasicBlock *createFinallyBlock(llvm::LLVMContext &context, llvm::Value **exceptionCaughtFlag, llvm::Value **exceptionStorage, llvm::Value **caughtResultStorage) { - assert(exceptionCaughtFlag && + assert(exceptionCaughtFlag && "ExceptionDemo::createFinallyBlock(...):exceptionCaughtFlag " "is NULL"); - assert(exceptionStorage && + assert(exceptionStorage && "ExceptionDemo::createFinallyBlock(...):exceptionStorage " "is NULL"); - assert(caughtResultStorage && + assert(caughtResultStorage && "ExceptionDemo::createFinallyBlock(...):caughtResultStorage " "is NULL"); - + *exceptionCaughtFlag = createEntryBlockAlloca(toAddTo, "exceptionCaught", ourExceptionNotThrownState->getType(), ourExceptionNotThrownState); - + llvm::PointerType *exceptionStorageType = builder.getInt8PtrTy(); *exceptionStorage = createEntryBlockAlloca(toAddTo, "exceptionStorage", @@ -1002,35 +1002,35 @@ static llvm::BasicBlock *createFinallyBlock(llvm::LLVMContext &context, ourCaughtResultType, llvm::ConstantAggregateZero::get( ourCaughtResultType)); - + llvm::BasicBlock *ret = llvm::BasicBlock::Create(context, blockName, &toAddTo); - + builder.SetInsertPoint(ret); - + std::ostringstream bufferToPrint; bufferToPrint << "Gen: Executing finally block " << blockName << " in " << functionId << "\n"; - generateStringPrint(context, - module, - builder, + generateStringPrint(context, + module, + builder, bufferToPrint.str(), USE_GLOBAL_STR_CONSTS); - + llvm::SwitchInst *theSwitch = builder.CreateSwitch(builder.CreateLoad( - *exceptionCaughtFlag), + *exceptionCaughtFlag), &terminatorBlock, 2); theSwitch->addCase(ourExceptionCaughtState, &terminatorBlock); theSwitch->addCase(ourExceptionThrownState, &unwindResumeBlock); - + return(ret); } /// Generates catch block semantics which print a string to indicate type of -/// catch executed, sets an exception caught flag, and executes passed in +/// catch executed, sets an exception caught flag, and executes passed in /// end block (terminator block). 
/// @param context llvm context /// @param module code for module instance @@ -1041,52 +1041,52 @@ static llvm::BasicBlock *createFinallyBlock(llvm::LLVMContext &context, /// @param terminatorBlock terminator "end" block /// @param exceptionCaughtFlag exception caught/thrown status /// @returns newly created block -static llvm::BasicBlock *createCatchBlock(llvm::LLVMContext &context, - llvm::Module &module, - llvm::IRBuilder<> &builder, +static llvm::BasicBlock *createCatchBlock(llvm::LLVMContext &context, + llvm::Module &module, + llvm::IRBuilder<> &builder, llvm::Function &toAddTo, std::string &blockName, std::string &functionId, llvm::BasicBlock &terminatorBlock, llvm::Value &exceptionCaughtFlag) { - + llvm::BasicBlock *ret = llvm::BasicBlock::Create(context, blockName, &toAddTo); - + builder.SetInsertPoint(ret); - + std::ostringstream bufferToPrint; bufferToPrint << "Gen: Executing catch block " << blockName << " in " << functionId << std::endl; - generateStringPrint(context, - module, - builder, + generateStringPrint(context, + module, + builder, bufferToPrint.str(), USE_GLOBAL_STR_CONSTS); builder.CreateStore(ourExceptionCaughtState, &exceptionCaughtFlag); builder.CreateBr(&terminatorBlock); - + return(ret); } -/// Generates a function which invokes a function (toInvoke) and, whose -/// unwind block will "catch" the type info types correspondingly held in the -/// exceptionTypesToCatch argument. If the toInvoke function throws an -/// exception which does not match any type info types contained in -/// exceptionTypesToCatch, the generated code will call _Unwind_Resume -/// with the raised exception. On the other hand the generated code will +/// Generates a function which invokes a function (toInvoke) and, whose +/// unwind block will "catch" the type info types correspondingly held in the +/// exceptionTypesToCatch argument. If the toInvoke function throws an +/// exception which does not match any type info types contained in +/// exceptionTypesToCatch, the generated code will call _Unwind_Resume +/// with the raised exception. On the other hand the generated code will /// normally exit if the toInvoke function does not throw an exception. -/// The generated "finally" block is always run regardless of the cause of +/// The generated "finally" block is always run regardless of the cause of /// the generated function exit. /// The generated function is returned after being verified. 
/// @param module code for module instance /// @param builder builder instance -/// @param fpm a function pass manager holding optional IR to IR +/// @param fpm a function pass manager holding optional IR to IR /// transformations /// @param toInvoke inner function to invoke /// @param ourId id used to printing purposes @@ -1094,76 +1094,76 @@ static llvm::BasicBlock *createCatchBlock(llvm::LLVMContext &context, /// @param exceptionTypesToCatch array of type info types to "catch" /// @returns generated function static -llvm::Function *createCatchWrappedInvokeFunction(llvm::Module &module, - llvm::IRBuilder<> &builder, +llvm::Function *createCatchWrappedInvokeFunction(llvm::Module &module, + llvm::IRBuilder<> &builder, llvm::FunctionPassManager &fpm, llvm::Function &toInvoke, std::string ourId, unsigned numExceptionsToCatch, unsigned exceptionTypesToCatch[]) { - + llvm::LLVMContext &context = module.getContext(); llvm::Function *toPrint32Int = module.getFunction("print32Int"); - + ArgTypes argTypes; argTypes.push_back(builder.getInt32Ty()); - + ArgNames argNames; argNames.push_back("exceptTypeToThrow"); - - llvm::Function *ret = createFunction(module, + + llvm::Function *ret = createFunction(module, builder.getVoidTy(), - argTypes, - argNames, + argTypes, + argNames, ourId, - llvm::Function::ExternalLinkage, - false, + llvm::Function::ExternalLinkage, + false, false); - + // Block which calls invoke llvm::BasicBlock *entryBlock = llvm::BasicBlock::Create(context, - "entry", + "entry", ret); // Normal block for invoke - llvm::BasicBlock *normalBlock = llvm::BasicBlock::Create(context, - "normal", + llvm::BasicBlock *normalBlock = llvm::BasicBlock::Create(context, + "normal", ret); // Unwind block for invoke - llvm::BasicBlock *exceptionBlock = llvm::BasicBlock::Create(context, - "exception", + llvm::BasicBlock *exceptionBlock = llvm::BasicBlock::Create(context, + "exception", ret); - + // Block which routes exception to correct catch handler block - llvm::BasicBlock *exceptionRouteBlock = llvm::BasicBlock::Create(context, - "exceptionRoute", + llvm::BasicBlock *exceptionRouteBlock = llvm::BasicBlock::Create(context, + "exceptionRoute", ret); - + // Foreign exception handler - llvm::BasicBlock *externalExceptionBlock = llvm::BasicBlock::Create(context, - "externalException", + llvm::BasicBlock *externalExceptionBlock = llvm::BasicBlock::Create(context, + "externalException", ret); - + // Block which calls _Unwind_Resume - llvm::BasicBlock *unwindResumeBlock = llvm::BasicBlock::Create(context, - "unwindResume", + llvm::BasicBlock *unwindResumeBlock = llvm::BasicBlock::Create(context, + "unwindResume", ret); - + // Clean up block which delete exception if needed llvm::BasicBlock *endBlock = llvm::BasicBlock::Create(context, "end", ret); - + std::string nextName; std::vector catchBlocks(numExceptionsToCatch); llvm::Value *exceptionCaughtFlag = NULL; llvm::Value *exceptionStorage = NULL; llvm::Value *caughtResultStorage = NULL; - - // Finally block which will branch to unwindResumeBlock if + + // Finally block which will branch to unwindResumeBlock if // exception is not caught. Initializes/allocates stack locations. 
- llvm::BasicBlock *finallyBlock = createFinallyBlock(context, - module, - builder, - *ret, - nextName = "finally", + llvm::BasicBlock *finallyBlock = createFinallyBlock(context, + module, + builder, + *ret, + nextName = "finally", ourId, *endBlock, *unwindResumeBlock, @@ -1171,74 +1171,74 @@ llvm::Function *createCatchWrappedInvokeFunction(llvm::Module &module, &exceptionStorage, &caughtResultStorage ); - + for (unsigned i = 0; i < numExceptionsToCatch; ++i) { nextName = ourTypeInfoNames[exceptionTypesToCatch[i]]; - + // One catch block per type info to be caught - catchBlocks[i] = createCatchBlock(context, - module, - builder, + catchBlocks[i] = createCatchBlock(context, + module, + builder, *ret, - nextName, + nextName, ourId, *finallyBlock, *exceptionCaughtFlag); } - + // Entry Block - + builder.SetInsertPoint(entryBlock); - + std::vector args; args.push_back(namedValues["exceptTypeToThrow"]); - builder.CreateInvoke(&toInvoke, - normalBlock, - exceptionBlock, + builder.CreateInvoke(&toInvoke, + normalBlock, + exceptionBlock, args); - + // End Block - + builder.SetInsertPoint(endBlock); - - generateStringPrint(context, + + generateStringPrint(context, module, - builder, + builder, "Gen: In end block: exiting in " + ourId + ".\n", USE_GLOBAL_STR_CONSTS); llvm::Function *deleteOurException = module.getFunction("deleteOurException"); - + // Note: function handles NULL exceptions - builder.CreateCall(deleteOurException, + builder.CreateCall(deleteOurException, builder.CreateLoad(exceptionStorage)); builder.CreateRetVoid(); - + // Normal Block - + builder.SetInsertPoint(normalBlock); - - generateStringPrint(context, + + generateStringPrint(context, module, - builder, + builder, "Gen: No exception in " + ourId + "!\n", USE_GLOBAL_STR_CONSTS); - + // Finally block is always called builder.CreateBr(finallyBlock); - + // Unwind Resume Block - + builder.SetInsertPoint(unwindResumeBlock); - + builder.CreateResume(builder.CreateLoad(caughtResultStorage)); - + // Exception Block - + builder.SetInsertPoint(exceptionBlock); - + llvm::Function *personality = module.getFunction("ourPersonality"); - - llvm::LandingPadInst *caughtResult = + + llvm::LandingPadInst *caughtResult = builder.CreateLandingPad(ourCaughtResultType, personality, numExceptionsToCatch, @@ -1255,48 +1255,48 @@ llvm::Function *createCatchWrappedInvokeFunction(llvm::Module &module, llvm::Value *unwindException = builder.CreateExtractValue(caughtResult, 0); llvm::Value *retTypeInfoIndex = builder.CreateExtractValue(caughtResult, 1); - // FIXME: Redundant storage which, beyond utilizing value of - // caughtResultStore for unwindException storage, may be alleviated + // FIXME: Redundant storage which, beyond utilizing value of + // caughtResultStore for unwindException storage, may be alleviated // altogether with a block rearrangement builder.CreateStore(caughtResult, caughtResultStorage); builder.CreateStore(unwindException, exceptionStorage); builder.CreateStore(ourExceptionThrownState, exceptionCaughtFlag); - - // Retrieve exception_class member from thrown exception + + // Retrieve exception_class member from thrown exception // (_Unwind_Exception instance). This member tells us whether or not // the exception is foreign. 
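The generated IR that follows loads only the leading exception_class field of the thrown _Unwind_Exception to decide between the catch-router path and the foreign-exception path. A simplified C++ sketch of that test; the struct here is a stand-in, not the full ABI definition:

    #include <cstdint>

    // Simplified head of _Unwind_Exception: the first field is the 64-bit
    // exception_class the generated code compares against.
    struct UnwindExceptionHead { uint64_t exception_class; };

    static bool isOurException(const UnwindExceptionHead *e, uint64_t ourClass) {
      // Mismatch means a foreign exception: only cleanup/finally work is done,
      // matching the conditional branch to externalExceptionBlock below.
      return e->exception_class == ourClass;
    }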
- llvm::Value *unwindExceptionClass = + llvm::Value *unwindExceptionClass = builder.CreateLoad(builder.CreateStructGEP( - builder.CreatePointerCast(unwindException, - ourUnwindExceptionType->getPointerTo()), + builder.CreatePointerCast(unwindException, + ourUnwindExceptionType->getPointerTo()), 0)); - + // Branch to the externalExceptionBlock if the exception is foreign or // to a catch router if not. Either way the finally block will be run. builder.CreateCondBr(builder.CreateICmpEQ(unwindExceptionClass, - llvm::ConstantInt::get(builder.getInt64Ty(), + llvm::ConstantInt::get(builder.getInt64Ty(), ourBaseExceptionClass)), exceptionRouteBlock, externalExceptionBlock); - + // External Exception Block - + builder.SetInsertPoint(externalExceptionBlock); - - generateStringPrint(context, + + generateStringPrint(context, module, - builder, + builder, "Gen: Foreign exception received.\n", USE_GLOBAL_STR_CONSTS); - + // Branch to the finally block builder.CreateBr(finallyBlock); - + // Exception Route Block - + builder.SetInsertPoint(exceptionRouteBlock); - - // Casts exception pointer (_Unwind_Exception instance) to parent + + // Casts exception pointer (_Unwind_Exception instance) to parent // (OurException instance). // // Note: ourBaseFromUnwindOffset is usually negative @@ -1304,34 +1304,34 @@ llvm::Function *createCatchWrappedInvokeFunction(llvm::Module &module, builder.CreateConstGEP1_64(unwindException, ourBaseFromUnwindOffset), ourExceptionType->getPointerTo()); - + // Retrieve thrown exception type info type // // Note: Index is not relative to pointer but instead to structure // unlike a true getelementptr (GEP) instruction typeInfoThrown = builder.CreateStructGEP(typeInfoThrown, 0); - - llvm::Value *typeInfoThrownType = + + llvm::Value *typeInfoThrownType = builder.CreateStructGEP(typeInfoThrown, 0); - - generateIntegerPrint(context, + + generateIntegerPrint(context, module, - builder, - *toPrint32Int, + builder, + *toPrint32Int, *(builder.CreateLoad(typeInfoThrownType)), - "Gen: Exception type <%d> received (stack unwound) " - " in " + - ourId + + "Gen: Exception type <%d> received (stack unwound) " + " in " + + ourId + ".\n", USE_GLOBAL_STR_CONSTS); - + // Route to matched type info catch block or run cleanup finally block - llvm::SwitchInst *switchToCatchBlock = builder.CreateSwitch(retTypeInfoIndex, - finallyBlock, + llvm::SwitchInst *switchToCatchBlock = builder.CreateSwitch(retTypeInfoIndex, + finallyBlock, numExceptionsToCatch); - + unsigned nextTypeToCatch; - + for (unsigned i = 1; i <= numExceptionsToCatch; ++i) { nextTypeToCatch = i - 1; switchToCatchBlock->addCase(llvm::ConstantInt::get( @@ -1341,18 +1341,18 @@ llvm::Function *createCatchWrappedInvokeFunction(llvm::Module &module, llvm::verifyFunction(*ret); fpm.run(*ret); - + return(ret); } /// Generates function which throws either an exception matched to a runtime -/// determined type info type (argument to generated function), or if this -/// runtime value matches nativeThrowType, throws a foreign exception by +/// determined type info type (argument to generated function), or if this +/// runtime value matches nativeThrowType, throws a foreign exception by /// calling nativeThrowFunct. 
/// @param module code for module instance /// @param builder builder instance -/// @param fpm a function pass manager holding optional IR to IR +/// @param fpm a function pass manager holding optional IR to IR /// transformations /// @param ourId id used to printing purposes /// @param nativeThrowType a runtime argument of this value results in @@ -1361,8 +1361,8 @@ llvm::Function *createCatchWrappedInvokeFunction(llvm::Module &module, /// if the above nativeThrowType matches generated function's arg. /// @returns generated function static -llvm::Function *createThrowExceptionFunction(llvm::Module &module, - llvm::IRBuilder<> &builder, +llvm::Function *createThrowExceptionFunction(llvm::Module &module, + llvm::IRBuilder<> &builder, llvm::FunctionPassManager &fpm, std::string ourId, int32_t nativeThrowType, @@ -1373,7 +1373,7 @@ llvm::Function *createThrowExceptionFunction(llvm::Module &module, unwindArgTypes.push_back(builder.getInt32Ty()); ArgNames unwindArgNames; unwindArgNames.push_back("exceptTypeToThrow"); - + llvm::Function *ret = createFunction(module, builder.getVoidTy(), unwindArgTypes, @@ -1382,88 +1382,88 @@ llvm::Function *createThrowExceptionFunction(llvm::Module &module, llvm::Function::ExternalLinkage, false, false); - + // Throws either one of our exception or a native C++ exception depending // on a runtime argument value containing a type info type. llvm::BasicBlock *entryBlock = llvm::BasicBlock::Create(context, - "entry", + "entry", ret); // Throws a foreign exception llvm::BasicBlock *nativeThrowBlock = llvm::BasicBlock::Create(context, - "nativeThrow", + "nativeThrow", ret); // Throws one of our Exceptions llvm::BasicBlock *generatedThrowBlock = llvm::BasicBlock::Create(context, - "generatedThrow", + "generatedThrow", ret); // Retrieved runtime type info type to throw llvm::Value *exceptionType = namedValues["exceptTypeToThrow"]; - + // nativeThrowBlock block - + builder.SetInsertPoint(nativeThrowBlock); - + // Throws foreign exception builder.CreateCall(&nativeThrowFunct, exceptionType); builder.CreateUnreachable(); - + // entry block - + builder.SetInsertPoint(entryBlock); - + llvm::Function *toPrint32Int = module.getFunction("print32Int"); - generateIntegerPrint(context, + generateIntegerPrint(context, module, - builder, - *toPrint32Int, - *exceptionType, - "\nGen: About to throw exception type <%d> in " + - ourId + + builder, + *toPrint32Int, + *exceptionType, + "\nGen: About to throw exception type <%d> in " + + ourId + ".\n", USE_GLOBAL_STR_CONSTS); - + // Switches on runtime type info type value to determine whether or not - // a foreign exception is thrown. Defaults to throwing one of our + // a foreign exception is thrown. Defaults to throwing one of our // generated exceptions. llvm::SwitchInst *theSwitch = builder.CreateSwitch(exceptionType, generatedThrowBlock, 1); - - theSwitch->addCase(llvm::ConstantInt::get(llvm::Type::getInt32Ty(context), + + theSwitch->addCase(llvm::ConstantInt::get(llvm::Type::getInt32Ty(context), nativeThrowType), nativeThrowBlock); - + // generatedThrow block - + builder.SetInsertPoint(generatedThrowBlock); - + llvm::Function *createOurException = module.getFunction("createOurException"); llvm::Function *raiseOurException = module.getFunction( "_Unwind_RaiseException"); - + // Creates exception to throw with runtime type info type. 
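At the source level, the function produced by createThrowExceptionFunction() behaves roughly like the sketch below: one designated runtime value triggers a foreign C++ throw, and any other value raises one of the demo's own exceptions. The callables are passed as parameters here so the sketch does not have to re-declare the real runtime helpers; treat the parameter names as assumptions:

    #include <cstdint>

    void throwLike(int32_t typeToThrow, int32_t nativeThrowType,
                   void (*throwForeign)(int32_t),    // e.g. throwCppException
                   void *(*makeOurs)(int32_t),       // e.g. createOurException
                   void (*raise)(void *)) {          // e.g. _Unwind_RaiseException
      if (typeToThrow == nativeThrowType)
        throwForeign(typeToThrow);                   // nativeThrow block
      else
        raise(makeOurs(typeToThrow));                // generatedThrow block
      // Both calls transfer control away; the generated IR marks this
      // point unreachable.
    }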
- llvm::Value *exception = builder.CreateCall(createOurException, + llvm::Value *exception = builder.CreateCall(createOurException, namedValues["exceptTypeToThrow"]); - + // Throw generated Exception builder.CreateCall(raiseOurException, exception); builder.CreateUnreachable(); - + llvm::verifyFunction(*ret); fpm.run(*ret); - + return(ret); } static void createStandardUtilityFunctions(unsigned numTypeInfos, - llvm::Module &module, + llvm::Module &module, llvm::IRBuilder<> &builder); -/// Creates test code by generating and organizing these functions into the +/// Creates test code by generating and organizing these functions into the /// test case. The test case consists of an outer function setup to invoke -/// an inner function within an environment having multiple catch and single +/// an inner function within an environment having multiple catch and single /// finally blocks. This inner function is also setup to invoke a throw -/// function within an evironment similar in nature to the outer function's +/// function within an evironment similar in nature to the outer function's /// catch and finally blocks. Each of these two functions catch mutually /// exclusive subsets (even or odd) of the type info types configured /// for this this. All generated functions have a runtime argument which @@ -1474,26 +1474,26 @@ static void createStandardUtilityFunctions(unsigned numTypeInfos, /// a supplied a function which in turn will throw a foreign exception. /// @param module code for module instance /// @param builder builder instance -/// @param fpm a function pass manager holding optional IR to IR +/// @param fpm a function pass manager holding optional IR to IR /// transformations /// @param nativeThrowFunctName name of external function which will throw /// a foreign exception /// @returns outermost generated test function. -llvm::Function *createUnwindExceptionTest(llvm::Module &module, - llvm::IRBuilder<> &builder, +llvm::Function *createUnwindExceptionTest(llvm::Module &module, + llvm::IRBuilder<> &builder, llvm::FunctionPassManager &fpm, std::string nativeThrowFunctName) { // Number of type infos to generate unsigned numTypeInfos = 6; - + // Initialze intrisics and external functions to use along with exception // and type info globals. createStandardUtilityFunctions(numTypeInfos, module, builder); llvm::Function *nativeThrowFunct = module.getFunction(nativeThrowFunctName); - - // Create exception throw function using the value ~0 to cause + + // Create exception throw function using the value ~0 to cause // foreign exceptions to be thrown. llvm::Function *throwFunct = createThrowExceptionFunction(module, builder, @@ -1503,9 +1503,9 @@ llvm::Function *createUnwindExceptionTest(llvm::Module &module, *nativeThrowFunct); // Inner function will catch even type infos unsigned innerExceptionTypesToCatch[] = {6, 2, 4}; - size_t numExceptionTypesToCatch = sizeof(innerExceptionTypesToCatch) / + size_t numExceptionTypesToCatch = sizeof(innerExceptionTypesToCatch) / sizeof(unsigned); - + // Generate inner function. 
llvm::Function *innerCatchFunct = createCatchWrappedInvokeFunction(module, builder, @@ -1514,12 +1514,12 @@ llvm::Function *createUnwindExceptionTest(llvm::Module &module, "innerCatchFunct", numExceptionTypesToCatch, innerExceptionTypesToCatch); - + // Outer function will catch odd type infos unsigned outerExceptionTypesToCatch[] = {3, 1, 5}; - numExceptionTypesToCatch = sizeof(outerExceptionTypesToCatch) / + numExceptionTypesToCatch = sizeof(outerExceptionTypesToCatch) / sizeof(unsigned); - + // Generate outer function llvm::Function *outerCatchFunct = createCatchWrappedInvokeFunction(module, builder, @@ -1528,7 +1528,7 @@ llvm::Function *createUnwindExceptionTest(llvm::Module &module, "outerCatchFunct", numExceptionTypesToCatch, outerExceptionTypesToCatch); - + // Return outer function to run return(outerCatchFunct); } @@ -1539,15 +1539,15 @@ class OurCppRunException : public std::runtime_error { public: OurCppRunException(const std::string reason) : std::runtime_error(reason) {} - + OurCppRunException (const OurCppRunException &toCopy) : std::runtime_error(toCopy) {} - + OurCppRunException &operator = (const OurCppRunException &toCopy) { return(reinterpret_cast( std::runtime_error::operator=(toCopy))); } - + ~OurCppRunException (void) throw () {} }; @@ -1562,7 +1562,7 @@ void throwCppException (int32_t ignoreIt) { typedef void (*OurExceptionThrowFunctType) (int32_t typeToThrow); -/// This is a test harness which runs test by executing generated +/// This is a test harness which runs test by executing generated /// function with a type info type to throw. Harness wraps the execution /// of generated function in a C++ try catch clause. /// @param engine execution engine to use for executing generated function. @@ -1572,15 +1572,15 @@ typedef void (*OurExceptionThrowFunctType) (int32_t typeToThrow); /// @param typeToThrow type info type of generated exception to throw, or /// indicator to cause foreign exception to be thrown. static -void runExceptionThrow(llvm::ExecutionEngine *engine, - llvm::Function *function, +void runExceptionThrow(llvm::ExecutionEngine *engine, + llvm::Function *function, int32_t typeToThrow) { - + // Find test's function pointer - OurExceptionThrowFunctType functPtr = + OurExceptionThrowFunctType functPtr = reinterpret_cast( reinterpret_cast(engine->getPointerToFunction(function))); - + try { // Run test (*functPtr)(typeToThrow); @@ -1589,15 +1589,15 @@ void runExceptionThrow(llvm::ExecutionEngine *engine, // Catch foreign C++ exception fprintf(stderr, "\nrunExceptionThrow(...):In C++ catch OurCppRunException " - "with reason: %s.\n", + "with reason: %s.\n", exc.what()); } catch (...) { - // Catch all exceptions including our generated ones. This latter + // Catch all exceptions including our generated ones. This latter // functionality works according to the example in rules 1.6.4 of - // http://sourcery.mentor.com/public/cxx-abi/abi-eh.html (v1.22), - // given that these will be exceptions foreign to C++ - // (the _Unwind_Exception::exception_class should be different from + // http://sourcery.mentor.com/public/cxx-abi/abi-eh.html (v1.22), + // given that these will be exceptions foreign to C++ + // (the _Unwind_Exception::exception_class should be different from // the one used by C++). 
fprintf(stderr, "\nrunExceptionThrow(...):In C++ catch all.\n"); @@ -1610,32 +1610,32 @@ void runExceptionThrow(llvm::ExecutionEngine *engine, typedef llvm::ArrayRef TypeArray; -/// This initialization routine creates type info globals and +/// This initialization routine creates type info globals and /// adds external function declarations to module. /// @param numTypeInfos number of linear type info associated type info types /// to create as GlobalVariable instances, starting with the value 1. /// @param module code for module instance /// @param builder builder instance static void createStandardUtilityFunctions(unsigned numTypeInfos, - llvm::Module &module, + llvm::Module &module, llvm::IRBuilder<> &builder) { - + llvm::LLVMContext &context = module.getContext(); - + // Exception initializations - + // Setup exception catch state - ourExceptionNotThrownState = + ourExceptionNotThrownState = llvm::ConstantInt::get(llvm::Type::getInt8Ty(context), 0), - ourExceptionThrownState = + ourExceptionThrownState = llvm::ConstantInt::get(llvm::Type::getInt8Ty(context), 1), - ourExceptionCaughtState = + ourExceptionCaughtState = llvm::ConstantInt::get(llvm::Type::getInt8Ty(context), 2), - - - + + + // Create our type info type - ourTypeInfoType = llvm::StructType::get(context, + ourTypeInfoType = llvm::StructType::get(context, TypeArray(builder.getInt32Ty())); llvm::Type *caughtResultFieldTypes[] = { @@ -1648,47 +1648,47 @@ static void createStandardUtilityFunctions(unsigned numTypeInfos, TypeArray(caughtResultFieldTypes)); // Create OurException type - ourExceptionType = llvm::StructType::get(context, + ourExceptionType = llvm::StructType::get(context, TypeArray(ourTypeInfoType)); - + // Create portion of _Unwind_Exception type // // Note: Declaring only a portion of the _Unwind_Exception struct. // Does this cause problems? ourUnwindExceptionType = - llvm::StructType::get(context, + llvm::StructType::get(context, TypeArray(builder.getInt64Ty())); struct OurBaseException_t dummyException; - + // Calculate offset of OurException::unwindException member. - ourBaseFromUnwindOffset = ((uintptr_t) &dummyException) - + ourBaseFromUnwindOffset = ((uintptr_t) &dummyException) - ((uintptr_t) &(dummyException.unwindException)); - + #ifdef DEBUG fprintf(stderr, "createStandardUtilityFunctions(...):ourBaseFromUnwindOffset " "= %lld, sizeof(struct OurBaseException_t) - " "sizeof(struct _Unwind_Exception) = %lu.\n", ourBaseFromUnwindOffset, - sizeof(struct OurBaseException_t) - + sizeof(struct OurBaseException_t) - sizeof(struct _Unwind_Exception)); #endif - + size_t numChars = sizeof(ourBaseExcpClassChars) / sizeof(char); - + // Create our _Unwind_Exception::exception_class value ourBaseExceptionClass = genClass(ourBaseExcpClassChars, numChars); - + // Type infos - + std::string baseStr = "typeInfo", typeInfoName; std::ostringstream typeInfoNameBuilder; std::vector structVals; - + llvm::Constant *nextStruct; llvm::GlobalVariable *nextGlobal = NULL; - + // Generate each type info // // Note: First type info is not used. 
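ourBaseFromUnwindOffset, computed below from a dummy instance, is the distance from the embedded _Unwind_Exception member back to the start of the wrapper object, which is what lets the personality code recover the wrapper from an unwinder-supplied pointer; the same value could be written as a negated offsetof. A runnable sketch with illustrative stand-in types (not the demo's real OurBaseException_t):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Payload { int type; };
    struct Wrapped { Payload payload; uint64_t unwindHeader; };

    int main() {
      Wrapped dummy;
      // Same address-difference trick the demo uses; the result equals
      // -(int64_t)offsetof(Wrapped, unwindHeader), so it is typically negative.
      int64_t baseFromUnwind =
          (int64_t)((uintptr_t)&dummy - (uintptr_t)&dummy.unwindHeader);
      printf("base-from-unwind offset = %lld, offsetof = %zu\n",
             (long long)baseFromUnwind, offsetof(Wrapped, unwindHeader));
      return 0;
    }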
@@ -1696,202 +1696,202 @@ static void createStandardUtilityFunctions(unsigned numTypeInfos, structVals.clear(); structVals.push_back(llvm::ConstantInt::get(builder.getInt32Ty(), i)); nextStruct = llvm::ConstantStruct::get(ourTypeInfoType, structVals); - + typeInfoNameBuilder.str(""); typeInfoNameBuilder << baseStr << i; typeInfoName = typeInfoNameBuilder.str(); - + // Note: Does not seem to work without allocation - nextGlobal = - new llvm::GlobalVariable(module, - ourTypeInfoType, - true, - llvm::GlobalValue::ExternalLinkage, - nextStruct, + nextGlobal = + new llvm::GlobalVariable(module, + ourTypeInfoType, + true, + llvm::GlobalValue::ExternalLinkage, + nextStruct, typeInfoName); - + ourTypeInfoNames.push_back(typeInfoName); ourTypeInfoNamesIndex[i] = typeInfoName; } - + ArgNames argNames; ArgTypes argTypes; llvm::Function *funct = NULL; - + // print32Int - + llvm::Type *retType = builder.getVoidTy(); - + argTypes.clear(); argTypes.push_back(builder.getInt32Ty()); argTypes.push_back(builder.getInt8PtrTy()); - + argNames.clear(); - - createFunction(module, - retType, - argTypes, - argNames, - "print32Int", - llvm::Function::ExternalLinkage, - true, + + createFunction(module, + retType, + argTypes, + argNames, + "print32Int", + llvm::Function::ExternalLinkage, + true, false); - + // print64Int - + retType = builder.getVoidTy(); - + argTypes.clear(); argTypes.push_back(builder.getInt64Ty()); argTypes.push_back(builder.getInt8PtrTy()); - + argNames.clear(); - - createFunction(module, - retType, - argTypes, - argNames, - "print64Int", - llvm::Function::ExternalLinkage, - true, + + createFunction(module, + retType, + argTypes, + argNames, + "print64Int", + llvm::Function::ExternalLinkage, + true, false); - + // printStr - + retType = builder.getVoidTy(); - + argTypes.clear(); argTypes.push_back(builder.getInt8PtrTy()); - + argNames.clear(); - - createFunction(module, - retType, - argTypes, - argNames, - "printStr", - llvm::Function::ExternalLinkage, - true, + + createFunction(module, + retType, + argTypes, + argNames, + "printStr", + llvm::Function::ExternalLinkage, + true, false); - + // throwCppException - + retType = builder.getVoidTy(); - + argTypes.clear(); argTypes.push_back(builder.getInt32Ty()); - + argNames.clear(); - - createFunction(module, - retType, - argTypes, - argNames, - "throwCppException", - llvm::Function::ExternalLinkage, - true, + + createFunction(module, + retType, + argTypes, + argNames, + "throwCppException", + llvm::Function::ExternalLinkage, + true, false); - + // deleteOurException - + retType = builder.getVoidTy(); - + argTypes.clear(); argTypes.push_back(builder.getInt8PtrTy()); - + argNames.clear(); - - createFunction(module, - retType, - argTypes, - argNames, - "deleteOurException", - llvm::Function::ExternalLinkage, - true, + + createFunction(module, + retType, + argTypes, + argNames, + "deleteOurException", + llvm::Function::ExternalLinkage, + true, false); - + // createOurException - + retType = builder.getInt8PtrTy(); - + argTypes.clear(); argTypes.push_back(builder.getInt32Ty()); - + argNames.clear(); - - createFunction(module, - retType, - argTypes, - argNames, - "createOurException", - llvm::Function::ExternalLinkage, - true, + + createFunction(module, + retType, + argTypes, + argNames, + "createOurException", + llvm::Function::ExternalLinkage, + true, false); - + // _Unwind_RaiseException - + retType = builder.getInt32Ty(); - + argTypes.clear(); argTypes.push_back(builder.getInt8PtrTy()); - + argNames.clear(); - - funct = createFunction(module, - 
retType, - argTypes, - argNames, - "_Unwind_RaiseException", - llvm::Function::ExternalLinkage, - true, + + funct = createFunction(module, + retType, + argTypes, + argNames, + "_Unwind_RaiseException", + llvm::Function::ExternalLinkage, + true, false); - - funct->addFnAttr(llvm::Attribute::NoReturn); - + + funct->setDoesNotReturn(); + // _Unwind_Resume - + retType = builder.getInt32Ty(); - + argTypes.clear(); argTypes.push_back(builder.getInt8PtrTy()); - + argNames.clear(); - - funct = createFunction(module, - retType, - argTypes, - argNames, - "_Unwind_Resume", - llvm::Function::ExternalLinkage, - true, + + funct = createFunction(module, + retType, + argTypes, + argNames, + "_Unwind_Resume", + llvm::Function::ExternalLinkage, + true, false); - - funct->addFnAttr(llvm::Attribute::NoReturn); - + + funct->setDoesNotReturn(); + // ourPersonality - + retType = builder.getInt32Ty(); - + argTypes.clear(); argTypes.push_back(builder.getInt32Ty()); argTypes.push_back(builder.getInt32Ty()); argTypes.push_back(builder.getInt64Ty()); argTypes.push_back(builder.getInt8PtrTy()); argTypes.push_back(builder.getInt8PtrTy()); - + argNames.clear(); - - createFunction(module, - retType, - argTypes, - argNames, - "ourPersonality", - llvm::Function::ExternalLinkage, - true, + + createFunction(module, + retType, + argTypes, + argNames, + "ourPersonality", + llvm::Function::ExternalLinkage, + true, false); - + // llvm.eh.typeid.for intrinsic - + getDeclaration(&module, llvm::Intrinsic::eh_typeid_for); } @@ -1901,7 +1901,7 @@ static void createStandardUtilityFunctions(unsigned numTypeInfos, //===----------------------------------------------------------------------===// /// Demo main routine which takes the type info types to throw. A test will -/// be run for each given type info type. While type info types with the value +/// be run for each given type info type. While type info types with the value /// of -1 will trigger a foreign C++ exception to be thrown; type info types /// <= 6 and >= 1 will be caught by test functions; and type info types > 6 /// will result in exceptions which pass through to the test harness. All other @@ -1920,87 +1920,86 @@ int main(int argc, char *argv[]) { " for a full test.\n\n"); return(0); } - + // If not set, exception handling will not be turned on llvm::TargetOptions Opts; Opts.JITExceptionHandling = true; - + llvm::InitializeNativeTarget(); llvm::LLVMContext &context = llvm::getGlobalContext(); llvm::IRBuilder<> theBuilder(context); - + // Make the module, which holds all the code. llvm::Module *module = new llvm::Module("my cool jit", context); - + // Build engine with JIT llvm::EngineBuilder factory(module); factory.setEngineKind(llvm::EngineKind::JIT); factory.setAllocateGVsWithCode(false); factory.setTargetOptions(Opts); llvm::ExecutionEngine *executionEngine = factory.create(); - + { llvm::FunctionPassManager fpm(module); - - // Set up the optimizer pipeline. + + // Set up the optimizer pipeline. // Start with registering info about how the // target lays out data structures. - fpm.add(new llvm::TargetData(*executionEngine->getTargetData())); - + fpm.add(new llvm::DataLayout(*executionEngine->getDataLayout())); + // Optimizations turned on #ifdef ADD_OPT_PASSES - + // Basic AliasAnslysis support for GVN. fpm.add(llvm::createBasicAliasAnalysisPass()); - + // Promote allocas to registers. fpm.add(llvm::createPromoteMemoryToRegisterPass()); - + // Do simple "peephole" optimizations and bit-twiddling optzns. 
fpm.add(llvm::createInstructionCombiningPass()); - + // Reassociate expressions. fpm.add(llvm::createReassociatePass()); - + // Eliminate Common SubExpressions. fpm.add(llvm::createGVNPass()); - - // Simplify the control flow graph (deleting unreachable + + // Simplify the control flow graph (deleting unreachable // blocks, etc). fpm.add(llvm::createCFGSimplificationPass()); #endif // ADD_OPT_PASSES - + fpm.doInitialization(); - + // Generate test code using function throwCppException(...) as // the function which throws foreign exceptions. - llvm::Function *toRun = - createUnwindExceptionTest(*module, - theBuilder, + llvm::Function *toRun = + createUnwindExceptionTest(*module, + theBuilder, fpm, "throwCppException"); - + fprintf(stderr, "\nBegin module dump:\n\n"); - + module->dump(); - + fprintf(stderr, "\nEnd module dump:\n"); - + fprintf(stderr, "\n\nBegin Test:\n"); - + for (int i = 1; i < argc; ++i) { // Run test for each argument whose value is the exception // type to throw. - runExceptionThrow(executionEngine, - toRun, + runExceptionThrow(executionEngine, + toRun, (unsigned) strtoul(argv[i], NULL, 10)); } - + fprintf(stderr, "\nEnd Test:\n\n"); - } - + } + delete executionEngine; - + return 0; } - diff --git a/examples/Fibonacci/fibonacci.cpp b/examples/Fibonacci/fibonacci.cpp index cfd9b1e..417ad6f 100644 --- a/examples/Fibonacci/fibonacci.cpp +++ b/examples/Fibonacci/fibonacci.cpp @@ -37,7 +37,7 @@ using namespace llvm; static Function *CreateFibFunction(Module *M, LLVMContext &Context) { - // Create the fib function and insert it into module M. This function is said + // Create the fib function and insert it into module M. This function is said // to return an int and take an int parameter. Function *FibF = cast(M->getOrInsertFunction("fib", Type::getInt32Ty(Context), diff --git a/examples/Kaleidoscope/Chapter4/toy.cpp b/examples/Kaleidoscope/Chapter4/toy.cpp index cce4466..bc6028c 100644 --- a/examples/Kaleidoscope/Chapter4/toy.cpp +++ b/examples/Kaleidoscope/Chapter4/toy.cpp @@ -7,7 +7,7 @@ #include "llvm/PassManager.h" #include "llvm/Analysis/Verifier.h" #include "llvm/Analysis/Passes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/TargetSelect.h" #include @@ -584,7 +584,7 @@ int main() { // Set up the optimizer pipeline. Start with registering info about how the // target lays out data structures. - OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData())); + OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout())); // Provide basic AliasAnalysis support for GVN. OurFPM.add(createBasicAliasAnalysisPass()); // Do simple "peephole" optimizations and bit-twiddling optzns. diff --git a/examples/Kaleidoscope/Chapter5/toy.cpp b/examples/Kaleidoscope/Chapter5/toy.cpp index 36dd760..2b0b9d5 100644 --- a/examples/Kaleidoscope/Chapter5/toy.cpp +++ b/examples/Kaleidoscope/Chapter5/toy.cpp @@ -7,7 +7,7 @@ #include "llvm/PassManager.h" #include "llvm/Analysis/Verifier.h" #include "llvm/Analysis/Passes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/TargetSelect.h" #include @@ -829,7 +829,7 @@ int main() { // Set up the optimizer pipeline. Start with registering info about how the // target lays out data structures. - OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData())); + OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout())); // Provide basic AliasAnalysis support for GVN. 
OurFPM.add(createBasicAliasAnalysisPass()); // Do simple "peephole" optimizations and bit-twiddling optzns. diff --git a/examples/Kaleidoscope/Chapter6/toy.cpp b/examples/Kaleidoscope/Chapter6/toy.cpp index db3495d..b751e35 100644 --- a/examples/Kaleidoscope/Chapter6/toy.cpp +++ b/examples/Kaleidoscope/Chapter6/toy.cpp @@ -7,7 +7,7 @@ #include "llvm/PassManager.h" #include "llvm/Analysis/Verifier.h" #include "llvm/Analysis/Passes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/TargetSelect.h" #include @@ -947,7 +947,7 @@ int main() { // Set up the optimizer pipeline. Start with registering info about how the // target lays out data structures. - OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData())); + OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout())); // Provide basic AliasAnalysis support for GVN. OurFPM.add(createBasicAliasAnalysisPass()); // Do simple "peephole" optimizations and bit-twiddling optzns. diff --git a/examples/Kaleidoscope/Chapter7/toy.cpp b/examples/Kaleidoscope/Chapter7/toy.cpp index 143b30b..0ac0996 100644 --- a/examples/Kaleidoscope/Chapter7/toy.cpp +++ b/examples/Kaleidoscope/Chapter7/toy.cpp @@ -7,7 +7,7 @@ #include "llvm/PassManager.h" #include "llvm/Analysis/Verifier.h" #include "llvm/Analysis/Passes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/TargetSelect.h" #include @@ -1111,7 +1111,7 @@ int main() { // Set up the optimizer pipeline. Start with registering info about how the // target lays out data structures. - OurFPM.add(new TargetData(*TheExecutionEngine->getTargetData())); + OurFPM.add(new DataLayout(*TheExecutionEngine->getDataLayout())); // Provide basic AliasAnalysis support for GVN. OurFPM.add(createBasicAliasAnalysisPass()); // Promote allocas to registers. diff --git a/examples/OCaml-Kaleidoscope/Chapter4/toy.ml b/examples/OCaml-Kaleidoscope/Chapter4/toy.ml index 5f9d912..5a6bde9 100644 --- a/examples/OCaml-Kaleidoscope/Chapter4/toy.ml +++ b/examples/OCaml-Kaleidoscope/Chapter4/toy.ml @@ -27,7 +27,7 @@ let main () = (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Do simple "peephole" optimizations and bit-twiddling optzn. *) add_instruction_combination the_fpm; diff --git a/examples/OCaml-Kaleidoscope/Chapter5/toy.ml b/examples/OCaml-Kaleidoscope/Chapter5/toy.ml index 5f9d912..5a6bde9 100644 --- a/examples/OCaml-Kaleidoscope/Chapter5/toy.ml +++ b/examples/OCaml-Kaleidoscope/Chapter5/toy.ml @@ -27,7 +27,7 @@ let main () = (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Do simple "peephole" optimizations and bit-twiddling optzn. *) add_instruction_combination the_fpm; diff --git a/examples/OCaml-Kaleidoscope/Chapter6/toy.ml b/examples/OCaml-Kaleidoscope/Chapter6/toy.ml index 5f9d912..5a6bde9 100644 --- a/examples/OCaml-Kaleidoscope/Chapter6/toy.ml +++ b/examples/OCaml-Kaleidoscope/Chapter6/toy.ml @@ -27,7 +27,7 @@ let main () = (* Set up the optimizer pipeline. 
Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Do simple "peephole" optimizations and bit-twiddling optzn. *) add_instruction_combination the_fpm; diff --git a/examples/OCaml-Kaleidoscope/Chapter7/toy.ml b/examples/OCaml-Kaleidoscope/Chapter7/toy.ml index babab28..f2508a4 100644 --- a/examples/OCaml-Kaleidoscope/Chapter7/toy.ml +++ b/examples/OCaml-Kaleidoscope/Chapter7/toy.ml @@ -28,7 +28,7 @@ let main () = (* Set up the optimizer pipeline. Start with registering info about how the * target lays out data structures. *) - TargetData.add (ExecutionEngine.target_data the_execution_engine) the_fpm; + DataLayout.add (ExecutionEngine.target_data the_execution_engine) the_fpm; (* Promote allocas to registers. *) add_memory_to_register_promotion the_fpm; diff --git a/include/llvm-c/Core.h b/include/llvm-c/Core.h index 0bd5db3..620d088 100644 --- a/include/llvm-c/Core.h +++ b/include/llvm-c/Core.h @@ -173,10 +173,11 @@ typedef enum { LLVMUWTable = 1 << 30, LLVMNonLazyBind = 1 << 31 - // FIXME: This attribute is currently not included in the C API as - // a temporary measure until the API/ABI impact to the C API is understood - // and the path forward agreed upon. - //LLVMAddressSafety = 1ULL << 32 + /* FIXME: This attribute is currently not included in the C API as + a temporary measure until the API/ABI impact to the C API is understood + and the path forward agreed upon. + LLVMAddressSafety = 1ULL << 32 + */ } LLVMAttribute; typedef enum { @@ -282,6 +283,7 @@ typedef enum { LLVMLinkOnceAnyLinkage, /**< Keep one copy of function when linking (inline)*/ LLVMLinkOnceODRLinkage, /**< Same, but only replaced by something equivalent. */ + LLVMLinkOnceODRAutoHideLinkage, /**< Like LinkOnceODR, but possibly hidden. */ LLVMWeakAnyLinkage, /**< Keep one copy of function when linking (weak) */ LLVMWeakODRLinkage, /**< Same, but only replaced by something equivalent. */ @@ -295,9 +297,7 @@ typedef enum { LLVMGhostLinkage, /**< Obsolete */ LLVMCommonLinkage, /**< Tentative definitions */ LLVMLinkerPrivateLinkage, /**< Like Private, but linker removes. */ - LLVMLinkerPrivateWeakLinkage, /**< Like LinkerPrivate, but is weak. */ - LLVMLinkerPrivateWeakDefAutoLinkage /**< Like LinkerPrivateWeak, but possibly - hidden. */ + LLVMLinkerPrivateWeakLinkage /**< Like LinkerPrivate, but is weak. */ } LLVMLinkage; typedef enum { @@ -1803,7 +1803,7 @@ LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg); * Set the alignment for a function parameter. * * @see llvm::Argument::addAttr() - * @see llvm::Attribute::constructAlignmentFromInt() + * @see llvm::AttrBuilder::addAlignmentAttr() */ void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align); @@ -1869,6 +1869,27 @@ LLVMValueRef LLVMMDNode(LLVMValueRef *Vals, unsigned Count); const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len); /** + * Obtain the number of operands from an MDNode value. + * + * @param V MDNode to get number of operands from. + * @return Number of operands of the MDNode. + */ +unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V); + +/** + * Obtain the given MDNode's operands. + * + * The passed LLVMValueRef pointer should point to enough memory to hold all of + * the operands of the given MDNode (see LLVMGetMDNodeNumOperands) as + * LLVMValueRefs. This memory will be populated with the LLVMValueRefs of the + * MDNode's operands. 
+ * + * @param V MDNode to get the operands from. + * @param Dest Destination array for operands. + */ +void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest); + +/** * @} */ @@ -2688,7 +2709,7 @@ namespace llvm { template inline T **unwrap(LLVMValueRef *Vals, unsigned Length) { - #if DEBUG + #ifdef DEBUG for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I) cast(*I); #endif diff --git a/include/llvm-c/Disassembler.h b/include/llvm-c/Disassembler.h index 69fdc64..b8c4ad9 100644 --- a/include/llvm-c/Disassembler.h +++ b/include/llvm-c/Disassembler.h @@ -146,6 +146,15 @@ LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo, LLVMSymbolLookupCallback SymbolLookUp); /** + * Set the disassembler's options. Returns 1 if it can set the Options and 0 + * otherwise. + */ +int LLVMSetDisasmOptions(LLVMDisasmContextRef DC, uint64_t Options); + +/* The option to produce marked up assembly. */ +#define LLVMDisassembler_Option_UseMarkup 1 + +/** * Dispose of a disassembler context. */ void LLVMDisasmDispose(LLVMDisasmContextRef DC); diff --git a/include/llvm-c/Target.h b/include/llvm-c/Target.h index 8915040..57abfa0 100644 --- a/include/llvm-c/Target.h +++ b/include/llvm-c/Target.h @@ -145,7 +145,7 @@ static inline LLVMBool LLVMInitializeNativeTarget(void) { /*===-- Target Data -------------------------------------------------------===*/ /** Creates target data from a target layout string. - See the constructor llvm::TargetData::TargetData. */ + See the constructor llvm::DataLayout::DataLayout. */ LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep); /** Adds target data information to a pass manager. This does not take ownership @@ -160,48 +160,58 @@ void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef, LLVMPassManagerRef); /** Converts target data to a target layout string. The string must be disposed with LLVMDisposeMessage. - See the constructor llvm::TargetData::TargetData. */ + See the constructor llvm::DataLayout::DataLayout. */ char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef); /** Returns the byte order of a target, either LLVMBigEndian or LLVMLittleEndian. - See the method llvm::TargetData::isLittleEndian. */ + See the method llvm::DataLayout::isLittleEndian. */ enum LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef); /** Returns the pointer size in bytes for a target. - See the method llvm::TargetData::getPointerSize. */ + See the method llvm::DataLayout::getPointerSize. */ unsigned LLVMPointerSize(LLVMTargetDataRef); +/** Returns the pointer size in bytes for a target for a specified + address space. + See the method llvm::DataLayout::getPointerSize. */ +unsigned LLVMPointerSizeForAS(LLVMTargetDataRef, unsigned AS); + /** Returns the integer type that is the same size as a pointer on a target. - See the method llvm::TargetData::getIntPtrType. */ + See the method llvm::DataLayout::getIntPtrType. */ LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef); +/** Returns the integer type that is the same size as a pointer on a target. + This version allows the address space to be specified. + See the method llvm::DataLayout::getIntPtrType. */ +LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef, unsigned AS); + /** Computes the size of a type in bytes for a target. - See the method llvm::TargetData::getTypeSizeInBits. */ + See the method llvm::DataLayout::getTypeSizeInBits. */ unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef, LLVMTypeRef); /** Computes the storage size of a type in bytes for a target. 
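LLVMSetDisasmOptions is the new way to toggle marked-up assembly output from the C disassembler interface. A sketch, assuming the triple names an already-initialized target and that no callbacks are needed:

  #include "llvm-c/Disassembler.h"

  int enableMarkup(const char *Triple) {
    LLVMDisasmContextRef DC =
        LLVMCreateDisasm(Triple, /*DisInfo=*/0, /*TagType=*/0,
                         /*GetOpInfo=*/0, /*SymbolLookUp=*/0);
    if (!DC)
      return 0;
    // Returns 1 if the option could be set, 0 otherwise.
    int OK = LLVMSetDisasmOptions(DC, LLVMDisassembler_Option_UseMarkup);
    LLVMDisasmDispose(DC);
    return OK;
  }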
- See the method llvm::TargetData::getTypeStoreSize. */ + See the method llvm::DataLayout::getTypeStoreSize. */ unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef, LLVMTypeRef); /** Computes the ABI size of a type in bytes for a target. - See the method llvm::TargetData::getTypeAllocSize. */ + See the method llvm::DataLayout::getTypeAllocSize. */ unsigned long long LLVMABISizeOfType(LLVMTargetDataRef, LLVMTypeRef); /** Computes the ABI alignment of a type in bytes for a target. - See the method llvm::TargetData::getTypeABISize. */ + See the method llvm::DataLayout::getTypeABISize. */ unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef); /** Computes the call frame alignment of a type in bytes for a target. - See the method llvm::TargetData::getTypeABISize. */ + See the method llvm::DataLayout::getTypeABISize. */ unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef); /** Computes the preferred alignment of a type in bytes for a target. - See the method llvm::TargetData::getTypeABISize. */ + See the method llvm::DataLayout::getTypeABISize. */ unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef); /** Computes the preferred alignment of a global variable in bytes for a target. - See the method llvm::TargetData::getPreferredAlignment. */ + See the method llvm::DataLayout::getPreferredAlignment. */ unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef, LLVMValueRef GlobalVar); @@ -216,7 +226,7 @@ unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef, LLVMTypeRef StructTy, unsigned Element); /** Deallocates a TargetData. - See the destructor llvm::TargetData::~TargetData. */ + See the destructor llvm::DataLayout::~DataLayout. */ void LLVMDisposeTargetData(LLVMTargetDataRef); /** @@ -227,15 +237,15 @@ void LLVMDisposeTargetData(LLVMTargetDataRef); } namespace llvm { - class TargetData; + class DataLayout; class TargetLibraryInfo; - inline TargetData *unwrap(LLVMTargetDataRef P) { - return reinterpret_cast(P); + inline DataLayout *unwrap(LLVMTargetDataRef P) { + return reinterpret_cast(P); } - inline LLVMTargetDataRef wrap(const TargetData *P) { - return reinterpret_cast(const_cast(P)); + inline LLVMTargetDataRef wrap(const DataLayout *P) { + return reinterpret_cast(const_cast(P)); } inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) { diff --git a/include/llvm-c/TargetMachine.h b/include/llvm-c/TargetMachine.h index 0d35d73..29668de 100644 --- a/include/llvm-c/TargetMachine.h +++ b/include/llvm-c/TargetMachine.h @@ -104,7 +104,7 @@ char *LLVMGetTargetMachineCPU(LLVMTargetMachineRef T); LLVMDisposeMessage. */ char *LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T); -/** Returns the llvm::TargetData used for this llvm:TargetMachine. */ +/** Returns the llvm::DataLayout used for this llvm:TargetMachine. */ LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T); /** Emits an asm or object file for the given module to the filename. This diff --git a/include/llvm-c/Transforms/Vectorize.h b/include/llvm-c/Transforms/Vectorize.h index 9e7c754..68a9bdd 100644 --- a/include/llvm-c/Transforms/Vectorize.h +++ b/include/llvm-c/Transforms/Vectorize.h @@ -36,6 +36,9 @@ extern "C" { /** See llvm::createBBVectorizePass function. */ void LLVMAddBBVectorizePass(LLVMPassManagerRef PM); +/** See llvm::createLoopVectorizePass function. 
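The vectorizer addition to the C API slots into an existing LLVMPassManagerRef like any other transform; a short sketch:

  #include "llvm-c/Core.h"
  #include "llvm-c/Transforms/Vectorize.h"

  void addVectorizers(LLVMPassManagerRef PM) {
    LLVMAddBBVectorizePass(PM);    // basic-block vectorizer (already present)
    LLVMAddLoopVectorizePass(PM);  // loop vectorizer entry point added here
  }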
*/ +void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM); + /** * @} */ diff --git a/include/llvm/ADT/APFloat.h b/include/llvm/ADT/APFloat.h index 5a625a4..31c6e6a 100644 --- a/include/llvm/ADT/APFloat.h +++ b/include/llvm/ADT/APFloat.h @@ -455,14 +455,11 @@ namespace llvm { /* The sign bit of this number. */ unsigned int sign: 1; - - /* For PPCDoubleDouble, we have a second exponent and sign (the second - significand is appended to the first one, although it would be wrong to - regard these as a single number for arithmetic purposes). These fields - are not meaningful for any other type. */ - exponent_t exponent2 : 11; - unsigned int sign2: 1; }; + + // See friend declaration above. This additional declaration is required in + // order to compile LLVM with IBM xlC compiler. + hash_code hash_value(const APFloat &Arg); } /* namespace llvm */ #endif /* LLVM_FLOAT_H */ diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h index f30a6e3..c7c8016b 100644 --- a/include/llvm/ADT/APInt.h +++ b/include/llvm/ADT/APInt.h @@ -251,7 +251,7 @@ public: /// constructor. APInt(unsigned numBits, unsigned numWords, const uint64_t bigVal[]); - /// This constructor interprets the string \arg str in the given radix. The + /// This constructor interprets the string \p str in the given radix. The /// interpretation stops when the first character that is not suitable for the /// radix is encountered, or the end of the string. Acceptable radix values /// are 2, 8, 10, 16, and 36. It is an error for the value implied by the @@ -760,7 +760,7 @@ public: APInt shl(unsigned shiftAmt) const { assert(shiftAmt <= BitWidth && "Invalid shift amount"); if (isSingleWord()) { - if (shiftAmt == BitWidth) + if (shiftAmt >= BitWidth) return APInt(BitWidth, 0); // avoid undefined shift results return APInt(BitWidth, VAL << shiftAmt); } @@ -1231,15 +1231,15 @@ public: } /// This method determines how many bits are required to hold the APInt - /// equivalent of the string given by \arg str. + /// equivalent of the string given by \p str. /// @brief Get bits required for string value. static unsigned getBitsNeeded(StringRef str, uint8_t radix); /// countLeadingZeros - This function is an APInt version of the /// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number /// of zeros from the most significant bit to the first one bit. - /// @returns BitWidth if the value is zero. - /// @returns the number of zeros from the most significant bit to the first + /// @returns BitWidth if the value is zero, otherwise + /// returns the number of zeros from the most significant bit to the first /// one bits. unsigned countLeadingZeros() const { if (isSingleWord()) { @@ -1252,8 +1252,8 @@ public: /// countLeadingOnes - This function is an APInt version of the /// countLeadingOnes_{32,64} functions in MathExtras.h. It counts the number /// of ones from the most significant bit to the first zero bit. - /// @returns 0 if the high order bit is not set - /// @returns the number of 1 bits from the most significant to the least + /// @returns 0 if the high order bit is not set, otherwise + /// returns the number of 1 bits from the most significant to the least /// @brief Count the number of leading one bits. unsigned countLeadingOnes() const; @@ -1266,8 +1266,8 @@ public: /// countTrailingZeros - This function is an APInt version of the /// countTrailingZeros_{32,64} functions in MathExtras.h. It counts /// the number of zeros from the least significant bit to the first set bit. - /// @returns BitWidth if the value is zero. 
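The APInt::shl change is a behavioral fix: shifting a single-word value by the full bit width now yields a defined zero result instead of relying on an undefined C++ shift. A small illustration:

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  void shlFullWidth() {
    llvm::APInt X(8, 0xFFu);
    llvm::APInt Y = X.shl(8);  // shift by the full bit width
    assert(Y == 0 && "all bits shifted out, result is defined to be zero");
  }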
- /// @returns the number of zeros from the least significant bit to the first + /// @returns BitWidth if the value is zero, otherwise + /// returns the number of zeros from the least significant bit to the first /// one bit. /// @brief Count the number of trailing zero bits. unsigned countTrailingZeros() const; @@ -1275,8 +1275,8 @@ public: /// countTrailingOnes - This function is an APInt version of the /// countTrailingOnes_{32,64} functions in MathExtras.h. It counts /// the number of ones from the least significant bit to the first zero bit. - /// @returns BitWidth if the value is all ones. - /// @returns the number of ones from the least significant bit to the first + /// @returns BitWidth if the value is all ones, otherwise + /// returns the number of ones from the least significant bit to the first /// zero bit. /// @brief Count the number of trailing one bits. unsigned countTrailingOnes() const { @@ -1288,8 +1288,8 @@ public: /// countPopulation - This function is an APInt version of the /// countPopulation_{32,64} functions in MathExtras.h. It counts the number /// of 1 bits in the APInt value. - /// @returns 0 if the value is zero. - /// @returns the number of set bits. + /// @returns 0 if the value is zero, otherwise returns the number of set + /// bits. /// @brief Count the number of bits set. unsigned countPopulation() const { if (isSingleWord()) @@ -1780,6 +1780,9 @@ inline APInt Not(const APInt& APIVal) { } // End of APIntOps namespace + // See friend declaration above. This additional declaration is required in + // order to compile LLVM with IBM xlC compiler. + hash_code hash_value(const APInt &Arg); } // End of llvm namespace #endif diff --git a/include/llvm/ADT/ArrayRef.h b/include/llvm/ADT/ArrayRef.h index cf55aad..1e35d62 100644 --- a/include/llvm/ADT/ArrayRef.h +++ b/include/llvm/ADT/ArrayRef.h @@ -59,12 +59,17 @@ namespace llvm { ArrayRef(const T *begin, const T *end) : Data(begin), Length(end - begin) {} - /// Construct an ArrayRef from a SmallVector. - /*implicit*/ ArrayRef(const SmallVectorTemplateCommon &Vec) - : Data(Vec.data()), Length(Vec.size()) {} + /// Construct an ArrayRef from a SmallVector. This is templated in order to + /// avoid instantiating SmallVectorTemplateCommon whenever we + /// copy-construct an ArrayRef. + template + /*implicit*/ ArrayRef(const SmallVectorTemplateCommon &Vec) + : Data(Vec.data()), Length(Vec.size()) { + } /// Construct an ArrayRef from a std::vector. - /*implicit*/ ArrayRef(const std::vector &Vec) + template + /*implicit*/ ArrayRef(const std::vector &Vec) : Data(Vec.empty() ? (T*)0 : &Vec[0]), Length(Vec.size()) {} /// Construct an ArrayRef from a C array. diff --git a/include/llvm/ADT/BitVector.h b/include/llvm/ADT/BitVector.h index 3e2e5f2..9d6388f 100644 --- a/include/llvm/ADT/BitVector.h +++ b/include/llvm/ADT/BitVector.h @@ -172,7 +172,7 @@ public: unsigned BitPos = Prev % BITWORD_SIZE; BitWord Copy = Bits[WordPos]; // Mask off previous bits. 
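The ArrayRef constructors above become templates so that merely copy-constructing an ArrayRef no longer forces SmallVectorTemplateCommon<T> to be instantiated; from the caller's side the implicit conversions still read the same. A sketch:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/SmallVector.h"
  #include <vector>

  unsigned sum(llvm::ArrayRef<unsigned> Xs) {
    unsigned S = 0;
    for (unsigned i = 0, e = Xs.size(); i != e; ++i)
      S += Xs[i];
    return S;
  }

  void callers() {
    llvm::SmallVector<unsigned, 4> SV;
    SV.push_back(1); SV.push_back(2);
    std::vector<unsigned> V(3, 7);
    (void)sum(SV);  // implicit ArrayRef from a SmallVector
    (void)sum(V);   // implicit ArrayRef from a std::vector
  }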
- Copy &= ~0L << BitPos; + Copy &= ~0UL << BitPos; if (Copy != 0) { if (sizeof(BitWord) == 4) @@ -237,6 +237,34 @@ public: return *this; } + /// set - Efficiently set a range of bits in [I, E) + BitVector &set(unsigned I, unsigned E) { + assert(I <= E && "Attempted to set backwards range!"); + assert(E <= size() && "Attempted to set out-of-bounds range!"); + + if (I == E) return *this; + + if (I / BITWORD_SIZE == E / BITWORD_SIZE) { + BitWord EMask = 1UL << (E % BITWORD_SIZE); + BitWord IMask = 1UL << (I % BITWORD_SIZE); + BitWord Mask = EMask - IMask; + Bits[I / BITWORD_SIZE] |= Mask; + return *this; + } + + BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE); + Bits[I / BITWORD_SIZE] |= PrefixMask; + I = RoundUpToAlignment(I, BITWORD_SIZE); + + for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE) + Bits[I / BITWORD_SIZE] = ~0UL; + + BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1; + Bits[I / BITWORD_SIZE] |= PostfixMask; + + return *this; + } + BitVector &reset() { init_words(Bits, Capacity, false); return *this; @@ -247,6 +275,34 @@ public: return *this; } + /// reset - Efficiently reset a range of bits in [I, E) + BitVector &reset(unsigned I, unsigned E) { + assert(I <= E && "Attempted to reset backwards range!"); + assert(E <= size() && "Attempted to reset out-of-bounds range!"); + + if (I == E) return *this; + + if (I / BITWORD_SIZE == E / BITWORD_SIZE) { + BitWord EMask = 1UL << (E % BITWORD_SIZE); + BitWord IMask = 1UL << (I % BITWORD_SIZE); + BitWord Mask = EMask - IMask; + Bits[I / BITWORD_SIZE] &= ~Mask; + return *this; + } + + BitWord PrefixMask = ~0UL << (I % BITWORD_SIZE); + Bits[I / BITWORD_SIZE] &= ~PrefixMask; + I = RoundUpToAlignment(I, BITWORD_SIZE); + + for (; I + BITWORD_SIZE <= E; I += BITWORD_SIZE) + Bits[I / BITWORD_SIZE] = 0UL; + + BitWord PostfixMask = (1UL << (E % BITWORD_SIZE)) - 1; + Bits[I / BITWORD_SIZE] &= ~PostfixMask; + + return *this; + } + BitVector &flip() { for (unsigned i = 0; i < NumBitWords(size()); ++i) Bits[i] = ~Bits[i]; @@ -311,7 +367,7 @@ public: return !(*this == RHS); } - // Intersection, union, disjoint union. + /// Intersection, union, disjoint union. BitVector &operator&=(const BitVector &RHS) { unsigned ThisWords = NumBitWords(size()); unsigned RHSWords = NumBitWords(RHS.size()); @@ -328,7 +384,7 @@ public: return *this; } - // reset - Reset bits that are set in RHS. Same as *this &= ~RHS. + /// reset - Reset bits that are set in RHS. Same as *this &= ~RHS. BitVector &reset(const BitVector &RHS) { unsigned ThisWords = NumBitWords(size()); unsigned RHSWords = NumBitWords(RHS.size()); @@ -338,6 +394,23 @@ public: return *this; } + /// test - Check if (This - RHS) is zero. + /// This is the same as reset(RHS) and any(). + bool test(const BitVector &RHS) const { + unsigned ThisWords = NumBitWords(size()); + unsigned RHSWords = NumBitWords(RHS.size()); + unsigned i; + for (i = 0; i != std::min(ThisWords, RHSWords); ++i) + if ((Bits[i] & ~RHS.Bits[i]) != 0) + return true; + + for (; i != ThisWords ; ++i) + if (Bits[i] != 0) + return true; + + return false; + } + BitVector &operator|=(const BitVector &RHS) { if (size() < RHS.size()) resize(RHS.size()); @@ -451,8 +524,11 @@ private: // Then set any stray high bits of the last used word. 
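The new BitVector range forms operate on a half-open interval [I, E) a word at a time rather than bit by bit, and test(RHS) reports whether this vector has any bit set that RHS does not. A sketch of the intended use:

  #include "llvm/ADT/BitVector.h"

  void ranges() {
    llvm::BitVector A(128), B(128);
    A.set(10, 42);             // set bits [10, 42)
    A.reset(20, 30);           // clear bits [20, 30) again
    B.set(0, 64);
    bool Outside = A.test(B);  // true iff A has a set bit outside of B
    (void)Outside;
  }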
unsigned ExtraBits = Size % BITWORD_SIZE; if (ExtraBits) { - Bits[UsedWords-1] &= ~(~0L << ExtraBits); - Bits[UsedWords-1] |= (0 - (BitWord)t) << ExtraBits; + BitWord ExtraBitMask = ~0UL << ExtraBits; + if (t) + Bits[UsedWords-1] |= ExtraBitMask; + else + Bits[UsedWords-1] &= ~ExtraBitMask; } } diff --git a/include/llvm/ADT/DAGDeltaAlgorithm.h b/include/llvm/ADT/DAGDeltaAlgorithm.h index e502ac4..2dfed07 100644 --- a/include/llvm/ADT/DAGDeltaAlgorithm.h +++ b/include/llvm/ADT/DAGDeltaAlgorithm.h @@ -48,17 +48,18 @@ public: public: virtual ~DAGDeltaAlgorithm() {} - /// Run - Minimize the DAG formed by the \arg Changes vertices and the \arg - /// Dependencies edges by executing \see ExecuteOneTest() on subsets of + /// Run - Minimize the DAG formed by the \p Changes vertices and the + /// \p Dependencies edges by executing \see ExecuteOneTest() on subsets of /// changes and returning the smallest set which still satisfies the test - /// predicate and the input \arg Dependencies. + /// predicate and the input \p Dependencies. /// /// \param Changes The list of changes. /// /// \param Dependencies The list of dependencies amongst changes. For each - /// (x,y) in \arg Dependencies, both x and y must be in \arg Changes. The - /// minimization algorithm guarantees that for each tested changed set S, x - /// \in S implies y \in S. It is an error to have cyclic dependencies. + /// (x,y) in \p Dependencies, both x and y must be in \p Changes. The + /// minimization algorithm guarantees that for each tested changed set S, + /// \f$ x \in S \f$ implies \f$ y \in S \f$. It is an error to have cyclic + /// dependencies. changeset_ty Run(const changeset_ty &Changes, const std::vector &Dependencies); @@ -67,7 +68,7 @@ public: const changesetlist_ty &Sets, const changeset_ty &Required) {} - /// ExecuteOneTest - Execute a single test predicate on the change set \arg S. + /// ExecuteOneTest - Execute a single test predicate on the change set \p S. virtual bool ExecuteOneTest(const changeset_ty &S) = 0; }; diff --git a/include/llvm/ADT/DeltaAlgorithm.h b/include/llvm/ADT/DeltaAlgorithm.h index 45ba198..7bf7960 100644 --- a/include/llvm/ADT/DeltaAlgorithm.h +++ b/include/llvm/ADT/DeltaAlgorithm.h @@ -45,23 +45,23 @@ private: /// since we always reduce following a success. std::set FailedTestsCache; - /// GetTestResult - Get the test result for the \arg Changes from the + /// GetTestResult - Get the test result for the \p Changes from the /// cache, executing the test if necessary. /// /// \param Changes - The change set to test. /// \return - The test result. bool GetTestResult(const changeset_ty &Changes); - /// Split - Partition a set of changes \arg S into one or two subsets. + /// Split - Partition a set of changes \p S into one or two subsets. void Split(const changeset_ty &S, changesetlist_ty &Res); - /// Delta - Minimize a set of \arg Changes which has been partioned into + /// Delta - Minimize a set of \p Changes which has been partioned into /// smaller sets, by attempting to remove individual subsets. changeset_ty Delta(const changeset_ty &Changes, const changesetlist_ty &Sets); - /// Search - Search for a subset (or subsets) in \arg Sets which can be - /// removed from \arg Changes while still satisfying the predicate. + /// Search - Search for a subset (or subsets) in \p Sets which can be + /// removed from \p Changes while still satisfying the predicate. /// /// \param Res - On success, a subset of Changes which satisfies the /// predicate. 
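DeltaAlgorithm, whose comments are being retagged here, is driven by subclassing: ExecuteOneTest is the only hook, and Run keeps shrinking the change set while the predicate keeps passing. A toy sketch with a hypothetical subclass and predicate:

  #include "llvm/ADT/DeltaAlgorithm.h"

  namespace {
  class NeedsSeven : public llvm::DeltaAlgorithm {
  protected:
    // The test "passes" iff change #7 is still in the set.
    virtual bool ExecuteOneTest(const changeset_ty &S) {
      return S.count(7) != 0;
    }
  };
  }

  void minimize() {
    NeedsSeven D;
    llvm::DeltaAlgorithm::changeset_ty Changes;
    for (unsigned i = 0; i != 16; ++i)
      Changes.insert(i);
    // Expected to shrink toward the single change {7}.
    llvm::DeltaAlgorithm::changeset_ty Minimal = D.Run(Changes);
    (void)Minimal;
  }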
@@ -74,13 +74,13 @@ protected: virtual void UpdatedSearchState(const changeset_ty &Changes, const changesetlist_ty &Sets) {} - /// ExecuteOneTest - Execute a single test predicate on the change set \arg S. + /// ExecuteOneTest - Execute a single test predicate on the change set \p S. virtual bool ExecuteOneTest(const changeset_ty &S) = 0; public: virtual ~DeltaAlgorithm(); - /// Run - Minimize the set \arg Changes by executing \see ExecuteOneTest() on + /// Run - Minimize the set \p Changes by executing \see ExecuteOneTest() on /// subsets of changes and returning the smallest set which still satisfies /// the test predicate. changeset_ty Run(const changeset_ty &Changes); diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h index f60d688..ac4bdbd 100644 --- a/include/llvm/ADT/DenseMap.h +++ b/include/llvm/ADT/DenseMap.h @@ -420,9 +420,10 @@ private: NumBuckets = getNumBuckets(); } if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) { - this->grow(NumBuckets); + this->grow(NumBuckets * 2); LookupBucketFor(Key, TheBucket); } + assert(TheBucket); // Only update the state after we've grown our bucket space appropriately // so that when growing buckets we have self-consistent entry count. @@ -599,7 +600,7 @@ public: unsigned OldNumBuckets = NumBuckets; BucketT *OldBuckets = Buckets; - allocateBuckets(std::max(64, NextPowerOf2(AtLeast))); + allocateBuckets(std::max(64, NextPowerOf2(AtLeast-1))); assert(Buckets); if (!OldBuckets) { this->BaseT::initEmpty(); @@ -825,11 +826,11 @@ public: } void grow(unsigned AtLeast) { - if (AtLeast > InlineBuckets) - AtLeast = std::max(64, NextPowerOf2(AtLeast)); + if (AtLeast >= InlineBuckets) + AtLeast = std::max(64, NextPowerOf2(AtLeast-1)); if (Small) { - if (AtLeast <= InlineBuckets) + if (AtLeast < InlineBuckets) return; // Nothing to do. // First move the inline buckets into a temporary storage. diff --git a/include/llvm/ADT/DenseMapInfo.h b/include/llvm/ADT/DenseMapInfo.h index 1559a35..6f17a64 100644 --- a/include/llvm/ADT/DenseMapInfo.h +++ b/include/llvm/ADT/DenseMapInfo.h @@ -31,12 +31,12 @@ struct DenseMapInfo { template struct DenseMapInfo { static inline T* getEmptyKey() { - intptr_t Val = -1; + uintptr_t Val = static_cast(-1); Val <<= PointerLikeTypeTraits::NumLowBitsAvailable; return reinterpret_cast(Val); } static inline T* getTombstoneKey() { - intptr_t Val = -2; + uintptr_t Val = static_cast(-2); Val <<= PointerLikeTypeTraits::NumLowBitsAvailable; return reinterpret_cast(Val); } @@ -105,7 +105,7 @@ template<> struct DenseMapInfo { // Provide DenseMapInfo for longs. template<> struct DenseMapInfo { static inline long getEmptyKey() { - return (1UL << (sizeof(long) * 8 - 1)) - 1L; + return (1UL << (sizeof(long) * 8 - 1)) - 1UL; } static inline long getTombstoneKey() { return getEmptyKey() - 1L; } static unsigned getHashValue(const long& Val) { diff --git a/include/llvm/ADT/EquivalenceClasses.h b/include/llvm/ADT/EquivalenceClasses.h index 771476c..1d81772 100644 --- a/include/llvm/ADT/EquivalenceClasses.h +++ b/include/llvm/ADT/EquivalenceClasses.h @@ -33,6 +33,7 @@ namespace llvm { /// /// Here is a simple example using integers: /// +/// \code /// EquivalenceClasses EC; /// EC.unionSets(1, 2); // insert 1, 2 into the same set /// EC.insert(4); EC.insert(5); // insert 4, 5 into own sets @@ -46,6 +47,7 @@ namespace llvm { /// cerr << *MI << " "; // Print member. /// cerr << "\n"; // Finish set. 
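The EquivalenceClasses doc example gains \code markers above; a compilable variant of the same idea, using getLeaderValue to check that two values ended up in one class:

  #include "llvm/ADT/EquivalenceClasses.h"

  void classes() {
    llvm::EquivalenceClasses<int> EC;
    EC.unionSets(1, 2);  // {1, 2}
    EC.insert(4);
    EC.insert(5);
    EC.unionSets(5, 1);  // {1, 2, 5} and {4}
    bool SameClass = EC.getLeaderValue(2) == EC.getLeaderValue(5);  // true
    (void)SameClass;
  }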
/// } +/// \endcode /// /// This example prints: /// 4 diff --git a/include/llvm/ADT/FoldingSet.h b/include/llvm/ADT/FoldingSet.h index ba415ac..375d84a 100644 --- a/include/llvm/ADT/FoldingSet.h +++ b/include/llvm/ADT/FoldingSet.h @@ -278,6 +278,10 @@ public: bool operator==(FoldingSetNodeIDRef) const; + /// Used to compare the "ordering" of two nodes as defined by the + /// profiled bits and their ordering defined by memcmp(). + bool operator<(FoldingSetNodeIDRef) const; + const unsigned *getData() const { return Data; } size_t getSize() const { return Size; } }; @@ -327,6 +331,11 @@ public: bool operator==(const FoldingSetNodeID &RHS) const; bool operator==(const FoldingSetNodeIDRef RHS) const; + /// Used to compare the "ordering" of two nodes as defined by the + /// profiled bits and their ordering defined by memcmp(). + bool operator<(const FoldingSetNodeID &RHS) const; + bool operator<(const FoldingSetNodeIDRef RHS) const; + /// Intern - Copy this node's data to a memory region allocated from the /// given allocator and return a FoldingSetNodeIDRef describing the /// interned data. diff --git a/include/llvm/ADT/Hashing.h b/include/llvm/ADT/Hashing.h index 6ab0725..cda31a2 100644 --- a/include/llvm/ADT/Hashing.h +++ b/include/llvm/ADT/Hashing.h @@ -409,7 +409,6 @@ bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value, /// combining them, this (as an optimization) directly combines the integers. template hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) { - typedef typename std::iterator_traits::value_type ValueT; const size_t seed = get_execution_seed(); char buffer[64], *buffer_ptr = buffer; char *const buffer_end = buffer_ptr + array_lengthof(buffer); @@ -711,7 +710,7 @@ hash_code hash_combine(const T1 &arg1) { #endif -// Implementation details for implementatinos of hash_value overloads provided +// Implementation details for implementations of hash_value overloads provided // here. namespace hashing { namespace detail { diff --git a/include/llvm/ADT/ImmutableList.h b/include/llvm/ADT/ImmutableList.h index d7c0074..20bdd90 100644 --- a/include/llvm/ADT/ImmutableList.h +++ b/include/llvm/ADT/ImmutableList.h @@ -33,9 +33,8 @@ class ImmutableListImpl : public FoldingSetNode { friend class ImmutableListFactory; - // Do not implement. - void operator=(const ImmutableListImpl&); - ImmutableListImpl(const ImmutableListImpl&); + void operator=(const ImmutableListImpl&) LLVM_DELETED_FUNCTION; + ImmutableListImpl(const ImmutableListImpl&) LLVM_DELETED_FUNCTION; public: const T& getHead() const { return Head; } diff --git a/include/llvm/ADT/ImmutableMap.h b/include/llvm/ADT/ImmutableMap.h index 8346ffa..4883c5b 100644 --- a/include/llvm/ADT/ImmutableMap.h +++ b/include/llvm/ADT/ImmutableMap.h @@ -122,8 +122,8 @@ public: } private: - Factory(const Factory& RHS); // DO NOT IMPLEMENT - void operator=(const Factory& RHS); // DO NOT IMPLEMENT + Factory(const Factory& RHS) LLVM_DELETED_FUNCTION; + void operator=(const Factory& RHS) LLVM_DELETED_FUNCTION; }; bool contains(key_type_ref K) const { diff --git a/include/llvm/ADT/ImmutableSet.h b/include/llvm/ADT/ImmutableSet.h index 949dc44..3900f96 100644 --- a/include/llvm/ADT/ImmutableSet.h +++ b/include/llvm/ADT/ImmutableSet.h @@ -22,7 +22,6 @@ #include #include #include -#include namespace llvm { @@ -84,13 +83,13 @@ public: } return NULL; } - + /// getMaxElement - Find the subtree associated with the highest ranged /// key value. 
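The new operator< gives FoldingSetNodeID (and FoldingSetNodeIDRef) a total order over the profiled bits, so IDs can be used as keys in ordered containers. A sketch:

  #include "llvm/ADT/FoldingSet.h"

  bool idLess() {
    llvm::FoldingSetNodeID A, B;
    A.AddInteger(1); A.AddInteger(2);
    B.AddInteger(1); B.AddInteger(3);
    // Ordering is defined by memcmp over the profiled words.
    return A < B;
  }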
ImutAVLTree* getMaxElement() { ImutAVLTree *T = this; - ImutAVLTree *Right = T->getRight(); - while (Right) { T = right; right = T->getRight(); } + ImutAVLTree *Right = T->getRight(); + while (Right) { T = Right; Right = T->getRight(); } return T; } @@ -258,7 +257,7 @@ private: /// method returns false for an instance of ImutAVLTree, all subtrees /// will also have this method return false. The converse is not true. bool isMutable() const { return IsMutable; } - + /// hasCachedDigest - Returns true if the digest for this tree is cached. /// This can only be true if the tree is immutable. bool hasCachedDigest() const { return IsDigestCached; } @@ -280,7 +279,7 @@ private: assert(isMutable() && "Mutable flag already removed."); IsMutable = false; } - + /// markedCachedDigest - Clears the NoCachedDigest flag for a tree. void markedCachedDigest() { assert(!hasCachedDigest() && "NoCachedDigest flag already removed."); @@ -349,7 +348,7 @@ public: else factory->Cache[factory->maskCacheIndex(computeDigest())] = next; } - + // We need to clear the mutability bit in case we are // destroying the node as part of a sweep in ImutAVLFactory::recoverNodes(). IsMutable = false; @@ -415,7 +414,7 @@ public: TreeTy* getEmptyTree() const { return NULL; } protected: - + //===--------------------------------------------------===// // A bunch of quick helper functions used for reasoning // about the properties of trees and their children. @@ -461,7 +460,7 @@ protected: // returned to the caller. //===--------------------------------------------------===// - TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) { + TreeTy* createNode(TreeTy* L, value_type_ref V, TreeTy* R) { BumpPtrAllocator& A = getAllocator(); TreeTy* T; if (!freeNodes.empty()) { @@ -469,8 +468,7 @@ protected: freeNodes.pop_back(); assert(T != L); assert(T != R); - } - else { + } else { T = (TreeTy*) A.Allocate(); } new (T) TreeTy(this, L, R, V, incrementHeight(L,R)); @@ -513,7 +511,8 @@ protected: return createNode(createNode(LL,L,LRL), LR, createNode(LRR,V,R)); } - else if (hr > hl + 2) { + + if (hr > hl + 2) { assert(!isEmpty(R) && "Right tree cannot be empty to have a height >= 2"); TreeTy *RL = getLeft(R); @@ -529,8 +528,8 @@ protected: return createNode(createNode(L,V,RLL), RL, createNode(RLR,R,RR)); } - else - return createNode(L,V,R); + + return createNode(L,V,R); } /// add_internal - Creates a new tree that includes the specified @@ -604,7 +603,7 @@ protected: markImmutable(getLeft(T)); markImmutable(getRight(T)); } - + public: TreeTy *getCanonicalTree(TreeTy *TNew) { if (!TNew) @@ -937,7 +936,7 @@ public: private: TreeTy *Root; - + public: /// Constructs a set from a pointer to a tree root. In general one /// should use a Factory object to create sets instead of directly @@ -1006,10 +1005,10 @@ public: typename TreeTy::Factory *getTreeFactory() const { return const_cast(&F); } - + private: - Factory(const Factory& RHS); // DO NOT IMPLEMENT - void operator=(const Factory& RHS); // DO NOT IMPLEMENT + Factory(const Factory& RHS) LLVM_DELETED_FUNCTION; + void operator=(const Factory& RHS) LLVM_DELETED_FUNCTION; }; friend class Factory; @@ -1027,11 +1026,11 @@ public: return Root && RHS.Root ? 
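The ImmutableSet changes are mostly internal (the getMaxElement traversal fix and deleted copy operations on Factory), but the public pattern is unchanged: all mutation goes through a Factory and returns a fresh set. A sketch:

  #include "llvm/ADT/ImmutableSet.h"

  void persistent() {
    llvm::ImmutableSet<int>::Factory F;
    llvm::ImmutableSet<int> S0 = F.getEmptySet();
    llvm::ImmutableSet<int> S1 = F.add(S0, 42);  // S0 itself is left untouched
    bool In = S1.contains(42);                   // true
    (void)In;
  }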
Root->isNotEqual(*RHS.Root) : Root != RHS.Root; } - TreeTy *getRoot() { + TreeTy *getRoot() { if (Root) { Root->retain(); } return Root; } - + TreeTy *getRootWithoutRetain() const { return Root; } @@ -1092,7 +1091,7 @@ public: void validateTree() const { if (Root) Root->validateTree(); } }; - + // NOTE: This may some day replace the current ImmutableSet. template > class ImmutableSetRef { @@ -1101,11 +1100,11 @@ public: typedef typename ValInfo::value_type_ref value_type_ref; typedef ImutAVLTree TreeTy; typedef typename TreeTy::Factory FactoryTy; - + private: TreeTy *Root; FactoryTy *Factory; - + public: /// Constructs a set from a pointer to a tree root. In general one /// should use a Factory object to create sets instead of directly @@ -1133,44 +1132,44 @@ public: ~ImmutableSetRef() { if (Root) { Root->release(); } } - + static inline ImmutableSetRef getEmptySet(FactoryTy *F) { return ImmutableSetRef(0, F); } - + ImmutableSetRef add(value_type_ref V) { return ImmutableSetRef(Factory->add(Root, V), Factory); } - + ImmutableSetRef remove(value_type_ref V) { return ImmutableSetRef(Factory->remove(Root, V), Factory); } - + /// Returns true if the set contains the specified value. bool contains(value_type_ref V) const { return Root ? Root->contains(V) : false; } - + ImmutableSet asImmutableSet(bool canonicalize = true) const { return ImmutableSet(canonicalize ? Factory->getCanonicalTree(Root) : Root); } - + TreeTy *getRootWithoutRetain() const { return Root; } - + bool operator==(const ImmutableSetRef &RHS) const { return Root && RHS.Root ? Root->isEqual(*RHS.Root) : Root == RHS.Root; } - + bool operator!=(const ImmutableSetRef &RHS) const { return Root && RHS.Root ? Root->isNotEqual(*RHS.Root) : Root != RHS.Root; } /// isEmpty - Return true if the set contains no elements. bool isEmpty() const { return !Root; } - + /// isSingleton - Return true if the set contains exactly one element. /// This method runs in constant time. bool isSingleton() const { return getHeight() == 1; } @@ -1178,7 +1177,7 @@ public: //===--------------------------------------------------===// // Iterators. //===--------------------------------------------------===// - + class iterator { typename TreeTy::iterator itr; iterator(TreeTy* t) : itr(t) {} @@ -1194,28 +1193,28 @@ public: inline bool operator!=(const iterator& RHS) const { return RHS.itr != itr; } inline value_type *operator->() const { return &(operator*()); } }; - + iterator begin() const { return iterator(Root); } iterator end() const { return iterator(); } - + //===--------------------------------------------------===// // Utility methods. //===--------------------------------------------------===// - + unsigned getHeight() const { return Root ? Root->getHeight() : 0; } - + static inline void Profile(FoldingSetNodeID& ID, const ImmutableSetRef& S) { ID.AddPointer(S.Root); } - + inline void Profile(FoldingSetNodeID& ID) const { return Profile(ID,*this); } - + //===--------------------------------------------------===// // For testing. //===--------------------------------------------------===// - + void validateTree() const { if (Root) Root->validateTree(); } }; diff --git a/include/llvm/ADT/MapVector.h b/include/llvm/ADT/MapVector.h new file mode 100644 index 0000000..6aacca5 --- /dev/null +++ b/include/llvm/ADT/MapVector.h @@ -0,0 +1,90 @@ +//===- llvm/ADT/MapVector.h - Map with deterministic value order *- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a map that provides insertion order iteration. The +// interface is purposefully minimal. The key is assumed to be cheap to copy +// and 2 copies are kept, one for indexing in a DenseMap, one for iteration in +// a std::vector. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADT_MAPVECTOR_H +#define LLVM_ADT_MAPVECTOR_H + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/DenseMap.h" +#include + +namespace llvm { + +/// This class implements a map that also provides access to all stored values +/// in a deterministic order. The values are kept in a std::vector and the +/// mapping is done with DenseMap from Keys to indexes in that vector. +template, + typename VectorType = std::vector > > +class MapVector { + typedef typename VectorType::size_type SizeType; + + MapType Map; + VectorType Vector; + +public: + typedef typename VectorType::iterator iterator; + typedef typename VectorType::const_iterator const_iterator; + + SizeType size() const { + return Vector.size(); + } + + iterator begin() { + return Vector.begin(); + } + + const_iterator begin() const { + return Vector.begin(); + } + + iterator end() { + return Vector.end(); + } + + const_iterator end() const { + return Vector.end(); + } + + bool empty() const { + return Vector.empty(); + } + + void clear() { + Map.clear(); + Vector.clear(); + } + + ValueT &operator[](const KeyT &Key) { + std::pair Pair = std::make_pair(Key, 0); + std::pair Result = Map.insert(Pair); + unsigned &I = Result.first->second; + if (Result.second) { + Vector.push_back(std::make_pair(Key, ValueT())); + I = Vector.size() - 1; + } + return Vector[I].second; + } + + unsigned count(const KeyT &Key) const { + typename MapType::const_iterator Pos = Map.find(Key); + return Pos == Map.end()? 0 : 1; + } +}; + +} + +#endif diff --git a/include/llvm/ADT/Optional.h b/include/llvm/ADT/Optional.h index ee8b69f..f43aeb1 100644 --- a/include/llvm/ADT/Optional.h +++ b/include/llvm/ADT/Optional.h @@ -16,8 +16,13 @@ #ifndef LLVM_ADT_OPTIONAL #define LLVM_ADT_OPTIONAL +#include "llvm/Support/Compiler.h" #include +#if LLVM_USE_RVALUE_REFERENCES +#include +#endif + namespace llvm { template @@ -28,6 +33,10 @@ public: explicit Optional() : x(), hasVal(false) {} Optional(const T &y) : x(y), hasVal(true) {} +#if LLVM_USE_RVALUE_REFERENCES + Optional(T &&y) : x(std::forward(y)), hasVal(true) {} +#endif + static inline Optional create(const T* y) { return y ? Optional(*y) : Optional(); } diff --git a/include/llvm/ADT/OwningPtr.h b/include/llvm/ADT/OwningPtr.h index 6d9c305..05bcd40 100644 --- a/include/llvm/ADT/OwningPtr.h +++ b/include/llvm/ADT/OwningPtr.h @@ -14,6 +14,7 @@ #ifndef LLVM_ADT_OWNING_PTR_H #define LLVM_ADT_OWNING_PTR_H +#include "llvm/Support/Compiler.h" #include #include @@ -25,12 +26,21 @@ namespace llvm { /// pointee object can be taken away from OwningPtr by using the take method. 
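MapVector is the new header's whole point: DenseMap-backed lookup with std::vector-backed, insertion-ordered iteration. A sketch of the minimal interface it exposes:

  #include "llvm/ADT/MapVector.h"

  void ordered() {
    llvm::MapVector<unsigned, const char *> MV;
    MV[3] = "three";  // inserted first
    MV[1] = "one";    // inserted second
    // Iteration visits entries in insertion order: key 3, then key 1.
    for (llvm::MapVector<unsigned, const char *>::iterator I = MV.begin(),
                                                           E = MV.end();
         I != E; ++I)
      (void)I->first;
  }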
template class OwningPtr { - OwningPtr(OwningPtr const &); // DO NOT IMPLEMENT - OwningPtr &operator=(OwningPtr const &); // DO NOT IMPLEMENT + OwningPtr(OwningPtr const &) LLVM_DELETED_FUNCTION; + OwningPtr &operator=(OwningPtr const &) LLVM_DELETED_FUNCTION; T *Ptr; public: explicit OwningPtr(T *P = 0) : Ptr(P) {} +#if LLVM_USE_RVALUE_REFERENCES + OwningPtr(OwningPtr &&Other) : Ptr(Other.take()) {} + + OwningPtr &operator=(OwningPtr &&Other) { + reset(Other.take()); + return *this; + } +#endif + ~OwningPtr() { delete Ptr; } @@ -79,12 +89,21 @@ inline void swap(OwningPtr &a, OwningPtr &b) { /// functionality as OwningPtr, except that it works for array types. template class OwningArrayPtr { - OwningArrayPtr(OwningArrayPtr const &); // DO NOT IMPLEMENT - OwningArrayPtr &operator=(OwningArrayPtr const &); // DO NOT IMPLEMENT + OwningArrayPtr(OwningArrayPtr const &) LLVM_DELETED_FUNCTION; + OwningArrayPtr &operator=(OwningArrayPtr const &) LLVM_DELETED_FUNCTION; T *Ptr; public: explicit OwningArrayPtr(T *P = 0) : Ptr(P) {} +#if LLVM_USE_RVALUE_REFERENCES + OwningArrayPtr(OwningArrayPtr &&Other) : Ptr(Other.take()) {} + + OwningArrayPtr &operator=(OwningArrayPtr &&Other) { + reset(Other.take()); + return *this; + } +#endif + ~OwningArrayPtr() { delete [] Ptr; } diff --git a/include/llvm/ADT/PackedVector.h b/include/llvm/ADT/PackedVector.h index 2eaddc2..1ae2a77 100644 --- a/include/llvm/ADT/PackedVector.h +++ b/include/llvm/ADT/PackedVector.h @@ -19,32 +19,32 @@ namespace llvm { -template +template class PackedVectorBase; // This won't be necessary if we can specialize members without specializing // the parent template. -template -class PackedVectorBase { +template +class PackedVectorBase { protected: - static T getValue(const llvm::BitVector &Bits, unsigned Idx) { + static T getValue(const BitVectorTy &Bits, unsigned Idx) { T val = T(); for (unsigned i = 0; i != BitNum; ++i) val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i)); return val; } - static void setValue(llvm::BitVector &Bits, unsigned Idx, T val) { + static void setValue(BitVectorTy &Bits, unsigned Idx, T val) { assert((val >> BitNum) == 0 && "value is too big"); for (unsigned i = 0; i != BitNum; ++i) Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i); } }; -template -class PackedVectorBase { +template +class PackedVectorBase { protected: - static T getValue(const llvm::BitVector &Bits, unsigned Idx) { + static T getValue(const BitVectorTy &Bits, unsigned Idx) { T val = T(); for (unsigned i = 0; i != BitNum-1; ++i) val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i)); @@ -53,7 +53,7 @@ protected: return val; } - static void setValue(llvm::BitVector &Bits, unsigned Idx, T val) { + static void setValue(BitVectorTy &Bits, unsigned Idx, T val) { if (val < 0) { val = ~val; Bits.set((Idx << (BitNum-1)) + BitNum-1); @@ -71,11 +71,12 @@ protected: /// @endcode /// will create a vector accepting values -2, -1, 0, 1. Any other value will hit /// an assertion. 
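OwningPtr and OwningArrayPtr pick up move construction and move assignment, but only when the compiler supports rvalue references (the LLVM_USE_RVALUE_REFERENCES guard). A sketch under that assumption:

  #include "llvm/ADT/OwningPtr.h"
  #include <utility>

  void transfer() {
    llvm::OwningPtr<int> A(new int(7));
    llvm::OwningPtr<int> B(std::move(A));  // B owns the int; A.get() is now 0
    B.reset();                             // deletes the int
  }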
-template -class PackedVector : public PackedVectorBase +class PackedVector : public PackedVectorBase::is_signed> { - llvm::BitVector Bits; - typedef PackedVectorBase::is_signed> base; + BitVectorTy Bits; + typedef PackedVectorBase::is_signed> base; public: class reference { diff --git a/include/llvm/ADT/PointerIntPair.h b/include/llvm/ADT/PointerIntPair.h index fcc758b..71c379b 100644 --- a/include/llvm/ADT/PointerIntPair.h +++ b/include/llvm/ADT/PointerIntPair.h @@ -135,12 +135,12 @@ template struct DenseMapInfo > { typedef PointerIntPair Ty; static Ty getEmptyKey() { - intptr_t Val = -1; + uintptr_t Val = static_cast(-1); Val <<= PointerLikeTypeTraits::NumLowBitsAvailable; return Ty(reinterpret_cast(Val), IntType((1 << IntBits)-1)); } static Ty getTombstoneKey() { - intptr_t Val = -2; + uintptr_t Val = static_cast(-2); Val <<= PointerLikeTypeTraits::NumLowBitsAvailable; return Ty(reinterpret_cast(Val), IntType(0)); } diff --git a/include/llvm/ADT/ScopedHashTable.h b/include/llvm/ADT/ScopedHashTable.h index a6803ee..efddd9f 100644 --- a/include/llvm/ADT/ScopedHashTable.h +++ b/include/llvm/ADT/ScopedHashTable.h @@ -90,8 +90,8 @@ class ScopedHashTableScope { /// LastValInScope - This is the last value that was inserted for this scope /// or null if none have been inserted yet. ScopedHashTableVal *LastValInScope; - void operator=(ScopedHashTableScope&); // DO NOT IMPLEMENT - ScopedHashTableScope(ScopedHashTableScope&); // DO NOT IMPLEMENT + void operator=(ScopedHashTableScope&) LLVM_DELETED_FUNCTION; + ScopedHashTableScope(ScopedHashTableScope&) LLVM_DELETED_FUNCTION; public: ScopedHashTableScope(ScopedHashTable &HT); ~ScopedHashTableScope(); diff --git a/include/llvm/ADT/SetVector.h b/include/llvm/ADT/SetVector.h index 965f0de..d2f7286 100644 --- a/include/llvm/ADT/SetVector.h +++ b/include/llvm/ADT/SetVector.h @@ -27,10 +27,11 @@ namespace llvm { +/// \brief A vector that has set insertion semantics. +/// /// This adapter class provides a way to keep a set of things that also has the /// property of a deterministic iteration order. The order of iteration is the /// order of insertion. -/// @brief A vector that has set insertion semantics. template , typename Set = SmallSet > class SetVector { @@ -45,59 +46,59 @@ public: typedef typename vector_type::const_iterator const_iterator; typedef typename vector_type::size_type size_type; - /// @brief Construct an empty SetVector + /// \brief Construct an empty SetVector SetVector() {} - /// @brief Initialize a SetVector with a range of elements + /// \brief Initialize a SetVector with a range of elements template SetVector(It Start, It End) { insert(Start, End); } - /// @brief Determine if the SetVector is empty or not. + /// \brief Determine if the SetVector is empty or not. bool empty() const { return vector_.empty(); } - /// @brief Determine the number of elements in the SetVector. + /// \brief Determine the number of elements in the SetVector. size_type size() const { return vector_.size(); } - /// @brief Get an iterator to the beginning of the SetVector. + /// \brief Get an iterator to the beginning of the SetVector. iterator begin() { return vector_.begin(); } - /// @brief Get a const_iterator to the beginning of the SetVector. + /// \brief Get a const_iterator to the beginning of the SetVector. const_iterator begin() const { return vector_.begin(); } - /// @brief Get an iterator to the end of the SetVector. + /// \brief Get an iterator to the end of the SetVector. 
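PackedVector now takes the bit container as a template parameter (defaulting to BitVector); usage is unchanged, and two signed bits per element gives the -2..1 range the doc comment mentions. A sketch:

  #include "llvm/ADT/PackedVector.h"

  void packed() {
    llvm::PackedVector<signed, 2> Vec;  // each element stored in 2 bits
    Vec.push_back(1);
    Vec.push_back(-2);
    signed First = Vec[0];              // reads back 1
    (void)First;
  }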
iterator end() { return vector_.end(); } - /// @brief Get a const_iterator to the end of the SetVector. + /// \brief Get a const_iterator to the end of the SetVector. const_iterator end() const { return vector_.end(); } - /// @brief Return the last element of the SetVector. + /// \brief Return the last element of the SetVector. const T &back() const { assert(!empty() && "Cannot call back() on empty SetVector!"); return vector_.back(); } - /// @brief Index into the SetVector. + /// \brief Index into the SetVector. const_reference operator[](size_type n) const { assert(n < vector_.size() && "SetVector access out of range!"); return vector_[n]; } - /// @returns true iff the element was inserted into the SetVector. - /// @brief Insert a new element into the SetVector. + /// \brief Insert a new element into the SetVector. + /// \returns true iff the element was inserted into the SetVector. bool insert(const value_type &X) { bool result = set_.insert(X); if (result) @@ -105,7 +106,7 @@ public: return result; } - /// @brief Insert a range of elements into the SetVector. + /// \brief Insert a range of elements into the SetVector. template void insert(It Start, It End) { for (; Start != End; ++Start) @@ -113,7 +114,7 @@ public: vector_.push_back(*Start); } - /// @brief Remove an item from the set vector. + /// \brief Remove an item from the set vector. bool remove(const value_type& X) { if (set_.erase(X)) { typename vector_type::iterator I = @@ -125,20 +126,44 @@ public: return false; } - - /// @returns 0 if the element is not in the SetVector, 1 if it is. - /// @brief Count the number of elements of a given key in the SetVector. + /// \brief Remove items from the set vector based on a predicate function. + /// + /// This is intended to be equivalent to the following code, if we could + /// write it: + /// + /// \code + /// V.erase(std::remove_if(V.begin(), V.end(), P), V.end()); + /// \endcode + /// + /// However, SetVector doesn't expose non-const iterators, making any + /// algorithm like remove_if impossible to use. + /// + /// \returns true if any element is removed. + template + bool remove_if(UnaryPredicate P) { + typename vector_type::iterator I + = std::remove_if(vector_.begin(), vector_.end(), + TestAndEraseFromSet(P, set_)); + if (I == vector_.end()) + return false; + vector_.erase(I, vector_.end()); + return true; + } + + + /// \brief Count the number of elements of a given key in the SetVector. + /// \returns 0 if the element is not in the SetVector, 1 if it is. size_type count(const key_type &key) const { return set_.count(key); } - /// @brief Completely clear the SetVector + /// \brief Completely clear the SetVector void clear() { set_.clear(); vector_.clear(); } - /// @brief Remove the last element of the SetVector. + /// \brief Remove the last element of the SetVector. void pop_back() { assert(!empty() && "Cannot remove an element from an empty SetVector!"); set_.erase(back()); @@ -160,18 +185,41 @@ public: } private: + /// \brief A wrapper predicate designed for use with std::remove_if. + /// + /// This predicate wraps a predicate suitable for use with std::remove_if to + /// call set_.erase(x) on each element which is slated for removal. 
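SetVector::remove_if wants a predicate that exposes argument_type (the TestAndEraseFromSet wrapper shown below reads it), so a std::unary_function-style functor fits naturally; removal happens from the set and the vector in one pass. A sketch:

  #include "llvm/ADT/SetVector.h"
  #include <functional>

  struct IsOdd : std::unary_function<int, bool> {
    bool operator()(int V) const { return (V & 1) != 0; }
  };

  void prune() {
    llvm::SetVector<int> SV;
    SV.insert(1); SV.insert(2); SV.insert(3);
    bool Removed = SV.remove_if(IsOdd());  // drops 1 and 3, keeps 2
    (void)Removed;
  }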
+ template + class TestAndEraseFromSet { + UnaryPredicate P; + set_type &set_; + + public: + typedef typename UnaryPredicate::argument_type argument_type; + + TestAndEraseFromSet(UnaryPredicate P, set_type &set_) : P(P), set_(set_) {} + + bool operator()(argument_type Arg) { + if (P(Arg)) { + set_.erase(Arg); + return true; + } + return false; + } + }; + set_type set_; ///< The set. vector_type vector_; ///< The vector. }; -/// SmallSetVector - A SetVector that performs no allocations if smaller than +/// \brief A SetVector that performs no allocations if smaller than /// a certain size. template class SmallSetVector : public SetVector, SmallSet > { public: SmallSetVector() {} - /// @brief Initialize a SmallSetVector with a range of elements + /// \brief Initialize a SmallSetVector with a range of elements template SmallSetVector(It Start, It End) { this->insert(Start, End); diff --git a/include/llvm/ADT/SmallBitVector.h b/include/llvm/ADT/SmallBitVector.h index 7a645e0..a9cd54e 100644 --- a/include/llvm/ADT/SmallBitVector.h +++ b/include/llvm/ADT/SmallBitVector.h @@ -300,6 +300,21 @@ public: return *this; } + /// set - Efficiently set a range of bits in [I, E) + SmallBitVector &set(unsigned I, unsigned E) { + assert(I <= E && "Attempted to set backwards range!"); + assert(E <= size() && "Attempted to set out-of-bounds range!"); + if (I == E) return *this; + if (isSmall()) { + uintptr_t EMask = ((uintptr_t)1) << E; + uintptr_t IMask = ((uintptr_t)1) << I; + uintptr_t Mask = EMask - IMask; + setSmallBits(getSmallBits() | Mask); + } else + getPointer()->set(I, E); + return *this; + } + SmallBitVector &reset() { if (isSmall()) setSmallBits(0); @@ -316,6 +331,21 @@ public: return *this; } + /// reset - Efficiently reset a range of bits in [I, E) + SmallBitVector &reset(unsigned I, unsigned E) { + assert(I <= E && "Attempted to reset backwards range!"); + assert(E <= size() && "Attempted to reset out-of-bounds range!"); + if (I == E) return *this; + if (isSmall()) { + uintptr_t EMask = ((uintptr_t)1) << E; + uintptr_t IMask = ((uintptr_t)1) << I; + uintptr_t Mask = EMask - IMask; + setSmallBits(getSmallBits() & ~Mask); + } else + getPointer()->reset(I, E); + return *this; + } + SmallBitVector &flip() { if (isSmall()) setSmallBits(~getSmallBits()); diff --git a/include/llvm/ADT/SmallPtrSet.h b/include/llvm/ADT/SmallPtrSet.h index 498a034..3bb8830 100644 --- a/include/llvm/ADT/SmallPtrSet.h +++ b/include/llvm/ADT/SmallPtrSet.h @@ -15,12 +15,13 @@ #ifndef LLVM_ADT_SMALLPTRSET_H #define LLVM_ADT_SMALLPTRSET_H +#include "llvm/Support/Compiler.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/PointerLikeTypeTraits.h" #include #include #include #include -#include "llvm/Support/DataTypes.h" -#include "llvm/Support/PointerLikeTypeTraits.h" namespace llvm { @@ -132,7 +133,7 @@ private: /// Grow - Allocate a larger backing store for the buckets and move it over. void Grow(unsigned NewSize); - void operator=(const SmallPtrSetImpl &RHS); // DO NOT IMPLEMENT. + void operator=(const SmallPtrSetImpl &RHS) LLVM_DELETED_FUNCTION; protected: /// swap - Swaps the elements of two sets. /// Note: This method assumes that both sets have the same small size. diff --git a/include/llvm/ADT/SmallString.h b/include/llvm/ADT/SmallString.h index c6f0a5b..8da99d1 100644 --- a/include/llvm/ADT/SmallString.h +++ b/include/llvm/ADT/SmallString.h @@ -44,25 +44,25 @@ public: /// @name String Assignment /// @{ - /// Assign from a repeated element + /// Assign from a repeated element. 
void assign(size_t NumElts, char Elt) { this->SmallVectorImpl::assign(NumElts, Elt); } - /// Assign from an iterator pair + /// Assign from an iterator pair. template void assign(in_iter S, in_iter E) { this->clear(); SmallVectorImpl::append(S, E); } - /// Assign from a StringRef + /// Assign from a StringRef. void assign(StringRef RHS) { this->clear(); SmallVectorImpl::append(RHS.begin(), RHS.end()); } - /// Assign from a SmallVector + /// Assign from a SmallVector. void assign(const SmallVectorImpl &RHS) { this->clear(); SmallVectorImpl::append(RHS.begin(), RHS.end()); @@ -72,7 +72,7 @@ public: /// @name String Concatenation /// @{ - /// Append from an iterator pair + /// Append from an iterator pair. template void append(in_iter S, in_iter E) { SmallVectorImpl::append(S, E); @@ -83,12 +83,12 @@ public: } - /// Append from a StringRef + /// Append from a StringRef. void append(StringRef RHS) { SmallVectorImpl::append(RHS.begin(), RHS.end()); } - /// Append from a SmallVector + /// Append from a SmallVector. void append(const SmallVectorImpl &RHS) { SmallVectorImpl::append(RHS.begin(), RHS.end()); } @@ -97,19 +97,19 @@ public: /// @name String Comparison /// @{ - /// equals - Check for string equality, this is more efficient than - /// compare() when the relative ordering of inequal strings isn't needed. + /// Check for string equality. This is more efficient than compare() when + /// the relative ordering of inequal strings isn't needed. bool equals(StringRef RHS) const { return str().equals(RHS); } - /// equals_lower - Check for string equality, ignoring case. + /// Check for string equality, ignoring case. bool equals_lower(StringRef RHS) const { return str().equals_lower(RHS); } - /// compare - Compare two strings; the result is -1, 0, or 1 if this string - /// is lexicographically less than, equal to, or greater than the \arg RHS. + /// Compare two strings; the result is -1, 0, or 1 if this string is + /// lexicographically less than, equal to, or greater than the \p RHS. int compare(StringRef RHS) const { return str().compare(RHS); } @@ -129,12 +129,12 @@ public: /// @name String Predicates /// @{ - /// startswith - Check if this string starts with the given \arg Prefix. + /// startswith - Check if this string starts with the given \p Prefix. bool startswith(StringRef Prefix) const { return str().startswith(Prefix); } - /// endswith - Check if this string ends with the given \arg Suffix. + /// endswith - Check if this string ends with the given \p Suffix. bool endswith(StringRef Suffix) const { return str().endswith(Suffix); } @@ -143,76 +143,76 @@ public: /// @name String Searching /// @{ - /// find - Search for the first character \arg C in the string. + /// find - Search for the first character \p C in the string. /// - /// \return - The index of the first occurrence of \arg C, or npos if not + /// \return - The index of the first occurrence of \p C, or npos if not /// found. size_t find(char C, size_t From = 0) const { return str().find(C, From); } - /// find - Search for the first string \arg Str in the string. + /// Search for the first string \p Str in the string. /// - /// \return - The index of the first occurrence of \arg Str, or npos if not + /// \returns The index of the first occurrence of \p Str, or npos if not /// found. size_t find(StringRef Str, size_t From = 0) const { return str().find(Str, From); } - /// rfind - Search for the last character \arg C in the string. + /// Search for the last character \p C in the string. 
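The SmallString hunks are doc-comment rewording (\arg becomes \p); the search helpers they describe simply forward to StringRef. A quick sketch of that API:

  #include "llvm/ADT/SmallString.h"

  void search() {
    llvm::SmallString<32> S;
    S.append("hello world");
    size_t FirstO = S.find('o');               // 4
    size_t LastO  = S.rfind('o');              // 7
    size_t Vowel  = S.find_first_of("aeiou");  // 1
    (void)FirstO; (void)LastO; (void)Vowel;
  }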
/// - /// \return - The index of the last occurrence of \arg C, or npos if not + /// \returns The index of the last occurrence of \p C, or npos if not /// found. size_t rfind(char C, size_t From = StringRef::npos) const { return str().rfind(C, From); } - /// rfind - Search for the last string \arg Str in the string. + /// Search for the last string \p Str in the string. /// - /// \return - The index of the last occurrence of \arg Str, or npos if not + /// \returns The index of the last occurrence of \p Str, or npos if not /// found. size_t rfind(StringRef Str) const { return str().rfind(Str); } - /// find_first_of - Find the first character in the string that is \arg C, - /// or npos if not found. Same as find. + /// Find the first character in the string that is \p C, or npos if not + /// found. Same as find. size_t find_first_of(char C, size_t From = 0) const { return str().find_first_of(C, From); } - /// find_first_of - Find the first character in the string that is in \arg - /// Chars, or npos if not found. + /// Find the first character in the string that is in \p Chars, or npos if + /// not found. /// - /// Note: O(size() + Chars.size()) + /// Complexity: O(size() + Chars.size()) size_t find_first_of(StringRef Chars, size_t From = 0) const { return str().find_first_of(Chars, From); } - /// find_first_not_of - Find the first character in the string that is not - /// \arg C or npos if not found. + /// Find the first character in the string that is not \p C or npos if not + /// found. size_t find_first_not_of(char C, size_t From = 0) const { return str().find_first_not_of(C, From); } - /// find_first_not_of - Find the first character in the string that is not - /// in the string \arg Chars, or npos if not found. + /// Find the first character in the string that is not in the string + /// \p Chars, or npos if not found. /// - /// Note: O(size() + Chars.size()) + /// Complexity: O(size() + Chars.size()) size_t find_first_not_of(StringRef Chars, size_t From = 0) const { return str().find_first_not_of(Chars, From); } - /// find_last_of - Find the last character in the string that is \arg C, or - /// npos if not found. + /// Find the last character in the string that is \p C, or npos if not + /// found. size_t find_last_of(char C, size_t From = StringRef::npos) const { return str().find_last_of(C, From); } - /// find_last_of - Find the last character in the string that is in \arg C, - /// or npos if not found. + /// Find the last character in the string that is in \p C, or npos if not + /// found. /// - /// Note: O(size() + Chars.size()) + /// Complexity: O(size() + Chars.size()) size_t find_last_of( StringRef Chars, size_t From = StringRef::npos) const { return str().find_last_of(Chars, From); @@ -222,13 +222,13 @@ public: /// @name Helpful Algorithms /// @{ - /// count - Return the number of occurrences of \arg C in the string. + /// Return the number of occurrences of \p C in the string. size_t count(char C) const { return str().count(C); } - /// count - Return the number of non-overlapped occurrences of \arg Str in - /// the string. + /// Return the number of non-overlapped occurrences of \p Str in the + /// string. size_t count(StringRef Str) const { return str().count(Str); } @@ -237,36 +237,36 @@ public: /// @name Substring Operations /// @{ - /// substr - Return a reference to the substring from [Start, Start + N). + /// Return a reference to the substring from [Start, Start + N). 
/// - /// \param Start - The index of the starting character in the substring; if + /// \param Start The index of the starting character in the substring; if /// the index is npos or greater than the length of the string then the /// empty substring will be returned. /// - /// \param N - The number of characters to included in the substring. If N + /// \param N The number of characters to included in the substring. If \p N /// exceeds the number of characters remaining in the string, the string - /// suffix (starting with \arg Start) will be returned. + /// suffix (starting with \p Start) will be returned. StringRef substr(size_t Start, size_t N = StringRef::npos) const { return str().substr(Start, N); } - /// slice - Return a reference to the substring from [Start, End). + /// Return a reference to the substring from [Start, End). /// - /// \param Start - The index of the starting character in the substring; if + /// \param Start The index of the starting character in the substring; if /// the index is npos or greater than the length of the string then the /// empty substring will be returned. /// - /// \param End - The index following the last character to include in the - /// substring. If this is npos, or less than \arg Start, or exceeds the + /// \param End The index following the last character to include in the + /// substring. If this is npos, or less than \p Start, or exceeds the /// number of characters remaining in the string, the string suffix - /// (starting with \arg Start) will be returned. + /// (starting with \p Start) will be returned. StringRef slice(size_t Start, size_t End) const { return str().slice(Start, End); } // Extra methods. - /// Explicit conversion to StringRef + /// Explicit conversion to StringRef. StringRef str() const { return StringRef(this->begin(), this->size()); } // TODO: Make this const, if it's safe... diff --git a/include/llvm/ADT/SmallVector.h b/include/llvm/ADT/SmallVector.h index 9fbbbe4..6e0fd94 100644 --- a/include/llvm/ADT/SmallVector.h +++ b/include/llvm/ADT/SmallVector.h @@ -14,6 +14,7 @@ #ifndef LLVM_ADT_SMALLVECTOR_H #define LLVM_ADT_SMALLVECTOR_H +#include "llvm/Support/AlignOf.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/type_traits.h" #include @@ -32,44 +33,20 @@ class SmallVectorBase { protected: void *BeginX, *EndX, *CapacityX; - // Allocate raw space for N elements of type T. If T has a ctor or dtor, we - // don't want it to be automatically run, so we need to represent the space as - // something else. An array of char would work great, but might not be - // aligned sufficiently. Instead we use some number of union instances for - // the space, which guarantee maximal alignment. - union U { - double D; - long double LD; - long long L; - void *P; - } FirstEl; - // Space after 'FirstEl' is clobbered, do not add any instance vars after it. - protected: - SmallVectorBase(size_t Size) - : BeginX(&FirstEl), EndX(&FirstEl), CapacityX((char*)&FirstEl+Size) {} - - /// isSmall - Return true if this is a smallvector which has not had dynamic - /// memory allocated for it. - bool isSmall() const { - return BeginX == static_cast(&FirstEl); - } - - /// resetToSmall - Put this vector in a state of being small. 
- void resetToSmall() { - BeginX = EndX = CapacityX = &FirstEl; - } + SmallVectorBase(void *FirstEl, size_t Size) + : BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {} /// grow_pod - This is an implementation of the grow() method which only works /// on POD-like data types and is out of line to reduce code duplication. - void grow_pod(size_t MinSizeInBytes, size_t TSize); + void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize); public: /// size_in_bytes - This returns size()*sizeof(T). size_t size_in_bytes() const { return size_t((char*)EndX - (char*)BeginX); } - + /// capacity_in_bytes - This returns capacity()*sizeof(T). size_t capacity_in_bytes() const { return size_t((char*)CapacityX - (char*)BeginX); @@ -78,11 +55,41 @@ public: bool empty() const { return BeginX == EndX; } }; +template struct SmallVectorStorage; -template +/// SmallVectorTemplateCommon - This is the part of SmallVectorTemplateBase +/// which does not depend on whether the type T is a POD. The extra dummy +/// template argument is used by ArrayRef to avoid unnecessarily requiring T +/// to be complete. +template class SmallVectorTemplateCommon : public SmallVectorBase { +private: + template friend struct SmallVectorStorage; + + // Allocate raw space for N elements of type T. If T has a ctor or dtor, we + // don't want it to be automatically run, so we need to represent the space as + // something else. Use an array of char of sufficient alignment. + typedef llvm::AlignedCharArrayUnion U; + U FirstEl; + // Space after 'FirstEl' is clobbered, do not add any instance vars after it. + protected: - SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(Size) {} + SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {} + + void grow_pod(size_t MinSizeInBytes, size_t TSize) { + SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize); + } + + /// isSmall - Return true if this is a smallvector which has not had dynamic + /// memory allocated for it. + bool isSmall() const { + return BeginX == static_cast(&FirstEl); + } + + /// resetToSmall - Put this vector in a state of being small. + void resetToSmall() { + BeginX = EndX = CapacityX = &FirstEl; + } void setEnd(T *P) { this->EndX = P; } public: @@ -677,8 +684,8 @@ public: RHS.begin(), RHS.end()); } - /// set_size - Set the array size to \arg N, which the current array must have - /// enough capacity for. + /// Set the array size to \p N, which the current array must have enough + /// capacity for. /// /// This does not construct or destroy any elements in the vector. /// @@ -844,6 +851,17 @@ SmallVectorImpl &SmallVectorImpl::operator=(SmallVectorImpl &&RHS) { } #endif +/// Storage for the SmallVector elements which aren't contained in +/// SmallVectorTemplateCommon. There are 'N-1' elements here. The remaining '1' +/// element is in the base class. This is specialized for the N=1 and N=0 cases +/// to avoid allocating unnecessary storage. +template +struct SmallVectorStorage { + typename SmallVectorTemplateCommon::U InlineElts[N - 1]; +}; +template struct SmallVectorStorage {}; +template struct SmallVectorStorage {}; + /// SmallVector - This is a 'vector' (really, a variable-sized array), optimized /// for the case when the array is small. 
It contains some number of elements /// in-place, which allows it to avoid heap allocation when the actual number of @@ -854,41 +872,23 @@ SmallVectorImpl &SmallVectorImpl::operator=(SmallVectorImpl &&RHS) { /// template class SmallVector : public SmallVectorImpl { - /// InlineElts - These are 'N-1' elements that are stored inline in the body - /// of the vector. The extra '1' element is stored in SmallVectorImpl. - typedef typename SmallVectorImpl::U U; - enum { - // MinUs - The number of U's require to cover N T's. - MinUs = (static_cast(sizeof(T))*N + - static_cast(sizeof(U)) - 1) / - static_cast(sizeof(U)), - - // NumInlineEltsElts - The number of elements actually in this array. There - // is already one in the parent class, and we have to round up to avoid - // having a zero-element array. - NumInlineEltsElts = MinUs > 1 ? (MinUs - 1) : 1, - - // NumTsAvailable - The number of T's we actually have space for, which may - // be more than N due to rounding. - NumTsAvailable = (NumInlineEltsElts+1)*static_cast(sizeof(U))/ - static_cast(sizeof(T)) - }; - U InlineElts[NumInlineEltsElts]; + /// Storage - Inline space for elements which aren't stored in the base class. + SmallVectorStorage Storage; public: - SmallVector() : SmallVectorImpl(NumTsAvailable) { + SmallVector() : SmallVectorImpl(N) { } explicit SmallVector(unsigned Size, const T &Value = T()) - : SmallVectorImpl(NumTsAvailable) { + : SmallVectorImpl(N) { this->assign(Size, Value); } template - SmallVector(ItTy S, ItTy E) : SmallVectorImpl(NumTsAvailable) { + SmallVector(ItTy S, ItTy E) : SmallVectorImpl(N) { this->append(S, E); } - SmallVector(const SmallVector &RHS) : SmallVectorImpl(NumTsAvailable) { + SmallVector(const SmallVector &RHS) : SmallVectorImpl(N) { if (!RHS.empty()) SmallVectorImpl::operator=(RHS); } @@ -899,7 +899,7 @@ public: } #if LLVM_USE_RVALUE_REFERENCES - SmallVector(SmallVector &&RHS) : SmallVectorImpl(NumTsAvailable) { + SmallVector(SmallVector &&RHS) : SmallVectorImpl(N) { if (!RHS.empty()) SmallVectorImpl::operator=(::std::move(RHS)); } @@ -912,48 +912,6 @@ public: }; -/// Specialize SmallVector at N=0. This specialization guarantees -/// that it can be instantiated at an incomplete T if none of its -/// members are required. -template -class SmallVector : public SmallVectorImpl { -public: - SmallVector() : SmallVectorImpl(0) { - } - - explicit SmallVector(unsigned Size, const T &Value = T()) - : SmallVectorImpl(0) { - this->assign(Size, Value); - } - - template - SmallVector(ItTy S, ItTy E) : SmallVectorImpl(0) { - this->append(S, E); - } - - SmallVector(const SmallVector &RHS) : SmallVectorImpl(0) { - if (!RHS.empty()) - SmallVectorImpl::operator=(RHS); - } - - const SmallVector &operator=(const SmallVector &RHS) { - SmallVectorImpl::operator=(RHS); - return *this; - } - -#if LLVM_USE_RVALUE_REFERENCES - SmallVector(SmallVector &&RHS) : SmallVectorImpl(0) { - if (!RHS.empty()) - SmallVectorImpl::operator=(::std::move(RHS)); - } - - const SmallVector &operator=(SmallVector &&RHS) { - SmallVectorImpl::operator=(::std::move(RHS)); - return *this; - } -#endif -}; - template static inline size_t capacity_in_bytes(const SmallVector &X) { return X.capacity_in_bytes(); diff --git a/include/llvm/ADT/SparseBitVector.h b/include/llvm/ADT/SparseBitVector.h index 89774c3..306e928 100644 --- a/include/llvm/ADT/SparseBitVector.h +++ b/include/llvm/ADT/SparseBitVector.h @@ -158,7 +158,7 @@ public: && "Word Position outside of element"); // Mask off previous bits. 
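The SmallVector rework above replaces the old union-of-primitives alignment hack with AlignedCharArrayUnion and moves the extra inline slots into SmallVectorStorage<T, N>, whose N==1 and N==0 specializations are empty because one slot already lives in SmallVectorTemplateCommon; SmallVector<T, N> can then pass N straight through instead of the old rounded NumTsAvailable, and the separate N=0 class specialization goes away. A simplified model of how those pieces fit together (illustrative, not the real header):

// Simplified model of the new inline-storage layout; illustrative only.
#include <cstddef>

template <typename T> struct AlignedSlot {      // stands in for AlignedCharArrayUnion<T>
  alignas(T) char Buf[sizeof(T)];
};

template <typename T> struct CommonBase {       // stands in for SmallVectorTemplateCommon<T>
  void *BeginX, *EndX, *CapacityX;
  AlignedSlot<T> FirstEl;                       // exactly one inline slot lives in the base
  explicit CommonBase(size_t SizeInBytes)
      : BeginX(&FirstEl), EndX(&FirstEl),
        CapacityX(reinterpret_cast<char *>(&FirstEl) + SizeInBytes) {}
  bool isSmall() const { return BeginX == static_cast<const void *>(&FirstEl); }
};

// The remaining N-1 slots, mirroring SmallVectorStorage in the hunk above.
template <typename T, unsigned N> struct Storage { AlignedSlot<T> InlineElts[N - 1]; };
template <typename T> struct Storage<T, 1> {};  // nothing beyond the base's slot
template <typename T> struct Storage<T, 0> {};  // usable even with incomplete T

// In practice the storage member directly follows the base subobject, so
// &FirstEl starts a contiguous run of N slots and the vector can claim
// N * sizeof(T) bytes of inline capacity.
template <typename T, unsigned N> struct VecModel : CommonBase<T> {
  Storage<T, N> Inline;
  VecModel() : CommonBase<T>(N * sizeof(T)) {}
};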
- Copy &= ~0L << BitPos; + Copy &= ~0UL << BitPos; if (Copy != 0) { if (sizeof(BitWord) == 4) @@ -262,6 +262,22 @@ public: } }; +template +struct ilist_traits > + : public ilist_default_traits > { + typedef SparseBitVectorElement Element; + + Element *createSentinel() const { return static_cast(&Sentinel); } + static void destroySentinel(Element *) {} + + Element *provideInitialHead() const { return createSentinel(); } + Element *ensureHead(Element *) const { return createSentinel(); } + static void noteHead(Element *, Element *) {} + +private: + mutable ilist_half_node Sentinel; +}; + template class SparseBitVector { typedef ilist > ElementList; diff --git a/include/llvm/ADT/SparseSet.h b/include/llvm/ADT/SparseSet.h index 55696333..063c675 100644 --- a/include/llvm/ADT/SparseSet.h +++ b/include/llvm/ADT/SparseSet.h @@ -110,9 +110,9 @@ struct SparseSetValFunctor { /// For sets that may grow to thousands of elements, SparseT should be set to /// uint16_t or uint32_t. /// -/// @param ValueT The type of objects in the set. -/// @param KeyFunctorT A functor that computes an unsigned index from KeyT. -/// @param SparseT An unsigned integer type. See above. +/// @tparam ValueT The type of objects in the set. +/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT. +/// @tparam SparseT An unsigned integer type. See above. /// template, @@ -128,8 +128,8 @@ class SparseSet { // Disable copy construction and assignment. // This data structure is not meant to be used that way. - SparseSet(const SparseSet&); // DO NOT IMPLEMENT. - SparseSet &operator=(const SparseSet&); // DO NOT IMPLEMENT. + SparseSet(const SparseSet&) LLVM_DELETED_FUNCTION; + SparseSet &operator=(const SparseSet&) LLVM_DELETED_FUNCTION; public: typedef ValueT value_type; diff --git a/include/llvm/ADT/StringExtras.h b/include/llvm/ADT/StringExtras.h index 655d884..bf27c43 100644 --- a/include/llvm/ADT/StringExtras.h +++ b/include/llvm/ADT/StringExtras.h @@ -21,7 +21,7 @@ namespace llvm { template class SmallVectorImpl; /// hexdigit - Return the hexadecimal character for the -/// given number \arg X (which should be less than 16). +/// given number \p X (which should be less than 16). static inline char hexdigit(unsigned X, bool LowerCase = false) { const char HexChar = LowerCase ? 'a' : 'A'; return X < 10 ? '0' + X : HexChar + X - 10; @@ -125,10 +125,29 @@ void SplitString(StringRef Source, // X*33+c -> X*33^c static inline unsigned HashString(StringRef Str, unsigned Result = 0) { for (unsigned i = 0, e = Str.size(); i != e; ++i) - Result = Result * 33 + Str[i]; + Result = Result * 33 + (unsigned char)Str[i]; return Result; } +/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th). +static inline StringRef getOrdinalSuffix(unsigned Val) { + // It is critically important that we do this perfectly for + // user-written sequences with over 100 elements. + switch (Val % 100) { + case 11: + case 12: + case 13: + return "th"; + default: + switch (Val % 10) { + case 1: return "st"; + case 2: return "nd"; + case 3: return "rd"; + default: return "th"; + } + } +} + } // End llvm namespace #endif diff --git a/include/llvm/ADT/StringRef.h b/include/llvm/ADT/StringRef.h index cd84603..292bde0 100644 --- a/include/llvm/ADT/StringRef.h +++ b/include/llvm/ADT/StringRef.h @@ -138,7 +138,7 @@ namespace llvm { } /// compare - Compare two strings; the result is -1, 0, or 1 if this string - /// is lexicographically less than, equal to, or greater than the \arg RHS. 
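Two small behavioral changes hide in the StringExtras.h hunk above: HashString now hashes through unsigned char, so bytes above 0x7F hash the same whether plain char is signed or not, and the new getOrdinalSuffix special-cases 11-13 before falling back to the last digit. A quick check of the suffix rule (illustrative, not from the imported headers):

// Illustrative check of getOrdinalSuffix; not part of the imported headers.
#include "llvm/ADT/StringExtras.h"
#include <cassert>
using namespace llvm;

void ordinalSuffixDemo() {
  assert(getOrdinalSuffix(1) == "st");
  assert(getOrdinalSuffix(2) == "nd");
  assert(getOrdinalSuffix(3) == "rd");
  assert(getOrdinalSuffix(4) == "th");
  // 11, 12 and 13 take "th" even though they end in 1, 2 and 3 ...
  assert(getOrdinalSuffix(11) == "th");
  assert(getOrdinalSuffix(112) == "th");
  // ... while 21, 102 and 123 follow the last-digit rule.
  assert(getOrdinalSuffix(21) == "st");
  assert(getOrdinalSuffix(102) == "nd");
  assert(getOrdinalSuffix(123) == "rd");
}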
+ /// is lexicographically less than, equal to, or greater than the \p RHS. int compare(StringRef RHS) const { // Check the prefix for a mismatch. if (int Res = compareMemory(Data, RHS.Data, min(Length, RHS.Length))) @@ -205,13 +205,13 @@ namespace llvm { /// @name String Predicates /// @{ - /// startswith - Check if this string starts with the given \arg Prefix. + /// Check if this string starts with the given \p Prefix. bool startswith(StringRef Prefix) const { return Length >= Prefix.Length && compareMemory(Data, Prefix.Data, Prefix.Length) == 0; } - /// endswith - Check if this string ends with the given \arg Suffix. + /// Check if this string ends with the given \p Suffix. bool endswith(StringRef Suffix) const { return Length >= Suffix.Length && compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0; @@ -221,9 +221,9 @@ namespace llvm { /// @name String Searching /// @{ - /// find - Search for the first character \arg C in the string. + /// Search for the first character \p C in the string. /// - /// \return - The index of the first occurrence of \arg C, or npos if not + /// \returns The index of the first occurrence of \p C, or npos if not /// found. size_t find(char C, size_t From = 0) const { for (size_t i = min(From, Length), e = Length; i != e; ++i) @@ -232,15 +232,15 @@ namespace llvm { return npos; } - /// find - Search for the first string \arg Str in the string. + /// Search for the first string \p Str in the string. /// - /// \return - The index of the first occurrence of \arg Str, or npos if not + /// \returns The index of the first occurrence of \p Str, or npos if not /// found. size_t find(StringRef Str, size_t From = 0) const; - /// rfind - Search for the last character \arg C in the string. + /// Search for the last character \p C in the string. /// - /// \return - The index of the last occurrence of \arg C, or npos if not + /// \returns The index of the last occurrence of \p C, or npos if not /// found. size_t rfind(char C, size_t From = npos) const { From = min(From, Length); @@ -253,61 +253,61 @@ namespace llvm { return npos; } - /// rfind - Search for the last string \arg Str in the string. + /// Search for the last string \p Str in the string. /// - /// \return - The index of the last occurrence of \arg Str, or npos if not + /// \returns The index of the last occurrence of \p Str, or npos if not /// found. size_t rfind(StringRef Str) const; - /// find_first_of - Find the first character in the string that is \arg C, - /// or npos if not found. Same as find. + /// Find the first character in the string that is \p C, or npos if not + /// found. Same as find. size_type find_first_of(char C, size_t From = 0) const { return find(C, From); } - /// find_first_of - Find the first character in the string that is in \arg - /// Chars, or npos if not found. + /// Find the first character in the string that is in \p Chars, or npos if + /// not found. /// - /// Note: O(size() + Chars.size()) + /// Complexity: O(size() + Chars.size()) size_type find_first_of(StringRef Chars, size_t From = 0) const; - /// find_first_not_of - Find the first character in the string that is not - /// \arg C or npos if not found. + /// Find the first character in the string that is not \p C or npos if not + /// found. size_type find_first_not_of(char C, size_t From = 0) const; - /// find_first_not_of - Find the first character in the string that is not - /// in the string \arg Chars, or npos if not found. 
+ /// Find the first character in the string that is not in the string + /// \p Chars, or npos if not found. /// - /// Note: O(size() + Chars.size()) + /// Complexity: O(size() + Chars.size()) size_type find_first_not_of(StringRef Chars, size_t From = 0) const; - /// find_last_of - Find the last character in the string that is \arg C, or - /// npos if not found. + /// Find the last character in the string that is \p C, or npos if not + /// found. size_type find_last_of(char C, size_t From = npos) const { return rfind(C, From); } - /// find_last_of - Find the last character in the string that is in \arg C, - /// or npos if not found. + /// Find the last character in the string that is in \p C, or npos if not + /// found. /// - /// Note: O(size() + Chars.size()) + /// Complexity: O(size() + Chars.size()) size_type find_last_of(StringRef Chars, size_t From = npos) const; - /// find_last_not_of - Find the last character in the string that is not - /// \arg C, or npos if not found. + /// Find the last character in the string that is not \p C, or npos if not + /// found. size_type find_last_not_of(char C, size_t From = npos) const; - /// find_last_not_of - Find the last character in the string that is not in - /// \arg Chars, or npos if not found. + /// Find the last character in the string that is not in \p Chars, or + /// npos if not found. /// - /// Note: O(size() + Chars.size()) + /// Complexity: O(size() + Chars.size()) size_type find_last_not_of(StringRef Chars, size_t From = npos) const; /// @} /// @name Helpful Algorithms /// @{ - /// count - Return the number of occurrences of \arg C in the string. + /// Return the number of occurrences of \p C in the string. size_t count(char C) const { size_t Count = 0; for (size_t i = 0, e = Length; i != e; ++i) @@ -316,18 +316,17 @@ namespace llvm { return Count; } - /// count - Return the number of non-overlapped occurrences of \arg Str in + /// Return the number of non-overlapped occurrences of \p Str in /// the string. size_t count(StringRef Str) const; - /// getAsInteger - Parse the current string as an integer of the specified - /// radix. If Radix is specified as zero, this does radix autosensing using + /// Parse the current string as an integer of the specified radix. If + /// \p Radix is specified as zero, this does radix autosensing using /// extended C rules: 0 is octal, 0x is hex, 0b is binary. /// /// If the string is invalid or if only a subset of the string is valid, /// this returns true to signify the error. The string is considered /// erroneous if empty or if it overflows T. - /// template typename enable_if_c::is_signed, bool>::type getAsInteger(unsigned Radix, T &Result) const { @@ -350,13 +349,12 @@ namespace llvm { return false; } - /// getAsInteger - Parse the current string as an integer of the - /// specified radix, or of an autosensed radix if the radix given - /// is 0. The current value in Result is discarded, and the - /// storage is changed to be wide enough to store the parsed - /// integer. + /// Parse the current string as an integer of the specified \p Radix, or of + /// an autosensed radix if the \p Radix given is 0. The current value in + /// \p Result is discarded, and the storage is changed to be wide enough to + /// store the parsed integer. /// - /// Returns true if the string does not solely consist of a valid + /// \returns true if the string does not solely consist of a valid /// non-empty number in the appropriate base. 
/// /// APInt::fromString is superficially similar but assumes the @@ -367,70 +365,70 @@ namespace llvm { /// @name String Operations /// @{ - // lower - Convert the given ASCII string to lowercase. + // Convert the given ASCII string to lowercase. std::string lower() const; - /// upper - Convert the given ASCII string to uppercase. + /// Convert the given ASCII string to uppercase. std::string upper() const; /// @} /// @name Substring Operations /// @{ - /// substr - Return a reference to the substring from [Start, Start + N). + /// Return a reference to the substring from [Start, Start + N). /// - /// \param Start - The index of the starting character in the substring; if + /// \param Start The index of the starting character in the substring; if /// the index is npos or greater than the length of the string then the /// empty substring will be returned. /// - /// \param N - The number of characters to included in the substring. If N + /// \param N The number of characters to included in the substring. If N /// exceeds the number of characters remaining in the string, the string - /// suffix (starting with \arg Start) will be returned. + /// suffix (starting with \p Start) will be returned. StringRef substr(size_t Start, size_t N = npos) const { Start = min(Start, Length); return StringRef(Data + Start, min(N, Length - Start)); } - /// drop_front - Return a StringRef equal to 'this' but with the first - /// elements dropped. + /// Return a StringRef equal to 'this' but with the first \p N elements + /// dropped. StringRef drop_front(unsigned N = 1) const { assert(size() >= N && "Dropping more elements than exist"); return substr(N); } - /// drop_back - Return a StringRef equal to 'this' but with the last - /// elements dropped. + /// Return a StringRef equal to 'this' but with the last \p N elements + /// dropped. StringRef drop_back(unsigned N = 1) const { assert(size() >= N && "Dropping more elements than exist"); return substr(0, size()-N); } - /// slice - Return a reference to the substring from [Start, End). + /// Return a reference to the substring from [Start, End). /// - /// \param Start - The index of the starting character in the substring; if + /// \param Start The index of the starting character in the substring; if /// the index is npos or greater than the length of the string then the /// empty substring will be returned. /// - /// \param End - The index following the last character to include in the - /// substring. If this is npos, or less than \arg Start, or exceeds the + /// \param End The index following the last character to include in the + /// substring. If this is npos, or less than \p Start, or exceeds the /// number of characters remaining in the string, the string suffix - /// (starting with \arg Start) will be returned. + /// (starting with \p Start) will be returned. StringRef slice(size_t Start, size_t End) const { Start = min(Start, Length); End = min(max(Start, End), Length); return StringRef(Data + Start, End - Start); } - /// split - Split into two substrings around the first occurrence of a - /// separator character. + /// Split into two substrings around the first occurrence of a separator + /// character. /// - /// If \arg Separator is in the string, then the result is a pair (LHS, RHS) + /// If \p Separator is in the string, then the result is a pair (LHS, RHS) /// such that (*this == LHS + Separator + RHS) is true and RHS is - /// maximal. If \arg Separator is not in the string, then the result is a + /// maximal. 
If \p Separator is not in the string, then the result is a /// pair (LHS, RHS) where (*this == LHS) and (RHS == ""). /// - /// \param Separator - The character to split on. - /// \return - The split substrings. + /// \param Separator The character to split on. + /// \returns The split substrings. std::pair split(char Separator) const { size_t Idx = find(Separator); if (Idx == npos) @@ -438,12 +436,12 @@ namespace llvm { return std::make_pair(slice(0, Idx), slice(Idx+1, npos)); } - /// split - Split into two substrings around the first occurrence of a - /// separator string. + /// Split into two substrings around the first occurrence of a separator + /// string. /// - /// If \arg Separator is in the string, then the result is a pair (LHS, RHS) + /// If \p Separator is in the string, then the result is a pair (LHS, RHS) /// such that (*this == LHS + Separator + RHS) is true and RHS is - /// maximal. If \arg Separator is not in the string, then the result is a + /// maximal. If \p Separator is not in the string, then the result is a /// pair (LHS, RHS) where (*this == LHS) and (RHS == ""). /// /// \param Separator - The string to split on. @@ -455,14 +453,13 @@ namespace llvm { return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos)); } - /// split - Split into substrings around the occurrences of a separator - /// string. + /// Split into substrings around the occurrences of a separator string. /// - /// Each substring is stored in \arg A. If \arg MaxSplit is >= 0, at most - /// \arg MaxSplit splits are done and consequently <= \arg MaxSplit + /// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most + /// \p MaxSplit splits are done and consequently <= \p MaxSplit /// elements are added to A. - /// If \arg KeepEmpty is false, empty strings are not added to \arg A. They - /// still count when considering \arg MaxSplit + /// If \p KeepEmpty is false, empty strings are not added to \p A. They + /// still count when considering \p MaxSplit /// An useful invariant is that /// Separator.join(A) == *this if MaxSplit == -1 and KeepEmpty == true /// @@ -474,12 +471,12 @@ namespace llvm { StringRef Separator, int MaxSplit = -1, bool KeepEmpty = true) const; - /// rsplit - Split into two substrings around the last occurrence of a - /// separator character. + /// Split into two substrings around the last occurrence of a separator + /// character. /// - /// If \arg Separator is in the string, then the result is a pair (LHS, RHS) + /// If \p Separator is in the string, then the result is a pair (LHS, RHS) /// such that (*this == LHS + Separator + RHS) is true and RHS is - /// minimal. If \arg Separator is not in the string, then the result is a + /// minimal. If \p Separator is not in the string, then the result is a /// pair (LHS, RHS) where (*this == LHS) and (RHS == ""). /// /// \param Separator - The character to split on. @@ -491,20 +488,20 @@ namespace llvm { return std::make_pair(slice(0, Idx), slice(Idx+1, npos)); } - /// ltrim - Return string with consecutive characters in \arg Chars starting - /// from the left removed. + /// Return string with consecutive characters in \p Chars starting from + /// the left removed. StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const { return drop_front(std::min(Length, find_first_not_of(Chars))); } - /// rtrim - Return string with consecutive characters in \arg Chars starting - /// from the right removed. + /// Return string with consecutive characters in \p Chars starting from + /// the right removed. 
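The StringRef hunks above are documentation-only, but the contracts they restate are easy to pin down: split() cuts at the first separator and drops it from both halves, rsplit() cuts at the last, ltrim/rtrim/trim strip any character in the Chars set, and getAsInteger with radix 0 autosenses the base. A short sketch of those behaviors (illustrative, not from the imported headers):

// Illustrative sketch of the StringRef contracts documented above.
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <utility>
using namespace llvm;

void stringRefDemo() {
  StringRef S("  key=value=42  ");

  StringRef T = S.trim();                                  // "key=value=42"
  std::pair<StringRef, StringRef> KV = T.split('=');
  assert(KV.first == "key" && KV.second == "value=42");    // first '='

  std::pair<StringRef, StringRef> RV = T.rsplit('=');
  assert(RV.first == "key=value" && RV.second == "42");    // last '='

  unsigned N = 0;
  bool Failed = RV.second.getAsInteger(/*Radix=*/0, N);    // autosense: decimal
  assert(!Failed && N == 42);
}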
StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const { return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1)); } - /// trim - Return string with consecutive characters in \arg Chars starting - /// from the left and right removed. + /// Return string with consecutive characters in \p Chars starting from + /// the left and right removed. StringRef trim(StringRef Chars = " \t\n\v\f\r") const { return ltrim(Chars).rtrim(Chars); } diff --git a/include/llvm/ADT/StringSet.h b/include/llvm/ADT/StringSet.h index 9c55f6b..b69a964 100644 --- a/include/llvm/ADT/StringSet.h +++ b/include/llvm/ADT/StringSet.h @@ -29,8 +29,13 @@ namespace llvm { assert(!InLang.empty()); const char *KeyStart = InLang.data(); const char *KeyEnd = KeyStart + InLang.size(); - return base::insert(llvm::StringMapEntry:: - Create(KeyStart, KeyEnd, base::getAllocator(), '+')); + llvm::StringMapEntry *Entry = llvm::StringMapEntry:: + Create(KeyStart, KeyEnd, base::getAllocator(), '+'); + if (!base::insert(Entry)) { + Entry->Destroy(base::getAllocator()); + return false; + } + return true; } }; } diff --git a/include/llvm/ADT/Trie.h b/include/llvm/ADT/Trie.h deleted file mode 100644 index 845af01..0000000 --- a/include/llvm/ADT/Trie.h +++ /dev/null @@ -1,334 +0,0 @@ -//===- llvm/ADT/Trie.h ---- Generic trie structure --------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This class defines a generic trie structure. The trie structure -// is immutable after creation, but the payload contained within it is not. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_ADT_TRIE_H -#define LLVM_ADT_TRIE_H - -#include "llvm/ADT/GraphTraits.h" -#include "llvm/Support/DOTGraphTraits.h" - -#include -#include - -namespace llvm { - -// FIXME: -// - Labels are usually small, maybe it's better to use SmallString -// - Should we use char* during construction? -// - Should we templatize Empty with traits-like interface? 
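The StringSet::insert change above only matters for duplicate keys: the entry speculatively created from the set's allocator used to be dropped on the floor when insertion failed, and is now destroyed before insert returns false. A small usage sketch (illustrative, not from the imported headers):

// Illustrative sketch of StringSet::insert after the fix above.
#include "llvm/ADT/StringSet.h"
#include <cassert>
using namespace llvm;

void stringSetDemo() {
  StringSet<> Names;
  assert(Names.insert("clang"));          // new key: entry kept, returns true
  assert(Names.insert("llvm"));
  // Duplicate key: the temporary entry is destroyed instead of leaking,
  // and insert() reports failure.
  assert(!Names.insert("clang"));
  assert(Names.find("clang") != Names.end());
}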
- -template -class Trie { - friend class GraphTraits >; - friend class DOTGraphTraits >; -public: - class Node { - friend class Trie; - - public: - typedef std::vector NodeVectorType; - typedef typename NodeVectorType::iterator iterator; - typedef typename NodeVectorType::const_iterator const_iterator; - - private: - enum QueryResult { - Same = -3, - StringIsPrefix = -2, - LabelIsPrefix = -1, - DontMatch = 0, - HaveCommonPart - }; - - struct NodeCmp { - bool operator() (Node* N1, Node* N2) { - return (N1->Label[0] < N2->Label[0]); - } - bool operator() (Node* N, char Id) { - return (N->Label[0] < Id); - } - }; - - std::string Label; - Payload Data; - NodeVectorType Children; - - // Do not implement - Node(const Node&); - Node& operator=(const Node&); - - inline void addEdge(Node* N) { - if (Children.empty()) - Children.push_back(N); - else { - iterator I = std::lower_bound(Children.begin(), Children.end(), - N, NodeCmp()); - // FIXME: no dups are allowed - Children.insert(I, N); - } - } - - inline void setEdge(Node* N) { - char Id = N->Label[0]; - iterator I = std::lower_bound(Children.begin(), Children.end(), - Id, NodeCmp()); - assert(I != Children.end() && "Node does not exists!"); - *I = N; - } - - QueryResult query(const std::string& s) const { - unsigned i, l; - unsigned l1 = s.length(); - unsigned l2 = Label.length(); - - // Find the length of common part - l = std::min(l1, l2); - i = 0; - while ((i < l) && (s[i] == Label[i])) - ++i; - - if (i == l) { // One is prefix of another, find who is who - if (l1 == l2) - return Same; - else if (i == l1) - return StringIsPrefix; - else - return LabelIsPrefix; - } else // s and Label have common (possible empty) part, return its length - return (QueryResult)i; - } - - public: - inline explicit Node(const Payload& data, const std::string& label = ""): - Label(label), Data(data) { } - - inline const Payload& data() const { return Data; } - inline void setData(const Payload& data) { Data = data; } - - inline const std::string& label() const { return Label; } - -#if 0 - inline void dump() { - llvm::cerr << "Node: " << this << "\n" - << "Label: " << Label << "\n" - << "Children:\n"; - - for (iterator I = Children.begin(), E = Children.end(); I != E; ++I) - llvm::cerr << (*I)->Label << "\n"; - } -#endif - - inline Node* getEdge(char Id) { - Node* fNode = NULL; - iterator I = std::lower_bound(Children.begin(), Children.end(), - Id, NodeCmp()); - if (I != Children.end() && (*I)->Label[0] == Id) - fNode = *I; - - return fNode; - } - - inline iterator begin() { return Children.begin(); } - inline const_iterator begin() const { return Children.begin(); } - inline iterator end () { return Children.end(); } - inline const_iterator end () const { return Children.end(); } - - inline size_t size () const { return Children.size(); } - inline bool empty() const { return Children.empty(); } - inline const Node* &front() const { return Children.front(); } - inline Node* &front() { return Children.front(); } - inline const Node* &back() const { return Children.back(); } - inline Node* &back() { return Children.back(); } - - }; - -private: - std::vector Nodes; - Payload Empty; - - inline Node* addNode(const Payload& data, const std::string label = "") { - Node* N = new Node(data, label); - Nodes.push_back(N); - return N; - } - - inline Node* splitEdge(Node* N, char Id, size_t index) { - Node* eNode = N->getEdge(Id); - assert(eNode && "Node doesn't exist"); - - const std::string &l = eNode->Label; - assert(index > 0 && index < l.length() && "Trying to split too 
far!"); - std::string l1 = l.substr(0, index); - std::string l2 = l.substr(index); - - Node* nNode = addNode(Empty, l1); - N->setEdge(nNode); - - eNode->Label = l2; - nNode->addEdge(eNode); - - return nNode; - } - - // Do not implement - Trie(const Trie&); - Trie& operator=(const Trie&); - -public: - inline explicit Trie(const Payload& empty):Empty(empty) { - addNode(Empty); - } - inline ~Trie() { - for (unsigned i = 0, e = Nodes.size(); i != e; ++i) - delete Nodes[i]; - } - - inline Node* getRoot() const { return Nodes[0]; } - - bool addString(const std::string& s, const Payload& data); - const Payload& lookup(const std::string& s) const; - -}; - -// Define this out-of-line to dissuade the C++ compiler from inlining it. -template -bool Trie::addString(const std::string& s, const Payload& data) { - Node* cNode = getRoot(); - Node* tNode = NULL; - std::string s1(s); - - while (tNode == NULL) { - char Id = s1[0]; - if (Node* nNode = cNode->getEdge(Id)) { - typename Node::QueryResult r = nNode->query(s1); - - switch (r) { - case Node::Same: - case Node::StringIsPrefix: - // Currently we don't allow to have two strings in the trie one - // being a prefix of another. This should be fixed. - assert(0 && "FIXME!"); - return false; - case Node::DontMatch: - llvm_unreachable("Impossible!"); - case Node::LabelIsPrefix: - s1 = s1.substr(nNode->label().length()); - cNode = nNode; - break; - default: - nNode = splitEdge(cNode, Id, r); - tNode = addNode(data, s1.substr(r)); - nNode->addEdge(tNode); - } - } else { - tNode = addNode(data, s1); - cNode->addEdge(tNode); - } - } - - return true; -} - -template -const Payload& Trie::lookup(const std::string& s) const { - Node* cNode = getRoot(); - Node* tNode = NULL; - std::string s1(s); - - while (tNode == NULL) { - char Id = s1[0]; - if (Node* nNode = cNode->getEdge(Id)) { - typename Node::QueryResult r = nNode->query(s1); - - switch (r) { - case Node::Same: - tNode = nNode; - break; - case Node::StringIsPrefix: - return Empty; - case Node::DontMatch: - llvm_unreachable("Impossible!"); - case Node::LabelIsPrefix: - s1 = s1.substr(nNode->label().length()); - cNode = nNode; - break; - default: - return Empty; - } - } else - return Empty; - } - - return tNode->data(); -} - -template -struct GraphTraits > { - typedef Trie TrieType; - typedef typename TrieType::Node NodeType; - typedef typename NodeType::iterator ChildIteratorType; - - static inline NodeType *getEntryNode(const TrieType& T) { - return T.getRoot(); - } - - static inline ChildIteratorType child_begin(NodeType *N) { - return N->begin(); - } - static inline ChildIteratorType child_end(NodeType *N) { return N->end(); } - - typedef typename std::vector::const_iterator nodes_iterator; - - static inline nodes_iterator nodes_begin(const TrieType& G) { - return G.Nodes.begin(); - } - static inline nodes_iterator nodes_end(const TrieType& G) { - return G.Nodes.end(); - } - -}; - -template -struct DOTGraphTraits > : public DefaultDOTGraphTraits { - typedef typename Trie::Node NodeType; - typedef typename GraphTraits >::ChildIteratorType EdgeIter; - - static std::string getGraphName(const Trie& T) { - return "Trie"; - } - - static std::string getNodeLabel(NodeType* Node, const Trie& T) { - if (T.getRoot() == Node) - return ""; - else - return Node->label(); - } - - static std::string getEdgeSourceLabel(NodeType* Node, EdgeIter I) { - NodeType* N = *I; - return N->label().substr(0, 1); - } - - static std::string getNodeAttributes(const NodeType* Node, - const Trie& T) { - if (Node->data() != T.Empty) - return 
"color=blue"; - - return ""; - } - -}; - -} // end of llvm namespace - -#endif // LLVM_ADT_TRIE_H diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h index 7f7061a..408d70c 100644 --- a/include/llvm/ADT/Triple.h +++ b/include/llvm/ADT/Triple.h @@ -65,7 +65,9 @@ public: nvptx, // NVPTX: 32-bit nvptx64, // NVPTX: 64-bit le32, // le32: generic little-endian 32-bit CPU (PNaCl / Emscripten) - amdil // amdil: amd IL + amdil, // amdil: amd IL + spir, // SPIR: standard portable IR for OpenCL 32-bit version + spir64 // SPIR: standard portable IR for OpenCL 64-bit version }; enum VendorType { UnknownVendor, @@ -74,7 +76,9 @@ public: PC, SCEI, BGP, - BGQ + BGQ, + Freescale, + IBM }; enum OSType { UnknownOS, @@ -99,7 +103,8 @@ public: RTEMS, NativeClient, CNK, // BG/P Compute-Node Kernel - Bitrig + Bitrig, + AIX }; enum EnvironmentType { UnknownEnvironment, @@ -109,7 +114,8 @@ public: GNUEABIHF, EABI, MachO, - ANDROIDEABI + Android, + ELF }; private: @@ -341,7 +347,7 @@ public: /// to a known type. void setEnvironment(EnvironmentType Kind); - /// setTriple - Set all components to the new triple \arg Str. + /// setTriple - Set all components to the new triple \p Str. void setTriple(const Twine &Str); /// setArchName - Set the architecture (first) component of the @@ -392,11 +398,10 @@ public: /// @name Static helpers for IDs. /// @{ - /// getArchTypeName - Get the canonical name for the \arg Kind - /// architecture. + /// getArchTypeName - Get the canonical name for the \p Kind architecture. static const char *getArchTypeName(ArchType Kind); - /// getArchTypePrefix - Get the "prefix" canonical name for the \arg Kind + /// getArchTypePrefix - Get the "prefix" canonical name for the \p Kind /// architecture. This is the prefix used by the architecture specific /// builtins, and is suitable for passing to \see /// Intrinsic::getIntrinsicForGCCBuiltin(). @@ -404,15 +409,13 @@ public: /// \return - The architecture prefix, or 0 if none is defined. static const char *getArchTypePrefix(ArchType Kind); - /// getVendorTypeName - Get the canonical name for the \arg Kind - /// vendor. + /// getVendorTypeName - Get the canonical name for the \p Kind vendor. static const char *getVendorTypeName(VendorType Kind); - /// getOSTypeName - Get the canonical name for the \arg Kind operating - /// system. + /// getOSTypeName - Get the canonical name for the \p Kind operating system. static const char *getOSTypeName(OSType Kind); - /// getEnvironmentTypeName - Get the canonical name for the \arg Kind + /// getEnvironmentTypeName - Get the canonical name for the \p Kind /// environment. static const char *getEnvironmentTypeName(EnvironmentType Kind); @@ -424,11 +427,6 @@ public: /// architecture name (e.g., "x86"). static ArchType getArchTypeForLLVMName(StringRef Str); - /// getArchTypeForDarwinArchName - Get the architecture type for a "Darwin" - /// architecture name, for example as accepted by "gcc -arch" (see also - /// arch(3)). - static ArchType getArchTypeForDarwinArchName(StringRef Str); - /// @} }; diff --git a/include/llvm/ADT/Twine.h b/include/llvm/ADT/Twine.h index 9101df8..cc290d5 100644 --- a/include/llvm/ADT/Twine.h +++ b/include/llvm/ADT/Twine.h @@ -44,7 +44,7 @@ namespace llvm { /// itself, and renders as an empty string. This can be returned from APIs to /// effectively nullify any concatenations performed on the result. 
/// - /// \b Implementation \n + /// \b Implementation /// /// Given the nature of a Twine, it is not possible for the Twine's /// concatenation method to construct interior nodes; the result must be @@ -67,7 +67,7 @@ namespace llvm { /// /// These invariants are check by \see isValid(). /// - /// \b Efficiency Considerations \n + /// \b Efficiency Considerations /// /// The Twine is designed to yield efficient and small code for common /// situations. For this reason, the concat() method is inlined so that @@ -303,37 +303,37 @@ namespace llvm { LHS.character = static_cast(Val); } - /// Construct a twine to print \arg Val as an unsigned decimal integer. + /// Construct a twine to print \p Val as an unsigned decimal integer. explicit Twine(unsigned Val) : LHSKind(DecUIKind), RHSKind(EmptyKind) { LHS.decUI = Val; } - /// Construct a twine to print \arg Val as a signed decimal integer. + /// Construct a twine to print \p Val as a signed decimal integer. explicit Twine(int Val) : LHSKind(DecIKind), RHSKind(EmptyKind) { LHS.decI = Val; } - /// Construct a twine to print \arg Val as an unsigned decimal integer. + /// Construct a twine to print \p Val as an unsigned decimal integer. explicit Twine(const unsigned long &Val) : LHSKind(DecULKind), RHSKind(EmptyKind) { LHS.decUL = &Val; } - /// Construct a twine to print \arg Val as a signed decimal integer. + /// Construct a twine to print \p Val as a signed decimal integer. explicit Twine(const long &Val) : LHSKind(DecLKind), RHSKind(EmptyKind) { LHS.decL = &Val; } - /// Construct a twine to print \arg Val as an unsigned decimal integer. + /// Construct a twine to print \p Val as an unsigned decimal integer. explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind), RHSKind(EmptyKind) { LHS.decULL = &Val; } - /// Construct a twine to print \arg Val as a signed decimal integer. + /// Construct a twine to print \p Val as a signed decimal integer. explicit Twine(const long long &Val) : LHSKind(DecLLKind), RHSKind(EmptyKind) { LHS.decLL = &Val; @@ -370,7 +370,7 @@ namespace llvm { /// @name Numeric Conversions /// @{ - // Construct a twine to print \arg Val as an unsigned hexadecimal integer. + // Construct a twine to print \p Val as an unsigned hexadecimal integer. static Twine utohexstr(const uint64_t &Val) { Child LHS, RHS; LHS.uHex = &Val; @@ -447,17 +447,17 @@ namespace llvm { /// The returned StringRef's size does not include the null terminator. StringRef toNullTerminatedStringRef(SmallVectorImpl &Out) const; - /// print - Write the concatenated string represented by this twine to the - /// stream \arg OS. + /// Write the concatenated string represented by this twine to the + /// stream \p OS. void print(raw_ostream &OS) const; - /// dump - Dump the concatenated string represented by this twine to stderr. + /// Dump the concatenated string represented by this twine to stderr. void dump() const; - /// print - Write the representation of this twine to the stream \arg OS. + /// Write the representation of this twine to the stream \p OS. void printRepr(raw_ostream &OS) const; - /// dumpRepr - Dump the representation of this twine to stderr. + /// Dump the representation of this twine to stderr. 
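The Twine edits above are likewise comment-only, but they describe behavior worth a concrete example: small integers are captured by value, larger ones by reference, and utohexstr renders unsigned hex. Because a Twine only references its operands, it should be built and rendered inside a single expression; a short sketch (illustrative, not from the imported headers):

// Illustrative sketch of the Twine constructors documented above.
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
#include <string>
using namespace llvm;

void twineDemo(unsigned Count, uint64_t Addr) {
  // Build and consume the concatenation in one expression; str() renders it
  // once into a std::string.
  std::string Msg = ("processed " + Twine(Count) + " items at 0x" +
                     Twine::utohexstr(Addr)).str();
  errs() << Msg << "\n";
}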
void dumpRepr() const; /// @} diff --git a/include/llvm/ADT/ValueMap.h b/include/llvm/ADT/ValueMap.h index f7e2551..d23fccf 100644 --- a/include/llvm/ADT/ValueMap.h +++ b/include/llvm/ADT/ValueMap.h @@ -80,8 +80,8 @@ class ValueMap { typedef typename Config::ExtraData ExtraData; MapT Map; ExtraData Data; - ValueMap(const ValueMap&); // DO NOT IMPLEMENT - ValueMap& operator=(const ValueMap&); // DO NOT IMPLEMENT + ValueMap(const ValueMap&) LLVM_DELETED_FUNCTION; + ValueMap& operator=(const ValueMap&) LLVM_DELETED_FUNCTION; public: typedef KeyT key_type; typedef ValueT mapped_type; diff --git a/include/llvm/ADT/ilist.h b/include/llvm/ADT/ilist.h index ba9864a..7f5cd17 100644 --- a/include/llvm/ADT/ilist.h +++ b/include/llvm/ADT/ilist.h @@ -38,6 +38,7 @@ #ifndef LLVM_ADT_ILIST_H #define LLVM_ADT_ILIST_H +#include "llvm/Support/Compiler.h" #include #include #include @@ -331,8 +332,8 @@ class iplist : public Traits { // No fundamental reason why iplist can't be copyable, but the default // copy/copy-assign won't do. - iplist(const iplist &); // do not implement - void operator=(const iplist &); // do not implement + iplist(const iplist &) LLVM_DELETED_FUNCTION; + void operator=(const iplist &) LLVM_DELETED_FUNCTION; public: typedef NodeTy *pointer; diff --git a/include/llvm/AddressingMode.h b/include/llvm/AddressingMode.h new file mode 100644 index 0000000..70b3c05 --- /dev/null +++ b/include/llvm/AddressingMode.h @@ -0,0 +1,41 @@ +//===--------- llvm/AddressingMode.h - Addressing Mode -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// This file contains addressing mode data structures which are shared +// between LSR and a number of places in the codegen. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ADDRESSING_MODE_H +#define LLVM_ADDRESSING_MODE_H + +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +class GlobalValue; + +/// AddrMode - This represents an addressing mode of: +/// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg +/// If BaseGV is null, there is no BaseGV. +/// If BaseOffs is zero, there is no base offset. +/// If HasBaseReg is false, there is no base register. +/// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with +/// no scale. +/// +struct AddrMode { + GlobalValue *BaseGV; + int64_t BaseOffs; + bool HasBaseReg; + int64_t Scale; + AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {} +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/AliasAnalysis.h b/include/llvm/Analysis/AliasAnalysis.h index 674868a..be274af 100644 --- a/include/llvm/Analysis/AliasAnalysis.h +++ b/include/llvm/Analysis/AliasAnalysis.h @@ -45,7 +45,8 @@ namespace llvm { class LoadInst; class StoreInst; class VAArgInst; -class TargetData; +class DataLayout; +class TargetLibraryInfo; class Pass; class AnalysisUsage; class MemTransferInst; @@ -54,7 +55,8 @@ class DominatorTree; class AliasAnalysis { protected: - const TargetData *TD; + const DataLayout *TD; + const TargetLibraryInfo *TLI; private: AliasAnalysis *AA; // Previous Alias Analysis to chain to. 
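The new AddressingMode.h above lifts the AddrMode struct out so LSR and the codegen share one definition of BaseGV + BaseOffs + BaseReg + Scale*ScaleReg, with null/zero/false meaning the component is absent. Filling it in for a plain reg-plus-offset mode looks like this (illustrative, not from the imported headers):

// Illustrative sketch of the AddrMode struct introduced above.
#include "llvm/AddressingMode.h"
using namespace llvm;

AddrMode makeRegPlusOffset(int64_t Offset) {
  AddrMode AM;             // defaults: no BaseGV, offset 0, no base reg, scale 0
  AM.HasBaseReg = true;    // describes [BaseReg + Offset]
  AM.BaseOffs = Offset;
  // BaseGV stays null and Scale stays 0: no global and no scaled index register.
  return AM;
}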
@@ -73,7 +75,7 @@ protected: public: static char ID; // Class identification, replacement for typeinfo - AliasAnalysis() : TD(0), AA(0) {} + AliasAnalysis() : TD(0), TLI(0), AA(0) {} virtual ~AliasAnalysis(); // We want to be subclassed /// UnknownSize - This is a special value which can be used with the @@ -81,12 +83,17 @@ public: /// know the sizes of the potential memory references. static uint64_t const UnknownSize = ~UINT64_C(0); - /// getTargetData - Return a pointer to the current TargetData object, or - /// null if no TargetData object is available. + /// getDataLayout - Return a pointer to the current DataLayout object, or + /// null if no DataLayout object is available. /// - const TargetData *getTargetData() const { return TD; } + const DataLayout *getDataLayout() const { return TD; } - /// getTypeStoreSize - Return the TargetData store size for the given type, + /// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo + /// object, or null if no TargetLibraryInfo object is available. + /// + const TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; } + + /// getTypeStoreSize - Return the DataLayout store size for the given type, /// if known, or a conservative value otherwise. /// uint64_t getTypeStoreSize(Type *Ty); @@ -187,6 +194,11 @@ public: return isNoAlias(Location(V1, V1Size), Location(V2, V2Size)); } + /// isNoAlias - A convenience wrapper. + bool isNoAlias(const Value *V1, const Value *V2) { + return isNoAlias(Location(V1), Location(V2)); + } + /// isMustAlias - A convenience wrapper. bool isMustAlias(const Location &LocA, const Location &LocB) { return alias(LocA, LocB) == MustAlias; diff --git a/include/llvm/Analysis/AliasSetTracker.h b/include/llvm/Analysis/AliasSetTracker.h index 95626d6..1e606c8 100644 --- a/include/llvm/Analysis/AliasSetTracker.h +++ b/include/llvm/Analysis/AliasSetTracker.h @@ -109,7 +109,6 @@ class AliasSet : public ilist_node { PointerRec *PtrList, **PtrListEnd; // Doubly linked list of nodes. AliasSet *Forward; // Forwarding pointer. - AliasSet *Next, *Prev; // Doubly linked list of AliasSets. // All instructions without a specific address in this alias set. std::vector > UnknownInsts; @@ -226,8 +225,8 @@ private: AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) { } - AliasSet(const AliasSet &AS); // do not implement - void operator=(const AliasSet &AS); // do not implement + AliasSet(const AliasSet &AS) LLVM_DELETED_FUNCTION; + void operator=(const AliasSet &AS) LLVM_DELETED_FUNCTION; PointerRec *getSomePointer() const { return PtrList; diff --git a/include/llvm/Analysis/BranchProbabilityInfo.h b/include/llvm/Analysis/BranchProbabilityInfo.h index 006daa0..c0567da 100644 --- a/include/llvm/Analysis/BranchProbabilityInfo.h +++ b/include/llvm/Analysis/BranchProbabilityInfo.h @@ -28,11 +28,14 @@ class raw_ostream; /// /// This is a function analysis pass which provides information on the relative /// probabilities of each "edge" in the function's CFG where such an edge is -/// defined by a pair of basic blocks. The probability for a given block and -/// a successor block are always relative to the probabilities of the other -/// successor blocks. Another way of looking at it is that the probabilities -/// for a given block B and each of its successors should sum to exactly -/// one (100%). +/// defined by a pair (PredBlock and an index in the successors). The +/// probability of an edge from one block is always relative to the +/// probabilities of other edges from the block. 
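Alongside the TargetData-to-DataLayout rename and the new TargetLibraryInfo accessor, the AliasAnalysis hunk above adds an isNoAlias(V1, V2) convenience that wraps both pointers in unknown-size Locations. A small sketch of the shorthand (illustrative, not from the imported headers):

// Illustrative sketch of the new isNoAlias convenience wrapper.
#include "llvm/Analysis/AliasAnalysis.h"
using namespace llvm;

bool provablyDisjoint(AliasAnalysis &AA, const Value *A, const Value *B) {
  // The new overload wraps both pointers in unknown-size Locations, so this
  // is shorthand for:
  //   AA.isNoAlias(AliasAnalysis::Location(A), AliasAnalysis::Location(B));
  // Use the Location form directly when explicit access sizes are known.
  return AA.isNoAlias(A, B);
}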
The probabilites of all edges +/// from a block sum to exactly one (100%). +/// We use a pair (PredBlock and an index in the successors) to uniquely +/// identify an edge, since we can have multiple edges from Src to Dst. +/// As an example, we can have a switch which jumps to Dst with value 0 and +/// value 10. class BranchProbabilityInfo : public FunctionPass { public: static char ID; @@ -52,6 +55,12 @@ public: /// leaving the 'Src' block. The returned probability is never zero, and can /// only be one if the source block has only one successor. BranchProbability getEdgeProbability(const BasicBlock *Src, + unsigned IndexInSuccessors) const; + + /// \brief Get the probability of going from Src to Dst. + /// + /// It returns the sum of all probabilities for edges from Src to Dst. + BranchProbability getEdgeProbability(const BasicBlock *Src, const BasicBlock *Dst) const; /// \brief Test if an edge is hot relative to other out-edges of the Src. @@ -74,25 +83,34 @@ public: raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src, const BasicBlock *Dst) const; - /// \brief Get the raw edge weight calculated for the block pair. + /// \brief Get the raw edge weight calculated for the edge. /// /// This returns the raw edge weight. It is guaranteed to fall between 1 and /// UINT32_MAX. Note that the raw edge weight is not meaningful in isolation. /// This interface should be very carefully, and primarily by routines that /// are updating the analysis by later calling setEdgeWeight. + uint32_t getEdgeWeight(const BasicBlock *Src, + unsigned IndexInSuccessors) const; + + /// \brief Get the raw edge weight calculated for the block pair. + /// + /// This returns the sum of all raw edge weights from Src to Dst. + /// It is guaranteed to fall between 1 and UINT32_MAX. uint32_t getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const; - /// \brief Set the raw edge weight for the block pair. + /// \brief Set the raw edge weight for a given edge. /// - /// This allows a pass to explicitly set the edge weight for a block. It can + /// This allows a pass to explicitly set the edge weight for an edge. It can /// be used when updating the CFG to update and preserve the branch /// probability information. Read the implementation of how these edge /// weights are calculated carefully before using! - void setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst, + void setEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors, uint32_t Weight); private: - typedef std::pair Edge; + // Since we allow duplicate edges from one basic block to another, we use + // a pair (PredBlock and an index in the successors) to specify an edge. + typedef std::pair Edge; // Default weight value. Used when we don't have information about the edge. // TODO: DEFAULT_WEIGHT makes sense during static predication, when none of diff --git a/include/llvm/Analysis/CallGraph.h b/include/llvm/Analysis/CallGraph.h index fb77da7..6a9ed31 100644 --- a/include/llvm/Analysis/CallGraph.h +++ b/include/llvm/Analysis/CallGraph.h @@ -185,9 +185,9 @@ private: /// in the CalledFunctions array of this or other CallGraphNodes. 
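The BranchProbabilityInfo change above rekeys edges from (Src, Dst) to (Src, successor index), so a switch that sends two case values to the same block keeps two distinct weights; the Dst-based getters now return the sum over all matching edges, and setEdgeWeight takes an index. A hedged sketch of the index-based queries, using the 2012-era header locations:

// Illustrative sketch of the per-successor-index queries added above.
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/BasicBlock.h"
#include "llvm/Support/CFG.h"   // succ_begin / succ_end
using namespace llvm;

void dumpEdgeWeights(const BranchProbabilityInfo &BPI, const BasicBlock *Src) {
  unsigned Idx = 0;
  for (succ_const_iterator I = succ_begin(Src), E = succ_end(Src);
       I != E; ++I, ++Idx) {
    // Per-edge: duplicate Src->Dst edges are distinguished by their index.
    uint32_t PerEdge = BPI.getEdgeWeight(Src, Idx);
    // Aggregate: sums the weights of every edge from Src to this successor.
    uint32_t Total = BPI.getEdgeWeight(Src, *I);
    (void)PerEdge; (void)Total;
  }
}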
unsigned NumReferences; - CallGraphNode(const CallGraphNode &); // DO NOT IMPLEMENT - void operator=(const CallGraphNode &); // DO NOT IMPLEMENT - + CallGraphNode(const CallGraphNode &) LLVM_DELETED_FUNCTION; + void operator=(const CallGraphNode &) LLVM_DELETED_FUNCTION; + void DropRef() { --NumReferences; } void AddRef() { ++NumReferences; } public: diff --git a/include/llvm/Analysis/CaptureTracking.h b/include/llvm/Analysis/CaptureTracking.h index 9b5e842..2889269 100644 --- a/include/llvm/Analysis/CaptureTracking.h +++ b/include/llvm/Analysis/CaptureTracking.h @@ -46,7 +46,7 @@ namespace llvm { /// capture) return false. To search it, return true. /// /// U->getUser() is always an Instruction. - virtual bool shouldExplore(Use *U) = 0; + virtual bool shouldExplore(Use *U); /// captured - Information about the pointer was captured by the user of /// use U. Return true to stop the traversal or false to continue looking diff --git a/include/llvm/Analysis/CodeMetrics.h b/include/llvm/Analysis/CodeMetrics.h index 03c807c..4398faa 100644 --- a/include/llvm/Analysis/CodeMetrics.h +++ b/include/llvm/Analysis/CodeMetrics.h @@ -22,11 +22,11 @@ namespace llvm { class BasicBlock; class Function; class Instruction; - class TargetData; + class DataLayout; class Value; /// \brief Check whether an instruction is likely to be "free" when lowered. - bool isInstructionFree(const Instruction *I, const TargetData *TD = 0); + bool isInstructionFree(const Instruction *I, const DataLayout *TD = 0); /// \brief Check whether a call will lower to something small. /// @@ -85,10 +85,10 @@ namespace llvm { NumRets(0) {} /// \brief Add information about a block to the current state. - void analyzeBasicBlock(const BasicBlock *BB, const TargetData *TD = 0); + void analyzeBasicBlock(const BasicBlock *BB, const DataLayout *TD = 0); /// \brief Add information about a function to the current state. - void analyzeFunction(Function *F, const TargetData *TD = 0); + void analyzeFunction(Function *F, const DataLayout *TD = 0); }; } diff --git a/include/llvm/Analysis/ConstantFolding.h b/include/llvm/Analysis/ConstantFolding.h index 2fdef5f..12e623e 100644 --- a/include/llvm/Analysis/ConstantFolding.h +++ b/include/llvm/Analysis/ConstantFolding.h @@ -12,7 +12,7 @@ // // Also, to supplement the basic VMCore ConstantExpr simplifications, // this file declares some additional folding routines that can make use of -// TargetData information. These functions cannot go in VMCore due to library +// DataLayout information. These functions cannot go in VMCore due to library // dependency issues. // //===----------------------------------------------------------------------===// @@ -24,7 +24,7 @@ namespace llvm { class Constant; class ConstantExpr; class Instruction; - class TargetData; + class DataLayout; class TargetLibraryInfo; class Function; class Type; @@ -36,14 +36,14 @@ namespace llvm { /// Note that this fails if not all of the operands are constant. Otherwise, /// this function can only fail when attempting to fold instructions like loads /// and stores, which have no constant expression form. -Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0, +Constant *ConstantFoldInstruction(Instruction *I, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0); /// ConstantFoldConstantExpression - Attempt to fold the constant expression -/// using the specified TargetData. If successful, the constant result is +/// using the specified DataLayout. 
If successful, the constant result is /// result is returned, if not, null is returned. Constant *ConstantFoldConstantExpression(const ConstantExpr *CE, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0); /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the @@ -54,7 +54,7 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE, /// Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, ArrayRef Ops, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0); /// ConstantFoldCompareInstOperands - Attempt to constant fold a compare @@ -63,7 +63,7 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, /// Constant *ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0); /// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue @@ -75,7 +75,7 @@ Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, /// ConstantFoldLoadFromConstPtr - Return the value that a load from C would /// produce if it is constant and determinable. If this is not determinable, /// return null. -Constant *ConstantFoldLoadFromConstPtr(Constant *C, const TargetData *TD = 0); +Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout *TD = 0); /// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a /// getelementptr constantexpr, return the constant value being addressed by the diff --git a/include/llvm/Analysis/DependenceAnalysis.h b/include/llvm/Analysis/DependenceAnalysis.h new file mode 100644 index 0000000..b4327ee --- /dev/null +++ b/include/llvm/Analysis/DependenceAnalysis.h @@ -0,0 +1,885 @@ +//===-- llvm/Analysis/DependenceAnalysis.h -------------------- -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// DependenceAnalysis is an LLVM pass that analyses dependences between memory +// accesses. Currently, it is an implementation of the approach described in +// +// Practical Dependence Testing +// Goff, Kennedy, Tseng +// PLDI 1991 +// +// There's a single entry point that analyzes the dependence between a pair +// of memory references in a function, returning either NULL, for no dependence, +// or a more-or-less detailed description of the dependence between them. +// +// Please note that this is work in progress and the interface is subject to +// change. +// +// Plausible changes: +// Return a set of more precise dependences instead of just one dependence +// summarizing all. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_DEPENDENCEANALYSIS_H +#define LLVM_ANALYSIS_DEPENDENCEANALYSIS_H + +#include "llvm/Instructions.h" +#include "llvm/Pass.h" +#include "llvm/ADT/SmallBitVector.h" + +namespace llvm { + class AliasAnalysis; + class Loop; + class LoopInfo; + class ScalarEvolution; + class SCEV; + class SCEVConstant; + class raw_ostream; + + /// Dependence - This class represents a dependence between two memory + /// memory references in a function. 
It contains minimal information and + /// is used in the very common situation where the compiler is unable to + /// determine anything beyond the existence of a dependence; that is, it + /// represents a confused dependence (see also FullDependence). In most + /// cases (for output, flow, and anti dependences), the dependence implies + /// an ordering, where the source must precede the destination; in contrast, + /// input dependences are unordered. + class Dependence { + public: + Dependence(const Instruction *Source, + const Instruction *Destination) : + Src(Source), Dst(Destination) {} + virtual ~Dependence() {} + + /// Dependence::DVEntry - Each level in the distance/direction vector + /// has a direction (or perhaps a union of several directions), and + /// perhaps a distance. + struct DVEntry { + enum { NONE = 0, + LT = 1, + EQ = 2, + LE = 3, + GT = 4, + NE = 5, + GE = 6, + ALL = 7 }; + unsigned char Direction : 3; // Init to ALL, then refine. + bool Scalar : 1; // Init to true. + bool PeelFirst : 1; // Peeling the first iteration will break dependence. + bool PeelLast : 1; // Peeling the last iteration will break the dependence. + bool Splitable : 1; // Splitting the loop will break dependence. + const SCEV *Distance; // NULL implies no distance available. + DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false), + PeelLast(false), Splitable(false), Distance(NULL) { } + }; + + /// getSrc - Returns the source instruction for this dependence. + /// + const Instruction *getSrc() const { return Src; } + + /// getDst - Returns the destination instruction for this dependence. + /// + const Instruction *getDst() const { return Dst; } + + /// isInput - Returns true if this is an input dependence. + /// + bool isInput() const; + + /// isOutput - Returns true if this is an output dependence. + /// + bool isOutput() const; + + /// isFlow - Returns true if this is a flow (aka true) dependence. + /// + bool isFlow() const; + + /// isAnti - Returns true if this is an anti dependence. + /// + bool isAnti() const; + + /// isOrdered - Returns true if dependence is Output, Flow, or Anti + /// + bool isOrdered() const { return isOutput() || isFlow() || isAnti(); } + + /// isUnordered - Returns true if dependence is Input + /// + bool isUnordered() const { return isInput(); } + + /// isLoopIndependent - Returns true if this is a loop-independent + /// dependence. + virtual bool isLoopIndependent() const { return true; } + + /// isConfused - Returns true if this dependence is confused + /// (the compiler understands nothing and makes worst-case + /// assumptions). + virtual bool isConfused() const { return true; } + + /// isConsistent - Returns true if this dependence is consistent + /// (occurs every time the source and destination are executed). + virtual bool isConsistent() const { return false; } + + /// getLevels - Returns the number of common loops surrounding the + /// source and destination of the dependence. + virtual unsigned getLevels() const { return 0; } + + /// getDirection - Returns the direction associated with a particular + /// level. + virtual unsigned getDirection(unsigned Level) const { return DVEntry::ALL; } + + /// getDistance - Returns the distance (or NULL) associated with a + /// particular level. + virtual const SCEV *getDistance(unsigned Level) const { return NULL; } + + /// isPeelFirst - Returns true if peeling the first iteration from + /// this loop will break this dependence. 
+ virtual bool isPeelFirst(unsigned Level) const { return false; } + + /// isPeelLast - Returns true if peeling the last iteration from + /// this loop will break this dependence. + virtual bool isPeelLast(unsigned Level) const { return false; } + + /// isSplitable - Returns true if splitting this loop will break + /// the dependence. + virtual bool isSplitable(unsigned Level) const { return false; } + + /// isScalar - Returns true if a particular level is scalar; that is, + /// if no subscript in the source or destination mention the induction + /// variable associated with the loop at this level. + virtual bool isScalar(unsigned Level) const; + + /// dump - For debugging purposes, dumps a dependence to OS. + /// + void dump(raw_ostream &OS) const; + private: + const Instruction *Src, *Dst; + friend class DependenceAnalysis; + }; + + + /// FullDependence - This class represents a dependence between two memory + /// references in a function. It contains detailed information about the + /// dependence (direction vectors, etc) and is used when the compiler is + /// able to accurately analyze the interaction of the references; that is, + /// it is not a confused dependence (see Dependence). In most cases + /// (for output, flow, and anti dependences), the dependence implies an + /// ordering, where the source must precede the destination; in contrast, + /// input dependences are unordered. + class FullDependence : public Dependence { + public: + FullDependence(const Instruction *Src, + const Instruction *Dst, + bool LoopIndependent, + unsigned Levels); + ~FullDependence() { + delete DV; + } + + /// isLoopIndependent - Returns true if this is a loop-independent + /// dependence. + bool isLoopIndependent() const { return LoopIndependent; } + + /// isConfused - Returns true if this dependence is confused + /// (the compiler understands nothing and makes worst-case + /// assumptions). + bool isConfused() const { return false; } + + /// isConsistent - Returns true if this dependence is consistent + /// (occurs every time the source and destination are executed). + bool isConsistent() const { return Consistent; } + + /// getLevels - Returns the number of common loops surrounding the + /// source and destination of the dependence. + unsigned getLevels() const { return Levels; } + + /// getDirection - Returns the direction associated with a particular + /// level. + unsigned getDirection(unsigned Level) const; + + /// getDistance - Returns the distance (or NULL) associated with a + /// particular level. + const SCEV *getDistance(unsigned Level) const; + + /// isPeelFirst - Returns true if peeling the first iteration from + /// this loop will break this dependence. + bool isPeelFirst(unsigned Level) const; + + /// isPeelLast - Returns true if peeling the last iteration from + /// this loop will break this dependence. + bool isPeelLast(unsigned Level) const; + + /// isSplitable - Returns true if splitting the loop will break + /// the dependence. + bool isSplitable(unsigned Level) const; + + /// isScalar - Returns true if a particular level is scalar; that is, + /// if no subscript in the source or destination mention the induction + /// variable associated with the loop at this level. + bool isScalar(unsigned Level) const; + private: + unsigned short Levels; + bool LoopIndependent; + bool Consistent; // Init to true, then refine. + DVEntry *DV; + friend class DependenceAnalysis; + }; + + + /// DependenceAnalysis - This class is the main dependence-analysis driver. 
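As a rough illustration of the query interface above (isConfused, getLevels, getDirection, getDistance) together with the depends() entry point declared just below, a client transform might walk a result like this. This is only a sketch, not part of the import; it assumes the caller owns the returned Dependence and obtained the analysis through the usual getAnalysis<> mechanism.

    // Sketch only; ownership of the returned Dependence is assumed to lie
    // with the caller, hence the OwningPtr.
    #include "llvm/Analysis/DependenceAnalysis.h"
    #include "llvm/ADT/OwningPtr.h"

    static void inspectDependence(llvm::DependenceAnalysis &DA,
                                  llvm::Instruction *Src,
                                  llvm::Instruction *Dst) {
      llvm::OwningPtr<llvm::Dependence> D(
          DA.depends(Src, Dst, /*PossiblyLoopIndependent=*/true));
      if (!D)
        return;                        // independence was proved
      if (D->isConfused())
        return;                        // nothing beyond "some dependence exists"
      for (unsigned Level = 1; Level <= D->getLevels(); ++Level) {
        unsigned Dir = D->getDirection(Level);           // union of DVEntry flags
        const llvm::SCEV *Dist = D->getDistance(Level);  // may be null
        (void)Dir;
        (void)Dist;
      }
    }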
+ /// + class DependenceAnalysis : public FunctionPass { + void operator=(const DependenceAnalysis &); // do not implement + DependenceAnalysis(const DependenceAnalysis &); // do not implement + public: + /// depends - Tests for a dependence between the Src and Dst instructions. + /// Returns NULL if no dependence; otherwise, returns a Dependence (or a + /// FullDependence) with as much information as can be gleaned. + /// The flag PossiblyLoopIndependent should be set by the caller + /// if it appears that control flow can reach from Src to Dst + /// without traversing a loop back edge. + Dependence *depends(const Instruction *Src, + const Instruction *Dst, + bool PossiblyLoopIndependent); + + /// getSplitIteration - Give a dependence that's splitable at some + /// particular level, return the iteration that should be used to split + /// the loop. + /// + /// Generally, the dependence analyzer will be used to build + /// a dependence graph for a function (basically a map from instructions + /// to dependences). Looking for cycles in the graph shows us loops + /// that cannot be trivially vectorized/parallelized. + /// + /// We can try to improve the situation by examining all the dependences + /// that make up the cycle, looking for ones we can break. + /// Sometimes, peeling the first or last iteration of a loop will break + /// dependences, and there are flags for those possibilities. + /// Sometimes, splitting a loop at some other iteration will do the trick, + /// and we've got a flag for that case. Rather than waste the space to + /// record the exact iteration (since we rarely know), we provide + /// a method that calculates the iteration. It's a drag that it must work + /// from scratch, but wonderful in that it's possible. + /// + /// Here's an example: + /// + /// for (i = 0; i < 10; i++) + /// A[i] = ... + /// ... = A[11 - i] + /// + /// There's a loop-carried flow dependence from the store to the load, + /// found by the weak-crossing SIV test. The dependence will have a flag, + /// indicating that the dependence can be broken by splitting the loop. + /// Calling getSplitIteration will return 5. + /// Splitting the loop breaks the dependence, like so: + /// + /// for (i = 0; i <= 5; i++) + /// A[i] = ... + /// ... = A[11 - i] + /// for (i = 6; i < 10; i++) + /// A[i] = ... + /// ... = A[11 - i] + /// + /// breaks the dependence and allows us to vectorize/parallelize + /// both loops. + const SCEV *getSplitIteration(const Dependence *Dep, unsigned Level); + + private: + AliasAnalysis *AA; + ScalarEvolution *SE; + LoopInfo *LI; + Function *F; + + /// Subscript - This private struct represents a pair of subscripts from + /// a pair of potentially multi-dimensional array references. We use a + /// vector of them to guide subscript partitioning. + struct Subscript { + const SCEV *Src; + const SCEV *Dst; + enum ClassificationKind { ZIV, SIV, RDIV, MIV, NonLinear } Classification; + SmallBitVector Loops; + SmallBitVector GroupLoops; + SmallBitVector Group; + }; + + struct CoefficientInfo { + const SCEV *Coeff; + const SCEV *PosPart; + const SCEV *NegPart; + const SCEV *Iterations; + }; + + struct BoundInfo { + const SCEV *Iterations; + const SCEV *Upper[8]; + const SCEV *Lower[8]; + unsigned char Direction; + unsigned char DirSet; + }; + + /// Constraint - This private class represents a constraint, as defined + /// in the paper + /// + /// Practical Dependence Testing + /// Goff, Kennedy, Tseng + /// PLDI 1991 + /// + /// There are 5 kinds of constraint, in a hierarchy. 
+ /// 1) Any - indicates no constraint, any dependence is possible. + /// 2) Line - A line ax + by = c, where a, b, and c are parameters, + /// representing the dependence equation. + /// 3) Distance - The value d of the dependence distance; + /// 4) Point - A point representing the dependence from + /// iteration x to iteration y. + /// 5) Empty - No dependence is possible. + class Constraint { + private: + enum ConstraintKind { Empty, Point, Distance, Line, Any } Kind; + ScalarEvolution *SE; + const SCEV *A; + const SCEV *B; + const SCEV *C; + const Loop *AssociatedLoop; + public: + /// isEmpty - Return true if the constraint is of kind Empty. + bool isEmpty() const { return Kind == Empty; } + + /// isPoint - Return true if the constraint is of kind Point. + bool isPoint() const { return Kind == Point; } + + /// isDistance - Return true if the constraint is of kind Distance. + bool isDistance() const { return Kind == Distance; } + + /// isLine - Return true if the constraint is of kind Line. + /// Since Distance's can also be represented as Lines, we also return + /// true if the constraint is of kind Distance. + bool isLine() const { return Kind == Line || Kind == Distance; } + + /// isAny - Return true if the constraint is of kind Any; + bool isAny() const { return Kind == Any; } + + /// getX - If constraint is a point , returns X. + /// Otherwise assert. + const SCEV *getX() const; + + /// getY - If constraint is a point , returns Y. + /// Otherwise assert. + const SCEV *getY() const; + + /// getA - If constraint is a line AX + BY = C, returns A. + /// Otherwise assert. + const SCEV *getA() const; + + /// getB - If constraint is a line AX + BY = C, returns B. + /// Otherwise assert. + const SCEV *getB() const; + + /// getC - If constraint is a line AX + BY = C, returns C. + /// Otherwise assert. + const SCEV *getC() const; + + /// getD - If constraint is a distance, returns D. + /// Otherwise assert. + const SCEV *getD() const; + + /// getAssociatedLoop - Returns the loop associated with this constraint. + const Loop *getAssociatedLoop() const; + + /// setPoint - Change a constraint to Point. + void setPoint(const SCEV *X, const SCEV *Y, const Loop *CurrentLoop); + + /// setLine - Change a constraint to Line. + void setLine(const SCEV *A, const SCEV *B, + const SCEV *C, const Loop *CurrentLoop); + + /// setDistance - Change a constraint to Distance. + void setDistance(const SCEV *D, const Loop *CurrentLoop); + + /// setEmpty - Change a constraint to Empty. + void setEmpty(); + + /// setAny - Change a constraint to Any. + void setAny(ScalarEvolution *SE); + + /// dump - For debugging purposes. Dumps the constraint + /// out to OS. + void dump(raw_ostream &OS) const; + }; + + + /// establishNestingLevels - Examines the loop nesting of the Src and Dst + /// instructions and establishes their shared loops. Sets the variables + /// CommonLevels, SrcLevels, and MaxLevels. + /// The source and destination instructions needn't be contained in the same + /// loop. The routine establishNestingLevels finds the level of most deeply + /// nested loop that contains them both, CommonLevels. An instruction that's + /// not contained in a loop is at level = 0. MaxLevels is equal to the level + /// of the source plus the level of the destination, minus CommonLevels. + /// This lets us allocate vectors MaxLevels in length, with room for every + /// distinct loop referenced in both the source and destination subscripts. + /// The variable SrcLevels is the nesting depth of the source instruction. 
+ /// It's used to help calculate distinct loops referenced by the destination. + /// Here's the map from loops to levels: + /// 0 - unused + /// 1 - outermost common loop + /// ... - other common loops + /// CommonLevels - innermost common loop + /// ... - loops containing Src but not Dst + /// SrcLevels - innermost loop containing Src but not Dst + /// ... - loops containing Dst but not Src + /// MaxLevels - innermost loop containing Dst but not Src + /// Consider the follow code fragment: + /// for (a = ...) { + /// for (b = ...) { + /// for (c = ...) { + /// for (d = ...) { + /// A[] = ...; + /// } + /// } + /// for (e = ...) { + /// for (f = ...) { + /// for (g = ...) { + /// ... = A[]; + /// } + /// } + /// } + /// } + /// } + /// If we're looking at the possibility of a dependence between the store + /// to A (the Src) and the load from A (the Dst), we'll note that they + /// have 2 loops in common, so CommonLevels will equal 2 and the direction + /// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7. + /// A map from loop names to level indices would look like + /// a - 1 + /// b - 2 = CommonLevels + /// c - 3 + /// d - 4 = SrcLevels + /// e - 5 + /// f - 6 + /// g - 7 = MaxLevels + void establishNestingLevels(const Instruction *Src, + const Instruction *Dst); + + unsigned CommonLevels, SrcLevels, MaxLevels; + + /// mapSrcLoop - Given one of the loops containing the source, return + /// its level index in our numbering scheme. + unsigned mapSrcLoop(const Loop *SrcLoop) const; + + /// mapDstLoop - Given one of the loops containing the destination, + /// return its level index in our numbering scheme. + unsigned mapDstLoop(const Loop *DstLoop) const; + + /// isLoopInvariant - Returns true if Expression is loop invariant + /// in LoopNest. + bool isLoopInvariant(const SCEV *Expression, const Loop *LoopNest) const; + + /// removeMatchingExtensions - Examines a subscript pair. + /// If the source and destination are identically sign (or zero) + /// extended, it strips off the extension in an effort to + /// simplify the actual analysis. + void removeMatchingExtensions(Subscript *Pair); + + /// collectCommonLoops - Finds the set of loops from the LoopNest that + /// have a level <= CommonLevels and are referred to by the SCEV Expression. + void collectCommonLoops(const SCEV *Expression, + const Loop *LoopNest, + SmallBitVector &Loops) const; + + /// checkSrcSubscript - Examines the SCEV Src, returning true iff it's + /// linear. Collect the set of loops mentioned by Src. + bool checkSrcSubscript(const SCEV *Src, + const Loop *LoopNest, + SmallBitVector &Loops); + + /// checkDstSubscript - Examines the SCEV Dst, returning true iff it's + /// linear. Collect the set of loops mentioned by Dst. + bool checkDstSubscript(const SCEV *Dst, + const Loop *LoopNest, + SmallBitVector &Loops); + + /// isKnownPredicate - Compare X and Y using the predicate Pred. + /// Basically a wrapper for SCEV::isKnownPredicate, + /// but tries harder, especially in the presence of sign and zero + /// extensions and symbolics. + bool isKnownPredicate(ICmpInst::Predicate Pred, + const SCEV *X, + const SCEV *Y) const; + + /// collectUpperBound - All subscripts are the same type (on my machine, + /// an i64). The loop bound may be a smaller type. collectUpperBound + /// find the bound, if available, and zero extends it to the Type T. + /// (I zero extend since the bound should always be >= 0.) + /// If no upper bound is available, return NULL. 
+ const SCEV *collectUpperBound(const Loop *l, Type *T) const; + + /// collectConstantUpperBound - Calls collectUpperBound(), then + /// attempts to cast it to SCEVConstant. If the cast fails, + /// returns NULL. + const SCEVConstant *collectConstantUpperBound(const Loop *l, Type *T) const; + + /// classifyPair - Examines the subscript pair (the Src and Dst SCEVs) + /// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear. + /// Collects the associated loops in a set. + Subscript::ClassificationKind classifyPair(const SCEV *Src, + const Loop *SrcLoopNest, + const SCEV *Dst, + const Loop *DstLoopNest, + SmallBitVector &Loops); + + /// testZIV - Tests the ZIV subscript pair (Src and Dst) for dependence. + /// Returns true if any possible dependence is disproved. + /// If there might be a dependence, returns false. + /// If the dependence isn't proven to exist, + /// marks the Result as inconsistent. + bool testZIV(const SCEV *Src, + const SCEV *Dst, + FullDependence &Result) const; + + /// testSIV - Tests the SIV subscript pair (Src and Dst) for dependence. + /// Things of the form [c1 + a1*i] and [c2 + a2*j], where + /// i and j are induction variables, c1 and c2 are loop invariant, + /// and a1 and a2 are constant. + /// Returns true if any possible dependence is disproved. + /// If there might be a dependence, returns false. + /// Sets appropriate direction vector entry and, when possible, + /// the distance vector entry. + /// If the dependence isn't proven to exist, + /// marks the Result as inconsistent. + bool testSIV(const SCEV *Src, + const SCEV *Dst, + unsigned &Level, + FullDependence &Result, + Constraint &NewConstraint, + const SCEV *&SplitIter) const; + + /// testRDIV - Tests the RDIV subscript pair (Src and Dst) for dependence. + /// Things of the form [c1 + a1*i] and [c2 + a2*j] + /// where i and j are induction variables, c1 and c2 are loop invariant, + /// and a1 and a2 are constant. + /// With minor algebra, this test can also be used for things like + /// [c1 + a1*i + a2*j][c2]. + /// Returns true if any possible dependence is disproved. + /// If there might be a dependence, returns false. + /// Marks the Result as inconsistent. + bool testRDIV(const SCEV *Src, + const SCEV *Dst, + FullDependence &Result) const; + + /// testMIV - Tests the MIV subscript pair (Src and Dst) for dependence. + /// Returns true if dependence disproved. + /// Can sometimes refine direction vectors. + bool testMIV(const SCEV *Src, + const SCEV *Dst, + const SmallBitVector &Loops, + FullDependence &Result) const; + + /// strongSIVtest - Tests the strong SIV subscript pair (Src and Dst) + /// for dependence. + /// Things of the form [c1 + a*i] and [c2 + a*i], + /// where i is an induction variable, c1 and c2 are loop invariant, + /// and a is a constant + /// Returns true if any possible dependence is disproved. + /// If there might be a dependence, returns false. + /// Sets appropriate direction and distance. + bool strongSIVtest(const SCEV *Coeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurrentLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint) const; + + /// weakCrossingSIVtest - Tests the weak-crossing SIV subscript pair + /// (Src and Dst) for dependence. + /// Things of the form [c1 + a*i] and [c2 - a*i], + /// where i is an induction variable, c1 and c2 are loop invariant, + /// and a is a constant. + /// Returns true if any possible dependence is disproved. + /// If there might be a dependence, returns false. 
+ /// Sets appropriate direction entry. + /// Set consistent to false. + /// Marks the dependence as splitable. + bool weakCrossingSIVtest(const SCEV *SrcCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurrentLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint, + const SCEV *&SplitIter) const; + + /// ExactSIVtest - Tests the SIV subscript pair + /// (Src and Dst) for dependence. + /// Things of the form [c1 + a1*i] and [c2 + a2*i], + /// where i is an induction variable, c1 and c2 are loop invariant, + /// and a1 and a2 are constant. + /// Returns true if any possible dependence is disproved. + /// If there might be a dependence, returns false. + /// Sets appropriate direction entry. + /// Set consistent to false. + bool exactSIVtest(const SCEV *SrcCoeff, + const SCEV *DstCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurrentLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint) const; + + /// weakZeroSrcSIVtest - Tests the weak-zero SIV subscript pair + /// (Src and Dst) for dependence. + /// Things of the form [c1] and [c2 + a*i], + /// where i is an induction variable, c1 and c2 are loop invariant, + /// and a is a constant. See also weakZeroDstSIVtest. + /// Returns true if any possible dependence is disproved. + /// If there might be a dependence, returns false. + /// Sets appropriate direction entry. + /// Set consistent to false. + /// If loop peeling will break the dependence, mark appropriately. + bool weakZeroSrcSIVtest(const SCEV *DstCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurrentLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint) const; + + /// weakZeroDstSIVtest - Tests the weak-zero SIV subscript pair + /// (Src and Dst) for dependence. + /// Things of the form [c1 + a*i] and [c2], + /// where i is an induction variable, c1 and c2 are loop invariant, + /// and a is a constant. See also weakZeroSrcSIVtest. + /// Returns true if any possible dependence is disproved. + /// If there might be a dependence, returns false. + /// Sets appropriate direction entry. + /// Set consistent to false. + /// If loop peeling will break the dependence, mark appropriately. + bool weakZeroDstSIVtest(const SCEV *SrcCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurrentLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint) const; + + /// exactRDIVtest - Tests the RDIV subscript pair for dependence. + /// Things of the form [c1 + a*i] and [c2 + b*j], + /// where i and j are induction variable, c1 and c2 are loop invariant, + /// and a and b are constants. + /// Returns true if any possible dependence is disproved. + /// Marks the result as inconsistent. + /// Works in some cases that symbolicRDIVtest doesn't, + /// and vice versa. + bool exactRDIVtest(const SCEV *SrcCoeff, + const SCEV *DstCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *SrcLoop, + const Loop *DstLoop, + FullDependence &Result) const; + + /// symbolicRDIVtest - Tests the RDIV subscript pair for dependence. + /// Things of the form [c1 + a*i] and [c2 + b*j], + /// where i and j are induction variable, c1 and c2 are loop invariant, + /// and a and b are constants. + /// Returns true if any possible dependence is disproved. + /// Marks the result as inconsistent. + /// Works in some cases that exactRDIVtest doesn't, + /// and vice versa. Can also be used as a backup for + /// ordinary SIV tests. 
+ bool symbolicRDIVtest(const SCEV *SrcCoeff, + const SCEV *DstCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *SrcLoop, + const Loop *DstLoop) const; + + /// gcdMIVtest - Tests an MIV subscript pair for dependence. + /// Returns true if any possible dependence is disproved. + /// Marks the result as inconsistent. + /// Can sometimes disprove the equal direction for 1 or more loops. + // Can handle some symbolics that even the SIV tests don't get, + /// so we use it as a backup for everything. + bool gcdMIVtest(const SCEV *Src, + const SCEV *Dst, + FullDependence &Result) const; + + /// banerjeeMIVtest - Tests an MIV subscript pair for dependence. + /// Returns true if any possible dependence is disproved. + /// Marks the result as inconsistent. + /// Computes directions. + bool banerjeeMIVtest(const SCEV *Src, + const SCEV *Dst, + const SmallBitVector &Loops, + FullDependence &Result) const; + + /// collectCoefficientInfo - Walks through the subscript, + /// collecting each coefficient, the associated loop bounds, + /// and recording its positive and negative parts for later use. + CoefficientInfo *collectCoeffInfo(const SCEV *Subscript, + bool SrcFlag, + const SCEV *&Constant) const; + + /// getPositivePart - X^+ = max(X, 0). + /// + const SCEV *getPositivePart(const SCEV *X) const; + + /// getNegativePart - X^- = min(X, 0). + /// + const SCEV *getNegativePart(const SCEV *X) const; + + /// getLowerBound - Looks through all the bounds info and + /// computes the lower bound given the current direction settings + /// at each level. + const SCEV *getLowerBound(BoundInfo *Bound) const; + + /// getUpperBound - Looks through all the bounds info and + /// computes the upper bound given the current direction settings + /// at each level. + const SCEV *getUpperBound(BoundInfo *Bound) const; + + /// exploreDirections - Hierarchically expands the direction vector + /// search space, combining the directions of discovered dependences + /// in the DirSet field of Bound. Returns the number of distinct + /// dependences discovered. If the dependence is disproved, + /// it will return 0. + unsigned exploreDirections(unsigned Level, + CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + const SmallBitVector &Loops, + unsigned &DepthExpanded, + const SCEV *Delta) const; + + /// testBounds - Returns true iff the current bounds are plausible. + /// + bool testBounds(unsigned char DirKind, + unsigned Level, + BoundInfo *Bound, + const SCEV *Delta) const; + + /// findBoundsALL - Computes the upper and lower bounds for level K + /// using the * direction. Records them in Bound. + void findBoundsALL(CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + unsigned K) const; + + /// findBoundsLT - Computes the upper and lower bounds for level K + /// using the < direction. Records them in Bound. + void findBoundsLT(CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + unsigned K) const; + + /// findBoundsGT - Computes the upper and lower bounds for level K + /// using the > direction. Records them in Bound. + void findBoundsGT(CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + unsigned K) const; + + /// findBoundsEQ - Computes the upper and lower bounds for level K + /// using the = direction. Records them in Bound. + void findBoundsEQ(CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + unsigned K) const; + + /// intersectConstraints - Updates X with the intersection + /// of the Constraints X and Y. Returns true if X has changed. 
+ bool intersectConstraints(Constraint *X, + const Constraint *Y); + + /// propagate - Review the constraints, looking for opportunities + /// to simplify a subscript pair (Src and Dst). + /// Return true if some simplification occurs. + /// If the simplification isn't exact (that is, if it is conservative + /// in terms of dependence), set consistent to false. + bool propagate(const SCEV *&Src, + const SCEV *&Dst, + SmallBitVector &Loops, + SmallVector &Constraints, + bool &Consistent); + + /// propagateDistance - Attempt to propagate a distance + /// constraint into a subscript pair (Src and Dst). + /// Return true if some simplification occurs. + /// If the simplification isn't exact (that is, if it is conservative + /// in terms of dependence), set consistent to false. + bool propagateDistance(const SCEV *&Src, + const SCEV *&Dst, + Constraint &CurConstraint, + bool &Consistent); + + /// propagatePoint - Attempt to propagate a point + /// constraint into a subscript pair (Src and Dst). + /// Return true if some simplification occurs. + bool propagatePoint(const SCEV *&Src, + const SCEV *&Dst, + Constraint &CurConstraint); + + /// propagateLine - Attempt to propagate a line + /// constraint into a subscript pair (Src and Dst). + /// Return true if some simplification occurs. + /// If the simplification isn't exact (that is, if it is conservative + /// in terms of dependence), set consistent to false. + bool propagateLine(const SCEV *&Src, + const SCEV *&Dst, + Constraint &CurConstraint, + bool &Consistent); + + /// findCoefficient - Given a linear SCEV, + /// return the coefficient corresponding to specified loop. + /// If there isn't one, return the SCEV constant 0. + /// For example, given a*i + b*j + c*k, returning the coefficient + /// corresponding to the j loop would yield b. + const SCEV *findCoefficient(const SCEV *Expr, + const Loop *TargetLoop) const; + + /// zeroCoefficient - Given a linear SCEV, + /// return the SCEV given by zeroing out the coefficient + /// corresponding to the specified loop. + /// For example, given a*i + b*j + c*k, zeroing the coefficient + /// corresponding to the j loop would yield a*i + c*k. + const SCEV *zeroCoefficient(const SCEV *Expr, + const Loop *TargetLoop) const; + + /// addToCoefficient - Given a linear SCEV Expr, + /// return the SCEV given by adding some Value to the + /// coefficient corresponding to the specified TargetLoop. + /// For example, given a*i + b*j + c*k, adding 1 to the coefficient + /// corresponding to the j loop would yield a*i + (b+1)*j + c*k. + const SCEV *addToCoefficient(const SCEV *Expr, + const Loop *TargetLoop, + const SCEV *Value) const; + + /// updateDirection - Update direction vector entry + /// based on the current constraint. + void updateDirection(Dependence::DVEntry &Level, + const Constraint &CurConstraint) const; + public: + static char ID; // Class identification, replacement for typeinfo + DependenceAnalysis() : FunctionPass(ID) { + initializeDependenceAnalysisPass(*PassRegistry::getPassRegistry()); + } + + bool runOnFunction(Function &F); + void releaseMemory(); + void getAnalysisUsage(AnalysisUsage &) const; + void print(raw_ostream &, const Module * = 0) const; + }; // class DependenceAnalysis + + /// createDependenceAnalysisPass - This creates an instance of the + /// DependenceAnalysis pass. 
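For completeness, the factory declared just below is the replacement for the removed createLoopDependenceAnalysisPass(), so existing pipelines swap one call for the other. A minimal sketch, assuming the legacy pass manager header of this tree (llvm/PassManager.h) and the matching declaration this patch also adds to llvm/Analysis/Passes.h:

    // Sketch; header locations reflect this vintage of the tree and are
    // assumptions, not part of the patch.
    #include "llvm/PassManager.h"
    #include "llvm/Analysis/Passes.h"

    static void scheduleDependenceAnalysis(llvm::PassManager &PM) {
      PM.add(llvm::createDependenceAnalysisPass());
    }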
+ FunctionPass *createDependenceAnalysisPass(); + +} // namespace llvm + +#endif diff --git a/include/llvm/Analysis/Dominators.h b/include/llvm/Analysis/Dominators.h index a1cc196..8940971 100644 --- a/include/llvm/Analysis/Dominators.h +++ b/include/llvm/Analysis/Dominators.h @@ -346,7 +346,7 @@ public: DomTreeNodeBase *getRootNode() { return RootNode; } const DomTreeNodeBase *getRootNode() const { return RootNode; } - /// properlyDominates - Returns true iff this dominates N and this != N. + /// properlyDominates - Returns true iff A dominates B and A != B. /// Note that this is not a constant time operation! /// bool properlyDominates(const DomTreeNodeBase *A, diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h index 2bf79b9..9b98013 100644 --- a/include/llvm/Analysis/IVUsers.h +++ b/include/llvm/Analysis/IVUsers.h @@ -28,7 +28,7 @@ class IVUsers; class ScalarEvolution; class SCEV; class IVUsers; -class TargetData; +class DataLayout; /// IVStrideUse - Keep track of one use of a strided induction variable. /// The Expr member keeps track of the expression, User is the actual user @@ -123,7 +123,7 @@ class IVUsers : public LoopPass { LoopInfo *LI; DominatorTree *DT; ScalarEvolution *SE; - TargetData *TD; + DataLayout *TD; SmallPtrSet Processed; /// IVUses - A list of all tracked IV uses of induction variable expressions diff --git a/include/llvm/Analysis/InlineCost.h b/include/llvm/Analysis/InlineCost.h index 0cba135..a075db3 100644 --- a/include/llvm/Analysis/InlineCost.h +++ b/include/llvm/Analysis/InlineCost.h @@ -26,7 +26,7 @@ namespace llvm { class CallSite; - class TargetData; + class DataLayout; namespace InlineConstants { // Various magic constants used to adjust heuristics. @@ -36,6 +36,9 @@ namespace llvm { const int LastCallToStaticBonus = -15000; const int ColdccPenalty = 2000; const int NoreturnPenalty = 10000; + /// Do not inline functions which allocate this many bytes on the stack + /// when the caller is recursive. + const unsigned TotalAllocaSizeRecursiveCaller = 1024; } /// \brief Represents the cost of inlining a function. @@ -101,13 +104,13 @@ namespace llvm { /// InlineCostAnalyzer - Cost analyzer used by inliner. class InlineCostAnalyzer { - // TargetData if available, or null. - const TargetData *TD; + // DataLayout if available, or null. + const DataLayout *TD; public: InlineCostAnalyzer(): TD(0) {} - void setTargetData(const TargetData *TData) { TD = TData; } + void setDataLayout(const DataLayout *TData) { TD = TData; } /// \brief Get an InlineCost object representing the cost of inlining this /// callsite. diff --git a/include/llvm/Analysis/InstructionSimplify.h b/include/llvm/Analysis/InstructionSimplify.h index 152e885..e561e37 100644 --- a/include/llvm/Analysis/InstructionSimplify.h +++ b/include/llvm/Analysis/InstructionSimplify.h @@ -24,7 +24,7 @@ namespace llvm { class ArrayRef; class DominatorTree; class Instruction; - class TargetData; + class DataLayout; class TargetLibraryInfo; class Type; class Value; @@ -32,122 +32,122 @@ namespace llvm { /// SimplifyAddInst - Given operands for an Add, see if we can /// fold the result. If not, this returns null. Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifySubInst - Given operands for a Sub, see if we can /// fold the result. If not, this returns null. 
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyMulInst - Given operands for a Mul, see if we can /// fold the result. If not, this returns null. - Value *SimplifyMulInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifySDivInst - Given operands for an SDiv, see if we can /// fold the result. If not, this returns null. - Value *SimplifySDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyUDivInst - Given operands for a UDiv, see if we can /// fold the result. If not, this returns null. - Value *SimplifyUDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyFDivInst - Given operands for an FDiv, see if we can /// fold the result. If not, this returns null. - Value *SimplifyFDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifyFDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifySRemInst - Given operands for an SRem, see if we can /// fold the result. If not, this returns null. - Value *SimplifySRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyURemInst - Given operands for a URem, see if we can /// fold the result. If not, this returns null. - Value *SimplifyURemInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyFRemInst - Given operands for an FRem, see if we can /// fold the result. If not, this returns null. - Value *SimplifyFRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifyFRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyShlInst - Given operands for a Shl, see if we can /// fold the result. If not, this returns null. Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyLShrInst - Given operands for a LShr, see if we can /// fold the result. If not, this returns null. Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyAShrInst - Given operands for a AShr, see if we can /// fold the result. If not, this returns null. Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyAndInst - Given operands for an And, see if we can /// fold the result. If not, this returns null. 
- Value *SimplifyAndInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyOrInst - Given operands for an Or, see if we can /// fold the result. If not, this returns null. - Value *SimplifyOrInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyXorInst - Given operands for a Xor, see if we can /// fold the result. If not, this returns null. - Value *SimplifyXorInst(Value *LHS, Value *RHS, const TargetData *TD = 0, + Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyICmpInst - Given operands for an ICmpInst, see if we can /// fold the result. If not, this returns null. Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can /// fold the result. If not, this returns null. Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifySelectInst - Given operands for a SelectInst, see if we can fold /// the result. If not, this returns null. Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can /// fold the result. If not, this returns null. - Value *SimplifyGEPInst(ArrayRef Ops, const TargetData *TD = 0, + Value *SimplifyGEPInst(ArrayRef Ops, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); @@ -155,13 +155,13 @@ namespace llvm { /// can fold the result. If not, this returns null. Value *SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef Idxs, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyTruncInst - Given operands for an TruncInst, see if we can fold /// the result. If not, this returns null. - Value *SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD = 0, + Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); @@ -171,20 +171,20 @@ namespace llvm { /// SimplifyCmpInst - Given operands for a CmpInst, see if we can /// fold the result. If not, this returns null. Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyBinOp - Given operands for a BinaryOperator, see if we can /// fold the result. If not, this returns null. Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); /// SimplifyInstruction - See if we can compute a simplified version of this /// instruction. If not, this returns null. 
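All of the simplify entry points above, and SimplifyInstruction declared just below, now take the renamed DataLayout pointer in the slot that used to hold a TargetData pointer; the null defaults are unchanged, so callers without target, library, or dominator information keep working. A minimal sketch, assuming the post-rename header sits at llvm/DataLayout.h in this tree:

    // Sketch; DL may be null, mirroring the TD = 0 defaults above.
    #include "llvm/Analysis/InstructionSimplify.h"
    #include "llvm/DataLayout.h"

    static llvm::Value *trySimplify(llvm::Instruction *I,
                                    const llvm::DataLayout *DL) {
      // Returns the simplified value, or null if no fold applies.
      return llvm::SimplifyInstruction(I, DL);
    }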
- Value *SimplifyInstruction(Instruction *I, const TargetData *TD = 0, + Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); @@ -198,7 +198,7 @@ namespace llvm { /// /// The function returns true if any simplifications were performed. bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); @@ -209,7 +209,7 @@ namespace llvm { /// of the users impacted. It returns true if any simplifications were /// performed. bool recursivelySimplifyInstruction(Instruction *I, - const TargetData *TD = 0, + const DataLayout *TD = 0, const TargetLibraryInfo *TLI = 0, const DominatorTree *DT = 0); } // end namespace llvm diff --git a/include/llvm/Analysis/IntervalPartition.h b/include/llvm/Analysis/IntervalPartition.h index df7313f..bce84be 100644 --- a/include/llvm/Analysis/IntervalPartition.h +++ b/include/llvm/Analysis/IntervalPartition.h @@ -33,8 +33,8 @@ namespace llvm { // // IntervalPartition - This class builds and holds an "interval partition" for // a function. This partition divides the control flow graph into a set of -// maximal intervals, as defined with the properties above. Intuitively, a -// BasicBlock is a (possibly nonexistent) loop with a "tail" of non looping +// maximal intervals, as defined with the properties above. Intuitively, an +// interval is a (possibly nonexistent) loop with a "tail" of non looping // nodes following it. // class IntervalPartition : public FunctionPass { diff --git a/include/llvm/Analysis/LazyValueInfo.h b/include/llvm/Analysis/LazyValueInfo.h index 065c230..197e94e 100644 --- a/include/llvm/Analysis/LazyValueInfo.h +++ b/include/llvm/Analysis/LazyValueInfo.h @@ -19,18 +19,18 @@ namespace llvm { class Constant; - class TargetData; + class DataLayout; class TargetLibraryInfo; class Value; /// LazyValueInfo - This pass computes, caches, and vends lazy value constraint /// information. class LazyValueInfo : public FunctionPass { - class TargetData *TD; + class DataLayout *TD; class TargetLibraryInfo *TLI; void *PImpl; - LazyValueInfo(const LazyValueInfo&); // DO NOT IMPLEMENT. - void operator=(const LazyValueInfo&); // DO NOT IMPLEMENT. + LazyValueInfo(const LazyValueInfo&) LLVM_DELETED_FUNCTION; + void operator=(const LazyValueInfo&) LLVM_DELETED_FUNCTION; public: static char ID; LazyValueInfo() : FunctionPass(ID), PImpl(0) { diff --git a/include/llvm/Analysis/Loads.h b/include/llvm/Analysis/Loads.h index 5f0aefb..afc90c2 100644 --- a/include/llvm/Analysis/Loads.h +++ b/include/llvm/Analysis/Loads.h @@ -19,7 +19,7 @@ namespace llvm { class AliasAnalysis; -class TargetData; +class DataLayout; class MDNode; /// isSafeToLoadUnconditionally - Return true if we know that executing a load @@ -27,7 +27,7 @@ class MDNode; /// specified pointer, we do a quick local scan of the basic block containing /// ScanFrom, to determine if the address is already accessed. 
bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom, - unsigned Align, const TargetData *TD = 0); + unsigned Align, const DataLayout *TD = 0); /// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at /// the instruction before ScanFrom) checking to see if we have the value at diff --git a/include/llvm/Analysis/LoopDependenceAnalysis.h b/include/llvm/Analysis/LoopDependenceAnalysis.h deleted file mode 100644 index f195d27..0000000 --- a/include/llvm/Analysis/LoopDependenceAnalysis.h +++ /dev/null @@ -1,124 +0,0 @@ -//===- llvm/Analysis/LoopDependenceAnalysis.h --------------- -*- C++ -*---===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// LoopDependenceAnalysis is an LLVM pass that analyses dependences in memory -// accesses in loops. -// -// Please note that this is work in progress and the interface is subject to -// change. -// -// TODO: adapt as interface progresses -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_ANALYSIS_LOOP_DEPENDENCE_ANALYSIS_H -#define LLVM_ANALYSIS_LOOP_DEPENDENCE_ANALYSIS_H - -#include "llvm/ADT/DenseSet.h" -#include "llvm/ADT/FoldingSet.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/Analysis/LoopPass.h" -#include "llvm/Support/Allocator.h" - -namespace llvm { - -class AliasAnalysis; -class AnalysisUsage; -class ScalarEvolution; -class SCEV; -class Value; -class raw_ostream; - -class LoopDependenceAnalysis : public LoopPass { - AliasAnalysis *AA; - ScalarEvolution *SE; - - /// L - The loop we are currently analysing. - Loop *L; - - /// TODO: doc - enum DependenceResult { Independent = 0, Dependent = 1, Unknown = 2 }; - - /// TODO: doc - struct Subscript { - /// TODO: Add distance, direction, breaking conditions, ... - }; - - /// DependencePair - Represents a data dependence relation between to memory - /// reference instructions. - struct DependencePair : public FastFoldingSetNode { - Value *A; - Value *B; - DependenceResult Result; - SmallVector Subscripts; - - DependencePair(const FoldingSetNodeID &ID, Value *a, Value *b) : - FastFoldingSetNode(ID), A(a), B(b), Result(Unknown), Subscripts() {} - }; - - /// findOrInsertDependencePair - Return true if a DependencePair for the - /// given Values already exists, false if a new DependencePair had to be - /// created. The third argument is set to the pair found or created. - bool findOrInsertDependencePair(Value*, Value*, DependencePair*&); - - /// getLoops - Collect all loops of the loop nest L in which - /// a given SCEV is variant. - void getLoops(const SCEV*, DenseSet*) const; - - /// isLoopInvariant - True if a given SCEV is invariant in all loops of the - /// loop nest starting at the innermost loop L. - bool isLoopInvariant(const SCEV*) const; - - /// isAffine - An SCEV is affine with respect to the loop nest starting at - /// the innermost loop L if it is of the form A+B*X where A, B are invariant - /// in the loop nest and X is a induction variable in the loop nest. 
- bool isAffine(const SCEV*) const; - - /// TODO: doc - bool isZIVPair(const SCEV*, const SCEV*) const; - bool isSIVPair(const SCEV*, const SCEV*) const; - DependenceResult analyseZIV(const SCEV*, const SCEV*, Subscript*) const; - DependenceResult analyseSIV(const SCEV*, const SCEV*, Subscript*) const; - DependenceResult analyseMIV(const SCEV*, const SCEV*, Subscript*) const; - DependenceResult analyseSubscript(const SCEV*, const SCEV*, Subscript*) const; - DependenceResult analysePair(DependencePair*) const; - -public: - static char ID; // Class identification, replacement for typeinfo - LoopDependenceAnalysis() : LoopPass(ID) { - initializeLoopDependenceAnalysisPass(*PassRegistry::getPassRegistry()); - } - - /// isDependencePair - Check whether two values can possibly give rise to - /// a data dependence: that is the case if both are instructions accessing - /// memory and at least one of those accesses is a write. - bool isDependencePair(const Value*, const Value*) const; - - /// depends - Return a boolean indicating if there is a data dependence - /// between two instructions. - bool depends(Value*, Value*); - - bool runOnLoop(Loop*, LPPassManager&); - virtual void releaseMemory(); - virtual void getAnalysisUsage(AnalysisUsage&) const; - void print(raw_ostream&, const Module* = 0) const; - -private: - FoldingSet Pairs; - BumpPtrAllocator PairAllocator; -}; // class LoopDependenceAnalysis - -// createLoopDependenceAnalysisPass - This creates an instance of the -// LoopDependenceAnalysis pass. -// -LoopPass *createLoopDependenceAnalysisPass(); - -} // namespace llvm - -#endif /* LLVM_ANALYSIS_LOOP_DEPENDENCE_ANALYSIS_H */ diff --git a/include/llvm/Analysis/LoopInfo.h b/include/llvm/Analysis/LoopInfo.h index eeb482d..c5d7b01 100644 --- a/include/llvm/Analysis/LoopInfo.h +++ b/include/llvm/Analysis/LoopInfo.h @@ -72,10 +72,9 @@ class LoopBase { // Blocks - The list of blocks in this loop. First entry is the header node. std::vector Blocks; - // DO NOT IMPLEMENT - LoopBase(const LoopBase &); - // DO NOT IMPLEMENT - const LoopBase&operator=(const LoopBase &); + LoopBase(const LoopBase &) LLVM_DELETED_FUNCTION; + const LoopBase& + operator=(const LoopBase &) LLVM_DELETED_FUNCTION; public: /// Loop ctor - This creates an empty loop. LoopBase() : ParentLoop(0) {} @@ -416,8 +415,8 @@ class LoopInfoBase { friend class LoopBase; friend class LoopInfo; - void operator=(const LoopInfoBase &); // do not implement - LoopInfoBase(const LoopInfo &); // do not implement + void operator=(const LoopInfoBase &) LLVM_DELETED_FUNCTION; + LoopInfoBase(const LoopInfo &) LLVM_DELETED_FUNCTION; public: LoopInfoBase() { } ~LoopInfoBase() { releaseMemory(); } @@ -550,8 +549,8 @@ class LoopInfo : public FunctionPass { LoopInfoBase LI; friend class LoopBase; - void operator=(const LoopInfo &); // do not implement - LoopInfo(const LoopInfo &); // do not implement + void operator=(const LoopInfo &) LLVM_DELETED_FUNCTION; + LoopInfo(const LoopInfo &) LLVM_DELETED_FUNCTION; public: static char ID; // Pass identification, replacement for typeid diff --git a/include/llvm/Analysis/LoopInfoImpl.h b/include/llvm/Analysis/LoopInfoImpl.h index c07fbf7..3bb96f9 100644 --- a/include/llvm/Analysis/LoopInfoImpl.h +++ b/include/llvm/Analysis/LoopInfoImpl.h @@ -145,7 +145,6 @@ BlockT *LoopBase::getLoopPredecessor() const { // Loop over the predecessors of the header node... 
BlockT *Header = getHeader(); - typedef GraphTraits BlockTraits; typedef GraphTraits > InvBlockTraits; for (typename InvBlockTraits::ChildIteratorType PI = InvBlockTraits::child_begin(Header), diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h index e674e74..a842898 100644 --- a/include/llvm/Analysis/MemoryBuiltins.h +++ b/include/llvm/Analysis/MemoryBuiltins.h @@ -27,7 +27,8 @@ namespace llvm { class CallInst; class PointerType; -class TargetData; +class DataLayout; +class TargetLibraryInfo; class Type; class Value; @@ -35,27 +36,33 @@ class Value; /// \brief Tests if a value is a call or invoke to a library function that /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup /// like). -bool isAllocationFn(const Value *V, bool LookThroughBitCast = false); +bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast = false); /// \brief Tests if a value is a call or invoke to a function that returns a /// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions). -bool isNoAliasFn(const Value *V, bool LookThroughBitCast = false); +bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast = false); /// \brief Tests if a value is a call or invoke to a library function that /// allocates uninitialized memory (such as malloc). -bool isMallocLikeFn(const Value *V, bool LookThroughBitCast = false); +bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast = false); /// \brief Tests if a value is a call or invoke to a library function that /// allocates zero-filled memory (such as calloc). -bool isCallocLikeFn(const Value *V, bool LookThroughBitCast = false); +bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast = false); /// \brief Tests if a value is a call or invoke to a library function that /// allocates memory (either malloc, calloc, or strdup like). -bool isAllocLikeFn(const Value *V, bool LookThroughBitCast = false); +bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast = false); /// \brief Tests if a value is a call or invoke to a library function that /// reallocates memory (such as realloc). -bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false); +bool isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast = false); //===----------------------------------------------------------------------===// @@ -65,36 +72,39 @@ bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false); /// extractMallocCall - Returns the corresponding CallInst if the instruction /// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we /// ignore InvokeInst here. -const CallInst *extractMallocCall(const Value *I); -static inline CallInst *extractMallocCall(Value *I) { - return const_cast(extractMallocCall((const Value*)I)); +const CallInst *extractMallocCall(const Value *I, const TargetLibraryInfo *TLI); +static inline CallInst *extractMallocCall(Value *I, + const TargetLibraryInfo *TLI) { + return const_cast(extractMallocCall((const Value*)I, TLI)); } /// isArrayMalloc - Returns the corresponding CallInst if the instruction /// is a call to malloc whose array size can be determined and the array size /// is not constant 1. Otherwise, return NULL. 
-const CallInst *isArrayMalloc(const Value *I, const TargetData *TD); +const CallInst *isArrayMalloc(const Value *I, const DataLayout *TD, + const TargetLibraryInfo *TLI); /// getMallocType - Returns the PointerType resulting from the malloc call. /// The PointerType depends on the number of bitcast uses of the malloc call: /// 0: PointerType is the malloc calls' return type. /// 1: PointerType is the bitcast's result type. /// >1: Unique PointerType cannot be determined, return NULL. -PointerType *getMallocType(const CallInst *CI); +PointerType *getMallocType(const CallInst *CI, const TargetLibraryInfo *TLI); /// getMallocAllocatedType - Returns the Type allocated by malloc call. /// The Type depends on the number of bitcast uses of the malloc call: /// 0: PointerType is the malloc calls' return type. /// 1: PointerType is the bitcast's result type. /// >1: Unique PointerType cannot be determined, return NULL. -Type *getMallocAllocatedType(const CallInst *CI); +Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI); /// getMallocArraySize - Returns the array size of a malloc call. If the /// argument passed to malloc is a multiple of the size of the malloced type, /// then return that multiple. For non-array mallocs, the multiple is /// constant 1. Otherwise, return NULL for mallocs whose array size cannot be /// determined. -Value *getMallocArraySize(CallInst *CI, const TargetData *TD, +Value *getMallocArraySize(CallInst *CI, const DataLayout *TD, + const TargetLibraryInfo *TLI, bool LookThroughSExt = false); @@ -104,9 +114,10 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD, /// extractCallocCall - Returns the corresponding CallInst if the instruction /// is a calloc call. -const CallInst *extractCallocCall(const Value *I); -static inline CallInst *extractCallocCall(Value *I) { - return const_cast(extractCallocCall((const Value*)I)); +const CallInst *extractCallocCall(const Value *I, const TargetLibraryInfo *TLI); +static inline CallInst *extractCallocCall(Value *I, + const TargetLibraryInfo *TLI) { + return const_cast(extractCallocCall((const Value*)I, TLI)); } @@ -115,10 +126,10 @@ static inline CallInst *extractCallocCall(Value *I) { // /// isFreeCall - Returns non-null if the value is a call to the builtin free() -const CallInst *isFreeCall(const Value *I); +const CallInst *isFreeCall(const Value *I, const TargetLibraryInfo *TLI); -static inline CallInst *isFreeCall(Value *I) { - return const_cast(isFreeCall((const Value*)I)); +static inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) { + return const_cast(isFreeCall((const Value*)I, TLI)); } @@ -130,8 +141,8 @@ static inline CallInst *isFreeCall(Value *I) { /// object size in Size if successful, and false otherwise. /// If RoundToAlign is true, then Size is rounded up to the aligment of allocas, /// byval arguments, and global variables. 
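The net effect of the MemoryBuiltins changes above is that every allocation and free recognizer (isAllocationFn through isFreeCall) now requires a TargetLibraryInfo, so a target that declares itself free of a C library automatically disables them. A minimal sketch of a caller, with the header path for TargetLibraryInfo assumed for this vintage of the tree:

    // Sketch only; TLI must describe which library calls the target provides.
    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/Target/TargetLibraryInfo.h"

    static bool pointsToFreshAllocation(const llvm::Value *V,
                                        const llvm::TargetLibraryInfo *TLI) {
      // Covers malloc-, calloc- and strdup-like calls, per the comment above.
      return llvm::isAllocLikeFn(V, TLI);
    }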
-bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD, - bool RoundToAlign = false); +bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD, + const TargetLibraryInfo *TLI, bool RoundToAlign = false); @@ -142,10 +153,12 @@ typedef std::pair SizeOffsetType; class ObjectSizeOffsetVisitor : public InstVisitor { - const TargetData *TD; + const DataLayout *TD; + const TargetLibraryInfo *TLI; bool RoundToAlign; unsigned IntTyBits; APInt Zero; + SmallPtrSet SeenInsts; APInt align(APInt Size, uint64_t Align); @@ -154,8 +167,8 @@ class ObjectSizeOffsetVisitor } public: - ObjectSizeOffsetVisitor(const TargetData *TD, LLVMContext &Context, - bool RoundToAlign = false); + ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI, + LLVMContext &Context, bool RoundToAlign = false); SizeOffsetType compute(Value *V); @@ -200,10 +213,10 @@ class ObjectSizeOffsetEvaluator typedef DenseMap CacheMapTy; typedef SmallPtrSet PtrSetTy; - const TargetData *TD; + const DataLayout *TD; + const TargetLibraryInfo *TLI; LLVMContext &Context; BuilderTy Builder; - ObjectSizeOffsetVisitor Visitor; IntegerType *IntTy; Value *Zero; CacheMapTy CacheMap; @@ -215,7 +228,8 @@ class ObjectSizeOffsetEvaluator SizeOffsetEvalType compute_(Value *V); public: - ObjectSizeOffsetEvaluator(const TargetData *TD, LLVMContext &Context); + ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI, + LLVMContext &Context); SizeOffsetEvalType compute(Value *V); bool knownSize(SizeOffsetEvalType SizeOffset) { diff --git a/include/llvm/Analysis/MemoryDependenceAnalysis.h b/include/llvm/Analysis/MemoryDependenceAnalysis.h index 7e049d6..a715eae 100644 --- a/include/llvm/Analysis/MemoryDependenceAnalysis.h +++ b/include/llvm/Analysis/MemoryDependenceAnalysis.h @@ -29,7 +29,7 @@ namespace llvm { class Instruction; class CallSite; class AliasAnalysis; - class TargetData; + class DataLayout; class MemoryDependenceAnalysis; class PredIteratorCache; class DominatorTree; @@ -323,7 +323,7 @@ namespace llvm { /// Current AA implementation, just a cache. AliasAnalysis *AA; - TargetData *TD; + DataLayout *TD; DominatorTree *DT; OwningPtr PredCache; public: @@ -412,7 +412,7 @@ namespace llvm { int64_t MemLocOffs, unsigned MemLocSize, const LoadInst *LI, - const TargetData &TD); + const DataLayout &TD); private: MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall, diff --git a/include/llvm/Analysis/PHITransAddr.h b/include/llvm/Analysis/PHITransAddr.h index ff9a247..5a77fce 100644 --- a/include/llvm/Analysis/PHITransAddr.h +++ b/include/llvm/Analysis/PHITransAddr.h @@ -19,7 +19,7 @@ namespace llvm { class DominatorTree; - class TargetData; + class DataLayout; class TargetLibraryInfo; /// PHITransAddr - An address value which tracks and handles phi translation. @@ -37,7 +37,7 @@ class PHITransAddr { Value *Addr; /// TD - The target data we are playing with if known, otherwise null. - const TargetData *TD; + const DataLayout *TD; /// TLI - The target library info if known, otherwise null. const TargetLibraryInfo *TLI; @@ -45,7 +45,7 @@ class PHITransAddr { /// InstInputs - The inputs for our symbolic address. SmallVector InstInputs; public: - PHITransAddr(Value *addr, const TargetData *td) : Addr(addr), TD(td), TLI(0) { + PHITransAddr(Value *addr, const DataLayout *td) : Addr(addr), TD(td), TLI(0) { // If the address is an instruction, the whole thing is considered an input. 
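getObjectSize and the size/offset evaluators take the TargetLibraryInfo as well, on top of the DataLayout rename; the new SeenInsts set presumably keeps ObjectSizeOffsetVisitor from revisiting instructions on cyclic use chains. The common query looks like this (Ptr, TD and TLI assumed from context, RoundToAlign left at its default):

    uint64_t Size;
    if (getObjectSize(Ptr, Size, TD, TLI)) {
      // Size now holds the statically known size, in bytes, of the object Ptr points into.
    }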
if (Instruction *I = dyn_cast(Addr)) InstInputs.push_back(I); diff --git a/include/llvm/Analysis/Passes.h b/include/llvm/Analysis/Passes.h index a22bd12..27726f4 100644 --- a/include/llvm/Analysis/Passes.h +++ b/include/llvm/Analysis/Passes.h @@ -103,6 +103,14 @@ namespace llvm { //===--------------------------------------------------------------------===// // + // createProfileMetadataLoaderPass - This pass loads information from a + // profile dump file and sets branch weight metadata. + // + ModulePass *createProfileMetadataLoaderPass(); + extern char &ProfileMetadataLoaderPassID; + + //===--------------------------------------------------------------------===// + // // createNoProfileInfoPass - This pass implements the default "no profile". // ImmutablePass *createNoProfileInfoPass(); @@ -172,11 +180,20 @@ namespace llvm { //===--------------------------------------------------------------------===// // - // createLoopDependenceAnalysisPass - This creates an instance of the - // LoopDependenceAnalysis pass. + // createDependenceAnalysisPass - This creates an instance of the + // DependenceAnalysis pass. + // + FunctionPass *createDependenceAnalysisPass(); + + //===--------------------------------------------------------------------===// + // + // createCostModelAnalysisPass - This creates an instance of the + // CostModelAnalysis pass. // - LoopPass *createLoopDependenceAnalysisPass(); + FunctionPass *createCostModelAnalysisPass(); + //===--------------------------------------------------------------------===// + // // Minor pass prototypes, allowing us to expose them through bugpoint and // analyze. FunctionPass *createInstCountPass(); diff --git a/include/llvm/Analysis/ProfileDataLoader.h b/include/llvm/Analysis/ProfileDataLoader.h new file mode 100644 index 0000000..9efbafc --- /dev/null +++ b/include/llvm/Analysis/ProfileDataLoader.h @@ -0,0 +1,139 @@ +//===- ProfileDataLoader.h - Load & convert profile info ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The ProfileDataLoader class is used to load profiling data from a dump file. +// The ProfileDataT class is used to store the mapping of this +// data to control flow edges. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ANALYSIS_PROFILEDATALOADER_H +#define LLVM_ANALYSIS_PROFILEDATALOADER_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include + +namespace llvm { + +class ModulePass; +class Function; +class BasicBlock; + +// Helper for dumping edges to dbgs(). +raw_ostream& operator<<(raw_ostream &O, std::pair E); + +/// \brief The ProfileDataT class is used to store the mapping of +/// profiling data to control flow edges. +/// +/// An edge is defined by its source and sink basic blocks. +template +class ProfileDataT { +public: + // The profiling information defines an Edge by its source and sink basic + // blocks. + typedef std::pair Edge; + +private: + typedef DenseMap EdgeWeights; + + /// \brief Count the number of times a transition between two blocks is + /// executed. + /// + /// As a special case, we also hold an edge from the null BasicBlock to the + /// entry block to indicate how many times the function was entered. 
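The Passes.h hunk above adds three creator functions: createProfileMetadataLoaderPass (reads a profile dump and attaches branch-weight metadata), createDependenceAnalysisPass (the replacement for the removed createLoopDependenceAnalysisPass), and createCostModelAnalysisPass. A sketch of wiring the metadata loader into a pipeline; the surrounding PassManager setup is assumed, not part of this patch:

    #include "llvm/PassManager.h"
    #include "llvm/Analysis/Passes.h"
    using namespace llvm;

    static void addProfileLoading(PassManager &PM) {
      PM.add(createProfileMetadataLoaderPass());  // attaches !prof branch weights from the dump file
    }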
+ DenseMap EdgeInformation; + +public: + /// getFunction() - Returns the Function for an Edge. + static const FType *getFunction(Edge e) { + // e.first may be NULL + assert(((!e.first) || (e.first->getParent() == e.second->getParent())) + && "A ProfileData::Edge can not be between two functions"); + assert(e.second && "A ProfileData::Edge must have a real sink"); + return e.second->getParent(); + } + + /// getEdge() - Creates an Edge between two BasicBlocks. + static Edge getEdge(const BType *Src, const BType *Dest) { + return Edge(Src, Dest); + } + + /// getEdgeWeight - Return the number of times that a given edge was + /// executed. + unsigned getEdgeWeight(Edge e) const { + const FType *f = getFunction(e); + assert((EdgeInformation.find(f) != EdgeInformation.end()) + && "No profiling information for function"); + EdgeWeights weights = EdgeInformation.find(f)->second; + + assert((weights.find(e) != weights.end()) + && "No profiling information for edge"); + return weights.find(e)->second; + } + + /// addEdgeWeight - Add 'weight' to the already stored execution count for + /// this edge. + void addEdgeWeight(Edge e, unsigned weight) { + EdgeInformation[getFunction(e)][e] += weight; + } +}; + +typedef ProfileDataT ProfileData; +//typedef ProfileDataT MachineProfileData; + +/// The ProfileDataLoader class is used to load raw profiling data from the +/// dump file. +class ProfileDataLoader { +private: + /// The name of the file where the raw profiling data is stored. + const std::string &Filename; + + /// A vector of the command line arguments used when the target program was + /// run to generate profiling data. One entry per program run. + SmallVector CommandLines; + + /// The raw values for how many times each edge was traversed, values from + /// multiple program runs are accumulated. + SmallVector EdgeCounts; + +public: + /// ProfileDataLoader ctor - Read the specified profiling data file, exiting + /// the program if the file is invalid or broken. + ProfileDataLoader(const char *ToolName, const std::string &Filename); + + /// A special value used to represent the weight of an edge which has not + /// been counted yet. + static const unsigned Uncounted; + + /// getNumExecutions - Return the number of times the target program was run + /// to generate this profiling data. + unsigned getNumExecutions() const { return CommandLines.size(); } + + /// getExecution - Return the command line parameters used to generate the + /// i'th set of profiling data. + const std::string &getExecution(unsigned i) const { return CommandLines[i]; } + + const std::string &getFileName() const { return Filename; } + + /// getRawEdgeCounts - Return the raw profiling data, this is just a list of + /// numbers with no mappings to edges. + ArrayRef getRawEdgeCounts() const { return EdgeCounts; } +}; + +/// createProfileMetadataLoaderPass - This function returns a Pass that loads +/// the profiling information for the module from the specified filename. +ModulePass *createProfileMetadataLoaderPass(const std::string &Filename); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Analysis/ProfileDataTypes.h b/include/llvm/Analysis/ProfileDataTypes.h new file mode 100644 index 0000000..1be15e0 --- /dev/null +++ b/include/llvm/Analysis/ProfileDataTypes.h @@ -0,0 +1,39 @@ +/*===-- ProfileDataTypes.h - Profiling info shared constants --------------===*\ +|* +|* The LLVM Compiler Infrastructure +|* +|* This file is distributed under the University of Illinois Open Source +|* License. See LICENSE.TXT for details. 
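ProfileDataT is a small in-memory map from CFG edges to execution counts, with the null-source edge standing for the function-entry count. Usage follows directly from the class as declared above (ProfileData is the instantiation over Function and BasicBlock; F is an llvm::Function assumed from context):

    ProfileData PD;
    ProfileData::Edge Entry = ProfileData::getEdge(0, &F.getEntryBlock()); // null source = entry count
    PD.addEdgeWeight(Entry, 1);                // accumulate one execution of the function
    unsigned Count = PD.getEdgeWeight(Entry);  // asserts if the edge was never recorded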
+|* +|*===----------------------------------------------------------------------===*| +|* +|* This file defines constants shared by the various different profiling +|* runtime libraries and the LLVM C++ profile metadata loader. It must be a +|* C header because, at present, the profiling runtimes are written in C. +|* +\*===----------------------------------------------------------------------===*/ + +#ifndef LLVM_ANALYSIS_PROFILEDATATYPES_H +#define LLVM_ANALYSIS_PROFILEDATATYPES_H + +/* Included by libprofile. */ +#if defined(__cplusplus) +extern "C" { +#endif + +/* TODO: Strip out unused entries once ProfileInfo etc has been removed. */ +enum ProfilingType { + ArgumentInfo = 1, /* The command line argument block */ + FunctionInfo = 2, /* Function profiling information */ + BlockInfo = 3, /* Block profiling information */ + EdgeInfo = 4, /* Edge profiling information */ + PathInfo = 5, /* Path profiling information */ + BBTraceInfo = 6, /* Basic block trace information */ + OptEdgeInfo = 7 /* Edge profiling information, optimal version */ +}; + +#if defined(__cplusplus) +} +#endif + +#endif /* LLVM_ANALYSIS_PROFILEDATATYPES_H */ diff --git a/include/llvm/Analysis/ProfileInfoTypes.h b/include/llvm/Analysis/ProfileInfoTypes.h index 6b4ac85..45aab5b 100644 --- a/include/llvm/Analysis/ProfileInfoTypes.h +++ b/include/llvm/Analysis/ProfileInfoTypes.h @@ -27,15 +27,7 @@ enum ProfilingStorageType { ProfilingHash = 2 }; -enum ProfilingType { - ArgumentInfo = 1, /* The command line argument block */ - FunctionInfo = 2, /* Function profiling information */ - BlockInfo = 3, /* Block profiling information */ - EdgeInfo = 4, /* Edge profiling information */ - PathInfo = 5, /* Path profiling information */ - BBTraceInfo = 6, /* Basic block trace information */ - OptEdgeInfo = 7 /* Edge profiling information, optimal version */ -}; +#include "llvm/Analysis/ProfileDataTypes.h" /* * The header for tables that map path numbers to path counters. diff --git a/include/llvm/Analysis/RegionInfo.h b/include/llvm/Analysis/RegionInfo.h index 188d11c..48d7ee6 100644 --- a/include/llvm/Analysis/RegionInfo.h +++ b/include/llvm/Analysis/RegionInfo.h @@ -54,10 +54,8 @@ class FlatIt {}; /// @brief A RegionNode represents a subregion or a BasicBlock that is part of a /// Region. class RegionNode { - // DO NOT IMPLEMENT - RegionNode(const RegionNode &); - // DO NOT IMPLEMENT - const RegionNode &operator=(const RegionNode &); + RegionNode(const RegionNode &) LLVM_DELETED_FUNCTION; + const RegionNode &operator=(const RegionNode &) LLVM_DELETED_FUNCTION; protected: /// This is the entry basic block that starts this region node. If this is a @@ -203,10 +201,8 @@ inline Region* RegionNode::getNodeAs() const { /// tree, the second one creates a graphical representation using graphviz. class Region : public RegionNode { friend class RegionInfo; - // DO NOT IMPLEMENT - Region(const Region &); - // DO NOT IMPLEMENT - const Region &operator=(const Region &); + Region(const Region &) LLVM_DELETED_FUNCTION; + const Region &operator=(const Region &) LLVM_DELETED_FUNCTION; // Information necessary to manage this Region. RegionInfo* RI; @@ -473,27 +469,6 @@ public: const_iterator end() const { return children.end(); } //@} - /// @name BasicBlock Node Iterators - /// - /// These iterators iterate over all BasicBlock RegionNodes that are - /// contained in this Region. The iterator also iterates over BasicBlock - /// RegionNodes that are elements of a subregion of this Region. It is - /// therefore called a flat iterator. 
- //@{ - typedef df_iterator, false, - GraphTraits > > block_node_iterator; - - typedef df_iterator, - false, GraphTraits > > - const_block_node_iterator; - - block_node_iterator block_node_begin(); - block_node_iterator block_node_end(); - - const_block_node_iterator block_node_begin() const; - const_block_node_iterator block_node_end() const; - //@} - /// @name BasicBlock Iterators /// /// These iterators iterate over all BasicBlocks that are contained in this @@ -586,10 +561,8 @@ class RegionInfo : public FunctionPass { typedef DenseMap BBtoRegionMap; typedef SmallPtrSet RegionSet; - // DO NOT IMPLEMENT - RegionInfo(const RegionInfo &); - // DO NOT IMPLEMENT - const RegionInfo &operator=(const RegionInfo &); + RegionInfo(const RegionInfo &) LLVM_DELETED_FUNCTION; + const RegionInfo &operator=(const RegionInfo &) LLVM_DELETED_FUNCTION; DominatorTree *DT; PostDominatorTree *PDT; diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h index c213ade..235adca0 100644 --- a/include/llvm/Analysis/ScalarEvolution.h +++ b/include/llvm/Analysis/ScalarEvolution.h @@ -40,7 +40,7 @@ namespace llvm { class DominatorTree; class Type; class ScalarEvolution; - class TargetData; + class DataLayout; class TargetLibraryInfo; class LLVMContext; class Loop; @@ -70,8 +70,8 @@ namespace llvm { unsigned short SubclassData; private: - SCEV(const SCEV &); // DO NOT IMPLEMENT - void operator=(const SCEV &); // DO NOT IMPLEMENT + SCEV(const SCEV &) LLVM_DELETED_FUNCTION; + void operator=(const SCEV &) LLVM_DELETED_FUNCTION; public: /// NoWrapFlags are bitfield indices into SubclassData. @@ -162,7 +162,6 @@ namespace llvm { SCEVCouldNotCompute(); /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVCouldNotCompute *S) { return true; } static bool classof(const SCEV *S); }; @@ -227,7 +226,7 @@ namespace llvm { /// TD - The target data information for the target we are targeting. /// - TargetData *TD; + DataLayout *TD; /// TLI - The target library information for the target we are targeting. 
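The "DO NOT IMPLEMENT" copy-constructor and assignment comments in RegionInfo.h and ScalarEvolution.h are converted to the LLVM_DELETED_FUNCTION macro from llvm/Support/Compiler.h, which expands to "= delete" on compilers that support it and to a plain unimplemented declaration otherwise. The idiom for any non-copyable class is simply:

    #include "llvm/Support/Compiler.h"

    class Widget {   // illustrative name, not from this patch
      Widget(const Widget &) LLVM_DELETED_FUNCTION;
      void operator=(const Widget &) LLVM_DELETED_FUNCTION;
    public:
      Widget() {}
    };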
/// @@ -874,6 +873,7 @@ namespace llvm { virtual void releaseMemory(); virtual void getAnalysisUsage(AnalysisUsage &AU) const; virtual void print(raw_ostream &OS, const Module* = 0) const; + virtual void verifyAnalysis() const; private: FoldingSet UniqueSCEVs; diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h index ded1297..54db7d6 100644 --- a/include/llvm/Analysis/ScalarEvolutionExpressions.h +++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h @@ -46,7 +46,6 @@ namespace llvm { Type *getType() const { return V->getType(); } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVConstant *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scConstant; } @@ -68,7 +67,6 @@ namespace llvm { Type *getType() const { return Ty; } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVCastExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scTruncate || S->getSCEVType() == scZeroExtend || @@ -88,7 +86,6 @@ namespace llvm { public: /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVTruncateExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scTruncate; } @@ -106,7 +103,6 @@ namespace llvm { public: /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVZeroExtendExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scZeroExtend; } @@ -124,7 +120,6 @@ namespace llvm { public: /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVSignExtendExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scSignExtend; } @@ -166,7 +161,6 @@ namespace llvm { } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVNAryExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scAddExpr || S->getSCEVType() == scMulExpr || @@ -188,7 +182,6 @@ namespace llvm { public: /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVCommutativeExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scAddExpr || S->getSCEVType() == scMulExpr || @@ -223,7 +216,6 @@ namespace llvm { } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVAddExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scAddExpr; } @@ -242,7 +234,6 @@ namespace llvm { public: /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVMulExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scMulExpr; } @@ -274,7 +265,6 @@ namespace llvm { } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVUDivExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scUDivExpr; } @@ -358,7 +348,6 @@ namespace llvm { } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVAddRecExpr *S) { return 
true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scAddRecExpr; } @@ -380,7 +369,6 @@ namespace llvm { public: /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVSMaxExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scSMaxExpr; } @@ -402,7 +390,6 @@ namespace llvm { public: /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVUMaxExpr *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scUMaxExpr; } @@ -449,7 +436,6 @@ namespace llvm { Type *getType() const { return getValPtr()->getType(); } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SCEVUnknown *S) { return true; } static inline bool classof(const SCEV *S) { return S->getSCEVType() == scUnknown; } diff --git a/include/llvm/Analysis/SparsePropagation.h b/include/llvm/Analysis/SparsePropagation.h index c3c2f4b..b758eca 100644 --- a/include/llvm/Analysis/SparsePropagation.h +++ b/include/llvm/Analysis/SparsePropagation.h @@ -130,9 +130,9 @@ class SparseSolver { /// PHI nodes retriggered. typedef std::pair Edge; std::set KnownFeasibleEdges; - - SparseSolver(const SparseSolver&); // DO NOT IMPLEMENT - void operator=(const SparseSolver&); // DO NOT IMPLEMENT + + SparseSolver(const SparseSolver&) LLVM_DELETED_FUNCTION; + void operator=(const SparseSolver&) LLVM_DELETED_FUNCTION; public: explicit SparseSolver(AbstractLatticeFunction *Lattice) : LatticeFunc(Lattice) {} diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h index e8d45f6..a857524 100644 --- a/include/llvm/Analysis/ValueTracking.h +++ b/include/llvm/Analysis/ValueTracking.h @@ -22,7 +22,7 @@ namespace llvm { class Value; class Instruction; class APInt; - class TargetData; + class DataLayout; class StringRef; class MDNode; @@ -37,27 +37,27 @@ namespace llvm { /// same width as the vector element, and the bit is set only if it is true /// for all of the elements in the vector. void ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne, - const TargetData *TD = 0, unsigned Depth = 0); + const DataLayout *TD = 0, unsigned Depth = 0); void computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero); /// ComputeSignBit - Determine whether the sign bit is known to be zero or /// one. Convenience wrapper around ComputeMaskedBits. void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, - const TargetData *TD = 0, unsigned Depth = 0); + const DataLayout *TD = 0, unsigned Depth = 0); /// isPowerOfTwo - Return true if the given value is known to have exactly one /// bit set when defined. For vectors return true if every element is known to /// be a power of two when defined. Supports values with integer or pointer /// type and vectors of integers. If 'OrZero' is set then returns true if the /// given value is either a power of two or zero. - bool isPowerOfTwo(Value *V, const TargetData *TD = 0, bool OrZero = false, + bool isPowerOfTwo(Value *V, const DataLayout *TD = 0, bool OrZero = false, unsigned Depth = 0); /// isKnownNonZero - Return true if the given value is known to be non-zero /// when defined. For vectors return true if every element is known to be /// non-zero when defined. Supports values with integer or pointer type and /// vectors of integers. 
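The deleted one-argument classof overloads in ScalarEvolutionExpressions.h (classof(const SCEVConstant*) and friends) were always-true self checks that the isa/dyn_cast machinery no longer requires; only the classof(const SCEV *S) form remains. Client code is unchanged, for example:

    #include "llvm/Analysis/ScalarEvolutionExpressions.h"
    using namespace llvm;

    static bool isAffineAddRec(const SCEV *S) {
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
        return AR->isAffine();   // dyn_cast still resolves through classof(const SCEV*)
      return false;
    }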
- bool isKnownNonZero(Value *V, const TargetData *TD = 0, unsigned Depth = 0); + bool isKnownNonZero(Value *V, const DataLayout *TD = 0, unsigned Depth = 0); /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use /// this predicate to simplify operations downstream. Mask is known to be @@ -69,7 +69,7 @@ namespace llvm { /// same width as the vector element, and the bit is set only if it is true /// for all of the elements in the vector. bool MaskedValueIsZero(Value *V, const APInt &Mask, - const TargetData *TD = 0, unsigned Depth = 0); + const DataLayout *TD = 0, unsigned Depth = 0); /// ComputeNumSignBits - Return the number of times the sign bit of the @@ -80,7 +80,7 @@ namespace llvm { /// /// 'Op' must have a scalar integer type. /// - unsigned ComputeNumSignBits(Value *Op, const TargetData *TD = 0, + unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = 0, unsigned Depth = 0); /// ComputeMultiple - This function computes the integer multiple of Base that @@ -118,10 +118,10 @@ namespace llvm { /// it can be expressed as a base pointer plus a constant offset. Return the /// base and offset to the caller. Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, - const TargetData &TD); + const DataLayout &TD); static inline const Value * GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset, - const TargetData &TD) { + const DataLayout &TD) { return GetPointerBaseWithConstantOffset(const_cast(Ptr), Offset,TD); } @@ -143,10 +143,10 @@ namespace llvm { /// being addressed. Note that the returned value has pointer type if the /// specified value does. If the MaxLookup value is non-zero, it limits the /// number of instructions to be stripped off. - Value *GetUnderlyingObject(Value *V, const TargetData *TD = 0, + Value *GetUnderlyingObject(Value *V, const DataLayout *TD = 0, unsigned MaxLookup = 6); static inline const Value * - GetUnderlyingObject(const Value *V, const TargetData *TD = 0, + GetUnderlyingObject(const Value *V, const DataLayout *TD = 0, unsigned MaxLookup = 6) { return GetUnderlyingObject(const_cast(V), TD, MaxLookup); } @@ -156,7 +156,7 @@ namespace llvm { /// multiple objects. void GetUnderlyingObjects(Value *V, SmallVectorImpl &Objects, - const TargetData *TD = 0, + const DataLayout *TD = 0, unsigned MaxLookup = 6); /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer @@ -182,7 +182,7 @@ namespace llvm { /// However, this method can return true for instructions that read memory; /// for such instructions, moving them may change the resulting value. bool isSafeToSpeculativelyExecute(const Value *V, - const TargetData *TD = 0); + const DataLayout *TD = 0); } // end namespace llvm diff --git a/include/llvm/Argument.h b/include/llvm/Argument.h index e66075c..b1c2218 100644 --- a/include/llvm/Argument.h +++ b/include/llvm/Argument.h @@ -68,8 +68,8 @@ public: /// attribute on it in its containing function. bool hasNoCaptureAttr() const; - /// hasSRetAttr - Return true if this argument has the sret attribute on it in - /// its containing function. + /// hasStructRetAttr - Return true if this argument has the sret attribute on + /// it in its containing function. 
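ValueTracking keeps its interface and only swaps TargetData for DataLayout. A minimal sketch of the two most common queries (V, BitWidth and TD assumed from context; BitWidth must match V's integer width, and TD may be null):

    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
    ComputeMaskedBits(V, KnownZero, KnownOne, TD);

    bool SignKnownZero, SignKnownOne;
    ComputeSignBit(V, SignKnownZero, SignKnownOne, TD);  // convenience wrapper for the sign bit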
bool hasStructRetAttr() const; /// addAttr - Add a Attribute to an argument @@ -81,7 +81,6 @@ public: /// classof - Methods for support type inquiry through isa, cast, and /// dyn_cast: /// - static inline bool classof(const Argument *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == ArgumentVal; } diff --git a/include/llvm/Attributes.h b/include/llvm/Attributes.h index 223aa00..a9c2d74 100644 --- a/include/llvm/Attributes.h +++ b/include/llvm/Attributes.h @@ -21,268 +21,280 @@ #include namespace llvm { -class Type; - -namespace Attribute { -/// We use this proxy POD type to allow constructing Attributes constants -/// using initializer lists. Do not use this class directly. -struct AttrConst { - uint64_t v; - AttrConst operator | (const AttrConst Attrs) const { - AttrConst Res = {v | Attrs.v}; - return Res; - } - AttrConst operator ~ () const { - AttrConst Res = {~v}; - return Res; - } -}; -} // namespace Attribute +class AttrBuilder; +class AttributesImpl; +class LLVMContext; +class Type; /// Attributes - A bitset of attributes. class Attributes { - public: - Attributes() : Bits(0) { } - explicit Attributes(uint64_t Val) : Bits(Val) { } - /*implicit*/ Attributes(Attribute::AttrConst Val) : Bits(Val.v) { } - // This is a "safe bool() operator". - operator const void *() const { return Bits ? this : 0; } - bool isEmptyOrSingleton() const { return (Bits & (Bits - 1)) == 0; } - bool operator == (const Attributes &Attrs) const { - return Bits == Attrs.Bits; +public: + /// Function parameters and results can have attributes to indicate how they + /// should be treated by optimizations and code generation. This enumeration + /// lists the attributes that can be associated with parameters, function + /// results or the function itself. + /// + /// Note that uwtable is about the ABI or the user mandating an entry in the + /// unwind table. The nounwind attribute is about an exception passing by the + /// function. + /// + /// In a theoretical system that uses tables for profiling and sjlj for + /// exceptions, they would be fully independent. In a normal system that uses + /// tables for both, the semantics are: + /// + /// nil = Needs an entry because an exception might pass by. + /// nounwind = No need for an entry + /// uwtable = Needs an entry because the ABI says so and because + /// an exception might pass by. + /// uwtable + nounwind = Needs an entry because the ABI says so. + + enum AttrVal { + // IR-Level Attributes + None, ///< No attributes have been set + AddressSafety, ///< Address safety checking is on. 
+ Alignment, ///< Alignment of parameter (5 bits) + ///< stored as log2 of alignment with +1 bias + ///< 0 means unaligned different from align 1 + AlwaysInline, ///< inline=always + ByVal, ///< Pass structure by value + InlineHint, ///< Source said inlining was desirable + InReg, ///< Force argument to be passed in register + MinSize, ///< Function must be optimized for size first + Naked, ///< Naked function + Nest, ///< Nested function static chain + NoAlias, ///< Considered to not alias after call + NoCapture, ///< Function creates no aliases of pointer + NoImplicitFloat, ///< Disable implicit floating point insts + NoInline, ///< inline=never + NonLazyBind, ///< Function is called early and/or + ///< often, so lazy binding isn't worthwhile + NoRedZone, ///< Disable redzone + NoReturn, ///< Mark the function as not returning + NoUnwind, ///< Function doesn't unwind stack + OptimizeForSize, ///< opt_size + ReadNone, ///< Function does not access memory + ReadOnly, ///< Function only reads from memory + ReturnsTwice, ///< Function can return twice + SExt, ///< Sign extended before/after call + StackAlignment, ///< Alignment of stack for function (3 bits) + ///< stored as log2 of alignment with +1 bias 0 + ///< means unaligned (different from + ///< alignstack={1)) + StackProtect, ///< Stack protection. + StackProtectReq, ///< Stack protection required. + StructRet, ///< Hidden pointer to structure to return + UWTable, ///< Function must be in a unwind table + ZExt ///< Zero extended before/after call + }; +private: + AttributesImpl *Attrs; + Attributes(AttributesImpl *A) : Attrs(A) {} +public: + Attributes() : Attrs(0) {} + Attributes(const Attributes &A) : Attrs(A.Attrs) {} + Attributes &operator=(const Attributes &A) { + Attrs = A.Attrs; + return *this; } - bool operator != (const Attributes &Attrs) const { - return Bits != Attrs.Bits; + + /// get - Return a uniquified Attributes object. This takes the uniquified + /// value from the Builder and wraps it in the Attributes class. + static Attributes get(LLVMContext &Context, ArrayRef Vals); + static Attributes get(LLVMContext &Context, AttrBuilder &B); + + /// @brief Return true if the attribute is present. + bool hasAttribute(AttrVal Val) const; + + /// @brief Return true if attributes exist + bool hasAttributes() const; + + /// @brief Return true if the attributes are a non-null intersection. + bool hasAttributes(const Attributes &A) const; + + /// @brief Returns the alignment field of an attribute as a byte alignment + /// value. + unsigned getAlignment() const; + + /// @brief Returns the stack alignment field of an attribute as a byte + /// alignment value. + unsigned getStackAlignment() const; + + /// @brief Parameter attributes that do not apply to vararg call arguments. + bool hasIncompatibleWithVarArgsAttrs() const { + return hasAttribute(Attributes::StructRet); } - Attributes operator | (const Attributes &Attrs) const { - return Attributes(Bits | Attrs.Bits); + + /// @brief Attributes that only apply to function parameters. + bool hasParameterOnlyAttrs() const { + return hasAttribute(Attributes::ByVal) || + hasAttribute(Attributes::Nest) || + hasAttribute(Attributes::StructRet) || + hasAttribute(Attributes::NoCapture); } - Attributes operator & (const Attributes &Attrs) const { - return Attributes(Bits & Attrs.Bits); + + /// @brief Attributes that may be applied to the function itself. These cannot + /// be used on return values or function parameters. 
+ bool hasFunctionOnlyAttrs() const { + return hasAttribute(Attributes::NoReturn) || + hasAttribute(Attributes::NoUnwind) || + hasAttribute(Attributes::ReadNone) || + hasAttribute(Attributes::ReadOnly) || + hasAttribute(Attributes::NoInline) || + hasAttribute(Attributes::AlwaysInline) || + hasAttribute(Attributes::OptimizeForSize) || + hasAttribute(Attributes::StackProtect) || + hasAttribute(Attributes::StackProtectReq) || + hasAttribute(Attributes::NoRedZone) || + hasAttribute(Attributes::NoImplicitFloat) || + hasAttribute(Attributes::Naked) || + hasAttribute(Attributes::InlineHint) || + hasAttribute(Attributes::StackAlignment) || + hasAttribute(Attributes::UWTable) || + hasAttribute(Attributes::NonLazyBind) || + hasAttribute(Attributes::ReturnsTwice) || + hasAttribute(Attributes::AddressSafety) || + hasAttribute(Attributes::MinSize); } - Attributes operator ^ (const Attributes &Attrs) const { - return Attributes(Bits ^ Attrs.Bits); + + bool operator==(const Attributes &A) const { + return Attrs == A.Attrs; } - Attributes &operator |= (const Attributes &Attrs) { - Bits |= Attrs.Bits; - return *this; + bool operator!=(const Attributes &A) const { + return Attrs != A.Attrs; } - Attributes &operator &= (const Attributes &Attrs) { - Bits &= Attrs.Bits; - return *this; + + uint64_t Raw() const; + + /// @brief Which attributes cannot be applied to a type. + static Attributes typeIncompatible(Type *Ty); + + /// encodeLLVMAttributesForBitcode - This returns an integer containing an + /// encoding of all the LLVM attributes found in the given attribute bitset. + /// Any change to this encoding is a breaking change to bitcode compatibility. + static uint64_t encodeLLVMAttributesForBitcode(Attributes Attrs); + + /// decodeLLVMAttributesForBitcode - This returns an attribute bitset + /// containing the LLVM attributes that have been decoded from the given + /// integer. This function must stay in sync with + /// 'encodeLLVMAttributesForBitcode'. + static Attributes decodeLLVMAttributesForBitcode(LLVMContext &C, + uint64_t EncodedAttrs); + + /// getAsString - The set of Attributes set in Attributes is converted to a + /// string of equivalent mnemonics. This is, presumably, for writing out the + /// mnemonics for the assembly writer. + /// @brief Convert attribute bits to text + std::string getAsString() const; +}; + +//===----------------------------------------------------------------------===// +/// AttrBuilder - This class is used in conjunction with the Attributes::get +/// method to create an Attributes object. The object itself is uniquified. The +/// Builder's value, however, is not. So this can be used as a quick way to test +/// for equality, presence of attributes, etc. +class AttrBuilder { + uint64_t Bits; +public: + AttrBuilder() : Bits(0) {} + explicit AttrBuilder(uint64_t B) : Bits(B) {} + AttrBuilder(const Attributes &A) : Bits(A.Raw()) {} + AttrBuilder(const AttrBuilder &B) : Bits(B.Bits) {} + + void clear() { Bits = 0; } + + /// addAttribute - Add an attribute to the builder. + AttrBuilder &addAttribute(Attributes::AttrVal Val); + + /// removeAttribute - Remove an attribute from the builder. + AttrBuilder &removeAttribute(Attributes::AttrVal Val); + + /// addAttribute - Add the attributes from A to the builder. + AttrBuilder &addAttributes(const Attributes &A); + + /// removeAttribute - Remove the attributes from A from the builder. + AttrBuilder &removeAttributes(const Attributes &A); + + /// hasAttribute - Return true if the builder has the specified attribute. 
+ bool hasAttribute(Attributes::AttrVal A) const; + + /// hasAttributes - Return true if the builder has IR-level attributes. + bool hasAttributes() const; + + /// hasAttributes - Return true if the builder has any attribute that's in the + /// specified attribute. + bool hasAttributes(const Attributes &A) const; + + /// hasAlignmentAttr - Return true if the builder has an alignment attribute. + bool hasAlignmentAttr() const; + + /// getAlignment - Retrieve the alignment attribute, if it exists. + uint64_t getAlignment() const; + + /// getStackAlignment - Retrieve the stack alignment attribute, if it exists. + uint64_t getStackAlignment() const; + + /// addAlignmentAttr - This turns an int alignment (which must be a power of + /// 2) into the form used internally in Attributes. + AttrBuilder &addAlignmentAttr(unsigned Align); + + /// addStackAlignmentAttr - This turns an int stack alignment (which must be a + /// power of 2) into the form used internally in Attributes. + AttrBuilder &addStackAlignmentAttr(unsigned Align); + + /// addRawValue - Add the raw value to the internal representation. + /// N.B. This should be used ONLY for decoding LLVM bitcode! + AttrBuilder &addRawValue(uint64_t Val); + + /// @brief Remove attributes that are used on functions only. + void removeFunctionOnlyAttrs() { + removeAttribute(Attributes::NoReturn) + .removeAttribute(Attributes::NoUnwind) + .removeAttribute(Attributes::ReadNone) + .removeAttribute(Attributes::ReadOnly) + .removeAttribute(Attributes::NoInline) + .removeAttribute(Attributes::AlwaysInline) + .removeAttribute(Attributes::OptimizeForSize) + .removeAttribute(Attributes::StackProtect) + .removeAttribute(Attributes::StackProtectReq) + .removeAttribute(Attributes::NoRedZone) + .removeAttribute(Attributes::NoImplicitFloat) + .removeAttribute(Attributes::Naked) + .removeAttribute(Attributes::InlineHint) + .removeAttribute(Attributes::StackAlignment) + .removeAttribute(Attributes::UWTable) + .removeAttribute(Attributes::NonLazyBind) + .removeAttribute(Attributes::ReturnsTwice) + .removeAttribute(Attributes::AddressSafety) + .removeAttribute(Attributes::MinSize); } - Attributes operator ~ () const { return Attributes(~Bits); } + uint64_t Raw() const { return Bits; } - private: - // Currently, we need less than 64 bits. - uint64_t Bits; -}; -namespace Attribute { - -/// Function parameters and results can have attributes to indicate how they -/// should be treated by optimizations and code generation. This enumeration -/// lists the attributes that can be associated with parameters, function -/// results or the function itself. -/// @brief Function attributes. - -// We declare AttrConst objects that will be used throughout the code -// and also raw uint64_t objects with _i suffix to be used below for other -// constant declarations. This is done to avoid static CTORs and at the same -// time to keep type-safety of Attributes. 
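AttrBuilder is the mutable staging object in the new scheme: attributes are accumulated on a builder and then uniquified through Attributes::get. A sketch built only from the declarations above (Ctx is an LLVMContext assumed from context):

    AttrBuilder B;
    B.addAttribute(Attributes::NoUnwind)
     .addAttribute(Attributes::ReadNone);
    Attributes Attrs = Attributes::get(Ctx, B);                   // uniquified, immutable result

    bool IsReadNone = Attrs.hasAttribute(Attributes::ReadNone);   // query without bit twiddling
    (void)IsReadNone;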
-#define DECLARE_LLVM_ATTRIBUTE(name, value) \ - const uint64_t name##_i = value; \ - const AttrConst name = {value}; - -DECLARE_LLVM_ATTRIBUTE(None,0) ///< No attributes have been set -DECLARE_LLVM_ATTRIBUTE(ZExt,1<<0) ///< Zero extended before/after call -DECLARE_LLVM_ATTRIBUTE(SExt,1<<1) ///< Sign extended before/after call -DECLARE_LLVM_ATTRIBUTE(NoReturn,1<<2) ///< Mark the function as not returning -DECLARE_LLVM_ATTRIBUTE(InReg,1<<3) ///< Force argument to be passed in register -DECLARE_LLVM_ATTRIBUTE(StructRet,1<<4) ///< Hidden pointer to structure to return -DECLARE_LLVM_ATTRIBUTE(NoUnwind,1<<5) ///< Function doesn't unwind stack -DECLARE_LLVM_ATTRIBUTE(NoAlias,1<<6) ///< Considered to not alias after call -DECLARE_LLVM_ATTRIBUTE(ByVal,1<<7) ///< Pass structure by value -DECLARE_LLVM_ATTRIBUTE(Nest,1<<8) ///< Nested function static chain -DECLARE_LLVM_ATTRIBUTE(ReadNone,1<<9) ///< Function does not access memory -DECLARE_LLVM_ATTRIBUTE(ReadOnly,1<<10) ///< Function only reads from memory -DECLARE_LLVM_ATTRIBUTE(NoInline,1<<11) ///< inline=never -DECLARE_LLVM_ATTRIBUTE(AlwaysInline,1<<12) ///< inline=always -DECLARE_LLVM_ATTRIBUTE(OptimizeForSize,1<<13) ///< opt_size -DECLARE_LLVM_ATTRIBUTE(StackProtect,1<<14) ///< Stack protection. -DECLARE_LLVM_ATTRIBUTE(StackProtectReq,1<<15) ///< Stack protection required. -DECLARE_LLVM_ATTRIBUTE(Alignment,31<<16) ///< Alignment of parameter (5 bits) - // stored as log2 of alignment with +1 bias - // 0 means unaligned different from align 1 -DECLARE_LLVM_ATTRIBUTE(NoCapture,1<<21) ///< Function creates no aliases of pointer -DECLARE_LLVM_ATTRIBUTE(NoRedZone,1<<22) /// disable redzone -DECLARE_LLVM_ATTRIBUTE(NoImplicitFloat,1<<23) /// disable implicit floating point - /// instructions. -DECLARE_LLVM_ATTRIBUTE(Naked,1<<24) ///< Naked function -DECLARE_LLVM_ATTRIBUTE(InlineHint,1<<25) ///< source said inlining was - ///desirable -DECLARE_LLVM_ATTRIBUTE(StackAlignment,7<<26) ///< Alignment of stack for - ///function (3 bits) stored as log2 - ///of alignment with +1 bias - ///0 means unaligned (different from - ///alignstack= {1)) -DECLARE_LLVM_ATTRIBUTE(ReturnsTwice,1<<29) ///< Function can return twice -DECLARE_LLVM_ATTRIBUTE(UWTable,1<<30) ///< Function must be in a unwind - ///table -DECLARE_LLVM_ATTRIBUTE(NonLazyBind,1U<<31) ///< Function is called early and/or - /// often, so lazy binding isn't - /// worthwhile. -DECLARE_LLVM_ATTRIBUTE(AddressSafety,1ULL<<32) ///< Address safety checking is on. -DECLARE_LLVM_ATTRIBUTE(IANSDialect,1ULL<<33) ///< Inline asm non-standard dialect. - /// When not set, ATT dialect assumed. - /// When set implies the Intel dialect. - -#undef DECLARE_LLVM_ATTRIBUTE - -/// Note that uwtable is about the ABI or the user mandating an entry in the -/// unwind table. The nounwind attribute is about an exception passing by the -/// function. -/// In a theoretical system that uses tables for profiling and sjlj for -/// exceptions, they would be fully independent. In a normal system that -/// uses tables for both, the semantics are: -/// nil = Needs an entry because an exception might pass by. -/// nounwind = No need for an entry -/// uwtable = Needs an entry because the ABI says so and because -/// an exception might pass by. -/// uwtable + nounwind = Needs an entry because the ABI says so. - -/// @brief Attributes that only apply to function parameters. -const AttrConst ParameterOnly = {ByVal_i | Nest_i | - StructRet_i | NoCapture_i}; - -/// @brief Attributes that may be applied to the function itself. 
These cannot -/// be used on return values or function parameters. -const AttrConst FunctionOnly = {NoReturn_i | NoUnwind_i | ReadNone_i | - ReadOnly_i | NoInline_i | AlwaysInline_i | OptimizeForSize_i | - StackProtect_i | StackProtectReq_i | NoRedZone_i | NoImplicitFloat_i | - Naked_i | InlineHint_i | StackAlignment_i | - UWTable_i | NonLazyBind_i | ReturnsTwice_i | AddressSafety_i | - IANSDialect_i}; - -/// @brief Parameter attributes that do not apply to vararg call arguments. -const AttrConst VarArgsIncompatible = {StructRet_i}; - -/// @brief Attributes that are mutually incompatible. -const AttrConst MutuallyIncompatible[5] = { - {ByVal_i | Nest_i | StructRet_i}, - {ByVal_i | Nest_i | InReg_i }, - {ZExt_i | SExt_i}, - {ReadNone_i | ReadOnly_i}, - {NoInline_i | AlwaysInline_i} + bool operator==(const AttrBuilder &B) { + return Bits == B.Bits; + } + bool operator!=(const AttrBuilder &B) { + return Bits != B.Bits; + } }; -/// @brief Which attributes cannot be applied to a type. -Attributes typeIncompatible(Type *Ty); - -/// This turns an int alignment (a power of 2, normally) into the -/// form used internally in Attributes. -inline Attributes constructAlignmentFromInt(unsigned i) { - // Default alignment, allow the target to define how to align it. - if (i == 0) - return None; - - assert(isPowerOf2_32(i) && "Alignment must be a power of two."); - assert(i <= 0x40000000 && "Alignment too large."); - return Attributes((Log2_32(i)+1) << 16); -} - -/// This returns the alignment field of an attribute as a byte alignment value. -inline unsigned getAlignmentFromAttrs(Attributes A) { - Attributes Align = A & Attribute::Alignment; - if (!Align) - return 0; - - return 1U << ((Align.Raw() >> 16) - 1); -} - -/// This turns an int stack alignment (which must be a power of 2) into -/// the form used internally in Attributes. -inline Attributes constructStackAlignmentFromInt(unsigned i) { - // Default alignment, allow the target to define how to align it. - if (i == 0) - return None; - - assert(isPowerOf2_32(i) && "Alignment must be a power of two."); - assert(i <= 0x100 && "Alignment too large."); - return Attributes((Log2_32(i)+1) << 26); -} - -/// This returns the stack alignment field of an attribute as a byte alignment -/// value. -inline unsigned getStackAlignmentFromAttrs(Attributes A) { - Attributes StackAlign = A & Attribute::StackAlignment; - if (!StackAlign) - return 0; - - return 1U << ((StackAlign.Raw() >> 26) - 1); -} - -/// This returns an integer containing an encoding of all the -/// LLVM attributes found in the given attribute bitset. Any -/// change to this encoding is a breaking change to bitcode -/// compatibility. -inline uint64_t encodeLLVMAttributesForBitcode(Attributes Attrs) { - // FIXME: It doesn't make sense to store the alignment information as an - // expanded out value, we should store it as a log2 value. However, we can't - // just change that here without breaking bitcode compatibility. If this ever - // becomes a problem in practice, we should introduce new tag numbers in the - // bitcode file and have those tags use a more efficiently encoded alignment - // field. - - // Store the alignment in the bitcode as a 16-bit raw value instead of a - // 5-bit log2 encoded value. Shift the bits above the alignment up by - // 11 bits. 
- - uint64_t EncodedAttrs = Attrs.Raw() & 0xffff; - if (Attrs & Attribute::Alignment) - EncodedAttrs |= (1ull << 16) << - (((Attrs & Attribute::Alignment).Raw()-1) >> 16); - EncodedAttrs |= (Attrs.Raw() & (0xfffull << 21)) << 11; - - return EncodedAttrs; -} - -/// This returns an attribute bitset containing the LLVM attributes -/// that have been decoded from the given integer. This function -/// must stay in sync with 'encodeLLVMAttributesForBitcode'. -inline Attributes decodeLLVMAttributesForBitcode(uint64_t EncodedAttrs) { - // The alignment is stored as a 16-bit raw value from bits 31--16. - // We shift the bits above 31 down by 11 bits. - - unsigned Alignment = (EncodedAttrs & (0xffffull << 16)) >> 16; - assert((!Alignment || isPowerOf2_32(Alignment)) && - "Alignment must be a power of two."); - - Attributes Attrs(EncodedAttrs & 0xffff); - if (Alignment) - Attrs |= Attribute::constructAlignmentFromInt(Alignment); - Attrs |= Attributes((EncodedAttrs & (0xfffull << 32)) >> 11); - - return Attrs; -} - - -/// The set of Attributes set in Attributes is converted to a -/// string of equivalent mnemonics. This is, presumably, for writing out -/// the mnemonics for the assembly writer. -/// @brief Convert attribute bits to text -std::string getAsString(Attributes Attrs); -} // end namespace Attribute - -/// This is just a pair of values to associate a set of attributes -/// with an index. -struct AttributeWithIndex { - Attributes Attrs; ///< The attributes that are set, or'd together. - unsigned Index; ///< Index of the parameter for which the attributes apply. - ///< Index 0 is used for return value attributes. - ///< Index ~0U is used for function attributes. +//===----------------------------------------------------------------------===// +// AttributeWithIndex +//===----------------------------------------------------------------------===// +/// AttributeWithIndex - This is just a pair of values to associate a set of +/// attributes with an index. +struct AttributeWithIndex { + Attributes Attrs; ///< The attributes that are set, or'd together. + unsigned Index; ///< Index of the parameter for which the attributes apply. + ///< Index 0 is used for return value attributes. + ///< Index ~0U is used for function attributes. + + static AttributeWithIndex get(LLVMContext &C, unsigned Idx, + ArrayRef Attrs) { + return get(Idx, Attributes::get(C, Attrs)); + } static AttributeWithIndex get(unsigned Idx, Attributes Attrs) { AttributeWithIndex P; P.Index = Idx; @@ -300,31 +312,42 @@ class AttributeListImpl; /// AttrListPtr - This class manages the ref count for the opaque /// AttributeListImpl object and provides accessors for it. class AttrListPtr { - /// AttrList - The attributes that we are managing. This can be null - /// to represent the empty attributes list. +public: + enum AttrIndex { + ReturnIndex = 0U, + FunctionIndex = ~0U + }; +private: + /// @brief The attributes that we are managing. This can be null to represent + /// the empty attributes list. AttributeListImpl *AttrList; + + /// @brief The attributes for the specified index are returned. Attributes + /// for the result are denoted with Idx = 0. 
+ Attributes getAttributes(unsigned Idx) const; + + explicit AttrListPtr(AttributeListImpl *LI) : AttrList(LI) {} public: AttrListPtr() : AttrList(0) {} - AttrListPtr(const AttrListPtr &P); + AttrListPtr(const AttrListPtr &P) : AttrList(P.AttrList) {} const AttrListPtr &operator=(const AttrListPtr &RHS); - ~AttrListPtr(); //===--------------------------------------------------------------------===// // Attribute List Construction and Mutation //===--------------------------------------------------------------------===// /// get - Return a Attributes list with the specified parameters in it. - static AttrListPtr get(ArrayRef Attrs); + static AttrListPtr get(LLVMContext &C, ArrayRef Attrs); /// addAttr - Add the specified attribute at the specified index to this /// attribute list. Since attribute lists are immutable, this /// returns the new list. - AttrListPtr addAttr(unsigned Idx, Attributes Attrs) const; + AttrListPtr addAttr(LLVMContext &C, unsigned Idx, Attributes Attrs) const; /// removeAttr - Remove the specified attribute at the specified index from /// this attribute list. Since attribute lists are immutable, this /// returns the new list. - AttrListPtr removeAttr(unsigned Idx, Attributes Attrs) const; + AttrListPtr removeAttr(LLVMContext &C, unsigned Idx, Attributes Attrs) const; //===--------------------------------------------------------------------===// // Attribute List Accessors @@ -332,36 +355,38 @@ public: /// getParamAttributes - The attributes for the specified index are /// returned. Attributes getParamAttributes(unsigned Idx) const { - assert (Idx && Idx != ~0U && "Invalid parameter index!"); return getAttributes(Idx); } /// getRetAttributes - The attributes for the ret value are /// returned. Attributes getRetAttributes() const { - return getAttributes(0); + return getAttributes(ReturnIndex); } /// getFnAttributes - The function attributes are returned. Attributes getFnAttributes() const { - return getAttributes(~0U); + return getAttributes(FunctionIndex); } /// paramHasAttr - Return true if the specified parameter index has the /// specified attribute set. bool paramHasAttr(unsigned Idx, Attributes Attr) const { - return getAttributes(Idx) & Attr; + return getAttributes(Idx).hasAttributes(Attr); } /// getParamAlignment - Return the alignment for the specified function /// parameter. unsigned getParamAlignment(unsigned Idx) const { - return Attribute::getAlignmentFromAttrs(getAttributes(Idx)); + return getAttributes(Idx).getAlignment(); } /// hasAttrSomewhere - Return true if the specified attribute is set for at /// least one parameter or for the return value. - bool hasAttrSomewhere(Attributes Attr) const; + bool hasAttrSomewhere(Attributes::AttrVal Attr) const; + + unsigned getNumAttrs() const; + Attributes &getAttributesAtIndex(unsigned i) const; /// operator==/!= - Provide equality predicates. bool operator==(const AttrListPtr &RHS) const @@ -369,8 +394,6 @@ public: bool operator!=(const AttrListPtr &RHS) const { return AttrList != RHS.AttrList; } - void dump() const; - //===--------------------------------------------------------------------===// // Attribute List Introspection //===--------------------------------------------------------------------===// @@ -400,13 +423,7 @@ public: /// holds a index number plus a set of attributes. const AttributeWithIndex &getSlot(unsigned Slot) const; -private: - explicit AttrListPtr(AttributeListImpl *L); - - /// getAttributes - The attributes for the specified index are - /// returned. 
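AttrListPtr now names its special slots (ReturnIndex and FunctionIndex) and threads an LLVMContext through construction and mutation, since the underlying lists are uniquified per context. A hedged sketch of adding a function attribute; Function's getAttributes/setAttributes/getContext accessors are assumed from the rest of the tree, not from this hunk:

    AttrListPtr PAL = F->getAttributes();
    AttrBuilder B;
    B.addAttribute(Attributes::NoUnwind);
    PAL = PAL.addAttr(F->getContext(), AttrListPtr::FunctionIndex,
                      Attributes::get(F->getContext(), B));
    F->setAttributes(PAL);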
Attributes for the result are denoted with Idx = 0. - Attributes getAttributes(unsigned Idx) const; - + void dump() const; }; } // End llvm namespace diff --git a/include/llvm/BasicBlock.h b/include/llvm/BasicBlock.h index d2aa167..02c2a96 100644 --- a/include/llvm/BasicBlock.h +++ b/include/llvm/BasicBlock.h @@ -79,8 +79,8 @@ private: void setParent(Function *parent); friend class SymbolTableListTraits; - BasicBlock(const BasicBlock &); // Do not implement - void operator=(const BasicBlock &); // Do not implement + BasicBlock(const BasicBlock &) LLVM_DELETED_FUNCTION; + void operator=(const BasicBlock &) LLVM_DELETED_FUNCTION; /// BasicBlock ctor - If the function parameter is specified, the basic block /// is automatically inserted at either the end of the function (if @@ -213,7 +213,6 @@ public: ValueSymbolTable *getValueSymbolTable(); /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const BasicBlock *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == Value::BasicBlockVal; } diff --git a/include/llvm/Bitcode/Archive.h b/include/llvm/Bitcode/Archive.h index 3c75e58..4fd4b5d 100644 --- a/include/llvm/Bitcode/Archive.h +++ b/include/llvm/Bitcode/Archive.h @@ -415,8 +415,8 @@ class Archive { /// name will be truncated at 15 characters. If \p Compress is specified, /// all archive members will be compressed before being written. If /// \p PrintSymTab is true, the symbol table will be printed to std::cout. - /// @returns true if an error occurred, \p error set to error message - /// @returns false if the writing succeeded. + /// @returns true if an error occurred, \p error set to error message; + /// returns false if the writing succeeded. /// @brief Write (possibly modified) archive contents to disk bool writeToDisk( bool CreateSymbolTable=false, ///< Create Symbol table @@ -480,8 +480,8 @@ class Archive { /// Writes one ArchiveMember to an ofstream. If an error occurs, returns /// false, otherwise true. If an error occurs and error is non-null then /// it will be set to an error message. - /// @returns false Writing member succeeded - /// @returns true Writing member failed, \p error set to error message + /// @returns false if writing member succeeded, + /// returns true if writing member failed, \p error set to error message. bool writeMember( const ArchiveMember& member, ///< The member to be written std::ofstream& ARFile, ///< The file to write member onto @@ -527,9 +527,9 @@ class Archive { /// @name Hidden /// @{ private: - Archive(); ///< Do not implement - Archive(const Archive&); ///< Do not implement - Archive& operator=(const Archive&); ///< Do not implement + Archive() LLVM_DELETED_FUNCTION; + Archive(const Archive&) LLVM_DELETED_FUNCTION; + Archive& operator=(const Archive&) LLVM_DELETED_FUNCTION; /// @} }; diff --git a/include/llvm/Bitcode/BitstreamReader.h b/include/llvm/Bitcode/BitstreamReader.h index 6586829..840f57e 100644 --- a/include/llvm/Bitcode/BitstreamReader.h +++ b/include/llvm/Bitcode/BitstreamReader.h @@ -47,9 +47,9 @@ private: /// block/record name information in the BlockInfo block. Only llvm-bcanalyzer /// uses this. 
bool IgnoreBlockInfoNames; - - BitstreamReader(const BitstreamReader&); // DO NOT IMPLEMENT - void operator=(const BitstreamReader&); // DO NOT IMPLEMENT + + BitstreamReader(const BitstreamReader&) LLVM_DELETED_FUNCTION; + void operator=(const BitstreamReader&) LLVM_DELETED_FUNCTION; public: BitstreamReader() : IgnoreBlockInfoNames(true) { } @@ -409,7 +409,7 @@ public: } /// EnterSubBlock - Having read the ENTER_SUBBLOCK abbrevid, enter - /// the block, and return true if the block is valid. + /// the block, and return true if the block has an error. bool EnterSubBlock(unsigned BlockID, unsigned *NumWordsP = 0) { // Save the current block's state on BlockScope. BlockScope.push_back(Block(CurCodeSize)); diff --git a/include/llvm/Bitcode/BitstreamWriter.h b/include/llvm/Bitcode/BitstreamWriter.h index 475da13..dea118f 100644 --- a/include/llvm/Bitcode/BitstreamWriter.h +++ b/include/llvm/Bitcode/BitstreamWriter.h @@ -155,6 +155,7 @@ public: } void EmitVBR(uint32_t Val, unsigned NumBits) { + assert(NumBits <= 32 && "Too many bits to emit!"); uint32_t Threshold = 1U << (NumBits-1); // Emit the bits with VBR encoding, NumBits-1 bits at a time. @@ -167,10 +168,11 @@ public: } void EmitVBR64(uint64_t Val, unsigned NumBits) { + assert(NumBits <= 32 && "Too many bits to emit!"); if ((uint32_t)Val == Val) return EmitVBR((uint32_t)Val, NumBits); - uint64_t Threshold = 1U << (NumBits-1); + uint32_t Threshold = 1U << (NumBits-1); // Emit the bits with VBR encoding, NumBits-1 bits at a time. while (Val >= Threshold) { diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h index a8c34cb..c1dc190 100644 --- a/include/llvm/Bitcode/LLVMBitCodes.h +++ b/include/llvm/Bitcode/LLVMBitCodes.h @@ -161,11 +161,14 @@ namespace bitc { CST_CODE_CE_INSERTELT = 15, // CE_INSERTELT: [opval, opval, opval] CST_CODE_CE_SHUFFLEVEC = 16, // CE_SHUFFLEVEC: [opval, opval, opval] CST_CODE_CE_CMP = 17, // CE_CMP: [opty, opval, opval, pred] - CST_CODE_INLINEASM = 18, // INLINEASM: [sideeffect,asmstr,conststr] + CST_CODE_INLINEASM_OLD = 18, // INLINEASM: [sideeffect|alignstack, + // asmstr,conststr] CST_CODE_CE_SHUFVEC_EX = 19, // SHUFVEC_EX: [opty, opval, opval, opval] CST_CODE_CE_INBOUNDS_GEP = 20,// INBOUNDS_GEP: [n x operands] CST_CODE_BLOCKADDRESS = 21, // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#] - CST_CODE_DATA = 22 // DATA: [n x elements] + CST_CODE_DATA = 22, // DATA: [n x elements] + CST_CODE_INLINEASM = 23 // INLINEASM: [sideeffect|alignstack| + // asmdialect,asmstr,conststr] }; /// CastOpcodes - These are values used in the bitcode files to encode which diff --git a/include/llvm/CallingConv.h b/include/llvm/CallingConv.h index 4c5ee62..053f4eb 100644 --- a/include/llvm/CallingConv.h +++ b/include/llvm/CallingConv.h @@ -94,7 +94,29 @@ namespace CallingConv { /// MBLAZE_INTR - Calling convention used for MBlaze interrupt support /// routines (i.e. GCC's save_volatiles attribute). - MBLAZE_SVOL = 74 + MBLAZE_SVOL = 74, + + /// SPIR_FUNC - Calling convention for SPIR non-kernel device functions. + /// No lowering or expansion of arguments. + /// Structures are passed as a pointer to a struct with the byval attribute. + /// Functions can only call SPIR_FUNC and SPIR_KERNEL functions. + /// Functions can only have zero or one return values. + /// Variable arguments are not allowed, except for printf. + /// How arguments/return values are lowered are not specified. + /// Functions are only visible to the devices. 
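Two behavioral notes in the bitstream hunks above: EmitVBR and EmitVBR64 now assert that NumBits is at most 32 (and the 64-bit path computes its threshold in 32 bits, matching the chunked encoding), and the EnterSubBlock comment is corrected to say it returns true on error, not on success. Callers should treat the return value as a failure flag; Stream here is assumed to be the bitstream cursor object:

    if (Stream.EnterSubBlock(BlockID))
      return true;   // malformed block: propagate the error instead of reading on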
+ SPIR_FUNC = 75, + + /// SPIR_KERNEL - Calling convention for SPIR kernel functions. + /// Inherits the restrictions of SPIR_FUNC, except + /// Cannot have non-void return values. + /// Cannot have variable arguments. + /// Can also be called by the host. + /// Is externally visible. + SPIR_KERNEL = 76, + + /// Intel_OCL_BI - Calling conventions for Intel OpenCL built-ins + Intel_OCL_BI = 77 + }; } // End CallingConv namespace diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h index 170a528..a92b859 100644 --- a/include/llvm/CodeGen/AsmPrinter.h +++ b/include/llvm/CodeGen/AsmPrinter.h @@ -17,6 +17,7 @@ #define LLVM_CODEGEN_ASMPRINTER_H #include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/InlineAsm.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/ErrorHandling.h" @@ -47,7 +48,7 @@ namespace llvm { class DwarfException; class Mangler; class TargetLoweringObjectFile; - class TargetData; + class DataLayout; class TargetMachine; /// AsmPrinter - This class is intended to be used as a driving class for all @@ -130,8 +131,8 @@ namespace llvm { /// getObjFileLowering - Return information about object file lowering. const TargetLoweringObjectFile &getObjFileLowering() const; - /// getTargetData - Return information about data layout. - const TargetData &getTargetData() const; + /// getDataLayout - Return information about data layout. + const DataLayout &getDataLayout() const; /// getCurrentSection() - Return the current section we are emitting to. const MCSection *getCurrentSection() const; @@ -460,7 +461,8 @@ namespace llvm { mutable unsigned SetCounter; /// EmitInlineAsm - Emit a blob of inline asm to the output streamer. - void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0) const; + void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0, + InlineAsm::AsmDialect AsmDialect = InlineAsm::AD_ATT) const; /// EmitInlineAsm - This method formats and emits the specified machine /// instruction that is an inline asm. diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h index 3afe309..436918b1 100644 --- a/include/llvm/CodeGen/CallingConvLower.h +++ b/include/llvm/CodeGen/CallingConvLower.h @@ -17,6 +17,7 @@ #include "llvm/ADT/SmallVector.h" #include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/Target/TargetCallingConv.h" #include "llvm/CallingConv.h" @@ -288,6 +289,7 @@ public: StackOffset = ((StackOffset + Align-1) & ~(Align-1)); unsigned Result = StackOffset; StackOffset += Size; + MF.getFrameInfo()->ensureMaxAlignment(Align); return Result; } diff --git a/include/llvm/CodeGen/CommandFlags.h b/include/llvm/CodeGen/CommandFlags.h new file mode 100644 index 0000000..90ee234 --- /dev/null +++ b/include/llvm/CodeGen/CommandFlags.h @@ -0,0 +1,228 @@ +//===-- CommandFlags.h - Register Coalescing Interface ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains codegen-specific flags that are shared between different +// command line tools. The tools "llc" and "opt" both use this file to prevent +// flag duplication. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_COMMAND_LINE_FLAGS_H +#define LLVM_CODEGEN_COMMAND_LINE_FLAGS_H + +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/CodeGen.h" +#include "llvm/Target/TargetMachine.h" + +#include +using namespace llvm; + +cl::opt +MArch("march", cl::desc("Architecture to generate code for (see --version)")); + +cl::opt +MCPU("mcpu", + cl::desc("Target a specific cpu type (-mcpu=help for details)"), + cl::value_desc("cpu-name"), + cl::init("")); + +cl::list +MAttrs("mattr", + cl::CommaSeparated, + cl::desc("Target specific attributes (-mattr=help for details)"), + cl::value_desc("a1,+a2,-a3,...")); + +cl::opt +RelocModel("relocation-model", + cl::desc("Choose relocation model"), + cl::init(Reloc::Default), + cl::values( + clEnumValN(Reloc::Default, "default", + "Target default relocation model"), + clEnumValN(Reloc::Static, "static", + "Non-relocatable code"), + clEnumValN(Reloc::PIC_, "pic", + "Fully relocatable, position independent code"), + clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic", + "Relocatable external references, non-relocatable code"), + clEnumValEnd)); + +cl::opt +CMModel("code-model", + cl::desc("Choose code model"), + cl::init(CodeModel::Default), + cl::values(clEnumValN(CodeModel::Default, "default", + "Target default code model"), + clEnumValN(CodeModel::Small, "small", + "Small code model"), + clEnumValN(CodeModel::Kernel, "kernel", + "Kernel code model"), + clEnumValN(CodeModel::Medium, "medium", + "Medium code model"), + clEnumValN(CodeModel::Large, "large", + "Large code model"), + clEnumValEnd)); + +cl::opt +RelaxAll("mc-relax-all", + cl::desc("When used with filetype=obj, " + "relax all fixups in the emitted object file")); + +cl::opt +FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile), + cl::desc("Choose a file type (not all types are supported by all targets):"), + cl::values( + clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm", + "Emit an assembly ('.s') file"), + clEnumValN(TargetMachine::CGFT_ObjectFile, "obj", + "Emit a native object ('.o') file"), + clEnumValN(TargetMachine::CGFT_Null, "null", + "Emit nothing, for performance testing"), + clEnumValEnd)); + +cl::opt DisableDotLoc("disable-dot-loc", cl::Hidden, + cl::desc("Do not use .loc entries")); + +cl::opt DisableCFI("disable-cfi", cl::Hidden, + cl::desc("Do not use .cfi_* directives")); + +cl::opt EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden, + cl::desc("Use .file directives with an explicit directory.")); + +cl::opt +DisableRedZone("disable-red-zone", + cl::desc("Do not emit code that uses the red zone."), + cl::init(false)); + +cl::opt +EnableFPMAD("enable-fp-mad", + cl::desc("Enable less precise MAD instructions to be generated"), + cl::init(false)); + +cl::opt +DisableFPElim("disable-fp-elim", + cl::desc("Disable frame pointer elimination optimization"), + cl::init(false)); + +cl::opt +DisableFPElimNonLeaf("disable-non-leaf-fp-elim", + cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"), + cl::init(false)); + +cl::opt +EnableUnsafeFPMath("enable-unsafe-fp-math", + cl::desc("Enable optimizations that may decrease FP precision"), + cl::init(false)); + +cl::opt +EnableNoInfsFPMath("enable-no-infs-fp-math", + cl::desc("Enable FP math optimizations that assume no +-Infs"), + cl::init(false)); + +cl::opt +EnableNoNaNsFPMath("enable-no-nans-fp-math", + cl::desc("Enable FP math optimizations that assume no NaNs"), + cl::init(false)); + +cl::opt 
+EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math", + cl::Hidden, + cl::desc("Force codegen to assume rounding mode can change dynamically"), + cl::init(false)); + +cl::opt +GenerateSoftFloatCalls("soft-float", + cl::desc("Generate software floating point library calls"), + cl::init(false)); + +cl::opt +FloatABIForCalls("float-abi", + cl::desc("Choose float ABI type"), + cl::init(FloatABI::Default), + cl::values( + clEnumValN(FloatABI::Default, "default", + "Target default float ABI type"), + clEnumValN(FloatABI::Soft, "soft", + "Soft float ABI (implied by -soft-float)"), + clEnumValN(FloatABI::Hard, "hard", + "Hard float ABI (uses FP registers)"), + clEnumValEnd)); + +cl::opt +FuseFPOps("fp-contract", + cl::desc("Enable aggresive formation of fused FP ops"), + cl::init(FPOpFusion::Standard), + cl::values( + clEnumValN(FPOpFusion::Fast, "fast", + "Fuse FP ops whenever profitable"), + clEnumValN(FPOpFusion::Standard, "on", + "Only fuse 'blessed' FP ops."), + clEnumValN(FPOpFusion::Strict, "off", + "Only fuse FP ops when the result won't be effected."), + clEnumValEnd)); + +cl::opt +DontPlaceZerosInBSS("nozero-initialized-in-bss", + cl::desc("Don't place zero-initialized symbols into bss section"), + cl::init(false)); + +cl::opt +EnableGuaranteedTailCallOpt("tailcallopt", + cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."), + cl::init(false)); + +cl::opt +DisableTailCalls("disable-tail-calls", + cl::desc("Never emit tail calls"), + cl::init(false)); + +cl::opt +OverrideStackAlignment("stack-alignment", + cl::desc("Override default stack alignment"), + cl::init(0)); + +cl::opt +EnableRealignStack("realign-stack", + cl::desc("Realign stack if needed"), + cl::init(true)); + +cl::opt +TrapFuncName("trap-func", cl::Hidden, + cl::desc("Emit a call to trap function rather than a trap instruction"), + cl::init("")); + +cl::opt +EnablePIE("enable-pie", + cl::desc("Assume the creation of a position independent executable."), + cl::init(false)); + +cl::opt +SegmentedStacks("segmented-stacks", + cl::desc("Use segmented stacks if possible."), + cl::init(false)); + +cl::opt +UseInitArray("use-init-array", + cl::desc("Use .init_array instead of .ctors."), + cl::init(false)); + +cl::opt StopAfter("stop-after", + cl::desc("Stop compilation after a specific pass"), + cl::value_desc("pass-name"), + cl::init("")); +cl::opt StartAfter("start-after", + cl::desc("Resume compilation after a specific pass"), + cl::value_desc("pass-name"), + cl::init("")); + +cl::opt +SSPBufferSize("stack-protector-buffer-size", cl::init(8), + cl::desc("Lower bound for a buffer to be considered for " + "stack protection")); +#endif diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h index 7cb9695..7c24e36 100644 --- a/include/llvm/CodeGen/FastISel.h +++ b/include/llvm/CodeGen/FastISel.h @@ -32,7 +32,7 @@ class MachineFunction; class MachineInstr; class MachineFrameInfo; class MachineRegisterInfo; -class TargetData; +class DataLayout; class TargetInstrInfo; class TargetLibraryInfo; class TargetLowering; @@ -54,7 +54,7 @@ protected: MachineConstantPool &MCP; DebugLoc DL; const TargetMachine &TM; - const TargetData &TD; + const DataLayout &TD; const TargetInstrInfo &TII; const TargetLowering &TLI; const TargetRegisterInfo &TRI; diff --git a/include/llvm/CodeGen/GCMetadata.h b/include/llvm/CodeGen/GCMetadata.h index 20e33f7..076f6f3 100644 --- a/include/llvm/CodeGen/GCMetadata.h +++ b/include/llvm/CodeGen/GCMetadata.h @@ -122,6 +122,11 @@ namespace llvm { 
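// Aside: a minimal sketch of how a hypothetical command-line tool would reuse
// the shared flags from CommandFlags.h above instead of redeclaring
// -march/-mcpu/-mattr itself. The tool name and output are illustrative; only
// cl::ParseCommandLineOptions and the flag globals declared in the header are
// assumed.
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/Support/raw_ostream.h"

int main(int argc, char **argv) {
  cl::ParseCommandLineOptions(argc, argv, "example codegen tool\n");
  // The shared globals now hold the parsed -march/-mcpu values.
  outs() << "march=" << MArch << " mcpu=" << MCPU << "\n";
  return 0;
}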
Roots.push_back(GCRoot(Num, Metadata)); } + /// removeStackRoot - Removes a root. + roots_iterator removeStackRoot(roots_iterator position) { + return Roots.erase(position); + } + /// addSafePoint - Notes the existence of a safe point. Num is the ID of the /// label just prior to the safe point (if the code generator is using /// MachineModuleInfo). diff --git a/include/llvm/CodeGen/GCMetadataPrinter.h b/include/llvm/CodeGen/GCMetadataPrinter.h index 17a2653..4a6b5ac 100644 --- a/include/llvm/CodeGen/GCMetadataPrinter.h +++ b/include/llvm/CodeGen/GCMetadataPrinter.h @@ -48,9 +48,10 @@ namespace llvm { // May only be subclassed. GCMetadataPrinter(); - // Do not implement. - GCMetadataPrinter(const GCMetadataPrinter &); - GCMetadataPrinter &operator=(const GCMetadataPrinter &); + private: + GCMetadataPrinter(const GCMetadataPrinter &) LLVM_DELETED_FUNCTION; + GCMetadataPrinter & + operator=(const GCMetadataPrinter &) LLVM_DELETED_FUNCTION; public: GCStrategy &getStrategy() { return *S; } diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h index f387bd5..5d0a3b4 100644 --- a/include/llvm/CodeGen/ISDOpcodes.h +++ b/include/llvm/CodeGen/ISDOpcodes.h @@ -637,6 +637,10 @@ namespace ISD { ATOMIC_LOAD_UMIN, ATOMIC_LOAD_UMAX, + /// This corresponds to the llvm.lifetime.* intrinsics. The first operand + /// is the chain and the second operand is the alloca pointer. + LIFETIME_START, LIFETIME_END, + /// BUILTIN_OP_END - This must be the last enum value in this list. /// The target-specific pre-isel opcode values start here. BUILTIN_OP_END diff --git a/include/llvm/CodeGen/IntrinsicLowering.h b/include/llvm/CodeGen/IntrinsicLowering.h index 767b666..5a3fb4b 100644 --- a/include/llvm/CodeGen/IntrinsicLowering.h +++ b/include/llvm/CodeGen/IntrinsicLowering.h @@ -21,15 +21,15 @@ namespace llvm { class CallInst; class Module; - class TargetData; + class DataLayout; class IntrinsicLowering { - const TargetData& TD; + const DataLayout& TD; bool Warned; public: - explicit IntrinsicLowering(const TargetData &td) : + explicit IntrinsicLowering(const DataLayout &td) : TD(td), Warned(false) {} /// AddPrototypes - This method, if called, causes all of the prototypes diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h index a3ce47c..185e414 100644 --- a/include/llvm/CodeGen/LiveInterval.h +++ b/include/llvm/CodeGen/LiveInterval.h @@ -29,6 +29,7 @@ #include namespace llvm { + class CoalescerPair; class LiveIntervals; class MachineInstr; class MachineRegisterInfo; @@ -113,9 +114,6 @@ namespace llvm { void dump() const; void print(raw_ostream &os) const; - - private: - LiveRange(); // DO NOT IMPLEMENT }; template <> struct isPodLike { static const bool value = true; }; @@ -275,11 +273,6 @@ namespace llvm { void MergeValueInAsValue(const LiveInterval &RHS, const VNInfo *RHSValNo, VNInfo *LHSValNo); - /// Copy - Copy the specified live interval. This copies all the fields - /// except for the register of the interval. - void Copy(const LiveInterval &RHS, MachineRegisterInfo *MRI, - VNInfo::Allocator &VNInfoAllocator); - bool empty() const { return ranges.empty(); } /// beginIndex - Return the lowest numbered slot covered by interval. @@ -312,12 +305,6 @@ namespace llvm { return r != end() && r->end == index; } - /// killedInRange - Return true if the interval has kills in [Start,End). - /// Note that the kill point is considered the end of a live range, so it is - /// not contained in the live range. 
If a live range ends at End, it won't - /// be counted as a kill by this method. - bool killedInRange(SlotIndex Start, SlotIndex End) const; - /// getLiveRangeContaining - Return the live range that contains the /// specified index, or null if there is none. const LiveRange *getLiveRangeContaining(SlotIndex Idx) const { @@ -366,6 +353,14 @@ namespace llvm { return overlapsFrom(other, other.begin()); } + /// overlaps - Return true if the two intervals have overlapping segments + /// that are not coalescable according to CP. + /// + /// Overlapping segments where one interval is defined by a coalescable + /// copy are allowed. + bool overlaps(const LiveInterval &Other, const CoalescerPair &CP, + const SlotIndexes&) const; + /// overlaps - Return true if the live interval overlaps a range specified /// by [Start, End). bool overlaps(SlotIndex Start, SlotIndex End) const; @@ -469,7 +464,7 @@ namespace llvm { VNInfo *LHSValNo = 0, const VNInfo *RHSValNo = 0); - LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT + LiveInterval& operator=(const LiveInterval& rhs) LLVM_DELETED_FUNCTION; }; @@ -501,7 +496,9 @@ namespace llvm { if (I == E) return; // Is this an instruction live-in segment? - if (SlotIndex::isEarlierInstr(I->start, Idx)) { + // If Idx is the start index of a basic block, include live-in segments + // that start at Idx.getBaseIndex(). + if (I->start <= Idx.getBaseIndex()) { EarlyVal = I->valno; EndPoint = I->end; // Move to the potentially live-out segment. @@ -510,6 +507,12 @@ namespace llvm { if (++I == E) return; } + // Special case: A PHIDef value can have its def in the middle of a + // segment if the value happens to be live out of the layout + // predecessor. + // Such a value is not live-in. + if (EarlyVal->def == Idx.getBaseIndex()) + EarlyVal = 0; } // I now points to the segment that may be live-through, or defined by // this instr. Ignore segments starting after the current instr. diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h index da521db..b421753 100644 --- a/include/llvm/CodeGen/LiveIntervalAnalysis.h +++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h @@ -65,12 +65,6 @@ namespace llvm { /// Live interval pointers for all the virtual registers. IndexedMap VirtRegIntervals; - /// AllocatableRegs - A bit vector of allocatable registers. - BitVector AllocatableRegs; - - /// ReservedRegs - A bit vector of reserved registers. - BitVector ReservedRegs; - /// RegMaskSlots - Sorted list of instructions with register mask operands. /// Always use the 'r' slot, RegMasks are normal clobbers, not early /// clobbers. @@ -123,18 +117,6 @@ namespace llvm { return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg]; } - /// isAllocatable - is the physical register reg allocatable in the current - /// function? - bool isAllocatable(unsigned reg) const { - return AllocatableRegs.test(reg); - } - - /// isReserved - is the physical register reg reserved in the current - /// function - bool isReserved(unsigned reg) const { - return ReservedRegs.test(reg); - } - // Interval creation. LiveInterval &getOrCreateInterval(unsigned Reg) { if (!hasInterval(Reg)) { @@ -165,6 +147,26 @@ namespace llvm { bool shrinkToUses(LiveInterval *li, SmallVectorImpl *dead = 0); + /// extendToIndices - Extend the live range of LI to reach all points in + /// Indices. The points in the Indices array must be jointly dominated by + /// existing defs in LI. PHI-defs are added as needed to maintain SSA form. 
+ /// + /// If a SlotIndex in Indices is the end index of a basic block, LI will be + /// extended to be live out of the basic block. + /// + /// See also LiveRangeCalc::extend(). + void extendToIndices(LiveInterval *LI, ArrayRef Indices); + + /// pruneValue - If an LI value is live at Kill, prune its live range by + /// removing any liveness reachable from Kill. Add live range end points to + /// EndPoints such that extendToIndices(LI, EndPoints) will reconstruct the + /// value's live range. + /// + /// Calling pruneValue() and extendToIndices() can be used to reconstruct + /// SSA form after adding defs to a virtual register. + void pruneValue(LiveInterval *LI, SlotIndex Kill, + SmallVectorImpl *EndPoints); + SlotIndexes *getSlotIndexes() const { return Indexes; } @@ -252,21 +254,26 @@ namespace llvm { /// addKillFlags - Add kill flags to any instruction that kills a virtual /// register. - void addKillFlags(); + void addKillFlags(const VirtRegMap*); /// handleMove - call this method to notify LiveIntervals that /// instruction 'mi' has been moved within a basic block. This will update /// the live intervals for all operands of mi. Moves between basic blocks /// are not supported. - void handleMove(MachineInstr* MI); + /// + /// \param UpdateFlags Update live intervals for nonallocatable physregs. + void handleMove(MachineInstr* MI, bool UpdateFlags = false); /// moveIntoBundle - Update intervals for operands of MI so that they /// begin/end on the SlotIndex for BundleStart. /// + /// \param UpdateFlags Update live intervals for nonallocatable physregs. + /// /// Requires MI and BundleStart to have SlotIndexes, and assumes /// existing liveness is accurate. BundleStart should be the first /// instruction in the Bundle. - void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart); + void handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart, + bool UpdateFlags = false); // Register mask functions. // diff --git a/include/llvm/CodeGen/LiveVariables.h b/include/llvm/CodeGen/LiveVariables.h index d4bb409..3bb134b 100644 --- a/include/llvm/CodeGen/LiveVariables.h +++ b/include/llvm/CodeGen/LiveVariables.h @@ -126,12 +126,6 @@ private: /// building live intervals. SparseBitVector<> PHIJoins; - /// ReservedRegisters - This vector keeps track of which registers - /// are reserved register which are not allocatable by the target machine. - /// We can not track liveness for values that are in this set. - /// - BitVector ReservedRegisters; - private: // Intermediate data structures MachineFunction *MF; diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h index c917bd8..97c3945 100644 --- a/include/llvm/CodeGen/MachineBasicBlock.h +++ b/include/llvm/CodeGen/MachineBasicBlock.h @@ -351,6 +351,8 @@ public: /// parameter is stored in Weights list and it may be used by /// MachineBranchProbabilityInfo analysis to calculate branch probability. /// + /// Note that duplicate Machine CFG edges are not allowed. + /// void addSuccessor(MachineBasicBlock *succ, uint32_t weight = 0); /// removeSuccessor - Remove successor from the successors list of this @@ -545,6 +547,28 @@ public: return findDebugLoc(MBBI.getInstrIterator()); } + /// Possible outcome of a register liveness query to computeRegisterLiveness() + enum LivenessQueryResult { + LQR_Live, ///< Register is known to be live. + LQR_OverlappingLive, ///< Register itself is not live, but some overlapping + ///< register is. + LQR_Dead, ///< Register is known to be dead. 
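// Aside: a minimal sketch of the SSA-repair idiom described above for
// LiveIntervals::pruneValue() and LiveIntervals::extendToIndices(). LIS (a
// LiveIntervals*), LI (the LiveInterval* of the virtual register that just
// received a new def) and Kill (the SlotIndex of that def) are assumed to be
// in scope; the element type of EndPoints is assumed to be SlotIndex.
SmallVector<SlotIndex, 4> EndPoints;
LIS->pruneValue(LI, Kill, &EndPoints);   // remove liveness reachable from Kill
LIS->extendToIndices(LI, EndPoints);     // re-extend, inserting PHI-defs as needed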
+ LQR_Unknown ///< Register liveness not decidable from local + ///< neighborhood. + }; + + /// computeRegisterLiveness - Return whether (physical) register \c Reg + /// has been defined and not killed as of just before \c MI. + /// + /// Search is localised to a neighborhood of + /// \c Neighborhood instructions before (searching for defs or kills) and + /// Neighborhood instructions after (searching just for defs) MI. + /// + /// \c Reg must be a physical register. + LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, + unsigned Reg, MachineInstr *MI, + unsigned Neighborhood=10); + + // Debugging methods. void dump() const; void print(raw_ostream &OS, SlotIndexes* = 0) const; @@ -572,7 +596,7 @@ private: /// getSuccWeight - Return weight of the edge from this block to MBB. This /// method should NOT be called directly, but by using getEdgeWeight method /// from MachineBranchProbabilityInfo class. - uint32_t getSuccWeight(const MachineBasicBlock *succ) const; + uint32_t getSuccWeight(const_succ_iterator Succ) const; // Methods used to maintain doubly linked list of blocks... diff --git a/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/include/llvm/CodeGen/MachineBranchProbabilityInfo.h index af4db7d..12189ce 100644 --- a/include/llvm/CodeGen/MachineBranchProbabilityInfo.h +++ b/include/llvm/CodeGen/MachineBranchProbabilityInfo.h @@ -16,14 +16,12 @@ #define LLVM_CODEGEN_MACHINEBRANCHPROBABILITYINFO_H #include "llvm/Pass.h" +#include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/Support/BranchProbability.h" #include namespace llvm { -class raw_ostream; -class MachineBasicBlock; - class MachineBranchProbabilityInfo : public ImmutablePass { virtual void anchor(); @@ -52,6 +50,11 @@ public: uint32_t getEdgeWeight(const MachineBasicBlock *Src, const MachineBasicBlock *Dst) const; + // Same thing, but using a const_succ_iterator from Src. This is faster when + // the iterator is already available. + uint32_t getEdgeWeight(const MachineBasicBlock *Src, + MachineBasicBlock::const_succ_iterator Dst) const; + // Get sum of the block successors' weights, potentially scaling them to fit // within 32-bits. If scaling is required, sets Scale based on the necessary // adjustment. Any edge weights used with the sum should be divided by Scale. diff --git a/include/llvm/CodeGen/MachineConstantPool.h b/include/llvm/CodeGen/MachineConstantPool.h index d6d65a2..8ed215d 100644 --- a/include/llvm/CodeGen/MachineConstantPool.h +++ b/include/llvm/CodeGen/MachineConstantPool.h @@ -25,7 +25,7 @@ namespace llvm { class Constant; class FoldingSetNodeID; -class TargetData; +class DataLayout; class TargetMachine; class Type; class MachineConstantPool; @@ -132,14 +132,14 @@ public: /// address of the function constant pool values. /// @brief The machine constant pool. class MachineConstantPool { - const TargetData *TD; ///< The machine's TargetData. + const DataLayout *TD; ///< The machine's DataLayout. unsigned PoolAlignment; ///< The alignment for the pool. std::vector Constants; ///< The pool of constants. /// MachineConstantPoolValues that use an existing MachineConstantPoolEntry. DenseSet MachineCPVsSharingEntries; public: /// @brief The only constructor.
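// Aside: usage sketch for MachineBasicBlock::computeRegisterLiveness()
// declared above. MBB (MachineBasicBlock*), MI (a MachineInstr* inside MBB),
// TRI and a physical register PhysReg are assumed to be in scope; LQR_Unknown
// should be treated conservatively, like LQR_Live.
MachineBasicBlock::LivenessQueryResult LQR =
    MBB->computeRegisterLiveness(TRI, PhysReg, MI);
if (LQR == MachineBasicBlock::LQR_Dead) {
  // PhysReg is known dead just before MI, so it may be clobbered here.
}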
- explicit MachineConstantPool(const TargetData *td) + explicit MachineConstantPool(const DataLayout *td) : TD(td), PoolAlignment(1) {} ~MachineConstantPool(); diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h index 8b958e4..0e4e132 100644 --- a/include/llvm/CodeGen/MachineFrameInfo.h +++ b/include/llvm/CodeGen/MachineFrameInfo.h @@ -21,13 +21,15 @@ namespace llvm { class raw_ostream; -class TargetData; +class DataLayout; class TargetRegisterClass; class Type; class MachineFunction; class MachineBasicBlock; class TargetFrameLowering; class BitVector; +class Value; +class AllocaInst; /// The CalleeSavedInfo class tracks the information need to locate where a /// callee saved register is in the current frame. @@ -103,14 +105,18 @@ class MachineFrameInfo { // protector. bool MayNeedSP; + /// Alloca - If this stack object is originated from an Alloca instruction + /// this value saves the original IR allocation. Can be NULL. + const AllocaInst *Alloca; + // PreAllocated - If true, the object was mapped into the local frame // block and doesn't need additional handling for allocation beyond that. bool PreAllocated; StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM, - bool isSS, bool NSP) + bool isSS, bool NSP, const AllocaInst *Val) : SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM), - isSpillSlot(isSS), MayNeedSP(NSP), PreAllocated(false) {} + isSpillSlot(isSS), MayNeedSP(NSP), Alloca(Val), PreAllocated(false) {} }; /// Objects - The list of stack objects allocated... @@ -362,6 +368,14 @@ public: ensureMaxAlignment(Align); } + /// getObjectAllocation - Return the underlying Alloca of the specified + /// stack object if it exists. Returns 0 if none exists. + const AllocaInst* getObjectAllocation(int ObjectIdx) const { + assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() && + "Invalid Object Idx!"); + return Objects[ObjectIdx+NumFixedObjects].Alloca; + } + /// NeedsStackProtector - Returns true if the object may need stack /// protectors. bool MayNeedStackProtector(int ObjectIdx) const { @@ -482,9 +496,10 @@ public: /// a nonnegative identifier to represent it. /// int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, - bool MayNeedSP = false) { + bool MayNeedSP = false, const AllocaInst *Alloca = 0) { assert(Size != 0 && "Cannot allocate zero size stack objects!"); - Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP)); + Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP, + Alloca)); int Index = (int)Objects.size() - NumFixedObjects - 1; assert(Index >= 0 && "Bad frame index!"); ensureMaxAlignment(Alignment); @@ -516,7 +531,7 @@ public: /// int CreateVariableSizedObject(unsigned Alignment) { HasVarSizedObjects = true; - Objects.push_back(StackObject(0, Alignment, 0, false, false, true)); + Objects.push_back(StackObject(0, Alignment, 0, false, false, true, 0)); ensureMaxAlignment(Alignment); return (int)Objects.size()-NumFixedObjects-1; } diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h index 062c750..025e18a 100644 --- a/include/llvm/CodeGen/MachineFunction.h +++ b/include/llvm/CodeGen/MachineFunction.h @@ -127,8 +127,8 @@ class MachineFunction { /// about the control flow of such functions. 
bool ExposesReturnsTwice; - MachineFunction(const MachineFunction &); // DO NOT IMPLEMENT - void operator=(const MachineFunction&); // DO NOT IMPLEMENT + MachineFunction(const MachineFunction &) LLVM_DELETED_FUNCTION; + void operator=(const MachineFunction&) LLVM_DELETED_FUNCTION; public: MachineFunction(const Function *Fn, const TargetMachine &TM, unsigned FunctionNum, MachineModuleInfo &MMI, @@ -138,15 +138,19 @@ public: MachineModuleInfo &getMMI() const { return MMI; } GCModuleInfo *getGMI() const { return GMI; } MCContext &getContext() const { return Ctx; } - + /// getFunction - Return the LLVM function that this machine code represents /// const Function *getFunction() const { return Fn; } + /// getName - Return the name of the corresponding LLVM function. + /// + StringRef getName() const; + /// getFunctionNumber - Return a unique ID for the current function. /// unsigned getFunctionNumber() const { return FunctionNumber; } - + /// getTarget - Return the target machine this machine code is compiled with /// const TargetMachine &getTarget() const { return Target; } diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h index 27756ab..7eb03a9 100644 --- a/include/llvm/CodeGen/MachineInstr.h +++ b/include/llvm/CodeGen/MachineInstr.h @@ -25,6 +25,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/DenseMapInfo.h" +#include "llvm/InlineAsm.h" #include "llvm/Support/DebugLoc.h" #include @@ -81,8 +82,8 @@ private: MachineBasicBlock *Parent; // Pointer to the owning basic block. DebugLoc debugLoc; // Source line information. - MachineInstr(const MachineInstr&); // DO NOT IMPLEMENT - void operator=(const MachineInstr&); // DO NOT IMPLEMENT + MachineInstr(const MachineInstr&) LLVM_DELETED_FUNCTION; + void operator=(const MachineInstr&) LLVM_DELETED_FUNCTION; // Intrusive list support friend struct ilist_traits; @@ -97,25 +98,10 @@ private: /// MCID NULL and no operands. MachineInstr(); - // The next two constructors have DebugLoc and non-DebugLoc versions; - // over time, the non-DebugLoc versions should be phased out and eventually - // removed. - - /// MachineInstr ctor - This constructor creates a MachineInstr and adds the - /// implicit operands. It reserves space for the number of operands specified - /// by the MCInstrDesc. The version with a DebugLoc should be preferred. - explicit MachineInstr(const MCInstrDesc &MCID, bool NoImp = false); - - /// MachineInstr ctor - Work exactly the same as the ctor above, except that - /// the MachineInstr is created and added to the end of the specified basic - /// block. The version with a DebugLoc should be preferred. - MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &MCID); - /// MachineInstr ctor - This constructor create a MachineInstr and add the /// implicit operands. It reserves space for number of operands specified by /// MCInstrDesc. An explicit DebugLoc is supplied. - explicit MachineInstr(const MCInstrDesc &MCID, const DebugLoc dl, - bool NoImp = false); + MachineInstr(const MCInstrDesc &MCID, const DebugLoc dl, bool NoImp = false); /// MachineInstr ctor - Work exactly the same as the ctor above, except that /// the MachineInstr is created and added to the end of the specified basic @@ -459,6 +445,11 @@ public: /// Instructions with this flag set are not necessarily simple load /// instructions, they may load a value and modify it, for example. 
bool mayLoad(QueryType Type = AnyInBundle) const { + if (isInlineAsm()) { + unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); + if (ExtraInfo & InlineAsm::Extra_MayLoad) + return true; + } return hasProperty(MCID::MayLoad, Type); } @@ -468,6 +459,11 @@ public: /// instructions, they may store a modified value based on their operands, or /// may not actually modify anything, for example. bool mayStore(QueryType Type = AnyInBundle) const { + if (isInlineAsm()) { + unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); + if (ExtraInfo & InlineAsm::Extra_MayStore) + return true; + } return hasProperty(MCID::MayStore, Type); } @@ -610,6 +606,7 @@ public: bool isImplicitDef() const { return getOpcode()==TargetOpcode::IMPLICIT_DEF; } bool isInlineAsm() const { return getOpcode() == TargetOpcode::INLINEASM; } bool isStackAligningInlineAsm() const; + InlineAsm::AsmDialect getInlineAsmDialect() const; bool isInsertSubreg() const { return getOpcode() == TargetOpcode::INSERT_SUBREG; } @@ -782,16 +779,43 @@ public: const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const; + /// tieOperands - Add a tie between the register operands at DefIdx and + /// UseIdx. The tie will cause the register allocator to ensure that the two + /// operands are assigned the same physical register. + /// + /// Tied operands are managed automatically for explicit operands in the + /// MCInstrDesc. This method is for exceptional cases like inline asm. + void tieOperands(unsigned DefIdx, unsigned UseIdx); + + /// findTiedOperandIdx - Given the index of a tied register operand, find the + /// operand it is tied to. Defs are tied to uses and vice versa. Returns the + /// index of the tied operand which must exist. + unsigned findTiedOperandIdx(unsigned OpIdx) const; + /// isRegTiedToUseOperand - Given the index of a register def operand, /// check if the register def is tied to a source operand, due to either /// two-address elimination or inline assembly constraints. Returns the /// first tied use operand index by reference if UseOpIdx is not null. - bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const; + bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const { + const MachineOperand &MO = getOperand(DefOpIdx); + if (!MO.isReg() || !MO.isDef() || !MO.isTied()) + return false; + if (UseOpIdx) + *UseOpIdx = findTiedOperandIdx(DefOpIdx); + return true; + } /// isRegTiedToDefOperand - Return true if the use operand of the specified /// index is tied to an def operand. It also returns the def operand index by /// reference if DefOpIdx is not null. - bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const; + bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const { + const MachineOperand &MO = getOperand(UseOpIdx); + if (!MO.isReg() || !MO.isUse() || !MO.isTied()) + return false; + if (DefOpIdx) + *DefOpIdx = findTiedOperandIdx(UseOpIdx); + return true; + } /// clearKillInfo - Clears kill flags on all operands. /// @@ -852,11 +876,11 @@ public: bool isSafeToReMat(const TargetInstrInfo *TII, AliasAnalysis *AA, unsigned DstReg) const; - /// hasVolatileMemoryRef - Return true if this instruction may have a - /// volatile memory reference, or if the information describing the - /// memory reference is not available. Return false if it is known to - /// have no volatile memory references. 
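// Aside: how the tied-operand helpers above fit together. MI is an assumed
// MachineInstr* whose operand 0 is a register def; the index is illustrative.
unsigned UseIdx;
if (MI->isRegTiedToUseOperand(/*DefOpIdx=*/0, &UseIdx)) {
  // Operand 0 and operand UseIdx must be assigned the same physical register;
  // findTiedOperandIdx() reports the same pairing from either side.
  assert(MI->findTiedOperandIdx(0) == UseIdx);
}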
- bool hasVolatileMemoryRef() const; + /// hasOrderedMemoryRef - Return true if this instruction may have an ordered + /// or volatile memory reference, or if the information describing the memory + /// reference is not available. Return false if it is known to have no + /// ordered or volatile memory references. + bool hasOrderedMemoryRef() const; /// isInvariantLoad - Return true if this instruction is loading from a /// location whose value is invariant across the function. For example, @@ -935,6 +959,15 @@ private: /// return null. MachineRegisterInfo *getRegInfo(); + /// untieRegOperand - Break any tie involving OpIdx. + void untieRegOperand(unsigned OpIdx) { + MachineOperand &MO = getOperand(OpIdx); + if (MO.isReg() && MO.isTied()) { + getOperand(findTiedOperandIdx(OpIdx)).TiedTo = 0; + MO.TiedTo = 0; + } + } + /// addImplicitDefUseOperands - Add all implicit def and use operands to /// this instruction. void addImplicitDefUseOperands(); diff --git a/include/llvm/CodeGen/MachineInstrBuilder.h b/include/llvm/CodeGen/MachineInstrBuilder.h index 654361f..7706853 100644 --- a/include/llvm/CodeGen/MachineInstrBuilder.h +++ b/include/llvm/CodeGen/MachineInstrBuilder.h @@ -176,15 +176,24 @@ public: } // Add a displacement from an existing MachineOperand with an added offset. - const MachineInstrBuilder &addDisp(const MachineOperand &Disp, - int64_t off) const { + const MachineInstrBuilder &addDisp(const MachineOperand &Disp, int64_t off, + unsigned char TargetFlags = 0) const { switch (Disp.getType()) { default: llvm_unreachable("Unhandled operand type in addDisp()"); case MachineOperand::MO_Immediate: return addImm(Disp.getImm() + off); - case MachineOperand::MO_GlobalAddress: - return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off); + case MachineOperand::MO_GlobalAddress: { + // If caller specifies new TargetFlags then use it, otherwise the + // default behavior is to copy the target flags from the existing + // MachineOperand. This means if the caller wants to clear the + // target flags it needs to do so explicitly. + if (TargetFlags) + return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off, + TargetFlags); + return addGlobalAddress(Disp.getGlobal(), Disp.getOffset() + off, + Disp.getTargetFlags()); + } } } }; diff --git a/include/llvm/CodeGen/MachineInstrBundle.h b/include/llvm/CodeGen/MachineInstrBundle.h index dc5f9a6..854ba06 100644 --- a/include/llvm/CodeGen/MachineInstrBundle.h +++ b/include/llvm/CodeGen/MachineInstrBundle.h @@ -130,9 +130,9 @@ public: return OpI - InstrI->operands_begin(); } - /// RegInfo - Information about a virtual register used by a set of operands. + /// VirtRegInfo - Information about a virtual register used by a set of operands. /// - struct RegInfo { + struct VirtRegInfo { /// Reads - One of the operands read the virtual register. This does not /// include or use operands, see MO::readsReg(). bool Reads; @@ -146,6 +146,32 @@ public: bool Tied; }; + /// PhysRegInfo - Information about a physical register used by a set of + /// operands. + struct PhysRegInfo { + /// Clobbers - Reg or an overlapping register is defined, or a regmask + /// clobbers Reg. + bool Clobbers; + + /// Defines - Reg or a super-register is defined. + bool Defines; + + /// DefinesOverlap - Reg or an overlapping register is defined. + bool DefinesOverlap; + + /// Reads - Read or a super-register is read. + bool Reads; + + /// ReadsOverlap - Reg or an overlapping register is read. 
+ bool ReadsOverlap; + + /// DefinesDead - All defs of a Reg or a super-register are dead. + bool DefinesDead; + + /// There is a kill of Reg or a super-register. + bool Kills; + }; + /// analyzeVirtReg - Analyze how the current instruction or bundle uses a /// virtual register. This function should not be called after operator++(), /// it expects a fresh iterator. @@ -154,8 +180,16 @@ public: /// @param Ops When set, this vector will receive an (MI, OpNum) entry for /// each operand referring to Reg. /// @returns A filled-in RegInfo struct. - RegInfo analyzeVirtReg(unsigned Reg, + VirtRegInfo analyzeVirtReg(unsigned Reg, SmallVectorImpl > *Ops = 0); + + /// analyzePhysReg - Analyze how the current instruction or bundle uses a + /// physical register. This function should not be called after operator++(), + /// it expects a fresh iterator. + /// + /// @param Reg The physical register to analyze. + /// @returns A filled-in PhysRegInfo struct. + PhysRegInfo analyzePhysReg(unsigned Reg, const TargetRegisterInfo *TRI); }; /// MIOperands - Iterate over operands of a single instruction. diff --git a/include/llvm/CodeGen/MachineJumpTableInfo.h b/include/llvm/CodeGen/MachineJumpTableInfo.h index f7c4e86..928145d 100644 --- a/include/llvm/CodeGen/MachineJumpTableInfo.h +++ b/include/llvm/CodeGen/MachineJumpTableInfo.h @@ -26,7 +26,7 @@ namespace llvm { class MachineBasicBlock; -class TargetData; +class DataLayout; class raw_ostream; /// MachineJumpTableEntry - One jump table in the jump table info. @@ -84,9 +84,9 @@ public: JTEntryKind getEntryKind() const { return EntryKind; } /// getEntrySize - Return the size of each entry in the jump table. - unsigned getEntrySize(const TargetData &TD) const; + unsigned getEntrySize(const DataLayout &TD) const; /// getEntryAlignment - Return the alignment of each entry in the jump table. - unsigned getEntryAlignment(const TargetData &TD) const; + unsigned getEntryAlignment(const DataLayout &TD) const; /// createJumpTableIndex - Create a new jump table. /// diff --git a/include/llvm/CodeGen/MachineLoopInfo.h b/include/llvm/CodeGen/MachineLoopInfo.h index 3e204be..d53f041 100644 --- a/include/llvm/CodeGen/MachineLoopInfo.h +++ b/include/llvm/CodeGen/MachineLoopInfo.h @@ -73,8 +73,8 @@ class MachineLoopInfo : public MachineFunctionPass { LoopInfoBase LI; friend class LoopBase; - void operator=(const MachineLoopInfo &); // do not implement - MachineLoopInfo(const MachineLoopInfo &); // do not implement + void operator=(const MachineLoopInfo &) LLVM_DELETED_FUNCTION; + MachineLoopInfo(const MachineLoopInfo &) LLVM_DELETED_FUNCTION; public: static char ID; // Pass identification, replacement for typeid diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h index 1ac9080..ddb1271 100644 --- a/include/llvm/CodeGen/MachineMemOperand.h +++ b/include/llvm/CodeGen/MachineMemOperand.h @@ -151,6 +151,15 @@ public: bool isNonTemporal() const { return Flags & MONonTemporal; } bool isInvariant() const { return Flags & MOInvariant; } + /// isUnordered - Returns true if this memory operation doesn't have any + /// ordering constraints other than normal aliasing. Volatile and atomic + /// memory operations can't be reordered. + /// + /// Currently, we don't model the difference between volatile and atomic + /// operations. They should retain their ordering relative to all memory + /// operations. 
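// Aside: sketch of the new analyzePhysReg() query above. It assumes the
// operand iterator can be built directly from a MachineInstr* (as MIOperands
// in this header is meant to be used) and that MI, Reg and TRI are in scope.
MIOperands::PhysRegInfo PRI = MIOperands(MI).analyzePhysReg(Reg, TRI);
if (PRI.Kills) {
  // The instruction (or bundle) kills Reg or one of its super-registers.
}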
+ bool isUnordered() const { return !isVolatile(); } + /// refineAlignment - Update this MachineMemOperand to reflect the alignment /// of MMO, if it has a greater alignment. This must only be used when the /// new alignment applies to all users of this MachineMemOperand. diff --git a/include/llvm/CodeGen/MachineModuleInfoImpls.h b/include/llvm/CodeGen/MachineModuleInfoImpls.h index 9401ffd1..7afc7eb 100644 --- a/include/llvm/CodeGen/MachineModuleInfoImpls.h +++ b/include/llvm/CodeGen/MachineModuleInfoImpls.h @@ -38,7 +38,7 @@ namespace llvm { /// this GV is external. DenseMap HiddenGVStubs; - virtual void Anchor(); // Out of line virtual method. + virtual void anchor(); // Out of line virtual method. public: MachineModuleInfoMachO(const MachineModuleInfo &) {} @@ -76,7 +76,7 @@ namespace llvm { /// mode. DenseMap GVStubs; - virtual void Anchor(); // Out of line virtual method. + virtual void anchor(); // Out of line virtual method. public: MachineModuleInfoELF(const MachineModuleInfo &) {} diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h index 37d42b3..606833c 100644 --- a/include/llvm/CodeGen/MachineOperand.h +++ b/include/llvm/CodeGen/MachineOperand.h @@ -14,7 +14,6 @@ #ifndef LLVM_CODEGEN_MACHINEOPERAND_H #define LLVM_CODEGEN_MACHINEOPERAND_H -#include "llvm/ADT/Hashing.h" #include "llvm/Support/DataTypes.h" #include @@ -30,6 +29,7 @@ class MachineRegisterInfo; class MDNode; class TargetMachine; class TargetRegisterInfo; +class hash_code; class raw_ostream; class MCSymbol; @@ -60,12 +60,20 @@ private: /// union. unsigned char OpKind; // MachineOperandType - /// SubReg - Subregister number, only valid for MO_Register. A value of 0 - /// indicates the MO_Register has no subReg. - unsigned char SubReg; + // This union is discriminated by OpKind. + union { + /// SubReg - Subregister number, only valid for MO_Register. A value of 0 + /// indicates the MO_Register has no subReg. + unsigned char SubReg; + + /// TargetFlags - This is a set of target-specific operand flags. + unsigned char TargetFlags; + }; - /// TargetFlags - This is a set of target-specific operand flags. - unsigned char TargetFlags; + /// TiedTo - Non-zero when this register operand is tied to another register + /// operand. The encoding of this field is described in the block comment + /// before MachineInstr::tieOperands(). + unsigned char TiedTo : 4; /// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register /// operands. @@ -176,9 +184,17 @@ public: /// MachineOperandType getType() const { return (MachineOperandType)OpKind; } - unsigned char getTargetFlags() const { return TargetFlags; } - void setTargetFlags(unsigned char F) { TargetFlags = F; } - void addTargetFlag(unsigned char F) { TargetFlags |= F; } + unsigned char getTargetFlags() const { + return isReg() ? 0 : TargetFlags; + } + void setTargetFlags(unsigned char F) { + assert(!isReg() && "Register operands can't have target flags"); + TargetFlags = F; + } + void addTargetFlag(unsigned char F) { + assert(!isReg() && "Register operands can't have target flags"); + TargetFlags |= F; + } /// getParent - Return the instruction that this operand belongs to. 
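// Aside: the practical use of isUnordered() above pairs with
// MachineInstr::hasOrderedMemoryRef() from the MachineInstr.h hunk earlier in
// this patch (MI is an assumed MachineInstr*).
if (!MI->hasOrderedMemoryRef()) {
  // All memory references of MI are known and unordered (neither volatile nor
  // atomic), so MI may be reordered with other such instructions, subject to
  // ordinary alias analysis.
}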
@@ -288,6 +304,11 @@ public: return IsEarlyClobber; } + bool isTied() const { + assert(isReg() && "Wrong MachineOperand accessor"); + return TiedTo; + } + bool isDebug() const { assert(isReg() && "Wrong MachineOperand accessor"); return IsDebug; @@ -421,7 +442,7 @@ public: int64_t getOffset() const { assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() || isBlockAddress()) && "Wrong MachineOperand accessor"); - return (int64_t(Contents.OffsetedInfo.OffsetHi) << 32) | + return int64_t(uint64_t(Contents.OffsetedInfo.OffsetHi) << 32) | SmallContents.OffsetLo; } @@ -548,6 +569,7 @@ public: Op.IsUndef = isUndef; Op.IsInternalRead = isInternalRead; Op.IsEarlyClobber = isEarlyClobber; + Op.TiedTo = 0; Op.IsDebug = isDebug; Op.SmallContents.RegNo = Reg; Op.Contents.Reg.Prev = 0; @@ -606,11 +628,11 @@ public: Op.setTargetFlags(TargetFlags); return Op; } - static MachineOperand CreateBA(const BlockAddress *BA, + static MachineOperand CreateBA(const BlockAddress *BA, int64_t Offset, unsigned char TargetFlags = 0) { MachineOperand Op(MachineOperand::MO_BlockAddress); Op.Contents.OffsetedInfo.Val.BA = BA; - Op.setOffset(0); // Offset is always 0. + Op.setOffset(Offset); Op.setTargetFlags(TargetFlags); return Op; } @@ -665,6 +687,9 @@ inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) { return OS; } + // See friend declaration above. This additional declaration is required in + // order to compile LLVM with IBM xlC compiler. + hash_code hash_value(const MachineOperand &MO); } // End llvm namespace #endif diff --git a/include/llvm/CodeGen/MachinePostDominators.h b/include/llvm/CodeGen/MachinePostDominators.h new file mode 100644 index 0000000..a9fc843 --- /dev/null +++ b/include/llvm/CodeGen/MachinePostDominators.h @@ -0,0 +1,87 @@ +//=- llvm/CodeGen/MachineDominators.h ----------------------------*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes interfaces to post dominance information for +// target-specific code. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H +#define LLVM_CODEGEN_MACHINEPOSTDOMINATORS_H + +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/Analysis/Dominators.h" +#include "llvm/Analysis/DominatorInternals.h" + +namespace llvm { + +/// +/// PostDominatorTree Class - Concrete subclass of DominatorTree that is used +/// to compute the a post-dominator tree. 
+/// +struct MachinePostDominatorTree : public MachineFunctionPass { +private: + DominatorTreeBase *DT; + +public: + static char ID; + + MachinePostDominatorTree(); + + ~MachinePostDominatorTree(); + + FunctionPass *createMachinePostDominatorTreePass(); + + const std::vector &getRoots() const { + return DT->getRoots(); + } + + MachineDomTreeNode *getRootNode() const { + return DT->getRootNode(); + } + + MachineDomTreeNode *operator[](MachineBasicBlock *BB) const { + return DT->getNode(BB); + } + + MachineDomTreeNode *getNode(MachineBasicBlock *BB) const { + return DT->getNode(BB); + } + + bool dominates(MachineDomTreeNode *A, MachineDomTreeNode *B) const { + return DT->dominates(A, B); + } + + bool dominates(MachineBasicBlock *A, MachineBasicBlock *B) const { + return DT->dominates(A, B); + } + + bool + properlyDominates(const MachineDomTreeNode *A, MachineDomTreeNode *B) const { + return DT->properlyDominates(A, B); + } + + bool + properlyDominates(MachineBasicBlock *A, MachineBasicBlock *B) const { + return DT->properlyDominates(A, B); + } + + MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A, + MachineBasicBlock *B) { + return DT->findNearestCommonDominator(A, B); + } + + virtual bool runOnMachineFunction(MachineFunction &MF); + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + virtual void print(llvm::raw_ostream &OS, const Module *M = 0) const; +}; +} //end of namespace llvm + +#endif diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h index 42a8aa4..4e86363 100644 --- a/include/llvm/CodeGen/MachineRegisterInfo.h +++ b/include/llvm/CodeGen/MachineRegisterInfo.h @@ -77,16 +77,20 @@ class MachineRegisterInfo { return MO->Contents.Reg.Next; } - /// UsedPhysRegs - This is a bit vector that is computed and set by the + /// UsedRegUnits - This is a bit vector that is computed and set by the /// register allocator, and must be kept up to date by passes that run after /// register allocation (though most don't modify this). This is used /// so that the code generator knows which callee save registers to save and /// for other target specific uses. - /// This vector only has bits set for registers explicitly used, not their - /// aliases. - BitVector UsedPhysRegs; - - /// UsedPhysRegMask - Additional used physregs, but including aliases. + /// This vector has bits set for register units that are modified in the + /// current function. It doesn't include registers clobbered by function + /// calls with register mask operands. + BitVector UsedRegUnits; + + /// UsedPhysRegMask - Additional used physregs including aliases. + /// This bit vector represents all the registers clobbered by function calls. + /// It can model things that UsedRegUnits can't, such as function calls that + /// clobber ymm7 but preserve the low half in xmm7. BitVector UsedPhysRegMask; /// ReservedRegs - This is a bit vector of reserved registers. The target @@ -95,9 +99,6 @@ class MachineRegisterInfo { /// started. BitVector ReservedRegs; - /// AllocatableRegs - From TRI->getAllocatableSet. - mutable BitVector AllocatableRegs; - /// LiveIns/LiveOuts - Keep track of the physical registers that are /// livein/liveout of the function. Live in values are typically arguments in /// registers, live out values are typically return values in registers. 
@@ -106,8 +107,8 @@ class MachineRegisterInfo { std::vector > LiveIns; std::vector LiveOuts; - MachineRegisterInfo(const MachineRegisterInfo&); // DO NOT IMPLEMENT - void operator=(const MachineRegisterInfo&); // DO NOT IMPLEMENT + MachineRegisterInfo(const MachineRegisterInfo&) LLVM_DELETED_FUNCTION; + void operator=(const MachineRegisterInfo&) LLVM_DELETED_FUNCTION; public: explicit MachineRegisterInfo(const TargetRegisterInfo &TRI); ~MachineRegisterInfo(); @@ -360,29 +361,27 @@ public: //===--------------------------------------------------------------------===// /// isPhysRegUsed - Return true if the specified register is used in this - /// function. This only works after register allocation. + /// function. Also check for clobbered aliases and registers clobbered by + /// function calls with register mask operands. + /// + /// This only works after register allocation. It is primarily used by + /// PrologEpilogInserter to determine which callee-saved registers need + /// spilling. bool isPhysRegUsed(unsigned Reg) const { - return UsedPhysRegs.test(Reg) || UsedPhysRegMask.test(Reg); - } - - /// isPhysRegOrOverlapUsed - Return true if Reg or any overlapping register - /// is used in this function. - bool isPhysRegOrOverlapUsed(unsigned Reg) const { if (UsedPhysRegMask.test(Reg)) return true; - for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) - if (UsedPhysRegs.test(*AI)) + for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) + if (UsedRegUnits.test(*Units)) return true; return false; } /// setPhysRegUsed - Mark the specified register used in this function. /// This should only be called during and after register allocation. - void setPhysRegUsed(unsigned Reg) { UsedPhysRegs.set(Reg); } - - /// addPhysRegsUsed - Mark the specified registers used in this function. - /// This should only be called during and after register allocation. - void addPhysRegsUsed(const BitVector &Regs) { UsedPhysRegs |= Regs; } + void setPhysRegUsed(unsigned Reg) { + for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) + UsedRegUnits.set(*Units); + } /// addPhysRegsUsedFromRegMask - Mark any registers not in RegMask as used. /// This corresponds to the bit mask attached to register mask operands. @@ -393,8 +392,9 @@ public: /// setPhysRegUnused - Mark the specified register unused in this function. /// This should only be called during and after register allocation. void setPhysRegUnused(unsigned Reg) { - UsedPhysRegs.reset(Reg); UsedPhysRegMask.reset(Reg); + for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) + UsedRegUnits.reset(*Units); } @@ -427,6 +427,34 @@ public: return !reservedRegsFrozen() || ReservedRegs.test(PhysReg); } + /// getReservedRegs - Returns a reference to the frozen set of reserved + /// registers. This method should always be preferred to calling + /// TRI::getReservedRegs() when possible. + const BitVector &getReservedRegs() const { + assert(reservedRegsFrozen() && + "Reserved registers haven't been frozen yet. " + "Use TRI::getReservedRegs()."); + return ReservedRegs; + } + + /// isReserved - Returns true when PhysReg is a reserved register. + /// + /// Reserved registers may belong to an allocatable register class, but the + /// target has explicitly requested that they are not used. + /// + bool isReserved(unsigned PhysReg) const { + return getReservedRegs().test(PhysReg); + } + + /// isAllocatable - Returns true when PhysReg belongs to an allocatable + /// register class and it hasn't been reserved. 
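// Aside: sketch of the reworked MachineRegisterInfo queries above (MRI is an
// assumed MachineRegisterInfo&, PhysReg an assumed physical register).
// isAllocatable() already folds in the reserved-register check.
if (MRI.isAllocatable(PhysReg) && !MRI.isPhysRegUsed(PhysReg)) {
  // PhysReg is in an allocatable class, not reserved, and no register unit of
  // it has been modified in this function, including through call-clobber
  // register masks.
}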
+ /// + /// Allocatable registers may show up in the allocation order of some virtual + /// register, so a register allocator needs to track its liveness and + /// availability. + bool isAllocatable(unsigned PhysReg) const { + return TRI->isInAllocatableClass(PhysReg) && !isReserved(PhysReg); + } //===--------------------------------------------------------------------===// // LiveIn/LiveOut Management diff --git a/include/llvm/CodeGen/MachineSSAUpdater.h b/include/llvm/CodeGen/MachineSSAUpdater.h index cbb45a7..edf93d1 100644 --- a/include/llvm/CodeGen/MachineSSAUpdater.h +++ b/include/llvm/CodeGen/MachineSSAUpdater.h @@ -14,6 +14,8 @@ #ifndef LLVM_CODEGEN_MACHINESSAUPDATER_H #define LLVM_CODEGEN_MACHINESSAUPDATER_H +#include "llvm/Support/Compiler.h" + namespace llvm { class MachineBasicBlock; class MachineFunction; @@ -106,8 +108,8 @@ private: void ReplaceRegWith(unsigned OldReg, unsigned NewReg); unsigned GetValueAtEndOfBlockInternal(MachineBasicBlock *BB); - void operator=(const MachineSSAUpdater&); // DO NOT IMPLEMENT - MachineSSAUpdater(const MachineSSAUpdater&); // DO NOT IMPLEMENT + void operator=(const MachineSSAUpdater&) LLVM_DELETED_FUNCTION; + MachineSSAUpdater(const MachineSSAUpdater&) LLVM_DELETED_FUNCTION; }; } // End llvm namespace diff --git a/include/llvm/CodeGen/MachineScheduler.h b/include/llvm/CodeGen/MachineScheduler.h index 8da2045..31bd606 100644 --- a/include/llvm/CodeGen/MachineScheduler.h +++ b/include/llvm/CodeGen/MachineScheduler.h @@ -28,9 +28,15 @@ #define MACHINESCHEDULER_H #include "llvm/CodeGen/MachinePassRegistry.h" +#include "llvm/CodeGen/RegisterPressure.h" +#include "llvm/CodeGen/ScheduleDAGInstrs.h" +#include "llvm/Target/TargetInstrInfo.h" namespace llvm { +extern cl::opt ForceTopDown; +extern cl::opt ForceBottomUp; + class AliasAnalysis; class LiveIntervals; class MachineDominatorTree; @@ -93,6 +99,237 @@ public: } }; +class ScheduleDAGMI; + +/// MachineSchedStrategy - Interface to the scheduling algorithm used by +/// ScheduleDAGMI. +class MachineSchedStrategy { +public: + virtual ~MachineSchedStrategy() {} + + /// Initialize the strategy after building the DAG for a new region. + virtual void initialize(ScheduleDAGMI *DAG) = 0; + + /// Notify this strategy that all roots have been released (including those + /// that depend on EntrySU or ExitSU). + virtual void registerRoots() {} + + /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to + /// schedule the node at the top of the unscheduled region. Otherwise it will + /// be scheduled at the bottom. + virtual SUnit *pickNode(bool &IsTopNode) = 0; + + /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled an + /// instruction and updated scheduled/remaining flags in the DAG nodes. + virtual void schedNode(SUnit *SU, bool IsTopNode) = 0; + + /// When all predecessor dependencies have been resolved, free this node for + /// top-down scheduling. + virtual void releaseTopNode(SUnit *SU) = 0; + /// When all successor dependencies have been resolved, free this node for + /// bottom-up scheduling. + virtual void releaseBottomNode(SUnit *SU) = 0; +}; + +/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience +/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified +/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in. +/// +/// This is a convenience class that may be used by implementations of +/// MachineSchedStrategy. 
+class ReadyQueue { + unsigned ID; + std::string Name; + std::vector Queue; + +public: + ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {} + + unsigned getID() const { return ID; } + + StringRef getName() const { return Name; } + + // SU is in this queue if it's NodeQueueID is a superset of this ID. + bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); } + + bool empty() const { return Queue.empty(); } + + void clear() { Queue.clear(); } + + unsigned size() const { return Queue.size(); } + + typedef std::vector::iterator iterator; + + iterator begin() { return Queue.begin(); } + + iterator end() { return Queue.end(); } + + iterator find(SUnit *SU) { + return std::find(Queue.begin(), Queue.end(), SU); + } + + void push(SUnit *SU) { + Queue.push_back(SU); + SU->NodeQueueId |= ID; + } + + iterator remove(iterator I) { + (*I)->NodeQueueId &= ~ID; + *I = Queue.back(); + unsigned idx = I - Queue.begin(); + Queue.pop_back(); + return Queue.begin() + idx; + } + +#ifndef NDEBUG + void dump(); +#endif +}; + +/// Mutate the DAG as a postpass after normal DAG building. +class ScheduleDAGMutation { +public: + virtual ~ScheduleDAGMutation() {} + + virtual void apply(ScheduleDAGMI *DAG) = 0; +}; + +/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules +/// machine instructions while updating LiveIntervals and tracking regpressure. +class ScheduleDAGMI : public ScheduleDAGInstrs { +protected: + AliasAnalysis *AA; + RegisterClassInfo *RegClassInfo; + MachineSchedStrategy *SchedImpl; + + /// Ordered list of DAG postprocessing steps. + std::vector Mutations; + + MachineBasicBlock::iterator LiveRegionEnd; + + /// Register pressure in this region computed by buildSchedGraph. + IntervalPressure RegPressure; + RegPressureTracker RPTracker; + + /// List of pressure sets that exceed the target's pressure limit before + /// scheduling, listed in increasing set ID order. Each pressure set is paired + /// with its max pressure in the currently scheduled regions. + std::vector RegionCriticalPSets; + + /// The top of the unscheduled zone. + MachineBasicBlock::iterator CurrentTop; + IntervalPressure TopPressure; + RegPressureTracker TopRPTracker; + + /// The bottom of the unscheduled zone. + MachineBasicBlock::iterator CurrentBottom; + IntervalPressure BotPressure; + RegPressureTracker BotRPTracker; + +#ifndef NDEBUG + /// The number of instructions scheduled so far. Used to cut off the + /// scheduler at the point determined by misched-cutoff. + unsigned NumInstrsScheduled; +#endif + +public: + ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S): + ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS), + AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S), + RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure), + CurrentBottom(), BotRPTracker(BotPressure) { +#ifndef NDEBUG + NumInstrsScheduled = 0; +#endif + } + + virtual ~ScheduleDAGMI() { + delete SchedImpl; + } + + /// Add a postprocessing step to the DAG builder. + /// Mutations are applied in the order that they are added after normal DAG + /// building and before MachineSchedStrategy initialization. + void addMutation(ScheduleDAGMutation *Mutation) { + Mutations.push_back(Mutation); + } + + MachineBasicBlock::iterator top() const { return CurrentTop; } + MachineBasicBlock::iterator bottom() const { return CurrentBottom; } + + /// Implement the ScheduleDAGInstrs interface for handling the next scheduling + /// region. 
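Returning to the ReadyQueue helper declared above: a strategy typically tags each queue with a distinct bit so that a node's NodeQueueId can record membership in several queues at once. A small sketch (the helper name popAnyNode is invented):

    #include "llvm/CodeGen/MachineScheduler.h"
    #include <cassert>

    using namespace llvm;

    // Pop an arbitrary node; remove() clears the queue's ID bit in NodeQueueId.
    static SUnit *popAnyNode(ReadyQueue &Q) {
      if (Q.empty())
        return NULL;
      ReadyQueue::iterator I = Q.begin();
      SUnit *SU = *I;
      Q.remove(I);
      assert(!Q.isInQueue(SU) && "node still marked as queued");
      return SU;
    }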
This covers all instructions in a block, while schedule() may only + /// cover a subset. + void enterRegion(MachineBasicBlock *bb, + MachineBasicBlock::iterator begin, + MachineBasicBlock::iterator end, + unsigned endcount); + + + /// Implement ScheduleDAGInstrs interface for scheduling a sequence of + /// reorderable instructions. + virtual void schedule(); + + /// Get current register pressure for the top scheduled instructions. + const IntervalPressure &getTopPressure() const { return TopPressure; } + const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; } + + /// Get current register pressure for the bottom scheduled instructions. + const IntervalPressure &getBotPressure() const { return BotPressure; } + const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; } + + /// Get register pressure for the entire scheduling region before scheduling. + const IntervalPressure &getRegPressure() const { return RegPressure; } + + const std::vector &getRegionCriticalPSets() const { + return RegionCriticalPSets; + } + +protected: + // Top-Level entry points for the schedule() driver... + + /// Call ScheduleDAGInstrs::buildSchedGraph with register pressure tracking + /// enabled. This sets up three trackers. RPTracker will cover the entire DAG + /// region, TopTracker and BottomTracker will be initialized to the top and + /// bottom of the DAG region without covereing any unscheduled instruction. + void buildDAGWithRegPressure(); + + /// Apply each ScheduleDAGMutation step in order. This allows different + /// instances of ScheduleDAGMI to perform custom DAG postprocessing. + void postprocessDAG(); + + /// Identify DAG roots and setup scheduler queues. + void initQueues(); + + /// Move an instruction and update register pressure. + void scheduleMI(SUnit *SU, bool IsTopNode); + + /// Update scheduler DAG and queues after scheduling an instruction. + void updateQueues(SUnit *SU, bool IsTopNode); + + /// Reinsert debug_values recorded in ScheduleDAGInstrs::DbgValues. + void placeDebugValues(); + + /// \brief dump the scheduled Sequence. + void dumpSchedule() const; + + // Lesser helpers... 
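As an aside on the postprocessDAG() step declared above: a postpass mutation only needs to implement apply(), and mutations run after DAG building and before the strategy's initialize(), in the order they were added. The sketch below, with the invented name CountNodesMutation, merely reports the region size; note that the ScheduleDAGMI destructor as declared here deletes only the strategy, so whoever installs a mutation remains responsible for its lifetime.

    #include "llvm/CodeGen/MachineScheduler.h"
    #include "llvm/Support/Debug.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;

    namespace {
    struct CountNodesMutation : public ScheduleDAGMutation {
      // Called between buildDAGWithRegPressure() and the strategy's
      // initialize(), per the schedule() driver above.
      virtual void apply(ScheduleDAGMI *DAG) {
        dbgs() << "misched region with " << DAG->SUnits.size() << " nodes\n";
      }
    };
    } // end anonymous namespace

    static void installMutations(ScheduleDAGMI *DAG) {
      DAG->addMutation(new CountNodesMutation());
    }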
+ + void initRegPressure(); + + void updateScheduledPressure(std::vector NewMaxPressure); + + void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos); + bool checkSchedLimit(); + + void releaseRoots(); + + void releaseSucc(SUnit *SU, SDep *SuccEdge); + void releaseSuccessors(SUnit *SU); + void releasePred(SUnit *SU, SDep *PredEdge); + void releasePredecessors(SUnit *SU); +}; + } // namespace llvm #endif diff --git a/include/llvm/CodeGen/PBQP/Graph.h b/include/llvm/CodeGen/PBQP/Graph.h index a5d8b0d..83c379b 100644 --- a/include/llvm/CodeGen/PBQP/Graph.h +++ b/include/llvm/CodeGen/PBQP/Graph.h @@ -19,6 +19,7 @@ #include #include +#include namespace PBQP { @@ -31,16 +32,16 @@ namespace PBQP { class NodeEntry; class EdgeEntry; - typedef std::list NodeList; - typedef std::list EdgeList; + typedef llvm::ilist NodeList; + typedef llvm::ilist EdgeList; public: - typedef NodeList::iterator NodeItr; - typedef NodeList::const_iterator ConstNodeItr; + typedef NodeEntry* NodeItr; + typedef const NodeEntry* ConstNodeItr; - typedef EdgeList::iterator EdgeItr; - typedef EdgeList::const_iterator ConstEdgeItr; + typedef EdgeEntry* EdgeItr; + typedef const EdgeEntry* ConstEdgeItr; private: @@ -52,12 +53,14 @@ namespace PBQP { private: - class NodeEntry { + class NodeEntry : public llvm::ilist_node { + friend struct llvm::ilist_sentinel_traits; private: Vector costs; AdjEdgeList adjEdges; unsigned degree; void *data; + NodeEntry() : costs(0, 0) {} public: NodeEntry(const Vector &costs) : costs(costs), degree(0) {} Vector& getCosts() { return costs; } @@ -77,12 +80,14 @@ namespace PBQP { void* getData() { return data; } }; - class EdgeEntry { + class EdgeEntry : public llvm::ilist_node { + friend struct llvm::ilist_sentinel_traits; private: NodeItr node1, node2; Matrix costs; AdjEdgeItr node1AEItr, node2AEItr; void *data; + EdgeEntry() : costs(0, 0, 0) {} public: EdgeEntry(NodeItr node1, NodeItr node2, const Matrix &costs) : node1(node1), node2(node2), costs(costs) {} diff --git a/include/llvm/CodeGen/PBQP/HeuristicBase.h b/include/llvm/CodeGen/PBQP/HeuristicBase.h index 3fee18c..0c1fcb7 100644 --- a/include/llvm/CodeGen/PBQP/HeuristicBase.h +++ b/include/llvm/CodeGen/PBQP/HeuristicBase.h @@ -113,7 +113,7 @@ namespace PBQP { } /// \brief Add the given node to the list of nodes to be optimally reduced. - /// @return nItr Node iterator to be added. + /// @param nItr Node iterator to be added. /// /// You probably don't want to over-ride this, except perhaps to record /// statistics before calling this implementation. HeuristicBase relies on @@ -193,8 +193,9 @@ namespace PBQP { /// reduce list. /// @return True if a reduction takes place, false if the heuristic reduce /// list is empty. - void heuristicReduce() { + bool heuristicReduce() { llvm_unreachable("Must be implemented in derived class."); + return false; } /// \brief Prepare a change in the costs on the given edge. diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h index 07b3b45..7bd5764 100644 --- a/include/llvm/CodeGen/Passes.h +++ b/include/llvm/CodeGen/Passes.h @@ -404,6 +404,10 @@ namespace llvm { /// inserting cmov instructions. extern char &EarlyIfConverterID; + /// StackSlotColoring - This pass performs stack coloring and merging. + /// It merges disjoint allocas to reduce the stack size. + extern char &StackColoringID; + /// IfConverter - This pass performs machine code if conversion. 
extern char &IfConverterID; diff --git a/include/llvm/CodeGen/PseudoSourceValue.h b/include/llvm/CodeGen/PseudoSourceValue.h index 7dab4f9..8f52d3b 100644 --- a/include/llvm/CodeGen/PseudoSourceValue.h +++ b/include/llvm/CodeGen/PseudoSourceValue.h @@ -50,7 +50,6 @@ namespace llvm { /// classof - Methods for support type inquiry through isa, cast, and /// dyn_cast: /// - static inline bool classof(const PseudoSourceValue *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == PseudoSourceValueVal || V->getValueID() == FixedStackPseudoSourceValueVal; @@ -90,9 +89,6 @@ namespace llvm { /// classof - Methods for support type inquiry through isa, cast, and /// dyn_cast: /// - static inline bool classof(const FixedStackPseudoSourceValue *) { - return true; - } static inline bool classof(const Value *V) { return V->getValueID() == FixedStackPseudoSourceValueVal; } diff --git a/include/llvm/CodeGen/RegAllocPBQP.h b/include/llvm/CodeGen/RegAllocPBQP.h index bce3ec7..acfc07d 100644 --- a/include/llvm/CodeGen/RegAllocPBQP.h +++ b/include/llvm/CodeGen/RegAllocPBQP.h @@ -109,8 +109,8 @@ namespace llvm { /// class to support additional constraints for your architecture. class PBQPBuilder { private: - PBQPBuilder(const PBQPBuilder&) {} - void operator=(const PBQPBuilder&) {} + PBQPBuilder(const PBQPBuilder&) LLVM_DELETED_FUNCTION; + void operator=(const PBQPBuilder&) LLVM_DELETED_FUNCTION; public: typedef std::set RegSet; diff --git a/include/llvm/CodeGen/RegisterClassInfo.h b/include/llvm/CodeGen/RegisterClassInfo.h index 400e1f4..4467b62 100644 --- a/include/llvm/CodeGen/RegisterClassInfo.h +++ b/include/llvm/CodeGen/RegisterClassInfo.h @@ -106,25 +106,6 @@ public: return CalleeSaved[N-1]; return 0; } - - /// isReserved - Returns true when PhysReg is a reserved register. - /// - /// Reserved registers may belong to an allocatable register class, but the - /// target has explicitly requested that they are not used. - /// - bool isReserved(unsigned PhysReg) const { - return Reserved.test(PhysReg); - } - - /// isAllocatable - Returns true when PhysReg belongs to an allocatable - /// register class and it hasn't been reserved. - /// - /// Allocatable registers may show up in the allocation order of some virtual - /// register, so a register allocator needs to track its liveness and - /// availability. - bool isAllocatable(unsigned PhysReg) const { - return TRI->isInAllocatableClass(PhysReg) && !isReserved(PhysReg); - } }; } // end namespace llvm diff --git a/include/llvm/CodeGen/RegisterPressure.h b/include/llvm/CodeGen/RegisterPressure.h index 2043155..30326d0 100644 --- a/include/llvm/CodeGen/RegisterPressure.h +++ b/include/llvm/CodeGen/RegisterPressure.h @@ -43,7 +43,7 @@ struct RegisterPressure { /// class. This is only useful to account for spilling or rematerialization. void decrease(const TargetRegisterClass *RC, const TargetRegisterInfo *TRI); - void dump(const TargetRegisterInfo *TRI); + void dump(const TargetRegisterInfo *TRI) const; }; /// RegisterPressure computed within a region of instructions delimited by @@ -197,6 +197,7 @@ public: /// This result is complete if either advance() or recede() has returned true, /// or if closeRegion() was explicitly invoked. RegisterPressure &getPressure() { return P; } + const RegisterPressure &getPressure() const { return P; } /// Get the register set pressure at the current position, which may be less /// than the pressure across the traversed region. 
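The PseudoSourceValue hunk above is part of a broader cleanup in this patch: the self-typed classof overloads that unconditionally returned true are removed because isa/dyn_cast only ever needs the overload taking the base class. A minimal usage sketch (the helper name asFixedStack is invented):

    #include "llvm/CodeGen/PseudoSourceValue.h"
    #include "llvm/Support/Casting.h"

    using namespace llvm;

    // dyn_cast consults FixedStackPseudoSourceValue::classof(const Value*);
    // no overload taking the derived type itself is required.
    static const FixedStackPseudoSourceValue *asFixedStack(const Value *V) {
      return dyn_cast<FixedStackPseudoSourceValue>(V);
    }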
diff --git a/include/llvm/CodeGen/RegisterScavenging.h b/include/llvm/CodeGen/RegisterScavenging.h index 3986a8d..08d3169 100644 --- a/include/llvm/CodeGen/RegisterScavenging.h +++ b/include/llvm/CodeGen/RegisterScavenging.h @@ -18,6 +18,7 @@ #define LLVM_CODEGEN_REGISTER_SCAVENGING_H #include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/ADT/BitVector.h" namespace llvm { @@ -59,10 +60,6 @@ class RegScavenger { /// BitVector CalleeSavedRegs; - /// ReservedRegs - A bitvector of reserved registers. - /// - BitVector ReservedRegs; - /// RegsAvailable - The current state of all the physical registers immediately /// before MBBI. One bit per physical register. If bit is set that means it's /// available, unset means the register is currently being used. @@ -130,12 +127,12 @@ public: void setUsed(unsigned Reg); private: /// isReserved - Returns true if a register is reserved. It is never "unused". - bool isReserved(unsigned Reg) const { return ReservedRegs.test(Reg); } + bool isReserved(unsigned Reg) const { return MRI->isReserved(Reg); } /// isUsed / isUnused - Test if a register is currently being used. /// bool isUsed(unsigned Reg) const { - return !RegsAvailable.test(Reg) || ReservedRegs.test(Reg); + return !RegsAvailable.test(Reg) || isReserved(Reg); } /// isAliasUsed - Is Reg or an alias currently in use? diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h index 85ab47b..7e0ca14 100644 --- a/include/llvm/CodeGen/ScheduleDAG.h +++ b/include/llvm/CodeGen/ScheduleDAG.h @@ -31,6 +31,7 @@ namespace llvm { class MachineFunction; class MachineRegisterInfo; class MachineInstr; + struct MCSchedClassDesc; class TargetRegisterInfo; class ScheduleDAG; class SDNode; @@ -52,6 +53,13 @@ namespace llvm { Order ///< Any other ordering dependency. }; + enum OrderKind { + Barrier, ///< An unknown scheduling barrier. + MayAliasMem, ///< Nonvolatile load/Store instructions that may alias. + MustAliasMem, ///< Nonvolatile load/Store instructions that must alias. + Artificial ///< Arbitrary weak DAG edge (no actual dependence). + }; + private: /// Dep - A pointer to the depending/depended-on SUnit, and an enum /// indicating the kind of the dependency. @@ -65,26 +73,18 @@ namespace llvm { unsigned Reg; /// Order - Additional information about Order dependencies. - struct { - /// isNormalMemory - True if both sides of the dependence - /// access memory in non-volatile and fully modeled ways. - bool isNormalMemory : 1; - - /// isMustAlias - True if both sides of the dependence are known to - /// access the same memory. - bool isMustAlias : 1; - - /// isArtificial - True if this is an artificial dependency, meaning - /// it is not necessary for program correctness, and may be safely - /// deleted if necessary. - bool isArtificial : 1; - } Order; + unsigned OrdKind; // enum OrderKind } Contents; /// Latency - The time associated with this edge. Often this is just /// the value of the Latency field of the predecessor, however advanced /// models may provide additional information about specific edges. unsigned Latency; + /// Record MinLatency seperately from "expected" Latency. + /// + /// FIXME: this field is not packed on LP64. Convert to 16-bit DAG edge + /// latency after introducing saturating truncation. + unsigned MinLatency; public: /// SDep - Construct a null SDep. This is only for use by container @@ -93,28 +93,28 @@ namespace llvm { SDep() : Dep(0, Data) {} /// SDep - Construct an SDep with the specified values. 
- SDep(SUnit *S, Kind kind, unsigned latency = 1, unsigned Reg = 0, - bool isNormalMemory = false, bool isMustAlias = false, - bool isArtificial = false) - : Dep(S, kind), Contents(), Latency(latency) { + SDep(SUnit *S, Kind kind, unsigned Reg) + : Dep(S, kind), Contents() { switch (kind) { + default: + llvm_unreachable("Reg given for non-register dependence!"); case Anti: case Output: assert(Reg != 0 && "SDep::Anti and SDep::Output must use a non-zero Reg!"); - // fall through - case Data: - assert(!isMustAlias && "isMustAlias only applies with SDep::Order!"); - assert(!isArtificial && "isArtificial only applies with SDep::Order!"); Contents.Reg = Reg; + Latency = 0; break; - case Order: - assert(Reg == 0 && "Reg given for non-register dependence!"); - Contents.Order.isNormalMemory = isNormalMemory; - Contents.Order.isMustAlias = isMustAlias; - Contents.Order.isArtificial = isArtificial; + case Data: + Contents.Reg = Reg; + Latency = 1; break; } + MinLatency = Latency; + } + SDep(SUnit *S, OrderKind kind) + : Dep(S, Order), Contents(), Latency(0), MinLatency(0) { + Contents.OrdKind = kind; } /// Return true if the specified SDep is equivalent except for latency. @@ -126,16 +126,14 @@ namespace llvm { case Output: return Contents.Reg == Other.Contents.Reg; case Order: - return Contents.Order.isNormalMemory == - Other.Contents.Order.isNormalMemory && - Contents.Order.isMustAlias == Other.Contents.Order.isMustAlias && - Contents.Order.isArtificial == Other.Contents.Order.isArtificial; + return Contents.OrdKind == Other.Contents.OrdKind; } llvm_unreachable("Invalid dependency kind!"); } bool operator==(const SDep &Other) const { - return overlaps(Other) && Latency == Other.Latency; + return overlaps(Other) + && Latency == Other.Latency && MinLatency == Other.MinLatency; } bool operator!=(const SDep &Other) const { @@ -155,6 +153,18 @@ namespace llvm { Latency = Lat; } + /// getMinLatency - Return the minimum latency for this edge. Minimum + /// latency is used for scheduling groups, while normal (expected) latency + /// is for instruction cost and critical path. + unsigned getMinLatency() const { + return MinLatency; + } + + /// setMinLatency - Set the minimum latency for this edge. + void setMinLatency(unsigned Lat) { + MinLatency = Lat; + } + //// getSUnit - Return the SUnit to which this edge points. SUnit *getSUnit() const { return Dep.getPointer(); @@ -179,20 +189,21 @@ namespace llvm { /// memory accesses where both sides of the dependence access memory /// in non-volatile and fully modeled ways. bool isNormalMemory() const { - return getKind() == Order && Contents.Order.isNormalMemory; + return getKind() == Order && (Contents.OrdKind == MayAliasMem + || Contents.OrdKind == MustAliasMem); } /// isMustAlias - Test if this is an Order dependence that is marked /// as "must alias", meaning that the SUnits at either end of the edge /// have a memory dependence on a known memory location. bool isMustAlias() const { - return getKind() == Order && Contents.Order.isMustAlias; + return getKind() == Order && Contents.OrdKind == MustAliasMem; } /// isArtificial - Test if this is an Order dependence that is marked /// as "artificial", meaning it isn't necessary for correctness. bool isArtificial() const { - return getKind() == Order && Contents.Order.isArtificial; + return getKind() == Order && Contents.OrdKind == Artificial; } /// isAssignedRegDep - Test if this is a Data dependence that is @@ -239,6 +250,8 @@ namespace llvm { // this node was cloned. 
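To illustrate the reworked SDep constructors above: register dependencies now take the register directly, ordering edges take an OrderKind, and the new minimum latency travels with the edge. The SUnit pointers, register number, and latency values below are placeholders.

    #include "llvm/CodeGen/ScheduleDAG.h"

    using namespace llvm;

    static void addExampleDeps(SUnit *Def, SUnit *Use, SUnit *Store,
                               unsigned Reg) {
      // Register data dependence; the constructor defaults latency to 1.
      SDep DataDep(Def, SDep::Data, Reg);
      DataDep.setLatency(3);    // expected latency, e.g. from the machine model
      DataDep.setMinLatency(1); // minimum latency used for scheduling groups
      Use->addPred(DataDep);

      // Memory ordering edge between may-aliasing accesses; zero latency.
      Store->addPred(SDep(Use, SDep::MayAliasMem));

      // Artificial edge: a pure heuristic that may safely be dropped.
      Store->addPred(SDep(Def, SDep::Artificial));
    }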
// (SD scheduling only) + const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass. + // Preds/Succs - The SUnits before/after us in the graph. SmallVector Preds; // All sunit predecessors. SmallVector Succs; // All sunit successors. @@ -286,7 +299,7 @@ namespace llvm { /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent /// an SDNode and any nodes flagged to it. SUnit(SDNode *node, unsigned nodenum) - : Node(node), Instr(0), OrigNode(0), NodeNum(nodenum), + : Node(node), Instr(0), OrigNode(0), SchedClass(0), NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false), @@ -300,7 +313,7 @@ namespace llvm { /// SUnit - Construct an SUnit for post-regalloc scheduling to represent /// a MachineInstr. SUnit(MachineInstr *instr, unsigned nodenum) - : Node(0), Instr(instr), OrigNode(0), NodeNum(nodenum), + : Node(0), Instr(instr), OrigNode(0), SchedClass(0), NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false), @@ -313,7 +326,7 @@ namespace llvm { /// SUnit - Construct a placeholder SUnit. SUnit() - : Node(0), Instr(0), OrigNode(0), NodeNum(~0u), + : Node(0), Instr(0), OrigNode(0), SchedClass(0), NodeNum(~0u), NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0), NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false), isCallOp(false), isTwoAddress(false), @@ -555,16 +568,6 @@ namespace llvm { unsigned VerifyScheduledDAG(bool isBottomUp); #endif - protected: - /// ComputeLatency - Compute node latency. - /// - virtual void computeLatency(SUnit *SU) = 0; - - /// ForceUnitLatencies - Return true if all scheduling edges should be given - /// a latency value of one. The default is to return false; schedulers may - /// override this as needed. - virtual bool forceUnitLatencies() const { return false; } - private: // Return the MCInstrDesc of this SDNode or NULL. const MCInstrDesc *getNodeDesc(const SDNode *Node) const; diff --git a/include/llvm/CodeGen/ScheduleDAGILP.h b/include/llvm/CodeGen/ScheduleDAGILP.h new file mode 100644 index 0000000..1aa4058 --- /dev/null +++ b/include/llvm/CodeGen/ScheduleDAGILP.h @@ -0,0 +1,86 @@ +//===- ScheduleDAGILP.h - ILP metric for ScheduleDAGInstrs ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Definition of an ILP metric for machine level instruction scheduling. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_CODEGEN_SCHEDULEDAGILP_H +#define LLVM_CODEGEN_SCHEDULEDAGILP_H + +#include "llvm/Support/DataTypes.h" +#include + +namespace llvm { + +class raw_ostream; +class ScheduleDAGInstrs; +class SUnit; + +/// \brief Represent the ILP of the subDAG rooted at a DAG node. +struct ILPValue { + unsigned InstrCount; + unsigned Cycles; + + ILPValue(): InstrCount(0), Cycles(0) {} + + ILPValue(unsigned count, unsigned cycles): + InstrCount(count), Cycles(cycles) {} + + bool isValid() const { return Cycles > 0; } + + // Order by the ILP metric's value. 
+ bool operator<(ILPValue RHS) const { + return (uint64_t)InstrCount * RHS.Cycles + < (uint64_t)Cycles * RHS.InstrCount; + } + bool operator>(ILPValue RHS) const { + return RHS < *this; + } + bool operator<=(ILPValue RHS) const { + return (uint64_t)InstrCount * RHS.Cycles + <= (uint64_t)Cycles * RHS.InstrCount; + } + bool operator>=(ILPValue RHS) const { + return RHS <= *this; + } + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + void print(raw_ostream &OS) const; + + void dump() const; +#endif +}; + +/// \brief Compute the values of each DAG node for an ILP metric. +/// +/// This metric assumes that the DAG is a forest of trees with roots at the +/// bottom of the schedule. +class ScheduleDAGILP { + bool IsBottomUp; + std::vector ILPValues; + +public: + ScheduleDAGILP(bool IsBU): IsBottomUp(IsBU) {} + + /// \brief Initialize the result data with the size of the DAG. + void resize(unsigned NumSUnits); + + /// \brief Compute the ILP metric for the subDAG at this root. + void computeILP(const SUnit *Root); + + /// \brief Get the ILP value for a DAG node. + ILPValue getILP(const SUnit *SU); +}; + +raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val); + +} // namespace llvm + +#endif diff --git a/include/llvm/CodeGen/ScheduleDAGInstrs.h b/include/llvm/CodeGen/ScheduleDAGInstrs.h index 1bde942..4bcd35a 100644 --- a/include/llvm/CodeGen/ScheduleDAGInstrs.h +++ b/include/llvm/CodeGen/ScheduleDAGInstrs.h @@ -18,6 +18,7 @@ #include "llvm/CodeGen/MachineDominators.h" #include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/ScheduleDAG.h" +#include "llvm/CodeGen/TargetSchedule.h" #include "llvm/Support/Compiler.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/ADT/SmallSet.h" @@ -30,72 +31,6 @@ namespace llvm { class LiveIntervals; class RegPressureTracker; - /// LoopDependencies - This class analyzes loop-oriented register - /// dependencies, which are used to guide scheduling decisions. - /// For example, loop induction variable increments should be - /// scheduled as soon as possible after the variable's last use. - /// - class LoopDependencies { - const MachineDominatorTree &MDT; - - public: - typedef std::map > - LoopDeps; - LoopDeps Deps; - - LoopDependencies(const MachineDominatorTree &mdt) : MDT(mdt) {} - - /// VisitLoop - Clear out any previous state and analyze the given loop. 
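Stepping back to the new ScheduleDAGILP interface above: ILPValue orders subtrees by InstrCount/Cycles without dividing, via cross multiplication, so for example {6 instrs, 2 cycles} compares greater than {5, 2}, which compares greater than {5, 3}. The sketch below shows one plausible way a heuristic might consume the metric; the root test, the threshold of 4, and the function name are assumptions, not part of the patch.

    #include "llvm/CodeGen/ScheduleDAG.h"
    #include "llvm/CodeGen/ScheduleDAGILP.h"
    #include <vector>

    using namespace llvm;

    static bool regionHasHighILP(const std::vector<SUnit> &SUnits) {
      ScheduleDAGILP ILP(/*IsBottomUp=*/true);
      ILP.resize(SUnits.size());
      for (unsigned i = 0, e = SUnits.size(); i != e; ++i)
        if (SUnits[i].Succs.empty())      // a root of the bottom-up forest
          ILP.computeILP(&SUnits[i]);
      ILPValue Threshold(4, 1);           // assumed cutoff: ILP of 4
      for (unsigned i = 0, e = SUnits.size(); i != e; ++i)
        if (ILP.getILP(&SUnits[i]) > Threshold)
          return true;
      return false;
    }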
- /// - void VisitLoop(const MachineLoop *Loop) { - assert(Deps.empty() && "stale loop dependencies"); - - MachineBasicBlock *Header = Loop->getHeader(); - SmallSet LoopLiveIns; - for (MachineBasicBlock::livein_iterator LI = Header->livein_begin(), - LE = Header->livein_end(); LI != LE; ++LI) - LoopLiveIns.insert(*LI); - - const MachineDomTreeNode *Node = MDT.getNode(Header); - const MachineBasicBlock *MBB = Node->getBlock(); - assert(Loop->contains(MBB) && - "Loop does not contain header!"); - VisitRegion(Node, MBB, Loop, LoopLiveIns); - } - - private: - void VisitRegion(const MachineDomTreeNode *Node, - const MachineBasicBlock *MBB, - const MachineLoop *Loop, - const SmallSet &LoopLiveIns) { - unsigned Count = 0; - for (MachineBasicBlock::const_iterator I = MBB->begin(), E = MBB->end(); - I != E; ++I) { - const MachineInstr *MI = I; - if (MI->isDebugValue()) - continue; - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = MI->getOperand(i); - if (!MO.isReg() || !MO.isUse()) - continue; - unsigned MOReg = MO.getReg(); - if (LoopLiveIns.count(MOReg)) - Deps.insert(std::make_pair(MOReg, std::make_pair(&MO, Count))); - } - ++Count; // Not every iteration due to dbg_value above. - } - - const std::vector &Children = Node->getChildren(); - for (std::vector::const_iterator I = - Children.begin(), E = Children.end(); I != E; ++I) { - const MachineDomTreeNode *ChildNode = *I; - MachineBasicBlock *ChildBlock = ChildNode->getBlock(); - if (Loop->contains(ChildBlock)) - VisitRegion(ChildNode, ChildBlock, Loop, LoopLiveIns); - } - } - }; - /// An individual mapping from virtual register number to SUnit. struct VReg2SUnit { unsigned VirtReg; @@ -108,6 +43,15 @@ namespace llvm { } }; + /// Record a physical register access. + /// For non data-dependent uses, OpIdx == -1. + struct PhysRegSUOper { + SUnit *SU; + int OpIdx; + + PhysRegSUOper(SUnit *su, int op): SU(su), OpIdx(op) {} + }; + /// Combine a SparseSet with a 1x1 vector to track physical registers. /// The SparseSet allows iterating over the (few) live registers for quickly /// comparing against a regmask or clearing the set. @@ -116,7 +60,7 @@ namespace llvm { /// cleared between scheduling regions without freeing unused entries. class Reg2SUnitsMap { SparseSet PhysRegSet; - std::vector > SUnits; + std::vector > SUnits; public: typedef SparseSet::const_iterator const_iterator; @@ -140,7 +84,7 @@ namespace llvm { /// If this register is mapped, return its existing SUnits vector. /// Otherwise map the register and return an empty SUnits vector. - std::vector &operator[](unsigned Reg) { + std::vector &operator[](unsigned Reg) { bool New = PhysRegSet.insert(Reg).second; assert((!New || SUnits[Reg].empty()) && "stale SUnits vector"); (void)New; @@ -167,11 +111,13 @@ namespace llvm { const MachineLoopInfo &MLI; const MachineDominatorTree &MDT; const MachineFrameInfo *MFI; - const InstrItineraryData *InstrItins; /// Live Intervals provides reaching defs in preRA scheduling. LiveIntervals *LIS; + /// TargetSchedModel provides an interface to the machine model. + TargetSchedModel SchedModel; + /// isPostRA flag indicates vregs cannot be present. bool IsPostRA; @@ -223,10 +169,6 @@ namespace llvm { /// to minimize construction/destruction. std::vector PendingLoads; - /// LoopRegs - Track which registers are used for loop-carried dependencies. - /// - LoopDependencies LoopRegs; - /// DbgValues - Remember instruction that precedes DBG_VALUE. 
/// These are generated by buildSchedGraph but persist so they can be /// referenced when emitting the final schedule. @@ -244,6 +186,16 @@ namespace llvm { virtual ~ScheduleDAGInstrs() {} + /// \brief Get the machine model for instruction scheduling. + const TargetSchedModel *getSchedModel() const { return &SchedModel; } + + /// \brief Resolve and cache a resolved scheduling class for an SUnit. + const MCSchedClassDesc *getSchedClass(SUnit *SU) const { + if (!SU->SchedClass) + SU->SchedClass = SchedModel.resolveSchedClass(SU->getInstr()); + return SU->SchedClass; + } + /// begin - Return an iterator to the top of the current scheduling region. MachineBasicBlock::iterator begin() const { return RegionBegin; } @@ -284,20 +236,6 @@ namespace llvm { /// used by instructions in the fallthrough block. void addSchedBarrierDeps(); - /// computeLatency - Compute node latency. - /// - virtual void computeLatency(SUnit *SU); - - /// computeOperandLatency - Return dependence edge latency using - /// operand use/def information - /// - /// FindMin may be set to get the minimum vs. expected latency. Minimum - /// latency is used for scheduling groups, while expected latency is for - /// instruction cost and critical path. - virtual unsigned computeOperandLatency(SUnit *Def, SUnit *Use, - const SDep& dep, - bool FindMin = false) const; - /// schedule - Order nodes according to selected style, filling /// in the Sequence member. /// @@ -319,7 +257,7 @@ namespace llvm { protected: void initSUnits(); - void addPhysRegDataDeps(SUnit *SU, const MachineOperand &MO); + void addPhysRegDataDeps(SUnit *SU, unsigned OperIdx); void addPhysRegDeps(SUnit *SU, unsigned OperIdx); void addVRegDefDeps(SUnit *SU, unsigned OperIdx); void addVRegUseDeps(SUnit *SU, unsigned OperIdx); diff --git a/include/llvm/CodeGen/SchedulerRegistry.h b/include/llvm/CodeGen/SchedulerRegistry.h index a582b0c..836b73a 100644 --- a/include/llvm/CodeGen/SchedulerRegistry.h +++ b/include/llvm/CodeGen/SchedulerRegistry.h @@ -102,6 +102,11 @@ ScheduleDAGSDNodes *createVLIWDAGScheduler(SelectionDAGISel *IS, ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS, CodeGenOpt::Level OptLevel); +/// createDAGLinearizer - This creates a "no-scheduling" scheduler which +/// linearize the DAG using topological order. +ScheduleDAGSDNodes *createDAGLinearizer(SelectionDAGISel *IS, + CodeGenOpt::Level OptLevel); + } // end namespace llvm #endif diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h index 1ccfe54..619ee69 100644 --- a/include/llvm/CodeGen/SelectionDAG.h +++ b/include/llvm/CodeGen/SelectionDAG.h @@ -73,8 +73,8 @@ class SDDbgInfo { SmallVector ByvalParmDbgValues; DenseMap > DbgValMap; - void operator=(const SDDbgInfo&); // Do not implement. - SDDbgInfo(const SDDbgInfo&); // Do not implement. + void operator=(const SDDbgInfo&) LLVM_DELETED_FUNCTION; + SDDbgInfo(const SDDbgInfo&) LLVM_DELETED_FUNCTION; public: SDDbgInfo() {} @@ -222,8 +222,8 @@ private: DenseSet &visited, int level, bool &printed); - void operator=(const SelectionDAG&); // Do not implement. - SelectionDAG(const SelectionDAG&); // Do not implement. 
+ void operator=(const SelectionDAG&) LLVM_DELETED_FUNCTION; + SelectionDAG(const SelectionDAG&) LLVM_DELETED_FUNCTION; public: explicit SelectionDAG(const TargetMachine &TM, llvm::CodeGenOpt::Level); @@ -437,7 +437,13 @@ public: SDValue getRegisterMask(const uint32_t *RegMask); SDValue getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label); SDValue getBlockAddress(const BlockAddress *BA, EVT VT, - bool isTarget = false, unsigned char TargetFlags = 0); + int64_t Offset = 0, bool isTarget = false, + unsigned char TargetFlags = 0); + SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, + int64_t Offset = 0, + unsigned char TargetFlags = 0) { + return getBlockAddress(BA, VT, Offset, true, TargetFlags); + } SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N) { return getNode(ISD::CopyToReg, dl, MVT::Other, Chain, diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index db361ee..362e9af 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -216,8 +216,8 @@ class SDUse { /// this operand. SDUse **Prev, *Next; - SDUse(const SDUse &U); // Do not implement - void operator=(const SDUse &U); // Do not implement + SDUse(const SDUse &U) LLVM_DELETED_FUNCTION; + void operator=(const SDUse &U) LLVM_DELETED_FUNCTION; public: SDUse() : Val(), User(NULL), Prev(NULL), Next(NULL) {} @@ -662,9 +662,6 @@ public: /// void dumprWithDepth(const SelectionDAG *G = 0, unsigned depth = 100) const; - - static bool classof(const SDNode *) { return true; } - /// Profile - Gather unique data for the node. /// void Profile(FoldingSetNodeID &ID) const; @@ -956,7 +953,12 @@ public: const MachinePointerInfo &getPointerInfo() const { return MMO->getPointerInfo(); } - + + /// getAddressSpace - Return the address space for the associated pointer + unsigned getAddressSpace() const { + return getPointerInfo().getAddrSpace(); + } + /// refineAlignment - Update this MemSDNode's MachineMemOperand information /// to reflect the alignment of NewMMO, if it has a greater alignment. /// This must only be used when the new alignment applies to all users of @@ -971,7 +973,6 @@ public: } // Methods to support isa and dyn_cast - static bool classof(const MemSDNode *) { return true; } static bool classof(const SDNode *N) { // For some targets, we lower some target intrinsics to a MemIntrinsicNode // with either an intrinsic or a target opcode. 
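On the getBlockAddress change above: block-address nodes can now carry a byte offset, and getTargetBlockAddress is a thin wrapper that sets isTarget. A hedged sketch of how a target's custom lowering might use it (the function name is invented):

    #include "llvm/CodeGen/SelectionDAG.h"

    using namespace llvm;

    static SDValue lowerBlockAddressSketch(SelectionDAG &DAG,
                                           const BlockAddress *BA, EVT PtrVT,
                                           int64_t Offset, unsigned char TF) {
      // Folds the offset into the node instead of emitting a separate add;
      // equivalent to getBlockAddress(BA, PtrVT, Offset, /*isTarget=*/true, TF).
      return DAG.getTargetBlockAddress(BA, PtrVT, Offset, TF);
    }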
@@ -1011,11 +1012,6 @@ class AtomicSDNode : public MemSDNode { SubclassData |= SynchScope << 12; assert(getOrdering() == Ordering && "Ordering encoding error!"); assert(getSynchScope() == SynchScope && "Synch-scope encoding error!"); - - assert((readMem() || getOrdering() <= Monotonic) && - "Acquire/Release MachineMemOperand must be a load!"); - assert((writeMem() || getOrdering() <= Monotonic) && - "Acquire/Release MachineMemOperand must be a store!"); } public: @@ -1061,7 +1057,6 @@ public: } // Methods to support isa and dyn_cast - static bool classof(const AtomicSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::ATOMIC_CMP_SWAP || N->getOpcode() == ISD::ATOMIC_SWAP || @@ -1093,7 +1088,6 @@ public: } // Methods to support isa and dyn_cast - static bool classof(const MemIntrinsicSDNode *) { return true; } static bool classof(const SDNode *N) { // We lower some target intrinsics to their target opcode // early a node with a target opcode can be of this class @@ -1148,7 +1142,6 @@ public: } static bool isSplatMask(const int *Mask, EVT VT); - static bool classof(const ShuffleVectorSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::VECTOR_SHUFFLE; } @@ -1172,7 +1165,6 @@ public: bool isNullValue() const { return Value->isNullValue(); } bool isAllOnesValue() const { return Value->isAllOnesValue(); } - static bool classof(const ConstantSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::Constant || N->getOpcode() == ISD::TargetConstant; @@ -1207,9 +1199,6 @@ public: /// have to duplicate its logic everywhere it's called. bool isExactlyValue(double V) const { bool ignored; - // convert is not supported on this type - if (&Value->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble) - return false; APFloat Tmp(V); Tmp.convert(Value->getValueAPF().getSemantics(), APFloat::rmNearestTiesToEven, &ignored); @@ -1219,7 +1208,6 @@ public: static bool isValueValidForType(EVT VT, const APFloat& Val); - static bool classof(const ConstantFPSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::ConstantFP || N->getOpcode() == ISD::TargetConstantFP; @@ -1241,7 +1229,6 @@ public: // Return the address space this GlobalAddress belongs to. 
unsigned getAddressSpace() const; - static bool classof(const GlobalAddressSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::GlobalAddress || N->getOpcode() == ISD::TargetGlobalAddress || @@ -1261,7 +1248,6 @@ public: int getIndex() const { return FI; } - static bool classof(const FrameIndexSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::FrameIndex || N->getOpcode() == ISD::TargetFrameIndex; @@ -1281,7 +1267,6 @@ public: int getIndex() const { return JTI; } unsigned char getTargetFlags() const { return TargetFlags; } - static bool classof(const JumpTableSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::JumpTable || N->getOpcode() == ISD::TargetJumpTable; @@ -1342,7 +1327,6 @@ public: Type *getType() const; - static bool classof(const ConstantPoolSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::ConstantPool || N->getOpcode() == ISD::TargetConstantPool; @@ -1366,7 +1350,6 @@ public: int getIndex() const { return Index; } int64_t getOffset() const { return Offset; } - static bool classof(const TargetIndexSDNode*) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::TargetIndex; } @@ -1385,7 +1368,6 @@ public: MachineBasicBlock *getBasicBlock() const { return MBB; } - static bool classof(const BasicBlockSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::BasicBlock; } @@ -1395,7 +1377,7 @@ public: /// BUILD_VECTORs. class BuildVectorSDNode : public SDNode { // These are constructed as SDNodes and then cast to BuildVectorSDNodes. - explicit BuildVectorSDNode(); // Do not implement + explicit BuildVectorSDNode() LLVM_DELETED_FUNCTION; public: /// isConstantSplat - Check if this is a constant splat, and if so, find the /// smallest element size that splats the vector. If MinSplatBits is @@ -1410,7 +1392,6 @@ public: unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits = 0, bool isBigEndian = false); - static inline bool classof(const BuildVectorSDNode *) { return true; } static inline bool classof(const SDNode *N) { return N->getOpcode() == ISD::BUILD_VECTOR; } @@ -1431,7 +1412,6 @@ public: /// getValue - return the contained Value. 
const Value *getValue() const { return V; } - static bool classof(const SrcValueSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::SRCVALUE; } @@ -1446,7 +1426,6 @@ public: const MDNode *getMD() const { return MD; } - static bool classof(const MDNodeSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::MDNODE_SDNODE; } @@ -1463,7 +1442,6 @@ public: unsigned getReg() const { return Reg; } - static bool classof(const RegisterSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::Register; } @@ -1480,7 +1458,6 @@ public: const uint32_t *getRegMask() const { return RegMask; } - static bool classof(const RegisterMaskSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::RegisterMask; } @@ -1488,18 +1465,19 @@ public: class BlockAddressSDNode : public SDNode { const BlockAddress *BA; + int64_t Offset; unsigned char TargetFlags; friend class SelectionDAG; BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba, - unsigned char Flags) + int64_t o, unsigned char Flags) : SDNode(NodeTy, DebugLoc(), getSDVTList(VT)), - BA(ba), TargetFlags(Flags) { + BA(ba), Offset(o), TargetFlags(Flags) { } public: const BlockAddress *getBlockAddress() const { return BA; } + int64_t getOffset() const { return Offset; } unsigned char getTargetFlags() const { return TargetFlags; } - static bool classof(const BlockAddressSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::BlockAddress || N->getOpcode() == ISD::TargetBlockAddress; @@ -1517,7 +1495,6 @@ class EHLabelSDNode : public SDNode { public: MCSymbol *getLabel() const { return Label; } - static bool classof(const EHLabelSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::EH_LABEL; } @@ -1537,7 +1514,6 @@ public: const char *getSymbol() const { return Symbol; } unsigned char getTargetFlags() const { return TargetFlags; } - static bool classof(const ExternalSymbolSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::ExternalSymbol || N->getOpcode() == ISD::TargetExternalSymbol; @@ -1555,7 +1531,6 @@ public: ISD::CondCode get() const { return Condition; } - static bool classof(const CondCodeSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::CONDCODE; } @@ -1575,7 +1550,6 @@ class CvtRndSatSDNode : public SDNode { public: ISD::CvtCode getCvtCode() const { return CvtCode; } - static bool classof(const CvtRndSatSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::CONVERT_RNDSAT; } @@ -1594,7 +1568,6 @@ public: EVT getVT() const { return ValueType; } - static bool classof(const VTSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::VALUETYPE; } @@ -1638,7 +1611,6 @@ public: /// isUnindexed - Return true if this is NOT a pre/post inc/dec load/store. 
bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; } - static bool classof(const LSBaseSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::LOAD || N->getOpcode() == ISD::STORE; @@ -1670,7 +1642,6 @@ public: const SDValue &getBasePtr() const { return getOperand(1); } const SDValue &getOffset() const { return getOperand(2); } - static bool classof(const LoadSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::LOAD; } @@ -1701,7 +1672,6 @@ public: const SDValue &getBasePtr() const { return getOperand(2); } const SDValue &getOffset() const { return getOperand(3); } - static bool classof(const StoreSDNode *) { return true; } static bool classof(const SDNode *N) { return N->getOpcode() == ISD::STORE; } @@ -1742,7 +1712,6 @@ public: MemRefsEnd = NewMemRefsEnd; } - static bool classof(const MachineSDNode *) { return true; } static bool classof(const SDNode *N) { return N->isMachineOpcode(); } @@ -1750,10 +1719,10 @@ public: class SDNodeIterator : public std::iterator { - SDNode *Node; + const SDNode *Node; unsigned Operand; - SDNodeIterator(SDNode *N, unsigned Op) : Node(N), Operand(Op) {} + SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {} public: bool operator==(const SDNodeIterator& x) const { return Operand == x.Operand; @@ -1784,8 +1753,8 @@ public: return Operand - Other.Operand; } - static SDNodeIterator begin(SDNode *N) { return SDNodeIterator(N, 0); } - static SDNodeIterator end (SDNode *N) { + static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); } + static SDNodeIterator end (const SDNode *N) { return SDNodeIterator(N, N->getNumOperands()); } diff --git a/include/llvm/CodeGen/TargetSchedule.h b/include/llvm/CodeGen/TargetSchedule.h new file mode 100644 index 0000000..88e6105 --- /dev/null +++ b/include/llvm/CodeGen/TargetSchedule.h @@ -0,0 +1,167 @@ +//===-- llvm/CodeGen/TargetSchedule.h - Sched Machine Model -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a wrapper around MCSchedModel that allows the interface to +// benefit from information currently only available in TargetInstrInfo. +// Ideally, the scheduling interface would be fully defined in the MC layer. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGETSCHEDMODEL_H +#define LLVM_TARGET_TARGETSCHEDMODEL_H + +#include "llvm/Target/TargetSubtargetInfo.h" +#include "llvm/MC/MCSchedule.h" +#include "llvm/MC/MCInstrItineraries.h" +#include "llvm/ADT/SmallVector.h" + +namespace llvm { + +class TargetRegisterInfo; +class TargetSubtargetInfo; +class TargetInstrInfo; +class MachineInstr; + +/// Provide an instruction scheduling machine model to CodeGen passes. +class TargetSchedModel { + // For efficiency, hold a copy of the statically defined MCSchedModel for this + // processor. + MCSchedModel SchedModel; + InstrItineraryData InstrItins; + const TargetSubtargetInfo *STI; + const TargetInstrInfo *TII; + + SmallVector ResourceFactors; + unsigned MicroOpFactor; // Multiply to normalize microops to resource units. + unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor. 
+public: + TargetSchedModel(): STI(0), TII(0) {} + + /// \brief Initialize the machine model for instruction scheduling. + /// + /// The machine model API keeps a copy of the top-level MCSchedModel table + /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve + /// dynamic properties. + void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti, + const TargetInstrInfo *tii); + + /// Return the MCSchedClassDesc for this instruction. + const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const; + + /// \brief TargetInstrInfo getter. + const TargetInstrInfo *getInstrInfo() const { return TII; } + + /// \brief Return true if this machine model includes an instruction-level + /// scheduling model. + /// + /// This is more detailed than the course grain IssueWidth and default + /// latency properties, but separate from the per-cycle itinerary data. + bool hasInstrSchedModel() const; + + const MCSchedModel *getMCSchedModel() const { return &SchedModel; } + + /// \brief Return true if this machine model includes cycle-to-cycle itinerary + /// data. + /// + /// This models scheduling at each stage in the processor pipeline. + bool hasInstrItineraries() const; + + const InstrItineraryData *getInstrItineraries() const { + if (hasInstrItineraries()) + return &InstrItins; + return 0; + } + + /// \brief Identify the processor corresponding to the current subtarget. + unsigned getProcessorID() const { return SchedModel.getProcessorID(); } + + /// \brief Maximum number of micro-ops that may be scheduled per cycle. + unsigned getIssueWidth() const { return SchedModel.IssueWidth; } + + /// \brief Return the number of issue slots required for this MI. + unsigned getNumMicroOps(const MachineInstr *MI, + const MCSchedClassDesc *SC = 0) const; + + /// \brief Get the number of kinds of resources for this target. + unsigned getNumProcResourceKinds() const { + return SchedModel.getNumProcResourceKinds(); + } + + /// \brief Get a processor resource by ID for convenience. + const MCProcResourceDesc *getProcResource(unsigned PIdx) const { + return SchedModel.getProcResource(PIdx); + } + + typedef const MCWriteProcResEntry *ProcResIter; + + // \brief Get an iterator into the processor resources consumed by this + // scheduling class. + ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const { + // The subtarget holds a single resource table for all processors. + return STI->getWriteProcResBegin(SC); + } + ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const { + return STI->getWriteProcResEnd(SC); + } + + /// \brief Multiply the number of units consumed for a resource by this factor + /// to normalize it relative to other resources. + unsigned getResourceFactor(unsigned ResIdx) const { + return ResourceFactors[ResIdx]; + } + + /// \brief Multiply number of micro-ops by this factor to normalize it + /// relative to other resources. + unsigned getMicroOpFactor() const { + return MicroOpFactor; + } + + /// \brief Multiply cycle count by this factor to normalize it relative to + /// other resources. This is the number of resource units per cycle. + unsigned getLatencyFactor() const { + return ResourceLCM; + } + + /// \brief Compute operand latency based on the available machine model. + /// + /// Computes and return the latency of the given data dependent def and use + /// when the operand indices are already known. UseMI may be NULL for an + /// unknown user. + /// + /// FindMin may be set to get the minimum vs. expected latency. 
Minimum + /// latency is used for scheduling groups, while expected latency is for + /// instruction cost and critical path. + unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, + const MachineInstr *UseMI, unsigned UseOperIdx, + bool FindMin) const; + + /// \brief Compute the instruction latency based on the available machine + /// model. + /// + /// Compute and return the expected latency of this instruction independent of + /// a particular use. computeOperandLatency is the prefered API, but this is + /// occasionally useful to help estimate instruction cost. + unsigned computeInstrLatency(const MachineInstr *MI) const; + + /// \brief Output dependency latency of a pair of defs of the same register. + /// + /// This is typically one cycle. + unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx, + const MachineInstr *DepMI) const; + +private: + /// getDefLatency is a helper for computeOperandLatency. Return the + /// instruction's latency if operand lookup is not required. + /// Otherwise return -1. + int getDefLatency(const MachineInstr *DefMI, bool FindMin) const; +}; + +} // namespace llvm + +#endif diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h index eb38cd3..2401992 100644 --- a/include/llvm/CodeGen/ValueTypes.h +++ b/include/llvm/CodeGen/ValueTypes.h @@ -56,50 +56,56 @@ namespace llvm { FIRST_FP_VALUETYPE = f16, LAST_FP_VALUETYPE = ppcf128, - v2i8 = 13, // 2 x i8 - v4i8 = 14, // 4 x i8 - v8i8 = 15, // 8 x i8 - v16i8 = 16, // 16 x i8 - v32i8 = 17, // 32 x i8 - v2i16 = 18, // 2 x i16 - v4i16 = 19, // 4 x i16 - v8i16 = 20, // 8 x i16 - v16i16 = 21, // 16 x i16 - v2i32 = 22, // 2 x i32 - v4i32 = 23, // 4 x i32 - v8i32 = 24, // 8 x i32 - v16i32 = 25, // 16 x i32 - v1i64 = 26, // 1 x i64 - v2i64 = 27, // 2 x i64 - v4i64 = 28, // 4 x i64 - v8i64 = 29, // 8 x i64 - v16i64 = 30, // 16 x i64 - - v2f16 = 31, // 2 x f16 - v2f32 = 32, // 2 x f32 - v4f32 = 33, // 4 x f32 - v8f32 = 34, // 8 x f32 - v2f64 = 35, // 2 x f64 - v4f64 = 36, // 4 x f64 - - FIRST_VECTOR_VALUETYPE = v2i8, + v2i1 = 13, // 2 x i1 + v4i1 = 14, // 4 x i1 + v8i1 = 15, // 8 x i1 + v16i1 = 16, // 16 x i1 + v2i8 = 17, // 2 x i8 + v4i8 = 18, // 4 x i8 + v8i8 = 19, // 8 x i8 + v16i8 = 20, // 16 x i8 + v32i8 = 21, // 32 x i8 + v1i16 = 22, // 1 x i16 + v2i16 = 23, // 2 x i16 + v4i16 = 24, // 4 x i16 + v8i16 = 25, // 8 x i16 + v16i16 = 26, // 16 x i16 + v1i32 = 27, // 1 x i32 + v2i32 = 28, // 2 x i32 + v4i32 = 29, // 4 x i32 + v8i32 = 30, // 8 x i32 + v16i32 = 31, // 16 x i32 + v1i64 = 32, // 1 x i64 + v2i64 = 33, // 2 x i64 + v4i64 = 34, // 4 x i64 + v8i64 = 35, // 8 x i64 + v16i64 = 36, // 16 x i64 + + v2f16 = 37, // 2 x f16 + v2f32 = 38, // 2 x f32 + v4f32 = 39, // 4 x f32 + v8f32 = 40, // 8 x f32 + v2f64 = 41, // 2 x f64 + v4f64 = 42, // 4 x f64 + + FIRST_VECTOR_VALUETYPE = v2i1, LAST_VECTOR_VALUETYPE = v4f64, - FIRST_INTEGER_VECTOR_VALUETYPE = v2i8, + FIRST_INTEGER_VECTOR_VALUETYPE = v2i1, LAST_INTEGER_VECTOR_VALUETYPE = v16i64, FIRST_FP_VECTOR_VALUETYPE = v2f16, LAST_FP_VECTOR_VALUETYPE = v4f64, - x86mmx = 37, // This is an X86 MMX value + x86mmx = 43, // This is an X86 MMX value - Glue = 38, // This glues nodes together during pre-RA sched + Glue = 44, // This glues nodes together during pre-RA sched - isVoid = 39, // This has no value + isVoid = 45, // This has no value - Untyped = 40, // This value takes a register, but has + Untyped = 46, // This value takes a register, but has // unspecified type. 
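With the TargetSchedModel wrapper above now fully declared, a pass outside ScheduleDAGInstrs could query latencies roughly as follows. Passing the MCSchedModel in explicitly sidesteps the question of where to fetch it from; the function name and the unit-latency fallback are assumptions.

    #include "llvm/CodeGen/TargetSchedule.h"

    using namespace llvm;

    static unsigned defUseLatencySketch(const MCSchedModel &SM,
                                        const TargetSubtargetInfo *STI,
                                        const TargetInstrInfo *TII,
                                        const MachineInstr *DefMI, unsigned DefOp,
                                        const MachineInstr *UseMI, unsigned UseOp) {
      TargetSchedModel SchedModel;
      SchedModel.init(SM, STI, TII);
      if (SchedModel.hasInstrSchedModel() || SchedModel.hasInstrItineraries())
        return SchedModel.computeOperandLatency(DefMI, DefOp, UseMI, UseOp,
                                                /*FindMin=*/false);
      return 1; // assumed fallback when no per-instruction data is available
    }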
The register class // will be determined by the opcode. - LAST_VALUETYPE = 41, // This always remains at the end of the list. + LAST_VALUETYPE = 47, // This always remains at the end of the list. // This is the current maximum for LAST_VALUETYPE. // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors @@ -175,6 +181,18 @@ namespace llvm { SimpleTy <= MVT::LAST_VECTOR_VALUETYPE); } + /// is16BitVector - Return true if this is a 16-bit vector type. + bool is16BitVector() const { + return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 || + SimpleTy == MVT::v16i1); + } + + /// is32BitVector - Return true if this is a 32-bit vector type. + bool is32BitVector() const { + return (SimpleTy == MVT::v4i8 || SimpleTy == MVT::v2i16 || + SimpleTy == MVT::v1i32); + } + /// is64BitVector - Return true if this is a 64-bit vector type. bool is64BitVector() const { return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 || @@ -233,15 +251,21 @@ namespace llvm { switch (SimpleTy) { default: llvm_unreachable("Not a vector MVT!"); + case v2i1 : + case v4i1 : + case v8i1 : + case v16i1: return i1; case v2i8 : case v4i8 : case v8i8 : case v16i8: case v32i8: return i8; + case v1i16: case v2i16: case v4i16: case v8i16: case v16i16: return i16; + case v1i32: case v2i32: case v4i32: case v8i32: @@ -265,21 +289,25 @@ namespace llvm { default: llvm_unreachable("Not a vector MVT!"); case v32i8: return 32; + case v16i1: case v16i8: case v16i16: case v16i32: case v16i64:return 16; + case v8i1: case v8i8 : case v8i16: case v8i32: case v8i64: case v8f32: return 8; + case v4i1: case v4i8: case v4i16: case v4i32: case v4i64: case v4f32: case v4f64: return 4; + case v2i1: case v2i8: case v2i16: case v2i32: @@ -287,6 +315,8 @@ namespace llvm { case v2f16: case v2f32: case v2f64: return 2; + case v1i16: + case v1i32: case v1i64: return 1; } } @@ -302,15 +332,21 @@ namespace llvm { default: llvm_unreachable("getSizeInBits called on extended MVT."); case i1 : return 1; - case i8 : return 8; + case v2i1: return 2; + case v4i1: return 4; + case i8 : + case v8i1: return 8; case i16 : case f16: - case v2i8: return 16; + case v16i1: + case v2i8: + case v1i16: return 16; case f32 : case i32 : case v4i8: case v2i16: - case v2f16: return 32; + case v2f16: + case v1i32: return 32; case x86mmx: case f64 : case i64 : @@ -393,6 +429,12 @@ namespace llvm { switch (VT.SimpleTy) { default: break; + case MVT::i1: + if (NumElements == 2) return MVT::v2i1; + if (NumElements == 4) return MVT::v4i1; + if (NumElements == 8) return MVT::v8i1; + if (NumElements == 16) return MVT::v16i1; + break; case MVT::i8: if (NumElements == 2) return MVT::v2i8; if (NumElements == 4) return MVT::v4i8; @@ -401,12 +443,14 @@ namespace llvm { if (NumElements == 32) return MVT::v32i8; break; case MVT::i16: + if (NumElements == 1) return MVT::v1i16; if (NumElements == 2) return MVT::v2i16; if (NumElements == 4) return MVT::v4i16; if (NumElements == 8) return MVT::v8i16; if (NumElements == 16) return MVT::v16i16; break; case MVT::i32: + if (NumElements == 1) return MVT::v1i32; if (NumElements == 2) return MVT::v2i32; if (NumElements == 4) return MVT::v4i32; if (NumElements == 8) return MVT::v8i32; @@ -529,6 +573,16 @@ namespace llvm { return isSimple() ? V.isVector() : isExtendedVector(); } + /// is16BitVector - Return true if this is a 16-bit vector type. + bool is16BitVector() const { + return isSimple() ? V.is16BitVector() : isExtended16BitVector(); + } + + /// is32BitVector - Return true if this is a 32-bit vector type. 
+ bool is32BitVector() const { + return isSimple() ? V.is32BitVector() : isExtended32BitVector(); + } + /// is64BitVector - Return true if this is a 64-bit vector type. bool is64BitVector() const { return isSimple() ? V.is64BitVector() : isExtended64BitVector(); @@ -740,6 +794,8 @@ namespace llvm { bool isExtendedFloatingPoint() const; bool isExtendedInteger() const; bool isExtendedVector() const; + bool isExtended16BitVector() const; + bool isExtended32BitVector() const; bool isExtended64BitVector() const; bool isExtended128BitVector() const; bool isExtended256BitVector() const; diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td index f4b75bd..a707f88 100644 --- a/include/llvm/CodeGen/ValueTypes.td +++ b/include/llvm/CodeGen/ValueTypes.td @@ -33,36 +33,42 @@ def f80 : ValueType<80 , 10>; // 80-bit floating point value def f128 : ValueType<128, 11>; // 128-bit floating point value def ppcf128: ValueType<128, 12>; // PPC 128-bit floating point value -def v2i8 : ValueType<16 , 13>; // 2 x i8 vector value -def v4i8 : ValueType<32 , 14>; // 4 x i8 vector value -def v8i8 : ValueType<64 , 15>; // 8 x i8 vector value -def v16i8 : ValueType<128, 16>; // 16 x i8 vector value -def v32i8 : ValueType<256, 17>; // 32 x i8 vector value -def v2i16 : ValueType<32 , 18>; // 2 x i16 vector value -def v4i16 : ValueType<64 , 19>; // 4 x i16 vector value -def v8i16 : ValueType<128, 20>; // 8 x i16 vector value -def v16i16 : ValueType<256, 21>; // 16 x i16 vector value -def v2i32 : ValueType<64 , 22>; // 2 x i32 vector value -def v4i32 : ValueType<128, 23>; // 4 x i32 vector value -def v8i32 : ValueType<256, 24>; // 8 x i32 vector value -def v16i32 : ValueType<512, 25>; // 16 x i32 vector value -def v1i64 : ValueType<64 , 26>; // 1 x i64 vector value -def v2i64 : ValueType<128, 27>; // 2 x i64 vector value -def v4i64 : ValueType<256, 28>; // 4 x i64 vector value -def v8i64 : ValueType<512, 29>; // 8 x i64 vector value -def v16i64 : ValueType<1024,30>; // 16 x i64 vector value +def v2i1 : ValueType<2 , 13>; // 2 x i1 vector value +def v4i1 : ValueType<4 , 14>; // 4 x i1 vector value +def v8i1 : ValueType<8 , 15>; // 8 x i1 vector value +def v16i1 : ValueType<16, 16>; // 16 x i1 vector value +def v2i8 : ValueType<16 , 17>; // 2 x i8 vector value +def v4i8 : ValueType<32 , 18>; // 4 x i8 vector value +def v8i8 : ValueType<64 , 19>; // 8 x i8 vector value +def v16i8 : ValueType<128, 20>; // 16 x i8 vector value +def v32i8 : ValueType<256, 21>; // 32 x i8 vector value +def v1i16 : ValueType<16 , 22>; // 1 x i16 vector value +def v2i16 : ValueType<32 , 23>; // 2 x i16 vector value +def v4i16 : ValueType<64 , 24>; // 4 x i16 vector value +def v8i16 : ValueType<128, 25>; // 8 x i16 vector value +def v16i16 : ValueType<256, 26>; // 16 x i16 vector value +def v1i32 : ValueType<32 , 27>; // 1 x i32 vector value +def v2i32 : ValueType<64 , 28>; // 2 x i32 vector value +def v4i32 : ValueType<128, 29>; // 4 x i32 vector value +def v8i32 : ValueType<256, 30>; // 8 x i32 vector value +def v16i32 : ValueType<512, 31>; // 16 x i32 vector value +def v1i64 : ValueType<64 , 32>; // 1 x i64 vector value +def v2i64 : ValueType<128, 33>; // 2 x i64 vector value +def v4i64 : ValueType<256, 34>; // 4 x i64 vector value +def v8i64 : ValueType<512, 35>; // 8 x i64 vector value +def v16i64 : ValueType<1024,36>; // 16 x i64 vector value -def v2f16 : ValueType<32 , 31>; // 2 x f16 vector value -def v2f32 : ValueType<64 , 32>; // 2 x f32 vector value -def v4f32 : ValueType<128, 33>; // 4 x f32 vector value 
-def v8f32 : ValueType<256, 34>; // 8 x f32 vector value -def v2f64 : ValueType<128, 35>; // 2 x f64 vector value -def v4f64 : ValueType<256, 36>; // 4 x f64 vector value +def v2f16 : ValueType<32 , 37>; // 2 x f16 vector value +def v2f32 : ValueType<64 , 38>; // 2 x f32 vector value +def v4f32 : ValueType<128, 39>; // 4 x f32 vector value +def v8f32 : ValueType<256, 40>; // 8 x f32 vector value +def v2f64 : ValueType<128, 41>; // 2 x f64 vector value +def v4f64 : ValueType<256, 42>; // 4 x f64 vector value -def x86mmx : ValueType<64 , 37>; // X86 MMX value -def FlagVT : ValueType<0 , 38>; // Pre-RA sched glue -def isVoid : ValueType<0 , 39>; // Produces no value -def untyped: ValueType<8 , 40>; // Produces an untyped value +def x86mmx : ValueType<64 , 43>; // X86 MMX value +def FlagVT : ValueType<0 , 44>; // Pre-RA sched glue +def isVoid : ValueType<0 , 45>; // Produces no value +def untyped: ValueType<8 , 46>; // Produces an untyped value def MetadataVT: ValueType<0, 250>; // Metadata diff --git a/include/llvm/Config/AsmParsers.def.in b/include/llvm/Config/AsmParsers.def.in index 041af83..d636753 100644 --- a/include/llvm/Config/AsmParsers.def.in +++ b/include/llvm/Config/AsmParsers.def.in @@ -1,24 +1,24 @@ -//===- llvm/Config/AsmParsers.def - LLVM Assembly Parsers -------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file enumerates all of the assembly-language parsers -// supported by this build of LLVM. Clients of this file should define -// the LLVM_ASM_PARSER macro to be a function-like macro with a -// single parameter (the name of the target whose assembly can be -// generated); including this file will then enumerate all of the -// targets with assembly parsers. -// -// The set of targets supported by LLVM is generated at configuration -// time, at which point this header is generated. Do not modify this -// header directly. -// -//===----------------------------------------------------------------------===// +/*===- llvm/Config/AsmParsers.def - LLVM Assembly Parsers -------*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This file enumerates all of the assembly-language parsers *| +|* supported by this build of LLVM. Clients of this file should define *| +|* the LLVM_ASM_PARSER macro to be a function-like macro with a *| +|* single parameter (the name of the target whose assembly can be *| +|* generated); including this file will then enumerate all of the *| +|* targets with assembly parsers. *| +|* *| +|* The set of targets supported by LLVM is generated at configuration *| +|* time, at which point this header is generated. Do not modify this *| +|* header directly. 
*| +|* *| +\*===----------------------------------------------------------------------===*/ #ifndef LLVM_ASM_PARSER # error Please define the macro LLVM_ASM_PARSER(TargetName) diff --git a/include/llvm/Config/AsmPrinters.def.in b/include/llvm/Config/AsmPrinters.def.in index 9729bd7..f0152a4 100644 --- a/include/llvm/Config/AsmPrinters.def.in +++ b/include/llvm/Config/AsmPrinters.def.in @@ -1,24 +1,24 @@ -//===- llvm/Config/AsmPrinters.def - LLVM Assembly Printers -----*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file enumerates all of the assembly-language printers -// supported by this build of LLVM. Clients of this file should define -// the LLVM_ASM_PRINTER macro to be a function-like macro with a -// single parameter (the name of the target whose assembly can be -// generated); including this file will then enumerate all of the -// targets with assembly printers. -// -// The set of targets supported by LLVM is generated at configuration -// time, at which point this header is generated. Do not modify this -// header directly. -// -//===----------------------------------------------------------------------===// +/*===- llvm/Config/AsmPrinters.def - LLVM Assembly Printers -----*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This file enumerates all of the assembly-language printers *| +|* supported by this build of LLVM. Clients of this file should define *| +|* the LLVM_ASM_PRINTER macro to be a function-like macro with a *| +|* single parameter (the name of the target whose assembly can be *| +|* generated); including this file will then enumerate all of the *| +|* targets with assembly printers. *| +|* *| +|* The set of targets supported by LLVM is generated at configuration *| +|* time, at which point this header is generated. Do not modify this *| +|* header directly. *| +|* *| +\*===----------------------------------------------------------------------===*/ #ifndef LLVM_ASM_PRINTER # error Please define the macro LLVM_ASM_PRINTER(TargetName) diff --git a/include/llvm/Config/Disassemblers.def.in b/include/llvm/Config/Disassemblers.def.in index 1e6281d..d3a9bbd 100644 --- a/include/llvm/Config/Disassemblers.def.in +++ b/include/llvm/Config/Disassemblers.def.in @@ -1,24 +1,24 @@ -//===- llvm/Config/Disassemblers.def - LLVM Assembly Parsers ----*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file enumerates all of the assembly-language parsers -// supported by this build of LLVM. Clients of this file should define -// the LLVM_DISASSEMBLER macro to be a function-like macro with a -// single parameter (the name of the target whose assembly can be -// generated); including this file will then enumerate all of the -// targets with assembly parsers. -// -// The set of targets supported by LLVM is generated at configuration -// time, at which point this header is generated. 
Do not modify this -// header directly. -// -//===----------------------------------------------------------------------===// +/*===- llvm/Config/Disassemblers.def - LLVM Assembly Parsers ----*- C++ -*-===*\ +|* *| +|* The LLVM Compiler Infrastructure *| +|* *| +|* This file is distributed under the University of Illinois Open Source *| +|* License. See LICENSE.TXT for details. *| +|* *| +|*===----------------------------------------------------------------------===*| +|* *| +|* This file enumerates all of the assembly-language parsers *| +|* supported by this build of LLVM. Clients of this file should define *| +|* the LLVM_DISASSEMBLER macro to be a function-like macro with a *| +|* single parameter (the name of the target whose assembly can be *| +|* generated); including this file will then enumerate all of the *| +|* targets with assembly parsers. *| +|* *| +|* The set of targets supported by LLVM is generated at configuration *| +|* time, at which point this header is generated. Do not modify this *| +|* header directly. *| +|* *| +\*===----------------------------------------------------------------------===*/ #ifndef LLVM_DISASSEMBLER # error Please define the macro LLVM_DISASSEMBLER(TargetName) diff --git a/include/llvm/Config/config.h.cmake b/include/llvm/Config/config.h.cmake index b912251..ca64124 100644 --- a/include/llvm/Config/config.h.cmake +++ b/include/llvm/Config/config.h.cmake @@ -1,6 +1,4 @@ -/************************************** -** Created by Kevin from config.h.in ** -***************************************/ +/* include/llvm/Config/config.h.cmake corresponding to config.h.in. */ #ifndef CONFIG_H #define CONFIG_H @@ -17,6 +15,9 @@ /* Default to all compiler invocations for --sysroot=. */ #undef DEFAULT_SYSROOT +/* Define if you want backtraces on crash */ +#cmakedefine ENABLE_BACKTRACES + /* Define if position independent code is enabled */ #cmakedefine ENABLE_PIC @@ -51,7 +52,7 @@ #cmakedefine HAVE_ASSERT_H ${HAVE_ASSERT_H} /* Define to 1 if you have the `backtrace' function. */ -#undef HAVE_BACKTRACE +#cmakedefine HAVE_BACKTRACE ${HAVE_BACKTRACE} /* Define to 1 if you have the `bcopy' function. */ #undef HAVE_BCOPY diff --git a/include/llvm/Config/config.h.in b/include/llvm/Config/config.h.in index 5a60ba5..a4f8af4 100644 --- a/include/llvm/Config/config.h.in +++ b/include/llvm/Config/config.h.in @@ -18,6 +18,9 @@ /* Default to all compiler invocations for --sysroot=. */ #undef DEFAULT_SYSROOT +/* Define if you want backtraces on crash */ +#undef ENABLE_BACKTRACES + /* Define if position independent code is enabled */ #undef ENABLE_PIC diff --git a/include/llvm/Constant.h b/include/llvm/Constant.h index e0e516d..0ddd1db 100644 --- a/include/llvm/Constant.h +++ b/include/llvm/Constant.h @@ -39,8 +39,8 @@ namespace llvm { /// don't have to worry about the lifetime of the objects. /// @brief LLVM Constant Representation class Constant : public User { - void operator=(const Constant &); // Do not implement - Constant(const Constant &); // Do not implement + void operator=(const Constant &) LLVM_DELETED_FUNCTION; + Constant(const Constant &) LLVM_DELETED_FUNCTION; virtual void anchor(); protected: @@ -65,6 +65,9 @@ public: /// true for things like constant expressions that could divide by zero. bool canTrap() const; + /// isThreadDependent - Return true if the value can vary between threads. + bool isThreadDependent() const; + /// isConstantUsed - Return true if the constant has users other than constant /// exprs and other dangling things. 
bool isConstantUsed() const; @@ -108,8 +111,6 @@ public: virtual void destroyConstant() { llvm_unreachable("Not reached!"); } //// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const Constant *) { return true; } - static inline bool classof(const GlobalValue *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() >= ConstantFirstVal && V->getValueID() <= ConstantLastVal; diff --git a/include/llvm/Constants.h b/include/llvm/Constants.h index fdd5382..7f94ef4 100644 --- a/include/llvm/Constants.h +++ b/include/llvm/Constants.h @@ -49,8 +49,8 @@ struct ConvertConstantType; /// @brief Class for constant integers. class ConstantInt : public Constant { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT - ConstantInt(const ConstantInt &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + ConstantInt(const ConstantInt &) LLVM_DELETED_FUNCTION; ConstantInt(IntegerType *Ty, const APInt& V); APInt Val; protected: @@ -221,7 +221,6 @@ public: } /// @brief Methods to support type inquiry through isa, cast, and dyn_cast. - static inline bool classof(const ConstantInt *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantIntVal; } @@ -234,8 +233,8 @@ public: class ConstantFP : public Constant { APFloat Val; virtual void anchor(); - void *operator new(size_t, unsigned);// DO NOT IMPLEMENT - ConstantFP(const ConstantFP &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + ConstantFP(const ConstantFP &) LLVM_DELETED_FUNCTION; friend class LLVMContextImpl; protected: ConstantFP(Type *Ty, const APFloat& V); @@ -283,15 +282,11 @@ public: bool isExactlyValue(double V) const { bool ignored; - // convert is not supported on this type - if (&Val.getSemantics() == &APFloat::PPCDoubleDouble) - return false; APFloat FV(V); FV.convert(Val.getSemantics(), APFloat::rmNearestTiesToEven, &ignored); return isExactlyValue(FV); } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ConstantFP *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantFPVal; } @@ -301,8 +296,8 @@ public: /// ConstantAggregateZero - All zero aggregate value /// class ConstantAggregateZero : public Constant { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT - ConstantAggregateZero(const ConstantAggregateZero &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + ConstantAggregateZero(const ConstantAggregateZero &) LLVM_DELETED_FUNCTION; protected: explicit ConstantAggregateZero(Type *ty) : Constant(ty, ConstantAggregateZeroVal, 0, 0) {} @@ -334,7 +329,6 @@ public: /// Methods for support type inquiry through isa, cast, and dyn_cast: /// - static bool classof(const ConstantAggregateZero *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantAggregateZeroVal; } @@ -346,7 +340,7 @@ public: /// class ConstantArray : public Constant { friend struct ConstantArrayCreator; - ConstantArray(const ConstantArray &); // DO NOT IMPLEMENT + ConstantArray(const ConstantArray &) LLVM_DELETED_FUNCTION; protected: ConstantArray(ArrayType *T, ArrayRef Val); public: @@ -367,7 +361,6 @@ public: virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ConstantArray *) { 
return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantArrayVal; } @@ -385,7 +378,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantArray, Constant) // class ConstantStruct : public Constant { friend struct ConstantArrayCreator; - ConstantStruct(const ConstantStruct &); // DO NOT IMPLEMENT + ConstantStruct(const ConstantStruct &) LLVM_DELETED_FUNCTION; protected: ConstantStruct(StructType *T, ArrayRef Val); public: @@ -426,7 +419,6 @@ public: virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ConstantStruct *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantStructVal; } @@ -445,7 +437,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantStruct, Constant) /// class ConstantVector : public Constant { friend struct ConstantArrayCreator; - ConstantVector(const ConstantVector &); // DO NOT IMPLEMENT + ConstantVector(const ConstantVector &) LLVM_DELETED_FUNCTION; protected: ConstantVector(VectorType *T, ArrayRef Val); public: @@ -474,7 +466,6 @@ public: virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ConstantVector *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantVectorVal; } @@ -491,8 +482,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantVector, Constant) /// ConstantPointerNull - a constant pointer value that points to null /// class ConstantPointerNull : public Constant { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT - ConstantPointerNull(const ConstantPointerNull &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + ConstantPointerNull(const ConstantPointerNull &) LLVM_DELETED_FUNCTION; protected: explicit ConstantPointerNull(PointerType *T) : Constant(reinterpret_cast(T), @@ -517,7 +508,6 @@ public: } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ConstantPointerNull *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantPointerNullVal; } @@ -543,8 +533,8 @@ class ConstantDataSequential : public Constant { /// element array of i8, or a 1-element array of i32. They'll both end up in /// the same StringMap bucket, linked up. ConstantDataSequential *Next; - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT - ConstantDataSequential(const ConstantDataSequential &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + ConstantDataSequential(const ConstantDataSequential &) LLVM_DELETED_FUNCTION; protected: explicit ConstantDataSequential(Type *ty, ValueTy VT, const char *Data) : Constant(ty, VT, 0, 0), DataElements(Data), Next(0) {} @@ -639,7 +629,6 @@ public: /// Methods for support type inquiry through isa, cast, and dyn_cast: /// - static bool classof(const ConstantDataSequential *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantDataArrayVal || V->getValueID() == ConstantDataVectorVal; @@ -655,8 +644,8 @@ private: /// operands because it stores all of the elements of the constant as densely /// packed data, instead of as Value*'s. 
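The hunks above converge on one idiom: disallowed copy operations are marked with LLVM_DELETED_FUNCTION instead of "DO NOT IMPLEMENT" comments, and only the classof overload taking the base Value type survives. A minimal sketch of that idiom with hypothetical class names, assuming llvm/Support/Compiler.h provides LLVM_DELETED_FUNCTION:

#include "llvm/Support/Compiler.h"

namespace sketch {
class Node {
  const unsigned Kind;
protected:
  explicit Node(unsigned K) : Kind(K) {}
public:
  unsigned getKind() const { return Kind; }
};

class Leaf : public Node {
  Leaf(const Leaf &) LLVM_DELETED_FUNCTION;           // no copying; expands to
  void operator=(const Leaf &) LLVM_DELETED_FUNCTION; // = delete where supported
public:
  Leaf() : Node(/*Kind=*/1) {}
  // One base-typed classof is all isa<>/cast<>/dyn_cast<> require; the
  // redundant classof(const Leaf *) overload is omitted, as in the patch.
  static bool classof(const Node *N) { return N->getKind() == 1; }
};
} // namespace sketch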
class ConstantDataArray : public ConstantDataSequential { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT - ConstantDataArray(const ConstantDataArray &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + ConstantDataArray(const ConstantDataArray &) LLVM_DELETED_FUNCTION; virtual void anchor(); friend class ConstantDataSequential; explicit ConstantDataArray(Type *ty, const char *Data) @@ -695,7 +684,6 @@ public: /// Methods for support type inquiry through isa, cast, and dyn_cast: /// - static bool classof(const ConstantDataArray *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantDataArrayVal; } @@ -708,8 +696,8 @@ public: /// operands because it stores all of the elements of the constant as densely /// packed data, instead of as Value*'s. class ConstantDataVector : public ConstantDataSequential { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT - ConstantDataVector(const ConstantDataVector &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + ConstantDataVector(const ConstantDataVector &) LLVM_DELETED_FUNCTION; virtual void anchor(); friend class ConstantDataSequential; explicit ConstantDataVector(Type *ty, const char *Data) @@ -749,7 +737,6 @@ public: /// Methods for support type inquiry through isa, cast, and dyn_cast: /// - static bool classof(const ConstantDataVector *) { return true; } static bool classof(const Value *V) { return V->getValueID() == ConstantDataVectorVal; } @@ -760,7 +747,7 @@ public: /// BlockAddress - The address of a basic block. /// class BlockAddress : public Constant { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; void *operator new(size_t s) { return User::operator new(s, 2); } BlockAddress(Function *F, BasicBlock *BB); public: @@ -781,7 +768,6 @@ public: virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const BlockAddress *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == BlockAddressVal; } @@ -1094,7 +1080,6 @@ public: virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ConstantExpr *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == ConstantExprVal; } @@ -1125,8 +1110,8 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantExpr, Constant) /// LangRef.html#undefvalues for details. 
/// class UndefValue : public Constant { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT - UndefValue(const UndefValue &); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + UndefValue(const UndefValue &) LLVM_DELETED_FUNCTION; protected: explicit UndefValue(Type *T) : Constant(T, UndefValueVal, 0, 0) {} protected: @@ -1159,7 +1144,6 @@ public: virtual void destroyConstant(); /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const UndefValue *) { return true; } static bool classof(const Value *V) { return V->getValueID() == UndefValueVal; } diff --git a/include/llvm/DIBuilder.h b/include/llvm/DIBuilder.h index 2ed48a9..2f07800 100644 --- a/include/llvm/DIBuilder.h +++ b/include/llvm/DIBuilder.h @@ -63,8 +63,8 @@ namespace llvm { SmallVector AllSubprograms; SmallVector AllGVs; - DIBuilder(const DIBuilder &); // DO NOT IMPLEMENT - void operator=(const DIBuilder &); // DO NOT IMPLEMENT + DIBuilder(const DIBuilder &) LLVM_DELETED_FUNCTION; + void operator=(const DIBuilder &) LLVM_DELETED_FUNCTION; public: explicit DIBuilder(Module &M); @@ -179,8 +179,10 @@ namespace llvm { /// @param Ty Parent type. /// @param PropertyName Name of the Objective C property associated with /// this ivar. - /// @param GetterName Name of the Objective C property getter selector. - /// @param SetterName Name of the Objective C property setter selector. + /// @param PropertyGetterName Name of the Objective C property getter + /// selector. + /// @param PropertySetterName Name of the Objective C property setter + /// selector. /// @param PropertyAttributes Objective C property attributes. DIType createObjCIVar(StringRef Name, DIFile File, unsigned LineNo, uint64_t SizeInBits, @@ -201,7 +203,7 @@ namespace llvm { /// @param OffsetInBits Member offset. /// @param Flags Flags to encode member attribute, e.g. private /// @param Ty Parent type. - /// @param Property Property associated with this ivar. + /// @param PropertyNode Property associated with this ivar. DIType createObjCIVar(StringRef Name, DIFile File, unsigned LineNo, uint64_t SizeInBits, uint64_t AlignInBits, uint64_t OffsetInBits, @@ -228,7 +230,7 @@ namespace llvm { /// @param Scope Scope in which this class is defined. /// @param Name class name. /// @param File File where this member is defined. - /// @param LineNo Line number. + /// @param LineNumber Line number. /// @param SizeInBits Member size. /// @param AlignInBits Member alignment. /// @param OffsetInBits Member offset. @@ -250,7 +252,7 @@ namespace llvm { /// @param Scope Scope in which this struct is defined. /// @param Name Struct name. /// @param File File where this member is defined. - /// @param LineNo Line number. + /// @param LineNumber Line number. /// @param SizeInBits Member size. /// @param AlignInBits Member alignment. /// @param Flags Flags to encode member attribute, e.g. private @@ -265,7 +267,7 @@ namespace llvm { /// @param Scope Scope in which this union is defined. /// @param Name Union name. /// @param File File where this member is defined. - /// @param LineNo Line number. + /// @param LineNumber Line number. /// @param SizeInBits Member size. /// @param AlignInBits Member alignment. /// @param Flags Flags to encode member attribute, e.g. private @@ -325,33 +327,36 @@ namespace llvm { /// @param Scope Scope in which this enumeration is defined. /// @param Name Union name. /// @param File File where this member is defined. - /// @param LineNo Line number. 
+ /// @param LineNumber Line number. /// @param SizeInBits Member size. /// @param AlignInBits Member alignment. /// @param Elements Enumeration elements. - /// @param Flags Flags (e.g. forward decl) DIType createEnumerationType(DIDescriptor Scope, StringRef Name, DIFile File, unsigned LineNumber, uint64_t SizeInBits, uint64_t AlignInBits, - DIArray Elements, DIType ClassType, - unsigned Flags); + DIArray Elements, DIType ClassType); /// createSubroutineType - Create subroutine type. - /// @param File File in which this subroutine is defined. - /// @param ParamterTypes An array of subroutine parameter types. This - /// includes return type at 0th index. + /// @param File File in which this subroutine is defined. + /// @param ParameterTypes An array of subroutine parameter types. This + /// includes return type at 0th index. DIType createSubroutineType(DIFile File, DIArray ParameterTypes); /// createArtificialType - Create a new DIType with "artificial" flag set. DIType createArtificialType(DIType Ty); + /// createObjectPointerType - Create a new DIType with the "object pointer" + /// flag set. + DIType createObjectPointerType(DIType Ty); + /// createTemporaryType - Create a temporary forward-declared type. DIType createTemporaryType(); DIType createTemporaryType(DIFile F); /// createForwardDecl - Create a temporary forward-declared type. DIType createForwardDecl(unsigned Tag, StringRef Name, DIDescriptor Scope, - DIFile F, unsigned Line, unsigned RuntimeLang = 0); + DIFile F, unsigned Line, unsigned RuntimeLang = 0, + uint64_t SizeInBits = 0, uint64_t AlignInBits = 0); /// retainType - Retain DIType in a module even if it is not referenced /// through debug info anchors. @@ -383,9 +388,9 @@ namespace llvm { /// createStaticVariable - Create a new descriptor for the specified /// variable. - /// @param Conext Variable scope. + /// @param Context Variable scope. /// @param Name Name of the variable. - /// @param LinakgeName Mangled name of the variable. + /// @param LinkageName Mangled name of the variable. /// @param File File where this variable is defined. /// @param LineNo Line number. /// @param Ty Variable Type. @@ -426,7 +431,7 @@ namespace llvm { /// DW_TAG_arg_variable. /// @param Scope Variable scope. /// @param Name Variable name. - /// @param File File where this variable is defined. + /// @param F File where this variable is defined. /// @param LineNo Line number. /// @param Ty Variable Type /// @param Addr An array of complex address operations. diff --git a/include/llvm/DataLayout.h b/include/llvm/DataLayout.h new file mode 100644 index 0000000..24ad05f --- /dev/null +++ b/include/llvm/DataLayout.h @@ -0,0 +1,429 @@ +//===--------- llvm/DataLayout.h - Data size & alignment info ---*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines layout properties related to datatype size/offset/alignment +// information. It uses lazy annotations to cache information about how +// structure types are laid out and used. +// +// This structure should be created once, filled in if the defaults are not +// correct and then passed around by const&. None of the members functions +// require modification to the object. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DATALAYOUT_H +#define LLVM_DATALAYOUT_H + +#include "llvm/Pass.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { + +class Value; +class Type; +class IntegerType; +class StructType; +class StructLayout; +class GlobalVariable; +class LLVMContext; +template +class ArrayRef; + +/// Enum used to categorize the alignment types stored by LayoutAlignElem +enum AlignTypeEnum { + INTEGER_ALIGN = 'i', ///< Integer type alignment + VECTOR_ALIGN = 'v', ///< Vector type alignment + FLOAT_ALIGN = 'f', ///< Floating point type alignment + AGGREGATE_ALIGN = 'a', ///< Aggregate alignment + STACK_ALIGN = 's' ///< Stack objects alignment +}; + +/// Layout alignment element. +/// +/// Stores the alignment data associated with a given alignment type (integer, +/// vector, float) and type bit width. +/// +/// @note The unusual order of elements in the structure attempts to reduce +/// padding and make the structure slightly more cache friendly. +struct LayoutAlignElem { + unsigned AlignType : 8; ///< Alignment type (AlignTypeEnum) + unsigned TypeBitWidth : 24; ///< Type bit width + unsigned ABIAlign : 16; ///< ABI alignment for this type/bitw + unsigned PrefAlign : 16; ///< Pref. alignment for this type/bitw + + /// Initializer + static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align, + unsigned pref_align, uint32_t bit_width); + /// Equality predicate + bool operator==(const LayoutAlignElem &rhs) const; +}; + +/// Layout pointer alignment element. +/// +/// Stores the alignment data associated with a given pointer and address space. +/// +/// @note The unusual order of elements in the structure attempts to reduce +/// padding and make the structure slightly more cache friendly. +struct PointerAlignElem { + unsigned ABIAlign; ///< ABI alignment for this type/bitw + unsigned PrefAlign; ///< Pref. alignment for this type/bitw + uint32_t TypeBitWidth; ///< Type bit width + uint32_t AddressSpace; ///< Address space for the pointer type + + /// Initializer + static PointerAlignElem get(uint32_t addr_space, unsigned abi_align, + unsigned pref_align, uint32_t bit_width); + /// Equality predicate + bool operator==(const PointerAlignElem &rhs) const; +}; + + +/// DataLayout - This class holds a parsed version of the target data layout +/// string in a module and provides methods for querying it. The target data +/// layout string is specified *by the target* - a frontend generating LLVM IR +/// is required to generate the right target data for the target being codegen'd +/// to. If some measure of portability is desired, an empty string may be +/// specified in the module. +class DataLayout : public ImmutablePass { +private: + bool LittleEndian; ///< Defaults to false + unsigned StackNaturalAlign; ///< Stack natural alignment + + SmallVector LegalIntWidths; ///< Legal Integers. + + /// Alignments- Where the primitive type alignment data is stored. + /// + /// @sa init(). + /// @note Could support multiple size pointer alignments, e.g., 32-bit + /// pointers vs. 64-bit pointers by extending LayoutAlignment, but for now, + /// we don't. + SmallVector Alignments; + DenseMap Pointers; + + /// InvalidAlignmentElem - This member is a signal that a requested alignment + /// type and bit width were not found in the SmallVector. 
+ static const LayoutAlignElem InvalidAlignmentElem; + + /// InvalidPointerElem - This member is a signal that a requested pointer + /// type and bit width were not found in the DenseSet. + static const PointerAlignElem InvalidPointerElem; + + // The StructType -> StructLayout map. + mutable void *LayoutMap; + + //! Set/initialize target alignments + void setAlignment(AlignTypeEnum align_type, unsigned abi_align, + unsigned pref_align, uint32_t bit_width); + unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width, + bool ABIAlign, Type *Ty) const; + + //! Set/initialize pointer alignments + void setPointerAlignment(uint32_t addr_space, unsigned abi_align, + unsigned pref_align, uint32_t bit_width); + + //! Internal helper method that returns requested alignment for type. + unsigned getAlignment(Type *Ty, bool abi_or_pref) const; + + /// Valid alignment predicate. + /// + /// Predicate that tests a LayoutAlignElem reference returned by get() against + /// InvalidAlignmentElem. + bool validAlignment(const LayoutAlignElem &align) const { + return &align != &InvalidAlignmentElem; + } + + /// Valid pointer predicate. + /// + /// Predicate that tests a PointerAlignElem reference returned by get() against + /// InvalidPointerElem. + bool validPointer(const PointerAlignElem &align) const { + return &align != &InvalidPointerElem; + } + + /// Initialise a DataLayout object with default values, ensure that the + /// target data pass is registered. + void init(); + +public: + /// Default ctor. + /// + /// @note This has to exist, because this is a pass, but it should never be + /// used. + DataLayout(); + + /// Constructs a DataLayout from a specification string. See init(). + explicit DataLayout(StringRef LayoutDescription) + : ImmutablePass(ID) { + std::string errMsg = parseSpecifier(LayoutDescription, this); + assert(errMsg == "" && "Invalid target data layout string."); + (void)errMsg; + } + + /// Parses a target data specification string. Returns an error message + /// if the string is malformed, or the empty string on success. Optionally + /// initialises a DataLayout object if passed a non-null pointer. + static std::string parseSpecifier(StringRef LayoutDescription, + DataLayout* td = 0); + + /// Initialize target data from properties stored in the module. + explicit DataLayout(const Module *M); + + DataLayout(const DataLayout &TD) : + ImmutablePass(ID), + LittleEndian(TD.isLittleEndian()), + LegalIntWidths(TD.LegalIntWidths), + Alignments(TD.Alignments), + Pointers(TD.Pointers), + LayoutMap(0) + { } + + ~DataLayout(); // Not virtual, do not subclass this class + + /// Layout endianness... + bool isLittleEndian() const { return LittleEndian; } + bool isBigEndian() const { return !LittleEndian; } + + /// getStringRepresentation - Return the string representation of the + /// DataLayout. This representation is in the same format accepted by the + /// string constructor above. + std::string getStringRepresentation() const; + + /// isLegalInteger - This function returns true if the specified type is + /// known to be a native integer type supported by the CPU. For example, + /// i64 is not native on most 32-bit CPUs and i37 is not native on any known + /// one. This returns false if the integer width is not legal. + /// + /// The width is specified in bits. 
+ /// + bool isLegalInteger(unsigned Width) const { + for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i) + if (LegalIntWidths[i] == Width) + return true; + return false; + } + + bool isIllegalInteger(unsigned Width) const { + return !isLegalInteger(Width); + } + + /// Returns true if the given alignment exceeds the natural stack alignment. + bool exceedsNaturalStackAlignment(unsigned Align) const { + return (StackNaturalAlign != 0) && (Align > StackNaturalAlign); + } + + /// fitsInLegalInteger - This function returns true if the specified type fits + /// in a native integer type supported by the CPU. For example, if the CPU + /// only supports i32 as a native integer type, then i27 fits in a legal + // integer type but i45 does not. + bool fitsInLegalInteger(unsigned Width) const { + for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i) + if (Width <= LegalIntWidths[i]) + return true; + return false; + } + + /// Layout pointer alignment + /// FIXME: The defaults need to be removed once all of + /// the backends/clients are updated. + unsigned getPointerABIAlignment(unsigned AS = 0) const { + DenseMap::const_iterator val = Pointers.find(AS); + if (val == Pointers.end()) { + val = Pointers.find(0); + } + return val->second.ABIAlign; + } + /// Return target's alignment for stack-based pointers + /// FIXME: The defaults need to be removed once all of + /// the backends/clients are updated. + unsigned getPointerPrefAlignment(unsigned AS = 0) const { + DenseMap::const_iterator val = Pointers.find(AS); + if (val == Pointers.end()) { + val = Pointers.find(0); + } + return val->second.PrefAlign; + } + /// Layout pointer size + /// FIXME: The defaults need to be removed once all of + /// the backends/clients are updated. + unsigned getPointerSize(unsigned AS = 0) const { + DenseMap::const_iterator val = Pointers.find(AS); + if (val == Pointers.end()) { + val = Pointers.find(0); + } + return val->second.TypeBitWidth; + } + /// Layout pointer size, in bits + /// FIXME: The defaults need to be removed once all of + /// the backends/clients are updated. + unsigned getPointerSizeInBits(unsigned AS = 0) const { + return getPointerSize(AS) * 8; + } + /// Size examples: + /// + /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*] + /// ---- ---------- --------------- --------------- + /// i1 1 8 8 + /// i8 8 8 8 + /// i19 19 24 32 + /// i32 32 32 32 + /// i100 100 104 128 + /// i128 128 128 128 + /// Float 32 32 32 + /// Double 64 64 64 + /// X86_FP80 80 80 96 + /// + /// [*] The alloc size depends on the alignment, and thus on the target. + /// These values are for x86-32 linux. + + /// getTypeSizeInBits - Return the number of bits necessary to hold the + /// specified type. For example, returns 36 for i36 and 80 for x86_fp80. + uint64_t getTypeSizeInBits(Type* Ty) const; + + /// getTypeStoreSize - Return the maximum number of bytes that may be + /// overwritten by storing the specified type. For example, returns 5 + /// for i36 and 10 for x86_fp80. + uint64_t getTypeStoreSize(Type *Ty) const { + return (getTypeSizeInBits(Ty)+7)/8; + } + + /// getTypeStoreSizeInBits - Return the maximum number of bits that may be + /// overwritten by storing the specified type; always a multiple of 8. For + /// example, returns 40 for i36 and 80 for x86_fp80. + uint64_t getTypeStoreSizeInBits(Type *Ty) const { + return 8*getTypeStoreSize(Ty); + } + + /// getTypeAllocSize - Return the offset in bytes between successive objects + /// of the specified type, including alignment padding. 
This is the amount + /// that alloca reserves for this type. For example, returns 12 or 16 for + /// x86_fp80, depending on alignment. + uint64_t getTypeAllocSize(Type* Ty) const { + // Round up to the next alignment boundary. + return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty)); + } + + /// getTypeAllocSizeInBits - Return the offset in bits between successive + /// objects of the specified type, including alignment padding; always a + /// multiple of 8. This is the amount that alloca reserves for this type. + /// For example, returns 96 or 128 for x86_fp80, depending on alignment. + uint64_t getTypeAllocSizeInBits(Type* Ty) const { + return 8*getTypeAllocSize(Ty); + } + + /// getABITypeAlignment - Return the minimum ABI-required alignment for the + /// specified type. + unsigned getABITypeAlignment(Type *Ty) const; + + /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for + /// an integer type of the specified bitwidth. + unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const; + + + /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment + /// for the specified type when it is part of a call frame. + unsigned getCallFrameTypeAlignment(Type *Ty) const; + + + /// getPrefTypeAlignment - Return the preferred stack/global alignment for + /// the specified type. This is always at least as good as the ABI alignment. + unsigned getPrefTypeAlignment(Type *Ty) const; + + /// getPreferredTypeAlignmentShift - Return the preferred alignment for the + /// specified type, returned as log2 of the value (a shift amount). + /// + unsigned getPreferredTypeAlignmentShift(Type *Ty) const; + + /// getIntPtrType - Return an integer type with size at least as big as that + /// of a pointer in the given address space. + IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const; + + /// getIntPtrType - Return an integer (vector of integer) type with size at + /// least as big as that of a pointer of the given pointer (vector of pointer) + /// type. + Type *getIntPtrType(Type *) const; + + /// getIndexedOffset - return the offset from the beginning of the type for + /// the specified indices. This is used to implement getelementptr. + /// + uint64_t getIndexedOffset(Type *Ty, ArrayRef Indices) const; + + /// getStructLayout - Return a StructLayout object, indicating the alignment + /// of the struct, its size, and the offsets of its fields. Note that this + /// information is lazily cached. + const StructLayout *getStructLayout(StructType *Ty) const; + + /// getPreferredAlignment - Return the preferred alignment of the specified + /// global. This includes an explicitly requested alignment (if the global + /// has one). + unsigned getPreferredAlignment(const GlobalVariable *GV) const; + + /// getPreferredAlignmentLog - Return the preferred alignment of the + /// specified global, returned in log form. This includes an explicitly + /// requested alignment (if the global has one). + unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const; + + /// RoundUpAlignment - Round the specified value up to the next alignment + /// boundary specified by Alignment. For example, 7 rounded up to an + /// alignment boundary of 4 is 8. 8 rounded up to the alignment boundary of 4 + /// is 8 because it is already aligned. 
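To make the size and alignment relationships documented above concrete, here is a small sketch using the worked numbers from those comments (the i19 type and the layout string are illustrative assumptions, not values taken from the patch):

#include "llvm/DataLayout.h"

static void dataLayoutSketch(llvm::Type *I19Ty) {
  // RoundUpAlignment: 7 rounded up to a 4-byte boundary is 8; 8 stays 8.
  unsigned A = llvm::DataLayout::RoundUpAlignment(7u, 4);  // 8
  unsigned B = llvm::DataLayout::RoundUpAlignment(8u, 4);  // 8
  (void)A; (void)B;

  // Per the example table above, an i19 stores in (19 + 7) / 8 = 3 bytes,
  // and its alloc size is that store size rounded up to its ABI alignment.
  llvm::DataLayout DL("e-p:32:32");                  // illustrative layout string
  uint64_t StoreBytes = DL.getTypeStoreSize(I19Ty);  // 3
  uint64_t AllocBytes = DL.getTypeAllocSize(I19Ty);  // 4 on the documented target
  (void)StoreBytes; (void)AllocBytes;
}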
+ template + static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) { + assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!"); + return (Val + (Alignment-1)) & ~UIntTy(Alignment-1); + } + + static char ID; // Pass identification, replacement for typeid +}; + +/// StructLayout - used to lazily calculate structure layout information for a +/// target machine, based on the DataLayout structure. +/// +class StructLayout { + uint64_t StructSize; + unsigned StructAlignment; + unsigned NumElements; + uint64_t MemberOffsets[1]; // variable sized array! +public: + + uint64_t getSizeInBytes() const { + return StructSize; + } + + uint64_t getSizeInBits() const { + return 8*StructSize; + } + + unsigned getAlignment() const { + return StructAlignment; + } + + /// getElementContainingOffset - Given a valid byte offset into the structure, + /// return the structure index that contains it. + /// + unsigned getElementContainingOffset(uint64_t Offset) const; + + uint64_t getElementOffset(unsigned Idx) const { + assert(Idx < NumElements && "Invalid element idx!"); + return MemberOffsets[Idx]; + } + + uint64_t getElementOffsetInBits(unsigned Idx) const { + return getElementOffset(Idx)*8; + } + +private: + friend class DataLayout; // Only DataLayout can create this class + StructLayout(StructType *ST, const DataLayout &TD); +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/DebugInfo.h b/include/llvm/DebugInfo.h index 618220f..dae03ad 100644 --- a/include/llvm/DebugInfo.h +++ b/include/llvm/DebugInfo.h @@ -60,7 +60,8 @@ namespace llvm { FlagArtificial = 1 << 6, FlagExplicit = 1 << 7, FlagPrototyped = 1 << 8, - FlagObjcClassComplete = 1 << 9 + FlagObjcClassComplete = 1 << 9, + FlagObjectPointer = 1 << 10 }; protected: const MDNode *DbgNode; @@ -80,6 +81,7 @@ namespace llvm { GlobalVariable *getGlobalVariableField(unsigned Elt) const; Constant *getConstantField(unsigned Elt) const; Function *getFunctionField(unsigned Elt) const; + void replaceFunctionField(unsigned Elt, Function *F); public: explicit DIDescriptor() : DbgNode(0) {} @@ -287,6 +289,9 @@ namespace llvm { bool isArtificial() const { return (getFlags() & FlagArtificial) != 0; } + bool isObjectPointer() const { + return (getFlags() & FlagObjectPointer) != 0; + } bool isObjcClassComplete() const { return (getFlags() & FlagObjcClassComplete) != 0; } @@ -558,6 +563,7 @@ namespace llvm { bool describes(const Function *F); Function *getFunction() const { return getFunctionField(16); } + void replaceFunction(Function *F) { replaceFunctionField(16, F); } DIArray getTemplateParams() const { return getFieldAs(17); } DISubprogram getFunctionDeclaration() const { return getFieldAs(18); @@ -644,6 +650,10 @@ namespace llvm { return (getUnsignedField(6) & FlagArtificial) != 0; } + bool isObjectPointer() const { + return (getUnsignedField(6) & FlagObjectPointer) != 0; + } + /// getInlinedAt - If this variable is inlined then return inline location. 
MDNode *getInlinedAt() const; diff --git a/include/llvm/DebugInfo/DIContext.h b/include/llvm/DebugInfo/DIContext.h index cfdeb46..26bd1f6 100644 --- a/include/llvm/DebugInfo/DIContext.h +++ b/include/llvm/DebugInfo/DIContext.h @@ -15,6 +15,8 @@ #ifndef LLVM_DEBUGINFO_DICONTEXT_H #define LLVM_DEBUGINFO_DICONTEXT_H +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/DataTypes.h" @@ -54,6 +56,23 @@ public: } }; +/// DIInliningInfo - a format-neutral container for inlined code description. +class DIInliningInfo { + SmallVector Frames; + public: + DIInliningInfo() {} + DILineInfo getFrame(unsigned Index) const { + assert(Index < Frames.size()); + return Frames[Index]; + } + uint32_t getNumberOfFrames() const { + return Frames.size(); + } + void addFrame(const DILineInfo &Frame) { + Frames.push_back(Frame); + } +}; + /// DILineInfoSpecifier - controls which fields of DILineInfo container /// should be filled with data. class DILineInfoSpecifier { @@ -71,6 +90,13 @@ public: } }; +// In place of applying the relocations to the data we've read from disk we use +// a separate mapping table to the side and checking that at locations in the +// dwarf where we expect relocated values. This adds a bit of complexity to the +// dwarf parsing/extraction at the benefit of not allocating memory for the +// entire size of the debug info sections. +typedef DenseMap > RelocAddrMap; + class DIContext { public: virtual ~DIContext(); @@ -81,12 +107,16 @@ public: StringRef abbrevSection, StringRef aRangeSection = StringRef(), StringRef lineSection = StringRef(), - StringRef stringSection = StringRef()); + StringRef stringSection = StringRef(), + StringRef rangeSection = StringRef(), + const RelocAddrMap &Map = RelocAddrMap()); virtual void dump(raw_ostream &OS) = 0; - virtual DILineInfo getLineInfoForAddress(uint64_t address, - DILineInfoSpecifier specifier = DILineInfoSpecifier()) = 0; + virtual DILineInfo getLineInfoForAddress(uint64_t Address, + DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0; + virtual DIInliningInfo getInliningInfoForAddress(uint64_t Address, + DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0; }; } diff --git a/include/llvm/DefaultPasses.h b/include/llvm/DefaultPasses.h index 929569d..9f1ade8 100644 --- a/include/llvm/DefaultPasses.h +++ b/include/llvm/DefaultPasses.h @@ -14,7 +14,7 @@ #ifndef LLVM_DEFAULT_PASS_SUPPORT_H #define LLVM_DEFAULT_PASS_SUPPORT_H -#include +#include "llvm/PassSupport.h" namespace llvm { diff --git a/include/llvm/DerivedTypes.h b/include/llvm/DerivedTypes.h index da5ad27..c862c2c 100644 --- a/include/llvm/DerivedTypes.h +++ b/include/llvm/DerivedTypes.h @@ -20,6 +20,7 @@ #include "llvm/Type.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/Compiler.h" namespace llvm { @@ -84,7 +85,6 @@ public: bool isPowerOf2ByteWidth() const; // Methods for support type inquiry through isa, cast, and dyn_cast. 
- static inline bool classof(const IntegerType *) { return true; } static inline bool classof(const Type *T) { return T->getTypeID() == IntegerTyID; } @@ -94,8 +94,8 @@ public: /// FunctionType - Class to represent function types /// class FunctionType : public Type { - FunctionType(const FunctionType &); // Do not implement - const FunctionType &operator=(const FunctionType &); // Do not implement + FunctionType(const FunctionType &) LLVM_DELETED_FUNCTION; + const FunctionType &operator=(const FunctionType &) LLVM_DELETED_FUNCTION; FunctionType(Type *Result, ArrayRef Params, bool IsVarArgs); public: @@ -133,7 +133,6 @@ public: unsigned getNumParams() const { return NumContainedTys - 1; } // Methods for support type inquiry through isa, cast, and dyn_cast. - static inline bool classof(const FunctionType *) { return true; } static inline bool classof(const Type *T) { return T->getTypeID() == FunctionTyID; } @@ -156,7 +155,6 @@ public: bool indexValid(unsigned Idx) const; // Methods for support type inquiry through isa, cast, and dyn_cast. - static inline bool classof(const CompositeType *) { return true; } static inline bool classof(const Type *T) { return T->getTypeID() == ArrayTyID || T->getTypeID() == StructTyID || @@ -183,12 +181,12 @@ public: /// Independent of what kind of struct you have, the body of a struct type are /// laid out in memory consequtively with the elements directly one after the /// other (if the struct is packed) or (if not packed) with padding between the -/// elements as defined by TargetData (which is required to match what the code +/// elements as defined by DataLayout (which is required to match what the code /// generator for a target expects). /// class StructType : public CompositeType { - StructType(const StructType &); // Do not implement - const StructType &operator=(const StructType &); // Do not implement + StructType(const StructType &) LLVM_DELETED_FUNCTION; + const StructType &operator=(const StructType &) LLVM_DELETED_FUNCTION; StructType(LLVMContext &C) : CompositeType(C, StructTyID), SymbolTableEntry(0) {} enum { @@ -292,7 +290,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast. - static inline bool classof(const StructType *) { return true; } static inline bool classof(const Type *T) { return T->getTypeID() == StructTyID; } @@ -308,8 +305,8 @@ public: /// class SequentialType : public CompositeType { Type *ContainedType; ///< Storage for the single contained type. - SequentialType(const SequentialType &); // Do not implement! - const SequentialType &operator=(const SequentialType &); // Do not implement! + SequentialType(const SequentialType &) LLVM_DELETED_FUNCTION; + const SequentialType &operator=(const SequentialType &) LLVM_DELETED_FUNCTION; protected: SequentialType(TypeID TID, Type *ElType) @@ -322,7 +319,6 @@ public: Type *getElementType() const { return ContainedTys[0]; } // Methods for support type inquiry through isa, cast, and dyn_cast. 
- static inline bool classof(const SequentialType *) { return true; } static inline bool classof(const Type *T) { return T->getTypeID() == ArrayTyID || T->getTypeID() == PointerTyID || @@ -336,8 +332,8 @@ public: class ArrayType : public SequentialType { uint64_t NumElements; - ArrayType(const ArrayType &); // Do not implement - const ArrayType &operator=(const ArrayType &); // Do not implement + ArrayType(const ArrayType &) LLVM_DELETED_FUNCTION; + const ArrayType &operator=(const ArrayType &) LLVM_DELETED_FUNCTION; ArrayType(Type *ElType, uint64_t NumEl); public: /// ArrayType::get - This static method is the primary way to construct an @@ -352,7 +348,6 @@ public: uint64_t getNumElements() const { return NumElements; } // Methods for support type inquiry through isa, cast, and dyn_cast. - static inline bool classof(const ArrayType *) { return true; } static inline bool classof(const Type *T) { return T->getTypeID() == ArrayTyID; } @@ -363,8 +358,8 @@ public: class VectorType : public SequentialType { unsigned NumElements; - VectorType(const VectorType &); // Do not implement - const VectorType &operator=(const VectorType &); // Do not implement + VectorType(const VectorType &) LLVM_DELETED_FUNCTION; + const VectorType &operator=(const VectorType &) LLVM_DELETED_FUNCTION; VectorType(Type *ElType, unsigned NumEl); public: /// VectorType::get - This static method is the primary way to construct an @@ -419,7 +414,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast. - static inline bool classof(const VectorType *) { return true; } static inline bool classof(const Type *T) { return T->getTypeID() == VectorTyID; } @@ -429,8 +423,8 @@ public: /// PointerType - Class to represent pointers. /// class PointerType : public SequentialType { - PointerType(const PointerType &); // Do not implement - const PointerType &operator=(const PointerType &); // Do not implement + PointerType(const PointerType &) LLVM_DELETED_FUNCTION; + const PointerType &operator=(const PointerType &) LLVM_DELETED_FUNCTION; explicit PointerType(Type *ElType, unsigned AddrSpace); public: /// PointerType::get - This constructs a pointer to an object of the specified @@ -451,7 +445,6 @@ public: inline unsigned getAddressSpace() const { return getSubclassData(); } // Implement support type inquiry through isa, cast, and dyn_cast. - static inline bool classof(const PointerType *) { return true; } static inline bool classof(const Type *T) { return T->getTypeID() == PointerTyID; } diff --git a/include/llvm/ExecutionEngine/ExecutionEngine.h b/include/llvm/ExecutionEngine/ExecutionEngine.h index ae8b68d..8073d8f 100644 --- a/include/llvm/ExecutionEngine/ExecutionEngine.h +++ b/include/llvm/ExecutionEngine/ExecutionEngine.h @@ -42,7 +42,7 @@ class JITMemoryManager; class MachineCodeInfo; class Module; class MutexGuard; -class TargetData; +class DataLayout; class Triple; class Type; @@ -88,7 +88,7 @@ public: /// \brief Erase an entry from the mapping table. /// - /// \returns The address that \arg ToUnmap was happed to. + /// \returns The address that \p ToUnmap was happed to. void *RemoveMapping(const MutexGuard &, const GlobalValue *ToUnmap); }; @@ -104,7 +104,7 @@ class ExecutionEngine { ExecutionEngineState EEState; /// The target data for the platform for which execution is being performed. - const TargetData *TD; + const DataLayout *TD; /// Whether lazy JIT compilation is enabled. bool CompilingLazily; @@ -123,7 +123,7 @@ protected: /// optimize for the case where there is only one module. 
SmallVector Modules; - void setTargetData(const TargetData *td) { TD = td; } + void setDataLayout(const DataLayout *td) { TD = td; } /// getMemoryforGV - Allocate memory for a global variable. virtual char *getMemoryForGV(const GlobalVariable *GV); @@ -213,7 +213,7 @@ public: //===--------------------------------------------------------------------===// - const TargetData *getTargetData() const { return TD; } + const DataLayout *getDataLayout() const { return TD; } /// removeModule - Remove a Module from the list of modules. Returns true if /// M is found. @@ -244,11 +244,18 @@ public: /// Map the address of a JIT section as returned from the memory manager /// to the address in the target process as the running code will see it. /// This is the address which will be used for relocation resolution. - virtual void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress) { + virtual void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress) { llvm_unreachable("Re-mapping of section addresses not supported with this " "EE!"); } + // finalizeObject - This method should be called after sections within an + // object have been relocated using mapSectionAddress. When this method is + // called the MCJIT execution engine will reapply relocations for a loaded + // object. This method has no effect for the legacy JIT engine or the + // interpeter. + virtual void finalizeObject() {} + /// runStaticConstructorsDestructors - This method is used to execute all of /// the static constructors or destructors for a program. /// diff --git a/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h b/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h deleted file mode 100644 index ca87342..0000000 --- a/include/llvm/ExecutionEngine/IntelJITEventsWrapper.h +++ /dev/null @@ -1,102 +0,0 @@ -//===-- IntelJITEventsWrapper.h - Intel JIT Events API Wrapper --*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines a wrapper for the Intel JIT Events API. It allows for the -// implementation of the jitprofiling library to be swapped with an alternative -// implementation (for testing). To include this file, you must have the -// jitprofiling.h header available; it is available in Intel(R) VTune(TM) -// Amplifier XE 2011. 
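Returning to the ExecutionEngine changes above (TargetData queries becoming DataLayout queries, plus the new finalizeObject hook), a minimal usage sketch with hypothetical section addresses:

#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/DataLayout.h"

static void remapAndFinalize(llvm::ExecutionEngine &EE,
                             void *LocalSection, uint64_t TargetAddr) {
  // getTargetData() is gone; layout information is now queried as a DataLayout.
  const llvm::DataLayout *DL = EE.getDataLayout();
  (void)DL;

  // Record where a loaded section will live in the target process, then ask
  // the engine to reapply relocations. Per the comment above, finalizeObject
  // is a no-op for the legacy JIT and the interpreter.
  EE.mapSectionAddress(LocalSection, TargetAddr);
  EE.finalizeObject();
}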
-// -//===----------------------------------------------------------------------===// - -#ifndef INTEL_JIT_EVENTS_WRAPPER_H -#define INTEL_JIT_EVENTS_WRAPPER_H - -#include - -namespace llvm { - -class IntelJITEventsWrapper { - // Function pointer types for testing implementation of Intel jitprofiling - // library - typedef int (*NotifyEventPtr)(iJIT_JVM_EVENT, void*); - typedef void (*RegisterCallbackExPtr)(void *, iJIT_ModeChangedEx ); - typedef iJIT_IsProfilingActiveFlags (*IsProfilingActivePtr)(void); - typedef void (*FinalizeThreadPtr)(void); - typedef void (*FinalizeProcessPtr)(void); - typedef unsigned int (*GetNewMethodIDPtr)(void); - - NotifyEventPtr NotifyEventFunc; - RegisterCallbackExPtr RegisterCallbackExFunc; - IsProfilingActivePtr IsProfilingActiveFunc; - FinalizeThreadPtr FinalizeThreadFunc; - FinalizeProcessPtr FinalizeProcessFunc; - GetNewMethodIDPtr GetNewMethodIDFunc; - -public: - bool isAmplifierRunning() { - return iJIT_IsProfilingActive() == iJIT_SAMPLING_ON; - } - - IntelJITEventsWrapper() - : NotifyEventFunc(::iJIT_NotifyEvent), - RegisterCallbackExFunc(::iJIT_RegisterCallbackEx), - IsProfilingActiveFunc(::iJIT_IsProfilingActive), - FinalizeThreadFunc(::FinalizeThread), - FinalizeProcessFunc(::FinalizeProcess), - GetNewMethodIDFunc(::iJIT_GetNewMethodID) { - } - - IntelJITEventsWrapper(NotifyEventPtr NotifyEventImpl, - RegisterCallbackExPtr RegisterCallbackExImpl, - IsProfilingActivePtr IsProfilingActiveImpl, - FinalizeThreadPtr FinalizeThreadImpl, - FinalizeProcessPtr FinalizeProcessImpl, - GetNewMethodIDPtr GetNewMethodIDImpl) - : NotifyEventFunc(NotifyEventImpl), - RegisterCallbackExFunc(RegisterCallbackExImpl), - IsProfilingActiveFunc(IsProfilingActiveImpl), - FinalizeThreadFunc(FinalizeThreadImpl), - FinalizeProcessFunc(FinalizeProcessImpl), - GetNewMethodIDFunc(GetNewMethodIDImpl) { - } - - // Sends an event anncouncing that a function has been emitted - // return values are event-specific. See Intel documentation for details. - int iJIT_NotifyEvent(iJIT_JVM_EVENT EventType, void *EventSpecificData) { - if (!NotifyEventFunc) - return -1; - return NotifyEventFunc(EventType, EventSpecificData); - } - - // Registers a callback function to receive notice of profiling state changes - void iJIT_RegisterCallbackEx(void *UserData, - iJIT_ModeChangedEx NewModeCallBackFuncEx) { - if (RegisterCallbackExFunc) - RegisterCallbackExFunc(UserData, NewModeCallBackFuncEx); - } - - // Returns the current profiler mode - iJIT_IsProfilingActiveFlags iJIT_IsProfilingActive(void) { - if (!IsProfilingActiveFunc) - return iJIT_NOTHING_RUNNING; - return IsProfilingActiveFunc(); - } - - // Generates a locally unique method ID for use in code registration - unsigned int iJIT_GetNewMethodID(void) { - if (!GetNewMethodIDFunc) - return -1; - return GetNewMethodIDFunc(); - } -}; - -} //namespace llvm - -#endif //INTEL_JIT_EVENTS_WRAPPER_H diff --git a/include/llvm/ExecutionEngine/JITEventListener.h b/include/llvm/ExecutionEngine/JITEventListener.h index eea603f..e6586e7 100644 --- a/include/llvm/ExecutionEngine/JITEventListener.h +++ b/include/llvm/ExecutionEngine/JITEventListener.h @@ -26,6 +26,7 @@ class Function; class MachineFunction; class OProfileWrapper; class IntelJITEventsWrapper; +class ObjectImage; /// JITEvent_EmittedFunctionDetails - Helper struct for containing information /// about a generated machine code function. @@ -76,6 +77,20 @@ public: /// matching NotifyFreeingMachineCode call. 
virtual void NotifyFreeingMachineCode(void *) {} + /// NotifyObjectEmitted - Called after an object has been successfully + /// emitted to memory. NotifyFunctionEmitted will not be called for + /// individual functions in the object. + /// + /// ELF-specific information + /// The ObjectImage contains the generated object image + /// with section headers updated to reflect the address at which sections + /// were loaded and with relocations performed in-place on debug sections. + virtual void NotifyObjectEmitted(const ObjectImage &Obj) {} + + /// NotifyFreeingObject - Called just before the memory associated with + /// a previously emitted object is released. + virtual void NotifyFreeingObject(const ObjectImage &Obj) {} + #if LLVM_USE_INTEL_JITEVENTS // Construct an IntelJITEventListener static JITEventListener *createIntelJITEventListener(); diff --git a/include/llvm/ExecutionEngine/JITMemoryManager.h b/include/llvm/ExecutionEngine/JITMemoryManager.h index 4c75b6a..9089646 100644 --- a/include/llvm/ExecutionEngine/JITMemoryManager.h +++ b/include/llvm/ExecutionEngine/JITMemoryManager.h @@ -10,7 +10,9 @@ #ifndef LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H #define LLVM_EXECUTION_ENGINE_JIT_MEMMANAGER_H +#include "llvm/ExecutionEngine/RuntimeDyld.h" #include "llvm/Support/DataTypes.h" + #include namespace llvm { @@ -22,7 +24,7 @@ namespace llvm { /// memory for the code generated by the JIT. This can be reimplemented by /// clients that have a strong desire to control how the layout of JIT'd memory /// works. -class JITMemoryManager { +class JITMemoryManager : public RTDyldMemoryManager { protected: bool HasGOT; @@ -47,17 +49,6 @@ public: /// debugging, and may be turned on by default in debug mode. virtual void setPoisonMemory(bool poison) = 0; - /// getPointerToNamedFunction - This method returns the address of the - /// specified function. As such it is only useful for resolving library - /// symbols, not code generated symbols. - /// - /// If AbortOnFailure is false and no function with the given name is - /// found, this function silently returns a null pointer. Otherwise, - /// it prints a message to stderr and aborts. - /// - virtual void *getPointerToNamedFunction(const std::string &Name, - bool AbortOnFailure = true) = 0; - //===--------------------------------------------------------------------===// // Global Offset Table Management //===--------------------------------------------------------------------===// @@ -112,22 +103,6 @@ public: virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart, uint8_t *FunctionEnd) = 0; - /// allocateCodeSection - Allocate a memory block of (at least) the given - /// size suitable for executable code. The SectionID is a unique identifier - /// assigned by the JIT and passed through to the memory manager for - /// the instance class to use if it needs to communicate to the JIT about - /// a given section after the fact. - virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, - unsigned SectionID) = 0; - - /// allocateDataSection - Allocate a memory block of (at least) the given - /// size suitable for data. The SectionID is a unique identifier - /// assigned by the JIT and passed through to the memory manager for - /// the instance class to use if it needs to communicate to the JIT about - /// a given section after the fact. - virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, - unsigned SectionID) = 0; - /// allocateSpace - Allocate a memory block of the given size. 
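The two object-level notifications added above replace the per-function callbacks for MCJIT-emitted code. A minimal sketch of a listener that consumes them is below; the class name and the logging are illustrative, and attaching it (presumably via ExecutionEngine::RegisterJITEventListener, which does not appear to change in this patch) is left to the caller:

    #include "llvm/ExecutionEngine/JITEventListener.h"
    #include "llvm/ExecutionEngine/ObjectImage.h"
    #include "llvm/Support/raw_ostream.h"

    namespace {
    // Hedged sketch of a client-side listener for the new object callbacks.
    class ObjectLoggingListener : public llvm::JITEventListener {
    public:
      virtual void NotifyObjectEmitted(const llvm::ObjectImage &Obj) {
        // Called once per emitted object; NotifyFunctionEmitted is not called
        // for the individual functions the object contains.
        llvm::errs() << "object emitted, " << Obj.getData().size() << " bytes\n";
      }
      virtual void NotifyFreeingObject(const llvm::ObjectImage &Obj) {
        llvm::errs() << "object about to be freed\n";
      }
    };
    } // end anonymous namespace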
This method /// cannot be called between calls to startFunctionBody and endFunctionBody. virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) = 0; diff --git a/include/llvm/ExecutionEngine/ObjectBuffer.h b/include/llvm/ExecutionEngine/ObjectBuffer.h new file mode 100644 index 0000000..a0a77b8 --- /dev/null +++ b/include/llvm/ExecutionEngine/ObjectBuffer.h @@ -0,0 +1,80 @@ +//===---- ObjectBuffer.h - Utility class to wrap object image memory -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares a wrapper class to hold the memory into which an +// object will be generated. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTIONENGINE_OBJECTBUFFER_H +#define LLVM_EXECUTIONENGINE_OBJECTBUFFER_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/MemoryBuffer.h" + +namespace llvm { + +/// ObjectBuffer - This class acts as a container for the memory buffer used during +/// generation and loading of executable objects using MCJIT and RuntimeDyld. The +/// underlying memory for the object will be owned by the ObjectBuffer instance +/// throughout its lifetime. The getMemBuffer() method provides a way to create a +/// MemoryBuffer wrapper object instance to be owned by other classes (such as +/// ObjectFile) as needed, but the MemoryBuffer instance returned does not own the +/// actual memory it points to. +class ObjectBuffer { +public: + ObjectBuffer() {} + ObjectBuffer(MemoryBuffer* Buf) : Buffer(Buf) {} + virtual ~ObjectBuffer() {} + + /// getMemBuffer - Like MemoryBuffer::getMemBuffer() this function + /// returns a pointer to an object that is owned by the caller. However, + /// the caller does not take ownership of the underlying memory. + MemoryBuffer *getMemBuffer() const { + return MemoryBuffer::getMemBuffer(Buffer->getBuffer(), "", false); + } + + const char *getBufferStart() const { return Buffer->getBufferStart(); } + size_t getBufferSize() const { return Buffer->getBufferSize(); } + +protected: + // The memory contained in an ObjectBuffer + OwningPtr Buffer; +}; + +/// ObjectBufferStream - This class encapsulates the SmallVector and +/// raw_svector_ostream needed to generate an object using MC code emission +/// while providing a common ObjectBuffer interface for access to the +/// memory once the object has been generated. +class ObjectBufferStream : public ObjectBuffer { +public: + ObjectBufferStream() : OS(SV) {} + virtual ~ObjectBufferStream() {} + + raw_ostream &getOStream() { return OS; } + void flush() + { + OS.flush(); + + // Make the data accessible via the ObjectBuffer::Buffer + Buffer.reset(MemoryBuffer::getMemBuffer(StringRef(SV.data(), SV.size()), + "", + false)); + } + +protected: + SmallVector SV; // Working buffer into which we JIT. 
+ raw_svector_ostream OS; // streaming wrapper +}; + +} // namespace llvm + +#endif diff --git a/include/llvm/ExecutionEngine/ObjectImage.h b/include/llvm/ExecutionEngine/ObjectImage.h new file mode 100644 index 0000000..82549ad --- /dev/null +++ b/include/llvm/ExecutionEngine/ObjectImage.h @@ -0,0 +1,61 @@ +//===---- ObjectImage.h - Format independent executuable object image -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares a file format independent ObjectImage class. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTIONENGINE_OBJECTIMAGE_H +#define LLVM_EXECUTIONENGINE_OBJECTIMAGE_H + +#include "llvm/Object/ObjectFile.h" +#include "llvm/ExecutionEngine/ObjectBuffer.h" + +namespace llvm { + + +/// ObjectImage - A container class that represents an ObjectFile that has been +/// or is in the process of being loaded into memory for execution. +class ObjectImage { + ObjectImage() LLVM_DELETED_FUNCTION; + ObjectImage(const ObjectImage &other) LLVM_DELETED_FUNCTION; + +protected: + OwningPtr Buffer; + +public: + ObjectImage(ObjectBuffer *Input) : Buffer(Input) {} + virtual ~ObjectImage() {} + + virtual object::symbol_iterator begin_symbols() const = 0; + virtual object::symbol_iterator end_symbols() const = 0; + + virtual object::section_iterator begin_sections() const = 0; + virtual object::section_iterator end_sections() const = 0; + + virtual /* Triple::ArchType */ unsigned getArch() const = 0; + + // Subclasses can override these methods to update the image with loaded + // addresses for sections and common symbols + virtual void updateSectionAddress(const object::SectionRef &Sec, + uint64_t Addr) = 0; + virtual void updateSymbolAddress(const object::SymbolRef &Sym, + uint64_t Addr) = 0; + + virtual StringRef getData() const = 0; + + // Subclasses can override these methods to provide JIT debugging support + virtual void registerWithDebugger() = 0; + virtual void deregisterWithDebugger() = 0; +}; + +} // end namespace llvm + +#endif // LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H + diff --git a/include/llvm/ExecutionEngine/RuntimeDyld.h b/include/llvm/ExecutionEngine/RuntimeDyld.h index a5c9272..891f534 100644 --- a/include/llvm/ExecutionEngine/RuntimeDyld.h +++ b/include/llvm/ExecutionEngine/RuntimeDyld.h @@ -15,43 +15,55 @@ #define LLVM_RUNTIME_DYLD_H #include "llvm/ADT/StringRef.h" +#include "llvm/ExecutionEngine/ObjectBuffer.h" #include "llvm/Support/Memory.h" namespace llvm { class RuntimeDyldImpl; -class MemoryBuffer; +class ObjectImage; // RuntimeDyld clients often want to handle the memory management of -// what gets placed where. For JIT clients, this is an abstraction layer -// over the JITMemoryManager, which references objects by their source -// representations in LLVM IR. +// what gets placed where. For JIT clients, this is the subset of +// JITMemoryManager required for dynamic loading of binaries. +// // FIXME: As the RuntimeDyld fills out, additional routines will be needed // for the varying types of objects to be allocated. 
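ObjectBufferStream above couples the working SmallVector with a raw_svector_ostream so MC emission can write straight into the buffer that RuntimeDyld will later load. A sketch of the intended produce-then-hand-off pattern, with the actual emission step abstracted behind an assumed callback:

    #include "llvm/ExecutionEngine/ObjectBuffer.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include "llvm/Support/raw_ostream.h"
    #include "llvm/ADT/OwningPtr.h"

    // Hedged sketch: emit object bytes into an ObjectBufferStream, publish them
    // with flush(), and hand out a non-owning MemoryBuffer view. "emitObject"
    // stands in for whatever writes the object (e.g. an MC object streamer).
    void produceObject(void (*emitObject)(llvm::raw_ostream &)) {
      llvm::ObjectBufferStream Buf;
      emitObject(Buf.getOStream());   // code emission targets this stream
      Buf.flush();                    // expose the bytes through the base class

      // The returned MemoryBuffer does not own the underlying memory; the
      // ObjectBufferStream keeps ownership for its whole lifetime.
      llvm::OwningPtr<llvm::MemoryBuffer> View(Buf.getMemBuffer());
      (void)View;
    }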
class RTDyldMemoryManager { - RTDyldMemoryManager(const RTDyldMemoryManager&); // DO NOT IMPLEMENT - void operator=(const RTDyldMemoryManager&); // DO NOT IMPLEMENT + RTDyldMemoryManager(const RTDyldMemoryManager&) LLVM_DELETED_FUNCTION; + void operator=(const RTDyldMemoryManager&) LLVM_DELETED_FUNCTION; public: RTDyldMemoryManager() {} virtual ~RTDyldMemoryManager(); /// allocateCodeSection - Allocate a memory block of (at least) the given - /// size suitable for executable code. + /// size suitable for executable code. The SectionID is a unique identifier + /// assigned by the JIT engine, and optionally recorded by the memory manager + /// to access a loaded section. virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) = 0; /// allocateDataSection - Allocate a memory block of (at least) the given - /// size suitable for data. + /// size suitable for data. The SectionID is a unique identifier + /// assigned by the JIT engine, and optionally recorded by the memory manager + /// to access a loaded section. virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID) = 0; + /// getPointerToNamedFunction - This method returns the address of the + /// specified function. As such it is only useful for resolving library + /// symbols, not code generated symbols. + /// + /// If AbortOnFailure is false and no function with the given name is + /// found, this function returns a null pointer. Otherwise, it prints a + /// message to stderr and aborts. virtual void *getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure = true) = 0; }; class RuntimeDyld { - RuntimeDyld(const RuntimeDyld &); // DO NOT IMPLEMENT - void operator=(const RuntimeDyld &); // DO NOT IMPLEMENT + RuntimeDyld(const RuntimeDyld &) LLVM_DELETED_FUNCTION; + void operator=(const RuntimeDyld &) LLVM_DELETED_FUNCTION; // RuntimeDyldImpl is the actual class. RuntimeDyld is just the public // interface. @@ -62,17 +74,24 @@ protected: // Any relocations already associated with the symbol will be re-resolved. void reassignSectionAddress(unsigned SectionID, uint64_t Addr); public: - RuntimeDyld(RTDyldMemoryManager*); + RuntimeDyld(RTDyldMemoryManager *); ~RuntimeDyld(); - /// Load an in-memory object file into the dynamic linker. - bool loadObject(MemoryBuffer *InputBuffer); + /// loadObject - prepare the object contained in the input buffer for + /// execution. Ownership of the input buffer is transferred to the + /// ObjectImage instance returned from this function if successful. + /// In the case of load failure, the input buffer will be deleted. + ObjectImage *loadObject(ObjectBuffer *InputBuffer); /// Get the address of our local copy of the symbol. This may or may not /// be the address used for relocation (clients can copy the data around /// and resolve relocatons based on where they put it). void *getSymbolAddress(StringRef Name); + /// Get the address of the target copy of the symbol. This is the address + /// used for relocation. + uint64_t getSymbolLoadAddress(StringRef Name); + /// Resolve the relocations for all symbols we currently know about. void resolveRelocations(); @@ -80,7 +99,7 @@ public: /// Map the address of a JIT section as returned from the memory manager /// to the address in the target process as the running code will see it. /// This is the address which will be used for relocation resolution. 
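With loadObject() now taking an ObjectBuffer and returning an ObjectImage, the basic RuntimeDyld flow looks roughly like the sketch below. The memory manager, the symbol name and the assumption that the caller owns the returned image are illustrative:

    #include "llvm/ExecutionEngine/RuntimeDyld.h"
    #include "llvm/ExecutionEngine/ObjectBuffer.h"
    #include "llvm/ExecutionEngine/ObjectImage.h"
    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Support/DataTypes.h"

    // Hedged sketch of the revised load/resolve/look-up sequence.
    bool loadAndResolve(llvm::RTDyldMemoryManager *MM, llvm::ObjectBuffer *Buf) {
      llvm::RuntimeDyld Dyld(MM);

      // Ownership of Buf passes to the returned image on success; on failure
      // the buffer has already been deleted, per the comment above.
      llvm::OwningPtr<llvm::ObjectImage> Image(Dyld.loadObject(Buf));
      if (Image.get() == 0)
        return false;

      Dyld.resolveRelocations();

      // Local address (our copy of the symbol) vs. load address (the address
      // relocations are resolved against).
      void *Local = Dyld.getSymbolAddress("main");
      uint64_t Load = Dyld.getSymbolLoadAddress("main");
      return Local != 0 && Load != 0;
    }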
- void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress); + void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress); StringRef getErrorString(); }; diff --git a/include/llvm/Function.h b/include/llvm/Function.h index fdd90d1..e211e9a 100644 --- a/include/llvm/Function.h +++ b/include/llvm/Function.h @@ -109,9 +109,9 @@ private: BuildLazyArguments(); } void BuildLazyArguments() const; - - Function(const Function&); // DO NOT IMPLEMENT - void operator=(const Function&); // DO NOT IMPLEMENT + + Function(const Function&) LLVM_DELETED_FUNCTION; + void operator=(const Function&) LLVM_DELETED_FUNCTION; /// Function ctor - If the (optional) Module argument is specified, the /// function is automatically inserted into the end of the function list for @@ -168,17 +168,17 @@ public: /// void setAttributes(const AttrListPtr &attrs) { AttributeList = attrs; } - /// hasFnAttr - Return true if this function has the given attribute. - bool hasFnAttr(Attributes N) const { - // Function Attributes are stored at ~0 index - return AttributeList.paramHasAttr(~0U, N); + /// getFnAttributes - Return the function attributes for querying. + /// + Attributes getFnAttributes() const { + return AttributeList.getFnAttributes(); } /// addFnAttr - Add function attributes to this function. /// - void addFnAttr(Attributes N) { + void addFnAttr(Attributes::AttrVal N) { // Function Attributes are stored at ~0 index - addAttribute(~0U, N); + addAttribute(AttrListPtr::FunctionIndex, Attributes::get(getContext(), N)); } /// removeFnAttr - Remove function attributes from this function. @@ -195,9 +195,15 @@ public: void setGC(const char *Str); void clearGC(); - /// @brief Determine whether the function has the given attribute. - bool paramHasAttr(unsigned i, Attributes attr) const { - return AttributeList.paramHasAttr(i, attr); + + /// getRetAttributes - Return the return attributes for querying. + Attributes getRetAttributes() const { + return AttributeList.getRetAttributes(); + } + + /// getParamAttributes - Return the parameter attributes for querying. + Attributes getParamAttributes(unsigned Idx) const { + return AttributeList.getParamAttributes(Idx); } /// addAttribute - adds the attribute to the list of attributes. @@ -213,50 +219,44 @@ public: /// @brief Determine if the function does not access memory. bool doesNotAccessMemory() const { - return hasFnAttr(Attribute::ReadNone); + return getFnAttributes().hasAttribute(Attributes::ReadNone); } - void setDoesNotAccessMemory(bool DoesNotAccessMemory = true) { - if (DoesNotAccessMemory) addFnAttr(Attribute::ReadNone); - else removeFnAttr(Attribute::ReadNone); + void setDoesNotAccessMemory() { + addFnAttr(Attributes::ReadNone); } /// @brief Determine if the function does not access or only reads memory. bool onlyReadsMemory() const { - return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly); + return doesNotAccessMemory() || + getFnAttributes().hasAttribute(Attributes::ReadOnly); } - void setOnlyReadsMemory(bool OnlyReadsMemory = true) { - if (OnlyReadsMemory) addFnAttr(Attribute::ReadOnly); - else removeFnAttr(Attribute::ReadOnly | Attribute::ReadNone); + void setOnlyReadsMemory() { + addFnAttr(Attributes::ReadOnly); } /// @brief Determine if the function cannot return. 
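The Function attribute changes above drop the bool-toggle setters and route queries through an Attributes value. A small sketch of the new spelling; the function reference and the helper names are illustrative:

    #include "llvm/Function.h"
    #include "llvm/Attributes.h"

    // Hedged sketch: the old F.hasFnAttr(Attribute::ReadNone) style becomes a
    // query on the Attributes bundle returned by getFnAttributes().
    bool mayWriteMemory(const llvm::Function &F) {
      return !F.getFnAttributes().hasAttribute(llvm::Attributes::ReadNone) &&
             !F.getFnAttributes().hasAttribute(llvm::Attributes::ReadOnly);
    }

    void markReadNone(llvm::Function &F) {
      // The setter no longer takes a bool; it can only add the attribute,
      // mirroring the new addFnAttr(Attributes::AttrVal) overload.
      F.setDoesNotAccessMemory();
    }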
bool doesNotReturn() const { - return hasFnAttr(Attribute::NoReturn); + return getFnAttributes().hasAttribute(Attributes::NoReturn); } - void setDoesNotReturn(bool DoesNotReturn = true) { - if (DoesNotReturn) addFnAttr(Attribute::NoReturn); - else removeFnAttr(Attribute::NoReturn); + void setDoesNotReturn() { + addFnAttr(Attributes::NoReturn); } /// @brief Determine if the function cannot unwind. bool doesNotThrow() const { - return hasFnAttr(Attribute::NoUnwind); + return getFnAttributes().hasAttribute(Attributes::NoUnwind); } - void setDoesNotThrow(bool DoesNotThrow = true) { - if (DoesNotThrow) addFnAttr(Attribute::NoUnwind); - else removeFnAttr(Attribute::NoUnwind); + void setDoesNotThrow() { + addFnAttr(Attributes::NoUnwind); } /// @brief True if the ABI mandates (or the user requested) that this /// function be in a unwind table. bool hasUWTable() const { - return hasFnAttr(Attribute::UWTable); + return getFnAttributes().hasAttribute(Attributes::UWTable); } - void setHasUWTable(bool HasUWTable = true) { - if (HasUWTable) - addFnAttr(Attribute::UWTable); - else - removeFnAttr(Attribute::UWTable); + void setHasUWTable() { + addFnAttr(Attributes::UWTable); } /// @brief True if this function needs an unwind table. @@ -267,27 +267,25 @@ public: /// @brief Determine if the function returns a structure through first /// pointer argument. bool hasStructRetAttr() const { - return paramHasAttr(1, Attribute::StructRet); + return getParamAttributes(1).hasAttribute(Attributes::StructRet); } /// @brief Determine if the parameter does not alias other parameters. /// @param n The parameter to check. 1 is the first parameter, 0 is the return bool doesNotAlias(unsigned n) const { - return paramHasAttr(n, Attribute::NoAlias); + return getParamAttributes(n).hasAttribute(Attributes::NoAlias); } - void setDoesNotAlias(unsigned n, bool DoesNotAlias = true) { - if (DoesNotAlias) addAttribute(n, Attribute::NoAlias); - else removeAttribute(n, Attribute::NoAlias); + void setDoesNotAlias(unsigned n) { + addAttribute(n, Attributes::get(getContext(), Attributes::NoAlias)); } /// @brief Determine if the parameter can be captured. /// @param n The parameter to check. 
1 is the first parameter, 0 is the return bool doesNotCapture(unsigned n) const { - return paramHasAttr(n, Attribute::NoCapture); + return getParamAttributes(n).hasAttribute(Attributes::NoCapture); } - void setDoesNotCapture(unsigned n, bool DoesNotCapture = true) { - if (DoesNotCapture) addAttribute(n, Attribute::NoCapture); - else removeAttribute(n, Attribute::NoCapture); + void setDoesNotCapture(unsigned n) { + addAttribute(n, Attributes::get(getContext(), Attributes::NoCapture)); } /// copyAttributesFrom - copy all additional attributes (those not needed to @@ -400,7 +398,6 @@ public: void viewCFGOnly() const; /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const Function *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == Value::FunctionVal; } diff --git a/include/llvm/GlobalAlias.h b/include/llvm/GlobalAlias.h index 164d976..d0f0147 100644 --- a/include/llvm/GlobalAlias.h +++ b/include/llvm/GlobalAlias.h @@ -28,8 +28,8 @@ template class GlobalAlias : public GlobalValue, public ilist_node { friend class SymbolTableListTraits; - void operator=(const GlobalAlias &); // Do not implement - GlobalAlias(const GlobalAlias &); // Do not implement + void operator=(const GlobalAlias &) LLVM_DELETED_FUNCTION; + GlobalAlias(const GlobalAlias &) LLVM_DELETED_FUNCTION; void setParent(Module *parent); @@ -76,7 +76,6 @@ public: const GlobalValue *resolveAliasedGlobal(bool stopOnWeak = true) const; // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const GlobalAlias *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == Value::GlobalAliasVal; } diff --git a/include/llvm/GlobalValue.h b/include/llvm/GlobalValue.h index 8b969f3..7f7f74b 100644 --- a/include/llvm/GlobalValue.h +++ b/include/llvm/GlobalValue.h @@ -26,7 +26,7 @@ class PointerType; class Module; class GlobalValue : public Constant { - GlobalValue(const GlobalValue &); // do not implement + GlobalValue(const GlobalValue &) LLVM_DELETED_FUNCTION; public: /// @brief An enumeration for the kinds of linkage for global values. enum LinkageTypes { @@ -34,6 +34,7 @@ public: AvailableExternallyLinkage, ///< Available for inspection, not emission. LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline) LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent. + LinkOnceODRAutoHideLinkage, ///< Like LinkOnceODRLinkage but addr not taken. WeakAnyLinkage, ///< Keep one copy of named function when linking (weak) WeakODRLinkage, ///< Same, but only replaced by something equivalent. AppendingLinkage, ///< Special purpose, only applies to global arrays @@ -41,8 +42,6 @@ public: PrivateLinkage, ///< Like Internal, but omit from symbol table. LinkerPrivateLinkage, ///< Like Private, but linker removes. LinkerPrivateWeakLinkage, ///< Like LinkerPrivate, but weak. - LinkerPrivateWeakDefAutoLinkage, ///< Like LinkerPrivateWeak, but possibly - /// hidden. DLLImportLinkage, ///< Function to be imported from DLL DLLExportLinkage, ///< Function to be accessible from DLL. ExternalWeakLinkage,///< ExternalWeak linkage description. 
@@ -123,7 +122,12 @@ public: return Linkage == AvailableExternallyLinkage; } static bool isLinkOnceLinkage(LinkageTypes Linkage) { - return Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage; + return Linkage == LinkOnceAnyLinkage || + Linkage == LinkOnceODRLinkage || + Linkage == LinkOnceODRAutoHideLinkage; + } + static bool isLinkOnceODRAutoHideLinkage(LinkageTypes Linkage) { + return Linkage == LinkOnceODRAutoHideLinkage; } static bool isWeakLinkage(LinkageTypes Linkage) { return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage; @@ -143,13 +147,9 @@ public: static bool isLinkerPrivateWeakLinkage(LinkageTypes Linkage) { return Linkage == LinkerPrivateWeakLinkage; } - static bool isLinkerPrivateWeakDefAutoLinkage(LinkageTypes Linkage) { - return Linkage == LinkerPrivateWeakDefAutoLinkage; - } static bool isLocalLinkage(LinkageTypes Linkage) { return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage) || - isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage) || - isLinkerPrivateWeakDefAutoLinkage(Linkage); + isLinkerPrivateLinkage(Linkage) || isLinkerPrivateWeakLinkage(Linkage); } static bool isDLLImportLinkage(LinkageTypes Linkage) { return Linkage == DLLImportLinkage; @@ -178,8 +178,7 @@ public: Linkage == LinkOnceAnyLinkage || Linkage == CommonLinkage || Linkage == ExternalWeakLinkage || - Linkage == LinkerPrivateWeakLinkage || - Linkage == LinkerPrivateWeakDefAutoLinkage; + Linkage == LinkerPrivateWeakLinkage; } /// isWeakForLinker - Whether the definition of this global may be replaced at @@ -192,10 +191,10 @@ public: Linkage == WeakODRLinkage || Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage || + Linkage == LinkOnceODRAutoHideLinkage || Linkage == CommonLinkage || Linkage == ExternalWeakLinkage || - Linkage == LinkerPrivateWeakLinkage || - Linkage == LinkerPrivateWeakDefAutoLinkage; + Linkage == LinkerPrivateWeakLinkage; } bool hasExternalLinkage() const { return isExternalLinkage(Linkage); } @@ -205,6 +204,9 @@ public: bool hasLinkOnceLinkage() const { return isLinkOnceLinkage(Linkage); } + bool hasLinkOnceODRAutoHideLinkage() const { + return isLinkOnceODRAutoHideLinkage(Linkage); + } bool hasWeakLinkage() const { return isWeakLinkage(Linkage); } @@ -215,9 +217,6 @@ public: bool hasLinkerPrivateWeakLinkage() const { return isLinkerPrivateWeakLinkage(Linkage); } - bool hasLinkerPrivateWeakDefAutoLinkage() const { - return isLinkerPrivateWeakDefAutoLinkage(Linkage); - } bool hasLocalLinkage() const { return isLocalLinkage(Linkage); } bool hasDLLImportLinkage() const { return isDLLImportLinkage(Linkage); } bool hasDLLExportLinkage() const { return isDLLExportLinkage(Linkage); } @@ -288,7 +287,6 @@ public: inline const Module *getParent() const { return Parent; } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const GlobalValue *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == Value::FunctionVal || V->getValueID() == Value::GlobalVariableVal || diff --git a/include/llvm/GlobalVariable.h b/include/llvm/GlobalVariable.h index 99b7a73..b9d3f68 100644 --- a/include/llvm/GlobalVariable.h +++ b/include/llvm/GlobalVariable.h @@ -34,9 +34,9 @@ template class GlobalVariable : public GlobalValue, public ilist_node { friend class SymbolTableListTraits; - void *operator new(size_t, unsigned); // Do not implement - void operator=(const GlobalVariable &); // Do not implement - GlobalVariable(const GlobalVariable &); // Do not implement + void *operator 
new(size_t, unsigned) LLVM_DELETED_FUNCTION; + void operator=(const GlobalVariable &) LLVM_DELETED_FUNCTION; + GlobalVariable(const GlobalVariable &) LLVM_DELETED_FUNCTION; void setParent(Module *parent); @@ -174,7 +174,6 @@ public: virtual void replaceUsesOfWithOnConstant(Value *From, Value *To, Use *U); // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const GlobalVariable *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == Value::GlobalVariableVal; } diff --git a/include/llvm/IRBuilder.h b/include/llvm/IRBuilder.h index d5b6f47..f63a160 100644 --- a/include/llvm/IRBuilder.h +++ b/include/llvm/IRBuilder.h @@ -17,6 +17,7 @@ #include "llvm/Instructions.h" #include "llvm/BasicBlock.h" +#include "llvm/DataLayout.h" #include "llvm/LLVMContext.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/StringRef.h" @@ -266,6 +267,10 @@ public: return Type::getInt8PtrTy(Context, AddrSpace); } + IntegerType* getIntPtrTy(DataLayout *DL, unsigned AddrSpace = 0) { + return DL->getIntPtrType(Context, AddrSpace); + } + //===--------------------------------------------------------------------===// // Intrinsic creation methods //===--------------------------------------------------------------------===// @@ -285,12 +290,15 @@ public: /// If the pointers aren't i8*, they will be converted. If a TBAA tag is /// specified, it will be added to the instruction. CallInst *CreateMemCpy(Value *Dst, Value *Src, uint64_t Size, unsigned Align, - bool isVolatile = false, MDNode *TBAATag = 0) { - return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag); + bool isVolatile = false, MDNode *TBAATag = 0, + MDNode *TBAAStructTag = 0) { + return CreateMemCpy(Dst, Src, getInt64(Size), Align, isVolatile, TBAATag, + TBAAStructTag); } CallInst *CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align, - bool isVolatile = false, MDNode *TBAATag = 0); + bool isVolatile = false, MDNode *TBAATag = 0, + MDNode *TBAAStructTag = 0); /// CreateMemMove - Create and insert a memmove between the specified /// pointers. If the pointers aren't i8*, they will be converted. If a TBAA @@ -810,6 +818,31 @@ public: StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) { return Insert(new StoreInst(Val, Ptr, isVolatile)); } + // Provided to resolve 'CreateAlignedLoad(Ptr, Align, "...")' correctly, + // instead of converting the string to 'bool' for the isVolatile parameter. 
+ LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, const char *Name) { + LoadInst *LI = CreateLoad(Ptr, Name); + LI->setAlignment(Align); + return LI; + } + LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, + const Twine &Name = "") { + LoadInst *LI = CreateLoad(Ptr, Name); + LI->setAlignment(Align); + return LI; + } + LoadInst *CreateAlignedLoad(Value *Ptr, unsigned Align, bool isVolatile, + const Twine &Name = "") { + LoadInst *LI = CreateLoad(Ptr, isVolatile, Name); + LI->setAlignment(Align); + return LI; + } + StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, unsigned Align, + bool isVolatile = false) { + StoreInst *SI = CreateStore(Val, Ptr, isVolatile); + SI->setAlignment(Align); + return SI; + } FenceInst *CreateFence(AtomicOrdering Ordering, SynchronizationScope SynchScope = CrossThread) { return Insert(new FenceInst(Context, Ordering, SynchScope)); @@ -970,6 +1003,30 @@ public: Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") { return CreateCast(Instruction::SExt, V, DestTy, Name); } + /// CreateZExtOrTrunc - Create a ZExt or Trunc from the integer value V to + /// DestTy. Return the value untouched if the type of V is already DestTy. + Value *CreateZExtOrTrunc(Value *V, IntegerType *DestTy, + const Twine &Name = "") { + assert(isa(V->getType()) && "Can only zero extend integers!"); + IntegerType *IntTy = cast(V->getType()); + if (IntTy->getBitWidth() < DestTy->getBitWidth()) + return CreateZExt(V, DestTy, Name); + if (IntTy->getBitWidth() > DestTy->getBitWidth()) + return CreateTrunc(V, DestTy, Name); + return V; + } + /// CreateSExtOrTrunc - Create a SExt or Trunc from the integer value V to + /// DestTy. Return the value untouched if the type of V is already DestTy. + Value *CreateSExtOrTrunc(Value *V, IntegerType *DestTy, + const Twine &Name = "") { + assert(isa(V->getType()) && "Can only sign extend integers!"); + IntegerType *IntTy = cast(V->getType()); + if (IntTy->getBitWidth() < DestTy->getBitWidth()) + return CreateSExt(V, DestTy, Name); + if (IntTy->getBitWidth() > DestTy->getBitWidth()) + return CreateTrunc(V, DestTy, Name); + return V; + } Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = ""){ return CreateCast(Instruction::FPToUI, V, DestTy, Name); } @@ -1052,7 +1109,7 @@ public: private: // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a compile time // error, instead of converting the string to bool for the isSigned parameter. - Value *CreateIntCast(Value *, Type *, const char *); // DO NOT IMPLEMENT + Value *CreateIntCast(Value *, Type *, const char *) LLVM_DELETED_FUNCTION; public: Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") { if (V->getType() == DestTy) @@ -1261,13 +1318,13 @@ public: // Utility creation methods //===--------------------------------------------------------------------===// - /// CreateIsNull - Return an i1 value testing if \arg Arg is null. + /// CreateIsNull - Return an i1 value testing if \p Arg is null. Value *CreateIsNull(Value *Arg, const Twine &Name = "") { return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()), Name); } - /// CreateIsNotNull - Return an i1 value testing if \arg Arg is not null. + /// CreateIsNotNull - Return an i1 value testing if \p Arg is not null. 
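The IRBuilder additions above (getIntPtrTy, the aligned load/store helpers, and CreateZExtOrTrunc/CreateSExtOrTrunc) compose naturally; a sketch is below. The builder, DataLayout and pointer values are assumed to come from the caller, the source pointer is assumed to address integer data (CreateZExtOrTrunc asserts otherwise), and the alignment of 4 is purely illustrative:

    #include "llvm/IRBuilder.h"
    #include "llvm/DataLayout.h"

    // Hedged sketch: aligned load, width adjustment to the target's
    // pointer-sized integer type, aligned store.
    void copyAsIntPtr(llvm::IRBuilder<> &B, llvm::DataLayout *DL,
                      llvm::Value *SrcPtr, llvm::Value *DstPtr) {
      // New convenience accessor for the pointer-sized integer type
      // (address space 0 by default).
      llvm::IntegerType *IntPtrTy = B.getIntPtrTy(DL);
      // The const char* overload added above keeps "src.val" from being
      // misinterpreted as the isVolatile flag.
      llvm::LoadInst *L = B.CreateAlignedLoad(SrcPtr, 4, "src.val");
      // Picks zext, trunc or nothing depending on the two bit widths.
      llvm::Value *V = B.CreateZExtOrTrunc(L, IntPtrTy, "src.cast");
      B.CreateAlignedStore(V, DstPtr, 4);
    }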
Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") { return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()), Name); diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index de97957..8c164eb 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -66,6 +66,7 @@ void initializeAliasDebuggerPass(PassRegistry&); void initializeAliasSetPrinterPass(PassRegistry&); void initializeAlwaysInlinerPass(PassRegistry&); void initializeArgPromotionPass(PassRegistry&); +void initializeBarrierNoopPass(PassRegistry&); void initializeBasicAliasAnalysisPass(PassRegistry&); void initializeBasicCallGraphPass(PassRegistry&); void initializeBlockExtractorPassPass(PassRegistry&); @@ -87,6 +88,7 @@ void initializeCodePlacementOptPass(PassRegistry&); void initializeConstantMergePass(PassRegistry&); void initializeConstantPropagationPass(PassRegistry&); void initializeMachineCopyPropagationPass(PassRegistry&); +void initializeCostModelAnalysisPass(PassRegistry&); void initializeCorrelatedValuePropagationPass(PassRegistry&); void initializeDAEPass(PassRegistry&); void initializeDAHPass(PassRegistry&); @@ -94,6 +96,7 @@ void initializeDCEPass(PassRegistry&); void initializeDSEPass(PassRegistry&); void initializeDeadInstEliminationPass(PassRegistry&); void initializeDeadMachineInstructionElimPass(PassRegistry&); +void initializeDependenceAnalysisPass(PassRegistry&); void initializeDomOnlyPrinterPass(PassRegistry&); void initializeDomOnlyViewerPass(PassRegistry&); void initializeDomPrinterPass(PassRegistry&); @@ -141,10 +144,10 @@ void initializeLiveRegMatrixPass(PassRegistry&); void initializeLiveStacksPass(PassRegistry&); void initializeLiveVariablesPass(PassRegistry&); void initializeLoaderPassPass(PassRegistry&); +void initializeProfileMetadataLoaderPassPass(PassRegistry&); void initializePathProfileLoaderPassPass(PassRegistry&); void initializeLocalStackSlotPassPass(PassRegistry&); void initializeLoopDeletionPass(PassRegistry&); -void initializeLoopDependenceAnalysisPass(PassRegistry&); void initializeLoopExtractorPass(PassRegistry&); void initializeLoopInfoPass(PassRegistry&); void initializeLoopInstSimplifyPass(PassRegistry&); @@ -166,6 +169,7 @@ void initializeMachineBlockPlacementStatsPass(PassRegistry&); void initializeMachineBranchProbabilityInfoPass(PassRegistry&); void initializeMachineCSEPass(PassRegistry&); void initializeMachineDominatorTreePass(PassRegistry&); +void initializeMachinePostDominatorTreePass(PassRegistry&); void initializeMachineLICMPass(PassRegistry&); void initializeMachineLoopInfoPass(PassRegistry&); void initializeMachineLoopRangesPass(PassRegistry&); @@ -177,6 +181,7 @@ void initializeMachineVerifierPassPass(PassRegistry&); void initializeMemCpyOptPass(PassRegistry&); void initializeMemDepPrinterPass(PassRegistry&); void initializeMemoryDependenceAnalysisPass(PassRegistry&); +void initializeMetaRenamerPass(PassRegistry&); void initializeMergeFunctionsPass(PassRegistry&); void initializeModuleDebugInfoPrinterPass(PassRegistry&); void initializeNoAAPass(PassRegistry&); @@ -219,6 +224,7 @@ void initializeRegionOnlyViewerPass(PassRegistry&); void initializeRegionPrinterPass(PassRegistry&); void initializeRegionViewerPass(PassRegistry&); void initializeSCCPPass(PassRegistry&); +void initializeSROAPass(PassRegistry&); void initializeSROA_DTPass(PassRegistry&); void initializeSROA_SSAUpPass(PassRegistry&); void initializeScalarEvolutionAliasAnalysisPass(PassRegistry&); @@ -231,6 +237,7 @@ void 
initializeSinkingPass(PassRegistry&); void initializeSlotIndexesPass(PassRegistry&); void initializeSpillPlacementPass(PassRegistry&); void initializeStackProtectorPass(PassRegistry&); +void initializeStackColoringPass(PassRegistry&); void initializeStackSlotColoringPass(PassRegistry&); void initializeStripDeadDebugInfoPass(PassRegistry&); void initializeStripDeadPrototypesPassPass(PassRegistry&); @@ -241,7 +248,8 @@ void initializeStrongPHIEliminationPass(PassRegistry&); void initializeTailCallElimPass(PassRegistry&); void initializeTailDuplicatePassPass(PassRegistry&); void initializeTargetPassConfigPass(PassRegistry&); -void initializeTargetDataPass(PassRegistry&); +void initializeDataLayoutPass(PassRegistry&); +void initializeTargetTransformInfoPass(PassRegistry&); void initializeTargetLibraryInfoPass(PassRegistry&); void initializeTwoAddressInstructionPassPass(PassRegistry&); void initializeTypeBasedAliasAnalysisPass(PassRegistry&); @@ -254,6 +262,7 @@ void initializeVirtRegRewriterPass(PassRegistry&); void initializeInstSimplifierPass(PassRegistry&); void initializeUnpackMachineBundlesPass(PassRegistry&); void initializeFinalizeMachineBundlesPass(PassRegistry&); +void initializeLoopVectorizePass(PassRegistry&); void initializeBBVectorizePass(PassRegistry&); void initializeMachineFunctionPrinterPassPass(PassRegistry&); } diff --git a/include/llvm/InlineAsm.h b/include/llvm/InlineAsm.h index 37aa18b..b5e0fd4 100644 --- a/include/llvm/InlineAsm.h +++ b/include/llvm/InlineAsm.h @@ -33,20 +33,28 @@ template struct ConstantCreator; class InlineAsm : public Value { +public: + enum AsmDialect { + AD_ATT, + AD_Intel + }; + +private: friend struct ConstantCreator; friend class ConstantUniqueMap; - InlineAsm(const InlineAsm &); // do not implement - void operator=(const InlineAsm&); // do not implement + InlineAsm(const InlineAsm &) LLVM_DELETED_FUNCTION; + void operator=(const InlineAsm&) LLVM_DELETED_FUNCTION; std::string AsmString, Constraints; bool HasSideEffects; bool IsAlignStack; - + AsmDialect Dialect; + InlineAsm(PointerType *Ty, const std::string &AsmString, const std::string &Constraints, bool hasSideEffects, - bool isAlignStack); + bool isAlignStack, AsmDialect asmDialect); virtual ~InlineAsm(); /// When the ConstantUniqueMap merges two types and makes two InlineAsms @@ -58,11 +66,13 @@ public: /// static InlineAsm *get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, - bool isAlignStack = false); + bool isAlignStack = false, + AsmDialect asmDialect = AD_ATT); bool hasSideEffects() const { return HasSideEffects; } bool isAlignStack() const { return IsAlignStack; } - + AsmDialect getDialect() const { return Dialect; } + /// getType - InlineAsm's are always pointers. /// PointerType *getType() const { @@ -179,7 +189,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const InlineAsm *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() == Value::InlineAsmVal; } @@ -193,17 +202,20 @@ public: Op_InputChain = 0, Op_AsmString = 1, Op_MDNode = 2, - Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack + Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack, AsmDialect. Op_FirstOperand = 4, // Fixed operands on an INLINEASM MachineInstr. MIOp_AsmString = 0, - MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack + MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack, AsmDialect. MIOp_FirstOperand = 2, // Interpretation of the MIOp_ExtraInfo bit field. 
Extra_HasSideEffects = 1, Extra_IsAlignStack = 2, + Extra_AsmDialect = 4, + Extra_MayLoad = 8, + Extra_MayStore = 16, // Inline asm operands map to multiple SDNode / MachineInstr operands. // The first operand is an immediate describing the asm operand, the low diff --git a/include/llvm/InstrTypes.h b/include/llvm/InstrTypes.h index 2529f24..da17f3b 100644 --- a/include/llvm/InstrTypes.h +++ b/include/llvm/InstrTypes.h @@ -73,7 +73,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const TerminatorInst *) { return true; } static inline bool classof(const Instruction *I) { return I->isTerminator(); } @@ -88,7 +87,7 @@ public: //===----------------------------------------------------------------------===// class UnaryInstruction : public Instruction { - void *operator new(size_t, unsigned); // Do not implement + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; protected: UnaryInstruction(Type *Ty, unsigned iType, Value *V, @@ -113,7 +112,6 @@ public: DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const UnaryInstruction *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Alloca || I->getOpcode() == Instruction::Load || @@ -138,14 +136,14 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value) //===----------------------------------------------------------------------===// class BinaryOperator : public Instruction { - void *operator new(size_t, unsigned); // Do not implement + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; protected: void init(BinaryOps iType); BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, Instruction *InsertBefore); BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd); - virtual BinaryOperator *clone_impl() const; + virtual BinaryOperator *clone_impl() const LLVM_OVERRIDE; public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -361,7 +359,6 @@ public: bool isExact() const; // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const BinaryOperator *) { return true; } static inline bool classof(const Instruction *I) { return I->isBinaryOp(); } @@ -388,7 +385,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BinaryOperator, Value) /// if (isa(Instr)) { ... } /// @brief Base class of casting instructions. class CastInst : public UnaryInstruction { - virtual void anchor(); + virtual void anchor() LLVM_OVERRIDE; protected: /// @brief Constructor with insert-before-instruction semantics for subclasses CastInst(Type *Ty, unsigned iType, Value *S, @@ -563,7 +560,7 @@ public: /// IntPtrTy argument is used to make accurate determinations for casts /// involving Integer and Pointer types. They are no-op casts if the integer /// is the same size as the pointer. However, pointer size varies with - /// platform. Generally, the result of TargetData::getIntPtrType() should be + /// platform. Generally, the result of DataLayout::getIntPtrType() should be /// passed in. If that's not available, use Type::Int64Ty, which will make /// the isNoopCast call conservative. /// @brief Determine if the described cast is a no-op cast. @@ -581,8 +578,8 @@ public: /// Determine how a pair of casts can be eliminated, if they can be at all. /// This is a helper function for both CastInst and ConstantExpr. 
- /// @returns 0 if the CastInst pair can't be eliminated - /// @returns Instruction::CastOps value for a cast that can replace + /// @returns 0 if the CastInst pair can't be eliminated, otherwise + /// returns Instruction::CastOps value for a cast that can replace /// the pair, casting SrcTy to DstTy. /// @brief Determine if a cast pair is eliminable static unsigned isEliminableCastPair( @@ -591,7 +588,9 @@ public: Type *SrcTy, ///< SrcTy of 1st cast Type *MidTy, ///< DstTy of 1st cast & SrcTy of 2nd cast Type *DstTy, ///< DstTy of 2nd cast - Type *IntPtrTy ///< Integer type corresponding to Ptr types, or null + Type *SrcIntPtrTy, ///< Integer type corresponding to Ptr SrcTy, or null + Type *MidIntPtrTy, ///< Integer type corresponding to Ptr MidTy, or null + Type *DstIntPtrTy ///< Integer type corresponding to Ptr DstTy, or null ); /// @brief Return the opcode of this CastInst @@ -611,7 +610,6 @@ public: static bool castIsValid(Instruction::CastOps op, Value *S, Type *DstTy); /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const CastInst *) { return true; } static inline bool classof(const Instruction *I) { return I->isCast(); } @@ -627,8 +625,8 @@ public: /// This class is the base class for the comparison instructions. /// @brief Abstract base class of comparison instructions. class CmpInst : public Instruction { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT - CmpInst(); // do not implement + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + CmpInst() LLVM_DELETED_FUNCTION; protected: CmpInst(Type *ty, Instruction::OtherOps op, unsigned short pred, Value *LHS, Value *RHS, const Twine &Name = "", @@ -638,7 +636,7 @@ protected: Value *LHS, Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd); - virtual void Anchor() const; // Out of line virtual method. + virtual void anchor() LLVM_OVERRIDE; // Out of line virtual method. public: /// This enumeration lists the possible predicates for CmpInst subclasses. /// Values in the range 0-31 are reserved for FCmpInst, while values in the @@ -816,7 +814,6 @@ public: static bool isFalseWhenEqual(unsigned short predicate); /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const CmpInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::ICmp || I->getOpcode() == Instruction::FCmp; diff --git a/include/llvm/Instruction.h b/include/llvm/Instruction.h index 5512dcc..8aa8a56 100644 --- a/include/llvm/Instruction.h +++ b/include/llvm/Instruction.h @@ -28,8 +28,8 @@ template class SymbolTableListTraits; class Instruction : public User, public ilist_node { - void operator=(const Instruction &); // Do not implement - Instruction(const Instruction &); // Do not implement + void operator=(const Instruction &) LLVM_DELETED_FUNCTION; + Instruction(const Instruction &) LLVM_DELETED_FUNCTION; BasicBlock *Parent; DebugLoc DbgLoc; // 'dbg' Metadata cache. 
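Returning briefly to the InlineAsm changes a little further up: the new trailing AsmDialect parameter of InlineAsm::get() is presumably what lets a front end request Intel syntax instead of the AT&T default. A sketch; the empty prototype and the asm text are illustrative only:

    #include "llvm/InlineAsm.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/LLVMContext.h"

    // Hedged sketch: build a void() inline-asm value using the Intel dialect.
    llvm::InlineAsm *makeIntelNop(llvm::LLVMContext &Ctx) {
      llvm::FunctionType *FTy =
          llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), /*isVarArg=*/false);
      return llvm::InlineAsm::get(FTy, "nop", /*Constraints=*/"",
                                  /*hasSideEffects=*/true,
                                  /*isAlignStack=*/false,
                                  llvm::InlineAsm::AD_Intel);
    }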
@@ -310,7 +310,6 @@ public: /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const Instruction *) { return true; } static inline bool classof(const Value *V) { return V->getValueID() >= Value::InstructionVal; } diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h index f5187e6..69593b4 100644 --- a/include/llvm/Instructions.h +++ b/include/llvm/Instructions.h @@ -112,7 +112,6 @@ public: bool isStaticAlloca() const; // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const AllocaInst *) { return true; } static inline bool classof(const Instruction *I) { return (I->getOpcode() == Instruction::Alloca); } @@ -226,13 +225,13 @@ public: const Value *getPointerOperand() const { return getOperand(0); } static unsigned getPointerOperandIndex() { return 0U; } + /// \brief Returns the address space of the pointer operand. unsigned getPointerAddressSpace() const { - return cast(getPointerOperand()->getType())->getAddressSpace(); + return getPointerOperand()->getType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const LoadInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Load; } @@ -255,7 +254,7 @@ private: /// StoreInst - an instruction for storing to memory /// class StoreInst : public Instruction { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; void AssertOK(); protected: virtual StoreInst *clone_impl() const; @@ -349,12 +348,12 @@ public: const Value *getPointerOperand() const { return getOperand(1); } static unsigned getPointerOperandIndex() { return 1U; } + /// \brief Returns the address space of the pointer operand. unsigned getPointerAddressSpace() const { - return cast(getPointerOperand()->getType())->getAddressSpace(); + return getPointerOperand()->getType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const StoreInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Store; } @@ -382,7 +381,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value) /// FenceInst - an instruction for ordering other memory operations /// class FenceInst : public Instruction { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; void Init(AtomicOrdering Ordering, SynchronizationScope SynchScope); protected: virtual FenceInst *clone_impl() const; @@ -426,7 +425,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const FenceInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Fence; } @@ -450,7 +448,7 @@ private: /// there. Returns the value that was loaded. /// class AtomicCmpXchgInst : public Instruction { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; void Init(Value *Ptr, Value *Cmp, Value *NewVal, AtomicOrdering Ordering, SynchronizationScope SynchScope); protected: @@ -521,12 +519,12 @@ public: Value *getNewValOperand() { return getOperand(2); } const Value *getNewValOperand() const { return getOperand(2); } + /// \brief Returns the address space of the pointer operand. 
unsigned getPointerAddressSpace() const { - return cast(getPointerOperand()->getType())->getAddressSpace(); + return getPointerOperand()->getType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const AtomicCmpXchgInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::AtomicCmpXchg; } @@ -557,7 +555,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) /// the old value. /// class AtomicRMWInst : public Instruction { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; protected: virtual AtomicRMWInst *clone_impl() const; public: @@ -665,12 +663,12 @@ public: Value *getValOperand() { return getOperand(1); } const Value *getValOperand() const { return getOperand(1); } + /// \brief Returns the address space of the pointer operand. unsigned getPointerAddressSpace() const { - return cast(getPointerOperand()->getType())->getAddressSpace(); + return getPointerOperand()->getType()->getPointerAddressSpace(); } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const AtomicRMWInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::AtomicRMW; } @@ -768,6 +766,13 @@ public: return reinterpret_cast(Instruction::getType()); } + /// \brief Returns the address space of this instruction's pointer type. + unsigned getAddressSpace() const { + // Note that this is always the same as the pointer operand's address space + // and that is cheaper to compute, so cheat here. + return getPointerAddressSpace(); + } + /// getIndexedType - Returns the type of the element that would be loaded with /// a load instruction with the specified parameters. /// @@ -778,10 +783,6 @@ public: static Type *getIndexedType(Type *Ptr, ArrayRef IdxList); static Type *getIndexedType(Type *Ptr, ArrayRef IdxList); - /// getIndexedType - Returns the address space used by the GEP pointer. - /// - static unsigned getAddressSpace(Value *Ptr); - inline op_iterator idx_begin() { return op_begin()+1; } inline const_op_iterator idx_begin() const { return op_begin()+1; } inline op_iterator idx_end() { return op_end(); } @@ -797,22 +798,23 @@ public: return 0U; // get index for modifying correct operand. } - unsigned getPointerAddressSpace() const { - return cast(getType())->getAddressSpace(); - } - /// getPointerOperandType - Method to return the pointer operand as a /// PointerType. Type *getPointerOperandType() const { return getPointerOperand()->getType(); } + /// \brief Returns the address space of the pointer operand. + unsigned getPointerAddressSpace() const { + return getPointerOperandType()->getPointerAddressSpace(); + } + /// GetGEPReturnType - Returns the pointer type returned by the GEP /// instruction, which may be a vector of pointers. 
static Type *getGEPReturnType(Value *Ptr, ArrayRef IdxList) { Type *PtrTy = PointerType::get(checkGEPType( getIndexedType(Ptr->getType(), IdxList)), - getAddressSpace(Ptr)); + Ptr->getType()->getPointerAddressSpace()); // Vector GEP if (Ptr->getType()->isVectorTy()) { unsigned NumElem = cast(Ptr->getType())->getNumElements(); @@ -849,7 +851,6 @@ public: bool isInBounds() const; // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const GetElementPtrInst *) { return true; } static inline bool classof(const Instruction *I) { return (I->getOpcode() == Instruction::GetElementPtr); } @@ -897,13 +898,13 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) /// This instruction compares its operands according to the predicate given /// to the constructor. It only operates on integers or pointers. The operands /// must be identical types. -/// @brief Represent an integer comparison operator. +/// \brief Represent an integer comparison operator. class ICmpInst: public CmpInst { protected: - /// @brief Clone an identical ICmpInst + /// \brief Clone an identical ICmpInst virtual ICmpInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics. + /// \brief Constructor with insert-before-instruction semantics. ICmpInst( Instruction *InsertBefore, ///< Where to insert Predicate pred, ///< The predicate to use for the comparison @@ -924,7 +925,7 @@ public: "Invalid operand types for ICmp instruction"); } - /// @brief Constructor with insert-at-end semantics. + /// \brief Constructor with insert-at-end semantics. ICmpInst( BasicBlock &InsertAtEnd, ///< Block to insert into. Predicate pred, ///< The predicate to use for the comparison @@ -945,7 +946,7 @@ public: "Invalid operand types for ICmp instruction"); } - /// @brief Constructor with no-insertion semantics + /// \brief Constructor with no-insertion semantics ICmpInst( Predicate pred, ///< The predicate to use for the comparison Value *LHS, ///< The left-hand-side of the expression @@ -967,25 +968,25 @@ public: /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. /// @returns the predicate that would be the result if the operand were /// regarded as signed. - /// @brief Return the signed version of the predicate + /// \brief Return the signed version of the predicate Predicate getSignedPredicate() const { return getSignedPredicate(getPredicate()); } /// This is a static version that you can use without an instruction. - /// @brief Return the signed version of the predicate. + /// \brief Return the signed version of the predicate. static Predicate getSignedPredicate(Predicate pred); /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. /// @returns the predicate that would be the result if the operand were /// regarded as unsigned. - /// @brief Return the unsigned version of the predicate + /// \brief Return the unsigned version of the predicate Predicate getUnsignedPredicate() const { return getUnsignedPredicate(getPredicate()); } /// This is a static version that you can use without an instruction. - /// @brief Return the unsigned version of the predicate. + /// \brief Return the unsigned version of the predicate. static Predicate getUnsignedPredicate(Predicate pred); /// isEquality - Return true if this predicate is either EQ or NE. This also @@ -1001,7 +1002,7 @@ public: } /// @returns true if the predicate of this ICmpInst is commutative - /// @brief Determine if this relation is commutative. + /// \brief Determine if this relation is commutative. 
bool isCommutative() const { return isEquality(); } /// isRelational - Return true if the predicate is relational (not EQ or NE). @@ -1017,21 +1018,20 @@ public: } /// Initialize a set of values that all satisfy the predicate with C. - /// @brief Make a ConstantRange for a relation with a constant value. + /// \brief Make a ConstantRange for a relation with a constant value. static ConstantRange makeConstantRange(Predicate pred, const APInt &C); /// Exchange the two operands to this instruction in such a way that it does /// not modify the semantics of the instruction. The predicate value may be /// changed to retain the same result if the predicate is order dependent /// (e.g. ult). - /// @brief Swap operands and adjust predicate. + /// \brief Swap operands and adjust predicate. void swapOperands() { setPredicate(getSwappedPredicate()); Op<0>().swap(Op<1>()); } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ICmpInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::ICmp; } @@ -1048,13 +1048,13 @@ public: /// This instruction compares its operands according to the predicate given /// to the constructor. It only operates on floating point values or packed /// vectors of floating point values. The operands must be identical types. -/// @brief Represents a floating point comparison operator. +/// \brief Represents a floating point comparison operator. class FCmpInst: public CmpInst { protected: - /// @brief Clone an identical FCmpInst + /// \brief Clone an identical FCmpInst virtual FCmpInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics. + /// \brief Constructor with insert-before-instruction semantics. FCmpInst( Instruction *InsertBefore, ///< Where to insert Predicate pred, ///< The predicate to use for the comparison @@ -1073,7 +1073,7 @@ public: "Invalid operand types for FCmp instruction"); } - /// @brief Constructor with insert-at-end semantics. + /// \brief Constructor with insert-at-end semantics. FCmpInst( BasicBlock &InsertAtEnd, ///< Block to insert into. Predicate pred, ///< The predicate to use for the comparison @@ -1092,7 +1092,7 @@ public: "Invalid operand types for FCmp instruction"); } - /// @brief Constructor with no-insertion semantics + /// \brief Constructor with no-insertion semantics FCmpInst( Predicate pred, ///< The predicate to use for the comparison Value *LHS, ///< The left-hand-side of the expression @@ -1110,14 +1110,14 @@ public: } /// @returns true if the predicate of this instruction is EQ or NE. - /// @brief Determine if this is an equality predicate. + /// \brief Determine if this is an equality predicate. bool isEquality() const { return getPredicate() == FCMP_OEQ || getPredicate() == FCMP_ONE || getPredicate() == FCMP_UEQ || getPredicate() == FCMP_UNE; } /// @returns true if the predicate of this instruction is commutative. - /// @brief Determine if this is a commutative predicate. + /// \brief Determine if this is a commutative predicate. bool isCommutative() const { return isEquality() || getPredicate() == FCMP_FALSE || @@ -1127,21 +1127,20 @@ public: } /// @returns true if the predicate is relational (not EQ or NE). - /// @brief Determine if this a relational predicate. + /// \brief Determine if this a relational predicate. 
bool isRelational() const { return !isEquality(); } /// Exchange the two operands to this instruction in such a way that it does /// not modify the semantics of the instruction. The predicate value may be /// changed to retain the same result if the predicate is order dependent /// (e.g. ult). - /// @brief Swap operands and adjust predicate. + /// \brief Swap operands and adjust predicate. void swapOperands() { setPredicate(getSwappedPredicate()); Op<0>().swap(Op<1>()); } - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const FCmpInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::FCmp; } @@ -1163,12 +1162,12 @@ class CallInst : public Instruction { void init(Value *Func, const Twine &NameStr); /// Construct a CallInst given a range of arguments. - /// @brief Construct a CallInst from a range of arguments + /// \brief Construct a CallInst from a range of arguments inline CallInst(Value *Func, ArrayRef Args, const Twine &NameStr, Instruction *InsertBefore); /// Construct a CallInst given a range of arguments. - /// @brief Construct a CallInst from a range of arguments + /// \brief Construct a CallInst from a range of arguments inline CallInst(Value *Func, ArrayRef Args, const Twine &NameStr, BasicBlock *InsertAtEnd); @@ -1267,77 +1266,78 @@ public: /// removeAttribute - removes the attribute from the list of attributes. void removeAttribute(unsigned i, Attributes attr); - /// \brief Return true if this call has the given attribute. - bool hasFnAttr(Attributes N) const { - return paramHasAttr(~0, N); - } + /// \brief Determine whether this call has the given attribute. + bool hasFnAttr(Attributes::AttrVal A) const; - /// @brief Determine whether the call or the callee has the given attribute. - bool paramHasAttr(unsigned i, Attributes attr) const; + /// \brief Determine whether the call or the callee has the given attributes. + bool paramHasAttr(unsigned i, Attributes::AttrVal A) const; - /// @brief Extract the alignment for a call or parameter (0=unknown). + /// \brief Extract the alignment for a call or parameter (0=unknown). unsigned getParamAlignment(unsigned i) const { return AttributeList.getParamAlignment(i); } - /// @brief Return true if the call should not be inlined. - bool isNoInline() const { return hasFnAttr(Attribute::NoInline); } - void setIsNoInline(bool Value = true) { - if (Value) addAttribute(~0, Attribute::NoInline); - else removeAttribute(~0, Attribute::NoInline); + /// \brief Return true if the call should not be inlined. + bool isNoInline() const { return hasFnAttr(Attributes::NoInline); } + void setIsNoInline() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::NoInline)); } - /// @brief Return true if the call can return twice + /// \brief Return true if the call can return twice bool canReturnTwice() const { - return hasFnAttr(Attribute::ReturnsTwice); + return hasFnAttr(Attributes::ReturnsTwice); } - void setCanReturnTwice(bool Value = true) { - if (Value) addAttribute(~0, Attribute::ReturnsTwice); - else removeAttribute(~0, Attribute::ReturnsTwice); + void setCanReturnTwice() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::ReturnsTwice)); } - /// @brief Determine if the call does not access memory. + /// \brief Determine if the call does not access memory. 
bool doesNotAccessMemory() const { - return hasFnAttr(Attribute::ReadNone); + return hasFnAttr(Attributes::ReadNone); } - void setDoesNotAccessMemory(bool NotAccessMemory = true) { - if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone); - else removeAttribute(~0, Attribute::ReadNone); + void setDoesNotAccessMemory() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::ReadNone)); } - /// @brief Determine if the call does not access or only reads memory. + /// \brief Determine if the call does not access or only reads memory. bool onlyReadsMemory() const { - return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly); + return doesNotAccessMemory() || hasFnAttr(Attributes::ReadOnly); } - void setOnlyReadsMemory(bool OnlyReadsMemory = true) { - if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly); - else removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone); + void setOnlyReadsMemory() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::ReadOnly)); } - /// @brief Determine if the call cannot return. - bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); } - void setDoesNotReturn(bool DoesNotReturn = true) { - if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn); - else removeAttribute(~0, Attribute::NoReturn); + /// \brief Determine if the call cannot return. + bool doesNotReturn() const { return hasFnAttr(Attributes::NoReturn); } + void setDoesNotReturn() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::NoReturn)); } - /// @brief Determine if the call cannot unwind. - bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); } - void setDoesNotThrow(bool DoesNotThrow = true) { - if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind); - else removeAttribute(~0, Attribute::NoUnwind); + /// \brief Determine if the call cannot unwind. + bool doesNotThrow() const { return hasFnAttr(Attributes::NoUnwind); } + void setDoesNotThrow() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::NoUnwind)); } - /// @brief Determine if the call returns a structure through first + /// \brief Determine if the call returns a structure through first /// pointer argument. bool hasStructRetAttr() const { // Be friendly and also check the callee. - return paramHasAttr(1, Attribute::StructRet); + return paramHasAttr(1, Attributes::StructRet); } - /// @brief Determine if any call argument is an aggregate passed by value. + /// \brief Determine if any call argument is an aggregate passed by value. 
bool hasByValArgument() const { - return AttributeList.hasAttrSomewhere(Attribute::ByVal); + for (unsigned I = 0, E = AttributeList.getNumAttrs(); I != E; ++I) + if (AttributeList.getAttributesAtIndex(I).hasAttribute(Attributes::ByVal)) + return true; + return false; } /// getCalledFunction - Return the function called, or null if this is an @@ -1363,7 +1363,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const CallInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Call; } @@ -1469,7 +1468,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SelectInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Select; } @@ -1512,7 +1510,6 @@ public: static unsigned getPointerOperandIndex() { return 0U; } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const VAArgInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == VAArg; } @@ -1566,7 +1563,6 @@ public: DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ExtractElementInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::ExtractElement; } @@ -1625,7 +1621,6 @@ public: DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const InsertElementInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::InsertElement; } @@ -1706,7 +1701,6 @@ public: // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ShuffleVectorInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::ShuffleVector; } @@ -1802,7 +1796,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ExtractValueInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::ExtractValue; } @@ -1839,7 +1832,7 @@ ExtractValueInst::ExtractValueInst(Value *Agg, class InsertValueInst : public Instruction { SmallVector Indices; - void *operator new(size_t, unsigned); // Do not implement + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; InsertValueInst(const InsertValueInst &IVI); void init(Value *Agg, Value *Val, ArrayRef Idxs, const Twine &NameStr); @@ -1924,7 +1917,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const InsertValueInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::InsertValue; } @@ -1970,7 +1962,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value) // scientist's overactive imagination. // class PHINode : public Instruction { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; /// ReservedSpace - The number of operands actually allocated. NumOperands is /// the number actually in use. 
unsigned ReservedSpace; @@ -2141,7 +2133,6 @@ public: Value *hasConstantValue() const; /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const PHINode *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::PHI; } @@ -2178,7 +2169,7 @@ class LandingPadInst : public Instruction { public: enum ClauseType { Catch, Filter }; private: - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; // Allocate space for exactly zero operands. void *operator new(size_t s) { return User::operator new(s, 0); @@ -2249,7 +2240,6 @@ public: void reserveClauses(unsigned Size) { growOperands(Size); } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const LandingPadInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::LandingPad; } @@ -2318,7 +2308,6 @@ public: unsigned getNumSuccessors() const { return 0; } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ReturnInst *) { return true; } static inline bool classof(const Instruction *I) { return (I->getOpcode() == Instruction::Ret); } @@ -2418,7 +2407,6 @@ public: void swapSuccessors(); // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const BranchInst *) { return true; } static inline bool classof(const Instruction *I) { return (I->getOpcode() == Instruction::Br); } @@ -2445,7 +2433,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value) /// SwitchInst - Multiway switch /// class SwitchInst : public TerminatorInst { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; unsigned ReservedSpace; // Operands format: // Operand[0] = Value to switch on @@ -2613,7 +2601,7 @@ public: } /// addCase - Add an entry to the switch instruction... - /// @Deprecated + /// @deprecated /// Note: /// This action invalidates case_end(). Old case_end() iterator will /// point to the added case. @@ -2699,7 +2687,7 @@ public: } /// Resolves case value for current case. - /// @Deprecated + /// @deprecated ConstantIntTy *getCaseValue() { assert(Index < SI->getNumCases() && "Index out the number of cases."); IntegersSubsetRef CaseRanges = *SubsetIt; @@ -2803,7 +2791,7 @@ public: CaseIt(const ParentTy& Src) : ParentTy(Src) {} /// Sets the new value for current case. - /// @Deprecated. + /// @deprecated. void setValue(ConstantInt *V) { assert(Index < SI->getNumCases() && "Index out the number of cases."); IntegersSubsetToBB Mapping; @@ -2829,7 +2817,6 @@ public: // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SwitchInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Switch; } @@ -2857,7 +2844,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value) /// IndirectBrInst - Indirect Branch Instruction. 
/// class IndirectBrInst : public TerminatorInst { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; unsigned ReservedSpace; // Operand[0] = Value to switch on // Operand[1] = Default basic block destination @@ -2928,7 +2915,6 @@ public: } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const IndirectBrInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::IndirectBr; } @@ -2963,14 +2949,14 @@ class InvokeInst : public TerminatorInst { /// Construct an InvokeInst given a range of arguments. /// - /// @brief Construct an InvokeInst from a range of arguments + /// \brief Construct an InvokeInst from a range of arguments inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef Args, unsigned Values, const Twine &NameStr, Instruction *InsertBefore); /// Construct an InvokeInst given a range of arguments. /// - /// @brief Construct an InvokeInst from a range of arguments + /// \brief Construct an InvokeInst from a range of arguments inline InvokeInst(Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef Args, unsigned Values, const Twine &NameStr, BasicBlock *InsertAtEnd); @@ -3029,68 +3015,69 @@ public: /// removeAttribute - removes the attribute from the list of attributes. void removeAttribute(unsigned i, Attributes attr); - /// \brief Return true if this call has the given attribute. - bool hasFnAttr(Attributes N) const { - return paramHasAttr(~0, N); - } + /// \brief Determine whether this call has the NoAlias attribute. + bool hasFnAttr(Attributes::AttrVal A) const; - /// @brief Determine whether the call or the callee has the given attribute. - bool paramHasAttr(unsigned i, Attributes attr) const; + /// \brief Determine whether the call or the callee has the given attributes. + bool paramHasAttr(unsigned i, Attributes::AttrVal A) const; - /// @brief Extract the alignment for a call or parameter (0=unknown). + /// \brief Extract the alignment for a call or parameter (0=unknown). unsigned getParamAlignment(unsigned i) const { return AttributeList.getParamAlignment(i); } - /// @brief Return true if the call should not be inlined. - bool isNoInline() const { return hasFnAttr(Attribute::NoInline); } - void setIsNoInline(bool Value = true) { - if (Value) addAttribute(~0, Attribute::NoInline); - else removeAttribute(~0, Attribute::NoInline); + /// \brief Return true if the call should not be inlined. + bool isNoInline() const { return hasFnAttr(Attributes::NoInline); } + void setIsNoInline() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::NoInline)); } - /// @brief Determine if the call does not access memory. + /// \brief Determine if the call does not access memory. bool doesNotAccessMemory() const { - return hasFnAttr(Attribute::ReadNone); + return hasFnAttr(Attributes::ReadNone); } - void setDoesNotAccessMemory(bool NotAccessMemory = true) { - if (NotAccessMemory) addAttribute(~0, Attribute::ReadNone); - else removeAttribute(~0, Attribute::ReadNone); + void setDoesNotAccessMemory() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::ReadNone)); } - /// @brief Determine if the call does not access or only reads memory. + /// \brief Determine if the call does not access or only reads memory. 
bool onlyReadsMemory() const { - return doesNotAccessMemory() || hasFnAttr(Attribute::ReadOnly); + return doesNotAccessMemory() || hasFnAttr(Attributes::ReadOnly); } - void setOnlyReadsMemory(bool OnlyReadsMemory = true) { - if (OnlyReadsMemory) addAttribute(~0, Attribute::ReadOnly); - else removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone); + void setOnlyReadsMemory() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::ReadOnly)); } - /// @brief Determine if the call cannot return. - bool doesNotReturn() const { return hasFnAttr(Attribute::NoReturn); } - void setDoesNotReturn(bool DoesNotReturn = true) { - if (DoesNotReturn) addAttribute(~0, Attribute::NoReturn); - else removeAttribute(~0, Attribute::NoReturn); + /// \brief Determine if the call cannot return. + bool doesNotReturn() const { return hasFnAttr(Attributes::NoReturn); } + void setDoesNotReturn() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::NoReturn)); } - /// @brief Determine if the call cannot unwind. - bool doesNotThrow() const { return hasFnAttr(Attribute::NoUnwind); } - void setDoesNotThrow(bool DoesNotThrow = true) { - if (DoesNotThrow) addAttribute(~0, Attribute::NoUnwind); - else removeAttribute(~0, Attribute::NoUnwind); + /// \brief Determine if the call cannot unwind. + bool doesNotThrow() const { return hasFnAttr(Attributes::NoUnwind); } + void setDoesNotThrow() { + addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(getContext(), Attributes::NoUnwind)); } - /// @brief Determine if the call returns a structure through first + /// \brief Determine if the call returns a structure through first /// pointer argument. bool hasStructRetAttr() const { // Be friendly and also check the callee. - return paramHasAttr(1, Attribute::StructRet); + return paramHasAttr(1, Attributes::StructRet); } - /// @brief Determine if any call argument is an aggregate passed by value. + /// \brief Determine if any call argument is an aggregate passed by value. bool hasByValArgument() const { - return AttributeList.hasAttrSomewhere(Attribute::ByVal); + for (unsigned I = 0, E = AttributeList.getNumAttrs(); I != E; ++I) + if (AttributeList.getAttributesAtIndex(I).hasAttribute(Attributes::ByVal)) + return true; + return false; } /// getCalledFunction - Return the function called, or null if this is an @@ -3141,7 +3128,6 @@ public: unsigned getNumSuccessors() const { return 2; } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const InvokeInst *) { return true; } static inline bool classof(const Instruction *I) { return (I->getOpcode() == Instruction::Invoke); } @@ -3221,7 +3207,6 @@ public: unsigned getNumSuccessors() const { return 0; } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ResumeInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Resume; } @@ -3251,7 +3236,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value) /// end of the block cannot be reached. 
/// class UnreachableInst : public TerminatorInst { - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; protected: virtual UnreachableInst *clone_impl() const; @@ -3266,7 +3251,6 @@ public: unsigned getNumSuccessors() const { return 0; } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const UnreachableInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Unreachable; } @@ -3283,14 +3267,14 @@ private: // TruncInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a truncation of integer types. +/// \brief This class represents a truncation of integer types. class TruncInst : public CastInst { protected: - /// @brief Clone an identical TruncInst + /// \brief Clone an identical TruncInst virtual TruncInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics TruncInst( Value *S, ///< The value to be truncated Type *Ty, ///< The (smaller) type to truncate to @@ -3298,7 +3282,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics TruncInst( Value *S, ///< The value to be truncated Type *Ty, ///< The (smaller) type to truncate to @@ -3306,8 +3290,7 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const TruncInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == Trunc; } @@ -3320,14 +3303,14 @@ public: // ZExtInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents zero extension of integer types. +/// \brief This class represents zero extension of integer types. class ZExtInst : public CastInst { protected: - /// @brief Clone an identical ZExtInst + /// \brief Clone an identical ZExtInst virtual ZExtInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics ZExtInst( Value *S, ///< The value to be zero extended Type *Ty, ///< The type to zero extend to @@ -3335,7 +3318,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end semantics. + /// \brief Constructor with insert-at-end semantics. 
ZExtInst( Value *S, ///< The value to be zero extended Type *Ty, ///< The type to zero extend to @@ -3343,8 +3326,7 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const ZExtInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == ZExt; } @@ -3357,14 +3339,14 @@ public: // SExtInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a sign extension of integer types. +/// \brief This class represents a sign extension of integer types. class SExtInst : public CastInst { protected: - /// @brief Clone an identical SExtInst + /// \brief Clone an identical SExtInst virtual SExtInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics SExtInst( Value *S, ///< The value to be sign extended Type *Ty, ///< The type to sign extend to @@ -3372,7 +3354,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics SExtInst( Value *S, ///< The value to be sign extended Type *Ty, ///< The type to sign extend to @@ -3380,8 +3362,7 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SExtInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == SExt; } @@ -3394,14 +3375,14 @@ public: // FPTruncInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a truncation of floating point types. +/// \brief This class represents a truncation of floating point types. class FPTruncInst : public CastInst { protected: - /// @brief Clone an identical FPTruncInst + /// \brief Clone an identical FPTruncInst virtual FPTruncInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics FPTruncInst( Value *S, ///< The value to be truncated Type *Ty, ///< The type to truncate to @@ -3409,7 +3390,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics FPTruncInst( Value *S, ///< The value to be truncated Type *Ty, ///< The type to truncate to @@ -3417,8 +3398,7 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const FPTruncInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == FPTrunc; } @@ -3431,14 +3411,14 @@ public: // FPExtInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents an extension of floating point types. 
+/// \brief This class represents an extension of floating point types. class FPExtInst : public CastInst { protected: - /// @brief Clone an identical FPExtInst + /// \brief Clone an identical FPExtInst virtual FPExtInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics FPExtInst( Value *S, ///< The value to be extended Type *Ty, ///< The type to extend to @@ -3446,7 +3426,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics FPExtInst( Value *S, ///< The value to be extended Type *Ty, ///< The type to extend to @@ -3454,8 +3434,7 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const FPExtInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == FPExt; } @@ -3468,14 +3447,14 @@ public: // UIToFPInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a cast unsigned integer to floating point. +/// \brief This class represents a cast unsigned integer to floating point. class UIToFPInst : public CastInst { protected: - /// @brief Clone an identical UIToFPInst + /// \brief Clone an identical UIToFPInst virtual UIToFPInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics UIToFPInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3483,7 +3462,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics UIToFPInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3491,8 +3470,7 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const UIToFPInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == UIToFP; } @@ -3505,14 +3483,14 @@ public: // SIToFPInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a cast from signed integer to floating point. +/// \brief This class represents a cast from signed integer to floating point. 
class SIToFPInst : public CastInst { protected: - /// @brief Clone an identical SIToFPInst + /// \brief Clone an identical SIToFPInst virtual SIToFPInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics SIToFPInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3520,7 +3498,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics SIToFPInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3528,8 +3506,7 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const SIToFPInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == SIToFP; } @@ -3542,14 +3519,14 @@ public: // FPToUIInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a cast from floating point to unsigned integer +/// \brief This class represents a cast from floating point to unsigned integer class FPToUIInst : public CastInst { protected: - /// @brief Clone an identical FPToUIInst + /// \brief Clone an identical FPToUIInst virtual FPToUIInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics FPToUIInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3557,7 +3534,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics FPToUIInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3565,8 +3542,7 @@ public: BasicBlock *InsertAtEnd ///< Where to insert the new instruction ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const FPToUIInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == FPToUI; } @@ -3579,14 +3555,14 @@ public: // FPToSIInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a cast from floating point to signed integer. +/// \brief This class represents a cast from floating point to signed integer. 
class FPToSIInst : public CastInst { protected: - /// @brief Clone an identical FPToSIInst + /// \brief Clone an identical FPToSIInst virtual FPToSIInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics FPToSIInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3594,7 +3570,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics FPToSIInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3602,8 +3578,7 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const FPToSIInst *) { return true; } + /// \brief Methods for support type inquiry through isa, cast, and dyn_cast: static inline bool classof(const Instruction *I) { return I->getOpcode() == FPToSI; } @@ -3616,10 +3591,10 @@ public: // IntToPtrInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a cast from an integer to a pointer. +/// \brief This class represents a cast from an integer to a pointer. class IntToPtrInst : public CastInst { public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics IntToPtrInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3627,7 +3602,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics IntToPtrInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3635,11 +3610,15 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); - /// @brief Clone an identical IntToPtrInst + /// \brief Clone an identical IntToPtrInst virtual IntToPtrInst *clone_impl() const; + /// \brief Returns the address space of this instruction's pointer type. 
+ unsigned getAddressSpace() const { + return getType()->getPointerAddressSpace(); + } + // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const IntToPtrInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == IntToPtr; } @@ -3652,14 +3631,14 @@ public: // PtrToIntInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a cast from a pointer to an integer +/// \brief This class represents a cast from a pointer to an integer class PtrToIntInst : public CastInst { protected: - /// @brief Clone an identical PtrToIntInst + /// \brief Clone an identical PtrToIntInst virtual PtrToIntInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics PtrToIntInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3667,7 +3646,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics PtrToIntInst( Value *S, ///< The value to be converted Type *Ty, ///< The type to convert to @@ -3675,8 +3654,19 @@ public: BasicBlock *InsertAtEnd ///< The block to insert the instruction into ); + /// \brief Gets the pointer operand. + Value *getPointerOperand() { return getOperand(0); } + /// \brief Gets the pointer operand. + const Value *getPointerOperand() const { return getOperand(0); } + /// \brief Gets the operand index of the pointer operand. + static unsigned getPointerOperandIndex() { return 0U; } + + /// \brief Returns the address space of the pointer operand. + unsigned getPointerAddressSpace() const { + return getPointerOperand()->getType()->getPointerAddressSpace(); + } + // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const PtrToIntInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == PtrToInt; } @@ -3689,14 +3679,14 @@ public: // BitCastInst Class //===----------------------------------------------------------------------===// -/// @brief This class represents a no-op cast from one type to another. +/// \brief This class represents a no-op cast from one type to another. 
class BitCastInst : public CastInst { protected: - /// @brief Clone an identical BitCastInst + /// \brief Clone an identical BitCastInst virtual BitCastInst *clone_impl() const; public: - /// @brief Constructor with insert-before-instruction semantics + /// \brief Constructor with insert-before-instruction semantics BitCastInst( Value *S, ///< The value to be casted Type *Ty, ///< The type to casted to @@ -3704,7 +3694,7 @@ public: Instruction *InsertBefore = 0 ///< Where to insert the new instruction ); - /// @brief Constructor with insert-at-end-of-block semantics + /// \brief Constructor with insert-at-end-of-block semantics BitCastInst( Value *S, ///< The value to be casted Type *Ty, ///< The type to casted to @@ -3713,7 +3703,6 @@ public: ); // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const BitCastInst *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == BitCast; } diff --git a/include/llvm/IntrinsicInst.h b/include/llvm/IntrinsicInst.h index 1cebdd2..9b2afd5 100644 --- a/include/llvm/IntrinsicInst.h +++ b/include/llvm/IntrinsicInst.h @@ -34,9 +34,9 @@ namespace llvm { /// functions. This allows the standard isa/dyncast/cast functionality to /// work with calls to intrinsic functions. class IntrinsicInst : public CallInst { - IntrinsicInst(); // DO NOT IMPLEMENT - IntrinsicInst(const IntrinsicInst&); // DO NOT IMPLEMENT - void operator=(const IntrinsicInst&); // DO NOT IMPLEMENT + IntrinsicInst() LLVM_DELETED_FUNCTION; + IntrinsicInst(const IntrinsicInst&) LLVM_DELETED_FUNCTION; + void operator=(const IntrinsicInst&) LLVM_DELETED_FUNCTION; public: /// getIntrinsicID - Return the intrinsic ID of this intrinsic. /// @@ -45,7 +45,6 @@ namespace llvm { } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const IntrinsicInst *) { return true; } static inline bool classof(const CallInst *I) { if (const Function *CF = I->getCalledFunction()) return CF->getIntrinsicID() != 0; @@ -62,7 +61,6 @@ namespace llvm { public: // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const DbgInfoIntrinsic *) { return true; } static inline bool classof(const IntrinsicInst *I) { switch (I->getIntrinsicID()) { case Intrinsic::dbg_declare: @@ -86,7 +84,6 @@ namespace llvm { MDNode *getVariable() const { return cast(getArgOperand(1)); } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const DbgDeclareInst *) { return true; } static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::dbg_declare; } @@ -108,7 +105,6 @@ namespace llvm { MDNode *getVariable() const { return cast(getArgOperand(2)); } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const DbgValueInst *) { return true; } static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::dbg_value; } @@ -175,7 +171,6 @@ namespace llvm { } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const MemIntrinsic *) { return true; } static inline bool classof(const IntrinsicInst *I) { switch (I->getIntrinsicID()) { case Intrinsic::memcpy: @@ -205,7 +200,6 @@ namespace llvm { } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const MemSetInst *) { return true; } static inline bool classof(const IntrinsicInst *I) { return 
I->getIntrinsicID() == Intrinsic::memset; } @@ -238,7 +232,6 @@ namespace llvm { } // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const MemTransferInst *) { return true; } static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::memcpy || I->getIntrinsicID() == Intrinsic::memmove; @@ -254,7 +247,6 @@ namespace llvm { class MemCpyInst : public MemTransferInst { public: // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const MemCpyInst *) { return true; } static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::memcpy; } @@ -268,7 +260,6 @@ namespace llvm { class MemMoveInst : public MemTransferInst { public: // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const MemMoveInst *) { return true; } static inline bool classof(const IntrinsicInst *I) { return I->getIntrinsicID() == Intrinsic::memmove; } @@ -277,6 +268,49 @@ namespace llvm { } }; + /// VAStartInst - This represents the llvm.va_start intrinsic. + /// + class VAStartInst : public IntrinsicInst { + public: + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::vastart; + } + static inline bool classof(const Value *V) { + return isa(V) && classof(cast(V)); + } + + Value *getArgList() const { return const_cast(getArgOperand(0)); } + }; + + /// VAEndInst - This represents the llvm.va_end intrinsic. + /// + class VAEndInst : public IntrinsicInst { + public: + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::vaend; + } + static inline bool classof(const Value *V) { + return isa(V) && classof(cast(V)); + } + + Value *getArgList() const { return const_cast(getArgOperand(0)); } + }; + + /// VACopyInst - This represents the llvm.va_copy intrinsic. + /// + class VACopyInst : public IntrinsicInst { + public: + static inline bool classof(const IntrinsicInst *I) { + return I->getIntrinsicID() == Intrinsic::vacopy; + } + static inline bool classof(const Value *V) { + return isa(V) && classof(cast(V)); + } + + Value *getDest() const { return const_cast(getArgOperand(0)); } + Value *getSrc() const { return const_cast(getArgOperand(1)); } + }; + } #endif diff --git a/include/llvm/Intrinsics.h b/include/llvm/Intrinsics.h index c350388..3108a8e 100644 --- a/include/llvm/Intrinsics.h +++ b/include/llvm/Intrinsics.h @@ -50,7 +50,7 @@ namespace Intrinsic { /// Intrinsic::getType(ID) - Return the function type for an intrinsic. /// FunctionType *getType(LLVMContext &Context, ID id, - ArrayRef Tys = ArrayRef()); + ArrayRef Tys = ArrayRef()); /// Intrinsic::isOverloaded(ID) - Returns true if the intrinsic can be /// overloaded. @@ -58,7 +58,7 @@ namespace Intrinsic { /// Intrinsic::getAttributes(ID) - Return the attributes for an intrinsic. /// - AttrListPtr getAttributes(ID id); + AttrListPtr getAttributes(LLVMContext &C, ID id); /// Intrinsic::getDeclaration(M, ID) - Create or insert an LLVM Function /// declaration for an intrinsic, and return it. 
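An illustrative sketch (not part of the imported patch) of how the new VAStartInst/VAEndInst/VACopyInst wrapper classes added to IntrinsicInst.h above might be used. The classes and their getArgList/getDest/getSrc accessors come directly from the hunk above; the helper name countVarArgIntrinsics and the surrounding traversal are hypothetical.

#include "llvm/BasicBlock.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Walk a function and report every va_start/va_copy/va_end intrinsic found.
static unsigned countVarArgIntrinsics(Function &F) {
  unsigned Count = 0;
  for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
      if (VAStartInst *Start = dyn_cast<VAStartInst>(&*I)) {
        // getArgList() is the va_list operand of llvm.va_start.
        errs() << "va_start on " << *Start->getArgList() << "\n";
        ++Count;
      } else if (VACopyInst *Copy = dyn_cast<VACopyInst>(&*I)) {
        // llvm.va_copy carries a destination and a source va_list.
        errs() << "va_copy " << *Copy->getSrc() << " -> " << *Copy->getDest() << "\n";
        ++Count;
      } else if (isa<VAEndInst>(&*I)) {
        ++Count;
      }
    }
  return Count;
}

The wrappers only add classof support and named accessors; dyn_cast performs the intrinsic-ID check itself, so no explicit getIntrinsicID comparison is needed at the call site.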
diff --git a/include/llvm/Intrinsics.td b/include/llvm/Intrinsics.td index d1a0fee..2e1597f 100644 --- a/include/llvm/Intrinsics.td +++ b/include/llvm/Intrinsics.td @@ -121,15 +121,21 @@ def llvm_metadata_ty : LLVMType; // !{...} def llvm_x86mmx_ty : LLVMType; def llvm_ptrx86mmx_ty : LLVMPointerType; // <1 x i64>* +def llvm_v2i1_ty : LLVMType; // 2 x i1 +def llvm_v4i1_ty : LLVMType; // 4 x i1 +def llvm_v8i1_ty : LLVMType; // 8 x i1 +def llvm_v16i1_ty : LLVMType; // 16 x i1 def llvm_v2i8_ty : LLVMType; // 2 x i8 def llvm_v4i8_ty : LLVMType; // 4 x i8 def llvm_v8i8_ty : LLVMType; // 8 x i8 def llvm_v16i8_ty : LLVMType; // 16 x i8 def llvm_v32i8_ty : LLVMType; // 32 x i8 +def llvm_v1i16_ty : LLVMType; // 1 x i16 def llvm_v2i16_ty : LLVMType; // 2 x i16 def llvm_v4i16_ty : LLVMType; // 4 x i16 def llvm_v8i16_ty : LLVMType; // 8 x i16 def llvm_v16i16_ty : LLVMType; // 16 x i16 +def llvm_v1i32_ty : LLVMType; // 1 x i32 def llvm_v2i32_ty : LLVMType; // 2 x i32 def llvm_v4i32_ty : LLVMType; // 4 x i32 def llvm_v8i32_ty : LLVMType; // 8 x i32 @@ -279,9 +285,9 @@ let Properties = [IntrNoMem] in { // NOTE: these are internal interfaces. def int_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>; -def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>; +def int_longjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>; def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>; -def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>; +def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>; // Internal interface for object size checking def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i1_ty], @@ -339,7 +345,7 @@ let Properties = [IntrNoMem] in { } def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>; def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>; -def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>; +def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty], [IntrNoReturn]>; //===---------------- Generic Variable Attribute Intrinsics----------------===// // diff --git a/include/llvm/IntrinsicsARM.td b/include/llvm/IntrinsicsARM.td index fa8034e..93b1ae1 100644 --- a/include/llvm/IntrinsicsARM.td +++ b/include/llvm/IntrinsicsARM.td @@ -16,147 +16,136 @@ // TLS let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.". - def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">, - Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; -} + +def int_arm_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">, + Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; //===----------------------------------------------------------------------===// // Saturating Arithmentic -let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.". 
- def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, Commutative]>; - def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; - def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; - def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; -} +def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], + [IntrNoMem, Commutative]>; +def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; +def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; +def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; //===----------------------------------------------------------------------===// // Load and Store exclusive doubleword -let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.". - def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, - llvm_ptr_ty], [IntrReadWriteArgMem]>; - def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty], - [IntrReadArgMem]>; -} +def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, + llvm_ptr_ty], [IntrReadWriteArgMem]>; +def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty], + [IntrReadArgMem]>; //===----------------------------------------------------------------------===// // VFP -let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.". - def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">, - Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; - def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">, - Intrinsic<[], [llvm_i32_ty], []>; - def int_arm_vcvtr : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty], - [IntrNoMem]>; - def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty], - [IntrNoMem]>; -} +def int_arm_get_fpscr : GCCBuiltin<"__builtin_arm_get_fpscr">, + Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>; +def int_arm_set_fpscr : GCCBuiltin<"__builtin_arm_set_fpscr">, + Intrinsic<[], [llvm_i32_ty], []>; +def int_arm_vcvtr : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty], + [IntrNoMem]>; +def int_arm_vcvtru : Intrinsic<[llvm_float_ty], [llvm_anyfloat_ty], + [IntrNoMem]>; //===----------------------------------------------------------------------===// // Coprocessor -let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.". 
- // Move to coprocessor - def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; - def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; - - // Move from coprocessor - def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty, llvm_i32_ty], []>; - def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty, llvm_i32_ty], []>; - - // Coprocessor data processing - def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; - def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; - - // Move from two registers to coprocessor - def int_arm_mcrr : GCCBuiltin<"__builtin_arm_mcrr">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty, llvm_i32_ty], []>; - def int_arm_mcrr2 : GCCBuiltin<"__builtin_arm_mcrr2">, - Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty, llvm_i32_ty], []>; -} +// Move to coprocessor +def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, + llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; +def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, + llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +// Move from coprocessor +def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, + llvm_i32_ty, llvm_i32_ty], []>; +def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, + llvm_i32_ty, llvm_i32_ty], []>; + +// Coprocessor data processing +def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, + llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; +def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, + llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>; + +// Move from two registers to coprocessor +def int_arm_mcrr : GCCBuiltin<"__builtin_arm_mcrr">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, + llvm_i32_ty, llvm_i32_ty], []>; +def int_arm_mcrr2 : GCCBuiltin<"__builtin_arm_mcrr2">, + Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, + llvm_i32_ty, llvm_i32_ty], []>; //===----------------------------------------------------------------------===// // Advanced SIMD (NEON) -let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.". - - // The following classes do not correspond directly to GCC builtins. 
- class Neon_1Arg_Intrinsic - : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>; - class Neon_1Arg_Narrow_Intrinsic - : Intrinsic<[llvm_anyvector_ty], - [LLVMExtendedElementVectorType<0>], [IntrNoMem]>; - class Neon_2Arg_Intrinsic - : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem]>; - class Neon_2Arg_Narrow_Intrinsic - : Intrinsic<[llvm_anyvector_ty], - [LLVMExtendedElementVectorType<0>, - LLVMExtendedElementVectorType<0>], - [IntrNoMem]>; - class Neon_2Arg_Long_Intrinsic - : Intrinsic<[llvm_anyvector_ty], - [LLVMTruncatedElementVectorType<0>, - LLVMTruncatedElementVectorType<0>], - [IntrNoMem]>; - class Neon_3Arg_Intrinsic - : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem]>; - class Neon_3Arg_Long_Intrinsic - : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, - LLVMTruncatedElementVectorType<0>, - LLVMTruncatedElementVectorType<0>], - [IntrNoMem]>; - class Neon_CvtFxToFP_Intrinsic - : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>; - class Neon_CvtFPToFx_Intrinsic - : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>; - - // The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors. - // Besides the table, VTBL has one other v8i8 argument and VTBX has two. - // Overall, the classes range from 2 to 6 v8i8 arguments. - class Neon_Tbl2Arg_Intrinsic - : Intrinsic<[llvm_v8i8_ty], - [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>; - class Neon_Tbl3Arg_Intrinsic - : Intrinsic<[llvm_v8i8_ty], - [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>; - class Neon_Tbl4Arg_Intrinsic - : Intrinsic<[llvm_v8i8_ty], - [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], - [IntrNoMem]>; - class Neon_Tbl5Arg_Intrinsic - : Intrinsic<[llvm_v8i8_ty], - [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, - llvm_v8i8_ty], [IntrNoMem]>; - class Neon_Tbl6Arg_Intrinsic - : Intrinsic<[llvm_v8i8_ty], - [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, - llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>; -} +// The following classes do not correspond directly to GCC builtins. +class Neon_1Arg_Intrinsic + : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>; +class Neon_1Arg_Narrow_Intrinsic + : Intrinsic<[llvm_anyvector_ty], + [LLVMExtendedElementVectorType<0>], [IntrNoMem]>; +class Neon_2Arg_Intrinsic + : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem]>; +class Neon_2Arg_Narrow_Intrinsic + : Intrinsic<[llvm_anyvector_ty], + [LLVMExtendedElementVectorType<0>, + LLVMExtendedElementVectorType<0>], + [IntrNoMem]>; +class Neon_2Arg_Long_Intrinsic + : Intrinsic<[llvm_anyvector_ty], + [LLVMTruncatedElementVectorType<0>, + LLVMTruncatedElementVectorType<0>], + [IntrNoMem]>; +class Neon_3Arg_Intrinsic + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem]>; +class Neon_3Arg_Long_Intrinsic + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, + LLVMTruncatedElementVectorType<0>, + LLVMTruncatedElementVectorType<0>], + [IntrNoMem]>; +class Neon_CvtFxToFP_Intrinsic + : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty], [IntrNoMem]>; +class Neon_CvtFPToFx_Intrinsic + : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]>; + +// The table operands for VTBL and VTBX consist of 1 to 4 v8i8 vectors. +// Besides the table, VTBL has one other v8i8 argument and VTBX has two. 
+// Overall, the classes range from 2 to 6 v8i8 arguments. +class Neon_Tbl2Arg_Intrinsic + : Intrinsic<[llvm_v8i8_ty], + [llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>; +class Neon_Tbl3Arg_Intrinsic + : Intrinsic<[llvm_v8i8_ty], + [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>; +class Neon_Tbl4Arg_Intrinsic + : Intrinsic<[llvm_v8i8_ty], + [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty], + [IntrNoMem]>; +class Neon_Tbl5Arg_Intrinsic + : Intrinsic<[llvm_v8i8_ty], + [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, + llvm_v8i8_ty], [IntrNoMem]>; +class Neon_Tbl6Arg_Intrinsic + : Intrinsic<[llvm_v8i8_ty], + [llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, llvm_v8i8_ty, + llvm_v8i8_ty, llvm_v8i8_ty], [IntrNoMem]>; // Arithmetic ops @@ -209,20 +198,18 @@ def int_arm_neon_vsubhn : Neon_2Arg_Narrow_Intrinsic; def int_arm_neon_vrsubhn : Neon_2Arg_Narrow_Intrinsic; // Vector Absolute Compare. -let TargetPrefix = "arm" in { - def int_arm_neon_vacged : Intrinsic<[llvm_v2i32_ty], - [llvm_v2f32_ty, llvm_v2f32_ty], - [IntrNoMem]>; - def int_arm_neon_vacgeq : Intrinsic<[llvm_v4i32_ty], - [llvm_v4f32_ty, llvm_v4f32_ty], - [IntrNoMem]>; - def int_arm_neon_vacgtd : Intrinsic<[llvm_v2i32_ty], - [llvm_v2f32_ty, llvm_v2f32_ty], - [IntrNoMem]>; - def int_arm_neon_vacgtq : Intrinsic<[llvm_v4i32_ty], - [llvm_v4f32_ty, llvm_v4f32_ty], - [IntrNoMem]>; -} +def int_arm_neon_vacged : Intrinsic<[llvm_v2i32_ty], + [llvm_v2f32_ty, llvm_v2f32_ty], + [IntrNoMem]>; +def int_arm_neon_vacgeq : Intrinsic<[llvm_v4i32_ty], + [llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; +def int_arm_neon_vacgtd : Intrinsic<[llvm_v2i32_ty], + [llvm_v2f32_ty, llvm_v2f32_ty], + [IntrNoMem]>; +def int_arm_neon_vacgtq : Intrinsic<[llvm_v4i32_ty], + [llvm_v4f32_ty, llvm_v4f32_ty], + [IntrNoMem]>; // Vector Absolute Differences. def int_arm_neon_vabds : Neon_2Arg_Intrinsic; @@ -235,24 +222,20 @@ def int_arm_neon_vpadd : Neon_2Arg_Intrinsic; // Note: This is different than the other "long" NEON intrinsics because // the result vector has half as many elements as the source vector. // The source and destination vector types must be specified separately. -let TargetPrefix = "arm" in { - def int_arm_neon_vpaddls : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], - [IntrNoMem]>; - def int_arm_neon_vpaddlu : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], - [IntrNoMem]>; -} +def int_arm_neon_vpaddls : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], + [IntrNoMem]>; +def int_arm_neon_vpaddlu : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], + [IntrNoMem]>; // Vector Pairwise Add and Accumulate Long. // Note: This is similar to vpaddl but the destination vector also appears // as the first argument. -let TargetPrefix = "arm" in { - def int_arm_neon_vpadals : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyvector_ty], - [IntrNoMem]>; - def int_arm_neon_vpadalu : Intrinsic<[llvm_anyvector_ty], - [LLVMMatchType<0>, llvm_anyvector_ty], - [IntrNoMem]>; -} +def int_arm_neon_vpadals : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, llvm_anyvector_ty], + [IntrNoMem]>; +def int_arm_neon_vpadalu : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, llvm_anyvector_ty], + [IntrNoMem]>; // Vector Pairwise Maximum and Minimum. 
def int_arm_neon_vpmaxs : Neon_2Arg_Intrinsic; @@ -364,79 +347,83 @@ def int_arm_neon_vtbx2 : Neon_Tbl4Arg_Intrinsic; def int_arm_neon_vtbx3 : Neon_Tbl5Arg_Intrinsic; def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic; -let TargetPrefix = "arm" in { - - // De-interleaving vector loads from N-element structures. - // Source operands are the address and alignment. - def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty], - [llvm_ptr_ty, llvm_i32_ty], - [IntrReadArgMem]>; - def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], - [llvm_ptr_ty, llvm_i32_ty], - [IntrReadArgMem]>; - def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, - LLVMMatchType<0>], - [llvm_ptr_ty, llvm_i32_ty], - [IntrReadArgMem]>; - def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, - LLVMMatchType<0>, LLVMMatchType<0>], - [llvm_ptr_ty, llvm_i32_ty], - [IntrReadArgMem]>; - - // Vector load N-element structure to one lane. - // Source operands are: the address, the N input vectors (since only one - // lane is assigned), the lane number, and the alignment. - def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], - [llvm_ptr_ty, LLVMMatchType<0>, - LLVMMatchType<0>, llvm_i32_ty, - llvm_i32_ty], [IntrReadArgMem]>; - def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, - LLVMMatchType<0>], - [llvm_ptr_ty, LLVMMatchType<0>, - LLVMMatchType<0>, LLVMMatchType<0>, - llvm_i32_ty, llvm_i32_ty], - [IntrReadArgMem]>; - def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, - LLVMMatchType<0>, LLVMMatchType<0>], - [llvm_ptr_ty, LLVMMatchType<0>, - LLVMMatchType<0>, LLVMMatchType<0>, - LLVMMatchType<0>, llvm_i32_ty, - llvm_i32_ty], [IntrReadArgMem]>; - - // Interleaving vector stores from N-element structures. - // Source operands are: the address, the N vectors, and the alignment. - def int_arm_neon_vst1 : Intrinsic<[], - [llvm_ptr_ty, llvm_anyvector_ty, - llvm_i32_ty], [IntrReadWriteArgMem]>; - def int_arm_neon_vst2 : Intrinsic<[], - [llvm_ptr_ty, llvm_anyvector_ty, - LLVMMatchType<0>, llvm_i32_ty], - [IntrReadWriteArgMem]>; - def int_arm_neon_vst3 : Intrinsic<[], - [llvm_ptr_ty, llvm_anyvector_ty, - LLVMMatchType<0>, LLVMMatchType<0>, - llvm_i32_ty], [IntrReadWriteArgMem]>; - def int_arm_neon_vst4 : Intrinsic<[], - [llvm_ptr_ty, llvm_anyvector_ty, - LLVMMatchType<0>, LLVMMatchType<0>, - LLVMMatchType<0>, llvm_i32_ty], - [IntrReadWriteArgMem]>; - - // Vector store N-element structure from one lane. - // Source operands are: the address, the N vectors, the lane number, and - // the alignment. - def int_arm_neon_vst2lane : Intrinsic<[], - [llvm_ptr_ty, llvm_anyvector_ty, - LLVMMatchType<0>, llvm_i32_ty, - llvm_i32_ty], [IntrReadWriteArgMem]>; - def int_arm_neon_vst3lane : Intrinsic<[], - [llvm_ptr_ty, llvm_anyvector_ty, - LLVMMatchType<0>, LLVMMatchType<0>, - llvm_i32_ty, llvm_i32_ty], - [IntrReadWriteArgMem]>; - def int_arm_neon_vst4lane : Intrinsic<[], - [llvm_ptr_ty, llvm_anyvector_ty, - LLVMMatchType<0>, LLVMMatchType<0>, - LLVMMatchType<0>, llvm_i32_ty, - llvm_i32_ty], [IntrReadWriteArgMem]>; -} +// De-interleaving vector loads from N-element structures. +// Source operands are the address and alignment. 
+def int_arm_neon_vld1 : Intrinsic<[llvm_anyvector_ty], + [llvm_ptr_ty, llvm_i32_ty], + [IntrReadArgMem]>; +def int_arm_neon_vld2 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], + [llvm_ptr_ty, llvm_i32_ty], + [IntrReadArgMem]>; +def int_arm_neon_vld3 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, + LLVMMatchType<0>], + [llvm_ptr_ty, llvm_i32_ty], + [IntrReadArgMem]>; +def int_arm_neon_vld4 : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, + LLVMMatchType<0>, LLVMMatchType<0>], + [llvm_ptr_ty, llvm_i32_ty], + [IntrReadArgMem]>; + +// Vector load N-element structure to one lane. +// Source operands are: the address, the N input vectors (since only one +// lane is assigned), the lane number, and the alignment. +def int_arm_neon_vld2lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], + [llvm_ptr_ty, LLVMMatchType<0>, + LLVMMatchType<0>, llvm_i32_ty, + llvm_i32_ty], [IntrReadArgMem]>; +def int_arm_neon_vld3lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, + LLVMMatchType<0>], + [llvm_ptr_ty, LLVMMatchType<0>, + LLVMMatchType<0>, LLVMMatchType<0>, + llvm_i32_ty, llvm_i32_ty], + [IntrReadArgMem]>; +def int_arm_neon_vld4lane : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, + LLVMMatchType<0>, LLVMMatchType<0>], + [llvm_ptr_ty, LLVMMatchType<0>, + LLVMMatchType<0>, LLVMMatchType<0>, + LLVMMatchType<0>, llvm_i32_ty, + llvm_i32_ty], [IntrReadArgMem]>; + +// Interleaving vector stores from N-element structures. +// Source operands are: the address, the N vectors, and the alignment. +def int_arm_neon_vst1 : Intrinsic<[], + [llvm_ptr_ty, llvm_anyvector_ty, + llvm_i32_ty], [IntrReadWriteArgMem]>; +def int_arm_neon_vst2 : Intrinsic<[], + [llvm_ptr_ty, llvm_anyvector_ty, + LLVMMatchType<0>, llvm_i32_ty], + [IntrReadWriteArgMem]>; +def int_arm_neon_vst3 : Intrinsic<[], + [llvm_ptr_ty, llvm_anyvector_ty, + LLVMMatchType<0>, LLVMMatchType<0>, + llvm_i32_ty], [IntrReadWriteArgMem]>; +def int_arm_neon_vst4 : Intrinsic<[], + [llvm_ptr_ty, llvm_anyvector_ty, + LLVMMatchType<0>, LLVMMatchType<0>, + LLVMMatchType<0>, llvm_i32_ty], + [IntrReadWriteArgMem]>; + +// Vector store N-element structure from one lane. +// Source operands are: the address, the N vectors, the lane number, and +// the alignment. +def int_arm_neon_vst2lane : Intrinsic<[], + [llvm_ptr_ty, llvm_anyvector_ty, + LLVMMatchType<0>, llvm_i32_ty, + llvm_i32_ty], [IntrReadWriteArgMem]>; +def int_arm_neon_vst3lane : Intrinsic<[], + [llvm_ptr_ty, llvm_anyvector_ty, + LLVMMatchType<0>, LLVMMatchType<0>, + llvm_i32_ty, llvm_i32_ty], + [IntrReadWriteArgMem]>; +def int_arm_neon_vst4lane : Intrinsic<[], + [llvm_ptr_ty, llvm_anyvector_ty, + LLVMMatchType<0>, LLVMMatchType<0>, + LLVMMatchType<0>, llvm_i32_ty, + llvm_i32_ty], [IntrReadWriteArgMem]>; + +// Vector bitwise select. +def int_arm_neon_vbsl : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem]>; + +} // end TargetPrefix diff --git a/include/llvm/IntrinsicsMips.td b/include/llvm/IntrinsicsMips.td index 4375ac2..e40e162 100644 --- a/include/llvm/IntrinsicsMips.td +++ b/include/llvm/IntrinsicsMips.td @@ -14,11 +14,15 @@ //===----------------------------------------------------------------------===// // MIPS DSP data types def mips_v2q15_ty: LLVMType; +def mips_v4q7_ty: LLVMType; def mips_q31_ty: LLVMType; let TargetPrefix = "mips" in { // All intrinsics start with "llvm.mips.". 
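The vldN/vstN definitions and their lane variants above are overloaded on the vector type, so a frontend has to supply the concrete type when it materializes a declaration, and the operand order follows the comments: address first, then (for loads) the alignment. As an illustration only — this helper is not part of the patch, and the name emitVld2 and the fixed alignment are invented — building a call to llvm.arm.neon.vld2 through the C++ API of that era looks roughly like this:

#include "llvm/IRBuilder.h"
#include "llvm/Intrinsics.h"
#include "llvm/Module.h"

static llvm::Value *emitVld2(llvm::IRBuilder<> &Builder, llvm::Module &M,
                             llvm::VectorType *VTy, llvm::Value *Addr) {
  using namespace llvm;
  // llvm.arm.neon.vld2 is overloaded on its result vector type, so the
  // concrete type is passed when the declaration is created.
  Function *Vld2 = Intrinsic::getDeclaration(&M, Intrinsic::arm_neon_vld2, VTy);
  Value *Align = Builder.getInt32(8);   // assumed alignment for the example
  // The result is a two-member struct {VTy, VTy}, one per de-interleaved field.
  return Builder.CreateCall2(Vld2, Addr, Align, "vld2");
}

The MIPS definitions that follow mix dedicated fixed-point element types with plain integer vectors: v2q15 holds two Q15 values, v4q7 four Q7 values, and q31 a single Q31 value, so the saturating/fractional builtins below take the Q-format types while the modulo variants use llvm_v2i16/llvm_v4i8 directly.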
//===----------------------------------------------------------------------===// +// MIPS DSP Rev 1 + +//===----------------------------------------------------------------------===// // Addition/subtraction def int_mips_addu_qb : GCCBuiltin<"__builtin_mips_addu_qb">, @@ -261,4 +265,125 @@ def int_mips_lhx: GCCBuiltin<"__builtin_mips_lhx">, Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>; def int_mips_lwx: GCCBuiltin<"__builtin_mips_lwx">, Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty], [IntrReadArgMem]>; + +//===----------------------------------------------------------------------===// +// MIPS DSP Rev 2 + +def int_mips_absq_s_qb: GCCBuiltin<"__builtin_mips_absq_s_qb">, + Intrinsic<[mips_v4q7_ty], [mips_v4q7_ty], []>; + +def int_mips_addqh_ph: GCCBuiltin<"__builtin_mips_addqh_ph">, + Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], + [IntrNoMem, Commutative]>; +def int_mips_addqh_r_ph: GCCBuiltin<"__builtin_mips_addqh_r_ph">, + Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], + [IntrNoMem, Commutative]>; +def int_mips_addqh_w: GCCBuiltin<"__builtin_mips_addqh_w">, + Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], + [IntrNoMem, Commutative]>; +def int_mips_addqh_r_w: GCCBuiltin<"__builtin_mips_addqh_r_w">, + Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], + [IntrNoMem, Commutative]>; + +def int_mips_addu_ph: GCCBuiltin<"__builtin_mips_addu_ph">, + Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>; +def int_mips_addu_s_ph: GCCBuiltin<"__builtin_mips_addu_s_ph">, + Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>; + +def int_mips_adduh_qb: GCCBuiltin<"__builtin_mips_adduh_qb">, + Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], + [IntrNoMem, Commutative]>; +def int_mips_adduh_r_qb: GCCBuiltin<"__builtin_mips_adduh_r_qb">, + Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], + [IntrNoMem, Commutative]>; + +def int_mips_append: GCCBuiltin<"__builtin_mips_append">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [IntrNoMem]>; +def int_mips_balign: GCCBuiltin<"__builtin_mips_balign">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [IntrNoMem]>; + +def int_mips_cmpgdu_eq_qb: GCCBuiltin<"__builtin_mips_cmpgdu_eq_qb">, + Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>; +def int_mips_cmpgdu_lt_qb: GCCBuiltin<"__builtin_mips_cmpgdu_lt_qb">, + Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>; +def int_mips_cmpgdu_le_qb: GCCBuiltin<"__builtin_mips_cmpgdu_le_qb">, + Intrinsic<[llvm_i32_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [Commutative]>; + +def int_mips_dpa_w_ph: GCCBuiltin<"__builtin_mips_dpa_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty], + [IntrNoMem]>; +def int_mips_dps_w_ph: GCCBuiltin<"__builtin_mips_dps_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty], + [IntrNoMem]>; + +def int_mips_dpaqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_s_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>; +def int_mips_dpaqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpaqx_sa_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>; +def int_mips_dpax_w_ph: GCCBuiltin<"__builtin_mips_dpax_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty], + [IntrNoMem]>; +def int_mips_dpsx_w_ph: GCCBuiltin<"__builtin_mips_dpsx_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, 
llvm_v2i16_ty], + [IntrNoMem]>; +def int_mips_dpsqx_s_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_s_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>; +def int_mips_dpsqx_sa_w_ph: GCCBuiltin<"__builtin_mips_dpsqx_sa_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, mips_v2q15_ty, mips_v2q15_ty], []>; + +def int_mips_mul_ph: GCCBuiltin<"__builtin_mips_mul_ph">, + Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>; +def int_mips_mul_s_ph: GCCBuiltin<"__builtin_mips_mul_s_ph">, + Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], [Commutative]>; + +def int_mips_mulq_rs_w: GCCBuiltin<"__builtin_mips_mulq_rs_w">, + Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>; +def int_mips_mulq_s_ph: GCCBuiltin<"__builtin_mips_mulq_s_ph">, + Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [Commutative]>; +def int_mips_mulq_s_w: GCCBuiltin<"__builtin_mips_mulq_s_w">, + Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [Commutative]>; +def int_mips_mulsa_w_ph: GCCBuiltin<"__builtin_mips_mulsa_w_ph">, + Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_v2i16_ty, llvm_v2i16_ty], + [IntrNoMem]>; + +def int_mips_precr_qb_ph: GCCBuiltin<"__builtin_mips_precr_qb_ph">, + Intrinsic<[llvm_v4i8_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>; +def int_mips_precr_sra_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_ph_w">, + Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [IntrNoMem]>; +def int_mips_precr_sra_r_ph_w: GCCBuiltin<"__builtin_mips_precr_sra_r_ph_w">, + Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [IntrNoMem]>; + +def int_mips_prepend: GCCBuiltin<"__builtin_mips_prepend">, + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], + [IntrNoMem]>; + +def int_mips_shra_qb: GCCBuiltin<"__builtin_mips_shra_qb">, + Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>; +def int_mips_shra_r_qb: GCCBuiltin<"__builtin_mips_shra_r_qb">, + Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_i32_ty], [IntrNoMem]>; +def int_mips_shrl_ph: GCCBuiltin<"__builtin_mips_shrl_ph">, + Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_i32_ty], [IntrNoMem]>; + +def int_mips_subqh_ph: GCCBuiltin<"__builtin_mips_subqh_ph">, + Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>; +def int_mips_subqh_r_ph: GCCBuiltin<"__builtin_mips_subqh_r_ph">, + Intrinsic<[mips_v2q15_ty], [mips_v2q15_ty, mips_v2q15_ty], [IntrNoMem]>; +def int_mips_subqh_w: GCCBuiltin<"__builtin_mips_subqh_w">, + Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>; +def int_mips_subqh_r_w: GCCBuiltin<"__builtin_mips_subqh_r_w">, + Intrinsic<[mips_q31_ty], [mips_q31_ty, mips_q31_ty], [IntrNoMem]>; + +def int_mips_subu_ph: GCCBuiltin<"__builtin_mips_subu_ph">, + Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>; +def int_mips_subu_s_ph: GCCBuiltin<"__builtin_mips_subu_s_ph">, + Intrinsic<[llvm_v2i16_ty], [llvm_v2i16_ty, llvm_v2i16_ty], []>; + +def int_mips_subuh_qb: GCCBuiltin<"__builtin_mips_subuh_qb">, + Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>; +def int_mips_subuh_r_qb: GCCBuiltin<"__builtin_mips_subuh_r_qb">, + Intrinsic<[llvm_v4i8_ty], [llvm_v4i8_ty, llvm_v4i8_ty], [IntrNoMem]>; } diff --git a/include/llvm/IntrinsicsX86.td b/include/llvm/IntrinsicsX86.td index e8039f2..d2463c0 100644 --- a/include/llvm/IntrinsicsX86.td +++ b/include/llvm/IntrinsicsX86.td @@ -219,7 +219,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse_storeu_ps : GCCBuiltin<"__builtin_ia32_storeups">, Intrinsic<[], [llvm_ptr_ty, - llvm_v4f32_ty], []>; + llvm_v4f32_ty], [IntrReadWriteArgMem]>; } // Cacheability support ops @@ -502,13 +502,13 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_sse2_storeu_pd : GCCBuiltin<"__builtin_ia32_storeupd">, Intrinsic<[], [llvm_ptr_ty, - llvm_v2f64_ty], []>; + llvm_v2f64_ty], [IntrReadWriteArgMem]>; def int_x86_sse2_storeu_dq : GCCBuiltin<"__builtin_ia32_storedqu">, Intrinsic<[], [llvm_ptr_ty, - llvm_v16i8_ty], []>; + llvm_v16i8_ty], [IntrReadWriteArgMem]>; def int_x86_sse2_storel_dq : GCCBuiltin<"__builtin_ia32_storelv4si">, Intrinsic<[], [llvm_ptr_ty, - llvm_v4i32_ty], []>; + llvm_v4i32_ty], [IntrReadWriteArgMem]>; } // Misc. @@ -1270,19 +1270,19 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_avx_vbroadcast_ss : GCCBuiltin<"__builtin_ia32_vbroadcastss">, - Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadMem]>; + Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>; def int_x86_avx_vbroadcast_sd_256 : GCCBuiltin<"__builtin_ia32_vbroadcastsd256">, - Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>; + Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>; def int_x86_avx_vbroadcast_ss_256 : GCCBuiltin<"__builtin_ia32_vbroadcastss256">, - Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>; + Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>; def int_x86_avx_vbroadcastf128_pd_256 : GCCBuiltin<"__builtin_ia32_vbroadcastf128_pd256">, - Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadMem]>; + Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty], [IntrReadArgMem]>; def int_x86_avx_vbroadcastf128_ps_256 : GCCBuiltin<"__builtin_ia32_vbroadcastf128_ps256">, - Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadMem]>; + Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty], [IntrReadArgMem]>; } // SIMD load ops @@ -1294,41 +1294,45 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // SIMD store ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_avx_storeu_pd_256 : GCCBuiltin<"__builtin_ia32_storeupd256">, - Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], []>; + Intrinsic<[], [llvm_ptr_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>; def int_x86_avx_storeu_ps_256 : GCCBuiltin<"__builtin_ia32_storeups256">, - Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], []>; + Intrinsic<[], [llvm_ptr_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>; def int_x86_avx_storeu_dq_256 : GCCBuiltin<"__builtin_ia32_storedqu256">, - Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], []>; + Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty], [IntrReadWriteArgMem]>; } // Conditional load ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
def int_x86_avx_maskload_pd : GCCBuiltin<"__builtin_ia32_maskloadpd">, - Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty], [IntrReadMem]>; + Intrinsic<[llvm_v2f64_ty], [llvm_ptr_ty, llvm_v2f64_ty], + [IntrReadArgMem]>; def int_x86_avx_maskload_ps : GCCBuiltin<"__builtin_ia32_maskloadps">, - Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty], [IntrReadMem]>; + Intrinsic<[llvm_v4f32_ty], [llvm_ptr_ty, llvm_v4f32_ty], + [IntrReadArgMem]>; def int_x86_avx_maskload_pd_256 : GCCBuiltin<"__builtin_ia32_maskloadpd256">, - Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty], [IntrReadMem]>; + Intrinsic<[llvm_v4f64_ty], [llvm_ptr_ty, llvm_v4f64_ty], + [IntrReadArgMem]>; def int_x86_avx_maskload_ps_256 : GCCBuiltin<"__builtin_ia32_maskloadps256">, - Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty], [IntrReadMem]>; + Intrinsic<[llvm_v8f32_ty], [llvm_ptr_ty, llvm_v8f32_ty], + [IntrReadArgMem]>; } // Conditional store ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_avx_maskstore_pd : GCCBuiltin<"__builtin_ia32_maskstorepd">, Intrinsic<[], [llvm_ptr_ty, - llvm_v2f64_ty, llvm_v2f64_ty], []>; + llvm_v2f64_ty, llvm_v2f64_ty], [IntrReadWriteArgMem]>; def int_x86_avx_maskstore_ps : GCCBuiltin<"__builtin_ia32_maskstoreps">, Intrinsic<[], [llvm_ptr_ty, - llvm_v4f32_ty, llvm_v4f32_ty], []>; + llvm_v4f32_ty, llvm_v4f32_ty], [IntrReadWriteArgMem]>; def int_x86_avx_maskstore_pd_256 : GCCBuiltin<"__builtin_ia32_maskstorepd256">, Intrinsic<[], [llvm_ptr_ty, - llvm_v4f64_ty, llvm_v4f64_ty], []>; + llvm_v4f64_ty, llvm_v4f64_ty], [IntrReadWriteArgMem]>; def int_x86_avx_maskstore_ps_256 : GCCBuiltin<"__builtin_ia32_maskstoreps256">, Intrinsic<[], [llvm_ptr_ty, - llvm_v8f32_ty, llvm_v8f32_ty], []>; + llvm_v8f32_ty, llvm_v8f32_ty], [IntrReadWriteArgMem]>; } //===----------------------------------------------------------------------===// @@ -1632,7 +1636,7 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". Intrinsic<[llvm_v8f32_ty], [llvm_v4f32_ty], [IntrNoMem]>; def int_x86_avx2_vbroadcasti128 : GCCBuiltin<"__builtin_ia32_vbroadcastsi256">, - Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadMem]>; + Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadArgMem]>; def int_x86_avx2_pbroadcastb_128 : GCCBuiltin<"__builtin_ia32_pbroadcastb128">, Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>; @@ -1685,27 +1689,35 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". // Conditional load ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". 
def int_x86_avx2_maskload_d : GCCBuiltin<"__builtin_ia32_maskloadd">, - Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty], [IntrReadMem]>; + Intrinsic<[llvm_v4i32_ty], [llvm_ptr_ty, llvm_v4i32_ty], + [IntrReadArgMem]>; def int_x86_avx2_maskload_q : GCCBuiltin<"__builtin_ia32_maskloadq">, - Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty], [IntrReadMem]>; + Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty], + [IntrReadArgMem]>; def int_x86_avx2_maskload_d_256 : GCCBuiltin<"__builtin_ia32_maskloadd256">, - Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty], [IntrReadMem]>; + Intrinsic<[llvm_v8i32_ty], [llvm_ptr_ty, llvm_v8i32_ty], + [IntrReadArgMem]>; def int_x86_avx2_maskload_q_256 : GCCBuiltin<"__builtin_ia32_maskloadq256">, - Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty], [IntrReadMem]>; + Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty, llvm_v4i64_ty], + [IntrReadArgMem]>; } // Conditional store ops let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_avx2_maskstore_d : GCCBuiltin<"__builtin_ia32_maskstored">, - Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty], []>; + Intrinsic<[], [llvm_ptr_ty, llvm_v4i32_ty, llvm_v4i32_ty], + [IntrReadWriteArgMem]>; def int_x86_avx2_maskstore_q : GCCBuiltin<"__builtin_ia32_maskstoreq">, - Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty], []>; + Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty, llvm_v2i64_ty], + [IntrReadWriteArgMem]>; def int_x86_avx2_maskstore_d_256 : GCCBuiltin<"__builtin_ia32_maskstored256">, - Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty], []>; + Intrinsic<[], [llvm_ptr_ty, llvm_v8i32_ty, llvm_v8i32_ty], + [IntrReadWriteArgMem]>; def int_x86_avx2_maskstore_q_256 : GCCBuiltin<"__builtin_ia32_maskstoreq256">, - Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty], []>; + Intrinsic<[], [llvm_ptr_ty, llvm_v4i64_ty, llvm_v4i64_ty], + [IntrReadWriteArgMem]>; } // Variable bit shift ops @@ -2547,3 +2559,15 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". def int_x86_rdrand_32 : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [], []>; def int_x86_rdrand_64 : Intrinsic<[llvm_i64_ty, llvm_i32_ty], [], []>; } + +//===----------------------------------------------------------------------===// +// RTM intrinsics. Transactional Memory support. + +let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.". + def int_x86_xbegin : GCCBuiltin<"__builtin_ia32_xbegin">, + Intrinsic<[llvm_i32_ty], [], []>; + def int_x86_xend : GCCBuiltin<"__builtin_ia32_xend">, + Intrinsic<[], [], []>; + def int_x86_xabort : GCCBuiltin<"__builtin_ia32_xabort">, + Intrinsic<[], [llvm_i8_ty], [IntrNoReturn]>; +} diff --git a/include/llvm/LLVMContext.h b/include/llvm/LLVMContext.h index a8306a9..5903e2e 100644 --- a/include/llvm/LLVMContext.h +++ b/include/llvm/LLVMContext.h @@ -15,6 +15,8 @@ #ifndef LLVM_LLVMCONTEXT_H #define LLVM_LLVMCONTEXT_H +#include "llvm/Support/Compiler.h" + namespace llvm { class LLVMContextImpl; @@ -43,7 +45,8 @@ public: MD_tbaa = 1, // "tbaa" MD_prof = 2, // "prof" MD_fpmath = 3, // "fpmath" - MD_range = 4 // "range" + MD_range = 4, // "range" + MD_tbaa_struct = 5 // "tbaa.struct" }; /// getMDKindID - Return a unique non-zero ID for the specified metadata kind. 
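One note on the LLVMContext hunk at the end of this chunk: the fixed metadata-kind IDs exist so that passes can attach or query well-known metadata without a string lookup on every use, and the new MD_tbaa_struct entry reserves an ID for the "tbaa.struct" kind, which describes the field layout of an aggregate copy for TBAA. A minimal illustration (the helper is invented, not from the patch; both calls name the same kind):

#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"

void tagWithRange(llvm::Instruction *Load, llvm::MDNode *RangeMD) {
  llvm::LLVMContext &Ctx = Load->getContext();
  // The enumerator and the string lookup resolve to the same metadata kind ID.
  Load->setMetadata(llvm::LLVMContext::MD_range, RangeMD);
  Load->setMetadata(Ctx.getMDKindID("range"), RangeMD);
}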
@@ -87,9 +90,8 @@ public: void emitError(const Twine &ErrorStr); private: - // DO NOT IMPLEMENT - LLVMContext(LLVMContext&); - void operator=(LLVMContext&); + LLVMContext(LLVMContext&) LLVM_DELETED_FUNCTION; + void operator=(LLVMContext&) LLVM_DELETED_FUNCTION; /// addModule - Register a module as being instantiated in this context. If /// the context is deleted, the module will be deleted as well. diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h index 697c94c..806e4b3 100644 --- a/include/llvm/LinkAllPasses.h +++ b/include/llvm/LinkAllPasses.h @@ -60,10 +60,12 @@ namespace { (void) llvm::createCFGSimplificationPass(); (void) llvm::createConstantMergePass(); (void) llvm::createConstantPropagationPass(); + (void) llvm::createCostModelAnalysisPass(); (void) llvm::createDeadArgEliminationPass(); (void) llvm::createDeadCodeEliminationPass(); (void) llvm::createDeadInstEliminationPass(); (void) llvm::createDeadStoreEliminationPass(); + (void) llvm::createDependenceAnalysisPass(); (void) llvm::createDomOnlyPrinterPass(); (void) llvm::createDomPrinterPass(); (void) llvm::createDomOnlyViewerPass(); @@ -81,11 +83,10 @@ namespace { (void) llvm::createIPSCCPPass(); (void) llvm::createIndVarSimplifyPass(); (void) llvm::createInstructionCombiningPass(); - (void) llvm::createInternalizePass(false); + (void) llvm::createInternalizePass(); (void) llvm::createLCSSAPass(); (void) llvm::createLICMPass(); (void) llvm::createLazyValueInfoPass(); - (void) llvm::createLoopDependenceAnalysisPass(); (void) llvm::createLoopExtractorPass(); (void) llvm::createLoopSimplifyPass(); (void) llvm::createLoopStrengthReducePass(); @@ -107,6 +108,7 @@ namespace { (void) llvm::createProfileVerifierPass(); (void) llvm::createPathProfileVerifierPass(); (void) llvm::createProfileLoaderPass(); + (void) llvm::createProfileMetadataLoaderPass(); (void) llvm::createPathProfileLoaderPass(); (void) llvm::createPromoteMemoryToRegisterPass(); (void) llvm::createDemoteRegisterToMemoryPass(); @@ -140,6 +142,7 @@ namespace { (void) llvm::createLoopDeletionPass(); (void) llvm::createPostDomTree(); (void) llvm::createInstructionNamerPass(); + (void) llvm::createMetaRenamerPass(); (void) llvm::createFunctionAttrsPass(); (void) llvm::createMergeFunctionsPass(); (void) llvm::createPrintModulePass(0); @@ -153,6 +156,7 @@ namespace { (void) llvm::createCorrelatedValuePropagationPass(); (void) llvm::createMemDepPrinter(); (void) llvm::createInstructionSimplifierPass(); + (void) llvm::createLoopVectorizePass(); (void) llvm::createBBVectorizePass(); (void)new llvm::IntervalPartition(); diff --git a/include/llvm/MC/MCAsmBackend.h b/include/llvm/MC/MCAsmBackend.h index 05e6286..72ed1a3 100644 --- a/include/llvm/MC/MCAsmBackend.h +++ b/include/llvm/MC/MCAsmBackend.h @@ -30,12 +30,13 @@ class raw_ostream; /// MCAsmBackend - Generic interface to target specific assembler backends. class MCAsmBackend { - MCAsmBackend(const MCAsmBackend &); // DO NOT IMPLEMENT - void operator=(const MCAsmBackend &); // DO NOT IMPLEMENT + MCAsmBackend(const MCAsmBackend &) LLVM_DELETED_FUNCTION; + void operator=(const MCAsmBackend &) LLVM_DELETED_FUNCTION; protected: // Can only create subclasses. MCAsmBackend(); unsigned HasReliableSymbolDifference : 1; + unsigned HasDataInCodeSupport : 1; public: virtual ~MCAsmBackend(); @@ -65,6 +66,12 @@ public: return HasReliableSymbolDifference; } + /// hasDataInCodeSupport - Check whether this target implements data-in-code + /// markers. If not, data region directives will be ignored. 
+ bool hasDataInCodeSupport() const { + return HasDataInCodeSupport; + } + /// doesSectionRequireSymbols - Check whether the given section requires that /// all symbols (even temporaries) have symbol table entries. virtual bool doesSectionRequireSymbols(const MCSection &Section) const { @@ -99,7 +106,7 @@ public: /// @} - /// applyFixup - Apply the \arg Value for given \arg Fixup into the provided + /// applyFixup - Apply the \p Value for given \p Fixup into the provided /// data fragment, at the offset specified by the fixup and following the /// fixup kind as appropriate. virtual void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, @@ -126,13 +133,20 @@ public: /// RelaxInstruction - Relax the instruction in the given fragment to the next /// wider instruction. /// - /// \param Inst - The instruction to relax, which may be the same as the + /// \param Inst The instruction to relax, which may be the same as the /// output. - /// \parm Res [output] - On return, the relaxed instruction. + /// \param [out] Res On return, the relaxed instruction. virtual void relaxInstruction(const MCInst &Inst, MCInst &Res) const = 0; /// @} + /// getMinimumNopSize - Returns the minimum size of a nop in bytes on this + /// target. The assembler will use this to emit excess padding in situations + /// where the padding required for simple alignment would be less than the + /// minimum nop size. + /// + virtual unsigned getMinimumNopSize() const { return 1; } + /// writeNopData - Write an (optimal) nop sequence of Count bytes to the given /// output. If the target cannot generate such a sequence, it should return an /// error. diff --git a/include/llvm/MC/MCAsmInfo.h b/include/llvm/MC/MCAsmInfo.h index 9f5230b..97aad71 100644 --- a/include/llvm/MC/MCAsmInfo.h +++ b/include/llvm/MC/MCAsmInfo.h @@ -33,7 +33,7 @@ namespace llvm { } namespace LCOMM { - enum LCOMMType { None, NoAlignment, ByteAlignment }; + enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment }; } /// MCAsmInfo - This class is intended to be used as a base class for asm @@ -247,14 +247,14 @@ namespace llvm { /// .long a - b bool HasAggressiveSymbolFolding; // Defaults to true. - /// LCOMMDirectiveType - Describes if the target supports the .lcomm - /// directive and whether it has an alignment parameter. - LCOMM::LCOMMType LCOMMDirectiveType; // Defaults to LCOMM::None. - - /// COMMDirectiveAlignmentIsInBytes - True is COMMDirective's optional + /// COMMDirectiveAlignmentIsInBytes - True is .comm's and .lcomms optional /// alignment is to be specified in bytes instead of log2(n). bool COMMDirectiveAlignmentIsInBytes; // Defaults to true; + /// LCOMMDirectiveAlignment - Describes if the .lcomm directive for the + /// target supports an alignment argument and how it is interpreted. + LCOMM::LCOMMType LCOMMDirectiveAlignmentType; // Defaults to NoAlignment. + /// HasDotTypeDotSizeDirective - True if the target has .type and .size /// directives, this is true for most ELF targets. bool HasDotTypeDotSizeDirective; // Defaults to true. 
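The MCAsmInfo change replaces the old LCOMMDirectiveType with an alignment-type enum — no alignment argument, a byte alignment, or a log2 alignment — and the renamed accessor getLCOMMDirectiveAlignmentType() reflects that .comm and .lcomm may interpret that argument differently. A hypothetical consumer (sketch only; emitLCommon and its parameters are invented for the example) might print the directive like so:

#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

void emitLCommon(llvm::raw_ostream &OS, const llvm::MCAsmInfo &MAI,
                 llvm::StringRef Name, uint64_t Size, unsigned ByteAlign) {
  OS << "\t.lcomm\t" << Name << ',' << Size;
  switch (MAI.getLCOMMDirectiveAlignmentType()) {
  case llvm::LCOMM::NoAlignment:
    break;                                  // target takes no alignment operand
  case llvm::LCOMM::ByteAlignment:
    OS << ',' << ByteAlign;                 // alignment given in bytes
    break;
  case llvm::LCOMM::Log2Alignment:
    OS << ',' << llvm::Log2_32(ByteAlign);  // alignment given as a power of two
    break;
  }
  OS << '\n';
}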
@@ -496,13 +496,13 @@ namespace llvm { bool hasAggressiveSymbolFolding() const { return HasAggressiveSymbolFolding; } - LCOMM::LCOMMType getLCOMMDirectiveType() const { - return LCOMMDirectiveType; - } - bool hasDotTypeDotSizeDirective() const {return HasDotTypeDotSizeDirective;} bool getCOMMDirectiveAlignmentIsInBytes() const { return COMMDirectiveAlignmentIsInBytes; } + LCOMM::LCOMMType getLCOMMDirectiveAlignmentType() const { + return LCOMMDirectiveAlignmentType; + } + bool hasDotTypeDotSizeDirective() const {return HasDotTypeDotSizeDirective;} bool hasSingleParameterDotFile() const { return HasSingleParameterDotFile; } bool hasNoDeadStrip() const { return HasNoDeadStrip; } bool hasSymbolResolver() const { return HasSymbolResolver; } diff --git a/include/llvm/MC/MCAssembler.h b/include/llvm/MC/MCAssembler.h index b7b2d66..5771415 100644 --- a/include/llvm/MC/MCAssembler.h +++ b/include/llvm/MC/MCAssembler.h @@ -40,8 +40,8 @@ class MCAsmBackend; class MCFragment : public ilist_node { friend class MCAsmLayout; - MCFragment(const MCFragment&); // DO NOT IMPLEMENT - void operator=(const MCFragment&); // DO NOT IMPLEMENT + MCFragment(const MCFragment&) LLVM_DELETED_FUNCTION; + void operator=(const MCFragment&) LLVM_DELETED_FUNCTION; public: enum FragmentType { @@ -99,8 +99,6 @@ public: unsigned getLayoutOrder() const { return LayoutOrder; } void setLayoutOrder(unsigned Value) { LayoutOrder = Value; } - static bool classof(const MCFragment *O) { return true; } - void dump(); }; @@ -151,7 +149,6 @@ public: static bool classof(const MCFragment *F) { return F->getKind() == MCFragment::FT_Data; } - static bool classof(const MCDataFragment *) { return true; } }; // FIXME: This current incarnation of MCInstFragment doesn't make much sense, as @@ -176,7 +173,7 @@ public: typedef SmallVectorImpl::iterator fixup_iterator; public: - MCInstFragment(MCInst _Inst, MCSectionData *SD = 0) + MCInstFragment(const MCInst &_Inst, MCSectionData *SD = 0) : MCFragment(FT_Inst, SD), Inst(_Inst) { } @@ -191,7 +188,7 @@ public: MCInst &getInst() { return Inst; } const MCInst &getInst() const { return Inst; } - void setInst(MCInst Value) { Inst = Value; } + void setInst(const MCInst& Value) { Inst = Value; } /// @} /// @name Fixup Access @@ -213,7 +210,6 @@ public: static bool classof(const MCFragment *F) { return F->getKind() == MCFragment::FT_Inst; } - static bool classof(const MCInstFragment *) { return true; } }; class MCAlignFragment : public MCFragment { @@ -225,7 +221,7 @@ class MCAlignFragment : public MCFragment { /// Value - Value to use for filling padding bytes. int64_t Value; - /// ValueSize - The size of the integer (in bytes) of \arg Value. + /// ValueSize - The size of the integer (in bytes) of \p Value. unsigned ValueSize; /// MaxBytesToEmit - The maximum number of bytes to emit; if the alignment @@ -263,7 +259,6 @@ public: static bool classof(const MCFragment *F) { return F->getKind() == MCFragment::FT_Align; } - static bool classof(const MCAlignFragment *) { return true; } }; class MCFillFragment : public MCFragment { @@ -272,7 +267,7 @@ class MCFillFragment : public MCFragment { /// Value - Value to use for filling bytes. int64_t Value; - /// ValueSize - The size (in bytes) of \arg Value to use when filling, or 0 if + /// ValueSize - The size (in bytes) of \p Value to use when filling, or 0 if /// this is a virtual fill fragment. 
unsigned ValueSize; @@ -302,7 +297,6 @@ public: static bool classof(const MCFragment *F) { return F->getKind() == MCFragment::FT_Fill; } - static bool classof(const MCFillFragment *) { return true; } }; class MCOrgFragment : public MCFragment { @@ -331,7 +325,6 @@ public: static bool classof(const MCFragment *F) { return F->getKind() == MCFragment::FT_Org; } - static bool classof(const MCOrgFragment *) { return true; } }; class MCLEBFragment : public MCFragment { @@ -364,7 +357,6 @@ public: static bool classof(const MCFragment *F) { return F->getKind() == MCFragment::FT_LEB; } - static bool classof(const MCLEBFragment *) { return true; } }; class MCDwarfLineAddrFragment : public MCFragment { @@ -401,7 +393,6 @@ public: static bool classof(const MCFragment *F) { return F->getKind() == MCFragment::FT_Dwarf; } - static bool classof(const MCDwarfLineAddrFragment *) { return true; } }; class MCDwarfCallFrameFragment : public MCFragment { @@ -431,7 +422,6 @@ public: static bool classof(const MCFragment *F) { return F->getKind() == MCFragment::FT_DwarfFrame; } - static bool classof(const MCDwarfCallFrameFragment *) { return true; } }; // FIXME: Should this be a separate class, or just merged into MCSection? Since @@ -440,8 +430,8 @@ public: class MCSectionData : public ilist_node { friend class MCAsmLayout; - MCSectionData(const MCSectionData&); // DO NOT IMPLEMENT - void operator=(const MCSectionData&); // DO NOT IMPLEMENT + MCSectionData(const MCSectionData&) LLVM_DELETED_FUNCTION; + void operator=(const MCSectionData&) LLVM_DELETED_FUNCTION; public: typedef iplist FragmentListType; @@ -683,8 +673,8 @@ public: typedef std::vector::iterator data_region_iterator; private: - MCAssembler(const MCAssembler&); // DO NOT IMPLEMENT - void operator=(const MCAssembler&); // DO NOT IMPLEMENT + MCAssembler(const MCAssembler&) LLVM_DELETED_FUNCTION; + void operator=(const MCAssembler&) LLVM_DELETED_FUNCTION; MCContext &Context; @@ -738,7 +728,7 @@ private: /// \param Value [out] On return, the value of the fixup as currently laid /// out. /// \return Whether the fixup value was fully resolved. This is true if the - /// \arg Value result is fixed, otherwise the value may change due to + /// \p Value result is fixed, otherwise the value may change due to /// relocation. bool evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup, const MCFragment *DF, @@ -775,7 +765,7 @@ private: public: /// Compute the effective fragment size assuming it is laid out at the given - /// \arg SectionAddress and \arg FragmentOffset. + /// \p SectionAddress and \p FragmentOffset. uint64_t computeFragmentSize(const MCAsmLayout &Layout, const MCFragment &F) const; @@ -804,7 +794,7 @@ public: public: /// Construct a new assembler instance. /// - /// \arg OS - The stream to output to. + /// \param OS The stream to output to. // // FIXME: How are we going to parameterize this? Two obvious options are stay // concrete and require clients to pass in a target like object. The other @@ -824,7 +814,7 @@ public: MCObjectWriter &getWriter() const { return Writer; } /// Finish - Do final processing and write the object to the output stream. - /// \arg Writer is used for custom object writer (as the MCJIT does), + /// \p Writer is used for custom object writer (as the MCJIT does), /// if not specified it is automatically created from backend. 
   void Finish();
diff --git a/include/llvm/MC/MCCodeEmitter.h b/include/llvm/MC/MCCodeEmitter.h
index 934ef69..0574890 100644
--- a/include/llvm/MC/MCCodeEmitter.h
+++ b/include/llvm/MC/MCCodeEmitter.h
@@ -10,6 +10,8 @@
 #ifndef LLVM_MC_MCCODEEMITTER_H
 #define LLVM_MC_MCCODEEMITTER_H

+#include "llvm/Support/Compiler.h"
+
 namespace llvm {
 class MCFixup;
 class MCInst;
@@ -19,16 +21,16 @@ template<typename T> class SmallVectorImpl;
 /// MCCodeEmitter - Generic instruction encoding interface.
 class MCCodeEmitter {
 private:
-  MCCodeEmitter(const MCCodeEmitter &);  // DO NOT IMPLEMENT
-  void operator=(const MCCodeEmitter &);  // DO NOT IMPLEMENT
+  MCCodeEmitter(const MCCodeEmitter &) LLVM_DELETED_FUNCTION;
+  void operator=(const MCCodeEmitter &) LLVM_DELETED_FUNCTION;
 protected: // Can only create subclasses.
   MCCodeEmitter();

 public:
   virtual ~MCCodeEmitter();

-  /// EncodeInstruction - Encode the given \arg Inst to bytes on the output
-  /// stream \arg OS.
+  /// EncodeInstruction - Encode the given \p Inst to bytes on the output
+  /// stream \p OS.
   virtual void EncodeInstruction(const MCInst &Inst, raw_ostream &OS,
                                  SmallVectorImpl<MCFixup> &Fixups) const = 0;
 };
diff --git a/include/llvm/MC/MCContext.h b/include/llvm/MC/MCContext.h
index 59545d3..5a8830c 100644
--- a/include/llvm/MC/MCContext.h
+++ b/include/llvm/MC/MCContext.h
@@ -40,8 +40,8 @@ namespace llvm {
   /// of the sections that it creates.
   ///
   class MCContext {
-    MCContext(const MCContext&); // DO NOT IMPLEMENT
-    MCContext &operator=(const MCContext&); // DO NOT IMPLEMENT
+    MCContext(const MCContext&) LLVM_DELETED_FUNCTION;
+    MCContext &operator=(const MCContext&) LLVM_DELETED_FUNCTION;
   public:
     typedef StringMap<MCSymbol*, BumpPtrAllocator&> SymbolTable;
   private:
@@ -183,6 +183,7 @@ namespace llvm {
     /// LookupSymbol - Get the symbol for \p Name, or null.
     MCSymbol *LookupSymbol(StringRef Name) const;
+    MCSymbol *LookupSymbol(const Twine &Name) const;

     /// getSymbols - Get a reference for the symbol table for clients that
     /// want to, for example, iterate over all symbols. 'const' because we
diff --git a/include/llvm/MC/MCDwarf.h b/include/llvm/MC/MCDwarf.h
index fdb7ab2..8fc437f 100644
--- a/include/llvm/MC/MCDwarf.h
+++ b/include/llvm/MC/MCDwarf.h
@@ -19,6 +19,7 @@
 #include "llvm/MC/MachineLocation.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/Dwarf.h"
+#include "llvm/Support/Compiler.h"
 #include <vector>

 namespace llvm {
@@ -48,8 +49,8 @@ namespace llvm {
     MCDwarfFile(StringRef name, unsigned dirIndex)
       : Name(name), DirIndex(dirIndex) {}

-    MCDwarfFile(const MCDwarfFile&); // DO NOT IMPLEMENT
-    void operator=(const MCDwarfFile&); // DO NOT IMPLEMENT
+    MCDwarfFile(const MCDwarfFile&) LLVM_DELETED_FUNCTION;
+    void operator=(const MCDwarfFile&) LLVM_DELETED_FUNCTION;
   public:
     /// getName - Get the base name of this MCDwarfFile.
     StringRef getName() const { return Name; }
@@ -58,7 +59,7 @@ namespace llvm {
     unsigned getDirIndex() const { return DirIndex; }

-    /// print - Print the value to the stream \arg OS.
+    /// print - Print the value to the stream \p OS.
     void print(raw_ostream &OS) const;

     /// dump - Print the value to stderr.
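Stepping back, the change repeated throughout these MC headers swaps the old "declare but do not implement" copy members for LLVM_DELETED_FUNCTION, which llvm/Support/Compiler.h expands to "= delete" under a C++11 compiler and to nothing otherwise. A minimal sketch of the idiom (the class name is invented):

#include "llvm/Support/Compiler.h"

class Uncopyable {
  Uncopyable(const Uncopyable &) LLVM_DELETED_FUNCTION;
  void operator=(const Uncopyable &) LLVM_DELETED_FUNCTION;
public:
  Uncopyable() {}
};

// Uncopyable A, B;  // fine
// A = B;            // rejected at compile time with C++11, at link time before

This is also why several of the hunks add #include "llvm/Support/Compiler.h" near the top of the headers they touch.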
@@ -177,8 +178,8 @@ namespace llvm { class MCLineSection { private: - MCLineSection(const MCLineSection&); // DO NOT IMPLEMENT - void operator=(const MCLineSection&); // DO NOT IMPLEMENT + MCLineSection(const MCLineSection&) LLVM_DELETED_FUNCTION; + void operator=(const MCLineSection&) LLVM_DELETED_FUNCTION; public: // Constructor to create an MCLineSection with an empty MCLineEntries diff --git a/include/llvm/MC/MCELFObjectWriter.h b/include/llvm/MC/MCELFObjectWriter.h index abbe188..38cdc72 100644 --- a/include/llvm/MC/MCELFObjectWriter.h +++ b/include/llvm/MC/MCELFObjectWriter.h @@ -85,6 +85,9 @@ public: const MCFragment &F, const MCFixup &Fixup, bool IsPCRel) const; + virtual const MCSymbol *undefinedExplicitRelSym(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const; virtual void adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset); @@ -93,9 +96,9 @@ public: /// @name Accessors /// @{ - uint8_t getOSABI() { return OSABI; } - uint16_t getEMachine() { return EMachine; } - bool hasRelocationAddend() { return HasRelocationAddend; } + uint8_t getOSABI() const { return OSABI; } + uint16_t getEMachine() const { return EMachine; } + bool hasRelocationAddend() const { return HasRelocationAddend; } bool is64Bit() const { return Is64Bit; } bool isN64() const { return IsN64; } /// @} diff --git a/include/llvm/MC/MCExpr.h b/include/llvm/MC/MCExpr.h index aa62eb2..00eef27 100644 --- a/include/llvm/MC/MCExpr.h +++ b/include/llvm/MC/MCExpr.h @@ -41,8 +41,8 @@ public: private: ExprKind Kind; - MCExpr(const MCExpr&); // DO NOT IMPLEMENT - void operator=(const MCExpr&); // DO NOT IMPLEMENT + MCExpr(const MCExpr&) LLVM_DELETED_FUNCTION; + void operator=(const MCExpr&) LLVM_DELETED_FUNCTION; bool EvaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm, const MCAsmLayout *Layout, @@ -78,11 +78,11 @@ public: /// values. If not given, then only non-symbolic expressions will be /// evaluated. /// @result - True on success. + bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout, + const SectionAddrMap &Addrs) const; bool EvaluateAsAbsolute(int64_t &Res) const; bool EvaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const; bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout) const; - bool EvaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout, - const SectionAddrMap &Addrs) const; /// EvaluateAsRelocatable - Try to evaluate the expression to a relocatable /// value, i.e. an expression of the fixed form (a - b + constant). @@ -99,8 +99,6 @@ public: const MCSection *FindAssociatedSection() const; /// @} - - static bool classof(const MCExpr *) { return true; } }; inline raw_ostream &operator<<(raw_ostream &OS, const MCExpr &E) { @@ -132,7 +130,6 @@ public: static bool classof(const MCExpr *E) { return E->getKind() == MCExpr::Constant; } - static bool classof(const MCConstantExpr *) { return true; } }; /// MCSymbolRefExpr - Represent a reference to a symbol from inside an @@ -170,8 +167,10 @@ public: VK_ARM_TPOFF, VK_ARM_GOTTPOFF, VK_ARM_TARGET1, + VK_ARM_TARGET2, - VK_PPC_TOC, + VK_PPC_TOC, // TOC base + VK_PPC_TOC_ENTRY, // TOC entry VK_PPC_DARWIN_HA16, // ha16(symbol) VK_PPC_DARWIN_LO16, // lo16(symbol) VK_PPC_GAS_HA16, // symbol@ha @@ -247,7 +246,6 @@ public: static bool classof(const MCExpr *E) { return E->getKind() == MCExpr::SymbolRef; } - static bool classof(const MCSymbolRefExpr *) { return true; } }; /// MCUnaryExpr - Unary assembler expressions. 
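The other recurring cleanup is the removal of the classof(const Derived *) overloads that simply returned true: isa<> and dyn_cast<> only consult the overload taking the base-class pointer, so those one-liners were dead weight. For example (illustrative helper, not from the patch):

#include "llvm/MC/MCExpr.h"
#include "llvm/Support/Casting.h"

int64_t constantOrZero(const llvm::MCExpr *E) {
  // dyn_cast invokes MCConstantExpr::classof(E), i.e. it checks
  // E->getKind() == MCExpr::Constant before performing the cast.
  if (const llvm::MCConstantExpr *CE = llvm::dyn_cast<llvm::MCConstantExpr>(E))
    return CE->getValue();
  return 0;
}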
@@ -301,7 +299,6 @@ public: static bool classof(const MCExpr *E) { return E->getKind() == MCExpr::Unary; } - static bool classof(const MCUnaryExpr *) { return true; } }; /// MCBinaryExpr - Binary assembler expressions. @@ -436,7 +433,6 @@ public: static bool classof(const MCExpr *E) { return E->getKind() == MCExpr::Binary; } - static bool classof(const MCBinaryExpr *) { return true; } }; /// MCTargetExpr - This is an extension point for target-specific MCExpr @@ -445,7 +441,7 @@ public: /// NOTE: All subclasses are required to have trivial destructors because /// MCExprs are bump pointer allocated and not destructed. class MCTargetExpr : public MCExpr { - virtual void Anchor(); + virtual void anchor(); protected: MCTargetExpr() : MCExpr(Target) {} virtual ~MCTargetExpr() {} @@ -460,7 +456,6 @@ public: static bool classof(const MCExpr *E) { return E->getKind() == MCExpr::Target; } - static bool classof(const MCTargetExpr *) { return true; } }; } // end namespace llvm diff --git a/include/llvm/MC/MCInst.h b/include/llvm/MC/MCInst.h index 397a37d..e91c6a2 100644 --- a/include/llvm/MC/MCInst.h +++ b/include/llvm/MC/MCInst.h @@ -182,7 +182,7 @@ public: void dump() const; /// \brief Dump the MCInst as prettily as possible using the additional MC - /// structures, if given. Operators are separated by the \arg Separator + /// structures, if given. Operators are separated by the \p Separator /// string. void dump_pretty(raw_ostream &OS, const MCAsmInfo *MAI = 0, const MCInstPrinter *Printer = 0, diff --git a/include/llvm/MC/MCInstPrinter.h b/include/llvm/MC/MCInstPrinter.h index 3c4f28b..3b9420a 100644 --- a/include/llvm/MC/MCInstPrinter.h +++ b/include/llvm/MC/MCInstPrinter.h @@ -33,12 +33,16 @@ protected: /// The current set of available features. unsigned AvailableFeatures; + /// True if we are printing marked up assembly. + bool UseMarkup; + /// Utility function for printing annotations. void printAnnotation(raw_ostream &OS, StringRef Annot); public: MCInstPrinter(const MCAsmInfo &mai, const MCInstrInfo &mii, const MCRegisterInfo &mri) - : CommentStream(0), MAI(mai), MII(mii), MRI(mri), AvailableFeatures(0) {} + : CommentStream(0), MAI(mai), MII(mii), MRI(mri), AvailableFeatures(0), + UseMarkup(0) {} virtual ~MCInstPrinter(); @@ -59,6 +63,13 @@ public: unsigned getAvailableFeatures() const { return AvailableFeatures; } void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; } + + bool getUseMarkup() const { return UseMarkup; } + void setUseMarkup(bool Value) { UseMarkup = Value; } + + /// Utility functions to make adding mark ups simpler. 
+ StringRef markup(StringRef s) const; + StringRef markup(StringRef a, StringRef b) const; }; } // namespace llvm diff --git a/include/llvm/MC/MCInstrDesc.h b/include/llvm/MC/MCInstrDesc.h index dbf16d8..02383f8 100644 --- a/include/llvm/MC/MCInstrDesc.h +++ b/include/llvm/MC/MCInstrDesc.h @@ -1,4 +1,4 @@ -//===-- llvm/Mc/McInstrDesc.h - Instruction Descriptors -*- C++ -*-===// +//===-- llvm/MC/MCInstrDesc.h - Instruction Descriptors -*- C++ -*-===// // // The LLVM Compiler Infrastructure // diff --git a/include/llvm/MC/MCLabel.h b/include/llvm/MC/MCLabel.h index 727520d..f531de8 100644 --- a/include/llvm/MC/MCLabel.h +++ b/include/llvm/MC/MCLabel.h @@ -14,6 +14,8 @@ #ifndef LLVM_MC_MCLABEL_H #define LLVM_MC_MCLABEL_H +#include "llvm/Support/Compiler.h" + namespace llvm { class MCContext; class raw_ostream; @@ -30,8 +32,8 @@ namespace llvm { MCLabel(unsigned instance) : Instance(instance) {} - MCLabel(const MCLabel&); // DO NOT IMPLEMENT - void operator=(const MCLabel&); // DO NOT IMPLEMENT + MCLabel(const MCLabel&) LLVM_DELETED_FUNCTION; + void operator=(const MCLabel&) LLVM_DELETED_FUNCTION; public: /// getInstance - Get the current instance of this Directional Local Label. unsigned getInstance() const { return Instance; } @@ -40,7 +42,7 @@ namespace llvm { /// Label. unsigned incInstance() { return ++Instance; } - /// print - Print the value to the stream \arg OS. + /// print - Print the value to the stream \p OS. void print(raw_ostream &OS) const; /// dump - Print the value to stderr. diff --git a/include/llvm/MC/MCMachObjectWriter.h b/include/llvm/MC/MCMachObjectWriter.h index 949d907..efaabfb 100644 --- a/include/llvm/MC/MCMachObjectWriter.h +++ b/include/llvm/MC/MCMachObjectWriter.h @@ -153,8 +153,8 @@ public: /// WriteSegmentLoadCommand - Write a segment load command. /// - /// \arg NumSections - The number of sections in this segment. - /// \arg SectionDataSize - The total size of the sections. + /// \param NumSections The number of sections in this segment. + /// \param SectionDataSize The total size of the sections. void WriteSegmentLoadCommand(unsigned NumSections, uint64_t VMSize, uint64_t SectionDataStartOffset, @@ -233,6 +233,8 @@ public: void computeSectionAddresses(const MCAssembler &Asm, const MCAsmLayout &Layout); + void markAbsoluteVariableSymbols(MCAssembler &Asm, + const MCAsmLayout &Layout); void ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout); virtual bool IsSymbolRefDifferenceFullyResolvedImpl(const MCAssembler &Asm, diff --git a/include/llvm/MC/MCObjectFileInfo.h b/include/llvm/MC/MCObjectFileInfo.h index 74e2263..23e5513 100644 --- a/include/llvm/MC/MCObjectFileInfo.h +++ b/include/llvm/MC/MCObjectFileInfo.h @@ -84,7 +84,8 @@ protected: /// this is the section to emit them into. const MCSection *CompactUnwindSection; - /// DwarfAccelNamesSection, DwarfAccelObjCSection + /// DwarfAccelNamesSection, DwarfAccelObjCSection, + /// DwarfAccelNamespaceSection, DwarfAccelTypesSection - /// If we use the DWARF accelerated hash tables then we want toe emit these /// sections. 
const MCSection *DwarfAccelNamesSection; diff --git a/include/llvm/MC/MCObjectStreamer.h b/include/llvm/MC/MCObjectStreamer.h index a69075d..08b00f1 100644 --- a/include/llvm/MC/MCObjectStreamer.h +++ b/include/llvm/MC/MCObjectStreamer.h @@ -72,6 +72,13 @@ public: virtual void ChangeSection(const MCSection *Section); virtual void EmitInstruction(const MCInst &Inst); virtual void EmitInstToFragment(const MCInst &Inst); + virtual void EmitBytes(StringRef Data, unsigned AddrSpace); + virtual void EmitValueToAlignment(unsigned ByteAlignment, + int64_t Value = 0, + unsigned ValueSize = 1, + unsigned MaxBytesToEmit = 0); + virtual void EmitCodeAlignment(unsigned ByteAlignment, + unsigned MaxBytesToEmit = 0); virtual bool EmitValueToOffset(const MCExpr *Offset, unsigned char Value); virtual void EmitDwarfAdvanceLineAddr(int64_t LineDelta, const MCSymbol *LastLabel, @@ -80,6 +87,9 @@ public: virtual void EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel, const MCSymbol *Label); virtual void EmitGPRel32Value(const MCExpr *Value); + virtual void EmitGPRel64Value(const MCExpr *Value); + virtual void EmitFill(uint64_t NumBytes, uint8_t FillValue, + unsigned AddrSpace); virtual void FinishImpl(); /// @} diff --git a/include/llvm/MC/MCObjectWriter.h b/include/llvm/MC/MCObjectWriter.h index 9591a00..14fe75f 100644 --- a/include/llvm/MC/MCObjectWriter.h +++ b/include/llvm/MC/MCObjectWriter.h @@ -11,6 +11,7 @@ #define LLVM_MC_MCOBJECTWRITER_H #include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/DataTypes.h" #include @@ -35,8 +36,8 @@ class MCValue; /// The object writer also contains a number of helper methods for writing /// binary data to the output stream. class MCObjectWriter { - MCObjectWriter(const MCObjectWriter &); // DO NOT IMPLEMENT - void operator=(const MCObjectWriter &); // DO NOT IMPLEMENT + MCObjectWriter(const MCObjectWriter &) LLVM_DELETED_FUNCTION; + void operator=(const MCObjectWriter &) LLVM_DELETED_FUNCTION; protected: raw_ostream &OS; diff --git a/include/llvm/MC/MCParser/AsmLexer.h b/include/llvm/MC/MCParser/AsmLexer.h index 9a8735f..e102dfb 100644 --- a/include/llvm/MC/MCParser/AsmLexer.h +++ b/include/llvm/MC/MCParser/AsmLexer.h @@ -31,8 +31,8 @@ class AsmLexer : public MCAsmLexer { const MemoryBuffer *CurBuf; bool isAtStartOfLine; - void operator=(const AsmLexer&); // DO NOT IMPLEMENT - AsmLexer(const AsmLexer&); // DO NOT IMPLEMENT + void operator=(const AsmLexer&) LLVM_DELETED_FUNCTION; + AsmLexer(const AsmLexer&) LLVM_DELETED_FUNCTION; protected: /// LexToken - Read the next token and return its code. diff --git a/include/llvm/MC/MCParser/MCAsmLexer.h b/include/llvm/MC/MCParser/MCAsmLexer.h index 5e29ad4..0a961d6 100644 --- a/include/llvm/MC/MCParser/MCAsmLexer.h +++ b/include/llvm/MC/MCParser/MCAsmLexer.h @@ -11,6 +11,7 @@ #define LLVM_MC_MCASMLEXER_H #include "llvm/ADT/StringRef.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/SMLoc.h" @@ -39,6 +40,7 @@ public: // No-value. EndOfStatement, Colon, + Space, Plus, Minus, Tilde, Slash, // '/' BackSlash, // '\' @@ -121,10 +123,11 @@ class MCAsmLexer { SMLoc ErrLoc; std::string Err; - MCAsmLexer(const MCAsmLexer &); // DO NOT IMPLEMENT - void operator=(const MCAsmLexer &); // DO NOT IMPLEMENT + MCAsmLexer(const MCAsmLexer &) LLVM_DELETED_FUNCTION; + void operator=(const MCAsmLexer &) LLVM_DELETED_FUNCTION; protected: // Can only create subclasses. 
const char *TokStart; + bool SkipSpace; MCAsmLexer(); @@ -169,11 +172,14 @@ public: /// getKind - Get the kind of current token. AsmToken::TokenKind getKind() const { return CurTok.getKind(); } - /// is - Check if the current token has kind \arg K. + /// is - Check if the current token has kind \p K. bool is(AsmToken::TokenKind K) const { return CurTok.is(K); } - /// isNot - Check if the current token has kind \arg K. + /// isNot - Check if the current token has kind \p K. bool isNot(AsmToken::TokenKind K) const { return CurTok.isNot(K); } + + /// setSkipSpace - Set whether spaces should be ignored by the lexer + void setSkipSpace(bool val) { SkipSpace = val; } }; } // End llvm namespace diff --git a/include/llvm/MC/MCParser/MCAsmParser.h b/include/llvm/MC/MCParser/MCAsmParser.h index 793c709..a71d3c3 100644 --- a/include/llvm/MC/MCParser/MCAsmParser.h +++ b/include/llvm/MC/MCParser/MCAsmParser.h @@ -20,6 +20,9 @@ class MCAsmLexer; class MCAsmParserExtension; class MCContext; class MCExpr; +class MCInstPrinter; +class MCInstrInfo; +class MCParsedAsmOperand; class MCStreamer; class MCTargetAsmParser; class SMLoc; @@ -28,6 +31,16 @@ class SourceMgr; class StringRef; class Twine; +/// MCAsmParserSemaCallback - Generic Sema callback for assembly parser. +class MCAsmParserSemaCallback { +public: + virtual ~MCAsmParserSemaCallback(); + virtual void *LookupInlineAsmIdentifier(StringRef Name, void *Loc, + unsigned &Size) = 0; + virtual bool LookupInlineAsmField(StringRef Base, StringRef Member, + unsigned &Offset) = 0; +}; + /// MCAsmParser - Generic assembler parser interface, for use by target specific /// assembly parsers. class MCAsmParser { @@ -35,8 +48,8 @@ public: typedef bool (*DirectiveHandler)(MCAsmParserExtension*, StringRef, SMLoc); private: - MCAsmParser(const MCAsmParser &); // DO NOT IMPLEMENT - void operator=(const MCAsmParser &); // DO NOT IMPLEMENT + MCAsmParser(const MCAsmParser &) LLVM_DELETED_FUNCTION; + void operator=(const MCAsmParser &) LLVM_DELETED_FUNCTION; MCTargetAsmParser *TargetParser; @@ -73,15 +86,26 @@ public: /// Run - Run the parser on the input source buffer. virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false) = 0; - /// Warning - Emit a warning at the location \arg L, with the message \arg - /// Msg. + virtual void setParsingInlineAsm(bool V) = 0; + virtual bool isParsingInlineAsm() = 0; + + /// ParseMSInlineAsm - Parse ms-style inline assembly. + virtual bool ParseMSInlineAsm(void *AsmLoc, std::string &AsmString, + unsigned &NumOutputs, unsigned &NumInputs, + SmallVectorImpl > &OpDecls, + SmallVectorImpl &Constraints, + SmallVectorImpl &Clobbers, + const MCInstrInfo *MII, + const MCInstPrinter *IP, + MCAsmParserSemaCallback &SI) = 0; + + /// Warning - Emit a warning at the location \p L, with the message \p Msg. /// /// \return The return value is true, if warnings are fatal. virtual bool Warning(SMLoc L, const Twine &Msg, ArrayRef Ranges = ArrayRef()) = 0; - /// Error - Emit an error at the location \arg L, with the message \arg - /// Msg. + /// Error - Emit an error at the location \p L, with the message \p Msg. /// /// \return The return value is always true, as an idiomatic convenience to /// clients. @@ -100,7 +124,7 @@ public: ArrayRef Ranges = ArrayRef()); /// ParseIdentifier - Parse an identifier or string (as a quoted identifier) - /// and set \arg Res to the identifier contents. + /// and set \p Res to the identifier contents. 
virtual bool ParseIdentifier(StringRef &Res) = 0; /// \brief Parse up to the end of statement and return the contents from the diff --git a/include/llvm/MC/MCParser/MCAsmParserExtension.h b/include/llvm/MC/MCParser/MCAsmParserExtension.h index 4e2aee9..0918c93 100644 --- a/include/llvm/MC/MCParser/MCAsmParserExtension.h +++ b/include/llvm/MC/MCParser/MCAsmParserExtension.h @@ -21,8 +21,8 @@ class Twine; /// which is implemented by target and object file assembly parser /// implementations. class MCAsmParserExtension { - MCAsmParserExtension(const MCAsmParserExtension &); // DO NOT IMPLEMENT - void operator=(const MCAsmParserExtension &); // DO NOT IMPLEMENT + MCAsmParserExtension(const MCAsmParserExtension &) LLVM_DELETED_FUNCTION; + void operator=(const MCAsmParserExtension &) LLVM_DELETED_FUNCTION; MCAsmParser *Parser; @@ -43,8 +43,8 @@ protected: public: virtual ~MCAsmParserExtension(); - /// \brief Initialize the extension for parsing using the given \arg - /// Parser. The extension should use the AsmParser interfaces to register its + /// \brief Initialize the extension for parsing using the given \p Parser. + /// The extension should use the AsmParser interfaces to register its /// parsing routines. virtual void Initialize(MCAsmParser &Parser); diff --git a/include/llvm/MC/MCParser/MCParsedAsmOperand.h b/include/llvm/MC/MCParser/MCParsedAsmOperand.h index 2556e5f..60e7887 100644 --- a/include/llvm/MC/MCParser/MCParsedAsmOperand.h +++ b/include/llvm/MC/MCParser/MCParsedAsmOperand.h @@ -19,15 +19,69 @@ class raw_ostream; /// base class is used by target-independent clients and is the interface /// between parsing an asm instruction and recognizing it. class MCParsedAsmOperand { + /// MCOperandNum - The corresponding MCInst operand number. Only valid when + /// parsing MS-style inline assembly. + unsigned MCOperandNum; + + /// Constraint - The constraint on this operand. Only valid when parsing + /// MS-style inline assembly. + std::string Constraint; + public: MCParsedAsmOperand() {} virtual ~MCParsedAsmOperand() {} + void setConstraint(StringRef C) { Constraint = C.str(); } + StringRef getConstraint() { return Constraint; } + + void setMCOperandNum (unsigned OpNum) { MCOperandNum = OpNum; } + unsigned getMCOperandNum() { return MCOperandNum; } + + unsigned getNameLen() { + assert (getStartLoc().isValid() && "Invalid StartLoc!"); + assert (getEndLoc().isValid() && "Invalid EndLoc!"); + return getEndLoc().getPointer() - getStartLoc().getPointer(); + } + + StringRef getName() { + return StringRef(getStartLoc().getPointer(), getNameLen()); + } + + /// isToken - Is this a token operand? + virtual bool isToken() const = 0; + /// isImm - Is this an immediate operand? + virtual bool isImm() const = 0; + /// isReg - Is this a register operand? + virtual bool isReg() const = 0; + virtual unsigned getReg() const = 0; + + /// isMem - Is this a memory operand? + virtual bool isMem() const = 0; + virtual unsigned getMemSize() const { return 0; } + /// getStartLoc - Get the location of the first token of this operand. virtual SMLoc getStartLoc() const = 0; /// getEndLoc - Get the location of the last token of this operand. virtual SMLoc getEndLoc() const = 0; + /// needAsmRewrite - AsmRewrites happen in both the target-independent and + /// target-dependent parsers. The target-independent parser calls this + /// function to determine if the target-dependent parser has already taken + /// care of the rewrites. Only valid when parsing MS-style inline assembly. 
+ virtual bool needAsmRewrite() const { return true; } + + /// isOffsetOf - Do we need to emit code to get the offset of the variable, + /// rather then the value of the variable? Only valid when parsing MS-style + /// inline assembly. + virtual bool isOffsetOf() const { return false; } + + /// getOffsetOfLoc - Get the location of the offset operator. + virtual SMLoc getOffsetOfLoc() const { return SMLoc(); } + + /// needSizeDirective - Do we need to emit a sizing directive for this + /// operand? Only valid when parsing MS-style inline assembly. + virtual bool needSizeDirective() const { return false; } + /// print - Print a debug representation of the operand to the given stream. virtual void print(raw_ostream &OS) const = 0; /// dump - Print to the debug stream. diff --git a/include/llvm/MC/MCRegisterInfo.h b/include/llvm/MC/MCRegisterInfo.h index 46a9d71..f05baea 100644 --- a/include/llvm/MC/MCRegisterInfo.h +++ b/include/llvm/MC/MCRegisterInfo.h @@ -333,6 +333,13 @@ public: return NumRegs; } + /// getNumSubRegIndices - Return the number of sub-register indices + /// understood by the target. Index 0 is reserved for the no-op sub-register, + /// while 1 to getNumSubRegIndices() - 1 represent real sub-registers. + unsigned getNumSubRegIndices() const { + return NumSubRegIndices; + } + /// getNumRegUnits - Return the number of (native) register units in the /// target. Register units are numbered from 0 to getNumRegUnits() - 1. They /// can be accessed through MCRegUnitIterator defined below. @@ -363,7 +370,7 @@ public: /// getRegClass - Returns the register class associated with the enumeration /// value. See class MCOperandInfo. - const MCRegisterClass getRegClass(unsigned i) const { + const MCRegisterClass& getRegClass(unsigned i) const { assert(i < getNumRegClasses() && "Register Class ID out of range"); return Classes[i]; } diff --git a/include/llvm/MC/MCSchedule.h b/include/llvm/MC/MCSchedule.h index 3b1cdf1..0c71ee5 100644 --- a/include/llvm/MC/MCSchedule.h +++ b/include/llvm/MC/MCSchedule.h @@ -16,17 +16,111 @@ #define LLVM_MC_MCSCHEDMODEL_H #include "llvm/Support/DataTypes.h" +#include namespace llvm { struct InstrItinerary; +/// Define a kind of processor resource that will be modeled by the scheduler. +struct MCProcResourceDesc { +#ifndef NDEBUG + const char *Name; +#endif + unsigned NumUnits; // Number of resource of this kind + unsigned SuperIdx; // Index of the resources kind that contains this kind. + + // Buffered resources may be consumed at some indeterminate cycle after + // dispatch (e.g. for instructions that may issue out-of-order). Unbuffered + // resources always consume their resource some fixed number of cycles after + // dispatch (e.g. for instruction interlocking that may stall the pipeline). + bool IsBuffered; + + bool operator==(const MCProcResourceDesc &Other) const { + return NumUnits == Other.NumUnits && SuperIdx == Other.SuperIdx + && IsBuffered == Other.IsBuffered; + } +}; + +/// Identify one of the processor resource kinds consumed by a particular +/// scheduling class for the specified number of cycles. +struct MCWriteProcResEntry { + unsigned ProcResourceIdx; + unsigned Cycles; + + bool operator==(const MCWriteProcResEntry &Other) const { + return ProcResourceIdx == Other.ProcResourceIdx && Cycles == Other.Cycles; + } +}; + +/// Specify the latency in cpu cycles for a particular scheduling class and def +/// index. -1 indicates an invalid latency. Heuristics would typically consider +/// an instruction with invalid latency to have infinite latency. 
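The resource descriptors above are plain aggregates so that TableGen can emit them as static tables with initializer lists. A made-up pair of rows, assuming an asserts build (the Name field of MCProcResourceDesc only exists when NDEBUG is not defined); real tables are generated, never hand-written:

    #include "llvm/MC/MCSchedule.h"

    // Two invented resource kinds: a pair of buffered ALUs and one unbuffered
    // load/store unit (SuperIdx 0 here simply means "no super resource" in
    // this sketch).
    static const llvm::MCProcResourceDesc ToyProcResources[] = {
      { "ToyALU", 2, 0, true  },
      { "ToyLSU", 1, 0, false },
    };

    // Entries for a scheduling class that consumes resource kind 0 for one
    // cycle and resource kind 1 for three cycles.
    static const llvm::MCWriteProcResEntry ToyWriteProcRes[] = {
      { 0, 1 },
      { 1, 3 },
    };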
Also identify +/// the WriteResources of this def. When the operand expands to a sequence of +/// writes, this ID is the last write in the sequence. +struct MCWriteLatencyEntry { + int Cycles; + unsigned WriteResourceID; + + bool operator==(const MCWriteLatencyEntry &Other) const { + return Cycles == Other.Cycles && WriteResourceID == Other.WriteResourceID; + } +}; + +/// Specify the number of cycles allowed after instruction issue before a +/// particular use operand reads its registers. This effectively reduces the +/// write's latency. Here we allow negative cycles for corner cases where +/// latency increases. This rule only applies when the entry's WriteResource +/// matches the write's WriteResource. +/// +/// MCReadAdvanceEntries are sorted first by operand index (UseIdx), then by +/// WriteResourceIdx. +struct MCReadAdvanceEntry { + unsigned UseIdx; + unsigned WriteResourceID; + int Cycles; + + bool operator==(const MCReadAdvanceEntry &Other) const { + return UseIdx == Other.UseIdx && WriteResourceID == Other.WriteResourceID + && Cycles == Other.Cycles; + } +}; + +/// Summarize the scheduling resources required for an instruction of a +/// particular scheduling class. +/// +/// Defined as an aggregate struct for creating tables with initializer lists. +struct MCSchedClassDesc { + static const unsigned short InvalidNumMicroOps = UINT16_MAX; + static const unsigned short VariantNumMicroOps = UINT16_MAX - 1; + +#ifndef NDEBUG + const char* Name; +#endif + unsigned short NumMicroOps; + bool BeginGroup; + bool EndGroup; + unsigned WriteProcResIdx; // First index into WriteProcResTable. + unsigned NumWriteProcResEntries; + unsigned WriteLatencyIdx; // First index into WriteLatencyTable. + unsigned NumWriteLatencyEntries; + unsigned ReadAdvanceIdx; // First index into ReadAdvanceTable. + unsigned NumReadAdvanceEntries; + + bool isValid() const { + return NumMicroOps != InvalidNumMicroOps; + } + bool isVariant() const { + return NumMicroOps == VariantNumMicroOps; + } +}; + /// Machine model for scheduling, bundling, and heuristics. /// /// The machine model directly provides basic information about the /// microarchitecture to the scheduler in the form of properties. It also -/// optionally refers to scheduler resources tables and itinerary -/// tables. Scheduler resources tables model the latency and cost for each +/// optionally refers to scheduler resource tables and itinerary +/// tables. Scheduler resource tables model the latency and cost for each /// instruction type. Itinerary tables are an independant mechanism that /// provides a detailed reservation table describing each cycle of instruction /// execution. Subtargets may define any or all of the above categories of data @@ -84,8 +178,11 @@ public: static const unsigned DefaultMispredictPenalty = 10; private: - // TODO: Add a reference to proc resource types and sched resource tables. - + unsigned ProcID; + const MCProcResourceDesc *ProcResourceTable; + const MCSchedClassDesc *SchedClassTable; + unsigned NumProcResourceKinds; + unsigned NumSchedClasses; // Instruction itinerary tables used by InstrItineraryData. 
friend class InstrItineraryData; const InstrItinerary *InstrItineraries; @@ -100,13 +197,45 @@ public: LoadLatency(DefaultLoadLatency), HighLatency(DefaultHighLatency), MispredictPenalty(DefaultMispredictPenalty), - InstrItineraries(0) {} + ProcID(0), ProcResourceTable(0), SchedClassTable(0), + NumProcResourceKinds(0), NumSchedClasses(0), + InstrItineraries(0) { + (void)NumProcResourceKinds; + (void)NumSchedClasses; + } // Table-gen driven ctor. MCSchedModel(unsigned iw, int ml, unsigned ll, unsigned hl, unsigned mp, + unsigned pi, const MCProcResourceDesc *pr, + const MCSchedClassDesc *sc, unsigned npr, unsigned nsc, const InstrItinerary *ii): IssueWidth(iw), MinLatency(ml), LoadLatency(ll), HighLatency(hl), - MispredictPenalty(mp), InstrItineraries(ii){} + MispredictPenalty(mp), ProcID(pi), ProcResourceTable(pr), + SchedClassTable(sc), NumProcResourceKinds(npr), NumSchedClasses(nsc), + InstrItineraries(ii) {} + + unsigned getProcessorID() const { return ProcID; } + + /// Does this machine model include instruction-level scheduling. + bool hasInstrSchedModel() const { return SchedClassTable; } + + unsigned getNumProcResourceKinds() const { + return NumProcResourceKinds; + } + + const MCProcResourceDesc *getProcResource(unsigned ProcResourceIdx) const { + assert(hasInstrSchedModel() && "No scheduling machine model"); + + assert(ProcResourceIdx < NumProcResourceKinds && "bad proc resource idx"); + return &ProcResourceTable[ProcResourceIdx]; + } + + const MCSchedClassDesc *getSchedClassDesc(unsigned SchedClassIdx) const { + assert(hasInstrSchedModel() && "No scheduling machine model"); + + assert(SchedClassIdx < NumSchedClasses && "bad scheduling class idx"); + return &SchedClassTable[SchedClassIdx]; + } }; } // End llvm namespace diff --git a/include/llvm/MC/MCSection.h b/include/llvm/MC/MCSection.h index 7da6534..21fdb6b 100644 --- a/include/llvm/MC/MCSection.h +++ b/include/llvm/MC/MCSection.h @@ -15,7 +15,7 @@ #define LLVM_MC_MCSECTION_H #include "llvm/MC/SectionKind.h" -#include "llvm/Support/Casting.h" +#include "llvm/Support/Compiler.h" namespace llvm { class MCAsmInfo; @@ -33,8 +33,8 @@ namespace llvm { }; private: - MCSection(const MCSection&); // DO NOT IMPLEMENT - void operator=(const MCSection&); // DO NOT IMPLEMENT + MCSection(const MCSection&) LLVM_DELETED_FUNCTION; + void operator=(const MCSection&) LLVM_DELETED_FUNCTION; protected: MCSection(SectionVariant V, SectionKind K) : Variant(V), Kind(K) {} SectionVariant Variant; @@ -64,8 +64,6 @@ namespace llvm { /// isVirtualSection - Check whether this section is "virtual", that is /// has no actual object file contents. virtual bool isVirtualSection() const = 0; - - static bool classof(const MCSection *) { return true; } }; } // end namespace llvm diff --git a/include/llvm/MC/MCSectionCOFF.h b/include/llvm/MC/MCSectionCOFF.h index 7eacde5..b050c0f 100644 --- a/include/llvm/MC/MCSectionCOFF.h +++ b/include/llvm/MC/MCSectionCOFF.h @@ -61,7 +61,6 @@ namespace llvm { static bool classof(const MCSection *S) { return S->getVariant() == SV_COFF; } - static bool classof(const MCSectionCOFF *) { return true; } }; } // end namespace llvm diff --git a/include/llvm/MC/MCSectionELF.h b/include/llvm/MC/MCSectionELF.h index 7321ca8..4d54465 100644 --- a/include/llvm/MC/MCSectionELF.h +++ b/include/llvm/MC/MCSectionELF.h @@ -76,7 +76,6 @@ public: static bool classof(const MCSection *S) { return S->getVariant() == SV_ELF; } - static bool classof(const MCSectionELF *) { return true; } // Return the entry size for sections with fixed-width data. 
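A small usage sketch for the new MCSchedModel queries above; the function name is hypothetical, and a real caller would obtain the scheduling class index from the instruction description rather than passing it in directly:

    #include "llvm/MC/MCSchedule.h"

    // Look up one scheduling class and walk the processor resource kinds of
    // the model.  Bails out if the subtarget only provides itineraries or
    // default properties, or if the class still needs variant resolution.
    static void inspectSchedClass(const llvm::MCSchedModel &SM,
                                  unsigned SchedClassIdx) {
      if (!SM.hasInstrSchedModel())
        return;
      const llvm::MCSchedClassDesc *SC = SM.getSchedClassDesc(SchedClassIdx);
      if (!SC->isValid() || SC->isVariant())
        return;
      for (unsigned i = 0, e = SM.getNumProcResourceKinds(); i != e; ++i) {
        const llvm::MCProcResourceDesc *PRD = SM.getProcResource(i);
        (void)PRD;                     // e.g. inspect PRD->NumUnits here
      }
    }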
static unsigned DetermineEntrySize(SectionKind Kind); diff --git a/include/llvm/MC/MCSectionMachO.h b/include/llvm/MC/MCSectionMachO.h index 15eb4f4..71ea8f3 100644 --- a/include/llvm/MC/MCSectionMachO.h +++ b/include/llvm/MC/MCSectionMachO.h @@ -174,7 +174,6 @@ public: static bool classof(const MCSection *S) { return S->getVariant() == SV_MachO; } - static bool classof(const MCSectionMachO *) { return true; } }; } // end namespace llvm diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h index e8c3e59..230d27e 100644 --- a/include/llvm/MC/MCStreamer.h +++ b/include/llvm/MC/MCStreamer.h @@ -47,8 +47,8 @@ namespace llvm { class MCStreamer { MCContext &Context; - MCStreamer(const MCStreamer&); // DO NOT IMPLEMENT - MCStreamer &operator=(const MCStreamer&); // DO NOT IMPLEMENT + MCStreamer(const MCStreamer&) LLVM_DELETED_FUNCTION; + MCStreamer &operator=(const MCStreamer&) LLVM_DELETED_FUNCTION; bool EmitEHFrame; bool EmitDebugFrame; @@ -342,7 +342,7 @@ namespace llvm { /// @name Generating Data /// @{ - /// EmitBytes - Emit the bytes in \arg Data into the output. + /// EmitBytes - Emit the bytes in \p Data into the output. /// /// This is used to implement assembler directives such as .byte, .ascii, /// etc. @@ -554,6 +554,11 @@ namespace llvm { virtual void EmitRegSave(const SmallVectorImpl &RegList, bool isVector); + /// PPC-related methods. + /// FIXME: Eventually replace it with some "target MC streamer" and move + /// these methods there. + virtual void EmitTCEntry(const MCSymbol &S); + /// FinishImpl - Streamer specific finalization. virtual void FinishImpl() = 0; /// Finish - Finish emission of machine code. @@ -573,17 +578,14 @@ namespace llvm { /// InstPrint. /// /// \param CE - If given, a code emitter to use to show the instruction - /// encoding inline with the assembly. This method takes ownership of \arg CE. + /// encoding inline with the assembly. This method takes ownership of \p CE. /// /// \param TAB - If given, a target asm backend to use to show the fixup /// information in conjunction with encoding information. This method takes - /// ownership of \arg TAB. + /// ownership of \p TAB. /// /// \param ShowInst - Whether to show the MCInst representation inline with /// the assembly. - /// - /// \param DecodeLSDA - If true, emit comments that translates the LSDA into a - /// human readable format. Only usable with CFI. MCStreamer *createAsmStreamer(MCContext &Ctx, formatted_raw_ostream &OS, bool isVerboseAsm, bool useLoc, @@ -597,7 +599,7 @@ namespace llvm { /// createMachOStreamer - Create a machine code streamer which will generate /// Mach-O format object files. /// - /// Takes ownership of \arg TAB and \arg CE. + /// Takes ownership of \p TAB and \p CE. MCStreamer *createMachOStreamer(MCContext &Ctx, MCAsmBackend &TAB, raw_ostream &OS, MCCodeEmitter *CE, bool RelaxAll = false); @@ -605,7 +607,7 @@ namespace llvm { /// createWinCOFFStreamer - Create a machine code streamer which will /// generate Microsoft COFF format object files. /// - /// Takes ownership of \arg TAB and \arg CE. + /// Takes ownership of \p TAB and \p CE. MCStreamer *createWinCOFFStreamer(MCContext &Ctx, MCAsmBackend &TAB, MCCodeEmitter &CE, raw_ostream &OS, @@ -620,7 +622,7 @@ namespace llvm { /// createPureStreamer - Create a machine code streamer which will generate /// "pure" MC object files, for use with MC-JIT and testing tools. /// - /// Takes ownership of \arg TAB and \arg CE. + /// Takes ownership of \p TAB and \p CE. 
MCStreamer *createPureStreamer(MCContext &Ctx, MCAsmBackend &TAB, raw_ostream &OS, MCCodeEmitter *CE); diff --git a/include/llvm/MC/MCSubtargetInfo.h b/include/llvm/MC/MCSubtargetInfo.h index 31d632d..69213cd 100644 --- a/include/llvm/MC/MCSubtargetInfo.h +++ b/include/llvm/MC/MCSubtargetInfo.h @@ -30,7 +30,14 @@ class MCSubtargetInfo { std::string TargetTriple; // Target triple const SubtargetFeatureKV *ProcFeatures; // Processor feature list const SubtargetFeatureKV *ProcDesc; // Processor descriptions - const SubtargetInfoKV *ProcSchedModel; // Scheduler machine model + + // Scheduler machine model + const SubtargetInfoKV *ProcSchedModels; + const MCWriteProcResEntry *WriteProcResTable; + const MCWriteLatencyEntry *WriteLatencyTable; + const MCReadAdvanceEntry *ReadAdvanceTable; + const MCSchedModel *CPUSchedModel; + const InstrStage *Stages; // Instruction itinerary stages const unsigned *OperandCycles; // Itinerary operand cycles const unsigned *ForwardingPaths; // Forwarding paths @@ -43,6 +50,9 @@ public: const SubtargetFeatureKV *PF, const SubtargetFeatureKV *PD, const SubtargetInfoKV *ProcSched, + const MCWriteProcResEntry *WPR, + const MCWriteLatencyEntry *WL, + const MCReadAdvanceEntry *RA, const InstrStage *IS, const unsigned *OC, const unsigned *FP, unsigned NF, unsigned NP); @@ -58,9 +68,9 @@ public: return FeatureBits; } - /// ReInitMCSubtargetInfo - Change CPU (and optionally supplemented with - /// feature string), recompute and return feature bits. - uint64_t ReInitMCSubtargetInfo(StringRef CPU, StringRef FS); + /// InitMCProcessorInfo - Set or change the CPU (optionally supplemented with + /// feature string). Recompute feature bits and scheduling model. + void InitMCProcessorInfo(StringRef CPU, StringRef FS); /// ToggleFeature - Toggle a feature and returns the re-computed feature /// bits. This version does not change the implied bits. @@ -72,11 +82,56 @@ public: /// getSchedModelForCPU - Get the machine model of a CPU. /// - MCSchedModel *getSchedModelForCPU(StringRef CPU) const; + const MCSchedModel *getSchedModelForCPU(StringRef CPU) const; + + /// getSchedModel - Get the machine model for this subtarget's CPU. + /// + const MCSchedModel *getSchedModel() const { return CPUSchedModel; } + + /// Return an iterator at the first process resource consumed by the given + /// scheduling class. + const MCWriteProcResEntry *getWriteProcResBegin( + const MCSchedClassDesc *SC) const { + return &WriteProcResTable[SC->WriteProcResIdx]; + } + const MCWriteProcResEntry *getWriteProcResEnd( + const MCSchedClassDesc *SC) const { + return getWriteProcResBegin(SC) + SC->NumWriteProcResEntries; + } + + const MCWriteLatencyEntry *getWriteLatencyEntry(const MCSchedClassDesc *SC, + unsigned DefIdx) const { + assert(DefIdx < SC->NumWriteLatencyEntries && + "MachineModel does not specify a WriteResource for DefIdx"); + + return &WriteLatencyTable[SC->WriteLatencyIdx + DefIdx]; + } + + int getReadAdvanceCycles(const MCSchedClassDesc *SC, unsigned UseIdx, + unsigned WriteResID) const { + // TODO: The number of read advance entries in a class can be significant + // (~50). Consider compressing the WriteID into a dense ID of those that are + // used by ReadAdvance and representing them as a bitset. + for (const MCReadAdvanceEntry *I = &ReadAdvanceTable[SC->ReadAdvanceIdx], + *E = I + SC->NumReadAdvanceEntries; I != E; ++I) { + if (I->UseIdx < UseIdx) + continue; + if (I->UseIdx > UseIdx) + break; + // Find the first WriteResIdx match, which has the highest cycle count. 
+ if (!I->WriteResourceID || I->WriteResourceID == WriteResID) { + return I->Cycles; + } + } + return 0; + } /// getInstrItineraryForCPU - Get scheduling itinerary of a CPU. /// InstrItineraryData getInstrItineraryForCPU(StringRef CPU) const; + + /// Initialize an InstrItineraryData instance. + void initInstrItins(InstrItineraryData &InstrItins) const; }; } // End llvm namespace diff --git a/include/llvm/MC/MCSymbol.h b/include/llvm/MC/MCSymbol.h index 0583ce5..fe92755 100644 --- a/include/llvm/MC/MCSymbol.h +++ b/include/llvm/MC/MCSymbol.h @@ -15,6 +15,7 @@ #define LLVM_MC_MCSYMBOL_H #include "llvm/ADT/StringRef.h" +#include "llvm/Support/Compiler.h" namespace llvm { class MCExpr; @@ -62,8 +63,8 @@ namespace llvm { : Name(name), Section(0), Value(0), IsTemporary(isTemporary), IsUsed(false) {} - MCSymbol(const MCSymbol&); // DO NOT IMPLEMENT - void operator=(const MCSymbol&); // DO NOT IMPLEMENT + MCSymbol(const MCSymbol&) LLVM_DELETED_FUNCTION; + void operator=(const MCSymbol&) LLVM_DELETED_FUNCTION; public: /// getName - Get the symbol name. StringRef getName() const { return Name; } @@ -112,7 +113,7 @@ namespace llvm { return *Section; } - /// setSection - Mark the symbol as defined in the section \arg S. + /// setSection - Mark the symbol as defined in the section \p S. void setSection(const MCSection &S) { Section = &S; } /// setUndefined - Mark the symbol as undefined. @@ -132,7 +133,7 @@ namespace llvm { return Value != 0; } - /// getValue() - Get the value for variable symbols. + /// getVariableValue() - Get the value for variable symbols. const MCExpr *getVariableValue() const { assert(isVariable() && "Invalid accessor!"); IsUsed = true; @@ -148,7 +149,7 @@ namespace llvm { /// @} - /// print - Print the value to the stream \arg OS. + /// print - Print the value to the stream \p OS. void print(raw_ostream &OS) const; /// dump - Print the value to stderr. diff --git a/include/llvm/MC/MCTargetAsmLexer.h b/include/llvm/MC/MCTargetAsmLexer.h index f5c8c09..b1cc546 100644 --- a/include/llvm/MC/MCTargetAsmLexer.h +++ b/include/llvm/MC/MCTargetAsmLexer.h @@ -24,8 +24,8 @@ class MCTargetAsmLexer { SMLoc ErrLoc; std::string Err; - MCTargetAsmLexer(const MCTargetAsmLexer &); // DO NOT IMPLEMENT - void operator=(const MCTargetAsmLexer &); // DO NOT IMPLEMENT + MCTargetAsmLexer(const MCTargetAsmLexer &) LLVM_DELETED_FUNCTION; + void operator=(const MCTargetAsmLexer &) LLVM_DELETED_FUNCTION; protected: // Can only create subclasses. MCTargetAsmLexer(const Target &); @@ -45,7 +45,7 @@ public: const Target &getTarget() const { return TheTarget; } - /// InstallLexer - Set the lexer to get tokens from lower-level lexer \arg L. + /// InstallLexer - Set the lexer to get tokens from lower-level lexer \p L. void InstallLexer(MCAsmLexer &L) { Lexer = &L; } @@ -77,10 +77,10 @@ public: /// getKind - Get the kind of current token. AsmToken::TokenKind getKind() const { return CurTok.getKind(); } - /// is - Check if the current token has kind \arg K. + /// is - Check if the current token has kind \p K. bool is(AsmToken::TokenKind K) const { return CurTok.is(K); } - /// isNot - Check if the current token has kind \arg K. + /// isNot - Check if the current token has kind \p K. 
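As a rough sketch of how the per-write latency table added to MCSubtargetInfo above can be consumed, assuming the scheduling class has already been resolved to a valid, non-variant entry:

    #include "llvm/MC/MCSchedule.h"
    #include "llvm/MC/MCSubtargetInfo.h"

    // Worst-case def latency over all writes of a scheduling class; returns -1
    // if any write has an invalid (unknown) latency.
    static int worstCaseLatency(const llvm::MCSubtargetInfo &STI,
                                const llvm::MCSchedClassDesc *SC) {
      int Latency = 0;
      for (unsigned DefIdx = 0; DefIdx != SC->NumWriteLatencyEntries; ++DefIdx) {
        const llvm::MCWriteLatencyEntry *WLE =
            STI.getWriteLatencyEntry(SC, DefIdx);
        if (WLE->Cycles < 0)
          return -1;
        if (WLE->Cycles > Latency)
          Latency = WLE->Cycles;
      }
      return Latency;
    }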
bool isNot(AsmToken::TokenKind K) const { return CurTok.isNot(K); } }; diff --git a/include/llvm/MC/MCTargetAsmParser.h b/include/llvm/MC/MCTargetAsmParser.h index 929a204..483a80b 100644 --- a/include/llvm/MC/MCTargetAsmParser.h +++ b/include/llvm/MC/MCTargetAsmParser.h @@ -21,11 +21,43 @@ class MCParsedAsmOperand; class MCInst; template class SmallVectorImpl; +enum AsmRewriteKind { + AOK_DotOperator, // Rewrite a dot operator expression as an immediate. + // E.g., [eax].foo.bar -> [eax].8 + AOK_Emit, // Rewrite _emit as .byte. + AOK_Imm, // Rewrite as $$N. + AOK_ImmPrefix, // Add $$ before a parsed Imm. + AOK_Input, // Rewrite in terms of $N. + AOK_Output, // Rewrite in terms of $N. + AOK_SizeDirective, // Add a sizing directive (e.g., dword ptr). + AOK_Skip // Skip emission (e.g., offset/type operators). +}; + +struct AsmRewrite { + AsmRewriteKind Kind; + SMLoc Loc; + unsigned Len; + unsigned Val; +public: + AsmRewrite(AsmRewriteKind kind, SMLoc loc, unsigned len = 0, unsigned val = 0) + : Kind(kind), Loc(loc), Len(len), Val(val) {} +}; + +struct ParseInstructionInfo { + + SmallVectorImpl *AsmRewrites; + + ParseInstructionInfo() : AsmRewrites(0) {} + ParseInstructionInfo(SmallVectorImpl *rewrites) + : AsmRewrites(rewrites) {} + + ~ParseInstructionInfo() {} +}; + /// MCTargetAsmParser - Generic interface to target specific assembly parsers. class MCTargetAsmParser : public MCAsmParserExtension { public: enum MatchResultTy { - Match_ConversionFail, Match_InvalidOperand, Match_MissingFeature, Match_MnemonicFail, @@ -34,20 +66,34 @@ public: }; private: - MCTargetAsmParser(const MCTargetAsmParser &); // DO NOT IMPLEMENT - void operator=(const MCTargetAsmParser &); // DO NOT IMPLEMENT + MCTargetAsmParser(const MCTargetAsmParser &) LLVM_DELETED_FUNCTION; + void operator=(const MCTargetAsmParser &) LLVM_DELETED_FUNCTION; protected: // Can only create subclasses. MCTargetAsmParser(); /// AvailableFeatures - The current set of available features. unsigned AvailableFeatures; + /// ParsingInlineAsm - Are we parsing ms-style inline assembly? + bool ParsingInlineAsm; + + /// SemaCallback - The Sema callback implementation. Must be set when parsing + /// ms-style inline assembly. + MCAsmParserSemaCallback *SemaCallback; + public: virtual ~MCTargetAsmParser(); unsigned getAvailableFeatures() const { return AvailableFeatures; } void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; } + bool isParsingInlineAsm () { return ParsingInlineAsm; } + void setParsingInlineAsm (bool Value) { ParsingInlineAsm = Value; } + + void setSemaCallback(MCAsmParserSemaCallback *Callback) { + SemaCallback = Callback; + } + virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) = 0; @@ -64,7 +110,8 @@ public: /// \param Operands [out] - The list of parsed operands, this returns /// ownership of them to the caller. /// \return True on failure. - virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc, + virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, SmallVectorImpl &Operands) = 0; /// ParseDirective - Parse a target specific assembler directive @@ -79,18 +126,9 @@ public: /// \param DirectiveID - the identifier token of the directive. virtual bool ParseDirective(AsmToken DirectiveID) = 0; - /// MatchInstruction - Recognize a series of operands of a parsed instruction - /// as an actual MCInst. This returns false on success and returns true on - /// failure to match. 
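A hedged sketch of how a target parser might use ParseInstructionInfo and AsmRewrite while parsing MS-style inline assembly; the helper name is invented, and the unit of Size is whatever the rewrite emitter expects:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/MC/MCTargetAsmParser.h"
    #include "llvm/Support/SMLoc.h"

    // Record that a sizing directive (e.g. "dword ptr") should be inserted
    // over the source range starting at Loc with length Len.
    static void recordSizeDirective(llvm::ParseInstructionInfo &Info,
                                    llvm::SMLoc Loc, unsigned Len,
                                    unsigned Size) {
      if (Info.AsmRewrites)            // only set up for MS-style inline asm
        Info.AsmRewrites->push_back(
            llvm::AsmRewrite(llvm::AOK_SizeDirective, Loc, Len, Size));
    }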
- /// - /// On failure, the target parser is responsible for emitting a diagnostic - /// explaining the match failure. - virtual bool - MatchInstruction(SMLoc IDLoc, - SmallVectorImpl &Operands, - SmallVectorImpl &MCInsts) { - return true; - } + /// mnemonicIsValid - This returns true if this is a valid mnemonic and false + /// otherwise. + virtual bool mnemonicIsValid(StringRef Mnemonic) = 0; /// MatchAndEmitInstruction - Recognize a series of operands of a parsed /// instruction as an actual MCInst and emit it to the specified MCStreamer. @@ -99,9 +137,10 @@ public: /// On failure, the target parser is responsible for emitting a diagnostic /// explaining the match failure. virtual bool - MatchAndEmitInstruction(SMLoc IDLoc, + MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out) = 0; + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm) = 0; /// checkTargetMatchPredicate - Validate the instruction match against /// any complex target predicates not expressible via match classes. @@ -109,6 +148,8 @@ public: return Match_Success; } + virtual void convertToMapAndConstraints(unsigned Kind, + const SmallVectorImpl &Operands) = 0; }; } // End llvm namespace diff --git a/include/llvm/MC/MCValue.h b/include/llvm/MC/MCValue.h index 8352ed1..f9af8bc 100644 --- a/include/llvm/MC/MCValue.h +++ b/include/llvm/MC/MCValue.h @@ -46,7 +46,7 @@ public: /// isAbsolute - Is this an absolute (as opposed to relocatable) value. bool isAbsolute() const { return !SymA && !SymB; } - /// print - Print the value to the stream \arg OS. + /// print - Print the value to the stream \p OS. void print(raw_ostream &OS, const MCAsmInfo *MAI) const; /// dump - Print the value to stderr. diff --git a/include/llvm/MC/SubtargetFeature.h b/include/llvm/MC/SubtargetFeature.h index 507d882..57f0518 100644 --- a/include/llvm/MC/SubtargetFeature.h +++ b/include/llvm/MC/SubtargetFeature.h @@ -50,7 +50,7 @@ struct SubtargetFeatureKV { // struct SubtargetInfoKV { const char *Key; // K-V key string - void *Value; // K-V pointer value + const void *Value; // K-V pointer value // Compare routine for std binary search bool operator<(const SubtargetInfoKV &S) const { @@ -95,10 +95,6 @@ public: const SubtargetFeatureKV *FeatureTable, size_t FeatureTableSize); - /// Get scheduling itinerary of a CPU. - void *getItinerary(const StringRef CPU, - const SubtargetInfoKV *Table, size_t TableSize); - /// Print feature string. void print(raw_ostream &OS) const; diff --git a/include/llvm/MDBuilder.h b/include/llvm/MDBuilder.h index 2aa48b0..1867a63 100644 --- a/include/llvm/MDBuilder.h +++ b/include/llvm/MDBuilder.h @@ -134,6 +134,27 @@ namespace llvm { } } + struct TBAAStructField { + uint64_t Offset; + uint64_t Size; + MDNode *TBAA; + TBAAStructField(uint64_t Offset, uint64_t Size, MDNode *TBAA) : + Offset(Offset), Size(Size), TBAA(TBAA) {} + }; + + /// \brief Return metadata for a tbaa.struct node with the given + /// struct field descriptions. 
+ MDNode *createTBAAStructNode(ArrayRef Fields) { + SmallVector Vals(Fields.size() * 3); + Type *Int64 = IntegerType::get(Context, 64); + for (unsigned i = 0, e = Fields.size(); i != e; ++i) { + Vals[i * 3 + 0] = ConstantInt::get(Int64, Fields[i].Offset); + Vals[i * 3 + 1] = ConstantInt::get(Int64, Fields[i].Size); + Vals[i * 3 + 2] = Fields[i].TBAA; + } + return MDNode::get(Context, Vals); + } + }; } // end namespace llvm diff --git a/include/llvm/Metadata.h b/include/llvm/Metadata.h index b40549b..0fbbb95 100644 --- a/include/llvm/Metadata.h +++ b/include/llvm/Metadata.h @@ -37,7 +37,7 @@ template /// MDString is always unnamed. class MDString : public Value { virtual void anchor(); - MDString(const MDString &); // DO NOT IMPLEMENT + MDString(const MDString &) LLVM_DELETED_FUNCTION; explicit MDString(LLVMContext &C); public: @@ -59,7 +59,6 @@ public: iterator end() const { return getName().end(); } /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const MDString *) { return true; } static bool classof(const Value *V) { return V->getValueID() == MDStringVal; } @@ -71,8 +70,8 @@ class MDNodeOperand; //===----------------------------------------------------------------------===// /// MDNode - a tuple of other values. class MDNode : public Value, public FoldingSetNode { - MDNode(const MDNode &); // DO NOT IMPLEMENT - void operator=(const MDNode &); // DO NOT IMPLEMENT + MDNode(const MDNode &) LLVM_DELETED_FUNCTION; + void operator=(const MDNode &) LLVM_DELETED_FUNCTION; friend class MDNodeOperand; friend class LLVMContextImpl; friend struct FoldingSetTrait; @@ -161,7 +160,6 @@ public: void Profile(FoldingSetNodeID &ID) const; /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const MDNode *) { return true; } static bool classof(const Value *V) { return V->getValueID() == MDNodeVal; } @@ -195,7 +193,7 @@ class NamedMDNode : public ilist_node { friend struct ilist_traits; friend class LLVMContextImpl; friend class Module; - NamedMDNode(const NamedMDNode &); // DO NOT IMPLEMENT + NamedMDNode(const NamedMDNode &) LLVM_DELETED_FUNCTION; std::string Name; Module *Parent; diff --git a/include/llvm/Object/Archive.h b/include/llvm/Object/Archive.h index 358b27a..f3d8249 100644 --- a/include/llvm/Object/Archive.h +++ b/include/llvm/Object/Archive.h @@ -129,7 +129,6 @@ public: symbol_iterator end_symbols() const; // Cast methods. - static inline bool classof(Archive const *v) { return true; } static inline bool classof(Binary const *v) { return v->isArchive(); } diff --git a/include/llvm/Object/Binary.h b/include/llvm/Object/Binary.h index befe812..d555de3 100644 --- a/include/llvm/Object/Binary.h +++ b/include/llvm/Object/Binary.h @@ -26,8 +26,8 @@ namespace object { class Binary { private: - Binary(); // = delete - Binary(const Binary &other); // = delete + Binary() LLVM_DELETED_FUNCTION; + Binary(const Binary &other) LLVM_DELETED_FUNCTION; unsigned int TypeID; @@ -64,7 +64,6 @@ public: // Cast methods. 
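A usage sketch for the createTBAAStructNode helper added to MDBuilder above, assuming TBAAStructField is the nested helper struct introduced in the same hunk and that per-field !tbaa nodes are obtained elsewhere:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/MDBuilder.h"

    // Build a !tbaa.struct node for a struct with an i32 at offset 0 (4 bytes)
    // and a double at offset 8 (8 bytes).
    static llvm::MDNode *buildTBAAStruct(llvm::LLVMContext &Ctx,
                                         llvm::MDNode *IntTBAA,
                                         llvm::MDNode *DoubleTBAA) {
      llvm::MDBuilder MDB(Ctx);
      llvm::SmallVector<llvm::MDBuilder::TBAAStructField, 2> Fields;
      Fields.push_back(llvm::MDBuilder::TBAAStructField(0, 4, IntTBAA));
      Fields.push_back(llvm::MDBuilder::TBAAStructField(8, 8, DoubleTBAA));
      return MDB.createTBAAStructNode(Fields);
    }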
unsigned int getType() const { return TypeID; } - static inline bool classof(const Binary *v) { return true; } // Convenience methods bool isObject() const { diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h index 967420e..6f42d76 100644 --- a/include/llvm/Object/COFF.h +++ b/include/llvm/Object/COFF.h @@ -116,6 +116,7 @@ protected: virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const; virtual error_code getSymbolSection(DataRefImpl Symb, section_iterator &Res) const; + virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const; virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const; virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const; @@ -128,6 +129,7 @@ protected: virtual error_code isSectionBSS(DataRefImpl Sec, bool &Res) const; virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const; virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const; + virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const; virtual error_code isSectionRequiredForExecution(DataRefImpl Sec, bool &Res) const; virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb, @@ -197,7 +199,6 @@ public: static inline bool classof(const Binary *v) { return v->isCOFF(); } - static inline bool classof(const COFFObjectFile *v) { return true; } }; } diff --git a/include/llvm/Object/ELF.h b/include/llvm/Object/ELF.h index 7698441..466de93 100644 --- a/include/llvm/Object/ELF.h +++ b/include/llvm/Object/ELF.h @@ -387,11 +387,65 @@ struct Elf_Rel_Impl } }; +template +struct Elf_Ehdr_Impl { + LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits) + unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes + Elf_Half e_type; // Type of file (see ET_*) + Elf_Half e_machine; // Required architecture for this file (see EM_*) + Elf_Word e_version; // Must be equal to 1 + Elf_Addr e_entry; // Address to jump to in order to start program + Elf_Off e_phoff; // Program header table's file offset, in bytes + Elf_Off e_shoff; // Section header table's file offset, in bytes + Elf_Word e_flags; // Processor-specific flags + Elf_Half e_ehsize; // Size of ELF header, in bytes + Elf_Half e_phentsize;// Size of an entry in the program header table + Elf_Half e_phnum; // Number of entries in the program header table + Elf_Half e_shentsize;// Size of an entry in the section header table + Elf_Half e_shnum; // Number of entries in the section header table + Elf_Half e_shstrndx; // Section header table index of section name + // string table + bool checkMagic() const { + return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0; + } + unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; } + unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; } +}; + +template +struct Elf_Phdr; + +template +struct Elf_Phdr { + LLVM_ELF_IMPORT_TYPES(target_endianness, false) + Elf_Word p_type; // Type of segment + Elf_Off p_offset; // FileOffset where segment is located, in bytes + Elf_Addr p_vaddr; // Virtual Address of beginning of segment + Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific) + Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero) + Elf_Word p_memsz; // Num. 
of bytes in mem image of segment (may be zero) + Elf_Word p_flags; // Segment flags + Elf_Word p_align; // Segment alignment constraint +}; + +template +struct Elf_Phdr { + LLVM_ELF_IMPORT_TYPES(target_endianness, true) + Elf_Word p_type; // Type of segment + Elf_Word p_flags; // Segment flags + Elf_Off p_offset; // FileOffset where segment is located, in bytes + Elf_Addr p_vaddr; // Virtual Address of beginning of segment + Elf_Addr p_paddr; // Physical address of beginning of segment (OS-specific) + Elf_Word p_filesz; // Num. of bytes in file image of segment (may be zero) + Elf_Word p_memsz; // Num. of bytes in mem image of segment (may be zero) + Elf_Word p_align; // Segment alignment constraint +}; template class ELFObjectFile : public ObjectFile { LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits) + typedef Elf_Ehdr_Impl Elf_Ehdr; typedef Elf_Shdr_Impl Elf_Shdr; typedef Elf_Sym_Impl Elf_Sym; typedef Elf_Dyn_Impl Elf_Dyn; @@ -406,28 +460,6 @@ class ELFObjectFile : public ObjectFile { typedef content_iterator dyn_iterator; protected: - struct Elf_Ehdr { - unsigned char e_ident[ELF::EI_NIDENT]; // ELF Identification bytes - Elf_Half e_type; // Type of file (see ET_*) - Elf_Half e_machine; // Required architecture for this file (see EM_*) - Elf_Word e_version; // Must be equal to 1 - Elf_Addr e_entry; // Address to jump to in order to start program - Elf_Off e_phoff; // Program header table's file offset, in bytes - Elf_Off e_shoff; // Section header table's file offset, in bytes - Elf_Word e_flags; // Processor-specific flags - Elf_Half e_ehsize; // Size of ELF header, in bytes - Elf_Half e_phentsize;// Size of an entry in the program header table - Elf_Half e_phnum; // Number of entries in the program header table - Elf_Half e_shentsize;// Size of an entry in the section header table - Elf_Half e_shnum; // Number of entries in the section header table - Elf_Half e_shstrndx; // Section header table index of section name - // string table - bool checkMagic() const { - return (memcmp(e_ident, ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0; - } - unsigned char getFileClass() const { return e_ident[ELF::EI_CLASS]; } - unsigned char getDataEncoding() const { return e_ident[ELF::EI_DATA]; } - }; // This flag is used for classof, to distinguish ELFObjectFile from // its subclass. If more subclasses will be created, this flag will // have to become an enum. @@ -459,6 +491,59 @@ private: // This is set the first time getLoadName is called. mutable const char *dt_soname; +public: + /// \brief Iterate over relocations in a .rel or .rela section. + template + class ELFRelocationIterator { + public: + typedef void difference_type; + typedef const RelocT value_type; + typedef std::forward_iterator_tag iterator_category; + typedef value_type &reference; + typedef value_type *pointer; + + /// \brief Default construct iterator. 
+ ELFRelocationIterator() : Section(0), Current(0) {} + ELFRelocationIterator(const Elf_Shdr *Sec, const char *Start) + : Section(Sec) + , Current(Start) {} + + reference operator *() { + assert(Current && "Attempted to dereference an invalid iterator!"); + return *reinterpret_cast(Current); + } + + pointer operator ->() { + assert(Current && "Attempted to dereference an invalid iterator!"); + return reinterpret_cast(Current); + } + + bool operator ==(const ELFRelocationIterator &Other) { + return Section == Other.Section && Current == Other.Current; + } + + bool operator !=(const ELFRelocationIterator &Other) { + return !(*this == Other); + } + + ELFRelocationIterator &operator ++(int) { + assert(Current && "Attempted to increment an invalid iterator!"); + Current += Section->sh_entsize; + return *this; + } + + ELFRelocationIterator operator ++() { + ELFRelocationIterator Tmp = *this; + ++*this; + return Tmp; + } + + private: + const Elf_Shdr *Section; + const char *Current; + }; + +private: // Records for each version index the corresponding Verdef or Vernaux entry. // This is filled the first time LoadVersionMap() is called. class VersionMapEntry : public PointerIntPair { @@ -535,6 +620,7 @@ protected: virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const; virtual error_code getSymbolSection(DataRefImpl Symb, section_iterator &Res) const; + virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const; friend class DynRefImpl; virtual error_code getDynNext(DataRefImpl DynData, DynRef &Result) const; @@ -555,6 +641,7 @@ protected: bool &Res) const; virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const; virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const; + virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const; virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb, bool &Result) const; virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const; @@ -594,6 +681,27 @@ public: virtual dyn_iterator begin_dynamic_table() const; virtual dyn_iterator end_dynamic_table() const; + typedef ELFRelocationIterator Elf_Rela_Iter; + typedef ELFRelocationIterator Elf_Rel_Iter; + + virtual Elf_Rela_Iter beginELFRela(const Elf_Shdr *sec) const { + return Elf_Rela_Iter(sec, (const char *)(base() + sec->sh_offset)); + } + + virtual Elf_Rela_Iter endELFRela(const Elf_Shdr *sec) const { + return Elf_Rela_Iter(sec, (const char *) + (base() + sec->sh_offset + sec->sh_size)); + } + + virtual Elf_Rel_Iter beginELFRel(const Elf_Shdr *sec) const { + return Elf_Rel_Iter(sec, (const char *)(base() + sec->sh_offset)); + } + + virtual Elf_Rel_Iter endELFRel(const Elf_Shdr *sec) const { + return Elf_Rel_Iter(sec, (const char *) + (base() + sec->sh_offset + sec->sh_size)); + } + virtual uint8_t getBytesInAddress() const; virtual StringRef getFileFormatName() const; virtual StringRef getObjectType() const { return "ELF"; } @@ -608,6 +716,7 @@ public: const Elf_Shdr *getSection(const Elf_Sym *symb) const; const Elf_Shdr *getElfSection(section_iterator &It) const; const Elf_Sym *getElfSymbol(symbol_iterator &It) const; + const Elf_Sym *getElfSymbol(uint32_t index) const; // Methods for type inquiry through isa, cast, and dyn_cast bool isDyldType() const { return isDyldELFObject; } @@ -615,7 +724,6 @@ public: return v->getType() == getELFType(target_endianness == support::little, is64Bits); } - static inline bool classof(const ELFObjectFile *v) { return true; } }; // Iterate through the version 
definitions, and place each Elf_Verdef @@ -804,6 +912,16 @@ ELFObjectFile } template +const typename ELFObjectFile::Elf_Sym * +ELFObjectFile + ::getElfSymbol(uint32_t index) const { + DataRefImpl SymbolData; + SymbolData.d.a = index; + SymbolData.d.b = 1; + return getSymbol(SymbolData); +} + +template error_code ELFObjectFile ::getSymbolFileOffset(DataRefImpl Symb, uint64_t &Result) const { @@ -863,7 +981,18 @@ error_code ELFObjectFile case ELF::STT_FUNC: case ELF::STT_OBJECT: case ELF::STT_NOTYPE: - Result = symb->st_value + (Section ? Section->sh_addr : 0); + bool IsRelocatable; + switch(Header->e_type) { + case ELF::ET_EXEC: + case ELF::ET_DYN: + IsRelocatable = false; + break; + default: + IsRelocatable = true; + } + Result = symb->st_value; + if (IsRelocatable && Section != 0) + Result += Section->sh_addr; return object_error::success; default: Result = UnknownAddressOrSize; @@ -1034,6 +1163,16 @@ error_code ELFObjectFile template error_code ELFObjectFile + ::getSymbolValue(DataRefImpl Symb, + uint64_t &Val) const { + validateSymbol(Symb); + const Elf_Sym *symb = getSymbol(Symb); + Val = symb->st_value; + return object_error::success; +} + +template +error_code ELFObjectFile ::getSectionNext(DataRefImpl Sec, SectionRef &Result) const { const uint8_t *sec = reinterpret_cast(Sec.p); sec += Header->e_shentsize; @@ -1160,7 +1299,8 @@ error_code ELFObjectFile } template -error_code ELFObjectFile::isSectionZeroInit(DataRefImpl Sec, +error_code ELFObjectFile + ::isSectionZeroInit(DataRefImpl Sec, bool &Result) const { const Elf_Shdr *sec = reinterpret_cast(Sec.p); // For ELF, all zero-init sections are virtual (that is, they occupy no space @@ -1174,6 +1314,18 @@ error_code ELFObjectFile::isSectionZeroInit(DataRef template error_code ELFObjectFile + ::isSectionReadOnlyData(DataRefImpl Sec, + bool &Result) const { + const Elf_Shdr *sec = reinterpret_cast(Sec.p); + if (sec->sh_flags & ELF::SHF_WRITE || sec->sh_flags & ELF::SHF_EXECINSTR) + Result = false; + else + Result = true; + return object_error::success; +} + +template +error_code ELFObjectFile ::sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb, bool &Result) const { @@ -1444,6 +1596,143 @@ error_code ELFObjectFile res = "Unknown"; } break; + case ELF::EM_ARM: + switch (type) { + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_NONE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PC24); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_REL32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS16); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS12); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_ABS5); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_SBREL32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_CALL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_PC8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BREL_ADJ); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DESC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_SWI8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_XPC25); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_XPC22); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DTPMOD32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DTPOFF32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_TPOFF32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_COPY); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GLOB_DAT); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_JUMP_SLOT); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_RELATIVE); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTOFF32); + 
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BASE_PREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_BREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PLT32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_CALL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_JUMP24); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP24); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_BASE_ABS); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_7_0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_15_8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PCREL_23_15); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SBREL_11_0_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SBREL_19_12_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SBREL_27_20_CK); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TARGET1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_SBREL31); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_V4BX); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TARGET2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PREL31); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_ABS_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_ABS); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_PREL_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_PREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_ABS_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_ABS); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_PREL_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_PREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP19); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP6); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_ALU_PREL_11_0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_PC12); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ABS32_NOI); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_REL32_NOI); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G0_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G1_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_PC_G2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_PC_G2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_PC_G2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_PC_G2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G0_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G1_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ALU_SB_G2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDR_SB_G2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDRS_SB_G2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_LDC_SB_G2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_BREL_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVT_BREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_MOVW_BREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_BREL_NC); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVT_BREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_MOVW_BREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_GOTDESC); + 
LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_CALL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_DESCSEQ); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_CALL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PLT32_ABS); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_ABS); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_PREL); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOT_BREL12); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTOFF12); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GOTRELAX); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GNU_VTENTRY); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_GNU_VTINHERIT); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP11); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_JUMP8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_GD32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDM32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDO32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_IE32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LE32); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LDO12); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_LE12); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_TLS_IE12GP); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_0); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_1); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_2); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_3); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_4); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_5); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_6); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_7); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_8); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_9); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_10); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_11); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_12); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_13); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_14); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_PRIVATE_15); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_ME_TOO); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_DESCSEQ16); + LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_ARM_THM_TLS_DESCSEQ32); + default: + res = "Unknown"; + } + break; case ELF::EM_HEXAGON: switch (type) { LLVM_ELF_SWITCH_RELOC_TYPE_NAME(R_HEX_NONE); @@ -1574,15 +1863,15 @@ error_code ELFObjectFile int64_t addend = 0; uint16_t symbol_index = 0; switch (sec->sh_type) { - default : + default: return object_error::parse_failed; - case ELF::SHT_REL : { + case ELF::SHT_REL: { type = getRel(Rel)->getType(); symbol_index = getRel(Rel)->getSymbol(); // TODO: Read implicit addend from section data. break; } - case ELF::SHT_RELA : { + case ELF::SHT_RELA: { type = getRela(Rel)->getType(); symbol_index = getRela(Rel)->getSymbol(); addend = getRela(Rel)->r_addend; @@ -1596,9 +1885,8 @@ error_code ELFObjectFile switch (Header->e_machine) { case ELF::EM_X86_64: switch (type) { - case ELF::R_X86_64_32S: - res = symname; - break; + case ELF::R_X86_64_PC8: + case ELF::R_X86_64_PC16: case ELF::R_X86_64_PC32: { std::string fmtbuf; raw_string_ostream fmt(fmtbuf); @@ -1607,10 +1895,23 @@ error_code ELFObjectFile Result.append(fmtbuf.begin(), fmtbuf.end()); } break; + case ELF::R_X86_64_8: + case ELF::R_X86_64_16: + case ELF::R_X86_64_32: + case ELF::R_X86_64_32S: + case ELF::R_X86_64_64: { + std::string fmtbuf; + raw_string_ostream fmt(fmtbuf); + fmt << symname << (addend < 0 ? 
"" : "+") << addend; + fmt.flush(); + Result.append(fmtbuf.begin(), fmtbuf.end()); + } + break; default: res = "Unknown"; } break; + case ELF::EM_ARM: case ELF::EM_HEXAGON: res = symname; break; @@ -2024,6 +2325,8 @@ StringRef ELFObjectFile return "ELF64-i386"; case ELF::EM_X86_64: return "ELF64-x86-64"; + case ELF::EM_PPC64: + return "ELF64-ppc64"; default: return "ELF64-unknown"; } @@ -2044,6 +2347,11 @@ unsigned ELFObjectFile::getArch() const { return Triple::arm; case ELF::EM_HEXAGON: return Triple::hexagon; + case ELF::EM_MIPS: + return (target_endianness == support::little) ? + Triple::mipsel : Triple::mips; + case ELF::EM_PPC64: + return Triple::ppc64; default: return Triple::UnknownArch; } diff --git a/include/llvm/Object/MachO.h b/include/llvm/Object/MachO.h index 0b73f94..4e03daa 100644 --- a/include/llvm/Object/MachO.h +++ b/include/llvm/Object/MachO.h @@ -49,7 +49,6 @@ public: static inline bool classof(const Binary *v) { return v->isMachO(); } - static inline bool classof(const MachOObjectFile *v) { return true; } protected: virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const; @@ -62,6 +61,7 @@ protected: virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const; virtual error_code getSymbolSection(DataRefImpl Symb, section_iterator &Res) const; + virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const; virtual error_code getSectionNext(DataRefImpl Sec, SectionRef &Res) const; virtual error_code getSectionName(DataRefImpl Sec, StringRef &Res) const; @@ -76,6 +76,7 @@ protected: bool &Res) const; virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const; virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const; + virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const; virtual error_code sectionContainsSymbol(DataRefImpl DRI, DataRefImpl S, bool &Result) const; virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const; diff --git a/include/llvm/Object/MachOFormat.h b/include/llvm/Object/MachOFormat.h index f30d431..c0f700d 100644 --- a/include/llvm/Object/MachOFormat.h +++ b/include/llvm/Object/MachOFormat.h @@ -61,7 +61,10 @@ namespace mach { CSARM_V6 = 6, CSARM_V5TEJ = 7, CSARM_XSCALE = 8, - CSARM_V7 = 9 + CSARM_V7 = 9, + CSARM_V7F = 10, + CSARM_V7S = 11, + CSARM_V7K = 12 }; /// \brief PowerPC Machine Subtypes. @@ -273,6 +276,10 @@ namespace macho { uint16_t Flags; uint32_t Value; }; + // Despite containing a uint64_t, this structure is only 4-byte aligned within + // a MachO file. +#pragma pack(push) +#pragma pack(4) struct Symbol64TableEntry { uint32_t StringIndex; uint8_t Type; @@ -280,6 +287,7 @@ namespace macho { uint16_t Flags; uint64_t Value; }; +#pragma pack(pop) /// @} /// @name Data-in-code Table Entry diff --git a/include/llvm/Object/ObjectFile.h b/include/llvm/Object/ObjectFile.h index 2ec656b..1a3120a 100644 --- a/include/llvm/Object/ObjectFile.h +++ b/include/llvm/Object/ObjectFile.h @@ -76,13 +76,13 @@ public: } }; -inline bool operator ==(const DataRefImpl &a, const DataRefImpl &b) { +inline bool operator==(const DataRefImpl &a, const DataRefImpl &b) { // Check bitwise identical. This is the only legal way to compare a union w/o // knowing which member is in use. return std::memcmp(&a, &b, sizeof(DataRefImpl)) == 0; } -inline bool operator <(const DataRefImpl &a, const DataRefImpl &b) { +inline bool operator<(const DataRefImpl &a, const DataRefImpl &b) { // Check bitwise identical. 
This is the only legal way to compare a union w/o // knowing which member is in use. return std::memcmp(&a, &b, sizeof(DataRefImpl)) < 0; @@ -144,7 +144,7 @@ public: SectionRef(DataRefImpl SectionP, const ObjectFile *Owner); bool operator==(const SectionRef &Other) const; - bool operator <(const SectionRef &Other) const; + bool operator<(const SectionRef &Other) const; error_code getNext(SectionRef &Result) const; @@ -163,6 +163,7 @@ public: error_code isRequiredForExecution(bool &Result) const; error_code isVirtual(bool &Result) const; error_code isZeroInit(bool &Result) const; + error_code isReadOnlyData(bool &Result) const; error_code containsSymbol(SymbolRef S, bool &Result) const; @@ -207,11 +208,13 @@ public: SymbolRef(DataRefImpl SymbolP, const ObjectFile *Owner); bool operator==(const SymbolRef &Other) const; - bool operator <(const SymbolRef &Other) const; + bool operator<(const SymbolRef &Other) const; error_code getNext(SymbolRef &Result) const; error_code getName(StringRef &Result) const; + /// Returns the symbol virtual address (i.e. address at which it will be + /// mapped). error_code getAddress(uint64_t &Result) const; error_code getFileOffset(uint64_t &Result) const; error_code getSize(uint64_t &Result) const; @@ -231,6 +234,9 @@ public: /// end_sections() if it is undefined or is an absolute symbol. error_code getSection(section_iterator &Result) const; + /// @brief Get value of the symbol in the symbol table. + error_code getValue(uint64_t &Val) const; + DataRefImpl getRawDataRefImpl() const; }; typedef content_iterator symbol_iterator; @@ -248,7 +254,7 @@ public: LibraryRef(DataRefImpl LibraryP, const ObjectFile *Owner); bool operator==(const LibraryRef &Other) const; - bool operator <(const LibraryRef &Other) const; + bool operator<(const LibraryRef &Other) const; error_code getNext(LibraryRef &Result) const; @@ -263,11 +269,11 @@ const uint64_t UnknownAddressOrSize = ~0ULL; /// ObjectFile - This class is the base class for all object file types. /// Concrete instances of this object are created by createObjectFile, which -/// figure out which type to create. +/// figures out which type to create. class ObjectFile : public Binary { virtual void anchor(); - ObjectFile(); // = delete - ObjectFile(const ObjectFile &other); // = delete + ObjectFile() LLVM_DELETED_FUNCTION; + ObjectFile(const ObjectFile &other) LLVM_DELETED_FUNCTION; protected: ObjectFile(unsigned int Type, MemoryBuffer *source, error_code &ec); @@ -287,8 +293,8 @@ protected: friend class SymbolRef; virtual error_code getSymbolNext(DataRefImpl Symb, SymbolRef &Res) const = 0; virtual error_code getSymbolName(DataRefImpl Symb, StringRef &Res) const = 0; - virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const =0; - virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res) const =0; + virtual error_code getSymbolAddress(DataRefImpl Symb, uint64_t &Res) const = 0; + virtual error_code getSymbolFileOffset(DataRefImpl Symb, uint64_t &Res)const=0; virtual error_code getSymbolSize(DataRefImpl Symb, uint64_t &Res) const = 0; virtual error_code getSymbolType(DataRefImpl Symb, SymbolRef::Type &Res) const = 0; @@ -297,6 +303,7 @@ protected: uint32_t &Res) const = 0; virtual error_code getSymbolSection(DataRefImpl Symb, section_iterator &Res) const = 0; + virtual error_code getSymbolValue(DataRefImpl Symb, uint64_t &Val) const = 0; // Same as above for SectionRef. 
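The new SymbolRef::getValue and SectionRef::isReadOnlyData accessors follow the library's error_code convention; a short sketch with error handling abbreviated and names invented:

    #include "llvm/Object/ObjectFile.h"

    // Fetch the raw symbol-table value of a symbol and the read-only-data flag
    // of a section, returning false if either query fails.
    static bool symbolValueAndROFlag(const llvm::object::SymbolRef &Sym,
                                     const llvm::object::SectionRef &Sec,
                                     uint64_t &Val, bool &ReadOnly) {
      if (llvm::error_code ec = Sym.getValue(Val))
        return false;                  // a real caller would report 'ec'
      if (llvm::error_code ec = Sec.isReadOnlyData(ReadOnly))
        return false;
      return true;
    }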
friend class SectionRef; @@ -314,6 +321,7 @@ protected: // A section is 'virtual' if its contents aren't present in the object image. virtual error_code isSectionVirtual(DataRefImpl Sec, bool &Res) const = 0; virtual error_code isSectionZeroInit(DataRefImpl Sec, bool &Res) const = 0; + virtual error_code isSectionReadOnlyData(DataRefImpl Sec, bool &Res) const =0; virtual error_code sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb, bool &Result) const = 0; virtual relocation_iterator getSectionRelBegin(DataRefImpl Sec) const = 0; @@ -384,7 +392,6 @@ public: static inline bool classof(const Binary *v) { return v->isObject(); } - static inline bool classof(const ObjectFile *v) { return true; } public: static ObjectFile *createCOFFObjectFile(MemoryBuffer *Object); @@ -401,7 +408,7 @@ inline bool SymbolRef::operator==(const SymbolRef &Other) const { return SymbolPimpl == Other.SymbolPimpl; } -inline bool SymbolRef::operator <(const SymbolRef &Other) const { +inline bool SymbolRef::operator<(const SymbolRef &Other) const { return SymbolPimpl < Other.SymbolPimpl; } @@ -441,6 +448,10 @@ inline error_code SymbolRef::getType(SymbolRef::Type &Result) const { return OwningObject->getSymbolType(SymbolPimpl, Result); } +inline error_code SymbolRef::getValue(uint64_t &Val) const { + return OwningObject->getSymbolValue(SymbolPimpl, Val); +} + inline DataRefImpl SymbolRef::getRawDataRefImpl() const { return SymbolPimpl; } @@ -456,7 +467,7 @@ inline bool SectionRef::operator==(const SectionRef &Other) const { return SectionPimpl == Other.SectionPimpl; } -inline bool SectionRef::operator <(const SectionRef &Other) const { +inline bool SectionRef::operator<(const SectionRef &Other) const { return SectionPimpl < Other.SectionPimpl; } @@ -508,6 +519,10 @@ inline error_code SectionRef::isZeroInit(bool &Result) const { return OwningObject->isSectionZeroInit(SectionPimpl, Result); } +inline error_code SectionRef::isReadOnlyData(bool &Result) const { + return OwningObject->isSectionReadOnlyData(SectionPimpl, Result); +} + inline error_code SectionRef::containsSymbol(SymbolRef S, bool &Result) const { return OwningObject->sectionContainsSymbol(SectionPimpl, S.SymbolPimpl, Result); @@ -586,7 +601,7 @@ inline bool LibraryRef::operator==(const LibraryRef &Other) const { return LibraryPimpl == Other.LibraryPimpl; } -inline bool LibraryRef::operator <(const LibraryRef &Other) const { +inline bool LibraryRef::operator<(const LibraryRef &Other) const { return LibraryPimpl < Other.LibraryPimpl; } diff --git a/include/llvm/Object/RelocVisitor.h b/include/llvm/Object/RelocVisitor.h new file mode 100644 index 0000000..7668bde --- /dev/null +++ b/include/llvm/Object/RelocVisitor.h @@ -0,0 +1,131 @@ +//===-- RelocVisitor.h - Visitor for object file relocations -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides a wrapper around all the different types of relocations +// in different file formats, such that a client can handle them in a unified +// manner by only implementing a minimal number of functions. 
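// A minimal usage sketch for RelocVisitor, assuming Obj is an ObjectFile*,
// Reloc is a RelocationRef from an ELF64-x86-64 file, SymValue is the
// resolved symbol value, and applyBytes is a hypothetical helper:
//
//   object::RelocVisitor Visitor(Obj->getFileFormatName());
//   uint64_t Type = 0;
//   Reloc.getType(Type);                      // raw relocation type
//   object::RelocToApply RA =
//       Visitor.visit(Type, Reloc, /*SecAddr=*/0, /*Value=*/SymValue);
//   if (!Visitor.error())
//     applyBytes(RA.Value, RA.Width);         // write RA.Width bytes of RA.Value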
+// +//===----------------------------------------------------------------------===// + +#ifndef _LLVM_OBJECT_RELOCVISITOR +#define _LLVM_OBJECT_RELOCVISITOR + +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Object/ObjectFile.h" +#include "llvm/Object/ELF.h" +#include "llvm/ADT/StringRef.h" + +namespace llvm { +namespace object { + +struct RelocToApply { + // The computed value after applying the relevant relocations. + int64_t Value; + + // The width of the value; how many bytes to touch when applying the + // relocation. + char Width; + RelocToApply(const RelocToApply &In) : Value(In.Value), Width(In.Width) {} + RelocToApply(int64_t Value, char Width) : Value(Value), Width(Width) {} + RelocToApply() : Value(0), Width(0) {} +}; + +/// @brief Base class for object file relocation visitors. +class RelocVisitor { +public: + explicit RelocVisitor(llvm::StringRef FileFormat) + : FileFormat(FileFormat), HasError(false) {} + + // TODO: Should handle multiple applied relocations via either passing in the + // previously computed value or just count paired relocations as a single + // visit. + RelocToApply visit(uint32_t RelocType, RelocationRef R, uint64_t SecAddr = 0, + uint64_t Value = 0) { + if (FileFormat == "ELF64-x86-64") { + switch (RelocType) { + case llvm::ELF::R_X86_64_NONE: + return visitELF_X86_64_NONE(R); + case llvm::ELF::R_X86_64_64: + return visitELF_X86_64_64(R, Value); + case llvm::ELF::R_X86_64_PC32: + return visitELF_X86_64_PC32(R, Value, SecAddr); + case llvm::ELF::R_X86_64_32: + return visitELF_X86_64_32(R, Value); + case llvm::ELF::R_X86_64_32S: + return visitELF_X86_64_32S(R, Value); + default: + HasError = true; + return RelocToApply(); + } + } + return RelocToApply(); + } + + bool error() { return HasError; } + +private: + llvm::StringRef FileFormat; + bool HasError; + + /// Operations + + // Width is the width in bytes of the extend. 
+ RelocToApply zeroExtend(RelocToApply r, char Width) { + if (Width == r.Width) + return r; + r.Value &= (1 << ((Width * 8))) - 1; + return r; + } + RelocToApply signExtend(RelocToApply r, char Width) { + if (Width == r.Width) + return r; + bool SignBit = r.Value & (1 << ((Width * 8) - 1)); + if (SignBit) { + r.Value |= ~((1 << (Width * 8)) - 1); + } else { + r.Value &= (1 << (Width * 8)) - 1; + } + return r; + } + + /// X86-64 ELF + RelocToApply visitELF_X86_64_NONE(RelocationRef R) { + return RelocToApply(0, 0); + } + RelocToApply visitELF_X86_64_64(RelocationRef R, uint64_t Value) { + int64_t Addend; + R.getAdditionalInfo(Addend); + return RelocToApply(Value + Addend, 8); + } + RelocToApply visitELF_X86_64_PC32(RelocationRef R, uint64_t Value, + uint64_t SecAddr) { + int64_t Addend; + R.getAdditionalInfo(Addend); + uint64_t Address; + R.getAddress(Address); + return RelocToApply(Value + Addend - Address, 4); + } + RelocToApply visitELF_X86_64_32(RelocationRef R, uint64_t Value) { + int64_t Addend; + R.getAdditionalInfo(Addend); + uint32_t Res = (Value + Addend) & 0xFFFFFFFF; + return RelocToApply(Res, 4); + } + RelocToApply visitELF_X86_64_32S(RelocationRef R, uint64_t Value) { + int64_t Addend; + R.getAdditionalInfo(Addend); + int32_t Res = (Value + Addend) & 0xFFFFFFFF; + return RelocToApply(Res, 4); + } +}; + +} +} +#endif diff --git a/include/llvm/Operator.h b/include/llvm/Operator.h index 1e86980..b326c11 100644 --- a/include/llvm/Operator.h +++ b/include/llvm/Operator.h @@ -16,6 +16,7 @@ #define LLVM_OPERATOR_H #include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" #include "llvm/Instruction.h" #include "llvm/Type.h" @@ -32,9 +33,14 @@ class Operator : public User { private: // Do not implement any of these. The Operator class is intended to be used // as a utility, and is never itself instantiated. - void *operator new(size_t, unsigned); - void *operator new(size_t s); - Operator(); + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; + void *operator new(size_t s) LLVM_DELETED_FUNCTION; + Operator() LLVM_DELETED_FUNCTION; + +protected: + // NOTE: Cannot use LLVM_DELETED_FUNCTION because it's not legal to delete + // an overridden method that's not deleted in the base class. Cannot leave + // this unimplemented because that leads to an ODR-violation. ~Operator(); public: @@ -57,7 +63,6 @@ public: return Instruction::UserOp1; } - static inline bool classof(const Operator *) { return true; } static inline bool classof(const Instruction *) { return true; } static inline bool classof(const ConstantExpr *) { return true; } static inline bool classof(const Value *V) { @@ -77,8 +82,6 @@ public: }; private: - ~OverflowingBinaryOperator(); // do not implement - friend class BinaryOperator; friend class ConstantExpr; void setHasNoUnsignedWrap(bool B) { @@ -103,7 +106,6 @@ public: return (SubclassOptionalData & NoSignedWrap) != 0; } - static inline bool classof(const OverflowingBinaryOperator *) { return true; } static inline bool classof(const Instruction *I) { return I->getOpcode() == Instruction::Add || I->getOpcode() == Instruction::Sub || @@ -131,8 +133,6 @@ public: }; private: - ~PossiblyExactOperator(); // do not implement - friend class BinaryOperator; friend class ConstantExpr; void setIsExact(bool B) { @@ -167,9 +167,6 @@ public: /// FPMathOperator - Utility class for floating point operations which can have /// information about relaxed accuracy requirements attached to them. 
class FPMathOperator : public Operator { -private: - ~FPMathOperator(); // do not implement - public: /// \brief Get the maximum error permitted by this operation in ULPs. An @@ -177,7 +174,6 @@ public: /// default precision. float getFPAccuracy() const; - static inline bool classof(const FPMathOperator *) { return true; } static inline bool classof(const Instruction *I) { return I->getType()->isFPOrFPVectorTy(); } @@ -191,11 +187,7 @@ public: /// opcodes. template class ConcreteOperator : public SuperClass { - ~ConcreteOperator(); // DO NOT IMPLEMENT public: - static inline bool classof(const ConcreteOperator *) { - return true; - } static inline bool classof(const Instruction *I) { return I->getOpcode() == Opc; } @@ -210,45 +202,35 @@ public: class AddOperator : public ConcreteOperator { - ~AddOperator(); // DO NOT IMPLEMENT }; class SubOperator : public ConcreteOperator { - ~SubOperator(); // DO NOT IMPLEMENT }; class MulOperator : public ConcreteOperator { - ~MulOperator(); // DO NOT IMPLEMENT }; class ShlOperator : public ConcreteOperator { - ~ShlOperator(); // DO NOT IMPLEMENT }; - + class SDivOperator : public ConcreteOperator { - ~SDivOperator(); // DO NOT IMPLEMENT }; class UDivOperator : public ConcreteOperator { - ~UDivOperator(); // DO NOT IMPLEMENT }; class AShrOperator : public ConcreteOperator { - ~AShrOperator(); // DO NOT IMPLEMENT }; class LShrOperator : public ConcreteOperator { - ~LShrOperator(); // DO NOT IMPLEMENT }; - - - + + + class GEPOperator : public ConcreteOperator { - ~GEPOperator(); // DO NOT IMPLEMENT - enum { IsInBounds = (1 << 0) }; @@ -288,6 +270,12 @@ public: return getPointerOperand()->getType(); } + /// getPointerAddressSpace - Method to return the address space of the + /// pointer operand. + unsigned getPointerAddressSpace() const { + return cast(getPointerOperandType())->getAddressSpace(); + } + unsigned getNumIndices() const { // Note: always non-negative return getNumOperands() - 1; } diff --git a/include/llvm/Pass.h b/include/llvm/Pass.h index 888537d..cd651db 100644 --- a/include/llvm/Pass.h +++ b/include/llvm/Pass.h @@ -29,6 +29,7 @@ #ifndef LLVM_PASS_H #define LLVM_PASS_H +#include "llvm/Support/Compiler.h" #include namespace llvm { @@ -82,8 +83,8 @@ class Pass { AnalysisResolver *Resolver; // Used to resolve analysis const void *PassID; PassKind Kind; - void operator=(const Pass&); // DO NOT IMPLEMENT - Pass(const Pass &); // DO NOT IMPLEMENT + void operator=(const Pass&) LLVM_DELETED_FUNCTION; + Pass(const Pass &) LLVM_DELETED_FUNCTION; public: explicit Pass(PassKind K, char &pid) : Resolver(0), PassID(&pid), Kind(K) { } diff --git a/include/llvm/PassAnalysisSupport.h b/include/llvm/PassAnalysisSupport.h index 5c6a2d7..d14d73b 100644 --- a/include/llvm/PassAnalysisSupport.h +++ b/include/llvm/PassAnalysisSupport.h @@ -120,7 +120,7 @@ public: class PMDataManager; class AnalysisResolver { private: - AnalysisResolver(); // DO NOT IMPLEMENT + AnalysisResolver() LLVM_DELETED_FUNCTION; public: explicit AnalysisResolver(PMDataManager &P) : PM(P) { } diff --git a/include/llvm/PassSupport.h b/include/llvm/PassSupport.h index c50c2cc..c6ad44f 100644 --- a/include/llvm/PassSupport.h +++ b/include/llvm/PassSupport.h @@ -126,8 +126,8 @@ public: } private: - void operator=(const PassInfo &); // do not implement - PassInfo(const PassInfo &); // do not implement + void operator=(const PassInfo &) LLVM_DELETED_FUNCTION; + PassInfo(const PassInfo &) LLVM_DELETED_FUNCTION; }; #define CALL_ONCE_INITIALIZATION(function) \ diff --git 
a/include/llvm/Support/AlignOf.h b/include/llvm/Support/AlignOf.h index cf71251..d6b0ab8 100644 --- a/include/llvm/Support/AlignOf.h +++ b/include/llvm/Support/AlignOf.h @@ -68,24 +68,20 @@ inline unsigned alignOf() { return AlignOf::Alignment; } /// integer literal can be used to specify an alignment constraint. Once built /// up here, we can then begin to indirect between these using normal C++ /// template parameters. -template struct AlignedCharArrayImpl {}; -template <> struct AlignedCharArrayImpl<0> { - typedef char type; -}; +template struct AlignedCharArrayImpl; + +// MSVC requires special handling here. +#ifndef _MSC_VER + #if __has_feature(cxx_alignas) #define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ template <> struct AlignedCharArrayImpl { \ - typedef char alignas(x) type; \ + char alignas(x) aligned; \ } -#elif defined(__clang__) || defined(__GNUC__) +#elif defined(__GNUC__) || defined(__IBM_ATTRIBUTES) #define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ template <> struct AlignedCharArrayImpl { \ - typedef char type __attribute__((aligned(x))); \ - } -#elif defined(_MSC_VER) -#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ - template <> struct AlignedCharArrayImpl { \ - typedef __declspec(align(x)) char type; \ + char aligned __attribute__((aligned(x))); \ } #else # error No supported align as directive. @@ -104,9 +100,38 @@ LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024); LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048); LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096); LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192); + +#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT + +#else // _MSC_VER + +// We provide special variations of this template for the most common +// alignments because __declspec(align(...)) doesn't actually work when it is +// a member of a by-value function argument in MSVC, even if the alignment +// request is something reasonably like 8-byte or 16-byte. +template <> struct AlignedCharArrayImpl<1> { char aligned; }; +template <> struct AlignedCharArrayImpl<2> { short aligned; }; +template <> struct AlignedCharArrayImpl<4> { int aligned; }; +template <> struct AlignedCharArrayImpl<8> { double aligned; }; + +#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ + template <> struct AlignedCharArrayImpl { \ + __declspec(align(x)) char aligned; \ + } +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(512); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192); // Any larger and MSVC complains. #undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT +#endif // _MSC_VER + /// \brief This union template exposes a suitably aligned and sized character /// array member which can hold elements of any of up to four types. /// @@ -134,17 +159,11 @@ public: /// constrain the layout of this character array. char buffer[sizeof(SizerImpl)]; - // Sadly, Clang and GCC both fail to align a character array properly even - // with an explicit alignment attribute. To work around this, we union - // the character array that will actually be used with a struct that contains - // a single aligned character member. 
Tests seem to indicate that both Clang - // and GCC will properly register the alignment of a struct containing an - // aligned member, and this alignment should carry over to the character - // array in the union. - struct { - typename llvm::AlignedCharArrayImpl::Alignment>::type - nonce_inner_member; - } nonce_member; +private: + // Tests seem to indicate that both Clang and GCC will properly register the + // alignment of a struct containing an aligned member, and this alignment + // should carry over to the character array in the union. + llvm::AlignedCharArrayImpl::Alignment> nonce_member; }; } // end namespace llvm diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h index a2ad24f..a644b13 100644 --- a/include/llvm/Support/Allocator.h +++ b/include/llvm/Support/Allocator.h @@ -79,8 +79,8 @@ class MallocSlabAllocator : public SlabAllocator { public: MallocSlabAllocator() : Allocator() { } virtual ~MallocSlabAllocator(); - virtual MemSlab *Allocate(size_t Size); - virtual void Deallocate(MemSlab *Slab); + virtual MemSlab *Allocate(size_t Size) LLVM_OVERRIDE; + virtual void Deallocate(MemSlab *Slab) LLVM_OVERRIDE; }; /// BumpPtrAllocator - This allocator is useful for containers that need @@ -88,8 +88,8 @@ public: /// allocating memory, and never deletes it until the entire block is dead. This /// makes allocation speedy, but must only be used when the trade-off is ok. class BumpPtrAllocator { - BumpPtrAllocator(const BumpPtrAllocator &); // do not implement - void operator=(const BumpPtrAllocator &); // do not implement + BumpPtrAllocator(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION; + void operator=(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION; /// SlabSize - Allocate data into slabs of this size unless we get an /// allocation above SizeThreshold. diff --git a/include/llvm/Support/CallSite.h b/include/llvm/Support/CallSite.h index c23bb6a..ad8d6d4 100644 --- a/include/llvm/Support/CallSite.h +++ b/include/llvm/Support/CallSite.h @@ -81,7 +81,7 @@ public: InstrTy *operator->() const { return I.getPointer(); } operator bool() const { return I.getPointer(); } - /// getCalledValue - Return the pointer to function that is being called... + /// getCalledValue - Return the pointer to function that is being called. /// ValTy *getCalledValue() const { assert(getInstruction() && "Not a call or invoke instruction!"); @@ -95,7 +95,7 @@ public: return dyn_cast(getCalledValue()); } - /// setCalledFunction - Set the callee to the specified value... + /// setCalledFunction - Set the callee to the specified value. /// void setCalledFunction(Value *V) { assert(getInstruction() && "Not a call or invoke instruction!"); @@ -130,7 +130,7 @@ public: } /// arg_iterator - The type of iterator to use when looping over actual - /// arguments at this call site... + /// arguments at this call site. typedef IterTy arg_iterator; /// arg_begin/arg_end - Return iterators corresponding to the actual argument @@ -185,13 +185,13 @@ public: } /// \brief Return true if this function has the given attribute. - bool hasFnAttr(Attributes N) const { - CALLSITE_DELEGATE_GETTER(hasFnAttr(N)); + bool hasFnAttr(Attributes::AttrVal A) const { + CALLSITE_DELEGATE_GETTER(hasFnAttr(A)); } - /// paramHasAttr - whether the call or the callee has the given attribute. - bool paramHasAttr(uint16_t i, Attributes attr) const { - CALLSITE_DELEGATE_GETTER(paramHasAttr(i, attr)); + /// \brief Return true if the call or the callee has the given attribute. 
+ bool paramHasAttr(unsigned i, Attributes::AttrVal A) const { + CALLSITE_DELEGATE_GETTER(paramHasAttr(i, A)); } /// @brief Extract the alignment for a call or parameter (0=unknown). @@ -211,32 +211,32 @@ public: bool doesNotAccessMemory() const { CALLSITE_DELEGATE_GETTER(doesNotAccessMemory()); } - void setDoesNotAccessMemory(bool doesNotAccessMemory = true) { - CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory(doesNotAccessMemory)); + void setDoesNotAccessMemory() { + CALLSITE_DELEGATE_SETTER(setDoesNotAccessMemory()); } /// @brief Determine if the call does not access or only reads memory. bool onlyReadsMemory() const { CALLSITE_DELEGATE_GETTER(onlyReadsMemory()); } - void setOnlyReadsMemory(bool onlyReadsMemory = true) { - CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory(onlyReadsMemory)); + void setOnlyReadsMemory() { + CALLSITE_DELEGATE_SETTER(setOnlyReadsMemory()); } /// @brief Determine if the call cannot return. bool doesNotReturn() const { CALLSITE_DELEGATE_GETTER(doesNotReturn()); } - void setDoesNotReturn(bool doesNotReturn = true) { - CALLSITE_DELEGATE_SETTER(setDoesNotReturn(doesNotReturn)); + void setDoesNotReturn() { + CALLSITE_DELEGATE_SETTER(setDoesNotReturn()); } /// @brief Determine if the call cannot unwind. bool doesNotThrow() const { CALLSITE_DELEGATE_GETTER(doesNotThrow()); } - void setDoesNotThrow(bool doesNotThrow = true) { - CALLSITE_DELEGATE_SETTER(setDoesNotThrow(doesNotThrow)); + void setDoesNotThrow() { + CALLSITE_DELEGATE_SETTER(setDoesNotThrow()); } #undef CALLSITE_DELEGATE_GETTER @@ -244,12 +244,12 @@ public: /// @brief Determine whether this argument is not captured. bool doesNotCapture(unsigned ArgNo) const { - return paramHasAttr(ArgNo + 1, Attribute::NoCapture); + return paramHasAttr(ArgNo + 1, Attributes::NoCapture); } /// @brief Determine whether this argument is passed by value. bool isByValArgument(unsigned ArgNo) const { - return paramHasAttr(ArgNo + 1, Attribute::ByVal); + return paramHasAttr(ArgNo + 1, Attributes::ByVal); } /// hasArgument - Returns true if this CallSite passes the given Value* as an diff --git a/include/llvm/Support/Casting.h b/include/llvm/Support/Casting.h index 3aab436..0c71882 100644 --- a/include/llvm/Support/Casting.h +++ b/include/llvm/Support/Casting.h @@ -15,6 +15,7 @@ #ifndef LLVM_SUPPORT_CASTING_H #define LLVM_SUPPORT_CASTING_H +#include "llvm/Support/type_traits.h" #include namespace llvm { @@ -44,13 +45,23 @@ template struct simplify_type { // The core of the implementation of isa is here; To and From should be // the names of classes. This template can be specialized to customize the // implementation of isa<> without rewriting it from scratch. -template +template struct isa_impl { static inline bool doit(const From &Val) { return To::classof(&Val); } }; +/// \brief Always allow upcasts, and perform no dynamic check for them. 
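// A brief illustration of the effect, assuming I points at some Instruction:
// because Instruction derives from Value, isa<Value>(I) is resolved through
// this always-true specialization at compile time.
//
//   Instruction *I = ...;          // placeholder
//   if (isa<Value>(I)) { ... }     // resolves to true without calling classof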
+template +struct isa_impl::value + >::type + > { + static inline bool doit(const From &) { return true; } +}; + template struct isa_impl_cl { static inline bool doit(const From &Val) { return isa_impl::doit(Val); @@ -65,18 +76,21 @@ template struct isa_impl_cl { template struct isa_impl_cl { static inline bool doit(const From *Val) { + assert(Val && "isa<> used on a null pointer"); return isa_impl::doit(*Val); } }; template struct isa_impl_cl { static inline bool doit(const From *Val) { + assert(Val && "isa<> used on a null pointer"); return isa_impl::doit(*Val); } }; template struct isa_impl_cl { static inline bool doit(const From *Val) { + assert(Val && "isa<> used on a null pointer"); return isa_impl::doit(*Val); } }; diff --git a/include/llvm/Support/CommandLine.h b/include/llvm/Support/CommandLine.h index ae1570d..872c579 100644 --- a/include/llvm/Support/CommandLine.h +++ b/include/llvm/Support/CommandLine.h @@ -41,16 +41,14 @@ namespace cl { // ParseCommandLineOptions - Command line option processing entry point. // void ParseCommandLineOptions(int argc, const char * const *argv, - const char *Overview = 0, - bool ReadResponseFiles = false); + const char *Overview = 0); //===----------------------------------------------------------------------===// // ParseEnvironmentOptions - Environment variable option processing alternate // entry point. // void ParseEnvironmentOptions(const char *progName, const char *envvar, - const char *Overview = 0, - bool ReadResponseFiles = false); + const char *Overview = 0); ///===---------------------------------------------------------------------===// /// SetVersionPrinter - Override the default (LLVM specific) version printer @@ -1509,7 +1507,7 @@ class bits : public Option, public bits_storage { typename ParserClass::parser_data_type(); if (Parser.parse(*this, ArgName, Arg, Val)) return true; // Parse Error! - addValue(Val); + this->addValue(Val); setPosition(pos); Positions.push_back(pos); return false; @@ -1608,15 +1606,16 @@ public: class alias : public Option { Option *AliasFor; virtual bool handleOccurrence(unsigned pos, StringRef /*ArgName*/, - StringRef Arg) { + StringRef Arg) LLVM_OVERRIDE { return AliasFor->handleOccurrence(pos, AliasFor->ArgStr, Arg); } // Handle printing stuff... - virtual size_t getOptionWidth() const; - virtual void printOptionInfo(size_t GlobalWidth) const; + virtual size_t getOptionWidth() const LLVM_OVERRIDE; + virtual void printOptionInfo(size_t GlobalWidth) const LLVM_OVERRIDE; // Aliases do not need to print their values. - virtual void printOptionValue(size_t /*GlobalWidth*/, bool /*Force*/) const {} + virtual void printOptionValue(size_t /*GlobalWidth*/, + bool /*Force*/) const LLVM_OVERRIDE {} void done() { if (!hasArgStr()) diff --git a/include/llvm/Support/Compiler.h b/include/llvm/Support/Compiler.h index 4469ae3..7ceeb32 100644 --- a/include/llvm/Support/Compiler.h +++ b/include/llvm/Support/Compiler.h @@ -24,7 +24,7 @@ /// does not imply the existence of any other C++ library features. #if (__has_feature(cxx_rvalue_references) \ || defined(__GXX_EXPERIMENTAL_CXX0X__) \ - || _MSC_VER >= 1600) + || (defined(_MSC_VER) && _MSC_VER >= 1600)) #define LLVM_USE_RVALUE_REFERENCES 1 #else #define LLVM_USE_RVALUE_REFERENCES 0 @@ -40,7 +40,7 @@ /// LLVM_DELETED_FUNCTION - Expands to = delete if the compiler supports it. /// Use to mark functions as uncallable. Member functions with this should -/// be declared private so that some behaivor is kept in C++03 mode. 
+/// be declared private so that some behavior is kept in C++03 mode. /// /// class DontCopy { /// private: @@ -57,6 +57,22 @@ #define LLVM_DELETED_FUNCTION #endif +/// LLVM_FINAL - Expands to 'final' if the compiler supports it. +/// Use to mark classes or virtual methods as final. +#if (__has_feature(cxx_override_control)) +#define LLVM_FINAL final +#else +#define LLVM_FINAL +#endif + +/// LLVM_OVERRIDE - Expands to 'override' if the compiler supports it. +/// Use to mark virtual methods as overriding a base class method. +#if (__has_feature(cxx_override_control)) +#define LLVM_OVERRIDE override +#else +#define LLVM_OVERRIDE +#endif + /// LLVM_LIBRARY_VISIBILITY - If a class marked with this attribute is linked /// into a shared library, then the class should be private to the library and /// not accessible from outside it. Can also be used to mark variables and @@ -106,9 +122,11 @@ #endif #if (__GNUC__ >= 4) -#define BUILTIN_EXPECT(EXPR, VALUE) __builtin_expect((EXPR), (VALUE)) +#define LLVM_LIKELY(EXPR) __builtin_expect((bool)(EXPR), true) +#define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false) #else -#define BUILTIN_EXPECT(EXPR, VALUE) (EXPR) +#define LLVM_LIKELY(EXPR) (EXPR) +#define LLVM_UNLIKELY(EXPR) (EXPR) #endif @@ -187,4 +205,13 @@ # define LLVM_BUILTIN_UNREACHABLE __builtin_unreachable() #endif +// LLVM_BUILTIN_TRAP - On compilers which support it, expands to an expression +// which causes the program to exit abnormally. +#if defined(__clang__) || (__GNUC__ > 4) \ + || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) +# define LLVM_BUILTIN_TRAP __builtin_trap() +#else +# define LLVM_BUILTIN_TRAP *(volatile int*)0x11 = 0 +#endif + #endif diff --git a/include/llvm/Support/DataExtractor.h b/include/llvm/Support/DataExtractor.h index 506ec96..a3ae782 100644 --- a/include/llvm/Support/DataExtractor.h +++ b/include/llvm/Support/DataExtractor.h @@ -10,6 +10,7 @@ #ifndef LLVM_SUPPORT_DATAEXTRACTOR_H #define LLVM_SUPPORT_DATAEXTRACTOR_H +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/DataTypes.h" @@ -99,8 +100,8 @@ public: /// enough bytes to extract this value, the offset will be left /// unmodified. /// - /// @param[in] byte_size - /// The size in byte of the integer to extract. + /// @param[in] size + /// The size in bytes of the integer to extract. /// /// @return /// The sign extended signed integer value that was extracted, diff --git a/include/llvm/Support/ELF.h b/include/llvm/Support/ELF.h index f7ae60f..2cd2671 100644 --- a/include/llvm/Support/ELF.h +++ b/include/llvm/Support/ELF.h @@ -441,6 +441,7 @@ enum { R_MICROBLAZE_COPY = 21 }; +// ELF Relocation types for PPC32 enum { R_PPC_NONE = 0, /* No relocation. 
*/ R_PPC_ADDR32 = 1, @@ -456,7 +457,23 @@ enum { R_PPC_REL14 = 11, R_PPC_REL14_BRTAKEN = 12, R_PPC_REL14_BRNTAKEN = 13, - R_PPC_REL32 = 26 + R_PPC_REL32 = 26, + R_PPC_TPREL16_LO = 70, + R_PPC_TPREL16_HA = 72 +}; + +// ELF Relocation types for PPC64 +enum { + R_PPC64_ADDR16_LO = 4, + R_PPC64_ADDR16_HI = 5, + R_PPC64_ADDR14 = 7, + R_PPC64_REL24 = 10, + R_PPC64_ADDR64 = 38, + R_PPC64_ADDR16_HIGHER = 39, + R_PPC64_ADDR16_HIGHEST = 41, + R_PPC64_TOC16 = 47, + R_PPC64_TOC = 51, + R_PPC64_TOC16_DS = 63 }; // ARM Specific e_flags @@ -674,8 +691,36 @@ enum { R_MIPS_NUM = 218 }; +// Hexagon Specific e_flags +// Release 5 ABI +enum { + // Object processor version flags, bits[3:0] + EF_HEXAGON_MACH_V2 = 0x00000001, // Hexagon V2 + EF_HEXAGON_MACH_V3 = 0x00000002, // Hexagon V3 + EF_HEXAGON_MACH_V4 = 0x00000003, // Hexagon V4 + EF_HEXAGON_MACH_V5 = 0x00000004, // Hexagon V5 + + // Highest ISA version flags + EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[3:0] + // of e_flags + EF_HEXAGON_ISA_V2 = 0x00000010, // Hexagon V2 ISA + EF_HEXAGON_ISA_V3 = 0x00000020, // Hexagon V3 ISA + EF_HEXAGON_ISA_V4 = 0x00000030, // Hexagon V4 ISA + EF_HEXAGON_ISA_V5 = 0x00000040 // Hexagon V5 ISA +}; + +// Hexagon specific Section indexes for common small data +// Release 5 ABI +enum { + SHN_HEXAGON_SCOMMON = 0xff00, // Other access sizes + SHN_HEXAGON_SCOMMON_1 = 0xff01, // Byte-sized access + SHN_HEXAGON_SCOMMON_2 = 0xff02, // Half-word-sized access + SHN_HEXAGON_SCOMMON_4 = 0xff03, // Word-sized access + SHN_HEXAGON_SCOMMON_8 = 0xff04 // Double-word-size access +}; + // ELF Relocation types for Hexagon -// Release 5 ABI - Document: 80-V9418-3 Rev. J +// Release 5 ABI enum { R_HEX_NONE = 0, R_HEX_B22_PCREL = 1, @@ -1103,6 +1148,9 @@ enum { PT_PHDR = 6, // The program header table itself. PT_TLS = 7, // The thread-local storage template. PT_LOOS = 0x60000000, // Lowest operating system-specific pt entry type. + PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type. + PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type. + PT_HIPROC = 0x7fffffff, // Highest processor-specific program hdr entry type. // x86-64 program header types. // These all contain stack unwind tables. @@ -1113,9 +1161,11 @@ enum { PT_GNU_STACK = 0x6474e551, // Indicates stack executability. PT_GNU_RELRO = 0x6474e552, // Read-only after relocation. - PT_HIOS = 0x6fffffff, // Highest operating system-specific pt entry type. - PT_LOPROC = 0x70000000, // Lowest processor-specific program hdr entry type. - PT_HIPROC = 0x7fffffff // Highest processor-specific program hdr entry type. + // ARM program header types. + PT_ARM_ARCHEXT = 0x70000000, // Platform architecture compatibility information + // These all contain stack unwind tables. + PT_ARM_EXIDX = 0x70000001, + PT_ARM_UNWIND = 0x70000001 }; // Segment flag bits. 
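// A small decoding sketch for the Hexagon e_flags introduced above, assuming
// EFlags holds an ELF header's e_flags value: the processor version sits in
// bits[3:0], so a reader could extract it as
//
//   unsigned Mach = EFlags & 0x0F;                 // EF_HEXAGON_MACH_V2 .. _V5
//   bool IsV5 = (Mach == ELF::EF_HEXAGON_MACH_V5);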
diff --git a/include/llvm/Support/FileOutputBuffer.h b/include/llvm/Support/FileOutputBuffer.h index 0f07164..bcd35e3 100644 --- a/include/llvm/Support/FileOutputBuffer.h +++ b/include/llvm/Support/FileOutputBuffer.h @@ -78,10 +78,11 @@ public: ~FileOutputBuffer(); +private: + FileOutputBuffer(const FileOutputBuffer &) LLVM_DELETED_FUNCTION; + FileOutputBuffer &operator=(const FileOutputBuffer &) LLVM_DELETED_FUNCTION; protected: - FileOutputBuffer(const FileOutputBuffer &); // DO NOT IMPLEMENT - FileOutputBuffer &operator=(const FileOutputBuffer &); // DO NOT IMPLEMENT - FileOutputBuffer(uint8_t *Start, uint8_t *End, + FileOutputBuffer(uint8_t *Start, uint8_t *End, StringRef Path, StringRef TempPath); uint8_t *BufferStart; diff --git a/include/llvm/Support/FileSystem.h b/include/llvm/Support/FileSystem.h index f4a9aa0..b455b28 100644 --- a/include/llvm/Support/FileSystem.h +++ b/include/llvm/Support/FileSystem.h @@ -40,7 +40,7 @@ #include #include -#if HAVE_SYS_STAT_H +#ifdef HAVE_SYS_STAT_H #include #endif @@ -280,7 +280,7 @@ error_code create_symlink(const Twine &to, const Twine &from); /// @brief Get the current path. /// /// @param result Holds the current path on return. -/// @results errc::success if the current path has been stored in result, +/// @returns errc::success if the current path has been stored in result, /// otherwise a platform specific error_code. error_code current_path(SmallVectorImpl &result); @@ -289,7 +289,7 @@ error_code current_path(SmallVectorImpl &result); /// @param path Input path. /// @param existed Set to true if \a path existed, false if it did not. /// undefined otherwise. -/// @results errc::success if path has been removed and existed has been +/// @returns errc::success if path has been removed and existed has been /// successfully set, otherwise a platform specific error_code. error_code remove(const Twine &path, bool &existed); @@ -298,7 +298,7 @@ error_code remove(const Twine &path, bool &existed); /// /// @param path Input path. /// @param num_removed Number of files removed. -/// @results errc::success if path has been removed and num_removed has been +/// @returns errc::success if path has been removed and num_removed has been /// successfully set, otherwise a platform specific error_code. error_code remove_all(const Twine &path, uint32_t &num_removed); @@ -323,7 +323,7 @@ error_code resize_file(const Twine &path, uint64_t size); /// @brief Does file exist? /// /// @param status A file_status previously returned from stat. -/// @results True if the file represented by status exists, false if it does +/// @returns True if the file represented by status exists, false if it does /// not. bool exists(file_status status); @@ -332,7 +332,7 @@ bool exists(file_status status); /// @param path Input path. /// @param result Set to true if the file represented by status exists, false if /// it does not. Undefined otherwise. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code exists(const Twine &path, bool &result); @@ -350,7 +350,7 @@ inline bool exists(const Twine &path) { /// /// assert(status_known(A) || status_known(B)); /// -/// @results True if A and B both represent the same file system entity, false +/// @returns True if A and B both represent the same file system entity, false /// otherwise. 
bool equivalent(file_status A, file_status B); @@ -362,7 +362,7 @@ bool equivalent(file_status A, file_status B); /// @param B Input path B. /// @param result Set to true if stat(A) and stat(B) have the same device and /// inode (or equivalent). -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code equivalent(const Twine &A, const Twine &B, bool &result); @@ -384,7 +384,7 @@ error_code file_size(const Twine &path, uint64_t &result); /// @brief Does status represent a directory? /// /// @param status A file_status previously returned from status. -/// @results status.type() == file_type::directory_file. +/// @returns status.type() == file_type::directory_file. bool is_directory(file_status status); /// @brief Is path a directory? @@ -392,14 +392,14 @@ bool is_directory(file_status status); /// @param path Input path. /// @param result Set to true if \a path is a directory, false if it is not. /// Undefined otherwise. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code is_directory(const Twine &path, bool &result); /// @brief Does status represent a regular file? /// /// @param status A file_status previously returned from status. -/// @results status_known(status) && status.type() == file_type::regular_file. +/// @returns status_known(status) && status.type() == file_type::regular_file. bool is_regular_file(file_status status); /// @brief Is path a regular file? @@ -407,7 +407,7 @@ bool is_regular_file(file_status status); /// @param path Input path. /// @param result Set to true if \a path is a regular file, false if it is not. /// Undefined otherwise. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code is_regular_file(const Twine &path, bool &result); @@ -415,7 +415,7 @@ error_code is_regular_file(const Twine &path, bool &result); /// directory, regular file, or symlink? /// /// @param status A file_status previously returned from status. -/// @results exists(s) && !is_regular_file(s) && !is_directory(s) && +/// @returns exists(s) && !is_regular_file(s) && !is_directory(s) && /// !is_symlink(s) bool is_other(file_status status); @@ -425,14 +425,14 @@ bool is_other(file_status status); /// @param path Input path. /// @param result Set to true if \a path exists, but is not a directory, regular /// file, or a symlink, false if it does not. Undefined otherwise. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code is_other(const Twine &path, bool &result); /// @brief Does status represent a symlink? /// /// @param status A file_status previously returned from stat. -/// @param result status.type() == symlink_file. +/// @returns status.type() == symlink_file. bool is_symlink(file_status status); /// @brief Is path a symlink? @@ -440,7 +440,7 @@ bool is_symlink(file_status status); /// @param path Input path. /// @param result Set to true if \a path is a symlink, false if it is not. /// Undefined otherwise. 
-/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code is_symlink(const Twine &path, bool &result); @@ -448,28 +448,28 @@ error_code is_symlink(const Twine &path, bool &result); /// /// @param path Input path. /// @param result Set to the file status. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code status(const Twine &path, file_status &result); /// @brief Modifies permission bits on a file /// /// @param path Input path. -/// @results errc::success if permissions have been changed, otherwise a +/// @returns errc::success if permissions have been changed, otherwise a /// platform specific error_code. error_code permissions(const Twine &path, perms prms); /// @brief Is status available? /// -/// @param path Input path. -/// @results True if status() != status_error. +/// @param s Input file status. +/// @returns True if status() != status_error. bool status_known(file_status s); /// @brief Is status available? /// /// @param path Input path. /// @param result Set to true if status() != status_error. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code status_known(const Twine &path, bool &result); @@ -486,11 +486,11 @@ error_code status_known(const Twine &path, bool &result); /// clang-%%-%%-%%-%%-%%.s => /tmp/clang-a0-b1-c2-d3-e4.s /// /// @param model Name to base unique path off of. -/// @param result_fs Set to the opened file's file descriptor. +/// @param result_fd Set to the opened file's file descriptor. /// @param result_path Set to the opened file's absolute path. -/// @param makeAbsolute If true and @model is not an absolute path, a temp +/// @param makeAbsolute If true and \a model is not an absolute path, a temp /// directory will be prepended. -/// @results errc::success if result_{fd,path} have been successfully set, +/// @returns errc::success if result_{fd,path} have been successfully set, /// otherwise a platform specific error_code. error_code unique_file(const Twine &model, int &result_fd, SmallVectorImpl &result_path, @@ -503,7 +503,7 @@ error_code unique_file(const Twine &model, int &result_fd, /// /// @param path Input path. /// @param result Set to the canonicalized version of \a path. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code canonicalize(const Twine &path, SmallVectorImpl &result); @@ -511,7 +511,7 @@ error_code canonicalize(const Twine &path, SmallVectorImpl &result); /// /// @param path Input path. /// @param magic Byte sequence to compare \a path's first len(magic) bytes to. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code has_magic(const Twine &path, const Twine &magic, bool &result); @@ -522,7 +522,7 @@ error_code has_magic(const Twine &path, const Twine &magic, bool &result); /// @param result Set to the first \a len bytes in the file pointed to by /// \a path. 
Or the entire file if file_size(path) < len, in which /// case result.size() returns the size of the file. -/// @results errc::success if result has been successfully set, +/// @returns errc::success if result has been successfully set, /// errc::value_too_large if len is larger then the file pointed to by /// \a path, otherwise a platform specific error_code. error_code get_magic(const Twine &path, uint32_t len, @@ -535,14 +535,14 @@ file_magic identify_magic(StringRef magic); /// /// @param path Input path. /// @param result Set to the type of file, or LLVMFileType::Unknown_FileType. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code identify_magic(const Twine &path, file_magic &result); /// @brief Get library paths the system linker uses. /// /// @param result Set to the list of system library paths. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code GetSystemLibraryPaths(SmallVectorImpl &result); @@ -550,7 +550,7 @@ error_code GetSystemLibraryPaths(SmallVectorImpl &result); /// + LLVM_LIB_SEARCH_PATH + LLVM_LIBDIR. /// /// @param result Set to the list of bitcode library paths. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code GetBitcodeLibraryPaths(SmallVectorImpl &result); @@ -563,7 +563,7 @@ error_code GetBitcodeLibraryPaths(SmallVectorImpl &result); /// /// @param short_name Library name one would give to the system linker. /// @param result Set to the absolute path \a short_name represents. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code FindLibrary(const Twine &short_name, SmallVectorImpl &result); @@ -572,7 +572,7 @@ error_code FindLibrary(const Twine &short_name, SmallVectorImpl &result); /// @param argv0 The program name as it was spelled on the command line. /// @param MainAddr Address of some symbol in the executable (not in a library). /// @param result Set to the absolute path of the current executable. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code GetMainExecutable(const char *argv0, void *MainAddr, SmallVectorImpl &result); @@ -586,9 +586,9 @@ class mapped_file_region { public: enum mapmode { - readonly, //< May only access map via const_data as read only. - readwrite, //< May access map via data and modify it. Written to path. - priv //< May modify via data, but changes are lost on destruction. + readonly, ///< May only access map via const_data as read only. + readwrite, ///< May access map via data and modify it. Written to path. + priv ///< May modify via data, but changes are lost on destruction. }; private: @@ -596,7 +596,7 @@ private: mapmode Mode; uint64_t Size; void *Mapping; -#if LLVM_ON_WIN32 +#ifdef LLVM_ON_WIN32 int FileDescriptor; void *FileHandle; void *FileMappingHandle; @@ -658,13 +658,13 @@ public: /// /// @param path Path to file to map. /// @param file_offset Byte offset in file where mapping should begin. 
-/// @param size_t Byte length of range of the file to map. +/// @param size Byte length of range of the file to map. /// @param map_writable If true, the file will be mapped in r/w such /// that changes to the mapped buffer will be flushed back /// to the file. If false, the file will be mapped read-only /// and the buffer will be read-only. /// @param result Set to the start address of the mapped buffer. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code map_file_pages(const Twine &path, off_t file_offset, size_t size, bool map_writable, void *&result); @@ -674,7 +674,7 @@ error_code map_file_pages(const Twine &path, off_t file_offset, size_t size, /// /// @param base Pointer to the start of the buffer. /// @param size Byte length of the range to unmmap. -/// @results errc::success if result has been successfully set, otherwise a +/// @returns errc::success if result has been successfully set, otherwise a /// platform specific error_code. error_code unmap_file_pages(void *base, size_t size); diff --git a/include/llvm/Support/Format.h b/include/llvm/Support/Format.h index 59812d9..aaa54e1 100644 --- a/include/llvm/Support/Format.h +++ b/include/llvm/Support/Format.h @@ -170,31 +170,47 @@ public: } }; -/// format - This is a helper function that is used to produce formatted output. -/// This is typically used like: OS << format("%0.4f", myfloat) << '\n'; +/// This is a helper function that is used to produce formatted output. +/// +/// This is typically used like: +/// \code +/// OS << format("%0.4f", myfloat) << '\n'; +/// \endcode template inline format_object1 format(const char *Fmt, const T &Val) { return format_object1(Fmt, Val); } -/// format - This is a helper function that is used to produce formatted output. -/// This is typically used like: OS << format("%0.4f", myfloat) << '\n'; +/// This is a helper function that is used to produce formatted output. +/// +/// This is typically used like: +/// \code +/// OS << format("%0.4f", myfloat) << '\n'; +/// \endcode template inline format_object2 format(const char *Fmt, const T1 &Val1, const T2 &Val2) { return format_object2(Fmt, Val1, Val2); } -/// format - This is a helper function that is used to produce formatted output. -/// This is typically used like: OS << format("%0.4f", myfloat) << '\n'; +/// This is a helper function that is used to produce formatted output. +/// +/// This is typically used like: +/// \code +/// OS << format("%0.4f", myfloat) << '\n'; +/// \endcode template inline format_object3 format(const char *Fmt, const T1 &Val1, const T2 &Val2, const T3 &Val3) { return format_object3(Fmt, Val1, Val2, Val3); } -/// format - This is a helper function that is used to produce formatted output. -/// This is typically used like: OS << format("%0.4f", myfloat) << '\n'; +/// This is a helper function that is used to produce formatted output. +/// +/// This is typically used like: +/// \code +/// OS << format("%0.4f", myfloat) << '\n'; +/// \endcode template inline format_object4 format(const char *Fmt, const T1 &Val1, const T2 &Val2, const T3 &Val3, @@ -202,8 +218,12 @@ inline format_object4 format(const char *Fmt, const T1 &Val1, return format_object4(Fmt, Val1, Val2, Val3, Val4); } -/// format - This is a helper function that is used to produce formatted output. 
-/// This is typically used like: OS << format("%0.4f", myfloat) << '\n'; +/// This is a helper function that is used to produce formatted output. +/// +/// This is typically used like: +/// \code +/// OS << format("%0.4f", myfloat) << '\n'; +/// \endcode template inline format_object5 format(const char *Fmt,const T1 &Val1, const T2 &Val2, const T3 &Val3, diff --git a/include/llvm/Support/FormattedStream.h b/include/llvm/Support/FormattedStream.h index 58a1885..21635dc 100644 --- a/include/llvm/Support/FormattedStream.h +++ b/include/llvm/Support/FormattedStream.h @@ -55,14 +55,15 @@ namespace llvm /// const char *Scanned; - virtual void write_impl(const char *Ptr, size_t Size); + virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE; /// current_pos - Return the current position within the stream, /// not counting the bytes currently in the buffer. - virtual uint64_t current_pos() const { - // This has the same effect as calling TheStream.current_pos(), - // but that interface is private. - return TheStream->tell() - TheStream->GetNumBytesInBuffer(); + virtual uint64_t current_pos() const LLVM_OVERRIDE { + // Our current position in the stream is all the contents which have been + // written to the underlying stream (*not* the current position of the + // underlying stream). + return TheStream->tell(); } /// ComputeColumn - Examine the given output buffer and figure out which diff --git a/include/llvm/Support/GCOV.h b/include/llvm/Support/GCOV.h index 19e1ce8..e552315 100644 --- a/include/llvm/Support/GCOV.h +++ b/include/llvm/Support/GCOV.h @@ -27,13 +27,15 @@ class GCOVBlock; class GCOVLines; class FileInfo; -enum GCOVFormat { - InvalidGCOV, - GCNO_402, - GCNO_404, - GCDA_402, - GCDA_404 -}; +namespace GCOV { + enum GCOVFormat { + InvalidGCOV, + GCNO_402, + GCNO_404, + GCDA_402, + GCDA_404 + }; +} // end GCOV namespace /// GCOVBuffer - A wrapper around MemoryBuffer to provide GCOV specific /// read operations. @@ -42,20 +44,20 @@ public: GCOVBuffer(MemoryBuffer *B) : Buffer(B), Cursor(0) {} /// readGCOVFormat - Read GCOV signature at the beginning of buffer. 
- enum GCOVFormat readGCOVFormat() { + GCOV::GCOVFormat readGCOVFormat() { StringRef Magic = Buffer->getBuffer().slice(0, 12); Cursor = 12; if (Magic == "oncg*404MVLL") - return GCNO_404; + return GCOV::GCNO_404; else if (Magic == "oncg*204MVLL") - return GCNO_402; + return GCOV::GCNO_402; else if (Magic == "adcg*404MVLL") - return GCDA_404; + return GCOV::GCDA_404; else if (Magic == "adcg*204MVLL") - return GCDA_402; + return GCOV::GCDA_402; Cursor = 0; - return InvalidGCOV; + return GCOV::InvalidGCOV; } /// readFunctionTag - If cursor points to a function tag then increment the @@ -128,7 +130,7 @@ public: StringRef Str = Buffer->getBuffer().slice(Cursor, Cursor+4); assert (Str.empty() == false && "Unexpected memory buffer end!"); Cursor += 4; - Result = *(uint32_t *)(Str.data()); + Result = *(const uint32_t *)(Str.data()); return Result; } @@ -170,7 +172,7 @@ class GCOVFunction { public: GCOVFunction() : Ident(0), LineNumber(0) {} ~GCOVFunction(); - bool read(GCOVBuffer &Buffer, GCOVFormat Format); + bool read(GCOVBuffer &Buffer, GCOV::GCOVFormat Format); void dump(); void collectLineCounts(FileInfo &FI); private: diff --git a/include/llvm/Support/InstVisitor.h b/include/llvm/Support/InstVisitor.h index 109b3cf..6dfb4de 100644 --- a/include/llvm/Support/InstVisitor.h +++ b/include/llvm/Support/InstVisitor.h @@ -209,6 +209,9 @@ public: RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); } RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); } RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); } + RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); } + RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); } + RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); } RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); } // Call and Invoke are slightly different as they delegate first through @@ -262,6 +265,9 @@ private: case Intrinsic::memcpy: DELEGATE(MemCpyInst); case Intrinsic::memmove: DELEGATE(MemMoveInst); case Intrinsic::memset: DELEGATE(MemSetInst); + case Intrinsic::vastart: DELEGATE(VAStartInst); + case Intrinsic::vaend: DELEGATE(VAEndInst); + case Intrinsic::vacopy: DELEGATE(VACopyInst); case Intrinsic::not_intrinsic: break; } } diff --git a/include/llvm/Support/IntegersSubset.h b/include/llvm/Support/IntegersSubset.h index bb9e769..03039fd 100644 --- a/include/llvm/Support/IntegersSubset.h +++ b/include/llvm/Support/IntegersSubset.h @@ -411,8 +411,8 @@ public: unsigned getSize() const { APInt sz(((const APInt&)getItem(0).getLow()).getBitWidth(), 0); for (unsigned i = 0, e = getNumItems(); i != e; ++i) { - const APInt &Low = getItem(i).getLow(); - const APInt &High = getItem(i).getHigh(); + const APInt Low = getItem(i).getLow(); + const APInt High = getItem(i).getHigh(); APInt S = High - Low + 1; sz += S; } @@ -426,8 +426,8 @@ public: APInt getSingleValue(unsigned idx) const { APInt sz(((const APInt&)getItem(0).getLow()).getBitWidth(), 0); for (unsigned i = 0, e = getNumItems(); i != e; ++i) { - const APInt &Low = getItem(i).getLow(); - const APInt &High = getItem(i).getHigh(); + const APInt Low = getItem(i).getLow(); + const APInt High = getItem(i).getHigh(); APInt S = High - Low + 1; APInt oldSz = sz; sz += S; diff --git a/include/llvm/Support/IntegersSubsetMapping.h b/include/llvm/Support/IntegersSubsetMapping.h index cab18dc..7635d5e 100644 --- a/include/llvm/Support/IntegersSubsetMapping.h +++ b/include/llvm/Support/IntegersSubsetMapping.h @@ -42,6 +42,7 @@ public: struct RangeEx 
: public RangeTy { RangeEx() : Weight(1) {} RangeEx(const RangeTy &R) : RangeTy(R), Weight(1) {} + RangeEx(const RangeTy &R, unsigned W) : RangeTy(R), Weight(W) {} RangeEx(const IntTy &C) : RangeTy(C), Weight(1) {} RangeEx(const IntTy &L, const IntTy &H) : RangeTy(L, H), Weight(1) {} RangeEx(const IntTy &L, const IntTy &H, unsigned W) : @@ -316,13 +317,13 @@ public: Items.clear(); const IntTy *Low = &OldItems.begin()->first.getLow(); const IntTy *High = &OldItems.begin()->first.getHigh(); - unsigned Weight = 1; + unsigned Weight = OldItems.begin()->first.Weight; SuccessorClass *Successor = OldItems.begin()->second; for (CaseItemIt j = OldItems.begin(), i = j++, e = OldItems.end(); j != e; i = j++) { if (isJoinable(i, j)) { const IntTy *CurHigh = &j->first.getHigh(); - ++Weight; + Weight += j->first.Weight; if (*CurHigh > *High) High = CurHigh; } else { @@ -330,7 +331,7 @@ public: add(R, Successor); Low = &j->first.getLow(); High = &j->first.getHigh(); - Weight = 1; + Weight = j->first.Weight; Successor = j->second; } } @@ -362,10 +363,17 @@ public: /// Adds all ranges and values from given ranges set to the current /// mapping. - void add(const IntegersSubsetTy &CRS, SuccessorClass *S = 0) { + void add(const IntegersSubsetTy &CRS, SuccessorClass *S = 0, + unsigned Weight = 0) { + unsigned ItemWeight = 1; + if (Weight) + // Weight is associated with CRS, for now we perform a division to + // get the weight for each item. + ItemWeight = Weight / CRS.getNumItems(); for (unsigned i = 0, e = CRS.getNumItems(); i < e; ++i) { RangeTy R = CRS.getItem(i); - add(R, S); + RangeEx REx(R, ItemWeight); + add(REx, S); } } diff --git a/include/llvm/Support/LEB128.h b/include/llvm/Support/LEB128.h index 410edd4..b52e5bc 100644 --- a/include/llvm/Support/LEB128.h +++ b/include/llvm/Support/LEB128.h @@ -15,7 +15,7 @@ #ifndef LLVM_SYSTEM_LEB128_H #define LLVM_SYSTEM_LEB128_H -#include +#include "llvm/Support/raw_ostream.h" namespace llvm { diff --git a/include/llvm/Support/LockFileManager.h b/include/llvm/Support/LockFileManager.h index e2fa8eb..8c4a760 100644 --- a/include/llvm/Support/LockFileManager.h +++ b/include/llvm/Support/LockFileManager.h @@ -47,8 +47,8 @@ private: Optional > Owner; Optional Error; - LockFileManager(const LockFileManager &); - LockFileManager &operator=(const LockFileManager &); + LockFileManager(const LockFileManager &) LLVM_DELETED_FUNCTION; + LockFileManager &operator=(const LockFileManager &) LLVM_DELETED_FUNCTION; static Optional > readLockFile(StringRef LockFileName); diff --git a/include/llvm/Support/MathExtras.h b/include/llvm/Support/MathExtras.h index 4005161..11f9e63 100644 --- a/include/llvm/Support/MathExtras.h +++ b/include/llvm/Support/MathExtras.h @@ -431,21 +431,22 @@ inline uint64_t NextPowerOf2(uint64_t A) { return A + 1; } -/// RoundUpToAlignment - Returns the next integer (mod 2**64) that is -/// greater than or equal to \arg Value and is a multiple of \arg -/// Align. Align must be non-zero. +/// Returns the next integer (mod 2**64) that is greater than or equal to +/// \p Value and is a multiple of \p Align. \p Align must be non-zero. 
/// /// Examples: -/// RoundUpToAlignment(5, 8) = 8 -/// RoundUpToAlignment(17, 8) = 24 -/// RoundUpToAlignment(~0LL, 8) = 0 +/// \code +/// RoundUpToAlignment(5, 8) = 8 +/// RoundUpToAlignment(17, 8) = 24 +/// RoundUpToAlignment(~0LL, 8) = 0 +/// \endcode inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) { return ((Value + Align - 1) / Align) * Align; } -/// OffsetToAlignment - Return the offset to the next integer (mod 2**64) that -/// is greater than or equal to \arg Value and is a multiple of \arg -/// Align. Align must be non-zero. +/// Returns the offset to the next integer (mod 2**64) that is greater than +/// or equal to \p Value and is a multiple of \p Align. \p Align must be +/// non-zero. inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) { return RoundUpToAlignment(Value, Align) - Value; } @@ -463,12 +464,24 @@ template inline int32_t SignExtend32(uint32_t x) { return int32_t(x << (32 - B)) >> (32 - B); } +/// \brief Sign extend number in the bottom B bits of X to a 32-bit int. +/// Requires 0 < B <= 32. +inline int32_t SignExtend32(uint32_t X, unsigned B) { + return int32_t(X << (32 - B)) >> (32 - B); +} + /// SignExtend64 - Sign extend B-bit number x to 64-bit int. /// Usage int64_t r = SignExtend64<5>(x); template inline int64_t SignExtend64(uint64_t x) { return int64_t(x << (64 - B)) >> (64 - B); } +/// \brief Sign extend number in the bottom B bits of X to a 64-bit int. +/// Requires 0 < B <= 64. +inline int64_t SignExtend64(uint64_t X, unsigned B) { + return int64_t(X << (64 - B)) >> (64 - B); +} + } // End llvm namespace #endif diff --git a/include/llvm/Support/Memory.h b/include/llvm/Support/Memory.h index 37890e7..025eee7 100644 --- a/include/llvm/Support/Memory.h +++ b/include/llvm/Support/Memory.h @@ -15,6 +15,7 @@ #define LLVM_SYSTEM_MEMORY_H #include "llvm/Support/DataTypes.h" +#include "llvm/Support/system_error.h" #include namespace llvm { @@ -43,6 +44,70 @@ namespace sys { /// @brief An abstraction for memory operations. class Memory { public: + enum ProtectionFlags { + MF_READ = 0x1000000, + MF_WRITE = 0x2000000, + MF_EXEC = 0x4000000 + }; + + /// This method allocates a block of memory that is suitable for loading + /// dynamically generated code (e.g. JIT). An attempt to allocate + /// \p NumBytes bytes of virtual memory is made. + /// \p NearBlock may point to an existing allocation in which case + /// an attempt is made to allocate more memory near the existing block. + /// The actual allocated address is not guaranteed to be near the requested + /// address. + /// \p Flags is used to set the initial protection flags for the block + /// of the memory. + /// \p EC [out] returns an object describing any error that occurs. + /// + /// This method may allocate more than the number of bytes requested. The + /// actual number of bytes allocated is indicated in the returned + /// MemoryBlock. + /// + /// The start of the allocated block must be aligned with the + /// system allocation granularity (64K on Windows, page size on Linux). + /// If the address following \p NearBlock is not so aligned, it will be + /// rounded up to the next allocation granularity boundary. + /// + /// \r a non-null MemoryBlock if the function was successful, + /// otherwise a null MemoryBlock is with \p EC describing the error. + /// + /// @brief Allocate mapped memory. 
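// Quick sanity checks (not from the patch) for the helpers documented above; the
// new runtime-width SignExtend32/SignExtend64 overloads mirror the templates.
#include "llvm/Support/MathExtras.h"
#include <assert.h>

void mathExtrasExamples() {
  assert(llvm::RoundUpToAlignment(17, 8) == 24);
  assert(llvm::OffsetToAlignment(17, 8) == 7);    // 24 - 17
  assert(llvm::SignExtend32(0x1F, 5) == -1);      // 0b11111 in 5 bits
  assert(llvm::SignExtend64(0x10, 5) == -16);     // 0b10000 in 5 bits
}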
+ static MemoryBlock allocateMappedMemory(size_t NumBytes, + const MemoryBlock *const NearBlock, + unsigned Flags, + error_code &EC); + + /// This method releases a block of memory that was allocated with the + /// allocateMappedMemory method. It should not be used to release any + /// memory block allocated any other way. + /// \p Block describes the memory to be released. + /// + /// \r error_success if the function was successful, or an error_code + /// describing the failure if an error occurred. + /// + /// @brief Release mapped memory. + static error_code releaseMappedMemory(MemoryBlock &Block); + + /// This method sets the protection flags for a block of memory to the + /// state specified by /p Flags. The behavior is not specified if the + /// memory was not allocated using the allocateMappedMemory method. + /// \p Block describes the memory block to be protected. + /// \p Flags specifies the new protection state to be assigned to the block. + /// \p ErrMsg [out] returns a string describing any error that occured. + /// + /// If \p Flags is MF_WRITE, the actual behavior varies + /// with the operating system (i.e. MF_READ | MF_WRITE on Windows) and the + /// target architecture (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386). + /// + /// \r error_success if the function was successful, or an error_code + /// describing the failure if an error occurred. + /// + /// @brief Set memory protection state. + static error_code protectMappedMemory(const MemoryBlock &Block, + unsigned Flags); + /// This method allocates a block of Read/Write/Execute memory that is /// suitable for executing dynamically generated code (e.g. JIT). An /// attempt to allocate \p NumBytes bytes of virtual memory is made. diff --git a/include/llvm/Support/MemoryBuffer.h b/include/llvm/Support/MemoryBuffer.h index 06816de..1f02907 100644 --- a/include/llvm/Support/MemoryBuffer.h +++ b/include/llvm/Support/MemoryBuffer.h @@ -15,6 +15,7 @@ #define LLVM_SUPPORT_MEMORYBUFFER_H #include "llvm/ADT/StringRef.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/DataTypes.h" namespace llvm { @@ -36,8 +37,8 @@ class MemoryBuffer { const char *BufferStart; // Start of the buffer. const char *BufferEnd; // End of the buffer. - MemoryBuffer(const MemoryBuffer &); // DO NOT IMPLEMENT - MemoryBuffer &operator=(const MemoryBuffer &); // DO NOT IMPLEMENT + MemoryBuffer(const MemoryBuffer &) LLVM_DELETED_FUNCTION; + MemoryBuffer &operator=(const MemoryBuffer &) LLVM_DELETED_FUNCTION; protected: MemoryBuffer() {} void init(const char *BufStart, const char *BufEnd, diff --git a/include/llvm/Support/Mutex.h b/include/llvm/Support/Mutex.h index 42ea630..6abc533 100644 --- a/include/llvm/Support/Mutex.h +++ b/include/llvm/Support/Mutex.h @@ -14,6 +14,7 @@ #ifndef LLVM_SYSTEM_MUTEX_H #define LLVM_SYSTEM_MUTEX_H +#include "llvm/Support/Compiler.h" #include "llvm/Support/Threading.h" #include @@ -75,8 +76,8 @@ namespace llvm /// @name Do Not Implement /// @{ private: - MutexImpl(const MutexImpl & original); - void operator=(const MutexImpl &); + MutexImpl(const MutexImpl &) LLVM_DELETED_FUNCTION; + void operator=(const MutexImpl &) LLVM_DELETED_FUNCTION; /// @} }; diff --git a/include/llvm/Support/MutexGuard.h b/include/llvm/Support/MutexGuard.h index cd13bfe..6bb1622 100644 --- a/include/llvm/Support/MutexGuard.h +++ b/include/llvm/Support/MutexGuard.h @@ -26,8 +26,8 @@ namespace llvm { /// @brief Guard a section of code with a Mutex. 
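// A minimal, hedged sketch of the interface declared above (the size and overall
// flow are assumptions, error handling trimmed): allocate writable memory, emit
// code into it, then flip the protection to read+execute before running it.
#include "llvm/Support/Memory.h"

void jitSketch() {
  using namespace llvm;
  using namespace llvm::sys;
  error_code EC;
  MemoryBlock MB = Memory::allocateMappedMemory(
      4096, 0 /*NearBlock*/, Memory::MF_READ | Memory::MF_WRITE, EC);
  if (EC)
    return;
  // ... copy generated code to MB.base(); MB.size() bytes are available ...
  Memory::protectMappedMemory(MB, Memory::MF_READ | Memory::MF_EXEC);
  // ... call into the code, then hand the block back ...
  Memory::releaseMappedMemory(MB);
}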
class MutexGuard { sys::Mutex &M; - MutexGuard(const MutexGuard &); // DO NOT IMPLEMENT - void operator=(const MutexGuard &); // DO NOT IMPLEMENT + MutexGuard(const MutexGuard &) LLVM_DELETED_FUNCTION; + void operator=(const MutexGuard &) LLVM_DELETED_FUNCTION; public: MutexGuard(sys::Mutex &m) : M(m) { M.acquire(); } ~MutexGuard() { M.release(); } diff --git a/include/llvm/Support/PathV1.h b/include/llvm/Support/PathV1.h index f4bedf9..643ee8c 100644 --- a/include/llvm/Support/PathV1.h +++ b/include/llvm/Support/PathV1.h @@ -683,8 +683,8 @@ namespace sys { /// This function returns status information about the file. The type of /// path (file or directory) is updated to reflect the actual contents /// of the file system. - /// @returns 0 on failure, with Error explaining why (if non-zero) - /// @returns a pointer to a FileStatus structure on success. + /// @returns 0 on failure, with Error explaining why (if non-zero), + /// otherwise returns a pointer to a FileStatus structure on success. /// @brief Get file status. const FileStatus *getFileStatus( bool forceUpdate = false, ///< Force an update from the file system diff --git a/include/llvm/Support/PathV2.h b/include/llvm/Support/PathV2.h index 8d79709..ae1a21c 100644 --- a/include/llvm/Support/PathV2.h +++ b/include/llvm/Support/PathV2.h @@ -39,13 +39,14 @@ namespace path { /// The backwards traversal order is the reverse of forward traversal. /// /// Iteration examples. Each component is separated by ',': -/// / => / -/// /foo => /,foo -/// foo/ => foo,. -/// /foo/bar => /,foo,bar -/// ../ => ..,. -/// C:\foo\bar => C:,/,foo,bar -/// +/// @code +/// / => / +/// /foo => /,foo +/// foo/ => foo,. +/// /foo/bar => /,foo,bar +/// ../ => ..,. +/// C:\foo\bar => C:,/,foo,bar +/// @endcode class const_iterator { StringRef Path; ///< The entire path. StringRef Component; ///< The current component. Not necessarily in Path. @@ -107,18 +108,22 @@ inline reverse_iterator rend(StringRef path) { /// @brief Remove the last component from \a path unless it is the root dir. /// -/// directory/filename.cpp => directory/ -/// directory/ => directory -/// / => / +/// @code +/// directory/filename.cpp => directory/ +/// directory/ => directory +/// / => / +/// @endcode /// /// @param path A path that is modified to not have a file component. void remove_filename(SmallVectorImpl &path); /// @brief Replace the file extension of \a path with \a extension. /// -/// ./filename.cpp => ./filename.extension -/// ./filename => ./filename.extension -/// ./ => ./.extension +/// @code +/// ./filename.cpp => ./filename.extension +/// ./filename => ./filename.extension +/// ./ => ./.extension +/// @endcode /// /// @param path A path that has its extension replaced with \a extension. /// @param extension The extension to be added. It may be empty. It may also @@ -128,12 +133,14 @@ void replace_extension(SmallVectorImpl &path, const Twine &extension); /// @brief Append to path. /// -/// /foo + bar/f => /foo/bar/f -/// /foo/ + bar/f => /foo/bar/f -/// foo + bar/f => foo/bar/f +/// @code +/// /foo + bar/f => /foo/bar/f +/// /foo/ + bar/f => /foo/bar/f +/// foo + bar/f => foo/bar/f +/// @endcode /// /// @param path Set to \a path + \a component. -/// @param component The component to be appended to \a path. +/// @param a The component to be appended to \a path. void append(SmallVectorImpl &path, const Twine &a, const Twine &b = "", const Twine &c = "", @@ -141,9 +148,11 @@ void append(SmallVectorImpl &path, const Twine &a, /// @brief Append to path. 
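// Not part of the patch: a small reminder of how MutexGuard is used, since its
// copy operations are now deleted outright under C++11.
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"

static llvm::sys::Mutex CounterLock;
static unsigned Counter;

void bumpCounter() {
  llvm::MutexGuard Guard(CounterLock);  // acquires here, releases at scope exit
  ++Counter;
}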
/// -/// /foo + [bar,f] => /foo/bar/f -/// /foo/ + [bar,f] => /foo/bar/f -/// foo + [bar,f] => foo/bar/f +/// @code +/// /foo + [bar,f] => /foo/bar/f +/// /foo/ + [bar,f] => /foo/bar/f +/// foo + [bar,f] => foo/bar/f +/// @endcode /// /// @param path Set to \a path + [\a begin, \a end). /// @param begin Start of components to append. @@ -169,9 +178,11 @@ void native(const Twine &path, SmallVectorImpl &result); /// @brief Get root name. /// -/// //net/hello => //net -/// c:/hello => c: (on Windows, on other platforms nothing) -/// /hello => +/// @code +/// //net/hello => //net +/// c:/hello => c: (on Windows, on other platforms nothing) +/// /hello => +/// @endcode /// /// @param path Input path. /// @result The root name of \a path if it has one, otherwise "". @@ -179,9 +190,11 @@ const StringRef root_name(StringRef path); /// @brief Get root directory. /// -/// /goo/hello => / -/// c:/hello => / -/// d/file.txt => +/// @code +/// /goo/hello => / +/// c:/hello => / +/// d/file.txt => +/// @endcode /// /// @param path Input path. /// @result The root directory of \a path if it has one, otherwise @@ -198,9 +211,11 @@ const StringRef root_path(StringRef path); /// @brief Get relative path. /// -/// C:\hello\world => hello\world -/// foo/bar => foo/bar -/// /foo/bar => foo/bar +/// @code +/// C:\hello\world => hello\world +/// foo/bar => foo/bar +/// /foo/bar => foo/bar +/// @endcode /// /// @param path Input path. /// @result The path starting after root_path if one exists, otherwise "". @@ -208,9 +223,11 @@ const StringRef relative_path(StringRef path); /// @brief Get parent path. /// -/// / => -/// /foo => / -/// foo/../bar => foo/.. +/// @code +/// / => +/// /foo => / +/// foo/../bar => foo/.. +/// @endcode /// /// @param path Input path. /// @result The parent path of \a path if one exists, otherwise "". @@ -218,10 +235,12 @@ const StringRef parent_path(StringRef path); /// @brief Get filename. /// -/// /foo.txt => foo.txt -/// . => . -/// .. => .. -/// / => / +/// @code +/// /foo.txt => foo.txt +/// . => . +/// .. => .. +/// / => / +/// @endcode /// /// @param path Input path. /// @result The filename part of \a path. This is defined as the last component @@ -234,11 +253,13 @@ const StringRef filename(StringRef path); /// substring of filename ending at (but not including) the last dot. Otherwise /// it is filename. /// -/// /foo/bar.txt => bar -/// /foo/bar => bar -/// /foo/.txt => -/// /foo/. => . -/// /foo/.. => .. +/// @code +/// /foo/bar.txt => bar +/// /foo/bar => bar +/// /foo/.txt => +/// /foo/. => . +/// /foo/.. => .. +/// @endcode /// /// @param path Input path. /// @result The stem of \a path. @@ -250,9 +271,11 @@ const StringRef stem(StringRef path); /// substring of filename starting at (and including) the last dot, and ending /// at the end of \a path. Otherwise "". /// -/// /foo/bar.txt => .txt -/// /foo/bar => -/// /foo/.txt => .txt +/// @code +/// /foo/bar.txt => .txt +/// /foo/bar => +/// /foo/.txt => .txt +/// @endcode /// /// @param path Input path. /// @result The extension of \a path. @@ -272,7 +295,7 @@ bool is_separator(char value); /// ignored if the user or system has set the typical environment variable /// (e.g., TEMP on Windows, TMPDIR on *nix) to specify a temporary directory. /// -/// @param Result Holds the resulting path name. +/// @param result Holds the resulting path name. void system_temp_directory(bool erasedOnReboot, SmallVectorImpl &result); /// @brief Has root name? 
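// Hedged usage sketch of the llvm::sys::path helpers documented above; the input
// strings are arbitrary.
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/PathV2.h"

void pathExamples() {
  using namespace llvm;
  StringRef P = "/foo/bar.txt";
  StringRef Name = sys::path::filename(P);    // "bar.txt"
  StringRef Stem = sys::path::stem(P);        // "bar"
  StringRef Ext  = sys::path::extension(P);   // ".txt"
  (void)Name; (void)Stem; (void)Ext;

  SmallString<128> Out("/tmp");
  sys::path::append(Out, "obj", "bar.o");     // "/tmp/obj/bar.o"
  sys::path::replace_extension(Out, "ll");    // "/tmp/obj/bar.ll"
}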
diff --git a/include/llvm/Support/PrettyStackTrace.h b/include/llvm/Support/PrettyStackTrace.h index 9b3ecda..2122e06 100644 --- a/include/llvm/Support/PrettyStackTrace.h +++ b/include/llvm/Support/PrettyStackTrace.h @@ -16,6 +16,8 @@ #ifndef LLVM_SUPPORT_PRETTYSTACKTRACE_H #define LLVM_SUPPORT_PRETTYSTACKTRACE_H +#include "llvm/Support/Compiler.h" + namespace llvm { class raw_ostream; @@ -32,8 +34,8 @@ namespace llvm { /// virtual stack trace. This gets dumped out if the program crashes. class PrettyStackTraceEntry { const PrettyStackTraceEntry *NextEntry; - PrettyStackTraceEntry(const PrettyStackTraceEntry &); // DO NOT IMPLEMENT - void operator=(const PrettyStackTraceEntry&); // DO NOT IMPLEMENT + PrettyStackTraceEntry(const PrettyStackTraceEntry &) LLVM_DELETED_FUNCTION; + void operator=(const PrettyStackTraceEntry&) LLVM_DELETED_FUNCTION; public: PrettyStackTraceEntry(); virtual ~PrettyStackTraceEntry(); @@ -52,7 +54,7 @@ namespace llvm { const char *Str; public: PrettyStackTraceString(const char *str) : Str(str) {} - virtual void print(raw_ostream &OS) const; + virtual void print(raw_ostream &OS) const LLVM_OVERRIDE; }; /// PrettyStackTraceProgram - This object prints a specified program arguments @@ -63,7 +65,7 @@ namespace llvm { public: PrettyStackTraceProgram(int argc, const char * const*argv) : ArgC(argc), ArgV(argv) {} - virtual void print(raw_ostream &OS) const; + virtual void print(raw_ostream &OS) const LLVM_OVERRIDE; }; } // end namespace llvm diff --git a/include/llvm/Support/Program.h b/include/llvm/Support/Program.h index a85f235..7c9a951 100644 --- a/include/llvm/Support/Program.h +++ b/include/llvm/Support/Program.h @@ -34,8 +34,8 @@ namespace sys { void *Data_; // Noncopyable. - Program(const Program& other); - Program& operator=(const Program& other); + Program(const Program& other) LLVM_DELETED_FUNCTION; + Program& operator=(const Program& other) LLVM_DELETED_FUNCTION; /// @name Methods /// @{ diff --git a/include/llvm/Support/RWMutex.h b/include/llvm/Support/RWMutex.h index 0d4cb81..935b307 100644 --- a/include/llvm/Support/RWMutex.h +++ b/include/llvm/Support/RWMutex.h @@ -14,6 +14,7 @@ #ifndef LLVM_SYSTEM_RWMUTEX_H #define LLVM_SYSTEM_RWMUTEX_H +#include "llvm/Support/Compiler.h" #include "llvm/Support/Threading.h" #include @@ -75,8 +76,8 @@ namespace llvm /// @name Do Not Implement /// @{ private: - RWMutexImpl(const RWMutexImpl & original); - void operator=(const RWMutexImpl &); + RWMutexImpl(const RWMutexImpl & original) LLVM_DELETED_FUNCTION; + void operator=(const RWMutexImpl &) LLVM_DELETED_FUNCTION; /// @} }; diff --git a/include/llvm/Support/Regex.h b/include/llvm/Support/Regex.h index 7648e77..ffe09b1 100644 --- a/include/llvm/Support/Regex.h +++ b/include/llvm/Support/Regex.h @@ -36,7 +36,7 @@ namespace llvm { Newline=2 }; - /// Compiles the given POSIX Extended Regular Expression \arg Regex. + /// Compiles the given POSIX Extended Regular Expression \p Regex. /// This implementation supports regexes and matching strings with embedded /// NUL characters. Regex(StringRef Regex, unsigned Flags = NoFlags); @@ -51,17 +51,17 @@ namespace llvm { /// many entries plus one for the whole regex (as element 0). unsigned getNumMatches() const; - /// matches - Match the regex against a given \arg String. + /// matches - Match the regex against a given \p String. 
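// Not from the patch: the idiom these hunks converge on. Both macros live in
// llvm/Support/Compiler.h; with a C++11 compiler they expand to "= delete" and
// "override", and degrade to nothing on older compilers, which leaves the
// traditional private-and-unimplemented pattern in place.
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"

class Base {
public:
  virtual ~Base() {}
  virtual void print(llvm::raw_ostream &OS) const = 0;
};

class Leaf : public Base {
  Leaf(const Leaf &) LLVM_DELETED_FUNCTION;             // no copying
  void operator=(const Leaf &) LLVM_DELETED_FUNCTION;   // no assignment
public:
  Leaf() {}
  virtual void print(llvm::raw_ostream &OS) const LLVM_OVERRIDE {
    OS << "leaf\n";
  }
};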
/// /// \param Matches - If given, on a successful match this will be filled in - /// with references to the matched group expressions (inside \arg String), + /// with references to the matched group expressions (inside \p String), /// the first group is always the entire pattern. /// /// This returns true on a successful match. bool match(StringRef String, SmallVectorImpl *Matches = 0); /// sub - Return the result of replacing the first match of the regex in - /// \arg String with the \arg Repl string. Backreferences like "\0" in the + /// \p String with the \p Repl string. Backreferences like "\0" in the /// replacement string are replaced with the appropriate match substring. /// /// Note that the replacement string has backslash escaping performed on diff --git a/include/llvm/Support/Registry.h b/include/llvm/Support/Registry.h index d0375be..29eafb6 100644 --- a/include/llvm/Support/Registry.h +++ b/include/llvm/Support/Registry.h @@ -37,7 +37,7 @@ namespace llvm { /// is necessary to define an alternate traits class. template class RegistryTraits { - RegistryTraits(); // Do not implement. + RegistryTraits() LLVM_DELETED_FUNCTION; public: typedef SimpleRegistryEntry entry; @@ -63,7 +63,7 @@ namespace llvm { class iterator; private: - Registry(); // Do not implement. + Registry() LLVM_DELETED_FUNCTION; static void Announce(const entry &E) { for (listener *Cur = ListenerHead; Cur; Cur = Cur->Next) @@ -120,6 +120,7 @@ namespace llvm { /// Abstract base class for registry listeners, which are informed when new /// entries are added to the registry. Simply subclass and instantiate: /// + /// \code /// class CollectorPrinter : public Registry::listener { /// protected: /// void registered(const Registry::entry &e) { @@ -131,7 +132,7 @@ namespace llvm { /// }; /// /// CollectorPrinter Printer; - /// + /// \endcode class listener { listener *Prev, *Next; diff --git a/include/llvm/Support/SourceMgr.h b/include/llvm/Support/SourceMgr.h index 8949a3a..bcf95f2 100644 --- a/include/llvm/Support/SourceMgr.h +++ b/include/llvm/Support/SourceMgr.h @@ -64,9 +64,9 @@ private: DiagHandlerTy DiagHandler; void *DiagContext; - - SourceMgr(const SourceMgr&); // DO NOT IMPLEMENT - void operator=(const SourceMgr&); // DO NOT IMPLEMENT + + SourceMgr(const SourceMgr&) LLVM_DELETED_FUNCTION; + void operator=(const SourceMgr&) LLVM_DELETED_FUNCTION; public: SourceMgr() : LineNoCache(0), DiagHandler(0), DiagContext(0) {} ~SourceMgr(); @@ -145,7 +145,7 @@ public: /// GetMessage - Return an SMDiagnostic at the specified location with the /// specified string. /// - /// @param Type - If non-null, the kind of message (e.g., "error") which is + /// @param Msg If non-null, the kind of message (e.g., "error") which is /// prefixed to the message. 
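// A hedged usage sketch for the Regex interface documented above; the pattern
// and strings are arbitrary.
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Regex.h"

void regexExample() {
  llvm::Regex R("^([a-z]+)-([0-9]+)$");
  llvm::SmallVector<llvm::StringRef, 4> Matches;
  if (R.match("core-42", &Matches)) {
    // Matches[0] == "core-42", Matches[1] == "core", Matches[2] == "42"
  }
  std::string Rewritten = R.sub("\\2/\\1", "core-42");  // "42/core"
  (void)Rewritten;
}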
SMDiagnostic GetMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg, ArrayRef Ranges = ArrayRef()) const; diff --git a/include/llvm/Support/StreamableMemoryObject.h b/include/llvm/Support/StreamableMemoryObject.h index 531dbb2..a2b4bcb 100644 --- a/include/llvm/Support/StreamableMemoryObject.h +++ b/include/llvm/Support/StreamableMemoryObject.h @@ -12,6 +12,7 @@ #define STREAMABLEMEMORYOBJECT_H_ #include "llvm/ADT/OwningPtr.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/MemoryObject.h" #include "llvm/Support/DataStream.h" #include @@ -107,14 +108,15 @@ class StreamableMemoryObject : public MemoryObject { class StreamingMemoryObject : public StreamableMemoryObject { public: StreamingMemoryObject(DataStreamer *streamer); - virtual uint64_t getBase() const { return 0; } - virtual uint64_t getExtent() const; - virtual int readByte(uint64_t address, uint8_t* ptr) const; + virtual uint64_t getBase() const LLVM_OVERRIDE { return 0; } + virtual uint64_t getExtent() const LLVM_OVERRIDE; + virtual int readByte(uint64_t address, uint8_t* ptr) const LLVM_OVERRIDE; virtual int readBytes(uint64_t address, uint64_t size, uint8_t* buf, - uint64_t* copied) const ; - virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const { + uint64_t* copied) const LLVM_OVERRIDE; + virtual const uint8_t *getPointer(uint64_t address, + uint64_t size) const LLVM_OVERRIDE { // This could be fixed by ensuring the bytes are fetched and making a copy, // requiring that the bitcode size be known, or otherwise ensuring that // the memory doesn't go away/get reallocated, but it's @@ -122,8 +124,8 @@ public: assert(0 && "getPointer in streaming memory objects not allowed"); return NULL; } - virtual bool isValidAddress(uint64_t address) const; - virtual bool isObjectEnd(uint64_t address) const; + virtual bool isValidAddress(uint64_t address) const LLVM_OVERRIDE; + virtual bool isObjectEnd(uint64_t address) const LLVM_OVERRIDE; /// Drop s bytes from the front of the stream, pushing the positions of the /// remaining bytes down by s. This is used to skip past the bitcode header, @@ -170,8 +172,8 @@ private: return true; } - StreamingMemoryObject(const StreamingMemoryObject&); // DO NOT IMPLEMENT - void operator=(const StreamingMemoryObject&); // DO NOT IMPLEMENT + StreamingMemoryObject(const StreamingMemoryObject&) LLVM_DELETED_FUNCTION; + void operator=(const StreamingMemoryObject&) LLVM_DELETED_FUNCTION; }; StreamableMemoryObject *getNonStreamedMemoryObject( diff --git a/include/llvm/Support/TargetFolder.h b/include/llvm/Support/TargetFolder.h index c65faa6..45f7816 100644 --- a/include/llvm/Support/TargetFolder.h +++ b/include/llvm/Support/TargetFolder.h @@ -26,11 +26,11 @@ namespace llvm { -class TargetData; +class DataLayout; /// TargetFolder - Create constants with target dependent folding. class TargetFolder { - const TargetData *TD; + const DataLayout *TD; /// Fold - Fold the constant using target specific information. 
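// Hedged sketch of emitting a diagnostic through the interface above; the buffer
// setup and the DK_Error kind are assumptions about the surrounding API.
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

void diagExample() {
  using namespace llvm;
  SourceMgr SM;
  unsigned ID = SM.AddNewSourceBuffer(
      MemoryBuffer::getMemBuffer("let x = 1\n", "input"), SMLoc());
  SMLoc Loc = SMLoc::getFromPointer(SM.getMemoryBuffer(ID)->getBufferStart());
  SMDiagnostic D = SM.GetMessage(Loc, SourceMgr::DK_Error, "unexpected 'let'");
  D.print("mytool", errs());
}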
Constant *Fold(Constant *C) const { @@ -41,7 +41,7 @@ class TargetFolder { } public: - explicit TargetFolder(const TargetData *TheTD) : TD(TheTD) {} + explicit TargetFolder(const DataLayout *TheTD) : TD(TheTD) {} //===--------------------------------------------------------------------===// // Binary Operators @@ -177,7 +177,14 @@ public: return Fold(ConstantExpr::getIntegerCast(C, DestTy, isSigned)); } Constant *CreatePointerCast(Constant *C, Type *DestTy) const { - return ConstantExpr::getPointerCast(C, DestTy); + if (C->getType() == DestTy) + return C; // avoid calling Fold + return Fold(ConstantExpr::getPointerCast(C, DestTy)); + } + Constant *CreateFPCast(Constant *C, Type *DestTy) const { + if (C->getType() == DestTy) + return C; // avoid calling Fold + return Fold(ConstantExpr::getFPCast(C, DestTy)); } Constant *CreateBitCast(Constant *C, Type *DestTy) const { return CreateCast(Instruction::BitCast, C, DestTy); diff --git a/include/llvm/Support/TargetRegistry.h b/include/llvm/Support/TargetRegistry.h index c0be8f1..ca58bfb 100644 --- a/include/llvm/Support/TargetRegistry.h +++ b/include/llvm/Support/TargetRegistry.h @@ -93,7 +93,9 @@ namespace llvm { CodeGenOpt::Level OL); typedef AsmPrinter *(*AsmPrinterCtorTy)(TargetMachine &TM, MCStreamer &Streamer); - typedef MCAsmBackend *(*MCAsmBackendCtorTy)(const Target &T, StringRef TT); + typedef MCAsmBackend *(*MCAsmBackendCtorTy)(const Target &T, + StringRef TT, + StringRef CPU); typedef MCTargetAsmLexer *(*MCAsmLexerCtorTy)(const Target &T, const MCRegisterInfo &MRI, const MCAsmInfo &MAI); @@ -271,7 +273,7 @@ namespace llvm { /// createMCAsmInfo - Create a MCAsmInfo implementation for the specified /// target triple. /// - /// \arg Triple - This argument is used to determine the target machine + /// \param Triple This argument is used to determine the target machine /// feature set; it should always be provided. Generally this should be /// either the target triple from the module, or the target triple of the /// host if that does not exist. @@ -317,12 +319,12 @@ namespace llvm { /// createMCSubtargetInfo - Create a MCSubtargetInfo implementation. /// - /// \arg Triple - This argument is used to determine the target machine + /// \param Triple This argument is used to determine the target machine /// feature set; it should always be provided. Generally this should be /// either the target triple from the module, or the target triple of the /// host if that does not exist. - /// \arg CPU - This specifies the name of the target CPU. - /// \arg Features - This specifies the string representation of the + /// \param CPU This specifies the name of the target CPU. + /// \param Features This specifies the string representation of the /// additional target features. MCSubtargetInfo *createMCSubtargetInfo(StringRef Triple, StringRef CPU, StringRef Features) const { @@ -332,9 +334,9 @@ namespace llvm { } /// createTargetMachine - Create a target specific machine implementation - /// for the specified \arg Triple. + /// for the specified \p Triple. /// - /// \arg Triple - This argument is used to determine the target machine + /// \param Triple This argument is used to determine the target machine /// feature set; it should always be provided. Generally this should be /// either the target triple from the module, or the target triple of the /// host if that does not exist. @@ -351,12 +353,11 @@ namespace llvm { /// createMCAsmBackend - Create a target specific assembly parser. /// - /// \arg Triple - The target triple string. 
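// Not from the patch: a direct illustration of the new fast path. When the type
// already matches, CreatePointerCast now returns the constant unchanged instead
// of building and folding a no-op cast. TD and GV are assumed to come from the
// module being compiled.
#include "llvm/GlobalVariable.h"
#include "llvm/Support/TargetFolder.h"

llvm::Constant *castToSelf(const llvm::DataLayout *TD, llvm::GlobalVariable *GV) {
  llvm::TargetFolder Folder(TD);
  // GV already has its own pointer type, so this hits the "return C" path.
  return Folder.CreatePointerCast(GV, GV->getType());
}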
- /// \arg Backend - The target independent assembler object. - MCAsmBackend *createMCAsmBackend(StringRef Triple) const { + /// \param Triple The target triple string. + MCAsmBackend *createMCAsmBackend(StringRef Triple, StringRef CPU) const { if (!MCAsmBackendCtorFn) return 0; - return MCAsmBackendCtorFn(*this, Triple); + return MCAsmBackendCtorFn(*this, Triple, CPU); } /// createMCAsmLexer - Create a target specific assembly lexer. @@ -370,7 +371,7 @@ namespace llvm { /// createMCAsmParser - Create a target specific assembly parser. /// - /// \arg Parser - The target independent parser implementation to use for + /// \param Parser The target independent parser implementation to use for /// parsing and lexing. MCTargetAsmParser *createMCAsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser) const { @@ -416,13 +417,13 @@ namespace llvm { /// createMCObjectStreamer - Create a target specific MCStreamer. /// - /// \arg TT - The target triple. - /// \arg Ctx - The target context. - /// \arg TAB - The target assembler backend object. Takes ownership. - /// \arg _OS - The stream object. - /// \arg _Emitter - The target independent assembler object.Takes ownership. - /// \arg RelaxAll - Relax all fixups? - /// \arg NoExecStack - Mark file as not needing a executable stack. + /// \param TT The target triple. + /// \param Ctx The target context. + /// \param TAB The target assembler backend object. Takes ownership. + /// \param _OS The stream object. + /// \param _Emitter The target independent assembler object.Takes ownership. + /// \param RelaxAll Relax all fixups? + /// \param NoExecStack Mark file as not needing a executable stack. MCStreamer *createMCObjectStreamer(StringRef TT, MCContext &Ctx, MCAsmBackend &TAB, raw_ostream &_OS, @@ -1063,8 +1064,9 @@ namespace llvm { } private: - static MCAsmBackend *Allocator(const Target &T, StringRef Triple) { - return new MCAsmBackendImpl(T, Triple); + static MCAsmBackend *Allocator(const Target &T, StringRef Triple, + StringRef CPU) { + return new MCAsmBackendImpl(T, Triple, CPU); } }; diff --git a/include/llvm/Support/Threading.h b/include/llvm/Support/Threading.h index c0e842c..9017afb 100644 --- a/include/llvm/Support/Threading.h +++ b/include/llvm/Support/Threading.h @@ -41,8 +41,8 @@ namespace llvm { /// before llvm_start_multithreaded(). void llvm_release_global_lock(); - /// llvm_execute_on_thread - Execute the given \arg UserFn on a separate - /// thread, passing it the provided \arg UserData. + /// llvm_execute_on_thread - Execute the given \p UserFn on a separate + /// thread, passing it the provided \p UserData. /// /// This function does not guarantee that the code will actually be executed /// on a separate thread or honoring the requested stack size, but tries to do diff --git a/include/llvm/Support/TimeValue.h b/include/llvm/Support/TimeValue.h index 94f132a..e780b50 100644 --- a/include/llvm/Support/TimeValue.h +++ b/include/llvm/Support/TimeValue.h @@ -153,7 +153,6 @@ namespace sys { /// Determine if \p this is greater than or equal to \p that. /// @returns True iff *this >= that. - /// @brief True if this >= that. int operator >= (const TimeValue &that) const { if ( this->seconds_ > that.seconds_ ) { return 1; @@ -164,8 +163,7 @@ namespace sys { } /// Determines if two TimeValue objects represent the same moment in time. - /// @brief True iff *this == that. - /// @brief True if this == that. + /// @returns True iff *this == that. 
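// Hedged sketch of the updated hook: callers now pass the CPU name through to
// the backend constructor. TripleName, CPUName, and Err are placeholders.
#include "llvm/Support/TargetRegistry.h"
#include <string>

llvm::MCAsmBackend *makeBackend(const std::string &TripleName,
                                const std::string &CPUName) {
  std::string Err;
  const llvm::Target *T = llvm::TargetRegistry::lookupTarget(TripleName, Err);
  if (!T)
    return 0;
  return T->createMCAsmBackend(TripleName, CPUName);  // new CPU parameter
}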
int operator == (const TimeValue &that) const { return (this->seconds_ == that.seconds_) && (this->nanos_ == that.nanos_); @@ -173,8 +171,7 @@ namespace sys { /// Determines if two TimeValue objects represent times that are not the /// same. - /// @return True iff *this != that. - /// @brief True if this != that. + /// @returns True iff *this != that. int operator != (const TimeValue &that) const { return !(*this == that); } /// Adds two TimeValue objects together. diff --git a/include/llvm/Support/Timer.h b/include/llvm/Support/Timer.h index 404cb6d..a741882 100644 --- a/include/llvm/Support/Timer.h +++ b/include/llvm/Support/Timer.h @@ -15,6 +15,7 @@ #ifndef LLVM_SUPPORT_TIMER_H #define LLVM_SUPPORT_TIMER_H +#include "llvm/Support/Compiler.h" #include "llvm/Support/DataTypes.h" #include "llvm/ADT/StringRef.h" #include @@ -130,7 +131,7 @@ private: /// class TimeRegion { Timer *T; - TimeRegion(const TimeRegion &); // DO NOT IMPLEMENT + TimeRegion(const TimeRegion &) LLVM_DELETED_FUNCTION; public: explicit TimeRegion(Timer &t) : T(&t) { T->startTimer(); @@ -168,8 +169,8 @@ class TimerGroup { std::vector > TimersToPrint; TimerGroup **Prev, *Next; // Doubly linked list of TimerGroup's. - TimerGroup(const TimerGroup &TG); // DO NOT IMPLEMENT - void operator=(const TimerGroup &TG); // DO NOT IMPLEMENT + TimerGroup(const TimerGroup &TG) LLVM_DELETED_FUNCTION; + void operator=(const TimerGroup &TG) LLVM_DELETED_FUNCTION; public: explicit TimerGroup(StringRef name); ~TimerGroup(); diff --git a/include/llvm/Support/ValueHandle.h b/include/llvm/Support/ValueHandle.h index 61e21b8..dbcf0fd 100644 --- a/include/llvm/Support/ValueHandle.h +++ b/include/llvm/Support/ValueHandle.h @@ -59,8 +59,8 @@ private: // pair. The 'setValPtrInt' and 'getValPtrInt' methods below give them this // access. PointerIntPair VP; - - explicit ValueHandleBase(const ValueHandleBase&); // DO NOT IMPLEMENT. + + ValueHandleBase(const ValueHandleBase&) LLVM_DELETED_FUNCTION; public: explicit ValueHandleBase(HandleBaseKind Kind) : PrevPair(0, Kind), Next(0), VP(0, 0) {} diff --git a/include/llvm/Support/YAMLParser.h b/include/llvm/Support/YAMLParser.h index 98910eb..12958fa 100644 --- a/include/llvm/Support/YAMLParser.h +++ b/include/llvm/Support/YAMLParser.h @@ -133,7 +133,6 @@ public: virtual void skip() {} unsigned int getType() const { return TypeID; } - static inline bool classof(const Node *) { return true; } void *operator new ( size_t Size , BumpPtrAllocator &Alloc @@ -166,7 +165,6 @@ class NullNode : public Node { public: NullNode(OwningPtr &D) : Node(NK_Null, D, StringRef()) {} - static inline bool classof(const NullNode *) { return true; } static inline bool classof(const Node *N) { return N->getType() == NK_Null; } @@ -199,7 +197,6 @@ public: /// This happens with escaped characters and multi-line literals. StringRef getValue(SmallVectorImpl &Storage) const; - static inline bool classof(const ScalarNode *) { return true; } static inline bool classof(const Node *N) { return N->getType() == NK_Scalar; } @@ -241,12 +238,11 @@ public: /// @returns The value, or nullptr if failed() == true. 
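// Illustrative only: TimeRegion is the RAII wrapper these hunks lock down (its
// copy constructor is now deleted). The timer name is arbitrary.
#include "llvm/Support/Timer.h"

void timedWork() {
  llvm::Timer T("frobnicate");
  {
    llvm::TimeRegion TR(T);   // starts the timer
    // ... expensive work ...
  }                           // stops the timer when TR goes out of scope
}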
Node *getValue(); - virtual void skip() { + virtual void skip() LLVM_OVERRIDE { getKey()->skip(); getValue()->skip(); } - static inline bool classof(const KeyValueNode *) { return true; } static inline bool classof(const Node *N) { return N->getType() == NK_KeyValue; } @@ -358,11 +354,10 @@ public: iterator end() { return iterator(); } - virtual void skip() { + virtual void skip() LLVM_OVERRIDE { yaml::skip(*this); } - static inline bool classof(const MappingNode *) { return true; } static inline bool classof(const Node *N) { return N->getType() == NK_Mapping; } @@ -421,11 +416,10 @@ public: iterator end() { return iterator(); } - virtual void skip() { + virtual void skip() LLVM_OVERRIDE { yaml::skip(*this); } - static inline bool classof(const SequenceNode *) { return true; } static inline bool classof(const Node *N) { return N->getType() == NK_Sequence; } @@ -450,7 +444,6 @@ public: StringRef getName() const { return Name; } Node *getTarget(); - static inline bool classof(const ScalarNode *) { return true; } static inline bool classof(const Node *N) { return N->getType() == NK_Alias; } diff --git a/include/llvm/Support/circular_raw_ostream.h b/include/llvm/Support/circular_raw_ostream.h index 2b3c329..2823af3 100644 --- a/include/llvm/Support/circular_raw_ostream.h +++ b/include/llvm/Support/circular_raw_ostream.h @@ -81,12 +81,12 @@ namespace llvm Filled = false; } - virtual void write_impl(const char *Ptr, size_t Size); + virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE; /// current_pos - Return the current position within the stream, /// not counting the bytes currently in the buffer. /// - virtual uint64_t current_pos() const { + virtual uint64_t current_pos() const LLVM_OVERRIDE { // This has the same effect as calling TheStream.current_pos(), // but that interface is private. return TheStream->tell() - TheStream->GetNumBytesInBuffer(); diff --git a/include/llvm/Support/raw_os_ostream.h b/include/llvm/Support/raw_os_ostream.h index 4f5d361..4385721 100644 --- a/include/llvm/Support/raw_os_ostream.h +++ b/include/llvm/Support/raw_os_ostream.h @@ -24,14 +24,14 @@ namespace llvm { /// use the underlying stream to detect errors. class raw_os_ostream : public raw_ostream { std::ostream &OS; - + /// write_impl - See raw_ostream::write_impl. - virtual void write_impl(const char *Ptr, size_t Size); - + virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE; + /// current_pos - Return the current position within the stream, not /// counting the bytes currently in the buffer. - virtual uint64_t current_pos() const; - + virtual uint64_t current_pos() const LLVM_OVERRIDE; + public: raw_os_ostream(std::ostream &O) : OS(O) {} ~raw_os_ostream(); diff --git a/include/llvm/Support/raw_ostream.h b/include/llvm/Support/raw_ostream.h index 5de749a..eab0f2d 100644 --- a/include/llvm/Support/raw_ostream.h +++ b/include/llvm/Support/raw_ostream.h @@ -15,6 +15,7 @@ #define LLVM_SUPPORT_RAW_OSTREAM_H #include "llvm/ADT/StringRef.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/DataTypes.h" namespace llvm { @@ -29,8 +30,8 @@ namespace llvm { class raw_ostream { private: // Do not implement. raw_ostream is noncopyable. 
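// Not from the patch: adapting a std::ostream, which is what raw_os_ostream
// (whose overrides are being annotated above) exists for.
#include "llvm/Support/raw_os_ostream.h"
#include <iostream>

void bridgeExample() {
  llvm::raw_os_ostream OS(std::cout);
  OS << "forwarded through raw_ostream, value = " << 42 << "\n";
}   // flushed to std::cout when OS is destroyed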
- void operator=(const raw_ostream &); - raw_ostream(const raw_ostream &); + void operator=(const raw_ostream &) LLVM_DELETED_FUNCTION; + raw_ostream(const raw_ostream &) LLVM_DELETED_FUNCTION; /// The buffer is handled in such a way that the buffer is /// uninitialized, unbuffered, or out of space when OutBufCur >= @@ -191,10 +192,10 @@ public: raw_ostream &operator<<(double N); - /// write_hex - Output \arg N in hexadecimal, without any prefix or padding. + /// write_hex - Output \p N in hexadecimal, without any prefix or padding. raw_ostream &write_hex(unsigned long long N); - /// write_escaped - Output \arg Str, turning '\\', '\t', '\n', '"', and + /// write_escaped - Output \p Str, turning '\\', '\t', '\n', '"', and /// anything that doesn't satisfy std::isprint into an escape sequence. raw_ostream &write_escaped(StringRef Str, bool UseHexEscapes = false); @@ -210,13 +211,19 @@ public: /// Changes the foreground color of text that will be output from this point /// forward. - /// @param colors ANSI color to use, the special SAVEDCOLOR can be used to + /// @param Color ANSI color to use, the special SAVEDCOLOR can be used to /// change only the bold attribute, and keep colors untouched - /// @param bold bold/brighter text, default false - /// @param bg if true change the background, default: change foreground + /// @param Bold bold/brighter text, default false + /// @param BG if true change the background, default: change foreground /// @returns itself so it can be used within << invocations - virtual raw_ostream &changeColor(enum Colors, bool = false, bool = false) { - return *this; } + virtual raw_ostream &changeColor(enum Colors Color, + bool Bold = false, + bool BG = false) { + (void)Color; + (void)Bold; + (void)BG; + return *this; + } /// Resets the colors to terminal defaults. Call this when you are done /// outputting colored text, or before program exit. @@ -239,15 +246,16 @@ public: private: /// write_impl - The is the piece of the class that is implemented - /// by subclasses. This writes the \args Size bytes starting at - /// \arg Ptr to the underlying stream. + /// by subclasses. This writes the \p Size bytes starting at + /// \p Ptr to the underlying stream. /// /// This function is guaranteed to only be called at a point at which it is /// safe for the subclass to install a new buffer via SetBuffer. /// - /// \arg Ptr - The start of the data to be written. For buffered streams this + /// \param Ptr The start of the data to be written. For buffered streams this /// is guaranteed to be the start of the buffer. - /// \arg Size - The number of bytes to be written. + /// + /// \param Size The number of bytes to be written. /// /// \invariant { Size > 0 } virtual void write_impl(const char *Ptr, size_t Size) = 0; @@ -314,14 +322,14 @@ class raw_fd_ostream : public raw_ostream { uint64_t pos; /// write_impl - See raw_ostream::write_impl. - virtual void write_impl(const char *Ptr, size_t Size); + virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE; /// current_pos - Return the current position within the stream, not /// counting the bytes currently in the buffer. - virtual uint64_t current_pos() const { return pos; } + virtual uint64_t current_pos() const LLVM_OVERRIDE { return pos; } /// preferred_buffer_size - Determine an efficient buffer size. - virtual size_t preferred_buffer_size() const; + virtual size_t preferred_buffer_size() const LLVM_OVERRIDE; /// error_detected - Set the flag indicating that an output error has /// been encountered. 
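// Hedged sketch of the raw_ostream helpers whose documentation is touched above;
// color changes are a no-op on streams that do not support them.
#include "llvm/Support/raw_ostream.h"

void ostreamExamples() {
  llvm::raw_ostream &OS = llvm::errs();
  OS.changeColor(llvm::raw_ostream::RED, /*Bold=*/true);
  OS << "error-ish text\n";
  OS.resetColor();
  OS.write_hex(48879);            // prints "beef", no 0x prefix
  OS << "\n";
  OS.write_escaped("a\tb");       // prints a\tb (the TAB becomes '\' 't')
}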
@@ -382,14 +390,14 @@ public: } virtual raw_ostream &changeColor(enum Colors colors, bool bold=false, - bool bg=false); - virtual raw_ostream &resetColor(); + bool bg=false) LLVM_OVERRIDE; + virtual raw_ostream &resetColor() LLVM_OVERRIDE; - virtual raw_ostream &reverseColor(); + virtual raw_ostream &reverseColor() LLVM_OVERRIDE; - virtual bool is_displayed() const; + virtual bool is_displayed() const LLVM_OVERRIDE; - virtual bool has_colors() const; + virtual bool has_colors() const LLVM_OVERRIDE; /// has_error - Return the value of the flag in this raw_fd_ostream indicating /// whether an output error has been encountered. @@ -435,11 +443,11 @@ class raw_string_ostream : public raw_ostream { std::string &OS; /// write_impl - See raw_ostream::write_impl. - virtual void write_impl(const char *Ptr, size_t Size); + virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE; /// current_pos - Return the current position within the stream, not /// counting the bytes currently in the buffer. - virtual uint64_t current_pos() const { return OS.size(); } + virtual uint64_t current_pos() const LLVM_OVERRIDE { return OS.size(); } public: explicit raw_string_ostream(std::string &O) : OS(O) {} ~raw_string_ostream(); @@ -459,15 +467,15 @@ class raw_svector_ostream : public raw_ostream { SmallVectorImpl &OS; /// write_impl - See raw_ostream::write_impl. - virtual void write_impl(const char *Ptr, size_t Size); + virtual void write_impl(const char *Ptr, size_t Size) LLVM_OVERRIDE; /// current_pos - Return the current position within the stream, not /// counting the bytes currently in the buffer. - virtual uint64_t current_pos() const; + virtual uint64_t current_pos() const LLVM_OVERRIDE; public: /// Construct a new raw_svector_ostream. /// - /// \arg O - The vector to write to; this should generally have at least 128 + /// \param O The vector to write to; this should generally have at least 128 /// bytes free to avoid any extraneous memory overhead. explicit raw_svector_ostream(SmallVectorImpl &O); ~raw_svector_ostream(); @@ -485,11 +493,11 @@ public: /// raw_null_ostream - A raw_ostream that discards all output. class raw_null_ostream : public raw_ostream { /// write_impl - See raw_ostream::write_impl. - virtual void write_impl(const char *Ptr, size_t size); + virtual void write_impl(const char *Ptr, size_t size) LLVM_OVERRIDE; /// current_pos - Return the current position within the stream, not /// counting the bytes currently in the buffer. 
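// Not from the patch: the two in-memory adapters whose overrides are annotated
// above.
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

void inMemoryStreams() {
  std::string S;
  llvm::raw_string_ostream SS(S);
  SS << "value = " << 7;
  SS.flush();                       // S now holds "value = 7"

  llvm::SmallString<128> Buf;
  llvm::raw_svector_ostream VS(Buf);
  VS << "hello";
  llvm::StringRef Str = VS.str();   // flushes and returns the contents
  (void)Str;
}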
- virtual uint64_t current_pos() const; + virtual uint64_t current_pos() const LLVM_OVERRIDE; public: explicit raw_null_ostream() {} diff --git a/include/llvm/Support/system_error.h b/include/llvm/Support/system_error.h index af81206..0d164f6 100644 --- a/include/llvm/Support/system_error.h +++ b/include/llvm/Support/system_error.h @@ -17,6 +17,8 @@ #ifndef LLVM_SYSTEM_SYSTEM_ERROR_H #define LLVM_SYSTEM_SYSTEM_ERROR_H +#include "llvm/Support/Compiler.h" + /* system_error synopsis @@ -629,8 +631,8 @@ public: private: error_category(); - error_category(const error_category&);// = delete; - error_category& operator=(const error_category&);// = delete; + error_category(const error_category&) LLVM_DELETED_FUNCTION; + error_category& operator=(const error_category&) LLVM_DELETED_FUNCTION; public: virtual const char* name() const = 0; @@ -651,7 +653,7 @@ public: class _do_message : public error_category { public: - virtual std::string message(int ev) const; + virtual std::string message(int ev) const LLVM_OVERRIDE; }; const error_category& generic_category(); diff --git a/include/llvm/Support/type_traits.h b/include/llvm/Support/type_traits.h index 7b97547..f930639 100644 --- a/include/llvm/Support/type_traits.h +++ b/include/llvm/Support/type_traits.h @@ -54,8 +54,9 @@ struct is_class // is_class<> metafunction due to Paul Mensonides (leavings@attbi.com). For // more details: // http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1 - public: - enum { value = sizeof(char) == sizeof(dont_use::is_class_helper(0)) }; +public: + static const bool value = + sizeof(char) == sizeof(dont_use::is_class_helper(0)); }; @@ -162,12 +163,11 @@ template class is_integral_or_enum { static UnderlyingT &nonce_instance; public: - enum { + static const bool value = (!is_class::value && !is_pointer::value && !is_same::value && !is_same::value && - sizeof(char) != sizeof(check_int_convertible(nonce_instance))) - }; + sizeof(char) != sizeof(check_int_convertible(nonce_instance))); }; // enable_if_c - Enable/disable a template based on a metafunction diff --git a/include/llvm/SymbolTableListTraits.h b/include/llvm/SymbolTableListTraits.h index 91a4eb9..ec5c88f 100644 --- a/include/llvm/SymbolTableListTraits.h +++ b/include/llvm/SymbolTableListTraits.h @@ -46,7 +46,6 @@ public: /// getListOwner - Return the object that owns this list. If this is a list /// of instructions, it returns the BasicBlock that owns them. 
ItemParentClass *getListOwner() { - typedef iplist ItemParentClass::*Sublist; size_t Offset(size_t(&((ItemParentClass*)0->*ItemParentClass:: getSublistAccess(static_cast(0))))); iplist* Anchor(static_cast*>(this)); diff --git a/include/llvm/TableGen/Error.h b/include/llvm/TableGen/Error.h index fd5f805..2f6b7e6 100644 --- a/include/llvm/TableGen/Error.h +++ b/include/llvm/TableGen/Error.h @@ -19,26 +19,17 @@ namespace llvm { -class TGError { - SMLoc Loc; - std::string Message; -public: - TGError(SMLoc loc, const std::string &message) : Loc(loc), Message(message) {} - - SMLoc getLoc() const { return Loc; } - const std::string &getMessage() const { return Message; } -}; - -void PrintWarning(SMLoc WarningLoc, const Twine &Msg); +void PrintWarning(ArrayRef WarningLoc, const Twine &Msg); void PrintWarning(const char *Loc, const Twine &Msg); void PrintWarning(const Twine &Msg); -void PrintWarning(const TGError &Warning); -void PrintError(SMLoc ErrorLoc, const Twine &Msg); +void PrintError(ArrayRef ErrorLoc, const Twine &Msg); void PrintError(const char *Loc, const Twine &Msg); void PrintError(const Twine &Msg); -void PrintError(const TGError &Error); +LLVM_ATTRIBUTE_NORETURN void PrintFatalError(const std::string &Msg); +LLVM_ATTRIBUTE_NORETURN void PrintFatalError(ArrayRef ErrorLoc, + const std::string &Msg); extern SourceMgr SrcMgr; diff --git a/include/llvm/TableGen/Main.h b/include/llvm/TableGen/Main.h index deaef4a..6b51e20 100644 --- a/include/llvm/TableGen/Main.h +++ b/include/llvm/TableGen/Main.h @@ -16,10 +16,13 @@ namespace llvm { -class TableGenAction; +class RecordKeeper; +class raw_ostream; +/// \brief Perform the action using Records, and write output to OS. +/// \returns true on error, false otherwise +typedef bool TableGenMainFn(raw_ostream &OS, RecordKeeper &Records); -/// Run the table generator, performing the specified Action on parsed records. -int TableGenMain(char *argv0, TableGenAction &Action); +int TableGenMain(char *argv0, TableGenMainFn *MainFn); } diff --git a/include/llvm/TableGen/Record.h b/include/llvm/TableGen/Record.h index a8256b7..319298c 100644 --- a/include/llvm/TableGen/Record.h +++ b/include/llvm/TableGen/Record.h @@ -18,6 +18,7 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/Support/Allocator.h" +#include "llvm/Support/Casting.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/ErrorHandling.h" @@ -66,10 +67,27 @@ class RecordKeeper; //===----------------------------------------------------------------------===// class RecTy { +public: + /// \brief Subclass discriminator (for dyn_cast<> et al.) 
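// Hedged sketch of a tblgen-style driver after the TableGenAction ->
// TableGenMainFn change above; EmitMyBackend and the option handling are
// placeholders, not part of the patch.
#include "llvm/Support/CommandLine.h"
#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"

static bool EmitMyBackend(llvm::raw_ostream &OS, llvm::RecordKeeper &Records) {
  OS << "// " << Records.getDefs().size() << " defs parsed\n";
  return false;                     // false == success
}

int main(int argc, char **argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv);
  return llvm::TableGenMain(argv[0], &EmitMyBackend);
}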
+ enum RecTyKind { + BitRecTyKind, + BitsRecTyKind, + IntRecTyKind, + StringRecTyKind, + ListRecTyKind, + DagRecTyKind, + RecordRecTyKind + }; + +private: + RecTyKind Kind; ListRecTy *ListTy; virtual void anchor(); + public: - RecTy() : ListTy(0) {} + RecTyKind getRecTyKind() const { return Kind; } + + RecTy(RecTyKind K) : Kind(K), ListTy(0) {} virtual ~RecTy() {} virtual std::string getAsString() const = 0; @@ -132,8 +150,12 @@ inline raw_ostream &operator<<(raw_ostream &OS, const RecTy &Ty) { /// class BitRecTy : public RecTy { static BitRecTy Shared; - BitRecTy() {} + BitRecTy() : RecTy(BitRecTyKind) {} public: + static bool classof(const RecTy *RT) { + return RT->getRecTyKind() == BitRecTyKind; + } + static BitRecTy *get() { return &Shared; } virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; } @@ -152,9 +174,9 @@ public: virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);} virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);} - std::string getAsString() const { return "bit"; } + virtual std::string getAsString() const { return "bit"; } - bool typeIsConvertibleTo(const RecTy *RHS) const { + virtual bool typeIsConvertibleTo(const RecTy *RHS) const { return RHS->baseClassOf(this); } virtual bool baseClassOf(const BitRecTy *RHS) const { return true; } @@ -173,8 +195,12 @@ public: /// class BitsRecTy : public RecTy { unsigned Size; - explicit BitsRecTy(unsigned Sz) : Size(Sz) {} + explicit BitsRecTy(unsigned Sz) : RecTy(BitsRecTyKind), Size(Sz) {} public: + static bool classof(const RecTy *RT) { + return RT->getRecTyKind() == BitsRecTyKind; + } + static BitsRecTy *get(unsigned Sz); unsigned getNumBits() const { return Size; } @@ -195,9 +221,9 @@ public: virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);} virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);} - std::string getAsString() const; + virtual std::string getAsString() const; - bool typeIsConvertibleTo(const RecTy *RHS) const { + virtual bool typeIsConvertibleTo(const RecTy *RHS) const { return RHS->baseClassOf(this); } virtual bool baseClassOf(const BitRecTy *RHS) const { return Size == 1; } @@ -217,8 +243,12 @@ public: /// class IntRecTy : public RecTy { static IntRecTy Shared; - IntRecTy() {} + IntRecTy() : RecTy(IntRecTyKind) {} public: + static bool classof(const RecTy *RT) { + return RT->getRecTyKind() == IntRecTyKind; + } + static IntRecTy *get() { return &Shared; } virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; } @@ -237,9 +267,9 @@ public: virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);} virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);} - std::string getAsString() const { return "int"; } + virtual std::string getAsString() const { return "int"; } - bool typeIsConvertibleTo(const RecTy *RHS) const { + virtual bool typeIsConvertibleTo(const RecTy *RHS) const { return RHS->baseClassOf(this); } @@ -257,8 +287,12 @@ public: /// class StringRecTy : public RecTy { static StringRecTy Shared; - StringRecTy() {} + StringRecTy() : RecTy(StringRecTyKind) {} public: + static bool classof(const RecTy *RT) { + return RT->getRecTyKind() == StringRecTyKind; + } + static StringRecTy *get() { return &Shared; } virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; } @@ -278,9 +312,9 @@ public: virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);} virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);} - std::string 
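// Not part of the patch: what the new classof() implementations buy — RecTy
// values can now be queried with the usual isa<>/dyn_cast<> machinery.
#include "llvm/Support/Casting.h"
#include "llvm/TableGen/Record.h"

unsigned bitsWidthOrZero(const llvm::RecTy *T) {
  if (const llvm::BitsRecTy *BT = llvm::dyn_cast<llvm::BitsRecTy>(T))
    return BT->getNumBits();        // only BitsRecTy carries a width
  return 0;
}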
getAsString() const { return "string"; } + virtual std::string getAsString() const { return "string"; } - bool typeIsConvertibleTo(const RecTy *RHS) const { + virtual bool typeIsConvertibleTo(const RecTy *RHS) const { return RHS->baseClassOf(this); } @@ -300,9 +334,13 @@ public: /// class ListRecTy : public RecTy { RecTy *Ty; - explicit ListRecTy(RecTy *T) : Ty(T) {} + explicit ListRecTy(RecTy *T) : RecTy(ListRecTyKind), Ty(T) {} friend ListRecTy *RecTy::getListTy(); public: + static bool classof(const RecTy *RT) { + return RT->getRecTyKind() == ListRecTyKind; + } + static ListRecTy *get(RecTy *T) { return T->getListTy(); } RecTy *getElementType() const { return Ty; } @@ -322,9 +360,9 @@ public: virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);} virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);} - std::string getAsString() const; + virtual std::string getAsString() const; - bool typeIsConvertibleTo(const RecTy *RHS) const { + virtual bool typeIsConvertibleTo(const RecTy *RHS) const { return RHS->baseClassOf(this); } @@ -343,8 +381,12 @@ public: /// class DagRecTy : public RecTy { static DagRecTy Shared; - DagRecTy() {} + DagRecTy() : RecTy(DagRecTyKind) {} public: + static bool classof(const RecTy *RT) { + return RT->getRecTyKind() == DagRecTyKind; + } + static DagRecTy *get() { return &Shared; } virtual Init *convertValue( UnsetInit *UI) { return (Init*)UI; } @@ -363,9 +405,9 @@ public: virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);} virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);} - std::string getAsString() const { return "dag"; } + virtual std::string getAsString() const { return "dag"; } - bool typeIsConvertibleTo(const RecTy *RHS) const { + virtual bool typeIsConvertibleTo(const RecTy *RHS) const { return RHS->baseClassOf(this); } @@ -384,9 +426,13 @@ public: /// class RecordRecTy : public RecTy { Record *Rec; - explicit RecordRecTy(Record *R) : Rec(R) {} + explicit RecordRecTy(Record *R) : RecTy(RecordRecTyKind), Rec(R) {} friend class Record; public: + static bool classof(const RecTy *RT) { + return RT->getRecTyKind() == RecordRecTyKind; + } + static RecordRecTy *get(Record *R); Record *getRecord() const { return Rec; } @@ -407,9 +453,9 @@ public: virtual Init *convertValue( VarInit *VI) { return RecTy::convertValue(VI);} virtual Init *convertValue( FieldInit *FI) { return RecTy::convertValue(FI);} - std::string getAsString() const; + virtual std::string getAsString() const; - bool typeIsConvertibleTo(const RecTy *RHS) const { + virtual bool typeIsConvertibleTo(const RecTy *RHS) const { return RHS->baseClassOf(this); } virtual bool baseClassOf(const BitRecTy *RHS) const { return false; } @@ -431,12 +477,53 @@ RecTy *resolveTypes(RecTy *T1, RecTy *T2); //===----------------------------------------------------------------------===// class Init { - Init(const Init &); // Do not define. - Init &operator=(const Init &); // Do not define. +protected: + /// \brief Discriminator enum (for isa<>, dyn_cast<>, et al.) + /// + /// This enum is laid out by a preorder traversal of the inheritance + /// hierarchy, and does not contain an entry for abstract classes, as per + /// the recommendation in docs/HowToSetUpLLVMStyleRTTI.rst. + /// + /// We also explicitly include "first" and "last" values for each + /// interior node of the inheritance tree, to make it easier to read the + /// corresponding classof(). 
+ /// + /// We could pack these a bit tighter by not having the IK_FirstXXXInit + /// and IK_LastXXXInit be their own values, but that would degrade + /// readability for really no benefit. + enum InitKind { + IK_BitInit, + IK_BitsInit, + IK_FirstTypedInit, + IK_DagInit, + IK_DefInit, + IK_FieldInit, + IK_IntInit, + IK_ListInit, + IK_FirstOpInit, + IK_BinOpInit, + IK_TernOpInit, + IK_UnOpInit, + IK_LastOpInit, + IK_StringInit, + IK_VarInit, + IK_VarListElementInit, + IK_LastTypedInit, + IK_UnsetInit, + IK_VarBitInit + }; + +private: + const InitKind Kind; + Init(const Init &) LLVM_DELETED_FUNCTION; + Init &operator=(const Init &) LLVM_DELETED_FUNCTION; virtual void anchor(); +public: + InitKind getKind() const { return Kind; } + protected: - Init(void) {} + explicit Init(InitKind K) : Kind(K) {} public: virtual ~Init() {} @@ -509,6 +596,18 @@ public: virtual Init *resolveReferences(Record &R, const RecordVal *RV) const { return const_cast(this); } + + /// getBit - This method is used to return the initializer for the specified + /// bit. + virtual Init *getBit(unsigned Bit) const = 0; + + /// getBitVar - This method is used to retrieve the initializer for bit + /// reference. For non-VarBitInit, it simply returns itself. + virtual Init *getBitVar() const { return const_cast(this); } + + /// getBitNum - This method is used to retrieve the bit number of a bit + /// reference. For non-VarBitInit, it simply returns 0. + virtual unsigned getBitNum() const { return 0; } }; inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) { @@ -521,13 +620,17 @@ inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) { class TypedInit : public Init { RecTy *Ty; - TypedInit(const TypedInit &Other); // Do not define. - TypedInit &operator=(const TypedInit &Other); // Do not define. + TypedInit(const TypedInit &Other) LLVM_DELETED_FUNCTION; + TypedInit &operator=(const TypedInit &Other) LLVM_DELETED_FUNCTION; protected: - explicit TypedInit(RecTy *T) : Ty(T) {} + explicit TypedInit(InitKind K, RecTy *T) : Init(K), Ty(T) {} public: + static bool classof(const Init *I) { + return I->getKind() >= IK_FirstTypedInit && + I->getKind() <= IK_LastTypedInit; + } RecTy *getType() const { return Ty; } virtual Init * @@ -541,13 +644,6 @@ public: /// virtual RecTy *getFieldType(const std::string &FieldName) const; - /// resolveBitReference - This method is used to implement - /// VarBitInit::resolveReferences. If the bit is able to be resolved, we - /// simply return the resolved value, otherwise we return null. - /// - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const = 0; - /// resolveListElementReference - This method is used to implement /// VarListElementInit::resolveReferences. If the list element is resolvable /// now, we return the resolved value, otherwise we return null. @@ -559,18 +655,25 @@ public: /// UnsetInit - ? - Represents an uninitialized value /// class UnsetInit : public Init { - UnsetInit() : Init() {} - UnsetInit(const UnsetInit &); // Do not define. - UnsetInit &operator=(const UnsetInit &Other); // Do not define. 
+ UnsetInit() : Init(IK_UnsetInit) {} + UnsetInit(const UnsetInit &) LLVM_DELETED_FUNCTION; + UnsetInit &operator=(const UnsetInit &Other) LLVM_DELETED_FUNCTION; virtual void anchor(); public: + static bool classof(const Init *I) { + return I->getKind() == IK_UnsetInit; + } static UnsetInit *get(); virtual Init *convertInitializerTo(RecTy *Ty) const { return Ty->convertValue(const_cast(this)); } + virtual Init *getBit(unsigned Bit) const { + return const_cast(this); + } + virtual bool isComplete() const { return false; } virtual std::string getAsString() const { return "?"; } }; @@ -581,12 +684,15 @@ public: class BitInit : public Init { bool Value; - explicit BitInit(bool V) : Value(V) {} - BitInit(const BitInit &Other); // Do not define. - BitInit &operator=(BitInit &Other); // Do not define. + explicit BitInit(bool V) : Init(IK_BitInit), Value(V) {} + BitInit(const BitInit &Other) LLVM_DELETED_FUNCTION; + BitInit &operator=(BitInit &Other) LLVM_DELETED_FUNCTION; virtual void anchor(); public: + static bool classof(const Init *I) { + return I->getKind() == IK_BitInit; + } static BitInit *get(bool V); bool getValue() const { return Value; } @@ -595,6 +701,11 @@ public: return Ty->convertValue(const_cast(this)); } + virtual Init *getBit(unsigned Bit) const { + assert(Bit < 1 && "Bit index out of range!"); + return const_cast(this); + } + virtual std::string getAsString() const { return Value ? "1" : "0"; } }; @@ -604,23 +715,22 @@ public: class BitsInit : public Init, public FoldingSetNode { std::vector Bits; - BitsInit(ArrayRef Range) : Bits(Range.begin(), Range.end()) {} + BitsInit(ArrayRef Range) + : Init(IK_BitsInit), Bits(Range.begin(), Range.end()) {} - BitsInit(const BitsInit &Other); // Do not define. - BitsInit &operator=(const BitsInit &Other); // Do not define. + BitsInit(const BitsInit &Other) LLVM_DELETED_FUNCTION; + BitsInit &operator=(const BitsInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_BitsInit; + } static BitsInit *get(ArrayRef Range); void Profile(FoldingSetNodeID &ID) const; unsigned getNumBits() const { return Bits.size(); } - Init *getBit(unsigned Bit) const { - assert(Bit < Bits.size() && "Bit index out of range!"); - return Bits[Bit]; - } - virtual Init *convertInitializerTo(RecTy *Ty) const { return Ty->convertValue(const_cast(this)); } @@ -640,6 +750,11 @@ public: virtual std::string getAsString() const; virtual Init *resolveReferences(Record &R, const RecordVal *RV) const; + + virtual Init *getBit(unsigned Bit) const { + assert(Bit < Bits.size() && "Bit index out of range!"); + return Bits[Bit]; + } }; @@ -648,12 +763,16 @@ public: class IntInit : public TypedInit { int64_t Value; - explicit IntInit(int64_t V) : TypedInit(IntRecTy::get()), Value(V) {} + explicit IntInit(int64_t V) + : TypedInit(IK_IntInit, IntRecTy::get()), Value(V) {} - IntInit(const IntInit &Other); // Do not define. - IntInit &operator=(const IntInit &Other); // Do note define. + IntInit(const IntInit &Other) LLVM_DELETED_FUNCTION; + IntInit &operator=(const IntInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_IntInit; + } static IntInit *get(int64_t V); int64_t getValue() const { return Value; } @@ -666,15 +785,6 @@ public: virtual std::string getAsString() const; - /// resolveBitReference - This method is used to implement - /// VarBitInit::resolveReferences. If the bit is able to be resolved, we - /// simply return the resolved value, otherwise we return null. 
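// Illustrative sketch: with getBit() hoisted onto Init, a client can walk the
// bits of any bit-addressable initializer uniformly instead of special-casing
// BitsInit.  'takeLowBits' is a hypothetical helper; it assumes V actually
// has N addressable bits (kinds such as StringInit or ListInit reject bit
// access with llvm_unreachable).
#include "llvm/ADT/SmallVector.h"
#include "llvm/TableGen/Record.h"

static llvm::BitsInit *takeLowBits(const llvm::Init *V, unsigned N) {
  llvm::SmallVector<llvm::Init *, 16> Bits;
  for (unsigned i = 0; i != N; ++i)
    Bits.push_back(V->getBit(i));   // a BitInit, VarBitInit, or UnsetInit
  return llvm::BitsInit::get(Bits);
}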
- /// - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const { - llvm_unreachable("Illegal bit reference off int"); - } - /// resolveListElementReference - This method is used to implement /// VarListElementInit::resolveReferences. If the list element is resolvable /// now, we return the resolved value, otherwise we return null. @@ -682,6 +792,10 @@ public: unsigned Elt) const { llvm_unreachable("Illegal element reference off int"); } + + virtual Init *getBit(unsigned Bit) const { + return BitInit::get((Value & (1ULL << Bit)) != 0); + } }; @@ -691,13 +805,16 @@ class StringInit : public TypedInit { std::string Value; explicit StringInit(const std::string &V) - : TypedInit(StringRecTy::get()), Value(V) {} + : TypedInit(IK_StringInit, StringRecTy::get()), Value(V) {} - StringInit(const StringInit &Other); // Do not define. - StringInit &operator=(const StringInit &Other); // Do not define. + StringInit(const StringInit &Other) LLVM_DELETED_FUNCTION; + StringInit &operator=(const StringInit &Other) LLVM_DELETED_FUNCTION; virtual void anchor(); public: + static bool classof(const Init *I) { + return I->getKind() == IK_StringInit; + } static StringInit *get(StringRef); const std::string &getValue() const { return Value; } @@ -709,15 +826,6 @@ public: virtual std::string getAsString() const { return "\"" + Value + "\""; } virtual std::string getAsUnquotedString() const { return Value; } - /// resolveBitReference - This method is used to implement - /// VarBitInit::resolveReferences. If the bit is able to be resolved, we - /// simply return the resolved value, otherwise we return null. - /// - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const { - llvm_unreachable("Illegal bit reference off string"); - } - /// resolveListElementReference - This method is used to implement /// VarListElementInit::resolveReferences. If the list element is resolvable /// now, we return the resolved value, otherwise we return null. @@ -725,6 +833,10 @@ public: unsigned Elt) const { llvm_unreachable("Illegal element reference off string"); } + + virtual Init *getBit(unsigned Bit) const { + llvm_unreachable("Illegal bit reference off string"); + } }; /// ListInit - [AL, AH, CL] - Represent a list of defs @@ -736,12 +848,16 @@ public: private: explicit ListInit(ArrayRef Range, RecTy *EltTy) - : TypedInit(ListRecTy::get(EltTy)), Values(Range.begin(), Range.end()) {} + : TypedInit(IK_ListInit, ListRecTy::get(EltTy)), + Values(Range.begin(), Range.end()) {} - ListInit(const ListInit &Other); // Do not define. - ListInit &operator=(const ListInit &Other); // Do not define. + ListInit(const ListInit &Other) LLVM_DELETED_FUNCTION; + ListInit &operator=(const ListInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_ListInit; + } static ListInit *get(ArrayRef Range, RecTy *EltTy); void Profile(FoldingSetNodeID &ID) const; @@ -754,7 +870,8 @@ public: Record *getElementAsRecord(unsigned i) const; - Init *convertInitListSlice(const std::vector &Elements) const; + virtual Init * + convertInitListSlice(const std::vector &Elements) const; virtual Init *convertInitializerTo(RecTy *Ty) const { return Ty->convertValue(const_cast(this)); @@ -777,33 +894,32 @@ public: inline size_t size () const { return Values.size(); } inline bool empty() const { return Values.empty(); } - /// resolveBitReference - This method is used to implement - /// VarBitInit::resolveReferences. 
If the bit is able to be resolved, we - /// simply return the resolved value, otherwise we return null. - /// - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const { - llvm_unreachable("Illegal bit reference off list"); - } - /// resolveListElementReference - This method is used to implement /// VarListElementInit::resolveReferences. If the list element is resolvable /// now, we return the resolved value, otherwise we return null. virtual Init *resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const; + + virtual Init *getBit(unsigned Bit) const { + llvm_unreachable("Illegal bit reference off list"); + } }; /// OpInit - Base class for operators /// class OpInit : public TypedInit { - OpInit(const OpInit &Other); // Do not define. - OpInit &operator=(OpInit &Other); // Do not define. + OpInit(const OpInit &Other) LLVM_DELETED_FUNCTION; + OpInit &operator=(OpInit &Other) LLVM_DELETED_FUNCTION; protected: - explicit OpInit(RecTy *Type) : TypedInit(Type) {} + explicit OpInit(InitKind K, RecTy *Type) : TypedInit(K, Type) {} public: + static bool classof(const Init *I) { + return I->getKind() >= IK_FirstOpInit && + I->getKind() <= IK_LastOpInit; + } // Clone - Clone this operator, replacing arguments with the new list virtual OpInit *clone(std::vector &Operands) const = 0; @@ -818,10 +934,10 @@ public: return Ty->convertValue(const_cast(this)); } - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const; virtual Init *resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const; + + virtual Init *getBit(unsigned Bit) const; }; @@ -835,12 +951,15 @@ private: Init *LHS; UnOpInit(UnaryOp opc, Init *lhs, RecTy *Type) - : OpInit(Type), Opc(opc), LHS(lhs) {} + : OpInit(IK_UnOpInit, Type), Opc(opc), LHS(lhs) {} - UnOpInit(const UnOpInit &Other); // Do not define. - UnOpInit &operator=(const UnOpInit &Other); // Do not define. + UnOpInit(const UnOpInit &Other) LLVM_DELETED_FUNCTION; + UnOpInit &operator=(const UnOpInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_UnOpInit; + } static UnOpInit *get(UnaryOp opc, Init *lhs, RecTy *Type); // Clone - Clone this operator, replacing arguments with the new list @@ -850,8 +969,8 @@ public: return UnOpInit::get(getOpcode(), *Operands.begin(), getType()); } - int getNumOperands() const { return 1; } - Init *getOperand(int i) const { + virtual int getNumOperands() const { return 1; } + virtual Init *getOperand(int i) const { assert(i == 0 && "Invalid operand id for unary operator"); return getOperand(); } @@ -861,7 +980,7 @@ public: // Fold - If possible, fold this to a simpler init. Return this if not // possible to fold. - Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const; + virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const; virtual Init *resolveReferences(Record &R, const RecordVal *RV) const; @@ -878,12 +997,15 @@ private: Init *LHS, *RHS; BinOpInit(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type) : - OpInit(Type), Opc(opc), LHS(lhs), RHS(rhs) {} + OpInit(IK_BinOpInit, Type), Opc(opc), LHS(lhs), RHS(rhs) {} - BinOpInit(const BinOpInit &Other); // Do not define. - BinOpInit &operator=(const BinOpInit &Other); // Do not define. 
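// Illustrative sketch of the idiom being adopted throughout this file:
// LLVM_DELETED_FUNCTION (from llvm/Support/Compiler.h) typically expands to
// "= delete" when the compiler supports it and to nothing otherwise, so an
// accidental copy becomes a compile-time (or at worst link-time) error
// rather than relying on a "// Do not define" comment.  'UniqueHandle' is a
// made-up class used only to show the pattern.
#include "llvm/Support/Compiler.h"

class UniqueHandle {
  void *Ptr;
  UniqueHandle(const UniqueHandle &) LLVM_DELETED_FUNCTION;
  void operator=(const UniqueHandle &) LLVM_DELETED_FUNCTION;
public:
  explicit UniqueHandle(void *P) : Ptr(P) {}
};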
+ BinOpInit(const BinOpInit &Other) LLVM_DELETED_FUNCTION; + BinOpInit &operator=(const BinOpInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_BinOpInit; + } static BinOpInit *get(BinaryOp opc, Init *lhs, Init *rhs, RecTy *Type); @@ -894,8 +1016,8 @@ public: return BinOpInit::get(getOpcode(), Operands[0], Operands[1], getType()); } - int getNumOperands() const { return 2; } - Init *getOperand(int i) const { + virtual int getNumOperands() const { return 2; } + virtual Init *getOperand(int i) const { assert((i == 0 || i == 1) && "Invalid operand id for binary operator"); if (i == 0) { return getLHS(); @@ -910,7 +1032,7 @@ public: // Fold - If possible, fold this to a simpler init. Return this if not // possible to fold. - Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const; + virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const; virtual Init *resolveReferences(Record &R, const RecordVal *RV) const; @@ -928,12 +1050,15 @@ private: TernOpInit(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs, RecTy *Type) : - OpInit(Type), Opc(opc), LHS(lhs), MHS(mhs), RHS(rhs) {} + OpInit(IK_TernOpInit, Type), Opc(opc), LHS(lhs), MHS(mhs), RHS(rhs) {} - TernOpInit(const TernOpInit &Other); // Do not define. - TernOpInit &operator=(const TernOpInit &Other); // Do not define. + TernOpInit(const TernOpInit &Other) LLVM_DELETED_FUNCTION; + TernOpInit &operator=(const TernOpInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_TernOpInit; + } static TernOpInit *get(TernaryOp opc, Init *lhs, Init *mhs, Init *rhs, RecTy *Type); @@ -946,8 +1071,8 @@ public: getType()); } - int getNumOperands() const { return 3; } - Init *getOperand(int i) const { + virtual int getNumOperands() const { return 3; } + virtual Init *getOperand(int i) const { assert((i == 0 || i == 1 || i == 2) && "Invalid operand id for ternary operator"); if (i == 0) { @@ -966,7 +1091,7 @@ public: // Fold - If possible, fold this to a simpler init. Return this if not // possible to fold. - Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const; + virtual Init *Fold(Record *CurRec, MultiClass *CurMultiClass) const; virtual bool isComplete() const { return false; } @@ -982,14 +1107,17 @@ class VarInit : public TypedInit { Init *VarName; explicit VarInit(const std::string &VN, RecTy *T) - : TypedInit(T), VarName(StringInit::get(VN)) {} + : TypedInit(IK_VarInit, T), VarName(StringInit::get(VN)) {} explicit VarInit(Init *VN, RecTy *T) - : TypedInit(T), VarName(VN) {} + : TypedInit(IK_VarInit, T), VarName(VN) {} - VarInit(const VarInit &Other); // Do not define. - VarInit &operator=(const VarInit &Other); // Do not define. 
+ VarInit(const VarInit &Other) LLVM_DELETED_FUNCTION; + VarInit &operator=(const VarInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_VarInit; + } static VarInit *get(const std::string &VN, RecTy *T); static VarInit *get(Init *VN, RecTy *T); @@ -1003,8 +1131,6 @@ public: return getNameInit()->getAsUnquotedString(); } - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const; virtual Init *resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const; @@ -1019,6 +1145,8 @@ public: /// virtual Init *resolveReferences(Record &R, const RecordVal *RV) const; + virtual Init *getBit(unsigned Bit) const; + virtual std::string getAsString() const { return getName(); } }; @@ -1029,27 +1157,37 @@ class VarBitInit : public Init { TypedInit *TI; unsigned Bit; - VarBitInit(TypedInit *T, unsigned B) : TI(T), Bit(B) { - assert(T->getType() && dynamic_cast(T->getType()) && - ((BitsRecTy*)T->getType())->getNumBits() > B && + VarBitInit(TypedInit *T, unsigned B) : Init(IK_VarBitInit), TI(T), Bit(B) { + assert(T->getType() && + (isa(T->getType()) || + (isa(T->getType()) && + cast(T->getType())->getNumBits() > B)) && "Illegal VarBitInit expression!"); } - VarBitInit(const VarBitInit &Other); // Do not define. - VarBitInit &operator=(const VarBitInit &Other); // Do not define. + VarBitInit(const VarBitInit &Other) LLVM_DELETED_FUNCTION; + VarBitInit &operator=(const VarBitInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_VarBitInit; + } static VarBitInit *get(TypedInit *T, unsigned B); virtual Init *convertInitializerTo(RecTy *Ty) const { return Ty->convertValue(const_cast(this)); } - TypedInit *getVariable() const { return TI; } - unsigned getBitNum() const { return Bit; } + virtual Init *getBitVar() const { return TI; } + virtual unsigned getBitNum() const { return Bit; } virtual std::string getAsString() const; virtual Init *resolveReferences(Record &R, const RecordVal *RV) const; + + virtual Init *getBit(unsigned B) const { + assert(B < 1 && "Bit index out of range!"); + return const_cast(this); + } }; /// VarListElementInit - List[4] - Represent access to one element of a var or @@ -1059,18 +1197,20 @@ class VarListElementInit : public TypedInit { unsigned Element; VarListElementInit(TypedInit *T, unsigned E) - : TypedInit(dynamic_cast(T->getType())->getElementType()), - TI(T), Element(E) { - assert(T->getType() && dynamic_cast(T->getType()) && + : TypedInit(IK_VarListElementInit, + cast(T->getType())->getElementType()), + TI(T), Element(E) { + assert(T->getType() && isa(T->getType()) && "Illegal VarBitInit expression!"); } - VarListElementInit(const VarListElementInit &Other); // Do not define. - VarListElementInit &operator=(const VarListElementInit &Other); // Do - // not - // define. 
+ VarListElementInit(const VarListElementInit &Other) LLVM_DELETED_FUNCTION; + void operator=(const VarListElementInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_VarListElementInit; + } static VarListElementInit *get(TypedInit *T, unsigned E); virtual Init *convertInitializerTo(RecTy *Ty) const { @@ -1080,9 +1220,6 @@ public: TypedInit *getVariable() const { return TI; } unsigned getElementNum() const { return Element; } - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const; - /// resolveListElementReference - This method is used to implement /// VarListElementInit::resolveReferences. If the list element is resolvable /// now, we return the resolved value, otherwise we return null. @@ -1092,6 +1229,8 @@ public: virtual std::string getAsString() const; virtual Init *resolveReferences(Record &R, const RecordVal *RV) const; + + virtual Init *getBit(unsigned Bit) const; }; /// DefInit - AL - Represent a reference to a 'def' in the description @@ -1099,13 +1238,16 @@ public: class DefInit : public TypedInit { Record *Def; - DefInit(Record *D, RecordRecTy *T) : TypedInit(T), Def(D) {} + DefInit(Record *D, RecordRecTy *T) : TypedInit(IK_DefInit, T), Def(D) {} friend class Record; - DefInit(const DefInit &Other); // Do not define. - DefInit &operator=(const DefInit &Other); // Do not define. + DefInit(const DefInit &Other) LLVM_DELETED_FUNCTION; + DefInit &operator=(const DefInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_DefInit; + } static DefInit *get(Record*); virtual Init *convertInitializerTo(RecTy *Ty) const { @@ -1122,12 +1264,7 @@ public: virtual std::string getAsString() const; - /// resolveBitReference - This method is used to implement - /// VarBitInit::resolveReferences. If the bit is able to be resolved, we - /// simply return the resolved value, otherwise we return null. - /// - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const { + virtual Init *getBit(unsigned Bit) const { llvm_unreachable("Illegal bit reference off def"); } @@ -1148,14 +1285,17 @@ class FieldInit : public TypedInit { std::string FieldName; // Field we are accessing FieldInit(Init *R, const std::string &FN) - : TypedInit(R->getFieldType(FN)), Rec(R), FieldName(FN) { + : TypedInit(IK_FieldInit, R->getFieldType(FN)), Rec(R), FieldName(FN) { assert(getType() && "FieldInit with non-record type!"); } - FieldInit(const FieldInit &Other); // Do not define. - FieldInit &operator=(const FieldInit &Other); // Do not define. 
+ FieldInit(const FieldInit &Other) LLVM_DELETED_FUNCTION; + FieldInit &operator=(const FieldInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_FieldInit; + } static FieldInit *get(Init *R, const std::string &FN); static FieldInit *get(Init *R, const Init *FN); @@ -1163,8 +1303,8 @@ public: return Ty->convertValue(const_cast(this)); } - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const; + virtual Init *getBit(unsigned Bit) const; + virtual Init *resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const; @@ -1189,14 +1329,17 @@ class DagInit : public TypedInit, public FoldingSetNode { DagInit(Init *V, const std::string &VN, ArrayRef ArgRange, ArrayRef NameRange) - : TypedInit(DagRecTy::get()), Val(V), ValName(VN), + : TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN), Args(ArgRange.begin(), ArgRange.end()), ArgNames(NameRange.begin(), NameRange.end()) {} - DagInit(const DagInit &Other); // Do not define. - DagInit &operator=(const DagInit &Other); // Do not define. + DagInit(const DagInit &Other) LLVM_DELETED_FUNCTION; + DagInit &operator=(const DagInit &Other) LLVM_DELETED_FUNCTION; public: + static bool classof(const Init *I) { + return I->getKind() == IK_DagInit; + } static DagInit *get(Init *V, const std::string &VN, ArrayRef ArgRange, ArrayRef NameRange); @@ -1243,8 +1386,7 @@ public: inline size_t name_size () const { return ArgNames.size(); } inline bool name_empty() const { return ArgNames.empty(); } - virtual Init *resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const { + virtual Init *getBit(unsigned Bit) const { llvm_unreachable("Illegal bit reference off dag"); } @@ -1301,7 +1443,9 @@ class Record { // Unique record ID. unsigned ID; Init *Name; - SMLoc Loc; + // Location where record was instantiated, followed by the location of + // multiclass prototypes used. + SmallVector Locs; std::vector TemplateArgs; std::vector Values; std::vector SuperClasses; @@ -1317,15 +1461,25 @@ class Record { public: // Constructs a record. - explicit Record(const std::string &N, SMLoc loc, RecordKeeper &records) : - ID(LastID++), Name(StringInit::get(N)), Loc(loc), TrackedRecords(records), - TheInit(0) { + explicit Record(const std::string &N, ArrayRef locs, + RecordKeeper &records) : + ID(LastID++), Name(StringInit::get(N)), Locs(locs.begin(), locs.end()), + TrackedRecords(records), TheInit(0) { init(); } - explicit Record(Init *N, SMLoc loc, RecordKeeper &records) : - ID(LastID++), Name(N), Loc(loc), TrackedRecords(records), TheInit(0) { + explicit Record(Init *N, ArrayRef locs, RecordKeeper &records) : + ID(LastID++), Name(N), Locs(locs.begin(), locs.end()), + TrackedRecords(records), TheInit(0) { init(); } + + // When copy-constructing a Record, we must still guarantee a globally unique + // ID number. All other fields can be copied normally. + Record(const Record &O) : + ID(LastID++), Name(O.Name), Locs(O.Locs), TemplateArgs(O.TemplateArgs), + Values(O.Values), SuperClasses(O.SuperClasses), + TrackedRecords(O.TrackedRecords), TheInit(O.TheInit) { } + ~Record() {} @@ -1345,7 +1499,7 @@ public: void setName(Init *Name); // Also updates RecordKeeper. void setName(const std::string &Name); // Also updates RecordKeeper. - SMLoc getLoc() const { return Loc; } + ArrayRef getLoc() const { return Locs; } /// get the corresponding DefInit. 
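// Illustrative sketch: getLoc() now returns the whole instantiation chain
// (per the comment above: the record's own location first, then the
// multiclass prototypes it was instantiated through), so callers receive an
// ArrayRef<SMLoc> rather than a single SMLoc.  'primaryLoc' is a
// hypothetical client helper, not an API from this header.
#include "llvm/TableGen/Record.h"
#include <cassert>

static llvm::SMLoc primaryLoc(const llvm::Record &R) {
  llvm::ArrayRef<llvm::SMLoc> Locs = R.getLoc();
  assert(!Locs.empty() && "every parsed record carries at least one loc");
  return Locs.front();   // remaining entries: enclosing multiclass prototypes
}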
DefInit *getDefInit(); @@ -1507,6 +1661,12 @@ public: /// bool getValueAsBit(StringRef FieldName) const; + /// getValueAsBitOrUnset - This method looks up the specified field and + /// returns its value as a bit. If the field is unset, sets Unset to true and + /// retunrs false. + /// + bool getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const; + /// getValueAsInt - This method looks up the specified field and returns its /// value as an int64_t, throwing an exception if the field does not exist or /// if the value is not the right type. @@ -1601,6 +1761,16 @@ struct LessRecord { } }; +/// LessRecordByID - Sorting predicate to sort record pointers by their +/// unique ID. If you just need a deterministic order, use this, since it +/// just compares two `unsigned`; the other sorting predicates require +/// string manipulation. +struct LessRecordByID { + bool operator()(const Record *LHS, const Record *RHS) const { + return LHS->getID() < RHS->getID(); + } +}; + /// LessRecordFieldName - Sorting predicate to sort record pointers by their /// name field. /// diff --git a/include/llvm/TableGen/TableGenAction.h b/include/llvm/TableGen/TableGenAction.h deleted file mode 100644 index 733ae62..0000000 --- a/include/llvm/TableGen/TableGenAction.h +++ /dev/null @@ -1,35 +0,0 @@ -//===- llvm/TableGen/TableGenAction.h - defines TableGenAction --*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the TableGenAction base class to be derived from by -// tblgen tools. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_TABLEGEN_TABLEGENACTION_H -#define LLVM_TABLEGEN_TABLEGENACTION_H - -namespace llvm { - -class raw_ostream; -class RecordKeeper; - -class TableGenAction { - virtual void anchor(); -public: - virtual ~TableGenAction() {} - - /// Perform the action using Records, and write output to OS. - /// @returns true on error, false otherwise - virtual bool operator()(raw_ostream &OS, RecordKeeper &Records) = 0; -}; - -} - -#endif diff --git a/include/llvm/Target/Mangler.h b/include/llvm/Target/Mangler.h index d5e165e..a50f54a 100644 --- a/include/llvm/Target/Mangler.h +++ b/include/llvm/Target/Mangler.h @@ -22,7 +22,7 @@ class GlobalValue; template class SmallVectorImpl; class MCContext; class MCSymbol; -class TargetData; +class DataLayout; class Mangler { public: @@ -34,7 +34,7 @@ public: private: MCContext &Context; - const TargetData &TD; + const DataLayout &TD; /// AnonGlobalIDs - We need to give global values the same name every time /// they are mangled. This keeps track of the number we give to anonymous @@ -47,20 +47,19 @@ private: unsigned NextAnonGlobalID; public: - Mangler(MCContext &context, const TargetData &td) + Mangler(MCContext &context, const DataLayout &td) : Context(context), TD(td), NextAnonGlobalID(1) {} /// getSymbol - Return the MCSymbol for the specified global value. This /// symbol is the main label that is the address of the global. MCSymbol *getSymbol(const GlobalValue *GV); - /// getNameWithPrefix - Fill OutName with the name of the appropriate prefix /// and the specified global variable's name. If the global variable doesn't /// have a name, this fills in a unique name for the global. 
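// Illustrative sketch combining two additions above: getValueAsBitOrUnset()
// distinguishes a genuinely unset bit field from one set to 0, and
// LessRecordByID gives a deterministic order by comparing unique IDs instead
// of strings.  The helper name, the field name "mayLoad", and the policy are
// assumptions for the example only.
#include "llvm/TableGen/Record.h"
#include <algorithm>
#include <vector>

static std::vector<llvm::Record *>
collectUnsetMayLoad(const std::vector<llvm::Record *> &Instrs) {
  std::vector<llvm::Record *> Result;
  for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
    bool Unset = false;
    (void)Instrs[i]->getValueAsBitOrUnset("mayLoad", Unset);
    if (Unset)
      Result.push_back(Instrs[i]);
  }
  std::sort(Result.begin(), Result.end(), llvm::LessRecordByID());
  return Result;
}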
void getNameWithPrefix(SmallVectorImpl &OutName, const GlobalValue *GV, bool isImplicitlyPrivate); - + /// getNameWithPrefix - Fill OutName with the name of the appropriate prefix /// and the specified name as the global variable name. GVName must not be /// empty. diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td index 1816445..12f5c0e 100644 --- a/include/llvm/Target/Target.td +++ b/include/llvm/Target/Target.td @@ -343,8 +343,8 @@ class Instruction { bit isBarrier = 0; // Can control flow fall through this instruction? bit isCall = 0; // Is this instruction a call instruction? bit canFoldAsLoad = 0; // Can this be folded as a simple memory operand? - bit mayLoad = 0; // Is it possible for this inst to read memory? - bit mayStore = 0; // Is it possible for this inst to write memory? + bit mayLoad = ?; // Is it possible for this inst to read memory? + bit mayStore = ?; // Is it possible for this inst to write memory? bit isConvertibleToThreeAddress = 0; // Can this 2-addr instruction promote? bit isCommutable = 0; // Is this 3 operand instruction commutable? bit isTerminator = 0; // Is this part of the terminator for a basic block? @@ -369,7 +369,7 @@ class Instruction { // // neverHasSideEffects - Set on an instruction with no pattern if it has no // side effects. - bit hasSideEffects = 0; + bit hasSideEffects = ?; bit neverHasSideEffects = 0; // Is this instruction a "real" instruction (with a distinct machine @@ -495,7 +495,8 @@ def ptr_rc : PointerLikeRegClass<0>; /// unknown definition - Mark this operand as being of unknown type, causing /// it to be resolved by inference in the context it is used. -def unknown; +class unknown_class; +def unknown : unknown_class; /// AsmOperandClass - Representation for the kinds of operands which the target /// specific parser can create and the assembly matcher may need to distinguish. @@ -602,23 +603,31 @@ def f64imm : Operand; /// def zero_reg; +/// OperandWithDefaultOps - This Operand class can be used as the parent class +/// for an Operand that needs to be initialized with a default value if +/// no value is supplied in a pattern. This class can be used to simplify the +/// pattern definitions for instructions that have target specific flags +/// encoded as immediate operands. +class OperandWithDefaultOps + : Operand { + dag DefaultOps = defaultops; +} + /// PredicateOperand - This can be used to define a predicate operand for an /// instruction. OpTypes specifies the MIOperandInfo for the operand, and /// AlwaysVal specifies the value of this predicate when set to "always /// execute". class PredicateOperand - : Operand { + : OperandWithDefaultOps { let MIOperandInfo = OpTypes; - dag DefaultOps = AlwaysVal; } /// OptionalDefOperand - This is used to define a optional definition operand /// for an instruction. DefaultOps is the register the operand represents if /// none is supplied, e.g. zero_reg. class OptionalDefOperand - : Operand { + : OperandWithDefaultOps { let MIOperandInfo = OpTypes; - dag DefaultOps = defaultops; } @@ -631,6 +640,17 @@ class InstrInfo { // Sparc manual specifies its instructions in the format [31..0] (big), while // PowerPC specifies them using the format [0..31] (little). bit isLittleEndianEncoding = 0; + + // The instruction properties mayLoad, mayStore, and hasSideEffects are unset + // by default, and TableGen will infer their value from the instruction + // pattern when possible. 
+ // + // Normally, TableGen will issue an error it it can't infer the value of a + // property that hasn't been set explicitly. When guessInstructionProperties + // is set, it will guess a safe value instead. + // + // This option is a temporary migration help. It will go away. + bit guessInstructionProperties = 1; } // Standard Pseudo Instructions. @@ -734,6 +754,18 @@ def BUNDLE : Instruction { let InOperandList = (ins variable_ops); let AsmString = "BUNDLE"; } +def LIFETIME_START : Instruction { + let OutOperandList = (outs); + let InOperandList = (ins i32imm:$id); + let AsmString = "LIFETIME_START"; + let neverHasSideEffects = 1; +} +def LIFETIME_END : Instruction { + let OutOperandList = (outs); + let InOperandList = (ins i32imm:$id); + let AsmString = "LIFETIME_END"; + let neverHasSideEffects = 1; +} } //===----------------------------------------------------------------------===// @@ -753,6 +785,10 @@ class AsmParser { // function of the AsmParser class to call on every matched instruction. // This can be used to perform target specific instruction post-processing. string AsmParserInstCleanup = ""; + + //ShouldEmitMatchRegisterName - Set to false if the target needs a hand + //written register name matcher + bit ShouldEmitMatchRegisterName = 1; } def DefaultAsmParser : AsmParser; @@ -953,12 +989,64 @@ class Processor f> { // ProcessorModel allows subtargets to specify the more general // SchedMachineModel instead if a ProcessorItinerary. Subtargets will // gradually move to this newer form. +// +// Although this class always passes NoItineraries to the Processor +// class, the SchedMachineModel may still define valid Itineraries. class ProcessorModel f> : Processor { let SchedModel = m; } //===----------------------------------------------------------------------===// +// InstrMapping - This class is used to create mapping tables to relate +// instructions with each other based on the values specified in RowFields, +// ColFields, KeyCol and ValueCols. +// +class InstrMapping { + // FilterClass - Used to limit search space only to the instructions that + // define the relationship modeled by this InstrMapping record. + string FilterClass; + + // RowFields - List of fields/attributes that should be same for all the + // instructions in a row of the relation table. Think of this as a set of + // properties shared by all the instructions related by this relationship + // model and is used to categorize instructions into subgroups. For instance, + // if we want to define a relation that maps 'Add' instruction to its + // predicated forms, we can define RowFields like this: + // + // let RowFields = BaseOp + // All add instruction predicated/non-predicated will have to set their BaseOp + // to the same value. + // + // def Add: { let BaseOp = 'ADD'; let predSense = 'nopred' } + // def Add_predtrue: { let BaseOp = 'ADD'; let predSense = 'true' } + // def Add_predfalse: { let BaseOp = 'ADD'; let predSense = 'false' } + list RowFields = []; + + // List of fields/attributes that are same for all the instructions + // in a column of the relation table. + // Ex: let ColFields = 'predSense' -- It means that the columns are arranged + // based on the 'predSense' values. All the instruction in a specific + // column have the same value and it is fixed for the column according + // to the values set in 'ValueCols'. + list ColFields = []; + + // Values for the fields/attributes listed in 'ColFields'. 
+ // Ex: let KeyCol = 'nopred' -- It means that the key instruction (instruction + // that models this relation) should be non-predicated. + // In the example above, 'Add' is the key instruction. + list KeyCol = []; + + // List of values for the fields/attributes listed in 'ColFields', one for + // each column in the relation table. + // + // Ex: let ValueCols = [['true'],['false']] -- It adds two columns in the + // table. First column requires all the instructions to have predSense + // set to 'true' and second column requires it to be 'false'. + list > ValueCols = []; +} + +//===----------------------------------------------------------------------===// // Pull in the common support for calling conventions. // include "llvm/Target/TargetCallingConv.td" diff --git a/include/llvm/Target/TargetCallingConv.h b/include/llvm/Target/TargetCallingConv.h index f8cebef..2160e37 100644 --- a/include/llvm/Target/TargetCallingConv.h +++ b/include/llvm/Target/TargetCallingConv.h @@ -113,9 +113,18 @@ namespace ISD { MVT VT; bool Used; + /// Index original Function's argument. + unsigned OrigArgIndex; + + /// Offset in bytes of current input value relative to the beginning of + /// original argument. E.g. if argument was splitted into four 32 bit + /// registers, we got 4 InputArgs with PartOffsets 0, 4, 8 and 12. + unsigned PartOffset; + InputArg() : VT(MVT::Other), Used(false) {} - InputArg(ArgFlagsTy flags, EVT vt, bool used) - : Flags(flags), Used(used) { + InputArg(ArgFlagsTy flags, EVT vt, bool used, + unsigned origIdx, unsigned partOffs) + : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) { VT = vt.getSimpleVT(); } }; @@ -131,9 +140,19 @@ namespace ISD { /// IsFixed - Is this a "fixed" value, ie not passed through a vararg "...". bool IsFixed; + /// Index original Function's argument. + unsigned OrigArgIndex; + + /// Offset in bytes of current output value relative to the beginning of + /// original argument. E.g. if argument was splitted into four 32 bit + /// registers, we got 4 OutputArgs with PartOffsets 0, 4, 8 and 12. + unsigned PartOffset; + OutputArg() : IsFixed(false) {} - OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed) - : Flags(flags), IsFixed(isfixed) { + OutputArg(ArgFlagsTy flags, EVT vt, bool isfixed, + unsigned origIdx, unsigned partOffs) + : Flags(flags), IsFixed(isfixed), OrigArgIndex(origIdx), + PartOffset(partOffs) { VT = vt.getSimpleVT(); } }; diff --git a/include/llvm/Target/TargetData.h b/include/llvm/Target/TargetData.h deleted file mode 100644 index 4f94ab7..0000000 --- a/include/llvm/Target/TargetData.h +++ /dev/null @@ -1,363 +0,0 @@ -//===-- llvm/Target/TargetData.h - Data size & alignment info ---*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines target properties related to datatype size/offset/alignment -// information. It uses lazy annotations to cache information about how -// structure types are laid out and used. -// -// This structure should be created once, filled in if the defaults are not -// correct and then passed around by const&. None of the members functions -// require modification to the object. 
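// Illustrative sketch: building the per-part InputArg descriptors for one IR
// argument that calling-convention lowering has split into four i32 pieces,
// mirroring the PartOffset comment above (offsets 0, 4, 8, 12).  The default
// flags and the helper itself are assumptions; only the five-argument
// InputArg constructor comes from this hunk.
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Target/TargetCallingConv.h"

static void addSplitArg(llvm::SmallVectorImpl<llvm::ISD::InputArg> &Ins,
                        unsigned OrigArgIdx) {
  llvm::ISD::ArgFlagsTy Flags;           // default flags, for the sketch only
  for (unsigned Part = 0; Part != 4; ++Part)
    Ins.push_back(llvm::ISD::InputArg(Flags, llvm::MVT::i32, /*used=*/true,
                                      OrigArgIdx, /*partOffs=*/Part * 4));
}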
-// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_TARGET_TARGETDATA_H -#define LLVM_TARGET_TARGETDATA_H - -#include "llvm/Pass.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/Support/DataTypes.h" - -namespace llvm { - -class Value; -class Type; -class IntegerType; -class StructType; -class StructLayout; -class GlobalVariable; -class LLVMContext; -template -class ArrayRef; - -/// Enum used to categorize the alignment types stored by TargetAlignElem -enum AlignTypeEnum { - INTEGER_ALIGN = 'i', ///< Integer type alignment - VECTOR_ALIGN = 'v', ///< Vector type alignment - FLOAT_ALIGN = 'f', ///< Floating point type alignment - AGGREGATE_ALIGN = 'a', ///< Aggregate alignment - STACK_ALIGN = 's' ///< Stack objects alignment -}; - -/// Target alignment element. -/// -/// Stores the alignment data associated with a given alignment type (pointer, -/// integer, vector, float) and type bit width. -/// -/// @note The unusual order of elements in the structure attempts to reduce -/// padding and make the structure slightly more cache friendly. -struct TargetAlignElem { - AlignTypeEnum AlignType : 8; ///< Alignment type (AlignTypeEnum) - unsigned ABIAlign; ///< ABI alignment for this type/bitw - unsigned PrefAlign; ///< Pref. alignment for this type/bitw - uint32_t TypeBitWidth; ///< Type bit width - - /// Initializer - static TargetAlignElem get(AlignTypeEnum align_type, unsigned abi_align, - unsigned pref_align, uint32_t bit_width); - /// Equality predicate - bool operator==(const TargetAlignElem &rhs) const; -}; - -/// TargetData - This class holds a parsed version of the target data layout -/// string in a module and provides methods for querying it. The target data -/// layout string is specified *by the target* - a frontend generating LLVM IR -/// is required to generate the right target data for the target being codegen'd -/// to. If some measure of portability is desired, an empty string may be -/// specified in the module. -class TargetData : public ImmutablePass { -private: - bool LittleEndian; ///< Defaults to false - unsigned PointerMemSize; ///< Pointer size in bytes - unsigned PointerABIAlign; ///< Pointer ABI alignment - unsigned PointerPrefAlign; ///< Pointer preferred alignment - unsigned StackNaturalAlign; ///< Stack natural alignment - - SmallVector LegalIntWidths; ///< Legal Integers. - - /// Alignments- Where the primitive type alignment data is stored. - /// - /// @sa init(). - /// @note Could support multiple size pointer alignments, e.g., 32-bit - /// pointers vs. 64-bit pointers by extending TargetAlignment, but for now, - /// we don't. - SmallVector Alignments; - - /// InvalidAlignmentElem - This member is a signal that a requested alignment - /// type and bit width were not found in the SmallVector. - static const TargetAlignElem InvalidAlignmentElem; - - // The StructType -> StructLayout map. - mutable void *LayoutMap; - - //! Set/initialize target alignments - void setAlignment(AlignTypeEnum align_type, unsigned abi_align, - unsigned pref_align, uint32_t bit_width); - unsigned getAlignmentInfo(AlignTypeEnum align_type, uint32_t bit_width, - bool ABIAlign, Type *Ty) const; - //! Internal helper method that returns requested alignment for type. - unsigned getAlignment(Type *Ty, bool abi_or_pref) const; - - /// Valid alignment predicate. - /// - /// Predicate that tests a TargetAlignElem reference returned by get() against - /// InvalidAlignmentElem. 
- bool validAlignment(const TargetAlignElem &align) const { - return &align != &InvalidAlignmentElem; - } - - /// Initialise a TargetData object with default values, ensure that the - /// target data pass is registered. - void init(); - -public: - /// Default ctor. - /// - /// @note This has to exist, because this is a pass, but it should never be - /// used. - TargetData(); - - /// Constructs a TargetData from a specification string. See init(). - explicit TargetData(StringRef TargetDescription) - : ImmutablePass(ID) { - std::string errMsg = parseSpecifier(TargetDescription, this); - assert(errMsg == "" && "Invalid target data layout string."); - (void)errMsg; - } - - /// Parses a target data specification string. Returns an error message - /// if the string is malformed, or the empty string on success. Optionally - /// initialises a TargetData object if passed a non-null pointer. - static std::string parseSpecifier(StringRef TargetDescription, TargetData* td = 0); - - /// Initialize target data from properties stored in the module. - explicit TargetData(const Module *M); - - TargetData(const TargetData &TD) : - ImmutablePass(ID), - LittleEndian(TD.isLittleEndian()), - PointerMemSize(TD.PointerMemSize), - PointerABIAlign(TD.PointerABIAlign), - PointerPrefAlign(TD.PointerPrefAlign), - LegalIntWidths(TD.LegalIntWidths), - Alignments(TD.Alignments), - LayoutMap(0) - { } - - ~TargetData(); // Not virtual, do not subclass this class - - /// Target endianness... - bool isLittleEndian() const { return LittleEndian; } - bool isBigEndian() const { return !LittleEndian; } - - /// getStringRepresentation - Return the string representation of the - /// TargetData. This representation is in the same format accepted by the - /// string constructor above. - std::string getStringRepresentation() const; - - /// isLegalInteger - This function returns true if the specified type is - /// known to be a native integer type supported by the CPU. For example, - /// i64 is not native on most 32-bit CPUs and i37 is not native on any known - /// one. This returns false if the integer width is not legal. - /// - /// The width is specified in bits. - /// - bool isLegalInteger(unsigned Width) const { - for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i) - if (LegalIntWidths[i] == Width) - return true; - return false; - } - - bool isIllegalInteger(unsigned Width) const { - return !isLegalInteger(Width); - } - - /// Returns true if the given alignment exceeds the natural stack alignment. - bool exceedsNaturalStackAlignment(unsigned Align) const { - return (StackNaturalAlign != 0) && (Align > StackNaturalAlign); - } - - /// fitsInLegalInteger - This function returns true if the specified type fits - /// in a native integer type supported by the CPU. For example, if the CPU - /// only supports i32 as a native integer type, then i27 fits in a legal - // integer type but i45 does not. 
- bool fitsInLegalInteger(unsigned Width) const { - for (unsigned i = 0, e = (unsigned)LegalIntWidths.size(); i != e; ++i) - if (Width <= LegalIntWidths[i]) - return true; - return false; - } - - /// Target pointer alignment - unsigned getPointerABIAlignment() const { return PointerABIAlign; } - /// Return target's alignment for stack-based pointers - unsigned getPointerPrefAlignment() const { return PointerPrefAlign; } - /// Target pointer size - unsigned getPointerSize() const { return PointerMemSize; } - /// Target pointer size, in bits - unsigned getPointerSizeInBits() const { return 8*PointerMemSize; } - - /// Size examples: - /// - /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*] - /// ---- ---------- --------------- --------------- - /// i1 1 8 8 - /// i8 8 8 8 - /// i19 19 24 32 - /// i32 32 32 32 - /// i100 100 104 128 - /// i128 128 128 128 - /// Float 32 32 32 - /// Double 64 64 64 - /// X86_FP80 80 80 96 - /// - /// [*] The alloc size depends on the alignment, and thus on the target. - /// These values are for x86-32 linux. - - /// getTypeSizeInBits - Return the number of bits necessary to hold the - /// specified type. For example, returns 36 for i36 and 80 for x86_fp80. - uint64_t getTypeSizeInBits(Type* Ty) const; - - /// getTypeStoreSize - Return the maximum number of bytes that may be - /// overwritten by storing the specified type. For example, returns 5 - /// for i36 and 10 for x86_fp80. - uint64_t getTypeStoreSize(Type *Ty) const { - return (getTypeSizeInBits(Ty)+7)/8; - } - - /// getTypeStoreSizeInBits - Return the maximum number of bits that may be - /// overwritten by storing the specified type; always a multiple of 8. For - /// example, returns 40 for i36 and 80 for x86_fp80. - uint64_t getTypeStoreSizeInBits(Type *Ty) const { - return 8*getTypeStoreSize(Ty); - } - - /// getTypeAllocSize - Return the offset in bytes between successive objects - /// of the specified type, including alignment padding. This is the amount - /// that alloca reserves for this type. For example, returns 12 or 16 for - /// x86_fp80, depending on alignment. - uint64_t getTypeAllocSize(Type* Ty) const { - // Round up to the next alignment boundary. - return RoundUpAlignment(getTypeStoreSize(Ty), getABITypeAlignment(Ty)); - } - - /// getTypeAllocSizeInBits - Return the offset in bits between successive - /// objects of the specified type, including alignment padding; always a - /// multiple of 8. This is the amount that alloca reserves for this type. - /// For example, returns 96 or 128 for x86_fp80, depending on alignment. - uint64_t getTypeAllocSizeInBits(Type* Ty) const { - return 8*getTypeAllocSize(Ty); - } - - /// getABITypeAlignment - Return the minimum ABI-required alignment for the - /// specified type. - unsigned getABITypeAlignment(Type *Ty) const; - - /// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for - /// an integer type of the specified bitwidth. - unsigned getABIIntegerTypeAlignment(unsigned BitWidth) const; - - - /// getCallFrameTypeAlignment - Return the minimum ABI-required alignment - /// for the specified type when it is part of a call frame. - unsigned getCallFrameTypeAlignment(Type *Ty) const; - - - /// getPrefTypeAlignment - Return the preferred stack/global alignment for - /// the specified type. This is always at least as good as the ABI alignment. 
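// Worked example of the rounding rule used by getTypeAllocSize() above:
// alloc size = store size rounded up to the ABI alignment via
// (Val + (Align-1)) & ~(Align-1).  Plain C++, no LLVM dependency; the
// function names are made up for the example, and the expected values match
// the size table above (i19 -> 32 bits, x86_fp80 with 4-byte ABI alignment
// on x86-32 -> 96 bits).
#include <cassert>
#include <cstdint>

static uint64_t roundUpAlignment(uint64_t Val, unsigned Align) {
  assert((Align & (Align - 1)) == 0 && "alignment must be a power of 2");
  return (Val + (Align - 1)) & ~uint64_t(Align - 1);
}

static void roundUpExamples() {
  assert(roundUpAlignment(3, 4) == 4);    // i19: 3-byte store size -> 4 bytes
  assert(roundUpAlignment(10, 4) == 12);  // x86_fp80: 10-byte store -> 12 bytes
}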
- unsigned getPrefTypeAlignment(Type *Ty) const; - - /// getPreferredTypeAlignmentShift - Return the preferred alignment for the - /// specified type, returned as log2 of the value (a shift amount). - /// - unsigned getPreferredTypeAlignmentShift(Type *Ty) const; - - /// getIntPtrType - Return an unsigned integer type that is the same size or - /// greater to the host pointer size. - /// - IntegerType *getIntPtrType(LLVMContext &C) const; - - /// getIndexedOffset - return the offset from the beginning of the type for - /// the specified indices. This is used to implement getelementptr. - /// - uint64_t getIndexedOffset(Type *Ty, ArrayRef Indices) const; - - /// getStructLayout - Return a StructLayout object, indicating the alignment - /// of the struct, its size, and the offsets of its fields. Note that this - /// information is lazily cached. - const StructLayout *getStructLayout(StructType *Ty) const; - - /// getPreferredAlignment - Return the preferred alignment of the specified - /// global. This includes an explicitly requested alignment (if the global - /// has one). - unsigned getPreferredAlignment(const GlobalVariable *GV) const; - - /// getPreferredAlignmentLog - Return the preferred alignment of the - /// specified global, returned in log form. This includes an explicitly - /// requested alignment (if the global has one). - unsigned getPreferredAlignmentLog(const GlobalVariable *GV) const; - - /// RoundUpAlignment - Round the specified value up to the next alignment - /// boundary specified by Alignment. For example, 7 rounded up to an - /// alignment boundary of 4 is 8. 8 rounded up to the alignment boundary of 4 - /// is 8 because it is already aligned. - template - static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) { - assert((Alignment & (Alignment-1)) == 0 && "Alignment must be power of 2!"); - return (Val + (Alignment-1)) & ~UIntTy(Alignment-1); - } - - static char ID; // Pass identification, replacement for typeid -}; - -/// StructLayout - used to lazily calculate structure layout information for a -/// target machine, based on the TargetData structure. -/// -class StructLayout { - uint64_t StructSize; - unsigned StructAlignment; - unsigned NumElements; - uint64_t MemberOffsets[1]; // variable sized array! -public: - - uint64_t getSizeInBytes() const { - return StructSize; - } - - uint64_t getSizeInBits() const { - return 8*StructSize; - } - - unsigned getAlignment() const { - return StructAlignment; - } - - /// getElementContainingOffset - Given a valid byte offset into the structure, - /// return the structure index that contains it. - /// - unsigned getElementContainingOffset(uint64_t Offset) const; - - uint64_t getElementOffset(unsigned Idx) const { - assert(Idx < NumElements && "Invalid element idx!"); - return MemberOffsets[Idx]; - } - - uint64_t getElementOffsetInBits(unsigned Idx) const { - return getElementOffset(Idx)*8; - } - -private: - friend class TargetData; // Only TargetData can create this class - StructLayout(StructType *ST, const TargetData &TD); -}; - -} // End llvm namespace - -#endif diff --git a/include/llvm/Target/TargetELFWriterInfo.h b/include/llvm/Target/TargetELFWriterInfo.h deleted file mode 100644 index 5e48629..0000000 --- a/include/llvm/Target/TargetELFWriterInfo.h +++ /dev/null @@ -1,121 +0,0 @@ -//===-- llvm/Target/TargetELFWriterInfo.h - ELF Writer Info -----*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. 
See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the TargetELFWriterInfo class. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_TARGET_TARGETELFWRITERINFO_H -#define LLVM_TARGET_TARGETELFWRITERINFO_H - -namespace llvm { - - //===--------------------------------------------------------------------===// - // TargetELFWriterInfo - //===--------------------------------------------------------------------===// - - class TargetELFWriterInfo { - protected: - // EMachine - This field is the target specific value to emit as the - // e_machine member of the ELF header. - unsigned short EMachine; - bool is64Bit, isLittleEndian; - public: - - // Machine architectures - enum MachineType { - EM_NONE = 0, // No machine - EM_M32 = 1, // AT&T WE 32100 - EM_SPARC = 2, // SPARC - EM_386 = 3, // Intel 386 - EM_68K = 4, // Motorola 68000 - EM_88K = 5, // Motorola 88000 - EM_486 = 6, // Intel 486 (deprecated) - EM_860 = 7, // Intel 80860 - EM_MIPS = 8, // MIPS R3000 - EM_PPC = 20, // PowerPC - EM_ARM = 40, // ARM - EM_ALPHA = 41, // DEC Alpha - EM_SPARCV9 = 43, // SPARC V9 - EM_X86_64 = 62, // AMD64 - EM_HEXAGON = 164 // Qualcomm Hexagon - }; - - // ELF File classes - enum { - ELFCLASS32 = 1, // 32-bit object file - ELFCLASS64 = 2 // 64-bit object file - }; - - // ELF Endianess - enum { - ELFDATA2LSB = 1, // Little-endian object file - ELFDATA2MSB = 2 // Big-endian object file - }; - - explicit TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_); - virtual ~TargetELFWriterInfo(); - - unsigned short getEMachine() const { return EMachine; } - unsigned getEFlags() const { return 0; } - unsigned getEIClass() const { return is64Bit ? ELFCLASS64 : ELFCLASS32; } - unsigned getEIData() const { - return isLittleEndian ? ELFDATA2LSB : ELFDATA2MSB; - } - - /// ELF Header and ELF Section Header Info - unsigned getHdrSize() const { return is64Bit ? 64 : 52; } - unsigned getSHdrSize() const { return is64Bit ? 64 : 40; } - - /// Symbol Table Info - unsigned getSymTabEntrySize() const { return is64Bit ? 24 : 16; } - - /// getPrefELFAlignment - Returns the preferred alignment for ELF. This - /// is used to align some sections. - unsigned getPrefELFAlignment() const { return is64Bit ? 8 : 4; } - - /// getRelocationEntrySize - Entry size used in the relocation section - unsigned getRelocationEntrySize() const { - return is64Bit ? (hasRelocationAddend() ? 24 : 16) - : (hasRelocationAddend() ? 12 : 8); - } - - /// getRelocationType - Returns the target specific ELF Relocation type. - /// 'MachineRelTy' contains the object code independent relocation type - virtual unsigned getRelocationType(unsigned MachineRelTy) const = 0; - - /// hasRelocationAddend - True if the target uses an addend in the - /// ELF relocation entry. - virtual bool hasRelocationAddend() const = 0; - - /// getDefaultAddendForRelTy - Gets the default addend value for a - /// relocation entry based on the target ELF relocation type. - virtual long int getDefaultAddendForRelTy(unsigned RelTy, - long int Modifier = 0) const = 0; - - /// getRelTySize - Returns the size of relocatable field in bits - virtual unsigned getRelocationTySize(unsigned RelTy) const = 0; - - /// isPCRelativeRel - True if the relocation type is pc relative - virtual bool isPCRelativeRel(unsigned RelTy) const = 0; - - /// getJumpTableRelocationTy - Returns the machine relocation type used - /// to reference a jumptable. 
- virtual unsigned getAbsoluteLabelMachineRelTy() const = 0; - - /// computeRelocation - Some relocatable fields could be relocated - /// directly, avoiding the relocation symbol emission, compute the - /// final relocation value for this symbol. - virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset, - unsigned RelTy) const = 0; - }; - -} // end llvm namespace - -#endif // LLVM_TARGET_TARGETELFWRITERINFO_H diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h index da30ab8..4570813 100644 --- a/include/llvm/Target/TargetInstrInfo.h +++ b/include/llvm/Target/TargetInstrInfo.h @@ -45,8 +45,8 @@ template class SmallVectorImpl; /// TargetInstrInfo - Interface to description of machine instruction set /// class TargetInstrInfo : public MCInstrInfo { - TargetInstrInfo(const TargetInstrInfo &); // DO NOT IMPLEMENT - void operator=(const TargetInstrInfo &); // DO NOT IMPLEMENT + TargetInstrInfo(const TargetInstrInfo &) LLVM_DELETED_FUNCTION; + void operator=(const TargetInstrInfo &) LLVM_DELETED_FUNCTION; public: TargetInstrInfo(int CFSetupOpcode = -1, int CFDestroyOpcode = -1) : CallFrameSetupOpcode(CFSetupOpcode), @@ -459,6 +459,13 @@ public: } /// copyPhysReg - Emit instructions to copy a pair of physical registers. + /// + /// This function should support copies within any legal register class as + /// well as any cross-class copies created during instruction selection. + /// + /// The source and destination registers may overlap, which may require a + /// careful implementation when multiple copy instructions are required for + /// large registers. See for example the ARM target. virtual void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, DebugLoc DL, unsigned DestReg, unsigned SrcReg, @@ -794,29 +801,6 @@ public: const MachineInstr *UseMI, unsigned UseIdx, bool FindMin = false) const; - /// computeOperandLatency - Compute and return the latency of the given data - /// dependent def and use. DefMI must be a valid def. UseMI may be NULL for - /// an unknown use. If the subtarget allows, this may or may not need to call - /// getOperandLatency(). - /// - /// FindMin may be set to get the minimum vs. expected latency. Minimum - /// latency is used for scheduling groups, while expected latency is for - /// instruction cost and critical path. - unsigned computeOperandLatency(const InstrItineraryData *ItinData, - const TargetRegisterInfo *TRI, - const MachineInstr *DefMI, - const MachineInstr *UseMI, - unsigned Reg, bool FindMin) const; - - /// getOutputLatency - Compute and return the output dependency latency of a - /// a given pair of defs which both target the same register. This is usually - /// one. - virtual unsigned getOutputLatency(const InstrItineraryData *ItinData, - const MachineInstr *DefMI, unsigned DefIdx, - const MachineInstr *DepMI) const { - return 1; - } - /// getInstrLatency - Compute the instruction latency of a given instruction. /// If the instruction has higher cost when predicated, it's returned via /// PredCost. @@ -831,6 +815,9 @@ public: unsigned defaultDefLatency(const MCSchedModel *SchedModel, const MachineInstr *DefMI) const; + int computeDefOperandLatency(const InstrItineraryData *ItinData, + const MachineInstr *DefMI, bool FindMin) const; + /// isHighLatencyDef - Return true if this opcode has high latency to its /// result. 
virtual bool isHighLatencyDef(int opc) const { return false; } diff --git a/include/llvm/Target/TargetIntrinsicInfo.h b/include/llvm/Target/TargetIntrinsicInfo.h index c44b923..ce21349 100644 --- a/include/llvm/Target/TargetIntrinsicInfo.h +++ b/include/llvm/Target/TargetIntrinsicInfo.h @@ -14,6 +14,7 @@ #ifndef LLVM_TARGET_TARGETINTRINSICINFO_H #define LLVM_TARGET_TARGETINTRINSICINFO_H +#include "llvm/Support/Compiler.h" #include namespace llvm { @@ -27,8 +28,8 @@ class Type; /// TargetIntrinsicInfo - Interface to description of machine instruction set /// class TargetIntrinsicInfo { - TargetIntrinsicInfo(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT - void operator=(const TargetIntrinsicInfo &); // DO NOT IMPLEMENT + TargetIntrinsicInfo(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION; + void operator=(const TargetIntrinsicInfo &) LLVM_DELETED_FUNCTION; public: TargetIntrinsicInfo(); virtual ~TargetIntrinsicInfo(); diff --git a/include/llvm/Target/TargetLibraryInfo.h b/include/llvm/Target/TargetLibraryInfo.h index ea2874f..a2c97d7 100644 --- a/include/llvm/Target/TargetLibraryInfo.h +++ b/include/llvm/Target/TargetLibraryInfo.h @@ -18,6 +18,26 @@ namespace llvm { namespace LibFunc { enum Func { + /// void operator delete[](void*); + ZdaPv, + /// void operator delete(void*); + ZdlPv, + /// void *new[](unsigned int); + Znaj, + /// void *new[](unsigned int, nothrow); + ZnajRKSt9nothrow_t, + /// void *new[](unsigned long); + Znam, + /// void *new[](unsigned long, nothrow); + ZnamRKSt9nothrow_t, + /// void *new(unsigned int); + Znwj, + /// void *new(unsigned int, nothrow); + ZnwjRKSt9nothrow_t, + /// void *new(unsigned long); + Znwm, + /// void *new(unsigned long, nothrow); + ZnwmRKSt9nothrow_t, /// int __cxa_atexit(void (*f)(void *), void *p, void *d); cxa_atexit, /// void __cxa_guard_abort(guard_t *guard); @@ -33,12 +53,24 @@ namespace llvm { acos, /// float acosf(float x); acosf, + /// double acosh(double x); + acosh, + /// float acoshf(float x); + acoshf, + /// long double acoshl(long double x); + acoshl, /// long double acosl(long double x); acosl, /// double asin(double x); asin, /// float asinf(float x); asinf, + /// double asinh(double x); + asinh, + /// float asinhf(float x); + asinhf, + /// long double asinhl(long double x); + asinhl, /// long double asinl(long double x); asinl, /// double atan(double x); @@ -51,8 +83,22 @@ namespace llvm { atan2l, /// float atanf(float x); atanf, + /// double atanh(double x); + atanh, + /// float atanhf(float x); + atanhf, + /// long double atanhl(long double x); + atanhl, /// long double atanl(long double x); atanl, + /// void *calloc(size_t count, size_t size); + calloc, + /// double cbrt(double x); + cbrt, + /// float cbrtf(float x); + cbrtf, + /// long double cbrtl(long double x); + cbrtl, /// double ceil(double x); ceil, /// float ceilf(float x); @@ -79,6 +125,12 @@ namespace llvm { cosl, /// double exp(double x); exp, + /// double exp10(double x); + exp10, + /// float exp10f(float x); + exp10f, + /// long double exp10l(long double x); + exp10l, /// double exp2(double x); exp2, /// float exp2f(float x); @@ -119,6 +171,8 @@ namespace llvm { fputc, /// int fputs(const char *s, FILE *stream); fputs, + /// void free(void *ptr); + free, /// size_t fwrite(const void *ptr, size_t size, size_t nitems, /// FILE *stream); fwrite, @@ -144,10 +198,18 @@ namespace llvm { log2f, /// double long double log2l(long double x); log2l, + /// double logb(double x); + logb, + /// float logbf(float x); + logbf, + /// long double logbl(long double x); + logbl, 
/// float logf(float x); logf, /// long double logl(long double x); logl, + /// void *malloc(size_t size); + malloc, /// void *memchr(const void *s, int c, size_t n); memchr, /// int memcmp(const void *s1, const void *s2, size_t n); @@ -166,6 +228,8 @@ namespace llvm { nearbyintf, /// long double nearbyintl(long double x); nearbyintl, + /// int posix_memalign(void **memptr, size_t alignment, size_t size); + posix_memalign, /// double pow(double x, double y); pow, /// float powf(float x, float y); @@ -176,6 +240,10 @@ namespace llvm { putchar, /// int puts(const char *s); puts, + /// void *realloc(void *ptr, size_t size); + realloc, + /// void *reallocf(void *ptr, size_t size); + reallocf, /// double rint(double x); rint, /// float rintf(float x); @@ -208,12 +276,20 @@ namespace llvm { sqrtf, /// long double sqrtl(long double x); sqrtl, + /// char *stpcpy(char *s1, const char *s2); + stpcpy, /// char *strcat(char *s1, const char *s2); strcat, /// char *strchr(const char *s, int c); strchr, + /// int strcmp(const char *s1, const char *s2); + strcmp, /// char *strcpy(char *s1, const char *s2); strcpy, + /// size_t strcspn(const char *s1, const char *s2); + strcspn, + /// char *strdup(const char *s1); + strdup, /// size_t strlen(const char *s); strlen, /// char *strncat(char *s1, const char *s2, size_t n); @@ -222,8 +298,33 @@ namespace llvm { strncmp, /// char *strncpy(char *s1, const char *s2, size_t n); strncpy, + /// char *strndup(const char *s1, size_t n); + strndup, /// size_t strnlen(const char *s, size_t maxlen); strnlen, + /// char *strpbrk(const char *s1, const char *s2); + strpbrk, + /// char *strrchr(const char *s, int c); + strrchr, + /// size_t strspn(const char *s1, const char *s2); + strspn, + /// char *strstr(const char *s1, const char *s2); + strstr, + /// double strtod(const char *nptr, char **endptr); + strtod, + /// float strtof(const char *nptr, char **endptr); + strtof, + /// long int strtol(const char *nptr, char **endptr, int base); + strtol, + /// long double strtold(const char *nptr, char **endptr); + strtold, + /// long long int strtoll(const char *nptr, char **endptr, int base); + strtoll, + /// unsigned long int strtoul(const char *nptr, char **endptr, int base); + strtoul, + /// unsigned long long int strtoull(const char *nptr, char **endptr, + /// int base); + strtoull, /// double tan(double x); tan, /// float tanf(float x); @@ -242,6 +343,8 @@ namespace llvm { truncf, /// long double truncl(long double x); truncl, + /// void *valloc(size_t size); + valloc, NumLibFuncs }; diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index acf0419..580a30f 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -22,9 +22,11 @@ #ifndef LLVM_TARGET_TARGETLOWERING_H #define LLVM_TARGET_TARGETLOWERING_H +#include "llvm/AddressingMode.h" #include "llvm/CallingConv.h" #include "llvm/InlineAsm.h" #include "llvm/Attributes.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/Support/CallSite.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/RuntimeLibcalls.h" @@ -49,7 +51,7 @@ namespace llvm { class MCContext; class MCExpr; template class SmallVectorImpl; - class TargetData; + class DataLayout; class TargetRegisterClass; class TargetLibraryInfo; class TargetLoweringObjectFile; @@ -76,8 +78,8 @@ namespace llvm { /// target-specific constructs to SelectionDAG operators. 
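The LibFunc additions above (allocation routines, the C99 math set, and the extra string helpers) are looked up by symbol name rather than by hard-coded string compares. A minimal sketch, not part of the patch, of how a pass of this era might consume them; it assumes the existing TargetLibraryInfo::getLibFunc()/has() interface and 2012-era header paths:

#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Target/TargetLibraryInfo.h"

// Returns true if CI calls a recognized, available allocation routine, using
// a few of the LibFunc entries introduced in this hunk (malloc plus the
// mangled operator-new names Znwj/Znwm). Illustrative only.
static bool callsKnownAllocator(const llvm::CallInst *CI,
                                const llvm::TargetLibraryInfo *TLI) {
  using namespace llvm;
  const Function *Callee = CI ? CI->getCalledFunction() : 0;
  if (!Callee || !TLI)
    return false;
  LibFunc::Func F;
  // getLibFunc maps the symbol name onto the enum; has() checks whether the
  // current target actually provides the routine.
  if (!TLI->getLibFunc(Callee->getName(), F) || !TLI->has(F))
    return false;
  return F == LibFunc::malloc || F == LibFunc::Znwj || F == LibFunc::Znwm;
}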
/// class TargetLowering { - TargetLowering(const TargetLowering&); // DO NOT IMPLEMENT - void operator=(const TargetLowering&); // DO NOT IMPLEMENT + TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION; + void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION; public: /// LegalizeAction - This enum indicates whether operations are valid for a /// target, and if not, what action should be used to make them valid. @@ -101,12 +103,24 @@ public: TypeWidenVector // This vector should be widened into a larger vector. }; + /// LegalizeKind holds the legalization kind that needs to happen to EVT + /// in order to type-legalize it. + typedef std::pair LegalizeKind; + enum BooleanContent { // How the target represents true/false values. UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage. ZeroOrOneBooleanContent, // All bits zero except for bit 0. ZeroOrNegativeOneBooleanContent // All bits equal to bit 0. }; + enum SelectSupportKind { + ScalarValSelect, // The target supports scalar selects (ex: cmov). + ScalarCondVectorVal, // The target supports selects with a scalar condition + // and vector values (ex: cmov). + VectorMaskSelect // The target supports vector selects with a vector + // mask (ex: x86 blends). + }; + static ISD::NodeType getExtendForContent(BooleanContent Content) { switch (Content) { case UndefinedBooleanContent: @@ -128,22 +142,37 @@ public: virtual ~TargetLowering(); const TargetMachine &getTargetMachine() const { return TM; } - const TargetData *getTargetData() const { return TD; } + const DataLayout *getDataLayout() const { return TD; } const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; } bool isBigEndian() const { return !IsLittleEndian; } bool isLittleEndian() const { return IsLittleEndian; } - MVT getPointerTy() const { return PointerTy; } + // Return the pointer type for the given address space, defaults to + // the pointer type from the data layout. + // FIXME: The default needs to be removed once all the code is updated. + virtual MVT getPointerTy(uint32_t AS = 0) const { return PointerTy; } virtual MVT getShiftAmountTy(EVT LHSTy) const; /// isSelectExpensive - Return true if the select operation is expensive for /// this target. bool isSelectExpensive() const { return SelectIsExpensive; } + virtual bool isSelectSupported(SelectSupportKind kind) const { return true; } + /// isIntDivCheap() - Return true if integer divide is usually cheaper than /// a sequence of several shifts, adds, and multiplies for this target. bool isIntDivCheap() const { return IntDivIsCheap; } + /// isSlowDivBypassed - Returns true if target has indicated at least one + /// type should be bypassed. + bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); } + + /// getBypassSlowDivTypes - Returns map of slow types for division or + /// remainder with corresponding fast types + const DenseMap &getBypassSlowDivWidths() const { + return BypassSlowDivWidths; + } + /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of /// srl/add/sra. bool isPow2DivCheap() const { return Pow2DivIsCheap; } @@ -382,6 +411,13 @@ public: getOperationAction(Op, VT) == Custom); } + /// isOperationExpand - Return true if the specified operation is illegal on + /// this target or unlikely to be made legal with custom lowering. This is + /// used to help guide high-level lowering decisions. 
+ bool isOperationExpand(unsigned Op, EVT VT) const { + return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand); + } + /// isOperationLegal - Return true if the specified operation is legal on this /// target. bool isOperationLegal(unsigned Op, EVT VT) const { @@ -475,8 +511,12 @@ public: assert((unsigned)CC < array_lengthof(CondCodeActions) && (unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 && "Table isn't big enough!"); + /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 64bit + /// value and the upper 27 bits index into the second dimension of the + /// array to select what 64bit value to use. LegalizeAction Action = (LegalizeAction) - ((CondCodeActions[CC] >> (2*VT.getSimpleVT().SimpleTy)) & 3); + ((CondCodeActions[CC][VT.getSimpleVT().SimpleTy >> 5] + >> (2*(VT.getSimpleVT().SimpleTy & 0x1F))) & 3); assert(Action != Promote && "Can't promote condition code!"); return Action; } @@ -533,6 +573,7 @@ public: } return EVT::getEVT(Ty, AllowUnknown); } + /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate /// function arguments in the caller parameter area. This is the actual @@ -686,6 +727,12 @@ public: return SupportJumpTables; } + /// getMinimumJumpTableEntries - return integer threshold on number of + /// blocks to use jump tables rather than if sequence. + int getMinimumJumpTableEntries() const { + return MinimumJumpTableEntries; + } + /// getStackPointerRegisterToSaveRestore - If a physical register, this /// specifies the register that llvm.savestack/llvm.restorestack should save /// and restore. @@ -1006,6 +1053,12 @@ protected: SupportJumpTables = Val; } + /// setMinimumJumpTableEntries - Indicate the number of blocks to generate + /// jump tables rather than if sequence. + void setMinimumJumpTableEntries(int Val) { + MinimumJumpTableEntries = Val; + } + /// setStackPointerRegisterToSaveRestore - If set to a physical register, this /// specifies the register that llvm.savestack/llvm.restorestack should save /// and restore. @@ -1045,6 +1098,11 @@ protected: /// of instructions not containing an integer divide. void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; } + /// addBypassSlowDiv - Tells the code generator which bitwidths to bypass. + void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) { + BypassSlowDivWidths[SlowBitWidth] = FastBitWidth; + } + /// setPow2DivIsCheap - Tells the code generator that it shouldn't generate /// srl/add/sra for a signed divide by power of two, and let the target handle /// it. @@ -1127,8 +1185,13 @@ protected: assert(VT < MVT::LAST_VALUETYPE && (unsigned)CC < array_lengthof(CondCodeActions) && "Table isn't big enough!"); - CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2); - CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.SimpleTy*2; + /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 64bit + /// value and the upper 27 bits index into the second dimension of the + /// array to select what 64bit value to use. 
+ CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5] + &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2); + CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5] + |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2; } /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the @@ -1201,7 +1264,7 @@ protected: public: //===--------------------------------------------------------------------===// // Lowering methods - These methods must be implemented by targets so that - // the SelectionDAGLowering code knows how to lower these. + // the SelectionDAGBuilder code knows how to lower these. // /// LowerFormalArguments - This hook must be implemented to lower the @@ -1271,9 +1334,9 @@ public: FunctionType *FTy, bool isTailCall, SDValue callee, ArgListTy &args, SelectionDAG &dag, DebugLoc dl, ImmutableCallSite &cs) - : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)), - RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()), - IsInReg(cs.paramHasAttr(0, Attribute::InReg)), + : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attributes::SExt)), + RetZExt(cs.paramHasAttr(0, Attributes::ZExt)), IsVarArg(FTy->isVarArg()), + IsInReg(cs.paramHasAttr(0, Attributes::InReg)), DoesNotReturn(cs.doesNotReturn()), IsReturnValueUsed(!cs.getInstruction()->use_empty()), IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()), @@ -1314,7 +1377,7 @@ public: } /// HandleByVal - Target-specific cleanup for formal ByVal parameters. - virtual void HandleByVal(CCState *, unsigned &) const {} + virtual void HandleByVal(CCState *, unsigned &, unsigned) const {} /// CanLowerReturn - This hook should be implemented to check whether the /// return values described by the Outs array can fit into the return @@ -1584,22 +1647,6 @@ public: // Addressing mode description hooks (used by LSR etc). // - /// AddrMode - This represents an addressing mode of: - /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg - /// If BaseGV is null, there is no BaseGV. - /// If BaseOffs is zero, there is no base offset. - /// If HasBaseReg is false, there is no base register. - /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with - /// no scale. - /// - struct AddrMode { - GlobalValue *BaseGV; - int64_t BaseOffs; - bool HasBaseReg; - int64_t Scale; - AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {} - }; - /// GetAddrModeArguments - CodeGenPrepare sinks address calculations into the /// same BB as Load/Store instructions reading the address. This allows as /// much computation as possible to be done in the address mode for that @@ -1741,10 +1788,11 @@ public: private: const TargetMachine &TM; - const TargetData *TD; + const DataLayout *TD; const TargetLoweringObjectFile &TLOF; - /// PointerTy - The type to use for pointers, usually i32 or i64. + /// PointerTy - The type to use for pointers for the default address space, + /// usually i32 or i64. /// MVT PointerTy; @@ -1762,6 +1810,12 @@ private: /// set to true unconditionally. bool IntDivIsCheap; + /// BypassSlowDivMap - Tells the code generator to bypass slow divide or + /// remainder instructions. For example, BypassSlowDivWidths[32,8] tells the + /// code generator to bypass 32-bit integer div/rem with an 8-bit unsigned + /// integer div/rem when the operands are positive and less than 256. + DenseMap BypassSlowDivWidths; + /// Pow2DivIsCheap - Tells the code generator that it shouldn't generate /// srl/add/sra for a signed divide by power of two, and let the target handle /// it. 
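Both getCondCodeAction and setCondCodeAction above now pack one 2-bit LegalizeAction per value type, 32 entries per uint64_t element: the low five bits of SimpleTy select the 2-bit slot and the remaining bits select the array element. A self-contained sketch of that arithmetic (the enum values and table size here are illustrative, not the patch's):

#include <cassert>
#include <stdint.h>

enum Action { Legal = 0, Promote = 1, Expand = 2, Custom = 3 };

static void setAction(uint64_t Table[], unsigned SimpleTy, Action A) {
  uint64_t &Word = Table[SimpleTy >> 5];      // which 64-bit element
  unsigned Shift = (SimpleTy & 0x1F) * 2;     // which 2-bit slot within it
  Word &= ~(uint64_t(3) << Shift);
  Word |= uint64_t(A) << Shift;
}

static Action getAction(const uint64_t Table[], unsigned SimpleTy) {
  return Action((Table[SimpleTy >> 5] >> ((SimpleTy & 0x1F) * 2)) & 3);
}

int main() {
  uint64_t Table[4] = {};                     // room for 128 value types
  setAction(Table, 70, Expand);               // an index above 31 lands in Table[2]
  assert(getAction(Table, 70) == Expand);
  assert(getAction(Table, 69) == Legal);
  return 0;
}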
@@ -1784,6 +1838,9 @@ private: /// If it's not true, then each jumptable must be lowered into if-then-else's. bool SupportJumpTables; + /// MinimumJumpTableEntries - Number of blocks threshold to use jump tables. + int MinimumJumpTableEntries; + /// BooleanContents - Information about the contents of the high-bits in /// boolean values held in a type wider than i1. See getBooleanContents. BooleanContent BooleanContents; @@ -1901,12 +1958,14 @@ private: /// CondCodeActions - For each condition code (ISD::CondCode) keep a /// LegalizeAction that indicates how instruction selection should /// deal with the condition code. - uint64_t CondCodeActions[ISD::SETCC_INVALID]; + /// Because each CC action takes up 2 bits, we need to have the array size + /// be large enough to fit all of the value types. This can be done by + /// dividing the MVT::LAST_VALUETYPE by 32 and adding one. + uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1]; ValueTypeActionImpl ValueTypeActions; - typedef std::pair LegalizeKind; - +public: LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const { // If this is a simple type, use the ComputeRegisterProp mechanism. @@ -1921,6 +1980,9 @@ private: ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger) && "Promote may not follow Expand or Promote"); + if (LA == TypeSplitVector) + NVT = EVT::getVectorVT(Context, VT.getVectorElementType(), + VT.getVectorNumElements() / 2); return LegalizeKind(LA, NVT); } @@ -2023,6 +2085,7 @@ private: return LegalizeKind(TypeSplitVector, NVT); } +private: std::vector > AvailableRegClasses; /// TargetDAGCombineArray - Targets can specify ISD nodes that they would diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h index d631f58..13a6fe3 100644 --- a/include/llvm/Target/TargetLoweringObjectFile.h +++ b/include/llvm/Target/TargetLoweringObjectFile.h @@ -33,10 +33,11 @@ namespace llvm { class TargetLoweringObjectFile : public MCObjectFileInfo { MCContext *Ctx; - - TargetLoweringObjectFile(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT - void operator=(const TargetLoweringObjectFile&); // DO NOT IMPLEMENT - + + TargetLoweringObjectFile( + const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION; + void operator=(const TargetLoweringObjectFile&) LLVM_DELETED_FUNCTION; + public: MCContext &getContext() const { return *Ctx; } diff --git a/include/llvm/Target/TargetMachine.h b/include/llvm/Target/TargetMachine.h index e4bf32b..5006647 100644 --- a/include/llvm/Target/TargetMachine.h +++ b/include/llvm/Target/TargetMachine.h @@ -17,6 +17,8 @@ #include "llvm/Pass.h" #include "llvm/Support/CodeGen.h" #include "llvm/Target/TargetOptions.h" +#include "llvm/TargetTransformInfo.h" +#include "llvm/Target/TargetTransformImpl.h" #include "llvm/ADT/StringRef.h" #include #include @@ -31,8 +33,7 @@ class MCCodeGenInfo; class MCContext; class PassManagerBase; class Target; -class TargetData; -class TargetELFWriterInfo; +class DataLayout; class TargetFrameLowering; class TargetInstrInfo; class TargetIntrinsicInfo; @@ -52,8 +53,8 @@ class raw_ostream; /// through this interface. /// class TargetMachine { - TargetMachine(const TargetMachine &); // DO NOT IMPLEMENT - void operator=(const TargetMachine &); // DO NOT IMPLEMENT + TargetMachine(const TargetMachine &) LLVM_DELETED_FUNCTION; + void operator=(const TargetMachine &) LLVM_DELETED_FUNCTION; protected: // Can only create subclasses. 
TargetMachine(const Target &T, StringRef TargetTriple, StringRef CPU, StringRef FS, const TargetOptions &Options); @@ -106,7 +107,11 @@ public: virtual const TargetFrameLowering *getFrameLowering() const { return 0; } virtual const TargetLowering *getTargetLowering() const { return 0; } virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; } - virtual const TargetData *getTargetData() const { return 0; } + virtual const DataLayout *getDataLayout() const { return 0; } + virtual const ScalarTargetTransformInfo* + getScalarTargetTransformInfo() const { return 0; } + virtual const VectorTargetTransformInfo* + getVectorTargetTransformInfo() const { return 0; } /// getMCAsmInfo - Return target specific asm information. /// @@ -142,11 +147,6 @@ public: return 0; } - /// getELFWriterInfo - If this target supports an ELF writer, return - /// information for it, otherwise return null. - /// - virtual const TargetELFWriterInfo *getELFWriterInfo() const { return 0; } - /// hasMCRelaxAll - Check whether all machine code instructions should be /// relaxed. bool hasMCRelaxAll() const { return MCRelaxAll; } diff --git a/include/llvm/Target/TargetOpcodes.h b/include/llvm/Target/TargetOpcodes.h index f0b181e..516e070 100644 --- a/include/llvm/Target/TargetOpcodes.h +++ b/include/llvm/Target/TargetOpcodes.h @@ -87,7 +87,11 @@ namespace TargetOpcode { /// BUNDLE - This instruction represents an instruction bundle. Instructions /// which immediately follow a BUNDLE instruction which are marked with /// 'InsideBundle' flag are inside the bundle. - BUNDLE + BUNDLE = 14, + + /// Lifetime markers. + LIFETIME_START = 15, + LIFETIME_END = 16 }; } // end namespace TargetOpcode } // end namespace llvm diff --git a/include/llvm/Target/TargetOptions.h b/include/llvm/Target/TargetOptions.h index d1a07d1..68ca567 100644 --- a/include/llvm/Target/TargetOptions.h +++ b/include/llvm/Target/TargetOptions.h @@ -155,6 +155,10 @@ namespace llvm { /// automatically realigned, if needed. unsigned RealignStack : 1; + /// SSPBufferSize - The minimum size of buffers that will receive stack + /// smashing protection when -fstack-protection is used. + unsigned SSPBufferSize; + /// EnableFastISel - This flag enables fast-path instruction selection /// which trades away generated code quality in favor of reducing /// compile time. diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h index df4d900..afa2ee2 100644 --- a/include/llvm/Target/TargetRegisterInfo.h +++ b/include/llvm/Target/TargetRegisterInfo.h @@ -221,13 +221,17 @@ public: private: const TargetRegisterInfoDesc *InfoDesc; // Extra desc array for codegen const char *const *SubRegIndexNames; // Names of subreg indexes. + // Pointer to array of lane masks, one per sub-reg index. + const unsigned *SubRegIndexLaneMasks; + regclass_iterator RegClassBegin, RegClassEnd; // List of regclasses protected: TargetRegisterInfo(const TargetRegisterInfoDesc *ID, regclass_iterator RegClassBegin, regclass_iterator RegClassEnd, - const char *const *subregindexnames); + const char *const *SRINames, + const unsigned *SRILaneMasks); virtual ~TargetRegisterInfo(); public: @@ -327,10 +331,36 @@ public: /// getSubRegIndexName - Return the human-readable symbolic target-specific /// name for the specified SubRegIndex. 
const char *getSubRegIndexName(unsigned SubIdx) const { - assert(SubIdx && "This is not a subregister index"); + assert(SubIdx && SubIdx < getNumSubRegIndices() && + "This is not a subregister index"); return SubRegIndexNames[SubIdx-1]; } + /// getSubRegIndexLaneMask - Return a bitmask representing the parts of a + /// register that are covered by SubIdx. + /// + /// Lane masks for sub-register indices are similar to register units for + /// physical registers. The individual bits in a lane mask can't be assigned + /// any specific meaning. They can be used to check if two sub-register + /// indices overlap. + /// + /// If the target has a register such that: + /// + /// getSubReg(Reg, A) overlaps getSubReg(Reg, B) + /// + /// then: + /// + /// getSubRegIndexLaneMask(A) & getSubRegIndexLaneMask(B) != 0 + /// + /// The converse is not necessarily true. If two lane masks have a common + /// bit, the corresponding sub-registers may not overlap, but it can be + /// assumed that they usually will. + unsigned getSubRegIndexLaneMask(unsigned SubIdx) const { + // SubIdx == 0 is allowed, it has the lane mask ~0u. + assert(SubIdx < getNumSubRegIndices() && "This is not a subregister index"); + return SubRegIndexLaneMasks[SubIdx]; + } + /// regsOverlap - Returns true if the two registers are equal or alias each /// other. The registers may be virtual register. bool regsOverlap(unsigned regA, unsigned regB) const { @@ -416,18 +446,6 @@ public: return MCRegisterInfo::getMatchingSuperReg(Reg, SubIdx, RC->MC); } - /// canCombineSubRegIndices - Given a register class and a list of - /// subregister indices, return true if it's possible to combine the - /// subregister indices into one that corresponds to a larger - /// subregister. Return the new subregister index by reference. Note the - /// new index may be zero if the given subregisters can be combined to - /// form the whole register. - virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC, - SmallVectorImpl &SubIndices, - unsigned &NewSubIdx) const { - return 0; - } - /// getMatchingSuperRegClass - Return a subclass of the specified register /// class A so that each register in it has a sub-register of the /// specified sub-register index which is in the specified register class B. @@ -458,6 +476,8 @@ public: /// composeSubRegIndices - Return the subregister index you get from composing /// two subregister indices. /// + /// The special null sub-register index composes as the identity. + /// /// If R:a:b is the same register as R:c, then composeSubRegIndices(a, b) /// returns c. Note that composeSubRegIndices does not tell you about illegal /// compositions. If R does not have a subreg a, or R:a does not have a subreg @@ -467,11 +487,19 @@ public: /// ssub_0:S0 - ssub_3:S3 subregs. /// If you compose subreg indices dsub_1, ssub_0 you get ssub_2. /// - virtual unsigned composeSubRegIndices(unsigned a, unsigned b) const { - // This default implementation is correct for most targets. - return b; + unsigned composeSubRegIndices(unsigned a, unsigned b) const { + if (!a) return b; + if (!b) return a; + return composeSubRegIndicesImpl(a, b); } +protected: + /// Overridden by TableGen in targets that have sub-registers. + virtual unsigned composeSubRegIndicesImpl(unsigned, unsigned) const { + llvm_unreachable("Target has no sub-registers"); + } + +public: /// getCommonSuperRegClass - Find a common super-register class if it exists. 
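The lane-mask query added above gives a conservative overlap test between sub-register indices: disjoint masks prove the sub-registers are disjoint, while a shared bit only suggests overlap. A small sketch of that use, assuming only the semantics documented in the comment:

#include "llvm/Target/TargetRegisterInfo.h"

// Illustrative helper, not part of the patch: returns false only when the
// lane masks prove that SubIdxA and SubIdxB of the same register cannot
// overlap; a non-zero intersection means they usually (but not always) do.
static bool subRegIndicesMayOverlap(const llvm::TargetRegisterInfo &TRI,
                                    unsigned SubIdxA, unsigned SubIdxB) {
  return (TRI.getSubRegIndexLaneMask(SubIdxA) &
          TRI.getSubRegIndexLaneMask(SubIdxB)) != 0;
}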
/// /// Find a register class, SuperRC and two sub-register indices, PreA and diff --git a/include/llvm/Target/TargetSchedule.td b/include/llvm/Target/TargetSchedule.td index 4dc488d..0da82fd 100644 --- a/include/llvm/Target/TargetSchedule.td +++ b/include/llvm/Target/TargetSchedule.td @@ -10,25 +10,77 @@ // This file defines the target-independent scheduling interfaces which should // be implemented by each target which is using TableGen based scheduling. // +// The SchedMachineModel is defined by subtargets for three categories of data: +// 1. Basic properties for coarse grained instruction cost model. +// 2. Scheduler Read/Write resources for simple per-opcode cost model. +// 3. Instruction itineraties for detailed reservation tables. +// +// (1) Basic properties are defined by the SchedMachineModel +// class. Target hooks allow subtargets to associate opcodes with +// those properties. +// +// (2) A per-operand machine model can be implemented in any +// combination of the following ways: +// +// A. Associate per-operand SchedReadWrite types with Instructions by +// modifying the Instruction definition to inherit from Sched. For +// each subtarget, define WriteRes and ReadAdvance to associate +// processor resources and latency with each SchedReadWrite type. +// +// B. In each instruction definition, name an ItineraryClass. For each +// subtarget, define ItinRW entries to map ItineraryClass to +// per-operand SchedReadWrite types. Unlike method A, these types may +// be subtarget specific and can be directly associated with resources +// by defining SchedWriteRes and SchedReadAdvance. +// +// C. In the subtarget, map SchedReadWrite types to specific +// opcodes. This overrides any SchedReadWrite types or +// ItineraryClasses defined by the Instruction. As in method B, the +// subtarget can directly associate resources with SchedReadWrite +// types by defining SchedWriteRes and SchedReadAdvance. +// +// D. In either the target or subtarget, define SchedWriteVariant or +// SchedReadVariant to map one SchedReadWrite type onto another +// sequence of SchedReadWrite types. This allows dynamic selection of +// an instruction's machine model via custom C++ code. It also allows +// a machine-independent SchedReadWrite type to map to a sequence of +// machine-dependent types. +// +// (3) A per-pipeline-stage machine model can be implemented by providing +// Itineraries in addition to mapping instructions to ItineraryClasses. //===----------------------------------------------------------------------===// +// Include legacy support for instruction itineraries. include "llvm/Target/TargetItinerary.td" -// The SchedMachineModel is defined by subtargets for three categories of data: -// 1) Basic properties for coarse grained instruction cost model. -// 2) Scheduler Read/Write resources for simple per-opcode cost model. -// 3) Instruction itineraties for detailed reservation tables. +class Instruction; // Forward def + +// DAG operator that interprets the DAG args as Instruction defs. +def instrs; + +// DAG operator that interprets each DAG arg as a regex pattern for +// matching Instruction opcode names. +// The regex must match the beginning of the opcode (as in Python re.match). +// To avoid matching prefixes, append '$' to the pattern. +def instregex; + +// Define the SchedMachineModel and provide basic properties for +// coarse grained instruction cost model. Default values for the +// properties are defined in MCSchedModel. 
A value of "-1" in the +// target description's SchedMachineModel indicates that the property +// is not overriden by the target. // -// Default values for basic properties are defined in MCSchedModel. "-1" -// indicates that the property is not overriden by the target description. +// Target hooks allow subtargets to associate LoadLatency and +// HighLatency with groups of opcodes. class SchedMachineModel { - int IssueWidth = -1; // Max instructions that may be scheduled per cycle. + int IssueWidth = -1; // Max micro-ops that may be scheduled per cycle. int MinLatency = -1; // Determines which instrucions are allowed in a group. // (-1) inorder (0) ooo, (1): inorder +var latencies. int LoadLatency = -1; // Cycles for loads to access the cache. int HighLatency = -1; // Approximation of cycles for "high latency" ops. int MispredictPenalty = -1; // Extra cycles for a mispredicted branch. + // Per-cycle resources tables. ProcessorItineraries Itineraries = NoItineraries; bit NoModel = 0; // Special tag to indicate missing machine model. @@ -38,4 +90,276 @@ def NoSchedModel : SchedMachineModel { let NoModel = 1; } -// TODO: Define classes for processor and scheduler resources. +// Define a kind of processor resource that may be common across +// similar subtargets. +class ProcResourceKind; + +// Define a number of interchangeable processor resources. NumUnits +// determines the throughput of instructions that require the resource. +// +// An optional Super resource may be given to model these resources as +// a subset of the more general super resources. Using one of these +// resources implies using one of the super resoruces. +// +// ProcResourceUnits normally model a few buffered resources within an +// out-of-order engine that the compiler attempts to conserve. +// Buffered resources may be held for multiple clock cycles, but the +// scheduler does not pin them to a particular clock cycle relative to +// instruction dispatch. Setting Buffered=0 changes this to an +// in-order resource. In this case, the scheduler counts down from the +// cycle that the instruction issues in-order, forcing an interlock +// with subsequent instructions that require the same resource until +// the number of ResourceCyles specified in WriteRes expire. +// +// SchedModel ties these units to a processor for any stand-alone defs +// of this class. Instances of subclass ProcResource will be automatically +// attached to a processor, so SchedModel is not needed. +class ProcResourceUnits { + ProcResourceKind Kind = kind; + int NumUnits = num; + ProcResourceKind Super = ?; + bit Buffered = 1; + SchedMachineModel SchedModel = ?; +} + +// EponymousProcResourceKind helps implement ProcResourceUnits by +// allowing a ProcResourceUnits definition to reference itself. It +// should not be referenced anywhere else. +def EponymousProcResourceKind : ProcResourceKind; + +// Subtargets typically define processor resource kind and number of +// units in one place. +class ProcResource : ProcResourceKind, + ProcResourceUnits; + +// A target architecture may define SchedReadWrite types and associate +// them with instruction operands. +class SchedReadWrite; + +// List the per-operand types that map to the machine model of an +// instruction. One SchedWrite type must be listed for each explicit +// def operand in order. Additional SchedWrite types may optionally be +// listed for implicit def operands. SchedRead types may optionally +// be listed for use operands in order. The order of defs relative to +// uses is insignificant. 
This way, the same SchedReadWrite list may +// be used for multiple forms of an operation. For example, a +// two-address instruction could have two tied operands or single +// operand that both reads and writes a reg. In both cases we have a +// single SchedWrite and single SchedRead in any order. +class Sched schedrw> { + list SchedRW = schedrw; +} + +// Define a scheduler resource associated with a def operand. +class SchedWrite : SchedReadWrite; +def NoWrite : SchedWrite; + +// Define a scheduler resource associated with a use operand. +class SchedRead : SchedReadWrite; + +// Define a SchedWrite that is modeled as a sequence of other +// SchedWrites with additive latency. This allows a single operand to +// be mapped the resources composed from a set of previously defined +// SchedWrites. +// +// If the final write in this sequence is a SchedWriteVariant marked +// Variadic, then the list of prior writes are distributed across all +// operands after resolving the predicate for the final write. +// +// SchedModel silences warnings but is ignored. +class WriteSequence writes, int rep = 1> : SchedWrite { + list Writes = writes; + int Repeat = rep; + SchedMachineModel SchedModel = ?; +} + +// Define values common to WriteRes and SchedWriteRes. +// +// SchedModel ties these resources to a processor. +class ProcWriteResources resources> { + list ProcResources = resources; + list ResourceCycles = []; + int Latency = 1; + int NumMicroOps = 1; + bit BeginGroup = 0; + bit EndGroup = 0; + // Allow a processor to mark some scheduling classes as unsupported + // for stronger verification. + bit Unsupported = 0; + SchedMachineModel SchedModel = ?; +} + +// Define the resources and latency of a SchedWrite. This will be used +// directly by targets that have no itinerary classes. In this case, +// SchedWrite is defined by the target, while WriteResources is +// defined by the subtarget, and maps the SchedWrite to processor +// resources. +// +// If a target already has itinerary classes, SchedWriteResources can +// be used instead to define subtarget specific SchedWrites and map +// them to processor resources in one place. Then ItinRW can map +// itinerary classes to the subtarget's SchedWrites. +// +// ProcResources indicates the set of resources consumed by the write. +// Optionally, ResourceCycles indicates the number of cycles the +// resource is consumed. Each ResourceCycles item is paired with the +// ProcResource item at the same position in its list. Since +// ResourceCycles are rarely specialized, the list may be +// incomplete. By default, resources are consumed for a single cycle, +// regardless of latency, which models a fully pipelined processing +// unit. A value of 0 for ResourceCycles means that the resource must +// be available but is not consumed, which is only relevant for +// unbuffered resources. +// +// By default, each SchedWrite takes one micro-op, which is counted +// against the processor's IssueWidth limit. If an instruction can +// write multiple registers with a single micro-op, the subtarget +// should define one of the writes to be zero micro-ops. If a +// subtarget requires multiple micro-ops to write a single result, it +// should either override the write's NumMicroOps to be greater than 1 +// or require additional writes. Extra writes can be required either +// by defining a WriteSequence, or simply listing extra writes in the +// instruction's list of writers beyond the number of "def" +// operands. 
The scheduler assumes that all micro-ops must be +// dispatched in the same cycle. These micro-ops may be required to +// begin or end the current dispatch group. +class WriteRes resources> + : ProcWriteResources { + SchedWrite WriteType = write; +} + +// Directly name a set of WriteResources defining a new SchedWrite +// type at the same time. This class is unaware of its SchedModel so +// must be referenced by InstRW or ItinRW. +class SchedWriteRes resources> : SchedWrite, + ProcWriteResources; + +// Define values common to ReadAdvance and SchedReadAdvance. +// +// SchedModel ties these resources to a processor. +class ProcReadAdvance writes = []> { + int Cycles = cycles; + list ValidWrites = writes; + // Allow a processor to mark some scheduling classes as unsupported + // for stronger verification. + bit Unsupported = 0; + SchedMachineModel SchedModel = ?; +} + +// A processor may define a ReadAdvance associated with a SchedRead +// to reduce latency of a prior write by N cycles. A negative advance +// effectively increases latency, which may be used for cross-domain +// stalls. +// +// A ReadAdvance may be associated with a list of SchedWrites +// to implement pipeline bypass. The Writes list may be empty to +// indicate operands that are always read this number of Cycles later +// than a normal register read, allowing the read's parent instruction +// to issue earlier relative to the writer. +class ReadAdvance writes = []> + : ProcReadAdvance { + SchedRead ReadType = read; +} + +// Directly associate a new SchedRead type with a delay and optional +// pipeline bypess. For use with InstRW or ItinRW. +class SchedReadAdvance writes = []> : SchedRead, + ProcReadAdvance; + +// Define SchedRead defaults. Reads seldom need special treatment. +def ReadDefault : SchedRead; +def NoReadAdvance : SchedReadAdvance<0>; + +// Define shared code that will be in the same scope as all +// SchedPredicates. Available variables are: +// (const MachineInstr *MI, const TargetSchedModel *SchedModel) +class PredicateProlog { + code Code = c; +} + +// Define a predicate to determine which SchedVariant applies to a +// particular MachineInstr. The code snippet is used as an +// if-statement's expression. Available variables are MI, SchedModel, +// and anything defined in a PredicateProlog. +// +// SchedModel silences warnings but is ignored. +class SchedPredicate { + SchedMachineModel SchedModel = ?; + code Predicate = pred; +} +def NoSchedPred : SchedPredicate<[{true}]>; + +// Associate a predicate with a list of SchedReadWrites. By default, +// the selected SchedReadWrites are still associated with a single +// operand and assumed to execute sequentially with additive +// latency. However, if the parent SchedWriteVariant or +// SchedReadVariant is marked "Variadic", then each Selected +// SchedReadWrite is mapped in place to the instruction's variadic +// operands. In this case, latency is not additive. If the current Variant +// is already part of a Sequence, then that entire chain leading up to +// the Variant is distributed over the variadic operands. +class SchedVar selected> { + SchedPredicate Predicate = pred; + list Selected = selected; +} + +// SchedModel silences warnings but is ignored. +class SchedVariant variants> { + list Variants = variants; + bit Variadic = 0; + SchedMachineModel SchedModel = ?; +} + +// A SchedWriteVariant is a single SchedWrite type that maps to a list +// of SchedWrite types under the conditions defined by its predicates. 
+// +// A Variadic write is expanded to cover multiple "def" operands. The +// SchedVariant's Expansion list is then interpreted as one write +// per-operand instead of the usual sequential writes feeding a single +// operand. +class SchedWriteVariant variants> : SchedWrite, + SchedVariant { +} + +// A SchedReadVariant is a single SchedRead type that maps to a list +// of SchedRead types under the conditions defined by its predicates. +// +// A Variadic write is expanded to cover multiple "readsReg" operands as +// explained above. +class SchedReadVariant variants> : SchedRead, + SchedVariant { +} + +// Map a set of opcodes to a list of SchedReadWrite types. This allows +// the subtarget to easily override specific operations. +// +// SchedModel ties this opcode mapping to a processor. +class InstRW rw, dag instrlist> { + list OperandReadWrites = rw; + dag Instrs = instrlist; + SchedMachineModel SchedModel = ?; +} + +// Map a set of itinerary classes to SchedReadWrite resources. This is +// used to bootstrap a target (e.g. ARM) when itineraries already +// exist and changing InstrInfo is undesirable. +// +// SchedModel ties this ItineraryClass mapping to a processor. +class ItinRW rw, list iic> { + list MatchedItinClasses = iic; + list OperandReadWrites = rw; + SchedMachineModel SchedModel = ?; +} + +// Alias a target-defined SchedReadWrite to a processor specific +// SchedReadWrite. This allows a subtarget to easily map a +// SchedReadWrite type onto a WriteSequence, SchedWriteVariant, or +// SchedReadVariant. +// +// SchedModel will usually be provided by surrounding let statement +// and ties this SchedAlias mapping to a processor. +class SchedAlias { + SchedReadWrite MatchRW = match; + SchedReadWrite AliasRW = alias; + SchedMachineModel SchedModel = ?; +} diff --git a/include/llvm/Target/TargetSelectionDAG.td b/include/llvm/Target/TargetSelectionDAG.td index 3f81c06..83bd787 100644 --- a/include/llvm/Target/TargetSelectionDAG.td +++ b/include/llvm/Target/TargetSelectionDAG.td @@ -445,9 +445,9 @@ def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2, def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2, [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; + [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore, - [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>; + [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>; // Do not use ld, st directly. Use load, extload, sextload, zextload, store, // and truncst (see below). diff --git a/include/llvm/Target/TargetSelectionDAGInfo.h b/include/llvm/Target/TargetSelectionDAGInfo.h index c9ca722..96793bc 100644 --- a/include/llvm/Target/TargetSelectionDAGInfo.h +++ b/include/llvm/Target/TargetSelectionDAGInfo.h @@ -20,7 +20,7 @@ namespace llvm { -class TargetData; +class DataLayout; class TargetMachine; //===----------------------------------------------------------------------===// @@ -28,13 +28,13 @@ class TargetMachine; /// SelectionDAG lowering and instruction selection process. 
/// class TargetSelectionDAGInfo { - TargetSelectionDAGInfo(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT - void operator=(const TargetSelectionDAGInfo &); // DO NOT IMPLEMENT + TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION; + void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION; - const TargetData *TD; + const DataLayout *TD; protected: - const TargetData *getTargetData() const { return TD; } + const DataLayout *getDataLayout() const { return TD; } public: explicit TargetSelectionDAGInfo(const TargetMachine &TM); diff --git a/include/llvm/Target/TargetSubtargetInfo.h b/include/llvm/Target/TargetSubtargetInfo.h index fc23b2c..6db96d9 100644 --- a/include/llvm/Target/TargetSubtargetInfo.h +++ b/include/llvm/Target/TargetSubtargetInfo.h @@ -19,9 +19,11 @@ namespace llvm { +class MachineInstr; class SDep; class SUnit; class TargetRegisterClass; +class TargetSchedModel; template class SmallVectorImpl; //===----------------------------------------------------------------------===// @@ -31,8 +33,8 @@ template class SmallVectorImpl; /// be exposed through a TargetSubtargetInfo-derived class. /// class TargetSubtargetInfo : public MCSubtargetInfo { - TargetSubtargetInfo(const TargetSubtargetInfo&); // DO NOT IMPLEMENT - void operator=(const TargetSubtargetInfo&); // DO NOT IMPLEMENT + TargetSubtargetInfo(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION; + void operator=(const TargetSubtargetInfo&) LLVM_DELETED_FUNCTION; protected: // Can only create subclasses... TargetSubtargetInfo(); public: @@ -43,23 +45,26 @@ public: virtual ~TargetSubtargetInfo(); - /// getSpecialAddressLatency - For targets where it is beneficial to - /// backschedule instructions that compute addresses, return a value - /// indicating the number of scheduling cycles of backscheduling that - /// should be attempted. - virtual unsigned getSpecialAddressLatency() const { return 0; } + /// Resolve a SchedClass at runtime, where SchedClass identifies an + /// MCSchedClassDesc with the isVariant property. This may return the ID of + /// another variant SchedClass, but repeated invocation must quickly terminate + /// in a nonvariant SchedClass. + virtual unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *MI, + const TargetSchedModel* SchedModel) const { + return 0; + } // enablePostRAScheduler - If the target can benefit from post-regalloc // scheduling and the specified optimization level meets the requirement // return true to enable post-register-allocation scheduling. In // CriticalPathRCs return any register classes that should only be broken - // if on the critical path. + // if on the critical path. virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel, AntiDepBreakMode& Mode, RegClassVector& CriticalPathRCs) const; // adjustSchedDependency - Perform target specific adjustments to // the latency of a schedule dependency. - virtual void adjustSchedDependency(SUnit *def, SUnit *use, + virtual void adjustSchedDependency(SUnit *def, SUnit *use, SDep& dep) const { } }; diff --git a/include/llvm/Target/TargetTransformImpl.h b/include/llvm/Target/TargetTransformImpl.h new file mode 100644 index 0000000..7ea2396 --- /dev/null +++ b/include/llvm/Target/TargetTransformImpl.h @@ -0,0 +1,98 @@ +//=- llvm/Target/TargetTransformImpl.h - Target Loop Trans Info----*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file contains the target-specific implementations of the +// TargetTransform interfaces. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TARGET_TARGET_TRANSFORMATION_IMPL_H +#define LLVM_TARGET_TARGET_TRANSFORMATION_IMPL_H + +#include "llvm/TargetTransformInfo.h" +#include "llvm/CodeGen/ValueTypes.h" + +namespace llvm { + +class TargetLowering; + +/// ScalarTargetTransformInfo - This is a default implementation for the +/// ScalarTargetTransformInfo interface. Different targets can implement +/// this interface differently. +class ScalarTargetTransformImpl : public ScalarTargetTransformInfo { +private: + const TargetLowering *TLI; + +public: + /// Ctor + explicit ScalarTargetTransformImpl(const TargetLowering *TL) : TLI(TL) {} + + virtual bool isLegalAddImmediate(int64_t imm) const; + + virtual bool isLegalICmpImmediate(int64_t imm) const; + + virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const; + + virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const; + + virtual bool isTypeLegal(Type *Ty) const; + + virtual unsigned getJumpBufAlignment() const; + + virtual unsigned getJumpBufSize() const; + + virtual bool shouldBuildLookupTables() const; +}; + +class VectorTargetTransformImpl : public VectorTargetTransformInfo { +protected: + const TargetLowering *TLI; + + /// Estimate the cost of type-legalization and the legalized type. + std::pair getTypeLegalizationCost(Type *Ty) const; + + /// Estimate the overhead of scalarizing an instruction. Insert and Extract + /// are set if the result needs to be inserted and/or extracted from vectors. + unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const; + + // Get the ISD node that corresponds to the Instruction class opcode. + int InstructionOpcodeToISD(unsigned Opcode) const; + +public: + explicit VectorTargetTransformImpl(const TargetLowering *TL) : TLI(TL) {} + + virtual ~VectorTargetTransformImpl() {} + + virtual unsigned getInstrCost(unsigned Opcode, Type *Ty1, Type *Ty2) const; + + virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const; + + virtual unsigned getBroadcastCost(Type *Tp) const; + + virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, + Type *Src) const; + + virtual unsigned getCFInstrCost(unsigned Opcode) const; + + virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, + Type *CondTy) const; + + virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val, + unsigned Index) const; + + virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src, + unsigned Alignment, + unsigned AddressSpace) const; + + virtual unsigned getNumberOfParts(Type *Tp) const; +}; + +} // end llvm namespace + +#endif diff --git a/include/llvm/TargetTransformInfo.h b/include/llvm/TargetTransformInfo.h new file mode 100644 index 0000000..94db490 --- /dev/null +++ b/include/llvm/TargetTransformInfo.h @@ -0,0 +1,204 @@ +//===- llvm/Transforms/TargetTransformInfo.h --------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass exposes codegen information to IR-level passes. Every +// transformation that uses codegen information is broken into three parts: +// 1. The IR-level analysis pass. +// 2. 
The IR-level transformation interface which provides the needed +// information. +// 3. Codegen-level implementation which uses target-specific hooks. +// +// This file defines #2, which is the interface that IR-level transformations +// use for querying the codegen. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_TARGET_TRANSFORM_INTERFACE +#define LLVM_TRANSFORMS_TARGET_TRANSFORM_INTERFACE + +#include "llvm/Pass.h" +#include "llvm/AddressingMode.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Type.h" + +namespace llvm { + +class ScalarTargetTransformInfo; +class VectorTargetTransformInfo; + +/// TargetTransformInfo - This pass provides access to the codegen +/// interfaces that are needed for IR-level transformations. +class TargetTransformInfo : public ImmutablePass { +private: + const ScalarTargetTransformInfo *STTI; + const VectorTargetTransformInfo *VTTI; +public: + /// Default ctor. + /// + /// @note This has to exist, because this is a pass, but it should never be + /// used. + TargetTransformInfo(); + + TargetTransformInfo(const ScalarTargetTransformInfo* S, + const VectorTargetTransformInfo *V) + : ImmutablePass(ID), STTI(S), VTTI(V) { + initializeTargetTransformInfoPass(*PassRegistry::getPassRegistry()); + } + + TargetTransformInfo(const TargetTransformInfo &T) : + ImmutablePass(ID), STTI(T.STTI), VTTI(T.VTTI) { } + + const ScalarTargetTransformInfo* getScalarTargetTransformInfo() const { + return STTI; + } + const VectorTargetTransformInfo* getVectorTargetTransformInfo() const { + return VTTI; + } + + /// Pass identification, replacement for typeid. + static char ID; +}; + +// ---------------------------------------------------------------------------// +// The classes below are inherited and implemented by target-specific classes +// in the codegen. +// ---------------------------------------------------------------------------// + +/// ScalarTargetTransformInfo - This interface is used by IR-level passes +/// that need target-dependent information for generic scalar transformations. +/// LSR, and LowerInvoke use this interface. +class ScalarTargetTransformInfo { +public: + virtual ~ScalarTargetTransformInfo() {} + + /// isLegalAddImmediate - Return true if the specified immediate is legal + /// add immediate, that is the target has add instructions which can add + /// a register with the immediate without having to materialize the + /// immediate into a register. + virtual bool isLegalAddImmediate(int64_t) const { + return false; + } + /// isLegalICmpImmediate - Return true if the specified immediate is legal + /// icmp immediate, that is the target has icmp instructions which can compare + /// a register against the immediate without having to materialize the + /// immediate into a register. + virtual bool isLegalICmpImmediate(int64_t) const { + return false; + } + /// isLegalAddressingMode - Return true if the addressing mode represented by + /// AM is legal for this target, for a load/store of the specified type. + /// The type may be VoidTy, in which case only return true if the addressing + /// mode is legal for a load/store of any legal type. + /// TODO: Handle pre/postinc as well. + virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const { + return false; + } + /// isTruncateFree - Return true if it's free to truncate a value of + /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in + /// register EAX to i16 by referencing its sub-register AX. 
+ virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const { + return false; + } + /// Is this type legal. + virtual bool isTypeLegal(Type *Ty) const { + return false; + } + /// getJumpBufAlignment - returns the target's jmp_buf alignment in bytes + virtual unsigned getJumpBufAlignment() const { + return 0; + } + /// getJumpBufSize - returns the target's jmp_buf size in bytes. + virtual unsigned getJumpBufSize() const { + return 0; + } + /// shouldBuildLookupTables - Return true if switches should be turned into + /// lookup tables for the target. + virtual bool shouldBuildLookupTables() const { + return true; + } +}; + +/// VectorTargetTransformInfo - This interface is used by the vectorizers +/// to estimate the profitability of vectorization for different instructions. +class VectorTargetTransformInfo { +public: + virtual ~VectorTargetTransformInfo() {} + + /// Returns the expected cost of the instruction opcode. The opcode is one of + /// the enums like Instruction::Add. The type arguments are the type of the + /// operation. + /// Most instructions only use the first type and in that case the second + /// operand is ignored. + /// + /// Exceptions: + /// * Br instructions do not use any of the types. + /// * Select instructions pass the return type as Ty1 and the selector as Ty2. + /// * Cast instructions pass the destination as Ty1 and the source as Ty2. + /// * Insert/Extract element pass only the vector type as Ty1. + /// * ShuffleVector, Load, Store do not use this call. + virtual unsigned getInstrCost(unsigned Opcode, + Type *Ty1 = 0, + Type *Ty2 = 0) const { + return 1; + } + + /// Returns the expected cost of arithmetic ops, such as mul, xor, fsub, etc. + virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const { + return 1; + } + + /// Returns the cost of a vector broadcast of a scalar at place zero to a + /// vector of type 'Tp'. + virtual unsigned getBroadcastCost(Type *Tp) const { + return 1; + } + + /// Returns the expected cost of cast instructions, such as bitcast, trunc, + /// zext, etc. + virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, + Type *Src) const { + return 1; + } + + /// Returns the expected cost of control-flow related instrutctions such as + /// Phi, Ret, Br. + virtual unsigned getCFInstrCost(unsigned Opcode) const { + return 1; + } + + /// Returns the expected cost of compare and select instructions. + virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, + Type *CondTy = 0) const { + return 1; + } + + /// Returns the expected cost of vector Insert and Extract. + /// Use -1 to indicate that there is no information on the index value. + virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val, + unsigned Index = -1) const { + return 1; + } + + /// Returns the cost of Load and Store instructions. + virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src, + unsigned Alignment, + unsigned AddressSpace) const { + return 1; + } + + /// Returns the number of pieces into which the provided type must be + /// split during legalization. Zero is returned when the answer is unknown. 
+ virtual unsigned getNumberOfParts(Type *Tp) const { + return 0; + } +}; + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/IPO.h b/include/llvm/Transforms/IPO.h index 18176e8..fc1cd59 100644 --- a/include/llvm/Transforms/IPO.h +++ b/include/llvm/Transforms/IPO.h @@ -104,23 +104,14 @@ Pass *createPruneEHPass(); //===----------------------------------------------------------------------===// /// createInternalizePass - This pass loops over all of the functions in the -/// input module, internalizing all globals (functions and variables) not part -/// of the api. If a list of symbols is specified with the -/// -internalize-public-api-* command line options, those symbols are not -/// internalized and all others are. Otherwise if AllButMain is set and the -/// main function is found, all other globals are marked as internal. If no api -/// is supplied and AllButMain is not set, or no main function is found, nothing -/// is internalized. -/// -ModulePass *createInternalizePass(bool AllButMain); - -/// createInternalizePass - This pass loops over all of the functions in the /// input module, internalizing all globals (functions and variables) not in the /// given exportList. /// /// Note that commandline options that are used with the above function are not -/// used now! Also, when exportList is empty, nothing is internalized. +/// used now! ModulePass *createInternalizePass(const std::vector &exportList); +/// createInternalizePass - Same as above, but with an empty exportList. +ModulePass *createInternalizePass(); //===----------------------------------------------------------------------===// /// createDeadArgEliminationPass - This pass removes arguments from functions @@ -192,6 +183,16 @@ ModulePass *createMergeFunctionsPass(); /// createPartialInliningPass - This pass inlines parts of functions. /// ModulePass *createPartialInliningPass(); + +//===----------------------------------------------------------------------===// +// createMetaRenamerPass - Rename everything with metasyntatic names. +// +ModulePass *createMetaRenamerPass(); + +//===----------------------------------------------------------------------===// +/// createBarrierNoopPass - This pass is purely a module pass barrier in a pass +/// manager. +ModulePass *createBarrierNoopPass(); } // End llvm namespace diff --git a/include/llvm/Transforms/IPO/InlinerPass.h b/include/llvm/Transforms/IPO/InlinerPass.h index 7c3cfc8..b036040 100644 --- a/include/llvm/Transforms/IPO/InlinerPass.h +++ b/include/llvm/Transforms/IPO/InlinerPass.h @@ -21,7 +21,7 @@ namespace llvm { class CallSite; - class TargetData; + class DataLayout; class InlineCost; template class SmallPtrSet; diff --git a/include/llvm/Transforms/IPO/PassManagerBuilder.h b/include/llvm/Transforms/IPO/PassManagerBuilder.h index 47ce902..3ea0a42 100644 --- a/include/llvm/Transforms/IPO/PassManagerBuilder.h +++ b/include/llvm/Transforms/IPO/PassManagerBuilder.h @@ -104,6 +104,7 @@ public: bool DisableUnitAtATime; bool DisableUnrollLoops; bool Vectorize; + bool LoopVectorize; private: /// ExtensionList - This is list of all of the extensions that are registered. 
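The new PassManagerBuilder::LoopVectorize flag above gates passes that consume the TargetTransformInfo analysis introduced earlier in this patch. A minimal sketch of such a consumer; the pass name and the particular query are illustrative, and it uses getAnalysisIfAvailable since the analysis is an optional ImmutablePass:

#include "llvm/Function.h"
#include "llvm/Instruction.h"
#include "llvm/Pass.h"
#include "llvm/TargetTransformInfo.h"

namespace {
// Queries the vector cost interface when the target registered one; this
// mirrors how an early vectorizer might decide whether widening pays off.
struct TTIQueryExample : public llvm::FunctionPass {
  static char ID;
  TTIQueryExample() : llvm::FunctionPass(ID) {}

  virtual bool runOnFunction(llvm::Function &F) {
    using namespace llvm;
    // The analysis may be absent, and the vector interface may be null if
    // the target did not provide a VectorTargetTransformInfo.
    if (const TargetTransformInfo *TTI =
            getAnalysisIfAvailable<TargetTransformInfo>())
      if (const VectorTargetTransformInfo *VTTI =
              TTI->getVectorTargetTransformInfo())
        (void)VTTI->getArithmeticInstrCost(Instruction::Add,
                                           Type::getInt32Ty(F.getContext()));
    return false; // purely a query; the IR is not modified
  }
};
}

char TTIQueryExample::ID = 0;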
diff --git a/include/llvm/Transforms/Instrumentation.h b/include/llvm/Transforms/Instrumentation.h index 4b0c448..8e63aaa 100644 --- a/include/llvm/Transforms/Instrumentation.h +++ b/include/llvm/Transforms/Instrumentation.h @@ -34,7 +34,7 @@ ModulePass *createGCOVProfilerPass(bool EmitNotes = true, bool EmitData = true, bool UseExtraChecksum = false); // Insert AddressSanitizer (address sanity checking) instrumentation -ModulePass *createAddressSanitizerPass(); +FunctionPass *createAddressSanitizerPass(); // Insert ThreadSanitizer (race detection) instrumentation FunctionPass *createThreadSanitizerPass(); diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index 3dce6fe..a5d8eed 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -70,6 +70,12 @@ FunctionPass *createAggressiveDCEPass(); //===----------------------------------------------------------------------===// // +// SROA - Replace aggregates or pieces of aggregates with scalar SSA values. +// +FunctionPass *createSROAPass(bool RequiresDomTree = true); + +//===----------------------------------------------------------------------===// +// // ScalarReplAggregates - Break up alloca's of aggregates into multiple allocas // if possible. // diff --git a/include/llvm/Transforms/Utils/AddrModeMatcher.h b/include/llvm/Transforms/Utils/AddrModeMatcher.h index 90485eb..7d67283 100644 --- a/include/llvm/Transforms/Utils/AddrModeMatcher.h +++ b/include/llvm/Transforms/Utils/AddrModeMatcher.h @@ -19,6 +19,7 @@ #ifndef LLVM_TRANSFORMS_UTILS_ADDRMODEMATCHER_H #define LLVM_TRANSFORMS_UTILS_ADDRMODEMATCHER_H +#include "llvm/AddressingMode.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Target/TargetLowering.h" @@ -33,7 +34,7 @@ class raw_ostream; /// ExtAddrMode - This is an extended version of TargetLowering::AddrMode /// which holds actual Value*'s for register values. -struct ExtAddrMode : public TargetLowering::AddrMode { +struct ExtAddrMode : public AddrMode { Value *BaseReg; Value *ScaledReg; ExtAddrMode() : BaseReg(0), ScaledReg(0) {} diff --git a/include/llvm/Transforms/Utils/BasicBlockUtils.h b/include/llvm/Transforms/Utils/BasicBlockUtils.h index 8a939cc..b810f1a 100644 --- a/include/llvm/Transforms/Utils/BasicBlockUtils.h +++ b/include/llvm/Transforms/Utils/BasicBlockUtils.h @@ -25,8 +25,11 @@ namespace llvm { class AliasAnalysis; class Instruction; +class MDNode; class Pass; class ReturnInst; +class TargetLibraryInfo; +class TerminatorInst; /// DeleteDeadBlock - Delete the specified block, which must have no /// predecessors. @@ -44,7 +47,7 @@ void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = 0); /// a result. This includes tracing the def-use list from the PHI to see if /// it is ultimately unused or if it reaches an unused cycle. Return true /// if any PHIs were deleted. -bool DeleteDeadPHIs(BasicBlock *BB); +bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = 0); /// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor, /// if possible. The return value indicates success or failure. @@ -202,6 +205,29 @@ void SplitLandingPadPredecessors(BasicBlock *OrigBB,ArrayRef Preds, ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB, BasicBlock *Pred); +/// SplitBlockAndInsertIfThen - Split the containing block at the +/// specified instruction - everything before and including Cmp stays +/// in the old basic block, and everything after Cmp is moved to a +/// new block. 
The two blocks are connected by a conditional branch +/// (with value of Cmp being the condition). +/// Before: +/// Head +/// Cmp +/// Tail +/// After: +/// Head +/// Cmp +/// if (Cmp) +/// ThenBlock +/// Tail +/// +/// If Unreachable is true, then ThenBlock ends with +/// UnreachableInst, otherwise it branches to Tail. +/// Returns the NewBasicBlock's terminator. + +TerminatorInst *SplitBlockAndInsertIfThen(Instruction *Cmp, + bool Unreachable, MDNode *BranchWeights = 0); + } // End llvm namespace #endif diff --git a/include/llvm/Transforms/Utils/BuildLibCalls.h b/include/llvm/Transforms/Utils/BuildLibCalls.h index a6e41f0..ab9fc47 100644 --- a/include/llvm/Transforms/Utils/BuildLibCalls.h +++ b/include/llvm/Transforms/Utils/BuildLibCalls.h @@ -19,7 +19,7 @@ namespace llvm { class Value; - class TargetData; + class DataLayout; class TargetLibraryInfo; /// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*. @@ -28,52 +28,52 @@ namespace llvm { /// EmitStrLen - Emit a call to the strlen function to the builder, for the /// specified pointer. Ptr is required to be some pointer type, and the /// return value has 'intptr_t' type. - Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD, + Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitStrNLen - Emit a call to the strnlen function to the builder, for the /// specified pointer. Ptr is required to be some pointer type, MaxLen must /// be of size_t type, and the return value has 'intptr_t' type. Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI); + const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitStrChr - Emit a call to the strchr function to the builder, for the /// specified pointer and character. Ptr is required to be some pointer type, /// and the return value has 'i8*' type. - Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD, + Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitStrNCmp - Emit a call to the strncmp function to the builder. Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI); + const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitStrCpy - Emit a call to the strcpy function to the builder, for the /// specified pointer arguments. Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI, + const DataLayout *TD, const TargetLibraryInfo *TLI, StringRef Name = "strcpy"); /// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the /// specified pointer arguments and length. Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI, + const DataLayout *TD, const TargetLibraryInfo *TLI, StringRef Name = "strncpy"); /// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder. /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src /// are pointers. Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize, - IRBuilder<> &B, const TargetData *TD, + IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value. 
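Stepping back to the SplitBlockAndInsertIfThen helper added to BasicBlockUtils.h above, a typical instrumentation-style use looks roughly like this (a sketch; emitSlowPathCheck and ReportFn are hypothetical names, and ReportFn is assumed to be a no-argument function already present in the module):

#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

// Split the block at Cmp and fill in the new, conditionally executed block.
static void emitSlowPathCheck(llvm::Instruction *Cmp, llvm::Value *ReportFn) {
  // Everything after Cmp moves to the tail block; ThenBlock runs only when
  // Cmp is true and, with Unreachable=false, branches back to the tail.
  llvm::TerminatorInst *Term =
      llvm::SplitBlockAndInsertIfThen(Cmp, /*Unreachable=*/false);
  llvm::IRBuilder<> IRB(Term);          // insert slow-path code before Term
  IRB.CreateCall(ReportFn);             // e.g. a diagnostic/report routine
}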
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI); + const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitMemCmp - Emit a call to the memcmp function. Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI); + const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name' /// (e.g. 'floor'). This function is known to take a single of type matching @@ -85,28 +85,28 @@ namespace llvm { /// EmitPutChar - Emit a call to the putchar function. This assumes that Char /// is an integer. - Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD, + Value *EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitPutS - Emit a call to the puts function. This assumes that Str is /// some pointer. - Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD, + Value *EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitFPutC - Emit a call to the fputc function. This assumes that Char is /// an i32, and File is a pointer to FILE. Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI); + const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitFPutS - Emit a call to the puts function. Str is required to be a /// pointer and File is a pointer to FILE. - Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD, + Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI); /// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is /// a pointer, Size is an 'intptr_t', and File is a pointer to FILE. Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI); + const DataLayout *TD, const TargetLibraryInfo *TLI); /// SimplifyFortifiedLibCalls - Helper class for folding checked library /// calls (e.g. __strcpy_chk) into their unchecked counterparts. @@ -118,7 +118,7 @@ namespace llvm { bool isString) const = 0; public: virtual ~SimplifyFortifiedLibCalls(); - bool fold(CallInst *CI, const TargetData *TD, const TargetLibraryInfo *TLI); + bool fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI); }; } diff --git a/include/llvm/Transforms/Utils/BypassSlowDivision.h b/include/llvm/Transforms/Utils/BypassSlowDivision.h new file mode 100644 index 0000000..ac8af12 --- /dev/null +++ b/include/llvm/Transforms/Utils/BypassSlowDivision.h @@ -0,0 +1,33 @@ +//===- llvm/Transforms/Utils/BypassSlowDivision.h --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains an optimization for div and rem on architectures that +// execute short instructions significantly faster than longer instructions. +// For example, on Intel Atom 32-bit divides are slow enough that during +// runtime it is profitable to check the value of the operands, and if they are +// positive and less than 256 use an unsigned 8-bit divide. 
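Written as plain C++ purely for illustration, the check the pass introduces around a 32-bit unsigned divide amounts to:

// Fast path when both operands fit in 8 bits; otherwise the original divide.
static unsigned divideMaybeBypassed(unsigned A, unsigned B) {
  if ((A | B) < 256)
    return (unsigned char)A / (unsigned char)B;  // cheap 8-bit divide
  return A / B;                                  // full-width divide
}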
+// +//===----------------------------------------------------------------------===// + +#ifndef TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H +#define TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H + +#include "llvm/Function.h" + +namespace llvm { + +/// This optimization identifies DIV instructions that can be +/// profitably bypassed and carried out with a shorter, faster divide. +bool bypassSlowDivision(Function &F, + Function::iterator &I, + const DenseMap &BypassWidth); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h index b7b5d29..1780025 100644 --- a/include/llvm/Transforms/Utils/Cloning.h +++ b/include/llvm/Transforms/Utils/Cloning.h @@ -39,7 +39,7 @@ class ReturnInst; class CallSite; class Trace; class CallGraph; -class TargetData; +class DataLayout; class Loop; class LoopInfo; class AllocaInst; @@ -116,13 +116,6 @@ Function *CloneFunction(const Function *F, bool ModuleLevelChanges, ClonedCodeInfo *CodeInfo = 0); -/// CloneFunction - Version of the function that doesn't need the VMap. -/// -inline Function *CloneFunction(const Function *F, ClonedCodeInfo *CodeInfo = 0){ - ValueToValueMapTy VMap; - return CloneFunction(F, VMap, CodeInfo); -} - /// Clone OldFunc into NewFunc, transforming the old arguments into references /// to VMap values. Note that if NewFunc already has basic blocks, the ones /// cloned into it will be added to the end of the function. This function @@ -157,7 +150,7 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, SmallVectorImpl &Returns, const char *NameSuffix = "", ClonedCodeInfo *CodeInfo = 0, - const TargetData *TD = 0, + const DataLayout *TD = 0, Instruction *TheCall = 0); @@ -165,13 +158,13 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, /// InlineFunction call, and records the auxiliary results produced by it. class InlineFunctionInfo { public: - explicit InlineFunctionInfo(CallGraph *cg = 0, const TargetData *td = 0) + explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *td = 0) : CG(cg), TD(td) {} /// CG - If non-null, InlineFunction will update the callgraph to reflect the /// changes it makes. CallGraph *CG; - const TargetData *TD; + const DataLayout *TD; /// StaticAllocas - InlineFunction fills this in with all static allocas that /// get copied into the caller. diff --git a/include/llvm/Transforms/Utils/IntegerDivision.h b/include/llvm/Transforms/Utils/IntegerDivision.h new file mode 100644 index 0000000..cecc807 --- /dev/null +++ b/include/llvm/Transforms/Utils/IntegerDivision.h @@ -0,0 +1,48 @@ +//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains an implementation of 32bit integer division for targets +// that don't have native support. It's largely derived from compiler-rt's +// implementation of __udivsi3, but hand-tuned for targets that prefer less +// control flow. 
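A caller-side sketch for the expansion entry points declared just below; the surrounding pass boilerplate is omitted and the helper name is hypothetical:

#include "llvm/Instructions.h"
#include "llvm/Transforms/Utils/IntegerDivision.h"

// Replace a 32-bit udiv/sdiv the target cannot lower natively with the
// expanded shift/subtract sequence emitted by expandDivision().
static bool lowerUnsupportedDivide(llvm::BinaryOperator *Div) {
  if (Div->getOpcode() == llvm::Instruction::UDiv ||
      Div->getOpcode() == llvm::Instruction::SDiv)
    return llvm::expandDivision(Div);
  return false;
}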
+// +//===----------------------------------------------------------------------===// + +#ifndef TRANSFORMS_UTILS_INTEGERDIVISION_H +#define TRANSFORMS_UTILS_INTEGERDIVISION_H + +namespace llvm { + class BinaryOperator; +} + +namespace llvm { + + /// Generate code to calculate the remainder of two integers, replacing Rem + /// with the generated code. This currently generates code using the udiv + /// expansion, but future work includes generating more specialized code, + /// e.g. when more information about the operands are known. Currently only + /// implements 32bit scalar division (due to udiv's limitation), but future + /// work is removing this limitation. + /// + /// @brief Replace Rem with generated code. + bool expandRemainder(BinaryOperator *Rem); + + /// Generate code to divide two integers, replacing Div with the generated + /// code. This currently generates code similarly to compiler-rt's + /// implementations, but future work includes generating more specialized code + /// when more information about the operands are known. Currently only + /// implements 32bit scalar division, but future work is removing this + /// limitation. + /// + /// @brief Replace Div with generated code. + bool expandDivision(BinaryOperator* Div); + +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h index 495eab7..be3029e 100644 --- a/include/llvm/Transforms/Utils/Local.h +++ b/include/llvm/Transforms/Utils/Local.h @@ -18,7 +18,7 @@ #include "llvm/IRBuilder.h" #include "llvm/Operator.h" #include "llvm/Support/GetElementPtrTypeIterator.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" namespace llvm { @@ -35,7 +35,9 @@ class Pass; class PHINode; class AllocaInst; class ConstantExpr; -class TargetData; +class DataLayout; +class TargetLibraryInfo; +class TargetTransformInfo; class DIBuilder; template class SmallVectorImpl; @@ -51,7 +53,8 @@ template class SmallVectorImpl; /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch /// conditions and indirectbr addresses this might make dead if /// DeleteDeadConditions is true. -bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false); +bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false, + const TargetLibraryInfo *TLI = 0); //===----------------------------------------------------------------------===// // Local dead code elimination. @@ -60,20 +63,21 @@ bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false); /// isInstructionTriviallyDead - Return true if the result produced by the /// instruction is not used, and the instruction has no side effects. /// -bool isInstructionTriviallyDead(Instruction *I); +bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=0); /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a /// trivially dead instruction, delete it. If that makes any of its operands /// trivially dead, delete them too, recursively. Return true if any /// instructions were deleted. -bool RecursivelyDeleteTriviallyDeadInstructions(Value *V); +bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, + const TargetLibraryInfo *TLI=0); /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively /// dead PHI node, due to being a def-use chain of single-use nodes that /// either forms a cycle or is terminated by a trivially dead instruction, /// delete it. 
If that makes any of its operands trivially dead, delete them /// too, recursively. Return true if a change was made. -bool RecursivelyDeleteDeadPHINode(PHINode *PN); +bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=0); /// SimplifyInstructionsInBlock - Scan the specified basic block and try to @@ -81,7 +85,8 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN); /// /// This returns true if it changed the code, note that it can delete /// instructions in other blocks as well in this block. -bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0); +bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0, + const TargetLibraryInfo *TLI = 0); //===----------------------------------------------------------------------===// // Control Flow Graph Restructuring. @@ -99,7 +104,7 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0); /// .. and delete the predecessor corresponding to the '1', this will attempt to /// recursively fold the 'and' to 0. void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred, - TargetData *TD = 0); + DataLayout *TD = 0); /// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its @@ -130,7 +135,8 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB); /// of the CFG. It returns true if a modification was made, possibly deleting /// the basic block that was pointed to. /// -bool SimplifyCFG(BasicBlock *BB, const TargetData *TD = 0); +bool SimplifyCFG(BasicBlock *BB, const DataLayout *TD = 0, + const TargetTransformInfo *TTI = 0); /// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch, /// and if a predecessor branches to us and one of our successors, fold the @@ -158,10 +164,10 @@ AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = 0); /// and it is more than the alignment of the ultimate object, see if we can /// increase the alignment of the ultimate object, making this check succeed. unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, - const TargetData *TD = 0); + const DataLayout *TD = 0); /// getKnownAlignment - Try to infer an alignment for the specified pointer. -static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) { +static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) { return getOrEnforceKnownAlignment(V, 0, TD); } @@ -171,7 +177,7 @@ static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) { /// When NoAssumptions is true, no assumptions about index computation not /// overflowing is made. 
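To show how the TargetLibraryInfo parameters threaded through the Local.h helpers above are meant to be used, here is a small cleanup sweep (a sketch; in a real pass the TLI pointer would come from getAnalysisIfAvailable):

#include "llvm/BasicBlock.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"

// Erase trivially dead instructions in BB; passing TLI lets the helper prove
// that otherwise-opaque library calls (e.g. an unused strlen) are removable.
static bool removeDeadInstructions(llvm::BasicBlock &BB,
                                   const llvm::TargetLibraryInfo *TLI) {
  bool Changed = false;
  for (llvm::BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ) {
    llvm::Instruction *Inst = I++;   // advance first; Inst may be erased
    if (llvm::isInstructionTriviallyDead(Inst, TLI)) {
      Inst->eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}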
template -Value *EmitGEPOffset(IRBuilderTy *Builder, const TargetData &TD, User *GEP, +Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP, bool NoAssumptions = false) { gep_type_iterator GTI = gep_type_begin(GEP); Type *IntPtrTy = TD.getIntPtrType(GEP->getContext()); diff --git a/include/llvm/Transforms/Utils/SSAUpdater.h b/include/llvm/Transforms/Utils/SSAUpdater.h index 4c82149..db65a47 100644 --- a/include/llvm/Transforms/Utils/SSAUpdater.h +++ b/include/llvm/Transforms/Utils/SSAUpdater.h @@ -109,8 +109,8 @@ public: private: Value *GetValueAtEndOfBlockInternal(BasicBlock *BB); - void operator=(const SSAUpdater&); // DO NOT IMPLEMENT - SSAUpdater(const SSAUpdater&); // DO NOT IMPLEMENT + void operator=(const SSAUpdater&) LLVM_DELETED_FUNCTION; + SSAUpdater(const SSAUpdater&) LLVM_DELETED_FUNCTION; }; /// LoadAndStorePromoter - This little helper class provides a convenient way to diff --git a/include/llvm/Transforms/Utils/SimplifyIndVar.h b/include/llvm/Transforms/Utils/SimplifyIndVar.h index 2632d18..7e97e21 100644 --- a/include/llvm/Transforms/Utils/SimplifyIndVar.h +++ b/include/llvm/Transforms/Utils/SimplifyIndVar.h @@ -21,8 +21,6 @@ namespace llvm { -extern cl::opt DisableIVRewrite; - class CastInst; class IVUsers; class Loop; diff --git a/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/include/llvm/Transforms/Utils/SimplifyLibCalls.h new file mode 100644 index 0000000..fde452b --- /dev/null +++ b/include/llvm/Transforms/Utils/SimplifyLibCalls.h @@ -0,0 +1,52 @@ +//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file exposes an interface to build some C language libcalls for +// optimization passes that need to call the various functions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H +#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H + +namespace llvm { + class Value; + class CallInst; + class DataLayout; + class Instruction; + class TargetLibraryInfo; + class LibCallSimplifierImpl; + + /// LibCallSimplifier - This class implements a collection of optimizations + /// that replace well formed calls to library functions with a more optimal + /// form. For example, replacing 'printf("Hello!")' with 'puts("Hello!")'. + class LibCallSimplifier { + /// Impl - A pointer to the actual implementation of the library call + /// simplifier. + LibCallSimplifierImpl *Impl; + public: + LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI); + virtual ~LibCallSimplifier(); + + /// optimizeCall - Take the given call instruction and return a more + /// optimal value to replace the instruction with or 0 if a more + /// optimal form can't be found. Note that the returned value may + /// be equal to the instruction being optimized. In this case all + /// other instructions that use the given instruction were modified + /// and the given instruction is dead. + Value *optimizeCall(CallInst *CI); + + /// replaceAllUsesWith - This method is used when the library call + /// simplifier needs to replace instructions other than the library + /// call being modified. 
+ virtual void replaceAllUsesWith(Instruction *I, Value *With) const; + }; +} // End llvm namespace + +#endif diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h index 8594707..5390c5e 100644 --- a/include/llvm/Transforms/Utils/ValueMapper.h +++ b/include/llvm/Transforms/Utils/ValueMapper.h @@ -25,7 +25,7 @@ namespace llvm { /// ValueMapTypeRemapper - This is a class that can be implemented by clients /// to remap types when cloning constants and instructions. class ValueMapTypeRemapper { - virtual void Anchor(); // Out of line method. + virtual void anchor(); // Out of line method. public: virtual ~ValueMapTypeRemapper() {} diff --git a/include/llvm/Transforms/Vectorize.h b/include/llvm/Transforms/Vectorize.h index 1e49a9c..41e53a8 100644 --- a/include/llvm/Transforms/Vectorize.h +++ b/include/llvm/Transforms/Vectorize.h @@ -107,6 +107,12 @@ BasicBlockPass * createBBVectorizePass(const VectorizeConfig &C = VectorizeConfig()); //===----------------------------------------------------------------------===// +// +// LoopVectorize - Create a loop vectorization pass. +// +Pass * createLoopVectorizePass(); + +//===----------------------------------------------------------------------===// /// @brief Vectorize the BasicBlock. /// /// @param BB The BasicBlock to be vectorized diff --git a/include/llvm/Type.h b/include/llvm/Type.h index 185258d..def4575 100644 --- a/include/llvm/Type.h +++ b/include/llvm/Type.h @@ -153,7 +153,7 @@ public: /// isPPC_FP128Ty - Return true if this is powerpc long double. bool isPPC_FP128Ty() const { return getTypeID() == PPC_FP128TyID; } - /// isFloatingPointTy - Return true if this is one of the five floating point + /// isFloatingPointTy - Return true if this is one of the six floating point /// types bool isFloatingPointTy() const { return getTypeID() == HalfTyID || getTypeID() == FloatTyID || @@ -167,7 +167,7 @@ public: /// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP. /// - bool isFPOrFPVectorTy() const; + bool isFPOrFPVectorTy() const { return getScalarType()->isFloatingPointTy(); } /// isLabelTy - Return true if this is 'label'. bool isLabelTy() const { return getTypeID() == LabelTyID; } @@ -185,7 +185,7 @@ public: /// isIntOrIntVectorTy - Return true if this is an integer type or a vector of /// integer types. /// - bool isIntOrIntVectorTy() const; + bool isIntOrIntVectorTy() const { return getScalarType()->isIntegerTy(); } /// isFunctionTy - True if this is an instance of FunctionType. /// @@ -203,6 +203,11 @@ public: /// bool isPointerTy() const { return getTypeID() == PointerTyID; } + /// isPtrOrPtrVectorTy - Return true if this is a pointer type or a vector of + /// pointer types. + /// + bool isPtrOrPtrVectorTy() const { return getScalarType()->isPointerTy(); } + /// isVectorTy - True if this is an instance of VectorType. /// bool isVectorTy() const { return getTypeID() == VectorTyID; } @@ -252,7 +257,7 @@ public: /// isSized - Return true if it makes sense to take the size of this type. To /// get the actual size for a particular target, it is reasonable to use the - /// TargetData subsystem to do this. + /// DataLayout subsystem to do this. /// bool isSized() const { // If it's a primitive, it is always sized. @@ -276,7 +281,7 @@ public: /// /// Note that this may not reflect the size of memory allocated for an /// instance of the type or the number of bytes that are written when an - /// instance of the type is stored to memory. 
The TargetData class provides + /// instance of the type is stored to memory. The DataLayout class provides /// additional query functions to provide this information. /// unsigned getPrimitiveSizeInBits() const; @@ -293,6 +298,7 @@ public: /// getScalarType - If this is a vector type, return the element type, /// otherwise return 'this'. + const Type *getScalarType() const; Type *getScalarType(); //===--------------------------------------------------------------------===// @@ -340,8 +346,10 @@ public: unsigned getVectorNumElements() const; Type *getVectorElementType() const { return getSequentialElementType(); } - unsigned getPointerAddressSpace() const; Type *getPointerElementType() const { return getSequentialElementType(); } + + /// \brief Get the address space of this pointer or pointer vector type. + unsigned getPointerAddressSpace() const; //===--------------------------------------------------------------------===// // Static members exported by the Type class itself. Useful for getting @@ -389,9 +397,6 @@ public: static PointerType *getInt32PtrTy(LLVMContext &C, unsigned AS = 0); static PointerType *getInt64PtrTy(LLVMContext &C, unsigned AS = 0); - /// Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const Type *) { return true; } - /// getPointerTo - Return a pointer to the current type. This is equivalent /// to PointerType::get(Foo, AddrSpace). PointerType *getPointerTo(unsigned AddrSpace = 0); diff --git a/include/llvm/Use.h b/include/llvm/Use.h index a496325..8080445 100644 --- a/include/llvm/Use.h +++ b/include/llvm/Use.h @@ -26,6 +26,7 @@ #define LLVM_USE_H #include "llvm/ADT/PointerIntPair.h" +#include "llvm/Support/Compiler.h" #include #include @@ -66,7 +67,7 @@ public: private: /// Copy ctor - do not implement - Use(const Use &U); + Use(const Use &U) LLVM_DELETED_FUNCTION; /// Destructor - Only for zap() ~Use() { diff --git a/include/llvm/User.h b/include/llvm/User.h index 5d5460c..df303d0 100644 --- a/include/llvm/User.h +++ b/include/llvm/User.h @@ -31,8 +31,8 @@ template struct OperandTraits; class User : public Value { - User(const User &); // Do not implement - void *operator new(size_t); // Do not implement + User(const User &) LLVM_DELETED_FUNCTION; + void *operator new(size_t) LLVM_DELETED_FUNCTION; template friend struct HungoffOperandTraits; virtual void anchor(); @@ -104,7 +104,7 @@ public: assert(i < NumOperands && "getOperandUse() out of range!"); return OperandList[i]; } - + unsigned getNumOperands() const { return NumOperands; } // --------------------------------------------------------------------------- @@ -118,6 +118,45 @@ public: inline op_iterator op_end() { return OperandList+NumOperands; } inline const_op_iterator op_end() const { return OperandList+NumOperands; } + /// Convenience iterator for directly iterating over the Values in the + /// OperandList + class value_op_iterator : public std::iterator { + op_iterator OI; + public: + explicit value_op_iterator(Use *U) : OI(U) {} + + bool operator==(const value_op_iterator &x) const { + return OI == x.OI; + } + bool operator!=(const value_op_iterator &x) const { + return !operator==(x); + } + + /// Iterator traversal: forward iteration only + value_op_iterator &operator++() { // Preincrement + ++OI; + return *this; + } + value_op_iterator operator++(int) { // Postincrement + value_op_iterator tmp = *this; ++*this; return tmp; + } + + /// Retrieve a pointer to the current Value. 
+ Value *operator*() const { + return *OI; + } + + Value *operator->() const { return operator*(); } + }; + + inline value_op_iterator value_op_begin() { + return value_op_iterator(op_begin()); + } + inline value_op_iterator value_op_end() { + return value_op_iterator(op_end()); + } + // dropAllReferences() - This function is in charge of "letting go" of all // objects that this User refers to. This allows one to // 'delete' a whole class at a time, even though there may be circular @@ -137,7 +176,6 @@ public: void replaceUsesOfWith(Value *From, Value *To); // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const User *) { return true; } static inline bool classof(const Value *V) { return isa(V) || isa(V); } diff --git a/include/llvm/Value.h b/include/llvm/Value.h index a82ac45..5b19435 100644 --- a/include/llvm/Value.h +++ b/include/llvm/Value.h @@ -16,6 +16,7 @@ #include "llvm/Use.h" #include "llvm/Support/Casting.h" +#include "llvm/Support/Compiler.h" namespace llvm { @@ -80,8 +81,8 @@ private: friend class ValueHandleBase; ValueName *Name; - void operator=(const Value &); // Do not implement - Value(const Value &); // Do not implement + void operator=(const Value &) LLVM_DELETED_FUNCTION; + Value(const Value &) LLVM_DELETED_FUNCTION; protected: /// printCustom - Value subclasses can override this to implement custom @@ -120,7 +121,7 @@ public: /// setName() - Change the name of the value, choosing a new unique name if /// the provided name is taken. /// - /// \arg Name - The new name; or "" if the value's name should be removed. + /// \param Name The new name; or "" if the value's name should be removed. void setName(const Twine &Name); @@ -256,11 +257,6 @@ public: /// hasValueHandle - Return true if there is a value handle associated with /// this value. bool hasValueHandle() const { return HasValueHandle; } - - // Methods for support type inquiry through isa, cast, and dyn_cast: - static inline bool classof(const Value *) { - return true; // Values are always values. - } /// stripPointerCasts - This method strips off any unneeded pointer casts and /// all-zero GEPs from the specified value, returning the original uncasted diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp index 3b6aab1..752edd5 100644 --- a/lib/Analysis/AliasAnalysis.cpp +++ b/lib/Analysis/AliasAnalysis.cpp @@ -35,7 +35,8 @@ #include "llvm/Instructions.h" #include "llvm/LLVMContext.h" #include "llvm/Type.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/Target/TargetLibraryInfo.h" using namespace llvm; // Register the AliasAnalysis interface, providing a nice name to refer to. @@ -451,7 +452,8 @@ AliasAnalysis::~AliasAnalysis() {} /// AliasAnalysis interface before any other methods are called. /// void AliasAnalysis::InitializeAliasAnalysis(Pass *P) { - TD = P->getAnalysisIfAvailable(); + TD = P->getAnalysisIfAvailable(); + TLI = P->getAnalysisIfAvailable(); AA = &P->getAnalysis(); } @@ -461,7 +463,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired(); // All AA's chain } -/// getTypeStoreSize - Return the TargetData store size for the given type, +/// getTypeStoreSize - Return the DataLayout store size for the given type, /// if known, or a conservative value otherwise. 
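As a quick usage sketch for the value_op_iterator convenience added to User in User.h above (the helper name is hypothetical):

#include "llvm/User.h"

// Count how many of U's operands are exactly the value V, iterating operand
// Values directly instead of going through the Use objects.
static unsigned countOperandOccurrences(llvm::User &U, const llvm::Value *V) {
  unsigned N = 0;
  for (llvm::User::value_op_iterator I = U.value_op_begin(),
                                     E = U.value_op_end();
       I != E; ++I)
    if (*I == V)
      ++N;
  return N;
}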
/// uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) { @@ -501,7 +503,7 @@ bool AliasAnalysis::canInstructionRangeModify(const Instruction &I1, bool llvm::isNoAliasCall(const Value *V) { if (isa(V) || isa(V)) return ImmutableCallSite(cast(V)) - .paramHasAttr(0, Attribute::NoAlias); + .paramHasAttr(0, Attributes::NoAlias); return false; } diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp index 92e8906..388c755 100644 --- a/lib/Analysis/AliasSetTracker.cpp +++ b/lib/Analysis/AliasSetTracker.cpp @@ -18,7 +18,7 @@ #include "llvm/LLVMContext.h" #include "llvm/Pass.h" #include "llvm/Type.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Assembly/Writer.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -550,7 +550,7 @@ void AliasSetTracker::copyValue(Value *From, Value *To) { //===----------------------------------------------------------------------===// void AliasSet::print(raw_ostream &OS) const { - OS << " AliasSet[" << (void*)this << ", " << RefCount << "] "; + OS << " AliasSet[" << (const void*)this << ", " << RefCount << "] "; OS << (AliasTy == MustAlias ? "must" : "may") << " alias, "; switch (AccessTy) { case NoModRef: OS << "No access "; break; @@ -590,8 +590,10 @@ void AliasSetTracker::print(raw_ostream &OS) const { OS << "\n"; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void AliasSet::dump() const { print(dbgs()); } void AliasSetTracker::dump() const { print(dbgs()); } +#endif //===----------------------------------------------------------------------===// // ASTCallbackVH Class Implementation diff --git a/lib/Analysis/Analysis.cpp b/lib/Analysis/Analysis.cpp index 0ba6af9..9dc81a6 100644 --- a/lib/Analysis/Analysis.cpp +++ b/lib/Analysis/Analysis.cpp @@ -26,11 +26,13 @@ void llvm::initializeAnalysis(PassRegistry &Registry) { initializeBasicAliasAnalysisPass(Registry); initializeBlockFrequencyInfoPass(Registry); initializeBranchProbabilityInfoPass(Registry); + initializeCostModelAnalysisPass(Registry); initializeCFGViewerPass(Registry); initializeCFGPrinterPass(Registry); initializeCFGOnlyViewerPass(Registry); initializeCFGOnlyPrinterPass(Registry); initializePrintDbgInfoPass(Registry); + initializeDependenceAnalysisPass(Registry); initializeDominanceFrontierPass(Registry); initializeDomViewerPass(Registry); initializeDomPrinterPass(Registry); @@ -46,7 +48,6 @@ void llvm::initializeAnalysis(PassRegistry &Registry) { initializeLazyValueInfoPass(Registry); initializeLibCallAliasAnalysisPass(Registry); initializeLintPass(Registry); - initializeLoopDependenceAnalysisPass(Registry); initializeLoopInfoPass(Registry); initializeMemDepPrinterPass(Registry); initializeMemoryDependenceAnalysisPass(Registry); @@ -61,6 +62,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) { initializePathProfileLoaderPassPass(Registry); initializeProfileVerifierPassPass(Registry); initializePathProfileVerifierPass(Registry); + initializeProfileMetadataLoaderPassPass(Registry); initializeRegionInfoPass(Registry); initializeRegionViewerPass(Registry); initializeRegionPrinterPass(Registry); diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index 1d028c2..4bb93ee 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -29,7 +29,7 @@ #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/ValueTracking.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" 
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" @@ -58,12 +58,12 @@ static bool isNonEscapingLocalObject(const Value *V) { // then it has not escaped before entering the function. Check if it escapes // inside the function. if (const Argument *A = dyn_cast(V)) - if (A->hasByValAttr() || A->hasNoAliasAttr()) { - // Don't bother analyzing arguments already known not to escape. - if (A->hasNoCaptureAttr()) - return true; + if (A->hasByValAttr() || A->hasNoAliasAttr()) + // Note even if the argument is marked nocapture we still need to check + // for copies made inside the function. The nocapture attribute only + // specifies that there are no copies made that outlive the function. return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true); - } + return false; } @@ -84,10 +84,11 @@ static bool isEscapeSource(const Value *V) { /// getObjectSize - Return the size of the object specified by V, or /// UnknownSize if unknown. -static uint64_t getObjectSize(const Value *V, const TargetData &TD, +static uint64_t getObjectSize(const Value *V, const DataLayout &TD, + const TargetLibraryInfo &TLI, bool RoundToAlign = false) { uint64_t Size; - if (getObjectSize(V, Size, &TD, RoundToAlign)) + if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign)) return Size; return AliasAnalysis::UnknownSize; } @@ -95,10 +96,11 @@ static uint64_t getObjectSize(const Value *V, const TargetData &TD, /// isObjectSmallerThan - Return true if we can prove that the object specified /// by V is smaller than Size. static bool isObjectSmallerThan(const Value *V, uint64_t Size, - const TargetData &TD) { + const DataLayout &TD, + const TargetLibraryInfo &TLI) { // This function needs to use the aligned object size because we allow // reads a bit past the end given sufficient alignment. - uint64_t ObjectSize = getObjectSize(V, TD, /*RoundToAlign*/true); + uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true); return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size; } @@ -106,8 +108,8 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size, /// isObjectSize - Return true if we can prove that the object specified /// by V has size Size. static bool isObjectSize(const Value *V, uint64_t Size, - const TargetData &TD) { - uint64_t ObjectSize = getObjectSize(V, TD); + const DataLayout &TD, const TargetLibraryInfo &TLI) { + uint64_t ObjectSize = getObjectSize(V, TD, TLI); return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size; } @@ -126,6 +128,15 @@ namespace { const Value *V; ExtensionKind Extension; int64_t Scale; + + bool operator==(const VariableGEPIndex &Other) const { + return V == Other.V && Extension == Other.Extension && + Scale == Other.Scale; + } + + bool operator!=(const VariableGEPIndex &Other) const { + return !operator==(Other); + } }; } @@ -140,7 +151,7 @@ namespace { /// represented in the result. static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset, ExtensionKind &Extension, - const TargetData &TD, unsigned Depth) { + const DataLayout &TD, unsigned Depth) { assert(V->getType()->isIntegerTy() && "Not an integer value"); // Limit our recursion depth. @@ -215,14 +226,14 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset, /// specified amount, but which may have other unrepresented high bits. As such, /// the gep cannot necessarily be reconstructed from its decomposed form. 
/// -/// When TargetData is around, this function is capable of analyzing everything +/// When DataLayout is around, this function is capable of analyzing everything /// that GetUnderlyingObject can look through. When not, it just looks /// through pointer casts. /// static const Value * DecomposeGEPExpression(const Value *V, int64_t &BaseOffs, SmallVectorImpl &VarIndices, - const TargetData *TD) { + const DataLayout *TD) { // Limit recursion depth to limit compile time in crazy cases. unsigned MaxLookup = 6; @@ -266,7 +277,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs, ->getElementType()->isSized()) return V; - // If we are lacking TargetData information, we can't compute the offets of + // If we are lacking DataLayout information, we can't compute the offets of // elements computed by GEPs. However, we can handle bitcast equivalent // GEPs. if (TD == 0) { @@ -417,13 +428,7 @@ namespace { /// BasicAliasAnalysis - This is the primary alias analysis implementation. struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis { static char ID; // Class identification, replacement for typeinfo - BasicAliasAnalysis() : ImmutablePass(ID), - // AliasCache rarely has more than 1 or 2 elements, - // so start it off fairly small so that clear() - // doesn't have to tromp through 64 (the default) - // elements on each alias query. This really wants - // something like a SmallDenseMap. - AliasCache(8) { + BasicAliasAnalysis() : ImmutablePass(ID) { initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry()); } @@ -443,7 +448,11 @@ namespace { "BasicAliasAnalysis doesn't support interprocedural queries."); AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag, LocB.Ptr, LocB.Size, LocB.TBAATag); - AliasCache.clear(); + // AliasCache rarely has more than 1 or 2 elements, always use + // shrink_and_clear so it quickly returns to the inline capacity of the + // SmallDenseMap if it ever grows larger. + // FIXME: This should really be shrink_to_inline_capacity_and_clear(). + AliasCache.shrink_and_clear(); return Alias; } @@ -481,7 +490,7 @@ namespace { private: // AliasCache - Track alias queries to guard against recursion. typedef std::pair LocPair; - typedef DenseMap AliasCacheTy; + typedef SmallDenseMap AliasCacheTy; AliasCacheTy AliasCache; // Visited - Track instructions visited by pointsToConstantMemory. @@ -490,6 +499,7 @@ namespace { // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP // instruction against another. AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size, + const MDNode *V1TBAAInfo, const Value *V2, uint64_t V2Size, const MDNode *V2TBAAInfo, const Value *UnderlyingV1, const Value *UnderlyingV2); @@ -807,6 +817,21 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS, return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min); } +static bool areVarIndicesEqual(SmallVector &Indices1, + SmallVector &Indices2) { + unsigned Size1 = Indices1.size(); + unsigned Size2 = Indices2.size(); + + if (Size1 != Size2) + return false; + + for (unsigned I = 0; I != Size1; ++I) + if (Indices1[I] != Indices2[I]) + return false; + + return true; +} + /// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction /// against another pointer. We know that V1 is a GEP, but we don't know /// anything about V2. 
UnderlyingV1 is GetUnderlyingObject(GEP1, TD), @@ -814,6 +839,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS, /// AliasAnalysis::AliasResult BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size, + const MDNode *V1TBAAInfo, const Value *V2, uint64_t V2Size, const MDNode *V2TBAAInfo, const Value *UnderlyingV1, @@ -821,9 +847,41 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size, int64_t GEP1BaseOffset; SmallVector GEP1VariableIndices; - // If we have two gep instructions with must-alias'ing base pointers, figure - // out if the indexes to the GEP tell us anything about the derived pointer. + // If we have two gep instructions with must-alias or not-alias'ing base + // pointers, figure out if the indexes to the GEP tell us anything about the + // derived pointer. if (const GEPOperator *GEP2 = dyn_cast(V2)) { + // Check for geps of non-aliasing underlying pointers where the offsets are + // identical. + if (V1Size == V2Size) { + // Do the base pointers alias assuming type and size. + AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, + V1TBAAInfo, UnderlyingV2, + V2Size, V2TBAAInfo); + if (PreciseBaseAlias == NoAlias) { + // See if the computed offset from the common pointer tells us about the + // relation of the resulting pointer. + int64_t GEP2BaseOffset; + SmallVector GEP2VariableIndices; + const Value *GEP2BasePtr = + DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD); + const Value *GEP1BasePtr = + DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD); + // DecomposeGEPExpression and GetUnderlyingObject should return the + // same result except when DecomposeGEPExpression has no DataLayout. + if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) { + assert(TD == 0 && + "DecomposeGEPExpression and GetUnderlyingObject disagree!"); + return MayAlias; + } + // Same offsets. + if (GEP1BaseOffset == GEP2BaseOffset && + areVarIndicesEqual(GEP1VariableIndices, GEP2VariableIndices)) + return NoAlias; + GEP1VariableIndices.clear(); + } + } + // Do the base pointers alias? AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0, UnderlyingV2, UnknownSize, 0); @@ -843,9 +901,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size, const Value *GEP2BasePtr = DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD); - // If DecomposeGEPExpression isn't able to look all the way through the - // addressing operation, we must not have TD and this is too complex for us - // to handle without it. + // DecomposeGEPExpression and GetUnderlyingObject should return the + // same result except when DecomposeGEPExpression has no DataLayout. if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) { assert(TD == 0 && "DecomposeGEPExpression and GetUnderlyingObject disagree!"); @@ -879,9 +936,8 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size, const Value *GEP1BasePtr = DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD); - // If DecomposeGEPExpression isn't able to look all the way through the - // addressing operation, we must not have TD and this is too complex for us - // to handle without it. + // DecomposeGEPExpression and GetUnderlyingObject should return the + // same result except when DecomposeGEPExpression has no DataLayout. 
if (GEP1BasePtr != UnderlyingV1) { assert(TD == 0 && "DecomposeGEPExpression and GetUnderlyingObject disagree!"); @@ -1004,12 +1060,42 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize, // on corresponding edges. if (const PHINode *PN2 = dyn_cast(V2)) if (PN2->getParent() == PN->getParent()) { + LocPair Locs(Location(PN, PNSize, PNTBAAInfo), + Location(V2, V2Size, V2TBAAInfo)); + if (PN > V2) + std::swap(Locs.first, Locs.second); + AliasResult Alias = aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo, PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)), V2Size, V2TBAAInfo); if (Alias == MayAlias) return MayAlias; + + // If the first source of the PHI nodes NoAlias and the other inputs are + // the PHI node itself through some amount of recursion this does not add + // any new information so just return NoAlias. + // bb: + // ptr = ptr2 + 1 + // loop: + // ptr_phi = phi [bb, ptr], [loop, ptr_plus_one] + // ptr2_phi = phi [bb, ptr2], [loop, ptr2_plus_one] + // ... + // ptr_plus_one = gep ptr_phi, 1 + // ptr2_plus_one = gep ptr2_phi, 1 + // We assume for the recursion that the the phis (ptr_phi, ptr2_phi) do + // not alias each other. + bool ArePhisAssumedNoAlias = false; + AliasResult OrigAliasResult = NoAlias; + if (Alias == NoAlias) { + // Pretend the phis do not alias. + assert(AliasCache.count(Locs) && + "There must exist an entry for the phi node"); + OrigAliasResult = AliasCache[Locs]; + AliasCache[Locs] = NoAlias; + ArePhisAssumedNoAlias = true; + } + for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) { AliasResult ThisAlias = aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo, @@ -1019,6 +1105,11 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize, if (Alias == MayAlias) break; } + + // Reset if speculation failed. + if (ArePhisAssumedNoAlias && Alias != NoAlias) + AliasCache[Locs] = OrigAliasResult; + return Alias; } @@ -1133,8 +1224,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size, // If the size of one access is larger than the entire object on the other // side, then we know such behavior is undefined and can assume no alias. if (TD) - if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) || - (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD))) + if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) || + (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI))) return NoAlias; // Check the cache before climbing up use-def chains. 
This also terminates @@ -1154,15 +1245,17 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size, std::swap(V1, V2); std::swap(V1Size, V2Size); std::swap(O1, O2); + std::swap(V1TBAAInfo, V2TBAAInfo); } if (const GEPOperator *GV1 = dyn_cast(V1)) { - AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2); + AliasResult Result = aliasGEP(GV1, V1Size, V1TBAAInfo, V2, V2Size, V2TBAAInfo, O1, O2); if (Result != MayAlias) return AliasCache[Locs] = Result; } if (isa(V2) && !isa(V1)) { std::swap(V1, V2); std::swap(V1Size, V2Size); + std::swap(V1TBAAInfo, V2TBAAInfo); } if (const PHINode *PN = dyn_cast(V1)) { AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo, @@ -1173,6 +1266,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size, if (isa(V2) && !isa(V1)) { std::swap(V1, V2); std::swap(V1Size, V2Size); + std::swap(V1TBAAInfo, V2TBAAInfo); } if (const SelectInst *S1 = dyn_cast(V1)) { AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo, @@ -1184,8 +1278,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size, // accesses is accessing the entire object, then the accesses must // overlap in some way. if (TD && O1 == O2) - if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD)) || - (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD))) + if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) || + (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI))) return AliasCache[Locs] = PartialAlias; AliasResult Result = diff --git a/lib/Analysis/BranchProbabilityInfo.cpp b/lib/Analysis/BranchProbabilityInfo.cpp index b255ce6..04a6560 100644 --- a/lib/Analysis/BranchProbabilityInfo.cpp +++ b/lib/Analysis/BranchProbabilityInfo.cpp @@ -115,14 +115,14 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) { return false; } - SmallPtrSet UnreachableEdges; - SmallPtrSet ReachableEdges; + SmallVector UnreachableEdges; + SmallVector ReachableEdges; for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) { if (PostDominatedByUnreachable.count(*I)) - UnreachableEdges.insert(*I); + UnreachableEdges.push_back(I.getSuccessorIndex()); else - ReachableEdges.insert(*I); + ReachableEdges.push_back(I.getSuccessorIndex()); } // If all successors are in the set of blocks post-dominated by unreachable, @@ -136,18 +136,19 @@ bool BranchProbabilityInfo::calcUnreachableHeuristics(BasicBlock *BB) { return false; uint32_t UnreachableWeight = - std::max(UR_TAKEN_WEIGHT / UnreachableEdges.size(), MIN_WEIGHT); - for (SmallPtrSet::iterator I = UnreachableEdges.begin(), - E = UnreachableEdges.end(); + std::max(UR_TAKEN_WEIGHT / (unsigned)UnreachableEdges.size(), MIN_WEIGHT); + for (SmallVector::iterator I = UnreachableEdges.begin(), + E = UnreachableEdges.end(); I != E; ++I) setEdgeWeight(BB, *I, UnreachableWeight); if (ReachableEdges.empty()) return true; uint32_t ReachableWeight = - std::max(UR_NONTAKEN_WEIGHT / ReachableEdges.size(), NORMAL_WEIGHT); - for (SmallPtrSet::iterator I = ReachableEdges.begin(), - E = ReachableEdges.end(); + std::max(UR_NONTAKEN_WEIGHT / (unsigned)ReachableEdges.size(), + NORMAL_WEIGHT); + for (SmallVector::iterator I = ReachableEdges.begin(), + E = ReachableEdges.end(); I != E; ++I) setEdgeWeight(BB, *I, ReachableWeight); @@ -187,7 +188,7 @@ bool BranchProbabilityInfo::calcMetadataWeights(BasicBlock *BB) { } assert(Weights.size() == TI->getNumSuccessors() && "Checked above"); for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) - setEdgeWeight(BB, TI->getSuccessor(i), 
Weights[i]); + setEdgeWeight(BB, i, Weights[i]); return true; } @@ -211,19 +212,17 @@ bool BranchProbabilityInfo::calcPointerHeuristics(BasicBlock *BB) { assert(CI->getOperand(1)->getType()->isPointerTy()); - BasicBlock *Taken = BI->getSuccessor(0); - BasicBlock *NonTaken = BI->getSuccessor(1); - // p != 0 -> isProb = true // p == 0 -> isProb = false // p != q -> isProb = true // p == q -> isProb = false; + unsigned TakenIdx = 0, NonTakenIdx = 1; bool isProb = CI->getPredicate() == ICmpInst::ICMP_NE; if (!isProb) - std::swap(Taken, NonTaken); + std::swap(TakenIdx, NonTakenIdx); - setEdgeWeight(BB, Taken, PH_TAKEN_WEIGHT); - setEdgeWeight(BB, NonTaken, PH_NONTAKEN_WEIGHT); + setEdgeWeight(BB, TakenIdx, PH_TAKEN_WEIGHT); + setEdgeWeight(BB, NonTakenIdx, PH_NONTAKEN_WEIGHT); return true; } @@ -234,17 +233,17 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) { if (!L) return false; - SmallPtrSet BackEdges; - SmallPtrSet ExitingEdges; - SmallPtrSet InEdges; // Edges from header to the loop. + SmallVector BackEdges; + SmallVector ExitingEdges; + SmallVector InEdges; // Edges from header to the loop. for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) { if (!L->contains(*I)) - ExitingEdges.insert(*I); + ExitingEdges.push_back(I.getSuccessorIndex()); else if (L->getHeader() == *I) - BackEdges.insert(*I); + BackEdges.push_back(I.getSuccessorIndex()); else - InEdges.insert(*I); + InEdges.push_back(I.getSuccessorIndex()); } if (uint32_t numBackEdges = BackEdges.size()) { @@ -252,10 +251,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) { if (backWeight < NORMAL_WEIGHT) backWeight = NORMAL_WEIGHT; - for (SmallPtrSet::iterator EI = BackEdges.begin(), + for (SmallVector::iterator EI = BackEdges.begin(), EE = BackEdges.end(); EI != EE; ++EI) { - BasicBlock *Back = *EI; - setEdgeWeight(BB, Back, backWeight); + setEdgeWeight(BB, *EI, backWeight); } } @@ -264,10 +262,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) { if (inWeight < NORMAL_WEIGHT) inWeight = NORMAL_WEIGHT; - for (SmallPtrSet::iterator EI = InEdges.begin(), + for (SmallVector::iterator EI = InEdges.begin(), EE = InEdges.end(); EI != EE; ++EI) { - BasicBlock *Back = *EI; - setEdgeWeight(BB, Back, inWeight); + setEdgeWeight(BB, *EI, inWeight); } } @@ -276,10 +273,9 @@ bool BranchProbabilityInfo::calcLoopBranchHeuristics(BasicBlock *BB) { if (exitWeight < MIN_WEIGHT) exitWeight = MIN_WEIGHT; - for (SmallPtrSet::iterator EI = ExitingEdges.begin(), + for (SmallVector::iterator EI = ExitingEdges.begin(), EE = ExitingEdges.end(); EI != EE; ++EI) { - BasicBlock *Exiting = *EI; - setEdgeWeight(BB, Exiting, exitWeight); + setEdgeWeight(BB, *EI, exitWeight); } } @@ -335,14 +331,13 @@ bool BranchProbabilityInfo::calcZeroHeuristics(BasicBlock *BB) { return false; } - BasicBlock *Taken = BI->getSuccessor(0); - BasicBlock *NonTaken = BI->getSuccessor(1); + unsigned TakenIdx = 0, NonTakenIdx = 1; if (!isProb) - std::swap(Taken, NonTaken); + std::swap(TakenIdx, NonTakenIdx); - setEdgeWeight(BB, Taken, ZH_TAKEN_WEIGHT); - setEdgeWeight(BB, NonTaken, ZH_NONTAKEN_WEIGHT); + setEdgeWeight(BB, TakenIdx, ZH_TAKEN_WEIGHT); + setEdgeWeight(BB, NonTakenIdx, ZH_NONTAKEN_WEIGHT); return true; } @@ -372,14 +367,13 @@ bool BranchProbabilityInfo::calcFloatingPointHeuristics(BasicBlock *BB) { return false; } - BasicBlock *Taken = BI->getSuccessor(0); - BasicBlock *NonTaken = BI->getSuccessor(1); + unsigned TakenIdx = 0, NonTakenIdx = 1; if (!isProb) - std::swap(Taken, NonTaken); + 
std::swap(TakenIdx, NonTakenIdx); - setEdgeWeight(BB, Taken, FPH_TAKEN_WEIGHT); - setEdgeWeight(BB, NonTaken, FPH_NONTAKEN_WEIGHT); + setEdgeWeight(BB, TakenIdx, FPH_TAKEN_WEIGHT); + setEdgeWeight(BB, NonTakenIdx, FPH_NONTAKEN_WEIGHT); return true; } @@ -389,11 +383,8 @@ bool BranchProbabilityInfo::calcInvokeHeuristics(BasicBlock *BB) { if (!II) return false; - BasicBlock *Normal = II->getNormalDest(); - BasicBlock *Unwind = II->getUnwindDest(); - - setEdgeWeight(BB, Normal, IH_TAKEN_WEIGHT); - setEdgeWeight(BB, Unwind, IH_NONTAKEN_WEIGHT); + setEdgeWeight(BB, 0/*Index for Normal*/, IH_TAKEN_WEIGHT); + setEdgeWeight(BB, 1/*Index for Unwind*/, IH_NONTAKEN_WEIGHT); return true; } @@ -450,8 +441,7 @@ uint32_t BranchProbabilityInfo::getSumForBlock(const BasicBlock *BB) const { uint32_t Sum = 0; for (succ_const_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) { - const BasicBlock *Succ = *I; - uint32_t Weight = getEdgeWeight(BB, Succ); + uint32_t Weight = getEdgeWeight(BB, I.getSuccessorIndex()); uint32_t PrevSum = Sum; Sum += Weight; @@ -494,11 +484,13 @@ BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const { return 0; } -// Return edge's weight. If can't find it, return DEFAULT_WEIGHT value. +/// Get the raw edge weight for the edge. If can't find it, return +/// DEFAULT_WEIGHT value. Here an edge is specified using PredBlock and an index +/// to the successors. uint32_t BranchProbabilityInfo:: -getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const { - Edge E(Src, Dst); - DenseMap::const_iterator I = Weights.find(E); +getEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors) const { + DenseMap::const_iterator I = + Weights.find(std::make_pair(Src, IndexInSuccessors)); if (I != Weights.end()) return I->second; @@ -506,15 +498,43 @@ getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const { return DEFAULT_WEIGHT; } +/// Get the raw edge weight calculated for the block pair. This returns the sum +/// of all raw edge weights from Src to Dst. +uint32_t BranchProbabilityInfo:: +getEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst) const { + uint32_t Weight = 0; + DenseMap::const_iterator MapI; + for (succ_const_iterator I = succ_begin(Src), E = succ_end(Src); I != E; ++I) + if (*I == Dst) { + MapI = Weights.find(std::make_pair(Src, I.getSuccessorIndex())); + if (MapI != Weights.end()) + Weight += MapI->second; + } + return (Weight == 0) ? DEFAULT_WEIGHT : Weight; +} + +/// Set the edge weight for a given edge specified by PredBlock and an index +/// to the successors. void BranchProbabilityInfo:: -setEdgeWeight(const BasicBlock *Src, const BasicBlock *Dst, uint32_t Weight) { - Weights[std::make_pair(Src, Dst)] = Weight; +setEdgeWeight(const BasicBlock *Src, unsigned IndexInSuccessors, + uint32_t Weight) { + Weights[std::make_pair(Src, IndexInSuccessors)] = Weight; DEBUG(dbgs() << "set edge " << Src->getName() << " -> " - << Dst->getName() << " weight to " << Weight - << (isEdgeHot(Src, Dst) ? " [is HOT now]\n" : "\n")); + << IndexInSuccessors << " successor weight to " + << Weight << "\n"); } +/// Get an edge's probability, relative to other out-edges from Src. +BranchProbability BranchProbabilityInfo:: +getEdgeProbability(const BasicBlock *Src, unsigned IndexInSuccessors) const { + uint32_t N = getEdgeWeight(Src, IndexInSuccessors); + uint32_t D = getSumForBlock(Src); + + return BranchProbability(N, D); +} +/// Get the probability of going from Src to Dst. It returns the sum of all +/// probabilities for edges from Src to Dst. 
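To make the per-successor-index bookkeeping above concrete, consider a terminator with duplicate successors (an illustrative case):

// Given:   switch i32 %x, label %other [ i32 0, label %dest
//                                         i32 1, label %dest ]
// the two edges into %dest are tracked separately as (Src, 1) and (Src, 2);
// successor index 0 is the default destination %other. getEdgeWeight(Src, 1)
// and getEdgeWeight(Src, 2) return the raw per-edge weights, while
// getEdgeWeight(Src, %dest) returns their sum, and getEdgeProbability(Src,
// %dest) likewise accumulates the probabilities of both edges.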
BranchProbability BranchProbabilityInfo:: getEdgeProbability(const BasicBlock *Src, const BasicBlock *Dst) const { diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt index 96e68b4..b3a40be 100644 --- a/lib/Analysis/CMakeLists.txt +++ b/lib/Analysis/CMakeLists.txt @@ -10,9 +10,11 @@ add_llvm_library(LLVMAnalysis BranchProbabilityInfo.cpp CFGPrinter.cpp CaptureTracking.cpp + CostModel.cpp CodeMetrics.cpp ConstantFolding.cpp DbgInfoPrinter.cpp + DependenceAnalysis.cpp DomPrinter.cpp DominanceFrontier.cpp IVUsers.cpp @@ -26,7 +28,6 @@ add_llvm_library(LLVMAnalysis LibCallSemantics.cpp Lint.cpp Loads.cpp - LoopDependenceAnalysis.cpp LoopInfo.cpp LoopPass.cpp MemDepPrinter.cpp @@ -44,6 +45,8 @@ add_llvm_library(LLVMAnalysis ProfileInfoLoader.cpp ProfileInfoLoaderPass.cpp ProfileVerifierPass.cpp + ProfileDataLoader.cpp + ProfileDataLoaderPass.cpp RegionInfo.cpp RegionPass.cpp RegionPrinter.cpp diff --git a/lib/Analysis/CaptureTracking.cpp b/lib/Analysis/CaptureTracking.cpp index 974b906..d9c0299 100644 --- a/lib/Analysis/CaptureTracking.cpp +++ b/lib/Analysis/CaptureTracking.cpp @@ -23,6 +23,8 @@ using namespace llvm; CaptureTracker::~CaptureTracker() {} +bool CaptureTracker::shouldExplore(Use *U) { return true; } + namespace { struct SimpleCaptureTracker : public CaptureTracker { explicit SimpleCaptureTracker(bool ReturnCaptures) @@ -30,8 +32,6 @@ namespace { void tooManyUses() { Captured = true; } - bool shouldExplore(Use *U) { return true; } - bool captured(Use *U) { if (isa(U->getUser()) && !ReturnCaptures) return false; diff --git a/lib/Analysis/CodeMetrics.cpp b/lib/Analysis/CodeMetrics.cpp index acda34b..651a54b 100644 --- a/lib/Analysis/CodeMetrics.cpp +++ b/lib/Analysis/CodeMetrics.cpp @@ -15,7 +15,7 @@ #include "llvm/Function.h" #include "llvm/Support/CallSite.h" #include "llvm/IntrinsicInst.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; @@ -54,7 +54,7 @@ bool llvm::callIsSmall(ImmutableCallSite CS) { return false; } -bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) { +bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) { if (isa(I)) return true; @@ -119,7 +119,7 @@ bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) { /// analyzeBasicBlock - Fill in the current structure with information gleaned /// from the specified block. void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB, - const TargetData *TD) { + const DataLayout *TD) { ++NumBlocks; unsigned NumInstsBeforeThisBB = NumInsts; for (BasicBlock::const_iterator II = BB->begin(), E = BB->end(); @@ -189,14 +189,14 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB, NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB; } -void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) { +void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) { // If this function contains a call that "returns twice" (e.g., setjmp or // _setjmp) and it isn't marked with "returns twice" itself, never inline it. // This is a hack because we depend on the user marking their local variables // as volatile if they are live across a setjmp call, and they probably // won't do this in callers. exposesReturnsTwice = F->callsFunctionThatReturnsTwice() && - !F->hasFnAttr(Attribute::ReturnsTwice); + !F->getFnAttributes().hasAttribute(Attributes::ReturnsTwice); // Look at the size of the callee. 
for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB) diff --git a/lib/Analysis/ConstantFolding.cpp b/lib/Analysis/ConstantFolding.cpp index f5e619c..91a5b84 100644 --- a/lib/Analysis/ConstantFolding.cpp +++ b/lib/Analysis/ConstantFolding.cpp @@ -11,7 +11,7 @@ // // Also, to supplement the basic VMCore ConstantExpr simplifications, // this file defines some additional folding routines that can make use of -// TargetData information. These functions cannot go in VMCore due to library +// DataLayout information. These functions cannot go in VMCore due to library // dependency issues. // //===----------------------------------------------------------------------===// @@ -25,7 +25,7 @@ #include "llvm/Intrinsics.h" #include "llvm/Operator.h" #include "llvm/Analysis/ValueTracking.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" @@ -41,11 +41,11 @@ using namespace llvm; // Constant Folding internal helper functions //===----------------------------------------------------------------------===// -/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with -/// TargetData. This always returns a non-null constant, but it may be a +/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with +/// DataLayout. This always returns a non-null constant, but it may be a /// ConstantExpr if unfoldable. static Constant *FoldBitCast(Constant *C, Type *DestTy, - const TargetData &TD) { + const DataLayout &TD) { // Catch the obvious splat cases. if (C->isNullValue() && !DestTy->isX86_MMXTy()) return Constant::getNullValue(DestTy); @@ -59,9 +59,9 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy, return ConstantExpr::getBitCast(C, DestTy); unsigned NumSrcElts = CDV->getType()->getNumElements(); - + Type *SrcEltTy = CDV->getType()->getElementType(); - + // If the vector is a vector of floating point, convert it to vector of int // to simplify things. if (SrcEltTy->isFloatingPointTy()) { @@ -72,7 +72,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy, C = ConstantExpr::getBitCast(C, SrcIVTy); CDV = cast(C); } - + // Now that we know that the input value is a vector of integers, just shift // and insert them into our result. unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy); @@ -84,43 +84,43 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy, else Result |= CDV->getElementAsInteger(i); } - + return ConstantInt::get(IT, Result); } - + // The code below only handles casts to vectors currently. VectorType *DestVTy = dyn_cast(DestTy); if (DestVTy == 0) return ConstantExpr::getBitCast(C, DestTy); - + // If this is a scalar -> vector cast, convert the input into a <1 x scalar> // vector so the code below can handle it uniformly. if (isa(C) || isa(C)) { Constant *Ops = C; // don't take the address of C! return FoldBitCast(ConstantVector::get(Ops), DestTy, TD); } - + // If this is a bitcast from constant vector -> vector, fold it. if (!isa(C) && !isa(C)) return ConstantExpr::getBitCast(C, DestTy); - + // If the element types match, VMCore can fold it. 
unsigned NumDstElt = DestVTy->getNumElements(); unsigned NumSrcElt = C->getType()->getVectorNumElements(); if (NumDstElt == NumSrcElt) return ConstantExpr::getBitCast(C, DestTy); - + Type *SrcEltTy = C->getType()->getVectorElementType(); Type *DstEltTy = DestVTy->getElementType(); - - // Otherwise, we're changing the number of elements in a vector, which + + // Otherwise, we're changing the number of elements in a vector, which // requires endianness information to do the right thing. For example, // bitcast (<2 x i64> to <4 x i32>) // folds to (little endian): // <4 x i32> // and to (big endian): // <4 x i32> - + // First thing is first. We only want to think about integer here, so if // we have something in FP form, recast it as integer. if (DstEltTy->isFloatingPointTy()) { @@ -130,11 +130,11 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy, VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt); // Recursively handle this integer conversion, if possible. C = FoldBitCast(C, DestIVTy, TD); - + // Finally, VMCore can handle this now that #elts line up. return ConstantExpr::getBitCast(C, DestTy); } - + // Okay, we know the destination is integer, if the input is FP, convert // it to integer first. if (SrcEltTy->isFloatingPointTy()) { @@ -148,13 +148,13 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy, !isa(C)) return C; } - + // Now we know that the input and output vectors are both integer vectors // of the same size, and that their #elements is not the same. Do the // conversion here, which depends on whether the input or output has // more elements. bool isLittleEndian = TD.isLittleEndian(); - + SmallVector Result; if (NumDstElt < NumSrcElt) { // Handle: bitcast (<4 x i32> to <2 x i64>) @@ -170,15 +170,15 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy, Constant *Src =dyn_cast(C->getAggregateElement(SrcElt++)); if (!Src) // Reject constantexpr elements. return ConstantExpr::getBitCast(C, DestTy); - + // Zero extend the element to the right size. Src = ConstantExpr::getZExt(Src, Elt->getType()); - + // Shift it to the right place, depending on endianness. - Src = ConstantExpr::getShl(Src, + Src = ConstantExpr::getShl(Src, ConstantInt::get(Src->getType(), ShiftAmt)); ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize; - + // Mix it in. Elt = ConstantExpr::getOr(Elt, Src); } @@ -186,30 +186,30 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy, } return ConstantVector::get(Result); } - + // Handle: bitcast (<2 x i64> to <4 x i32>) unsigned Ratio = NumDstElt/NumSrcElt; unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits(); - + // Loop over each source value, expanding into multiple results. for (unsigned i = 0; i != NumSrcElt; ++i) { Constant *Src = dyn_cast(C->getAggregateElement(i)); if (!Src) // Reject constantexpr elements. return ConstantExpr::getBitCast(C, DestTy); - + unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1); for (unsigned j = 0; j != Ratio; ++j) { // Shift the piece of the value into the right place, depending on // endianness. - Constant *Elt = ConstantExpr::getLShr(Src, + Constant *Elt = ConstantExpr::getLShr(Src, ConstantInt::get(Src->getType(), ShiftAmt)); ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize; - + // Truncate and remember this piece. Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy)); } } - + return ConstantVector::get(Result); } @@ -218,34 +218,34 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy, /// from a global, return the global and the constant. 
Because of /// constantexprs, this function is recursive. static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, - int64_t &Offset, const TargetData &TD) { + int64_t &Offset, const DataLayout &TD) { // Trivial case, constant is the global. if ((GV = dyn_cast(C))) { Offset = 0; return true; } - + // Otherwise, if this isn't a constant expr, bail out. ConstantExpr *CE = dyn_cast(C); if (!CE) return false; - + // Look through ptr->int and ptr->ptr casts. if (CE->getOpcode() == Instruction::PtrToInt || CE->getOpcode() == Instruction::BitCast) return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD); - - // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5) + + // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5) if (CE->getOpcode() == Instruction::GetElementPtr) { // Cannot compute this if the element type of the pointer is missing size // info. if (!cast(CE->getOperand(0)->getType()) ->getElementType()->isSized()) return false; - + // If the base isn't a global+constant, we aren't either. if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD)) return false; - + // Otherwise, add any offset that our operands provide. gep_type_iterator GTI = gep_type_begin(CE); for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end(); @@ -253,7 +253,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, ConstantInt *CI = dyn_cast(*i); if (!CI) return false; // Index isn't a simple constant? if (CI->isZero()) continue; // Not adding anything. - + if (StructType *ST = dyn_cast(*GTI)) { // N = N + Offset Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue()); @@ -264,7 +264,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, } return true; } - + return false; } @@ -274,30 +274,33 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, /// the CurPtr buffer. TD is the target data. static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr, unsigned BytesLeft, - const TargetData &TD) { + const DataLayout &TD) { assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) && "Out of range access"); - + // If this element is zero or undefined, we can just return since *CurPtr is // zero initialized. if (isa(C) || isa(C)) return true; - + if (ConstantInt *CI = dyn_cast(C)) { if (CI->getBitWidth() > 64 || (CI->getBitWidth() & 7) != 0) return false; - + uint64_t Val = CI->getZExtValue(); unsigned IntBytes = unsigned(CI->getBitWidth()/8); - + for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) { - CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8)); + int n = ByteOffset; + if (!TD.isLittleEndian()) + n = IntBytes - n - 1; + CurPtr[i] = (unsigned char)(Val >> (n * 8)); ++ByteOffset; } return true; } - + if (ConstantFP *CFP = dyn_cast(C)) { if (CFP->getType()->isDoubleTy()) { C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD); @@ -309,13 +312,13 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, } return false; } - + if (ConstantStruct *CS = dyn_cast(C)) { const StructLayout *SL = TD.getStructLayout(CS->getType()); unsigned Index = SL->getElementContainingOffset(ByteOffset); uint64_t CurEltOffset = SL->getElementOffset(Index); ByteOffset -= CurEltOffset; - + while (1) { // If the element access is to the element itself and not to tail padding, // read the bytes from the element. 
@@ -325,9 +328,9 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr, BytesLeft, TD)) return false; - + ++Index; - + // Check to see if we read from the last struct element, if so we're done. if (Index == CS->getType()->getNumElements()) return true; @@ -375,11 +378,11 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, } return true; } - + if (ConstantExpr *CE = dyn_cast(C)) { if (CE->getOpcode() == Instruction::IntToPtr && - CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext())) - return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, + CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext())) + return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr, BytesLeft, TD); } @@ -388,10 +391,10 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, } static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, - const TargetData &TD) { + const DataLayout &TD) { Type *LoadTy = cast(C->getType())->getElementType(); IntegerType *IntType = dyn_cast(LoadTy); - + // If this isn't an integer load we can't fold it directly. if (!IntType) { // If this is a float/double load, we can try folding it as an int32/64 load @@ -415,15 +418,15 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, return FoldBitCast(Res, LoadTy, TD); return 0; } - + unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8; if (BytesLoaded > 32 || BytesLoaded == 0) return 0; - + GlobalValue *GVal; int64_t Offset; if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD)) return 0; - + GlobalVariable *GV = dyn_cast(GVal); if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() || !GV->getInitializer()->getType()->isSized()) @@ -432,20 +435,29 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, // If we're loading off the beginning of the global, some bytes may be valid, // but we don't try to handle this. if (Offset < 0) return 0; - + // If we're not accessing anything in this constant, the result is undefined. if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType())) return UndefValue::get(IntType); - + unsigned char RawBytes[32] = {0}; if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes, BytesLoaded, TD)) return 0; - APInt ResultVal = APInt(IntType->getBitWidth(), RawBytes[BytesLoaded-1]); - for (unsigned i = 1; i != BytesLoaded; ++i) { - ResultVal <<= 8; - ResultVal |= RawBytes[BytesLoaded-1-i]; + APInt ResultVal = APInt(IntType->getBitWidth(), 0); + if (TD.isLittleEndian()) { + ResultVal = RawBytes[BytesLoaded - 1]; + for (unsigned i = 1; i != BytesLoaded; ++i) { + ResultVal <<= 8; + ResultVal |= RawBytes[BytesLoaded-1-i]; + } + } else { + ResultVal = RawBytes[0]; + for (unsigned i = 1; i != BytesLoaded; ++i) { + ResultVal <<= 8; + ResultVal |= RawBytes[i]; + } } return ConstantInt::get(IntType->getContext(), ResultVal); @@ -455,7 +467,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, /// produce if it is constant and determinable. If this is not determinable, /// return null. Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, - const TargetData *TD) { + const DataLayout *TD) { // First, try the easy cases: if (GlobalVariable *GV = dyn_cast(C)) if (GV->isConstant() && GV->hasDefinitiveInitializer()) @@ -464,15 +476,15 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, // If the loaded value isn't a constant expr, we can't handle it. 
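The hunk above, together with the ReadDataFromGlobal change before it, makes constant-folded loads endian-aware: on big-endian targets the byte index is mirrored when an integer is scattered into the buffer, and the loaded bytes are reassembled most-significant-byte-first instead of last, which is why the old little-endian-only guard in ConstantFoldLoadFromConstPtr (continuing below) can be dropped. An editor's sketch, not part of the patch, of just the reassembly loop over a fixed four-byte buffer; the helper name and example bytes are invented.

// Editor's sketch, not part of the patch: byte reassembly in both byte orders,
// mirroring the RawBytes loop added above.
#include <cassert>
#include <cstdint>

static uint32_t assembleLoad(const unsigned char Bytes[4], bool LittleEndian) {
  uint32_t Result = 0;
  if (LittleEndian) {
    // Highest-addressed byte is most significant.
    Result = Bytes[3];
    for (int i = 2; i >= 0; --i)
      Result = (Result << 8) | Bytes[i];
  } else {
    // Lowest-addressed byte is most significant.
    Result = Bytes[0];
    for (int i = 1; i <= 3; ++i)
      Result = (Result << 8) | Bytes[i];
  }
  return Result;
}

int main() {
  const unsigned char Bytes[4] = {0x78, 0x56, 0x34, 0x12};
  assert(assembleLoad(Bytes, /*LittleEndian=*/true)  == 0x12345678);
  assert(assembleLoad(Bytes, /*LittleEndian=*/false) == 0x78563412);
  return 0;
}

The same four stored bytes therefore fold to 0x12345678 when the DataLayout is little-endian and to 0x78563412 when it is big-endian.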
ConstantExpr *CE = dyn_cast(C); if (!CE) return 0; - + if (CE->getOpcode() == Instruction::GetElementPtr) { if (GlobalVariable *GV = dyn_cast(CE->getOperand(0))) if (GV->isConstant() && GV->hasDefinitiveInitializer()) - if (Constant *V = + if (Constant *V = ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE)) return V; } - + // Instead of loading constant c string, use corresponding integer value // directly if string length is small enough. StringRef Str; @@ -500,14 +512,14 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, SingleChar = 0; StrVal = (StrVal << 8) | SingleChar; } - + Constant *Res = ConstantInt::get(CE->getContext(), StrVal); if (Ty->isFloatingPointTy()) Res = ConstantExpr::getBitCast(Res, Ty); return Res; } } - + // If this load comes from anywhere in a constant global, and if the global // is all undef or zero, we know what it loads. if (GlobalVariable *GV = @@ -520,18 +532,16 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, return UndefValue::get(ResTy); } } - - // Try hard to fold loads from bitcasted strange and non-type-safe things. We - // currently don't do any of this for big endian systems. It can be - // generalized in the future if someone is interested. - if (TD && TD->isLittleEndian()) + + // Try hard to fold loads from bitcasted strange and non-type-safe things. + if (TD) return FoldReinterpretLoadFromConstPtr(CE, *TD); return 0; } -static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){ +static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){ if (LI->isVolatile()) return 0; - + if (Constant *C = dyn_cast(LI->getOperand(0))) return ConstantFoldLoadFromConstPtr(C, TD); @@ -540,23 +550,23 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){ /// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression. /// Attempt to symbolically evaluate the result of a binary operator merging -/// these together. If target data info is available, it is provided as TD, +/// these together. If target data info is available, it is provided as TD, /// otherwise TD is null. static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, - Constant *Op1, const TargetData *TD){ + Constant *Op1, const DataLayout *TD){ // SROA - + // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl. // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute // bits. - - + + // If the constant expr is something like &A[123] - &A[4].f, fold this into a // constant. This happens frequently when iterating over a global array. if (Opc == Instruction::Sub && TD) { GlobalValue *GV1, *GV2; int64_t Offs1, Offs2; - + if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD)) if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) && GV1 == GV2) { @@ -564,7 +574,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, return ConstantInt::get(Op0->getType(), Offs1-Offs2); } } - + return 0; } @@ -572,7 +582,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, /// explicitly cast them so that they aren't implicitly casted by the /// getelementptr. 
static Constant *CastGEPIndices(ArrayRef Ops, - Type *ResultTy, const TargetData *TD, + Type *ResultTy, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TD) return 0; Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext()); @@ -622,20 +632,20 @@ static Constant* StripPtrCastKeepAS(Constant* Ptr) { /// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP /// constant expression, do so. static Constant *SymbolicallyEvaluateGEP(ArrayRef Ops, - Type *ResultTy, const TargetData *TD, + Type *ResultTy, const DataLayout *TD, const TargetLibraryInfo *TLI) { Constant *Ptr = Ops[0]; if (!TD || !cast(Ptr->getType())->getElementType()->isSized() || !Ptr->getType()->isPointerTy()) return 0; - + Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext()); // If this is a constant expr gep that is effectively computing an // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12' for (unsigned i = 1, e = Ops.size(); i != e; ++i) if (!isa(Ops[i])) { - + // If this is "gep i8* Ptr, (sub 0, V)", fold this as: // "inttoptr (sub (ptrtoint Ptr), V)" if (Ops.size() == 2 && @@ -659,7 +669,8 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef Ops, unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy); APInt Offset = APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(), - makeArrayRef((Value **)Ops.data() + 1, + makeArrayRef((Value *const*) + Ops.data() + 1, Ops.size() - 1))); Ptr = StripPtrCastKeepAS(Ptr); @@ -708,12 +719,12 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef Ops, // The only pointer indexing we'll do is on the first index of the GEP. if (!NewIdxs.empty()) break; - + // Only handle pointers to sized types, not pointers to functions. if (!ATy->getElementType()->isSized()) return 0; } - + // Determine which element of the array the offset points into. APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType())); IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext()); @@ -785,7 +796,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef Ops, /// this function can only fail when attempting to fold instructions like loads /// and stores, which have no constant expression form. Constant *llvm::ConstantFoldInstruction(Instruction *I, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI) { // Handle PHI nodes quickly here... if (PHINode *PN = dyn_cast(I)) { @@ -836,7 +847,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, if (const CmpInst *CI = dyn_cast(I)) return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1], TD, TLI); - + if (const LoadInst *LI = dyn_cast(I)) return ConstantFoldLoadInst(LI, TD); @@ -855,10 +866,10 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, } /// ConstantFoldConstantExpression - Attempt to fold the constant expression -/// using the specified TargetData. If successful, the constant result is +/// using the specified DataLayout. If successful, the constant result is /// result is returned, if not, null is returned. Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI) { SmallVector Ops; for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); @@ -886,19 +897,19 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, /// information, due to only being passed an opcode and operands. Constant /// folding using this function strips this information. 
/// -Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, +Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, ArrayRef Ops, - const TargetData *TD, - const TargetLibraryInfo *TLI) { + const DataLayout *TD, + const TargetLibraryInfo *TLI) { // Handle easy binops first. if (Instruction::isBinaryOp(Opcode)) { if (isa(Ops[0]) || isa(Ops[1])) if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD)) return C; - + return ConstantExpr::get(Opcode, Ops[0], Ops[1]); } - + switch (Opcode) { default: return 0; case Instruction::ICmp: @@ -916,7 +927,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, Constant *Input = CE->getOperand(0); unsigned InWidth = Input->getType()->getScalarSizeInBits(); if (TD->getPointerSizeInBits() < InWidth) { - Constant *Mask = + Constant *Mask = ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth, TD->getPointerSizeInBits())); Input = ConstantExpr::getAnd(Input, Mask); @@ -964,7 +975,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, return C; if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI)) return C; - + return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1)); } } @@ -974,8 +985,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, /// returns a constant expression of the specified operands. /// Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, - Constant *Ops0, Constant *Ops1, - const TargetData *TD, + Constant *Ops0, Constant *Ops1, + const DataLayout *TD, const TargetLibraryInfo *TLI) { // fold: icmp (inttoptr x), null -> icmp x, 0 // fold: icmp (ptrtoint x), 0 -> icmp x, null @@ -995,17 +1006,17 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, Constant *Null = Constant::getNullValue(C->getType()); return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI); } - + // Only do this transformation if the int is intptrty in size, otherwise // there is a truncation or extension that we aren't modeling. - if (CE0->getOpcode() == Instruction::PtrToInt && + if (CE0->getOpcode() == Instruction::PtrToInt && CE0->getType() == IntPtrTy) { Constant *C = CE0->getOperand(0); Constant *Null = Constant::getNullValue(C->getType()); return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI); } } - + if (ConstantExpr *CE1 = dyn_cast(Ops1)) { if (TD && CE0->getOpcode() == CE1->getOpcode()) { Type *IntPtrTy = TD->getIntPtrType(CE0->getContext()); @@ -1029,24 +1040,24 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, CE1->getOperand(0), TD, TLI); } } - + // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0) // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0) if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) && CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) { - Constant *LHS = + Constant *LHS = ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1, TD, TLI); - Constant *RHS = + Constant *RHS = ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1, TD, TLI); - unsigned OpC = + unsigned OpC = Predicate == ICmpInst::ICMP_EQ ? 
Instruction::And : Instruction::Or; Constant *Ops[] = { LHS, RHS }; return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI); } } - + return ConstantExpr::getCompare(Predicate, Ops0, Ops1); } @@ -1054,7 +1065,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, /// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a /// getelementptr constantexpr, return the constant value being addressed by the /// constant expression, or null if something is funny and we can't decide. -Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, +Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE) { if (!CE->getOperand(1)->isNullValue()) return 0; // Do not allow stepping over the value! @@ -1124,14 +1135,14 @@ llvm::canConstantFoldCallTo(const Function *F) { if (!F->hasName()) return false; StringRef Name = F->getName(); - + // In these cases, the check of the length is required. We don't want to // return true for a name like "cos\0blah" which strcmp would return equal to // "cos", but has length 8. switch (Name[0]) { default: return false; case 'a': - return Name == "acos" || Name == "asin" || + return Name == "acos" || Name == "asin" || Name == "atan" || Name == "atan2"; case 'c': return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh"; @@ -1151,7 +1162,7 @@ llvm::canConstantFoldCallTo(const Function *F) { } } -static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, +static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) { sys::llvm_fenv_clearexcept(); V = NativeFP(V); @@ -1159,7 +1170,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V, sys::llvm_fenv_clearexcept(); return 0; } - + if (Ty->isFloatTy()) return ConstantFP::get(Ty->getContext(), APFloat((float)V)); if (Ty->isDoubleTy()) @@ -1175,7 +1186,7 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), sys::llvm_fenv_clearexcept(); return 0; } - + if (Ty->isFloatTy()) return ConstantFP::get(Ty->getContext(), APFloat((float)V)); if (Ty->isDoubleTy()) @@ -1269,7 +1280,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef Operands, case 'e': if (Name == "exp" && TLI->has(LibFunc::exp)) return ConstantFoldFP(exp, V, Ty); - + if (Name == "exp2" && TLI->has(LibFunc::exp2)) { // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a // C99 library. @@ -1345,7 +1356,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef Operands, } // Support ConstantVector in case we have an Undef in the top. - if (isa(Operands[0]) || + if (isa(Operands[0]) || isa(Operands[0])) { Constant *Op = cast(Operands[0]); switch (F->getIntrinsicID()) { @@ -1364,11 +1375,11 @@ llvm::ConstantFoldCall(Function *F, ArrayRef Operands, case Intrinsic::x86_sse2_cvttsd2si64: if (ConstantFP *FPOp = dyn_cast_or_null(Op->getAggregateElement(0U))) - return ConstantFoldConvertToInt(FPOp->getValueAPF(), + return ConstantFoldConvertToInt(FPOp->getValueAPF(), /*roundTowardZero=*/true, Ty); } } - + if (isa(Operands[0])) { if (F->getIntrinsicID() == Intrinsic::bswap) return Operands[0]; @@ -1382,14 +1393,14 @@ llvm::ConstantFoldCall(Function *F, ArrayRef Operands, if (ConstantFP *Op1 = dyn_cast(Operands[0])) { if (!Ty->isFloatTy() && !Ty->isDoubleTy()) return 0; - double Op1V = Ty->isFloatTy() ? + double Op1V = Ty->isFloatTy() ? 
(double)Op1->getValueAPF().convertToFloat() : Op1->getValueAPF().convertToDouble(); if (ConstantFP *Op2 = dyn_cast(Operands[1])) { if (Op2->getType() != Op1->getType()) return 0; - double Op2V = Ty->isFloatTy() ? + double Op2V = Ty->isFloatTy() ? (double)Op2->getValueAPF().convertToFloat(): Op2->getValueAPF().convertToDouble(); @@ -1416,7 +1427,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef Operands, } return 0; } - + if (ConstantInt *Op1 = dyn_cast(Operands[0])) { if (ConstantInt *Op2 = dyn_cast(Operands[1])) { switch (F->getIntrinsicID()) { @@ -1466,7 +1477,7 @@ llvm::ConstantFoldCall(Function *F, ArrayRef Operands, return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros()); } } - + return 0; } return 0; diff --git a/lib/Analysis/CostModel.cpp b/lib/Analysis/CostModel.cpp new file mode 100644 index 0000000..5adbf45 --- /dev/null +++ b/lib/Analysis/CostModel.cpp @@ -0,0 +1,193 @@ +//===- CostModel.cpp ------ Cost Model Analysis ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the cost model analysis. It provides a very basic cost +// estimation for LLVM-IR. The cost result can be thought of as cycles, but it +// is really unit-less. The estimated cost is ment to be used for comparing +// alternatives. +// +//===----------------------------------------------------------------------===// + +#define CM_NAME "cost-model" +#define DEBUG_TYPE CM_NAME +#include "llvm/Analysis/Passes.h" +#include "llvm/Function.h" +#include "llvm/Instructions.h" +#include "llvm/Pass.h" +#include "llvm/TargetTransformInfo.h" +#include "llvm/Value.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +namespace { + class CostModelAnalysis : public FunctionPass { + + public: + static char ID; // Class identification, replacement for typeinfo + CostModelAnalysis() : FunctionPass(ID), F(0), VTTI(0) { + initializeCostModelAnalysisPass( + *PassRegistry::getPassRegistry()); + } + + /// Returns the expected cost of the instruction. + /// Returns -1 if the cost is unknown. + /// Note, this method does not cache the cost calculation and it + /// can be expensive in some cases. + unsigned getInstructionCost(Instruction *I) const; + + private: + virtual void getAnalysisUsage(AnalysisUsage &AU) const; + virtual bool runOnFunction(Function &F); + virtual void print(raw_ostream &OS, const Module*) const; + + /// The function that we analyze. + Function *F; + /// Vector target information. + const VectorTargetTransformInfo *VTTI; + }; +} // End of anonymous namespace + +// Register this pass. +char CostModelAnalysis::ID = 0; +static const char cm_name[] = "Cost Model Analysis"; +INITIALIZE_PASS_BEGIN(CostModelAnalysis, CM_NAME, cm_name, false, true) +INITIALIZE_PASS_END (CostModelAnalysis, CM_NAME, cm_name, false, true) + +FunctionPass *llvm::createCostModelAnalysisPass() { + return new CostModelAnalysis(); +} + +void +CostModelAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); +} + +bool +CostModelAnalysis::runOnFunction(Function &F) { + this->F = &F; + + // Target information. 
+ TargetTransformInfo *TTI; + TTI = getAnalysisIfAvailable(); + if (TTI) + VTTI = TTI->getVectorTargetTransformInfo(); + + return false; +} + +unsigned CostModelAnalysis::getInstructionCost(Instruction *I) const { + if (!VTTI) + return -1; + + switch (I->getOpcode()) { + case Instruction::Ret: + case Instruction::PHI: + case Instruction::Br: { + return VTTI->getCFInstrCost(I->getOpcode()); + } + case Instruction::Add: + case Instruction::FAdd: + case Instruction::Sub: + case Instruction::FSub: + case Instruction::Mul: + case Instruction::FMul: + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::FDiv: + case Instruction::URem: + case Instruction::SRem: + case Instruction::FRem: + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: { + return VTTI->getArithmeticInstrCost(I->getOpcode(), I->getType()); + } + case Instruction::Select: { + SelectInst *SI = cast(I); + Type *CondTy = SI->getCondition()->getType(); + return VTTI->getCmpSelInstrCost(I->getOpcode(), I->getType(), CondTy); + } + case Instruction::ICmp: + case Instruction::FCmp: { + Type *ValTy = I->getOperand(0)->getType(); + return VTTI->getCmpSelInstrCost(I->getOpcode(), ValTy); + } + case Instruction::Store: { + StoreInst *SI = cast(I); + Type *ValTy = SI->getValueOperand()->getType(); + return VTTI->getMemoryOpCost(I->getOpcode(), ValTy, + SI->getAlignment(), + SI->getPointerAddressSpace()); + } + case Instruction::Load: { + LoadInst *LI = cast(I); + return VTTI->getMemoryOpCost(I->getOpcode(), I->getType(), + LI->getAlignment(), + LI->getPointerAddressSpace()); + } + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::FPToUI: + case Instruction::FPToSI: + case Instruction::FPExt: + case Instruction::PtrToInt: + case Instruction::IntToPtr: + case Instruction::SIToFP: + case Instruction::UIToFP: + case Instruction::Trunc: + case Instruction::FPTrunc: + case Instruction::BitCast: { + Type *SrcTy = I->getOperand(0)->getType(); + return VTTI->getCastInstrCost(I->getOpcode(), I->getType(), SrcTy); + } + case Instruction::ExtractElement: { + ExtractElementInst * EEI = cast(I); + ConstantInt *CI = dyn_cast(I->getOperand(1)); + unsigned Idx = -1; + if (CI) + Idx = CI->getZExtValue(); + return VTTI->getVectorInstrCost(I->getOpcode(), + EEI->getOperand(0)->getType(), Idx); + } + case Instruction::InsertElement: { + InsertElementInst * IE = cast(I); + ConstantInt *CI = dyn_cast(IE->getOperand(2)); + unsigned Idx = -1; + if (CI) + Idx = CI->getZExtValue(); + return VTTI->getVectorInstrCost(I->getOpcode(), + IE->getType(), Idx); + } + default: + // We don't have any information on this instruction. 
+ return -1; + } +} + +void CostModelAnalysis::print(raw_ostream &OS, const Module*) const { + if (!F) + return; + + for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) { + for (BasicBlock::iterator it = B->begin(), e = B->end(); it != e; ++it) { + Instruction *Inst = it; + unsigned Cost = getInstructionCost(Inst); + if (Cost != (unsigned)-1) + OS << "Cost Model: Found an estimated cost of " << Cost; + else + OS << "Cost Model: Unknown cost"; + + OS << " for instruction: "<< *Inst << "\n"; + } + } +} diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp new file mode 100644 index 0000000..95ac5ea --- /dev/null +++ b/lib/Analysis/DependenceAnalysis.cpp @@ -0,0 +1,3786 @@ +//===-- DependenceAnalysis.cpp - DA Implementation --------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// DependenceAnalysis is an LLVM pass that analyses dependences between memory +// accesses. Currently, it is an (incomplete) implementation of the approach +// described in +// +// Practical Dependence Testing +// Goff, Kennedy, Tseng +// PLDI 1991 +// +// There's a single entry point that analyzes the dependence between a pair +// of memory references in a function, returning either NULL, for no dependence, +// or a more-or-less detailed description of the dependence between them. +// +// Currently, the implementation cannot propagate constraints between +// coupled RDIV subscripts and lacks a multi-subscript MIV test. +// Both of these are conservative weaknesses; +// that is, not a source of correctness problems. +// +// The implementation depends on the GEP instruction to +// differentiate subscripts. Since Clang linearizes subscripts +// for most arrays, we give up some precision (though the existing MIV tests +// will help). We trust that the GEP instruction will eventually be extended. +// In the meantime, we should explore Maslov's ideas about delinearization. +// +// We should pay some careful attention to the possibility of integer overflow +// in the implementation of the various tests. This could happen with Add, +// Subtract, or Multiply, with both APInt's and SCEV's. +// +// Some non-linear subscript pairs can be handled by the GCD test +// (and perhaps other tests). +// Should explore how often these things occur. +// +// Finally, it seems like certain test cases expose weaknesses in the SCEV +// simplification, especially in the handling of sign and zero extensions. +// It could be useful to spend time exploring these. +// +// Please note that this is work in progress and the interface is subject to +// change. 
+// +//===----------------------------------------------------------------------===// +// // +// In memory of Ken Kennedy, 1945 - 2007 // +// // +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "da" + +#include "llvm/Analysis/DependenceAnalysis.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/Operator.h" +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/ScalarEvolutionExpressions.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/InstIterator.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +//===----------------------------------------------------------------------===// +// statistics + +STATISTIC(TotalArrayPairs, "Array pairs tested"); +STATISTIC(SeparableSubscriptPairs, "Separable subscript pairs"); +STATISTIC(CoupledSubscriptPairs, "Coupled subscript pairs"); +STATISTIC(NonlinearSubscriptPairs, "Nonlinear subscript pairs"); +STATISTIC(ZIVapplications, "ZIV applications"); +STATISTIC(ZIVindependence, "ZIV independence"); +STATISTIC(StrongSIVapplications, "Strong SIV applications"); +STATISTIC(StrongSIVsuccesses, "Strong SIV successes"); +STATISTIC(StrongSIVindependence, "Strong SIV independence"); +STATISTIC(WeakCrossingSIVapplications, "Weak-Crossing SIV applications"); +STATISTIC(WeakCrossingSIVsuccesses, "Weak-Crossing SIV successes"); +STATISTIC(WeakCrossingSIVindependence, "Weak-Crossing SIV independence"); +STATISTIC(ExactSIVapplications, "Exact SIV applications"); +STATISTIC(ExactSIVsuccesses, "Exact SIV successes"); +STATISTIC(ExactSIVindependence, "Exact SIV independence"); +STATISTIC(WeakZeroSIVapplications, "Weak-Zero SIV applications"); +STATISTIC(WeakZeroSIVsuccesses, "Weak-Zero SIV successes"); +STATISTIC(WeakZeroSIVindependence, "Weak-Zero SIV independence"); +STATISTIC(ExactRDIVapplications, "Exact RDIV applications"); +STATISTIC(ExactRDIVindependence, "Exact RDIV independence"); +STATISTIC(SymbolicRDIVapplications, "Symbolic RDIV applications"); +STATISTIC(SymbolicRDIVindependence, "Symbolic RDIV independence"); +STATISTIC(DeltaApplications, "Delta applications"); +STATISTIC(DeltaSuccesses, "Delta successes"); +STATISTIC(DeltaIndependence, "Delta independence"); +STATISTIC(DeltaPropagations, "Delta propagations"); +STATISTIC(GCDapplications, "GCD applications"); +STATISTIC(GCDsuccesses, "GCD successes"); +STATISTIC(GCDindependence, "GCD independence"); +STATISTIC(BanerjeeApplications, "Banerjee applications"); +STATISTIC(BanerjeeIndependence, "Banerjee independence"); +STATISTIC(BanerjeeSuccesses, "Banerjee successes"); + +//===----------------------------------------------------------------------===// +// basics + +INITIALIZE_PASS_BEGIN(DependenceAnalysis, "da", + "Dependence Analysis", true, true) +INITIALIZE_PASS_DEPENDENCY(LoopInfo) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_AG_DEPENDENCY(AliasAnalysis) +INITIALIZE_PASS_END(DependenceAnalysis, "da", + "Dependence Analysis", true, true) + +char DependenceAnalysis::ID = 0; + + +FunctionPass *llvm::createDependenceAnalysisPass() { + return new DependenceAnalysis(); +} + + +bool DependenceAnalysis::runOnFunction(Function &F) { + this->F = &F; + AA = &getAnalysis(); + SE = &getAnalysis(); + LI = &getAnalysis(); + return false; +} + + +void DependenceAnalysis::releaseMemory() { +} + + +void DependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) 
const { + AU.setPreservesAll(); + AU.addRequiredTransitive(); + AU.addRequiredTransitive(); + AU.addRequiredTransitive(); +} + + +// Used to test the dependence analyzer. +// Looks through the function, noting the first store instruction +// and the first load instruction +// (which always follows the first load in our tests). +// Calls depends() and prints out the result. +// Ignores all other instructions. +static +void dumpExampleDependence(raw_ostream &OS, Function *F, + DependenceAnalysis *DA) { + for (inst_iterator SrcI = inst_begin(F), SrcE = inst_end(F); + SrcI != SrcE; ++SrcI) { + if (const StoreInst *Src = dyn_cast(&*SrcI)) { + for (inst_iterator DstI = SrcI, DstE = inst_end(F); + DstI != DstE; ++DstI) { + if (const LoadInst *Dst = dyn_cast(&*DstI)) { + OS << "da analyze - "; + if (Dependence *D = DA->depends(Src, Dst, true)) { + D->dump(OS); + for (unsigned Level = 1; Level <= D->getLevels(); Level++) { + if (D->isSplitable(Level)) { + OS << "da analyze - split level = " << Level; + OS << ", iteration = " << *DA->getSplitIteration(D, Level); + OS << "!\n"; + } + } + delete D; + } + else + OS << "none!\n"; + return; + } + } + } + } +} + + +void DependenceAnalysis::print(raw_ostream &OS, const Module*) const { + dumpExampleDependence(OS, F, const_cast(this)); +} + +//===----------------------------------------------------------------------===// +// Dependence methods + +// Returns true if this is an input dependence. +bool Dependence::isInput() const { + return Src->mayReadFromMemory() && Dst->mayReadFromMemory(); +} + + +// Returns true if this is an output dependence. +bool Dependence::isOutput() const { + return Src->mayWriteToMemory() && Dst->mayWriteToMemory(); +} + + +// Returns true if this is an flow (aka true) dependence. +bool Dependence::isFlow() const { + return Src->mayWriteToMemory() && Dst->mayReadFromMemory(); +} + + +// Returns true if this is an anti dependence. +bool Dependence::isAnti() const { + return Src->mayReadFromMemory() && Dst->mayWriteToMemory(); +} + + +// Returns true if a particular level is scalar; that is, +// if no subscript in the source or destination mention the induction +// variable associated with the loop at this level. +// Leave this out of line, so it will serve as a virtual method anchor +bool Dependence::isScalar(unsigned level) const { + return false; +} + + +//===----------------------------------------------------------------------===// +// FullDependence methods + +FullDependence::FullDependence(const Instruction *Source, + const Instruction *Destination, + bool PossiblyLoopIndependent, + unsigned CommonLevels) : + Dependence(Source, Destination), + Levels(CommonLevels), + LoopIndependent(PossiblyLoopIndependent) { + Consistent = true; + DV = CommonLevels ? new DVEntry[CommonLevels] : NULL; +} + +// The rest are simple getters that hide the implementation. + +// getDirection - Returns the direction associated with a particular level. +unsigned FullDependence::getDirection(unsigned Level) const { + assert(0 < Level && Level <= Levels && "Level out of range"); + return DV[Level - 1].Direction; +} + + +// Returns the distance (or NULL) associated with a particular level. +const SCEV *FullDependence::getDistance(unsigned Level) const { + assert(0 < Level && Level <= Levels && "Level out of range"); + return DV[Level - 1].Distance; +} + + +// Returns true if a particular level is scalar; that is, +// if no subscript in the source or destination mention the induction +// variable associated with the loop at this level. 
+bool FullDependence::isScalar(unsigned Level) const { + assert(0 < Level && Level <= Levels && "Level out of range"); + return DV[Level - 1].Scalar; +} + + +// Returns true if peeling the first iteration from this loop +// will break this dependence. +bool FullDependence::isPeelFirst(unsigned Level) const { + assert(0 < Level && Level <= Levels && "Level out of range"); + return DV[Level - 1].PeelFirst; +} + + +// Returns true if peeling the last iteration from this loop +// will break this dependence. +bool FullDependence::isPeelLast(unsigned Level) const { + assert(0 < Level && Level <= Levels && "Level out of range"); + return DV[Level - 1].PeelLast; +} + + +// Returns true if splitting this loop will break the dependence. +bool FullDependence::isSplitable(unsigned Level) const { + assert(0 < Level && Level <= Levels && "Level out of range"); + return DV[Level - 1].Splitable; +} + + +//===----------------------------------------------------------------------===// +// DependenceAnalysis::Constraint methods + +// If constraint is a point , returns X. +// Otherwise assert. +const SCEV *DependenceAnalysis::Constraint::getX() const { + assert(Kind == Point && "Kind should be Point"); + return A; +} + + +// If constraint is a point , returns Y. +// Otherwise assert. +const SCEV *DependenceAnalysis::Constraint::getY() const { + assert(Kind == Point && "Kind should be Point"); + return B; +} + + +// If constraint is a line AX + BY = C, returns A. +// Otherwise assert. +const SCEV *DependenceAnalysis::Constraint::getA() const { + assert((Kind == Line || Kind == Distance) && + "Kind should be Line (or Distance)"); + return A; +} + + +// If constraint is a line AX + BY = C, returns B. +// Otherwise assert. +const SCEV *DependenceAnalysis::Constraint::getB() const { + assert((Kind == Line || Kind == Distance) && + "Kind should be Line (or Distance)"); + return B; +} + + +// If constraint is a line AX + BY = C, returns C. +// Otherwise assert. +const SCEV *DependenceAnalysis::Constraint::getC() const { + assert((Kind == Line || Kind == Distance) && + "Kind should be Line (or Distance)"); + return C; +} + + +// If constraint is a distance, returns D. +// Otherwise assert. +const SCEV *DependenceAnalysis::Constraint::getD() const { + assert(Kind == Distance && "Kind should be Distance"); + return SE->getNegativeSCEV(C); +} + + +// Returns the loop associated with this constraint. +const Loop *DependenceAnalysis::Constraint::getAssociatedLoop() const { + assert((Kind == Distance || Kind == Line || Kind == Point) && + "Kind should be Distance, Line, or Point"); + return AssociatedLoop; +} + + +void DependenceAnalysis::Constraint::setPoint(const SCEV *X, + const SCEV *Y, + const Loop *CurLoop) { + Kind = Point; + A = X; + B = Y; + AssociatedLoop = CurLoop; +} + + +void DependenceAnalysis::Constraint::setLine(const SCEV *AA, + const SCEV *BB, + const SCEV *CC, + const Loop *CurLoop) { + Kind = Line; + A = AA; + B = BB; + C = CC; + AssociatedLoop = CurLoop; +} + + +void DependenceAnalysis::Constraint::setDistance(const SCEV *D, + const Loop *CurLoop) { + Kind = Distance; + A = SE->getConstant(D->getType(), 1); + B = SE->getNegativeSCEV(A); + C = SE->getNegativeSCEV(D); + AssociatedLoop = CurLoop; +} + + +void DependenceAnalysis::Constraint::setEmpty() { + Kind = Empty; +} + + +void DependenceAnalysis::Constraint::setAny(ScalarEvolution *NewSE) { + SE = NewSE; + Kind = Any; +} + + +// For debugging purposes. Dumps the constraint out to OS. 
+void DependenceAnalysis::Constraint::dump(raw_ostream &OS) const { + if (isEmpty()) + OS << " Empty\n"; + else if (isAny()) + OS << " Any\n"; + else if (isPoint()) + OS << " Point is <" << *getX() << ", " << *getY() << ">\n"; + else if (isDistance()) + OS << " Distance is " << *getD() << + " (" << *getA() << "*X + " << *getB() << "*Y = " << *getC() << ")\n"; + else if (isLine()) + OS << " Line is " << *getA() << "*X + " << + *getB() << "*Y = " << *getC() << "\n"; + else + llvm_unreachable("unknown constraint type in Constraint::dump"); +} + + +// Updates X with the intersection +// of the Constraints X and Y. Returns true if X has changed. +// Corresponds to Figure 4 from the paper +// +// Practical Dependence Testing +// Goff, Kennedy, Tseng +// PLDI 1991 +bool DependenceAnalysis::intersectConstraints(Constraint *X, + const Constraint *Y) { + ++DeltaApplications; + DEBUG(dbgs() << "\tintersect constraints\n"); + DEBUG(dbgs() << "\t X ="; X->dump(dbgs())); + DEBUG(dbgs() << "\t Y ="; Y->dump(dbgs())); + assert(!Y->isPoint() && "Y must not be a Point"); + if (X->isAny()) { + if (Y->isAny()) + return false; + *X = *Y; + return true; + } + if (X->isEmpty()) + return false; + if (Y->isEmpty()) { + X->setEmpty(); + return true; + } + + if (X->isDistance() && Y->isDistance()) { + DEBUG(dbgs() << "\t intersect 2 distances\n"); + if (isKnownPredicate(CmpInst::ICMP_EQ, X->getD(), Y->getD())) + return false; + if (isKnownPredicate(CmpInst::ICMP_NE, X->getD(), Y->getD())) { + X->setEmpty(); + ++DeltaSuccesses; + return true; + } + // Hmmm, interesting situation. + // I guess if either is constant, keep it and ignore the other. + if (isa(Y->getD())) { + *X = *Y; + return true; + } + return false; + } + + // At this point, the pseudo-code in Figure 4 of the paper + // checks if (X->isPoint() && Y->isPoint()). + // This case can't occur in our implementation, + // since a Point can only arise as the result of intersecting + // two Line constraints, and the right-hand value, Y, is never + // the result of an intersection. 
+ assert(!(X->isPoint() && Y->isPoint()) && + "We shouldn't ever see X->isPoint() && Y->isPoint()"); + + if (X->isLine() && Y->isLine()) { + DEBUG(dbgs() << "\t intersect 2 lines\n"); + const SCEV *Prod1 = SE->getMulExpr(X->getA(), Y->getB()); + const SCEV *Prod2 = SE->getMulExpr(X->getB(), Y->getA()); + if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2)) { + // slopes are equal, so lines are parallel + DEBUG(dbgs() << "\t\tsame slope\n"); + Prod1 = SE->getMulExpr(X->getC(), Y->getB()); + Prod2 = SE->getMulExpr(X->getB(), Y->getC()); + if (isKnownPredicate(CmpInst::ICMP_EQ, Prod1, Prod2)) + return false; + if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) { + X->setEmpty(); + ++DeltaSuccesses; + return true; + } + return false; + } + if (isKnownPredicate(CmpInst::ICMP_NE, Prod1, Prod2)) { + // slopes differ, so lines intersect + DEBUG(dbgs() << "\t\tdifferent slopes\n"); + const SCEV *C1B2 = SE->getMulExpr(X->getC(), Y->getB()); + const SCEV *C1A2 = SE->getMulExpr(X->getC(), Y->getA()); + const SCEV *C2B1 = SE->getMulExpr(Y->getC(), X->getB()); + const SCEV *C2A1 = SE->getMulExpr(Y->getC(), X->getA()); + const SCEV *A1B2 = SE->getMulExpr(X->getA(), Y->getB()); + const SCEV *A2B1 = SE->getMulExpr(Y->getA(), X->getB()); + const SCEVConstant *C1A2_C2A1 = + dyn_cast(SE->getMinusSCEV(C1A2, C2A1)); + const SCEVConstant *C1B2_C2B1 = + dyn_cast(SE->getMinusSCEV(C1B2, C2B1)); + const SCEVConstant *A1B2_A2B1 = + dyn_cast(SE->getMinusSCEV(A1B2, A2B1)); + const SCEVConstant *A2B1_A1B2 = + dyn_cast(SE->getMinusSCEV(A2B1, A1B2)); + if (!C1B2_C2B1 || !C1A2_C2A1 || + !A1B2_A2B1 || !A2B1_A1B2) + return false; + APInt Xtop = C1B2_C2B1->getValue()->getValue(); + APInt Xbot = A1B2_A2B1->getValue()->getValue(); + APInt Ytop = C1A2_C2A1->getValue()->getValue(); + APInt Ybot = A2B1_A1B2->getValue()->getValue(); + DEBUG(dbgs() << "\t\tXtop = " << Xtop << "\n"); + DEBUG(dbgs() << "\t\tXbot = " << Xbot << "\n"); + DEBUG(dbgs() << "\t\tYtop = " << Ytop << "\n"); + DEBUG(dbgs() << "\t\tYbot = " << Ybot << "\n"); + APInt Xq = Xtop; // these need to be initialized, even + APInt Xr = Xtop; // though they're just going to be overwritten + APInt::sdivrem(Xtop, Xbot, Xq, Xr); + APInt Yq = Ytop; + APInt Yr = Ytop;; + APInt::sdivrem(Ytop, Ybot, Yq, Yr); + if (Xr != 0 || Yr != 0) { + X->setEmpty(); + ++DeltaSuccesses; + return true; + } + DEBUG(dbgs() << "\t\tX = " << Xq << ", Y = " << Yq << "\n"); + if (Xq.slt(0) || Yq.slt(0)) { + X->setEmpty(); + ++DeltaSuccesses; + return true; + } + if (const SCEVConstant *CUB = + collectConstantUpperBound(X->getAssociatedLoop(), Prod1->getType())) { + APInt UpperBound = CUB->getValue()->getValue(); + DEBUG(dbgs() << "\t\tupper bound = " << UpperBound << "\n"); + if (Xq.sgt(UpperBound) || Yq.sgt(UpperBound)) { + X->setEmpty(); + ++DeltaSuccesses; + return true; + } + } + X->setPoint(SE->getConstant(Xq), + SE->getConstant(Yq), + X->getAssociatedLoop()); + ++DeltaSuccesses; + return true; + } + return false; + } + + // if (X->isLine() && Y->isPoint()) This case can't occur. 
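In the two-Line branch above, intersectConstraints solves the pair A1*X + B1*Y = C1 and A2*X + B2*Y = C2 by Cramer's rule: Xtop/Xbot and Ytop/Ybot are the determinant quotients, and a nonzero remainder (or, later, a negative or out-of-bounds quotient) lets the analysis mark the constraint empty and conclude independence. An editor's sketch, not part of the patch, of that arithmetic on plain machine integers; the struct, helper name, and example coefficients are invented, and the parallel-line and upper-bound checks of the real code are omitted (the Point-and-Line case continues below).

// Editor's sketch, not part of the patch: integer line intersection mirroring
// the Xtop/Xbot/Ytop/Ybot products computed above.  The caller is assumed to
// have ruled out parallel lines (Xbot == 0), which the real code handles in
// its "same slope" branch.
#include <cassert>

struct LinePoint { bool Integral; long X, Y; };

static LinePoint intersectLines(long A1, long B1, long C1,
                                long A2, long B2, long C2) {
  long Xtop = C1 * B2 - C2 * B1;
  long Xbot = A1 * B2 - A2 * B1;
  long Ytop = C1 * A2 - C2 * A1;
  long Ybot = A2 * B1 - A1 * B2;
  if (Xtop % Xbot != 0 || Ytop % Ybot != 0)
    return {false, 0, 0};          // no integer solution: constraint is empty
  return {true, Xtop / Xbot, Ytop / Ybot};
}

int main() {
  // X + Y = 4 and X - Y = 2 meet at the integer point (3, 1).
  LinePoint P = intersectLines(1, 1, 4, 1, -1, 2);
  assert(P.Integral && P.X == 3 && P.Y == 1);
  return 0;
}

Intersecting X + Y = 4 with X - Y = 1 instead would leave a nonzero remainder (X would be 2.5), so the constraint becomes empty and independence is proved.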
+ assert(!(X->isLine() && Y->isPoint()) && "This case should never occur"); + + if (X->isPoint() && Y->isLine()) { + DEBUG(dbgs() << "\t intersect Point and Line\n"); + const SCEV *A1X1 = SE->getMulExpr(Y->getA(), X->getX()); + const SCEV *B1Y1 = SE->getMulExpr(Y->getB(), X->getY()); + const SCEV *Sum = SE->getAddExpr(A1X1, B1Y1); + if (isKnownPredicate(CmpInst::ICMP_EQ, Sum, Y->getC())) + return false; + if (isKnownPredicate(CmpInst::ICMP_NE, Sum, Y->getC())) { + X->setEmpty(); + ++DeltaSuccesses; + return true; + } + return false; + } + + llvm_unreachable("shouldn't reach the end of Constraint intersection"); + return false; +} + + +//===----------------------------------------------------------------------===// +// DependenceAnalysis methods + +// For debugging purposes. Dumps a dependence to OS. +void Dependence::dump(raw_ostream &OS) const { + bool Splitable = false; + if (isConfused()) + OS << "confused"; + else { + if (isConsistent()) + OS << "consistent "; + if (isFlow()) + OS << "flow"; + else if (isOutput()) + OS << "output"; + else if (isAnti()) + OS << "anti"; + else if (isInput()) + OS << "input"; + unsigned Levels = getLevels(); + if (Levels) { + OS << " ["; + for (unsigned II = 1; II <= Levels; ++II) { + if (isSplitable(II)) + Splitable = true; + if (isPeelFirst(II)) + OS << 'p'; + const SCEV *Distance = getDistance(II); + if (Distance) + OS << *Distance; + else if (isScalar(II)) + OS << "S"; + else { + unsigned Direction = getDirection(II); + if (Direction == DVEntry::ALL) + OS << "*"; + else { + if (Direction & DVEntry::LT) + OS << "<"; + if (Direction & DVEntry::EQ) + OS << "="; + if (Direction & DVEntry::GT) + OS << ">"; + } + } + if (isPeelLast(II)) + OS << 'p'; + if (II < Levels) + OS << " "; + } + if (isLoopIndependent()) + OS << "|<"; + OS << "]"; + if (Splitable) + OS << " splitable"; + } + } + OS << "!\n"; +} + + + +static +AliasAnalysis::AliasResult underlyingObjectsAlias(AliasAnalysis *AA, + const Value *A, + const Value *B) { + const Value *AObj = GetUnderlyingObject(A); + const Value *BObj = GetUnderlyingObject(B); + return AA->alias(AObj, AA->getTypeStoreSize(AObj->getType()), + BObj, AA->getTypeStoreSize(BObj->getType())); +} + + +// Returns true if the load or store can be analyzed. Atomic and volatile +// operations have properties which this analysis does not understand. +static +bool isLoadOrStore(const Instruction *I) { + if (const LoadInst *LI = dyn_cast(I)) + return LI->isUnordered(); + else if (const StoreInst *SI = dyn_cast(I)) + return SI->isUnordered(); + return false; +} + + +static +const Value *getPointerOperand(const Instruction *I) { + if (const LoadInst *LI = dyn_cast(I)) + return LI->getPointerOperand(); + if (const StoreInst *SI = dyn_cast(I)) + return SI->getPointerOperand(); + llvm_unreachable("Value is not load or store instruction"); + return 0; +} + + +// Examines the loop nesting of the Src and Dst +// instructions and establishes their shared loops. Sets the variables +// CommonLevels, SrcLevels, and MaxLevels. +// The source and destination instructions needn't be contained in the same +// loop. The routine establishNestingLevels finds the level of most deeply +// nested loop that contains them both, CommonLevels. An instruction that's +// not contained in a loop is at level = 0. MaxLevels is equal to the level +// of the source plus the level of the destination, minus CommonLevels. +// This lets us allocate vectors MaxLevels in length, with room for every +// distinct loop referenced in both the source and destination subscripts. 
+// The variable SrcLevels is the nesting depth of the source instruction. +// It's used to help calculate distinct loops referenced by the destination. +// Here's the map from loops to levels: +// 0 - unused +// 1 - outermost common loop +// ... - other common loops +// CommonLevels - innermost common loop +// ... - loops containing Src but not Dst +// SrcLevels - innermost loop containing Src but not Dst +// ... - loops containing Dst but not Src +// MaxLevels - innermost loops containing Dst but not Src +// Consider the follow code fragment: +// for (a = ...) { +// for (b = ...) { +// for (c = ...) { +// for (d = ...) { +// A[] = ...; +// } +// } +// for (e = ...) { +// for (f = ...) { +// for (g = ...) { +// ... = A[]; +// } +// } +// } +// } +// } +// If we're looking at the possibility of a dependence between the store +// to A (the Src) and the load from A (the Dst), we'll note that they +// have 2 loops in common, so CommonLevels will equal 2 and the direction +// vector for Result will have 2 entries. SrcLevels = 4 and MaxLevels = 7. +// A map from loop names to loop numbers would look like +// a - 1 +// b - 2 = CommonLevels +// c - 3 +// d - 4 = SrcLevels +// e - 5 +// f - 6 +// g - 7 = MaxLevels +void DependenceAnalysis::establishNestingLevels(const Instruction *Src, + const Instruction *Dst) { + const BasicBlock *SrcBlock = Src->getParent(); + const BasicBlock *DstBlock = Dst->getParent(); + unsigned SrcLevel = LI->getLoopDepth(SrcBlock); + unsigned DstLevel = LI->getLoopDepth(DstBlock); + const Loop *SrcLoop = LI->getLoopFor(SrcBlock); + const Loop *DstLoop = LI->getLoopFor(DstBlock); + SrcLevels = SrcLevel; + MaxLevels = SrcLevel + DstLevel; + while (SrcLevel > DstLevel) { + SrcLoop = SrcLoop->getParentLoop(); + SrcLevel--; + } + while (DstLevel > SrcLevel) { + DstLoop = DstLoop->getParentLoop(); + DstLevel--; + } + while (SrcLoop != DstLoop) { + SrcLoop = SrcLoop->getParentLoop(); + DstLoop = DstLoop->getParentLoop(); + SrcLevel--; + } + CommonLevels = SrcLevel; + MaxLevels -= CommonLevels; +} + + +// Given one of the loops containing the source, return +// its level index in our numbering scheme. +unsigned DependenceAnalysis::mapSrcLoop(const Loop *SrcLoop) const { + return SrcLoop->getLoopDepth(); +} + + +// Given one of the loops containing the destination, +// return its level index in our numbering scheme. +unsigned DependenceAnalysis::mapDstLoop(const Loop *DstLoop) const { + unsigned D = DstLoop->getLoopDepth(); + if (D > CommonLevels) + return D - CommonLevels + SrcLevels; + else + return D; +} + + +// Returns true if Expression is loop invariant in LoopNest. +bool DependenceAnalysis::isLoopInvariant(const SCEV *Expression, + const Loop *LoopNest) const { + if (!LoopNest) + return true; + return SE->isLoopInvariant(Expression, LoopNest) && + isLoopInvariant(Expression, LoopNest->getParentLoop()); +} + + + +// Finds the set of loops from the LoopNest that +// have a level <= CommonLevels and are referred to by the SCEV Expression. +void DependenceAnalysis::collectCommonLoops(const SCEV *Expression, + const Loop *LoopNest, + SmallBitVector &Loops) const { + while (LoopNest) { + unsigned Level = LoopNest->getLoopDepth(); + if (Level <= CommonLevels && !SE->isLoopInvariant(Expression, LoopNest)) + Loops.set(Level); + LoopNest = LoopNest->getParentLoop(); + } +} + + +// removeMatchingExtensions - Examines a subscript pair. 
+// If the source and destination are identically sign (or zero) +// extended, it strips off the extension in an effect to simplify +// the actual analysis. +void DependenceAnalysis::removeMatchingExtensions(Subscript *Pair) { + const SCEV *Src = Pair->Src; + const SCEV *Dst = Pair->Dst; + if ((isa(Src) && isa(Dst)) || + (isa(Src) && isa(Dst))) { + const SCEVCastExpr *SrcCast = cast(Src); + const SCEVCastExpr *DstCast = cast(Dst); + if (SrcCast->getType() == DstCast->getType()) { + Pair->Src = SrcCast->getOperand(); + Pair->Dst = DstCast->getOperand(); + } + } +} + + +// Examine the scev and return true iff it's linear. +// Collect any loops mentioned in the set of "Loops". +bool DependenceAnalysis::checkSrcSubscript(const SCEV *Src, + const Loop *LoopNest, + SmallBitVector &Loops) { + const SCEVAddRecExpr *AddRec = dyn_cast(Src); + if (!AddRec) + return isLoopInvariant(Src, LoopNest); + const SCEV *Start = AddRec->getStart(); + const SCEV *Step = AddRec->getStepRecurrence(*SE); + if (!isLoopInvariant(Step, LoopNest)) + return false; + Loops.set(mapSrcLoop(AddRec->getLoop())); + return checkSrcSubscript(Start, LoopNest, Loops); +} + + + +// Examine the scev and return true iff it's linear. +// Collect any loops mentioned in the set of "Loops". +bool DependenceAnalysis::checkDstSubscript(const SCEV *Dst, + const Loop *LoopNest, + SmallBitVector &Loops) { + const SCEVAddRecExpr *AddRec = dyn_cast(Dst); + if (!AddRec) + return isLoopInvariant(Dst, LoopNest); + const SCEV *Start = AddRec->getStart(); + const SCEV *Step = AddRec->getStepRecurrence(*SE); + if (!isLoopInvariant(Step, LoopNest)) + return false; + Loops.set(mapDstLoop(AddRec->getLoop())); + return checkDstSubscript(Start, LoopNest, Loops); +} + + +// Examines the subscript pair (the Src and Dst SCEVs) +// and classifies it as either ZIV, SIV, RDIV, MIV, or Nonlinear. +// Collects the associated loops in a set. +DependenceAnalysis::Subscript::ClassificationKind +DependenceAnalysis::classifyPair(const SCEV *Src, const Loop *SrcLoopNest, + const SCEV *Dst, const Loop *DstLoopNest, + SmallBitVector &Loops) { + SmallBitVector SrcLoops(MaxLevels + 1); + SmallBitVector DstLoops(MaxLevels + 1); + if (!checkSrcSubscript(Src, SrcLoopNest, SrcLoops)) + return Subscript::NonLinear; + if (!checkDstSubscript(Dst, DstLoopNest, DstLoops)) + return Subscript::NonLinear; + Loops = SrcLoops; + Loops |= DstLoops; + unsigned N = Loops.count(); + if (N == 0) + return Subscript::ZIV; + if (N == 1) + return Subscript::SIV; + if (N == 2 && (SrcLoops.count() == 0 || + DstLoops.count() == 0 || + (SrcLoops.count() == 1 && DstLoops.count() == 1))) + return Subscript::RDIV; + return Subscript::MIV; +} + + +// A wrapper around SCEV::isKnownPredicate. +// Looks for cases where we're interested in comparing for equality. +// If both X and Y have been identically sign or zero extended, +// it strips off the (confusing) extensions before invoking +// SCEV::isKnownPredicate. Perhaps, someday, the ScalarEvolution package +// will be similarly updated. +// +// If SCEV::isKnownPredicate can't prove the predicate, +// we try simple subtraction, which seems to help in some cases +// involving symbolics. 
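The same fallback can be pictured in isolation. A minimal sketch for a single predicate, assuming the usual ScalarEvolution interface; the helper name and the include paths are illustrative, not part of the patch:

    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/Instructions.h"  // for ICmpInst; path as of this tree
    using namespace llvm;

    // If SE can't prove X > Y directly, try proving that X - Y is positive.
    static bool knownSGT(ScalarEvolution &SE, const SCEV *X, const SCEV *Y) {
      if (SE.isKnownPredicate(ICmpInst::ICMP_SGT, X, Y))
        return true;                               // direct proof
      const SCEV *Delta = SE.getMinusSCEV(X, Y);   // brute-force fallback
      return SE.isKnownPositive(Delta);            // X - Y > 0 implies X > Y
    }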
+bool DependenceAnalysis::isKnownPredicate(ICmpInst::Predicate Pred, + const SCEV *X, + const SCEV *Y) const { + if (Pred == CmpInst::ICMP_EQ || + Pred == CmpInst::ICMP_NE) { + if ((isa(X) && + isa(Y)) || + (isa(X) && + isa(Y))) { + const SCEVCastExpr *CX = cast(X); + const SCEVCastExpr *CY = cast(Y); + const SCEV *Xop = CX->getOperand(); + const SCEV *Yop = CY->getOperand(); + if (Xop->getType() == Yop->getType()) { + X = Xop; + Y = Yop; + } + } + } + if (SE->isKnownPredicate(Pred, X, Y)) + return true; + // If SE->isKnownPredicate can't prove the condition, + // we try the brute-force approach of subtracting + // and testing the difference. + // By testing with SE->isKnownPredicate first, we avoid + // the possibility of overflow when the arguments are constants. + const SCEV *Delta = SE->getMinusSCEV(X, Y); + switch (Pred) { + case CmpInst::ICMP_EQ: + return Delta->isZero(); + case CmpInst::ICMP_NE: + return SE->isKnownNonZero(Delta); + case CmpInst::ICMP_SGE: + return SE->isKnownNonNegative(Delta); + case CmpInst::ICMP_SLE: + return SE->isKnownNonPositive(Delta); + case CmpInst::ICMP_SGT: + return SE->isKnownPositive(Delta); + case CmpInst::ICMP_SLT: + return SE->isKnownNegative(Delta); + default: + llvm_unreachable("unexpected predicate in isKnownPredicate"); + } +} + + +// All subscripts are all the same type. +// Loop bound may be smaller (e.g., a char). +// Should zero extend loop bound, since it's always >= 0. +// This routine collects upper bound and extends if needed. +// Return null if no bound available. +const SCEV *DependenceAnalysis::collectUpperBound(const Loop *L, + Type *T) const { + if (SE->hasLoopInvariantBackedgeTakenCount(L)) { + const SCEV *UB = SE->getBackedgeTakenCount(L); + return SE->getNoopOrZeroExtend(UB, T); + } + return NULL; +} + + +// Calls collectUpperBound(), then attempts to cast it to SCEVConstant. +// If the cast fails, returns NULL. +const SCEVConstant *DependenceAnalysis::collectConstantUpperBound(const Loop *L, + Type *T + ) const { + if (const SCEV *UB = collectUpperBound(L, T)) + return dyn_cast(UB); + return NULL; +} + + +// testZIV - +// When we have a pair of subscripts of the form [c1] and [c2], +// where c1 and c2 are both loop invariant, we attack it using +// the ZIV test. Basically, we test by comparing the two values, +// but there are actually three possible results: +// 1) the values are equal, so there's a dependence +// 2) the values are different, so there's no dependence +// 3) the values might be equal, so we have to assume a dependence. +// +// Return true if dependence disproved. +bool DependenceAnalysis::testZIV(const SCEV *Src, + const SCEV *Dst, + FullDependence &Result) const { + DEBUG(dbgs() << " src = " << *Src << "\n"); + DEBUG(dbgs() << " dst = " << *Dst << "\n"); + ++ZIVapplications; + if (isKnownPredicate(CmpInst::ICMP_EQ, Src, Dst)) { + DEBUG(dbgs() << " provably dependent\n"); + return false; // provably dependent + } + if (isKnownPredicate(CmpInst::ICMP_NE, Src, Dst)) { + DEBUG(dbgs() << " provably independent\n"); + ++ZIVindependence; + return true; // provably independent + } + DEBUG(dbgs() << " possibly dependent\n"); + Result.Consistent = false; + return false; // possibly dependent +} + + +// strongSIVtest - +// From the paper, Practical Dependence Testing, Section 4.2.1 +// +// When we have a pair of subscripts of the form [c1 + a*i] and [c2 + a*i], +// where i is an induction variable, c1 and c2 are loop invariant, +// and a is a constant, we can solve it exactly using the Strong SIV test. 
+// +// Can prove independence. Failing that, can compute distance (and direction). +// In the presence of symbolic terms, we can sometimes make progress. +// +// If there's a dependence, +// +// c1 + a*i = c2 + a*i' +// +// The dependence distance is +// +// d = i' - i = (c1 - c2)/a +// +// A dependence only exists if d is an integer and abs(d) <= U, where U is the +// loop's upper bound. If a dependence exists, the dependence direction is +// defined as +// +// { < if d > 0 +// direction = { = if d = 0 +// { > if d < 0 +// +// Return true if dependence disproved. +bool DependenceAnalysis::strongSIVtest(const SCEV *Coeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint) const { + DEBUG(dbgs() << "\tStrong SIV test\n"); + DEBUG(dbgs() << "\t Coeff = " << *Coeff); + DEBUG(dbgs() << ", " << *Coeff->getType() << "\n"); + DEBUG(dbgs() << "\t SrcConst = " << *SrcConst); + DEBUG(dbgs() << ", " << *SrcConst->getType() << "\n"); + DEBUG(dbgs() << "\t DstConst = " << *DstConst); + DEBUG(dbgs() << ", " << *DstConst->getType() << "\n"); + ++StrongSIVapplications; + assert(0 < Level && Level <= CommonLevels && "level out of range"); + Level--; + + const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst); + DEBUG(dbgs() << "\t Delta = " << *Delta); + DEBUG(dbgs() << ", " << *Delta->getType() << "\n"); + + // check that |Delta| < iteration count + if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) { + DEBUG(dbgs() << "\t UpperBound = " << *UpperBound); + DEBUG(dbgs() << ", " << *UpperBound->getType() << "\n"); + const SCEV *AbsDelta = + SE->isKnownNonNegative(Delta) ? Delta : SE->getNegativeSCEV(Delta); + const SCEV *AbsCoeff = + SE->isKnownNonNegative(Coeff) ? Coeff : SE->getNegativeSCEV(Coeff); + const SCEV *Product = SE->getMulExpr(UpperBound, AbsCoeff); + if (isKnownPredicate(CmpInst::ICMP_SGT, AbsDelta, Product)) { + // Distance greater than trip count - no dependence + ++StrongSIVindependence; + ++StrongSIVsuccesses; + return true; + } + } + + // Can we compute distance? 
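As a worked instance of the constant-distance case handled next (illustrative numbers, not from the patch): for subscripts [2*i + 10] and [2*i' + 4], Delta = 10 - 4 = 6 and the coefficient is 2, so

\[
d = \frac{c_1 - c_2}{a} = \frac{10 - 4}{2} = 3 > 0
\quad\Longrightarrow\quad
\text{distance} = 3,\ \text{direction} = \texttt{<},
\]

whereas a Delta of 7 would leave a non-zero remainder and the dependence would be disproved.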
+ if (isa(Delta) && isa(Coeff)) { + APInt ConstDelta = cast(Delta)->getValue()->getValue(); + APInt ConstCoeff = cast(Coeff)->getValue()->getValue(); + APInt Distance = ConstDelta; // these need to be initialized + APInt Remainder = ConstDelta; + APInt::sdivrem(ConstDelta, ConstCoeff, Distance, Remainder); + DEBUG(dbgs() << "\t Distance = " << Distance << "\n"); + DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n"); + // Make sure Coeff divides Delta exactly + if (Remainder != 0) { + // Coeff doesn't divide Distance, no dependence + ++StrongSIVindependence; + ++StrongSIVsuccesses; + return true; + } + Result.DV[Level].Distance = SE->getConstant(Distance); + NewConstraint.setDistance(SE->getConstant(Distance), CurLoop); + if (Distance.sgt(0)) + Result.DV[Level].Direction &= Dependence::DVEntry::LT; + else if (Distance.slt(0)) + Result.DV[Level].Direction &= Dependence::DVEntry::GT; + else + Result.DV[Level].Direction &= Dependence::DVEntry::EQ; + ++StrongSIVsuccesses; + } + else if (Delta->isZero()) { + // since 0/X == 0 + Result.DV[Level].Distance = Delta; + NewConstraint.setDistance(Delta, CurLoop); + Result.DV[Level].Direction &= Dependence::DVEntry::EQ; + ++StrongSIVsuccesses; + } + else { + if (Coeff->isOne()) { + DEBUG(dbgs() << "\t Distance = " << *Delta << "\n"); + Result.DV[Level].Distance = Delta; // since X/1 == X + NewConstraint.setDistance(Delta, CurLoop); + } + else { + Result.Consistent = false; + NewConstraint.setLine(Coeff, + SE->getNegativeSCEV(Coeff), + SE->getNegativeSCEV(Delta), CurLoop); + } + + // maybe we can get a useful direction + bool DeltaMaybeZero = !SE->isKnownNonZero(Delta); + bool DeltaMaybePositive = !SE->isKnownNonPositive(Delta); + bool DeltaMaybeNegative = !SE->isKnownNonNegative(Delta); + bool CoeffMaybePositive = !SE->isKnownNonPositive(Coeff); + bool CoeffMaybeNegative = !SE->isKnownNonNegative(Coeff); + // The double negatives above are confusing. + // It helps to read !SE->isKnownNonZero(Delta) + // as "Delta might be Zero" + unsigned NewDirection = Dependence::DVEntry::NONE; + if ((DeltaMaybePositive && CoeffMaybePositive) || + (DeltaMaybeNegative && CoeffMaybeNegative)) + NewDirection = Dependence::DVEntry::LT; + if (DeltaMaybeZero) + NewDirection |= Dependence::DVEntry::EQ; + if ((DeltaMaybeNegative && CoeffMaybePositive) || + (DeltaMaybePositive && CoeffMaybeNegative)) + NewDirection |= Dependence::DVEntry::GT; + if (NewDirection < Result.DV[Level].Direction) + ++StrongSIVsuccesses; + Result.DV[Level].Direction &= NewDirection; + } + return false; +} + + +// weakCrossingSIVtest - +// From the paper, Practical Dependence Testing, Section 4.2.2 +// +// When we have a pair of subscripts of the form [c1 + a*i] and [c2 - a*i], +// where i is an induction variable, c1 and c2 are loop invariant, +// and a is a constant, we can solve it exactly using the +// Weak-Crossing SIV test. +// +// Given c1 + a*i = c2 - a*i', we can look for the intersection of +// the two lines, where i = i', yielding +// +// c1 + a*i = c2 - a*i +// 2a*i = c2 - c1 +// i = (c2 - c1)/2a +// +// If i < 0, there is no dependence. +// If i > upperbound, there is no dependence. +// If i = 0 (i.e., if c1 = c2), there's a dependence with distance = 0. +// If i = upperbound, there's a dependence with distance = 0. +// If i is integral, there's a dependence (all directions). +// If the non-integer part = 1/2, there's a dependence (<> directions). +// Otherwise, there's no dependence. +// +// Can prove independence. Failing that, +// can sometimes refine the directions. 
+// Can determine iteration for splitting. +// +// Return true if dependence disproved. +bool DependenceAnalysis::weakCrossingSIVtest(const SCEV *Coeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint, + const SCEV *&SplitIter) const { + DEBUG(dbgs() << "\tWeak-Crossing SIV test\n"); + DEBUG(dbgs() << "\t Coeff = " << *Coeff << "\n"); + DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); + DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); + ++WeakCrossingSIVapplications; + assert(0 < Level && Level <= CommonLevels && "Level out of range"); + Level--; + Result.Consistent = false; + const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); + DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); + NewConstraint.setLine(Coeff, Coeff, Delta, CurLoop); + if (Delta->isZero()) { + Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT); + Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::GT); + ++WeakCrossingSIVsuccesses; + if (!Result.DV[Level].Direction) { + ++WeakCrossingSIVindependence; + return true; + } + Result.DV[Level].Distance = Delta; // = 0 + return false; + } + const SCEVConstant *ConstCoeff = dyn_cast(Coeff); + if (!ConstCoeff) + return false; + + Result.DV[Level].Splitable = true; + if (SE->isKnownNegative(ConstCoeff)) { + ConstCoeff = dyn_cast(SE->getNegativeSCEV(ConstCoeff)); + assert(ConstCoeff && + "dynamic cast of negative of ConstCoeff should yield constant"); + Delta = SE->getNegativeSCEV(Delta); + } + assert(SE->isKnownPositive(ConstCoeff) && "ConstCoeff should be positive"); + + // compute SplitIter for use by DependenceAnalysis::getSplitIteration() + SplitIter = + SE->getUDivExpr(SE->getSMaxExpr(SE->getConstant(Delta->getType(), 0), + Delta), + SE->getMulExpr(SE->getConstant(Delta->getType(), 2), + ConstCoeff)); + DEBUG(dbgs() << "\t Split iter = " << *SplitIter << "\n"); + + const SCEVConstant *ConstDelta = dyn_cast(Delta); + if (!ConstDelta) + return false; + + // We're certain that ConstCoeff > 0; therefore, + // if Delta < 0, then no dependence. + DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); + DEBUG(dbgs() << "\t ConstCoeff = " << *ConstCoeff << "\n"); + if (SE->isKnownNegative(Delta)) { + // No dependence, Delta < 0 + ++WeakCrossingSIVindependence; + ++WeakCrossingSIVsuccesses; + return true; + } + + // We're certain that Delta > 0 and ConstCoeff > 0. 
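To make the split-iteration arithmetic concrete (illustrative values): for [i] versus [9 - i], Delta = 9 and the coefficient is 1, so the two index expressions cross at

\[
i = \frac{c_2 - c_1}{2a} = \frac{9}{2 \cdot 1} = 4.5 .
\]

The fractional part is 1/2, so only the <> directions survive, and SplitIter = 9 / 2 = 4, i.e. the loop can be split between iterations 4 and 5.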
+ // Check Delta/(2*ConstCoeff) against upper loop bound + if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) { + DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n"); + const SCEV *ConstantTwo = SE->getConstant(UpperBound->getType(), 2); + const SCEV *ML = SE->getMulExpr(SE->getMulExpr(ConstCoeff, UpperBound), + ConstantTwo); + DEBUG(dbgs() << "\t ML = " << *ML << "\n"); + if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, ML)) { + // Delta too big, no dependence + ++WeakCrossingSIVindependence; + ++WeakCrossingSIVsuccesses; + return true; + } + if (isKnownPredicate(CmpInst::ICMP_EQ, Delta, ML)) { + // i = i' = UB + Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::LT); + Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::GT); + ++WeakCrossingSIVsuccesses; + if (!Result.DV[Level].Direction) { + ++WeakCrossingSIVindependence; + return true; + } + Result.DV[Level].Splitable = false; + Result.DV[Level].Distance = SE->getConstant(Delta->getType(), 0); + return false; + } + } + + // check that Coeff divides Delta + APInt APDelta = ConstDelta->getValue()->getValue(); + APInt APCoeff = ConstCoeff->getValue()->getValue(); + APInt Distance = APDelta; // these need to be initialzed + APInt Remainder = APDelta; + APInt::sdivrem(APDelta, APCoeff, Distance, Remainder); + DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n"); + if (Remainder != 0) { + // Coeff doesn't divide Delta, no dependence + ++WeakCrossingSIVindependence; + ++WeakCrossingSIVsuccesses; + return true; + } + DEBUG(dbgs() << "\t Distance = " << Distance << "\n"); + + // if 2*Coeff doesn't divide Delta, then the equal direction isn't possible + APInt Two = APInt(Distance.getBitWidth(), 2, true); + Remainder = Distance.srem(Two); + DEBUG(dbgs() << "\t Remainder = " << Remainder << "\n"); + if (Remainder != 0) { + // Equal direction isn't possible + Result.DV[Level].Direction &= unsigned(~Dependence::DVEntry::EQ); + ++WeakCrossingSIVsuccesses; + } + return false; +} + + +// Kirch's algorithm, from +// +// Optimizing Supercompilers for Supercomputers +// Michael Wolfe +// MIT Press, 1989 +// +// Program 2.1, page 29. +// Computes the GCD of AM and BM. +// Also finds a solution to the equation ax - by = gdc(a, b). +// Returns true iff the gcd divides Delta. +static +bool findGCD(unsigned Bits, APInt AM, APInt BM, APInt Delta, + APInt &G, APInt &X, APInt &Y) { + APInt A0(Bits, 1, true), A1(Bits, 0, true); + APInt B0(Bits, 0, true), B1(Bits, 1, true); + APInt G0 = AM.abs(); + APInt G1 = BM.abs(); + APInt Q = G0; // these need to be initialized + APInt R = G0; + APInt::sdivrem(G0, G1, Q, R); + while (R != 0) { + APInt A2 = A0 - Q*A1; A0 = A1; A1 = A2; + APInt B2 = B0 - Q*B1; B0 = B1; B1 = B2; + G0 = G1; G1 = R; + APInt::sdivrem(G0, G1, Q, R); + } + G = G1; + DEBUG(dbgs() << "\t GCD = " << G << "\n"); + X = AM.slt(0) ? -A1 : A1; + Y = BM.slt(0) ? 
B1 : -B1; + + // make sure gcd divides Delta + R = Delta.srem(G); + if (R != 0) + return true; // gcd doesn't divide Delta, no dependence + Q = Delta.sdiv(G); + X *= Q; + Y *= Q; + return false; +} + + +static +APInt floorOfQuotient(APInt A, APInt B) { + APInt Q = A; // these need to be initialized + APInt R = A; + APInt::sdivrem(A, B, Q, R); + if (R == 0) + return Q; + if ((A.sgt(0) && B.sgt(0)) || + (A.slt(0) && B.slt(0))) + return Q; + else + return Q - 1; +} + + +static +APInt ceilingOfQuotient(APInt A, APInt B) { + APInt Q = A; // these need to be initialized + APInt R = A; + APInt::sdivrem(A, B, Q, R); + if (R == 0) + return Q; + if ((A.sgt(0) && B.sgt(0)) || + (A.slt(0) && B.slt(0))) + return Q + 1; + else + return Q; +} + + +static +APInt maxAPInt(APInt A, APInt B) { + return A.sgt(B) ? A : B; +} + + +static +APInt minAPInt(APInt A, APInt B) { + return A.slt(B) ? A : B; +} + + +// exactSIVtest - +// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*i], +// where i is an induction variable, c1 and c2 are loop invariant, and a1 +// and a2 are constant, we can solve it exactly using an algorithm developed +// by Banerjee and Wolfe. See Section 2.5.3 in +// +// Optimizing Supercompilers for Supercomputers +// Michael Wolfe +// MIT Press, 1989 +// +// It's slower than the specialized tests (strong SIV, weak-zero SIV, etc), +// so use them if possible. They're also a bit better with symbolics and, +// in the case of the strong SIV test, can compute Distances. +// +// Return true if dependence disproved. +bool DependenceAnalysis::exactSIVtest(const SCEV *SrcCoeff, + const SCEV *DstCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint) const { + DEBUG(dbgs() << "\tExact SIV test\n"); + DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n"); + DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n"); + DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); + DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); + ++ExactSIVapplications; + assert(0 < Level && Level <= CommonLevels && "Level out of range"); + Level--; + Result.Consistent = false; + const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); + DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); + NewConstraint.setLine(SrcCoeff, SE->getNegativeSCEV(DstCoeff), + Delta, CurLoop); + const SCEVConstant *ConstDelta = dyn_cast(Delta); + const SCEVConstant *ConstSrcCoeff = dyn_cast(SrcCoeff); + const SCEVConstant *ConstDstCoeff = dyn_cast(DstCoeff); + if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff) + return false; + + // find gcd + APInt G, X, Y; + APInt AM = ConstSrcCoeff->getValue()->getValue(); + APInt BM = ConstDstCoeff->getValue()->getValue(); + unsigned Bits = AM.getBitWidth(); + if (findGCD(Bits, AM, BM, ConstDelta->getValue()->getValue(), G, X, Y)) { + // gcd doesn't divide Delta, no dependence + ++ExactSIVindependence; + ++ExactSIVsuccesses; + return true; + } + + DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n"); + + // since SCEV construction normalizes, LM = 0 + APInt UM(Bits, 1, true); + bool UMvalid = false; + // UM is perhaps unavailable, let's check + if (const SCEVConstant *CUB = + collectConstantUpperBound(CurLoop, Delta->getType())) { + UM = CUB->getValue()->getValue(); + DEBUG(dbgs() << "\t UM = " << UM << "\n"); + UMvalid = true; + } + + APInt TU(APInt::getSignedMaxValue(Bits)); + APInt TL(APInt::getSignedMinValue(Bits)); + + // test(BM/G, LM-X) and test(-BM/G, X-UM) + APInt TMUL 
= BM.sdiv(G); + if (TMUL.sgt(0)) { + TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL)); + DEBUG(dbgs() << "\t TL = " << TL << "\n"); + if (UMvalid) { + TU = minAPInt(TU, floorOfQuotient(UM - X, TMUL)); + DEBUG(dbgs() << "\t TU = " << TU << "\n"); + } + } + else { + TU = minAPInt(TU, floorOfQuotient(-X, TMUL)); + DEBUG(dbgs() << "\t TU = " << TU << "\n"); + if (UMvalid) { + TL = maxAPInt(TL, ceilingOfQuotient(UM - X, TMUL)); + DEBUG(dbgs() << "\t TL = " << TL << "\n"); + } + } + + // test(AM/G, LM-Y) and test(-AM/G, Y-UM) + TMUL = AM.sdiv(G); + if (TMUL.sgt(0)) { + TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL)); + DEBUG(dbgs() << "\t TL = " << TL << "\n"); + if (UMvalid) { + TU = minAPInt(TU, floorOfQuotient(UM - Y, TMUL)); + DEBUG(dbgs() << "\t TU = " << TU << "\n"); + } + } + else { + TU = minAPInt(TU, floorOfQuotient(-Y, TMUL)); + DEBUG(dbgs() << "\t TU = " << TU << "\n"); + if (UMvalid) { + TL = maxAPInt(TL, ceilingOfQuotient(UM - Y, TMUL)); + DEBUG(dbgs() << "\t TL = " << TL << "\n"); + } + } + if (TL.sgt(TU)) { + ++ExactSIVindependence; + ++ExactSIVsuccesses; + return true; + } + + // explore directions + unsigned NewDirection = Dependence::DVEntry::NONE; + + // less than + APInt SaveTU(TU); // save these + APInt SaveTL(TL); + DEBUG(dbgs() << "\t exploring LT direction\n"); + TMUL = AM - BM; + if (TMUL.sgt(0)) { + TL = maxAPInt(TL, ceilingOfQuotient(X - Y + 1, TMUL)); + DEBUG(dbgs() << "\t\t TL = " << TL << "\n"); + } + else { + TU = minAPInt(TU, floorOfQuotient(X - Y + 1, TMUL)); + DEBUG(dbgs() << "\t\t TU = " << TU << "\n"); + } + if (TL.sle(TU)) { + NewDirection |= Dependence::DVEntry::LT; + ++ExactSIVsuccesses; + } + + // equal + TU = SaveTU; // restore + TL = SaveTL; + DEBUG(dbgs() << "\t exploring EQ direction\n"); + if (TMUL.sgt(0)) { + TL = maxAPInt(TL, ceilingOfQuotient(X - Y, TMUL)); + DEBUG(dbgs() << "\t\t TL = " << TL << "\n"); + } + else { + TU = minAPInt(TU, floorOfQuotient(X - Y, TMUL)); + DEBUG(dbgs() << "\t\t TU = " << TU << "\n"); + } + TMUL = BM - AM; + if (TMUL.sgt(0)) { + TL = maxAPInt(TL, ceilingOfQuotient(Y - X, TMUL)); + DEBUG(dbgs() << "\t\t TL = " << TL << "\n"); + } + else { + TU = minAPInt(TU, floorOfQuotient(Y - X, TMUL)); + DEBUG(dbgs() << "\t\t TU = " << TU << "\n"); + } + if (TL.sle(TU)) { + NewDirection |= Dependence::DVEntry::EQ; + ++ExactSIVsuccesses; + } + + // greater than + TU = SaveTU; // restore + TL = SaveTL; + DEBUG(dbgs() << "\t exploring GT direction\n"); + if (TMUL.sgt(0)) { + TL = maxAPInt(TL, ceilingOfQuotient(Y - X + 1, TMUL)); + DEBUG(dbgs() << "\t\t TL = " << TL << "\n"); + } + else { + TU = minAPInt(TU, floorOfQuotient(Y - X + 1, TMUL)); + DEBUG(dbgs() << "\t\t TU = " << TU << "\n"); + } + if (TL.sle(TU)) { + NewDirection |= Dependence::DVEntry::GT; + ++ExactSIVsuccesses; + } + + // finished + Result.DV[Level].Direction &= NewDirection; + if (Result.DV[Level].Direction == Dependence::DVEntry::NONE) + ++ExactSIVindependence; + return Result.DV[Level].Direction == Dependence::DVEntry::NONE; +} + + + +// Return true if the divisor evenly divides the dividend. 
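findGCD above is the classic extended Euclidean algorithm dressed up for APInt. A minimal stand-alone sketch of the same idea using plain 64-bit integers (illustrative only, not part of the patch):

    #include <cstdint>
    #include <cstdlib>

    // Computes g = gcd(|a|, |b|) and coefficients x, y with |a|*x + |b|*y == g.
    // When g divides the constant difference Delta, integer solutions of the
    // dependence equation exist and can then be clipped against loop bounds.
    static int64_t extendedGCD(int64_t a, int64_t b, int64_t &x, int64_t &y) {
      int64_t x0 = 1, x1 = 0, y0 = 0, y1 = 1;
      int64_t g0 = std::llabs(a), g1 = std::llabs(b);
      while (g1 != 0) {
        int64_t q = g0 / g1, t;
        t = g0 - q * g1; g0 = g1; g1 = t;
        t = x0 - q * x1; x0 = x1; x1 = t;
        t = y0 - q * y1; y0 = y1; y1 = t;
      }
      x = x0;
      y = y0;
      return g0;
    }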
+static +bool isRemainderZero(const SCEVConstant *Dividend, + const SCEVConstant *Divisor) { + APInt ConstDividend = Dividend->getValue()->getValue(); + APInt ConstDivisor = Divisor->getValue()->getValue(); + return ConstDividend.srem(ConstDivisor) == 0; +} + + +// weakZeroSrcSIVtest - +// From the paper, Practical Dependence Testing, Section 4.2.2 +// +// When we have a pair of subscripts of the form [c1] and [c2 + a*i], +// where i is an induction variable, c1 and c2 are loop invariant, +// and a is a constant, we can solve it exactly using the +// Weak-Zero SIV test. +// +// Given +// +// c1 = c2 + a*i +// +// we get +// +// (c1 - c2)/a = i +// +// If i is not an integer, there's no dependence. +// If i < 0 or > UB, there's no dependence. +// If i = 0, the direction is <= and peeling the +// 1st iteration will break the dependence. +// If i = UB, the direction is >= and peeling the +// last iteration will break the dependence. +// Otherwise, the direction is *. +// +// Can prove independence. Failing that, we can sometimes refine +// the directions. Can sometimes show that first or last +// iteration carries all the dependences (so worth peeling). +// +// (see also weakZeroDstSIVtest) +// +// Return true if dependence disproved. +bool DependenceAnalysis::weakZeroSrcSIVtest(const SCEV *DstCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint) const { + // For the WeakSIV test, it's possible the loop isn't common to + // the Src and Dst loops. If it isn't, then there's no need to + // record a direction. + DEBUG(dbgs() << "\tWeak-Zero (src) SIV test\n"); + DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << "\n"); + DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); + DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); + ++WeakZeroSIVapplications; + assert(0 < Level && Level <= MaxLevels && "Level out of range"); + Level--; + Result.Consistent = false; + const SCEV *Delta = SE->getMinusSCEV(SrcConst, DstConst); + NewConstraint.setLine(SE->getConstant(Delta->getType(), 0), + DstCoeff, Delta, CurLoop); + DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); + if (isKnownPredicate(CmpInst::ICMP_EQ, SrcConst, DstConst)) { + if (Level < CommonLevels) { + Result.DV[Level].Direction &= Dependence::DVEntry::LE; + Result.DV[Level].PeelFirst = true; + ++WeakZeroSIVsuccesses; + } + return false; // dependences caused by first iteration + } + const SCEVConstant *ConstCoeff = dyn_cast(DstCoeff); + if (!ConstCoeff) + return false; + const SCEV *AbsCoeff = + SE->isKnownNegative(ConstCoeff) ? + SE->getNegativeSCEV(ConstCoeff) : ConstCoeff; + const SCEV *NewDelta = + SE->isKnownNegative(ConstCoeff) ? 
SE->getNegativeSCEV(Delta) : Delta; + + // check that Delta/SrcCoeff < iteration count + // really check NewDelta < count*AbsCoeff + if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) { + DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n"); + const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound); + if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) { + ++WeakZeroSIVindependence; + ++WeakZeroSIVsuccesses; + return true; + } + if (isKnownPredicate(CmpInst::ICMP_EQ, NewDelta, Product)) { + // dependences caused by last iteration + if (Level < CommonLevels) { + Result.DV[Level].Direction &= Dependence::DVEntry::GE; + Result.DV[Level].PeelLast = true; + ++WeakZeroSIVsuccesses; + } + return false; + } + } + + // check that Delta/SrcCoeff >= 0 + // really check that NewDelta >= 0 + if (SE->isKnownNegative(NewDelta)) { + // No dependence, newDelta < 0 + ++WeakZeroSIVindependence; + ++WeakZeroSIVsuccesses; + return true; + } + + // if SrcCoeff doesn't divide Delta, then no dependence + if (isa(Delta) && + !isRemainderZero(cast(Delta), ConstCoeff)) { + ++WeakZeroSIVindependence; + ++WeakZeroSIVsuccesses; + return true; + } + return false; +} + + +// weakZeroDstSIVtest - +// From the paper, Practical Dependence Testing, Section 4.2.2 +// +// When we have a pair of subscripts of the form [c1 + a*i] and [c2], +// where i is an induction variable, c1 and c2 are loop invariant, +// and a is a constant, we can solve it exactly using the +// Weak-Zero SIV test. +// +// Given +// +// c1 + a*i = c2 +// +// we get +// +// i = (c2 - c1)/a +// +// If i is not an integer, there's no dependence. +// If i < 0 or > UB, there's no dependence. +// If i = 0, the direction is <= and peeling the +// 1st iteration will break the dependence. +// If i = UB, the direction is >= and peeling the +// last iteration will break the dependence. +// Otherwise, the direction is *. +// +// Can prove independence. Failing that, we can sometimes refine +// the directions. Can sometimes show that first or last +// iteration carries all the dependences (so worth peeling). +// +// (see also weakZeroSrcSIVtest) +// +// Return true if dependence disproved. +bool DependenceAnalysis::weakZeroDstSIVtest(const SCEV *SrcCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *CurLoop, + unsigned Level, + FullDependence &Result, + Constraint &NewConstraint) const { + // For the WeakSIV test, it's possible the loop isn't common to the + // Src and Dst loops. If it isn't, then there's no need to record a direction. + DEBUG(dbgs() << "\tWeak-Zero (dst) SIV test\n"); + DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << "\n"); + DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); + DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); + ++WeakZeroSIVapplications; + assert(0 < Level && Level <= SrcLevels && "Level out of range"); + Level--; + Result.Consistent = false; + const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); + NewConstraint.setLine(SrcCoeff, SE->getConstant(Delta->getType(), 0), + Delta, CurLoop); + DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); + if (isKnownPredicate(CmpInst::ICMP_EQ, DstConst, SrcConst)) { + if (Level < CommonLevels) { + Result.DV[Level].Direction &= Dependence::DVEntry::LE; + Result.DV[Level].PeelFirst = true; + ++WeakZeroSIVsuccesses; + } + return false; // dependences caused by first iteration + } + const SCEVConstant *ConstCoeff = dyn_cast(SrcCoeff); + if (!ConstCoeff) + return false; + const SCEV *AbsCoeff = + SE->isKnownNegative(ConstCoeff) ? 
+ SE->getNegativeSCEV(ConstCoeff) : ConstCoeff; + const SCEV *NewDelta = + SE->isKnownNegative(ConstCoeff) ? SE->getNegativeSCEV(Delta) : Delta; + + // check that Delta/SrcCoeff < iteration count + // really check NewDelta < count*AbsCoeff + if (const SCEV *UpperBound = collectUpperBound(CurLoop, Delta->getType())) { + DEBUG(dbgs() << "\t UpperBound = " << *UpperBound << "\n"); + const SCEV *Product = SE->getMulExpr(AbsCoeff, UpperBound); + if (isKnownPredicate(CmpInst::ICMP_SGT, NewDelta, Product)) { + ++WeakZeroSIVindependence; + ++WeakZeroSIVsuccesses; + return true; + } + if (isKnownPredicate(CmpInst::ICMP_EQ, NewDelta, Product)) { + // dependences caused by last iteration + if (Level < CommonLevels) { + Result.DV[Level].Direction &= Dependence::DVEntry::GE; + Result.DV[Level].PeelLast = true; + ++WeakZeroSIVsuccesses; + } + return false; + } + } + + // check that Delta/SrcCoeff >= 0 + // really check that NewDelta >= 0 + if (SE->isKnownNegative(NewDelta)) { + // No dependence, newDelta < 0 + ++WeakZeroSIVindependence; + ++WeakZeroSIVsuccesses; + return true; + } + + // if SrcCoeff doesn't divide Delta, then no dependence + if (isa(Delta) && + !isRemainderZero(cast(Delta), ConstCoeff)) { + ++WeakZeroSIVindependence; + ++WeakZeroSIVsuccesses; + return true; + } + return false; +} + + +// exactRDIVtest - Tests the RDIV subscript pair for dependence. +// Things of the form [c1 + a*i] and [c2 + b*j], +// where i and j are induction variable, c1 and c2 are loop invariant, +// and a and b are constants. +// Returns true if any possible dependence is disproved. +// Marks the result as inconsistent. +// Works in some cases that symbolicRDIVtest doesn't, and vice versa. +bool DependenceAnalysis::exactRDIVtest(const SCEV *SrcCoeff, + const SCEV *DstCoeff, + const SCEV *SrcConst, + const SCEV *DstConst, + const Loop *SrcLoop, + const Loop *DstLoop, + FullDependence &Result) const { + DEBUG(dbgs() << "\tExact RDIV test\n"); + DEBUG(dbgs() << "\t SrcCoeff = " << *SrcCoeff << " = AM\n"); + DEBUG(dbgs() << "\t DstCoeff = " << *DstCoeff << " = BM\n"); + DEBUG(dbgs() << "\t SrcConst = " << *SrcConst << "\n"); + DEBUG(dbgs() << "\t DstConst = " << *DstConst << "\n"); + ++ExactRDIVapplications; + Result.Consistent = false; + const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); + DEBUG(dbgs() << "\t Delta = " << *Delta << "\n"); + const SCEVConstant *ConstDelta = dyn_cast(Delta); + const SCEVConstant *ConstSrcCoeff = dyn_cast(SrcCoeff); + const SCEVConstant *ConstDstCoeff = dyn_cast(DstCoeff); + if (!ConstDelta || !ConstSrcCoeff || !ConstDstCoeff) + return false; + + // find gcd + APInt G, X, Y; + APInt AM = ConstSrcCoeff->getValue()->getValue(); + APInt BM = ConstDstCoeff->getValue()->getValue(); + unsigned Bits = AM.getBitWidth(); + if (findGCD(Bits, AM, BM, ConstDelta->getValue()->getValue(), G, X, Y)) { + // gcd doesn't divide Delta, no dependence + ++ExactRDIVindependence; + return true; + } + + DEBUG(dbgs() << "\t X = " << X << ", Y = " << Y << "\n"); + + // since SCEV construction seems to normalize, LM = 0 + APInt SrcUM(Bits, 1, true); + bool SrcUMvalid = false; + // SrcUM is perhaps unavailable, let's check + if (const SCEVConstant *UpperBound = + collectConstantUpperBound(SrcLoop, Delta->getType())) { + SrcUM = UpperBound->getValue()->getValue(); + DEBUG(dbgs() << "\t SrcUM = " << SrcUM << "\n"); + SrcUMvalid = true; + } + + APInt DstUM(Bits, 1, true); + bool DstUMvalid = false; + // UM is perhaps unavailable, let's check + if (const SCEVConstant *UpperBound = + 
collectConstantUpperBound(DstLoop, Delta->getType())) { + DstUM = UpperBound->getValue()->getValue(); + DEBUG(dbgs() << "\t DstUM = " << DstUM << "\n"); + DstUMvalid = true; + } + + APInt TU(APInt::getSignedMaxValue(Bits)); + APInt TL(APInt::getSignedMinValue(Bits)); + + // test(BM/G, LM-X) and test(-BM/G, X-UM) + APInt TMUL = BM.sdiv(G); + if (TMUL.sgt(0)) { + TL = maxAPInt(TL, ceilingOfQuotient(-X, TMUL)); + DEBUG(dbgs() << "\t TL = " << TL << "\n"); + if (SrcUMvalid) { + TU = minAPInt(TU, floorOfQuotient(SrcUM - X, TMUL)); + DEBUG(dbgs() << "\t TU = " << TU << "\n"); + } + } + else { + TU = minAPInt(TU, floorOfQuotient(-X, TMUL)); + DEBUG(dbgs() << "\t TU = " << TU << "\n"); + if (SrcUMvalid) { + TL = maxAPInt(TL, ceilingOfQuotient(SrcUM - X, TMUL)); + DEBUG(dbgs() << "\t TL = " << TL << "\n"); + } + } + + // test(AM/G, LM-Y) and test(-AM/G, Y-UM) + TMUL = AM.sdiv(G); + if (TMUL.sgt(0)) { + TL = maxAPInt(TL, ceilingOfQuotient(-Y, TMUL)); + DEBUG(dbgs() << "\t TL = " << TL << "\n"); + if (DstUMvalid) { + TU = minAPInt(TU, floorOfQuotient(DstUM - Y, TMUL)); + DEBUG(dbgs() << "\t TU = " << TU << "\n"); + } + } + else { + TU = minAPInt(TU, floorOfQuotient(-Y, TMUL)); + DEBUG(dbgs() << "\t TU = " << TU << "\n"); + if (DstUMvalid) { + TL = maxAPInt(TL, ceilingOfQuotient(DstUM - Y, TMUL)); + DEBUG(dbgs() << "\t TL = " << TL << "\n"); + } + } + if (TL.sgt(TU)) + ++ExactRDIVindependence; + return TL.sgt(TU); +} + + +// symbolicRDIVtest - +// In Section 4.5 of the Practical Dependence Testing paper,the authors +// introduce a special case of Banerjee's Inequalities (also called the +// Extreme-Value Test) that can handle some of the SIV and RDIV cases, +// particularly cases with symbolics. Since it's only able to disprove +// dependence (not compute distances or directions), we'll use it as a +// fall back for the other tests. +// +// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j] +// where i and j are induction variables and c1 and c2 are loop invariants, +// we can use the symbolic tests to disprove some dependences, serving as a +// backup for the RDIV test. Note that i and j can be the same variable, +// letting this test serve as a backup for the various SIV tests. +// +// For a dependence to exist, c1 + a1*i must equal c2 + a2*j for some +// 0 <= i <= N1 and some 0 <= j <= N2, where N1 and N2 are the (normalized) +// loop bounds for the i and j loops, respectively. So, ... +// +// c1 + a1*i = c2 + a2*j +// a1*i - a2*j = c2 - c1 +// +// To test for a dependence, we compute c2 - c1 and make sure it's in the +// range of the maximum and minimum possible values of a1*i - a2*j. 
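A small numeric instance of this range check (illustrative values): take a1 = 1, a2 = 2, loop bounds N1 = N2 = 10, and c2 - c1 = -30. With both coefficients non-negative the requirement

\[
-a_2 N_2 \;\le\; c_2 - c_1 \;\le\; a_1 N_1
\qquad\text{becomes}\qquad
-20 \;\le\; -30 \;\le\; 10 ,
\]

which fails, so the dependence is disproved; this is case 1 of the four sign cases enumerated next.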
+// Considering the signs of a1 and a2, we have 4 possible cases: +// +// 1) If a1 >= 0 and a2 >= 0, then +// a1*0 - a2*N2 <= c2 - c1 <= a1*N1 - a2*0 +// -a2*N2 <= c2 - c1 <= a1*N1 +// +// 2) If a1 >= 0 and a2 <= 0, then +// a1*0 - a2*0 <= c2 - c1 <= a1*N1 - a2*N2 +// 0 <= c2 - c1 <= a1*N1 - a2*N2 +// +// 3) If a1 <= 0 and a2 >= 0, then +// a1*N1 - a2*N2 <= c2 - c1 <= a1*0 - a2*0 +// a1*N1 - a2*N2 <= c2 - c1 <= 0 +// +// 4) If a1 <= 0 and a2 <= 0, then +// a1*N1 - a2*0 <= c2 - c1 <= a1*0 - a2*N2 +// a1*N1 <= c2 - c1 <= -a2*N2 +// +// return true if dependence disproved +bool DependenceAnalysis::symbolicRDIVtest(const SCEV *A1, + const SCEV *A2, + const SCEV *C1, + const SCEV *C2, + const Loop *Loop1, + const Loop *Loop2) const { + ++SymbolicRDIVapplications; + DEBUG(dbgs() << "\ttry symbolic RDIV test\n"); + DEBUG(dbgs() << "\t A1 = " << *A1); + DEBUG(dbgs() << ", type = " << *A1->getType() << "\n"); + DEBUG(dbgs() << "\t A2 = " << *A2 << "\n"); + DEBUG(dbgs() << "\t C1 = " << *C1 << "\n"); + DEBUG(dbgs() << "\t C2 = " << *C2 << "\n"); + const SCEV *N1 = collectUpperBound(Loop1, A1->getType()); + const SCEV *N2 = collectUpperBound(Loop2, A1->getType()); + DEBUG(if (N1) dbgs() << "\t N1 = " << *N1 << "\n"); + DEBUG(if (N2) dbgs() << "\t N2 = " << *N2 << "\n"); + const SCEV *C2_C1 = SE->getMinusSCEV(C2, C1); + const SCEV *C1_C2 = SE->getMinusSCEV(C1, C2); + DEBUG(dbgs() << "\t C2 - C1 = " << *C2_C1 << "\n"); + DEBUG(dbgs() << "\t C1 - C2 = " << *C1_C2 << "\n"); + if (SE->isKnownNonNegative(A1)) { + if (SE->isKnownNonNegative(A2)) { + // A1 >= 0 && A2 >= 0 + if (N1) { + // make sure that c2 - c1 <= a1*N1 + const SCEV *A1N1 = SE->getMulExpr(A1, N1); + DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n"); + if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1)) { + ++SymbolicRDIVindependence; + return true; + } + } + if (N2) { + // make sure that -a2*N2 <= c2 - c1, or a2*N2 >= c1 - c2 + const SCEV *A2N2 = SE->getMulExpr(A2, N2); + DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n"); + if (isKnownPredicate(CmpInst::ICMP_SLT, A2N2, C1_C2)) { + ++SymbolicRDIVindependence; + return true; + } + } + } + else if (SE->isKnownNonPositive(A2)) { + // a1 >= 0 && a2 <= 0 + if (N1 && N2) { + // make sure that c2 - c1 <= a1*N1 - a2*N2 + const SCEV *A1N1 = SE->getMulExpr(A1, N1); + const SCEV *A2N2 = SE->getMulExpr(A2, N2); + const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2); + DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n"); + if (isKnownPredicate(CmpInst::ICMP_SGT, C2_C1, A1N1_A2N2)) { + ++SymbolicRDIVindependence; + return true; + } + } + // make sure that 0 <= c2 - c1 + if (SE->isKnownNegative(C2_C1)) { + ++SymbolicRDIVindependence; + return true; + } + } + } + else if (SE->isKnownNonPositive(A1)) { + if (SE->isKnownNonNegative(A2)) { + // a1 <= 0 && a2 >= 0 + if (N1 && N2) { + // make sure that a1*N1 - a2*N2 <= c2 - c1 + const SCEV *A1N1 = SE->getMulExpr(A1, N1); + const SCEV *A2N2 = SE->getMulExpr(A2, N2); + const SCEV *A1N1_A2N2 = SE->getMinusSCEV(A1N1, A2N2); + DEBUG(dbgs() << "\t A1*N1 - A2*N2 = " << *A1N1_A2N2 << "\n"); + if (isKnownPredicate(CmpInst::ICMP_SGT, A1N1_A2N2, C2_C1)) { + ++SymbolicRDIVindependence; + return true; + } + } + // make sure that c2 - c1 <= 0 + if (SE->isKnownPositive(C2_C1)) { + ++SymbolicRDIVindependence; + return true; + } + } + else if (SE->isKnownNonPositive(A2)) { + // a1 <= 0 && a2 <= 0 + if (N1) { + // make sure that a1*N1 <= c2 - c1 + const SCEV *A1N1 = SE->getMulExpr(A1, N1); + DEBUG(dbgs() << "\t A1*N1 = " << *A1N1 << "\n"); + if 
(isKnownPredicate(CmpInst::ICMP_SGT, A1N1, C2_C1)) { + ++SymbolicRDIVindependence; + return true; + } + } + if (N2) { + // make sure that c2 - c1 <= -a2*N2, or c1 - c2 >= a2*N2 + const SCEV *A2N2 = SE->getMulExpr(A2, N2); + DEBUG(dbgs() << "\t A2*N2 = " << *A2N2 << "\n"); + if (isKnownPredicate(CmpInst::ICMP_SLT, C1_C2, A2N2)) { + ++SymbolicRDIVindependence; + return true; + } + } + } + } + return false; +} + + +// testSIV - +// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 - a2*i] +// where i is an induction variable, c1 and c2 are loop invariant, and a1 and +// a2 are constant, we attack it with an SIV test. While they can all be +// solved with the Exact SIV test, it's worthwhile to use simpler tests when +// they apply; they're cheaper and sometimes more precise. +// +// Return true if dependence disproved. +bool DependenceAnalysis::testSIV(const SCEV *Src, + const SCEV *Dst, + unsigned &Level, + FullDependence &Result, + Constraint &NewConstraint, + const SCEV *&SplitIter) const { + DEBUG(dbgs() << " src = " << *Src << "\n"); + DEBUG(dbgs() << " dst = " << *Dst << "\n"); + const SCEVAddRecExpr *SrcAddRec = dyn_cast(Src); + const SCEVAddRecExpr *DstAddRec = dyn_cast(Dst); + if (SrcAddRec && DstAddRec) { + const SCEV *SrcConst = SrcAddRec->getStart(); + const SCEV *DstConst = DstAddRec->getStart(); + const SCEV *SrcCoeff = SrcAddRec->getStepRecurrence(*SE); + const SCEV *DstCoeff = DstAddRec->getStepRecurrence(*SE); + const Loop *CurLoop = SrcAddRec->getLoop(); + assert(CurLoop == DstAddRec->getLoop() && + "both loops in SIV should be same"); + Level = mapSrcLoop(CurLoop); + bool disproven; + if (SrcCoeff == DstCoeff) + disproven = strongSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop, + Level, Result, NewConstraint); + else if (SrcCoeff == SE->getNegativeSCEV(DstCoeff)) + disproven = weakCrossingSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop, + Level, Result, NewConstraint, SplitIter); + else + disproven = exactSIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop, + Level, Result, NewConstraint); + return disproven || + gcdMIVtest(Src, Dst, Result) || + symbolicRDIVtest(SrcCoeff, DstCoeff, SrcConst, DstConst, CurLoop, CurLoop); + } + if (SrcAddRec) { + const SCEV *SrcConst = SrcAddRec->getStart(); + const SCEV *SrcCoeff = SrcAddRec->getStepRecurrence(*SE); + const SCEV *DstConst = Dst; + const Loop *CurLoop = SrcAddRec->getLoop(); + Level = mapSrcLoop(CurLoop); + return weakZeroDstSIVtest(SrcCoeff, SrcConst, DstConst, CurLoop, + Level, Result, NewConstraint) || + gcdMIVtest(Src, Dst, Result); + } + if (DstAddRec) { + const SCEV *DstConst = DstAddRec->getStart(); + const SCEV *DstCoeff = DstAddRec->getStepRecurrence(*SE); + const SCEV *SrcConst = Src; + const Loop *CurLoop = DstAddRec->getLoop(); + Level = mapDstLoop(CurLoop); + return weakZeroSrcSIVtest(DstCoeff, SrcConst, DstConst, + CurLoop, Level, Result, NewConstraint) || + gcdMIVtest(Src, Dst, Result); + } + llvm_unreachable("SIV test expected at least one AddRec"); + return false; +} + + +// testRDIV - +// When we have a pair of subscripts of the form [c1 + a1*i] and [c2 + a2*j] +// where i and j are induction variables, c1 and c2 are loop invariant, +// and a1 and a2 are constant, we can solve it exactly with an easy adaptation +// of the Exact SIV test, the Restricted Double Index Variable (RDIV) test. +// It doesn't make sense to talk about distance or direction in this case, +// so there's no point in making special versions of the Strong SIV test or +// the Weak-crossing SIV test. 
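For a concrete picture of an RDIV pair (illustrative): a store to A[i + 10] in one loop and a load from A[2*j] in a sibling loop can conflict only if

\[
i + 10 = 2j \quad\text{for some}\quad 0 \le i \le N_1,\ 0 \le j \le N_2 ,
\]

and because i and j belong to different loops there is no shared iteration space in which a distance or direction could even be defined.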
+// +// With minor algebra, this test can also be used for things like +// [c1 + a1*i + a2*j][c2]. +// +// Return true if dependence disproved. +bool DependenceAnalysis::testRDIV(const SCEV *Src, + const SCEV *Dst, + FullDependence &Result) const { + // we have 3 possible situations here: + // 1) [a*i + b] and [c*j + d] + // 2) [a*i + c*j + b] and [d] + // 3) [b] and [a*i + c*j + d] + // We need to find what we've got and get organized + + const SCEV *SrcConst, *DstConst; + const SCEV *SrcCoeff, *DstCoeff; + const Loop *SrcLoop, *DstLoop; + + DEBUG(dbgs() << " src = " << *Src << "\n"); + DEBUG(dbgs() << " dst = " << *Dst << "\n"); + const SCEVAddRecExpr *SrcAddRec = dyn_cast(Src); + const SCEVAddRecExpr *DstAddRec = dyn_cast(Dst); + if (SrcAddRec && DstAddRec) { + SrcConst = SrcAddRec->getStart(); + SrcCoeff = SrcAddRec->getStepRecurrence(*SE); + SrcLoop = SrcAddRec->getLoop(); + DstConst = DstAddRec->getStart(); + DstCoeff = DstAddRec->getStepRecurrence(*SE); + DstLoop = DstAddRec->getLoop(); + } + else if (SrcAddRec) { + if (const SCEVAddRecExpr *tmpAddRec = + dyn_cast(SrcAddRec->getStart())) { + SrcConst = tmpAddRec->getStart(); + SrcCoeff = tmpAddRec->getStepRecurrence(*SE); + SrcLoop = tmpAddRec->getLoop(); + DstConst = Dst; + DstCoeff = SE->getNegativeSCEV(SrcAddRec->getStepRecurrence(*SE)); + DstLoop = SrcAddRec->getLoop(); + } + else + llvm_unreachable("RDIV reached by surprising SCEVs"); + } + else if (DstAddRec) { + if (const SCEVAddRecExpr *tmpAddRec = + dyn_cast(DstAddRec->getStart())) { + DstConst = tmpAddRec->getStart(); + DstCoeff = tmpAddRec->getStepRecurrence(*SE); + DstLoop = tmpAddRec->getLoop(); + SrcConst = Src; + SrcCoeff = SE->getNegativeSCEV(DstAddRec->getStepRecurrence(*SE)); + SrcLoop = DstAddRec->getLoop(); + } + else + llvm_unreachable("RDIV reached by surprising SCEVs"); + } + else + llvm_unreachable("RDIV expected at least one AddRec"); + return exactRDIVtest(SrcCoeff, DstCoeff, + SrcConst, DstConst, + SrcLoop, DstLoop, + Result) || + gcdMIVtest(Src, Dst, Result) || + symbolicRDIVtest(SrcCoeff, DstCoeff, + SrcConst, DstConst, + SrcLoop, DstLoop); +} + + +// Tests the single-subscript MIV pair (Src and Dst) for dependence. +// Return true if dependence disproved. +// Can sometimes refine direction vectors. +bool DependenceAnalysis::testMIV(const SCEV *Src, + const SCEV *Dst, + const SmallBitVector &Loops, + FullDependence &Result) const { + DEBUG(dbgs() << " src = " << *Src << "\n"); + DEBUG(dbgs() << " dst = " << *Dst << "\n"); + Result.Consistent = false; + return gcdMIVtest(Src, Dst, Result) || + banerjeeMIVtest(Src, Dst, Loops, Result); +} + + +// Given a product, e.g., 10*X*Y, returns the first constant operand, +// in this case 10. If there is no constant part, returns NULL. +static +const SCEVConstant *getConstantPart(const SCEVMulExpr *Product) { + for (unsigned Op = 0, Ops = Product->getNumOperands(); Op < Ops; Op++) { + if (const SCEVConstant *Constant = dyn_cast(Product->getOperand(Op))) + return Constant; + } + return NULL; +} + + +//===----------------------------------------------------------------------===// +// gcdMIVtest - +// Tests an MIV subscript pair for dependence. +// Returns true if any possible dependence is disproved. +// Marks the result as inconsistent. +// Can sometimes disprove the equal direction for 1 or more loops, +// as discussed in Michael Wolfe's book, +// High Performance Compilers for Parallel Computing, page 235. +// +// We spend some effort (code!) 
to handle cases like +// [10*i + 5*N*j + 15*M + 6], where i and j are induction variables, +// but M and N are just loop-invariant variables. +// This should help us handle linearized subscripts; +// also makes this test a useful backup to the various SIV tests. +// +// It occurs to me that the presence of loop-invariant variables +// changes the nature of the test from "greatest common divisor" +// to "a common divisor!" +bool DependenceAnalysis::gcdMIVtest(const SCEV *Src, + const SCEV *Dst, + FullDependence &Result) const { + DEBUG(dbgs() << "starting gcd\n"); + ++GCDapplications; + unsigned BitWidth = Src->getType()->getIntegerBitWidth(); + APInt RunningGCD = APInt::getNullValue(BitWidth); + + // Examine Src coefficients. + // Compute running GCD and record source constant. + // Because we're looking for the constant at the end of the chain, + // we can't quit the loop just because the GCD == 1. + const SCEV *Coefficients = Src; + while (const SCEVAddRecExpr *AddRec = + dyn_cast(Coefficients)) { + const SCEV *Coeff = AddRec->getStepRecurrence(*SE); + const SCEVConstant *Constant = dyn_cast(Coeff); + if (const SCEVMulExpr *Product = dyn_cast(Coeff)) + // If the coefficient is the product of a constant and other stuff, + // we can use the constant in the GCD computation. + Constant = getConstantPart(Product); + if (!Constant) + return false; + APInt ConstCoeff = Constant->getValue()->getValue(); + RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); + Coefficients = AddRec->getStart(); + } + const SCEV *SrcConst = Coefficients; + + // Examine Dst coefficients. + // Compute running GCD and record destination constant. + // Because we're looking for the constant at the end of the chain, + // we can't quit the loop just because the GCD == 1. + Coefficients = Dst; + while (const SCEVAddRecExpr *AddRec = + dyn_cast(Coefficients)) { + const SCEV *Coeff = AddRec->getStepRecurrence(*SE); + const SCEVConstant *Constant = dyn_cast(Coeff); + if (const SCEVMulExpr *Product = dyn_cast(Coeff)) + // If the coefficient is the product of a constant and other stuff, + // we can use the constant in the GCD computation. + Constant = getConstantPart(Product); + if (!Constant) + return false; + APInt ConstCoeff = Constant->getValue()->getValue(); + RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); + Coefficients = AddRec->getStart(); + } + const SCEV *DstConst = Coefficients; + + APInt ExtraGCD = APInt::getNullValue(BitWidth); + const SCEV *Delta = SE->getMinusSCEV(DstConst, SrcConst); + DEBUG(dbgs() << " Delta = " << *Delta << "\n"); + const SCEVConstant *Constant = dyn_cast(Delta); + if (const SCEVAddExpr *Sum = dyn_cast(Delta)) { + // If Delta is a sum of products, we may be able to make further progress. + for (unsigned Op = 0, Ops = Sum->getNumOperands(); Op < Ops; Op++) { + const SCEV *Operand = Sum->getOperand(Op); + if (isa(Operand)) { + assert(!Constant && "Surprised to find multiple constants"); + Constant = cast(Operand); + } + else if (const SCEVMulExpr *Product = dyn_cast(Operand)) { + // Search for constant operand to participate in GCD; + // If none found; return false. 
+ const SCEVConstant *ConstOp = getConstantPart(Product); + if (!ConstOp) + return false; + APInt ConstOpValue = ConstOp->getValue()->getValue(); + ExtraGCD = APIntOps::GreatestCommonDivisor(ExtraGCD, + ConstOpValue.abs()); + } + else + return false; + } + } + if (!Constant) + return false; + APInt ConstDelta = cast(Constant)->getValue()->getValue(); + DEBUG(dbgs() << " ConstDelta = " << ConstDelta << "\n"); + if (ConstDelta == 0) + return false; + RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ExtraGCD); + DEBUG(dbgs() << " RunningGCD = " << RunningGCD << "\n"); + APInt Remainder = ConstDelta.srem(RunningGCD); + if (Remainder != 0) { + ++GCDindependence; + return true; + } + + // Try to disprove equal directions. + // For example, given a subscript pair [3*i + 2*j] and [i' + 2*j' - 1], + // the code above can't disprove the dependence because the GCD = 1. + // So we consider what happen if i = i' and what happens if j = j'. + // If i = i', we can simplify the subscript to [2*i + 2*j] and [2*j' - 1], + // which is infeasible, so we can disallow the = direction for the i level. + // Setting j = j' doesn't help matters, so we end up with a direction vector + // of [<>, *] + // + // Given A[5*i + 10*j*M + 9*M*N] and A[15*i + 20*j*M - 21*N*M + 5], + // we need to remember that the constant part is 5 and the RunningGCD should + // be initialized to ExtraGCD = 30. + DEBUG(dbgs() << " ExtraGCD = " << ExtraGCD << '\n'); + + bool Improved = false; + Coefficients = Src; + while (const SCEVAddRecExpr *AddRec = + dyn_cast(Coefficients)) { + Coefficients = AddRec->getStart(); + const Loop *CurLoop = AddRec->getLoop(); + RunningGCD = ExtraGCD; + const SCEV *SrcCoeff = AddRec->getStepRecurrence(*SE); + const SCEV *DstCoeff = SE->getMinusSCEV(SrcCoeff, SrcCoeff); + const SCEV *Inner = Src; + while (RunningGCD != 1 && isa(Inner)) { + AddRec = cast(Inner); + const SCEV *Coeff = AddRec->getStepRecurrence(*SE); + if (CurLoop == AddRec->getLoop()) + ; // SrcCoeff == Coeff + else { + if (const SCEVMulExpr *Product = dyn_cast(Coeff)) + // If the coefficient is the product of a constant and other stuff, + // we can use the constant in the GCD computation. + Constant = getConstantPart(Product); + else + Constant = cast(Coeff); + APInt ConstCoeff = Constant->getValue()->getValue(); + RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); + } + Inner = AddRec->getStart(); + } + Inner = Dst; + while (RunningGCD != 1 && isa(Inner)) { + AddRec = cast(Inner); + const SCEV *Coeff = AddRec->getStepRecurrence(*SE); + if (CurLoop == AddRec->getLoop()) + DstCoeff = Coeff; + else { + if (const SCEVMulExpr *Product = dyn_cast(Coeff)) + // If the coefficient is the product of a constant and other stuff, + // we can use the constant in the GCD computation. + Constant = getConstantPart(Product); + else + Constant = cast(Coeff); + APInt ConstCoeff = Constant->getValue()->getValue(); + RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); + } + Inner = AddRec->getStart(); + } + Delta = SE->getMinusSCEV(SrcCoeff, DstCoeff); + if (const SCEVMulExpr *Product = dyn_cast(Delta)) + // If the coefficient is the product of a constant and other stuff, + // we can use the constant in the GCD computation. + Constant = getConstantPart(Product); + else if (isa(Delta)) + Constant = cast(Delta); + else { + // The difference of the two coefficients might not be a product + // or constant, in which case we give up on this direction. 
+ continue; + } + APInt ConstCoeff = Constant->getValue()->getValue(); + RunningGCD = APIntOps::GreatestCommonDivisor(RunningGCD, ConstCoeff.abs()); + DEBUG(dbgs() << "\tRunningGCD = " << RunningGCD << "\n"); + if (RunningGCD != 0) { + Remainder = ConstDelta.srem(RunningGCD); + DEBUG(dbgs() << "\tRemainder = " << Remainder << "\n"); + if (Remainder != 0) { + unsigned Level = mapSrcLoop(CurLoop); + Result.DV[Level - 1].Direction &= unsigned(~Dependence::DVEntry::EQ); + Improved = true; + } + } + } + if (Improved) + ++GCDsuccesses; + DEBUG(dbgs() << "all done\n"); + return false; +} + + +//===----------------------------------------------------------------------===// +// banerjeeMIVtest - +// Use Banerjee's Inequalities to test an MIV subscript pair. +// (Wolfe, in the race-car book, calls this the Extreme Value Test.) +// Generally follows the discussion in Section 2.5.2 of +// +// Optimizing Supercompilers for Supercomputers +// Michael Wolfe +// +// The inequalities given on page 25 are simplified in that loops are +// normalized so that the lower bound is always 0 and the stride is always 1. +// For example, Wolfe gives +// +// LB^<_k = (A^-_k - B_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k +// +// where A_k is the coefficient of the kth index in the source subscript, +// B_k is the coefficient of the kth index in the destination subscript, +// U_k is the upper bound of the kth index, L_k is the lower bound of the Kth +// index, and N_k is the stride of the kth index. Since all loops are normalized +// by the SCEV package, N_k = 1 and L_k = 0, allowing us to simplify the +// equation to +// +// LB^<_k = (A^-_k - B_k)^- (U_k - 0 - 1) + (A_k - B_k)0 - B_k 1 +// = (A^-_k - B_k)^- (U_k - 1) - B_k +// +// Similar simplifications are possible for the other equations. +// +// When we can't determine the number of iterations for a loop, +// we use NULL as an indicator for the worst case, infinity. +// When computing the upper bound, NULL denotes +inf; +// for the lower bound, NULL denotes -inf. +// +// Return true if dependence disproved. +bool DependenceAnalysis::banerjeeMIVtest(const SCEV *Src, + const SCEV *Dst, + const SmallBitVector &Loops, + FullDependence &Result) const { + DEBUG(dbgs() << "starting Banerjee\n"); + ++BanerjeeApplications; + DEBUG(dbgs() << " Src = " << *Src << '\n'); + const SCEV *A0; + CoefficientInfo *A = collectCoeffInfo(Src, true, A0); + DEBUG(dbgs() << " Dst = " << *Dst << '\n'); + const SCEV *B0; + CoefficientInfo *B = collectCoeffInfo(Dst, false, B0); + BoundInfo *Bound = new BoundInfo[MaxLevels + 1]; + const SCEV *Delta = SE->getMinusSCEV(B0, A0); + DEBUG(dbgs() << "\tDelta = " << *Delta << '\n'); + + // Compute bounds for all the * directions. + DEBUG(dbgs() << "\tBounds[*]\n"); + for (unsigned K = 1; K <= MaxLevels; ++K) { + Bound[K].Iterations = A[K].Iterations ? A[K].Iterations : B[K].Iterations; + Bound[K].Direction = Dependence::DVEntry::ALL; + Bound[K].DirSet = Dependence::DVEntry::NONE; + findBoundsALL(A, B, Bound, K); +#ifndef NDEBUG + DEBUG(dbgs() << "\t " << K << '\t'); + if (Bound[K].Lower[Dependence::DVEntry::ALL]) + DEBUG(dbgs() << *Bound[K].Lower[Dependence::DVEntry::ALL] << '\t'); + else + DEBUG(dbgs() << "-inf\t"); + if (Bound[K].Upper[Dependence::DVEntry::ALL]) + DEBUG(dbgs() << *Bound[K].Upper[Dependence::DVEntry::ALL] << '\n'); + else + DEBUG(dbgs() << "+inf\n"); +#endif + } + + // Test the *, *, *, ... case. 
+ bool Disproved = false; + if (testBounds(Dependence::DVEntry::ALL, 0, Bound, Delta)) { + // Explore the direction vector hierarchy. + unsigned DepthExpanded = 0; + unsigned NewDeps = exploreDirections(1, A, B, Bound, + Loops, DepthExpanded, Delta); + if (NewDeps > 0) { + bool Improved = false; + for (unsigned K = 1; K <= CommonLevels; ++K) { + if (Loops[K]) { + unsigned Old = Result.DV[K - 1].Direction; + Result.DV[K - 1].Direction = Old & Bound[K].DirSet; + Improved |= Old != Result.DV[K - 1].Direction; + if (!Result.DV[K - 1].Direction) { + Improved = false; + Disproved = true; + break; + } + } + } + if (Improved) + ++BanerjeeSuccesses; + } + else { + ++BanerjeeIndependence; + Disproved = true; + } + } + else { + ++BanerjeeIndependence; + Disproved = true; + } + delete [] Bound; + delete [] A; + delete [] B; + return Disproved; +} + + +// Hierarchically expands the direction vector +// search space, combining the directions of discovered dependences +// in the DirSet field of Bound. Returns the number of distinct +// dependences discovered. If the dependence is disproved, +// it will return 0. +unsigned DependenceAnalysis::exploreDirections(unsigned Level, + CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + const SmallBitVector &Loops, + unsigned &DepthExpanded, + const SCEV *Delta) const { + if (Level > CommonLevels) { + // record result + DEBUG(dbgs() << "\t["); + for (unsigned K = 1; K <= CommonLevels; ++K) { + if (Loops[K]) { + Bound[K].DirSet |= Bound[K].Direction; +#ifndef NDEBUG + switch (Bound[K].Direction) { + case Dependence::DVEntry::LT: + DEBUG(dbgs() << " <"); + break; + case Dependence::DVEntry::EQ: + DEBUG(dbgs() << " ="); + break; + case Dependence::DVEntry::GT: + DEBUG(dbgs() << " >"); + break; + case Dependence::DVEntry::ALL: + DEBUG(dbgs() << " *"); + break; + default: + llvm_unreachable("unexpected Bound[K].Direction"); + } +#endif + } + } + DEBUG(dbgs() << " ]\n"); + return 1; + } + if (Loops[Level]) { + if (Level > DepthExpanded) { + DepthExpanded = Level; + // compute bounds for <, =, > at current level + findBoundsLT(A, B, Bound, Level); + findBoundsGT(A, B, Bound, Level); + findBoundsEQ(A, B, Bound, Level); +#ifndef NDEBUG + DEBUG(dbgs() << "\tBound for level = " << Level << '\n'); + DEBUG(dbgs() << "\t <\t"); + if (Bound[Level].Lower[Dependence::DVEntry::LT]) + DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::LT] << '\t'); + else + DEBUG(dbgs() << "-inf\t"); + if (Bound[Level].Upper[Dependence::DVEntry::LT]) + DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::LT] << '\n'); + else + DEBUG(dbgs() << "+inf\n"); + DEBUG(dbgs() << "\t =\t"); + if (Bound[Level].Lower[Dependence::DVEntry::EQ]) + DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::EQ] << '\t'); + else + DEBUG(dbgs() << "-inf\t"); + if (Bound[Level].Upper[Dependence::DVEntry::EQ]) + DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::EQ] << '\n'); + else + DEBUG(dbgs() << "+inf\n"); + DEBUG(dbgs() << "\t >\t"); + if (Bound[Level].Lower[Dependence::DVEntry::GT]) + DEBUG(dbgs() << *Bound[Level].Lower[Dependence::DVEntry::GT] << '\t'); + else + DEBUG(dbgs() << "-inf\t"); + if (Bound[Level].Upper[Dependence::DVEntry::GT]) + DEBUG(dbgs() << *Bound[Level].Upper[Dependence::DVEntry::GT] << '\n'); + else + DEBUG(dbgs() << "+inf\n"); +#endif + } + + unsigned NewDeps = 0; + + // test bounds for <, *, *, ... 
+ if (testBounds(Dependence::DVEntry::LT, Level, Bound, Delta)) + NewDeps += exploreDirections(Level + 1, A, B, Bound, + Loops, DepthExpanded, Delta); + + // Test bounds for =, *, *, ... + if (testBounds(Dependence::DVEntry::EQ, Level, Bound, Delta)) + NewDeps += exploreDirections(Level + 1, A, B, Bound, + Loops, DepthExpanded, Delta); + + // test bounds for >, *, *, ... + if (testBounds(Dependence::DVEntry::GT, Level, Bound, Delta)) + NewDeps += exploreDirections(Level + 1, A, B, Bound, + Loops, DepthExpanded, Delta); + + Bound[Level].Direction = Dependence::DVEntry::ALL; + return NewDeps; + } + else + return exploreDirections(Level + 1, A, B, Bound, Loops, DepthExpanded, Delta); +} + + +// Returns true iff the current bounds are plausible. +bool DependenceAnalysis::testBounds(unsigned char DirKind, + unsigned Level, + BoundInfo *Bound, + const SCEV *Delta) const { + Bound[Level].Direction = DirKind; + if (const SCEV *LowerBound = getLowerBound(Bound)) + if (isKnownPredicate(CmpInst::ICMP_SGT, LowerBound, Delta)) + return false; + if (const SCEV *UpperBound = getUpperBound(Bound)) + if (isKnownPredicate(CmpInst::ICMP_SGT, Delta, UpperBound)) + return false; + return true; +} + + +// Computes the upper and lower bounds for level K +// using the * direction. Records them in Bound. +// Wolfe gives the equations +// +// LB^*_k = (A^-_k - B^+_k)(U_k - L_k) + (A_k - B_k)L_k +// UB^*_k = (A^+_k - B^-_k)(U_k - L_k) + (A_k - B_k)L_k +// +// Since we normalize loops, we can simplify these equations to +// +// LB^*_k = (A^-_k - B^+_k)U_k +// UB^*_k = (A^+_k - B^-_k)U_k +// +// We must be careful to handle the case where the upper bound is unknown. +// Note that the lower bound is always <= 0 +// and the upper bound is always >= 0. +void DependenceAnalysis::findBoundsALL(CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + unsigned K) const { + Bound[K].Lower[Dependence::DVEntry::ALL] = NULL; // Default value = -infinity. + Bound[K].Upper[Dependence::DVEntry::ALL] = NULL; // Default value = +infinity. + if (Bound[K].Iterations) { + Bound[K].Lower[Dependence::DVEntry::ALL] = + SE->getMulExpr(SE->getMinusSCEV(A[K].NegPart, B[K].PosPart), + Bound[K].Iterations); + Bound[K].Upper[Dependence::DVEntry::ALL] = + SE->getMulExpr(SE->getMinusSCEV(A[K].PosPart, B[K].NegPart), + Bound[K].Iterations); + } + else { + // If the difference is 0, we won't need to know the number of iterations. + if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].NegPart, B[K].PosPart)) + Bound[K].Lower[Dependence::DVEntry::ALL] = + SE->getConstant(A[K].Coeff->getType(), 0); + if (isKnownPredicate(CmpInst::ICMP_EQ, A[K].PosPart, B[K].NegPart)) + Bound[K].Upper[Dependence::DVEntry::ALL] = + SE->getConstant(A[K].Coeff->getType(), 0); + } +} + + +// Computes the upper and lower bounds for level K +// using the = direction. Records them in Bound. +// Wolfe gives the equations +// +// LB^=_k = (A_k - B_k)^- (U_k - L_k) + (A_k - B_k)L_k +// UB^=_k = (A_k - B_k)^+ (U_k - L_k) + (A_k - B_k)L_k +// +// Since we normalize loops, we can simplify these equations to +// +// LB^=_k = (A_k - B_k)^- U_k +// UB^=_k = (A_k - B_k)^+ U_k +// +// We must be careful to handle the case where the upper bound is unknown. +// Note that the lower bound is always <= 0 +// and the upper bound is always >= 0. +void DependenceAnalysis::findBoundsEQ(CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + unsigned K) const { + Bound[K].Lower[Dependence::DVEntry::EQ] = NULL; // Default value = -infinity. 
+ Bound[K].Upper[Dependence::DVEntry::EQ] = NULL; // Default value = +infinity. + if (Bound[K].Iterations) { + const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff); + const SCEV *NegativePart = getNegativePart(Delta); + Bound[K].Lower[Dependence::DVEntry::EQ] = + SE->getMulExpr(NegativePart, Bound[K].Iterations); + const SCEV *PositivePart = getPositivePart(Delta); + Bound[K].Upper[Dependence::DVEntry::EQ] = + SE->getMulExpr(PositivePart, Bound[K].Iterations); + } + else { + // If the positive/negative part of the difference is 0, + // we won't need to know the number of iterations. + const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff); + const SCEV *NegativePart = getNegativePart(Delta); + if (NegativePart->isZero()) + Bound[K].Lower[Dependence::DVEntry::EQ] = NegativePart; // Zero + const SCEV *PositivePart = getPositivePart(Delta); + if (PositivePart->isZero()) + Bound[K].Upper[Dependence::DVEntry::EQ] = PositivePart; // Zero + } +} + + +// Computes the upper and lower bounds for level K +// using the < direction. Records them in Bound. +// Wolfe gives the equations +// +// LB^<_k = (A^-_k - B_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k +// UB^<_k = (A^+_k - B_k)^+ (U_k - L_k - N_k) + (A_k - B_k)L_k - B_k N_k +// +// Since we normalize loops, we can simplify these equations to +// +// LB^<_k = (A^-_k - B_k)^- (U_k - 1) - B_k +// UB^<_k = (A^+_k - B_k)^+ (U_k - 1) - B_k +// +// We must be careful to handle the case where the upper bound is unknown. +void DependenceAnalysis::findBoundsLT(CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + unsigned K) const { + Bound[K].Lower[Dependence::DVEntry::LT] = NULL; // Default value = -infinity. + Bound[K].Upper[Dependence::DVEntry::LT] = NULL; // Default value = +infinity. + if (Bound[K].Iterations) { + const SCEV *Iter_1 = + SE->getMinusSCEV(Bound[K].Iterations, + SE->getConstant(Bound[K].Iterations->getType(), 1)); + const SCEV *NegPart = + getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff)); + Bound[K].Lower[Dependence::DVEntry::LT] = + SE->getMinusSCEV(SE->getMulExpr(NegPart, Iter_1), B[K].Coeff); + const SCEV *PosPart = + getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff)); + Bound[K].Upper[Dependence::DVEntry::LT] = + SE->getMinusSCEV(SE->getMulExpr(PosPart, Iter_1), B[K].Coeff); + } + else { + // If the positive/negative part of the difference is 0, + // we won't need to know the number of iterations. + const SCEV *NegPart = + getNegativePart(SE->getMinusSCEV(A[K].NegPart, B[K].Coeff)); + if (NegPart->isZero()) + Bound[K].Lower[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff); + const SCEV *PosPart = + getPositivePart(SE->getMinusSCEV(A[K].PosPart, B[K].Coeff)); + if (PosPart->isZero()) + Bound[K].Upper[Dependence::DVEntry::LT] = SE->getNegativeSCEV(B[K].Coeff); + } +} + + +// Computes the upper and lower bounds for level K +// using the > direction. Records them in Bound. +// Wolfe gives the equations +// +// LB^>_k = (A_k - B^+_k)^- (U_k - L_k - N_k) + (A_k - B_k)L_k + A_k N_k +// UB^>_k = (A_k - B^-_k)^+ (U_k - L_k - N_k) + (A_k - B_k)L_k + A_k N_k +// +// Since we normalize loops, we can simplify these equations to +// +// LB^>_k = (A_k - B^+_k)^- (U_k - 1) + A_k +// UB^>_k = (A_k - B^-_k)^+ (U_k - 1) + A_k +// +// We must be careful to handle the case where the upper bound is unknown. 
+void DependenceAnalysis::findBoundsGT(CoefficientInfo *A, + CoefficientInfo *B, + BoundInfo *Bound, + unsigned K) const { + Bound[K].Lower[Dependence::DVEntry::GT] = NULL; // Default value = -infinity. + Bound[K].Upper[Dependence::DVEntry::GT] = NULL; // Default value = +infinity. + if (Bound[K].Iterations) { + const SCEV *Iter_1 = + SE->getMinusSCEV(Bound[K].Iterations, + SE->getConstant(Bound[K].Iterations->getType(), 1)); + const SCEV *NegPart = + getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart)); + Bound[K].Lower[Dependence::DVEntry::GT] = + SE->getAddExpr(SE->getMulExpr(NegPart, Iter_1), A[K].Coeff); + const SCEV *PosPart = + getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart)); + Bound[K].Upper[Dependence::DVEntry::GT] = + SE->getAddExpr(SE->getMulExpr(PosPart, Iter_1), A[K].Coeff); + } + else { + // If the positive/negative part of the difference is 0, + // we won't need to know the number of iterations. + const SCEV *NegPart = getNegativePart(SE->getMinusSCEV(A[K].Coeff, B[K].PosPart)); + if (NegPart->isZero()) + Bound[K].Lower[Dependence::DVEntry::GT] = A[K].Coeff; + const SCEV *PosPart = getPositivePart(SE->getMinusSCEV(A[K].Coeff, B[K].NegPart)); + if (PosPart->isZero()) + Bound[K].Upper[Dependence::DVEntry::GT] = A[K].Coeff; + } +} + + +// X^+ = max(X, 0) +const SCEV *DependenceAnalysis::getPositivePart(const SCEV *X) const { + return SE->getSMaxExpr(X, SE->getConstant(X->getType(), 0)); +} + + +// X^- = min(X, 0) +const SCEV *DependenceAnalysis::getNegativePart(const SCEV *X) const { + return SE->getSMinExpr(X, SE->getConstant(X->getType(), 0)); +} + + +// Walks through the subscript, +// collecting each coefficient, the associated loop bounds, +// and recording its positive and negative parts for later use. +DependenceAnalysis::CoefficientInfo * +DependenceAnalysis::collectCoeffInfo(const SCEV *Subscript, + bool SrcFlag, + const SCEV *&Constant) const { + const SCEV *Zero = SE->getConstant(Subscript->getType(), 0); + CoefficientInfo *CI = new CoefficientInfo[MaxLevels + 1]; + for (unsigned K = 1; K <= MaxLevels; ++K) { + CI[K].Coeff = Zero; + CI[K].PosPart = Zero; + CI[K].NegPart = Zero; + CI[K].Iterations = NULL; + } + while (const SCEVAddRecExpr *AddRec = dyn_cast(Subscript)) { + const Loop *L = AddRec->getLoop(); + unsigned K = SrcFlag ? mapSrcLoop(L) : mapDstLoop(L); + CI[K].Coeff = AddRec->getStepRecurrence(*SE); + CI[K].PosPart = getPositivePart(CI[K].Coeff); + CI[K].NegPart = getNegativePart(CI[K].Coeff); + CI[K].Iterations = collectUpperBound(L, Subscript->getType()); + Subscript = AddRec->getStart(); + } + Constant = Subscript; +#ifndef NDEBUG + DEBUG(dbgs() << "\tCoefficient Info\n"); + for (unsigned K = 1; K <= MaxLevels; ++K) { + DEBUG(dbgs() << "\t " << K << "\t" << *CI[K].Coeff); + DEBUG(dbgs() << "\tPos Part = "); + DEBUG(dbgs() << *CI[K].PosPart); + DEBUG(dbgs() << "\tNeg Part = "); + DEBUG(dbgs() << *CI[K].NegPart); + DEBUG(dbgs() << "\tUpper Bound = "); + if (CI[K].Iterations) + DEBUG(dbgs() << *CI[K].Iterations); + else + DEBUG(dbgs() << "+inf"); + DEBUG(dbgs() << '\n'); + } + DEBUG(dbgs() << "\t Constant = " << *Subscript << '\n'); +#endif + return CI; +} + + +// Looks through all the bounds info and +// computes the lower bound given the current direction settings +// at each level. If the lower bound for any level is -inf, +// the result is -inf. 
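[Editorial note, not part of the imported patch] The X^+ / X^- decomposition above drives the Banerjee bound computations. A minimal sketch, using plain integers in place of SCEVs and a single normalized loop 0 <= i, i' <= U (all names below are invented for the illustration): the "*" direction bounds are LB = (A^- - B^+)U and UB = (A^+ - B^-)U, and a dependence between A*i + A0 and B*i' + B0 is plausible only when LB <= B0 - A0 <= UB.

#include <algorithm>
#include <iostream>

// X^+ = max(X, 0) and X^- = min(X, 0), as in getPositivePart/getNegativePart.
static long posPart(long X) { return std::max(X, 0L); }
static long negPart(long X) { return std::min(X, 0L); }

// Banerjee "*" direction test for one normalized loop 0 <= i, i' <= U:
// source subscript A*i + A0, destination subscript B*i' + B0.
static bool plausibleAllDirections(long A, long B, long A0, long B0, long U) {
  long Delta = B0 - A0;
  long LB = (negPart(A) - posPart(B)) * U;  // LB^*_k = (A^- - B^+) U_k
  long UB = (posPart(A) - negPart(B)) * U;  // UB^*_k = (A^+ - B^-) U_k
  return LB <= Delta && Delta <= UB;        // otherwise dependence disproved
}

int main() {
  // A[2*i] vs. A[2*i' + 1], 0 <= i <= 9: Delta = 1 lies in [-18, 18],
  // so Banerjee alone cannot disprove the dependence (the GCD test can).
  std::cout << plausibleAllDirections(2, 2, 0, 1, 9) << '\n';    // 1
  // A[i] vs. A[i' + 100], 0 <= i <= 9: Delta = 100 > UB = 9, disproved.
  std::cout << plausibleAllDirections(1, 1, 0, 100, 9) << '\n';  // 0
  return 0;
}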
+const SCEV *DependenceAnalysis::getLowerBound(BoundInfo *Bound) const { + const SCEV *Sum = Bound[1].Lower[Bound[1].Direction]; + for (unsigned K = 2; Sum && K <= MaxLevels; ++K) { + if (Bound[K].Lower[Bound[K].Direction]) + Sum = SE->getAddExpr(Sum, Bound[K].Lower[Bound[K].Direction]); + else + Sum = NULL; + } + return Sum; +} + + +// Looks through all the bounds info and +// computes the upper bound given the current direction settings +// at each level. If the upper bound at any level is +inf, +// the result is +inf. +const SCEV *DependenceAnalysis::getUpperBound(BoundInfo *Bound) const { + const SCEV *Sum = Bound[1].Upper[Bound[1].Direction]; + for (unsigned K = 2; Sum && K <= MaxLevels; ++K) { + if (Bound[K].Upper[Bound[K].Direction]) + Sum = SE->getAddExpr(Sum, Bound[K].Upper[Bound[K].Direction]); + else + Sum = NULL; + } + return Sum; +} + + +//===----------------------------------------------------------------------===// +// Constraint manipulation for Delta test. + +// Given a linear SCEV, +// return the coefficient (the step) +// corresponding to the specified loop. +// If there isn't one, return 0. +// For example, given a*i + b*j + c*k, zeroing the coefficient +// corresponding to the j loop would yield b. +const SCEV *DependenceAnalysis::findCoefficient(const SCEV *Expr, + const Loop *TargetLoop) const { + const SCEVAddRecExpr *AddRec = dyn_cast(Expr); + if (!AddRec) + return SE->getConstant(Expr->getType(), 0); + if (AddRec->getLoop() == TargetLoop) + return AddRec->getStepRecurrence(*SE); + return findCoefficient(AddRec->getStart(), TargetLoop); +} + + +// Given a linear SCEV, +// return the SCEV given by zeroing out the coefficient +// corresponding to the specified loop. +// For example, given a*i + b*j + c*k, zeroing the coefficient +// corresponding to the j loop would yield a*i + c*k. +const SCEV *DependenceAnalysis::zeroCoefficient(const SCEV *Expr, + const Loop *TargetLoop) const { + const SCEVAddRecExpr *AddRec = dyn_cast(Expr); + if (!AddRec) + return Expr; // ignore + if (AddRec->getLoop() == TargetLoop) + return AddRec->getStart(); + return SE->getAddRecExpr(zeroCoefficient(AddRec->getStart(), TargetLoop), + AddRec->getStepRecurrence(*SE), + AddRec->getLoop(), + AddRec->getNoWrapFlags()); +} + + +// Given a linear SCEV Expr, +// return the SCEV given by adding some Value to the +// coefficient corresponding to the specified TargetLoop. +// For example, given a*i + b*j + c*k, adding 1 to the coefficient +// corresponding to the j loop would yield a*i + (b+1)*j + c*k. +const SCEV *DependenceAnalysis::addToCoefficient(const SCEV *Expr, + const Loop *TargetLoop, + const SCEV *Value) const { + const SCEVAddRecExpr *AddRec = dyn_cast(Expr); + if (!AddRec) // create a new addRec + return SE->getAddRecExpr(Expr, + Value, + TargetLoop, + SCEV::FlagAnyWrap); // Worst case, with no info. + if (AddRec->getLoop() == TargetLoop) { + const SCEV *Sum = SE->getAddExpr(AddRec->getStepRecurrence(*SE), Value); + if (Sum->isZero()) + return AddRec->getStart(); + return SE->getAddRecExpr(AddRec->getStart(), + Sum, + AddRec->getLoop(), + AddRec->getNoWrapFlags()); + } + return SE->getAddRecExpr(addToCoefficient(AddRec->getStart(), + TargetLoop, Value), + AddRec->getStepRecurrence(*SE), + AddRec->getLoop(), + AddRec->getNoWrapFlags()); +} + + +// Review the constraints, looking for opportunities +// to simplify a subscript pair (Src and Dst). +// Return true if some simplification occurs. 
+// If the simplification isn't exact (that is, if it is conservative +// in terms of dependence), set consistent to false. +// Corresponds to Figure 5 from the paper +// +// Practical Dependence Testing +// Goff, Kennedy, Tseng +// PLDI 1991 +bool DependenceAnalysis::propagate(const SCEV *&Src, + const SCEV *&Dst, + SmallBitVector &Loops, + SmallVector &Constraints, + bool &Consistent) { + bool Result = false; + for (int LI = Loops.find_first(); LI >= 0; LI = Loops.find_next(LI)) { + DEBUG(dbgs() << "\t Constraint[" << LI << "] is"); + DEBUG(Constraints[LI].dump(dbgs())); + if (Constraints[LI].isDistance()) + Result |= propagateDistance(Src, Dst, Constraints[LI], Consistent); + else if (Constraints[LI].isLine()) + Result |= propagateLine(Src, Dst, Constraints[LI], Consistent); + else if (Constraints[LI].isPoint()) + Result |= propagatePoint(Src, Dst, Constraints[LI]); + } + return Result; +} + + +// Attempt to propagate a distance +// constraint into a subscript pair (Src and Dst). +// Return true if some simplification occurs. +// If the simplification isn't exact (that is, if it is conservative +// in terms of dependence), set consistent to false. +bool DependenceAnalysis::propagateDistance(const SCEV *&Src, + const SCEV *&Dst, + Constraint &CurConstraint, + bool &Consistent) { + const Loop *CurLoop = CurConstraint.getAssociatedLoop(); + DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n"); + const SCEV *A_K = findCoefficient(Src, CurLoop); + if (A_K->isZero()) + return false; + const SCEV *DA_K = SE->getMulExpr(A_K, CurConstraint.getD()); + Src = SE->getMinusSCEV(Src, DA_K); + Src = zeroCoefficient(Src, CurLoop); + DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n"); + DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n"); + Dst = addToCoefficient(Dst, CurLoop, SE->getNegativeSCEV(A_K)); + DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n"); + if (!findCoefficient(Dst, CurLoop)->isZero()) + Consistent = false; + return true; +} + + +// Attempt to propagate a line +// constraint into a subscript pair (Src and Dst). +// Return true if some simplification occurs. +// If the simplification isn't exact (that is, if it is conservative +// in terms of dependence), set consistent to false. 
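[Editorial note, not part of the imported patch] As a concrete picture of the distance propagation above: with a known distance D on loop L, the L term is folded out of the source subscript and the destination's L coefficient is shifted by -A_L, mirroring the getMinusSCEV / zeroCoefficient / addToCoefficient sequence in propagateDistance. This is a sketch only; coefficient maps over named loops stand in for SCEVs and every name below is invented for the illustration.

#include <iostream>
#include <map>
#include <string>

// A subscript modelled as {loop -> coefficient} plus a constant term,
// e.g. 2*i + 3*j + 1 is {{"i",2},{"j",3}} with Const = 1.
struct ToySubscript {
  std::map<std::string, long> Coeff;
  long Const;
};

static long findCoeff(const ToySubscript &S, const std::string &L) {
  std::map<std::string, long>::const_iterator It = S.Coeff.find(L);
  return It == S.Coeff.end() ? 0 : It->second;
}

static void zeroCoeff(ToySubscript &S, const std::string &L) {
  S.Coeff.erase(L);
}

static void addToCoeff(ToySubscript &S, const std::string &L, long V) {
  S.Coeff[L] += V;
  if (S.Coeff[L] == 0)
    S.Coeff.erase(L);
}

// Mirrors the algebra of propagateDistance above for a constant distance D.
static void propagateDistanceToy(ToySubscript &Src, ToySubscript &Dst,
                                 const std::string &L, long D) {
  long A_L = findCoeff(Src, L);
  if (A_L == 0)
    return;
  Src.Const -= A_L * D;       // Src = Src - D*A_L
  zeroCoeff(Src, L);          // drop L from Src
  addToCoeff(Dst, L, -A_L);   // shift Dst's L coefficient by -A_L
}

int main() {
  ToySubscript Src = {{{"i", 2}, {"j", 3}}, 1};  // 2*i + 3*j + 1
  ToySubscript Dst = {{{"i", 2}}, 4};            // 2*i' + 4
  propagateDistanceToy(Src, Dst, "i", 5);        // constraint: distance 5 on i
  // Src becomes 3*j - 9, Dst becomes 4 (i eliminated from both).
  std::cout << Src.Const << ' ' << Dst.Const << '\n';  // -9 4
  return 0;
}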
+bool DependenceAnalysis::propagateLine(const SCEV *&Src, + const SCEV *&Dst, + Constraint &CurConstraint, + bool &Consistent) { + const Loop *CurLoop = CurConstraint.getAssociatedLoop(); + const SCEV *A = CurConstraint.getA(); + const SCEV *B = CurConstraint.getB(); + const SCEV *C = CurConstraint.getC(); + DEBUG(dbgs() << "\t\tA = " << *A << ", B = " << *B << ", C = " << *C << "\n"); + DEBUG(dbgs() << "\t\tSrc = " << *Src << "\n"); + DEBUG(dbgs() << "\t\tDst = " << *Dst << "\n"); + if (A->isZero()) { + const SCEVConstant *Bconst = dyn_cast(B); + const SCEVConstant *Cconst = dyn_cast(C); + if (!Bconst || !Cconst) return false; + APInt Beta = Bconst->getValue()->getValue(); + APInt Charlie = Cconst->getValue()->getValue(); + APInt CdivB = Charlie.sdiv(Beta); + assert(Charlie.srem(Beta) == 0 && "C should be evenly divisible by B"); + const SCEV *AP_K = findCoefficient(Dst, CurLoop); + // Src = SE->getAddExpr(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB))); + Src = SE->getMinusSCEV(Src, SE->getMulExpr(AP_K, SE->getConstant(CdivB))); + Dst = zeroCoefficient(Dst, CurLoop); + if (!findCoefficient(Src, CurLoop)->isZero()) + Consistent = false; + } + else if (B->isZero()) { + const SCEVConstant *Aconst = dyn_cast(A); + const SCEVConstant *Cconst = dyn_cast(C); + if (!Aconst || !Cconst) return false; + APInt Alpha = Aconst->getValue()->getValue(); + APInt Charlie = Cconst->getValue()->getValue(); + APInt CdivA = Charlie.sdiv(Alpha); + assert(Charlie.srem(Alpha) == 0 && "C should be evenly divisible by A"); + const SCEV *A_K = findCoefficient(Src, CurLoop); + Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, SE->getConstant(CdivA))); + Src = zeroCoefficient(Src, CurLoop); + if (!findCoefficient(Dst, CurLoop)->isZero()) + Consistent = false; + } + else if (isKnownPredicate(CmpInst::ICMP_EQ, A, B)) { + const SCEVConstant *Aconst = dyn_cast(A); + const SCEVConstant *Cconst = dyn_cast(C); + if (!Aconst || !Cconst) return false; + APInt Alpha = Aconst->getValue()->getValue(); + APInt Charlie = Cconst->getValue()->getValue(); + APInt CdivA = Charlie.sdiv(Alpha); + assert(Charlie.srem(Alpha) == 0 && "C should be evenly divisible by A"); + const SCEV *A_K = findCoefficient(Src, CurLoop); + Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, SE->getConstant(CdivA))); + Src = zeroCoefficient(Src, CurLoop); + Dst = addToCoefficient(Dst, CurLoop, A_K); + if (!findCoefficient(Dst, CurLoop)->isZero()) + Consistent = false; + } + else { + // paper is incorrect here, or perhaps just misleading + const SCEV *A_K = findCoefficient(Src, CurLoop); + Src = SE->getMulExpr(Src, A); + Dst = SE->getMulExpr(Dst, A); + Src = SE->getAddExpr(Src, SE->getMulExpr(A_K, C)); + Src = zeroCoefficient(Src, CurLoop); + Dst = addToCoefficient(Dst, CurLoop, SE->getMulExpr(A_K, B)); + if (!findCoefficient(Dst, CurLoop)->isZero()) + Consistent = false; + } + DEBUG(dbgs() << "\t\tnew Src = " << *Src << "\n"); + DEBUG(dbgs() << "\t\tnew Dst = " << *Dst << "\n"); + return true; +} + + +// Attempt to propagate a point +// constraint into a subscript pair (Src and Dst). +// Return true if some simplification occurs. 
+bool DependenceAnalysis::propagatePoint(const SCEV *&Src, + const SCEV *&Dst, + Constraint &CurConstraint) { + const Loop *CurLoop = CurConstraint.getAssociatedLoop(); + const SCEV *A_K = findCoefficient(Src, CurLoop); + const SCEV *AP_K = findCoefficient(Dst, CurLoop); + const SCEV *XA_K = SE->getMulExpr(A_K, CurConstraint.getX()); + const SCEV *YAP_K = SE->getMulExpr(AP_K, CurConstraint.getY()); + DEBUG(dbgs() << "\t\tSrc is " << *Src << "\n"); + Src = SE->getAddExpr(Src, SE->getMinusSCEV(XA_K, YAP_K)); + Src = zeroCoefficient(Src, CurLoop); + DEBUG(dbgs() << "\t\tnew Src is " << *Src << "\n"); + DEBUG(dbgs() << "\t\tDst is " << *Dst << "\n"); + Dst = zeroCoefficient(Dst, CurLoop); + DEBUG(dbgs() << "\t\tnew Dst is " << *Dst << "\n"); + return true; +} + + +// Update direction vector entry based on the current constraint. +void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level, + const Constraint &CurConstraint + ) const { + DEBUG(dbgs() << "\tUpdate direction, constraint ="); + DEBUG(CurConstraint.dump(dbgs())); + if (CurConstraint.isAny()) + ; // use defaults + else if (CurConstraint.isDistance()) { + // this one is consistent, the others aren't + Level.Scalar = false; + Level.Distance = CurConstraint.getD(); + unsigned NewDirection = Dependence::DVEntry::NONE; + if (!SE->isKnownNonZero(Level.Distance)) // if may be zero + NewDirection = Dependence::DVEntry::EQ; + if (!SE->isKnownNonPositive(Level.Distance)) // if may be positive + NewDirection |= Dependence::DVEntry::LT; + if (!SE->isKnownNonNegative(Level.Distance)) // if may be negative + NewDirection |= Dependence::DVEntry::GT; + Level.Direction &= NewDirection; + } + else if (CurConstraint.isLine()) { + Level.Scalar = false; + Level.Distance = NULL; + // direction should be accurate + } + else if (CurConstraint.isPoint()) { + Level.Scalar = false; + Level.Distance = NULL; + unsigned NewDirection = Dependence::DVEntry::NONE; + if (!isKnownPredicate(CmpInst::ICMP_NE, + CurConstraint.getY(), + CurConstraint.getX())) + // if X may be = Y + NewDirection |= Dependence::DVEntry::EQ; + if (!isKnownPredicate(CmpInst::ICMP_SLE, + CurConstraint.getY(), + CurConstraint.getX())) + // if Y may be > X + NewDirection |= Dependence::DVEntry::LT; + if (!isKnownPredicate(CmpInst::ICMP_SGE, + CurConstraint.getY(), + CurConstraint.getX())) + // if Y may be < X + NewDirection |= Dependence::DVEntry::GT; + Level.Direction &= NewDirection; + } + else + llvm_unreachable("constraint has unexpected kind"); +} + + +//===----------------------------------------------------------------------===// + +#ifndef NDEBUG +// For debugging purposes, dump a small bit vector to dbgs(). +static void dumpSmallBitVector(SmallBitVector &BV) { + dbgs() << "{"; + for (int VI = BV.find_first(); VI >= 0; VI = BV.find_next(VI)) { + dbgs() << VI; + if (BV.find_next(VI) >= 0) + dbgs() << ' '; + } + dbgs() << "}\n"; +} +#endif + + +// depends - +// Returns NULL if there is no dependence. +// Otherwise, return a Dependence with as many details as possible. +// Corresponds to Section 3.1 in the paper +// +// Practical Dependence Testing +// Goff, Kennedy, Tseng +// PLDI 1991 +// +// Care is required to keep the code below up to date w.r.t. this routine. 
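[Editorial note, not part of the imported patch] A hypothetical caller of the interface documented above. It assumes only the depends() signature, the DVEntry direction bits, and a getDirection() accessor like the one used on FullDependence below; the header path and the helper name are assumptions made for the sketch, and the level count is supplied by the caller.

#include "llvm/Analysis/DependenceAnalysis.h"  // assumed header for this pass
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Query a single pair of memory instructions and print the direction vector.
static void reportDependence(DependenceAnalysis &DA, Instruction *Src,
                             Instruction *Dst, unsigned CommonLevels) {
  Dependence *D = DA.depends(Src, Dst, /*PossiblyLoopIndependent=*/true);
  if (!D) {
    errs() << "no dependence\n";
    return;
  }
  for (unsigned Level = 1; Level <= CommonLevels; ++Level) {
    unsigned Dir = D->getDirection(Level);
    errs() << "level " << Level << ": "
           << ((Dir & Dependence::DVEntry::LT) ? "<" : "")
           << ((Dir & Dependence::DVEntry::EQ) ? "=" : "")
           << ((Dir & Dependence::DVEntry::GT) ? ">" : "") << '\n';
  }
  delete D;  // depends() returns a freshly allocated Dependence
}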
+Dependence *DependenceAnalysis::depends(const Instruction *Src, + const Instruction *Dst, + bool PossiblyLoopIndependent) { + if ((!Src->mayReadFromMemory() && !Src->mayWriteToMemory()) || + (!Dst->mayReadFromMemory() && !Dst->mayWriteToMemory())) + // if both instructions don't reference memory, there's no dependence + return NULL; + + if (!isLoadOrStore(Src) || !isLoadOrStore(Dst)) + // can only analyze simple loads and stores, i.e., no calls, invokes, etc. + return new Dependence(Src, Dst); + + const Value *SrcPtr = getPointerOperand(Src); + const Value *DstPtr = getPointerOperand(Dst); + + switch (underlyingObjectsAlias(AA, DstPtr, SrcPtr)) { + case AliasAnalysis::MayAlias: + case AliasAnalysis::PartialAlias: + // cannot analyse objects if we don't understand their aliasing. + return new Dependence(Src, Dst); + case AliasAnalysis::NoAlias: + // If the objects noalias, they are distinct, accesses are independent. + return NULL; + case AliasAnalysis::MustAlias: + break; // The underlying objects alias; test accesses for dependence. + } + + const GEPOperator *SrcGEP = dyn_cast(SrcPtr); + const GEPOperator *DstGEP = dyn_cast(DstPtr); + if (!SrcGEP || !DstGEP) + return new Dependence(Src, Dst); // missing GEP, assume dependence + + if (SrcGEP->getPointerOperandType() != DstGEP->getPointerOperandType()) + return new Dependence(Src, Dst); // different types, assume dependence + + // establish loop nesting levels + establishNestingLevels(Src, Dst); + DEBUG(dbgs() << " common nesting levels = " << CommonLevels << "\n"); + DEBUG(dbgs() << " maximum nesting levels = " << MaxLevels << "\n"); + + FullDependence Result(Src, Dst, PossiblyLoopIndependent, CommonLevels); + ++TotalArrayPairs; + + // classify subscript pairs + unsigned Pairs = SrcGEP->idx_end() - SrcGEP->idx_begin(); + SmallVector Pair(Pairs); + for (unsigned SI = 0; SI < Pairs; ++SI) { + Pair[SI].Loops.resize(MaxLevels + 1); + Pair[SI].GroupLoops.resize(MaxLevels + 1); + Pair[SI].Group.resize(Pairs); + } + Pairs = 0; + for (GEPOperator::const_op_iterator SrcIdx = SrcGEP->idx_begin(), + SrcEnd = SrcGEP->idx_end(), + DstIdx = DstGEP->idx_begin(), + DstEnd = DstGEP->idx_end(); + SrcIdx != SrcEnd && DstIdx != DstEnd; + ++SrcIdx, ++DstIdx, ++Pairs) { + Pair[Pairs].Src = SE->getSCEV(*SrcIdx); + Pair[Pairs].Dst = SE->getSCEV(*DstIdx); + removeMatchingExtensions(&Pair[Pairs]); + Pair[Pairs].Classification = + classifyPair(Pair[Pairs].Src, LI->getLoopFor(Src->getParent()), + Pair[Pairs].Dst, LI->getLoopFor(Dst->getParent()), + Pair[Pairs].Loops); + Pair[Pairs].GroupLoops = Pair[Pairs].Loops; + Pair[Pairs].Group.set(Pairs); + DEBUG(dbgs() << " subscript " << Pairs << "\n"); + DEBUG(dbgs() << "\tsrc = " << *Pair[Pairs].Src << "\n"); + DEBUG(dbgs() << "\tdst = " << *Pair[Pairs].Dst << "\n"); + DEBUG(dbgs() << "\tclass = " << Pair[Pairs].Classification << "\n"); + DEBUG(dbgs() << "\tloops = "); + DEBUG(dumpSmallBitVector(Pair[Pairs].Loops)); + } + + SmallBitVector Separable(Pairs); + SmallBitVector Coupled(Pairs); + + // Partition subscripts into separable and minimally-coupled groups + // Algorithm in paper is algorithmically better; + // this may be faster in practice. Check someday. + // + // Here's an example of how it works. Consider this code: + // + // for (i = ...) { + // for (j = ...) { + // for (k = ...) { + // for (l = ...) { + // for (m = ...) { + // A[i][j][k][m] = ...; + // ... 
= A[0][j][l][i + j]; + // } + // } + // } + // } + // } + // + // There are 4 subscripts here: + // 0 [i] and [0] + // 1 [j] and [j] + // 2 [k] and [l] + // 3 [m] and [i + j] + // + // We've already classified each subscript pair as ZIV, SIV, etc., + // and collected all the loops mentioned by pair P in Pair[P].Loops. + // In addition, we've initialized Pair[P].GroupLoops to Pair[P].Loops + // and set Pair[P].Group = {P}. + // + // Src Dst Classification Loops GroupLoops Group + // 0 [i] [0] SIV {1} {1} {0} + // 1 [j] [j] SIV {2} {2} {1} + // 2 [k] [l] RDIV {3,4} {3,4} {2} + // 3 [m] [i + j] MIV {1,2,5} {1,2,5} {3} + // + // For each subscript SI 0 .. 3, we consider each remaining subscript, SJ. + // So, 0 is compared against 1, 2, and 3; 1 is compared against 2 and 3, etc. + // + // We begin by comparing 0 and 1. The intersection of the GroupLoops is empty. + // Next, 0 and 2. Again, the intersection of their GroupLoops is empty. + // Next 0 and 3. The intersection of their GroupLoop = {1}, not empty, + // so Pair[3].Group = {0,3} and Done = false (that is, 0 will not be added + // to either Separable or Coupled). + // + // Next, we consider 1 and 2. The intersection of the GroupLoops is empty. + // Next, 1 and 3. The intersectionof their GroupLoops = {2}, not empty, + // so Pair[3].Group = {0, 1, 3} and Done = false. + // + // Next, we compare 2 against 3. The intersection of the GroupLoops is empty. + // Since Done remains true, we add 2 to the set of Separable pairs. + // + // Finally, we consider 3. There's nothing to compare it with, + // so Done remains true and we add it to the Coupled set. + // Pair[3].Group = {0, 1, 3} and GroupLoops = {1, 2, 5}. + // + // In the end, we've got 1 separable subscript and 1 coupled group. + for (unsigned SI = 0; SI < Pairs; ++SI) { + if (Pair[SI].Classification == Subscript::NonLinear) { + // ignore these, but collect loops for later + ++NonlinearSubscriptPairs; + collectCommonLoops(Pair[SI].Src, + LI->getLoopFor(Src->getParent()), + Pair[SI].Loops); + collectCommonLoops(Pair[SI].Dst, + LI->getLoopFor(Dst->getParent()), + Pair[SI].Loops); + Result.Consistent = false; + } + else if (Pair[SI].Classification == Subscript::ZIV) { + // always separable + Separable.set(SI); + } + else { + // SIV, RDIV, or MIV, so check for coupled group + bool Done = true; + for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) { + SmallBitVector Intersection = Pair[SI].GroupLoops; + Intersection &= Pair[SJ].GroupLoops; + if (Intersection.any()) { + // accumulate set of all the loops in group + Pair[SJ].GroupLoops |= Pair[SI].GroupLoops; + // accumulate set of all subscripts in group + Pair[SJ].Group |= Pair[SI].Group; + Done = false; + } + } + if (Done) { + if (Pair[SI].Group.count() == 1) { + Separable.set(SI); + ++SeparableSubscriptPairs; + } + else { + Coupled.set(SI); + ++CoupledSubscriptPairs; + } + } + } + } + + DEBUG(dbgs() << " Separable = "); + DEBUG(dumpSmallBitVector(Separable)); + DEBUG(dbgs() << " Coupled = "); + DEBUG(dumpSmallBitVector(Coupled)); + + Constraint NewConstraint; + NewConstraint.setAny(SE); + + // test separable subscripts + for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) { + DEBUG(dbgs() << "testing subscript " << SI); + switch (Pair[SI].Classification) { + case Subscript::ZIV: + DEBUG(dbgs() << ", ZIV\n"); + if (testZIV(Pair[SI].Src, Pair[SI].Dst, Result)) + return NULL; + break; + case Subscript::SIV: { + DEBUG(dbgs() << ", SIV\n"); + unsigned Level; + const SCEV *SplitIter = NULL; + if (testSIV(Pair[SI].Src, 
Pair[SI].Dst, Level, + Result, NewConstraint, SplitIter)) + return NULL; + break; + } + case Subscript::RDIV: + DEBUG(dbgs() << ", RDIV\n"); + if (testRDIV(Pair[SI].Src, Pair[SI].Dst, Result)) + return NULL; + break; + case Subscript::MIV: + DEBUG(dbgs() << ", MIV\n"); + if (testMIV(Pair[SI].Src, Pair[SI].Dst, Pair[SI].Loops, Result)) + return NULL; + break; + default: + llvm_unreachable("subscript has unexpected classification"); + } + } + + if (Coupled.count()) { + // test coupled subscript groups + DEBUG(dbgs() << "starting on coupled subscripts\n"); + DEBUG(dbgs() << "MaxLevels + 1 = " << MaxLevels + 1 << "\n"); + SmallVector Constraints(MaxLevels + 1); + for (unsigned II = 0; II <= MaxLevels; ++II) + Constraints[II].setAny(SE); + for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) { + DEBUG(dbgs() << "testing subscript group " << SI << " { "); + SmallBitVector Group(Pair[SI].Group); + SmallBitVector Sivs(Pairs); + SmallBitVector Mivs(Pairs); + SmallBitVector ConstrainedLevels(MaxLevels + 1); + for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) { + DEBUG(dbgs() << SJ << " "); + if (Pair[SJ].Classification == Subscript::SIV) + Sivs.set(SJ); + else + Mivs.set(SJ); + } + DEBUG(dbgs() << "}\n"); + while (Sivs.any()) { + bool Changed = false; + for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) { + DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n"); + // SJ is an SIV subscript that's part of the current coupled group + unsigned Level; + const SCEV *SplitIter = NULL; + DEBUG(dbgs() << "SIV\n"); + if (testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level, + Result, NewConstraint, SplitIter)) + return NULL; + ConstrainedLevels.set(Level); + if (intersectConstraints(&Constraints[Level], &NewConstraint)) { + if (Constraints[Level].isEmpty()) { + ++DeltaIndependence; + return NULL; + } + Changed = true; + } + Sivs.reset(SJ); + } + if (Changed) { + // propagate, possibly creating new SIVs and ZIVs + DEBUG(dbgs() << " propagating\n"); + DEBUG(dbgs() << "\tMivs = "); + DEBUG(dumpSmallBitVector(Mivs)); + for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { + // SJ is an MIV subscript that's part of the current coupled group + DEBUG(dbgs() << "\tSJ = " << SJ << "\n"); + if (propagate(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, + Constraints, Result.Consistent)) { + DEBUG(dbgs() << "\t Changed\n"); + ++DeltaPropagations; + Pair[SJ].Classification = + classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()), + Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()), + Pair[SJ].Loops); + switch (Pair[SJ].Classification) { + case Subscript::ZIV: + DEBUG(dbgs() << "ZIV\n"); + if (testZIV(Pair[SJ].Src, Pair[SJ].Dst, Result)) + return NULL; + Mivs.reset(SJ); + break; + case Subscript::SIV: + Sivs.set(SJ); + Mivs.reset(SJ); + break; + case Subscript::RDIV: + case Subscript::MIV: + break; + default: + llvm_unreachable("bad subscript classification"); + } + } + } + } + } + + // test & propagate remaining RDIVs + for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { + if (Pair[SJ].Classification == Subscript::RDIV) { + DEBUG(dbgs() << "RDIV test\n"); + if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result)) + return NULL; + // I don't yet understand how to propagate RDIV results + Mivs.reset(SJ); + } + } + + // test remaining MIVs + // This code is temporary. + // Better to somehow test all remaining subscripts simultaneously. 
+ for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { + if (Pair[SJ].Classification == Subscript::MIV) { + DEBUG(dbgs() << "MIV test\n"); + if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result)) + return NULL; + } + else + llvm_unreachable("expected only MIV subscripts at this point"); + } + + // update Result.DV from constraint vector + DEBUG(dbgs() << " updating\n"); + for (int SJ = ConstrainedLevels.find_first(); + SJ >= 0; SJ = ConstrainedLevels.find_next(SJ)) { + updateDirection(Result.DV[SJ - 1], Constraints[SJ]); + if (Result.DV[SJ - 1].Direction == Dependence::DVEntry::NONE) + return NULL; + } + } + } + + // make sure Scalar flags are set correctly + SmallBitVector CompleteLoops(MaxLevels + 1); + for (unsigned SI = 0; SI < Pairs; ++SI) + CompleteLoops |= Pair[SI].Loops; + for (unsigned II = 1; II <= CommonLevels; ++II) + if (CompleteLoops[II]) + Result.DV[II - 1].Scalar = false; + + // make sure loopIndepent flag is set correctly + if (PossiblyLoopIndependent) { + for (unsigned II = 1; II <= CommonLevels; ++II) { + if (!(Result.getDirection(II) & Dependence::DVEntry::EQ)) { + Result.LoopIndependent = false; + break; + } + } + } + + FullDependence *Final = new FullDependence(Result); + Result.DV = NULL; + return Final; +} + + + +//===----------------------------------------------------------------------===// +// getSplitIteration - +// Rather than spend rarely-used space recording the splitting iteration +// during the Weak-Crossing SIV test, we re-compute it on demand. +// The re-computation is basically a repeat of the entire dependence test, +// though simplified since we know that the dependence exists. +// It's tedious, since we must go through all propagations, etc. +// +// Care is required to keep this code up to date w.r.t. the code above. +// +// Generally, the dependence analyzer will be used to build +// a dependence graph for a function (basically a map from instructions +// to dependences). Looking for cycles in the graph shows us loops +// that cannot be trivially vectorized/parallelized. +// +// We can try to improve the situation by examining all the dependences +// that make up the cycle, looking for ones we can break. +// Sometimes, peeling the first or last iteration of a loop will break +// dependences, and we've got flags for those possibilities. +// Sometimes, splitting a loop at some other iteration will do the trick, +// and we've got a flag for that case. Rather than waste the space to +// record the exact iteration (since we rarely know), we provide +// a method that calculates the iteration. It's a drag that it must work +// from scratch, but wonderful in that it's possible. +// +// Here's an example: +// +// for (i = 0; i < 10; i++) +// A[i] = ... +// ... = A[11 - i] +// +// There's a loop-carried flow dependence from the store to the load, +// found by the weak-crossing SIV test. The dependence will have a flag, +// indicating that the dependence can be broken by splitting the loop. +// Calling getSplitIteration will return 5. +// Splitting the loop breaks the dependence, like so: +// +// for (i = 0; i <= 5; i++) +// A[i] = ... +// ... = A[11 - i] +// for (i = 6; i < 10; i++) +// A[i] = ... +// ... = A[11 - i] +// +// breaks the dependence and allows us to vectorize/parallelize +// both loops. 
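[Editorial note, not part of the imported patch] A hypothetical client of the split-iteration machinery described above. It relies only on Dependence::isSplitable() and getSplitIteration() as they appear in the assertions and signature below; the helper name and its Levels parameter are invented for the sketch.

#include "llvm/Analysis/DependenceAnalysis.h"  // assumed header for this pass

using namespace llvm;

// Find the first level at which Dep can be broken by splitting the loop and
// return the iteration at which to split (e.g. 5 for the A[i] / A[11 - i]
// example above); NULL if no level is flagged as splitable.
static const SCEV *findSplitPoint(DependenceAnalysis &DA,
                                  const Dependence *Dep, unsigned Levels) {
  for (unsigned Level = 1; Level <= Levels; ++Level)
    if (Dep->isSplitable(Level))
      return DA.getSplitIteration(Dep, Level);
  return NULL;
}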
+const SCEV *DependenceAnalysis::getSplitIteration(const Dependence *Dep, + unsigned SplitLevel) { + assert(Dep && "expected a pointer to a Dependence"); + assert(Dep->isSplitable(SplitLevel) && + "Dep should be splitable at SplitLevel"); + const Instruction *Src = Dep->getSrc(); + const Instruction *Dst = Dep->getDst(); + assert(Src->mayReadFromMemory() || Src->mayWriteToMemory()); + assert(Dst->mayReadFromMemory() || Dst->mayWriteToMemory()); + assert(isLoadOrStore(Src)); + assert(isLoadOrStore(Dst)); + const Value *SrcPtr = getPointerOperand(Src); + const Value *DstPtr = getPointerOperand(Dst); + assert(underlyingObjectsAlias(AA, DstPtr, SrcPtr) == + AliasAnalysis::MustAlias); + const GEPOperator *SrcGEP = dyn_cast(SrcPtr); + const GEPOperator *DstGEP = dyn_cast(DstPtr); + assert(SrcGEP); + assert(DstGEP); + assert(SrcGEP->getPointerOperandType() == DstGEP->getPointerOperandType()); + + // establish loop nesting levels + establishNestingLevels(Src, Dst); + + FullDependence Result(Src, Dst, false, CommonLevels); + + // classify subscript pairs + unsigned Pairs = SrcGEP->idx_end() - SrcGEP->idx_begin(); + SmallVector Pair(Pairs); + for (unsigned SI = 0; SI < Pairs; ++SI) { + Pair[SI].Loops.resize(MaxLevels + 1); + Pair[SI].GroupLoops.resize(MaxLevels + 1); + Pair[SI].Group.resize(Pairs); + } + Pairs = 0; + for (GEPOperator::const_op_iterator SrcIdx = SrcGEP->idx_begin(), + SrcEnd = SrcGEP->idx_end(), + DstIdx = DstGEP->idx_begin(), + DstEnd = DstGEP->idx_end(); + SrcIdx != SrcEnd && DstIdx != DstEnd; + ++SrcIdx, ++DstIdx, ++Pairs) { + Pair[Pairs].Src = SE->getSCEV(*SrcIdx); + Pair[Pairs].Dst = SE->getSCEV(*DstIdx); + Pair[Pairs].Classification = + classifyPair(Pair[Pairs].Src, LI->getLoopFor(Src->getParent()), + Pair[Pairs].Dst, LI->getLoopFor(Dst->getParent()), + Pair[Pairs].Loops); + Pair[Pairs].GroupLoops = Pair[Pairs].Loops; + Pair[Pairs].Group.set(Pairs); + } + + SmallBitVector Separable(Pairs); + SmallBitVector Coupled(Pairs); + + // partition subscripts into separable and minimally-coupled groups + for (unsigned SI = 0; SI < Pairs; ++SI) { + if (Pair[SI].Classification == Subscript::NonLinear) { + // ignore these, but collect loops for later + collectCommonLoops(Pair[SI].Src, + LI->getLoopFor(Src->getParent()), + Pair[SI].Loops); + collectCommonLoops(Pair[SI].Dst, + LI->getLoopFor(Dst->getParent()), + Pair[SI].Loops); + Result.Consistent = false; + } + else if (Pair[SI].Classification == Subscript::ZIV) + Separable.set(SI); + else { + // SIV, RDIV, or MIV, so check for coupled group + bool Done = true; + for (unsigned SJ = SI + 1; SJ < Pairs; ++SJ) { + SmallBitVector Intersection = Pair[SI].GroupLoops; + Intersection &= Pair[SJ].GroupLoops; + if (Intersection.any()) { + // accumulate set of all the loops in group + Pair[SJ].GroupLoops |= Pair[SI].GroupLoops; + // accumulate set of all subscripts in group + Pair[SJ].Group |= Pair[SI].Group; + Done = false; + } + } + if (Done) { + if (Pair[SI].Group.count() == 1) + Separable.set(SI); + else + Coupled.set(SI); + } + } + } + + Constraint NewConstraint; + NewConstraint.setAny(SE); + + // test separable subscripts + for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) { + switch (Pair[SI].Classification) { + case Subscript::SIV: { + unsigned Level; + const SCEV *SplitIter = NULL; + (void) testSIV(Pair[SI].Src, Pair[SI].Dst, Level, + Result, NewConstraint, SplitIter); + if (Level == SplitLevel) { + assert(SplitIter != NULL); + return SplitIter; + } + break; + } + case Subscript::ZIV: + case Subscript::RDIV: + case 
Subscript::MIV: + break; + default: + llvm_unreachable("subscript has unexpected classification"); + } + } + + if (Coupled.count()) { + // test coupled subscript groups + SmallVector Constraints(MaxLevels + 1); + for (unsigned II = 0; II <= MaxLevels; ++II) + Constraints[II].setAny(SE); + for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) { + SmallBitVector Group(Pair[SI].Group); + SmallBitVector Sivs(Pairs); + SmallBitVector Mivs(Pairs); + SmallBitVector ConstrainedLevels(MaxLevels + 1); + for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) { + if (Pair[SJ].Classification == Subscript::SIV) + Sivs.set(SJ); + else + Mivs.set(SJ); + } + while (Sivs.any()) { + bool Changed = false; + for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) { + // SJ is an SIV subscript that's part of the current coupled group + unsigned Level; + const SCEV *SplitIter = NULL; + (void) testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level, + Result, NewConstraint, SplitIter); + if (Level == SplitLevel && SplitIter) + return SplitIter; + ConstrainedLevels.set(Level); + if (intersectConstraints(&Constraints[Level], &NewConstraint)) + Changed = true; + Sivs.reset(SJ); + } + if (Changed) { + // propagate, possibly creating new SIVs and ZIVs + for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { + // SJ is an MIV subscript that's part of the current coupled group + if (propagate(Pair[SJ].Src, Pair[SJ].Dst, + Pair[SJ].Loops, Constraints, Result.Consistent)) { + Pair[SJ].Classification = + classifyPair(Pair[SJ].Src, LI->getLoopFor(Src->getParent()), + Pair[SJ].Dst, LI->getLoopFor(Dst->getParent()), + Pair[SJ].Loops); + switch (Pair[SJ].Classification) { + case Subscript::ZIV: + Mivs.reset(SJ); + break; + case Subscript::SIV: + Sivs.set(SJ); + Mivs.reset(SJ); + break; + case Subscript::RDIV: + case Subscript::MIV: + break; + default: + llvm_unreachable("bad subscript classification"); + } + } + } + } + } + } + } + llvm_unreachable("somehow reached end of routine"); + return NULL; +} diff --git a/lib/Analysis/DominanceFrontier.cpp b/lib/Analysis/DominanceFrontier.cpp index 1604576..3e537e9 100644 --- a/lib/Analysis/DominanceFrontier.cpp +++ b/lib/Analysis/DominanceFrontier.cpp @@ -133,7 +133,9 @@ void DominanceFrontierBase::print(raw_ostream &OS, const Module* ) const { } } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void DominanceFrontierBase::dump() const { print(dbgs()); } +#endif diff --git a/lib/Analysis/IPA/CallGraph.cpp b/lib/Analysis/IPA/CallGraph.cpp index 0df3e8a..dec0ece 100644 --- a/lib/Analysis/IPA/CallGraph.cpp +++ b/lib/Analysis/IPA/CallGraph.cpp @@ -141,12 +141,13 @@ private: for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE; ++II) { CallSite CS(cast(II)); - if (CS && !isa(II)) { + if (CS) { const Function *Callee = CS.getCalledFunction(); - if (Callee) - Node->addCalledFunction(CS, getOrInsertFunction(Callee)); - else + if (!Callee) + // Indirect calls of intrinsics are not allowed so no need to check. 
Node->addCalledFunction(CS, CallsExternalNode); + else if (!Callee->isIntrinsic()) + Node->addCalledFunction(CS, getOrInsertFunction(Callee)); } } } @@ -198,9 +199,11 @@ void CallGraph::print(raw_ostream &OS, Module*) const { for (CallGraph::const_iterator I = begin(), E = end(); I != E; ++I) I->second->print(OS); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void CallGraph::dump() const { print(dbgs(), 0); } +#endif //===----------------------------------------------------------------------===// // Implementations of public modification methods @@ -267,7 +270,9 @@ void CallGraphNode::print(raw_ostream &OS) const { OS << '\n'; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void CallGraphNode::dump() const { print(dbgs()); } +#endif /// removeCallEdgeFor - This method removes the edge in the node for the /// specified call site. Note that this method takes linear time, so it diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp index 22f6e96..990caa8 100644 --- a/lib/Analysis/IPA/GlobalsModRef.cpp +++ b/lib/Analysis/IPA/GlobalsModRef.cpp @@ -263,7 +263,7 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V, } else if (BitCastInst *BCI = dyn_cast(U)) { if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest)) return true; - } else if (isFreeCall(U)) { + } else if (isFreeCall(U, TLI)) { Writers.push_back(cast(U)->getParent()->getParent()); } else if (CallInst *CI = dyn_cast(U)) { // Make sure that this is just the function being called, not that it is @@ -329,7 +329,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) { // Check the value being stored. Value *Ptr = GetUnderlyingObject(SI->getOperand(0)); - if (!isAllocLikeFn(Ptr)) + if (!isAllocLikeFn(Ptr, TLI)) return false; // Too hard to analyze. // Analyze all uses of the allocation. If any of them are used in a @@ -458,7 +458,7 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) { if (SI->isVolatile()) // Treat volatile stores as reading memory somewhere. FunctionEffect |= Ref; - } else if (isAllocationFn(&*II) || isFreeCall(&*II)) { + } else if (isAllocationFn(&*II, TLI) || isFreeCall(&*II, TLI)) { FunctionEffect |= ModRef; } else if (IntrinsicInst *Intrinsic = dyn_cast(&*II)) { // The callgraph doesn't include intrinsic calls. diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp index 0a6682a..d4221b8 100644 --- a/lib/Analysis/IVUsers.cpp +++ b/lib/Analysis/IVUsers.cpp @@ -22,7 +22,7 @@ #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/ValueTracking.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Assembly/Writer.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/Debug.h" @@ -235,7 +235,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) { LI = &getAnalysis(); DT = &getAnalysis(); SE = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); // Find all uses of induction variables in this loop, and categorize // them by stride. 
Start by finding all of the PHI nodes in the header for @@ -273,9 +273,11 @@ void IVUsers::print(raw_ostream &OS, const Module *M) const { } } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void IVUsers::dump() const { print(dbgs()); } +#endif void IVUsers::releaseMemory() { Processed.clear(); diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp index bc1ecd2..5f51f77 100644 --- a/lib/Analysis/InlineCost.cpp +++ b/lib/Analysis/InlineCost.cpp @@ -24,7 +24,7 @@ #include "llvm/IntrinsicInst.h" #include "llvm/Operator.h" #include "llvm/GlobalAlias.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallVector.h" @@ -41,8 +41,8 @@ class CallAnalyzer : public InstVisitor { typedef InstVisitor Base; friend class InstVisitor; - // TargetData if available, or null. - const TargetData *const TD; + // DataLayout if available, or null. + const DataLayout *const TD; // The called function. Function &F; @@ -51,9 +51,12 @@ class CallAnalyzer : public InstVisitor { int Cost; const bool AlwaysInline; - bool IsRecursive; + bool IsCallerRecursive; + bool IsRecursiveCall; bool ExposesReturnsTwice; bool HasDynamicAlloca; + /// Number of bytes allocated statically by the callee. + uint64_t AllocatedSize; unsigned NumInstructions, NumVectorInstructions; int FiftyPercentVectorBonus, TenPercentVectorBonus; int VectorBonus; @@ -123,10 +126,11 @@ class CallAnalyzer : public InstVisitor { bool visitCallSite(CallSite CS); public: - CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold) + CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold) : TD(TD), F(Callee), Threshold(Threshold), Cost(0), - AlwaysInline(F.hasFnAttr(Attribute::AlwaysInline)), - IsRecursive(false), ExposesReturnsTwice(false), HasDynamicAlloca(false), + AlwaysInline(F.getFnAttributes().hasAttribute(Attributes::AlwaysInline)), + IsCallerRecursive(false), IsRecursiveCall(false), + ExposesReturnsTwice(false), HasDynamicAlloca(false), AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0), FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), @@ -138,6 +142,7 @@ public: int getThreshold() { return Threshold; } int getCost() { return Cost; } + bool isAlwaysInline() { return AlwaysInline; } // Keep a bunch of stats about the cost savings found so we can print them // out when debugging. @@ -269,6 +274,13 @@ bool CallAnalyzer::visitAlloca(AllocaInst &I) { // FIXME: Check whether inlining will turn a dynamic alloca into a static // alloca, and handle that case. + // Accumulate the allocated size. + if (I.isStaticAlloca()) { + Type *Ty = I.getAllocatedType(); + AllocatedSize += (TD ? TD->getTypeAllocSize(Ty) : + Ty->getPrimitiveSizeInBits()); + } + // We will happily inline static alloca instructions or dynamic alloca // instructions in always-inline situations. if (AlwaysInline || I.isStaticAlloca()) @@ -602,7 +614,7 @@ bool CallAnalyzer::visitStore(StoreInst &I) { bool CallAnalyzer::visitCallSite(CallSite CS) { if (CS.isCall() && cast(CS.getInstruction())->canReturnTwice() && - !F.hasFnAttr(Attribute::ReturnsTwice)) { + !F.getFnAttributes().hasAttribute(Attributes::ReturnsTwice)) { // This aborts the entire analysis. 
ExposesReturnsTwice = true; return false; @@ -625,7 +637,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) { if (F == CS.getInstruction()->getParent()->getParent()) { // This flag will fully abort the analysis, so don't bother with anything // else. - IsRecursive = true; + IsRecursiveCall = true; return false; } @@ -712,7 +724,14 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB) { Cost += InlineConstants::InstrCost; // If the visit this instruction detected an uninlinable pattern, abort. - if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca) + if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca) + return false; + + // If the caller is a recursive function then we don't want to inline + // functions which allocate a lot of stack space because it would increase + // the caller stack usage dramatically. + if (IsCallerRecursive && + AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) return false; if (NumVectorInstructions > NumInstructions/2) @@ -814,7 +833,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { // one load and one store per word copied. // FIXME: The maxStoresPerMemcpy setting from the target should be used // here instead of a magic number of 8, but it's not available via - // TargetData. + // DataLayout. NumStores = std::min(NumStores, 8U); Cost -= 2 * NumStores * InlineConstants::InstrCost; @@ -831,12 +850,14 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { Cost += InlineConstants::LastCallToStaticBonus; // If the instruction after the call, or if the normal destination of the - // invoke is an unreachable instruction, the function is noreturn. As such, - // there is little point in inlining this unless there is literally zero cost. - if (InvokeInst *II = dyn_cast(CS.getInstruction())) { + // invoke is an unreachable instruction, the function is noreturn. As such, + // there is little point in inlining this unless there is literally zero + // cost. + Instruction *Instr = CS.getInstruction(); + if (InvokeInst *II = dyn_cast(Instr)) { if (isa(II->getNormalDest()->begin())) Threshold = 1; - } else if (isa(++BasicBlock::iterator(CS.getInstruction()))) + } else if (isa(++BasicBlock::iterator(Instr))) Threshold = 1; // If this function uses the coldcc calling convention, prefer not to inline @@ -852,6 +873,20 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { if (F.empty()) return true; + Function *Caller = CS.getInstruction()->getParent()->getParent(); + // Check if the caller function is recursive itself. + for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end(); + U != E; ++U) { + CallSite Site(cast(*U)); + if (!Site) + continue; + Instruction *I = Site.getInstruction(); + if (I->getParent()->getParent() == Caller) { + IsCallerRecursive = true; + break; + } + } + // Track whether we've seen a return instruction. The first return // instruction is free, as at least one will usually disappear in inlining. bool HasReturn = false; @@ -908,9 +943,9 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { // We never want to inline functions that contain an indirectbr. This is // incorrect because all the blockaddress's (in static global initializers - // for example) would be referring to the original function, and this indirect - // jump would jump from the inlined copy of the function into the original - // function which is extremely undefined behavior. 
+ // for example) would be referring to the original function, and this + // indirect jump would jump from the inlined copy of the function into the + // original function which is extremely undefined behavior. // FIXME: This logic isn't really right; we can safely inline functions // with indirectbr's as long as no other function or global references the // blockaddress of a block within the current function. And as a QOI issue, @@ -928,8 +963,16 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { // Analyze the cost of this block. If we blow through the threshold, this // returns false, and we can bail on out. if (!analyzeBlock(BB)) { - if (IsRecursive || ExposesReturnsTwice || HasDynamicAlloca) + if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca) return false; + + // If the caller is a recursive function then we don't want to inline + // functions which allocate a lot of stack space because it would increase + // the caller stack usage dramatically. + if (IsCallerRecursive && + AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller) + return false; + break; } @@ -955,7 +998,8 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { // If we're unable to select a particular successor, just count all of // them. - for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize; ++TIdx) + for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize; + ++TIdx) BBWorklist.insert(TI->getSuccessor(TIdx)); // If we had any successors at this point, than post-inlining is likely to @@ -974,6 +1018,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) { return AlwaysInline || Cost < Threshold; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// \brief Dump stats about this call's analysis. void CallAnalyzer::dump() { #define DEBUG_PRINT_STAT(x) llvm::dbgs() << " " #x ": " << x << "\n" @@ -987,6 +1032,7 @@ void CallAnalyzer::dump() { DEBUG_PRINT_STAT(SROACostSavingsLost); #undef DEBUG_PRINT_STAT } +#endif InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, int Threshold) { return getInlineCost(CS, CS.getCalledFunction(), Threshold); @@ -998,10 +1044,12 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee, // something else. Don't inline functions marked noinline or call sites // marked noinline. if (!Callee || Callee->mayBeOverridden() || - Callee->hasFnAttr(Attribute::NoInline) || CS.isNoInline()) + Callee->getFnAttributes().hasAttribute(Attributes::NoInline) || + CS.isNoInline()) return llvm::InlineCost::getNever(); - DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() << "...\n"); + DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName() + << "...\n"); CallAnalyzer CA(TD, *Callee, Threshold); bool ShouldInline = CA.analyzeCall(CS); @@ -1011,7 +1059,8 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee, // Check if there was a reason to force inlining or no inlining. 
if (!ShouldInline && CA.getCost() < CA.getThreshold()) return InlineCost::getNever(); - if (ShouldInline && CA.getCost() >= CA.getThreshold()) + if (ShouldInline && (CA.isAlwaysInline() || + CA.getCost() >= CA.getThreshold())) return InlineCost::getAlways(); return llvm::InlineCost::get(CA.getCost(), CA.getThreshold()); diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp index 379a35a..a76e5ad 100644 --- a/lib/Analysis/InstructionSimplify.cpp +++ b/lib/Analysis/InstructionSimplify.cpp @@ -31,7 +31,7 @@ #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/PatternMatch.h" #include "llvm/Support/ValueHandle.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; using namespace llvm::PatternMatch; @@ -42,11 +42,11 @@ STATISTIC(NumFactor , "Number of factorizations"); STATISTIC(NumReassoc, "Number of reassociations"); struct Query { - const TargetData *TD; + const DataLayout *TD; const TargetLibraryInfo *TLI; const DominatorTree *DT; - Query(const TargetData *td, const TargetLibraryInfo *tli, + Query(const DataLayout *td, const TargetLibraryInfo *tli, const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {} }; @@ -651,7 +651,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, } Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, - const TargetData *TD, const TargetLibraryInfo *TLI, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT), RecursionLimit); @@ -664,7 +664,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, /// if the GEP has all-constant indices. Returns false if any non-constant /// index is encountered leaving the 'Offset' in an undefined state. The /// 'Offset' APInt must be the bitwidth of the target's pointer size. -static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP, +static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP, APInt &Offset) { unsigned IntPtrWidth = TD.getPointerSizeInBits(); assert(IntPtrWidth == Offset.getBitWidth()); @@ -696,7 +696,7 @@ static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP, /// accumulates the total constant offset applied in the returned constant. It /// returns 0 if V is not a pointer, and returns the constant '0' if there are /// no constant offsets applied. -static Constant *stripAndComputeConstantOffsets(const TargetData &TD, +static Constant *stripAndComputeConstantOffsets(const DataLayout &TD, Value *&V) { if (!V->getType()->isPointerTy()) return 0; @@ -731,7 +731,7 @@ static Constant *stripAndComputeConstantOffsets(const TargetData &TD, /// \brief Compute the constant difference between two pointer values. /// If the difference is not a constant, returns zero. 
-static Constant *computePointerDifference(const TargetData &TD, +static Constant *computePointerDifference(const DataLayout &TD, Value *LHS, Value *RHS) { Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS); if (!LHSOffset) @@ -880,7 +880,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, } Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, - const TargetData *TD, const TargetLibraryInfo *TLI, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT), RecursionLimit); @@ -951,7 +951,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1039,7 +1039,7 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1055,7 +1055,7 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1074,7 +1074,7 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1144,7 +1144,7 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1160,7 +1160,7 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1179,7 +1179,7 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &, return 0; } -Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1248,7 +1248,7 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, } Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, - const TargetData 
*TD, const TargetLibraryInfo *TLI, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT), RecursionLimit); @@ -1275,7 +1275,7 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, } Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT), @@ -1307,7 +1307,7 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, } Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT), @@ -1407,7 +1407,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1501,7 +1501,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1561,7 +1561,7 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q, return 0; } -Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD, +Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); @@ -1591,7 +1591,7 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, return 0; } -static Constant *computePointerICmp(const TargetData &TD, +static Constant *computePointerICmp(const DataLayout &TD, CmpInst::Predicate Pred, Value *LHS, Value *RHS) { // We can only fold certain predicates on pointer comparisons. @@ -2065,8 +2065,25 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem && NoRHSWrapProblem) { // Determine Y and Z in the form icmp (X+Y), (X+Z). - Value *Y = (A == C || A == D) ? B : A; - Value *Z = (C == A || C == B) ? 
D : C; + Value *Y, *Z; + if (A == C) { + // C + B == C + D -> B == D + Y = B; + Z = D; + } else if (A == D) { + // D + B == C + D -> B == C + Y = B; + Z = C; + } else if (B == C) { + // A + C == C + D -> A == D + Y = A; + Z = D; + } else { + assert(B == D); + // A + D == C + D -> A == C + Y = A; + Z = C; + } if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse-1)) return V; } @@ -2399,7 +2416,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, } Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT), @@ -2496,7 +2513,7 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, } Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT), @@ -2531,7 +2548,7 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal, } Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT), @@ -2579,7 +2596,7 @@ static Value *SimplifyGEPInst(ArrayRef Ops, const Query &Q, unsigned) { return ConstantExpr::getGetElementPtr(cast(Ops[0]), Ops.slice(1)); } -Value *llvm::SimplifyGEPInst(ArrayRef Ops, const TargetData *TD, +Value *llvm::SimplifyGEPInst(ArrayRef Ops, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit); @@ -2616,7 +2633,7 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val, Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef Idxs, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT), @@ -2664,7 +2681,7 @@ static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) { return 0; } -Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD, +Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit); @@ -2730,7 +2747,7 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, } Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, - const TargetData *TD, const TargetLibraryInfo *TLI, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit); } @@ -2745,7 +2762,7 @@ static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, } Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, - const TargetData *TD, const TargetLibraryInfo *TLI, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT), RecursionLimit); @@ -2761,7 +2778,7 @@ static Value *SimplifyCallInst(CallInst *CI, const Query &) { /// SimplifyInstruction - See if we can compute a simplified version of this /// instruction. If not, this returns null. 
-Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD, +Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { Value *Result; @@ -2881,7 +2898,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD, /// This routine returns 'true' only when *it* simplifies something. The passed /// in simplified value does not count toward this. static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { bool Simplified = false; @@ -2936,14 +2953,14 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, } bool llvm::recursivelySimplifyInstruction(Instruction *I, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT); } bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI, const DominatorTree *DT) { assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!"); diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp index 9140786..2b87d80 100644 --- a/lib/Analysis/LazyValueInfo.cpp +++ b/lib/Analysis/LazyValueInfo.cpp @@ -13,13 +13,14 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "lazy-value-info" +#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/LazyValueInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Constants.h" #include "llvm/Instructions.h" #include "llvm/IntrinsicInst.h" #include "llvm/Analysis/ConstantFolding.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Support/CFG.h" #include "llvm/Support/ConstantRange.h" @@ -212,7 +213,7 @@ public: // Unless we can prove that the two Constants are different, we must // move to overdefined. - // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding. + // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding. if (ConstantInt *Res = dyn_cast( ConstantFoldCompareInstOperands(CmpInst::ICMP_NE, getConstant(), @@ -238,7 +239,7 @@ public: // Unless we can prove that the two Constants are different, we must // move to overdefined. - // FIXME: use TargetData/TargetLibraryInfo for smarter constant folding. + // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding. if (ConstantInt *Res = dyn_cast( ConstantFoldCompareInstOperands(CmpInst::ICMP_NE, getNotConstant(), @@ -294,7 +295,7 @@ raw_ostream &operator<<(raw_ostream &OS, const LVILatticeVal &Val) { //===----------------------------------------------------------------------===// namespace { - /// LVIValueHandle - A callback value handle update the cache when + /// LVIValueHandle - A callback value handle updates the cache when /// values are erased. 
class LazyValueInfoCache; struct LVIValueHandle : public CallbackVH { @@ -470,8 +471,10 @@ bool LazyValueInfoCache::hasBlockValue(Value *Val, BasicBlock *BB) { return true; LVIValueHandle ValHandle(Val, this); - if (!ValueCache.count(ValHandle)) return false; - return ValueCache[ValHandle].count(BB); + std::map::iterator I = + ValueCache.find(ValHandle); + if (I == ValueCache.end()) return false; + return I->second.count(BB); } LVILatticeVal LazyValueInfoCache::getBlockValue(Value *Val, BasicBlock *BB) { @@ -555,13 +558,11 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) { static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) { if (LoadInst *L = dyn_cast(I)) { return L->getPointerAddressSpace() == 0 && - GetUnderlyingObject(L->getPointerOperand()) == - GetUnderlyingObject(Ptr); + GetUnderlyingObject(L->getPointerOperand()) == Ptr; } if (StoreInst *S = dyn_cast(I)) { return S->getPointerAddressSpace() == 0 && - GetUnderlyingObject(S->getPointerOperand()) == - GetUnderlyingObject(Ptr); + GetUnderlyingObject(S->getPointerOperand()) == Ptr; } if (MemIntrinsic *MI = dyn_cast(I)) { if (MI->isVolatile()) return false; @@ -571,11 +572,11 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) { if (!Len || Len->isZero()) return false; if (MI->getDestAddressSpace() == 0) - if (MI->getRawDest() == Ptr || MI->getDest() == Ptr) + if (GetUnderlyingObject(MI->getRawDest()) == Ptr) return true; if (MemTransferInst *MTI = dyn_cast(MI)) if (MTI->getSourceAddressSpace() == 0) - if (MTI->getRawSource() == Ptr || MTI->getSource() == Ptr) + if (GetUnderlyingObject(MTI->getRawSource()) == Ptr) return true; } return false; @@ -589,13 +590,19 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV, // then we know that the pointer can't be NULL. bool NotNull = false; if (Val->getType()->isPointerTy()) { - if (isa(Val)) { + if (isKnownNonNull(Val)) { NotNull = true; } else { - for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();BI != BE;++BI){ - if (InstructionDereferencesPointer(BI, Val)) { - NotNull = true; - break; + Value *UnderlyingVal = GetUnderlyingObject(Val); + // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge + // inside InstructionDereferencesPointer either. + if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, NULL, 1)) { + for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); + BI != BE; ++BI) { + if (InstructionDereferencesPointer(BI, UnderlyingVal)) { + NotNull = true; + break; + } } } } @@ -845,9 +852,12 @@ static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom, for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) { ConstantRange EdgeVal(i.getCaseValue()->getValue()); - if (DefaultCase) - EdgesVals = EdgesVals.difference(EdgeVal); - else if (i.getCaseSuccessor() == BBTo) + if (DefaultCase) { + // It is possible that the default destination is the destination of + // some cases. There is no need to perform difference for those cases. + if (i.getCaseSuccessor() != BBTo) + EdgesVals = EdgesVals.difference(EdgeVal); + } else if (i.getCaseSuccessor() == BBTo) EdgesVals = EdgesVals.unionWith(EdgeVal); } Result = LVILatticeVal::getRange(EdgesVals); @@ -1004,7 +1014,7 @@ bool LazyValueInfo::runOnFunction(Function &F) { if (PImpl) getCache(PImpl).clear(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); // Fully lazy. 
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp index 83bdf52..6d6d580 100644 --- a/lib/Analysis/Lint.cpp +++ b/lib/Analysis/Lint.cpp @@ -43,7 +43,7 @@ #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Assembly/Writer.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Pass.h" #include "llvm/PassManager.h" @@ -103,7 +103,7 @@ namespace { Module *Mod; AliasAnalysis *AA; DominatorTree *DT; - TargetData *TD; + DataLayout *TD; TargetLibraryInfo *TLI; std::string Messages; @@ -177,7 +177,7 @@ bool Lint::runOnFunction(Function &F) { Mod = F.getParent(); AA = &getAnalysis(); DT = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); visit(F); dbgs() << MessagesStr.str(); @@ -411,14 +411,50 @@ void Lint::visitMemoryReference(Instruction &I, "Undefined behavior: Branch to non-blockaddress", &I); } + // Check for buffer overflows and misalignment. if (TD) { - if (Align == 0 && Ty) Align = TD->getABITypeAlignment(Ty); + // Only handles memory references that read/write something simple like an + // alloca instruction or a global variable. + int64_t Offset = 0; + if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *TD)) { + // OK, so the access is to a constant offset from Ptr. Check that Ptr is + // something we can handle and if so extract the size of this base object + // along with its alignment. + uint64_t BaseSize = AliasAnalysis::UnknownSize; + unsigned BaseAlign = 0; + + if (AllocaInst *AI = dyn_cast(Base)) { + Type *ATy = AI->getAllocatedType(); + if (!AI->isArrayAllocation() && ATy->isSized()) + BaseSize = TD->getTypeAllocSize(ATy); + BaseAlign = AI->getAlignment(); + if (BaseAlign == 0 && ATy->isSized()) + BaseAlign = TD->getABITypeAlignment(ATy); + } else if (GlobalVariable *GV = dyn_cast(Base)) { + // If the global may be defined differently in another compilation unit + // then don't warn about funky memory accesses. + if (GV->hasDefinitiveInitializer()) { + Type *GTy = GV->getType()->getElementType(); + if (GTy->isSized()) + BaseSize = TD->getTypeAllocSize(GTy); + BaseAlign = GV->getAlignment(); + if (BaseAlign == 0 && GTy->isSized()) + BaseAlign = TD->getABITypeAlignment(GTy); + } + } - if (Align != 0) { - unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType()); - APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); - ComputeMaskedBits(Ptr, KnownZero, KnownOne, TD); - Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))), + // Accesses from before the start or after the end of the object are not + // defined. + Assert1(Size == AliasAnalysis::UnknownSize || + BaseSize == AliasAnalysis::UnknownSize || + (Offset >= 0 && Offset + Size <= BaseSize), + "Undefined behavior: Buffer overflow", &I); + + // Accesses that say that the memory is more aligned than it is are not + // defined. + if (Align == 0 && Ty && Ty->isSized()) + Align = TD->getABITypeAlignment(Ty); + Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset), "Undefined behavior: Memory reference address is misaligned", &I); } } @@ -470,7 +506,7 @@ void Lint::visitShl(BinaryOperator &I) { "Undefined result: Shift count out of range", &I); } -static bool isZero(Value *V, TargetData *TD) { +static bool isZero(Value *V, DataLayout *TD) { // Assume undef could be zero. 
if (isa(V)) return true; diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp index 873a275..73aa8b4 100644 --- a/lib/Analysis/Loads.cpp +++ b/lib/Analysis/Loads.cpp @@ -13,7 +13,7 @@ #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/GlobalAlias.h" #include "llvm/GlobalVariable.h" #include "llvm/IntrinsicInst.h" @@ -52,8 +52,8 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) { /// bitcasts to get back to the underlying object being addressed, keeping /// track of the offset in bytes from the GEPs relative to the result. /// This is closely related to GetUnderlyingObject but is located -/// here to avoid making VMCore depend on TargetData. -static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD, +/// here to avoid making VMCore depend on DataLayout. +static Value *getUnderlyingObjectWithOffset(Value *V, const DataLayout *TD, uint64_t &ByteOffset, unsigned MaxLookup = 6) { if (!V->getType()->isPointerTy()) @@ -85,7 +85,7 @@ static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD, /// specified pointer, we do a quick local scan of the basic block containing /// ScanFrom, to determine if the address is already accessed. bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom, - unsigned Align, const TargetData *TD) { + unsigned Align, const DataLayout *TD) { uint64_t ByteOffset = 0; Value *Base = V; if (TD) diff --git a/lib/Analysis/LoopDependenceAnalysis.cpp b/lib/Analysis/LoopDependenceAnalysis.cpp deleted file mode 100644 index 463269d..0000000 --- a/lib/Analysis/LoopDependenceAnalysis.cpp +++ /dev/null @@ -1,362 +0,0 @@ -//===- LoopDependenceAnalysis.cpp - LDA Implementation ----------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This is the (beginning) of an implementation of a loop dependence analysis -// framework, which is used to detect dependences in memory accesses in loops. -// -// Please note that this is work in progress and the interface is subject to -// change. -// -// TODO: adapt as implementation progresses. 
-// -// TODO: document lingo (pair, subscript, index) -// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "lda" -#include "llvm/ADT/DenseSet.h" -#include "llvm/ADT/Statistic.h" -#include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Analysis/LoopDependenceAnalysis.h" -#include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/ScalarEvolution.h" -#include "llvm/Analysis/ScalarEvolutionExpressions.h" -#include "llvm/Analysis/ValueTracking.h" -#include "llvm/Assembly/Writer.h" -#include "llvm/Instructions.h" -#include "llvm/Operator.h" -#include "llvm/Support/Allocator.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" -using namespace llvm; - -STATISTIC(NumAnswered, "Number of dependence queries answered"); -STATISTIC(NumAnalysed, "Number of distinct dependence pairs analysed"); -STATISTIC(NumDependent, "Number of pairs with dependent accesses"); -STATISTIC(NumIndependent, "Number of pairs with independent accesses"); -STATISTIC(NumUnknown, "Number of pairs with unknown accesses"); - -LoopPass *llvm::createLoopDependenceAnalysisPass() { - return new LoopDependenceAnalysis(); -} - -INITIALIZE_PASS_BEGIN(LoopDependenceAnalysis, "lda", - "Loop Dependence Analysis", false, true) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) -INITIALIZE_AG_DEPENDENCY(AliasAnalysis) -INITIALIZE_PASS_END(LoopDependenceAnalysis, "lda", - "Loop Dependence Analysis", false, true) -char LoopDependenceAnalysis::ID = 0; - -//===----------------------------------------------------------------------===// -// Utility Functions -//===----------------------------------------------------------------------===// - -static inline bool IsMemRefInstr(const Value *V) { - const Instruction *I = dyn_cast(V); - return I && (I->mayReadFromMemory() || I->mayWriteToMemory()); -} - -static void GetMemRefInstrs(const Loop *L, - SmallVectorImpl &Memrefs) { - for (Loop::block_iterator b = L->block_begin(), be = L->block_end(); - b != be; ++b) - for (BasicBlock::iterator i = (*b)->begin(), ie = (*b)->end(); - i != ie; ++i) - if (IsMemRefInstr(i)) - Memrefs.push_back(i); -} - -static bool IsLoadOrStoreInst(Value *I) { - // Returns true if the load or store can be analyzed. Atomic and volatile - // operations have properties which this analysis does not understand. 
- if (LoadInst *LI = dyn_cast(I)) - return LI->isUnordered(); - else if (StoreInst *SI = dyn_cast(I)) - return SI->isUnordered(); - return false; -} - -static Value *GetPointerOperand(Value *I) { - if (LoadInst *i = dyn_cast(I)) - return i->getPointerOperand(); - if (StoreInst *i = dyn_cast(I)) - return i->getPointerOperand(); - llvm_unreachable("Value is no load or store instruction!"); -} - -static AliasAnalysis::AliasResult UnderlyingObjectsAlias(AliasAnalysis *AA, - const Value *A, - const Value *B) { - const Value *aObj = GetUnderlyingObject(A); - const Value *bObj = GetUnderlyingObject(B); - return AA->alias(aObj, AA->getTypeStoreSize(aObj->getType()), - bObj, AA->getTypeStoreSize(bObj->getType())); -} - -static inline const SCEV *GetZeroSCEV(ScalarEvolution *SE) { - return SE->getConstant(Type::getInt32Ty(SE->getContext()), 0L); -} - -//===----------------------------------------------------------------------===// -// Dependence Testing -//===----------------------------------------------------------------------===// - -bool LoopDependenceAnalysis::isDependencePair(const Value *A, - const Value *B) const { - return IsMemRefInstr(A) && - IsMemRefInstr(B) && - (cast(A)->mayWriteToMemory() || - cast(B)->mayWriteToMemory()); -} - -bool LoopDependenceAnalysis::findOrInsertDependencePair(Value *A, - Value *B, - DependencePair *&P) { - void *insertPos = 0; - FoldingSetNodeID id; - id.AddPointer(A); - id.AddPointer(B); - - P = Pairs.FindNodeOrInsertPos(id, insertPos); - if (P) return true; - - P = new (PairAllocator) DependencePair(id, A, B); - Pairs.InsertNode(P, insertPos); - return false; -} - -void LoopDependenceAnalysis::getLoops(const SCEV *S, - DenseSet* Loops) const { - // Refactor this into an SCEVVisitor, if efficiency becomes a concern. - for (const Loop *L = this->L; L != 0; L = L->getParentLoop()) - if (!SE->isLoopInvariant(S, L)) - Loops->insert(L); -} - -bool LoopDependenceAnalysis::isLoopInvariant(const SCEV *S) const { - DenseSet loops; - getLoops(S, &loops); - return loops.empty(); -} - -bool LoopDependenceAnalysis::isAffine(const SCEV *S) const { - const SCEVAddRecExpr *rec = dyn_cast(S); - return isLoopInvariant(S) || (rec && rec->isAffine()); -} - -bool LoopDependenceAnalysis::isZIVPair(const SCEV *A, const SCEV *B) const { - return isLoopInvariant(A) && isLoopInvariant(B); -} - -bool LoopDependenceAnalysis::isSIVPair(const SCEV *A, const SCEV *B) const { - DenseSet loops; - getLoops(A, &loops); - getLoops(B, &loops); - return loops.size() == 1; -} - -LoopDependenceAnalysis::DependenceResult -LoopDependenceAnalysis::analyseZIV(const SCEV *A, - const SCEV *B, - Subscript *S) const { - assert(isZIVPair(A, B) && "Attempted to ZIV-test non-ZIV SCEVs!"); - return A == B ? Dependent : Independent; -} - -LoopDependenceAnalysis::DependenceResult -LoopDependenceAnalysis::analyseSIV(const SCEV *A, - const SCEV *B, - Subscript *S) const { - return Unknown; // TODO: Implement. -} - -LoopDependenceAnalysis::DependenceResult -LoopDependenceAnalysis::analyseMIV(const SCEV *A, - const SCEV *B, - Subscript *S) const { - return Unknown; // TODO: Implement. -} - -LoopDependenceAnalysis::DependenceResult -LoopDependenceAnalysis::analyseSubscript(const SCEV *A, - const SCEV *B, - Subscript *S) const { - DEBUG(dbgs() << " Testing subscript: " << *A << ", " << *B << "\n"); - - if (A == B) { - DEBUG(dbgs() << " -> [D] same SCEV\n"); - return Dependent; - } - - if (!isAffine(A) || !isAffine(B)) { - DEBUG(dbgs() << " -> [?] 
not affine\n"); - return Unknown; - } - - if (isZIVPair(A, B)) - return analyseZIV(A, B, S); - - if (isSIVPair(A, B)) - return analyseSIV(A, B, S); - - return analyseMIV(A, B, S); -} - -LoopDependenceAnalysis::DependenceResult -LoopDependenceAnalysis::analysePair(DependencePair *P) const { - DEBUG(dbgs() << "Analysing:\n" << *P->A << "\n" << *P->B << "\n"); - - // We only analyse loads and stores but no possible memory accesses by e.g. - // free, call, or invoke instructions. - if (!IsLoadOrStoreInst(P->A) || !IsLoadOrStoreInst(P->B)) { - DEBUG(dbgs() << "--> [?] no load/store\n"); - return Unknown; - } - - Value *aPtr = GetPointerOperand(P->A); - Value *bPtr = GetPointerOperand(P->B); - - switch (UnderlyingObjectsAlias(AA, aPtr, bPtr)) { - case AliasAnalysis::MayAlias: - case AliasAnalysis::PartialAlias: - // We can not analyse objects if we do not know about their aliasing. - DEBUG(dbgs() << "---> [?] may alias\n"); - return Unknown; - - case AliasAnalysis::NoAlias: - // If the objects noalias, they are distinct, accesses are independent. - DEBUG(dbgs() << "---> [I] no alias\n"); - return Independent; - - case AliasAnalysis::MustAlias: - break; // The underlying objects alias, test accesses for dependence. - } - - const GEPOperator *aGEP = dyn_cast(aPtr); - const GEPOperator *bGEP = dyn_cast(bPtr); - - if (!aGEP || !bGEP) - return Unknown; - - // FIXME: Is filtering coupled subscripts necessary? - - // Collect GEP operand pairs (FIXME: use GetGEPOperands from BasicAA), adding - // trailing zeroes to the smaller GEP, if needed. - typedef SmallVector, 4> GEPOpdPairsTy; - GEPOpdPairsTy opds; - for(GEPOperator::const_op_iterator aIdx = aGEP->idx_begin(), - aEnd = aGEP->idx_end(), - bIdx = bGEP->idx_begin(), - bEnd = bGEP->idx_end(); - aIdx != aEnd && bIdx != bEnd; - aIdx += (aIdx != aEnd), bIdx += (bIdx != bEnd)) { - const SCEV* aSCEV = (aIdx != aEnd) ? SE->getSCEV(*aIdx) : GetZeroSCEV(SE); - const SCEV* bSCEV = (bIdx != bEnd) ? SE->getSCEV(*bIdx) : GetZeroSCEV(SE); - opds.push_back(std::make_pair(aSCEV, bSCEV)); - } - - if (!opds.empty() && opds[0].first != opds[0].second) { - // We cannot (yet) handle arbitrary GEP pointer offsets. By limiting - // - // TODO: this could be relaxed by adding the size of the underlying object - // to the first subscript. If we have e.g. (GEP x,0,i; GEP x,2,-i) and we - // know that x is a [100 x i8]*, we could modify the first subscript to be - // (i, 200-i) instead of (i, -i). - return Unknown; - } - - // Now analyse the collected operand pairs (skipping the GEP ptr offsets). - for (GEPOpdPairsTy::const_iterator i = opds.begin() + 1, end = opds.end(); - i != end; ++i) { - Subscript subscript; - DependenceResult result = analyseSubscript(i->first, i->second, &subscript); - if (result != Dependent) { - // We either proved independence or failed to analyse this subscript. - // Further subscripts will not improve the situation, so abort early. - return result; - } - P->Subscripts.push_back(subscript); - } - // We successfully analysed all subscripts but failed to prove independence. - return Dependent; -} - -bool LoopDependenceAnalysis::depends(Value *A, Value *B) { - assert(isDependencePair(A, B) && "Values form no dependence pair!"); - ++NumAnswered; - - DependencePair *p; - if (!findOrInsertDependencePair(A, B, p)) { - // The pair is not cached, so analyse it. 
- ++NumAnalysed; - switch (p->Result = analysePair(p)) { - case Dependent: ++NumDependent; break; - case Independent: ++NumIndependent; break; - case Unknown: ++NumUnknown; break; - } - } - return p->Result != Independent; -} - -//===----------------------------------------------------------------------===// -// LoopDependenceAnalysis Implementation -//===----------------------------------------------------------------------===// - -bool LoopDependenceAnalysis::runOnLoop(Loop *L, LPPassManager &) { - this->L = L; - AA = &getAnalysis(); - SE = &getAnalysis(); - return false; -} - -void LoopDependenceAnalysis::releaseMemory() { - Pairs.clear(); - PairAllocator.Reset(); -} - -void LoopDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { - AU.setPreservesAll(); - AU.addRequiredTransitive(); - AU.addRequiredTransitive(); -} - -static void PrintLoopInfo(raw_ostream &OS, - LoopDependenceAnalysis *LDA, const Loop *L) { - if (!L->empty()) return; // ignore non-innermost loops - - SmallVector memrefs; - GetMemRefInstrs(L, memrefs); - - OS << "Loop at depth " << L->getLoopDepth() << ", header block: "; - WriteAsOperand(OS, L->getHeader(), false); - OS << "\n"; - - OS << " Load/store instructions: " << memrefs.size() << "\n"; - for (SmallVector::const_iterator x = memrefs.begin(), - end = memrefs.end(); x != end; ++x) - OS << "\t" << (x - memrefs.begin()) << ": " << **x << "\n"; - - OS << " Pairwise dependence results:\n"; - for (SmallVector::const_iterator x = memrefs.begin(), - end = memrefs.end(); x != end; ++x) - for (SmallVector::const_iterator y = x + 1; - y != end; ++y) - if (LDA->isDependencePair(*x, *y)) - OS << "\t" << (x - memrefs.begin()) << "," << (y - memrefs.begin()) - << ": " << (LDA->depends(*x, *y) ? "dependent" : "independent") - << "\n"; -} - -void LoopDependenceAnalysis::print(raw_ostream &OS, const Module*) const { - // TODO: doc why const_cast is safe - PrintLoopInfo(OS, const_cast(this), this->L); -} diff --git a/lib/Analysis/LoopInfo.cpp b/lib/Analysis/LoopInfo.cpp index 20c33a3..8341f9d 100644 --- a/lib/Analysis/LoopInfo.cpp +++ b/lib/Analysis/LoopInfo.cpp @@ -306,9 +306,11 @@ BasicBlock *Loop::getUniqueExitBlock() const { return 0; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Loop::dump() const { print(dbgs()); } +#endif //===----------------------------------------------------------------------===// // UnloopUpdater implementation @@ -429,8 +431,8 @@ void UnloopUpdater::updateSubloopParents() { Unloop->removeChildLoop(llvm::prior(Unloop->end())); assert(SubloopParents.count(Subloop) && "DFS failed to visit subloop"); - if (SubloopParents[Subloop]) - SubloopParents[Subloop]->addChildLoop(Subloop); + if (Loop *Parent = SubloopParents[Subloop]) + Parent->addChildLoop(Subloop); else LI->addTopLevelLoop(Subloop); } @@ -456,9 +458,8 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) { assert(Subloop && "subloop is not an ancestor of the original loop"); } // Get the current nearest parent of the Subloop exits, initially Unloop. 
- if (!SubloopParents.count(Subloop)) - SubloopParents[Subloop] = Unloop; - NearLoop = SubloopParents[Subloop]; + NearLoop = + SubloopParents.insert(std::make_pair(Subloop, Unloop)).first->second; } succ_iterator I = succ_begin(BB), E = succ_end(BB); diff --git a/lib/Analysis/MemoryBuiltins.cpp b/lib/Analysis/MemoryBuiltins.cpp index e77d2ff..0a539fe 100644 --- a/lib/Analysis/MemoryBuiltins.cpp +++ b/lib/Analysis/MemoryBuiltins.cpp @@ -25,7 +25,8 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; @@ -39,7 +40,7 @@ enum AllocType { }; struct AllocFnsTy { - const char *Name; + LibFunc::Func Func; AllocType AllocTy; unsigned char NumParams; // First and Second size parameters (or -1 if unused) @@ -49,22 +50,22 @@ struct AllocFnsTy { // FIXME: certain users need more information. E.g., SimplifyLibCalls needs to // know which functions are nounwind, noalias, nocapture parameters, etc. static const AllocFnsTy AllocationFnData[] = { - {"malloc", MallocLike, 1, 0, -1}, - {"valloc", MallocLike, 1, 0, -1}, - {"_Znwj", MallocLike, 1, 0, -1}, // new(unsigned int) - {"_ZnwjRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned int, nothrow) - {"_Znwm", MallocLike, 1, 0, -1}, // new(unsigned long) - {"_ZnwmRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned long, nothrow) - {"_Znaj", MallocLike, 1, 0, -1}, // new[](unsigned int) - {"_ZnajRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow) - {"_Znam", MallocLike, 1, 0, -1}, // new[](unsigned long) - {"_ZnamRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow) - {"posix_memalign", MallocLike, 3, 2, -1}, - {"calloc", CallocLike, 2, 0, 1}, - {"realloc", ReallocLike, 2, 1, -1}, - {"reallocf", ReallocLike, 2, 1, -1}, - {"strdup", StrDupLike, 1, -1, -1}, - {"strndup", StrDupLike, 2, 1, -1} + {LibFunc::malloc, MallocLike, 1, 0, -1}, + {LibFunc::valloc, MallocLike, 1, 0, -1}, + {LibFunc::Znwj, MallocLike, 1, 0, -1}, // new(unsigned int) + {LibFunc::ZnwjRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned int, nothrow) + {LibFunc::Znwm, MallocLike, 1, 0, -1}, // new(unsigned long) + {LibFunc::ZnwmRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned long, nothrow) + {LibFunc::Znaj, MallocLike, 1, 0, -1}, // new[](unsigned int) + {LibFunc::ZnajRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow) + {LibFunc::Znam, MallocLike, 1, 0, -1}, // new[](unsigned long) + {LibFunc::ZnamRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow) + {LibFunc::posix_memalign, MallocLike, 3, 2, -1}, + {LibFunc::calloc, CallocLike, 2, 0, 1}, + {LibFunc::realloc, ReallocLike, 2, 1, -1}, + {LibFunc::reallocf, ReallocLike, 2, 1, -1}, + {LibFunc::strdup, StrDupLike, 1, -1, -1}, + {LibFunc::strndup, StrDupLike, 2, 1, -1} }; @@ -85,15 +86,22 @@ static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) { /// \brief Returns the allocation data for the given value if it is a call to a /// known allocation function, and NULL otherwise. static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy, + const TargetLibraryInfo *TLI, bool LookThroughBitCast = false) { Function *Callee = getCalledFunction(V, LookThroughBitCast); if (!Callee) return 0; + // Make sure that the function is available. 
+ StringRef FnName = Callee->getName(); + LibFunc::Func TLIFn; + if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn)) + return 0; + unsigned i = 0; bool found = false; for ( ; i < array_lengthof(AllocationFnData); ++i) { - if (Callee->getName() == AllocationFnData[i].Name) { + if (AllocationFnData[i].Func == TLIFn) { found = true; break; } @@ -106,7 +114,6 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy, return 0; // Check function prototype. - // FIXME: Check the nobuiltin metadata?? (PR5130) int FstParam = FnData->FstParam; int SndParam = FnData->SndParam; FunctionType *FTy = Callee->getFunctionType(); @@ -125,64 +132,72 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy, static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) { ImmutableCallSite CS(LookThroughBitCast ? V->stripPointerCasts() : V); - return CS && CS.hasFnAttr(Attribute::NoAlias); + return CS && CS.hasFnAttr(Attributes::NoAlias); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup /// like). -bool llvm::isAllocationFn(const Value *V, bool LookThroughBitCast) { - return getAllocationData(V, AnyAlloc, LookThroughBitCast); +bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast) { + return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a function that returns a /// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions). -bool llvm::isNoAliasFn(const Value *V, bool LookThroughBitCast) { +bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast) { // it's safe to consider realloc as noalias since accessing the original // pointer is undefined behavior - return isAllocationFn(V, LookThroughBitCast) || + return isAllocationFn(V, TLI, LookThroughBitCast) || hasNoAliasAttr(V, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates uninitialized memory (such as malloc). -bool llvm::isMallocLikeFn(const Value *V, bool LookThroughBitCast) { - return getAllocationData(V, MallocLike, LookThroughBitCast); +bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast) { + return getAllocationData(V, MallocLike, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates zero-filled memory (such as calloc). -bool llvm::isCallocLikeFn(const Value *V, bool LookThroughBitCast) { - return getAllocationData(V, CallocLike, LookThroughBitCast); +bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast) { + return getAllocationData(V, CallocLike, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// allocates memory (either malloc, calloc, or strdup like). -bool llvm::isAllocLikeFn(const Value *V, bool LookThroughBitCast) { - return getAllocationData(V, AllocLike, LookThroughBitCast); +bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast) { + return getAllocationData(V, AllocLike, TLI, LookThroughBitCast); } /// \brief Tests if a value is a call or invoke to a library function that /// reallocates memory (such as realloc). 
-bool llvm::isReallocLikeFn(const Value *V, bool LookThroughBitCast) { - return getAllocationData(V, ReallocLike, LookThroughBitCast); +bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI, + bool LookThroughBitCast) { + return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast); } /// extractMallocCall - Returns the corresponding CallInst if the instruction /// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we /// ignore InvokeInst here. -const CallInst *llvm::extractMallocCall(const Value *I) { - return isMallocLikeFn(I) ? dyn_cast(I) : 0; +const CallInst *llvm::extractMallocCall(const Value *I, + const TargetLibraryInfo *TLI) { + return isMallocLikeFn(I, TLI) ? dyn_cast(I) : 0; } -static Value *computeArraySize(const CallInst *CI, const TargetData *TD, +static Value *computeArraySize(const CallInst *CI, const DataLayout *TD, + const TargetLibraryInfo *TLI, bool LookThroughSExt = false) { if (!CI) return NULL; // The size of the malloc's result type must be known to determine array size. - Type *T = getMallocAllocatedType(CI); + Type *T = getMallocAllocatedType(CI, TLI); if (!T || !T->isSized() || !TD) return NULL; @@ -204,9 +219,11 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD, /// isArrayMalloc - Returns the corresponding CallInst if the instruction /// is a call to malloc whose array size can be determined and the array size /// is not constant 1. Otherwise, return NULL. -const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) { - const CallInst *CI = extractMallocCall(I); - Value *ArraySize = computeArraySize(CI, TD); +const CallInst *llvm::isArrayMalloc(const Value *I, + const DataLayout *TD, + const TargetLibraryInfo *TLI) { + const CallInst *CI = extractMallocCall(I, TLI); + Value *ArraySize = computeArraySize(CI, TD, TLI); if (ArraySize && ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1)) @@ -221,8 +238,9 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) { /// 0: PointerType is the calls' return type. /// 1: PointerType is the bitcast's result type. /// >1: Unique PointerType cannot be determined, return NULL. -PointerType *llvm::getMallocType(const CallInst *CI) { - assert(isMallocLikeFn(CI) && "getMallocType and not malloc call"); +PointerType *llvm::getMallocType(const CallInst *CI, + const TargetLibraryInfo *TLI) { + assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call"); PointerType *MallocType = NULL; unsigned NumOfBitCastUses = 0; @@ -252,8 +270,9 @@ PointerType *llvm::getMallocType(const CallInst *CI) { /// 0: PointerType is the malloc calls' return type. /// 1: PointerType is the bitcast's result type. /// >1: Unique PointerType cannot be determined, return NULL. -Type *llvm::getMallocAllocatedType(const CallInst *CI) { - PointerType *PT = getMallocType(CI); +Type *llvm::getMallocAllocatedType(const CallInst *CI, + const TargetLibraryInfo *TLI) { + PointerType *PT = getMallocType(CI, TLI); return PT ? PT->getElementType() : NULL; } @@ -262,22 +281,24 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI) { /// then return that multiple. For non-array mallocs, the multiple is /// constant 1. Otherwise, return NULL for mallocs whose array size cannot be /// determined. 
-Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD, +Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *TD, + const TargetLibraryInfo *TLI, bool LookThroughSExt) { - assert(isMallocLikeFn(CI) && "getMallocArraySize and not malloc call"); - return computeArraySize(CI, TD, LookThroughSExt); + assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call"); + return computeArraySize(CI, TD, TLI, LookThroughSExt); } /// extractCallocCall - Returns the corresponding CallInst if the instruction /// is a calloc call. -const CallInst *llvm::extractCallocCall(const Value *I) { - return isCallocLikeFn(I) ? cast(I) : 0; +const CallInst *llvm::extractCallocCall(const Value *I, + const TargetLibraryInfo *TLI) { + return isCallocLikeFn(I, TLI) ? cast(I) : 0; } /// isFreeCall - Returns non-null if the value is a call to the builtin free() -const CallInst *llvm::isFreeCall(const Value *I) { +const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) { const CallInst *CI = dyn_cast(I); if (!CI) return 0; @@ -285,9 +306,14 @@ const CallInst *llvm::isFreeCall(const Value *I) { if (Callee == 0 || !Callee->isDeclaration()) return 0; - if (Callee->getName() != "free" && - Callee->getName() != "_ZdlPv" && // operator delete(void*) - Callee->getName() != "_ZdaPv") // operator delete[](void*) + StringRef FnName = Callee->getName(); + LibFunc::Func TLIFn; + if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn)) + return 0; + + if (TLIFn != LibFunc::free && + TLIFn != LibFunc::ZdlPv && // operator delete(void*) + TLIFn != LibFunc::ZdaPv) // operator delete[](void*) return 0; // Check free prototype. @@ -315,12 +341,12 @@ const CallInst *llvm::isFreeCall(const Value *I) { /// object size in Size if successful, and false otherwise. /// If RoundToAlign is true, then Size is rounded up to the aligment of allocas, /// byval arguments, and global variables. -bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD, - bool RoundToAlign) { +bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD, + const TargetLibraryInfo *TLI, bool RoundToAlign) { if (!TD) return false; - ObjectSizeOffsetVisitor Visitor(TD, Ptr->getContext(), RoundToAlign); + ObjectSizeOffsetVisitor Visitor(TD, TLI, Ptr->getContext(), RoundToAlign); SizeOffsetType Data = Visitor.compute(const_cast(Ptr)); if (!Visitor.bothKnown(Data)) return false; @@ -347,10 +373,11 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) { return Size; } -ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD, +ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD, + const TargetLibraryInfo *TLI, LLVMContext &Context, bool RoundToAlign) -: TD(TD), RoundToAlign(RoundToAlign) { +: TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) { IntegerType *IntTy = TD->getIntPtrType(Context); IntTyBits = IntTy->getBitWidth(); Zero = APInt::getNullValue(IntTyBits); @@ -358,11 +385,16 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD, SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) { V = V->stripPointerCasts(); + if (Instruction *I = dyn_cast(V)) { + // If we have already seen this instruction, bail out. Cycles can happen in + // unreachable code after constant propagation. 
+ if (!SeenInsts.insert(I)) + return unknown(); - if (GEPOperator *GEP = dyn_cast(V)) - return visitGEPOperator(*GEP); - if (Instruction *I = dyn_cast(V)) + if (GEPOperator *GEP = dyn_cast(V)) + return visitGEPOperator(*GEP); return visit(*I); + } if (Argument *A = dyn_cast(V)) return visitArgument(*A); if (ConstantPointerNull *P = dyn_cast(V)) @@ -371,9 +403,12 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) { return visitGlobalVariable(*GV); if (UndefValue *UV = dyn_cast(V)) return visitUndefValue(*UV); - if (ConstantExpr *CE = dyn_cast(V)) + if (ConstantExpr *CE = dyn_cast(V)) { if (CE->getOpcode() == Instruction::IntToPtr) return unknown(); // clueless + if (CE->getOpcode() == Instruction::GetElementPtr) + return visitGEPOperator(cast(*CE)); + } DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: " << *V << '\n'); @@ -408,7 +443,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) { } SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) { - const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc); + const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc, + TLI); if (!FnData) return unknown(); @@ -473,10 +509,6 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) { } SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) { - // Ignore self-referencing GEPs, they can occur in unreachable code. - if (&GEP == GEP.getPointerOperand()) - return unknown(); - SizeOffsetType PtrData = compute(GEP.getPointerOperand()); if (!bothKnown(PtrData) || !GEP.hasAllConstantIndices()) return unknown(); @@ -510,10 +542,6 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode&) { } SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) { - // ignore malformed self-looping selects - if (I.getTrueValue() == &I || I.getFalseValue() == &I) - return unknown(); - SizeOffsetType TrueSide = compute(I.getTrueValue()); SizeOffsetType FalseSide = compute(I.getFalseValue()); if (bothKnown(TrueSide) && bothKnown(FalseSide) && TrueSide == FalseSide) @@ -531,10 +559,10 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) { } -ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD, +ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD, + const TargetLibraryInfo *TLI, LLVMContext &Context) -: TD(TD), Context(Context), Builder(Context, TargetFolder(TD)), -Visitor(TD, Context) { +: TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) { IntTy = TD->getIntPtrType(Context); Zero = ConstantInt::get(IntTy, 0); } @@ -559,6 +587,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) { } SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) { + ObjectSizeOffsetVisitor Visitor(TD, TLI, Context); SizeOffsetType Const = Visitor.compute(V); if (Visitor.bothKnown(Const)) return std::make_pair(ConstantInt::get(Context, Const.first), @@ -621,7 +650,8 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) { } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) { - const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc); + const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc, + TLI); if (!FnData) return unknown(); @@ -719,10 +749,6 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) { } SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) { - // 
ignore malformed self-looping selects - if (I.getTrueValue() == &I || I.getFalseValue() == &I) - return unknown(); - SizeOffsetEvalType TrueSide = compute_(I.getTrueValue()); SizeOffsetEvalType FalseSide = compute_(I.getFalseValue()); diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp index 059e574..9872890 100644 --- a/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -30,7 +30,7 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/Support/PredIteratorCache.h" #include "llvm/Support/Debug.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses"); @@ -89,7 +89,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { bool MemoryDependenceAnalysis::runOnFunction(Function &) { AA = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); DT = getAnalysisIfAvailable(); if (PredCache == 0) PredCache.reset(new PredIteratorCache()); @@ -148,7 +148,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst, return AliasAnalysis::ModRef; } - if (const CallInst *CI = isFreeCall(Inst)) { + if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) { // calls to free() deallocate the entire structure Loc = AliasAnalysis::Location(CI->getArgOperand(0)); return AliasAnalysis::Mod; @@ -256,7 +256,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc, const Value *&MemLocBase, int64_t &MemLocOffs, const LoadInst *LI, - const TargetData *TD) { + const DataLayout *TD) { // If we have no target data, we can't do this. if (TD == 0) return false; @@ -280,7 +280,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc, unsigned MemoryDependenceAnalysis:: getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize, const LoadInst *LI, - const TargetData &TD) { + const DataLayout &TD) { // We can only extend simple integer loads. if (!isa(LI->getType()) || !LI->isSimple()) return 0; @@ -327,12 +327,12 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, return 0; if (LIOffs+NewLoadByteSize > MemLocEnd && - LI->getParent()->getParent()->hasFnAttr(Attribute::AddressSafety)) { + LI->getParent()->getParent()->getFnAttributes(). + hasAttribute(Attributes::AddressSafety)) // We will be reading past the location accessed by the original program. // While this is safe in a regular build, Address Safety analysis tools // may start reporting false warnings. So, don't do widening. return 0; - } // If a load of this width would include all of MemLoc, then we succeed. if (LIOffs+NewLoadByteSize >= MemLocEnd) @@ -479,12 +479,20 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad, // a subsequent bitcast of the malloc call result. There can be stores to // the malloced memory between the malloc call and its bitcast uses, and we // need to continue scanning until the malloc call. - if (isa(Inst) || isNoAliasFn(Inst)) { + const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo(); + if (isa(Inst) || isNoAliasFn(Inst, TLI)) { const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD); if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr)) return MemDepResult::getDef(Inst); - continue; + // Be conservative if the accessed pointer may alias the allocation. 
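The MemoryBuiltins change above drops the one-off checks for self-referencing GEPs and selects and instead guards the whole recursion with a visited set (SeenInsts), so any cycle in unreachable IR is caught in one place. A minimal standalone sketch of that idea, with toy types (Node, computeSize) that are only illustrative:

    #include <cstdio>
    #include <unordered_set>
    #include <vector>

    // Toy "value graph": each node's size depends on the nodes it points at.
    // Unreachable code can contain cycles (even a node pointing at itself),
    // so the walk carries a visited set instead of special-casing each
    // self-referencing form.
    struct Node { int Size; std::vector<const Node*> Operands; };

    static int computeSize(const Node *N, std::unordered_set<const Node*> &Seen) {
      if (!Seen.insert(N).second)   // already in flight: a cycle, give up
        return -1;                  // "unknown"
      int Total = N->Size;
      for (const Node *Op : N->Operands) {
        int S = computeSize(Op, Seen);
        if (S < 0)
          return -1;
        Total += S;
      }
      return Total;
    }

    int main() {
      Node A{8, {}}, B{4, {&A}}, C{0, {}};
      C.Operands.push_back(&C);     // malformed self-cycle
      std::unordered_set<const Node*> SeenB, SeenC;
      std::printf("%d %d\n", computeSize(&B, SeenB), computeSize(&C, SeenC));
      return 0;
    }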
+ if (AA->alias(Inst, AccessPtr) != AliasAnalysis::NoAlias) + return MemDepResult::getClobber(Inst); + // If the allocation is not aliased and does not read memory (like + // strdup), it is safe to ignore. + if (isa(Inst) || + isMallocLikeFn(Inst, TLI) || isCallocLikeFn(Inst, TLI)) + continue; } // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer. @@ -975,7 +983,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end(); I != E; ++I) { Visited.insert(std::make_pair(I->getBB(), Addr)); - if (!I->getResult().isNonLocal()) + if (!I->getResult().isNonLocal() && DT->isReachableFromEntry(I->getBB())) Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr)); } ++NumCacheCompleteNonLocalPtr; @@ -1021,7 +1029,7 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer, NumSortedEntries); // If we got a Def or Clobber, add this to the list of results. - if (!Dep.isNonLocal()) { + if (!Dep.isNonLocal() && DT->isReachableFromEntry(BB)) { Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr())); continue; } diff --git a/lib/Analysis/NoAliasAnalysis.cpp b/lib/Analysis/NoAliasAnalysis.cpp index 101c2d5..2eb4137 100644 --- a/lib/Analysis/NoAliasAnalysis.cpp +++ b/lib/Analysis/NoAliasAnalysis.cpp @@ -15,7 +15,7 @@ #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/Passes.h" #include "llvm/Pass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; namespace { @@ -36,7 +36,7 @@ namespace { virtual void initializePass() { // Note: NoAA does not call InitializeAliasAnalysis because it's // special and does not support chaining. - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); } virtual AliasResult alias(const Location &LocA, const Location &LocB) { diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp index 38cb1c9..c35737e 100644 --- a/lib/Analysis/PHITransAddr.cpp +++ b/lib/Analysis/PHITransAddr.cpp @@ -41,6 +41,7 @@ static bool CanPHITrans(Instruction *Inst) { return false; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void PHITransAddr::dump() const { if (Addr == 0) { dbgs() << "PHITransAddr: null\n"; @@ -50,6 +51,7 @@ void PHITransAddr::dump() const { for (unsigned i = 0, e = InstInputs.size(); i != e; ++i) dbgs() << " Input #" << i << " is " << *InstInputs[i] << "\n"; } +#endif static bool VerifySubExpr(Value *Expr, diff --git a/lib/Analysis/ProfileDataLoader.cpp b/lib/Analysis/ProfileDataLoader.cpp new file mode 100644 index 0000000..a4f634a --- /dev/null +++ b/lib/Analysis/ProfileDataLoader.cpp @@ -0,0 +1,155 @@ +//===- ProfileDataLoader.cpp - Load profile information from disk ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// The ProfileDataLoader class is used to load raw profiling data from the dump +// file. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/Module.h" +#include "llvm/InstrTypes.h" +#include "llvm/Analysis/ProfileDataLoader.h" +#include "llvm/Analysis/ProfileDataTypes.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/system_error.h" +#include +#include +using namespace llvm; + +raw_ostream &llvm::operator<<(raw_ostream &O, std::pair E) { + O << "("; + + if (E.first) + O << E.first->getName(); + else + O << "0"; + + O << ","; + + if (E.second) + O << E.second->getName(); + else + O << "0"; + + return O << ")"; +} + +/// AddCounts - Add 'A' and 'B', accounting for the fact that the value of one +/// (or both) may not be defined. +static unsigned AddCounts(unsigned A, unsigned B) { + // If either value is undefined, use the other. + // Undefined + undefined = undefined. + if (A == ProfileDataLoader::Uncounted) return B; + if (B == ProfileDataLoader::Uncounted) return A; + + return A + B; +} + +/// ReadProfilingData - Load 'NumEntries' items of type 'T' from file 'F' +template +static void ReadProfilingData(const char *ToolName, FILE *F, + T *Data, size_t NumEntries) { + // Read in the block of data... + if (fread(Data, sizeof(T), NumEntries, F) != NumEntries) + report_fatal_error(Twine(ToolName) + ": Profiling data truncated"); +} + +/// ReadProfilingNumEntries - Read how many entries are in this profiling data +/// packet. +static unsigned ReadProfilingNumEntries(const char *ToolName, FILE *F, + bool ShouldByteSwap) { + unsigned Entry; + ReadProfilingData(ToolName, F, &Entry, 1); + return ShouldByteSwap ? ByteSwap_32(Entry) : Entry; +} + +/// ReadProfilingBlock - Read the number of entries in the next profiling data +/// packet and then accumulate the entries into 'Data'. +static void ReadProfilingBlock(const char *ToolName, FILE *F, + bool ShouldByteSwap, + SmallVector &Data) { + // Read the number of entries... + unsigned NumEntries = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap); + + // Read in the data. + SmallVector TempSpace(NumEntries); + ReadProfilingData(ToolName, F, TempSpace.data(), NumEntries); + + // Make sure we have enough space ... + if (Data.size() < NumEntries) + Data.resize(NumEntries, ProfileDataLoader::Uncounted); + + // Accumulate the data we just read into the existing data. + for (unsigned i = 0; i < NumEntries; ++i) { + unsigned Entry = ShouldByteSwap ? ByteSwap_32(TempSpace[i]) : TempSpace[i]; + Data[i] = AddCounts(Entry, Data[i]); + } +} + +/// ReadProfilingArgBlock - Read the command line arguments that the progam was +/// run with when the current profiling data packet(s) were generated. +static void ReadProfilingArgBlock(const char *ToolName, FILE *F, + bool ShouldByteSwap, + SmallVector &CommandLines) { + // Read the number of bytes ... + unsigned ArgLength = ReadProfilingNumEntries(ToolName, F, ShouldByteSwap); + + // Read in the arguments (if there are any to read). Round up the length to + // the nearest 4-byte multiple. + SmallVector Args(ArgLength+4); + if (ArgLength) + ReadProfilingData(ToolName, F, Args.data(), (ArgLength+3) & ~3); + + // Store the arguments. + CommandLines.push_back(std::string(&Args[0], &Args[ArgLength])); +} + +const unsigned ProfileDataLoader::Uncounted = ~0U; + +/// ProfileDataLoader ctor - Read the specified profiling data file, reporting +/// a fatal error if the file is invalid or broken. 
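AddCounts() above merges counters from successive profiling runs, with ~0U (Uncounted) standing for "no data yet". A standalone illustration of that merge rule (addCounts here is just a local copy for demonstration):

    #include <cstdio>

    // Uncounted never contributes to a sum; two real counts are added.
    static const unsigned Uncounted = ~0U;

    static unsigned addCounts(unsigned A, unsigned B) {
      if (A == Uncounted) return B;
      if (B == Uncounted) return A;
      return A + B;
    }

    int main() {
      std::printf("%u\n", addCounts(Uncounted, 7));                      // 7
      std::printf("%u\n", addCounts(3, 4));                              // 7
      std::printf("%d\n", addCounts(Uncounted, Uncounted) == Uncounted); // 1
      return 0;
    }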
+ProfileDataLoader::ProfileDataLoader(const char *ToolName, + const std::string &Filename) + : Filename(Filename) { + FILE *F = fopen(Filename.c_str(), "rb"); + if (F == 0) + report_fatal_error(Twine(ToolName) + ": Error opening '" + + Filename + "': "); + + // Keep reading packets until we run out of them. + unsigned PacketType; + while (fread(&PacketType, sizeof(unsigned), 1, F) == 1) { + // If the low eight bits of the packet are zero, we must be dealing with an + // endianness mismatch. Byteswap all words read from the profiling + // information. This can happen when the compiler host and target have + // different endianness. + bool ShouldByteSwap = (char)PacketType == 0; + PacketType = ShouldByteSwap ? ByteSwap_32(PacketType) : PacketType; + + switch (PacketType) { + case ArgumentInfo: + ReadProfilingArgBlock(ToolName, F, ShouldByteSwap, CommandLines); + break; + + case EdgeInfo: + ReadProfilingBlock(ToolName, F, ShouldByteSwap, EdgeCounts); + break; + + default: + report_fatal_error(std::string(ToolName) + + ": Unknown profiling packet type"); + break; + } + } + + fclose(F); +} diff --git a/lib/Analysis/ProfileDataLoaderPass.cpp b/lib/Analysis/ProfileDataLoaderPass.cpp new file mode 100644 index 0000000..c43cff0 --- /dev/null +++ b/lib/Analysis/ProfileDataLoaderPass.cpp @@ -0,0 +1,188 @@ +//===- ProfileDataLoaderPass.cpp - Set branch weight metadata from prof ---===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass loads profiling data from a dump file and sets branch weight +// metadata. +// +// TODO: Replace all "profile-metadata-loader" strings with "profile-loader" +// once ProfileInfo etc. has been removed. +// +//===----------------------------------------------------------------------===// +#define DEBUG_TYPE "profile-metadata-loader" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/BasicBlock.h" +#include "llvm/InstrTypes.h" +#include "llvm/Module.h" +#include "llvm/LLVMContext.h" +#include "llvm/MDBuilder.h" +#include "llvm/Metadata.h" +#include "llvm/Pass.h" +#include "llvm/Analysis/Passes.h" +#include "llvm/Analysis/ProfileDataLoader.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/CFG.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Format.h" +#include "llvm/ADT/Statistic.h" +using namespace llvm; + +STATISTIC(NumEdgesRead, "The # of edges read."); +STATISTIC(NumTermsAnnotated, "The # of terminator instructions annotated."); + +static cl::opt +ProfileMetadataFilename("profile-file", cl::init("llvmprof.out"), + cl::value_desc("filename"), + cl::desc("Profile file loaded by -profile-metadata-loader")); + +namespace { + /// This pass loads profiling data from a dump file and sets branch weight + /// metadata. 
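The constructor above leans on packet type tags being small positive integers to spot an endianness mismatch: if the low byte of the word just read is zero, the file was produced on a host with the opposite byte order and every word has to be swapped. A standalone sketch of that check, where byteSwap32 is a local stand-in for the ByteSwap_32 helper used in the patch:

    #include <cstdint>
    #include <cstdio>

    static uint32_t byteSwap32(uint32_t V) {
      return (V >> 24) | ((V >> 8) & 0x0000FF00u) |
             ((V << 8) & 0x00FF0000u) | (V << 24);
    }

    int main() {
      // Simulate reading a small tag (say, 2) that was written with the
      // opposite endianness: the raw word comes out as 0x02000000.
      uint32_t PacketType = 0x02000000u;
      bool ShouldByteSwap = (char)PacketType == 0;  // low eight bits are zero
      if (ShouldByteSwap)
        PacketType = byteSwap32(PacketType);
      std::printf("type=%u swapped=%d\n", PacketType, ShouldByteSwap);
      return 0;
    }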
+ class ProfileMetadataLoaderPass : public ModulePass { + std::string Filename; + public: + static char ID; // Class identification, replacement for typeinfo + explicit ProfileMetadataLoaderPass(const std::string &filename = "") + : ModulePass(ID), Filename(filename) { + initializeProfileMetadataLoaderPassPass(*PassRegistry::getPassRegistry()); + if (filename.empty()) Filename = ProfileMetadataFilename; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + } + + virtual const char *getPassName() const { + return "Profile loader"; + } + + virtual void readEdge(unsigned, ProfileData&, ProfileData::Edge, + ArrayRef); + virtual unsigned matchEdges(Module&, ProfileData&, ArrayRef); + virtual void setBranchWeightMetadata(Module&, ProfileData&); + + virtual bool runOnModule(Module &M); + }; +} // End of anonymous namespace + +char ProfileMetadataLoaderPass::ID = 0; +INITIALIZE_PASS_BEGIN(ProfileMetadataLoaderPass, "profile-metadata-loader", + "Load profile information from llvmprof.out", false, true) +INITIALIZE_PASS_END(ProfileMetadataLoaderPass, "profile-metadata-loader", + "Load profile information from llvmprof.out", false, true) + +char &llvm::ProfileMetadataLoaderPassID = ProfileMetadataLoaderPass::ID; + +/// createProfileMetadataLoaderPass - This function returns a Pass that loads +/// the profiling information for the module from the specified filename, +/// making it available to the optimizers. +ModulePass *llvm::createProfileMetadataLoaderPass() { + return new ProfileMetadataLoaderPass(); +} +ModulePass *llvm::createProfileMetadataLoaderPass(const std::string &Filename) { + return new ProfileMetadataLoaderPass(Filename); +} + +/// readEdge - Take the value from a profile counter and assign it to an edge. +void ProfileMetadataLoaderPass::readEdge(unsigned ReadCount, + ProfileData &PB, ProfileData::Edge e, + ArrayRef Counters) { + if (ReadCount >= Counters.size()) return; + + unsigned weight = Counters[ReadCount]; + assert(weight != ProfileDataLoader::Uncounted); + PB.addEdgeWeight(e, weight); + + DEBUG(dbgs() << "-- Read Edge Counter for " << e + << " (# "<< (ReadCount) << "): " + << PB.getEdgeWeight(e) << "\n"); +} + +/// matchEdges - Link every profile counter with an edge. +unsigned ProfileMetadataLoaderPass::matchEdges(Module &M, ProfileData &PB, + ArrayRef Counters) { + if (Counters.size() == 0) return 0; + + unsigned ReadCount = 0; + + for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { + if (F->isDeclaration()) continue; + DEBUG(dbgs() << "Loading edges in '" << F->getName() << "'\n"); + readEdge(ReadCount++, PB, PB.getEdge(0, &F->getEntryBlock()), Counters); + for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { + TerminatorInst *TI = BB->getTerminator(); + for (unsigned s = 0, e = TI->getNumSuccessors(); s != e; ++s) { + readEdge(ReadCount++, PB, PB.getEdge(BB,TI->getSuccessor(s)), + Counters); + } + } + } + + return ReadCount; +} + +/// setBranchWeightMetadata - Translate the counter values associated with each +/// edge into branch weights for each conditional branch (a branch with 2 or +/// more desinations). 
+void ProfileMetadataLoaderPass::setBranchWeightMetadata(Module &M, + ProfileData &PB) { + for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { + if (F->isDeclaration()) continue; + DEBUG(dbgs() << "Setting branch metadata in '" << F->getName() << "'\n"); + + for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) { + TerminatorInst *TI = BB->getTerminator(); + unsigned NumSuccessors = TI->getNumSuccessors(); + + // If there is only one successor then we can not set a branch + // probability as the target is certain. + if (NumSuccessors < 2) continue; + + // Load the weights of all edges leading from this terminator. + DEBUG(dbgs() << "-- Terminator with " << NumSuccessors + << " successors:\n"); + SmallVector Weights(NumSuccessors); + for (unsigned s = 0 ; s < NumSuccessors ; ++s) { + ProfileData::Edge edge = PB.getEdge(BB, TI->getSuccessor(s)); + Weights[s] = (uint32_t)PB.getEdgeWeight(edge); + DEBUG(dbgs() << "---- Edge '" << edge << "' has weight " + << Weights[s] << "\n"); + } + + // Set branch weight metadata. This will set branch probabilities of + // 100%/0% if that is true of the dynamic execution. + // BranchProbabilityInfo can account for this when it loads this metadata + // (it gives the unexectuted branch a weight of 1 for the purposes of + // probability calculations). + MDBuilder MDB(TI->getContext()); + MDNode *Node = MDB.createBranchWeights(Weights); + TI->setMetadata(LLVMContext::MD_prof, Node); + NumTermsAnnotated++; + } + } +} + +bool ProfileMetadataLoaderPass::runOnModule(Module &M) { + ProfileDataLoader PDL("profile-data-loader", Filename); + ProfileData PB; + + ArrayRef Counters = PDL.getRawEdgeCounts(); + + unsigned ReadCount = matchEdges(M, PB, Counters); + + if (ReadCount != Counters.size()) { + errs() << "WARNING: profile information is inconsistent with " + << "the current program!\n"; + } + NumEdgesRead = ReadCount; + + setBranchWeightMetadata(M, PB); + + return ReadCount > 0; +} diff --git a/lib/Analysis/ProfileEstimatorPass.cpp b/lib/Analysis/ProfileEstimatorPass.cpp index 63468f8..12b59e0 100644 --- a/lib/Analysis/ProfileEstimatorPass.cpp +++ b/lib/Analysis/ProfileEstimatorPass.cpp @@ -286,7 +286,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) { } } - double fraction = floor(BBWeight/Edges.size()); + double fraction = Edges.size() ? floor(BBWeight/Edges.size()) : 0.0; // Finally we know what flow is still not leaving the block, distribute this // flow onto the empty edges. 
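setBranchWeightMetadata() above is where the loaded counters finally become IR metadata. A condensed sketch of the same MDBuilder calls for a single two-way branch; annotateBranch is illustrative only, and the terminator and counts are assumed to come from the surrounding pass:

    #include "llvm/InstrTypes.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/MDBuilder.h"
    #include <stdint.h>
    using namespace llvm;

    // Attach !prof branch_weights to a terminator with exactly two successors,
    // using the same calls as setBranchWeightMetadata() above.
    static void annotateBranch(TerminatorInst *TI, uint32_t TakenCount,
                               uint32_t NotTakenCount) {
      uint32_t Weights[2] = { TakenCount, NotTakenCount };
      MDBuilder MDB(TI->getContext());
      TI->setMetadata(LLVMContext::MD_prof, MDB.createBranchWeights(Weights));
    }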
for (SmallVector::iterator ei = Edges.begin(), ee = Edges.end(); diff --git a/lib/Analysis/ProfileInfo.cpp b/lib/Analysis/ProfileInfo.cpp index 173de2c..b5b7ac1 100644 --- a/lib/Analysis/ProfileInfo.cpp +++ b/lib/Analysis/ProfileInfo.cpp @@ -1016,40 +1016,14 @@ void ProfileInfoT::repair(const Function *F) { } } -raw_ostream& operator<<(raw_ostream &O, const Function *F) { - return O << F->getName(); -} - raw_ostream& operator<<(raw_ostream &O, const MachineFunction *MF) { return O << MF->getFunction()->getName() << "(MF)"; } -raw_ostream& operator<<(raw_ostream &O, const BasicBlock *BB) { - return O << BB->getName(); -} - raw_ostream& operator<<(raw_ostream &O, const MachineBasicBlock *MBB) { return O << MBB->getBasicBlock()->getName() << "(MB)"; } -raw_ostream& operator<<(raw_ostream &O, std::pair E) { - O << "("; - - if (E.first) - O << E.first; - else - O << "0"; - - O << ","; - - if (E.second) - O << E.second; - else - O << "0"; - - return O << ")"; -} - raw_ostream& operator<<(raw_ostream &O, std::pair E) { O << "("; diff --git a/lib/Analysis/RegionInfo.cpp b/lib/Analysis/RegionInfo.cpp index 868f483..30f0d2f 100644 --- a/lib/Analysis/RegionInfo.cpp +++ b/lib/Analysis/RegionInfo.cpp @@ -47,7 +47,7 @@ static cl::opt printStyle("print-region-style", cl::values( clEnumValN(Region::PrintNone, "none", "print no details"), clEnumValN(Region::PrintBB, "bb", - "print regions in detail with block_node_iterator"), + "print regions in detail with block_iterator"), clEnumValN(Region::PrintRN, "rn", "print regions in detail with element_iterator"), clEnumValEnd)); @@ -246,22 +246,6 @@ void Region::verifyRegionNest() const { verifyRegion(); } -Region::block_node_iterator Region::block_node_begin() { - return GraphTraits >::nodes_begin(this); -} - -Region::block_node_iterator Region::block_node_end() { - return GraphTraits >::nodes_end(this); -} - -Region::const_block_node_iterator Region::block_node_begin() const { - return GraphTraits >::nodes_begin(this); -} - -Region::const_block_node_iterator Region::block_node_end() const { - return GraphTraits >::nodes_end(this); -} - Region::element_iterator Region::element_begin() { return GraphTraits::nodes_begin(this); } @@ -425,10 +409,8 @@ void Region::print(raw_ostream &OS, bool print_tree, unsigned level, OS.indent(level*2 + 2); if (Style == PrintBB) { - for (const_block_node_iterator I = block_node_begin(), - E = block_node_end(); - I != E; ++I) - OS << **I << ", "; // TODO: remove the last "," + for (const_block_iterator I = block_begin(), E = block_end(); I != E; ++I) + OS << (*I)->getName() << ", "; // TODO: remove the last "," } else if (Style == PrintRN) { for (const_element_iterator I = element_begin(), E = element_end(); I!=E; ++I) OS << **I << ", "; // TODO: remove the last ", @@ -445,9 +427,11 @@ void Region::print(raw_ostream &OS, bool print_tree, unsigned level, OS.indent(level*2) << "} \n"; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Region::dump() const { print(dbgs(), true, getDepth(), printStyle.getValue()); } +#endif void Region::clearNodeCache() { // Free the cached nodes. 
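Several hunks in this patch (PHITransAddr and Region above, SCEV and Trace below) wrap dump() in the same preprocessor guard so the helper only exists in asserts builds or when LLVM_ENABLE_DUMP is defined. The pattern, reduced to a standalone example with a made-up Example type:

    #include <cstdio>

    struct Example {
      void print() const { std::printf("Example\n"); }
    #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
      // Debugger convenience only; compiled out of release builds unless
      // LLVM_ENABLE_DUMP is requested explicitly.
      void dump() const { print(); }
    #endif
    };

    int main() {
      Example E;
      E.print();
      return 0;
    }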
diff --git a/lib/Analysis/RegionPass.cpp b/lib/Analysis/RegionPass.cpp index c97b5eb..9208fa2 100644 --- a/lib/Analysis/RegionPass.cpp +++ b/lib/Analysis/RegionPass.cpp @@ -195,10 +195,9 @@ public: virtual bool runOnRegion(Region *R, RGPassManager &RGM) { Out << Banner; - for (Region::block_node_iterator I = R->block_node_begin(), - E = R->block_node_end(); + for (Region::block_iterator I = R->block_begin(), E = R->block_end(); I != E; ++I) - (*I)->getEntry()->print(Out); + (*I)->print(Out); return false; } diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index a654648..e3189ec 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -73,7 +73,7 @@ #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Assembly/Writer.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ConstantRange.h" @@ -105,6 +105,11 @@ MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, "derived loop"), cl::init(100)); +// FIXME: Enable this with XDEBUG when the test suite is clean. +static cl::opt +VerifySCEV("verify-scev", + cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); + INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution", "Scalar Evolution Analysis", false, true) INITIALIZE_PASS_DEPENDENCY(LoopInfo) @@ -122,10 +127,12 @@ char ScalarEvolution::ID = 0; // Implementation of the SCEV class. // +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void SCEV::dump() const { print(dbgs()); dbgs() << '\n'; } +#endif void SCEV::print(raw_ostream &OS) const { switch (getSCEVType()) { @@ -2580,7 +2587,7 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS, } const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) { - // If we have TargetData, we can bypass creating a target-independent + // If we have DataLayout, we can bypass creating a target-independent // constant expression and then folding it back into a ConstantInt. // This is just a compile-time optimization. if (TD) @@ -2606,7 +2613,7 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) { const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy, unsigned FieldNo) { - // If we have TargetData, we can bypass creating a target-independent + // If we have DataLayout, we can bypass creating a target-independent // constant expression and then folding it back into a ConstantInt. // This is just a compile-time optimization. if (TD) @@ -2671,7 +2678,7 @@ bool ScalarEvolution::isSCEVable(Type *Ty) const { uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { assert(isSCEVable(Ty) && "Type is not SCEVable!"); - // If we have a TargetData, use it! + // If we have a DataLayout, use it! if (TD) return TD->getTypeSizeInBits(Ty); @@ -2679,7 +2686,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { if (Ty->isIntegerTy()) return Ty->getPrimitiveSizeInBits(); - // The only other support type is pointer. Without TargetData, conservatively + // The only other support type is pointer. Without DataLayout, conservatively // assume pointers are 64-bit. 
assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!"); return 64; @@ -2699,7 +2706,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); if (TD) return TD->getIntPtrType(getContext()); - // Without TargetData, conservatively assume pointers are 64-bit. + // Without DataLayout, conservatively assume pointers are 64-bit. return Type::getInt64Ty(getContext()); } @@ -3978,8 +3985,11 @@ getSmallConstantTripMultiple(Loop *L, BasicBlock *ExitingBlock) { ConstantInt *Result = MulC->getValue(); - // Guard against huge trip counts. - if (!Result || Result->getValue().getActiveBits() > 32) + // Guard against huge trip counts (this requires checking + // for zero to handle the case where the trip count == -1 and the + // addition wraps). + if (!Result || Result->getValue().getActiveBits() > 32 || + Result->getValue().getActiveBits() == 0) return 1; return (unsigned)Result->getZExtValue(); @@ -4749,7 +4759,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) { /// reason, return null. static Constant *EvaluateExpression(Value *V, const Loop *L, DenseMap &Vals, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI) { // Convenient constant check, but redundant for recursive calls. if (Constant *C = dyn_cast(V)) return C; @@ -6141,7 +6151,7 @@ bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, return CmpInst::isTrueWhenEqual(Pred); if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS)) if (FoundLHS == FoundRHS) - return CmpInst::isFalseWhenEqual(Pred); + return CmpInst::isFalseWhenEqual(FoundPred); // Check to see if we can make the LHS or RHS match. if (LHS == FoundRHS || RHS == FoundLHS) { @@ -6588,7 +6598,7 @@ ScalarEvolution::ScalarEvolution() bool ScalarEvolution::runOnFunction(Function &F) { this->F = &F; LI = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); DT = &getAnalysis(); return false; @@ -6930,3 +6940,87 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) { UnsignedRanges.erase(S); SignedRanges.erase(S); } + +typedef DenseMap VerifyMap; + +/// replaceSubString - Replaces all occurences of From in Str with To. +static void replaceSubString(std::string &Str, StringRef From, StringRef To) { + size_t Pos = 0; + while ((Pos = Str.find(From, Pos)) != std::string::npos) { + Str.replace(Pos, From.size(), To.data(), To.size()); + Pos += To.size(); + } +} + +/// getLoopBackedgeTakenCounts - Helper method for verifyAnalysis. +static void +getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) { + for (Loop::reverse_iterator I = L->rbegin(), E = L->rend(); I != E; ++I) { + getLoopBackedgeTakenCounts(*I, Map, SE); // recurse. + + std::string &S = Map[L]; + if (S.empty()) { + raw_string_ostream OS(S); + SE.getBackedgeTakenCount(L)->print(OS); + + // false and 0 are semantically equivalent. This can happen in dead loops. + replaceSubString(OS.str(), "false", "0"); + // Remove wrap flags, their use in SCEV is highly fragile. + // FIXME: Remove this when SCEV gets smarter about them. + replaceSubString(OS.str(), "", ""); + replaceSubString(OS.str(), "", ""); + replaceSubString(OS.str(), "", ""); + } + } +} + +void ScalarEvolution::verifyAnalysis() const { + if (!VerifySCEV) + return; + + ScalarEvolution &SE = *const_cast(this); + + // Gather stringified backedge taken counts for all loops using SCEV's caches. 
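The getSmallConstantTripMultiple() hunk above adds the ActiveBits() == 0 case for the reason spelled out in its new comment: when the backedge-taken count is -1, adding one wraps to 0, and a trip multiple of 0 would be meaningless, so the function falls back to 1. A standalone illustration of the wrap:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t BackedgeTakenCount = UINT32_MAX;     // i.e. -1 at this width
      uint32_t TripCount = BackedgeTakenCount + 1;  // wraps to 0
      unsigned TripMultiple = (TripCount == 0) ? 1 : TripCount;
      std::printf("trip count %u -> multiple %u\n", TripCount, TripMultiple);
      return 0;
    }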
+ // FIXME: It would be much better to store actual values instead of strings, + // but SCEV pointers will change if we drop the caches. + VerifyMap BackedgeDumpsOld, BackedgeDumpsNew; + for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I) + getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE); + + // Gather stringified backedge taken counts for all loops without using + // SCEV's caches. + SE.releaseMemory(); + for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I) + getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE); + + // Now compare whether they're the same with and without caches. This allows + // verifying that no pass changed the cache. + assert(BackedgeDumpsOld.size() == BackedgeDumpsNew.size() && + "New loops suddenly appeared!"); + + for (VerifyMap::iterator OldI = BackedgeDumpsOld.begin(), + OldE = BackedgeDumpsOld.end(), + NewI = BackedgeDumpsNew.begin(); + OldI != OldE; ++OldI, ++NewI) { + assert(OldI->first == NewI->first && "Loop order changed!"); + + // Compare the stringified SCEVs. We don't care if undef backedgetaken count + // changes. + // FIXME: We currently ignore SCEV changes from/to CouldNotCompute. This + // means that a pass is buggy or SCEV has to learn a new pattern but is + // usually not harmful. + if (OldI->second != NewI->second && + OldI->second.find("undef") == std::string::npos && + NewI->second.find("undef") == std::string::npos && + OldI->second != "***COULDNOTCOMPUTE***" && + NewI->second != "***COULDNOTCOMPUTE***") { + dbgs() << "SCEVValidator: SCEV for loop '" + << OldI->first->getHeader()->getName() + << "' changed from '" << OldI->second + << "' to '" << NewI->second << "'!\n"; + std::abort(); + } + } + + // TODO: Verify more things. +} diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp index 62710c5..111bfb4 100644 --- a/lib/Analysis/ScalarEvolutionExpander.cpp +++ b/lib/Analysis/ScalarEvolutionExpander.cpp @@ -18,7 +18,7 @@ #include "llvm/IntrinsicInst.h" #include "llvm/LLVMContext.h" #include "llvm/Support/Debug.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLowering.h" #include "llvm/ADT/STLExtras.h" @@ -212,7 +212,7 @@ static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder, const SCEV *Factor, ScalarEvolution &SE, - const TargetData *TD) { + const DataLayout *TD) { // Everything is divisible by one. if (Factor->isOne()) return true; @@ -253,7 +253,7 @@ static bool FactorOutConstant(const SCEV *&S, // of the given factor. if (const SCEVMulExpr *M = dyn_cast(S)) { if (TD) { - // With TargetData, the size is known. Check if there is a constant + // With DataLayout, the size is known. Check if there is a constant // operand which is a multiple of the given factor. If so, we can // factor it. const SCEVConstant *FC = cast(Factor); @@ -267,7 +267,7 @@ static bool FactorOutConstant(const SCEV *&S, return true; } } else { - // Without TargetData, check if Factor can be factored out of any of the + // Without DataLayout, check if Factor can be factored out of any of the // Mul's operands. If so, we can just remove it. for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { const SCEV *SOp = M->getOperand(i); @@ -458,7 +458,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin, // An empty struct has no fields. if (STy->getNumElements() == 0) break; if (SE.TD) { - // With TargetData, field offsets are known. 
See if a constant offset + // With DataLayout, field offsets are known. See if a constant offset // falls within any of the struct fields. if (Ops.empty()) break; if (const SCEVConstant *C = dyn_cast(Ops[0])) @@ -477,7 +477,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin, } } } else { - // Without TargetData, just check for an offsetof expression of the + // Without DataLayout, just check for an offsetof expression of the // appropriate struct type. for (unsigned i = 0, e = Ops.size(); i != e; ++i) if (const SCEVUnknown *U = dyn_cast(Ops[i])) { @@ -1618,6 +1618,17 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT, PEnd = Phis.end(); PIter != PEnd; ++PIter) { PHINode *Phi = *PIter; + // Fold constant phis. They may be congruent to other constant phis and + // would confuse the logic below that expects proper IVs. + if (Value *V = Phi->hasConstantValue()) { + Phi->replaceAllUsesWith(V); + DeadInsts.push_back(Phi); + ++NumElim; + DEBUG_WITH_TYPE(DebugType, dbgs() + << "INDVARS: Eliminated constant iv: " << *Phi << '\n'); + continue; + } + if (!SE.isSCEVable(Phi->getType())) continue; diff --git a/lib/Analysis/Trace.cpp b/lib/Analysis/Trace.cpp index ff5010b..22da857 100644 --- a/lib/Analysis/Trace.cpp +++ b/lib/Analysis/Trace.cpp @@ -43,9 +43,11 @@ void Trace::print(raw_ostream &O) const { O << "; Trace parent function: \n" << *F; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// dump - Debugger convenience method; writes trace to standard error /// output stream. /// void Trace::dump() const { print(dbgs()); } +#endif diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index cea34e1..3beb373 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -22,7 +22,7 @@ #include "llvm/LLVMContext.h" #include "llvm/Metadata.h" #include "llvm/Operator.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Support/ConstantRange.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/MathExtras.h" @@ -36,7 +36,7 @@ const unsigned MaxDepth = 6; /// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if /// unknown returns 0). For vector types, returns the element type's bitwidth. 
-static unsigned getBitWidth(Type *Ty, const TargetData *TD) { +static unsigned getBitWidth(Type *Ty, const DataLayout *TD) { if (unsigned BitWidth = Ty->getScalarSizeInBits()) return BitWidth; assert(isa(Ty) && "Expected a pointer type!"); @@ -46,7 +46,7 @@ static unsigned getBitWidth(Type *Ty, const TargetData *TD) { static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2, APInt &KnownOne2, - const TargetData *TD, unsigned Depth) { + const DataLayout *TD, unsigned Depth) { if (!Add) { if (ConstantInt *CLHS = dyn_cast(Op0)) { // We know that the top bits of C-X are clear if X contains less bits @@ -132,7 +132,7 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW, static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW, APInt &KnownZero, APInt &KnownOne, APInt &KnownZero2, APInt &KnownOne2, - const TargetData *TD, unsigned Depth) { + const DataLayout *TD, unsigned Depth) { unsigned BitWidth = KnownZero.getBitWidth(); ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1); ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1); @@ -226,7 +226,7 @@ void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) { /// same width as the vector element, and the bit is set only if it is true /// for all of the elements in the vector. void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne, - const TargetData *TD, unsigned Depth) { + const DataLayout *TD, unsigned Depth) { assert(V && "No Value?"); assert(Depth <= MaxDepth && "Limit Search Depth"); unsigned BitWidth = KnownZero.getBitWidth(); @@ -308,11 +308,20 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne, } if (Argument *A = dyn_cast(V)) { - // Get alignment information off byval arguments if specified in the IR. - if (A->hasByValAttr()) - if (unsigned Align = A->getParamAlignment()) - KnownZero = APInt::getLowBitsSet(BitWidth, - CountTrailingZeros_32(Align)); + unsigned Align = 0; + + if (A->hasByValAttr()) { + // Get alignment information off byval arguments if specified in the IR. + Align = A->getParamAlignment(); + } else if (TD && A->hasStructRetAttr()) { + // An sret parameter has at least the ABI alignment of the return type. + Type *EltTy = cast(A->getType())->getElementType(); + if (EltTy->isSized()) + Align = TD->getABITypeAlignment(EltTy); + } + + if (Align) + KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align)); return; } @@ -420,15 +429,13 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne, case Instruction::ZExt: case Instruction::Trunc: { Type *SrcTy = I->getOperand(0)->getType(); - + unsigned SrcBitWidth; // Note that we handle pointer operands here because of inttoptr/ptrtoint // which fall through here. - if (SrcTy->isPointerTy()) - SrcBitWidth = TD->getTypeSizeInBits(SrcTy); - else - SrcBitWidth = SrcTy->getScalarSizeInBits(); - + SrcBitWidth = TD->getTypeSizeInBits(SrcTy->getScalarType()); + + assert(SrcBitWidth && "SrcBitWidth can't be zero"); KnownZero = KnownZero.zextOrTrunc(SrcBitWidth); KnownOne = KnownOne.zextOrTrunc(SrcBitWidth); ComputeMaskedBits(I->getOperand(0), KnownZero, KnownOne, TD, Depth+1); @@ -778,7 +785,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne, /// ComputeSignBit - Determine whether the sign bit is known to be zero or /// one. Convenience wrapper around ComputeMaskedBits. 
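The ComputeMaskedBits() change above lets an sret argument contribute its ABI alignment as known-zero low bits, just as byval alignment already did. The underlying bit-level fact, as a standalone snippet (the patch expresses it with APInt::getLowBitsSet and CountTrailingZeros_32):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A pointer aligned to Align bytes has log2(Align) trailing zero bits,
      // so those bits can be added to KnownZero.
      unsigned Align = 16;              // e.g. the sret type's ABI alignment
      unsigned TrailingZeros = __builtin_ctz(Align);
      uint64_t KnownZeroLowBits = (1ull << TrailingZeros) - 1;
      std::printf("align %u -> %u known-zero low bits (mask 0x%llx)\n",
                  Align, TrailingZeros, (unsigned long long)KnownZeroLowBits);
      return 0;
    }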
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, - const TargetData *TD, unsigned Depth) { + const DataLayout *TD, unsigned Depth) { unsigned BitWidth = getBitWidth(V->getType(), TD); if (!BitWidth) { KnownZero = false; @@ -796,7 +803,7 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, /// bit set when defined. For vectors return true if every element is known to /// be a power of two when defined. Supports values with integer or pointer /// types and vectors of integers. -bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero, +bool llvm::isPowerOfTwo(Value *V, const DataLayout *TD, bool OrZero, unsigned Depth) { if (Constant *C = dyn_cast(V)) { if (C->isNullValue()) @@ -859,7 +866,7 @@ bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero, /// when defined. For vectors return true if every element is known to be /// non-zero when defined. Supports values with integer or pointer type and /// vectors of integers. -bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) { +bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) { if (Constant *C = dyn_cast(V)) { if (C->isNullValue()) return false; @@ -986,7 +993,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) { /// same width as the vector element, and the bit is set only if it is true /// for all of the elements in the vector. bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, - const TargetData *TD, unsigned Depth) { + const DataLayout *TD, unsigned Depth) { APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0); ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth); assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); @@ -1003,10 +1010,10 @@ bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, /// /// 'Op' must have a scalar integer type. /// -unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD, +unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD, unsigned Depth) { assert((TD || V->getType()->isIntOrIntVectorTy()) && - "ComputeNumSignBits requires a TargetData object to operate " + "ComputeNumSignBits requires a DataLayout object to operate " "on non-integer values!"); Type *Ty = V->getType(); unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) : @@ -1582,7 +1589,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef idx_range, /// it can be expressed as a base pointer plus a constant offset. Return the /// base and offset to the caller. Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, - const TargetData &TD) { + const DataLayout &TD) { Operator *PtrOp = dyn_cast(Ptr); if (PtrOp == 0 || Ptr->getType()->isVectorTy()) return Ptr; @@ -1614,7 +1621,7 @@ Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, // right. 
unsigned PtrSize = TD.getPointerSizeInBits(); if (PtrSize < 64) - Offset = (Offset << (64-PtrSize)) >> (64-PtrSize); + Offset = SignExtend64(Offset, PtrSize); return GetPointerBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD); } @@ -1768,7 +1775,7 @@ uint64_t llvm::GetStringLength(Value *V) { } Value * -llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) { +llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) { if (!V->getType()->isPointerTy()) return V; for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { @@ -1799,7 +1806,7 @@ llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) { void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl &Objects, - const TargetData *TD, + const DataLayout *TD, unsigned MaxLookup) { SmallPtrSet Visited; SmallVector Worklist; @@ -1844,7 +1851,7 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { } bool llvm::isSafeToSpeculativelyExecute(const Value *V, - const TargetData *TD) { + const DataLayout *TD) { const Operator *Inst = dyn_cast(V); if (!Inst) return false; diff --git a/lib/Archive/ArchiveInternals.h b/lib/Archive/ArchiveInternals.h index 55684f7..639f5ac 100644 --- a/lib/Archive/ArchiveInternals.h +++ b/lib/Archive/ArchiveInternals.h @@ -66,7 +66,7 @@ namespace llvm { fmag[1] = '\n'; } - bool checkSignature() { + bool checkSignature() const { return 0 == memcmp(fmag, ARFILE_MEMBER_MAGIC,2); } }; diff --git a/lib/Archive/ArchiveReader.cpp b/lib/Archive/ArchiveReader.cpp index 5cfc810..5052495 100644 --- a/lib/Archive/ArchiveReader.cpp +++ b/lib/Archive/ArchiveReader.cpp @@ -79,7 +79,7 @@ Archive::parseMemberHeader(const char*& At, const char* End, std::string* error) } // Cast archive member header - ArchiveMemberHeader* Hdr = (ArchiveMemberHeader*)At; + const ArchiveMemberHeader* Hdr = (const ArchiveMemberHeader*)At; At += sizeof(ArchiveMemberHeader); int flags = 0; @@ -196,7 +196,7 @@ Archive::parseMemberHeader(const char*& At, const char* End, std::string* error) /* FALL THROUGH */ default: - char* slash = (char*) memchr(Hdr->name, '/', 16); + const char* slash = (const char*) memchr(Hdr->name, '/', 16); if (slash == 0) slash = Hdr->name + 16; pathname.assign(Hdr->name, slash - Hdr->name); diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp index 481733d..a60e4aa 100644 --- a/lib/AsmParser/LLLexer.cpp +++ b/lib/AsmParser/LLLexer.cpp @@ -456,11 +456,12 @@ lltok::Kind LLLexer::LexIdentifier() { KEYWORD(private); KEYWORD(linker_private); KEYWORD(linker_private_weak); - KEYWORD(linker_private_weak_def_auto); + KEYWORD(linker_private_weak_def_auto); // FIXME: For backwards compatibility. 
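In GetPointerBaseWithConstantOffset() above, the manual shift pair that narrowed the accumulated offset to the pointer width is replaced by SignExtend64(Offset, PtrSize). A standalone sketch of what that computes, with signExtend64 as a local stand-in for the MathExtras helper:

    #include <cstdint>
    #include <cstdio>

    // Interpret the low B bits of X as a signed value and widen it to 64 bits.
    static int64_t signExtend64(uint64_t X, unsigned B) {
      return int64_t(X << (64 - B)) >> (64 - B);
    }

    int main() {
      // A 32-bit offset of -8 accumulates as the unsigned value 0xFFFFFFF8;
      // sign-extending from 32 bits recovers the signed offset.
      std::printf("%lld\n", (long long)signExtend64(0xFFFFFFF8u, 32));  // -8
      return 0;
    }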
KEYWORD(internal); KEYWORD(available_externally); KEYWORD(linkonce); KEYWORD(linkonce_odr); + KEYWORD(linkonce_odr_auto_hide); KEYWORD(weak); KEYWORD(weak_odr); KEYWORD(appending); @@ -509,6 +510,7 @@ lltok::Kind LLLexer::LexIdentifier() { KEYWORD(asm); KEYWORD(sideeffect); KEYWORD(alignstack); + KEYWORD(inteldialect); KEYWORD(gc); KEYWORD(ccc); @@ -523,6 +525,9 @@ lltok::Kind LLLexer::LexIdentifier() { KEYWORD(msp430_intrcc); KEYWORD(ptx_kernel); KEYWORD(ptx_device); + KEYWORD(spir_kernel); + KEYWORD(spir_func); + KEYWORD(intel_ocl_bicc); KEYWORD(cc); KEYWORD(c); @@ -553,7 +558,7 @@ lltok::Kind LLLexer::LexIdentifier() { KEYWORD(naked); KEYWORD(nonlazybind); KEYWORD(address_safety); - KEYWORD(ia_nsdialect); + KEYWORD(minsize); KEYWORD(type); KEYWORD(opaque); diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp index 0ff8edd..b24291f 100644 --- a/lib/AsmParser/LLParser.cpp +++ b/lib/AsmParser/LLParser.cpp @@ -184,12 +184,13 @@ bool LLParser::ParseTopLevelEntities() { case lltok::kw_private: // OptionalLinkage case lltok::kw_linker_private: // OptionalLinkage case lltok::kw_linker_private_weak: // OptionalLinkage - case lltok::kw_linker_private_weak_def_auto: // OptionalLinkage + case lltok::kw_linker_private_weak_def_auto: // FIXME: backwards compat. case lltok::kw_internal: // OptionalLinkage case lltok::kw_weak: // OptionalLinkage case lltok::kw_weak_odr: // OptionalLinkage case lltok::kw_linkonce: // OptionalLinkage case lltok::kw_linkonce_odr: // OptionalLinkage + case lltok::kw_linkonce_odr_auto_hide: // OptionalLinkage case lltok::kw_appending: // OptionalLinkage case lltok::kw_dllexport: // OptionalLinkage case lltok::kw_common: // OptionalLinkage @@ -576,8 +577,7 @@ bool LLParser::ParseAlias(const std::string &Name, LocTy NameLoc, Linkage != GlobalValue::InternalLinkage && Linkage != GlobalValue::PrivateLinkage && Linkage != GlobalValue::LinkerPrivateLinkage && - Linkage != GlobalValue::LinkerPrivateWeakLinkage && - Linkage != GlobalValue::LinkerPrivateWeakDefAutoLinkage) + Linkage != GlobalValue::LinkerPrivateWeakLinkage) return Error(LinkageLoc, "invalid linkage type for alias"); Constant *Aliasee; @@ -779,7 +779,9 @@ GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty, FwdVal = Function::Create(FT, GlobalValue::ExternalWeakLinkage, Name, M); else FwdVal = new GlobalVariable(*M, PTy->getElementType(), false, - GlobalValue::ExternalWeakLinkage, 0, Name); + GlobalValue::ExternalWeakLinkage, 0, Name, + 0, GlobalVariable::NotThreadLocal, + PTy->getAddressSpace()); ForwardRefVals[Name] = std::make_pair(FwdVal, Loc); return FwdVal; @@ -916,59 +918,50 @@ bool LLParser::ParseOptionalAddrSpace(unsigned &AddrSpace) { /// ParseOptionalAttrs - Parse a potentially empty attribute list. AttrKind /// indicates what kind of attribute list this is: 0: function arg, 1: result, /// 2: function attr. -bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) { - Attrs = Attribute::None; +bool LLParser::ParseOptionalAttrs(AttrBuilder &B, unsigned AttrKind) { LocTy AttrLoc = Lex.getLoc(); + bool HaveError = false; + + B.clear(); while (1) { - switch (Lex.getKind()) { + lltok::Kind Token = Lex.getKind(); + switch (Token) { default: // End of attributes. - if (AttrKind != 2 && (Attrs & Attribute::FunctionOnly)) - return Error(AttrLoc, "invalid use of function-only attribute"); - - // As a hack, we allow "align 2" on functions as a synonym for - // "alignstack 2". 
- if (AttrKind == 2 && - (Attrs & ~(Attribute::FunctionOnly | Attribute::Alignment))) - return Error(AttrLoc, "invalid use of attribute on a function"); - - if (AttrKind != 0 && (Attrs & Attribute::ParameterOnly)) - return Error(AttrLoc, "invalid use of parameter-only attribute"); - - return false; - case lltok::kw_zeroext: Attrs |= Attribute::ZExt; break; - case lltok::kw_signext: Attrs |= Attribute::SExt; break; - case lltok::kw_inreg: Attrs |= Attribute::InReg; break; - case lltok::kw_sret: Attrs |= Attribute::StructRet; break; - case lltok::kw_noalias: Attrs |= Attribute::NoAlias; break; - case lltok::kw_nocapture: Attrs |= Attribute::NoCapture; break; - case lltok::kw_byval: Attrs |= Attribute::ByVal; break; - case lltok::kw_nest: Attrs |= Attribute::Nest; break; - - case lltok::kw_noreturn: Attrs |= Attribute::NoReturn; break; - case lltok::kw_nounwind: Attrs |= Attribute::NoUnwind; break; - case lltok::kw_uwtable: Attrs |= Attribute::UWTable; break; - case lltok::kw_returns_twice: Attrs |= Attribute::ReturnsTwice; break; - case lltok::kw_noinline: Attrs |= Attribute::NoInline; break; - case lltok::kw_readnone: Attrs |= Attribute::ReadNone; break; - case lltok::kw_readonly: Attrs |= Attribute::ReadOnly; break; - case lltok::kw_inlinehint: Attrs |= Attribute::InlineHint; break; - case lltok::kw_alwaysinline: Attrs |= Attribute::AlwaysInline; break; - case lltok::kw_optsize: Attrs |= Attribute::OptimizeForSize; break; - case lltok::kw_ssp: Attrs |= Attribute::StackProtect; break; - case lltok::kw_sspreq: Attrs |= Attribute::StackProtectReq; break; - case lltok::kw_noredzone: Attrs |= Attribute::NoRedZone; break; - case lltok::kw_noimplicitfloat: Attrs |= Attribute::NoImplicitFloat; break; - case lltok::kw_naked: Attrs |= Attribute::Naked; break; - case lltok::kw_nonlazybind: Attrs |= Attribute::NonLazyBind; break; - case lltok::kw_address_safety: Attrs |= Attribute::AddressSafety; break; - case lltok::kw_ia_nsdialect: Attrs |= Attribute::IANSDialect; break; + return HaveError; + case lltok::kw_zeroext: B.addAttribute(Attributes::ZExt); break; + case lltok::kw_signext: B.addAttribute(Attributes::SExt); break; + case lltok::kw_inreg: B.addAttribute(Attributes::InReg); break; + case lltok::kw_sret: B.addAttribute(Attributes::StructRet); break; + case lltok::kw_noalias: B.addAttribute(Attributes::NoAlias); break; + case lltok::kw_nocapture: B.addAttribute(Attributes::NoCapture); break; + case lltok::kw_byval: B.addAttribute(Attributes::ByVal); break; + case lltok::kw_nest: B.addAttribute(Attributes::Nest); break; + + case lltok::kw_noreturn: B.addAttribute(Attributes::NoReturn); break; + case lltok::kw_nounwind: B.addAttribute(Attributes::NoUnwind); break; + case lltok::kw_uwtable: B.addAttribute(Attributes::UWTable); break; + case lltok::kw_returns_twice: B.addAttribute(Attributes::ReturnsTwice); break; + case lltok::kw_noinline: B.addAttribute(Attributes::NoInline); break; + case lltok::kw_readnone: B.addAttribute(Attributes::ReadNone); break; + case lltok::kw_readonly: B.addAttribute(Attributes::ReadOnly); break; + case lltok::kw_inlinehint: B.addAttribute(Attributes::InlineHint); break; + case lltok::kw_alwaysinline: B.addAttribute(Attributes::AlwaysInline); break; + case lltok::kw_optsize: B.addAttribute(Attributes::OptimizeForSize); break; + case lltok::kw_ssp: B.addAttribute(Attributes::StackProtect); break; + case lltok::kw_sspreq: B.addAttribute(Attributes::StackProtectReq); break; + case lltok::kw_noredzone: B.addAttribute(Attributes::NoRedZone); break; + case 
lltok::kw_noimplicitfloat: B.addAttribute(Attributes::NoImplicitFloat); break; + case lltok::kw_naked: B.addAttribute(Attributes::Naked); break; + case lltok::kw_nonlazybind: B.addAttribute(Attributes::NonLazyBind); break; + case lltok::kw_address_safety: B.addAttribute(Attributes::AddressSafety); break; + case lltok::kw_minsize: B.addAttribute(Attributes::MinSize); break; case lltok::kw_alignstack: { unsigned Alignment; if (ParseOptionalStackAlignment(Alignment)) return true; - Attrs |= Attribute::constructStackAlignmentFromInt(Alignment); + B.addStackAlignmentAttr(Alignment); continue; } @@ -976,11 +969,57 @@ bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) { unsigned Alignment; if (ParseOptionalAlignment(Alignment)) return true; - Attrs |= Attribute::constructAlignmentFromInt(Alignment); + B.addAlignmentAttr(Alignment); continue; } } + + // Perform some error checking. + switch (Token) { + default: + if (AttrKind == 2) + HaveError |= Error(AttrLoc, "invalid use of attribute on a function"); + break; + case lltok::kw_align: + // As a hack, we allow "align 2" on functions as a synonym for + // "alignstack 2". + break; + + // Parameter Only: + case lltok::kw_sret: + case lltok::kw_nocapture: + case lltok::kw_byval: + case lltok::kw_nest: + if (AttrKind != 0) + HaveError |= Error(AttrLoc, "invalid use of parameter-only attribute"); + break; + + // Function Only: + case lltok::kw_noreturn: + case lltok::kw_nounwind: + case lltok::kw_readnone: + case lltok::kw_readonly: + case lltok::kw_noinline: + case lltok::kw_alwaysinline: + case lltok::kw_optsize: + case lltok::kw_ssp: + case lltok::kw_sspreq: + case lltok::kw_noredzone: + case lltok::kw_noimplicitfloat: + case lltok::kw_naked: + case lltok::kw_inlinehint: + case lltok::kw_alignstack: + case lltok::kw_uwtable: + case lltok::kw_nonlazybind: + case lltok::kw_returns_twice: + case lltok::kw_address_safety: + case lltok::kw_minsize: + if (AttrKind != 2) + HaveError |= Error(AttrLoc, "invalid use of function-only attribute"); + break; + } + Lex.Lex(); } } @@ -990,12 +1029,12 @@ bool LLParser::ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind) { /// ::= 'private' /// ::= 'linker_private' /// ::= 'linker_private_weak' -/// ::= 'linker_private_weak_def_auto' /// ::= 'internal' /// ::= 'weak' /// ::= 'weak_odr' /// ::= 'linkonce' /// ::= 'linkonce_odr' +/// ::= 'linkonce_odr_auto_hide' /// ::= 'available_externally' /// ::= 'appending' /// ::= 'dllexport' @@ -1012,14 +1051,15 @@ bool LLParser::ParseOptionalLinkage(unsigned &Res, bool &HasLinkage) { case lltok::kw_linker_private_weak: Res = GlobalValue::LinkerPrivateWeakLinkage; break; - case lltok::kw_linker_private_weak_def_auto: - Res = GlobalValue::LinkerPrivateWeakDefAutoLinkage; - break; case lltok::kw_internal: Res = GlobalValue::InternalLinkage; break; case lltok::kw_weak: Res = GlobalValue::WeakAnyLinkage; break; case lltok::kw_weak_odr: Res = GlobalValue::WeakODRLinkage; break; case lltok::kw_linkonce: Res = GlobalValue::LinkOnceAnyLinkage; break; case lltok::kw_linkonce_odr: Res = GlobalValue::LinkOnceODRLinkage; break; + case lltok::kw_linkonce_odr_auto_hide: + case lltok::kw_linker_private_weak_def_auto: // FIXME: For backwards compat. 
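The ParseOptionalAttrs() rewrite above, together with the ParseFunctionHeader/ParseInvoke/ParseCall changes further down, moves the parser from OR-ing Attributes bits to collecting them in an AttrBuilder and materializing an Attributes object against an LLVMContext. The new pattern reduced to its core; buildExampleAttrs is illustrative only, and Ctx stands for whatever context the caller has at hand:

    #include "llvm/Attributes.h"
    #include "llvm/LLVMContext.h"
    using namespace llvm;

    // Collect attributes incrementally, then build the immutable object once,
    // mirroring the calls the parser now uses for return, parameter and
    // function attributes.
    static Attributes buildExampleAttrs(LLVMContext &Ctx) {
      AttrBuilder B;
      B.addAttribute(Attributes::NoUnwind);
      B.addAttribute(Attributes::ReadOnly);
      B.addAlignmentAttr(16);            // "align 16"
      return Attributes::get(Ctx, B);
    }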
+ Res = GlobalValue::LinkOnceODRAutoHideLinkage; + break; case lltok::kw_available_externally: Res = GlobalValue::AvailableExternallyLinkage; break; @@ -1056,6 +1096,7 @@ bool LLParser::ParseOptionalVisibility(unsigned &Res) { /// ::= /*empty*/ /// ::= 'ccc' /// ::= 'fastcc' +/// ::= 'kw_intel_ocl_bicc' /// ::= 'coldcc' /// ::= 'x86_stdcallcc' /// ::= 'x86_fastcallcc' @@ -1066,6 +1107,8 @@ bool LLParser::ParseOptionalVisibility(unsigned &Res) { /// ::= 'msp430_intrcc' /// ::= 'ptx_kernel' /// ::= 'ptx_device' +/// ::= 'spir_func' +/// ::= 'spir_kernel' /// ::= 'cc' UINT /// bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) { @@ -1083,6 +1126,9 @@ bool LLParser::ParseOptionalCallingConv(CallingConv::ID &CC) { case lltok::kw_msp430_intrcc: CC = CallingConv::MSP430_INTR; break; case lltok::kw_ptx_kernel: CC = CallingConv::PTX_Kernel; break; case lltok::kw_ptx_device: CC = CallingConv::PTX_Device; break; + case lltok::kw_spir_kernel: CC = CallingConv::SPIR_KERNEL; break; + case lltok::kw_spir_func: CC = CallingConv::SPIR_FUNC; break; + case lltok::kw_intel_ocl_bicc: CC = CallingConv::Intel_OCL_BI; break; case lltok::kw_cc: { unsigned ArbitraryCC; Lex.Lex(); @@ -1395,16 +1441,16 @@ bool LLParser::ParseParameterList(SmallVectorImpl &ArgList, // Parse the argument. LocTy ArgLoc; Type *ArgTy = 0; - Attributes ArgAttrs1; - Attributes ArgAttrs2; + AttrBuilder ArgAttrs; Value *V; if (ParseType(ArgTy, ArgLoc)) return true; // Otherwise, handle normal operands. - if (ParseOptionalAttrs(ArgAttrs1, 0) || ParseValue(ArgTy, V, PFS)) + if (ParseOptionalAttrs(ArgAttrs, 0) || ParseValue(ArgTy, V, PFS)) return true; - ArgList.push_back(ParamInfo(ArgLoc, V, ArgAttrs1|ArgAttrs2)); + ArgList.push_back(ParamInfo(ArgLoc, V, Attributes::get(V->getContext(), + ArgAttrs))); } Lex.Lex(); // Lex the ')'. @@ -1436,7 +1482,7 @@ bool LLParser::ParseArgumentList(SmallVectorImpl &ArgList, } else { LocTy TypeLoc = Lex.getLoc(); Type *ArgTy = 0; - Attributes Attrs; + AttrBuilder Attrs; std::string Name; if (ParseType(ArgTy) || @@ -1453,7 +1499,9 @@ bool LLParser::ParseArgumentList(SmallVectorImpl &ArgList, if (!FunctionType::isValidArgumentType(ArgTy)) return Error(TypeLoc, "invalid type for function argument"); - ArgList.push_back(ArgInfo(TypeLoc, ArgTy, Attrs, Name)); + ArgList.push_back(ArgInfo(TypeLoc, ArgTy, + Attributes::get(ArgTy->getContext(), + Attrs), Name)); while (EatIfPresent(lltok::comma)) { // Handle ... at end of arg list. @@ -1479,7 +1527,9 @@ bool LLParser::ParseArgumentList(SmallVectorImpl &ArgList, if (!ArgTy->isFirstClassType()) return Error(TypeLoc, "invalid type for function argument"); - ArgList.push_back(ArgInfo(TypeLoc, ArgTy, Attrs, Name)); + ArgList.push_back(ArgInfo(TypeLoc, ArgTy, + Attributes::get(ArgTy->getContext(), Attrs), + Name)); } } @@ -1503,7 +1553,7 @@ bool LLParser::ParseFunctionType(Type *&Result) { for (unsigned i = 0, e = ArgList.size(); i != e; ++i) { if (!ArgList[i].Name.empty()) return Error(ArgList[i].Loc, "argument name invalid in function type"); - if (ArgList[i].Attrs) + if (ArgList[i].Attrs.hasAttributes()) return Error(ArgList[i].Loc, "argument attributes invalid in function type"); } @@ -2069,16 +2119,18 @@ bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) { case lltok::kw_asm: { // ValID ::= 'asm' SideEffect? AlignStack? 
STRINGCONSTANT ',' STRINGCONSTANT - bool HasSideEffect, AlignStack; + bool HasSideEffect, AlignStack, AsmDialect; Lex.Lex(); if (ParseOptionalToken(lltok::kw_sideeffect, HasSideEffect) || ParseOptionalToken(lltok::kw_alignstack, AlignStack) || + ParseOptionalToken(lltok::kw_inteldialect, AsmDialect) || ParseStringConstant(ID.StrVal) || ParseToken(lltok::comma, "expected comma in inline asm expression") || ParseToken(lltok::StringConstant, "expected constraint string")) return true; ID.StrVal2 = Lex.getStrVal(); - ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1); + ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1) | + (unsigned(AsmDialect)<<2); ID.Kind = ValID::t_InlineAsm; return false; } @@ -2495,7 +2547,8 @@ bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V, PTy ? dyn_cast(PTy->getElementType()) : 0; if (!FTy || !InlineAsm::Verify(FTy, ID.StrVal2)) return Error(ID.Loc, "invalid type for inline asm constraint string"); - V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1, ID.UIntVal>>1); + V = InlineAsm::get(FTy, ID.StrVal, ID.StrVal2, ID.UIntVal&1, + (ID.UIntVal>>1)&1, (InlineAsm::AsmDialect(ID.UIntVal>>2))); return false; } case ValID::t_MDNode: @@ -2630,7 +2683,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) { unsigned Linkage; unsigned Visibility; - Attributes RetAttrs; + AttrBuilder RetAttrs; CallingConv::ID CC; Type *RetType = 0; LocTy RetTypeLoc = Lex.getLoc(); @@ -2653,11 +2706,11 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) { case GlobalValue::PrivateLinkage: case GlobalValue::LinkerPrivateLinkage: case GlobalValue::LinkerPrivateWeakLinkage: - case GlobalValue::LinkerPrivateWeakDefAutoLinkage: case GlobalValue::InternalLinkage: case GlobalValue::AvailableExternallyLinkage: case GlobalValue::LinkOnceAnyLinkage: case GlobalValue::LinkOnceODRLinkage: + case GlobalValue::LinkOnceODRAutoHideLinkage: case GlobalValue::WeakAnyLinkage: case GlobalValue::WeakODRLinkage: case GlobalValue::DLLExportLinkage: @@ -2694,7 +2747,7 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) { SmallVector ArgList; bool isVarArg; - Attributes FuncAttrs; + AttrBuilder FuncAttrs; std::string Section; unsigned Alignment; std::string GC; @@ -2713,9 +2766,9 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) { return true; // If the alignment was parsed as an attribute, move to the alignment field. - if (FuncAttrs & Attribute::Alignment) { - Alignment = Attribute::getAlignmentFromAttrs(FuncAttrs); - FuncAttrs &= ~Attribute::Alignment; + if (FuncAttrs.hasAlignmentAttr()) { + Alignment = FuncAttrs.getAlignment(); + FuncAttrs.removeAttribute(Attributes::Alignment); } // Okay, if we got here, the function is syntactically valid. 
Convert types @@ -2723,21 +2776,28 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) { std::vector ParamTypeList; SmallVector Attrs; - if (RetAttrs != Attribute::None) - Attrs.push_back(AttributeWithIndex::get(0, RetAttrs)); + if (RetAttrs.hasAttributes()) + Attrs.push_back( + AttributeWithIndex::get(AttrListPtr::ReturnIndex, + Attributes::get(RetType->getContext(), + RetAttrs))); for (unsigned i = 0, e = ArgList.size(); i != e; ++i) { ParamTypeList.push_back(ArgList[i].Ty); - if (ArgList[i].Attrs != Attribute::None) + if (ArgList[i].Attrs.hasAttributes()) Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs)); } - if (FuncAttrs != Attribute::None) - Attrs.push_back(AttributeWithIndex::get(~0, FuncAttrs)); + if (FuncAttrs.hasAttributes()) + Attrs.push_back( + AttributeWithIndex::get(AttrListPtr::FunctionIndex, + Attributes::get(RetType->getContext(), + FuncAttrs))); - AttrListPtr PAL = AttrListPtr::get(Attrs); + AttrListPtr PAL = AttrListPtr::get(Context, Attrs); - if (PAL.paramHasAttr(1, Attribute::StructRet) && !RetType->isVoidTy()) + if (PAL.getParamAttributes(1).hasAttribute(Attributes::StructRet) && + !RetType->isVoidTy()) return Error(RetTypeLoc, "functions with 'sret' argument must return void"); FunctionType *FT = @@ -2752,6 +2812,9 @@ bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) { ForwardRefVals.find(FunctionName); if (FRVI != ForwardRefVals.end()) { Fn = M->getFunction(FunctionName); + if (!Fn) + return Error(FRVI->second.second, "invalid forward reference to " + "function as global value!"); if (Fn->getType() != PFT) return Error(FRVI->second.second, "invalid forward reference to " "function '" + FunctionName + "' with wrong type!"); @@ -3205,7 +3268,7 @@ bool LLParser::ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS) { /// OptionalAttrs 'to' TypeAndValue 'unwind' TypeAndValue bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) { LocTy CallLoc = Lex.getLoc(); - Attributes RetAttrs, FnAttrs; + AttrBuilder RetAttrs, FnAttrs; CallingConv::ID CC; Type *RetType = 0; LocTy RetTypeLoc; @@ -3250,8 +3313,11 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) { // Set up the Attributes for the function. 
SmallVector Attrs; - if (RetAttrs != Attribute::None) - Attrs.push_back(AttributeWithIndex::get(0, RetAttrs)); + if (RetAttrs.hasAttributes()) + Attrs.push_back( + AttributeWithIndex::get(AttrListPtr::ReturnIndex, + Attributes::get(Callee->getContext(), + RetAttrs))); SmallVector Args; @@ -3271,18 +3337,21 @@ bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) { return Error(ArgList[i].Loc, "argument is not of expected type '" + getTypeString(ExpectedTy) + "'"); Args.push_back(ArgList[i].V); - if (ArgList[i].Attrs != Attribute::None) + if (ArgList[i].Attrs.hasAttributes()) Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs)); } if (I != E) return Error(CallLoc, "not enough parameters specified for call"); - if (FnAttrs != Attribute::None) - Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs)); + if (FnAttrs.hasAttributes()) + Attrs.push_back( + AttributeWithIndex::get(AttrListPtr::FunctionIndex, + Attributes::get(Callee->getContext(), + FnAttrs))); // Finish off the Attributes and check them - AttrListPtr PAL = AttrListPtr::get(Attrs); + AttrListPtr PAL = AttrListPtr::get(Context, Attrs); InvokeInst *II = InvokeInst::Create(Callee, NormalBB, UnwindBB, Args); II->setCallingConv(CC); @@ -3604,7 +3673,7 @@ bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) { /// ParameterList OptionalAttrs bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS, bool isTail) { - Attributes RetAttrs, FnAttrs; + AttrBuilder RetAttrs, FnAttrs; CallingConv::ID CC; Type *RetType = 0; LocTy RetTypeLoc; @@ -3646,8 +3715,11 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS, // Set up the Attributes for the function. SmallVector Attrs; - if (RetAttrs != Attribute::None) - Attrs.push_back(AttributeWithIndex::get(0, RetAttrs)); + if (RetAttrs.hasAttributes()) + Attrs.push_back( + AttributeWithIndex::get(AttrListPtr::ReturnIndex, + Attributes::get(Callee->getContext(), + RetAttrs))); SmallVector Args; @@ -3667,18 +3739,21 @@ bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS, return Error(ArgList[i].Loc, "argument is not of expected type '" + getTypeString(ExpectedTy) + "'"); Args.push_back(ArgList[i].V); - if (ArgList[i].Attrs != Attribute::None) + if (ArgList[i].Attrs.hasAttributes()) Attrs.push_back(AttributeWithIndex::get(i+1, ArgList[i].Attrs)); } if (I != E) return Error(CallLoc, "not enough parameters specified for call"); - if (FnAttrs != Attribute::None) - Attrs.push_back(AttributeWithIndex::get(~0, FnAttrs)); + if (FnAttrs.hasAttributes()) + Attrs.push_back( + AttributeWithIndex::get(AttrListPtr::FunctionIndex, + Attributes::get(Callee->getContext(), + FnAttrs))); // Finish off the Attributes and check them - AttrListPtr PAL = AttrListPtr::get(Attrs); + AttrListPtr PAL = AttrListPtr::get(Context, Attrs); CallInst *CI = CallInst::Create(Callee, Args); CI->setTailCall(isTail); diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h index 257c726..c6bbdb2 100644 --- a/lib/AsmParser/LLParser.h +++ b/lib/AsmParser/LLParser.h @@ -175,7 +175,7 @@ namespace llvm { bool ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM); bool ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM); bool ParseOptionalAddrSpace(unsigned &AddrSpace); - bool ParseOptionalAttrs(Attributes &Attrs, unsigned AttrKind); + bool ParseOptionalAttrs(AttrBuilder &Attrs, unsigned AttrKind); bool ParseOptionalLinkage(unsigned &Linkage, bool &HasLinkage); bool ParseOptionalLinkage(unsigned &Linkage) { bool HasLinkage; return 
ParseOptionalLinkage(Linkage, HasLinkage); diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h index 0b0b980..036686d 100644 --- a/lib/AsmParser/LLToken.h +++ b/lib/AsmParser/LLToken.h @@ -37,8 +37,10 @@ namespace lltok { kw_global, kw_constant, kw_private, kw_linker_private, kw_linker_private_weak, - kw_linker_private_weak_def_auto, kw_internal, - kw_linkonce, kw_linkonce_odr, kw_weak, kw_weak_odr, kw_appending, + kw_linker_private_weak_def_auto, // FIXME: For backwards compatibility. + kw_internal, + kw_linkonce, kw_linkonce_odr, kw_linkonce_odr_auto_hide, + kw_weak, kw_weak_odr, kw_appending, kw_dllimport, kw_dllexport, kw_common, kw_available_externally, kw_default, kw_hidden, kw_protected, kw_unnamed_addr, @@ -70,14 +72,17 @@ namespace lltok { kw_asm, kw_sideeffect, kw_alignstack, + kw_inteldialect, kw_gc, kw_c, kw_cc, kw_ccc, kw_fastcc, kw_coldcc, + kw_intel_ocl_bicc, kw_x86_stdcallcc, kw_x86_fastcallcc, kw_x86_thiscallcc, kw_arm_apcscc, kw_arm_aapcscc, kw_arm_aapcs_vfpcc, kw_msp430_intrcc, kw_ptx_kernel, kw_ptx_device, + kw_spir_kernel, kw_spir_func, kw_signext, kw_zeroext, @@ -105,7 +110,7 @@ namespace lltok { kw_naked, kw_nonlazybind, kw_address_safety, - kw_ia_nsdialect, + kw_minsize, kw_type, kw_opaque, diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp index 4ffee38..4ec9da1 100644 --- a/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/lib/Bitcode/Reader/BitcodeReader.cpp @@ -52,6 +52,8 @@ void BitcodeReader::FreeState() { std::vector().swap(FunctionsWithBodies); DeferredFunctionInfo.clear(); MDKindMap.clear(); + + assert(BlockAddrFwdRefs.empty() && "Unresolved blockaddress fwd references"); } //===----------------------------------------------------------------------===// @@ -89,7 +91,7 @@ static GlobalValue::LinkageTypes GetDecodedLinkage(unsigned Val) { case 12: return GlobalValue::AvailableExternallyLinkage; case 13: return GlobalValue::LinkerPrivateLinkage; case 14: return GlobalValue::LinkerPrivateWeakLinkage; - case 15: return GlobalValue::LinkerPrivateWeakDefAutoLinkage; + case 15: return GlobalValue::LinkOnceODRAutoHideLinkage; } } @@ -197,7 +199,7 @@ namespace { /// @brief A class for maintaining the slot number definition /// as a placeholder for the actual definition for forward constants defs. class ConstantPlaceHolder : public ConstantExpr { - void operator=(const ConstantPlaceHolder &); // DO NOT IMPLEMENT + void operator=(const ConstantPlaceHolder &) LLVM_DELETED_FUNCTION; public: // allocate space for exactly one operand void *operator new(size_t s) { @@ -209,7 +211,6 @@ namespace { } /// @brief Methods to support type inquiry through isa, cast, and dyn_cast. 
- //static inline bool classof(const ConstantPlaceHolder *) { return true; } static bool classof(const Value *V) { return isa(V) && cast(V)->getOpcode() == Instruction::UserOp1; @@ -475,17 +476,18 @@ bool BitcodeReader::ParseAttributeBlock() { for (unsigned i = 0, e = Record.size(); i != e; i += 2) { Attributes ReconstitutedAttr = - Attribute::decodeLLVMAttributesForBitcode(Record[i+1]); + Attributes::decodeLLVMAttributesForBitcode(Context, Record[i+1]); Record[i+1] = ReconstitutedAttr.Raw(); } for (unsigned i = 0, e = Record.size(); i != e; i += 2) { - if (Attributes(Record[i+1]) != Attribute::None) + AttrBuilder B(Record[i+1]); + if (B.hasAttributes()) Attrs.push_back(AttributeWithIndex::get(Record[i], - Attributes(Record[i+1]))); + Attributes::get(Context, B))); } - MAttributes.push_back(AttrListPtr::get(Attrs)); + MAttributes.push_back(AttrListPtr::get(Context, Attrs)); Attrs.clear(); break; } @@ -889,9 +891,9 @@ bool BitcodeReader::ParseMetadata() { } } -/// DecodeSignRotatedValue - Decode a signed value stored with the sign bit in +/// decodeSignRotatedValue - Decode a signed value stored with the sign bit in /// the LSB for dense VBR encoding. -static uint64_t DecodeSignRotatedValue(uint64_t V) { +uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) { if ((V & 1) == 0) return V >> 1; if (V != 1) @@ -941,7 +943,7 @@ bool BitcodeReader::ResolveGlobalAndAliasInits() { static APInt ReadWideAPInt(ArrayRef Vals, unsigned TypeBits) { SmallVector Words(Vals.size()); std::transform(Vals.begin(), Vals.end(), Words.begin(), - DecodeSignRotatedValue); + BitcodeReader::decodeSignRotatedValue); return APInt(TypeBits, Words); } @@ -995,7 +997,7 @@ bool BitcodeReader::ParseConstants() { case bitc::CST_CODE_INTEGER: // INTEGER: [intval] if (!CurTy->isIntegerTy() || Record.empty()) return Error("Invalid CST_INTEGER record"); - V = ConstantInt::get(CurTy, DecodeSignRotatedValue(Record[0])); + V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0])); break; case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval] if (!CurTy->isIntegerTy() || Record.empty()) @@ -1245,7 +1247,9 @@ bool BitcodeReader::ParseConstants() { V = ConstantExpr::getICmp(Record[3], Op0, Op1); break; } - case bitc::CST_CODE_INLINEASM: { + // This maintains backward compatibility, pre-asm dialect keywords. + // FIXME: Remove with the 4.0 release. + case bitc::CST_CODE_INLINEASM_OLD: { if (Record.size() < 2) return Error("Invalid INLINEASM record"); std::string AsmStr, ConstrStr; bool HasSideEffects = Record[0] & 1; @@ -1266,6 +1270,31 @@ bool BitcodeReader::ParseConstants() { AsmStr, ConstrStr, HasSideEffects, IsAlignStack); break; } + // This version adds support for the asm dialect keywords (e.g., + // inteldialect). 
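Aside: the old and new INLINEASM cases below decode a single flag word, and the LLParser, BitcodeReader, and BitcodeWriter hunks in this patch all agree on its layout: bit 0 is sideeffect, bit 1 is alignstack, and the bits above that carry the asm dialect. A minimal sketch of that packing; the helper names here are illustrative, not LLVM API, since the real code inlines the arithmetic:

    // Illustrative only; AsmDialect is InlineAsm::AD_ATT (0) or AD_Intel (1).
    unsigned packInlineAsmFlags(bool HasSideEffects, bool IsAlignStack,
                                unsigned AsmDialect) {
      return unsigned(HasSideEffects) | (unsigned(IsAlignStack) << 1) |
             (AsmDialect << 2);
    }

    void unpackInlineAsmFlags(unsigned Flags, bool &HasSideEffects,
                              bool &IsAlignStack, unsigned &AsmDialect) {
      HasSideEffects = Flags & 1;
      IsAlignStack   = (Flags >> 1) & 1;
      AsmDialect     = Flags >> 2;
    }

The older CST_CODE_INLINEASM_OLD record only carries the low two bits, which is why the reader keeps both cases around for backward compatibility.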
+ case bitc::CST_CODE_INLINEASM: { + if (Record.size() < 2) return Error("Invalid INLINEASM record"); + std::string AsmStr, ConstrStr; + bool HasSideEffects = Record[0] & 1; + bool IsAlignStack = (Record[0] >> 1) & 1; + unsigned AsmDialect = Record[0] >> 2; + unsigned AsmStrSize = Record[1]; + if (2+AsmStrSize >= Record.size()) + return Error("Invalid INLINEASM record"); + unsigned ConstStrSize = Record[2+AsmStrSize]; + if (3+AsmStrSize+ConstStrSize > Record.size()) + return Error("Invalid INLINEASM record"); + + for (unsigned i = 0; i != AsmStrSize; ++i) + AsmStr += (char)Record[2+i]; + for (unsigned i = 0; i != ConstStrSize; ++i) + ConstrStr += (char)Record[3+AsmStrSize+i]; + PointerType *PTy = cast(CurTy); + V = InlineAsm::get(cast(PTy->getElementType()), + AsmStr, ConstrStr, HasSideEffects, IsAlignStack, + InlineAsm::AsmDialect(AsmDialect)); + break; + } case bitc::CST_CODE_BLOCKADDRESS:{ if (Record.size() < 3) return Error("Invalid CE_BLOCKADDRESS record"); Type *FnTy = getTypeByID(Record[0]); @@ -1273,13 +1302,27 @@ bool BitcodeReader::ParseConstants() { Function *Fn = dyn_cast_or_null(ValueList.getConstantFwdRef(Record[1],FnTy)); if (Fn == 0) return Error("Invalid CE_BLOCKADDRESS record"); - - GlobalVariable *FwdRef = new GlobalVariable(*Fn->getParent(), - Type::getInt8Ty(Context), + + // If the function is already parsed we can insert the block address right + // away. + if (!Fn->empty()) { + Function::iterator BBI = Fn->begin(), BBE = Fn->end(); + for (size_t I = 0, E = Record[2]; I != E; ++I) { + if (BBI == BBE) + return Error("Invalid blockaddress block #"); + ++BBI; + } + V = BlockAddress::get(Fn, BBI); + } else { + // Otherwise insert a placeholder and remember it so it can be inserted + // when the function is parsed. + GlobalVariable *FwdRef = new GlobalVariable(*Fn->getParent(), + Type::getInt8Ty(Context), false, GlobalValue::InternalLinkage, - 0, ""); - BlockAddrFwdRefs[Fn].push_back(std::make_pair(Record[2], FwdRef)); - V = FwdRef; + 0, ""); + BlockAddrFwdRefs[Fn].push_back(std::make_pair(Record[2], FwdRef)); + V = FwdRef; + } break; } } @@ -1481,13 +1524,22 @@ bool BitcodeReader::ParseModule(bool Resume) { // Read a record. switch (Stream.ReadRecord(Code, Record)) { default: break; // Default behavior, ignore unknown content. - case bitc::MODULE_CODE_VERSION: // VERSION: [version#] + case bitc::MODULE_CODE_VERSION: { // VERSION: [version#] if (Record.size() < 1) return Error("Malformed MODULE_CODE_VERSION"); - // Only version #0 is supported so far. - if (Record[0] != 0) - return Error("Unknown bitstream version!"); + // Only version #0 and #1 are supported so far. + unsigned module_version = Record[0]; + switch (module_version) { + default: return Error("Unknown bitstream version!"); + case 0: + UseRelativeIDs = false; + break; + case 1: + UseRelativeIDs = true; + break; + } break; + } case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N] std::string S; if (ConvertToString(Record, 0, S)) @@ -1754,13 +1806,6 @@ bool BitcodeReader::ParseModuleTriple(std::string &Triple) { // Read a record. switch (Stream.ReadRecord(Code, Record)) { default: break; // Default behavior, ignore unknown content. - case bitc::MODULE_CODE_VERSION: // VERSION: [version#] - if (Record.size() < 1) - return Error("Malformed MODULE_CODE_VERSION"); - // Only version #0 is supported so far. 
- if (Record[0] != 0) - return Error("Unknown bitstream version!"); - break; case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N] std::string S; if (ConvertToString(Record, 0, S)) @@ -1973,7 +2018,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *LHS, *RHS; if (getValueTypePair(Record, OpNum, NextValueNo, LHS) || - getValue(Record, OpNum, LHS->getType(), RHS) || + popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) || OpNum+1 > Record.size()) return Error("Invalid BINOP record"); @@ -2088,8 +2133,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *TrueVal, *FalseVal, *Cond; if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) || - getValue(Record, OpNum, TrueVal->getType(), FalseVal) || - getValue(Record, OpNum, Type::getInt1Ty(Context), Cond)) + popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) || + popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond)) return Error("Invalid SELECT record"); I = SelectInst::Create(Cond, TrueVal, FalseVal); @@ -2103,7 +2148,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *TrueVal, *FalseVal, *Cond; if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) || - getValue(Record, OpNum, TrueVal->getType(), FalseVal) || + popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) || getValueTypePair(Record, OpNum, NextValueNo, Cond)) return Error("Invalid SELECT record"); @@ -2128,7 +2173,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *Vec, *Idx; if (getValueTypePair(Record, OpNum, NextValueNo, Vec) || - getValue(Record, OpNum, Type::getInt32Ty(Context), Idx)) + popValue(Record, OpNum, NextValueNo, Type::getInt32Ty(Context), Idx)) return Error("Invalid EXTRACTELT record"); I = ExtractElementInst::Create(Vec, Idx); InstructionList.push_back(I); @@ -2139,9 +2184,9 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *Vec, *Elt, *Idx; if (getValueTypePair(Record, OpNum, NextValueNo, Vec) || - getValue(Record, OpNum, + popValue(Record, OpNum, NextValueNo, cast(Vec->getType())->getElementType(), Elt) || - getValue(Record, OpNum, Type::getInt32Ty(Context), Idx)) + popValue(Record, OpNum, NextValueNo, Type::getInt32Ty(Context), Idx)) return Error("Invalid INSERTELT record"); I = InsertElementInst::Create(Vec, Elt, Idx); InstructionList.push_back(I); @@ -2152,7 +2197,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *Vec1, *Vec2, *Mask; if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) || - getValue(Record, OpNum, Vec1->getType(), Vec2)) + popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2)) return Error("Invalid SHUFFLEVEC record"); if (getValueTypePair(Record, OpNum, NextValueNo, Mask)) @@ -2172,7 +2217,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *LHS, *RHS; if (getValueTypePair(Record, OpNum, NextValueNo, LHS) || - getValue(Record, OpNum, LHS->getType(), RHS) || + popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) || OpNum+1 != Record.size()) return Error("Invalid CMP record"); @@ -2217,7 +2262,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { } else { BasicBlock *FalseDest = getBasicBlock(Record[1]); - Value *Cond = getFnValueByID(Record[2], Type::getInt1Ty(Context)); + Value *Cond = getValue(Record, 2, NextValueNo, + Type::getInt1Ty(Context)); if (FalseDest == 0 || Cond == 0) return Error("Invalid BR record"); I = BranchInst::Create(TrueDest, 
FalseDest, Cond); @@ -2233,7 +2279,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { Type *OpTy = getTypeByID(Record[1]); unsigned ValueBitWidth = cast(OpTy)->getBitWidth(); - Value *Cond = getFnValueByID(Record[2], OpTy); + Value *Cond = getValue(Record, 2, NextValueNo, OpTy); BasicBlock *Default = getBasicBlock(Record[3]); if (OpTy == 0 || Cond == 0 || Default == 0) return Error("Invalid SWITCH record"); @@ -2288,7 +2334,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { if (Record.size() < 3 || (Record.size() & 1) == 0) return Error("Invalid SWITCH record"); Type *OpTy = getTypeByID(Record[0]); - Value *Cond = getFnValueByID(Record[1], OpTy); + Value *Cond = getValue(Record, 1, NextValueNo, OpTy); BasicBlock *Default = getBasicBlock(Record[2]); if (OpTy == 0 || Cond == 0 || Default == 0) return Error("Invalid SWITCH record"); @@ -2312,7 +2358,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { if (Record.size() < 2) return Error("Invalid INDIRECTBR record"); Type *OpTy = getTypeByID(Record[0]); - Value *Address = getFnValueByID(Record[1], OpTy); + Value *Address = getValue(Record, 1, NextValueNo, OpTy); if (OpTy == 0 || Address == 0) return Error("Invalid INDIRECTBR record"); unsigned NumDests = Record.size()-2; @@ -2354,7 +2400,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { SmallVector Ops; for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) { - Ops.push_back(getFnValueByID(Record[OpNum], FTy->getParamType(i))); + Ops.push_back(getValue(Record, OpNum, NextValueNo, + FTy->getParamType(i))); if (Ops.back() == 0) return Error("Invalid INVOKE record"); } @@ -2401,7 +2448,14 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { InstructionList.push_back(PN); for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) { - Value *V = getFnValueByID(Record[1+i], Ty); + Value *V; + // With the new function encoding, it is possible that operands have + // negative IDs (for forward references). Use a signed VBR + // representation to keep the encoding small. 
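The "signed VBR" mentioned in the comment above is the sign-rotated form handled by decodeSignRotatedValue in the reader and emitSignedInt64 in the writer: the magnitude is shifted left one bit and the sign lands in the LSB, so small negative values encode as small unsigned values and stay cheap under VBR. A standalone sketch of the round trip (the INT64_MIN / "negative zero" special case in the real decoder is omitted):

    #include <cassert>
    #include <cstdint>

    uint64_t encodeSignRotated(int64_t V) {
      // |V| shifted left, sign stored in the low bit (as emitSignedInt64 does).
      return V >= 0 ? uint64_t(V) << 1 : ((0 - uint64_t(V)) << 1) | 1;
    }

    int64_t decodeSignRotated(uint64_t V) {
      return (V & 1) == 0 ? int64_t(V >> 1) : -int64_t(V >> 1);
    }

    int main() {
      assert(encodeSignRotated(5)  == 10);  // small positives stay small
      assert(encodeSignRotated(-3) == 7);   // small negatives stay small too
      assert(decodeSignRotated(7)  == -3);  // and decode back losslessly
    }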
+ if (UseRelativeIDs) + V = getValueSigned(Record, 1+i, NextValueNo, Ty); + else + V = getValue(Record, 1+i, NextValueNo, Ty); BasicBlock *BB = getBasicBlock(Record[2+i]); if (!V || !BB) return Error("Invalid PHI record"); PN->addIncoming(V, BB); @@ -2499,7 +2553,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *Val, *Ptr; if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || - getValue(Record, OpNum, + popValue(Record, OpNum, NextValueNo, cast(Ptr->getType())->getElementType(), Val) || OpNum+2 != Record.size()) return Error("Invalid STORE record"); @@ -2513,7 +2567,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *Val, *Ptr; if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || - getValue(Record, OpNum, + popValue(Record, OpNum, NextValueNo, cast(Ptr->getType())->getElementType(), Val) || OpNum+4 != Record.size()) return Error("Invalid STOREATOMIC record"); @@ -2536,9 +2590,9 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *Ptr, *Cmp, *New; if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || - getValue(Record, OpNum, + popValue(Record, OpNum, NextValueNo, cast(Ptr->getType())->getElementType(), Cmp) || - getValue(Record, OpNum, + popValue(Record, OpNum, NextValueNo, cast(Ptr->getType())->getElementType(), New) || OpNum+3 != Record.size()) return Error("Invalid CMPXCHG record"); @@ -2556,7 +2610,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { unsigned OpNum = 0; Value *Ptr, *Val; if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || - getValue(Record, OpNum, + popValue(Record, OpNum, NextValueNo, cast(Ptr->getType())->getElementType(), Val) || OpNum+4 != Record.size()) return Error("Invalid ATOMICRMW record"); @@ -2610,7 +2664,8 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { if (FTy->getParamType(i)->isLabelTy()) Args.push_back(getBasicBlock(Record[OpNum])); else - Args.push_back(getFnValueByID(Record[OpNum], FTy->getParamType(i))); + Args.push_back(getValue(Record, OpNum, NextValueNo, + FTy->getParamType(i))); if (Args.back() == 0) return Error("Invalid CALL record"); } @@ -2639,7 +2694,7 @@ bool BitcodeReader::ParseFunctionBody(Function *F) { if (Record.size() < 3) return Error("Invalid VAARG record"); Type *OpTy = getTypeByID(Record[0]); - Value *Op = getFnValueByID(Record[1], OpTy); + Value *Op = getValue(Record, 1, NextValueNo, OpTy); Type *ResTy = getTypeByID(Record[2]); if (!OpTy || !Op || !ResTy) return Error("Invalid VAARG record"); @@ -2837,7 +2892,7 @@ bool BitcodeReader::InitStream() { } bool BitcodeReader::InitStreamFromBuffer() { - const unsigned char *BufPtr = (unsigned char *)Buffer->getBufferStart(); + const unsigned char *BufPtr = (const unsigned char*)Buffer->getBufferStart(); const unsigned char *BufEnd = BufPtr+Buffer->getBufferSize(); if (Buffer->getBufferSize() & 3) { diff --git a/lib/Bitcode/Reader/BitcodeReader.h b/lib/Bitcode/Reader/BitcodeReader.h index e7c4e94..3d5c0eb 100644 --- a/lib/Bitcode/Reader/BitcodeReader.h +++ b/lib/Bitcode/Reader/BitcodeReader.h @@ -179,18 +179,27 @@ class BitcodeReader : public GVMaterializer { typedef std::pair BlockAddrRefTy; DenseMap > BlockAddrFwdRefs; + /// UseRelativeIDs - Indicates that we are using a new encoding for + /// instruction operands where most operands in the current + /// FUNCTION_BLOCK are encoded relative to the instruction number, + /// for a more compact encoding. Some instruction operands are not + /// relative to the instruction ID: basic block numbers, and types. 
+ /// Once the old style function blocks have been phased out, we would + /// not need this flag. + bool UseRelativeIDs; + public: explicit BitcodeReader(MemoryBuffer *buffer, LLVMContext &C) : Context(C), TheModule(0), Buffer(buffer), BufferOwned(false), LazyStreamer(0), NextUnreadBit(0), SeenValueSymbolTable(false), ErrorString(0), ValueList(C), MDValueList(C), - SeenFirstFunctionBody(false) { + SeenFirstFunctionBody(false), UseRelativeIDs(false) { } explicit BitcodeReader(DataStreamer *streamer, LLVMContext &C) : Context(C), TheModule(0), Buffer(0), BufferOwned(false), LazyStreamer(streamer), NextUnreadBit(0), SeenValueSymbolTable(false), ErrorString(0), ValueList(C), MDValueList(C), - SeenFirstFunctionBody(false) { + SeenFirstFunctionBody(false), UseRelativeIDs(false) { } ~BitcodeReader() { FreeState(); @@ -223,6 +232,9 @@ public: /// @brief Cheap mechanism to just extract module triple /// @returns true if an error occurred. bool ParseTriple(std::string &Triple); + + static uint64_t decodeSignRotatedValue(uint64_t V); + private: Type *getTypeByID(unsigned ID); Value *getFnValueByID(unsigned ID, Type *Ty) { @@ -247,6 +259,9 @@ private: unsigned InstNum, Value *&ResVal) { if (Slot == Record.size()) return true; unsigned ValNo = (unsigned)Record[Slot++]; + // Adjust the ValNo, if it was encoded relative to the InstNum. + if (UseRelativeIDs) + ValNo = InstNum - ValNo; if (ValNo < InstNum) { // If this is not a forward reference, just return the value we already // have. @@ -255,20 +270,54 @@ private: } else if (Slot == Record.size()) { return true; } - + unsigned TypeNo = (unsigned)Record[Slot++]; ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo)); return ResVal == 0; } - bool getValue(SmallVector &Record, unsigned &Slot, - Type *Ty, Value *&ResVal) { - if (Slot == Record.size()) return true; - unsigned ValNo = (unsigned)Record[Slot++]; - ResVal = getFnValueByID(ValNo, Ty); + + /// popValue - Read a value out of the specified record from slot 'Slot'. + /// Increment Slot past the number of slots used by the value in the record. + /// Return true if there is an error. + bool popValue(SmallVector &Record, unsigned &Slot, + unsigned InstNum, Type *Ty, Value *&ResVal) { + if (getValue(Record, Slot, InstNum, Ty, ResVal)) + return true; + // All values currently take a single record slot. + ++Slot; + return false; + } + + /// getValue -- Like popValue, but does not increment the Slot number. + bool getValue(SmallVector &Record, unsigned Slot, + unsigned InstNum, Type *Ty, Value *&ResVal) { + ResVal = getValue(Record, Slot, InstNum, Ty); return ResVal == 0; } - + /// getValue -- Version of getValue that returns ResVal directly, + /// or 0 if there is an error. + Value *getValue(SmallVector &Record, unsigned Slot, + unsigned InstNum, Type *Ty) { + if (Slot == Record.size()) return 0; + unsigned ValNo = (unsigned)Record[Slot]; + // Adjust the ValNo, if it was encoded relative to the InstNum. + if (UseRelativeIDs) + ValNo = InstNum - ValNo; + return getFnValueByID(ValNo, Ty); + } + + /// getValueSigned -- Like getValue, but decodes signed VBRs. + Value *getValueSigned(SmallVector &Record, unsigned Slot, + unsigned InstNum, Type *Ty) { + if (Slot == Record.size()) return 0; + unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]); + // Adjust the ValNo, if it was encoded relative to the InstNum. 
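For reference, the relative adjustment described here is a subtraction in each direction: the writer emits InstID - ValID and the reader recovers the value ID as InstNum - stored value (only when module version 1 sets UseRelativeIDs). Backward references become small numbers regardless of how large the absolute IDs are; forward references, as in PHIs, come out negative, which is why they additionally use the signed VBR form above. A hedged sketch with made-up IDs:

    #include <cassert>
    #include <cstdint>

    // Writer side: store operands relative to the instruction's own ID.
    uint64_t encodeOperand(unsigned InstID, unsigned ValID) {
      return uint64_t(InstID) - uint64_t(ValID);
    }

    // Reader side: undo the subtraction.
    unsigned decodeOperand(unsigned InstNum, uint64_t Stored) {
      return unsigned(InstNum - Stored);
    }

    int main() {
      // Instruction #1000 using value #998 stores 2, not 998.
      assert(decodeOperand(1000, encodeOperand(1000, 998)) == 998);
      // A PHI forward reference to value #1003 stores a negative difference.
      assert(int64_t(encodeOperand(1000, 1003)) == -3);
    }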
+ if (UseRelativeIDs) + ValNo = InstNum - ValNo; + return getFnValueByID(ValNo, Ty); + } + bool ParseModule(bool Resume); bool ParseAttributeBlock(); bool ParseTypeTable(); diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp index 5b1725f..60c657a 100644 --- a/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -41,8 +41,6 @@ EnablePreserveUseListOrdering("enable-bc-uselist-preserve", /// These are manifest constants used by the bitcode writer. They do not need to /// be kept in sync with the reader, but need to be consistent within this file. enum { - CurVersion = 0, - // VALUE_SYMTAB_BLOCK abbrev id's. VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV, VST_ENTRY_7_ABBREV, @@ -177,7 +175,7 @@ static void WriteAttributeTable(const ValueEnumerator &VE, for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i) { const AttributeWithIndex &PAWI = A.getSlot(i); Record.push_back(PAWI.Index); - Record.push_back(Attribute::encodeLLVMAttributesForBitcode(PAWI.Attrs)); + Record.push_back(Attributes::encodeLLVMAttributesForBitcode(PAWI.Attrs)); } Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record); @@ -365,7 +363,7 @@ static unsigned getEncodedLinkage(const GlobalValue *GV) { case GlobalValue::AvailableExternallyLinkage: return 12; case GlobalValue::LinkerPrivateLinkage: return 13; case GlobalValue::LinkerPrivateWeakLinkage: return 14; - case GlobalValue::LinkerPrivateWeakDefAutoLinkage: return 15; + case GlobalValue::LinkOnceODRAutoHideLinkage: return 15; } llvm_unreachable("Invalid linkage"); } @@ -722,16 +720,20 @@ static void WriteModuleMetadataStore(const Module *M, BitstreamWriter &Stream) { Stream.ExitBlock(); } +static void emitSignedInt64(SmallVectorImpl &Vals, uint64_t V) { + if ((int64_t)V >= 0) + Vals.push_back(V << 1); + else + Vals.push_back((-V << 1) | 1); +} + static void EmitAPInt(SmallVectorImpl &Vals, unsigned &Code, unsigned &AbbrevToUse, const APInt &Val, bool EmitSizeForWideNumbers = false ) { if (Val.getBitWidth() <= 64) { uint64_t V = Val.getSExtValue(); - if ((int64_t)V >= 0) - Vals.push_back(V << 1); - else - Vals.push_back((-V << 1) | 1); + emitSignedInt64(Vals, V); Code = bitc::CST_CODE_INTEGER; AbbrevToUse = CONSTANTS_INTEGER_ABBREV; } else { @@ -747,11 +749,7 @@ static void EmitAPInt(SmallVectorImpl &Vals, const uint64_t *RawWords = Val.getRawData(); for (unsigned i = 0; i != NWords; ++i) { - int64_t V = RawWords[i]; - if (V >= 0) - Vals.push_back(V << 1); - else - Vals.push_back((-V << 1) | 1); + emitSignedInt64(Vals, RawWords[i]); } Code = bitc::CST_CODE_WIDE_INTEGER; } @@ -814,7 +812,8 @@ static void WriteConstants(unsigned FirstVal, unsigned LastVal, if (const InlineAsm *IA = dyn_cast(V)) { Record.push_back(unsigned(IA->hasSideEffects()) | - unsigned(IA->isAlignStack()) << 1); + unsigned(IA->isAlignStack()) << 1 | + unsigned(IA->getDialect()&1) << 2); // Add the asm string. const std::string &AsmStr = IA->getAsmString(); @@ -1024,12 +1023,13 @@ static void WriteModuleConstants(const ValueEnumerator &VE, /// /// This function adds V's value ID to Vals. If the value ID is higher than the /// instruction ID, then it is a forward reference, and it also includes the -/// type ID. +/// type ID. The value ID that is written is encoded relative to the InstID. static bool PushValueAndType(const Value *V, unsigned InstID, SmallVector &Vals, ValueEnumerator &VE) { unsigned ValID = VE.getValueID(V); - Vals.push_back(ValID); + // Make encoding relative to the InstID. 
+ Vals.push_back(InstID - ValID); if (ValID >= InstID) { Vals.push_back(VE.getTypeID(V->getType())); return true; @@ -1037,6 +1037,30 @@ static bool PushValueAndType(const Value *V, unsigned InstID, return false; } +/// pushValue - Like PushValueAndType, but where the type of the value is +/// omitted (perhaps it was already encoded in an earlier operand). +static void pushValue(const Value *V, unsigned InstID, + SmallVector &Vals, + ValueEnumerator &VE) { + unsigned ValID = VE.getValueID(V); + Vals.push_back(InstID - ValID); +} + +static void pushValue64(const Value *V, unsigned InstID, + SmallVector &Vals, + ValueEnumerator &VE) { + uint64_t ValID = VE.getValueID(V); + Vals.push_back(InstID - ValID); +} + +static void pushValueSigned(const Value *V, unsigned InstID, + SmallVector &Vals, + ValueEnumerator &VE) { + unsigned ValID = VE.getValueID(V); + int64_t diff = ((int32_t)InstID - (int32_t)ValID); + emitSignedInt64(Vals, diff); +} + /// WriteInstruction - Emit an instruction to the specified stream. static void WriteInstruction(const Instruction &I, unsigned InstID, ValueEnumerator &VE, BitstreamWriter &Stream, @@ -1057,7 +1081,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, Code = bitc::FUNC_CODE_INST_BINOP; if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) AbbrevToUse = FUNCTION_INST_BINOP_ABBREV; - Vals.push_back(VE.getValueID(I.getOperand(1))); + pushValue(I.getOperand(1), InstID, Vals, VE); Vals.push_back(GetEncodedBinaryOpcode(I.getOpcode())); uint64_t Flags = GetOptimizationFlags(&I); if (Flags != 0) { @@ -1095,32 +1119,32 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, case Instruction::Select: Code = bitc::FUNC_CODE_INST_VSELECT; PushValueAndType(I.getOperand(1), InstID, Vals, VE); - Vals.push_back(VE.getValueID(I.getOperand(2))); + pushValue(I.getOperand(2), InstID, Vals, VE); PushValueAndType(I.getOperand(0), InstID, Vals, VE); break; case Instruction::ExtractElement: Code = bitc::FUNC_CODE_INST_EXTRACTELT; PushValueAndType(I.getOperand(0), InstID, Vals, VE); - Vals.push_back(VE.getValueID(I.getOperand(1))); + pushValue(I.getOperand(1), InstID, Vals, VE); break; case Instruction::InsertElement: Code = bitc::FUNC_CODE_INST_INSERTELT; PushValueAndType(I.getOperand(0), InstID, Vals, VE); - Vals.push_back(VE.getValueID(I.getOperand(1))); - Vals.push_back(VE.getValueID(I.getOperand(2))); + pushValue(I.getOperand(1), InstID, Vals, VE); + pushValue(I.getOperand(2), InstID, Vals, VE); break; case Instruction::ShuffleVector: Code = bitc::FUNC_CODE_INST_SHUFFLEVEC; PushValueAndType(I.getOperand(0), InstID, Vals, VE); - Vals.push_back(VE.getValueID(I.getOperand(1))); - Vals.push_back(VE.getValueID(I.getOperand(2))); + pushValue(I.getOperand(1), InstID, Vals, VE); + pushValue(I.getOperand(2), InstID, Vals, VE); break; case Instruction::ICmp: case Instruction::FCmp: // compare returning Int1Ty or vector of Int1Ty Code = bitc::FUNC_CODE_INST_CMP2; PushValueAndType(I.getOperand(0), InstID, Vals, VE); - Vals.push_back(VE.getValueID(I.getOperand(1))); + pushValue(I.getOperand(1), InstID, Vals, VE); Vals.push_back(cast(I).getPredicate()); break; @@ -1146,7 +1170,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, Vals.push_back(VE.getValueID(II.getSuccessor(0))); if (II.isConditional()) { Vals.push_back(VE.getValueID(II.getSuccessor(1))); - Vals.push_back(VE.getValueID(II.getCondition())); + pushValue(II.getCondition(), InstID, Vals, VE); } } break; @@ -1163,7 +1187,7 @@ static void WriteInstruction(const Instruction 
&I, unsigned InstID, Vals64.push_back(SwitchRecordHeader); Vals64.push_back(VE.getTypeID(SI.getCondition()->getType())); - Vals64.push_back(VE.getValueID(SI.getCondition())); + pushValue64(SI.getCondition(), InstID, Vals64, VE); Vals64.push_back(VE.getValueID(SI.getDefaultDest())); Vals64.push_back(SI.getNumCases()); for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); @@ -1214,7 +1238,9 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, case Instruction::IndirectBr: Code = bitc::FUNC_CODE_INST_INDIRECTBR; Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); - for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) + // Encode the address operand as relative, but not the basic blocks. + pushValue(I.getOperand(0), InstID, Vals, VE); + for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) Vals.push_back(VE.getValueID(I.getOperand(i))); break; @@ -1233,7 +1259,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, // Emit value #'s for the fixed parameters. for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) - Vals.push_back(VE.getValueID(I.getOperand(i))); // fixed param. + pushValue(I.getOperand(i), InstID, Vals, VE); // fixed param. // Emit type/value pairs for varargs params. if (FTy->isVarArg()) { @@ -1255,12 +1281,19 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, case Instruction::PHI: { const PHINode &PN = cast(I); Code = bitc::FUNC_CODE_INST_PHI; - Vals.push_back(VE.getTypeID(PN.getType())); + // With the newer instruction encoding, forward references could give + // negative valued IDs. This is most common for PHIs, so we use + // signed VBRs. + SmallVector Vals64; + Vals64.push_back(VE.getTypeID(PN.getType())); for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) { - Vals.push_back(VE.getValueID(PN.getIncomingValue(i))); - Vals.push_back(VE.getValueID(PN.getIncomingBlock(i))); + pushValueSigned(PN.getIncomingValue(i), InstID, Vals64, VE); + Vals64.push_back(VE.getValueID(PN.getIncomingBlock(i))); } - break; + // Emit a Vals64 vector and exit. + Stream.EmitRecord(Code, Vals64, AbbrevToUse); + Vals64.clear(); + return; } case Instruction::LandingPad: { @@ -1310,7 +1343,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, else Code = bitc::FUNC_CODE_INST_STORE; PushValueAndType(I.getOperand(1), InstID, Vals, VE); // ptrty + ptr - Vals.push_back(VE.getValueID(I.getOperand(0))); // val. + pushValue(I.getOperand(0), InstID, Vals, VE); // val. Vals.push_back(Log2_32(cast(I).getAlignment())+1); Vals.push_back(cast(I).isVolatile()); if (cast(I).isAtomic()) { @@ -1321,8 +1354,8 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, case Instruction::AtomicCmpXchg: Code = bitc::FUNC_CODE_INST_CMPXCHG; PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr - Vals.push_back(VE.getValueID(I.getOperand(1))); // cmp. - Vals.push_back(VE.getValueID(I.getOperand(2))); // newval. + pushValue(I.getOperand(1), InstID, Vals, VE); // cmp. + pushValue(I.getOperand(2), InstID, Vals, VE); // newval. Vals.push_back(cast(I).isVolatile()); Vals.push_back(GetEncodedOrdering( cast(I).getOrdering())); @@ -1332,7 +1365,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, case Instruction::AtomicRMW: Code = bitc::FUNC_CODE_INST_ATOMICRMW; PushValueAndType(I.getOperand(0), InstID, Vals, VE); // ptrty + ptr - Vals.push_back(VE.getValueID(I.getOperand(1))); // val. + pushValue(I.getOperand(1), InstID, Vals, VE); // val. 
Vals.push_back(GetEncodedRMWOperation( cast(I).getOperation())); Vals.push_back(cast(I).isVolatile()); @@ -1357,8 +1390,13 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, PushValueAndType(CI.getCalledValue(), InstID, Vals, VE); // Callee // Emit value #'s for the fixed parameters. - for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) - Vals.push_back(VE.getValueID(CI.getArgOperand(i))); // fixed param. + for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) { + // Check for labels (can happen with asm labels). + if (FTy->getParamType(i)->isLabelTy()) + Vals.push_back(VE.getValueID(CI.getArgOperand(i))); + else + pushValue(CI.getArgOperand(i), InstID, Vals, VE); // fixed param. + } // Emit type/value pairs for varargs params. if (FTy->isVarArg()) { @@ -1371,7 +1409,7 @@ static void WriteInstruction(const Instruction &I, unsigned InstID, case Instruction::VAArg: Code = bitc::FUNC_CODE_INST_VAARG; Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); // valistty - Vals.push_back(VE.getValueID(I.getOperand(0))); // valist. + pushValue(I.getOperand(0), InstID, Vals, VE); // valist. Vals.push_back(VE.getTypeID(I.getType())); // restype. break; } @@ -1513,8 +1551,8 @@ static void WriteFunction(const Function &F, ValueEnumerator &VE, // Emit blockinfo, which defines the standard abbreviations etc. static void WriteBlockInfo(const ValueEnumerator &VE, BitstreamWriter &Stream) { // We only want to emit block info records for blocks that have multiple - // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK. Other - // blocks can defined their abbrevs inline. + // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK. + // Other blocks can define their abbrevs inline. Stream.EnterBlockInfoBlock(2); { // 8-bit fixed-width VST_ENTRY/VST_BBENTRY strings. @@ -1772,12 +1810,10 @@ static void WriteModuleUseLists(const Module *M, ValueEnumerator &VE, static void WriteModule(const Module *M, BitstreamWriter &Stream) { Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3); - // Emit the version number if it is non-zero. - if (CurVersion) { - SmallVector Vals; - Vals.push_back(CurVersion); - Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals); - } + SmallVector Vals; + unsigned CurVersion = 1; + Vals.push_back(CurVersion); + Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals); // Analyze the module, enumerating globals, functions, etc. 
ValueEnumerator VE(M); diff --git a/lib/Bitcode/Writer/ValueEnumerator.h b/lib/Bitcode/Writer/ValueEnumerator.h index a6ca536..75468e6 100644 --- a/lib/Bitcode/Writer/ValueEnumerator.h +++ b/lib/Bitcode/Writer/ValueEnumerator.h @@ -78,9 +78,9 @@ private: unsigned FirstFuncConstantID; unsigned FirstInstID; - - ValueEnumerator(const ValueEnumerator &); // DO NOT IMPLEMENT - void operator=(const ValueEnumerator &); // DO NOT IMPLEMENT + + ValueEnumerator(const ValueEnumerator &) LLVM_DELETED_FUNCTION; + void operator=(const ValueEnumerator &) LLVM_DELETED_FUNCTION; public: ValueEnumerator(const Module *M); diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp index 205480a..7a1c049 100644 --- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp +++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp @@ -635,7 +635,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( --R; const unsigned NewSuperReg = Order[R]; // Don't consider non-allocatable registers - if (!RegClassInfo.isAllocatable(NewSuperReg)) continue; + if (!MRI.isAllocatable(NewSuperReg)) continue; // Don't replace a register with itself. if (NewSuperReg == SuperReg) continue; @@ -818,7 +818,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies( DEBUG(dbgs() << "\tAntidep reg: " << TRI->getName(AntiDepReg)); assert(AntiDepReg != 0 && "Anti-dependence on reg0?"); - if (!RegClassInfo.isAllocatable(AntiDepReg)) { + if (!MRI.isAllocatable(AntiDepReg)) { // Don't break anti-dependencies on non-allocatable registers. DEBUG(dbgs() << " (non-allocatable)\n"); continue; diff --git a/lib/CodeGen/AllocationOrder.cpp b/lib/CodeGen/AllocationOrder.cpp index 32ad34a..7cde136 100644 --- a/lib/CodeGen/AllocationOrder.cpp +++ b/lib/CodeGen/AllocationOrder.cpp @@ -29,6 +29,7 @@ AllocationOrder::AllocationOrder(unsigned VirtReg, const TargetRegisterClass *RC = VRM.getRegInfo().getRegClass(VirtReg); std::pair HintPair = VRM.getRegInfo().getRegAllocationHint(VirtReg); + const MachineRegisterInfo &MRI = VRM.getRegInfo(); // HintPair.second is a register, phys or virt. Hint = HintPair.second; @@ -52,7 +53,7 @@ AllocationOrder::AllocationOrder(unsigned VirtReg, unsigned *P = new unsigned[Order.size()]; Begin = P; for (unsigned i = 0; i != Order.size(); ++i) - if (!RCI.isReserved(Order[i])) + if (!MRI.isReserved(Order[i])) *P++ = Order[i]; End = P; @@ -69,7 +70,7 @@ AllocationOrder::AllocationOrder(unsigned VirtReg, // The hint must be a valid physreg for allocation. if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) || - !RC->contains(Hint) || RCI.isReserved(Hint))) + !RC->contains(Hint) || MRI.isReserved(Hint))) Hint = 0; } diff --git a/lib/CodeGen/Analysis.cpp b/lib/CodeGen/Analysis.cpp index 447f398..5162ad7 100644 --- a/lib/CodeGen/Analysis.cpp +++ b/lib/CodeGen/Analysis.cpp @@ -21,7 +21,7 @@ #include "llvm/Module.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/SelectionDAG.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/ErrorHandling.h" @@ -79,7 +79,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty, uint64_t StartingOffset) { // Given a struct type, recursively traverse the elements. 
if (StructType *STy = dyn_cast(Ty)) { - const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy); + const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy); for (StructType::element_iterator EB = STy->element_begin(), EI = EB, EE = STy->element_end(); @@ -91,7 +91,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty, // Given an array type, recursively traverse the elements. if (ArrayType *ATy = dyn_cast(Ty)) { Type *EltTy = ATy->getElementType(); - uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy); + uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy); for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets, StartingOffset + i * EltSize); @@ -314,11 +314,13 @@ bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr, // the return. Ignore noalias because it doesn't affect the call sequence. const Function *F = ExitBB->getParent(); Attributes CallerRetAttr = F->getAttributes().getRetAttributes(); - if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias) + if (AttrBuilder(CalleeRetAttr).removeAttribute(Attributes::NoAlias) != + AttrBuilder(CallerRetAttr).removeAttribute(Attributes::NoAlias)) return false; // It's not safe to eliminate the sign / zero extension of the return value. - if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt)) + if (CallerRetAttr.hasAttribute(Attributes::ZExt) || + CallerRetAttr.hasAttribute(Attributes::SExt)) return false; // Otherwise, make sure the unmodified return value of I is the return value. @@ -354,11 +356,13 @@ bool llvm::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, // Conservatively require the attributes of the call to match those of // the return. Ignore noalias because it doesn't affect the call sequence. Attributes CallerRetAttr = F->getAttributes().getRetAttributes(); - if (CallerRetAttr & ~Attribute::NoAlias) + if (AttrBuilder(CallerRetAttr) + .removeAttribute(Attributes::NoAlias).hasAttributes()) return false; // It's not safe to eliminate the sign / zero extension of the return value. - if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt)) + if (CallerRetAttr.hasAttribute(Attributes::ZExt) || + CallerRetAttr.hasAttribute(Attributes::SExt)) return false; // Check if the only use is a function return node. 
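The Analysis.cpp hunks above show the pattern this patch applies throughout LLParser and the bitcode reader/writer: rather than masking raw Attributes bits, attributes are copied into an AttrBuilder, edited or queried by name, and only then compared or re-materialized with Attributes::get. A rough sketch of the comparison isInTailCallPosition now performs, using only calls that appear in this patch (LLVM 3.2-era API assumed; the wrapper name is ours, not LLVM's):

    #include "llvm/Attributes.h"
    using namespace llvm;

    static bool retAttrsDifferIgnoringNoAlias(Attributes CalleeRetAttr,
                                              Attributes CallerRetAttr) {
      // Copy each set into an AttrBuilder, drop noalias (it does not affect
      // the call sequence), and compare what is left.
      return AttrBuilder(CalleeRetAttr).removeAttribute(Attributes::NoAlias) !=
             AttrBuilder(CallerRetAttr).removeAttribute(Attributes::NoAlias);
    }

The same builder-then-materialize shape appears in ParseFunctionHeader and ParseAttributeBlock earlier in the patch: flags accumulate in an AttrBuilder and only become an immutable Attributes (and ultimately an AttrListPtr) through Attributes::get and AttrListPtr::get(Context, ...).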
diff --git a/lib/CodeGen/AsmPrinter/ARMException.cpp b/lib/CodeGen/AsmPrinter/ARMException.cpp index bf5d8c4..b2ebf04 100644 --- a/lib/CodeGen/AsmPrinter/ARMException.cpp +++ b/lib/CodeGen/AsmPrinter/ARMException.cpp @@ -24,7 +24,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index d9be7a1..d74a703 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -33,7 +33,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" @@ -67,7 +67,7 @@ static gcp_map_type &getGCMap(void *&P) { /// getGVAlignmentLog2 - Return the alignment to use for the specified global /// value in log2 form. This rounds up to the preferred alignment if possible /// and legal. -static unsigned getGVAlignmentLog2(const GlobalValue *GV, const TargetData &TD, +static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &TD, unsigned InBits = 0) { unsigned NumBits = 0; if (const GlobalVariable *GVar = dyn_cast(GV)) @@ -131,9 +131,9 @@ const TargetLoweringObjectFile &AsmPrinter::getObjFileLowering() const { } -/// getTargetData - Return information about data layout. -const TargetData &AsmPrinter::getTargetData() const { - return *TM.getTargetData(); +/// getDataLayout - Return information about data layout. +const DataLayout &AsmPrinter::getDataLayout() const { + return *TM.getDataLayout(); } /// getCurrentSection() - Return the current section we are emitting to. @@ -160,7 +160,7 @@ bool AsmPrinter::doInitialization(Module &M) { const_cast(getObjFileLowering()) .Initialize(OutContext, TM); - Mang = new Mangler(OutContext, *TM.getTargetData()); + Mang = new Mangler(OutContext, *TM.getDataLayout()); // Allow the target to emit any magic that it wants at the start of the file. EmitStartOfAsmFile(M); @@ -213,16 +213,16 @@ void AsmPrinter::EmitLinkage(unsigned Linkage, MCSymbol *GVSym) const { case GlobalValue::CommonLinkage: case GlobalValue::LinkOnceAnyLinkage: case GlobalValue::LinkOnceODRLinkage: + case GlobalValue::LinkOnceODRAutoHideLinkage: case GlobalValue::WeakAnyLinkage: case GlobalValue::WeakODRLinkage: case GlobalValue::LinkerPrivateWeakLinkage: - case GlobalValue::LinkerPrivateWeakDefAutoLinkage: if (MAI->getWeakDefDirective() != 0) { // .globl _foo OutStreamer.EmitSymbolAttribute(GVSym, MCSA_Global); if ((GlobalValue::LinkageTypes)Linkage != - GlobalValue::LinkerPrivateWeakDefAutoLinkage) + GlobalValue::LinkOnceODRAutoHideLinkage) // .weak_definition _foo OutStreamer.EmitSymbolAttribute(GVSym, MCSA_WeakDefinition); else @@ -280,7 +280,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM); - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType()); // If the alignment is specified, we *must* obey it. 
Overaligning a global @@ -312,8 +312,8 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { return; } - if (MAI->getLCOMMDirectiveType() != LCOMM::None && - (MAI->getLCOMMDirectiveType() != LCOMM::NoAlignment || Align == 1)) { + if (Align == 1 || + MAI->getLCOMMDirectiveAlignmentType() != LCOMM::NoAlignment) { // .lcomm _foo, 42 OutStreamer.EmitLocalCommonSymbol(GVSym, Size, Align); return; @@ -482,9 +482,8 @@ void AsmPrinter::EmitFunctionEntryLabel() { "' label emitted multiple times to assembly file"); } - -/// EmitComments - Pretty-print comments for instructions. -static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) { +/// emitComments - Pretty-print comments for instructions. +static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) { const MachineFunction *MF = MI.getParent()->getParent(); const TargetMachine &TM = MF->getTarget(); @@ -519,16 +518,16 @@ static void EmitComments(const MachineInstr &MI, raw_ostream &CommentOS) { CommentOS << " Reload Reuse\n"; } -/// EmitImplicitDef - This method emits the specified machine instruction +/// emitImplicitDef - This method emits the specified machine instruction /// that is an implicit def. -static void EmitImplicitDef(const MachineInstr *MI, AsmPrinter &AP) { +static void emitImplicitDef(const MachineInstr *MI, AsmPrinter &AP) { unsigned RegNo = MI->getOperand(0).getReg(); AP.OutStreamer.AddComment(Twine("implicit-def: ") + AP.TM.getRegisterInfo()->getName(RegNo)); AP.OutStreamer.AddBlankLine(); } -static void EmitKill(const MachineInstr *MI, AsmPrinter &AP) { +static void emitKill(const MachineInstr *MI, AsmPrinter &AP) { std::string Str = "kill:"; for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &Op = MI->getOperand(i); @@ -541,10 +540,10 @@ static void EmitKill(const MachineInstr *MI, AsmPrinter &AP) { AP.OutStreamer.AddBlankLine(); } -/// EmitDebugValueComment - This method handles the target-independent form +/// emitDebugValueComment - This method handles the target-independent form /// of DBG_VALUE, returning true if it was able to do so. A false return /// means the target will need to handle MI in EmitInstruction. -static bool EmitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) { +static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) { // This code handles only the 3-operand target-independent form. 
if (MI->getNumOperands() != 3) return false; @@ -674,7 +673,7 @@ void AsmPrinter::EmitFunctionBody() { } if (isVerbose()) - EmitComments(*II, OutStreamer.GetCommentOS()); + emitComments(*II, OutStreamer.GetCommentOS()); switch (II->getOpcode()) { case TargetOpcode::PROLOG_LABEL: @@ -690,15 +689,15 @@ void AsmPrinter::EmitFunctionBody() { break; case TargetOpcode::DBG_VALUE: if (isVerbose()) { - if (!EmitDebugValueComment(II, *this)) + if (!emitDebugValueComment(II, *this)) EmitInstruction(II); } break; case TargetOpcode::IMPLICIT_DEF: - if (isVerbose()) EmitImplicitDef(II, *this); + if (isVerbose()) emitImplicitDef(II, *this); break; case TargetOpcode::KILL: - if (isVerbose()) EmitKill(II, *this); + if (isVerbose()) emitKill(II, *this); break; default: if (!TM.hasMCUseLoc()) @@ -992,7 +991,7 @@ void AsmPrinter::EmitConstantPool() { Kind = SectionKind::getReadOnlyWithRelLocal(); break; case 0: - switch (TM.getTargetData()->getTypeAllocSize(CPE.getType())) { + switch (TM.getDataLayout()->getTypeAllocSize(CPE.getType())) { case 4: Kind = SectionKind::getMergeableConst4(); break; case 8: Kind = SectionKind::getMergeableConst8(); break; case 16: Kind = SectionKind::getMergeableConst16();break; @@ -1038,7 +1037,7 @@ void AsmPrinter::EmitConstantPool() { OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/); Type *Ty = CPE.getType(); - Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty); + Offset = NewOffset + TM.getDataLayout()->getTypeAllocSize(Ty); OutStreamer.EmitLabel(GetCPISymbol(CPI)); if (CPE.isMachineConstantPoolEntry()) @@ -1081,7 +1080,12 @@ void AsmPrinter::EmitJumpTableInfo() { JTInDiffSection = true; } - EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getTargetData()))); + EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getDataLayout()))); + + // Jump tables in code sections are marked with a data_region directive + // where that's supported. + if (!JTInDiffSection) + OutStreamer.EmitDataRegion(MCDR_DataRegionJT32); for (unsigned JTI = 0, e = JT.size(); JTI != e; ++JTI) { const std::vector &JTBBs = JT[JTI].MBBs; @@ -1123,6 +1127,8 @@ void AsmPrinter::EmitJumpTableInfo() { for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii) EmitJumpTableEntry(MJTI, JTBBs[ii], JTI); } + if (!JTInDiffSection) + OutStreamer.EmitDataRegion(MCDR_DataRegionEnd); } /// EmitJumpTableEntry - Emit a jump table entry for the specified MBB to the @@ -1190,7 +1196,7 @@ void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI, assert(Value && "Unknown entry kind!"); - unsigned EntrySize = MJTI->getEntrySize(*TM.getTargetData()); + unsigned EntrySize = MJTI->getEntrySize(*TM.getDataLayout()); OutStreamer.EmitValue(Value, EntrySize, /*addrspace*/0); } @@ -1292,7 +1298,7 @@ void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) { } // Emit the function pointers in the target-specific order - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); unsigned Align = Log2_32(TD->getPointerPrefAlignment()); std::stable_sort(Structors.begin(), Structors.end(), priority_order); for (unsigned i = 0, e = Structors.size(); i != e; ++i) { @@ -1408,7 +1414,7 @@ void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset, // if required for correctness. 
// void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const { - if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getTargetData(), NumBits); + if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getDataLayout(), NumBits); if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment. @@ -1422,9 +1428,9 @@ void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const { // Constant emission. //===----------------------------------------------------------------------===// -/// LowerConstant - Lower the specified LLVM Constant to an MCExpr. +/// lowerConstant - Lower the specified LLVM Constant to an MCExpr. /// -static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) { +static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) { MCContext &Ctx = AP.OutContext; if (CV->isNullValue() || isa(CV)) @@ -1447,12 +1453,12 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) { switch (CE->getOpcode()) { default: // If the code isn't optimized, there may be outstanding folding - // opportunities. Attempt to fold the expression using TargetData as a + // opportunities. Attempt to fold the expression using DataLayout as a // last resort before giving up. if (Constant *C = - ConstantFoldConstantExpression(CE, AP.TM.getTargetData())) + ConstantFoldConstantExpression(CE, AP.TM.getDataLayout())) if (C != CE) - return LowerConstant(C, AP); + return lowerConstant(C, AP); // Otherwise report the problem to the user. { @@ -1464,21 +1470,20 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) { report_fatal_error(OS.str()); } case Instruction::GetElementPtr: { - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); // Generate a symbolic expression for the byte address const Constant *PtrVal = CE->getOperand(0); SmallVector IdxVec(CE->op_begin()+1, CE->op_end()); int64_t Offset = TD.getIndexedOffset(PtrVal->getType(), IdxVec); - const MCExpr *Base = LowerConstant(CE->getOperand(0), AP); + const MCExpr *Base = lowerConstant(CE->getOperand(0), AP); if (Offset == 0) return Base; // Truncate/sext the offset to the pointer size. - if (TD.getPointerSizeInBits() != 64) { - int SExtAmount = 64-TD.getPointerSizeInBits(); - Offset = (Offset << SExtAmount) >> SExtAmount; - } + unsigned Width = TD.getPointerSizeInBits(); + if (Width < 64) + Offset = SignExtend64(Offset, Width); return MCBinaryExpr::CreateAdd(Base, MCConstantExpr::Create(Offset, Ctx), Ctx); @@ -1491,26 +1496,26 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) { // is reasonable to treat their delta as a 32-bit value. // FALL THROUGH. case Instruction::BitCast: - return LowerConstant(CE->getOperand(0), AP); + return lowerConstant(CE->getOperand(0), AP); case Instruction::IntToPtr: { - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); // Handle casts to pointers by changing them into casts to the appropriate // integer type. This promotes constant folding and simplifies this code. Constant *Op = CE->getOperand(0); Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()), false/*ZExt*/); - return LowerConstant(Op, AP); + return lowerConstant(Op, AP); } case Instruction::PtrToInt: { - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); // Support only foldable casts to/from pointers that can be eliminated by // changing the pointer to the appropriately sized integer type. 
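One detail in the lowerConstant changes above: the GetElementPtr case now uses SignExtend64(Offset, Width) instead of the old shift pair, truncating the symbolic byte offset to the pointer width and sign-extending it back to 64 bits. A standalone equivalent for illustration (LLVM's version lives in MathExtras.h):

    #include <cassert>
    #include <cstdint>

    // Same arithmetic as the old "(Offset << SExtAmount) >> SExtAmount" pair,
    // expressed for an arbitrary bit width B < 64.
    int64_t signExtend64(uint64_t X, unsigned B) {
      return int64_t(X << (64 - B)) >> (64 - B);
    }

    int main() {
      // On a 32-bit pointer target, an all-ones offset is really -1.
      assert(signExtend64(0xFFFFFFFFull, 32) == -1);
      // Offsets that fit in the pointer width are unchanged.
      assert(signExtend64(1024, 32) == 1024);
    }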
Constant *Op = CE->getOperand(0); Type *Ty = CE->getType(); - const MCExpr *OpExpr = LowerConstant(Op, AP); + const MCExpr *OpExpr = lowerConstant(Op, AP); // We can emit the pointer value into this slot if the slot is an // integer slot equal to the size of the pointer. @@ -1536,8 +1541,8 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) { case Instruction::And: case Instruction::Or: case Instruction::Xor: { - const MCExpr *LHS = LowerConstant(CE->getOperand(0), AP); - const MCExpr *RHS = LowerConstant(CE->getOperand(1), AP); + const MCExpr *LHS = lowerConstant(CE->getOperand(0), AP); + const MCExpr *RHS = lowerConstant(CE->getOperand(1), AP); switch (CE->getOpcode()) { default: llvm_unreachable("Unknown binary operator constant cast expr"); case Instruction::Add: return MCBinaryExpr::CreateAdd(LHS, RHS, Ctx); @@ -1554,7 +1559,7 @@ static const MCExpr *LowerConstant(const Constant *CV, AsmPrinter &AP) { } } -static void EmitGlobalConstantImpl(const Constant *C, unsigned AddrSpace, +static void emitGlobalConstantImpl(const Constant *C, unsigned AddrSpace, AsmPrinter &AP); /// isRepeatedByteSequence - Determine whether the given value is @@ -1578,7 +1583,7 @@ static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) { if (const ConstantInt *CI = dyn_cast(V)) { if (CI->getBitWidth() > 64) return -1; - uint64_t Size = TM.getTargetData()->getTypeAllocSize(V->getType()); + uint64_t Size = TM.getDataLayout()->getTypeAllocSize(V->getType()); uint64_t Value = CI->getZExtValue(); // Make sure the constant is at least 8 bits long and has a power @@ -1616,13 +1621,13 @@ static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) { return -1; } -static void EmitGlobalConstantDataSequential(const ConstantDataSequential *CDS, +static void emitGlobalConstantDataSequential(const ConstantDataSequential *CDS, unsigned AddrSpace,AsmPrinter &AP){ // See if we can aggregate this into a .fill, if so, emit it as such. int Value = isRepeatedByteSequence(CDS, AP.TM); if (Value != -1) { - uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CDS->getType()); + uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CDS->getType()); // Don't emit a 1-byte object as a .fill. if (Bytes > 1) return AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace); @@ -1672,7 +1677,7 @@ static void EmitGlobalConstantDataSequential(const ConstantDataSequential *CDS, } } - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); unsigned Size = TD.getTypeAllocSize(CDS->getType()); unsigned EmittedSize = TD.getTypeAllocSize(CDS->getType()->getElementType()) * CDS->getNumElements(); @@ -1681,28 +1686,28 @@ static void EmitGlobalConstantDataSequential(const ConstantDataSequential *CDS, } -static void EmitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace, +static void emitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace, AsmPrinter &AP) { // See if we can aggregate some values. Make sure it can be // represented as a series of bytes of the constant value. 
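// An illustrative sketch of the .fill aggregation above: if every byte of a
// constant has the same value, the printer can emit one ".fill N, 1, V"
// directive instead of N separate bytes. Standalone code; the real
// isRepeatedByteSequence also looks through integers, floats and nested
// arrays before deciding.

#include <cstdint>
#include <iostream>
#include <vector>

// Returns the repeated byte value, or -1 if the bytes are not all identical.
static int repeatedByte(const std::vector<uint8_t> &Bytes) {
  if (Bytes.empty())
    return -1;
  for (uint8_t B : Bytes)
    if (B != Bytes[0])
      return -1;
  return Bytes[0];
}

int main() {
  std::vector<uint8_t> Data(32, 0xAB);
  int Value = repeatedByte(Data);
  if (Value != -1 && Data.size() > 1) // a 1-byte object is not worth a .fill
    std::cout << ".fill " << Data.size() << ", 1, " << Value << "\n";
  else
    std::cout << "(emit " << Data.size() << " individual bytes)\n";
}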
int Value = isRepeatedByteSequence(CA, AP.TM); if (Value != -1) { - uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CA->getType()); + uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CA->getType()); AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace); } else { for (unsigned i = 0, e = CA->getNumOperands(); i != e; ++i) - EmitGlobalConstantImpl(CA->getOperand(i), AddrSpace, AP); + emitGlobalConstantImpl(CA->getOperand(i), AddrSpace, AP); } } -static void EmitGlobalConstantVector(const ConstantVector *CV, +static void emitGlobalConstantVector(const ConstantVector *CV, unsigned AddrSpace, AsmPrinter &AP) { for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i) - EmitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP); + emitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP); - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); unsigned Size = TD.getTypeAllocSize(CV->getType()); unsigned EmittedSize = TD.getTypeAllocSize(CV->getType()->getElementType()) * CV->getType()->getNumElements(); @@ -1710,10 +1715,10 @@ static void EmitGlobalConstantVector(const ConstantVector *CV, AP.OutStreamer.EmitZeros(Padding, AddrSpace); } -static void EmitGlobalConstantStruct(const ConstantStruct *CS, +static void emitGlobalConstantStruct(const ConstantStruct *CS, unsigned AddrSpace, AsmPrinter &AP) { // Print the fields in successive locations. Pad to align if needed! - const TargetData *TD = AP.TM.getTargetData(); + const DataLayout *TD = AP.TM.getDataLayout(); unsigned Size = TD->getTypeAllocSize(CS->getType()); const StructLayout *Layout = TD->getStructLayout(CS->getType()); uint64_t SizeSoFar = 0; @@ -1727,7 +1732,7 @@ static void EmitGlobalConstantStruct(const ConstantStruct *CS, SizeSoFar += FieldSize + PadSize; // Now print the actual field value. - EmitGlobalConstantImpl(Field, AddrSpace, AP); + emitGlobalConstantImpl(Field, AddrSpace, AP); // Insert padding - this may include padding to increase the size of the // current field up to the ABI size (if the struct is not packed) as well @@ -1738,7 +1743,7 @@ static void EmitGlobalConstantStruct(const ConstantStruct *CS, "Layout of constant struct may be incorrect!"); } -static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace, +static void emitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace, AsmPrinter &AP) { if (CFP->getType()->isHalfTy()) { if (AP.isVerbose()) { @@ -1793,7 +1798,7 @@ static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace, << DoubleVal.convertToDouble() << '\n'; } - if (AP.TM.getTargetData()->isBigEndian()) { + if (AP.TM.getDataLayout()->isBigEndian()) { AP.OutStreamer.EmitIntValue(p[1], 2, AddrSpace); AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace); } else { @@ -1802,7 +1807,7 @@ static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace, } // Emit the tail padding for the long double. - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); AP.OutStreamer.EmitZeros(TD.getTypeAllocSize(CFP->getType()) - TD.getTypeStoreSize(CFP->getType()), AddrSpace); return; @@ -1814,7 +1819,7 @@ static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace, // API needed to prevent premature destruction. 
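// A hedged sketch of the padding bookkeeping in emitGlobalConstantStruct
// above: after each field the printer emits enough zero bytes to reach the
// next field's offset (or the struct's total size), so the bytes written
// match the in-memory layout. Offsets and sizes below are made up for
// illustration; the real code takes them from DataLayout's StructLayout.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <vector>

struct Field { uint64_t Offset, Size; };

static void emitStruct(const std::vector<Field> &Fields, uint64_t StructSize) {
  uint64_t SizeSoFar = 0;
  for (size_t I = 0; I < Fields.size(); ++I) {
    uint64_t NextOffset =
        (I + 1 == Fields.size()) ? StructSize : Fields[I + 1].Offset;
    uint64_t PadSize = NextOffset - Fields[I].Offset - Fields[I].Size;
    std::cout << "field of " << Fields[I].Size << " byte(s), then "
              << PadSize << " pad byte(s)\n";
    SizeSoFar += Fields[I].Size + PadSize;
  }
  assert(SizeSoFar == StructSize && "layout of constant struct may be incorrect");
}

int main() {
  // e.g. { char c; /* 3 pad */ int i; short s; /* 2 pad */ } -> 12 bytes.
  emitStruct({{0, 1}, {4, 4}, {8, 2}}, 12);
}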
APInt API = CFP->getValueAPF().bitcastToAPInt(); const uint64_t *p = API.getRawData(); - if (AP.TM.getTargetData()->isBigEndian()) { + if (AP.TM.getDataLayout()->isBigEndian()) { AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace); AP.OutStreamer.EmitIntValue(p[1], 8, AddrSpace); } else { @@ -1823,9 +1828,9 @@ static void EmitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace, } } -static void EmitGlobalConstantLargeInt(const ConstantInt *CI, +static void emitGlobalConstantLargeInt(const ConstantInt *CI, unsigned AddrSpace, AsmPrinter &AP) { - const TargetData *TD = AP.TM.getTargetData(); + const DataLayout *TD = AP.TM.getDataLayout(); unsigned BitWidth = CI->getBitWidth(); assert((BitWidth & 63) == 0 && "only support multiples of 64-bits"); @@ -1839,9 +1844,9 @@ static void EmitGlobalConstantLargeInt(const ConstantInt *CI, } } -static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace, +static void emitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace, AsmPrinter &AP) { - const TargetData *TD = AP.TM.getTargetData(); + const DataLayout *TD = AP.TM.getDataLayout(); uint64_t Size = TD->getTypeAllocSize(CV->getType()); if (isa(CV) || isa(CV)) return AP.OutStreamer.EmitZeros(Size, AddrSpace); @@ -1858,13 +1863,13 @@ static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace, AP.OutStreamer.EmitIntValue(CI->getZExtValue(), Size, AddrSpace); return; default: - EmitGlobalConstantLargeInt(CI, AddrSpace, AP); + emitGlobalConstantLargeInt(CI, AddrSpace, AP); return; } } if (const ConstantFP *CFP = dyn_cast(CV)) - return EmitGlobalConstantFP(CFP, AddrSpace, AP); + return emitGlobalConstantFP(CFP, AddrSpace, AP); if (isa(CV)) { AP.OutStreamer.EmitIntValue(0, Size, AddrSpace); @@ -1872,19 +1877,19 @@ static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace, } if (const ConstantDataSequential *CDS = dyn_cast(CV)) - return EmitGlobalConstantDataSequential(CDS, AddrSpace, AP); + return emitGlobalConstantDataSequential(CDS, AddrSpace, AP); if (const ConstantArray *CVA = dyn_cast(CV)) - return EmitGlobalConstantArray(CVA, AddrSpace, AP); + return emitGlobalConstantArray(CVA, AddrSpace, AP); if (const ConstantStruct *CVS = dyn_cast(CV)) - return EmitGlobalConstantStruct(CVS, AddrSpace, AP); + return emitGlobalConstantStruct(CVS, AddrSpace, AP); if (const ConstantExpr *CE = dyn_cast(CV)) { // Look through bitcasts, which might not be able to be MCExpr'ized (e.g. of // vectors). if (CE->getOpcode() == Instruction::BitCast) - return EmitGlobalConstantImpl(CE->getOperand(0), AddrSpace, AP); + return emitGlobalConstantImpl(CE->getOperand(0), AddrSpace, AP); if (Size > 8) { // If the constant expression's size is greater than 64-bits, then we have @@ -1892,23 +1897,23 @@ static void EmitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace, // that way. Constant *New = ConstantFoldConstantExpression(CE, TD); if (New && New != CE) - return EmitGlobalConstantImpl(New, AddrSpace, AP); + return emitGlobalConstantImpl(New, AddrSpace, AP); } } if (const ConstantVector *V = dyn_cast(CV)) - return EmitGlobalConstantVector(V, AddrSpace, AP); + return emitGlobalConstantVector(V, AddrSpace, AP); // Otherwise, it must be a ConstantExpr. Lower it to an MCExpr, then emit it // thread the streamer with EmitValue. - AP.OutStreamer.EmitValue(LowerConstant(CV, AP), Size, AddrSpace); + AP.OutStreamer.EmitValue(lowerConstant(CV, AP), Size, AddrSpace); } /// EmitGlobalConstant - Print a general LLVM constant to the .s file. 
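// A simplified sketch of the endian handling above: a constant wider than
// 64 bits is held as an array of 64-bit words with the least-significant
// word first (as APInt::getRawData returns it), and the printer walks the
// words in the target's memory order. Standalone analogue only; the real
// emitGlobalConstantFP/emitGlobalConstantLargeInt differ in the details.

#include <cstdint>
#include <cstdio>
#include <vector>

static void emitLargeInt(const std::vector<uint64_t> &Words, bool BigEndian) {
  if (BigEndian) {
    // Most-significant word lives at the lowest address, so emit it first.
    for (size_t I = Words.size(); I-- > 0;)
      std::printf("\t.quad 0x%016llx\n", (unsigned long long)Words[I]);
  } else {
    for (uint64_t W : Words)
      std::printf("\t.quad 0x%016llx\n", (unsigned long long)W);
  }
}

int main() {
  // A 128-bit constant: low word first.
  emitLargeInt({0x1122334455667788ull, 0x99AABBCCDDEEFF00ull},
               /*BigEndian=*/true);
}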
void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) { - uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType()); + uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType()); if (Size) - EmitGlobalConstantImpl(CV, AddrSpace, *this); + emitGlobalConstantImpl(CV, AddrSpace, *this); else if (MAI->hasSubsectionsViaSymbols()) { // If the global has zero size, emit a single byte so that two labels don't // look like they are at the same location. @@ -2023,8 +2028,8 @@ static void PrintChildLoopComment(raw_ostream &OS, const MachineLoop *Loop, } } -/// EmitBasicBlockLoopComments - Pretty-print comments for basic blocks. -static void EmitBasicBlockLoopComments(const MachineBasicBlock &MBB, +/// emitBasicBlockLoopComments - Pretty-print comments for basic blocks. +static void emitBasicBlockLoopComments(const MachineBasicBlock &MBB, const MachineLoopInfo *LI, const AsmPrinter &AP) { // Add loop depth information @@ -2090,7 +2095,7 @@ void AsmPrinter::EmitBasicBlockStart(const MachineBasicBlock *MBB) const { if (const BasicBlock *BB = MBB->getBasicBlock()) if (BB->hasName()) OutStreamer.AddComment("%" + BB->getName()); - EmitBasicBlockLoopComments(*MBB, LI, *this); + emitBasicBlockLoopComments(*MBB, LI, *this); } // Print the main label for the block. diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp index 90d511c..d94e1fe 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinterDwarf.cpp @@ -18,7 +18,7 @@ #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" @@ -112,7 +112,7 @@ unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const { switch (Encoding & 0x07) { default: llvm_unreachable("Invalid encoded value."); - case dwarf::DW_EH_PE_absptr: return TM.getTargetData()->getPointerSize(); + case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize(); case dwarf::DW_EH_PE_udata2: return 2; case dwarf::DW_EH_PE_udata4: return 4; case dwarf::DW_EH_PE_udata8: return 8; diff --git a/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp index db43b06..50f0fc3 100644 --- a/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp +++ b/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp @@ -43,10 +43,10 @@ namespace { }; } -/// SrcMgrDiagHandler - This callback is invoked when the SourceMgr for an +/// srcMgrDiagHandler - This callback is invoked when the SourceMgr for an /// inline asm has an error in it. diagInfo is a pointer to the SrcMgrDiagInfo /// struct above. -static void SrcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) { +static void srcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) { SrcMgrDiagInfo *DiagInfo = static_cast(diagInfo); assert(DiagInfo && "Diagnostic context not passed down?"); @@ -68,7 +68,8 @@ static void SrcMgrDiagHandler(const SMDiagnostic &Diag, void *diagInfo) { } /// EmitInlineAsm - Emit a blob of inline asm to the output streamer. 
-void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const { +void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode, + InlineAsm::AsmDialect Dialect) const { assert(!Str.empty() && "Can't emit empty inline asm block"); // Remember if the buffer is nul terminated or not so we can avoid a copy. @@ -91,12 +92,12 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const { LLVMContext &LLVMCtx = MMI->getModule()->getContext(); bool HasDiagHandler = false; if (LLVMCtx.getInlineAsmDiagnosticHandler() != 0) { - // If the source manager has an issue, we arrange for SrcMgrDiagHandler + // If the source manager has an issue, we arrange for srcMgrDiagHandler // to be invoked, getting DiagInfo passed into it. DiagInfo.LocInfo = LocMDNode; DiagInfo.DiagHandler = LLVMCtx.getInlineAsmDiagnosticHandler(); DiagInfo.DiagContext = LLVMCtx.getInlineAsmDiagnosticContext(); - SrcMgr.setDiagHandler(SrcMgrDiagHandler, &DiagInfo); + SrcMgr.setDiagHandler(srcMgrDiagHandler, &DiagInfo); HasDiagHandler = true; } @@ -126,6 +127,7 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const { if (!TAP) report_fatal_error("Inline asm not supported by this streamer because" " we don't have an asm parser for this target\n"); + Parser->setAssemblerDialect(Dialect); Parser->setTargetParser(*TAP.get()); // Don't implicitly switch to the text section before the asm. @@ -135,71 +137,113 @@ void AsmPrinter::EmitInlineAsm(StringRef Str, const MDNode *LocMDNode) const { report_fatal_error("Error parsing inline asm\n"); } +static void EmitMSInlineAsmStr(const char *AsmStr, const MachineInstr *MI, + MachineModuleInfo *MMI, int InlineAsmVariant, + AsmPrinter *AP, unsigned LocCookie, + raw_ostream &OS) { + // Switch to the inline assembly variant. + OS << "\t.intel_syntax\n\t"; -/// EmitInlineAsm - This method formats and emits the specified machine -/// instruction that is an inline asm. -void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const { - assert(MI->isInlineAsm() && "printInlineAsm only works on inline asms"); - + const char *LastEmitted = AsmStr; // One past the last character emitted. unsigned NumOperands = MI->getNumOperands(); - // Count the number of register definitions to find the asm string. - unsigned NumDefs = 0; - for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef(); - ++NumDefs) - assert(NumDefs != NumOperands-2 && "No asm string?"); + while (*LastEmitted) { + switch (*LastEmitted) { + default: { + // Not a special case, emit the string section literally. + const char *LiteralEnd = LastEmitted+1; + while (*LiteralEnd && *LiteralEnd != '{' && *LiteralEnd != '|' && + *LiteralEnd != '}' && *LiteralEnd != '$' && *LiteralEnd != '\n') + ++LiteralEnd; - assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?"); + OS.write(LastEmitted, LiteralEnd-LastEmitted); + LastEmitted = LiteralEnd; + break; + } + case '\n': + ++LastEmitted; // Consume newline character. + OS << '\n'; // Indent code with newline. + break; + case '$': { + ++LastEmitted; // Consume '$' character. + bool Done = true; - // Disassemble the AsmStr, printing out the literal pieces, the operands, etc. - const char *AsmStr = MI->getOperand(NumDefs).getSymbolName(); + // Handle escapes. + switch (*LastEmitted) { + default: Done = false; break; + case '$': + ++LastEmitted; // Consume second '$' character. + break; + } + if (Done) break; - // If this asmstr is empty, just print the #APP/#NOAPP markers. 
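// An illustrative sketch of the dialect handling introduced above for
// Microsoft-style inline asm: the blob is bracketed by ".intel_syntax" and
// ".att_syntax" so the integrated assembler parses it with the right
// grammar, and the AsmParser is switched to the matching dialect. Plain
// string handling only; the $-operand substitution done by
// EmitMSInlineAsmStr is omitted here.

#include <iostream>
#include <string>

static std::string wrapIntelAsm(const std::string &Body) {
  std::string Out;
  Out += "\t.intel_syntax\n\t";
  Out += Body;                // asm text with operands already substituted
  Out += "\n\t.att_syntax\n";
  return Out;
}

int main() {
  std::cout << wrapIntelAsm("mov eax, dword ptr [ebx]");
}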
- // These are useful to see where empty asm's wound up. - if (AsmStr[0] == 0) { - // Don't emit the comments if writing to a .o file. - if (!OutStreamer.hasRawTextSupport()) return; + const char *IDStart = LastEmitted; + const char *IDEnd = IDStart; + while (*IDEnd >= '0' && *IDEnd <= '9') ++IDEnd; - OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+ - MAI->getInlineAsmStart()); - OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+ - MAI->getInlineAsmEnd()); - return; - } + unsigned Val; + if (StringRef(IDStart, IDEnd-IDStart).getAsInteger(10, Val)) + report_fatal_error("Bad $ operand number in inline asm string: '" + + Twine(AsmStr) + "'"); + LastEmitted = IDEnd; - // Emit the #APP start marker. This has to happen even if verbose-asm isn't - // enabled, so we use EmitRawText. - if (OutStreamer.hasRawTextSupport()) - OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+ - MAI->getInlineAsmStart()); + if (Val >= NumOperands-1) + report_fatal_error("Invalid $ operand number in inline asm string: '" + + Twine(AsmStr) + "'"); - // Get the !srcloc metadata node if we have it, and decode the loc cookie from - // it. - unsigned LocCookie = 0; - const MDNode *LocMD = 0; - for (unsigned i = MI->getNumOperands(); i != 0; --i) { - if (MI->getOperand(i-1).isMetadata() && - (LocMD = MI->getOperand(i-1).getMetadata()) && - LocMD->getNumOperands() != 0) { - if (const ConstantInt *CI = dyn_cast(LocMD->getOperand(0))) { - LocCookie = CI->getZExtValue(); - break; - } - } - } + // Okay, we finally have a value number. Ask the target to print this + // operand! + unsigned OpNo = InlineAsm::MIOp_FirstOperand; - // Emit the inline asm to a temporary string so we can emit it through - // EmitInlineAsm. - SmallString<256> StringData; - raw_svector_ostream OS(StringData); + bool Error = false; - OS << '\t'; + // Scan to find the machine operand number for the operand. + for (; Val; --Val) { + if (OpNo >= MI->getNumOperands()) break; + unsigned OpFlags = MI->getOperand(OpNo).getImm(); + OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1; + } - // The variant of the current asmprinter. - int AsmPrinterVariant = MAI->getAssemblerDialect(); + // We may have a location metadata attached to the end of the + // instruction, and at no point should see metadata at any + // other point while processing. It's an error if so. + if (OpNo >= MI->getNumOperands() || + MI->getOperand(OpNo).isMetadata()) { + Error = true; + } else { + unsigned OpFlags = MI->getOperand(OpNo).getImm(); + ++OpNo; // Skip over the ID number. + + if (InlineAsm::isMemKind(OpFlags)) { + Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant, + /*Modifier*/ 0, OS); + } else { + Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant, + /*Modifier*/ 0, OS); + } + } + if (Error) { + std::string msg; + raw_string_ostream Msg(msg); + Msg << "invalid operand in inline asm: '" << AsmStr << "'"; + MMI->getModule()->getContext().emitError(LocCookie, Msg.str()); + } + break; + } + } + } + OS << "\n\t.att_syntax\n" << (char)0; // null terminate string. +} +static void EmitGCCInlineAsmStr(const char *AsmStr, const MachineInstr *MI, + MachineModuleInfo *MMI, int InlineAsmVariant, + int AsmPrinterVariant, AsmPrinter *AP, + unsigned LocCookie, raw_ostream &OS) { int CurVariant = -1; // The number of the {.|.|.} region we are in. const char *LastEmitted = AsmStr; // One past the last character emitted. 
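// A sketch of how a "$N" reference is resolved in the code above: the
// inline-asm machine operands are laid out in groups, each starting with a
// flags immediate that records how many register operands follow, so
// locating operand N means skipping N whole groups from the first operand.
// The group encoding below is a simplified stand-in for the InlineAsm
// operand flags.

#include <cstdio>
#include <vector>

struct OperandGroup { unsigned NumRegs; }; // stand-in for the flags word

// Returns the flat index of the flags word that starts group Val, or -1 if
// Val is out of range.
static int findOperandGroup(const std::vector<OperandGroup> &Groups,
                            unsigned FirstOperand, unsigned Val) {
  unsigned OpNo = FirstOperand;
  for (unsigned G = 0; G < Groups.size(); ++G) {
    if (G == Val)
      return (int)OpNo;
    OpNo += Groups[G].NumRegs + 1; // skip the flags word plus its registers
  }
  return -1;
}

int main() {
  std::vector<OperandGroup> Groups = {{1}, {2}, {1}};
  std::printf("$2 starts at machine operand %d\n",
              findOperandGroup(Groups, /*FirstOperand=*/3, 2));
}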
+ unsigned NumOperands = MI->getNumOperands(); + + OS << '\t'; while (*LastEmitted) { switch (*LastEmitted) { @@ -272,7 +316,7 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const { " string: '" + Twine(AsmStr) + "'"); std::string Val(StrStart, StrEnd); - PrintSpecial(MI, OS, Val.c_str()); + AP->PrintSpecial(MI, OS, Val.c_str()); LastEmitted = StrEnd+1; break; } @@ -340,13 +384,12 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const { // FIXME: What if the operand isn't an MBB, report error? OS << *MI->getOperand(OpNo).getMBB()->getSymbol(); else { - AsmPrinter *AP = const_cast(this); if (InlineAsm::isMemKind(OpFlags)) { - Error = AP->PrintAsmMemoryOperand(MI, OpNo, AsmPrinterVariant, + Error = AP->PrintAsmMemoryOperand(MI, OpNo, InlineAsmVariant, Modifier[0] ? Modifier : 0, OS); } else { - Error = AP->PrintAsmOperand(MI, OpNo, AsmPrinterVariant, + Error = AP->PrintAsmOperand(MI, OpNo, InlineAsmVariant, Modifier[0] ? Modifier : 0, OS); } } @@ -363,7 +406,74 @@ void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const { } } OS << '\n' << (char)0; // null terminate string. - EmitInlineAsm(OS.str(), LocMD); +} + +/// EmitInlineAsm - This method formats and emits the specified machine +/// instruction that is an inline asm. +void AsmPrinter::EmitInlineAsm(const MachineInstr *MI) const { + assert(MI->isInlineAsm() && "printInlineAsm only works on inline asms"); + + // Count the number of register definitions to find the asm string. + unsigned NumDefs = 0; + for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef(); + ++NumDefs) + assert(NumDefs != MI->getNumOperands()-2 && "No asm string?"); + + assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?"); + + // Disassemble the AsmStr, printing out the literal pieces, the operands, etc. + const char *AsmStr = MI->getOperand(NumDefs).getSymbolName(); + + // If this asmstr is empty, just print the #APP/#NOAPP markers. + // These are useful to see where empty asm's wound up. + if (AsmStr[0] == 0) { + // Don't emit the comments if writing to a .o file. + if (!OutStreamer.hasRawTextSupport()) return; + + OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+ + MAI->getInlineAsmStart()); + OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+ + MAI->getInlineAsmEnd()); + return; + } + + // Emit the #APP start marker. This has to happen even if verbose-asm isn't + // enabled, so we use EmitRawText. + if (OutStreamer.hasRawTextSupport()) + OutStreamer.EmitRawText(Twine("\t")+MAI->getCommentString()+ + MAI->getInlineAsmStart()); + + // Get the !srcloc metadata node if we have it, and decode the loc cookie from + // it. + unsigned LocCookie = 0; + const MDNode *LocMD = 0; + for (unsigned i = MI->getNumOperands(); i != 0; --i) { + if (MI->getOperand(i-1).isMetadata() && + (LocMD = MI->getOperand(i-1).getMetadata()) && + LocMD->getNumOperands() != 0) { + if (const ConstantInt *CI = dyn_cast(LocMD->getOperand(0))) { + LocCookie = CI->getZExtValue(); + break; + } + } + } + + // Emit the inline asm to a temporary string so we can emit it through + // EmitInlineAsm. + SmallString<256> StringData; + raw_svector_ostream OS(StringData); + + // The variant of the current asmprinter. 
+ int AsmPrinterVariant = MAI->getAssemblerDialect(); + InlineAsm::AsmDialect InlineAsmVariant = MI->getInlineAsmDialect(); + AsmPrinter *AP = const_cast(this); + if (InlineAsmVariant == InlineAsm::AD_ATT) + EmitGCCInlineAsmStr(AsmStr, MI, MMI, InlineAsmVariant, AsmPrinterVariant, + AP, LocCookie, OS); + else + EmitMSInlineAsmStr(AsmStr, MI, MMI, InlineAsmVariant, AP, LocCookie, OS); + + EmitInlineAsm(OS.str(), LocMD, MI->getInlineAsmDialect()); // Emit the #NOAPP end marker. This has to happen even if verbose-asm isn't // enabled, so we use EmitRawText. @@ -409,8 +519,8 @@ void AsmPrinter::PrintSpecial(const MachineInstr *MI, raw_ostream &OS, /// instruction, using the specified assembler variant. Targets should /// override this to format as appropriate. bool AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, const char *ExtraCode, - raw_ostream &O) { + unsigned AsmVariant, const char *ExtraCode, + raw_ostream &O) { // Does this asm operand have a single letter operand modifier? if (ExtraCode && ExtraCode[0]) { if (ExtraCode[1] != 0) return true; // Unknown modifier. diff --git a/lib/CodeGen/AsmPrinter/DIE.cpp b/lib/CodeGen/AsmPrinter/DIE.cpp index 3776848..4d73b3c 100644 --- a/lib/CodeGen/AsmPrinter/DIE.cpp +++ b/lib/CodeGen/AsmPrinter/DIE.cpp @@ -17,7 +17,7 @@ #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -182,6 +182,12 @@ void DIEValue::dump() { void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const { unsigned Size = ~0U; switch (Form) { + case dwarf::DW_FORM_flag_present: + // Emit something to keep the lines and comments in sync. + // FIXME: Is there a better way to do this? 
+ if (Asm->OutStreamer.hasRawTextSupport()) + Asm->OutStreamer.EmitRawText(StringRef("")); + return; case dwarf::DW_FORM_flag: // Fall thru case dwarf::DW_FORM_ref1: // Fall thru case dwarf::DW_FORM_data1: Size = 1; break; @@ -193,7 +199,8 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const { case dwarf::DW_FORM_data8: Size = 8; break; case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return; case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return; - case dwarf::DW_FORM_addr: Size = Asm->getTargetData().getPointerSize(); break; + case dwarf::DW_FORM_addr: + Size = Asm->getDataLayout().getPointerSize(); break; default: llvm_unreachable("DIE Value form not supported yet"); } Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/); @@ -203,6 +210,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const { /// unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const { switch (Form) { + case dwarf::DW_FORM_flag_present: return 0; case dwarf::DW_FORM_flag: // Fall thru case dwarf::DW_FORM_ref1: // Fall thru case dwarf::DW_FORM_data1: return sizeof(int8_t); @@ -214,7 +222,7 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const { case dwarf::DW_FORM_data8: return sizeof(int64_t); case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer); case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer); - case dwarf::DW_FORM_addr: return AP->getTargetData().getPointerSize(); + case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize(); default: llvm_unreachable("DIE Value form not supported yet"); } } @@ -241,7 +249,7 @@ void DIELabel::EmitValue(AsmPrinter *AP, unsigned Form) const { unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const { if (Form == dwarf::DW_FORM_data4) return 4; if (Form == dwarf::DW_FORM_strp) return 4; - return AP->getTargetData().getPointerSize(); + return AP->getDataLayout().getPointerSize(); } #ifndef NDEBUG @@ -265,7 +273,7 @@ void DIEDelta::EmitValue(AsmPrinter *AP, unsigned Form) const { unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const { if (Form == dwarf::DW_FORM_data4) return 4; if (Form == dwarf::DW_FORM_strp) return 4; - return AP->getTargetData().getPointerSize(); + return AP->getDataLayout().getPointerSize(); } #ifndef NDEBUG diff --git a/lib/CodeGen/AsmPrinter/DIE.h b/lib/CodeGen/AsmPrinter/DIE.h index f93ea1b..28a96f3 100644 --- a/lib/CodeGen/AsmPrinter/DIE.h +++ b/lib/CodeGen/AsmPrinter/DIE.h @@ -214,9 +214,6 @@ namespace llvm { /// virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const = 0; - // Implement isa/cast/dyncast. - static bool classof(const DIEValue *) { return true; } - #ifndef NDEBUG virtual void print(raw_ostream &O) = 0; void dump(); @@ -257,7 +254,6 @@ namespace llvm { virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const; // Implement isa/cast/dyncast. - static bool classof(const DIEInteger *) { return true; } static bool classof(const DIEValue *I) { return I->getType() == isInteger; } #ifndef NDEBUG @@ -286,7 +282,6 @@ namespace llvm { virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const; // Implement isa/cast/dyncast. - static bool classof(const DIELabel *) { return true; } static bool classof(const DIEValue *L) { return L->getType() == isLabel; } #ifndef NDEBUG @@ -313,7 +308,6 @@ namespace llvm { virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const; // Implement isa/cast/dyncast. 
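// A small sketch of the size computation above: DW_FORM_flag_present encodes
// "this flag is true" purely by the attribute's presence, so it contributes
// zero bytes to the DIE, unlike DW_FORM_flag which always writes one byte.
// The form values are the standard DWARF codes; the size table is a
// simplified stand-in for DIEInteger::SizeOf.

#include <cstdio>

enum DwarfForm : unsigned {
  DW_FORM_data4        = 0x06,
  DW_FORM_flag         = 0x0c,
  DW_FORM_flag_present = 0x19,
};

static unsigned sizeOfForm(DwarfForm Form, unsigned PointerSize) {
  switch (Form) {
  case DW_FORM_flag_present: return 0; // value is implied by presence alone
  case DW_FORM_flag:         return 1;
  case DW_FORM_data4:        return 4;
  }
  return PointerSize; // e.g. DW_FORM_addr uses the target pointer size
}

int main() {
  std::printf("flag_present: %u byte(s), flag: %u byte(s)\n",
              sizeOfForm(DW_FORM_flag_present, 8), sizeOfForm(DW_FORM_flag, 8));
}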
- static bool classof(const DIEDelta *) { return true; } static bool classof(const DIEValue *D) { return D->getType() == isDelta; } #ifndef NDEBUG @@ -343,7 +337,6 @@ namespace llvm { } // Implement isa/cast/dyncast. - static bool classof(const DIEEntry *) { return true; } static bool classof(const DIEValue *E) { return E->getType() == isEntry; } #ifndef NDEBUG @@ -383,7 +376,6 @@ namespace llvm { virtual unsigned SizeOf(AsmPrinter *AP, unsigned Form) const; // Implement isa/cast/dyncast. - static bool classof(const DIEBlock *) { return true; } static bool classof(const DIEValue *E) { return E->getType() == isBlock; } #ifndef NDEBUG diff --git a/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp b/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp index 454a923..05e0f2f 100644 --- a/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfAccelTable.cpp @@ -133,8 +133,8 @@ void DwarfAccelTable::EmitHeader(AsmPrinter *Asm) { } } -// Walk through and emit the buckets for the table. This will look -// like a list of numbers of how many elements are in each bucket. +// Walk through and emit the buckets for the table. Each index is +// an offset into the list of hashes. void DwarfAccelTable::EmitBuckets(AsmPrinter *Asm) { unsigned index = 0; for (size_t i = 0, e = Buckets.size(); i < e; ++i) { diff --git a/lib/CodeGen/AsmPrinter/DwarfAccelTable.h b/lib/CodeGen/AsmPrinter/DwarfAccelTable.h index 963b8cd..92d1bbe 100644 --- a/lib/CodeGen/AsmPrinter/DwarfAccelTable.h +++ b/lib/CodeGen/AsmPrinter/DwarfAccelTable.h @@ -237,8 +237,8 @@ private: #endif }; - DwarfAccelTable(const DwarfAccelTable&); // DO NOT IMPLEMENT - void operator=(const DwarfAccelTable&); // DO NOT IMPLEMENT + DwarfAccelTable(const DwarfAccelTable&) LLVM_DELETED_FUNCTION; + void operator=(const DwarfAccelTable&) LLVM_DELETED_FUNCTION; // Internal Functions void EmitHeader(AsmPrinter *); diff --git a/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp b/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp index d975f1f..4fdd5ca 100644 --- a/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfCFIException.cpp @@ -25,7 +25,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp index d30e5bb..2b07dda 100644 --- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp @@ -22,7 +22,7 @@ #include "llvm/Instructions.h" #include "llvm/Support/Debug.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" @@ -51,6 +51,15 @@ DIEEntry *CompileUnit::createDIEEntry(DIE *Entry) { return Value; } +/// addFlag - Add a flag that is true. +void CompileUnit::addFlag(DIE *Die, unsigned Attribute) { + if (!DD->useDarwinGDBCompat()) + Die->addValue(Attribute, dwarf::DW_FORM_flag_present, + DIEIntegerOne); + else + addUInt(Die, Attribute, dwarf::DW_FORM_flag, 1); +} + /// addUInt - Add an unsigned integer attribute data and value. 
/// void CompileUnit::addUInt(DIE *Die, unsigned Attribute, @@ -501,7 +510,7 @@ bool CompileUnit::addConstantFPValue(DIE *Die, const MachineOperand &MO) { const char *FltPtr = (const char*)FltVal.getRawData(); int NumBytes = FltVal.getBitWidth() / 8; // 8 bits per byte. - bool LittleEndian = Asm->getTargetData().isLittleEndian(); + bool LittleEndian = Asm->getDataLayout().isLittleEndian(); int Incr = (LittleEndian ? 1 : -1); int Start = (LittleEndian ? 0 : NumBytes - 1); int Stop = (LittleEndian ? NumBytes : -1); @@ -543,7 +552,7 @@ bool CompileUnit::addConstantValue(DIE *Die, const ConstantInt *CI, const uint64_t *Ptr64 = Val.getRawData(); int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte. - bool LittleEndian = Asm->getTargetData().isLittleEndian(); + bool LittleEndian = Asm->getDataLayout().isLittleEndian(); // Output the constant to DWARF one byte at a time. for (int i = 0; i < NumBytes; i++) { @@ -794,7 +803,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) { (Language == dwarf::DW_LANG_C89 || Language == dwarf::DW_LANG_C99 || Language == dwarf::DW_LANG_ObjC)) - addUInt(&Buffer, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1); + addFlag(&Buffer, dwarf::DW_AT_prototyped); } break; case dwarf::DW_TAG_structure_type: @@ -825,15 +834,15 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) { addUInt(ElemDie, dwarf::DW_AT_accessibility, dwarf::DW_FORM_data1, dwarf::DW_ACCESS_public); if (SP.isExplicit()) - addUInt(ElemDie, dwarf::DW_AT_explicit, dwarf::DW_FORM_flag, 1); + addFlag(ElemDie, dwarf::DW_AT_explicit); } else if (Element.isVariable()) { DIVariable DV(Element); ElemDie = new DIE(dwarf::DW_TAG_variable); addString(ElemDie, dwarf::DW_AT_name, DV.getName()); addType(ElemDie, DV.getType()); - addUInt(ElemDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1); - addUInt(ElemDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1); + addFlag(ElemDie, dwarf::DW_AT_declaration); + addFlag(ElemDie, dwarf::DW_AT_external); addSourceLine(ElemDie, DV); } else if (Element.isDerivedType()) { DIDerivedType DDTy(Element); @@ -883,7 +892,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) { } if (CTy.isAppleBlockExtension()) - addUInt(&Buffer, dwarf::DW_AT_APPLE_block, dwarf::DW_FORM_flag, 1); + addFlag(&Buffer, dwarf::DW_AT_APPLE_block); DICompositeType ContainingType = CTy.getContainingType(); if (DIDescriptor(ContainingType).isCompositeType()) @@ -895,8 +904,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) { } if (CTy.isObjcClassComplete()) - addUInt(&Buffer, dwarf::DW_AT_APPLE_objc_complete_type, - dwarf::DW_FORM_flag, 1); + addFlag(&Buffer, dwarf::DW_AT_APPLE_objc_complete_type); // Add template parameters to a class, structure or union types. // FIXME: The support isn't in the metadata for this yet. @@ -929,7 +937,7 @@ void CompileUnit::constructTypeDIE(DIE &Buffer, DICompositeType CTy) { // If we're a forward decl, say so. if (CTy.isForwardDecl()) - addUInt(&Buffer, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1); + addFlag(&Buffer, dwarf::DW_AT_declaration); // Add source line info if available. if (!CTy.isForwardDecl()) @@ -1028,8 +1036,10 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) { // AT_specification code in order to work around a bug in older // gdbs that requires the linkage name to resolve multiple template // functions. + // TODO: Remove this set of code when we get rid of the old gdb + // compatibility. 
StringRef LinkageName = SP.getLinkageName(); - if (!LinkageName.empty()) + if (!LinkageName.empty() && DD->useDarwinGDBCompat()) addString(SPDie, dwarf::DW_AT_MIPS_linkage_name, getRealLinkageName(LinkageName)); @@ -1043,6 +1053,11 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) { return SPDie; } + // Add the linkage name if we have one. + if (!LinkageName.empty() && !DD->useDarwinGDBCompat()) + addString(SPDie, dwarf::DW_AT_MIPS_linkage_name, + getRealLinkageName(LinkageName)); + // Constructors and operators for anonymous aggregates do not have names. if (!SP.getName().empty()) addString(SPDie, dwarf::DW_AT_name, SP.getName()); @@ -1055,7 +1070,7 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) { (Language == dwarf::DW_LANG_C89 || Language == dwarf::DW_LANG_C99 || Language == dwarf::DW_LANG_ObjC)) - addUInt(SPDie, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag, 1); + addFlag(SPDie, dwarf::DW_AT_prototyped); // Add Return Type. DICompositeType SPTy = SP.getType(); @@ -1079,7 +1094,7 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) { } if (!SP.isDefinition()) { - addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1); + addFlag(SPDie, dwarf::DW_AT_declaration); // Add arguments. Do not add arguments for subprogram definition. They will // be handled while processing variables. @@ -1090,22 +1105,22 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) { if (SPTag == dwarf::DW_TAG_subroutine_type) for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) { DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter); - DIType ATy = DIType(DIType(Args.getElement(i))); + DIType ATy = DIType(Args.getElement(i)); addType(Arg, ATy); if (ATy.isArtificial()) - addUInt(Arg, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1); + addFlag(Arg, dwarf::DW_AT_artificial); SPDie->addChild(Arg); } } if (SP.isArtificial()) - addUInt(SPDie, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1); + addFlag(SPDie, dwarf::DW_AT_artificial); if (!SP.isLocalToUnit()) - addUInt(SPDie, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1); + addFlag(SPDie, dwarf::DW_AT_external); if (SP.isOptimized()) - addUInt(SPDie, dwarf::DW_AT_APPLE_optimized, dwarf::DW_FORM_flag, 1); + addFlag(SPDie, dwarf::DW_AT_APPLE_optimized); if (unsigned isa = Asm->getISAEncoding()) { addUInt(SPDie, dwarf::DW_AT_APPLE_isa, dwarf::DW_FORM_flag, isa); @@ -1168,7 +1183,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) { // Add scoping info. if (!GV.isLocalToUnit()) - addUInt(VariableDIE, dwarf::DW_AT_external, dwarf::DW_FORM_flag, 1); + addFlag(VariableDIE, dwarf::DW_AT_external); // Add line number info. 
addSourceLine(VariableDIE, GV); @@ -1193,8 +1208,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) { addDIEEntry(VariableSpecDIE, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4, VariableDIE); addBlock(VariableSpecDIE, dwarf::DW_AT_location, 0, Block); - addUInt(VariableDIE, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, - 1); + addFlag(VariableDIE, dwarf::DW_AT_declaration); addDie(VariableSpecDIE); } else { addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block); @@ -1213,7 +1227,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) { addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu); SmallVector Idx(CE->op_begin()+1, CE->op_end()); addUInt(Block, 0, dwarf::DW_FORM_udata, - Asm->getTargetData().getIndexedOffset(Ptr->getType(), Idx)); + Asm->getDataLayout().getIndexedOffset(Ptr->getType(), Idx)); addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus); addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block); } @@ -1260,7 +1274,7 @@ void CompileUnit::constructArrayTypeDIE(DIE &Buffer, DICompositeType *CTy) { Buffer.setTag(dwarf::DW_TAG_array_type); if (CTy->getTag() == dwarf::DW_TAG_vector_type) - addUInt(&Buffer, dwarf::DW_AT_GNU_vector, dwarf::DW_FORM_flag, 1); + addFlag(&Buffer, dwarf::DW_AT_GNU_vector); // Emit derived type. addType(&Buffer, CTy->getTypeDerivedFrom()); @@ -1333,8 +1347,7 @@ DIE *CompileUnit::constructVariableDIE(DbgVariable *DV, bool isScopeAbstract) { } if (DV->isArtificial()) - addUInt(VariableDie, dwarf::DW_AT_artificial, - dwarf::DW_FORM_flag, 1); + addFlag(VariableDie, dwarf::DW_AT_artificial); if (isScopeAbstract) { DV->setDIE(VariableDie); @@ -1446,7 +1459,7 @@ DIE *CompileUnit::createMemberDIE(DIDerivedType DT) { Offset -= FieldOffset; // Maybe we need to work from the other end. - if (Asm->getTargetData().isLittleEndian()) + if (Asm->getDataLayout().isLittleEndian()) Offset = FieldSize - (Offset + Size); addUInt(MemberDie, dwarf::DW_AT_bit_offset, 0, Offset); diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h index b4ff9e8..fad9b6e 100644 --- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h +++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.h @@ -176,6 +176,9 @@ public: } public: + /// addFlag - Add a flag that is true to the DIE. + void addFlag(DIE *Die, unsigned Attribute); + /// addUInt - Add an unsigned integer attribute data and value. /// void addUInt(DIE *Die, unsigned Attribute, unsigned Form, uint64_t Integer); @@ -280,8 +283,8 @@ public: /// for the given DITemplateTypeParameter. DIE *getOrCreateTemplateTypeParameterDIE(DITemplateTypeParameter TP); - /// getOrCreateTemplateValueParameterDIE - Find existing DIE or create new DIE - /// for the given DITemplateValueParameter. + /// getOrCreateTemplateValueParameterDIE - Find existing DIE or create + /// new DIE for the given DITemplateValueParameter. 
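// A sketch of the bitfield arithmetic at the end of createMemberDIE above:
// DWARF's DW_AT_bit_offset counts from the most-significant bit of the
// containing storage unit, so on a little-endian target the offset computed
// from the record layout (which counts from bit 0) has to be flipped.
// Standalone arithmetic with made-up numbers.

#include <cstdint>
#include <cstdio>

static uint64_t dwarfBitOffset(uint64_t OffsetFromLSB, uint64_t BitSize,
                               uint64_t StorageBits, bool LittleEndian) {
  if (LittleEndian)
    return StorageBits - (OffsetFromLSB + BitSize);
  return OffsetFromLSB;
}

int main() {
  // A 3-bit field at bits [4..6] of a 32-bit storage unit, little-endian.
  std::printf("DW_AT_bit_offset = %llu\n",
              (unsigned long long)dwarfBitOffset(4, 3, 32, true)); // 25
}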
DIE *getOrCreateTemplateValueParameterDIE(DITemplateValueParameter TVP); /// createDIEEntry - Creates a new DIEEntry to be a proxy for a debug diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index 649684a..367b523 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -27,7 +27,7 @@ #include "llvm/MC/MCSection.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" @@ -54,9 +54,29 @@ static cl::opt UnknownLocations("use-unknown-locations", cl::Hidden, cl::desc("Make an absence of debug location information explicit."), cl::init(false)); -static cl::opt DwarfAccelTables("dwarf-accel-tables", cl::Hidden, +namespace { + enum DefaultOnOff { + Default, Enable, Disable + }; +} + +static cl::opt DwarfAccelTables("dwarf-accel-tables", cl::Hidden, cl::desc("Output prototype dwarf accelerator tables."), - cl::init(false)); + cl::values( + clEnumVal(Default, "Default for platform"), + clEnumVal(Enable, "Enabled"), + clEnumVal(Disable, "Disabled"), + clEnumValEnd), + cl::init(Default)); + +static cl::opt DarwinGDBCompat("darwin-gdb-compat", cl::Hidden, + cl::desc("Compatibility with Darwin gdb."), + cl::values( + clEnumVal(Default, "Default for platform"), + clEnumVal(Enable, "Enabled"), + clEnumVal(Disable, "Disabled"), + clEnumValEnd), + cl::init(Default)); namespace { const char *DWARFGroupName = "DWARF Emission"; @@ -135,10 +155,25 @@ DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M) DwarfDebugRangeSectionSym = DwarfDebugLocSectionSym = 0; FunctionBeginSym = FunctionEndSym = 0; - // Turn on accelerator tables for Darwin. - if (Triple(M->getTargetTriple()).isOSDarwin()) - DwarfAccelTables = true; - + // Turn on accelerator tables and older gdb compatibility + // for Darwin. + bool isDarwin = Triple(M->getTargetTriple()).isOSDarwin(); + if (DarwinGDBCompat == Default) { + if (isDarwin) + isDarwinGDBCompat = true; + else + isDarwinGDBCompat = false; + } else + isDarwinGDBCompat = DarwinGDBCompat == Enable ? true : false; + + if (DwarfAccelTables == Default) { + if (isDarwin) + hasDwarfAccelTables = true; + else + hasDwarfAccelTables = false; + } else + hasDwarfAccelTables = DwarfAccelTables == Enable ? true : false; + { NamedRegionTimer T(DbgTimerName, DWARFGroupName, TimePassesIsEnabled); beginModule(M); @@ -272,44 +307,51 @@ DIE *DwarfDebug::updateSubprogramScopeDIE(CompileUnit *SPCU, assert(SPDie && "Unable to find subprogram DIE!"); DISubprogram SP(SPNode); - DISubprogram SPDecl = SP.getFunctionDeclaration(); - if (!SPDecl.isSubprogram()) { - // There is not any need to generate specification DIE for a function - // defined at compile unit level. If a function is defined inside another - // function then gdb prefers the definition at top level and but does not - // expect specification DIE in parent function. So avoid creating - // specification DIE for a function defined inside a function. - if (SP.isDefinition() && !SP.getContext().isCompileUnit() && - !SP.getContext().isFile() && - !isSubprogramContext(SP.getContext())) { - SPCU->addUInt(SPDie, dwarf::DW_AT_declaration, dwarf::DW_FORM_flag, 1); - - // Add arguments. 
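// A minimal standalone sketch of the option pattern introduced above: a
// three-state flag whose "Default" state defers to a platform-dependent
// choice (here, whether the target is Darwin), while an explicit Enable or
// Disable always wins. This mimics the cl::opt<DefaultOnOff> resolution in
// the DwarfDebug constructor without the command-line machinery.

#include <cstdio>

enum DefaultOnOff { Default, Enable, Disable };

static bool resolve(DefaultOnOff Opt, bool PlatformDefault) {
  if (Opt == Default)
    return PlatformDefault;
  return Opt == Enable;
}

int main() {
  bool IsDarwin = true;
  std::printf("accel tables: %d\n", resolve(Default, IsDarwin)); // on for Darwin
  std::printf("gdb compat:   %d\n", resolve(Disable, IsDarwin)); // forced off
}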
- DICompositeType SPTy = SP.getType(); - DIArray Args = SPTy.getTypeArray(); - unsigned SPTag = SPTy.getTag(); - if (SPTag == dwarf::DW_TAG_subroutine_type) - for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) { - DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter); - DIType ATy = DIType(DIType(Args.getElement(i))); - SPCU->addType(Arg, ATy); - if (ATy.isArtificial()) - SPCU->addUInt(Arg, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag, 1); - SPDie->addChild(Arg); - } - DIE *SPDeclDie = SPDie; - SPDie = new DIE(dwarf::DW_TAG_subprogram); - SPCU->addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4, - SPDeclDie); - SPCU->addDie(SPDie); - } - } - // Pick up abstract subprogram DIE. + // If we're updating an abstract DIE, then we will be adding the children and + // object pointer later on. But what we don't want to do is process the + // concrete DIE twice. if (DIE *AbsSPDIE = AbstractSPDies.lookup(SPNode)) { + // Pick up abstract subprogram DIE. SPDie = new DIE(dwarf::DW_TAG_subprogram); SPCU->addDIEEntry(SPDie, dwarf::DW_AT_abstract_origin, dwarf::DW_FORM_ref4, AbsSPDIE); SPCU->addDie(SPDie); + } else { + DISubprogram SPDecl = SP.getFunctionDeclaration(); + if (!SPDecl.isSubprogram()) { + // There is not any need to generate specification DIE for a function + // defined at compile unit level. If a function is defined inside another + // function then gdb prefers the definition at top level and but does not + // expect specification DIE in parent function. So avoid creating + // specification DIE for a function defined inside a function. + if (SP.isDefinition() && !SP.getContext().isCompileUnit() && + !SP.getContext().isFile() && + !isSubprogramContext(SP.getContext())) { + SPCU->addFlag(SPDie, dwarf::DW_AT_declaration); + + // Add arguments. + DICompositeType SPTy = SP.getType(); + DIArray Args = SPTy.getTypeArray(); + unsigned SPTag = SPTy.getTag(); + if (SPTag == dwarf::DW_TAG_subroutine_type) + for (unsigned i = 1, N = Args.getNumElements(); i < N; ++i) { + DIE *Arg = new DIE(dwarf::DW_TAG_formal_parameter); + DIType ATy = DIType(Args.getElement(i)); + SPCU->addType(Arg, ATy); + if (ATy.isArtificial()) + SPCU->addFlag(Arg, dwarf::DW_AT_artificial); + if (ATy.isObjectPointer()) + SPCU->addDIEEntry(SPDie, dwarf::DW_AT_object_pointer, + dwarf::DW_FORM_ref4, Arg); + SPDie->addChild(Arg); + } + DIE *SPDeclDie = SPDie; + SPDie = new DIE(dwarf::DW_TAG_subprogram); + SPCU->addDIEEntry(SPDie, dwarf::DW_AT_specification, dwarf::DW_FORM_ref4, + SPDeclDie); + SPCU->addDie(SPDie); + } + } } SPCU->addLabel(SPDie, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr, @@ -346,7 +388,7 @@ DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU, // DW_AT_ranges appropriately. 
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4, DebugRangeSymbols.size() - * Asm->getTargetData().getPointerSize()); + * Asm->getDataLayout().getPointerSize()); for (SmallVector::const_iterator RI = Ranges.begin(), RE = Ranges.end(); RI != RE; ++RI) { DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first)); @@ -386,7 +428,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU, DISubprogram InlinedSP = getDISubprogram(DS); DIE *OriginDIE = TheCU->getDIE(InlinedSP); if (!OriginDIE) { - DEBUG(dbgs() << "Unable to find original DIE for inlined subprogram."); + DEBUG(dbgs() << "Unable to find original DIE for an inlined subprogram."); return NULL; } @@ -395,7 +437,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU, const MCSymbol *EndLabel = getLabelAfterInsn(RI->second); if (StartLabel == 0 || EndLabel == 0) { - llvm_unreachable("Unexpected Start and End labels for a inlined scope!"); + llvm_unreachable("Unexpected Start and End labels for an inlined scope!"); } assert(StartLabel->isDefined() && "Invalid starting label for an inlined scope!"); @@ -412,7 +454,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU, // DW_AT_ranges appropriately. TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4, DebugRangeSymbols.size() - * Asm->getTargetData().getPointerSize()); + * Asm->getDataLayout().getPointerSize()); for (SmallVector::const_iterator RI = Ranges.begin(), RE = Ranges.end(); RI != RE; ++RI) { DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first)); @@ -461,21 +503,26 @@ DIE *DwarfDebug::constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope) { return NULL; SmallVector Children; + DIE *ObjectPointer = NULL; // Collect arguments for current function. if (LScopes.isCurrentFunctionScope(Scope)) for (unsigned i = 0, N = CurrentFnArguments.size(); i < N; ++i) if (DbgVariable *ArgDV = CurrentFnArguments[i]) if (DIE *Arg = - TheCU->constructVariableDIE(ArgDV, Scope->isAbstractScope())) + TheCU->constructVariableDIE(ArgDV, Scope->isAbstractScope())) { Children.push_back(Arg); + if (ArgDV->isObjectPointer()) ObjectPointer = Arg; + } // Collect lexical scope children first. 
const SmallVector &Variables = ScopeVariables.lookup(Scope); for (unsigned i = 0, N = Variables.size(); i < N; ++i) if (DIE *Variable = - TheCU->constructVariableDIE(Variables[i], Scope->isAbstractScope())) + TheCU->constructVariableDIE(Variables[i], Scope->isAbstractScope())) { Children.push_back(Variable); + if (Variables[i]->isObjectPointer()) ObjectPointer = Variable; + } const SmallVector &Scopes = Scope->getChildren(); for (unsigned j = 0, M = Scopes.size(); j < M; ++j) if (DIE *Nested = constructScopeDIE(TheCU, Scopes[j])) @@ -509,6 +556,10 @@ DIE *DwarfDebug::constructScopeDIE(CompileUnit *TheCU, LexicalScope *Scope) { E = Children.end(); I != E; ++I) ScopeDIE->addChild(*I); + if (DS.isSubprogram() && ObjectPointer != NULL) + TheCU->addDIEEntry(ScopeDIE, dwarf::DW_AT_object_pointer, + dwarf::DW_FORM_ref4, ObjectPointer); + if (DS.isSubprogram()) TheCU->addPubTypes(DISubprogram(DS)); @@ -556,7 +607,8 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) { unsigned ID = GetOrCreateSourceID(FN, CompilationDir); DIE *Die = new DIE(dwarf::DW_TAG_compile_unit); - CompileUnit *NewCU = new CompileUnit(ID, DIUnit.getLanguage(), Die, Asm, this); + CompileUnit *NewCU = new CompileUnit(ID, DIUnit.getLanguage(), Die, + Asm, this); NewCU->addString(Die, dwarf::DW_AT_producer, DIUnit.getProducer()); NewCU->addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2, DIUnit.getLanguage()); @@ -575,7 +627,7 @@ CompileUnit *DwarfDebug::constructCompileUnit(const MDNode *N) { if (!CompilationDir.empty()) NewCU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir); if (DIUnit.isOptimized()) - NewCU->addUInt(Die, dwarf::DW_AT_APPLE_optimized, dwarf::DW_FORM_flag, 1); + NewCU->addFlag(Die, dwarf::DW_AT_APPLE_optimized); StringRef Flags = DIUnit.getFlags(); if (!Flags.empty()) @@ -755,7 +807,7 @@ void DwarfDebug::endModule() { LexicalScope *Scope = new LexicalScope(NULL, DIDescriptor(SP), NULL, false); DeadFnScopeMap[SP] = Scope; - + // Construct subprogram DIE and add variables DIEs. CompileUnit *SPCU = CUMap.lookup(TheCU); assert(SPCU && "Unable to find Compile Unit!"); @@ -802,9 +854,9 @@ void DwarfDebug::endModule() { Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("data_end")); // End text sections. - for (unsigned i = 1, N = SectionMap.size(); i <= N; ++i) { - Asm->OutStreamer.SwitchSection(SectionMap[i]); - Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("section_end", i)); + for (unsigned I = 0, E = SectionMap.size(); I != E; ++I) { + Asm->OutStreamer.SwitchSection(SectionMap[I]); + Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("section_end", I+1)); } // Compute DIE offsets and sizes. @@ -816,8 +868,8 @@ void DwarfDebug::endModule() { // Corresponding abbreviations into a abbrev section. emitAbbreviations(); - // Emit info into a dwarf accelerator table sections. - if (DwarfAccelTables) { + // Emit info into the dwarf accelerator table sections. + if (useDwarfAccelTables()) { emitAccelNames(); emitAccelObjC(); emitAccelNamespaces(); @@ -825,7 +877,10 @@ void DwarfDebug::endModule() { } // Emit info into a debug pubtypes section. - emitDebugPubTypes(); + // TODO: When we don't need the option anymore we can + // remove all of the code that adds to the table. + if (useDarwinGDBCompat()) + emitDebugPubTypes(); // Emit info into a debug loc section. emitDebugLoc(); @@ -840,7 +895,11 @@ void DwarfDebug::endModule() { emitDebugMacInfo(); // Emit inline info. 
- emitDebugInlineInfo(); + // TODO: When we don't need the option anymore we + // can remove all of the code that this section + // depends upon. + if (useDarwinGDBCompat()) + emitDebugInlineInfo(); // Emit info into a debug str section. emitDebugStr(); @@ -1014,7 +1073,7 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF, if (AbsVar) AbsVar->setMInsn(MInsn); - // Simple ranges that are fully coalesced. + // Simplify ranges that are fully coalesced. if (History.size() <= 1 || (History.size() == 2 && MInsn->isIdenticalTo(History.back()))) { RegVar->setMInsn(MInsn); @@ -1267,7 +1326,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) { // Coalesce identical entries at the end of History. if (History.size() >= 2 && Prev->isIdenticalTo(History[History.size() - 2])) { - DEBUG(dbgs() << "Coalesce identical DBG_VALUE entries:\n" + DEBUG(dbgs() << "Coalescing identical DBG_VALUE entries:\n" << "\t" << *Prev << "\t" << *History[History.size() - 2] << "\n"); History.pop_back(); @@ -1283,7 +1342,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) { PrevMBB->getLastNonDebugInstr(); if (LastMI == PrevMBB->end()) { // Drop DBG_VALUE for empty range. - DEBUG(dbgs() << "Drop DBG_VALUE for empty range:\n" + DEBUG(dbgs() << "Dropping DBG_VALUE for empty range:\n" << "\t" << *Prev << "\n"); History.pop_back(); } @@ -1300,9 +1359,10 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) { if (!MI->isLabel()) AtBlockEntry = false; - // First known non DBG_VALUE location marks beginning of function - // body. - if (PrologEndLoc.isUnknown() && !MI->getDebugLoc().isUnknown()) + // First known non-DBG_VALUE and non-frame setup location marks + // the beginning of the function body. + if (!MI->getFlag(MachineInstr::FrameSetup) && + (PrologEndLoc.isUnknown() && !MI->getDebugLoc().isUnknown())) PrologEndLoc = MI->getDebugLoc(); // Check if the instruction clobbers any registers with debug vars. @@ -1382,7 +1442,7 @@ void DwarfDebug::beginFunction(const MachineFunction *MF) { MF->getFunction()->getContext()); recordSourceLine(FnStartDL.getLine(), FnStartDL.getCol(), FnStartDL.getScope(MF->getFunction()->getContext()), - DWARF2_LINE_DEFAULT_IS_STMT ? 
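// A sketch of the prologue_end heuristic above: the first instruction that
// carries a source location and is not flagged as frame setup marks the end
// of the prologue, so a breakpoint on the function lands after the stack
// frame is built. The instruction model below is a stand-in for
// MachineInstr and its FrameSetup flag.

#include <cstdio>
#include <vector>

struct Inst { bool FrameSetup; int Line; }; // Line == 0 means "no location"

static int findPrologueEndLine(const std::vector<Inst> &Body) {
  for (const Inst &I : Body)
    if (!I.FrameSetup && I.Line != 0)
      return I.Line; // first real user location
  return 0;          // unknown
}

int main() {
  std::vector<Inst> Body = {{true, 3}, {true, 3}, {false, 0},
                            {false, 5}, {false, 6}};
  std::printf("prologue ends at line %d\n", findPrologueEndLine(Body)); // 5
}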
DWARF2_FLAG_IS_STMT : 0); + 0); } } @@ -1439,8 +1499,7 @@ void DwarfDebug::endFunction(const MachineFunction *MF) { DIE *CurFnDIE = constructScopeDIE(TheCU, FnScope); if (!MF->getTarget().Options.DisableFramePointerElim(*MF)) - TheCU->addUInt(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr, - dwarf::DW_FORM_flag, 1); + TheCU->addFlag(CurFnDIE, dwarf::DW_AT_APPLE_omit_frame_ptr); DebugFrames.push_back(FunctionDebugFrameInfo(Asm->getFunctionNumber(), MMI->getFrameMoves())); @@ -1710,7 +1769,7 @@ void DwarfDebug::emitDebugInfo() { Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"), DwarfAbbrevSectionSym); Asm->OutStreamer.AddComment("Address Size (in bytes)"); - Asm->EmitInt8(Asm->getTargetData().getPointerSize()); + Asm->EmitInt8(Asm->getDataLayout().getPointerSize()); emitDIE(Die); Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID())); @@ -1756,14 +1815,14 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) { Asm->EmitInt8(0); Asm->OutStreamer.AddComment("Op size"); - Asm->EmitInt8(Asm->getTargetData().getPointerSize() + 1); + Asm->EmitInt8(Asm->getDataLayout().getPointerSize() + 1); Asm->OutStreamer.AddComment("DW_LNE_set_address"); Asm->EmitInt8(dwarf::DW_LNE_set_address); Asm->OutStreamer.AddComment("Section end label"); Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd), - Asm->getTargetData().getPointerSize(), + Asm->getDataLayout().getPointerSize(), 0/*AddrSpace*/); // Mark end of matrix. @@ -1992,7 +2051,7 @@ void DwarfDebug::emitDebugLoc() { // Start the dwarf loc section. Asm->OutStreamer.SwitchSection( Asm->getObjFileLowering().getDwarfLocSection()); - unsigned char Size = Asm->getTargetData().getPointerSize(); + unsigned char Size = Asm->getDataLayout().getPointerSize(); Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0)); unsigned index = 1; for (SmallVector::iterator @@ -2089,7 +2148,7 @@ void DwarfDebug::emitDebugRanges() { // Start the dwarf ranges section. 
Asm->OutStreamer.SwitchSection( Asm->getObjFileLowering().getDwarfRangesSection()); - unsigned char Size = Asm->getTargetData().getPointerSize(); + unsigned char Size = Asm->getDataLayout().getPointerSize(); for (SmallVector::iterator I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end(); I != E; ++I) { @@ -2147,7 +2206,7 @@ void DwarfDebug::emitDebugInlineInfo() { Asm->OutStreamer.AddComment("Dwarf Version"); Asm->EmitInt16(dwarf::DWARF_VERSION); Asm->OutStreamer.AddComment("Address Size (in bytes)"); - Asm->EmitInt8(Asm->getTargetData().getPointerSize()); + Asm->EmitInt8(Asm->getDataLayout().getPointerSize()); for (SmallVector::iterator I = InlinedSPNodes.begin(), E = InlinedSPNodes.end(); I != E; ++I) { @@ -2178,7 +2237,7 @@ void DwarfDebug::emitDebugInlineInfo() { if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc"); Asm->OutStreamer.EmitSymbolValue(LI->first, - Asm->getTargetData().getPointerSize(),0); + Asm->getDataLayout().getPointerSize(),0); } } diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.h b/lib/CodeGen/AsmPrinter/DwarfDebug.h index d1d6512..61d9a51 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.h +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.h @@ -21,9 +21,9 @@ #include "llvm/MC/MachineLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/FoldingSet.h" +#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" -#include "llvm/ADT/UniqueVector.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/DebugLoc.h" @@ -96,7 +96,8 @@ typedef struct DotDebugLocEntry { DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, const ConstantFP *FPtr) : Begin(B), End(E), Variable(0), Merged(false), Constant(true) { Constants.CFP = FPtr; EntryKind = E_ConstantFP; } - DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, const ConstantInt *IPtr) + DotDebugLocEntry(const MCSymbol *B, const MCSymbol *E, + const ConstantInt *IPtr) : Begin(B), End(E), Variable(0), Merged(false), Constant(true) { Constants.CIP = IPtr; EntryKind = E_ConstantInt; } @@ -158,11 +159,19 @@ public: bool isArtificial() const { if (Var.isArtificial()) return true; - if (Var.getTag() == dwarf::DW_TAG_arg_variable - && getType().isArtificial()) + if (getType().isArtificial()) return true; return false; } + + bool isObjectPointer() const { + if (Var.isObjectPointer()) + return true; + if (getType().isObjectPointer()) + return true; + return false; + } + bool variableHasComplexAddress() const { assert(Var.Verify() && "Invalid complex DbgVariable!"); return Var.hasComplexAddress(); @@ -222,7 +231,7 @@ class DwarfDebug { /// SectionMap - Provides a unique id per text section. /// - UniqueVector SectionMap; + SetVector SectionMap; /// CurrentFnArguments - List of Arguments (DbgValues) for current function. SmallVector CurrentFnArguments; @@ -307,6 +316,9 @@ class DwarfDebug { // table for the same directory as DW_at_comp_dir. StringRef CompilationDir; + // A holder for the DarwinGDBCompat flag so that the compile unit can use it. + bool isDarwinGDBCompat; + bool hasDwarfAccelTables; private: /// assignAbbrevNumber - Define a unique number for the abbreviation. @@ -520,6 +532,11 @@ public: /// getStringPoolEntry - returns an entry into the string pool with the given /// string text. MCSymbol *getStringPoolEntry(StringRef Str); + + /// useDarwinGDBCompat - returns whether or not to limit some of our debug + /// output to the limitations of darwin gdb. 
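// A standalone sketch of the container swap above: SectionMap moves from a
// 1-based UniqueVector to a 0-based, insertion-ordered SetVector, which is
// why the section_end labels earlier in this patch are now emitted with
// "I + 1". A minimal analogue of such a container, for illustration only.

#include <cstdio>
#include <set>
#include <string>
#include <vector>

struct MiniSetVector {
  std::vector<std::string> Order; // unique elements in insertion order
  std::set<std::string> Seen;
  void insert(const std::string &S) {
    if (Seen.insert(S).second)
      Order.push_back(S);
  }
};

int main() {
  MiniSetVector Sections;
  Sections.insert(".text");
  Sections.insert(".text.hot");
  Sections.insert(".text"); // duplicate, ignored
  for (size_t I = 0, E = Sections.Order.size(); I != E; ++I)
    std::printf("section_end%zu -> %s\n", I + 1, Sections.Order[I].c_str());
}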
+ bool useDarwinGDBCompat() { return isDarwinGDBCompat; } + bool useDwarfAccelTables() { return hasDwarfAccelTables; } }; } // End of namespace llvm diff --git a/lib/CodeGen/AsmPrinter/DwarfException.cpp b/lib/CodeGen/AsmPrinter/DwarfException.cpp index 70cc2e5..08fb6b3 100644 --- a/lib/CodeGen/AsmPrinter/DwarfException.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfException.cpp @@ -24,7 +24,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" @@ -417,7 +417,7 @@ void DwarfException::EmitExceptionTable() { // that we're omitting that bit. TTypeEncoding = dwarf::DW_EH_PE_omit; // dwarf::DW_EH_PE_absptr - TypeFormatSize = Asm->getTargetData().getPointerSize(); + TypeFormatSize = Asm->getDataLayout().getPointerSize(); } else { // Okay, we have actual filters or typeinfos to emit. As such, we need to // pick a type encoding for them. We're about to emit a list of pointers to diff --git a/lib/CodeGen/AsmPrinter/DwarfException.h b/lib/CodeGen/AsmPrinter/DwarfException.h index 75f6056..fe9e493 100644 --- a/lib/CodeGen/AsmPrinter/DwarfException.h +++ b/lib/CodeGen/AsmPrinter/DwarfException.h @@ -43,26 +43,6 @@ protected: /// MMI - Collected machine module information. MachineModuleInfo *MMI; - /// EmitExceptionTable - Emit landing pads and actions. - /// - /// The general organization of the table is complex, but the basic concepts - /// are easy. First there is a header which describes the location and - /// organization of the three components that follow. - /// 1. The landing pad site information describes the range of code covered - /// by the try. In our case it's an accumulation of the ranges covered - /// by the invokes in the try. There is also a reference to the landing - /// pad that handles the exception once processed. Finally an index into - /// the actions table. - /// 2. The action table, in our case, is composed of pairs of type ids - /// and next action offset. Starting with the action index from the - /// landing pad site, each type Id is checked for a match to the current - /// exception. If it matches then the exception and type id are passed - /// on to the landing pad. Otherwise the next action is looked up. This - /// chain is terminated with a next action of zero. If no type id is - /// found the frame is unwound and handling continues. - /// 3. Type id table contains references to all the C++ typeinfo for all - /// catches in the function. This tables is reversed indexed base 1. - /// SharedTypeIds - How many leading type ids two landing pads have in common. static unsigned SharedTypeIds(const LandingPadInfo *L, const LandingPadInfo *R); @@ -119,6 +99,26 @@ protected: const RangeMapType &PadMap, const SmallVectorImpl &LPs, const SmallVectorImpl &FirstActions); + + /// EmitExceptionTable - Emit landing pads and actions. + /// + /// The general organization of the table is complex, but the basic concepts + /// are easy. First there is a header which describes the location and + /// organization of the three components that follow. + /// 1. The landing pad site information describes the range of code covered + /// by the try. In our case it's an accumulation of the ranges covered + /// by the invokes in the try. There is also a reference to the landing + /// pad that handles the exception once processed. 
Finally an index into + /// the actions table. + /// 2. The action table, in our case, is composed of pairs of type ids + /// and next action offset. Starting with the action index from the + /// landing pad site, each type Id is checked for a match to the current + /// exception. If it matches then the exception and type id are passed + /// on to the landing pad. Otherwise the next action is looked up. This + /// chain is terminated with a next action of zero. If no type id is + /// found the frame is unwound and handling continues. + /// 3. Type id table contains references to all the C++ typeinfo for all + /// catches in the function. This tables is reversed indexed base 1. void EmitExceptionTable(); public: diff --git a/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp b/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp index 1153817..f7c0119 100644 --- a/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp +++ b/lib/CodeGen/AsmPrinter/OcamlGCPrinter.cpp @@ -20,7 +20,7 @@ #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCStreamer.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" #include "llvm/ADT/SmallString.h" @@ -91,7 +91,7 @@ void OcamlGCMetadataPrinter::beginAssembly(AsmPrinter &AP) { /// either condition is detected in a function which uses the GC. /// void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) { - unsigned IntPtrSize = AP.TM.getTargetData()->getPointerSize(); + unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize(); AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection()); EmitCamlGlobal(getModule(), AP, "code_end"); diff --git a/lib/CodeGen/AsmPrinter/Win64Exception.cpp b/lib/CodeGen/AsmPrinter/Win64Exception.cpp index b83aa5a..70742a8 100644 --- a/lib/CodeGen/AsmPrinter/Win64Exception.cpp +++ b/lib/CodeGen/AsmPrinter/Win64Exception.cpp @@ -24,7 +24,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp index fb65bb7..6f4c5a2 100644 --- a/lib/CodeGen/BranchFolding.cpp +++ b/lib/CodeGen/BranchFolding.cpp @@ -357,9 +357,8 @@ static unsigned ComputeCommonTailLength(MachineBasicBlock *MBB1, if (I1 == MBB1->begin() && I2 != MBB2->begin()) { --I2; while (I2->isDebugValue()) { - if (I2 == MBB2->begin()) { + if (I2 == MBB2->begin()) return TailLen; - } --I2; } ++I2; @@ -482,21 +481,19 @@ bool BranchFolder::MergePotentialsElt::operator<(const MergePotentialsElt &o) const { if (getHash() < o.getHash()) return true; - else if (getHash() > o.getHash()) + if (getHash() > o.getHash()) return false; - else if (getBlock()->getNumber() < o.getBlock()->getNumber()) + if (getBlock()->getNumber() < o.getBlock()->getNumber()) return true; - else if (getBlock()->getNumber() > o.getBlock()->getNumber()) + if (getBlock()->getNumber() > o.getBlock()->getNumber()) return false; - else { - // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing - // an object with itself. + // _GLIBCXX_DEBUG checks strict weak ordering, which involves comparing + // an object with itself. 
#ifndef _GLIBCXX_DEBUG - llvm_unreachable("Predecessor appears twice"); + llvm_unreachable("Predecessor appears twice"); #else - return false; + return false; #endif - } } /// CountTerminators - Count the number of terminators in the given @@ -574,7 +571,8 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1, // instructions that would be deleted in the merge. MachineFunction *MF = MBB1->getParent(); if (EffectiveTailLen >= 2 && - MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize) && + MF->getFunction()->getFnAttributes(). + hasAttribute(Attributes::OptimizeForSize) && (I1 == MBB1->begin() || I2 == MBB2->begin())) return true; @@ -1554,8 +1552,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB, for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) Uses.insert(*AI); } else { - if (Uses.count(Reg)) { - Uses.erase(Reg); + if (Uses.erase(Reg)) { for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) Uses.erase(*SubRegs); // Use sub-registers to be conservative } diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt index 2e189ad..fa6d4e1 100644 --- a/lib/CodeGen/CMakeLists.txt +++ b/lib/CodeGen/CMakeLists.txt @@ -45,6 +45,7 @@ add_llvm_library(LLVMCodeGen MachineCopyPropagation.cpp MachineCSE.cpp MachineDominators.cpp + MachinePostDominators.cpp MachineFunction.cpp MachineFunctionAnalysis.cpp MachineFunctionPass.cpp @@ -95,12 +96,14 @@ add_llvm_library(LLVMCodeGen SplitKit.cpp StackProtector.cpp StackSlotColoring.cpp + StackColoring.cpp StrongPHIElimination.cpp TailDuplication.cpp TargetFrameLoweringImpl.cpp TargetInstrInfoImpl.cpp TargetLoweringObjectFileImpl.cpp TargetOptionsImpl.cpp + TargetSchedule.cpp TwoAddressInstructionPass.cpp UnreachableBlockElim.cpp VirtRegMap.cpp diff --git a/lib/CodeGen/CalcSpillWeights.cpp b/lib/CodeGen/CalcSpillWeights.cpp index 939af3f..dee339a 100644 --- a/lib/CodeGen/CalcSpillWeights.cpp +++ b/lib/CodeGen/CalcSpillWeights.cpp @@ -9,7 +9,6 @@ #define DEBUG_TYPE "calcspillweights" -#include "llvm/Function.h" #include "llvm/ADT/SmallSet.h" #include "llvm/CodeGen/CalcSpillWeights.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" @@ -42,8 +41,7 @@ void CalculateSpillWeights::getAnalysisUsage(AnalysisUsage &au) const { bool CalculateSpillWeights::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "********** Compute Spill Weights **********\n" - << "********** Function: " - << MF.getFunction()->getName() << '\n'); + << "********** Function: " << MF.getName() << '\n'); LiveIntervals &LIS = getAnalysis(); MachineRegisterInfo &MRI = MF.getRegInfo(); @@ -166,7 +164,7 @@ void VirtRegAuxInfo::CalculateWeightAndHint(LiveInterval &li) { continue; float hweight = Hint[hint] += weight; if (TargetRegisterInfo::isPhysicalRegister(hint)) { - if (hweight > bestPhys && LIS.isAllocatable(hint)) + if (hweight > bestPhys && mri.isAllocatable(hint)) bestPhys = hweight, hintPhys = hint; } else { if (hweight > bestVirt) diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp index 0b747fd..22b9140 100644 --- a/lib/CodeGen/CallingConvLower.cpp +++ b/lib/CodeGen/CallingConvLower.cpp @@ -18,7 +18,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetLowering.h" using namespace llvm; @@ -50,7 +50,7 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT, if (MinAlign > 
(int)Align) Align = MinAlign; MF.getFrameInfo()->ensureMaxAlignment(Align); - TM.getTargetLowering()->HandleByVal(this, Size); + TM.getTargetLowering()->HandleByVal(this, Size, Align); unsigned Offset = AllocateStack(Size, Align); addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); } diff --git a/lib/CodeGen/CodeGen.cpp b/lib/CodeGen/CodeGen.cpp index fb2c2e8..a53f6f8 100644 --- a/lib/CodeGen/CodeGen.cpp +++ b/lib/CodeGen/CodeGen.cpp @@ -41,6 +41,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializeMachineCopyPropagationPass(Registry); initializeMachineCSEPass(Registry); initializeMachineDominatorTreePass(Registry); + initializeMachinePostDominatorTreePass(Registry); initializeMachineLICMPass(Registry); initializeMachineLoopInfoPass(Registry); initializeMachineModuleInfoPass(Registry); @@ -56,6 +57,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) { initializeRegisterCoalescerPass(Registry); initializeSlotIndexesPass(Registry); initializeStackProtectorPass(Registry); + initializeStackColoringPass(Registry); initializeStackSlotColoringPass(Registry); initializeStrongPHIEliminationPass(Registry); initializeTailDuplicatePassPass(Registry); diff --git a/lib/CodeGen/CodePlacementOpt.cpp b/lib/CodeGen/CodePlacementOpt.cpp index 99233df..d8e06c3 100644 --- a/lib/CodeGen/CodePlacementOpt.cpp +++ b/lib/CodeGen/CodePlacementOpt.cpp @@ -373,7 +373,7 @@ bool CodePlacementOpt::OptimizeIntraLoopEdges(MachineFunction &MF) { /// bool CodePlacementOpt::AlignLoops(MachineFunction &MF) { const Function *F = MF.getFunction(); - if (F->hasFnAttr(Attribute::OptimizeForSize)) + if (F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize)) return false; unsigned Align = TLI->getPrefLoopAlignment(); diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp index a9de1c749..377b471 100644 --- a/lib/CodeGen/CriticalAntiDepBreaker.cpp +++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp @@ -527,7 +527,7 @@ BreakAntiDependencies(const std::vector& SUnits, if (Edge->getKind() == SDep::Anti) { AntiDepReg = Edge->getReg(); assert(AntiDepReg != 0 && "Anti-dependence on reg0?"); - if (!RegClassInfo.isAllocatable(AntiDepReg)) + if (!MRI.isAllocatable(AntiDepReg)) // Don't break anti-dependencies on non-allocatable registers. AntiDepReg = 0; else if (KeepRegs.test(AntiDepReg)) diff --git a/lib/CodeGen/DeadMachineInstructionElim.cpp b/lib/CodeGen/DeadMachineInstructionElim.cpp index b4394e8..8964269 100644 --- a/lib/CodeGen/DeadMachineInstructionElim.cpp +++ b/lib/CodeGen/DeadMachineInstructionElim.cpp @@ -33,7 +33,6 @@ namespace { const MachineRegisterInfo *MRI; const TargetInstrInfo *TII; BitVector LivePhysRegs; - BitVector ReservedRegs; public: static char ID; // Pass identification, replacement for typeid @@ -70,7 +69,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const { unsigned Reg = MO.getReg(); if (TargetRegisterInfo::isPhysicalRegister(Reg)) { // Don't delete live physreg defs, or any reserved register defs. - if (LivePhysRegs.test(Reg) || ReservedRegs.test(Reg)) + if (LivePhysRegs.test(Reg) || MRI->isReserved(Reg)) return false; } else { if (!MRI->use_nodbg_empty(Reg)) @@ -90,9 +89,6 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) { TRI = MF.getTarget().getRegisterInfo(); TII = MF.getTarget().getInstrInfo(); - // Treat reserved registers as always live. 
- ReservedRegs = TRI->getReservedRegs(MF); - // Loop over all instructions in all blocks, from bottom to top, so that it's // more likely that chains of dependent but ultimately dead instructions will // be cleaned up. @@ -101,7 +97,7 @@ bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) { MachineBasicBlock *MBB = &*I; // Start out assuming that reserved registers are live out of this block. - LivePhysRegs = ReservedRegs; + LivePhysRegs = MRI->getReservedRegs(); // Also add any explicit live-out physregs for this block. if (!MBB->empty() && MBB->back().isReturn()) diff --git a/lib/CodeGen/EarlyIfConversion.cpp b/lib/CodeGen/EarlyIfConversion.cpp index f9347ef..d5d8404 100644 --- a/lib/CodeGen/EarlyIfConversion.cpp +++ b/lib/CodeGen/EarlyIfConversion.cpp @@ -18,7 +18,6 @@ #define DEBUG_TYPE "early-ifcvt" #include "MachineTraceMetrics.h" -#include "llvm/Function.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/SetVector.h" @@ -32,9 +31,9 @@ #include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" -#include "llvm/MC/MCInstrItineraries.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" @@ -775,11 +774,11 @@ bool EarlyIfConverter::tryConvertIf(MachineBasicBlock *MBB) { bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n" - << "********** Function: " - << ((Value*)MF.getFunction())->getName() << '\n'); + << "********** Function: " << MF.getName() << '\n'); TII = MF.getTarget().getInstrInfo(); TRI = MF.getTarget().getRegisterInfo(); - SchedModel = MF.getTarget().getInstrItineraryData()->SchedModel; + SchedModel = + MF.getTarget().getSubtarget().getSchedModel(); MRI = &MF.getRegInfo(); DomTree = &getAnalysis(); Loops = getAnalysisIfAvailable(); @@ -798,6 +797,5 @@ bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) { if (tryConvertIf(I->getBlock())) Changed = true; - MF.verify(this, "After early if-conversion"); return Changed; } diff --git a/lib/CodeGen/ExecutionDepsFix.cpp b/lib/CodeGen/ExecutionDepsFix.cpp index fee8e47..ed78f19 100644 --- a/lib/CodeGen/ExecutionDepsFix.cpp +++ b/lib/CodeGen/ExecutionDepsFix.cpp @@ -626,9 +626,12 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) { } dv->Instrs.push_back(mi); - // Finally set all defs and non-collapsed uses to dv. - for (unsigned i = 0, e = mi->getDesc().getNumOperands(); i != e; ++i) { - MachineOperand &mo = mi->getOperand(i); + // Finally set all defs and non-collapsed uses to dv. We must iterate through + // all the operators, including imp-def ones. 
+ for (MachineInstr::mop_iterator ii = mi->operands_begin(), + ee = mi->operands_end(); + ii != ee; ++ii) { + MachineOperand &mo = *ii; if (!mo.isReg()) continue; int rx = regIndex(mo.getReg()); if (rx < 0) continue; @@ -654,7 +657,7 @@ bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) { bool anyregs = false; for (TargetRegisterClass::const_iterator I = RC->begin(), E = RC->end(); I != E; ++I) - if (MF->getRegInfo().isPhysRegOrOverlapUsed(*I)) { + if (MF->getRegInfo().isPhysRegUsed(*I)) { anyregs = true; break; } diff --git a/lib/CodeGen/ExpandPostRAPseudos.cpp b/lib/CodeGen/ExpandPostRAPseudos.cpp index 7a17331..ffe4b63 100644 --- a/lib/CodeGen/ExpandPostRAPseudos.cpp +++ b/lib/CodeGen/ExpandPostRAPseudos.cpp @@ -14,7 +14,6 @@ #define DEBUG_TYPE "postrapseudos" #include "llvm/CodeGen/Passes.h" -#include "llvm/Function.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -190,8 +189,7 @@ bool ExpandPostRA::LowerCopy(MachineInstr *MI) { bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "Machine Function\n" << "********** EXPANDING POST-RA PSEUDO INSTRS **********\n" - << "********** Function: " - << MF.getFunction()->getName() << '\n'); + << "********** Function: " << MF.getName() << '\n'); TRI = MF.getTarget().getRegisterInfo(); TII = MF.getTarget().getInstrInfo(); diff --git a/lib/CodeGen/GCStrategy.cpp b/lib/CodeGen/GCStrategy.cpp index 506b5cf..f4755bb 100644 --- a/lib/CodeGen/GCStrategy.cpp +++ b/lib/CodeGen/GCStrategy.cpp @@ -20,6 +20,7 @@ #include "llvm/IntrinsicInst.h" #include "llvm/Module.h" #include "llvm/Analysis/Dominators.h" +#include "llvm/Analysis/DominatorInternals.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -387,9 +388,16 @@ void GCMachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) { const TargetFrameLowering *TFI = TM->getFrameLowering(); assert(TFI && "TargetRegisterInfo not available!"); - for (GCFunctionInfo::roots_iterator RI = FI->roots_begin(), - RE = FI->roots_end(); RI != RE; ++RI) - RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num); + for (GCFunctionInfo::roots_iterator RI = FI->roots_begin(); + RI != FI->roots_end();) { + // If the root references a dead object, no need to keep it. 
+ if (MF.getFrameInfo()->isDeadObjectIndex(RI->Num)) { + RI = FI->removeStackRoot(RI); + } else { + RI->StackOffset = TFI->getFrameIndexOffset(MF, RI->Num); + ++RI; + } + } } bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) { diff --git a/lib/CodeGen/IfConversion.cpp b/lib/CodeGen/IfConversion.cpp index 4214ba1..31e36f0 100644 --- a/lib/CodeGen/IfConversion.cpp +++ b/lib/CodeGen/IfConversion.cpp @@ -13,7 +13,6 @@ #define DEBUG_TYPE "ifcvt" #include "BranchFolding.h" -#include "llvm/Function.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineBranchProbabilityInfo.h" @@ -282,7 +281,7 @@ bool IfConverter::runOnMachineFunction(MachineFunction &MF) { } DEBUG(dbgs() << "\nIfcvt: function (" << ++FnNum << ") \'" - << MF.getFunction()->getName() << "\'"); + << MF.getName() << "\'"); if (FnNum < IfCvtFnStart || (IfCvtFnStop != -1 && FnNum > IfCvtFnStop)) { DEBUG(dbgs() << " skipped\n"); @@ -997,14 +996,13 @@ static void UpdatePredRedefs(MachineInstr *MI, SmallSet &Redefs, } for (unsigned i = 0, e = Defs.size(); i != e; ++i) { unsigned Reg = Defs[i]; - if (Redefs.count(Reg)) { + if (!Redefs.insert(Reg)) { if (AddImpUse) // Treat predicated update as read + write. MI->addOperand(MachineOperand::CreateReg(Reg, false/*IsDef*/, true/*IsImp*/,false/*IsKill*/, false/*IsDead*/,true/*IsUndef*/)); } else { - Redefs.insert(Reg); for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) Redefs.insert(*SubRegs); } diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp index 07e37af..37828a7 100644 --- a/lib/CodeGen/InlineSpiller.cpp +++ b/lib/CodeGen/InlineSpiller.cpp @@ -613,7 +613,7 @@ MachineInstr *InlineSpiller::traceSiblingValue(unsigned UseReg, VNInfo *UseVNI, propagateSiblingValue(SVI); } while (!WorkList.empty()); - // Look up the value we were looking for. We already did this lokup at the + // Look up the value we were looking for. We already did this lookup at the // top of the function, but SibValues may have been invalidated. SVI = SibValues.find(UseVNI); assert(SVI != SibValues.end() && "Didn't compute requested info"); @@ -863,7 +863,7 @@ bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, // If the instruction also writes VirtReg.reg, it had better not require the // same register for uses and defs. SmallVector, 8> Ops; - MIBundleOperands::RegInfo RI = + MIBundleOperands::VirtRegInfo RI = MIBundleOperands(MI).analyzeVirtReg(VirtReg.reg, &Ops); if (RI.Tied) { markValueUsed(&VirtReg, ParentVNI); @@ -1142,7 +1142,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) { // Analyze instruction. SmallVector, 8> Ops; - MIBundleOperands::RegInfo RI = + MIBundleOperands::VirtRegInfo RI = MIBundleOperands(MI).analyzeVirtReg(Reg, &Ops); // Find the slot index where this instruction reads and writes OldLI. 
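For reference, a minimal stand-alone sketch of the insert()/erase() return-value idiom that the BranchFolding.cpp, IfConversion.cpp and DeadMachineInstructionElim.cpp hunks above adopt in place of a separate count() query. This is an illustration only, not code from the imported patch; the std::set container and the helper names markRedefined/dropUse are assumptions chosen to keep the example self-contained.

    // Sketch of the "use the result of insert()/erase()" pattern.
    #include <cassert>
    #include <set>

    // Record that Reg was redefined; report whether it had already been seen.
    // insert().second is true only when Reg was not already present, so the
    // former "if (S.count(Reg)) ... else S.insert(Reg)" collapses into one lookup.
    static bool markRedefined(std::set<unsigned> &Redefs, unsigned Reg) {
      return !Redefs.insert(Reg).second; // true => Reg was already redefined
    }

    // Remove Reg from the use set; report whether it was actually there.
    // erase() returns the number of elements removed, replacing
    // "if (Uses.count(Reg)) { Uses.erase(Reg); ... }".
    static bool dropUse(std::set<unsigned> &Uses, unsigned Reg) {
      return Uses.erase(Reg) != 0;
    }

    int main() {
      std::set<unsigned> Redefs, Uses;
      Uses.insert(5);
      assert(!markRedefined(Redefs, 5)); // first definition of reg 5
      assert(markRedefined(Redefs, 5));  // second definition: already present
      assert(dropUse(Uses, 5));          // reg 5 was a use, now removed
      assert(!dropUse(Uses, 5));         // nothing left to remove
      return 0;
    }

The same single-lookup shape is what the patch uses with llvm::SmallSet (whose insert()/erase() return a bool in this revision), avoiding a redundant search of the set on the hot path.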
diff --git a/lib/CodeGen/IntrinsicLowering.cpp b/lib/CodeGen/IntrinsicLowering.cpp index 8d2282a..6120ae56 100644 --- a/lib/CodeGen/IntrinsicLowering.cpp +++ b/lib/CodeGen/IntrinsicLowering.cpp @@ -21,7 +21,7 @@ #include "llvm/Support/CallSite.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; template @@ -457,7 +457,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) { break; // Strip out annotate intrinsic case Intrinsic::memcpy: { - IntegerType *IntPtr = TD.getIntPtrType(Context); + Type *IntPtr = TD.getIntPtrType(Context); Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr, /* isSigned */ false); Value *Ops[3]; @@ -468,7 +468,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) { break; } case Intrinsic::memmove: { - IntegerType *IntPtr = TD.getIntPtrType(Context); + Type *IntPtr = TD.getIntPtrType(Context); Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr, /* isSigned */ false); Value *Ops[3]; @@ -479,7 +479,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) { break; } case Intrinsic::memset: { - IntegerType *IntPtr = TD.getIntPtrType(Context); + Type *IntPtr = TD.getIntPtrType(Context); Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr, /* isSigned */ false); Value *Ops[3]; diff --git a/lib/CodeGen/LLVMTargetMachine.cpp b/lib/CodeGen/LLVMTargetMachine.cpp index cac0c83..24daafa 100644 --- a/lib/CodeGen/LLVMTargetMachine.cpp +++ b/lib/CodeGen/LLVMTargetMachine.cpp @@ -172,7 +172,7 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM, const MCSubtargetInfo &STI = getSubtarget(); MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI, STI, *Context); - MAB = getTarget().createMCAsmBackend(getTargetTriple()); + MAB = getTarget().createMCAsmBackend(getTargetTriple(), TargetCPU); } MCStreamer *S = getTarget().createAsmStreamer(*Context, Out, @@ -191,7 +191,7 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM, // emission fails. 
MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI, STI, *Context); - MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple()); + MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple(), TargetCPU); if (MCE == 0 || MAB == 0) return true; @@ -266,7 +266,7 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM, const MCSubtargetInfo &STI = getSubtarget(); MCCodeEmitter *MCE = getTarget().createMCCodeEmitter(*getInstrInfo(), MRI, STI, *Ctx); - MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple()); + MCAsmBackend *MAB = getTarget().createMCAsmBackend(getTargetTriple(), TargetCPU); if (MCE == 0 || MAB == 0) return true; diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp index d631726..defc127 100644 --- a/lib/CodeGen/LiveDebugVariables.cpp +++ b/lib/CodeGen/LiveDebugVariables.cpp @@ -687,8 +687,7 @@ bool LDVImpl::runOnMachineFunction(MachineFunction &mf) { clear(); LS.initialize(mf); DEBUG(dbgs() << "********** COMPUTING LIVE DEBUG VARIABLES: " - << ((Value*)mf.getFunction())->getName() - << " **********\n"); + << mf.getName() << " **********\n"); bool Changed = collectDebugValues(mf); computeIntervals(); diff --git a/lib/CodeGen/LiveInterval.cpp b/lib/CodeGen/LiveInterval.cpp index 0a795e6..8585cbb 100644 --- a/lib/CodeGen/LiveInterval.cpp +++ b/lib/CodeGen/LiveInterval.cpp @@ -27,6 +27,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetRegisterInfo.h" +#include "RegisterCoalescer.h" #include using namespace llvm; @@ -58,8 +59,16 @@ VNInfo *LiveInterval::createDeadDef(SlotIndex Def, return VNI; } if (SlotIndex::isSameInstr(Def, I->start)) { - assert(I->start == Def && "Cannot insert def, already live"); - assert(I->valno->def == Def && "Inconsistent existing value def"); + assert(I->valno->def == I->start && "Inconsistent existing value def"); + + // It is possible to have both normal and early-clobber defs of the same + // register on an instruction. It doesn't make a lot of sense, but it is + // possible to specify in inline assembly. + // + // Just convert everything to early-clobber. + Def = std::min(Def, I->start); + if (Def != I->start) + I->start = I->valno->def = Def; return I->valno; } assert(SlotIndex::isEarlierInstr(Def, I->start) && "Already live at def"); @@ -68,21 +77,6 @@ VNInfo *LiveInterval::createDeadDef(SlotIndex Def, return VNI; } -/// killedInRange - Return true if the interval has kills in [Start,End). -bool LiveInterval::killedInRange(SlotIndex Start, SlotIndex End) const { - Ranges::const_iterator r = - std::lower_bound(ranges.begin(), ranges.end(), End); - - // Now r points to the first interval with start >= End, or ranges.end(). - if (r == ranges.begin()) - return false; - - --r; - // Now r points to the last interval with end <= End. - // r->end is the kill point. - return r->end >= Start && r->end < End; -} - // overlaps - Return true if the intersection of the two live intervals is // not empty. // @@ -142,6 +136,48 @@ bool LiveInterval::overlapsFrom(const LiveInterval& other, return false; } +bool LiveInterval::overlaps(const LiveInterval &Other, + const CoalescerPair &CP, + const SlotIndexes &Indexes) const { + assert(!empty() && "empty interval"); + if (Other.empty()) + return false; + + // Use binary searches to find initial positions. 
+ const_iterator I = find(Other.beginIndex()); + const_iterator IE = end(); + if (I == IE) + return false; + const_iterator J = Other.find(I->start); + const_iterator JE = Other.end(); + if (J == JE) + return false; + + for (;;) { + // J has just been advanced to satisfy: + assert(J->end >= I->start); + // Check for an overlap. + if (J->start < I->end) { + // I and J are overlapping. Find the later start. + SlotIndex Def = std::max(I->start, J->start); + // Allow the overlap if Def is a coalescable copy. + if (Def.isBlock() || + !CP.isCoalescable(Indexes.getInstructionFromIndex(Def))) + return true; + } + // Advance the iterator that ends first to check for more overlaps. + if (J->end > I->end) { + std::swap(I, J); + std::swap(IE, JE); + } + // Advance J until J->end >= I->start. + do + if (++J == JE) + return false; + while (J->end < I->start); + } +} + /// overlaps - Return true if the live interval overlaps a range specified /// by [Start, End). bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const { @@ -399,7 +435,7 @@ void LiveInterval::join(LiveInterval &Other, // If we have to apply a mapping to our base interval assignment, rewrite it // now. - if (MustMapCurValNos) { + if (MustMapCurValNos && !empty()) { // Map the first live range. iterator OutIt = begin(); @@ -673,27 +709,6 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) { return V2; } -void LiveInterval::Copy(const LiveInterval &RHS, - MachineRegisterInfo *MRI, - VNInfo::Allocator &VNInfoAllocator) { - ranges.clear(); - valnos.clear(); - std::pair Hint = MRI->getRegAllocationHint(RHS.reg); - MRI->setRegAllocationHint(reg, Hint.first, Hint.second); - - weight = RHS.weight; - for (unsigned i = 0, e = RHS.getNumValNums(); i != e; ++i) { - const VNInfo *VNI = RHS.getValNumInfo(i); - createValueCopy(VNI, VNInfoAllocator); - } - for (unsigned i = 0, e = RHS.ranges.size(); i != e; ++i) { - const LiveRange &LR = RHS.ranges[i]; - addRange(LiveRange(LR.start, LR.end, getValNumInfo(LR.valno->id))); - } - - verify(); -} - unsigned LiveInterval::getSize() const { unsigned Sum = 0; for (const_iterator I = begin(), E = end(); I != E; ++I) @@ -705,9 +720,11 @@ raw_ostream& llvm::operator<<(raw_ostream& os, const LiveRange &LR) { return os << '[' << LR.start << ',' << LR.end << ':' << LR.valno->id << ")"; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LiveRange::dump() const { dbgs() << *this << "\n"; } +#endif void LiveInterval::print(raw_ostream &OS) const { if (empty()) @@ -740,9 +757,11 @@ void LiveInterval::print(raw_ostream &OS) const { } } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LiveInterval::dump() const { dbgs() << *this << "\n"; } +#endif #ifndef NDEBUG void LiveInterval::verify() const { diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index d0f8ae1..4e75d89 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -34,6 +34,7 @@ #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h" #include "LiveRangeCalc.h" +#include "VirtRegMap.h" #include #include #include @@ -109,8 +110,6 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { DomTree = &getAnalysis(); if (!LRCalc) LRCalc = new LiveRangeCalc(); - AllocatableRegs = TRI->getAllocatableSet(fn); - ReservedRegs = TRI->getReservedRegs(fn); // Allocate space for all virtual registers. 
VirtRegIntervals.resize(MRI->getNumVirtRegs()); @@ -147,6 +146,11 @@ void LiveIntervals::print(raw_ostream &OS, const Module* ) const { OS << PrintReg(Reg) << " = " << getInterval(Reg) << '\n'; } + OS << "RegMasks:"; + for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i) + OS << ' ' << RegMaskSlots[i]; + OS << '\n'; + printInstrs(OS); } @@ -155,9 +159,11 @@ void LiveIntervals::printInstrs(raw_ostream &OS) const { MF->print(OS, Indexes); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LiveIntervals::dumpInstrs() const { printInstrs(dbgs()); } +#endif static bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) { @@ -382,8 +388,7 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB, /// which a variable is live void LiveIntervals::computeIntervals() { DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n" - << "********** Function: " - << ((Value*)MF->getFunction())->getName() << '\n'); + << "********** Function: " << MF->getName() << '\n'); RegMaskBlocks.resize(MF->getNumBlockIDs()); @@ -440,7 +445,7 @@ void LiveIntervals::computeIntervals() { // Compute the number of register mask instructions in this block. std::pair &RMB = RegMaskBlocks[MBB->getNumber()]; - RMB.second = RegMaskSlots.size() - RMB.first;; + RMB.second = RegMaskSlots.size() - RMB.first; } // Create empty intervals for registers defined by implicit_def's (except @@ -497,7 +502,7 @@ void LiveIntervals::computeRegMasks() { RegMaskBits.push_back(MO->getRegMask()); } // Compute the number of register mask instructions in this block. - RMB.second = RegMaskSlots.size() - RMB.first;; + RMB.second = RegMaskSlots.size() - RMB.first; } } @@ -540,11 +545,11 @@ void LiveIntervals::computeRegUnitInterval(LiveInterval *LI) { // Ignore uses of reserved registers. We only track defs of those. for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) { unsigned Root = *Roots; - if (!isReserved(Root) && !MRI->reg_empty(Root)) + if (!MRI->isReserved(Root) && !MRI->reg_empty(Root)) LRCalc->extendToUses(LI, Root); for (MCSuperRegIterator Supers(Root, TRI); Supers.isValid(); ++Supers) { unsigned Reg = *Supers; - if (!isReserved(Reg) && !MRI->reg_empty(Reg)) + if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg)) LRCalc->extendToUses(LI, Reg); } } @@ -729,17 +734,100 @@ bool LiveIntervals::shrinkToUses(LiveInterval *li, return CanSeparate; } +void LiveIntervals::extendToIndices(LiveInterval *LI, + ArrayRef Indices) { + assert(LRCalc && "LRCalc not initialized."); + LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator()); + for (unsigned i = 0, e = Indices.size(); i != e; ++i) + LRCalc->extend(LI, Indices[i]); +} + +void LiveIntervals::pruneValue(LiveInterval *LI, SlotIndex Kill, + SmallVectorImpl *EndPoints) { + LiveRangeQuery LRQ(*LI, Kill); + VNInfo *VNI = LRQ.valueOut(); + if (!VNI) + return; + + MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill); + SlotIndex MBBStart, MBBEnd; + tie(MBBStart, MBBEnd) = Indexes->getMBBRange(KillMBB); + + // If VNI isn't live out from KillMBB, the value is trivially pruned. + if (LRQ.endPoint() < MBBEnd) { + LI->removeRange(Kill, LRQ.endPoint()); + if (EndPoints) EndPoints->push_back(LRQ.endPoint()); + return; + } + + // VNI is live out of KillMBB. + LI->removeRange(Kill, MBBEnd); + if (EndPoints) EndPoints->push_back(MBBEnd); + + // Find all blocks that are reachable from KillMBB without leaving VNI's live + // range. It is possible that KillMBB itself is reachable, so start a DFS + // from each successor. 
+ typedef SmallPtrSet VisitedTy; + VisitedTy Visited; + for (MachineBasicBlock::succ_iterator + SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end(); + SuccI != SuccE; ++SuccI) { + for (df_ext_iterator + I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited); + I != E;) { + MachineBasicBlock *MBB = *I; + + // Check if VNI is live in to MBB. + tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB); + LiveRangeQuery LRQ(*LI, MBBStart); + if (LRQ.valueIn() != VNI) { + // This block isn't part of the VNI live range. Prune the search. + I.skipChildren(); + continue; + } + + // Prune the search if VNI is killed in MBB. + if (LRQ.endPoint() < MBBEnd) { + LI->removeRange(MBBStart, LRQ.endPoint()); + if (EndPoints) EndPoints->push_back(LRQ.endPoint()); + I.skipChildren(); + continue; + } + + // VNI is live through MBB. + LI->removeRange(MBBStart, MBBEnd); + if (EndPoints) EndPoints->push_back(MBBEnd); + ++I; + } + } +} //===----------------------------------------------------------------------===// // Register allocator hooks. // -void LiveIntervals::addKillFlags() { +void LiveIntervals::addKillFlags(const VirtRegMap *VRM) { + // Keep track of regunit ranges. + SmallVector, 8> RU; + for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) { unsigned Reg = TargetRegisterInfo::index2VirtReg(i); if (MRI->reg_nodbg_empty(Reg)) continue; LiveInterval *LI = &getInterval(Reg); + if (LI->empty()) + continue; + + // Find the regunit intervals for the assigned register. They may overlap + // the virtual register live range, cancelling any kills. + RU.clear(); + for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid(); + ++Units) { + LiveInterval *RUInt = &getRegUnit(*Units); + if (RUInt->empty()) + continue; + RU.push_back(std::make_pair(RUInt, RUInt->find(LI->begin()->end))); + } // Every instruction that kills Reg corresponds to a live range end point. for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE; @@ -750,7 +838,32 @@ void LiveIntervals::addKillFlags() { MachineInstr *MI = getInstructionFromIndex(RI->end); if (!MI) continue; - MI->addRegisterKilled(Reg, NULL); + + // Check if any of the reguints are live beyond the end of RI. That could + // happen when a physreg is defined as a copy of a virtreg: + // + // %EAX = COPY %vreg5 + // FOO %vreg5 <--- MI, cancel kill because %EAX is live. + // BAR %EAX + // + // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX. + bool CancelKill = false; + for (unsigned u = 0, e = RU.size(); u != e; ++u) { + LiveInterval *RInt = RU[u].first; + LiveInterval::iterator &I = RU[u].second; + if (I == RInt->end()) + continue; + I = RInt->advanceTo(I, RI->end); + if (I == RInt->end() || I->start >= RI->end) + continue; + // I is overlapping RI. 
+ CancelKill = true; + break; + } + if (CancelKill) + MI->clearRegisterKills(Reg, NULL); + else + MI->addRegisterKilled(Reg, NULL); } } } @@ -900,497 +1013,321 @@ private: LiveIntervals& LIS; const MachineRegisterInfo& MRI; const TargetRegisterInfo& TRI; + SlotIndex OldIdx; SlotIndex NewIdx; - - typedef std::pair IntRangePair; - typedef DenseSet RangeSet; - - struct RegRanges { - LiveRange* Use; - LiveRange* EC; - LiveRange* Dead; - LiveRange* Def; - RegRanges() : Use(0), EC(0), Dead(0), Def(0) {} - }; - typedef DenseMap BundleRanges; + SmallPtrSet Updated; + bool UpdateFlags; public: HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI, - const TargetRegisterInfo& TRI, SlotIndex NewIdx) - : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {} - - // Update intervals for all operands of MI from OldIdx to NewIdx. - // This assumes that MI used to be at OldIdx, and now resides at - // NewIdx. - void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) { - assert(NewIdx != OldIdx && "No-op move? That's a bit strange."); - - // Collect the operands. - RangeSet Entering, Internal, Exiting; - bool hasRegMaskOp = false; - collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx); - - // To keep the LiveRanges valid within an interval, move the ranges closest - // to the destination first. This prevents ranges from overlapping, to that - // APIs like removeRange still work. - if (NewIdx < OldIdx) { - moveAllEnteringFrom(OldIdx, Entering); - moveAllInternalFrom(OldIdx, Internal); - moveAllExitingFrom(OldIdx, Exiting); - } - else { - moveAllExitingFrom(OldIdx, Exiting); - moveAllInternalFrom(OldIdx, Internal); - moveAllEnteringFrom(OldIdx, Entering); - } - - if (hasRegMaskOp) - updateRegMaskSlots(OldIdx); - -#ifndef NDEBUG - LIValidator validator; - validator = std::for_each(Entering.begin(), Entering.end(), validator); - validator = std::for_each(Internal.begin(), Internal.end(), validator); - validator = std::for_each(Exiting.begin(), Exiting.end(), validator); - assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness."); -#endif - + const TargetRegisterInfo& TRI, + SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags) + : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx), + UpdateFlags(UpdateFlags) {} + + // FIXME: UpdateFlags is a workaround that creates live intervals for all + // physregs, even those that aren't needed for regalloc, in order to update + // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill + // flags, and postRA passes will use a live register utility instead. + LiveInterval *getRegUnitLI(unsigned Unit) { + if (UpdateFlags) + return &LIS.getRegUnit(Unit); + return LIS.getCachedRegUnit(Unit); } - // Update intervals for all operands of MI to refer to BundleStart's - // SlotIndex. - void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) { - if (MI == BundleStart) - return; // Bundling instr with itself - nothing to do. - - SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI); - assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI && - "SlotIndex <-> Instruction mapping broken for MI"); - - // Collect all ranges already in the bundle. 
- MachineBasicBlock::instr_iterator BII(BundleStart); - RangeSet Entering, Internal, Exiting; - bool hasRegMaskOp = false; - collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx); - assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); - for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) { - if (&*BII == MI) + /// Update all live ranges touched by MI, assuming a move from OldIdx to + /// NewIdx. + void updateAllRanges(MachineInstr *MI) { + DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI); + bool hasRegMask = false; + for (MIOperands MO(MI); MO.isValid(); ++MO) { + if (MO->isRegMask()) + hasRegMask = true; + if (!MO->isReg()) continue; - collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx); - assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); - } - - BundleRanges BR = createBundleRanges(Entering, Internal, Exiting); - - Entering.clear(); - Internal.clear(); - Exiting.clear(); - collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx); - assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); + // Aggressively clear all kill flags. + // They are reinserted by VirtRegRewriter. + if (MO->isUse()) + MO->setIsKill(false); - DEBUG(dbgs() << "Entering: " << Entering.size() << "\n"); - DEBUG(dbgs() << "Internal: " << Internal.size() << "\n"); - DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n"); - - moveAllEnteringFromInto(OldIdx, Entering, BR); - moveAllInternalFromInto(OldIdx, Internal, BR); - moveAllExitingFromInto(OldIdx, Exiting, BR); - - -#ifndef NDEBUG - LIValidator validator; - validator = std::for_each(Entering.begin(), Entering.end(), validator); - validator = std::for_each(Internal.begin(), Internal.end(), validator); - validator = std::for_each(Exiting.begin(), Exiting.end(), validator); - assert(validator.rangesOk() && "moveAllOperandsInto broke liveness."); -#endif - } - -private: - -#ifndef NDEBUG - class LIValidator { - private: - DenseSet Checked, Bogus; - public: - void operator()(const IntRangePair& P) { - const LiveInterval* LI = P.first; - if (Checked.count(LI)) - return; - Checked.insert(LI); - if (LI->empty()) - return; - SlotIndex LastEnd = LI->begin()->start; - for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end(); - LRI != LRE; ++LRI) { - const LiveRange& LR = *LRI; - if (LastEnd > LR.start || LR.start >= LR.end) - Bogus.insert(LI); - LastEnd = LR.end; - } - } - - bool rangesOk() const { - return Bogus.empty(); - } - }; -#endif - - // Collect IntRangePairs for all operands of MI that may need fixing. - // Treat's MI's index as OldIdx (regardless of what it is in SlotIndexes' - // maps). - void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal, - RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) { - hasRegMaskOp = false; - for (MachineInstr::mop_iterator MOI = MI->operands_begin(), - MOE = MI->operands_end(); - MOI != MOE; ++MOI) { - const MachineOperand& MO = *MOI; - - if (MO.isRegMask()) { - hasRegMaskOp = true; + unsigned Reg = MO->getReg(); + if (!Reg) continue; - } - - if (!MO.isReg() || MO.getReg() == 0) - continue; - - unsigned Reg = MO.getReg(); - - // TODO: Currently we're skipping uses that are reserved or have no - // interval, but we're not updating their kills. This should be - // fixed. - if (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)) + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + updateRange(LIS.getInterval(Reg)); continue; - - // Collect ranges for register units. 
These live ranges are computed on - // demand, so just skip any that haven't been computed yet. - if (TargetRegisterInfo::isPhysicalRegister(Reg)) { - for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) - if (LiveInterval *LI = LIS.getCachedRegUnit(*Units)) - collectRanges(MO, LI, Entering, Internal, Exiting, OldIdx); - } else { - // Collect ranges for individual virtual registers. - collectRanges(MO, &LIS.getInterval(Reg), - Entering, Internal, Exiting, OldIdx); } + + // For physregs, only update the regunits that actually have a + // precomputed live range. + for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units) + if (LiveInterval *LI = getRegUnitLI(*Units)) + updateRange(*LI); } + if (hasRegMask) + updateRegMaskSlots(); } - void collectRanges(const MachineOperand &MO, LiveInterval *LI, - RangeSet &Entering, RangeSet &Internal, RangeSet &Exiting, - SlotIndex OldIdx) { - if (MO.readsReg()) { - LiveRange* LR = LI->getLiveRangeContaining(OldIdx); - if (LR != 0) - Entering.insert(std::make_pair(LI, LR)); - } - if (MO.isDef()) { - LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot()); - assert(LR != 0 && "No live range for def?"); - if (LR->end > OldIdx.getDeadSlot()) - Exiting.insert(std::make_pair(LI, LR)); +private: + /// Update a single live range, assuming an instruction has been moved from + /// OldIdx to NewIdx. + void updateRange(LiveInterval &LI) { + if (!Updated.insert(&LI)) + return; + DEBUG({ + dbgs() << " "; + if (TargetRegisterInfo::isVirtualRegister(LI.reg)) + dbgs() << PrintReg(LI.reg); else - Internal.insert(std::make_pair(LI, LR)); - } + dbgs() << PrintRegUnit(LI.reg, &TRI); + dbgs() << ":\t" << LI << '\n'; + }); + if (SlotIndex::isEarlierInstr(OldIdx, NewIdx)) + handleMoveDown(LI); + else + handleMoveUp(LI); + DEBUG(dbgs() << " -->\t" << LI << '\n'); + LI.verify(); } - BundleRanges createBundleRanges(RangeSet& Entering, - RangeSet& Internal, - RangeSet& Exiting) { - BundleRanges BR; - - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) { - LiveInterval* LI = EI->first; - LiveRange* LR = EI->second; - BR[LI->reg].Use = LR; - } + /// Update LI to reflect an instruction has been moved downwards from OldIdx + /// to NewIdx. + /// + /// 1. Live def at OldIdx: + /// Move def to NewIdx, assert endpoint after NewIdx. + /// + /// 2. Live def at OldIdx, killed at NewIdx: + /// Change to dead def at NewIdx. + /// (Happens when bundling def+kill together). + /// + /// 3. Dead def at OldIdx: + /// Move def to NewIdx, possibly across another live value. + /// + /// 4. Def at OldIdx AND at NewIdx: + /// Remove live range [OldIdx;NewIdx) and value defined at OldIdx. + /// (Happens when bundling multiple defs together). + /// + /// 5. Value read at OldIdx, killed before NewIdx: + /// Extend kill to NewIdx. + /// + void handleMoveDown(LiveInterval &LI) { + // First look for a kill at OldIdx. + LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex()); + LiveInterval::iterator E = LI.end(); + // Is LI even live at OldIdx? + if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start)) + return; - for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); - II != IE; ++II) { - LiveInterval* LI = II->first; - LiveRange* LR = II->second; - if (LR->end.isDead()) { - BR[LI->reg].Dead = LR; - } else { - BR[LI->reg].EC = LR; - } + // Handle a live-in value. 
+ if (!SlotIndex::isSameInstr(I->start, OldIdx)) { + bool isKill = SlotIndex::isSameInstr(OldIdx, I->end); + // If the live-in value already extends to NewIdx, there is nothing to do. + if (!SlotIndex::isEarlierInstr(I->end, NewIdx)) + return; + // Aggressively remove all kill flags from the old kill point. + // Kill flags shouldn't be used while live intervals exist, they will be + // reinserted by VirtRegRewriter. + if (MachineInstr *KillMI = LIS.getInstructionFromIndex(I->end)) + for (MIBundleOperands MO(KillMI); MO.isValid(); ++MO) + if (MO->isReg() && MO->isUse()) + MO->setIsKill(false); + // Adjust I->end to reach NewIdx. This may temporarily make LI invalid by + // overlapping ranges. Case 5 above. + I->end = NewIdx.getRegSlot(I->end.isEarlyClobber()); + // If this was a kill, there may also be a def. Otherwise we're done. + if (!isKill) + return; + ++I; } - for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); - EI != EE; ++EI) { - LiveInterval* LI = EI->first; - LiveRange* LR = EI->second; - BR[LI->reg].Def = LR; + // Check for a def at OldIdx. + if (I == E || !SlotIndex::isSameInstr(OldIdx, I->start)) + return; + // We have a def at OldIdx. + VNInfo *DefVNI = I->valno; + assert(DefVNI->def == I->start && "Inconsistent def"); + DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber()); + // If the defined value extends beyond NewIdx, just move the def down. + // This is case 1 above. + if (SlotIndex::isEarlierInstr(NewIdx, I->end)) { + I->start = DefVNI->def; + return; } - - return BR; - } - - void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) { - MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx); - if (!OldKillMI->killsRegister(reg)) - return; // Bail out if we don't have kill flags on the old register. - MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx); - assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill."); - assert(!NewKillMI->killsRegister(reg) && - "New kill instr is already a kill."); - OldKillMI->clearRegisterKills(reg, &TRI); - NewKillMI->addRegisterKilled(reg, &TRI); - } - - void updateRegMaskSlots(SlotIndex OldIdx) { - SmallVectorImpl::iterator RI = - std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(), - OldIdx); - assert(*RI == OldIdx && "No RegMask at OldIdx."); - *RI = NewIdx; - assert(*prior(RI) < *RI && *RI < *next(RI) && - "RegSlots out of order. Did you move one call across another?"); - } - - // Return the last use of reg between NewIdx and OldIdx. - SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) { - SlotIndex LastUse = NewIdx; - for (MachineRegisterInfo::use_nodbg_iterator - UI = MRI.use_nodbg_begin(Reg), - UE = MRI.use_nodbg_end(); - UI != UE; UI.skipInstruction()) { - const MachineInstr* MI = &*UI; - SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI); - if (InstSlot > LastUse && InstSlot < OldIdx) - LastUse = InstSlot; + // The remaining possibilities are now: + // 2. Live def at OldIdx, killed at NewIdx: isSameInstr(I->end, NewIdx). + // 3. Dead def at OldIdx: I->end = OldIdx.getDeadSlot(). + // In either case, it is possible that there is an existing def at NewIdx. + assert((I->end == OldIdx.getDeadSlot() || + SlotIndex::isSameInstr(I->end, NewIdx)) && + "Cannot move def below kill"); + LiveInterval::iterator NewI = LI.advanceTo(I, NewIdx.getRegSlot()); + if (NewI != E && SlotIndex::isSameInstr(NewI->start, NewIdx)) { + // There is an existing def at NewIdx, case 4 above. The def at OldIdx is + // coalesced into that value. 
+ assert(NewI->valno != DefVNI && "Multiple defs of value?"); + LI.removeValNo(DefVNI); + return; } - return LastUse; + // There was no existing def at NewIdx. Turn *I into a dead def at NewIdx. + // If the def at OldIdx was dead, we allow it to be moved across other LI + // values. The new range should be placed immediately before NewI, move any + // intermediate ranges up. + assert(NewI != I && "Inconsistent iterators"); + std::copy(llvm::next(I), NewI, I); + *llvm::prior(NewI) = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI); } - void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - bool LiveThrough = LR->end > OldIdx.getRegSlot(); - if (LiveThrough) + /// Update LI to reflect an instruction has been moved upwards from OldIdx + /// to NewIdx. + /// + /// 1. Live def at OldIdx: + /// Hoist def to NewIdx. + /// + /// 2. Dead def at OldIdx: + /// Hoist def+end to NewIdx, possibly move across other values. + /// + /// 3. Dead def at OldIdx AND existing def at NewIdx: + /// Remove value defined at OldIdx, coalescing it with existing value. + /// + /// 4. Live def at OldIdx AND existing def at NewIdx: + /// Remove value defined at NewIdx, hoist OldIdx def to NewIdx. + /// (Happens when bundling multiple defs together). + /// + /// 5. Value killed at OldIdx: + /// Hoist kill to NewIdx, then scan for last kill between NewIdx and + /// OldIdx. + /// + void handleMoveUp(LiveInterval &LI) { + // First look for a kill at OldIdx. + LiveInterval::iterator I = LI.find(OldIdx.getBaseIndex()); + LiveInterval::iterator E = LI.end(); + // Is LI even live at OldIdx? + if (I == E || SlotIndex::isEarlierInstr(OldIdx, I->start)) return; - SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx); - if (LastUse != NewIdx) - moveKillFlags(LI->reg, NewIdx, LastUse); - LR->end = LastUse.getRegSlot(); - } - void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - // Extend the LiveRange if NewIdx is past the end. - if (NewIdx > LR->end) { - // Move kill flags if OldIdx was not originally the end - // (otherwise LR->end points to an invalid slot). - if (LR->end.getRegSlot() != OldIdx.getRegSlot()) { - assert(LR->end > OldIdx && "LiveRange does not cover original slot"); - moveKillFlags(LI->reg, LR->end, NewIdx); + // Handle a live-in value. + if (!SlotIndex::isSameInstr(I->start, OldIdx)) { + // If the live-in value isn't killed here, there is nothing to do. + if (!SlotIndex::isSameInstr(OldIdx, I->end)) + return; + // Adjust I->end to end at NewIdx. If we are hoisting a kill above + // another use, we need to search for that use. Case 5 above. + I->end = NewIdx.getRegSlot(I->end.isEarlyClobber()); + ++I; + // If OldIdx also defines a value, there couldn't have been another use. + if (I == E || !SlotIndex::isSameInstr(I->start, OldIdx)) { + // No def, search for the new kill. + // This can never be an early clobber kill since there is no def. 
+ llvm::prior(I)->end = findLastUseBefore(LI.reg).getRegSlot(); + return; } - LR->end = NewIdx.getRegSlot(); - } - } - - void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) { - bool GoingUp = NewIdx < OldIdx; - - if (GoingUp) { - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) - moveEnteringUpFrom(OldIdx, *EI); - } else { - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) - moveEnteringDownFrom(OldIdx, *EI); } - } - - void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() && - LR->end <= OldIdx.getDeadSlot() && - "Range should be internal to OldIdx."); - LiveRange Tmp(*LR); - Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber()); - Tmp.valno->def = Tmp.start; - Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot(); - LI->removeRange(*LR); - LI->addRange(Tmp); - } - - void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) { - for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); - II != IE; ++II) - moveInternalFrom(OldIdx, *II); - } - - void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) { - LiveRange* LR = P.second; - assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() && - "Range should start in OldIdx."); - assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx."); - SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber()); - LR->start = NewStart; - LR->valno->def = NewStart; - } - - void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) { - for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); - EI != EE; ++EI) - moveExitingFrom(OldIdx, *EI); - } - void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P, - BundleRanges& BR) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - bool LiveThrough = LR->end > OldIdx.getRegSlot(); - if (LiveThrough) { - assert((LR->start < NewIdx || BR[LI->reg].Def == LR) && - "Def in bundle should be def range."); - assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) && - "If bundle has use for this reg it should be LR."); - BR[LI->reg].Use = LR; + // Now deal with the def at OldIdx. + assert(I != E && SlotIndex::isSameInstr(I->start, OldIdx) && "No def?"); + VNInfo *DefVNI = I->valno; + assert(DefVNI->def == I->start && "Inconsistent def"); + DefVNI->def = NewIdx.getRegSlot(I->start.isEarlyClobber()); + + // Check for an existing def at NewIdx. + LiveInterval::iterator NewI = LI.find(NewIdx.getRegSlot()); + if (SlotIndex::isSameInstr(NewI->start, NewIdx)) { + assert(NewI->valno != DefVNI && "Same value defined more than once?"); + // There is an existing def at NewIdx. + if (I->end.isDead()) { + // Case 3: Remove the dead def at OldIdx. + LI.removeValNo(DefVNI); + return; + } + // Case 4: Replace def at NewIdx with live def at OldIdx. + I->start = DefVNI->def; + LI.removeValNo(NewI->valno); return; } - SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx); - moveKillFlags(LI->reg, OldIdx, LastUse); - - if (LR->start < NewIdx) { - // Becoming a new entering range. - assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 && - "Bundle shouldn't be re-defining reg mid-range."); - assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) && - "Bundle shouldn't have different use range for same reg."); - LR->end = LastUse.getRegSlot(); - BR[LI->reg].Use = LR; - } else { - // Becoming a new Dead-def. 
- assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) && - "Live range starting at unexpected slot."); - assert(BR[LI->reg].Def == LR && "Reg should have def range."); - assert(BR[LI->reg].Dead == 0 && - "Can't have def and dead def of same reg in a bundle."); - LR->end = LastUse.getDeadSlot(); - BR[LI->reg].Dead = BR[LI->reg].Def; - BR[LI->reg].Def = 0; - } - } - - void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P, - BundleRanges& BR) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - if (NewIdx > LR->end) { - // Range extended to bundle. Add to bundle uses. - // Note: Currently adds kill flags to bundle start. - assert(BR[LI->reg].Use == 0 && - "Bundle already has use range for reg."); - moveKillFlags(LI->reg, LR->end, NewIdx); - LR->end = NewIdx.getRegSlot(); - BR[LI->reg].Use = LR; - } else { - assert(BR[LI->reg].Use != 0 && - "Bundle should already have a use range for reg."); - } - } - - void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering, - BundleRanges& BR) { - bool GoingUp = NewIdx < OldIdx; - - if (GoingUp) { - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) - moveEnteringUpFromInto(OldIdx, *EI, BR); - } else { - for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end(); - EI != EE; ++EI) - moveEnteringDownFromInto(OldIdx, *EI, BR); + // There is no existing def at NewIdx. Hoist DefVNI. + if (!I->end.isDead()) { + // Leave the end point of a live def. + I->start = DefVNI->def; + return; } - } - void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P, - BundleRanges& BR) { - // TODO: Sane rules for moving ranges into bundles. + // DefVNI is a dead def. It may have been moved across other values in LI, + // so move I up to NewI. Slide [NewI;I) down one position. + std::copy_backward(NewI, I, llvm::next(I)); + *NewI = LiveRange(DefVNI->def, NewIdx.getDeadSlot(), DefVNI); } - void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal, - BundleRanges& BR) { - for (RangeSet::iterator II = Internal.begin(), IE = Internal.end(); - II != IE; ++II) - moveInternalFromInto(OldIdx, *II, BR); + void updateRegMaskSlots() { + SmallVectorImpl::iterator RI = + std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(), + OldIdx); + assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() && + "No RegMask at OldIdx."); + *RI = NewIdx.getRegSlot(); + assert((RI == LIS.RegMaskSlots.begin() || + SlotIndex::isEarlierInstr(*llvm::prior(RI), *RI)) && + "Cannot move regmask instruction above another call"); + assert((llvm::next(RI) == LIS.RegMaskSlots.end() || + SlotIndex::isEarlierInstr(*RI, *llvm::next(RI))) && + "Cannot move regmask instruction below another call"); } - void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P, - BundleRanges& BR) { - LiveInterval* LI = P.first; - LiveRange* LR = P.second; - - assert(LR->start.isRegister() && - "Don't know how to merge exiting ECs into bundles yet."); + // Return the last use of reg between NewIdx and OldIdx. + SlotIndex findLastUseBefore(unsigned Reg) { + SlotIndex LastUse = NewIdx; - if (LR->end > NewIdx.getDeadSlot()) { - // This range is becoming an exiting range on the bundle. - // If there was an old dead-def of this reg, delete it. 
- if (BR[LI->reg].Dead != 0) { - LI->removeRange(*BR[LI->reg].Dead); - BR[LI->reg].Dead = 0; + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + for (MachineRegisterInfo::use_nodbg_iterator + UI = MRI.use_nodbg_begin(Reg), + UE = MRI.use_nodbg_end(); + UI != UE; UI.skipInstruction()) { + const MachineInstr* MI = &*UI; + SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI); + if (InstSlot > LastUse && InstSlot < OldIdx) + LastUse = InstSlot; } - assert(BR[LI->reg].Def == 0 && - "Can't have two defs for the same variable exiting a bundle."); - LR->start = NewIdx.getRegSlot(); - LR->valno->def = LR->start; - BR[LI->reg].Def = LR; } else { - // This range is becoming internal to the bundle. - assert(LR->end == NewIdx.getRegSlot() && - "Can't bundle def whose kill is before the bundle"); - if (BR[LI->reg].Dead || BR[LI->reg].Def) { - // Already have a def for this. Just delete range. - LI->removeRange(*LR); - } else { - // Make range dead, record. - LR->end = NewIdx.getDeadSlot(); - BR[LI->reg].Dead = LR; - assert(BR[LI->reg].Use == LR && - "Range becoming dead should currently be use."); + MachineInstr* MI = LIS.getSlotIndexes()->getInstructionFromIndex(NewIdx); + MachineBasicBlock::iterator MII(MI); + ++MII; + MachineBasicBlock* MBB = MI->getParent(); + for (; MII != MBB->end() && LIS.getInstructionIndex(MII) < OldIdx; ++MII){ + for (MachineInstr::mop_iterator MOI = MII->operands_begin(), + MOE = MII->operands_end(); + MOI != MOE; ++MOI) { + const MachineOperand& mop = *MOI; + if (!mop.isReg() || mop.getReg() == 0 || + TargetRegisterInfo::isVirtualRegister(mop.getReg())) + continue; + + if (TRI.hasRegUnit(mop.getReg(), Reg)) + LastUse = LIS.getInstructionIndex(MII); + } } - // In both cases the range is no longer a use on the bundle. - BR[LI->reg].Use = 0; } + return LastUse; } - - void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting, - BundleRanges& BR) { - for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end(); - EI != EE; ++EI) - moveExitingFromInto(OldIdx, *EI, BR); - } - }; -void LiveIntervals::handleMove(MachineInstr* MI) { +void LiveIntervals::handleMove(MachineInstr* MI, bool UpdateFlags) { + assert(!MI->isBundled() && "Can't handle bundled instructions yet."); SlotIndex OldIndex = Indexes->getInstructionIndex(MI); Indexes->removeMachineInstrFromMaps(MI); - SlotIndex NewIndex = MI->isInsideBundle() ? 
- Indexes->getInstructionIndex(MI) : - Indexes->insertMachineInstrInMaps(MI); + SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI); assert(getMBBStartIdx(MI->getParent()) <= OldIndex && OldIndex < getMBBEndIdx(MI->getParent()) && "Cannot handle moves across basic block boundaries."); - assert(!MI->isBundled() && "Can't handle bundled instructions yet."); - HMEditor HME(*this, *MRI, *TRI, NewIndex); - HME.moveAllRangesFrom(MI, OldIndex); + HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags); + HME.updateAllRanges(MI); } void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI, - MachineInstr* BundleStart) { + MachineInstr* BundleStart, + bool UpdateFlags) { + SlotIndex OldIndex = Indexes->getInstructionIndex(MI); SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart); - HMEditor HME(*this, *MRI, *TRI, NewIndex); - HME.moveAllRangesInto(MI, BundleStart); + HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags); + HME.updateAllRanges(MI); } diff --git a/lib/CodeGen/LiveIntervalUnion.h b/lib/CodeGen/LiveIntervalUnion.h index cd4e690..4d41fca 100644 --- a/lib/CodeGen/LiveIntervalUnion.h +++ b/lib/CodeGen/LiveIntervalUnion.h @@ -178,8 +178,8 @@ public: bool checkLoopInterference(MachineLoopRange*); private: - Query(const Query&); // DO NOT IMPLEMENT - void operator=(const Query&); // DO NOT IMPLEMENT + Query(const Query&) LLVM_DELETED_FUNCTION; + void operator=(const Query&) LLVM_DELETED_FUNCTION; }; // Array of LiveIntervalUnions. diff --git a/lib/CodeGen/LiveRangeCalc.cpp b/lib/CodeGen/LiveRangeCalc.cpp index d828f25..c3ff4f1 100644 --- a/lib/CodeGen/LiveRangeCalc.cpp +++ b/lib/CodeGen/LiveRangeCalc.cpp @@ -65,7 +65,11 @@ void LiveRangeCalc::extendToUses(LiveInterval *LI, unsigned Reg) { // Visit all operands that read Reg. This may include partial defs. for (MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(Reg), E = MRI->reg_nodbg_end(); I != E; ++I) { - const MachineOperand &MO = I.getOperand(); + MachineOperand &MO = I.getOperand(); + // Clear all kill flags. They will be reinserted after register allocation + // by LiveIntervalAnalysis::addKillFlags(). + if (MO.isUse()) + MO.setIsKill(false); if (!MO.readsReg()) continue; // MI is reading Reg. We may have visited MI before if it happens to be diff --git a/lib/CodeGen/LiveRangeEdit.cpp b/lib/CodeGen/LiveRangeEdit.cpp index b4ce9aa..f8fbc7d 100644 --- a/lib/CodeGen/LiveRangeEdit.cpp +++ b/lib/CodeGen/LiveRangeEdit.cpp @@ -87,7 +87,7 @@ bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI, // We can't remat physreg uses, unless it is a constant. if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) { - if (MRI.isConstantPhysReg(MO.getReg(), VRM->getMachineFunction())) + if (MRI.isConstantPhysReg(MO.getReg(), *OrigMI->getParent()->getParent())) continue; return false; } @@ -96,6 +96,13 @@ bool LiveRangeEdit::allUsesAvailableAt(const MachineInstr *OrigMI, const VNInfo *OVNI = li.getVNInfoAt(OrigIdx); if (!OVNI) continue; + + // Don't allow rematerialization immediately after the original def. + // It would be incorrect if OrigMI redefines the register. + // See PR14098. + if (SlotIndex::isSameInstr(OrigIdx, UseIdx)) + return false; + if (OVNI != li.getVNInfoAt(UseIdx)) return false; } @@ -249,7 +256,7 @@ void LiveRangeEdit::eliminateDeadDefs(SmallVectorImpl &Dead, unsigned Reg = MOI->getReg(); if (!TargetRegisterInfo::isVirtualRegister(Reg)) { // Check if MI reads any unreserved physregs. 
- if (Reg && MOI->readsReg() && !LIS.isReserved(Reg)) + if (Reg && MOI->readsReg() && !MRI.isReserved(Reg)) ReadsPhysRegs = true; continue; } diff --git a/lib/CodeGen/LiveRegMatrix.cpp b/lib/CodeGen/LiveRegMatrix.cpp index cdb1776..7f22478 100644 --- a/lib/CodeGen/LiveRegMatrix.cpp +++ b/lib/CodeGen/LiveRegMatrix.cpp @@ -13,6 +13,7 @@ #define DEBUG_TYPE "regalloc" #include "LiveRegMatrix.h" +#include "RegisterCoalescer.h" #include "VirtRegMap.h" #include "llvm/ADT/Statistic.h" #include "llvm/CodeGen/MachineRegisterInfo.h" @@ -117,8 +118,9 @@ bool LiveRegMatrix::checkRegUnitInterference(LiveInterval &VirtReg, unsigned PhysReg) { if (VirtReg.empty()) return false; + CoalescerPair CP(VirtReg.reg, PhysReg, *TRI); for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) - if (VirtReg.overlaps(LIS->getRegUnit(*Units))) + if (VirtReg.overlaps(LIS->getRegUnit(*Units), CP, *LIS->getSlotIndexes())) return true; return false; } diff --git a/lib/CodeGen/LiveRegMatrix.h b/lib/CodeGen/LiveRegMatrix.h index b3e2d7f..8f22c24 100644 --- a/lib/CodeGen/LiveRegMatrix.h +++ b/lib/CodeGen/LiveRegMatrix.h @@ -15,7 +15,7 @@ // Register units are defined in MCRegisterInfo.h, they represent the smallest // unit of interference when dealing with overlapping physical registers. The // LiveRegMatrix is represented as a LiveIntervalUnion per register unit. When -// a virtual register is assigned to a physicval register, the live range for +// a virtual register is assigned to a physical register, the live range for // the virtual register is inserted into the LiveIntervalUnion for each regunit // in the physreg. // diff --git a/lib/CodeGen/LiveStackAnalysis.cpp b/lib/CodeGen/LiveStackAnalysis.cpp index 939e795..f0b522b 100644 --- a/lib/CodeGen/LiveStackAnalysis.cpp +++ b/lib/CodeGen/LiveStackAnalysis.cpp @@ -25,7 +25,10 @@ using namespace llvm; char LiveStacks::ID = 0; -INITIALIZE_PASS(LiveStacks, "livestacks", +INITIALIZE_PASS_BEGIN(LiveStacks, "livestacks", + "Live Stack Slot Analysis", false, false) +INITIALIZE_PASS_DEPENDENCY(SlotIndexes) +INITIALIZE_PASS_END(LiveStacks, "livestacks", "Live Stack Slot Analysis", false, false) char &llvm::LiveStacksID = LiveStacks::ID; diff --git a/lib/CodeGen/LiveVariables.cpp b/lib/CodeGen/LiveVariables.cpp index 348ed3a..6ea933d 100644 --- a/lib/CodeGen/LiveVariables.cpp +++ b/lib/CodeGen/LiveVariables.cpp @@ -65,6 +65,7 @@ LiveVariables::VarInfo::findKill(const MachineBasicBlock *MBB) const { } void LiveVariables::VarInfo::dump() const { +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) dbgs() << " Alive in blocks: "; for (SparseBitVector<>::iterator I = AliveBlocks.begin(), E = AliveBlocks.end(); I != E; ++I) @@ -77,6 +78,7 @@ void LiveVariables::VarInfo::dump() const { dbgs() << "\n #" << i << ": " << *Kills[i]; dbgs() << "\n"; } +#endif } /// getVarInfo - Get (possibly creating) a VarInfo object for the given vreg. 
@@ -501,8 +503,6 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) { MRI = &mf.getRegInfo(); TRI = MF->getTarget().getRegisterInfo(); - ReservedRegisters = TRI->getReservedRegs(mf); - unsigned NumRegs = TRI->getNumRegs(); PhysRegDef = new MachineInstr*[NumRegs]; PhysRegUse = new MachineInstr*[NumRegs]; @@ -586,7 +586,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) { unsigned MOReg = UseRegs[i]; if (TargetRegisterInfo::isVirtualRegister(MOReg)) HandleVirtRegUse(MOReg, MBB, MI); - else if (!ReservedRegisters[MOReg]) + else if (!MRI->isReserved(MOReg)) HandlePhysRegUse(MOReg, MI); } @@ -599,7 +599,7 @@ bool LiveVariables::runOnMachineFunction(MachineFunction &mf) { unsigned MOReg = DefRegs[i]; if (TargetRegisterInfo::isVirtualRegister(MOReg)) HandleVirtRegDef(MOReg, MI); - else if (!ReservedRegisters[MOReg]) + else if (!MRI->isReserved(MOReg)) HandlePhysRegDef(MOReg, MI, Defs); } UpdatePhysRegDefs(MI, Defs); @@ -806,18 +806,44 @@ void LiveVariables::addNewBlock(MachineBasicBlock *BB, MachineBasicBlock *SuccBB) { const unsigned NumNew = BB->getNumber(); - // All registers used by PHI nodes in SuccBB must be live through BB. - for (MachineBasicBlock::iterator BBI = SuccBB->begin(), - BBE = SuccBB->end(); BBI != BBE && BBI->isPHI(); ++BBI) + SmallSet Defs, Kills; + + MachineBasicBlock::iterator BBI = SuccBB->begin(), BBE = SuccBB->end(); + for (; BBI != BBE && BBI->isPHI(); ++BBI) { + // Record the def of the PHI node. + Defs.insert(BBI->getOperand(0).getReg()); + + // All registers used by PHI nodes in SuccBB must be live through BB. for (unsigned i = 1, e = BBI->getNumOperands(); i != e; i += 2) if (BBI->getOperand(i+1).getMBB() == BB) getVarInfo(BBI->getOperand(i).getReg()).AliveBlocks.set(NumNew); + } + + // Record all vreg defs and kills of all instructions in SuccBB. + for (; BBI != BBE; ++BBI) { + for (MachineInstr::mop_iterator I = BBI->operands_begin(), + E = BBI->operands_end(); I != E; ++I) { + if (I->isReg() && TargetRegisterInfo::isVirtualRegister(I->getReg())) { + if (I->isDef()) + Defs.insert(I->getReg()); + else if (I->isKill()) + Kills.insert(I->getReg()); + } + } + } // Update info for all live variables for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) { unsigned Reg = TargetRegisterInfo::index2VirtReg(i); + + // If the Defs is defined in the successor it can't be live in BB. + if (Defs.count(Reg)) + continue; + + // If the register is either killed in or live through SuccBB it's also live + // through BB. 
VarInfo &VI = getVarInfo(Reg); - if (!VI.AliveBlocks.test(NumNew) && VI.isLiveIn(*SuccBB, Reg, *MRI)) + if (Kills.count(Reg) || VI.AliveBlocks.test(SuccBB->getNumber())) VI.AliveBlocks.set(NumNew); } } diff --git a/lib/CodeGen/MachineBasicBlock.cpp b/lib/CodeGen/MachineBasicBlock.cpp index fa6b450..18d021d 100644 --- a/lib/CodeGen/MachineBasicBlock.cpp +++ b/lib/CodeGen/MachineBasicBlock.cpp @@ -21,7 +21,7 @@ #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Assembly/Writer.h" @@ -145,7 +145,8 @@ MachineBasicBlock::iterator MachineBasicBlock::getFirstNonPHI() { instr_iterator I = instr_begin(), E = instr_end(); while (I != E && I->isPHI()) ++I; - assert(!I->isInsideBundle() && "First non-phi MI cannot be inside a bundle!"); + assert((I == E || !I->isInsideBundle()) && + "First non-phi MI cannot be inside a bundle!"); return I; } @@ -156,7 +157,7 @@ MachineBasicBlock::SkipPHIsAndLabels(MachineBasicBlock::iterator I) { ++I; // FIXME: This needs to change if we wish to bundle labels / dbg_values // inside the bundle. - assert(!I->isInsideBundle() && + assert((I == E || !I->isInsideBundle()) && "First non-phi / non-label instruction is inside a bundle!"); return I; } @@ -228,9 +229,11 @@ const MachineBasicBlock *MachineBasicBlock::getLandingPadSuccessor() const { return 0; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MachineBasicBlock::dump() const { print(dbgs()); } +#endif StringRef MachineBasicBlock::getName() const { if (const BasicBlock *LBB = getBasicBlock()) @@ -243,7 +246,7 @@ StringRef MachineBasicBlock::getName() const { std::string MachineBasicBlock::getFullName() const { std::string Name; if (getParent()) - Name = (getParent()->getFunction()->getName() + ":").str(); + Name = (getParent()->getName() + ":").str(); if (getBasicBlock()) Name += getBasicBlock()->getName(); else @@ -942,12 +945,11 @@ MachineBasicBlock::findDebugLoc(instr_iterator MBBI) { /// getSuccWeight - Return weight of the edge from this block to MBB. /// -uint32_t MachineBasicBlock::getSuccWeight(const MachineBasicBlock *succ) const { +uint32_t MachineBasicBlock::getSuccWeight(const_succ_iterator Succ) const { if (Weights.empty()) return 0; - const_succ_iterator I = std::find(Successors.begin(), Successors.end(), succ); - return *getWeightIterator(I); + return *getWeightIterator(Succ); } /// getWeightIterator - Return wight iterator corresonding to the I successor @@ -970,6 +972,80 @@ getWeightIterator(MachineBasicBlock::const_succ_iterator I) const { return Weights.begin() + index; } +/// Return whether (physical) register "Reg" has been defined and not killed +/// as of just before "MI". +/// +/// Search is localised to a neighborhood of +/// Neighborhood instructions before (searching for defs or kills) and N +/// instructions after (searching just for defs) MI. +MachineBasicBlock::LivenessQueryResult +MachineBasicBlock::computeRegisterLiveness(const TargetRegisterInfo *TRI, + unsigned Reg, MachineInstr *MI, + unsigned Neighborhood) { + + unsigned N = Neighborhood; + MachineBasicBlock *MBB = MI->getParent(); + + // Start by searching backwards from MI, looking for kills, reads or defs. + + MachineBasicBlock::iterator I(MI); + // If this is the first insn in the block, don't search backwards.
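// --- Editor's sketch, not part of this patch -------------------------------
// Typical use of the computeRegisterLiveness() query being added here: asking
// whether a physical scratch register may be clobbered just before MI. Only a
// definite LQR_Dead answer is a safe "yes"; LQR_Unknown means the bounded
// neighborhood scan was inconclusive. The helper name is an assumption.
static bool canClobberBefore(llvm::MachineInstr *MI, unsigned PhysReg,
                             const llvm::TargetRegisterInfo *TRI) {
  llvm::MachineBasicBlock *MBB = MI->getParent();
  llvm::MachineBasicBlock::LivenessQueryResult LQR =
      MBB->computeRegisterLiveness(TRI, PhysReg, MI, /*Neighborhood=*/10);
  return LQR == llvm::MachineBasicBlock::LQR_Dead;
}
// ----------------------------------------------------------------------------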
+ if (I != MBB->begin()) { + do { + --I; + + MachineOperandIteratorBase::PhysRegInfo Analysis = + MIOperands(I).analyzePhysReg(Reg, TRI); + + if (Analysis.Kills) + // Register killed, so isn't live. + return LQR_Dead; + + else if (Analysis.DefinesOverlap || Analysis.ReadsOverlap) + // Defined or read without a previous kill - live. + return (Analysis.Defines || Analysis.Reads) ? + LQR_Live : LQR_OverlappingLive; + + } while (I != MBB->begin() && --N > 0); + } + + // Did we get to the start of the block? + if (I == MBB->begin()) { + // If so, the register's state is definitely defined by the live-in state. + for (MCRegAliasIterator RAI(Reg, TRI, /*IncludeSelf=*/true); + RAI.isValid(); ++RAI) { + if (MBB->isLiveIn(*RAI)) + return (*RAI == Reg) ? LQR_Live : LQR_OverlappingLive; + } + + return LQR_Dead; + } + + N = Neighborhood; + + // Try searching forwards from MI, looking for reads or defs. + I = MachineBasicBlock::iterator(MI); + // If this is the last insn in the block, don't search forwards. + if (I != MBB->end()) { + for (++I; I != MBB->end() && N > 0; ++I, --N) { + MachineOperandIteratorBase::PhysRegInfo Analysis = + MIOperands(I).analyzePhysReg(Reg, TRI); + + if (Analysis.ReadsOverlap) + // Used, therefore must have been live. + return (Analysis.Reads) ? + LQR_Live : LQR_OverlappingLive; + + else if (Analysis.DefinesOverlap) + // Defined (but not read) therefore cannot have been live. + return LQR_Dead; + } + } + + // At this point we have no idea of the liveness of the register. + return LQR_Unknown; +} + void llvm::WriteAsOperand(raw_ostream &OS, const MachineBasicBlock *MBB, bool t) { OS << "BB#" << MBB->getNumber(); diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp index c4dca2c..cd3f199 100644 --- a/lib/CodeGen/MachineBlockPlacement.cpp +++ b/lib/CodeGen/MachineBlockPlacement.cpp @@ -500,11 +500,10 @@ void MachineBlockPlacement::buildChain( assert(BB); assert(BlockToChain[BB] == &Chain); assert(*llvm::prior(Chain.end()) == BB); - MachineBasicBlock *BestSucc = 0; // Look for the best viable successor if there is one to place immediately // after this block. - BestSucc = selectBestSuccessor(BB, Chain, BlockFilter); + MachineBasicBlock *BestSucc = selectBestSuccessor(BB, Chain, BlockFilter); // If an immediate successor isn't available, look for the best viable // block among those we've identified as not violating the loop's CFG at @@ -1014,7 +1013,8 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { // exclusively on the loop info here so that we can align backedges in // unnatural CFGs and backedges that were introduced purely because of the // loop rotations done during this layout pass. - if (F.getFunction()->hasFnAttr(Attribute::OptimizeForSize)) + if (F.getFunction()->getFnAttributes(). 
+ hasAttribute(Attributes::OptimizeForSize)) return; unsigned Align = TLI->getPrefLoopAlignment(); if (!Align) diff --git a/lib/CodeGen/MachineBranchProbabilityInfo.cpp b/lib/CodeGen/MachineBranchProbabilityInfo.cpp index 0cc1af0..4479211 100644 --- a/lib/CodeGen/MachineBranchProbabilityInfo.cpp +++ b/lib/CodeGen/MachineBranchProbabilityInfo.cpp @@ -38,7 +38,7 @@ getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const { Scale = 1; for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) { - uint32_t Weight = getEdgeWeight(MBB, *I); + uint32_t Weight = getEdgeWeight(MBB, I); Sum += Weight; } @@ -53,22 +53,30 @@ getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const { Sum = 0; for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) { - uint32_t Weight = getEdgeWeight(MBB, *I); + uint32_t Weight = getEdgeWeight(MBB, I); Sum += Weight / Scale; } assert(Sum <= UINT32_MAX); return Sum; } -uint32_t -MachineBranchProbabilityInfo::getEdgeWeight(const MachineBasicBlock *Src, - const MachineBasicBlock *Dst) const { +uint32_t MachineBranchProbabilityInfo:: +getEdgeWeight(const MachineBasicBlock *Src, + MachineBasicBlock::const_succ_iterator Dst) const { uint32_t Weight = Src->getSuccWeight(Dst); if (!Weight) return DEFAULT_WEIGHT; return Weight; } +uint32_t MachineBranchProbabilityInfo:: +getEdgeWeight(const MachineBasicBlock *Src, + const MachineBasicBlock *Dst) const { + // This is a linear search. Try to use the const_succ_iterator version when + // possible. + return getEdgeWeight(Src, std::find(Src->succ_begin(), Src->succ_end(), Dst)); +} + bool MachineBranchProbabilityInfo::isEdgeHot(MachineBasicBlock *Src, MachineBasicBlock *Dst) const { // Hot probability is at least 4/5 = 80% @@ -82,7 +90,7 @@ MachineBranchProbabilityInfo::getHotSucc(MachineBasicBlock *MBB) const { MachineBasicBlock *MaxSucc = 0; for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(), E = MBB->succ_end(); I != E; ++I) { - uint32_t Weight = getEdgeWeight(MBB, *I); + uint32_t Weight = getEdgeWeight(MBB, I); if (Weight > MaxWeight) { MaxWeight = Weight; MaxSucc = *I; diff --git a/lib/CodeGen/MachineCSE.cpp b/lib/CodeGen/MachineCSE.cpp index 896461f..dbc41de 100644 --- a/lib/CodeGen/MachineCSE.cpp +++ b/lib/CodeGen/MachineCSE.cpp @@ -63,8 +63,6 @@ namespace { virtual void releaseMemory() { ScopeMap.clear(); Exps.clear(); - AllocatableRegs.clear(); - ReservedRegs.clear(); } private: @@ -78,8 +76,6 @@ namespace { ScopedHTType VNT; SmallVector Exps; unsigned CurrVN; - BitVector AllocatableRegs; - BitVector ReservedRegs; bool PerformTrivialCoalescing(MachineInstr *MI, MachineBasicBlock *MBB); bool isPhysDefTriviallyDead(unsigned Reg, @@ -88,7 +84,8 @@ namespace { bool hasLivePhysRegDefUses(const MachineInstr *MI, const MachineBasicBlock *MBB, SmallSet &PhysRefs, - SmallVector &PhysDefs) const; + SmallVector &PhysDefs, + bool &PhysUseDef) const; bool PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI, SmallSet &PhysRefs, SmallVector &PhysDefs, @@ -198,31 +195,52 @@ MachineCSE::isPhysDefTriviallyDead(unsigned Reg, bool MachineCSE::hasLivePhysRegDefUses(const MachineInstr *MI, const MachineBasicBlock *MBB, SmallSet &PhysRefs, - SmallVector &PhysDefs) const{ - MachineBasicBlock::const_iterator I = MI; I = llvm::next(I); + SmallVector &PhysDefs, + bool &PhysUseDef) const{ + // First, add all uses to PhysRefs. 
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); - if (!MO.isReg()) + if (!MO.isReg() || MO.isDef()) continue; unsigned Reg = MO.getReg(); if (!Reg) continue; if (TargetRegisterInfo::isVirtualRegister(Reg)) continue; - // If the def is dead, it's ok. But the def may not marked "dead". That's - // common since this pass is run before livevariables. We can scan - // forward a few instructions and check if it is obviously dead. - if (MO.isDef() && - (MO.isDead() || isPhysDefTriviallyDead(Reg, I, MBB->end()))) - continue; // Reading constant physregs is ok. if (!MRI->isConstantPhysReg(Reg, *MBB->getParent())) for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) PhysRefs.insert(*AI); - if (MO.isDef()) + } + + // Next, collect all defs into PhysDefs. If any is already in PhysRefs + // (which currently contains only uses), set the PhysUseDef flag. + PhysUseDef = false; + MachineBasicBlock::const_iterator I = MI; I = llvm::next(I); + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg() || !MO.isDef()) + continue; + unsigned Reg = MO.getReg(); + if (!Reg) + continue; + if (TargetRegisterInfo::isVirtualRegister(Reg)) + continue; + // Check against PhysRefs even if the def is "dead". + if (PhysRefs.count(Reg)) + PhysUseDef = true; + // If the def is dead, it's ok. But the def may not marked "dead". That's + // common since this pass is run before livevariables. We can scan + // forward a few instructions and check if it is obviously dead. + if (!MO.isDead() && !isPhysDefTriviallyDead(Reg, I, MBB->end())) PhysDefs.push_back(Reg); } + // Finally, add all defs to PhysRefs as well. + for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) + for (MCRegAliasIterator AI(PhysDefs[i], TRI, true); AI.isValid(); ++AI) + PhysRefs.insert(*AI); + return !PhysRefs.empty(); } @@ -242,7 +260,7 @@ bool MachineCSE::PhysRegDefsReach(MachineInstr *CSMI, MachineInstr *MI, return false; for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) { - if (AllocatableRegs.test(PhysDefs[i]) || ReservedRegs.test(PhysDefs[i])) + if (MRI->isAllocatable(PhysDefs[i]) || MRI->isReserved(PhysDefs[i])) // Avoid extending live range of physical registers if they are //allocatable or reserved. return false; @@ -411,8 +429,8 @@ void MachineCSE::ExitScope(MachineBasicBlock *MBB) { DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n'); DenseMap::iterator SI = ScopeMap.find(MBB); assert(SI != ScopeMap.end()); - ScopeMap.erase(SI); delete SI->second; + ScopeMap.erase(SI); } bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) { @@ -463,16 +481,22 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) { bool CrossMBBPhysDef = false; SmallSet PhysRefs; SmallVector PhysDefs; - if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs, PhysDefs)) { + bool PhysUseDef = false; + if (FoundCSE && hasLivePhysRegDefUses(MI, MBB, PhysRefs, + PhysDefs, PhysUseDef)) { FoundCSE = false; // ... Unless the CS is local or is in the sole predecessor block // and it also defines the physical register which is not clobbered // in between and the physical register uses were not clobbered. - unsigned CSVN = VNT.lookup(MI); - MachineInstr *CSMI = Exps[CSVN]; - if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef)) - FoundCSE = true; + // This can never be the case if the instruction both uses and + // defines the same physical register, which was detected above. 
+ if (!PhysUseDef) { + unsigned CSVN = VNT.lookup(MI); + MachineInstr *CSMI = Exps[CSVN]; + if (PhysRegDefsReach(CSMI, MI, PhysRefs, PhysDefs, CrossMBBPhysDef)) + FoundCSE = true; + } } if (!FoundCSE) { @@ -635,7 +659,5 @@ bool MachineCSE::runOnMachineFunction(MachineFunction &MF) { MRI = &MF.getRegInfo(); AA = &getAnalysis(); DT = &getAnalysis(); - AllocatableRegs = TRI->getAllocatableSet(MF); - ReservedRegs = TRI->getReservedRegs(MF); return PerformCSE(DT->getRootNode()); } diff --git a/lib/CodeGen/MachineCopyPropagation.cpp b/lib/CodeGen/MachineCopyPropagation.cpp index bac3aa2..4a79328 100644 --- a/lib/CodeGen/MachineCopyPropagation.cpp +++ b/lib/CodeGen/MachineCopyPropagation.cpp @@ -16,6 +16,7 @@ #include "llvm/Pass.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -32,7 +33,7 @@ STATISTIC(NumDeletes, "Number of dead copies deleted"); namespace { class MachineCopyPropagation : public MachineFunctionPass { const TargetRegisterInfo *TRI; - BitVector ReservedRegs; + MachineRegisterInfo *MRI; public: static char ID; // Pass identification, replacement for typeid @@ -146,8 +147,8 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) { DenseMap::iterator CI = AvailCopyMap.find(Src); if (CI != AvailCopyMap.end()) { MachineInstr *CopyMI = CI->second; - if (!ReservedRegs.test(Def) && - (!ReservedRegs.test(Src) || NoInterveningSideEffect(CopyMI, MI)) && + if (!MRI->isReserved(Def) && + (!MRI->isReserved(Src) || NoInterveningSideEffect(CopyMI, MI)) && isNopCopy(CopyMI, Def, Src, TRI)) { // The two copies cancel out and the source of the first copy // hasn't been overridden, eliminate the second one. e.g. 
@@ -259,7 +260,7 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) { DI = MaybeDeadCopies.begin(), DE = MaybeDeadCopies.end(); DI != DE; ++DI) { unsigned Reg = (*DI)->getOperand(0).getReg(); - if (ReservedRegs.test(Reg) || !MaskMO.clobbersPhysReg(Reg)) + if (MRI->isReserved(Reg) || !MaskMO.clobbersPhysReg(Reg)) continue; (*DI)->eraseFromParent(); Changed = true; @@ -296,7 +297,7 @@ bool MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) { for (SmallSetVector::iterator DI = MaybeDeadCopies.begin(), DE = MaybeDeadCopies.end(); DI != DE; ++DI) { - if (!ReservedRegs.test((*DI)->getOperand(0).getReg())) { + if (!MRI->isReserved((*DI)->getOperand(0).getReg())) { (*DI)->eraseFromParent(); Changed = true; ++NumDeletes; @@ -311,7 +312,7 @@ bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) { bool Changed = false; TRI = MF.getTarget().getRegisterInfo(); - ReservedRegs = TRI->getReservedRegs(MF); + MRI = &MF.getRegInfo(); for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) Changed |= CopyPropagateBlock(*I); diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp index d4aede8a..91d5211 100644 --- a/lib/CodeGen/MachineFunction.cpp +++ b/lib/CodeGen/MachineFunction.cpp @@ -28,7 +28,7 @@ #include "llvm/MC/MCContext.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Support/Debug.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetFrameLowering.h" @@ -59,13 +59,13 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM, RegInfo = 0; MFInfo = 0; FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering()); - if (Fn->hasFnAttr(Attribute::StackAlignment)) - FrameInfo->ensureMaxAlignment(Attribute::getStackAlignmentFromAttrs( - Fn->getAttributes().getFnAttributes())); - ConstantPool = new (Allocator) MachineConstantPool(TM.getTargetData()); + if (Fn->getFnAttributes().hasAttribute(Attributes::StackAlignment)) + FrameInfo->ensureMaxAlignment(Fn->getAttributes(). + getFnAttributes().getStackAlignment()); + ConstantPool = new (Allocator) MachineConstantPool(TM.getDataLayout()); Alignment = TM.getTargetLowering()->getMinFunctionAlignment(); // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn. - if (!Fn->hasFnAttr(Attribute::OptimizeForSize)) + if (!Fn->getFnAttributes().hasAttribute(Attributes::OptimizeForSize)) Alignment = std::max(Alignment, TM.getTargetLowering()->getPrefFunctionAlignment()); FunctionNumber = FunctionNum; @@ -284,12 +284,19 @@ MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin, return std::make_pair(Result, Result + Num); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MachineFunction::dump() const { print(dbgs()); } +#endif + +StringRef MachineFunction::getName() const { + assert(getFunction() && "No function!"); + return getFunction()->getName(); +} void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const { - OS << "# Machine code for function " << Fn->getName() << ": "; + OS << "# Machine code for function " << getName() << ": "; if (RegInfo) { OS << (RegInfo->isSSA() ? 
"SSA" : "Post SSA"); if (!RegInfo->tracksLiveness()) @@ -334,7 +341,7 @@ void MachineFunction::print(raw_ostream &OS, SlotIndexes *Indexes) const { BB->print(OS, Indexes); } - OS << "\n# End machine code for function " << Fn->getName() << ".\n\n"; + OS << "\n# End machine code for function " << getName() << ".\n\n"; } namespace llvm { @@ -344,7 +351,7 @@ namespace llvm { DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} static std::string getGraphName(const MachineFunction *F) { - return "CFG for '" + F->getFunction()->getName().str() + "' function"; + return "CFG for '" + F->getName().str() + "' function"; } std::string getNodeLabel(const MachineBasicBlock *Node, @@ -377,7 +384,7 @@ namespace llvm { void MachineFunction::viewCFG() const { #ifndef NDEBUG - ViewGraph(this, "mf" + getFunction()->getName()); + ViewGraph(this, "mf" + getName()); #else errs() << "MachineFunction::viewCFG is only available in debug builds on " << "systems with Graphviz or gv!\n"; @@ -387,7 +394,7 @@ void MachineFunction::viewCFG() const void MachineFunction::viewCFGOnly() const { #ifndef NDEBUG - ViewGraph(this, "mf" + getFunction()->getName(), true); + ViewGraph(this, "mf" + getName(), true); #else errs() << "MachineFunction::viewCFGOnly is only available in debug builds on " << "systems with Graphviz or gv!\n"; @@ -453,7 +460,9 @@ int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset, unsigned StackAlign = TFI.getStackAlignment(); unsigned Align = MinAlign(SPOffset, StackAlign); Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable, - /*isSS*/false, false)); + /*isSS*/ false, + /*NeedSP*/ false, + /*Alloca*/ 0)); return -++NumFixedObjects; } @@ -525,16 +534,18 @@ void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{ } } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MachineFrameInfo::dump(const MachineFunction &MF) const { print(MF, dbgs()); } +#endif //===----------------------------------------------------------------------===// // MachineJumpTableInfo implementation //===----------------------------------------------------------------------===// /// getEntrySize - Return the size of each entry in the jump table. -unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const { +unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const { // The size of a jump table entry is 4 bytes unless the entry is just the // address of a block, in which case it is the pointer size. switch (getEntryKind()) { @@ -553,7 +564,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const { } /// getEntryAlignment - Return the alignment of each entry in the jump table. -unsigned MachineJumpTableInfo::getEntryAlignment(const TargetData &TD) const { +unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const { // The alignment of a jump table entry is the alignment of int32 unless the // entry is just the address of a block, in which case it is the pointer // alignment. @@ -622,7 +633,9 @@ void MachineJumpTableInfo::print(raw_ostream &OS) const { OS << '\n'; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MachineJumpTableInfo::dump() const { print(dbgs()); } +#endif //===----------------------------------------------------------------------===// @@ -657,7 +670,7 @@ MachineConstantPool::~MachineConstantPool() { /// CanShareConstantPoolEntry - Test whether the given two constants /// can be allocated the same constant pool entry. 
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, - const TargetData *TD) { + const DataLayout *TD) { // Handle the trivial case quickly. if (A == B) return true; @@ -681,7 +694,7 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, // Try constant folding a bitcast of both instructions to an integer. If we // get two identical ConstantInt's, then we are good to share them. We use // the constant folding APIs to do this so that we get the benefit of - // TargetData. + // DataLayout. if (isa(A->getType())) A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy, const_cast(A), TD); @@ -749,10 +762,12 @@ void MachineConstantPool::print(raw_ostream &OS) const { if (Constants[i].isMachineConstantPoolEntry()) Constants[i].Val.MachineCPVal->print(OS); else - OS << *(Value*)Constants[i].Val.ConstVal; + OS << *(const Value*)Constants[i].Val.ConstVal; OS << ", align=" << Constants[i].getAlignment(); OS << "\n"; } } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MachineConstantPool::dump() const { print(dbgs()); } +#endif diff --git a/lib/CodeGen/MachineFunctionPrinterPass.cpp b/lib/CodeGen/MachineFunctionPrinterPass.cpp index 0102ac7..ed94efb 100644 --- a/lib/CodeGen/MachineFunctionPrinterPass.cpp +++ b/lib/CodeGen/MachineFunctionPrinterPass.cpp @@ -51,7 +51,7 @@ struct MachineFunctionPrinterPass : public MachineFunctionPass { char MachineFunctionPrinterPass::ID = 0; } -char &MachineFunctionPrinterPassID = MachineFunctionPrinterPass::ID; +char &llvm::MachineFunctionPrinterPassID = MachineFunctionPrinterPass::ID; INITIALIZE_PASS(MachineFunctionPrinterPass, "print-machineinstrs", "Machine Function Printer", false, false) diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp index b166849..ce8d520 100644 --- a/lib/CodeGen/MachineInstr.cpp +++ b/lib/CodeGen/MachineInstr.cpp @@ -111,6 +111,7 @@ void MachineOperand::setIsDef(bool Val) { /// the specified value. If an operand is known to be an immediate already, /// the setImm method should be used. void MachineOperand::ChangeToImmediate(int64_t ImmVal) { + assert((!isReg() || !isTied()) && "Cannot change a tied operand into an imm"); // If this operand is currently a register operand, and if this is in a // function, deregister the operand from the register's use/def list. if (isReg() && isOnRegUseList()) @@ -136,7 +137,8 @@ void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp, RegInfo = &MF->getRegInfo(); // If this operand is already a register operand, remove it from the // register's use/def lists. - if (RegInfo && isReg()) + bool WasReg = isReg(); + if (RegInfo && WasReg) RegInfo->removeRegOperandFromUseList(this); // Change this to a register and set the reg#. @@ -153,6 +155,9 @@ void MachineOperand::ChangeToRegister(unsigned Reg, bool isDef, bool isImp, IsDebug = isDebug; // Ensure isOnRegUseList() returns false. Contents.Reg.Prev = 0; + // Preserve the tie when the operand was already a register. + if (!WasReg) + TiedTo = 0; // If this operand is embedded in a function, add the operand to the // register's use/def list. 
@@ -193,7 +198,8 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const { return !strcmp(getSymbolName(), Other.getSymbolName()) && getOffset() == Other.getOffset(); case MachineOperand::MO_BlockAddress: - return getBlockAddress() == Other.getBlockAddress(); + return getBlockAddress() == Other.getBlockAddress() && + getOffset() == Other.getOffset(); case MO_RegisterMask: return getRegMask() == Other.getRegMask(); case MachineOperand::MO_MCSymbol: @@ -208,8 +214,8 @@ bool MachineOperand::isIdenticalTo(const MachineOperand &Other) const { hash_code llvm::hash_value(const MachineOperand &MO) { switch (MO.getType()) { case MachineOperand::MO_Register: - return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getReg(), - MO.getSubReg(), MO.isDef()); + // Register operands don't have target flags. + return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef()); case MachineOperand::MO_Immediate: return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm()); case MachineOperand::MO_CImmediate: @@ -234,7 +240,7 @@ hash_code llvm::hash_value(const MachineOperand &MO) { MO.getOffset()); case MachineOperand::MO_BlockAddress: return hash_combine(MO.getType(), MO.getTargetFlags(), - MO.getBlockAddress()); + MO.getBlockAddress(), MO.getOffset()); case MachineOperand::MO_RegisterMask: return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getRegMask()); case MachineOperand::MO_Metadata: @@ -262,7 +268,7 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const { OS << PrintReg(getReg(), TRI, getSubReg()); if (isDef() || isKill() || isDead() || isImplicit() || isUndef() || - isInternalRead() || isEarlyClobber()) { + isInternalRead() || isEarlyClobber() || isTied()) { OS << '<'; bool NeedComma = false; if (isDef()) { @@ -282,27 +288,32 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const { NeedComma = true; } - if (isKill() || isDead() || (isUndef() && isUse()) || isInternalRead()) { + if (isKill()) { if (NeedComma) OS << ','; - NeedComma = false; - if (isKill()) { - OS << "kill"; - NeedComma = true; - } - if (isDead()) { - OS << "dead"; - NeedComma = true; - } - if (isUndef() && isUse()) { - if (NeedComma) OS << ','; - OS << "undef"; - NeedComma = true; - } - if (isInternalRead()) { - if (NeedComma) OS << ','; - OS << "internal"; - NeedComma = true; - } + OS << "kill"; + NeedComma = true; + } + if (isDead()) { + if (NeedComma) OS << ','; + OS << "dead"; + NeedComma = true; + } + if (isUndef() && isUse()) { + if (NeedComma) OS << ','; + OS << "undef"; + NeedComma = true; + } + if (isInternalRead()) { + if (NeedComma) OS << ','; + OS << "internal"; + NeedComma = true; + } + if (isTied()) { + if (NeedComma) OS << ','; + OS << "tied"; + if (TiedTo != 15) + OS << unsigned(TiedTo - 1); + NeedComma = true; } OS << '>'; } @@ -352,6 +363,7 @@ void MachineOperand::print(raw_ostream &OS, const TargetMachine *TM) const { case MachineOperand::MO_BlockAddress: OS << '<'; WriteAsOperand(OS, getBlockAddress(), /*PrintType=*/false); + if (getOffset()) OS << "+" << getOffset(); OS << '>'; break; case MachineOperand::MO_RegisterMask: @@ -528,20 +540,6 @@ void MachineInstr::addImplicitDefUseOperands() { /// MachineInstr ctor - This constructor creates a MachineInstr and adds the /// implicit operands. It reserves space for the number of operands specified by /// the MCInstrDesc. 
-MachineInstr::MachineInstr(const MCInstrDesc &tid, bool NoImp) - : MCID(&tid), Flags(0), AsmPrinterFlags(0), - NumMemRefs(0), MemRefs(0), Parent(0) { - unsigned NumImplicitOps = 0; - if (!NoImp) - NumImplicitOps = MCID->getNumImplicitDefs() + MCID->getNumImplicitUses(); - Operands.reserve(NumImplicitOps + MCID->getNumOperands()); - if (!NoImp) - addImplicitDefUseOperands(); - // Make sure that we get added to a machine basicblock - LeakDetector::addGarbageObject(this); -} - -/// MachineInstr ctor - As above, but with a DebugLoc. MachineInstr::MachineInstr(const MCInstrDesc &tid, const DebugLoc dl, bool NoImp) : MCID(&tid), Flags(0), AsmPrinterFlags(0), @@ -559,21 +557,6 @@ MachineInstr::MachineInstr(const MCInstrDesc &tid, const DebugLoc dl, /// MachineInstr ctor - Work exactly the same as the ctor two above, except /// that the MachineInstr is created and added to the end of the specified /// basic block. -MachineInstr::MachineInstr(MachineBasicBlock *MBB, const MCInstrDesc &tid) - : MCID(&tid), Flags(0), AsmPrinterFlags(0), - NumMemRefs(0), MemRefs(0), Parent(0) { - assert(MBB && "Cannot use inserting ctor with null basic block!"); - unsigned NumImplicitOps = - MCID->getNumImplicitDefs() + MCID->getNumImplicitUses(); - Operands.reserve(NumImplicitOps + MCID->getNumOperands()); - addImplicitDefUseOperands(); - // Make sure that we get added to a machine basicblock - LeakDetector::addGarbageObject(this); - MBB->push_back(this); // Add instruction to end of basic block! -} - -/// MachineInstr ctor - As above, but with a DebugLoc. -/// MachineInstr::MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl, const MCInstrDesc &tid) : MCID(&tid), Flags(0), AsmPrinterFlags(0), @@ -673,6 +656,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) { if (!isImpReg && !isInlineAsm()) { while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) { --OpNo; + assert(!Operands[OpNo].isTied() && "Cannot move tied operands"); if (RegInfo) RegInfo->removeRegOperandFromUseList(&Operands[OpNo]); } @@ -708,12 +692,25 @@ void MachineInstr::addOperand(const MachineOperand &Op) { if (Operands[OpNo].isReg()) { // Ensure isOnRegUseList() returns false, regardless of Op's status. Operands[OpNo].Contents.Reg.Prev = 0; + // Ignore existing ties. This is not a property that can be copied. + Operands[OpNo].TiedTo = 0; // Add the new operand to RegInfo. if (RegInfo) RegInfo->addRegOperandToUseList(&Operands[OpNo]); - // If the register operand is flagged as early, mark the operand as such. - if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1) - Operands[OpNo].setIsEarlyClobber(true); + // The MCID operand information isn't accurate until we start adding + // explicit operands. The implicit operands are added first, then the + // explicits are inserted before them. + if (!isImpReg) { + // Tie uses to defs as indicated in MCInstrDesc. + if (Operands[OpNo].isUse()) { + int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO); + if (DefIdx != -1) + tieOperands(DefIdx, OpNo); + } + // If the register operand is flagged as early, mark the operand as such. + if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1) + Operands[OpNo].setIsEarlyClobber(true); + } } // Re-add all the implicit ops. 
@@ -730,6 +727,7 @@ void MachineInstr::addOperand(const MachineOperand &Op) { /// void MachineInstr::RemoveOperand(unsigned OpNo) { assert(OpNo < Operands.size() && "Invalid operand number"); + untieRegOperand(OpNo); MachineRegisterInfo *RegInfo = getRegInfo(); // Special case removing the last one. @@ -752,6 +750,13 @@ void MachineInstr::RemoveOperand(unsigned OpNo) { } } +#ifndef NDEBUG + // Moving tied operands would break the ties. + for (unsigned i = OpNo + 1, e = Operands.size(); i != e; ++i) + if (Operands[i].isReg()) + assert(!Operands[i].isTied() && "Cannot move tied operands"); +#endif + Operands.erase(Operands.begin()+OpNo); if (RegInfo) { @@ -935,6 +940,12 @@ bool MachineInstr::isStackAligningInlineAsm() const { return false; } +InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const { + assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!"); + unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm(); + return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0); +} + int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo) const { assert(isInlineAsm() && "Expected an inline asm instruction"); @@ -1004,9 +1015,10 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx, unsigned MachineInstr::getBundleSize() const { assert(isBundle() && "Expecting a bundle"); - MachineBasicBlock::const_instr_iterator I = *this; + const MachineBasicBlock *MBB = getParent(); + MachineBasicBlock::const_instr_iterator I = *this, E = MBB->instr_end(); unsigned Size = 0; - while ((++I)->isInsideBundle()) { + while ((++I != E) && I->isInsideBundle()) { ++Size; } assert(Size > 1 && "Malformed bundle"); @@ -1114,107 +1126,99 @@ int MachineInstr::findFirstPredOperandIdx() const { return -1; } -/// isRegTiedToUseOperand - Given the index of a register def operand, -/// check if the register def is tied to a source operand, due to either -/// two-address elimination or inline assembly constraints. Returns the -/// first tied use operand index by reference is UseOpIdx is not null. -bool MachineInstr:: -isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx) const { - if (isInlineAsm()) { - assert(DefOpIdx > InlineAsm::MIOp_FirstOperand); - const MachineOperand &MO = getOperand(DefOpIdx); - if (!MO.isReg() || !MO.isDef() || MO.getReg() == 0) - return false; - // Determine the actual operand index that corresponds to this index. - unsigned DefNo = 0; - int FlagIdx = findInlineAsmFlagIdx(DefOpIdx, &DefNo); - if (FlagIdx < 0) - return false; +// MachineOperand::TiedTo is 4 bits wide. +const unsigned TiedMax = 15; - // Which part of the group is DefOpIdx? - unsigned DefPart = DefOpIdx - (FlagIdx + 1); - - for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); - i != e; ++i) { - const MachineOperand &FMO = getOperand(i); - if (!FMO.isImm()) - continue; - if (i+1 >= e || !getOperand(i+1).isReg() || !getOperand(i+1).isUse()) - continue; - unsigned Idx; - if (InlineAsm::isUseOperandTiedToDef(FMO.getImm(), Idx) && - Idx == DefNo) { - if (UseOpIdx) - *UseOpIdx = (unsigned)i + 1 + DefPart; - return true; - } - } - return false; +/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other. +/// +/// Use and def operands can be tied together, indicated by a non-zero TiedTo +/// field. TiedTo can have these values: +/// +/// 0: Operand is not tied to anything. +/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1). +/// TiedMax: Tied to an operand >= TiedMax-1. 
+/// +/// The tied def must be one of the first TiedMax operands on a normal +/// instruction. INLINEASM instructions allow more tied defs. +/// +void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) { + MachineOperand &DefMO = getOperand(DefIdx); + MachineOperand &UseMO = getOperand(UseIdx); + assert(DefMO.isDef() && "DefIdx must be a def operand"); + assert(UseMO.isUse() && "UseIdx must be a use operand"); + assert(!DefMO.isTied() && "Def is already tied to another use"); + assert(!UseMO.isTied() && "Use is already tied to another def"); + + if (DefIdx < TiedMax) + UseMO.TiedTo = DefIdx + 1; + else { + // Inline asm can use the group descriptors to find tied operands, but on + // normal instruction, the tied def must be within the first TiedMax + // operands. + assert(isInlineAsm() && "DefIdx out of range"); + UseMO.TiedTo = TiedMax; } - assert(getOperand(DefOpIdx).isDef() && "DefOpIdx is not a def!"); - const MCInstrDesc &MCID = getDesc(); - for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) { - const MachineOperand &MO = getOperand(i); - if (MO.isReg() && MO.isUse() && - MCID.getOperandConstraint(i, MCOI::TIED_TO) == (int)DefOpIdx) { - if (UseOpIdx) - *UseOpIdx = (unsigned)i; - return true; - } - } - return false; + // UseIdx can be out of range, we'll search for it in findTiedOperandIdx(). + DefMO.TiedTo = std::min(UseIdx + 1, TiedMax); } -/// isRegTiedToDefOperand - Return true if the operand of the specified index -/// is a register use and it is tied to an def operand. It also returns the def -/// operand index by reference. -bool MachineInstr:: -isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx) const { - if (isInlineAsm()) { - const MachineOperand &MO = getOperand(UseOpIdx); - if (!MO.isReg() || !MO.isUse() || MO.getReg() == 0) - return false; +/// Given the index of a tied register operand, find the operand it is tied to. +/// Defs are tied to uses and vice versa. Returns the index of the tied operand +/// which must exist. +unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const { + const MachineOperand &MO = getOperand(OpIdx); + assert(MO.isTied() && "Operand isn't tied"); - // Find the flag operand corresponding to UseOpIdx - int FlagIdx = findInlineAsmFlagIdx(UseOpIdx); - if (FlagIdx < 0) - return false; + // Normally TiedTo is in range. + if (MO.TiedTo < TiedMax) + return MO.TiedTo - 1; - const MachineOperand &UFMO = getOperand(FlagIdx); - unsigned DefNo; - if (InlineAsm::isUseOperandTiedToDef(UFMO.getImm(), DefNo)) { - if (!DefOpIdx) - return true; - - unsigned DefIdx = InlineAsm::MIOp_FirstOperand; - // Remember to adjust the index. First operand is asm string, second is - // the HasSideEffects and AlignStack bits, then there is a flag for each. - while (DefNo) { - const MachineOperand &FMO = getOperand(DefIdx); - assert(FMO.isImm()); - // Skip over this def. - DefIdx += InlineAsm::getNumOperandRegisters(FMO.getImm()) + 1; - --DefNo; - } - *DefOpIdx = DefIdx + UseOpIdx - FlagIdx; - return true; + // Uses on normal instructions can be out of range. + if (!isInlineAsm()) { + // Normal tied defs must be in the 0..TiedMax-1 range. + if (MO.isUse()) + return TiedMax - 1; + // MO is a def. Search for the tied use. 
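// --- Editor's sketch, not part of this patch -------------------------------
// How the TiedTo encoding documented above is consumed through the public
// helpers. In practice addOperand() creates the tie from the MCInstrDesc
// TIED_TO constraint; the explicit call below is only for illustration on a
// hypothetical two-address instruction "r0 = add r0, r1".
static void tieTwoAddressOperands(llvm::MachineInstr *MI) {
  // Operand 0 is the def, operand 1 the use that must share its register.
  MI->tieOperands(/*DefIdx=*/0, /*UseIdx=*/1);
  assert(MI->getOperand(1).isTied() && "use 1 should now be tied");
  // Either side of the tie can be recovered from the other.
  assert(MI->findTiedOperandIdx(1) == 0 && "use 1 is tied to def 0");
  assert(MI->findTiedOperandIdx(0) == 1 && "def 0 is tied back to use 1");
}
// ----------------------------------------------------------------------------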
+ for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) { + const MachineOperand &UseMO = getOperand(i); + if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1) + return i; } - return false; + llvm_unreachable("Can't find tied use"); } - const MCInstrDesc &MCID = getDesc(); - if (UseOpIdx >= MCID.getNumOperands()) - return false; - const MachineOperand &MO = getOperand(UseOpIdx); - if (!MO.isReg() || !MO.isUse()) - return false; - int DefIdx = MCID.getOperandConstraint(UseOpIdx, MCOI::TIED_TO); - if (DefIdx == -1) - return false; - if (DefOpIdx) - *DefOpIdx = (unsigned)DefIdx; - return true; + // Now deal with inline asm by parsing the operand group descriptor flags. + // Find the beginning of each operand group. + SmallVector GroupIdx; + unsigned OpIdxGroup = ~0u; + unsigned NumOps; + for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e; + i += NumOps) { + const MachineOperand &FlagMO = getOperand(i); + assert(FlagMO.isImm() && "Invalid tied operand on inline asm"); + unsigned CurGroup = GroupIdx.size(); + GroupIdx.push_back(i); + NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm()); + // OpIdx belongs to this operand group. + if (OpIdx > i && OpIdx < i + NumOps) + OpIdxGroup = CurGroup; + unsigned TiedGroup; + if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup)) + continue; + // Operands in this group are tied to operands in TiedGroup which must be + // earlier. Find the number of operands between the two groups. + unsigned Delta = i - GroupIdx[TiedGroup]; + + // OpIdx is a use tied to TiedGroup. + if (OpIdxGroup == CurGroup) + return OpIdx - Delta; + + // OpIdx is a def tied to this use group. + if (OpIdxGroup == TiedGroup) + return OpIdx + Delta; + } + llvm_unreachable("Invalid tied operand on inline asm"); } /// clearKillInfo - Clears kill flags on all operands. @@ -1292,7 +1296,12 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII, AliasAnalysis *AA, bool &SawStore) const { // Ignore stuff that we obviously can't move. - if (mayStore() || isCall()) { + // + // Treat volatile loads as stores. This is not strictly necessary for + // volatiles, but it is required for atomic loads. It is not allowed to move + // a load across an atomic load with Ordering > Monotonic. + if (mayStore() || isCall() || + (mayLoad() && hasOrderedMemoryRef())) { SawStore = true; return false; } @@ -1308,8 +1317,8 @@ bool MachineInstr::isSafeToMove(const TargetInstrInfo *TII, // load. if (mayLoad() && !isInvariantLoad(AA)) // Otherwise, this is a real load. If there is a store between the load and - // end of block, or if the load is volatile, we can't move it. - return !SawStore && !hasVolatileMemoryRef(); + // end of block, we can't move it. + return !SawStore; return true; } @@ -1340,11 +1349,11 @@ bool MachineInstr::isSafeToReMat(const TargetInstrInfo *TII, return true; } -/// hasVolatileMemoryRef - Return true if this instruction may have a -/// volatile memory reference, or if the information describing the -/// memory reference is not available. Return false if it is known to -/// have no volatile memory references. -bool MachineInstr::hasVolatileMemoryRef() const { +/// hasOrderedMemoryRef - Return true if this instruction may have an ordered +/// or volatile memory reference, or if the information describing the memory +/// reference is not available. Return false if it is known to have no ordered +/// memory references. 
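// --- Editor's sketch, not part of this patch -------------------------------
// The SawStore protocol around isSafeToMove(), which after this change also
// treats ordered (atomic) and volatile loads as stores via the
// hasOrderedMemoryRef() predicate defined just below. TII and AA come from
// the enclosing pass; the actual code motion is elided.
static void collectMovable(llvm::MachineBasicBlock &MBB,
                           const llvm::TargetInstrInfo *TII,
                           llvm::AliasAnalysis *AA,
                           llvm::SmallVectorImpl<llvm::MachineInstr*> &Out) {
  bool SawStore = false;
  for (llvm::MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
       I != E; ++I)
    // SawStore accumulates over the walk: once a store (or an ordered load)
    // has been seen, later loads are no longer considered movable.
    if (I->isSafeToMove(TII, AA, SawStore))
      Out.push_back(&*I);
}
// ----------------------------------------------------------------------------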
+bool MachineInstr::hasOrderedMemoryRef() const { // An instruction known never to access memory won't have a volatile access. if (!mayStore() && !mayLoad() && @@ -1357,9 +1366,9 @@ bool MachineInstr::hasVolatileMemoryRef() const { if (memoperands_empty()) return true; - // Check the memory reference information for volatile references. + // Check the memory reference information for ordered references. for (mmo_iterator I = memoperands_begin(), E = memoperands_end(); I != E; ++I) - if ((*I)->isVolatile()) + if (!(*I)->isUnordered()) return true; return false; @@ -1461,7 +1470,9 @@ void MachineInstr::copyImplicitOps(const MachineInstr *MI) { } void MachineInstr::dump() const { +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) dbgs() << " " << *this; +#endif } static void printDebugLoc(DebugLoc DL, const MachineFunction *MF, @@ -1540,6 +1551,10 @@ void MachineInstr::print(raw_ostream &OS, const TargetMachine *TM) const { OS << " [sideeffect]"; if (ExtraInfo & InlineAsm::Extra_IsAlignStack) OS << " [alignstack]"; + if (getInlineAsmDialect() == InlineAsm::AD_ATT) + OS << " [attdialect]"; + if (getInlineAsmDialect() == InlineAsm::AD_Intel) + OS << " [inteldialect]"; StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand; FirstOp = false; diff --git a/lib/CodeGen/MachineInstrBundle.cpp b/lib/CodeGen/MachineInstrBundle.cpp index b7de7bf..1f7fbfc 100644 --- a/lib/CodeGen/MachineInstrBundle.cpp +++ b/lib/CodeGen/MachineInstrBundle.cpp @@ -109,10 +109,10 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB, MachineInstrBuilder MIB = BuildMI(MBB, FirstMI, FirstMI->getDebugLoc(), TII->get(TargetOpcode::BUNDLE)); - SmallVector LocalDefs; - SmallSet LocalDefSet; + SmallVector LocalDefs; + SmallSet LocalDefSet; SmallSet DeadDefSet; - SmallSet KilledDefSet; + SmallSet KilledDefSet; SmallVector ExternUses; SmallSet ExternUseSet; SmallSet KilledUseSet; @@ -181,7 +181,7 @@ void llvm::finalizeBundle(MachineBasicBlock &MBB, Defs.clear(); } - SmallSet Added; + SmallSet Added; for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) { unsigned Reg = LocalDefs[i]; if (Added.insert(Reg)) { @@ -248,10 +248,10 @@ bool llvm::finalizeBundles(MachineFunction &MF) { // MachineOperand iterator //===----------------------------------------------------------------------===// -MachineOperandIteratorBase::RegInfo +MachineOperandIteratorBase::VirtRegInfo MachineOperandIteratorBase::analyzeVirtReg(unsigned Reg, SmallVectorImpl > *Ops) { - RegInfo RI = { false, false, false }; + VirtRegInfo RI = { false, false, false }; for(; isValid(); ++*this) { MachineOperand &MO = deref(); if (!MO.isReg() || MO.getReg() != Reg) @@ -276,3 +276,53 @@ MachineOperandIteratorBase::analyzeVirtReg(unsigned Reg, } return RI; } + +MachineOperandIteratorBase::PhysRegInfo +MachineOperandIteratorBase::analyzePhysReg(unsigned Reg, + const TargetRegisterInfo *TRI) { + bool AllDefsDead = true; + PhysRegInfo PRI = {false, false, false, false, false, false, false}; + + assert(TargetRegisterInfo::isPhysicalRegister(Reg) && + "analyzePhysReg not given a physical register!"); + for (; isValid(); ++*this) { + MachineOperand &MO = deref(); + + if (MO.isRegMask() && MO.clobbersPhysReg(Reg)) + PRI.Clobbers = true; // Regmask clobbers Reg. 
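// --- Editor's sketch, not part of this patch -------------------------------
// Reading the PhysRegInfo summary produced by the analyzePhysReg() helper
// being defined here; computeRegisterLiveness() earlier in this patch is its
// main consumer. The diagnostic strings are illustrative only.
static void describePhysRegEffect(llvm::MachineInstr *MI, unsigned PhysReg,
                                  const llvm::TargetRegisterInfo *TRI) {
  llvm::MachineOperandIteratorBase::PhysRegInfo PRI =
      llvm::MIOperands(MI).analyzePhysReg(PhysReg, TRI);
  if (PRI.Kills)
    llvm::dbgs() << "PhysReg is killed by MI\n";
  else if (PRI.Defines && PRI.DefinesDead)
    llvm::dbgs() << "PhysReg is defined by MI but dead afterwards\n";
  else if (PRI.Clobbers)
    llvm::dbgs() << "PhysReg or an aliasing register is clobbered by MI\n";
}
// ----------------------------------------------------------------------------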
+ + if (!MO.isReg()) + continue; + + unsigned MOReg = MO.getReg(); + if (!MOReg || !TargetRegisterInfo::isPhysicalRegister(MOReg)) + continue; + + bool IsRegOrSuperReg = MOReg == Reg || TRI->isSubRegister(MOReg, Reg); + bool IsRegOrOverlapping = MOReg == Reg || TRI->regsOverlap(MOReg, Reg); + + if (IsRegOrSuperReg && MO.readsReg()) { + // Reg or a super-reg is read, and perhaps killed also. + PRI.Reads = true; + PRI.Kills = MO.isKill(); + } if (IsRegOrOverlapping && MO.readsReg()) { + PRI.ReadsOverlap = true;// Reg or an overlapping register is read. + } + + if (!MO.isDef()) + continue; + + if (IsRegOrSuperReg) { + PRI.Defines = true; // Reg or a super-register is defined. + if (!MO.isDead()) + AllDefsDead = false; + } + if (IsRegOrOverlapping) + PRI.Clobbers = true; // Reg or an overlapping reg is defined. + } + + if (AllDefsDead && PRI.Defines) + PRI.DefinesDead = true; // Reg or super-register was defined and was dead. + + return PRI; +} diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp index efec481..169443e 100644 --- a/lib/CodeGen/MachineLICM.cpp +++ b/lib/CodeGen/MachineLICM.cpp @@ -334,7 +334,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: "); else DEBUG(dbgs() << "******** Post-regalloc Machine LICM: "); - DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n"); + DEBUG(dbgs() << MF.getName() << " ********\n"); if (PreRegAlloc) { // Estimate register pressure during pre-regalloc pass. diff --git a/lib/CodeGen/MachineLoopInfo.cpp b/lib/CodeGen/MachineLoopInfo.cpp index 9f3829e..27afeec 100644 --- a/lib/CodeGen/MachineLoopInfo.cpp +++ b/lib/CodeGen/MachineLoopInfo.cpp @@ -74,6 +74,8 @@ MachineBasicBlock *MachineLoop::getBottomBlock() { return BotMBB; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MachineLoop::dump() const { print(dbgs()); } +#endif diff --git a/lib/CodeGen/MachineModuleInfo.cpp b/lib/CodeGen/MachineModuleInfo.cpp index ea98b23..005bf78 100644 --- a/lib/CodeGen/MachineModuleInfo.cpp +++ b/lib/CodeGen/MachineModuleInfo.cpp @@ -25,7 +25,7 @@ using namespace llvm; using namespace llvm::dwarf; -// Handle the Pass registration stuff necessary to use TargetData's. +// Handle the Pass registration stuff necessary to use DataLayout's. INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo", "Machine Module Information", false, false) char MachineModuleInfo::ID = 0; diff --git a/lib/CodeGen/MachineModuleInfoImpls.cpp b/lib/CodeGen/MachineModuleInfoImpls.cpp index 5ab56c0..a1c7e9f 100644 --- a/lib/CodeGen/MachineModuleInfoImpls.cpp +++ b/lib/CodeGen/MachineModuleInfoImpls.cpp @@ -21,8 +21,8 @@ using namespace llvm; //===----------------------------------------------------------------------===// // Out of line virtual method. -void MachineModuleInfoMachO::Anchor() {} -void MachineModuleInfoELF::Anchor() {} +void MachineModuleInfoMachO::anchor() {} +void MachineModuleInfoELF::anchor() {} static int SortSymbolPair(const void *LHS, const void *RHS) { typedef std::pair PairTy; diff --git a/lib/CodeGen/MachinePostDominators.cpp b/lib/CodeGen/MachinePostDominators.cpp new file mode 100644 index 0000000..c3f6e92 --- /dev/null +++ b/lib/CodeGen/MachinePostDominators.cpp @@ -0,0 +1,55 @@ +//===- MachinePostDominators.cpp -Machine Post Dominator Calculation ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file implements simple dominator construction algorithms for finding +// post dominators on machine functions. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/MachinePostDominators.h" + +using namespace llvm; + +char MachinePostDominatorTree::ID = 0; + +//declare initializeMachinePostDominatorTreePass +INITIALIZE_PASS(MachinePostDominatorTree, "machinepostdomtree", + "MachinePostDominator Tree Construction", true, true) + +MachinePostDominatorTree::MachinePostDominatorTree() : MachineFunctionPass(ID) { + initializeMachinePostDominatorTreePass(*PassRegistry::getPassRegistry()); + DT = new DominatorTreeBase(true); //true indicate + // postdominator +} + +FunctionPass * +MachinePostDominatorTree::createMachinePostDominatorTreePass() { + return new MachinePostDominatorTree(); +} + +bool +MachinePostDominatorTree::runOnMachineFunction(MachineFunction &F) { + DT->recalculate(F); + return false; +} + +MachinePostDominatorTree::~MachinePostDominatorTree() { + delete DT; +} + +void +MachinePostDominatorTree::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + MachineFunctionPass::getAnalysisUsage(AU); +} + +void +MachinePostDominatorTree::print(llvm::raw_ostream &OS, const Module *M) const { + DT->print(OS); +} diff --git a/lib/CodeGen/MachineRegisterInfo.cpp b/lib/CodeGen/MachineRegisterInfo.cpp index 5fb938f..95d7a7d 100644 --- a/lib/CodeGen/MachineRegisterInfo.cpp +++ b/lib/CodeGen/MachineRegisterInfo.cpp @@ -21,7 +21,7 @@ MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) : TRI(&TRI), IsSSA(true), TracksLiveness(true) { VRegInfo.reserve(256); RegAllocHints.reserve(256); - UsedPhysRegs.resize(TRI.getNumRegs()); + UsedRegUnits.resize(TRI.getNumRegUnits()); UsedPhysRegMask.resize(TRI.getNumRegs()); // Create the physreg use/def lists. @@ -32,7 +32,7 @@ MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) MachineRegisterInfo::~MachineRegisterInfo() { #ifndef NDEBUG clearVirtRegs(); - for (unsigned i = 0, e = UsedPhysRegs.size(); i != e; ++i) + for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) assert(!PhysRegUseDefLists[i] && "PhysRegUseDefLists has entries after all instructions are deleted"); #endif @@ -306,22 +306,18 @@ void MachineRegisterInfo::dumpUses(unsigned Reg) const { void MachineRegisterInfo::freezeReservedRegs(const MachineFunction &MF) { ReservedRegs = TRI->getReservedRegs(MF); + assert(ReservedRegs.size() == TRI->getNumRegs() && + "Invalid ReservedRegs vector from target"); } bool MachineRegisterInfo::isConstantPhysReg(unsigned PhysReg, const MachineFunction &MF) const { assert(TargetRegisterInfo::isPhysicalRegister(PhysReg)); - // Check if any overlapping register is modified. + // Check if any overlapping register is modified, or allocatable so it may be + // used later. for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) - if (!def_empty(*AI)) - return false; - - // Check if any overlapping register is allocatable so it may be used later. 
- if (AllocatableRegs.empty()) - AllocatableRegs = TRI->getAllocatableSet(MF); - for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) - if (AllocatableRegs.test(*AI)) + if (!def_empty(*AI) || isAllocatable(*AI)) return false; return true; } diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp index a1dc948..a4817d0 100644 --- a/lib/CodeGen/MachineScheduler.cpp +++ b/lib/CodeGen/MachineScheduler.cpp @@ -18,11 +18,8 @@ #include "llvm/CodeGen/MachineScheduler.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/RegisterClassInfo.h" -#include "llvm/CodeGen/RegisterPressure.h" -#include "llvm/CodeGen/ScheduleDAGInstrs.h" +#include "llvm/CodeGen/ScheduleDAGILP.h" #include "llvm/CodeGen/ScheduleHazardRecognizer.h" -#include "llvm/Target/TargetInstrInfo.h" -#include "llvm/MC/MCInstrItineraries.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" @@ -35,10 +32,12 @@ using namespace llvm; -static cl::opt ForceTopDown("misched-topdown", cl::Hidden, - cl::desc("Force top-down list scheduling")); -static cl::opt ForceBottomUp("misched-bottomup", cl::Hidden, - cl::desc("Force bottom-up list scheduling")); +namespace llvm { +cl::opt ForceTopDown("misched-topdown", cl::Hidden, + cl::desc("Force top-down list scheduling")); +cl::opt ForceBottomUp("misched-bottomup", cl::Hidden, + cl::desc("Force bottom-up list scheduling")); +} #ifndef NDEBUG static cl::opt ViewMISchedDAGs("view-misched-dags", cl::Hidden, @@ -50,6 +49,15 @@ static cl::opt MISchedCutoff("misched-cutoff", cl::Hidden, static bool ViewMISchedDAGs = false; #endif // NDEBUG +// Threshold to very roughly model an out-of-order processor's instruction +// buffers. If the actual value of this threshold matters much in practice, then +// it can be specified by the machine model. For now, it's an experimental +// tuning knob to determine when and if it matters. +static cl::opt ILPWindow("ilp-window", cl::Hidden, + cl::desc("Allow expected latency to exceed the critical path by N cycles " + "before attempting to balance ILP"), + cl::init(10U)); + //===----------------------------------------------------------------------===// // Machine Instruction Scheduling Pass and Registry //===----------------------------------------------------------------------===// @@ -221,7 +229,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { // The Scheduler may insert instructions during either schedule() or // exitRegion(), even for empty regions. So the local iterators 'I' and // 'RegionEnd' are invalid across these calls. - unsigned RemainingCount = MBB->size(); + unsigned RemainingInstrs = MBB->size(); for(MachineBasicBlock::iterator RegionEnd = MBB->end(); RegionEnd != MBB->begin(); RegionEnd = Scheduler->begin()) { @@ -230,19 +238,19 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { || TII->isSchedulingBoundary(llvm::prior(RegionEnd), MBB, *MF)) { --RegionEnd; // Count the boundary instruction. - --RemainingCount; + --RemainingInstrs; } // The next region starts above the previous region. Look backward in the // instruction stream until we find the nearest boundary. MachineBasicBlock::iterator I = RegionEnd; - for(;I != MBB->begin(); --I, --RemainingCount) { + for(;I != MBB->begin(); --I, --RemainingInstrs) { if (TII->isSchedulingBoundary(llvm::prior(I), MBB, *MF)) break; } // Notify the scheduler of the region, even if we may skip scheduling // it. Perhaps it still needs to be bundled. 
- Scheduler->enterRegion(MBB, I, RegionEnd, RemainingCount); + Scheduler->enterRegion(MBB, I, RegionEnd, RemainingInstrs); // Skip empty scheduling regions (0 or 1 schedulable instructions). if (I == RegionEnd || I == llvm::prior(RegionEnd)) { @@ -252,11 +260,11 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { continue; } DEBUG(dbgs() << "********** MI Scheduling **********\n"); - DEBUG(dbgs() << MF->getFunction()->getName() + DEBUG(dbgs() << MF->getName() << ":BB#" << MBB->getNumber() << "\n From: " << *I << " To: "; if (RegionEnd != MBB->end()) dbgs() << *RegionEnd; else dbgs() << "End"; - dbgs() << " Remaining: " << RemainingCount << "\n"); + dbgs() << " Remaining: " << RemainingInstrs << "\n"); // Schedule a region: possibly reorder instructions. // This invalidates 'RegionEnd' and 'I'. @@ -269,7 +277,7 @@ bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) { // scheduler for the top of it's scheduled region. RegionEnd = Scheduler->begin(); } - assert(RemainingCount == 0 && "Instruction count mismatch!"); + assert(RemainingInstrs == 0 && "Instruction count mismatch!"); Scheduler->finishBlock(); } Scheduler->finalizeSchedule(); @@ -281,157 +289,20 @@ void MachineScheduler::print(raw_ostream &O, const Module* m) const { // unimplemented } -//===----------------------------------------------------------------------===// -// MachineSchedStrategy - Interface to a machine scheduling algorithm. -//===----------------------------------------------------------------------===// - -namespace { -class ScheduleDAGMI; - -/// MachineSchedStrategy - Interface used by ScheduleDAGMI to drive the selected -/// scheduling algorithm. -/// -/// If this works well and targets wish to reuse ScheduleDAGMI, we may expose it -/// in ScheduleDAGInstrs.h -class MachineSchedStrategy { -public: - virtual ~MachineSchedStrategy() {} - - /// Initialize the strategy after building the DAG for a new region. - virtual void initialize(ScheduleDAGMI *DAG) = 0; - - /// Pick the next node to schedule, or return NULL. Set IsTopNode to true to - /// schedule the node at the top of the unscheduled region. Otherwise it will - /// be scheduled at the bottom. - virtual SUnit *pickNode(bool &IsTopNode) = 0; - - /// Notify MachineSchedStrategy that ScheduleDAGMI has scheduled a node. - virtual void schedNode(SUnit *SU, bool IsTopNode) = 0; - - /// When all predecessor dependencies have been resolved, free this node for - /// top-down scheduling. - virtual void releaseTopNode(SUnit *SU) = 0; - /// When all successor dependencies have been resolved, free this node for - /// bottom-up scheduling. - virtual void releaseBottomNode(SUnit *SU) = 0; -}; -} // namespace +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +void ReadyQueue::dump() { + dbgs() << Name << ": "; + for (unsigned i = 0, e = Queue.size(); i < e; ++i) + dbgs() << Queue[i]->NodeNum << " "; + dbgs() << "\n"; +} +#endif //===----------------------------------------------------------------------===// // ScheduleDAGMI - Base class for MachineInstr scheduling with LiveIntervals // preservation. //===----------------------------------------------------------------------===// -namespace { -/// ScheduleDAGMI is an implementation of ScheduleDAGInstrs that schedules -/// machine instructions while updating LiveIntervals. 
-class ScheduleDAGMI : public ScheduleDAGInstrs { - AliasAnalysis *AA; - RegisterClassInfo *RegClassInfo; - MachineSchedStrategy *SchedImpl; - - MachineBasicBlock::iterator LiveRegionEnd; - - /// Register pressure in this region computed by buildSchedGraph. - IntervalPressure RegPressure; - RegPressureTracker RPTracker; - - /// List of pressure sets that exceed the target's pressure limit before - /// scheduling, listed in increasing set ID order. Each pressure set is paired - /// with its max pressure in the currently scheduled regions. - std::vector RegionCriticalPSets; - - /// The top of the unscheduled zone. - MachineBasicBlock::iterator CurrentTop; - IntervalPressure TopPressure; - RegPressureTracker TopRPTracker; - - /// The bottom of the unscheduled zone. - MachineBasicBlock::iterator CurrentBottom; - IntervalPressure BotPressure; - RegPressureTracker BotRPTracker; - -#ifndef NDEBUG - /// The number of instructions scheduled so far. Used to cut off the - /// scheduler at the point determined by misched-cutoff. - unsigned NumInstrsScheduled; -#endif -public: - ScheduleDAGMI(MachineSchedContext *C, MachineSchedStrategy *S): - ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, /*IsPostRA=*/false, C->LIS), - AA(C->AA), RegClassInfo(C->RegClassInfo), SchedImpl(S), - RPTracker(RegPressure), CurrentTop(), TopRPTracker(TopPressure), - CurrentBottom(), BotRPTracker(BotPressure) { -#ifndef NDEBUG - NumInstrsScheduled = 0; -#endif - } - - ~ScheduleDAGMI() { - delete SchedImpl; - } - - MachineBasicBlock::iterator top() const { return CurrentTop; } - MachineBasicBlock::iterator bottom() const { return CurrentBottom; } - - /// Implement the ScheduleDAGInstrs interface for handling the next scheduling - /// region. This covers all instructions in a block, while schedule() may only - /// cover a subset. - void enterRegion(MachineBasicBlock *bb, - MachineBasicBlock::iterator begin, - MachineBasicBlock::iterator end, - unsigned endcount); - - /// Implement ScheduleDAGInstrs interface for scheduling a sequence of - /// reorderable instructions. - void schedule(); - - /// Get current register pressure for the top scheduled instructions. - const IntervalPressure &getTopPressure() const { return TopPressure; } - const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; } - - /// Get current register pressure for the bottom scheduled instructions. - const IntervalPressure &getBotPressure() const { return BotPressure; } - const RegPressureTracker &getBotRPTracker() const { return BotRPTracker; } - - /// Get register pressure for the entire scheduling region before scheduling. - const IntervalPressure &getRegPressure() const { return RegPressure; } - - const std::vector &getRegionCriticalPSets() const { - return RegionCriticalPSets; - } - - /// getIssueWidth - Return the max instructions per scheduling group. - unsigned getIssueWidth() const { - return (InstrItins && InstrItins->SchedModel) - ? InstrItins->SchedModel->IssueWidth : 1; - } - - /// getNumMicroOps - Return the number of issue slots required for this MI. - unsigned getNumMicroOps(MachineInstr *MI) const { - if (!InstrItins) return 1; - int UOps = InstrItins->getNumMicroOps(MI->getDesc().getSchedClass()); - return (UOps >= 0) ? 
UOps : TII->getNumMicroOps(InstrItins, MI); - } - -protected: - void initRegPressure(); - void updateScheduledPressure(std::vector NewMaxPressure); - - void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos); - bool checkSchedLimit(); - - void releaseRoots(); - - void releaseSucc(SUnit *SU, SDep *SuccEdge); - void releaseSuccessors(SUnit *SU); - void releasePred(SUnit *SU, SDep *PredEdge); - void releasePredecessors(SUnit *SU); - - void placeDebugValues(); -}; -} // namespace - /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When /// NumPredsLeft reaches zero, release the successor node. /// @@ -498,7 +369,7 @@ void ScheduleDAGMI::moveInstruction(MachineInstr *MI, BB->splice(InsertPos, BB, MI); // Update LiveIntervals - LIS->handleMove(MI); + LIS->handleMove(MI, /*UpdateFlags=*/true); // Recede RegionBegin if an instruction moves above the first. if (RegionBegin == InsertPos) @@ -565,6 +436,9 @@ void ScheduleDAGMI::initRegPressure() { std::vector RegionPressure = RPTracker.getPressure().MaxSetPressure; for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) { unsigned Limit = TRI->getRegPressureSetLimit(i); + DEBUG(dbgs() << TRI->getRegPressureSetName(i) + << "Limit " << Limit + << " Actual " << RegionPressure[i] << "\n"); if (RegionPressure[i] > Limit) RegionCriticalPSets.push_back(PressureElement(i, 0)); } @@ -587,6 +461,74 @@ updateScheduledPressure(std::vector NewMaxPressure) { } } +/// schedule - Called back from MachineScheduler::runOnMachineFunction +/// after setting up the current scheduling region. [RegionBegin, RegionEnd) +/// only includes instructions that have DAG nodes, not scheduling boundaries. +/// +/// This is a skeletal driver, with all the functionality pushed into helpers, +/// so that it can be easilly extended by experimental schedulers. Generally, +/// implementing MachineSchedStrategy should be sufficient to implement a new +/// scheduling algorithm. However, if a scheduler further subclasses +/// ScheduleDAGMI then it will want to override this virtual method in order to +/// update any specialized state. +void ScheduleDAGMI::schedule() { + buildDAGWithRegPressure(); + + postprocessDAG(); + + DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) + SUnits[su].dumpAll(this)); + + if (ViewMISchedDAGs) viewGraph(); + + initQueues(); + + bool IsTopNode = false; + while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) { + assert(!SU->isScheduled && "Node already scheduled"); + if (!checkSchedLimit()) + break; + + scheduleMI(SU, IsTopNode); + + updateQueues(SU, IsTopNode); + } + assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); + + placeDebugValues(); + + DEBUG({ + unsigned BBNum = top()->getParent()->getNumber(); + dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n"; + dumpSchedule(); + dbgs() << '\n'; + }); +} + +/// Build the DAG and setup three register pressure trackers. +void ScheduleDAGMI::buildDAGWithRegPressure() { + // Initialize the register pressure tracker used by buildSchedGraph. + RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); + + // Account for liveness generate by the region boundary. + if (LiveRegionEnd != RegionEnd) + RPTracker.recede(); + + // Build the DAG, and compute current register pressure. + buildSchedGraph(AA, &RPTracker); + if (ViewMISchedDAGs) viewGraph(); + + // Initialize top/bottom trackers after computing region pressure. + initRegPressure(); +} + +/// Apply each ScheduleDAGMutation step in order. 
+void ScheduleDAGMI::postprocessDAG() { + for (unsigned i = 0, e = Mutations.size(); i < e; ++i) { + Mutations[i]->apply(this); + } +} + // Release all DAG roots for scheduling. void ScheduleDAGMI::releaseRoots() { SmallVector BotRoots; @@ -607,28 +549,10 @@ void ScheduleDAGMI::releaseRoots() { SchedImpl->releaseBottomNode(*I); } -/// schedule - Called back from MachineScheduler::runOnMachineFunction -/// after setting up the current scheduling region. [RegionBegin, RegionEnd) -/// only includes instructions that have DAG nodes, not scheduling boundaries. -void ScheduleDAGMI::schedule() { - // Initialize the register pressure tracker used by buildSchedGraph. - RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd); - - // Account for liveness generate by the region boundary. - if (LiveRegionEnd != RegionEnd) - RPTracker.recede(); - - // Build the DAG, and compute current register pressure. - buildSchedGraph(AA, &RPTracker); - - // Initialize top/bottom trackers after computing region pressure. - initRegPressure(); - - DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) - SUnits[su].dumpAll(this)); - - if (ViewMISchedDAGs) viewGraph(); +/// Identify DAG roots and setup scheduler queues. +void ScheduleDAGMI::initQueues() { + // Initialize the strategy before modifying the DAG. SchedImpl->initialize(this); // Release edges from the special Entry node or to the special Exit node. @@ -638,61 +562,64 @@ void ScheduleDAGMI::schedule() { // Release all DAG roots for scheduling. releaseRoots(); + SchedImpl->registerRoots(); + CurrentTop = nextIfDebug(RegionBegin, RegionEnd); CurrentBottom = RegionEnd; - bool IsTopNode = false; - while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) { - if (!checkSchedLimit()) - break; - - // Move the instruction to its new location in the instruction stream. - MachineInstr *MI = SU->getInstr(); - - if (IsTopNode) { - assert(SU->isTopReady() && "node still has unscheduled dependencies"); - if (&*CurrentTop == MI) - CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); - else { - moveInstruction(MI, CurrentTop); - TopRPTracker.setPos(MI); - } +} - // Update top scheduled pressure. - TopRPTracker.advance(); - assert(TopRPTracker.getPos() == CurrentTop && "out of sync"); - updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure); +/// Move an instruction and update register pressure. +void ScheduleDAGMI::scheduleMI(SUnit *SU, bool IsTopNode) { + // Move the instruction to its new location in the instruction stream. + MachineInstr *MI = SU->getInstr(); - // Release dependent instructions for scheduling. - releaseSuccessors(SU); + if (IsTopNode) { + assert(SU->isTopReady() && "node still has unscheduled dependencies"); + if (&*CurrentTop == MI) + CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom); + else { + moveInstruction(MI, CurrentTop); + TopRPTracker.setPos(MI); } + + // Update top scheduled pressure. 
+ TopRPTracker.advance(); + assert(TopRPTracker.getPos() == CurrentTop && "out of sync"); + updateScheduledPressure(TopRPTracker.getPressure().MaxSetPressure); + } + else { + assert(SU->isBottomReady() && "node still has unscheduled dependencies"); + MachineBasicBlock::iterator priorII = + priorNonDebug(CurrentBottom, CurrentTop); + if (&*priorII == MI) + CurrentBottom = priorII; else { - assert(SU->isBottomReady() && "node still has unscheduled dependencies"); - MachineBasicBlock::iterator priorII = - priorNonDebug(CurrentBottom, CurrentTop); - if (&*priorII == MI) - CurrentBottom = priorII; - else { - if (&*CurrentTop == MI) { - CurrentTop = nextIfDebug(++CurrentTop, priorII); - TopRPTracker.setPos(CurrentTop); - } - moveInstruction(MI, CurrentBottom); - CurrentBottom = MI; + if (&*CurrentTop == MI) { + CurrentTop = nextIfDebug(++CurrentTop, priorII); + TopRPTracker.setPos(CurrentTop); } - // Update bottom scheduled pressure. - BotRPTracker.recede(); - assert(BotRPTracker.getPos() == CurrentBottom && "out of sync"); - updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure); - - // Release dependent instructions for scheduling. - releasePredecessors(SU); + moveInstruction(MI, CurrentBottom); + CurrentBottom = MI; } - SU->isScheduled = true; - SchedImpl->schedNode(SU, IsTopNode); + // Update bottom scheduled pressure. + BotRPTracker.recede(); + assert(BotRPTracker.getPos() == CurrentBottom && "out of sync"); + updateScheduledPressure(BotRPTracker.getPressure().MaxSetPressure); } - assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); +} - placeDebugValues(); +/// Update scheduler queues after scheduling an instruction. +void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) { + // Release dependent instructions for scheduling. + if (IsTopNode) + releaseSuccessors(SU); + else + releasePredecessors(SU); + + SU->isScheduled = true; + + // Notify the scheduling strategy after updating the DAG. + SchedImpl->schedNode(SU, IsTopNode); } /// Reinsert any remaining debug_values, just like the PostRA scheduler. @@ -716,91 +643,146 @@ void ScheduleDAGMI::placeDebugValues() { FirstDbgValue = NULL; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +void ScheduleDAGMI::dumpSchedule() const { + for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) { + if (SUnit *SU = getSUnit(&(*MI))) + SU->dump(this); + else + dbgs() << "Missing SUnit\n"; + } +} +#endif + //===----------------------------------------------------------------------===// // ConvergingScheduler - Implementation of the standard MachineSchedStrategy. //===----------------------------------------------------------------------===// namespace { -/// ReadyQueue encapsulates vector of "ready" SUnits with basic convenience -/// methods for pushing and removing nodes. ReadyQueue's are uniquely identified -/// by an ID. SUnit::NodeQueueId is a mask of the ReadyQueues the SUnit is in. -class ReadyQueue { - unsigned ID; - std::string Name; - std::vector Queue; - +/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance +/// the schedule. +class ConvergingScheduler : public MachineSchedStrategy { public: - ReadyQueue(unsigned id, const Twine &name): ID(id), Name(name.str()) {} - - unsigned getID() const { return ID; } - - StringRef getName() const { return Name; } - - // SU is in this queue if it's NodeQueueID is a superset of this ID. 
- bool isInQueue(SUnit *SU) const { return (SU->NodeQueueId & ID); } - - bool empty() const { return Queue.empty(); } - - unsigned size() const { return Queue.size(); } - - typedef std::vector::iterator iterator; + /// Represent the type of SchedCandidate found within a single queue. + /// pickNodeBidirectional depends on these listed by decreasing priority. + enum CandReason { + NoCand, SingleExcess, SingleCritical, ResourceReduce, ResourceDemand, + BotHeightReduce, BotPathReduce, TopDepthReduce, TopPathReduce, + SingleMax, MultiPressure, NextDefUse, NodeOrder}; - iterator begin() { return Queue.begin(); } +#ifndef NDEBUG + static const char *getReasonStr(ConvergingScheduler::CandReason Reason); +#endif - iterator end() { return Queue.end(); } + /// Policy for scheduling the next instruction in the candidate's zone. + struct CandPolicy { + bool ReduceLatency; + unsigned ReduceResIdx; + unsigned DemandResIdx; - iterator find(SUnit *SU) { - return std::find(Queue.begin(), Queue.end(), SU); - } + CandPolicy(): ReduceLatency(false), ReduceResIdx(0), DemandResIdx(0) {} + }; - void push(SUnit *SU) { - Queue.push_back(SU); - SU->NodeQueueId |= ID; - } + /// Status of an instruction's critical resource consumption. + struct SchedResourceDelta { + // Count critical resources in the scheduled region required by SU. + unsigned CritResources; - void remove(iterator I) { - (*I)->NodeQueueId &= ~ID; - *I = Queue.back(); - Queue.pop_back(); - } + // Count critical resources from another region consumed by SU. + unsigned DemandedResources; - void dump() { - dbgs() << Name << ": "; - for (unsigned i = 0, e = Queue.size(); i < e; ++i) - dbgs() << Queue[i]->NodeNum << " "; - dbgs() << "\n"; - } -}; + SchedResourceDelta(): CritResources(0), DemandedResources(0) {} -/// ConvergingScheduler shrinks the unscheduled zone using heuristics to balance -/// the schedule. -class ConvergingScheduler : public MachineSchedStrategy { + bool operator==(const SchedResourceDelta &RHS) const { + return CritResources == RHS.CritResources + && DemandedResources == RHS.DemandedResources; + } + bool operator!=(const SchedResourceDelta &RHS) const { + return !operator==(RHS); + } + }; /// Store the state used by ConvergingScheduler heuristics, required for the /// lifetime of one invocation of pickNode(). struct SchedCandidate { + CandPolicy Policy; + // The best SUnit candidate. SUnit *SU; + // The reason for this candidate. + CandReason Reason; + // Register pressure values for the best candidate. RegPressureDelta RPDelta; - SchedCandidate(): SU(NULL) {} + // Critical resource consumption of the best candidate. + SchedResourceDelta ResDelta; + + SchedCandidate(const CandPolicy &policy) + : Policy(policy), SU(NULL), Reason(NoCand) {} + + bool isValid() const { return SU; } + + // Copy the status of another candidate without changing policy. + void setBest(SchedCandidate &Best) { + assert(Best.Reason != NoCand && "uninitialized Sched candidate"); + SU = Best.SU; + Reason = Best.Reason; + RPDelta = Best.RPDelta; + ResDelta = Best.ResDelta; + } + + void initResourceDelta(const ScheduleDAGMI *DAG, + const TargetSchedModel *SchedModel); + }; + + /// Summarize the unscheduled region. + struct SchedRemainder { + // Critical path through the DAG in expected latency. + unsigned CriticalPath; + + // Unscheduled resources + SmallVector RemainingCounts; + // Critical resource for the unscheduled zone. + unsigned CritResIdx; + // Number of micro-ops left to schedule. 
+ unsigned RemainingMicroOps; + // Is the unscheduled zone resource limited. + bool IsResourceLimited; + + unsigned MaxRemainingCount; + + void reset() { + CriticalPath = 0; + RemainingCounts.clear(); + CritResIdx = 0; + RemainingMicroOps = 0; + IsResourceLimited = false; + MaxRemainingCount = 0; + } + + SchedRemainder() { reset(); } + + void init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel); }; - /// Represent the type of SchedCandidate found within a single queue. - enum CandResult { - NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure }; /// Each Scheduling boundary is associated with ready queues. It tracks the - /// current cycle in whichever direction at has moved, and maintains the state + /// current cycle in the direction of movement, and maintains the state /// of "hazards" and other interlocks at the current cycle. struct SchedBoundary { ScheduleDAGMI *DAG; + const TargetSchedModel *SchedModel; + SchedRemainder *Rem; ReadyQueue Available; ReadyQueue Pending; bool CheckPending; + // For heuristics, keep a list of the nodes that immediately depend on the + // most recently scheduled node. + SmallPtrSet NextSUs; + ScheduleHazardRecognizer *HazardRec; unsigned CurrCycle; @@ -809,29 +791,88 @@ class ConvergingScheduler : public MachineSchedStrategy { /// MinReadyCycle - Cycle of the soonest available instruction. unsigned MinReadyCycle; + // The expected latency of the critical path in this scheduled zone. + unsigned ExpectedLatency; + + // Resources used in the scheduled zone beyond this boundary. + SmallVector ResourceCounts; + + // Cache the critical resources ID in this scheduled zone. + unsigned CritResIdx; + + // Is the scheduled region resource limited vs. latency limited. + bool IsResourceLimited; + + unsigned ExpectedCount; + + // Policy flag: attempt to find ILP until expected latency is covered. + bool ShouldIncreaseILP; + +#ifndef NDEBUG // Remember the greatest min operand latency. unsigned MaxMinLatency; +#endif + + void reset() { + Available.clear(); + Pending.clear(); + CheckPending = false; + NextSUs.clear(); + HazardRec = 0; + CurrCycle = 0; + IssueCount = 0; + MinReadyCycle = UINT_MAX; + ExpectedLatency = 0; + ResourceCounts.resize(1); + assert(!ResourceCounts[0] && "nonzero count for bad resource"); + CritResIdx = 0; + IsResourceLimited = false; + ExpectedCount = 0; + ShouldIncreaseILP = false; +#ifndef NDEBUG + MaxMinLatency = 0; +#endif + // Reserve a zero-count for invalid CritResIdx. + ResourceCounts.resize(1); + } /// Pending queues extend the ready queues with the same ID and the /// PendingFlag set. 
SchedBoundary(unsigned ID, const Twine &Name): - DAG(0), Available(ID, Name+".A"), - Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P"), - CheckPending(false), HazardRec(0), CurrCycle(0), IssueCount(0), - MinReadyCycle(UINT_MAX), MaxMinLatency(0) {} + DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"), + Pending(ID << ConvergingScheduler::LogMaxQID, Name+".P") { + reset(); + } ~SchedBoundary() { delete HazardRec; } + void init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, + SchedRemainder *rem); + bool isTop() const { return Available.getID() == ConvergingScheduler::TopQID; } + unsigned getUnscheduledLatency(SUnit *SU) const { + if (isTop()) + return SU->getHeight(); + return SU->getDepth(); + } + + unsigned getCriticalCount() const { + return ResourceCounts[CritResIdx]; + } + bool checkHazard(SUnit *SU); + void checkILPPolicy(); + void releaseNode(SUnit *SU, unsigned ReadyCycle); void bumpCycle(); + void countResource(unsigned PIdx, unsigned Cycles); + void bumpNode(SUnit *SU); void releasePending(); @@ -841,10 +882,13 @@ class ConvergingScheduler : public MachineSchedStrategy { SUnit *pickOnlyChoice(); }; +private: ScheduleDAGMI *DAG; + const TargetSchedModel *SchedModel; const TargetRegisterInfo *TRI; // State of the top and bottom scheduled instruction boundaries. + SchedRemainder Rem; SchedBoundary Top; SchedBoundary Bot; @@ -857,7 +901,7 @@ public: }; ConvergingScheduler(): - DAG(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {} + DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {} virtual void initialize(ScheduleDAGMI *dag); @@ -869,28 +913,80 @@ public: virtual void releaseBottomNode(SUnit *SU); + virtual void registerRoots(); + protected: - SUnit *pickNodeBidrectional(bool &IsTopNode); + void balanceZones( + ConvergingScheduler::SchedBoundary &CriticalZone, + ConvergingScheduler::SchedCandidate &CriticalCand, + ConvergingScheduler::SchedBoundary &OppositeZone, + ConvergingScheduler::SchedCandidate &OppositeCand); + + void checkResourceLimits(ConvergingScheduler::SchedCandidate &TopCand, + ConvergingScheduler::SchedCandidate &BotCand); + + void tryCandidate(SchedCandidate &Cand, + SchedCandidate &TryCand, + SchedBoundary &Zone, + const RegPressureTracker &RPTracker, + RegPressureTracker &TempTracker); + + SUnit *pickNodeBidirectional(bool &IsTopNode); + + void pickNodeFromQueue(SchedBoundary &Zone, + const RegPressureTracker &RPTracker, + SchedCandidate &Candidate); - CandResult pickNodeFromQueue(ReadyQueue &Q, - const RegPressureTracker &RPTracker, - SchedCandidate &Candidate); #ifndef NDEBUG - void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU, - PressureElement P = PressureElement()); + void traceCandidate(const SchedCandidate &Cand, const SchedBoundary &Zone); #endif }; } // namespace +void ConvergingScheduler::SchedRemainder:: +init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) { + reset(); + if (!SchedModel->hasInstrSchedModel()) + return; + RemainingCounts.resize(SchedModel->getNumProcResourceKinds()); + for (std::vector::iterator + I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) { + const MCSchedClassDesc *SC = DAG->getSchedClass(&*I); + RemainingMicroOps += SchedModel->getNumMicroOps(I->getInstr(), SC); + for (TargetSchedModel::ProcResIter + PI = SchedModel->getWriteProcResBegin(SC), + PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { + unsigned PIdx = PI->ProcResourceIdx; + unsigned Factor = SchedModel->getResourceFactor(PIdx); + RemainingCounts[PIdx] += (Factor * PI->Cycles); 
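  // Counts accumulate in factor-scaled units (Cycles * getResourceFactor) so
  // that totals for different processor resources, and micro-op totals scaled
  // by getMicroOpFactor, remain directly comparable when searching for the
  // critical resource.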
+ } + } +} + +void ConvergingScheduler::SchedBoundary:: +init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) { + reset(); + DAG = dag; + SchedModel = smodel; + Rem = rem; + if (SchedModel->hasInstrSchedModel()) + ResourceCounts.resize(SchedModel->getNumProcResourceKinds()); +} + void ConvergingScheduler::initialize(ScheduleDAGMI *dag) { DAG = dag; + SchedModel = DAG->getSchedModel(); TRI = DAG->TRI; - Top.DAG = dag; - Bot.DAG = dag; + Rem.init(DAG, SchedModel); + Top.init(DAG, SchedModel, &Rem); + Bot.init(DAG, SchedModel, &Rem); + + // Initialize resource counts. - // Initialize the HazardRecognizers. + // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or + // are disabled, then these HazardRecs will be disabled. + const InstrItineraryData *Itin = SchedModel->getInstrItineraries(); const TargetMachine &TM = DAG->MF.getTarget(); - const InstrItineraryData *Itin = TM.getInstrItineraryData(); Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); @@ -905,13 +1001,12 @@ void ConvergingScheduler::releaseTopNode(SUnit *SU) { for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) { unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle; - unsigned Latency = - DAG->computeOperandLatency(I->getSUnit(), SU, *I, /*FindMin=*/true); + unsigned MinLatency = I->getMinLatency(); #ifndef NDEBUG - Top.MaxMinLatency = std::max(Latency, Top.MaxMinLatency); + Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency); #endif - if (SU->TopReadyCycle < PredReadyCycle + Latency) - SU->TopReadyCycle = PredReadyCycle + Latency; + if (SU->TopReadyCycle < PredReadyCycle + MinLatency) + SU->TopReadyCycle = PredReadyCycle + MinLatency; } Top.releaseNode(SU, SU->TopReadyCycle); } @@ -925,17 +1020,27 @@ void ConvergingScheduler::releaseBottomNode(SUnit *SU) { for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); I != E; ++I) { unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle; - unsigned Latency = - DAG->computeOperandLatency(SU, I->getSUnit(), *I, /*FindMin=*/true); + unsigned MinLatency = I->getMinLatency(); #ifndef NDEBUG - Bot.MaxMinLatency = std::max(Latency, Bot.MaxMinLatency); + Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency); #endif - if (SU->BotReadyCycle < SuccReadyCycle + Latency) - SU->BotReadyCycle = SuccReadyCycle + Latency; + if (SU->BotReadyCycle < SuccReadyCycle + MinLatency) + SU->BotReadyCycle = SuccReadyCycle + MinLatency; } Bot.releaseNode(SU, SU->BotReadyCycle); } +void ConvergingScheduler::registerRoots() { + Rem.CriticalPath = DAG->ExitSU.getDepth(); + // Some roots may not feed into ExitSU. Check all of them in case. + for (std::vector::const_iterator + I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) { + if ((*I)->getDepth() > Rem.CriticalPath) + Rem.CriticalPath = (*I)->getDepth(); + } + DEBUG(dbgs() << "Critical Path: " << Rem.CriticalPath << '\n'); +} + /// Does this SU have a hazard within the current instruction group. /// /// The scheduler supports two modes of hazard recognition. 
The first is the @@ -953,14 +1058,27 @@ bool ConvergingScheduler::SchedBoundary::checkHazard(SUnit *SU) { if (HazardRec->isEnabled()) return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard; - if (IssueCount + DAG->getNumMicroOps(SU->getInstr()) > DAG->getIssueWidth()) + unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); + if ((IssueCount > 0) && (IssueCount + uops > SchedModel->getIssueWidth())) { + DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops=" + << SchedModel->getNumMicroOps(SU->getInstr()) << '\n'); return true; - + } return false; } +/// If expected latency is covered, disable ILP policy. +void ConvergingScheduler::SchedBoundary::checkILPPolicy() { + if (ShouldIncreaseILP + && (IsResourceLimited || ExpectedLatency <= CurrCycle)) { + ShouldIncreaseILP = false; + DEBUG(dbgs() << "Disable ILP: " << Available.getName() << '\n'); + } +} + void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) { + if (ReadyCycle < MinReadyCycle) MinReadyCycle = ReadyCycle; @@ -970,15 +1088,31 @@ void ConvergingScheduler::SchedBoundary::releaseNode(SUnit *SU, Pending.push(SU); else Available.push(SU); + + // Record this node as an immediate dependent of the scheduled node. + NextSUs.insert(SU); + + // If CriticalPath has been computed, then check if the unscheduled nodes + // exceed the ILP window. Before registerRoots, CriticalPath==0. + if (Rem->CriticalPath && (ExpectedLatency + getUnscheduledLatency(SU) + > Rem->CriticalPath + ILPWindow)) { + ShouldIncreaseILP = true; + DEBUG(dbgs() << "Increase ILP: " << Available.getName() << " " + << ExpectedLatency << " + " << getUnscheduledLatency(SU) << '\n'); + } } /// Move the boundary of scheduled code by one cycle. void ConvergingScheduler::SchedBoundary::bumpCycle() { - unsigned Width = DAG->getIssueWidth(); + unsigned Width = SchedModel->getIssueWidth(); IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width; + unsigned NextCycle = CurrCycle + 1; assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); - unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle); + if (MinReadyCycle > NextCycle) { + IssueCount = 0; + NextCycle = MinReadyCycle; + } if (!HazardRec->isEnabled()) { // Bypass HazardRec virtual calls. @@ -994,11 +1128,39 @@ void ConvergingScheduler::SchedBoundary::bumpCycle() { } } CheckPending = true; + IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle); - DEBUG(dbgs() << "*** " << Available.getName() << " cycle " + DEBUG(dbgs() << " *** " << Available.getName() << " cycle " << CurrCycle << '\n'); } +/// Add the given processor resource to this scheduled zone. +void ConvergingScheduler::SchedBoundary::countResource(unsigned PIdx, + unsigned Cycles) { + unsigned Factor = SchedModel->getResourceFactor(PIdx); + DEBUG(dbgs() << " " << SchedModel->getProcResource(PIdx)->Name + << " +(" << Cycles << "x" << Factor + << ") / " << SchedModel->getLatencyFactor() << '\n'); + + unsigned Count = Factor * Cycles; + ResourceCounts[PIdx] += Count; + assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted"); + Rem->RemainingCounts[PIdx] -= Count; + + // Reset MaxRemainingCount for sanity. + Rem->MaxRemainingCount = 0; + + // Check if this resource exceeds the current critical resource by a full + // cycle. If so, it becomes the critical resource. 
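  // In these scaled units a lead of getLatencyFactor() corresponds to one full
  // cycle of work, e.g. with a latency factor of 2 a resource must pull at
  // least 2 scaled units ahead before CritResIdx switches over to it.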
+ if ((int)(ResourceCounts[PIdx] - ResourceCounts[CritResIdx]) + >= (int)SchedModel->getLatencyFactor()) { + CritResIdx = PIdx; + DEBUG(dbgs() << " *** Critical resource " + << SchedModel->getProcResource(PIdx)->Name << " x" + << ResourceCounts[PIdx] << '\n'); + } +} + /// Move the boundary of scheduled code by one SUnit. void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) { // Update the reservation table. @@ -1010,11 +1172,38 @@ void ConvergingScheduler::SchedBoundary::bumpNode(SUnit *SU) { } HazardRec->EmitInstruction(SU); } + // Update resource counts and critical resource. + if (SchedModel->hasInstrSchedModel()) { + const MCSchedClassDesc *SC = DAG->getSchedClass(SU); + Rem->RemainingMicroOps -= SchedModel->getNumMicroOps(SU->getInstr(), SC); + for (TargetSchedModel::ProcResIter + PI = SchedModel->getWriteProcResBegin(SC), + PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { + countResource(PI->ProcResourceIdx, PI->Cycles); + } + } + if (isTop()) { + if (SU->getDepth() > ExpectedLatency) + ExpectedLatency = SU->getDepth(); + } + else { + if (SU->getHeight() > ExpectedLatency) + ExpectedLatency = SU->getHeight(); + } + + IsResourceLimited = getCriticalCount() > std::max(ExpectedLatency, CurrCycle); + // Check the instruction group dispatch limit. // TODO: Check if this SU must end a dispatch group. - IssueCount += DAG->getNumMicroOps(SU->getInstr()); - if (IssueCount >= DAG->getIssueWidth()) { - DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n'); + IssueCount += SchedModel->getNumMicroOps(SU->getInstr()); + + // checkHazard prevents scheduling multiple instructions per cycle that exceed + // issue width. However, we commonly reach the maximum. In this case + // opportunistically bump the cycle to avoid uselessly checking everything in + // the readyQ. Furthermore, a single instruction may produce more than one + // cycle's worth of micro-ops. + if (IssueCount >= SchedModel->getIssueWidth()) { + DEBUG(dbgs() << " *** Max instrs at cycle " << CurrCycle << '\n'); bumpCycle(); } } @@ -1045,6 +1234,7 @@ void ConvergingScheduler::SchedBoundary::releasePending() { Pending.remove(Pending.begin()+i); --i; --e; } + DEBUG(if (!Pending.empty()) Pending.dump()); CheckPending = false; } @@ -1059,12 +1249,23 @@ void ConvergingScheduler::SchedBoundary::removeReady(SUnit *SU) { } /// If this queue only has one ready candidate, return it. As a side effect, -/// advance the cycle until at least one node is ready. If multiple instructions -/// are ready, return NULL. +/// defer any nodes that now hit a hazard, and advance the cycle until at least +/// one node is ready. If multiple instructions are ready, return NULL. SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() { if (CheckPending) releasePending(); + if (IssueCount > 0) { + // Defer any ready instrs that now have a hazard. 
+ for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) { + if (checkHazard(*I)) { + Pending.push(*I); + I = Available.remove(I); + continue; + } + ++I; + } + } for (unsigned i = 0; Available.empty(); ++i) { assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) && "permanent hazard"); (void)i; @@ -1076,18 +1277,262 @@ SUnit *ConvergingScheduler::SchedBoundary::pickOnlyChoice() { return NULL; } -#ifndef NDEBUG -void ConvergingScheduler::traceCandidate(const char *Label, const ReadyQueue &Q, - SUnit *SU, PressureElement P) { - dbgs() << Label << " " << Q.getName() << " "; - if (P.isValid()) - dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease - << " "; - else - dbgs() << " "; - SU->dump(DAG); +/// Record the candidate policy for opposite zones with different critical +/// resources. +/// +/// If the CriticalZone is latency limited, don't force a policy for the +/// candidates here. Instead, When releasing each candidate, releaseNode +/// compares the region's critical path to the candidate's height or depth and +/// the scheduled zone's expected latency then sets ShouldIncreaseILP. +void ConvergingScheduler::balanceZones( + ConvergingScheduler::SchedBoundary &CriticalZone, + ConvergingScheduler::SchedCandidate &CriticalCand, + ConvergingScheduler::SchedBoundary &OppositeZone, + ConvergingScheduler::SchedCandidate &OppositeCand) { + + if (!CriticalZone.IsResourceLimited) + return; + + SchedRemainder *Rem = CriticalZone.Rem; + + // If the critical zone is overconsuming a resource relative to the + // remainder, try to reduce it. + unsigned RemainingCritCount = + Rem->RemainingCounts[CriticalZone.CritResIdx]; + if ((int)(Rem->MaxRemainingCount - RemainingCritCount) + > (int)SchedModel->getLatencyFactor()) { + CriticalCand.Policy.ReduceResIdx = CriticalZone.CritResIdx; + DEBUG(dbgs() << "Balance " << CriticalZone.Available.getName() << " reduce " + << SchedModel->getProcResource(CriticalZone.CritResIdx)->Name + << '\n'); + } + // If the other zone is underconsuming a resource relative to the full zone, + // try to increase it. + unsigned OppositeCount = + OppositeZone.ResourceCounts[CriticalZone.CritResIdx]; + if ((int)(OppositeZone.ExpectedCount - OppositeCount) + > (int)SchedModel->getLatencyFactor()) { + OppositeCand.Policy.DemandResIdx = CriticalZone.CritResIdx; + DEBUG(dbgs() << "Balance " << OppositeZone.Available.getName() << " demand " + << SchedModel->getProcResource(OppositeZone.CritResIdx)->Name + << '\n'); + } +} + +/// Determine if the scheduled zones exceed resource limits or critical path and +/// set each candidate's ReduceHeight policy accordingly. +void ConvergingScheduler::checkResourceLimits( + ConvergingScheduler::SchedCandidate &TopCand, + ConvergingScheduler::SchedCandidate &BotCand) { + + Bot.checkILPPolicy(); + Top.checkILPPolicy(); + if (Bot.ShouldIncreaseILP) + BotCand.Policy.ReduceLatency = true; + if (Top.ShouldIncreaseILP) + TopCand.Policy.ReduceLatency = true; + + // Handle resource-limited regions. + if (Top.IsResourceLimited && Bot.IsResourceLimited + && Top.CritResIdx == Bot.CritResIdx) { + // If the scheduled critical resource in both zones is no longer the + // critical remaining resource, attempt to reduce resource height both ways. 
+ if (Top.CritResIdx != Rem.CritResIdx) { + TopCand.Policy.ReduceResIdx = Top.CritResIdx; + BotCand.Policy.ReduceResIdx = Bot.CritResIdx; + DEBUG(dbgs() << "Reduce scheduled " + << SchedModel->getProcResource(Top.CritResIdx)->Name << '\n'); + } + return; + } + // Handle latency-limited regions. + if (!Top.IsResourceLimited && !Bot.IsResourceLimited) { + // If the total scheduled expected latency exceeds the region's critical + // path then reduce latency both ways. + // + // Just because a zone is not resource limited does not mean it is latency + // limited. Unbuffered resource, such as max micro-ops may cause CurrCycle + // to exceed expected latency. + if ((Top.ExpectedLatency + Bot.ExpectedLatency >= Rem.CriticalPath) + && (Rem.CriticalPath > Top.CurrCycle + Bot.CurrCycle)) { + TopCand.Policy.ReduceLatency = true; + BotCand.Policy.ReduceLatency = true; + DEBUG(dbgs() << "Reduce scheduled latency " << Top.ExpectedLatency + << " + " << Bot.ExpectedLatency << '\n'); + } + return; + } + // The critical resource is different in each zone, so request balancing. + + // Compute the cost of each zone. + Rem.MaxRemainingCount = std::max( + Rem.RemainingMicroOps * SchedModel->getMicroOpFactor(), + Rem.RemainingCounts[Rem.CritResIdx]); + Top.ExpectedCount = std::max(Top.ExpectedLatency, Top.CurrCycle); + Top.ExpectedCount = std::max( + Top.getCriticalCount(), + Top.ExpectedCount * SchedModel->getLatencyFactor()); + Bot.ExpectedCount = std::max(Bot.ExpectedLatency, Bot.CurrCycle); + Bot.ExpectedCount = std::max( + Bot.getCriticalCount(), + Bot.ExpectedCount * SchedModel->getLatencyFactor()); + + balanceZones(Top, TopCand, Bot, BotCand); + balanceZones(Bot, BotCand, Top, TopCand); +} + +void ConvergingScheduler::SchedCandidate:: +initResourceDelta(const ScheduleDAGMI *DAG, + const TargetSchedModel *SchedModel) { + if (!Policy.ReduceResIdx && !Policy.DemandResIdx) + return; + + const MCSchedClassDesc *SC = DAG->getSchedClass(SU); + for (TargetSchedModel::ProcResIter + PI = SchedModel->getWriteProcResBegin(SC), + PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) { + if (PI->ProcResourceIdx == Policy.ReduceResIdx) + ResDelta.CritResources += PI->Cycles; + if (PI->ProcResourceIdx == Policy.DemandResIdx) + ResDelta.DemandedResources += PI->Cycles; + } +} + +/// Return true if this heuristic determines order. +static bool tryLess(unsigned TryVal, unsigned CandVal, + ConvergingScheduler::SchedCandidate &TryCand, + ConvergingScheduler::SchedCandidate &Cand, + ConvergingScheduler::CandReason Reason) { + if (TryVal < CandVal) { + TryCand.Reason = Reason; + return true; + } + if (TryVal > CandVal) { + if (Cand.Reason > Reason) + Cand.Reason = Reason; + return true; + } + return false; +} +static bool tryGreater(unsigned TryVal, unsigned CandVal, + ConvergingScheduler::SchedCandidate &TryCand, + ConvergingScheduler::SchedCandidate &Cand, + ConvergingScheduler::CandReason Reason) { + if (TryVal > CandVal) { + TryCand.Reason = Reason; + return true; + } + if (TryVal < CandVal) { + if (Cand.Reason > Reason) + Cand.Reason = Reason; + return true; + } + return false; +} + +/// Apply a set of heursitics to a new candidate. Heuristics are currently +/// hierarchical. This may be more efficient than a graduated cost model because +/// we don't need to evaluate all aspects of the model for each node in the +/// queue. But it's really done to make the heuristics easier to debug and +/// statistically analyze. +/// +/// \param Cand provides the policy and current best candidate. 
+/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized. +/// \param Zone describes the scheduled zone that we are extending. +/// \param RPTracker describes reg pressure within the scheduled zone. +/// \param TempTracker is a scratch pressure tracker to reuse in queries. +void ConvergingScheduler::tryCandidate(SchedCandidate &Cand, + SchedCandidate &TryCand, + SchedBoundary &Zone, + const RegPressureTracker &RPTracker, + RegPressureTracker &TempTracker) { + + // Always initialize TryCand's RPDelta. + TempTracker.getMaxPressureDelta(TryCand.SU->getInstr(), TryCand.RPDelta, + DAG->getRegionCriticalPSets(), + DAG->getRegPressure().MaxSetPressure); + + // Initialize the candidate if needed. + if (!Cand.isValid()) { + TryCand.Reason = NodeOrder; + return; + } + // Avoid exceeding the target's limit. + if (tryLess(TryCand.RPDelta.Excess.UnitIncrease, + Cand.RPDelta.Excess.UnitIncrease, TryCand, Cand, SingleExcess)) + return; + if (Cand.Reason == SingleExcess) + Cand.Reason = MultiPressure; + + // Avoid increasing the max critical pressure in the scheduled region. + if (tryLess(TryCand.RPDelta.CriticalMax.UnitIncrease, + Cand.RPDelta.CriticalMax.UnitIncrease, + TryCand, Cand, SingleCritical)) + return; + if (Cand.Reason == SingleCritical) + Cand.Reason = MultiPressure; + + // Avoid critical resource consumption and balance the schedule. + TryCand.initResourceDelta(DAG, SchedModel); + if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources, + TryCand, Cand, ResourceReduce)) + return; + if (tryGreater(TryCand.ResDelta.DemandedResources, + Cand.ResDelta.DemandedResources, + TryCand, Cand, ResourceDemand)) + return; + + // Avoid serializing long latency dependence chains. + if (Cand.Policy.ReduceLatency) { + if (Zone.isTop()) { + if (Cand.SU->getDepth() * SchedModel->getLatencyFactor() + > Zone.ExpectedCount) { + if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(), + TryCand, Cand, TopDepthReduce)) + return; + } + if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(), + TryCand, Cand, TopPathReduce)) + return; + } + else { + if (Cand.SU->getHeight() * SchedModel->getLatencyFactor() + > Zone.ExpectedCount) { + if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(), + TryCand, Cand, BotHeightReduce)) + return; + } + if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(), + TryCand, Cand, BotPathReduce)) + return; + } + } + + // Avoid increasing the max pressure of the entire region. + if (tryLess(TryCand.RPDelta.CurrentMax.UnitIncrease, + Cand.RPDelta.CurrentMax.UnitIncrease, TryCand, Cand, SingleMax)) + return; + if (Cand.Reason == SingleMax) + Cand.Reason = MultiPressure; + + // Prefer immediate defs/users of the last scheduled instruction. This is a + // nice pressure avoidance strategy that also conserves the processor's + // register renaming resources and keeps the machine code readable. + if (Zone.NextSUs.count(TryCand.SU) && !Zone.NextSUs.count(Cand.SU)) { + TryCand.Reason = NextDefUse; + return; + } + if (!Zone.NextSUs.count(TryCand.SU) && Zone.NextSUs.count(Cand.SU)) { + if (Cand.Reason > NextDefUse) + Cand.Reason = NextDefUse; + return; + } + // Fall through to original instruction order. + if ((Zone.isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum) + || (!Zone.isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) { + TryCand.Reason = NodeOrder; + } } -#endif /// pickNodeFromQueue helper that returns true if the LHS reg pressure effect is /// more desirable than RHS from scheduling standpoint. 
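The tryLess/tryGreater helpers above give tryCandidate a strict priority order: the first heuristic that separates the two candidates decides, and nothing below it is evaluated. A minimal standalone sketch of that shape, with invented reason values and candidate fields rather than the scheduler's real types:

    #include <cstdio>

    // Illustrative only: the same "first decisive heuristic wins" structure as
    // ConvergingScheduler::tryCandidate, on simplified candidates.
    enum Reason { NoCand, RegExcess, Latency, NodeOrder }; // lower = stronger

    struct Cand {
      unsigned Num;    // node number, the final tie-break
      unsigned Excess; // register-pressure excess this pick would cause
      unsigned Depth;  // latency proxy
      Reason Why;
    };

    // Returns true if this heuristic decided either way. On a win the
    // challenger's Reason is set; on a loss the incumbent's Reason is lowered
    // to this level, as in the tryLess above.
    static bool tryLess(unsigned TryVal, unsigned CandVal,
                        Cand &TryCand, Cand &Best, Reason R) {
      if (TryVal < CandVal) { TryCand.Why = R; return true; }
      if (TryVal > CandVal) { if (Best.Why > R) Best.Why = R; return true; }
      return false; // tie: fall through to the next heuristic
    }

    static void tryCandidate(Cand &Best, Cand &TryCand) {
      if (Best.Why == NoCand) { TryCand.Why = NodeOrder; return; } // first cand
      if (tryLess(TryCand.Excess, Best.Excess, TryCand, Best, RegExcess))
        return;
      if (tryLess(TryCand.Depth, Best.Depth, TryCand, Best, Latency))
        return;
      if (TryCand.Num < Best.Num)
        TryCand.Why = NodeOrder; // fall back to original instruction order
    }

    int main() {
      Cand Best = {0, 2, 5, NodeOrder};
      Cand Try  = {1, 0, 9, NoCand};
      tryCandidate(Best, Try);
      // RegExcess decided in Try's favor; Depth was never compared.
      std::printf("accept=%d reason=%d\n", Try.Why != NoCand, (int)Try.Why);
      return 0;
    }

tryGreater is the same pattern with the comparison flipped, so adding a heuristic amounts to inserting one more try* call at the desired priority level.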
@@ -1098,109 +1543,144 @@ static bool compareRPDelta(const RegPressureDelta &LHS, // have UnitIncrease==0, so are neutral. // Avoid increasing the max critical pressure in the scheduled region. - if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) + if (LHS.Excess.UnitIncrease != RHS.Excess.UnitIncrease) { + DEBUG(dbgs() << "RP excess top - bot: " + << (LHS.Excess.UnitIncrease - RHS.Excess.UnitIncrease) << '\n'); return LHS.Excess.UnitIncrease < RHS.Excess.UnitIncrease; - + } // Avoid increasing the max critical pressure in the scheduled region. - if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) + if (LHS.CriticalMax.UnitIncrease != RHS.CriticalMax.UnitIncrease) { + DEBUG(dbgs() << "RP critical top - bot: " + << (LHS.CriticalMax.UnitIncrease - RHS.CriticalMax.UnitIncrease) + << '\n'); return LHS.CriticalMax.UnitIncrease < RHS.CriticalMax.UnitIncrease; - + } // Avoid increasing the max pressure of the entire region. - if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) + if (LHS.CurrentMax.UnitIncrease != RHS.CurrentMax.UnitIncrease) { + DEBUG(dbgs() << "RP current top - bot: " + << (LHS.CurrentMax.UnitIncrease - RHS.CurrentMax.UnitIncrease) + << '\n'); return LHS.CurrentMax.UnitIncrease < RHS.CurrentMax.UnitIncrease; - + } return false; } +#ifndef NDEBUG +const char *ConvergingScheduler::getReasonStr( + ConvergingScheduler::CandReason Reason) { + switch (Reason) { + case NoCand: return "NOCAND "; + case SingleExcess: return "REG-EXCESS"; + case SingleCritical: return "REG-CRIT "; + case SingleMax: return "REG-MAX "; + case MultiPressure: return "REG-MULTI "; + case ResourceReduce: return "RES-REDUCE"; + case ResourceDemand: return "RES-DEMAND"; + case TopDepthReduce: return "TOP-DEPTH "; + case TopPathReduce: return "TOP-PATH "; + case BotHeightReduce:return "BOT-HEIGHT"; + case BotPathReduce: return "BOT-PATH "; + case NextDefUse: return "DEF-USE "; + case NodeOrder: return "ORDER "; + }; + llvm_unreachable("Unknown reason!"); +} + +void ConvergingScheduler::traceCandidate(const SchedCandidate &Cand, + const SchedBoundary &Zone) { + const char *Label = getReasonStr(Cand.Reason); + PressureElement P; + unsigned ResIdx = 0; + unsigned Latency = 0; + switch (Cand.Reason) { + default: + break; + case SingleExcess: + P = Cand.RPDelta.Excess; + break; + case SingleCritical: + P = Cand.RPDelta.CriticalMax; + break; + case SingleMax: + P = Cand.RPDelta.CurrentMax; + break; + case ResourceReduce: + ResIdx = Cand.Policy.ReduceResIdx; + break; + case ResourceDemand: + ResIdx = Cand.Policy.DemandResIdx; + break; + case TopDepthReduce: + Latency = Cand.SU->getDepth(); + break; + case TopPathReduce: + Latency = Cand.SU->getHeight(); + break; + case BotHeightReduce: + Latency = Cand.SU->getHeight(); + break; + case BotPathReduce: + Latency = Cand.SU->getDepth(); + break; + } + dbgs() << Label << " " << Zone.Available.getName() << " "; + if (P.isValid()) + dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease + << " "; + else + dbgs() << " "; + if (ResIdx) + dbgs() << SchedModel->getProcResource(ResIdx)->Name << " "; + else + dbgs() << " "; + if (Latency) + dbgs() << Latency << " cycles "; + else + dbgs() << " "; + Cand.SU->dump(DAG); +} +#endif + /// Pick the best candidate from the top queue. /// /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during /// DAG building. To adjust for the current scheduling location we need to /// maintain the number of vreg uses remaining to be top-scheduled. 
-ConvergingScheduler::CandResult ConvergingScheduler:: -pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker, - SchedCandidate &Candidate) { +void ConvergingScheduler::pickNodeFromQueue(SchedBoundary &Zone, + const RegPressureTracker &RPTracker, + SchedCandidate &Cand) { + ReadyQueue &Q = Zone.Available; + DEBUG(Q.dump()); // getMaxPressureDelta temporarily modifies the tracker. RegPressureTracker &TempTracker = const_cast(RPTracker); - // BestSU remains NULL if no top candidates beat the best existing candidate. - CandResult FoundCandidate = NoCand; for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { - RegPressureDelta RPDelta; - TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta, - DAG->getRegionCriticalPSets(), - DAG->getRegPressure().MaxSetPressure); - - // Initialize the candidate if needed. - if (!Candidate.SU) { - Candidate.SU = *I; - Candidate.RPDelta = RPDelta; - FoundCandidate = NodeOrder; - continue; - } - // Avoid exceeding the target's limit. - if (RPDelta.Excess.UnitIncrease < Candidate.RPDelta.Excess.UnitIncrease) { - DEBUG(traceCandidate("ECAND", Q, *I, RPDelta.Excess)); - Candidate.SU = *I; - Candidate.RPDelta = RPDelta; - FoundCandidate = SingleExcess; - continue; - } - if (RPDelta.Excess.UnitIncrease > Candidate.RPDelta.Excess.UnitIncrease) - continue; - if (FoundCandidate == SingleExcess) - FoundCandidate = MultiPressure; - - // Avoid increasing the max critical pressure in the scheduled region. - if (RPDelta.CriticalMax.UnitIncrease - < Candidate.RPDelta.CriticalMax.UnitIncrease) { - DEBUG(traceCandidate("PCAND", Q, *I, RPDelta.CriticalMax)); - Candidate.SU = *I; - Candidate.RPDelta = RPDelta; - FoundCandidate = SingleCritical; - continue; - } - if (RPDelta.CriticalMax.UnitIncrease - > Candidate.RPDelta.CriticalMax.UnitIncrease) - continue; - if (FoundCandidate == SingleCritical) - FoundCandidate = MultiPressure; - - // Avoid increasing the max pressure of the entire region. - if (RPDelta.CurrentMax.UnitIncrease - < Candidate.RPDelta.CurrentMax.UnitIncrease) { - DEBUG(traceCandidate("MCAND", Q, *I, RPDelta.CurrentMax)); - Candidate.SU = *I; - Candidate.RPDelta = RPDelta; - FoundCandidate = SingleMax; - continue; - } - if (RPDelta.CurrentMax.UnitIncrease - > Candidate.RPDelta.CurrentMax.UnitIncrease) - continue; - if (FoundCandidate == SingleMax) - FoundCandidate = MultiPressure; - - // Fall through to original instruction order. - // Only consider node order if Candidate was chosen from this Q. - if (FoundCandidate == NoCand) - continue; - if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum) - || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) { - DEBUG(traceCandidate("NCAND", Q, *I)); - Candidate.SU = *I; - Candidate.RPDelta = RPDelta; - FoundCandidate = NodeOrder; + SchedCandidate TryCand(Cand.Policy); + TryCand.SU = *I; + tryCandidate(Cand, TryCand, Zone, RPTracker, TempTracker); + if (TryCand.Reason != NoCand) { + // Initialize resource delta if needed in case future heuristics query it. + if (TryCand.ResDelta == SchedResourceDelta()) + TryCand.initResourceDelta(DAG, SchedModel); + Cand.setBest(TryCand); + DEBUG(traceCandidate(Cand, Zone)); } + TryCand.SU = *I; } - return FoundCandidate; +} + +static void tracePick(const ConvergingScheduler::SchedCandidate &Cand, + bool IsTop) { + DEBUG(dbgs() << "Pick " << (IsTop ? 
"top" : "bot") + << " SU(" << Cand.SU->NodeNum << ") " + << ConvergingScheduler::getReasonStr(Cand.Reason) << '\n'); } /// Pick the best candidate node from either the top or bottom queue. -SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) { +SUnit *ConvergingScheduler::pickNodeBidirectional(bool &IsTopNode) { // Schedule as far as possible in the direction of no choice. This is most // efficient, but also provides the best heuristics for CriticalPSets. if (SUnit *SU = Bot.pickOnlyChoice()) { @@ -1211,11 +1691,14 @@ SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) { IsTopNode = true; return SU; } - SchedCandidate BotCand; + CandPolicy NoPolicy; + SchedCandidate BotCand(NoPolicy); + SchedCandidate TopCand(NoPolicy); + checkResourceLimits(TopCand, BotCand); + // Prefer bottom scheduling when heuristics are silent. - CandResult BotResult = pickNodeFromQueue(Bot.Available, - DAG->getBotRPTracker(), BotCand); - assert(BotResult != NoCand && "failed to find the first candidate"); + pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); + assert(BotCand.Reason != NoCand && "failed to find the first candidate"); // If either Q has a single candidate that provides the least increase in // Excess pressure, we can immediately schedule from that Q. @@ -1224,37 +1707,41 @@ SUnit *ConvergingScheduler::pickNodeBidrectional(bool &IsTopNode) { // affects picking from either Q. If scheduling in one direction must // increase pressure for one of the excess PSets, then schedule in that // direction first to provide more freedom in the other direction. - if (BotResult == SingleExcess || BotResult == SingleCritical) { + if (BotCand.Reason == SingleExcess || BotCand.Reason == SingleCritical) { IsTopNode = false; + tracePick(BotCand, IsTopNode); return BotCand.SU; } // Check if the top Q has a better candidate. - SchedCandidate TopCand; - CandResult TopResult = pickNodeFromQueue(Top.Available, - DAG->getTopRPTracker(), TopCand); - assert(TopResult != NoCand && "failed to find the first candidate"); + pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); + assert(TopCand.Reason != NoCand && "failed to find the first candidate"); - if (TopResult == SingleExcess || TopResult == SingleCritical) { - IsTopNode = true; - return TopCand.SU; - } // If either Q has a single candidate that minimizes pressure above the // original region's pressure pick it. - if (BotResult == SingleMax) { + if (TopCand.Reason <= SingleMax || BotCand.Reason <= SingleMax) { + if (TopCand.Reason < BotCand.Reason) { + IsTopNode = true; + tracePick(TopCand, IsTopNode); + return TopCand.SU; + } IsTopNode = false; + tracePick(BotCand, IsTopNode); return BotCand.SU; } - if (TopResult == SingleMax) { + // Check for a salient pressure difference and pick the best from either side. + if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) { IsTopNode = true; + tracePick(TopCand, IsTopNode); return TopCand.SU; } - // Check for a salient pressure difference and pick the best from either side. - if (compareRPDelta(TopCand.RPDelta, BotCand.RPDelta)) { + // Otherwise prefer the bottom candidate, in node order if all else failed. + if (TopCand.Reason < BotCand.Reason) { IsTopNode = true; + tracePick(TopCand, IsTopNode); return TopCand.SU; } - // Otherwise prefer the bottom candidate in node order. 
IsTopNode = false; + tracePick(BotCand, IsTopNode); return BotCand.SU; } @@ -1266,33 +1753,34 @@ SUnit *ConvergingScheduler::pickNode(bool &IsTopNode) { return NULL; } SUnit *SU; - if (ForceTopDown) { - SU = Top.pickOnlyChoice(); - if (!SU) { - SchedCandidate TopCand; - CandResult TopResult = - pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand); - assert(TopResult != NoCand && "failed to find the first candidate"); - (void)TopResult; - SU = TopCand.SU; + do { + if (ForceTopDown) { + SU = Top.pickOnlyChoice(); + if (!SU) { + CandPolicy NoPolicy; + SchedCandidate TopCand(NoPolicy); + pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand); + assert(TopCand.Reason != NoCand && "failed to find the first candidate"); + SU = TopCand.SU; + } + IsTopNode = true; } - IsTopNode = true; - } - else if (ForceBottomUp) { - SU = Bot.pickOnlyChoice(); - if (!SU) { - SchedCandidate BotCand; - CandResult BotResult = - pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand); - assert(BotResult != NoCand && "failed to find the first candidate"); - (void)BotResult; - SU = BotCand.SU; + else if (ForceBottomUp) { + SU = Bot.pickOnlyChoice(); + if (!SU) { + CandPolicy NoPolicy; + SchedCandidate BotCand(NoPolicy); + pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand); + assert(BotCand.Reason != NoCand && "failed to find the first candidate"); + SU = BotCand.SU; + } + IsTopNode = false; } - IsTopNode = false; - } - else { - SU = pickNodeBidrectional(IsTopNode); - } + else { + SU = pickNodeBidirectional(IsTopNode); + } + } while (SU->isScheduled); + if (SU->isTopReady()) Top.removeReady(SU); if (SU->isBottomReady()) @@ -1331,6 +1819,86 @@ ConvergingSchedRegistry("converge", "Standard converging scheduler.", createConvergingSched); //===----------------------------------------------------------------------===// +// ILP Scheduler. Currently for experimental analysis of heuristics. +//===----------------------------------------------------------------------===// + +namespace { +/// \brief Order nodes by the ILP metric. +struct ILPOrder { + ScheduleDAGILP *ILP; + bool MaximizeILP; + + ILPOrder(ScheduleDAGILP *ilp, bool MaxILP): ILP(ilp), MaximizeILP(MaxILP) {} + + /// \brief Apply a less-than relation on node priority. + bool operator()(const SUnit *A, const SUnit *B) const { + // Return true if A comes after B in the Q. + if (MaximizeILP) + return ILP->getILP(A) < ILP->getILP(B); + else + return ILP->getILP(A) > ILP->getILP(B); + } +}; + +/// \brief Schedule based on the ILP metric. +class ILPScheduler : public MachineSchedStrategy { + ScheduleDAGILP ILP; + ILPOrder Cmp; + + std::vector ReadyQ; +public: + ILPScheduler(bool MaximizeILP) + : ILP(/*BottomUp=*/true), Cmp(&ILP, MaximizeILP) {} + + virtual void initialize(ScheduleDAGMI *DAG) { + ReadyQ.clear(); + ILP.resize(DAG->SUnits.size()); + } + + virtual void registerRoots() { + for (std::vector::const_iterator + I = ReadyQ.begin(), E = ReadyQ.end(); I != E; ++I) { + ILP.computeILP(*I); + } + } + + /// Implement MachineSchedStrategy interface. 
+ /// ----------------------------------------- + + virtual SUnit *pickNode(bool &IsTopNode) { + if (ReadyQ.empty()) return NULL; + pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); + SUnit *SU = ReadyQ.back(); + ReadyQ.pop_back(); + IsTopNode = false; + DEBUG(dbgs() << "*** Scheduling " << *SU->getInstr() + << " ILP: " << ILP.getILP(SU) << '\n'); + return SU; + } + + virtual void schedNode(SUnit *, bool) {} + + virtual void releaseTopNode(SUnit *) { /*only called for top roots*/ } + + virtual void releaseBottomNode(SUnit *SU) { + ReadyQ.push_back(SU); + std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp); + } +}; +} // namespace + +static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) { + return new ScheduleDAGMI(C, new ILPScheduler(true)); +} +static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) { + return new ScheduleDAGMI(C, new ILPScheduler(false)); +} +static MachineSchedRegistry ILPMaxRegistry( + "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler); +static MachineSchedRegistry ILPMinRegistry( + "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler); + +//===----------------------------------------------------------------------===// // Machine Instruction Shuffler for Correctness Testing //===----------------------------------------------------------------------===// diff --git a/lib/CodeGen/MachineSink.cpp b/lib/CodeGen/MachineSink.cpp index bc383cb..b117f8c 100644 --- a/lib/CodeGen/MachineSink.cpp +++ b/lib/CodeGen/MachineSink.cpp @@ -49,7 +49,6 @@ namespace { MachineDominatorTree *DT; // Machine dominator tree MachineLoopInfo *LI; AliasAnalysis *AA; - BitVector AllocatableSet; // Which physregs are allocatable? // Remember which edges have been considered for breaking. SmallSet, 8> @@ -229,7 +228,6 @@ bool MachineSinking::runOnMachineFunction(MachineFunction &MF) { DT = &getAnalysis(); LI = &getAnalysis(); AA = &getAnalysis(); - AllocatableSet = TRI->getAllocatableSet(MF); bool EverMadeChange = false; diff --git a/lib/CodeGen/MachineTraceMetrics.cpp b/lib/CodeGen/MachineTraceMetrics.cpp index 1a3aa60..9686b04 100644 --- a/lib/CodeGen/MachineTraceMetrics.cpp +++ b/lib/CodeGen/MachineTraceMetrics.cpp @@ -14,9 +14,10 @@ #include "llvm/CodeGen/MachineLoopInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" -#include "llvm/MC/MCInstrItineraries.h" +#include "llvm/MC/MCSubtargetInfo.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/ADT/PostOrderIterator.h" @@ -50,9 +51,11 @@ bool MachineTraceMetrics::runOnMachineFunction(MachineFunction &Func) { MF = &Func; TII = MF->getTarget().getInstrInfo(); TRI = MF->getTarget().getRegisterInfo(); - ItinData = MF->getTarget().getInstrItineraryData(); MRI = &MF->getRegInfo(); Loops = &getAnalysis(); + const TargetSubtargetInfo &ST = + MF->getTarget().getSubtarget(); + SchedModel.init(*ST.getSchedModel(), &ST, TII); BlockInfo.resize(MF->getNumBlockIDs()); return false; } @@ -674,7 +677,7 @@ computeCrossBlockCriticalPath(const TraceBlockInfo &TBI) { const MachineInstr *DefMI = MTM.MRI->getVRegDef(LIR.Reg); // Ignore dependencies outside the current trace. 
const TraceBlockInfo &DefTBI = BlockInfo[DefMI->getParent()->getNumber()]; - if (!DefTBI.hasValidDepth() || DefTBI.Head != TBI.Head) + if (!DefTBI.isEarlierInSameTrace(TBI)) continue; unsigned Len = LIR.Height + Cycles[DefMI].Depth; MaxLen = std::max(MaxLen, Len); @@ -737,16 +740,15 @@ computeInstrDepths(const MachineBasicBlock *MBB) { const TraceBlockInfo&DepTBI = BlockInfo[Dep.DefMI->getParent()->getNumber()]; // Ignore dependencies from outside the current trace. - if (!DepTBI.hasValidDepth() || DepTBI.Head != TBI.Head) + if (!DepTBI.isEarlierInSameTrace(TBI)) continue; assert(DepTBI.HasValidInstrDepths && "Inconsistent dependency"); unsigned DepCycle = Cycles.lookup(Dep.DefMI).Depth; // Add latency if DefMI is a real instruction. Transients get latency 0. if (!Dep.DefMI->isTransient()) - DepCycle += MTM.TII->computeOperandLatency(MTM.ItinData, - Dep.DefMI, Dep.DefOp, - UseMI, Dep.UseOp, - /* FindMin = */ false); + DepCycle += MTM.SchedModel + .computeOperandLatency(Dep.DefMI, Dep.DefOp, UseMI, Dep.UseOp, + /* FindMin = */ false); Cycle = std::max(Cycle, DepCycle); } // Remember the instruction depth. @@ -769,7 +771,7 @@ computeInstrDepths(const MachineBasicBlock *MBB) { // Height is the issue height computed from virtual register dependencies alone. static unsigned updatePhysDepsUpwards(const MachineInstr *MI, unsigned Height, SparseSet &RegUnits, - const InstrItineraryData *ItinData, + const TargetSchedModel &SchedModel, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) { SmallVector ReadOps; @@ -792,14 +794,10 @@ static unsigned updatePhysDepsUpwards(const MachineInstr *MI, unsigned Height, unsigned DepHeight = I->Cycle; if (!MI->isTransient()) { // We may not know the UseMI of this dependency, if it came from the - // live-in list. - if (I->MI) - DepHeight += TII->computeOperandLatency(ItinData, - MI, MO.getOperandNo(), - I->MI, I->Op); - else - // No UseMI. Just use the MI latency instead. - DepHeight += TII->getInstrLatency(ItinData, MI); + // live-in list. SchedModel can handle a NULL UseMI. + DepHeight += SchedModel + .computeOperandLatency(MI, MO.getOperandNo(), I->MI, I->Op, + /* FindMin = */ false); } Height = std::max(Height, DepHeight); // This regunit is dead above MI. @@ -832,12 +830,12 @@ typedef DenseMap MIHeightMap; static bool pushDepHeight(const DataDep &Dep, const MachineInstr *UseMI, unsigned UseHeight, MIHeightMap &Heights, - const InstrItineraryData *ItinData, + const TargetSchedModel &SchedModel, const TargetInstrInfo *TII) { // Adjust height by Dep.DefMI latency. if (!Dep.DefMI->isTransient()) - UseHeight += TII->computeOperandLatency(ItinData, Dep.DefMI, Dep.DefOp, - UseMI, Dep.UseOp); + UseHeight += SchedModel.computeOperandLatency(Dep.DefMI, Dep.DefOp, + UseMI, Dep.UseOp, false); // Update Heights[DefMI] to be the maximum height seen. MIHeightMap::iterator I; @@ -852,14 +850,14 @@ static bool pushDepHeight(const DataDep &Dep, return false; } -/// Assuming that DefMI was used by Trace.back(), add it to the live-in lists -/// of all the blocks in Trace. Stop when reaching the block that contains -/// DefMI. +/// Assuming that the virtual register defined by DefMI:DefOp was used by +/// Trace.back(), add it to the live-in lists of all the blocks in Trace. Stop +/// when reaching the block that contains DefMI. 
void MachineTraceMetrics::Ensemble:: -addLiveIns(const MachineInstr *DefMI, +addLiveIns(const MachineInstr *DefMI, unsigned DefOp, ArrayRef Trace) { assert(!Trace.empty() && "Trace should contain at least one block"); - unsigned Reg = DefMI->getOperand(0).getReg(); + unsigned Reg = DefMI->getOperand(DefOp).getReg(); assert(TargetRegisterInfo::isVirtualRegister(Reg)); const MachineBasicBlock *DefMBB = DefMI->getParent(); @@ -951,8 +949,8 @@ computeInstrHeights(const MachineBasicBlock *MBB) { unsigned Height = TBI.Succ ? Cycles.lookup(PHI).Height : 0; DEBUG(dbgs() << "pred\t" << Height << '\t' << *PHI); if (pushDepHeight(Deps.front(), PHI, Height, - Heights, MTM.ItinData, MTM.TII)) - addLiveIns(Deps.front().DefMI, Stack); + Heights, MTM.SchedModel, MTM.TII)) + addLiveIns(Deps.front().DefMI, Deps.front().DefOp, Stack); } } } @@ -980,12 +978,12 @@ computeInstrHeights(const MachineBasicBlock *MBB) { // There may also be regunit dependencies to include in the height. if (HasPhysRegs) Cycle = updatePhysDepsUpwards(MI, Cycle, RegUnits, - MTM.ItinData, MTM.TII, MTM.TRI); + MTM.SchedModel, MTM.TII, MTM.TRI); // Update the required height of any virtual registers read by MI. for (unsigned i = 0, e = Deps.size(); i != e; ++i) - if (pushDepHeight(Deps[i], MI, Cycle, Heights, MTM.ItinData, MTM.TII)) - addLiveIns(Deps[i].DefMI, Stack); + if (pushDepHeight(Deps[i], MI, Cycle, Heights, MTM.SchedModel, MTM.TII)) + addLiveIns(Deps[i].DefMI, Deps[i].DefOp, Stack); InstrCycles &MICycles = Cycles[MI]; MICycles.Height = Cycle; @@ -1054,10 +1052,8 @@ MachineTraceMetrics::Trace::getPHIDepth(const MachineInstr *PHI) const { unsigned DepCycle = getInstrCycles(Dep.DefMI).Depth; // Add latency if DefMI is a real instruction. Transients get latency 0. if (!Dep.DefMI->isTransient()) - DepCycle += TE.MTM.TII->computeOperandLatency(TE.MTM.ItinData, - Dep.DefMI, Dep.DefOp, - PHI, Dep.UseOp, - /* FindMin = */ false); + DepCycle += TE.MTM.SchedModel + .computeOperandLatency(Dep.DefMI, Dep.DefOp, PHI, Dep.UseOp, false); return DepCycle; } @@ -1068,9 +1064,8 @@ unsigned MachineTraceMetrics::Trace::getResourceDepth(bool Bottom) const { unsigned Instrs = TBI.InstrDepth; if (Bottom) Instrs += TE.MTM.BlockInfo[getBlockNum()].InstrCount; - if (const MCSchedModel *Model = TE.MTM.ItinData->SchedModel) - if (Model->IssueWidth != 0) - return Instrs / Model->IssueWidth; + if (unsigned IW = TE.MTM.SchedModel.getIssueWidth()) + Instrs /= IW; // Assume issue width 1 without a schedule model. return Instrs; } @@ -1080,9 +1075,8 @@ getResourceLength(ArrayRef Extrablocks) const { unsigned Instrs = TBI.InstrDepth + TBI.InstrHeight; for (unsigned i = 0, e = Extrablocks.size(); i != e; ++i) Instrs += TE.MTM.getResources(Extrablocks[i])->InstrCount; - if (const MCSchedModel *Model = TE.MTM.ItinData->SchedModel) - if (Model->IssueWidth != 0) - return Instrs / Model->IssueWidth; + if (unsigned IW = TE.MTM.SchedModel.getIssueWidth()) + Instrs /= IW; // Assume issue width 1 without a schedule model. 
return Instrs; } diff --git a/lib/CodeGen/MachineTraceMetrics.h b/lib/CodeGen/MachineTraceMetrics.h index c5b86f3..460730b 100644 --- a/lib/CodeGen/MachineTraceMetrics.h +++ b/lib/CodeGen/MachineTraceMetrics.h @@ -50,6 +50,7 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseMap.h" #include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/TargetSchedule.h" namespace llvm { @@ -67,9 +68,9 @@ class MachineTraceMetrics : public MachineFunctionPass { const MachineFunction *MF; const TargetInstrInfo *TII; const TargetRegisterInfo *TRI; - const InstrItineraryData *ItinData; const MachineRegisterInfo *MRI; const MachineLoopInfo *Loops; + TargetSchedModel SchedModel; public: class Ensemble; @@ -164,6 +165,14 @@ public: /// Invalidate height resources when a block below this one has changed. void invalidateHeight() { InstrHeight = ~0u; HasValidInstrHeights = false; } + /// Determine if this block belongs to the same trace as TBI and comes + /// before it in the trace. + /// Also returns true when TBI == this. + bool isEarlierInSameTrace(const TraceBlockInfo &TBI) const { + return hasValidDepth() && TBI.hasValidDepth() && + Head == TBI.Head && InstrDepth <= TBI.InstrDepth; + } + // Data-dependency-related information. Per-instruction depth and height // are computed from data dependencies in the current trace, using // itinerary data. @@ -270,7 +279,7 @@ public: unsigned computeCrossBlockCriticalPath(const TraceBlockInfo&); void computeInstrDepths(const MachineBasicBlock*); void computeInstrHeights(const MachineBasicBlock*); - void addLiveIns(const MachineInstr *DefMI, + void addLiveIns(const MachineInstr *DefMI, unsigned DefOp, ArrayRef<const MachineBasicBlock*> Trace); protected: diff --git a/lib/CodeGen/MachineVerifier.cpp b/lib/CodeGen/MachineVerifier.cpp index f745b41..69a3ae8 100644 --- a/lib/CodeGen/MachineVerifier.cpp +++ b/lib/CodeGen/MachineVerifier.cpp @@ -23,8 +23,9 @@ // the verifier errors. //===----------------------------------------------------------------------===// +#include "llvm/BasicBlock.h" +#include "llvm/InlineAsm.h" #include "llvm/Instructions.h" -#include "llvm/Function.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" #include "llvm/CodeGen/LiveVariables.h" #include "llvm/CodeGen/LiveStackAnalysis.h" @@ -73,11 +74,12 @@ namespace { typedef SmallVector<const uint32_t*, 4> RegMaskVector; typedef DenseSet<unsigned> RegSet; typedef DenseMap<unsigned, const MachineInstr*> RegMap; + typedef SmallPtrSet<const MachineBasicBlock*, 8> BlockSet; const MachineInstr *FirstTerminator; + BlockSet FunctionBlocks; BitVector regsReserved; - BitVector regsAllocatable; RegSet regsLive; RegVector regsDefined, regsDead, regsKilled; RegMaskVector regMasks; @@ -117,6 +119,9 @@ namespace { // block. This set is disjoint from regsLiveOut. RegSet vregsRequired; + // Set versions of block's predecessor and successor lists. + BlockSet Preds, Succs; + BBInfo() : reachable(false) {} // Add register to vregsPassed if it belongs there.
Return true if @@ -180,7 +185,7 @@ namespace { } bool isAllocatable(unsigned Reg) { - return Reg < regsAllocatable.size() && regsAllocatable.test(Reg); + return Reg < TRI->getNumRegs() && MRI->isAllocatable(Reg); } // Analysis information if available @@ -208,6 +213,8 @@ namespace { void report(const char *msg, const MachineBasicBlock *MBB, const LiveInterval &LI); + void verifyInlineAsm(const MachineInstr *MI); + void checkLiveness(const MachineOperand *MO, unsigned MONum); void markReachable(const MachineBasicBlock *MBB); void calcRegsPassed(); @@ -352,7 +359,7 @@ void MachineVerifier::report(const char *msg, const MachineFunction *MF) { MF->print(*OS, Indexes); } *OS << "*** Bad machine code: " << msg << " ***\n" - << "- function: " << MF->getFunction()->getName() << "\n"; + << "- function: " << MF->getName() << "\n"; } void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) { @@ -360,7 +367,7 @@ void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) { report(msg, MBB->getParent()); *OS << "- basic block: BB#" << MBB->getNumber() << ' ' << MBB->getName() - << " (" << (void*)MBB << ')'; + << " (" << (const void*)MBB << ')'; if (Indexes) *OS << " [" << Indexes->getMBBStartIdx(MBB) << ';' << Indexes->getMBBEndIdx(MBB) << ')'; @@ -419,7 +426,7 @@ void MachineVerifier::markReachable(const MachineBasicBlock *MBB) { void MachineVerifier::visitMachineFunctionBefore() { lastIndex = SlotIndex(); - regsReserved = TRI->getReservedRegs(*MF); + regsReserved = MRI->getReservedRegs(); // A sub-register of a reserved register is also reserved for (int Reg = regsReserved.find_first(); Reg>=0; @@ -431,9 +438,23 @@ void MachineVerifier::visitMachineFunctionBefore() { } } - regsAllocatable = TRI->getAllocatableSet(*MF); - markReachable(&MF->front()); + + // Build a set of the basic blocks in the function. + FunctionBlocks.clear(); + for (MachineFunction::const_iterator + I = MF->begin(), E = MF->end(); I != E; ++I) { + FunctionBlocks.insert(I); + BBInfo &MInfo = MBBInfoMap[I]; + + MInfo.Preds.insert(I->pred_begin(), I->pred_end()); + if (MInfo.Preds.size() != I->pred_size()) + report("MBB has duplicate entries in its predecessor list.", I); + + MInfo.Succs.insert(I->succ_begin(), I->succ_end()); + if (MInfo.Succs.size() != I->succ_size()) + report("MBB has duplicate entries in its successor list.", I); + } } // Does iterator point to a and b as the first two elements? @@ -470,6 +491,25 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { E = MBB->succ_end(); I != E; ++I) { if ((*I)->isLandingPad()) LandingPadSuccs.insert(*I); + if (!FunctionBlocks.count(*I)) + report("MBB has successor that isn't part of the function.", MBB); + if (!MBBInfoMap[*I].Preds.count(MBB)) { + report("Inconsistent CFG", MBB); + *OS << "MBB is not in the predecessor list of the successor BB#" + << (*I)->getNumber() << ".\n"; + } + } + + // Check the predecessor list. 
+ for (MachineBasicBlock::const_pred_iterator I = MBB->pred_begin(), + E = MBB->pred_end(); I != E; ++I) { + if (!FunctionBlocks.count(*I)) + report("MBB has predecessor that isn't part of the function.", MBB); + if (!MBBInfoMap[*I].Succs.count(MBB)) { + report("Inconsistent CFG", MBB); + *OS << "MBB is not in the successor list of the predecessor BB#" + << (*I)->getNumber() << ".\n"; + } } const MCAsmInfo *AsmInfo = TM->getMCAsmInfo(); @@ -540,7 +580,15 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { ++MBBI; if (MBBI == MF->end()) { report("MBB conditionally falls through out of function!", MBB); - } if (MBB->succ_size() != 2) { + } if (MBB->succ_size() == 1) { + // A conditional branch with only one successor is weird, but allowed. + if (&*MBBI != TBB) + report("MBB exits via conditional branch/fall-through but only has " + "one CFG successor!", MBB); + else if (TBB != *MBB->succ_begin()) + report("MBB exits via conditional branch/fall-through but the CFG " + "successor don't match the actual successor!", MBB); + } else if (MBB->succ_size() != 2) { report("MBB exits via conditional branch/fall-through but doesn't have " "exactly two CFG successors!", MBB); } else if (!matchPair(MBB->succ_begin(), TBB, MBBI)) { @@ -560,7 +608,15 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { } else if (TBB && FBB) { // Block conditionally branches somewhere, otherwise branches // somewhere else. - if (MBB->succ_size() != 2) { + if (MBB->succ_size() == 1) { + // A conditional branch with only one successor is weird, but allowed. + if (FBB != TBB) + report("MBB exits via conditional branch/branch through but only has " + "one CFG successor!", MBB); + else if (TBB != *MBB->succ_begin()) + report("MBB exits via conditional branch/branch through but the CFG " + "successor don't match the actual successor!", MBB); + } else if (MBB->succ_size() != 2) { report("MBB exits via conditional branch/branch but doesn't have " "exactly two CFG successors!", MBB); } else if (!matchPair(MBB->succ_begin(), TBB, FBB)) { @@ -639,6 +695,50 @@ void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) { } } +// The operands on an INLINEASM instruction must follow a template. +// Verify that the flag operands make sense. +void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) { + // The first two operands on INLINEASM are the asm string and global flags. + if (MI->getNumOperands() < 2) { + report("Too few operands on inline asm", MI); + return; + } + if (!MI->getOperand(0).isSymbol()) + report("Asm string must be an external symbol", MI); + if (!MI->getOperand(1).isImm()) + report("Asm flags must be an immediate", MI); + // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2, + // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16. + if (!isUInt<5>(MI->getOperand(1).getImm())) + report("Unknown asm flags", &MI->getOperand(1), 1); + + assert(InlineAsm::MIOp_FirstOperand == 2 && "Asm format changed"); + + unsigned OpNo = InlineAsm::MIOp_FirstOperand; + unsigned NumOps; + for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) { + const MachineOperand &MO = MI->getOperand(OpNo); + // There may be implicit ops after the fixed operands. + if (!MO.isImm()) + break; + NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm()); + } + + if (OpNo > MI->getNumOperands()) + report("Missing operands in last group", MI); + + // An optional MDNode follows the groups. 
+ if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata()) + ++OpNo; + + // All trailing operands must be implicit registers. + for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) { + const MachineOperand &MO = MI->getOperand(OpNo); + if (!MO.isReg() || !MO.isImplicit()) + report("Expected implicit register after groups", &MO, OpNo); + } +} + void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { const MCInstrDesc &MCID = MI->getDesc(); if (MI->getNumOperands() < MCID.getNumOperands()) { @@ -647,6 +747,10 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) { << MI->getNumExplicitOperands() << " given.\n"; } + // Check the tied operands. + if (MI->isInlineAsm()) + verifyInlineAsm(MI); + // Check the MachineMemOperands for basic consistency. for (MachineInstr::mmo_iterator I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I) { @@ -702,6 +806,17 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { if (MO->isImplicit()) report("Explicit operand marked as implicit", MO, MONum); } + + int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO); + if (TiedTo != -1) { + if (!MO->isReg()) + report("Tied use must be a register", MO, MONum); + else if (!MO->isTied()) + report("Operand should be tied", MO, MONum); + else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum)) + report("Tied def doesn't match MCInstrDesc", MO, MONum); + } else if (MO->isReg() && MO->isTied()) + report("Explicit operand should not be tied", MO, MONum); } else { // ARM adds %reg0 operands to indicate predicates. We'll allow that. if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg()) @@ -716,6 +831,28 @@ MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) { if (MRI->tracksLiveness() && !MI->isDebugValue()) checkLiveness(MO, MONum); + // Verify the consistency of tied operands. + if (MO->isTied()) { + unsigned OtherIdx = MI->findTiedOperandIdx(MONum); + const MachineOperand &OtherMO = MI->getOperand(OtherIdx); + if (!OtherMO.isReg()) + report("Must be tied to a register", MO, MONum); + if (!OtherMO.isTied()) + report("Missing tie flags on tied operand", MO, MONum); + if (MI->findTiedOperandIdx(OtherIdx) != MONum) + report("Inconsistent tie links", MO, MONum); + if (MONum < MCID.getNumDefs()) { + if (OtherIdx < MCID.getNumOperands()) { + if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO)) + report("Explicit def tied to explicit use without tie constraint", + MO, MONum); + } else { + if (!OtherMO.isImplicit()) + report("Explicit def should be tied to implicit use", MO, MONum); + } + } + } + // Verify two-address constraints after leaving SSA form. 
unsigned DefIdx; if (!MRI->isSSA() && MO->isUse() && diff --git a/lib/CodeGen/Passes.cpp b/lib/CodeGen/Passes.cpp index cfa3eec..4ea21d4 100644 --- a/lib/CodeGen/Passes.cpp +++ b/lib/CodeGen/Passes.cpp @@ -49,8 +49,8 @@ static cl::opt DisableSSC("disable-ssc", cl::Hidden, cl::desc("Disable Stack Slot Coloring")); static cl::opt DisableMachineDCE("disable-machine-dce", cl::Hidden, cl::desc("Disable Machine Dead Code Elimination")); -static cl::opt EnableEarlyIfConversion("enable-early-ifcvt", cl::Hidden, - cl::desc("Enable Early If-conversion")); +static cl::opt DisableEarlyIfConversion("disable-early-ifcvt", cl::Hidden, + cl::desc("Disable Early If-conversion")); static cl::opt DisableMachineLICM("disable-machine-licm", cl::Hidden, cl::desc("Disable Machine LICM")); static cl::opt DisableMachineCSE("disable-machine-cse", cl::Hidden, @@ -161,7 +161,7 @@ static AnalysisID overridePass(AnalysisID StandardID, AnalysisID TargetID) { return applyDisable(TargetID, DisableMachineDCE); if (StandardID == &EarlyIfConverterID) - return applyDisable(TargetID, !EnableEarlyIfConversion); + return applyDisable(TargetID, DisableEarlyIfConversion); if (StandardID == &MachineLICMID) return applyDisable(TargetID, DisableMachineLICM); @@ -447,8 +447,8 @@ void TargetPassConfig::addMachinePasses() { const PassInfo *TPI = PR->getPassInfo(PrintMachineInstrs.getValue()); const PassInfo *IPI = PR->getPassInfo(StringRef("print-machineinstrs")); assert (TPI && IPI && "Pass ID not registered!"); - const char *TID = (char *)(TPI->getTypeInfo()); - const char *IID = (char *)(IPI->getTypeInfo()); + const char *TID = (const char *)(TPI->getTypeInfo()); + const char *IID = (const char *)(IPI->getTypeInfo()); insertPass(TID, IID); } @@ -456,7 +456,8 @@ void TargetPassConfig::addMachinePasses() { printAndVerify("After Instruction Selection"); // Expand pseudo-instructions emitted by ISel. - addPass(&ExpandISelPseudosID); + if (addPass(&ExpandISelPseudosID)) + printAndVerify("After ExpandISelPseudos"); // Add passes that optimize machine instructions in SSA form. if (getOptLevel() != CodeGenOpt::None) { @@ -528,6 +529,10 @@ void TargetPassConfig::addMachineSSAOptimization() { // instructions dead. addPass(&OptimizePHIsID); + // This pass merges large allocas. StackSlotColoring is a different pass + // which merges spill slots. + addPass(&StackColoringID); + // If the target requests it, assign local variables to stack slots relative // to one another and simplify frame index references where possible. addPass(&LocalStackSlotAllocationID); diff --git a/lib/CodeGen/PeepholeOptimizer.cpp b/lib/CodeGen/PeepholeOptimizer.cpp index 9099862..a795ac8 100644 --- a/lib/CodeGen/PeepholeOptimizer.cpp +++ b/lib/CodeGen/PeepholeOptimizer.cpp @@ -527,6 +527,11 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) { SeenMoveImm = true; } else { Changed |= optimizeExtInstr(MI, MBB, LocalMIs); + // optimizeExtInstr might have created new instructions after MI + // and before the already incremented MII. Adjust MII so that the + // next iteration sees the new instructions. 
+ MII = MI; + ++MII; if (SeenMoveImm) Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs); } diff --git a/lib/CodeGen/PostRASchedulerList.cpp b/lib/CodeGen/PostRASchedulerList.cpp index 7449ff5..d57bc73 100644 --- a/lib/CodeGen/PostRASchedulerList.cpp +++ b/lib/CodeGen/PostRASchedulerList.cpp @@ -240,6 +240,7 @@ void SchedulePostRATDList::exitRegion() { ScheduleDAGInstrs::exitRegion(); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// dumpSchedule - dump the scheduled Sequence. void SchedulePostRATDList::dumpSchedule() const { for (unsigned i = 0, e = Sequence.size(); i != e; i++) { @@ -249,6 +250,7 @@ void SchedulePostRATDList::dumpSchedule() const { dbgs() << "**** NOOP ****\n"; } } +#endif bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) { TII = Fn.getTarget().getInstrInfo(); @@ -298,7 +300,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) { static int bbcnt = 0; if (bbcnt++ % DebugDiv != DebugMod) continue; - dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getName() + dbgs() << "*** DEBUG scheduling " << Fn.getName() << ":BB#" << MBB->getNumber() << " ***\n"; } #endif @@ -488,7 +490,6 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) { DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n'); BitVector killedRegs(TRI->getNumRegs()); - BitVector ReservedRegs = TRI->getReservedRegs(MF); StartBlockForKills(MBB); @@ -529,7 +530,7 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) { MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || !MO.isUse()) continue; unsigned Reg = MO.getReg(); - if ((Reg == 0) || ReservedRegs.test(Reg)) continue; + if ((Reg == 0) || MRI.isReserved(Reg)) continue; bool kill = false; if (!killedRegs.test(Reg)) { @@ -564,7 +565,7 @@ void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) { MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue; unsigned Reg = MO.getReg(); - if ((Reg == 0) || ReservedRegs.test(Reg)) continue; + if ((Reg == 0) || MRI.isReserved(Reg)) continue; LiveRegs.set(Reg); diff --git a/lib/CodeGen/ProcessImplicitDefs.cpp b/lib/CodeGen/ProcessImplicitDefs.cpp index 34d075c..e4e18c3 100644 --- a/lib/CodeGen/ProcessImplicitDefs.cpp +++ b/lib/CodeGen/ProcessImplicitDefs.cpp @@ -137,8 +137,7 @@ void ProcessImplicitDefs::processImplicitDef(MachineInstr *MI) { bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "********** PROCESS IMPLICIT DEFS **********\n" - << "********** Function: " - << ((Value*)MF.getFunction())->getName() << '\n'); + << "********** Function: " << MF.getName() << '\n'); bool Changed = false; diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index c791ffb..77554d6 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -96,7 +96,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) { placeCSRSpillsAndRestores(Fn); // Add the code to save and restore the callee saved registers - if (!F->hasFnAttr(Attribute::Naked)) + if (!F->getFnAttributes().hasAttribute(Attributes::Naked)) insertCSRSpillsAndRestores(Fn); // Allow the target machine to make final modifications to the function @@ -111,7 +111,7 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) { // called functions. Because of this, calculateCalleeSavedRegisters() // must be called before this function in order to set the AdjustsStack // and MaxCallFrameSize variables. 
- if (!F->hasFnAttr(Attribute::Naked)) + if (!F->getFnAttributes().hasAttribute(Attributes::Naked)) insertPrologEpilogCode(Fn); // Replace all MO_FrameIndex operands with physical register references @@ -221,13 +221,13 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) { return; // In Naked functions we aren't going to save any registers. - if (Fn.getFunction()->hasFnAttr(Attribute::Naked)) + if (Fn.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked)) return; std::vector<CalleeSavedInfo> CSI; for (unsigned i = 0; CSRegs[i]; ++i) { unsigned Reg = CSRegs[i]; - if (Fn.getRegInfo().isPhysRegOrOverlapUsed(Reg)) { + if (Fn.getRegInfo().isPhysRegUsed(Reg)) { // If the reg is modified, save it! CSI.push_back(CalleeSavedInfo(Reg)); } diff --git a/lib/CodeGen/RegAllocBasic.cpp b/lib/CodeGen/RegAllocBasic.cpp index 3a03807..8a49609 100644 --- a/lib/CodeGen/RegAllocBasic.cpp +++ b/lib/CodeGen/RegAllocBasic.cpp @@ -20,7 +20,6 @@ #include "VirtRegMap.h" #include "LiveRegMatrix.h" #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Function.h" #include "llvm/PassAnalysisSupport.h" #include "llvm/CodeGen/CalcSpillWeights.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" @@ -273,7 +272,7 @@ unsigned RABasic::selectOrSplit(LiveInterval &VirtReg, bool RABasic::runOnMachineFunction(MachineFunction &mf) { DEBUG(dbgs() << "********** BASIC REGISTER ALLOCATION **********\n" << "********** Function: " - << ((Value*)mf.getFunction())->getName() << '\n'); + << mf.getName() << '\n'); MF = &mf; RegAllocBase::init(getAnalysis<VirtRegMap>(), diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp index 6b3a48e..8892216 100644 --- a/lib/CodeGen/RegAllocFast.cpp +++ b/lib/CodeGen/RegAllocFast.cpp @@ -113,9 +113,11 @@ namespace { // PhysRegState - One of the RegState enums, or a virtreg. std::vector<unsigned> PhysRegState; - // UsedInInstr - BitVector of physregs that are used in the current - // instruction, and so cannot be allocated. - BitVector UsedInInstr; + typedef SparseSet<unsigned> UsedInInstrSet; + + // UsedInInstr - Set of physregs that are used in the current instruction, + // and so cannot be allocated. + UsedInInstrSet UsedInInstr; // SkippedInstrs - Descriptors of instructions whose clobber list was // ignored because all registers were spilled. It is still necessary to @@ -173,7 +175,7 @@ namespace { unsigned VirtReg, unsigned Hint); LiveRegMap::iterator reloadVirtReg(MachineInstr *MI, unsigned OpNum, unsigned VirtReg, unsigned Hint); - void spillAll(MachineInstr *MI); + void spillAll(MachineBasicBlock::iterator MI); bool setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg); void addRetOperands(MachineBasicBlock *MBB); }; @@ -312,7 +314,7 @@ void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, } /// spillAll - Spill all dirty virtregs without killing them. -void RAFast::spillAll(MachineInstr *MI) { +void RAFast::spillAll(MachineBasicBlock::iterator MI) { if (LiveVirtRegs.empty()) return; isBulkSpilling = true; // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order @@ -340,7 +342,7 @@ void RAFast::usePhysReg(MachineOperand &MO) { PhysRegState[PhysReg] = regFree; // Fall through case regFree: - UsedInInstr.set(PhysReg); + UsedInInstr.insert(PhysReg); MO.setIsKill(); return; default: @@ -360,13 +362,13 @@ "Instruction is not using a subregister of a reserved register"); // Leave the superregister in the working set.
PhysRegState[Alias] = regFree; - UsedInInstr.set(Alias); + UsedInInstr.insert(Alias); MO.getParent()->addRegisterKilled(Alias, TRI, true); return; case regFree: if (TRI->isSuperRegister(PhysReg, Alias)) { // Leave the superregister in the working set. - UsedInInstr.set(Alias); + UsedInInstr.insert(Alias); MO.getParent()->addRegisterKilled(Alias, TRI, true); return; } @@ -380,7 +382,7 @@ void RAFast::usePhysReg(MachineOperand &MO) { // All aliases are disabled, bring register into working set. PhysRegState[PhysReg] = regFree; - UsedInInstr.set(PhysReg); + UsedInInstr.insert(PhysReg); MO.setIsKill(); } @@ -389,7 +391,7 @@ void RAFast::usePhysReg(MachineOperand &MO) { /// reserved instead of allocated. void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg, RegState NewState) { - UsedInInstr.set(PhysReg); + UsedInInstr.insert(PhysReg); switch (unsigned VirtReg = PhysRegState[PhysReg]) { case regDisabled: break; @@ -429,7 +431,7 @@ void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg, // can be allocated directly. // Returns spillImpossible when PhysReg or an alias can't be spilled. unsigned RAFast::calcSpillCost(unsigned PhysReg) const { - if (UsedInInstr.test(PhysReg)) { + if (UsedInInstr.count(PhysReg)) { DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is already used in instr.\n"); return spillImpossible; } @@ -454,7 +456,7 @@ unsigned RAFast::calcSpillCost(unsigned PhysReg) const { unsigned Cost = 0; for (MCRegAliasIterator AI(PhysReg, TRI, false); AI.isValid(); ++AI) { unsigned Alias = *AI; - if (UsedInInstr.test(Alias)) + if (UsedInInstr.count(Alias)) return spillImpossible; switch (unsigned VirtReg = PhysRegState[Alias]) { case regDisabled: @@ -509,7 +511,7 @@ RAFast::LiveRegMap::iterator RAFast::allocVirtReg(MachineInstr *MI, // Ignore invalid hints. if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) || - !RC->contains(Hint) || !RegClassInfo.isAllocatable(Hint))) + !RC->contains(Hint) || !MRI->isAllocatable(Hint))) Hint = 0; // Take hint when possible. @@ -530,7 +532,7 @@ RAFast::LiveRegMap::iterator RAFast::allocVirtReg(MachineInstr *MI, // First try to find a completely free register. for (ArrayRef::iterator I = AO.begin(), E = AO.end(); I != E; ++I) { unsigned PhysReg = *I; - if (PhysRegState[PhysReg] == regFree && !UsedInInstr.test(PhysReg)) { + if (PhysRegState[PhysReg] == regFree && !UsedInInstr.count(PhysReg)) { assignVirtToPhysReg(*LRI, PhysReg); return LRI; } @@ -596,7 +598,7 @@ RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum, LRI->LastUse = MI; LRI->LastOpNum = OpNum; LRI->Dirty = true; - UsedInInstr.set(LRI->PhysReg); + UsedInInstr.insert(LRI->PhysReg); return LRI; } @@ -646,7 +648,7 @@ RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum, assert(LRI->PhysReg && "Register not assigned"); LRI->LastUse = MI; LRI->LastOpNum = OpNum; - UsedInInstr.set(LRI->PhysReg); + UsedInInstr.insert(LRI->PhysReg); return LRI; } @@ -708,7 +710,7 @@ void RAFast::handleThroughOperands(MachineInstr *MI, unsigned Reg = MO.getReg(); if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue; for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) { - UsedInInstr.set(*AI); + UsedInInstr.insert(*AI); if (ThroughRegs.count(PhysRegState[*AI])) definePhysReg(MI, *AI, regFree); } @@ -756,7 +758,7 @@ void RAFast::handleThroughOperands(MachineInstr *MI, } // Restore UsedInInstr to a state usable for allocating normal virtual uses. 
- UsedInInstr.reset(); + UsedInInstr.clear(); for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { MachineOperand &MO = MI->getOperand(i); if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue; @@ -764,12 +766,12 @@ void RAFast::handleThroughOperands(MachineInstr *MI, if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue; DEBUG(dbgs() << "\tSetting " << PrintReg(Reg, TRI) << " as used in instr\n"); - UsedInInstr.set(Reg); + UsedInInstr.insert(Reg); } // Also mark PartialDefs as used to avoid reallocation. for (unsigned i = 0, e = PartialDefs.size(); i != e; ++i) - UsedInInstr.set(PartialDefs[i]); + UsedInInstr.insert(PartialDefs[i]); } /// addRetOperand - ensure that a return instruction has an operand for each @@ -838,7 +840,7 @@ void RAFast::AllocateBasicBlock() { // Add live-in registers as live. for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(), E = MBB->livein_end(); I != E; ++I) - if (RegClassInfo.isAllocatable(*I)) + if (MRI->isAllocatable(*I)) definePhysReg(MII, *I, regReserved); SmallVector VirtDead; @@ -942,7 +944,7 @@ void RAFast::AllocateBasicBlock() { } // Track registers used by instruction. - UsedInInstr.reset(); + UsedInInstr.clear(); // First scan. // Mark physreg uses and early clobbers as used. @@ -954,6 +956,11 @@ void RAFast::AllocateBasicBlock() { bool hasPhysDefs = false; for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { MachineOperand &MO = MI->getOperand(i); + // Make sure MRI knows about registers clobbered by regmasks. + if (MO.isRegMask()) { + MRI->addPhysRegsUsedFromRegMask(MO.getRegMask()); + continue; + } if (!MO.isReg()) continue; unsigned Reg = MO.getReg(); if (!Reg) continue; @@ -970,7 +977,7 @@ void RAFast::AllocateBasicBlock() { } continue; } - if (!RegClassInfo.isAllocatable(Reg)) continue; + if (!MRI->isAllocatable(Reg)) continue; if (MO.isUse()) { usePhysReg(MO); } else if (MO.isEarlyClobber()) { @@ -1016,11 +1023,13 @@ void RAFast::AllocateBasicBlock() { } } - MRI->addPhysRegsUsed(UsedInInstr); + for (UsedInInstrSet::iterator + I = UsedInInstr.begin(), E = UsedInInstr.end(); I != E; ++I) + MRI->setPhysRegUsed(*I); // Track registers defined by instruction - early clobbers and tied uses at // this point. - UsedInInstr.reset(); + UsedInInstr.clear(); if (hasEarlyClobbers) { for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { MachineOperand &MO = MI->getOperand(i); @@ -1030,7 +1039,7 @@ void RAFast::AllocateBasicBlock() { // Look for physreg defs and tied uses. if (!MO.isDef() && !MI->isRegTiedToDefOperand(i)) continue; for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) - UsedInInstr.set(*AI); + UsedInInstr.insert(*AI); } } @@ -1058,7 +1067,7 @@ void RAFast::AllocateBasicBlock() { unsigned Reg = MO.getReg(); if (TargetRegisterInfo::isPhysicalRegister(Reg)) { - if (!RegClassInfo.isAllocatable(Reg)) continue; + if (!MRI->isAllocatable(Reg)) continue; definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ? 
regFree : regReserved); continue; @@ -1080,7 +1089,9 @@ void RAFast::AllocateBasicBlock() { killVirtReg(VirtDead[i]); VirtDead.clear(); - MRI->addPhysRegsUsed(UsedInInstr); + for (UsedInInstrSet::iterator + I = UsedInInstr.begin(), E = UsedInInstr.end(); I != E; ++I) + MRI->setPhysRegUsed(*I); if (CopyDst && CopyDst == CopySrc && CopyDstSub == CopySrcSub) { DEBUG(dbgs() << "-- coalescing: " << *MI); @@ -1110,8 +1121,7 @@ void RAFast::AllocateBasicBlock() { /// bool RAFast::runOnMachineFunction(MachineFunction &Fn) { DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n" - << "********** Function: " - << ((Value*)Fn.getFunction())->getName() << '\n'); + << "********** Function: " << Fn.getName() << '\n'); MF = &Fn; MRI = &MF->getRegInfo(); TM = &Fn.getTarget(); @@ -1119,7 +1129,8 @@ bool RAFast::runOnMachineFunction(MachineFunction &Fn) { TII = TM->getInstrInfo(); MRI->freezeReservedRegs(Fn); RegClassInfo.runOnMachineFunction(Fn); - UsedInInstr.resize(TRI->getNumRegs()); + UsedInInstr.clear(); + UsedInInstr.setUniverse(TRI->getNumRegs()); assert(!MRI->isSSA() && "regalloc requires leaving SSA"); diff --git a/lib/CodeGen/RegAllocGreedy.cpp b/lib/CodeGen/RegAllocGreedy.cpp index 6ac5428..06f69c1e 100644 --- a/lib/CodeGen/RegAllocGreedy.cpp +++ b/lib/CodeGen/RegAllocGreedy.cpp @@ -24,7 +24,6 @@ #include "VirtRegMap.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Function.h" #include "llvm/PassAnalysisSupport.h" #include "llvm/CodeGen/CalcSpillWeights.h" #include "llvm/CodeGen/EdgeBundles.h" @@ -331,9 +330,9 @@ void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { AU.addPreserved(); AU.addRequired(); AU.addPreserved(); - AU.addRequired(); AU.addRequired(); AU.addPreserved(); + AU.addRequired(); AU.addRequired(); AU.addPreserved(); AU.addRequired(); @@ -509,7 +508,7 @@ bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint, /// /// @param VirtReg Live range that is about to be assigned. /// @param PhysReg Desired register for assignment. -/// @prarm IsHint True when PhysReg is VirtReg's preferred register. +/// @param IsHint True when PhysReg is VirtReg's preferred register. /// @param MaxCost Only look for cheaper candidates and update with new cost /// when returning true. /// @returns True when interference can be evicted cheaper than MaxCost. 
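The RegAllocFast hunks above swap the BitVector UsedInInstr for a SparseSet keyed by physical register number: clearing it between instructions now costs time proportional to the registers actually inserted rather than to the whole register file, and the closing loop can iterate over exactly the registers that need MRI->setPhysRegUsed(). A minimal, self-contained sketch of that access pattern with llvm::SparseSet (the universe size and register numbers below are placeholders, not values from the patch):

  #include "llvm/ADT/SparseSet.h"

  void usedInInstrSketch(unsigned NumRegs) {
    llvm::SparseSet<unsigned> UsedInInstr;
    UsedInInstr.setUniverse(NumRegs);  // Once per function, like setUniverse(TRI->getNumRegs()).

    UsedInInstr.clear();               // Per instruction; cheap compared to BitVector::reset().
    UsedInInstr.insert(7);             // Mark physregs touched by the instruction.
    UsedInInstr.insert(42);

    if (UsedInInstr.count(42)) {
      // Same query the old UsedInInstr.test(Reg) answered: register is blocked
      // for allocation within this instruction.
    }

    // Iterate only the inserted registers, as the new setPhysRegUsed loop does.
    for (llvm::SparseSet<unsigned>::const_iterator I = UsedInInstr.begin(),
                                                   E = UsedInInstr.end();
         I != E; ++I)
      (void)*I;
  }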
@@ -1746,8 +1745,7 @@ unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg, bool RAGreedy::runOnMachineFunction(MachineFunction &mf) { DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n" - << "********** Function: " - << ((Value*)mf.getFunction())->getName() << '\n'); + << "********** Function: " << mf.getName() << '\n'); MF = &mf; if (VerifyEnabled) diff --git a/lib/CodeGen/RegAllocPBQP.cpp b/lib/CodeGen/RegAllocPBQP.cpp index d0db26b..02ebce7 100644 --- a/lib/CodeGen/RegAllocPBQP.cpp +++ b/lib/CodeGen/RegAllocPBQP.cpp @@ -118,7 +118,6 @@ private: typedef std::vector AllowedSetMap; typedef std::pair RegPair; typedef std::map CoalesceMap; - typedef std::vector NodeVector; typedef std::set RegSet; @@ -192,7 +191,6 @@ std::auto_ptr PBQPBuilder::build(MachineFunction *mf, const MachineLoopInfo *loopInfo, const RegSet &vregs) { - typedef std::vector LIVector; LiveIntervals *LIS = const_cast(lis); MachineRegisterInfo *mri = &mf->getRegInfo(); const TargetRegisterInfo *tri = mf->getTarget().getRegisterInfo(); @@ -209,8 +207,6 @@ std::auto_ptr PBQPBuilder::build(MachineFunction *mf, mri->setPhysRegUsed(Reg); } - BitVector reservedRegs = tri->getReservedRegs(*mf); - // Iterate over vregs. for (RegSet::const_iterator vregItr = vregs.begin(), vregEnd = vregs.end(); vregItr != vregEnd; ++vregItr) { @@ -219,7 +215,7 @@ std::auto_ptr PBQPBuilder::build(MachineFunction *mf, LiveInterval *vregLI = &LIS->getInterval(vreg); // Record any overlaps with regmask operands. - BitVector regMaskOverlaps(tri->getNumRegs()); + BitVector regMaskOverlaps; LIS->checkRegMaskInterference(*vregLI, regMaskOverlaps); // Compute an initial allowed set for the current vreg. @@ -228,7 +224,7 @@ std::auto_ptr PBQPBuilder::build(MachineFunction *mf, ArrayRef rawOrder = trc->getRawAllocationOrder(*mf); for (unsigned i = 0; i != rawOrder.size(); ++i) { unsigned preg = rawOrder[i]; - if (reservedRegs.test(preg)) + if (mri->isReserved(preg)) continue; // vregLI crosses a regmask operand that clobbers preg. @@ -358,7 +354,7 @@ std::auto_ptr PBQPBuilderWithCoalescing::build( loopInfo->getLoopDepth(mbb)); if (cp.isPhys()) { - if (!lis->isAllocatable(dst)) { + if (!mf->getRegInfo().isAllocatable(dst)) { continue; } @@ -433,6 +429,7 @@ void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const { au.addRequired(); au.addPreserved(); au.addRequired(); + au.addPreserved(); //au.addRequiredID(SplitCriticalEdgesID); if (customPassID) au.addRequiredID(*customPassID); @@ -444,6 +441,7 @@ void RegAllocPBQP::getAnalysisUsage(AnalysisUsage &au) const { au.addRequired(); au.addPreserved(); au.addRequired(); + au.addPreserved(); MachineFunctionPass::getAnalysisUsage(au); } @@ -556,7 +554,7 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) { mri->freezeReservedRegs(MF); - DEBUG(dbgs() << "PBQP Register Allocating for " << mf->getFunction()->getName() << "\n"); + DEBUG(dbgs() << "PBQP Register Allocating for " << mf->getName() << "\n"); // Allocator main loop: // @@ -570,11 +568,12 @@ bool RegAllocPBQP::runOnMachineFunction(MachineFunction &MF) { // Find the vreg intervals in need of allocation. findVRegIntervalsToAlloc(); +#ifndef NDEBUG const Function* func = mf->getFunction(); std::string fqn = func->getParent()->getModuleIdentifier() + "." + func->getName().str(); - (void)fqn; +#endif // If there are non-empty intervals allocate them using pbqp. 
if (!vregsToAlloc.empty()) { diff --git a/lib/CodeGen/RegisterClassInfo.cpp b/lib/CodeGen/RegisterClassInfo.cpp index 652bc30..805d235 100644 --- a/lib/CodeGen/RegisterClassInfo.cpp +++ b/lib/CodeGen/RegisterClassInfo.cpp @@ -15,8 +15,9 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "regalloc" -#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/RegisterClassInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" @@ -57,10 +58,11 @@ void RegisterClassInfo::runOnMachineFunction(const MachineFunction &mf) { CalleeSaved = CSR; // Different reserved registers? - BitVector RR = TRI->getReservedRegs(*MF); - if (RR != Reserved) + const BitVector &RR = MF->getRegInfo().getReservedRegs(); + if (Reserved.size() != RR.size() || RR != Reserved) { Update = true; - Reserved = RR; + Reserved = RR; + } // Invalidate cached information from previous function. if (Update) diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp index 9906334..2538f10 100644 --- a/lib/CodeGen/RegisterCoalescer.cpp +++ b/lib/CodeGen/RegisterCoalescer.cpp @@ -55,6 +55,8 @@ STATISTIC(numCommutes , "Number of instruction commuting performed"); STATISTIC(numExtends , "Number of copies extended"); STATISTIC(NumReMats , "Number of instructions re-materialized"); STATISTIC(NumInflated , "Number of register classes inflated"); +STATISTIC(NumLaneConflicts, "Number of dead lane conflicts tested"); +STATISTIC(NumLaneResolves, "Number of dead lane conflicts resolved"); static cl::opt EnableJoining("join-liveintervals", @@ -123,6 +125,9 @@ namespace { /// can use this information below to update aliases. bool joinIntervals(CoalescerPair &CP); + /// Attempt joining two virtual registers. Return true on success. + bool joinVirtRegs(CoalescerPair &CP); + /// Attempt joining with a reserved physreg. bool joinReservedPhysReg(CoalescerPair &CP); @@ -193,12 +198,6 @@ INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing", char RegisterCoalescer::ID = 0; -static unsigned compose(const TargetRegisterInfo &tri, unsigned a, unsigned b) { - if (!a) return b; - if (!b) return a; - return tri.composeSubRegIndices(a, b); -} - static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, unsigned &Src, unsigned &Dst, unsigned &SrcSub, unsigned &DstSub) { @@ -209,8 +208,8 @@ static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, SrcSub = MI->getOperand(1).getSubReg(); } else if (MI->isSubregToReg()) { Dst = MI->getOperand(0).getReg(); - DstSub = compose(tri, MI->getOperand(0).getSubReg(), - MI->getOperand(3).getImm()); + DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(), + MI->getOperand(3).getImm()); Src = MI->getOperand(2).getReg(); SrcSub = MI->getOperand(2).getSubReg(); } else @@ -349,7 +348,8 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const { if (DstReg != Dst) return false; // Registers match, do the subregisters line up? - return compose(TRI, SrcIdx, SrcSub) == compose(TRI, DstIdx, DstSub); + return TRI.composeSubRegIndices(SrcIdx, SrcSub) == + TRI.composeSubRegIndices(DstIdx, DstSub); } } @@ -425,7 +425,8 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP, // If AValNo is defined as a copy from IntB, we can potentially process this. // Get the instruction that defines this value number. 
MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def); - if (!CP.isCoalescable(ACopyMI)) + // Don't allow any partial copies, even if isCoalescable() allows them. + if (!CP.isCoalescable(ACopyMI) || !ACopyMI->isFullCopy()) return false; // Get the LiveRange in IntB that this value number starts with. @@ -583,7 +584,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx); unsigned NewReg = NewDstMO.getReg(); - if (NewReg != IntB.reg || !NewDstMO.isKill()) + if (NewReg != IntB.reg || !LiveRangeQuery(IntB, AValNo->def).isKill()) return false; // Make sure there are no other definitions of IntB that would reach the @@ -849,8 +850,17 @@ void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg, // Update LiveDebugVariables. LDV->renameRegister(SrcReg, DstReg, SubIdx); + SmallPtrSet Visited; for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg); MachineInstr *UseMI = I.skipInstruction();) { + // Each instruction can only be rewritten once because sub-register + // composition is not always idempotent. When SrcReg != DstReg, rewriting + // the UseMI operands removes them from the SrcReg use-def chain, but when + // SrcReg is DstReg we could encounter UseMI twice if it has multiple + // operands mentioning the virtual register. + if (SrcReg == DstReg && !Visited.insert(UseMI)) + continue; + SmallVector Ops; bool Reads, Writes; tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops); @@ -890,7 +900,7 @@ bool RegisterCoalescer::canJoinPhys(CoalescerPair &CP) { /// Always join simple intervals that are defined by a single copy from a /// reserved register. This doesn't increase register pressure, so it is /// always beneficial. - if (!RegClassInfo.isReserved(CP.getDstReg())) { + if (!MRI->isReserved(CP.getDstReg())) { DEBUG(dbgs() << "\tCan only merge into reserved registers.\n"); return false; } @@ -1065,7 +1075,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) { /// Attempt joining with a reserved physreg. bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) { assert(CP.isPhys() && "Must be a physreg copy"); - assert(RegClassInfo.isReserved(CP.getDstReg()) && "Not a reserved register"); + assert(MRI->isReserved(CP.getDstReg()) && "Not a reserved register"); LiveInterval &RHS = LIS->getInterval(CP.getSrcReg()); DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS << '\n'); @@ -1102,347 +1112,797 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) { return true; } -/// ComputeUltimateVN - Assuming we are going to join two live intervals, -/// compute what the resultant value numbers for each value in the input two -/// ranges will be. This is complicated by copies between the two which can -/// and will commonly cause multiple value numbers to be merged into one. -/// -/// VN is the value number that we're trying to resolve. InstDefiningValue -/// keeps track of the new InstDefiningValue assignment for the result -/// LiveInterval. ThisFromOther/OtherFromThis are sets that keep track of -/// whether a value in this or other is a copy from the opposite set. -/// ThisValNoAssignments/OtherValNoAssignments keep track of value #'s that have -/// already been assigned. -/// -/// ThisFromOther[x] - If x is defined as a copy from the other interval, this -/// contains the value number the copy is from. 
-/// -static unsigned ComputeUltimateVN(VNInfo *VNI, - SmallVector &NewVNInfo, - DenseMap &ThisFromOther, - DenseMap &OtherFromThis, - SmallVector &ThisValNoAssignments, - SmallVector &OtherValNoAssignments) { - unsigned VN = VNI->id; - - // If the VN has already been computed, just return it. - if (ThisValNoAssignments[VN] >= 0) - return ThisValNoAssignments[VN]; - assert(ThisValNoAssignments[VN] != -2 && "Cyclic value numbers"); - - // If this val is not a copy from the other val, then it must be a new value - // number in the destination. - DenseMap::iterator I = ThisFromOther.find(VNI); - if (I == ThisFromOther.end()) { - NewVNInfo.push_back(VNI); - return ThisValNoAssignments[VN] = NewVNInfo.size()-1; - } - VNInfo *OtherValNo = I->second; - - // Otherwise, this *is* a copy from the RHS. If the other side has already - // been computed, return it. - if (OtherValNoAssignments[OtherValNo->id] >= 0) - return ThisValNoAssignments[VN] = OtherValNoAssignments[OtherValNo->id]; - - // Mark this value number as currently being computed, then ask what the - // ultimate value # of the other value is. - ThisValNoAssignments[VN] = -2; - unsigned UltimateVN = - ComputeUltimateVN(OtherValNo, NewVNInfo, OtherFromThis, ThisFromOther, - OtherValNoAssignments, ThisValNoAssignments); - return ThisValNoAssignments[VN] = UltimateVN; -} +//===----------------------------------------------------------------------===// +// Interference checking and interval joining +//===----------------------------------------------------------------------===// +// +// In the easiest case, the two live ranges being joined are disjoint, and +// there is no interference to consider. It is quite common, though, to have +// overlapping live ranges, and we need to check if the interference can be +// resolved. +// +// The live range of a single SSA value forms a sub-tree of the dominator tree. +// This means that two SSA values overlap if and only if the def of one value +// is contained in the live range of the other value. As a special case, the +// overlapping values can be defined at the same index. +// +// The interference from an overlapping def can be resolved in these cases: +// +// 1. Coalescable copies. The value is defined by a copy that would become an +// identity copy after joining SrcReg and DstReg. The copy instruction will +// be removed, and the value will be merged with the source value. +// +// There can be several copies back and forth, causing many values to be +// merged into one. We compute a list of ultimate values in the joined live +// range as well as a mappings from the old value numbers. +// +// 2. IMPLICIT_DEF. This instruction is only inserted to ensure all PHI +// predecessors have a live out value. It doesn't cause real interference, +// and can be merged into the value it overlaps. Like a coalescable copy, it +// can be erased after joining. +// +// 3. Copy of external value. The overlapping def may be a copy of a value that +// is already in the other register. This is like a coalescable copy, but +// the live range of the source register must be trimmed after erasing the +// copy instruction: +// +// %src = COPY %ext +// %dst = COPY %ext <-- Remove this COPY, trim the live range of %ext. +// +// 4. Clobbering undefined lanes. 
Vector registers are sometimes built by +// defining one lane at a time: +// +// %dst:ssub0 = FOO +// %src = BAR +// %dst:ssub1 = COPY %src +// +// The live range of %src overlaps the %dst value defined by FOO, but +// merging %src into %dst:ssub1 is only going to clobber the ssub1 lane +// which was undef anyway. +// +// The value mapping is more complicated in this case. The final live range +// will have different value numbers for both FOO and BAR, but there is no +// simple mapping from old to new values. It may even be necessary to add +// new PHI values. +// +// 5. Clobbering dead lanes. A def may clobber a lane of a vector register that +// is live, but never read. This can happen because we don't compute +// individual live ranges per lane. +// +// %dst = FOO +// %src = BAR +// %dst:ssub1 = COPY %src +// +// This kind of interference is only resolved locally. If the clobbered +// lane value escapes the block, the join is aborted. +namespace { +/// Track information about values in a single virtual register about to be +/// joined. Objects of this class are always created in pairs - one for each +/// side of the CoalescerPair. +class JoinVals { + LiveInterval &LI; + + // Location of this register in the final joined register. + // Either CP.DstIdx or CP.SrcIdx. + unsigned SubIdx; + + // Values that will be present in the final live range. + SmallVectorImpl &NewVNInfo; + + const CoalescerPair &CP; + LiveIntervals *LIS; + SlotIndexes *Indexes; + const TargetRegisterInfo *TRI; + + // Value number assignments. Maps value numbers in LI to entries in NewVNInfo. + // This is suitable for passing to LiveInterval::join(). + SmallVector Assignments; + + // Conflict resolution for overlapping values. + enum ConflictResolution { + // No overlap, simply keep this value. + CR_Keep, + + // Merge this value into OtherVNI and erase the defining instruction. + // Used for IMPLICIT_DEF, coalescable copies, and copies from external + // values. + CR_Erase, + + // Merge this value into OtherVNI but keep the defining instruction. + // This is for the special case where OtherVNI is defined by the same + // instruction. + CR_Merge, + + // Keep this value, and have it replace OtherVNI where possible. This + // complicates value mapping since OtherVNI maps to two different values + // before and after this def. + // Used when clobbering undefined or dead lanes. + CR_Replace, + + // Unresolved conflict. Visit later when all values have been mapped. + CR_Unresolved, + + // Unresolvable conflict. Abort the join. + CR_Impossible + }; -// Find out if we have something like -// A = X -// B = X -// if so, we can pretend this is actually -// A = X -// B = A -// which allows us to coalesce A and B. -// VNI is the definition of B. LR is the life range of A that includes -// the slot just before B. If we return true, we add "B = X" to DupCopies. -// This implies that A dominates B. -static bool RegistersDefinedFromSameValue(LiveIntervals &li, - const TargetRegisterInfo &tri, - CoalescerPair &CP, - VNInfo *VNI, - VNInfo *OtherVNI, - SmallVector &DupCopies) { - // FIXME: This is very conservative. For example, we don't handle - // physical registers. - - MachineInstr *MI = li.getInstructionFromIndex(VNI->def); - - if (!MI || CP.isPartial() || CP.isPhys()) - return false; + // Per-value info for LI. The lane bit masks are all relative to the final + // joined register, so they can be compared directly between SrcReg and + // DstReg. 
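The WriteLanes and ValidLanes fields described above are plain bitmasks, one bit per lane of the joined register. A minimal stand-alone sketch of the test analyzeValue() later performs on them; the lane mask values are invented for illustration, the real masks come from TargetRegisterInfo::getSubRegIndexLaneMask() on composed sub-register indices:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t LaneSsub0 = 0x1;   // hypothetical mask for ssub0
  const uint32_t LaneSsub1 = 0x2;   // hypothetical mask for ssub1

  // %dst:ssub0 = FOO   -> writes (and validates) only ssub0
  uint32_t DstWriteLanes = LaneSsub0, DstValidLanes = LaneSsub0;
  // %src = BAR         -> a full write of the narrow register, mapped onto ssub1
  uint32_t SrcWriteLanes = LaneSsub1, SrcValidLanes = LaneSsub1;

  // The defs overlap, but the lanes written by %src were undef in %dst,
  // so the conflict is resolvable (CR_Replace in the code above).
  assert((SrcWriteLanes & DstValidLanes) == 0);

  // If %src had also written ssub0, live lanes would be clobbered and the
  // join would be CR_Impossible unless nothing reads them locally.
  uint32_t BadWrite = LaneSsub0 | LaneSsub1;
  assert((BadWrite & DstValidLanes) != 0);
  (void)DstWriteLanes; (void)SrcValidLanes;
  return 0;
}

When the intersection is zero the clobbered lanes were never valid, which is what makes the CR_Replace resolution safe.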
+ struct Val { + ConflictResolution Resolution; - unsigned A = CP.getDstReg(); - if (!TargetRegisterInfo::isVirtualRegister(A)) - return false; + // Lanes written by this def, 0 for unanalyzed values. + unsigned WriteLanes; - unsigned B = CP.getSrcReg(); - if (!TargetRegisterInfo::isVirtualRegister(B)) - return false; + // Lanes with defined values in this register. Other lanes are undef and + // safe to clobber. + unsigned ValidLanes; - MachineInstr *OtherMI = li.getInstructionFromIndex(OtherVNI->def); - if (!OtherMI) - return false; + // Value in LI being redefined by this def. + VNInfo *RedefVNI; - if (MI->isImplicitDef()) { - DupCopies.push_back(MI); - return true; - } else { - if (!MI->isFullCopy()) - return false; - unsigned Src = MI->getOperand(1).getReg(); - if (!TargetRegisterInfo::isVirtualRegister(Src)) - return false; - if (!OtherMI->isFullCopy()) - return false; - unsigned OtherSrc = OtherMI->getOperand(1).getReg(); - if (!TargetRegisterInfo::isVirtualRegister(OtherSrc)) - return false; + // Value in the other live range that overlaps this def, if any. + VNInfo *OtherVNI; - if (Src != OtherSrc) - return false; + // Is this value an IMPLICIT_DEF? + bool IsImplicitDef; - // If the copies use two different value numbers of X, we cannot merge - // A and B. - LiveInterval &SrcInt = li.getInterval(Src); - // getVNInfoBefore returns NULL for undef copies. In this case, the - // optimization is still safe. - if (SrcInt.getVNInfoBefore(OtherVNI->def) != - SrcInt.getVNInfoBefore(VNI->def)) - return false; + // True when the live range of this value will be pruned because of an + // overlapping CR_Replace value in the other live range. + bool Pruned; - DupCopies.push_back(MI); - return true; - } -} + // True once Pruned above has been computed. + bool PrunedComputed; -/// joinIntervals - Attempt to join these two intervals. On failure, this -/// returns false. -bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) { - // Handle physreg joins separately. - if (CP.isPhys()) - return joinReservedPhysReg(CP); + Val() : Resolution(CR_Keep), WriteLanes(0), ValidLanes(0), + RedefVNI(0), OtherVNI(0), IsImplicitDef(false), Pruned(false), + PrunedComputed(false) {} - LiveInterval &RHS = LIS->getInterval(CP.getSrcReg()); - DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS - << '\n'); + bool isAnalyzed() const { return WriteLanes != 0; } + }; - // Compute the final value assignment, assuming that the live ranges can be - // coalesced. - SmallVector LHSValNoAssignments; - SmallVector RHSValNoAssignments; - DenseMap LHSValsDefinedFromRHS; - DenseMap RHSValsDefinedFromLHS; - SmallVector NewVNInfo; + // One entry per value number in LI. 
+ SmallVector Vals; + + unsigned computeWriteLanes(const MachineInstr *DefMI, bool &Redef); + VNInfo *stripCopies(VNInfo *VNI); + ConflictResolution analyzeValue(unsigned ValNo, JoinVals &Other); + void computeAssignment(unsigned ValNo, JoinVals &Other); + bool taintExtent(unsigned, unsigned, JoinVals&, + SmallVectorImpl >&); + bool usesLanes(MachineInstr *MI, unsigned, unsigned, unsigned); + bool isPrunedValue(unsigned ValNo, JoinVals &Other); + +public: + JoinVals(LiveInterval &li, unsigned subIdx, + SmallVectorImpl &newVNInfo, + const CoalescerPair &cp, + LiveIntervals *lis, + const TargetRegisterInfo *tri) + : LI(li), SubIdx(subIdx), NewVNInfo(newVNInfo), CP(cp), LIS(lis), + Indexes(LIS->getSlotIndexes()), TRI(tri), + Assignments(LI.getNumValNums(), -1), Vals(LI.getNumValNums()) + {} + + /// Analyze defs in LI and compute a value mapping in NewVNInfo. + /// Returns false if any conflicts were impossible to resolve. + bool mapValues(JoinVals &Other); + + /// Try to resolve conflicts that require all values to be mapped. + /// Returns false if any conflicts were impossible to resolve. + bool resolveConflicts(JoinVals &Other); + + /// Prune the live range of values in Other.LI where they would conflict with + /// CR_Replace values in LI. Collect end points for restoring the live range + /// after joining. + void pruneValues(JoinVals &Other, SmallVectorImpl &EndPoints); + + /// Erase any machine instructions that have been coalesced away. + /// Add erased instructions to ErasedInstrs. + /// Add foreign virtual registers to ShrinkRegs if their live range ended at + /// the erased instrs. + void eraseInstrs(SmallPtrSet &ErasedInstrs, + SmallVectorImpl &ShrinkRegs); + + /// Get the value assignments suitable for passing to LiveInterval::join. + const int *getAssignments() const { return Assignments.data(); } +}; +} // end anonymous namespace + +/// Compute the bitmask of lanes actually written by DefMI. +/// Set Redef if there are any partial register definitions that depend on the +/// previous value of the register. +unsigned JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef) { + unsigned L = 0; + for (ConstMIOperands MO(DefMI); MO.isValid(); ++MO) { + if (!MO->isReg() || MO->getReg() != LI.reg || !MO->isDef()) + continue; + L |= TRI->getSubRegIndexLaneMask( + TRI->composeSubRegIndices(SubIdx, MO->getSubReg())); + if (MO->readsReg()) + Redef = true; + } + return L; +} - SmallVector DupCopies; - SmallVector DeadCopies; +/// Find the ultimate value that VNI was copied from. +VNInfo *JoinVals::stripCopies(VNInfo *VNI) { + while (!VNI->isPHIDef()) { + MachineInstr *MI = Indexes->getInstructionFromIndex(VNI->def); + assert(MI && "No defining instruction"); + if (!MI->isFullCopy()) + break; + unsigned Reg = MI->getOperand(1).getReg(); + if (!TargetRegisterInfo::isVirtualRegister(Reg)) + break; + LiveRangeQuery LRQ(LIS->getInterval(Reg), VNI->def); + if (!LRQ.valueIn()) + break; + VNI = LRQ.valueIn(); + } + return VNI; +} - LiveInterval &LHS = LIS->getOrCreateInterval(CP.getDstReg()); - DEBUG(dbgs() << "\t\tLHS = " << PrintReg(CP.getDstReg(), TRI) << ' ' << LHS - << '\n'); +/// Analyze ValNo in this live range, and set all fields of Vals[ValNo]. +/// Return a conflict resolution when possible, but leave the hard cases as +/// CR_Unresolved. +/// Recursively calls computeAssignment() on this and Other, guaranteeing that +/// both OtherVNI and RedefVNI have been analyzed and mapped before returning. 
+/// The recursion always goes upwards in the dominator tree, making loops +/// impossible. +JoinVals::ConflictResolution +JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) { + Val &V = Vals[ValNo]; + assert(!V.isAnalyzed() && "Value has already been analyzed!"); + VNInfo *VNI = LI.getValNumInfo(ValNo); + if (VNI->isUnused()) { + V.WriteLanes = ~0u; + return CR_Keep; + } - // Loop over the value numbers of the LHS, seeing if any are defined from - // the RHS. - for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end(); - i != e; ++i) { - VNInfo *VNI = *i; - if (VNI->isUnused() || VNI->isPHIDef()) - continue; - MachineInstr *MI = LIS->getInstructionFromIndex(VNI->def); - assert(MI && "Missing def"); - if (!MI->isCopyLike() && !MI->isImplicitDef()) // Src not defined by a copy? - continue; + // Get the instruction defining this value, compute the lanes written. + const MachineInstr *DefMI = 0; + if (VNI->isPHIDef()) { + // Conservatively assume that all lanes in a PHI are valid. + V.ValidLanes = V.WriteLanes = TRI->getSubRegIndexLaneMask(SubIdx); + } else { + DefMI = Indexes->getInstructionFromIndex(VNI->def); + bool Redef = false; + V.ValidLanes = V.WriteLanes = computeWriteLanes(DefMI, Redef); + + // If this is a read-modify-write instruction, there may be more valid + // lanes than the ones written by this instruction. + // This only covers partial redef operands. DefMI may have normal use + // operands reading the register. They don't contribute valid lanes. + // + // This adds ssub1 to the set of valid lanes in %src: + // + // %src:ssub1 = FOO + // + // This leaves only ssub1 valid, making any other lanes undef: + // + // %src:ssub1 = FOO %src:ssub2 + // + // The flag on the def operand means that old lane values are + // not important. + if (Redef) { + V.RedefVNI = LiveRangeQuery(LI, VNI->def).valueIn(); + assert(V.RedefVNI && "Instruction is reading nonexistent value"); + computeAssignment(V.RedefVNI->id, Other); + V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes; + } - // Figure out the value # from the RHS. - VNInfo *OtherVNI = RHS.getVNInfoBefore(VNI->def); - // The copy could be to an aliased physreg. - if (!OtherVNI) - continue; + // An IMPLICIT_DEF writes undef values. + if (DefMI->isImplicitDef()) { + V.IsImplicitDef = true; + V.ValidLanes &= ~V.WriteLanes; + } + } - // DstReg is known to be a register in the LHS interval. If the src is - // from the RHS interval, we can use its value #. - if (CP.isCoalescable(MI)) - DeadCopies.push_back(MI); - else if (!RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, OtherVNI, - DupCopies)) - continue; + // Find the value in Other that overlaps VNI->def, if any. + LiveRangeQuery OtherLRQ(Other.LI, VNI->def); + + // It is possible that both values are defined by the same instruction, or + // the values are PHIs defined in the same block. When that happens, the two + // values should be merged into one, but not into any preceding value. + // The first value defined or visited gets CR_Keep, the other gets CR_Merge. + if (VNInfo *OtherVNI = OtherLRQ.valueDefined()) { + assert(SlotIndex::isSameInstr(VNI->def, OtherVNI->def) && "Broken LRQ"); + + // One value stays, the other is merged. Keep the earlier one, or the first + // one we see. + if (OtherVNI->def < VNI->def) + Other.computeAssignment(OtherVNI->id, *this); + else if (VNI->def < OtherVNI->def && OtherLRQ.valueIn()) { + // This is an early-clobber def overlapping a live-in value in the other + // register. Not mergeable. 
+ V.OtherVNI = OtherLRQ.valueIn(); + return CR_Impossible; + } + V.OtherVNI = OtherVNI; + Val &OtherV = Other.Vals[OtherVNI->id]; + // Keep this value, check for conflicts when analyzing OtherVNI. + if (!OtherV.isAnalyzed()) + return CR_Keep; + // Both sides have been analyzed now. + // Allow overlapping PHI values. Any real interference would show up in a + // predecessor, the PHI itself can't introduce any conflicts. + if (VNI->isPHIDef()) + return CR_Merge; + if (V.ValidLanes & OtherV.ValidLanes) + // Overlapping lanes can't be resolved. + return CR_Impossible; + else + return CR_Merge; + } - LHSValsDefinedFromRHS[VNI] = OtherVNI; + // No simultaneous def. Is Other live at the def? + V.OtherVNI = OtherLRQ.valueIn(); + if (!V.OtherVNI) + // No overlap, no conflict. + return CR_Keep; + + assert(!SlotIndex::isSameInstr(VNI->def, V.OtherVNI->def) && "Broken LRQ"); + + // We have overlapping values, or possibly a kill of Other. + // Recursively compute assignments up the dominator tree. + Other.computeAssignment(V.OtherVNI->id, *this); + const Val &OtherV = Other.Vals[V.OtherVNI->id]; + + // Allow overlapping PHI values. Any real interference would show up in a + // predecessor, the PHI itself can't introduce any conflicts. + if (VNI->isPHIDef()) + return CR_Replace; + + // Check for simple erasable conflicts. + if (DefMI->isImplicitDef()) + return CR_Erase; + + // Include the non-conflict where DefMI is a coalescable copy that kills + // OtherVNI. We still want the copy erased and value numbers merged. + if (CP.isCoalescable(DefMI)) { + // Some of the lanes copied from OtherVNI may be undef, making them undef + // here too. + V.ValidLanes &= ~V.WriteLanes | OtherV.ValidLanes; + return CR_Erase; } - // Loop over the value numbers of the RHS, seeing if any are defined from - // the LHS. - for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end(); - i != e; ++i) { - VNInfo *VNI = *i; - if (VNI->isUnused() || VNI->isPHIDef()) - continue; - MachineInstr *MI = LIS->getInstructionFromIndex(VNI->def); - assert(MI && "Missing def"); - if (!MI->isCopyLike() && !MI->isImplicitDef()) // Src not defined by a copy? - continue; + // This may not be a real conflict if DefMI simply kills Other and defines + // VNI. + if (OtherLRQ.isKill() && OtherLRQ.endPoint() <= VNI->def) + return CR_Keep; + + // Handle the case where VNI and OtherVNI can be proven to be identical: + // + // %other = COPY %ext + // %this = COPY %ext <-- Erase this copy + // + if (DefMI->isFullCopy() && !CP.isPartial() && + stripCopies(VNI) == stripCopies(V.OtherVNI)) + return CR_Erase; + + // If the lanes written by this instruction were all undef in OtherVNI, it is + // still safe to join the live ranges. This can't be done with a simple value + // mapping, though - OtherVNI will map to multiple values: + // + // 1 %dst:ssub0 = FOO <-- OtherVNI + // 2 %src = BAR <-- VNI + // 3 %dst:ssub1 = COPY %src <-- Eliminate this copy. + // 4 BAZ %dst + // 5 QUUX %src + // + // Here OtherVNI will map to itself in [1;2), but to VNI in [2;5). CR_Replace + // handles this complex value mapping. + if ((V.WriteLanes & OtherV.ValidLanes) == 0) + return CR_Replace; + + // If the other live range is killed by DefMI and the live ranges are still + // overlapping, it must be because we're looking at an early clobber def: + // + // %dst = ASM %src + // + // In this case, it is illegal to merge the two live ranges since the early + // clobber def would clobber %src before it was read. 
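For context, the early-clobber situation described above usually originates from an '&' output constraint in extended inline asm. A small sketch, assuming an x86 target and GCC/Clang extended-asm syntax (AT&T operand order); it only illustrates why the output and input must not share a register:

#include <cstdio>

// '&' marks Dst as early-clobber: it is written before Src is read, so the
// compiler may not coalesce the two into one register.
static int doubleIt(int Src) {
  int Dst;
  __asm__("movl %1, %0\n\t"
          "addl %1, %0"
          : "=&r"(Dst)
          : "r"(Src));
  return Dst;
}

int main() {
  std::printf("%d\n", doubleIt(21));   // prints 42
  return 0;
}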
+ if (OtherLRQ.isKill()) { + // This case where the def doesn't overlap the kill is handled above. + assert(VNI->def.isEarlyClobber() && + "Only early clobber defs can overlap a kill"); + return CR_Impossible; + } - // Figure out the value # from the LHS. - VNInfo *OtherVNI = LHS.getVNInfoBefore(VNI->def); - // The copy could be to an aliased physreg. - if (!OtherVNI) - continue; + // VNI is clobbering live lanes in OtherVNI, but there is still the + // possibility that no instructions actually read the clobbered lanes. + // If we're clobbering all the lanes in OtherVNI, at least one must be read. + // Otherwise Other.LI wouldn't be live here. + if ((TRI->getSubRegIndexLaneMask(Other.SubIdx) & ~V.WriteLanes) == 0) + return CR_Impossible; + + // We need to verify that no instructions are reading the clobbered lanes. To + // save compile time, we'll only check that locally. Don't allow the tainted + // value to escape the basic block. + MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def); + if (OtherLRQ.endPoint() >= Indexes->getMBBEndIdx(MBB)) + return CR_Impossible; + + // There are still some things that could go wrong besides clobbered lanes + // being read, for example OtherVNI may be only partially redefined in MBB, + // and some clobbered lanes could escape the block. Save this analysis for + // resolveConflicts() when all values have been mapped. We need to know + // RedefVNI and WriteLanes for any later defs in MBB, and we can't compute + // that now - the recursive analyzeValue() calls must go upwards in the + // dominator tree. + return CR_Unresolved; +} - // DstReg is known to be a register in the RHS interval. If the src is - // from the LHS interval, we can use its value #. - if (CP.isCoalescable(MI)) - DeadCopies.push_back(MI); - else if (!RegistersDefinedFromSameValue(*LIS, *TRI, CP, VNI, OtherVNI, - DupCopies)) - continue; +/// Compute the value assignment for ValNo in LI. +/// This may be called recursively by analyzeValue(), but never for a ValNo on +/// the stack. +void JoinVals::computeAssignment(unsigned ValNo, JoinVals &Other) { + Val &V = Vals[ValNo]; + if (V.isAnalyzed()) { + // Recursion should always move up the dominator tree, so ValNo is not + // supposed to reappear before it has been assigned. + assert(Assignments[ValNo] != -1 && "Bad recursion?"); + return; + } + switch ((V.Resolution = analyzeValue(ValNo, Other))) { + case CR_Erase: + case CR_Merge: + // Merge this ValNo into OtherVNI. + assert(V.OtherVNI && "OtherVNI not assigned, can't merge."); + assert(Other.Vals[V.OtherVNI->id].isAnalyzed() && "Missing recursion"); + Assignments[ValNo] = Other.Assignments[V.OtherVNI->id]; + DEBUG(dbgs() << "\t\tmerge " << PrintReg(LI.reg) << ':' << ValNo << '@' + << LI.getValNumInfo(ValNo)->def << " into " + << PrintReg(Other.LI.reg) << ':' << V.OtherVNI->id << '@' + << V.OtherVNI->def << " --> @" + << NewVNInfo[Assignments[ValNo]]->def << '\n'); + break; + case CR_Replace: + case CR_Unresolved: + // The other value is going to be pruned if this join is successful. + assert(V.OtherVNI && "OtherVNI not assigned, can't prune"); + Other.Vals[V.OtherVNI->id].Pruned = true; + // Fall through. + default: + // This value number needs to go in the final joined live range. 
+ Assignments[ValNo] = NewVNInfo.size(); + NewVNInfo.push_back(LI.getValNumInfo(ValNo)); + break; + } +} - RHSValsDefinedFromLHS[VNI] = OtherVNI; +bool JoinVals::mapValues(JoinVals &Other) { + for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) { + computeAssignment(i, Other); + if (Vals[i].Resolution == CR_Impossible) { + DEBUG(dbgs() << "\t\tinterference at " << PrintReg(LI.reg) << ':' << i + << '@' << LI.getValNumInfo(i)->def << '\n'); + return false; + } } + return true; +} - LHSValNoAssignments.resize(LHS.getNumValNums(), -1); - RHSValNoAssignments.resize(RHS.getNumValNums(), -1); - NewVNInfo.reserve(LHS.getNumValNums() + RHS.getNumValNums()); +/// Assuming ValNo is going to clobber some valid lanes in Other.LI, compute +/// the extent of the tainted lanes in the block. +/// +/// Multiple values in Other.LI can be affected since partial redefinitions can +/// preserve previously tainted lanes. +/// +/// 1 %dst = VLOAD <-- Define all lanes in %dst +/// 2 %src = FOO <-- ValNo to be joined with %dst:ssub0 +/// 3 %dst:ssub1 = BAR <-- Partial redef doesn't clear taint in ssub0 +/// 4 %dst:ssub0 = COPY %src <-- Conflict resolved, ssub0 wasn't read +/// +/// For each ValNo in Other that is affected, add an (EndIndex, TaintedLanes) +/// entry to TaintedVals. +/// +/// Returns false if the tainted lanes extend beyond the basic block. +bool JoinVals:: +taintExtent(unsigned ValNo, unsigned TaintedLanes, JoinVals &Other, + SmallVectorImpl > &TaintExtent) { + VNInfo *VNI = LI.getValNumInfo(ValNo); + MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def); + SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB); + + // Scan Other.LI from VNI.def to MBBEnd. + LiveInterval::iterator OtherI = Other.LI.find(VNI->def); + assert(OtherI != Other.LI.end() && "No conflict?"); + do { + // OtherI is pointing to a tainted value. Abort the join if the tainted + // lanes escape the block. + SlotIndex End = OtherI->end; + if (End >= MBBEnd) { + DEBUG(dbgs() << "\t\ttaints global " << PrintReg(Other.LI.reg) << ':' + << OtherI->valno->id << '@' << OtherI->start << '\n'); + return false; + } + DEBUG(dbgs() << "\t\ttaints local " << PrintReg(Other.LI.reg) << ':' + << OtherI->valno->id << '@' << OtherI->start + << " to " << End << '\n'); + // A dead def is not a problem. + if (End.isDead()) + break; + TaintExtent.push_back(std::make_pair(End, TaintedLanes)); + + // Check for another def in the MBB. + if (++OtherI == Other.LI.end() || OtherI->start >= MBBEnd) + break; + + // Lanes written by the new def are no longer tainted. + const Val &OV = Other.Vals[OtherI->valno->id]; + TaintedLanes &= ~OV.WriteLanes; + if (!OV.RedefVNI) + break; + } while (TaintedLanes); + return true; +} - for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end(); - i != e; ++i) { - VNInfo *VNI = *i; - unsigned VN = VNI->id; - if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused()) +/// Return true if MI uses any of the given Lanes from Reg. +/// This does not include partial redefinitions of Reg. 
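The TaintExtent list built by taintExtent() above is just a sequence of (end point, still-tainted lanes) pairs that shrinks as later partial redefs rewrite lanes. A toy sketch with plain integers standing in for SlotIndex values; all names and numbers are invented for illustration:

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  // Hypothetical lanes: bit0 = ssub0, bit1 = ssub1.
  uint32_t TaintedLanes = 0x3;   // the conflicting def clobbers both lanes
  // (segment end, lanes written by the def that starts the next segment)
  std::vector<std::pair<int, uint32_t>> OtherSegments = {{10, 0x2}, {20, 0x1}, {30, 0}};

  std::vector<std::pair<int, uint32_t>> TaintExtent;
  for (auto &Seg : OtherSegments) {
    TaintExtent.push_back({Seg.first, TaintedLanes});
    TaintedLanes &= ~Seg.second;   // lanes rewritten later are clean again
    if (!TaintedLanes)
      break;                       // nothing left to check
  }
  for (auto &TE : TaintExtent)
    std::printf("up to %d, tainted mask 0x%x\n", TE.first, (unsigned)TE.second);
  return 0;
}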
+bool JoinVals::usesLanes(MachineInstr *MI, unsigned Reg, unsigned SubIdx, + unsigned Lanes) { + if (MI->isDebugValue()) + return false; + for (ConstMIOperands MO(MI); MO.isValid(); ++MO) { + if (!MO->isReg() || MO->isDef() || MO->getReg() != Reg) continue; - ComputeUltimateVN(VNI, NewVNInfo, - LHSValsDefinedFromRHS, RHSValsDefinedFromLHS, - LHSValNoAssignments, RHSValNoAssignments); - } - for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end(); - i != e; ++i) { - VNInfo *VNI = *i; - unsigned VN = VNI->id; - if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused()) + if (!MO->readsReg()) continue; - // If this value number isn't a copy from the LHS, it's a new number. - if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) { - NewVNInfo.push_back(VNI); - RHSValNoAssignments[VN] = NewVNInfo.size()-1; + if (Lanes & TRI->getSubRegIndexLaneMask( + TRI->composeSubRegIndices(SubIdx, MO->getSubReg()))) + return true; + } + return false; +} + +bool JoinVals::resolveConflicts(JoinVals &Other) { + for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) { + Val &V = Vals[i]; + assert (V.Resolution != CR_Impossible && "Unresolvable conflict"); + if (V.Resolution != CR_Unresolved) continue; - } + DEBUG(dbgs() << "\t\tconflict at " << PrintReg(LI.reg) << ':' << i + << '@' << LI.getValNumInfo(i)->def << '\n'); + ++NumLaneConflicts; + assert(V.OtherVNI && "Inconsistent conflict resolution."); + VNInfo *VNI = LI.getValNumInfo(i); + const Val &OtherV = Other.Vals[V.OtherVNI->id]; + + // VNI is known to clobber some lanes in OtherVNI. If we go ahead with the + // join, those lanes will be tainted with a wrong value. Get the extent of + // the tainted lanes. + unsigned TaintedLanes = V.WriteLanes & OtherV.ValidLanes; + SmallVector, 8> TaintExtent; + if (!taintExtent(i, TaintedLanes, Other, TaintExtent)) + // Tainted lanes would extend beyond the basic block. + return false; - ComputeUltimateVN(VNI, NewVNInfo, - RHSValsDefinedFromLHS, LHSValsDefinedFromRHS, - RHSValNoAssignments, LHSValNoAssignments); - } + assert(!TaintExtent.empty() && "There should be at least one conflict."); - // Armed with the mappings of LHS/RHS values to ultimate values, walk the - // interval lists to see if these intervals are coalescable. - LiveInterval::const_iterator I = LHS.begin(); - LiveInterval::const_iterator IE = LHS.end(); - LiveInterval::const_iterator J = RHS.begin(); - LiveInterval::const_iterator JE = RHS.end(); - - // Collect interval end points that will no longer be kills. - SmallVector LHSOldKills; - SmallVector RHSOldKills; - - // Skip ahead until the first place of potential sharing. - if (I != IE && J != JE) { - if (I->start < J->start) { - I = std::upper_bound(I, IE, J->start); - if (I != LHS.begin()) --I; - } else if (J->start < I->start) { - J = std::upper_bound(J, JE, I->start); - if (J != RHS.begin()) --J; + // Now look at the instructions from VNI->def to TaintExtent (inclusive). + MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def); + MachineBasicBlock::iterator MI = MBB->begin(); + if (!VNI->isPHIDef()) { + MI = Indexes->getInstructionFromIndex(VNI->def); + // No need to check the instruction defining VNI for reads. + ++MI; } - } - - while (I != IE && J != JE) { - // Determine if these two live ranges overlap. - // If so, check value # info to determine if they are really different. - if (I->end > J->start && J->end > I->start) { - // If the live range overlap will map to the same value number in the - // result liverange, we can still coalesce them. If not, we can't. 
- if (LHSValNoAssignments[I->valno->id] != - RHSValNoAssignments[J->valno->id]) + assert(!SlotIndex::isSameInstr(VNI->def, TaintExtent.front().first) && + "Interference ends on VNI->def. Should have been handled earlier"); + MachineInstr *LastMI = + Indexes->getInstructionFromIndex(TaintExtent.front().first); + assert(LastMI && "Range must end at a proper instruction"); + unsigned TaintNum = 0; + for(;;) { + assert(MI != MBB->end() && "Bad LastMI"); + if (usesLanes(MI, Other.LI.reg, Other.SubIdx, TaintedLanes)) { + DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI); return false; - - // Extended live ranges should no longer be killed. - if (!I->end.isBlock() && I->end < J->end) - if (MachineInstr *MI = LIS->getInstructionFromIndex(I->end)) - LHSOldKills.push_back(MI); - if (!J->end.isBlock() && J->end < I->end) - if (MachineInstr *MI = LIS->getInstructionFromIndex(J->end)) - RHSOldKills.push_back(MI); + } + // LastMI is the last instruction to use the current value. + if (&*MI == LastMI) { + if (++TaintNum == TaintExtent.size()) + break; + LastMI = Indexes->getInstructionFromIndex(TaintExtent[TaintNum].first); + assert(LastMI && "Range must end at a proper instruction"); + TaintedLanes = TaintExtent[TaintNum].second; + } + ++MI; } - if (I->end < J->end) - ++I; - else - ++J; - } - - // Clear kill flags where live ranges are extended. - while (!LHSOldKills.empty()) - LHSOldKills.pop_back_val()->clearRegisterKills(LHS.reg, TRI); - while (!RHSOldKills.empty()) - RHSOldKills.pop_back_val()->clearRegisterKills(RHS.reg, TRI); - - if (LHSValNoAssignments.empty()) - LHSValNoAssignments.push_back(-1); - if (RHSValNoAssignments.empty()) - RHSValNoAssignments.push_back(-1); - - // Now erase all the redundant copies. - for (unsigned i = 0, e = DeadCopies.size(); i != e; ++i) { - MachineInstr *MI = DeadCopies[i]; - if (!ErasedInstrs.insert(MI)) - continue; - DEBUG(dbgs() << "\t\terased:\t" << LIS->getInstructionIndex(MI) - << '\t' << *MI); - LIS->RemoveMachineInstrFromMaps(MI); - MI->eraseFromParent(); + // The tainted lanes are unused. + V.Resolution = CR_Replace; + ++NumLaneResolves; } + return true; +} - SmallVector SourceRegisters; - for (SmallVector::iterator I = DupCopies.begin(), - E = DupCopies.end(); I != E; ++I) { - MachineInstr *MI = *I; - if (!ErasedInstrs.insert(MI)) - continue; +// Determine if ValNo is a copy of a value number in LI or Other.LI that will +// be pruned: +// +// %dst = COPY %src +// %src = COPY %dst <-- This value to be pruned. +// %dst = COPY %src <-- This value is a copy of a pruned value. +// +bool JoinVals::isPrunedValue(unsigned ValNo, JoinVals &Other) { + Val &V = Vals[ValNo]; + if (V.Pruned || V.PrunedComputed) + return V.Pruned; + + if (V.Resolution != CR_Erase && V.Resolution != CR_Merge) + return V.Pruned; + + // Follow copies up the dominator tree and check if any intermediate value + // has been pruned. + V.PrunedComputed = true; + V.Pruned = Other.isPrunedValue(V.OtherVNI->id, *this); + return V.Pruned; +} - // If MI is a copy, then we have pretended that the assignment to B in - // A = X - // B = X - // was actually a copy from A. Now that we decided to coalesce A and B, - // transform the code into - // A = X - // In the case of the implicit_def, we just have to remove it. 
- if (!MI->isImplicitDef()) { - unsigned Src = MI->getOperand(1).getReg(); - SourceRegisters.push_back(Src); +void JoinVals::pruneValues(JoinVals &Other, + SmallVectorImpl &EndPoints) { + for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) { + SlotIndex Def = LI.getValNumInfo(i)->def; + switch (Vals[i].Resolution) { + case CR_Keep: + break; + case CR_Replace: { + // This value takes precedence over the value in Other.LI. + LIS->pruneValue(&Other.LI, Def, &EndPoints); + // Check if we're replacing an IMPLICIT_DEF value. The IMPLICIT_DEF + // instructions are only inserted to provide a live-out value for PHI + // predecessors, so the instruction should simply go away once its value + // has been replaced. + Val &OtherV = Other.Vals[Vals[i].OtherVNI->id]; + bool EraseImpDef = OtherV.IsImplicitDef && OtherV.Resolution == CR_Keep; + if (!Def.isBlock()) { + // Remove flags. This def is now a partial redef. + // Also remove flags since the joined live range will + // continue past this instruction. + for (MIOperands MO(Indexes->getInstructionFromIndex(Def)); + MO.isValid(); ++MO) + if (MO->isReg() && MO->isDef() && MO->getReg() == LI.reg) { + MO->setIsUndef(EraseImpDef); + MO->setIsDead(false); + } + // This value will reach instructions below, but we need to make sure + // the live range also reaches the instruction at Def. + if (!EraseImpDef) + EndPoints.push_back(Def); + } + DEBUG(dbgs() << "\t\tpruned " << PrintReg(Other.LI.reg) << " at " << Def + << ": " << Other.LI << '\n'); + break; + } + case CR_Erase: + case CR_Merge: + if (isPrunedValue(i, Other)) { + // This value is ultimately a copy of a pruned value in LI or Other.LI. + // We can no longer trust the value mapping computed by + // computeAssignment(), the value that was originally copied could have + // been replaced. + LIS->pruneValue(&LI, Def, &EndPoints); + DEBUG(dbgs() << "\t\tpruned all of " << PrintReg(LI.reg) << " at " + << Def << ": " << LI << '\n'); + } + break; + case CR_Unresolved: + case CR_Impossible: + llvm_unreachable("Unresolved conflicts"); } - LIS->RemoveMachineInstrFromMaps(MI); - MI->eraseFromParent(); } +} - // If B = X was the last use of X in a liverange, we have to shrink it now - // that B = X is gone. - for (SmallVector::iterator I = SourceRegisters.begin(), - E = SourceRegisters.end(); I != E; ++I) { - LIS->shrinkToUses(&LIS->getInterval(*I)); +void JoinVals::eraseInstrs(SmallPtrSet &ErasedInstrs, + SmallVectorImpl &ShrinkRegs) { + for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) { + // Get the def location before markUnused() below invalidates it. + SlotIndex Def = LI.getValNumInfo(i)->def; + switch (Vals[i].Resolution) { + case CR_Keep: + // If an IMPLICIT_DEF value is pruned, it doesn't serve a purpose any + // longer. The IMPLICIT_DEF instructions are only inserted by + // PHIElimination to guarantee that all PHI predecessors have a value. + if (!Vals[i].IsImplicitDef || !Vals[i].Pruned) + break; + // Remove value number i from LI. Note that this VNInfo is still present + // in NewVNInfo, so it will appear as an unused value number in the final + // joined interval. + LI.getValNumInfo(i)->markUnused(); + LI.removeValNo(LI.getValNumInfo(i)); + DEBUG(dbgs() << "\t\tremoved " << i << '@' << Def << ": " << LI << '\n'); + // FALL THROUGH. 
+ + case CR_Erase: { + MachineInstr *MI = Indexes->getInstructionFromIndex(Def); + assert(MI && "No instruction to erase"); + if (MI->isCopy()) { + unsigned Reg = MI->getOperand(1).getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg) && + Reg != CP.getSrcReg() && Reg != CP.getDstReg()) + ShrinkRegs.push_back(Reg); + } + ErasedInstrs.insert(MI); + DEBUG(dbgs() << "\t\terased:\t" << Def << '\t' << *MI); + LIS->RemoveMachineInstrFromMaps(MI); + MI->eraseFromParent(); + break; + } + default: + break; + } } +} + +bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) { + SmallVector NewVNInfo; + LiveInterval &RHS = LIS->getInterval(CP.getSrcReg()); + LiveInterval &LHS = LIS->getInterval(CP.getDstReg()); + JoinVals RHSVals(RHS, CP.getSrcIdx(), NewVNInfo, CP, LIS, TRI); + JoinVals LHSVals(LHS, CP.getDstIdx(), NewVNInfo, CP, LIS, TRI); + + DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS + << "\n\t\tLHS = " << PrintReg(CP.getDstReg()) << ' ' << LHS + << '\n'); + + // First compute NewVNInfo and the simple value mappings. + // Detect impossible conflicts early. + if (!LHSVals.mapValues(RHSVals) || !RHSVals.mapValues(LHSVals)) + return false; + + // Some conflicts can only be resolved after all values have been mapped. + if (!LHSVals.resolveConflicts(RHSVals) || !RHSVals.resolveConflicts(LHSVals)) + return false; - // If we get here, we know that we can coalesce the live ranges. Ask the - // intervals to coalesce themselves now. - LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo, + // All clear, the live ranges can be merged. + + // The merging algorithm in LiveInterval::join() can't handle conflicting + // value mappings, so we need to remove any live ranges that overlap a + // CR_Replace resolution. Collect a set of end points that can be used to + // restore the live range after joining. + SmallVector EndPoints; + LHSVals.pruneValues(RHSVals, EndPoints); + RHSVals.pruneValues(LHSVals, EndPoints); + + // Erase COPY and IMPLICIT_DEF instructions. This may cause some external + // registers to require trimming. + SmallVector ShrinkRegs; + LHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs); + RHSVals.eraseInstrs(ErasedInstrs, ShrinkRegs); + while (!ShrinkRegs.empty()) + LIS->shrinkToUses(&LIS->getInterval(ShrinkRegs.pop_back_val())); + + // Join RHS into LHS. + LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo, MRI); + + // Kill flags are going to be wrong if the live ranges were overlapping. + // Eventually, we should simply clear all kill flags when computing live + // ranges. They are reinserted after register allocation. + MRI->clearKillFlags(LHS.reg); + MRI->clearKillFlags(RHS.reg); + + if (EndPoints.empty()) + return true; + + // Recompute the parts of the live range we had to remove because of + // CR_Replace conflicts. + DEBUG(dbgs() << "\t\trestoring liveness to " << EndPoints.size() + << " points: " << LHS << '\n'); + LIS->extendToIndices(&LHS, EndPoints); return true; } +/// joinIntervals - Attempt to join these two intervals. On failure, this +/// returns false. +bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) { + return CP.isPhys() ? joinReservedPhysReg(CP) : joinVirtRegs(CP); +} + namespace { // DepthMBBCompare - Comparison predicate that sort first based on the loop // depth of the basic block (the unsigned), and then on the MBB number. 
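The DepthMBBCompare predicate described in the comment above is an ordinary lexicographic compare on (loop depth, block number) pairs. A generic stand-alone sketch; whether the real pass wants deepest loops visited first or last is a property of the pass, not of this illustration:

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

// Pair of (loop depth, basic-block number), mirroring the shape used by the pass.
typedef std::pair<unsigned, int> DepthMBBPair;

struct DepthMBBCompareSketch {
  bool operator()(const DepthMBBPair &L, const DepthMBBPair &R) const {
    if (L.first != R.first)
      return L.first < R.first;   // primary key: loop depth
    return L.second < R.second;   // tie-break: block number
  }
};

int main() {
  std::vector<DepthMBBPair> Blocks = {{2, 7}, {0, 1}, {2, 3}, {1, 5}};
  std::sort(Blocks.begin(), Blocks.end(), DepthMBBCompareSketch());
  for (auto &B : Blocks)
    std::printf("depth %u, bb#%d\n", B.first, B.second);
  return 0;
}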
@@ -1564,8 +2024,7 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) { Loops = &getAnalysis(); DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n" - << "********** Function: " - << ((Value*)MF->getFunction())->getName() << '\n'); + << "********** Function: " << MF->getName() << '\n'); if (VerifyCoalescing) MF->verify(this, "Before register coalescing"); diff --git a/lib/CodeGen/RegisterCoalescer.h b/lib/CodeGen/RegisterCoalescer.h index 8a6df98..47c3df1 100644 --- a/lib/CodeGen/RegisterCoalescer.h +++ b/lib/CodeGen/RegisterCoalescer.h @@ -63,6 +63,13 @@ namespace llvm { : TRI(tri), DstReg(0), SrcReg(0), DstIdx(0), SrcIdx(0), Partial(false), CrossClass(false), Flipped(false), NewRC(0) {} + /// Create a CoalescerPair representing a virtreg-to-physreg copy. + /// No need to call setRegisters(). + CoalescerPair(unsigned VirtReg, unsigned PhysReg, + const TargetRegisterInfo &tri) + : TRI(tri), DstReg(PhysReg), SrcReg(VirtReg), DstIdx(0), SrcIdx(0), + Partial(false), CrossClass(false), Flipped(false), NewRC(0) {} + /// setRegisters - set registers to match the copy instruction MI. Return /// false if MI is not a coalescable copy instruction. bool setRegisters(const MachineInstr*); diff --git a/lib/CodeGen/RegisterPressure.cpp b/lib/CodeGen/RegisterPressure.cpp index 43448c8..543c426 100644 --- a/lib/CodeGen/RegisterPressure.cpp +++ b/lib/CodeGen/RegisterPressure.cpp @@ -63,7 +63,8 @@ void RegisterPressure::decrease(const TargetRegisterClass *RC, decreaseSetPressure(MaxSetPressure, RC, TRI); } -void RegisterPressure::dump(const TargetRegisterInfo *TRI) { +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +void RegisterPressure::dump(const TargetRegisterInfo *TRI) const { dbgs() << "Live In: "; for (unsigned i = 0, e = LiveInRegs.size(); i < e; ++i) dbgs() << PrintReg(LiveInRegs[i], TRI) << " "; @@ -78,6 +79,7 @@ void RegisterPressure::dump(const TargetRegisterInfo *TRI) { << '\n'; } } +#endif /// Increase the current pressure as impacted by these physical registers and /// bump the high water mark if needed. @@ -320,10 +322,8 @@ struct RegisterOperands { if (findReg(MO.getReg(), isVReg, DeadDefs, TRI) == DeadDefs.end()) DeadDefs.push_back(MO.getReg()); } - else { - if (findReg(MO.getReg(), isVReg, Defs, TRI) == Defs.end()) - Defs.push_back(MO.getReg()); - } + else if (findReg(MO.getReg(), isVReg, Defs, TRI) == Defs.end()) + Defs.push_back(MO.getReg()); } } }; @@ -335,7 +335,7 @@ static void collectOperands(const MachineInstr *MI, PhysRegOperands &PhysRegOpers, VirtRegOperands &VirtRegOpers, const TargetRegisterInfo *TRI, - const RegisterClassInfo *RCI) { + const MachineRegisterInfo *MRI) { for(ConstMIBundleOperands OperI(MI); OperI.isValid(); ++OperI) { const MachineOperand &MO = *OperI; if (!MO.isReg() || !MO.getReg()) @@ -343,7 +343,7 @@ static void collectOperands(const MachineInstr *MI, if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) VirtRegOpers.collect(MO, TRI); - else if (RCI->isAllocatable(MO.getReg())) + else if (MRI->isAllocatable(MO.getReg())) PhysRegOpers.collect(MO, TRI); } // Remove redundant physreg dead defs. @@ -449,7 +449,7 @@ bool RegPressureTracker::recede() { PhysRegOperands PhysRegOpers; VirtRegOperands VirtRegOpers; - collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, RCI); + collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, MRI); // Boost pressure for all dead defs together. 
increasePhysRegPressure(PhysRegOpers.DeadDefs); @@ -522,7 +522,7 @@ bool RegPressureTracker::advance() { PhysRegOperands PhysRegOpers; VirtRegOperands VirtRegOpers; - collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, RCI); + collectOperands(CurrPos, PhysRegOpers, VirtRegOpers, TRI, MRI); // Kill liveness at last uses. for (unsigned i = 0, e = PhysRegOpers.Uses.size(); i < e; ++i) { @@ -664,7 +664,7 @@ void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) { // Account for register pressure similar to RegPressureTracker::recede(). PhysRegOperands PhysRegOpers; VirtRegOperands VirtRegOpers; - collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, RCI); + collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, MRI); // Boost max pressure for all dead defs together. // Since CurrSetPressure and MaxSetPressure @@ -674,9 +674,16 @@ void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) { decreaseVirtRegPressure(VirtRegOpers.DeadDefs); // Kill liveness at live defs. - decreasePhysRegPressure(PhysRegOpers.Defs); - decreaseVirtRegPressure(VirtRegOpers.Defs); - + for (unsigned i = 0, e = PhysRegOpers.Defs.size(); i < e; ++i) { + unsigned Reg = PhysRegOpers.Defs[i]; + if (!findReg(Reg, false, PhysRegOpers.Uses, TRI)) + decreasePhysRegPressure(PhysRegOpers.Defs); + } + for (unsigned i = 0, e = VirtRegOpers.Defs.size(); i < e; ++i) { + unsigned Reg = VirtRegOpers.Defs[i]; + if (!findReg(Reg, true, VirtRegOpers.Uses, TRI)) + decreaseVirtRegPressure(VirtRegOpers.Defs); + } // Generate liveness for uses. for (unsigned i = 0, e = PhysRegOpers.Uses.size(); i < e; ++i) { unsigned Reg = PhysRegOpers.Uses[i]; @@ -750,7 +757,7 @@ void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) { // Account for register pressure similar to RegPressureTracker::recede(). PhysRegOperands PhysRegOpers; VirtRegOperands VirtRegOpers; - collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, RCI); + collectOperands(MI, PhysRegOpers, VirtRegOpers, TRI, MRI); // Kill liveness at last uses. Assume allocatable physregs are single-use // rather than checking LiveIntervals. diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp index d673794..5ec6564 100644 --- a/lib/CodeGen/RegisterScavenging.cpp +++ b/lib/CodeGen/RegisterScavenging.cpp @@ -92,9 +92,6 @@ void RegScavenger::enterBasicBlock(MachineBasicBlock *mbb) { KillRegs.resize(NumPhysRegs); DefRegs.resize(NumPhysRegs); - // Create reserved registers bitvector. - ReservedRegs = TRI->getReservedRegs(MF); - // Create callee-saved registers bitvector. CalleeSavedRegs.resize(NumPhysRegs); const uint16_t *CSRegs = TRI->getCalleeSavedRegs(&MF); @@ -225,9 +222,9 @@ void RegScavenger::getRegsUsed(BitVector &used, bool includeReserved) { used = RegsAvailable; used.flip(); if (includeReserved) - used |= ReservedRegs; + used |= MRI->getReservedRegs(); else - used.reset(ReservedRegs); + used.reset(MRI->getReservedRegs()); } unsigned RegScavenger::FindUnusedReg(const TargetRegisterClass *RC) const { diff --git a/lib/CodeGen/ScheduleDAG.cpp b/lib/CodeGen/ScheduleDAG.cpp index 752f8e4..9a65071 100644 --- a/lib/CodeGen/ScheduleDAG.cpp +++ b/lib/CodeGen/ScheduleDAG.cpp @@ -279,6 +279,7 @@ void SUnit::ComputeHeight() { } while (!WorkList.empty()); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// SUnit - Scheduling unit. It's an wrapper around either a single SDNode or /// a group of nodes flagged together. 
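The #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) guards added in these hunks follow a common idiom: dump() members exist only in builds where they can actually be exercised. A minimal sketch of the pattern, using an invented class:

#include <cstdio>

struct Thing {
  int Value = 42;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Only compiled into debug builds, or when dumps are explicitly enabled.
  void dump() const { std::printf("Thing(%d)\n", Value); }
#endif
};

int main() {
  Thing T;
#ifndef NDEBUG
  T.dump();   // available in debug builds
#endif
  return 0;
}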
void SUnit::dump(const ScheduleDAG *G) const { @@ -336,6 +337,7 @@ void SUnit::dumpAll(const ScheduleDAG *G) const { } dbgs() << "\n"; } +#endif #ifndef NDEBUG /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index 9c1dba3..a4d4a93 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -22,6 +22,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/PseudoSourceValue.h" #include "llvm/CodeGen/RegisterPressure.h" +#include "llvm/CodeGen/ScheduleDAGILP.h" #include "llvm/CodeGen/ScheduleDAGInstrs.h" #include "llvm/MC/MCInstrItineraries.h" #include "llvm/Target/TargetMachine.h" @@ -30,6 +31,7 @@ #include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallPtrSet.h" @@ -44,14 +46,15 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf, const MachineDominatorTree &mdt, bool IsPostRAFlag, LiveIntervals *lis) - : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), - InstrItins(mf.getTarget().getInstrItineraryData()), LIS(lis), - IsPostRA(IsPostRAFlag), UnitLatencies(false), CanHandleTerminators(false), - LoopRegs(MDT), FirstDbgValue(0) { + : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), LIS(lis), + IsPostRA(IsPostRAFlag), CanHandleTerminators(false), FirstDbgValue(0) { assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals"); DbgValues.clear(); assert(!(IsPostRA && MRI.getNumVirtRegs()) && "Virtual registers must be removed prior to PostRA scheduling"); + + const TargetSubtargetInfo &ST = TM.getSubtarget(); + SchedModel.init(*ST.getSchedModel(), &ST, TII); } /// getUnderlyingObjectFromInt - This is the function that does the work of @@ -68,7 +71,7 @@ static const Value *getUnderlyingObjectFromInt(const Value *V) { // object. We don't have to worry about the case where the // object address is somehow being computed by the multiply, // because our callers only care when the result is an - // identifibale object. + // identifiable object. if (U->getOpcode() != Instruction::Add || (!isa(U->getOperand(1)) && Operator::getOpcode(U->getOperand(1)) != Instruction::Mul)) @@ -135,10 +138,6 @@ static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI, void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) { BB = bb; - LoopRegs.Deps.clear(); - if (MachineLoop *ML = MLI.getLoopFor(BB)) - if (BB == ML->getLoopLatch()) - LoopRegs.VisitLoop(ML); } void ScheduleDAGInstrs::finishBlock() { @@ -174,9 +173,6 @@ void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb, EndIndex = endcount; MISUnitMap.clear(); - // Check to see if the scheduler cares about latencies. 
- UnitLatencies = forceUnitLatencies(); - ScheduleDAG::clearDAG(); } @@ -209,7 +205,7 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() { if (Reg == 0) continue; if (TRI->isPhysicalRegister(Reg)) - Uses[Reg].push_back(&ExitSU); + Uses[Reg].push_back(PhysRegSUOper(&ExitSU, -1)); else { assert(!IsPostRA && "Virtual register encountered after regalloc."); addVRegUseDeps(&ExitSU, i); @@ -225,59 +221,44 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() { E = (*SI)->livein_end(); I != E; ++I) { unsigned Reg = *I; if (!Uses.contains(Reg)) - Uses[Reg].push_back(&ExitSU); + Uses[Reg].push_back(PhysRegSUOper(&ExitSU, -1)); } } } /// MO is an operand of SU's instruction that defines a physical register. Add /// data dependencies from SU to any uses of the physical register. -void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, - const MachineOperand &MO) { +void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { + const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx); assert(MO.isDef() && "expect physreg def"); // Ask the target if address-backscheduling is desirable, and if so how much. const TargetSubtargetInfo &ST = TM.getSubtarget(); - unsigned SpecialAddressLatency = ST.getSpecialAddressLatency(); - unsigned DataLatency = SU->Latency; for (MCRegAliasIterator Alias(MO.getReg(), TRI, true); Alias.isValid(); ++Alias) { if (!Uses.contains(*Alias)) continue; - std::vector &UseList = Uses[*Alias]; + std::vector &UseList = Uses[*Alias]; for (unsigned i = 0, e = UseList.size(); i != e; ++i) { - SUnit *UseSU = UseList[i]; + SUnit *UseSU = UseList[i].SU; if (UseSU == SU) continue; - unsigned LDataLatency = DataLatency; - // Optionally add in a special extra latency for nodes that - // feed addresses. - // TODO: Perhaps we should get rid of - // SpecialAddressLatency and just move this into - // adjustSchedDependency for the targets that care about it. - if (SpecialAddressLatency != 0 && !UnitLatencies && - UseSU != &ExitSU) { - MachineInstr *UseMI = UseSU->getInstr(); - const MCInstrDesc &UseMCID = UseMI->getDesc(); - int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias); - assert(RegUseIndex >= 0 && "UseMI doesn't use register!"); - if (RegUseIndex >= 0 && - (UseMI->mayLoad() || UseMI->mayStore()) && - (unsigned)RegUseIndex < UseMCID.getNumOperands() && - UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass()) - LDataLatency += SpecialAddressLatency; - } - // Adjust the dependence latency using operand def/use - // information (if any), and then allow the target to - // perform its own adjustments. - SDep dep(SU, SDep::Data, LDataLatency, *Alias); - if (!UnitLatencies) { - unsigned Latency = computeOperandLatency(SU, UseSU, dep); - dep.setLatency(Latency); - - ST.adjustSchedDependency(SU, UseSU, dep); - } + + SDep dep(SU, SDep::Data, *Alias); + + // Adjust the dependence latency using operand def/use information, + // then allow the target to perform its own adjustments. + int UseOp = UseList[i].OpIdx; + MachineInstr *RegUse = UseOp < 0 ? 
0 : UseSU->getInstr(); + dep.setLatency( + SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, + RegUse, UseOp, /*FindMin=*/false)); + dep.setMinLatency( + SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, + RegUse, UseOp, /*FindMin=*/true)); + + ST.adjustSchedDependency(SU, UseSU, dep); UseSU->addPred(dep); } } @@ -301,20 +282,23 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { Alias.isValid(); ++Alias) { if (!Defs.contains(*Alias)) continue; - std::vector &DefList = Defs[*Alias]; + std::vector &DefList = Defs[*Alias]; for (unsigned i = 0, e = DefList.size(); i != e; ++i) { - SUnit *DefSU = DefList[i]; + SUnit *DefSU = DefList[i].SU; if (DefSU == &ExitSU) continue; if (DefSU != SU && (Kind != SDep::Output || !MO.isDead() || !DefSU->getInstr()->registerDefIsDead(*Alias))) { if (Kind == SDep::Anti) - DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias)); + DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias)); else { - unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx, - DefSU->getInstr()); - DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias)); + SDep Dep(SU, Kind, /*Reg=*/*Alias); + unsigned OutLatency = + SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()); + Dep.setMinLatency(OutLatency); + Dep.setLatency(OutLatency); + DefSU->addPred(Dep); } } } @@ -324,61 +308,14 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { // Either insert a new Reg2SUnits entry with an empty SUnits list, or // retrieve the existing SUnits list for this register's uses. // Push this SUnit on the use list. - Uses[MO.getReg()].push_back(SU); + Uses[MO.getReg()].push_back(PhysRegSUOper(SU, OperIdx)); } else { - addPhysRegDataDeps(SU, MO); + addPhysRegDataDeps(SU, OperIdx); // Either insert a new Reg2SUnits entry with an empty SUnits list, or // retrieve the existing SUnits list for this register's defs. - std::vector &DefList = Defs[MO.getReg()]; - - // If a def is going to wrap back around to the top of the loop, - // backschedule it. - if (!UnitLatencies && DefList.empty()) { - LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(MO.getReg()); - if (I != LoopRegs.Deps.end()) { - const MachineOperand *UseMO = I->second.first; - unsigned Count = I->second.second; - const MachineInstr *UseMI = UseMO->getParent(); - unsigned UseMOIdx = UseMO - &UseMI->getOperand(0); - const MCInstrDesc &UseMCID = UseMI->getDesc(); - const TargetSubtargetInfo &ST = - TM.getSubtarget(); - unsigned SpecialAddressLatency = ST.getSpecialAddressLatency(); - // TODO: If we knew the total depth of the region here, we could - // handle the case where the whole loop is inside the region but - // is large enough that the isScheduleHigh trick isn't needed. - if (UseMOIdx < UseMCID.getNumOperands()) { - // Currently, we only support scheduling regions consisting of - // single basic blocks. Check to see if the instruction is in - // the same region by checking to see if it has the same parent. - if (UseMI->getParent() != MI->getParent()) { - unsigned Latency = SU->Latency; - if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) - Latency += SpecialAddressLatency; - // This is a wild guess as to the portion of the latency which - // will be overlapped by work done outside the current - // scheduling region. - Latency -= std::min(Latency, Count); - // Add the artificial edge. 
- ExitSU.addPred(SDep(SU, SDep::Order, Latency, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, - /*isArtificial=*/true)); - } else if (SpecialAddressLatency > 0 && - UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) { - // The entire loop body is within the current scheduling region - // and the latency of this operation is assumed to be greater - // than the latency of the loop. - // TODO: Recursively mark data-edge predecessors as - // isScheduleHigh too. - SU->isScheduleHigh = true; - } - } - LoopRegs.Deps.erase(I); - } - } + std::vector &DefList = Defs[MO.getReg()]; // clear this register's use list if (Uses.contains(MO.getReg())) @@ -393,11 +330,11 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { // the block. Instead, we leave only one call at the back of the // DefList. if (SU->isCall) { - while (!DefList.empty() && DefList.back()->isCall) + while (!DefList.empty() && DefList.back().SU->isCall) DefList.pop_back(); } // Defs are pushed in the order they are visited and never reordered. - DefList.push_back(SU); + DefList.push_back(PhysRegSUOper(SU, OperIdx)); } } @@ -430,9 +367,12 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { else { SUnit *DefSU = DefI->SU; if (DefSU != SU && DefSU != &ExitSU) { - unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx, - DefSU->getInstr()); - DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg)); + SDep Dep(SU, SDep::Output, Reg); + unsigned OutLatency = + SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()); + Dep.setMinLatency(OutLatency); + Dep.setLatency(OutLatency); + DefSU->addPred(Dep); } DefI->SU = SU; } @@ -462,18 +402,17 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) { if (DefSU) { // The reaching Def lives within this scheduling region. // Create a data dependence. - // - // TODO: Handle "special" address latencies cleanly. - SDep dep(DefSU, SDep::Data, DefSU->Latency, Reg); - if (!UnitLatencies) { - // Adjust the dependence latency using operand def/use information, then - // allow the target to perform its own adjustments. - unsigned Latency = computeOperandLatency(DefSU, SU, const_cast(dep)); - dep.setLatency(Latency); - - const TargetSubtargetInfo &ST = TM.getSubtarget(); - ST.adjustSchedDependency(DefSU, SU, const_cast(dep)); - } + SDep dep(DefSU, SDep::Data, Reg); + // Adjust the dependence latency using operand def/use information, then + // allow the target to perform its own adjustments. + int DefOp = Def->findRegisterDefOperandIdx(Reg); + dep.setLatency( + SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx, false)); + dep.setMinLatency( + SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx, true)); + + const TargetSubtargetInfo &ST = TM.getSubtarget(); + ST.adjustSchedDependency(DefSU, SU, const_cast(dep)); SU->addPred(dep); } } @@ -481,14 +420,14 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) { // Add antidependence to the following def of the vreg it uses. VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg); if (DefI != VRegDefs.end() && DefI->SU != SU) - DefI->SU->addPred(SDep(SU, SDep::Anti, 0, Reg)); + DefI->SU->addPred(SDep(SU, SDep::Anti, Reg)); } /// Return true if MI is an instruction we are unable to reason about /// (like a call or something with unmodeled side effects). 
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) { if (MI->isCall() || MI->hasUnmodeledSideEffects() || - (MI->hasVolatileMemoryRef() && + (MI->hasOrderedMemoryRef() && (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) return true; return false; @@ -621,8 +560,7 @@ iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI, // and stop descending. if (*Depth > 200 || MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) { - SUb->addPred(SDep(SUa, SDep::Order, /*Latency=*/0, /*Reg=*/0, - /*isNormalMemory=*/true)); + SUb->addPred(SDep(SUa, SDep::MayAliasMem)); return *Depth; } // Track current depth. @@ -653,9 +591,9 @@ static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI, if (SU == *I) continue; if (MIsNeedChainEdge(AA, MFI, SU->getInstr(), (*I)->getInstr())) { - unsigned Latency = ((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0; - (*I)->addPred(SDep(SU, SDep::Order, Latency, /*Reg=*/0, - /*isNormalMemory=*/true)); + SDep Dep(SU, SDep::MayAliasMem); + Dep.setLatency(((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0); + (*I)->addPred(Dep); } // Now go through all the chain successors and iterate from them. // Keep track of visited nodes. @@ -678,9 +616,11 @@ void addChainDependency (AliasAnalysis *AA, const MachineFrameInfo *MFI, // If this is a false dependency, // do not add the edge, but rememeber the rejected node. if (!EnableAASchedMI || - MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) - SUb->addPred(SDep(SUa, SDep::Order, TrueMemOrderLatency, /*Reg=*/0, - isNormalMemory)); + MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) { + SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier); + Dep.setLatency(TrueMemOrderLatency); + SUb->addPred(Dep); + } else { // Duplicate entries should be ignored. RejectList.insert(SUb); @@ -718,10 +658,7 @@ void ScheduleDAGInstrs::initSUnits() { SU->isCommutable = MI->isCommutable(); // Assign the Latency field of SU using target-provided information. - if (UnitLatencies) - SU->Latency = 1; - else - computeLatency(SU); + SU->Latency = SchedModel.computeInstrLatency(SU->getInstr()); } } @@ -825,16 +762,19 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, // references, even those that are known to not alias. for (std::map::iterator I = NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) { - I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0)); + I->second->addPred(SDep(SU, SDep::Barrier)); } for (std::map >::iterator I = NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) { - for (unsigned i = 0, e = I->second.size(); i != e; ++i) - I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency)); + for (unsigned i = 0, e = I->second.size(); i != e; ++i) { + SDep Dep(SU, SDep::Barrier); + Dep.setLatency(TrueMemOrderLatency); + I->second[i]->addPred(Dep); + } } // Add SU to the barrier chain. if (BarrierChain) - BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0)); + BarrierChain->addPred(SDep(SU, SDep::Barrier)); BarrierChain = SU; // This is a barrier event that acts as a pivotal node in the DAG, // so it is safe to clear list of exposed nodes. @@ -922,7 +862,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, // SU and barrier _could_ be reordered, they should not. In addition, // we have lost all RejectMemNodes below barrier. 
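The hunks above replace the old SDep constructor that took a raw latency argument with kind-tagged edges (Barrier, MayAliasMem, Artificial) whose latency is set separately. A minimal standalone sketch of that shape, using hypothetical Edge/Node types in place of the real SDep/SUnit classes:

#include <vector>

// Hypothetical stand-ins for SDep/SUnit, only to show the edge shape: the
// kind names the reason for the ordering, and any nonzero latency is set
// explicitly rather than being a constructor argument.
enum class DepKind { Data, Anti, Output, Barrier, MayAliasMem, Artificial };

struct Edge {
  int pred;          // index of the predecessor node
  DepKind kind;
  unsigned latency;  // zero unless set, mirroring Dep.setLatency(...)
};

struct Node {
  std::vector<Edge> preds;
  void addPred(const Edge &e) { preds.push_back(e); }
};

// A store ordered after a barrier, and a may-alias edge carrying the
// store-to-load ordering latency.
void addMemoryEdges(Node &store, Node &aliasingLoad, int barrierIdx,
                    int storeIdx, unsigned trueMemOrderLatency) {
  store.addPred({barrierIdx, DepKind::Barrier, 0});
  Edge e{storeIdx, DepKind::MayAliasMem, trueMemOrderLatency};
  aliasingLoad.addPred(e);
}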
if (BarrierChain) - BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0)); + BarrierChain->addPred(SDep(SU, SDep::Barrier)); } else { // Treat all other stores conservatively. goto new_alias_chain; @@ -931,10 +871,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, if (!ExitSU.isPred(SU)) // Push store's up a bit to avoid them getting in between cmp // and branches. - ExitSU.addPred(SDep(SU, SDep::Order, 0, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, - /*isArtificial=*/true)); + ExitSU.addPred(SDep(SU, SDep::Artificial)); } else if (MI->mayLoad()) { bool MayAlias = true; if (MI->isInvariantLoad(AA)) { @@ -969,7 +906,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, if (MayAlias && AliasChain) addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes); if (BarrierChain) - BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0)); + BarrierChain->addPred(SDep(SU, SDep::Barrier)); } } } @@ -982,34 +919,10 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, PendingLoads.clear(); } -void ScheduleDAGInstrs::computeLatency(SUnit *SU) { - // Compute the latency for the node. We only provide a default for missing - // itineraries. Empty itineraries still have latency properties. - if (!InstrItins) { - SU->Latency = 1; - - // Simplistic target-independent heuristic: assume that loads take - // extra time. - if (SU->getInstr()->mayLoad()) - SU->Latency += 2; - } else { - SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr()); - } -} - -unsigned ScheduleDAGInstrs::computeOperandLatency(SUnit *Def, SUnit *Use, - const SDep& dep, - bool FindMin) const { - // For a data dependency with a known register... - if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0)) - return 1; - - return TII->computeOperandLatency(InstrItins, TRI, Def->getInstr(), - Use->getInstr(), dep.getReg(), FindMin); -} - void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const { +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) SU->getInstr()->dump(); +#endif } std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const { @@ -1029,3 +942,94 @@ std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const { std::string ScheduleDAGInstrs::getDAGName() const { return "dag." + BB->getFullName(); } + +namespace { +/// \brief Manage the stack used by a reverse depth-first search over the DAG. +class SchedDAGReverseDFS { + std::vector > DFSStack; +public: + bool isComplete() const { return DFSStack.empty(); } + + void follow(const SUnit *SU) { + DFSStack.push_back(std::make_pair(SU, SU->Preds.begin())); + } + void advance() { ++DFSStack.back().second; } + + void backtrack() { DFSStack.pop_back(); } + + const SUnit *getCurr() const { return DFSStack.back().first; } + + SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; } + + SUnit::const_pred_iterator getPredEnd() const { + return getCurr()->Preds.end(); + } +}; +} // anonymous + +void ScheduleDAGILP::resize(unsigned NumSUnits) { + ILPValues.resize(NumSUnits); +} + +ILPValue ScheduleDAGILP::getILP(const SUnit *SU) { + return ILPValues[SU->NodeNum]; +} + +// A leaf node has an ILP of 1/1. +static ILPValue initILP(const SUnit *SU) { + unsigned Cnt = SU->getInstr()->isTransient() ? 0 : 1; + return ILPValue(Cnt, 1 + SU->getDepth()); +} + +/// Compute an ILP metric for all nodes in the subDAG reachable via depth-first +/// search from this root. 
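The ILP metric computed by the routine that follows assigns each node the ratio (instructions in the sub-DAG reachable through its predecessors) / (1 + depth). A standalone sketch of the same bookkeeping on a hypothetical four-node DAG, using plain recursion in place of the explicit DFS stack and ignoring the transient-instruction special case:

#include <cstdio>
#include <vector>

// Hypothetical mini-DAG node: predecessor indices plus a precomputed depth
// (length in edges of the longest path from an entry node).
struct MiniNode {
  std::vector<int> preds;
  unsigned depth = 0;
};

// Count the nodes reachable through predecessor edges, visiting each once.
static unsigned countReachable(const std::vector<MiniNode> &dag, int n,
                               std::vector<char> &seen) {
  if (seen[n]) return 0;
  seen[n] = 1;
  unsigned count = 1;
  for (int p : dag[n].preds)
    count += countReachable(dag, p, seen);
  return count;
}

int main() {
  // Nodes 0 and 1 feed node 2; node 2 feeds node 3.
  std::vector<MiniNode> dag(4);
  dag[2].preds = {0, 1}; dag[2].depth = 1;
  dag[3].preds = {2};    dag[3].depth = 2;

  std::vector<char> seen(dag.size(), 0);
  unsigned instrs = countReachable(dag, 3, seen);  // 4 instructions
  unsigned cycles = 1 + dag[3].depth;              // 3 cycles of critical path
  std::printf("ILP = %u / %u = %g\n", instrs, cycles,
              (double)instrs / cycles);
  return 0;
}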
+void ScheduleDAGILP::computeILP(const SUnit *Root) { + if (!IsBottomUp) + llvm_unreachable("Top-down ILP metric is unimplemnted"); + + SchedDAGReverseDFS DFS; + // Mark a node visited by validating it. + ILPValues[Root->NodeNum] = initILP(Root); + DFS.follow(Root); + for (;;) { + // Traverse the leftmost path as far as possible. + while (DFS.getPred() != DFS.getPredEnd()) { + const SUnit *PredSU = DFS.getPred()->getSUnit(); + DFS.advance(); + // If the pred is already valid, skip it. + if (ILPValues[PredSU->NodeNum].isValid()) + continue; + ILPValues[PredSU->NodeNum] = initILP(PredSU); + DFS.follow(PredSU); + } + // Visit the top of the stack in postorder and backtrack. + unsigned PredCount = ILPValues[DFS.getCurr()->NodeNum].InstrCount; + DFS.backtrack(); + if (DFS.isComplete()) + break; + // Add the recently finished predecessor's bottom-up descendent count. + ILPValues[DFS.getCurr()->NodeNum].InstrCount += PredCount; + } +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) +void ILPValue::print(raw_ostream &OS) const { + if (!isValid()) + OS << "BADILP"; + OS << InstrCount << " / " << Cycles << " = " + << format("%g", ((double)InstrCount / Cycles)); +} + +void ILPValue::dump() const { + dbgs() << *this << '\n'; +} + +namespace llvm { + +raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) { + Val.print(OS); + return OS; +} + +} // namespace llvm +#endif // !NDEBUG || LLVM_ENABLE_DUMP diff --git a/lib/CodeGen/ScheduleDAGPrinter.cpp b/lib/CodeGen/ScheduleDAGPrinter.cpp index 38feee9..6e781b1 100644 --- a/lib/CodeGen/ScheduleDAGPrinter.cpp +++ b/lib/CodeGen/ScheduleDAGPrinter.cpp @@ -12,7 +12,6 @@ //===----------------------------------------------------------------------===// #include "llvm/Constants.h" -#include "llvm/Function.h" #include "llvm/Assembly/Writer.h" #include "llvm/CodeGen/ScheduleDAG.h" #include "llvm/CodeGen/MachineConstantPool.h" @@ -35,7 +34,7 @@ namespace llvm { DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {} static std::string getGraphName(const ScheduleDAG *G) { - return G->MF.getFunction()->getName(); + return G->MF.getName(); } static bool renderGraphFromBottomUp() { diff --git a/lib/CodeGen/ScoreboardHazardRecognizer.cpp b/lib/CodeGen/ScoreboardHazardRecognizer.cpp index e675366..2cd84d6 100644 --- a/lib/CodeGen/ScoreboardHazardRecognizer.cpp +++ b/lib/CodeGen/ScoreboardHazardRecognizer.cpp @@ -89,6 +89,7 @@ void ScoreboardHazardRecognizer::Reset() { ReservedScoreboard.reset(); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void ScoreboardHazardRecognizer::Scoreboard::dump() const { dbgs() << "Scoreboard:\n"; @@ -104,6 +105,7 @@ void ScoreboardHazardRecognizer::Scoreboard::dump() const { dbgs() << '\n'; } } +#endif bool ScoreboardHazardRecognizer::atIssueLimit() const { if (IssueWidth == 0) diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 4e29879..37d7731 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -23,7 +23,7 @@ #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" @@ -194,6 +194,7 @@ namespace { SDValue visitOR(SDNode *N); SDValue visitXOR(SDNode *N); SDValue SimplifyVBinOp(SDNode *N); + SDValue SimplifyVUnaryOp(SDNode *N); SDValue visitSHL(SDNode *N); 
SDValue visitSRA(SDNode *N); SDValue visitSRL(SDNode *N); @@ -269,6 +270,8 @@ namespace { SDValue ReduceLoadWidth(SDNode *N); SDValue ReduceLoadOpStoreWidth(SDNode *N); SDValue TransformFPLoadStorePair(SDNode *N); + SDValue reduceBuildVecExtToExtBuildVec(SDNode *N); + SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N); SDValue GetDemandedBits(SDValue V, const APInt &Mask); @@ -300,6 +303,11 @@ namespace { /// looking for a better chain (aliasing node.) SDValue FindBetterChain(SDNode *N, SDValue Chain); + /// Merge consecutive store operations into a wide store. + /// This optimization uses wide integers or vectors when possible. + /// \return True if some memory operations were changed. + bool MergeConsecutiveStores(StoreSDNode *N); + public: DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL) : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes), @@ -385,10 +393,6 @@ static char isNegatibleForFree(SDValue Op, bool LegalOperations, const TargetLowering &TLI, const TargetOptions *Options, unsigned Depth = 0) { - // No compile time optimizations on this type. - if (Op.getValueType() == MVT::ppcf128) - return 0; - // fneg is removable even if it has multiple uses. if (Op.getOpcode() == ISD::FNEG) return 2; @@ -413,7 +417,7 @@ static char isNegatibleForFree(SDValue Op, bool LegalOperations, !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) return 0; - // fold (fsub (fadd A, B)) -> (fsub (fneg A), B) + // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options, Depth + 1)) return V; @@ -1643,7 +1647,8 @@ SDValue DAGCombiner::visitSUB(SDNode *N) { return N0.getOperand(0); // fold C2-(A+C1) -> (C2-C1)-A if (N1.getOpcode() == ISD::ADD && N0C && N1C1) { - SDValue NewC = DAG.getConstant((N0C->getAPIntValue() - N1C1->getAPIntValue()), VT); + SDValue NewC = DAG.getConstant(N0C->getAPIntValue() - N1C1->getAPIntValue(), + VT); return DAG.getNode(ISD::SUB, N->getDebugLoc(), VT, NewC, N1.getOperand(0)); } @@ -2345,16 +2350,19 @@ SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { // we don't want to undo this promotion. // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper // on scalars. - if ((N0.getOpcode() == ISD::BITCAST || N0.getOpcode() == ISD::SCALAR_TO_VECTOR) - && Level == AfterLegalizeTypes) { + if ((N0.getOpcode() == ISD::BITCAST || + N0.getOpcode() == ISD::SCALAR_TO_VECTOR) && + Level == AfterLegalizeTypes) { SDValue In0 = N0.getOperand(0); SDValue In1 = N1.getOperand(0); EVT In0Ty = In0.getValueType(); EVT In1Ty = In1.getValueType(); - // If both incoming values are integers, and the original types are the same. + DebugLoc DL = N->getDebugLoc(); + // If both incoming values are integers, and the original types are the + // same. if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) { - SDValue Op = DAG.getNode(N->getOpcode(), N->getDebugLoc(), In0Ty, In0, In1); - SDValue BC = DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, Op); + SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1); + SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op); AddToWorkList(Op.getNode()); return BC; } @@ -2496,8 +2504,18 @@ SDValue DAGCombiner::visitAND(SDNode *N) { // lanes of the constant together. 
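The AND combine just below re-expands a splat constant whose value was compressed to fewer bits than the vector lane, by repeatedly or-ing in shifted copies of itself. A standalone sketch of that expansion, assuming (as the hunk does) that the lane width is the splat width times a power of two; expandSplat is a hypothetical helper, not part of the patch:

#include <cstdint>
#include <cstdio>

// Re-expand a splat pattern of `splatBits` bits to a full lane of `laneBits`
// bits by doubling it until it fills the lane.
static uint64_t expandSplat(uint64_t splat, unsigned splatBits,
                            unsigned laneBits) {
  while (splatBits < laneBits) {
    splat |= splat << splatBits;
    splatBits *= 2;
  }
  return splat;
}

int main() {
  // An 8-bit splat of 0xAB becomes 0xABABABAB in a 32-bit lane.
  std::printf("0x%08llx\n",
              (unsigned long long)expandSplat(0xAB, 8, 32));
  return 0;
}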
EVT VT = Vector->getValueType(0); unsigned BitWidth = VT.getVectorElementType().getSizeInBits(); + + // If the splat value has been compressed to a bitlength lower + // than the size of the vector lane, we need to re-expand it to + // the lane size. + if (BitWidth > SplatBitSize) + for (SplatValue = SplatValue.zextOrTrunc(BitWidth); + SplatBitSize < BitWidth; + SplatBitSize = SplatBitSize * 2) + SplatValue |= SplatValue.shl(SplatBitSize); + Constant = APInt::getAllOnesValue(BitWidth); - for (unsigned i = 0, n = VT.getVectorNumElements(); i < n; ++i) + for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i) Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth); } } @@ -2984,7 +3002,7 @@ SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) { SDValue ShAmt = DAG.getConstant(16, getShiftAmountTy(VT)); if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT)) return DAG.getNode(ISD::ROTL, N->getDebugLoc(), VT, BSwap, ShAmt); - else if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT)) + if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT)) return DAG.getNode(ISD::ROTR, N->getDebugLoc(), VT, BSwap, ShAmt); return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, DAG.getNode(ISD::SHL, N->getDebugLoc(), VT, BSwap, ShAmt), @@ -3202,11 +3220,8 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) { if ((LShVal + RShVal) != OpSizeInBits) return 0; - SDValue Rot; - if (HasROTL) - Rot = DAG.getNode(ISD::ROTL, DL, VT, LHSShiftArg, LHSShiftAmt); - else - Rot = DAG.getNode(ISD::ROTR, DL, VT, LHSShiftArg, RHSShiftAmt); + SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, + LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt); // If there is an AND of either shifted operand, apply it to the result. if (LHSMask.getNode() || RHSMask.getNode()) { @@ -3239,12 +3254,8 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) { if (ConstantSDNode *SUBC = dyn_cast(RHSShiftAmt.getOperand(0))) { if (SUBC->getAPIntValue() == OpSizeInBits) { - if (HasROTL) - return DAG.getNode(ISD::ROTL, DL, VT, - LHSShiftArg, LHSShiftAmt).getNode(); - else - return DAG.getNode(ISD::ROTR, DL, VT, - LHSShiftArg, RHSShiftAmt).getNode(); + return DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, LHSShiftArg, + HasROTL ? LHSShiftAmt : RHSShiftAmt).getNode(); } } } @@ -3256,25 +3267,21 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL) { if (ConstantSDNode *SUBC = dyn_cast(LHSShiftAmt.getOperand(0))) { if (SUBC->getAPIntValue() == OpSizeInBits) { - if (HasROTR) - return DAG.getNode(ISD::ROTR, DL, VT, - LHSShiftArg, RHSShiftAmt).getNode(); - else - return DAG.getNode(ISD::ROTL, DL, VT, - LHSShiftArg, LHSShiftAmt).getNode(); + return DAG.getNode(HasROTR ? ISD::ROTR : ISD::ROTL, DL, VT, LHSShiftArg, + HasROTR ? 
RHSShiftAmt : LHSShiftAmt).getNode(); } } } // Look for sign/zext/any-extended or truncate cases: - if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND - || LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND - || LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND - || LHSShiftAmt.getOpcode() == ISD::TRUNCATE) && - (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND - || RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND - || RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND - || RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) { + if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || + LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || + LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || + LHSShiftAmt.getOpcode() == ISD::TRUNCATE) && + (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || + RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || + RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || + RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) { SDValue LExtOp0 = LHSShiftAmt.getOperand(0); SDValue RExtOp0 = RHSShiftAmt.getOperand(0); if (RExtOp0.getOpcode() == ISD::SUB && @@ -4046,7 +4053,8 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) { if (VT.isInteger() && (VT0 == MVT::i1 || (VT0.isInteger() && - TLI.getBooleanContents(false) == TargetLowering::ZeroOrOneBooleanContent)) && + TLI.getBooleanContents(false) == + TargetLowering::ZeroOrOneBooleanContent)) && N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) { SDValue XORNode; if (VT == VT0) @@ -4412,20 +4420,18 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) { // If the desired elements are smaller or larger than the source // elements we can use a matching integer vector type and then // truncate/sign extend - else { - EVT MatchingElementType = - EVT::getIntegerVT(*DAG.getContext(), - N0VT.getScalarType().getSizeInBits()); - EVT MatchingVectorType = - EVT::getVectorVT(*DAG.getContext(), MatchingElementType, - N0VT.getVectorNumElements()); + EVT MatchingElementType = + EVT::getIntegerVT(*DAG.getContext(), + N0VT.getScalarType().getSizeInBits()); + EVT MatchingVectorType = + EVT::getVectorVT(*DAG.getContext(), MatchingElementType, + N0VT.getVectorNumElements()); - if (SVT == MatchingVectorType) { - SDValue VsetCC = DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, - N0.getOperand(0), N0.getOperand(1), - cast(N0.getOperand(2))->get()); - return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT); - } + if (SVT == MatchingVectorType) { + SDValue VsetCC = DAG.getSetCC(N->getDebugLoc(), MatchingVectorType, + N0.getOperand(0), N0.getOperand(1), + cast(N0.getOperand(2))->get()); + return DAG.getSExtOrTrunc(VsetCC, N->getDebugLoc(), VT); } } @@ -5235,13 +5241,12 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) { // if the source is smaller than the dest, we still need an extend return DAG.getNode(N0.getOpcode(), N->getDebugLoc(), VT, N0.getOperand(0)); - else if (N0.getOperand(0).getValueType().bitsGT(VT)) + if (N0.getOperand(0).getValueType().bitsGT(VT)) // if the source is larger than the dest, than we just need the truncate return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, N0.getOperand(0)); - else - // if the source and dest are the same type, we can drop both the extend - // and the truncate. - return N0.getOperand(0); + // if the source and dest are the same type, we can drop both the extend + // and the truncate. + return N0.getOperand(0); } // Fold extract-and-trunc into a narrow extract. For example: @@ -5301,6 +5306,48 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) { if (Reduced.getNode()) return Reduced; } + // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...)), + // where ... 
are all 'undef'. + if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) { + SmallVector VTs; + SDValue V; + unsigned Idx = 0; + unsigned NumDefs = 0; + + for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) { + SDValue X = N0.getOperand(i); + if (X.getOpcode() != ISD::UNDEF) { + V = X; + Idx = i; + NumDefs++; + } + // Stop if more than one members are non-undef. + if (NumDefs > 1) + break; + VTs.push_back(EVT::getVectorVT(*DAG.getContext(), + VT.getVectorElementType(), + X.getValueType().getVectorNumElements())); + } + + if (NumDefs == 0) + return DAG.getUNDEF(VT); + + if (NumDefs == 1) { + assert(V.getNode() && "The single defined operand is empty!"); + SmallVector Opnds; + for (unsigned i = 0, e = VTs.size(); i != e; ++i) { + if (i != Idx) { + Opnds.push_back(DAG.getUNDEF(VTs[i])); + continue; + } + SDValue NV = DAG.getNode(ISD::TRUNCATE, V.getDebugLoc(), VTs[i], V); + AddToWorkList(NV.getNode()); + Opnds.push_back(NV); + } + return DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, + &Opnds[0], Opnds.size()); + } + } // Simplify the operands using demanded-bits information. if (!VT.isVector() && @@ -5338,7 +5385,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) { !LD2->isVolatile() && DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) { unsigned Align = LD1->getAlignment(); - unsigned NewAlign = TLI.getTargetData()-> + unsigned NewAlign = TLI.getDataLayout()-> getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); if (NewAlign <= Align && @@ -5407,7 +5454,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) { !cast(N0)->isVolatile() && (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) { LoadSDNode *LN0 = cast(N0); - unsigned Align = TLI.getTargetData()-> + unsigned Align = TLI.getDataLayout()-> getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); unsigned OrigAlign = LN0->getAlignment(); @@ -5430,7 +5477,8 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) { // This often reduces constant pool loads. 
if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(VT)) || (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(VT))) && - N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) { + N0.getNode()->hasOneUse() && VT.isInteger() && + !VT.isVector() && !N0.getValueType().isVector()) { SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT, N0.getOperand(0)); AddToWorkList(NewConv.getNode()); @@ -5653,7 +5701,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { } // fold (fadd c1, c2) -> c1 + c2 - if (N0CFP && N1CFP && VT != MVT::ppcf128) + if (N0CFP && N1CFP) return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1); // canonicalize constant to RHS if (N0CFP && !N1CFP) @@ -5664,12 +5712,12 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { return N0; // fold (fadd A, (fneg B)) -> (fsub A, B) if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && - isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options) == 2) + isNegatibleForFree(N1, LegalOperations, TLI, &DAG.getTarget().Options) == 2) return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, GetNegatedExpression(N1, DAG, LegalOperations)); // fold (fadd (fneg A), B) -> (fsub B, A) if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && - isNegatibleForFree(N0, LegalOperations, TLI, &DAG.getTarget().Options) == 2) + isNegatibleForFree(N0, LegalOperations, TLI, &DAG.getTarget().Options) == 2) return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N1, GetNegatedExpression(N0, DAG, LegalOperations)); @@ -5681,6 +5729,139 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0.getOperand(1), N1)); + // If allow, fold (fadd (fneg x), x) -> 0.0 + if (DAG.getTarget().Options.UnsafeFPMath && + N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1) { + return DAG.getConstantFP(0.0, VT); + } + + // If allow, fold (fadd x, (fneg x)) -> 0.0 + if (DAG.getTarget().Options.UnsafeFPMath && + N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0) { + return DAG.getConstantFP(0.0, VT); + } + + // In unsafe math mode, we can fold chains of FADD's of the same value + // into multiplications. This transform is not safe in general because + // we are reducing the number of rounding steps. 
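A minimal standalone illustration of one of the folds implemented just below, showing why the whole group is gated on UnsafeFPMath: the rewritten expression performs different rounding steps, so it need not be bit-identical to the original for every input. The function names here are purely illustrative.

#include <cstdio>

// One of the unsafe-math rewrites:
//   (fadd (fmul c, x), x)  ->  (fmul c+1, x)
static float beforeFold(float c, float x) { return c * x + x; }
static float afterFold(float c, float x)  { return (c + 1.0f) * x; }

int main() {
  float c = 0.1f, x = 3.0f;
  // Print in hex-float form so any last-bit difference is visible.
  std::printf("before=%a after=%a\n", beforeFold(c, x), afterFold(c, x));
  return 0;
}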
+ if (DAG.getTarget().Options.UnsafeFPMath && + TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && + !N0CFP && !N1CFP) { + if (N0.getOpcode() == ISD::FMUL) { + ConstantFPSDNode *CFP00 = dyn_cast(N0.getOperand(0)); + ConstantFPSDNode *CFP01 = dyn_cast(N0.getOperand(1)); + + // (fadd (fmul c, x), x) -> (fmul c+1, x) + if (CFP00 && !CFP01 && N0.getOperand(1) == N1) { + SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, + SDValue(CFP00, 0), + DAG.getConstantFP(1.0, VT)); + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N1, NewCFP); + } + + // (fadd (fmul x, c), x) -> (fmul c+1, x) + if (CFP01 && !CFP00 && N0.getOperand(0) == N1) { + SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, + SDValue(CFP01, 0), + DAG.getConstantFP(1.0, VT)); + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N1, NewCFP); + } + + // (fadd (fadd x, x), x) -> (fmul 3.0, x) + if (!CFP00 && !CFP01 && N0.getOperand(0) == N0.getOperand(1) && + N0.getOperand(0) == N1) { + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N1, DAG.getConstantFP(3.0, VT)); + } + + // (fadd (fmul c, x), (fadd x, x)) -> (fmul c+2, x) + if (CFP00 && !CFP01 && N1.getOpcode() == ISD::FADD && + N1.getOperand(0) == N1.getOperand(1) && + N0.getOperand(1) == N1.getOperand(0)) { + SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, + SDValue(CFP00, 0), + DAG.getConstantFP(2.0, VT)); + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0.getOperand(1), NewCFP); + } + + // (fadd (fmul x, c), (fadd x, x)) -> (fmul c+2, x) + if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD && + N1.getOperand(0) == N1.getOperand(1) && + N0.getOperand(0) == N1.getOperand(0)) { + SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, + SDValue(CFP01, 0), + DAG.getConstantFP(2.0, VT)); + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0.getOperand(0), NewCFP); + } + } + + if (N1.getOpcode() == ISD::FMUL) { + ConstantFPSDNode *CFP10 = dyn_cast(N1.getOperand(0)); + ConstantFPSDNode *CFP11 = dyn_cast(N1.getOperand(1)); + + // (fadd x, (fmul c, x)) -> (fmul c+1, x) + if (CFP10 && !CFP11 && N1.getOperand(1) == N0) { + SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, + SDValue(CFP10, 0), + DAG.getConstantFP(1.0, VT)); + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0, NewCFP); + } + + // (fadd x, (fmul x, c)) -> (fmul c+1, x) + if (CFP11 && !CFP10 && N1.getOperand(0) == N0) { + SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, + SDValue(CFP11, 0), + DAG.getConstantFP(1.0, VT)); + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0, NewCFP); + } + + // (fadd x, (fadd x, x)) -> (fmul 3.0, x) + if (!CFP10 && !CFP11 && N1.getOperand(0) == N1.getOperand(1) && + N1.getOperand(0) == N0) { + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0, DAG.getConstantFP(3.0, VT)); + } + + // (fadd (fadd x, x), (fmul c, x)) -> (fmul c+2, x) + if (CFP10 && !CFP11 && N1.getOpcode() == ISD::FADD && + N1.getOperand(0) == N1.getOperand(1) && + N0.getOperand(1) == N1.getOperand(0)) { + SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, + SDValue(CFP10, 0), + DAG.getConstantFP(2.0, VT)); + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0.getOperand(1), NewCFP); + } + + // (fadd (fadd x, x), (fmul x, c)) -> (fmul c+2, x) + if (CFP11 && !CFP10 && N1.getOpcode() == ISD::FADD && + N1.getOperand(0) == N1.getOperand(1) && + N0.getOperand(0) == N1.getOperand(0)) { + SDValue NewCFP = DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, + SDValue(CFP11, 0), + DAG.getConstantFP(2.0, VT)); + return 
DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0.getOperand(0), NewCFP); + } + } + + // (fadd (fadd x, x), (fadd x, x)) -> (fmul 4.0, x) + if (N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD && + N0.getOperand(0) == N0.getOperand(1) && + N1.getOperand(0) == N1.getOperand(1) && + N0.getOperand(0) == N1.getOperand(0)) { + return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0.getOperand(0), + DAG.getConstantFP(4.0, VT)); + } + } + // FADD -> FMA combines: if ((DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast || DAG.getTarget().Options.UnsafeFPMath) && @@ -5692,8 +5873,8 @@ SDValue DAGCombiner::visitFADD(SDNode *N) { return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, N0.getOperand(0), N0.getOperand(1), N1); } - - // fold (fadd x, (fmul y, z)) -> (fma x, y, z) + + // fold (fadd x, (fmul y, z)) -> (fma y, z, x) // Note: Commutes FADD operands. if (N1.getOpcode() == ISD::FMUL && N1->hasOneUse()) { return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, @@ -5719,7 +5900,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) { } // fold (fsub c1, c2) -> c1-c2 - if (N0CFP && N1CFP && VT != MVT::ppcf128) + if (N0CFP && N1CFP) return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1); // fold (fsub A, 0) -> A if (DAG.getTarget().Options.UnsafeFPMath && @@ -5811,7 +5992,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) { } // fold (fmul c1, c2) -> c1*c2 - if (N0CFP && N1CFP && VT != MVT::ppcf128) + if (N0CFP && N1CFP) return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, N1); // canonicalize constant to RHS if (N0CFP && !N1CFP) @@ -5867,7 +6048,14 @@ SDValue DAGCombiner::visitFMA(SDNode *N) { ConstantFPSDNode *N0CFP = dyn_cast(N0); ConstantFPSDNode *N1CFP = dyn_cast(N1); EVT VT = N->getValueType(0); + DebugLoc dl = N->getDebugLoc(); + if (DAG.getTarget().Options.UnsafeFPMath) { + if (N0CFP && N0CFP->isZero()) + return N2; + if (N1CFP && N1CFP->isZero()) + return N2; + } if (N0CFP && N0CFP->isExactlyValue(1.0)) return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N1, N2); if (N1CFP && N1CFP->isExactlyValue(1.0)) @@ -5877,6 +6065,58 @@ SDValue DAGCombiner::visitFMA(SDNode *N) { if (N0CFP && !N1CFP) return DAG.getNode(ISD::FMA, N->getDebugLoc(), VT, N1, N0, N2); + // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2) + if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && + N2.getOpcode() == ISD::FMUL && + N0 == N2.getOperand(0) && + N2.getOperand(1).getOpcode() == ISD::ConstantFP) { + return DAG.getNode(ISD::FMUL, dl, VT, N0, + DAG.getNode(ISD::FADD, dl, VT, N1, N2.getOperand(1))); + } + + + // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y) + if (DAG.getTarget().Options.UnsafeFPMath && + N0.getOpcode() == ISD::FMUL && N1CFP && + N0.getOperand(1).getOpcode() == ISD::ConstantFP) { + return DAG.getNode(ISD::FMA, dl, VT, + N0.getOperand(0), + DAG.getNode(ISD::FMUL, dl, VT, N1, N0.getOperand(1)), + N2); + } + + // (fma x, 1, y) -> (fadd x, y) + // (fma x, -1, y) -> (fadd (fneg x), y) + if (N1CFP) { + if (N1CFP->isExactlyValue(1.0)) + return DAG.getNode(ISD::FADD, dl, VT, N0, N2); + + if (N1CFP->isExactlyValue(-1.0) && + (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) { + SDValue RHSNeg = DAG.getNode(ISD::FNEG, dl, VT, N0); + AddToWorkList(RHSNeg.getNode()); + return DAG.getNode(ISD::FADD, dl, VT, N2, RHSNeg); + } + } + + // (fma x, c, x) -> (fmul x, (c+1)) + if (DAG.getTarget().Options.UnsafeFPMath && N1CFP && N0 == N2) { + return DAG.getNode(ISD::FMUL, dl, VT, + N0, + DAG.getNode(ISD::FADD, dl, VT, + N1, DAG.getConstantFP(1.0, VT))); + } + + // (fma x, c, (fneg x)) -> (fmul x, (c-1)) + if 
(DAG.getTarget().Options.UnsafeFPMath && N1CFP && + N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) { + return DAG.getNode(ISD::FMUL, dl, VT, + N0, + DAG.getNode(ISD::FADD, dl, VT, + N1, DAG.getConstantFP(-1.0, VT))); + } + + return SDValue(); } @@ -5895,11 +6135,11 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) { } // fold (fdiv c1, c2) -> c1/c2 - if (N0CFP && N1CFP && VT != MVT::ppcf128) + if (N0CFP && N1CFP) return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1); // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable. - if (N1CFP && VT != MVT::ppcf128 && DAG.getTarget().Options.UnsafeFPMath) { + if (N1CFP && DAG.getTarget().Options.UnsafeFPMath) { // Compute the reciprocal 1.0 / c2. APFloat N1APF = N1CFP->getValueAPF(); APFloat Recip(N1APF.getSemantics(), 1); // 1.0 @@ -5942,7 +6182,7 @@ SDValue DAGCombiner::visitFREM(SDNode *N) { EVT VT = N->getValueType(0); // fold (frem c1, c2) -> fmod(c1,c2) - if (N0CFP && N1CFP && VT != MVT::ppcf128) + if (N0CFP && N1CFP) return DAG.getNode(ISD::FREM, N->getDebugLoc(), VT, N0, N1); return SDValue(); @@ -5955,7 +6195,7 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) { ConstantFPSDNode *N1CFP = dyn_cast(N1); EVT VT = N->getValueType(0); - if (N0CFP && N1CFP && VT != MVT::ppcf128) // Constant fold + if (N0CFP && N1CFP) // Constant fold return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1); if (N1CFP) { @@ -6005,7 +6245,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) { EVT OpVT = N0.getValueType(); // fold (sint_to_fp c1) -> c1fp - if (N0C && OpVT != MVT::ppcf128 && + if (N0C && // ...but only if the target supports immediate floating-point values (!LegalOperations || TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) @@ -6062,7 +6302,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) { EVT OpVT = N0.getValueType(); // fold (uint_to_fp c1) -> c1fp - if (N0C && OpVT != MVT::ppcf128 && + if (N0C && // ...but only if the target supports immediate floating-point values (!LegalOperations || TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) @@ -6117,7 +6357,7 @@ SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) { EVT VT = N->getValueType(0); // fold (fp_to_uint c1fp) -> c1 - if (N0CFP && VT != MVT::ppcf128) + if (N0CFP) return DAG.getNode(ISD::FP_TO_UINT, N->getDebugLoc(), VT, N0); return SDValue(); @@ -6130,7 +6370,7 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) { EVT VT = N->getValueType(0); // fold (fp_round c1fp) -> c1fp - if (N0CFP && N0.getValueType() != MVT::ppcf128) + if (N0CFP) return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0, N1); // fold (fp_round (fp_extend x)) -> x @@ -6184,7 +6424,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) { return SDValue(); // fold (fp_extend c1fp) -> c1fp - if (N0CFP && VT != MVT::ppcf128) + if (N0CFP) return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, N0); // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the @@ -6225,6 +6465,11 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) { SDValue N0 = N->getOperand(0); EVT VT = N->getValueType(0); + if (VT.isVector()) { + SDValue FoldedVOp = SimplifyVUnaryOp(N); + if (FoldedVOp.getNode()) return FoldedVOp; + } + if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(), &DAG.getTarget().Options)) return GetNegatedExpression(N0, DAG, LegalOperations); @@ -6246,6 +6491,17 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) { } } + // (fneg (fmul c, x)) -> (fmul -c, x) + if (N0.getOpcode() == ISD::FMUL) { + ConstantFPSDNode *CFP1 = dyn_cast(N0.getOperand(1)); + if (CFP1) { + 
return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, + N0.getOperand(0), + DAG.getNode(ISD::FNEG, N->getDebugLoc(), VT, + N0.getOperand(1))); + } + } + return SDValue(); } @@ -6255,7 +6511,7 @@ SDValue DAGCombiner::visitFCEIL(SDNode *N) { EVT VT = N->getValueType(0); // fold (fceil c1) -> fceil(c1) - if (N0CFP && VT != MVT::ppcf128) + if (N0CFP) return DAG.getNode(ISD::FCEIL, N->getDebugLoc(), VT, N0); return SDValue(); @@ -6267,7 +6523,7 @@ SDValue DAGCombiner::visitFTRUNC(SDNode *N) { EVT VT = N->getValueType(0); // fold (ftrunc c1) -> ftrunc(c1) - if (N0CFP && VT != MVT::ppcf128) + if (N0CFP) return DAG.getNode(ISD::FTRUNC, N->getDebugLoc(), VT, N0); return SDValue(); @@ -6279,7 +6535,7 @@ SDValue DAGCombiner::visitFFLOOR(SDNode *N) { EVT VT = N->getValueType(0); // fold (ffloor c1) -> ffloor(c1) - if (N0CFP && VT != MVT::ppcf128) + if (N0CFP) return DAG.getNode(ISD::FFLOOR, N->getDebugLoc(), VT, N0); return SDValue(); @@ -6290,8 +6546,13 @@ SDValue DAGCombiner::visitFABS(SDNode *N) { ConstantFPSDNode *N0CFP = dyn_cast(N0); EVT VT = N->getValueType(0); + if (VT.isVector()) { + SDValue FoldedVOp = SimplifyVUnaryOp(N); + if (FoldedVOp.getNode()) return FoldedVOp; + } + // fold (fabs c1) -> fabs(c1) - if (N0CFP && VT != MVT::ppcf128) + if (N0CFP) return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0); // fold (fabs (fabs x)) -> (fabs x) if (N0.getOpcode() == ISD::FABS) @@ -6511,7 +6772,7 @@ static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, } else return false; - TargetLowering::AddrMode AM; + AddrMode AM; if (N->getOpcode() == ISD::ADD) { ConstantSDNode *Offset = dyn_cast(N->getOperand(1)); if (Offset) @@ -7138,7 +7399,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) { unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff); Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext()); - if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy)) + if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy)) return SDValue(); SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(), @@ -7200,7 +7461,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) { unsigned LDAlign = LD->getAlignment(); unsigned STAlign = ST->getAlignment(); Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext()); - unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy); + unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy); if (LDAlign < ABIAlign || STAlign < ABIAlign) return SDValue(); @@ -7225,6 +7486,433 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) { return SDValue(); } +/// Returns the base pointer and an integer offset from that object. +static std::pair GetPointerBaseAndOffset(SDValue Ptr) { + if (Ptr->getOpcode() == ISD::ADD && isa(Ptr->getOperand(1))) { + int64_t Offset = cast(Ptr->getOperand(1))->getSExtValue(); + SDValue Base = Ptr->getOperand(0); + return std::make_pair(Base, Offset); + } + + return std::make_pair(Ptr, 0); +} + +/// Holds a pointer to an LSBaseSDNode as well as information on where it +/// is located in a sequence of memory operations connected by a chain. +struct MemOpLink { + MemOpLink (LSBaseSDNode *N, int64_t Offset, unsigned Seq): + MemNode(N), OffsetFromBase(Offset), SequenceNum(Seq) { } + // Ptr to the mem node. + LSBaseSDNode *MemNode; + // Offset from the base ptr. + int64_t OffsetFromBase; + // What is the sequence number of this mem node. + // Lowest mem operand in the DAG starts at zero. 
+ unsigned SequenceNum; +}; + +/// Sorts store nodes in a link according to their offset from a shared +// base ptr. +struct ConsecutiveMemoryChainSorter { + bool operator()(MemOpLink LHS, MemOpLink RHS) { + return LHS.OffsetFromBase < RHS.OffsetFromBase; + } +}; + +bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { + EVT MemVT = St->getMemoryVT(); + int64_t ElementSizeBytes = MemVT.getSizeInBits()/8; + + // Don't merge vectors into wider inputs. + if (MemVT.isVector() || !MemVT.isSimple()) + return false; + + // Perform an early exit check. Do not bother looking at stored values that + // are not constants or loads. + SDValue StoredVal = St->getValue(); + bool IsLoadSrc = isa(StoredVal); + if (!isa(StoredVal) && !isa(StoredVal) && + !IsLoadSrc) + return false; + + // Only look at ends of store sequences. + SDValue Chain = SDValue(St, 1); + if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE) + return false; + + // This holds the base pointer and the offset in bytes from the base pointer. + std::pair BasePtr = + GetPointerBaseAndOffset(St->getBasePtr()); + + // We must have a base and an offset. + if (!BasePtr.first.getNode()) + return false; + + // Do not handle stores to undef base pointers. + if (BasePtr.first.getOpcode() == ISD::UNDEF) + return false; + + SmallVector StoreNodes; + // Walk up the chain and look for nodes with offsets from the same + // base pointer. Stop when reaching an instruction with a different kind + // or instruction which has a different base pointer. + unsigned Seq = 0; + StoreSDNode *Index = St; + while (Index) { + // If the chain has more than one use, then we can't reorder the mem ops. + if (Index != St && !SDValue(Index, 1)->hasOneUse()) + break; + + // Find the base pointer and offset for this memory node. + std::pair Ptr = + GetPointerBaseAndOffset(Index->getBasePtr()); + + // Check that the base pointer is the same as the original one. + if (Ptr.first.getNode() != BasePtr.first.getNode()) + break; + + // Check that the alignment is the same. + if (Index->getAlignment() != St->getAlignment()) + break; + + // The memory operands must not be volatile. + if (Index->isVolatile() || Index->isIndexed()) + break; + + // No truncation. + if (StoreSDNode *St = dyn_cast(Index)) + if (St->isTruncatingStore()) + break; + + // The stored memory type must be the same. + if (Index->getMemoryVT() != MemVT) + break; + + // We do not allow unaligned stores because we want to prevent overriding + // stores. + if (Index->getAlignment()*8 != MemVT.getSizeInBits()) + break; + + // We found a potential memory operand to merge. + StoreNodes.push_back(MemOpLink(Index, Ptr.second, Seq++)); + + // Move up the chain to the next memory operation. + Index = dyn_cast(Index->getChain().getNode()); + } + + // Check if there is anything to merge. + if (StoreNodes.size() < 2) + return false; + + // Sort the memory operands according to their distance from the base pointer. + std::sort(StoreNodes.begin(), StoreNodes.end(), + ConsecutiveMemoryChainSorter()); + + // Scan the memory operations on the chain and find the first non-consecutive + // store memory address. + unsigned LastConsecutiveStore = 0; + int64_t StartAddress = StoreNodes[0].OffsetFromBase; + for (unsigned i=1; i(StoreNodes[i].MemNode); + SDValue StoredVal = St->getValue(); + + if (ConstantSDNode *C = dyn_cast(StoredVal)) { + NonZero |= !C->isNullValue(); + } else if (ConstantFPSDNode *C = dyn_cast(StoredVal)) { + NonZero |= !C->getConstantFPValue()->isNullValue(); + } else { + // Non constant. 
+ break; + } + + // Find a legal type for the constant store. + unsigned StoreBW = (i+1) * ElementSizeBytes * 8; + EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW); + if (TLI.isTypeLegal(StoreTy)) + LastLegalType = i+1; + + // Find a legal type for the vector store. + EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1); + if (TLI.isTypeLegal(Ty)) + LastLegalVectorType = i + 1; + } + + // We only use vectors if the constant is known to be zero. + if (NonZero) + LastLegalVectorType = 0; + + // Check if we found a legal integer type to store. + if (LastLegalType == 0 && LastLegalVectorType == 0) + return false; + + bool UseVector = LastLegalVectorType > LastLegalType; + unsigned NumElem = UseVector ? LastLegalVectorType : LastLegalType; + + // Make sure we have something to merge. + if (NumElem < 2) + return false; + + unsigned EarliestNodeUsed = 0; + for (unsigned i=0; i < NumElem; ++i) { + // Find a chain for the new wide-store operand. Notice that some + // of the store nodes that we found may not be selected for inclusion + // in the wide store. The chain we use needs to be the chain of the + // earliest store node which is *used* and replaced by the wide store. + if (StoreNodes[i].SequenceNum > StoreNodes[EarliestNodeUsed].SequenceNum) + EarliestNodeUsed = i; + } + + // The earliest Node in the DAG. + LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode; + DebugLoc DL = StoreNodes[0].MemNode->getDebugLoc(); + + SDValue StoredVal; + if (UseVector) { + // Find a legal type for the vector store. + EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem); + assert(TLI.isTypeLegal(Ty) && "Illegal vector store"); + StoredVal = DAG.getConstant(0, Ty); + } else { + unsigned StoreBW = NumElem * ElementSizeBytes * 8; + APInt StoreInt(StoreBW, 0); + + // Construct a single integer constant which is made of the smaller + // constant inputs. + bool IsLE = TLI.isLittleEndian(); + for (unsigned i = 0; i < NumElem ; ++i) { + unsigned Idx = IsLE ?(NumElem - 1 - i) : i; + StoreSDNode *St = cast(StoreNodes[Idx].MemNode); + SDValue Val = St->getValue(); + StoreInt<<=ElementSizeBytes*8; + if (ConstantSDNode *C = dyn_cast(Val)) { + StoreInt|=C->getAPIntValue().zext(StoreBW); + } else if (ConstantFPSDNode *C = dyn_cast(Val)) { + StoreInt|= C->getValueAPF().bitcastToAPInt().zext(StoreBW); + } else { + assert(false && "Invalid constant element type"); + } + } + + // Create the new Load and Store operations. + EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW); + StoredVal = DAG.getConstant(StoreInt, StoreTy); + } + + SDValue NewStore = DAG.getStore(EarliestOp->getChain(), DL, StoredVal, + FirstInChain->getBasePtr(), + FirstInChain->getPointerInfo(), + false, false, + FirstInChain->getAlignment()); + + // Replace the first store with the new store + CombineTo(EarliestOp, NewStore); + // Erase all other stores. + for (unsigned i = 0; i < NumElem ; ++i) { + if (StoreNodes[i].MemNode == EarliestOp) + continue; + StoreSDNode *St = cast(StoreNodes[i].MemNode); + // ReplaceAllUsesWith will replace all uses that existed when it was + // called, but graph optimizations may cause new ones to appear. For + // example, the case in pr14333 looks like + // + // St's chain -> St -> another store -> X + // + // And the only difference from St to the other store is the chain. + // When we change it's chain to be St's chain they become identical, + // get CSEed and the net result is that X is now a use of St. + // Since we know that St is redundant, just iterate. 
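The constant path of MergeConsecutiveStores shown above packs the individual constants into one wide integer and emits a single store. A standalone sketch of the effect for four byte stores, assuming a little-endian host so the packed value matches the original bytes; narrowStores/mergedStore are hypothetical helpers used only for the comparison:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Four consecutive byte stores of constants...
static void narrowStores(uint8_t *p) {
  p[0] = 0x11; p[1] = 0x22; p[2] = 0x33; p[3] = 0x44;
}

// ...replaced by a single 32-bit store. On a little-endian target the
// lowest-address element ends up in the least significant bits, matching
// the shift/or packing loop above.
static void mergedStore(uint8_t *p) {
  const uint32_t packed = 0x44332211u;
  std::memcpy(p, &packed, sizeof(packed));
}

int main() {
  uint8_t a[4] = {0}, b[4] = {0};
  narrowStores(a);
  mergedStore(b);
  std::printf("same bytes: %d\n", std::memcmp(a, b, sizeof(a)) == 0);
  return 0;
}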
+ while (!St->use_empty()) + DAG.ReplaceAllUsesWith(SDValue(St, 0), St->getChain()); + removeFromWorkList(St); + DAG.DeleteNode(St); + } + + return true; + } + + // Below we handle the case of multiple consecutive stores that + // come from multiple consecutive loads. We merge them into a single + // wide load and a single wide store. + + // Look for load nodes which are used by the stored values. + SmallVector LoadNodes; + + // Find acceptable loads. Loads need to have the same chain (token factor), + // must not be zext, volatile, indexed, and they must be consecutive. + SDValue LdBasePtr; + for (unsigned i=0; i(StoreNodes[i].MemNode); + LoadSDNode *Ld = dyn_cast(St->getValue()); + if (!Ld) break; + + // Loads must only have one use. + if (!Ld->hasNUsesOfValue(1, 0)) + break; + + // Check that the alignment is the same as the stores. + if (Ld->getAlignment() != St->getAlignment()) + break; + + // The memory operands must not be volatile. + if (Ld->isVolatile() || Ld->isIndexed()) + break; + + // We do not accept ext loads. + if (Ld->getExtensionType() != ISD::NON_EXTLOAD) + break; + + // The stored memory type must be the same. + if (Ld->getMemoryVT() != MemVT) + break; + + std::pair LdPtr = + GetPointerBaseAndOffset(Ld->getBasePtr()); + + // If this is not the first ptr that we check. + if (LdBasePtr.getNode()) { + // The base ptr must be the same. + if (LdPtr.first != LdBasePtr) + break; + } else { + // Check that all other base pointers are the same as this one. + LdBasePtr = LdPtr.first; + } + + // We found a potential memory operand to merge. + LoadNodes.push_back(MemOpLink(Ld, LdPtr.second, 0)); + } + + if (LoadNodes.size() < 2) + return false; + + // Scan the memory operations on the chain and find the first non-consecutive + // load memory address. These variables hold the index in the store node + // array. + unsigned LastConsecutiveLoad = 0; + // This variable refers to the size and not index in the array. + unsigned LastLegalVectorType = 0; + unsigned LastLegalIntegerType = 0; + StartAddress = LoadNodes[0].OffsetFromBase; + SDValue FirstChain = LoadNodes[0].MemNode->getChain(); + for (unsigned i = 1; i < LoadNodes.size(); ++i) { + // All loads much share the same chain. + if (LoadNodes[i].MemNode->getChain() != FirstChain) + break; + + int64_t CurrAddress = LoadNodes[i].OffsetFromBase; + if (CurrAddress - StartAddress != (ElementSizeBytes * i)) + break; + LastConsecutiveLoad = i; + + // Find a legal type for the vector store. + EVT StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1); + if (TLI.isTypeLegal(StoreTy)) + LastLegalVectorType = i + 1; + + // Find a legal type for the integer store. + unsigned StoreBW = (i+1) * ElementSizeBytes * 8; + StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW); + if (TLI.isTypeLegal(StoreTy)) + LastLegalIntegerType = i + 1; + } + + // Only use vector types if the vector type is larger than the integer type. + // If they are the same, use integers. + bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType; + unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType); + + // We add +1 here because the LastXXX variables refer to location while + // the NumElem refers to array/index size. + unsigned NumElem = std::min(LastConsecutiveStore, LastConsecutiveLoad) + 1; + NumElem = std::min(LastLegalType, NumElem); + + if (NumElem < 2) + return false; + + // The earliest Node in the DAG. 
+ unsigned EarliestNodeUsed = 0; + LSBaseSDNode *EarliestOp = StoreNodes[EarliestNodeUsed].MemNode; + for (unsigned i=1; i StoreNodes[EarliestNodeUsed].SequenceNum) + EarliestNodeUsed = i; + } + + // Find if it is better to use vectors or integers to load and store + // to memory. + EVT JointMemOpVT; + if (UseVectorTy) { + JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem); + } else { + unsigned StoreBW = NumElem * ElementSizeBytes * 8; + JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), StoreBW); + } + + DebugLoc LoadDL = LoadNodes[0].MemNode->getDebugLoc(); + DebugLoc StoreDL = StoreNodes[0].MemNode->getDebugLoc(); + + LoadSDNode *FirstLoad = cast(LoadNodes[0].MemNode); + SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL, + FirstLoad->getChain(), + FirstLoad->getBasePtr(), + FirstLoad->getPointerInfo(), + false, false, false, + FirstLoad->getAlignment()); + + SDValue NewStore = DAG.getStore(EarliestOp->getChain(), StoreDL, NewLoad, + FirstInChain->getBasePtr(), + FirstInChain->getPointerInfo(), false, false, + FirstInChain->getAlignment()); + + // Replace one of the loads with the new load. + LoadSDNode *Ld = cast(LoadNodes[0].MemNode); + DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), + SDValue(NewLoad.getNode(), 1)); + + // Remove the rest of the load chains. + for (unsigned i = 1; i < NumElem ; ++i) { + // Replace all chain users of the old load nodes with the chain of the new + // load node. + LoadSDNode *Ld = cast(LoadNodes[i].MemNode); + DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Ld->getChain()); + } + + // Replace the first store with the new store. + CombineTo(EarliestOp, NewStore); + // Erase all other stores. + for (unsigned i = 0; i < NumElem ; ++i) { + // Remove all Store nodes. + if (StoreNodes[i].MemNode == EarliestOp) + continue; + StoreSDNode *St = cast(StoreNodes[i].MemNode); + DAG.ReplaceAllUsesOfValueWith(SDValue(St, 0), St->getChain()); + removeFromWorkList(St); + DAG.DeleteNode(St); + } + + return true; +} + SDValue DAGCombiner::visitSTORE(SDNode *N) { StoreSDNode *ST = cast(N); SDValue Chain = ST->getChain(); @@ -7237,7 +7925,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) { ST->isUnindexed()) { unsigned OrigAlign = ST->getAlignment(); EVT SVT = Value.getOperand(0).getValueType(); - unsigned Align = TLI.getTargetData()-> + unsigned Align = TLI.getDataLayout()-> getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext())); if (Align <= OrigAlign && ((!LegalOperations && !ST->isVolatile()) || @@ -7426,6 +8114,11 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) { ST->getAlignment()); } + // Only perform this optimization before the types are legal, because we + // don't want to perform this optimization on every DAGCombine invocation. + if (!LegalTypes && MergeConsecutiveStores(ST)) + return SDValue(N, 0); + return ReduceLoadOpStoreWidth(N); } @@ -7504,9 +8197,9 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT. // We only perform this optimization before the op legalization phase because - // we may introduce new vector instructions which are not backed by TD patterns. - // For example on AVX, extracting elements from a wide vector without using - // extract_subvector. + // we may introduce new vector instructions which are not backed by TD + // patterns. For example on AVX, extracting elements from a wide vector + // without using extract_subvector. 
if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE && ConstEltNo && !LegalOperations) { int Elt = cast(EltNo)->getZExtValue(); @@ -7625,7 +8318,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { // Check the resultant load doesn't need a higher alignment than the // original load. unsigned NewAlign = - TLI.getTargetData() + TLI.getDataLayout() ->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext())); if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT)) @@ -7690,15 +8383,21 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { return SDValue(); } -SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { +// Simplify (build_vec (ext )) to (bitcast (build_vec )) +SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) { + // We perform this optimization post type-legalization because + // the type-legalizer often scalarizes integer-promoted vectors. + // Performing this optimization before may create bit-casts which + // will be type-legalized to complex code sequences. + // We perform this optimization only before the operation legalizer because we + // may introduce illegal operations. + if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes) + return SDValue(); + unsigned NumInScalars = N->getNumOperands(); DebugLoc dl = N->getDebugLoc(); EVT VT = N->getValueType(0); - // A vector built entirely of undefs is undef. - if (ISD::allOperandsUndef(N)) - return DAG.getUNDEF(VT); - // Check to see if this is a BUILD_VECTOR of a bunch of values // which come from any_extend or zero_extend nodes. If so, we can create // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR @@ -7741,64 +8440,141 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { // In order to have valid types, all of the inputs must be extended from the // same source type and all of the inputs must be any or zero extend. // Scalar sizes must be a power of two. - EVT OutScalarTy = N->getValueType(0).getScalarType(); + EVT OutScalarTy = VT.getScalarType(); bool ValidTypes = SourceType != MVT::Other && isPowerOf2_32(OutScalarTy.getSizeInBits()) && isPowerOf2_32(SourceType.getSizeInBits()); - // We perform this optimization post type-legalization because - // the type-legalizer often scalarizes integer-promoted vectors. - // Performing this optimization before may create bit-casts which - // will be type-legalized to complex code sequences. - // We perform this optimization only before the operation legalizer because we - // may introduce illegal operations. // Create a new simpler BUILD_VECTOR sequence which other optimizations can // turn into a single shuffle instruction. - if ((Level == AfterLegalizeVectorOps || Level == AfterLegalizeTypes) && - ValidTypes) { - bool isLE = TLI.isLittleEndian(); - unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits(); - assert(ElemRatio > 1 && "Invalid element size ratio"); - SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType): - DAG.getConstant(0, SourceType); - - unsigned NewBVElems = ElemRatio * N->getValueType(0).getVectorNumElements(); - SmallVector Ops(NewBVElems, Filler); - - // Populate the new build_vector - for (unsigned i=0; i < N->getNumOperands(); ++i) { - SDValue Cast = N->getOperand(i); - assert((Cast.getOpcode() == ISD::ANY_EXTEND || - Cast.getOpcode() == ISD::ZERO_EXTEND || - Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode"); - SDValue In; - if (Cast.getOpcode() == ISD::UNDEF) - In = DAG.getUNDEF(SourceType); - else - In = Cast->getOperand(0); - unsigned Index = isLE ? 
(i * ElemRatio) : - (i * ElemRatio + (ElemRatio - 1)); + if (!ValidTypes) + return SDValue(); + + bool isLE = TLI.isLittleEndian(); + unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits(); + assert(ElemRatio > 1 && "Invalid element size ratio"); + SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType): + DAG.getConstant(0, SourceType); + + unsigned NewBVElems = ElemRatio * VT.getVectorNumElements(); + SmallVector Ops(NewBVElems, Filler); + + // Populate the new build_vector + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + SDValue Cast = N->getOperand(i); + assert((Cast.getOpcode() == ISD::ANY_EXTEND || + Cast.getOpcode() == ISD::ZERO_EXTEND || + Cast.getOpcode() == ISD::UNDEF) && "Invalid cast opcode"); + SDValue In; + if (Cast.getOpcode() == ISD::UNDEF) + In = DAG.getUNDEF(SourceType); + else + In = Cast->getOperand(0); + unsigned Index = isLE ? (i * ElemRatio) : + (i * ElemRatio + (ElemRatio - 1)); + + assert(Index < Ops.size() && "Invalid index"); + Ops[Index] = In; + } + + // The type of the new BUILD_VECTOR node. + EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems); + assert(VecVT.getSizeInBits() == VT.getSizeInBits() && + "Invalid vector size"); + // Check if the new vector type is legal. + if (!isTypeLegal(VecVT)) return SDValue(); + + // Make the new BUILD_VECTOR. + SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], Ops.size()); + + // The new BUILD_VECTOR node has the potential to be further optimized. + AddToWorkList(BV.getNode()); + // Bitcast to the desired type. + return DAG.getNode(ISD::BITCAST, dl, VT, BV); +} + +SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) { + EVT VT = N->getValueType(0); + + unsigned NumInScalars = N->getNumOperands(); + DebugLoc dl = N->getDebugLoc(); + + EVT SrcVT = MVT::Other; + unsigned Opcode = ISD::DELETED_NODE; + unsigned NumDefs = 0; - assert(Index < Ops.size() && "Invalid index"); - Ops[Index] = In; + for (unsigned i = 0; i != NumInScalars; ++i) { + SDValue In = N->getOperand(i); + unsigned Opc = In.getOpcode(); + + if (Opc == ISD::UNDEF) + continue; + + // If all scalar values are floats and converted from integers. + if (Opcode == ISD::DELETED_NODE && + (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) { + Opcode = Opc; + // If not supported by target, bail out. + if (TLI.getOperationAction(Opcode, VT) != TargetLowering::Legal && + TLI.getOperationAction(Opcode, VT) != TargetLowering::Custom) + return SDValue(); } + if (Opc != Opcode) + return SDValue(); - // The type of the new BUILD_VECTOR node. - EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems); - assert(VecVT.getSizeInBits() == N->getValueType(0).getSizeInBits() && - "Invalid vector size"); - // Check if the new vector type is legal. - if (!isTypeLegal(VecVT)) return SDValue(); + EVT InVT = In.getOperand(0).getValueType(); - // Make the new BUILD_VECTOR. - SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), - VecVT, &Ops[0], Ops.size()); + // If all scalar values are typed differently, bail out. It's chosen to + // simplify BUILD_VECTOR of integer types. + if (SrcVT == MVT::Other) + SrcVT = InVT; + if (SrcVT != InVT) + return SDValue(); + NumDefs++; + } + + // If the vector has just one element defined, it's not worth to fold it into + // a vectorized one. + if (NumDefs < 2) + return SDValue(); - // The new BUILD_VECTOR node has the potential to be further optimized. - AddToWorkList(BV.getNode()); - // Bitcast to the desired type. 
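reduceBuildVecExtToExtBuildVec, whose rewritten body appears above, turns a BUILD_VECTOR of any/zero-extended scalars into a BUILD_VECTOR of the narrower source type followed by a bitcast: source element i lands at lane i*ElemRatio (on little-endian targets) and the filler value (zero for zext, undef for anyext) occupies the remaining lanes. A small host-side sketch of the bit-level equivalence it exploits; it only holds on a little-endian host and the names are illustrative:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint16_t a = 0x1234, b = 0xBEEF;

      // BUILD_VECTOR v2i32 (zext a), (zext b)
      uint32_t wide[2] = {a, b};

      // Equivalent: BUILD_VECTOR v4i16 a, 0, b, 0, then bitcast to v2i32.
      // ElemRatio = 32/16 = 2, so element i goes to lane i*2 and the zero
      // filler fills the rest.
      uint16_t narrow[4] = {a, 0, b, 0};

      assert(std::memcmp(wide, narrow, sizeof wide) == 0); // same bits on LE hosts
    }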
- return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), BV); + assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP) + && "Should only handle conversion from integer to float."); + assert(SrcVT != MVT::Other && "Cannot determine source type!"); + + EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars); + SmallVector Opnds; + for (unsigned i = 0; i != NumInScalars; ++i) { + SDValue In = N->getOperand(i); + + if (In.getOpcode() == ISD::UNDEF) + Opnds.push_back(DAG.getUNDEF(SrcVT)); + else + Opnds.push_back(In.getOperand(0)); } + SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, + &Opnds[0], Opnds.size()); + AddToWorkList(BV.getNode()); + + return DAG.getNode(Opcode, dl, VT, BV); +} + +SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { + unsigned NumInScalars = N->getNumOperands(); + DebugLoc dl = N->getDebugLoc(); + EVT VT = N->getValueType(0); + + // A vector built entirely of undefs is undef. + if (ISD::allOperandsUndef(N)) + return DAG.getUNDEF(VT); + + SDValue V = reduceBuildVecExtToExtBuildVec(N); + if (V.getNode()) + return V; + + V = reduceBuildVecConvertToConvertBuildVec(N); + if (V.getNode()) + return V; // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT // operations. If so, and if the EXTRACT_VECTOR_ELT vector inputs come from @@ -7876,15 +8652,22 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { if (VecIn1.getValueType().getSizeInBits()*2 != VT.getSizeInBits()) return SDValue(); + // If the input vector type has a different base type to the output + // vector type, bail out. + if (VecIn1.getValueType().getVectorElementType() != + VT.getVectorElementType()) + return SDValue(); + // Widen the input vector by adding undef values. - VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, N->getDebugLoc(), VT, + VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, VecIn1, DAG.getUNDEF(VecIn1.getValueType())); } // If VecIn2 is unused then change it to undef. VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); - // Check that we were able to transform all incoming values to the same type. + // Check that we were able to transform all incoming values to the same + // type. if (VecIn2.getValueType() != VecIn1.getValueType() || VecIn1.getValueType() != VT) return SDValue(); @@ -7897,7 +8680,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { SDValue Ops[2]; Ops[0] = VecIn1; Ops[1] = VecIn2; - return DAG.getVectorShuffle(VT, N->getDebugLoc(), Ops[0], Ops[1], &Mask[0]); + return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], &Mask[0]); } return SDValue(); @@ -7933,8 +8716,8 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) { return SDValue(); // Only handle cases where both indexes are constants with the same type. - ConstantSDNode *InsIdx = dyn_cast(N->getOperand(1)); - ConstantSDNode *ExtIdx = dyn_cast(V->getOperand(2)); + ConstantSDNode *ExtIdx = dyn_cast(N->getOperand(1)); + ConstantSDNode *InsIdx = dyn_cast(V->getOperand(2)); if (InsIdx && ExtIdx && InsIdx->getValueType(0).getSizeInBits() <= 64 && @@ -7951,6 +8734,21 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) { } } + if (V->getOpcode() == ISD::CONCAT_VECTORS) { + // Combine: + // (extract_subvec (concat V1, V2, ...), i) + // Into: + // Vi if possible + // Only operand 0 is checked as 'concat' assumes all inputs of the same type. 
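reduceBuildVecConvertToConvertBuildVec, completed above and invoked from the refactored visitBUILD_VECTOR, hoists a common UINT_TO_FP/SINT_TO_FP out of the BUILD_VECTOR when the target supports the vector form, so per-element conversions become one vector conversion. A hedged scalar illustration of the equivalence (the loop stands in for the single vector SINT_TO_FP):

    #include <cassert>

    int main() {
      int a = -3, b = 7, c = 42, d = 0;

      // build_vector (sint_to_fp a), (sint_to_fp b), (sint_to_fp c), (sint_to_fp d)
      float perElement[4] = {(float)a, (float)b, (float)c, (float)d};

      // sint_to_fp (build_vector a, b, c, d): pack first, convert once.
      int packed[4] = {a, b, c, d};
      float converted[4];
      for (int i = 0; i < 4; ++i)
        converted[i] = (float)packed[i];

      for (int i = 0; i < 4; ++i)
        assert(perElement[i] == converted[i]);
    }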
+ if (V->getOperand(0).getValueType() != NVT) + return SDValue(); + unsigned Idx = dyn_cast(N->getOperand(1))->getZExtValue(); + unsigned NumElems = NVT.getVectorNumElements(); + assert((Idx % NumElems) == 0 && + "IDX in concat is not a multiple of the result vector length."); + return V->getOperand(Idx / NumElems); + } + return SDValue(); } @@ -8266,6 +9064,44 @@ SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) { return SDValue(); } +/// SimplifyVUnaryOp - Visit a binary vector operation, like FABS/FNEG. +SDValue DAGCombiner::SimplifyVUnaryOp(SDNode *N) { + // After legalize, the target may be depending on adds and other + // binary ops to provide legal ways to construct constants or other + // things. Simplifying them may result in a loss of legality. + if (LegalOperations) return SDValue(); + + assert(N->getValueType(0).isVector() && + "SimplifyVUnaryOp only works on vectors!"); + + SDValue N0 = N->getOperand(0); + + if (N0.getOpcode() != ISD::BUILD_VECTOR) + return SDValue(); + + // Operand is a BUILD_VECTOR node, see if we can constant fold it. + SmallVector Ops; + for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) { + SDValue Op = N0.getOperand(i); + if (Op.getOpcode() != ISD::UNDEF && + Op.getOpcode() != ISD::ConstantFP) + break; + EVT EltVT = Op.getValueType(); + SDValue FoldOp = DAG.getNode(N->getOpcode(), N0.getDebugLoc(), EltVT, Op); + if (FoldOp.getOpcode() != ISD::UNDEF && + FoldOp.getOpcode() != ISD::ConstantFP) + break; + Ops.push_back(FoldOp); + AddToWorkList(FoldOp.getNode()); + } + + if (Ops.size() != N0.getNumOperands()) + return SDValue(); + + return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), + N0.getValueType(), &Ops[0], Ops.size()); +} + SDValue DAGCombiner::SimplifySelect(DebugLoc DL, SDValue N0, SDValue N1, SDValue N2){ assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!"); @@ -8349,6 +9185,10 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS, if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) || (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode))) return false; + // The loads must not depend on one another. + if (LLD->isPredecessorOf(RLD) || + RLD->isPredecessorOf(LLD)) + return false; Addr = DAG.getNode(ISD::SELECT, TheSelect->getDebugLoc(), LLD->getBasePtr().getValueType(), TheSelect->getOperand(0), LLD->getBasePtr(), @@ -8468,7 +9308,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1, const_cast(TV->getConstantFPValue()) }; Type *FPTy = Elts[0]->getType(); - const TargetData &TD = *TLI.getTargetData(); + const DataLayout &TD = *TLI.getDataLayout(); // Create a ConstantArray of the two constants. Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts); @@ -8583,34 +9423,38 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1, return SDValue(); // Get a SetCC of the condition - // FIXME: Should probably make sure that setcc is legal if we ever have a - // target where it isn't. - SDValue Temp, SCC; - // cast from setcc result type to select result type - if (LegalTypes) { - SCC = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()), - N0, N1, CC); - if (N2.getValueType().bitsLT(SCC.getValueType())) - Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(), N2.getValueType()); - else + // NOTE: Don't create a SETCC if it's not legal on this target. + if (!LegalOperations || + TLI.isOperationLegal(ISD::SETCC, + LegalTypes ? 
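SimplifyVUnaryOp above constant-folds a unary vector op such as FNEG or FABS whose operand is a constant BUILD_VECTOR: each lane is folded individually and a new constant BUILD_VECTOR is emitted, and the fold is abandoned if any lane is neither a constant nor undef. A scalar sketch of the per-lane fold, not the DAG implementation:

    #include <cassert>

    int main() {
      // Operand is a BUILD_VECTOR of floating-point constants ...
      double ops[4] = {1.5, -2.0, 0.0, 8.25};

      // ... so a vector FNEG folds lane by lane into a new constant vector.
      double folded[4];
      for (int i = 0; i < 4; ++i)
        folded[i] = -ops[i];

      assert(folded[0] == -1.5 && folded[1] == 2.0 &&
             folded[2] == 0.0  && folded[3] == -8.25);
    }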
TLI.getSetCCResultType(N0.getValueType()) : MVT::i1)) { + SDValue Temp, SCC; + // cast from setcc result type to select result type + if (LegalTypes) { + SCC = DAG.getSetCC(DL, TLI.getSetCCResultType(N0.getValueType()), + N0, N1, CC); + if (N2.getValueType().bitsLT(SCC.getValueType())) + Temp = DAG.getZeroExtendInReg(SCC, N2.getDebugLoc(), + N2.getValueType()); + else + Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(), + N2.getValueType(), SCC); + } else { + SCC = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC); Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(), N2.getValueType(), SCC); - } else { - SCC = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC); - Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(), - N2.getValueType(), SCC); - } + } - AddToWorkList(SCC.getNode()); - AddToWorkList(Temp.getNode()); + AddToWorkList(SCC.getNode()); + AddToWorkList(Temp.getNode()); - if (N2C->getAPIntValue() == 1) - return Temp; + if (N2C->getAPIntValue() == 1) + return Temp; - // shl setcc result by log2 n2c - return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp, - DAG.getConstant(N2C->getAPIntValue().logBase2(), - getShiftAmountTy(Temp.getValueType()))); + // shl setcc result by log2 n2c + return DAG.getNode(ISD::SHL, DL, N2.getValueType(), Temp, + DAG.getConstant(N2C->getAPIntValue().logBase2(), + getShiftAmountTy(Temp.getValueType()))); + } } // Check to see if this is the equivalent of setcc @@ -8729,7 +9573,7 @@ SDValue DAGCombiner::BuildUDIV(SDNode *N) { // to alias with anything but itself. Provides base object and offset as // results. static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset, - const GlobalValue *&GV, void *&CV) { + const GlobalValue *&GV, const void *&CV) { // Assume it is a primitive operation. Base = Ptr; Offset = 0; GV = 0; CV = 0; @@ -8754,8 +9598,8 @@ static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset, // for ConstantSDNodes since the same constant pool entry may be represented // by multiple nodes with different offsets. if (ConstantPoolSDNode *C = dyn_cast(Base)) { - CV = C->isMachineConstantPoolEntry() ? (void *)C->getMachineCPVal() - : (void *)C->getConstVal(); + CV = C->isMachineConstantPoolEntry() ? 
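The SimplifySelectCC branch above (now guarded so a SETCC is only created when it is legal) replaces a select between a power-of-two constant and zero with the zero-extended comparison result shifted into place. A hedged scalar sketch of the transform; selectPow2 is a hypothetical stand-in:

    #include <cassert>

    // select_cc(a, b, C, 0, setlt) with C a power of two becomes
    // (zext (setcc a, b, setlt)) << log2(C): the 0/1 comparison result is
    // shifted instead of materializing a conditional move.
    unsigned selectPow2(int a, int b, unsigned c_log2) {
      unsigned cond = (a < b) ? 1u : 0u;   // setcc yields 0 or 1
      return cond << c_log2;               // shl by log2 of the constant
    }

    int main() {
      assert(selectPow2(1, 2, 3) == 8);    // a < b  -> C = 8
      assert(selectPow2(5, 2, 3) == 0);    // !(a < b) -> 0
    }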
(const void *)C->getMachineCPVal() + : (const void *)C->getConstVal(); Offset += C->getOffset(); return false; } @@ -8780,7 +9624,7 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1, SDValue Base1, Base2; int64_t Offset1, Offset2; const GlobalValue *GV1, *GV2; - void *CV1, *CV2; + const void *CV1, *CV2; bool isFrameIndex1 = FindBaseOffset(Ptr1, Base1, Offset1, GV1, CV1); bool isFrameIndex2 = FindBaseOffset(Ptr2, Base2, Offset2, GV2, CV2); diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index 683fac6..4854cf7 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -53,7 +53,7 @@ #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Analysis/Loads.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLowering.h" @@ -1059,7 +1059,7 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo, MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()), TM(FuncInfo.MF->getTarget()), - TD(*TM.getTargetData()), + TD(*TM.getDataLayout()), TII(*TM.getInstrInfo()), TLI(*TM.getTargetLowering()), TRI(*TM.getRegisterInfo()), diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp index 3e18ea7..a418290 100644 --- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp +++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @@ -29,7 +29,7 @@ #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetOptions.h" @@ -80,9 +80,9 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) { if (const AllocaInst *AI = dyn_cast(I)) if (const ConstantInt *CUI = dyn_cast(AI->getArraySize())) { Type *Ty = AI->getAllocatedType(); - uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty); + uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty); unsigned Align = - std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), + std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), AI->getAlignment()); TySize *= CUI->getZExtValue(); // Get total allocated size. @@ -97,7 +97,7 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) { cast(Ty)->getElementType()->isIntegerTy(8))); StaticAllocaMap[AI] = MF->getFrameInfo()->CreateStackObject(TySize, Align, false, - MayNeedSP); + MayNeedSP, AI); } for (; BB != EB; ++BB) diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index 4488d27..a8381b2 100644 --- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -20,7 +20,7 @@ #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" @@ -55,7 +55,8 @@ unsigned InstrEmitter::CountResults(SDNode *Node) { /// /// Also count physreg RegisterSDNode and RegisterMaskSDNode operands preceding /// the chain and glue. 
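FindBaseOffset/isAlias above decompose each pointer into a base object (frame index, global, or constant-pool entry, now tracked as const void*) plus a constant byte offset; once two accesses share the same base, aliasing reduces to a range-overlap test on the offsets. A minimal sketch of that final check, with illustrative names:

    #include <cassert>
    #include <cstdint>

    // Two accesses with the same base object and constant offsets are disjoint
    // exactly when the byte ranges [Off, Off+Size) do not overlap.
    bool mayOverlap(int64_t Off1, int64_t Size1, int64_t Off2, int64_t Size2) {
      return Off1 + Size1 > Off2 && Off2 + Size2 > Off1;
    }

    int main() {
      assert(!mayOverlap(0, 4, 4, 4));   // adjacent 4-byte accesses: no alias
      assert( mayOverlap(0, 8, 4, 4));   // second access inside the first: alias
    }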
These operands may be implicit on the machine instr. -static unsigned countOperands(SDNode *Node, unsigned &NumImpUses) { +static unsigned countOperands(SDNode *Node, unsigned NumExpUses, + unsigned &NumImpUses) { unsigned N = Node->getNumOperands(); while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) --N; @@ -63,7 +64,8 @@ static unsigned countOperands(SDNode *Node, unsigned &NumImpUses) { --N; // Ignore chain if it exists. // Count RegisterSDNode and RegisterMaskSDNode operands for NumImpUses. - for (unsigned I = N; I; --I) { + NumImpUses = N - NumExpUses; + for (unsigned I = N; I > NumExpUses; --I) { if (isa(Node->getOperand(I - 1))) continue; if (RegisterSDNode *RN = dyn_cast(Node->getOperand(I - 1))) @@ -312,8 +314,6 @@ InstrEmitter::AddRegisterOperand(MachineInstr *MI, SDValue Op, const TargetRegisterClass *DstRC = 0; if (IIOpNum < II->getNumOperands()) DstRC = TRI->getAllocatableClass(TII->getRegClass(*II,IIOpNum,TRI,*MF)); - assert((DstRC || (MI->isVariadic() && IIOpNum >= MCID.getNumOperands())) && - "Don't have operand info for this instruction!"); if (DstRC && !MRI->constrainRegClass(VReg, DstRC, MinRCSize)) { unsigned NewVReg = MRI->createVirtualRegister(DstRC); BuildMI(*MBB, InsertPos, Op.getNode()->getDebugLoc(), @@ -390,10 +390,10 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op, Type *Type = CP->getType(); // MachineConstantPool wants an explicit alignment. if (Align == 0) { - Align = TM->getTargetData()->getPrefTypeAlignment(Type); + Align = TM->getDataLayout()->getPrefTypeAlignment(Type); if (Align == 0) { // Alignment of vector types. FIXME! - Align = TM->getTargetData()->getTypeAllocSize(Type); + Align = TM->getDataLayout()->getTypeAllocSize(Type); } } @@ -410,6 +410,7 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op, ES->getTargetFlags())); } else if (BlockAddressSDNode *BA = dyn_cast(Op)) { MI->addOperand(MachineOperand::CreateBA(BA->getBlockAddress(), + BA->getOffset(), BA->getTargetFlags())); } else if (TargetIndexSDNode *TI = dyn_cast(Op)) { MI->addOperand(MachineOperand::CreateTargetIndex(TI->getIndex(), @@ -720,7 +721,8 @@ EmitMachineNode(SDNode *Node, bool IsClone, bool IsCloned, const MCInstrDesc &II = TII->get(Opc); unsigned NumResults = CountResults(Node); unsigned NumImpUses = 0; - unsigned NodeOperands = countOperands(Node, NumImpUses); + unsigned NodeOperands = + countOperands(Node, II.getNumOperands() - II.getNumDefs(), NumImpUses); bool HasPhysRegOuts = NumResults > II.getNumDefs() && II.getImplicitDefs()!=0; #ifndef NDEBUG unsigned NumMIOperands = NodeOperands + NumResults; @@ -870,6 +872,17 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, break; } + case ISD::LIFETIME_START: + case ISD::LIFETIME_END: { + unsigned TarOp = (Node->getOpcode() == ISD::LIFETIME_START) ? + TargetOpcode::LIFETIME_START : TargetOpcode::LIFETIME_END; + + FrameIndexSDNode *FI = dyn_cast(Node->getOperand(1)); + BuildMI(*MBB, InsertPos, Node->getDebugLoc(), TII->get(TarOp)) + .addFrameIndex(FI->getIndex()); + break; + } + case ISD::INLINEASM: { unsigned NumOps = Node->getNumOperands(); if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue) @@ -884,25 +897,30 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, const char *AsmStr = cast(AsmStrV)->getSymbol(); MI->addOperand(MachineOperand::CreateES(AsmStr)); - // Add the HasSideEffect and isAlignStack bits. + // Add the HasSideEffect, isAlignStack, AsmDialect, MayLoad and MayStore + // bits. 
int64_t ExtraInfo = cast(Node->getOperand(InlineAsm::Op_ExtraInfo))-> getZExtValue(); MI->addOperand(MachineOperand::CreateImm(ExtraInfo)); + // Remember to operand index of the group flags. + SmallVector GroupIdx; + // Add all of the operand registers to the instruction. for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { unsigned Flags = cast(Node->getOperand(i))->getZExtValue(); - unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + GroupIdx.push_back(MI->getNumOperands()); MI->addOperand(MachineOperand::CreateImm(Flags)); ++i; // Skip the ID value. switch (InlineAsm::getKind(Flags)) { default: llvm_unreachable("Bad flags!"); case InlineAsm::Kind_RegDef: - for (; NumVals; --NumVals, ++i) { + for (unsigned j = 0; j != NumVals; ++j, ++i) { unsigned Reg = cast(Node->getOperand(i))->getReg(); // FIXME: Add dead flags for physical and virtual registers defined. // For now, mark physical register defs as implicit to help fast @@ -913,7 +931,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, break; case InlineAsm::Kind_RegDefEarlyClobber: case InlineAsm::Kind_Clobber: - for (; NumVals; --NumVals, ++i) { + for (unsigned j = 0; j != NumVals; ++j, ++i) { unsigned Reg = cast(Node->getOperand(i))->getReg(); MI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/ true, /*isImp=*/ TargetRegisterInfo::isPhysicalRegister(Reg), @@ -928,9 +946,20 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, case InlineAsm::Kind_Mem: // Addressing mode. // The addressing mode has been selected, just add all of the // operands to the machine instruction. - for (; NumVals; --NumVals, ++i) + for (unsigned j = 0; j != NumVals; ++j, ++i) AddOperand(MI, Node->getOperand(i), 0, 0, VRBaseMap, /*IsDebug=*/false, IsClone, IsCloned); + + // Manually set isTied bits. + if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) { + unsigned DefGroup = 0; + if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) { + unsigned DefIdx = GroupIdx[DefGroup] + 1; + unsigned UseIdx = GroupIdx.back() + 1; + for (unsigned j = 0; j != NumVals; ++j) + MI->tieOperands(DefIdx + j, UseIdx + j); + } + } break; } } diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index 908ebb9..abf40b7 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -22,7 +22,7 @@ #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetLowering.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -718,7 +718,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) { // expand it. if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); + unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty); if (ST->getAlignment() < ABIAlignment) ExpandUnalignedStore(cast(Node), DAG, TLI, this); @@ -824,7 +824,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) { // expand it. 
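The inline-asm emission above records, for every operand group, the machine-instruction index of its group-flags immediate (GroupIdx); when a use group is tied to a def group, register j of the use group is tied to register j of the def group, i.e. operand GroupIdx[Use]+1+j is tied to GroupIdx[Def]+1+j. A small sketch of just that index arithmetic; the concrete numbers are hypothetical:

    #include <cassert>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<unsigned> GroupIdx = {3, 6};  // flags operands of def group, use group
      unsigned NumVals = 2;                     // two registers per group

      std::vector<std::pair<unsigned, unsigned>> tied;
      for (unsigned j = 0; j != NumVals; ++j)
        tied.push_back({GroupIdx[0] + 1 + j, GroupIdx[1] + 1 + j});

      assert(tied[0] == std::make_pair(4u, 7u));  // def reg 0 tied to use reg 0
      assert(tied[1] == std::make_pair(5u, 8u));  // def reg 1 tied to use reg 1
    }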
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); + unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty); if (ST->getAlignment() < ABIAlignment) ExpandUnalignedStore(cast(Node), DAG, TLI, this); } @@ -869,25 +869,24 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) { switch (TLI.getOperationAction(Node->getOpcode(), VT)) { default: llvm_unreachable("This action is not supported yet!"); case TargetLowering::Legal: - // If this is an unaligned load and the target doesn't support it, - // expand it. - if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { - Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); - unsigned ABIAlignment = - TLI.getTargetData()->getABITypeAlignment(Ty); - if (LD->getAlignment() < ABIAlignment){ - ExpandUnalignedLoad(cast(Node), - DAG, TLI, RVal, RChain); - } - } - break; + // If this is an unaligned load and the target doesn't support it, + // expand it. + if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { + Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); + unsigned ABIAlignment = + TLI.getDataLayout()->getABITypeAlignment(Ty); + if (LD->getAlignment() < ABIAlignment){ + ExpandUnalignedLoad(cast(Node), DAG, TLI, RVal, RChain); + } + } + break; case TargetLowering::Custom: { - SDValue Res = TLI.LowerOperation(RVal, DAG); - if (Res.getNode()) { - RVal = Res; - RChain = Res.getValue(1); - } - break; + SDValue Res = TLI.LowerOperation(RVal, DAG); + if (Res.getNode()) { + RVal = Res; + RChain = Res.getValue(1); + } + break; } case TargetLowering::Promote: { // Only promote a load of vector type to another. @@ -1060,7 +1059,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) { Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); unsigned ABIAlignment = - TLI.getTargetData()->getABITypeAlignment(Ty); + TLI.getDataLayout()->getABITypeAlignment(Ty); if (LD->getAlignment() < ABIAlignment){ ExpandUnalignedLoad(cast(Node), DAG, TLI, Value, Chain); @@ -1241,6 +1240,19 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) { if (Action == TargetLowering::Legal) Action = TargetLowering::Custom; break; + case ISD::DEBUGTRAP: + Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0)); + if (Action == TargetLowering::Expand) { + // replace ISD::DEBUGTRAP with ISD::TRAP + SDValue NewVal; + NewVal = DAG.getNode(ISD::TRAP, Node->getDebugLoc(), Node->getVTList(), + Node->getOperand(0)); + ReplaceNode(Node, NewVal.getNode()); + LegalizeOp(NewVal.getNode()); + return; + } + break; + default: if (Node->getOpcode() >= ISD::BUILTIN_OP_END) { Action = TargetLowering::Legal; @@ -1588,26 +1600,71 @@ void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT, break; case TargetLowering::Expand: { ISD::CondCode CC1 = ISD::SETCC_INVALID, CC2 = ISD::SETCC_INVALID; + ISD::CondCode InvCC = ISD::SETCC_INVALID; unsigned Opc = 0; switch (CCCode) { default: llvm_unreachable("Don't know how to expand this condition!"); - case ISD::SETOEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETO; Opc = ISD::AND; break; - case ISD::SETOGT: CC1 = ISD::SETGT; CC2 = ISD::SETO; Opc = ISD::AND; break; - case ISD::SETOGE: CC1 = ISD::SETGE; CC2 = ISD::SETO; Opc = ISD::AND; break; - case ISD::SETOLT: CC1 = ISD::SETLT; CC2 = ISD::SETO; Opc = ISD::AND; break; - case ISD::SETOLE: CC1 = ISD::SETLE; CC2 = ISD::SETO; Opc = ISD::AND; break; - case ISD::SETONE: CC1 = ISD::SETNE; CC2 = ISD::SETO; Opc = 
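The load/store legalization above expands an access whose alignment is below the ABI alignment whenever the target does not allow unaligned memory accesses. What ExpandUnalignedLoad boils down to for an i32 with alignment 1 is loading the bytes individually and reassembling them; a host-side sketch (the helper name is illustrative, byte order shown is little-endian):

    #include <cassert>
    #include <cstdint>

    uint32_t loadUnaligned32(const uint8_t *p) {
      return (uint32_t)p[0]         | ((uint32_t)p[1] << 8) |
             ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main() {
      uint8_t buf[8] = {0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0};
      assert(loadUnaligned32(buf + 1) == 0x12345678u); // misaligned by one byte
    }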
ISD::AND; break; - case ISD::SETUEQ: CC1 = ISD::SETEQ; CC2 = ISD::SETUO; Opc = ISD::OR; break; - case ISD::SETUGT: CC1 = ISD::SETGT; CC2 = ISD::SETUO; Opc = ISD::OR; break; - case ISD::SETUGE: CC1 = ISD::SETGE; CC2 = ISD::SETUO; Opc = ISD::OR; break; - case ISD::SETULT: CC1 = ISD::SETLT; CC2 = ISD::SETUO; Opc = ISD::OR; break; - case ISD::SETULE: CC1 = ISD::SETLE; CC2 = ISD::SETUO; Opc = ISD::OR; break; - case ISD::SETUNE: CC1 = ISD::SETNE; CC2 = ISD::SETUO; Opc = ISD::OR; break; - // FIXME: Implement more expansions. - } - - SDValue SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); - SDValue SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); + case ISD::SETO: + assert(TLI.getCondCodeAction(ISD::SETOEQ, OpVT) + == TargetLowering::Legal + && "If SETO is expanded, SETOEQ must be legal!"); + CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break; + case ISD::SETUO: + assert(TLI.getCondCodeAction(ISD::SETUNE, OpVT) + == TargetLowering::Legal + && "If SETUO is expanded, SETUNE must be legal!"); + CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break; + case ISD::SETOEQ: + case ISD::SETOGT: + case ISD::SETOGE: + case ISD::SETOLT: + case ISD::SETOLE: + case ISD::SETONE: + case ISD::SETUEQ: + case ISD::SETUNE: + case ISD::SETUGT: + case ISD::SETUGE: + case ISD::SETULT: + case ISD::SETULE: + // If we are floating point, assign and break, otherwise fall through. + if (!OpVT.isInteger()) { + // We can use the 4th bit to tell if we are the unordered + // or ordered version of the opcode. + CC2 = ((unsigned)CCCode & 0x8U) ? ISD::SETUO : ISD::SETO; + Opc = ((unsigned)CCCode & 0x8U) ? ISD::OR : ISD::AND; + CC1 = (ISD::CondCode)(((int)CCCode & 0x7) | 0x10); + break; + } + // Fallthrough if we are unsigned integer. + case ISD::SETLE: + case ISD::SETGT: + case ISD::SETGE: + case ISD::SETLT: + case ISD::SETNE: + case ISD::SETEQ: + InvCC = ISD::getSetCCSwappedOperands(CCCode); + if (TLI.getCondCodeAction(InvCC, OpVT) == TargetLowering::Expand) { + // We only support using the inverted operation and not a + // different manner of supporting expanding these cases. + llvm_unreachable("Don't know how to expand this condition!"); + } + LHS = DAG.getSetCC(dl, VT, RHS, LHS, InvCC); + RHS = SDValue(); + CC = SDValue(); + return; + } + + SDValue SetCC1, SetCC2; + if (CCCode != ISD::SETO && CCCode != ISD::SETUO) { + // If we aren't the ordered or unorder operation, + // then the pattern is (LHS CC1 RHS) Opc (LHS CC2 RHS). + SetCC1 = DAG.getSetCC(dl, VT, LHS, RHS, CC1); + SetCC2 = DAG.getSetCC(dl, VT, LHS, RHS, CC2); + } else { + // Otherwise, the pattern is (LHS CC1 LHS) Opc (RHS CC2 RHS) + SetCC1 = DAG.getSetCC(dl, VT, LHS, LHS, CC1); + SetCC2 = DAG.getSetCC(dl, VT, RHS, RHS, CC2); + } LHS = DAG.getNode(Opc, dl, VT, SetCC1, SetCC2); RHS = SDValue(); CC = SDValue(); @@ -1626,7 +1683,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, DebugLoc dl) { // Create the stack frame object. unsigned SrcAlign = - TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType(). + TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType(). 
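The extended LegalizeSetCCCondCode expansion above decomposes a floating-point condition into two legal comparisons joined by AND or OR: SETO(x,y) becomes (x SETOEQ x) AND (y SETOEQ y), SETUO uses SETUNE and OR, and the remaining ordered/unordered predicates split into the plain comparison combined with SETO/SETUO, with bit 3 of the condition code selecting the unordered flavour. A hedged scalar illustration of those building blocks:

    #include <cassert>
    #include <cmath>

    //  SETO  (x, y)  ==  (x == x) && (y == y)    (neither operand is NaN)
    //  SETOGT(x, y)  ==  (x >  y) && SETO(x, y)
    //  SETUGT(x, y)  ==  (x >  y) || SETUO(x, y) (unordered compares as true)
    bool setO  (double x, double y) { return x == x && y == y; }
    bool setUO (double x, double y) { return !setO(x, y); }
    bool setOGT(double x, double y) { return x > y && setO(x, y); }
    bool setUGT(double x, double y) { return x > y || setUO(x, y); }

    int main() {
      double nan = std::nan("");
      assert( setOGT(2.0, 1.0) && !setOGT(nan, 1.0));
      assert( setUGT(2.0, 1.0) &&  setUGT(nan, 1.0));
      assert(!setO(nan, 1.0)   &&  setUO(nan, 1.0));
    }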
getTypeForEVT(*DAG.getContext())); SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); @@ -1638,7 +1695,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp, unsigned SlotSize = SlotVT.getSizeInBits(); unsigned DestSize = DestVT.getSizeInBits(); Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); - unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType); + unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType); // Emit a store to the stack slot. Use a truncstore if the input value is // later than DestVT. @@ -2042,7 +2099,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned, SDValue Op0, EVT DestVT, DebugLoc dl) { - if (Op0.getValueType() == MVT::i32) { + if (Op0.getValueType() == MVT::i32 && TLI.isTypeLegal(MVT::f64)) { // simple 32-bit [signed|unsigned] integer to float/double expansion // Get the stack frame index of a 8 byte buffer. @@ -2787,7 +2844,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) { // Increment the pointer, VAList, to the next vaarg Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, - DAG.getConstant(TLI.getTargetData()-> + DAG.getConstant(TLI.getDataLayout()-> getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), TLI.getPointerTy())); // Store the incremented VAList to the legalized pointer @@ -3109,6 +3166,8 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) { Tmp3 = Node->getOperand(1); if (TLI.isOperationLegalOrCustom(DivRemOpc, VT) || (isDivRemLibcallAvailable(Node, isSigned, TLI) && + // If div is legal, it's better to do the normal expansion + !TLI.isOperationLegalOrCustom(DivOpc, Node->getValueType(0)) && useDivRem(Node, isSigned, false))) { Tmp1 = DAG.getNode(DivRemOpc, dl, VTs, Tmp2, Tmp3).getValue(1); } else if (TLI.isOperationLegalOrCustom(DivOpc, VT)) { @@ -3366,7 +3425,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) { EVT PTy = TLI.getPointerTy(); - const TargetData &TD = *TLI.getTargetData(); + const DataLayout &TD = *TLI.getDataLayout(); unsigned EntrySize = DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); diff --git a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp index e393896..92dc5a9 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp @@ -1245,32 +1245,30 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) { DEBUG(dbgs() << "Expand float operand: "; N->dump(&DAG); dbgs() << "\n"); SDValue Res = SDValue(); - if (TLI.getOperationAction(N->getOpcode(), N->getOperand(OpNo).getValueType()) - == TargetLowering::Custom) - Res = TLI.LowerOperation(SDValue(N, 0), DAG); - - if (Res.getNode() == 0) { - switch (N->getOpcode()) { - default: - #ifndef NDEBUG - dbgs() << "ExpandFloatOperand Op #" << OpNo << ": "; - N->dump(&DAG); dbgs() << "\n"; - #endif - llvm_unreachable("Do not know how to expand this operator's operand!"); - - case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break; - case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break; - case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break; - - case ISD::BR_CC: Res = ExpandFloatOp_BR_CC(N); break; - case ISD::FP_ROUND: Res = ExpandFloatOp_FP_ROUND(N); break; - case ISD::FP_TO_SINT: Res = ExpandFloatOp_FP_TO_SINT(N); break; - case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_UINT(N); break; - case ISD::SELECT_CC: Res = ExpandFloatOp_SELECT_CC(N); break; - case ISD::SETCC: Res = ExpandFloatOp_SETCC(N); break; - case ISD::STORE: Res = 
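The SREM/UREM hunk above adds the guard "if div is legal, it's better to do the normal expansion": when a hardware divide exists, the remainder is computed as a - (a / b) * b rather than going through the combined divrem libcall, which is only preferable when div itself is not legal. A minimal scalar sketch of that expansion:

    #include <cassert>

    int remViaDiv(int a, int b) {
      return a - (a / b) * b;   // the "normal expansion" of srem when sdiv is legal
    }

    int main() {
      assert(remViaDiv(17, 5)  == 17 % 5);
      assert(remViaDiv(-17, 5) == -17 % 5);
    }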
ExpandFloatOp_STORE(cast(N), - OpNo); break; - } + // See if the target wants to custom expand this node. + if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false)) + return false; + + switch (N->getOpcode()) { + default: +#ifndef NDEBUG + dbgs() << "ExpandFloatOperand Op #" << OpNo << ": "; + N->dump(&DAG); dbgs() << "\n"; +#endif + llvm_unreachable("Do not know how to expand this operator's operand!"); + + case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break; + case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break; + case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break; + + case ISD::BR_CC: Res = ExpandFloatOp_BR_CC(N); break; + case ISD::FP_ROUND: Res = ExpandFloatOp_FP_ROUND(N); break; + case ISD::FP_TO_SINT: Res = ExpandFloatOp_FP_TO_SINT(N); break; + case ISD::FP_TO_UINT: Res = ExpandFloatOp_FP_TO_UINT(N); break; + case ISD::SELECT_CC: Res = ExpandFloatOp_SELECT_CC(N); break; + case ISD::SETCC: Res = ExpandFloatOp_SETCC(N); break; + case ISD::STORE: Res = ExpandFloatOp_STORE(cast(N), + OpNo); break; } // If the result is null, the sub-method took care of registering results etc. diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp index e8e968a..a370fae 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp @@ -644,8 +644,9 @@ SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) { EVT SmallVT = LHS.getValueType(); // To determine if the result overflowed in a larger type, we extend the - // input to the larger type, do the multiply, then check the high bits of - // the result to see if the overflow happened. + // input to the larger type, do the multiply (checking if it overflows), + // then also check the high bits of the result to see if overflow happened + // there. if (N->getOpcode() == ISD::SMULO) { LHS = SExtPromotedInteger(LHS); RHS = SExtPromotedInteger(RHS); @@ -653,24 +654,31 @@ SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) { LHS = ZExtPromotedInteger(LHS); RHS = ZExtPromotedInteger(RHS); } - SDValue Mul = DAG.getNode(ISD::MUL, DL, LHS.getValueType(), LHS, RHS); + SDVTList VTs = DAG.getVTList(LHS.getValueType(), N->getValueType(1)); + SDValue Mul = DAG.getNode(N->getOpcode(), DL, VTs, LHS, RHS); - // Overflow occurred iff the high part of the result does not - // zero/sign-extend the low part. + // Overflow occurred if it occurred in the larger type, or if the high part + // of the result does not zero/sign-extend the low part. Check this second + // possibility first. SDValue Overflow; if (N->getOpcode() == ISD::UMULO) { - // Unsigned overflow occurred iff the high part is non-zero. + // Unsigned overflow occurred if the high part is non-zero. SDValue Hi = DAG.getNode(ISD::SRL, DL, Mul.getValueType(), Mul, DAG.getIntPtrConstant(SmallVT.getSizeInBits())); Overflow = DAG.getSetCC(DL, N->getValueType(1), Hi, DAG.getConstant(0, Hi.getValueType()), ISD::SETNE); } else { - // Signed overflow occurred iff the high part does not sign extend the low. + // Signed overflow occurred if the high part does not sign extend the low. SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Mul.getValueType(), Mul, DAG.getValueType(SmallVT)); Overflow = DAG.getSetCC(DL, N->getValueType(1), SExt, Mul, ISD::SETNE); } + // The only other way for overflow to occur is if the multiplication in the + // larger type itself overflowed. 
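PromoteIntRes_XMULO above now widens both inputs, performs the multiply-with-overflow in the promoted type, and reports overflow if either the high bits of the wide product are wrong or the wide multiply itself overflowed. A host-side sketch of the high-bits half of that check for a promoted 16-bit UMULO; names are illustrative and the wide multiply's own overflow bit is omitted because 32 bits is exactly twice 16 here:

    #include <cassert>
    #include <cstdint>

    bool umulo16(uint16_t a, uint16_t b, uint16_t &lo) {
      uint32_t wide = (uint32_t)a * (uint32_t)b;  // multiply in the promoted type
      lo = (uint16_t)wide;
      return (wide >> 16) != 0;                   // high half non-zero => overflow
    }

    int main() {
      uint16_t r;
      assert(!umulo16(100, 200, r) && r == 20000);
      assert( umulo16(300, 300, r));              // 90000 does not fit in 16 bits
    }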
+ Overflow = DAG.getNode(ISD::OR, DL, N->getValueType(1), Overflow, + SDValue(Mul.getNode(), 1)); + // Use the calculated overflow everywhere. ReplaceValueWith(SDValue(N, 1), Overflow); return Mul; @@ -2253,32 +2261,35 @@ void DAGTypeLegalizer::ExpandIntRes_UADDSUBO(SDNode *N, void DAGTypeLegalizer::ExpandIntRes_XMULO(SDNode *N, SDValue &Lo, SDValue &Hi) { EVT VT = N->getValueType(0); - Type *RetTy = VT.getTypeForEVT(*DAG.getContext()); - EVT PtrVT = TLI.getPointerTy(); - Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext()); DebugLoc dl = N->getDebugLoc(); // A divide for UMULO should be faster than a function call. if (N->getOpcode() == ISD::UMULO) { SDValue LHS = N->getOperand(0), RHS = N->getOperand(1); - DebugLoc DL = N->getDebugLoc(); - SDValue MUL = DAG.getNode(ISD::MUL, DL, LHS.getValueType(), LHS, RHS); + SDValue MUL = DAG.getNode(ISD::MUL, dl, LHS.getValueType(), LHS, RHS); SplitInteger(MUL, Lo, Hi); // A divide for UMULO will be faster than a function call. Select to // make sure we aren't using 0. SDValue isZero = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), - RHS, DAG.getConstant(0, VT), ISD::SETNE); + RHS, DAG.getConstant(0, VT), ISD::SETEQ); SDValue NotZero = DAG.getNode(ISD::SELECT, dl, VT, isZero, DAG.getConstant(1, VT), RHS); - SDValue DIV = DAG.getNode(ISD::UDIV, DL, LHS.getValueType(), MUL, NotZero); - SDValue Overflow; - Overflow = DAG.getSetCC(DL, N->getValueType(1), DIV, LHS, ISD::SETNE); + SDValue DIV = DAG.getNode(ISD::UDIV, dl, VT, MUL, NotZero); + SDValue Overflow = DAG.getSetCC(dl, N->getValueType(1), DIV, LHS, + ISD::SETNE); + Overflow = DAG.getNode(ISD::SELECT, dl, N->getValueType(1), isZero, + DAG.getConstant(0, N->getValueType(1)), + Overflow); ReplaceValueWith(SDValue(N, 1), Overflow); return; } + Type *RetTy = VT.getTypeForEVT(*DAG.getContext()); + EVT PtrVT = TLI.getPointerTy(); + Type *PtrTy = PtrVT.getTypeForEVT(*DAG.getContext()); + // Replace this with a libcall that will check overflow. RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL; if (VT == MVT::i32) diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp index 39337ff..644e36e 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.cpp @@ -15,7 +15,7 @@ #include "LegalizeTypes.h" #include "llvm/CallingConv.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/ADT/SetVector.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ErrorHandling.h" diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/lib/CodeGen/SelectionDAG/LegalizeTypes.h index 94fc976..20b7ce6 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -625,6 +625,7 @@ private: SDValue WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N); SDValue WidenVecRes_VSETCC(SDNode* N); + SDValue WidenVecRes_Ternary(SDNode *N); SDValue WidenVecRes_Binary(SDNode *N); SDValue WidenVecRes_Convert(SDNode *N); SDValue WidenVecRes_POWI(SDNode *N); @@ -633,7 +634,7 @@ private: SDValue WidenVecRes_InregOp(SDNode *N); // Widen Vector Operand. 
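ExpandIntRes_XMULO above keeps the divide-based UMULO expansion (a divide being assumed faster than the overflow libcall) and fixes the zero-divisor handling: the check is (a * b) / b != a, with b == 0 routed through a select so the division never divides by zero and the overflow bit is forced to 0. A scalar sketch of the same logic:

    #include <cassert>
    #include <cstdint>

    bool umulo64(uint64_t a, uint64_t b, uint64_t &prod) {
      prod = a * b;                       // wrapping multiply
      if (b == 0)
        return false;                     // multiplying by zero cannot overflow
      return prod / b != a;               // divide-based overflow test
    }

    int main() {
      uint64_t p;
      assert(!umulo64(1u << 20, 1u << 20, p));                    // 2^40 fits
      assert( umulo64(UINT64_C(1) << 40, UINT64_C(1) << 40, p));  // 2^80 does not
      assert(!umulo64(~UINT64_C(0), 0, p) && p == 0);             // zero divisor case
    }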
- bool WidenVectorOperand(SDNode *N, unsigned ResNo); + bool WidenVectorOperand(SDNode *N, unsigned OpNo); SDValue WidenVecOp_BITCAST(SDNode *N); SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N); SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N); diff --git a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp index 06f6bd6..6bcb3b2 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeTypesGeneric.cpp @@ -20,7 +20,7 @@ //===----------------------------------------------------------------------===// #include "LegalizeTypes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; //===----------------------------------------------------------------------===// @@ -94,14 +94,48 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) { if (InVT.isVector() && OutVT.isInteger()) { // Handle cases like i64 = BITCAST v1i64 on x86, where the operand // is legal but the result is not. - EVT NVT = EVT::getVectorVT(*DAG.getContext(), NOutVT, 2); + unsigned NumElems = 2; + EVT ElemVT = NOutVT; + EVT NVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, NumElems); + + // If is not a legal type, try . + while (!isTypeLegal(NVT)) { + unsigned NewSizeInBits = ElemVT.getSizeInBits() / 2; + // If the element size is smaller than byte, bail. + if (NewSizeInBits < 8) + break; + NumElems *= 2; + ElemVT = EVT::getIntegerVT(*DAG.getContext(), NewSizeInBits); + NVT = EVT::getVectorVT(*DAG.getContext(), ElemVT, NumElems); + } if (isTypeLegal(NVT)) { SDValue CastInOp = DAG.getNode(ISD::BITCAST, dl, NVT, InOp); - Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp, - DAG.getIntPtrConstant(0)); - Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp, - DAG.getIntPtrConstant(1)); + + SmallVector Vals; + for (unsigned i = 0; i < NumElems; ++i) + Vals.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ElemVT, + CastInOp, DAG.getIntPtrConstant(i))); + + // Build Lo, Hi pair by pairing extracted elements if needed. + unsigned Slot = 0; + for (unsigned e = Vals.size(); e - Slot > 2; Slot += 2, e += 1) { + // Each iteration will BUILD_PAIR two nodes and append the result until + // there are only two nodes left, i.e. Lo and Hi. + SDValue LHS = Vals[Slot]; + SDValue RHS = Vals[Slot + 1]; + + if (TLI.isBigEndian()) + std::swap(LHS, RHS); + + Vals.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, + EVT::getIntegerVT( + *DAG.getContext(), + LHS.getValueType().getSizeInBits() << 1), + LHS, RHS)); + } + Lo = Vals[Slot++]; + Hi = Vals[Slot++]; if (TLI.isBigEndian()) std::swap(Lo, Hi); @@ -116,7 +150,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) { // Create the stack frame object. Make sure it is aligned for both // the source and expanded destination types. unsigned Alignment = - TLI.getTargetData()->getPrefTypeAlignment(NOutVT. + TLI.getDataLayout()->getPrefTypeAlignment(NOutVT. getTypeForEVT(*DAG.getContext())); SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment); int SPFI = cast(StackPtr.getNode())->getIndex(); diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index 704f99b..22f8d51 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -64,6 +64,7 @@ class VectorLegalizer { // Implement vselect in terms of XOR, AND, OR when blend is not supported // by the target. 
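The ExpandRes_BITCAST change above generalizes "iN = bitcast vector": if the two-element vector of half-width integers is not legal, it keeps halving the element size until a legal vector type is found, extracts every element, and then BUILD_PAIRs neighbours back together until only the Lo/Hi halves remain (swapped on big-endian). A host-side sketch of the pairing for i64 = bitcast v4i16 when only the v4i16 lanes are available; it assumes a little-endian host and illustrative names:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint16_t lanes[4] = {0x1111, 0x2222, 0x3333, 0x4444};

      // Pair neighbouring i16 lanes into i32s, then pair those into the i64.
      uint32_t pair0 = (uint32_t)lanes[0] | ((uint32_t)lanes[1] << 16);
      uint32_t pair1 = (uint32_t)lanes[2] | ((uint32_t)lanes[3] << 16);
      uint64_t expanded = (uint64_t)pair0 | ((uint64_t)pair1 << 32);

      uint64_t direct;
      std::memcpy(&direct, lanes, sizeof direct);  // the original bitcast
      assert(expanded == direct);                  // identical bits on LE hosts
    }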
SDValue ExpandVSELECT(SDValue Op); + SDValue ExpandSELECT(SDValue Op); SDValue ExpandLoad(SDValue Op); SDValue ExpandStore(SDValue Op); SDValue ExpandFNEG(SDValue Op); @@ -220,6 +221,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) { case ISD::FRINT: case ISD::FNEARBYINT: case ISD::FFLOOR: + case ISD::FMA: case ISD::SIGN_EXTEND_INREG: QueryType = Node->getValueType(0); break; @@ -260,6 +262,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) { case TargetLowering::Expand: if (Node->getOpcode() == ISD::VSELECT) Result = ExpandVSELECT(Op); + else if (Node->getOpcode() == ISD::SELECT) + Result = ExpandSELECT(Op); else if (Node->getOpcode() == ISD::UINT_TO_FP) Result = ExpandUINT_TO_FLOAT(Op); else if (Node->getOpcode() == ISD::FNEG) @@ -435,6 +439,66 @@ SDValue VectorLegalizer::ExpandStore(SDValue Op) { return TF; } +SDValue VectorLegalizer::ExpandSELECT(SDValue Op) { + // Lower a select instruction where the condition is a scalar and the + // operands are vectors. Lower this select to VSELECT and implement it + // using XOR AND OR. The selector bit is broadcasted. + EVT VT = Op.getValueType(); + DebugLoc DL = Op.getDebugLoc(); + + SDValue Mask = Op.getOperand(0); + SDValue Op1 = Op.getOperand(1); + SDValue Op2 = Op.getOperand(2); + + assert(VT.isVector() && !Mask.getValueType().isVector() + && Op1.getValueType() == Op2.getValueType() && "Invalid type"); + + unsigned NumElem = VT.getVectorNumElements(); + + // If we can't even use the basic vector operations of + // AND,OR,XOR, we will have to scalarize the op. + // Notice that the operation may be 'promoted' which means that it is + // 'bitcasted' to another type which is handled. + // Also, we need to be able to construct a splat vector using BUILD_VECTOR. + if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand || + TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand || + TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand || + TLI.getOperationAction(ISD::BUILD_VECTOR, VT) == TargetLowering::Expand) + return DAG.UnrollVectorOp(Op.getNode()); + + // Generate a mask operand. + EVT MaskTy = TLI.getSetCCResultType(VT); + assert(MaskTy.isVector() && "Invalid CC type"); + assert(MaskTy.getSizeInBits() == Op1.getValueType().getSizeInBits() + && "Invalid mask size"); + + // What is the size of each element in the vector mask. + EVT BitTy = MaskTy.getScalarType(); + + Mask = DAG.getNode(ISD::SELECT, DL, BitTy, Mask, + DAG.getConstant(APInt::getAllOnesValue(BitTy.getSizeInBits()), BitTy), + DAG.getConstant(0, BitTy)); + + // Broadcast the mask so that the entire vector is all-one or all zero. + SmallVector Ops(NumElem, Mask); + Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskTy, &Ops[0], Ops.size()); + + // Bitcast the operands to be the same type as the mask. + // This is needed when we select between FP types because + // the mask is a vector of integers. + Op1 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op1); + Op2 = DAG.getNode(ISD::BITCAST, DL, MaskTy, Op2); + + SDValue AllOnes = DAG.getConstant( + APInt::getAllOnesValue(BitTy.getSizeInBits()), MaskTy); + SDValue NotMask = DAG.getNode(ISD::XOR, DL, MaskTy, Mask, AllOnes); + + Op1 = DAG.getNode(ISD::AND, DL, MaskTy, Op1, Mask); + Op2 = DAG.getNode(ISD::AND, DL, MaskTy, Op2, NotMask); + SDValue Val = DAG.getNode(ISD::OR, DL, MaskTy, Op1, Op2); + return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val); +} + SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) { // Implement VSELECT in terms of XOR, AND, OR // on platforms which do not support blend natively. 
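ExpandSELECT above lowers a select whose condition is a scalar but whose operands are vectors: the i1 condition is turned into an all-ones or all-zero value of the mask element type, broadcast across the vector, and the select becomes (Op1 AND Mask) OR (Op2 AND NOT Mask) after bitcasting the operands to the mask type. A scalar-loop sketch of the same blend, with illustrative names:

    #include <cassert>
    #include <cstdint>

    void selectVec(bool cond, const uint32_t *a, const uint32_t *b,
                   uint32_t *out, int n) {
      uint32_t mask = cond ? 0xFFFFFFFFu : 0u;   // SELECT cond, ~0, 0 (broadcast)
      for (int i = 0; i < n; ++i)                // per-lane AND/OR blend
        out[i] = (a[i] & mask) | (b[i] & ~mask);
    }

    int main() {
      uint32_t a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, r[4];
      selectVec(true, a, b, r, 4);
      for (int i = 0; i < 4; ++i) assert(r[i] == a[i]);
      selectVec(false, a, b, r, 4);
      for (int i = 0; i < 4; ++i) assert(r[i] == b[i]);
    }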
@@ -449,12 +513,17 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) { // AND,OR,XOR, we will have to scalarize the op. // Notice that the operation may be 'promoted' which means that it is // 'bitcasted' to another type which is handled. + // This operation also isn't safe with AND, OR, XOR when the boolean + // type is 0/1 as we need an all ones vector constant to mask with. + // FIXME: Sign extend 1 to all ones if thats legal on the target. if (TLI.getOperationAction(ISD::AND, VT) == TargetLowering::Expand || TLI.getOperationAction(ISD::XOR, VT) == TargetLowering::Expand || - TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand) + TLI.getOperationAction(ISD::OR, VT) == TargetLowering::Expand || + TLI.getBooleanContents(true) != + TargetLowering::ZeroOrNegativeOneBooleanContent) return DAG.UnrollVectorOp(Op.getNode()); - assert(VT.getSizeInBits() == Op.getOperand(1).getValueType().getSizeInBits() + assert(VT.getSizeInBits() == Op1.getValueType().getSizeInBits() && "Invalid mask size"); // Bitcast the operands to be the same type as the mask. // This is needed when we select between FP types because diff --git a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index 4709202..d51a6eb 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -21,7 +21,7 @@ //===----------------------------------------------------------------------===// #include "LegalizeTypes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; @@ -749,7 +749,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx); Type *VecType = VecVT.getTypeForEVT(*DAG.getContext()); unsigned Alignment = - TLI.getTargetData()->getPrefTypeAlignment(VecType); + TLI.getDataLayout()->getPrefTypeAlignment(VecType); Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT, false, false, 0); @@ -1366,6 +1366,9 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) { case ISD::FTRUNC: Res = WidenVecRes_Unary(N); break; + case ISD::FMA: + Res = WidenVecRes_Ternary(N); + break; } // If Res is null, the sub-method took care of registering the result. @@ -1373,6 +1376,16 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) { SetWidenedVector(SDValue(N, ResNo), Res); } +SDValue DAGTypeLegalizer::WidenVecRes_Ternary(SDNode *N) { + // Ternary op widening. + DebugLoc dl = N->getDebugLoc(); + EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0)); + SDValue InOp1 = GetWidenedVector(N->getOperand(0)); + SDValue InOp2 = GetWidenedVector(N->getOperand(1)); + SDValue InOp3 = GetWidenedVector(N->getOperand(2)); + return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3); +} + SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) { // Binary op widening. 
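The ExpandVSELECT change above now also bails out unless the target's boolean contents are ZeroOrNegativeOneBooleanContent: the AND/OR/XOR blend is only a real select when each mask lane is all-ones or all-zero, and a 0/1 boolean would mix bits from both operands. A small host-side illustration of why:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t op1 = 0xDEADBEEF, op2 = 0x12345678;

      // All-ones mask: the bitwise blend really selects op1.
      uint32_t mask = 0xFFFFFFFFu;
      assert(((op1 & mask) | (op2 & ~mask)) == op1);

      // 0/1 boolean as the mask: only bit 0 comes from op1, the rest from op2,
      // which is why the expansion unrolls instead (see the FIXME above).
      uint32_t badMask = 1u;
      assert(((op1 & badMask) | (op2 & ~badMask)) != op1);
    }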
unsigned Opcode = N->getOpcode(); @@ -2069,16 +2082,20 @@ SDValue DAGTypeLegalizer::WidenVecRes_VSETCC(SDNode *N) { //===----------------------------------------------------------------------===// // Widen Vector Operand //===----------------------------------------------------------------------===// -bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned ResNo) { - DEBUG(dbgs() << "Widen node operand " << ResNo << ": "; +bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) { + DEBUG(dbgs() << "Widen node operand " << OpNo << ": "; N->dump(&DAG); dbgs() << "\n"); SDValue Res = SDValue(); + // See if the target wants to custom widen this node. + if (CustomLowerNode(N, N->getOperand(OpNo).getValueType(), false)) + return false; + switch (N->getOpcode()) { default: #ifndef NDEBUG - dbgs() << "WidenVectorOperand op #" << ResNo << ": "; + dbgs() << "WidenVectorOperand op #" << OpNo << ": "; N->dump(&DAG); dbgs() << "\n"; #endif diff --git a/lib/CodeGen/SelectionDAG/SDNodeOrdering.h b/lib/CodeGen/SelectionDAG/SDNodeOrdering.h index f88b26d..d2269f8 100644 --- a/lib/CodeGen/SelectionDAG/SDNodeOrdering.h +++ b/lib/CodeGen/SelectionDAG/SDNodeOrdering.h @@ -28,8 +28,8 @@ class SDNode; class SDNodeOrdering { DenseMap OrderMap; - void operator=(const SDNodeOrdering&); // Do not implement. - SDNodeOrdering(const SDNodeOrdering&); // Do not implement. + void operator=(const SDNodeOrdering&) LLVM_DELETED_FUNCTION; + SDNodeOrdering(const SDNodeOrdering&) LLVM_DELETED_FUNCTION; public: SDNodeOrdering() {} diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp index b7ce48a..2ecdd89 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp @@ -13,11 +13,12 @@ #define DEBUG_TYPE "pre-RA-sched" #include "ScheduleDAGSDNodes.h" +#include "InstrEmitter.h" #include "llvm/InlineAsm.h" #include "llvm/CodeGen/SchedulerRegistry.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Support/Debug.h" #include "llvm/ADT/SmallSet.h" @@ -34,6 +35,10 @@ STATISTIC(NumPRCopies, "Number of physical copies"); static RegisterScheduler fastDAGScheduler("fast", "Fast suboptimal list scheduling", createFastDAGScheduler); +static RegisterScheduler + linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling", + createDAGLinearizer); + namespace { /// FastPriorityQueue - A degenerate priority queue that considers @@ -331,7 +336,9 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) { } } if (isNewLoad) { - AddPred(NewSU, SDep(LoadSU, SDep::Order, LoadSU->Latency)); + SDep D(LoadSU, SDep::Barrier); + D.setLatency(LoadSU->Latency); + AddPred(NewSU, D); } ++NumUnfolds; @@ -407,9 +414,12 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg, for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) { RemovePred(DelDeps[i].first, DelDeps[i].second); } - - AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg)); - AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0)); + SDep FromDep(SU, SDep::Data, Reg); + FromDep.setLatency(SU->Latency); + AddPred(CopyFromSU, FromDep); + SDep ToDep(CopyFromSU, SDep::Data, 0); + ToDep.setLatency(CopyFromSU->Latency); + AddPred(CopyToSU, ToDep); Copies.push_back(CopyFromSU); Copies.push_back(CopyToSU); @@ -586,18 +596,14 @@ void ScheduleDAGFast::ListScheduleBottomUp() { 
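The SDNodeOrdering hunk above replaces the old "declared but never defined" copy members with LLVM_DELETED_FUNCTION, the macro that expands to the C++11 "= delete" where available, so copying is rejected at compile time rather than at link time. A minimal sketch of the same idiom with the modern spelling; the class name is only illustrative:

    class OrderingMap {
    public:
      OrderingMap() = default;
      OrderingMap(const OrderingMap &) = delete;             // non-copyable
      OrderingMap &operator=(const OrderingMap &) = delete;  // non-assignable
    };

    int main() {
      OrderingMap m;          // constructing is fine
      // OrderingMap n = m;   // would not compile: copy constructor is deleted
      (void)m;
    }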
InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies); DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum << " to SU #" << Copies.front()->NodeNum << "\n"); - AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, /*isArtificial=*/true)); + AddPred(TrySU, SDep(Copies.front(), SDep::Artificial)); NewDef = Copies.back(); } DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum << " to SU #" << TrySU->NodeNum << "\n"); LiveRegDefs[Reg] = NewDef; - AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, /*isArtificial=*/true)); + AddPred(NewDef, SDep(TrySU, SDep::Artificial)); TrySU->isAvailable = false; CurSU = NewDef; } @@ -629,6 +635,155 @@ void ScheduleDAGFast::ListScheduleBottomUp() { #endif } + +namespace { +//===----------------------------------------------------------------------===// +// ScheduleDAGLinearize - No scheduling scheduler, it simply linearize the +// DAG in topological order. +// IMPORTANT: this may not work for targets with phyreg dependency. +// +class ScheduleDAGLinearize : public ScheduleDAGSDNodes { +public: + ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {} + + void Schedule(); + + MachineBasicBlock *EmitSchedule(MachineBasicBlock::iterator &InsertPos); + +private: + std::vector Sequence; + DenseMap GluedMap; // Cache glue to its user + + void ScheduleNode(SDNode *N); +}; +} // end anonymous namespace + +void ScheduleDAGLinearize::ScheduleNode(SDNode *N) { + if (N->getNodeId() != 0) + llvm_unreachable(0); + + if (!N->isMachineOpcode() && + (N->getOpcode() == ISD::EntryToken || isPassiveNode(N))) + // These nodes do not need to be translated into MIs. + return; + + DEBUG(dbgs() << "\n*** Scheduling: "); + DEBUG(N->dump(DAG)); + Sequence.push_back(N); + + unsigned NumOps = N->getNumOperands(); + if (unsigned NumLeft = NumOps) { + SDNode *GluedOpN = 0; + do { + const SDValue &Op = N->getOperand(NumLeft-1); + SDNode *OpN = Op.getNode(); + + if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) { + // Schedule glue operand right above N. + GluedOpN = OpN; + assert(OpN->getNodeId() != 0 && "Glue operand not ready?"); + OpN->setNodeId(0); + ScheduleNode(OpN); + continue; + } + + if (OpN == GluedOpN) + // Glue operand is already scheduled. + continue; + + DenseMap::iterator DI = GluedMap.find(OpN); + if (DI != GluedMap.end() && DI->second != N) + // Users of glues are counted against the glued users. + OpN = DI->second; + + unsigned Degree = OpN->getNodeId(); + assert(Degree > 0 && "Predecessor over-released!"); + OpN->setNodeId(--Degree); + if (Degree == 0) + ScheduleNode(OpN); + } while (--NumLeft); + } +} + +/// findGluedUser - Find the representative use of a glue value by walking +/// the use chain. +static SDNode *findGluedUser(SDNode *N) { + while (SDNode *Glued = N->getGluedUser()) + N = Glued; + return N; +} + +void ScheduleDAGLinearize::Schedule() { + DEBUG(dbgs() << "********** DAG Linearization **********\n"); + + SmallVector Glues; + unsigned DAGSize = 0; + for (SelectionDAG::allnodes_iterator I = DAG->allnodes_begin(), + E = DAG->allnodes_end(); I != E; ++I) { + SDNode *N = I; + + // Use node id to record degree. 
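The new ScheduleDAGLinearize above does no real scheduling: each node's id is initialized to its number of users, the root is emitted first, and a node is emitted as soon as its last user has been emitted, with glue operands forced to sit directly above their glued user. The emitted sequence is then walked in reverse by EmitSchedule. A small stand-alone sketch of that use-count-driven topological walk, stripped of the SelectionDAG and glue details (names are illustrative):

    #include <cassert>
    #include <vector>

    struct Node {
      std::vector<int> operands;   // indices of operand nodes
      int pendingUsers = 0;        // plays the role of the node id
    };

    void scheduleNode(std::vector<Node> &dag, int n, std::vector<int> &seq) {
      seq.push_back(n);                      // emit the node (root first)
      for (int op : dag[n].operands)
        if (--dag[op].pendingUsers == 0)     // last user just got scheduled
          scheduleNode(dag, op, seq);
    }

    int main() {
      // Node 2 (the root) uses nodes 0 and 1; node 1 uses node 0.
      std::vector<Node> dag(3);
      dag[2].operands = {0, 1};
      dag[1].operands = {0};
      dag[0].pendingUsers = 2;
      dag[1].pendingUsers = 1;

      std::vector<int> seq;
      scheduleNode(dag, 2, seq);
      // Users come before their operands; emission later walks this in reverse.
      assert((seq == std::vector<int>{2, 1, 0}));
    }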
+ unsigned Degree = N->use_size(); + N->setNodeId(Degree); + unsigned NumVals = N->getNumValues(); + if (NumVals && N->getValueType(NumVals-1) == MVT::Glue && + N->hasAnyUseOfValue(NumVals-1)) { + SDNode *User = findGluedUser(N); + if (User) { + Glues.push_back(N); + GluedMap.insert(std::make_pair(N, User)); + } + } + + if (N->isMachineOpcode() || + (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N))) + ++DAGSize; + } + + for (unsigned i = 0, e = Glues.size(); i != e; ++i) { + SDNode *Glue = Glues[i]; + SDNode *GUser = GluedMap[Glue]; + unsigned Degree = Glue->getNodeId(); + unsigned UDegree = GUser->getNodeId(); + + // Glue user must be scheduled together with the glue operand. So other + // users of the glue operand must be treated as its users. + SDNode *ImmGUser = Glue->getGluedUser(); + for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end(); + ui != ue; ++ui) + if (*ui == ImmGUser) + --Degree; + GUser->setNodeId(UDegree + Degree); + Glue->setNodeId(1); + } + + Sequence.reserve(DAGSize); + ScheduleNode(DAG->getRoot().getNode()); +} + +MachineBasicBlock* +ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) { + InstrEmitter Emitter(BB, InsertPos); + DenseMap VRBaseMap; + + DEBUG({ + dbgs() << "\n*** Final schedule ***\n"; + }); + + // FIXME: Handle dbg_values. + unsigned NumNodes = Sequence.size(); + for (unsigned i = 0; i != NumNodes; ++i) { + SDNode *N = Sequence[NumNodes-i-1]; + DEBUG(N->dump(DAG)); + Emitter.EmitNode(N, false, false, VRBaseMap); + } + + DEBUG(dbgs() << '\n'); + + InsertPos = Emitter.getInsertPos(); + return Emitter.getBlock(); +} + //===----------------------------------------------------------------------===// // Public Constructor Functions //===----------------------------------------------------------------------===// @@ -637,3 +792,8 @@ llvm::ScheduleDAGSDNodes * llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) { return new ScheduleDAGFast(*IS->MF); } + +llvm::ScheduleDAGSDNodes * +llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) { + return new ScheduleDAGLinearize(*IS->MF); +} diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp index bf0a437..c554569 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -22,7 +22,7 @@ #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/CodeGen/ScheduleHazardRecognizer.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" @@ -656,6 +656,8 @@ void ScheduleDAGRRList::EmitNode(SUnit *SU) { break; case ISD::MERGE_VALUES: case ISD::TokenFactor: + case ISD::LIFETIME_START: + case ISD::LIFETIME_END: case ISD::CopyToReg: case ISD::CopyFromReg: case ISD::EH_LABEL: @@ -1056,7 +1058,9 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) { // Add a data dependency to reflect that NewSU reads the value defined // by LoadSU. - AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency)); + SDep D(LoadSU, SDep::Data, 0); + D.setLatency(LoadSU->Latency); + AddPred(NewSU, D); if (isNewLoad) AvailableQueue->addNode(LoadSU); @@ -1138,17 +1142,18 @@ void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg, // Avoid scheduling the def-side copy before other successors. 
Otherwise // we could introduce another physreg interference on the copy and // continue inserting copies indefinitely. - SDep D(CopyFromSU, SDep::Order, /*Latency=*/0, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, /*isArtificial=*/true); - AddPred(SuccSU, D); + AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial)); } } for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) RemovePred(DelDeps[i].first, DelDeps[i].second); - AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg)); - AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0)); + SDep FromDep(SU, SDep::Data, Reg); + FromDep.setLatency(SU->Latency); + AddPred(CopyFromSU, FromDep); + SDep ToDep(CopyFromSU, SDep::Data, 0); + ToDep.setLatency(CopyFromSU->Latency); + AddPred(CopyToSU, ToDep); AvailableQueue->updateNode(SU); AvailableQueue->addNode(CopyFromSU); @@ -1357,9 +1362,7 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() { if (!BtSU->isPending) AvailableQueue->remove(BtSU); } - AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, /*isArtificial=*/true)); + AddPred(TrySU, SDep(BtSU, SDep::Artificial)); // If one or more successors has been unscheduled, then the current // node is no longer avaialable. Schedule a successor that's now @@ -1411,20 +1414,14 @@ SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() { InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies); DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum << " to SU #" << Copies.front()->NodeNum << "\n"); - AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, - /*isArtificial=*/true)); + AddPred(TrySU, SDep(Copies.front(), SDep::Artificial)); NewDef = Copies.back(); } DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum << " to SU #" << TrySU->NodeNum << "\n"); LiveRegDefs[Reg] = NewDef; - AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, - /*isArtificial=*/true)); + AddPred(NewDef, SDep(TrySU, SDep::Artificial)); TrySU->isAvailable = false; CurSU = NewDef; } @@ -1756,6 +1753,7 @@ public: return V; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void dump(ScheduleDAG *DAG) const { // Emulate pop() without clobbering NodeQueueIds. 
std::vector DumpQueue = Queue; @@ -1766,6 +1764,7 @@ public: SU->dump(DAG); } } +#endif }; typedef RegReductionPriorityQueue @@ -1893,6 +1892,7 @@ unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const { //===----------------------------------------------------------------------===// void RegReductionPQBase::dumpRegPressure() const { +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(), E = TRI->regclass_end(); I != E; ++I) { const TargetRegisterClass *RC = *I; @@ -1902,6 +1902,7 @@ void RegReductionPQBase::dumpRegPressure() const { DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id] << '\n'); } +#endif } bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const { @@ -2930,10 +2931,7 @@ void RegReductionPQBase::AddPseudoTwoAddrDeps() { !scheduleDAG->IsReachable(SuccSU, SU)) { DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #" << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n"); - scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0, - /*Reg=*/0, /*isNormalMemory=*/false, - /*isMustAlias=*/false, - /*isArtificial=*/true)); + scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial)); } } } diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index 748668c..a197fcb 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -485,14 +485,15 @@ void ScheduleDAGSDNodes::AddSchedEdges() { if(isChain && OpN->getOpcode() == ISD::TokenFactor) OpLatency = 0; - const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data, - OpLatency, PhysReg); + SDep Dep = isChain ? SDep(OpSU, SDep::Barrier) + : SDep(OpSU, SDep::Data, PhysReg); + Dep.setLatency(OpLatency); if (!isChain && !UnitLatencies) { - computeOperandLatency(OpN, N, i, const_cast(dep)); - ST.adjustSchedDependency(OpSU, SU, const_cast(dep)); + computeOperandLatency(OpN, N, i, Dep); + ST.adjustSchedDependency(OpSU, SU, Dep); } - if (!SU->addPred(dep) && !dep.isCtrl() && OpSU->NumRegDefsLeft > 1) { + if (!SU->addPred(Dep) && !Dep.isCtrl() && OpSU->NumRegDefsLeft > 1) { // Multiple register uses are combined in the same SUnit. For example, // we could have a set of glued nodes with all their defs consumed by // another set of glued nodes. 
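// Illustrative sketch only, not LLVM's SDep: the hunks above replace one
// many-argument dependence constructor with kind-specific constructors
// (Data/Barrier/Artificial) plus an explicit setLatency() call. A minimal
// analogue of that API shape; ToyDep and ToySUnit are hypothetical names.
struct ToySUnit;

class ToyDep {
public:
  enum Kind { Data, Barrier, Artificial };

  // Data dependences carry the register they communicate through.
  ToyDep(ToySUnit *Pred, Kind K, unsigned Reg = 0)
      : Pred(Pred), DepKind(K), Reg(Reg), Latency(K == Data ? 1 : 0) {}

  void setLatency(unsigned L) { Latency = L; } // set separately, as above
  bool isCtrl() const { return DepKind != Data; }
  ToySUnit *getPred() const { return Pred; }
  unsigned getReg() const { return Reg; }
  unsigned getLatency() const { return Latency; }

private:
  ToySUnit *Pred;
  Kind DepKind;
  unsigned Reg;
  unsigned Latency;
};

// Shape of the rewritten call sites above (comment only):
//   ToyDep D(LoadSU, ToyDep::Data, /*Reg=*/0);
//   D.setLatency(LoadLatency);
//   addPred(NewSU, D);
//   addPred(TrySU, ToyDep(BtSU, ToyDep::Artificial));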
Register pressure tracking sees this as @@ -643,6 +644,7 @@ void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use, } void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const { +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) if (!SU->getNode()) { dbgs() << "PHYS REG COPY\n"; return; @@ -659,8 +661,10 @@ void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const { dbgs() << "\n"; GluedNodes.pop_back(); } +#endif } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void ScheduleDAGSDNodes::dumpSchedule() const { for (unsigned i = 0, e = Sequence.size(); i != e; i++) { if (SUnit *SU = Sequence[i]) @@ -669,6 +673,7 @@ void ScheduleDAGSDNodes::dumpSchedule() const { dbgs() << "**** NOOP ****\n"; } } +#endif #ifndef NDEBUG /// VerifyScheduledSequence - Verify that all SUnits were scheduled and that @@ -827,8 +832,7 @@ EmitSchedule(MachineBasicBlock::iterator &InsertPos) { } SmallVector GluedNodes; - for (SDNode *N = SU->getNode()->getGluedNode(); N; - N = N->getGluedNode()) + for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode()) GluedNodes.push_back(N); while (!GluedNodes.empty()) { SDNode *N = GluedNodes.back(); diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h index 84e41fc..907356f 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.h @@ -114,7 +114,8 @@ namespace llvm { /// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock /// according to the order specified in Sequence. /// - MachineBasicBlock *EmitSchedule(MachineBasicBlock::iterator &InsertPos); + virtual MachineBasicBlock* + EmitSchedule(MachineBasicBlock::iterator &InsertPos); virtual void dumpNode(const SUnit *SU) const; @@ -158,6 +159,12 @@ namespace llvm { void InitNodeNumDefs(); }; + protected: + /// ForceUnitLatencies - Return true if all scheduling edges should be given + /// a latency value of one. The default is to return false; schedulers may + /// override this as needed. + virtual bool forceUnitLatencies() const { return false; } + private: /// ClusterNeighboringLoads - Cluster loads from "near" addresses into /// combined SUnits. diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp index c851291..30f03ac 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGVLIW.cpp @@ -25,7 +25,7 @@ #include "llvm/CodeGen/SchedulerRegistry.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index f4fe892..f000ce3 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -29,7 +29,7 @@ #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetSelectionDAGInfo.h" #include "llvm/Target/TargetOptions.h" @@ -91,11 +91,6 @@ bool ConstantFPSDNode::isValueValidForType(EVT VT, const APFloat& Val) { assert(VT.isFloatingPoint() && "Can only convert between FP types"); - // PPC long double cannot be converted to any other type. 
- if (VT == MVT::ppcf128 || - &Val.getSemantics() == &APFloat::PPCDoubleDouble) - return false; - // convert modifies in place, so make a copy. APFloat Val2 = APFloat(Val); bool losesInfo; @@ -136,13 +131,11 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) { // constants are. SDValue NotZero = N->getOperand(i); unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits(); - if (isa(NotZero)) { - if (cast(NotZero)->getAPIntValue().countTrailingOnes() < - EltSize) + if (ConstantSDNode *CN = dyn_cast(NotZero)) { + if (CN->getAPIntValue().countTrailingOnes() < EltSize) return false; - } else if (isa(NotZero)) { - if (cast(NotZero)->getValueAPF() - .bitcastToAPInt().countTrailingOnes() < EltSize) + } else if (ConstantFPSDNode *CFPN = dyn_cast(NotZero)) { + if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize) return false; } else return false; @@ -179,11 +172,11 @@ bool ISD::isBuildVectorAllZeros(const SDNode *N) { // Do not accept build_vectors that aren't all constants or which have non-0 // elements. SDValue Zero = N->getOperand(i); - if (isa(Zero)) { - if (!cast(Zero)->isNullValue()) + if (ConstantSDNode *CN = dyn_cast(Zero)) { + if (!CN->isNullValue()) return false; - } else if (isa(Zero)) { - if (!cast(Zero)->getValueAPF().isPosZero()) + } else if (ConstantFPSDNode *CFPN = dyn_cast(Zero)) { + if (!CFPN->getValueAPF().isPosZero()) return false; } else return false; @@ -494,8 +487,10 @@ static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) { } case ISD::TargetBlockAddress: case ISD::BlockAddress: { - ID.AddPointer(cast(N)->getBlockAddress()); - ID.AddInteger(cast(N)->getTargetFlags()); + const BlockAddressSDNode *BA = cast(N); + ID.AddPointer(BA->getBlockAddress()); + ID.AddInteger(BA->getOffset()); + ID.AddInteger(BA->getTargetFlags()); break; } } // end switch (N->getOpcode()) @@ -883,7 +878,7 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const { PointerType::get(Type::getInt8Ty(*getContext()), 0) : VT.getTypeForEVT(*getContext()); - return TLI.getTargetData()->getABITypeAlignment(Ty); + return TLI.getDataLayout()->getABITypeAlignment(Ty); } // EntryNode could meaningfully have debug info if we can find it... @@ -1097,10 +1092,9 @@ SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL, "Cannot set target flags on target-independent globals"); // Truncate (with sign-extension) the offset value to the pointer size. - EVT PTy = TLI.getPointerTy(); - unsigned BitWidth = PTy.getSizeInBits(); + unsigned BitWidth = TLI.getPointerTy().getSizeInBits(); if (BitWidth < 64) - Offset = (Offset << (64 - BitWidth) >> (64 - BitWidth)); + Offset = SignExtend64(Offset, BitWidth); const GlobalVariable *GVar = dyn_cast(GV); if (!GVar) { @@ -1174,7 +1168,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, assert((TargetFlags == 0 || isTarget) && "Cannot set target flags on target-independent globals"); if (Alignment == 0) - Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType()); + Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType()); unsigned Opc = isTarget ? 
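// Illustrative sketch: the hunk above replaces the shift pair
//   Offset = (Offset << (64 - BitWidth) >> (64 - BitWidth));
// with a named sign-extension helper (LLVM's SignExtend64 from MathExtras.h).
// A standalone equivalent that avoids left-shifting a possibly negative value;
// the name signExtend64 here is hypothetical.
#include <cassert>
#include <cstdint>

// Sign-extend the low `Bits` bits of X to a full int64_t.
inline int64_t signExtend64(uint64_t X, unsigned Bits) {
  assert(Bits > 0 && Bits <= 64 && "invalid bit width");
  if (Bits == 64)
    return static_cast<int64_t>(X);
  uint64_t Mask = (uint64_t(1) << Bits) - 1;
  uint64_t SignBit = uint64_t(1) << (Bits - 1);
  X &= Mask;
  return static_cast<int64_t>(X ^ SignBit) - static_cast<int64_t>(SignBit);
}

// Example: with a 32-bit pointer, offset 0xFFFFFFFF becomes -1 again:
//   signExtend64(0xFFFFFFFFu, 32) == -1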
ISD::TargetConstantPool : ISD::ConstantPool; FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); @@ -1201,7 +1195,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT, assert((TargetFlags == 0 || isTarget) && "Cannot set target flags on target-independent globals"); if (Alignment == 0) - Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType()); + Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType()); unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); @@ -1471,6 +1465,7 @@ SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) { SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, + int64_t Offset, bool isTarget, unsigned char TargetFlags) { unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress; @@ -1478,12 +1473,14 @@ SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT, FoldingSetNodeID ID; AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); ID.AddPointer(BA); + ID.AddInteger(Offset); ID.AddInteger(TargetFlags); void *IP = 0; if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) return SDValue(E, 0); - SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, TargetFlags); + SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset, + TargetFlags); CSEMap.InsertNode(N, IP); AllNodes.push_back(N); return SDValue(N, 0); @@ -1542,7 +1539,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) { unsigned ByteSize = VT.getStoreSize(); Type *Ty = VT.getTypeForEVT(*getContext()); unsigned StackAlign = - std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign); + std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), minAlign); int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false); return getFrameIndex(FrameIdx, TLI.getPointerTy()); @@ -1555,7 +1552,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) { VT2.getStoreSizeInBits())/8; Type *Ty1 = VT1.getTypeForEVT(*getContext()); Type *Ty2 = VT2.getTypeForEVT(*getContext()); - const TargetData *TD = TLI.getTargetData(); + const DataLayout *TD = TLI.getDataLayout(); unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1), TD->getPrefTypeAlignment(Ty2)); @@ -1610,10 +1607,6 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1, } if (ConstantFPSDNode *N1C = dyn_cast(N1.getNode())) { if (ConstantFPSDNode *N2C = dyn_cast(N2.getNode())) { - // No compile time operations on this type yet. - if (N1C->getValueType(0) == MVT::ppcf128) - return SDValue(); - APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF()); switch (Cond) { default: break; @@ -2445,8 +2438,6 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT); case ISD::UINT_TO_FP: case ISD::SINT_TO_FP: { - // No compile time operations on ppcf128. 
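// Illustrative sketch: getBlockAddress() now folds Offset into the node's
// FoldingSet ID (the added ID.AddInteger(Offset) above); without it, two block
// addresses differing only in offset would be CSE'd into a single node. A toy
// uniquing table making the same point; uniqueNode and NodeKey are
// hypothetical names.
#include <map>
#include <tuple>

struct ToyNode { const void *BA; long long Offset; unsigned Flags; };

// Every field that changes the node's meaning must be part of the key.
using NodeKey = std::tuple<const void *, long long, unsigned>;

ToyNode *uniqueNode(std::map<NodeKey, ToyNode> &Table, const void *BA,
                    long long Offset, unsigned Flags) {
  NodeKey K{BA, Offset, Flags};
  auto It = Table.find(K);
  if (It != Table.end())
    return &It->second; // an identical existing node is re-used
  return &(Table[K] = ToyNode{BA, Offset, Flags});
}
// Had Offset been left out of NodeKey, uniqueNode(T, BA, 0, F) and
// uniqueNode(T, BA, 8, F) would alias one node -- exactly what the added
// ID.AddInteger(Offset) prevents.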
- if (VT == MVT::ppcf128) break; APFloat apf(APInt::getNullValue(VT.getSizeInBits())); (void)apf.convertFromAPInt(Val, Opcode==ISD::SINT_TO_FP, @@ -2455,9 +2446,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, } case ISD::BITCAST: if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) - return getConstantFP(Val.bitsToFloat(), VT); + return getConstantFP(APFloat(Val), VT); else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) - return getConstantFP(Val.bitsToDouble(), VT); + return getConstantFP(APFloat(Val), VT); break; case ISD::BSWAP: return getConstant(Val.byteSwap(), VT); @@ -2475,61 +2466,59 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, // Constant fold unary operations with a floating point constant operand. if (ConstantFPSDNode *C = dyn_cast(Operand.getNode())) { APFloat V = C->getValueAPF(); // make copy - if (VT != MVT::ppcf128 && Operand.getValueType() != MVT::ppcf128) { - switch (Opcode) { - case ISD::FNEG: - V.changeSign(); + switch (Opcode) { + case ISD::FNEG: + V.changeSign(); + return getConstantFP(V, VT); + case ISD::FABS: + V.clearSign(); + return getConstantFP(V, VT); + case ISD::FCEIL: { + APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); + if (fs == APFloat::opOK || fs == APFloat::opInexact) return getConstantFP(V, VT); - case ISD::FABS: - V.clearSign(); + break; + } + case ISD::FTRUNC: { + APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); + if (fs == APFloat::opOK || fs == APFloat::opInexact) return getConstantFP(V, VT); - case ISD::FCEIL: { - APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive); - if (fs == APFloat::opOK || fs == APFloat::opInexact) - return getConstantFP(V, VT); - break; - } - case ISD::FTRUNC: { - APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero); - if (fs == APFloat::opOK || fs == APFloat::opInexact) - return getConstantFP(V, VT); - break; - } - case ISD::FFLOOR: { - APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); - if (fs == APFloat::opOK || fs == APFloat::opInexact) - return getConstantFP(V, VT); - break; - } - case ISD::FP_EXTEND: { - bool ignored; - // This can return overflow, underflow, or inexact; we don't care. - // FIXME need to be more flexible about rounding mode. - (void)V.convert(*EVTToAPFloatSemantics(VT), - APFloat::rmNearestTiesToEven, &ignored); + break; + } + case ISD::FFLOOR: { + APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative); + if (fs == APFloat::opOK || fs == APFloat::opInexact) return getConstantFP(V, VT); - } - case ISD::FP_TO_SINT: - case ISD::FP_TO_UINT: { - integerPart x[2]; - bool ignored; - assert(integerPartWidth >= 64); - // FIXME need to be more flexible about rounding mode. - APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(), - Opcode==ISD::FP_TO_SINT, - APFloat::rmTowardZero, &ignored); - if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual - break; - APInt api(VT.getSizeInBits(), x); - return getConstant(api, VT); - } - case ISD::BITCAST: - if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) - return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT); - else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) - return getConstant(V.bitcastToAPInt().getZExtValue(), VT); + break; + } + case ISD::FP_EXTEND: { + bool ignored; + // This can return overflow, underflow, or inexact; we don't care. + // FIXME need to be more flexible about rounding mode. 
+ (void)V.convert(*EVTToAPFloatSemantics(VT), + APFloat::rmNearestTiesToEven, &ignored); + return getConstantFP(V, VT); + } + case ISD::FP_TO_SINT: + case ISD::FP_TO_UINT: { + integerPart x[2]; + bool ignored; + assert(integerPartWidth >= 64); + // FIXME need to be more flexible about rounding mode. + APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(), + Opcode==ISD::FP_TO_SINT, + APFloat::rmTowardZero, &ignored); + if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual break; - } + APInt api(VT.getSizeInBits(), x); + return getConstant(api, VT); + } + case ISD::BITCAST: + if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) + return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT); + else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) + return getConstant(V.bitcastToAPInt().getZExtValue(), VT); + break; } } @@ -2817,6 +2806,24 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, if (ConstantFPSDNode *CFP = dyn_cast(N2)) if (CFP->getValueAPF().isZero()) return N1; + } else if (Opcode == ISD::FMUL) { + ConstantFPSDNode *CFP = dyn_cast(N1); + SDValue V = N2; + + // If the first operand isn't the constant, try the second + if (!CFP) { + CFP = dyn_cast(N2); + V = N1; + } + + if (CFP) { + // 0*x --> 0 + if (CFP->isZero()) + return SDValue(CFP,0); + // 1*x --> x + if (CFP->isExactlyValue(1.0)) + return V; + } } } assert(VT.isFloatingPoint() && "This operator only applies to FP types!"); @@ -2935,17 +2942,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, // expanding large vector constants. if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) { SDValue Elt = N1.getOperand(N2C->getZExtValue()); - EVT VEltTy = N1.getValueType().getVectorElementType(); - if (Elt.getValueType() != VEltTy) { + + if (VT != Elt.getValueType()) // If the vector element type is not legal, the BUILD_VECTOR operands - // are promoted and implicitly truncated. Make that explicit here. - Elt = getNode(ISD::TRUNCATE, DL, VEltTy, Elt); - } - if (VT != VEltTy) { - // If the vector element type is not legal, the EXTRACT_VECTOR_ELT - // result is implicitly extended. - Elt = getNode(ISD::ANY_EXTEND, DL, VT, Elt); - } + // are promoted and implicitly truncated, and the result implicitly + // extended. Make that explicit here. + Elt = getAnyExtOrTrunc(Elt, DL, VT); + return Elt; } @@ -3036,7 +3039,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, // Cannonicalize constant to RHS if commutative std::swap(N1CFP, N2CFP); std::swap(N1, N2); - } else if (N2CFP && VT != MVT::ppcf128) { + } else if (N2CFP) { APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF(); APFloat::opStatus s; switch (Opcode) { @@ -3435,7 +3438,7 @@ static bool FindOptimalMemOpLowering(std::vector &MemOps, DAG.getMachineFunction()); if (VT == MVT::Other) { - if (DstAlign >= TLI.getTargetData()->getPointerPrefAlignment() || + if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() || TLI.allowsUnalignedMemoryAccesses(VT)) { VT = TLI.getPointerTy(); } else { @@ -3503,7 +3506,9 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl, bool DstAlignCanChange = false; MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); - bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize); + bool OptSize = + MF.getFunction()->getFnAttributes(). 
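// Illustrative sketch of the FMUL folds added above: whichever operand is the
// constant, 0*x folds to the zero constant and 1*x folds to the other operand.
// (Folding 0*x this way is not strictly IEEE-correct when x is NaN, infinite,
// or of opposite sign; the hunk above applies it regardless.) foldFMul below
// is a hypothetical standalone analogue working on tagged values rather than
// SDNodes.
struct ToyValue {
  bool IsConst;
  double K;  // meaningful only when IsConst
  int Id;    // stands in for the SSA value otherwise
};

// Writes the folded value to Out and returns true when a fold applies.
bool foldFMul(const ToyValue &A, const ToyValue &B, ToyValue &Out) {
  const ToyValue *C = A.IsConst ? &A : (B.IsConst ? &B : nullptr);
  const ToyValue *V = A.IsConst ? &B : &A; // the other operand
  if (!C)
    return false;
  if (C->K == 0.0) { Out = *C; return true; } // 0 * x --> 0
  if (C->K == 1.0) { Out = *V; return true; } // 1 * x --> x
  return false;
}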
+ hasAttribute(Attributes::OptimizeForSize); FrameIndexSDNode *FI = dyn_cast(Dst); if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) DstAlignCanChange = true; @@ -3523,7 +3528,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl, if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); - unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty); + unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); if (NewAlign > Align) { // Give the stack frame object a larger alignment if needed. if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) @@ -3596,7 +3601,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl, bool DstAlignCanChange = false; MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); - bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize); + bool OptSize = MF.getFunction()->getFnAttributes(). + hasAttribute(Attributes::OptimizeForSize); FrameIndexSDNode *FI = dyn_cast(Dst); if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) DstAlignCanChange = true; @@ -3612,7 +3618,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl, if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); - unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty); + unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); if (NewAlign > Align) { // Give the stack frame object a larger alignment if needed. if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) @@ -3674,7 +3680,8 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl, bool DstAlignCanChange = false; MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); - bool OptSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize); + bool OptSize = MF.getFunction()->getFnAttributes(). + hasAttribute(Attributes::OptimizeForSize); FrameIndexSDNode *FI = dyn_cast(Dst); if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) DstAlignCanChange = true; @@ -3687,7 +3694,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl, if (DstAlignCanChange) { Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); - unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty); + unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty); if (NewAlign > Align) { // Give the stack frame object a larger alignment if needed. if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) @@ -3781,7 +3788,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst, // Emit a library call. TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; - Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext()); + Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext()); Entry.Node = Dst; Args.push_back(Entry); Entry.Node = Src; Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry); @@ -3836,7 +3843,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst, // Emit a library call. 
TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; - Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext()); + Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext()); Entry.Node = Dst; Args.push_back(Entry); Entry.Node = Src; Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry); @@ -3885,7 +3892,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst, return Result; // Emit a library call. - Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext()); + Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext()); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; Entry.Node = Dst; Entry.Ty = IntPtrTy; @@ -3923,17 +3930,21 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, SDValue Swp, MachinePointerInfo PtrInfo, unsigned Alignment, AtomicOrdering Ordering, - SynchronizationScope SynchScope) { + SynchronizationScope SynchScope) { if (Alignment == 0) // Ensure that codegen never sees alignment 0 Alignment = getEVTAlignment(MemVT); MachineFunction &MF = getMachineFunction(); - unsigned Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore; + // All atomics are load and store, except for ATMOIC_LOAD and ATOMIC_STORE. // For now, atomics are considered to be volatile always. // FIXME: Volatile isn't really correct; we should keep track of atomic // orderings in the memoperand. - Flags |= MachineMemOperand::MOVolatile; + unsigned Flags = MachineMemOperand::MOVolatile; + if (Opcode != ISD::ATOMIC_STORE) + Flags |= MachineMemOperand::MOLoad; + if (Opcode != ISD::ATOMIC_LOAD) + Flags |= MachineMemOperand::MOStore; MachineMemOperand *MMO = MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment); @@ -3983,17 +3994,17 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, Alignment = getEVTAlignment(MemVT); MachineFunction &MF = getMachineFunction(); - // A monotonic store does not load; a release store "loads" in the sense - // that other stores cannot be sunk past it. + // An atomic store does not load. An atomic load does not store. // (An atomicrmw obviously both loads and stores.) - unsigned Flags = MachineMemOperand::MOStore; - if (Opcode != ISD::ATOMIC_STORE || Ordering > Monotonic) - Flags |= MachineMemOperand::MOLoad; - - // For now, atomics are considered to be volatile always. + // For now, atomics are considered to be volatile always, and they are + // chained as such. // FIXME: Volatile isn't really correct; we should keep track of atomic // orderings in the memoperand. - Flags |= MachineMemOperand::MOVolatile; + unsigned Flags = MachineMemOperand::MOVolatile; + if (Opcode != ISD::ATOMIC_STORE) + Flags |= MachineMemOperand::MOLoad; + if (Opcode != ISD::ATOMIC_LOAD) + Flags |= MachineMemOperand::MOStore; MachineMemOperand *MMO = MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, @@ -4056,16 +4067,17 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT, Alignment = getEVTAlignment(MemVT); MachineFunction &MF = getMachineFunction(); - // A monotonic load does not store; an acquire load "stores" in the sense - // that other loads cannot be hoisted past it. - unsigned Flags = MachineMemOperand::MOLoad; - if (Ordering > Monotonic) - Flags |= MachineMemOperand::MOStore; - - // For now, atomics are considered to be volatile always. + // An atomic store does not load. An atomic load does not store. + // (An atomicrmw obviously both loads and stores.) 
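// Illustrative sketch of the memory-operand flag computation above: every
// atomic operation is (for now) marked volatile, marked as loading unless it
// is an atomic store, and marked as storing unless it is an atomic load, so an
// atomicrmw gets both. ToyOpcode and memFlagsFor are hypothetical names.
enum ToyOpcode { AtomicLoad, AtomicStore, AtomicRMW, AtomicCmpSwap };

enum ToyMemFlags : unsigned {
  MemLoad     = 1u << 0,
  MemStore    = 1u << 1,
  MemVolatile = 1u << 2
};

unsigned memFlagsFor(ToyOpcode Op) {
  unsigned Flags = MemVolatile; // atomics are conservatively volatile here
  if (Op != AtomicStore)
    Flags |= MemLoad;           // everything but a pure store loads
  if (Op != AtomicLoad)
    Flags |= MemStore;          // everything but a pure load stores
  return Flags;
}
// memFlagsFor(AtomicRMW) == (MemVolatile | MemLoad | MemStore)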
+ // For now, atomics are considered to be volatile always, and they are + // chained as such. // FIXME: Volatile isn't really correct; we should keep track of atomic // orderings in the memoperand. - Flags |= MachineMemOperand::MOVolatile; + unsigned Flags = MachineMemOperand::MOVolatile; + if (Opcode != ISD::ATOMIC_STORE) + Flags |= MachineMemOperand::MOLoad; + if (Opcode != ISD::ATOMIC_LOAD) + Flags |= MachineMemOperand::MOStore; MachineMemOperand *MMO = MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags, @@ -4157,6 +4169,8 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList, assert((Opcode == ISD::INTRINSIC_VOID || Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::PREFETCH || + Opcode == ISD::LIFETIME_START || + Opcode == ISD::LIFETIME_END || (Opcode <= INT_MAX && (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) && "Opcode is not a memory-accessing opcode!"); @@ -4226,7 +4240,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const MDNode *TBAAInfo, const MDNode *Ranges) { - assert(Chain.getValueType() == MVT::Other && + assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); if (Alignment == 0) // Ensure that codegen never sees alignment 0 Alignment = getEVTAlignment(VT); @@ -4284,7 +4298,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3); ID.AddInteger(MemVT.getRawBits()); ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(), - MMO->isNonTemporal(), + MMO->isNonTemporal(), MMO->isInvariant())); ID.AddInteger(MMO->getPointerInfo().getAddrSpace()); void *IP = 0; @@ -4303,7 +4317,7 @@ SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, - bool isInvariant, unsigned Alignment, + bool isInvariant, unsigned Alignment, const MDNode *TBAAInfo, const MDNode *Ranges) { SDValue Undef = getUNDEF(Ptr.getValueType()); @@ -4332,7 +4346,7 @@ SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base, "Load is already a indexed load!"); return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl, LD->getChain(), Base, Offset, LD->getPointerInfo(), - LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(), + LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(), false, LD->getAlignment()); } @@ -4340,7 +4354,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, unsigned Alignment, const MDNode *TBAAInfo) { - assert(Chain.getValueType() == MVT::Other && + assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); if (Alignment == 0) // Ensure that codegen never sees alignment 0 Alignment = getEVTAlignment(Val.getValueType()); @@ -4365,7 +4379,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr, MachineMemOperand *MMO) { - assert(Chain.getValueType() == MVT::Other && + assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); EVT VT = Val.getValueType(); SDVTList VTs = getVTList(MVT::Other); @@ -4394,7 +4408,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, EVT SVT,bool isVolatile, bool isNonTemporal, unsigned Alignment, const MDNode *TBAAInfo) { - assert(Chain.getValueType() == MVT::Other && + 
assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); if (Alignment == 0) // Ensure that codegen never sees alignment 0 Alignment = getEVTAlignment(SVT); @@ -4421,7 +4435,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, MachineMemOperand *MMO) { EVT VT = Val.getValueType(); - assert(Chain.getValueType() == MVT::Other && + assert(Chain.getValueType() == MVT::Other && "Invalid chain type"); if (VT == SVT) return getStore(Chain, dl, Val, Ptr, MMO); @@ -6074,7 +6088,7 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const { unsigned PtrWidth = TLI.getPointerTy().getSizeInBits(); APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0); llvm::ComputeMaskedBits(const_cast(GV), KnownZero, KnownOne, - TLI.getTargetData()); + TLI.getDataLayout()); unsigned AlignBits = KnownZero.countTrailingOnes(); unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; if (Align) diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index ba5bd79..3fbf7c2 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -19,6 +19,7 @@ #include "llvm/ADT/SmallSet.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/ConstantFolding.h" +#include "llvm/Analysis/ValueTracking.h" #include "llvm/Constants.h" #include "llvm/CallingConv.h" #include "llvm/DebugInfo.h" @@ -43,7 +44,7 @@ #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAG.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetIntrinsicInfo.h" @@ -88,7 +89,7 @@ static const unsigned MaxParallelChains = 64; static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL, const SDValue *Parts, unsigned NumParts, - EVT PartVT, EVT ValueVT); + EVT PartVT, EVT ValueVT, const Value *V); /// getCopyFromParts - Create a value that contains the specified legal parts /// combined into the value they represent. 
If the parts combine to a type @@ -98,9 +99,11 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL, static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL, const SDValue *Parts, unsigned NumParts, EVT PartVT, EVT ValueVT, + const Value *V, ISD::NodeType AssertOp = ISD::DELETED_NODE) { if (ValueVT.isVector()) - return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT); + return getCopyFromPartsVector(DAG, DL, Parts, NumParts, + PartVT, ValueVT, V); assert(NumParts > 0 && "No parts to assemble!"); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -124,9 +127,9 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL, if (RoundParts > 2) { Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, - PartVT, HalfVT); + PartVT, HalfVT, V); Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, - RoundParts / 2, PartVT, HalfVT); + RoundParts / 2, PartVT, HalfVT, V); } else { Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]); Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]); @@ -142,7 +145,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL, unsigned OddParts = NumParts - RoundParts; EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits); Hi = getCopyFromParts(DAG, DL, - Parts + RoundParts, OddParts, PartVT, OddVT); + Parts + RoundParts, OddParts, PartVT, OddVT, V); // Combine the round and odd parts. Lo = Val; @@ -171,7 +174,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL, assert(ValueVT.isFloatingPoint() && PartVT.isInteger() && !PartVT.isVector() && "Unexpected split"); EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()); - Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT); + Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V); } } @@ -209,14 +212,14 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL, llvm_unreachable("Unknown mismatch!"); } -/// getCopyFromParts - Create a value that contains the specified legal parts -/// combined into the value they represent. If the parts combine to a type -/// larger then ValueVT then AssertOp can be used to specify whether the extra -/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT -/// (ISD::AssertSext). +/// getCopyFromPartsVector - Create a value that contains the specified legal +/// parts combined into the value they represent. If the parts combine to a +/// type larger then ValueVT then AssertOp can be used to specify whether the +/// extra bits are known to be zero (ISD::AssertZext) or sign extended from +/// ValueVT (ISD::AssertSext). static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL, const SDValue *Parts, unsigned NumParts, - EVT PartVT, EVT ValueVT) { + EVT PartVT, EVT ValueVT, const Value *V) { assert(ValueVT.isVector() && "Not a vector value"); assert(NumParts > 0 && "No parts to assemble!"); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -242,7 +245,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL, // as appropriate. for (unsigned i = 0; i != NumParts; ++i) Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, - PartVT, IntermediateVT); + PartVT, IntermediateVT, V); } else if (NumParts > 0) { // If the intermediate type was expanded, build the intermediate // operands from the parts. 
@@ -251,7 +254,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL, unsigned Factor = NumParts / NumIntermediates; for (unsigned i = 0; i != NumIntermediates; ++i) Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, - PartVT, IntermediateVT); + PartVT, IntermediateVT, V); } // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the @@ -299,8 +302,19 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL, return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); // Handle cases such as i8 -> <1 x i1> - assert(ValueVT.getVectorNumElements() == 1 && - "Only trivial scalar-to-vector conversions should get here!"); + if (ValueVT.getVectorNumElements() != 1) { + LLVMContext &Ctx = *DAG.getContext(); + Twine ErrMsg("non-trivial scalar-to-vector conversion"); + if (const Instruction *I = dyn_cast_or_null(V)) { + if (const CallInst *CI = dyn_cast(I)) + if (isa(CI->getCalledValue())) + ErrMsg = ErrMsg + ", possible invalid constraint for vector type"; + Ctx.emitError(I, ErrMsg); + } else { + Ctx.emitError(ErrMsg); + } + report_fatal_error("Cannot handle scalar-to-vector conversion!"); + } if (ValueVT.getVectorNumElements() == 1 && ValueVT.getVectorElementType() != PartVT) { @@ -312,25 +326,22 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL, return DAG.getNode(ISD::BUILD_VECTOR, DL, ValueVT, Val); } - - - static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc dl, SDValue Val, SDValue *Parts, unsigned NumParts, - EVT PartVT); + EVT PartVT, const Value *V); /// getCopyToParts - Create a series of nodes that contain the specified value /// split into legal parts. If the parts contain more bits than Val, then, for /// integers, ExtendKind can be used to specify how to generate the extra bits. static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL, SDValue Val, SDValue *Parts, unsigned NumParts, - EVT PartVT, + EVT PartVT, const Value *V, ISD::NodeType ExtendKind = ISD::ANY_EXTEND) { EVT ValueVT = Val.getValueType(); // Handle the vector case separately. if (ValueVT.isVector()) - return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT); + return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); unsigned PartBits = PartVT.getSizeInBits(); @@ -382,7 +393,19 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL, "Failed to tile the value with PartVT!"); if (NumParts == 1) { - assert(PartVT == ValueVT && "Type conversion failed!"); + if (PartVT != ValueVT) { + LLVMContext &Ctx = *DAG.getContext(); + Twine ErrMsg("scalar-to-vector conversion failed"); + if (const Instruction *I = dyn_cast_or_null(V)) { + if (const CallInst *CI = dyn_cast(I)) + if (isa(CI->getCalledValue())) + ErrMsg = ErrMsg + ", possible invalid constraint for vector type"; + Ctx.emitError(I, ErrMsg); + } else { + Ctx.emitError(ErrMsg); + } + } + Parts[0] = Val; return; } @@ -397,7 +420,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL, unsigned OddParts = NumParts - RoundParts; SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val, DAG.getIntPtrConstant(RoundBits)); - getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT); + getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V); if (TLI.isBigEndian()) // The odd parts were reversed by getCopyToParts - unreverse them. @@ -443,7 +466,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL, /// value split into legal parts. 
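// Illustrative sketch of the error reporting added above: the Value* now
// threaded through getCopyFromParts/getCopyToParts lets a failed
// scalar-to-vector conversion point at the offending instruction and, when
// that instruction is a call through inline asm, append a hint that a
// constraint is probably wrong. Hypothetical standalone analogue; the real
// code hands the message to LLVMContext::emitError.
#include <string>

struct ToyInst { bool IsCall; bool CalleeIsInlineAsm; std::string Name; };

std::string diagnoseBadConversion(const char *What, const ToyInst *I) {
  std::string Msg = What;
  if (I && I->IsCall && I->CalleeIsInlineAsm)
    Msg += ", possible invalid constraint for vector type";
  if (I)
    Msg = Msg + " (at instruction '" + I->Name + "')";
  return Msg;
}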
static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL, SDValue Val, SDValue *Parts, unsigned NumParts, - EVT PartVT) { + EVT PartVT, const Value *V) { EVT ValueVT = Val.getValueType(); assert(ValueVT.isVector() && "Not a vector"); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -529,7 +552,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL, // If the register was not expanded, promote or copy the value, // as appropriate. for (unsigned i = 0; i != NumParts; ++i) - getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT); + getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V); } else if (NumParts > 0) { // If the intermediate type was expanded, split each the value into // legal parts. @@ -537,13 +560,10 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL, "Must expand into a divisible number of parts!"); unsigned Factor = NumParts / NumIntermediates; for (unsigned i = 0; i != NumIntermediates; ++i) - getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT); + getCopyToParts(DAG, DL, Ops[i], &Parts[i*Factor], Factor, PartVT, V); } } - - - namespace { /// RegsForValue - This struct represents the registers (physical or virtual) /// that a particular set of values is assigned, and the type information @@ -621,14 +641,15 @@ namespace { /// If the Flag pointer is NULL, no flag is used. SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, DebugLoc dl, - SDValue &Chain, SDValue *Flag) const; + SDValue &Chain, SDValue *Flag, + const Value *V = 0) const; /// getCopyToRegs - Emit a series of CopyToReg nodes that copies the /// specified value into the registers specified by this object. This uses /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. void getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, - SDValue &Chain, SDValue *Flag) const; + SDValue &Chain, SDValue *Flag, const Value *V) const; /// AddInlineAsmOperands - Add this value to the specified inlineasm node /// operand list. This adds the code marker, matching input operand index @@ -647,7 +668,8 @@ namespace { SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo, DebugLoc dl, - SDValue &Chain, SDValue *Flag) const { + SDValue &Chain, SDValue *Flag, + const Value *V) const { // A Value with type {} or [0 x %t] needs no registers. if (ValueVTs.empty()) return SDValue(); @@ -721,7 +743,7 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, } Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), - NumRegs, RegisterVT, ValueVT); + NumRegs, RegisterVT, ValueVT, V); Part += NumRegs; Parts.clear(); } @@ -736,7 +758,8 @@ SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG, /// Chain/Flag as the input and updates them for the output Chain/Flag. /// If the Flag pointer is NULL, no flag is used. void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, - SDValue &Chain, SDValue *Flag) const { + SDValue &Chain, SDValue *Flag, + const Value *V) const { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); // Get the list of the values's legal parts. 
@@ -748,7 +771,7 @@ void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG, DebugLoc dl, EVT RegisterVT = RegVTs[Value]; getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), - &Parts[Part], NumParts, RegisterVT); + &Parts[Part], NumParts, RegisterVT, V); Part += NumParts; } @@ -824,7 +847,8 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa, AA = &aa; GFI = gfi; LibInfo = li; - TD = DAG.getTarget().getTargetData(); + TD = DAG.getTarget().getDataLayout(); + Context = DAG.getContext(); LPadToCallSiteMap.clear(); } @@ -992,7 +1016,7 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) { unsigned InReg = It->second; RegsForValue RFV(*DAG.getContext(), TLI, InReg, V->getType()); SDValue Chain = DAG.getEntryNode(); - N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL); + N = RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL, V); resolveDanglingDebugInfo(V, N); return N; } @@ -1147,7 +1171,7 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) { unsigned InReg = FuncInfo.InitializeRegForValue(Inst); RegsForValue RFV(*DAG.getContext(), TLI, InReg, Inst->getType()); SDValue Chain = DAG.getEntryNode(); - return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL); + return RFV.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), Chain, NULL, V); } llvm_unreachable("Can't get register for value!"); @@ -1203,9 +1227,9 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) { ISD::NodeType ExtendKind = ISD::ANY_EXTEND; const Function *F = I.getParent()->getParent(); - if (F->paramHasAttr(0, Attribute::SExt)) + if (F->getRetAttributes().hasAttribute(Attributes::SExt)) ExtendKind = ISD::SIGN_EXTEND; - else if (F->paramHasAttr(0, Attribute::ZExt)) + else if (F->getRetAttributes().hasAttribute(Attributes::ZExt)) ExtendKind = ISD::ZERO_EXTEND; if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger()) @@ -1216,11 +1240,11 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) { SmallVector Parts(NumParts); getCopyToParts(DAG, getCurDebugLoc(), SDValue(RetOp.getNode(), RetOp.getResNo() + j), - &Parts[0], NumParts, PartVT, ExtendKind); + &Parts[0], NumParts, PartVT, &I, ExtendKind); // 'inreg' on function refers to return value ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); - if (F->paramHasAttr(0, Attribute::InReg)) + if (F->getRetAttributes().hasAttribute(Attributes::InReg)) Flags.setInReg(); // Propagate extension type if any @@ -1231,7 +1255,7 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) { for (unsigned i = 0; i < NumParts; ++i) { Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(), - /*isfixed=*/true)); + /*isfixed=*/true, 0, 0)); OutVals.push_back(Parts[i]); } } @@ -1601,7 +1625,10 @@ void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB, // Update successor info addSuccessorWithWeight(SwitchBB, CB.TrueBB, CB.TrueWeight); - addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight); + // TrueBB and FalseBB are always different unless the incoming IR is + // degenerate. This only happens when running llc on weird IR. + if (CB.TrueBB != CB.FalseBB) + addSuccessorWithWeight(SwitchBB, CB.FalseBB, CB.FalseWeight); // Set NextBlock to be the MBB immediately after the current one, if any. // This is used to avoid emitting unnecessary branches to the next block. 
@@ -1762,6 +1789,7 @@ void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B, /// visitBitTestCase - this function produces one "bit test" void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB, MachineBasicBlock* NextMBB, + uint32_t BranchWeightToNext, unsigned Reg, BitTestCase &B, MachineBasicBlock *SwitchBB) { @@ -1799,8 +1827,10 @@ void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB, ISD::SETNE); } - addSuccessorWithWeight(SwitchBB, B.TargetBB); - addSuccessorWithWeight(SwitchBB, NextMBB); + // The branch weight from SwitchBB to B.TargetBB is B.ExtraWeight. + addSuccessorWithWeight(SwitchBB, B.TargetBB, B.ExtraWeight); + // The branch weight from SwitchBB to NextMBB is BranchWeightToNext. + addSuccessorWithWeight(SwitchBB, NextMBB, BranchWeightToNext); SDValue BrAnd = DAG.getNode(ISD::BRCOND, getCurDebugLoc(), MVT::Other, getControlRoot(), @@ -1923,6 +1953,7 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR, if (++BBI != FuncInfo.MF->end()) NextBlock = BBI; + BranchProbabilityInfo *BPI = FuncInfo.BPI; // If any two of the cases has the same destination, and if one value // is the same as the other, but has one bit unset that the other has set, // use bit manipulation to do two compares at once. For example: @@ -1956,8 +1987,12 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR, ISD::SETEQ); // Update successor info. - addSuccessorWithWeight(SwitchBB, Small.BB); - addSuccessorWithWeight(SwitchBB, Default); + // Both Small and Big will jump to Small.BB, so we sum up the weights. + addSuccessorWithWeight(SwitchBB, Small.BB, + Small.ExtraWeight + Big.ExtraWeight); + addSuccessorWithWeight(SwitchBB, Default, + // The default destination is the first successor in IR. + BPI ? BPI->getEdgeWeight(SwitchBB->getBasicBlock(), (unsigned)0) : 0); // Insert the true branch. SDValue BrCond = DAG.getNode(ISD::BRCOND, DL, MVT::Other, @@ -1975,14 +2010,13 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR, } // Order cases by weight so the most likely case will be checked first. - BranchProbabilityInfo *BPI = FuncInfo.BPI; + uint32_t UnhandledWeights = 0; if (BPI) { for (CaseItr I = CR.Range.first, IE = CR.Range.second; I != IE; ++I) { - uint32_t IWeight = BPI->getEdgeWeight(SwitchBB->getBasicBlock(), - I->BB->getBasicBlock()); + uint32_t IWeight = I->ExtraWeight; + UnhandledWeights += IWeight; for (CaseItr J = CR.Range.first; J < I; ++J) { - uint32_t JWeight = BPI->getEdgeWeight(SwitchBB->getBasicBlock(), - J->BB->getBasicBlock()); + uint32_t JWeight = J->ExtraWeight; if (IWeight > JWeight) std::swap(*I, *J); } @@ -2031,10 +2065,12 @@ bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR, LHS = I->Low; MHS = SV; RHS = I->High; } - uint32_t ExtraWeight = I->ExtraWeight; + // The false weight should be sum of all un-handled cases. + UnhandledWeights -= I->ExtraWeight; CaseBlock CB(CC, LHS, RHS, MHS, /* truebb */ I->BB, /* falsebb */ FallThrough, /* me */ CurBlock, - /* trueweight */ ExtraWeight / 2, /* falseweight */ ExtraWeight / 2); + /* trueweight */ I->ExtraWeight, + /* falseweight */ UnhandledWeights); // If emitting the first comparison, just call visitSwitchCase to emit the // code into the current block. 
Otherwise, push the CaseBlock onto the @@ -2079,7 +2115,7 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR, for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) TSize += I->size(); - if (!areJTsAllowed(TLI) || TSize.ult(4)) + if (!areJTsAllowed(TLI) || TSize.ult(TLI.getMinimumJumpTableEntries())) return false; APInt Range = ComputeRange(First, Last); @@ -2134,13 +2170,28 @@ bool SelectionDAGBuilder::handleJTSwitchCase(CaseRec &CR, } } + // Calculate weight for each unique destination in CR. + DenseMap DestWeights; + if (FuncInfo.BPI) + for (CaseItr I = CR.Range.first, E = CR.Range.second; I != E; ++I) { + DenseMap::iterator Itr = + DestWeights.find(I->BB); + if (Itr != DestWeights.end()) + Itr->second += I->ExtraWeight; + else + DestWeights[I->BB] = I->ExtraWeight; + } + // Update successor info. Add one edge to each unique successor. BitVector SuccsHandled(CR.CaseBB->getParent()->getNumBlockIDs()); for (std::vector::iterator I = DestBBs.begin(), E = DestBBs.end(); I != E; ++I) { if (!SuccsHandled[(*I)->getNumber()]) { SuccsHandled[(*I)->getNumber()] = true; - addSuccessorWithWeight(JumpTableBB, *I); + DenseMap::iterator Itr = + DestWeights.find(*I); + addSuccessorWithWeight(JumpTableBB, *I, + Itr != DestWeights.end() ? Itr->second : 0); } } @@ -2371,7 +2422,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR, if (i == count) { assert((count < 3) && "Too much destinations to test!"); - CasesBits.push_back(CaseBits(0, Dest, 0)); + CasesBits.push_back(CaseBits(0, Dest, 0, 0/*Weight*/)); count++; } @@ -2380,6 +2431,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR, uint64_t lo = (lowValue - lowBound).getZExtValue(); uint64_t hi = (highValue - lowBound).getZExtValue(); + CasesBits[i].ExtraWeight += I->ExtraWeight; for (uint64_t j = lo; j <= hi; j++) { CasesBits[i].Mask |= 1ULL << j; @@ -2407,7 +2459,7 @@ bool SelectionDAGBuilder::handleBitTestsSwitchCase(CaseRec& CR, CurMF->insert(BBI, CaseBB); BTC.push_back(BitTestCase(CasesBits[i].Mask, CaseBB, - CasesBits[i].BB)); + CasesBits[i].BB, CasesBits[i].ExtraWeight)); // Put SV in a virtual register to make it available from the new blocks. ExportFromCurrentBlock(SV); @@ -2435,30 +2487,25 @@ size_t SelectionDAGBuilder::Clusterify(CaseVector& Cases, Clusterifier TheClusterifier; + BranchProbabilityInfo *BPI = FuncInfo.BPI; // Start with "simple" cases for (SwitchInst::ConstCaseIt i = SI.case_begin(), e = SI.case_end(); i != e; ++i) { const BasicBlock *SuccBB = i.getCaseSuccessor(); MachineBasicBlock *SMBB = FuncInfo.MBBMap[SuccBB]; - TheClusterifier.add(i.getCaseValueEx(), SMBB); + TheClusterifier.add(i.getCaseValueEx(), SMBB, + BPI ? BPI->getEdgeWeight(SI.getParent(), i.getSuccessorIndex()) : 0); } TheClusterifier.optimize(); - BranchProbabilityInfo *BPI = FuncInfo.BPI; size_t numCmps = 0; for (Clusterifier::RangeIterator i = TheClusterifier.begin(), e = TheClusterifier.end(); i != e; ++i, ++numCmps) { Clusterifier::Cluster &C = *i; - unsigned W = 0; - if (BPI) { - W = BPI->getEdgeWeight(SI.getParent(), C.second->getBasicBlock()); - if (!W) - W = 16; - W *= C.first.Weight; - BPI->setEdgeWeight(SI.getParent(), C.second->getBasicBlock(), W); - } + // Update edge weight for the cluster. + unsigned W = C.first.Weight; // FIXME: Currently work with ConstantInt based numbers. // Changing it to APInt based is a pretty heavy for this commit. 
@@ -2540,9 +2587,10 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) { if (handleSmallSwitchRange(CR, WorkList, SV, Default, SwitchMBB)) continue; - // If the switch has more than 5 blocks, and at least 40% dense, and the + // If the switch has more than N blocks, and is at least 40% dense, and the // target supports indirect branches, then emit a jump table rather than // lowering the switch to a binary tree of conditional branches. + // N defaults to 4 and is controlled via TLS.getMinimumJumpTableEntries(). if (handleJTSwitchCase(CR, WorkList, SV, Default, SwitchMBB)) continue; @@ -2556,14 +2604,14 @@ void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) { MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB; // Update machine-CFG edges with unique successors. - SmallVector succs; - succs.reserve(I.getNumSuccessors()); - for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) - succs.push_back(I.getSuccessor(i)); - array_pod_sort(succs.begin(), succs.end()); - succs.erase(std::unique(succs.begin(), succs.end()), succs.end()); - for (unsigned i = 0, e = succs.size(); i != e; ++i) { - MachineBasicBlock *Succ = FuncInfo.MBBMap[succs[i]]; + SmallSet Done; + for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) { + BasicBlock *BB = I.getSuccessor(i); + bool Inserted = Done.insert(BB); + if (!Inserted) + continue; + + MachineBasicBlock *Succ = FuncInfo.MBBMap[BB]; addSuccessorWithWeight(IndirectBrMBB, Succ); } @@ -3160,9 +3208,9 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) { return; // getValue will auto-populate this. Type *Ty = I.getAllocatedType(); - uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty); + uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty); unsigned Align = - std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), + std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), I.getAlignment()); SDValue AllocSize = getValue(I.getArraySize()); @@ -3460,7 +3508,7 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) { SDValue InChain = getRoot(); - EVT VT = EVT::getEVT(I.getType()); + EVT VT = TLI.getValueType(I.getType()); if (I.getAlignment() * 8 < VT.getSizeInBits()) report_fatal_error("Cannot generate unaligned atomic load"); @@ -3490,7 +3538,7 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) { SDValue InChain = getRoot(); - EVT VT = EVT::getEVT(I.getValueOperand()->getType()); + EVT VT = TLI.getValueType(I.getValueOperand()->getType()); if (I.getAlignment() * 8 < VT.getSizeInBits()) report_fatal_error("Cannot generate unaligned atomic store"); @@ -4352,7 +4400,7 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS, return DAG.getConstantFP(1.0, LHS.getValueType()); const Function *F = DAG.getMachineFunction().getFunction(); - if (!F->hasFnAttr(Attribute::OptimizeForSize) || + if (!F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize) || // If optimizing for size, don't insert too many multiplies. This // inserts up to 5 multiplies. 
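// Illustrative sketch of the jump-table heuristic described above: a table is
// emitted only when the number of handled cases reaches the target's minimum
// entry count (defaulting to 4, via getMinimumJumpTableEntries) and the table
// would be roughly 40% dense. The exact comparisons in the code above differ
// slightly; this only shows the shape of the decision, and shouldUseJumpTable
// is a hypothetical name. Overflow for huge ranges is ignored.
#include <cstdint>

bool shouldUseJumpTable(uint64_t NumCases, uint64_t First, uint64_t Last,
                        unsigned MinJumpTableEntries /* e.g. 4 */) {
  if (NumCases < MinJumpTableEntries)
    return false;                    // too few cases; prefer a compare tree
  uint64_t Range = Last - First + 1; // slots the table would need
  return NumCases * 10 >= Range * 4; // density >= 40%, in integer arithmetic
}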
CountPopulation_32(Val)+Log2_32(Val) < 7) { @@ -4850,7 +4898,21 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, DestVT, getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), - DAG.getConstant(Idx, MVT::i32)); + DAG.getIntPtrConstant(Idx)); + setValue(&I, Res); + return 0; + } + case Intrinsic::x86_avx_vextractf128_pd_256: + case Intrinsic::x86_avx_vextractf128_ps_256: + case Intrinsic::x86_avx_vextractf128_si_256: + case Intrinsic::x86_avx2_vextracti128: { + DebugLoc dl = getCurDebugLoc(); + EVT DestVT = TLI.getValueType(I.getType()); + uint64_t Idx = (cast(I.getArgOperand(1))->getZExtValue() & 1) * + DestVT.getVectorNumElements(); + Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, + getValue(I.getArgOperand(0)), + DAG.getIntPtrConstant(Idx)); setValue(&I, Res); return 0; } @@ -5113,10 +5175,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { return 0; } + case Intrinsic::debugtrap: case Intrinsic::trap: { StringRef TrapFuncName = TM.Options.getTrapFunctionName(); if (TrapFuncName.empty()) { - DAG.setRoot(DAG.getNode(ISD::TRAP, dl,MVT::Other, getRoot())); + ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ? + ISD::TRAP : ISD::DEBUGTRAP; + DAG.setRoot(DAG.getNode(Op, dl,MVT::Other, getRoot())); return 0; } TargetLowering::ArgListTy Args; @@ -5131,10 +5196,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { DAG.setRoot(Result.second); return 0; } - case Intrinsic::debugtrap: { - DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, dl,MVT::Other, getRoot())); - return 0; - } + case Intrinsic::uadd_with_overflow: case Intrinsic::sadd_with_overflow: case Intrinsic::usub_with_overflow: @@ -5177,14 +5239,40 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { rw==1)); /* write */ return 0; } + case Intrinsic::lifetime_start: + case Intrinsic::lifetime_end: { + bool IsStart = (Intrinsic == Intrinsic::lifetime_start); + // Stack coloring is not enabled in O0, discard region information. + if (TM.getOptLevel() == CodeGenOpt::None) + return 0; + + SmallVector Allocas; + GetUnderlyingObjects(I.getArgOperand(1), Allocas, TD); + + for (SmallVector::iterator Object = Allocas.begin(), + E = Allocas.end(); Object != E; ++Object) { + AllocaInst *LifetimeObject = dyn_cast_or_null(*Object); + // Could not find an Alloca. + if (!LifetimeObject) + continue; + + int FI = FuncInfo.StaticAllocaMap[LifetimeObject]; + + SDValue Ops[2]; + Ops[0] = getRoot(); + Ops[1] = DAG.getFrameIndex(FI, TLI.getPointerTy(), true); + unsigned Opcode = (IsStart ? ISD::LIFETIME_START : ISD::LIFETIME_END); + + Res = DAG.getNode(Opcode, dl, MVT::Other, Ops, 2); + DAG.setRoot(Res); + } + } case Intrinsic::invariant_start: - case Intrinsic::lifetime_start: // Discard region information. setValue(&I, DAG.getUNDEF(TLI.getPointerTy())); return 0; case Intrinsic::invariant_end: - case Intrinsic::lifetime_end: // Discard region information. 
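// Illustrative sketch of the lifetime lowering above: at -O0 the markers are
// simply discarded; otherwise each marker is resolved to the allocas it
// covers, each alloca to its frame index, and a LIFETIME_START/LIFETIME_END
// node is chained for that index so stack coloring can later reuse slots. A
// hypothetical standalone analogue that just records the same events.
#include <map>
#include <vector>

struct LifetimeEvent { bool IsStart; int FrameIndex; };

void lowerLifetimeMarker(bool IsStart, bool OptimizationsEnabled,
                         const std::vector<const void *> &UnderlyingAllocas,
                         const std::map<const void *, int> &FrameIndexOf,
                         std::vector<LifetimeEvent> &Out) {
  if (!OptimizationsEnabled)
    return; // -O0: discard region information, as above
  for (const void *A : UnderlyingAllocas) {
    auto It = FrameIndexOf.find(A);
    if (It == FrameIndexOf.end())
      continue; // not a static alloca we know about
    Out.push_back(LifetimeEvent{IsStart, It->second});
  }
}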
return 0; case Intrinsic::donothing: @@ -5220,9 +5308,9 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, int DemoteStackIdx = -100; if (!CanLowerReturn) { - uint64_t TySize = TLI.getTargetData()->getTypeAllocSize( + uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize( FTy->getReturnType()); - unsigned Align = TLI.getTargetData()->getPrefTypeAlignment( + unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment( FTy->getReturnType()); MachineFunction &MF = DAG.getMachineFunction(); DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); @@ -5254,12 +5342,12 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, Entry.Node = ArgNode; Entry.Ty = V->getType(); unsigned attrInd = i - CS.arg_begin() + 1; - Entry.isSExt = CS.paramHasAttr(attrInd, Attribute::SExt); - Entry.isZExt = CS.paramHasAttr(attrInd, Attribute::ZExt); - Entry.isInReg = CS.paramHasAttr(attrInd, Attribute::InReg); - Entry.isSRet = CS.paramHasAttr(attrInd, Attribute::StructRet); - Entry.isNest = CS.paramHasAttr(attrInd, Attribute::Nest); - Entry.isByVal = CS.paramHasAttr(attrInd, Attribute::ByVal); + Entry.isSExt = CS.paramHasAttr(attrInd, Attributes::SExt); + Entry.isZExt = CS.paramHasAttr(attrInd, Attributes::ZExt); + Entry.isInReg = CS.paramHasAttr(attrInd, Attributes::InReg); + Entry.isSRet = CS.paramHasAttr(attrInd, Attributes::StructRet); + Entry.isNest = CS.paramHasAttr(attrInd, Attributes::Nest); + Entry.isByVal = CS.paramHasAttr(attrInd, Attributes::ByVal); Entry.Alignment = CS.getParamAlignment(attrInd); Args.push_back(Entry); } @@ -5687,7 +5775,7 @@ public: /// MVT::Other. EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI, - const TargetData *TD) const { + const DataLayout *TD) const { if (CallOperandVal == 0) return MVT::Other; if (isa(CallOperandVal)) @@ -5991,8 +6079,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { // Otherwise, create a stack slot and emit a store to it before the // asm. Type *Ty = OpVal->getType(); - uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty); - unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty); + uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty); + unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty); MachineFunction &MF = DAG.getMachineFunction(); int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy()); @@ -6040,12 +6128,36 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc"); AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc)); - // Remember the HasSideEffect and AlignStack bits as operand 3. + // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore + // bits as operand 3. unsigned ExtraInfo = 0; if (IA->hasSideEffects()) ExtraInfo |= InlineAsm::Extra_HasSideEffects; if (IA->isAlignStack()) ExtraInfo |= InlineAsm::Extra_IsAlignStack; + // Set the asm dialect. + ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect; + + // Determine if this InlineAsm MayLoad or MayStore based on the constraints. + for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) { + TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i]; + + // Compute the constraint code and ConstraintType to use. + TLI.ComputeConstraintToUse(OpInfo, SDValue()); + + // Ideally, we would only check against memory constraints. 
However, the + // meaning of an other constraint can be target-specific and we can't easily + // reason about it. Therefore, be conservative and set MayLoad/MayStore + // for other constriants as well. + if (OpInfo.ConstraintType == TargetLowering::C_Memory || + OpInfo.ConstraintType == TargetLowering::C_Other) { + if (OpInfo.Type == InlineAsm::isInput) + ExtraInfo |= InlineAsm::Extra_MayLoad; + else if (OpInfo.Type == InlineAsm::isOutput) + ExtraInfo |= InlineAsm::Extra_MayStore; + } + } + AsmNodeOperands.push_back(DAG.getTargetConstant(ExtraInfo, TLI.getPointerTy())); @@ -6155,7 +6267,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { // Use the produced MatchedRegs object to MatchedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(), - Chain, &Flag); + Chain, &Flag, CS.getInstruction()); MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, true, OpInfo.getMatchedOperand(), DAG, AsmNodeOperands); @@ -6237,7 +6349,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { } OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, getCurDebugLoc(), - Chain, &Flag); + Chain, &Flag, CS.getInstruction()); OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0, DAG, AsmNodeOperands); @@ -6268,7 +6380,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { // and set it as the value of the call. if (!RetValRegs.Regs.empty()) { SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), - Chain, &Flag); + Chain, &Flag, CS.getInstruction()); // FIXME: Why don't we do this for inline asms with MRVs? if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) { @@ -6308,7 +6420,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) { RegsForValue &OutRegs = IndirectStoresToEmit[i].first; const Value *Ptr = IndirectStoresToEmit[i].second; SDValue OutVal = OutRegs.getCopyFromRegs(DAG, FuncInfo, getCurDebugLoc(), - Chain, &Flag); + Chain, &Flag, IA); StoresToEmit.push_back(std::make_pair(OutVal, Ptr)); } @@ -6338,7 +6450,7 @@ void SelectionDAGBuilder::visitVAStart(const CallInst &I) { } void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) { - const TargetData &TD = *TLI.getTargetData(); + const DataLayout &TD = *TLI.getDataLayout(); SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(), getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)), @@ -6384,7 +6496,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { Args[i].Node.getResNo() + Value); ISD::ArgFlagsTy Flags; unsigned OriginalAlignment = - getTargetData()->getABITypeAlignment(ArgTy); + getDataLayout()->getABITypeAlignment(ArgTy); if (Args[i].isZExt) Flags.setZExt(); @@ -6398,7 +6510,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { Flags.setByVal(); PointerType *Ty = cast(Args[i].Ty); Type *ElementTy = Ty->getElementType(); - Flags.setByValSize(getTargetData()->getTypeAllocSize(ElementTy)); + Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy)); // For ByVal, alignment should come from FE. BE will guess if this // info is not there but there are cases it cannot get right. unsigned FrameAlign; @@ -6423,12 +6535,13 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { ExtendKind = ISD::ZERO_EXTEND; getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, - PartVT, ExtendKind); + PartVT, CLI.CS ? 
CLI.CS->getInstruction() : 0, ExtendKind); for (unsigned j = 0; j != NumParts; ++j) { // if it isn't first piece, alignment must be 1 ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), - i < CLI.NumFixedArgs); + i < CLI.NumFixedArgs, + i, j*Parts[j].getValueType().getStoreSize()); if (NumParts > 1 && j == 0) MyFlags.Flags.setSplit(); else if (j != 0) @@ -6504,7 +6617,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const { unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), VT); ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg], - NumRegs, RegisterVT, VT, + NumRegs, RegisterVT, VT, NULL, AssertOp)); CurReg += NumRegs; } @@ -6543,7 +6656,7 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) { RegsForValue RFV(V->getContext(), TLI, Reg, V->getType()); SDValue Chain = DAG.getEntryNode(); - RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0); + RFV.getCopyToRegs(Op, DAG, getCurDebugLoc(), Chain, 0, V); PendingExports.push_back(Chain); } @@ -6573,7 +6686,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { const Function &F = *LLVMBB->getParent(); SelectionDAG &DAG = SDB->DAG; DebugLoc dl = SDB->getCurDebugLoc(); - const TargetData *TD = TLI.getTargetData(); + const DataLayout *TD = TLI.getDataLayout(); SmallVector Ins; // Check whether the function can return without sret-demotion. @@ -6591,7 +6704,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { ISD::ArgFlagsTy Flags; Flags.setSRet(); EVT RegisterVT = TLI.getRegisterType(*DAG.getContext(), ValueVTs[0]); - ISD::InputArg RetArg(Flags, RegisterVT, true); + ISD::InputArg RetArg(Flags, RegisterVT, true, 0, 0); Ins.push_back(RetArg); } @@ -6610,15 +6723,15 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { unsigned OriginalAlignment = TD->getABITypeAlignment(ArgTy); - if (F.paramHasAttr(Idx, Attribute::ZExt)) + if (F.getParamAttributes(Idx).hasAttribute(Attributes::ZExt)) Flags.setZExt(); - if (F.paramHasAttr(Idx, Attribute::SExt)) + if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt)) Flags.setSExt(); - if (F.paramHasAttr(Idx, Attribute::InReg)) + if (F.getParamAttributes(Idx).hasAttribute(Attributes::InReg)) Flags.setInReg(); - if (F.paramHasAttr(Idx, Attribute::StructRet)) + if (F.getParamAttributes(Idx).hasAttribute(Attributes::StructRet)) Flags.setSRet(); - if (F.paramHasAttr(Idx, Attribute::ByVal)) { + if (F.getParamAttributes(Idx).hasAttribute(Attributes::ByVal)) { Flags.setByVal(); PointerType *Ty = cast(I->getType()); Type *ElementTy = Ty->getElementType(); @@ -6632,14 +6745,15 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { FrameAlign = TLI.getByValTypeAlignment(ElementTy); Flags.setByValAlign(FrameAlign); } - if (F.paramHasAttr(Idx, Attribute::Nest)) + if (F.getParamAttributes(Idx).hasAttribute(Attributes::Nest)) Flags.setNest(); Flags.setOrigAlign(OriginalAlignment); EVT RegisterVT = TLI.getRegisterType(*CurDAG->getContext(), VT); unsigned NumRegs = TLI.getNumRegisters(*CurDAG->getContext(), VT); for (unsigned i = 0; i != NumRegs; ++i) { - ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed); + ISD::InputArg MyFlags(Flags, RegisterVT, isArgValueUsed, + Idx-1, i*RegisterVT.getStoreSize()); if (NumRegs > 1 && i == 0) MyFlags.Flags.setSplit(); // if it isn't first piece, alignment must be 1 @@ -6685,7 +6799,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { EVT RegVT = TLI.getRegisterType(*CurDAG->getContext(), VT); ISD::NodeType AssertOp = 
ISD::DELETED_NODE; SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, - RegVT, VT, AssertOp); + RegVT, VT, NULL, AssertOp); MachineFunction& MF = SDB->DAG.getMachineFunction(); MachineRegisterInfo& RegInfo = MF.getRegInfo(); @@ -6719,14 +6833,14 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { if (!I->use_empty()) { ISD::NodeType AssertOp = ISD::DELETED_NODE; - if (F.paramHasAttr(Idx, Attribute::SExt)) + if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt)) AssertOp = ISD::AssertSext; - else if (F.paramHasAttr(Idx, Attribute::ZExt)) + else if (F.getParamAttributes(Idx).hasAttribute(Attributes::ZExt)) AssertOp = ISD::AssertZext; ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts, PartVT, VT, - AssertOp)); + NULL, AssertOp)); } i += NumParts; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h index 4090002..9e46d96 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h @@ -66,7 +66,7 @@ class ShuffleVectorInst; class SIToFPInst; class StoreInst; class SwitchInst; -class TargetData; +class DataLayout; class TargetLibraryInfo; class TargetLowering; class TruncInst; @@ -150,9 +150,11 @@ private: uint64_t Mask; MachineBasicBlock* BB; unsigned Bits; + uint32_t ExtraWeight; - CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits): - Mask(mask), BB(bb), Bits(bits) { } + CaseBits(uint64_t mask, MachineBasicBlock* bb, unsigned bits, + uint32_t Weight): + Mask(mask), BB(bb), Bits(bits), ExtraWeight(Weight) { } }; typedef std::vector CaseVector; @@ -247,11 +249,13 @@ private: typedef std::pair JumpTableBlock; struct BitTestCase { - BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr): - Mask(M), ThisBB(T), TargetBB(Tr) { } + BitTestCase(uint64_t M, MachineBasicBlock* T, MachineBasicBlock* Tr, + uint32_t Weight): + Mask(M), ThisBB(T), TargetBB(Tr), ExtraWeight(Weight) { } uint64_t Mask; MachineBasicBlock *ThisBB; MachineBasicBlock *TargetBB; + uint32_t ExtraWeight; }; typedef SmallVector BitTestInfo; @@ -281,7 +285,7 @@ public: const TargetMachine &TM; const TargetLowering &TLI; SelectionDAG &DAG; - const TargetData *TD; + const DataLayout *TD; AliasAnalysis *AA; const TargetLibraryInfo *LibInfo; @@ -325,7 +329,7 @@ public: CodeGenOpt::Level ol) : SDNodeOrder(0), TM(dag.getTarget()), TLI(dag.getTargetLoweringInfo()), DAG(dag), FuncInfo(funcinfo), OptLevel(ol), - HasTailCall(false), Context(dag.getContext()) { + HasTailCall(false) { } void init(GCFunctionInfo *gfi, AliasAnalysis &aa, @@ -452,6 +456,7 @@ public: void visitBitTestHeader(BitTestBlock &B, MachineBasicBlock *SwitchBB); void visitBitTestCase(BitTestBlock &BB, MachineBasicBlock* NextMBB, + uint32_t BranchWeightToNext, unsigned Reg, BitTestCase &B, MachineBasicBlock *SwitchBB); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp index 13cd011..6f3ce7a 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp @@ -267,6 +267,8 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const { case ISD::STACKRESTORE: return "stackrestore"; case ISD::TRAP: return "trap"; case ISD::DEBUGTRAP: return "debugtrap"; + case ISD::LIFETIME_START: return "lifetime.start"; + case ISD::LIFETIME_END: return "lifetime.end"; // Bit manipulation case ISD::BSWAP: return "bswap"; @@ -331,7 +333,7 @@ void SDNode::dump(const SelectionDAG *G) const { } void 
SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const { - OS << (void*)this << ": "; + OS << (const void*)this << ": "; for (unsigned i = 0, e = getNumValues(); i != e; ++i) { if (i) OS << ","; @@ -473,11 +475,16 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const { OS << "<" << *M->getMemOperand() << ">"; } else if (const BlockAddressSDNode *BA = dyn_cast(this)) { + int64_t offset = BA->getOffset(); OS << "<"; WriteAsOperand(OS, BA->getBlockAddress()->getFunction(), false); OS << ", "; WriteAsOperand(OS, BA->getBlockAddress()->getBasicBlock(), false); OS << ">"; + if (offset > 0) + OS << " + " << offset; + else + OS << " " << offset; if (unsigned int TF = BA->getTargetFlags()) OS << " [TF=" << TF << ']'; } @@ -559,7 +566,7 @@ static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent, child->printr(OS, G); once.insert(child); } else { // Just the address. FIXME: also print the child's opcode. - OS << (void*)child; + OS << (const void*)child; if (unsigned RN = N->getOperand(i).getResNo()) OS << ":" << RN; } diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index 4e5e3ba..c314fa5 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -474,6 +474,11 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) { MRI.replaceRegWith(From, To); } + // Freeze the set of reserved registers now that MachineFrameInfo has been + // set up. All the information required by getReservedRegs() should be + // available now. + MRI.freezeReservedRegs(*MF); + // Release function-specific state. SDB and CurDAG are already cleared // at this point. FuncInfo->clear(); @@ -554,7 +559,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() { #endif { BlockNumber = FuncInfo->MBB->getNumber(); - BlockName = MF->getFunction()->getName().str() + ":" + + BlockName = MF->getName().str() + ":" + FuncInfo->MBB->getBasicBlock()->getName().str(); } DEBUG(dbgs() << "Initial selection DAG: BB#" << BlockNumber @@ -1209,7 +1214,12 @@ SelectionDAGISel::FinishBasicBlock() { CodeGenAndEmitDAG(); } + uint32_t UnhandledWeight = 0; + for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) + UnhandledWeight += SDB->BitTestCases[i].Cases[j].ExtraWeight; + for (unsigned j = 0, ej = SDB->BitTestCases[i].Cases.size(); j != ej; ++j) { + UnhandledWeight -= SDB->BitTestCases[i].Cases[j].ExtraWeight; // Set the current basic block to the mbb we wish to insert the code into FuncInfo->MBB = SDB->BitTestCases[i].Cases[j].ThisBB; FuncInfo->InsertPt = FuncInfo->MBB->end(); @@ -1217,12 +1227,14 @@ SelectionDAGISel::FinishBasicBlock() { if (j+1 != ej) SDB->visitBitTestCase(SDB->BitTestCases[i], SDB->BitTestCases[i].Cases[j+1].ThisBB, + UnhandledWeight, SDB->BitTestCases[i].Reg, SDB->BitTestCases[i].Cases[j], FuncInfo->MBB); else SDB->visitBitTestCase(SDB->BitTestCases[i], SDB->BitTestCases[i].Default, + UnhandledWeight, SDB->BitTestCases[i].Reg, SDB->BitTestCases[i].Cases[j], FuncInfo->MBB); @@ -1794,10 +1806,13 @@ WalkChainUsers(const SDNode *ChainedNode, User->getOpcode() == ISD::HANDLENODE) // Root of the graph. 
continue; - if (User->getOpcode() == ISD::CopyToReg || - User->getOpcode() == ISD::CopyFromReg || - User->getOpcode() == ISD::INLINEASM || - User->getOpcode() == ISD::EH_LABEL) { + unsigned UserOpcode = User->getOpcode(); + if (UserOpcode == ISD::CopyToReg || + UserOpcode == ISD::CopyFromReg || + UserOpcode == ISD::INLINEASM || + UserOpcode == ISD::EH_LABEL || + UserOpcode == ISD::LIFETIME_START || + UserOpcode == ISD::LIFETIME_END) { // If their node ID got reset to -1 then they've already been selected. // Treat them like a MachineOpcode. if (User->getNodeId() == -1) @@ -1994,7 +2009,7 @@ MorphNode(SDNode *Node, unsigned TargetOpc, SDVTList VTList, return Res; } -/// CheckPatternPredicate - Implements OP_CheckPatternPredicate. +/// CheckSame - Implements OP_CheckSame. LLVM_ATTRIBUTE_ALWAYS_INLINE static bool CheckSame(const unsigned char *MatcherTable, unsigned &MatcherIndex, SDValue N, @@ -2213,6 +2228,8 @@ SelectCodeCommon(SDNode *NodeToMatch, const unsigned char *MatcherTable, case ISD::CopyFromReg: case ISD::CopyToReg: case ISD::EH_LABEL: + case ISD::LIFETIME_START: + case ISD::LIFETIME_END: NodeToMatch->setNodeId(-1); // Mark selected. return 0; case ISD::AssertSext: @@ -2981,7 +2998,7 @@ void SelectionDAGISel::CannotYetSelect(SDNode *N) { N->getOpcode() != ISD::INTRINSIC_WO_CHAIN && N->getOpcode() != ISD::INTRINSIC_VOID) { N->printrFull(Msg, CurDAG); - Msg << "\nIn function: " << MF->getFunction()->getName(); + Msg << "\nIn function: " << MF->getName(); } else { bool HasInputChain = N->getOperand(0).getValueType() == MVT::Other; unsigned iid = diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp index 173ffac..3921635 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGPrinter.cpp @@ -14,7 +14,6 @@ #include "ScheduleDAGSDNodes.h" #include "llvm/Constants.h" #include "llvm/DebugInfo.h" -#include "llvm/Function.h" #include "llvm/Assembly/Writer.h" #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/MachineConstantPool.h" @@ -50,7 +49,7 @@ namespace llvm { template static std::string getEdgeSourceLabel(const void *Node, EdgeIter I) { - return itostr(I - SDNodeIterator::begin((SDNode *) Node)); + return itostr(I - SDNodeIterator::begin((const SDNode *) Node)); } /// edgeTargetsEdgeSource - This method returns true if this outgoing edge @@ -73,7 +72,7 @@ namespace llvm { } static std::string getGraphName(const SelectionDAG *G) { - return G->getMachineFunction().getFunction()->getName(); + return G->getMachineFunction().getName(); } static bool renderGraphFromBottomUp() { @@ -146,7 +145,7 @@ std::string DOTGraphTraits::getNodeLabel(const SDNode *Node, void SelectionDAG::viewGraph(const std::string &Title) { // This code is only for debugging! #ifndef NDEBUG - ViewGraph(this, "dag." + getMachineFunction().getFunction()->getName(), + ViewGraph(this, "dag." 
+ getMachineFunction().getName(), false, Title); #else errs() << "SelectionDAG::viewGraph is only available in debug builds on " diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 6820175..49f55e2 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -14,7 +14,7 @@ #include "llvm/Target/TargetLowering.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCExpr.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" @@ -515,7 +515,7 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) { /// NOTE: The constructor takes ownership of TLOF. TargetLowering::TargetLowering(const TargetMachine &tm, const TargetLoweringObjectFile *tlof) - : TM(tm), TD(TM.getTargetData()), TLOF(*tlof) { + : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) { // All operations default to being supported. memset(OpActions, 0, sizeof(OpActions)); memset(LoadExtActions, 0, sizeof(LoadExtActions)); @@ -583,8 +583,13 @@ TargetLowering::TargetLowering(const TargetMachine &tm, // Default ISD::TRAP to expand (which turns it into abort). setOperationAction(ISD::TRAP, MVT::Other, Expand); + // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand" + // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP. + // + setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand); + IsLittleEndian = TD->isLittleEndian(); - PointerTy = MVT::getIntegerVT(8*TD->getPointerSize()); + PointerTy = MVT::getIntegerVT(8*TD->getPointerSize(0)); memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*)); memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray)); maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8; @@ -613,6 +618,7 @@ TargetLowering::TargetLowering(const TargetMachine &tm, ShouldFoldAtomicFences = false; InsertFencesForAtomic = false; SupportJumpTables = true; + MinimumJumpTableEntries = 4; InitLibcallNames(LibcallRoutineNames); InitCmpLibcallCCs(CmpLibcallCCs); @@ -624,7 +630,7 @@ TargetLowering::~TargetLowering() { } MVT TargetLowering::getShiftAmountTy(EVT LHSTy) const { - return MVT::getIntegerVT(8*TD->getPointerSize()); + return MVT::getIntegerVT(8*TD->getPointerSize(0)); } /// canOpTrap - Returns true if the operation can trap for the value type. 
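[Editor's note, not part of the patch] The TargetLowering constructor change above registers ISD::DEBUGTRAP as Expand by default, so on targets without a dedicated breakpoint instruction the DAG legalizer rewrites llvm.debugtrap into a plain ISD::TRAP and nothing changes for them. The fragment below is a hedged sketch of how a backend would opt back in; "Foo" is a hypothetical target, and the opt-in pattern is an assumption based on the usual setOperationAction() usage rather than code taken from this patch.

    // Hedged sketch: "FooTargetLowering" is hypothetical, not part of this
    // patch. A backend with a real breakpoint instruction overrides the new
    // Expand default so llvm.debugtrap survives legalization as
    // ISD::DEBUGTRAP and can be matched to that instruction.
    FooTargetLowering::FooTargetLowering(FooTargetMachine &TM)
        : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
      setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
      // Targets without such an instruction do nothing here: the base
      // constructor already set DEBUGTRAP to Expand, so the legalizer turns
      // it into ISD::TRAP and the pre-patch behaviour is preserved.
    }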
@@ -772,7 +778,7 @@ void TargetLowering::computeRegisterProperties() { LegalIntReg = IntReg; } else { RegisterTypeForVT[IntReg] = TransformToType[IntReg] = - (MVT::SimpleValueType)LegalIntReg; + (const MVT::SimpleValueType)LegalIntReg; ValueTypeActions.setTypeAction(IVT, TypePromoteInteger); } } @@ -898,10 +904,9 @@ const char *TargetLowering::getTargetNodeName(unsigned Opcode) const { return NULL; } - EVT TargetLowering::getSetCCResultType(EVT VT) const { assert(!VT.isVector() && "No default SetCC type for vectors!"); - return PointerTy.SimpleTy; + return getPointerTy(0).SimpleTy; } MVT::SimpleValueType TargetLowering::getCmpLibcallReturnType() const { @@ -997,9 +1002,9 @@ void llvm::GetReturnInfo(Type* ReturnType, Attributes attr, EVT VT = ValueVTs[j]; ISD::NodeType ExtendKind = ISD::ANY_EXTEND; - if (attr & Attribute::SExt) + if (attr.hasAttribute(Attributes::SExt)) ExtendKind = ISD::SIGN_EXTEND; - else if (attr & Attribute::ZExt) + else if (attr.hasAttribute(Attributes::ZExt)) ExtendKind = ISD::ZERO_EXTEND; // FIXME: C calling convention requires the return type to be promoted to @@ -1017,18 +1022,17 @@ void llvm::GetReturnInfo(Type* ReturnType, Attributes attr, // 'inreg' on function refers to return value ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy(); - if (attr & Attribute::InReg) + if (attr.hasAttribute(Attributes::InReg)) Flags.setInReg(); // Propagate extension type if any - if (attr & Attribute::SExt) + if (attr.hasAttribute(Attributes::SExt)) Flags.setSExt(); - else if (attr & Attribute::ZExt) + else if (attr.hasAttribute(Attributes::ZExt)) Flags.setZExt(); - for (unsigned i = 0; i < NumParts; ++i) { - Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true)); - } + for (unsigned i = 0; i < NumParts; ++i) + Outs.push_back(ISD::OutputArg(Flags, PartVT, /*isFixed=*/true, 0, 0)); } } @@ -1062,7 +1066,7 @@ SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table, if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) || (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress)) - return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy()); + return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(0)); return Table; } @@ -2441,7 +2445,7 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, if (N0 == N1) { // The sext(setcc()) => setcc() optimization relies on the appropriate // constant being emitted. 
- uint64_t EqVal; + uint64_t EqVal = 0; switch (getBooleanContents(N0.getValueType().isVector())) { case UndefinedBooleanContent: case ZeroOrOneBooleanContent: @@ -2954,8 +2958,9 @@ TargetLowering::AsmOperandInfoVector TargetLowering::ParseConstraints( EVT::getEVT(IntegerType::get(OpTy->getContext(), BitSize), true); break; } - } else if (dyn_cast(OpTy)) { - OpInfo.ConstraintVT = MVT::getIntegerVT(8*TD->getPointerSize()); + } else if (PointerType *PT = dyn_cast(OpTy)) { + OpInfo.ConstraintVT = MVT::getIntegerVT( + 8*TD->getPointerSize(PT->getAddressSpace())); } else { OpInfo.ConstraintVT = EVT::getEVT(OpTy, true); } diff --git a/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp b/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp index a081e3c..f769b44 100644 --- a/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp +++ b/lib/CodeGen/SelectionDAG/TargetSelectionDAGInfo.cpp @@ -16,7 +16,7 @@ using namespace llvm; TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM) - : TD(TM.getTargetData()) { + : TD(TM.getDataLayout()) { } TargetSelectionDAGInfo::~TargetSelectionDAGInfo() { diff --git a/lib/CodeGen/ShrinkWrapping.cpp b/lib/CodeGen/ShrinkWrapping.cpp index 21ae2f5..4fbe1b3 100644 --- a/lib/CodeGen/ShrinkWrapping.cpp +++ b/lib/CodeGen/ShrinkWrapping.cpp @@ -159,7 +159,7 @@ void PEI::initShrinkWrappingInfo() { // via --shrink-wrap-func=. #ifndef NDEBUG if (ShrinkWrapFunc != "") { - std::string MFName = MF->getFunction()->getName().str(); + std::string MFName = MF->getName().str(); ShrinkWrapThisFunction = (MFName == ShrinkWrapFunc); } #endif @@ -187,7 +187,7 @@ void PEI::placeCSRSpillsAndRestores(MachineFunction &Fn) { DEBUG(if (ShrinkWrapThisFunction) { dbgs() << "Place CSR spills/restores for " - << MF->getFunction()->getName() << "\n"; + << MF->getName() << "\n"; }); if (calculateSets(Fn)) @@ -364,7 +364,7 @@ bool PEI::calculateSets(MachineFunction &Fn) { // If no CSRs used, we are done. if (CSI.empty()) { DEBUG(if (ShrinkWrapThisFunction) - dbgs() << "DISABLED: " << Fn.getFunction()->getName() + dbgs() << "DISABLED: " << Fn.getName() << ": uses no callee-saved registers\n"); return false; } @@ -384,7 +384,7 @@ bool PEI::calculateSets(MachineFunction &Fn) { // implementation to functions with <= 500 MBBs. 
if (Fn.size() > 500) { DEBUG(if (ShrinkWrapThisFunction) - dbgs() << "DISABLED: " << Fn.getFunction()->getName() + dbgs() << "DISABLED: " << Fn.getName() << ": too large (" << Fn.size() << " MBBs)\n"); ShrinkWrapThisFunction = false; } @@ -466,7 +466,7 @@ bool PEI::calculateSets(MachineFunction &Fn) { } if (allCSRUsesInEntryBlock) { - DEBUG(dbgs() << "DISABLED: " << Fn.getFunction()->getName() + DEBUG(dbgs() << "DISABLED: " << Fn.getName() << ": all CSRs used in EntryBlock\n"); ShrinkWrapThisFunction = false; } else { @@ -478,7 +478,7 @@ bool PEI::calculateSets(MachineFunction &Fn) { allCSRsUsedInEntryFanout = false; } if (allCSRsUsedInEntryFanout) { - DEBUG(dbgs() << "DISABLED: " << Fn.getFunction()->getName() + DEBUG(dbgs() << "DISABLED: " << Fn.getName() << ": all CSRs used in imm successors of EntryBlock\n"); ShrinkWrapThisFunction = false; } @@ -505,7 +505,7 @@ bool PEI::calculateSets(MachineFunction &Fn) { if (dominatesExitNodes) { CSRUsedInChokePoints |= CSRUsed[MBB]; if (CSRUsedInChokePoints == UsedCSRegs) { - DEBUG(dbgs() << "DISABLED: " << Fn.getFunction()->getName() + DEBUG(dbgs() << "DISABLED: " << Fn.getName() << ": all CSRs used in choke point(s) at " << getBasicBlockName(MBB) << "\n"); ShrinkWrapThisFunction = false; @@ -521,7 +521,7 @@ bool PEI::calculateSets(MachineFunction &Fn) { return false; DEBUG({ - dbgs() << "ENABLED: " << Fn.getFunction()->getName(); + dbgs() << "ENABLED: " << Fn.getName(); if (HasFastExitPath) dbgs() << " (fast exit path)"; dbgs() << "\n"; @@ -861,7 +861,7 @@ void PEI::placeSpillsAndRestores(MachineFunction &Fn) { DEBUG(if (ShrinkWrapDebugging >= BasicInfo) { dbgs() << "-----------------------------------------------------------\n"; dbgs() << "total iterations = " << iterations << " ( " - << Fn.getFunction()->getName() + << Fn.getName() << " " << numSRReducedThisFunc << " " << Fn.size() << " )\n"; @@ -984,7 +984,7 @@ void PEI::verifySpillRestorePlacement() { if (isReturnBlock(SBB) || SBB->succ_size() == 0) { if (restored != spilled) { CSRegSet notRestored = (spilled - restored); - DEBUG(dbgs() << MF->getFunction()->getName() << ": " + DEBUG(dbgs() << MF->getName() << ": " << stringifyCSRegSet(notRestored) << " spilled at " << getBasicBlockName(MBB) << " are never restored on path to return " @@ -1032,7 +1032,7 @@ void PEI::verifySpillRestorePlacement() { } if (spilled != restored) { CSRegSet notSpilled = (restored - spilled); - DEBUG(dbgs() << MF->getFunction()->getName() << ": " + DEBUG(dbgs() << MF->getName() << ": " << stringifyCSRegSet(notSpilled) << " restored at " << getBasicBlockName(MBB) << " are never spilled\n"); diff --git a/lib/CodeGen/SjLjEHPrepare.cpp b/lib/CodeGen/SjLjEHPrepare.cpp index 980bd74..4b566fc 100644 --- a/lib/CodeGen/SjLjEHPrepare.cpp +++ b/lib/CodeGen/SjLjEHPrepare.cpp @@ -30,7 +30,7 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" @@ -191,58 +191,43 @@ setupFunctionContext(Function &F, ArrayRef LPads) { // that needs to be restored on all exits from the function. This is an alloca // because the value needs to be added to the global context list. 
unsigned Align = - TLI->getTargetData()->getPrefTypeAlignment(FunctionContextTy); + TLI->getDataLayout()->getPrefTypeAlignment(FunctionContextTy); FuncCtx = new AllocaInst(FunctionContextTy, 0, Align, "fn_context", EntryBB->begin()); // Fill in the function context structure. - Type *Int32Ty = Type::getInt32Ty(F.getContext()); - Value *Zero = ConstantInt::get(Int32Ty, 0); - Value *One = ConstantInt::get(Int32Ty, 1); - Value *Two = ConstantInt::get(Int32Ty, 2); - Value *Three = ConstantInt::get(Int32Ty, 3); - Value *Four = ConstantInt::get(Int32Ty, 4); - - Value *Idxs[2] = { Zero, 0 }; - for (unsigned I = 0, E = LPads.size(); I != E; ++I) { LandingPadInst *LPI = LPads[I]; IRBuilder<> Builder(LPI->getParent()->getFirstInsertionPt()); // Reference the __data field. - Idxs[1] = Two; - Value *FCData = Builder.CreateGEP(FuncCtx, Idxs, "__data"); + Value *FCData = Builder.CreateConstGEP2_32(FuncCtx, 0, 2, "__data"); // The exception values come back in context->__data[0]. - Idxs[1] = Zero; - Value *ExceptionAddr = Builder.CreateGEP(FCData, Idxs, "exception_gep"); + Value *ExceptionAddr = Builder.CreateConstGEP2_32(FCData, 0, 0, + "exception_gep"); Value *ExnVal = Builder.CreateLoad(ExceptionAddr, true, "exn_val"); - ExnVal = Builder.CreateIntToPtr(ExnVal, Type::getInt8PtrTy(F.getContext())); + ExnVal = Builder.CreateIntToPtr(ExnVal, Builder.getInt8PtrTy()); - Idxs[1] = One; - Value *SelectorAddr = Builder.CreateGEP(FCData, Idxs, "exn_selector_gep"); + Value *SelectorAddr = Builder.CreateConstGEP2_32(FCData, 0, 1, + "exn_selector_gep"); Value *SelVal = Builder.CreateLoad(SelectorAddr, true, "exn_selector_val"); substituteLPadValues(LPI, ExnVal, SelVal); } // Personality function - Idxs[1] = Three; + IRBuilder<> Builder(EntryBB->getTerminator()); if (!PersonalityFn) PersonalityFn = LPads[0]->getPersonalityFn(); - Value *PersonalityFieldPtr = - GetElementPtrInst::Create(FuncCtx, Idxs, "pers_fn_gep", - EntryBB->getTerminator()); - new StoreInst(PersonalityFn, PersonalityFieldPtr, true, - EntryBB->getTerminator()); + Value *PersonalityFieldPtr = Builder.CreateConstGEP2_32(FuncCtx, 0, 3, + "pers_fn_gep"); + Builder.CreateStore(PersonalityFn, PersonalityFieldPtr, /*isVolatile=*/true); // LSDA address - Value *LSDA = CallInst::Create(LSDAAddrFn, "lsda_addr", - EntryBB->getTerminator()); - Idxs[1] = Four; - Value *LSDAFieldPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "lsda_gep", - EntryBB->getTerminator()); - new StoreInst(LSDA, LSDAFieldPtr, true, EntryBB->getTerminator()); + Value *LSDA = Builder.CreateCall(LSDAAddrFn, "lsda_addr"); + Value *LSDAFieldPtr = Builder.CreateConstGEP2_32(FuncCtx, 0, 4, "lsda_gep"); + Builder.CreateStore(LSDA, LSDAFieldPtr, /*isVolatile=*/true); return FuncCtx; } @@ -417,48 +402,31 @@ bool SjLjEHPrepare::setupEntryBlockAndCallSites(Function &F) { Value *FuncCtx = setupFunctionContext(F, makeArrayRef(LPads.begin(), LPads.end())); BasicBlock *EntryBB = F.begin(); - Type *Int32Ty = Type::getInt32Ty(F.getContext()); - - Value *Idxs[2] = { - ConstantInt::get(Int32Ty, 0), 0 - }; + IRBuilder<> Builder(EntryBB->getTerminator()); // Get a reference to the jump buffer. - Idxs[1] = ConstantInt::get(Int32Ty, 5); - Value *JBufPtr = GetElementPtrInst::Create(FuncCtx, Idxs, "jbuf_gep", - EntryBB->getTerminator()); + Value *JBufPtr = Builder.CreateConstGEP2_32(FuncCtx, 0, 5, "jbuf_gep"); // Save the frame pointer. 
- Idxs[1] = ConstantInt::get(Int32Ty, 0); - Value *FramePtr = GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_fp_gep", - EntryBB->getTerminator()); + Value *FramePtr = Builder.CreateConstGEP2_32(JBufPtr, 0, 0, "jbuf_fp_gep"); - Value *Val = CallInst::Create(FrameAddrFn, - ConstantInt::get(Int32Ty, 0), - "fp", - EntryBB->getTerminator()); - new StoreInst(Val, FramePtr, true, EntryBB->getTerminator()); + Value *Val = Builder.CreateCall(FrameAddrFn, Builder.getInt32(0), "fp"); + Builder.CreateStore(Val, FramePtr, /*isVolatile=*/true); // Save the stack pointer. - Idxs[1] = ConstantInt::get(Int32Ty, 2); - Value *StackPtr = GetElementPtrInst::Create(JBufPtr, Idxs, "jbuf_sp_gep", - EntryBB->getTerminator()); + Value *StackPtr = Builder.CreateConstGEP2_32(JBufPtr, 0, 2, "jbuf_sp_gep"); - Val = CallInst::Create(StackAddrFn, "sp", EntryBB->getTerminator()); - new StoreInst(Val, StackPtr, true, EntryBB->getTerminator()); + Val = Builder.CreateCall(StackAddrFn, "sp"); + Builder.CreateStore(Val, StackPtr, /*isVolatile=*/true); // Call the setjmp instrinsic. It fills in the rest of the jmpbuf. - Value *SetjmpArg = CastInst::Create(Instruction::BitCast, JBufPtr, - Type::getInt8PtrTy(F.getContext()), "", - EntryBB->getTerminator()); - CallInst::Create(BuiltinSetjmpFn, SetjmpArg, "", EntryBB->getTerminator()); + Value *SetjmpArg = Builder.CreateBitCast(JBufPtr, Builder.getInt8PtrTy()); + Builder.CreateCall(BuiltinSetjmpFn, SetjmpArg); // Store a pointer to the function context so that the back-end will know // where to look for it. - Value *FuncCtxArg = CastInst::Create(Instruction::BitCast, FuncCtx, - Type::getInt8PtrTy(F.getContext()), "", - EntryBB->getTerminator()); - CallInst::Create(FuncCtxFn, FuncCtxArg, "", EntryBB->getTerminator()); + Value *FuncCtxArg = Builder.CreateBitCast(FuncCtx, Builder.getInt8PtrTy()); + Builder.CreateCall(FuncCtxFn, FuncCtxArg); // At this point, we are all set up, update the invoke instructions to mark // their call_site values. diff --git a/lib/CodeGen/SlotIndexes.cpp b/lib/CodeGen/SlotIndexes.cpp index c8c3fb3..95faafab 100644 --- a/lib/CodeGen/SlotIndexes.cpp +++ b/lib/CodeGen/SlotIndexes.cpp @@ -143,6 +143,7 @@ void SlotIndexes::renumberIndexes(IndexList::iterator curItr) { } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void SlotIndexes::dump() const { for (IndexList::const_iterator itr = indexList.begin(); itr != indexList.end(); ++itr) { @@ -159,6 +160,7 @@ void SlotIndexes::dump() const { dbgs() << "BB#" << i << "\t[" << MBBRanges[i].first << ';' << MBBRanges[i].second << ")\n"; } +#endif // Print a SlotIndex to a raw_ostream. void SlotIndex::print(raw_ostream &os) const { @@ -168,9 +170,11 @@ void SlotIndex::print(raw_ostream &os) const { os << "invalid"; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) // Dump a SlotIndex to stderr. 
void SlotIndex::dump() const { print(dbgs()); dbgs() << "\n"; } +#endif diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp index 4a2b7ec..dca15ee 100644 --- a/lib/CodeGen/SplitKit.cpp +++ b/lib/CodeGen/SplitKit.cpp @@ -356,6 +356,7 @@ void SplitEditor::reset(LiveRangeEdit &LRE, ComplementSpillMode SM) { Edit->anyRematerializable(0); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void SplitEditor::dump() const { if (RegAssign.empty()) { dbgs() << " empty\n"; @@ -366,6 +367,7 @@ void SplitEditor::dump() const { dbgs() << " [" << I.start() << ';' << I.stop() << "):" << I.value(); dbgs() << '\n'; } +#endif VNInfo *SplitEditor::defValue(unsigned RegIdx, const VNInfo *ParentVNI, diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp new file mode 100644 index 0000000..1cbee84 --- /dev/null +++ b/lib/CodeGen/StackColoring.cpp @@ -0,0 +1,783 @@ +//===-- StackColoring.cpp -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass implements the stack-coloring optimization that looks for +// lifetime markers machine instructions (LIFESTART_BEGIN and LIFESTART_END), +// which represent the possible lifetime of stack slots. It attempts to +// merge disjoint stack slots and reduce the used stack space. +// NOTE: This pass is not StackSlotColoring, which optimizes spill slots. +// +// TODO: In the future we plan to improve stack coloring in the following ways: +// 1. Allow merging multiple small slots into a single larger slot at different +// offsets. +// 2. Merge this pass with StackSlotColoring and allow merging of allocas with +// spill slots. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "stackcoloring" +#include "MachineTraceMetrics.h" +#include "llvm/Function.h" +#include "llvm/Module.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/Analysis/Dominators.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/ADT/DepthFirstIterator.h" +#include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SparseSet.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/CodeGen/LiveInterval.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/MachineBranchProbabilityInfo.h" +#include "llvm/CodeGen/MachineDominators.h" +#include "llvm/CodeGen/MachineBasicBlock.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineLoopInfo.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/CodeGen/SlotIndexes.h" +#include "llvm/DebugInfo.h" +#include "llvm/Instructions.h" +#include "llvm/MC/MCInstrItineraries.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +static cl::opt +DisableColoring("no-stack-coloring", + cl::init(false), cl::Hidden, + cl::desc("Disable stack coloring")); + +/// The user may write code that uses allocas outside of the declared lifetime +/// zone. 
This can happen when the user returns a reference to a local +/// data-structure. We can detect these cases and decide not to optimize the +/// code. If this flag is enabled, we try to save the user. +static cl::opt +ProtectFromEscapedAllocas("protect-from-escaped-allocas", + cl::init(false), cl::Hidden, + cl::desc("Do not optimize lifetime zones that are broken")); + +STATISTIC(NumMarkerSeen, "Number of lifetime markers found."); +STATISTIC(StackSpaceSaved, "Number of bytes saved due to merging slots."); +STATISTIC(StackSlotMerged, "Number of stack slot merged."); +STATISTIC(EscapedAllocas, + "Number of allocas that escaped the lifetime region"); + +//===----------------------------------------------------------------------===// +// StackColoring Pass +//===----------------------------------------------------------------------===// + +namespace { +/// StackColoring - A machine pass for merging disjoint stack allocations, +/// marked by the LIFETIME_START and LIFETIME_END pseudo instructions. +class StackColoring : public MachineFunctionPass { + MachineFrameInfo *MFI; + MachineFunction *MF; + + /// A class representing liveness information for a single basic block. + /// Each bit in the BitVector represents the liveness property + /// for a different stack slot. + struct BlockLifetimeInfo { + /// Which slots BEGINs in each basic block. + BitVector Begin; + /// Which slots ENDs in each basic block. + BitVector End; + /// Which slots are marked as LIVE_IN, coming into each basic block. + BitVector LiveIn; + /// Which slots are marked as LIVE_OUT, coming out of each basic block. + BitVector LiveOut; + }; + + /// Maps active slots (per bit) for each basic block. + DenseMap BlockLiveness; + + /// Maps serial numbers to basic blocks. + DenseMap BasicBlocks; + /// Maps basic blocks to a serial number. + SmallVector BasicBlockNumbering; + + /// Maps liveness intervals for each slot. + SmallVector Intervals; + /// VNInfo is used for the construction of LiveIntervals. + VNInfo::Allocator VNInfoAllocator; + /// SlotIndex analysis object. + SlotIndexes *Indexes; + + /// The list of lifetime markers found. These markers are to be removed + /// once the coloring is done. + SmallVector Markers; + + /// SlotSizeSorter - A Sort utility for arranging stack slots according + /// to their size. + struct SlotSizeSorter { + MachineFrameInfo *MFI; + SlotSizeSorter(MachineFrameInfo *mfi) : MFI(mfi) { } + bool operator()(int LHS, int RHS) { + // We use -1 to denote a uninteresting slot. Place these slots at the end. + if (LHS == -1) return false; + if (RHS == -1) return true; + // Sort according to size. + return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS); + } +}; + +public: + static char ID; + StackColoring() : MachineFunctionPass(ID) { + initializeStackColoringPass(*PassRegistry::getPassRegistry()); + } + void getAnalysisUsage(AnalysisUsage &AU) const; + bool runOnMachineFunction(MachineFunction &MF); + +private: + /// Debug. + void dump(); + + /// Removes all of the lifetime marker instructions from the function. + /// \returns true if any markers were removed. + bool removeAllMarkers(); + + /// Scan the machine function and find all of the lifetime markers. + /// Record the findings in the BEGIN and END vectors. + /// \returns the number of markers found. + unsigned collectMarkers(unsigned NumSlot); + + /// Perform the dataflow calculation and calculate the lifetime for each of + /// the slots, based on the BEGIN/END vectors. 
Set the LifetimeLIVE_IN and + /// LifetimeLIVE_OUT maps that represent which stack slots are live coming + /// in and out blocks. + void calculateLocalLiveness(); + + /// Construct the LiveIntervals for the slots. + void calculateLiveIntervals(unsigned NumSlots); + + /// Go over the machine function and change instructions which use stack + /// slots to use the joint slots. + void remapInstructions(DenseMap &SlotRemap); + + /// The input program may contain intructions which are not inside lifetime + /// markers. This can happen due to a bug in the compiler or due to a bug in + /// user code (for example, returning a reference to a local variable). + /// This procedure checks all of the instructions in the function and + /// invalidates lifetime ranges which do not contain all of the instructions + /// which access that frame slot. + void removeInvalidSlotRanges(); + + /// Map entries which point to other entries to their destination. + /// A->B->C becomes A->C. + void expungeSlotMap(DenseMap &SlotRemap, unsigned NumSlots); +}; +} // end anonymous namespace + +char StackColoring::ID = 0; +char &llvm::StackColoringID = StackColoring::ID; + +INITIALIZE_PASS_BEGIN(StackColoring, + "stack-coloring", "Merge disjoint stack slots", false, false) +INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree) +INITIALIZE_PASS_DEPENDENCY(SlotIndexes) +INITIALIZE_PASS_END(StackColoring, + "stack-coloring", "Merge disjoint stack slots", false, false) + +void StackColoring::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired(); + AU.addPreserved(); + AU.addRequired(); + MachineFunctionPass::getAnalysisUsage(AU); +} + +void StackColoring::dump() { + for (df_iterator FI = df_begin(MF), FE = df_end(MF); + FI != FE; ++FI) { + unsigned Num = BasicBlocks[*FI]; + DEBUG(dbgs()<<"Inspecting block #"<getName()<<"]\n"); + Num = 0; + DEBUG(dbgs()<<"BEGIN : {"); + for (unsigned i=0; i < BlockLiveness[*FI].Begin.size(); ++i) + DEBUG(dbgs()< FI = df_begin(MF), FE = df_end(MF); + FI != FE; ++FI) { + + // Assign a serial number to this basic block. + BasicBlocks[*FI] = BasicBlockNumbering.size(); + BasicBlockNumbering.push_back(*FI); + + BlockLiveness[*FI].Begin.resize(NumSlot); + BlockLiveness[*FI].End.resize(NumSlot); + + for (MachineBasicBlock::iterator BI = (*FI)->begin(), BE = (*FI)->end(); + BI != BE; ++BI) { + + if (BI->getOpcode() != TargetOpcode::LIFETIME_START && + BI->getOpcode() != TargetOpcode::LIFETIME_END) + continue; + + Markers.push_back(BI); + + bool IsStart = BI->getOpcode() == TargetOpcode::LIFETIME_START; + MachineOperand &MI = BI->getOperand(0); + unsigned Slot = MI.getIndex(); + + MarkersFound++; + + const AllocaInst *Allocation = MFI->getObjectAllocation(Slot); + if (Allocation) { + DEBUG(dbgs()<<"Found a lifetime marker for slot #"<getName()<<"\n"); + } + + if (IsStart) { + BlockLiveness[*FI].Begin.set(Slot); + } else { + if (BlockLiveness[*FI].Begin.test(Slot)) { + // Allocas that start and end within a single block are handled + // specially when computing the LiveIntervals to avoid pessimizing + // the liveness propagation. + BlockLiveness[*FI].Begin.reset(Slot); + } else { + BlockLiveness[*FI].End.set(Slot); + } + } + } + } + + // Update statistics. + NumMarkerSeen += MarkersFound; + return MarkersFound; +} + +void StackColoring::calculateLocalLiveness() { + // Perform a standard reverse dataflow computation to solve for + // global liveness. The BEGIN set here is equivalent to KILL in the standard + // formulation, and END is equivalent to GEN. 
The result of this computation + // is a map from blocks to bitvectors where the bitvectors represent which + // allocas are live in/out of that block. + SmallPtrSet BBSet(BasicBlockNumbering.begin(), + BasicBlockNumbering.end()); + unsigned NumSSMIters = 0; + bool changed = true; + while (changed) { + changed = false; + ++NumSSMIters; + + SmallPtrSet NextBBSet; + + for (SmallVector::iterator + PI = BasicBlockNumbering.begin(), PE = BasicBlockNumbering.end(); + PI != PE; ++PI) { + + MachineBasicBlock *BB = *PI; + if (!BBSet.count(BB)) continue; + + BitVector LocalLiveIn; + BitVector LocalLiveOut; + + // Forward propagation from begins to ends. + for (MachineBasicBlock::pred_iterator PI = BB->pred_begin(), + PE = BB->pred_end(); PI != PE; ++PI) + LocalLiveIn |= BlockLiveness[*PI].LiveOut; + LocalLiveIn |= BlockLiveness[BB].End; + LocalLiveIn.reset(BlockLiveness[BB].Begin); + + // Reverse propagation from ends to begins. + for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(), + SE = BB->succ_end(); SI != SE; ++SI) + LocalLiveOut |= BlockLiveness[*SI].LiveIn; + LocalLiveOut |= BlockLiveness[BB].Begin; + LocalLiveOut.reset(BlockLiveness[BB].End); + + LocalLiveIn |= LocalLiveOut; + LocalLiveOut |= LocalLiveIn; + + // After adopting the live bits, we need to turn-off the bits which + // are de-activated in this block. + LocalLiveOut.reset(BlockLiveness[BB].End); + LocalLiveIn.reset(BlockLiveness[BB].Begin); + + // If we have both BEGIN and END markers in the same basic block then + // we know that the BEGIN marker comes after the END, because we already + // handle the case where the BEGIN comes before the END when collecting + // the markers (and building the BEGIN/END vectore). + // Want to enable the LIVE_IN and LIVE_OUT of slots that have both + // BEGIN and END because it means that the value lives before and after + // this basic block. + BitVector LocalEndBegin = BlockLiveness[BB].End; + LocalEndBegin &= BlockLiveness[BB].Begin; + LocalLiveIn |= LocalEndBegin; + LocalLiveOut |= LocalEndBegin; + + if (LocalLiveIn.test(BlockLiveness[BB].LiveIn)) { + changed = true; + BlockLiveness[BB].LiveIn |= LocalLiveIn; + + for (MachineBasicBlock::pred_iterator PI = BB->pred_begin(), + PE = BB->pred_end(); PI != PE; ++PI) + NextBBSet.insert(*PI); + } + + if (LocalLiveOut.test(BlockLiveness[BB].LiveOut)) { + changed = true; + BlockLiveness[BB].LiveOut |= LocalLiveOut; + + for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(), + SE = BB->succ_end(); SI != SE; ++SI) + NextBBSet.insert(*SI); + } + } + + BBSet = NextBBSet; + }// while changed. +} + +void StackColoring::calculateLiveIntervals(unsigned NumSlots) { + SmallVector Starts; + SmallVector Finishes; + + // For each block, find which slots are active within this block + // and update the live intervals. + for (MachineFunction::iterator MBB = MF->begin(), MBBe = MF->end(); + MBB != MBBe; ++MBB) { + Starts.clear(); + Starts.resize(NumSlots); + Finishes.clear(); + Finishes.resize(NumSlots); + + // Create the interval for the basic blocks with lifetime markers in them. 
+ for (SmallVector::iterator it = Markers.begin(), + e = Markers.end(); it != e; ++it) { + MachineInstr *MI = *it; + if (MI->getParent() != MBB) + continue; + + assert((MI->getOpcode() == TargetOpcode::LIFETIME_START || + MI->getOpcode() == TargetOpcode::LIFETIME_END) && + "Invalid Lifetime marker"); + + bool IsStart = MI->getOpcode() == TargetOpcode::LIFETIME_START; + MachineOperand &Mo = MI->getOperand(0); + int Slot = Mo.getIndex(); + assert(Slot >= 0 && "Invalid slot"); + + SlotIndex ThisIndex = Indexes->getInstructionIndex(MI); + + if (IsStart) { + if (!Starts[Slot].isValid() || Starts[Slot] > ThisIndex) + Starts[Slot] = ThisIndex; + } else { + if (!Finishes[Slot].isValid() || Finishes[Slot] < ThisIndex) + Finishes[Slot] = ThisIndex; + } + } + + // Create the interval of the blocks that we previously found to be 'alive'. + BitVector Alive = BlockLiveness[MBB].LiveIn; + Alive |= BlockLiveness[MBB].LiveOut; + + if (Alive.any()) { + for (int pos = Alive.find_first(); pos != -1; + pos = Alive.find_next(pos)) { + if (!Starts[pos].isValid()) + Starts[pos] = Indexes->getMBBStartIdx(MBB); + if (!Finishes[pos].isValid()) + Finishes[pos] = Indexes->getMBBEndIdx(MBB); + } + } + + for (unsigned i = 0; i < NumSlots; ++i) { + assert(Starts[i].isValid() == Finishes[i].isValid() && "Unmatched range"); + if (!Starts[i].isValid()) + continue; + + assert(Starts[i] && Finishes[i] && "Invalid interval"); + VNInfo *ValNum = Intervals[i]->getValNumInfo(0); + SlotIndex S = Starts[i]; + SlotIndex F = Finishes[i]; + if (S < F) { + // We have a single consecutive region. + Intervals[i]->addRange(LiveRange(S, F, ValNum)); + } else { + // We have two non consecutive regions. This happens when + // LIFETIME_START appears after the LIFETIME_END marker. + SlotIndex NewStart = Indexes->getMBBStartIdx(MBB); + SlotIndex NewFin = Indexes->getMBBEndIdx(MBB); + Intervals[i]->addRange(LiveRange(NewStart, F, ValNum)); + Intervals[i]->addRange(LiveRange(S, NewFin, ValNum)); + } + } + } +} + +bool StackColoring::removeAllMarkers() { + unsigned Count = 0; + for (unsigned i = 0; i < Markers.size(); ++i) { + Markers[i]->eraseFromParent(); + Count++; + } + Markers.clear(); + + DEBUG(dbgs()<<"Removed "< &SlotRemap) { + unsigned FixedInstr = 0; + unsigned FixedMemOp = 0; + unsigned FixedDbg = 0; + MachineModuleInfo *MMI = &MF->getMMI(); + + // Remap debug information that refers to stack slots. + MachineModuleInfo::VariableDbgInfoMapTy &VMap = MMI->getVariableDbgInfo(); + for (MachineModuleInfo::VariableDbgInfoMapTy::iterator VI = VMap.begin(), + VE = VMap.end(); VI != VE; ++VI) { + const MDNode *Var = VI->first; + if (!Var) continue; + std::pair &VP = VI->second; + if (SlotRemap.count(VP.first)) { + DEBUG(dbgs()<<"Remapping debug info for ["<getName()<<"].\n"); + VP.first = SlotRemap[VP.first]; + FixedDbg++; + } + } + + // Keep a list of *allocas* which need to be remapped. + DenseMap Allocas; + for (DenseMap::iterator it = SlotRemap.begin(), + e = SlotRemap.end(); it != e; ++it) { + const AllocaInst *From = MFI->getObjectAllocation(it->first); + const AllocaInst *To = MFI->getObjectAllocation(it->second); + assert(To && From && "Invalid allocation object"); + Allocas[From] = To; + } + + // Remap all instructions to the new stack slots. + MachineFunction::iterator BB, BBE; + MachineBasicBlock::iterator I, IE; + for (BB = MF->begin(), BBE = MF->end(); BB != BBE; ++BB) + for (I = BB->begin(), IE = BB->end(); I != IE; ++I) { + + // Skip lifetime markers. We'll remove them soon. 
+ if (I->getOpcode() == TargetOpcode::LIFETIME_START || + I->getOpcode() == TargetOpcode::LIFETIME_END) + continue; + + // Update the MachineMemOperand to use the new alloca. + for (MachineInstr::mmo_iterator MM = I->memoperands_begin(), + E = I->memoperands_end(); MM != E; ++MM) { + MachineMemOperand *MMO = *MM; + + const Value *V = MMO->getValue(); + + if (!V) + continue; + + // Climb up and find the original alloca. + V = GetUnderlyingObject(V); + // If we did not find one, or if the one that we found is not in our + // map, then move on. + if (!V || !isa(V)) { + // Clear mem operand since we don't know for sure that it doesn't + // alias a merged alloca. + MMO->setValue(0); + continue; + } + const AllocaInst *AI= cast(V); + if (!Allocas.count(AI)) + continue; + + MMO->setValue(Allocas[AI]); + FixedMemOp++; + } + + // Update all of the machine instruction operands. + for (unsigned i = 0 ; i < I->getNumOperands(); ++i) { + MachineOperand &MO = I->getOperand(i); + + if (!MO.isFI()) + continue; + int FromSlot = MO.getIndex(); + + // Don't touch arguments. + if (FromSlot<0) + continue; + + // Only look at mapped slots. + if (!SlotRemap.count(FromSlot)) + continue; + + // In a debug build, check that the instruction that we are modifying is + // inside the expected live range. If the instruction is not inside + // the calculated range then it means that the alloca usage moved + // outside of the lifetime markers, or that the user has a bug. + // NOTE: Alloca address calculations which happen outside the lifetime + // zone are are okay, despite the fact that we don't have a good way + // for validating all of the usages of the calculation. +#ifndef NDEBUG + bool TouchesMemory = I->mayLoad() || I->mayStore(); + // If we *don't* protect the user from escaped allocas, don't bother + // validating the instructions. + if (!I->isDebugValue() && TouchesMemory && ProtectFromEscapedAllocas) { + SlotIndex Index = Indexes->getInstructionIndex(I); + LiveInterval *Interval = Intervals[FromSlot]; + assert(Interval->find(Index) != Interval->end() && + "Found instruction usage outside of live range."); + } +#endif + + // Fix the machine instructions. + int ToSlot = SlotRemap[FromSlot]; + MO.setIndex(ToSlot); + FixedInstr++; + } + } + + DEBUG(dbgs()<<"Fixed "<begin(), BBE = MF->end(); BB != BBE; ++BB) + for (I = BB->begin(), IE = BB->end(); I != IE; ++I) { + + if (I->getOpcode() == TargetOpcode::LIFETIME_START || + I->getOpcode() == TargetOpcode::LIFETIME_END || I->isDebugValue()) + continue; + + // Some intervals are suspicious! In some cases we find address + // calculations outside of the lifetime zone, but not actual memory + // read or write. Memory accesses outside of the lifetime zone are a clear + // violation, but address calculations are okay. This can happen when + // GEPs are hoisted outside of the lifetime zone. + // So, in here we only check instructions which can read or write memory. + if (!I->mayLoad() && !I->mayStore()) + continue; + + // Check all of the machine operands. + for (unsigned i = 0 ; i < I->getNumOperands(); ++i) { + MachineOperand &MO = I->getOperand(i); + + if (!MO.isFI()) + continue; + + int Slot = MO.getIndex(); + + if (Slot<0) + continue; + + if (Intervals[Slot]->empty()) + continue; + + // Check that the used slot is inside the calculated lifetime range. + // If it is not, warn about it and invalidate the range. 
+ LiveInterval *Interval = Intervals[Slot]; + SlotIndex Index = Indexes->getInstructionIndex(I); + if (Interval->find(Index) == Interval->end()) { + Intervals[Slot]->clear(); + DEBUG(dbgs()<<"Invalidating range #"< &SlotRemap, + unsigned NumSlots) { + // Expunge slot remap map. + for (unsigned i=0; i < NumSlots; ++i) { + // If we are remapping i + if (SlotRemap.count(i)) { + int Target = SlotRemap[i]; + // As long as our target is mapped to something else, follow it. + while (SlotRemap.count(Target)) { + Target = SlotRemap[Target]; + SlotRemap[i] = Target; + } + } + } +} + +bool StackColoring::runOnMachineFunction(MachineFunction &Func) { + DEBUG(dbgs() << "********** Stack Coloring **********\n" + << "********** Function: " + << ((const Value*)Func.getFunction())->getName() << '\n'); + MF = &Func; + MFI = MF->getFrameInfo(); + Indexes = &getAnalysis(); + BlockLiveness.clear(); + BasicBlocks.clear(); + BasicBlockNumbering.clear(); + Markers.clear(); + Intervals.clear(); + VNInfoAllocator.Reset(); + + unsigned NumSlots = MFI->getObjectIndexEnd(); + + // If there are no stack slots then there are no markers to remove. + if (!NumSlots) + return false; + + SmallVector SortedSlots; + + SortedSlots.reserve(NumSlots); + Intervals.reserve(NumSlots); + + unsigned NumMarkers = collectMarkers(NumSlots); + + unsigned TotalSize = 0; + DEBUG(dbgs()<<"Found "<getObjectIndexEnd(); ++i) { + DEBUG(dbgs()<<"Slot #"<getObjectSize(i)<<" bytes.\n"); + TotalSize += MFI->getObjectSize(i); + } + + DEBUG(dbgs()<<"Total Stack size: "<getNextValue(Indexes->getZeroIndex(), VNInfoAllocator); + SortedSlots.push_back(i); + } + + // Calculate the liveness of each block. + calculateLocalLiveness(); + + // Propagate the liveness information. + calculateLiveIntervals(NumSlots); + + // Search for allocas which are used outside of the declared lifetime + // markers. + if (ProtectFromEscapedAllocas) + removeInvalidSlotRanges(); + + // Maps old slots to new slots. + DenseMap SlotRemap; + unsigned RemovedSlots = 0; + unsigned ReducedSize = 0; + + // Do not bother looking at empty intervals. + for (unsigned I = 0; I < NumSlots; ++I) { + if (Intervals[SortedSlots[I]]->empty()) + SortedSlots[I] = -1; + } + + // This is a simple greedy algorithm for merging allocas. First, sort the + // slots, placing the largest slots first. Next, perform an n^2 scan and look + // for disjoint slots. When you find disjoint slots, merge the samller one + // into the bigger one and update the live interval. Remove the small alloca + // and continue. + + // Sort the slots according to their size. Place unused slots at the end. + std::sort(SortedSlots.begin(), SortedSlots.end(), SlotSizeSorter(MFI)); + + bool Chanded = true; + while (Chanded) { + Chanded = false; + for (unsigned I = 0; I < NumSlots; ++I) { + if (SortedSlots[I] == -1) + continue; + + for (unsigned J=I+1; J < NumSlots; ++J) { + if (SortedSlots[J] == -1) + continue; + + int FirstSlot = SortedSlots[I]; + int SecondSlot = SortedSlots[J]; + LiveInterval *First = Intervals[FirstSlot]; + LiveInterval *Second = Intervals[SecondSlot]; + assert (!First->empty() && !Second->empty() && "Found an empty range"); + + // Merge disjoint slots. 
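// Editorial sketch of the chain-following done in expungeSlotMap() above: if
// slot A was merged into B and B into C, the remap table is flattened so A
// points directly at C ("as long as our target is mapped to something else,
// follow it"). std::map stands in for the DenseMap used in the patch; the
// slot numbers are invented.
#include <cstdio>
#include <map>

void flatten(std::map<int, int> &Remap, unsigned NumSlots) {
  for (unsigned i = 0; i < NumSlots; ++i) {
    auto It = Remap.find(static_cast<int>(i));
    if (It == Remap.end())
      continue;
    int Target = It->second;
    while (Remap.count(Target)) {           // follow to the final destination
      Target = Remap[Target];
      Remap[static_cast<int>(i)] = Target;
    }
  }
}

int main() {
  std::map<int, int> Remap = {{3, 2}, {2, 0}}; // 3 -> 2 -> 0
  flatten(Remap, 4);
  std::printf("slot 3 now maps to %d\n", Remap[3]); // prints 0
}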
+ if (!First->overlaps(*Second)) { + Chanded = true; + First->MergeRangesInAsValue(*Second, First->getValNumInfo(0)); + SlotRemap[SecondSlot] = FirstSlot; + SortedSlots[J] = -1; + DEBUG(dbgs()<<"Merging #"<getObjectAlignment(FirstSlot), + MFI->getObjectAlignment(SecondSlot)); + + assert(MFI->getObjectSize(FirstSlot) >= + MFI->getObjectSize(SecondSlot) && + "Merging a small object into a larger one"); + + RemovedSlots+=1; + ReducedSize += MFI->getObjectSize(SecondSlot); + MFI->setObjectAlignment(FirstSlot, MaxAlignment); + MFI->RemoveStackObject(SecondSlot); + } + } + } + }// While changed. + + // Record statistics. + StackSpaceSaved += ReducedSize; + StackSlotMerged += RemovedSlots; + DEBUG(dbgs()<<"Merge "< -SSPBufferSize("stack-protector-buffer-size", cl::init(8), - cl::desc("Lower bound for a buffer to be considered for " - "stack protection")); - namespace { class StackProtector : public FunctionPass { /// TLI - Keep a pointer of a TargetLowering to consult for determining @@ -61,6 +55,11 @@ namespace { /// check fails. BasicBlock *CreateFailBB(); + /// ContainsProtectableArray - Check whether the type either is an array or + /// contains an array of sufficient size so that we need stack protectors + /// for it. + bool ContainsProtectableArray(Type *Ty, bool InStruct = false) const; + /// RequiresStackProtector - Check whether or not this function needs a /// stack protector based upon the stack protector level. bool RequiresStackProtector() const; @@ -100,21 +99,50 @@ bool StackProtector::runOnFunction(Function &Fn) { return InsertStackProtectors(); } +/// ContainsProtectableArray - Check whether the type either is an array or +/// contains a char array of sufficient size so that we need stack protectors +/// for it. +bool StackProtector::ContainsProtectableArray(Type *Ty, bool InStruct) const { + if (!Ty) return false; + if (ArrayType *AT = dyn_cast(Ty)) { + const TargetMachine &TM = TLI->getTargetMachine(); + if (!AT->getElementType()->isIntegerTy(8)) { + Triple Trip(TM.getTargetTriple()); + + // If we're on a non-Darwin platform or we're inside of a structure, don't + // add stack protectors unless the array is a character array. + if (InStruct || !Trip.isOSDarwin()) + return false; + } + + // If an array has more than SSPBufferSize bytes of allocated space, then we + // emit stack protectors. + if (TM.Options.SSPBufferSize <= TLI->getDataLayout()->getTypeAllocSize(AT)) + return true; + } + + const StructType *ST = dyn_cast(Ty); + if (!ST) return false; + + for (StructType::element_iterator I = ST->element_begin(), + E = ST->element_end(); I != E; ++I) + if (ContainsProtectableArray(*I, true)) + return true; + + return false; +} + /// RequiresStackProtector - Check whether or not this function needs a stack /// protector based upon the stack protector level. The heuristic we use is to /// add a guard variable to functions that call alloca, and functions with /// buffers larger than SSPBufferSize bytes. 
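// Editorial sketch of the shape of ContainsProtectableArray() above: a type
// needs a protector if it is a sufficiently large char array, or a struct
// that recursively contains one. The toy Type struct and the 8-byte
// threshold are assumptions for illustration only, and the Darwin-specific
// allowance for non-char top-level arrays is deliberately left out.
#include <cstdio>
#include <vector>

struct Type {
  bool IsArray;
  bool ElemIsChar;
  unsigned ArrayBytes;
  std::vector<Type> StructElems;            // non-empty => struct
};

bool containsProtectableArray(const Type &Ty, unsigned SSPBufferSize = 8) {
  if (Ty.IsArray)
    return Ty.ElemIsChar && Ty.ArrayBytes >= SSPBufferSize;
  for (const Type &Elem : Ty.StructElems)   // recurse into struct members
    if (containsProtectableArray(Elem, SSPBufferSize))
      return true;
  return false;
}

int main() {
  Type Buf{true, true, 16, {}};             // char buf[16]
  Type Wrapper{false, false, 0, {Buf}};     // struct { char buf[16]; }
  std::printf("%d %d\n", containsProtectableArray(Buf),
              containsProtectableArray(Wrapper)); // 1 1
}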
bool StackProtector::RequiresStackProtector() const { - if (F->hasFnAttr(Attribute::StackProtectReq)) + if (F->getFnAttributes().hasAttribute(Attributes::StackProtectReq)) return true; - if (!F->hasFnAttr(Attribute::StackProtect)) + if (!F->getFnAttributes().hasAttribute(Attributes::StackProtect)) return false; - const TargetData *TD = TLI->getTargetData(); - const TargetMachine &TM = TLI->getTargetMachine(); - Triple Trip(TM.getTargetTriple()); - for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) { BasicBlock *BB = I; @@ -126,17 +154,8 @@ bool StackProtector::RequiresStackProtector() const { // protectors. return true; - if (ArrayType *AT = dyn_cast(AI->getAllocatedType())) { - // If we're on a non-Darwin platform, don't add stack protectors - // unless the array is a character array. - if (!Trip.isOSDarwin() && !AT->getElementType()->isIntegerTy(8)) - continue; - - // If an array has more than SSPBufferSize bytes of allocated space, - // then we emit stack protectors. - if (SSPBufferSize <= TD->getTypeAllocSize(AT)) - return true; - } + if (ContainsProtectableArray(AI->getAllocatedType())) + return true; } } diff --git a/lib/CodeGen/StackSlotColoring.cpp b/lib/CodeGen/StackSlotColoring.cpp index 20da36e..d349abc 100644 --- a/lib/CodeGen/StackSlotColoring.cpp +++ b/lib/CodeGen/StackSlotColoring.cpp @@ -11,8 +11,7 @@ // //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "stackcoloring" -#include "llvm/Function.h" +#define DEBUG_TYPE "stackslotcoloring" #include "llvm/Module.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" @@ -391,8 +390,7 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) { bool StackSlotColoring::runOnMachineFunction(MachineFunction &MF) { DEBUG({ dbgs() << "********** Stack Slot Coloring **********\n" - << "********** Function: " - << MF.getFunction()->getName() << '\n'; + << "********** Function: " << MF.getName() << '\n'; }); MFI = MF.getFrameInfo(); diff --git a/lib/CodeGen/StrongPHIElimination.cpp b/lib/CodeGen/StrongPHIElimination.cpp index 5b06195..39fd600 100644 --- a/lib/CodeGen/StrongPHIElimination.cpp +++ b/lib/CodeGen/StrongPHIElimination.cpp @@ -404,9 +404,9 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &MF) { } void StrongPHIElimination::addReg(unsigned Reg) { - if (RegNodeMap.count(Reg)) - return; - RegNodeMap[Reg] = new (Allocator) Node(Reg); + Node *&N = RegNodeMap[Reg]; + if (!N) + N = new (Allocator) Node(Reg); } StrongPHIElimination::Node* @@ -714,8 +714,9 @@ void StrongPHIElimination::InsertCopiesForPHI(MachineInstr *PHI, assert(getRegColor(CopyReg) == CopyReg); } - if (!InsertedSrcCopyMap.count(std::make_pair(PredBB, PHIColor))) - InsertedSrcCopyMap[std::make_pair(PredBB, PHIColor)] = CopyInstr; + // Insert into map if not already there. + InsertedSrcCopyMap.insert(std::make_pair(std::make_pair(PredBB, PHIColor), + CopyInstr)); } SrcMO.setReg(CopyReg); diff --git a/lib/CodeGen/TailDuplication.cpp b/lib/CodeGen/TailDuplication.cpp index a813fa6..1497d1b 100644 --- a/lib/CodeGen/TailDuplication.cpp +++ b/lib/CodeGen/TailDuplication.cpp @@ -552,7 +552,8 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF, // compensate for the duplication. unsigned MaxDuplicateCount; if (TailDuplicateSize.getNumOccurrences() == 0 && - MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize)) + MF.getFunction()->getFnAttributes(). 
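// Editorial sketch of the idiom introduced in StrongPHIElimination::addReg()
// above: instead of count() followed by an insert (two hash lookups),
// operator[] hands back a reference to the slot, which is value-initialized
// to null on first use. std::unordered_map stands in for DenseMap and Node
// is a stand-in type.
#include <cstdio>
#include <unordered_map>

struct Node { unsigned Reg; };

std::unordered_map<unsigned, Node *> RegNodeMap;

void addReg(unsigned Reg) {
  Node *&N = RegNodeMap[Reg];               // one lookup; null the first time
  if (!N)
    N = new Node{Reg};
}

int main() {
  addReg(42);
  addReg(42);                               // second call reuses the node
  std::printf("%zu entries\n", RegNodeMap.size()); // 1 entry
}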
+ hasAttribute(Attributes::OptimizeForSize)) MaxDuplicateCount = 1; else MaxDuplicateCount = TailDuplicateSize; diff --git a/lib/CodeGen/TargetInstrInfoImpl.cpp b/lib/CodeGen/TargetInstrInfoImpl.cpp index ddee6b2..4439192 100644 --- a/lib/CodeGen/TargetInstrInfoImpl.cpp +++ b/lib/CodeGen/TargetInstrInfoImpl.cpp @@ -99,17 +99,8 @@ MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI, if (NewMI) { // Create a new instruction. - bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false; MachineFunction &MF = *MI->getParent()->getParent(); - if (HasDef) - return BuildMI(MF, MI->getDebugLoc(), MI->getDesc()) - .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead), SubReg0) - .addReg(Reg2, getKillRegState(Reg2IsKill), SubReg2) - .addReg(Reg1, getKillRegState(Reg1IsKill), SubReg1); - else - return BuildMI(MF, MI->getDebugLoc(), MI->getDesc()) - .addReg(Reg2, getKillRegState(Reg2IsKill), SubReg2) - .addReg(Reg1, getKillRegState(Reg1IsKill), SubReg1); + MI = MF.CloneMachineInstr(MI); } if (HasDef) { @@ -572,6 +563,8 @@ TargetInstrInfoImpl::getNumMicroOps(const InstrItineraryData *ItinData, /// Return the default expected latency for a def based on it's opcode. unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel, const MachineInstr *DefMI) const { + if (DefMI->isTransient()) + return 0; if (DefMI->mayLoad()) return SchedModel->LoadLatency; if (isHighLatencyDef(DefMI->getOpcode())) @@ -615,13 +608,13 @@ getOperandLatency(const InstrItineraryData *ItinData, /// If we can determine the operand latency from the def only, without itinerary /// lookup, do so. Otherwise return -1. -static int computeDefOperandLatency( - const TargetInstrInfo *TII, const InstrItineraryData *ItinData, - const MachineInstr *DefMI, bool FindMin) { +int TargetInstrInfo::computeDefOperandLatency( + const InstrItineraryData *ItinData, + const MachineInstr *DefMI, bool FindMin) const { // Let the target hook getInstrLatency handle missing itineraries. if (!ItinData) - return TII->getInstrLatency(ItinData, DefMI); + return getInstrLatency(ItinData, DefMI); // Return a latency based on the itinerary properties and defining instruction // if possible. Some common subtargets don't require per-operand latency, @@ -630,7 +623,7 @@ static int computeDefOperandLatency( // If MinLatency is valid, call getInstrLatency. This uses Stage latency if // it exists before defaulting to MinLatency. if (ItinData->SchedModel->MinLatency >= 0) - return TII->getInstrLatency(ItinData, DefMI); + return getInstrLatency(ItinData, DefMI); // If MinLatency is invalid, OperandLatency is interpreted as MinLatency. // For empty itineraries, short-cirtuit the check and default to one cycle. @@ -638,29 +631,42 @@ static int computeDefOperandLatency( return 1; } else if(ItinData->isEmpty()) - return TII->defaultDefLatency(ItinData->SchedModel, DefMI); + return defaultDefLatency(ItinData->SchedModel, DefMI); // ...operand lookup required return -1; } /// computeOperandLatency - Compute and return the latency of the given data -/// dependent def and use when the operand indices are already known. +/// dependent def and use when the operand indices are already known. UseMI may +/// be NULL for an unknown use. +/// +/// FindMin may be set to get the minimum vs. expected latency. Minimum +/// latency is used for scheduling groups, while expected latency is for +/// instruction cost and critical path. /// -/// FindMin may be set to get the minimum vs. expected latency. 
+/// Depending on the subtarget's itinerary properties, this may or may not need +/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or +/// UseIdx to compute min latency. unsigned TargetInstrInfo:: computeOperandLatency(const InstrItineraryData *ItinData, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx, bool FindMin) const { - int DefLatency = computeDefOperandLatency(this, ItinData, DefMI, FindMin); + int DefLatency = computeDefOperandLatency(ItinData, DefMI, FindMin); if (DefLatency >= 0) return DefLatency; assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail"); - int OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx); + int OperLatency = 0; + if (UseMI) + OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx); + else { + unsigned DefClass = DefMI->getDesc().getSchedClass(); + OperLatency = ItinData->getOperandCycle(DefClass, DefIdx); + } if (OperLatency >= 0) return OperLatency; @@ -673,77 +679,3 @@ computeOperandLatency(const InstrItineraryData *ItinData, defaultDefLatency(ItinData->SchedModel, DefMI)); return InstrLatency; } - -/// computeOperandLatency - Compute and return the latency of the given data -/// dependent def and use. DefMI must be a valid def. UseMI may be NULL for an -/// unknown use. Depending on the subtarget's itinerary properties, this may or -/// may not need to call getOperandLatency(). -/// -/// FindMin may be set to get the minimum vs. expected latency. Minimum -/// latency is used for scheduling groups, while expected latency is for -/// instruction cost and critical path. -/// -/// For most subtargets, we don't need DefIdx or UseIdx to compute min latency. -/// DefMI must be a valid definition, but UseMI may be NULL for an unknown use. -unsigned TargetInstrInfo:: -computeOperandLatency(const InstrItineraryData *ItinData, - const TargetRegisterInfo *TRI, - const MachineInstr *DefMI, const MachineInstr *UseMI, - unsigned Reg, bool FindMin) const { - - int DefLatency = computeDefOperandLatency(this, ItinData, DefMI, FindMin); - if (DefLatency >= 0) - return DefLatency; - - assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail"); - - // Find the definition of the register in the defining instruction. - int DefIdx = DefMI->findRegisterDefOperandIdx(Reg); - if (DefIdx != -1) { - const MachineOperand &MO = DefMI->getOperand(DefIdx); - if (MO.isReg() && MO.isImplicit() && - DefIdx >= (int)DefMI->getDesc().getNumOperands()) { - // This is an implicit def, getOperandLatency() won't return the correct - // latency. e.g. - // %D6, %D7 = VLD1q16 %R2, 0, ..., %Q3 - // %Q1 = VMULv8i16 %Q1, %Q3, ... - // What we want is to compute latency between def of %D6/%D7 and use of - // %Q3 instead. - unsigned Op2 = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI); - if (DefMI->getOperand(Op2).isReg()) - DefIdx = Op2; - } - // For all uses of the register, calculate the maxmimum latency - int OperLatency = -1; - - // UseMI is null, then it must be a scheduling barrier. 
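// Editorial sketch, with invented values, of the resolution order in the
// rewritten computeOperandLatency() above: a def-only answer first, then a
// per-operand itinerary answer (via the use instruction when one is known,
// otherwise the def's scheduling class), and finally the whole-instruction
// latency, which for expected (non-minimum) latency is clamped up to the
// default def latency. -1 below means "no answer at this stage".
#include <algorithm>
#include <cstdio>

int resolveLatency(int DefOnly, int OperandLookup, int InstrLatency,
                   int DefaultDefLatency, bool FindMin) {
  if (DefOnly >= 0)
    return DefOnly;                         // answered without operand lookup
  if (OperandLookup >= 0)
    return OperandLookup;                   // per-operand itinerary answer
  return FindMin ? InstrLatency
                 : std::max(InstrLatency, DefaultDefLatency);
}

int main() {
  std::printf("%d\n", resolveLatency(-1, 3, 1, 1, false));  // 3
  std::printf("%d\n", resolveLatency(-1, -1, 1, 2, false)); // 2
}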
- if (!UseMI) { - unsigned DefClass = DefMI->getDesc().getSchedClass(); - OperLatency = ItinData->getOperandCycle(DefClass, DefIdx); - } - else { - for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) { - const MachineOperand &MO = UseMI->getOperand(i); - if (!MO.isReg() || !MO.isUse()) - continue; - unsigned MOReg = MO.getReg(); - if (MOReg != Reg) - continue; - - int UseCycle = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, i); - OperLatency = std::max(OperLatency, UseCycle); - } - } - // If we found an operand latency, we're done. - if (OperLatency >= 0) - return OperLatency; - } - // No operand latency was found. - unsigned InstrLatency = getInstrLatency(ItinData, DefMI); - - // Expected latency is the max of the stage latency and itinerary props. - if (!FindMin) - InstrLatency = std::max(InstrLatency, - defaultDefLatency(ItinData->SchedModel, DefMI)); - return InstrLatency; -} diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp index 2a2fa9e..8f5d770 100644 --- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp +++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp @@ -27,7 +27,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/Dwarf.h" @@ -77,9 +77,9 @@ void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer, Flags, SectionKind::getDataRel(), 0, Label->getName()); - unsigned Size = TM.getTargetData()->getPointerSize(); + unsigned Size = TM.getDataLayout()->getPointerSize(); Streamer.SwitchSection(Sec); - Streamer.EmitValueToAlignment(TM.getTargetData()->getPointerABIAlignment()); + Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment()); Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject); const MCExpr *E = MCConstantExpr::Create(Size, getContext()); Streamer.EmitELFSize(Label, E); @@ -247,7 +247,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, // FIXME: this is getting the alignment of the character, not the // alignment of the global! unsigned Align = - TM.getTargetData()->getPreferredAlignment(cast(GV)); + TM.getDataLayout()->getPreferredAlignment(cast(GV)); const char *SizeSpec = ".rodata.str1."; if (Kind.isMergeable2ByteCString()) @@ -522,14 +522,14 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind, // FIXME: Alignment check should be handled by section classifier. if (Kind.isMergeable1ByteCString() && - TM.getTargetData()->getPreferredAlignment(cast(GV)) < 32) + TM.getDataLayout()->getPreferredAlignment(cast(GV)) < 32) return CStringSection; // Do not put 16-bit arrays in the UString section if they have an // externally visible label, this runs into issues with certain linker // versions. if (Kind.isMergeable2ByteCString() && !GV->hasExternalLinkage() && - TM.getTargetData()->getPreferredAlignment(cast(GV)) < 32) + TM.getDataLayout()->getPreferredAlignment(cast(GV)) < 32) return UStringSection; if (Kind.isMergeableConst()) { diff --git a/lib/CodeGen/TargetSchedule.cpp b/lib/CodeGen/TargetSchedule.cpp new file mode 100644 index 0000000..ca3b0e0 --- /dev/null +++ b/lib/CodeGen/TargetSchedule.cpp @@ -0,0 +1,306 @@ +//===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements a wrapper around MCSchedModel that allows the interface +// to benefit from information currently only available in TargetInstrInfo. +// +//===----------------------------------------------------------------------===// + +#include "llvm/CodeGen/TargetSchedule.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/Target/TargetSubtargetInfo.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +static cl::opt EnableSchedModel("schedmodel", cl::Hidden, cl::init(true), + cl::desc("Use TargetSchedModel for latency lookup")); + +static cl::opt EnableSchedItins("scheditins", cl::Hidden, cl::init(true), + cl::desc("Use InstrItineraryData for latency lookup")); + +bool TargetSchedModel::hasInstrSchedModel() const { + return EnableSchedModel && SchedModel.hasInstrSchedModel(); +} + +bool TargetSchedModel::hasInstrItineraries() const { + return EnableSchedItins && !InstrItins.isEmpty(); +} + +static unsigned gcd(unsigned Dividend, unsigned Divisor) { + // Dividend and Divisor will be naturally swapped as needed. + while(Divisor) { + unsigned Rem = Dividend % Divisor; + Dividend = Divisor; + Divisor = Rem; + }; + return Dividend; +} +static unsigned lcm(unsigned A, unsigned B) { + unsigned LCM = (uint64_t(A) * B) / gcd(A, B); + assert((LCM >= A && LCM >= B) && "LCM overflow"); + return LCM; +} + +void TargetSchedModel::init(const MCSchedModel &sm, + const TargetSubtargetInfo *sti, + const TargetInstrInfo *tii) { + SchedModel = sm; + STI = sti; + TII = tii; + STI->initInstrItins(InstrItins); + + unsigned NumRes = SchedModel.getNumProcResourceKinds(); + ResourceFactors.resize(NumRes); + ResourceLCM = SchedModel.IssueWidth; + for (unsigned Idx = 0; Idx < NumRes; ++Idx) { + unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits; + if (NumUnits > 0) + ResourceLCM = lcm(ResourceLCM, NumUnits); + } + MicroOpFactor = ResourceLCM / SchedModel.IssueWidth; + for (unsigned Idx = 0; Idx < NumRes; ++Idx) { + unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits; + ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0; + } +} + +unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI, + const MCSchedClassDesc *SC) const { + if (hasInstrItineraries()) { + int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass()); + return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI); + } + if (hasInstrSchedModel()) { + if (!SC) + SC = resolveSchedClass(MI); + if (SC->isValid()) + return SC->NumMicroOps; + } + return MI->isTransient() ? 0 : 1; +} + +// The machine model may explicitly specify an invalid latency, which +// effectively means infinite latency. Since users of the TargetSchedule API +// don't know how to handle this, we convert it to a very large latency that is +// easy to distinguish when debugging the DAG but won't induce overflow. +static unsigned convertLatency(int Cycles) { + return Cycles >= 0 ? Cycles : 1000; +} + +/// If we can determine the operand latency from the def only, without machine +/// model or itinerary lookup, do so. Otherwise return -1. +int TargetSchedModel::getDefLatency(const MachineInstr *DefMI, + bool FindMin) const { + + // Return a latency based on the itinerary properties and defining instruction + // if possible. 
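// Editorial worked example of the normalization in TargetSchedModel::init()
// above: the LCM of the issue width and every resource's unit count becomes
// a common denominator, so micro-ops and unit-cycles of each resource can be
// compared as integer factors. The issue width of 4 and unit counts {2, 3}
// are made-up numbers, not any real subtarget.
#include <cstdio>
#include <vector>

static unsigned gcd(unsigned A, unsigned B) {
  while (B) { unsigned R = A % B; A = B; B = R; }
  return A;
}
static unsigned lcm(unsigned A, unsigned B) { return A / gcd(A, B) * B; }

int main() {
  unsigned IssueWidth = 4;
  std::vector<unsigned> NumUnits = {2, 3};

  unsigned ResourceLCM = IssueWidth;
  for (unsigned N : NumUnits)
    if (N > 0)
      ResourceLCM = lcm(ResourceLCM, N);             // lcm(4, 2, 3) = 12

  unsigned MicroOpFactor = ResourceLCM / IssueWidth; // 12 / 4 = 3
  std::printf("LCM=%u uop-factor=%u\n", ResourceLCM, MicroOpFactor);
  for (unsigned N : NumUnits)
    std::printf("resource with %u units -> factor %u\n", N,
                N ? ResourceLCM / N : 0);            // factors 6 and 4
}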
Some common subtargets don't require per-operand latency, + // especially for minimum latencies. + if (FindMin) { + // If MinLatency is invalid, then use the itinerary for MinLatency. If no + // itinerary exists either, then use single cycle latency. + if (SchedModel.MinLatency < 0 && !hasInstrItineraries()) { + return 1; + } + return SchedModel.MinLatency; + } + else if (!hasInstrSchedModel() && !hasInstrItineraries()) { + return TII->defaultDefLatency(&SchedModel, DefMI); + } + // ...operand lookup required + return -1; +} + +/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require +/// evaluation of predicates that depend on instruction operands or flags. +const MCSchedClassDesc *TargetSchedModel:: +resolveSchedClass(const MachineInstr *MI) const { + + // Get the definition's scheduling class descriptor from this machine model. + unsigned SchedClass = MI->getDesc().getSchedClass(); + const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass); + +#ifndef NDEBUG + unsigned NIter = 0; +#endif + while (SCDesc->isVariant()) { + assert(++NIter < 6 && "Variants are nested deeper than the magic number"); + + SchedClass = STI->resolveSchedClass(SchedClass, MI, this); + SCDesc = SchedModel.getSchedClassDesc(SchedClass); + } + return SCDesc; +} + +/// Find the def index of this operand. This index maps to the machine model and +/// is independent of use operands. Def operands may be reordered with uses or +/// merged with uses without affecting the def index (e.g. before/after +/// regalloc). However, an instruction's def operands must never be reordered +/// with respect to each other. +static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) { + unsigned DefIdx = 0; + for (unsigned i = 0; i != DefOperIdx; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (MO.isReg() && MO.isDef()) + ++DefIdx; + } + return DefIdx; +} + +/// Find the use index of this operand. This is independent of the instruction's +/// def operands. +/// +/// Note that uses are not determined by the operand's isUse property, which +/// is simply the inverse of isDef. Here we consider any readsReg operand to be +/// a "use". The machine model allows an operand to be both a Def and Use. +static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) { + unsigned UseIdx = 0; + for (unsigned i = 0; i != UseOperIdx; ++i) { + const MachineOperand &MO = MI->getOperand(i); + if (MO.isReg() && MO.readsReg()) + ++UseIdx; + } + return UseIdx; +} + +// Top-level API for clients that know the operand indices. +unsigned TargetSchedModel::computeOperandLatency( + const MachineInstr *DefMI, unsigned DefOperIdx, + const MachineInstr *UseMI, unsigned UseOperIdx, + bool FindMin) const { + + int DefLatency = getDefLatency(DefMI, FindMin); + if (DefLatency >= 0) + return DefLatency; + + if (hasInstrItineraries()) { + int OperLatency = 0; + if (UseMI) { + OperLatency = + TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx, UseMI, UseOperIdx); + } + else { + unsigned DefClass = DefMI->getDesc().getSchedClass(); + OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx); + } + if (OperLatency >= 0) + return OperLatency; + + // No operand latency was found. + unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI); + + // Expected latency is the max of the stage latency and itinerary props. + // Rather than directly querying InstrItins stage latency, we call a TII + // hook to allow subtargets to specialize latency. 
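// Editorial sketch of what findDefIdx()/findUseIdx() above compute: an
// operand's position among the instruction's defs (or reading operands)
// only, which is the numbering the machine model uses. The toy operand list
// is an assumption purely for illustration.
#include <cstdio>
#include <vector>

struct Op { bool IsReg, IsDef, ReadsReg; };

unsigned findDefIdx(const std::vector<Op> &Ops, unsigned OperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != OperIdx; ++i)
    if (Ops[i].IsReg && Ops[i].IsDef)       // count only defs before it
      ++DefIdx;
  return DefIdx;
}

int main() {
  // def, def, use, use  (e.g. a load-pair style instruction)
  std::vector<Op> Ops = {{true, true, false}, {true, true, false},
                         {true, false, true}, {true, false, true}};
  std::printf("operand 1 is def #%u\n", findDefIdx(Ops, 1)); // def #1
}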
This hook is only + // applicable to the InstrItins model. InstrSchedModel should model all + // special cases without TII hooks. + if (!FindMin) + InstrLatency = std::max(InstrLatency, + TII->defaultDefLatency(&SchedModel, DefMI)); + return InstrLatency; + } + assert(!FindMin && hasInstrSchedModel() && + "Expected a SchedModel for this cpu"); + const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI); + unsigned DefIdx = findDefIdx(DefMI, DefOperIdx); + if (DefIdx < SCDesc->NumWriteLatencyEntries) { + // Lookup the definition's write latency in SubtargetInfo. + const MCWriteLatencyEntry *WLEntry = + STI->getWriteLatencyEntry(SCDesc, DefIdx); + unsigned WriteID = WLEntry->WriteResourceID; + unsigned Latency = convertLatency(WLEntry->Cycles); + if (!UseMI) + return Latency; + + // Lookup the use's latency adjustment in SubtargetInfo. + const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI); + if (UseDesc->NumReadAdvanceEntries == 0) + return Latency; + unsigned UseIdx = findUseIdx(UseMI, UseOperIdx); + return Latency - STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID); + } + // If DefIdx does not exist in the model (e.g. implicit defs), then return + // unit latency (defaultDefLatency may be too conservative). +#ifndef NDEBUG + if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit() + && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()) { + std::string Err; + raw_string_ostream ss(Err); + ss << "DefIdx " << DefIdx << " exceeds machine model writes for " + << *DefMI; + report_fatal_error(ss.str()); + } +#endif + return DefMI->isTransient() ? 0 : 1; +} + +unsigned TargetSchedModel::computeInstrLatency(const MachineInstr *MI) const { + // For the itinerary model, fall back to the old subtarget hook. + // Allow subtargets to compute Bundle latencies outside the machine model. + if (hasInstrItineraries() || MI->isBundle()) + return TII->getInstrLatency(&InstrItins, MI); + + if (hasInstrSchedModel()) { + const MCSchedClassDesc *SCDesc = resolveSchedClass(MI); + if (SCDesc->isValid()) { + unsigned Latency = 0; + for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries; + DefIdx != DefEnd; ++DefIdx) { + // Lookup the definition's write latency in SubtargetInfo. + const MCWriteLatencyEntry *WLEntry = + STI->getWriteLatencyEntry(SCDesc, DefIdx); + Latency = std::max(Latency, convertLatency(WLEntry->Cycles)); + } + return Latency; + } + } + return TII->defaultDefLatency(&SchedModel, MI); +} + +unsigned TargetSchedModel:: +computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx, + const MachineInstr *DepMI) const { + // MinLatency == -1 is for in-order processors that always have unit + // MinLatency. MinLatency > 0 is for in-order processors with varying min + // latencies, but since this is not a RAW dep, we always use unit latency. + if (SchedModel.MinLatency != 0) + return 1; + + // MinLatency == 0 indicates an out-of-order processor that can dispatch + // WAW dependencies in the same cycle. + + // Treat predication as a data dependency for out-of-order cpus. In-order + // cpus do not need to treat predicated writes specially. + // + // TODO: The following hack exists because predication passes do not + // correctly append imp-use operands, and readsReg() strangely returns false + // for predicated defs. 
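// Editorial example of the scheduling-model arithmetic above: an operand
// latency is the def's write latency minus any "read advance" the consumer
// gets for that write resource, and an invalid (negative) write latency is
// converted to a large but overflow-safe value. The cycle counts below are
// invented for illustration only.
#include <cstdio>

static unsigned convertLatency(int Cycles) {
  return Cycles >= 0 ? static_cast<unsigned>(Cycles) : 1000;
}

unsigned operandLatency(int WriteCycles, unsigned ReadAdvance) {
  return convertLatency(WriteCycles) - ReadAdvance;
}

int main() {
  // A 4-cycle write read 1 cycle early by a forwarding consumer: 3 cycles.
  std::printf("%u\n", operandLatency(4, 1));
  // An invalid write latency is clamped to a large, debuggable value.
  std::printf("%u\n", operandLatency(-1, 0));
}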
+ unsigned Reg = DefMI->getOperand(DefOperIdx).getReg(); + const MachineFunction &MF = *DefMI->getParent()->getParent(); + const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo(); + if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(DepMI)) + return computeInstrLatency(DefMI); + + // If we have a per operand scheduling model, check if this def is writing + // an unbuffered resource. If so, it treated like an in-order cpu. + if (hasInstrSchedModel()) { + const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI); + if (SCDesc->isValid()) { + for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc), + *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) { + if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->IsBuffered) + return 1; + } + } + } + return 0; +} diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp index aa601af..a9058bc 100644 --- a/lib/CodeGen/TwoAddressInstructionPass.cpp +++ b/lib/CodeGen/TwoAddressInstructionPass.cpp @@ -60,116 +60,108 @@ STATISTIC(NumReSchedUps, "Number of instructions re-scheduled up"); STATISTIC(NumReSchedDowns, "Number of instructions re-scheduled down"); namespace { - class TwoAddressInstructionPass : public MachineFunctionPass { - MachineFunction *MF; - const TargetInstrInfo *TII; - const TargetRegisterInfo *TRI; - const InstrItineraryData *InstrItins; - MachineRegisterInfo *MRI; - LiveVariables *LV; - SlotIndexes *Indexes; - LiveIntervals *LIS; - AliasAnalysis *AA; - CodeGenOpt::Level OptLevel; - - // DistanceMap - Keep track the distance of a MI from the start of the - // current basic block. - DenseMap DistanceMap; - - // SrcRegMap - A map from virtual registers to physical registers which - // are likely targets to be coalesced to due to copies from physical - // registers to virtual registers. e.g. v1024 = move r0. - DenseMap SrcRegMap; - - // DstRegMap - A map from virtual registers to physical registers which - // are likely targets to be coalesced to due to copies to physical - // registers from virtual registers. e.g. r1 = move v1024. - DenseMap DstRegMap; - - /// RegSequences - Keep track the list of REG_SEQUENCE instructions seen - /// during the initial walk of the machine function. - SmallVector RegSequences; - - bool Sink3AddrInstruction(MachineBasicBlock *MBB, MachineInstr *MI, - unsigned Reg, - MachineBasicBlock::iterator OldPos); - - bool NoUseAfterLastDef(unsigned Reg, MachineBasicBlock *MBB, unsigned Dist, - unsigned &LastDef); - - bool isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC, - MachineInstr *MI, MachineBasicBlock *MBB, - unsigned Dist); +class TwoAddressInstructionPass : public MachineFunctionPass { + MachineFunction *MF; + const TargetInstrInfo *TII; + const TargetRegisterInfo *TRI; + const InstrItineraryData *InstrItins; + MachineRegisterInfo *MRI; + LiveVariables *LV; + SlotIndexes *Indexes; + LiveIntervals *LIS; + AliasAnalysis *AA; + CodeGenOpt::Level OptLevel; + + // The current basic block being processed. + MachineBasicBlock *MBB; + + // DistanceMap - Keep track the distance of a MI from the start of the + // current basic block. + DenseMap DistanceMap; + + // Set of already processed instructions in the current block. 
+ SmallPtrSet Processed; - bool CommuteInstruction(MachineBasicBlock::iterator &mi, - MachineFunction::iterator &mbbi, - unsigned RegB, unsigned RegC, unsigned Dist); + // SrcRegMap - A map from virtual registers to physical registers which are + // likely targets to be coalesced to due to copies from physical registers to + // virtual registers. e.g. v1024 = move r0. + DenseMap SrcRegMap; - bool isProfitableToConv3Addr(unsigned RegA, unsigned RegB); + // DstRegMap - A map from virtual registers to physical registers which are + // likely targets to be coalesced to due to copies to physical registers from + // virtual registers. e.g. r1 = move v1024. + DenseMap DstRegMap; - bool ConvertInstTo3Addr(MachineBasicBlock::iterator &mi, - MachineBasicBlock::iterator &nmi, - MachineFunction::iterator &mbbi, - unsigned RegA, unsigned RegB, unsigned Dist); + /// RegSequences - Keep track the list of REG_SEQUENCE instructions seen + /// during the initial walk of the machine function. + SmallVector RegSequences; - bool isDefTooClose(unsigned Reg, unsigned Dist, - MachineInstr *MI, MachineBasicBlock *MBB); + bool sink3AddrInstruction(MachineInstr *MI, unsigned Reg, + MachineBasicBlock::iterator OldPos); - bool RescheduleMIBelowKill(MachineBasicBlock *MBB, - MachineBasicBlock::iterator &mi, - MachineBasicBlock::iterator &nmi, - unsigned Reg); - bool RescheduleKillAboveMI(MachineBasicBlock *MBB, - MachineBasicBlock::iterator &mi, - MachineBasicBlock::iterator &nmi, - unsigned Reg); + bool noUseAfterLastDef(unsigned Reg, unsigned Dist, unsigned &LastDef); - bool TryInstructionTransform(MachineBasicBlock::iterator &mi, - MachineBasicBlock::iterator &nmi, - MachineFunction::iterator &mbbi, - unsigned SrcIdx, unsigned DstIdx, - unsigned Dist, - SmallPtrSet &Processed); + bool isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC, + MachineInstr *MI, unsigned Dist); - void ScanUses(unsigned DstReg, MachineBasicBlock *MBB, - SmallPtrSet &Processed); + bool commuteInstruction(MachineBasicBlock::iterator &mi, + unsigned RegB, unsigned RegC, unsigned Dist); - void ProcessCopy(MachineInstr *MI, MachineBasicBlock *MBB, - SmallPtrSet &Processed); + bool isProfitableToConv3Addr(unsigned RegA, unsigned RegB); - typedef SmallVector, 4> TiedPairList; - typedef SmallDenseMap TiedOperandMap; - bool collectTiedOperands(MachineInstr *MI, TiedOperandMap&); - void processTiedPairs(MachineInstr *MI, TiedPairList&, unsigned &Dist); + bool convertInstTo3Addr(MachineBasicBlock::iterator &mi, + MachineBasicBlock::iterator &nmi, + unsigned RegA, unsigned RegB, unsigned Dist); - void CoalesceExtSubRegs(SmallVector &Srcs, unsigned DstReg); + bool isDefTooClose(unsigned Reg, unsigned Dist, MachineInstr *MI); - /// EliminateRegSequences - Eliminate REG_SEQUENCE instructions as part - /// of the de-ssa process. This replaces sources of REG_SEQUENCE as - /// sub-register references of the register defined by REG_SEQUENCE. 
- bool EliminateRegSequences(); + bool rescheduleMIBelowKill(MachineBasicBlock::iterator &mi, + MachineBasicBlock::iterator &nmi, + unsigned Reg); + bool rescheduleKillAboveMI(MachineBasicBlock::iterator &mi, + MachineBasicBlock::iterator &nmi, + unsigned Reg); - public: - static char ID; // Pass identification, replacement for typeid - TwoAddressInstructionPass() : MachineFunctionPass(ID) { - initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry()); - } + bool tryInstructionTransform(MachineBasicBlock::iterator &mi, + MachineBasicBlock::iterator &nmi, + unsigned SrcIdx, unsigned DstIdx, + unsigned Dist); - virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AU.setPreservesCFG(); - AU.addRequired(); - AU.addPreserved(); - AU.addPreserved(); - AU.addPreserved(); - AU.addPreservedID(MachineLoopInfoID); - AU.addPreservedID(MachineDominatorsID); - MachineFunctionPass::getAnalysisUsage(AU); - } + void scanUses(unsigned DstReg); - /// runOnMachineFunction - Pass entry point. - bool runOnMachineFunction(MachineFunction&); - }; -} + void processCopy(MachineInstr *MI); + + typedef SmallVector, 4> TiedPairList; + typedef SmallDenseMap TiedOperandMap; + bool collectTiedOperands(MachineInstr *MI, TiedOperandMap&); + void processTiedPairs(MachineInstr *MI, TiedPairList&, unsigned &Dist); + + /// eliminateRegSequences - Eliminate REG_SEQUENCE instructions as part of + /// the de-ssa process. This replaces sources of REG_SEQUENCE as sub-register + /// references of the register defined by REG_SEQUENCE. + bool eliminateRegSequences(); + +public: + static char ID; // Pass identification, replacement for typeid + TwoAddressInstructionPass() : MachineFunctionPass(ID) { + initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry()); + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesCFG(); + AU.addRequired(); + AU.addPreserved(); + AU.addPreserved(); + AU.addPreserved(); + AU.addPreservedID(MachineLoopInfoID); + AU.addPreservedID(MachineDominatorsID); + MachineFunctionPass::getAnalysisUsage(AU); + } + + /// runOnMachineFunction - Pass entry point. + bool runOnMachineFunction(MachineFunction&); +}; +} // end anonymous namespace char TwoAddressInstructionPass::ID = 0; INITIALIZE_PASS_BEGIN(TwoAddressInstructionPass, "twoaddressinstruction", @@ -180,13 +172,13 @@ INITIALIZE_PASS_END(TwoAddressInstructionPass, "twoaddressinstruction", char &llvm::TwoAddressInstructionPassID = TwoAddressInstructionPass::ID; -/// Sink3AddrInstruction - A two-address instruction has been converted to a +/// sink3AddrInstruction - A two-address instruction has been converted to a /// three-address instruction to avoid clobbering a register. Try to sink it /// past the instruction that would kill the above mentioned register to reduce /// register pressure. -bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB, - MachineInstr *MI, unsigned SavedReg, - MachineBasicBlock::iterator OldPos) { +bool TwoAddressInstructionPass:: +sink3AddrInstruction(MachineInstr *MI, unsigned SavedReg, + MachineBasicBlock::iterator OldPos) { // FIXME: Shouldn't we be trying to do this before we three-addressify the // instruction? After this transformation is done, we no longer need // the instruction to be in three-address form. 
@@ -299,13 +291,12 @@ bool TwoAddressInstructionPass::Sink3AddrInstruction(MachineBasicBlock *MBB, return true; } -/// NoUseAfterLastDef - Return true if there are no intervening uses between the +/// noUseAfterLastDef - Return true if there are no intervening uses between the /// last instruction in the MBB that defines the specified register and the /// two-address instruction which is being processed. It also returns the last /// def location by reference -bool TwoAddressInstructionPass::NoUseAfterLastDef(unsigned Reg, - MachineBasicBlock *MBB, unsigned Dist, - unsigned &LastDef) { +bool TwoAddressInstructionPass::noUseAfterLastDef(unsigned Reg, unsigned Dist, + unsigned &LastDef) { LastDef = 0; unsigned LastUse = Dist; for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(Reg), @@ -465,10 +456,9 @@ regsAreCompatible(unsigned RegA, unsigned RegB, const TargetRegisterInfo *TRI) { /// isProfitableToCommute - Return true if it's potentially profitable to commute /// the two-address instruction that's being processed. bool -TwoAddressInstructionPass::isProfitableToCommute(unsigned regA, unsigned regB, - unsigned regC, - MachineInstr *MI, MachineBasicBlock *MBB, - unsigned Dist) { +TwoAddressInstructionPass:: +isProfitableToCommute(unsigned regA, unsigned regB, unsigned regC, + MachineInstr *MI, unsigned Dist) { if (OptLevel == CodeGenOpt::None) return false; @@ -516,13 +506,13 @@ TwoAddressInstructionPass::isProfitableToCommute(unsigned regA, unsigned regB, // If there is a use of regC between its last def (could be livein) and this // instruction, then bail. unsigned LastDefC = 0; - if (!NoUseAfterLastDef(regC, MBB, Dist, LastDefC)) + if (!noUseAfterLastDef(regC, Dist, LastDefC)) return false; // If there is a use of regB between its last def (could be livein) and this // instruction, then go ahead and make this transformation. unsigned LastDefB = 0; - if (!NoUseAfterLastDef(regB, MBB, Dist, LastDefB)) + if (!noUseAfterLastDef(regB, Dist, LastDefB)) return true; // Since there are no intervening uses for both registers, then commute @@ -530,13 +520,12 @@ TwoAddressInstructionPass::isProfitableToCommute(unsigned regA, unsigned regB, return LastDefB && LastDefC && LastDefC > LastDefB; } -/// CommuteInstruction - Commute a two-address instruction and update the basic +/// commuteInstruction - Commute a two-address instruction and update the basic /// block, distance map, and live variables if needed. Return true if it is /// successful. -bool -TwoAddressInstructionPass::CommuteInstruction(MachineBasicBlock::iterator &mi, - MachineFunction::iterator &mbbi, - unsigned RegB, unsigned RegC, unsigned Dist) { +bool TwoAddressInstructionPass:: +commuteInstruction(MachineBasicBlock::iterator &mi, + unsigned RegB, unsigned RegC, unsigned Dist) { MachineInstr *MI = mi; DEBUG(dbgs() << "2addr: COMMUTING : " << *MI); MachineInstr *NewMI = TII->commuteInstruction(MI); @@ -555,8 +544,8 @@ TwoAddressInstructionPass::CommuteInstruction(MachineBasicBlock::iterator &mi, if (Indexes) Indexes->replaceMachineInstrInMaps(MI, NewMI); - mbbi->insert(mi, NewMI); // Insert the new inst - mbbi->erase(mi); // Nuke the old inst. + MBB->insert(mi, NewMI); // Insert the new inst + MBB->erase(mi); // Nuke the old inst. 
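// Editorial sketch of the commute heuristic in isProfitableToCommute()
// above: once neither register has a use between its last local def and the
// two-address instruction, commuting is chosen when regC was defined more
// recently than regB (LastDefC > LastDefB). Distances count instructions
// from the top of the block, with zero meaning "no local def seen"; the
// numbers in main() are invented.
#include <cstdio>

bool profitableToCommute(unsigned LastDefB, unsigned LastDefC) {
  return LastDefB && LastDefC && LastDefC > LastDefB;
}

int main() {
  std::printf("%d\n", profitableToCommute(/*LastDefB=*/10, /*LastDefC=*/25)); // 1
  std::printf("%d\n", profitableToCommute(/*LastDefB=*/25, /*LastDefC=*/10)); // 0
}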
mi = NewMI; DistanceMap.insert(std::make_pair(NewMI, Dist)); } @@ -588,51 +577,51 @@ TwoAddressInstructionPass::isProfitableToConv3Addr(unsigned RegA,unsigned RegB){ return (ToRegA && !regsAreCompatible(FromRegB, ToRegA, TRI)); } -/// ConvertInstTo3Addr - Convert the specified two-address instruction into a +/// convertInstTo3Addr - Convert the specified two-address instruction into a /// three address one. Return true if this transformation was successful. bool -TwoAddressInstructionPass::ConvertInstTo3Addr(MachineBasicBlock::iterator &mi, +TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi, MachineBasicBlock::iterator &nmi, - MachineFunction::iterator &mbbi, unsigned RegA, unsigned RegB, unsigned Dist) { - MachineInstr *NewMI = TII->convertToThreeAddress(mbbi, mi, LV); - if (NewMI) { - DEBUG(dbgs() << "2addr: CONVERTING 2-ADDR: " << *mi); - DEBUG(dbgs() << "2addr: TO 3-ADDR: " << *NewMI); - bool Sunk = false; + // FIXME: Why does convertToThreeAddress() need an iterator reference? + MachineFunction::iterator MFI = MBB; + MachineInstr *NewMI = TII->convertToThreeAddress(MFI, mi, LV); + assert(MBB == MFI && "convertToThreeAddress changed iterator reference"); + if (!NewMI) + return false; - if (Indexes) - Indexes->replaceMachineInstrInMaps(mi, NewMI); + DEBUG(dbgs() << "2addr: CONVERTING 2-ADDR: " << *mi); + DEBUG(dbgs() << "2addr: TO 3-ADDR: " << *NewMI); + bool Sunk = false; - if (NewMI->findRegisterUseOperand(RegB, false, TRI)) - // FIXME: Temporary workaround. If the new instruction doesn't - // uses RegB, convertToThreeAddress must have created more - // then one instruction. - Sunk = Sink3AddrInstruction(mbbi, NewMI, RegB, mi); + if (Indexes) + Indexes->replaceMachineInstrInMaps(mi, NewMI); - mbbi->erase(mi); // Nuke the old inst. + if (NewMI->findRegisterUseOperand(RegB, false, TRI)) + // FIXME: Temporary workaround. If the new instruction doesn't + // uses RegB, convertToThreeAddress must have created more + // then one instruction. + Sunk = sink3AddrInstruction(NewMI, RegB, mi); - if (!Sunk) { - DistanceMap.insert(std::make_pair(NewMI, Dist)); - mi = NewMI; - nmi = llvm::next(mi); - } + MBB->erase(mi); // Nuke the old inst. - // Update source and destination register maps. - SrcRegMap.erase(RegA); - DstRegMap.erase(RegB); - return true; + if (!Sunk) { + DistanceMap.insert(std::make_pair(NewMI, Dist)); + mi = NewMI; + nmi = llvm::next(mi); } - return false; + // Update source and destination register maps. + SrcRegMap.erase(RegA); + DstRegMap.erase(RegB); + return true; } -/// ScanUses - Scan forward recursively for only uses, update maps if the use +/// scanUses - Scan forward recursively for only uses, update maps if the use /// is a copy or a two-address instruction. void -TwoAddressInstructionPass::ScanUses(unsigned DstReg, MachineBasicBlock *MBB, - SmallPtrSet &Processed) { +TwoAddressInstructionPass::scanUses(unsigned DstReg) { SmallVector VirtRegPairs; bool IsDstPhys; bool IsCopy = false; @@ -676,7 +665,7 @@ TwoAddressInstructionPass::ScanUses(unsigned DstReg, MachineBasicBlock *MBB, } } -/// ProcessCopy - If the specified instruction is not yet processed, process it +/// processCopy - If the specified instruction is not yet processed, process it /// if it's a copy. For a copy instruction, we find the physical registers the /// source and destination registers might be mapped to. These are kept in /// point-to maps used to determine future optimizations. e.g. 
@@ -688,9 +677,7 @@ TwoAddressInstructionPass::ScanUses(unsigned DstReg, MachineBasicBlock *MBB, /// coalesced to r0 (from the input side). v1025 is mapped to r1. v1026 is /// potentially joined with r1 on the output side. It's worthwhile to commute /// 'add' to eliminate a copy. -void TwoAddressInstructionPass::ProcessCopy(MachineInstr *MI, - MachineBasicBlock *MBB, - SmallPtrSet &Processed) { +void TwoAddressInstructionPass::processCopy(MachineInstr *MI) { if (Processed.count(MI)) return; @@ -707,21 +694,20 @@ void TwoAddressInstructionPass::ProcessCopy(MachineInstr *MI, assert(SrcRegMap[DstReg] == SrcReg && "Can't map to two src physical registers!"); - ScanUses(DstReg, MBB, Processed); + scanUses(DstReg); } Processed.insert(MI); return; } -/// RescheduleMIBelowKill - If there is one more local instruction that reads +/// rescheduleMIBelowKill - If there is one more local instruction that reads /// 'Reg' and it kills 'Reg, consider moving the instruction below the kill /// instruction in order to eliminate the need for the copy. -bool -TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB, - MachineBasicBlock::iterator &mi, - MachineBasicBlock::iterator &nmi, - unsigned Reg) { +bool TwoAddressInstructionPass:: +rescheduleMIBelowKill(MachineBasicBlock::iterator &mi, + MachineBasicBlock::iterator &nmi, + unsigned Reg) { // Bail immediately if we don't have LV available. We use it to find kills // efficiently. if (!LV) @@ -853,8 +839,7 @@ TwoAddressInstructionPass::RescheduleMIBelowKill(MachineBasicBlock *MBB, /// isDefTooClose - Return true if the re-scheduling will put the given /// instruction too close to the defs of its register dependencies. bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist, - MachineInstr *MI, - MachineBasicBlock *MBB) { + MachineInstr *MI) { for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(Reg), DE = MRI->def_end(); DI != DE; ++DI) { MachineInstr *DefMI = &*DI; @@ -873,15 +858,14 @@ bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist, return false; } -/// RescheduleKillAboveMI - If there is one more local instruction that reads +/// rescheduleKillAboveMI - If there is one more local instruction that reads /// 'Reg' and it kills 'Reg, consider moving the kill instruction above the /// current two-address instruction in order to eliminate the need for the /// copy. -bool -TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB, - MachineBasicBlock::iterator &mi, - MachineBasicBlock::iterator &nmi, - unsigned Reg) { +bool TwoAddressInstructionPass:: +rescheduleKillAboveMI(MachineBasicBlock::iterator &mi, + MachineBasicBlock::iterator &nmi, + unsigned Reg) { // Bail immediately if we don't have LV available. We use it to find kills // efficiently. 
if (!LV) @@ -918,7 +902,7 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB, if (MO.isUse()) { if (!MOReg) continue; - if (isDefTooClose(MOReg, DI->second, MI, MBB)) + if (isDefTooClose(MOReg, DI->second, MI)) return false; if (MOReg == Reg && !MO.isKill()) return false; @@ -1006,18 +990,16 @@ TwoAddressInstructionPass::RescheduleKillAboveMI(MachineBasicBlock *MBB, return true; } -/// TryInstructionTransform - For the case where an instruction has a single +/// tryInstructionTransform - For the case where an instruction has a single /// pair of tied register operands, attempt some transformations that may /// either eliminate the tied operands or improve the opportunities for /// coalescing away the register copy. Returns true if no copy needs to be /// inserted to untie mi's operands (either because they were untied, or /// because mi was rescheduled, and will be visited again later). bool TwoAddressInstructionPass:: -TryInstructionTransform(MachineBasicBlock::iterator &mi, +tryInstructionTransform(MachineBasicBlock::iterator &mi, MachineBasicBlock::iterator &nmi, - MachineFunction::iterator &mbbi, - unsigned SrcIdx, unsigned DstIdx, unsigned Dist, - SmallPtrSet &Processed) { + unsigned SrcIdx, unsigned DstIdx, unsigned Dist) { if (OptLevel == CodeGenOpt::None) return false; @@ -1030,7 +1012,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi, bool regBKilled = isKilled(MI, regB, MRI, TII); if (TargetRegisterInfo::isVirtualRegister(regA)) - ScanUses(regA, &*mbbi, Processed); + scanUses(regA); // Check if it is profitable to commute the operands. unsigned SrcOp1, SrcOp2; @@ -1051,7 +1033,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi, // If C dies but B does not, swap the B and C operands. // This makes the live ranges of A and C joinable. TryCommute = true; - else if (isProfitableToCommute(regA, regB, regC, &MI, mbbi, Dist)) { + else if (isProfitableToCommute(regA, regB, regC, &MI, Dist)) { TryCommute = true; AggressiveCommute = true; } @@ -1059,7 +1041,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi, } // If it's profitable to commute, try to do so. - if (TryCommute && CommuteInstruction(mi, mbbi, regB, regC, Dist)) { + if (TryCommute && commuteInstruction(mi, regB, regC, Dist)) { ++NumCommuted; if (AggressiveCommute) ++NumAggrCommuted; @@ -1068,7 +1050,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi, // If there is one more use of regB later in the same MBB, consider // re-schedule this MI below it. - if (RescheduleMIBelowKill(mbbi, mi, nmi, regB)) { + if (rescheduleMIBelowKill(mi, nmi, regB)) { ++NumReSchedDowns; return true; } @@ -1078,7 +1060,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi, // three-address instruction. Check if it is profitable. if (!regBKilled || isProfitableToConv3Addr(regA, regB)) { // Try to convert it. - if (ConvertInstTo3Addr(mi, nmi, mbbi, regA, regB, Dist)) { + if (convertInstTo3Addr(mi, nmi, regA, regB, Dist)) { ++NumConvertedTo3Addr; return true; // Done with this instruction. } @@ -1087,7 +1069,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi, // If there is one more use of regB later in the same MBB, consider // re-schedule it before this MI if it's legal. 
- if (RescheduleKillAboveMI(mbbi, mi, nmi, regB)) { + if (rescheduleKillAboveMI(mi, nmi, regB)) { ++NumReSchedUps; return true; } @@ -1131,8 +1113,8 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi, // Tentatively insert the instructions into the block so that they // look "normal" to the transformation logic. - mbbi->insert(mi, NewMIs[0]); - mbbi->insert(mi, NewMIs[1]); + MBB->insert(mi, NewMIs[0]); + MBB->insert(mi, NewMIs[1]); DEBUG(dbgs() << "2addr: NEW LOAD: " << *NewMIs[0] << "2addr: NEW INST: " << *NewMIs[1]); @@ -1142,8 +1124,7 @@ TryInstructionTransform(MachineBasicBlock::iterator &mi, unsigned NewSrcIdx = NewMIs[1]->findRegisterUseOperandIdx(regB); MachineBasicBlock::iterator NewMI = NewMIs[1]; bool TransformSuccess = - TryInstructionTransform(NewMI, mi, mbbi, - NewSrcIdx, NewDstIdx, Dist, Processed); + tryInstructionTransform(NewMI, mi, NewSrcIdx, NewDstIdx, Dist); if (TransformSuccess || NewMIs[1]->getOperand(NewSrcIdx).isKill()) { // Success, or at least we made an improvement. Keep the unfolded @@ -1202,8 +1183,7 @@ bool TwoAddressInstructionPass:: collectTiedOperands(MachineInstr *MI, TiedOperandMap &TiedOperands) { const MCInstrDesc &MCID = MI->getDesc(); bool AnyOps = false; - unsigned NumOps = MI->isInlineAsm() ? - MI->getNumOperands() : MCID.getNumOperands(); + unsigned NumOps = MI->getNumOperands(); for (unsigned SrcIdx = 0; SrcIdx < NumOps; ++SrcIdx) { unsigned DstIdx = 0; @@ -1373,22 +1353,21 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) { DEBUG(dbgs() << "********** REWRITING TWO-ADDR INSTRS **********\n"); DEBUG(dbgs() << "********** Function: " - << MF->getFunction()->getName() << '\n'); + << MF->getName() << '\n'); // This pass takes the function out of SSA form. MRI->leaveSSA(); TiedOperandMap TiedOperands; - - SmallPtrSet Processed; - for (MachineFunction::iterator mbbi = MF->begin(), mbbe = MF->end(); - mbbi != mbbe; ++mbbi) { + for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end(); + MBBI != MBBE; ++MBBI) { + MBB = MBBI; unsigned Dist = 0; DistanceMap.clear(); SrcRegMap.clear(); DstRegMap.clear(); Processed.clear(); - for (MachineBasicBlock::iterator mi = mbbi->begin(), me = mbbi->end(); + for (MachineBasicBlock::iterator mi = MBB->begin(), me = MBB->end(); mi != me; ) { MachineBasicBlock::iterator nmi = llvm::next(mi); if (mi->isDebugValue()) { @@ -1402,7 +1381,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) { DistanceMap.insert(std::make_pair(mi, ++Dist)); - ProcessCopy(&*mi, &*mbbi, Processed); + processCopy(&*mi); // First scan through all the tied register uses in this instruction // and record a list of pairs of tied operands for each register. @@ -1427,8 +1406,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) { unsigned SrcReg = mi->getOperand(SrcIdx).getReg(); unsigned DstReg = mi->getOperand(DstIdx).getReg(); if (SrcReg != DstReg && - TryInstructionTransform(mi, nmi, mbbi, SrcIdx, DstIdx, Dist, - Processed)) { + tryInstructionTransform(mi, nmi, SrcIdx, DstIdx, Dist)) { // The tied operands have been eliminated or shifted further down the // block to ease elimination. Continue processing with 'nmi'. TiedOperands.clear(); @@ -1468,7 +1446,7 @@ bool TwoAddressInstructionPass::runOnMachineFunction(MachineFunction &Func) { // Eliminate REG_SEQUENCE instructions. Their whole purpose was to preseve // SSA form. It's now safe to de-SSA. 
- MadeChange |= EliminateRegSequences(); + MadeChange |= eliminateRegSequences(); return MadeChange; } @@ -1515,127 +1493,6 @@ static MachineInstr *findFirstDef(unsigned Reg, MachineRegisterInfo *MRI) { return First; } -/// CoalesceExtSubRegs - If a number of sources of the REG_SEQUENCE are -/// EXTRACT_SUBREG from the same register and to the same virtual register -/// with different sub-register indices, attempt to combine the -/// EXTRACT_SUBREGs and pre-coalesce them. e.g. -/// %reg1026 = VLDMQ %reg1025, 260, pred:14, pred:%reg0 -/// %reg1029:6 = EXTRACT_SUBREG %reg1026, 6 -/// %reg1029:5 = EXTRACT_SUBREG %reg1026, 5 -/// Since D subregs 5, 6 can combine to a Q register, we can coalesce -/// reg1026 to reg1029. -void -TwoAddressInstructionPass::CoalesceExtSubRegs(SmallVector &Srcs, - unsigned DstReg) { - SmallSet Seen; - for (unsigned i = 0, e = Srcs.size(); i != e; ++i) { - unsigned SrcReg = Srcs[i]; - if (!Seen.insert(SrcReg)) - continue; - - // Check that the instructions are all in the same basic block. - MachineInstr *SrcDefMI = MRI->getUniqueVRegDef(SrcReg); - MachineInstr *DstDefMI = MRI->getUniqueVRegDef(DstReg); - if (!SrcDefMI || !DstDefMI || - SrcDefMI->getParent() != DstDefMI->getParent()) - continue; - - // If there are no other uses than copies which feed into - // the reg_sequence, then we might be able to coalesce them. - bool CanCoalesce = true; - SmallVector SrcSubIndices, DstSubIndices; - for (MachineRegisterInfo::use_nodbg_iterator - UI = MRI->use_nodbg_begin(SrcReg), - UE = MRI->use_nodbg_end(); UI != UE; ++UI) { - MachineInstr *UseMI = &*UI; - if (!UseMI->isCopy() || UseMI->getOperand(0).getReg() != DstReg) { - CanCoalesce = false; - break; - } - SrcSubIndices.push_back(UseMI->getOperand(1).getSubReg()); - DstSubIndices.push_back(UseMI->getOperand(0).getSubReg()); - } - - if (!CanCoalesce || SrcSubIndices.size() < 2) - continue; - - // Check that the source subregisters can be combined. - std::sort(SrcSubIndices.begin(), SrcSubIndices.end()); - unsigned NewSrcSubIdx = 0; - if (!TRI->canCombineSubRegIndices(MRI->getRegClass(SrcReg), SrcSubIndices, - NewSrcSubIdx)) - continue; - - // Check that the destination subregisters can also be combined. - std::sort(DstSubIndices.begin(), DstSubIndices.end()); - unsigned NewDstSubIdx = 0; - if (!TRI->canCombineSubRegIndices(MRI->getRegClass(DstReg), DstSubIndices, - NewDstSubIdx)) - continue; - - // If neither source nor destination can be combined to the full register, - // just give up. This could be improved if it ever matters. - if (NewSrcSubIdx != 0 && NewDstSubIdx != 0) - continue; - - // Now that we know that all the uses are extract_subregs and that those - // subregs can somehow be combined, scan all the extract_subregs again to - // make sure the subregs are in the right order and can be composed. - MachineInstr *SomeMI = 0; - CanCoalesce = true; - for (MachineRegisterInfo::use_nodbg_iterator - UI = MRI->use_nodbg_begin(SrcReg), - UE = MRI->use_nodbg_end(); UI != UE; ++UI) { - MachineInstr *UseMI = &*UI; - assert(UseMI->isCopy()); - unsigned DstSubIdx = UseMI->getOperand(0).getSubReg(); - unsigned SrcSubIdx = UseMI->getOperand(1).getSubReg(); - assert(DstSubIdx != 0 && "missing subreg from RegSequence elimination"); - if ((NewDstSubIdx == 0 && - TRI->composeSubRegIndices(NewSrcSubIdx, DstSubIdx) != SrcSubIdx) || - (NewSrcSubIdx == 0 && - TRI->composeSubRegIndices(NewDstSubIdx, SrcSubIdx) != DstSubIdx)) { - CanCoalesce = false; - break; - } - // Keep track of one of the uses. 
Preferably the first one which has a - // flag. - if (!SomeMI || UseMI->getOperand(0).isUndef()) - SomeMI = UseMI; - } - if (!CanCoalesce) - continue; - - // Insert a copy to replace the original. - MachineInstr *CopyMI = BuildMI(*SomeMI->getParent(), SomeMI, - SomeMI->getDebugLoc(), - TII->get(TargetOpcode::COPY)) - .addReg(DstReg, RegState::Define | - getUndefRegState(SomeMI->getOperand(0).isUndef()), - NewDstSubIdx) - .addReg(SrcReg, 0, NewSrcSubIdx); - - // Remove all the old extract instructions. - for (MachineRegisterInfo::use_nodbg_iterator - UI = MRI->use_nodbg_begin(SrcReg), - UE = MRI->use_nodbg_end(); UI != UE; ) { - MachineInstr *UseMI = &*UI; - ++UI; - if (UseMI == CopyMI) - continue; - assert(UseMI->isCopy()); - // Move any kills to the new copy or extract instruction. - if (UseMI->getOperand(1).isKill()) { - CopyMI->getOperand(1).setIsKill(); - if (LV) - // Update live variables - LV->replaceKillInstruction(SrcReg, UseMI, &*CopyMI); - } - UseMI->eraseFromParent(); - } - } -} - static bool HasOtherRegSequenceUses(unsigned Reg, MachineInstr *RegSeq, MachineRegisterInfo *MRI) { for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg), @@ -1647,7 +1504,7 @@ static bool HasOtherRegSequenceUses(unsigned Reg, MachineInstr *RegSeq, return false; } -/// EliminateRegSequences - Eliminate REG_SEQUENCE instructions as part +/// eliminateRegSequences - Eliminate REG_SEQUENCE instructions as part /// of the de-ssa process. This replaces sources of REG_SEQUENCE as /// sub-register references of the register defined by REG_SEQUENCE. e.g. /// @@ -1655,7 +1512,7 @@ static bool HasOtherRegSequenceUses(unsigned Reg, MachineInstr *RegSeq, /// %reg1031 = REG_SEQUENCE %reg1029, 5, %reg1030, 6 /// => /// %reg1031:5, %reg1031:6 = VLD1q16 %reg1024, ... -bool TwoAddressInstructionPass::EliminateRegSequences() { +bool TwoAddressInstructionPass::eliminateRegSequences() { if (RegSequences.empty()) return false; @@ -1759,10 +1616,6 @@ bool TwoAddressInstructionPass::EliminateRegSequences() { if (MO.isReg() && MO.isDef() && MO.getReg() == DstReg) MO.setIsUndef(); } - // Make sure there is a full non-subreg imp-def operand on the - // instruction. This shouldn't be necessary, but it seems that at least - // RAFast requires it. - Def->addRegisterDefined(DstReg, TRI); DEBUG(dbgs() << "First def: " << *Def); } @@ -1775,12 +1628,6 @@ bool TwoAddressInstructionPass::EliminateRegSequences() { DEBUG(dbgs() << "Eliminated: " << *MI); MI->eraseFromParent(); } - - // Try coalescing some EXTRACT_SUBREG instructions. This can create - // INSERT_SUBREG instructions that must have flags added by - // LiveIntervalAnalysis, so only run it when LiveVariables is available. 
- if (LV) - CoalesceExtSubRegs(RealSrcs, DstReg); } RegSequences.clear(); diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp index 93840f0..bb93bdc 100644 --- a/lib/CodeGen/VirtRegMap.cpp +++ b/lib/CodeGen/VirtRegMap.cpp @@ -19,8 +19,8 @@ #define DEBUG_TYPE "regalloc" #include "VirtRegMap.h" #include "LiveDebugVariables.h" -#include "llvm/Function.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" +#include "llvm/CodeGen/LiveStackAnalysis.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -127,9 +127,11 @@ void VirtRegMap::print(raw_ostream &OS, const Module*) const { OS << '\n'; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void VirtRegMap::dump() const { print(dbgs()); } +#endif //===----------------------------------------------------------------------===// // VirtRegRewriter @@ -170,6 +172,7 @@ INITIALIZE_PASS_BEGIN(VirtRegRewriter, "virtregrewriter", INITIALIZE_PASS_DEPENDENCY(SlotIndexes) INITIALIZE_PASS_DEPENDENCY(LiveIntervals) INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables) +INITIALIZE_PASS_DEPENDENCY(LiveStacks) INITIALIZE_PASS_DEPENDENCY(VirtRegMap) INITIALIZE_PASS_END(VirtRegRewriter, "virtregrewriter", "Virtual Register Rewriter", false, false) @@ -182,6 +185,8 @@ void VirtRegRewriter::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired(); AU.addPreserved(); AU.addRequired(); + AU.addRequired(); + AU.addPreserved(); AU.addRequired(); MachineFunctionPass::getAnalysisUsage(AU); } @@ -197,11 +202,11 @@ bool VirtRegRewriter::runOnMachineFunction(MachineFunction &fn) { VRM = &getAnalysis(); DEBUG(dbgs() << "********** REWRITE VIRTUAL REGISTERS **********\n" << "********** Function: " - << MF->getFunction()->getName() << '\n'); + << MF->getName() << '\n'); DEBUG(VRM->dump()); // Add kill flags while we still have virtual registers. - LIS->addKillFlags(); + LIS->addKillFlags(VRM); // Live-in lists on basic blocks are required for physregs. addMBBLiveIns(); @@ -252,9 +257,6 @@ void VirtRegRewriter::rewrite() { SmallVector SuperDeads; SmallVector SuperDefs; SmallVector SuperKills; -#ifndef NDEBUG - BitVector Reserved = TRI->getReservedRegs(*MF); -#endif for (MachineFunction::iterator MBBI = MF->begin(), MBBE = MF->end(); MBBI != MBBE; ++MBBI) { @@ -278,7 +280,7 @@ void VirtRegRewriter::rewrite() { unsigned PhysReg = VRM->getPhys(VirtReg); assert(PhysReg != VirtRegMap::NO_PHYS_REG && "Instruction uses unmapped VirtReg"); - assert(!Reserved.test(PhysReg) && "Reserved register assignment"); + assert(!MRI->isReserved(PhysReg) && "Reserved register assignment"); // Preserve semantics of sub-register operands. if (MO.getSubReg()) { diff --git a/lib/CodeGen/VirtRegMap.h b/lib/CodeGen/VirtRegMap.h index c320985..7974dda 100644 --- a/lib/CodeGen/VirtRegMap.h +++ b/lib/CodeGen/VirtRegMap.h @@ -63,8 +63,8 @@ namespace llvm { /// createSpillSlot - Allocate a spill slot for RC from MFI. 
unsigned createSpillSlot(const TargetRegisterClass *RC); - VirtRegMap(const VirtRegMap&); // DO NOT IMPLEMENT - void operator=(const VirtRegMap&); // DO NOT IMPLEMENT + VirtRegMap(const VirtRegMap&) LLVM_DELETED_FUNCTION; + void operator=(const VirtRegMap&) LLVM_DELETED_FUNCTION; public: static char ID; diff --git a/lib/DebugInfo/CMakeLists.txt b/lib/DebugInfo/CMakeLists.txt index 441f1e8..1e9e509 100644 --- a/lib/DebugInfo/CMakeLists.txt +++ b/lib/DebugInfo/CMakeLists.txt @@ -8,5 +8,6 @@ add_llvm_library(LLVMDebugInfo DWARFDebugAranges.cpp DWARFDebugInfoEntry.cpp DWARFDebugLine.cpp + DWARFDebugRangeList.cpp DWARFFormValue.cpp ) diff --git a/lib/DebugInfo/DIContext.cpp b/lib/DebugInfo/DIContext.cpp index e2fd55f..691a92c 100644 --- a/lib/DebugInfo/DIContext.cpp +++ b/lib/DebugInfo/DIContext.cpp @@ -18,7 +18,10 @@ DIContext *DIContext::getDWARFContext(bool isLittleEndian, StringRef abbrevSection, StringRef aRangeSection, StringRef lineSection, - StringRef stringSection) { + StringRef stringSection, + StringRef rangeSection, + const RelocAddrMap &Map) { return new DWARFContextInMemory(isLittleEndian, infoSection, abbrevSection, - aRangeSection, lineSection, stringSection); + aRangeSection, lineSection, stringSection, + rangeSection, Map); } diff --git a/lib/DebugInfo/DWARFCompileUnit.cpp b/lib/DebugInfo/DWARFCompileUnit.cpp index b27d57b..bdd65b7 100644 --- a/lib/DebugInfo/DWARFCompileUnit.cpp +++ b/lib/DebugInfo/DWARFCompileUnit.cpp @@ -63,7 +63,7 @@ DWARFCompileUnit::extract(uint32_t offset, DataExtractor debug_info_data, Version = debug_info_data.getU16(&offset); bool abbrevsOK = debug_info_data.getU32(&offset) == abbrevs->getOffset(); Abbrevs = abbrevs; - AddrSize = debug_info_data.getU8 (&offset); + AddrSize = debug_info_data.getU8(&offset); bool versionOK = DWARFContext::isSupportedVersion(Version); bool addrSizeOK = AddrSize == 4 || AddrSize == 8; @@ -75,6 +75,15 @@ DWARFCompileUnit::extract(uint32_t offset, DataExtractor debug_info_data, return 0; } +bool DWARFCompileUnit::extractRangeList(uint32_t RangeListOffset, + DWARFDebugRangeList &RangeList) const { + // Require that compile unit is extracted. + assert(DieArray.size() > 0); + DataExtractor RangesData(Context.getRangeSection(), + Context.isLittleEndian(), AddrSize); + return RangeList.extract(RangesData, &RangeListOffset); +} + void DWARFCompileUnit::clear() { Offset = 0; Length = 0; @@ -94,7 +103,9 @@ void DWARFCompileUnit::dump(raw_ostream &OS) { << " (next CU at " << format("0x%08x", getNextCompileUnitOffset()) << ")\n"; - getCompileUnitDIE(false)->dump(OS, this, -1U); + const DWARFDebugInfoEntryMinimal *CU = getCompileUnitDIE(false); + assert(CU && "Null Compile Unit?"); + CU->dump(OS, this, -1U); } const char *DWARFCompileUnit::getCompilationDir() { @@ -174,11 +185,11 @@ size_t DWARFCompileUnit::extractDIEsIfNeeded(bool cu_die_only) { addDIE(die); return 1; } - else if (depth == 0 && initial_die_array_size == 1) { + else if (depth == 0 && initial_die_array_size == 1) // Don't append the CU die as we already did that - } else { - addDIE (die); - } + ; + else + addDIE(die); const DWARFAbbreviationDeclaration *abbrDecl = die.getAbbreviationDeclarationPtr(); @@ -199,9 +210,9 @@ size_t DWARFCompileUnit::extractDIEsIfNeeded(bool cu_die_only) { // Give a little bit of info if we encounter corrupt DWARF (our offset // should always terminate at or before the start of the next compilation // unit header). 
- if (offset > next_cu_offset) { - fprintf (stderr, "warning: DWARF compile unit extends beyond its bounds cu 0x%8.8x at 0x%8.8x'\n", getOffset(), offset); - } + if (offset > next_cu_offset) + fprintf(stderr, "warning: DWARF compile unit extends beyond its" + "bounds cu 0x%8.8x at 0x%8.8x'\n", getOffset(), offset); setDIERelations(); return DieArray.size(); @@ -244,12 +255,21 @@ DWARFCompileUnit::buildAddressRangeTable(DWARFDebugAranges *debug_aranges, clearDIEs(true); } -const DWARFDebugInfoEntryMinimal* -DWARFCompileUnit::getFunctionDIEForAddress(int64_t address) { +DWARFDebugInfoEntryMinimal::InlinedChain +DWARFCompileUnit::getInlinedChainForAddress(uint64_t Address) { + // First, find a subprogram that contains the given address (the root + // of inlined chain). extractDIEsIfNeeded(false); + const DWARFDebugInfoEntryMinimal *SubprogramDIE = 0; for (size_t i = 0, n = DieArray.size(); i != n; i++) { - if (DieArray[i].addressRangeContainsAddress(this, address)) - return &DieArray[i]; + if (DieArray[i].isSubprogramDIE() && + DieArray[i].addressRangeContainsAddress(this, Address)) { + SubprogramDIE = &DieArray[i]; + break; + } } - return 0; + // Get inlined chain rooted at this subprogram DIE. + if (!SubprogramDIE) + return DWARFDebugInfoEntryMinimal::InlinedChain(); + return SubprogramDIE->getInlinedChainForAddress(this, Address); } diff --git a/lib/DebugInfo/DWARFCompileUnit.h b/lib/DebugInfo/DWARFCompileUnit.h index b34a596..03e2862 100644 --- a/lib/DebugInfo/DWARFCompileUnit.h +++ b/lib/DebugInfo/DWARFCompileUnit.h @@ -12,6 +12,7 @@ #include "DWARFDebugAbbrev.h" #include "DWARFDebugInfoEntry.h" +#include "DWARFDebugRangeList.h" #include namespace llvm { @@ -45,6 +46,11 @@ public: /// extractDIEsIfNeeded - Parses a compile unit and indexes its DIEs if it /// hasn't already been done. Returns the number of DIEs parsed at this call. size_t extractDIEsIfNeeded(bool cu_die_only); + /// extractRangeList - extracts the range list referenced by this compile + /// unit from .debug_ranges section. Returns true on success. + /// Requires that compile unit is already extracted. + bool extractRangeList(uint32_t RangeListOffset, + DWARFDebugRangeList &RangeList) const; void clear(); void dump(raw_ostream &OS); uint32_t getOffset() const { return Offset; } @@ -106,11 +112,11 @@ public: void buildAddressRangeTable(DWARFDebugAranges *debug_aranges, bool clear_dies_if_already_not_parsed); - /// getFunctionDIEForAddress - Returns pointer to parsed subprogram DIE, - /// address ranges of which contain the provided address, - /// or NULL if there is no such subprogram. The pointer - /// is valid until DWARFCompileUnit::clear() or clearDIEs() is called. - const DWARFDebugInfoEntryMinimal *getFunctionDIEForAddress(int64_t address); + + /// getInlinedChainForAddress - fetches inlined chain for a given address. + /// Returns empty chain if there is no subprogram containing address. 
+ DWARFDebugInfoEntryMinimal::InlinedChain getInlinedChainForAddress( + uint64_t Address); }; } diff --git a/lib/DebugInfo/DWARFContext.cpp b/lib/DebugInfo/DWARFContext.cpp index 797662b..afd614c 100644 --- a/lib/DebugInfo/DWARFContext.cpp +++ b/lib/DebugInfo/DWARFContext.cpp @@ -17,6 +17,8 @@ using namespace llvm; using namespace dwarf; +typedef DWARFDebugLine::LineTable DWARFLineTable; + void DWARFContext::dump(raw_ostream &OS) { OS << ".debug_abbrev contents:\n"; getDebugAbbrev()->dump(OS); @@ -32,15 +34,17 @@ void DWARFContext::dump(raw_ostream &OS) { while (set.extract(arangesData, &offset)) set.dump(OS); + uint8_t savedAddressByteSize = 0; OS << "\n.debug_lines contents:\n"; for (unsigned i = 0, e = getNumCompileUnits(); i != e; ++i) { DWARFCompileUnit *cu = getCompileUnitAtIndex(i); + savedAddressByteSize = cu->getAddressByteSize(); unsigned stmtOffset = cu->getCompileUnitDIE()->getAttributeValueAsUnsigned(cu, DW_AT_stmt_list, -1U); if (stmtOffset != -1U) { DataExtractor lineData(getLineSection(), isLittleEndian(), - cu->getAddressByteSize()); + savedAddressByteSize); DWARFDebugLine::DumpingState state(OS); DWARFDebugLine::parseStatementTable(lineData, &stmtOffset, state); } @@ -54,6 +58,18 @@ void DWARFContext::dump(raw_ostream &OS) { OS << format("0x%8.8x: \"%s\"\n", lastOffset, s); lastOffset = offset; } + + OS << "\n.debug_ranges contents:\n"; + // In fact, different compile units may have different address byte + // sizes, but for simplicity we just use the address byte size of the last + // compile unit (there is no easy and fast way to associate address range + // list and the compile unit it describes). + DataExtractor rangesData(getRangeSection(), isLittleEndian(), + savedAddressByteSize); + offset = 0; + DWARFDebugRangeList rangeList; + while (rangeList.extract(rangesData, &offset)) + rangeList.dump(OS); } const DWARFDebugAbbrev *DWARFContext::getDebugAbbrev() { @@ -80,7 +96,7 @@ const DWARFDebugAranges *DWARFContext::getDebugAranges() { return Aranges.get(); } -const DWARFDebugLine::LineTable * +const DWARFLineTable * DWARFContext::getLineTableForCompileUnit(DWARFCompileUnit *cu) { if (!Line) Line.reset(new DWARFDebugLine()); @@ -92,7 +108,7 @@ DWARFContext::getLineTableForCompileUnit(DWARFCompileUnit *cu) { return 0; // No line table for this compile unit. // See if the line table is cached. - if (const DWARFDebugLine::LineTable *lt = Line->getLineTable(stmtOffset)) + if (const DWARFLineTable *lt = Line->getLineTable(stmtOffset)) return lt; // We have to parse it first. 
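The line-table caching above ends with the actual parse; once a unit's statement table is available, mapping a PC back to a row is a search over rows sorted by address. A simplified standalone sketch of that lookup over a plain sorted vector (hypothetical Row type, not the DWARFDebugLine API):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Row { uint64_t Address; unsigned File, Line, Column; };

    // Return the index of the last row whose address is <= Addr, or -1U if Addr
    // precedes every row.  Rows are assumed to be sorted by Address.
    static unsigned lookupAddress(const std::vector<Row> &Rows, uint64_t Addr) {
      auto It = std::upper_bound(
          Rows.begin(), Rows.end(), Addr,
          [](uint64_t A, const Row &R) { return A < R.Address; });
      if (It == Rows.begin())
        return -1U;
      return unsigned(It - Rows.begin()) - 1;
    }

    int main() {
      std::vector<Row> Rows = {{0x1000, 1, 10, 0}, {0x1008, 1, 11, 0},
                               {0x1020, 1, 14, 0}};
      unsigned Idx = lookupAddress(Rows, 0x100c);
      if (Idx != -1U)
        std::printf("file %u line %u\n", Rows[Idx].File, Rows[Idx].Line);
      return 0;
    }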
@@ -103,11 +119,11 @@ DWARFContext::getLineTableForCompileUnit(DWARFCompileUnit *cu) { void DWARFContext::parseCompileUnits() { uint32_t offset = 0; - const DataExtractor &debug_info_data = DataExtractor(getInfoSection(), - isLittleEndian(), 0); - while (debug_info_data.isValidOffset(offset)) { + const DataExtractor &DIData = DataExtractor(getInfoSection(), + isLittleEndian(), 0); + while (DIData.isValidOffset(offset)) { CUs.push_back(DWARFCompileUnit(*this)); - if (!CUs.back().extract(debug_info_data, &offset)) { + if (!CUs.back().extract(DIData, &offset)) { CUs.pop_back(); break; } @@ -131,75 +147,155 @@ namespace { }; } -DWARFCompileUnit *DWARFContext::getCompileUnitForOffset(uint32_t offset) { +DWARFCompileUnit *DWARFContext::getCompileUnitForOffset(uint32_t Offset) { if (CUs.empty()) parseCompileUnits(); - DWARFCompileUnit *i = std::lower_bound(CUs.begin(), CUs.end(), offset, - OffsetComparator()); - if (i != CUs.end()) - return &*i; + DWARFCompileUnit *CU = std::lower_bound(CUs.begin(), CUs.end(), Offset, + OffsetComparator()); + if (CU != CUs.end()) + return &*CU; return 0; } -DILineInfo DWARFContext::getLineInfoForAddress(uint64_t address, - DILineInfoSpecifier specifier) { +DWARFCompileUnit *DWARFContext::getCompileUnitForAddress(uint64_t Address) { // First, get the offset of the compile unit. - uint32_t cuOffset = getDebugAranges()->findAddress(address); + uint32_t CUOffset = getDebugAranges()->findAddress(Address); // Retrieve the compile unit. - DWARFCompileUnit *cu = getCompileUnitForOffset(cuOffset); - if (!cu) + return getCompileUnitForOffset(CUOffset); +} + +static bool getFileNameForCompileUnit(DWARFCompileUnit *CU, + const DWARFLineTable *LineTable, + uint64_t FileIndex, + bool NeedsAbsoluteFilePath, + std::string &FileName) { + if (CU == 0 || + LineTable == 0 || + !LineTable->getFileNameByIndex(FileIndex, NeedsAbsoluteFilePath, + FileName)) + return false; + if (NeedsAbsoluteFilePath && sys::path::is_relative(FileName)) { + // We may still need to append compilation directory of compile unit. + SmallString<16> AbsolutePath; + if (const char *CompilationDir = CU->getCompilationDir()) { + sys::path::append(AbsolutePath, CompilationDir); + } + sys::path::append(AbsolutePath, FileName); + FileName = AbsolutePath.str(); + } + return true; +} + +static bool getFileLineInfoForCompileUnit(DWARFCompileUnit *CU, + const DWARFLineTable *LineTable, + uint64_t Address, + bool NeedsAbsoluteFilePath, + std::string &FileName, + uint32_t &Line, uint32_t &Column) { + if (CU == 0 || LineTable == 0) + return false; + // Get the index of row we're looking for in the line table. + uint32_t RowIndex = LineTable->lookupAddress(Address); + if (RowIndex == -1U) + return false; + // Take file number and line/column from the row. 
+ const DWARFDebugLine::Row &Row = LineTable->Rows[RowIndex]; + if (!getFileNameForCompileUnit(CU, LineTable, Row.File, + NeedsAbsoluteFilePath, FileName)) + return false; + Line = Row.Line; + Column = Row.Column; + return true; +} + +DILineInfo DWARFContext::getLineInfoForAddress(uint64_t Address, + DILineInfoSpecifier Specifier) { + DWARFCompileUnit *CU = getCompileUnitForAddress(Address); + if (!CU) return DILineInfo(); - SmallString<16> fileName(""); - SmallString<16> functionName(""); - uint32_t line = 0; - uint32_t column = 0; - if (specifier.needs(DILineInfoSpecifier::FunctionName)) { - const DWARFDebugInfoEntryMinimal *function_die = - cu->getFunctionDIEForAddress(address); - if (function_die) { - if (const char *name = function_die->getSubprogramName(cu)) - functionName = name; + std::string FileName = ""; + std::string FunctionName = ""; + uint32_t Line = 0; + uint32_t Column = 0; + if (Specifier.needs(DILineInfoSpecifier::FunctionName)) { + // The address may correspond to instruction in some inlined function, + // so we have to build the chain of inlined functions and take the + // name of the topmost function in it. + const DWARFDebugInfoEntryMinimal::InlinedChain &InlinedChain = + CU->getInlinedChainForAddress(Address); + if (InlinedChain.size() > 0) { + const DWARFDebugInfoEntryMinimal &TopFunctionDIE = InlinedChain[0]; + if (const char *Name = TopFunctionDIE.getSubroutineName(CU)) + FunctionName = Name; } } - if (specifier.needs(DILineInfoSpecifier::FileLineInfo)) { - // Get the line table for this compile unit. - const DWARFDebugLine::LineTable *lineTable = getLineTableForCompileUnit(cu); - if (lineTable) { - // Get the index of the row we're looking for in the line table. - uint32_t rowIndex = lineTable->lookupAddress(address); - if (rowIndex != -1U) { - const DWARFDebugLine::Row &row = lineTable->Rows[rowIndex]; - // Take file/line info from the line table. - const DWARFDebugLine::FileNameEntry &fileNameEntry = - lineTable->Prologue.FileNames[row.File - 1]; - fileName = fileNameEntry.Name; - if (specifier.needs(DILineInfoSpecifier::AbsoluteFilePath) && - sys::path::is_relative(fileName.str())) { - // Append include directory of file (if it is present in line table) - // and compilation directory of compile unit to make path absolute. 
- const char *includeDir = 0; - if (uint64_t includeDirIndex = fileNameEntry.DirIdx) { - includeDir = lineTable->Prologue - .IncludeDirectories[includeDirIndex - 1]; - } - SmallString<16> absFileName; - if (includeDir == 0 || sys::path::is_relative(includeDir)) { - if (const char *compilationDir = cu->getCompilationDir()) - sys::path::append(absFileName, compilationDir); - } - if (includeDir) { - sys::path::append(absFileName, includeDir); - } - sys::path::append(absFileName, fileName.str()); - fileName = absFileName; - } - line = row.Line; - column = row.Column; + if (Specifier.needs(DILineInfoSpecifier::FileLineInfo)) { + const DWARFLineTable *LineTable = getLineTableForCompileUnit(CU); + const bool NeedsAbsoluteFilePath = + Specifier.needs(DILineInfoSpecifier::AbsoluteFilePath); + getFileLineInfoForCompileUnit(CU, LineTable, Address, + NeedsAbsoluteFilePath, + FileName, Line, Column); + } + return DILineInfo(StringRef(FileName), StringRef(FunctionName), + Line, Column); +} + +DIInliningInfo DWARFContext::getInliningInfoForAddress(uint64_t Address, + DILineInfoSpecifier Specifier) { + DWARFCompileUnit *CU = getCompileUnitForAddress(Address); + if (!CU) + return DIInliningInfo(); + + const DWARFDebugInfoEntryMinimal::InlinedChain &InlinedChain = + CU->getInlinedChainForAddress(Address); + if (InlinedChain.size() == 0) + return DIInliningInfo(); + + DIInliningInfo InliningInfo; + uint32_t CallFile = 0, CallLine = 0, CallColumn = 0; + const DWARFLineTable *LineTable = 0; + for (uint32_t i = 0, n = InlinedChain.size(); i != n; i++) { + const DWARFDebugInfoEntryMinimal &FunctionDIE = InlinedChain[i]; + std::string FileName = ""; + std::string FunctionName = ""; + uint32_t Line = 0; + uint32_t Column = 0; + // Get function name if necessary. + if (Specifier.needs(DILineInfoSpecifier::FunctionName)) { + if (const char *Name = FunctionDIE.getSubroutineName(CU)) + FunctionName = Name; + } + if (Specifier.needs(DILineInfoSpecifier::FileLineInfo)) { + const bool NeedsAbsoluteFilePath = + Specifier.needs(DILineInfoSpecifier::AbsoluteFilePath); + if (i == 0) { + // For the topmost frame, initialize the line table of this + // compile unit and fetch file/line info from it. + LineTable = getLineTableForCompileUnit(CU); + // For the topmost routine, get file/line info from line table. + getFileLineInfoForCompileUnit(CU, LineTable, Address, + NeedsAbsoluteFilePath, + FileName, Line, Column); + } else { + // Otherwise, use call file, call line and call column from + // previous DIE in inlined chain. + getFileNameForCompileUnit(CU, LineTable, CallFile, + NeedsAbsoluteFilePath, FileName); + Line = CallLine; + Column = CallColumn; + } + // Get call file/line/column of a current DIE. + if (i + 1 < n) { + FunctionDIE.getCallerFrame(CU, CallFile, CallLine, CallColumn); } } + DILineInfo Frame(StringRef(FileName), StringRef(FunctionName), + Line, Column); + InliningInfo.addFrame(Frame); } - return DILineInfo(fileName, functionName, line, column); + return InliningInfo; } void DWARFContextInMemory::anchor() { } diff --git a/lib/DebugInfo/DWARFContext.h b/lib/DebugInfo/DWARFContext.h index e55a27e..4001792 100644 --- a/lib/DebugInfo/DWARFContext.h +++ b/lib/DebugInfo/DWARFContext.h @@ -13,6 +13,7 @@ #include "DWARFCompileUnit.h" #include "DWARFDebugAranges.h" #include "DWARFDebugLine.h" +#include "DWARFDebugRangeList.h" #include "llvm/DebugInfo/DIContext.h" #include "llvm/ADT/OwningPtr.h" #include "llvm/ADT/SmallVector.h" @@ -25,21 +26,24 @@ namespace llvm { /// methods that a concrete implementation provides. 
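The getInliningInfoForAddress rewrite above fills one frame per entry of the inlined chain: the innermost frame takes its file/line from the line table at the address, while every enclosing frame takes the DW_AT_call_file/DW_AT_call_line recorded on the frame nested inside it. A small standalone sketch of that bookkeeping, with hypothetical Frame/ChainEntry types and the call file rendered as a plain index:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct ChainEntry {            // one inlined_subroutine (or subprogram) DIE
      std::string Name;
      unsigned CallFile, CallLine; // where this entry was inlined into its parent
    };
    struct Frame { std::string Function, File; unsigned Line; };

    // Chain is innermost entry first (as in the reversed inlined chain);
    // PCFile/PCLine is what the line table reports for the address itself.
    static std::vector<Frame> buildFrames(const std::vector<ChainEntry> &Chain,
                                          const std::string &PCFile,
                                          unsigned PCLine) {
      std::vector<Frame> Frames;
      std::string File = PCFile;
      unsigned Line = PCLine;
      for (const ChainEntry &E : Chain) {
        Frames.push_back({E.Name, File, Line});
        // The next (outer) frame is positioned at this entry's call site.
        File = "file#" + std::to_string(E.CallFile);
        Line = E.CallLine;
      }
      return Frames;
    }

    int main() {
      std::vector<ChainEntry> Chain = {{"inlined_leaf", 1, 42}, {"caller", 0, 0}};
      for (const Frame &F : buildFrames(Chain, "leaf.cc", 7))
        std::printf("%s at %s:%u\n", F.Function.c_str(), F.File.c_str(), F.Line);
      return 0;
    }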
class DWARFContext : public DIContext { bool IsLittleEndian; + const RelocAddrMap &RelocMap; SmallVector CUs; OwningPtr Abbrev; OwningPtr Aranges; OwningPtr Line; - DWARFContext(DWARFContext &); // = delete - DWARFContext &operator=(DWARFContext &); // = delete + DWARFContext(DWARFContext &) LLVM_DELETED_FUNCTION; + DWARFContext &operator=(DWARFContext &) LLVM_DELETED_FUNCTION; /// Read compile units from the debug_info section and store them in CUs. void parseCompileUnits(); protected: - DWARFContext(bool isLittleEndian) : IsLittleEndian(isLittleEndian) {} + DWARFContext(bool isLittleEndian, const RelocAddrMap &Map) : + IsLittleEndian(isLittleEndian), RelocMap(Map) {} public: virtual void dump(raw_ostream &OS); + /// Get the number of compile units in this context. unsigned getNumCompileUnits() { if (CUs.empty()) @@ -53,9 +57,6 @@ public: return &CUs[index]; } - /// Return the compile unit that includes an offset (relative to .debug_info). - DWARFCompileUnit *getCompileUnitForOffset(uint32_t offset); - /// Get a pointer to the parsed DebugAbbrev object. const DWARFDebugAbbrev *getDebugAbbrev(); @@ -66,22 +67,32 @@ public: const DWARFDebugLine::LineTable * getLineTableForCompileUnit(DWARFCompileUnit *cu); - virtual DILineInfo getLineInfoForAddress(uint64_t address, - DILineInfoSpecifier specifier = DILineInfoSpecifier()); + virtual DILineInfo getLineInfoForAddress(uint64_t Address, + DILineInfoSpecifier Specifier = DILineInfoSpecifier()); + virtual DIInliningInfo getInliningInfoForAddress(uint64_t Address, + DILineInfoSpecifier Specifier = DILineInfoSpecifier()); bool isLittleEndian() const { return IsLittleEndian; } + const RelocAddrMap &relocMap() const { return RelocMap; } virtual StringRef getInfoSection() = 0; virtual StringRef getAbbrevSection() = 0; virtual StringRef getARangeSection() = 0; virtual StringRef getLineSection() = 0; virtual StringRef getStringSection() = 0; + virtual StringRef getRangeSection() = 0; static bool isSupportedVersion(unsigned version) { return version == 2 || version == 3; } -}; +private: + /// Return the compile unit that includes an offset (relative to .debug_info). + DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset); + /// Return the compile unit which contains instruction with provided + /// address. + DWARFCompileUnit *getCompileUnitForAddress(uint64_t Address); +}; /// DWARFContextInMemory is the simplest possible implementation of a /// DWARFContext. 
It assumes all content is available in memory and stores @@ -93,19 +104,23 @@ class DWARFContextInMemory : public DWARFContext { StringRef ARangeSection; StringRef LineSection; StringRef StringSection; + StringRef RangeSection; public: DWARFContextInMemory(bool isLittleEndian, StringRef infoSection, StringRef abbrevSection, StringRef aRangeSection, StringRef lineSection, - StringRef stringSection) - : DWARFContext(isLittleEndian), + StringRef stringSection, + StringRef rangeSection, + const RelocAddrMap &Map = RelocAddrMap()) + : DWARFContext(isLittleEndian, Map), InfoSection(infoSection), AbbrevSection(abbrevSection), ARangeSection(aRangeSection), LineSection(lineSection), - StringSection(stringSection) + StringSection(stringSection), + RangeSection(rangeSection) {} virtual StringRef getInfoSection() { return InfoSection; } @@ -113,6 +128,7 @@ public: virtual StringRef getARangeSection() { return ARangeSection; } virtual StringRef getLineSection() { return LineSection; } virtual StringRef getStringSection() { return StringSection; } + virtual StringRef getRangeSection() { return RangeSection; } }; } diff --git a/lib/DebugInfo/DWARFDebugAranges.cpp b/lib/DebugInfo/DWARFDebugAranges.cpp index ef470e5..f9a34c9 100644 --- a/lib/DebugInfo/DWARFDebugAranges.cpp +++ b/lib/DebugInfo/DWARFDebugAranges.cpp @@ -62,7 +62,6 @@ bool DWARFDebugAranges::extract(DataExtractor debug_aranges_data) { uint32_t offset = 0; typedef std::vector SetCollection; - typedef SetCollection::const_iterator SetCollectionIter; SetCollection sets; DWARFDebugArangeSet set; diff --git a/lib/DebugInfo/DWARFDebugInfoEntry.cpp b/lib/DebugInfo/DWARFDebugInfoEntry.cpp index 429a36c..ab67464 100644 --- a/lib/DebugInfo/DWARFDebugInfoEntry.cpp +++ b/lib/DebugInfo/DWARFDebugInfoEntry.cpp @@ -1,4 +1,4 @@ -//===-- DWARFDebugInfoEntry.cpp --------------------------------------------===// +//===-- DWARFDebugInfoEntry.cpp -------------------------------------------===// // // The LLVM Compiler Infrastructure // @@ -101,7 +101,7 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu, DataExtractor debug_info_data = cu->getDebugInfoExtractor(); uint64_t abbrCode = debug_info_data.getULEB128(offset_ptr); - assert (fixed_form_sizes); // For best performance this should be specified! + assert(fixed_form_sizes); // For best performance this should be specified! if (abbrCode) { uint32_t offset = *offset_ptr; @@ -126,6 +126,7 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu, switch (form) { // Blocks if inlined data that have a length field and the data bytes // inlined in the .debug_info. + case DW_FORM_exprloc: case DW_FORM_block: form_size = debug_info_data.getULEB128(&offset); break; @@ -150,6 +151,11 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu, form_size = cu->getAddressByteSize(); break; + // 0 sized form. 
+ case DW_FORM_flag_present: + form_size = 0; + break; + // 1 byte values case DW_FORM_data1: case DW_FORM_flag: @@ -173,6 +179,7 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu, // 8 byte values case DW_FORM_data8: case DW_FORM_ref8: + case DW_FORM_ref_sig8: form_size = 8; break; @@ -188,6 +195,13 @@ bool DWARFDebugInfoEntryMinimal::extractFast(const DWARFCompileUnit *cu, form = debug_info_data.getULEB128(&offset); break; + case DW_FORM_sec_offset: + if (cu->getAddressByteSize() == 4) + debug_info_data.getU32(offset_ptr); + else + debug_info_data.getU64(offset_ptr); + break; + default: *offset_ptr = Offset; return false; @@ -249,6 +263,7 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu, switch (form) { // Blocks if inlined data that have a length field and the data // bytes // inlined in the .debug_info + case DW_FORM_exprloc: case DW_FORM_block: form_size = debug_info_data.getULEB128(&offset); break; @@ -273,6 +288,11 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu, form_size = cu_addr_size; break; + // 0 byte value + case DW_FORM_flag_present: + form_size = 0; + break; + // 1 byte values case DW_FORM_data1: case DW_FORM_flag: @@ -299,6 +319,7 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu, // 8 byte values case DW_FORM_data8: case DW_FORM_ref8: + case DW_FORM_ref_sig8: form_size = 8; break; @@ -314,6 +335,13 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu, form_is_indirect = true; break; + case DW_FORM_sec_offset: + if (cu->getAddressByteSize() == 4) + debug_info_data.getU32(offset_ptr); + else + debug_info_data.getU64(offset_ptr); + break; + default: *offset_ptr = offset; return false; @@ -336,6 +364,16 @@ DWARFDebugInfoEntryMinimal::extract(const DWARFCompileUnit *cu, return false; } +bool DWARFDebugInfoEntryMinimal::isSubprogramDIE() const { + return getTag() == DW_TAG_subprogram; +} + +bool DWARFDebugInfoEntryMinimal::isSubroutineDIE() const { + uint32_t Tag = getTag(); + return Tag == DW_TAG_subprogram || + Tag == DW_TAG_inlined_subroutine; +} + uint32_t DWARFDebugInfoEntryMinimal::getAttributeValue(const DWARFCompileUnit *cu, const uint16_t attr, @@ -373,9 +411,10 @@ DWARFDebugInfoEntryMinimal::getAttributeValue(const DWARFCompileUnit *cu, const char* DWARFDebugInfoEntryMinimal::getAttributeValueAsString( - const DWARFCompileUnit* cu, - const uint16_t attr, - const char* fail_value) const { + const DWARFCompileUnit* cu, + const uint16_t attr, + const char* fail_value) + const { DWARFFormValue form_value; if (getAttributeValue(cu, attr, form_value)) { DataExtractor stringExtractor(cu->getContext().getStringSection(), @@ -387,9 +426,9 @@ DWARFDebugInfoEntryMinimal::getAttributeValueAsString( uint64_t DWARFDebugInfoEntryMinimal::getAttributeValueAsUnsigned( - const DWARFCompileUnit* cu, - const uint16_t attr, - uint64_t fail_value) const { + const DWARFCompileUnit* cu, + const uint16_t attr, + uint64_t fail_value) const { DWARFFormValue form_value; if (getAttributeValue(cu, attr, form_value)) return form_value.getUnsigned(); @@ -398,9 +437,9 @@ DWARFDebugInfoEntryMinimal::getAttributeValueAsUnsigned( int64_t DWARFDebugInfoEntryMinimal::getAttributeValueAsSigned( - const DWARFCompileUnit* cu, - const uint16_t attr, - int64_t fail_value) const { + const DWARFCompileUnit* cu, + const uint16_t attr, + int64_t fail_value) const { DWARFFormValue form_value; if (getAttributeValue(cu, attr, form_value)) return form_value.getSigned(); @@ -409,33 +448,42 @@ 
DWARFDebugInfoEntryMinimal::getAttributeValueAsSigned( uint64_t DWARFDebugInfoEntryMinimal::getAttributeValueAsReference( - const DWARFCompileUnit* cu, - const uint16_t attr, - uint64_t fail_value) const { + const DWARFCompileUnit* cu, + const uint16_t attr, + uint64_t fail_value) + const { DWARFFormValue form_value; if (getAttributeValue(cu, attr, form_value)) return form_value.getReference(cu); return fail_value; } +bool DWARFDebugInfoEntryMinimal::getLowAndHighPC(const DWARFCompileUnit *CU, + uint64_t &LowPC, + uint64_t &HighPC) const { + HighPC = -1ULL; + LowPC = getAttributeValueAsUnsigned(CU, DW_AT_low_pc, -1ULL); + if (LowPC != -1ULL) + HighPC = getAttributeValueAsUnsigned(CU, DW_AT_high_pc, -1ULL); + return (HighPC != -1ULL); +} + void -DWARFDebugInfoEntryMinimal::buildAddressRangeTable(const DWARFCompileUnit *cu, - DWARFDebugAranges *debug_aranges) +DWARFDebugInfoEntryMinimal::buildAddressRangeTable(const DWARFCompileUnit *CU, + DWARFDebugAranges *DebugAranges) const { if (AbbrevDecl) { - uint16_t tag = AbbrevDecl->getTag(); - if (tag == DW_TAG_subprogram) { - uint64_t hi_pc = -1ULL; - uint64_t lo_pc = getAttributeValueAsUnsigned(cu, DW_AT_low_pc, -1ULL); - if (lo_pc != -1ULL) - hi_pc = getAttributeValueAsUnsigned(cu, DW_AT_high_pc, -1ULL); - if (hi_pc != -1ULL) - debug_aranges->appendRange(cu->getOffset(), lo_pc, hi_pc); + if (isSubprogramDIE()) { + uint64_t LowPC, HighPC; + if (getLowAndHighPC(CU, LowPC, HighPC)) { + DebugAranges->appendRange(CU->getOffset(), LowPC, HighPC); + } + // FIXME: try to append ranges from .debug_ranges section. } const DWARFDebugInfoEntryMinimal *child = getFirstChild(); while (child) { - child->buildAddressRangeTable(cu, debug_aranges); + child->buildAddressRangeTable(CU, DebugAranges); child = child->getSibling(); } } @@ -443,51 +491,95 @@ DWARFDebugInfoEntryMinimal::buildAddressRangeTable(const DWARFCompileUnit *cu, bool DWARFDebugInfoEntryMinimal::addressRangeContainsAddress( - const DWARFCompileUnit *cu, const uint64_t address) const { - if (!isNULL() && getTag() == DW_TAG_subprogram) { - uint64_t hi_pc = -1ULL; - uint64_t lo_pc = getAttributeValueAsUnsigned(cu, DW_AT_low_pc, -1ULL); - if (lo_pc != -1ULL) - hi_pc = getAttributeValueAsUnsigned(cu, DW_AT_high_pc, -1ULL); - if (hi_pc != -1ULL) { - return (lo_pc <= address && address < hi_pc); - } + const DWARFCompileUnit *CU, + const uint64_t Address) + const { + if (isNULL()) + return false; + uint64_t LowPC, HighPC; + if (getLowAndHighPC(CU, LowPC, HighPC)) + return (LowPC <= Address && Address <= HighPC); + // Try to get address ranges from .debug_ranges section. + uint32_t RangesOffset = getAttributeValueAsReference(CU, DW_AT_ranges, -1U); + if (RangesOffset != -1U) { + DWARFDebugRangeList RangeList; + if (CU->extractRangeList(RangesOffset, RangeList)) + return RangeList.containsAddress(CU->getBaseAddress(), Address); } return false; } const char* -DWARFDebugInfoEntryMinimal::getSubprogramName( - const DWARFCompileUnit *cu) const { - if (isNULL() || getTag() != DW_TAG_subprogram) +DWARFDebugInfoEntryMinimal::getSubroutineName(const DWARFCompileUnit *CU) + const { + if (!isSubroutineDIE()) return 0; // Try to get mangled name if possible. 
if (const char *name = - getAttributeValueAsString(cu, DW_AT_MIPS_linkage_name, 0)) + getAttributeValueAsString(CU, DW_AT_MIPS_linkage_name, 0)) return name; - if (const char *name = getAttributeValueAsString(cu, DW_AT_linkage_name, 0)) + if (const char *name = getAttributeValueAsString(CU, DW_AT_linkage_name, 0)) return name; - if (const char *name = getAttributeValueAsString(cu, DW_AT_name, 0)) + if (const char *name = getAttributeValueAsString(CU, DW_AT_name, 0)) return name; // Try to get name from specification DIE. uint32_t spec_ref = - getAttributeValueAsReference(cu, DW_AT_specification, -1U); + getAttributeValueAsReference(CU, DW_AT_specification, -1U); if (spec_ref != -1U) { DWARFDebugInfoEntryMinimal spec_die; - if (spec_die.extract(cu, &spec_ref)) { - if (const char *name = spec_die.getSubprogramName(cu)) + if (spec_die.extract(CU, &spec_ref)) { + if (const char *name = spec_die.getSubroutineName(CU)) return name; } } // Try to get name from abstract origin DIE. uint32_t abs_origin_ref = - getAttributeValueAsReference(cu, DW_AT_abstract_origin, -1U); + getAttributeValueAsReference(CU, DW_AT_abstract_origin, -1U); if (abs_origin_ref != -1U) { DWARFDebugInfoEntryMinimal abs_origin_die; - if (abs_origin_die.extract(cu, &abs_origin_ref)) { - if (const char *name = abs_origin_die.getSubprogramName(cu)) + if (abs_origin_die.extract(CU, &abs_origin_ref)) { + if (const char *name = abs_origin_die.getSubroutineName(CU)) return name; } } return 0; } + +void DWARFDebugInfoEntryMinimal::getCallerFrame(const DWARFCompileUnit *CU, + uint32_t &CallFile, + uint32_t &CallLine, + uint32_t &CallColumn) const { + CallFile = getAttributeValueAsUnsigned(CU, DW_AT_call_file, 0); + CallLine = getAttributeValueAsUnsigned(CU, DW_AT_call_line, 0); + CallColumn = getAttributeValueAsUnsigned(CU, DW_AT_call_column, 0); +} + +DWARFDebugInfoEntryMinimal::InlinedChain +DWARFDebugInfoEntryMinimal::getInlinedChainForAddress( + const DWARFCompileUnit *CU, + const uint64_t Address) + const { + DWARFDebugInfoEntryMinimal::InlinedChain InlinedChain; + if (isNULL()) + return InlinedChain; + for (const DWARFDebugInfoEntryMinimal *DIE = this; DIE; ) { + // Append current DIE to inlined chain only if it has correct tag + // (e.g. it is not a lexical block). + if (DIE->isSubroutineDIE()) { + InlinedChain.push_back(*DIE); + } + // Try to get child which also contains provided address. + const DWARFDebugInfoEntryMinimal *Child = DIE->getFirstChild(); + while (Child) { + if (Child->addressRangeContainsAddress(CU, Address)) { + // Assume there is only one such child. + break; + } + Child = Child->getSibling(); + } + DIE = Child; + } + // Reverse the obtained chain to make the root of inlined chain last. + std::reverse(InlinedChain.begin(), InlinedChain.end()); + return InlinedChain; +} diff --git a/lib/DebugInfo/DWARFDebugInfoEntry.h b/lib/DebugInfo/DWARFDebugInfoEntry.h index d5d86b9..9c1b2be 100644 --- a/lib/DebugInfo/DWARFDebugInfoEntry.h +++ b/lib/DebugInfo/DWARFDebugInfoEntry.h @@ -11,6 +11,7 @@ #define LLVM_DEBUGINFO_DWARFDEBUGINFOENTRY_H #include "DWARFAbbreviationDeclaration.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/Support/DataTypes.h" namespace llvm { @@ -19,6 +20,7 @@ class DWARFDebugAranges; class DWARFCompileUnit; class DWARFContext; class DWARFFormValue; +class DWARFInlinedSubroutineChain; /// DWARFDebugInfoEntryMinimal - A DIE with only the minimum required data. class DWARFDebugInfoEntryMinimal { @@ -52,6 +54,13 @@ public: uint32_t getTag() const { return AbbrevDecl ? 
AbbrevDecl->getTag() : 0; } bool isNULL() const { return AbbrevDecl == 0; } + + /// Returns true if DIE represents a subprogram (not inlined). + bool isSubprogramDIE() const; + /// Returns true if DIE represents a subprogram or an inlined + /// subroutine. + bool isSubroutineDIE() const; + uint32_t getOffset() const { return Offset; } uint32_t getNumAttributes() const { return !isNULL() ? AbbrevDecl->getNumAttributes() : 0; @@ -126,17 +135,40 @@ public: const uint16_t attr, int64_t fail_value) const; - void buildAddressRangeTable(const DWARFCompileUnit *cu, - DWARFDebugAranges *debug_aranges) const; - - bool addressRangeContainsAddress(const DWARFCompileUnit *cu, - const uint64_t address) const; - - // If a DIE represents a subprogram, returns its mangled name - // (or short name, if mangled is missing). This name may be fetched - // from specification or abstract origin for this subprogram. - // Returns null if no name is found. - const char* getSubprogramName(const DWARFCompileUnit *cu) const; + /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU. + /// Returns true if both attributes are present. + bool getLowAndHighPC(const DWARFCompileUnit *CU, + uint64_t &LowPC, uint64_t &HighPC) const; + + void buildAddressRangeTable(const DWARFCompileUnit *CU, + DWARFDebugAranges *DebugAranges) const; + + bool addressRangeContainsAddress(const DWARFCompileUnit *CU, + const uint64_t Address) const; + + /// If a DIE represents a subprogram (or inlined subroutine), + /// returns its mangled name (or short name, if mangled is missing). + /// This name may be fetched from specification or abstract origin + /// for this subprogram. Returns null if no name is found. + const char* getSubroutineName(const DWARFCompileUnit *CU) const; + + /// Retrieves values of DW_AT_call_file, DW_AT_call_line and + /// DW_AT_call_column from DIE (or zeroes if they are missing). + void getCallerFrame(const DWARFCompileUnit *CU, uint32_t &CallFile, + uint32_t &CallLine, uint32_t &CallColumn) const; + + /// InlinedChain - represents a chain of inlined_subroutine + /// DIEs, (possibly ending with subprogram DIE), all of which are contained + /// in some concrete inlined instance tree. Address range for each DIE + /// (except the last DIE) in this chain is contained in address + /// range for next DIE in the chain. + typedef SmallVector InlinedChain; + + /// Get inlined chain for a given address, rooted at the current DIE. + /// Returns empty chain if address is not contained in address range + /// of current DIE. 
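A sketch of how such a chain can be collected: starting from the subprogram DIE that covers the address, keep descending into whichever child also covers it, record every subprogram/inlined_subroutine visited, then reverse so the innermost entry comes first. Hypothetical Node type; the real code walks DWARFDebugInfoEntryMinimal children and siblings instead:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct Node {
      std::string Name;
      bool IsSubroutine;             // subprogram or inlined_subroutine
      uint64_t LowPC, HighPC;        // address range covered by this DIE
      std::vector<Node> Children;
      bool contains(uint64_t A) const { return LowPC <= A && A < HighPC; }
    };

    static std::vector<std::string> inlinedChain(const Node &Root, uint64_t Addr) {
      std::vector<std::string> Chain;
      const Node *Cur = &Root;
      while (Cur) {
        if (Cur->IsSubroutine)
          Chain.push_back(Cur->Name);
        const Node *Next = nullptr;
        for (const Node &C : Cur->Children)
          if (C.contains(Addr)) { Next = &C; break; }  // assume one matching child
        Cur = Next;
      }
      std::reverse(Chain.begin(), Chain.end());        // innermost entry first
      return Chain;
    }

    int main() {
      Node Root{"caller", true, 0x1000, 0x1100,
                {{"lexical_block", false, 0x1010, 0x1080,
                  {{"inlined_leaf", true, 0x1020, 0x1040, {}}}}}};
      for (const std::string &S : inlinedChain(Root, 0x1030))
        std::printf("%s\n", S.c_str());
      return 0;
    }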
+ InlinedChain getInlinedChainForAddress(const DWARFCompileUnit *CU, + const uint64_t Address) const; }; } diff --git a/lib/DebugInfo/DWARFDebugLine.cpp b/lib/DebugInfo/DWARFDebugLine.cpp index d99575d..267364a 100644 --- a/lib/DebugInfo/DWARFDebugLine.cpp +++ b/lib/DebugInfo/DWARFDebugLine.cpp @@ -10,6 +10,7 @@ #include "DWARFDebugLine.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/Format.h" +#include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" #include using namespace llvm; @@ -513,3 +514,29 @@ DWARFDebugLine::LineTable::lookupAddress(uint64_t address) const { } return index; } + +bool +DWARFDebugLine::LineTable::getFileNameByIndex(uint64_t FileIndex, + bool NeedsAbsoluteFilePath, + std::string &Result) const { + if (FileIndex == 0 || FileIndex > Prologue.FileNames.size()) + return false; + const FileNameEntry &Entry = Prologue.FileNames[FileIndex - 1]; + const char *FileName = Entry.Name; + if (!NeedsAbsoluteFilePath || + sys::path::is_absolute(FileName)) { + Result = FileName; + return true; + } + SmallString<16> FilePath; + uint64_t IncludeDirIndex = Entry.DirIdx; + // Be defensive about the contents of Entry. + if (IncludeDirIndex > 0 && + IncludeDirIndex <= Prologue.IncludeDirectories.size()) { + const char *IncludeDir = Prologue.IncludeDirectories[IncludeDirIndex - 1]; + sys::path::append(FilePath, IncludeDir); + } + sys::path::append(FilePath, FileName); + Result = FilePath.str(); + return true; +} diff --git a/lib/DebugInfo/DWARFDebugLine.h b/lib/DebugInfo/DWARFDebugLine.h index 6382b45..586dd7e 100644 --- a/lib/DebugInfo/DWARFDebugLine.h +++ b/lib/DebugInfo/DWARFDebugLine.h @@ -12,6 +12,7 @@ #include "llvm/Support/DataExtractor.h" #include +#include #include namespace llvm { @@ -174,6 +175,13 @@ public: // Returns the index of the row with file/line info for a given address, // or -1 if there is no such row. uint32_t lookupAddress(uint64_t address) const; + + // Extracts filename by its index in filename table in prologue. + // Returns true on success. + bool getFileNameByIndex(uint64_t FileIndex, + bool NeedsAbsoluteFilePath, + std::string &Result) const; + void dump(raw_ostream &OS) const; struct Prologue Prologue; diff --git a/lib/DebugInfo/DWARFDebugRangeList.cpp b/lib/DebugInfo/DWARFDebugRangeList.cpp new file mode 100644 index 0000000..1806bee --- /dev/null +++ b/lib/DebugInfo/DWARFDebugRangeList.cpp @@ -0,0 +1,67 @@ +//===-- DWARFDebugRangesList.cpp ------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "DWARFDebugRangeList.h" +#include "llvm/Support/Format.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +void DWARFDebugRangeList::clear() { + Offset = -1U; + AddressSize = 0; + Entries.clear(); +} + +bool DWARFDebugRangeList::extract(DataExtractor data, uint32_t *offset_ptr) { + clear(); + if (!data.isValidOffset(*offset_ptr)) + return false; + AddressSize = data.getAddressSize(); + if (AddressSize != 4 && AddressSize != 8) + return false; + Offset = *offset_ptr; + while (true) { + RangeListEntry entry; + uint32_t prev_offset = *offset_ptr; + entry.StartAddress = data.getAddress(offset_ptr); + entry.EndAddress = data.getAddress(offset_ptr); + // Check that both values were extracted correctly. 
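getFileNameByIndex above resolves a file-table entry to a path: absolute names are returned as-is, otherwise the include directory (if any) is prepended, and the caller may still prepend the unit's compilation directory. A simplified sketch of that resolution with C++17 std::filesystem standing in for llvm::sys::path (hypothetical inputs):

    #include <cstdio>
    #include <filesystem>
    #include <string>

    namespace fs = std::filesystem;

    // FileName comes from the line-table file entry, IncludeDir from its DirIdx
    // (may be empty), CompDir from DW_AT_comp_dir on the compile unit.
    static std::string resolveFileName(const std::string &FileName,
                                       const std::string &IncludeDir,
                                       const std::string &CompDir) {
      if (fs::path(FileName).is_absolute())
        return FileName;
      fs::path Full;
      if (IncludeDir.empty() || fs::path(IncludeDir).is_relative())
        Full = CompDir;               // base needed when include dir is missing/relative
      if (!IncludeDir.empty())
        Full /= IncludeDir;
      Full /= FileName;
      return Full.string();
    }

    int main() {
      std::printf("%s\n",
                  resolveFileName("a.cc", "src", "/home/user/project").c_str());
      return 0;
    }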
+ if (*offset_ptr != prev_offset + 2 * AddressSize) { + clear(); + return false; + } + if (entry.isEndOfListEntry()) + break; + Entries.push_back(entry); + } + return true; +} + +void DWARFDebugRangeList::dump(raw_ostream &OS) const { + for (int i = 0, n = Entries.size(); i != n; ++i) { + const char *format_str = (AddressSize == 4 + ? "%08x %08" PRIx64 " %08" PRIx64 "\n" + : "%08x %016" PRIx64 " %016" PRIx64 "\n"); + OS << format(format_str, Offset, Entries[i].StartAddress, + Entries[i].EndAddress); + } + OS << format("%08x \n", Offset); +} + +bool DWARFDebugRangeList::containsAddress(uint64_t BaseAddress, + uint64_t Address) const { + for (int i = 0, n = Entries.size(); i != n; ++i) { + if (Entries[i].isBaseAddressSelectionEntry(AddressSize)) + BaseAddress = Entries[i].EndAddress; + else if (Entries[i].containsAddress(BaseAddress, Address)) + return true; + } + return false; +} diff --git a/lib/DebugInfo/DWARFDebugRangeList.h b/lib/DebugInfo/DWARFDebugRangeList.h new file mode 100644 index 0000000..4e34a91 --- /dev/null +++ b/lib/DebugInfo/DWARFDebugRangeList.h @@ -0,0 +1,78 @@ +//===-- DWARFDebugRangeList.h -----------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_DEBUGINFO_DWARFDEBUGRANGELIST_H +#define LLVM_DEBUGINFO_DWARFDEBUGRANGELIST_H + +#include "llvm/Support/DataExtractor.h" +#include + +namespace llvm { + +class raw_ostream; + +class DWARFDebugRangeList { +public: + struct RangeListEntry { + // A beginning address offset. This address offset has the size of an + // address and is relative to the applicable base address of the + // compilation unit referencing this range list. It marks the beginning + // of an address range. + uint64_t StartAddress; + // An ending address offset. This address offset again has the size of + // an address and is relative to the applicable base address of the + // compilation unit referencing this range list. It marks the first + // address past the end of the address range. The ending address must + // be greater than or equal to the beginning address. + uint64_t EndAddress; + // The end of any given range list is marked by an end of list entry, + // which consists of a 0 for the beginning address offset + // and a 0 for the ending address offset. + bool isEndOfListEntry() const { + return (StartAddress == 0) && (EndAddress == 0); + } + // A base address selection entry consists of: + // 1. The value of the largest representable address offset + // (for example, 0xffffffff when the size of an address is 32 bits). + // 2. An address, which defines the appropriate base address for + // use in interpreting the beginning and ending address offsets of + // subsequent entries of the location list. + bool isBaseAddressSelectionEntry(uint8_t AddressSize) const { + assert(AddressSize == 4 || AddressSize == 8); + if (AddressSize == 4) + return StartAddress == -1U; + else + return StartAddress == -1ULL; + } + bool containsAddress(uint64_t BaseAddress, uint64_t Address) const { + return (BaseAddress + StartAddress <= Address) && + (Address < BaseAddress + EndAddress); + } + }; + +private: + // Offset in .debug_ranges section. 
+ uint32_t Offset; + uint8_t AddressSize; + std::vector Entries; + +public: + DWARFDebugRangeList() { clear(); } + void clear(); + void dump(raw_ostream &OS) const; + bool extract(DataExtractor data, uint32_t *offset_ptr); + /// containsAddress - Returns true if range list contains the given + /// address. Has to be passed base address of the compile unit that + /// references this range list. + bool containsAddress(uint64_t BaseAddress, uint64_t Address) const; +}; + +} // namespace llvm + +#endif // LLVM_DEBUGINFO_DWARFDEBUGRANGELIST_H diff --git a/lib/DebugInfo/DWARFFormValue.cpp b/lib/DebugInfo/DWARFFormValue.cpp index ee2a3ab..fea9fd7 100644 --- a/lib/DebugInfo/DWARFFormValue.cpp +++ b/lib/DebugInfo/DWARFFormValue.cpp @@ -10,6 +10,7 @@ #include "DWARFFormValue.h" #include "DWARFCompileUnit.h" #include "DWARFContext.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" @@ -41,6 +42,10 @@ static const uint8_t form_sizes_addr4[] = { 8, // 0x14 DW_FORM_ref8 0, // 0x15 DW_FORM_ref_udata 0, // 0x16 DW_FORM_indirect + 4, // 0x17 DW_FORM_sec_offset + 0, // 0x18 DW_FORM_exprloc + 0, // 0x19 DW_FORM_flag_present + 8, // 0x20 DW_FORM_ref_sig8 }; static const uint8_t form_sizes_addr8[] = { @@ -67,6 +72,10 @@ static const uint8_t form_sizes_addr8[] = { 8, // 0x14 DW_FORM_ref8 0, // 0x15 DW_FORM_ref_udata 0, // 0x16 DW_FORM_indirect + 8, // 0x17 DW_FORM_sec_offset + 0, // 0x18 DW_FORM_exprloc + 0, // 0x19 DW_FORM_flag_present + 8, // 0x20 DW_FORM_ref_sig8 }; const uint8_t * @@ -90,9 +99,18 @@ DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr, indirect = false; switch (Form) { case DW_FORM_addr: - case DW_FORM_ref_addr: - Value.uval = data.getUnsigned(offset_ptr, cu->getAddressByteSize()); + case DW_FORM_ref_addr: { + RelocAddrMap::const_iterator AI + = cu->getContext().relocMap().find(*offset_ptr); + if (AI != cu->getContext().relocMap().end()) { + const std::pair &R = AI->second; + Value.uval = R.second; + *offset_ptr += R.first; + } else + Value.uval = data.getUnsigned(offset_ptr, cu->getAddressByteSize()); + } break; + case DW_FORM_exprloc: case DW_FORM_block: Value.uval = data.getULEB128(offset_ptr); is_block = true; @@ -129,9 +147,17 @@ DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr, case DW_FORM_sdata: Value.sval = data.getSLEB128(offset_ptr); break; - case DW_FORM_strp: - Value.uval = data.getU32(offset_ptr); + case DW_FORM_strp: { + RelocAddrMap::const_iterator AI + = cu->getContext().relocMap().find(*offset_ptr); + if (AI != cu->getContext().relocMap().end()) { + const std::pair &R = AI->second; + Value.uval = R.second; + *offset_ptr += R.first; + } else + Value.uval = data.getU32(offset_ptr); break; + } case DW_FORM_udata: case DW_FORM_ref_udata: Value.uval = data.getULEB128(offset_ptr); @@ -141,12 +167,24 @@ DWARFFormValue::extractValue(DataExtractor data, uint32_t *offset_ptr, // Set the string value to also be the data for inlined cstr form // values only so we can tell the differnence between DW_FORM_string // and DW_FORM_strp form values - Value.data = (uint8_t*)Value.cstr; + Value.data = (const uint8_t*)Value.cstr; break; case DW_FORM_indirect: Form = data.getULEB128(offset_ptr); indirect = true; break; + case DW_FORM_sec_offset: + if (cu->getAddressByteSize() == 4) + Value.uval = data.getU32(offset_ptr); + else + Value.uval = data.getU64(offset_ptr); + break; + case DW_FORM_flag_present: + Value.uval = 1; + break; + case DW_FORM_ref_sig8: + Value.uval = 
data.getU64(offset_ptr); + break; default: return false; } @@ -179,6 +217,7 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data, switch (form) { // Blocks if inlined data that have a length field and the data bytes // inlined in the .debug_info + case DW_FORM_exprloc: case DW_FORM_block: { uint64_t size = debug_info_data.getULEB128(offset_ptr); *offset_ptr += size; @@ -211,6 +250,10 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data, *offset_ptr += cu->getAddressByteSize(); return true; + // 0 byte values - implied from the form. + case DW_FORM_flag_present: + return true; + // 1 byte values case DW_FORM_data1: case DW_FORM_flag: @@ -234,6 +277,7 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data, // 8 byte values case DW_FORM_data8: case DW_FORM_ref8: + case DW_FORM_ref_sig8: *offset_ptr += 8; return true; @@ -249,6 +293,15 @@ DWARFFormValue::skipValue(uint16_t form, DataExtractor debug_info_data, indirect = true; form = debug_info_data.getULEB128(offset_ptr); break; + + // 4 for DWARF32, 8 for DWARF64. + case DW_FORM_sec_offset: + if (cu->getAddressByteSize() == 4) + *offset_ptr += 4; + else + *offset_ptr += 8; + return true; + default: return false; } @@ -264,22 +317,26 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const { switch (Form) { case DW_FORM_addr: OS << format("0x%016" PRIx64, uvalue); break; + case DW_FORM_flag_present: OS << "true"; break; case DW_FORM_flag: case DW_FORM_data1: OS << format("0x%02x", (uint8_t)uvalue); break; case DW_FORM_data2: OS << format("0x%04x", (uint16_t)uvalue); break; case DW_FORM_data4: OS << format("0x%08x", (uint32_t)uvalue); break; + case DW_FORM_ref_sig8: case DW_FORM_data8: OS << format("0x%016" PRIx64, uvalue); break; case DW_FORM_string: OS << '"'; OS.write_escaped(getAsCString(NULL)); OS << '"'; break; + case DW_FORM_exprloc: case DW_FORM_block: case DW_FORM_block1: case DW_FORM_block2: case DW_FORM_block4: if (uvalue > 0) { switch (Form) { + case DW_FORM_exprloc: case DW_FORM_block: OS << format("<0x%" PRIx64 "> ", uvalue); break; case DW_FORM_block1: OS << format("<0x%2.2x> ", (uint8_t)uvalue); break; case DW_FORM_block2: OS << format("<0x%4.4x> ", (uint16_t)uvalue); break; @@ -342,6 +399,14 @@ DWARFFormValue::dump(raw_ostream &OS, const DWARFCompileUnit *cu) const { case DW_FORM_indirect: OS << "DW_FORM_indirect"; break; + + case DW_FORM_sec_offset: + if (cu->getAddressByteSize() == 4) + OS << format("0x%08x", (uint32_t)uvalue); + else + OS << format("0x%016" PRIx64, uvalue); + break; + default: OS << format("DW_FORM(0x%4.4x)", Form); break; @@ -404,6 +469,7 @@ const uint8_t *DWARFFormValue::BlockData() const { bool DWARFFormValue::isBlockForm(uint16_t form) { switch (form) { + case DW_FORM_exprloc: case DW_FORM_block: case DW_FORM_block1: case DW_FORM_block2: diff --git a/lib/DebugInfo/DWARFFormValue.h b/lib/DebugInfo/DWARFFormValue.h index 22ac011..c5b590d 100644 --- a/lib/DebugInfo/DWARFFormValue.h +++ b/lib/DebugInfo/DWARFFormValue.h @@ -52,7 +52,7 @@ public: bool extractValue(DataExtractor data, uint32_t *offset_ptr, const DWARFCompileUnit *cu); bool isInlinedCStr() const { - return Value.data != NULL && Value.data == (uint8_t*)Value.cstr; + return Value.data != NULL && Value.data == (const uint8_t*)Value.cstr; } const uint8_t *BlockData() const; uint64_t getReference(const DWARFCompileUnit* cu) const; diff --git a/lib/ExecutionEngine/ExecutionEngine.cpp b/lib/ExecutionEngine/ExecutionEngine.cpp index a744d0c..05987f2 100644 --- 
a/lib/ExecutionEngine/ExecutionEngine.cpp +++ b/lib/ExecutionEngine/ExecutionEngine.cpp @@ -29,7 +29,7 @@ #include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/Host.h" #include "llvm/Support/TargetRegistry.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include #include @@ -91,11 +91,11 @@ class GVMemoryBlock : public CallbackVH { public: /// \brief Returns the address the GlobalVariable should be written into. The /// GVMemoryBlock object prefixes that. - static char *Create(const GlobalVariable *GV, const TargetData& TD) { + static char *Create(const GlobalVariable *GV, const DataLayout& TD) { Type *ElTy = GV->getType()->getElementType(); size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy); void *RawMemory = ::operator new( - TargetData::RoundUpAlignment(sizeof(GVMemoryBlock), + DataLayout::RoundUpAlignment(sizeof(GVMemoryBlock), TD.getPreferredAlignment(GV)) + GVSize); new(RawMemory) GVMemoryBlock(GV); @@ -113,7 +113,7 @@ public: } // anonymous namespace char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) { - return GVMemoryBlock::Create(GV, *getTargetData()); + return GVMemoryBlock::Create(GV, *getDataLayout()); } bool ExecutionEngine::removeModule(Module *M) { @@ -267,7 +267,7 @@ public: void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE, const std::vector &InputArgv) { clear(); // Free the old contents. - unsigned PtrSize = EE->getTargetData()->getPointerSize(); + unsigned PtrSize = EE->getDataLayout()->getPointerSize(); Array = new char[(InputArgv.size()+1)*PtrSize]; DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n"); @@ -342,7 +342,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) { #ifndef NDEBUG /// isTargetNullPtr - Return whether the target pointer stored at Loc is null. 
static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) { - unsigned PtrSize = EE->getTargetData()->getPointerSize(); + unsigned PtrSize = EE->getDataLayout()->getPointerSize(); for (unsigned i = 0; i < PtrSize; ++i) if (*(i + (uint8_t*)Loc)) return false; @@ -501,7 +501,8 @@ ExecutionEngine *EngineBuilder::create(TargetMachine *TM) { return 0; } - if ((WhichEngine & EngineKind::JIT) && ExecutionEngine::JITCtor == 0) { + if ((WhichEngine & EngineKind::JIT) && ExecutionEngine::JITCtor == 0 && + ExecutionEngine::MCJITCtor == 0) { if (ErrorStr) *ErrorStr = "JIT has not been linked in."; } @@ -643,15 +644,17 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) { } case Instruction::PtrToInt: { GenericValue GV = getConstantValue(Op0); - uint32_t PtrWidth = TD->getPointerSizeInBits(); + uint32_t PtrWidth = TD->getTypeSizeInBits(Op0->getType()); + assert(PtrWidth <= 64 && "Bad pointer width"); GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal)); + uint32_t IntWidth = TD->getTypeSizeInBits(CE->getType()); + GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth); return GV; } case Instruction::IntToPtr: { GenericValue GV = getConstantValue(Op0); - uint32_t PtrWidth = TD->getPointerSizeInBits(); - if (PtrWidth != GV.IntVal.getBitWidth()) - GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth); + uint32_t PtrWidth = TD->getTypeSizeInBits(CE->getType()); + GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth); assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width"); GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue())); return GV; @@ -832,7 +835,7 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) { static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes) { assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!"); - uint8_t *Src = (uint8_t *)IntVal.getRawData(); + const uint8_t *Src = (const uint8_t *)IntVal.getRawData(); if (sys::isLittleEndianHost()) { // Little-endian host - the source is ordered from LSB to MSB. Order the @@ -855,7 +858,7 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, void ExecutionEngine::StoreValueToMemory(const GenericValue &Val, GenericValue *Ptr, Type *Ty) { - const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty); + const unsigned StoreBytes = getDataLayout()->getTypeStoreSize(Ty); switch (Ty->getTypeID()) { case Type::IntegerTyID: @@ -881,7 +884,7 @@ void ExecutionEngine::StoreValueToMemory(const GenericValue &Val, dbgs() << "Cannot store value of type " << *Ty << "!\n"; } - if (sys::isLittleEndianHost() != getTargetData()->isLittleEndian()) + if (sys::isLittleEndianHost() != getDataLayout()->isLittleEndian()) // Host and target are different endian - reverse the stored bytes. 
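
(Aside: the PtrToInt/IntToPtr constant-folding fix above sizes the integer by the width of the destination type instead of always assuming the target pointer width, and relies on APInt::zextOrTrunc to widen or narrow the bits. A standalone illustration of that helper, with invented values:

    #include <cassert>
    #include "llvm/ADT/APInt.h"

    void zextOrTruncDemo() {
      llvm::APInt Ptr(64, 0x00000000DEADBEEFULL); // 64-bit pointer bits
      llvm::APInt As32 = Ptr.zextOrTrunc(32);     // narrows to 0xDEADBEEF
      llvm::APInt Back = As32.zextOrTrunc(64);    // zero-extends back to 64 bits
      assert(Back.getZExtValue() == 0xDEADBEEFULL);
    }

End of aside.)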
std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr); } @@ -917,7 +920,7 @@ static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) { void ExecutionEngine::LoadValueFromMemory(GenericValue &Result, GenericValue *Ptr, Type *Ty) { - const unsigned LoadBytes = getTargetData()->getTypeStoreSize(Ty); + const unsigned LoadBytes = getDataLayout()->getTypeStoreSize(Ty); switch (Ty->getTypeID()) { case Type::IntegerTyID: @@ -958,20 +961,20 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) { if (const ConstantVector *CP = dyn_cast(Init)) { unsigned ElementSize = - getTargetData()->getTypeAllocSize(CP->getType()->getElementType()); + getDataLayout()->getTypeAllocSize(CP->getType()->getElementType()); for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i) InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize); return; } if (isa(Init)) { - memset(Addr, 0, (size_t)getTargetData()->getTypeAllocSize(Init->getType())); + memset(Addr, 0, (size_t)getDataLayout()->getTypeAllocSize(Init->getType())); return; } if (const ConstantArray *CPA = dyn_cast(Init)) { unsigned ElementSize = - getTargetData()->getTypeAllocSize(CPA->getType()->getElementType()); + getDataLayout()->getTypeAllocSize(CPA->getType()->getElementType()); for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i) InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize); return; @@ -979,7 +982,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) { if (const ConstantStruct *CPS = dyn_cast(Init)) { const StructLayout *SL = - getTargetData()->getStructLayout(cast(CPS->getType())); + getDataLayout()->getStructLayout(cast(CPS->getType())); for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i) InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i)); return; @@ -1126,7 +1129,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) { InitializeMemory(GV->getInitializer(), GA); Type *ElTy = GV->getType()->getElementType(); - size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy); + size_t GVSize = (size_t)getDataLayout()->getTypeAllocSize(ElTy); NumInitBytes += (unsigned)GVSize; ++NumGlobals; } diff --git a/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/lib/ExecutionEngine/ExecutionEngineBindings.cpp index 75e680a..1e790e7 100644 --- a/lib/ExecutionEngine/ExecutionEngineBindings.cpp +++ b/lib/ExecutionEngine/ExecutionEngineBindings.cpp @@ -239,7 +239,7 @@ void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE, LLVMValueRef Fn) } LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) { - return wrap(unwrap(EE)->getTargetData()); + return wrap(unwrap(EE)->getDataLayout()); } void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global, diff --git a/lib/ExecutionEngine/IntelJITEvents/CMakeLists.txt b/lib/ExecutionEngine/IntelJITEvents/CMakeLists.txt index 7d67d0d..3483088 100644 --- a/lib/ExecutionEngine/IntelJITEvents/CMakeLists.txt +++ b/lib/ExecutionEngine/IntelJITEvents/CMakeLists.txt @@ -1,11 +1,6 @@ - -include_directories( ${LLVM_INTEL_JITEVENTS_INCDIR} ${CMAKE_CURRENT_SOURCE_DIR}/.. ) - -set(system_libs - ${system_libs} - jitprofiling - ) +include_directories( ${CMAKE_CURRENT_SOURCE_DIR}/.. 
) add_llvm_library(LLVMIntelJITEvents IntelJITEventListener.cpp + jitprofiling.c ) diff --git a/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp b/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp index c11c17e..4cb0270 100644 --- a/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp +++ b/lib/ExecutionEngine/IntelJITEvents/IntelJITEventListener.cpp @@ -22,12 +22,12 @@ #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/OwningPtr.h" #include "llvm/CodeGen/MachineFunction.h" -#include "llvm/ExecutionEngine/IntelJITEventsWrapper.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/Errno.h" #include "llvm/Support/ValueHandle.h" #include "EventListenerCommon.h" +#include "IntelJITEventsWrapper.h" using namespace llvm; using namespace llvm::jitprofiling; @@ -37,13 +37,13 @@ namespace { class IntelJITEventListener : public JITEventListener { typedef DenseMap MethodIDMap; - IntelJITEventsWrapper& Wrapper; + OwningPtr Wrapper; MethodIDMap MethodIDs; FilenameCache Filenames; public: - IntelJITEventListener(IntelJITEventsWrapper& libraryWrapper) - : Wrapper(libraryWrapper) { + IntelJITEventListener(IntelJITEventsWrapper* libraryWrapper) { + Wrapper.reset(libraryWrapper); } ~IntelJITEventListener() { @@ -54,6 +54,10 @@ public: const EmittedFunctionDetails &Details); virtual void NotifyFreeingMachineCode(void *OldPtr); + + virtual void NotifyObjectEmitted(const ObjectImage &Obj); + + virtual void NotifyFreeingObject(const ObjectImage &Obj); }; static LineNumberInfo LineStartToIntelJITFormat( @@ -94,7 +98,7 @@ static iJIT_Method_Load FunctionDescToIntelJITFormat( void IntelJITEventListener::NotifyFunctionEmitted( const Function &F, void *FnStart, size_t FnSize, const EmittedFunctionDetails &Details) { - iJIT_Method_Load FunctionMessage = FunctionDescToIntelJITFormat(Wrapper, + iJIT_Method_Load FunctionMessage = FunctionDescToIntelJITFormat(*Wrapper, F.getName().data(), reinterpret_cast(FnStart), FnSize); @@ -151,32 +155,36 @@ void IntelJITEventListener::NotifyFunctionEmitted( FunctionMessage.line_number_table = 0; } - Wrapper.iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, - &FunctionMessage); + Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, + &FunctionMessage); MethodIDs[FnStart] = FunctionMessage.method_id; } void IntelJITEventListener::NotifyFreeingMachineCode(void *FnStart) { MethodIDMap::iterator I = MethodIDs.find(FnStart); if (I != MethodIDs.end()) { - Wrapper.iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_UNLOAD_START, &I->second); + Wrapper->iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_UNLOAD_START, &I->second); MethodIDs.erase(I); } } +void IntelJITEventListener::NotifyObjectEmitted(const ObjectImage &Obj) { +} + +void IntelJITEventListener::NotifyFreeingObject(const ObjectImage &Obj) { +} + } // anonymous namespace. 
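
(Aside: with the hunk above the listener owns its IntelJITEventsWrapper through OwningPtr instead of holding a reference to a function-local static, so callers hand over a freshly allocated wrapper and forget about it. A minimal sketch of that ownership pattern only — the class name here is invented and the header is the one added by this patch:

    #include "llvm/ADT/OwningPtr.h"
    #include "IntelJITEventsWrapper.h"   // added later in this patch

    class OwningListenerSketch {
      llvm::OwningPtr<IntelJITEventsWrapper> Wrapper;  // freed with the listener
    public:
      explicit OwningListenerSketch(IntelJITEventsWrapper *W) { Wrapper.reset(W); }
      // No explicit destructor needed; OwningPtr deletes the wrapper.
    };

    // Usage mirrors createIntelJITEventListener(): allocate, pass, forget.
    //   OwningListenerSketch L(new IntelJITEventsWrapper());

End of aside.)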
namespace llvm { JITEventListener *JITEventListener::createIntelJITEventListener() { - static OwningPtr JITProfilingWrapper( - new IntelJITEventsWrapper); - return new IntelJITEventListener(*JITProfilingWrapper); + return new IntelJITEventListener(new IntelJITEventsWrapper); } // for testing JITEventListener *JITEventListener::createIntelJITEventListener( IntelJITEventsWrapper* TestImpl) { - return new IntelJITEventListener(*TestImpl); + return new IntelJITEventListener(TestImpl); } } // namespace llvm diff --git a/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h b/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h new file mode 100644 index 0000000..7ab08e1 --- /dev/null +++ b/lib/ExecutionEngine/IntelJITEvents/IntelJITEventsWrapper.h @@ -0,0 +1,102 @@ +//===-- IntelJITEventsWrapper.h - Intel JIT Events API Wrapper --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a wrapper for the Intel JIT Events API. It allows for the +// implementation of the jitprofiling library to be swapped with an alternative +// implementation (for testing). To include this file, you must have the +// jitprofiling.h header available; it is available in Intel(R) VTune(TM) +// Amplifier XE 2011. +// +//===----------------------------------------------------------------------===// + +#ifndef INTEL_JIT_EVENTS_WRAPPER_H +#define INTEL_JIT_EVENTS_WRAPPER_H + +#include "jitprofiling.h" + +namespace llvm { + +class IntelJITEventsWrapper { + // Function pointer types for testing implementation of Intel jitprofiling + // library + typedef int (*NotifyEventPtr)(iJIT_JVM_EVENT, void*); + typedef void (*RegisterCallbackExPtr)(void *, iJIT_ModeChangedEx ); + typedef iJIT_IsProfilingActiveFlags (*IsProfilingActivePtr)(void); + typedef void (*FinalizeThreadPtr)(void); + typedef void (*FinalizeProcessPtr)(void); + typedef unsigned int (*GetNewMethodIDPtr)(void); + + NotifyEventPtr NotifyEventFunc; + RegisterCallbackExPtr RegisterCallbackExFunc; + IsProfilingActivePtr IsProfilingActiveFunc; + FinalizeThreadPtr FinalizeThreadFunc; + FinalizeProcessPtr FinalizeProcessFunc; + GetNewMethodIDPtr GetNewMethodIDFunc; + +public: + bool isAmplifierRunning() { + return iJIT_IsProfilingActive() == iJIT_SAMPLING_ON; + } + + IntelJITEventsWrapper() + : NotifyEventFunc(::iJIT_NotifyEvent), + RegisterCallbackExFunc(::iJIT_RegisterCallbackEx), + IsProfilingActiveFunc(::iJIT_IsProfilingActive), + FinalizeThreadFunc(::FinalizeThread), + FinalizeProcessFunc(::FinalizeProcess), + GetNewMethodIDFunc(::iJIT_GetNewMethodID) { + } + + IntelJITEventsWrapper(NotifyEventPtr NotifyEventImpl, + RegisterCallbackExPtr RegisterCallbackExImpl, + IsProfilingActivePtr IsProfilingActiveImpl, + FinalizeThreadPtr FinalizeThreadImpl, + FinalizeProcessPtr FinalizeProcessImpl, + GetNewMethodIDPtr GetNewMethodIDImpl) + : NotifyEventFunc(NotifyEventImpl), + RegisterCallbackExFunc(RegisterCallbackExImpl), + IsProfilingActiveFunc(IsProfilingActiveImpl), + FinalizeThreadFunc(FinalizeThreadImpl), + FinalizeProcessFunc(FinalizeProcessImpl), + GetNewMethodIDFunc(GetNewMethodIDImpl) { + } + + // Sends an event anncouncing that a function has been emitted + // return values are event-specific. See Intel documentation for details. 
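
(Aside: the six-argument constructor above exists so tests can swap the real jitprofiling entry points for stubs. A hedged sketch of that injection — every stub name below is invented for the example, and the returned method ID stays above the reserved range 0-999:

    #include "jitprofiling.h"
    #include "IntelJITEventsWrapper.h"

    static int StubNotify(iJIT_JVM_EVENT, void*) { return 1; }
    static void StubRegister(void*, iJIT_ModeChangedEx) {}
    static iJIT_IsProfilingActiveFlags StubActive(void) { return iJIT_SAMPLING_ON; }
    static void StubFinalizeThread(void) {}
    static void StubFinalizeProcess(void) {}
    static unsigned int StubNewID(void) { return 1000; }

    // llvm::IntelJITEventsWrapper TestWrapper(StubNotify, StubRegister, StubActive,
    //                                         StubFinalizeThread, StubFinalizeProcess,
    //                                         StubNewID);

End of aside.)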
+ int iJIT_NotifyEvent(iJIT_JVM_EVENT EventType, void *EventSpecificData) { + if (!NotifyEventFunc) + return -1; + return NotifyEventFunc(EventType, EventSpecificData); + } + + // Registers a callback function to receive notice of profiling state changes + void iJIT_RegisterCallbackEx(void *UserData, + iJIT_ModeChangedEx NewModeCallBackFuncEx) { + if (RegisterCallbackExFunc) + RegisterCallbackExFunc(UserData, NewModeCallBackFuncEx); + } + + // Returns the current profiler mode + iJIT_IsProfilingActiveFlags iJIT_IsProfilingActive(void) { + if (!IsProfilingActiveFunc) + return iJIT_NOTHING_RUNNING; + return IsProfilingActiveFunc(); + } + + // Generates a locally unique method ID for use in code registration + unsigned int iJIT_GetNewMethodID(void) { + if (!GetNewMethodIDFunc) + return -1; + return GetNewMethodIDFunc(); + } +}; + +} //namespace llvm + +#endif //INTEL_JIT_EVENTS_WRAPPER_H diff --git a/lib/ExecutionEngine/IntelJITEvents/Makefile b/lib/ExecutionEngine/IntelJITEvents/Makefile index ba75ac6..dcf3126 100644 --- a/lib/ExecutionEngine/IntelJITEvents/Makefile +++ b/lib/ExecutionEngine/IntelJITEvents/Makefile @@ -11,7 +11,8 @@ LIBRARYNAME = LLVMIntelJITEvents include $(LEVEL)/Makefile.config -SOURCES := IntelJITEventListener.cpp -CPPFLAGS += -I$(INTEL_JITEVENTS_INCDIR) -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. +SOURCES := IntelJITEventListener.cpp \ + jitprofiling.c +CPPFLAGS += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/.. include $(LLVM_SRC_ROOT)/Makefile.rules diff --git a/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h b/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h new file mode 100644 index 0000000..1f029fb --- /dev/null +++ b/lib/ExecutionEngine/IntelJITEvents/ittnotify_config.h @@ -0,0 +1,454 @@ +/*===-- ittnotify_config.h - JIT Profiling API internal config-----*- C -*-===* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time) + * Profiling API internal config. + * + * NOTE: This file comes in a style different from the rest of LLVM + * source base since this is a piece of code shared from Intel(R) + * products. Please do not reformat / re-style this code to make + * subsequent merges and contributions from the original source base eaiser. 
+ * + *===----------------------------------------------------------------------===*/ +#ifndef _ITTNOTIFY_CONFIG_H_ +#define _ITTNOTIFY_CONFIG_H_ + +/** @cond exclude_from_documentation */ +#ifndef ITT_OS_WIN +# define ITT_OS_WIN 1 +#endif /* ITT_OS_WIN */ + +#ifndef ITT_OS_LINUX +# define ITT_OS_LINUX 2 +#endif /* ITT_OS_LINUX */ + +#ifndef ITT_OS_MAC +# define ITT_OS_MAC 3 +#endif /* ITT_OS_MAC */ + +#ifndef ITT_OS +# if defined WIN32 || defined _WIN32 +# define ITT_OS ITT_OS_WIN +# elif defined( __APPLE__ ) && defined( __MACH__ ) +# define ITT_OS ITT_OS_MAC +# else +# define ITT_OS ITT_OS_LINUX +# endif +#endif /* ITT_OS */ + +#ifndef ITT_PLATFORM_WIN +# define ITT_PLATFORM_WIN 1 +#endif /* ITT_PLATFORM_WIN */ + +#ifndef ITT_PLATFORM_POSIX +# define ITT_PLATFORM_POSIX 2 +#endif /* ITT_PLATFORM_POSIX */ + +#ifndef ITT_PLATFORM +# if ITT_OS==ITT_OS_WIN +# define ITT_PLATFORM ITT_PLATFORM_WIN +# else +# define ITT_PLATFORM ITT_PLATFORM_POSIX +# endif /* _WIN32 */ +#endif /* ITT_PLATFORM */ + +#if defined(_UNICODE) && !defined(UNICODE) +#define UNICODE +#endif + +#include +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#include +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#include +#if defined(UNICODE) || defined(_UNICODE) +#include +#endif /* UNICODE || _UNICODE */ +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#ifndef CDECL +# if ITT_PLATFORM==ITT_PLATFORM_WIN +# define CDECL __cdecl +# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__ +# define CDECL /* not actual on x86_64 platform */ +# else /* _M_X64 || _M_AMD64 || __x86_64__ */ +# define CDECL __attribute__ ((cdecl)) +# endif /* _M_X64 || _M_AMD64 || __x86_64__ */ +# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* CDECL */ + +#ifndef STDCALL +# if ITT_PLATFORM==ITT_PLATFORM_WIN +# define STDCALL __stdcall +# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__ +# define STDCALL /* not supported on x86_64 platform */ +# else /* _M_X64 || _M_AMD64 || __x86_64__ */ +# define STDCALL __attribute__ ((stdcall)) +# endif /* _M_X64 || _M_AMD64 || __x86_64__ */ +# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#endif /* STDCALL */ + +#define ITTAPI CDECL +#define LIBITTAPI CDECL + +/* TODO: Temporary for compatibility! */ +#define ITTAPI_CALL CDECL +#define LIBITTAPI_CALL CDECL + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +/* use __forceinline (VC++ specific) */ +#define ITT_INLINE __forceinline +#define ITT_INLINE_ATTRIBUTE /* nothing */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +/* + * Generally, functions are not inlined unless optimization is specified. + * For functions declared inline, this attribute inlines the function even + * if no optimization level was specified. 
+ */ +#ifdef __STRICT_ANSI__ +#define ITT_INLINE static +#else /* __STRICT_ANSI__ */ +#define ITT_INLINE static inline +#endif /* __STRICT_ANSI__ */ +#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline)) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +/** @endcond */ + +#ifndef ITT_ARCH_IA32 +# define ITT_ARCH_IA32 1 +#endif /* ITT_ARCH_IA32 */ + +#ifndef ITT_ARCH_IA32E +# define ITT_ARCH_IA32E 2 +#endif /* ITT_ARCH_IA32E */ + +#ifndef ITT_ARCH_IA64 +# define ITT_ARCH_IA64 3 +#endif /* ITT_ARCH_IA64 */ + +#ifndef ITT_ARCH +# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__ +# define ITT_ARCH ITT_ARCH_IA32E +# elif defined _M_IA64 || defined __ia64 +# define ITT_ARCH ITT_ARCH_IA64 +# else +# define ITT_ARCH ITT_ARCH_IA32 +# endif +#endif + +#ifdef __cplusplus +# define ITT_EXTERN_C extern "C" +#else +# define ITT_EXTERN_C /* nothing */ +#endif /* __cplusplus */ + +#define ITT_TO_STR_AUX(x) #x +#define ITT_TO_STR(x) ITT_TO_STR_AUX(x) + +#define __ITT_BUILD_ASSERT(expr, suffix) do { \ + static char __itt_build_check_##suffix[(expr) ? 1 : -1]; \ + __itt_build_check_##suffix[0] = 0; \ +} while(0) +#define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix) +#define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__) + +#define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 } + +/* Replace with snapshot date YYYYMMDD for promotion build. */ +#define API_VERSION_BUILD 20111111 + +#ifndef API_VERSION_NUM +#define API_VERSION_NUM 0.0.0 +#endif /* API_VERSION_NUM */ + +#define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) \ + " (" ITT_TO_STR(API_VERSION_BUILD) ")" + +/* OS communication functions */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#include +typedef HMODULE lib_t; +typedef DWORD TIDT; +typedef CRITICAL_SECTION mutex_t; +#define MUTEX_INITIALIZER { 0 } +#define strong_alias(name, aliasname) /* empty for Windows */ +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#include +#if defined(UNICODE) || defined(_UNICODE) +#include +#endif /* UNICODE */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE 1 /* need for PTHREAD_MUTEX_RECURSIVE */ +#endif /* _GNU_SOURCE */ +#include +typedef void* lib_t; +typedef pthread_t TIDT; +typedef pthread_mutex_t mutex_t; +#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER +#define _strong_alias(name, aliasname) \ + extern __typeof (name) aliasname __attribute__ ((alias (#name))); +#define strong_alias(name, aliasname) _strong_alias(name, aliasname) +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define __itt_get_proc(lib, name) GetProcAddress(lib, name) +#define __itt_mutex_init(mutex) InitializeCriticalSection(mutex) +#define __itt_mutex_lock(mutex) EnterCriticalSection(mutex) +#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex) +#define __itt_load_lib(name) LoadLibraryA(name) +#define __itt_unload_lib(handle) FreeLibrary(handle) +#define __itt_system_error() (int)GetLastError() +#define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2) +#define __itt_fstrlen(s) lstrlenA(s) +#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l) +#define __itt_fstrdup(s) _strdup(s) +#define __itt_thread_id() GetCurrentThreadId() +#define __itt_thread_yield() SwitchToThread() +#ifndef ITT_SIMPLE_INIT +ITT_INLINE long +__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE; +ITT_INLINE long __itt_interlocked_increment(volatile long* ptr) +{ + return InterlockedIncrement(ptr); +} +#endif /* ITT_SIMPLE_INIT */ +#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ +#define __itt_get_proc(lib, name) 
dlsym(lib, name) +#define __itt_mutex_init(mutex) {\ + pthread_mutexattr_t mutex_attr; \ + int error_code = pthread_mutexattr_init(&mutex_attr); \ + if (error_code) \ + __itt_report_error(__itt_error_system, "pthread_mutexattr_init", \ + error_code); \ + error_code = pthread_mutexattr_settype(&mutex_attr, \ + PTHREAD_MUTEX_RECURSIVE); \ + if (error_code) \ + __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", \ + error_code); \ + error_code = pthread_mutex_init(mutex, &mutex_attr); \ + if (error_code) \ + __itt_report_error(__itt_error_system, "pthread_mutex_init", \ + error_code); \ + error_code = pthread_mutexattr_destroy(&mutex_attr); \ + if (error_code) \ + __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", \ + error_code); \ +} +#define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex) +#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex) +#define __itt_load_lib(name) dlopen(name, RTLD_LAZY) +#define __itt_unload_lib(handle) dlclose(handle) +#define __itt_system_error() errno +#define __itt_fstrcmp(s1, s2) strcmp(s1, s2) +#define __itt_fstrlen(s) strlen(s) +#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l) +#define __itt_fstrdup(s) strdup(s) +#define __itt_thread_id() pthread_self() +#define __itt_thread_yield() sched_yield() +#if ITT_ARCH==ITT_ARCH_IA64 +#ifdef __INTEL_COMPILER +#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val) +#else /* __INTEL_COMPILER */ +/* TODO: Add Support for not Intel compilers for IA64 */ +#endif /* __INTEL_COMPILER */ +#else /* ITT_ARCH!=ITT_ARCH_IA64 */ +ITT_INLINE long +__TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE; +ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend) +{ + long result; + __asm__ __volatile__("lock\nxadd %0,%1" + : "=r"(result),"=m"(*(long*)ptr) + : "0"(addend), "m"(*(long*)ptr) + : "memory"); + return result; +} +#endif /* ITT_ARCH==ITT_ARCH_IA64 */ +#ifndef ITT_SIMPLE_INIT +ITT_INLINE long +__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE; +ITT_INLINE long __itt_interlocked_increment(volatile long* ptr) +{ + return __TBB_machine_fetchadd4(ptr, 1) + 1L; +} +#endif /* ITT_SIMPLE_INIT */ +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +typedef enum { + __itt_collection_normal = 0, + __itt_collection_paused = 1 +} __itt_collection_state; + +typedef enum { + __itt_thread_normal = 0, + __itt_thread_ignored = 1 +} __itt_thread_state; + +#pragma pack(push, 8) + +typedef struct ___itt_thread_info +{ + const char* nameA; /*!< Copy of original name in ASCII. */ +#if defined(UNICODE) || defined(_UNICODE) + const wchar_t* nameW; /*!< Copy of original name in UNICODE. 
*/ +#else /* UNICODE || _UNICODE */ + void* nameW; +#endif /* UNICODE || _UNICODE */ + TIDT tid; + __itt_thread_state state; /*!< Thread state (paused or normal) */ + int extra1; /*!< Reserved to the runtime */ + void* extra2; /*!< Reserved to the runtime */ + struct ___itt_thread_info* next; +} __itt_thread_info; + +#include "ittnotify_types.h" /* For __itt_group_id definition */ + +typedef struct ___itt_api_info_20101001 +{ + const char* name; + void** func_ptr; + void* init_func; + __itt_group_id group; +} __itt_api_info_20101001; + +typedef struct ___itt_api_info +{ + const char* name; + void** func_ptr; + void* init_func; + void* null_func; + __itt_group_id group; +} __itt_api_info; + +struct ___itt_domain; +struct ___itt_string_handle; + +typedef struct ___itt_global +{ + unsigned char magic[8]; + unsigned long version_major; + unsigned long version_minor; + unsigned long version_build; + volatile long api_initialized; + volatile long mutex_initialized; + volatile long atomic_counter; + mutex_t mutex; + lib_t lib; + void* error_handler; + const char** dll_path_ptr; + __itt_api_info* api_list_ptr; + struct ___itt_global* next; + /* Joinable structures below */ + __itt_thread_info* thread_list; + struct ___itt_domain* domain_list; + struct ___itt_string_handle* string_list; + __itt_collection_state state; +} __itt_global; + +#pragma pack(pop) + +#define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \ + h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \ + if (h != NULL) { \ + h->tid = t; \ + h->nameA = NULL; \ + h->nameW = n ? _wcsdup(n) : NULL; \ + h->state = s; \ + h->extra1 = 0; /* reserved */ \ + h->extra2 = NULL; /* reserved */ \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->thread_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \ + h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \ + if (h != NULL) { \ + h->tid = t; \ + h->nameA = n ? __itt_fstrdup(n) : NULL; \ + h->nameW = NULL; \ + h->state = s; \ + h->extra1 = 0; /* reserved */ \ + h->extra2 = NULL; /* reserved */ \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->thread_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_DOMAIN_W(gptr,h,h_tail,name) { \ + h = (__itt_domain*)malloc(sizeof(__itt_domain)); \ + if (h != NULL) { \ + h->flags = 0; /* domain is disabled by default */ \ + h->nameA = NULL; \ + h->nameW = name ? _wcsdup(name) : NULL; \ + h->extra1 = 0; /* reserved */ \ + h->extra2 = NULL; /* reserved */ \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->domain_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_DOMAIN_A(gptr,h,h_tail,name) { \ + h = (__itt_domain*)malloc(sizeof(__itt_domain)); \ + if (h != NULL) { \ + h->flags = 0; /* domain is disabled by default */ \ + h->nameA = name ? __itt_fstrdup(name) : NULL; \ + h->nameW = NULL; \ + h->extra1 = 0; /* reserved */ \ + h->extra2 = NULL; /* reserved */ \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->domain_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \ + h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \ + if (h != NULL) { \ + h->strA = NULL; \ + h->strW = name ? 
_wcsdup(name) : NULL; \ + h->extra1 = 0; /* reserved */ \ + h->extra2 = NULL; /* reserved */ \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->string_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \ + h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \ + if (h != NULL) { \ + h->strA = name ? __itt_fstrdup(name) : NULL; \ + h->strW = NULL; \ + h->extra1 = 0; /* reserved */ \ + h->extra2 = NULL; /* reserved */ \ + h->next = NULL; \ + if (h_tail == NULL) \ + (gptr)->string_list = h; \ + else \ + h_tail->next = h; \ + } \ +} + +#endif /* _ITTNOTIFY_CONFIG_H_ */ diff --git a/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h b/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h new file mode 100644 index 0000000..5df752f --- /dev/null +++ b/lib/ExecutionEngine/IntelJITEvents/ittnotify_types.h @@ -0,0 +1,70 @@ +/*===-- ittnotify_types.h - JIT Profiling API internal types--------*- C -*-===* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * NOTE: This file comes in a style different from the rest of LLVM + * source base since this is a piece of code shared from Intel(R) + * products. Please do not reformat / re-style this code to make + * subsequent merges and contributions from the original source base eaiser. + * + *===----------------------------------------------------------------------===*/ +#ifndef _ITTNOTIFY_TYPES_H_ +#define _ITTNOTIFY_TYPES_H_ + +typedef enum ___itt_group_id +{ + __itt_group_none = 0, + __itt_group_legacy = 1<<0, + __itt_group_control = 1<<1, + __itt_group_thread = 1<<2, + __itt_group_mark = 1<<3, + __itt_group_sync = 1<<4, + __itt_group_fsync = 1<<5, + __itt_group_jit = 1<<6, + __itt_group_model = 1<<7, + __itt_group_splitter_min = 1<<7, + __itt_group_counter = 1<<8, + __itt_group_frame = 1<<9, + __itt_group_stitch = 1<<10, + __itt_group_heap = 1<<11, + __itt_group_splitter_max = 1<<12, + __itt_group_structure = 1<<12, + __itt_group_suppress = 1<<13, + __itt_group_all = -1 +} __itt_group_id; + +#pragma pack(push, 8) + +typedef struct ___itt_group_list +{ + __itt_group_id id; + const char* name; +} __itt_group_list; + +#pragma pack(pop) + +#define ITT_GROUP_LIST(varname) \ + static __itt_group_list varname[] = { \ + { __itt_group_all, "all" }, \ + { __itt_group_control, "control" }, \ + { __itt_group_thread, "thread" }, \ + { __itt_group_mark, "mark" }, \ + { __itt_group_sync, "sync" }, \ + { __itt_group_fsync, "fsync" }, \ + { __itt_group_jit, "jit" }, \ + { __itt_group_model, "model" }, \ + { __itt_group_counter, "counter" }, \ + { __itt_group_frame, "frame" }, \ + { __itt_group_stitch, "stitch" }, \ + { __itt_group_heap, "heap" }, \ + { __itt_group_structure, "structure" }, \ + { __itt_group_suppress, "suppress" }, \ + { __itt_group_none, NULL } \ + } + +#endif /* _ITTNOTIFY_TYPES_H_ */ diff --git a/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c b/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c new file mode 100644 index 0000000..7b507de --- /dev/null +++ b/lib/ExecutionEngine/IntelJITEvents/jitprofiling.c @@ -0,0 +1,481 @@ +/*===-- jitprofiling.c - JIT (Just-In-Time) Profiling API----------*- C -*-===* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. 
+ * + *===----------------------------------------------------------------------===* + * + * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time) + * Profiling API implementation. + * + * NOTE: This file comes in a style different from the rest of LLVM + * source base since this is a piece of code shared from Intel(R) + * products. Please do not reformat / re-style this code to make + * subsequent merges and contributions from the original source base eaiser. + * + *===----------------------------------------------------------------------===*/ +#include "ittnotify_config.h" + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#include +#pragma optimize("", off) +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#include +#include +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#include +#include + +#include "jitprofiling.h" + +static const char rcsid[] = "\n@(#) $Revision: 243501 $\n"; + +#define DLL_ENVIRONMENT_VAR "VS_PROFILER" + +#ifndef NEW_DLL_ENVIRONMENT_VAR +#if ITT_ARCH==ITT_ARCH_IA32 +#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER32" +#else +#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER64" +#endif +#endif /* NEW_DLL_ENVIRONMENT_VAR */ + +#if ITT_PLATFORM==ITT_PLATFORM_WIN +#define DEFAULT_DLLNAME "JitPI.dll" +HINSTANCE m_libHandle = NULL; +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +#define DEFAULT_DLLNAME "libJitPI.so" +void* m_libHandle = NULL; +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +/* default location of JIT profiling agent on Android */ +#define ANDROID_JIT_AGENT_PATH "/data/intel/libittnotify.so" + +/* the function pointers */ +typedef unsigned int(*TPInitialize)(void); +static TPInitialize FUNC_Initialize=NULL; + +typedef unsigned int(*TPNotify)(unsigned int, void*); +static TPNotify FUNC_NotifyEvent=NULL; + +static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING; + +/* end collector dll part. */ + +/* loadiJIT_Funcs() : this function is called just in the beginning + * and is responsible to load the functions from BistroJavaCollector.dll + * result: + * on success: the functions loads, iJIT_DLL_is_missing=0, return value = 1 + * on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0 + */ +static int loadiJIT_Funcs(void); + +/* global representing whether the BistroJavaCollector can't be loaded */ +static int iJIT_DLL_is_missing = 0; + +/* Virtual stack - the struct is used as a virtual stack for each thread. + * Every thread initializes with a stack of size INIT_TOP_STACK. + * Every method entry decreases from the current stack point, + * and when a thread stack reaches its top of stack (return from the global + * function), the top of stack and the current stack increase. Notice that + * when returning from a function the stack pointer is the address of + * the function return. +*/ +#if ITT_PLATFORM==ITT_PLATFORM_WIN +static DWORD threadLocalStorageHandle = 0; +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0; +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + +#define INIT_TOP_Stack 10000 + +typedef struct +{ + unsigned int TopStack; + unsigned int CurrentStack; +} ThreadStack, *pThreadStack; + +/* end of virtual stack. */ + +/* + * The function for reporting virtual-machine related events to VTune. + * Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill + * in the stack_id field in the iJIT_Method_NIDS structure, as VTune fills it. 
+ * The return value in iJVM_EVENT_TYPE_ENTER_NIDS && + * iJVM_EVENT_TYPE_LEAVE_NIDS events will be 0 in case of failure. + * in iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event + * it will be -1 if EventSpecificData == 0 otherwise it will be 0. +*/ + +ITT_EXTERN_C int JITAPI +iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData) +{ + int ReturnValue; + + /* + * This section is for debugging outside of VTune. + * It creates the environment variables that indicates call graph mode. + * If running outside of VTune remove the remark. + * + * + * static int firstTime = 1; + * char DoCallGraph[12] = "DoCallGraph"; + * if (firstTime) + * { + * firstTime = 0; + * SetEnvironmentVariable( "BISTRO_COLLECTORS_DO_CALLGRAPH", DoCallGraph); + * } + * + * end of section. + */ + + /* initialization part - the functions have not been loaded yet. This part + * will load the functions, and check if we are in Call Graph mode. + * (for special treatment). + */ + if (!FUNC_NotifyEvent) + { + if (iJIT_DLL_is_missing) + return 0; + + /* load the Function from the DLL */ + if (!loadiJIT_Funcs()) + return 0; + + /* Call Graph initialization. */ + } + + /* If the event is method entry/exit, check that in the current mode + * VTune is allowed to receive it + */ + if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS || + event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) && + (executionMode != iJIT_CALLGRAPH_ON)) + { + return 0; + } + /* This section is performed when method enter event occurs. + * It updates the virtual stack, or creates it if this is the first + * method entry in the thread. The stack pointer is decreased. + */ + if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + pThreadStack threadStack = + (pThreadStack)TlsGetValue (threadLocalStorageHandle); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + pThreadStack threadStack = + (pThreadStack)pthread_getspecific(threadLocalStorageHandle); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + + /* check for use of reserved method IDs */ + if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 ) + return 0; + + if (!threadStack) + { + /* initialize the stack. */ + threadStack = (pThreadStack) calloc (sizeof(ThreadStack), 1); + threadStack->TopStack = INIT_TOP_Stack; + threadStack->CurrentStack = INIT_TOP_Stack; +#if ITT_PLATFORM==ITT_PLATFORM_WIN + TlsSetValue(threadLocalStorageHandle,(void*)threadStack); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + pthread_setspecific(threadLocalStorageHandle,(void*)threadStack); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + } + + /* decrease the stack. */ + ((piJIT_Method_NIDS) EventSpecificData)->stack_id = + (threadStack->CurrentStack)--; + } + + /* This section is performed when method leave event occurs + * It updates the virtual stack. + * Increases the stack pointer. + * If the stack pointer reached the top (left the global function) + * increase the pointer and the top pointer. 
+ */ + if (event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + pThreadStack threadStack = + (pThreadStack)TlsGetValue (threadLocalStorageHandle); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + pThreadStack threadStack = + (pThreadStack)pthread_getspecific(threadLocalStorageHandle); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + + /* check for use of reserved method IDs */ + if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 ) + return 0; + + if (!threadStack) + { + /* Error: first report in this thread is method exit */ + exit (1); + } + + ((piJIT_Method_NIDS) EventSpecificData)->stack_id = + ++(threadStack->CurrentStack) + 1; + + if (((piJIT_Method_NIDS) EventSpecificData)->stack_id + > threadStack->TopStack) + ((piJIT_Method_NIDS) EventSpecificData)->stack_id = + (unsigned int)-1; + } + + if (event_type == iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED) + { + /* check for use of reserved method IDs */ + if ( ((piJIT_Method_Load) EventSpecificData)->method_id <= 999 ) + return 0; + } + + ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData); + + return ReturnValue; +} + +/* The new mode call back routine */ +ITT_EXTERN_C void JITAPI +iJIT_RegisterCallbackEx(void *userdata, iJIT_ModeChangedEx + NewModeCallBackFuncEx) +{ + /* is it already missing... or the load of functions from the DLL failed */ + if (iJIT_DLL_is_missing || !loadiJIT_Funcs()) + { + /* then do not bother with notifications */ + NewModeCallBackFuncEx(userdata, iJIT_NO_NOTIFICATIONS); + /* Error: could not load JIT functions. */ + return; + } + /* nothing to do with the callback */ +} + +/* + * This function allows the user to query in which mode, if at all, + *VTune is running + */ +ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive() +{ + if (!iJIT_DLL_is_missing) + { + loadiJIT_Funcs(); + } + + return executionMode; +} + +/* this function loads the collector dll (BistroJavaCollector) + * and the relevant functions. + * on success: all functions load, iJIT_DLL_is_missing = 0, return value = 1 + * on failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0 + */ +static int loadiJIT_Funcs() +{ + static int bDllWasLoaded = 0; + char *dllName = (char*)rcsid; /* !! Just to avoid unused code elimination */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN + DWORD dNameLength = 0; +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + + if(bDllWasLoaded) + { + /* dll was already loaded, no need to do it for the second time */ + return 1; + } + + /* Assumes that the DLL will not be found */ + iJIT_DLL_is_missing = 1; + FUNC_NotifyEvent = NULL; + + if (m_libHandle) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + FreeLibrary(m_libHandle); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + dlclose(m_libHandle); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + m_libHandle = NULL; + } + + /* Try to get the dll name from the environment */ +#if ITT_PLATFORM==ITT_PLATFORM_WIN + dNameLength = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, NULL, 0); + if (dNameLength) + { + DWORD envret = 0; + dllName = (char*)malloc(sizeof(char) * (dNameLength + 1)); + envret = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, + dllName, dNameLength); + if (envret) + { + /* Try to load the dll from the PATH... 
*/ + m_libHandle = LoadLibraryExA(dllName, + NULL, LOAD_WITH_ALTERED_SEARCH_PATH); + } + free(dllName); + } else { + /* Try to use old VS_PROFILER variable */ + dNameLength = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, NULL, 0); + if (dNameLength) + { + DWORD envret = 0; + dllName = (char*)malloc(sizeof(char) * (dNameLength + 1)); + envret = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, + dllName, dNameLength); + if (envret) + { + /* Try to load the dll from the PATH... */ + m_libHandle = LoadLibraryA(dllName); + } + free(dllName); + } + } +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + dllName = getenv(NEW_DLL_ENVIRONMENT_VAR); + if (!dllName) + dllName = getenv(DLL_ENVIRONMENT_VAR); +#ifdef ANDROID + if (!dllName) + dllName = ANDROID_JIT_AGENT_PATH; +#endif + if (dllName) + { + /* Try to load the dll from the PATH... */ + m_libHandle = dlopen(dllName, RTLD_LAZY); + } +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + + if (!m_libHandle) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + m_libHandle = LoadLibraryA(DEFAULT_DLLNAME); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + m_libHandle = dlopen(DEFAULT_DLLNAME, RTLD_LAZY); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + } + + /* if the dll wasn't loaded - exit. */ + if (!m_libHandle) + { + iJIT_DLL_is_missing = 1; /* don't try to initialize + * JIT agent the second time + */ + return 0; + } + +#if ITT_PLATFORM==ITT_PLATFORM_WIN + FUNC_NotifyEvent = (TPNotify)GetProcAddress(m_libHandle, "NotifyEvent"); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + FUNC_NotifyEvent = (TPNotify)dlsym(m_libHandle, "NotifyEvent"); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + if (!FUNC_NotifyEvent) + { + FUNC_Initialize = NULL; + return 0; + } + +#if ITT_PLATFORM==ITT_PLATFORM_WIN + FUNC_Initialize = (TPInitialize)GetProcAddress(m_libHandle, "Initialize"); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + FUNC_Initialize = (TPInitialize)dlsym(m_libHandle, "Initialize"); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + if (!FUNC_Initialize) + { + FUNC_NotifyEvent = NULL; + return 0; + } + + executionMode = (iJIT_IsProfilingActiveFlags)FUNC_Initialize(); + + bDllWasLoaded = 1; + iJIT_DLL_is_missing = 0; /* DLL is ok. */ + + /* + * Call Graph mode: init the thread local storage + * (need to store the virtual stack there). 
+ */ + if ( executionMode == iJIT_CALLGRAPH_ON ) + { + /* Allocate a thread local storage slot for the thread "stack" */ + if (!threadLocalStorageHandle) +#if ITT_PLATFORM==ITT_PLATFORM_WIN + threadLocalStorageHandle = TlsAlloc(); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + pthread_key_create(&threadLocalStorageHandle, NULL); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + } + + return 1; +} + +/* + * This function should be called by the user whenever a thread ends, + * to free the thread "virtual stack" storage + */ +ITT_EXTERN_C void JITAPI FinalizeThread() +{ + if (threadLocalStorageHandle) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + pThreadStack threadStack = + (pThreadStack)TlsGetValue (threadLocalStorageHandle); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + pThreadStack threadStack = + (pThreadStack)pthread_getspecific(threadLocalStorageHandle); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + if (threadStack) + { + free (threadStack); + threadStack = NULL; +#if ITT_PLATFORM==ITT_PLATFORM_WIN + TlsSetValue (threadLocalStorageHandle, threadStack); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + pthread_setspecific(threadLocalStorageHandle, threadStack); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + } + } +} + +/* + * This function should be called by the user when the process ends, + * to free the local storage index +*/ +ITT_EXTERN_C void JITAPI FinalizeProcess() +{ + if (m_libHandle) + { +#if ITT_PLATFORM==ITT_PLATFORM_WIN + FreeLibrary(m_libHandle); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + dlclose(m_libHandle); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + m_libHandle = NULL; + } + + if (threadLocalStorageHandle) +#if ITT_PLATFORM==ITT_PLATFORM_WIN + TlsFree (threadLocalStorageHandle); +#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ + pthread_key_delete(threadLocalStorageHandle); +#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ +} + +/* + * This function should be called by the user for any method once. + * The function will return a unique method ID, the user should maintain + * the ID for each method + */ +ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID() +{ + static unsigned int methodID = 0x100000; + + if (methodID == 0) + return 0; /* ERROR : this is not a valid value */ + + return methodID++; +} diff --git a/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h b/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h new file mode 100644 index 0000000..f08e287 --- /dev/null +++ b/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h @@ -0,0 +1,259 @@ +/*===-- jitprofiling.h - JIT Profiling API-------------------------*- C -*-===* + * + * The LLVM Compiler Infrastructure + * + * This file is distributed under the University of Illinois Open Source + * License. See LICENSE.TXT for details. + * + *===----------------------------------------------------------------------===* + * + * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time) + * Profiling API declaration. + * + * NOTE: This file comes in a style different from the rest of LLVM + * source base since this is a piece of code shared from Intel(R) + * products. Please do not reformat / re-style this code to make + * subsequent merges and contributions from the original source base eaiser. 
+ * + *===----------------------------------------------------------------------===*/ +#ifndef __JITPROFILING_H__ +#define __JITPROFILING_H__ + +/* + * Various constants used by functions + */ + +/* event notification */ +typedef enum iJIT_jvm_event +{ + + /* shutdown */ + + /* + * Program exiting EventSpecificData NA + */ + iJVM_EVENT_TYPE_SHUTDOWN = 2, + + /* JIT profiling */ + + /* + * issued after method code jitted into memory but before code is executed + * EventSpecificData is an iJIT_Method_Load + */ + iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13, + + /* issued before unload. Method code will no longer be executed, but code + * and info are still in memory. The VTune profiler may capture method + * code only at this point EventSpecificData is iJIT_Method_Id + */ + iJVM_EVENT_TYPE_METHOD_UNLOAD_START, + + /* Method Profiling */ + + /* method name, Id and stack is supplied + * issued when a method is about to be entered EventSpecificData is + * iJIT_Method_NIDS + */ + iJVM_EVENT_TYPE_ENTER_NIDS = 19, + + /* method name, Id and stack is supplied + * issued when a method is about to be left EventSpecificData is + * iJIT_Method_NIDS + */ + iJVM_EVENT_TYPE_LEAVE_NIDS +} iJIT_JVM_EVENT; + +typedef enum _iJIT_ModeFlags +{ + /* No need to Notify VTune, since VTune is not running */ + iJIT_NO_NOTIFICATIONS = 0x0000, + + /* when turned on the jit must call + * iJIT_NotifyEvent + * ( + * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, + * ) + * for all the method already jitted + */ + iJIT_BE_NOTIFY_ON_LOAD = 0x0001, + + /* when turned on the jit must call + * iJIT_NotifyEvent + * ( + * iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED, + * ) for all the method that are unloaded + */ + iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002, + + /* when turned on the jit must instrument all + * the currently jited code with calls on + * method entries + */ + iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004, + + /* when turned on the jit must instrument all + * the currently jited code with calls + * on method exit + */ + iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008 + +} iJIT_ModeFlags; + + + /* Flags used by iJIT_IsProfilingActive() */ +typedef enum _iJIT_IsProfilingActiveFlags +{ + /* No profiler is running. Currently not used */ + iJIT_NOTHING_RUNNING = 0x0000, + + /* Sampling is running. 
This is the default value + * returned by iJIT_IsProfilingActive() + */ + iJIT_SAMPLING_ON = 0x0001, + + /* Call Graph is running */ + iJIT_CALLGRAPH_ON = 0x0002 + +} iJIT_IsProfilingActiveFlags; + +/* Enumerator for the environment of methods*/ +typedef enum _iJDEnvironmentType +{ + iJDE_JittingAPI = 2 +} iJDEnvironmentType; + +/********************************** + * Data structures for the events * + **********************************/ + +/* structure for the events: + * iJVM_EVENT_TYPE_METHOD_UNLOAD_START + */ + +typedef struct _iJIT_Method_Id +{ + /* Id of the method (same as the one passed in + * the iJIT_Method_Load struct + */ + unsigned int method_id; + +} *piJIT_Method_Id, iJIT_Method_Id; + + +/* structure for the events: + * iJVM_EVENT_TYPE_ENTER_NIDS, + * iJVM_EVENT_TYPE_LEAVE_NIDS, + * iJVM_EVENT_TYPE_EXCEPTION_OCCURRED_NIDS + */ + +typedef struct _iJIT_Method_NIDS +{ + /* unique method ID */ + unsigned int method_id; + + /* NOTE: no need to fill this field, it's filled by VTune */ + unsigned int stack_id; + + /* method name (just the method, without the class) */ + char* method_name; +} *piJIT_Method_NIDS, iJIT_Method_NIDS; + +/* structures for the events: + * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED + */ + +typedef struct _LineNumberInfo +{ + /* x86 Offset from the begining of the method*/ + unsigned int Offset; + + /* source line number from the begining of the source file */ + unsigned int LineNumber; + +} *pLineNumberInfo, LineNumberInfo; + +typedef struct _iJIT_Method_Load +{ + /* unique method ID - can be any unique value, (except 0 - 999) */ + unsigned int method_id; + + /* method name (can be with or without the class and signature, in any case + * the class name will be added to it) + */ + char* method_name; + + /* virtual address of that method - This determines the method range for the + * iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events + */ + void* method_load_address; + + /* Size in memory - Must be exact */ + unsigned int method_size; + + /* Line Table size in number of entries - Zero if none */ + unsigned int line_number_size; + + /* Pointer to the begining of the line numbers info array */ + pLineNumberInfo line_number_table; + + /* unique class ID */ + unsigned int class_id; + + /* class file name */ + char* class_file_name; + + /* source file name */ + char* source_file_name; + + /* bits supplied by the user for saving in the JIT file */ + void* user_data; + + /* the size of the user data buffer */ + unsigned int user_data_size; + + /* NOTE: no need to fill this field, it's filled by VTune */ + iJDEnvironmentType env; + +} *piJIT_Method_Load, iJIT_Method_Load; + +/* API Functions */ +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef CDECL +# if defined WIN32 || defined _WIN32 +# define CDECL __cdecl +# else /* defined WIN32 || defined _WIN32 */ +# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__ +# define CDECL /* not actual on x86_64 platform */ +# else /* _M_X64 || _M_AMD64 || __x86_64__ */ +# define CDECL __attribute__ ((cdecl)) +# endif /* _M_X64 || _M_AMD64 || __x86_64__ */ +# endif /* defined WIN32 || defined _WIN32 */ +#endif /* CDECL */ + +#define JITAPI CDECL + +/* called when the settings are changed with new settings */ +typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags); + +int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData); + +/* The new mode call back routine */ +void JITAPI iJIT_RegisterCallbackEx(void *userdata, + iJIT_ModeChangedEx NewModeCallBackFuncEx); + 
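
(Aside: a sketch of how a JIT client would use the C API declared in this header to report one freshly emitted function; the function name and the helper are invented for the example, and fields not shown are simply zeroed:

    #include <cstring>
    #include "jitprofiling.h"

    void reportJittedFunction(void *Start, unsigned Size) {
      if (iJIT_IsProfilingActive() != iJIT_SAMPLING_ON)
        return;                                   // no profiler listening
      iJIT_Method_Load Load;
      std::memset(&Load, 0, sizeof(Load));
      Load.method_id = iJIT_GetNewMethodID();     // must be > 999
      Load.method_name = (char*)"my_jitted_fn";   // hypothetical name
      Load.method_load_address = Start;
      Load.method_size = Size;                    // exact size in bytes
      iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, &Load);
    }

End of aside.)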
+iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void); + +void JITAPI FinalizeThread(void); + +void JITAPI FinalizeProcess(void); + +unsigned int JITAPI iJIT_GetNewMethodID(void); + +#ifdef __cplusplus +} +#endif + +#endif /* __JITPROFILING_H__ */ diff --git a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp index 7a206eb..e16e2d1 100644 --- a/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp +++ b/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp @@ -25,7 +25,7 @@ #include "llvm/Config/config.h" // Detect libffi #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/DynamicLibrary.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/Mutex.h" #include @@ -180,7 +180,7 @@ static void *ffiValueFor(Type *Ty, const GenericValue &AV, static bool ffiInvoke(RawFunc Fn, Function *F, const std::vector &ArgVals, - const TargetData *TD, GenericValue &Result) { + const DataLayout *TD, GenericValue &Result) { ffi_cif cif; FunctionType *FTy = F->getFunctionType(); const unsigned NumArgs = F->arg_size(); @@ -276,7 +276,7 @@ GenericValue Interpreter::callExternalFunction(Function *F, FunctionsLock->release(); GenericValue Result; - if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getTargetData(), Result)) + if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result)) return Result; #endif // USE_LIBFFI @@ -376,7 +376,7 @@ GenericValue lle_X_sprintf(FunctionType *FT, case 'x': case 'X': if (HowLong >= 1) { if (HowLong == 1 && - TheInterpreter->getTargetData()->getPointerSizeInBits() == 64 && + TheInterpreter->getDataLayout()->getPointerSizeInBits() == 64 && sizeof(long) < sizeof(int64_t)) { // Make sure we use %lld with a 64 bit argument because we might be // compiling LLI on a 32 bit compiler. diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/lib/ExecutionEngine/Interpreter/Interpreter.cpp index 43e3453..55152db 100644 --- a/lib/ExecutionEngine/Interpreter/Interpreter.cpp +++ b/lib/ExecutionEngine/Interpreter/Interpreter.cpp @@ -48,7 +48,7 @@ Interpreter::Interpreter(Module *M) : ExecutionEngine(M), TD(M) { memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped)); - setTargetData(&TD); + setDataLayout(&TD); // Initialize the "backend" initializeExecutionEngine(); initializeExternalFunctions(); diff --git a/lib/ExecutionEngine/Interpreter/Interpreter.h b/lib/ExecutionEngine/Interpreter/Interpreter.h index 28c5775..72c42c1 100644 --- a/lib/ExecutionEngine/Interpreter/Interpreter.h +++ b/lib/ExecutionEngine/Interpreter/Interpreter.h @@ -17,7 +17,7 @@ #include "llvm/Function.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" #include "llvm/ExecutionEngine/GenericValue.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Support/CallSite.h" #include "llvm/Support/DataTypes.h" #include "llvm/Support/ErrorHandling.h" @@ -82,7 +82,7 @@ struct ExecutionContext { // class Interpreter : public ExecutionEngine, public InstVisitor { GenericValue ExitValue; // The return value of the called function - TargetData TD; + DataLayout TD; IntrinsicLowering *IL; // The runtime stack of executing code. 
The top of the stack is the current diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp index 97995ad..1ad3382 100644 --- a/lib/ExecutionEngine/JIT/JIT.cpp +++ b/lib/ExecutionEngine/JIT/JIT.cpp @@ -24,7 +24,7 @@ #include "llvm/ExecutionEngine/GenericValue.h" #include "llvm/ExecutionEngine/JITEventListener.h" #include "llvm/ExecutionEngine/JITMemoryManager.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetJITInfo.h" #include "llvm/Support/Dwarf.h" @@ -272,7 +272,7 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji, : ExecutionEngine(M), TM(tm), TJI(tji), JMM(jmm ? jmm : JITMemoryManager::CreateDefaultMemManager()), AllocateGVsWithCode(GVsWithCode), isAlreadyCodeGenerating(false) { - setTargetData(TM.getTargetData()); + setDataLayout(TM.getDataLayout()); jitstate = new JITState(M); @@ -285,7 +285,7 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji, // Add target data MutexGuard locked(lock); FunctionPassManager &PM = jitstate->getPM(locked); - PM.add(new TargetData(*TM.getTargetData())); + PM.add(new DataLayout(*TM.getDataLayout())); // Turn the machine code intermediate representation into bytes in memory that // may be executed. @@ -339,7 +339,7 @@ void JIT::addModule(Module *M) { jitstate = new JITState(M); FunctionPassManager &PM = jitstate->getPM(locked); - PM.add(new TargetData(*TM.getTargetData())); + PM.add(new DataLayout(*TM.getDataLayout())); // Turn the machine code intermediate representation into bytes in memory // that may be executed. @@ -370,7 +370,7 @@ bool JIT::removeModule(Module *M) { jitstate = new JITState(Modules[0]); FunctionPassManager &PM = jitstate->getPM(locked); - PM.add(new TargetData(*TM.getTargetData())); + PM.add(new DataLayout(*TM.getDataLayout())); // Turn the machine code intermediate representation into bytes in memory // that may be executed. @@ -815,8 +815,8 @@ char* JIT::getMemoryForGV(const GlobalVariable* GV) { // through the memory manager which puts them near the code but not in the // same buffer. 
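
(Aside: the JIT.cpp hunks above now seed each per-module FunctionPassManager with a DataLayout pass copied from the target machine, where a TargetData pass used to go. A small sketch of that setup idiom, with an invented helper name:

    #include "llvm/DataLayout.h"
    #include "llvm/PassManager.h"
    #include "llvm/Target/TargetMachine.h"

    void addDataLayoutPass(llvm::FunctionPassManager &PM,
                           const llvm::TargetMachine &TM) {
      PM.add(new llvm::DataLayout(*TM.getDataLayout()));  // was new TargetData(...)
    }

End of aside.)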
Type *GlobalType = GV->getType()->getElementType(); - size_t S = getTargetData()->getTypeAllocSize(GlobalType); - size_t A = getTargetData()->getPreferredAlignment(GV); + size_t S = getDataLayout()->getTypeAllocSize(GlobalType); + size_t A = getDataLayout()->getPreferredAlignment(GV); if (GV->isThreadLocal()) { MutexGuard locked(lock); Ptr = TJI.allocateThreadLocalMemory(S); diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp index 42a136e..19c1979 100644 --- a/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp +++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.cpp @@ -24,7 +24,7 @@ #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/ErrorHandling.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" @@ -42,7 +42,7 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F, assert(MMI && "MachineModuleInfo not registered!"); const TargetMachine& TM = F.getTarget(); - TD = TM.getTargetData(); + TD = TM.getDataLayout(); stackGrowthDirection = TM.getFrameLowering()->getStackGrowthDirection(); RI = TM.getRegisterInfo(); MAI = TM.getMCAsmInfo(); diff --git a/lib/ExecutionEngine/JIT/JITDwarfEmitter.h b/lib/ExecutionEngine/JIT/JITDwarfEmitter.h index 8dc99ab..9cdbeac 100644 --- a/lib/ExecutionEngine/JIT/JITDwarfEmitter.h +++ b/lib/ExecutionEngine/JIT/JITDwarfEmitter.h @@ -23,12 +23,12 @@ class MachineFunction; class MachineModuleInfo; class MachineMove; class MCAsmInfo; -class TargetData; +class DataLayout; class TargetMachine; class TargetRegisterInfo; class JITDwarfEmitter { - const TargetData* TD; + const DataLayout* TD; JITCodeEmitter* JCE; const TargetRegisterInfo* RI; const MCAsmInfo *MAI; diff --git a/lib/ExecutionEngine/JIT/JITEmitter.cpp b/lib/ExecutionEngine/JIT/JITEmitter.cpp index ff3a9dc..ecafda7 100644 --- a/lib/ExecutionEngine/JIT/JITEmitter.cpp +++ b/lib/ExecutionEngine/JIT/JITEmitter.cpp @@ -30,7 +30,7 @@ #include "llvm/ExecutionEngine/GenericValue.h" #include "llvm/ExecutionEngine/JITEventListener.h" #include "llvm/ExecutionEngine/JITMemoryManager.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetJITInfo.h" #include "llvm/Target/TargetMachine.h" @@ -384,11 +384,6 @@ namespace { delete MemMgr; } - /// classof - Methods for support type inquiry through isa, cast, and - /// dyn_cast: - /// - static inline bool classof(const MachineCodeEmitter*) { return true; } - JITResolver &getJITResolver() { return Resolver; } virtual void startFunction(MachineFunction &F); @@ -763,7 +758,7 @@ void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) { } static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP, - const TargetData *TD) { + const DataLayout *TD) { const std::vector &Constants = MCP->getConstants(); if (Constants.empty()) return 0; @@ -780,7 +775,7 @@ static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP, void JITEmitter::startFunction(MachineFunction &F) { DEBUG(dbgs() << "JIT: Starting CodeGen of Function " - << F.getFunction()->getName() << "\n"); + << F.getName() << "\n"); uintptr_t ActualSize = 0; // Set the memory writable, if it's not already @@ -929,7 +924,7 @@ bool JITEmitter::finishFunction(MachineFunction &F) { PrevDL = DebugLoc(); DEBUG(dbgs() << "JIT: Finished CodeGen of [" << (void*)FnStart - << "] Function: " << 
F.getFunction()->getName() + << "] Function: " << F.getName() << ": " << (FnEnd-FnStart) << " bytes of text, " << Relocations.size() << " relocations\n"); @@ -1058,7 +1053,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) { const std::vector &Constants = MCP->getConstants(); if (Constants.empty()) return; - unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData()); + unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getDataLayout()); unsigned Align = MCP->getConstantPoolAlignment(); ConstantPoolBase = allocateSpace(Size, Align); ConstantPool = MCP; @@ -1087,7 +1082,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) { dbgs().write_hex(CAddr) << "]\n"); Type *Ty = CPE.Val.ConstVal->getType(); - Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty); + Offset += TheJIT->getDataLayout()->getTypeAllocSize(Ty); } } @@ -1104,14 +1099,14 @@ void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) { for (unsigned i = 0, e = JT.size(); i != e; ++i) NumEntries += JT[i].MBBs.size(); - unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getTargetData()); + unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getDataLayout()); // Just allocate space for all the jump tables now. We will fix up the actual // MBB entries in the tables after we emit the code for each block, since then // we will know the final locations of the MBBs in memory. JumpTable = MJTI; JumpTableBase = allocateSpace(NumEntries * EntrySize, - MJTI->getEntryAlignment(*TheJIT->getTargetData())); + MJTI->getEntryAlignment(*TheJIT->getDataLayout())); } void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { @@ -1128,7 +1123,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { case MachineJumpTableInfo::EK_BlockAddress: { // EK_BlockAddress - Each entry is a plain address of block, e.g.: // .word LBB123 - assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == sizeof(void*) && + assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == sizeof(void*) && "Cross JIT'ing?"); // For each jump table, map each target in the jump table to the address of @@ -1148,7 +1143,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { case MachineJumpTableInfo::EK_Custom32: case MachineJumpTableInfo::EK_GPRel32BlockAddress: case MachineJumpTableInfo::EK_LabelDifference32: { - assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == 4&&"Cross JIT'ing?"); + assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == 4&&"Cross JIT'ing?"); // For each jump table, place the offset from the beginning of the table // to the target address. int *SlotPtr = (int*)JumpTableBase; @@ -1224,7 +1219,7 @@ uintptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const { const std::vector &JT = JumpTable->getJumpTables(); assert(Index < JT.size() && "Invalid jump table index!"); - unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getTargetData()); + unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getDataLayout()); unsigned Offset = 0; for (unsigned i = 0; i < Index; ++i) @@ -1265,15 +1260,13 @@ void *JIT::getPointerToFunctionOrStub(Function *F) { return Addr; // Get a stub if the target supports it. - assert(isa(JCE) && "Unexpected MCE?"); - JITEmitter *JE = cast(getCodeEmitter()); + JITEmitter *JE = static_cast(getCodeEmitter()); return JE->getJITResolver().getLazyFunctionStub(F); } void JIT::updateFunctionStub(Function *F) { // Get the empty stub we generated earlier. 
- assert(isa(JCE) && "Unexpected MCE?"); - JITEmitter *JE = cast(getCodeEmitter()); + JITEmitter *JE = static_cast(getCodeEmitter()); void *Stub = JE->getJITResolver().getLazyFunctionStub(F); void *Addr = getPointerToGlobalIfAvailable(F); assert(Addr != Stub && "Function must have non-stub address to be updated."); @@ -1294,6 +1287,5 @@ void JIT::freeMachineCodeForFunction(Function *F) { updateGlobalMapping(F, 0); // Free the actual memory for the function body and related stuff. - assert(isa(JCE) && "Unexpected MCE?"); - cast(JCE)->deallocateMemForFunction(F); + static_cast(JCE)->deallocateMemForFunction(F); } diff --git a/lib/ExecutionEngine/MCJIT/CMakeLists.txt b/lib/ExecutionEngine/MCJIT/CMakeLists.txt index fef7176..2911a50 100644 --- a/lib/ExecutionEngine/MCJIT/CMakeLists.txt +++ b/lib/ExecutionEngine/MCJIT/CMakeLists.txt @@ -1,4 +1,3 @@ add_llvm_library(LLVMMCJIT MCJIT.cpp - MCJITMemoryManager.cpp ) diff --git a/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/lib/ExecutionEngine/MCJIT/MCJIT.cpp index 739ffd7d8..752c5b7 100644 --- a/lib/ExecutionEngine/MCJIT/MCJIT.cpp +++ b/lib/ExecutionEngine/MCJIT/MCJIT.cpp @@ -8,18 +8,20 @@ //===----------------------------------------------------------------------===// #include "MCJIT.h" -#include "MCJITMemoryManager.h" #include "llvm/DerivedTypes.h" #include "llvm/Function.h" #include "llvm/ExecutionEngine/GenericValue.h" -#include "llvm/ExecutionEngine/MCJIT.h" +#include "llvm/ExecutionEngine/JITEventListener.h" #include "llvm/ExecutionEngine/JITMemoryManager.h" +#include "llvm/ExecutionEngine/MCJIT.h" +#include "llvm/ExecutionEngine/ObjectBuffer.h" +#include "llvm/ExecutionEngine/ObjectImage.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/MutexGuard.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; @@ -44,24 +46,20 @@ ExecutionEngine *MCJIT::createJIT(Module *M, // FIXME: Don't do this here. sys::DynamicLibrary::LoadLibraryPermanently(0, NULL); - // If the target supports JIT code generation, create the JIT. - if (TargetJITInfo *TJ = TM->getJITInfo()) - return new MCJIT(M, TM, *TJ, new MCJITMemoryManager(JMM), GVsWithCode); - - if (ErrorStr) - *ErrorStr = "target does not support JIT code generation"; - return 0; + return new MCJIT(M, TM, JMM, GVsWithCode); } -MCJIT::MCJIT(Module *m, TargetMachine *tm, TargetJITInfo &tji, - RTDyldMemoryManager *MM, bool AllocateGVsWithCode) - : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM), - isCompiled(false), M(m), OS(Buffer) { +MCJIT::MCJIT(Module *m, TargetMachine *tm, RTDyldMemoryManager *MM, + bool AllocateGVsWithCode) + : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM), + isCompiled(false), M(m) { - setTargetData(TM->getTargetData()); + setDataLayout(TM->getDataLayout()); } MCJIT::~MCJIT() { + if (LoadedObject) + NotifyFreeingObject(*LoadedObject.get()); delete MemMgr; delete TM; } @@ -69,7 +67,7 @@ MCJIT::~MCJIT() { void MCJIT::emitObject(Module *m) { /// Currently, MCJIT only supports a single module and the module passed to /// this function call is expected to be the contained module. The module - /// is passed as a parameter here to prepare for multiple module support in + /// is passed as a parameter here to prepare for multiple module support in /// the future. 
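The MCJIT constructor above no longer requires a TargetJITInfo and now calls setDataLayout() instead of setTargetData(). A minimal client-side sketch of driving this path follows; EngineBuilder, setUseMCJIT() and ExecutionEngine::getDataLayout() are pre-existing APIs assumed to behave as the renamed code implies, and are not something this hunk adds.

// Sketch only (LLVM 3.2-era headers assumed); not part of the patch.
#include "llvm/Module.h"
#include "llvm/DataLayout.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/MCJIT.h"      // links in the MCJIT implementation
#include "llvm/Support/TargetSelect.h"
#include <string>

llvm::ExecutionEngine *createMCJITEngine(llvm::Module *M, std::string &Err) {
  llvm::InitializeNativeTarget();
  llvm::InitializeNativeTargetAsmPrinter();
  llvm::ExecutionEngine *EE = llvm::EngineBuilder(M)
                                  .setEngineKind(llvm::EngineKind::JIT)
                                  .setUseMCJIT(true)   // selects MCJIT::createJIT()
                                  .setErrorStr(&Err)
                                  .create();
  if (!EE)
    return 0;
  // Renamed accessor: formerly getTargetData().
  const llvm::DataLayout *DL = EE->getDataLayout();
  (void)DL->getPointerSize();                // e.g. query pointer size in bytes
  return EE;
}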
assert(M == m); @@ -84,41 +82,65 @@ void MCJIT::emitObject(Module *m) { PassManager PM; - PM.add(new TargetData(*TM->getTargetData())); + PM.add(new DataLayout(*TM->getDataLayout())); + + // The RuntimeDyld will take ownership of this shortly + OwningPtr Buffer(new ObjectBufferStream()); // Turn the machine code intermediate representation into bytes in memory // that may be executed. - if (TM->addPassesToEmitMC(PM, Ctx, OS, false)) { + if (TM->addPassesToEmitMC(PM, Ctx, Buffer->getOStream(), false)) { report_fatal_error("Target does not support MC emission!"); } // Initialize passes. - // FIXME: When we support multiple modules, we'll want to move the code - // gen and finalization out of the constructor here and do it more - // on-demand as part of getPointerToFunction(). PM.run(*m); - // Flush the output buffer so the SmallVector gets its data. - OS.flush(); + // Flush the output buffer to get the generated code into memory + Buffer->flush(); // Load the object into the dynamic linker. - MemoryBuffer* MB = MemoryBuffer::getMemBuffer(StringRef(Buffer.data(), - Buffer.size()), - "", false); - if (Dyld.loadObject(MB)) + // handing off ownership of the buffer + LoadedObject.reset(Dyld.loadObject(Buffer.take())); + if (!LoadedObject) report_fatal_error(Dyld.getErrorString()); // Resolve any relocations. Dyld.resolveRelocations(); + // FIXME: Make this optional, maybe even move it to a JIT event listener + LoadedObject->registerWithDebugger(); + + NotifyObjectEmitted(*LoadedObject); + // FIXME: Add support for per-module compilation state isCompiled = true; } +// FIXME: Add a parameter to identify which object is being finalized when +// MCJIT supports multiple modules. +void MCJIT::finalizeObject() { + // If the module hasn't been compiled, just do that. + if (!isCompiled) { + // If the call to Dyld.resolveRelocations() is removed from emitObject() + // we'll need to do that here. + emitObject(M); + return; + } + + // Resolve any relocations. + Dyld.resolveRelocations(); +} + void *MCJIT::getPointerToBasicBlock(BasicBlock *BB) { report_fatal_error("not yet implemented"); } void *MCJIT::getPointerToFunction(Function *F) { + // FIXME: This should really return a uint64_t since it's a pointer in the + // target address space, not our local address space. That's part of the + // ExecutionEngine interface, though. Fix that when the old JIT finally + // dies. + // FIXME: Add support for per-module compilation state if (!isCompiled) emitObject(M); @@ -132,10 +154,13 @@ void *MCJIT::getPointerToFunction(Function *F) { // FIXME: Should the Dyld be retaining module information? Probably not. // FIXME: Should we be using the mangler for this? Probably. + // + // This is the accessor for the target address, so make sure to check the + // load address of the symbol, not the local address. 
StringRef BaseName = F->getName(); if (BaseName[0] == '\1') - return (void*)Dyld.getSymbolAddress(BaseName.substr(1)); - return (void*)Dyld.getSymbolAddress((TM->getMCAsmInfo()->getGlobalPrefix() + return (void*)Dyld.getSymbolLoadAddress(BaseName.substr(1)); + return (void*)Dyld.getSymbolLoadAddress((TM->getMCAsmInfo()->getGlobalPrefix() + BaseName).str()); } @@ -270,3 +295,33 @@ void *MCJIT::getPointerToNamedFunction(const std::string &Name, } return 0; } + +void MCJIT::RegisterJITEventListener(JITEventListener *L) { + if (L == NULL) + return; + MutexGuard locked(lock); + EventListeners.push_back(L); +} +void MCJIT::UnregisterJITEventListener(JITEventListener *L) { + if (L == NULL) + return; + MutexGuard locked(lock); + SmallVector::reverse_iterator I= + std::find(EventListeners.rbegin(), EventListeners.rend(), L); + if (I != EventListeners.rend()) { + std::swap(*I, EventListeners.back()); + EventListeners.pop_back(); + } +} +void MCJIT::NotifyObjectEmitted(const ObjectImage& Obj) { + MutexGuard locked(lock); + for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) { + EventListeners[I]->NotifyObjectEmitted(Obj); + } +} +void MCJIT::NotifyFreeingObject(const ObjectImage& Obj) { + MutexGuard locked(lock); + for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) { + EventListeners[I]->NotifyFreeingObject(Obj); + } +} diff --git a/lib/ExecutionEngine/MCJIT/MCJIT.h b/lib/ExecutionEngine/MCJIT/MCJIT.h index 1d272e9..571080d 100644 --- a/lib/ExecutionEngine/MCJIT/MCJIT.h +++ b/lib/ExecutionEngine/MCJIT/MCJIT.h @@ -11,33 +11,32 @@ #define LLVM_LIB_EXECUTIONENGINE_MCJIT_H #include "llvm/PassManager.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ExecutionEngine/ExecutionEngine.h" #include "llvm/ExecutionEngine/RuntimeDyld.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/Support/raw_ostream.h" namespace llvm { +class ObjectImage; + // FIXME: This makes all kinds of horrible assumptions for the time being, // like only having one module, not needing to worry about multi-threading, // blah blah. Purely in get-it-up-and-limping mode for now. class MCJIT : public ExecutionEngine { - MCJIT(Module *M, TargetMachine *tm, TargetJITInfo &tji, - RTDyldMemoryManager *MemMgr, bool AllocateGVsWithCode); + MCJIT(Module *M, TargetMachine *tm, RTDyldMemoryManager *MemMgr, + bool AllocateGVsWithCode); TargetMachine *TM; MCContext *Ctx; RTDyldMemoryManager *MemMgr; RuntimeDyld Dyld; + SmallVector EventListeners; // FIXME: Add support for multiple modules bool isCompiled; Module *M; - - // FIXME: Move these to a single container which manages JITed objects - SmallVector Buffer; // Working buffer into which we JIT. - raw_svector_ostream OS; + OwningPtr LoadedObject; public: ~MCJIT(); @@ -45,6 +44,8 @@ public: /// @name ExecutionEngine interface implementation /// @{ + virtual void finalizeObject(); + virtual void *getPointerToBasicBlock(BasicBlock *BB); virtual void *getPointerToFunction(Function *F); @@ -71,10 +72,14 @@ public: /// Map the address of a JIT section as returned from the memory manager /// to the address in the target process as the running code will see it. /// This is the address which will be used for relocation resolution. 
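MCJIT now keeps its own listener list and forwards object emission and teardown through RegisterJITEventListener()/NotifyObjectEmitted()/NotifyFreeingObject() above. A hedged sketch of a listener written against those hooks; the exact virtual signatures on the JITEventListener base class are assumed to match the calls MCJIT makes here.

// Sketch only: assumes JITEventListener declares the ObjectImage-based hooks
// that MCJIT::NotifyObjectEmitted/NotifyFreeingObject invoke above.
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/Support/raw_ostream.h"

class ObjectLoggingListener : public llvm::JITEventListener {
public:
  virtual void NotifyObjectEmitted(const llvm::ObjectImage &Obj) {
    // Called after MCJIT has loaded and relocated an object image.
    llvm::errs() << "MCJIT emitted an object image\n";
  }
  virtual void NotifyFreeingObject(const llvm::ObjectImage &Obj) {
    // Called from the MCJIT destructor before the image is released.
    llvm::errs() << "MCJIT is freeing an object image\n";
  }
};

void attachListener(llvm::ExecutionEngine *EE, ObjectLoggingListener &L) {
  EE->RegisterJITEventListener(&L);    // appended under MCJIT's lock
  // ... compile and run code ...
  EE->UnregisterJITEventListener(&L);  // reverse-find + pop, as in the hunk above
}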
- virtual void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress) { + virtual void mapSectionAddress(const void *LocalAddress, + uint64_t TargetAddress) { Dyld.mapSectionAddress(LocalAddress, TargetAddress); } + virtual void RegisterJITEventListener(JITEventListener *L); + virtual void UnregisterJITEventListener(JITEventListener *L); + /// @} /// @name (Private) Registration Interfaces /// @{ @@ -98,6 +103,9 @@ protected: /// is passed as a parameter here to prepare for multiple module support in /// the future. void emitObject(Module *M); + + void NotifyObjectEmitted(const ObjectImage& Obj); + void NotifyFreeingObject(const ObjectImage& Obj); }; } // End llvm namespace diff --git a/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp b/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp deleted file mode 100644 index 457fe5e..0000000 --- a/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.cpp +++ /dev/null @@ -1,14 +0,0 @@ -//==-- MCJITMemoryManager.cpp - Definition for the Memory Manager -*-C++ -*-==// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#include "MCJITMemoryManager.h" - -using namespace llvm; - -void MCJITMemoryManager::anchor() { } diff --git a/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h b/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h deleted file mode 100644 index 441aaeb..0000000 --- a/lib/ExecutionEngine/MCJIT/MCJITMemoryManager.h +++ /dev/null @@ -1,50 +0,0 @@ -//===-- MCJITMemoryManager.h - Definition for the Memory Manager ---C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_LIB_EXECUTIONENGINE_MCJITMEMORYMANAGER_H -#define LLVM_LIB_EXECUTIONENGINE_MCJITMEMORYMANAGER_H - -#include "llvm/Module.h" -#include "llvm/ExecutionEngine/JITMemoryManager.h" -#include "llvm/ExecutionEngine/RuntimeDyld.h" -#include - -namespace llvm { - -// The MCJIT memory manager is a layer between the standard JITMemoryManager -// and the RuntimeDyld interface that maps objects, by name, onto their -// matching LLVM IR counterparts in the module(s) being compiled. 
-class MCJITMemoryManager : public RTDyldMemoryManager { - virtual void anchor(); - OwningPtr JMM; - -public: - MCJITMemoryManager(JITMemoryManager *jmm) : - JMM(jmm?jmm:JITMemoryManager::CreateDefaultMemManager()) {} - - uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, - unsigned SectionID) { - return JMM->allocateDataSection(Size, Alignment, SectionID); - } - - uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, - unsigned SectionID) { - return JMM->allocateCodeSection(Size, Alignment, SectionID); - } - - virtual void *getPointerToNamedFunction(const std::string &Name, - bool AbortOnFailure = true) { - return JMM->getPointerToNamedFunction(Name, AbortOnFailure); - } - -}; - -} // End llvm namespace - -#endif diff --git a/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp b/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp index 8b50101..50cd072 100644 --- a/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/GDBRegistrar.cpp @@ -78,12 +78,12 @@ public: /// Creates an entry in the JIT registry for the buffer @p Object, /// which must contain an object file in executable memory with any /// debug information for the debugger. - void registerObject(const MemoryBuffer &Object); + void registerObject(const ObjectBuffer &Object); /// Removes the internal registration of @p Object, and /// frees associated resources. /// Returns true if @p Object was found in ObjectBufferMap. - bool deregisterObject(const MemoryBuffer &Object); + bool deregisterObject(const ObjectBuffer &Object); private: /// Deregister the debug info for the given object file from the debugger @@ -124,7 +124,7 @@ GDBJITRegistrar::~GDBJITRegistrar() { ObjectBufferMap.clear(); } -void GDBJITRegistrar::registerObject(const MemoryBuffer &Object) { +void GDBJITRegistrar::registerObject(const ObjectBuffer &Object) { const char *Buffer = Object.getBufferStart(); size_t Size = Object.getBufferSize(); @@ -147,7 +147,7 @@ void GDBJITRegistrar::registerObject(const MemoryBuffer &Object) { } } -bool GDBJITRegistrar::deregisterObject(const MemoryBuffer& Object) { +bool GDBJITRegistrar::deregisterObject(const ObjectBuffer& Object) { const char *Buffer = Object.getBufferStart(); RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(Buffer); diff --git a/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h b/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h index f964bc6..69e9dbe 100644 --- a/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h +++ b/lib/ExecutionEngine/RuntimeDyld/JITRegistrar.h @@ -10,7 +10,7 @@ #ifndef LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H #define LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H -#include "llvm/Support/MemoryBuffer.h" +#include "llvm/ExecutionEngine/ObjectBuffer.h" namespace llvm { @@ -27,12 +27,12 @@ public: /// Creates an entry in the JIT registry for the buffer @p Object, /// which must contain an object file in executable memory with any /// debug information for the debugger. - virtual void registerObject(const MemoryBuffer &Object) = 0; + virtual void registerObject(const ObjectBuffer &Object) = 0; /// Removes the internal registration of @p Object, and /// frees associated resources. /// Returns true if @p Object was previously registered. 
- virtual bool deregisterObject(const MemoryBuffer &Object) = 0; + virtual bool deregisterObject(const ObjectBuffer &Object) = 0; /// Returns a reference to a GDB JIT registrar singleton static JITRegistrar& getGDBRegistrar(); diff --git a/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h b/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h deleted file mode 100644 index c3e3572..0000000 --- a/lib/ExecutionEngine/RuntimeDyld/ObjectImage.h +++ /dev/null @@ -1,59 +0,0 @@ -//===---- ObjectImage.h - Format independent executuable object image -----===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file declares a file format independent ObjectImage class. -// -//===----------------------------------------------------------------------===// - -#ifndef LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H -#define LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H - -#include "llvm/Object/ObjectFile.h" - -namespace llvm { - -class ObjectImage { - ObjectImage(); // = delete - ObjectImage(const ObjectImage &other); // = delete -protected: - object::ObjectFile *ObjFile; - -public: - ObjectImage(object::ObjectFile *Obj) { ObjFile = Obj; } - virtual ~ObjectImage() {} - - virtual object::symbol_iterator begin_symbols() const - { return ObjFile->begin_symbols(); } - virtual object::symbol_iterator end_symbols() const - { return ObjFile->end_symbols(); } - - virtual object::section_iterator begin_sections() const - { return ObjFile->begin_sections(); } - virtual object::section_iterator end_sections() const - { return ObjFile->end_sections(); } - - virtual /* Triple::ArchType */ unsigned getArch() const - { return ObjFile->getArch(); } - - // Subclasses can override these methods to update the image with loaded - // addresses for sections and common symbols - virtual void updateSectionAddress(const object::SectionRef &Sec, - uint64_t Addr) {} - virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr) - {} - - // Subclasses can override these methods to provide JIT debugging support - virtual void registerWithDebugger() {} - virtual void deregisterWithDebugger() {} -}; - -} // end namespace llvm - -#endif // LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H - diff --git a/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h b/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h new file mode 100644 index 0000000..17f3a21 --- /dev/null +++ b/lib/ExecutionEngine/RuntimeDyld/ObjectImageCommon.h @@ -0,0 +1,76 @@ +//===-- ObjectImageCommon.h - Format independent executuable object image -===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares a file format independent ObjectImage class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_RUNTIMEDYLD_OBJECTIMAGECOMMON_H +#define LLVM_RUNTIMEDYLD_OBJECTIMAGECOMMON_H + +#include "llvm/Object/ObjectFile.h" +#include "llvm/ExecutionEngine/ObjectImage.h" +#include "llvm/ExecutionEngine/ObjectBuffer.h" + +namespace llvm { + +class ObjectImageCommon : public ObjectImage { + ObjectImageCommon(); // = delete + ObjectImageCommon(const ObjectImageCommon &other); // = delete + +protected: + object::ObjectFile *ObjFile; + + // This form of the constructor allows subclasses to use + // format-specific subclasses of ObjectFile directly + ObjectImageCommon(ObjectBuffer *Input, object::ObjectFile *Obj) + : ObjectImage(Input), // saves Input as Buffer and takes ownership + ObjFile(Obj) + { + } + +public: + ObjectImageCommon(ObjectBuffer* Input) + : ObjectImage(Input) // saves Input as Buffer and takes ownership + { + ObjFile = object::ObjectFile::createObjectFile(Buffer->getMemBuffer()); + } + virtual ~ObjectImageCommon() { delete ObjFile; } + + virtual object::symbol_iterator begin_symbols() const + { return ObjFile->begin_symbols(); } + virtual object::symbol_iterator end_symbols() const + { return ObjFile->end_symbols(); } + + virtual object::section_iterator begin_sections() const + { return ObjFile->begin_sections(); } + virtual object::section_iterator end_sections() const + { return ObjFile->end_sections(); } + + virtual /* Triple::ArchType */ unsigned getArch() const + { return ObjFile->getArch(); } + + virtual StringRef getData() const { return ObjFile->getData(); } + + // Subclasses can override these methods to update the image with loaded + // addresses for sections and common symbols + virtual void updateSectionAddress(const object::SectionRef &Sec, + uint64_t Addr) {} + virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr) + {} + + // Subclasses can override these methods to provide JIT debugging support + virtual void registerWithDebugger() {} + virtual void deregisterWithDebugger() {} +}; + +} // end namespace llvm + +#endif // LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H + diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp index b464040..f6dccb1 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp @@ -12,10 +12,12 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "dyld" +#include "ObjectImageCommon.h" #include "RuntimeDyldImpl.h" #include "RuntimeDyldELF.h" #include "RuntimeDyldMachO.h" #include "llvm/Support/Path.h" +#include "llvm/Support/MathExtras.h" using namespace llvm; using namespace llvm::object; @@ -26,16 +28,6 @@ RuntimeDyldImpl::~RuntimeDyldImpl() {} namespace llvm { -namespace { - // Helper for extensive error checking in debug builds. - error_code Check(error_code Err) { - if (Err) { - report_fatal_error(Err.message()); - } - return Err; - } -} // end anonymous namespace - // Resolve the relocations for all symbols we currently know about. void RuntimeDyldImpl::resolveRelocations() { // First, resolve relocations associated with external symbols. @@ -44,11 +36,15 @@ void RuntimeDyldImpl::resolveRelocations() { // Just iterate over the sections we have and resolve all the relocations // in them. Gross overkill, but it gets the job done. 
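loadObject() now takes an ObjectBuffer and returns the ObjectImage the caller owns, rather than accepting a raw MemoryBuffer and returning bool. A rough sketch of the new flow from a standalone RuntimeDyld client; the ObjectBuffer(MemoryBuffer*) constructor is assumed from the ObjectBuffer.h header this series introduces, and the file path is a placeholder.

// Sketch only: ObjectBuffer's constructor and the new loadObject() signature
// are assumed from this patch series, not quoted from it.
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ExecutionEngine/ObjectBuffer.h"
#include "llvm/ExecutionEngine/ObjectImage.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/Support/MemoryBuffer.h"

bool loadAndLink(llvm::RTDyldMemoryManager *MM, const char *Path) {
  llvm::OwningPtr<llvm::MemoryBuffer> File;
  if (llvm::MemoryBuffer::getFile(Path, File))
    return false;                                  // non-zero error_code

  llvm::RuntimeDyld Dyld(MM);
  // The dynamic loader takes ownership of the buffer and hands back the image.
  llvm::OwningPtr<llvm::ObjectImage> Image(
      Dyld.loadObject(new llvm::ObjectBuffer(File.take())));
  if (!Image)
    return false;                                  // see Dyld.getErrorString()

  Dyld.resolveRelocations();                       // apply all pending relocations
  return true;
}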
for (int i = 0, e = Sections.size(); i != e; ++i) { - reassignSectionAddress(i, Sections[i].LoadAddress); + uint64_t Addr = Sections[i].LoadAddress; + DEBUG(dbgs() << "Resolving relocations Section #" << i + << "\t" << format("%p", (uint8_t *)Addr) + << "\n"); + resolveRelocationList(Relocations[i], Addr); } } -void RuntimeDyldImpl::mapSectionAddress(void *LocalAddress, +void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress) { for (unsigned i = 0, e = Sections.size(); i != e; ++i) { if (Sections[i].Address == LocalAddress) { @@ -61,14 +57,11 @@ void RuntimeDyldImpl::mapSectionAddress(void *LocalAddress, // Subclasses can implement this method to create specialized image instances. // The caller owns the pointer that is returned. -ObjectImage *RuntimeDyldImpl::createObjectImage(const MemoryBuffer *InputBuffer) { - ObjectFile *ObjFile = ObjectFile::createObjectFile(const_cast - (InputBuffer)); - ObjectImage *Obj = new ObjectImage(ObjFile); - return Obj; +ObjectImage *RuntimeDyldImpl::createObjectImage(ObjectBuffer *InputBuffer) { + return new ObjectImageCommon(InputBuffer); } -bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { +ObjectImage *RuntimeDyldImpl::loadObject(ObjectBuffer *InputBuffer) { OwningPtr obj(createObjectImage(InputBuffer)); if (!obj) report_fatal_error("Unable to create object image from memory buffer!"); @@ -80,9 +73,9 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { // Used sections from the object file ObjSectionToIDMap LocalSections; - // Common symbols requiring allocation, and the total size required to - // allocate all common symbols. + // Common symbols requiring allocation, with their sizes and alignments CommonSymbolMap CommonSymbols; + // Maximum required total memory to allocate all common symbols uint64_t CommonSize = 0; error_code err; @@ -102,13 +95,15 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { bool isCommon = flags & SymbolRef::SF_Common; if (isCommon) { // Add the common symbols to a list. We'll allocate them all below. 
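The common-symbol handling that follows pads each symbol to its alignment before assigning addresses, using OffsetToAlignment() from the MathExtras.h include added above. A small worked example of that padding arithmetic, detached from the loader state; the addresses and alignment are hypothetical.

// Sketch of the padding computation used in emitCommonSymbols():
// OffsetToAlignment(Addr, Align) is the number of bytes to skip so that
// Addr + result is the next multiple of Align.
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <stdint.h>

void commonSymbolPaddingExample() {
  uint64_t Addr  = 0x1003;               // hypothetical bump-allocator cursor
  uint64_t Align = 8;                    // alignment taken from the SHN_COMMON value
  uint64_t Pad   = llvm::OffsetToAlignment(Addr, Align);
  assert(Pad == 5 && (Addr + Pad) % Align == 0);
  Addr += Pad;                           // the symbol is placed at 0x1008
  assert(llvm::OffsetToAlignment(Addr, Align) == 0); // already aligned: no padding
}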
+ uint64_t Align = getCommonSymbolAlignment(*i); uint64_t Size = 0; Check(i->getSize(Size)); - CommonSize += Size; - CommonSymbols[*i] = Size; + CommonSize += Size + Align; + CommonSymbols[*i] = CommonSymbolInfo(Size, Align); } else { if (SymType == object::SymbolRef::ST_Function || - SymType == object::SymbolRef::ST_Data) { + SymType == object::SymbolRef::ST_Data || + SymType == object::SymbolRef::ST_Unknown) { uint64_t FileOffset; StringRef SectionData; section_iterator si = obj->end_sections(); @@ -177,9 +172,7 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) { } } - handleObjectLoaded(obj.take()); - - return false; + return obj.take(); } void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj, @@ -193,7 +186,7 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj, if (!Addr) report_fatal_error("Unable to allocate memory for common symbols!"); uint64_t Offset = 0; - Sections.push_back(SectionEntry(Addr, TotalSize, TotalSize, 0)); + Sections.push_back(SectionEntry(StringRef(), Addr, TotalSize, TotalSize, 0)); memset(Addr, 0, TotalSize); DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID @@ -204,11 +197,20 @@ void RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj, // Assign the address of each symbol for (CommonSymbolMap::const_iterator it = CommonSymbols.begin(), itEnd = CommonSymbols.end(); it != itEnd; it++) { + uint64_t Size = it->second.first; + uint64_t Align = it->second.second; StringRef Name; it->first.getName(Name); + if (Align) { + // This symbol has an alignment requirement. + uint64_t AlignOffset = OffsetToAlignment((uint64_t)Addr, Align); + Addr += AlignOffset; + Offset += AlignOffset; + DEBUG(dbgs() << "Allocating common symbol " << Name << " address " << + format("%p\n", Addr)); + } Obj.updateSymbolAddress(it->first, (uint64_t)Addr); SymbolTable[Name.data()] = SymbolLoc(SectionID, Offset); - uint64_t Size = it->second; Offset += Size; Addr += Size; } @@ -236,10 +238,12 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj, bool IsVirtual; bool IsZeroInit; uint64_t DataSize; + StringRef Name; Check(Section.isRequiredForExecution(IsRequired)); Check(Section.isVirtual(IsVirtual)); Check(Section.isZeroInit(IsZeroInit)); Check(Section.getSize(DataSize)); + Check(Section.getName(Name)); unsigned Allocate; unsigned SectionID = Sections.size(); @@ -267,6 +271,7 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj, memcpy(Addr, pData, DataSize); DEBUG(dbgs() << "emitSection SectionID: " << SectionID + << " Name: " << Name << " obj addr: " << format("%p", pData) << " new addr: " << format("%p", Addr) << " DataSize: " << DataSize @@ -282,6 +287,7 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj, Allocate = 0; Addr = 0; DEBUG(dbgs() << "emitSection SectionID: " << SectionID + << " Name: " << Name << " obj addr: " << format("%p", data.data()) << " new addr: 0" << " DataSize: " << DataSize @@ -290,7 +296,8 @@ unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj, << "\n"); } - Sections.push_back(SectionEntry(Addr, Allocate, DataSize,(uintptr_t)pData)); + Sections.push_back(SectionEntry(Name, Addr, Allocate, DataSize, + (uintptr_t)pData)); return SectionID; } @@ -333,15 +340,49 @@ void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE, } uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr) { - // TODO: There is only ARM far stub now. We should add the Thumb stub, - // and stubs for branches Thumb - ARM and ARM - Thumb. if (Arch == Triple::arm) { + // TODO: There is only ARM far stub now. 
We should add the Thumb stub, + // and stubs for branches Thumb - ARM and ARM - Thumb. uint32_t *StubAddr = (uint32_t*)Addr; *StubAddr = 0xe51ff004; // ldr pc,
    ' instruction + writeInt32BE(LocalAddress, 0x48000001 | (delta & 0x03FFFFFC)); + } break; + case ELF::R_PPC64_ADDR64 : + writeInt64BE(LocalAddress, Value + Addend); + break; + case ELF::R_PPC64_TOC : + writeInt64BE(LocalAddress, findPPC64TOC()); + break; + case ELF::R_PPC64_TOC16 : { + uint64_t TOCStart = findPPC64TOC(); + Value = applyPPClo((Value + Addend) - TOCStart); + writeInt16BE(LocalAddress, applyPPClo(Value)); + } break; + case ELF::R_PPC64_TOC16_DS : { + uint64_t TOCStart = findPPC64TOC(); + Value = ((Value + Addend) - TOCStart); + writeInt16BE(LocalAddress, applyPPClo(Value)); + } break; + } +} + + +void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section, + uint64_t Offset, uint64_t Value, uint32_t Type, int64_t Addend) { switch (Arch) { case Triple::x86_64: - resolveX86_64Relocation(LocalAddress, FinalAddress, Value, Type, Addend); + resolveX86_64Relocation(Section, Offset, Value, Type, Addend); break; case Triple::x86: - resolveX86Relocation(LocalAddress, (uint32_t)(FinalAddress & 0xffffffffL), + resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type, (uint32_t)(Addend & 0xffffffffL)); break; case Triple::arm: // Fall through. case Triple::thumb: - resolveARMRelocation(LocalAddress, (uint32_t)(FinalAddress & 0xffffffffL), + resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type, (uint32_t)(Addend & 0xffffffffL)); break; + case Triple::mips: // Fall through. + case Triple::mipsel: + resolveMIPSRelocation(Section, Offset, + (uint32_t)(Value & 0xffffffffL), Type, + (uint32_t)(Addend & 0xffffffffL)); + break; + case Triple::ppc64: + resolvePPC64Relocation(Section, Offset, Value, Type, Addend); + break; default: llvm_unreachable("Unsupported CPU type!"); } } @@ -350,6 +597,8 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, RelocationValueRef Value; // First search for the symbol in the local symbol table SymbolTableMap::const_iterator lsi = Symbols.find(TargetName.data()); + SymbolRef::Type SymType; + Symbol.getType(SymType); if (lsi != Symbols.end()) { Value.SectionID = lsi->second.first; Value.Addend = lsi->second.second; @@ -361,8 +610,6 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, Value.SectionID = gsi->second.first; Value.Addend = gsi->second.second; } else { - SymbolRef::Type SymType; - Symbol.getType(SymType); switch (SymType) { case SymbolRef::ST_Debug: { // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously @@ -373,7 +620,13 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, if (si == Obj.end_sections()) llvm_unreachable("Symbol section not found, bad object file format!"); DEBUG(dbgs() << "\t\tThis is section symbol\n"); - Value.SectionID = findOrEmitSection(Obj, (*si), true, ObjSectionToID); + // Default to 'true' in case isText fails (though it never does). + bool isCode = true; + si->isText(isCode); + Value.SectionID = findOrEmitSection(Obj, + (*si), + isCode, + ObjSectionToID); Value.Addend = Addend; break; } @@ -398,13 +651,12 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, // This is an ARM branch relocation, need to use a stub function. DEBUG(dbgs() << "\t\tThis is an ARM branch relocation."); SectionEntry &Section = Sections[Rel.SectionID]; - uint8_t *Target = Section.Address + Rel.Offset; - // Look up for existing stub. + // Look for an existing stub. 
StubMap::const_iterator i = Stubs.find(Value); if (i != Stubs.end()) { - resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address + - i->second, RelType, 0); + resolveRelocation(Section, Rel.Offset, + (uint64_t)Section.Address + i->second, RelType, 0); DEBUG(dbgs() << " Stub function found\n"); } else { // Create a new stub function. @@ -419,10 +671,145 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, else addRelocationForSection(RE, Value.SectionID); - resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address + - Section.StubOffset, RelType, 0); + resolveRelocation(Section, Rel.Offset, + (uint64_t)Section.Address + Section.StubOffset, + RelType, 0); Section.StubOffset += getMaxStubSize(); } + } else if (Arch == Triple::mipsel && RelType == ELF::R_MIPS_26) { + // This is an Mips branch relocation, need to use a stub function. + DEBUG(dbgs() << "\t\tThis is a Mips branch relocation."); + SectionEntry &Section = Sections[Rel.SectionID]; + uint8_t *Target = Section.Address + Rel.Offset; + uint32_t *TargetAddress = (uint32_t *)Target; + + // Extract the addend from the instruction. + uint32_t Addend = ((*TargetAddress) & 0x03ffffff) << 2; + + Value.Addend += Addend; + + // Look up for existing stub. + StubMap::const_iterator i = Stubs.find(Value); + if (i != Stubs.end()) { + resolveRelocation(Section, Rel.Offset, + (uint64_t)Section.Address + i->second, RelType, 0); + DEBUG(dbgs() << " Stub function found\n"); + } else { + // Create a new stub function. + DEBUG(dbgs() << " Create a new stub function\n"); + Stubs[Value] = Section.StubOffset; + uint8_t *StubTargetAddr = createStubFunction(Section.Address + + Section.StubOffset); + + // Creating Hi and Lo relocations for the filled stub instructions. + RelocationEntry REHi(Rel.SectionID, + StubTargetAddr - Section.Address, + ELF::R_MIPS_HI16, Value.Addend); + RelocationEntry RELo(Rel.SectionID, + StubTargetAddr - Section.Address + 4, + ELF::R_MIPS_LO16, Value.Addend); + + if (Value.SymbolName) { + addRelocationForSymbol(REHi, Value.SymbolName); + addRelocationForSymbol(RELo, Value.SymbolName); + } else { + addRelocationForSection(REHi, Value.SectionID); + addRelocationForSection(RELo, Value.SectionID); + } + + resolveRelocation(Section, Rel.Offset, + (uint64_t)Section.Address + Section.StubOffset, + RelType, 0); + Section.StubOffset += getMaxStubSize(); + } + } else if (Arch == Triple::ppc64) { + if (RelType == ELF::R_PPC64_REL24) { + // A PPC branch relocation will need a stub function if the target is + // an external symbol (Symbol::ST_Unknown) or if the target address + // is not within the signed 24-bits branch address. + SectionEntry &Section = Sections[Rel.SectionID]; + uint8_t *Target = Section.Address + Rel.Offset; + bool RangeOverflow = false; + if (SymType != SymbolRef::ST_Unknown) { + // A function call may points to the .opd entry, so the final symbol value + // in calculated based in the relocation values in .opd section. 
+ findOPDEntrySection(Obj, ObjSectionToID, Value); + uint8_t *RelocTarget = Sections[Value.SectionID].Address + Value.Addend; + int32_t delta = static_cast(Target - RelocTarget); + // If it is within 24-bits branch range, just set the branch target + if (SignExtend32<24>(delta) == delta) { + RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend); + if (Value.SymbolName) + addRelocationForSymbol(RE, Value.SymbolName); + else + addRelocationForSection(RE, Value.SectionID); + } else { + RangeOverflow = true; + } + } + if (SymType == SymbolRef::ST_Unknown || RangeOverflow == true) { + // It is an external symbol (SymbolRef::ST_Unknown) or within a range + // larger than 24-bits. + StubMap::const_iterator i = Stubs.find(Value); + if (i != Stubs.end()) { + // Symbol function stub already created, just relocate to it + resolveRelocation(Section, Rel.Offset, + (uint64_t)Section.Address + i->second, RelType, 0); + DEBUG(dbgs() << " Stub function found\n"); + } else { + // Create a new stub function. + DEBUG(dbgs() << " Create a new stub function\n"); + Stubs[Value] = Section.StubOffset; + uint8_t *StubTargetAddr = createStubFunction(Section.Address + + Section.StubOffset); + RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address, + ELF::R_PPC64_ADDR64, Value.Addend); + + // Generates the 64-bits address loads as exemplified in section + // 4.5.1 in PPC64 ELF ABI. + RelocationEntry REhst(Rel.SectionID, + StubTargetAddr - Section.Address + 2, + ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend); + RelocationEntry REhr(Rel.SectionID, + StubTargetAddr - Section.Address + 6, + ELF::R_PPC64_ADDR16_HIGHER, Value.Addend); + RelocationEntry REh(Rel.SectionID, + StubTargetAddr - Section.Address + 14, + ELF::R_PPC64_ADDR16_HI, Value.Addend); + RelocationEntry REl(Rel.SectionID, + StubTargetAddr - Section.Address + 18, + ELF::R_PPC64_ADDR16_LO, Value.Addend); + + if (Value.SymbolName) { + addRelocationForSymbol(REhst, Value.SymbolName); + addRelocationForSymbol(REhr, Value.SymbolName); + addRelocationForSymbol(REh, Value.SymbolName); + addRelocationForSymbol(REl, Value.SymbolName); + } else { + addRelocationForSection(REhst, Value.SectionID); + addRelocationForSection(REhr, Value.SectionID); + addRelocationForSection(REh, Value.SectionID); + addRelocationForSection(REl, Value.SectionID); + } + + resolveRelocation(Section, Rel.Offset, + (uint64_t)Section.Address + Section.StubOffset, + RelType, 0); + if (SymType == SymbolRef::ST_Unknown) + // Restore the TOC for external calls + writeInt32BE(Target+4, 0xE8410028); // ld r2,40(r1) + Section.StubOffset += getMaxStubSize(); + } + } + } else { + RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend); + // Extra check to avoid relocation againt empty symbols (usually + // the R_PPC64_TOC). + if (Value.SymbolName && !TargetName.empty()) + addRelocationForSymbol(RE, Value.SymbolName); + else + addRelocationForSection(RE, Value.SectionID); + } } else { RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend); if (Value.SymbolName) @@ -432,8 +819,16 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel, } } -bool RuntimeDyldELF::isCompatibleFormat(const MemoryBuffer *InputBuffer) const { - StringRef Magic = InputBuffer->getBuffer().slice(0, ELF::EI_NIDENT); - return (memcmp(Magic.data(), ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0; +unsigned RuntimeDyldELF::getCommonSymbolAlignment(const SymbolRef &Sym) { + // In ELF, the value of an SHN_COMMON symbol is its alignment requirement. 
+ uint64_t Align; + Check(Sym.getValue(Align)); + return Align; +} + +bool RuntimeDyldELF::isCompatibleFormat(const ObjectBuffer *Buffer) const { + if (Buffer->getBufferSize() < strlen(ELF::ElfMagic)) + return false; + return (memcmp(Buffer->getBufferStart(), ELF::ElfMagic, strlen(ELF::ElfMagic))) == 0; } } // namespace llvm diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h index e413f78..07e704b 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h @@ -18,32 +18,52 @@ using namespace llvm; - namespace llvm { + +namespace { + // Helper for extensive error checking in debug builds. + error_code Check(error_code Err) { + if (Err) { + report_fatal_error(Err.message()); + } + return Err; + } +} // end anonymous namespace + class RuntimeDyldELF : public RuntimeDyldImpl { protected: - ObjectImage *LoadedObject; - - void resolveX86_64Relocation(uint8_t *LocalAddress, - uint64_t FinalAddress, + void resolveX86_64Relocation(const SectionEntry &Section, + uint64_t Offset, uint64_t Value, uint32_t Type, int64_t Addend); - void resolveX86Relocation(uint8_t *LocalAddress, - uint32_t FinalAddress, + void resolveX86Relocation(const SectionEntry &Section, + uint64_t Offset, uint32_t Value, uint32_t Type, int32_t Addend); - void resolveARMRelocation(uint8_t *LocalAddress, - uint32_t FinalAddress, + void resolveARMRelocation(const SectionEntry &Section, + uint64_t Offset, uint32_t Value, uint32_t Type, int32_t Addend); - virtual void resolveRelocation(uint8_t *LocalAddress, - uint64_t FinalAddress, + void resolveMIPSRelocation(const SectionEntry &Section, + uint64_t Offset, + uint32_t Value, + uint32_t Type, + int32_t Addend); + + void resolvePPC64Relocation(const SectionEntry &Section, + uint64_t Offset, + uint64_t Value, + uint32_t Type, + int64_t Addend); + + virtual void resolveRelocation(const SectionEntry &Section, + uint64_t Offset, uint64_t Value, uint32_t Type, int64_t Addend); @@ -54,16 +74,22 @@ protected: const SymbolTableMap &Symbols, StubMap &Stubs); - virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer); - virtual void handleObjectLoaded(ObjectImage *Obj); + unsigned getCommonSymbolAlignment(const SymbolRef &Sym); + + virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer); + + uint64_t findPPC64TOC() const; + void findOPDEntrySection(ObjectImage &Obj, + ObjSectionToIDMap &LocalSections, + RelocationValueRef &Rel); public: RuntimeDyldELF(RTDyldMemoryManager *mm) - : RuntimeDyldImpl(mm), LoadedObject(0) {} + : RuntimeDyldImpl(mm) {} virtual ~RuntimeDyldELF(); - bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const; + bool isCompatibleFormat(const ObjectBuffer *Buffer) const; }; } // end namespace llvm diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h index c38ca69..829fd6c 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h @@ -14,8 +14,8 @@ #ifndef LLVM_RUNTIME_DYLD_IMPL_H #define LLVM_RUNTIME_DYLD_IMPL_H -#include "ObjectImage.h" #include "llvm/ExecutionEngine/RuntimeDyld.h" +#include "llvm/ExecutionEngine/ObjectImage.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" @@ -24,6 +24,8 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" +#include "llvm/Support/Host.h" +#include 
"llvm/Support/SwapByteOrder.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/system_error.h" #include @@ -33,7 +35,7 @@ using namespace llvm::object; namespace llvm { -class MemoryBuffer; +class ObjectBuffer; class Twine; @@ -41,6 +43,9 @@ class Twine; /// linker. class SectionEntry { public: + /// Name - section name. + StringRef Name; + /// Address - address in the linker's memory where the section resides. uint8_t *Address; @@ -61,9 +66,9 @@ public: /// for calculating relocations in some object formats (like MachO). uintptr_t ObjAddress; - SectionEntry(uint8_t *address, size_t size, uintptr_t stubOffset, - uintptr_t objAddress) - : Address(address), Size(size), LoadAddress((uintptr_t)address), + SectionEntry(StringRef name, uint8_t *address, size_t size, + uintptr_t stubOffset, uintptr_t objAddress) + : Name(name), Address(address), Size(size), LoadAddress((uintptr_t)address), StubOffset(stubOffset), ObjAddress(objAddress) {} }; @@ -135,8 +140,10 @@ protected: typedef StringMap SymbolTableMap; SymbolTableMap GlobalSymbolTable; - // Keep a map of common symbols to their sizes - typedef std::map CommonSymbolMap; + // Pair representing the size and alignment requirement for a common symbol. + typedef std::pair CommonSymbolInfo; + // Keep a map of common symbols to their info pairs + typedef std::map CommonSymbolMap; // For each symbol, keep a list of relocations based on it. Anytime // its address is reassigned (the JIT re-compiled the function, e.g.), @@ -161,6 +168,10 @@ protected: inline unsigned getMaxStubSize() { if (Arch == Triple::arm || Arch == Triple::thumb) return 8; // 32-bit instruction and 32-bit address + else if (Arch == Triple::mipsel) + return 16; + else if (Arch == Triple::ppc64) + return 44; else return 0; } @@ -175,10 +186,50 @@ protected: return true; } + uint64_t getSectionLoadAddress(unsigned SectionID) { + return Sections[SectionID].LoadAddress; + } + uint8_t *getSectionAddress(unsigned SectionID) { return (uint8_t*)Sections[SectionID].Address; } + // Subclasses can override this method to get the alignment requirement of + // a common symbol. Returns no alignment requirement if not implemented. + virtual unsigned getCommonSymbolAlignment(const SymbolRef &Sym) { + return 0; + } + + + void writeInt16BE(uint8_t *Addr, uint16_t Value) { + if (sys::isLittleEndianHost()) + Value = sys::SwapByteOrder(Value); + *Addr = (Value >> 8) & 0xFF; + *(Addr+1) = Value & 0xFF; + } + + void writeInt32BE(uint8_t *Addr, uint32_t Value) { + if (sys::isLittleEndianHost()) + Value = sys::SwapByteOrder(Value); + *Addr = (Value >> 24) & 0xFF; + *(Addr+1) = (Value >> 16) & 0xFF; + *(Addr+2) = (Value >> 8) & 0xFF; + *(Addr+3) = Value & 0xFF; + } + + void writeInt64BE(uint8_t *Addr, uint64_t Value) { + if (sys::isLittleEndianHost()) + Value = sys::SwapByteOrder(Value); + *Addr = (Value >> 56) & 0xFF; + *(Addr+1) = (Value >> 48) & 0xFF; + *(Addr+2) = (Value >> 40) & 0xFF; + *(Addr+3) = (Value >> 32) & 0xFF; + *(Addr+4) = (Value >> 24) & 0xFF; + *(Addr+5) = (Value >> 16) & 0xFF; + *(Addr+6) = (Value >> 8) & 0xFF; + *(Addr+7) = Value & 0xFF; + } + /// \brief Given the common symbols discovered in the object file, emit a /// new section for them and update the symbol mappings in the object and /// symbol table. 
@@ -221,13 +272,14 @@ protected: void resolveRelocationEntry(const RelocationEntry &RE, uint64_t Value); /// \brief A object file specific relocation resolver - /// \param Address Address to apply the relocation action + /// \param Section The section where the relocation is being applied + /// \param Offset The offset into the section for this relocation /// \param Value Target symbol address to apply the relocation action /// \param Type object file specific relocation type /// \param Addend A constant addend used to compute the value to be stored /// into the relocatable field - virtual void resolveRelocation(uint8_t *LocalAddress, - uint64_t FinalAddress, + virtual void resolveRelocation(const SectionEntry &Section, + uint64_t Offset, uint64_t Value, uint32_t Type, int64_t Addend) = 0; @@ -242,19 +294,13 @@ protected: /// \brief Resolve relocations to external symbols. void resolveExternalSymbols(); - virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer); - virtual void handleObjectLoaded(ObjectImage *Obj) - { - // Subclasses may choose to retain this image if they have a use for it - delete Obj; - } - + virtual ObjectImage *createObjectImage(ObjectBuffer *InputBuffer); public: RuntimeDyldImpl(RTDyldMemoryManager *mm) : MemMgr(mm), HasError(false) {} virtual ~RuntimeDyldImpl(); - bool loadObject(const MemoryBuffer *InputBuffer); + ObjectImage *loadObject(ObjectBuffer *InputBuffer); void *getSymbolAddress(StringRef Name) { // FIXME: Just look up as a function for now. Overly simple of course. @@ -265,11 +311,20 @@ public: return getSectionAddress(Loc.first) + Loc.second; } + uint64_t getSymbolLoadAddress(StringRef Name) { + // FIXME: Just look up as a function for now. Overly simple of course. + // Work in progress. + if (GlobalSymbolTable.find(Name) == GlobalSymbolTable.end()) + return 0; + SymbolLoc Loc = GlobalSymbolTable.lookup(Name); + return getSectionLoadAddress(Loc.first) + Loc.second; + } + void resolveRelocations(); void reassignSectionAddress(unsigned SectionID, uint64_t Addr); - void mapSectionAddress(void *LocalAddress, uint64_t TargetAddress); + void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress); // Is the linker in an error state? bool hasError() { return HasError; } @@ -280,8 +335,7 @@ public: // Get the error message. 
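mapSectionAddress() now takes a const pointer, and getSymbolLoadAddress() lets callers distinguish the linker's local copy of a section (Address) from where it will live in the executing process (LoadAddress). A rough sketch of the remote-style flow this enables; copyToTarget(), the addresses, and the symbol name are hypothetical stand-ins for a real transport.

// Sketch only: illustrates the local-vs-load address split using the
// RuntimeDyld entry points touched in this hunk.
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include <stdint.h>

extern void copyToTarget(uint64_t TargetAddr, const void *Src, uint64_t Size);

uint64_t linkForRemoteTarget(llvm::RuntimeDyld &Dyld,
                             const void *SectionLocalAddr,
                             uint64_t SectionSize,
                             uint64_t SectionTargetAddr) {
  // Tell the linker where this section will really live...
  Dyld.mapSectionAddress(SectionLocalAddr, SectionTargetAddr);
  // ...resolve relocations against the load addresses...
  Dyld.resolveRelocations();
  // ...then ship the finished bytes to the target.
  copyToTarget(SectionTargetAddr, SectionLocalAddr, SectionSize);
  // Callers get the load address back, as MCJIT::getPointerToFunction()
  // now does via getSymbolLoadAddress().
  return Dyld.getSymbolLoadAddress("main");
}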
StringRef getErrorString() { return ErrorStr; } - virtual bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const = 0; - + virtual bool isCompatibleFormat(const ObjectBuffer *Buffer) const = 0; }; } // end namespace llvm diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp index 0e3a9d4..987c0c3 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp @@ -21,11 +21,13 @@ using namespace llvm::object; namespace llvm { -void RuntimeDyldMachO::resolveRelocation(uint8_t *LocalAddress, - uint64_t FinalAddress, +void RuntimeDyldMachO::resolveRelocation(const SectionEntry &Section, + uint64_t Offset, uint64_t Value, uint32_t Type, int64_t Addend) { + uint8_t *LocalAddress = Section.Address + Offset; + uint64_t FinalAddress = Section.LoadAddress + Offset; bool isPCRel = (Type >> 24) & 1; unsigned MachoType = (Type >> 28) & 0xf; unsigned Size = 1 << ((Type >> 25) & 3); @@ -57,7 +59,7 @@ void RuntimeDyldMachO::resolveRelocation(uint8_t *LocalAddress, FinalAddress, (uintptr_t)Value, isPCRel, - Type, + MachoType, Size, Addend); break; @@ -211,7 +213,6 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, uint32_t RelType = (uint32_t) (Rel.Type & 0xffffffffL); RelocationValueRef Value; SectionEntry &Section = Sections[Rel.SectionID]; - uint8_t *Target = Section.Address + Rel.Offset; bool isExtern = (RelType >> 27) & 1; if (isExtern) { @@ -246,7 +247,12 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, } assert(si != se && "No section containing relocation!"); Value.SectionID = findOrEmitSection(Obj, *si, true, ObjSectionToID); - Value.Addend = *(const intptr_t *)Target; + Value.Addend = 0; + // FIXME: The size and type of the relocation determines if we can + // encode an Addend in the target location itself, and if so, how many + // bytes we should read in order to get it. We don't yet support doing + // that, and just assuming it's sizeof(intptr_t) is blatantly wrong. + //Value.Addend = *(const intptr_t *)Target; if (Value.Addend) { // The MachO addend is an offset from the current section. We need it // to be an offset from the destination section @@ -254,13 +260,13 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, } } - if (Arch == Triple::arm && RelType == macho::RIT_ARM_Branch24Bit) { + if (Arch == Triple::arm && (RelType & 0xf) == macho::RIT_ARM_Branch24Bit) { // This is an ARM branch relocation, need to use a stub function. // Look up for existing stub. 
StubMap::const_iterator i = Stubs.find(Value); if (i != Stubs.end()) - resolveRelocation(Target, (uint64_t)Target, + resolveRelocation(Section, Rel.Offset, (uint64_t)Section.Address + i->second, RelType, 0); else { @@ -274,7 +280,7 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, addRelocationForSymbol(RE, Value.SymbolName); else addRelocationForSection(RE, Value.SectionID); - resolveRelocation(Target, (uint64_t)Target, + resolveRelocation(Section, Rel.Offset, (uint64_t)Section.Address + Section.StubOffset, RelType, 0); Section.StubOffset += getMaxStubSize(); @@ -290,8 +296,10 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel, bool RuntimeDyldMachO::isCompatibleFormat( - const MemoryBuffer *InputBuffer) const { - StringRef Magic = InputBuffer->getBuffer().slice(0, 4); + const ObjectBuffer *InputBuffer) const { + if (InputBuffer->getBufferSize() < 4) + return false; + StringRef Magic(InputBuffer->getBufferStart(), 4); if (Magic == "\xFE\xED\xFA\xCE") return true; if (Magic == "\xCE\xFA\xED\xFE") return true; if (Magic == "\xFE\xED\xFA\xCF") return true; diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h index 707664c..fe3539d 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h @@ -55,15 +55,15 @@ protected: StubMap &Stubs); public: - virtual void resolveRelocation(uint8_t *LocalAddress, - uint64_t FinalAddress, + virtual void resolveRelocation(const SectionEntry &Section, + uint64_t Offset, uint64_t Value, uint32_t Type, int64_t Addend); RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {} - bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const; + bool isCompatibleFormat(const ObjectBuffer *Buffer) const; }; } // end namespace llvm diff --git a/lib/ExecutionEngine/TargetSelect.cpp b/lib/ExecutionEngine/TargetSelect.cpp index 7cdd669..8b6104f 100644 --- a/lib/ExecutionEngine/TargetSelect.cpp +++ b/lib/ExecutionEngine/TargetSelect.cpp @@ -26,7 +26,14 @@ using namespace llvm; TargetMachine *EngineBuilder::selectTarget() { - Triple TT(LLVM_HOSTTRIPLE); + Triple TT; + + // MCJIT can generate code for remote targets, but the old JIT and Interpreter + // must use the host architecture. + if (UseMCJIT && WhichEngine != EngineKind::Interpreter && M) + TT.setTriple(M->getTargetTriple()); + else + TT.setTriple(LLVM_HOSTTRIPLE); return selectTarget(TT, MArch, MCPU, MAttrs); } diff --git a/lib/MC/ELFObjectWriter.cpp b/lib/MC/ELFObjectWriter.cpp index 7203b9a..eda0623 100644 --- a/lib/MC/ELFObjectWriter.cpp +++ b/lib/MC/ELFObjectWriter.cpp @@ -133,6 +133,11 @@ class ELFObjectWriter : public MCObjectWriter { bool IsPCRel) const { return TargetObjectWriter->ExplicitRelSym(Asm, Target, F, Fixup, IsPCRel); } + const MCSymbol *undefinedExplicitRelSym(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const { + return TargetObjectWriter->undefinedExplicitRelSym(Target, Fixup, IsPCRel); + } bool is64Bit() const { return TargetObjectWriter->is64Bit(); } bool hasRelocationAddend() const { @@ -270,9 +275,10 @@ class ELFObjectWriter : public MCObjectWriter { /// ComputeSymbolTable - Compute the symbol table data /// - /// \param StringTable [out] - The string table data. - /// \param StringIndexMap [out] - Map from symbol names to offsets in the - /// string table. + /// \param Asm - The assembler. + /// \param SectionIndexMap - Maps a section to its index. 
+ /// \param RevGroupMap - Maps a signature symbol to the group section. + /// \param NumRegularSections - Number of non-relocation sections. void ComputeSymbolTable(MCAssembler &Asm, const SectionIndexMapTy &SectionIndexMap, RevGroupMapTy RevGroupMap, @@ -638,7 +644,7 @@ const MCSymbol *ELFObjectWriter::SymbolToReloc(const MCAssembler &Asm, if (ASymbol.isUndefined()) { if (Renamed) return Renamed; - return &ASymbol; + return undefinedExplicitRelSym(Target, Fixup, IsPCRel); } if (SD.isExternal()) { @@ -720,10 +726,13 @@ void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm, MCSymbolData &SD = Asm.getSymbolData(ASymbol); MCFragment *F = SD.getFragment(); - Index = F->getParent()->getOrdinal() + 1; - - // Offset of the symbol in the section - Value += Layout.getSymbolOffset(&SD); + if (F) { + Index = F->getParent()->getOrdinal() + 1; + // Offset of the symbol in the section + Value += Layout.getSymbolOffset(&SD); + } else { + Index = 0; + } } else { if (Asm.getSymbolData(Symbol).getFlags() & ELF_Other_Weakref) WeakrefUsedInReloc.insert(RelocSymbol); @@ -732,8 +741,7 @@ void ELFObjectWriter::RecordRelocation(const MCAssembler &Asm, Index = -1; } Addend = Value; - // Compensate for the addend on i386. - if (is64Bit()) + if (hasRelocationAddend()) Value = 0; } diff --git a/lib/MC/MCAsmBackend.cpp b/lib/MC/MCAsmBackend.cpp index 2e447b0..53960e7 100644 --- a/lib/MC/MCAsmBackend.cpp +++ b/lib/MC/MCAsmBackend.cpp @@ -12,12 +12,9 @@ using namespace llvm; MCAsmBackend::MCAsmBackend() - : HasReliableSymbolDifference(false) -{ -} + : HasReliableSymbolDifference(false), HasDataInCodeSupport(false) {} -MCAsmBackend::~MCAsmBackend() { -} +MCAsmBackend::~MCAsmBackend() {} const MCFixupKindInfo & MCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const { diff --git a/lib/MC/MCAsmInfo.cpp b/lib/MC/MCAsmInfo.cpp index 8da2e0e..7ea0f3b 100644 --- a/lib/MC/MCAsmInfo.cpp +++ b/lib/MC/MCAsmInfo.cpp @@ -68,8 +68,8 @@ MCAsmInfo::MCAsmInfo() { GlobalDirective = "\t.globl\t"; HasSetDirective = true; HasAggressiveSymbolFolding = true; - LCOMMDirectiveType = LCOMM::None; COMMDirectiveAlignmentIsInBytes = true; + LCOMMDirectiveAlignmentType = LCOMM::NoAlignment; HasDotTypeDotSizeDirective = true; HasSingleParameterDotFile = true; HasNoDeadStrip = false; diff --git a/lib/MC/MCAsmInfoCOFF.cpp b/lib/MC/MCAsmInfoCOFF.cpp index 678e75a..fd79193 100644 --- a/lib/MC/MCAsmInfoCOFF.cpp +++ b/lib/MC/MCAsmInfoCOFF.cpp @@ -19,8 +19,10 @@ void MCAsmInfoCOFF::anchor() { } MCAsmInfoCOFF::MCAsmInfoCOFF() { GlobalPrefix = "_"; + // MingW 4.5 and later support .comm with log2 alignment, but .lcomm uses byte + // alignment. COMMDirectiveAlignmentIsInBytes = false; - LCOMMDirectiveType = LCOMM::ByteAlignment; + LCOMMDirectiveAlignmentType = LCOMM::ByteAlignment; HasDotTypeDotSizeDirective = false; HasSingleParameterDotFile = false; PrivateGlobalPrefix = "L"; // Prefix for private global symbols diff --git a/lib/MC/MCAsmInfoDarwin.cpp b/lib/MC/MCAsmInfoDarwin.cpp index 8e0ac23..a0e3eba 100644 --- a/lib/MC/MCAsmInfoDarwin.cpp +++ b/lib/MC/MCAsmInfoDarwin.cpp @@ -32,6 +32,7 @@ MCAsmInfoDarwin::MCAsmInfoDarwin() { AlignmentIsInBytes = false; COMMDirectiveAlignmentIsInBytes = false; + LCOMMDirectiveAlignmentType = LCOMM::Log2Alignment; InlineAsmStart = " InlineAsm Start"; InlineAsmEnd = " InlineAsm End"; diff --git a/lib/MC/MCAsmStreamer.cpp b/lib/MC/MCAsmStreamer.cpp index 373df4b..17a6323 100644 --- a/lib/MC/MCAsmStreamer.cpp +++ b/lib/MC/MCAsmStreamer.cpp @@ -166,7 +166,7 @@ public: /// /// @param Symbol - The common symbol to emit. 
/// @param Size - The size of the common symbol. - /// @param Size - The alignment of the common symbol in bytes. + /// @param ByteAlignment - The alignment of the common symbol in bytes. virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment); @@ -251,6 +251,7 @@ public: virtual void EmitPad(int64_t Offset); virtual void EmitRegSave(const SmallVectorImpl &RegList, bool); + virtual void EmitTCEntry(const MCSymbol &S); virtual void EmitInstruction(const MCInst &Inst); @@ -517,13 +518,19 @@ void MCAsmStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size, /// @param Size - The size of the common symbol. void MCAsmStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size, unsigned ByteAlign) { - assert(MAI.getLCOMMDirectiveType() != LCOMM::None && - "Doesn't have .lcomm, can't emit it!"); OS << "\t.lcomm\t" << *Symbol << ',' << Size; if (ByteAlign > 1) { - assert(MAI.getLCOMMDirectiveType() == LCOMM::ByteAlignment && - "Alignment not supported on .lcomm!"); - OS << ',' << ByteAlign; + switch (MAI.getLCOMMDirectiveAlignmentType()) { + case LCOMM::NoAlignment: + llvm_unreachable("alignment not supported on .lcomm!"); + case LCOMM::ByteAlignment: + OS << ',' << ByteAlign; + break; + case LCOMM::Log2Alignment: + assert(isPowerOf2_32(ByteAlign) && "alignment must be a power of 2"); + OS << ',' << Log2_32(ByteAlign); + break; + } } EmitEOL(); } @@ -1293,6 +1300,14 @@ void MCAsmStreamer::EmitRegSave(const SmallVectorImpl &RegList, EmitEOL(); } +void MCAsmStreamer::EmitTCEntry(const MCSymbol &S) { + OS << "\t.tc "; + OS << S.getName(); + OS << "[TC],"; + OS << S.getName(); + EmitEOL(); +} + void MCAsmStreamer::EmitInstruction(const MCInst &Inst) { assert(getCurrentSection() && "Cannot emit contents before setting section!"); diff --git a/lib/MC/MCAssembler.cpp b/lib/MC/MCAssembler.cpp index 05519b5..726ec5a 100644 --- a/lib/MC/MCAssembler.cpp +++ b/lib/MC/MCAssembler.cpp @@ -199,8 +199,7 @@ MCAssembler::MCAssembler(MCContext &Context_, MCAsmBackend &Backend_, MCCodeEmitter &Emitter_, MCObjectWriter &Writer_, raw_ostream &OS_) : Context(Context_), Backend(Backend_), Emitter(Emitter_), Writer(Writer_), - OS(OS_), RelaxAll(false), NoExecStack(false), SubsectionsViaSymbols(false) -{ + OS(OS_), RelaxAll(false), NoExecStack(false), SubsectionsViaSymbols(false) { } MCAssembler::~MCAssembler() { @@ -325,6 +324,12 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout, const MCAlignFragment &AF = cast(F); unsigned Offset = Layout.getFragmentOffset(&AF); unsigned Size = OffsetToAlignment(Offset, AF.getAlignment()); + // If we are padding with nops, force the padding to be larger than the + // minimum nop size. + if (Size > 0 && AF.hasEmitNops()) { + while (Size % getBackend().getMinimumNopSize()) + Size += AF.getAlignment(); + } if (Size > AF.getMaxBytesToEmit()) return 0; return Size; @@ -375,7 +380,7 @@ void MCAsmLayout::LayoutFragment(MCFragment *F) { LastValidFragment[F->getParent()] = F; } -/// WriteFragmentData - Write the \arg F data to the output file. +/// WriteFragmentData - Write the \p F data to the output file. 
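The MCAsmStreamer hunk above replaces the old "has .lcomm or not" flag with three alignment flavours. A standalone sketch of the resulting printing rule (illustrative types, not the MCAsmInfo API):

#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>

enum LCOMMType { NoAlignment, ByteAlignment, Log2Alignment };

static std::string printLCOMM(const std::string &Sym, uint64_t Size,
                              unsigned ByteAlign, LCOMMType T) {
  std::ostringstream OS;
  OS << "\t.lcomm\t" << Sym << ',' << Size;
  if (ByteAlign > 1) {
    switch (T) {
    case NoAlignment:
      assert(false && "alignment not supported on .lcomm!");
      break;
    case ByteAlignment:
      OS << ',' << ByteAlign;
      break;
    case Log2Alignment: {
      assert((ByteAlign & (ByteAlign - 1)) == 0 && "alignment must be a power of 2");
      unsigned Log2 = 0;
      while ((1u << (Log2 + 1)) <= ByteAlign)
        ++Log2;
      OS << ',' << Log2;
      break;
    }
    }
  }
  return OS.str();
}
// printLCOMM("_buf", 64, 16, Log2Alignment) -> "\t.lcomm\t_buf,64,4"
// printLCOMM("_buf", 64, 16, ByteAlignment) -> "\t.lcomm\t_buf,64,16"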
static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFragment &F) { MCObjectWriter *OW = &Asm.getWriter(); @@ -527,7 +532,7 @@ void MCAssembler::writeSectionData(const MCSectionData *SD, } uint64_t Start = getWriter().getStream().tell(); - (void) Start; + (void)Start; for (MCSectionData::const_iterator it = SD->begin(), ie = SD->end(); it != ie; ++it) @@ -824,6 +829,7 @@ raw_ostream &operator<<(raw_ostream &OS, const MCFixup &AF) { } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MCFragment::dump() { raw_ostream &OS = llvm::errs(); @@ -964,6 +970,7 @@ void MCAssembler::dump() { } OS << "]>\n"; } +#endif // anchors for MC*Fragment vtables void MCDataFragment::anchor() { } diff --git a/lib/MC/MCContext.cpp b/lib/MC/MCContext.cpp index b5b14b9..477bd17 100644 --- a/lib/MC/MCContext.cpp +++ b/lib/MC/MCContext.cpp @@ -153,6 +153,12 @@ MCSymbol *MCContext::LookupSymbol(StringRef Name) const { return Symbols.lookup(Name); } +MCSymbol *MCContext::LookupSymbol(const Twine &Name) const { + SmallString<128> NameSV; + Name.toVector(NameSV); + return LookupSymbol(NameSV.str()); +} + //===----------------------------------------------------------------------===// // Section Management //===----------------------------------------------------------------------===// diff --git a/lib/MC/MCDisassembler/Disassembler.cpp b/lib/MC/MCDisassembler/Disassembler.cpp index 35f675d..5189c9da 100644 --- a/lib/MC/MCDisassembler/Disassembler.cpp +++ b/lib/MC/MCDisassembler/Disassembler.cpp @@ -184,3 +184,17 @@ size_t LLVMDisasmInstruction(LLVMDisasmContextRef DCR, uint8_t *Bytes, } llvm_unreachable("Invalid DecodeStatus!"); } + +// +// LLVMSetDisasmOptions() sets the disassembler's options. It returns 1 if it +// can set all the Options and 0 otherwise. +// +int LLVMSetDisasmOptions(LLVMDisasmContextRef DCR, uint64_t Options){ + if (Options & LLVMDisassembler_Option_UseMarkup){ + LLVMDisasmContext *DC = (LLVMDisasmContext *)DCR; + MCInstPrinter *IP = DC->getIP(); + IP->setUseMarkup(1); + Options &= ~LLVMDisassembler_Option_UseMarkup; + } + return (Options == 0); +} diff --git a/lib/MC/MCDisassembler/EDDisassembler.cpp b/lib/MC/MCDisassembler/EDDisassembler.cpp index 1226f1a..eed7a77 100644 --- a/lib/MC/MCDisassembler/EDDisassembler.cpp +++ b/lib/MC/MCDisassembler/EDDisassembler.cpp @@ -366,8 +366,9 @@ int EDDisassembler::parseInst(SmallVectorImpl &operands, instName = OpcodeToken.getString(); instLoc = OpcodeToken.getLoc(); + ParseInstructionInfo Info; if (NextToken.isNot(AsmToken::Eof) && - TargetParser->ParseInstruction(instName, instLoc, operands)) + TargetParser->ParseInstruction(Info, instName, instLoc, operands)) ret = -1; } else { ret = -1; diff --git a/lib/MC/MCDwarf.cpp b/lib/MC/MCDwarf.cpp index 4c63e43..f71b266 100644 --- a/lib/MC/MCDwarf.cpp +++ b/lib/MC/MCDwarf.cpp @@ -425,9 +425,11 @@ void MCDwarfFile::print(raw_ostream &OS) const { OS << '"' << getName() << '"'; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MCDwarfFile::dump() const { print(dbgs()); } +#endif // Utility function to write a tuple for .debug_abbrev. 
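A rough usage sketch for the new LLVMSetDisasmOptions() C API entry point shown in the Disassembler.cpp hunk above, assuming the usual creation and initialization helpers from llvm-c/Disassembler.h and llvm-c/Target.h; the triple and the minimal error handling are illustrative.

#include "llvm-c/Disassembler.h"
#include "llvm-c/Target.h"
#include <cstdio>

int main() {
  // Bring up the targets and their disassemblers before creating a context.
  LLVMInitializeAllTargetInfos();
  LLVMInitializeAllTargetMCs();
  LLVMInitializeAllDisassemblers();

  LLVMDisasmContextRef DC =
      LLVMCreateDisasm("x86_64-unknown-linux-gnu", 0, 0, 0, 0);
  if (!DC)
    return 1;

  // Per the comment above, the call returns 1 only if every requested option
  // was understood and applied, and 0 otherwise.
  if (!LLVMSetDisasmOptions(DC, LLVMDisassembler_Option_UseMarkup))
    std::fprintf(stderr, "markup option not accepted\n");

  LLVMDisasmDispose(DC);
  return 0;
}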
static void EmitAbbrev(MCStreamer *MCOS, uint64_t Name, uint64_t Form) { diff --git a/lib/MC/MCELFObjectTargetWriter.cpp b/lib/MC/MCELFObjectTargetWriter.cpp index 6eb6914..74cd042 100644 --- a/lib/MC/MCELFObjectTargetWriter.cpp +++ b/lib/MC/MCELFObjectTargetWriter.cpp @@ -9,6 +9,8 @@ #include "llvm/ADT/STLExtras.h" #include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCValue.h" using namespace llvm; @@ -35,6 +37,12 @@ const MCSymbol *MCELFObjectTargetWriter::ExplicitRelSym(const MCAssembler &Asm, return NULL; } +const MCSymbol *MCELFObjectTargetWriter::undefinedExplicitRelSym(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const { + const MCSymbol &Symbol = Target.getSymA()->getSymbol(); + return &Symbol.AliasedSymbol(); +} void MCELFObjectTargetWriter::adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset) { diff --git a/lib/MC/MCELFStreamer.cpp b/lib/MC/MCELFStreamer.cpp index 2d342dc..14fbc1e 100644 --- a/lib/MC/MCELFStreamer.cpp +++ b/lib/MC/MCELFStreamer.cpp @@ -98,17 +98,13 @@ public: uint64_t Size, unsigned ByteAlignment = 0) { llvm_unreachable("ELF doesn't support this directive"); } - virtual void EmitBytes(StringRef Data, unsigned AddrSpace); - virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0, - unsigned ValueSize = 1, - unsigned MaxBytesToEmit = 0); - virtual void EmitCodeAlignment(unsigned ByteAlignment, - unsigned MaxBytesToEmit = 0); virtual void EmitValueImpl(const MCExpr *Value, unsigned Size, unsigned AddrSpace); virtual void EmitFileDirective(StringRef Filename); + virtual void EmitTCEntry(const MCSymbol &S); + virtual void FinishImpl(); private: @@ -247,7 +243,6 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol, switch (Attribute) { case MCSA_LazyReference: case MCSA_Reference: - case MCSA_NoDeadStrip: case MCSA_SymbolResolver: case MCSA_PrivateExtern: case MCSA_WeakDefinition: @@ -256,6 +251,7 @@ void MCELFStreamer::EmitSymbolAttribute(MCSymbol *Symbol, case MCSA_IndirectSymbol: llvm_unreachable("Invalid symbol attribute for ELF!"); + case MCSA_NoDeadStrip: case MCSA_ELF_TypeGnuUniqueObject: // Ignore for now. break; @@ -355,42 +351,6 @@ void MCELFStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size, EmitCommonSymbol(Symbol, Size, ByteAlignment); } -void MCELFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) { - // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into - // MCObjectStreamer. - getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end()); -} - -void MCELFStreamer::EmitValueToAlignment(unsigned ByteAlignment, - int64_t Value, unsigned ValueSize, - unsigned MaxBytesToEmit) { - // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into - // MCObjectStreamer. - if (MaxBytesToEmit == 0) - MaxBytesToEmit = ByteAlignment; - new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit, - getCurrentSectionData()); - - // Update the maximum alignment on the current section if necessary. - if (ByteAlignment > getCurrentSectionData()->getAlignment()) - getCurrentSectionData()->setAlignment(ByteAlignment); -} - -void MCELFStreamer::EmitCodeAlignment(unsigned ByteAlignment, - unsigned MaxBytesToEmit) { - // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into - // MCObjectStreamer. 
- if (MaxBytesToEmit == 0) - MaxBytesToEmit = ByteAlignment; - MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit, - getCurrentSectionData()); - F->setEmitNops(true); - - // Update the maximum alignment on the current section if necessary. - if (ByteAlignment > getCurrentSectionData()->getAlignment()) - getCurrentSectionData()->setAlignment(ByteAlignment); -} - void MCELFStreamer::EmitValueImpl(const MCExpr *Value, unsigned Size, unsigned AddrSpace) { fixSymbolsInTLSFixups(Value); @@ -511,6 +471,12 @@ void MCELFStreamer::FinishImpl() { this->MCObjectStreamer::FinishImpl(); } +void MCELFStreamer::EmitTCEntry(const MCSymbol &S) +{ + // Creates a R_PPC64_TOC relocation + MCObjectStreamer::EmitSymbolValue(&S, 8, 0); +} + MCStreamer *llvm::createELFStreamer(MCContext &Context, MCAsmBackend &MAB, raw_ostream &OS, MCCodeEmitter *CE, bool RelaxAll, bool NoExecStack) { diff --git a/lib/MC/MCExpr.cpp b/lib/MC/MCExpr.cpp index 0eb7fcc..e033634 100644 --- a/lib/MC/MCExpr.cpp +++ b/lib/MC/MCExpr.cpp @@ -60,7 +60,8 @@ void MCExpr::print(raw_ostream &OS) const { SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTOFF || SRE.getKind() == MCSymbolRefExpr::VK_ARM_TPOFF || SRE.getKind() == MCSymbolRefExpr::VK_ARM_GOTTPOFF || - SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET1) + SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET1 || + SRE.getKind() == MCSymbolRefExpr::VK_ARM_TARGET2) OS << MCSymbolRefExpr::getVariantKindName(SRE.getKind()); else if (SRE.getKind() != MCSymbolRefExpr::VK_None && SRE.getKind() != MCSymbolRefExpr::VK_PPC_DARWIN_HA16 && @@ -136,10 +137,12 @@ void MCExpr::print(raw_ostream &OS) const { llvm_unreachable("Invalid expression kind!"); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MCExpr::dump() const { print(dbgs()); dbgs() << '\n'; } +#endif /* *** */ @@ -197,7 +200,9 @@ StringRef MCSymbolRefExpr::getVariantKindName(VariantKind Kind) { case VK_ARM_GOTTPOFF: return "(gottpoff)"; case VK_ARM_TLSGD: return "(tlsgd)"; case VK_ARM_TARGET1: return "(target1)"; - case VK_PPC_TOC: return "toc"; + case VK_ARM_TARGET2: return "(target2)"; + case VK_PPC_TOC: return "tocbase"; + case VK_PPC_TOC_ENTRY: return "toc"; case VK_PPC_DARWIN_HA16: return "ha16"; case VK_PPC_DARWIN_LO16: return "lo16"; case VK_PPC_GAS_HA16: return "ha"; @@ -264,7 +269,7 @@ MCSymbolRefExpr::getVariantKindForName(StringRef Name) { /* *** */ -void MCTargetExpr::Anchor() {} +void MCTargetExpr::anchor() {} /* *** */ diff --git a/lib/MC/MCInst.cpp b/lib/MC/MCInst.cpp index 7bbfd2e..124cc14 100644 --- a/lib/MC/MCInst.cpp +++ b/lib/MC/MCInst.cpp @@ -32,10 +32,12 @@ void MCOperand::print(raw_ostream &OS, const MCAsmInfo *MAI) const { OS << ">"; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MCOperand::dump() const { print(dbgs(), 0); dbgs() << "\n"; } +#endif void MCInst::print(raw_ostream &OS, const MCAsmInfo *MAI) const { OS << ""; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MCInst::dump() const { print(dbgs(), 0); dbgs() << "\n"; } +#endif diff --git a/lib/MC/MCInstPrinter.cpp b/lib/MC/MCInstPrinter.cpp index 847bcc0..41d90ab 100644 --- a/lib/MC/MCInstPrinter.cpp +++ b/lib/MC/MCInstPrinter.cpp @@ -36,3 +36,17 @@ void MCInstPrinter::printAnnotation(raw_ostream &OS, StringRef Annot) { OS << " " << MAI.getCommentString() << " " << Annot; } } + +/// Utility functions to make adding mark ups simpler. 
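The ELF streamer's EmitTCEntry() above emits an 8-byte symbol value that becomes an R_PPC64_TOC relocation, while the asm-streamer counterpart (see the MCAsmStreamer hunk earlier) prints a textual ".tc" line. A minimal sketch of that textual form:

#include <string>

static std::string printTCEntry(const std::string &SymName) {
  // Matches the asm-streamer output: "\t.tc <sym>[TC],<sym>"
  return "\t.tc " + SymName + "[TC]," + SymName;
}
// printTCEntry("foo") -> "\t.tc foo[TC],foo"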
+StringRef MCInstPrinter::markup(StringRef s) const { + if (getUseMarkup()) + return s; + else + return ""; +} +StringRef MCInstPrinter::markup(StringRef a, StringRef b) const { + if (getUseMarkup()) + return a; + else + return b; +} diff --git a/lib/MC/MCLabel.cpp b/lib/MC/MCLabel.cpp index 9c0fc92..1d3022a 100644 --- a/lib/MC/MCLabel.cpp +++ b/lib/MC/MCLabel.cpp @@ -16,6 +16,8 @@ void MCLabel::print(raw_ostream &OS) const { OS << '"' << getInstance() << '"'; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MCLabel::dump() const { print(dbgs()); } +#endif diff --git a/lib/MC/MCMachOStreamer.cpp b/lib/MC/MCMachOStreamer.cpp index b75fe2c..04b0e86 100644 --- a/lib/MC/MCMachOStreamer.cpp +++ b/lib/MC/MCMachOStreamer.cpp @@ -70,19 +70,11 @@ public: llvm_unreachable("macho doesn't support this directive"); } virtual void EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size, - unsigned ByteAlignment) { - llvm_unreachable("macho doesn't support this directive"); - } + unsigned ByteAlignment); virtual void EmitZerofill(const MCSection *Section, MCSymbol *Symbol = 0, uint64_t Size = 0, unsigned ByteAlignment = 0); virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment = 0); - virtual void EmitBytes(StringRef Data, unsigned AddrSpace); - virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value = 0, - unsigned ValueSize = 1, - unsigned MaxBytesToEmit = 0); - virtual void EmitCodeAlignment(unsigned ByteAlignment, - unsigned MaxBytesToEmit = 0); virtual void EmitFileDirective(StringRef Filename) { // FIXME: Just ignore the .file; it isn't important enough to fail the @@ -141,6 +133,8 @@ void MCMachOStreamer::EmitLabel(MCSymbol *Symbol) { } void MCMachOStreamer::EmitDataRegion(DataRegionData::KindTy Kind) { + if (!getAssembler().getBackend().hasDataInCodeSupport()) + return; // Create a temporary label to mark the start of the data region. MCSymbol *Start = getContext().CreateTempSymbol(); EmitLabel(Start); @@ -151,6 +145,8 @@ void MCMachOStreamer::EmitDataRegion(DataRegionData::KindTy Kind) { } void MCMachOStreamer::EmitDataRegionEnd() { + if (!getAssembler().getBackend().hasDataInCodeSupport()) + return; std::vector &Regions = getAssembler().getDataRegions(); assert(Regions.size() && "Mismatched .end_data_region!"); DataRegionData &Data = Regions.back(); @@ -325,6 +321,15 @@ void MCMachOStreamer::EmitCommonSymbol(MCSymbol *Symbol, uint64_t Size, SD.setCommon(Size, ByteAlignment); } +void MCMachOStreamer::EmitLocalCommonSymbol(MCSymbol *Symbol, uint64_t Size, + unsigned ByteAlignment) { + // '.lcomm' is equivalent to '.zerofill'. + return EmitZerofill(getContext().getMachOSection("__DATA", "__bss", + MCSectionMachO::S_ZEROFILL, + 0, SectionKind::getBSS()), + Symbol, Size, ByteAlignment); +} + void MCMachOStreamer::EmitZerofill(const MCSection *Section, MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment) { MCSectionData &SectData = getAssembler().getOrCreateSectionData(*Section); @@ -361,42 +366,6 @@ void MCMachOStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol, return; } -void MCMachOStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) { - // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into - // MCObjectStreamer. 
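An illustration of how a target instruction printer is expected to use the new markup() helpers added above: the surrounding tags are emitted only when markup mode is enabled, so plain-text output is unchanged. The "<reg:...>" tag below is illustrative of the markup scheme, not a fixed API.

#include <sstream>
#include <string>

static std::string printRegOperand(const std::string &RegName, bool UseMarkup) {
  std::ostringstream OS;
  // markup(a) returns a only when markup is on; markup(a, b) picks a or b.
  OS << (UseMarkup ? "<reg:" : "") << "%" << RegName << (UseMarkup ? ">" : "");
  return OS.str();
}
// printRegOperand("rax", true)  -> "<reg:%rax>"
// printRegOperand("rax", false) -> "%rax"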
- getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end()); -} - -void MCMachOStreamer::EmitValueToAlignment(unsigned ByteAlignment, - int64_t Value, unsigned ValueSize, - unsigned MaxBytesToEmit) { - // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into - // MCObjectStreamer. - if (MaxBytesToEmit == 0) - MaxBytesToEmit = ByteAlignment; - new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit, - getCurrentSectionData()); - - // Update the maximum alignment on the current section if necessary. - if (ByteAlignment > getCurrentSectionData()->getAlignment()) - getCurrentSectionData()->setAlignment(ByteAlignment); -} - -void MCMachOStreamer::EmitCodeAlignment(unsigned ByteAlignment, - unsigned MaxBytesToEmit) { - // TODO: This is exactly the same as WinCOFFStreamer. Consider merging into - // MCObjectStreamer. - if (MaxBytesToEmit == 0) - MaxBytesToEmit = ByteAlignment; - MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit, - getCurrentSectionData()); - F->setEmitNops(true); - - // Update the maximum alignment on the current section if necessary. - if (ByteAlignment > getCurrentSectionData()->getAlignment()) - getCurrentSectionData()->setAlignment(ByteAlignment); -} - void MCMachOStreamer::EmitInstToData(const MCInst &Inst) { MCDataFragment *DF = getOrCreateDataFragment(); diff --git a/lib/MC/MCObjectFileInfo.cpp b/lib/MC/MCObjectFileInfo.cpp index 29b4a94..2e1604d 100644 --- a/lib/MC/MCObjectFileInfo.cpp +++ b/lib/MC/MCObjectFileInfo.cpp @@ -392,6 +392,18 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) { DwarfMacroInfoSection = Ctx->getELFSection(".debug_macinfo", ELF::SHT_PROGBITS, 0, SectionKind::getMetadata()); + DwarfAccelNamesSection = + Ctx->getELFSection(".apple_names", ELF::SHT_PROGBITS, 0, + SectionKind::getMetadata()); + DwarfAccelObjCSection = + Ctx->getELFSection(".apple_objc", ELF::SHT_PROGBITS, 0, + SectionKind::getMetadata()); + DwarfAccelNamespaceSection = + Ctx->getELFSection(".apple_namespaces", ELF::SHT_PROGBITS, 0, + SectionKind::getMetadata()); + DwarfAccelTypesSection = + Ctx->getELFSection(".apple_types", ELF::SHT_PROGBITS, 0, + SectionKind::getMetadata()); } @@ -430,12 +442,20 @@ void MCObjectFileInfo::InitCOFFMCObjectFileInfo(Triple T) { } - StaticDtorSection = - Ctx->getCOFFSection(".dtors", - COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | - COFF::IMAGE_SCN_MEM_READ | - COFF::IMAGE_SCN_MEM_WRITE, - SectionKind::getDataRel()); + if (T.getOS() == Triple::Win32) { + StaticDtorSection = + Ctx->getCOFFSection(".CRT$XTX", + COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | + COFF::IMAGE_SCN_MEM_READ, + SectionKind::getReadOnly()); + } else { + StaticDtorSection = + Ctx->getCOFFSection(".dtors", + COFF::IMAGE_SCN_CNT_INITIALIZED_DATA | + COFF::IMAGE_SCN_MEM_READ | + COFF::IMAGE_SCN_MEM_WRITE, + SectionKind::getDataRel()); + } // FIXME: We're emitting LSDA info into a readonly section on COFF, even // though it contains relocatable pointers. 
In PIC mode, this is probably a @@ -557,6 +577,7 @@ void MCObjectFileInfo::InitMCObjectFileInfo(StringRef TT, Reloc::Model relocm, Env = IsMachO; InitMachOMCObjectFileInfo(T); } else if ((Arch == Triple::x86 || Arch == Triple::x86_64) && + (T.getEnvironment() != Triple::ELF) && (T.getOS() == Triple::MinGW32 || T.getOS() == Triple::Cygwin || T.getOS() == Triple::Win32)) { Env = IsCOFF; diff --git a/lib/MC/MCObjectStreamer.cpp b/lib/MC/MCObjectStreamer.cpp index bad7cfe..7746323 100644 --- a/lib/MC/MCObjectStreamer.cpp +++ b/lib/MC/MCObjectStreamer.cpp @@ -232,6 +232,31 @@ void MCObjectStreamer::EmitDwarfAdvanceFrameAddr(const MCSymbol *LastLabel, new MCDwarfCallFrameFragment(*AddrDelta, getCurrentSectionData()); } +void MCObjectStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) { + assert(AddrSpace == 0 && "Address space must be 0!"); + getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end()); +} + +void MCObjectStreamer::EmitValueToAlignment(unsigned ByteAlignment, + int64_t Value, + unsigned ValueSize, + unsigned MaxBytesToEmit) { + if (MaxBytesToEmit == 0) + MaxBytesToEmit = ByteAlignment; + new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit, + getCurrentSectionData()); + + // Update the maximum alignment on the current section if necessary. + if (ByteAlignment > getCurrentSectionData()->getAlignment()) + getCurrentSectionData()->setAlignment(ByteAlignment); +} + +void MCObjectStreamer::EmitCodeAlignment(unsigned ByteAlignment, + unsigned MaxBytesToEmit) { + EmitValueToAlignment(ByteAlignment, 0, 1, MaxBytesToEmit); + cast(getCurrentFragment())->setEmitNops(true); +} + bool MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset, unsigned char Value) { int64_t Res; @@ -258,12 +283,26 @@ bool MCObjectStreamer::EmitValueToOffset(const MCExpr *Offset, void MCObjectStreamer::EmitGPRel32Value(const MCExpr *Value) { MCDataFragment *DF = getOrCreateDataFragment(); - DF->addFixup(MCFixup::Create(DF->getContents().size(), - Value, - FK_GPRel_4)); + DF->addFixup(MCFixup::Create(DF->getContents().size(), Value, FK_GPRel_4)); DF->getContents().resize(DF->getContents().size() + 4, 0); } +// Associate GPRel32 fixup with data and resize data area +void MCObjectStreamer::EmitGPRel64Value(const MCExpr *Value) { + MCDataFragment *DF = getOrCreateDataFragment(); + + DF->addFixup(MCFixup::Create(DF->getContents().size(), Value, FK_GPRel_4)); + DF->getContents().resize(DF->getContents().size() + 8, 0); +} + +void MCObjectStreamer::EmitFill(uint64_t NumBytes, uint8_t FillValue, + unsigned AddrSpace) { + assert(AddrSpace == 0 && "Address space must be 0!"); + // FIXME: A MCFillFragment would be more memory efficient but MCExpr has + // problems evaluating expressions across multiple fragments. + getOrCreateDataFragment()->getContents().append(NumBytes, FillValue); +} + void MCObjectStreamer::FinishImpl() { // Dump out the dwarf file & directory tables and line tables. const MCSymbol *LineSectionSymbol = NULL; diff --git a/lib/MC/MCParser/AsmLexer.cpp b/lib/MC/MCParser/AsmLexer.cpp index c76052d..f93f685 100644 --- a/lib/MC/MCParser/AsmLexer.cpp +++ b/lib/MC/MCParser/AsmLexer.cpp @@ -396,8 +396,17 @@ AsmToken AsmLexer::LexToken() { case 0: case ' ': case '\t': - // Ignore whitespace. - return LexToken(); + if (SkipSpace) { + // Ignore whitespace. + return LexToken(); + } else { + int len = 1; + while (*CurPtr==' ' || *CurPtr=='\t') { + CurPtr++; + len++; + } + return AsmToken(AsmToken::Space, StringRef(TokStart, len)); + } case '\n': // FALL THROUGH. 
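A standalone sketch of the AsmLexer behaviour added above: when space skipping is disabled (as it now is while gathering macro arguments), a run of blanks is folded into a single Space token whose text covers the whole run. Helper names are illustrative.

#include <string>
#include <utility>

static std::pair<std::string, const char *> lexSpaceRun(const char *TokStart) {
  const char *Cur = TokStart + 1;            // the first blank is already read
  while (*Cur == ' ' || *Cur == '\t')
    ++Cur;
  // first: the Space token text, second: where lexing resumes
  return std::make_pair(std::string(TokStart, Cur - TokStart), Cur);
}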
case '\r': isAtStartOfLine = true; diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp index b67c769..6f2e85e 100644 --- a/lib/MC/MCParser/AsmParser.cpp +++ b/lib/MC/MCParser/AsmParser.cpp @@ -19,6 +19,8 @@ #include "llvm/MC/MCContext.h" #include "llvm/MC/MCDwarf.h" #include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInstPrinter.h" +#include "llvm/MC/MCInstrInfo.h" #include "llvm/MC/MCParser/AsmCond.h" #include "llvm/MC/MCParser/AsmLexer.h" #include "llvm/MC/MCParser/MCAsmParser.h" @@ -35,6 +37,8 @@ #include "llvm/Support/SourceMgr.h" #include "llvm/Support/raw_ostream.h" #include +#include +#include #include using namespace llvm; @@ -42,12 +46,14 @@ static cl::opt FatalAssemblerWarnings("fatal-assembler-warnings", cl::desc("Consider warnings as error")); +MCAsmParserSemaCallback::~MCAsmParserSemaCallback() {} + namespace { /// \brief Helper class for tracking macro definitions. typedef std::vector MacroArgument; typedef std::vector MacroArguments; -typedef StringRef MacroParameter; +typedef std::pair MacroParameter; typedef std::vector MacroParameters; struct Macro { @@ -80,12 +86,34 @@ public: MemoryBuffer *I); }; +//struct AsmRewrite; +struct ParseStatementInfo { + /// ParsedOperands - The parsed operands from the last parsed statement. + SmallVector ParsedOperands; + + /// Opcode - The opcode from the last parsed instruction. + unsigned Opcode; + + SmallVectorImpl *AsmRewrites; + + ParseStatementInfo() : Opcode(~0U), AsmRewrites(0) {} + ParseStatementInfo(SmallVectorImpl *rewrites) + : Opcode(~0), AsmRewrites(rewrites) {} + + ~ParseStatementInfo() { + // Free any parsed operands. + for (unsigned i = 0, e = ParsedOperands.size(); i != e; ++i) + delete ParsedOperands[i]; + ParsedOperands.clear(); + } +}; + /// \brief The concrete assembly parser instance. class AsmParser : public MCAsmParser { friend class GenericAsmParser; - AsmParser(const AsmParser &); // DO NOT IMPLEMENT - void operator=(const AsmParser &); // DO NOT IMPLEMENT + AsmParser(const AsmParser &) LLVM_DELETED_FUNCTION; + void operator=(const AsmParser &) LLVM_DELETED_FUNCTION; private: AsmLexer Lexer; MCContext &Ctx; @@ -126,20 +154,27 @@ private: StringRef CppHashFilename; int64_t CppHashLineNumber; SMLoc CppHashLoc; + int CppHashBuf; /// AssemblerDialect. ~OU means unset value and use value provided by MAI. unsigned AssemblerDialect; + /// IsDarwin - is Darwin compatibility enabled? + bool IsDarwin; + + /// ParsingInlineAsm - Are we parsing ms-style inline assembly? 
+ bool ParsingInlineAsm; + public: AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out, const MCAsmInfo &MAI); - ~AsmParser(); + virtual ~AsmParser(); virtual bool Run(bool NoInitialTextSection, bool NoFinalize = false); - void AddDirectiveHandler(MCAsmParserExtension *Object, - StringRef Directive, - DirectiveHandler Handler) { + virtual void AddDirectiveHandler(MCAsmParserExtension *Object, + StringRef Directive, + DirectiveHandler Handler) { DirectiveMap[Directive] = std::make_pair(Object, Handler); } @@ -166,7 +201,19 @@ public: virtual bool Error(SMLoc L, const Twine &Msg, ArrayRef Ranges = ArrayRef()); - const AsmToken &Lex(); + virtual const AsmToken &Lex(); + + void setParsingInlineAsm(bool V) { ParsingInlineAsm = V; } + bool isParsingInlineAsm() { return ParsingInlineAsm; } + + bool ParseMSInlineAsm(void *AsmLoc, std::string &AsmString, + unsigned &NumOutputs, unsigned &NumInputs, + SmallVectorImpl > &OpDecls, + SmallVectorImpl &Constraints, + SmallVectorImpl &Clobbers, + const MCInstrInfo *MII, + const MCInstPrinter *IP, + MCAsmParserSemaCallback &SI); bool ParseExpression(const MCExpr *&Res); virtual bool ParseExpression(const MCExpr *&Res, SMLoc &EndLoc); @@ -178,7 +225,7 @@ public: private: void CheckForValidSection(); - bool ParseStatement(); + bool ParseStatement(ParseStatementInfo &Info); void EatToEndOfLine(); bool ParseCppHashLineFilenameComment(const SMLoc &L); @@ -202,26 +249,28 @@ private: /// This returns true on failure. bool ProcessIncbinFile(const std::string &Filename); - /// \brief Reset the current lexer position to that given by \arg Loc. The + /// \brief Reset the current lexer position to that given by \p Loc. The /// current token is not set; clients should ensure Lex() is called /// subsequently. void JumpToLoc(SMLoc Loc); - void EatToEndOfStatement(); + virtual void EatToEndOfStatement(); - bool ParseMacroArgument(MacroArgument &MA); + bool ParseMacroArgument(MacroArgument &MA, + AsmToken::TokenKind &ArgumentDelimiter); bool ParseMacroArguments(const Macro *M, MacroArguments &A); /// \brief Parse up to the end of statement and a return the contents from the /// current token until the end of the statement; the current token on exit /// will be either the EndOfStatement or EOF. - StringRef ParseStringToEndOfStatement(); + virtual StringRef ParseStringToEndOfStatement(); /// \brief Parse until the end of a statement or a comma is encountered, /// return the contents from the current token up to the end or comma. StringRef ParseStringToComma(); - bool ParseAssignment(StringRef Name, bool allow_redef); + bool ParseAssignment(StringRef Name, bool allow_redef, + bool NoDeadStrip = false); bool ParsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc); bool ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res, SMLoc &EndLoc); @@ -229,8 +278,8 @@ private: bool ParseBracketExpr(const MCExpr *&Res, SMLoc &EndLoc); /// ParseIdentifier - Parse an identifier or string (as a quoted identifier) - /// and set \arg Res to the identifier contents. - bool ParseIdentifier(StringRef &Res); + /// and set \p Res to the identifier contents. + virtual bool ParseIdentifier(StringRef &Res); // Directive Parsing. @@ -282,6 +331,9 @@ private: bool ParseDirectiveIrp(SMLoc DirectiveLoc); // ".irp" bool ParseDirectiveIrpc(SMLoc DirectiveLoc); // ".irpc" bool ParseDirectiveEndr(SMLoc DirectiveLoc); // ".endr" + + // "_emit" + bool ParseDirectiveEmit(SMLoc DirectiveLoc, ParseStatementInfo &Info); }; /// \brief Generic implementations of directive handling, etc. 
which is shared @@ -406,8 +458,8 @@ AsmParser::AsmParser(SourceMgr &_SM, MCContext &_Ctx, MCStreamer &_Out, const MCAsmInfo &_MAI) : Lexer(_MAI), Ctx(_Ctx), Out(_Out), MAI(_MAI), SrcMgr(_SM), GenericParser(new GenericAsmParser), PlatformParser(0), - CurBuffer(0), MacrosEnabled(true), CppHashLineNumber(0), - AssemblerDialect(~0U) { + CurBuffer(0), MacrosEnabled(true), CppHashLineNumber(0), + AssemblerDialect(~0U), IsDarwin(false), ParsingInlineAsm(false) { // Save the old handler. SavedDiagHandler = SrcMgr.getDiagHandler(); SavedDiagContext = SrcMgr.getDiagContext(); @@ -428,6 +480,7 @@ AsmParser::AsmParser(SourceMgr &_SM, MCContext &_Ctx, } else if (_MAI.hasSubsectionsViaSymbols()) { PlatformParser = createDarwinAsmParser(); PlatformParser->Initialize(*this); + IsDarwin = true; } else { PlatformParser = createELFAsmParser(); PlatformParser->Initialize(*this); @@ -545,7 +598,8 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) { // While we have input, parse each statement. while (Lexer.isNot(AsmToken::Eof)) { - if (!ParseStatement()) continue; + ParseStatementInfo Info; + if (!ParseStatement(Info)) continue; // We had an error, validate that one was emitted and recover by skipping to // the next line. @@ -598,7 +652,7 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) { } void AsmParser::CheckForValidSection() { - if (!getStreamer().getCurrentSection()) { + if (!ParsingInlineAsm && !getStreamer().getCurrentSection()) { TokError("expected section directive before assembly directive"); Out.SwitchSection(Ctx.getMachOSection( "__TEXT", "__text", @@ -1024,14 +1078,11 @@ bool AsmParser::ParseBinOpRHS(unsigned Precedence, const MCExpr *&Res, } } - - - /// ParseStatement: /// ::= EndOfStatement /// ::= Label* Directive ...Operands... EndOfStatement /// ::= Label* Identifier OperandList* EndOfStatement -bool AsmParser::ParseStatement() { +bool AsmParser::ParseStatement(ParseStatementInfo &Info) { if (Lexer.is(AsmToken::EndOfStatement)) { Out.AddBlankLine(); Lex(); @@ -1150,7 +1201,7 @@ bool AsmParser::ParseStatement() { return false; } - return ParseStatement(); + return false; } case AsmToken::Equal: @@ -1304,26 +1355,30 @@ bool AsmParser::ParseStatement() { return Error(IDLoc, "unknown directive"); } + // _emit + if (ParsingInlineAsm && IDVal == "_emit") + return ParseDirectiveEmit(IDLoc, Info); + CheckForValidSection(); // Canonicalize the opcode to lower case. - SmallString<128> Opcode; + SmallString<128> OpcodeStr; for (unsigned i = 0, e = IDVal.size(); i != e; ++i) - Opcode.push_back(tolower(IDVal[i])); + OpcodeStr.push_back(tolower(IDVal[i])); - SmallVector ParsedOperands; - bool HadError = getTargetParser().ParseInstruction(Opcode.str(), IDLoc, - ParsedOperands); + ParseInstructionInfo IInfo(Info.AsmRewrites); + bool HadError = getTargetParser().ParseInstruction(IInfo, OpcodeStr.str(), + IDLoc,Info.ParsedOperands); // Dump the parsed representation, if requested. if (getShowParsedOperands()) { SmallString<256> Str; raw_svector_ostream OS(Str); OS << "parsed instruction: ["; - for (unsigned i = 0; i != ParsedOperands.size(); ++i) { + for (unsigned i = 0; i != Info.ParsedOperands.size(); ++i) { if (i != 0) OS << ", "; - ParsedOperands[i]->print(OS); + Info.ParsedOperands[i]->print(OS); } OS << "]"; @@ -1335,21 +1390,38 @@ bool AsmParser::ParseStatement() { // the instruction. 
if (!HadError && getContext().getGenDwarfForAssembly() && getContext().getGenDwarfSection() == getStreamer().getCurrentSection() ) { + + unsigned Line = SrcMgr.FindLineNumber(IDLoc, CurBuffer); + + // If we previously parsed a cpp hash file line comment then make sure the + // current Dwarf File is for the CppHashFilename if not then emit the + // Dwarf File table for it and adjust the line number for the .loc. + const std::vector &MCDwarfFiles = + getContext().getMCDwarfFiles(); + if (CppHashFilename.size() != 0) { + if(MCDwarfFiles[getContext().getGenDwarfFileNumber()]->getName() != + CppHashFilename) + getStreamer().EmitDwarfFileDirective( + getContext().nextGenDwarfFileNumber(), StringRef(), CppHashFilename); + + unsigned CppHashLocLineNo = SrcMgr.FindLineNumber(CppHashLoc,CppHashBuf); + Line = CppHashLineNumber - 1 + (Line - CppHashLocLineNo); + } + getStreamer().EmitDwarfLocDirective(getContext().getGenDwarfFileNumber(), - SrcMgr.FindLineNumber(IDLoc, CurBuffer), - 0, DWARF2_LINE_DEFAULT_IS_STMT ? + Line, 0, DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0, 0, 0, StringRef()); } // If parsing succeeded, match the instruction. - if (!HadError) - HadError = getTargetParser().MatchAndEmitInstruction(IDLoc, ParsedOperands, - Out); - - // Free any parsed operands. - for (unsigned i = 0, e = ParsedOperands.size(); i != e; ++i) - delete ParsedOperands[i]; + if (!HadError) { + unsigned ErrorInfo; + HadError = getTargetParser().MatchAndEmitInstruction(IDLoc, Info.Opcode, + Info.ParsedOperands, + Out, ErrorInfo, + ParsingInlineAsm); + } // Don't skip the rest of the line, the instruction parser is responsible for // that. @@ -1394,6 +1466,7 @@ bool AsmParser::ParseCppHashLineFilenameComment(const SMLoc &L) { CppHashLoc = L; CppHashFilename = Filename; CppHashLineNumber = LineNumber; + CppHashBuf = CurBuffer; // Ignore any trailing characters, they're just comment. EatToEndOfLine(); @@ -1454,6 +1527,14 @@ void AsmParser::DiagHandler(const SMDiagnostic &Diag, void *Context) { NewDiag.print(0, OS); } +// FIXME: This is mostly duplicated from the function in AsmLexer.cpp. The +// difference being that that function accepts '@' as part of identifiers and +// we can't do that. AsmLexer.cpp should probably be changed to handle +// '@' as a special case when needed. +static bool isIdentifierChar(char c) { + return isalnum(c) || c == '_' || c == '$' || c == '.'; +} + bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body, const MacroParameters &Parameters, const MacroArguments &A, @@ -1462,6 +1543,8 @@ bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body, if (NParameters != 0 && NParameters != A.size()) return Error(L, "Wrong number of arguments"); + // A macro without parameters is handled differently on Darwin: + // gas accepts no arguments and does no substitutions while (!Body.empty()) { // Scan for the next substitution. std::size_t End = Body.size(), Pos = 0; @@ -1518,25 +1601,33 @@ bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body, Pos += 2; } else { unsigned I = Pos + 1; - while (isalnum(Body[I]) && I + 1 != End) + while (isIdentifierChar(Body[I]) && I + 1 != End) ++I; const char *Begin = Body.data() + Pos +1; StringRef Argument(Begin, I - (Pos +1)); unsigned Index = 0; for (; Index < NParameters; ++Index) - if (Parameters[Index] == Argument) + if (Parameters[Index].first == Argument) break; - // FIXME: We should error at the macro definition. 
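A worked example of the .loc renumbering added above for preprocessed assembly: a cpp hash marker '# 42 "foo.c"' seen on buffer line 100 declares that the next buffer line is source line 42, so an instruction on buffer line 103 gets source line 42 - 1 + (103 - 100) = 44.

static unsigned locLineFromCppHash(unsigned CppHashLineNumber,   // e.g. 42
                                   unsigned CppHashLocLineNo,    // e.g. 100
                                   unsigned CurrentBufferLine) { // e.g. 103
  // Same arithmetic as the hunk above; returns 44 for the example values.
  return CppHashLineNumber - 1 + (CurrentBufferLine - CppHashLocLineNo);
}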
- if (Index == NParameters) - return Error(L, "Parameter not found"); - - for (MacroArgument::const_iterator it = A[Index].begin(), - ie = A[Index].end(); it != ie; ++it) - OS << it->getString(); + if (Index == NParameters) { + if (Body[Pos+1] == '(' && Body[Pos+2] == ')') + Pos += 3; + else { + OS << '\\' << Argument; + Pos = I; + } + } else { + for (MacroArgument::const_iterator it = A[Index].begin(), + ie = A[Index].end(); it != ie; ++it) + if (it->getKind() == AsmToken::String) + OS << it->getStringContents(); + else + OS << it->getString(); - Pos += 1 + Argument.size(); + Pos += 1 + Argument.size(); + } } // Update the scan point. Body = Body.substr(Pos); @@ -1551,24 +1642,97 @@ MacroInstantiation::MacroInstantiation(const Macro *M, SMLoc IL, SMLoc EL, { } +static bool IsOperator(AsmToken::TokenKind kind) +{ + switch (kind) + { + default: + return false; + case AsmToken::Plus: + case AsmToken::Minus: + case AsmToken::Tilde: + case AsmToken::Slash: + case AsmToken::Star: + case AsmToken::Dot: + case AsmToken::Equal: + case AsmToken::EqualEqual: + case AsmToken::Pipe: + case AsmToken::PipePipe: + case AsmToken::Caret: + case AsmToken::Amp: + case AsmToken::AmpAmp: + case AsmToken::Exclaim: + case AsmToken::ExclaimEqual: + case AsmToken::Percent: + case AsmToken::Less: + case AsmToken::LessEqual: + case AsmToken::LessLess: + case AsmToken::LessGreater: + case AsmToken::Greater: + case AsmToken::GreaterEqual: + case AsmToken::GreaterGreater: + return true; + } +} + /// ParseMacroArgument - Extract AsmTokens for a macro argument. /// This is used for both default macro parameter values and the /// arguments in macro invocations -bool AsmParser::ParseMacroArgument(MacroArgument &MA) { +bool AsmParser::ParseMacroArgument(MacroArgument &MA, + AsmToken::TokenKind &ArgumentDelimiter) { unsigned ParenLevel = 0; + unsigned AddTokens = 0; - for (;;) { - SMLoc LastTokenLoc; + // gas accepts arguments separated by whitespace, except on Darwin + if (!IsDarwin) + Lexer.setSkipSpace(false); - if (Lexer.is(AsmToken::Eof) || Lexer.is(AsmToken::Equal)) + for (;;) { + if (Lexer.is(AsmToken::Eof) || Lexer.is(AsmToken::Equal)) { + Lexer.setSkipSpace(true); return TokError("unexpected token in macro instantiation"); + } + + if (ParenLevel == 0 && Lexer.is(AsmToken::Comma)) { + // Spaces and commas cannot be mixed to delimit parameters + if (ArgumentDelimiter == AsmToken::Eof) + ArgumentDelimiter = AsmToken::Comma; + else if (ArgumentDelimiter != AsmToken::Comma) { + Lexer.setSkipSpace(true); + return TokError("expected ' ' for macro argument separator"); + } + break; + } + + if (Lexer.is(AsmToken::Space)) { + Lex(); // Eat spaces + + // Spaces can delimit parameters, but could also be part an expression. 
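A sketch of the substitution rule the expandMacro() changes above implement for a "\name" reference in a macro body: a known parameter is replaced by its argument text, the "\()" separator expands to nothing, and an unknown name is now kept verbatim rather than rejected, matching gas behaviour. Names and the string-based modelling are illustrative.

#include <map>
#include <string>

static std::string substituteRef(const std::string &Name,
                                 const std::map<std::string, std::string> &Args) {
  if (Name == "()")
    return "";                        // "\()" is only a separator
  std::map<std::string, std::string>::const_iterator I = Args.find(Name);
  if (I != Args.end())
    return I->second;                 // known parameter: splice in the argument
  return "\\" + Name;                 // unknown name: pass "\name" through
}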
+ // If the token after a space is an operator, add the token and the next + // one into this argument + if (ArgumentDelimiter == AsmToken::Space || + ArgumentDelimiter == AsmToken::Eof) { + if (IsOperator(Lexer.getKind())) { + // Check to see whether the token is used as an operator, + // or part of an identifier + const char *NextChar = getTok().getEndLoc().getPointer() + 1; + if (*NextChar == ' ') + AddTokens = 2; + } + + if (!AddTokens && ParenLevel == 0) { + if (ArgumentDelimiter == AsmToken::Eof && + !IsOperator(Lexer.getKind())) + ArgumentDelimiter = AsmToken::Space; + break; + } + } + } // HandleMacroEntry relies on not advancing the lexer here // to be able to fill in the remaining default parameter values if (Lexer.is(AsmToken::EndOfStatement)) break; - if (ParenLevel == 0 && Lexer.is(AsmToken::Comma)) - break; // Adjust the current parentheses level. if (Lexer.is(AsmToken::LParen)) @@ -1578,16 +1742,23 @@ bool AsmParser::ParseMacroArgument(MacroArgument &MA) { // Append the token to the current argument list. MA.push_back(getTok()); + if (AddTokens) + AddTokens--; Lex(); } + + Lexer.setSkipSpace(true); if (ParenLevel != 0) - return TokError("unbalanced parenthesises in macro argument"); + return TokError("unbalanced parentheses in macro argument"); return false; } // Parse the macro instantiation arguments. bool AsmParser::ParseMacroArguments(const Macro *M, MacroArguments &A) { const unsigned NParameters = M ? M->Parameters.size() : 0; + // Argument delimiter is initially unknown. It will be set by + // ParseMacroArgument() + AsmToken::TokenKind ArgumentDelimiter = AsmToken::Eof; // Parse two kinds of macro invocations: // - macros defined without any parameters accept an arbitrary number of them @@ -1596,13 +1767,30 @@ bool AsmParser::ParseMacroArguments(const Macro *M, MacroArguments &A) { ++Parameter) { MacroArgument MA; - if (ParseMacroArgument(MA)) + if (ParseMacroArgument(MA, ArgumentDelimiter)) return true; - A.push_back(MA); + if (!MA.empty() || !NParameters) + A.push_back(MA); + else if (NParameters) { + if (!M->Parameters[Parameter].second.empty()) + A.push_back(M->Parameters[Parameter].second); + } - if (Lexer.is(AsmToken::EndOfStatement)) + // At the end of the statement, fill in remaining arguments that have + // default values. If there aren't any, then the next argument is + // required but missing + if (Lexer.is(AsmToken::EndOfStatement)) { + if (NParameters && Parameter < NParameters - 1) { + if (M->Parameters[Parameter + 1].second.empty()) + return TokError("macro argument '" + + Twine(M->Parameters[Parameter + 1].first) + + "' is missing"); + else + continue; + } return false; + } if (Lexer.is(AsmToken::Comma)) Lex(); @@ -1691,7 +1879,8 @@ static bool IsUsedIn(const MCSymbol *Sym, const MCExpr *Value) { llvm_unreachable("Unknown expr kind!"); } -bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef) { +bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef, + bool NoDeadStrip) { // FIXME: Use better location, we should use proper tokens. SMLoc EqualLoc = Lexer.getLoc(); @@ -1746,6 +1935,9 @@ bool AsmParser::ParseAssignment(StringRef Name, bool allow_redef) { // Do the assignment. 
Out.EmitAssignment(Sym, Value); + if (NoDeadStrip) + Out.EmitSymbolAttribute(Sym, MCSA_NoDeadStrip); + return false; } @@ -1803,7 +1995,7 @@ bool AsmParser::ParseDirectiveSet(StringRef IDVal, bool allow_redef) { return TokError("unexpected token in '" + Twine(IDVal) + "'"); Lex(); - return ParseAssignment(Name, allow_redef); + return ParseAssignment(Name, allow_redef, true); } bool AsmParser::ParseEscapedString(std::string &Data) { @@ -2274,8 +2466,13 @@ bool AsmParser::ParseDirectiveComm(bool IsLocal) { if (ParseAbsoluteExpression(Pow2Alignment)) return true; + LCOMM::LCOMMType LCOMM = Lexer.getMAI().getLCOMMDirectiveAlignmentType(); + if (IsLocal && LCOMM == LCOMM::NoAlignment) + return Error(Pow2AlignmentLoc, "alignment not supported on this target"); + // If this target takes alignments in bytes (not log) validate and convert. - if (Lexer.getMAI().getAlignmentIsInBytes()) { + if ((!IsLocal && Lexer.getMAI().getCOMMDirectiveAlignmentIsInBytes()) || + (IsLocal && LCOMM == LCOMM::ByteAlignment)) { if (!isPowerOf2_64(Pow2Alignment)) return Error(Pow2AlignmentLoc, "alignment must be a power of 2"); Pow2Alignment = Log2_64(Pow2Alignment); @@ -2303,13 +2500,9 @@ bool AsmParser::ParseDirectiveComm(bool IsLocal) { if (!Sym->isUndefined()) return Error(IDLoc, "invalid symbol redefinition"); - // '.lcomm' is equivalent to '.zerofill'. // Create the Symbol as a common or local common with Size and Pow2Alignment if (IsLocal) { - getStreamer().EmitZerofill(Ctx.getMachOSection( - "__DATA", "__bss", MCSectionMachO::S_ZEROFILL, - 0, SectionKind::getBSS()), - Sym, Size, 1 << Pow2Alignment); + getStreamer().EmitLocalCommonSymbol(Sym, Size, 1 << Pow2Alignment); return false; } @@ -3073,25 +3266,33 @@ bool GenericAsmParser::ParseDirectiveMacro(StringRef Directive, SMLoc DirectiveLoc) { StringRef Name; if (getParser().ParseIdentifier(Name)) - return TokError("expected identifier in directive"); + return TokError("expected identifier in '.macro' directive"); MacroParameters Parameters; + // Argument delimiter is initially unknown. It will be set by + // ParseMacroArgument() + AsmToken::TokenKind ArgumentDelimiter = AsmToken::Eof; if (getLexer().isNot(AsmToken::EndOfStatement)) { - for(;;) { - StringRef Parameter; - if (getParser().ParseIdentifier(Parameter)) - return TokError("expected identifier in directive"); + for (;;) { + MacroParameter Parameter; + if (getParser().ParseIdentifier(Parameter.first)) + return TokError("expected identifier in '.macro' directive"); + + if (getLexer().is(AsmToken::Equal)) { + Lex(); + if (getParser().ParseMacroArgument(Parameter.second, ArgumentDelimiter)) + return true; + } + Parameters.push_back(Parameter); - if (getLexer().isNot(AsmToken::Comma)) + if (getLexer().is(AsmToken::Comma)) + Lex(); + else if (getLexer().is(AsmToken::EndOfStatement)) break; - Lex(); } } - if (getLexer().isNot(AsmToken::EndOfStatement)) - return TokError("unexpected token in '.macro' directive"); - // Eat the end of statement. 
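An assembly-level illustration (plain text, not LLVM API) of the gas syntax the ".macro" changes above accept: a parameter may carry a default value that is used whenever the invocation omits the corresponding argument.

static const char MacroDefaultExample[] =
    "\t.macro pad count=4\n"
    "\t.skip \\count\n"
    "\t.endm\n"
    "\n"
    "\tpad        # expands to \".skip 4\"\n"
    "\tpad 16     # expands to \".skip 16\"\n";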
Lex(); @@ -3296,7 +3497,7 @@ bool AsmParser::ParseDirectiveIrp(SMLoc DirectiveLoc) { MacroParameters Parameters; MacroParameter Parameter; - if (ParseIdentifier(Parameter)) + if (ParseIdentifier(Parameter.first)) return TokError("expected identifier in '.irp' directive"); Parameters.push_back(Parameter); @@ -3323,9 +3524,8 @@ bool AsmParser::ParseDirectiveIrp(SMLoc DirectiveLoc) { SmallString<256> Buf; raw_svector_ostream OS(Buf); - for (std::vector::iterator i = A.begin(), e = A.end(); i != e; - ++i) { - std::vector Args; + for (MacroArguments::iterator i = A.begin(), e = A.end(); i != e; ++i) { + MacroArguments Args; Args.push_back(*i); if (expandMacro(OS, M->Body, Parameters, Args, getTok().getLoc())) @@ -3343,7 +3543,7 @@ bool AsmParser::ParseDirectiveIrpc(SMLoc DirectiveLoc) { MacroParameters Parameters; MacroParameter Parameter; - if (ParseIdentifier(Parameter)) + if (ParseIdentifier(Parameter.first)) return TokError("expected identifier in '.irpc' directive"); Parameters.push_back(Parameter); @@ -3393,7 +3593,7 @@ bool AsmParser::ParseDirectiveIrpc(SMLoc DirectiveLoc) { bool AsmParser::ParseDirectiveEndr(SMLoc DirectiveLoc) { if (ActiveMacros.empty()) - return TokError("unexpected '.endr' directive, no current .rept"); + return TokError("unmatched '.endr' directive"); // The only .repl that should get here are the ones created by // InstantiateMacroLikeBody. @@ -3403,6 +3603,214 @@ bool AsmParser::ParseDirectiveEndr(SMLoc DirectiveLoc) { return false; } +bool AsmParser::ParseDirectiveEmit(SMLoc IDLoc, ParseStatementInfo &Info) { + const MCExpr *Value; + SMLoc ExprLoc = getLexer().getLoc(); + if (ParseExpression(Value)) + return true; + const MCConstantExpr *MCE = dyn_cast(Value); + if (!MCE) + return Error(ExprLoc, "unexpected expression in _emit"); + uint64_t IntValue = MCE->getValue(); + if (!isUIntN(8, IntValue) && !isIntN(8, IntValue)) + return Error(ExprLoc, "literal value out of range for directive"); + + Info.AsmRewrites->push_back(AsmRewrite(AOK_Emit, IDLoc, 5)); + return false; +} + +bool AsmParser::ParseMSInlineAsm(void *AsmLoc, std::string &AsmString, + unsigned &NumOutputs, unsigned &NumInputs, + SmallVectorImpl > &OpDecls, + SmallVectorImpl &Constraints, + SmallVectorImpl &Clobbers, + const MCInstrInfo *MII, + const MCInstPrinter *IP, + MCAsmParserSemaCallback &SI) { + SmallVector InputDecls; + SmallVector OutputDecls; + SmallVector InputDeclsOffsetOf; + SmallVector OutputDeclsOffsetOf; + SmallVector InputConstraints; + SmallVector OutputConstraints; + std::set ClobberRegs; + + SmallVector AsmStrRewrites; + + // Prime the lexer. + Lex(); + + // While we have input, parse each statement. + unsigned InputIdx = 0; + unsigned OutputIdx = 0; + while (getLexer().isNot(AsmToken::Eof)) { + ParseStatementInfo Info(&AsmStrRewrites); + if (ParseStatement(Info)) + return true; + + if (Info.Opcode != ~0U) { + const MCInstrDesc &Desc = MII->get(Info.Opcode); + + // Build the list of clobbers, outputs and inputs. + for (unsigned i = 1, e = Info.ParsedOperands.size(); i != e; ++i) { + MCParsedAsmOperand *Operand = Info.ParsedOperands[i]; + + // Immediate. + if (Operand->isImm()) { + if (Operand->needAsmRewrite()) + AsmStrRewrites.push_back(AsmRewrite(AOK_ImmPrefix, + Operand->getStartLoc())); + continue; + } + + // Register operand. + if (Operand->isReg() && !Operand->isOffsetOf()) { + unsigned NumDefs = Desc.getNumDefs(); + // Clobber. 
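A sketch of the operand check behind the new MS-style "_emit" directive parsed above: the expression must fold to a constant that fits in eight bits, signed or unsigned, and is later rewritten into a ".byte" in the lowered asm string.

#include <cstdint>

static bool fitsInEmitByte(int64_t V) {
  bool FitsUnsigned = V >= 0 && V <= 0xFF;     // isUIntN(8, V)
  bool FitsSigned   = V >= -128 && V <= 127;   // isIntN(8, V)
  return FitsUnsigned || FitsSigned;
}
// fitsInEmitByte(0x90) -> true    fitsInEmitByte(300) -> false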
+ if (NumDefs && Operand->getMCOperandNum() < NumDefs) { + std::string Reg; + raw_string_ostream OS(Reg); + IP->printRegName(OS, Operand->getReg()); + ClobberRegs.insert(StringRef(OS.str())); + } + continue; + } + + // Expr/Input or Output. + unsigned Size; + void *OpDecl = SI.LookupInlineAsmIdentifier(Operand->getName(), AsmLoc, + Size); + if (OpDecl) { + bool isOutput = (i == 1) && Desc.mayStore(); + if (!Operand->isOffsetOf() && Operand->needSizeDirective()) + AsmStrRewrites.push_back(AsmRewrite(AOK_SizeDirective, + Operand->getStartLoc(), + /*Len*/0, + Operand->getMemSize())); + if (isOutput) { + std::string Constraint = "="; + ++InputIdx; + OutputDecls.push_back(OpDecl); + OutputDeclsOffsetOf.push_back(Operand->isOffsetOf()); + Constraint += Operand->getConstraint().str(); + OutputConstraints.push_back(Constraint); + AsmStrRewrites.push_back(AsmRewrite(AOK_Output, + Operand->getStartLoc(), + Operand->getNameLen())); + } else { + InputDecls.push_back(OpDecl); + InputDeclsOffsetOf.push_back(Operand->isOffsetOf()); + InputConstraints.push_back(Operand->getConstraint().str()); + AsmStrRewrites.push_back(AsmRewrite(AOK_Input, + Operand->getStartLoc(), + Operand->getNameLen())); + } + } + } + } + } + + // Set the number of Outputs and Inputs. + NumOutputs = OutputDecls.size(); + NumInputs = InputDecls.size(); + + // Set the unique clobbers. + for (std::set::iterator I = ClobberRegs.begin(), + E = ClobberRegs.end(); I != E; ++I) + Clobbers.push_back(*I); + + // Merge the various outputs and inputs. Output are expected first. + if (NumOutputs || NumInputs) { + unsigned NumExprs = NumOutputs + NumInputs; + OpDecls.resize(NumExprs); + Constraints.resize(NumExprs); + // FIXME: Constraints are hard coded to 'm', but we need an 'r' + // constraint for offsetof. This needs to be cleaned up! + for (unsigned i = 0; i < NumOutputs; ++i) { + OpDecls[i] = std::make_pair(OutputDecls[i], OutputDeclsOffsetOf[i]); + Constraints[i] = OutputDeclsOffsetOf[i] ? "=r" : OutputConstraints[i]; + } + for (unsigned i = 0, j = NumOutputs; i < NumInputs; ++i, ++j) { + OpDecls[j] = std::make_pair(InputDecls[i], InputDeclsOffsetOf[i]); + Constraints[j] = InputDeclsOffsetOf[i] ? "r" : InputConstraints[i]; + } + } + + // Build the IR assembly string. + std::string AsmStringIR; + AsmRewriteKind PrevKind = AOK_Imm; + raw_string_ostream OS(AsmStringIR); + const char *Start = SrcMgr.getMemoryBuffer(0)->getBufferStart(); + for (SmallVectorImpl::iterator + I = AsmStrRewrites.begin(), E = AsmStrRewrites.end(); I != E; ++I) { + const char *Loc = (*I).Loc.getPointer(); + + AsmRewriteKind Kind = (*I).Kind; + + // Emit everything up to the immediate/expression. If the previous rewrite + // was a size directive, then this has already been done. + if (PrevKind != AOK_SizeDirective) + OS << StringRef(Start, Loc - Start); + PrevKind = Kind; + + // Skip the original expression. + if (Kind == AOK_Skip) { + Start = Loc + (*I).Len; + continue; + } + + // Rewrite expressions in $N notation. 
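A sketch of how the operands collected above become IR-level constraint strings: outputs get a '=' prefix, and operands that came from offsetof() are forced into a register constraint (the FIXME in the code notes the default memory constraint does not work for them). Helper names are illustrative.

#include <string>

static std::string constraintFor(const std::string &OperandConstraint,
                                 bool IsOutput, bool IsOffsetOf) {
  if (IsOffsetOf)
    return IsOutput ? "=r" : "r";
  return IsOutput ? "=" + OperandConstraint : OperandConstraint;
}
// constraintFor("m", true,  false) -> "=m"
// constraintFor("m", false, true)  -> "r"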
+ switch (Kind) { + default: break; + case AOK_Imm: + OS << Twine("$$"); + OS << (*I).Val; + break; + case AOK_ImmPrefix: + OS << Twine("$$"); + break; + case AOK_Input: + OS << '$'; + OS << InputIdx++; + break; + case AOK_Output: + OS << '$'; + OS << OutputIdx++; + break; + case AOK_SizeDirective: + switch((*I).Val) { + default: break; + case 8: OS << "byte ptr "; break; + case 16: OS << "word ptr "; break; + case 32: OS << "dword ptr "; break; + case 64: OS << "qword ptr "; break; + case 80: OS << "xword ptr "; break; + case 128: OS << "xmmword ptr "; break; + case 256: OS << "ymmword ptr "; break; + } + break; + case AOK_Emit: + OS << ".byte"; + break; + case AOK_DotOperator: + OS << (*I).Val; + break; + } + + // Skip the original expression. + if (Kind != AOK_SizeDirective) + Start = Loc + (*I).Len; + } + + // Emit the remainder of the asm string. + const char *AsmEnd = SrcMgr.getMemoryBuffer(0)->getBufferEnd(); + if (Start != AsmEnd) + OS << StringRef(Start, AsmEnd - Start); + + AsmString = OS.str(); + return false; +} + /// \brief Create an MCAsmParser instance. MCAsmParser *llvm::createMCAsmParser(SourceMgr &SM, MCContext &C, MCStreamer &Out, diff --git a/lib/MC/MCParser/ELFAsmParser.cpp b/lib/MC/MCParser/ELFAsmParser.cpp index 9316bb1..d55de1f 100644 --- a/lib/MC/MCParser/ELFAsmParser.cpp +++ b/lib/MC/MCParser/ELFAsmParser.cpp @@ -203,7 +203,7 @@ bool ELFAsmParser::ParseDirectiveSize(StringRef, SMLoc) { StringRef Name; if (getParser().ParseIdentifier(Name)) return TokError("expected identifier in directive"); - MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);; + MCSymbol *Sym = getContext().GetOrCreateSymbol(Name); if (getLexer().isNot(AsmToken::Comma)) return TokError("unexpected token in directive"); diff --git a/lib/MC/MCParser/MCAsmLexer.cpp b/lib/MC/MCParser/MCAsmLexer.cpp index 3a3ff14..384b341 100644 --- a/lib/MC/MCParser/MCAsmLexer.cpp +++ b/lib/MC/MCParser/MCAsmLexer.cpp @@ -12,7 +12,8 @@ using namespace llvm; -MCAsmLexer::MCAsmLexer() : CurTok(AsmToken::Error, StringRef()), TokStart(0) { +MCAsmLexer::MCAsmLexer() : CurTok(AsmToken::Error, StringRef()), + TokStart(0), SkipSpace(true) { } MCAsmLexer::~MCAsmLexer() { diff --git a/lib/MC/MCParser/MCAsmParser.cpp b/lib/MC/MCParser/MCAsmParser.cpp index 3a825f0..6967fee 100644 --- a/lib/MC/MCParser/MCAsmParser.cpp +++ b/lib/MC/MCParser/MCAsmParser.cpp @@ -44,5 +44,7 @@ bool MCAsmParser::ParseExpression(const MCExpr *&Res) { } void MCParsedAsmOperand::dump() const { +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) dbgs() << " " << *this; +#endif } diff --git a/lib/MC/MCParser/MCTargetAsmParser.cpp b/lib/MC/MCParser/MCTargetAsmParser.cpp index 6fb1ba4..60a3a3b 100644 --- a/lib/MC/MCParser/MCTargetAsmParser.cpp +++ b/lib/MC/MCParser/MCTargetAsmParser.cpp @@ -11,7 +11,7 @@ using namespace llvm; MCTargetAsmParser::MCTargetAsmParser() - : AvailableFeatures(0) + : AvailableFeatures(0), ParsingInlineAsm(false) { } diff --git a/lib/MC/MCRegisterInfo.cpp b/lib/MC/MCRegisterInfo.cpp index 4d1aff3..5c71106 100644 --- a/lib/MC/MCRegisterInfo.cpp +++ b/lib/MC/MCRegisterInfo.cpp @@ -24,6 +24,8 @@ unsigned MCRegisterInfo::getMatchingSuperReg(unsigned Reg, unsigned SubIdx, } unsigned MCRegisterInfo::getSubReg(unsigned Reg, unsigned Idx) const { + assert(Idx && Idx < getNumSubRegIndices() && + "This is not a subregister index"); // Get a pointer to the corresponding SubRegIndices list. This list has the // name of each sub-register in the same order as MCSubRegIterator. 
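The rewrite table used when lowering MS inline asm memory operands, lifted from the AOK_SizeDirective case above into a small standalone helper: an operand of known width gets an explicit "<size> ptr" prefix in the rewritten string.

#include <string>

static std::string sizeDirective(unsigned Bits) {
  switch (Bits) {
  case 8:   return "byte ptr ";
  case 16:  return "word ptr ";
  case 32:  return "dword ptr ";
  case 64:  return "qword ptr ";
  case 80:  return "xword ptr ";
  case 128: return "xmmword ptr ";
  case 256: return "ymmword ptr ";
  default:  return "";   // unknown widths are left untouched
  }
}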
const uint16_t *SRI = SubRegIndices + get(Reg).SubRegIndices; @@ -34,6 +36,7 @@ unsigned MCRegisterInfo::getSubReg(unsigned Reg, unsigned Idx) const { } unsigned MCRegisterInfo::getSubRegIndex(unsigned Reg, unsigned SubReg) const { + assert(SubReg && SubReg < getNumRegs() && "This is not a register"); // Get a pointer to the corresponding SubRegIndices list. This list has the // name of each sub-register in the same order as MCSubRegIterator. const uint16_t *SRI = SubRegIndices + get(Reg).SubRegIndices; diff --git a/lib/MC/MCStreamer.cpp b/lib/MC/MCStreamer.cpp index 0bac24d..afece0b 100644 --- a/lib/MC/MCStreamer.cpp +++ b/lib/MC/MCStreamer.cpp @@ -561,6 +561,10 @@ void MCStreamer::EmitRegSave(const SmallVectorImpl &RegList, bool) { abort(); } +void MCStreamer::EmitTCEntry(const MCSymbol &S) { + llvm_unreachable("Unsupported method"); +} + /// EmitRawText - If this file is backed by an assembly streamer, this dumps /// the specified string in the output .s file. This capability is /// indicated by the hasRawTextSupport() predicate. diff --git a/lib/MC/MCSubtargetInfo.cpp b/lib/MC/MCSubtargetInfo.cpp index 05c83f7..80a1f02 100644 --- a/lib/MC/MCSubtargetInfo.cpp +++ b/lib/MC/MCSubtargetInfo.cpp @@ -19,11 +19,28 @@ using namespace llvm; MCSchedModel MCSchedModel::DefaultSchedModel; // For unknown processors. +/// InitMCProcessorInfo - Set or change the CPU (optionally supplemented +/// with feature string). Recompute feature bits and scheduling model. +void +MCSubtargetInfo::InitMCProcessorInfo(StringRef CPU, StringRef FS) { + SubtargetFeatures Features(FS); + FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs, + ProcFeatures, NumFeatures); + + if (!CPU.empty()) + CPUSchedModel = getSchedModelForCPU(CPU); + else + CPUSchedModel = &MCSchedModel::DefaultSchedModel; +} + void MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS, const SubtargetFeatureKV *PF, const SubtargetFeatureKV *PD, const SubtargetInfoKV *ProcSched, + const MCWriteProcResEntry *WPR, + const MCWriteLatencyEntry *WL, + const MCReadAdvanceEntry *RA, const InstrStage *IS, const unsigned *OC, const unsigned *FP, @@ -31,26 +48,18 @@ MCSubtargetInfo::InitMCSubtargetInfo(StringRef TT, StringRef CPU, StringRef FS, TargetTriple = TT; ProcFeatures = PF; ProcDesc = PD; - ProcSchedModel = ProcSched; + ProcSchedModels = ProcSched; + WriteProcResTable = WPR; + WriteLatencyTable = WL; + ReadAdvanceTable = RA; + Stages = IS; OperandCycles = OC; ForwardingPaths = FP; NumFeatures = NF; NumProcs = NP; - SubtargetFeatures Features(FS); - FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs, - ProcFeatures, NumFeatures); -} - - -/// ReInitMCSubtargetInfo - Change CPU (and optionally supplemented with -/// feature string) and recompute feature bits. 
-uint64_t MCSubtargetInfo::ReInitMCSubtargetInfo(StringRef CPU, StringRef FS) { - SubtargetFeatures Features(FS); - FeatureBits = Features.getFeatureBits(CPU, ProcDesc, NumProcs, - ProcFeatures, NumFeatures); - return FeatureBits; + InitMCProcessorInfo(CPU, FS); } /// ToggleFeature - Toggle a feature and returns the re-computed feature @@ -70,13 +79,13 @@ uint64_t MCSubtargetInfo::ToggleFeature(StringRef FS) { } -MCSchedModel * +const MCSchedModel * MCSubtargetInfo::getSchedModelForCPU(StringRef CPU) const { - assert(ProcSchedModel && "Processor machine model not available!"); + assert(ProcSchedModels && "Processor machine model not available!"); #ifndef NDEBUG for (size_t i = 1; i < NumProcs; i++) { - assert(strcmp(ProcSchedModel[i - 1].Key, ProcSchedModel[i].Key) < 0 && + assert(strcmp(ProcSchedModels[i - 1].Key, ProcSchedModels[i].Key) < 0 && "Processor machine model table is not sorted"); } #endif @@ -85,19 +94,25 @@ MCSubtargetInfo::getSchedModelForCPU(StringRef CPU) const { SubtargetInfoKV KV; KV.Key = CPU.data(); const SubtargetInfoKV *Found = - std::lower_bound(ProcSchedModel, ProcSchedModel+NumProcs, KV); - if (Found == ProcSchedModel+NumProcs || StringRef(Found->Key) != CPU) { + std::lower_bound(ProcSchedModels, ProcSchedModels+NumProcs, KV); + if (Found == ProcSchedModels+NumProcs || StringRef(Found->Key) != CPU) { errs() << "'" << CPU << "' is not a recognized processor for this target" << " (ignoring processor)\n"; return &MCSchedModel::DefaultSchedModel; } assert(Found->Value && "Missing processor SchedModel value"); - return (MCSchedModel *)Found->Value; + return (const MCSchedModel *)Found->Value; } InstrItineraryData MCSubtargetInfo::getInstrItineraryForCPU(StringRef CPU) const { - MCSchedModel *SchedModel = getSchedModelForCPU(CPU); + const MCSchedModel *SchedModel = getSchedModelForCPU(CPU); return InstrItineraryData(SchedModel, Stages, OperandCycles, ForwardingPaths); } + +/// Initialize an InstrItineraryData instance. +void MCSubtargetInfo::initInstrItins(InstrItineraryData &InstrItins) const { + InstrItins = + InstrItineraryData(CPUSchedModel, Stages, OperandCycles, ForwardingPaths); +} diff --git a/lib/MC/MCSymbol.cpp b/lib/MC/MCSymbol.cpp index f7f9184..b973c57 100644 --- a/lib/MC/MCSymbol.cpp +++ b/lib/MC/MCSymbol.cpp @@ -26,7 +26,7 @@ static bool isAcceptableChar(char C) { return true; } -/// NameNeedsQuoting - Return true if the identifier \arg Str needs quotes to be +/// NameNeedsQuoting - Return true if the identifier \p Str needs quotes to be /// syntactically correct. static bool NameNeedsQuoting(StringRef Str) { assert(!Str.empty() && "Cannot create an empty MCSymbol"); @@ -76,6 +76,8 @@ void MCSymbol::print(raw_ostream &OS) const { OS << '"' << getName() << '"'; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MCSymbol::dump() const { print(dbgs()); } +#endif diff --git a/lib/MC/MCValue.cpp b/lib/MC/MCValue.cpp index c6ea16c..4393777 100644 --- a/lib/MC/MCValue.cpp +++ b/lib/MC/MCValue.cpp @@ -31,6 +31,8 @@ void MCValue::print(raw_ostream &OS, const MCAsmInfo *MAI) const { OS << " + " << getConstant(); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void MCValue::dump() const { print(dbgs(), 0); } +#endif diff --git a/lib/MC/MachObjectWriter.cpp b/lib/MC/MachObjectWriter.cpp index 5820a22..a94b214 100644 --- a/lib/MC/MachObjectWriter.cpp +++ b/lib/MC/MachObjectWriter.cpp @@ -68,6 +68,11 @@ uint64_t MachObjectWriter::getSymbolAddress(const MCSymbolData* SD, // If this is a variable, then recursively evaluate now. 
if (S.isVariable()) { + if (const MCConstantExpr *C = + dyn_cast(S.getVariableValue())) + return C->getValue(); + + MCValue Target; if (!S.getVariableValue()->EvaluateAsRelocatable(Target, Layout)) report_fatal_error("unable to evaluate offset for variable '" + @@ -140,8 +145,8 @@ void MachObjectWriter::WriteHeader(unsigned NumLoadCommands, /// WriteSegmentLoadCommand - Write a segment load command. /// -/// \arg NumSections - The number of sections in this segment. -/// \arg SectionDataSize - The total size of the sections. +/// \param NumSections The number of sections in this segment. +/// \param SectionDataSize The total size of the sections. void MachObjectWriter::WriteSegmentLoadCommand(unsigned NumSections, uint64_t VMSize, uint64_t SectionDataStartOffset, @@ -315,11 +320,7 @@ void MachObjectWriter::WriteNlist(MachSymbolData &MSD, // Compute the symbol address. if (Symbol.isDefined()) { - if (Symbol.isAbsolute()) { - Address = cast(Symbol.getVariableValue())->getValue(); - } else { - Address = getSymbolAddress(&Data, Layout); - } + Address = getSymbolAddress(&Data, Layout); } else if (Data.isCommon()) { // Common symbols are encoded with the size in the address // field, and their alignment in the flags. @@ -396,8 +397,7 @@ void MachObjectWriter::BindIndirectSymbols(MCAssembler &Asm) { continue; // Initialize the section indirect symbol base, if necessary. - if (!IndirectSymBase.count(it->SectionData)) - IndirectSymBase[it->SectionData] = IndirectIndex; + IndirectSymBase.insert(std::make_pair(it->SectionData, IndirectIndex)); Asm.getOrCreateSymbolData(*it->Symbol); } @@ -414,8 +414,7 @@ void MachObjectWriter::BindIndirectSymbols(MCAssembler &Asm) { continue; // Initialize the section indirect symbol base, if necessary. - if (!IndirectSymBase.count(it->SectionData)) - IndirectSymBase[it->SectionData] = IndirectIndex; + IndirectSymBase.insert(std::make_pair(it->SectionData, IndirectIndex)); // Set the symbol type to undefined lazy, but only on construction. // @@ -559,6 +558,26 @@ void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm, } } +void MachObjectWriter::markAbsoluteVariableSymbols(MCAssembler &Asm, + const MCAsmLayout &Layout) { + for (MCAssembler::symbol_iterator i = Asm.symbol_begin(), + e = Asm.symbol_end(); + i != e; ++i) { + MCSymbolData &SD = *i; + if (!SD.getSymbol().isVariable()) + continue; + + // Is the variable is a symbol difference (SA - SB + C) expression, + // and neither symbol is external, mark the variable as absolute. + const MCExpr *Expr = SD.getSymbol().getVariableValue(); + MCValue Value; + if (Expr->EvaluateAsRelocatable(Value, Layout)) { + if (Value.getSymA() && Value.getSymB()) + const_cast(&SD.getSymbol())->setAbsolute(); + } + } +} + void MachObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm, const MCAsmLayout &Layout) { computeSectionAddresses(Asm, Layout); @@ -566,6 +585,10 @@ void MachObjectWriter::ExecutePostLayoutBinding(MCAssembler &Asm, // Create symbol data for any indirect symbols. BindIndirectSymbols(Asm); + // Mark symbol difference expressions in variables (from .set or = directives) + // as absolute. + markAbsoluteVariableSymbols(Asm, Layout); + // Compute symbol table information and bind symbol indices. 
ComputeSymbolTable(Asm, StringTable, LocalSymbolData, ExternalSymbolData, UndefinedSymbolData); @@ -797,8 +820,12 @@ void MachObjectWriter::WriteObject(MCAssembler &Asm, it = Asm.data_region_begin(), ie = Asm.data_region_end(); it != ie; ++it) { const DataRegionData *Data = &(*it); - uint64_t Start = getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->Start), Layout); - uint64_t End = getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->End), Layout); + uint64_t Start = + getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->Start), + Layout); + uint64_t End = + getSymbolAddress(&Layout.getAssembler().getSymbolData(*Data->End), + Layout); DEBUG(dbgs() << "data in code region-- kind: " << Data->Kind << " start: " << Start << "(" << Data->Start->getName() << ")" << " end: " << End << "(" << Data->End->getName() << ")" diff --git a/lib/MC/SubtargetFeature.cpp b/lib/MC/SubtargetFeature.cpp index 0a44e77..7625abd 100644 --- a/lib/MC/SubtargetFeature.cpp +++ b/lib/MC/SubtargetFeature.cpp @@ -119,14 +119,15 @@ void SubtargetFeatures::AddFeature(const StringRef String, } /// Find KV in array using binary search. -template const T *Find(const StringRef S, const T *A, size_t L) { +static const SubtargetFeatureKV *Find(StringRef S, const SubtargetFeatureKV *A, + size_t L) { // Make the lower bound element we're looking for - T KV; + SubtargetFeatureKV KV; KV.Key = S.data(); // Determine the end of the array - const T *Hi = A + L; + const SubtargetFeatureKV *Hi = A + L; // Binary search the array - const T *F = std::lower_bound(A, Hi, KV); + const SubtargetFeatureKV *F = std::lower_bound(A, Hi, KV); // If not found then return NULL if (F == Hi || StringRef(F->Key) != S) return NULL; // Return the found array item @@ -336,30 +337,6 @@ uint64_t SubtargetFeatures::getFeatureBits(const StringRef CPU, return Bits; } -/// Get scheduling itinerary of a CPU. -void *SubtargetFeatures::getItinerary(const StringRef CPU, - const SubtargetInfoKV *Table, - size_t TableSize) { - assert(Table && "missing table"); -#ifndef NDEBUG - for (size_t i = 1; i < TableSize; i++) { - assert(strcmp(Table[i - 1].Key, Table[i].Key) < 0 && "Table is not sorted"); - } -#endif - - // Find entry - const SubtargetInfoKV *Entry = Find(CPU, Table, TableSize); - - if (Entry) { - return Entry->Value; - } else { - errs() << "'" << CPU - << "' is not a recognized processor for this target" - << " (ignoring processor)\n"; - return NULL; - } -} - /// print - Print feature string. /// void SubtargetFeatures::print(raw_ostream &OS) const { @@ -368,11 +345,13 @@ void SubtargetFeatures::print(raw_ostream &OS) const { OS << "\n"; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) /// dump - Dump feature info. /// void SubtargetFeatures::dump() const { print(dbgs()); } +#endif /// getDefaultSubtargetFeatures - Return a string listing the features /// associated with the target triple. 
diff --git a/lib/MC/WinCOFFStreamer.cpp b/lib/MC/WinCOFFStreamer.cpp index b026277..702eec0 100644 --- a/lib/MC/WinCOFFStreamer.cpp +++ b/lib/MC/WinCOFFStreamer.cpp @@ -70,11 +70,6 @@ public: uint64_t Size,unsigned ByteAlignment); virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol, uint64_t Size, unsigned ByteAlignment); - virtual void EmitBytes(StringRef Data, unsigned AddrSpace); - virtual void EmitValueToAlignment(unsigned ByteAlignment, int64_t Value, - unsigned ValueSize, unsigned MaxBytesToEmit); - virtual void EmitCodeAlignment(unsigned ByteAlignment, - unsigned MaxBytesToEmit); virtual void EmitFileDirective(StringRef Filename); virtual void EmitInstruction(const MCInst &Instruction); virtual void EmitWin64EHHandlerData(); @@ -333,43 +328,6 @@ void WinCOFFStreamer::EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol, llvm_unreachable("not implemented"); } -void WinCOFFStreamer::EmitBytes(StringRef Data, unsigned AddrSpace) { - // TODO: This is copied exactly from the MachOStreamer. Consider merging into - // MCObjectStreamer? - getOrCreateDataFragment()->getContents().append(Data.begin(), Data.end()); -} - -void WinCOFFStreamer::EmitValueToAlignment(unsigned ByteAlignment, - int64_t Value, - unsigned ValueSize, - unsigned MaxBytesToEmit) { - // TODO: This is copied exactly from the MachOStreamer. Consider merging into - // MCObjectStreamer? - if (MaxBytesToEmit == 0) - MaxBytesToEmit = ByteAlignment; - new MCAlignFragment(ByteAlignment, Value, ValueSize, MaxBytesToEmit, - getCurrentSectionData()); - - // Update the maximum alignment on the current section if necessary. - if (ByteAlignment > getCurrentSectionData()->getAlignment()) - getCurrentSectionData()->setAlignment(ByteAlignment); -} - -void WinCOFFStreamer::EmitCodeAlignment(unsigned ByteAlignment, - unsigned MaxBytesToEmit) { - // TODO: This is copied exactly from the MachOStreamer. Consider merging into - // MCObjectStreamer? - if (MaxBytesToEmit == 0) - MaxBytesToEmit = ByteAlignment; - MCAlignFragment *F = new MCAlignFragment(ByteAlignment, 0, 1, MaxBytesToEmit, - getCurrentSectionData()); - F->setEmitNops(true); - - // Update the maximum alignment on the current section if necessary. - if (ByteAlignment > getCurrentSectionData()->getAlignment()) - getCurrentSectionData()->setAlignment(ByteAlignment); -} - void WinCOFFStreamer::EmitFileDirective(StringRef Filename) { // Ignore for now, linkers don't care, and proper debug // info will be a much large effort. diff --git a/lib/Object/COFFObjectFile.cpp b/lib/Object/COFFObjectFile.cpp index 8ab54c6..0b7ee34 100644 --- a/lib/Object/COFFObjectFile.cpp +++ b/lib/Object/COFFObjectFile.cpp @@ -288,6 +288,11 @@ error_code COFFObjectFile::getSymbolSection(DataRefImpl Symb, return object_error::success; } +error_code COFFObjectFile::getSymbolValue(DataRefImpl Symb, + uint64_t &Val) const { + report_fatal_error("getSymbolValue unimplemented in COFFObjectFile"); +} + error_code COFFObjectFile::getSectionNext(DataRefImpl Sec, SectionRef &Result) const { const coff_section *sec = toSec(Sec); @@ -372,7 +377,14 @@ error_code COFFObjectFile::isSectionVirtual(DataRefImpl Sec, error_code COFFObjectFile::isSectionZeroInit(DataRefImpl Sec, bool &Result) const { - // FIXME: Unimplemented + // FIXME: Unimplemented. + Result = false; + return object_error::success; +} + +error_code COFFObjectFile::isSectionReadOnlyData(DataRefImpl Sec, + bool &Result) const { + // FIXME: Unimplemented. 
Result = false; return object_error::success; } diff --git a/lib/Object/MachOObjectFile.cpp b/lib/Object/MachOObjectFile.cpp index d229671..45aeaac 100644 --- a/lib/Object/MachOObjectFile.cpp +++ b/lib/Object/MachOObjectFile.cpp @@ -363,6 +363,10 @@ error_code MachOObjectFile::getSymbolType(DataRefImpl Symb, return object_error::success; } +error_code MachOObjectFile::getSymbolValue(DataRefImpl Symb, + uint64_t &Val) const { + report_fatal_error("getSymbolValue unimplemented in MachOObjectFile"); +} symbol_iterator MachOObjectFile::begin_symbols() const { // DRI.d.a = segment number; DRI.d.b = symbol index. @@ -581,14 +585,14 @@ error_code MachOObjectFile::isSectionBSS(DataRefImpl DRI, error_code MachOObjectFile::isSectionRequiredForExecution(DataRefImpl Sec, bool &Result) const { - // FIXME: Unimplemented + // FIXME: Unimplemented. Result = true; return object_error::success; } error_code MachOObjectFile::isSectionVirtual(DataRefImpl Sec, - bool &Result) const { - // FIXME: Unimplemented + bool &Result) const { + // FIXME: Unimplemented. Result = false; return object_error::success; } @@ -612,6 +616,17 @@ error_code MachOObjectFile::isSectionZeroInit(DataRefImpl DRI, return object_error::success; } +error_code MachOObjectFile::isSectionReadOnlyData(DataRefImpl Sec, + bool &Result) const { + // Consider using the code from isSectionText to look for __const sections. + // Alternately, emit S_ATTR_PURE_INSTRUCTIONS and/or S_ATTR_SOME_INSTRUCTIONS + // to use section attributes to distinguish code from data. + + // FIXME: Unimplemented. + Result = false; + return object_error::success; +} + error_code MachOObjectFile::sectionContainsSymbol(DataRefImpl Sec, DataRefImpl Symb, bool &Result) const { diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp index ed261a4..7e8b4a3 100644 --- a/lib/Support/APFloat.cpp +++ b/lib/Support/APFloat.cpp @@ -46,22 +46,27 @@ namespace llvm { /* Number of bits in the significand. This includes the integer bit. */ unsigned int precision; - - /* True if arithmetic is supported. */ - unsigned int arithmeticOK; }; - const fltSemantics APFloat::IEEEhalf = { 15, -14, 11, true }; - const fltSemantics APFloat::IEEEsingle = { 127, -126, 24, true }; - const fltSemantics APFloat::IEEEdouble = { 1023, -1022, 53, true }; - const fltSemantics APFloat::IEEEquad = { 16383, -16382, 113, true }; - const fltSemantics APFloat::x87DoubleExtended = { 16383, -16382, 64, true }; - const fltSemantics APFloat::Bogus = { 0, 0, 0, true }; - - // The PowerPC format consists of two doubles. It does not map cleanly - // onto the usual format above. For now only storage of constants of - // this type is supported, no arithmetic. - const fltSemantics APFloat::PPCDoubleDouble = { 1023, -1022, 106, false }; + const fltSemantics APFloat::IEEEhalf = { 15, -14, 11 }; + const fltSemantics APFloat::IEEEsingle = { 127, -126, 24 }; + const fltSemantics APFloat::IEEEdouble = { 1023, -1022, 53 }; + const fltSemantics APFloat::IEEEquad = { 16383, -16382, 113 }; + const fltSemantics APFloat::x87DoubleExtended = { 16383, -16382, 64 }; + const fltSemantics APFloat::Bogus = { 0, 0, 0 }; + + /* The PowerPC format consists of two doubles. It does not map cleanly + onto the usual format above. It is approximated using twice the + mantissa bits. Note that for exponents near the double minimum, + we no longer can represent the full 106 mantissa bits, so those + will be treated as denormal numbers. 
+ + FIXME: While this approximation is equivalent to what GCC uses for + compile-time arithmetic on PPC double-double numbers, it is not able + to represent all possible values held by a PPC double-double number, + for example: (long double) 1.0 + (long double) 0x1p-106 + Should this be replaced by a full emulation of PPC double-double? */ + const fltSemantics APFloat::PPCDoubleDouble = { 1023, -1022 + 53, 53 + 53 }; /* A tight upper bound on number of parts required to hold the value pow(5, power) is @@ -116,12 +121,6 @@ hexDigitValue(unsigned int c) return -1U; } -static inline void -assertArithmeticOK(const llvm::fltSemantics &semantics) { - assert(semantics.arithmeticOK && - "Compile-time arithmetic does not support these semantics"); -} - /* Return the value of a decimal exponent of the form [+-]ddddddd. @@ -196,8 +195,10 @@ totalExponent(StringRef::iterator p, StringRef::iterator end, assert(value < 10U && "Invalid character in exponent"); unsignedExponent = unsignedExponent * 10 + value; - if (unsignedExponent > 32767) + if (unsignedExponent > 32767) { overflow = true; + break; + } } if (exponentAdjustment > 32767 || exponentAdjustment < -32768) @@ -610,8 +611,6 @@ APFloat::assign(const APFloat &rhs) sign = rhs.sign; category = rhs.category; exponent = rhs.exponent; - sign2 = rhs.sign2; - exponent2 = rhs.exponent2; if (category == fcNormal || category == fcNaN) copySignificand(rhs); } @@ -705,16 +704,10 @@ APFloat::bitwiseIsEqual(const APFloat &rhs) const { category != rhs.category || sign != rhs.sign) return false; - if (semantics==(const llvm::fltSemantics*)&PPCDoubleDouble && - sign2 != rhs.sign2) - return false; if (category==fcZero || category==fcInfinity) return true; else if (category==fcNormal && exponent!=rhs.exponent) return false; - else if (semantics==(const llvm::fltSemantics*)&PPCDoubleDouble && - exponent2!=rhs.exponent2) - return false; else { int i= partCount(); const integerPart* p=significandParts(); @@ -727,9 +720,7 @@ APFloat::bitwiseIsEqual(const APFloat &rhs) const { } } -APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value) - : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); +APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value) { initialize(&ourSemantics); sign = 0; zeroSignificand(); @@ -738,24 +729,19 @@ APFloat::APFloat(const fltSemantics &ourSemantics, integerPart value) normalize(rmNearestTiesToEven, lfExactlyZero); } -APFloat::APFloat(const fltSemantics &ourSemantics) : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); +APFloat::APFloat(const fltSemantics &ourSemantics) { initialize(&ourSemantics); category = fcZero; sign = false; } -APFloat::APFloat(const fltSemantics &ourSemantics, uninitializedTag tag) - : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); +APFloat::APFloat(const fltSemantics &ourSemantics, uninitializedTag tag) { // Allocates storage if necessary but does not initialize it. 
initialize(&ourSemantics); } APFloat::APFloat(const fltSemantics &ourSemantics, - fltCategory ourCategory, bool negative) - : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); + fltCategory ourCategory, bool negative) { initialize(&ourSemantics); category = ourCategory; sign = negative; @@ -765,14 +751,12 @@ APFloat::APFloat(const fltSemantics &ourSemantics, makeNaN(); } -APFloat::APFloat(const fltSemantics &ourSemantics, StringRef text) - : exponent2(0), sign2(0) { - assertArithmeticOK(ourSemantics); +APFloat::APFloat(const fltSemantics &ourSemantics, StringRef text) { initialize(&ourSemantics); convertFromString(text, rmNearestTiesToEven); } -APFloat::APFloat(const APFloat &rhs) : exponent2(0), sign2(0) { +APFloat::APFloat(const APFloat &rhs) { initialize(rhs.semantics); assign(rhs); } @@ -1559,8 +1543,6 @@ APFloat::addOrSubtract(const APFloat &rhs, roundingMode rounding_mode, { opStatus fs; - assertArithmeticOK(*semantics); - fs = addOrSubtractSpecials(rhs, subtract); /* This return code means it was not a simple case. */ @@ -1605,7 +1587,6 @@ APFloat::multiply(const APFloat &rhs, roundingMode rounding_mode) { opStatus fs; - assertArithmeticOK(*semantics); sign ^= rhs.sign; fs = multiplySpecials(rhs); @@ -1625,7 +1606,6 @@ APFloat::divide(const APFloat &rhs, roundingMode rounding_mode) { opStatus fs; - assertArithmeticOK(*semantics); sign ^= rhs.sign; fs = divideSpecials(rhs); @@ -1647,7 +1627,6 @@ APFloat::remainder(const APFloat &rhs) APFloat V = *this; unsigned int origSign = sign; - assertArithmeticOK(*semantics); fs = V.divide(rhs, rmNearestTiesToEven); if (fs == opDivByZero) return fs; @@ -1682,7 +1661,6 @@ APFloat::opStatus APFloat::mod(const APFloat &rhs, roundingMode rounding_mode) { opStatus fs; - assertArithmeticOK(*semantics); fs = modSpecials(rhs); if (category == fcNormal && rhs.category == fcNormal) { @@ -1726,8 +1704,6 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand, { opStatus fs; - assertArithmeticOK(*semantics); - /* Post-multiplication sign, before addition. */ sign ^= multiplicand.sign; @@ -1768,12 +1744,11 @@ APFloat::fusedMultiplyAdd(const APFloat &multiplicand, /* Rounding-mode corrrect round to integral value. */ APFloat::opStatus APFloat::roundToIntegral(roundingMode rounding_mode) { opStatus fs; - assertArithmeticOK(*semantics); // If the exponent is large enough, we know that this value is already // integral, and the arithmetic below would potentially cause it to saturate // to +/-Inf. Bail out early instead. - if (exponent+1 >= (int)semanticsPrecision(*semantics)) + if (category == fcNormal && exponent+1 >= (int)semanticsPrecision(*semantics)) return opOK; // The algorithm here is quite simple: we add 2^(p-1), where p is the @@ -1815,7 +1790,6 @@ APFloat::compare(const APFloat &rhs) const { cmpResult result; - assertArithmeticOK(*semantics); assert(semantics == rhs.semantics); switch (convolve(category, rhs.category)) { @@ -1900,8 +1874,6 @@ APFloat::convert(const fltSemantics &toSemantics, int shift; const fltSemantics &fromSemantics = *semantics; - assertArithmeticOK(fromSemantics); - assertArithmeticOK(toSemantics); lostFraction = lfExactlyZero; newPartCount = partCountForBits(toSemantics.precision + 1); oldPartCount = partCount(); @@ -1986,8 +1958,6 @@ APFloat::convertToSignExtendedInteger(integerPart *parts, unsigned int width, const integerPart *src; unsigned int dstPartsCount, truncatedBits; - assertArithmeticOK(*semantics); - *isExact = false; /* Handle the three special cases first. 
*/ @@ -2149,7 +2119,6 @@ APFloat::convertFromUnsignedParts(const integerPart *src, integerPart *dst; lostFraction lost_fraction; - assertArithmeticOK(*semantics); category = fcNormal; omsb = APInt::tcMSB(src, srcCount) + 1; dst = significandParts(); @@ -2200,7 +2169,6 @@ APFloat::convertFromSignExtendedInteger(const integerPart *src, { opStatus status; - assertArithmeticOK(*semantics); if (isSigned && APInt::tcExtractBit(src, srcCount * integerPartWidth - 1)) { integerPart *copy; @@ -2334,7 +2302,7 @@ APFloat::roundSignificandWithExponent(const integerPart *decSigParts, roundingMode rounding_mode) { unsigned int parts, pow5PartCount; - fltSemantics calcSemantics = { 32767, -32767, 0, true }; + fltSemantics calcSemantics = { 32767, -32767, 0 }; integerPart pow5Parts[maxPowerOfFiveParts]; bool isNearest; @@ -2526,7 +2494,6 @@ APFloat::convertFromDecimalString(StringRef str, roundingMode rounding_mode) APFloat::opStatus APFloat::convertFromString(StringRef str, roundingMode rounding_mode) { - assertArithmeticOK(*semantics); assert(!str.empty() && "Invalid string length"); /* Handle a leading minus sign. */ @@ -2578,8 +2545,6 @@ APFloat::convertToHexString(char *dst, unsigned int hexDigits, { char *p; - assertArithmeticOK(*semantics); - p = dst; if (sign) *dst++ = '-'; @@ -2788,42 +2753,46 @@ APFloat::convertPPCDoubleDoubleAPFloatToAPInt() const assert(semantics == (const llvm::fltSemantics*)&PPCDoubleDouble); assert(partCount()==2); - uint64_t myexponent, mysignificand, myexponent2, mysignificand2; - - if (category==fcNormal) { - myexponent = exponent + 1023; //bias - myexponent2 = exponent2 + 1023; - mysignificand = significandParts()[0]; - mysignificand2 = significandParts()[1]; - if (myexponent==1 && !(mysignificand & 0x10000000000000LL)) - myexponent = 0; // denormal - if (myexponent2==1 && !(mysignificand2 & 0x10000000000000LL)) - myexponent2 = 0; // denormal - } else if (category==fcZero) { - myexponent = 0; - mysignificand = 0; - myexponent2 = 0; - mysignificand2 = 0; - } else if (category==fcInfinity) { - myexponent = 0x7ff; - myexponent2 = 0; - mysignificand = 0; - mysignificand2 = 0; + uint64_t words[2]; + opStatus fs; + bool losesInfo; + + // Convert number to double. To avoid spurious underflows, we re- + // normalize against the "double" minExponent first, and only *then* + // truncate the mantissa. The result of that second conversion + // may be inexact, but should never underflow. + APFloat extended(*this); + fltSemantics extendedSemantics = *semantics; + extendedSemantics.minExponent = IEEEdouble.minExponent; + fs = extended.convert(extendedSemantics, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; + + APFloat u(extended); + fs = u.convert(IEEEdouble, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK || fs == opInexact); + (void)fs; + words[0] = *u.convertDoubleAPFloatToAPInt().getRawData(); + + // If conversion was exact or resulted in a special case, we're done; + // just set the second double to zero. Otherwise, re-convert back to + // the extended format and compute the difference. This now should + // convert exactly to double. 
+ if (u.category == fcNormal && losesInfo) { + fs = u.convert(extendedSemantics, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; + + APFloat v(extended); + v.subtract(u, rmNearestTiesToEven); + fs = v.convert(IEEEdouble, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; + words[1] = *v.convertDoubleAPFloatToAPInt().getRawData(); } else { - assert(category == fcNaN && "Unknown category"); - myexponent = 0x7ff; - mysignificand = significandParts()[0]; - myexponent2 = exponent2; - mysignificand2 = significandParts()[1]; + words[1] = 0; } - uint64_t words[2]; - words[0] = ((uint64_t)(sign & 1) << 63) | - ((myexponent & 0x7ff) << 52) | - (mysignificand & 0xfffffffffffffLL); - words[1] = ((uint64_t)(sign2 & 1) << 63) | - ((myexponent2 & 0x7ff) << 52) | - (mysignificand2 & 0xfffffffffffffLL); return APInt(128, words); } @@ -3043,47 +3012,23 @@ APFloat::initFromPPCDoubleDoubleAPInt(const APInt &api) assert(api.getBitWidth()==128); uint64_t i1 = api.getRawData()[0]; uint64_t i2 = api.getRawData()[1]; - uint64_t myexponent = (i1 >> 52) & 0x7ff; - uint64_t mysignificand = i1 & 0xfffffffffffffLL; - uint64_t myexponent2 = (i2 >> 52) & 0x7ff; - uint64_t mysignificand2 = i2 & 0xfffffffffffffLL; + opStatus fs; + bool losesInfo; - initialize(&APFloat::PPCDoubleDouble); - assert(partCount()==2); + // Get the first double and convert to our format. + initFromDoubleAPInt(APInt(64, i1)); + fs = convert(PPCDoubleDouble, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; - sign = static_cast(i1>>63); - sign2 = static_cast(i2>>63); - if (myexponent==0 && mysignificand==0) { - // exponent, significand meaningless - // exponent2 and significand2 are required to be 0; we don't check - category = fcZero; - } else if (myexponent==0x7ff && mysignificand==0) { - // exponent, significand meaningless - // exponent2 and significand2 are required to be 0; we don't check - category = fcInfinity; - } else if (myexponent==0x7ff && mysignificand!=0) { - // exponent meaningless. So is the whole second word, but keep it - // for determinism. - category = fcNaN; - exponent2 = myexponent2; - significandParts()[0] = mysignificand; - significandParts()[1] = mysignificand2; - } else { - category = fcNormal; - // Note there is no category2; the second word is treated as if it is - // fcNormal, although it might be something else considered by itself. - exponent = myexponent - 1023; - exponent2 = myexponent2 - 1023; - significandParts()[0] = mysignificand; - significandParts()[1] = mysignificand2; - if (myexponent==0) // denormal - exponent = -1022; - else - significandParts()[0] |= 0x10000000000000LL; // integer bit - if (myexponent2==0) - exponent2 = -1022; - else - significandParts()[1] |= 0x10000000000000LL; // integer bit + // Unless we have a special case, add in second double. 
+ if (category == fcNormal) { + APFloat v(APInt(64, i2)); + fs = v.convert(PPCDoubleDouble, rmNearestTiesToEven, &losesInfo); + assert(fs == opOK && !losesInfo); + (void)fs; + + add(v, rmNearestTiesToEven); } } @@ -3309,15 +3254,15 @@ APFloat APFloat::getSmallestNormalized(const fltSemantics &Sem, bool Negative) { return Val; } -APFloat::APFloat(const APInt& api, bool isIEEE) : exponent2(0), sign2(0) { +APFloat::APFloat(const APInt& api, bool isIEEE) { initFromAPInt(api, isIEEE); } -APFloat::APFloat(float f) : exponent2(0), sign2(0) { +APFloat::APFloat(float f) { initFromAPInt(APInt::floatToBits(f)); } -APFloat::APFloat(double d) : exponent2(0), sign2(0) { +APFloat::APFloat(double d) { initFromAPInt(APInt::doubleToBits(d)); } @@ -3608,11 +3553,6 @@ void APFloat::toString(SmallVectorImpl &Str, } bool APFloat::getExactInverse(APFloat *inv) const { - // We can only guarantee the existence of an exact inverse for IEEE floats. - if (semantics != &IEEEhalf && semantics != &IEEEsingle && - semantics != &IEEEdouble && semantics != &IEEEquad) - return false; - // Special floats and denormals have no exact inverse. if (category != fcNormal) return false; diff --git a/lib/Support/Atomic.cpp b/lib/Support/Atomic.cpp index 3001f6c..9559ad7 100644 --- a/lib/Support/Atomic.cpp +++ b/lib/Support/Atomic.cpp @@ -21,11 +21,15 @@ using namespace llvm; #undef MemoryFence #endif +#if defined(__GNUC__) || (defined(__IBMCPP__) && __IBMCPP__ >= 1210) +#define GNU_ATOMICS +#endif + void sys::MemoryFence() { #if LLVM_HAS_ATOMICS == 0 return; #else -# if defined(__GNUC__) +# if defined(GNU_ATOMICS) __sync_synchronize(); # elif defined(_MSC_VER) MemoryBarrier(); @@ -43,7 +47,7 @@ sys::cas_flag sys::CompareAndSwap(volatile sys::cas_flag* ptr, if (result == old_value) *ptr = new_value; return result; -#elif defined(__GNUC__) +#elif defined(GNU_ATOMICS) return __sync_val_compare_and_swap(ptr, old_value, new_value); #elif defined(_MSC_VER) return InterlockedCompareExchange(ptr, new_value, old_value); @@ -56,7 +60,7 @@ sys::cas_flag sys::AtomicIncrement(volatile sys::cas_flag* ptr) { #if LLVM_HAS_ATOMICS == 0 ++(*ptr); return *ptr; -#elif defined(__GNUC__) +#elif defined(GNU_ATOMICS) return __sync_add_and_fetch(ptr, 1); #elif defined(_MSC_VER) return InterlockedIncrement(ptr); @@ -69,7 +73,7 @@ sys::cas_flag sys::AtomicDecrement(volatile sys::cas_flag* ptr) { #if LLVM_HAS_ATOMICS == 0 --(*ptr); return *ptr; -#elif defined(__GNUC__) +#elif defined(GNU_ATOMICS) return __sync_sub_and_fetch(ptr, 1); #elif defined(_MSC_VER) return InterlockedDecrement(ptr); @@ -82,7 +86,7 @@ sys::cas_flag sys::AtomicAdd(volatile sys::cas_flag* ptr, sys::cas_flag val) { #if LLVM_HAS_ATOMICS == 0 *ptr += val; return *ptr; -#elif defined(__GNUC__) +#elif defined(GNU_ATOMICS) return __sync_add_and_fetch(ptr, val); #elif defined(_MSC_VER) return InterlockedExchangeAdd(ptr, val) + val; diff --git a/lib/Support/CMakeLists.txt b/lib/Support/CMakeLists.txt index 83baf60..6af0f4a 100644 --- a/lib/Support/CMakeLists.txt +++ b/lib/Support/CMakeLists.txt @@ -1,9 +1,3 @@ -## FIXME: This only requires RTTI because tblgen uses it. Fix that. 
-set(LLVM_REQUIRES_RTTI 1) -if( MINGW ) - set(LLVM_REQUIRES_EH 1) -endif() - add_llvm_library(LLVMSupport APFloat.cpp APInt.cpp diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp index 593315d1..fc4f189 100644 --- a/lib/Support/CommandLine.cpp +++ b/lib/Support/CommandLine.cpp @@ -464,7 +464,7 @@ static void ParseCStringVector(std::vector &OutputVector, /// an environment variable (whose name is given in ENVVAR). /// void cl::ParseEnvironmentOptions(const char *progName, const char *envVar, - const char *Overview, bool ReadResponseFiles) { + const char *Overview) { // Check args. assert(progName && "Program name not specified"); assert(envVar && "Environment variable name missing"); @@ -483,7 +483,7 @@ void cl::ParseEnvironmentOptions(const char *progName, const char *envVar, // and hand it off to ParseCommandLineOptions(). ParseCStringVector(newArgv, envValue); int newArgc = static_cast(newArgv.size()); - ParseCommandLineOptions(newArgc, &newArgv[0], Overview, ReadResponseFiles); + ParseCommandLineOptions(newArgc, &newArgv[0], Overview); // Free all the strdup()ed strings. for (std::vector::iterator i = newArgv.begin(), e = newArgv.end(); @@ -529,7 +529,7 @@ static void ExpandResponseFiles(unsigned argc, const char*const* argv, } void cl::ParseCommandLineOptions(int argc, const char * const *argv, - const char *Overview, bool ReadResponseFiles) { + const char *Overview) { // Process all registered options. SmallVector PositionalOpts; SmallVector SinkOpts; @@ -541,12 +541,10 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv, // Expand response files. std::vector newArgv; - if (ReadResponseFiles) { - newArgv.push_back(strdup(argv[0])); - ExpandResponseFiles(argc, argv, newArgv); - argv = &newArgv[0]; - argc = static_cast(newArgv.size()); - } + newArgv.push_back(strdup(argv[0])); + ExpandResponseFiles(argc, argv, newArgv); + argv = &newArgv[0]; + argc = static_cast(newArgv.size()); // Copy the program name into ProgName, making sure not to overflow it. std::string ProgName = sys::path::filename(argv[0]); @@ -839,12 +837,10 @@ void cl::ParseCommandLineOptions(int argc, const char * const *argv, MoreHelp->clear(); // Free the memory allocated by ExpandResponseFiles. - if (ReadResponseFiles) { - // Free all the strdup()ed strings. - for (std::vector::iterator i = newArgv.begin(), e = newArgv.end(); - i != e; ++i) - free(*i); - } + // Free all the strdup()ed strings. + for (std::vector::iterator i = newArgv.begin(), e = newArgv.end(); + i != e; ++i) + free(*i); // If we had an error processing our arguments, don't let the program execute if (ErrorParsing) exit(1); diff --git a/lib/Support/DAGDeltaAlgorithm.cpp b/lib/Support/DAGDeltaAlgorithm.cpp index 1e89c6a..34e82cf 100644 --- a/lib/Support/DAGDeltaAlgorithm.cpp +++ b/lib/Support/DAGDeltaAlgorithm.cpp @@ -122,7 +122,7 @@ private: DDA.UpdatedSearchState(Changes, Sets, Required); } - /// ExecuteOneTest - Execute a single test predicate on the change set \arg S. + /// ExecuteOneTest - Execute a single test predicate on the change set \p S. bool ExecuteOneTest(const changeset_ty &S) { // Check dependencies invariant. DEBUG({ @@ -143,8 +143,8 @@ public: changeset_ty Run(); - /// GetTestResult - Get the test result for the active set \arg Changes with - /// \arg Required changes from the cache, executing the test if necessary. + /// GetTestResult - Get the test result for the active set \p Changes with + /// \p Required changes from the cache, executing the test if necessary. 
/// /// \param Changes - The set of active changes being minimized, which should /// have their pred closure included in the test. @@ -163,11 +163,11 @@ class DeltaActiveSetHelper : public DeltaAlgorithm { protected: /// UpdatedSearchState - Callback used when the search state changes. virtual void UpdatedSearchState(const changeset_ty &Changes, - const changesetlist_ty &Sets) { + const changesetlist_ty &Sets) LLVM_OVERRIDE { DDAI.UpdatedSearchState(Changes, Sets, Required); } - virtual bool ExecuteOneTest(const changeset_ty &S) { + virtual bool ExecuteOneTest(const changeset_ty &S) LLVM_OVERRIDE { return DDAI.GetTestResult(S, Required); } diff --git a/lib/Support/DataExtractor.cpp b/lib/Support/DataExtractor.cpp index dc21155..3d5cce0 100644 --- a/lib/Support/DataExtractor.cpp +++ b/lib/Support/DataExtractor.cpp @@ -139,7 +139,7 @@ uint64_t DataExtractor::getULEB128(uint32_t *offset_ptr) const { while (isValidOffset(offset)) { byte = Data[offset++]; - result |= (byte & 0x7f) << shift; + result |= uint64_t(byte & 0x7f) << shift; shift += 7; if ((byte & 0x80) == 0) break; @@ -160,7 +160,7 @@ int64_t DataExtractor::getSLEB128(uint32_t *offset_ptr) const { while (isValidOffset(offset)) { byte = Data[offset++]; - result |= (byte & 0x7f) << shift; + result |= uint64_t(byte & 0x7f) << shift; shift += 7; if ((byte & 0x80) == 0) break; @@ -168,7 +168,7 @@ int64_t DataExtractor::getSLEB128(uint32_t *offset_ptr) const { // Sign bit of byte is 2nd high order bit (0x40) if (shift < 64 && (byte & 0x40)) - result |= -(1 << shift); + result |= -(1ULL << shift); *offset_ptr = offset; return result; diff --git a/lib/Support/DataStream.cpp b/lib/Support/DataStream.cpp index 94d14a5..3a38e2a 100644 --- a/lib/Support/DataStream.cpp +++ b/lib/Support/DataStream.cpp @@ -58,7 +58,7 @@ public: virtual ~DataFileStreamer() { close(Fd); } - virtual size_t GetBytes(unsigned char *buf, size_t len) { + virtual size_t GetBytes(unsigned char *buf, size_t len) LLVM_OVERRIDE { NumStreamFetches++; return read(Fd, buf, len); } diff --git a/lib/Support/DynamicLibrary.cpp b/lib/Support/DynamicLibrary.cpp index fb02c07..45fec36 100644 --- a/lib/Support/DynamicLibrary.cpp +++ b/lib/Support/DynamicLibrary.cpp @@ -160,7 +160,7 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char *symbolName) { // On linux we have a weird situation. The stderr/out/in symbols are both // macros and global variables because of standards requirements. So, we // boldly use the EXPLICIT_SYMBOL macro without checking for a #define first. -#if defined(__linux__) +#if defined(__linux__) and !defined(__ANDROID__) { EXPLICIT_SYMBOL(stderr); EXPLICIT_SYMBOL(stdout); diff --git a/lib/Support/Errno.cpp b/lib/Support/Errno.cpp index dd218f6..730220f 100644 --- a/lib/Support/Errno.cpp +++ b/lib/Support/Errno.cpp @@ -13,6 +13,7 @@ #include "llvm/Support/Errno.h" #include "llvm/Config/config.h" // Get autoconf configuration settings +#include "llvm/Support/raw_ostream.h" #if HAVE_STRING_H #include @@ -39,7 +40,7 @@ std::string StrError(int errnum) { const int MaxErrStrLen = 2000; char buffer[MaxErrStrLen]; buffer[0] = '\0'; - char* str = buffer; + std::string str; #ifdef HAVE_STRERROR_R // strerror_r is thread-safe. 
if (errnum) @@ -49,21 +50,25 @@ std::string StrError(int errnum) { str = strerror_r(errnum,buffer,MaxErrStrLen-1); # else strerror_r(errnum,buffer,MaxErrStrLen-1); + str = buffer; # endif #elif HAVE_DECL_STRERROR_S // "Windows Secure API" - if (errnum) + if (errnum) { strerror_s(buffer, MaxErrStrLen - 1, errnum); + str = buffer; + } #elif defined(HAVE_STRERROR) // Copy the thread un-safe result of strerror into // the buffer as fast as possible to minimize impact // of collision of strerror in multiple threads. if (errnum) - strncpy(buffer,strerror(errnum),MaxErrStrLen-1); - buffer[MaxErrStrLen-1] = '\0'; + str = strerror(errnum); #else // Strange that this system doesn't even have strerror // but, oh well, just use a generic message - sprintf(buffer, "Error #%d", errnum); + raw_string_ostream stream(str); + stream << "Error #" << errnum; + stream.flush(); #endif return str; } diff --git a/lib/Support/FoldingSet.cpp b/lib/Support/FoldingSet.cpp index c6282c6..4d489a8 100644 --- a/lib/Support/FoldingSet.cpp +++ b/lib/Support/FoldingSet.cpp @@ -38,6 +38,14 @@ bool FoldingSetNodeIDRef::operator==(FoldingSetNodeIDRef RHS) const { return memcmp(Data, RHS.Data, Size*sizeof(*Data)) == 0; } +/// Used to compare the "ordering" of two nodes as defined by the +/// profiled bits and their ordering defined by memcmp(). +bool FoldingSetNodeIDRef::operator<(FoldingSetNodeIDRef RHS) const { + if (Size != RHS.Size) + return Size < RHS.Size; + return memcmp(Data, RHS.Data, Size*sizeof(*Data)) < 0; +} + //===----------------------------------------------------------------------===// // FoldingSetNodeID Implementation @@ -152,6 +160,16 @@ bool FoldingSetNodeID::operator==(FoldingSetNodeIDRef RHS) const { return FoldingSetNodeIDRef(Bits.data(), Bits.size()) == RHS; } +/// Used to compare the "ordering" of two nodes as defined by the +/// profiled bits and their ordering defined by memcmp(). +bool FoldingSetNodeID::operator<(const FoldingSetNodeID &RHS)const{ + return *this < FoldingSetNodeIDRef(RHS.Bits.data(), RHS.Bits.size()); +} + +bool FoldingSetNodeID::operator<(FoldingSetNodeIDRef RHS) const { + return FoldingSetNodeIDRef(Bits.data(), Bits.size()) < RHS; +} + /// Intern - Copy this node's data to a memory region allocated from the /// given allocator and return a FoldingSetNodeIDRef describing the /// interned data. diff --git a/lib/Support/Host.cpp b/lib/Support/Host.cpp index 9a2c39d..34e32b8 100644 --- a/lib/Support/Host.cpp +++ b/lib/Support/Host.cpp @@ -234,6 +234,8 @@ std::string sys::getHostCPUName() { case 37: // Intel Core i7, laptop version. case 44: // Intel Core i7 processor and Intel Xeon processor. All // processors are manufactured using the 32 nm process. 
+ case 46: // Nehalem EX + case 47: // Westmere EX return "corei7"; // SandyBridge: @@ -303,6 +305,7 @@ std::string sys::getHostCPUName() { case 8: return "k6-2"; case 9: case 13: return "k6-3"; + case 10: return "geode"; default: return "pentium"; } case 6: @@ -500,6 +503,7 @@ std::string sys::getHostCPUName() { .Case("0xb76", "arm1176jz-s") .Case("0xc08", "cortex-a8") .Case("0xc09", "cortex-a9") + .Case("0xc0f", "cortex-a15") .Case("0xc20", "cortex-m0") .Case("0xc23", "cortex-m3") .Case("0xc24", "cortex-m4") diff --git a/lib/Support/LockFileManager.cpp b/lib/Support/LockFileManager.cpp index 64404a1..59bfcfc 100644 --- a/lib/Support/LockFileManager.cpp +++ b/lib/Support/LockFileManager.cpp @@ -49,7 +49,7 @@ LockFileManager::readLockFile(StringRef LockFileName) { } bool LockFileManager::processStillExecuting(StringRef Hostname, int PID) { -#if LLVM_ON_UNIX +#if LLVM_ON_UNIX && !defined(__ANDROID__) char MyHostname[256]; MyHostname[255] = 0; MyHostname[0] = 0; diff --git a/lib/Support/Makefile b/lib/Support/Makefile index d68e500..4a2185d 100644 --- a/lib/Support/Makefile +++ b/lib/Support/Makefile @@ -11,9 +11,6 @@ LEVEL = ../.. LIBRARYNAME = LLVMSupport BUILD_ARCHIVE = 1 -## FIXME: This only requires RTTI because tblgen uses it. Fix that. -REQUIRES_RTTI = 1 - EXTRA_DIST = Unix Win32 README.txt include $(LEVEL)/Makefile.common diff --git a/lib/Support/Memory.cpp b/lib/Support/Memory.cpp index 22f7494..12f0838 100644 --- a/lib/Support/Memory.cpp +++ b/lib/Support/Memory.cpp @@ -16,14 +16,6 @@ #include "llvm/Support/Valgrind.h" #include "llvm/Config/config.h" -#if defined(__mips__) -#include -#endif - -namespace llvm { -using namespace sys; -} - // Include the platform-specific parts of this class. #ifdef LLVM_ON_UNIX #include "Unix/Memory.inc" @@ -31,51 +23,3 @@ using namespace sys; #ifdef LLVM_ON_WIN32 #include "Windows/Memory.inc" #endif - -extern "C" void sys_icache_invalidate(const void *Addr, size_t len); - -/// InvalidateInstructionCache - Before the JIT can run a block of code -/// that has been emitted it must invalidate the instruction cache on some -/// platforms. -void llvm::sys::Memory::InvalidateInstructionCache(const void *Addr, - size_t Len) { - -// icache invalidation for PPC and ARM. -#if defined(__APPLE__) - -# if (defined(__POWERPC__) || defined (__ppc__) || \ - defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__) - sys_icache_invalidate(const_cast(Addr), Len); -# endif - -#else - -# if (defined(__POWERPC__) || defined (__ppc__) || \ - defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__) - const size_t LineSize = 32; - - const intptr_t Mask = ~(LineSize - 1); - const intptr_t StartLine = ((intptr_t) Addr) & Mask; - const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask; - - for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize) - asm volatile("dcbf 0, %0" : : "r"(Line)); - asm volatile("sync"); - - for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize) - asm volatile("icbi 0, %0" : : "r"(Line)); - asm volatile("isync"); -# elif defined(__arm__) && defined(__GNUC__) - // FIXME: Can we safely always call this for __GNUC__ everywhere? 
- const char *Start = static_cast(Addr); - const char *End = Start + Len; - __clear_cache(const_cast(Start), const_cast(End)); -# elif defined(__mips__) - const char *Start = static_cast(Addr); - cacheflush(const_cast(Start), Len, BCACHE); -# endif - -#endif // end apple - - ValgrindDiscardTranslations(Addr, Len); -} diff --git a/lib/Support/MemoryBuffer.cpp b/lib/Support/MemoryBuffer.cpp index 992f03c..ec373e7 100644 --- a/lib/Support/MemoryBuffer.cpp +++ b/lib/Support/MemoryBuffer.cpp @@ -33,6 +33,9 @@ #include #else #include +#ifndef S_ISFIFO +#define S_ISFIFO(x) (0) +#endif #endif #include using namespace llvm; @@ -81,12 +84,12 @@ public: init(InputData.begin(), InputData.end(), RequiresNullTerminator); } - virtual const char *getBufferIdentifier() const { + virtual const char *getBufferIdentifier() const LLVM_OVERRIDE { // The name is stored after the class itself. return reinterpret_cast(this + 1); } - - virtual BufferKind getBufferKind() const { + + virtual BufferKind getBufferKind() const LLVM_OVERRIDE { return MemoryBuffer_Malloc; } }; @@ -194,13 +197,34 @@ public: sys::Path::UnMapFilePages(reinterpret_cast(RealStart), RealSize); } - - virtual BufferKind getBufferKind() const { + + virtual BufferKind getBufferKind() const LLVM_OVERRIDE { return MemoryBuffer_MMap; } }; } +static error_code getMemoryBufferForStream(int FD, + StringRef BufferName, + OwningPtr &result) { + const ssize_t ChunkSize = 4096*4; + SmallString Buffer; + ssize_t ReadBytes; + // Read into Buffer until we hit EOF. + do { + Buffer.reserve(Buffer.size() + ChunkSize); + ReadBytes = read(FD, Buffer.end(), ChunkSize); + if (ReadBytes == -1) { + if (errno == EINTR) continue; + return error_code(errno, posix_category()); + } + Buffer.set_size(Buffer.size() + ReadBytes); + } while (ReadBytes != 0); + + result.reset(MemoryBuffer::getMemBufferCopy(Buffer, BufferName)); + return error_code::success(); +} + error_code MemoryBuffer::getFile(StringRef Filename, OwningPtr &result, int64_t FileSize, @@ -297,6 +321,13 @@ error_code MemoryBuffer::getOpenFile(int FD, const char *Filename, if (fstat(FD, &FileInfo) == -1) { return error_code(errno, posix_category()); } + + // If this is a named pipe, we can't trust the size. Create the memory + // buffer by copying off the stream. + if (S_ISFIFO(FileInfo.st_mode)) { + return getMemoryBufferForStream(FD, Filename, result); + } + FileSize = FileInfo.st_size; } MapSize = FileSize; @@ -370,20 +401,5 @@ error_code MemoryBuffer::getSTDIN(OwningPtr &result) { // fallback if it fails. sys::Program::ChangeStdinToBinary(); - const ssize_t ChunkSize = 4096*4; - SmallString Buffer; - ssize_t ReadBytes; - // Read into Buffer until we hit EOF. - do { - Buffer.reserve(Buffer.size() + ChunkSize); - ReadBytes = read(0, Buffer.end(), ChunkSize); - if (ReadBytes == -1) { - if (errno == EINTR) continue; - return error_code(errno, posix_category()); - } - Buffer.set_size(Buffer.size() + ReadBytes); - } while (ReadBytes != 0); - - result.reset(getMemBufferCopy(Buffer, "")); - return error_code::success(); + return getMemoryBufferForStream(0, "", result); } diff --git a/lib/Support/SmallVector.cpp b/lib/Support/SmallVector.cpp index a89f149..f9c0e78 100644 --- a/lib/Support/SmallVector.cpp +++ b/lib/Support/SmallVector.cpp @@ -16,14 +16,15 @@ using namespace llvm; /// grow_pod - This is an implementation of the grow() method which only works /// on POD-like datatypes and is out of line to reduce code duplication. 
-void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) { +void SmallVectorBase::grow_pod(void *FirstEl, size_t MinSizeInBytes, + size_t TSize) { size_t CurSizeBytes = size_in_bytes(); size_t NewCapacityInBytes = 2 * capacity_in_bytes() + TSize; // Always grow. if (NewCapacityInBytes < MinSizeInBytes) NewCapacityInBytes = MinSizeInBytes; void *NewElts; - if (this->isSmall()) { + if (BeginX == FirstEl) { NewElts = malloc(NewCapacityInBytes); // Copy the elements over. No need to run dtors on PODs. @@ -37,4 +38,3 @@ void SmallVectorBase::grow_pod(size_t MinSizeInBytes, size_t TSize) { this->BeginX = NewElts; this->CapacityX = (char*)this->BeginX + NewCapacityInBytes; } - diff --git a/lib/Support/StreamableMemoryObject.cpp b/lib/Support/StreamableMemoryObject.cpp index fe3752a..59e27a2 100644 --- a/lib/Support/StreamableMemoryObject.cpp +++ b/lib/Support/StreamableMemoryObject.cpp @@ -8,6 +8,7 @@ //===----------------------------------------------------------------------===// #include "llvm/Support/StreamableMemoryObject.h" +#include "llvm/Support/Compiler.h" #include #include @@ -23,18 +24,23 @@ public: assert(LastChar >= FirstChar && "Invalid start/end range"); } - virtual uint64_t getBase() const { return 0; } - virtual uint64_t getExtent() const { return LastChar - FirstChar; } - virtual int readByte(uint64_t address, uint8_t* ptr) const; + virtual uint64_t getBase() const LLVM_OVERRIDE { return 0; } + virtual uint64_t getExtent() const LLVM_OVERRIDE { + return LastChar - FirstChar; + } + virtual int readByte(uint64_t address, uint8_t* ptr) const LLVM_OVERRIDE; virtual int readBytes(uint64_t address, uint64_t size, uint8_t* buf, - uint64_t* copied) const; - virtual const uint8_t *getPointer(uint64_t address, uint64_t size) const; - virtual bool isValidAddress(uint64_t address) const { + uint64_t* copied) const LLVM_OVERRIDE; + virtual const uint8_t *getPointer(uint64_t address, + uint64_t size) const LLVM_OVERRIDE; + virtual bool isValidAddress(uint64_t address) const LLVM_OVERRIDE { return validAddress(address); } - virtual bool isObjectEnd(uint64_t address) const {return objectEnd(address);} + virtual bool isObjectEnd(uint64_t address) const LLVM_OVERRIDE { + return objectEnd(address); + } private: const uint8_t* const FirstChar; @@ -49,8 +55,8 @@ private: return static_cast(address) == LastChar - FirstChar; } - RawMemoryObject(const RawMemoryObject&); // DO NOT IMPLEMENT - void operator=(const RawMemoryObject&); // DO NOT IMPLEMENT + RawMemoryObject(const RawMemoryObject&) LLVM_DELETED_FUNCTION; + void operator=(const RawMemoryObject&) LLVM_DELETED_FUNCTION; }; int RawMemoryObject::readByte(uint64_t address, uint8_t* ptr) const { diff --git a/lib/Support/StringMap.cpp b/lib/Support/StringMap.cpp index c2fc261..9ac1f86 100644 --- a/lib/Support/StringMap.cpp +++ b/lib/Support/StringMap.cpp @@ -13,6 +13,7 @@ #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/Support/Compiler.h" #include using namespace llvm; @@ -69,7 +70,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name) { while (1) { StringMapEntryBase *BucketItem = TheTable[BucketNo]; // If we found an empty bucket, this key isn't in the table yet, return it. - if (BucketItem == 0) { + if (LLVM_LIKELY(BucketItem == 0)) { // If we found a tombstone, we want to reuse the tombstone instead of an // empty bucket. This reduces probing. 
if (FirstTombstone != -1) { @@ -84,7 +85,7 @@ unsigned StringMapImpl::LookupBucketFor(StringRef Name) { if (BucketItem == getTombstoneVal()) { // Skip over tombstones. However, remember the first one we see. if (FirstTombstone == -1) FirstTombstone = BucketNo; - } else if (HashTable[BucketNo] == FullHashValue) { + } else if (LLVM_LIKELY(HashTable[BucketNo] == FullHashValue)) { // If the full hash value matches, check deeply for a match. The common // case here is that we are only looking at the buckets (for item info // being non-null and for the full hash value) not at the items. This @@ -123,12 +124,12 @@ int StringMapImpl::FindKey(StringRef Key) const { while (1) { StringMapEntryBase *BucketItem = TheTable[BucketNo]; // If we found an empty bucket, this key isn't in the table yet, return. - if (BucketItem == 0) + if (LLVM_LIKELY(BucketItem == 0)) return -1; if (BucketItem == getTombstoneVal()) { // Ignore tombstones. - } else if (HashTable[BucketNo] == FullHashValue) { + } else if (LLVM_LIKELY(HashTable[BucketNo] == FullHashValue)) { // If the full hash value matches, check deeply for a match. The common // case here is that we are only looking at the buckets (for item info // being non-null and for the full hash value) not at the items. This diff --git a/lib/Support/StringRef.cpp b/lib/Support/StringRef.cpp index 8aab4b2..f8e9208 100644 --- a/lib/Support/StringRef.cpp +++ b/lib/Support/StringRef.cpp @@ -350,8 +350,8 @@ bool llvm::getAsUnsignedInteger(StringRef Str, unsigned Radix, unsigned long long PrevResult = Result; Result = Result*Radix+CharVal; - // Check for overflow. - if (Result < PrevResult) + // Check for overflow by shifting back and seeing if bits were lost. + if (Result/Radix < PrevResult) return true; Str = Str.substr(1); diff --git a/lib/Support/Triple.cpp b/lib/Support/Triple.cpp index cca549d..c058c05 100644 --- a/lib/Support/Triple.cpp +++ b/lib/Support/Triple.cpp @@ -42,6 +42,8 @@ const char *Triple::getArchTypeName(ArchType Kind) { case nvptx64: return "nvptx64"; case le32: return "le32"; case amdil: return "amdil"; + case spir: return "spir"; + case spir64: return "spir64"; } llvm_unreachable("Invalid ArchType!"); @@ -83,6 +85,8 @@ const char *Triple::getArchTypePrefix(ArchType Kind) { case nvptx64: return "nvptx"; case le32: return "le32"; case amdil: return "amdil"; + case spir: return "spir"; + case spir64: return "spir"; } } @@ -95,6 +99,8 @@ const char *Triple::getVendorTypeName(VendorType Kind) { case SCEI: return "scei"; case BGP: return "bgp"; case BGQ: return "bgq"; + case Freescale: return "fsl"; + case IBM: return "ibm"; } llvm_unreachable("Invalid VendorType!"); @@ -125,6 +131,7 @@ const char *Triple::getOSTypeName(OSType Kind) { case NativeClient: return "nacl"; case CNK: return "cnk"; case Bitrig: return "bitrig"; + case AIX: return "aix"; } llvm_unreachable("Invalid OSType"); @@ -138,7 +145,8 @@ const char *Triple::getEnvironmentTypeName(EnvironmentType Kind) { case GNUEABI: return "gnueabi"; case EABI: return "eabi"; case MachO: return "macho"; - case ANDROIDEABI: return "androideabi"; + case Android: return "android"; + case ELF: return "elf"; } llvm_unreachable("Invalid EnvironmentType!"); @@ -170,40 +178,11 @@ Triple::ArchType Triple::getArchTypeForLLVMName(StringRef Name) { .Case("nvptx64", nvptx64) .Case("le32", le32) .Case("amdil", amdil) + .Case("spir", spir) + .Case("spir64", spir64) .Default(UnknownArch); } -Triple::ArchType Triple::getArchTypeForDarwinArchName(StringRef Str) { - // See arch(3) and llvm-gcc's driver-driver.c. 
We don't implement support for - // archs which Darwin doesn't use. - - // The matching this routine does is fairly pointless, since it is neither the - // complete architecture list, nor a reasonable subset. The problem is that - // historically the driver driver accepts this and also ties its -march= - // handling to the architecture name, so we need to be careful before removing - // support for it. - - // This code must be kept in sync with Clang's Darwin specific argument - // translation. - - return StringSwitch(Str) - .Cases("ppc", "ppc601", "ppc603", "ppc604", "ppc604e", Triple::ppc) - .Cases("ppc750", "ppc7400", "ppc7450", "ppc970", Triple::ppc) - .Case("ppc64", Triple::ppc64) - .Cases("i386", "i486", "i486SX", "i586", "i686", Triple::x86) - .Cases("pentium", "pentpro", "pentIIm3", "pentIIm5", "pentium4", - Triple::x86) - .Case("x86_64", Triple::x86_64) - // This is derived from the driver driver. - .Cases("arm", "armv4t", "armv5", "armv6", Triple::arm) - .Cases("armv7", "armv7f", "armv7k", "armv7s", "xscale", Triple::arm) - .Case("r600", Triple::r600) - .Case("nvptx", Triple::nvptx) - .Case("nvptx64", Triple::nvptx64) - .Case("amdil", Triple::amdil) - .Default(Triple::UnknownArch); -} - // Returns architecture name that is understood by the target assembler. const char *Triple::getArchNameForAssembler() { if (!isOSDarwin() && getVendor() != Triple::Apple) @@ -225,6 +204,8 @@ const char *Triple::getArchNameForAssembler() { .Case("nvptx64", "nvptx64") .Case("le32", "le32") .Case("amdil", "amdil") + .Case("spir", "spir") + .Case("spir64", "spir64") .Default(NULL); } @@ -259,6 +240,8 @@ static Triple::ArchType parseArch(StringRef ArchName) { .Case("nvptx64", Triple::nvptx64) .Case("le32", Triple::le32) .Case("amdil", Triple::amdil) + .Case("spir", Triple::spir) + .Case("spir64", Triple::spir64) .Default(Triple::UnknownArch); } @@ -269,6 +252,8 @@ static Triple::VendorType parseVendor(StringRef VendorName) { .Case("scei", Triple::SCEI) .Case("bgp", Triple::BGP) .Case("bgq", Triple::BGQ) + .Case("fsl", Triple::Freescale) + .Case("ibm", Triple::IBM) .Default(Triple::UnknownVendor); } @@ -295,6 +280,7 @@ static Triple::OSType parseOS(StringRef OSName) { .StartsWith("nacl", Triple::NativeClient) .StartsWith("cnk", Triple::CNK) .StartsWith("bitrig", Triple::Bitrig) + .StartsWith("aix", Triple::AIX) .Default(Triple::UnknownOS); } @@ -305,7 +291,8 @@ static Triple::EnvironmentType parseEnvironment(StringRef EnvironmentName) { .StartsWith("gnueabi", Triple::GNUEABI) .StartsWith("gnu", Triple::GNU) .StartsWith("macho", Triple::MachO) - .StartsWith("androideabi", Triple::ANDROIDEABI) + .StartsWith("android", Triple::Android) + .StartsWith("elf", Triple::ELF) .Default(Triple::UnknownEnvironment); } @@ -690,6 +677,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) { case llvm::Triple::thumb: case llvm::Triple::x86: case llvm::Triple::xcore: + case llvm::Triple::spir: return 32; case llvm::Triple::mips64: @@ -698,6 +686,7 @@ static unsigned getArchPointerBitWidth(llvm::Triple::ArchType Arch) { case llvm::Triple::ppc64: case llvm::Triple::sparcv9: case llvm::Triple::x86_64: + case llvm::Triple::spir64: return 64; } llvm_unreachable("Invalid architecture value"); @@ -724,6 +713,7 @@ Triple Triple::get32BitArchVariant() const { break; case Triple::amdil: + case Triple::spir: case Triple::arm: case Triple::cellspu: case Triple::hexagon: @@ -748,6 +738,7 @@ Triple Triple::get32BitArchVariant() const { case Triple::ppc64: T.setArch(Triple::ppc); break; case Triple::sparcv9: 
T.setArch(Triple::sparc); break; case Triple::x86_64: T.setArch(Triple::x86); break; + case Triple::spir64: T.setArch(Triple::spir); break; } return T; } @@ -770,6 +761,7 @@ Triple Triple::get64BitArchVariant() const { T.setArch(UnknownArch); break; + case Triple::spir64: case Triple::mips64: case Triple::mips64el: case Triple::nvptx64: @@ -785,6 +777,7 @@ Triple Triple::get64BitArchVariant() const { case Triple::ppc: T.setArch(Triple::ppc64); break; case Triple::sparc: T.setArch(Triple::sparcv9); break; case Triple::x86: T.setArch(Triple::x86_64); break; + case Triple::spir: T.setArch(Triple::spir64); break; } return T; } diff --git a/lib/Support/Unix/Memory.inc b/lib/Support/Unix/Memory.inc index 5a57a28..9a8abd2 100644 --- a/lib/Support/Unix/Memory.inc +++ b/lib/Support/Unix/Memory.inc @@ -13,6 +13,7 @@ #include "Unix.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Process.h" #ifdef HAVE_SYS_MMAN_H @@ -23,14 +24,146 @@ #include #endif +#if defined(__mips__) +# if defined(__OpenBSD__) +# include +# else +# include +# endif +#endif + +extern "C" void sys_icache_invalidate(const void *Addr, size_t len); + +namespace { + +int getPosixProtectionFlags(unsigned Flags) { + switch (Flags) { + case llvm::sys::Memory::MF_READ: + return PROT_READ; + case llvm::sys::Memory::MF_WRITE: + return PROT_WRITE; + case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE: + return PROT_READ | PROT_WRITE; + case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC: + return PROT_READ | PROT_EXEC; + case llvm::sys::Memory::MF_READ | + llvm::sys::Memory::MF_WRITE | + llvm::sys::Memory::MF_EXEC: + return PROT_READ | PROT_WRITE | PROT_EXEC; + case llvm::sys::Memory::MF_EXEC: + return PROT_EXEC; + default: + llvm_unreachable("Illegal memory protection flag specified!"); + } + // Provide a default return value as required by some compilers. + return PROT_NONE; +} + +} // namespace + +namespace llvm { +namespace sys { + +MemoryBlock +Memory::allocateMappedMemory(size_t NumBytes, + const MemoryBlock *const NearBlock, + unsigned PFlags, + error_code &EC) { + EC = error_code::success(); + if (NumBytes == 0) + return MemoryBlock(); + + static const size_t PageSize = Process::GetPageSize(); + const size_t NumPages = (NumBytes+PageSize-1)/PageSize; + + int fd = -1; +#ifdef NEED_DEV_ZERO_FOR_MMAP + static int zero_fd = open("/dev/zero", O_RDWR); + if (zero_fd == -1) { + EC = error_code(errno, system_category()); + return MemoryBlock(); + } + fd = zero_fd; +#endif + + int MMFlags = MAP_PRIVATE | +#ifdef HAVE_MMAP_ANONYMOUS + MAP_ANONYMOUS +#else + MAP_ANON +#endif + ; // Ends statement above + + int Protect = getPosixProtectionFlags(PFlags); + + // Use any near hint and the page size to set a page-aligned starting address + uintptr_t Start = NearBlock ? 
reinterpret_cast(NearBlock->base()) + + NearBlock->size() : 0; + if (Start && Start % PageSize) + Start += PageSize - Start % PageSize; + + void *Addr = ::mmap(reinterpret_cast(Start), PageSize*NumPages, + Protect, MMFlags, fd, 0); + if (Addr == MAP_FAILED) { + if (NearBlock) //Try again without a near hint + return allocateMappedMemory(NumBytes, 0, PFlags, EC); + + EC = error_code(errno, system_category()); + return MemoryBlock(); + } + + MemoryBlock Result; + Result.Address = Addr; + Result.Size = NumPages*PageSize; + + if (PFlags & MF_EXEC) + Memory::InvalidateInstructionCache(Result.Address, Result.Size); + + return Result; +} + +error_code +Memory::releaseMappedMemory(MemoryBlock &M) { + if (M.Address == 0 || M.Size == 0) + return error_code::success(); + + if (0 != ::munmap(M.Address, M.Size)) + return error_code(errno, system_category()); + + M.Address = 0; + M.Size = 0; + + return error_code::success(); +} + +error_code +Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) { + if (M.Address == 0 || M.Size == 0) + return error_code::success(); + + if (!Flags) + return error_code(EINVAL, generic_category()); + + int Protect = getPosixProtectionFlags(Flags); + + int Result = ::mprotect(M.Address, M.Size, Protect); + if (Result != 0) + return error_code(errno, system_category()); + + if (Flags & MF_EXEC) + Memory::InvalidateInstructionCache(M.Address, M.Size); + + return error_code::success(); +} + /// AllocateRWX - Allocate a slab of memory with read/write/execute /// permissions. This is typically used for JIT applications where we want /// to emit code to the memory then jump to it. Getting this type of memory /// is very OS specific. /// -llvm::sys::MemoryBlock -llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, - std::string *ErrMsg) { +MemoryBlock +Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, + std::string *ErrMsg) { if (NumBytes == 0) return MemoryBlock(); size_t pageSize = Process::GetPageSize(); @@ -78,7 +211,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY); if (KERN_SUCCESS != kr) { MakeErrMsg(ErrMsg, "vm_protect max RX failed"); - return sys::MemoryBlock(); + return MemoryBlock(); } kr = vm_protect(mach_task_self(), (vm_address_t)pa, @@ -86,7 +219,7 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, VM_PROT_READ | VM_PROT_WRITE); if (KERN_SUCCESS != kr) { MakeErrMsg(ErrMsg, "vm_protect RW failed"); - return sys::MemoryBlock(); + return MemoryBlock(); } #endif @@ -97,17 +230,17 @@ llvm::sys::Memory::AllocateRWX(size_t NumBytes, const MemoryBlock* NearBlock, return result; } -bool llvm::sys::Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { +bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { if (M.Address == 0 || M.Size == 0) return false; if (0 != ::munmap(M.Address, M.Size)) return MakeErrMsg(ErrMsg, "Can't release RWX Memory"); return false; } -bool llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) { +bool Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) { #if defined(__APPLE__) && defined(__arm__) if (M.Address == 0 || M.Size == 0) return false; - sys::Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.Size); kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address, (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_WRITE); return KERN_SUCCESS == kr; @@ -116,10 +249,10 @@ bool 
llvm::sys::Memory::setWritable (MemoryBlock &M, std::string *ErrMsg) { #endif } -bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) { +bool Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) { #if defined(__APPLE__) && defined(__arm__) if (M.Address == 0 || M.Size == 0) return false; - sys::Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.Size); kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)M.Address, (vm_size_t)M.Size, 0, VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_COPY); return KERN_SUCCESS == kr; @@ -128,7 +261,7 @@ bool llvm::sys::Memory::setExecutable (MemoryBlock &M, std::string *ErrMsg) { #endif } -bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) { +bool Memory::setRangeWritable(const void *Addr, size_t Size) { #if defined(__APPLE__) && defined(__arm__) kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr, (vm_size_t)Size, 0, @@ -139,7 +272,7 @@ bool llvm::sys::Memory::setRangeWritable(const void *Addr, size_t Size) { #endif } -bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) { +bool Memory::setRangeExecutable(const void *Addr, size_t Size) { #if defined(__APPLE__) && defined(__arm__) kern_return_t kr = vm_protect(mach_task_self(), (vm_address_t)Addr, (vm_size_t)Size, 0, @@ -149,3 +282,52 @@ bool llvm::sys::Memory::setRangeExecutable(const void *Addr, size_t Size) { return true; #endif } + +/// InvalidateInstructionCache - Before the JIT can run a block of code +/// that has been emitted it must invalidate the instruction cache on some +/// platforms. +void Memory::InvalidateInstructionCache(const void *Addr, + size_t Len) { + +// icache invalidation for PPC and ARM. +#if defined(__APPLE__) + +# if (defined(__POWERPC__) || defined (__ppc__) || \ + defined(_POWER) || defined(_ARCH_PPC)) || defined(__arm__) + sys_icache_invalidate(const_cast(Addr), Len); +# endif + +#else + +# if (defined(__POWERPC__) || defined (__ppc__) || \ + defined(_POWER) || defined(_ARCH_PPC)) && defined(__GNUC__) + const size_t LineSize = 32; + + const intptr_t Mask = ~(LineSize - 1); + const intptr_t StartLine = ((intptr_t) Addr) & Mask; + const intptr_t EndLine = ((intptr_t) Addr + Len + LineSize - 1) & Mask; + + for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize) + asm volatile("dcbf 0, %0" : : "r"(Line)); + asm volatile("sync"); + + for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize) + asm volatile("icbi 0, %0" : : "r"(Line)); + asm volatile("isync"); +# elif defined(__arm__) && defined(__GNUC__) + // FIXME: Can we safely always call this for __GNUC__ everywhere? 
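As an illustrative aside (not part of the patch itself): the allocate/protect/release entry points added above in this file suggest the following JIT-style calling pattern. This is a minimal sketch, assuming the matching declarations in llvm/Support/Memory.h; runStub and its arguments are invented purely for illustration:

    #include "llvm/Support/Memory.h"
    #include "llvm/Support/system_error.h"
    #include <cstring>
    using namespace llvm;

    // Copy finished machine code into fresh memory, flip it to RX, and run it.
    static bool runStub(const char *Code, size_t Len) {
      error_code EC;
      sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
          Len, 0, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
      if (EC != error_code::success())
        return false;                              // allocation failed
      memcpy(MB.base(), Code, Len);                // emit while still writable
      // Requesting MF_EXEC also invalidates the instruction cache (see above).
      if (sys::Memory::protectMappedMemory(
              MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC) !=
          error_code::success())
        return false;
      reinterpret_cast<void (*)()>(MB.base())();   // jump to the emitted code
      sys::Memory::releaseMappedMemory(MB);
      return true;
    }
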
+ const char *Start = static_cast(Addr); + const char *End = Start + Len; + __clear_cache(const_cast(Start), const_cast(End)); +# elif defined(__mips__) + const char *Start = static_cast(Addr); + cacheflush(const_cast(Start), Len, BCACHE); +# endif + +#endif // end apple + + ValgrindDiscardTranslations(Addr, Len); +} + +} // namespace sys +} // namespace llvm diff --git a/lib/Support/Unix/Path.inc b/lib/Support/Unix/Path.inc index 6bddbdf..6a5ebb8 100644 --- a/lib/Support/Unix/Path.inc +++ b/lib/Support/Unix/Path.inc @@ -261,7 +261,8 @@ Path::GetCurrentDirectory() { } #if defined(__FreeBSD__) || defined (__NetBSD__) || defined(__Bitrig__) || \ - defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__) + defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__) || \ + defined(__linux__) || defined(__CYGWIN__) static int test_dir(char buf[PATH_MAX], char ret[PATH_MAX], const char *dir, const char *bin) @@ -337,9 +338,17 @@ Path Path::GetMainExecutable(const char *argv0, void *MainAddr) { return Path(exe_path); #elif defined(__linux__) || defined(__CYGWIN__) char exe_path[MAXPATHLEN]; - ssize_t len = readlink("/proc/self/exe", exe_path, sizeof(exe_path)); - if (len >= 0) - return Path(StringRef(exe_path, len)); + StringRef aPath("/proc/self/exe"); + if (sys::fs::exists(aPath)) { + // /proc is not always mounted under Linux (chroot for example). + ssize_t len = readlink(aPath.str().c_str(), exe_path, sizeof(exe_path)); + if (len >= 0) + return Path(StringRef(exe_path, len)); + } else { + // Fall back to the classical detection. + if (getprogpath(exe_path, argv0) != NULL) + return Path(exe_path); + } #elif defined(HAVE_DLFCN_H) // Use dladdr to get executable path if available. Dl_info DLInfo; diff --git a/lib/Support/Unix/Signals.inc b/lib/Support/Unix/Signals.inc index 5195116..9e94068 100644 --- a/lib/Support/Unix/Signals.inc +++ b/lib/Support/Unix/Signals.inc @@ -121,17 +121,29 @@ static void UnregisterHandlers() { /// NB: This must be an async signal safe function. It cannot allocate or free /// memory, even in debug builds. static void RemoveFilesToRemove() { - // Note: avoid iterators in case of debug iterators that allocate or release + // We avoid iterators in case of debug iterators that allocate or release // memory. for (unsigned i = 0, e = FilesToRemove.size(); i != e; ++i) { - // Note that we don't want to use any external code here, and we don't care - // about errors. We're going to try as hard as we can as often as we need - // to to make these files go away. If these aren't files, too bad. - // - // We do however rely on a std::string implementation for which repeated - // calls to 'c_str()' don't allocate memory. We pre-call 'c_str()' on all - // of these strings to try to ensure this is safe. - unlink(FilesToRemove[i].c_str()); + // We rely on a std::string implementation for which repeated calls to + // 'c_str()' don't allocate memory. We pre-call 'c_str()' on all of these + // strings to try to ensure this is safe. + const char *path = FilesToRemove[i].c_str(); + + // Get the status so we can determine if it's a file or directory. If we + // can't stat the file, ignore it. + struct stat buf; + if (stat(path, &buf) != 0) + continue; + + // If this is not a regular file, ignore it. We want to prevent removal of + // special files like /dev/null, even if the compiler is being run with the + // super-user permissions. + if (!S_ISREG(buf.st_mode)) + continue; + + // Otherwise, remove the file. 
We ignore any errors here as there is nothing + // else we can do. + unlink(path); } } @@ -243,7 +255,7 @@ void llvm::sys::AddSignalHandler(void (*FnPtr)(void *), void *Cookie) { // On glibc systems we have the 'backtrace' function, which works nicely, but // doesn't demangle symbols. static void PrintStackTrace(void *) { -#ifdef HAVE_BACKTRACE +#if defined(HAVE_BACKTRACE) && defined(ENABLE_BACKTRACES) static void* StackTrace[256]; // Use backtrace() to output a backtrace on Linux systems with glibc. int depth = backtrace(StackTrace, @@ -293,7 +305,7 @@ static void PrintStackTrace(void *) { #endif } -/// PrintStackTraceOnErrorSignal - When an error signal (such as SIBABRT or +/// PrintStackTraceOnErrorSignal - When an error signal (such as SIGABRT or /// SIGSEGV) is delivered to the process, print a stack trace and then exit. void llvm::sys::PrintStackTraceOnErrorSignal() { AddSignalHandler(PrintStackTrace, 0); @@ -305,10 +317,10 @@ void llvm::sys::PrintStackTraceOnErrorSignal() { exception_mask_t mask = EXC_MASK_CRASH; - kern_return_t ret = task_set_exception_ports(self, + kern_return_t ret = task_set_exception_ports(self, mask, MACH_PORT_NULL, - EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, + EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES, THREAD_STATE_NONE); (void)ret; } diff --git a/lib/Support/Windows/Memory.inc b/lib/Support/Windows/Memory.inc index fcc7283..cb80f28 100644 --- a/lib/Support/Windows/Memory.inc +++ b/lib/Support/Windows/Memory.inc @@ -12,51 +12,163 @@ // //===----------------------------------------------------------------------===// -#include "Windows.h" #include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Process.h" +#include "Windows.h" + +namespace { + +DWORD getWindowsProtectionFlags(unsigned Flags) { + switch (Flags) { + // Contrary to what you might expect, the Windows page protection flags + // are not a bitwise combination of RWX values + case llvm::sys::Memory::MF_READ: + return PAGE_READONLY; + case llvm::sys::Memory::MF_WRITE: + // Note: PAGE_WRITE is not supported by VirtualProtect + return PAGE_READWRITE; + case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_WRITE: + return PAGE_READWRITE; + case llvm::sys::Memory::MF_READ|llvm::sys::Memory::MF_EXEC: + return PAGE_EXECUTE_READ; + case llvm::sys::Memory::MF_READ | + llvm::sys::Memory::MF_WRITE | + llvm::sys::Memory::MF_EXEC: + return PAGE_EXECUTE_READWRITE; + case llvm::sys::Memory::MF_EXEC: + return PAGE_EXECUTE; + default: + llvm_unreachable("Illegal memory protection flag specified!"); + } + // Provide a default return value as required by some compilers. 
+ return PAGE_NOACCESS; +} + +size_t getAllocationGranularity() { + SYSTEM_INFO Info; + ::GetSystemInfo(&Info); + if (Info.dwPageSize > Info.dwAllocationGranularity) + return Info.dwPageSize; + else + return Info.dwAllocationGranularity; +} + +} // namespace namespace llvm { -using namespace sys; +namespace sys { //===----------------------------------------------------------------------===// //=== WARNING: Implementation here must contain only Win32 specific code //=== and must not be UNIX code //===----------------------------------------------------------------------===// -MemoryBlock Memory::AllocateRWX(size_t NumBytes, - const MemoryBlock *NearBlock, - std::string *ErrMsg) { - if (NumBytes == 0) return MemoryBlock(); +MemoryBlock Memory::allocateMappedMemory(size_t NumBytes, + const MemoryBlock *const NearBlock, + unsigned Flags, + error_code &EC) { + EC = error_code::success(); + if (NumBytes == 0) + return MemoryBlock(); + + // While we'd be happy to allocate single pages, the Windows allocation + // granularity may be larger than a single page (in practice, it is 64K) + // so mapping less than that will create an unreachable fragment of memory. + static const size_t Granularity = getAllocationGranularity(); + const size_t NumBlocks = (NumBytes+Granularity-1)/Granularity; - static const size_t pageSize = Process::GetPageSize(); - size_t NumPages = (NumBytes+pageSize-1)/pageSize; + uintptr_t Start = NearBlock ? reinterpret_cast(NearBlock->base()) + + NearBlock->size() + : NULL; - PVOID start = NearBlock ? static_cast(NearBlock->base()) + - NearBlock->size() : NULL; + // If the requested address is not aligned to the allocation granularity, + // round up to get beyond NearBlock. VirtualAlloc would have rounded down. + if (Start && Start % Granularity != 0) + Start += Granularity - Start % Granularity; - void *pa = VirtualAlloc(start, NumPages*pageSize, MEM_RESERVE | MEM_COMMIT, - PAGE_EXECUTE_READWRITE); - if (pa == NULL) { + DWORD Protect = getWindowsProtectionFlags(Flags); + + void *PA = ::VirtualAlloc(reinterpret_cast(Start), + NumBlocks*Granularity, + MEM_RESERVE | MEM_COMMIT, Protect); + if (PA == NULL) { if (NearBlock) { // Try again without the NearBlock hint - return AllocateRWX(NumBytes, NULL, ErrMsg); + return allocateMappedMemory(NumBytes, NULL, Flags, EC); } - MakeErrMsg(ErrMsg, "Can't allocate RWX Memory: "); + EC = error_code(::GetLastError(), system_category()); return MemoryBlock(); } - MemoryBlock result; - result.Address = pa; - result.Size = NumPages*pageSize; - return result; + MemoryBlock Result; + Result.Address = PA; + Result.Size = NumBlocks*Granularity; + ; + if (Flags & MF_EXEC) + Memory::InvalidateInstructionCache(Result.Address, Result.Size); + + return Result; } -bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { - if (M.Address == 0 || M.Size == 0) return false; +error_code Memory::releaseMappedMemory(MemoryBlock &M) { + if (M.Address == 0 || M.Size == 0) + return error_code::success(); + if (!VirtualFree(M.Address, 0, MEM_RELEASE)) - return MakeErrMsg(ErrMsg, "Can't release RWX Memory: "); - return false; + return error_code(::GetLastError(), system_category()); + + M.Address = 0; + M.Size = 0; + + return error_code::success(); +} + +error_code Memory::protectMappedMemory(const MemoryBlock &M, + unsigned Flags) { + if (M.Address == 0 || M.Size == 0) + return error_code::success(); + + DWORD Protect = getWindowsProtectionFlags(Flags); + + DWORD OldFlags; + if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags)) + return 
error_code(::GetLastError(), system_category()); + + if (Flags & MF_EXEC) + Memory::InvalidateInstructionCache(M.Address, M.Size); + + return error_code::success(); +} + +/// InvalidateInstructionCache - Before the JIT can run a block of code +/// that has been emitted it must invalidate the instruction cache on some +/// platforms. +void Memory::InvalidateInstructionCache( + const void *Addr, size_t Len) { + FlushInstructionCache(GetCurrentProcess(), Addr, Len); +} + + +MemoryBlock Memory::AllocateRWX(size_t NumBytes, + const MemoryBlock *NearBlock, + std::string *ErrMsg) { + MemoryBlock MB; + error_code EC; + MB = allocateMappedMemory(NumBytes, NearBlock, + MF_READ|MF_WRITE|MF_EXEC, EC); + if (EC != error_code::success() && ErrMsg) { + MakeErrMsg(ErrMsg, EC.message()); + } + return MB; +} + +bool Memory::ReleaseRWX(MemoryBlock &M, std::string *ErrMsg) { + error_code EC = releaseMappedMemory(M); + if (EC == error_code::success()) + return false; + MakeErrMsg(ErrMsg, EC.message()); + return true; } static DWORD getProtection(const void *addr) { @@ -93,7 +205,7 @@ bool Memory::setRangeWritable(const void *Addr, size_t Size) { } DWORD oldProt; - sys::Memory::InvalidateInstructionCache(Addr, Size); + Memory::InvalidateInstructionCache(Addr, Size); return ::VirtualProtect(const_cast(Addr), Size, prot, &oldProt) == TRUE; } @@ -112,9 +224,10 @@ bool Memory::setRangeExecutable(const void *Addr, size_t Size) { } DWORD oldProt; - sys::Memory::InvalidateInstructionCache(Addr, Size); + Memory::InvalidateInstructionCache(Addr, Size); return ::VirtualProtect(const_cast(Addr), Size, prot, &oldProt) == TRUE; } -} +} // namespace sys +} // namespace llvm diff --git a/lib/Support/Windows/PathV2.inc b/lib/Support/Windows/PathV2.inc index 696768b..3dfac66 100644 --- a/lib/Support/Windows/PathV2.inc +++ b/lib/Support/Windows/PathV2.inc @@ -794,7 +794,7 @@ mapped_file_region::mapped_file_region(const Twine &path, SmallVector path_utf16; // Convert path to UTF-16. - if (ec = UTF8ToUTF16(path.toStringRef(path_storage), path_utf16)) + if ((ec = UTF8ToUTF16(path.toStringRef(path_storage), path_utf16))) return; // Get file handle for creating a file mapping. diff --git a/lib/Support/YAMLParser.cpp b/lib/Support/YAMLParser.cpp index 7c353c8..34df636 100644 --- a/lib/Support/YAMLParser.cpp +++ b/lib/Support/YAMLParser.cpp @@ -903,6 +903,7 @@ bool Scanner::consume(uint32_t Expected) { void Scanner::skip(uint32_t Distance) { Current += Distance; Column += Distance; + assert(Current <= End && "Skipped past the end"); } bool Scanner::isBlankOrBreak(StringRef::iterator Position) { @@ -1239,6 +1240,12 @@ bool Scanner::scanFlowScalar(bool IsDoubleQuoted) { } } } + + if (Current == End) { + setError("Expected quote at end of scalar", Current); + return false; + } + skip(1); // Skip ending quote. Token T; T.Kind = Token::TK_Scalar; diff --git a/lib/Support/raw_ostream.cpp b/lib/Support/raw_ostream.cpp index fa69c2d..7cd5364 100644 --- a/lib/Support/raw_ostream.cpp +++ b/lib/Support/raw_ostream.cpp @@ -266,8 +266,8 @@ void raw_ostream::flush_nonempty() { raw_ostream &raw_ostream::write(unsigned char C) { // Group exceptional cases into a single branch. 
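As an aside: the raw_ostream changes below replace the old BUILTIN_EXPECT macro with LLVM_LIKELY/LLVM_UNLIKELY. The real definitions live in llvm/Support/Compiler.h; on a GCC-compatible compiler they are typically spelled roughly like this (a sketch, not the verbatim header):

    #if defined(__GNUC__)
    #define LLVM_LIKELY(EXPR)   __builtin_expect((bool)(EXPR), true)
    #define LLVM_UNLIKELY(EXPR) __builtin_expect((bool)(EXPR), false)
    #else
    #define LLVM_LIKELY(EXPR)   (EXPR)   // hint is a no-op without the builtin
    #define LLVM_UNLIKELY(EXPR) (EXPR)
    #endif
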
- if (BUILTIN_EXPECT(OutBufCur >= OutBufEnd, false)) { - if (BUILTIN_EXPECT(!OutBufStart, false)) { + if (LLVM_UNLIKELY(OutBufCur >= OutBufEnd)) { + if (LLVM_UNLIKELY(!OutBufStart)) { if (BufferMode == Unbuffered) { write_impl(reinterpret_cast(&C), 1); return *this; @@ -286,8 +286,8 @@ raw_ostream &raw_ostream::write(unsigned char C) { raw_ostream &raw_ostream::write(const char *Ptr, size_t Size) { // Group exceptional cases into a single branch. - if (BUILTIN_EXPECT(size_t(OutBufEnd - OutBufCur) < Size, false)) { - if (BUILTIN_EXPECT(!OutBufStart, false)) { + if (LLVM_UNLIKELY(size_t(OutBufEnd - OutBufCur) < Size)) { + if (LLVM_UNLIKELY(!OutBufStart)) { if (BufferMode == Unbuffered) { write_impl(Ptr, Size); return *this; @@ -302,7 +302,7 @@ raw_ostream &raw_ostream::write(const char *Ptr, size_t Size) { // If the buffer is empty at this point we have a string that is larger // than the buffer. Directly write the chunk that is a multiple of the // preferred buffer size and put the remainder in the buffer. - if (BUILTIN_EXPECT(OutBufCur == OutBufStart, false)) { + if (LLVM_UNLIKELY(OutBufCur == OutBufStart)) { size_t BytesToWrite = Size - (Size % NumBytes); write_impl(Ptr, BytesToWrite); copy_to_buffer(Ptr + BytesToWrite, Size - BytesToWrite); @@ -523,7 +523,7 @@ void raw_fd_ostream::write_impl(const char *Ptr, size_t Size) { ssize_t ret; // Check whether we should attempt to use atomic writes. - if (BUILTIN_EXPECT(!UseAtomicWrites, true)) { + if (LLVM_LIKELY(!UseAtomicWrites)) { ret = ::write(FD, Ptr, Size); } else { // Use ::writev() where available. diff --git a/lib/Support/regexec.c b/lib/Support/regexec.c index 0078616..bd5e72d 100644 --- a/lib/Support/regexec.c +++ b/lib/Support/regexec.c @@ -69,7 +69,7 @@ #define SETUP(v) ((v) = 0) #define onestate long #define INIT(o, n) ((o) = (unsigned long)1 << (n)) -#define INC(o) ((o) <<= 1) +#define INC(o) ((o) = (unsigned long)(o) << 1) #define ISSTATEIN(v, o) (((v) & (o)) != 0) /* some abbreviations; note that some of these know variable names! */ /* do "if I'm here, I can also be there" etc without branches */ diff --git a/lib/Support/system_error.cpp b/lib/Support/system_error.cpp index 56898de..2df223c 100644 --- a/lib/Support/system_error.cpp +++ b/lib/Support/system_error.cpp @@ -48,8 +48,8 @@ _do_message::message(int ev) const { class _generic_error_category : public _do_message { public: - virtual const char* name() const; - virtual std::string message(int ev) const; + virtual const char* name() const LLVM_OVERRIDE; + virtual std::string message(int ev) const LLVM_OVERRIDE; }; const char* @@ -74,9 +74,9 @@ generic_category() { class _system_error_category : public _do_message { public: - virtual const char* name() const; - virtual std::string message(int ev) const; - virtual error_condition default_error_condition(int ev) const; + virtual const char* name() const LLVM_OVERRIDE; + virtual std::string message(int ev) const LLVM_OVERRIDE; + virtual error_condition default_error_condition(int ev) const LLVM_OVERRIDE; }; const char* diff --git a/lib/TableGen/CMakeLists.txt b/lib/TableGen/CMakeLists.txt index ba7bf14..935d674 100644 --- a/lib/TableGen/CMakeLists.txt +++ b/lib/TableGen/CMakeLists.txt @@ -1,13 +1,8 @@ -## FIXME: This only requires RTTI because tblgen uses it. Fix that. 
-set(LLVM_REQUIRES_RTTI 1) -set(LLVM_REQUIRES_EH 1) - add_llvm_library(LLVMTableGen Error.cpp Main.cpp Record.cpp StringMatcher.cpp - TableGenAction.cpp TableGenBackend.cpp TGLexer.cpp TGParser.cpp diff --git a/lib/TableGen/Error.cpp b/lib/TableGen/Error.cpp index 1463b68..0bb86b0 100644 --- a/lib/TableGen/Error.cpp +++ b/lib/TableGen/Error.cpp @@ -16,12 +16,25 @@ #include "llvm/ADT/Twine.h" #include "llvm/Support/raw_ostream.h" +#include + namespace llvm { SourceMgr SrcMgr; -void PrintWarning(SMLoc WarningLoc, const Twine &Msg) { - SrcMgr.PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg); +static void PrintMessage(ArrayRef Loc, SourceMgr::DiagKind Kind, + const Twine &Msg) { + SMLoc NullLoc; + if (Loc.empty()) + Loc = NullLoc; + SrcMgr.PrintMessage(Loc.front(), Kind, Msg); + for (unsigned i = 1; i < Loc.size(); ++i) + SrcMgr.PrintMessage(Loc[i], SourceMgr::DK_Note, + "instantiated from multiclass"); +} + +void PrintWarning(ArrayRef WarningLoc, const Twine &Msg) { + PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg); } void PrintWarning(const char *Loc, const Twine &Msg) { @@ -32,12 +45,8 @@ void PrintWarning(const Twine &Msg) { errs() << "warning:" << Msg << "\n"; } -void PrintWarning(const TGError &Warning) { - PrintWarning(Warning.getLoc(), Warning.getMessage()); -} - -void PrintError(SMLoc ErrorLoc, const Twine &Msg) { - SrcMgr.PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg); +void PrintError(ArrayRef ErrorLoc, const Twine &Msg) { + PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg); } void PrintError(const char *Loc, const Twine &Msg) { @@ -48,8 +57,14 @@ void PrintError(const Twine &Msg) { errs() << "error:" << Msg << "\n"; } -void PrintError(const TGError &Error) { - PrintError(Error.getLoc(), Error.getMessage()); +void PrintFatalError(const std::string &Msg) { + PrintError(Twine(Msg)); + std::exit(1); +} + +void PrintFatalError(ArrayRef ErrorLoc, const std::string &Msg) { + PrintError(ErrorLoc, Msg); + std::exit(1); } } // end namespace llvm diff --git a/lib/TableGen/Main.cpp b/lib/TableGen/Main.cpp index 7aeef56..d0ca756 100644 --- a/lib/TableGen/Main.cpp +++ b/lib/TableGen/Main.cpp @@ -22,8 +22,8 @@ #include "llvm/Support/ToolOutputFile.h" #include "llvm/Support/system_error.h" #include "llvm/TableGen/Error.h" +#include "llvm/TableGen/Main.h" #include "llvm/TableGen/Record.h" -#include "llvm/TableGen/TableGenAction.h" #include #include using namespace llvm; @@ -47,79 +47,79 @@ namespace { cl::value_desc("directory"), cl::Prefix); } +/// \brief Create a dependency file for `-d` option. +/// +/// This functionality is really only for the benefit of the build system. +/// It is similar to GCC's `-M*` family of options. 
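The helper that follows emits a single make-style rule: the -o output as the target, followed by each file the parser read. For an invocation like `tblgen -o GenFoo.inc -d GenFoo.d Foo.td` (file names invented for illustration) the dependency file would contain roughly:

    GenFoo.inc: Foo.td include/llvm/Target/Target.td
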
+static int createDependencyFile(const TGParser &Parser, const char *argv0) { + if (OutputFilename == "-") { + errs() << argv0 << ": the option -d must be used together with -o\n"; + return 1; + } + std::string Error; + tool_output_file DepOut(DependFilename.c_str(), Error); + if (!Error.empty()) { + errs() << argv0 << ": error opening " << DependFilename + << ":" << Error << "\n"; + return 1; + } + DepOut.os() << OutputFilename << ":"; + const std::vector &Dependencies = Parser.getDependencies(); + for (std::vector::const_iterator I = Dependencies.begin(), + E = Dependencies.end(); + I != E; ++I) { + DepOut.os() << " " << (*I); + } + DepOut.os() << "\n"; + DepOut.keep(); + return 0; +} + namespace llvm { -int TableGenMain(char *argv0, TableGenAction &Action) { +int TableGenMain(char *argv0, TableGenMainFn *MainFn) { RecordKeeper Records; - try { - // Parse the input file. - OwningPtr File; - if (error_code ec = - MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), File)) { - errs() << "Could not open input file '" << InputFilename << "': " - << ec.message() <<"\n"; - return 1; - } - MemoryBuffer *F = File.take(); - - // Tell SrcMgr about this buffer, which is what TGParser will pick up. - SrcMgr.AddNewSourceBuffer(F, SMLoc()); - - // Record the location of the include directory so that the lexer can find - // it later. - SrcMgr.setIncludeDirs(IncludeDirs); - - TGParser Parser(SrcMgr, Records); - - if (Parser.ParseFile()) - return 1; - - std::string Error; - tool_output_file Out(OutputFilename.c_str(), Error); - if (!Error.empty()) { - errs() << argv0 << ": error opening " << OutputFilename - << ":" << Error << "\n"; - return 1; - } - if (!DependFilename.empty()) { - if (OutputFilename == "-") { - errs() << argv0 << ": the option -d must be used together with -o\n"; - return 1; - } - tool_output_file DepOut(DependFilename.c_str(), Error); - if (!Error.empty()) { - errs() << argv0 << ": error opening " << DependFilename - << ":" << Error << "\n"; - return 1; - } - DepOut.os() << OutputFilename << ":"; - const std::vector &Dependencies = Parser.getDependencies(); - for (std::vector::const_iterator I = Dependencies.begin(), - E = Dependencies.end(); - I != E; ++I) { - DepOut.os() << " " << (*I); - } - DepOut.os() << "\n"; - DepOut.keep(); - } - - if (Action(Out.os(), Records)) - return 1; - - // Declare success. - Out.keep(); - return 0; - - } catch (const TGError &Error) { - PrintError(Error); - } catch (const std::string &Error) { - PrintError(Error); - } catch (const char *Error) { - PrintError(Error); - } catch (...) { - errs() << argv0 << ": Unknown unexpected exception occurred.\n"; + // Parse the input file. + OwningPtr File; + if (error_code ec = + MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), File)) { + errs() << "Could not open input file '" << InputFilename << "': " + << ec.message() <<"\n"; + return 1; + } + MemoryBuffer *F = File.take(); + + // Tell SrcMgr about this buffer, which is what TGParser will pick up. + SrcMgr.AddNewSourceBuffer(F, SMLoc()); + + // Record the location of the include directory so that the lexer can find + // it later. 
+ SrcMgr.setIncludeDirs(IncludeDirs); + + TGParser Parser(SrcMgr, Records); + + if (Parser.ParseFile()) + return 1; + + std::string Error; + tool_output_file Out(OutputFilename.c_str(), Error); + if (!Error.empty()) { + errs() << argv0 << ": error opening " << OutputFilename + << ":" << Error << "\n"; + return 1; } + if (!DependFilename.empty()) { + if (int Ret = createDependencyFile(Parser, argv0)) + return Ret; + } + + if (MainFn(Out.os(), Records)) + return 1; + + // Declare success. + Out.keep(); + return 0; return 1; } diff --git a/lib/TableGen/Makefile b/lib/TableGen/Makefile index 4472438..345db34 100644 --- a/lib/TableGen/Makefile +++ b/lib/TableGen/Makefile @@ -11,8 +11,4 @@ LEVEL = ../.. LIBRARYNAME = LLVMTableGen BUILD_ARCHIVE = 1 -## FIXME: This only requires RTTI because tblgen uses it. Fix that. -REQUIRES_RTTI = 1 -REQUIRES_EH = 1 - include $(LEVEL)/Makefile.common diff --git a/lib/TableGen/Record.cpp b/lib/TableGen/Record.cpp index 99fdc1f..11feb435 100644 --- a/lib/TableGen/Record.cpp +++ b/lib/TableGen/Record.cpp @@ -112,7 +112,8 @@ Init *BitRecTy::convertValue(IntInit *II) { } Init *BitRecTy::convertValue(TypedInit *VI) { - if (dynamic_cast(VI->getType())) + RecTy *Ty = VI->getType(); + if (isa(Ty) || isa(Ty) || isa(Ty)) return VI; // Accept variable if it is already of bit type! return 0; } @@ -178,60 +179,15 @@ Init *BitsRecTy::convertValue(BitsInit *BI) { } Init *BitsRecTy::convertValue(TypedInit *VI) { - if (BitsRecTy *BRT = dynamic_cast(VI->getType())) - if (BRT->Size == Size) { - SmallVector NewBits(Size); - - for (unsigned i = 0; i != Size; ++i) - NewBits[i] = VarBitInit::get(VI, i); - return BitsInit::get(NewBits); - } - - if (Size == 1 && dynamic_cast(VI->getType())) + if (Size == 1 && isa(VI->getType())) return BitsInit::get(VI); - if (TernOpInit *Tern = dynamic_cast(VI)) { - if (Tern->getOpcode() == TernOpInit::IF) { - Init *LHS = Tern->getLHS(); - Init *MHS = Tern->getMHS(); - Init *RHS = Tern->getRHS(); - - IntInit *MHSi = dynamic_cast(MHS); - IntInit *RHSi = dynamic_cast(RHS); - - if (MHSi && RHSi) { - int64_t MHSVal = MHSi->getValue(); - int64_t RHSVal = RHSi->getValue(); - - if (canFitInBitfield(MHSVal, Size) && canFitInBitfield(RHSVal, Size)) { - SmallVector NewBits(Size); - - for (unsigned i = 0; i != Size; ++i) - NewBits[i] = - TernOpInit::get(TernOpInit::IF, LHS, - IntInit::get((MHSVal & (1LL << i)) ? 1 : 0), - IntInit::get((RHSVal & (1LL << i)) ? 
1 : 0), - VI->getType()); - - return BitsInit::get(NewBits); - } - } else { - BitsInit *MHSbs = dynamic_cast(MHS); - BitsInit *RHSbs = dynamic_cast(RHS); - - if (MHSbs && RHSbs) { - SmallVector NewBits(Size); - - for (unsigned i = 0; i != Size; ++i) - NewBits[i] = TernOpInit::get(TernOpInit::IF, LHS, - MHSbs->getBit(i), - RHSbs->getBit(i), - VI->getType()); + if (VI->getType()->typeIsConvertibleTo(this)) { + SmallVector NewBits(Size); - return BitsInit::get(NewBits); - } - } - } + for (unsigned i = 0; i != Size; ++i) + NewBits[i] = VarBitInit::get(VI, i); + return BitsInit::get(NewBits); } return 0; @@ -244,7 +200,7 @@ Init *IntRecTy::convertValue(BitInit *BI) { Init *IntRecTy::convertValue(BitsInit *BI) { int64_t Result = 0; for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) - if (BitInit *Bit = dynamic_cast(BI->getBit(i))) { + if (BitInit *Bit = dyn_cast(BI->getBit(i))) { Result |= Bit->getValue() << i; } else { return 0; @@ -285,7 +241,7 @@ Init *StringRecTy::convertValue(BinOpInit *BO) { Init *StringRecTy::convertValue(TypedInit *TI) { - if (dynamic_cast(TI->getType())) + if (isa(TI->getType())) return TI; // Accept variable if already of the right type! return 0; } @@ -305,17 +261,15 @@ Init *ListRecTy::convertValue(ListInit *LI) { else return 0; - ListRecTy *LType = dynamic_cast(LI->getType()); - if (LType == 0) { + if (!isa(LI->getType())) return 0; - } return ListInit::get(Elements, this); } Init *ListRecTy::convertValue(TypedInit *TI) { // Ensure that TI is compatible with our class. - if (ListRecTy *LRT = dynamic_cast(TI->getType())) + if (ListRecTy *LRT = dyn_cast(TI->getType())) if (LRT->getElementType()->typeIsConvertibleTo(getElementType())) return TI; return 0; @@ -351,7 +305,7 @@ Init *DagRecTy::convertValue(BinOpInit *BO) { } RecordRecTy *RecordRecTy::get(Record *R) { - return &dynamic_cast(*R->getDefInit()->getType()); + return dyn_cast(R->getDefInit()->getType()); } std::string RecordRecTy::getAsString() const { @@ -367,7 +321,7 @@ Init *RecordRecTy::convertValue(DefInit *DI) { Init *RecordRecTy::convertValue(TypedInit *TI) { // Ensure that TI is compatible with Rec. - if (RecordRecTy *RRT = dynamic_cast(TI->getType())) + if (RecordRecTy *RRT = dyn_cast(TI->getType())) if (RRT->getRecord()->isSubClassOf(getRecord()) || RRT->getRecord() == getRecord()) return TI; @@ -386,57 +340,53 @@ bool RecordRecTy::baseClassOf(const RecordRecTy *RHS) const { return false; } - /// resolveTypes - Find a common type that T1 and T2 convert to. /// Return 0 if no such type exists. 
/// RecTy *llvm::resolveTypes(RecTy *T1, RecTy *T2) { - if (!T1->typeIsConvertibleTo(T2)) { - if (!T2->typeIsConvertibleTo(T1)) { - // If one is a Record type, check superclasses - RecordRecTy *RecTy1 = dynamic_cast(T1); - if (RecTy1) { - // See if T2 inherits from a type T1 also inherits from - const std::vector &T1SuperClasses = - RecTy1->getRecord()->getSuperClasses(); - for(std::vector::const_iterator i = T1SuperClasses.begin(), - iend = T1SuperClasses.end(); - i != iend; - ++i) { - RecordRecTy *SuperRecTy1 = RecordRecTy::get(*i); - RecTy *NewType1 = resolveTypes(SuperRecTy1, T2); - if (NewType1 != 0) { - if (NewType1 != SuperRecTy1) { - delete SuperRecTy1; - } - return NewType1; - } + if (T1->typeIsConvertibleTo(T2)) + return T2; + if (T2->typeIsConvertibleTo(T1)) + return T1; + + // If one is a Record type, check superclasses + if (RecordRecTy *RecTy1 = dyn_cast(T1)) { + // See if T2 inherits from a type T1 also inherits from + const std::vector &T1SuperClasses = + RecTy1->getRecord()->getSuperClasses(); + for(std::vector::const_iterator i = T1SuperClasses.begin(), + iend = T1SuperClasses.end(); + i != iend; + ++i) { + RecordRecTy *SuperRecTy1 = RecordRecTy::get(*i); + RecTy *NewType1 = resolveTypes(SuperRecTy1, T2); + if (NewType1 != 0) { + if (NewType1 != SuperRecTy1) { + delete SuperRecTy1; } + return NewType1; } - RecordRecTy *RecTy2 = dynamic_cast(T2); - if (RecTy2) { - // See if T1 inherits from a type T2 also inherits from - const std::vector &T2SuperClasses = - RecTy2->getRecord()->getSuperClasses(); - for (std::vector::const_iterator i = T2SuperClasses.begin(), - iend = T2SuperClasses.end(); - i != iend; - ++i) { - RecordRecTy *SuperRecTy2 = RecordRecTy::get(*i); - RecTy *NewType2 = resolveTypes(T1, SuperRecTy2); - if (NewType2 != 0) { - if (NewType2 != SuperRecTy2) { - delete SuperRecTy2; - } - return NewType2; - } + } + } + if (RecordRecTy *RecTy2 = dyn_cast(T2)) { + // See if T1 inherits from a type T2 also inherits from + const std::vector &T2SuperClasses = + RecTy2->getRecord()->getSuperClasses(); + for (std::vector::const_iterator i = T2SuperClasses.begin(), + iend = T2SuperClasses.end(); + i != iend; + ++i) { + RecordRecTy *SuperRecTy2 = RecordRecTy::get(*i); + RecTy *NewType2 = resolveTypes(T1, SuperRecTy2); + if (NewType2 != 0) { + if (NewType2 != SuperRecTy2) { + delete SuperRecTy2; } + return NewType2; } - return 0; } - return T2; } - return T1; + return 0; } @@ -519,6 +469,15 @@ std::string BitsInit::getAsString() const { return Result + " }"; } +// Fix bit initializer to preserve the behavior that bit reference from a unset +// bits initializer will resolve into VarBitInit to keep the field name and bit +// number used in targets with fixed insn length. +static Init *fixBitInit(const RecordVal *RV, Init *Before, Init *After) { + if (RV || After != UnsetInit::get()) + return After; + return Before; +} + // resolveReferences - If there are any field references that refer to fields // that have been filled in, we can propagate the values now. 
// @@ -526,16 +485,39 @@ Init *BitsInit::resolveReferences(Record &R, const RecordVal *RV) const { bool Changed = false; SmallVector NewBits(getNumBits()); - for (unsigned i = 0, e = Bits.size(); i != e; ++i) { - Init *B; - Init *CurBit = getBit(i); + Init *CachedInit = 0; + Init *CachedBitVar = 0; + bool CachedBitVarChanged = false; + + for (unsigned i = 0, e = getNumBits(); i != e; ++i) { + Init *CurBit = Bits[i]; + Init *CurBitVar = CurBit->getBitVar(); - do { - B = CurBit; - CurBit = CurBit->resolveReferences(R, RV); - Changed |= B != CurBit; - } while (B != CurBit); NewBits[i] = CurBit; + + if (CurBitVar == CachedBitVar) { + if (CachedBitVarChanged) { + Init *Bit = CachedInit->getBit(CurBit->getBitNum()); + NewBits[i] = fixBitInit(RV, CurBit, Bit); + } + continue; + } + CachedBitVar = CurBitVar; + CachedBitVarChanged = false; + + Init *B; + do { + B = CurBitVar; + CurBitVar = CurBitVar->resolveReferences(R, RV); + CachedBitVarChanged |= B != CurBitVar; + Changed |= B != CurBitVar; + } while (B != CurBitVar); + CachedInit = CurBitVar; + + if (CachedBitVarChanged) { + Init *Bit = CurBitVar->getBit(CurBit->getBitNum()); + NewBits[i] = fixBitInit(RV, CurBit, Bit); + } } if (Changed) @@ -613,7 +595,7 @@ ListInit *ListInit::get(ArrayRef Range, RecTy *EltTy) { } void ListInit::Profile(FoldingSetNodeID &ID) const { - ListRecTy *ListType = dynamic_cast(getType()); + ListRecTy *ListType = dyn_cast(getType()); assert(ListType && "Bad type for ListInit!"); RecTy *EltTy = ListType->getElementType(); @@ -633,8 +615,9 @@ ListInit::convertInitListSlice(const std::vector &Elements) const { Record *ListInit::getElementAsRecord(unsigned i) const { assert(i < Values.size() && "List element index out of range!"); - DefInit *DI = dynamic_cast(Values[i]); - if (DI == 0) throw "Expected record in list!"; + DefInit *DI = dyn_cast(Values[i]); + if (DI == 0) + PrintFatalError("Expected record in list!"); return DI->getDef(); } @@ -668,7 +651,7 @@ Init *ListInit::resolveListElementReference(Record &R, const RecordVal *IRV, // If the element is set to some value, or if we are resolving a reference // to a specific variable and that variable is explicitly unset, then // replace the VarListElementInit with it. 
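A recurring change in this file (visible in the hunks around here) is the move from plain C++ RTTI casts to LLVM's isa<>/dyn_cast<>/dyn_cast_or_null<> templates, which dispatch through each class's classof() and therefore work under -fno-rtti. A minimal sketch of the idiom; initAsString and its use of the Init hierarchy are made up for illustration:

    #include "llvm/TableGen/Record.h"
    using namespace llvm;

    static std::string initAsString(const Init *V) {
      if (const StringInit *SI = dyn_cast<StringInit>(V)) // null if V is not a StringInit
        return SI->getValue();
      if (isa<UnsetInit>(V))                              // pure type test, no cast
        return "?";
      return V->getAsString();                            // fall back to printing
    }
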
- if (IRV || !dynamic_cast(E)) + if (IRV || !isa(E)) return E; return 0; } @@ -682,30 +665,16 @@ std::string ListInit::getAsString() const { return Result + "]"; } -Init *OpInit::resolveBitReference(Record &R, const RecordVal *IRV, - unsigned Bit) const { - Init *Folded = Fold(&R, 0); - - if (Folded != this) { - TypedInit *Typed = dynamic_cast(Folded); - if (Typed) { - return Typed->resolveBitReference(R, IRV, Bit); - } - } - - return 0; -} - Init *OpInit::resolveListElementReference(Record &R, const RecordVal *IRV, unsigned Elt) const { Init *Resolved = resolveReferences(R, IRV); - OpInit *OResolved = dynamic_cast(Resolved); + OpInit *OResolved = dyn_cast(Resolved); if (OResolved) { Resolved = OResolved->Fold(&R, 0); } if (Resolved != this) { - TypedInit *Typed = dynamic_cast(Resolved); + TypedInit *Typed = dyn_cast(Resolved); assert(Typed && "Expected typed init for list reference"); if (Typed) { Init *New = Typed->resolveListElementReference(R, IRV, Elt); @@ -718,6 +687,12 @@ Init *OpInit::resolveListElementReference(Record &R, const RecordVal *IRV, return 0; } +Init *OpInit::getBit(unsigned Bit) const { + if (getType() == BitRecTy::get()) + return const_cast(this); + return VarBitInit::get(const_cast(this), Bit); +} + UnOpInit *UnOpInit::get(UnaryOp opc, Init *lhs, RecTy *Type) { typedef std::pair, RecTy *> Key; @@ -735,30 +710,23 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { switch (getOpcode()) { case CAST: { if (getType()->getAsString() == "string") { - StringInit *LHSs = dynamic_cast(LHS); - if (LHSs) { + if (StringInit *LHSs = dyn_cast(LHS)) return LHSs; - } - DefInit *LHSd = dynamic_cast(LHS); - if (LHSd) { + if (DefInit *LHSd = dyn_cast(LHS)) return StringInit::get(LHSd->getDef()->getName()); - } - IntInit *LHSi = dynamic_cast(LHS); - if (LHSi) { + if (IntInit *LHSi = dyn_cast(LHS)) return StringInit::get(LHSi->getAsString()); - } } else { - StringInit *LHSs = dynamic_cast(LHS); - if (LHSs) { + if (StringInit *LHSs = dyn_cast(LHS)) { std::string Name = LHSs->getValue(); // From TGParser::ParseIDValue if (CurRec) { if (const RecordVal *RV = CurRec->getValue(Name)) { if (RV->getType() != getType()) - throw "type mismatch in cast"; + PrintFatalError("type mismatch in cast"); return VarInit::get(Name, RV->getType()); } @@ -770,7 +738,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { assert(RV && "Template arg doesn't exist??"); if (RV->getType() != getType()) - throw "type mismatch in cast"; + PrintFatalError("type mismatch in cast"); return VarInit::get(TemplateArgName, RV->getType()); } @@ -784,7 +752,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { assert(RV && "Template arg doesn't exist??"); if (RV->getType() != getType()) - throw "type mismatch in cast"; + PrintFatalError("type mismatch in cast"); return VarInit::get(MCName, RV->getType()); } @@ -793,14 +761,14 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { if (Record *D = (CurRec->getRecords()).getDef(Name)) return DefInit::get(D); - throw TGError(CurRec->getLoc(), "Undefined reference:'" + Name + "'\n"); + PrintFatalError(CurRec->getLoc(), + "Undefined reference:'" + Name + "'\n"); } } break; } case HEAD: { - ListInit *LHSl = dynamic_cast(LHS); - if (LHSl) { + if (ListInit *LHSl = dyn_cast(LHS)) { if (LHSl->getSize() == 0) { assert(0 && "Empty list in car"); return 0; @@ -810,8 +778,7 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { break; } case TAIL: { - ListInit *LHSl = 
dynamic_cast(LHS); - if (LHSl) { + if (ListInit *LHSl = dyn_cast(LHS)) { if (LHSl->getSize() == 0) { assert(0 && "Empty list in cdr"); return 0; @@ -828,16 +795,14 @@ Init *UnOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { break; } case EMPTY: { - ListInit *LHSl = dynamic_cast(LHS); - if (LHSl) { + if (ListInit *LHSl = dyn_cast(LHS)) { if (LHSl->getSize() == 0) { return IntInit::get(1); } else { return IntInit::get(0); } } - StringInit *LHSs = dynamic_cast(LHS); - if (LHSs) { + if (StringInit *LHSs = dyn_cast(LHS)) { if (LHSs->getValue().empty()) { return IntInit::get(1); } else { @@ -891,13 +856,13 @@ BinOpInit *BinOpInit::get(BinaryOp opc, Init *lhs, Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { switch (getOpcode()) { case CONCAT: { - DagInit *LHSs = dynamic_cast(LHS); - DagInit *RHSs = dynamic_cast(RHS); + DagInit *LHSs = dyn_cast(LHS); + DagInit *RHSs = dyn_cast(RHS); if (LHSs && RHSs) { - DefInit *LOp = dynamic_cast(LHSs->getOperator()); - DefInit *ROp = dynamic_cast(RHSs->getOperator()); + DefInit *LOp = dyn_cast(LHSs->getOperator()); + DefInit *ROp = dyn_cast(RHSs->getOperator()); if (LOp == 0 || ROp == 0 || LOp->getDef() != ROp->getDef()) - throw "Concated Dag operators do not match!"; + PrintFatalError("Concated Dag operators do not match!"); std::vector Args; std::vector ArgNames; for (unsigned i = 0, e = LHSs->getNumArgs(); i != e; ++i) { @@ -913,8 +878,8 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { break; } case STRCONCAT: { - StringInit *LHSs = dynamic_cast(LHS); - StringInit *RHSs = dynamic_cast(RHS); + StringInit *LHSs = dyn_cast(LHS); + StringInit *RHSs = dyn_cast(RHS); if (LHSs && RHSs) return StringInit::get(LHSs->getValue() + RHSs->getValue()); break; @@ -922,16 +887,16 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { case EQ: { // try to fold eq comparison for 'bit' and 'int', otherwise fallback // to string objects. 
- IntInit* L = - dynamic_cast(LHS->convertInitializerTo(IntRecTy::get())); - IntInit* R = - dynamic_cast(RHS->convertInitializerTo(IntRecTy::get())); + IntInit *L = + dyn_cast_or_null(LHS->convertInitializerTo(IntRecTy::get())); + IntInit *R = + dyn_cast_or_null(RHS->convertInitializerTo(IntRecTy::get())); if (L && R) return IntInit::get(L->getValue() == R->getValue()); - StringInit *LHSs = dynamic_cast(LHS); - StringInit *RHSs = dynamic_cast(RHS); + StringInit *LHSs = dyn_cast(LHS); + StringInit *RHSs = dyn_cast(RHS); // Make sure we've resolved if (LHSs && RHSs) @@ -942,8 +907,8 @@ Init *BinOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { case SHL: case SRA: case SRL: { - IntInit *LHSi = dynamic_cast(LHS); - IntInit *RHSi = dynamic_cast(RHS); + IntInit *LHSi = dyn_cast(LHS); + IntInit *RHSi = dyn_cast(RHS); if (LHSi && RHSi) { int64_t LHSv = LHSi->getValue(), RHSv = RHSi->getValue(); int64_t Result; @@ -1016,7 +981,7 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg, MultiClass *CurMultiClass) { std::vector NewOperands; - TypedInit *TArg = dynamic_cast(Arg); + TypedInit *TArg = dyn_cast(Arg); // If this is a dag, recurse if (TArg && TArg->getType()->getAsString() == "dag") { @@ -1030,7 +995,7 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg, } for (int i = 0; i < RHSo->getNumOperands(); ++i) { - OpInit *RHSoo = dynamic_cast(RHSo->getOperand(i)); + OpInit *RHSoo = dyn_cast(RHSo->getOperand(i)); if (RHSoo) { Init *Result = EvaluateOperation(RHSoo, LHS, Arg, @@ -1058,25 +1023,21 @@ static Init *EvaluateOperation(OpInit *RHSo, Init *LHS, Init *Arg, static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type, Record *CurRec, MultiClass *CurMultiClass) { - DagInit *MHSd = dynamic_cast(MHS); - ListInit *MHSl = dynamic_cast(MHS); + DagInit *MHSd = dyn_cast(MHS); + ListInit *MHSl = dyn_cast(MHS); - DagRecTy *DagType = dynamic_cast(Type); - ListRecTy *ListType = dynamic_cast(Type); - - OpInit *RHSo = dynamic_cast(RHS); + OpInit *RHSo = dyn_cast(RHS); if (!RHSo) { - throw TGError(CurRec->getLoc(), "!foreach requires an operator\n"); + PrintFatalError(CurRec->getLoc(), "!foreach requires an operator\n"); } - TypedInit *LHSt = dynamic_cast(LHS); + TypedInit *LHSt = dyn_cast(LHS); - if (!LHSt) { - throw TGError(CurRec->getLoc(), "!foreach requires typed variable\n"); - } + if (!LHSt) + PrintFatalError(CurRec->getLoc(), "!foreach requires typed variable\n"); - if ((MHSd && DagType) || (MHSl && ListType)) { + if ((MHSd && isa(Type)) || (MHSl && isa(Type))) { if (MHSd) { Init *Val = MHSd->getOperator(); Init *Result = EvaluateOperation(RHSo, LHS, Val, @@ -1139,17 +1100,17 @@ static Init *ForeachHelper(Init *LHS, Init *MHS, Init *RHS, RecTy *Type, Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { switch (getOpcode()) { case SUBST: { - DefInit *LHSd = dynamic_cast(LHS); - VarInit *LHSv = dynamic_cast(LHS); - StringInit *LHSs = dynamic_cast(LHS); + DefInit *LHSd = dyn_cast(LHS); + VarInit *LHSv = dyn_cast(LHS); + StringInit *LHSs = dyn_cast(LHS); - DefInit *MHSd = dynamic_cast(MHS); - VarInit *MHSv = dynamic_cast(MHS); - StringInit *MHSs = dynamic_cast(MHS); + DefInit *MHSd = dyn_cast(MHS); + VarInit *MHSv = dyn_cast(MHS); + StringInit *MHSs = dyn_cast(MHS); - DefInit *RHSd = dynamic_cast(RHS); - VarInit *RHSv = dynamic_cast(RHS); - StringInit *RHSs = dynamic_cast(RHS); + DefInit *RHSd = dyn_cast(RHS); + VarInit *RHSv = dyn_cast(RHS); + StringInit *RHSs = dyn_cast(RHS); if ((LHSd && MHSd && RHSd) || (LHSv && MHSv && 
RHSv) @@ -1197,9 +1158,9 @@ Init *TernOpInit::Fold(Record *CurRec, MultiClass *CurMultiClass) const { } case IF: { - IntInit *LHSi = dynamic_cast(LHS); + IntInit *LHSi = dyn_cast(LHS); if (Init *I = LHS->convertInitializerTo(IntRecTy::get())) - LHSi = dynamic_cast(I); + LHSi = dyn_cast(I); if (LHSi) { if (LHSi->getValue()) { return MHS; @@ -1219,9 +1180,9 @@ Init *TernOpInit::resolveReferences(Record &R, Init *lhs = LHS->resolveReferences(R, RV); if (Opc == IF && lhs != LHS) { - IntInit *Value = dynamic_cast(lhs); + IntInit *Value = dyn_cast(lhs); if (Init *I = lhs->convertInitializerTo(IntRecTy::get())) - Value = dynamic_cast(I); + Value = dyn_cast(I); if (Value != 0) { // Short-circuit if (Value->getValue()) { @@ -1257,19 +1218,15 @@ std::string TernOpInit::getAsString() const { } RecTy *TypedInit::getFieldType(const std::string &FieldName) const { - RecordRecTy *RecordType = dynamic_cast(getType()); - if (RecordType) { - RecordVal *Field = RecordType->getRecord()->getValue(FieldName); - if (Field) { + if (RecordRecTy *RecordType = dyn_cast(getType())) + if (RecordVal *Field = RecordType->getRecord()->getValue(FieldName)) return Field->getType(); - } - } return 0; } Init * TypedInit::convertInitializerBitRange(const std::vector &Bits) const { - BitsRecTy *T = dynamic_cast(getType()); + BitsRecTy *T = dyn_cast(getType()); if (T == 0) return 0; // Cannot subscript a non-bits variable. unsigned NumBits = T->getNumBits(); @@ -1285,7 +1242,7 @@ TypedInit::convertInitializerBitRange(const std::vector &Bits) const { Init * TypedInit::convertInitListSlice(const std::vector &Elements) const { - ListRecTy *T = dynamic_cast(getType()); + ListRecTy *T = dyn_cast(getType()); if (T == 0) return 0; // Cannot subscript a non-list variable. if (Elements.size() == 1) @@ -1318,31 +1275,15 @@ VarInit *VarInit::get(Init *VN, RecTy *T) { } const std::string &VarInit::getName() const { - StringInit *NameString = - dynamic_cast(getNameInit()); + StringInit *NameString = dyn_cast(getNameInit()); assert(NameString && "VarInit name is not a string!"); return NameString->getValue(); } -Init *VarInit::resolveBitReference(Record &R, const RecordVal *IRV, - unsigned Bit) const { - if (R.isTemplateArg(getNameInit())) return 0; - if (IRV && IRV->getNameInit() != getNameInit()) return 0; - - RecordVal *RV = R.getValue(getNameInit()); - assert(RV && "Reference to a non-existent variable?"); - assert(dynamic_cast(RV->getValue())); - BitsInit *BI = (BitsInit*)RV->getValue(); - - assert(Bit < BI->getNumBits() && "Bit reference out of range!"); - Init *B = BI->getBit(Bit); - - // If the bit is set to some value, or if we are resolving a reference to a - // specific variable and that variable is explicitly unset, then replace the - // VarBitInit with it. 
- if (IRV || !dynamic_cast(B)) - return B; - return 0; +Init *VarInit::getBit(unsigned Bit) const { + if (getType() == BitRecTy::get()) + return const_cast(this); + return VarBitInit::get(const_cast(this), Bit); } Init *VarInit::resolveListElementReference(Record &R, @@ -1353,9 +1294,9 @@ Init *VarInit::resolveListElementReference(Record &R, RecordVal *RV = R.getValue(getNameInit()); assert(RV && "Reference to a non-existent variable?"); - ListInit *LI = dynamic_cast(RV->getValue()); + ListInit *LI = dyn_cast(RV->getValue()); if (!LI) { - TypedInit *VI = dynamic_cast(RV->getValue()); + TypedInit *VI = dyn_cast(RV->getValue()); assert(VI && "Invalid list element!"); return VarListElementInit::get(VI, Elt); } @@ -1366,14 +1307,14 @@ Init *VarInit::resolveListElementReference(Record &R, // If the element is set to some value, or if we are resolving a reference // to a specific variable and that variable is explicitly unset, then // replace the VarListElementInit with it. - if (IRV || !dynamic_cast(E)) + if (IRV || !isa(E)) return E; return 0; } RecTy *VarInit::getFieldType(const std::string &FieldName) const { - if (RecordRecTy *RTy = dynamic_cast(getType())) + if (RecordRecTy *RTy = dyn_cast(getType())) if (const RecordVal *RV = RTy->getRecord()->getValue(FieldName)) return RV->getType(); return 0; @@ -1381,9 +1322,9 @@ RecTy *VarInit::getFieldType(const std::string &FieldName) const { Init *VarInit::getFieldInit(Record &R, const RecordVal *RV, const std::string &FieldName) const { - if (dynamic_cast(getType())) + if (isa(getType())) if (const RecordVal *Val = R.getValue(VarName)) { - if (RV != Val && (RV || dynamic_cast(Val->getValue()))) + if (RV != Val && (RV || isa(Val->getValue()))) return 0; Init *TheInit = Val->getValue(); assert(TheInit != this && "Infinite loop detected!"); @@ -1402,7 +1343,7 @@ Init *VarInit::getFieldInit(Record &R, const RecordVal *RV, /// Init *VarInit::resolveReferences(Record &R, const RecordVal *RV) const { if (RecordVal *Val = R.getValue(VarName)) - if (RV == Val || (RV == 0 && !dynamic_cast(Val->getValue()))) + if (RV == Val || (RV == 0 && !isa(Val->getValue()))) return Val->getValue(); return const_cast(this); } @@ -1425,9 +1366,11 @@ std::string VarBitInit::getAsString() const { } Init *VarBitInit::resolveReferences(Record &R, const RecordVal *RV) const { - if (Init *I = getVariable()->resolveBitReference(R, RV, getBitNum())) - return I; - return const_cast(this); + Init *I = TI->resolveReferences(R, RV); + if (TI != I) + return I->getBit(getBitNum()); + + return const_cast(this); } VarListElementInit *VarListElementInit::get(TypedInit *T, @@ -1456,11 +1399,10 @@ VarListElementInit::resolveReferences(Record &R, const RecordVal *RV) const { return const_cast(this); } -Init *VarListElementInit::resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const { - // FIXME: This should be implemented, to support references like: - // bit B = AA[0]{1}; - return 0; +Init *VarListElementInit::getBit(unsigned Bit) const { + if (getType() == BitRecTy::get()) + return const_cast(this); + return VarBitInit::get(const_cast(this), Bit); } Init *VarListElementInit:: resolveListElementReference(Record &R, @@ -1469,8 +1411,7 @@ Init *VarListElementInit:: resolveListElementReference(Record &R, Init *Result = TI->resolveListElementReference(R, RV, Element); if (Result) { - TypedInit *TInit = dynamic_cast(Result); - if (TInit) { + if (TypedInit *TInit = dyn_cast(Result)) { Init *Result2 = TInit->resolveListElementReference(R, RV, Elt); if (Result2) return Result2; 
return new VarListElementInit(TInit, Elt); @@ -1513,30 +1454,23 @@ FieldInit *FieldInit::get(Init *R, const std::string &FN) { return I; } -Init *FieldInit::resolveBitReference(Record &R, const RecordVal *RV, - unsigned Bit) const { - if (Init *BitsVal = Rec->getFieldInit(R, RV, FieldName)) - if (BitsInit *BI = dynamic_cast(BitsVal)) { - assert(Bit < BI->getNumBits() && "Bit reference out of range!"); - Init *B = BI->getBit(Bit); - - if (dynamic_cast(B)) // If the bit is set. - return B; // Replace the VarBitInit with it. - } - return 0; +Init *FieldInit::getBit(unsigned Bit) const { + if (getType() == BitRecTy::get()) + return const_cast(this); + return VarBitInit::get(const_cast(this), Bit); } Init *FieldInit::resolveListElementReference(Record &R, const RecordVal *RV, unsigned Elt) const { if (Init *ListVal = Rec->getFieldInit(R, RV, FieldName)) - if (ListInit *LI = dynamic_cast(ListVal)) { + if (ListInit *LI = dyn_cast(ListVal)) { if (Elt >= LI->getSize()) return 0; Init *E = LI->getElement(Elt); // If the element is set to some value, or if we are resolving a // reference to a specific variable and that variable is explicitly // unset, then replace the VarListElementInit with it. - if (RV || !dynamic_cast(E)) + if (RV || !isa(E)) return E; } return 0; @@ -1665,7 +1599,7 @@ RecordVal::RecordVal(const std::string &N, RecTy *T, unsigned P) } const std::string &RecordVal::getName() const { - StringInit *NameString = dynamic_cast(Name); + StringInit *NameString = dyn_cast(Name); assert(NameString && "RecordVal name is not a string!"); return NameString->getValue(); } @@ -1695,12 +1629,11 @@ void Record::init() { void Record::checkName() { // Ensure the record name has string type. - const TypedInit *TypedName = dynamic_cast(Name); + const TypedInit *TypedName = dyn_cast(Name); assert(TypedName && "Record name is not typed!"); RecTy *Type = TypedName->getType(); - if (dynamic_cast(Type) == 0) { - throw TGError(getLoc(), "Record name is not a string!"); - } + if (!isa(Type)) + PrintFatalError(getLoc(), "Record name is not a string!"); } DefInit *Record::getDefInit() { @@ -1710,8 +1643,7 @@ DefInit *Record::getDefInit() { } const std::string &Record::getName() const { - const StringInit *NameString = - dynamic_cast(Name); + const StringInit *NameString = dyn_cast(Name); assert(NameString && "Record name is not a string!"); return NameString->getValue(); } @@ -1751,7 +1683,15 @@ void Record::resolveReferencesTo(const RecordVal *RV) { if (RV == &Values[i]) // Skip resolve the same field as the given one continue; if (Init *V = Values[i].getValue()) - Values[i].setValue(V->resolveReferences(*this, RV)); + if (Values[i].setValue(V->resolveReferences(*this, RV))) + PrintFatalError(getLoc(), "Invalid value is found when setting '" + + Values[i].getNameInitAsString() + + "' after resolving references" + + (RV ? " against '" + RV->getNameInitAsString() + + "' of (" + + RV->getValue()->getAsUnquotedString() + ")" + : "") + + "\n"); } Init *OldName = getNameInit(); Init *NewName = Name->resolveReferences(*this, RV); @@ -1799,184 +1739,201 @@ raw_ostream &llvm::operator<<(raw_ostream &OS, const Record &R) { } /// getValueInit - Return the initializer for a value with the specified name, -/// or throw an exception if the field does not exist. +/// or abort if the field does not exist. 
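With this change the Record::getValueAs* accessors below no longer throw; failures go through PrintFatalError, which (as defined earlier in this patch) prints the diagnostic and calls std::exit(1). A sketch of the resulting caller side; emitEntry, the field names, and the output format are hypothetical:

    #include "llvm/TableGen/Record.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    // Any missing or mistyped field now terminates tblgen with a diagnostic
    // instead of unwinding an exception, so backends need no try/catch.
    static void emitEntry(const Record &R, raw_ostream &OS) {
      std::string Name = R.getValueAsString("Name"); // aborts if absent/not a string
      int64_t Size = R.getValueAsInt("Size");        // aborts if absent/not an int
      OS << Name << " = " << Size << "\n";
    }
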
/// Init *Record::getValueInit(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (R == 0 || R->getValue() == 0) - throw "Record `" + getName() + "' does not have a field named `" + - FieldName.str() + "'!\n"; + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); return R->getValue(); } /// getValueAsString - This method looks up the specified field and returns its -/// value as a string, throwing an exception if the field does not exist or if +/// value as a string, aborts if the field does not exist or if /// the value is not a string. /// std::string Record::getValueAsString(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (R == 0 || R->getValue() == 0) - throw "Record `" + getName() + "' does not have a field named `" + - FieldName.str() + "'!\n"; + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); - if (StringInit *SI = dynamic_cast(R->getValue())) + if (StringInit *SI = dyn_cast(R->getValue())) return SI->getValue(); - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have a string initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a string initializer!"); } /// getValueAsBitsInit - This method looks up the specified field and returns -/// its value as a BitsInit, throwing an exception if the field does not exist -/// or if the value is not the right type. +/// its value as a BitsInit, aborts if the field does not exist or if +/// the value is not the right type. /// BitsInit *Record::getValueAsBitsInit(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (R == 0 || R->getValue() == 0) - throw "Record `" + getName() + "' does not have a field named `" + - FieldName.str() + "'!\n"; + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); - if (BitsInit *BI = dynamic_cast(R->getValue())) + if (BitsInit *BI = dyn_cast(R->getValue())) return BI; - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have a BitsInit initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a BitsInit initializer!"); } /// getValueAsListInit - This method looks up the specified field and returns -/// its value as a ListInit, throwing an exception if the field does not exist -/// or if the value is not the right type. +/// its value as a ListInit, aborting if the field does not exist or if +/// the value is not the right type. 
/// ListInit *Record::getValueAsListInit(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (R == 0 || R->getValue() == 0) - throw "Record `" + getName() + "' does not have a field named `" + - FieldName.str() + "'!\n"; + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); - if (ListInit *LI = dynamic_cast(R->getValue())) + if (ListInit *LI = dyn_cast(R->getValue())) return LI; - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have a list initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a list initializer!"); } /// getValueAsListOfDefs - This method looks up the specified field and returns -/// its value as a vector of records, throwing an exception if the field does -/// not exist or if the value is not the right type. +/// its value as a vector of records, aborting if the field does not exist +/// or if the value is not the right type. /// std::vector Record::getValueAsListOfDefs(StringRef FieldName) const { ListInit *List = getValueAsListInit(FieldName); std::vector Defs; for (unsigned i = 0; i < List->getSize(); i++) { - if (DefInit *DI = dynamic_cast(List->getElement(i))) { + if (DefInit *DI = dyn_cast(List->getElement(i))) { Defs.push_back(DI->getDef()); } else { - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' list is not entirely DefInit!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' list is not entirely DefInit!"); } } return Defs; } /// getValueAsInt - This method looks up the specified field and returns its -/// value as an int64_t, throwing an exception if the field does not exist or if -/// the value is not the right type. +/// value as an int64_t, aborting if the field does not exist or if the value +/// is not the right type. /// int64_t Record::getValueAsInt(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (R == 0 || R->getValue() == 0) - throw "Record `" + getName() + "' does not have a field named `" + - FieldName.str() + "'!\n"; + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); - if (IntInit *II = dynamic_cast(R->getValue())) + if (IntInit *II = dyn_cast(R->getValue())) return II->getValue(); - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have an int initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have an int initializer!"); } /// getValueAsListOfInts - This method looks up the specified field and returns -/// its value as a vector of integers, throwing an exception if the field does -/// not exist or if the value is not the right type. +/// its value as a vector of integers, aborting if the field does not exist or +/// if the value is not the right type. 
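Callers of these accessors can drop their exception handling entirely; a hedged sketch of a backend-style consumer, where the "Predicates" field name is hypothetical:

  #include "llvm/TableGen/Record.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  static void listPredicateNames(const Record &R) {
    // Aborts via PrintFatalError if the field is missing or mistyped, so the
    // loop below only ever sees well-formed records.
    std::vector<Record*> Preds = R.getValueAsListOfDefs("Predicates");
    for (unsigned i = 0, e = Preds.size(); i != e; ++i)
      outs() << Preds[i]->getName() << "\n";
  }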
/// std::vector Record::getValueAsListOfInts(StringRef FieldName) const { ListInit *List = getValueAsListInit(FieldName); std::vector Ints; for (unsigned i = 0; i < List->getSize(); i++) { - if (IntInit *II = dynamic_cast(List->getElement(i))) { + if (IntInit *II = dyn_cast(List->getElement(i))) { Ints.push_back(II->getValue()); } else { - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have a list of ints initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a list of ints initializer!"); } } return Ints; } /// getValueAsListOfStrings - This method looks up the specified field and -/// returns its value as a vector of strings, throwing an exception if the -/// field does not exist or if the value is not the right type. +/// returns its value as a vector of strings, aborting if the field does not +/// exist or if the value is not the right type. /// std::vector Record::getValueAsListOfStrings(StringRef FieldName) const { ListInit *List = getValueAsListInit(FieldName); std::vector Strings; for (unsigned i = 0; i < List->getSize(); i++) { - if (StringInit *II = dynamic_cast(List->getElement(i))) { + if (StringInit *II = dyn_cast(List->getElement(i))) { Strings.push_back(II->getValue()); } else { - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have a list of strings initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a list of strings initializer!"); } } return Strings; } /// getValueAsDef - This method looks up the specified field and returns its -/// value as a Record, throwing an exception if the field does not exist or if -/// the value is not the right type. +/// value as a Record, aborting if the field does not exist or if the value +/// is not the right type. /// Record *Record::getValueAsDef(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (R == 0 || R->getValue() == 0) - throw "Record `" + getName() + "' does not have a field named `" + - FieldName.str() + "'!\n"; + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); - if (DefInit *DI = dynamic_cast(R->getValue())) + if (DefInit *DI = dyn_cast(R->getValue())) return DI->getDef(); - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have a def initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a def initializer!"); } /// getValueAsBit - This method looks up the specified field and returns its -/// value as a bit, throwing an exception if the field does not exist or if -/// the value is not the right type. +/// value as a bit, aborting if the field does not exist or if the value is +/// not the right type. 
/// bool Record::getValueAsBit(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (R == 0 || R->getValue() == 0) - throw "Record `" + getName() + "' does not have a field named `" + - FieldName.str() + "'!\n"; + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); - if (BitInit *BI = dynamic_cast(R->getValue())) + if (BitInit *BI = dyn_cast(R->getValue())) return BI->getValue(); - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have a bit initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a bit initializer!"); +} + +bool Record::getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const { + const RecordVal *R = getValue(FieldName); + if (R == 0 || R->getValue() == 0) + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); + + if (R->getValue() == UnsetInit::get()) { + Unset = true; + return false; + } + Unset = false; + if (BitInit *BI = dyn_cast(R->getValue())) + return BI->getValue(); + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a bit initializer!"); } /// getValueAsDag - This method looks up the specified field and returns its -/// value as an Dag, throwing an exception if the field does not exist or if -/// the value is not the right type. +/// value as an Dag, aborting if the field does not exist or if the value is +/// not the right type. /// DagInit *Record::getValueAsDag(StringRef FieldName) const { const RecordVal *R = getValue(FieldName); if (R == 0 || R->getValue() == 0) - throw "Record `" + getName() + "' does not have a field named `" + - FieldName.str() + "'!\n"; + PrintFatalError(getLoc(), "Record `" + getName() + + "' does not have a field named `" + FieldName.str() + "'!\n"); - if (DagInit *DI = dynamic_cast(R->getValue())) + if (DagInit *DI = dyn_cast(R->getValue())) return DI; - throw "Record `" + getName() + "', field `" + FieldName.str() + - "' does not have a dag initializer!"; + PrintFatalError(getLoc(), "Record `" + getName() + "', field `" + + FieldName.str() + "' does not have a dag initializer!"); } @@ -2019,7 +1976,7 @@ std::vector RecordKeeper::getAllDerivedDefinitions(const std::string &ClassName) const { Record *Class = getClass(ClassName); if (!Class) - throw "ERROR: Couldn't find the `" + ClassName + "' class!\n"; + PrintFatalError("ERROR: Couldn't find the `" + ClassName + "' class!\n"); std::vector Defs; for (std::map::const_iterator I = getDefs().begin(), @@ -2034,7 +1991,7 @@ RecordKeeper::getAllDerivedDefinitions(const std::string &ClassName) const { /// to CurRec's name. Init *llvm::QualifyName(Record &CurRec, MultiClass *CurMultiClass, Init *Name, const std::string &Scoper) { - RecTy *Type = dynamic_cast(Name)->getType(); + RecTy *Type = dyn_cast(Name)->getType(); BinOpInit *NewName = BinOpInit::get(BinOpInit::STRCONCAT, diff --git a/lib/TableGen/TGParser.cpp b/lib/TableGen/TGParser.cpp index b9c7ff6..b1f9f72 100644 --- a/lib/TableGen/TGParser.cpp +++ b/lib/TableGen/TGParser.cpp @@ -93,7 +93,7 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName, // Do not allow assignments like 'X = X'. This will just cause infinite loops // in the resolution machinery. 
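getValueAsBitOrUnset is new in this patch: unlike getValueAsBit it distinguishes an explicitly uninitialized bit from false. A short sketch of the intended use; the "Predicable" field name is hypothetical:

  #include "llvm/TableGen/Record.h"
  using namespace llvm;

  static bool predicableOrDefault(const Record &R, bool Default) {
    bool Unset = false;
    bool Value = R.getValueAsBitOrUnset("Predicable", Unset);
    // An unset field is not an error here; the backend can substitute its
    // own default instead of aborting.
    return Unset ? Default : Value;
  }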
if (BitList.empty()) - if (VarInit *VI = dynamic_cast(V)) + if (VarInit *VI = dyn_cast(V)) if (VI->getNameInit() == ValName) return false; @@ -102,7 +102,7 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName, // initializer. // if (!BitList.empty()) { - BitsInit *CurVal = dynamic_cast(RV->getValue()); + BitsInit *CurVal = dyn_cast(RV->getValue()); if (CurVal == 0) return Error(Loc, "Value '" + ValName->getAsUnquotedString() + "' is not a bits type"); @@ -110,12 +110,11 @@ bool TGParser::SetValue(Record *CurRec, SMLoc Loc, Init *ValName, // Convert the incoming value to a bits type of the appropriate size... Init *BI = V->convertInitializerTo(BitsRecTy::get(BitList.size())); if (BI == 0) { - V->convertInitializerTo(BitsRecTy::get(BitList.size())); return Error(Loc, "Initializer is not compatible with bit range"); } // We should have a BitsInit type now. - BitsInit *BInit = dynamic_cast(BI); + BitsInit *BInit = dyn_cast(BI); assert(BInit != 0); SmallVector NewBits(CurVal->getNumBits()); @@ -311,7 +310,7 @@ bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals){ if (IterVals.size() != Loops.size()) { assert(IterVals.size() < Loops.size()); ForeachLoop &CurLoop = Loops[IterVals.size()]; - ListInit *List = dynamic_cast(CurLoop.ListValue); + ListInit *List = dyn_cast(CurLoop.ListValue); if (List == 0) { Error(Loc, "Loop list is not a list"); return true; @@ -336,7 +335,7 @@ bool TGParser::ProcessForeachDefs(Record *CurRec, SMLoc Loc, IterSet &IterVals){ // Set the iterator values now. for (unsigned i = 0, e = IterVals.size(); i != e; ++i) { VarInit *IterVar = IterVals[i].IterVar; - TypedInit *IVal = dynamic_cast(IterVals[i].IterValue); + TypedInit *IVal = dyn_cast(IterVals[i].IterValue); if (IVal == 0) { Error(Loc, "foreach iterator value is untyped"); return true; @@ -407,8 +406,7 @@ Init *TGParser::ParseObjectName(MultiClass *CurMultiClass) { RecTy *Type = 0; if (CurRec) { - const TypedInit *CurRecName = - dynamic_cast(CurRec->getNameInit()); + const TypedInit *CurRecName = dyn_cast(CurRec->getNameInit()); if (!CurRecName) { TokError("Record name is not typed!"); return 0; @@ -781,7 +779,7 @@ Init *TGParser::ParseIDValue(Record *CurRec, for (LoopVector::iterator i = Loops.begin(), iend = Loops.end(); i != iend; ++i) { - VarInit *IterVar = dynamic_cast(i->IterVar); + VarInit *IterVar = dyn_cast(i->IterVar); if (IterVar && IterVar->getName() == Name) return IterVar; } @@ -856,16 +854,16 @@ Init *TGParser::ParseOperation(Record *CurRec) { if (Code == UnOpInit::HEAD || Code == UnOpInit::TAIL || Code == UnOpInit::EMPTY) { - ListInit *LHSl = dynamic_cast(LHS); - StringInit *LHSs = dynamic_cast(LHS); - TypedInit *LHSt = dynamic_cast(LHS); + ListInit *LHSl = dyn_cast(LHS); + StringInit *LHSs = dyn_cast(LHS); + TypedInit *LHSt = dyn_cast(LHS); if (LHSl == 0 && LHSs == 0 && LHSt == 0) { TokError("expected list or string type argument in unary operator"); return 0; } if (LHSt) { - ListRecTy *LType = dynamic_cast(LHSt->getType()); - StringRecTy *SType = dynamic_cast(LHSt->getType()); + ListRecTy *LType = dyn_cast(LHSt->getType()); + StringRecTy *SType = dyn_cast(LHSt->getType()); if (LType == 0 && SType == 0) { TokError("expected list or string type argumnet in unary operator"); return 0; @@ -885,7 +883,7 @@ Init *TGParser::ParseOperation(Record *CurRec) { } if (LHSl) { Init *Item = LHSl->getElement(0); - TypedInit *Itemt = dynamic_cast(Item); + TypedInit *Itemt = dyn_cast(Item); if (Itemt == 0) { TokError("untyped list element in unary operator"); return 0; @@ 
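The SetValue hunk above drops a redundant convertInitializerTo call and keeps the single checked conversion. A sketch of that conversion step in isolation, assuming the TableGen Record classes; asBits is illustrative only:

  #include "llvm/TableGen/Record.h"
  using namespace llvm;

  static BitsInit *asBits(Init *V, unsigned NumBits) {
    Init *BI = V->convertInitializerTo(BitsRecTy::get(NumBits));
    if (BI == 0)
      return 0; // caller reports "Initializer is not compatible with bit range"
    // A successful conversion to a bits type is expected to yield a BitsInit.
    return dyn_cast<BitsInit>(BI);
  }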
-897,7 +895,7 @@ Init *TGParser::ParseOperation(Record *CurRec) { } } else { assert(LHSt && "expected list type argument in unary operator"); - ListRecTy *LType = dynamic_cast(LHSt->getType()); + ListRecTy *LType = dyn_cast(LHSt->getType()); if (LType == 0) { TokError("expected list type argumnet in unary operator"); return 0; @@ -1044,35 +1042,28 @@ Init *TGParser::ParseOperation(Record *CurRec) { switch (LexCode) { default: llvm_unreachable("Unhandled code!"); case tgtok::XIf: { - // FIXME: The `!if' operator doesn't handle non-TypedInit well at - // all. This can be made much more robust. - TypedInit *MHSt = dynamic_cast(MHS); - TypedInit *RHSt = dynamic_cast(RHS); - RecTy *MHSTy = 0; RecTy *RHSTy = 0; - if (MHSt == 0 && RHSt == 0) { - BitsInit *MHSbits = dynamic_cast(MHS); - BitsInit *RHSbits = dynamic_cast(RHS); - - if (MHSbits && RHSbits && - MHSbits->getNumBits() == RHSbits->getNumBits()) { - Type = BitRecTy::get(); - break; - } else { - BitInit *MHSbit = dynamic_cast(MHS); - BitInit *RHSbit = dynamic_cast(RHS); - - if (MHSbit && RHSbit) { - Type = BitRecTy::get(); - break; - } - } - } else if (MHSt != 0 && RHSt != 0) { + if (TypedInit *MHSt = dyn_cast(MHS)) MHSTy = MHSt->getType(); + if (BitsInit *MHSbits = dyn_cast(MHS)) + MHSTy = BitsRecTy::get(MHSbits->getNumBits()); + if (isa(MHS)) + MHSTy = BitRecTy::get(); + + if (TypedInit *RHSt = dyn_cast(RHS)) RHSTy = RHSt->getType(); - } + if (BitsInit *RHSbits = dyn_cast(RHS)) + RHSTy = BitsRecTy::get(RHSbits->getNumBits()); + if (isa(RHS)) + RHSTy = BitRecTy::get(); + + // For UnsetInit, it's typed from the other hand. + if (isa(MHS)) + MHSTy = RHSTy; + if (isa(RHS)) + RHSTy = MHSTy; if (!MHSTy || !RHSTy) { TokError("could not get type for !if"); @@ -1090,7 +1081,7 @@ Init *TGParser::ParseOperation(Record *CurRec) { break; } case tgtok::XForEach: { - TypedInit *MHSt = dynamic_cast(MHS); + TypedInit *MHSt = dyn_cast(MHS); if (MHSt == 0) { TokError("could not get type for !foreach"); return 0; @@ -1099,7 +1090,7 @@ Init *TGParser::ParseOperation(Record *CurRec) { break; } case tgtok::XSubst: { - TypedInit *RHSt = dynamic_cast(RHS); + TypedInit *RHSt = dyn_cast(RHS); if (RHSt == 0) { TokError("could not get type for !subst"); return 0; @@ -1278,7 +1269,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType, ListRecTy *GivenListTy = 0; if (ItemType != 0) { - ListRecTy *ListType = dynamic_cast(ItemType); + ListRecTy *ListType = dyn_cast(ItemType); if (ListType == 0) { std::stringstream s; s << "Type mismatch for list, expected list type, got " @@ -1323,7 +1314,7 @@ Init *TGParser::ParseSimpleValue(Record *CurRec, RecTy *ItemType, for (std::vector::iterator i = Vals.begin(), ie = Vals.end(); i != ie; ++i) { - TypedInit *TArg = dynamic_cast(*i); + TypedInit *TArg = dyn_cast(*i); if (TArg == 0) { TokError("Untyped list element"); return 0; @@ -1506,7 +1497,7 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) { // Create a !strconcat() operation, first casting each operand to // a string if necessary. 
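The rewritten !if case computes a type for each arm independently and lets an UnsetInit arm borrow the type of the other arm. A compact restatement of the per-arm step; typeOfIfArm is a hypothetical helper, not a TGParser function:

  #include "llvm/TableGen/Record.h"
  using namespace llvm;

  static RecTy *typeOfIfArm(Init *I) {
    if (TypedInit *TI = dyn_cast<TypedInit>(I))
      return TI->getType();
    if (BitsInit *BI = dyn_cast<BitsInit>(I))
      return BitsRecTy::get(BI->getNumBits());
    if (isa<BitInit>(I))
      return BitRecTy::get();
    // UnsetInit (and anything else untypable) is handled by the caller, which
    // copies the type from the other arm; !if fails only when both arms stay
    // untyped.
    return 0;
  }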
- TypedInit *LHS = dynamic_cast(Result); + TypedInit *LHS = dyn_cast(Result); if (!LHS) { Error(PasteLoc, "LHS of paste is not typed!"); return 0; @@ -1533,7 +1524,7 @@ Init *TGParser::ParseValue(Record *CurRec, RecTy *ItemType, IDParseMode Mode) { default: Init *RHSResult = ParseValue(CurRec, ItemType, ParseNameMode); - RHS = dynamic_cast(RHSResult); + RHS = dyn_cast(RHSResult); if (!RHS) { Error(PasteLoc, "RHS of paste is not typed!"); return 0; @@ -1724,13 +1715,13 @@ VarInit *TGParser::ParseForeachDeclaration(ListInit *&ForeachListValue) { default: TokError("Unknown token when expecting a range list"); return 0; case tgtok::l_square: { // '[' ValueList ']' Init *List = ParseSimpleValue(0, 0, ParseForeachMode); - ForeachListValue = dynamic_cast(List); + ForeachListValue = dyn_cast(List); if (ForeachListValue == 0) { TokError("Expected a Value list"); return 0; } RecTy *ValueType = ForeachListValue->getType(); - ListRecTy *ListType = dynamic_cast(ValueType); + ListRecTy *ListType = dyn_cast(ValueType); if (ListType == 0) { TokError("Value list is not of list type"); return 0; @@ -2265,7 +2256,7 @@ InstantiateMulticlassDef(MultiClass &MC, Init *DefName = DefProto->getNameInit(); - StringInit *DefNameString = dynamic_cast(DefName); + StringInit *DefNameString = dyn_cast(DefName); if (DefNameString != 0) { // We have a fully expanded string so there are no operators to @@ -2277,7 +2268,10 @@ InstantiateMulticlassDef(MultiClass &MC, DefName, StringRecTy::get())->Fold(DefProto, &MC); } - Record *CurRec = new Record(DefName, DefmPrefixLoc, Records); + // Make a trail of SMLocs from the multiclass instantiations. + SmallVector Locs(1, DefmPrefixLoc); + Locs.append(DefProto->getLoc().begin(), DefProto->getLoc().end()); + Record *CurRec = new Record(DefName, Locs, Records); SubClassReference Ref; Ref.RefLoc = DefmPrefixLoc; diff --git a/lib/TableGen/TGParser.h b/lib/TableGen/TGParser.h index 3d2c72c..9c2ad43 100644 --- a/lib/TableGen/TGParser.h +++ b/lib/TableGen/TGParser.h @@ -30,7 +30,7 @@ namespace llvm { struct MultiClass; struct SubClassReference; struct SubMultiClassReference; - + struct LetRecord { std::string Name; std::vector Bits; @@ -41,7 +41,7 @@ namespace llvm { : Name(N), Bits(B), Value(V), Loc(L) { } }; - + /// ForeachLoop - Record the iteration state associated with a for loop. /// This is used to instantiate items in the loop body. struct ForeachLoop { @@ -56,13 +56,13 @@ class TGParser { TGLexer Lex; std::vector > LetStack; std::map MultiClasses; - + /// Loops - Keep track of any foreach loops we are within. /// typedef std::vector LoopVector; LoopVector Loops; - /// CurMultiClass - If we are parsing a 'multiclass' definition, this is the + /// CurMultiClass - If we are parsing a 'multiclass' definition, this is the /// current value. MultiClass *CurMultiClass; @@ -82,13 +82,13 @@ class TGParser { }; public: - TGParser(SourceMgr &SrcMgr, RecordKeeper &records) : + TGParser(SourceMgr &SrcMgr, RecordKeeper &records) : Lex(SrcMgr), CurMultiClass(0), Records(records) {} - + /// ParseFile - Main entrypoint for parsing a tblgen file. These parser /// routines return true on error, or false on success. bool ParseFile(); - + bool Error(SMLoc L, const Twine &Msg) const { PrintError(L, Msg); return true; @@ -102,9 +102,9 @@ public: private: // Semantic analysis methods. 
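InstantiateMulticlassDef now records a trail of locations for each instantiated def: the defm site first, then the prototype's own locations. A small sketch of building that trail; makeLocTrail mirrors the hunk but is not a TGParser function:

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/Support/SMLoc.h"
  using namespace llvm;

  static SmallVector<SMLoc, 4> makeLocTrail(SMLoc DefmPrefixLoc,
                                            ArrayRef<SMLoc> ProtoLocs) {
    // The defm site comes first so diagnostics point at the instantiation,
    // followed by the multiclass prototype's locations.
    SmallVector<SMLoc, 4> Locs(1, DefmPrefixLoc);
    Locs.append(ProtoLocs.begin(), ProtoLocs.end());
    return Locs;
  }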
bool AddValue(Record *TheRec, SMLoc Loc, const RecordVal &RV); - bool SetValue(Record *TheRec, SMLoc Loc, Init *ValName, + bool SetValue(Record *TheRec, SMLoc Loc, Init *ValName, const std::vector &BitList, Init *V); - bool SetValue(Record *TheRec, SMLoc Loc, const std::string &ValName, + bool SetValue(Record *TheRec, SMLoc Loc, const std::string &ValName, const std::vector &BitList, Init *V) { return SetValue(TheRec, Loc, StringInit::get(ValName), BitList, V); } @@ -170,7 +170,8 @@ private: // Parser methods. IDParseMode Mode = ParseValueMode); Init *ParseValue(Record *CurRec, RecTy *ItemType = 0, IDParseMode Mode = ParseValueMode); - std::vector ParseValueList(Record *CurRec, Record *ArgsRec = 0, RecTy *EltTy = 0); + std::vector ParseValueList(Record *CurRec, Record *ArgsRec = 0, + RecTy *EltTy = 0); std::vector > ParseDagArgList(Record *); bool ParseOptionalRangeList(std::vector &Ranges); bool ParseOptionalBitList(std::vector &Ranges); @@ -184,7 +185,7 @@ private: // Parser methods. MultiClass *ParseMultiClassID(); Record *ParseDefmID(); }; - + } // end namespace llvm #endif diff --git a/lib/TableGen/TableGenAction.cpp b/lib/TableGen/TableGenAction.cpp deleted file mode 100644 index 54e5083..0000000 --- a/lib/TableGen/TableGenAction.cpp +++ /dev/null @@ -1,15 +0,0 @@ -//===- TableGenAction.cpp - defines TableGenAction --------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// - -#include "llvm/TableGen/TableGenAction.h" - -using namespace llvm; - -void TableGenAction::anchor() { } - diff --git a/lib/Target/ARM/ARM.h b/lib/Target/ARM/ARM.h index 2a1e8e4..1446bbb 100644 --- a/lib/Target/ARM/ARM.h +++ b/lib/Target/ARM/ARM.h @@ -37,6 +37,7 @@ FunctionPass *createARMJITCodeEmitterPass(ARMBaseTargetMachine &TM, FunctionPass *createARMLoadStoreOptimizationPass(bool PreAlloc = false); FunctionPass *createARMExpandPseudoPass(); +FunctionPass *createARMGlobalBaseRegPass(); FunctionPass *createARMGlobalMergePass(const TargetLowering* tli); FunctionPass *createARMConstantIslandPass(); FunctionPass *createMLxExpansionPass(); diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td index 69e2346..23974ad 100644 --- a/lib/Target/ARM/ARM.td +++ b/lib/Target/ARM/ARM.td @@ -32,9 +32,6 @@ def FeatureVFP2 : SubtargetFeature<"vfp2", "HasVFPv2", "true", def FeatureVFP3 : SubtargetFeature<"vfp3", "HasVFPv3", "true", "Enable VFP3 instructions", [FeatureVFP2]>; -def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true", - "Enable VFP4 instructions", - [FeatureVFP3]>; def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true", "Enable NEON instructions", [FeatureVFP3]>; @@ -44,10 +41,16 @@ def FeatureNoARM : SubtargetFeature<"noarm", "NoARM", "true", "Does not support ARM mode execution">; def FeatureFP16 : SubtargetFeature<"fp16", "HasFP16", "true", "Enable half-precision floating point">; +def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true", + "Enable VFP4 instructions", + [FeatureVFP3, FeatureFP16]>; def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true", "Restrict VFP3 to 16 double registers">; def FeatureHWDiv : SubtargetFeature<"hwdiv", "HasHardwareDivide", "true", "Enable divide instructions">; +def FeatureHWDivARM : SubtargetFeature<"hwdiv-arm", + "HasHardwareDivideInARM", "true", + "Enable divide instructions in ARM mode">; def FeatureT2XtPk : 
SubtargetFeature<"t2xtpk", "HasT2ExtractPack", "true", "Enable Thumb2 extract and pack instructions">; def FeatureDB : SubtargetFeature<"db", "HasDataBarrier", "true", @@ -139,6 +142,18 @@ def ProcA9 : SubtargetFeature<"a9", "ARMProcFamily", "CortexA9", [FeatureVMLxForwarding, FeatureT2XtPk, FeatureFP16, FeatureAvoidPartialCPSR]>; +def ProcSwift : SubtargetFeature<"swift", "ARMProcFamily", "Swift", + "Swift ARM processors", + [FeatureNEONForFP, FeatureT2XtPk, + FeatureVFP4, FeatureMP, FeatureHWDiv, + FeatureHWDivARM, FeatureAvoidPartialCPSR, + FeatureHasSlowFPVMLx]>; + +// FIXME: It has not been determined if A15 has these features. +def ProcA15 : SubtargetFeature<"a15", "ARMProcFamily", "CortexA15", + "Cortex-A15 ARM processors", + [FeatureT2XtPk, FeatureFP16, + FeatureAvoidPartialCPSR]>; class ProcNoItin Features> : Processor; @@ -214,6 +229,10 @@ def : ProcessorModel<"cortex-a9-mp", CortexA9Model, [ProcA9, HasV7Ops, FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureMP, FeatureHasRAS]>; +// FIXME: A15 has currently the same ProcessorModel as A9. +def : ProcessorModel<"cortex-a15", CortexA9Model, + [ProcA15, HasV7Ops, FeatureNEON, FeatureDB, + FeatureDSPThumb2, FeatureHasRAS]>; // V7M Processors. def : ProcNoItin<"cortex-m3", [HasV7Ops, @@ -227,6 +246,12 @@ def : ProcNoItin<"cortex-m4", [HasV7Ops, FeatureT2XtPk, FeatureVFP4, FeatureVFPOnlySP, FeatureMClass]>; +// Swift uArch Processors. +def : ProcessorModel<"swift", SwiftModel, + [ProcSwift, HasV7Ops, FeatureNEON, + FeatureDB, FeatureDSPThumb2, + FeatureHasRAS]>; + //===----------------------------------------------------------------------===// // Register File Description //===----------------------------------------------------------------------===// diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index e9e2803..d439d1d 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -23,6 +23,8 @@ #include "InstPrinter/ARMInstPrinter.h" #include "MCTargetDesc/ARMAddressingModes.h" #include "MCTargetDesc/ARMMCExpr.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallString.h" #include "llvm/Constants.h" #include "llvm/DebugInfo.h" #include "llvm/Module.h" @@ -40,9 +42,8 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/ADT/SmallString.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -302,7 +303,7 @@ void ARMAsmPrinter::EmitFunctionEntryLabel() { } void ARMAsmPrinter::EmitXXStructor(const Constant *CV) { - uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType()); + uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType()); assert(Size && "C++ constructor pointer had zero size!"); const GlobalValue *GV = dyn_cast(CV->stripPointerCasts()); @@ -389,16 +390,6 @@ void ARMAsmPrinter::printOperand(const MachineInstr *MI, int OpNum, //===--------------------------------------------------------------------===// MCSymbol *ARMAsmPrinter:: -GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2, - const MachineBasicBlock *MBB) const { - SmallString<60> Name; - raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix() - << getFunctionNumber() << '_' << uid << '_' << uid2 - << "_set_" << MBB->getNumber(); - return OutContext.GetOrCreateSymbol(Name.str()); -} - -MCSymbol *ARMAsmPrinter:: GetARMJTIPICJumpTableLabel2(unsigned uid, 
unsigned uid2) const { SmallString<60> Name; raw_svector_ostream(Name) << MAI->getPrivateGlobalPrefix() << "JTI" @@ -592,9 +583,24 @@ void ARMAsmPrinter::EmitStartOfAsmFile(Module &M) { const TargetLoweringObjectFileMachO &TLOFMacho = static_cast( getObjFileLowering()); - OutStreamer.SwitchSection(TLOFMacho.getTextSection()); - OutStreamer.SwitchSection(TLOFMacho.getTextCoalSection()); - OutStreamer.SwitchSection(TLOFMacho.getConstTextCoalSection()); + + // Collect the set of sections our functions will go into. + SetVector, + SmallPtrSet > TextSections; + // Default text section comes first. + TextSections.insert(TLOFMacho.getTextSection()); + // Now any user defined text sections from function attributes. + for (Module::iterator F = M.begin(), e = M.end(); F != e; ++F) + if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage()) + TextSections.insert(TLOFMacho.SectionForGlobal(F, Mang, TM)); + // Now the coalescable sections. + TextSections.insert(TLOFMacho.getTextCoalSection()); + TextSections.insert(TLOFMacho.getConstTextCoalSection()); + + // Emit the sections in the .s file header to fix the order. + for (unsigned i = 0, e = TextSections.size(); i != e; ++i) + OutStreamer.SwitchSection(TextSections[i]); + if (RelocM == Reloc::DynamicNoPIC) { const MCSection *sect = OutContext.getMachOSection("__TEXT", "__symbol_stub4", @@ -743,13 +749,28 @@ void ARMAsmPrinter::emitAttributes() { AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use, ARMBuildAttrs::Allowed); } else if (CPUString == "generic") { - // FIXME: Why these defaults? - AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v4T); + // For a generic CPU, we assume a standard v7a architecture in Subtarget. + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7); + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch_profile, + ARMBuildAttrs::ApplicationProfile); AttrEmitter->EmitAttribute(ARMBuildAttrs::ARM_ISA_use, ARMBuildAttrs::Allowed); AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use, - ARMBuildAttrs::Allowed); - } + ARMBuildAttrs::AllowThumb32); + } else if (Subtarget->hasV7Ops()) { + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v7); + AttrEmitter->EmitAttribute(ARMBuildAttrs::THUMB_ISA_use, + ARMBuildAttrs::AllowThumb32); + } else if (Subtarget->hasV6T2Ops()) + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v6T2); + else if (Subtarget->hasV6Ops()) + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v6); + else if (Subtarget->hasV5TEOps()) + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v5TE); + else if (Subtarget->hasV5TOps()) + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v5T); + else if (Subtarget->hasV4TOps()) + AttrEmitter->EmitAttribute(ARMBuildAttrs::CPU_arch, ARMBuildAttrs::v4T); if (Subtarget->hasNEON() && emitFPU) { /* NEON is not exactly a VFP architecture, but GAS emit one of @@ -893,7 +914,7 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV) { void ARMAsmPrinter:: EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) { - int Size = TM.getTargetData()->getTypeAllocSize(MCPV->getType()); + int Size = TM.getDataLayout()->getTypeAllocSize(MCPV->getType()); ARMConstantPoolValue *ACPV = static_cast(MCPV); @@ -1091,16 +1112,6 @@ static void populateADROperands(MCInst &Inst, unsigned Dest, Inst.addOperand(MCOperand::CreateReg(ccreg)); } -void ARMAsmPrinter::EmitPatchedInstruction(const MachineInstr *MI, - unsigned Opcode) { - MCInst TmpInst; - - // Emit 
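EmitStartOfAsmFile now gathers the function text sections in a SetVector, so each section is emitted once, in first-insertion order. A standalone sketch of that behaviour using plain ints in place of MCSection pointers:

  #include "llvm/ADT/SetVector.h"
  #include <cstdio>

  int main() {
    llvm::SetVector<int> Sections;
    Sections.insert(1); // default text section first
    Sections.insert(3); // a user-defined section
    Sections.insert(1); // duplicate: dropped, original position kept
    Sections.insert(2); // coalescable sections last
    for (unsigned i = 0, e = Sections.size(); i != e; ++i)
      std::printf("%d\n", Sections[i]); // prints 1, 3, 2
    return 0;
  }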
the instruction as usual, just patch the opcode. - LowerARMMachineInstrToMCInst(MI, TmpInst, *this); - TmpInst.setOpcode(Opcode); - OutStreamer.EmitInstruction(TmpInst); -} - void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) { assert(MI->getFlag(MachineInstr::FrameSetup) && "Only instruction which are involved into frame setup code are allowed"); @@ -1402,31 +1413,6 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { } return; } - case ARM::t2BMOVPCB_CALL: { - { - MCInst TmpInst; - TmpInst.setOpcode(ARM::tMOVr); - TmpInst.addOperand(MCOperand::CreateReg(ARM::LR)); - TmpInst.addOperand(MCOperand::CreateReg(ARM::PC)); - // Add predicate operands. - TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL)); - TmpInst.addOperand(MCOperand::CreateReg(0)); - OutStreamer.EmitInstruction(TmpInst); - } - { - MCInst TmpInst; - TmpInst.setOpcode(ARM::t2B); - const GlobalValue *GV = MI->getOperand(0).getGlobal(); - MCSymbol *GVSym = Mang->getSymbol(GV); - const MCExpr *GVSymExpr = MCSymbolRefExpr::Create(GVSym, OutContext); - TmpInst.addOperand(MCOperand::CreateExpr(GVSymExpr)); - // Add predicate operands. - TmpInst.addOperand(MCOperand::CreateImm(ARMCC::AL)); - TmpInst.addOperand(MCOperand::CreateReg(0)); - OutStreamer.EmitInstruction(TmpInst); - } - return; - } case ARM::MOVi16_ga_pcrel: case ARM::t2MOVi16_ga_pcrel: { MCInst TmpInst; diff --git a/lib/Target/ARM/ARMAsmPrinter.h b/lib/Target/ARM/ARMAsmPrinter.h index 3555e8f5..c875b2c 100644 --- a/lib/Target/ARM/ARMAsmPrinter.h +++ b/lib/Target/ARM/ARMAsmPrinter.h @@ -53,7 +53,7 @@ public: Subtarget = &TM.getSubtarget(); } - virtual const char *getPassName() const { + virtual const char *getPassName() const LLVM_OVERRIDE { return "ARM Assembly Printer"; } @@ -62,22 +62,24 @@ public: virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, unsigned AsmVariant, const char *ExtraCode, - raw_ostream &O); + raw_ostream &O) LLVM_OVERRIDE; virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum, - unsigned AsmVariant, - const char *ExtraCode, raw_ostream &O); + unsigned AsmVariant, const char *ExtraCode, + raw_ostream &O) LLVM_OVERRIDE; void EmitJumpTable(const MachineInstr *MI); void EmitJump2Table(const MachineInstr *MI); - virtual void EmitInstruction(const MachineInstr *MI); - bool runOnMachineFunction(MachineFunction &F); + virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE; + virtual bool runOnMachineFunction(MachineFunction &F) LLVM_OVERRIDE; - virtual void EmitConstantPool() {} // we emit constant pools customly! - virtual void EmitFunctionBodyEnd(); - virtual void EmitFunctionEntryLabel(); - void EmitStartOfAsmFile(Module &M); - void EmitEndOfAsmFile(Module &M); - void EmitXXStructor(const Constant *CV); + virtual void EmitConstantPool() LLVM_OVERRIDE { + // we emit constant pools customly! + } + virtual void EmitFunctionBodyEnd() LLVM_OVERRIDE; + virtual void EmitFunctionEntryLabel() LLVM_OVERRIDE; + virtual void EmitStartOfAsmFile(Module &M) LLVM_OVERRIDE; + virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE; + virtual void EmitXXStructor(const Constant *CV) LLVM_OVERRIDE; // lowerOperand - Convert a MachineOperand into the equivalent MCOperand. 
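The ARMAsmPrinter.h hunks annotate every virtual override with LLVM_OVERRIDE, which expands to the C++11 override keyword when the host compiler supports it. A minimal illustration with throwaway classes, not taken from the patch:

  #include "llvm/Support/Compiler.h"

  struct Base {
    virtual const char *getPassName() const { return "base"; }
    virtual ~Base() {}
  };

  struct Derived : Base {
    // With a C++11 compiler this expands to 'override', so dropping 'const'
    // or misspelling the name becomes a compile error instead of silently
    // introducing a new virtual function.
    virtual const char *getPassName() const LLVM_OVERRIDE { return "derived"; }
  };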
bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp); @@ -101,12 +103,13 @@ private: public: void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS); - MachineLocation getDebugValueLocation(const MachineInstr *MI) const; + virtual MachineLocation + getDebugValueLocation(const MachineInstr *MI) const LLVM_OVERRIDE; /// EmitDwarfRegOp - Emit dwarf register operation. - virtual void EmitDwarfRegOp(const MachineLocation &MLoc) const; + virtual void EmitDwarfRegOp(const MachineLocation &MLoc) const LLVM_OVERRIDE; - virtual unsigned getISAEncoding() { + virtual unsigned getISAEncoding() LLVM_OVERRIDE { // ARM/Darwin adds ISA to the DWARF info for each function. if (!Subtarget->isTargetDarwin()) return 0; @@ -114,18 +117,19 @@ public: ARM::DW_ISA_ARM_thumb : ARM::DW_ISA_ARM_arm; } +private: MCOperand GetSymbolRef(const MachineOperand &MO, const MCSymbol *Symbol); - MCSymbol *GetARMSetPICJumpTableLabel2(unsigned uid, unsigned uid2, - const MachineBasicBlock *MBB) const; MCSymbol *GetARMJTIPICJumpTableLabel2(unsigned uid, unsigned uid2) const; MCSymbol *GetARMSJLJEHLabel(void) const; MCSymbol *GetARMGVSymbol(const GlobalValue *GV); +public: /// EmitMachineConstantPoolValue - Print a machine constantpool value to /// the .s file. - virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV); + virtual void + EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) LLVM_OVERRIDE; }; } // end namespace llvm diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 1cc5a17..3c7bb24 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -49,6 +49,11 @@ static cl::opt WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true), cl::desc("Widen ARM vmovs to vmovd when possible")); +static cl::opt +SwiftPartialUpdateClearance("swift-partial-update-clearance", + cl::Hidden, cl::init(12), + cl::desc("Clearance before partial register updates")); + /// ARM_MLxEntry - Record information about MLA / MLS instructions. struct ARM_MLxEntry { uint16_t MLxOpc; // MLA / MLS opcode @@ -683,7 +688,7 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, // Handle register classes that require multiple instructions. unsigned BeginIdx = 0; unsigned SubRegs = 0; - unsigned Spacing = 1; + int Spacing = 1; // Use VORRq when possible. if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) @@ -697,6 +702,8 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3; else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4; + else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) + Opc = ARM::MOVr, BeginIdx = ARM::gsub_0, SubRegs = 2; else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2, Spacing = 2; @@ -705,27 +712,38 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4, Spacing = 2; - if (Opc) { - const TargetRegisterInfo *TRI = &getRegisterInfo(); - MachineInstrBuilder Mov; - for (unsigned i = 0; i != SubRegs; ++i) { - unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing); - unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i*Spacing); - assert(Dst && Src && "Bad sub-register"); - Mov = AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst) - .addReg(Src)); - // VORR takes two source operands. 
- if (Opc == ARM::VORRq) - Mov.addReg(Src); - } - // Add implicit super-register defs and kills to the last instruction. - Mov->addRegisterDefined(DestReg, TRI); - if (KillSrc) - Mov->addRegisterKilled(SrcReg, TRI); - return; - } + assert(Opc && "Impossible reg-to-reg copy"); + + const TargetRegisterInfo *TRI = &getRegisterInfo(); + MachineInstrBuilder Mov; - llvm_unreachable("Impossible reg-to-reg copy"); + // Copy register tuples backward when the first Dest reg overlaps with SrcReg. + if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) { + BeginIdx = BeginIdx + ((SubRegs-1)*Spacing); + Spacing = -Spacing; + } +#ifndef NDEBUG + SmallSet DstRegs; +#endif + for (unsigned i = 0; i != SubRegs; ++i) { + unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing); + unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i*Spacing); + assert(Dst && Src && "Bad sub-register"); +#ifndef NDEBUG + assert(!DstRegs.count(Src) && "destructive vector copy"); + DstRegs.insert(Dst); +#endif + Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst) + .addReg(Src); + // VORR takes two source operands. + if (Opc == ARM::VORRq) + Mov.addReg(Src); + Mov = AddDefaultPred(Mov); + } + // Add implicit super-register defs and kills to the last instruction. + Mov->addRegisterDefined(DestReg, TRI); + if (KillSrc) + Mov->addRegisterKilled(SrcReg, TRI); } static const @@ -775,6 +793,13 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD)) .addReg(SrcReg, getKillRegState(isKill)) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); + } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) { + MachineInstrBuilder MIB = + AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STMIA)) + .addFrameIndex(FI)) + .addMemOperand(MMO); + MIB = AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI); + AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI); } else llvm_unreachable("Unknown reg class!"); break; @@ -922,6 +947,7 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL; if (I != MBB.end()) DL = I->getDebugLoc(); MachineFunction &MF = *MBB.getParent(); + ARMFunctionInfo *AFI = MF.getInfo(); MachineFrameInfo &MFI = *MF.getFrameInfo(); unsigned Align = MFI.getObjectAlignment(FI); MachineMemOperand *MMO = @@ -947,6 +973,15 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, if (ARM::DPRRegClass.hasSubClassEq(RC)) { AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg) .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); + } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) { + unsigned LdmOpc = AFI->isThumbFunction() ? 
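copyPhysReg now reverses the sub-register copy order whenever the first destination sub-register overlaps the source tuple, so no source lane is clobbered before it has been read. The same rule on a plain array, as a standalone illustration of the BeginIdx/Spacing flip:

  #include <cstdio>

  // Copy SubRegs elements from Src to Dst, walking backward when Dst starts
  // inside the source range.
  static void copyTuple(int *Dst, const int *Src, int SubRegs) {
    int Begin = 0, Spacing = 1;
    if (Src <= Dst && Dst <= Src + (SubRegs - 1)) {
      Begin = SubRegs - 1;
      Spacing = -1;
    }
    for (int i = 0; i != SubRegs; ++i)
      Dst[Begin + i * Spacing] = Src[Begin + i * Spacing];
  }

  int main() {
    int Regs[5] = {10, 11, 12, 13, 0};
    copyTuple(&Regs[1], &Regs[0], 4); // overlapping copy, done backward
    std::printf("%d %d %d %d\n", Regs[1], Regs[2], Regs[3], Regs[4]); // 10 11 12 13
    return 0;
  }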
ARM::t2LDMIA : ARM::LDMIA; + MachineInstrBuilder MIB = + AddDefaultPred(BuildMI(MBB, I, DL, get(LdmOpc)) + .addFrameIndex(FI).addImm(0).addMemOperand(MMO)); + MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI); + MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI); + if (TargetRegisterInfo::isPhysicalRegister(DestReg)) + MIB.addReg(DestReg, RegState::ImplicitDefine); } else llvm_unreachable("Unknown reg class!"); break; @@ -1378,7 +1413,6 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, case ARM::VLDRD: case ARM::VLDRS: case ARM::t2LDRi8: - case ARM::t2LDRDi8: case ARM::t2LDRSHi8: case ARM::t2LDRi12: case ARM::t2LDRSHi12: @@ -1517,6 +1551,14 @@ isProfitableToIfCvt(MachineBasicBlock &TMBB, return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost; } +bool +ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB, + MachineBasicBlock &FMBB) const { + // Reduce false anti-dependencies to let Swift's out-of-order execution + // engine do its thing. + return Subtarget.isSwift(); +} + /// getInstrPredicate - If instruction is predicated, returns its predicate /// condition, otherwise returns AL. It also returns the condition code /// register by reference. @@ -1569,71 +1611,41 @@ ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { } /// Identify instructions that can be folded into a MOVCC instruction, and -/// return the corresponding opcode for the predicated pseudo-instruction. -static unsigned canFoldIntoMOVCC(unsigned Reg, MachineInstr *&MI, - const MachineRegisterInfo &MRI) { +/// return the defining instruction. +static MachineInstr *canFoldIntoMOVCC(unsigned Reg, + const MachineRegisterInfo &MRI, + const TargetInstrInfo *TII) { if (!TargetRegisterInfo::isVirtualRegister(Reg)) return 0; if (!MRI.hasOneNonDBGUse(Reg)) return 0; - MI = MRI.getVRegDef(Reg); + MachineInstr *MI = MRI.getVRegDef(Reg); if (!MI) return 0; + // MI is folded into the MOVCC by predicating it. + if (!MI->isPredicable()) + return 0; // Check if MI has any non-dead defs or physreg uses. This also detects // predicated instructions which will be reading CPSR. for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); + // Reject frame index operands, PEI can't handle the predicated pseudos. + if (MO.isFI() || MO.isCPI() || MO.isJTI()) + return 0; if (!MO.isReg()) continue; + // MI can't have any tied operands, that would conflict with predication. 
+ if (MO.isTied()) + return 0; if (TargetRegisterInfo::isPhysicalRegister(MO.getReg())) return 0; if (MO.isDef() && !MO.isDead()) return 0; } - switch (MI->getOpcode()) { - default: return 0; - case ARM::ANDri: return ARM::ANDCCri; - case ARM::ANDrr: return ARM::ANDCCrr; - case ARM::ANDrsi: return ARM::ANDCCrsi; - case ARM::ANDrsr: return ARM::ANDCCrsr; - case ARM::t2ANDri: return ARM::t2ANDCCri; - case ARM::t2ANDrr: return ARM::t2ANDCCrr; - case ARM::t2ANDrs: return ARM::t2ANDCCrs; - case ARM::EORri: return ARM::EORCCri; - case ARM::EORrr: return ARM::EORCCrr; - case ARM::EORrsi: return ARM::EORCCrsi; - case ARM::EORrsr: return ARM::EORCCrsr; - case ARM::t2EORri: return ARM::t2EORCCri; - case ARM::t2EORrr: return ARM::t2EORCCrr; - case ARM::t2EORrs: return ARM::t2EORCCrs; - case ARM::ORRri: return ARM::ORRCCri; - case ARM::ORRrr: return ARM::ORRCCrr; - case ARM::ORRrsi: return ARM::ORRCCrsi; - case ARM::ORRrsr: return ARM::ORRCCrsr; - case ARM::t2ORRri: return ARM::t2ORRCCri; - case ARM::t2ORRrr: return ARM::t2ORRCCrr; - case ARM::t2ORRrs: return ARM::t2ORRCCrs; - - // ARM ADD/SUB - case ARM::ADDri: return ARM::ADDCCri; - case ARM::ADDrr: return ARM::ADDCCrr; - case ARM::ADDrsi: return ARM::ADDCCrsi; - case ARM::ADDrsr: return ARM::ADDCCrsr; - case ARM::SUBri: return ARM::SUBCCri; - case ARM::SUBrr: return ARM::SUBCCrr; - case ARM::SUBrsi: return ARM::SUBCCrsi; - case ARM::SUBrsr: return ARM::SUBCCrsr; - - // Thumb2 ADD/SUB - case ARM::t2ADDri: return ARM::t2ADDCCri; - case ARM::t2ADDri12: return ARM::t2ADDCCri12; - case ARM::t2ADDrr: return ARM::t2ADDCCrr; - case ARM::t2ADDrs: return ARM::t2ADDCCrs; - case ARM::t2SUBri: return ARM::t2SUBCCri; - case ARM::t2SUBri12: return ARM::t2SUBCCri12; - case ARM::t2SUBrr: return ARM::t2SUBCCrr; - case ARM::t2SUBrs: return ARM::t2SUBCCrs; - } + bool DontMoveAcrossStores = true; + if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ 0, DontMoveAcrossStores)) + return 0; + return MI; } bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI, @@ -1662,19 +1674,18 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI, assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) && "Unknown select instruction"); const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo(); - MachineInstr *DefMI = 0; - unsigned Opc = canFoldIntoMOVCC(MI->getOperand(2).getReg(), DefMI, MRI); - bool Invert = !Opc; - if (!Opc) - Opc = canFoldIntoMOVCC(MI->getOperand(1).getReg(), DefMI, MRI); - if (!Opc) + MachineInstr *DefMI = canFoldIntoMOVCC(MI->getOperand(2).getReg(), MRI, this); + bool Invert = !DefMI; + if (!DefMI) + DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this); + if (!DefMI) return 0; // Create a new predicated version of DefMI. // Rfalse is the first use. MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), - get(Opc), MI->getOperand(0).getReg()) - .addOperand(MI->getOperand(Invert ? 2 : 1)); + DefMI->getDesc(), + MI->getOperand(0).getReg()); // Copy all the DefMI operands, excluding its (null) predicate. const MCInstrDesc &DefDesc = DefMI->getDesc(); @@ -1693,6 +1704,15 @@ MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI, if (NewMI->hasOptionalDef()) AddDefaultCC(NewMI); + // The output register value when the predicate is false is an implicit + // register operand tied to the first def. + // The tie makes the register allocator ensure the FalseReg is allocated the + // same register as operand 0. + MachineOperand FalseReg = MI->getOperand(Invert ? 
2 : 1); + FalseReg.setImplicit(); + NewMI->addOperand(FalseReg); + NewMI->tieOperands(0, NewMI->getNumOperands() - 1); + // The caller will erase MI, but not DefMI. DefMI->eraseFromParent(); return NewMI; @@ -2039,13 +2059,14 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, // Masked compares sometimes use the same register as the corresponding 'and'. if (CmpMask != ~0) { - if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) { + if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) { MI = 0; for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg), UE = MRI->use_end(); UI != UE; ++UI) { if (UI->getParent() != CmpInstr->getParent()) continue; MachineInstr *PotentialAND = &*UI; - if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true)) + if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) || + isPredicated(PotentialAND)) continue; MI = PotentialAND; break; @@ -2111,6 +2132,10 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, // The single candidate is called MI. if (!MI) MI = Sub; + // We can't use a predicated instruction - it doesn't always write the flags. + if (isPredicated(MI)) + return false; + switch (MI->getOpcode()) { default: break; case ARM::RSBrr: @@ -2217,6 +2242,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, // Toggle the optional operand to CPSR. MI->getOperand(5).setReg(ARM::CPSR); MI->getOperand(5).setIsDef(true); + assert(!isPredicated(MI) && "Can't use flags from predicated instruction"); CmpInstr->eraseFromParent(); // Modify the condition code of operands in OperandsToUpdate. @@ -2347,6 +2373,260 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI, return true; } +static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, + const MachineInstr *MI) { + switch (MI->getOpcode()) { + default: { + const MCInstrDesc &Desc = MI->getDesc(); + int UOps = ItinData->getNumMicroOps(Desc.getSchedClass()); + assert(UOps >= 0 && "bad # UOps"); + return UOps; + } + + case ARM::LDRrs: + case ARM::LDRBrs: + case ARM::STRrs: + case ARM::STRBrs: { + unsigned ShOpVal = MI->getOperand(3).getImm(); + bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; + unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); + if (!isSub && + (ShImm == 0 || + ((ShImm == 1 || ShImm == 2 || ShImm == 3) && + ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) + return 1; + return 2; + } + + case ARM::LDRH: + case ARM::STRH: { + if (!MI->getOperand(2).getReg()) + return 1; + + unsigned ShOpVal = MI->getOperand(3).getImm(); + bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; + unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); + if (!isSub && + (ShImm == 0 || + ((ShImm == 1 || ShImm == 2 || ShImm == 3) && + ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) + return 1; + return 2; + } + + case ARM::LDRSB: + case ARM::LDRSH: + return (ARM_AM::getAM3Op(MI->getOperand(3).getImm()) == ARM_AM::sub) ? 3:2; + + case ARM::LDRSB_POST: + case ARM::LDRSH_POST: { + unsigned Rt = MI->getOperand(0).getReg(); + unsigned Rm = MI->getOperand(3).getReg(); + return (Rt == Rm) ? 
4 : 3; + } + + case ARM::LDR_PRE_REG: + case ARM::LDRB_PRE_REG: { + unsigned Rt = MI->getOperand(0).getReg(); + unsigned Rm = MI->getOperand(3).getReg(); + if (Rt == Rm) + return 3; + unsigned ShOpVal = MI->getOperand(4).getImm(); + bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; + unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); + if (!isSub && + (ShImm == 0 || + ((ShImm == 1 || ShImm == 2 || ShImm == 3) && + ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) + return 2; + return 3; + } + + case ARM::STR_PRE_REG: + case ARM::STRB_PRE_REG: { + unsigned ShOpVal = MI->getOperand(4).getImm(); + bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; + unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); + if (!isSub && + (ShImm == 0 || + ((ShImm == 1 || ShImm == 2 || ShImm == 3) && + ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) + return 2; + return 3; + } + + case ARM::LDRH_PRE: + case ARM::STRH_PRE: { + unsigned Rt = MI->getOperand(0).getReg(); + unsigned Rm = MI->getOperand(3).getReg(); + if (!Rm) + return 2; + if (Rt == Rm) + return 3; + return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) + ? 3 : 2; + } + + case ARM::LDR_POST_REG: + case ARM::LDRB_POST_REG: + case ARM::LDRH_POST: { + unsigned Rt = MI->getOperand(0).getReg(); + unsigned Rm = MI->getOperand(3).getReg(); + return (Rt == Rm) ? 3 : 2; + } + + case ARM::LDR_PRE_IMM: + case ARM::LDRB_PRE_IMM: + case ARM::LDR_POST_IMM: + case ARM::LDRB_POST_IMM: + case ARM::STRB_POST_IMM: + case ARM::STRB_POST_REG: + case ARM::STRB_PRE_IMM: + case ARM::STRH_POST: + case ARM::STR_POST_IMM: + case ARM::STR_POST_REG: + case ARM::STR_PRE_IMM: + return 2; + + case ARM::LDRSB_PRE: + case ARM::LDRSH_PRE: { + unsigned Rm = MI->getOperand(3).getReg(); + if (Rm == 0) + return 3; + unsigned Rt = MI->getOperand(0).getReg(); + if (Rt == Rm) + return 4; + unsigned ShOpVal = MI->getOperand(4).getImm(); + bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; + unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); + if (!isSub && + (ShImm == 0 || + ((ShImm == 1 || ShImm == 2 || ShImm == 3) && + ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) + return 3; + return 4; + } + + case ARM::LDRD: { + unsigned Rt = MI->getOperand(0).getReg(); + unsigned Rn = MI->getOperand(2).getReg(); + unsigned Rm = MI->getOperand(3).getReg(); + if (Rm) + return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3; + return (Rt == Rn) ? 3 : 2; + } + + case ARM::STRD: { + unsigned Rm = MI->getOperand(3).getReg(); + if (Rm) + return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3; + return 2; + } + + case ARM::LDRD_POST: + case ARM::t2LDRD_POST: + return 3; + + case ARM::STRD_POST: + case ARM::t2STRD_POST: + return 4; + + case ARM::LDRD_PRE: { + unsigned Rt = MI->getOperand(0).getReg(); + unsigned Rn = MI->getOperand(3).getReg(); + unsigned Rm = MI->getOperand(4).getReg(); + if (Rm) + return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4; + return (Rt == Rn) ? 4 : 3; + } + + case ARM::t2LDRD_PRE: { + unsigned Rt = MI->getOperand(0).getReg(); + unsigned Rn = MI->getOperand(3).getReg(); + return (Rt == Rn) ? 
4 : 3; + } + + case ARM::STRD_PRE: { + unsigned Rm = MI->getOperand(4).getReg(); + if (Rm) + return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4; + return 3; + } + + case ARM::t2STRD_PRE: + return 3; + + case ARM::t2LDR_POST: + case ARM::t2LDRB_POST: + case ARM::t2LDRB_PRE: + case ARM::t2LDRSBi12: + case ARM::t2LDRSBi8: + case ARM::t2LDRSBpci: + case ARM::t2LDRSBs: + case ARM::t2LDRH_POST: + case ARM::t2LDRH_PRE: + case ARM::t2LDRSBT: + case ARM::t2LDRSB_POST: + case ARM::t2LDRSB_PRE: + case ARM::t2LDRSH_POST: + case ARM::t2LDRSH_PRE: + case ARM::t2LDRSHi12: + case ARM::t2LDRSHi8: + case ARM::t2LDRSHpci: + case ARM::t2LDRSHs: + return 2; + + case ARM::t2LDRDi8: { + unsigned Rt = MI->getOperand(0).getReg(); + unsigned Rn = MI->getOperand(2).getReg(); + return (Rt == Rn) ? 3 : 2; + } + + case ARM::t2STRB_POST: + case ARM::t2STRB_PRE: + case ARM::t2STRBs: + case ARM::t2STRDi8: + case ARM::t2STRH_POST: + case ARM::t2STRH_PRE: + case ARM::t2STRHs: + case ARM::t2STR_POST: + case ARM::t2STR_PRE: + case ARM::t2STRs: + return 2; + } +} + +// Return the number of 32-bit words loaded by LDM or stored by STM. If this +// can't be easily determined return 0 (missing MachineMemOperand). +// +// FIXME: The current MachineInstr design does not support relying on machine +// mem operands to determine the width of a memory access. Instead, we expect +// the target to provide this information based on the instruction opcode and +// operands. However, using MachineMemOperand is a the best solution now for +// two reasons: +// +// 1) getNumMicroOps tries to infer LDM memory width from the total number of MI +// operands. This is much more dangerous than using the MachineMemOperand +// sizes because CodeGen passes can insert/remove optional machine operands. In +// fact, it's totally incorrect for preRA passes and appears to be wrong for +// postRA passes as well. +// +// 2) getNumLDMAddresses is only used by the scheduling machine model and any +// machine model that calls this should handle the unknown (zero size) case. +// +// Long term, we should require a target hook that verifies MachineMemOperand +// sizes during MC lowering. That target hook should be local to MC lowering +// because we can't ensure that it is aware of other MI forms. Doing this will +// ensure that MachineMemOperands are correctly propagated through all passes. 
+unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr *MI) const { + unsigned Size = 0; + for (MachineInstr::mmo_iterator I = MI->memoperands_begin(), + E = MI->memoperands_end(); I != E; ++I) { + Size += (*I)->getSize(); + } + return Size / 4; +} + unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr *MI) const { @@ -2356,8 +2636,12 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, const MCInstrDesc &Desc = MI->getDesc(); unsigned Class = Desc.getSchedClass(); int ItinUOps = ItinData->getNumMicroOps(Class); - if (ItinUOps >= 0) + if (ItinUOps >= 0) { + if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore())) + return getNumMicroOpsSwiftLdSt(ItinData, MI); + return ItinUOps; + } unsigned Opc = MI->getOpcode(); switch (Opc) { @@ -2426,7 +2710,43 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, case ARM::t2STMIA_UPD: case ARM::t2STMDB_UPD: { unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1; - if (Subtarget.isCortexA8()) { + if (Subtarget.isSwift()) { + // rdar://8402126 + int UOps = 1 + NumRegs; // One for address computation, one for each ld / st. + switch (Opc) { + default: break; + case ARM::VLDMDIA_UPD: + case ARM::VLDMDDB_UPD: + case ARM::VLDMSIA_UPD: + case ARM::VLDMSDB_UPD: + case ARM::VSTMDIA_UPD: + case ARM::VSTMDDB_UPD: + case ARM::VSTMSIA_UPD: + case ARM::VSTMSDB_UPD: + case ARM::LDMIA_UPD: + case ARM::LDMDA_UPD: + case ARM::LDMDB_UPD: + case ARM::LDMIB_UPD: + case ARM::STMIA_UPD: + case ARM::STMDA_UPD: + case ARM::STMDB_UPD: + case ARM::STMIB_UPD: + case ARM::tLDMIA_UPD: + case ARM::tSTMIA_UPD: + case ARM::t2LDMIA_UPD: + case ARM::t2LDMDB_UPD: + case ARM::t2STMIA_UPD: + case ARM::t2STMDB_UPD: + ++UOps; // One for base register writeback. + break; + case ARM::LDMIA_RET: + case ARM::tPOP_RET: + case ARM::t2LDMIA_RET: + UOps += 2; // One for base reg wb, one for write to pc. + break; + } + return UOps; + } else if (Subtarget.isCortexA8()) { if (NumRegs < 4) return 2; // 4 registers would be issued: 2, 2. @@ -2435,7 +2755,7 @@ ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData, if (NumRegs % 2) ++A8UOps; return A8UOps; - } else if (Subtarget.isCortexA9()) { + } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { int A9UOps = (NumRegs / 2); // If there are odd number of registers or if it's not 64-bit aligned, // then it takes an extra AGU (Address Generation Unit) cycle. @@ -2468,7 +2788,7 @@ ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData, DefCycle = RegNo / 2 + 1; if (RegNo % 2) ++DefCycle; - } else if (Subtarget.isCortexA9()) { + } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { DefCycle = RegNo; bool isSLoad = false; @@ -2512,7 +2832,7 @@ ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData, DefCycle = 1; // Result latency is issue cycle + 2: E2. DefCycle += 2; - } else if (Subtarget.isCortexA9()) { + } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { DefCycle = (RegNo / 2); // If there are odd number of registers or if it's not 64-bit aligned, // then it takes an extra AGU (Address Generation Unit) cycle. 
@@ -2543,7 +2863,7 @@ ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData, UseCycle = RegNo / 2 + 1; if (RegNo % 2) ++UseCycle; - } else if (Subtarget.isCortexA9()) { + } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { UseCycle = RegNo; bool isSStore = false; @@ -2584,7 +2904,7 @@ ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData, UseCycle = 2; // Read in E3. UseCycle += 2; - } else if (Subtarget.isCortexA9()) { + } else if (Subtarget.isLikeA9() || Subtarget.isSwift()) { UseCycle = (RegNo / 2); // If there are odd number of registers or if it's not 64-bit aligned, // then it takes an extra AGU (Address Generation Unit) cycle. @@ -2769,7 +3089,7 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr *DefMI, const MCInstrDesc *DefMCID, unsigned DefAlign) { int Adjust = 0; - if (Subtarget.isCortexA8() || Subtarget.isCortexA9()) { + if (Subtarget.isCortexA8() || Subtarget.isLikeA9()) { // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] // variants are one cycle cheaper. switch (DefMCID->getOpcode()) { @@ -2794,9 +3114,40 @@ static int adjustDefLatency(const ARMSubtarget &Subtarget, break; } } + } else if (Subtarget.isSwift()) { + // FIXME: Properly handle all of the latency adjustments for address + // writeback. + switch (DefMCID->getOpcode()) { + default: break; + case ARM::LDRrs: + case ARM::LDRBrs: { + unsigned ShOpVal = DefMI->getOperand(3).getImm(); + bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub; + unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); + if (!isSub && + (ShImm == 0 || + ((ShImm == 1 || ShImm == 2 || ShImm == 3) && + ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))) + Adjust -= 2; + else if (!isSub && + ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) + --Adjust; + break; + } + case ARM::t2LDRs: + case ARM::t2LDRBs: + case ARM::t2LDRHs: + case ARM::t2LDRSHs: { + // Thumb2 mode: lsl only. + unsigned ShAmt = DefMI->getOperand(3).getImm(); + if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3) + Adjust -= 2; + break; + } + } } - if (DefAlign < 8 && Subtarget.isCortexA9()) { + if (DefAlign < 8 && Subtarget.isLikeA9()) { switch (DefMCID->getOpcode()) { default: break; case ARM::VLD1q8: @@ -2954,7 +3305,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, if (Reg == ARM::CPSR) { if (DefMI->getOpcode() == ARM::FMSTAT) { // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?) - return Subtarget.isCortexA9() ? 1 : 20; + return Subtarget.isLikeA9() ? 1 : 20; } // CPSR set and branch can be paired in the same cycle. @@ -2970,7 +3321,8 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, // instructions). if (Latency > 0 && Subtarget.isThumb2()) { const MachineFunction *MF = DefMI->getParent()->getParent(); - if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize)) + if (MF->getFunction()->getFnAttributes(). + hasAttribute(Attributes::OptimizeForSize)) --Latency; } return Latency; @@ -3020,7 +3372,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, if (!UseNode->isMachineOpcode()) { int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx); - if (Subtarget.isCortexA9()) + if (Subtarget.isLikeA9() || Subtarget.isSwift()) return Latency <= 2 ? 1 : Latency - 1; else return Latency <= 3 ? 
1 : Latency - 2; @@ -3037,7 +3389,7 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, UseMCID, UseIdx, UseAlign); if (Latency > 1 && - (Subtarget.isCortexA8() || Subtarget.isCortexA9())) { + (Subtarget.isCortexA8() || Subtarget.isLikeA9())) { // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2] // variants are one cycle cheaper. switch (DefMCID.getOpcode()) { @@ -3064,9 +3416,36 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, break; } } + } else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) { + // FIXME: Properly handle all of the latency adjustments for address + // writeback. + switch (DefMCID.getOpcode()) { + default: break; + case ARM::LDRrs: + case ARM::LDRBrs: { + unsigned ShOpVal = + cast(DefNode->getOperand(2))->getZExtValue(); + unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal); + if (ShImm == 0 || + ((ShImm == 1 || ShImm == 2 || ShImm == 3) && + ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)) + Latency -= 2; + else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr) + --Latency; + break; + } + case ARM::t2LDRs: + case ARM::t2LDRBs: + case ARM::t2LDRHs: + case ARM::t2LDRSHs: { + // Thumb2 mode: lsl 0-3 only. + Latency -= 2; + break; + } + } } - if (DefAlign < 8 && Subtarget.isCortexA9()) + if (DefAlign < 8 && Subtarget.isLikeA9()) switch (DefMCID.getOpcode()) { default: break; case ARM::VLD1q8: @@ -3190,18 +3569,6 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, return Latency; } -unsigned -ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData, - const MachineInstr *DefMI, unsigned DefIdx, - const MachineInstr *DepMI) const { - unsigned Reg = DefMI->getOperand(DefIdx).getReg(); - if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI)) - return 1; - - // If the second MI is predicated, then there is an implicit use dependency. - return getInstrLatency(ItinData, DefMI); -} - unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr *MI, unsigned *PredCost) const { @@ -3359,11 +3726,12 @@ ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const { if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI)) return std::make_pair(ExeVFP, (1<getOpcode() == ARM::VMOVRS || - MI->getOpcode() == ARM::VMOVSR)) + MI->getOpcode() == ARM::VMOVSR || + MI->getOpcode() == ARM::VMOVS)) return std::make_pair(ExeVFP, (1<getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass); + Lane = 0; + + if (DReg != ARM::NoRegister) + return DReg; + + Lane = 1; + DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass); + + assert(DReg && "S-register with no D super-register?"); + return DReg; +} + +/// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, +/// set ImplicitSReg to a register number that must be marked as implicit-use or +/// zero if no register needs to be defined as implicit-use. +/// +/// If the function cannot determine if an SPR should be marked implicit use or +/// not, it returns false. +/// +/// This function handles cases where an instruction is being modified from taking +/// an SPR to a DPR[Lane]. A use of the DPR is being added, which may conflict +/// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other +/// lane of the DPR). +/// +/// If the other SPR is defined, an implicit-use of it should be added. Else, +/// (including the case where the DPR itself is defined), it should not. 
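The Swift latency tweak above, in both the MachineInstr path and the SDNode path, keys off the addressing-mode-2 shifter encoding: loads with no shift or with lsl #1..#3 are treated as two cycles cheaper, lsr #1 as one cycle cheaper, and subtracted offsets get no adjustment. A rough standalone model of that rule, with made-up names and the decoded fields passed in directly rather than read from the addressing-mode immediate:

#include <cassert>

// Hypothetical standalone model of the Swift latency adjustment for
// ARM::LDRrs / ARM::LDRBrs.  'isSub' and the shift kind/amount stand in for
// what ARM_AM::getAM2Op / getAM2ShiftOpc / getAM2Offset decode in the patch.
enum ShiftKind { NoShift, LSL, LSR };

static int swiftLoadLatencyAdjust(bool isSub, ShiftKind kind, unsigned amt) {
  if (isSub)
    return 0;                      // subtracted offsets get no bonus
  if (amt == 0 || (kind == LSL && amt >= 1 && amt <= 3))
    return -2;                     // [r + r] or [r + r, lsl #1..#3]
  if (kind == LSR && amt == 1)
    return -1;                     // [r + r, lsr #1]
  return 0;
}

int main() {
  assert(swiftLoadLatencyAdjust(false, LSL, 2) == -2);
  assert(swiftLoadLatencyAdjust(false, LSR, 1) == -1);
  assert(swiftLoadLatencyAdjust(true, LSL, 2) == 0);
  return 0;
}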
+/// +static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, + MachineInstr *MI, + unsigned DReg, unsigned Lane, + unsigned &ImplicitSReg) { + // If the DPR is defined or used already, the other SPR lane will be chained + // correctly, so there is nothing to be done. + if (MI->definesRegister(DReg, TRI) || MI->readsRegister(DReg, TRI)) { + ImplicitSReg = 0; + return true; + } + + // Otherwise we need to go searching to see if the SPR is set explicitly. + ImplicitSReg = TRI->getSubReg(DReg, + (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1); + MachineBasicBlock::LivenessQueryResult LQR = + MI->getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI); + + if (LQR == MachineBasicBlock::LQR_Live) + return true; + else if (LQR == MachineBasicBlock::LQR_Unknown) + return false; + + // If the register is known not to be live, there is no need to add an + // implicit-use. + ImplicitSReg = 0; + return true; +} + void ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const { unsigned DstReg, SrcReg, DReg; unsigned Lane; MachineInstrBuilder MIB(MI); const TargetRegisterInfo *TRI = &getRegisterInfo(); - bool isKill; switch (MI->getOpcode()) { default: llvm_unreachable("cannot handle opcode!"); @@ -3400,82 +3825,294 @@ ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const { // Zap the predicate operands. assert(!isPredicated(MI) && "Cannot predicate a VORRd"); - MI->RemoveOperand(3); - MI->RemoveOperand(2); - // Change to a VORRd which requires two identical use operands. - MI->setDesc(get(ARM::VORRd)); + // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits) + DstReg = MI->getOperand(0).getReg(); + SrcReg = MI->getOperand(1).getReg(); - // Add the extra source operand and new predicates. - // This will go before any implicit ops. - AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1))); + for (unsigned i = MI->getDesc().getNumOperands(); i; --i) + MI->RemoveOperand(i-1); + + // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits) + MI->setDesc(get(ARM::VORRd)); + AddDefaultPred(MIB.addReg(DstReg, RegState::Define) + .addReg(SrcReg) + .addReg(SrcReg)); break; case ARM::VMOVRS: if (Domain != ExeNEON) break; assert(!isPredicated(MI) && "Cannot predicate a VGETLN"); + // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits) DstReg = MI->getOperand(0).getReg(); SrcReg = MI->getOperand(1).getReg(); - DReg = TRI->getMatchingSuperReg(SrcReg, ARM::ssub_0, &ARM::DPRRegClass); - Lane = 0; - if (DReg == ARM::NoRegister) { - DReg = TRI->getMatchingSuperReg(SrcReg, ARM::ssub_1, &ARM::DPRRegClass); - Lane = 1; - assert(DReg && "S-register with no D super-register?"); - } + for (unsigned i = MI->getDesc().getNumOperands(); i; --i) + MI->RemoveOperand(i-1); - MI->RemoveOperand(3); - MI->RemoveOperand(2); - MI->RemoveOperand(1); + DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane); + // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps) + // Note that DSrc has been widened and the other lane may be undef, which + // contaminates the entire register. MI->setDesc(get(ARM::VGETLNi32)); - MIB.addReg(DReg); - MIB.addImm(Lane); + AddDefaultPred(MIB.addReg(DstReg, RegState::Define) + .addReg(DReg, RegState::Undef) + .addImm(Lane)); - MIB->getOperand(1).setIsUndef(); + // The old source should be an implicit use, otherwise we might think it + // was dead before here. 
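Both helpers above rely on the VFP/NEON register file layout in which s(2n) and s(2n+1) are lane 0 and lane 1 of d(n). A tiny standalone sketch of that mapping using plain register indices rather than the real register enums; the function name is illustrative only.

#include <cassert>
#include <utility>

// Hypothetical standalone model of getCorrespondingDRegAndLane:
// S-register number -> (D-register number, lane).
static std::pair<unsigned, unsigned> dRegAndLaneForSReg(unsigned sReg) {
  return {sReg / 2, sReg % 2};
}

int main() {
  assert(dRegAndLaneForSReg(0) == std::make_pair(0u, 0u)); // s0 -> d0[0]
  assert(dRegAndLaneForSReg(1) == std::make_pair(0u, 1u)); // s1 -> d0[1]
  assert(dRegAndLaneForSReg(5) == std::make_pair(2u, 1u)); // s5 -> d2[1]
  return 0;
}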
MIB.addReg(SrcReg, RegState::Implicit); - - AddDefaultPred(MIB); break; - case ARM::VMOVSR: + case ARM::VMOVSR: { if (Domain != ExeNEON) break; assert(!isPredicated(MI) && "Cannot predicate a VSETLN"); + // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits) DstReg = MI->getOperand(0).getReg(); SrcReg = MI->getOperand(1).getReg(); - DReg = TRI->getMatchingSuperReg(DstReg, ARM::ssub_0, &ARM::DPRRegClass); - Lane = 0; - if (DReg == ARM::NoRegister) { - DReg = TRI->getMatchingSuperReg(DstReg, ARM::ssub_1, &ARM::DPRRegClass); - Lane = 1; - assert(DReg && "S-register with no D super-register?"); - } - isKill = MI->getOperand(0).isKill(); - MI->RemoveOperand(3); - MI->RemoveOperand(2); - MI->RemoveOperand(1); - MI->RemoveOperand(0); + DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane); + + unsigned ImplicitSReg; + if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg)) + break; + + for (unsigned i = MI->getDesc().getNumOperands(); i; --i) + MI->RemoveOperand(i-1); + // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps) + // Again DDst may be undefined at the beginning of this instruction. MI->setDesc(get(ARM::VSETLNi32)); - MIB.addReg(DReg); - MIB.addReg(DReg); - MIB.addReg(SrcReg); - MIB.addImm(Lane); + MIB.addReg(DReg, RegState::Define) + .addReg(DReg, getUndefRegState(!MI->readsRegister(DReg, TRI))) + .addReg(SrcReg) + .addImm(Lane); + AddDefaultPred(MIB); + + // The narrower destination must be marked as set to keep previous chains + // in place. + MIB.addReg(DstReg, RegState::Define | RegState::Implicit); + if (ImplicitSReg != 0) + MIB.addReg(ImplicitSReg, RegState::Implicit); + break; + } + case ARM::VMOVS: { + if (Domain != ExeNEON) + break; + + // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits) + DstReg = MI->getOperand(0).getReg(); + SrcReg = MI->getOperand(1).getReg(); + + unsigned DstLane = 0, SrcLane = 0, DDst, DSrc; + DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane); + DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane); - MIB->getOperand(1).setIsUndef(); + unsigned ImplicitSReg; + if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg)) + break; - if (isKill) - MIB->addRegisterKilled(DstReg, TRI, true); - MIB->addRegisterDefined(DstReg, TRI); + for (unsigned i = MI->getDesc().getNumOperands(); i; --i) + MI->RemoveOperand(i-1); + + if (DSrc == DDst) { + // Destination can be: + // %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits) + MI->setDesc(get(ARM::VDUPLN32d)); + MIB.addReg(DDst, RegState::Define) + .addReg(DDst, getUndefRegState(!MI->readsRegister(DDst, TRI))) + .addImm(SrcLane); + AddDefaultPred(MIB); + + // Neither the source or the destination are naturally represented any + // more, so add them in manually. + MIB.addReg(DstReg, RegState::Implicit | RegState::Define); + MIB.addReg(SrcReg, RegState::Implicit); + if (ImplicitSReg != 0) + MIB.addReg(ImplicitSReg, RegState::Implicit); + break; + } + // In general there's no single instruction that can perform an S <-> S + // move in NEON space, but a pair of VEXT instructions *can* do the + // job. It turns out that the VEXTs needed will only use DSrc once, with + // the position based purely on the combination of lane-0 and lane-1 + // involved. 
For example + // vmov s0, s2 -> vext.32 d0, d0, d1, #1 vext.32 d0, d0, d0, #1 + // vmov s1, s3 -> vext.32 d0, d1, d0, #1 vext.32 d0, d0, d0, #1 + // vmov s0, s3 -> vext.32 d0, d0, d0, #1 vext.32 d0, d1, d0, #1 + // vmov s1, s2 -> vext.32 d0, d0, d0, #1 vext.32 d0, d0, d1, #1 + // + // Pattern of the MachineInstrs is: + // %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits) + MachineInstrBuilder NewMIB; + NewMIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), + get(ARM::VEXTd32), DDst); + + // On the first instruction, both DSrc and DDst may be undef if present. + // Specifically when the original instruction didn't have them as an + // imp-use. + unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst; + bool CurUndef = !MI->readsRegister(CurReg, TRI); + NewMIB.addReg(CurReg, getUndefRegState(CurUndef)); + + CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst; + CurUndef = !MI->readsRegister(CurReg, TRI); + NewMIB.addReg(CurReg, getUndefRegState(CurUndef)); + + NewMIB.addImm(1); + AddDefaultPred(NewMIB); + + if (SrcLane == DstLane) + NewMIB.addReg(SrcReg, RegState::Implicit); + + MI->setDesc(get(ARM::VEXTd32)); + MIB.addReg(DDst, RegState::Define); + + // On the second instruction, DDst has definitely been defined above, so + // it is not undef. DSrc, if present, can be undef as above. + CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst; + CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI); + MIB.addReg(CurReg, getUndefRegState(CurUndef)); + + CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst; + CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI); + MIB.addReg(CurReg, getUndefRegState(CurUndef)); + + MIB.addImm(1); AddDefaultPred(MIB); + + if (SrcLane != DstLane) + MIB.addReg(SrcReg, RegState::Implicit); + + // As before, the original destination is no longer represented, add it + // implicitly. + MIB.addReg(DstReg, RegState::Define | RegState::Implicit); + if (ImplicitSReg != 0) + MIB.addReg(ImplicitSReg, RegState::Implicit); break; + } + } + +} + +//===----------------------------------------------------------------------===// +// Partial register updates +//===----------------------------------------------------------------------===// +// +// Swift renames NEON registers with 64-bit granularity. That means any +// instruction writing an S-reg implicitly reads the containing D-reg. The +// problem is mostly avoided by translating f32 operations to v2f32 operations +// on D-registers, but f32 loads are still a problem. +// +// These instructions can load an f32 into a NEON register: +// +// VLDRS - Only writes S, partial D update. +// VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops. +// VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops. +// +// FCONSTD can be used as a dependency-breaking instruction. + + +unsigned ARMBaseInstrInfo:: +getPartialRegUpdateClearance(const MachineInstr *MI, + unsigned OpNum, + const TargetRegisterInfo *TRI) const { + // Only Swift has partial register update problems. + if (!SwiftPartialUpdateClearance || !Subtarget.isSwift()) + return 0; + + assert(TRI && "Need TRI instance"); + + const MachineOperand &MO = MI->getOperand(OpNum); + if (MO.readsReg()) + return 0; + unsigned Reg = MO.getReg(); + int UseOp = -1; + + switch(MI->getOpcode()) { + // Normal instructions writing only an S-register.
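The operand selection for the two VEXTd32 instructions depends only on the source and destination lanes, as the vmov table in the comment above shows. A standalone sketch that reproduces that table by printing which of DSrc/DDst feeds each input slot; the struct and function names are made up for illustration and are not LLVM APIs.

#include <cstdio>

// Hypothetical standalone model of the operand selection for the VEXTd32
// pair that replaces an S <-> S VMOV in the NEON domain.  Each vext.32 takes
// two D-register inputs; which one is DSrc and which is DDst depends only on
// the source and destination lanes.
struct VextOperands {
  const char *first1, *first2;   // inputs of the first vext.32
  const char *second1, *second2; // inputs of the second vext.32
};

static VextOperands pickVextOperands(unsigned srcLane, unsigned dstLane) {
  const char *DSrc = "DSrc", *DDst = "DDst";
  VextOperands Ops;
  Ops.first1  = (srcLane == 1 && dstLane == 1) ? DSrc : DDst;
  Ops.first2  = (srcLane == 0 && dstLane == 0) ? DSrc : DDst;
  Ops.second1 = (srcLane == 1 && dstLane == 0) ? DSrc : DDst;
  Ops.second2 = (srcLane == 0 && dstLane == 1) ? DSrc : DDst;
  return Ops;
}

int main() {
  // Prints the four lane combinations, matching the vmov table above when
  // DDst is d0 and DSrc is d1.
  for (unsigned dst = 0; dst < 2; ++dst)
    for (unsigned src = 0; src < 2; ++src) {
      VextOperands O = pickVextOperands(src, dst);
      std::printf("dstLane=%u srcLane=%u : vext DDst, %s, %s, #1 ; "
                  "vext DDst, %s, %s, #1\n",
                  dst, src, O.first1, O.first2, O.second1, O.second2);
    }
  return 0;
}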
+ case ARM::VLDRS: + case ARM::FCONSTS: + case ARM::VMOVSR: + // rdar://problem/8791586 + case ARM::VMOVv8i8: + case ARM::VMOVv4i16: + case ARM::VMOVv2i32: + case ARM::VMOVv2f32: + case ARM::VMOVv1i64: + UseOp = MI->findRegisterUseOperandIdx(Reg, false, TRI); + break; + + // Explicitly reads the dependency. + case ARM::VLD1LNd32: + UseOp = 1; + break; + default: + return 0; + } + + // If this instruction actually reads a value from Reg, there is no unwanted + // dependency. + if (UseOp != -1 && MI->getOperand(UseOp).readsReg()) + return 0; + + // We must be able to clobber the whole D-reg. + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + // Virtual register must be a foo:ssub_0 operand. + if (!MO.getSubReg() || MI->readsVirtualRegister(Reg)) + return 0; + } else if (ARM::SPRRegClass.contains(Reg)) { + // Physical register: MI must define the full D-reg. + unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0, + &ARM::DPRRegClass); + if (!DReg || !MI->definesRegister(DReg, TRI)) + return 0; } + // MI has an unwanted D-register dependency. + // Avoid defs in the previous N instructions. + return SwiftPartialUpdateClearance; +} + +// Break a partial register dependency after getPartialRegUpdateClearance +// returned non-zero. +void ARMBaseInstrInfo:: +breakPartialRegDependency(MachineBasicBlock::iterator MI, + unsigned OpNum, + const TargetRegisterInfo *TRI) const { + assert(MI && OpNum < MI->getDesc().getNumDefs() && "OpNum is not a def"); + assert(TRI && "Need TRI instance"); + + const MachineOperand &MO = MI->getOperand(OpNum); + unsigned Reg = MO.getReg(); + assert(TargetRegisterInfo::isPhysicalRegister(Reg) && + "Can't break virtual register dependencies."); + unsigned DReg = Reg; + + // If MI defines an S-reg, find the corresponding D super-register. + if (ARM::SPRRegClass.contains(Reg)) { + DReg = ARM::D0 + (Reg - ARM::S0) / 2; + assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken"); + } + + assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps"); + assert(MI->definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg"); + + // FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines + // the full D-register by loading the same value to both lanes. The + // instruction is micro-coded with 2 uops, so don't do this until we can + // properly schedule micro-coded instructions. The dispatcher stalls cause + // regressions that are too big. + + // Insert the dependency-breaking FCONSTD before MI. + // 96 is the encoding of 0.5, but the actual value doesn't matter here. + AddDefaultPred(BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), + get(ARM::FCONSTD), DReg).addImm(96)); + MI->addRegisterKilled(DReg, TRI, true); } bool ARMBaseInstrInfo::hasNOP() const { diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index 92e5ee8..6f38e35 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -182,10 +182,13 @@ public: virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, const BranchProbability - &Probability) const { + &Probability) const { return NumCycles == 1; } + virtual bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, + MachineBasicBlock &FMBB) const; + /// analyzeCompare - For a comparison instruction, return the source registers /// in SrcReg and SrcReg2 if having two register operands, and the value it /// compares against in CmpValue.
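breakPartialRegDependency above maps a clobbered S register to its containing D register by plain arithmetic on the register enums before inserting the dependency-breaking FCONSTD. A minimal standalone model of just that arithmetic, assuming, as the assert in the patch does, that the enums are laid out contiguously; the numeric values used for S0 and D0 below are fake and only serve the test.

#include <cassert>

// Hypothetical standalone model of the S-reg -> D super-reg mapping used by
// breakPartialRegDependency: D(n) holds S(2n) and S(2n+1).
static unsigned dSuperRegFor(unsigned D0, unsigned S0, unsigned sReg) {
  return D0 + (sReg - S0) / 2;
}

int main() {
  // Fake enum layout just for the test: S registers start at 100, D at 200.
  const unsigned S0 = 100, D0 = 200;
  assert(dSuperRegFor(D0, S0, S0 + 0) == D0 + 0); // s0 -> d0
  assert(dSuperRegFor(D0, S0, S0 + 1) == D0 + 0); // s1 -> d0
  assert(dSuperRegFor(D0, S0, S0 + 9) == D0 + 4); // s9 -> d4
  return 0;
}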
Return true if the comparison instruction @@ -226,15 +229,18 @@ public: SDNode *DefNode, unsigned DefIdx, SDNode *UseNode, unsigned UseIdx) const; - virtual unsigned getOutputLatency(const InstrItineraryData *ItinData, - const MachineInstr *DefMI, unsigned DefIdx, - const MachineInstr *DepMI) const; - /// VFP/NEON execution domains. std::pair getExecutionDomain(const MachineInstr *MI) const; void setExecutionDomain(MachineInstr *MI, unsigned Domain) const; + unsigned getPartialRegUpdateClearance(const MachineInstr*, unsigned, + const TargetRegisterInfo*) const; + void breakPartialRegDependency(MachineBasicBlock::iterator, unsigned, + const TargetRegisterInfo *TRI) const; + /// Get the number of addresses by LDM or VLDM or zero for unknown. + unsigned getNumLDMAddresses(const MachineInstr *MI) const; + private: unsigned getInstBundleLength(const MachineInstr *MI) const; diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 9deb96e..e5b300f 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -84,6 +84,11 @@ ARMBaseRegisterInfo::getCallPreservedMask(CallingConv::ID) const { ? CSR_iOS_RegMask : CSR_AAPCS_RegMask; } +const uint32_t* +ARMBaseRegisterInfo::getNoPreservedMask() const { + return CSR_NoRegs_RegMask; +} + BitVector ARMBaseRegisterInfo:: getReservedRegs(const MachineFunction &MF) const { const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); @@ -106,148 +111,12 @@ getReservedRegs(const MachineFunction &MF) const { for (unsigned i = 0; i != 16; ++i) Reserved.set(ARM::D16 + i); } - return Reserved; -} - -bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF, - unsigned Reg) const { - const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); - - switch (Reg) { - default: break; - case ARM::SP: - case ARM::PC: - return true; - case ARM::R6: - if (hasBasePointer(MF)) - return true; - break; - case ARM::R7: - case ARM::R11: - if (FramePtr == Reg && TFI->hasFP(MF)) - return true; - break; - case ARM::R9: - return STI.isR9Reserved(); - } - - return false; -} + const TargetRegisterClass *RC = &ARM::GPRPairRegClass; + for(TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I!=E; ++I) + for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI) + if (Reserved.test(*SI)) Reserved.set(*I); -bool -ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC, - SmallVectorImpl &SubIndices, - unsigned &NewSubIdx) const { - - unsigned Size = RC->getSize() * 8; - if (Size < 6) - return 0; - - NewSubIdx = 0; // Whole register. - unsigned NumRegs = SubIndices.size(); - if (NumRegs == 8) { - // 8 D registers -> 1 QQQQ register. - return (Size == 512 && - SubIndices[0] == ARM::dsub_0 && - SubIndices[1] == ARM::dsub_1 && - SubIndices[2] == ARM::dsub_2 && - SubIndices[3] == ARM::dsub_3 && - SubIndices[4] == ARM::dsub_4 && - SubIndices[5] == ARM::dsub_5 && - SubIndices[6] == ARM::dsub_6 && - SubIndices[7] == ARM::dsub_7); - } else if (NumRegs == 4) { - if (SubIndices[0] == ARM::qsub_0) { - // 4 Q registers -> 1 QQQQ register. - return (Size == 512 && - SubIndices[1] == ARM::qsub_1 && - SubIndices[2] == ARM::qsub_2 && - SubIndices[3] == ARM::qsub_3); - } else if (SubIndices[0] == ARM::dsub_0) { - // 4 D registers -> 1 QQ register. 
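The new loop in getReservedRegs, in the ARMBaseRegisterInfo.cpp hunk above, marks a register pair as reserved as soon as any of its sub-registers is reserved. A toy standalone version of that propagation over sixteen GPRs and eight pairs; the pairing layout is a simplification for illustration and does not match the real GPRPair enums.

#include <bitset>
#include <cassert>

// Hypothetical standalone model of the reservation propagation added to
// getReservedRegs: a pair becomes reserved if any register it contains is
// already reserved.  Register i models Ri, pair p models (R2p, R2p+1).
int main() {
  std::bitset<16> reservedGPR;
  reservedGPR.set(13); // sp
  reservedGPR.set(15); // pc

  std::bitset<8> reservedPair;
  for (unsigned pair = 0; pair < 8; ++pair)
    if (reservedGPR.test(2 * pair) || reservedGPR.test(2 * pair + 1))
      reservedPair.set(pair);

  assert(reservedPair.test(6));  // contains sp (r13)
  assert(reservedPair.test(7));  // contains pc (r15)
  assert(!reservedPair.test(2)); // the r4/r5 pair stays allocatable
  return 0;
}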
- if (Size >= 256 && - SubIndices[1] == ARM::dsub_1 && - SubIndices[2] == ARM::dsub_2 && - SubIndices[3] == ARM::dsub_3) { - if (Size == 512) - NewSubIdx = ARM::qqsub_0; - return true; - } - } else if (SubIndices[0] == ARM::dsub_4) { - // 4 D registers -> 1 QQ register (2nd). - if (Size == 512 && - SubIndices[1] == ARM::dsub_5 && - SubIndices[2] == ARM::dsub_6 && - SubIndices[3] == ARM::dsub_7) { - NewSubIdx = ARM::qqsub_1; - return true; - } - } else if (SubIndices[0] == ARM::ssub_0) { - // 4 S registers -> 1 Q register. - if (Size >= 128 && - SubIndices[1] == ARM::ssub_1 && - SubIndices[2] == ARM::ssub_2 && - SubIndices[3] == ARM::ssub_3) { - if (Size >= 256) - NewSubIdx = ARM::qsub_0; - return true; - } - } - } else if (NumRegs == 2) { - if (SubIndices[0] == ARM::qsub_0) { - // 2 Q registers -> 1 QQ register. - if (Size >= 256 && SubIndices[1] == ARM::qsub_1) { - if (Size == 512) - NewSubIdx = ARM::qqsub_0; - return true; - } - } else if (SubIndices[0] == ARM::qsub_2) { - // 2 Q registers -> 1 QQ register (2nd). - if (Size == 512 && SubIndices[1] == ARM::qsub_3) { - NewSubIdx = ARM::qqsub_1; - return true; - } - } else if (SubIndices[0] == ARM::dsub_0) { - // 2 D registers -> 1 Q register. - if (Size >= 128 && SubIndices[1] == ARM::dsub_1) { - if (Size >= 256) - NewSubIdx = ARM::qsub_0; - return true; - } - } else if (SubIndices[0] == ARM::dsub_2) { - // 2 D registers -> 1 Q register (2nd). - if (Size >= 256 && SubIndices[1] == ARM::dsub_3) { - NewSubIdx = ARM::qsub_1; - return true; - } - } else if (SubIndices[0] == ARM::dsub_4) { - // 2 D registers -> 1 Q register (3rd). - if (Size == 512 && SubIndices[1] == ARM::dsub_5) { - NewSubIdx = ARM::qsub_2; - return true; - } - } else if (SubIndices[0] == ARM::dsub_6) { - // 2 D registers -> 1 Q register (3rd). - if (Size == 512 && SubIndices[1] == ARM::dsub_7) { - NewSubIdx = ARM::qsub_3; - return true; - } - } else if (SubIndices[0] == ARM::ssub_0) { - // 2 S registers -> 1 D register. - if (SubIndices[1] == ARM::ssub_1) { - if (Size >= 128) - NewSubIdx = ARM::dsub_0; - return true; - } - } else if (SubIndices[0] == ARM::ssub_2) { - // 2 S registers -> 1 D register (2nd). - if (Size >= 128 && SubIndices[1] == ARM::ssub_3) { - NewSubIdx = ARM::dsub_1; - return true; - } - } - } - return false; + return Reserved; } const TargetRegisterClass* @@ -263,6 +132,7 @@ ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) case ARM::QPRRegClassID: case ARM::QQPRRegClassID: case ARM::QQQQPRRegClassID: + case ARM::GPRPairRegClassID: return Super; } Super = *I++; @@ -476,7 +346,7 @@ ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg, bool ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const { // CortexA9 has a Write-after-write hazard for NEON registers. 
- if (!STI.isCortexA9()) + if (!STI.isLikeA9()) return false; switch (RC->getID()) { @@ -561,8 +431,9 @@ needsStackRealignment(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); const Function *F = MF.getFunction(); unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment(); - bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) || - F->hasFnAttr(Attribute::StackAlignment)); + bool requiresRealignment = + ((MFI->getMaxAlignment() > StackAlign) || + F->getFnAttributes().hasAttribute(Attributes::StackAlignment)); return requiresRealignment && canRealignStack(MF); } @@ -595,6 +466,7 @@ unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const { unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg, const MachineFunction &MF) const { + const MachineRegisterInfo &MRI = MF.getRegInfo(); switch (Reg) { default: break; // Return 0 if either register of the pair is a special register. @@ -603,10 +475,10 @@ unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg, case ARM::R3: return ARM::R2; case ARM::R5: return ARM::R4; case ARM::R7: - return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6)) + return (MRI.isReserved(ARM::R7) || MRI.isReserved(ARM::R6)) ? 0 : ARM::R6; - case ARM::R9: return isReservedReg(MF, ARM::R9) ? 0 :ARM::R8; - case ARM::R11: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10; + case ARM::R9: return MRI.isReserved(ARM::R9) ? 0 :ARM::R8; + case ARM::R11: return MRI.isReserved(ARM::R11) ? 0 : ARM::R10; case ARM::S1: return ARM::S0; case ARM::S3: return ARM::S2; @@ -648,6 +520,7 @@ unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg, unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg, const MachineFunction &MF) const { + const MachineRegisterInfo &MRI = MF.getRegInfo(); switch (Reg) { default: break; // Return 0 if either register of the pair is a special register. @@ -656,10 +529,10 @@ unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg, case ARM::R2: return ARM::R3; case ARM::R4: return ARM::R5; case ARM::R6: - return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6)) + return (MRI.isReserved(ARM::R7) || MRI.isReserved(ARM::R6)) ? 0 : ARM::R7; - case ARM::R8: return isReservedReg(MF, ARM::R9) ? 0 :ARM::R9; - case ARM::R10: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11; + case ARM::R8: return MRI.isReserved(ARM::R9) ? 0 :ARM::R9; + case ARM::R10: return MRI.isReserved(ARM::R11) ? 0 : ARM::R11; case ARM::S0: return ARM::S1; case ARM::S2: return ARM::S3; diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.h b/lib/Target/ARM/ARMBaseRegisterInfo.h index da29f7e..e2bdd04 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.h +++ b/lib/Target/ARM/ARMBaseRegisterInfo.h @@ -96,19 +96,10 @@ public: /// Code Generation virtual methods... const uint16_t *getCalleeSavedRegs(const MachineFunction *MF = 0) const; const uint32_t *getCallPreservedMask(CallingConv::ID) const; + const uint32_t *getNoPreservedMask() const; BitVector getReservedRegs(const MachineFunction &MF) const; - /// canCombineSubRegIndices - Given a register class and a list of - /// subregister indices, return true if it's possible to combine the - /// subregister indices into one that corresponds to a larger - /// subregister. Return the new subregister index by reference. Note the - /// new index may be zero if the given subregisters can be combined to - /// form the whole register. 
- virtual bool canCombineSubRegIndices(const TargetRegisterClass *RC, - SmallVectorImpl &SubIndices, - unsigned &NewSubIdx) const; - const TargetRegisterClass* getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const; const TargetRegisterClass* @@ -170,8 +161,6 @@ public: unsigned MIFlags = MachineInstr::NoFlags)const; /// Code Generation virtual methods... - virtual bool isReservedReg(const MachineFunction &MF, unsigned Reg) const; - virtual bool requiresRegisterScavenging(const MachineFunction &MF) const; virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const; diff --git a/lib/Target/ARM/ARMCallingConv.td b/lib/Target/ARM/ARMCallingConv.td index bda1517..b378b96 100644 --- a/lib/Target/ARM/ARMCallingConv.td +++ b/lib/Target/ARM/ARMCallingConv.td @@ -190,6 +190,8 @@ def RetCC_ARM_AAPCS_VFP : CallingConv<[ // Callee-saved register lists. //===----------------------------------------------------------------------===// +def CSR_NoRegs : CalleeSavedRegs<(add)>; + def CSR_AAPCS : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6, R5, R4, (sequence "D%u", 15, 8))>; diff --git a/lib/Target/ARM/ARMCodeEmitter.cpp b/lib/Target/ARM/ARMCodeEmitter.cpp index e81b4cc..6adbf4f 100644 --- a/lib/Target/ARM/ARMCodeEmitter.cpp +++ b/lib/Target/ARM/ARMCodeEmitter.cpp @@ -47,7 +47,7 @@ namespace { class ARMCodeEmitter : public MachineFunctionPass { ARMJITInfo *JTI; const ARMBaseInstrInfo *II; - const TargetData *TD; + const DataLayout *TD; const ARMSubtarget *Subtarget; TargetMachine &TM; JITCodeEmitter &MCE; @@ -67,7 +67,7 @@ namespace { ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) : MachineFunctionPass(ID), JTI(0), II((const ARMBaseInstrInfo *)tm.getInstrInfo()), - TD(tm.getTargetData()), TM(tm), + TD(tm.getDataLayout()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0), IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {} @@ -376,7 +376,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) { "JIT relocation model must be set to static or default!"); JTI = ((ARMBaseTargetMachine &)MF.getTarget()).getJITInfo(); II = (const ARMBaseInstrInfo *)MF.getTarget().getInstrInfo(); - TD = MF.getTarget().getTargetData(); + TD = MF.getTarget().getDataLayout(); Subtarget = &TM.getSubtarget(); MCPEs = &MF.getConstantPool()->getConstants(); MJTEs = 0; @@ -389,7 +389,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) { do { DEBUG(errs() << "JITTing function '" - << MF.getFunction()->getName() << "'\n"); + << MF.getName() << "'\n"); MCE.startFunction(MF); for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); MBB != E; ++MBB) { diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp index a953985..a57368f 100644 --- a/lib/Target/ARM/ARMConstantIslandPass.cpp +++ b/lib/Target/ARM/ARMConstantIslandPass.cpp @@ -22,7 +22,7 @@ #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineJumpTableInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -528,7 +528,7 @@ ARMConstantIslands::doInitialPlacement(std::vector &CPEMIs) { // identity mapping of CPI's to CPE's. 
const std::vector &CPs = MCP->getConstants(); - const TargetData &TD = *MF->getTarget().getTargetData(); + const DataLayout &TD = *MF->getTarget().getDataLayout(); for (unsigned i = 0, e = CPs.size(); i != e; ++i) { unsigned Size = TD.getTypeAllocSize(CPs[i].getType()); assert(Size >= 4 && "Too small constant pool entry"); @@ -1388,10 +1388,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex) { // If the original WaterList entry was "new water" on this iteration, // propagate that to the new island. This is just keeping NewWaterList // updated to match the WaterList, which will be updated below. - if (NewWaterList.count(WaterBB)) { - NewWaterList.erase(WaterBB); + if (NewWaterList.erase(WaterBB)) NewWaterList.insert(NewIsland); - } + // The new CPE goes before the following block (NewMBB). NewMBB = llvm::next(MachineFunction::iterator(WaterBB)); diff --git a/lib/Target/ARM/ARMConstantPoolValue.h b/lib/Target/ARM/ARMConstantPoolValue.h index 6b98d44..ae531c4 100644 --- a/lib/Target/ARM/ARMConstantPoolValue.h +++ b/lib/Target/ARM/ARMConstantPoolValue.h @@ -102,8 +102,6 @@ public: virtual void print(raw_ostream &O) const; void print(raw_ostream *O) const { if (O) print(*O); } void dump() const; - - static bool classof(const ARMConstantPoolValue *) { return true; } }; inline raw_ostream &operator<<(raw_ostream &O, const ARMConstantPoolValue &V) { @@ -158,7 +156,6 @@ public: static bool classof(const ARMConstantPoolValue *APV) { return APV->isGlobalValue() || APV->isBlockAddress() || APV->isLSDA(); } - static bool classof(const ARMConstantPoolConstant *) { return true; } }; /// ARMConstantPoolSymbol - ARM-specific constantpool values for external @@ -192,7 +189,6 @@ public: static bool classof(const ARMConstantPoolValue *ACPV) { return ACPV->isExtSymbol(); } - static bool classof(const ARMConstantPoolSymbol *) { return true; } }; /// ARMConstantPoolMBB - ARM-specific constantpool value of a machine basic @@ -225,7 +221,6 @@ public: static bool classof(const ARMConstantPoolValue *ACPV) { return ACPV->isMachineBasicBlock(); } - static bool classof(const ARMConstantPoolMBB *) { return true; } }; } // End llvm namespace diff --git a/lib/Target/ARM/ARMELFWriterInfo.cpp b/lib/Target/ARM/ARMELFWriterInfo.cpp deleted file mode 100644 index f671317..0000000 --- a/lib/Target/ARM/ARMELFWriterInfo.cpp +++ /dev/null @@ -1,78 +0,0 @@ -//===-- ARMELFWriterInfo.cpp - ELF Writer Info for the ARM backend --------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements ELF writer information for the ARM backend. 
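The constant-island change above folds a count-then-erase sequence into a single call, relying on erase(key) reporting whether an element was actually removed. The same idiom in plain C++ with std::set, which behaves the same way for this purpose (the patch itself operates on an LLVM small-set type).

#include <cassert>
#include <set>

int main() {
  std::set<int> water = {1, 2, 3};

  // Old shape: if (water.count(2)) { water.erase(2); water.insert(9); }
  // erase(key) already returns how many elements were removed, so the
  // separate membership test is redundant.
  if (water.erase(2))
    water.insert(9);

  assert(water.count(9) == 1);
  assert(water.count(2) == 0);
  return 0;
}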
-// -//===----------------------------------------------------------------------===// - -#include "ARMELFWriterInfo.h" -#include "ARMRelocations.h" -#include "llvm/Function.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Support/ELF.h" - -using namespace llvm; - -//===----------------------------------------------------------------------===// -// Implementation of the ARMELFWriterInfo class -//===----------------------------------------------------------------------===// - -ARMELFWriterInfo::ARMELFWriterInfo(TargetMachine &TM) - : TargetELFWriterInfo(TM.getTargetData()->getPointerSizeInBits() == 64, - TM.getTargetData()->isLittleEndian()) { -} - -ARMELFWriterInfo::~ARMELFWriterInfo() {} - -unsigned ARMELFWriterInfo::getRelocationType(unsigned MachineRelTy) const { - switch (MachineRelTy) { - case ARM::reloc_arm_absolute: - case ARM::reloc_arm_relative: - case ARM::reloc_arm_cp_entry: - case ARM::reloc_arm_vfp_cp_entry: - case ARM::reloc_arm_machine_cp_entry: - case ARM::reloc_arm_jt_base: - case ARM::reloc_arm_pic_jt: - llvm_unreachable("unsupported ARM relocation type"); - - case ARM::reloc_arm_branch: return ELF::R_ARM_CALL; - case ARM::reloc_arm_movt: return ELF::R_ARM_MOVT_ABS; - case ARM::reloc_arm_movw: return ELF::R_ARM_MOVW_ABS_NC; - default: - llvm_unreachable("unknown ARM relocation type"); - } -} - -long int ARMELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy, - long int Modifier) const { - llvm_unreachable("ARMELFWriterInfo::getDefaultAddendForRelTy() not " - "implemented"); -} - -unsigned ARMELFWriterInfo::getRelocationTySize(unsigned RelTy) const { - llvm_unreachable("ARMELFWriterInfo::getRelocationTySize() not implemented"); -} - -bool ARMELFWriterInfo::isPCRelativeRel(unsigned RelTy) const { - llvm_unreachable("ARMELFWriterInfo::isPCRelativeRel() not implemented"); -} - -unsigned ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() const { - llvm_unreachable("ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not " - "implemented"); -} - -long int ARMELFWriterInfo::computeRelocation(unsigned SymOffset, - unsigned RelOffset, - unsigned RelTy) const { - llvm_unreachable("ARMELFWriterInfo::getAbsoluteLabelMachineRelTy() not " - "implemented"); -} diff --git a/lib/Target/ARM/ARMELFWriterInfo.h b/lib/Target/ARM/ARMELFWriterInfo.h deleted file mode 100644 index 6a84f8a..0000000 --- a/lib/Target/ARM/ARMELFWriterInfo.h +++ /dev/null @@ -1,59 +0,0 @@ -//===-- ARMELFWriterInfo.h - ELF Writer Info for ARM ------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements ELF writer information for the ARM backend. -// -//===----------------------------------------------------------------------===// - -#ifndef ARM_ELF_WRITER_INFO_H -#define ARM_ELF_WRITER_INFO_H - -#include "llvm/Target/TargetELFWriterInfo.h" - -namespace llvm { - class TargetMachine; - - class ARMELFWriterInfo : public TargetELFWriterInfo { - public: - ARMELFWriterInfo(TargetMachine &TM); - virtual ~ARMELFWriterInfo(); - - /// getRelocationType - Returns the target specific ELF Relocation type. 
- /// 'MachineRelTy' contains the object code independent relocation type - virtual unsigned getRelocationType(unsigned MachineRelTy) const; - - /// hasRelocationAddend - True if the target uses an addend in the - /// ELF relocation entry. - virtual bool hasRelocationAddend() const { return false; } - - /// getDefaultAddendForRelTy - Gets the default addend value for a - /// relocation entry based on the target ELF relocation type. - virtual long int getDefaultAddendForRelTy(unsigned RelTy, - long int Modifier = 0) const; - - /// getRelTySize - Returns the size of relocatable field in bits - virtual unsigned getRelocationTySize(unsigned RelTy) const; - - /// isPCRelativeRel - True if the relocation type is pc relative - virtual bool isPCRelativeRel(unsigned RelTy) const; - - /// getJumpTableRelocationTy - Returns the machine relocation type used - /// to reference a jumptable. - virtual unsigned getAbsoluteLabelMachineRelTy() const; - - /// computeRelocation - Some relocatable fields could be relocated - /// directly, avoiding the relocation symbol emission, compute the - /// final relocation value for this symbol. - virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset, - unsigned RelTy) const; - }; - -} // end llvm namespace - -#endif // ARM_ELF_WRITER_INFO_H diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp index 15bb32e..8c45e0b 100644 --- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp +++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp @@ -103,9 +103,9 @@ namespace { bool IsLoad; bool isUpdating; bool hasWritebackOperand; - NEONRegSpacing RegSpacing; - unsigned char NumRegs; // D registers loaded or stored - unsigned char RegElts; // elements per D register; used for lane ops + uint8_t RegSpacing; // One of type NEONRegSpacing + uint8_t NumRegs; // D registers loaded or stored + uint8_t RegElts; // elements per D register; used for lane ops // FIXME: Temporary flag to denote whether the real instruction takes // a single register (like the encoding) or all of the registers in // the list (like the asm syntax and the isel DAG). 
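The NEONLdStTableEntry hunk earlier in this patch shrinks the table by storing the register spacing as a uint8_t and casting back to the enum at each use site. A minimal standalone illustration of the same packing; the entry layout and values below are placeholders, not the real table.

#include <cassert>
#include <cstdint>

// Hypothetical standalone model of keeping a small enum in a uint8_t table
// field and recovering it with an explicit cast where it is consumed.
enum RegSpacing { SingleSpc, EvenDblSpc, OddDblSpc };

struct TableEntry {
  uint16_t PseudoOpc;
  uint8_t RegSpacing; // one of enum RegSpacing
  uint8_t NumRegs;
};

int main() {
  const TableEntry Entry = {42, EvenDblSpc, 4};
  RegSpacing Spc = (RegSpacing)Entry.RegSpacing; // cast back at the use site
  assert(Spc == EvenDblSpc);
  assert(Entry.NumRegs == 4);
  return 0;
}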
When all definitions @@ -377,7 +377,7 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) { const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode()); assert(TableEntry && TableEntry->IsLoad && "NEONLdStTable lookup failed"); - NEONRegSpacing RegSpc = TableEntry->RegSpacing; + NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing; unsigned NumRegs = TableEntry->NumRegs; MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), @@ -442,7 +442,7 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) { const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode()); assert(TableEntry && !TableEntry->IsLoad && "NEONLdStTable lookup failed"); - NEONRegSpacing RegSpc = TableEntry->RegSpacing; + NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing; unsigned NumRegs = TableEntry->NumRegs; MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MI.getDebugLoc(), @@ -493,7 +493,7 @@ void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) { const NEONLdStTableEntry *TableEntry = LookupNEONLdSt(MI.getOpcode()); assert(TableEntry && "NEONLdStTable lookup failed"); - NEONRegSpacing RegSpc = TableEntry->RegSpacing; + NEONRegSpacing RegSpc = (NEONRegSpacing)TableEntry->RegSpacing; unsigned NumRegs = TableEntry->NumRegs; unsigned RegElts = TableEntry->RegElts; @@ -777,9 +777,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, MI.eraseFromParent(); return true; } - case ARM::Int_eh_sjlj_dispatchsetup: - case ARM::Int_eh_sjlj_dispatchsetup_nofp: - case ARM::tInt_eh_sjlj_dispatchsetup: { + case ARM::Int_eh_sjlj_dispatchsetup: { MachineFunction &MF = *MI.getParent()->getParent(); const ARMBaseInstrInfo *AII = static_cast(TII); diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index bf9d16e..6611862 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -40,7 +40,7 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/GetElementPtrTypeIterator.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetMachine.h" @@ -100,51 +100,53 @@ class ARMFastISel : public FastISel { } // Code from FastISel.cpp. 
- virtual unsigned FastEmitInst_(unsigned MachineInstOpcode, - const TargetRegisterClass *RC); - virtual unsigned FastEmitInst_r(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill); - virtual unsigned FastEmitInst_rr(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill); - virtual unsigned FastEmitInst_rrr(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill, - unsigned Op2, bool Op2IsKill); - virtual unsigned FastEmitInst_ri(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - uint64_t Imm); - virtual unsigned FastEmitInst_rf(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - const ConstantFP *FPImm); - virtual unsigned FastEmitInst_rri(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - unsigned Op0, bool Op0IsKill, - unsigned Op1, bool Op1IsKill, - uint64_t Imm); - virtual unsigned FastEmitInst_i(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - uint64_t Imm); - virtual unsigned FastEmitInst_ii(unsigned MachineInstOpcode, - const TargetRegisterClass *RC, - uint64_t Imm1, uint64_t Imm2); - - virtual unsigned FastEmitInst_extractsubreg(MVT RetVT, - unsigned Op0, bool Op0IsKill, - uint32_t Idx); + private: + unsigned FastEmitInst_(unsigned MachineInstOpcode, + const TargetRegisterClass *RC); + unsigned FastEmitInst_r(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill); + unsigned FastEmitInst_rr(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + unsigned Op1, bool Op1IsKill); + unsigned FastEmitInst_rrr(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + unsigned Op1, bool Op1IsKill, + unsigned Op2, bool Op2IsKill); + unsigned FastEmitInst_ri(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + uint64_t Imm); + unsigned FastEmitInst_rf(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + const ConstantFP *FPImm); + unsigned FastEmitInst_rri(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + unsigned Op0, bool Op0IsKill, + unsigned Op1, bool Op1IsKill, + uint64_t Imm); + unsigned FastEmitInst_i(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + uint64_t Imm); + unsigned FastEmitInst_ii(unsigned MachineInstOpcode, + const TargetRegisterClass *RC, + uint64_t Imm1, uint64_t Imm2); + + unsigned FastEmitInst_extractsubreg(MVT RetVT, + unsigned Op0, bool Op0IsKill, + uint32_t Idx); // Backend specific FastISel code. + private: virtual bool TargetSelectInstruction(const Instruction *I); virtual unsigned TargetMaterializeConstant(const Constant *C); virtual unsigned TargetMaterializeAlloca(const AllocaInst *AI); virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo, const LoadInst *LI); - + private: #include "ARMGenFastISel.inc" // Instruction selection routines. @@ -192,6 +194,7 @@ class ARMFastISel : public FastISel { unsigned ARMMoveToFPReg(EVT VT, unsigned SrcReg); unsigned ARMMoveToIntReg(EVT VT, unsigned SrcReg); unsigned ARMSelectCallOp(bool UseReg); + unsigned ARMLowerPICELF(const GlobalValue *GV, unsigned Align, EVT VT); // Call handling routines. 
private: @@ -615,11 +618,11 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { if (VT != MVT::i32) return 0; Reloc::Model RelocM = TM.getRelocationModel(); - - // TODO: Need more magic for ARM PIC. - if (!isThumb2 && (RelocM == Reloc::PIC_)) return 0; - - unsigned DestReg = createResultReg(TLI.getRegClassFor(VT)); + bool IsIndirect = Subtarget->GVIsIndirectSymbol(GV, RelocM); + const TargetRegisterClass *RC = isThumb2 ? + (const TargetRegisterClass*)&ARM::rGPRRegClass : + (const TargetRegisterClass*)&ARM::GPRRegClass; + unsigned DestReg = createResultReg(RC); // Use movw+movt when possible, it avoids constant pool entries. // Darwin targets don't support movt with Reloc::Static, see @@ -649,6 +652,9 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { Align = TD.getTypeAllocSize(GV->getType()); } + if (Subtarget->isTargetELF() && RelocM == Reloc::PIC_) + return ARMLowerPICELF(GV, Align, VT); + // Grab index. unsigned PCAdj = (RelocM != Reloc::PIC_) ? 0 : (Subtarget->isThumb() ? 4 : 8); @@ -666,17 +672,30 @@ unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, EVT VT) { .addConstantPoolIndex(Idx); if (RelocM == Reloc::PIC_) MIB.addImm(Id); + AddOptionalDefs(MIB); } else { // The extra immediate is for addrmode2. MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(ARM::LDRcp), DestReg) .addConstantPoolIndex(Idx) .addImm(0); + AddOptionalDefs(MIB); + + if (RelocM == Reloc::PIC_) { + unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD; + unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); + + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc), NewDestReg) + .addReg(DestReg) + .addImm(Id); + AddOptionalDefs(MIB); + return NewDestReg; + } } - AddOptionalDefs(MIB); } - if (Subtarget->GVIsIndirectSymbol(GV, RelocM)) { + if (IsIndirect) { MachineInstrBuilder MIB; unsigned NewDestReg = createResultReg(TLI.getRegClassFor(VT)); if (isThumb2) @@ -1009,6 +1028,9 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, RC = &ARM::GPRRegClass; break; case MVT::i16: + if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) + return false; + if (isThumb2) { if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8; @@ -1021,6 +1043,9 @@ bool ARMFastISel::ARMEmitLoad(EVT VT, unsigned &ResultReg, Address &Addr, RC = &ARM::GPRRegClass; break; case MVT::i32: + if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) + return false; + if (isThumb2) { if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) Opc = ARM::t2LDRi8; @@ -1127,6 +1152,9 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, } break; case MVT::i16: + if (Alignment && Alignment < 2 && !Subtarget->allowsUnalignedMem()) + return false; + if (isThumb2) { if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) StrOpc = ARM::t2STRHi8; @@ -1138,6 +1166,9 @@ bool ARMFastISel::ARMEmitStore(EVT VT, unsigned SrcReg, Address &Addr, } break; case MVT::i32: + if (Alignment && Alignment < 4 && !Subtarget->allowsUnalignedMem()) + return false; + if (isThumb2) { if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops()) StrOpc = ARM::t2STRi8; @@ -1360,6 +1391,11 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) { unsigned Opc = isThumb2 ? 
ARM::tBRIND : ARM::BX; AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc)) .addReg(AddrReg)); + + const IndirectBrInst *IB = cast(I); + for (unsigned i = 0, e = IB->getNumSuccessors(); i != e; ++i) + FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[IB->getSuccessor(i)]); + return true; } @@ -2210,25 +2246,17 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) { unsigned CallOpc = ARMSelectCallOp(EnableARMLongCalls); MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)); - if (isThumb2) { - // Explicitly adding the predicate here. + // BL / BLX don't take a predicate, but tBL / tBLX do. + if (isThumb2) AddDefaultPred(MIB); - if (EnableARMLongCalls) - MIB.addReg(CalleeReg); - else - MIB.addExternalSymbol(TLI.getLibcallName(Call)); - } else { - if (EnableARMLongCalls) - MIB.addReg(CalleeReg); - else - MIB.addExternalSymbol(TLI.getLibcallName(Call)); + if (EnableARMLongCalls) + MIB.addReg(CalleeReg); + else + MIB.addExternalSymbol(TLI.getLibcallName(Call)); - // Explicitly adding the predicate here. - AddDefaultPred(MIB); - } // Add implicit physical register uses to the call. for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) - MIB.addReg(RegArgs[i]); + MIB.addReg(RegArgs[i], RegState::Implicit); // Add a register mask with the call-preserved registers. // Proper defs for return values will be added by setPhysRegsDeadExcept(). @@ -2300,16 +2328,16 @@ bool ARMFastISel::SelectCall(const Instruction *I, ISD::ArgFlagsTy Flags; unsigned AttrInd = i - CS.arg_begin() + 1; - if (CS.paramHasAttr(AttrInd, Attribute::SExt)) + if (CS.paramHasAttr(AttrInd, Attributes::SExt)) Flags.setSExt(); - if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) + if (CS.paramHasAttr(AttrInd, Attributes::ZExt)) Flags.setZExt(); // FIXME: Only handle *easy* calls for now. - if (CS.paramHasAttr(AttrInd, Attribute::InReg) || - CS.paramHasAttr(AttrInd, Attribute::StructRet) || - CS.paramHasAttr(AttrInd, Attribute::Nest) || - CS.paramHasAttr(AttrInd, Attribute::ByVal)) + if (CS.paramHasAttr(AttrInd, Attributes::InReg) || + CS.paramHasAttr(AttrInd, Attributes::StructRet) || + CS.paramHasAttr(AttrInd, Attributes::Nest) || + CS.paramHasAttr(AttrInd, Attributes::ByVal)) return false; Type *ArgTy = (*i)->getType(); @@ -2356,30 +2384,20 @@ bool ARMFastISel::SelectCall(const Instruction *I, unsigned CallOpc = ARMSelectCallOp(UseReg); MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)); - if(isThumb2) { - // Explicitly adding the predicate here. - AddDefaultPred(MIB); - if (UseReg) - MIB.addReg(CalleeReg); - else if (!IntrMemName) - MIB.addGlobalAddress(GV, 0, 0); - else - MIB.addExternalSymbol(IntrMemName, 0); - } else { - if (UseReg) - MIB.addReg(CalleeReg); - else if (!IntrMemName) - MIB.addGlobalAddress(GV, 0, 0); - else - MIB.addExternalSymbol(IntrMemName, 0); - // Explicitly adding the predicate here. + // ARM calls don't take a predicate, but tBL / tBLX do. + if(isThumb2) AddDefaultPred(MIB); - } + if (UseReg) + MIB.addReg(CalleeReg); + else if (!IntrMemName) + MIB.addGlobalAddress(GV, 0, 0); + else + MIB.addExternalSymbol(IntrMemName, 0); // Add implicit physical register uses to the call. for (unsigned i = 0, e = RegArgs.size(); i != e; ++i) - MIB.addReg(RegArgs[i]); + MIB.addReg(RegArgs[i], RegState::Implicit); // Add a register mask with the call-preserved registers. // Proper defs for return values will be added by setPhysRegsDeadExcept(). 
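The new i16/i32 cases in ARMEmitLoad and ARMEmitStore above bail out of fast-isel when the access has a known alignment smaller than its natural alignment and the subtarget does not allow unaligned accesses; an alignment of zero means the natural alignment and is left alone. A standalone sketch of that predicate with plain values; the function name is invented for illustration.

#include <cassert>

// Hypothetical standalone model of the new fast-isel bail-out for
// under-aligned i16/i32 loads and stores.
static bool fastIselCanHandle(unsigned alignment, unsigned naturalAlign,
                              bool allowsUnalignedMem) {
  if (alignment && alignment < naturalAlign && !allowsUnalignedMem)
    return false; // fall back to SelectionDAG
  return true;
}

int main() {
  assert(!fastIselCanHandle(1, 4, false)); // under-aligned i32, strict target
  assert(fastIselCanHandle(1, 4, true));   // unaligned accesses allowed
  assert(fastIselCanHandle(0, 4, false));  // unknown alignment: keep going
  return 0;
}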
@@ -2648,7 +2666,7 @@ bool ARMFastISel::SelectShift(const Instruction *I, unsigned Reg1 = getRegForValue(Src1Value); if (Reg1 == 0) return false; - unsigned Reg2; + unsigned Reg2 = 0; if (Opc == ARM::MOVsr) { Reg2 = getRegForValue(Src2Value); if (Reg2 == 0) return false; @@ -2790,6 +2808,47 @@ bool ARMFastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo, return true; } +unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, + unsigned Align, EVT VT) { + bool UseGOTOFF = GV->hasLocalLinkage() || GV->hasHiddenVisibility(); + ARMConstantPoolConstant *CPV = + ARMConstantPoolConstant::Create(GV, UseGOTOFF ? ARMCP::GOTOFF : ARMCP::GOT); + unsigned Idx = MCP.getConstantPoolIndex(CPV, Align); + + unsigned Opc; + unsigned DestReg1 = createResultReg(TLI.getRegClassFor(VT)); + // Load value. + if (isThumb2) { + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, + TII.get(ARM::t2LDRpci), DestReg1) + .addConstantPoolIndex(Idx)); + Opc = UseGOTOFF ? ARM::t2ADDrr : ARM::t2LDRs; + } else { + // The extra immediate is for addrmode2. + AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(ARM::LDRcp), DestReg1) + .addConstantPoolIndex(Idx).addImm(0)); + Opc = UseGOTOFF ? ARM::ADDrr : ARM::LDRrs; + } + + unsigned GlobalBaseReg = AFI->getGlobalBaseReg(); + if (GlobalBaseReg == 0) { + GlobalBaseReg = MRI.createVirtualRegister(TLI.getRegClassFor(VT)); + AFI->setGlobalBaseReg(GlobalBaseReg); + } + + unsigned DestReg2 = createResultReg(TLI.getRegClassFor(VT)); + MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, + DL, TII.get(Opc), DestReg2) + .addReg(DestReg1) + .addReg(GlobalBaseReg); + if (!UseGOTOFF) + MIB.addImm(0); + AddOptionalDefs(MIB); + + return DestReg2; +} + namespace llvm { FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) { diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index aee72d2..9392497 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -153,7 +153,8 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const { int FramePtrSpillFI = 0; int D8SpillFI = 0; - // All calls are tail calls in GHC calling conv, and functions have no prologue/epilogue. + // All calls are tail calls in GHC calling conv, and functions have no + // prologue/epilogue. if (MF.getFunction()->getCallingConv() == CallingConv::GHC) return; @@ -360,7 +361,8 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF, int NumBytes = (int)MFI->getStackSize(); unsigned FramePtr = RegInfo->getFrameRegister(MF); - // All calls are tail calls in GHC calling conv, and functions have no prologue/epilogue. + // All calls are tail calls in GHC calling conv, and functions have no + // prologue/epilogue. if (MF.getFunction()->getCallingConv() == CallingConv::GHC) return; @@ -1151,7 +1153,7 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) { return; // Naked functions don't spill callee-saved registers. - if (MF.getFunction()->hasFnAttr(Attribute::Naked)) + if (MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked)) return; // We are planning to use NEON instructions vst1 / vld1. @@ -1176,7 +1178,7 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) { MachineRegisterInfo &MRI = MF.getRegInfo(); unsigned NumSpills = 0; for (; NumSpills < 8; ++NumSpills) - if (!MRI.isPhysRegOrOverlapUsed(ARM::D8 + NumSpills)) + if (!MRI.isPhysRegUsed(ARM::D8 + NumSpills)) break; // Don't do this for just one d-register. It's not worth it. 
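ARMLowerPICELF above chooses GOTOFF addressing, added to the global base register, for globals that bind locally (local linkage or hidden visibility), and a GOT load through the base register otherwise. A rough standalone model of just that selection; the enum and flag names are illustrative, not part of the patch.

#include <cassert>

// Hypothetical standalone model of the relocation choice in ARMLowerPICELF.
enum PICAccess { GOTOFF_Add, GOT_Load };

static PICAccess pickPICAccess(bool hasLocalLinkage, bool hasHiddenVisibility) {
  bool useGOTOFF = hasLocalLinkage || hasHiddenVisibility;
  return useGOTOFF ? GOTOFF_Add : GOT_Load;
}

int main() {
  assert(pickPICAccess(true, false) == GOTOFF_Add); // static/internal global
  assert(pickPICAccess(false, true) == GOTOFF_Add); // hidden visibility
  assert(pickPICAccess(false, false) == GOT_Load);  // default-visibility extern
  return 0;
}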
@@ -1209,6 +1211,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, *static_cast(MF.getTarget().getInstrInfo()); ARMFunctionInfo *AFI = MF.getInfo(); MachineFrameInfo *MFI = MF.getFrameInfo(); + MachineRegisterInfo &MRI = MF.getRegInfo(); unsigned FramePtr = RegInfo->getFrameRegister(MF); // Spill R4 if Thumb2 function requires stack realignment - it will be used as @@ -1218,12 +1221,12 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // FIXME: It will be better just to find spare register here. if (AFI->isThumb2Function() && (MFI->hasVarSizedObjects() || RegInfo->needsStackRealignment(MF))) - MF.getRegInfo().setPhysRegUsed(ARM::R4); + MRI.setPhysRegUsed(ARM::R4); if (AFI->isThumb1OnlyFunction()) { // Spill LR if Thumb1 function uses variable length argument lists. if (AFI->getVarArgsRegSaveSize() > 0) - MF.getRegInfo().setPhysRegUsed(ARM::LR); + MRI.setPhysRegUsed(ARM::LR); // Spill R4 if Thumb1 epilogue has to restore SP from FP. We don't know // for sure what the stack size will be, but for this, an estimate is good @@ -1233,7 +1236,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // FIXME: It will be better just to find spare register here. unsigned StackSize = estimateStackSize(MF); if (MFI->hasVarSizedObjects() || StackSize > 508) - MF.getRegInfo().setPhysRegUsed(ARM::R4); + MRI.setPhysRegUsed(ARM::R4); } // See if we can spill vector registers to aligned stack. @@ -1241,7 +1244,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // Spill the BasePtr if it's used. if (RegInfo->hasBasePointer(MF)) - MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister()); + MRI.setPhysRegUsed(RegInfo->getBaseRegister()); // Don't spill FP if the frame can be eliminated. This is determined // by scanning the callee-save registers to see if any is used. @@ -1249,7 +1252,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, for (unsigned i = 0; CSRegs[i]; ++i) { unsigned Reg = CSRegs[i]; bool Spilled = false; - if (MF.getRegInfo().isPhysRegOrOverlapUsed(Reg)) { + if (MRI.isPhysRegUsed(Reg)) { Spilled = true; CanEliminateFrame = false; } @@ -1338,7 +1341,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled. // Spill LR as well so we can fold BX_RET to the registers restore (LDM). 
if (!LRSpilled && CS1Spilled) { - MF.getRegInfo().setPhysRegUsed(ARM::LR); + MRI.setPhysRegUsed(ARM::LR); NumGPRSpills++; UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(), UnspilledCS1GPRs.end(), (unsigned)ARM::LR)); @@ -1347,7 +1350,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, } if (hasFP(MF)) { - MF.getRegInfo().setPhysRegUsed(FramePtr); + MRI.setPhysRegUsed(FramePtr); NumGPRSpills++; } @@ -1362,16 +1365,16 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // Don't spill high register if the function is thumb1 if (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) || Reg == ARM::LR) { - MF.getRegInfo().setPhysRegUsed(Reg); - if (!RegInfo->isReservedReg(MF, Reg)) + MRI.setPhysRegUsed(Reg); + if (!MRI.isReserved(Reg)) ExtraCSSpill = true; break; } } } else if (!UnspilledCS2GPRs.empty() && !AFI->isThumb1OnlyFunction()) { unsigned Reg = UnspilledCS2GPRs.front(); - MF.getRegInfo().setPhysRegUsed(Reg); - if (!RegInfo->isReservedReg(MF, Reg)) + MRI.setPhysRegUsed(Reg); + if (!MRI.isReserved(Reg)) ExtraCSSpill = true; } } @@ -1389,7 +1392,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, while (NumExtras && !UnspilledCS1GPRs.empty()) { unsigned Reg = UnspilledCS1GPRs.back(); UnspilledCS1GPRs.pop_back(); - if (!RegInfo->isReservedReg(MF, Reg) && + if (!MRI.isReserved(Reg) && (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) || Reg == ARM::LR)) { Extras.push_back(Reg); @@ -1401,7 +1404,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, while (NumExtras && !UnspilledCS2GPRs.empty()) { unsigned Reg = UnspilledCS2GPRs.back(); UnspilledCS2GPRs.pop_back(); - if (!RegInfo->isReservedReg(MF, Reg)) { + if (!MRI.isReserved(Reg)) { Extras.push_back(Reg); NumExtras--; } @@ -1409,7 +1412,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, } if (Extras.size() && NumExtras == 0) { for (unsigned i = 0, e = Extras.size(); i != e; ++i) { - MF.getRegInfo().setPhysRegUsed(Extras[i]); + MRI.setPhysRegUsed(Extras[i]); } } else if (!AFI->isThumb1OnlyFunction()) { // note: Thumb1 functions spill to R12, not the stack. Reserve a slot @@ -1423,7 +1426,7 @@ ARMFrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF, } if (ForceLRSpill) { - MF.getRegInfo().setPhysRegUsed(ARM::LR); + MRI.setPhysRegUsed(ARM::LR); AFI->setLRIsSpilledForFarJump(true); } } diff --git a/lib/Target/ARM/ARMHazardRecognizer.cpp b/lib/Target/ARM/ARMHazardRecognizer.cpp index a5fd15b..1240169 100644 --- a/lib/Target/ARM/ARMHazardRecognizer.cpp +++ b/lib/Target/ARM/ARMHazardRecognizer.cpp @@ -47,7 +47,7 @@ ARMHazardRecognizer::getHazardType(SUnit *SU, int Stalls) { // Skip over one non-VFP / NEON instruction. if (!LastMI->isBarrier() && // On A9, AGU and NEON/FPU are muxed. - !(STI.isCortexA9() && (LastMI->mayLoad() || LastMI->mayStore())) && + !(STI.isLikeA9() && (LastMI->mayLoad() || LastMI->mayStore())) && (LastMCID.TSFlags & ARMII::DomainMask) == ARMII::DomainGeneral) { MachineBasicBlock::iterator I = LastMI; if (I != LastMI->getParent()->begin()) { diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index a3a6c31..efd6d2b 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -239,7 +239,6 @@ private: /// SelectCMOVOp - Select CMOV instructions for ARM. 
SDNode *SelectCMOVOp(SDNode *N); - SDNode *SelectConditionalOp(SDNode *N); SDNode *SelectT2CMOVShiftOp(SDNode *N, SDValue FalseVal, SDValue TrueVal, ARMCC::CondCodes CCVal, SDValue CCR, SDValue InFlag); @@ -306,7 +305,7 @@ static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) { } /// \brief Check whether a particular node is a constant value representable as -/// (N * Scale) where (N in [\arg RangeMin, \arg RangeMax). +/// (N * Scale) where (N in [\p RangeMin, \p RangeMax). /// /// \param ScaledConstant [out] - On success, the pre-scaled constant value. static bool isScaledConstantInRange(SDValue Node, int Scale, @@ -337,7 +336,8 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const { if (!CheckVMLxHazard) return true; - if (!Subtarget->isCortexA8() && !Subtarget->isCortexA9()) + if (!Subtarget->isCortexA8() && !Subtarget->isLikeA9() && + !Subtarget->isSwift()) return true; if (!N->hasOneUse()) @@ -375,12 +375,13 @@ bool ARMDAGToDAGISel::hasNoVMLxHazardUse(SDNode *N) const { bool ARMDAGToDAGISel::isShifterOpProfitable(const SDValue &Shift, ARM_AM::ShiftOpc ShOpcVal, unsigned ShAmt) { - if (!Subtarget->isCortexA9()) + if (!Subtarget->isLikeA9() && !Subtarget->isSwift()) return true; if (Shift.hasOneUse()) return true; // R << 2 is free. - return ShOpcVal == ARM_AM::lsl && ShAmt == 2; + return ShOpcVal == ARM_AM::lsl && + (ShAmt == 2 || (Subtarget->isSwift() && ShAmt == 1)); } bool ARMDAGToDAGISel::SelectImmShifterOperand(SDValue N, @@ -487,7 +488,7 @@ bool ARMDAGToDAGISel::SelectAddrModeImm12(SDValue N, bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, SDValue &Opc) { if (N.getOpcode() == ISD::MUL && - (!Subtarget->isCortexA9() || N.hasOneUse())) { + ((!Subtarget->isLikeA9() && !Subtarget->isSwift()) || N.hasOneUse())) { if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { // X * [3,5,9] -> X + X * [2,4,8] etc. int RHSC = (int)RHS->getZExtValue(); @@ -551,7 +552,8 @@ bool ARMDAGToDAGISel::SelectLdStSOReg(SDValue N, SDValue &Base, SDValue &Offset, // Try matching (R shl C) + (R). if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift && - !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) { + !(Subtarget->isLikeA9() || Subtarget->isSwift() || + N.getOperand(0).hasOneUse())) { ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode()); if (ShOpcVal != ARM_AM::no_shift) { // Check to see if the RHS of the shift is a constant, if not, we can't @@ -585,7 +587,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N, SDValue &Offset, SDValue &Opc) { if (N.getOpcode() == ISD::MUL && - (!Subtarget->isCortexA9() || N.hasOneUse())) { + (!(Subtarget->isLikeA9() || Subtarget->isSwift()) || N.hasOneUse())) { if (ConstantSDNode *RHS = dyn_cast(N.getOperand(1))) { // X * [3,5,9] -> X + X * [2,4,8] etc. int RHSC = (int)RHS->getZExtValue(); @@ -651,7 +653,7 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N, } } - if (Subtarget->isCortexA9() && !N.hasOneUse()) { + if ((Subtarget->isLikeA9() || Subtarget->isSwift()) && !N.hasOneUse()) { // Compute R +/- (R << N) and reuse it. Base = N; Offset = CurDAG->getRegister(0, MVT::i32); @@ -689,7 +691,8 @@ AddrMode2Type ARMDAGToDAGISel::SelectAddrMode2Worker(SDValue N, // Try matching (R shl C) + (R). 
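// The hunks in this file widen the old Cortex-A9 special cases to any
// A9-like core and to Swift.  Condensed view of isShifterOpProfitable above
// (hedged): on other cores, folding a shift into the operand is always free;
// on A9-like/Swift it is folded only when the shift has a single use, or when
// it is LSL #2 (additionally LSL #1 on Swift).  The address-mode matchers
// below apply the same subtarget test before reassociating (R shl C) + R.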
if (N.getOpcode() != ISD::SUB && ShOpcVal == ARM_AM::no_shift && - !(Subtarget->isCortexA9() || N.getOperand(0).hasOneUse())) { + !(Subtarget->isLikeA9() || Subtarget->isSwift() || + N.getOperand(0).hasOneUse())) { ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0).getOpcode()); if (ShOpcVal != ARM_AM::no_shift) { // Check to see if the RHS of the shift is a constant, if not, we can't @@ -2363,121 +2366,6 @@ SDNode *ARMDAGToDAGISel::SelectCMOVOp(SDNode *N) { return CurDAG->SelectNodeTo(N, Opc, VT, Ops, 5); } -SDNode *ARMDAGToDAGISel::SelectConditionalOp(SDNode *N) { - SDValue FalseVal = N->getOperand(0); - SDValue TrueVal = N->getOperand(1); - ARMCC::CondCodes CCVal = - (ARMCC::CondCodes)cast(N->getOperand(2))->getZExtValue(); - SDValue CCR = N->getOperand(3); - assert(CCR.getOpcode() == ISD::Register); - SDValue InFlag = N->getOperand(4); - SDValue CC = CurDAG->getTargetConstant(CCVal, MVT::i32); - SDValue Reg0 = CurDAG->getRegister(0, MVT::i32); - - if (Subtarget->isThumb()) { - SDValue CPTmp0; - SDValue CPTmp1; - if (SelectT2ShifterOperandReg(TrueVal, CPTmp0, CPTmp1)) { - unsigned Opc; - switch (N->getOpcode()) { - default: llvm_unreachable("Unexpected node"); - case ARMISD::CAND: Opc = ARM::t2ANDCCrs; break; - case ARMISD::COR: Opc = ARM::t2ORRCCrs; break; - case ARMISD::CXOR: Opc = ARM::t2EORCCrs; break; - } - SDValue Ops[] = { - FalseVal, FalseVal, CPTmp0, CPTmp1, CC, CCR, Reg0, InFlag - }; - return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8); - } - - ConstantSDNode *T = dyn_cast(TrueVal); - if (T) { - unsigned TrueImm = T->getZExtValue(); - if (is_t2_so_imm(TrueImm)) { - unsigned Opc; - switch (N->getOpcode()) { - default: llvm_unreachable("Unexpected node"); - case ARMISD::CAND: Opc = ARM::t2ANDCCri; break; - case ARMISD::COR: Opc = ARM::t2ORRCCri; break; - case ARMISD::CXOR: Opc = ARM::t2EORCCri; break; - } - SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32); - SDValue Ops[] = { FalseVal, FalseVal, True, CC, CCR, Reg0, InFlag }; - return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7); - } - } - - unsigned Opc; - switch (N->getOpcode()) { - default: llvm_unreachable("Unexpected node"); - case ARMISD::CAND: Opc = ARM::t2ANDCCrr; break; - case ARMISD::COR: Opc = ARM::t2ORRCCrr; break; - case ARMISD::CXOR: Opc = ARM::t2EORCCrr; break; - } - SDValue Ops[] = { FalseVal, FalseVal, TrueVal, CC, CCR, Reg0, InFlag }; - return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7); - } - - SDValue CPTmp0; - SDValue CPTmp1; - SDValue CPTmp2; - if (SelectImmShifterOperand(TrueVal, CPTmp0, CPTmp2)) { - unsigned Opc; - switch (N->getOpcode()) { - default: llvm_unreachable("Unexpected node"); - case ARMISD::CAND: Opc = ARM::ANDCCrsi; break; - case ARMISD::COR: Opc = ARM::ORRCCrsi; break; - case ARMISD::CXOR: Opc = ARM::EORCCrsi; break; - } - SDValue Ops[] = { - FalseVal, FalseVal, CPTmp0, CPTmp2, CC, CCR, Reg0, InFlag - }; - return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 8); - } - - if (SelectRegShifterOperand(TrueVal, CPTmp0, CPTmp1, CPTmp2)) { - unsigned Opc; - switch (N->getOpcode()) { - default: llvm_unreachable("Unexpected node"); - case ARMISD::CAND: Opc = ARM::ANDCCrsr; break; - case ARMISD::COR: Opc = ARM::ORRCCrsr; break; - case ARMISD::CXOR: Opc = ARM::EORCCrsr; break; - } - SDValue Ops[] = { - FalseVal, FalseVal, CPTmp0, CPTmp1, CPTmp2, CC, CCR, Reg0, InFlag - }; - return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 9); - } - - ConstantSDNode *T = dyn_cast(TrueVal); - if (T) { - unsigned TrueImm = T->getZExtValue(); - if (is_so_imm(TrueImm)) { - unsigned Opc; - switch 
(N->getOpcode()) { - default: llvm_unreachable("Unexpected node"); - case ARMISD::CAND: Opc = ARM::ANDCCri; break; - case ARMISD::COR: Opc = ARM::ORRCCri; break; - case ARMISD::CXOR: Opc = ARM::EORCCri; break; - } - SDValue True = CurDAG->getTargetConstant(TrueImm, MVT::i32); - SDValue Ops[] = { FalseVal, FalseVal, True, CC, CCR, Reg0, InFlag }; - return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7); - } - } - - unsigned Opc; - switch (N->getOpcode()) { - default: llvm_unreachable("Unexpected node"); - case ARMISD::CAND: Opc = ARM::ANDCCrr; break; - case ARMISD::COR: Opc = ARM::ORRCCrr; break; - case ARMISD::CXOR: Opc = ARM::EORCCrr; break; - } - SDValue Ops[] = { FalseVal, FalseVal, TrueVal, CC, CCR, Reg0, InFlag }; - return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 7); -} - /// Target-specific DAG combining for ISD::XOR. /// Target-independent combining lowers SELECT_CC nodes of the form /// select_cc setg[ge] X, 0, X, -X @@ -2753,6 +2641,38 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { dl, MVT::i32, MVT::i32, Ops, 5); } } + case ARMISD::UMLAL:{ + if (Subtarget->isThumb()) { + SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), + N->getOperand(3), getAL(CurDAG), + CurDAG->getRegister(0, MVT::i32)}; + return CurDAG->getMachineNode(ARM::t2UMLAL, dl, MVT::i32, MVT::i32, Ops, 6); + }else{ + SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), + N->getOperand(3), getAL(CurDAG), + CurDAG->getRegister(0, MVT::i32), + CurDAG->getRegister(0, MVT::i32) }; + return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? + ARM::UMLAL : ARM::UMLALv5, + dl, MVT::i32, MVT::i32, Ops, 7); + } + } + case ARMISD::SMLAL:{ + if (Subtarget->isThumb()) { + SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), + N->getOperand(3), getAL(CurDAG), + CurDAG->getRegister(0, MVT::i32)}; + return CurDAG->getMachineNode(ARM::t2SMLAL, dl, MVT::i32, MVT::i32, Ops, 6); + }else{ + SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2), + N->getOperand(3), getAL(CurDAG), + CurDAG->getRegister(0, MVT::i32), + CurDAG->getRegister(0, MVT::i32) }; + return CurDAG->getMachineNode(Subtarget->hasV6Ops() ? 
+ ARM::SMLAL : ARM::SMLALv5, + dl, MVT::i32, MVT::i32, Ops, 7); + } + } case ISD::LOAD: { SDNode *ResNode = 0; if (Subtarget->isThumb() && Subtarget->hasThumb2()) @@ -2805,10 +2725,6 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) { } case ARMISD::CMOV: return SelectCMOVOp(N); - case ARMISD::CAND: - case ARMISD::COR: - case ARMISD::CXOR: - return SelectConditionalOp(N); case ARMISD::VZIP: { unsigned Opc = 0; EVT VT = N->getValueType(0); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 190ca07..ff99b04 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -122,6 +122,7 @@ void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT, setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); setOperationAction(ISD::SELECT, VT, Expand); setOperationAction(ISD::SELECT_CC, VT, Expand); + setOperationAction(ISD::VSELECT, VT, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); if (VT.isInteger()) { setOperationAction(ISD::SHL, VT, Custom); @@ -514,6 +515,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setOperationAction(ISD::FLOG10, MVT::v4f32, Expand); setOperationAction(ISD::FEXP, MVT::v4f32, Expand); setOperationAction(ISD::FEXP2, MVT::v4f32, Expand); + setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand); // Neon does not support some operations on v1i64 and v2i64 types. setOperationAction(ISD::MUL, MVT::v1i64, Expand); @@ -566,6 +568,11 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) } } + // ARM and Thumb2 support UMLAL/SMLAL. + if (!Subtarget->isThumb1Only()) + setTargetDAGCombine(ISD::ADDC); + + computeRegisterProperties(); // ARM does not have f32 extending load. @@ -629,9 +636,9 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) if (!Subtarget->hasV6Ops()) setOperationAction(ISD::BSWAP, MVT::i32, Expand); - // These are expanded into libcalls. - if (!Subtarget->hasDivide() || !Subtarget->isThumb2()) { - // v7M has a hardware divider + if (!(Subtarget->hasDivide() && Subtarget->isThumb2()) && + !(Subtarget->hasDivideInARMMode() && !Subtarget->isThumb())) { + // These are expanded into libcalls if the cpu doesn't have HW divider. setOperationAction(ISD::SDIV, MVT::i32, Expand); setOperationAction(ISD::UDIV, MVT::i32, Expand); } @@ -791,12 +798,9 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) setTargetDAGCombine(ISD::ADD); setTargetDAGCombine(ISD::SUB); setTargetDAGCombine(ISD::MUL); - - if (Subtarget->hasV6T2Ops() || Subtarget->hasNEON()) { - setTargetDAGCombine(ISD::AND); - setTargetDAGCombine(ISD::OR); - setTargetDAGCombine(ISD::XOR); - } + setTargetDAGCombine(ISD::AND); + setTargetDAGCombine(ISD::OR); + setTargetDAGCombine(ISD::XOR); if (Subtarget->hasV6Ops()) setTargetDAGCombine(ISD::SRL); @@ -821,7 +825,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) benefitFromCodePlacementOpt = true; // Prefer likely predicted branches to selects on out-of-order cores. - predictableSelectIsExpensive = Subtarget->isCortexA9(); + predictableSelectIsExpensive = Subtarget->isLikeA9(); setMinFunctionAlignment(Subtarget->isThumb() ? 
1 : 2); } @@ -898,9 +902,6 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { case ARMISD::FMSTAT: return "ARMISD::FMSTAT"; case ARMISD::CMOV: return "ARMISD::CMOV"; - case ARMISD::CAND: return "ARMISD::CAND"; - case ARMISD::COR: return "ARMISD::COR"; - case ARMISD::CXOR: return "ARMISD::CXOR"; case ARMISD::RBIT: return "ARMISD::RBIT"; @@ -984,6 +985,8 @@ const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { case ARMISD::VTBL2: return "ARMISD::VTBL2"; case ARMISD::VMULLs: return "ARMISD::VMULLs"; case ARMISD::VMULLu: return "ARMISD::VMULLu"; + case ARMISD::UMLAL: return "ARMISD::UMLAL"; + case ARMISD::SMLAL: return "ARMISD::SMLAL"; case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR"; case ARMISD::FMAX: return "ARMISD::FMAX"; case ARMISD::FMIN: return "ARMISD::FMIN"; @@ -1591,19 +1594,19 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // FIXME: handle tail calls differently. unsigned CallOpc; + bool HasMinSizeAttr = MF.getFunction()->getFnAttributes(). + hasAttribute(Attributes::MinSize); if (Subtarget->isThumb()) { if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) CallOpc = ARMISD::CALL_NOLINK; - else if (doesNotRet && isDirect && !isARMFunc && - Subtarget->hasRAS() && !Subtarget->isThumb1Only()) - // "mov lr, pc; b _foo" to avoid confusing the RSP - CallOpc = ARMISD::CALL_NOLINK; else CallOpc = isARMFunc ? ARMISD::CALL : ARMISD::tCALL; } else { - if (!isDirect && !Subtarget->hasV5TOps()) { + if (!isDirect && !Subtarget->hasV5TOps()) CallOpc = ARMISD::CALL_NOLINK; - } else if (doesNotRet && isDirect && Subtarget->hasRAS()) + else if (doesNotRet && isDirect && Subtarget->hasRAS() && + // Emit regular call when code size is the priority + !HasMinSizeAttr) // "mov lr, pc; b _foo" to avoid confusing the RSP CallOpc = ARMISD::CALL_NOLINK; else @@ -1653,22 +1656,31 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, /// and then confiscate the rest of the parameter registers to insure /// this. void -ARMTargetLowering::HandleByVal(CCState *State, unsigned &size) const { +ARMTargetLowering::HandleByVal( + CCState *State, unsigned &size, unsigned Align) const { unsigned reg = State->AllocateReg(GPRArgRegs, 4); assert((State->getCallOrPrologue() == Prologue || State->getCallOrPrologue() == Call) && "unhandled ParmContext"); if ((!State->isFirstByValRegValid()) && (ARM::R0 <= reg) && (reg <= ARM::R3)) { - State->setFirstByValReg(reg); - // At a call site, a byval parameter that is split between - // registers and memory needs its size truncated here. In a - // function prologue, such byval parameters are reassembled in - // memory, and are not truncated. - if (State->getCallOrPrologue() == Call) { - unsigned excess = 4 * (ARM::R4 - reg); - assert(size >= excess && "expected larger existing stack allocation"); - size -= excess; + if (Subtarget->isAAPCS_ABI() && Align > 4) { + unsigned AlignInRegs = Align / 4; + unsigned Waste = (ARM::R4 - reg) % AlignInRegs; + for (unsigned i = 0; i < Waste; ++i) + reg = State->AllocateReg(GPRArgRegs, 4); + } + if (reg != 0) { + State->setFirstByValReg(reg); + // At a call site, a byval parameter that is split between + // registers and memory needs its size truncated here. In a + // function prologue, such byval parameters are reassembled in + // memory, and are not truncated. 
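// Worked example of the AAPCS alignment handling above (hedged; register
// numbers are illustrative): a byval aggregate with Align = 8 arriving when
// r1 is the first free argument register gives
//   AlignInRegs = 8 / 4         = 2
//   Waste       = (r4 - r1) % 2 = 1
// so r1 is allocated and discarded, the aggregate starts in r2/r3, and at a
// call site the stack copy shrinks by 4 * (r4 - r2) = 8 bytes.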
+ if (State->getCallOrPrologue() == Call) { + unsigned excess = 4 * (ARM::R4 - reg); + assert(size >= excess && "expected larger existing stack allocation"); + size -= excess; + } } } // Confiscate any remaining parameter registers to preclude their @@ -1801,6 +1813,14 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, } } + // If Caller's vararg or byval argument has been split between registers and + // stack, do not perform tail call, since part of the argument is in caller's + // local frame. + const ARMFunctionInfo *AFI_Caller = DAG.getMachineFunction(). + getInfo(); + if (AFI_Caller->getVarArgsRegSaveSize()) + return false; + // If the callee takes no arguments then go on to check the results of the // call. if (!Outs.empty()) { @@ -2532,7 +2552,10 @@ ARMTargetLowering::computeRegArea(CCState &CCInfo, MachineFunction &MF, void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, DebugLoc dl, SDValue &Chain, - unsigned ArgOffset) const { + const Value *OrigArg, + unsigned OffsetFromOrigArg, + unsigned ArgOffset, + bool ForceMutable) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); ARMFunctionInfo *AFI = MF.getInfo(); @@ -2559,7 +2582,7 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, getPointerTy()); SmallVector MemOps; - for (; firstRegToSaveIndex < 4; ++firstRegToSaveIndex) { + for (unsigned i = 0; firstRegToSaveIndex < 4; ++firstRegToSaveIndex, ++i) { const TargetRegisterClass *RC; if (AFI->isThumb1OnlyFunction()) RC = &ARM::tGPRRegClass; @@ -2570,7 +2593,7 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, - MachinePointerInfo::getFixedStack(AFI->getVarArgsFrameIndex()), + MachinePointerInfo(OrigArg, OffsetFromOrigArg + 4*i), false, false, 0); MemOps.push_back(Store); FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN, @@ -2581,7 +2604,8 @@ ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, &MemOps[0], MemOps.size()); } else // This will point to the next argument passed via stack. - AFI->setVarArgsFrameIndex(MFI->CreateFixedObject(4, ArgOffset, true)); + AFI->setVarArgsFrameIndex( + MFI->CreateFixedObject(4, ArgOffset, !ForceMutable)); } SDValue @@ -2604,14 +2628,16 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain, CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv, /* Return*/ false, isVarArg)); - + SmallVector ArgValues; int lastInsIndex = -1; - SDValue ArgValue; + Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin(); + unsigned CurArgIdx = 0; for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; - + std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx); + CurArgIdx = Ins[VA.getValNo()].OrigArgIndex; // Arguments stored in registers. if (VA.isRegLoc()) { EVT RegVT = VA.getLocVT(); @@ -2705,14 +2731,20 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain, // Since they could be overwritten by lowering of arguments in case of // a tail call. if (Flags.isByVal()) { - unsigned VARegSize, VARegSaveSize; - computeRegArea(CCInfo, MF, VARegSize, VARegSaveSize); - VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0); - unsigned Bytes = Flags.getByValSize() - VARegSize; - if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. 
- int FI = MFI->CreateFixedObject(Bytes, - VA.getLocMemOffset(), false); - InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); + ARMFunctionInfo *AFI = MF.getInfo(); + if (!AFI->getVarArgsFrameIndex()) { + VarArgStyleRegisters(CCInfo, DAG, + dl, Chain, CurOrigArg, + Ins[VA.getValNo()].PartOffset, + VA.getLocMemOffset(), + true /*force mutable frames*/); + int VAFrameIndex = AFI->getVarArgsFrameIndex(); + InVals.push_back(DAG.getFrameIndex(VAFrameIndex, getPointerTy())); + } else { + int FI = MFI->CreateFixedObject(Flags.getByValSize(), + VA.getLocMemOffset(), false); + InVals.push_back(DAG.getFrameIndex(FI, getPointerTy())); + } } else { int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8, VA.getLocMemOffset(), true); @@ -2730,7 +2762,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain, // varargs if (isVarArg) - VarArgStyleRegisters(CCInfo, DAG, dl, Chain, CCInfo.getNextStackOffset()); + VarArgStyleRegisters(CCInfo, DAG, dl, Chain, 0, 0, + CCInfo.getNextStackOffset()); return Chain; } @@ -3890,6 +3923,36 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, return SDValue(); } +// check if an VEXT instruction can handle the shuffle mask when the +// vector sources of the shuffle are the same. +static bool isSingletonVEXTMask(ArrayRef M, EVT VT, unsigned &Imm) { + unsigned NumElts = VT.getVectorNumElements(); + + // Assume that the first shuffle index is not UNDEF. Fail if it is. + if (M[0] < 0) + return false; + + Imm = M[0]; + + // If this is a VEXT shuffle, the immediate value is the index of the first + // element. The other shuffle indices must be the successive elements after + // the first one. + unsigned ExpectedElt = Imm; + for (unsigned i = 1; i < NumElts; ++i) { + // Increment the expected index. If it wraps around, just follow it + // back to index zero and keep going. + ++ExpectedElt; + if (ExpectedElt == NumElts) + ExpectedElt = 0; + + if (M[i] < 0) continue; // ignore UNDEF indices + if (ExpectedElt != static_cast(M[i])) + return false; + } + + return true; +} + static bool isVEXTMask(ArrayRef M, EVT VT, bool &ReverseVEXT, unsigned &Imm) { @@ -4157,10 +4220,21 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, } // Scan through the operands to see if only one value is used. + // + // As an optimisation, even if more than one value is used it may be more + // profitable to splat with one value then change some lanes. + // + // Heuristically we decide to do this if the vector has a "dominant" value, + // defined as splatted to more than half of the lanes. unsigned NumElts = VT.getVectorNumElements(); bool isOnlyLowElement = true; bool usesOnlyOneValue = true; + bool hasDominantValue = false; bool isConstant = true; + + // Map of the number of times a particular SDValue appears in the + // element list. + DenseMap ValueCounts; SDValue Value; for (unsigned i = 0; i < NumElts; ++i) { SDValue V = Op.getOperand(i); @@ -4171,13 +4245,21 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, if (!isa(V) && !isa(V)) isConstant = false; - if (!Value.getNode()) + ValueCounts.insert(std::make_pair(V, 0)); + unsigned &Count = ValueCounts[V]; + + // Is this value dominant? 
(takes up more than half of the lanes) + if (++Count > (NumElts / 2)) { + hasDominantValue = true; Value = V; - else if (V != Value) - usesOnlyOneValue = false; + } } + if (ValueCounts.size() != 1) + usesOnlyOneValue = false; + if (!Value.getNode() && ValueCounts.size() > 0) + Value = ValueCounts.begin()->first; - if (!Value.getNode()) + if (ValueCounts.size() == 0) return DAG.getUNDEF(VT); if (isOnlyLowElement) @@ -4187,9 +4269,51 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, // Use VDUP for non-constant splats. For f32 constant splats, reduce to // i32 and try again. - if (usesOnlyOneValue && EltSize <= 32) { - if (!isConstant) - return DAG.getNode(ARMISD::VDUP, dl, VT, Value); + if (hasDominantValue && EltSize <= 32) { + if (!isConstant) { + SDValue N; + + // If we are VDUPing a value that comes directly from a vector, that will + // cause an unnecessary move to and from a GPR, where instead we could + // just use VDUPLANE. + if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT) { + // We need to create a new undef vector to use for the VDUPLANE if the + // size of the vector from which we get the value is different than the + // size of the vector that we need to create. We will insert the element + // such that the register coalescer will remove unnecessary copies. + if (VT != Value->getOperand(0).getValueType()) { + ConstantSDNode *constIndex; + constIndex = dyn_cast(Value->getOperand(1)); + assert(constIndex && "The index is not a constant!"); + unsigned index = constIndex->getAPIntValue().getLimitedValue() % + VT.getVectorNumElements(); + N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, + DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT), + Value, DAG.getConstant(index, MVT::i32)), + DAG.getConstant(index, MVT::i32)); + } else { + N = DAG.getNode(ARMISD::VDUPLANE, dl, VT, + Value->getOperand(0), Value->getOperand(1)); + } + } + else + N = DAG.getNode(ARMISD::VDUP, dl, VT, Value); + + if (!usesOnlyOneValue) { + // The dominant value was splatted as 'N', but we now have to insert + // all differing elements. + for (unsigned I = 0; I < NumElts; ++I) { + if (Op.getOperand(I) == Value) + continue; + SmallVector Ops; + Ops.push_back(N); + Ops.push_back(Op.getOperand(I)); + Ops.push_back(DAG.getConstant(I, MVT::i32)); + N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, &Ops[0], 3); + } + } + return N; + } if (VT.getVectorElementType().isFloatingPoint()) { SmallVector Ops; for (unsigned i = 0; i < NumElts; ++i) @@ -4201,9 +4325,11 @@ SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG, if (Val.getNode()) return DAG.getNode(ISD::BITCAST, dl, VT, Val); } - SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); - if (Val.getNode()) - return DAG.getNode(ARMISD::VDUP, dl, VT, Val); + if (usesOnlyOneValue) { + SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); + if (isConstant && Val.getNode()) + return DAG.getNode(ARMISD::VDUP, dl, VT, Val); + } } // If all elements are constants and the case above didn't get hit, fall back @@ -4586,6 +4712,12 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { if (isVREVMask(ShuffleMask, VT, 16)) return DAG.getNode(ARMISD::VREV16, dl, VT, V1); + if (V2->getOpcode() == ISD::UNDEF && + isSingletonVEXTMask(ShuffleMask, VT, Imm)) { + return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1, + DAG.getConstant(Imm, MVT::i32)); + } + // Check for Neon shuffles that modify both input vectors in place. 
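// Worked examples for the two lowering changes above, in LowerBUILD_VECTOR
// and LowerVECTOR_SHUFFLE (values illustrative):
//
// isSingletonVEXTMask: a one-source v4i32 shuffle with mask <1, 2, 3, 0>
// sets Imm = 1 and every later index matches (Imm + i) mod 4, so it lowers
// to VEXT with both operands equal to the source, i.e. a lane rotate:
//   vext.32  qd, qV, qV, #1
// mask <1, 3, 2, 0> fails at i = 2 (expected 3, saw 2) and takes other paths.
//
// Dominant-value BUILD_VECTOR: build_vector <a, a, a, b> has 'a' in three of
// four lanes (> NumElts / 2), so it becomes
//   VDUP.32 q0, a                   (or VDUPLANE when 'a' is already
//                                    extracted from a vector)
//   INSERT_VECTOR_ELT q0, b, lane 3
// instead of requiring every lane to be identical before VDUP is used.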
// If both results are used, i.e., if there are two shuffles with the same // source operands and with masks corresponding to both results of one of @@ -5421,7 +5553,7 @@ ARMTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, exitMBB->transferSuccessorsAndUpdatePHIs(BB); const TargetRegisterClass *TRC = isThumb2 ? - (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::rGPRRegClass : (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned scratch = MRI.createVirtualRegister(TRC); unsigned scratch2 = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC); @@ -5532,7 +5664,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, exitMBB->transferSuccessorsAndUpdatePHIs(BB); const TargetRegisterClass *TRC = isThumb2 ? - (const TargetRegisterClass*)&ARM::tGPRRegClass : + (const TargetRegisterClass*)&ARM::rGPRRegClass : (const TargetRegisterClass*)&ARM::GPRRegClass; unsigned scratch = MRI.createVirtualRegister(TRC); unsigned scratch2 = MRI.createVirtualRegister(TRC); @@ -5546,7 +5678,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, // ldrex dest, ptr // (sign extend dest, if required) // cmp dest, incr - // cmov.cond scratch2, dest, incr + // cmov.cond scratch2, incr, dest // strex scratch, scratch2, ptr // cmp scratch, #0 // bne- loopMBB @@ -5569,7 +5701,7 @@ ARMTargetLowering::EmitAtomicBinaryMinMax(MachineInstr *MI, AddDefaultPred(BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr)) .addReg(oldval).addReg(incr)); BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr), scratch2) - .addReg(oldval).addReg(incr).addImm(Cond).addReg(ARM::CPSR); + .addReg(incr).addReg(oldval).addImm(Cond).addReg(ARM::CPSR); MIB = BuildMI(BB, dl, TII->get(strOpc), scratch).addReg(scratch2).addReg(ptr); if (strOpc == ARM::t2STREX) @@ -5939,12 +6071,15 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4); - if (AFI->isThumb1OnlyFunction()) - BuildMI(DispatchBB, dl, TII->get(ARM::tInt_eh_sjlj_dispatchsetup)); - else if (!Subtarget->hasVFP2()) - BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup_nofp)); - else - BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); + MachineInstrBuilder MIB; + MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup)); + + const ARMBaseInstrInfo *AII = static_cast(TII); + const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); + + // Add a register mask with no preserved registers. This results in all + // registers being marked as clobbered. + MIB.addRegMask(RI.getNoPreservedMask()); unsigned NumLPads = LPadList.size(); if (Subtarget->isThumb2()) { @@ -6016,9 +6151,9 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { const Constant *C = ConstantInt::get(Int32Ty, NumLPads); // MachineConstantPool wants an explicit alignment. - unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); + unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); if (Align == 0) - Align = getTargetData()->getTypeAllocSize(C->getType()); + Align = getDataLayout()->getTypeAllocSize(C->getType()); unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); unsigned VReg1 = MRI->createVirtualRegister(TRC); @@ -6105,9 +6240,9 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { const Constant *C = ConstantInt::get(Int32Ty, NumLPads); // MachineConstantPool wants an explicit alignment. 
- unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); + unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); if (Align == 0) - Align = getTargetData()->getTypeAllocSize(C->getType()); + Align = getDataLayout()->getTypeAllocSize(C->getType()); unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); unsigned VReg1 = MRI->createVirtualRegister(TRC); @@ -6154,18 +6289,15 @@ EmitSjLjDispatchBlock(MachineInstr *MI, MachineBasicBlock *MBB) const { } // Add the jump table entries as successors to the MBB. - MachineBasicBlock *PrevMBB = 0; + SmallPtrSet SeenMBBs; for (std::vector::iterator I = LPadList.begin(), E = LPadList.end(); I != E; ++I) { MachineBasicBlock *CurMBB = *I; - if (PrevMBB != CurMBB) + if (SeenMBBs.insert(CurMBB)) DispContBB->addSuccessor(CurMBB); - PrevMBB = CurMBB; } // N.B. the order the invoke BBs are processed in doesn't matter here. - const ARMBaseInstrInfo *AII = static_cast(TII); - const ARMBaseRegisterInfo &RI = AII->getRegisterInfo(); const uint16_t *SavedRegs = RI.getCalleeSavedRegs(MF); SmallVector MBBLPads; for (SmallPtrSet::iterator @@ -6279,7 +6411,8 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const { UnitSize = 2; } else { // Check whether we can use NEON instructions. - if (!MF->getFunction()->hasFnAttr(Attribute::NoImplicitFloat) && + if (!MF->getFunction()->getFnAttributes(). + hasAttribute(Attributes::NoImplicitFloat) && Subtarget->hasNEON()) { if ((Align % 16 == 0) && SizeVal >= 16) { ldrOpc = ARM::VLD1q32wb_fixed; @@ -6364,7 +6497,8 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const { } else { AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ldrOpc),scratch) - .addReg(srcOut, RegState::Define).addReg(srcIn).addImm(1)); + .addReg(srcOut, RegState::Define).addReg(srcIn) + .addReg(0).addImm(1)); AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(strOpc), destOut) .addReg(scratch).addReg(destIn) @@ -6427,9 +6561,9 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const { const Constant *C = ConstantInt::get(Int32Ty, LoopSize); // MachineConstantPool wants an explicit alignment. - unsigned Align = getTargetData()->getPrefTypeAlignment(Int32Ty); + unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty); if (Align == 0) - Align = getTargetData()->getTypeAllocSize(C->getType()); + Align = getDataLayout()->getTypeAllocSize(C->getType()); unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align); AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::LDRcp)) @@ -6981,73 +7115,131 @@ static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { return AllOnes ? C->isAllOnesValue() : C->isNullValue(); } +// Return true if N is conditionally 0 or all ones. +// Detects these expressions where cc is an i1 value: +// +// (select cc 0, y) [AllOnes=0] +// (select cc y, 0) [AllOnes=0] +// (zext cc) [AllOnes=0] +// (sext cc) [AllOnes=0/1] +// (select cc -1, y) [AllOnes=1] +// (select cc y, -1) [AllOnes=1] +// +// Invert is set when N is the null/all ones constant when CC is false. +// OtherOp is set to the alternative value of N. 
+static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, + SDValue &CC, bool &Invert, + SDValue &OtherOp, + SelectionDAG &DAG) { + switch (N->getOpcode()) { + default: return false; + case ISD::SELECT: { + CC = N->getOperand(0); + SDValue N1 = N->getOperand(1); + SDValue N2 = N->getOperand(2); + if (isZeroOrAllOnes(N1, AllOnes)) { + Invert = false; + OtherOp = N2; + return true; + } + if (isZeroOrAllOnes(N2, AllOnes)) { + Invert = true; + OtherOp = N1; + return true; + } + return false; + } + case ISD::ZERO_EXTEND: + // (zext cc) can never be the all ones value. + if (AllOnes) + return false; + // Fall through. + case ISD::SIGN_EXTEND: { + EVT VT = N->getValueType(0); + CC = N->getOperand(0); + if (CC.getValueType() != MVT::i1) + return false; + Invert = !AllOnes; + if (AllOnes) + // When looking for an AllOnes constant, N is an sext, and the 'other' + // value is 0. + OtherOp = DAG.getConstant(0, VT); + else if (N->getOpcode() == ISD::ZERO_EXTEND) + // When looking for a 0 constant, N can be zext or sext. + OtherOp = DAG.getConstant(1, VT); + else + OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT); + return true; + } + } +} + // Combine a constant select operand into its use: // -// (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) -// (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) +// (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) +// (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) +// (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1] +// (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) +// (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) // // The transform is rejected if the select doesn't have a constant operand that -// is null. +// is null, or all ones when AllOnes is set. +// +// Also recognize sext/zext from i1: +// +// (add (zext cc), x) -> (select cc (add x, 1), x) +// (add (sext cc), x) -> (select cc (add x, -1), x) +// +// These transformations eventually create predicated instructions. // // @param N The node to transform. // @param Slct The N operand that is a select. // @param OtherOp The other N operand (x above). // @param DCI Context. +// @param AllOnes Require the select constant to be all ones instead of null. // @returns The new node, or SDValue() on failure. static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, - TargetLowering::DAGCombinerInfo &DCI) { + TargetLowering::DAGCombinerInfo &DCI, + bool AllOnes = false) { SelectionDAG &DAG = DCI.DAG; - const TargetLowering &TLI = DAG.getTargetLoweringInfo(); EVT VT = N->getValueType(0); - unsigned Opc = N->getOpcode(); - bool isSlctCC = Slct.getOpcode() == ISD::SELECT_CC; - SDValue LHS = isSlctCC ? Slct.getOperand(2) : Slct.getOperand(1); - SDValue RHS = isSlctCC ? 
Slct.getOperand(3) : Slct.getOperand(2); - ISD::CondCode CC = ISD::SETCC_INVALID; - - if (isSlctCC) { - CC = cast(Slct.getOperand(4))->get(); - } else { - SDValue CCOp = Slct.getOperand(0); - if (CCOp.getOpcode() == ISD::SETCC) - CC = cast(CCOp.getOperand(2))->get(); - } - - bool DoXform = false; - bool InvCC = false; - assert ((Opc == ISD::ADD || (Opc == ISD::SUB && Slct == N->getOperand(1))) && - "Bad input!"); + SDValue NonConstantVal; + SDValue CCOp; + bool SwapSelectOps; + if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps, + NonConstantVal, DAG)) + return SDValue(); - if (isZeroOrAllOnes(LHS, false)) { - DoXform = true; - } else if (CC != ISD::SETCC_INVALID && isZeroOrAllOnes(RHS, false)) { - std::swap(LHS, RHS); - SDValue Op0 = Slct.getOperand(0); - EVT OpVT = isSlctCC ? Op0.getValueType() : Op0.getOperand(0).getValueType(); - bool isInt = OpVT.isInteger(); - CC = ISD::getSetCCInverse(CC, isInt); + // Slct is now know to be the desired identity constant when CC is true. + SDValue TrueVal = OtherOp; + SDValue FalseVal = DAG.getNode(N->getOpcode(), N->getDebugLoc(), VT, + OtherOp, NonConstantVal); + // Unless SwapSelectOps says CC should be false. + if (SwapSelectOps) + std::swap(TrueVal, FalseVal); - if (!TLI.isCondCodeLegal(CC, OpVT)) - return SDValue(); // Inverse operator isn't legal. + return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, + CCOp, TrueVal, FalseVal); +} - DoXform = true; - InvCC = true; +// Attempt combineSelectAndUse on each operand of a commutative operator N. +static +SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, + TargetLowering::DAGCombinerInfo &DCI) { + SDValue N0 = N->getOperand(0); + SDValue N1 = N->getOperand(1); + if (N0.getNode()->hasOneUse()) { + SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes); + if (Result.getNode()) + return Result; } - - if (!DoXform) - return SDValue(); - - SDValue Result = DAG.getNode(Opc, RHS.getDebugLoc(), VT, OtherOp, RHS); - if (isSlctCC) - return DAG.getSelectCC(N->getDebugLoc(), OtherOp, Result, - Slct.getOperand(0), Slct.getOperand(1), CC); - SDValue CCOp = Slct.getOperand(0); - if (InvCC) - CCOp = DAG.getSetCC(Slct.getDebugLoc(), CCOp.getValueType(), - CCOp.getOperand(0), CCOp.getOperand(1), CC); - return DAG.getNode(ISD::SELECT, N->getDebugLoc(), VT, - CCOp, OtherOp, Result); + if (N1.getNode()->hasOneUse()) { + SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes); + if (Result.getNode()) + return Result; + } + return SDValue(); } // AddCombineToVPADDL- For pair-wise add on neon, use the vpaddl instruction @@ -7139,6 +7331,154 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, tmp); } +static SDValue findMUL_LOHI(SDValue V) { + if (V->getOpcode() == ISD::UMUL_LOHI || + V->getOpcode() == ISD::SMUL_LOHI) + return V; + return SDValue(); +} + +static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, + TargetLowering::DAGCombinerInfo &DCI, + const ARMSubtarget *Subtarget) { + + if (Subtarget->isThumb1Only()) return SDValue(); + + // Only perform the checks after legalize when the pattern is available. + if (DCI.isBeforeLegalize()) return SDValue(); + + // Look for multiply add opportunities. + // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where + // each add nodes consumes a value from ISD::UMUL_LOHI and there is + // a glue link from the first add to the second add. 
+ // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by + // a S/UMLAL instruction. + // loAdd UMUL_LOHI + // \ / :lo \ :hi + // \ / \ [no multiline comment] + // ADDC | hiAdd + // \ :glue / / + // \ / / + // ADDE + // + assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC"); + SDValue AddcOp0 = AddcNode->getOperand(0); + SDValue AddcOp1 = AddcNode->getOperand(1); + + // Check if the two operands are from the same mul_lohi node. + if (AddcOp0.getNode() == AddcOp1.getNode()) + return SDValue(); + + assert(AddcNode->getNumValues() == 2 && + AddcNode->getValueType(0) == MVT::i32 && + AddcNode->getValueType(1) == MVT::Glue && + "Expect ADDC with two result values: i32, glue"); + + // Check that the ADDC adds the low result of the S/UMUL_LOHI. + if (AddcOp0->getOpcode() != ISD::UMUL_LOHI && + AddcOp0->getOpcode() != ISD::SMUL_LOHI && + AddcOp1->getOpcode() != ISD::UMUL_LOHI && + AddcOp1->getOpcode() != ISD::SMUL_LOHI) + return SDValue(); + + // Look for the glued ADDE. + SDNode* AddeNode = AddcNode->getGluedUser(); + if (AddeNode == NULL) + return SDValue(); + + // Make sure it is really an ADDE. + if (AddeNode->getOpcode() != ISD::ADDE) + return SDValue(); + + assert(AddeNode->getNumOperands() == 3 && + AddeNode->getOperand(2).getValueType() == MVT::Glue && + "ADDE node has the wrong inputs"); + + // Check for the triangle shape. + SDValue AddeOp0 = AddeNode->getOperand(0); + SDValue AddeOp1 = AddeNode->getOperand(1); + + // Make sure that the ADDE operands are not coming from the same node. + if (AddeOp0.getNode() == AddeOp1.getNode()) + return SDValue(); + + // Find the MUL_LOHI node walking up ADDE's operands. + bool IsLeftOperandMUL = false; + SDValue MULOp = findMUL_LOHI(AddeOp0); + if (MULOp == SDValue()) + MULOp = findMUL_LOHI(AddeOp1); + else + IsLeftOperandMUL = true; + if (MULOp == SDValue()) + return SDValue(); + + // Figure out the right opcode. + unsigned Opc = MULOp->getOpcode(); + unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL; + + // Figure out the high and low input values to the MLAL node. + SDValue* HiMul = &MULOp; + SDValue* HiAdd = NULL; + SDValue* LoMul = NULL; + SDValue* LowAdd = NULL; + + if (IsLeftOperandMUL) + HiAdd = &AddeOp1; + else + HiAdd = &AddeOp0; + + + if (AddcOp0->getOpcode() == Opc) { + LoMul = &AddcOp0; + LowAdd = &AddcOp1; + } + if (AddcOp1->getOpcode() == Opc) { + LoMul = &AddcOp1; + LowAdd = &AddcOp0; + } + + if (LoMul == NULL) + return SDValue(); + + if (LoMul->getNode() != HiMul->getNode()) + return SDValue(); + + // Create the merged node. + SelectionDAG &DAG = DCI.DAG; + + // Build operand list. + SmallVector Ops; + Ops.push_back(LoMul->getOperand(0)); + Ops.push_back(LoMul->getOperand(1)); + Ops.push_back(*LowAdd); + Ops.push_back(*HiAdd); + + SDValue MLALNode = DAG.getNode(FinalOpc, AddcNode->getDebugLoc(), + DAG.getVTList(MVT::i32, MVT::i32), + &Ops[0], Ops.size()); + + // Replace the ADDs' nodes uses by the MLA node's values. + SDValue HiMLALResult(MLALNode.getNode(), 1); + DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult); + + SDValue LoMLALResult(MLALNode.getNode(), 0); + DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult); + + // Return original node to notify the driver to stop replacing. + SDValue resNode(AddcNode, 0); + return resNode; +} + +/// PerformADDCCombine - Target-specific dag combine transform from +/// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL. 
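// A source-level shape that produces the ADDC/ADDE + UMUL_LOHI pattern this
// combine looks for (hedged; the exact DAG depends on legalisation):
//
//   uint64_t mac(uint64_t acc, uint32_t a, uint32_t b) {
//     return acc + (uint64_t)a * b;
//   }
//
// After i64 legalisation this is roughly
//   (lo, hi) = UMUL_LOHI(a, b)
//   lo'      = ADDC(acc_lo, lo)          -- produces carry glue
//   hi'      = ADDE(acc_hi, hi, glue)    -- consumes it
// and the combine rewrites the three nodes into one ARMISD::UMLAL (SMLAL for
// the signed variant), which then selects to the UMLAL / SMLAL instruction.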
+static SDValue PerformADDCCombine(SDNode *N, + TargetLowering::DAGCombinerInfo &DCI, + const ARMSubtarget *Subtarget) { + + return AddCombineTo64bitMLAL(N, DCI, Subtarget); + +} + /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with /// operands N0 and N1. This is a helper for PerformADDCombine that is /// called with the default operands, and if that fails, with commuted @@ -7153,7 +7493,7 @@ static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, return Result; // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c)) - if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) { + if (N0.getNode()->hasOneUse()) { SDValue Result = combineSelectAndUse(N, N0, N1, DCI); if (Result.getNode()) return Result; } @@ -7185,7 +7525,7 @@ static SDValue PerformSUBCombine(SDNode *N, SDValue N1 = N->getOperand(1); // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c)) - if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) { + if (N1.getNode()->hasOneUse()) { SDValue Result = combineSelectAndUse(N, N1, N0, DCI); if (Result.getNode()) return Result; } @@ -7313,41 +7653,6 @@ static SDValue PerformMULCombine(SDNode *N, return SDValue(); } -static bool isCMOVWithZeroOrAllOnesLHS(SDValue N, bool AllOnes) { - return N.getOpcode() == ARMISD::CMOV && N.getNode()->hasOneUse() && - isZeroOrAllOnes(N.getOperand(0), AllOnes); -} - -/// formConditionalOp - Combine an operation with a conditional move operand -/// to form a conditional op. e.g. (or x, (cmov 0, y, cond)) => (or.cond x, y) -/// (and x, (cmov -1, y, cond)) => (and.cond, x, y) -static SDValue formConditionalOp(SDNode *N, SelectionDAG &DAG, - bool Commutable) { - SDValue N0 = N->getOperand(0); - SDValue N1 = N->getOperand(1); - - bool isAND = N->getOpcode() == ISD::AND; - bool isCand = isCMOVWithZeroOrAllOnesLHS(N1, isAND); - if (!isCand && Commutable) { - isCand = isCMOVWithZeroOrAllOnesLHS(N0, isAND); - if (isCand) - std::swap(N0, N1); - } - if (!isCand) - return SDValue(); - - unsigned Opc = 0; - switch (N->getOpcode()) { - default: llvm_unreachable("Unexpected node"); - case ISD::AND: Opc = ARMISD::CAND; break; - case ISD::OR: Opc = ARMISD::COR; break; - case ISD::XOR: Opc = ARMISD::CXOR; break; - } - return DAG.getNode(Opc, N->getDebugLoc(), N->getValueType(0), N0, - N1.getOperand(1), N1.getOperand(2), N1.getOperand(3), - N1.getOperand(4)); -} - static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget) { @@ -7382,10 +7687,10 @@ static SDValue PerformANDCombine(SDNode *N, } if (!Subtarget->isThumb1Only()) { - // (and x, (cmov -1, y, cond)) => (and.cond x, y) - SDValue CAND = formConditionalOp(N, DAG, true); - if (CAND.getNode()) - return CAND; + // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) + SDValue Result = combineSelectAndUseCommutative(N, true, DCI); + if (Result.getNode()) + return Result; } return SDValue(); @@ -7425,13 +7730,12 @@ static SDValue PerformORCombine(SDNode *N, } if (!Subtarget->isThumb1Only()) { - // (or x, (cmov 0, y, cond)) => (or.cond x, y) - SDValue COR = formConditionalOp(N, DAG, true); - if (COR.getNode()) - return COR; + // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c)) + SDValue Result = combineSelectAndUseCommutative(N, false, DCI); + if (Result.getNode()) + return Result; } - // The code below optimizes (or (and X, Y), Z). // The AND operand needs to have a single user to make these optimizations // profitable. 
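The AND and OR combines above (and the XOR combine in the next hunk) now funnel through combineSelectAndUseCommutative instead of the removed formConditionalOp, so the folding is expressed as a plain ISD::SELECT and only becomes a predicated instruction later. Worked instances of the transform, following the comments in the hunks above:

    // (and x, (select cc, -1, c))   [AllOnes = 1; -1 is the AND identity]
    //   -> (select cc, x, (and x, c))
    // (or  x, (select cc,  0, c))   [AllOnes = 0]
    //   -> (select cc, x, (or x, c))
    // (add x, (zext cc))            [i1 cc widened by zext]
    //   -> (select cc, (add x, 1), x)
    // (add x, (sext cc))
    //   -> (select cc, (add x, -1), x)
    // isConditionalZeroOrAllOnes supplies cc, the non-identity operand, and
    // whether the select arms must be swapped (Invert) for each case.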
@@ -7593,10 +7897,10 @@ static SDValue PerformXORCombine(SDNode *N, return SDValue(); if (!Subtarget->isThumb1Only()) { - // (xor x, (cmov 0, y, cond)) => (xor.cond x, y) - SDValue CXOR = formConditionalOp(N, DAG, true); - if (CXOR.getNode()) - return CXOR; + // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c)) + SDValue Result = combineSelectAndUseCommutative(N, false, DCI); + if (Result.getNode()) + return Result; } return SDValue(); @@ -8746,6 +9050,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { switch (N->getOpcode()) { default: break; + case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget); case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget); case ISD::SUB: return PerformSUBCombine(N, DCI); case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget); @@ -8807,8 +9112,8 @@ bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc, } bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { - if (!Subtarget->allowsUnalignedMem()) - return false; + // The AllowsUnaliged flag models the SCTLR.A setting in ARM cpus + bool AllowsUnaligned = Subtarget->allowsUnalignedMem(); switch (VT.getSimpleVT().SimpleTy) { default: @@ -8816,10 +9121,14 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { case MVT::i8: case MVT::i16: case MVT::i32: - return true; + // Unaligned access can use (for example) LRDB, LRDH, LDR + return AllowsUnaligned; case MVT::f64: - return Subtarget->hasNEON(); - // FIXME: VLD1 etc with standard alignment is legal. + case MVT::v2f64: + // For any little-endian targets with neon, we can support unaligned ld/st + // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. + // A big-endian target may also explictly support unaligned accesses + return Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian()); } } @@ -8838,7 +9147,7 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size, // See if we can use NEON instructions for this... if (IsZeroVal && - !F->hasFnAttr(Attribute::NoImplicitFloat) && + !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat) && Subtarget->hasNEON()) { if (memOpAlign(SrcAlign, DstAlign, 16) && Size >= 16) { return MVT::v4i32; @@ -9632,7 +9941,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, case Intrinsic::arm_neon_vld4lane: { Info.opc = ISD::INTRINSIC_W_CHAIN; // Conservatively set memVT to the entire set of vectors loaded. - uint64_t NumElts = getTargetData()->getTypeAllocSize(I.getType()) / 8; + uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8; Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); Info.ptrVal = I.getArgOperand(0); Info.offset = 0; @@ -9657,7 +9966,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, Type *ArgTy = I.getArgOperand(ArgI)->getType(); if (!ArgTy->isVectorTy()) break; - NumElts += getTargetData()->getTypeAllocSize(ArgTy) / 8; + NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8; } Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); Info.ptrVal = I.getArgOperand(0); diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index 51d1205..4eb3b2c 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -63,9 +63,6 @@ namespace llvm { FMSTAT, // ARM fmstat instruction. CMOV, // ARM conditional move instructions. - CAND, // ARM conditional and instructions. - COR, // ARM conditional or instructions. 
- CXOR, // ARM conditional xor instructions. BCC_i64, @@ -176,6 +173,9 @@ namespace llvm { VMULLs, // ...signed VMULLu, // ...unsigned + UMLAL, // 64bit Unsigned Accumulate Multiply + SMLAL, // 64bit Signed Accumulate Multiply + // Operands of the standard BUILD_VECTOR node are not legalized, which // is fine if BUILD_VECTORs are always lowered to shuffles or other // operations, but for ARM some BUILD_VECTORs are legal as-is and their @@ -260,6 +260,11 @@ namespace llvm { virtual const char *getTargetNodeName(unsigned Opcode) const; + virtual bool isSelectSupported(SelectSupportKind Kind) const { + // ARM does not support scalar condition selects on vectors. + return (Kind != ScalarCondVectorVal); + } + /// getSetCCResultType - Return the value type to use for ISD::SETCC. virtual EVT getSetCCResultType(EVT VT) const; @@ -461,7 +466,11 @@ namespace llvm { SmallVectorImpl &InVals) const; void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG, - DebugLoc dl, SDValue &Chain, unsigned ArgOffset) + DebugLoc dl, SDValue &Chain, + const Value *OrigArg, + unsigned OffsetFromOrigArg, + unsigned ArgOffset, + bool ForceMutable = false) const; void computeRegArea(CCState &CCInfo, MachineFunction &MF, @@ -472,7 +481,7 @@ namespace llvm { SmallVectorImpl &InVals) const; /// HandleByVal - Target-specific cleanup for ByVal support. - virtual void HandleByVal(CCState *, unsigned &) const; + virtual void HandleByVal(CCState *, unsigned &, unsigned) const; /// IsEligibleForTailCallOptimization - Check whether the call is eligible /// for tail call optimization. Targets which want to do tail call diff --git a/lib/Target/ARM/ARMInstrFormats.td b/lib/Target/ARM/ARMInstrFormats.td index c8966fb..67a6820 100644 --- a/lib/Target/ARM/ARMInstrFormats.td +++ b/lib/Target/ARM/ARMInstrFormats.td @@ -846,6 +846,23 @@ class AMiscA1I opcod, bits<4> opc7_4, dag oops, dag iops, let Inst{3-0} = Rm; } +// Division instructions. +class ADivA1I opcod, dag oops, dag iops, + InstrItinClass itin, string opc, string asm, list pattern> + : I { + bits<4> Rd; + bits<4> Rn; + bits<4> Rm; + let Inst{27-23} = 0b01110; + let Inst{22-20} = opcod; + let Inst{19-16} = Rd; + let Inst{15-12} = 0b1111; + let Inst{11-8} = Rm; + let Inst{7-4} = 0b0001; + let Inst{3-0} = Rn; +} + // PKH instructions def PKHLSLAsmOperand : ImmAsmOperand { let Name = "PKHLSLImm"; @@ -893,6 +910,10 @@ class ARMV5TPat : Pat { class ARMV5TEPat : Pat { list Predicates = [IsARM, HasV5TE]; } +// ARMV5MOPat - Same as ARMV5TEPat with UseMulOps. +class ARMV5MOPat : Pat { + list Predicates = [IsARM, HasV5TE, UseMulOps]; +} class ARMV6Pat : Pat { list Predicates = [IsARM, HasV6]; } diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp index 31b0c41..a0b6f24 100644 --- a/lib/Target/ARM/ARMInstrInfo.cpp +++ b/lib/Target/ARM/ARMInstrInfo.cpp @@ -13,13 +13,17 @@ #include "ARMInstrInfo.h" #include "ARM.h" +#include "ARMConstantPoolValue.h" #include "ARMMachineFunctionInfo.h" +#include "ARMTargetMachine.h" #include "MCTargetDesc/ARMAddressingModes.h" #include "llvm/ADT/STLExtras.h" #include "llvm/CodeGen/LiveVariables.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCInst.h" using namespace llvm; @@ -84,3 +88,61 @@ unsigned ARMInstrInfo::getUnindexedOpcode(unsigned Opc) const { return 0; } + +namespace { + /// ARMCGBR - Create Global Base Reg pass. 
This initializes the PIC + /// global base register for ARM ELF. + struct ARMCGBR : public MachineFunctionPass { + static char ID; + ARMCGBR() : MachineFunctionPass(ID) {} + + virtual bool runOnMachineFunction(MachineFunction &MF) { + ARMFunctionInfo *AFI = MF.getInfo(); + if (AFI->getGlobalBaseReg() == 0) + return false; + + const ARMTargetMachine *TM = + static_cast(&MF.getTarget()); + if (TM->getRelocationModel() != Reloc::PIC_) + return false; + + LLVMContext* Context = &MF.getFunction()->getContext(); + GlobalValue *GV = new GlobalVariable(Type::getInt32Ty(*Context), false, + GlobalValue::ExternalLinkage, 0, + "_GLOBAL_OFFSET_TABLE_"); + unsigned Id = AFI->createPICLabelUId(); + ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id); + unsigned Align = TM->getDataLayout()->getPrefTypeAlignment(GV->getType()); + unsigned Idx = MF.getConstantPool()->getConstantPoolIndex(CPV, Align); + + MachineBasicBlock &FirstMBB = MF.front(); + MachineBasicBlock::iterator MBBI = FirstMBB.begin(); + DebugLoc DL = FirstMBB.findDebugLoc(MBBI); + unsigned GlobalBaseReg = AFI->getGlobalBaseReg(); + unsigned Opc = TM->getSubtarget().isThumb2() ? + ARM::t2LDRpci : ARM::LDRcp; + const TargetInstrInfo &TII = *TM->getInstrInfo(); + MachineInstrBuilder MIB = BuildMI(FirstMBB, MBBI, DL, + TII.get(Opc), GlobalBaseReg) + .addConstantPoolIndex(Idx); + if (Opc == ARM::LDRcp) + MIB.addImm(0); + AddDefaultPred(MIB); + + return true; + } + + virtual const char *getPassName() const { + return "ARM PIC Global Base Reg Initialization"; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesCFG(); + MachineFunctionPass::getAnalysisUsage(AU); + } + }; +} + +char ARMCGBR::ID = 0; +FunctionPass* +llvm::createARMGlobalBaseRegPass() { return new ARMCGBR(); } diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index 992aba5..df2e55e 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -83,6 +83,13 @@ def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3, SDTCisInt<0>, SDTCisVT<1, i32>, SDTCisVT<4, i32>]>; + +def SDT_ARM64bitmlal : SDTypeProfile<2,4, [ SDTCisVT<0, i32>, SDTCisVT<1, i32>, + SDTCisVT<2, i32>, SDTCisVT<3, i32>, + SDTCisVT<4, i32>, SDTCisVT<5, i32> ] >; +def ARMUmlal : SDNode<"ARMISD::UMLAL", SDT_ARM64bitmlal>; +def ARMSmlal : SDNode<"ARMISD::SMLAL", SDT_ARM64bitmlal>; + // Node definitions. 
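The SDT_ARM64bitmlal profile and the ARMUmlal/ARMSmlal nodes introduced above describe a 64-bit multiply-accumulate: a 32x32->64 multiply added into a 64-bit accumulator held in the RdLo/RdHi register pair. A minimal C++ sketch of the source-level operation these nodes correspond to (illustration only, the function names are made up):

#include <cstdint>

// 32 x 32 -> 64 multiply accumulated into a 64-bit value; on ARM this is
// what UMLAL (unsigned) and SMLAL (signed) compute, with the accumulator
// split across the RdLo/RdHi register pair.
uint64_t umlal(uint64_t acc, uint32_t a, uint32_t b) {
  return acc + (uint64_t)a * b;
}
int64_t smlal(int64_t acc, int32_t a, int32_t b) {
  return acc + (int64_t)a * b;
}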
def ARMWrapper : SDNode<"ARMISD::Wrapper", SDTIntUnaryOp>; def ARMWrapperDYN : SDNode<"ARMISD::WrapperDYN", SDTIntUnaryOp>; @@ -90,9 +97,10 @@ def ARMWrapperPIC : SDNode<"ARMISD::WrapperPIC", SDTIntUnaryOp>; def ARMWrapperJT : SDNode<"ARMISD::WrapperJT", SDTIntBinOp>; def ARMcallseq_start : SDNode<"ISD::CALLSEQ_START", SDT_ARMCallSeqStart, - [SDNPHasChain, SDNPOutGlue]>; + [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>; def ARMcallseq_end : SDNode<"ISD::CALLSEQ_END", SDT_ARMCallSeqEnd, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; + [SDNPHasChain, SDNPSideEffect, + SDNPOptInGlue, SDNPOutGlue]>; def ARMcopystructbyval : SDNode<"ARMISD::COPY_STRUCT_BYVAL" , SDT_ARMStructByVal, [SDNPHasChain, SDNPInGlue, SDNPOutGlue, @@ -148,14 +156,16 @@ def ARMsube : SDNode<"ARMISD::SUBE", SDTBinaryArithWithFlagsInOut>; def ARMthread_pointer: SDNode<"ARMISD::THREAD_POINTER", SDT_ARMThreadPointer>; def ARMeh_sjlj_setjmp: SDNode<"ARMISD::EH_SJLJ_SETJMP", - SDT_ARMEH_SJLJ_Setjmp, [SDNPHasChain]>; + SDT_ARMEH_SJLJ_Setjmp, + [SDNPHasChain, SDNPSideEffect]>; def ARMeh_sjlj_longjmp: SDNode<"ARMISD::EH_SJLJ_LONGJMP", - SDT_ARMEH_SJLJ_Longjmp, [SDNPHasChain]>; + SDT_ARMEH_SJLJ_Longjmp, + [SDNPHasChain, SDNPSideEffect]>; def ARMMemBarrier : SDNode<"ARMISD::MEMBARRIER", SDT_ARMMEMBARRIER, - [SDNPHasChain]>; + [SDNPHasChain, SDNPSideEffect]>; def ARMMemBarrierMCR : SDNode<"ARMISD::MEMBARRIER_MCR", SDT_ARMMEMBARRIER, - [SDNPHasChain]>; + [SDNPHasChain, SDNPSideEffect]>; def ARMPreload : SDNode<"ARMISD::PRELOAD", SDT_ARMPREFETCH, [SDNPHasChain, SDNPMayLoad, SDNPMayStore]>; @@ -197,6 +207,8 @@ def HasFP16 : Predicate<"Subtarget->hasFP16()">, AssemblerPredicate<"FeatureFP16","half-float">; def HasDivide : Predicate<"Subtarget->hasDivide()">, AssemblerPredicate<"FeatureHWDiv", "divide">; +def HasDivideInARM : Predicate<"Subtarget->hasDivideInARMMode()">, + AssemblerPredicate<"FeatureHWDivARM">; def HasT2ExtractPack : Predicate<"Subtarget->hasT2ExtractPack()">, AssemblerPredicate<"FeatureT2XtPk", "pack/extract">; @@ -232,6 +244,7 @@ def IsNaCl : Predicate<"Subtarget->isTargetNaCl()">; def UseMovt : Predicate<"Subtarget->useMovt()">; def DontUseMovt : Predicate<"!Subtarget->useMovt()">; def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">; +def UseMulOps : Predicate<"Subtarget->useMulOps()">; // Prefer fused MAC for fp mul + add over fp VMLA / VMLS if they are available. // But only select them if more precision in FP computation is allowed. @@ -242,6 +255,20 @@ def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion ==" def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4() || " "Subtarget->isTargetDarwin()">; +// VGETLNi32 is microcoded on Swift - prefer VMOV. +def HasFastVGETLNi32 : Predicate<"!Subtarget->isSwift()">; +def HasSlowVGETLNi32 : Predicate<"Subtarget->isSwift()">; + +// VDUP.32 is microcoded on Swift - prefer VMOV. +def HasFastVDUP32 : Predicate<"!Subtarget->isSwift()">; +def HasSlowVDUP32 : Predicate<"Subtarget->isSwift()">; + +// Cortex-A9 prefers VMOVSR to VMOVDRR even when using NEON for scalar FP, as +// this allows more effective execution domain optimization. See +// setExecutionDomain(). +def UseVMOVSR : Predicate<"Subtarget->isCortexA9() || !Subtarget->useNEONForSinglePrecisionFP()">; +def DontUseVMOVSR : Predicate<"!Subtarget->isCortexA9() && Subtarget->useNEONForSinglePrecisionFP()">; + def IsLE : Predicate<"TLI.isLittleEndian()">; def IsBE : Predicate<"TLI.isBigEndian()">; @@ -256,15 +283,13 @@ class RegConstraint { // ARM specific transformation functions and pattern fragments. 
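The new UseMulOps predicate defined above gives a subtarget a way to opt out of the combined multiply-accumulate instruction forms; later hunks in this patch add it to the Requires lists of MLA, MLS and the SMLAxy patterns. At the source level those patterns match plain multiply-add and multiply-subtract expressions; a minimal sketch (names are illustrative only):

// Multiply-accumulate / multiply-subtract expressions of the kind the
// MLA and MLS patterns match.  When UseMulOps is false they are instead
// selected as a separate multiply followed by an add or subtract.
int mla(int a, int b, int acc) { return acc + a * b; }
int mls(int a, int b, int acc) { return acc - a * b; }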
// -// imm_neg_XFORM - Return a imm value packed into the format described for -// imm_neg defs below. +// imm_neg_XFORM - Return the negation of an i32 immediate value. def imm_neg_XFORM : SDNodeXFormgetTargetConstant(-(int)N->getZExtValue(), MVT::i32); }]>; -// so_imm_not_XFORM - Return a so_imm value packed into the format described for -// so_imm_not def below. -def so_imm_not_XFORM : SDNodeXFormgetTargetConstant(~(int)N->getZExtValue(), MVT::i32); }]>; @@ -275,7 +300,7 @@ def imm16_31 : ImmLeaf, PatLeaf<(imm), [{ - int64_t Value = -(int)N->getZExtValue(); + unsigned Value = -(unsigned)N->getZExtValue(); return Value && ARM_AM::getSOImmVal(Value) != -1; }], imm_neg_XFORM> { let ParserMatchClass = so_imm_neg_asmoperand; @@ -287,7 +312,7 @@ def so_imm_neg : Operand, PatLeaf<(imm), [{ def so_imm_not_asmoperand : AsmOperandClass { let Name = "ARMSOImmNot"; } def so_imm_not : Operand, PatLeaf<(imm), [{ return ARM_AM::getSOImmVal(~(uint32_t)N->getZExtValue()) != -1; - }], so_imm_not_XFORM> { + }], imm_not_XFORM> { let ParserMatchClass = so_imm_not_asmoperand; } @@ -1791,12 +1816,15 @@ def ADR : AI1<{0,?,?,0}, (outs GPR:$Rd), (ins adrlabel:$label), let Inst{15-12} = Rd; let Inst{11-0} = label{11-0}; } + +let hasSideEffects = 1 in { def LEApcrel : ARMPseudoInst<(outs GPR:$Rd), (ins i32imm:$label, pred:$p), 4, IIC_iALUi, []>; def LEApcrelJT : ARMPseudoInst<(outs GPR:$Rd), (ins i32imm:$label, nohash_imm:$id, pred:$p), 4, IIC_iALUi, []>; +} //===----------------------------------------------------------------------===// // Control Flow Instructions. @@ -3079,15 +3107,19 @@ def : ARMPat<(ARMaddc GPR:$src, so_imm_neg:$imm), (SUBSri GPR:$src, so_imm_neg:$imm)>; def : ARMPat<(add GPR:$src, imm0_65535_neg:$imm), - (SUBrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>; + (SUBrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>, + Requires<[IsARM, HasV6T2]>; def : ARMPat<(ARMaddc GPR:$src, imm0_65535_neg:$imm), - (SUBSrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>; + (SUBSrr GPR:$src, (MOVi16 (imm_neg_XFORM imm:$imm)))>, + Requires<[IsARM, HasV6T2]>; // The with-carry-in form matches bitwise not instead of the negation. // Effectively, the inverse interpretation of the carry flag already accounts // for part of the negation. def : ARMPat<(ARMadde GPR:$src, so_imm_not:$imm, CPSR), (SBCri GPR:$src, so_imm_not:$imm)>; +def : ARMPat<(ARMadde GPR:$src, imm0_65535_neg:$imm, CPSR), + (SBCrr GPR:$src, (MOVi16 (imm_not_XFORM imm:$imm)))>; // Note: These are implemented in C++ code, because they have to generate // ADD/SUBrs instructions, which use a complex pattern that a xform function @@ -3399,6 +3431,18 @@ class AsMul1I64 opcod, dag oops, dag iops, InstrItinClass itin, let Inst{11-8} = Rm; let Inst{3-0} = Rn; } +class AsMla1I64 opcod, dag oops, dag iops, InstrItinClass itin, + string opc, string asm, list pattern> + : AsMul1I { + bits<4> RdLo; + bits<4> RdHi; + bits<4> Rm; + bits<4> Rn; + let Inst{19-16} = RdHi; + let Inst{15-12} = RdLo; + let Inst{11-8} = Rm; + let Inst{3-0} = Rn; +} // FIXME: The v5 pseudos are only necessary for the additional Constraint // property. 
Remove them when it's possible to add those properties @@ -3419,13 +3463,13 @@ def MULv5: ARMPseudoExpand<(outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm, 4, IIC_iMUL32, [(set GPRnopc:$Rd, (mul GPRnopc:$Rn, GPRnopc:$Rm))], (MUL GPRnopc:$Rd, GPRnopc:$Rn, GPRnopc:$Rm, pred:$p, cc_out:$s)>, - Requires<[IsARM, NoV6]>; + Requires<[IsARM, NoV6, UseMulOps]>; } def MLA : AsMul1I32<0b0000001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra), IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra", [(set GPR:$Rd, (add (mul GPR:$Rn, GPR:$Rm), GPR:$Ra))]>, - Requires<[IsARM, HasV6]> { + Requires<[IsARM, HasV6, UseMulOps]> { bits<4> Ra; let Inst{15-12} = Ra; } @@ -3441,7 +3485,7 @@ def MLAv5: ARMPseudoExpand<(outs GPR:$Rd), def MLS : AMul1I<0b0000011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra), IIC_iMAC32, "mls", "\t$Rd, $Rn, $Rm, $Ra", [(set GPR:$Rd, (sub GPR:$Ra, (mul GPR:$Rn, GPR:$Rm)))]>, - Requires<[IsARM, HasV6T2]> { + Requires<[IsARM, HasV6T2, UseMulOps]> { bits<4> Rd; bits<4> Rm; bits<4> Rn; @@ -3481,14 +3525,14 @@ def UMULLv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi), } // Multiply + accumulate -def SMLAL : AsMul1I64<0b0000111, (outs GPR:$RdLo, GPR:$RdHi), - (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64, +def SMLAL : AsMla1I64<0b0000111, (outs GPR:$RdLo, GPR:$RdHi), + (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi), IIC_iMAC64, "smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>, - Requires<[IsARM, HasV6]>; -def UMLAL : AsMul1I64<0b0000101, (outs GPR:$RdLo, GPR:$RdHi), - (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64, + RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">, Requires<[IsARM, HasV6]>; +def UMLAL : AsMla1I64<0b0000101, (outs GPR:$RdLo, GPR:$RdHi), + (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi), IIC_iMAC64, "umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>, - Requires<[IsARM, HasV6]>; + RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">, Requires<[IsARM, HasV6]>; def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi), (ins GPR:$Rn, GPR:$Rm), IIC_iMAC64, @@ -3504,17 +3548,22 @@ def UMAAL : AMul1I <0b0000010, (outs GPR:$RdLo, GPR:$RdHi), let Inst{3-0} = Rn; } -let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in { +let Constraints = "$RLo = $RdLo,$RHi = $RdHi" in { def SMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi), - (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s), + (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, pred:$p, cc_out:$s), 4, IIC_iMAC64, [], - (SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>, + (SMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, + pred:$p, cc_out:$s)>, Requires<[IsARM, NoV6]>; def UMLALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi), - (ins GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s), + (ins GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, pred:$p, cc_out:$s), 4, IIC_iMAC64, [], - (UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>, + (UMLAL GPR:$RdLo, GPR:$RdHi, GPR:$Rn, GPR:$Rm, GPR:$RLo, GPR:$RHi, + pred:$p, cc_out:$s)>, Requires<[IsARM, NoV6]>; +} + +let Constraints = "@earlyclobber $RdLo,@earlyclobber $RdHi" in { def UMAALv5 : ARMPseudoExpand<(outs GPR:$RdLo, GPR:$RdHi), (ins GPR:$Rn, GPR:$Rm, pred:$p), 4, IIC_iMAC64, [], @@ -3542,7 +3591,7 @@ def SMMLA : AMul2Ia <0b0111010, 0b0001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra), IIC_iMAC32, "smmla", "\t$Rd, $Rn, $Rm, $Ra", [(set GPR:$Rd, (add (mulhs GPR:$Rn, GPR:$Rm), GPR:$Ra))]>, - Requires<[IsARM, HasV6]>; + Requires<[IsARM, HasV6, UseMulOps]>; def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra), @@ -3552,7 +3601,7 @@ def SMMLAR : AMul2Ia <0b0111010, 0b0011, (outs GPR:$Rd), def SMMLS 
: AMul2Ia <0b0111010, 0b1101, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra), IIC_iMAC32, "smmls", "\t$Rd, $Rn, $Rm, $Ra", []>, - Requires<[IsARM, HasV6]>; + Requires<[IsARM, HasV6, UseMulOps]>; def SMMLSR : AMul2Ia <0b0111010, 0b1111, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm, GPR:$Ra), @@ -3606,7 +3655,7 @@ multiclass AI_smla { [(set GPRnopc:$Rd, (add GPR:$Ra, (opnode (sext_inreg GPRnopc:$Rn, i16), (sext_inreg GPRnopc:$Rm, i16))))]>, - Requires<[IsARM, HasV5TE]>; + Requires<[IsARM, HasV5TE, UseMulOps]>; def BT : AMulxyIa<0b0001000, 0b10, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra), @@ -3614,7 +3663,7 @@ multiclass AI_smla { [(set GPRnopc:$Rd, (add GPR:$Ra, (opnode (sext_inreg GPRnopc:$Rn, i16), (sra GPRnopc:$Rm, (i32 16)))))]>, - Requires<[IsARM, HasV5TE]>; + Requires<[IsARM, HasV5TE, UseMulOps]>; def TB : AMulxyIa<0b0001000, 0b01, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra), @@ -3622,7 +3671,7 @@ multiclass AI_smla { [(set GPRnopc:$Rd, (add GPR:$Ra, (opnode (sra GPRnopc:$Rn, (i32 16)), (sext_inreg GPRnopc:$Rm, i16))))]>, - Requires<[IsARM, HasV5TE]>; + Requires<[IsARM, HasV5TE, UseMulOps]>; def TT : AMulxyIa<0b0001000, 0b11, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra), @@ -3630,7 +3679,7 @@ multiclass AI_smla { [(set GPRnopc:$Rd, (add GPR:$Ra, (opnode (sra GPRnopc:$Rn, (i32 16)), (sra GPRnopc:$Rm, (i32 16)))))]>, - Requires<[IsARM, HasV5TE]>; + Requires<[IsARM, HasV5TE, UseMulOps]>; def WB : AMulxyIa<0b0001001, 0b00, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra), @@ -3638,7 +3687,7 @@ multiclass AI_smla { [(set GPRnopc:$Rd, (add GPR:$Ra, (sra (opnode GPRnopc:$Rn, (sext_inreg GPRnopc:$Rm, i16)), (i32 16))))]>, - Requires<[IsARM, HasV5TE]>; + Requires<[IsARM, HasV5TE, UseMulOps]>; def WT : AMulxyIa<0b0001001, 0b10, (outs GPRnopc:$Rd), (ins GPRnopc:$Rn, GPRnopc:$Rm, GPR:$Ra), @@ -3646,7 +3695,7 @@ multiclass AI_smla { [(set GPRnopc:$Rd, (add GPR:$Ra, (sra (opnode GPRnopc:$Rn, (sra GPRnopc:$Rm, (i32 16))), (i32 16))))]>, - Requires<[IsARM, HasV5TE]>; + Requires<[IsARM, HasV5TE, UseMulOps]>; } } @@ -3749,6 +3798,19 @@ defm SMUA : AI_sdml<0, "smua">; defm SMUS : AI_sdml<1, "smus">; //===----------------------------------------------------------------------===// +// Division Instructions (ARMv7-A with virtualization extension) +// +def SDIV : ADivA1I<0b001, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), IIC_iDIV, + "sdiv", "\t$Rd, $Rn, $Rm", + [(set GPR:$Rd, (sdiv GPR:$Rn, GPR:$Rm))]>, + Requires<[IsARM, HasDivideInARM]>; + +def UDIV : ADivA1I<0b011, (outs GPR:$Rd), (ins GPR:$Rn, GPR:$Rm), IIC_iDIV, + "udiv", "\t$Rd, $Rn, $Rm", + [(set GPR:$Rd, (udiv GPR:$Rn, GPR:$Rm))]>, + Requires<[IsARM, HasDivideInARM]>; + +//===----------------------------------------------------------------------===// // Misc. Arithmetic Instructions. 
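The SDIV/UDIV definitions just above add ARM-mode hardware integer division, gated on the new HasDivideInARM predicate (the heading ties this to ARMv7-A with the virtualization extension). A trivial C++ sketch of what they cover (illustration only; the helper names are made up):

#include <cstdint>

// Plain integer division.  With HasDivideInARM these select to single
// SDIV/UDIV instructions; without hardware divide they are typically
// lowered to runtime library calls (e.g. __aeabi_idiv / __aeabi_uidiv).
int32_t  sdiv32(int32_t n,  int32_t d)  { return n / d; }
uint32_t udiv32(uint32_t n, uint32_t d) { return n / d; }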
// @@ -3986,48 +4048,6 @@ def MVNCCi : ARMPseudoInst<(outs GPR:$Rd), [/*(set GPR:$Rd, (ARMcmov GPR:$false, so_imm_not:$imm, imm:$cc, CCR:$ccr))*/]>, RegConstraint<"$false = $Rd">; -// Conditional instructions -multiclass AsI1_bincc_irs { - def ri : ARMPseudoExpand<(outs GPR:$Rd), - (ins GPR:$Rfalse, GPR:$Rn, so_imm:$imm, - pred:$p, cc_out:$s), - 4, iii, [], - (iri GPR:$Rd, GPR:$Rn, so_imm:$imm, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; - def rr : ARMPseudoExpand<(outs GPR:$Rd), - (ins GPR:$Rfalse, GPR:$Rn, GPR:$Rm, - pred:$p, cc_out:$s), - 4, iir, [], - (irr GPR:$Rd, GPR:$Rn, GPR:$Rm, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; - def rsi : ARMPseudoExpand<(outs GPR:$Rd), - (ins GPR:$Rfalse, GPR:$Rn, so_reg_imm:$shift, - pred:$p, cc_out:$s), - 4, iis, [], - (irsi GPR:$Rd, GPR:$Rn, so_reg_imm:$shift, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; - def rsr : ARMPseudoExpand<(outs GPRnopc:$Rd), - (ins GPRnopc:$Rfalse, GPRnopc:$Rn, so_reg_reg:$shift, - pred:$p, cc_out:$s), - 4, iis, [], - (irsr GPR:$Rd, GPR:$Rn, so_reg_reg:$shift, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; -} - -defm ANDCC : AsI1_bincc_irs; -defm ORRCC : AsI1_bincc_irs; -defm EORCC : AsI1_bincc_irs; -defm ADDCC : AsI1_bincc_irs; -defm SUBCC : AsI1_bincc_irs; - } // neverHasSideEffects @@ -4723,21 +4743,13 @@ def Int_eh_sjlj_longjmp : PseudoInst<(outs), (ins GPR:$src, GPR:$scratch), Requires<[IsARM, IsIOS]>; } -// eh.sjlj.dispatchsetup pseudo-instructions. -// These pseudos are used for both ARM and Thumb2. Any differences are -// handled when the pseudo is expanded (which happens before any passes -// that need the instruction size). -let Defs = - [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR, - Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15 ], - isBarrier = 1 in +// eh.sjlj.dispatchsetup pseudo-instruction. +// This pseudo is used for both ARM and Thumb. Any differences are handled when +// the pseudo is expanded (which happens before any passes that need the +// instruction size). 
+let isBarrier = 1 in def Int_eh_sjlj_dispatchsetup : PseudoInst<(outs), (ins), NoItinerary, []>; -let Defs = - [ R0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10, R11, R12, LR, CPSR ], - isBarrier = 1 in -def Int_eh_sjlj_dispatchsetup_nofp : PseudoInst<(outs), (ins), NoItinerary, []>; - //===----------------------------------------------------------------------===// // Non-Instruction Patterns @@ -4841,32 +4853,32 @@ def : ARMV5TEPat<(sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))), def : ARMV5TEPat<(sra (mul GPR:$a, sext_16_node:$b), (i32 16)), (SMULWB GPR:$a, GPR:$b)>; -def : ARMV5TEPat<(add GPR:$acc, +def : ARMV5MOPat<(add GPR:$acc, (mul (sra (shl GPR:$a, (i32 16)), (i32 16)), (sra (shl GPR:$b, (i32 16)), (i32 16)))), (SMLABB GPR:$a, GPR:$b, GPR:$acc)>; -def : ARMV5TEPat<(add GPR:$acc, +def : ARMV5MOPat<(add GPR:$acc, (mul sext_16_node:$a, sext_16_node:$b)), (SMLABB GPR:$a, GPR:$b, GPR:$acc)>; -def : ARMV5TEPat<(add GPR:$acc, +def : ARMV5MOPat<(add GPR:$acc, (mul (sra (shl GPR:$a, (i32 16)), (i32 16)), (sra GPR:$b, (i32 16)))), (SMLABT GPR:$a, GPR:$b, GPR:$acc)>; -def : ARMV5TEPat<(add GPR:$acc, +def : ARMV5MOPat<(add GPR:$acc, (mul sext_16_node:$a, (sra GPR:$b, (i32 16)))), (SMLABT GPR:$a, GPR:$b, GPR:$acc)>; -def : ARMV5TEPat<(add GPR:$acc, +def : ARMV5MOPat<(add GPR:$acc, (mul (sra GPR:$a, (i32 16)), (sra (shl GPR:$b, (i32 16)), (i32 16)))), (SMLATB GPR:$a, GPR:$b, GPR:$acc)>; -def : ARMV5TEPat<(add GPR:$acc, +def : ARMV5MOPat<(add GPR:$acc, (mul (sra GPR:$a, (i32 16)), sext_16_node:$b)), (SMLATB GPR:$a, GPR:$b, GPR:$acc)>; -def : ARMV5TEPat<(add GPR:$acc, +def : ARMV5MOPat<(add GPR:$acc, (sra (mul GPR:$a, (sra (shl GPR:$b, (i32 16)), (i32 16))), (i32 16))), (SMLAWB GPR:$a, GPR:$b, GPR:$acc)>; -def : ARMV5TEPat<(add GPR:$acc, +def : ARMV5MOPat<(add GPR:$acc, (sra (mul GPR:$a, sext_16_node:$b), (i32 16))), (SMLAWB GPR:$a, GPR:$b, GPR:$acc)>; diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td index 048d340..3cf213c 100644 --- a/lib/Target/ARM/ARMInstrNEON.td +++ b/lib/Target/ARM/ARMInstrNEON.td @@ -398,6 +398,20 @@ def VecListFourQWordIndexed : Operand { let MIOperandInfo = (ops DPR:$Vd, i32imm:$idx); } +def dword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{ + return cast(N)->getAlignment() >= 8; +}]>; +def dword_alignedstore : PatFrag<(ops node:$val, node:$ptr), + (store node:$val, node:$ptr), [{ + return cast(N)->getAlignment() >= 8; +}]>; +def word_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{ + return cast(N)->getAlignment() == 4; +}]>; +def word_alignedstore : PatFrag<(ops node:$val, node:$ptr), + (store node:$val, node:$ptr), [{ + return cast(N)->getAlignment() == 4; +}]>; def hword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{ return cast(N)->getAlignment() == 2; }]>; @@ -1980,7 +1994,7 @@ def VST1LNd8 : VST1LN<0b0000, {?,?,?,0}, "8", v8i8, truncstorei8, def VST1LNd16 : VST1LN<0b0100, {?,?,0,?}, "16", v4i16, truncstorei16, NEONvgetlaneu, addrmode6> { let Inst{7-6} = lane{1-0}; - let Inst{4} = Rn{5}; + let Inst{4} = Rn{4}; } def VST1LNd32 : VST1LN<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt, @@ -2023,7 +2037,7 @@ def VST1LNd8_UPD : VST1LNWB<0b0000, {?,?,?,0}, "8", v8i8, post_truncsti8, def VST1LNd16_UPD : VST1LNWB<0b0100, {?,?,0,?}, "16", v4i16, post_truncsti16, NEONvgetlaneu, addrmode6> { let Inst{7-6} = lane{1-0}; - let Inst{4} = Rn{5}; + let Inst{4} = Rn{4}; } def VST1LNd32_UPD : VST1LNWB<0b1000, {?,0,?,?}, "32", v2i32, post_store, extractelt, addrmode6oneL32> { @@ -2273,6 +2287,25 @@ def : 
Pat<(f64 (non_word_alignedload addrmode6:$addr)), def : Pat<(non_word_alignedstore (f64 DPR:$value), addrmode6:$addr), (VST1d64 addrmode6:$addr, DPR:$value)>, Requires<[IsBE]>; +// Use vld1/vst1 for Q and QQ. Also use them for unaligned v2f64 +// load / store if it's legal. +def : Pat<(v2f64 (dword_alignedload addrmode6:$addr)), + (VLD1q64 addrmode6:$addr)>; +def : Pat<(dword_alignedstore (v2f64 QPR:$value), addrmode6:$addr), + (VST1q64 addrmode6:$addr, QPR:$value)>; +def : Pat<(v2f64 (word_alignedload addrmode6:$addr)), + (VLD1q32 addrmode6:$addr)>; +def : Pat<(word_alignedstore (v2f64 QPR:$value), addrmode6:$addr), + (VST1q32 addrmode6:$addr, QPR:$value)>; +def : Pat<(v2f64 (hword_alignedload addrmode6:$addr)), + (VLD1q16 addrmode6:$addr)>, Requires<[IsLE]>; +def : Pat<(hword_alignedstore (v2f64 QPR:$value), addrmode6:$addr), + (VST1q16 addrmode6:$addr, QPR:$value)>, Requires<[IsLE]>; +def : Pat<(v2f64 (byte_alignedload addrmode6:$addr)), + (VLD1q8 addrmode6:$addr)>, Requires<[IsLE]>; +def : Pat<(byte_alignedstore (v2f64 QPR:$value), addrmode6:$addr), + (VST1q8 addrmode6:$addr, QPR:$value)>, Requires<[IsLE]>; + //===----------------------------------------------------------------------===// // NEON pattern fragments //===----------------------------------------------------------------------===// @@ -4455,10 +4488,36 @@ def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd), "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd", [(set DPR:$Vd, (v2i32 (NEONvbsl DPR:$src1, DPR:$Vn, DPR:$Vm)))]>; +def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 DPR:$src1), + (v8i8 DPR:$Vn), (v8i8 DPR:$Vm))), + (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 DPR:$src1), + (v4i16 DPR:$Vn), (v4i16 DPR:$Vm))), + (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 DPR:$src1), + (v2i32 DPR:$Vn), (v2i32 DPR:$Vm))), + (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 DPR:$src1), + (v2f32 DPR:$Vn), (v2f32 DPR:$Vm))), + (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 DPR:$src1), + (v1i64 DPR:$Vn), (v1i64 DPR:$Vm))), + (VBSLd DPR:$src1, DPR:$Vn, DPR:$Vm)>, + Requires<[HasNEON]>; def : Pat<(v2i32 (or (and DPR:$Vn, DPR:$Vd), (and DPR:$Vm, (vnotd DPR:$Vd)))), - (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>; + (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>, + Requires<[HasNEON]>; + +def : Pat<(v1i64 (or (and DPR:$Vn, DPR:$Vd), + (and DPR:$Vm, (vnotd DPR:$Vd)))), + (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>, + Requires<[HasNEON]>; def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), @@ -4467,9 +4526,35 @@ def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd), [(set QPR:$Vd, (v4i32 (NEONvbsl QPR:$src1, QPR:$Vn, QPR:$Vm)))]>; +def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 QPR:$src1), + (v16i8 QPR:$Vn), (v16i8 QPR:$Vm))), + (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 QPR:$src1), + (v8i16 QPR:$Vn), (v8i16 QPR:$Vm))), + (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 QPR:$src1), + (v4i32 QPR:$Vn), (v4i32 QPR:$Vm))), + (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 QPR:$src1), + (v4f32 QPR:$Vn), (v4f32 QPR:$Vm))), + (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 QPR:$src1), + (v2i64 
QPR:$Vn), (v2i64 QPR:$Vm))), + (VBSLq QPR:$src1, QPR:$Vn, QPR:$Vm)>, + Requires<[HasNEON]>; + def : Pat<(v4i32 (or (and QPR:$Vn, QPR:$Vd), (and QPR:$Vm, (vnotq QPR:$Vd)))), - (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>; + (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>, + Requires<[HasNEON]>; +def : Pat<(v2i64 (or (and QPR:$Vn, QPR:$Vd), + (and QPR:$Vm, (vnotq QPR:$Vd)))), + (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>, + Requires<[HasNEON]>; // VBIF : Vector Bitwise Insert if False // like VBSL but with: "vbif $dst, $src3, $src1", "$src2 = $dst", @@ -4983,7 +5068,8 @@ def VGETLNi32 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, 0b00, (outs GPR:$R), (ins DPR:$V, VectorIndex32:$lane), IIC_VMOVSI, "vmov", "32", "$R, $V$lane", [(set GPR:$R, (extractelt (v2i32 DPR:$V), - imm:$lane))]> { + imm:$lane))]>, + Requires<[HasNEON, HasFastVGETLNi32]> { let Inst{21} = lane{0}; } // def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td @@ -5006,7 +5092,16 @@ def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane), def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane), (VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src, (DSubReg_i32_reg imm:$lane))), - (SubReg_i32_lane imm:$lane))>; + (SubReg_i32_lane imm:$lane))>, + Requires<[HasNEON, HasFastVGETLNi32]>; +def : Pat<(extractelt (v2i32 DPR:$src), imm:$lane), + (COPY_TO_REGCLASS + (i32 (EXTRACT_SUBREG DPR:$src, (SSubReg_f32_reg imm:$lane))), GPR)>, + Requires<[HasNEON, HasSlowVGETLNi32]>; +def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane), + (COPY_TO_REGCLASS + (i32 (EXTRACT_SUBREG QPR:$src, (SSubReg_f32_reg imm:$lane))), GPR)>, + Requires<[HasNEON, HasSlowVGETLNi32]>; def : Pat<(extractelt (v2f32 DPR:$src1), imm:$src2), (EXTRACT_SUBREG (v2f32 (COPY_TO_REGCLASS (v2f32 DPR:$src1),DPR_VFP2)), (SSubReg_f32_reg imm:$src2))>; @@ -5117,14 +5212,23 @@ class VDUPQ opcod1, bits<2> opcod3, string Dt, ValueType Ty> def VDUP8d : VDUPD<0b11101100, 0b00, "8", v8i8>; def VDUP16d : VDUPD<0b11101000, 0b01, "16", v4i16>; -def VDUP32d : VDUPD<0b11101000, 0b00, "32", v2i32>; +def VDUP32d : VDUPD<0b11101000, 0b00, "32", v2i32>, + Requires<[HasNEON, HasFastVDUP32]>; def VDUP8q : VDUPQ<0b11101110, 0b00, "8", v16i8>; def VDUP16q : VDUPQ<0b11101010, 0b01, "16", v8i16>; def VDUP32q : VDUPQ<0b11101010, 0b00, "32", v4i32>; -def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32d GPR:$R)>; +// NEONvdup patterns for uarchs with fast VDUP.32. +def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32d GPR:$R)>, + Requires<[HasNEON,HasFastVDUP32]>; def : Pat<(v4f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32q GPR:$R)>; +// NEONvdup patterns for uarchs with slow VDUP.32 - use VMOVDRR instead. +def : Pat<(v2i32 (NEONvdup (i32 GPR:$R))), (VMOVDRR GPR:$R, GPR:$R)>, + Requires<[HasNEON,HasSlowVDUP32]>; +def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VMOVDRR GPR:$R, GPR:$R)>, + Requires<[HasNEON,HasSlowVDUP32]>; + // VDUP : Vector Duplicate Lane (from scalar to all elements) class VDUPLND op19_16, string OpcodeStr, string Dt, @@ -5561,6 +5665,11 @@ def : N2VSPat; def : N2VSPat; def : N2VSPat; +// Prefer VMOVDRR for i32 -> f32 bitcasts, it can write all DPR registers. 
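The patterns added for VBSLd/VBSLq map both the int_arm_neon_vbsl intrinsic and the equivalent or/and/not expression onto VBSL, the NEON bitwise select: for each bit, the destination operand acts as a mask that picks the corresponding bit of the first source when set and of the second source when clear. A scalar C++ sketch of the expression shape those patterns recognize, applied lane-wise by the vector instruction (the helper name is made up):

#include <cstdint>

// Bitwise select: take bits of 'n' where 'mask' is 1 and bits of 'm'
// where it is 0.  This is the (or (and n, mask), (and m, (not mask)))
// form matched by the new VBSLd/VBSLq patterns.
uint32_t bsl(uint32_t mask, uint32_t n, uint32_t m) {
  return (n & mask) | (m & ~mask);
}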
+def : Pat<(f32 (bitconvert GPR:$a)), + (EXTRACT_SUBREG (VMOVDRR GPR:$a, GPR:$a), ssub_0)>, + Requires<[HasNEON, DontUseVMOVSR]>; + //===----------------------------------------------------------------------===// // Non-Instruction Patterns //===----------------------------------------------------------------------===// diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td index 554f6d9..ae7a5c0 100644 --- a/lib/Target/ARM/ARMInstrThumb.td +++ b/lib/Target/ARM/ARMInstrThumb.td @@ -223,6 +223,7 @@ def t_addrmode_sp : Operand, def t_addrmode_pc : Operand { let EncoderMethod = "getAddrModePCOpValue"; let DecoderMethod = "DecodeThumbAddrModePC"; + let PrintMethod = "printThumbLdrLabelOperand"; } //===----------------------------------------------------------------------===// @@ -1200,6 +1201,7 @@ let neverHasSideEffects = 1, isReMaterializable = 1 in def tLEApcrel : tPseudoInst<(outs tGPR:$Rd), (ins i32imm:$label, pred:$p), 2, IIC_iALUi, []>; +let hasSideEffects = 1 in def tLEApcrelJT : tPseudoInst<(outs tGPR:$Rd), (ins i32imm:$label, nohash_imm:$id, pred:$p), 2, IIC_iALUi, []>; @@ -1245,10 +1247,6 @@ def tInt_eh_sjlj_longjmp : XI<(outs), (ins GPR:$src, GPR:$scratch), [(ARMeh_sjlj_longjmp GPR:$src, GPR:$scratch)]>, Requires<[IsThumb, IsIOS]>; -let Defs = [ R0, R1, R2, R3, R4, R5, R6, R7, R12, CPSR ], - isBarrier = 1 in -def tInt_eh_sjlj_dispatchsetup : PseudoInst<(outs), (ins), NoItinerary, []>; - //===----------------------------------------------------------------------===// // Non-Instruction Patterns // diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td index 8ecf009..002d64a 100644 --- a/lib/Target/ARM/ARMInstrThumb2.td +++ b/lib/Target/ARM/ARMInstrThumb2.td @@ -159,7 +159,7 @@ def t2addrmode_imm12 : Operand, // t2ldrlabel := imm12 def t2ldrlabel : Operand { let EncoderMethod = "getAddrModeImm12OpValue"; - let PrintMethod = "printT2LdrLabelOperand"; + let PrintMethod = "printThumbLdrLabelOperand"; } def t2ldr_pcrel_imm12_asmoperand : AsmOperandClass {let Name = "MemPCRelImm12";} @@ -523,6 +523,23 @@ class T2MulLong opc22_20, bits<4> opc7_4, let Inst{7-4} = opc7_4; let Inst{3-0} = Rm; } +class T2MlaLong opc22_20, bits<4> opc7_4, + dag oops, dag iops, InstrItinClass itin, + string opc, string asm, list pattern> + : T2I { + bits<4> RdLo; + bits<4> RdHi; + bits<4> Rn; + bits<4> Rm; + + let Inst{31-23} = 0b111110111; + let Inst{22-20} = opc22_20; + let Inst{19-16} = Rn; + let Inst{15-12} = RdLo; + let Inst{11-8} = RdHi; + let Inst{7-4} = opc7_4; + let Inst{3-0} = Rm; +} /// T2I_bin_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns for a @@ -757,33 +774,6 @@ multiclass T2I_bin_ii12rs op23_21, string opc, PatFrag opnode, let Inst{24} = 1; let Inst{23-21} = op23_21; } - - // Predicated versions. 
- def CCri : t2PseudoExpand<(outs GPRnopc:$Rd), - (ins GPRnopc:$Rfalse, GPRnopc:$Rn, t2_so_imm:$imm, - pred:$p, cc_out:$s), 4, IIC_iALUi, [], - (!cast(NAME#ri) GPRnopc:$Rd, - GPRnopc:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; - def CCri12 : t2PseudoExpand<(outs GPRnopc:$Rd), - (ins GPRnopc:$Rfalse, GPR:$Rn, imm0_4095:$imm, - pred:$p), - 4, IIC_iALUi, [], - (!cast(NAME#ri12) GPRnopc:$Rd, - GPR:$Rn, imm0_4095:$imm, pred:$p)>, - RegConstraint<"$Rfalse = $Rd">; - def CCrr : t2PseudoExpand<(outs GPRnopc:$Rd), - (ins GPRnopc:$Rfalse, GPRnopc:$Rn, rGPR:$Rm, - pred:$p, cc_out:$s), 4, IIC_iALUr, [], - (!cast(NAME#rr) GPRnopc:$Rd, - GPRnopc:$Rn, rGPR:$Rm, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; - def CCrs : t2PseudoExpand<(outs GPRnopc:$Rd), - (ins GPRnopc:$Rfalse, GPRnopc:$Rn, t2_so_reg:$Rm, - pred:$p, cc_out:$s), 4, IIC_iALUsi, [], - (!cast(NAME#rs) GPRnopc:$Rd, - GPRnopc:$Rn, t2_so_reg:$Rm, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; } /// T2I_adde_sube_irs - Defines a set of (op reg, {so_imm|r|so_reg}) patterns @@ -1200,6 +1190,7 @@ def t2ADR : T2PCOneRegImm<(outs rGPR:$Rd), let neverHasSideEffects = 1, isReMaterializable = 1 in def t2LEApcrel : t2PseudoInst<(outs rGPR:$Rd), (ins i32imm:$label, pred:$p), 4, IIC_iALUi, []>; +let hasSideEffects = 1 in def t2LEApcrelJT : t2PseudoInst<(outs rGPR:$Rd), (ins i32imm:$label, nohash_imm:$id, pred:$p), 4, IIC_iALUi, @@ -1962,7 +1953,7 @@ def : T2Pat<(ARMadde rGPR:$src, imm0_255_not:$imm, CPSR), def : T2Pat<(ARMadde rGPR:$src, t2_so_imm_not:$imm, CPSR), (t2SBCri rGPR:$src, t2_so_imm_not:$imm)>; def : T2Pat<(ARMadde rGPR:$src, imm0_65535_neg:$imm, CPSR), - (t2SBCrr rGPR:$src, (t2MOVi16 (imm_neg_XFORM imm:$imm)))>; + (t2SBCrr rGPR:$src, (t2MOVi16 (imm_not_XFORM imm:$imm)))>; // Select Bytes -- for disassembly only @@ -2405,7 +2396,8 @@ def t2MUL: T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iMUL32, def t2MLA: T2FourReg< (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "mla", "\t$Rd, $Rn, $Rm, $Ra", - [(set rGPR:$Rd, (add (mul rGPR:$Rn, rGPR:$Rm), rGPR:$Ra))]> { + [(set rGPR:$Rd, (add (mul rGPR:$Rn, rGPR:$Rm), rGPR:$Ra))]>, + Requires<[IsThumb2, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b000; @@ -2415,7 +2407,8 @@ def t2MLA: T2FourReg< def t2MLS: T2FourReg< (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "mls", "\t$Rd, $Rn, $Rm, $Ra", - [(set rGPR:$Rd, (sub rGPR:$Ra, (mul rGPR:$Rn, rGPR:$Rm)))]> { + [(set rGPR:$Rd, (sub rGPR:$Ra, (mul rGPR:$Rn, rGPR:$Rm)))]>, + Requires<[IsThumb2, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b000; @@ -2437,15 +2430,17 @@ def t2UMULL : T2MulLong<0b010, 0b0000, } // isCommutable // Multiply + accumulate -def t2SMLAL : T2MulLong<0b100, 0b0000, +def t2SMLAL : T2MlaLong<0b100, 0b0000, (outs rGPR:$RdLo, rGPR:$RdHi), - (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64, - "smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>; + (ins rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi), IIC_iMAC64, + "smlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>, + RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">; -def t2UMLAL : T2MulLong<0b110, 0b0000, +def t2UMLAL : T2MlaLong<0b110, 0b0000, (outs rGPR:$RdLo, rGPR:$RdHi), - (ins rGPR:$Rn, rGPR:$Rm), IIC_iMAC64, - "umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>; + (ins rGPR:$Rn, rGPR:$Rm, rGPR:$RLo, rGPR:$RHi), IIC_iMAC64, + "umlal", "\t$RdLo, $RdHi, $Rn, $Rm", []>, + RegConstraint<"$RLo = $RdLo, $RHi = $RdHi">; def t2UMAAL : T2MulLong<0b110, 0b0110, (outs 
rGPR:$RdLo, rGPR:$RdHi), @@ -2482,7 +2477,7 @@ def t2SMMLA : T2FourReg< (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smmla", "\t$Rd, $Rn, $Rm, $Ra", [(set rGPR:$Rd, (add (mulhs rGPR:$Rm, rGPR:$Rn), rGPR:$Ra))]>, - Requires<[IsThumb2, HasThumb2DSP]> { + Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b101; @@ -2503,7 +2498,7 @@ def t2SMMLS: T2FourReg< (outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm, rGPR:$Ra), IIC_iMAC32, "smmls", "\t$Rd, $Rn, $Rm, $Ra", [(set rGPR:$Rd, (sub rGPR:$Ra, (mulhs rGPR:$Rn, rGPR:$Rm)))]>, - Requires<[IsThumb2, HasThumb2DSP]> { + Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b110; @@ -2608,7 +2603,7 @@ multiclass T2I_smla { [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sext_inreg rGPR:$Rn, i16), (sext_inreg rGPR:$Rm, i16))))]>, - Requires<[IsThumb2, HasThumb2DSP]> { + Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b001; @@ -2621,7 +2616,7 @@ multiclass T2I_smla { !strconcat(opc, "bt"), "\t$Rd, $Rn, $Rm, $Ra", [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sext_inreg rGPR:$Rn, i16), (sra rGPR:$Rm, (i32 16)))))]>, - Requires<[IsThumb2, HasThumb2DSP]> { + Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b001; @@ -2634,7 +2629,7 @@ multiclass T2I_smla { !strconcat(opc, "tb"), "\t$Rd, $Rn, $Rm, $Ra", [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sra rGPR:$Rn, (i32 16)), (sext_inreg rGPR:$Rm, i16))))]>, - Requires<[IsThumb2, HasThumb2DSP]> { + Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b001; @@ -2647,7 +2642,7 @@ multiclass T2I_smla { !strconcat(opc, "tt"), "\t$Rd, $Rn, $Rm, $Ra", [(set rGPR:$Rd, (add rGPR:$Ra, (opnode (sra rGPR:$Rn, (i32 16)), (sra rGPR:$Rm, (i32 16)))))]>, - Requires<[IsThumb2, HasThumb2DSP]> { + Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b001; @@ -2660,7 +2655,7 @@ multiclass T2I_smla { !strconcat(opc, "wb"), "\t$Rd, $Rn, $Rm, $Ra", [(set rGPR:$Rd, (add rGPR:$Ra, (sra (opnode rGPR:$Rn, (sext_inreg rGPR:$Rm, i16)), (i32 16))))]>, - Requires<[IsThumb2, HasThumb2DSP]> { + Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b011; @@ -2673,7 +2668,7 @@ multiclass T2I_smla { !strconcat(opc, "wt"), "\t$Rd, $Rn, $Rm, $Ra", [(set rGPR:$Rd, (add rGPR:$Ra, (sra (opnode rGPR:$Rn, (sra rGPR:$Rm, (i32 16))), (i32 16))))]>, - Requires<[IsThumb2, HasThumb2DSP]> { + Requires<[IsThumb2, HasThumb2DSP, UseMulOps]> { let Inst{31-27} = 0b11111; let Inst{26-23} = 0b0110; let Inst{22-20} = 0b011; @@ -2767,7 +2762,7 @@ def t2SMLSLDX : T2FourReg_mac<1, 0b101, 0b1101, (outs rGPR:$Ra,rGPR:$Rd), // Division Instructions. 
// Signed and unsigned division on v7-M // -def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi, +def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV, "sdiv", "\t$Rd, $Rn, $Rm", [(set rGPR:$Rd, (sdiv rGPR:$Rn, rGPR:$Rm))]>, Requires<[HasDivide, IsThumb2]> { @@ -2778,7 +2773,7 @@ def t2SDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi, let Inst{7-4} = 0b1111; } -def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iALUi, +def t2UDIV : T2ThreeReg<(outs rGPR:$Rd), (ins rGPR:$Rn, rGPR:$Rm), IIC_iDIV, "udiv", "\t$Rd, $Rn, $Rm", [(set rGPR:$Rd, (udiv rGPR:$Rn, rGPR:$Rm))]>, Requires<[HasDivide, IsThumb2]> { @@ -3049,37 +3044,6 @@ def t2MOVCCror : T2I_movcc_sh<0b11, (outs rGPR:$Rd), RegConstraint<"$false = $Rd">; } // isCodeGenOnly = 1 -multiclass T2I_bincc_irs { - // shifted imm - def ri : t2PseudoExpand<(outs rGPR:$Rd), - (ins rGPR:$Rfalse, rGPR:$Rn, t2_so_imm:$imm, - pred:$p, cc_out:$s), - 4, iii, [], - (iri rGPR:$Rd, rGPR:$Rn, t2_so_imm:$imm, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; - // register - def rr : t2PseudoExpand<(outs rGPR:$Rd), - (ins rGPR:$Rfalse, rGPR:$Rn, rGPR:$Rm, - pred:$p, cc_out:$s), - 4, iir, [], - (irr rGPR:$Rd, rGPR:$Rn, rGPR:$Rm, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; - // shifted register - def rs : t2PseudoExpand<(outs rGPR:$Rd), - (ins rGPR:$Rfalse, rGPR:$Rn, t2_so_reg:$ShiftedRm, - pred:$p, cc_out:$s), - 4, iis, [], - (irs rGPR:$Rd, rGPR:$Rn, t2_so_reg:$ShiftedRm, pred:$p, cc_out:$s)>, - RegConstraint<"$Rfalse = $Rd">; -} // T2I_bincc_irs - -defm t2ANDCC : T2I_bincc_irs; -defm t2ORRCC : T2I_bincc_irs; -defm t2EORCC : T2I_bincc_irs; } // neverHasSideEffects //===----------------------------------------------------------------------===// @@ -3281,11 +3245,11 @@ def t2B : T2I<(outs), (ins uncondbrtarget:$target), IIC_Br, let Inst{15-14} = 0b10; let Inst{12} = 1; - bits<20> target; + bits<24> target; let Inst{26} = target{19}; let Inst{11} = target{18}; let Inst{13} = target{17}; - let Inst{21-16} = target{16-11}; + let Inst{25-16} = target{20-11}; let Inst{10-0} = target{10-0}; let DecoderMethod = "DecodeT2BInstruction"; } @@ -3367,20 +3331,6 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in { Requires<[IsThumb2, IsIOS]>; } -let isCall = 1, Defs = [LR], Uses = [SP] in { - // mov lr, pc; b if callee is marked noreturn to avoid confusing the - // return stack predictor. 
- def t2BMOVPCB_CALL : tPseudoInst<(outs), - (ins t_bltarget:$func), - 6, IIC_Br, [(ARMcall_nolink tglobaladdr:$func)]>, - Requires<[IsThumb]>; -} - -// Direct calls -def : T2Pat<(ARMcall_nolink texternalsym:$func), - (t2BMOVPCB_CALL texternalsym:$func)>, - Requires<[IsThumb]>; - // IT block let Defs = [ITSTATE] in def t2IT : Thumb2XI<(outs), (ins it_pred:$cc, it_mask:$mask), diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td index eb7eaa6..b5a896c 100644 --- a/lib/Target/ARM/ARMInstrVFP.td +++ b/lib/Target/ARM/ARMInstrVFP.td @@ -450,11 +450,11 @@ def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm), /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm", [/* For disassembly only; pattern left blank */]>; -def : ARMPat<(f32_to_f16 SPR:$a), - (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>; +def : Pat<(f32_to_f16 SPR:$a), + (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>; -def : ARMPat<(f16_to_f32 GPR:$a), - (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>; +def : Pat<(f16_to_f32 GPR:$a), + (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>; def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm), /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm", @@ -523,10 +523,12 @@ def VMOVRS : AVConv2I<0b11100001, 0b1010, let D = VFPNeonDomain; } +// Bitcast i32 -> f32. NEON prefers to use VMOVDRR. def VMOVSR : AVConv4I<0b11100000, 0b1010, (outs SPR:$Sn), (ins GPR:$Rt), IIC_fpMOVIS, "vmov", "\t$Sn, $Rt", - [(set SPR:$Sn, (bitconvert GPR:$Rt))]> { + [(set SPR:$Sn, (bitconvert GPR:$Rt))]>, + Requires<[HasVFP2, UseVMOVSR]> { // Instruction operands. bits<5> Sn; bits<4> Rt; diff --git a/lib/Target/ARM/ARMJITInfo.cpp b/lib/Target/ARM/ARMJITInfo.cpp index 3f99cce..254d8f6 100644 --- a/lib/Target/ARM/ARMJITInfo.cpp +++ b/lib/Target/ARM/ARMJITInfo.cpp @@ -168,7 +168,7 @@ void *ARMJITInfo::emitFunctionStub(const Function* F, void *Fn, intptr_t LazyPtr = getIndirectSymAddr(Fn); if (!LazyPtr) { // In PIC mode, the function stub is loading a lazy-ptr. 
- LazyPtr= (intptr_t)emitGlobalValueIndirectSym((GlobalValue*)F, Fn, JCE); + LazyPtr= (intptr_t)emitGlobalValueIndirectSym((const GlobalValue*)F, Fn, JCE); DEBUG(if (F) errs() << "JIT: Indirect symbol emitted at [" << LazyPtr << "] for GV '" << F->getName() << "'\n"; diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 897ceb6..0185289 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -27,7 +27,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/RegisterScavenging.h" #include "llvm/CodeGen/SelectionDAGNodes.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" @@ -1448,7 +1448,7 @@ namespace { static char ID; ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {} - const TargetData *TD; + const DataLayout *TD; const TargetInstrInfo *TII; const TargetRegisterInfo *TRI; const ARMSubtarget *STI; @@ -1478,7 +1478,7 @@ namespace { } bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) { - TD = Fn.getTarget().getTargetData(); + TD = Fn.getTarget().getDataLayout(); TII = Fn.getTarget().getInstrInfo(); TRI = Fn.getTarget().getRegisterInfo(); STI = &Fn.getTarget().getSubtarget(); diff --git a/lib/Target/ARM/ARMMachineFunctionInfo.h b/lib/Target/ARM/ARMMachineFunctionInfo.h index f1c8fc8..c0ac04b 100644 --- a/lib/Target/ARM/ARMMachineFunctionInfo.h +++ b/lib/Target/ARM/ARMMachineFunctionInfo.h @@ -108,6 +108,11 @@ class ARMFunctionInfo : public MachineFunctionInfo { /// pass. DenseMap CPEClones; + /// GlobalBaseReg - keeps track of the virtual register initialized for + /// use as the global base register. This is used for PIC in some PIC + /// relocation models. + unsigned GlobalBaseReg; + public: ARMFunctionInfo() : isThumb(false), @@ -119,7 +124,7 @@ public: GPRCS1Frames(0), GPRCS2Frames(0), DPRCSFrames(0), NumAlignedDPRCS2Regs(0), JumpTableUId(0), PICLabelUId(0), - VarArgsFrameIndex(0), HasITBlocks(false) {} + VarArgsFrameIndex(0), HasITBlocks(false), GlobalBaseReg(0) {} explicit ARMFunctionInfo(MachineFunction &MF) : isThumb(MF.getTarget().getSubtarget().isThumb()), @@ -130,7 +135,7 @@ public: GPRCS1Size(0), GPRCS2Size(0), DPRCSSize(0), GPRCS1Frames(32), GPRCS2Frames(32), DPRCSFrames(32), JumpTableUId(0), PICLabelUId(0), - VarArgsFrameIndex(0), HasITBlocks(false) {} + VarArgsFrameIndex(0), HasITBlocks(false), GlobalBaseReg(0) {} bool isThumbFunction() const { return isThumb; } bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; } @@ -249,6 +254,9 @@ public: bool hasITBlocks() const { return HasITBlocks; } void setHasITBlocks(bool h) { HasITBlocks = h; } + unsigned getGlobalBaseReg() const { return GlobalBaseReg; } + void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; } + void recordCPEClone(unsigned CPIdx, unsigned CPCloneIdx) { if (!CPEClones.insert(std::make_pair(CPCloneIdx, CPIdx)).second) assert(0 && "Duplicate entries!"); diff --git a/lib/Target/ARM/ARMRegisterInfo.td b/lib/Target/ARM/ARMRegisterInfo.td index 6f974fd..b0f576b 100644 --- a/lib/Target/ARM/ARMRegisterInfo.td +++ b/lib/Target/ARM/ARMRegisterInfo.td @@ -49,6 +49,9 @@ def ssub_0 : SubRegIndex; def ssub_1 : SubRegIndex; def ssub_2 : SubRegIndex<[dsub_1, ssub_0]>; def ssub_3 : SubRegIndex<[dsub_1, ssub_1]>; + +def gsub_0 : SubRegIndex; +def gsub_1 : SubRegIndex; // Let TableGen synthesize the remaining 12 ssub_* indices. 
// We don't need to name them. } @@ -247,11 +250,16 @@ def CCR : RegisterClass<"ARM", [i32], 32, (add CPSR)> { } // Scalar single precision floating point register class.. -def SPR : RegisterClass<"ARM", [f32], 32, (sequence "S%u", 0, 31)>; +// FIXME: Allocation order changed to s0, s2, s4, ... as a quick hack to +// avoid partial-write dependencies on D registers (S registers are +// renamed as portions of D registers). +def SPR : RegisterClass<"ARM", [f32], 32, (add (decimate + (sequence "S%u", 0, 31), 2), + (sequence "S%u", 0, 31))>; // Subset of SPR which can be used as a source of NEON scalars for 16-bit // operations -def SPR_8 : RegisterClass<"ARM", [f32], 32, (trunc SPR, 16)>; +def SPR_8 : RegisterClass<"ARM", [f32], 32, (sequence "S%u", 0, 15)>; // Scalar double precision floating point / generic 64-bit vector register // class. @@ -308,6 +316,17 @@ def DPair : RegisterClass<"ARM", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], let AltOrderSelect = [{ return 1; }]; } +// Pseudo-registers representing even-odd pairs of GPRs from R1 to R13/SP. +// These are needed by instructions (e.g. ldrexd/strexd) requiring even-odd GPRs. +def Tuples2R : RegisterTuples<[gsub_0, gsub_1], + [(add R0, R2, R4, R6, R8, R10, R12), + (add R1, R3, R5, R7, R9, R11, SP)]>; + +// Register class representing a pair of even-odd GPRs. +def GPRPair : RegisterClass<"ARM", [untyped], 64, (add Tuples2R)> { + let Size = 64; // 2 x 32 bits, we have no predefined type of that size. +} + // Pseudo-registers representing 3 consecutive D registers. def Tuples3D : RegisterTuples<[dsub_0, dsub_1, dsub_2], [(shl DPR, 0), diff --git a/lib/Target/ARM/ARMSchedule.td b/lib/Target/ARM/ARMSchedule.td index 81d2fa3..02196d0 100644 --- a/lib/Target/ARM/ARMSchedule.td +++ b/lib/Target/ARM/ARMSchedule.td @@ -55,6 +55,7 @@ def IIC_iMUL32 : InstrItinClass; def IIC_iMAC32 : InstrItinClass; def IIC_iMUL64 : InstrItinClass; def IIC_iMAC64 : InstrItinClass; +def IIC_iDIV : InstrItinClass; def IIC_iLoad_i : InstrItinClass; def IIC_iLoad_r : InstrItinClass; def IIC_iLoad_si : InstrItinClass; @@ -261,3 +262,4 @@ def IIC_VTBX4 : InstrItinClass; include "ARMScheduleV6.td" include "ARMScheduleA8.td" include "ARMScheduleA9.td" +include "ARMScheduleSwift.td" diff --git a/lib/Target/ARM/ARMScheduleA9.td b/lib/Target/ARM/ARMScheduleA9.td index 7bc590f..404634f 100644 --- a/lib/Target/ARM/ARMScheduleA9.td +++ b/lib/Target/ARM/ARMScheduleA9.td @@ -1876,8 +1876,9 @@ def CortexA9Itineraries : ProcessorItineraries< ]>; // ===---------------------------------------------------------------------===// -// This following definitions describe the simple machine model which -// will replace itineraries. +// The following definitions describe the simpler per-operand machine model. +// This works with MachineScheduler and will eventually replace itineraries. + // Cortex-A9 machine model for scheduling and other instruction cost heuristics. def CortexA9Model : SchedMachineModel { @@ -1891,5 +1892,595 @@ def CortexA9Model : SchedMachineModel { let Itineraries = CortexA9Itineraries; } -// TODO: Add Cortex-A9 processor and scheduler resources. +//===----------------------------------------------------------------------===// +// Define each kind of processor resource and number available. 
+ +def A9UnitALU : ProcResource<2>; +def A9UnitMul : ProcResource<1> { let Super = A9UnitALU; } +def A9UnitAGU : ProcResource<1>; +def A9UnitLS : ProcResource<1>; +def A9UnitFP : ProcResource<1> { let Buffered = 0; } +def A9UnitB : ProcResource<1>; + +//===----------------------------------------------------------------------===// +// Define scheduler read/write types with their resources and latency on A9. + +// Consume an issue slot, but no processor resources. This is useful when all +// other writes associated with the operand have NumMicroOps = 0. +def A9WriteIssue : SchedWriteRes<[]> { let Latency = 0; } + +// Write an integer register. +def A9WriteI : SchedWriteRes<[A9UnitALU]>; +// Write an integer shifted-by register +def A9WriteIsr : SchedWriteRes<[A9UnitALU]> { let Latency = 2; } + +// Basic ALU. +def A9WriteA : SchedWriteRes<[A9UnitALU]>; +// ALU with operand shifted by immediate. +def A9WriteAsi : SchedWriteRes<[A9UnitALU]> { let Latency = 2; } +// ALU with operand shifted by register. +def A9WriteAsr : SchedWriteRes<[A9UnitALU]> { let Latency = 3; } + +// Multiplication +def A9WriteM : SchedWriteRes<[A9UnitMul, A9UnitMul]> { let Latency = 4; } +def A9WriteMHi : SchedWriteRes<[A9UnitMul]> { let Latency = 5; + let NumMicroOps = 0; } +def A9WriteM16 : SchedWriteRes<[A9UnitMul]> { let Latency = 3; } +def A9WriteM16Hi : SchedWriteRes<[A9UnitMul]> { let Latency = 4; + let NumMicroOps = 0; } + +// Floating-point +// Only one FP or AGU instruction may issue per cycle. We model this +// by having FP instructions consume the AGU resource. +def A9WriteF : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 4; } +def A9WriteFMov : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 1; } +def A9WriteFMulS : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 5; } +def A9WriteFMulD : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 6; } +def A9WriteFMAS : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 8; } +def A9WriteFMAD : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 9; } +def A9WriteFDivS : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 15; } +def A9WriteFDivD : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 25; } +def A9WriteFSqrtS : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 17; } +def A9WriteFSqrtD : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 32; } + +// NEON has an odd mix of latencies. Simply name the write types by latency. +def A9WriteV1 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 1; } +def A9WriteV2 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 2; } +def A9WriteV3 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 3; } +def A9WriteV4 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 4; } +def A9WriteV5 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 5; } +def A9WriteV6 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 6; } +def A9WriteV7 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 7; } +def A9WriteV9 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 9; } +def A9WriteV10 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { let Latency = 10; } + +// Reserve A9UnitFP for 2 consecutive cycles. +def A9Write2V4 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { + let Latency = 4; + let ResourceCycles = [2]; +} +def A9Write2V7 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { + let Latency = 7; + let ResourceCycles = [2]; +} +def A9Write2V9 : SchedWriteRes<[A9UnitFP, A9UnitAGU]> { + let Latency = 9; + let ResourceCycles = [2]; +} + +// Branches don't have a def operand but still consume resources. 
+def A9WriteB : SchedWriteRes<[A9UnitB]>; + +// Address generation. +def A9WriteAdr : SchedWriteRes<[A9UnitAGU]> { let NumMicroOps = 0; } + +// Load Integer. +def A9WriteL : SchedWriteRes<[A9UnitLS]> { let Latency = 3; } +// Load the upper 32-bits using the same micro-op. +def A9WriteLHi : SchedWriteRes<[]> { let Latency = 3; + let NumMicroOps = 0; } +// Offset shifted by register. +def A9WriteLsi : SchedWriteRes<[A9UnitLS]> { let Latency = 4; } +// Load (and zero extend) a byte. +def A9WriteLb : SchedWriteRes<[A9UnitLS]> { let Latency = 4; } +def A9WriteLbsi : SchedWriteRes<[A9UnitLS]> { let Latency = 5; } + +// Load or Store Float, aligned. +def A9WriteLSfp : SchedWriteRes<[A9UnitLS, A9UnitFP]> { let Latency = 1; } + +// Store Integer. +def A9WriteS : SchedWriteRes<[A9UnitLS]>; + +//===----------------------------------------------------------------------===// +// Define resources dynamically for load multiple variants. + +// Define helpers for extra latency without consuming resources. +def A9WriteCycle1 : SchedWriteRes<[]> { let Latency = 1; let NumMicroOps = 0; } +foreach NumCycles = 2-8 in { +def A9WriteCycle#NumCycles : WriteSequence<[A9WriteCycle1], NumCycles>; +} // foreach NumCycles + +// Define TII for use in SchedVariant Predicates. +def : PredicateProlog<[{ + const ARMBaseInstrInfo *TII = + static_cast(SchedModel->getInstrInfo()); + (void)TII; +}]>; + +// Define address generation sequences and predicates for 8 flavors of LDMs. +foreach NumAddr = 1-8 in { + +// Define A9WriteAdr1-8 as a sequence of A9WriteAdr with additive +// latency for instructions that generate multiple loads or stores. +def A9WriteAdr#NumAddr : WriteSequence<[A9WriteAdr], NumAddr>; + +// Define a predicate to select the LDM based on number of memory addresses. +def A9LMAdr#NumAddr#Pred : + SchedPredicate<"TII->getNumLDMAddresses(MI) == "#NumAddr>; + +} // foreach NumAddr + +// Fall-back for unknown LDMs. +def A9LMUnknownPred : SchedPredicate<"TII->getNumLDMAddresses(MI) == 0">; + +// LDM/VLDM/VLDn address generation latency & resources. +// Dynamically select the A9WriteAdrN sequence using a predicate. +def A9WriteLMAdr : SchedWriteVariant<[ + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + // For unknown LDM/VLDM/VSTM, assume 2 32-bit registers. + SchedVar]>; + +// Define LDM Resources. +// These take no issue resource, so they can be combined with other +// writes like WriteB. +// A9WriteLMLo takes a single LS resource and 2 cycles. +def A9WriteLMLo : SchedWriteRes<[A9UnitLS]> { let Latency = 2; + let NumMicroOps = 0; } +// Assuming aligned access, the upper half of each pair is free with +// the same latency. +def A9WriteLMHi : SchedWriteRes<[]> { let Latency = 2; + let NumMicroOps = 0; } +// Each A9WriteL#N variant adds N cycles of latency without consuming +// additional resources. +foreach NumAddr = 1-8 in { +def A9WriteL#NumAddr : WriteSequence< + [A9WriteLMLo, !cast("A9WriteCycle"#NumAddr)]>; +def A9WriteL#NumAddr#Hi : WriteSequence< + [A9WriteLMHi, !cast("A9WriteCycle"#NumAddr)]>; +} + +//===----------------------------------------------------------------------===// +// LDM: Load multiple into 32-bit integer registers. + +// A9WriteLM variants expand into a pair of writes for each 64-bit +// value loaded. When the number of registers is odd, the last +// A9WriteLnHi is naturally ignored because the instruction has no +// following def operands. 
These variants take no issue resource, so +// they may need to be part of a WriteSequence that includes A9WriteIssue. +def A9WriteLM : SchedWriteVariant<[ + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + // For unknown LDMs, define the maximum number of writes, but only + // make the first two consume resources. + SchedVar]> { + let Variadic = 1; +} + +//===----------------------------------------------------------------------===// +// VFP Load/Store Multiple Variants, and NEON VLDn/VSTn support. + +// A9WriteLfpOp is the same as A9WriteLSfp but takes no issue resources +// so can be used in WriteSequences for in single-issue instructions that +// encapsulate multiple loads. +def A9WriteLfpOp : SchedWriteRes<[A9UnitLS, A9UnitFP]> { + let Latency = 1; + let NumMicroOps = 0; +} + +foreach NumAddr = 1-8 in { + +// Helper for A9WriteLfp1-8: A sequence of fp loads with no micro-ops. +def A9WriteLfp#NumAddr#Seq : WriteSequence<[A9WriteLfpOp], NumAddr>; + +// A9WriteLfp1-8 definitions are statically expanded into a sequence of +// A9WriteLfpOps with additive latency that takes a single issue slot. +// Used directly to describe NEON VLDn. +def A9WriteLfp#NumAddr : WriteSequence< + [A9WriteIssue, !cast("A9WriteLfp"#NumAddr#Seq)]>; + +// A9WriteLfp1-8Mov adds a cycle of latency and FP resource for +// permuting loaded values. +def A9WriteLfp#NumAddr#Mov : WriteSequence< + [A9WriteF, !cast("A9WriteLfp"#NumAddr#Seq)]>; + +} // foreach NumAddr + +// Define VLDM/VSTM PreRA resources. +// A9WriteLMfpPreRA are dynamically expanded into the correct +// A9WriteLfp1-8 sequence based on a predicate. This supports the +// preRA VLDM variants in which all 64-bit loads are written to the +// same tuple of either single or double precision registers. +def A9WriteLMfpPreRA : SchedWriteVariant<[ + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + // For unknown VLDM/VSTM PreRA, assume 2xS registers. + SchedVar]>; + +// Define VLDM/VSTM PostRA Resources. +// A9WriteLMfpLo takes a LS and FP resource and one issue slot but no latency. +def A9WriteLMfpLo : SchedWriteRes<[A9UnitLS, A9UnitFP]> { let Latency = 0; } + +foreach NumAddr = 1-8 in { + +// Each A9WriteL#N variant adds N cycles of latency without consuming +// additional resources. +def A9WriteLMfp#NumAddr : WriteSequence< + [A9WriteLMfpLo, !cast("A9WriteCycle"#NumAddr)]>; + +// Assuming aligned access, the upper half of each pair is free with +// the same latency. +def A9WriteLMfp#NumAddr#Hi : WriteSequence< + [A9WriteLMHi, !cast("A9WriteCycle"#NumAddr)]>; + +} // foreach NumAddr + +// VLDM PostRA Variants. These variants expand A9WriteLMfpPostRA into a +// pair of writes for each 64-bit data loaded. When the number of +// registers is odd, the last WriteLMfpnHi is naturally ignored because +// the instruction has no following def operands. +def A9WriteLMfpPostRA : SchedWriteVariant<[ + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + SchedVar, + // For unknown LDMs, define the maximum number of writes, but only + // make the first two consume resources. + SchedVar]> { + let Variadic = 1; +} + +// Distinguish between our multiple MI-level forms of the same +// VLDM/VSTM instructions. 
+def A9PreRA : SchedPredicate< + "TargetRegisterInfo::isVirtualRegister(MI->getOperand(0).getReg())">; +def A9PostRA : SchedPredicate< + "TargetRegisterInfo::isPhysicalRegister(MI->getOperand(0).getReg())">; + +// VLDM represents all destination registers as a single register +// tuple, unlike LDM. So the number of write operands is not variadic. +def A9WriteLMfp : SchedWriteVariant<[ + SchedVar, + SchedVar]>; + +//===----------------------------------------------------------------------===// +// Resources for other (non LDM/VLDM) Variants. + +// These mov immediate writers are unconditionally expanded with +// additive latency. +def A9WriteI2 : WriteSequence<[A9WriteI, A9WriteI]>; +def A9WriteI2pc : WriteSequence<[A9WriteI, A9WriteI, A9WriteA]>; +def A9WriteI2ld : WriteSequence<[A9WriteI, A9WriteI, A9WriteL]>; + +// Some ALU operations can read loaded integer values one cycle early. +def A9ReadA : SchedReadAdvance<1, + [A9WriteL, A9WriteLHi, A9WriteLsi, A9WriteLb, A9WriteLbsi, + A9WriteL1, A9WriteL2, A9WriteL3, A9WriteL4, + A9WriteL5, A9WriteL6, A9WriteL7, A9WriteL8, + A9WriteL1Hi, A9WriteL2Hi, A9WriteL3Hi, A9WriteL4Hi, + A9WriteL5Hi, A9WriteL6Hi, A9WriteL7Hi, A9WriteL8Hi]>; + +// Read types for operands that are unconditionally read in cycle N +// after the instruction issues, decreases producer latency by N-1. +def A9Read2 : SchedReadAdvance<1>; +def A9Read3 : SchedReadAdvance<2>; +def A9Read4 : SchedReadAdvance<3>; + +//===----------------------------------------------------------------------===// +// Map itinerary classes to scheduler read/write resources per operand. +// +// For ARM, we piggyback scheduler resources on the Itinerary classes +// to avoid perturbing the existing instruction definitions. + +// This table follows the ARM Cortex-A9 Technical Reference Manuals, +// mostly in order. +let SchedModel = CortexA9Model in { + +def :ItinRW<[A9WriteI], [IIC_iMOVi,IIC_iMOVr,IIC_iMOVsi, + IIC_iMVNi,IIC_iMVNsi, + IIC_iCMOVi,IIC_iCMOVr,IIC_iCMOVsi]>; +def :ItinRW<[A9WriteI,A9ReadA],[IIC_iMVNr]>; +def :ItinRW<[A9WriteIsr], [IIC_iMOVsr,IIC_iMVNsr,IIC_iCMOVsr]>; + +def :ItinRW<[A9WriteI2], [IIC_iMOVix2,IIC_iCMOVix2]>; +def :ItinRW<[A9WriteI2pc], [IIC_iMOVix2addpc]>; +def :ItinRW<[A9WriteI2ld], [IIC_iMOVix2ld]>; + +def :ItinRW<[A9WriteA], [IIC_iBITi,IIC_iBITr,IIC_iUNAr,IIC_iTSTi,IIC_iTSTr]>; +def :ItinRW<[A9WriteA, A9ReadA], [IIC_iALUi, IIC_iCMPi, IIC_iCMPsi]>; +def :ItinRW<[A9WriteA, A9ReadA, A9ReadA],[IIC_iALUr,IIC_iCMPr]>; +def :ItinRW<[A9WriteAsi], [IIC_iBITsi,IIC_iUNAsi,IIC_iEXTr,IIC_iTSTsi]>; +def :ItinRW<[A9WriteAsi, A9ReadA], [IIC_iALUsi]>; +def :ItinRW<[A9WriteAsi, ReadDefault, A9ReadA], [IIC_iALUsir]>; // RSB +def :ItinRW<[A9WriteAsr], [IIC_iBITsr,IIC_iTSTsr,IIC_iEXTAr,IIC_iEXTAsr]>; +def :ItinRW<[A9WriteAsr, A9ReadA], [IIC_iALUsr,IIC_iCMPsr]>; + +// A9WriteHi ignored for MUL32. +def :ItinRW<[A9WriteM, A9WriteMHi], [IIC_iMUL32,IIC_iMAC32, + IIC_iMUL64,IIC_iMAC64]>; +// FIXME: SMLALxx needs itin classes +def :ItinRW<[A9WriteM16, A9WriteM16Hi], [IIC_iMUL16,IIC_iMAC16]>; + +// TODO: For floating-point ops, we model the pipeline forwarding +// latencies here. WAW latencies are sometimes longer. 
+
+def :ItinRW<[A9WriteFMov], [IIC_fpSTAT, IIC_fpMOVIS, IIC_fpMOVID, IIC_fpMOVSI,
+                            IIC_fpUNA32, IIC_fpUNA64,
+                            IIC_fpCMP32, IIC_fpCMP64]>;
+def :ItinRW<[A9WriteFMov, A9WriteFMov], [IIC_fpMOVDI]>;
+def :ItinRW<[A9WriteF], [IIC_fpCVTSD, IIC_fpCVTDS, IIC_fpCVTSH, IIC_fpCVTHS,
+                         IIC_fpCVTIS, IIC_fpCVTID, IIC_fpCVTSI, IIC_fpCVTDI,
+                         IIC_fpALU32, IIC_fpALU64]>;
+def :ItinRW<[A9WriteFMulS], [IIC_fpMUL32]>;
+def :ItinRW<[A9WriteFMulD], [IIC_fpMUL64]>;
+def :ItinRW<[A9WriteFMAS], [IIC_fpMAC32]>;
+def :ItinRW<[A9WriteFMAD], [IIC_fpMAC64]>;
+def :ItinRW<[A9WriteFDivS], [IIC_fpDIV32]>;
+def :ItinRW<[A9WriteFDivD], [IIC_fpDIV64]>;
+def :ItinRW<[A9WriteFSqrtS], [IIC_fpSQRT32]>;
+def :ItinRW<[A9WriteFSqrtD], [IIC_fpSQRT64]>;
+
+def :ItinRW<[A9WriteB], [IIC_Br]>;
+
+// A9 PLD is processed in a dedicated unit.
+def :ItinRW<[], [IIC_Preload]>;
+
+// Note: We must assume that loads are aligned, since the machine
+// model cannot know this statically and A9 ignores alignment hints.
+
+// A9WriteAdr consumes the AGU regardless of address writeback, but its
+// latency is only relevant for users of an updated address.
+def :ItinRW<[A9WriteL, A9WriteAdr], [IIC_iLoad_i,IIC_iLoad_r,
+                                     IIC_iLoad_iu,IIC_iLoad_ru]>;
+def :ItinRW<[A9WriteLsi, A9WriteAdr], [IIC_iLoad_si,IIC_iLoad_siu]>;
+def :ItinRW<[A9WriteLb, A9WriteAdr2], [IIC_iLoad_bh_i,IIC_iLoad_bh_r,
+                                       IIC_iLoad_bh_iu,IIC_iLoad_bh_ru]>;
+def :ItinRW<[A9WriteLbsi, A9WriteAdr2], [IIC_iLoad_bh_si,IIC_iLoad_bh_siu]>;
+def :ItinRW<[A9WriteL, A9WriteLHi, A9WriteAdr], [IIC_iLoad_d_i,IIC_iLoad_d_r,
+                                                 IIC_iLoad_d_ru]>;
+// Store either has no def operands, or the one def for address writeback.
+def :ItinRW<[A9WriteAdr, A9WriteS], [IIC_iStore_i, IIC_iStore_r,
+                                     IIC_iStore_iu, IIC_iStore_ru,
+                                     IIC_iStore_d_i, IIC_iStore_d_r,
+                                     IIC_iStore_d_ru]>;
+def :ItinRW<[A9WriteAdr2, A9WriteS], [IIC_iStore_si, IIC_iStore_siu,
+                                      IIC_iStore_bh_i, IIC_iStore_bh_r,
+                                      IIC_iStore_bh_iu, IIC_iStore_bh_ru]>;
+def :ItinRW<[A9WriteAdr3, A9WriteS], [IIC_iStore_bh_si, IIC_iStore_bh_siu]>;
+
+// A9WriteLM will be expanded into a separate write for each def
+// operand. Address generation consumes resources, but A9WriteLMAdr
+// is listed after all def operands, so it has no effective latency.
+//
+// Note: A9WriteLM expands into an even number of def operands. The
+// actual number of def operands may be less by one.
+def :ItinRW<[A9WriteLM, A9WriteLMAdr, A9WriteIssue], [IIC_iLoad_m, IIC_iPop]>;
+
+// Load multiple with address writeback has an extra def operand in
+// front of the loaded registers.
+//
+// Reuse the load-multiple variants for store-multiple because the
+// resources are identical. For stores, only the address writeback
+// has a def operand, so the WriteL latencies are unused.
+def :ItinRW<[A9WriteLMAdr, A9WriteLM, A9WriteIssue], [IIC_iLoad_mu,
+                                                      IIC_iStore_m,
+                                                      IIC_iStore_mu]>;
+def :ItinRW<[A9WriteLM, A9WriteLMAdr, A9WriteB], [IIC_iLoad_mBr, IIC_iPop_Br]>;
+def :ItinRW<[A9WriteL, A9WriteAdr, A9WriteA], [IIC_iLoadiALU]>;
+
+def :ItinRW<[A9WriteLSfp, A9WriteAdr], [IIC_fpLoad32, IIC_fpLoad64]>;
+
+def :ItinRW<[A9WriteLMfp, A9WriteLMAdr], [IIC_fpLoad_m]>;
+def :ItinRW<[A9WriteLMAdr, A9WriteLMfp], [IIC_fpLoad_mu]>;
+def :ItinRW<[A9WriteAdr, A9WriteLSfp], [IIC_fpStore32, IIC_fpStore64,
+                                        IIC_fpStore_m, IIC_fpStore_mu]>;
+
+// Note: Unlike VLDM, VLD1 expects the writeback operand after the
+// normal writes.
+def :ItinRW<[A9WriteLfp1, A9WriteAdr1], [IIC_VLD1, IIC_VLD1u, + IIC_VLD1x2, IIC_VLD1x2u]>; +def :ItinRW<[A9WriteLfp2, A9WriteAdr2], [IIC_VLD1x3, IIC_VLD1x3u, + IIC_VLD1x4, IIC_VLD1x4u, + IIC_VLD4dup, IIC_VLD4dupu]>; +def :ItinRW<[A9WriteLfp1Mov, A9WriteAdr1], [IIC_VLD1dup, IIC_VLD1dupu, + IIC_VLD2, IIC_VLD2u, + IIC_VLD2dup, IIC_VLD2dupu]>; +def :ItinRW<[A9WriteLfp2Mov, A9WriteAdr1], [IIC_VLD1ln, IIC_VLD1lnu, + IIC_VLD2x2, IIC_VLD2x2u, + IIC_VLD2ln, IIC_VLD2lnu]>; +def :ItinRW<[A9WriteLfp3Mov, A9WriteAdr3], [IIC_VLD3, IIC_VLD3u, + IIC_VLD3dup, IIC_VLD3dupu]>; +def :ItinRW<[A9WriteLfp4Mov, A9WriteAdr4], [IIC_VLD4, IIC_VLD4u, + IIC_VLD4ln, IIC_VLD4lnu]>; +def :ItinRW<[A9WriteLfp5Mov, A9WriteAdr5], [IIC_VLD3ln, IIC_VLD3lnu]>; + +// Vector stores use similar resources to vector loads, so use the +// same write types. The address write must be first for stores with +// address writeback. +def :ItinRW<[A9WriteAdr1, A9WriteLfp1], [IIC_VST1, IIC_VST1u, + IIC_VST1x2, IIC_VST1x2u, + IIC_VST1ln, IIC_VST1lnu, + IIC_VST2, IIC_VST2u, + IIC_VST2x2, IIC_VST2x2u, + IIC_VST2ln, IIC_VST2lnu]>; +def :ItinRW<[A9WriteAdr2, A9WriteLfp2], [IIC_VST1x3, IIC_VST1x3u, + IIC_VST1x4, IIC_VST1x4u, + IIC_VST3, IIC_VST3u, + IIC_VST3ln, IIC_VST3lnu, + IIC_VST4, IIC_VST4u, + IIC_VST4ln, IIC_VST4lnu]>; + +// NEON moves. +def :ItinRW<[A9WriteV2], [IIC_VMOVSI, IIC_VMOVDI, IIC_VMOVD, IIC_VMOVQ]>; +def :ItinRW<[A9WriteV1], [IIC_VMOV, IIC_VMOVIS, IIC_VMOVID]>; +def :ItinRW<[A9WriteV3], [IIC_VMOVISL, IIC_VMOVN]>; + +// NEON integer arithmetic +// +// VADD/VAND/VORR/VEOR/VBIC/VORN/VBIT/VBIF/VBSL +def :ItinRW<[A9WriteV3, A9Read2, A9Read2], [IIC_VBINiD, IIC_VBINiQ]>; +// VSUB/VMVN/VCLSD/VCLZD/VCNTD +def :ItinRW<[A9WriteV3, A9Read2], [IIC_VSUBiD, IIC_VSUBiQ, IIC_VCNTiD]>; +// VADDL/VSUBL/VNEG are mapped later under IIC_SHLi. +// ... +// VHADD/VRHADD/VQADD/VTST/VADH/VRADH +def :ItinRW<[A9WriteV4, A9Read2, A9Read2], [IIC_VBINi4D, IIC_VBINi4Q]>; +// VSBH/VRSBH/VHSUB/VQSUB/VABD/VCEQ/VCGE/VCGT/VMAX/VMIN/VPMAX/VPMIN/VABDL +def :ItinRW<[A9WriteV4, A9Read2], [IIC_VSUBi4D, IIC_VSUBi4Q]>; +// VQNEG/VQABS +def :ItinRW<[A9WriteV4], [IIC_VQUNAiD, IIC_VQUNAiQ]>; +// VABS +def :ItinRW<[A9WriteV4, A9Read2], [IIC_VUNAiD, IIC_VUNAiQ]>; +// VPADD/VPADDL are mapped later under IIC_SHLi. +// ... +// VCLSQ/VCLZQ/VCNTQ, takes two cycles. +def :ItinRW<[A9Write2V4, A9Read3], [IIC_VCNTiQ]>; +// VMOVimm/VMVNimm/VORRimm/VBICimm +def :ItinRW<[A9WriteV3], [IIC_VMOVImm]>; +def :ItinRW<[A9WriteV6, A9Read3, A9Read2], [IIC_VABAD, IIC_VABAQ]>; +def :ItinRW<[A9WriteV6, A9Read3], [IIC_VPALiD, IIC_VPALiQ]>; + +// NEON integer multiply +// +// Note: these don't quite match the timing docs, but they do match +// the original A9 itinerary. +def :ItinRW<[A9WriteV6, A9Read2, A9Read2], [IIC_VMULi16D]>; +def :ItinRW<[A9WriteV7, A9Read2, A9Read2], [IIC_VMULi16Q]>; +def :ItinRW<[A9Write2V7, A9Read2], [IIC_VMULi32D]>; +def :ItinRW<[A9Write2V9, A9Read2], [IIC_VMULi32Q]>; +def :ItinRW<[A9WriteV6, A9Read3, A9Read2, A9Read2], [IIC_VMACi16D]>; +def :ItinRW<[A9WriteV7, A9Read3, A9Read2, A9Read2], [IIC_VMACi16Q]>; +def :ItinRW<[A9Write2V7, A9Read3, A9Read2], [IIC_VMACi32D]>; +def :ItinRW<[A9Write2V9, A9Read3, A9Read2], [IIC_VMACi32Q]>; + +// NEON integer shift +// TODO: Q,Q,Q shifts should actually reserve FP for 2 cycles. 
+def :ItinRW<[A9WriteV3], [IIC_VSHLiD, IIC_VSHLiQ]>; +def :ItinRW<[A9WriteV4], [IIC_VSHLi4D, IIC_VSHLi4Q]>; + +// NEON permute +def :ItinRW<[A9WriteV2], [IIC_VPERMD, IIC_VPERMQ, IIC_VEXTD]>; +def :ItinRW<[A9WriteV3, A9WriteV4, ReadDefault, A9Read2], + [IIC_VPERMQ3, IIC_VEXTQ]>; +def :ItinRW<[A9WriteV3, A9Read2], [IIC_VTB1]>; +def :ItinRW<[A9WriteV3, A9Read2, A9Read2], [IIC_VTB2]>; +def :ItinRW<[A9WriteV4, A9Read2, A9Read2, A9Read3], [IIC_VTB3]>; +def :ItinRW<[A9WriteV4, A9Read2, A9Read2, A9Read3, A9Read3], [IIC_VTB4]>; +def :ItinRW<[A9WriteV3, ReadDefault, A9Read2], [IIC_VTBX1]>; +def :ItinRW<[A9WriteV3, ReadDefault, A9Read2, A9Read2], [IIC_VTBX2]>; +def :ItinRW<[A9WriteV4, ReadDefault, A9Read2, A9Read2, A9Read3], [IIC_VTBX3]>; +def :ItinRW<[A9WriteV4, ReadDefault, A9Read2, A9Read2, A9Read3, A9Read3], + [IIC_VTBX4]>; +// NEON floating-point +def :ItinRW<[A9WriteV5, A9Read2, A9Read2], [IIC_VBIND]>; +def :ItinRW<[A9WriteV6, A9Read2, A9Read2], [IIC_VBINQ]>; +def :ItinRW<[A9WriteV5, A9Read2], [IIC_VUNAD, IIC_VFMULD]>; +def :ItinRW<[A9WriteV6, A9Read2], [IIC_VUNAQ, IIC_VFMULQ]>; +def :ItinRW<[A9WriteV9, A9Read3, A9Read2], [IIC_VMACD, IIC_VFMACD]>; +def :ItinRW<[A9WriteV10, A9Read3, A9Read2], [IIC_VMACQ, IIC_VFMACQ]>; +def :ItinRW<[A9WriteV9, A9Read2, A9Read2], [IIC_VRECSD]>; +def :ItinRW<[A9WriteV10, A9Read2, A9Read2], [IIC_VRECSQ]>; +} // SchedModel = CortexA9Model diff --git a/lib/Target/ARM/ARMScheduleSwift.td b/lib/Target/ARM/ARMScheduleSwift.td new file mode 100644 index 0000000..e9bc3e0 --- /dev/null +++ b/lib/Target/ARM/ARMScheduleSwift.td @@ -0,0 +1,1085 @@ +//=- ARMScheduleSwift.td - Swift Scheduling Definitions -*- tablegen -*----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the itinerary class data for the Swift processor.. +// +//===----------------------------------------------------------------------===// + +// ===---------------------------------------------------------------------===// +// This section contains legacy support for itineraries. This is +// required until SD and PostRA schedulers are replaced by MachineScheduler. + +def SW_DIS0 : FuncUnit; +def SW_DIS1 : FuncUnit; +def SW_DIS2 : FuncUnit; + +def SW_ALU0 : FuncUnit; +def SW_ALU1 : FuncUnit; +def SW_LS : FuncUnit; +def SW_IDIV : FuncUnit; +def SW_FDIV : FuncUnit; + +// FIXME: Need bypasses. +// FIXME: Model the multiple stages of IIC_iMOVix2, IIC_iMOVix2addpc, and +// IIC_iMOVix2ld better. +// FIXME: Model the special immediate shifts that are not microcoded. +// FIXME: Do we need to model the fact that uses of r15 in a micro-op force it +// to issue on pipe 1? +// FIXME: Model the pipelined behavior of CMP / TST instructions. +// FIXME: Better model the microcode stages of multiply instructions, especially +// conditional variants. +// FIXME: Add preload instruction when it is documented. +// FIXME: Model non-pipelined nature of FP div / sqrt unit. 
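// For orientation only, an illustrative sketch (not part of this patch): each
// entry in the itinerary list that follows has the general TableGen form
// InstrItinData<ItinClass, [InstrStage...], [operand cycles]>, where every
// InstrStage names how long a set of the FuncUnits defined above is occupied,
// and the trailing list gives the cycle at which each def/use operand is
// touched. The record name SketchItineraries and the cycle counts below are
// assumptions made up for illustration, not the actual Swift data.
def SketchItineraries : ProcessorItineraries<
  [SW_DIS0, SW_ALU0, SW_LS], [], [
  // Take one dispatch slot, with the next stage starting in the same cycle
  // (the trailing 0), then spend one cycle on an ALU; the single def operand
  // becomes available at cycle 1.
  InstrItinData<IIC_iMOVi, [InstrStage<1, [SW_DIS0], 0>,
                            InstrStage<1, [SW_ALU0]>],
                [1]>
]>;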
+ +def SwiftItineraries : ProcessorItineraries< + [SW_DIS0, SW_DIS1, SW_DIS2, SW_ALU0, SW_ALU1, SW_LS, SW_IDIV, SW_FDIV], [], [ + // + // Move instructions, unconditional + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1]>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>, + InstrStage<1, [SW_ALU0, SW_ALU1]>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [3]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>, + InstrStage<1, [SW_ALU0, SW_ALU1]>, + InstrStage<1, [SW_LS]>], + [5]>, + // + // MVN instructions + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + // + // No operand cycles + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>]>, + // + // Binary Instructions that produce a result + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1, 1]>, + // + // Bitwise Instructions that produce a result + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1, 1]>, + // + // Unary Instructions that produce a result + + // CLZ, RBIT, etc. + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + + // BFC, BFI, UBFX, SBFX + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1]>, + + // + // Zero and sign extension instructions + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1, 1, 1]>, + // + // Compare instructions + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<2, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<2, [SW_ALU0, SW_ALU1]>], + [1, 1, 1]>, + // + // Test instructions + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<2, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<2, [SW_ALU0, SW_ALU1]>], + [1, 1, 1]>, + // + // Move instructions, conditional + // FIXME: Correctly model the extra input dep on the destination. 
+ InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS0, SW_DIS1, SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1]>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2]>, + + // Integer multiply pipeline + // + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [3, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [3, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0], 1>, + InstrStage<1, [SW_ALU0], 3>, + InstrStage<1, [SW_ALU0]>], + [5, 5, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0], 1>, + InstrStage<1, [SW_ALU0], 1>, + InstrStage<1, [SW_ALU0, SW_ALU1], 3>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [5, 6, 1, 1]>, + // + // Integer divide + InstrItinData, + InstrStage<1, [SW_ALU0], 0>, + InstrStage<14, [SW_IDIV]>], + [14, 1, 1]>, + + // Integer load pipeline + // FIXME: The timings are some rough approximations + // + // Immediate offset + InstrItinData, + InstrStage<1, [SW_LS]>], + [3, 1]>, + InstrItinData, + InstrStage<1, [SW_LS]>], + [3, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_LS], 1>, + InstrStage<1, [SW_LS]>], + [3, 4, 1]>, + // + // Register offset + InstrItinData, + InstrStage<1, [SW_LS]>], + [3, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_LS]>], + [3, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_LS], 1>, + InstrStage<1, [SW_LS], 3>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [3, 4, 1, 1]>, + // + // Scaled register offset + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 2>, + InstrStage<1, [SW_LS]>], + [5, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 2>, + InstrStage<1, [SW_LS]>], + [5, 1, 1]>, + // + // Immediate offset with update + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [3, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [3, 1, 1]>, + // + // Register offset with update + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0], 1>, + InstrStage<1, [SW_LS]>], + [3, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0], 1>, + InstrStage<1, [SW_LS]>], + [3, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 0>, + InstrStage<1, [SW_LS], 3>, + InstrStage<1, [SW_LS], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [3, 4, 1, 1]>, + // + // Scaled register offset with update + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 2>, + InstrStage<1, [SW_LS], 3>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [5, 3, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 2>, + InstrStage<1, [SW_LS], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [5, 3, 1, 1]>, + // + // Load multiple, def is the 5th operand. 
+ // FIXME: This assumes 3 to 4 registers. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 1, 1, 3], [], -1>, // dynamic uops + + // + // Load multiple + update, defs are the 1st and 5th operands. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 0>, + InstrStage<1, [SW_LS], 3>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1, 1, 3], [], -1>, // dynamic uops + // + // Load multiple plus branch + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 1, 1, 3], [], -1>, // dynamic uops + // + // Pop, def is the 3rd operand. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 3], [], -1>, // dynamic uops + // + // Pop + branch, def is the 3rd operand. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 3], [], -1>, // dynamic uops + + // + // iLoadi + iALUr for t2LDRpci_pic. + InstrItinData, + InstrStage<1, [SW_LS], 3>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [4, 1]>, + + // Integer store pipeline + /// + // Immediate offset + InstrItinData, + InstrStage<1, [SW_LS]>], + [1, 1]>, + InstrItinData, + InstrStage<1, [SW_LS]>], + [1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_LS], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1]>, + // + // Register offset + InstrItinData, + InstrStage<1, [SW_LS]>], + [1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_LS]>], + [1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_LS], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 1]>, + // + // Scaled register offset + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 2>, + InstrStage<1, [SW_LS]>], + [1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 2>, + InstrStage<1, [SW_LS]>], + [1, 1, 1]>, + // + // Immediate offset with update + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 1]>, + // + // Register offset with update + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [1, 1, 1, 1]>, + // + // Scaled register offset with update + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 2>, + InstrStage<1, [SW_LS], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>], + [3, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 2>, + InstrStage<1, [SW_LS], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>], + [3, 1, 1, 1]>, + // + // Store multiple + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, 
[SW_LS], 1>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS], 1>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [], [], -1>, // dynamic uops + // + // Store multiple + update + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS], 1>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS], 1>, + InstrStage<1, [SW_ALU0, SW_ALU1], 1>, + InstrStage<1, [SW_LS]>], + [2], [], -1>, // dynamic uops + + // + // Preload + InstrItinData], [1, 1]>, + + // Branch + // + // no delay slots, so the latency of a branch is unimportant + InstrItinData]>, + + // FP Special Register to Integer Register File Move + InstrItinData, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [1]>, + // + // Single-precision FP Unary + // + // Most floating-point moves get issued on ALU0. + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1]>, + // + // Double-precision FP Unary + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1]>, + + // + // Single-precision FP Compare + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [1, 1]>, + // + // Double-precision FP Compare + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [1, 1]>, + // + // Single to Double FP Convert + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1]>, + // + // Double to Single FP Convert + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1]>, + + // + // Single to Half FP Convert + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU1], 4>, + InstrStage<1, [SW_ALU1]>], + [6, 1]>, + // + // Half to Single FP Convert + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1]>, + + // + // Single-Precision FP to Integer Convert + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1]>, + // + // Double-Precision FP to Integer Convert + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1]>, + // + // Integer to Single-Precision FP Convert + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1]>, + // + // Integer to Double-Precision FP Convert + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1]>, + // + // Single-precision FP ALU + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Double-precision FP ALU + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Single-precision FP Multiply + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1]>, + // + // Double-precision FP Multiply + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [6, 1, 1]>, + // + // Single-precision FP MAC + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [8, 1, 1]>, + // + // Double-precision FP MAC + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [12, 1, 1]>, + // + // Single-precision Fused FP MAC + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [8, 1, 1]>, + // + // Double-precision Fused FP MAC + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [12, 1, 1]>, + // + // Single-precision FP DIV + InstrItinData, + InstrStage<1, [SW_ALU1], 0>, + InstrStage<15, [SW_FDIV]>], + [17, 1, 1]>, + // + // Double-precision FP DIV + InstrItinData, + InstrStage<1, [SW_ALU1], 0>, + InstrStage<30, [SW_FDIV]>], + [32, 1, 1]>, + // + // Single-precision FP SQRT + InstrItinData, + InstrStage<1, [SW_ALU1], 0>, + InstrStage<15, [SW_FDIV]>], + [17, 1]>, + // + // Double-precision FP SQRT + InstrItinData, + InstrStage<1, [SW_ALU1], 0>, + InstrStage<30, [SW_FDIV]>], + [32, 1, 1]>, + + // + // Integer to Single-precision Move + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_LS], 4>, + InstrStage<1, [SW_ALU0]>], + [6, 1]>, + // + // 
Integer to Double-precision Move + InstrItinData, + InstrStage<1, [SW_LS]>], + [4, 1]>, + // + // Single-precision to Integer Move + InstrItinData, + InstrStage<1, [SW_LS]>], + [3, 1]>, + // + // Double-precision to Integer Move + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_LS], 3>, + InstrStage<1, [SW_LS]>], + [3, 4, 1]>, + // + // Single-precision FP Load + InstrItinData, + InstrStage<1, [SW_LS]>], + [4, 1]>, + // + // Double-precision FP Load + InstrItinData, + InstrStage<1, [SW_LS]>], + [4, 1]>, + // + // FP Load Multiple + // FIXME: Assumes a single Q register. + InstrItinData, + InstrStage<1, [SW_LS]>], + [1, 1, 1, 4], [], -1>, // dynamic uops + // + // FP Load Multiple + update + // FIXME: Assumes a single Q register. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_LS], 4>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1, 1, 4], [], -1>, // dynamic uops + // + // Single-precision FP Store + InstrItinData, + InstrStage<1, [SW_LS]>], + [1, 1]>, + // + // Double-precision FP Store + InstrItinData, + InstrStage<1, [SW_LS]>], + [1, 1]>, + // + // FP Store Multiple + // FIXME: Assumes a single Q register. + InstrItinData, + InstrStage<1, [SW_LS]>], + [1, 1, 1], [], -1>, // dynamic uops + // + // FP Store Multiple + update + // FIXME: Assumes a single Q register. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_LS], 4>, + InstrStage<1, [SW_ALU0, SW_ALU1]>], + [2, 1, 1, 1], [], -1>, // dynamic uops + // NEON + // + // Double-register Integer Unary + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1]>, + // + // Quad-register Integer Unary + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1]>, + // + // Double-register Integer Q-Unary + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1]>, + // + // Quad-register Integer CountQ-Unary + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1]>, + // + // Double-register Integer Binary + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Quad-register Integer Binary + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Double-register Integer Subtract + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Quad-register Integer Subtract + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Double-register Integer Shift + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Quad-register Integer Shift + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Double-register Integer Shift (4 cycle) + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + // + // Quad-register Integer Shift (4 cycle) + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + // + // Double-register Integer Binary (4 cycle) + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + // + // Quad-register Integer Binary (4 cycle) + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + // + // Double-register Integer Subtract (4 cycle) + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + // + // Quad-register Integer Subtract (4 cycle) + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + + // + // Double-register Integer Count + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Quad-register Integer Count + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1, 1]>, + // + // Double-register Absolute Difference and Accumulate + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1, 1]>, + // + // Quad-register Absolute Difference and Accumulate + InstrItinData, + 
InstrStage<1, [SW_ALU0]>], + [4, 1, 1, 1]>, + // + // Double-register Integer Pair Add Long + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + // + // Quad-register Integer Pair Add Long + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + + // + // Double-register Integer Multiply (.8, .16) + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1]>, + // + // Quad-register Integer Multiply (.8, .16) + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1]>, + + // + // Double-register Integer Multiply (.32) + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1]>, + // + // Quad-register Integer Multiply (.32) + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1]>, + // + // Double-register Integer Multiply-Accumulate (.8, .16) + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1, 1]>, + // + // Double-register Integer Multiply-Accumulate (.32) + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1, 1]>, + // + // Quad-register Integer Multiply-Accumulate (.8, .16) + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1, 1]>, + // + // Quad-register Integer Multiply-Accumulate (.32) + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1, 1]>, + + // + // Move + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1]>, + // + // Move Immediate + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2]>, + // + // Double-register Permute Move + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [2, 1]>, + // + // Quad-register Permute Move + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [2, 1]>, + // + // Integer to Single-precision Move + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_LS], 4>, + InstrStage<1, [SW_ALU0]>], + [6, 1]>, + // + // Integer to Double-precision Move + InstrItinData, + InstrStage<1, [SW_LS]>], + [4, 1, 1]>, + // + // Single-precision to Integer Move + InstrItinData, + InstrStage<1, [SW_LS]>], + [3, 1]>, + // + // Double-precision to Integer Move + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_LS], 3>, + InstrStage<1, [SW_LS]>], + [3, 4, 1]>, + // + // Integer to Lane Move + // FIXME: I think this is correct, but it is not clear from the tuning guide. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_LS], 4>, + InstrStage<1, [SW_ALU0]>], + [6, 1]>, + + // + // Vector narrow move + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [2, 1]>, + // + // Double-register FP Unary + // FIXME: VRECPE / VRSQRTE has a longer latency than VABS, which is used here, + // and they issue on a different pipeline. + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1]>, + // + // Quad-register FP Unary + // FIXME: VRECPE / VRSQRTE has a longer latency than VABS, which is used here, + // and they issue on a different pipeline. + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [2, 1]>, + // + // Double-register FP Binary + // FIXME: We're using this itin for many instructions. + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + + // + // VPADD, etc. 
+ InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + // + // Double-register FP VMUL + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1]>, + // + // Quad-register FP Binary + InstrItinData, + InstrStage<1, [SW_ALU0]>], + [4, 1, 1]>, + // + // Quad-register FP VMUL + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [4, 1, 1]>, + // + // Double-register FP Multiple-Accumulate + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [8, 1, 1]>, + // + // Quad-register FP Multiple-Accumulate + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [8, 1, 1]>, + // + // Double-register Fused FP Multiple-Accumulate + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [8, 1, 1]>, + // + // Quad-register FusedF P Multiple-Accumulate + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [8, 1, 1]>, + // + // Double-register Reciprical Step + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [8, 1, 1]>, + // + // Quad-register Reciprical Step + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [8, 1, 1]>, + // + // Double-register Permute + // FIXME: The latencies are unclear from the documentation. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [3, 4, 3, 4]>, + // + // Quad-register Permute + // FIXME: The latencies are unclear from the documentation. + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [3, 4, 3, 4]>, + // + // Quad-register Permute (3 cycle issue on A9) + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [3, 4, 3, 4]>, + + // + // Double-register VEXT + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [2, 1, 1]>, + // + // Quad-register VEXT + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [2, 1, 1]>, + // + // VTB + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [2, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [4, 1, 3, 3]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [6, 1, 3, 5, 5]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [8, 1, 3, 5, 7, 7]>, + // + // VTBX + InstrItinData, + InstrStage<1, [SW_ALU1]>], + [2, 1, 1]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [4, 1, 3, 3]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [6, 1, 3, 5, 5]>, + InstrItinData, + InstrStage<1, [SW_DIS1], 0>, + InstrStage<1, [SW_DIS2], 0>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1], 2>, + InstrStage<1, [SW_ALU1]>], + [8, 1, 3, 5, 7, 7]> +]>; + +// ===---------------------------------------------------------------------===// +// This following definitions describe the simple machine model which +// will replace itineraries. + +// Swift machine model for scheduling and other instruction cost heuristics. 
+def SwiftModel : SchedMachineModel { + let IssueWidth = 3; // 3 micro-ops are dispatched per cycle. + let MinLatency = 0; // Data dependencies are allowed within dispatch groups. + let LoadLatency = 3; + + let Itineraries = SwiftItineraries; +} + +// TODO: Add Swift processor and scheduler resources. diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp index 31d5d38..b33b3c9 100644 --- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp +++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp @@ -155,7 +155,7 @@ EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, TargetLowering::ArgListEntry Entry; // First argument: data pointer - Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*DAG.getContext()); + Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext()); Entry.Node = Dst; Entry.Ty = IntPtrTy; Args.push_back(Entry); diff --git a/lib/Target/ARM/ARMSubtarget.cpp b/lib/Target/ARM/ARMSubtarget.cpp index 4762854..bcc9db4 100644 --- a/lib/Target/ARM/ARMSubtarget.cpp +++ b/lib/Target/ARM/ARMSubtarget.cpp @@ -13,8 +13,9 @@ #include "ARMSubtarget.h" #include "ARMBaseRegisterInfo.h" +#include "ARMBaseInstrInfo.h" #include "llvm/GlobalValue.h" -#include "llvm/Target/TargetSubtargetInfo.h" +#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Support/CommandLine.h" #define GET_SUBTARGETINFO_TARGET_DESC @@ -31,6 +32,10 @@ static cl::opt DarwinUseMOVT("arm-darwin-use-movt", cl::init(true), cl::Hidden); static cl::opt +UseFusedMulOps("arm-use-mulops", + cl::init(true), cl::Hidden); + +static cl::opt StrictAlign("arm-strict-align", cl::Hidden, cl::desc("Disallow all unaligned memory accesses")); @@ -49,6 +54,7 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU, , HasVFPv4(false) , HasNEON(false) , UseNEONForSinglePrecisionFP(false) + , UseMulOps(UseFusedMulOps) , SlowFPVMLx(false) , HasVMLxForwarding(false) , SlowFPBrcc(false) @@ -63,6 +69,7 @@ ARMSubtarget::ARMSubtarget(const std::string &TT, const std::string &CPU, , HasFP16(false) , HasD16(false) , HasHardwareDivide(false) + , HasHardwareDivideInARM(false) , HasT2ExtractPack(false) , HasDataBarrier(false) , Pref32BitThumb(false) diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h index b394061..8e6b650 100644 --- a/lib/Target/ARM/ARMSubtarget.h +++ b/lib/Target/ARM/ARMSubtarget.h @@ -30,7 +30,7 @@ class StringRef; class ARMSubtarget : public ARMGenSubtargetInfo { protected: enum ARMProcFamilyEnum { - Others, CortexA8, CortexA9 + Others, CortexA8, CortexA9, CortexA15, Swift }; /// ARMProcFamily - ARM processor family: Cortex-A8, Cortex-A9, and others. @@ -57,6 +57,10 @@ protected: /// determine if NEON should actually be used. bool UseNEONForSinglePrecisionFP; + /// UseMulOps - True if non-microcoded fused integer multiply-add and + /// multiply-subtract instructions should be used. + bool UseMulOps; + /// SlowFPVMLx - If the VFP2 / NEON instructions are available, indicates /// whether the FP VML[AS] instructions are slow (if so, don't use them). bool SlowFPVMLx; @@ -107,6 +111,9 @@ protected: /// HasHardwareDivide - True if subtarget supports [su]div bool HasHardwareDivide; + /// HasHardwareDivideInARM - True if subtarget supports [su]div in ARM mode + bool HasHardwareDivideInARM; + /// HasT2ExtractPack - True if subtarget supports thumb2 extract/pack /// instructions. 
bool HasT2ExtractPack; @@ -199,7 +206,10 @@ protected: bool isCortexA8() const { return ARMProcFamily == CortexA8; } bool isCortexA9() const { return ARMProcFamily == CortexA9; } + bool isCortexA15() const { return ARMProcFamily == CortexA15; } + bool isSwift() const { return ARMProcFamily == Swift; } bool isCortexM3() const { return CPUString == "cortex-m3"; } + bool isLikeA9() const { return isCortexA9() || isCortexA15(); } bool hasARMOps() const { return !NoARM; } @@ -211,8 +221,10 @@ protected: return hasNEON() && UseNEONForSinglePrecisionFP; } bool hasDivide() const { return HasHardwareDivide; } + bool hasDivideInARMMode() const { return HasHardwareDivideInARM; } bool hasT2ExtractPack() const { return HasT2ExtractPack; } bool hasDataBarrier() const { return HasDataBarrier; } + bool useMulOps() const { return UseMulOps; } bool useFPVMLx() const { return !SlowFPVMLx; } bool hasVMLxForwarding() const { return HasVMLxForwarding; } bool isFPBrccSlow() const { return SlowFPBrcc; } diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp index 171c9ad..b486d4f 100644 --- a/lib/Target/ARM/ARMTargetMachine.cpp +++ b/lib/Target/ARM/ARMTargetMachine.cpp @@ -60,7 +60,7 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, CodeGenOpt::Level OL) : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), InstrInfo(Subtarget), - DataLayout(Subtarget.isAPCS_ABI() ? + DL(Subtarget.isAPCS_ABI() ? std::string("e-p:32:32-f64:32:64-i64:32:64-" "v128:32:128-v64:32:64-n32-S32") : Subtarget.isAAPCS_ABI() ? @@ -68,10 +68,10 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, "v128:64:128-v64:64:64-n32-S64") : std::string("e-p:32:32-f64:64:64-i64:64:64-" "v128:64:128-v64:64:64-n32-S32")), - ELFWriterInfo(*this), TLInfo(*this), TSInfo(*this), - FrameLowering(Subtarget) { + FrameLowering(Subtarget), + STTI(&TLInfo), VTTI(&TLInfo) { if (!Subtarget.hasARMOps()) report_fatal_error("CPU: '" + Subtarget.getCPUString() + "' does not " "support ARM mode execution!"); @@ -88,7 +88,7 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT, InstrInfo(Subtarget.hasThumb2() ? ((ARMBaseInstrInfo*)new Thumb2InstrInfo(Subtarget)) : ((ARMBaseInstrInfo*)new Thumb1InstrInfo(Subtarget))), - DataLayout(Subtarget.isAPCS_ABI() ? + DL(Subtarget.isAPCS_ABI() ? std::string("e-p:32:32-f64:32:64-i64:32:64-" "i16:16:32-i8:8:32-i1:8:32-" "v128:32:128-v64:32:64-a:0:32-n32-S32") : @@ -99,12 +99,12 @@ ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT, std::string("e-p:32:32-f64:64:64-i64:64:64-" "i16:16:32-i8:8:32-i1:8:32-" "v128:64:128-v64:64:64-a:0:32-n32-S32")), - ELFWriterInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(Subtarget.hasThumb2() ? new ARMFrameLowering(Subtarget) - : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)) { + : (ARMFrameLowering*)new Thumb1FrameLowering(Subtarget)), + STTI(&TLInfo), VTTI(&TLInfo) { } namespace { @@ -143,6 +143,11 @@ bool ARMPassConfig::addPreISel() { bool ARMPassConfig::addInstSelector() { addPass(createARMISelDag(getARMTargetMachine(), getOptLevel())); + + const ARMSubtarget *Subtarget = &getARMSubtarget(); + if (Subtarget->isTargetELF() && !Subtarget->isThumb1Only() && + TM->Options.EnableFastISel) + addPass(createARMGlobalBaseRegPass()); return false; } @@ -150,7 +155,7 @@ bool ARMPassConfig::addPreRegAlloc() { // FIXME: temporarily disabling load / store optimization pass for Thumb1. 
if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only()) addPass(createARMLoadStoreOptimizationPass(true)); - if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9()) + if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isLikeA9()) addPass(createMLxExpansionPass()); return true; } diff --git a/lib/Target/ARM/ARMTargetMachine.h b/lib/Target/ARM/ARMTargetMachine.h index abcdb24..ebdd5b4 100644 --- a/lib/Target/ARM/ARMTargetMachine.h +++ b/lib/Target/ARM/ARMTargetMachine.h @@ -15,7 +15,6 @@ #define ARMTARGETMACHINE_H #include "ARMInstrInfo.h" -#include "ARMELFWriterInfo.h" #include "ARMFrameLowering.h" #include "ARMJITInfo.h" #include "ARMSubtarget.h" @@ -25,7 +24,8 @@ #include "Thumb1FrameLowering.h" #include "Thumb2InstrInfo.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetTransformImpl.h" +#include "llvm/DataLayout.h" #include "llvm/MC/MCStreamer.h" #include "llvm/ADT/OwningPtr.h" @@ -62,11 +62,12 @@ public: class ARMTargetMachine : public ARMBaseTargetMachine { virtual void anchor(); ARMInstrInfo InstrInfo; - const TargetData DataLayout; // Calculates type size & alignment - ARMELFWriterInfo ELFWriterInfo; + const DataLayout DL; // Calculates type size & alignment ARMTargetLowering TLInfo; ARMSelectionDAGInfo TSInfo; ARMFrameLowering FrameLowering; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, @@ -88,12 +89,14 @@ class ARMTargetMachine : public ARMBaseTargetMachine { virtual const ARMFrameLowering *getFrameLowering() const { return &FrameLowering; } - - virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; } - virtual const TargetData *getTargetData() const { return &DataLayout; } - virtual const ARMELFWriterInfo *getELFWriterInfo() const { - return Subtarget.isTargetELF() ? &ELFWriterInfo : 0; + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; } + virtual const ARMInstrInfo *getInstrInfo() const { return &InstrInfo; } + virtual const DataLayout *getDataLayout() const { return &DL; } }; /// ThumbTargetMachine - Thumb target machine. @@ -104,12 +107,13 @@ class ThumbTargetMachine : public ARMBaseTargetMachine { virtual void anchor(); // Either Thumb1InstrInfo or Thumb2InstrInfo. OwningPtr InstrInfo; - const TargetData DataLayout; // Calculates type size & alignment - ARMELFWriterInfo ELFWriterInfo; + const DataLayout DL; // Calculates type size & alignment ARMTargetLowering TLInfo; ARMSelectionDAGInfo TSInfo; // Either Thumb1FrameLowering or ARMFrameLowering. OwningPtr FrameLowering; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: ThumbTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, @@ -138,10 +142,13 @@ public: virtual const ARMFrameLowering *getFrameLowering() const { return FrameLowering.get(); } - virtual const TargetData *getTargetData() const { return &DataLayout; } - virtual const ARMELFWriterInfo *getELFWriterInfo() const { - return Subtarget.isTargetELF() ? 
&ELFWriterInfo : 0; + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; } + virtual const DataLayout *getDataLayout() const { return &DL; } }; } // end namespace llvm diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index 3a5957b..c61e3bd 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -181,49 +181,44 @@ class ARMAsmParser : public MCTargetAsmParser { OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index); // Asm Match Converter Methods - bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, - const SmallVectorImpl &); - bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, - const SmallVectorImpl &); - bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, + void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl &); + void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl &); + void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, const SmallVectorImpl &); - bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, + void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, const SmallVectorImpl &); - bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, + void cvtLdWriteBackRegAddrMode2(MCInst &Inst, const SmallVectorImpl &); - bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, + void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, const SmallVectorImpl &); - bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, + void cvtStWriteBackRegAddrModeImm12(MCInst &Inst, const SmallVectorImpl &); - bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, + void cvtStWriteBackRegAddrMode2(MCInst &Inst, const SmallVectorImpl &); - bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, + void cvtStWriteBackRegAddrMode3(MCInst &Inst, const SmallVectorImpl &); - bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, + void cvtLdExtTWriteBackImm(MCInst &Inst, const SmallVectorImpl &); - bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, + void cvtLdExtTWriteBackReg(MCInst &Inst, const SmallVectorImpl &); - bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, + void cvtStExtTWriteBackImm(MCInst &Inst, const SmallVectorImpl &); - bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, + void cvtStExtTWriteBackReg(MCInst &Inst, const SmallVectorImpl &); - bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, - const SmallVectorImpl &); - bool cvtStrdPre(MCInst &Inst, unsigned Opcode, - const SmallVectorImpl &); - bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, + void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl &); + void cvtStrdPre(MCInst &Inst, const SmallVectorImpl &); + void cvtLdWriteBackRegAddrMode3(MCInst &Inst, const SmallVectorImpl &); - bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, + void cvtThumbMultiply(MCInst &Inst, const SmallVectorImpl &); - bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, + void cvtVLDwbFixed(MCInst &Inst, const SmallVectorImpl &); - bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, + void cvtVLDwbRegister(MCInst &Inst, const SmallVectorImpl &); - bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, + void cvtVSTwbFixed(MCInst &Inst, const SmallVectorImpl &); - bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, + void cvtVSTwbRegister(MCInst &Inst, const SmallVectorImpl &); - bool validateInstruction(MCInst &Inst, 
const SmallVectorImpl &Ops); bool processInstruction(MCInst &Inst, @@ -258,15 +253,17 @@ public: // Implementation of the MCTargetAsmParser interface: bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); - bool ParseInstruction(StringRef Name, SMLoc NameLoc, + bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, SmallVectorImpl &Operands); bool ParseDirective(AsmToken DirectiveID); unsigned checkTargetMatchPredicate(MCInst &Inst); - bool MatchAndEmitInstruction(SMLoc IDLoc, + bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out); + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm); }; } // end anonymous namespace @@ -486,7 +483,8 @@ public: SMLoc getStartLoc() const { return StartLoc; } /// getEndLoc - Get the location of the last token of this operand. SMLoc getEndLoc() const { return EndLoc; } - + /// getLocRange - Get the range between the first and last token of this + /// operand. SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } ARMCC::CondCodes getCondCode() const { @@ -862,7 +860,7 @@ public: bool isSPRRegList() const { return Kind == k_SPRRegisterList; } bool isToken() const { return Kind == k_Token; } bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } - bool isMemory() const { return Kind == k_Memory; } + bool isMem() const { return Kind == k_Memory; } bool isShifterImm() const { return Kind == k_ShifterImmediate; } bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } @@ -873,14 +871,14 @@ public: return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; } bool isMemNoOffset(bool alignOK = false) const { - if (!isMemory()) + if (!isMem()) return false; // No offset of any kind. return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && (alignOK || Memory.Alignment == 0); } bool isMemPCRelImm12() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) return false; // Base register must be PC. if (Memory.BaseRegNum != ARM::PC) @@ -894,7 +892,7 @@ public: return isMemNoOffset(true); } bool isAddrMode2() const { - if (!isMemory() || Memory.Alignment != 0) return false; + if (!isMem() || Memory.Alignment != 0) return false; // Check for register offset. if (Memory.OffsetRegNum) return true; // Immediate offset in range [-4095, 4095]. @@ -916,7 +914,7 @@ public: // and we reject it. if (isImm() && !isa(getImm())) return true; - if (!isMemory() || Memory.Alignment != 0) return false; + if (!isMem() || Memory.Alignment != 0) return false; // No shifts are legal for AM3. if (Memory.ShiftType != ARM_AM::no_shift) return false; // Check for register offset. @@ -946,7 +944,7 @@ public: // and we reject it. if (isImm() && !isa(getImm())) return true; - if (!isMemory() || Memory.Alignment != 0) return false; + if (!isMem() || Memory.Alignment != 0) return false; // Check for register offset. if (Memory.OffsetRegNum) return false; // Immediate offset in range [-1020, 1020] and a multiple of 4. 
@@ -956,25 +954,25 @@ public: Val == INT32_MIN; } bool isMemTBB() const { - if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || + if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) return false; return true; } bool isMemTBH() const { - if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || + if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || Memory.Alignment != 0 ) return false; return true; } bool isMemRegOffset() const { - if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) + if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0) return false; return true; } bool isT2MemRegOffset() const { - if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || + if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || Memory.Alignment != 0) return false; // Only lsl #{0, 1, 2, 3} allowed. @@ -987,14 +985,14 @@ public: bool isMemThumbRR() const { // Thumb reg+reg addressing is simple. Just two registers, a base and // an offset. No shifts, negations or any other complicating factors. - if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || + if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) return false; return isARMLowRegister(Memory.BaseRegNum) && (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); } bool isMemThumbRIs4() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || + if (!isMem() || Memory.OffsetRegNum != 0 || !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) return false; // Immediate offset, multiple of 4 in range [0, 124]. @@ -1003,7 +1001,7 @@ public: return Val >= 0 && Val <= 124 && (Val % 4) == 0; } bool isMemThumbRIs2() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || + if (!isMem() || Memory.OffsetRegNum != 0 || !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) return false; // Immediate offset, multiple of 4 in range [0, 62]. @@ -1012,7 +1010,7 @@ public: return Val >= 0 && Val <= 62 && (Val % 2) == 0; } bool isMemThumbRIs1() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || + if (!isMem() || Memory.OffsetRegNum != 0 || !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) return false; // Immediate offset in range [0, 31]. @@ -1021,7 +1019,7 @@ public: return Val >= 0 && Val <= 31; } bool isMemThumbSPI() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) return false; // Immediate offset, multiple of 4 in range [0, 1020]. @@ -1035,7 +1033,7 @@ public: // and we reject it. if (isImm() && !isa(getImm())) return true; - if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) return false; // Immediate offset a multiple of 4 in range [-1020, 1020]. if (!Memory.OffsetImm) return true; @@ -1044,7 +1042,7 @@ public: return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN; } bool isMemImm0_1020s4Offset() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) return false; // Immediate offset a multiple of 4 in range [0, 1020]. 
if (!Memory.OffsetImm) return true; @@ -1052,7 +1050,7 @@ public: return Val >= 0 && Val <= 1020 && (Val & 3) == 0; } bool isMemImm8Offset() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) return false; // Base reg of PC isn't allowed for these encodings. if (Memory.BaseRegNum == ARM::PC) return false; @@ -1062,7 +1060,7 @@ public: return (Val == INT32_MIN) || (Val > -256 && Val < 256); } bool isMemPosImm8Offset() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) return false; // Immediate offset in range [0, 255]. if (!Memory.OffsetImm) return true; @@ -1070,7 +1068,7 @@ public: return Val >= 0 && Val < 256; } bool isMemNegImm8Offset() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) return false; // Base reg of PC isn't allowed for these encodings. if (Memory.BaseRegNum == ARM::PC) return false; @@ -1080,7 +1078,7 @@ public: return (Val == INT32_MIN) || (Val > -256 && Val < 0); } bool isMemUImm12Offset() const { - if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) return false; // Immediate offset in range [0, 4095]. if (!Memory.OffsetImm) return true; @@ -1094,7 +1092,7 @@ public: if (isImm() && !isa(getImm())) return true; - if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) + if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) return false; // Immediate offset in range [-4095, 4095]. if (!Memory.OffsetImm) return true; @@ -3376,7 +3374,8 @@ ARMAsmParser::OperandMatchResultTy ARMAsmParser:: parseMSRMaskOperand(SmallVectorImpl &Operands) { SMLoc S = Parser.getTok().getLoc(); const AsmToken &Tok = Parser.getTok(); - assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); + if (!Tok.is(AsmToken::Identifier)) + return MatchOperand_NoMatch; StringRef Mask = Tok.getString(); if (isMClass()) { @@ -3880,8 +3879,8 @@ parseAM3Offset(SmallVectorImpl &Operands) { /// cvtT2LdrdPre - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl &Operands) { // Rt, Rt2 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); @@ -3892,14 +3891,13 @@ cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtT2StrdPre - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtT2StrdPre(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. 
Inst.addOperand(MCOperand::CreateReg(0)); @@ -3910,14 +3908,13 @@ cvtT2StrdPre(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, const SmallVectorImpl &Operands) { ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); @@ -3926,28 +3923,26 @@ cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. Inst.addOperand(MCOperand::CreateImm(0)); ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtLdWriteBackRegAddrMode2(MCInst &Inst, const SmallVectorImpl &Operands) { ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); @@ -3956,14 +3951,13 @@ cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, const SmallVectorImpl &Operands) { ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); @@ -3972,57 +3966,53 @@ cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtStWriteBackRegAddrModeImm12(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. 
Inst.addOperand(MCOperand::CreateImm(0)); ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtStWriteBackRegAddrMode2(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. Inst.addOperand(MCOperand::CreateImm(0)); ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtStWriteBackRegAddrMode3(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. Inst.addOperand(MCOperand::CreateImm(0)); ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtLdExtTWriteBackImm(MCInst &Inst, const SmallVectorImpl &Operands) { // Rt ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); @@ -4034,14 +4024,13 @@ cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtLdExtTWriteBackReg(MCInst &Inst, const SmallVectorImpl &Operands) { // Rt ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); @@ -4053,14 +4042,13 @@ cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtStExtTWriteBackImm(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. 
Inst.addOperand(MCOperand::CreateImm(0)); @@ -4072,14 +4060,13 @@ cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtStExtTWriteBackReg(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. Inst.addOperand(MCOperand::CreateImm(0)); @@ -4091,14 +4078,13 @@ cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtLdrdPre - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtLdrdPre(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtLdrdPre(MCInst &Inst, const SmallVectorImpl &Operands) { // Rt, Rt2 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); @@ -4109,14 +4095,13 @@ cvtLdrdPre(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtStrdPre - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtStrdPre(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtStrdPre(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. Inst.addOperand(MCOperand::CreateImm(0)); @@ -4127,40 +4112,27 @@ cvtStrdPre(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtLdWriteBackRegAddrMode3(MCInst &Inst, const SmallVectorImpl &Operands) { ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); // Create a writeback register dummy placeholder. Inst.addOperand(MCOperand::CreateImm(0)); ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } -/// cvtThumbMultiple- Convert parsed operands to MCInst. +/// cvtThumbMultiply - Convert parsed operands to MCInst. /// Needed here because the Asm Gen Matcher can't handle properly tied operands /// when they refer multiple MIOperands inside a single one. -bool ARMAsmParser:: -cvtThumbMultiply(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtThumbMultiply(MCInst &Inst, const SmallVectorImpl &Operands) { - // The second source operand must be the same register as the destination - // operand. 
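// ---------------------------------------------------------------------------
// Editorial note (illustrative only, not part of the patch): throughout this
// hunk the operand-conversion helpers drop their return value and their
// Opcode parameter because they can no longer fail -- the one diagnostic they
// used to emit (the tMUL "destination must match source" check deleted just
// below) moves into validateInstruction() later in this patch. A hedged
// sketch of the signature change, with the element type assumed to be
// MCParsedAsmOperand*:
#if 0
bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,                       // old
                  const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
void cvtT2LdrdPre(MCInst &Inst,                                        // new
                  const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
#endif
// ---------------------------------------------------------------------------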
- if (Operands.size() == 6 && - (((ARMOperand*)Operands[3])->getReg() != - ((ARMOperand*)Operands[5])->getReg()) && - (((ARMOperand*)Operands[3])->getReg() != - ((ARMOperand*)Operands[4])->getReg())) { - Error(Operands[3]->getStartLoc(), - "destination register must match source register"); - return false; - } ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); // If we have a three-operand form, make sure to set Rn to be the operand @@ -4173,12 +4145,10 @@ cvtThumbMultiply(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1); Inst.addOperand(Inst.getOperand(0)); ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); - - return true; } -bool ARMAsmParser:: -cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtVLDwbFixed(MCInst &Inst, const SmallVectorImpl &Operands) { // Vd ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); @@ -4188,11 +4158,10 @@ cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } -bool ARMAsmParser:: -cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtVLDwbRegister(MCInst &Inst, const SmallVectorImpl &Operands) { // Vd ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); @@ -4204,11 +4173,10 @@ cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } -bool ARMAsmParser:: -cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtVSTwbFixed(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. Inst.addOperand(MCOperand::CreateImm(0)); @@ -4218,11 +4186,10 @@ cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } -bool ARMAsmParser:: -cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, +void ARMAsmParser:: +cvtVSTwbRegister(MCInst &Inst, const SmallVectorImpl &Operands) { // Create a writeback register dummy placeholder. Inst.addOperand(MCOperand::CreateImm(0)); @@ -4234,7 +4201,6 @@ cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1); // pred ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); - return true; } /// Parse an ARM memory expression, return false if successful else return true @@ -4471,6 +4437,12 @@ bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) return Error(Loc, "immediate shift value out of range"); + // If #0, turn it into a no_shift. + if (Imm == 0) + St = ARM_AM::lsl; + // For consistency, treat lsr #32 and asr #32 as having immediate value 0. 
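// ---------------------------------------------------------------------------
// Editorial note (illustrative only, not part of the patch): a minimal sketch
// of the normalization the two added statements around this point perform,
// assuming the usual ARM convention that a shift of #0 degenerates to lsl and
// that "lsr #32" / "asr #32" are carried with an immediate of 0. ShiftKind is
// a stand-in for ARM_AM::ShiftOpc in this sketch.
enum class ShiftKind { lsl, lsr, asr, ror };
static inline void normalizeShiftImm(ShiftKind &St, long long &Imm) {
  if (Imm == 0)
    St = ShiftKind::lsl;   // "#0" means no shift at all
  if (Imm == 32)
    Imm = 0;               // lsr #32 / asr #32 are represented as 0
}
// ---------------------------------------------------------------------------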
+ if (Imm == 32) + Imm = 0; Amount = Imm; } @@ -4648,7 +4620,7 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl &Operands, return true; const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, - getContext()); + getContext()); E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); return false; @@ -4983,7 +4955,8 @@ static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) { static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features); /// Parse an arm instruction mnemonic followed by its operands. -bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc, +bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, SmallVectorImpl &Operands) { // Apply mnemonic aliases before doing anything else, as the destination // mnemnonic may include suffices and we want to handle them normally. @@ -5377,6 +5350,25 @@ validateInstruction(MCInst &Inst, "in register list"); break; } + case ARM::tMUL: { + // The second source operand must be the same register as the destination + // operand. + // + // In this case, we must directly check the parsed operands because the + // cvtThumbMultiply() function is written in such a way that it guarantees + // this first statement is always true for the new Inst. Essentially, the + // destination is unconditionally copied into the second source operand + // without checking to see if it matches what we actually parsed. + if (Operands.size() == 6 && + (((ARMOperand*)Operands[3])->getReg() != + ((ARMOperand*)Operands[5])->getReg()) && + (((ARMOperand*)Operands[3])->getReg() != + ((ARMOperand*)Operands[4])->getReg())) { + return Error(Operands[3]->getStartLoc(), + "destination register must match source register"); + } + break; + } // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, // so only issue a diagnostic for thumb1. The instructions will be // switched to the t2 encodings in processInstruction() if necessary. @@ -5678,6 +5670,20 @@ bool ARMAsmParser:: processInstruction(MCInst &Inst, const SmallVectorImpl &Operands) { switch (Inst.getOpcode()) { + // Alias for alternate form of 'ADR Rd, #imm' instruction. + case ARM::ADDri: { + if (Inst.getOperand(1).getReg() != ARM::PC || + Inst.getOperand(5).getReg() != 0) + return false; + MCInst TmpInst; + TmpInst.setOpcode(ARM::ADR); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(2)); + TmpInst.addOperand(Inst.getOperand(3)); + TmpInst.addOperand(Inst.getOperand(4)); + Inst = TmpInst; + return true; + } // Aliases for alternate PC+imm syntax of LDR instructions. 
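// ---------------------------------------------------------------------------
// Editorial note (illustrative only, not part of the patch): the ARM::ADDri
// case added above canonicalizes the alternate ADR spelling. In assembly
// terms (register and immediate chosen only for illustration):
//     add r0, pc, #8      ; parsed as ADDri with Rn == PC
//     adr r0, #8          ; form it is rewritten to by processInstruction()
// A hedged reading of the guard: only a plain "add Rd, pc, #imm" with nothing
// in the flag-setting operand slot is rewritten.
static inline bool looksLikeAdrAlias(unsigned RnReg, unsigned CcOutReg,
                                     unsigned PCReg) {
  return RnReg == PCReg && CcOutReg == 0;
}
// ---------------------------------------------------------------------------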
case ARM::t2LDRpcrel: Inst.setOpcode(ARM::t2LDRpci); @@ -7471,13 +7477,14 @@ unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { static const char *getSubtargetFeatureName(unsigned Val); bool ARMAsmParser:: -MatchAndEmitInstruction(SMLoc IDLoc, +MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out) { + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm) { MCInst Inst; - unsigned ErrorInfo; unsigned MatchResult; - MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); + MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, + MatchingInlineAsm); switch (MatchResult) { default: break; case Match_Success: @@ -7540,9 +7547,6 @@ MatchAndEmitInstruction(SMLoc IDLoc, case Match_MnemonicFail: return Error(IDLoc, "invalid instruction", ((ARMOperand*)Operands[0])->getLocRange()); - case Match_ConversionFail: - // The converter function will have already emitted a diagnostic. - return true; case Match_RequiresNotITBlock: return Error(IDLoc, "flag setting instruction only valid outside IT block"); case Match_RequiresITBlock: diff --git a/lib/Target/ARM/CMakeLists.txt b/lib/Target/ARM/CMakeLists.txt index ac916cc..377bd92 100644 --- a/lib/Target/ARM/CMakeLists.txt +++ b/lib/Target/ARM/CMakeLists.txt @@ -22,7 +22,6 @@ add_llvm_target(ARMCodeGen ARMCodeEmitter.cpp ARMConstantIslandPass.cpp ARMConstantPoolValue.cpp - ARMELFWriterInfo.cpp ARMExpandPseudoInsts.cpp ARMFastISel.cpp ARMFrameLowering.cpp diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp index c90751d..f00142d 100644 --- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -525,8 +525,9 @@ static bool tryAddingSymbolicOperand(uint64_t Address, int32_t Value, else ReferenceType = LLVMDisassembler_ReferenceType_InOut_None; const char *ReferenceName; - const char *Name = SymbolLookUp(DisInfo, Value, &ReferenceType, Address, - &ReferenceName); + uint64_t SymbolValue = 0x00000000ffffffffULL & Value; + const char *Name = SymbolLookUp(DisInfo, SymbolValue, &ReferenceType, + Address, &ReferenceName); if (Name) { SymbolicOp.AddSymbol.Name = Name; SymbolicOp.AddSymbol.Present = true; @@ -1523,6 +1524,8 @@ DecodeAddrMode2IdxInstruction(MCInst &Inst, unsigned Insn, return MCDisassembler::Fail; } unsigned amt = fieldFromInstruction(Insn, 7, 5); + if (Opc == ARM_AM::ror && amt == 0) + Opc = ARM_AM::rrx; unsigned imm = ARM_AM::getAM2Opc(Op, amt, Opc, idx_mode); Inst.addOperand(MCOperand::CreateImm(imm)); @@ -1564,6 +1567,9 @@ static DecodeStatus DecodeSORegMemOperand(MCInst &Inst, unsigned Val, break; } + if (ShOp == ARM_AM::ror && imm == 0) + ShOp = ARM_AM::rrx; + if (!Check(S, DecodeGPRRegisterClass(Inst, Rn, Address, Decoder))) return MCDisassembler::Fail; if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder))) @@ -2089,16 +2095,28 @@ static DecodeStatus DecodeAddrMode7Operand(MCInst &Inst, unsigned Val, static DecodeStatus DecodeT2BInstruction(MCInst &Inst, unsigned Insn, uint64_t Address, const void *Decoder) { - DecodeStatus S = MCDisassembler::Success; - unsigned imm = (fieldFromInstruction(Insn, 0, 11) << 0) | - (fieldFromInstruction(Insn, 11, 1) << 18) | - (fieldFromInstruction(Insn, 13, 1) << 17) | - (fieldFromInstruction(Insn, 16, 6) << 11) | - (fieldFromInstruction(Insn, 26, 1) << 19); - if (!tryAddingSymbolicOperand(Address, Address + SignExtend32<20>(imm<<1) + 4, + DecodeStatus Status = MCDisassembler::Success; + + // Note the J1 and J2 
values are from the encoded instruction. So here + // change them to I1 and I2 values via as documented: + // I1 = NOT(J1 EOR S); + // I2 = NOT(J2 EOR S); + // and build the imm32 with one trailing zero as documented: + // imm32 = SignExtend(S:I1:I2:imm10:imm11:'0', 32); + unsigned S = fieldFromInstruction(Insn, 26, 1); + unsigned J1 = fieldFromInstruction(Insn, 13, 1); + unsigned J2 = fieldFromInstruction(Insn, 11, 1); + unsigned I1 = !(J1 ^ S); + unsigned I2 = !(J2 ^ S); + unsigned imm10 = fieldFromInstruction(Insn, 16, 10); + unsigned imm11 = fieldFromInstruction(Insn, 0, 11); + unsigned tmp = (S << 23) | (I1 << 22) | (I2 << 21) | (imm10 << 11) | imm11; + int imm32 = SignExtend32<24>(tmp << 1); + if (!tryAddingSymbolicOperand(Address, Address + imm32 + 4, true, 4, Inst, Decoder)) - Inst.addOperand(MCOperand::CreateImm(SignExtend32<20>(imm << 1))); - return S; + Inst.addOperand(MCOperand::CreateImm(imm32)); + + return Status; } static DecodeStatus @@ -2701,6 +2719,8 @@ static DecodeStatus DecodeVLD1DupInstruction(MCInst &Inst, unsigned Insn, unsigned align = fieldFromInstruction(Insn, 4, 1); unsigned size = fieldFromInstruction(Insn, 6, 2); + if (size == 0 && align == 1) + return MCDisassembler::Fail; align *= (1 << size); switch (Inst.getOpcode()) { @@ -2831,6 +2851,8 @@ static DecodeStatus DecodeVLD4DupInstruction(MCInst &Inst, unsigned Insn, unsigned align = fieldFromInstruction(Insn, 4, 1); if (size == 0x3) { + if (align == 0) + return MCDisassembler::Fail; size = 4; align = 16; } else { @@ -3170,7 +3192,7 @@ static DecodeStatus DecodeT2Imm8S4(MCInst &Inst, unsigned Val, int imm = Val & 0xFF; if (!(Val & 0x100)) imm *= -1; - Inst.addOperand(MCOperand::CreateImm(imm << 2)); + Inst.addOperand(MCOperand::CreateImm(imm * 4)); } return MCDisassembler::Success; @@ -3710,8 +3732,16 @@ static DecodeStatus DecodeVLD1LN(MCInst &Inst, unsigned Insn, if (fieldFromInstruction(Insn, 6, 1)) return MCDisassembler::Fail; // UNDEFINED index = fieldFromInstruction(Insn, 7, 1); - if (fieldFromInstruction(Insn, 4, 2) != 0) - align = 4; + + switch (fieldFromInstruction(Insn, 4, 2)) { + case 0 : + align = 0; break; + case 3: + align = 4; break; + default: + return MCDisassembler::Fail; + } + break; } if (!Check(S, DecodeDPRRegisterClass(Inst, Rd, Address, Decoder))) @@ -3769,8 +3799,16 @@ static DecodeStatus DecodeVST1LN(MCInst &Inst, unsigned Insn, if (fieldFromInstruction(Insn, 6, 1)) return MCDisassembler::Fail; // UNDEFINED index = fieldFromInstruction(Insn, 7, 1); - if (fieldFromInstruction(Insn, 4, 2) != 0) - align = 4; + + switch (fieldFromInstruction(Insn, 4, 2)) { + case 0: + align = 0; break; + case 3: + align = 4; break; + default: + return MCDisassembler::Fail; + } + break; } if (Rm != 0xF) { // Writeback @@ -4090,8 +4128,15 @@ static DecodeStatus DecodeVLD4LN(MCInst &Inst, unsigned Insn, inc = 2; break; case 2: - if (fieldFromInstruction(Insn, 4, 2)) - align = 4 << fieldFromInstruction(Insn, 4, 2); + switch (fieldFromInstruction(Insn, 4, 2)) { + case 0: + align = 0; break; + case 3: + return MCDisassembler::Fail; + default: + align = 4 << fieldFromInstruction(Insn, 4, 2); break; + } + index = fieldFromInstruction(Insn, 7, 1); if (fieldFromInstruction(Insn, 6, 1)) inc = 2; @@ -4164,8 +4209,15 @@ static DecodeStatus DecodeVST4LN(MCInst &Inst, unsigned Insn, inc = 2; break; case 2: - if (fieldFromInstruction(Insn, 4, 2)) - align = 4 << fieldFromInstruction(Insn, 4, 2); + switch (fieldFromInstruction(Insn, 4, 2)) { + case 0: + align = 0; break; + case 3: + return MCDisassembler::Fail; + default: + 
align = 4 << fieldFromInstruction(Insn, 4, 2); break; + } + index = fieldFromInstruction(Insn, 7, 1); if (fieldFromInstruction(Insn, 6, 1)) inc = 2; diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp index 8b9109e..dcc41d9 100644 --- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp +++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.cpp @@ -29,11 +29,33 @@ using namespace llvm; /// /// getSORegOffset returns an integer from 0-31, representing '32' as 0. static unsigned translateShiftImm(unsigned imm) { + // lsr #32 and asr #32 exist, but should be encoded as a 0. + assert((imm & ~0x1f) == 0 && "Invalid shift encoding"); + if (imm == 0) return 32; return imm; } +/// Prints the shift value with an immediate value. +static void printRegImmShift(raw_ostream &O, ARM_AM::ShiftOpc ShOpc, + unsigned ShImm, bool UseMarkup) { + if (ShOpc == ARM_AM::no_shift || (ShOpc == ARM_AM::lsl && !ShImm)) + return; + O << ", "; + + assert (!(ShOpc == ARM_AM::ror && !ShImm) && "Cannot have ror #0"); + O << getShiftOpcStr(ShOpc); + + if (ShOpc != ARM_AM::rrx) { + O << " "; + if (UseMarkup) + O << ""; + } +} ARMInstPrinter::ARMInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, @@ -45,7 +67,9 @@ ARMInstPrinter::ARMInstPrinter(const MCAsmInfo &MAI, } void ARMInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const { - OS << getRegisterName(RegNo); + OS << markup(""); } void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O, @@ -85,10 +109,13 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O, printSBitModifierOperand(MI, 6, O); printPredicateOperand(MI, 4, O); - O << '\t' << getRegisterName(Dst.getReg()) - << ", " << getRegisterName(MO1.getReg()); + O << '\t'; + printRegName(O, Dst.getReg()); + O << ", "; + printRegName(O, MO1.getReg()); - O << ", " << getRegisterName(MO2.getReg()); + O << ", "; + printRegName(O, MO2.getReg()); assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0); printAnnotation(O, Annot); return; @@ -104,15 +131,20 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O, printSBitModifierOperand(MI, 5, O); printPredicateOperand(MI, 3, O); - O << '\t' << getRegisterName(Dst.getReg()) - << ", " << getRegisterName(MO1.getReg()); + O << '\t'; + printRegName(O, Dst.getReg()); + O << ", "; + printRegName(O, MO1.getReg()); if (ARM_AM::getSORegShOp(MO2.getImm()) == ARM_AM::rrx) { printAnnotation(O, Annot); return; } - O << ", #" << translateShiftImm(ARM_AM::getSORegOffset(MO2.getImm())); + O << ", " + << markup(""); printAnnotation(O, Annot); return; } @@ -136,7 +168,9 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O, MI->getOperand(3).getImm() == -4) { O << '\t' << "push"; printPredicateOperand(MI, 4, O); - O << "\t{" << getRegisterName(MI->getOperand(1).getReg()) << "}"; + O << "\t{"; + printRegName(O, MI->getOperand(1).getReg()); + O << "}"; printAnnotation(O, Annot); return; } @@ -159,7 +193,9 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O, MI->getOperand(4).getImm() == 4) { O << '\t' << "pop"; printPredicateOperand(MI, 5, O); - O << "\t{" << getRegisterName(MI->getOperand(0).getReg()) << "}"; + O << "\t{"; + printRegName(O, MI->getOperand(0).getReg()); + O << "}"; printAnnotation(O, Annot); return; } @@ -198,7 +234,8 @@ void ARMInstPrinter::printInst(const MCInst *MI, raw_ostream &O, O << "\tldm"; printPredicateOperand(MI, 1, O); - O << '\t' << getRegisterName(BaseReg); + O << '\t'; + printRegName(O, BaseReg); if (Writeback) O << "!"; O << ", "; 
printRegisterList(MI, 3, O); @@ -224,9 +261,11 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, const MCOperand &Op = MI->getOperand(OpNo); if (Op.isReg()) { unsigned Reg = Op.getReg(); - O << getRegisterName(Reg); + printRegName(O, Reg); } else if (Op.isImm()) { - O << '#' << Op.getImm(); + O << markup(""); } else { assert(Op.isExpr() && "unknown operand kind in printOperand"); // If a symbolic branch target was added as a constant expression then print @@ -244,13 +283,16 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, } } -void ARMInstPrinter::printT2LdrLabelOperand(const MCInst *MI, unsigned OpNum, - raw_ostream &O) { +void ARMInstPrinter::printThumbLdrLabelOperand(const MCInst *MI, unsigned OpNum, + raw_ostream &O) { const MCOperand &MO1 = MI->getOperand(OpNum); if (MO1.isExpr()) O << *MO1.getExpr(); - else if (MO1.isImm()) - O << "[pc, #" << MO1.getImm() << "]"; + else if (MO1.isImm()) { + O << markup("]>", "]"); + } else llvm_unreachable("Unknown LDR label operand?"); } @@ -266,7 +308,7 @@ void ARMInstPrinter::printSORegRegOperand(const MCInst *MI, unsigned OpNum, const MCOperand &MO2 = MI->getOperand(OpNum+1); const MCOperand &MO3 = MI->getOperand(OpNum+2); - O << getRegisterName(MO1.getReg()); + printRegName(O, MO1.getReg()); // Print the shift opc. ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO3.getImm()); @@ -274,7 +316,8 @@ void ARMInstPrinter::printSORegRegOperand(const MCInst *MI, unsigned OpNum, if (ShOpc == ARM_AM::rrx) return; - O << ' ' << getRegisterName(MO2.getReg()); + O << ' '; + printRegName(O, MO2.getReg()); assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0); } @@ -283,14 +326,11 @@ void ARMInstPrinter::printSORegImmOperand(const MCInst *MI, unsigned OpNum, const MCOperand &MO1 = MI->getOperand(OpNum); const MCOperand &MO2 = MI->getOperand(OpNum+1); - O << getRegisterName(MO1.getReg()); + printRegName(O, MO1.getReg()); // Print the shift opc. 
- ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO2.getImm()); - O << ", " << ARM_AM::getShiftOpcStr(ShOpc); - if (ShOpc == ARM_AM::rrx) - return; - O << " #" << translateShiftImm(ARM_AM::getSORegOffset(MO2.getImm())); + printRegImmShift(O, ARM_AM::getSORegShOp(MO2.getImm()), + ARM_AM::getSORegOffset(MO2.getImm()), UseMarkup); } @@ -304,67 +344,51 @@ void ARMInstPrinter::printAM2PreOrOffsetIndexOp(const MCInst *MI, unsigned Op, const MCOperand &MO2 = MI->getOperand(Op+1); const MCOperand &MO3 = MI->getOperand(Op+2); - O << "[" << getRegisterName(MO1.getReg()); + O << markup("getOperand(Op); - const MCOperand &MO2 = MI->getOperand(Op+1); - const MCOperand &MO3 = MI->getOperand(Op+2); - - O << "[" << getRegisterName(MO1.getReg()) << "], "; - - if (!MO2.getReg()) { - unsigned ImmOffs = ARM_AM::getAM2Offset(MO3.getImm()); - O << '#' - << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm())) - << ImmOffs; + << ARM_AM::getAM2Offset(MO3.getImm()) + << markup(">"); + } + O << "]" << markup(">"); return; } - O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm())) - << getRegisterName(MO2.getReg()); + O << ", "; + O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO3.getImm())); + printRegName(O, MO2.getReg()); - if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm())) - O << ", " - << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO3.getImm())) - << " #" << ShImm; + printRegImmShift(O, ARM_AM::getAM2ShiftOpc(MO3.getImm()), + ARM_AM::getAM2Offset(MO3.getImm()), UseMarkup); + O << "]" << markup(">"); } void ARMInstPrinter::printAddrModeTBB(const MCInst *MI, unsigned Op, raw_ostream &O) { const MCOperand &MO1 = MI->getOperand(Op); const MCOperand &MO2 = MI->getOperand(Op+1); - O << "[" << getRegisterName(MO1.getReg()) << ", " - << getRegisterName(MO2.getReg()) << "]"; + O << markup(""); } void ARMInstPrinter::printAddrModeTBH(const MCInst *MI, unsigned Op, raw_ostream &O) { const MCOperand &MO1 = MI->getOperand(Op); const MCOperand &MO2 = MI->getOperand(Op+1); - O << "[" << getRegisterName(MO1.getReg()) << ", " - << getRegisterName(MO2.getReg()) << ", lsl #1]"; + O << markup("") << "]" << markup(">"); } void ARMInstPrinter::printAddrMode2Operand(const MCInst *MI, unsigned Op, @@ -376,13 +400,13 @@ void ARMInstPrinter::printAddrMode2Operand(const MCInst *MI, unsigned Op, return; } +#ifndef NDEBUG const MCOperand &MO3 = MI->getOperand(Op+2); unsigned IdxMode = ARM_AM::getAM2IdxMode(MO3.getImm()); + assert(IdxMode != ARMII::IndexModePost && + "Should be pre or offset index op"); +#endif - if (IdxMode == ARMII::IndexModePost) { - printAM2PostIndexOp(MI, Op, O); - return; - } printAM2PreOrOffsetIndexOp(MI, Op, O); } @@ -394,19 +418,18 @@ void ARMInstPrinter::printAddrMode2OffsetOperand(const MCInst *MI, if (!MO1.getReg()) { unsigned ImmOffs = ARM_AM::getAM2Offset(MO2.getImm()); - O << '#' - << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm())) - << ImmOffs; + O << markup(""); return; } - O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm())) - << getRegisterName(MO1.getReg()); + O << ARM_AM::getAddrOpcStr(ARM_AM::getAM2Op(MO2.getImm())); + printRegName(O, MO1.getReg()); - if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm())) - O << ", " - << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO2.getImm())) - << " #" << ShImm; + printRegImmShift(O, ARM_AM::getAM2ShiftOpc(MO2.getImm()), + ARM_AM::getAM2Offset(MO2.getImm()), UseMarkup); } //===--------------------------------------------------------------------===// @@ -419,18 +442,22 @@ void ARMInstPrinter::printAM3PostIndexOp(const MCInst *MI, unsigned 
Op, const MCOperand &MO2 = MI->getOperand(Op+1); const MCOperand &MO3 = MI->getOperand(Op+2); - O << "[" << getRegisterName(MO1.getReg()) << "], "; + O << markup(""); if (MO2.getReg()) { - O << (char)ARM_AM::getAM3Op(MO3.getImm()) - << getRegisterName(MO2.getReg()); + O << (char)ARM_AM::getAM3Op(MO3.getImm()); + printRegName(O, MO2.getReg()); return; } unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()); - O << '#' + O << markup(""); } void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op, @@ -439,23 +466,29 @@ void ARMInstPrinter::printAM3PreOrOffsetIndexOp(const MCInst *MI, unsigned Op, const MCOperand &MO2 = MI->getOperand(Op+1); const MCOperand &MO3 = MI->getOperand(Op+2); - O << '[' << getRegisterName(MO1.getReg()); + O << markup(""); return; } - //If the op is sub we have to print the immediate even if it is 0 + //If the op is sub we have to print the immediate even if it is 0 unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()); ARM_AM::AddrOpc op = ARM_AM::getAM3Op(MO3.getImm()); - - if (ImmOffs || (op == ARM_AM::sub)) - O << ", #" + + if (ImmOffs || (op == ARM_AM::sub)) { + O << ", " + << markup(""); + } + O << ']' << markup(">"); } void ARMInstPrinter::printAddrMode3Operand(const MCInst *MI, unsigned Op, @@ -483,15 +516,15 @@ void ARMInstPrinter::printAddrMode3OffsetOperand(const MCInst *MI, const MCOperand &MO2 = MI->getOperand(OpNum+1); if (MO1.getReg()) { - O << getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm())) - << getRegisterName(MO1.getReg()); + O << getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm())); + printRegName(O, MO1.getReg()); return; } unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm()); - O << '#' - << ARM_AM::getAddrOpcStr(ARM_AM::getAM3Op(MO2.getImm())) - << ImmOffs; + O << markup(""); } void ARMInstPrinter::printPostIdxImm8Operand(const MCInst *MI, @@ -499,7 +532,9 @@ void ARMInstPrinter::printPostIdxImm8Operand(const MCInst *MI, raw_ostream &O) { const MCOperand &MO = MI->getOperand(OpNum); unsigned Imm = MO.getImm(); - O << '#' << ((Imm & 256) ? "" : "-") << (Imm & 0xff); + O << markup(""); } void ARMInstPrinter::printPostIdxRegOperand(const MCInst *MI, unsigned OpNum, @@ -507,7 +542,8 @@ void ARMInstPrinter::printPostIdxRegOperand(const MCInst *MI, unsigned OpNum, const MCOperand &MO1 = MI->getOperand(OpNum); const MCOperand &MO2 = MI->getOperand(OpNum+1); - O << (MO2.getImm() ? "" : "-") << getRegisterName(MO1.getReg()); + O << (MO2.getImm() ? "" : "-"); + printRegName(O, MO1.getReg()); } void ARMInstPrinter::printPostIdxImm8s4Operand(const MCInst *MI, @@ -515,7 +551,9 @@ void ARMInstPrinter::printPostIdxImm8s4Operand(const MCInst *MI, raw_ostream &O) { const MCOperand &MO = MI->getOperand(OpNum); unsigned Imm = MO.getImm(); - O << '#' << ((Imm & 256) ? 
"" : "-") << ((Imm & 0xff) << 2); + O << markup(""); } @@ -536,16 +574,20 @@ void ARMInstPrinter::printAddrMode5Operand(const MCInst *MI, unsigned OpNum, return; } - O << "[" << getRegisterName(MO1.getReg()); + O << markup(""); } - O << "]"; + O << "]" << markup(">"); } void ARMInstPrinter::printAddrMode6Operand(const MCInst *MI, unsigned OpNum, @@ -553,18 +595,21 @@ void ARMInstPrinter::printAddrMode6Operand(const MCInst *MI, unsigned OpNum, const MCOperand &MO1 = MI->getOperand(OpNum); const MCOperand &MO2 = MI->getOperand(OpNum+1); - O << "[" << getRegisterName(MO1.getReg()); + O << markup(""); } void ARMInstPrinter::printAddrMode7Operand(const MCInst *MI, unsigned OpNum, raw_ostream &O) { const MCOperand &MO1 = MI->getOperand(OpNum); - O << "[" << getRegisterName(MO1.getReg()) << "]"; + O << markup(""); } void ARMInstPrinter::printAddrMode6OffsetOperand(const MCInst *MI, @@ -573,8 +618,10 @@ void ARMInstPrinter::printAddrMode6OffsetOperand(const MCInst *MI, const MCOperand &MO = MI->getOperand(OpNum); if (MO.getReg() == 0) O << "!"; - else - O << ", " << getRegisterName(MO.getReg()); + else { + O << ", "; + printRegName(O, MO.getReg()); + } } void ARMInstPrinter::printBitfieldInvMaskImmOperand(const MCInst *MI, @@ -585,7 +632,9 @@ void ARMInstPrinter::printBitfieldInvMaskImmOperand(const MCInst *MI, int32_t lsb = CountTrailingZeros_32(v); int32_t width = (32 - CountLeadingZeros_32 (v)) - lsb; assert(MO.isImm() && "Not a valid bf_inv_mask_imm value!"); - O << '#' << lsb << ", #" << width; + O << markup("") + << ", " + << markup(""); } void ARMInstPrinter::printMemBOption(const MCInst *MI, unsigned OpNum, @@ -599,10 +648,18 @@ void ARMInstPrinter::printShiftImmOperand(const MCInst *MI, unsigned OpNum, unsigned ShiftOp = MI->getOperand(OpNum).getImm(); bool isASR = (ShiftOp & (1 << 5)) != 0; unsigned Amt = ShiftOp & 0x1f; - if (isASR) - O << ", asr #" << (Amt == 0 ? 
32 : Amt); - else if (Amt) - O << ", lsl #" << Amt; + if (isASR) { + O << ", asr " + << markup(""); + } + else if (Amt) { + O << ", lsl " + << markup(""); + } } void ARMInstPrinter::printPKHLSLShiftImm(const MCInst *MI, unsigned OpNum, @@ -611,7 +668,7 @@ void ARMInstPrinter::printPKHLSLShiftImm(const MCInst *MI, unsigned OpNum, if (Imm == 0) return; assert(Imm > 0 && Imm < 32 && "Invalid PKH shift immediate value!"); - O << ", lsl #" << Imm; + O << ", lsl " << markup(""); } void ARMInstPrinter::printPKHASRShiftImm(const MCInst *MI, unsigned OpNum, @@ -621,7 +678,7 @@ void ARMInstPrinter::printPKHASRShiftImm(const MCInst *MI, unsigned OpNum, if (Imm == 0) Imm = 32; assert(Imm > 0 && Imm <= 32 && "Invalid PKH shift immediate value!"); - O << ", asr #" << Imm; + O << ", asr " << markup(""); } void ARMInstPrinter::printRegisterList(const MCInst *MI, unsigned OpNum, @@ -629,7 +686,7 @@ void ARMInstPrinter::printRegisterList(const MCInst *MI, unsigned OpNum, O << "{"; for (unsigned i = OpNum, e = MI->getNumOperands(); i != e; ++i) { if (i != OpNum) O << ", "; - O << getRegisterName(MI->getOperand(i).getReg()); + printRegName(O, MI->getOperand(i).getReg()); } O << "}"; } @@ -803,23 +860,29 @@ void ARMInstPrinter::printAdrLabelOperand(const MCInst *MI, unsigned OpNum, int32_t OffImm = (int32_t)MO.getImm(); + O << markup(""); } void ARMInstPrinter::printThumbS4ImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) { - O << "#" << MI->getOperand(OpNum).getImm() * 4; + O << markup("getOperand(OpNum).getImm() * 4 + << markup(">"); } void ARMInstPrinter::printThumbSRImm(const MCInst *MI, unsigned OpNum, raw_ostream &O) { unsigned Imm = MI->getOperand(OpNum).getImm(); - O << "#" << (Imm == 0 ? 32 : Imm); + O << markup(""); } void ARMInstPrinter::printThumbITMask(const MCInst *MI, unsigned OpNum, @@ -849,10 +912,13 @@ void ARMInstPrinter::printThumbAddrModeRROperand(const MCInst *MI, unsigned Op, return; } - O << "[" << getRegisterName(MO1.getReg()); - if (unsigned RegNum = MO2.getReg()) - O << ", " << getRegisterName(RegNum); - O << "]"; + O << markup(""); } void ARMInstPrinter::printThumbAddrModeImm5SOperand(const MCInst *MI, @@ -867,10 +933,15 @@ void ARMInstPrinter::printThumbAddrModeImm5SOperand(const MCInst *MI, return; } - O << "[" << getRegisterName(MO1.getReg()); - if (unsigned ImmOffs = MO2.getImm()) - O << ", #" << ImmOffs * Scale; - O << "]"; + O << markup(""); + } + O << "]" << markup(">"); } void ARMInstPrinter::printThumbAddrModeImm5S1Operand(const MCInst *MI, @@ -906,14 +977,12 @@ void ARMInstPrinter::printT2SOOperand(const MCInst *MI, unsigned OpNum, const MCOperand &MO2 = MI->getOperand(OpNum+1); unsigned Reg = MO1.getReg(); - O << getRegisterName(Reg); + printRegName(O, Reg); // Print the shift opc. 
assert(MO2.isImm() && "Not a valid t2_so_reg value!"); - ARM_AM::ShiftOpc ShOpc = ARM_AM::getSORegShOp(MO2.getImm()); - O << ", " << ARM_AM::getShiftOpcStr(ShOpc); - if (ShOpc != ARM_AM::rrx) - O << " #" << translateShiftImm(ARM_AM::getSORegOffset(MO2.getImm())); + printRegImmShift(O, ARM_AM::getSORegShOp(MO2.getImm()), + ARM_AM::getSORegOffset(MO2.getImm()), UseMarkup); } void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum, @@ -926,18 +995,27 @@ void ARMInstPrinter::printAddrModeImm12Operand(const MCInst *MI, unsigned OpNum, return; } - O << "[" << getRegisterName(MO1.getReg()); + O << markup(" 0) - O << ", #" << OffImm; - O << "]"; + if (isSub) { + O << ", " + << markup(""); + } + else if (OffImm > 0) { + O << ", " + << markup(""); + } + O << "]" << markup(">"); } void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI, @@ -946,17 +1024,24 @@ void ARMInstPrinter::printT2AddrModeImm8Operand(const MCInst *MI, const MCOperand &MO1 = MI->getOperand(OpNum); const MCOperand &MO2 = MI->getOperand(OpNum+1); - O << "[" << getRegisterName(MO1.getReg()); + O << markup(" 0) - O << ", #" << OffImm; - O << "]"; + O << "#" << OffImm; + if (OffImm != 0 && UseMarkup) + O << ">"; + O << "]" << markup(">"); } void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI, @@ -970,20 +1055,27 @@ void ARMInstPrinter::printT2AddrModeImm8s4Operand(const MCInst *MI, return; } - O << "[" << getRegisterName(MO1.getReg()); + O << markup(" 0) - O << ", #" << OffImm; - O << "]"; + O << "#" << OffImm; + if (OffImm != 0 && UseMarkup) + O << ">"; + O << "]" << markup(">"); } void ARMInstPrinter::printT2AddrModeImm0_1020s4Operand(const MCInst *MI, @@ -992,10 +1084,15 @@ void ARMInstPrinter::printT2AddrModeImm0_1020s4Operand(const MCInst *MI, const MCOperand &MO1 = MI->getOperand(OpNum); const MCOperand &MO2 = MI->getOperand(OpNum+1); - O << "[" << getRegisterName(MO1.getReg()); - if (MO2.getImm()) - O << ", #" << MO2.getImm() * 4; - O << "]"; + O << markup(""); + } + O << "]" << markup(">"); } void ARMInstPrinter::printT2AddrModeImm8OffsetOperand(const MCInst *MI, @@ -1003,11 +1100,12 @@ void ARMInstPrinter::printT2AddrModeImm8OffsetOperand(const MCInst *MI, raw_ostream &O) { const MCOperand &MO1 = MI->getOperand(OpNum); int32_t OffImm = (int32_t)MO1.getImm(); - // Don't print +0. + O << ", " << markup(""); } void ARMInstPrinter::printT2AddrModeImm8s4OffsetOperand(const MCInst *MI, @@ -1019,12 +1117,18 @@ void ARMInstPrinter::printT2AddrModeImm8s4OffsetOperand(const MCInst *MI, assert(((OffImm & 0x3) == 0) && "Not a valid immediate!"); // Don't print +0. 
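// ---------------------------------------------------------------------------
// Editorial note (illustrative only, not part of the patch): several of the
// offset printers around here, like the operand predicates near the top of
// this patch that accept Val == INT32_MIN, appear to treat INT32_MIN as a
// sentinel meaning "#-0" (a subtraction of zero), so a plain zero offset is
// suppressed while a negative zero still prints. Hedged sketch of that
// assumed convention:
#include <cstdint>
static inline bool isMinusZeroOffset(int32_t OffImm) {
  return OffImm == INT32_MIN;   // prints as "#-0" instead of being omitted
}
// ---------------------------------------------------------------------------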
+ if (OffImm != 0) + O << ", "; + if (OffImm != 0 && UseMarkup) + O << " 0) - O << ", #" << OffImm; + O << "#" << OffImm; + if (OffImm != 0 && UseMarkup) + O << ">"; } void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI, @@ -1034,23 +1138,30 @@ void ARMInstPrinter::printT2AddrModeSoRegOperand(const MCInst *MI, const MCOperand &MO2 = MI->getOperand(OpNum+1); const MCOperand &MO3 = MI->getOperand(OpNum+2); - O << "[" << getRegisterName(MO1.getReg()); + O << markup(""); } - O << "]"; + O << "]" << markup(">"); } void ARMInstPrinter::printFPImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) { const MCOperand &MO = MI->getOperand(OpNum); - O << '#' << ARM_AM::getFPImmFloat(MO.getImm()); + O << markup(""); } void ARMInstPrinter::printNEONModImmOperand(const MCInst *MI, unsigned OpNum, @@ -1058,14 +1169,18 @@ void ARMInstPrinter::printNEONModImmOperand(const MCInst *MI, unsigned OpNum, unsigned EncodedImm = MI->getOperand(OpNum).getImm(); unsigned EltBits; uint64_t Val = ARM_AM::decodeNEONModImm(EncodedImm, EltBits); - O << "#0x"; + O << markup(""); } void ARMInstPrinter::printImmPlusOneOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O) { unsigned Imm = MI->getOperand(OpNum).getImm(); - O << "#" << Imm + 1; + O << markup(""); } void ARMInstPrinter::printRotImmOperand(const MCInst *MI, unsigned OpNum, @@ -1073,23 +1188,30 @@ void ARMInstPrinter::printRotImmOperand(const MCInst *MI, unsigned OpNum, unsigned Imm = MI->getOperand(OpNum).getImm(); if (Imm == 0) return; - O << ", ror #"; + O << ", ror " + << markup(""); } void ARMInstPrinter::printFBits16(const MCInst *MI, unsigned OpNum, raw_ostream &O) { - O << "#" << 16 - MI->getOperand(OpNum).getImm(); + O << markup("getOperand(OpNum).getImm() + << markup(">"); } void ARMInstPrinter::printFBits32(const MCInst *MI, unsigned OpNum, raw_ostream &O) { - O << "#" << 32 - MI->getOperand(OpNum).getImm(); + O << markup("getOperand(OpNum).getImm() + << markup(">"); } void ARMInstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum, @@ -1099,7 +1221,9 @@ void ARMInstPrinter::printVectorIndex(const MCInst *MI, unsigned OpNum, void ARMInstPrinter::printVectorListOne(const MCInst *MI, unsigned OpNum, raw_ostream &O) { - O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << "}"; } void ARMInstPrinter::printVectorListTwo(const MCInst *MI, unsigned OpNum, @@ -1107,7 +1231,11 @@ void ARMInstPrinter::printVectorListTwo(const MCInst *MI, unsigned OpNum, unsigned Reg = MI->getOperand(OpNum).getReg(); unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_1); - O << "{" << getRegisterName(Reg0) << ", " << getRegisterName(Reg1) << "}"; + O << "{"; + printRegName(O, Reg0); + O << ", "; + printRegName(O, Reg1); + O << "}"; } void ARMInstPrinter::printVectorListTwoSpaced(const MCInst *MI, @@ -1116,7 +1244,11 @@ void ARMInstPrinter::printVectorListTwoSpaced(const MCInst *MI, unsigned Reg = MI->getOperand(OpNum).getReg(); unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_2); - O << "{" << getRegisterName(Reg0) << ", " << getRegisterName(Reg1) << "}"; + O << "{"; + printRegName(O, Reg0); + O << ", "; + printRegName(O, Reg1); + O << "}"; } void ARMInstPrinter::printVectorListThree(const MCInst *MI, unsigned OpNum, @@ -1124,9 +1256,13 @@ void ARMInstPrinter::printVectorListThree(const MCInst *MI, unsigned OpNum, // Normally, it's not safe to use register enum values directly 
with // addition to get the next register, but for VFP registers, the // sort order is guaranteed because they're all of the form D. - O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 1); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 2); + O << "}"; } void ARMInstPrinter::printVectorListFour(const MCInst *MI, unsigned OpNum, @@ -1134,16 +1270,23 @@ void ARMInstPrinter::printVectorListFour(const MCInst *MI, unsigned OpNum, // Normally, it's not safe to use register enum values directly with // addition to get the next register, but for VFP registers, the // sort order is guaranteed because they're all of the form D. - O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 3) << "}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 1); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 2); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 3); + O << "}"; } void ARMInstPrinter::printVectorListOneAllLanes(const MCInst *MI, unsigned OpNum, raw_ostream &O) { - O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[]}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << "[]}"; } void ARMInstPrinter::printVectorListTwoAllLanes(const MCInst *MI, @@ -1152,7 +1295,11 @@ void ARMInstPrinter::printVectorListTwoAllLanes(const MCInst *MI, unsigned Reg = MI->getOperand(OpNum).getReg(); unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_1); - O << "{" << getRegisterName(Reg0) << "[], " << getRegisterName(Reg1) << "[]}"; + O << "{"; + printRegName(O, Reg0); + O << "[], "; + printRegName(O, Reg1); + O << "[]}"; } void ARMInstPrinter::printVectorListThreeAllLanes(const MCInst *MI, @@ -1161,9 +1308,13 @@ void ARMInstPrinter::printVectorListThreeAllLanes(const MCInst *MI, // Normally, it's not safe to use register enum values directly with // addition to get the next register, but for VFP registers, the // sort order is guaranteed because they're all of the form D. - O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[]}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 1); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 2); + O << "[]}"; } void ARMInstPrinter::printVectorListFourAllLanes(const MCInst *MI, @@ -1172,10 +1323,15 @@ void ARMInstPrinter::printVectorListFourAllLanes(const MCInst *MI, // Normally, it's not safe to use register enum values directly with // addition to get the next register, but for VFP registers, the // sort order is guaranteed because they're all of the form D. 
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 1) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 3) << "[]}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 1); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 2); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 3); + O << "[]}"; } void ARMInstPrinter::printVectorListTwoSpacedAllLanes(const MCInst *MI, @@ -1184,7 +1340,11 @@ void ARMInstPrinter::printVectorListTwoSpacedAllLanes(const MCInst *MI, unsigned Reg = MI->getOperand(OpNum).getReg(); unsigned Reg0 = MRI.getSubReg(Reg, ARM::dsub_0); unsigned Reg1 = MRI.getSubReg(Reg, ARM::dsub_2); - O << "{" << getRegisterName(Reg0) << "[], " << getRegisterName(Reg1) << "[]}"; + O << "{"; + printRegName(O, Reg0); + O << "[], "; + printRegName(O, Reg1); + O << "[]}"; } void ARMInstPrinter::printVectorListThreeSpacedAllLanes(const MCInst *MI, @@ -1193,9 +1353,13 @@ void ARMInstPrinter::printVectorListThreeSpacedAllLanes(const MCInst *MI, // Normally, it's not safe to use register enum values directly with // addition to get the next register, but for VFP registers, the // sort order is guaranteed because they're all of the form D. - O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "[]}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 2); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 4); + O << "[]}"; } void ARMInstPrinter::printVectorListFourSpacedAllLanes(const MCInst *MI, @@ -1204,10 +1368,15 @@ void ARMInstPrinter::printVectorListFourSpacedAllLanes(const MCInst *MI, // Normally, it's not safe to use register enum values directly with // addition to get the next register, but for VFP registers, the // sort order is guaranteed because they're all of the form D. - O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "[], " - << getRegisterName(MI->getOperand(OpNum).getReg() + 6) << "[]}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 2); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 4); + O << "[], "; + printRegName(O, MI->getOperand(OpNum).getReg() + 6); + O << "[]}"; } void ARMInstPrinter::printVectorListThreeSpaced(const MCInst *MI, @@ -1216,9 +1385,13 @@ void ARMInstPrinter::printVectorListThreeSpaced(const MCInst *MI, // Normally, it's not safe to use register enum values directly with // addition to get the next register, but for VFP registers, the // sort order is guaranteed because they're all of the form D. 
- O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << "}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 2); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 4); + O << "}"; } void ARMInstPrinter::printVectorListFourSpaced(const MCInst *MI, @@ -1227,8 +1400,13 @@ void ARMInstPrinter::printVectorListFourSpaced(const MCInst *MI, // Normally, it's not safe to use register enum values directly with // addition to get the next register, but for VFP registers, the // sort order is guaranteed because they're all of the form D. - O << "{" << getRegisterName(MI->getOperand(OpNum).getReg()) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 2) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 4) << ", " - << getRegisterName(MI->getOperand(OpNum).getReg() + 6) << "}"; + O << "{"; + printRegName(O, MI->getOperand(OpNum).getReg()); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 2); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 4); + O << ", "; + printRegName(O, MI->getOperand(OpNum).getReg() + 6); + O << "}"; } diff --git a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h index 73d7bfd..b7bab5f 100644 --- a/lib/Target/ARM/InstPrinter/ARMInstPrinter.h +++ b/lib/Target/ARM/InstPrinter/ARMInstPrinter.h @@ -126,7 +126,8 @@ public: void printRotImmOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printPCLabel(const MCInst *MI, unsigned OpNum, raw_ostream &O); - void printT2LdrLabelOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); + void printThumbLdrLabelOperand(const MCInst *MI, unsigned OpNum, + raw_ostream &O); void printFBits16(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printFBits32(const MCInst *MI, unsigned OpNum, raw_ostream &O); void printVectorIndex(const MCInst *MI, unsigned OpNum, raw_ostream &O); diff --git a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp index ac6ce64..1ba6ab0 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp @@ -593,7 +593,9 @@ public: const object::mach::CPUSubtypeARM Subtype; DarwinARMAsmBackend(const Target &T, const StringRef TT, object::mach::CPUSubtypeARM st) - : ARMAsmBackend(T, TT), Subtype(st) { } + : ARMAsmBackend(T, TT), Subtype(st) { + HasDataInCodeSupport = true; + } MCObjectWriter *createObjectWriter(raw_ostream &OS) const { return createARMMachObjectWriter(OS, /*Is64Bit=*/false, @@ -674,7 +676,7 @@ void DarwinARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data, } // end anonymous namespace -MCAsmBackend *llvm::createARMAsmBackend(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createARMAsmBackend(const Target &T, StringRef TT, StringRef CPU) { Triple TheTriple(TT); if (TheTriple.isOSDarwin()) { @@ -687,6 +689,15 @@ MCAsmBackend *llvm::createARMAsmBackend(const Target &T, StringRef TT) { else if (TheTriple.getArchName() == "armv6" || TheTriple.getArchName() == "thumbv6") return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V6); + else if (TheTriple.getArchName() == "armv7f" || + TheTriple.getArchName() == "thumbv7f") + return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7F); + else if (TheTriple.getArchName() == "armv7k" || + TheTriple.getArchName() == "thumbv7k") + 
return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7K); + else if (TheTriple.getArchName() == "armv7s" || + TheTriple.getArchName() == "thumbv7s") + return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7S); return new DarwinARMAsmBackend(T, TT, object::mach::CSARM_V7); } diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp index 7d6acbc..99e4f71 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMELFObjectWriter.cpp @@ -194,6 +194,10 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target, case ARM::fixup_arm_uncondbranch: Type = ELF::R_ARM_JUMP24; break; + case ARM::fixup_t2_condbranch: + case ARM::fixup_t2_uncondbranch: + Type = ELF::R_ARM_THM_JUMP24; + break; case ARM::fixup_arm_movt_hi16: case ARM::fixup_arm_movt_hi16_pcrel: Type = ELF::R_ARM_MOVT_PREL; @@ -242,6 +246,9 @@ unsigned ARMELFObjectWriter::GetRelocTypeInner(const MCValue &Target, case MCSymbolRefExpr::VK_ARM_TARGET1: Type = ELF::R_ARM_TARGET1; break; + case MCSymbolRefExpr::VK_ARM_TARGET2: + Type = ELF::R_ARM_TARGET2; + break; } break; case ARM::fixup_arm_ldst_pcrel_12: diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp index d32805e..c1aab9c 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMCAsmInfo.cpp @@ -50,7 +50,6 @@ ARMELFMCAsmInfo::ARMELFMCAsmInfo() { Code32Directive = ".code\t32"; WeakRefDirective = "\t.weak\t"; - LCOMMDirectiveType = LCOMM::NoAlignment; HasLEB128 = true; SupportsDebugInformation = true; diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp index 94f1082..d0e127a 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp @@ -35,8 +35,8 @@ STATISTIC(MCNumCPRelocations, "Number of constant pool relocations created."); namespace { class ARMMCCodeEmitter : public MCCodeEmitter { - ARMMCCodeEmitter(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT - void operator=(const ARMMCCodeEmitter &); // DO NOT IMPLEMENT + ARMMCCodeEmitter(const ARMMCCodeEmitter &) LLVM_DELETED_FUNCTION; + void operator=(const ARMMCCodeEmitter &) LLVM_DELETED_FUNCTION; const MCInstrInfo &MCII; const MCSubtargetInfo &STI; const MCContext &CTX; @@ -783,7 +783,7 @@ getT2Imm8s4OpValue(const MCInst &MI, unsigned OpIdx, // Immediate is always encoded as positive. The 'U' bit controls add vs sub. if (Imm8 < 0) - Imm8 = -Imm8; + Imm8 = -(uint32_t)Imm8; // Scaled by 4. Imm8 /= 4; @@ -934,6 +934,10 @@ getLdStSORegOpValue(const MCInst &MI, unsigned OpIdx, ARM_AM::ShiftOpc ShOp = ARM_AM::getAM2ShiftOpc(MO2.getImm()); unsigned SBits = getShiftOp(ShOp); + // While "lsr #32" and "asr #32" exist, they are encoded with a 0 in the shift + // amount. However, it would be an easy mistake to make so check here. 
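// ---------------------------------------------------------------------------
// Editorial note (illustrative only, not part of the patch): the assert that
// follows mirrors the parser-side normalization earlier in this patch -- by
// the time an operand reaches the encoder, "lsr #32" / "asr #32" must already
// have been folded to a shift immediate of 0, so the amount always fits the
// 5-bit field. A hedged statement of that invariant:
static inline bool isEncodableShiftAmount(unsigned ShImm) {
  return (ShImm & ~0x1fu) == 0;   // 5-bit field; #32 is represented as 0
}
// ---------------------------------------------------------------------------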
+ assert((ShImm & ~0x1f) == 0 && "Out of range shift amount"); + // {16-13} = Rn // {12} = isAdd // {11-0} = shifter diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h b/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h index a727e08..b404e6c 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h +++ b/lib/Target/ARM/MCTargetDesc/ARMMCExpr.h @@ -28,7 +28,7 @@ private: explicit ARMMCExpr(VariantKind _Kind, const MCExpr *_Expr) : Kind(_Kind), Expr(_Expr) {} - + public: /// @name Construction /// @{ @@ -67,9 +67,6 @@ public: static bool classof(const MCExpr *E) { return E->getKind() == MCExpr::Target; } - - static bool classof(const ARMMCExpr *) { return true; } - }; } // end namespace llvm diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp index 5df84c8..00ffc94 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.cpp @@ -71,6 +71,14 @@ std::string ARM_MC::ParseARMTriple(StringRef TT, StringRef CPU) { else // Use CPU to figure out the exact features. ARMArchFeature = "+v7"; + } else if (Len >= Idx+2 && TT[Idx+1] == 's') { + if (NoCPU) + // v7s: FeatureNEON, FeatureDB, FeatureDSPThumb2, FeatureT2XtPk + // Swift + ARMArchFeature = "+v7,+swift,+neon,+db,+t2dsp,+t2xtpk"; + else + // Use CPU to figure out the exact features. + ARMArchFeature = "+v7"; } else { // v7 CPUs have lots of different feature sets. If no CPU is specified, // then assume v7a (e.g. cortex-a8) feature set. Otherwise, return diff --git a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h index 510302d..a89981e 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h +++ b/lib/Target/ARM/MCTargetDesc/ARMMCTargetDesc.h @@ -46,7 +46,7 @@ MCCodeEmitter *createARMMCCodeEmitter(const MCInstrInfo &MCII, const MCSubtargetInfo &STI, MCContext &Ctx); -MCAsmBackend *createARMAsmBackend(const Target &T, StringRef TT); +MCAsmBackend *createARMAsmBackend(const Target &T, StringRef TT, StringRef CPU); /// createARMELFObjectWriter - Construct an ELF Mach-O object writer. MCObjectWriter *createARMELFObjectWriter(raw_ostream &OS, diff --git a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp index a51e0fa..2154c93 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp @@ -41,6 +41,12 @@ class ARMMachObjectWriter : public MCMachObjectTargetWriter { const MCFixup &Fixup, MCValue Target, uint64_t &FixedValue); + bool requiresExternRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCFragment &Fragment, + unsigned RelocType, const MCSymbolData *SD, + uint64_t FixedValue); + public: ARMMachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype) @@ -305,6 +311,46 @@ void ARMMachObjectWriter::RecordARMScatteredRelocation(MachObjectWriter *Writer, Writer->addRelocation(Fragment->getParent(), MRE); } +bool ARMMachObjectWriter::requiresExternRelocation(MachObjectWriter *Writer, + const MCAssembler &Asm, + const MCFragment &Fragment, + unsigned RelocType, + const MCSymbolData *SD, + uint64_t FixedValue) { + // Most cases can be identified purely from the symbol. + if (Writer->doesSymbolRequireExternRelocation(SD)) + return true; + int64_t Value = (int64_t)FixedValue; // The displacement is signed. + int64_t Range; + switch (RelocType) { + default: + return false; + case macho::RIT_ARM_Branch24Bit: + // PC pre-adjustment of 8 for these instructions. 
+ Value -= 8; + // ARM BL/BLX has a 25-bit offset. + Range = 0x1ffffff; + break; + case macho::RIT_ARM_ThumbBranch22Bit: + // PC pre-adjustment of 4 for these instructions. + Value -= 4; + // Thumb BL/BLX has a 24-bit offset. + Range = 0xffffff; + } + // BL/BLX also use external relocations when an internal relocation + // would result in the target being out of range. This gives the linker + // enough information to generate a branch island. + const MCSectionData &SymSD = Asm.getSectionData( + SD->getSymbol().getSection()); + Value += Writer->getSectionAddress(&SymSD); + Value -= Writer->getSectionAddress(Fragment.getParent()); + // If the resultant value would be out of range for an internal relocation, + // use an external instead. + if (Value > Range || Value < -(Range + 1)) + return true; + return false; +} + void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout, @@ -373,7 +419,8 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer, } // Check whether we need an external or internal relocation. - if (Writer->doesSymbolRequireExternRelocation(SD)) { + if (requiresExternRelocation(Writer, Asm, *Fragment, RelocType, SD, + FixedValue)) { IsExtern = 1; Index = SD->getIndex(); @@ -410,7 +457,7 @@ void ARMMachObjectWriter::RecordRelocation(MachObjectWriter *Writer, if (Type == macho::RIT_ARM_Half) { // The other-half value only gets populated for the movt and movw // relocation entries. - uint32_t Value = 0;; + uint32_t Value = 0; switch ((unsigned)Fixup.getKind()) { default: break; case ARM::fixup_arm_movw_lo16: diff --git a/lib/Target/ARM/MLxExpansionPass.cpp b/lib/Target/ARM/MLxExpansionPass.cpp index ad60e32..70643bc 100644 --- a/lib/Target/ARM/MLxExpansionPass.cpp +++ b/lib/Target/ARM/MLxExpansionPass.cpp @@ -51,7 +51,8 @@ namespace { const TargetRegisterInfo *TRI; MachineRegisterInfo *MRI; - bool isA9; + bool isLikeA9; + bool isSwift; unsigned MIIdx; MachineInstr* LastMIs[4]; SmallPtrSet IgnoreStall; @@ -60,6 +61,7 @@ namespace { void pushStack(MachineInstr *MI); MachineInstr *getAccDefMI(MachineInstr *MI) const; unsigned getDefReg(MachineInstr *MI) const; + bool hasLoopHazard(MachineInstr *MI) const; bool hasRAWHazard(unsigned Reg, MachineInstr *MI) const; bool FindMLxHazard(MachineInstr *MI); void ExpandFPMLxInstruction(MachineBasicBlock &MBB, MachineInstr *MI, @@ -135,6 +137,50 @@ unsigned MLxExpansion::getDefReg(MachineInstr *MI) const { return Reg; } +/// hasLoopHazard - Check whether an MLx instruction is chained to itself across +/// a single-MBB loop. 
+bool MLxExpansion::hasLoopHazard(MachineInstr *MI) const { + unsigned Reg = MI->getOperand(1).getReg(); + if (TargetRegisterInfo::isPhysicalRegister(Reg)) + return false; + + MachineBasicBlock *MBB = MI->getParent(); + MachineInstr *DefMI = MRI->getVRegDef(Reg); + while (true) { +outer_continue: + if (DefMI->getParent() != MBB) + break; + + if (DefMI->isPHI()) { + for (unsigned i = 1, e = DefMI->getNumOperands(); i < e; i += 2) { + if (DefMI->getOperand(i + 1).getMBB() == MBB) { + unsigned SrcReg = DefMI->getOperand(i).getReg(); + if (TargetRegisterInfo::isVirtualRegister(SrcReg)) { + DefMI = MRI->getVRegDef(SrcReg); + goto outer_continue; + } + } + } + } else if (DefMI->isCopyLike()) { + Reg = DefMI->getOperand(1).getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + DefMI = MRI->getVRegDef(Reg); + continue; + } + } else if (DefMI->isInsertSubreg()) { + Reg = DefMI->getOperand(2).getReg(); + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + DefMI = MRI->getVRegDef(Reg); + continue; + } + } + + break; + } + + return DefMI == MI; +} + bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const { // FIXME: Detect integer instructions properly. const MCInstrDesc &MCID = MI->getDesc(); @@ -149,6 +195,19 @@ bool MLxExpansion::hasRAWHazard(unsigned Reg, MachineInstr *MI) const { return false; } +static bool isFpMulInstruction(unsigned Opcode) { + switch (Opcode) { + case ARM::VMULS: + case ARM::VMULfd: + case ARM::VMULfq: + case ARM::VMULD: + case ARM::VMULslfd: + case ARM::VMULslfq: + return true; + default: + return false; + } +} bool MLxExpansion::FindMLxHazard(MachineInstr *MI) { if (NumExpand >= ExpandLimit) @@ -171,6 +230,12 @@ bool MLxExpansion::FindMLxHazard(MachineInstr *MI) { return true; } + // On Swift, we mostly care about hazards from multiplication instructions + // writing the accumulator and the pipelining of loop iterations by out-of- + // order execution. + if (isSwift) + return isFpMulInstruction(DefMI->getOpcode()) || hasLoopHazard(MI); + if (IgnoreStall.count(MI)) return false; @@ -179,8 +244,8 @@ bool MLxExpansion::FindMLxHazard(MachineInstr *MI) { // preserves the in-order retirement of the instructions. // Look at the next few instructions, if *most* of them can cause hazards, // then the scheduler can't *fix* this, we'd better break up the VMLA. - unsigned Limit1 = isA9 ? 1 : 4; - unsigned Limit2 = isA9 ? 1 : 4; + unsigned Limit1 = isLikeA9 ? 1 : 4; + unsigned Limit2 = isLikeA9 ? 
1 : 4; for (unsigned i = 1; i <= 4; ++i) { int Idx = ((int)MIIdx - i + 4) % 4; MachineInstr *NextMI = LastMIs[Idx]; @@ -316,7 +381,8 @@ bool MLxExpansion::runOnMachineFunction(MachineFunction &Fn) { TRI = Fn.getTarget().getRegisterInfo(); MRI = &Fn.getRegInfo(); const ARMSubtarget *STI = &Fn.getTarget().getSubtarget(); - isA9 = STI->isCortexA9(); + isLikeA9 = STI->isLikeA9() || STI->isSwift(); + isSwift = STI->isSwift(); bool Modified = false; for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E; diff --git a/lib/Target/CMakeLists.txt b/lib/Target/CMakeLists.txt index 5913a9c..a85acaa 100644 --- a/lib/Target/CMakeLists.txt +++ b/lib/Target/CMakeLists.txt @@ -1,8 +1,6 @@ add_llvm_library(LLVMTarget Mangler.cpp Target.cpp - TargetData.cpp - TargetELFWriterInfo.cpp TargetInstrInfo.cpp TargetIntrinsicInfo.cpp TargetJITInfo.cpp @@ -12,6 +10,7 @@ add_llvm_library(LLVMTarget TargetMachineC.cpp TargetRegisterInfo.cpp TargetSubtargetInfo.cpp + TargetTransformImpl.cpp ) foreach(t ${LLVM_TARGETS_TO_BUILD}) diff --git a/lib/Target/CellSPU/SPUAsmPrinter.cpp b/lib/Target/CellSPU/SPUAsmPrinter.cpp index 03d5a9a..3396e8b 100644 --- a/lib/Target/CellSPU/SPUAsmPrinter.cpp +++ b/lib/Target/CellSPU/SPUAsmPrinter.cpp @@ -130,8 +130,7 @@ namespace { void printS10ImmOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) { - short value = (short) (((int) MI->getOperand(OpNo).getImm() << 16) - >> 16); + short value = MI->getOperand(OpNo).getImm(); assert((value >= -(1 << 9) && value <= (1 << 9) - 1) && "Invalid s10 argument"); O << value; @@ -140,8 +139,7 @@ namespace { void printU10ImmOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) { - short value = (short) (((int) MI->getOperand(OpNo).getImm() << 16) - >> 16); + short value = MI->getOperand(OpNo).getImm(); assert((value <= (1 << 10) - 1) && "Invalid u10 argument"); O << value; } diff --git a/lib/Target/CellSPU/SPUFrameLowering.cpp b/lib/Target/CellSPU/SPUFrameLowering.cpp index fac806e1..f011995 100644 --- a/lib/Target/CellSPU/SPUFrameLowering.cpp +++ b/lib/Target/CellSPU/SPUFrameLowering.cpp @@ -22,7 +22,7 @@ #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/RegisterScavenging.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" using namespace llvm; diff --git a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp index c27caea..5d50610 100644 --- a/lib/Target/CellSPU/SPUISelDAGToDAG.cpp +++ b/lib/Target/CellSPU/SPUISelDAGToDAG.cpp @@ -67,8 +67,8 @@ namespace { //! ConstantSDNode predicate for signed 16-bit values /*! - \arg CN The constant SelectionDAG node holding the value - \arg Imm The returned 16-bit value, if returning true + \param CN The constant SelectionDAG node holding the value + \param Imm The returned 16-bit value, if returning true This predicate tests the value in \a CN to see whether it can be represented as a 16-bit, sign-extended quantity. 
Returns true if @@ -83,12 +83,10 @@ namespace { return true; } else if (vt == MVT::i32) { int32_t i_val = (int32_t) CN->getZExtValue(); - short s_val = (short) i_val; - return i_val == s_val; + return i_val == SignExtend32<16>(i_val); } else { int64_t i_val = (int64_t) CN->getZExtValue(); - short s_val = (short) i_val; - return i_val == s_val; + return i_val == SignExtend64<16>(i_val); } } @@ -99,9 +97,10 @@ namespace { EVT vt = FPN->getValueType(0); if (vt == MVT::f32) { int val = FloatToBits(FPN->getValueAPF().convertToFloat()); - int sval = (int) ((val << 16) >> 16); - Imm = (short) val; - return val == sval; + if (val == SignExtend32<16>(val)) { + Imm = (short) val; + return true; + } } return false; @@ -306,10 +305,10 @@ namespace { } /*! - \arg Op The ISD instruction operand - \arg N The address to be tested - \arg Base The base address - \arg Index The base address index + \param Op The ISD instruction operand + \param N The address to be tested + \param Base The base address + \param Index The base address index */ bool SPUDAGToDAGISel::SelectAFormAddr(SDNode *Op, SDValue N, SDValue &Base, @@ -376,10 +375,10 @@ SPUDAGToDAGISel::SelectDForm2Addr(SDNode *Op, SDValue N, SDValue &Disp, } /*! - \arg Op The ISD instruction (ignored) - \arg N The address to be tested - \arg Base Base address register/pointer - \arg Index Base address index + \param Op The ISD instruction (ignored) + \param N The address to be tested + \param Base Base address register/pointer + \param Index Base address index Examine the input address by a base register plus a signed 10-bit displacement, [r+I10] (D-form address). @@ -542,10 +541,10 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base, } /*! - \arg Op The ISD instruction operand - \arg N The address operand - \arg Base The base pointer operand - \arg Index The offset/index operand + \param Op The ISD instruction operand + \param N The address operand + \param Base The base pointer operand + \param Index The offset/index operand If the address \a N can be expressed as an A-form or D-form address, returns false. Otherwise, creates two operands, Base and Index that will become the @@ -570,7 +569,7 @@ SPUDAGToDAGISel::SelectXFormAddr(SDNode *Op, SDValue N, SDValue &Base, Utility function to use with COPY_TO_REGCLASS instructions. Returns a SDValue to be used as the last parameter of a CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call - \arg VT the value type for which we want a register class + \param VT the value type for which we want a register class */ SDValue SPUDAGToDAGISel::getRC( MVT VT ) { switch( VT.SimpleTy ) { diff --git a/lib/Target/CellSPU/SPUSubtarget.h b/lib/Target/CellSPU/SPUSubtarget.h index 7c4aa14..27d28b2 100644 --- a/lib/Target/CellSPU/SPUSubtarget.h +++ b/lib/Target/CellSPU/SPUSubtarget.h @@ -80,9 +80,9 @@ namespace llvm { return UseLargeMem; } - /// getTargetDataString - Return the pointer size and type alignment + /// getDataLayoutString - Return the pointer size and type alignment /// properties of this subtarget. 
- const char *getTargetDataString() const { + const char *getDataLayoutString() const { return "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128" "-i16:16:128-i8:8:128-i1:8:128-a:0:128-v64:64:128-v128:128:128" "-s:128:128-n32:64"; diff --git a/lib/Target/CellSPU/SPUTargetMachine.cpp b/lib/Target/CellSPU/SPUTargetMachine.cpp index 54764f1..9183165 100644 --- a/lib/Target/CellSPU/SPUTargetMachine.cpp +++ b/lib/Target/CellSPU/SPUTargetMachine.cpp @@ -38,12 +38,13 @@ SPUTargetMachine::SPUTargetMachine(const Target &T, StringRef TT, CodeGenOpt::Level OL) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS), - DataLayout(Subtarget.getTargetDataString()), + DL(Subtarget.getDataLayoutString()), InstrInfo(*this), FrameLowering(Subtarget), TLInfo(*this), TSInfo(*this), - InstrItins(Subtarget.getInstrItineraryData()) { + InstrItins(Subtarget.getInstrItineraryData()), + STTI(&TLInfo), VTTI(&TLInfo) { } //===----------------------------------------------------------------------===// diff --git a/lib/Target/CellSPU/SPUTargetMachine.h b/lib/Target/CellSPU/SPUTargetMachine.h index 3e5d38c..7f53ea6 100644 --- a/lib/Target/CellSPU/SPUTargetMachine.h +++ b/lib/Target/CellSPU/SPUTargetMachine.h @@ -20,7 +20,8 @@ #include "SPUSelectionDAGInfo.h" #include "SPUFrameLowering.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetTransformImpl.h" +#include "llvm/DataLayout.h" namespace llvm { @@ -28,12 +29,14 @@ namespace llvm { /// class SPUTargetMachine : public LLVMTargetMachine { SPUSubtarget Subtarget; - const TargetData DataLayout; + const DataLayout DL; SPUInstrInfo InstrInfo; SPUFrameLowering FrameLowering; SPUTargetLowering TLInfo; SPUSelectionDAGInfo TSInfo; InstrItineraryData InstrItins; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: SPUTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options, @@ -70,13 +73,19 @@ public: return &InstrInfo.getRegisterInfo(); } - virtual const TargetData *getTargetData() const { - return &DataLayout; + virtual const DataLayout *getDataLayout() const { + return &DL; } virtual const InstrItineraryData *getInstrItineraryData() const { return &InstrItins; } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } // Pass Pipeline Configuration virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp index c8e757b..5c90990 100644 --- a/lib/Target/CppBackend/CPPBackend.cpp +++ b/lib/Target/CppBackend/CPPBackend.cpp @@ -285,14 +285,14 @@ void CppWriter::printLinkageType(GlobalValue::LinkageTypes LT) { Out << "GlobalValue::LinkerPrivateLinkage"; break; case GlobalValue::LinkerPrivateWeakLinkage: Out << "GlobalValue::LinkerPrivateWeakLinkage"; break; - case GlobalValue::LinkerPrivateWeakDefAutoLinkage: - Out << "GlobalValue::LinkerPrivateWeakDefAutoLinkage"; break; case GlobalValue::AvailableExternallyLinkage: Out << "GlobalValue::AvailableExternallyLinkage "; break; case GlobalValue::LinkOnceAnyLinkage: Out << "GlobalValue::LinkOnceAnyLinkage "; break; case GlobalValue::LinkOnceODRLinkage: Out << "GlobalValue::LinkOnceODRLinkage "; break; + case GlobalValue::LinkOnceODRAutoHideLinkage: + Out << "GlobalValue::LinkOnceODRAutoHideLinkage"; break; case GlobalValue::WeakAnyLinkage: Out 
<< "GlobalValue::WeakAnyLinkage"; break; case GlobalValue::WeakODRLinkage: @@ -474,13 +474,15 @@ void CppWriter::printAttributes(const AttrListPtr &PAL, Out << "AttributeWithIndex PAWI;"; nl(Out); for (unsigned i = 0; i < PAL.getNumSlots(); ++i) { unsigned index = PAL.getSlot(i).Index; - Attributes attrs = PAL.getSlot(i).Attrs; - Out << "PAWI.Index = " << index << "U; PAWI.Attrs = Attribute::None "; -#define HANDLE_ATTR(X) \ - if (attrs & Attribute::X) \ - Out << " | Attribute::" #X; \ - attrs &= ~Attribute::X; - + AttrBuilder attrs(PAL.getSlot(i).Attrs); + Out << "PAWI.Index = " << index << "U;\n"; + Out << " {\n AttrBuilder B;\n"; + +#define HANDLE_ATTR(X) \ + if (attrs.hasAttribute(Attributes::X)) \ + Out << " B.addAttribute(Attributes::" #X ");\n"; \ + attrs.removeAttribute(Attributes::X); + HANDLE_ATTR(SExt); HANDLE_ATTR(ZExt); HANDLE_ATTR(NoReturn); @@ -505,19 +507,18 @@ void CppWriter::printAttributes(const AttrListPtr &PAL, HANDLE_ATTR(ReturnsTwice); HANDLE_ATTR(UWTable); HANDLE_ATTR(NonLazyBind); + HANDLE_ATTR(MinSize); #undef HANDLE_ATTR - if (attrs & Attribute::StackAlignment) - Out << " | Attribute::constructStackAlignmentFromInt(" - << Attribute::getStackAlignmentFromAttrs(attrs) - << ")"; - attrs &= ~Attribute::StackAlignment; - assert(attrs == 0 && "Unhandled attribute!"); - Out << ";"; + if (attrs.hasAttribute(Attributes::StackAlignment)) + Out << " B.addStackAlignmentAttr(" << attrs.getStackAlignment() << ")\n"; + attrs.removeAttribute(Attributes::StackAlignment); + assert(!attrs.hasAttributes() && "Unhandled attribute!"); + Out << " PAWI.Attrs = Attributes::get(mod->getContext(), B);\n }"; nl(Out); Out << "Attrs.push_back(PAWI);"; nl(Out); } - Out << name << "_PAL = AttrListPtr::get(Attrs);"; + Out << name << "_PAL = AttrListPtr::get(mod->getContext(), Attrs);"; nl(Out); out(); nl(Out); Out << '}'; nl(Out); diff --git a/lib/Target/CppBackend/CPPTargetMachine.h b/lib/Target/CppBackend/CPPTargetMachine.h index 9cbe798..30d765d 100644 --- a/lib/Target/CppBackend/CPPTargetMachine.h +++ b/lib/Target/CppBackend/CPPTargetMachine.h @@ -15,7 +15,7 @@ #define CPPTARGETMACHINE_H #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" namespace llvm { @@ -35,7 +35,7 @@ struct CPPTargetMachine : public TargetMachine { AnalysisID StartAfter, AnalysisID StopAfter); - virtual const TargetData *getTargetData() const { return 0; } + virtual const DataLayout *getDataLayout() const { return 0; } }; extern Target TheCppBackendTarget; diff --git a/lib/Target/Hexagon/CMakeLists.txt b/lib/Target/Hexagon/CMakeLists.txt index 1f2d8ac..306084b 100644 --- a/lib/Target/Hexagon/CMakeLists.txt +++ b/lib/Target/Hexagon/CMakeLists.txt @@ -16,6 +16,7 @@ add_llvm_target(HexagonCodeGen HexagonExpandPredSpillCode.cpp HexagonFrameLowering.cpp HexagonHardwareLoops.cpp + HexagonMachineScheduler.cpp HexagonMCInstLower.cpp HexagonInstrInfo.cpp HexagonISelDAGToDAG.cpp diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp index 5fa4740..c15bce6 100644 --- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp +++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp @@ -46,7 +46,7 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetInstrInfo.h" diff --git 
a/lib/Target/Hexagon/HexagonCallingConvLower.cpp b/lib/Target/Hexagon/HexagonCallingConvLower.cpp index ba8e679..73f9d9a 100644 --- a/lib/Target/Hexagon/HexagonCallingConvLower.cpp +++ b/lib/Target/Hexagon/HexagonCallingConvLower.cpp @@ -16,7 +16,7 @@ #include "HexagonCallingConvLower.h" #include "Hexagon.h" #include "llvm/Target/TargetRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp index 703a128..1c891f1 100644 --- a/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -1350,6 +1350,8 @@ HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine } else { setOperationAction(ISD::BR_JT, MVT::Other, Expand); } + // Increase jump tables cutover to 5, was 4. + setMinimumJumpTableEntries(5); setOperationAction(ISD::BR_CC, MVT::i32, Expand); diff --git a/lib/Target/Hexagon/HexagonInstrFormats.td b/lib/Target/Hexagon/HexagonInstrFormats.td index e472d49..a64c7a1 100644 --- a/lib/Target/Hexagon/HexagonInstrFormats.td +++ b/lib/Target/Hexagon/HexagonInstrFormats.td @@ -56,6 +56,16 @@ class InstHexagon pattern, bits<1> isPredicated = 0; let TSFlags{6} = isPredicated; + // Dot new value store instructions. + bits<1> isNVStore = 0; + let TSFlags{8} = isNVStore; + + // Fields used for relation models. + string BaseOpcode = ""; + string CextOpcode = ""; + string PredSense = ""; + string PNewValue = ""; + string InputType = ""; // Input is "imm" or "reg" type. // *** The code above must match HexagonBaseInfo.h *** } diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index c8f933d..8435440 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -25,6 +25,7 @@ #include "llvm/CodeGen/PseudoSourceValue.h" #include "llvm/Support/MathExtras.h" #define GET_INSTRINFO_CTOR +#define GET_INSTRMAP_INFO #include "HexagonGenInstrInfo.inc" #include "HexagonGenDFAPacketizer.inc" @@ -1915,6 +1916,15 @@ unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const { int HexagonInstrInfo:: getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { + enum Hexagon::PredSense inPredSense; + inPredSense = invertPredicate ? Hexagon::PredSense_false : + Hexagon::PredSense_true; + int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense); + if (CondOpcode >= 0) // Valid Conditional opcode/instruction + return CondOpcode; + + // This switch case will be removed once all the instructions have been + // modified to use relation maps. switch(Opc) { case Hexagon::TFR: return !invertPredicate ? Hexagon::TFR_cPt : @@ -1934,24 +1944,6 @@ getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const { case Hexagon::JMP_EQriPt_nv_V4: return !invertPredicate ? Hexagon::JMP_EQriPt_nv_V4 : Hexagon::JMP_EQriNotPt_nv_V4; - case Hexagon::ADD_ri: - return !invertPredicate ? Hexagon::ADD_ri_cPt : - Hexagon::ADD_ri_cNotPt; - case Hexagon::ADD_rr: - return !invertPredicate ? Hexagon::ADD_rr_cPt : - Hexagon::ADD_rr_cNotPt; - case Hexagon::XOR_rr: - return !invertPredicate ? Hexagon::XOR_rr_cPt : - Hexagon::XOR_rr_cNotPt; - case Hexagon::AND_rr: - return !invertPredicate ? Hexagon::AND_rr_cPt : - Hexagon::AND_rr_cNotPt; - case Hexagon::OR_rr: - return !invertPredicate ? 
Hexagon::OR_rr_cPt : - Hexagon::OR_rr_cNotPt; - case Hexagon::SUB_rr: - return !invertPredicate ? Hexagon::SUB_rr_cPt : - Hexagon::SUB_rr_cNotPt; case Hexagon::COMBINE_rr: return !invertPredicate ? Hexagon::COMBINE_rr_cPt : Hexagon::COMBINE_rr_cNotPt; diff --git a/lib/Target/Hexagon/HexagonInstrInfo.td b/lib/Target/Hexagon/HexagonInstrInfo.td index c0c0df6..1d4a706 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.td +++ b/lib/Target/Hexagon/HexagonInstrInfo.td @@ -15,6 +15,18 @@ include "HexagonInstrFormats.td" include "HexagonImmediates.td" //===----------------------------------------------------------------------===// +// Classes used for relation maps. +//===----------------------------------------------------------------------===// +// PredRel - Filter class used to relate non-predicated instructions with their +// predicated forms. +class PredRel; +// PredNewRel - Filter class used to relate predicated instructions with their +// predicate-new forms. +class PredNewRel: PredRel; +// ImmRegRel - Filter class used to relate instructions having reg-reg form +// with their reg-imm counterparts. +class ImmRegRel; +//===----------------------------------------------------------------------===// // Hexagon Instruction Predicate Definitions. //===----------------------------------------------------------------------===// def HasV2T : Predicate<"Subtarget.hasV2TOps()">; @@ -148,37 +160,91 @@ multiclass CMP32_ri_s8 { } //===----------------------------------------------------------------------===// -// ALU32/ALU + +// ALU32/ALU (Instructions with register-register form) //===----------------------------------------------------------------------===// -// Add. -let isCommutable = 1, isPredicable = 1 in -def ADD_rr : ALU32_rr<(outs IntRegs:$dst), - (ins IntRegs:$src1, IntRegs:$src2), - "$dst = add($src1, $src2)", - [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))]>; +multiclass ALU32_Pbase { -let isPredicable = 1 in -def ADD_ri : ALU32_ri<(outs IntRegs:$dst), - (ins IntRegs:$src1, s16Imm:$src2), - "$dst = add($src1, #$src2)", - [(set (i32 IntRegs:$dst), (add (i32 IntRegs:$src1), - s16ImmPred:$src2))]>; + let PNewValue = #!if(isPredNew, "new", "") in + def #NAME# : ALU32_rr<(outs IntRegs:$dst), + (ins PredRegs:$src1, IntRegs:$src2, IntRegs: $src3), + !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ", + ") $dst = ")#mnemonic#"($src2, $src3)", + []>; +} -// Logical operations. 
-let isPredicable = 1 in -def XOR_rr : ALU32_rr<(outs IntRegs:$dst), - (ins IntRegs:$src1, IntRegs:$src2), - "$dst = xor($src1, $src2)", - [(set (i32 IntRegs:$dst), (xor (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))]>; +multiclass ALU32_Pred { + let PredSense = #!if(PredNot, "false", "true") in { + defm _c#NAME# : ALU32_Pbase; + // Predicate new + defm _cdn#NAME# : ALU32_Pbase; + } +} -let isCommutable = 1, isPredicable = 1 in -def AND_rr : ALU32_rr<(outs IntRegs:$dst), +let InputType = "reg" in +multiclass ALU32_base { + let CextOpcode = CextOp, BaseOpcode = CextOp#_rr in { + let isPredicable = 1 in + def #NAME# : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1, IntRegs:$src2), - "$dst = and($src1, $src2)", - [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))]>; + "$dst = "#mnemonic#"($src1, $src2)", + [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1), + (i32 IntRegs:$src2)))]>; + + let neverHasSideEffects = 1, isPredicated = 1 in { + defm Pt : ALU32_Pred; + defm NotPt : ALU32_Pred; + } + } +} + +let isCommutable = 1 in { + defm ADD_rr : ALU32_base<"add", "ADD", add>, ImmRegRel, PredNewRel; + defm AND_rr : ALU32_base<"and", "AND", and>, ImmRegRel, PredNewRel; + defm XOR_rr : ALU32_base<"xor", "XOR", xor>, ImmRegRel, PredNewRel; + defm OR_rr : ALU32_base<"or", "OR", or>, ImmRegRel, PredNewRel; +} + +defm SUB_rr : ALU32_base<"sub", "SUB", sub>, ImmRegRel, PredNewRel; + +//===----------------------------------------------------------------------===// +// ALU32/ALU (ADD with register-immediate form) +//===----------------------------------------------------------------------===// +multiclass ALU32ri_Pbase { + let PNewValue = #!if(isPredNew, "new", "") in + def #NAME# : ALU32_ri<(outs IntRegs:$dst), + (ins PredRegs:$src1, IntRegs:$src2, s8Imm: $src3), + !if(isNot, "if (!$src1", "if ($src1")#!if(isPredNew,".new) $dst = ", + ") $dst = ")#mnemonic#"($src2, #$src3)", + []>; +} + +multiclass ALU32ri_Pred { + let PredSense = #!if(PredNot, "false", "true") in { + defm _c#NAME# : ALU32ri_Pbase; + // Predicate new + defm _cdn#NAME# : ALU32ri_Pbase; + } +} + +let InputType = "imm" in +multiclass ALU32ri_base { + let CextOpcode = CextOp, BaseOpcode = CextOp#_ri in { + let isPredicable = 1 in + def #NAME# : ALU32_ri<(outs IntRegs:$dst), + (ins IntRegs:$src1, s16Imm:$src2), + "$dst = "#mnemonic#"($src1, #$src2)", + [(set (i32 IntRegs:$dst), (OpNode (i32 IntRegs:$src1), + (s16ImmPred:$src2)))]>; + + let neverHasSideEffects = 1, isPredicated = 1 in { + defm Pt : ALU32ri_Pred; + defm NotPt : ALU32ri_Pred; + } + } +} + +defm ADD_ri : ALU32ri_base<"add", "ADD", add>, ImmRegRel, PredNewRel; def OR_ri : ALU32_ri<(outs IntRegs:$dst), (ins IntRegs:$src1, s10Imm:$src2), @@ -197,13 +263,6 @@ def AND_ri : ALU32_ri<(outs IntRegs:$dst), [(set (i32 IntRegs:$dst), (and (i32 IntRegs:$src1), s10ImmPred:$src2))]>; -let isCommutable = 1, isPredicable = 1 in -def OR_rr : ALU32_rr<(outs IntRegs:$dst), - (ins IntRegs:$src1, IntRegs:$src2), - "$dst = or($src1, $src2)", - [(set (i32 IntRegs:$dst), (or (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))]>; - // Negate. def NEG : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), "$dst = neg($src1)", @@ -214,14 +273,6 @@ def NOP : ALU32_rr<(outs), (ins), "nop", []>; -// Subtract. 
-let isPredicable = 1 in -def SUB_rr : ALU32_rr<(outs IntRegs:$dst), - (ins IntRegs:$src1, IntRegs:$src2), - "$dst = sub($src1, $src2)", - [(set (i32 IntRegs:$dst), (sub (i32 IntRegs:$src1), - (i32 IntRegs:$src2)))]>; - // Rd32=sub(#s10,Rs32) def SUB_ri : ALU32_ri<(outs IntRegs:$dst), (ins s10Imm:$src1, IntRegs:$src2), @@ -348,56 +399,6 @@ def ZXTH : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$src1), // ALU32/PRED + //===----------------------------------------------------------------------===// -// Conditional add. -let neverHasSideEffects = 1, isPredicated = 1 in -def ADD_ri_cPt : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), - "if ($src1) $dst = add($src2, #$src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def ADD_ri_cNotPt : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), - "if (!$src1) $dst = add($src2, #$src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def ADD_ri_cdnPt : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), - "if ($src1.new) $dst = add($src2, #$src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def ADD_ri_cdnNotPt : ALU32_ri<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, s8Imm:$src3), - "if (!$src1.new) $dst = add($src2, #$src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def ADD_rr_cPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1) $dst = add($src2, $src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def ADD_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1) $dst = add($src2, $src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def ADD_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1.new) $dst = add($src2, $src3)", - []>; - -let neverHasSideEffects = 1, isPredicated = 1 in -def ADD_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1.new) $dst = add($src2, $src3)", - []>; - - // Conditional combine. let neverHasSideEffects = 1, isPredicated = 1 in @@ -424,108 +425,6 @@ def COMBINE_rr_cdnNotPt : ALU32_rr<(outs DoubleRegs:$dst), "if (!$src1.new) $dst = combine($src2, $src3)", []>; -// Conditional logical operations. 
- -let isPredicated = 1 in -def XOR_rr_cPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1) $dst = xor($src2, $src3)", - []>; - -let isPredicated = 1 in -def XOR_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1) $dst = xor($src2, $src3)", - []>; - -let isPredicated = 1 in -def XOR_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1.new) $dst = xor($src2, $src3)", - []>; - -let isPredicated = 1 in -def XOR_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1.new) $dst = xor($src2, $src3)", - []>; - -let isPredicated = 1 in -def AND_rr_cPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1) $dst = and($src2, $src3)", - []>; - -let isPredicated = 1 in -def AND_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1) $dst = and($src2, $src3)", - []>; - -let isPredicated = 1 in -def AND_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1.new) $dst = and($src2, $src3)", - []>; - -let isPredicated = 1 in -def AND_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1.new) $dst = and($src2, $src3)", - []>; - -let isPredicated = 1 in -def OR_rr_cPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1) $dst = or($src2, $src3)", - []>; - -let isPredicated = 1 in -def OR_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1) $dst = or($src2, $src3)", - []>; - -let isPredicated = 1 in -def OR_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1.new) $dst = or($src2, $src3)", - []>; - -let isPredicated = 1 in -def OR_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1.new) $dst = or($src2, $src3)", - []>; - - -// Conditional subtract. - -let isPredicated = 1 in -def SUB_rr_cPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1) $dst = sub($src2, $src3)", - []>; - -let isPredicated = 1 in -def SUB_rr_cNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1) $dst = sub($src2, $src3)", - []>; - -let isPredicated = 1 in -def SUB_rr_cdnPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if ($src1.new) $dst = sub($src2, $src3)", - []>; - -let isPredicated = 1 in -def SUB_rr_cdnNotPt : ALU32_rr<(outs IntRegs:$dst), - (ins PredRegs:$src1, IntRegs:$src2, IntRegs:$src3), - "if (!$src1.new) $dst = sub($src2, $src3)", - []>; - - // Conditional transfer. let neverHasSideEffects = 1, isPredicated = 1 in def TFR_cPt : ALU32_rr<(outs IntRegs:$dst), (ins PredRegs:$src1, IntRegs:$src2), @@ -3546,4 +3445,31 @@ include "HexagonInstrInfoV5.td" // V5 Instructions - //===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// Generate mapping table to relate non-predicate instructions with their +// predicated formats - true and false. 
+// + +def getPredOpcode : InstrMapping { + let FilterClass = "PredRel"; + // Instructions with the same BaseOpcode and isNVStore values form a row. + let RowFields = ["BaseOpcode", "isNVStore", "PNewValue"]; + // Instructions with the same predicate sense form a column. + let ColFields = ["PredSense"]; + // The key column is the unpredicated instructions. + let KeyCol = [""]; + // Value columns are PredSense=true and PredSense=false + let ValueCols = [["true"], ["false"]]; +} +//===----------------------------------------------------------------------===// +// Generate mapping table to relate predicated instructions with their .new +// format. +// +def getPredNewOpcode : InstrMapping { + let FilterClass = "PredNewRel"; + let RowFields = ["BaseOpcode", "PredSense", "isNVStore"]; + let ColFields = ["PNewValue"]; + let KeyCol = [""]; + let ValueCols = [["new"]]; +} diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.cpp b/lib/Target/Hexagon/HexagonMachineScheduler.cpp new file mode 100644 index 0000000..0e9ef48 --- /dev/null +++ b/lib/Target/Hexagon/HexagonMachineScheduler.cpp @@ -0,0 +1,681 @@ +//===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// MachineScheduler schedules machine instructions after phi elimination. It +// preserves LiveIntervals so it can be invoked before register allocation. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "misched" + +#include "HexagonMachineScheduler.h" + +#include + +using namespace llvm; + +/// Platform specific modifications to DAG. +void VLIWMachineScheduler::postprocessDAG() { + SUnit* LastSequentialCall = NULL; + // Currently we only catch the situation when compare gets scheduled + // before preceding call. + for (unsigned su = 0, e = SUnits.size(); su != e; ++su) { + // Remember the call. + if (SUnits[su].getInstr()->isCall()) + LastSequentialCall = &(SUnits[su]); + // Look for a compare that defines a predicate. + else if (SUnits[su].getInstr()->isCompare() && LastSequentialCall) + SUnits[su].addPred(SDep(LastSequentialCall, SDep::Barrier)); + } +} + +/// Check if scheduling of this SU is possible +/// in the current packet. +/// It is _not_ precise (statefull), it is more like +/// another heuristic. Many corner cases are figured +/// empirically. +bool VLIWResourceModel::isResourceAvailable(SUnit *SU) { + if (!SU || !SU->getInstr()) + return false; + + // First see if the pipeline could receive this instruction + // in the current cycle. + switch (SU->getInstr()->getOpcode()) { + default: + if (!ResourcesModel->canReserveResources(SU->getInstr())) + return false; + case TargetOpcode::EXTRACT_SUBREG: + case TargetOpcode::INSERT_SUBREG: + case TargetOpcode::SUBREG_TO_REG: + case TargetOpcode::REG_SEQUENCE: + case TargetOpcode::IMPLICIT_DEF: + case TargetOpcode::COPY: + case TargetOpcode::INLINEASM: + break; + } + + // Now see if there are no other dependencies to instructions already + // in the packet. + for (unsigned i = 0, e = Packet.size(); i != e; ++i) { + if (Packet[i]->Succs.size() == 0) + continue; + for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(), + E = Packet[i]->Succs.end(); I != E; ++I) { + // Since we do not add pseudos to packets, might as well + // ignore order dependencies. 
+ if (I->isCtrl()) + continue; + + if (I->getSUnit() == SU) + return false; + } + } + return true; +} + +/// Keep track of available resources. +bool VLIWResourceModel::reserveResources(SUnit *SU) { + bool startNewCycle = false; + // Artificially reset state. + if (!SU) { + ResourcesModel->clearResources(); + Packet.clear(); + TotalPackets++; + return false; + } + // If this SU does not fit in the packet + // start a new one. + if (!isResourceAvailable(SU)) { + ResourcesModel->clearResources(); + Packet.clear(); + TotalPackets++; + startNewCycle = true; + } + + switch (SU->getInstr()->getOpcode()) { + default: + ResourcesModel->reserveResources(SU->getInstr()); + break; + case TargetOpcode::EXTRACT_SUBREG: + case TargetOpcode::INSERT_SUBREG: + case TargetOpcode::SUBREG_TO_REG: + case TargetOpcode::REG_SEQUENCE: + case TargetOpcode::IMPLICIT_DEF: + case TargetOpcode::KILL: + case TargetOpcode::PROLOG_LABEL: + case TargetOpcode::EH_LABEL: + case TargetOpcode::COPY: + case TargetOpcode::INLINEASM: + break; + } + Packet.push_back(SU); + +#ifndef NDEBUG + DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n"); + for (unsigned i = 0, e = Packet.size(); i != e; ++i) { + DEBUG(dbgs() << "\t[" << i << "] SU("); + DEBUG(dbgs() << Packet[i]->NodeNum << ")\t"); + DEBUG(Packet[i]->getInstr()->dump()); + } +#endif + + // If packet is now full, reset the state so in the next cycle + // we start fresh. + if (Packet.size() >= SchedModel->getIssueWidth()) { + ResourcesModel->clearResources(); + Packet.clear(); + TotalPackets++; + startNewCycle = true; + } + + return startNewCycle; +} + +/// schedule - Called back from MachineScheduler::runOnMachineFunction +/// after setting up the current scheduling region. [RegionBegin, RegionEnd) +/// only includes instructions that have DAG nodes, not scheduling boundaries. +void VLIWMachineScheduler::schedule() { + DEBUG(dbgs() + << "********** MI Converging Scheduling VLIW BB#" << BB->getNumber() + << " " << BB->getName() + << " in_func " << BB->getParent()->getFunction()->getName() + << " at loop depth " << MLI.getLoopDepth(BB) + << " \n"); + + buildDAGWithRegPressure(); + + // Postprocess the DAG to add platform specific artificial dependencies. + postprocessDAG(); + + // To view Height/Depth correctly, they should be accessed at least once. + DEBUG(unsigned maxH = 0; + for (unsigned su = 0, e = SUnits.size(); su != e; ++su) + if (SUnits[su].getHeight() > maxH) + maxH = SUnits[su].getHeight(); + dbgs() << "Max Height " << maxH << "\n";); + DEBUG(unsigned maxD = 0; + for (unsigned su = 0, e = SUnits.size(); su != e; ++su) + if (SUnits[su].getDepth() > maxD) + maxD = SUnits[su].getDepth(); + dbgs() << "Max Depth " << maxD << "\n";); + DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) + SUnits[su].dumpAll(this)); + + initQueues(); + + bool IsTopNode = false; + while (SUnit *SU = SchedImpl->pickNode(IsTopNode)) { + if (!checkSchedLimit()) + break; + + scheduleMI(SU, IsTopNode); + + updateQueues(SU, IsTopNode); + } + assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone."); + + placeDebugValues(); +} + +void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) { + DAG = static_cast(dag); + SchedModel = DAG->getSchedModel(); + TRI = DAG->TRI; + Top.init(DAG, SchedModel); + Bot.init(DAG, SchedModel); + + // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or + // are disabled, then these HazardRecs will be disabled. 
+ const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries(); + const TargetMachine &TM = DAG->MF.getTarget(); + Top.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); + Bot.HazardRec = TM.getInstrInfo()->CreateTargetMIHazardRecognizer(Itin, DAG); + + Top.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel()); + Bot.ResourceModel = new VLIWResourceModel(TM, DAG->getSchedModel()); + + assert((!llvm::ForceTopDown || !llvm::ForceBottomUp) && + "-misched-topdown incompatible with -misched-bottomup"); +} + +void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) { + if (SU->isScheduled) + return; + + for (SUnit::succ_iterator I = SU->Preds.begin(), E = SU->Preds.end(); + I != E; ++I) { + unsigned PredReadyCycle = I->getSUnit()->TopReadyCycle; + unsigned MinLatency = I->getMinLatency(); +#ifndef NDEBUG + Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency); +#endif + if (SU->TopReadyCycle < PredReadyCycle + MinLatency) + SU->TopReadyCycle = PredReadyCycle + MinLatency; + } + Top.releaseNode(SU, SU->TopReadyCycle); +} + +void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) { + if (SU->isScheduled) + return; + + assert(SU->getInstr() && "Scheduled SUnit must have instr"); + + for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); + I != E; ++I) { + unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle; + unsigned MinLatency = I->getMinLatency(); +#ifndef NDEBUG + Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency); +#endif + if (SU->BotReadyCycle < SuccReadyCycle + MinLatency) + SU->BotReadyCycle = SuccReadyCycle + MinLatency; + } + Bot.releaseNode(SU, SU->BotReadyCycle); +} + +/// Does this SU have a hazard within the current instruction group. +/// +/// The scheduler supports two modes of hazard recognition. The first is the +/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that +/// supports highly complicated in-order reservation tables +/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic. +/// +/// The second is a streamlined mechanism that checks for hazards based on +/// simple counters that the scheduler itself maintains. It explicitly checks +/// for instruction dispatch limitations, including the number of micro-ops that +/// can dispatch per cycle. +/// +/// TODO: Also check whether the SU must start a new group. +bool ConvergingVLIWScheduler::SchedBoundary::checkHazard(SUnit *SU) { + if (HazardRec->isEnabled()) + return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard; + + unsigned uops = SchedModel->getNumMicroOps(SU->getInstr()); + if (IssueCount + uops > SchedModel->getIssueWidth()) + return true; + + return false; +} + +void ConvergingVLIWScheduler::SchedBoundary::releaseNode(SUnit *SU, + unsigned ReadyCycle) { + if (ReadyCycle < MinReadyCycle) + MinReadyCycle = ReadyCycle; + + // Check for interlocks first. For the purpose of other heuristics, an + // instruction that cannot issue appears as if it's not in the ReadyQueue. + if (ReadyCycle > CurrCycle || checkHazard(SU)) + + Pending.push(SU); + else + Available.push(SU); +} + +/// Move the boundary of scheduled code by one cycle. +void ConvergingVLIWScheduler::SchedBoundary::bumpCycle() { + unsigned Width = SchedModel->getIssueWidth(); + IssueCount = (IssueCount <= Width) ? 
0 : IssueCount - Width; + + assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized"); + unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle); + + if (!HazardRec->isEnabled()) { + // Bypass HazardRec virtual calls. + CurrCycle = NextCycle; + } else { + // Bypass getHazardType calls in case of long latency. + for (; CurrCycle != NextCycle; ++CurrCycle) { + if (isTop()) + HazardRec->AdvanceCycle(); + else + HazardRec->RecedeCycle(); + } + } + CheckPending = true; + + DEBUG(dbgs() << "*** " << Available.getName() << " cycle " + << CurrCycle << '\n'); +} + +/// Move the boundary of scheduled code by one SUnit. +void ConvergingVLIWScheduler::SchedBoundary::bumpNode(SUnit *SU) { + bool startNewCycle = false; + + // Update the reservation table. + if (HazardRec->isEnabled()) { + if (!isTop() && SU->isCall) { + // Calls are scheduled with their preceding instructions. For bottom-up + // scheduling, clear the pipeline state before emitting. + HazardRec->Reset(); + } + HazardRec->EmitInstruction(SU); + } + + // Update DFA model. + startNewCycle = ResourceModel->reserveResources(SU); + + // Check the instruction group dispatch limit. + // TODO: Check if this SU must end a dispatch group. + IssueCount += SchedModel->getNumMicroOps(SU->getInstr()); + if (startNewCycle) { + DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n'); + bumpCycle(); + } + else + DEBUG(dbgs() << "*** IssueCount " << IssueCount + << " at cycle " << CurrCycle << '\n'); +} + +/// Release pending ready nodes in to the available queue. This makes them +/// visible to heuristics. +void ConvergingVLIWScheduler::SchedBoundary::releasePending() { + // If the available queue is empty, it is safe to reset MinReadyCycle. + if (Available.empty()) + MinReadyCycle = UINT_MAX; + + // Check to see if any of the pending instructions are ready to issue. If + // so, add them to the available queue. + for (unsigned i = 0, e = Pending.size(); i != e; ++i) { + SUnit *SU = *(Pending.begin()+i); + unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle; + + if (ReadyCycle < MinReadyCycle) + MinReadyCycle = ReadyCycle; + + if (ReadyCycle > CurrCycle) + continue; + + if (checkHazard(SU)) + continue; + + Available.push(SU); + Pending.remove(Pending.begin()+i); + --i; --e; + } + CheckPending = false; +} + +/// Remove SU from the ready set for this boundary. +void ConvergingVLIWScheduler::SchedBoundary::removeReady(SUnit *SU) { + if (Available.isInQueue(SU)) + Available.remove(Available.find(SU)); + else { + assert(Pending.isInQueue(SU) && "bad ready count"); + Pending.remove(Pending.find(SU)); + } +} + +/// If this queue only has one ready candidate, return it. As a side effect, +/// advance the cycle until at least one node is ready. If multiple instructions +/// are ready, return NULL. 
+SUnit *ConvergingVLIWScheduler::SchedBoundary::pickOnlyChoice() { + if (CheckPending) + releasePending(); + + for (unsigned i = 0; Available.empty(); ++i) { + assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) && + "permanent hazard"); (void)i; + ResourceModel->reserveResources(0); + bumpCycle(); + releasePending(); + } + if (Available.size() == 1) + return *Available.begin(); + return NULL; +} + +#ifndef NDEBUG +void ConvergingVLIWScheduler::traceCandidate(const char *Label, + const ReadyQueue &Q, + SUnit *SU, PressureElement P) { + dbgs() << Label << " " << Q.getName() << " "; + if (P.isValid()) + dbgs() << TRI->getRegPressureSetName(P.PSetID) << ":" << P.UnitIncrease + << " "; + else + dbgs() << " "; + SU->dump(DAG); +} +#endif + +/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor +/// of SU, return it, otherwise return null. +static SUnit *getSingleUnscheduledPred(SUnit *SU) { + SUnit *OnlyAvailablePred = 0; + for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); + I != E; ++I) { + SUnit &Pred = *I->getSUnit(); + if (!Pred.isScheduled) { + // We found an available, but not scheduled, predecessor. If it's the + // only one we have found, keep track of it... otherwise give up. + if (OnlyAvailablePred && OnlyAvailablePred != &Pred) + return 0; + OnlyAvailablePred = &Pred; + } + } + return OnlyAvailablePred; +} + +/// getSingleUnscheduledSucc - If there is exactly one unscheduled successor +/// of SU, return it, otherwise return null. +static SUnit *getSingleUnscheduledSucc(SUnit *SU) { + SUnit *OnlyAvailableSucc = 0; + for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); + I != E; ++I) { + SUnit &Succ = *I->getSUnit(); + if (!Succ.isScheduled) { + // We found an available, but not scheduled, successor. If it's the + // only one we have found, keep track of it... otherwise give up. + if (OnlyAvailableSucc && OnlyAvailableSucc != &Succ) + return 0; + OnlyAvailableSucc = &Succ; + } + } + return OnlyAvailableSucc; +} + +// Constants used to denote relative importance of +// heuristic components for cost computation. +static const unsigned PriorityOne = 200; +static const unsigned PriorityTwo = 100; +static const unsigned PriorityThree = 50; +static const unsigned PriorityFour = 20; +static const unsigned ScaleTwo = 10; +static const unsigned FactorOne = 2; + +/// Single point to compute overall scheduling cost. +/// TODO: More heuristics will be used soon. +int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU, + SchedCandidate &Candidate, + RegPressureDelta &Delta, + bool verbose) { + // Initial trivial priority. + int ResCount = 1; + + // Do not waste time on a node that is already scheduled. + if (!SU || SU->isScheduled) + return ResCount; + + // Forced priority is high. + if (SU->isScheduleHigh) + ResCount += PriorityOne; + + // Critical path first. + if (Q.getID() == TopQID) { + ResCount += (SU->getHeight() * ScaleTwo); + + // If resources are available for it, multiply the + // chance of scheduling. + if (Top.ResourceModel->isResourceAvailable(SU)) + ResCount <<= FactorOne; + } else { + ResCount += (SU->getDepth() * ScaleTwo); + + // If resources are available for it, multiply the + // chance of scheduling. + if (Bot.ResourceModel->isResourceAvailable(SU)) + ResCount <<= FactorOne; + } + + unsigned NumNodesBlocking = 0; + if (Q.getID() == TopQID) { + // How many SUs does it block from scheduling? + // Look at all of the successors of this node. 
+ // Count the number of nodes that + // this node is the sole unscheduled node for. + for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); + I != E; ++I) + if (getSingleUnscheduledPred(I->getSUnit()) == SU) + ++NumNodesBlocking; + } else { + // How many unscheduled predecessors block this node? + for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); + I != E; ++I) + if (getSingleUnscheduledSucc(I->getSUnit()) == SU) + ++NumNodesBlocking; + } + ResCount += (NumNodesBlocking * ScaleTwo); + + // Factor in reg pressure as a heuristic. + ResCount -= (Delta.Excess.UnitIncrease*PriorityThree); + ResCount -= (Delta.CriticalMax.UnitIncrease*PriorityThree); + + DEBUG(if (verbose) dbgs() << " Total(" << ResCount << ")"); + + return ResCount; +} + +/// Pick the best candidate from the top queue. +/// +/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during +/// DAG building. To adjust for the current scheduling location we need to +/// maintain the number of vreg uses remaining to be top-scheduled. +ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler:: +pickNodeFromQueue(ReadyQueue &Q, const RegPressureTracker &RPTracker, + SchedCandidate &Candidate) { + DEBUG(Q.dump()); + + // getMaxPressureDelta temporarily modifies the tracker. + RegPressureTracker &TempTracker = const_cast(RPTracker); + + // BestSU remains NULL if no top candidates beat the best existing candidate. + CandResult FoundCandidate = NoCand; + for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) { + RegPressureDelta RPDelta; + TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta, + DAG->getRegionCriticalPSets(), + DAG->getRegPressure().MaxSetPressure); + + int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false); + + // Initialize the candidate if needed. + if (!Candidate.SU) { + Candidate.SU = *I; + Candidate.RPDelta = RPDelta; + Candidate.SCost = CurrentCost; + FoundCandidate = NodeOrder; + continue; + } + + // Best cost. + if (CurrentCost > Candidate.SCost) { + DEBUG(traceCandidate("CCAND", Q, *I)); + Candidate.SU = *I; + Candidate.RPDelta = RPDelta; + Candidate.SCost = CurrentCost; + FoundCandidate = BestCost; + continue; + } + + // Fall through to original instruction order. + // Only consider node order if Candidate was chosen from this Q. + if (FoundCandidate == NoCand) + continue; + } + return FoundCandidate; +} + +/// Pick the best candidate node from either the top or bottom queue. +SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) { + // Schedule as far as possible in the direction of no choice. This is most + // efficient, but also provides the best heuristics for CriticalPSets. + if (SUnit *SU = Bot.pickOnlyChoice()) { + IsTopNode = false; + return SU; + } + if (SUnit *SU = Top.pickOnlyChoice()) { + IsTopNode = true; + return SU; + } + SchedCandidate BotCand; + // Prefer bottom scheduling when heuristics are silent. + CandResult BotResult = pickNodeFromQueue(Bot.Available, + DAG->getBotRPTracker(), BotCand); + assert(BotResult != NoCand && "failed to find the first candidate"); + + // If either Q has a single candidate that provides the least increase in + // Excess pressure, we can immediately schedule from that Q. + // + // RegionCriticalPSets summarizes the pressure within the scheduled region and + // affects picking from either Q. 
If scheduling in one direction must + // increase pressure for one of the excess PSets, then schedule in that + // direction first to provide more freedom in the other direction. + if (BotResult == SingleExcess || BotResult == SingleCritical) { + IsTopNode = false; + return BotCand.SU; + } + // Check if the top Q has a better candidate. + SchedCandidate TopCand; + CandResult TopResult = pickNodeFromQueue(Top.Available, + DAG->getTopRPTracker(), TopCand); + assert(TopResult != NoCand && "failed to find the first candidate"); + + if (TopResult == SingleExcess || TopResult == SingleCritical) { + IsTopNode = true; + return TopCand.SU; + } + // If either Q has a single candidate that minimizes pressure above the + // original region's pressure pick it. + if (BotResult == SingleMax) { + IsTopNode = false; + return BotCand.SU; + } + if (TopResult == SingleMax) { + IsTopNode = true; + return TopCand.SU; + } + if (TopCand.SCost > BotCand.SCost) { + IsTopNode = true; + return TopCand.SU; + } + // Otherwise prefer the bottom candidate in node order. + IsTopNode = false; + return BotCand.SU; +} + +/// Pick the best node to balance the schedule. Implements MachineSchedStrategy. +SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) { + if (DAG->top() == DAG->bottom()) { + assert(Top.Available.empty() && Top.Pending.empty() && + Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage"); + return NULL; + } + SUnit *SU; + if (llvm::ForceTopDown) { + SU = Top.pickOnlyChoice(); + if (!SU) { + SchedCandidate TopCand; + CandResult TopResult = + pickNodeFromQueue(Top.Available, DAG->getTopRPTracker(), TopCand); + assert(TopResult != NoCand && "failed to find the first candidate"); + (void)TopResult; + SU = TopCand.SU; + } + IsTopNode = true; + } else if (llvm::ForceBottomUp) { + SU = Bot.pickOnlyChoice(); + if (!SU) { + SchedCandidate BotCand; + CandResult BotResult = + pickNodeFromQueue(Bot.Available, DAG->getBotRPTracker(), BotCand); + assert(BotResult != NoCand && "failed to find the first candidate"); + (void)BotResult; + SU = BotCand.SU; + } + IsTopNode = false; + } else { + SU = pickNodeBidrectional(IsTopNode); + } + if (SU->isTopReady()) + Top.removeReady(SU); + if (SU->isBottomReady()) + Bot.removeReady(SU); + + DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom") + << " Scheduling Instruction in cycle " + << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << '\n'; + SU->dump(DAG)); + return SU; +} + +/// Update the scheduler's state after scheduling a node. This is the same node +/// that was just returned by pickNode(). However, VLIWMachineScheduler needs +/// to update it's state based on the current cycle before MachineSchedStrategy +/// does. +void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) { + if (IsTopNode) { + SU->TopReadyCycle = Top.CurrCycle; + Top.bumpNode(SU); + } else { + SU->BotReadyCycle = Bot.CurrCycle; + Bot.bumpNode(SU); + } +} + diff --git a/lib/Target/Hexagon/HexagonMachineScheduler.h b/lib/Target/Hexagon/HexagonMachineScheduler.h new file mode 100644 index 0000000..fe0242a --- /dev/null +++ b/lib/Target/Hexagon/HexagonMachineScheduler.h @@ -0,0 +1,244 @@ +//===-- HexagonMachineScheduler.h - Custom Hexagon MI scheduler. ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Custom Hexagon MI scheduler. 
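+// This header declares VLIWResourceModel, a thin wrapper around the target's
+// DFAPacketizer that tracks how instructions fill the packet currently being
+// formed, and ConvergingVLIWScheduler, a MachineSchedStrategy that picks from
+// the top and bottom ready queues using the cost model in SchedulingCost().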
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HEXAGONASMPRINTER_H
+#define HEXAGONASMPRINTER_H
+
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/MachineScheduler.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/RegisterPressure.h"
+#include "llvm/CodeGen/ResourcePriorityQueue.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/PriorityQueue.h"
+
+using namespace llvm;
+
+namespace llvm {
+//===----------------------------------------------------------------------===//
+// ConvergingVLIWScheduler - Implementation of the standard
+// MachineSchedStrategy.
+//===----------------------------------------------------------------------===//
+
+class VLIWResourceModel {
+  /// ResourcesModel - Represents VLIW state.
+  /// Not limited to VLIW targets per se, but assumes
+  /// definition of DFA by a target.
+  DFAPacketizer *ResourcesModel;
+
+  const TargetSchedModel *SchedModel;
+
+  /// Local packet/bundle model. Purely
+  /// internal to the MI scheduler at the time.
+  std::vector<SUnit*> Packet;
+
+  /// Total packets created.
+  unsigned TotalPackets;
+
+public:
+VLIWResourceModel(const TargetMachine &TM, const TargetSchedModel *SM) :
+    SchedModel(SM), TotalPackets(0) {
+  ResourcesModel = TM.getInstrInfo()->CreateTargetScheduleState(&TM,NULL);
+
+  // This hard requirement could be relaxed,
+  // but for now do not let it proceed.
+  assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");
+
+  Packet.resize(SchedModel->getIssueWidth());
+  Packet.clear();
+  ResourcesModel->clearResources();
+  }
+
+  ~VLIWResourceModel() {
+    delete ResourcesModel;
+  }
+
+  void resetPacketState() {
+    Packet.clear();
+  }
+
+  void resetDFA() {
+    ResourcesModel->clearResources();
+  }
+
+  void reset() {
+    Packet.clear();
+    ResourcesModel->clearResources();
+  }
+
+  bool isResourceAvailable(SUnit *SU);
+  bool reserveResources(SUnit *SU);
+  unsigned getTotalPackets() const { return TotalPackets; }
+};
+
+/// Extend the standard ScheduleDAGMI to provide more context and override the
+/// top-level schedule() driver.
+class VLIWMachineScheduler : public ScheduleDAGMI {
+public:
+  VLIWMachineScheduler(MachineSchedContext *C, MachineSchedStrategy *S):
+    ScheduleDAGMI(C, S) {}
+
+  /// Schedule - This is called back from ScheduleDAGInstrs::Run() when it's
+  /// time to do some work.
+  virtual void schedule();
+  /// Perform platform specific DAG postprocessing.
+  void postprocessDAG();
+};
+
+/// ConvergingVLIWScheduler shrinks the unscheduled zone using heuristics
+/// to balance the schedule.
+class ConvergingVLIWScheduler : public MachineSchedStrategy {
+
+  /// Store the state used by ConvergingVLIWScheduler heuristics, required
+  /// for the lifetime of one invocation of pickNode().
+  struct SchedCandidate {
+    // The best SUnit candidate.
+    SUnit *SU;
+
+    // Register pressure values for the best candidate.
+    RegPressureDelta RPDelta;
+
+    // Best scheduling cost.
+    int SCost;
+
+    SchedCandidate(): SU(NULL), SCost(0) {}
+  };
+  /// Represent the type of SchedCandidate found within a single queue.
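+  /// NodeOrder marks a candidate taken simply because it was the first one
+  /// visited in queue order, and BestCost marks one that beat the previous
+  /// best SchedulingCost() value; the Single* results correspond to the
+  /// single-candidate register-pressure cases tested in pickNodeBidrectional().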
+ enum CandResult { + NoCand, NodeOrder, SingleExcess, SingleCritical, SingleMax, MultiPressure, + BestCost}; + + /// Each Scheduling boundary is associated with ready queues. It tracks the + /// current cycle in whichever direction at has moved, and maintains the state + /// of "hazards" and other interlocks at the current cycle. + struct SchedBoundary { + VLIWMachineScheduler *DAG; + const TargetSchedModel *SchedModel; + + ReadyQueue Available; + ReadyQueue Pending; + bool CheckPending; + + ScheduleHazardRecognizer *HazardRec; + VLIWResourceModel *ResourceModel; + + unsigned CurrCycle; + unsigned IssueCount; + + /// MinReadyCycle - Cycle of the soonest available instruction. + unsigned MinReadyCycle; + + // Remember the greatest min operand latency. + unsigned MaxMinLatency; + + /// Pending queues extend the ready queues with the same ID and the + /// PendingFlag set. + SchedBoundary(unsigned ID, const Twine &Name): + DAG(0), SchedModel(0), Available(ID, Name+".A"), + Pending(ID << ConvergingVLIWScheduler::LogMaxQID, Name+".P"), + CheckPending(false), HazardRec(0), ResourceModel(0), + CurrCycle(0), IssueCount(0), + MinReadyCycle(UINT_MAX), MaxMinLatency(0) {} + + ~SchedBoundary() { + delete ResourceModel; + delete HazardRec; + } + + void init(VLIWMachineScheduler *dag, const TargetSchedModel *smodel) { + DAG = dag; + SchedModel = smodel; + } + + bool isTop() const { + return Available.getID() == ConvergingVLIWScheduler::TopQID; + } + + bool checkHazard(SUnit *SU); + + void releaseNode(SUnit *SU, unsigned ReadyCycle); + + void bumpCycle(); + + void bumpNode(SUnit *SU); + + void releasePending(); + + void removeReady(SUnit *SU); + + SUnit *pickOnlyChoice(); + }; + + VLIWMachineScheduler *DAG; + const TargetSchedModel *SchedModel; + const TargetRegisterInfo *TRI; + + // State of the top and bottom scheduled instruction boundaries. 
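+  // Top fills cycles forward from the start of the region while Bot fills
+  // them backward from the end; pickNode() keeps drawing from whichever
+  // boundary the heuristics favor until DAG->top() meets DAG->bottom().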
+ SchedBoundary Top; + SchedBoundary Bot; + +public: + /// SUnit::NodeQueueId: 0 (none), 1 (top), 2 (bot), 3 (both) + enum { + TopQID = 1, + BotQID = 2, + LogMaxQID = 2 + }; + + ConvergingVLIWScheduler(): + DAG(0), SchedModel(0), TRI(0), Top(TopQID, "TopQ"), Bot(BotQID, "BotQ") {} + + virtual void initialize(ScheduleDAGMI *dag); + + virtual SUnit *pickNode(bool &IsTopNode); + + virtual void schedNode(SUnit *SU, bool IsTopNode); + + virtual void releaseTopNode(SUnit *SU); + + virtual void releaseBottomNode(SUnit *SU); + + unsigned ReportPackets() { + return Top.ResourceModel->getTotalPackets() + + Bot.ResourceModel->getTotalPackets(); + } + +protected: + SUnit *pickNodeBidrectional(bool &IsTopNode); + + int SchedulingCost(ReadyQueue &Q, + SUnit *SU, SchedCandidate &Candidate, + RegPressureDelta &Delta, bool verbose); + + CandResult pickNodeFromQueue(ReadyQueue &Q, + const RegPressureTracker &RPTracker, + SchedCandidate &Candidate); +#ifndef NDEBUG + void traceCandidate(const char *Label, const ReadyQueue &Q, SUnit *SU, + PressureElement P = PressureElement()); +#endif +}; + +} // namespace + + +#endif diff --git a/lib/Target/Hexagon/HexagonNewValueJump.cpp b/lib/Target/Hexagon/HexagonNewValueJump.cpp index 7ece408..1e91c39 100644 --- a/lib/Target/Hexagon/HexagonNewValueJump.cpp +++ b/lib/Target/Hexagon/HexagonNewValueJump.cpp @@ -337,7 +337,7 @@ bool HexagonNewValueJump::runOnMachineFunction(MachineFunction &MF) { DEBUG(dbgs() << "********** Hexagon New Value Jump **********\n" << "********** Function: " - << MF.getFunction()->getName() << "\n"); + << MF.getName() << "\n"); #if 0 // for now disable this, if we move NewValueJump before register diff --git a/lib/Target/Hexagon/HexagonPeephole.cpp b/lib/Target/Hexagon/HexagonPeephole.cpp index 55cbc09..a295015 100644 --- a/lib/Target/Hexagon/HexagonPeephole.cpp +++ b/lib/Target/Hexagon/HexagonPeephole.cpp @@ -109,6 +109,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { MRI = &MF.getRegInfo(); DenseMap PeepholeMap; + DenseMap > PeepholeDoubleRegsMap; if (DisableHexagonPeephole) return false; @@ -117,6 +118,7 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { MBBb != MBBe; ++MBBb) { MachineBasicBlock* MBB = MBBb; PeepholeMap.clear(); + PeepholeDoubleRegsMap.clear(); // Traverse the basic block. for (MachineBasicBlock::iterator MII = MBB->begin(); MII != MBB->end(); @@ -140,6 +142,24 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { } } + // Look for this sequence below + // %vregDoubleReg1 = LSRd_ri %vregDoubleReg0, 32 + // %vregIntReg = COPY %vregDoubleReg1:subreg_loreg. + // and convert into + // %vregIntReg = COPY %vregDoubleReg0:subreg_hireg. + if (MI->getOpcode() == Hexagon::LSRd_ri) { + assert(MI->getNumOperands() == 3); + MachineOperand &Dst = MI->getOperand(0); + MachineOperand &Src1 = MI->getOperand(1); + MachineOperand &Src2 = MI->getOperand(2); + if (Src2.getImm() != 32) + continue; + unsigned DstReg = Dst.getReg(); + unsigned SrcReg = Src1.getReg(); + PeepholeDoubleRegsMap[DstReg] = + std::make_pair(*&SrcReg, 1/*Hexagon::subreg_hireg*/); + } + // Look for P=NOT(P). if (!DisablePNotP && (MI->getOpcode() == Hexagon::NOT_p)) { @@ -178,6 +198,21 @@ bool HexagonPeephole::runOnMachineFunction(MachineFunction &MF) { // Change the 1st operand. 
MI->RemoveOperand(1); MI->addOperand(MachineOperand::CreateReg(PeepholeSrc, false)); + } else { + DenseMap >::iterator DI = + PeepholeDoubleRegsMap.find(SrcReg); + if (DI != PeepholeDoubleRegsMap.end()) { + std::pair PeepholeSrc = DI->second; + MI->RemoveOperand(1); + MI->addOperand(MachineOperand::CreateReg(PeepholeSrc.first, + false /*isDef*/, + false /*isImp*/, + false /*isKill*/, + false /*isDead*/, + false /*isUndef*/, + false /*isEarlyClobber*/, + PeepholeSrc.second)); + } } } } diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp index 2c23674..3742486 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -310,6 +310,58 @@ void HexagonRegisterInfo::getInitialFrameState(std::vector Moves.push_back(MachineMove(0, Dst, Src)); } +// Get the weight in units of pressure for this register class. +const RegClassWeight & +HexagonRegisterInfo::getRegClassWeight(const TargetRegisterClass *RC) const { + // Each TargetRegisterClass has a per register weight, and weight + // limit which must be less than the limits of its pressure sets. + static const RegClassWeight RCWeightTable[] = { + {1, 32}, // IntRegs + {1, 8}, // CRRegs + {1, 4}, // PredRegs + {2, 16}, // DoubleRegs + {0, 0} }; + return RCWeightTable[RC->getID()]; +} + +/// Get the number of dimensions of register pressure. +unsigned HexagonRegisterInfo::getNumRegPressureSets() const { + return 4; +} + +/// Get the name of this register unit pressure set. +const char *HexagonRegisterInfo::getRegPressureSetName(unsigned Idx) const { + static const char *const RegPressureSetName[] = { + "IntRegsRegSet", + "CRRegsRegSet", + "PredRegsRegSet", + "DoubleRegsRegSet" + }; + assert((Idx < 4) && "Index out of bounds"); + return RegPressureSetName[Idx]; +} + +/// Get the register unit pressure limit for this dimension. +/// This limit must be adjusted dynamically for reserved registers. +unsigned HexagonRegisterInfo::getRegPressureSetLimit(unsigned Idx) const { + static const int RegPressureLimit [] = { 16, 4, 2, 8 }; + assert((Idx < 4) && "Index out of bounds"); + return RegPressureLimit[Idx]; +} + +const int* +HexagonRegisterInfo::getRegClassPressureSets(const TargetRegisterClass *RC) + const { + static const int RCSetsTable[] = { + 0, -1, // IntRegs + 1, -1, // CRRegs + 2, -1, // PredRegs + 0, -1, // DoubleRegs + -1 }; + static const unsigned RCSetStartTable[] = { 0, 2, 4, 6, 0 }; + unsigned SetListStart = RCSetStartTable[RC->getID()]; + return &RCSetsTable[SetListStart]; +} unsigned HexagonRegisterInfo::getEHExceptionRegister() const { llvm_unreachable("What is the exception register"); } diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.h b/lib/Target/Hexagon/HexagonRegisterInfo.h index 85355ae..8820d13 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.h +++ b/lib/Target/Hexagon/HexagonRegisterInfo.h @@ -87,6 +87,11 @@ struct HexagonRegisterInfo : public HexagonGenRegisterInfo { // Exception handling queries. 
unsigned getEHExceptionRegister() const; unsigned getEHHandlerRegister() const; + const RegClassWeight &getRegClassWeight(const TargetRegisterClass *RC) const; + unsigned getNumRegPressureSets() const; + const char *getRegPressureSetName(unsigned Idx) const; + unsigned getRegPressureSetLimit(unsigned Idx) const; + const int* getRegClassPressureSets(const TargetRegisterClass *RC) const; }; } // end namespace llvm diff --git a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp index 2468f0b..4d93dd1 100644 --- a/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp +++ b/lib/Target/Hexagon/HexagonRemoveSZExtArgs.cpp @@ -50,7 +50,7 @@ bool HexagonRemoveExtendArgs::runOnFunction(Function &F) { unsigned Idx = 1; for (Function::arg_iterator AI = F.arg_begin(), AE = F.arg_end(); AI != AE; ++AI, ++Idx) { - if (F.paramHasAttr(Idx, Attribute::SExt)) { + if (F.getParamAttributes(Idx).hasAttribute(Attributes::SExt)) { Argument* Arg = AI; if (!isa(Arg->getType())) { for (Instruction::use_iterator UI = Arg->use_begin(); diff --git a/lib/Target/Hexagon/HexagonSchedule.td b/lib/Target/Hexagon/HexagonSchedule.td index d1076b8..b5ff69a 100644 --- a/lib/Target/Hexagon/HexagonSchedule.td +++ b/lib/Target/Hexagon/HexagonSchedule.td @@ -47,6 +47,7 @@ def HexagonModel : SchedMachineModel { // Max issue per cycle == bundle width. let IssueWidth = 4; let Itineraries = HexagonItineraries; + let LoadLatency = 1; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/Hexagon/HexagonScheduleV4.td b/lib/Target/Hexagon/HexagonScheduleV4.td index 9b41126..5668ae8 100644 --- a/lib/Target/Hexagon/HexagonScheduleV4.td +++ b/lib/Target/Hexagon/HexagonScheduleV4.td @@ -58,6 +58,7 @@ def HexagonModelV4 : SchedMachineModel { // Max issue per cycle == bundle width. let IssueWidth = 4; let Itineraries = HexagonItinerariesV4; + let LoadLatency = 1; } //===----------------------------------------------------------------------===// diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp index 5d087db..4bacb8f 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -40,28 +40,27 @@ EnableIEEERndNear( HexagonSubtarget::HexagonSubtarget(StringRef TT, StringRef CPU, StringRef FS): HexagonGenSubtargetInfo(TT, CPU, FS), - HexagonArchVersion(V2), CPUString(CPU.str()) { - ParseSubtargetFeatures(CPU, FS); - switch(HexagonArchVersion) { - case HexagonSubtarget::V2: - break; - case HexagonSubtarget::V3: - EnableV3 = true; - break; - case HexagonSubtarget::V4: - break; - case HexagonSubtarget::V5: - break; - default: - // If the programmer has not specified a Hexagon version, default - // to -mv4. + // If the programmer has not specified a Hexagon version, default to -mv4. + if (CPUString.empty()) CPUString = "hexagonv4"; - HexagonArchVersion = HexagonSubtarget::V4; - break; + + if (CPUString == "hexagonv2") { + HexagonArchVersion = V2; + } else if (CPUString == "hexagonv3") { + EnableV3 = true; + HexagonArchVersion = V3; + } else if (CPUString == "hexagonv4") { + HexagonArchVersion = V4; + } else if (CPUString == "hexagonv5") { + HexagonArchVersion = V5; + } else { + llvm_unreachable("Unrecognized Hexagon processor version"); } + ParseSubtargetFeatures(CPUString, FS); + // Initialize scheduling itinerary for the specified CPU. 
InstrItins = getInstrItineraryForCPU(CPUString); diff --git a/lib/Target/Hexagon/HexagonTargetMachine.cpp b/lib/Target/Hexagon/HexagonTargetMachine.cpp index a7b291f..30866e9 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.cpp +++ b/lib/Target/Hexagon/HexagonTargetMachine.cpp @@ -14,6 +14,7 @@ #include "HexagonTargetMachine.h" #include "Hexagon.h" #include "HexagonISelLowering.h" +#include "HexagonMachineScheduler.h" #include "llvm/Module.h" #include "llvm/CodeGen/Passes.h" #include "llvm/PassManager.h" @@ -29,6 +30,11 @@ opt DisableHardwareLoops( "disable-hexagon-hwloops", cl::Hidden, cl::desc("Disable Hardware Loops for Hexagon target")); +static cl:: +opt DisableHexagonMISched("disable-hexagon-misched", + cl::Hidden, cl::ZeroOrMore, cl::init(false), + cl::desc("Disable Hexagon MI Scheduling")); + /// HexagonTargetMachineModule - Note that this is used on hosts that /// cannot link in a library unless there are references into the /// library. In particular, it seems that it is not possible to get @@ -42,6 +48,13 @@ extern "C" void LLVMInitializeHexagonTarget() { RegisterTargetMachine X(TheHexagonTarget); } +static ScheduleDAGInstrs *createVLIWMachineSched(MachineSchedContext *C) { + return new VLIWMachineScheduler(C, new ConvergingVLIWScheduler()); +} + +static MachineSchedRegistry +SchedCustomRegistry("hexagon", "Run Hexagon's custom scheduler", + createVLIWMachineSched); /// HexagonTargetMachine ctor - Create an ILP32 architecture model. /// @@ -55,13 +68,14 @@ HexagonTargetMachine::HexagonTargetMachine(const Target &T, StringRef TT, CodeModel::Model CM, CodeGenOpt::Level OL) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), - DataLayout("e-p:32:32:32-" + DL("e-p:32:32:32-" "i64:64:64-i32:32:32-i16:16:16-i1:32:32-" "f64:64:64-f32:32:32-a0:0-n32") , Subtarget(TT, CPU, FS), InstrInfo(Subtarget), TLInfo(*this), TSInfo(*this), FrameLowering(Subtarget), - InstrItins(&Subtarget.getInstrItineraryData()) { + InstrItins(&Subtarget.getInstrItineraryData()), + STTI(&TLInfo), VTTI(&TLInfo) { setMCUseCFI(false); } @@ -74,7 +88,7 @@ bool HexagonTargetMachine::addPassesForOptimizations(PassManagerBase &PM) { PM.add(createDeadCodeEliminationPass()); PM.add(createConstantPropagationPass()); PM.add(createLoopUnrollPass()); - PM.add(createLoopStrengthReducePass(getTargetLowering())); + PM.add(createLoopStrengthReducePass()); return true; } @@ -83,7 +97,13 @@ namespace { class HexagonPassConfig : public TargetPassConfig { public: HexagonPassConfig(HexagonTargetMachine *TM, PassManagerBase &PM) - : TargetPassConfig(TM, PM) {} + : TargetPassConfig(TM, PM) { + // Enable MI scheduler. + if (!DisableHexagonMISched) { + enablePass(&MachineSchedulerID); + MachineSchedRegistry::setDefault(createVLIWMachineSched); + } + } HexagonTargetMachine &getHexagonTargetMachine() const { return getTM(); diff --git a/lib/Target/Hexagon/HexagonTargetMachine.h b/lib/Target/Hexagon/HexagonTargetMachine.h index 0336965..7a4215c 100644 --- a/lib/Target/Hexagon/HexagonTargetMachine.h +++ b/lib/Target/Hexagon/HexagonTargetMachine.h @@ -20,20 +20,23 @@ #include "HexagonSelectionDAGInfo.h" #include "HexagonFrameLowering.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/Target/TargetTransformImpl.h" namespace llvm { class Module; class HexagonTargetMachine : public LLVMTargetMachine { - const TargetData DataLayout; // Calculates type size & alignment. + const DataLayout DL; // Calculates type size & alignment. 
HexagonSubtarget Subtarget; HexagonInstrInfo InstrInfo; HexagonTargetLowering TLInfo; HexagonSelectionDAGInfo TSInfo; HexagonFrameLowering FrameLowering; const InstrItineraryData* InstrItins; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: HexagonTargetMachine(const Target &T, StringRef TT,StringRef CPU, @@ -68,7 +71,15 @@ public: return &TSInfo; } - virtual const TargetData *getTargetData() const { return &DataLayout; } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } + + virtual const DataLayout *getDataLayout() const { return &DL; } static unsigned getModuleMatchQuality(const Module &M); // Pass Pipeline Configuration. diff --git a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp index 32cc709..f4d7761 100644 --- a/lib/Target/Hexagon/HexagonTargetObjectFile.cpp +++ b/lib/Target/Hexagon/HexagonTargetObjectFile.cpp @@ -16,7 +16,7 @@ #include "HexagonTargetMachine.h" #include "llvm/Function.h" #include "llvm/GlobalVariable.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/DerivedTypes.h" #include "llvm/MC/MCContext.h" #include "llvm/Support/ELF.h" @@ -73,7 +73,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM, if (Kind.isBSS() || Kind.isDataNoRel() || Kind.isCommon()) { Type *Ty = GV->getType()->getElementType(); - return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty)); + return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty)); } return false; diff --git a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp index a03ed03..3d5f685 100644 --- a/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp +++ b/lib/Target/Hexagon/HexagonVLIWPacketizer.cpp @@ -3474,8 +3474,8 @@ bool HexagonPacketizerList::isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) { // 1. Two loads unless they are volatile. // 2. Two stores in V4 unless they are volatile. else if ((DepType == SDep::Order) && - !I->hasVolatileMemoryRef() && - !J->hasVolatileMemoryRef()) { + !I->hasOrderedMemoryRef() && + !J->hasOrderedMemoryRef()) { if (QRI->Subtarget.hasV4TOps() && // hexagonv4 allows dual store. MCIDI.mayStore() && MCIDJ.mayStore()) { diff --git a/lib/Target/Hexagon/HexagonVarargsCallingConvention.h b/lib/Target/Hexagon/HexagonVarargsCallingConvention.h index 9305c27..c607b5d 100644 --- a/lib/Target/Hexagon/HexagonVarargsCallingConvention.h +++ b/lib/Target/Hexagon/HexagonVarargsCallingConvention.h @@ -75,9 +75,9 @@ static bool CC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT, const Type* ArgTy = LocVT.getTypeForEVT(State.getContext()); unsigned Alignment = - State.getTarget().getTargetData()->getABITypeAlignment(ArgTy); + State.getTarget().getDataLayout()->getABITypeAlignment(ArgTy); unsigned Size = - State.getTarget().getTargetData()->getTypeSizeInBits(ArgTy) / 8; + State.getTarget().getDataLayout()->getTypeSizeInBits(ArgTy) / 8; // If it's passed by value, then we need the size of the aggregate not of // the pointer. 
@@ -130,9 +130,9 @@ static bool RetCC_Hexagon32_VarArgs(unsigned ValNo, EVT ValVT, const Type* ArgTy = LocVT.getTypeForEVT(State.getContext()); unsigned Alignment = - State.getTarget().getTargetData()->getABITypeAlignment(ArgTy); + State.getTarget().getDataLayout()->getABITypeAlignment(ArgTy); unsigned Size = - State.getTarget().getTargetData()->getTypeSizeInBits(ArgTy) / 8; + State.getTarget().getDataLayout()->getTypeSizeInBits(ArgTy) / 8; unsigned Offset3 = State.AllocateStack(Size, Alignment); State.addLoc(CCValAssign::getMem(ValNo, ValVT.getSimpleVT(), Offset3, diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp index d6e6c36..86f75d1 100644 --- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp +++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCAsmInfo.cpp @@ -24,7 +24,7 @@ HexagonMCAsmInfo::HexagonMCAsmInfo(const Target &T, StringRef TT) { HasLEB128 = true; PrivateGlobalPrefix = ".L"; - LCOMMDirectiveType = LCOMM::ByteAlignment; + LCOMMDirectiveAlignmentType = LCOMM::ByteAlignment; InlineAsmStart = "# InlineAsm Start"; InlineAsmEnd = "# InlineAsm End"; ZeroDirective = "\t.space\t"; diff --git a/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp b/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp index 38fb0e8..f7809ca 100644 --- a/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp +++ b/lib/Target/MBlaze/AsmParser/MBlazeAsmParser.cpp @@ -44,9 +44,10 @@ class MBlazeAsmParser : public MCTargetAsmParser { bool ParseDirectiveWord(unsigned Size, SMLoc L); - bool MatchAndEmitInstruction(SMLoc IDLoc, + bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out); + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm); /// @name Auto-generated Match Functions /// { @@ -56,12 +57,12 @@ class MBlazeAsmParser : public MCTargetAsmParser { /// } - public: MBlazeAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) : MCTargetAsmParser(), Parser(_Parser) {} - virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc, + virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, SmallVectorImpl &Operands); virtual bool ParseDirective(AsmToken DirectiveID); @@ -313,14 +314,13 @@ static unsigned MatchRegisterName(StringRef Name); /// } // bool MBlazeAsmParser:: -MatchAndEmitInstruction(SMLoc IDLoc, +MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out) { + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm) { MCInst Inst; - SMLoc ErrorLoc; - unsigned ErrorInfo; - - switch (MatchInstructionImpl(Operands, Inst, ErrorInfo)) { + switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, + MatchingInlineAsm)) { default: break; case Match_Success: Out.EmitInstruction(Inst); @@ -329,10 +329,8 @@ MatchAndEmitInstruction(SMLoc IDLoc, return Error(IDLoc, "instruction use requires an option to be enabled"); case Match_MnemonicFail: return Error(IDLoc, "unrecognized instruction mnemonic"); - case Match_ConversionFail: - return Error(IDLoc, "unable to convert operands to instruction"); - case Match_InvalidOperand: - ErrorLoc = IDLoc; + case Match_InvalidOperand: { + SMLoc ErrorLoc = IDLoc; if (ErrorInfo != ~0U) { if (ErrorInfo >= Operands.size()) return Error(IDLoc, "too few operands for instruction"); @@ -343,6 +341,7 @@ MatchAndEmitInstruction(SMLoc IDLoc, return Error(ErrorLoc, "invalid operand for instruction"); } + } llvm_unreachable("Implement any new match types added!"); } @@ -479,7 +478,7 @@ 
ParseOperand(SmallVectorImpl &Operands) { /// Parse an mblaze instruction mnemonic followed by its operands. bool MBlazeAsmParser:: -ParseInstruction(StringRef Name, SMLoc NameLoc, +ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, SmallVectorImpl &Operands) { // The first operands is the token for the instruction name size_t dotLoc = Name.find('.'); diff --git a/lib/Target/MBlaze/CMakeLists.txt b/lib/Target/MBlaze/CMakeLists.txt index 6c3e8b6..0bf93d7 100644 --- a/lib/Target/MBlaze/CMakeLists.txt +++ b/lib/Target/MBlaze/CMakeLists.txt @@ -27,7 +27,6 @@ add_llvm_target(MBlazeCodeGen MBlazeSelectionDAGInfo.cpp MBlazeAsmPrinter.cpp MBlazeMCInstLower.cpp - MBlazeELFWriterInfo.cpp ) add_dependencies(LLVMMBlazeCodeGen intrinsics_gen) diff --git a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp b/lib/Target/MBlaze/MBlazeAsmPrinter.cpp index e9f340f..b679a31 100644 --- a/lib/Target/MBlaze/MBlazeAsmPrinter.cpp +++ b/lib/Target/MBlaze/MBlazeAsmPrinter.cpp @@ -34,7 +34,7 @@ #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" diff --git a/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp b/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp deleted file mode 100644 index e3c7236..0000000 --- a/lib/Target/MBlaze/MBlazeELFWriterInfo.cpp +++ /dev/null @@ -1,107 +0,0 @@ -//===-- MBlazeELFWriterInfo.cpp - ELF Writer Info for the MBlaze backend --===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements ELF writer information for the MBlaze backend. 
-// -//===----------------------------------------------------------------------===// - -#include "MBlazeELFWriterInfo.h" -#include "MBlazeRelocations.h" -#include "llvm/Function.h" -#include "llvm/Support/ELF.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Target/TargetMachine.h" - -using namespace llvm; - -//===----------------------------------------------------------------------===// -// Implementation of the MBlazeELFWriterInfo class -//===----------------------------------------------------------------------===// - -MBlazeELFWriterInfo::MBlazeELFWriterInfo(TargetMachine &TM) - : TargetELFWriterInfo(TM.getTargetData()->getPointerSizeInBits() == 64, - TM.getTargetData()->isLittleEndian()) { -} - -MBlazeELFWriterInfo::~MBlazeELFWriterInfo() {} - -unsigned MBlazeELFWriterInfo::getRelocationType(unsigned MachineRelTy) const { - switch (MachineRelTy) { - case MBlaze::reloc_pcrel_word: - return ELF::R_MICROBLAZE_64_PCREL; - case MBlaze::reloc_absolute_word: - return ELF::R_MICROBLAZE_NONE; - default: - llvm_unreachable("unknown mblaze machine relocation type"); - } -} - -long int MBlazeELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy, - long int Modifier) const { - switch (RelTy) { - case ELF::R_MICROBLAZE_32_PCREL: - return Modifier - 4; - case ELF::R_MICROBLAZE_32: - return Modifier; - default: - llvm_unreachable("unknown mblaze relocation type"); - } -} - -unsigned MBlazeELFWriterInfo::getRelocationTySize(unsigned RelTy) const { - // FIXME: Most of these sizes are guesses based on the name - switch (RelTy) { - case ELF::R_MICROBLAZE_32: - case ELF::R_MICROBLAZE_32_PCREL: - case ELF::R_MICROBLAZE_32_PCREL_LO: - case ELF::R_MICROBLAZE_32_LO: - case ELF::R_MICROBLAZE_SRO32: - case ELF::R_MICROBLAZE_SRW32: - case ELF::R_MICROBLAZE_32_SYM_OP_SYM: - case ELF::R_MICROBLAZE_GOTOFF_32: - return 32; - - case ELF::R_MICROBLAZE_64_PCREL: - case ELF::R_MICROBLAZE_64: - case ELF::R_MICROBLAZE_GOTPC_64: - case ELF::R_MICROBLAZE_GOT_64: - case ELF::R_MICROBLAZE_PLT_64: - case ELF::R_MICROBLAZE_GOTOFF_64: - return 64; - } - - return 0; -} - -bool MBlazeELFWriterInfo::isPCRelativeRel(unsigned RelTy) const { - // FIXME: Most of these are guesses based on the name - switch (RelTy) { - case ELF::R_MICROBLAZE_32_PCREL: - case ELF::R_MICROBLAZE_64_PCREL: - case ELF::R_MICROBLAZE_32_PCREL_LO: - case ELF::R_MICROBLAZE_GOTPC_64: - return true; - } - - return false; -} - -unsigned MBlazeELFWriterInfo::getAbsoluteLabelMachineRelTy() const { - return MBlaze::reloc_absolute_word; -} - -long int MBlazeELFWriterInfo::computeRelocation(unsigned SymOffset, - unsigned RelOffset, - unsigned RelTy) const { - assert((RelTy == ELF::R_MICROBLAZE_32_PCREL || - RelTy == ELF::R_MICROBLAZE_64_PCREL) && - "computeRelocation unknown for this relocation type"); - return SymOffset - (RelOffset + 4); -} diff --git a/lib/Target/MBlaze/MBlazeELFWriterInfo.h b/lib/Target/MBlaze/MBlazeELFWriterInfo.h deleted file mode 100644 index a314eb7..0000000 --- a/lib/Target/MBlaze/MBlazeELFWriterInfo.h +++ /dev/null @@ -1,59 +0,0 @@ -//===-- MBlazeELFWriterInfo.h - ELF Writer Info for MBlaze ------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements ELF writer information for the MBlaze backend. 
-// -//===----------------------------------------------------------------------===// - -#ifndef MBLAZE_ELF_WRITER_INFO_H -#define MBLAZE_ELF_WRITER_INFO_H - -#include "llvm/Target/TargetELFWriterInfo.h" - -namespace llvm { - class TargetMachine; - - class MBlazeELFWriterInfo : public TargetELFWriterInfo { - public: - MBlazeELFWriterInfo(TargetMachine &TM); - virtual ~MBlazeELFWriterInfo(); - - /// getRelocationType - Returns the target specific ELF Relocation type. - /// 'MachineRelTy' contains the object code independent relocation type - virtual unsigned getRelocationType(unsigned MachineRelTy) const; - - /// hasRelocationAddend - True if the target uses an addend in the - /// ELF relocation entry. - virtual bool hasRelocationAddend() const { return false; } - - /// getDefaultAddendForRelTy - Gets the default addend value for a - /// relocation entry based on the target ELF relocation type. - virtual long int getDefaultAddendForRelTy(unsigned RelTy, - long int Modifier = 0) const; - - /// getRelTySize - Returns the size of relocatable field in bits - virtual unsigned getRelocationTySize(unsigned RelTy) const; - - /// isPCRelativeRel - True if the relocation type is pc relative - virtual bool isPCRelativeRel(unsigned RelTy) const; - - /// getJumpTableRelocationTy - Returns the machine relocation type used - /// to reference a jumptable. - virtual unsigned getAbsoluteLabelMachineRelTy() const; - - /// computeRelocation - Some relocatable fields could be relocated - /// directly, avoiding the relocation symbol emission, compute the - /// final relocation value for this symbol. - virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset, - unsigned RelTy) const; - }; - -} // end llvm namespace - -#endif // MBLAZE_ELF_WRITER_INFO_H diff --git a/lib/Target/MBlaze/MBlazeFrameLowering.cpp b/lib/Target/MBlaze/MBlazeFrameLowering.cpp index d2f14a5..9e467bf 100644 --- a/lib/Target/MBlaze/MBlazeFrameLowering.cpp +++ b/lib/Target/MBlaze/MBlazeFrameLowering.cpp @@ -23,7 +23,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" diff --git a/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp b/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp index 91aaf94..1c2e3b2 100644 --- a/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp +++ b/lib/Target/MBlaze/MBlazeIntrinsicInfo.cpp @@ -83,7 +83,7 @@ bool MBlazeIntrinsicInfo::isOverloaded(unsigned IntrID) const { #undef GET_INTRINSIC_OVERLOAD_TABLE } -/// This defines the "getAttributes(ID id)" method. +/// This defines the "getAttributes(LLVMContext &C, ID id)" method. 
#define GET_INTRINSIC_ATTRIBUTES #include "MBlazeGenIntrinsics.inc" #undef GET_INTRINSIC_ATTRIBUTES @@ -104,7 +104,8 @@ Function *MBlazeIntrinsicInfo::getDeclaration(Module *M, unsigned IntrID, Type **Tys, unsigned numTy) const { assert(!isOverloaded(IntrID) && "MBlaze intrinsics are not overloaded"); - AttrListPtr AList = getAttributes((mblazeIntrinsic::ID) IntrID); + AttrListPtr AList = getAttributes(M->getContext(), + (mblazeIntrinsic::ID) IntrID); return cast(M->getOrInsertFunction(getName(IntrID), getType(M->getContext(), IntrID), AList)); diff --git a/lib/Target/MBlaze/MBlazeRegisterInfo.cpp b/lib/Target/MBlaze/MBlazeRegisterInfo.cpp index 46f5207..daa76e8 100644 --- a/lib/Target/MBlaze/MBlazeRegisterInfo.cpp +++ b/lib/Target/MBlaze/MBlazeRegisterInfo.cpp @@ -140,7 +140,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned oi = i == 2 ? 1 : 2; - DEBUG(dbgs() << "\nFunction : " << MF.getFunction()->getName() << "\n"; + DEBUG(dbgs() << "\nFunction : " << MF.getName() << "\n"; dbgs() << "<--------->\n" << MI); int FrameIndex = MI.getOperand(i).getIndex(); diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.cpp b/lib/Target/MBlaze/MBlazeTargetMachine.cpp index 5f82f14..f180652 100644 --- a/lib/Target/MBlaze/MBlazeTargetMachine.cpp +++ b/lib/Target/MBlaze/MBlazeTargetMachine.cpp @@ -38,11 +38,12 @@ MBlazeTargetMachine(const Target &T, StringRef TT, CodeGenOpt::Level OL) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS), - DataLayout("E-p:32:32:32-i8:8:8-i16:16:16"), + DL("E-p:32:32:32-i8:8:8-i16:16:16"), InstrInfo(*this), FrameLowering(Subtarget), - TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this), - InstrItins(Subtarget.getInstrItineraryData()) { + TLInfo(*this), TSInfo(*this), + InstrItins(Subtarget.getInstrItineraryData()), + STTI(&TLInfo), VTTI(&TLInfo) { } namespace { diff --git a/lib/Target/MBlaze/MBlazeTargetMachine.h b/lib/Target/MBlaze/MBlazeTargetMachine.h index 1647a21..a8df4e6 100644 --- a/lib/Target/MBlaze/MBlazeTargetMachine.h +++ b/lib/Target/MBlaze/MBlazeTargetMachine.h @@ -20,25 +20,26 @@ #include "MBlazeSelectionDAGInfo.h" #include "MBlazeIntrinsicInfo.h" #include "MBlazeFrameLowering.h" -#include "MBlazeELFWriterInfo.h" #include "llvm/MC/MCStreamer.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" +#include "llvm/Target/TargetTransformImpl.h" namespace llvm { class formatted_raw_ostream; class MBlazeTargetMachine : public LLVMTargetMachine { MBlazeSubtarget Subtarget; - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment MBlazeInstrInfo InstrInfo; MBlazeFrameLowering FrameLowering; MBlazeTargetLowering TLInfo; MBlazeSelectionDAGInfo TSInfo; MBlazeIntrinsicInfo IntrinsicInfo; - MBlazeELFWriterInfo ELFWriterInfo; InstrItineraryData InstrItins; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: MBlazeTargetMachine(const Target &T, StringRef TT, @@ -59,8 +60,8 @@ namespace llvm { virtual const MBlazeSubtarget *getSubtargetImpl() const { return &Subtarget; } - virtual const TargetData *getTargetData() const - { return &DataLayout;} + virtual const DataLayout *getDataLayout() const + { return &DL;} virtual const MBlazeRegisterInfo *getRegisterInfo() const { return &InstrInfo.getRegisterInfo(); } @@ -74,9 +75,10 @@ namespace llvm { const TargetIntrinsicInfo *getIntrinsicInfo() const { return &IntrinsicInfo; } - virtual const 
MBlazeELFWriterInfo *getELFWriterInfo() const { - return &ELFWriterInfo; - } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const + { return &STTI; } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const + { return &VTTI; } // Pass Pipeline Configuration virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); diff --git a/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp b/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp index f66ea30..899c74ee 100644 --- a/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp +++ b/lib/Target/MBlaze/MBlazeTargetObjectFile.cpp @@ -13,7 +13,7 @@ #include "llvm/GlobalVariable.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCSectionELF.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ELF.h" @@ -70,7 +70,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM, return false; Type *Ty = GV->getType()->getElementType(); - return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty)); + return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty)); } const MCSection *MBlazeTargetObjectFile:: diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp index f383fec..44feeb4 100644 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp +++ b/lib/Target/MBlaze/MCTargetDesc/MBlazeAsmBackend.cpp @@ -156,7 +156,8 @@ void ELFMBlazeAsmBackend::applyFixup(const MCFixup &Fixup, char *Data, } } // end anonymous namespace -MCAsmBackend *llvm::createMBlazeAsmBackend(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createMBlazeAsmBackend(const Target &T, StringRef TT, + StringRef CPU) { Triple TheTriple(TT); if (TheTriple.isOSDarwin()) diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp index bfd11a0..2b71d9d 100644 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp +++ b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCCodeEmitter.cpp @@ -29,8 +29,8 @@ STATISTIC(MCNumEmitted, "Number of MC instructions emitted"); namespace { class MBlazeMCCodeEmitter : public MCCodeEmitter { - MBlazeMCCodeEmitter(const MBlazeMCCodeEmitter &); // DO NOT IMPLEMENT - void operator=(const MBlazeMCCodeEmitter &); // DO NOT IMPLEMENT + MBlazeMCCodeEmitter(const MBlazeMCCodeEmitter &) LLVM_DELETED_FUNCTION; + void operator=(const MBlazeMCCodeEmitter &) LLVM_DELETED_FUNCTION; const MCInstrInfo &MCII; public: diff --git a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h index 7cc96c6..7bc7d8f 100644 --- a/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h +++ b/lib/Target/MBlaze/MCTargetDesc/MBlazeMCTargetDesc.h @@ -35,7 +35,8 @@ MCCodeEmitter *createMBlazeMCCodeEmitter(const MCInstrInfo &MCII, const MCSubtargetInfo &STI, MCContext &Ctx); -MCAsmBackend *createMBlazeAsmBackend(const Target &T, StringRef TT); +MCAsmBackend *createMBlazeAsmBackend(const Target &T, StringRef TT, + StringRef CPU); MCObjectWriter *createMBlazeELFObjectWriter(raw_ostream &OS, uint8_t OSABI); } // End llvm namespace diff --git a/lib/Target/MSP430/MSP430FrameLowering.cpp b/lib/Target/MSP430/MSP430FrameLowering.cpp index 61d7f2b..2e170f1 100644 --- a/lib/Target/MSP430/MSP430FrameLowering.cpp +++ b/lib/Target/MSP430/MSP430FrameLowering.cpp @@ -20,7 +20,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include 
"llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" @@ -221,3 +221,17 @@ MSP430FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, return true; } + +void +MSP430FrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF) + const { + const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); + + // Create a frame entry for the FPW register that must be saved. + if (TFI->hasFP(MF)) { + int FrameIdx = MF.getFrameInfo()->CreateFixedObject(2, -4, true); + (void)FrameIdx; + assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() && + "Slot for FPW register must be last in order to be found!"); + } +} diff --git a/lib/Target/MSP430/MSP430FrameLowering.h b/lib/Target/MSP430/MSP430FrameLowering.h index b636827..cb02545 100644 --- a/lib/Target/MSP430/MSP430FrameLowering.h +++ b/lib/Target/MSP430/MSP430FrameLowering.h @@ -46,6 +46,7 @@ public: bool hasFP(const MachineFunction &MF) const; bool hasReservedCallFrame(const MachineFunction &MF) const; + void processFunctionBeforeFrameFinalized(MachineFunction &MF) const; }; } // End llvm namespace diff --git a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp index 5430d43..5efc6a3 100644 --- a/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp +++ b/lib/Target/MSP430/MSP430ISelDAGToDAG.cpp @@ -274,8 +274,8 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue N, else if (AM.JT != -1) Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i16, 0/*AM.SymbolFlags*/); else if (AM.BlockAddr) - Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32, - true, 0/*AM.SymbolFlags*/); + Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, 0, + 0/*AM.SymbolFlags*/); else Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i16); diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index f8b7e14..fc677ae 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -61,7 +61,7 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) : TargetLowering(tm, new TargetLoweringObjectFileELF()), Subtarget(*tm.getSubtargetImpl()) { - TD = getTargetData(); + TD = getDataLayout(); // Set up the register classes. 
addRegisterClass(MVT::i8, &MSP430::GR8RegClass); @@ -655,7 +655,7 @@ SDValue MSP430TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { DebugLoc dl = Op.getDebugLoc(); const BlockAddress *BA = cast(Op)->getBlockAddress(); - SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true); + SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy()); return DAG.getNode(MSP430ISD::Wrapper, dl, getPointerTy(), Result); } diff --git a/lib/Target/MSP430/MSP430ISelLowering.h b/lib/Target/MSP430/MSP430ISelLowering.h index d8ad02f..991304c 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.h +++ b/lib/Target/MSP430/MSP430ISelLowering.h @@ -169,7 +169,7 @@ namespace llvm { SelectionDAG &DAG) const; const MSP430Subtarget &Subtarget; - const TargetData *TD; + const DataLayout *TD; }; } // namespace llvm diff --git a/lib/Target/MSP430/MSP430RegisterInfo.cpp b/lib/Target/MSP430/MSP430RegisterInfo.cpp index aed46a2..9ae238f 100644 --- a/lib/Target/MSP430/MSP430RegisterInfo.cpp +++ b/lib/Target/MSP430/MSP430RegisterInfo.cpp @@ -220,20 +220,6 @@ MSP430RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, MI.getOperand(i+1).ChangeToImmediate(Offset); } -void -MSP430RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) - const { - const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); - - // Create a frame entry for the FPW register that must be saved. - if (TFI->hasFP(MF)) { - int FrameIdx = MF.getFrameInfo()->CreateFixedObject(2, -4, true); - (void)FrameIdx; - assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() && - "Slot for FPW register must be last in order to be found!"); - } -} - unsigned MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const { const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); diff --git a/lib/Target/MSP430/MSP430RegisterInfo.h b/lib/Target/MSP430/MSP430RegisterInfo.h index 9ee0a03..64a43bc 100644 --- a/lib/Target/MSP430/MSP430RegisterInfo.h +++ b/lib/Target/MSP430/MSP430RegisterInfo.h @@ -49,8 +49,6 @@ public: void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, RegScavenger *RS = NULL) const; - void processFunctionBeforeFrameFinalized(MachineFunction &MF) const; - // Debug information queries. unsigned getFrameRegister(const MachineFunction &MF) const; }; diff --git a/lib/Target/MSP430/MSP430TargetMachine.cpp b/lib/Target/MSP430/MSP430TargetMachine.cpp index 817001d..13e37b3 100644 --- a/lib/Target/MSP430/MSP430TargetMachine.cpp +++ b/lib/Target/MSP430/MSP430TargetMachine.cpp @@ -33,10 +33,10 @@ MSP430TargetMachine::MSP430TargetMachine(const Target &T, CodeGenOpt::Level OL) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS), - // FIXME: Check TargetData string. - DataLayout("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"), + // FIXME: Check DataLayout string. + DL("e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16"), InstrInfo(*this), TLInfo(*this), TSInfo(*this), - FrameLowering(Subtarget) { } + FrameLowering(Subtarget), STTI(&TLInfo), VTTI(&TLInfo) { } namespace { /// MSP430 Code Generator Pass Configuration Options. 
diff --git a/lib/Target/MSP430/MSP430TargetMachine.h b/lib/Target/MSP430/MSP430TargetMachine.h index f54146b..186172e 100644 --- a/lib/Target/MSP430/MSP430TargetMachine.h +++ b/lib/Target/MSP430/MSP430TargetMachine.h @@ -21,9 +21,10 @@ #include "MSP430SelectionDAGInfo.h" #include "MSP430RegisterInfo.h" #include "MSP430Subtarget.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetTransformImpl.h" namespace llvm { @@ -31,11 +32,13 @@ namespace llvm { /// class MSP430TargetMachine : public LLVMTargetMachine { MSP430Subtarget Subtarget; - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment MSP430InstrInfo InstrInfo; MSP430TargetLowering TLInfo; MSP430SelectionDAGInfo TSInfo; MSP430FrameLowering FrameLowering; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: MSP430TargetMachine(const Target &T, StringRef TT, @@ -47,7 +50,7 @@ public: return &FrameLowering; } virtual const MSP430InstrInfo *getInstrInfo() const { return &InstrInfo; } - virtual const TargetData *getTargetData() const { return &DataLayout;} + virtual const DataLayout *getDataLayout() const { return &DL;} virtual const MSP430Subtarget *getSubtargetImpl() const { return &Subtarget; } virtual const TargetRegisterInfo *getRegisterInfo() const { @@ -61,7 +64,12 @@ public: virtual const MSP430SelectionDAGInfo* getSelectionDAGInfo() const { return &TSInfo; } - + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); }; // MSP430TargetMachine. diff --git a/lib/Target/Mangler.cpp b/lib/Target/Mangler.cpp index 786a0c5..539a1f7 100644 --- a/lib/Target/Mangler.cpp +++ b/lib/Target/Mangler.cpp @@ -14,7 +14,7 @@ #include "llvm/Target/Mangler.h" #include "llvm/DerivedTypes.h" #include "llvm/Function.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/Support/raw_ostream.h" @@ -44,7 +44,7 @@ static void MangleLetter(SmallVectorImpl &OutName, unsigned char C) { OutName.push_back('_'); } -/// NameNeedsEscaping - Return true if the identifier \arg Str needs quotes +/// NameNeedsEscaping - Return true if the identifier \p Str needs quotes /// for this assembler. static bool NameNeedsEscaping(StringRef Str, const MCAsmInfo &MAI) { assert(!Str.empty() && "Cannot create an empty MCSymbol"); @@ -157,7 +157,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl &OutName, /// a suffix on their name indicating the number of words of arguments they /// take. static void AddFastCallStdCallSuffix(SmallVectorImpl &OutName, - const Function *F, const TargetData &TD) { + const Function *F, const DataLayout &TD) { // Calculate arguments size total. 
unsigned ArgWords = 0; for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end(); @@ -183,8 +183,7 @@ void Mangler::getNameWithPrefix(SmallVectorImpl &OutName, ManglerPrefixTy PrefixTy = Mangler::Default; if (GV->hasPrivateLinkage() || isImplicitlyPrivate) PrefixTy = Mangler::Private; - else if (GV->hasLinkerPrivateLinkage() || GV->hasLinkerPrivateWeakLinkage() || - GV->hasLinkerPrivateWeakDefAutoLinkage()) + else if (GV->hasLinkerPrivateLinkage() || GV->hasLinkerPrivateWeakLinkage()) PrefixTy = Mangler::LinkerPrivate; // If this global has a name, handle it simply. diff --git a/lib/Target/Mips/AsmParser/CMakeLists.txt b/lib/Target/Mips/AsmParser/CMakeLists.txt index 6c7343b..28f5219 100644 --- a/lib/Target/Mips/AsmParser/CMakeLists.txt +++ b/lib/Target/Mips/AsmParser/CMakeLists.txt @@ -1,3 +1,4 @@ +include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/.. ) add_llvm_library(LLVMMipsAsmParser MipsAsmParser.cpp ) diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index 58b5590..67b5248 100644 --- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -8,53 +8,1316 @@ //===----------------------------------------------------------------------===// #include "MCTargetDesc/MipsMCTargetDesc.h" +#include "MipsRegisterInfo.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCParser/MCAsmLexer.h" +#include "llvm/MC/MCParser/MCParsedAsmOperand.h" #include "llvm/MC/MCTargetAsmParser.h" #include "llvm/Support/TargetRegistry.h" using namespace llvm; namespace { +class MipsAssemblerOptions { +public: + MipsAssemblerOptions(): + aTReg(1), reorder(true), macro(true) { + } + + unsigned getATRegNum() {return aTReg;} + bool setATReg(unsigned Reg); + + bool isReorder() {return reorder;} + void setReorder() {reorder = true;} + void setNoreorder() {reorder = false;} + + bool isMacro() {return macro;} + void setMacro() {macro = true;} + void setNomacro() {macro = false;} + +private: + unsigned aTReg; + bool reorder; + bool macro; +}; +} + +namespace { class MipsAsmParser : public MCTargetAsmParser { - bool MatchAndEmitInstruction(SMLoc IDLoc, + + enum FpFormatTy { + FP_FORMAT_NONE = -1, + FP_FORMAT_S, + FP_FORMAT_D, + FP_FORMAT_L, + FP_FORMAT_W + } FpFormat; + + MCSubtargetInfo &STI; + MCAsmParser &Parser; + MipsAssemblerOptions Options; + + +#define GET_ASSEMBLER_HEADER +#include "MipsGenAsmMatcher.inc" + + bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out); + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm); bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); - bool ParseInstruction(StringRef Name, SMLoc NameLoc, - SmallVectorImpl &Operands); + bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, + SmallVectorImpl &Operands); + + bool parseMathOperation(StringRef Name, SMLoc NameLoc, + SmallVectorImpl &Operands); bool ParseDirective(AsmToken DirectiveID); + MipsAsmParser::OperandMatchResultTy + parseMemOperand(SmallVectorImpl&); + + bool ParseOperand(SmallVectorImpl &, + StringRef Mnemonic); + + int tryParseRegister(StringRef Mnemonic); + + bool tryParseRegisterOperand(SmallVectorImpl &Operands, + StringRef Mnemonic); + + bool needsExpansion(MCInst &Inst); + + void 
expandInstruction(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions); + void expandLoadImm(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions); + void expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions); + void expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions); + bool reportParseError(StringRef ErrorMsg); + + bool parseMemOffset(const MCExpr *&Res); + bool parseRelocOperand(const MCExpr *&Res); + + bool parseDirectiveSet(); + + bool parseSetAtDirective(); + bool parseSetNoAtDirective(); + bool parseSetMacroDirective(); + bool parseSetNoMacroDirective(); + bool parseSetReorderDirective(); + bool parseSetNoReorderDirective(); + + MCSymbolRefExpr::VariantKind getVariantKind(StringRef Symbol); + + bool isMips64() const { + return (STI.getFeatureBits() & Mips::FeatureMips64) != 0; + } + + bool isFP64() const { + return (STI.getFeatureBits() & Mips::FeatureFP64Bit) != 0; + } + + int matchRegisterName(StringRef Symbol); + + int matchRegisterByNumber(unsigned RegNum, StringRef Mnemonic); + + void setFpFormat(FpFormatTy Format) { + FpFormat = Format; + } + + void setDefaultFpFormat(); + + void setFpFormat(StringRef Format); + + FpFormatTy getFpFormat() {return FpFormat;} + + bool requestsDoubleOperand(StringRef Mnemonic); + + unsigned getReg(int RC,int RegNo); + + unsigned getATReg(); public: MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser) - : MCTargetAsmParser() { + : MCTargetAsmParser(), STI(sti), Parser(parser) { + // Initialize the set of available features. + setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); + } + + MCAsmParser &getParser() const { return Parser; } + MCAsmLexer &getLexer() const { return Parser.getLexer(); } + +}; +} + +namespace { + +/// MipsOperand - Instances of this class represent a parsed Mips machine +/// instruction. +class MipsOperand : public MCParsedAsmOperand { + + enum KindTy { + k_CondCode, + k_CoprocNum, + k_Immediate, + k_Memory, + k_PostIndexRegister, + k_Register, + k_Token + } Kind; + + MipsOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} + + union { + struct { + const char *Data; + unsigned Length; + } Tok; + + struct { + unsigned RegNum; + } Reg; + + struct { + const MCExpr *Val; + } Imm; + + struct { + unsigned Base; + const MCExpr *Off; + } Mem; + }; + + SMLoc StartLoc, EndLoc; + +public: + void addRegOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::CreateReg(getReg())); + } + + void addExpr(MCInst &Inst, const MCExpr *Expr) const{ + // Add as immediate when possible. Null MCExpr = 0. 
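+    // A null expression becomes an immediate of 0, a constant expression is
+    // folded to its constant value, and anything else (for example a
+    // relocation expression) is kept as a symbolic expression operand.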
+ if (Expr == 0) + Inst.addOperand(MCOperand::CreateImm(0)); + else if (const MCConstantExpr *CE = dyn_cast(Expr)) + Inst.addOperand(MCOperand::CreateImm(CE->getValue())); + else + Inst.addOperand(MCOperand::CreateExpr(Expr)); + } + + void addImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + const MCExpr *Expr = getImm(); + addExpr(Inst,Expr); + } + + void addMemOperands(MCInst &Inst, unsigned N) const { + assert(N == 2 && "Invalid number of operands!"); + + Inst.addOperand(MCOperand::CreateReg(getMemBase())); + + const MCExpr *Expr = getMemOff(); + addExpr(Inst,Expr); + } + + bool isReg() const { return Kind == k_Register; } + bool isImm() const { return Kind == k_Immediate; } + bool isToken() const { return Kind == k_Token; } + bool isMem() const { return Kind == k_Memory; } + + StringRef getToken() const { + assert(Kind == k_Token && "Invalid access!"); + return StringRef(Tok.Data, Tok.Length); + } + + unsigned getReg() const { + assert((Kind == k_Register) && "Invalid access!"); + return Reg.RegNum; + } + + const MCExpr *getImm() const { + assert((Kind == k_Immediate) && "Invalid access!"); + return Imm.Val; + } + + unsigned getMemBase() const { + assert((Kind == k_Memory) && "Invalid access!"); + return Mem.Base; + } + + const MCExpr *getMemOff() const { + assert((Kind == k_Memory) && "Invalid access!"); + return Mem.Off; + } + + static MipsOperand *CreateToken(StringRef Str, SMLoc S) { + MipsOperand *Op = new MipsOperand(k_Token); + Op->Tok.Data = Str.data(); + Op->Tok.Length = Str.size(); + Op->StartLoc = S; + Op->EndLoc = S; + return Op; } + static MipsOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { + MipsOperand *Op = new MipsOperand(k_Register); + Op->Reg.RegNum = RegNum; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + static MipsOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) { + MipsOperand *Op = new MipsOperand(k_Immediate); + Op->Imm.Val = Val; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + static MipsOperand *CreateMem(unsigned Base, const MCExpr *Off, + SMLoc S, SMLoc E) { + MipsOperand *Op = new MipsOperand(k_Memory); + Op->Mem.Base = Base; + Op->Mem.Off = Off; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + /// getStartLoc - Get the location of the first token of this operand. + SMLoc getStartLoc() const { return StartLoc; } + /// getEndLoc - Get the location of the last token of this operand. 
+ SMLoc getEndLoc() const { return EndLoc; } + + virtual void print(raw_ostream &OS) const { + llvm_unreachable("unimplemented!"); + } }; } +bool MipsAsmParser::needsExpansion(MCInst &Inst) { + + switch(Inst.getOpcode()) { + case Mips::LoadImm32Reg: + case Mips::LoadAddr32Imm: + case Mips::LoadAddr32Reg: + return true; + default: + return false; + } +} + +void MipsAsmParser::expandInstruction(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions){ + switch(Inst.getOpcode()) { + case Mips::LoadImm32Reg: + return expandLoadImm(Inst, IDLoc, Instructions); + case Mips::LoadAddr32Imm: + return expandLoadAddressImm(Inst,IDLoc,Instructions); + case Mips::LoadAddr32Reg: + return expandLoadAddressReg(Inst,IDLoc,Instructions); + } +} + +void MipsAsmParser::expandLoadImm(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions){ + MCInst tmpInst; + const MCOperand &ImmOp = Inst.getOperand(1); + assert(ImmOp.isImm() && "expected immediate operand kind"); + const MCOperand &RegOp = Inst.getOperand(0); + assert(RegOp.isReg() && "expected register operand kind"); + + int ImmValue = ImmOp.getImm(); + tmpInst.setLoc(IDLoc); + if ( 0 <= ImmValue && ImmValue <= 65535) { + // for 0 <= j <= 65535. + // li d,j => ori d,$zero,j + tmpInst.setOpcode(isMips64() ? Mips::ORi64 : Mips::ORi); + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand( + MCOperand::CreateReg(isMips64() ? Mips::ZERO_64 : Mips::ZERO)); + tmpInst.addOperand(MCOperand::CreateImm(ImmValue)); + Instructions.push_back(tmpInst); + } else if ( ImmValue < 0 && ImmValue >= -32768) { + // for -32768 <= j < 0. + // li d,j => addiu d,$zero,j + tmpInst.setOpcode(Mips::ADDiu); //TODO:no ADDiu64 in td files? + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand( + MCOperand::CreateReg(isMips64() ? Mips::ZERO_64 : Mips::ZERO)); + tmpInst.addOperand(MCOperand::CreateImm(ImmValue)); + Instructions.push_back(tmpInst); + } else { + // for any other value of j that is representable as a 32-bit integer. + // li d,j => lui d,hi16(j) + // ori d,d,lo16(j) + tmpInst.setOpcode(isMips64() ? Mips::LUi64 : Mips::LUi); + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16)); + Instructions.push_back(tmpInst); + tmpInst.clear(); + tmpInst.setOpcode(isMips64() ? Mips::ORi64 : Mips::ORi); + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateImm(ImmValue & 0xffff)); + tmpInst.setLoc(IDLoc); + Instructions.push_back(tmpInst); + } +} + +void MipsAsmParser::expandLoadAddressReg(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions){ + MCInst tmpInst; + const MCOperand &ImmOp = Inst.getOperand(2); + assert(ImmOp.isImm() && "expected immediate operand kind"); + const MCOperand &SrcRegOp = Inst.getOperand(1); + assert(SrcRegOp.isReg() && "expected register operand kind"); + const MCOperand &DstRegOp = Inst.getOperand(0); + assert(DstRegOp.isReg() && "expected register operand kind"); + int ImmValue = ImmOp.getImm(); + if ( -32768 <= ImmValue && ImmValue <= 65535) { + //for -32768 <= j <= 65535. + //la d,j(s) => addiu d,s,j + tmpInst.setOpcode(Mips::ADDiu); //TODO:no ADDiu64 in td files? 
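[Editor's aside, not part of the patch.] expandLoadImm above handles an immediate that does not fit a single ori/addiu by splitting it into a lui of the high half followed by an ori of the low half. A minimal sketch of that split, assuming a hypothetical 32-bit value 0x12345678 (the program below is illustrative only and not an LLVM API):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t j = 0x12345678;               // hypothetical immediate for "li d, j"
      uint32_t hi = (j & 0xffff0000) >> 16;  // operand of "lui d, hi16(j)"
      uint32_t lo = j & 0xffff;              // operand of "ori d, d, lo16(j)"
      std::printf("li d, 0x%x  =>  lui d, 0x%x; ori d, d, 0x%x\n", j, hi, lo);
      return 0;
    }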
+ tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateReg(SrcRegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateImm(ImmValue)); + Instructions.push_back(tmpInst); + } else { + //for any other value of j that is representable as a 32-bit integer. + //la d,j(s) => lui d,hi16(j) + // ori d,d,lo16(j) + // addu d,d,s + tmpInst.setOpcode(isMips64()?Mips::LUi64:Mips::LUi); + tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16)); + Instructions.push_back(tmpInst); + tmpInst.clear(); + tmpInst.setOpcode(isMips64()?Mips::ORi64:Mips::ORi); + tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateImm(ImmValue & 0xffff)); + Instructions.push_back(tmpInst); + tmpInst.clear(); + tmpInst.setOpcode(Mips::ADDu); + tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateReg(DstRegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateReg(SrcRegOp.getReg())); + Instructions.push_back(tmpInst); + } +} + +void MipsAsmParser::expandLoadAddressImm(MCInst &Inst, SMLoc IDLoc, + SmallVectorImpl &Instructions){ + MCInst tmpInst; + const MCOperand &ImmOp = Inst.getOperand(1); + assert(ImmOp.isImm() && "expected immediate operand kind"); + const MCOperand &RegOp = Inst.getOperand(0); + assert(RegOp.isReg() && "expected register operand kind"); + int ImmValue = ImmOp.getImm(); + if ( -32768 <= ImmValue && ImmValue <= 65535) { + //for -32768 <= j <= 65535. + //la d,j => addiu d,$zero,j + tmpInst.setOpcode(Mips::ADDiu); + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand( + MCOperand::CreateReg(isMips64()?Mips::ZERO_64:Mips::ZERO)); + tmpInst.addOperand(MCOperand::CreateImm(ImmValue)); + Instructions.push_back(tmpInst); + } else { + //for any other value of j that is representable as a 32-bit integer. 
+ //la d,j => lui d,hi16(j) + // ori d,d,lo16(j) + tmpInst.setOpcode(isMips64()?Mips::LUi64:Mips::LUi); + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateImm((ImmValue & 0xffff0000) >> 16)); + Instructions.push_back(tmpInst); + tmpInst.clear(); + tmpInst.setOpcode(isMips64()?Mips::ORi64:Mips::ORi); + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateReg(RegOp.getReg())); + tmpInst.addOperand(MCOperand::CreateImm(ImmValue & 0xffff)); + Instructions.push_back(tmpInst); + } +} + bool MipsAsmParser:: -MatchAndEmitInstruction(SMLoc IDLoc, +MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out) { + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm) { + MCInst Inst; + unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, + MatchingInlineAsm); + + switch (MatchResult) { + default: break; + case Match_Success: { + if (needsExpansion(Inst)) { + SmallVector Instructions; + expandInstruction(Inst, IDLoc, Instructions); + for(unsigned i =0; i < Instructions.size(); i++){ + Out.EmitInstruction(Instructions[i]); + } + } else { + Inst.setLoc(IDLoc); + Out.EmitInstruction(Inst); + } + return false; + } + case Match_MissingFeature: + Error(IDLoc, "instruction requires a CPU feature not currently enabled"); + return true; + case Match_InvalidOperand: { + SMLoc ErrorLoc = IDLoc; + if (ErrorInfo != ~0U) { + if (ErrorInfo >= Operands.size()) + return Error(IDLoc, "too few operands for instruction"); + + ErrorLoc = ((MipsOperand*)Operands[ErrorInfo])->getStartLoc(); + if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; + } + + return Error(ErrorLoc, "invalid operand for instruction"); + } + case Match_MnemonicFail: + return Error(IDLoc, "invalid instruction"); + } + return true; +} + +int MipsAsmParser::matchRegisterName(StringRef Name) { + + int CC; + if (!isMips64()) + CC = StringSwitch(Name) + .Case("zero", Mips::ZERO) + .Case("a0", Mips::A0) + .Case("a1", Mips::A1) + .Case("a2", Mips::A2) + .Case("a3", Mips::A3) + .Case("v0", Mips::V0) + .Case("v1", Mips::V1) + .Case("s0", Mips::S0) + .Case("s1", Mips::S1) + .Case("s2", Mips::S2) + .Case("s3", Mips::S3) + .Case("s4", Mips::S4) + .Case("s5", Mips::S5) + .Case("s6", Mips::S6) + .Case("s7", Mips::S7) + .Case("k0", Mips::K0) + .Case("k1", Mips::K1) + .Case("sp", Mips::SP) + .Case("fp", Mips::FP) + .Case("gp", Mips::GP) + .Case("ra", Mips::RA) + .Case("t0", Mips::T0) + .Case("t1", Mips::T1) + .Case("t2", Mips::T2) + .Case("t3", Mips::T3) + .Case("t4", Mips::T4) + .Case("t5", Mips::T5) + .Case("t6", Mips::T6) + .Case("t7", Mips::T7) + .Case("t8", Mips::T8) + .Case("t9", Mips::T9) + .Case("at", Mips::AT) + .Case("fcc0", Mips::FCC0) + .Default(-1); + else + CC = StringSwitch(Name) + .Case("zero", Mips::ZERO_64) + .Case("at", Mips::AT_64) + .Case("v0", Mips::V0_64) + .Case("v1", Mips::V1_64) + .Case("a0", Mips::A0_64) + .Case("a1", Mips::A1_64) + .Case("a2", Mips::A2_64) + .Case("a3", Mips::A3_64) + .Case("a4", Mips::T0_64) + .Case("a5", Mips::T1_64) + .Case("a6", Mips::T2_64) + .Case("a7", Mips::T3_64) + .Case("t4", Mips::T4_64) + .Case("t5", Mips::T5_64) + .Case("t6", Mips::T6_64) + .Case("t7", Mips::T7_64) + .Case("s0", Mips::S0_64) + .Case("s1", Mips::S1_64) + .Case("s2", Mips::S2_64) + .Case("s3", Mips::S3_64) + .Case("s4", Mips::S4_64) + .Case("s5", Mips::S5_64) + .Case("s6", Mips::S6_64) + .Case("s7", Mips::S7_64) + .Case("t8", Mips::T8_64) + .Case("t9", Mips::T9_64) + .Case("kt0", Mips::K0_64) + 
.Case("kt1", Mips::K1_64) + .Case("gp", Mips::GP_64) + .Case("sp", Mips::SP_64) + .Case("fp", Mips::FP_64) + .Case("s8", Mips::FP_64) + .Case("ra", Mips::RA_64) + .Default(-1); + + if (CC != -1) + return CC; + + if (Name[0] == 'f') { + StringRef NumString = Name.substr(1); + unsigned IntVal; + if( NumString.getAsInteger(10, IntVal)) + return -1; // not integer + if (IntVal > 31) + return -1; + + FpFormatTy Format = getFpFormat(); + + if (Format == FP_FORMAT_S || Format == FP_FORMAT_W) + return getReg(Mips::FGR32RegClassID, IntVal); + if (Format == FP_FORMAT_D) { + if(isFP64()) { + return getReg(Mips::FGR64RegClassID, IntVal); + } + // only even numbers available as register pairs + if (( IntVal > 31) || (IntVal%2 != 0)) + return -1; + return getReg(Mips::AFGR64RegClassID, IntVal/2); + } + } + + return -1; +} +void MipsAsmParser::setDefaultFpFormat() { + + if (isMips64() || isFP64()) + FpFormat = FP_FORMAT_D; + else + FpFormat = FP_FORMAT_S; +} + +bool MipsAsmParser::requestsDoubleOperand(StringRef Mnemonic){ + + bool IsDouble = StringSwitch(Mnemonic.lower()) + .Case("ldxc1", true) + .Case("ldc1", true) + .Case("sdxc1", true) + .Case("sdc1", true) + .Default(false); + + return IsDouble; +} +void MipsAsmParser::setFpFormat(StringRef Format) { + + FpFormat = StringSwitch(Format.lower()) + .Case(".s", FP_FORMAT_S) + .Case(".d", FP_FORMAT_D) + .Case(".l", FP_FORMAT_L) + .Case(".w", FP_FORMAT_W) + .Default(FP_FORMAT_NONE); +} + +bool MipsAssemblerOptions::setATReg(unsigned Reg) { + if (Reg > 31) + return false; + + aTReg = Reg; return true; } +unsigned MipsAsmParser::getATReg() { + unsigned Reg = Options.getATRegNum(); + if (isMips64()) + return getReg(Mips::CPU64RegsRegClassID,Reg); + + return getReg(Mips::CPURegsRegClassID,Reg); +} + +unsigned MipsAsmParser::getReg(int RC,int RegNo) { + return *(getContext().getRegisterInfo().getRegClass(RC).begin() + RegNo); +} + +int MipsAsmParser::matchRegisterByNumber(unsigned RegNum, StringRef Mnemonic) { + + if (Mnemonic.lower() == "rdhwr") { + // at the moment only hwreg29 is supported + if (RegNum != 29) + return -1; + return Mips::HWR29; + } + + if (RegNum > 31) + return -1; + + // MIPS64 registers are numbered 1 after the 32-bit equivalents + return getReg(Mips::CPURegsRegClassID, RegNum) + isMips64(); +} + +int MipsAsmParser::tryParseRegister(StringRef Mnemonic) { + const AsmToken &Tok = Parser.getTok(); + int RegNum = -1; + + if (Tok.is(AsmToken::Identifier)) { + std::string lowerCase = Tok.getString().lower(); + RegNum = matchRegisterName(lowerCase); + } else if (Tok.is(AsmToken::Integer)) + RegNum = matchRegisterByNumber(static_cast(Tok.getIntVal()), + Mnemonic.lower()); + else + return RegNum; //error + // 64 bit div operations require Mips::ZERO instead of MIPS::ZERO_64 + if (isMips64() && RegNum == Mips::ZERO_64) { + if (Mnemonic.find("ddiv") != StringRef::npos) + RegNum = Mips::ZERO; + } + return RegNum; +} + bool MipsAsmParser:: -ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) { + tryParseRegisterOperand(SmallVectorImpl &Operands, + StringRef Mnemonic){ + + SMLoc S = Parser.getTok().getLoc(); + int RegNo = -1; + + // FIXME: we should make a more generic method for CCR + if ((Mnemonic == "cfc1" || Mnemonic == "ctc1") + && Operands.size() == 2 && Parser.getTok().is(AsmToken::Integer)){ + RegNo = Parser.getTok().getIntVal(); // get the int value + // at the moment only fcc0 is supported + if (RegNo == 0) + RegNo = Mips::FCC0; + } else + RegNo = tryParseRegister(Mnemonic); + if (RegNo == -1) + return true; + + 
Operands.push_back(MipsOperand::CreateReg(RegNo, S, + Parser.getTok().getLoc())); + Parser.Lex(); // Eat register token. + return false; +} + +bool MipsAsmParser::ParseOperand(SmallVectorImpl&Operands, + StringRef Mnemonic) { + // Check if the current operand has a custom associated parser, if so, try to + // custom parse the operand, or fallback to the general approach. + OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); + if (ResTy == MatchOperand_Success) + return false; + // If there wasn't a custom match, try the generic matcher below. Otherwise, + // there was a match, but an error occurred, in which case, just return that + // the operand parsing failed. + if (ResTy == MatchOperand_ParseFail) + return true; + + switch (getLexer().getKind()) { + default: + Error(Parser.getTok().getLoc(), "unexpected token in operand"); + return true; + case AsmToken::Dollar: { + // parse register + SMLoc S = Parser.getTok().getLoc(); + Parser.Lex(); // Eat dollar token. + // parse register operand + if (!tryParseRegisterOperand(Operands, Mnemonic)) { + if (getLexer().is(AsmToken::LParen)) { + // check if it is indexed addressing operand + Operands.push_back(MipsOperand::CreateToken("(", S)); + Parser.Lex(); // eat parenthesis + if (getLexer().isNot(AsmToken::Dollar)) + return true; + + Parser.Lex(); // eat dollar + if (tryParseRegisterOperand(Operands, Mnemonic)) + return true; + + if (!getLexer().is(AsmToken::RParen)) + return true; + + S = Parser.getTok().getLoc(); + Operands.push_back(MipsOperand::CreateToken(")", S)); + Parser.Lex(); + } + return false; + } + // maybe it is a symbol reference + StringRef Identifier; + if (Parser.ParseIdentifier(Identifier)) + return true; + + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + + MCSymbol *Sym = getContext().GetOrCreateSymbol("$" + Identifier); + + // Otherwise create a symbol ref. 
+ const MCExpr *Res = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_None, + getContext()); + + Operands.push_back(MipsOperand::CreateImm(Res, S, E)); + return false; + } + case AsmToken::Identifier: + case AsmToken::LParen: + case AsmToken::Minus: + case AsmToken::Plus: + case AsmToken::Integer: + case AsmToken::String: { + // quoted label names + const MCExpr *IdVal; + SMLoc S = Parser.getTok().getLoc(); + if (getParser().ParseExpression(IdVal)) + return true; + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + Operands.push_back(MipsOperand::CreateImm(IdVal, S, E)); + return false; + } + case AsmToken::Percent: { + // it is a symbol reference or constant expression + const MCExpr *IdVal; + SMLoc S = Parser.getTok().getLoc(); // start location of the operand + if (parseRelocOperand(IdVal)) + return true; + + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + + Operands.push_back(MipsOperand::CreateImm(IdVal, S, E)); + return false; + } // case AsmToken::Percent + } // switch(getLexer().getKind()) + return true; +} + +bool MipsAsmParser::parseRelocOperand(const MCExpr *&Res) { + + Parser.Lex(); // eat % token + const AsmToken &Tok = Parser.getTok(); // get next token, operation + if (Tok.isNot(AsmToken::Identifier)) + return true; + + std::string Str = Tok.getIdentifier().str(); + + Parser.Lex(); // eat identifier + // now make expression from the rest of the operand + const MCExpr *IdVal; + SMLoc EndLoc; + + if (getLexer().getKind() == AsmToken::LParen) { + while (1) { + Parser.Lex(); // eat '(' token + if (getLexer().getKind() == AsmToken::Percent) { + Parser.Lex(); // eat % token + const AsmToken &nextTok = Parser.getTok(); + if (nextTok.isNot(AsmToken::Identifier)) + return true; + Str += "(%"; + Str += nextTok.getIdentifier(); + Parser.Lex(); // eat identifier + if (getLexer().getKind() != AsmToken::LParen) + return true; + } else + break; + } + if (getParser().ParseParenExpression(IdVal,EndLoc)) + return true; + + while (getLexer().getKind() == AsmToken::RParen) + Parser.Lex(); // eat ')' token + + } else + return true; // parenthesis must follow reloc operand + + // Check the type of the expression + if (const MCConstantExpr *MCE = dyn_cast(IdVal)) { + // it's a constant, evaluate lo or hi value + int Val = MCE->getValue(); + if (Str == "lo") { + Val = Val & 0xffff; + } else if (Str == "hi") { + Val = (Val & 0xffff0000) >> 16; + } + Res = MCConstantExpr::Create(Val, getContext()); + return false; + } + + if (const MCSymbolRefExpr *MSRE = dyn_cast(IdVal)) { + // it's a symbol, create symbolic expression from symbol + StringRef Symbol = MSRE->getSymbol().getName(); + MCSymbolRefExpr::VariantKind VK = getVariantKind(Str); + Res = MCSymbolRefExpr::Create(Symbol,VK,getContext()); + return false; + } return true; } +bool MipsAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, + SMLoc &EndLoc) { + + StartLoc = Parser.getTok().getLoc(); + RegNo = tryParseRegister(""); + EndLoc = Parser.getTok().getLoc(); + return (RegNo == (unsigned)-1); +} + +bool MipsAsmParser::parseMemOffset(const MCExpr *&Res) { + + SMLoc S; + + switch(getLexer().getKind()) { + default: + return true; + case AsmToken::Integer: + case AsmToken::Minus: + case AsmToken::Plus: + return (getParser().ParseExpression(Res)); + case AsmToken::Percent: + return parseRelocOperand(Res); + case AsmToken::LParen: + return false; // it's probably assuming 0 + } + return true; +} + +MipsAsmParser::OperandMatchResultTy MipsAsmParser::parseMemOperand( + 
SmallVectorImpl&Operands) { + + const MCExpr *IdVal = 0; + SMLoc S; + // first operand is the offset + S = Parser.getTok().getLoc(); + + if (parseMemOffset(IdVal)) + return MatchOperand_ParseFail; + + const AsmToken &Tok = Parser.getTok(); // get next token + if (Tok.isNot(AsmToken::LParen)) { + MipsOperand *Mnemonic = static_cast(Operands[0]); + if (Mnemonic->getToken() == "la") { + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer()-1); + Operands.push_back(MipsOperand::CreateImm(IdVal, S, E)); + return MatchOperand_Success; + } + Error(Parser.getTok().getLoc(), "'(' expected"); + return MatchOperand_ParseFail; + } + + Parser.Lex(); // Eat '(' token. + + const AsmToken &Tok1 = Parser.getTok(); // get next token + if (Tok1.is(AsmToken::Dollar)) { + Parser.Lex(); // Eat '$' token. + if (tryParseRegisterOperand(Operands,"")) { + Error(Parser.getTok().getLoc(), "unexpected token in operand"); + return MatchOperand_ParseFail; + } + + } else { + Error(Parser.getTok().getLoc(), "unexpected token in operand"); + return MatchOperand_ParseFail; + } + + const AsmToken &Tok2 = Parser.getTok(); // get next token + if (Tok2.isNot(AsmToken::RParen)) { + Error(Parser.getTok().getLoc(), "')' expected"); + return MatchOperand_ParseFail; + } + + SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); + + Parser.Lex(); // Eat ')' token. + + if (IdVal == 0) + IdVal = MCConstantExpr::Create(0, getContext()); + + // now replace register operand with the mem operand + MipsOperand* op = static_cast(Operands.back()); + int RegNo = op->getReg(); + // remove register from operands + Operands.pop_back(); + // and add memory operand + Operands.push_back(MipsOperand::CreateMem(RegNo, IdVal, S, E)); + delete op; + return MatchOperand_Success; +} + +MCSymbolRefExpr::VariantKind MipsAsmParser::getVariantKind(StringRef Symbol) { + + MCSymbolRefExpr::VariantKind VK + = StringSwitch(Symbol) + .Case("hi", MCSymbolRefExpr::VK_Mips_ABS_HI) + .Case("lo", MCSymbolRefExpr::VK_Mips_ABS_LO) + .Case("gp_rel", MCSymbolRefExpr::VK_Mips_GPREL) + .Case("call16", MCSymbolRefExpr::VK_Mips_GOT_CALL) + .Case("got", MCSymbolRefExpr::VK_Mips_GOT) + .Case("tlsgd", MCSymbolRefExpr::VK_Mips_TLSGD) + .Case("tlsldm", MCSymbolRefExpr::VK_Mips_TLSLDM) + .Case("dtprel_hi", MCSymbolRefExpr::VK_Mips_DTPREL_HI) + .Case("dtprel_lo", MCSymbolRefExpr::VK_Mips_DTPREL_LO) + .Case("gottprel", MCSymbolRefExpr::VK_Mips_GOTTPREL) + .Case("tprel_hi", MCSymbolRefExpr::VK_Mips_TPREL_HI) + .Case("tprel_lo", MCSymbolRefExpr::VK_Mips_TPREL_LO) + .Case("got_disp", MCSymbolRefExpr::VK_Mips_GOT_DISP) + .Case("got_page", MCSymbolRefExpr::VK_Mips_GOT_PAGE) + .Case("got_ofst", MCSymbolRefExpr::VK_Mips_GOT_OFST) + .Case("hi(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_HI) + .Case("lo(%neg(%gp_rel", MCSymbolRefExpr::VK_Mips_GPOFF_LO) + .Default(MCSymbolRefExpr::VK_None); + + return VK; +} + +static int ConvertCcString(StringRef CondString) { + int CC = StringSwitch(CondString) + .Case(".f", 0) + .Case(".un", 1) + .Case(".eq", 2) + .Case(".ueq", 3) + .Case(".olt", 4) + .Case(".ult", 5) + .Case(".ole", 6) + .Case(".ule", 7) + .Case(".sf", 8) + .Case(".ngle", 9) + .Case(".seq", 10) + .Case(".ngl", 11) + .Case(".lt", 12) + .Case(".nge", 13) + .Case(".le", 14) + .Case(".ngt", 15) + .Default(-1); + + return CC; +} + bool MipsAsmParser:: -ParseInstruction(StringRef Name, SMLoc NameLoc, +parseMathOperation(StringRef Name, SMLoc NameLoc, + SmallVectorImpl &Operands) { + // split the format + size_t Start = Name.find('.'), Next = Name.rfind('.'); 
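[Editor's aside, not part of the patch.] For a mnemonic that reaches parseMathOperation, the find/rfind pair above isolates the two format suffixes. A small sketch under the assumption that "trunc.w.d" is such a mnemonic; splitExample is a hypothetical helper, only LLVM's StringRef is used:

    #include <cassert>
    #include "llvm/ADT/StringRef.h"

    static void splitExample() {
      llvm::StringRef Name("trunc.w.d");
      size_t Start = Name.find('.'), Next = Name.rfind('.');
      // Format1 becomes ".w" and Format2 becomes ".d", mirroring the slices
      // taken immediately after this point in parseMathOperation.
      assert(Name.slice(Start, Next) == ".w");
      assert(Name.slice(Next, llvm::StringRef::npos) == ".d");
    }

    int main() { splitExample(); return 0; }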
+ StringRef Format1 = Name.slice(Start, Next); + // and add the first format to the operands + Operands.push_back(MipsOperand::CreateToken(Format1, NameLoc)); + // now for the second format + StringRef Format2 = Name.slice(Next, StringRef::npos); + Operands.push_back(MipsOperand::CreateToken(Format2, NameLoc)); + + // set the format for the first register + setFpFormat(Format1); + + // Read the remaining operands. + if (getLexer().isNot(AsmToken::EndOfStatement)) { + // Read the first operand. + if (ParseOperand(Operands, Name)) { + SMLoc Loc = getLexer().getLoc(); + Parser.EatToEndOfStatement(); + return Error(Loc, "unexpected token in argument list"); + } + + if (getLexer().isNot(AsmToken::Comma)) { + SMLoc Loc = getLexer().getLoc(); + Parser.EatToEndOfStatement(); + return Error(Loc, "unexpected token in argument list"); + + } + Parser.Lex(); // Eat the comma. + + //set the format for the first register + setFpFormat(Format2); + + // Parse and remember the operand. + if (ParseOperand(Operands, Name)) { + SMLoc Loc = getLexer().getLoc(); + Parser.EatToEndOfStatement(); + return Error(Loc, "unexpected token in argument list"); + } + } + + if (getLexer().isNot(AsmToken::EndOfStatement)) { + SMLoc Loc = getLexer().getLoc(); + Parser.EatToEndOfStatement(); + return Error(Loc, "unexpected token in argument list"); + } + + Parser.Lex(); // Consume the EndOfStatement + return false; +} + +bool MipsAsmParser:: +ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, SmallVectorImpl &Operands) { + // floating point instructions: should register be treated as double? + if (requestsDoubleOperand(Name)) { + setFpFormat(FP_FORMAT_D); + Operands.push_back(MipsOperand::CreateToken(Name, NameLoc)); + } + else { + setDefaultFpFormat(); + // Create the leading tokens for the mnemonic, split by '.' characters. + size_t Start = 0, Next = Name.find('.'); + StringRef Mnemonic = Name.slice(Start, Next); + + Operands.push_back(MipsOperand::CreateToken(Mnemonic, NameLoc)); + + if (Next != StringRef::npos) { + // there is a format token in mnemonic + // StringRef Rest = Name.slice(Next, StringRef::npos); + size_t Dot = Name.find('.', Next+1); + StringRef Format = Name.slice(Next, Dot); + if (Dot == StringRef::npos) //only one '.' in a string, it's a format + Operands.push_back(MipsOperand::CreateToken(Format, NameLoc)); + else { + if (Name.startswith("c.")){ + // floating point compare, add '.' and immediate represent for cc + Operands.push_back(MipsOperand::CreateToken(".", NameLoc)); + int Cc = ConvertCcString(Format); + if (Cc == -1) { + return Error(NameLoc, "Invalid conditional code"); + } + SMLoc E = SMLoc::getFromPointer( + Parser.getTok().getLoc().getPointer() -1 ); + Operands.push_back(MipsOperand::CreateImm( + MCConstantExpr::Create(Cc, getContext()), NameLoc, E)); + } else { + // trunc, ceil, floor ... + return parseMathOperation(Name, NameLoc, Operands); + } + + // the rest is a format + Format = Name.slice(Dot, StringRef::npos); + Operands.push_back(MipsOperand::CreateToken(Format, NameLoc)); + } + + setFpFormat(Format); + } + } + + // Read the remaining operands. + if (getLexer().isNot(AsmToken::EndOfStatement)) { + // Read the first operand. + if (ParseOperand(Operands, Name)) { + SMLoc Loc = getLexer().getLoc(); + Parser.EatToEndOfStatement(); + return Error(Loc, "unexpected token in argument list"); + } + + while (getLexer().is(AsmToken::Comma) ) { + Parser.Lex(); // Eat the comma. + + // Parse and remember the operand. 
+ if (ParseOperand(Operands, Name)) { + SMLoc Loc = getLexer().getLoc(); + Parser.EatToEndOfStatement(); + return Error(Loc, "unexpected token in argument list"); + } + } + } + + if (getLexer().isNot(AsmToken::EndOfStatement)) { + SMLoc Loc = getLexer().getLoc(); + Parser.EatToEndOfStatement(); + return Error(Loc, "unexpected token in argument list"); + } + + Parser.Lex(); // Consume the EndOfStatement + return false; +} + +bool MipsAsmParser::reportParseError(StringRef ErrorMsg) { + SMLoc Loc = getLexer().getLoc(); + Parser.EatToEndOfStatement(); + return Error(Loc, ErrorMsg); +} + +bool MipsAsmParser::parseSetNoAtDirective() { + // line should look like: + // .set noat + // set at reg to 0 + Options.setATReg(0); + // eat noat + Parser.Lex(); + // if this is not the end of the statement, report error + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token in statement"); + return false; + } + Parser.Lex(); // Consume the EndOfStatement + return false; +} +bool MipsAsmParser::parseSetAtDirective() { + // line can be + // .set at - defaults to $1 + // or .set at=$reg + getParser().Lex(); + if (getLexer().is(AsmToken::EndOfStatement)) { + Options.setATReg(1); + Parser.Lex(); // Consume the EndOfStatement + return false; + } else if (getLexer().is(AsmToken::Equal)) { + getParser().Lex(); //eat '=' + if (getLexer().isNot(AsmToken::Dollar)) { + reportParseError("unexpected token in statement"); + return false; + } + Parser.Lex(); // eat '$' + if (getLexer().isNot(AsmToken::Integer)) { + reportParseError("unexpected token in statement"); + return false; + } + const AsmToken &Reg = Parser.getTok(); + if (!Options.setATReg(Reg.getIntVal())) { + reportParseError("unexpected token in statement"); + return false; + } + getParser().Lex(); //eat reg + + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token in statement"); + return false; + } + Parser.Lex(); // Consume the EndOfStatement + return false; + } else { + reportParseError("unexpected token in statement"); + return false; + } +} + +bool MipsAsmParser::parseSetReorderDirective() { + Parser.Lex(); + // if this is not the end of the statement, report error + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token in statement"); + return false; + } + Options.setReorder(); + Parser.Lex(); // Consume the EndOfStatement + return false; +} + +bool MipsAsmParser::parseSetNoReorderDirective() { + Parser.Lex(); + // if this is not the end of the statement, report error + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token in statement"); + return false; + } + Options.setNoreorder(); + Parser.Lex(); // Consume the EndOfStatement + return false; +} + +bool MipsAsmParser::parseSetMacroDirective() { + Parser.Lex(); + // if this is not the end of the statement, report error + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("unexpected token in statement"); + return false; + } + Options.setMacro(); + Parser.Lex(); // Consume the EndOfStatement + return false; +} + +bool MipsAsmParser::parseSetNoMacroDirective() { + Parser.Lex(); + // if this is not the end of the statement, report error + if (getLexer().isNot(AsmToken::EndOfStatement)) { + reportParseError("`noreorder' must be set before `nomacro'"); + return false; + } + if (Options.isReorder()) { + reportParseError("`noreorder' must be set before `nomacro'"); + return false; + } + Options.setNomacro(); + Parser.Lex(); // Consume the EndOfStatement + return 
false; +} +bool MipsAsmParser::parseDirectiveSet() { + + // get next token + const AsmToken &Tok = Parser.getTok(); + + if (Tok.getString() == "noat") { + return parseSetNoAtDirective(); + } else if (Tok.getString() == "at") { + return parseSetAtDirective(); + } else if (Tok.getString() == "reorder") { + return parseSetReorderDirective(); + } else if (Tok.getString() == "noreorder") { + return parseSetNoReorderDirective(); + } else if (Tok.getString() == "macro") { + return parseSetMacroDirective(); + } else if (Tok.getString() == "nomacro") { + return parseSetNoMacroDirective(); + } else if (Tok.getString() == "nomips16") { + // ignore this directive for now + Parser.EatToEndOfStatement(); + return false; + } else if (Tok.getString() == "nomicromips") { + // ignore this directive for now + Parser.EatToEndOfStatement(); + return false; + } return true; } -bool MipsAsmParser:: -ParseDirective(AsmToken DirectiveID) { +bool MipsAsmParser::ParseDirective(AsmToken DirectiveID) { + + if (DirectiveID.getString() == ".ent") { + // ignore this directive for now + Parser.Lex(); + return false; + } + + if (DirectiveID.getString() == ".end") { + // ignore this directive for now + Parser.Lex(); + return false; + } + + if (DirectiveID.getString() == ".frame") { + // ignore this directive for now + Parser.EatToEndOfStatement(); + return false; + } + + if (DirectiveID.getString() == ".set") { + return parseDirectiveSet(); + } + + if (DirectiveID.getString() == ".fmask") { + // ignore this directive for now + Parser.EatToEndOfStatement(); + return false; + } + + if (DirectiveID.getString() == ".mask") { + // ignore this directive for now + Parser.EatToEndOfStatement(); + return false; + } + + if (DirectiveID.getString() == ".gpword") { + // ignore this directive for now + Parser.EatToEndOfStatement(); + return false; + } + return true; } @@ -64,3 +1327,7 @@ extern "C" void LLVMInitializeMipsAsmParser() { RegisterMCAsmParser A(TheMips64Target); RegisterMCAsmParser B(TheMips64elTarget); } + +#define GET_REGISTER_MATCHER +#define GET_MATCHER_IMPLEMENTATION +#include "MipsGenAsmMatcher.inc" diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt index aab8a01..ef56e75 100644 --- a/lib/Target/Mips/CMakeLists.txt +++ b/lib/Target/Mips/CMakeLists.txt @@ -10,6 +10,8 @@ tablegen(LLVM MipsGenDAGISel.inc -gen-dag-isel) tablegen(LLVM MipsGenCallingConv.inc -gen-callingconv) tablegen(LLVM MipsGenSubtargetInfo.inc -gen-subtarget) tablegen(LLVM MipsGenEDInfo.inc -gen-enhanced-disassembly-info) +tablegen(LLVM MipsGenAsmMatcher.inc -gen-asm-matcher) +tablegen(LLVM MipsGenMCPseudoLowering.inc -gen-pseudo-lowering) add_public_tablegen_target(MipsCommonTableGen) add_llvm_target(MipsCodeGen diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp index aa57472..82dbcc5 100644 --- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp +++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp @@ -108,6 +108,11 @@ static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder); + static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, @@ -138,6 +143,11 @@ static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeACRegsRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const 
void *Decoder); + static DecodeStatus DecodeBranchTarget(MCInst &Inst, unsigned Offset, uint64_t Address, @@ -346,6 +356,13 @@ static DecodeStatus DecodeCPURegsRegisterClass(MCInst &Inst, return MCDisassembler::Success; } +static DecodeStatus DecodeDSPRegsRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + return DecodeCPURegsRegisterClass(Inst, RegNo, Address, Decoder); +} + static DecodeStatus DecodeFGR64RegisterClass(MCInst &Inst, unsigned RegNo, uint64_t Address, @@ -463,6 +480,18 @@ static DecodeStatus DecodeHWRegs64RegisterClass(MCInst &Inst, return MCDisassembler::Success; } +static DecodeStatus DecodeACRegsRegisterClass(MCInst &Inst, + unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo >= 4) + return MCDisassembler::Fail; + + unsigned Reg = getReg(Decoder, Mips::ACRegsRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Reg)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeBranchTarget(MCInst &Inst, unsigned Offset, uint64_t Address, diff --git a/lib/Target/Mips/MCTargetDesc/CMakeLists.txt b/lib/Target/Mips/MCTargetDesc/CMakeLists.txt index fa23150..be5d7e4 100644 --- a/lib/Target/Mips/MCTargetDesc/CMakeLists.txt +++ b/lib/Target/Mips/MCTargetDesc/CMakeLists.txt @@ -1,5 +1,6 @@ add_llvm_library(LLVMMipsDesc MipsAsmBackend.cpp + MipsDirectObjLower.cpp MipsMCAsmInfo.cpp MipsMCCodeEmitter.cpp MipsMCTargetDesc.cpp diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp index 18961fd..9a35bb6 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp @@ -92,7 +92,7 @@ public: MCELFObjectTargetWriter::getOSABI(OSType), IsLittle, Is64Bit); } - /// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided + /// ApplyFixup - Apply the \p Value for given \p Fixup into the provided /// data fragment, at the offset specified by the fixup and following the /// fixup kind as appropriate. void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, @@ -217,7 +217,7 @@ public: /// /// \param Inst - The instruction to relax, which may be the same /// as the output. - /// \parm Res [output] - On return, the relaxed instruction. + /// \param [out] Res On return, the relaxed instruction. 
void relaxInstruction(const MCInst &Inst, MCInst &Res) const { } @@ -244,22 +244,26 @@ public: } // namespace // MCAsmBackend -MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T, StringRef TT, + StringRef CPU) { return new MipsAsmBackend(T, Triple(TT).getOS(), /*IsLittle*/true, /*Is64Bit*/false); } -MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T, StringRef TT, + StringRef CPU) { return new MipsAsmBackend(T, Triple(TT).getOS(), /*IsLittle*/false, /*Is64Bit*/false); } -MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T, StringRef TT, + StringRef CPU) { return new MipsAsmBackend(T, Triple(TT).getOS(), /*IsLittle*/true, /*Is64Bit*/true); } -MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T, StringRef TT, + StringRef CPU) { return new MipsAsmBackend(T, Triple(TT).getOS(), /*IsLittle*/false, /*Is64Bit*/true); } diff --git a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h index 234455e..233214b 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h +++ b/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h @@ -122,14 +122,16 @@ inline static unsigned getMipsRegisterNumbering(unsigned RegEnum) { switch (RegEnum) { case Mips::ZERO: case Mips::ZERO_64: case Mips::F0: case Mips::D0_64: - case Mips::D0: + case Mips::D0: case Mips::FCC0: case Mips::AC0: return 0; case Mips::AT: case Mips::AT_64: case Mips::F1: case Mips::D1_64: + case Mips::AC1: return 1; case Mips::V0: case Mips::V0_64: case Mips::F2: case Mips::D2_64: - case Mips::D1: + case Mips::D1: case Mips::AC2: return 2; case Mips::V1: case Mips::V1_64: case Mips::F3: case Mips::D3_64: + case Mips::AC3: return 3; case Mips::A0: case Mips::A0_64: case Mips::F4: case Mips::D4_64: case Mips::D2: diff --git a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp b/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp new file mode 100644 index 0000000..15c4282 --- /dev/null +++ b/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.cpp @@ -0,0 +1,81 @@ +//===-- MipsDirectObjLower.cpp - Mips LLVM direct object lowering -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains code to lower Mips MCInst records that are normally +// left to the assembler to lower such as large shifts. +// +//===----------------------------------------------------------------------===// +#include "MipsInstrInfo.h" +#include "MCTargetDesc/MipsDirectObjLower.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCStreamer.h" + +using namespace llvm; + +// If the D instruction has a shift amount that is greater +// than 31 (checked in calling routine), lower it to a D32 instruction +void Mips::LowerLargeShift(MCInst& Inst) { + + assert(Inst.getNumOperands() == 3 && "Invalid no. 
of operands for shift!"); + assert(Inst.getOperand(2).isImm()); + + int64_t Shift = Inst.getOperand(2).getImm(); + if (Shift <= 31) + return; // Do nothing + Shift -= 32; + + // saminus32 + Inst.getOperand(2).setImm(Shift); + + switch (Inst.getOpcode()) { + default: + // Calling function is not synchronized + llvm_unreachable("Unexpected shift instruction"); + case Mips::DSLL: + Inst.setOpcode(Mips::DSLL32); + return; + case Mips::DSRL: + Inst.setOpcode(Mips::DSRL32); + return; + case Mips::DSRA: + Inst.setOpcode(Mips::DSRA32); + return; + } +} + +// Pick a DEXT or DINS instruction variant based on the pos and size operands +void Mips::LowerDextDins(MCInst& InstIn) { + int Opcode = InstIn.getOpcode(); + + if (Opcode == Mips::DEXT) + assert(InstIn.getNumOperands() == 4 && + "Invalid no. of machine operands for DEXT!"); + else // Only DEXT and DINS are possible + assert(InstIn.getNumOperands() == 5 && + "Invalid no. of machine operands for DINS!"); + + assert(InstIn.getOperand(2).isImm()); + int64_t pos = InstIn.getOperand(2).getImm(); + assert(InstIn.getOperand(3).isImm()); + int64_t size = InstIn.getOperand(3).getImm(); + + if (size <= 32) { + if (pos < 32) // DEXT/DINS, do nothing + return; + // DEXTU/DINSU + InstIn.getOperand(2).setImm(pos - 32); + InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTU : Mips::DINSU); + return; + } + // DEXTM/DINSM + assert(pos < 32 && "DEXT/DINS cannot have both size and pos > 32"); + InstIn.getOperand(3).setImm(size - 32); + InstIn.setOpcode((Opcode == Mips::DEXT) ? Mips::DEXTM : Mips::DINSM); + return; +} diff --git a/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h b/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h new file mode 100644 index 0000000..8813cc9 --- /dev/null +++ b/lib/Target/Mips/MCTargetDesc/MipsDirectObjLower.h @@ -0,0 +1,28 @@ +//===-- MipsDirectObjLower.h - Mips LLVM direct object lowering *- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef MIPSDIRECTOBJLOWER_H +#define MIPSDIRECTOBJLOWER_H +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/Compiler.h" + +namespace llvm { + class MCInst; + class MCStreamer; + + namespace Mips { + /// MipsDirectObjLower - This name space is used to lower MCInstr in cases + // where the assembler usually finishes the lowering + // such as large shifts. + void LowerLargeShift(MCInst &Inst); + void LowerDextDins(MCInst &Inst); + } +} + +#endif diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp index 8e84b3f..5d240fe 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp @@ -34,7 +34,8 @@ namespace { class MipsELFObjectWriter : public MCELFObjectTargetWriter { public: - MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI, bool _isN64); + MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI, + bool _isN64, bool IsLittleEndian); virtual ~MipsELFObjectWriter(); @@ -53,9 +54,9 @@ namespace { } MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI, - bool _isN64) + bool _isN64, bool IsLittleEndian) : MCELFObjectTargetWriter(_is64Bit, OSABI, ELF::EM_MIPS, - /*HasRelocationAddend*/ false, + /*HasRelocationAddend*/ (_isN64) ? 
true : false, /*IsN64*/ _isN64) {} MipsELFObjectWriter::~MipsELFObjectWriter() {} @@ -274,6 +275,7 @@ MCObjectWriter *llvm::createMipsELFObjectWriter(raw_ostream &OS, bool IsLittleEndian, bool Is64Bit) { MCELFObjectTargetWriter *MOTW = new MipsELFObjectWriter(Is64Bit, OSABI, - (Is64Bit) ? true : false); + (Is64Bit) ? true : false, + IsLittleEndian); return createELFObjectWriter(MOTW, OS, IsLittleEndian); } diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp index 8dab62d..7fbdae0 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp @@ -13,6 +13,7 @@ // #define DEBUG_TYPE "mccodeemitter" #include "MCTargetDesc/MipsBaseInfo.h" +#include "MCTargetDesc/MipsDirectObjLower.h" #include "MCTargetDesc/MipsFixupKinds.h" #include "MCTargetDesc/MipsMCTargetDesc.h" #include "llvm/ADT/APFloat.h" @@ -29,17 +30,14 @@ using namespace llvm; namespace { class MipsMCCodeEmitter : public MCCodeEmitter { - MipsMCCodeEmitter(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT - void operator=(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT + MipsMCCodeEmitter(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION; + void operator=(const MipsMCCodeEmitter &) LLVM_DELETED_FUNCTION; const MCInstrInfo &MCII; - const MCSubtargetInfo &STI; - MCContext &Ctx; bool IsLittleEndian; public: - MipsMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti, - MCContext &ctx, bool IsLittle) : - MCII(mcii), STI(sti) , Ctx(ctx), IsLittleEndian(IsLittle) {} + MipsMCCodeEmitter(const MCInstrInfo &mcii, bool IsLittle) : + MCII(mcii), IsLittleEndian(IsLittle) {} ~MipsMCCodeEmitter() {} @@ -95,7 +93,7 @@ MCCodeEmitter *llvm::createMipsMCCodeEmitterEB(const MCInstrInfo &MCII, const MCSubtargetInfo &STI, MCContext &Ctx) { - return new MipsMCCodeEmitter(MCII, STI, Ctx, false); + return new MipsMCCodeEmitter(MCII, false); } MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII, @@ -103,7 +101,7 @@ MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII, const MCSubtargetInfo &STI, MCContext &Ctx) { - return new MipsMCCodeEmitter(MCII, STI, Ctx, true); + return new MipsMCCodeEmitter(MCII, true); } /// EncodeInstruction - Emit the instruction. @@ -112,16 +110,35 @@ void MipsMCCodeEmitter:: EncodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl &Fixups) const { - uint32_t Binary = getBinaryCodeForInstr(MI, Fixups); + + // Non-pseudo instructions that get changed for direct object + // only based on operand values. + // If this list of instructions get much longer we will move + // the check to a function call. Until then, this is more efficient. + MCInst TmpInst = MI; + switch (MI.getOpcode()) { + // If shift amount is >= 32 it the inst needs to be lowered further + case Mips::DSLL: + case Mips::DSRL: + case Mips::DSRA: + Mips::LowerLargeShift(TmpInst); + break; + // Double extract instruction is chosen by pos and size operands + case Mips::DEXT: + case Mips::DINS: + Mips::LowerDextDins(TmpInst); + } + + uint32_t Binary = getBinaryCodeForInstr(TmpInst, Fixups); // Check for unimplemented opcodes. - // Unfortunately in MIPS both NOT and SLL will come in with Binary == 0 + // Unfortunately in MIPS both NOP and SLL will come in with Binary == 0 // so we have to special check for them. 
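[Editor's aside, not part of the patch.] The special check discussed above exists because the canonical MIPS nop is "sll $zero, $zero, 0", whose R-type fields are all zero, so a legitimate instruction and an unimplemented opcode can both yield Binary == 0. A minimal sketch of that encoding; encodeRType is a hypothetical helper, not an LLVM API:

    #include <cassert>
    #include <cstdint>

    static uint32_t encodeRType(unsigned rs, unsigned rt, unsigned rd,
                                unsigned shamt, unsigned funct) {
      // The opcode field (bits 31..26) is 0 for the SPECIAL group that holds SLL.
      return (rs << 21) | (rt << 16) | (rd << 11) | (shamt << 6) | funct;
    }

    int main() {
      // "sll $zero, $zero, 0": every field is zero, so the encoded word is
      // 0x00000000 -- the same value an unimplemented opcode produces here.
      assert(encodeRType(0, 0, 0, 0, /*funct SLL=*/0) == 0);
      return 0;
    }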
- unsigned Opcode = MI.getOpcode(); + unsigned Opcode = TmpInst.getOpcode(); if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary) llvm_unreachable("unimplemented opcode in EncodeInstruction()"); - const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); + const MCInstrDesc &Desc = MCII.get(TmpInst.getOpcode()); uint64_t TSFlags = Desc.TSFlags; // Pseudo instructions don't get encoded and shouldn't be here @@ -129,8 +146,10 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, if ((TSFlags & MipsII::FormMask) == MipsII::Pseudo) llvm_unreachable("Pseudo opcode found in EncodeInstruction()"); - // For now all instructions are 4 bytes - int Size = 4; // FIXME: Have Desc.getSize() return the correct value! + // Get byte count of instruction + unsigned Size = Desc.getSize(); + if (!Size) + llvm_unreachable("Desc.getSize() returns 0"); EmitInstruction(Binary, Size, OS); } @@ -143,7 +162,11 @@ getBranchTargetOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups) const { const MCOperand &MO = MI.getOperand(OpNo); - assert(MO.isExpr() && "getBranchTargetOpValue expects only expressions"); + + // If the destination is an immediate, we have nothing to do. + if (MO.isImm()) return MO.getImm(); + assert(MO.isExpr() && + "getBranchTargetOpValue expects only expressions or immediates"); const MCExpr *Expr = MO.getExpr(); Fixups.push_back(MCFixup::Create(0, Expr, @@ -159,7 +182,10 @@ getJumpTargetOpValue(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups) const { const MCOperand &MO = MI.getOperand(OpNo); - assert(MO.isExpr() && "getJumpTargetOpValue expects only expressions"); + // If the destination is an immediate, we have nothing to do. + if (MO.isImm()) return MO.getImm(); + assert(MO.isExpr() && + "getJumpTargetOpValue expects only expressions or an immediate"); const MCExpr *Expr = MO.getExpr(); Fixups.push_back(MCFixup::Create(0, Expr, diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h index bfcc2a2..71954a4 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h +++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h @@ -42,10 +42,14 @@ MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII, const MCSubtargetInfo &STI, MCContext &Ctx); -MCAsmBackend *createMipsAsmBackendEB32(const Target &T, StringRef TT); -MCAsmBackend *createMipsAsmBackendEL32(const Target &T, StringRef TT); -MCAsmBackend *createMipsAsmBackendEB64(const Target &T, StringRef TT); -MCAsmBackend *createMipsAsmBackendEL64(const Target &T, StringRef TT); +MCAsmBackend *createMipsAsmBackendEB32(const Target &T, StringRef TT, + StringRef CPU); +MCAsmBackend *createMipsAsmBackendEL32(const Target &T, StringRef TT, + StringRef CPU); +MCAsmBackend *createMipsAsmBackendEB64(const Target &T, StringRef TT, + StringRef CPU); +MCAsmBackend *createMipsAsmBackendEL64(const Target &T, StringRef TT, + StringRef CPU); MCObjectWriter *createMipsELFObjectWriter(raw_ostream &OS, uint8_t OSABI, diff --git a/lib/Target/Mips/Makefile b/lib/Target/Mips/Makefile index 596f071..bd8c517 100644 --- a/lib/Target/Mips/Makefile +++ b/lib/Target/Mips/Makefile @@ -16,7 +16,9 @@ BUILT_SOURCES = MipsGenRegisterInfo.inc MipsGenInstrInfo.inc \ MipsGenAsmWriter.inc MipsGenCodeEmitter.inc \ MipsGenDAGISel.inc MipsGenCallingConv.inc \ MipsGenSubtargetInfo.inc MipsGenMCCodeEmitter.inc \ - MipsGenEDInfo.inc MipsGenDisassemblerTables.inc + MipsGenEDInfo.inc MipsGenDisassemblerTables.inc \ + MipsGenMCPseudoLowering.inc MipsGenAsmMatcher.inc + DIRS = InstPrinter Disassembler 
AsmParser TargetInfo MCTargetDesc include $(LEVEL)/Makefile.common diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td index 90f7942..90c01d5 100644 --- a/lib/Target/Mips/Mips.td +++ b/lib/Target/Mips/Mips.td @@ -77,6 +77,10 @@ def FeatureMips64r2 : SubtargetFeature<"mips64r2", "MipsArchVersion", def FeatureMips16 : SubtargetFeature<"mips16", "InMips16Mode", "true", "Mips16 mode">; +def FeatureDSP : SubtargetFeature<"dsp", "HasDSP", "true", "Mips DSP ASE">; +def FeatureDSPR2 : SubtargetFeature<"dspr2", "HasDSPR2", "true", + "Mips DSP-R2 ASE", [FeatureDSP]>; + //===----------------------------------------------------------------------===// // Mips processors supported. //===----------------------------------------------------------------------===// @@ -95,9 +99,20 @@ def MipsAsmWriter : AsmWriter { bit isMCAsmWriter = 1; } +def MipsAsmParser : AsmParser { + let ShouldEmitMatchRegisterName = 0; +} + +def MipsAsmParserVariant : AsmParserVariant { + int Variant = 0; + + // Recognize hard coded registers. + string RegisterPrefix = "$"; +} + def Mips : Target { let InstructionSet = MipsInstrInfo; - + let AssemblyParsers = [MipsAsmParser]; let AssemblyWriters = [MipsAsmWriter]; + let AssemblyParserVariants = [MipsAsmParserVariant]; } - diff --git a/lib/Target/Mips/Mips16FrameLowering.cpp b/lib/Target/Mips/Mips16FrameLowering.cpp index 030042f..4e6b21f 100644 --- a/lib/Target/Mips/Mips16FrameLowering.cpp +++ b/lib/Target/Mips/Mips16FrameLowering.cpp @@ -20,7 +20,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" @@ -41,6 +41,11 @@ void Mips16FrameLowering::emitPrologue(MachineFunction &MF) const { // Adjust stack. if (isInt<16>(-StackSize)) BuildMI(MBB, MBBI, dl, TII.get(Mips::SaveRaF16)).addImm(StackSize); + + if (hasFP(MF)) + BuildMI(MBB, MBBI, dl, TII.get(Mips::MoveR3216), Mips::S0) + .addReg(Mips::SP); + } void Mips16FrameLowering::emitEpilogue(MachineFunction &MF, @@ -55,6 +60,10 @@ void Mips16FrameLowering::emitEpilogue(MachineFunction &MF, if (!StackSize) return; + if (hasFP(MF)) + BuildMI(MBB, MBBI, dl, TII.get(Mips::Move32R16), Mips::SP) + .addReg(Mips::S0); + // Adjust stack. if (isInt<16>(StackSize)) // assumes stacksize multiple of 8 @@ -66,19 +75,58 @@ spillCalleeSavedRegisters(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const std::vector &CSI, const TargetRegisterInfo *TRI) const { - // FIXME: implement. + MachineFunction *MF = MBB.getParent(); + MachineBasicBlock *EntryBlock = MF->begin(); + + // + // Registers RA, S0,S1 are the callee saved registers and they + // will be saved with the "save" instruction + // during emitPrologue + // + for (unsigned i = 0, e = CSI.size(); i != e; ++i) { + // Add the callee-saved register as live-in. Do not add if the register is + // RA and return address is taken, because it has already been added in + // method MipsTargetLowering::LowerRETURNADDR. + // It's killed at the spill, unless the register is RA and return address + // is taken. 
+ unsigned Reg = CSI[i].getReg(); + bool IsRAAndRetAddrIsTaken = (Reg == Mips::RA) + && MF->getFrameInfo()->isReturnAddressTaken(); + if (!IsRAAndRetAddrIsTaken) + EntryBlock->addLiveIn(Reg); + } + + return true; +} + +bool Mips16FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const { + // + // Registers RA,S0,S1 are the callee saved registers and they will be restored + // with the restore instruction during emitEpilogue. + // We need to override this virtual function, otherwise llvm will try and + // restore the registers on it's on from the stack. + // + return true; } bool Mips16FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const { - // FIXME: implement. - return true; + const MachineFrameInfo *MFI = MF.getFrameInfo(); + // Reserve call frame if the size of the maximum call frame fits into 15-bit + // immediate field and there are no variable sized objects on the stack. + return isInt<15>(MFI->getMaxCallFrameSize()) && !MFI->hasVarSizedObjects(); } void Mips16FrameLowering:: processFunctionBeforeCalleeSavedScan(MachineFunction &MF, RegScavenger *RS) const { + MF.getRegInfo().setPhysRegUsed(Mips::RA); + MF.getRegInfo().setPhysRegUsed(Mips::S0); + MF.getRegInfo().setPhysRegUsed(Mips::S1); } const MipsFrameLowering * diff --git a/lib/Target/Mips/Mips16FrameLowering.h b/lib/Target/Mips/Mips16FrameLowering.h index 25cc37b..01db71e 100644 --- a/lib/Target/Mips/Mips16FrameLowering.h +++ b/lib/Target/Mips/Mips16FrameLowering.h @@ -32,6 +32,11 @@ public: const std::vector &CSI, const TargetRegisterInfo *TRI) const; + bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const; + bool hasReservedCallFrame(const MachineFunction &MF) const; void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp index 2bc286b..619646b 100644 --- a/lib/Target/Mips/Mips16InstrInfo.cpp +++ b/lib/Target/Mips/Mips16InstrInfo.cpp @@ -25,7 +25,7 @@ using namespace llvm; Mips16InstrInfo::Mips16InstrInfo(MipsTargetMachine &tm) - : MipsInstrInfo(tm, /* FIXME: set mips16 unconditional br */ 0), + : MipsInstrInfo(tm, Mips::BimmX16), RI(*tm.getSubtargetImpl(), *this) {} const MipsRegisterInfo &Mips16InstrInfo::getRegisterInfo() const { @@ -58,12 +58,22 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const { - unsigned Opc = 0, ZeroReg = 0; + unsigned Opc = 0; + + if (Mips::CPU16RegsRegClass.contains(DestReg) && + Mips::CPURegsRegClass.contains(SrcReg)) + Opc = Mips::MoveR3216; + else if (Mips::CPURegsRegClass.contains(DestReg) && + Mips::CPU16RegsRegClass.contains(SrcReg)) + Opc = Mips::Move32R16; + else if ((SrcReg == Mips::HI) && + (Mips::CPU16RegsRegClass.contains(DestReg))) + Opc = Mips::Mfhi16, SrcReg = 0; + + else if ((SrcReg == Mips::LO) && + (Mips::CPU16RegsRegClass.contains(DestReg))) + Opc = Mips::Mflo16, SrcReg = 0; - if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg. 
- if (Mips::CPURegsRegClass.contains(SrcReg)) - Opc = Mips::Mov32R16; - } assert(Opc && "Cannot copy registers"); @@ -72,9 +82,6 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB, if (DestReg) MIB.addReg(DestReg, RegState::Define); - if (ZeroReg) - MIB.addReg(ZeroReg); - if (SrcReg) MIB.addReg(SrcReg, getKillRegState(KillSrc)); } @@ -84,7 +91,15 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { - assert(false && "Implement this function."); + DebugLoc DL; + if (I != MBB.end()) DL = I->getDebugLoc(); + MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore); + unsigned Opc = 0; + if (Mips::CPU16RegsRegClass.hasSubClassEq(RC)) + Opc = Mips::SwRxSpImmX16; + assert(Opc && "Register class not handled!"); + BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill)) + .addFrameIndex(FI).addImm(0).addMemOperand(MMO); } void Mips16InstrInfo:: @@ -92,7 +107,16 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, int FI, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { - assert(false && "Implement this function."); + DebugLoc DL; + if (I != MBB.end()) DL = I->getDebugLoc(); + MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad); + unsigned Opc = 0; + + if (Mips::CPU16RegsRegClass.hasSubClassEq(RC)) + Opc = Mips::LwRxSpImmX16; + assert(Opc && "Register class not handled!"); + BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0) + .addMemOperand(MMO); } bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { @@ -102,7 +126,7 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { default: return false; case Mips::RetRA16: - ExpandRetRA16(MBB, MI, Mips::JrRa16); + ExpandRetRA16(MBB, MI, Mips::JrcRa16); break; } @@ -113,12 +137,55 @@ bool Mips16InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { /// GetOppositeBranchOpc - Return the inverse of the specified /// opcode, e.g. turning BEQ to BNE. unsigned Mips16InstrInfo::GetOppositeBranchOpc(unsigned Opc) const { + switch (Opc) { + default: llvm_unreachable("Illegal opcode!"); + case Mips::BeqzRxImmX16: return Mips::BnezRxImmX16; + case Mips::BnezRxImmX16: return Mips::BeqzRxImmX16; + case Mips::BteqzT8CmpX16: return Mips::BtnezT8CmpX16; + case Mips::BteqzT8SltX16: return Mips::BtnezT8SltX16; + case Mips::BteqzT8SltiX16: return Mips::BtnezT8SltiX16; + case Mips::BtnezX16: return Mips::BteqzX16; + case Mips::BtnezT8CmpiX16: return Mips::BteqzT8CmpiX16; + case Mips::BtnezT8SltuX16: return Mips::BteqzT8SltuX16; + case Mips::BtnezT8SltiuX16: return Mips::BteqzT8SltiuX16; + case Mips::BteqzX16: return Mips::BtnezX16; + case Mips::BteqzT8CmpiX16: return Mips::BtnezT8CmpiX16; + case Mips::BteqzT8SltuX16: return Mips::BtnezT8SltuX16; + case Mips::BteqzT8SltiuX16: return Mips::BtnezT8SltiuX16; + case Mips::BtnezT8CmpX16: return Mips::BteqzT8CmpX16; + case Mips::BtnezT8SltX16: return Mips::BteqzT8SltX16; + case Mips::BtnezT8SltiX16: return Mips::BteqzT8SltiX16; + } assert(false && "Implement this function."); return 0; } +/// Adjust SP by Amount bytes. +void Mips16InstrInfo::adjustStackPtr(unsigned SP, int64_t Amount, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const { + DebugLoc DL = I != MBB.end() ? 
I->getDebugLoc() : DebugLoc(); + if (isInt<16>(Amount)) { + if (Amount < 0) + BuildMI(MBB, I, DL, get(Mips::SaveDecSpF16)). addImm(-Amount); + else if (Amount > 0) + BuildMI(MBB, I, DL, get(Mips::RestoreIncSpF16)).addImm(Amount); + } + else + // not implemented for large values yet + assert(false && "adjust stack pointer amount exceeded"); +} + unsigned Mips16InstrInfo::GetAnalyzableBrOpc(unsigned Opc) const { - return 0; + return (Opc == Mips::BeqzRxImmX16 || Opc == Mips::BimmX16 || + Opc == Mips::BnezRxImmX16 || Opc == Mips::BteqzX16 || + Opc == Mips::BteqzT8CmpX16 || Opc == Mips::BteqzT8CmpiX16 || + Opc == Mips::BteqzT8SltX16 || Opc == Mips::BteqzT8SltuX16 || + Opc == Mips::BteqzT8SltiX16 || Opc == Mips::BteqzT8SltiuX16 || + Opc == Mips::BtnezX16 || Opc == Mips::BtnezT8CmpX16 || + Opc == Mips::BtnezT8CmpiX16 || Opc == Mips::BtnezT8SltX16 || + Opc == Mips::BtnezT8SltuX16 || Opc == Mips::BtnezT8SltiX16 || + Opc == Mips::BtnezT8SltiuX16 ) ? Opc : 0; } void Mips16InstrInfo::ExpandRetRA16(MachineBasicBlock &MBB, diff --git a/lib/Target/Mips/Mips16InstrInfo.h b/lib/Target/Mips/Mips16InstrInfo.h index 260c5b6..e06ccfe 100644 --- a/lib/Target/Mips/Mips16InstrInfo.h +++ b/lib/Target/Mips/Mips16InstrInfo.h @@ -64,6 +64,10 @@ public: virtual unsigned GetOppositeBranchOpc(unsigned Opc) const; + /// Adjust SP by Amount bytes. + void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const; + private: virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const; diff --git a/lib/Target/Mips/Mips16InstrInfo.td b/lib/Target/Mips/Mips16InstrInfo.td index 94cf984..5defc75 100644 --- a/lib/Target/Mips/Mips16InstrInfo.td +++ b/lib/Target/Mips/Mips16InstrInfo.td @@ -10,21 +10,74 @@ // This file describes Mips16 instructions. // //===----------------------------------------------------------------------===// +// +// +// Mips Address +// +def addr16 : + ComplexPattern; // -// RRR-type instruction format +// Address operand +def mem16 : Operand { + let PrintMethod = "printMemOperand"; + let MIOperandInfo = (ops CPU16Regs, simm16, CPU16Regs); + let EncoderMethod = "getMemEncoding"; +} + +def mem16_ea : Operand { + let PrintMethod = "printMemOperandEA"; + let MIOperandInfo = (ops CPU16Regs, simm16); + let EncoderMethod = "getMemEncoding"; +} + +// +// Compare a register and immediate and place result in CC +// Implicit use of T8 // +// EXT-CCRR Instruction format +// +class FEXT_CCRXI16_ins _op, string asmstr, + InstrItinClass itin>: + FEXT_RI16<_op, (outs CPU16Regs:$cc), (ins CPU16Regs:$rx, simm16:$imm), + !strconcat(asmstr, "\t$rx, $imm\n\tmove\t$cc, $$t8"), [], itin> { + let isCodeGenOnly=1; +} -class FRRR16_ins _f, string asmstr, InstrItinClass itin> : - FRRR16<_f, (outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry), - !strconcat(asmstr, "\t$rz, $rx, $ry"), [], itin>; +// +// EXT-I instruction format +// +class FEXT_I16_ins eop, string asmstr, InstrItinClass itin> : + FEXT_I16; // -// I8_MOV32R instruction format (used only by MOV32R instruction) +// EXT-I8 instruction format // -class FI8_MOV32R16_ins: - FI8_MOV32R16<(outs CPURegs:$r32), (ins CPU16Regs:$rz), - !strconcat(asmstr, "\t$r32, $rz"), [], itin>; + +class FEXT_I816_ins_base _func, string asmstr, + string asmstr2, InstrItinClass itin>: + FEXT_I816<_func, (outs), (ins uimm16:$imm), !strconcat(asmstr, asmstr2), + [], itin>; + +class FEXT_I816_ins _func, string asmstr, + InstrItinClass itin>: + FEXT_I816_ins_base<_func, asmstr, "\t$imm", itin>; + +// +// Assembler formats in alphabetical order. 
+// Natural and pseudos are mixed together. +// +// Compare two registers and place result in CC +// Implicit use of T8 +// +// CC-RR Instruction format +// +class FCCRR16_ins f, string asmstr, InstrItinClass itin> : + FRR16 { + let isCodeGenOnly=1; +} // // EXT-RI instruction format @@ -42,6 +95,10 @@ class FEXT_RI16_ins _op, string asmstr, class FEXT_RI16_PC_ins _op, string asmstr, InstrItinClass itin>: FEXT_RI16_ins_base<_op, asmstr, "\t$rx, $$pc, $imm", itin>; +class FEXT_RI16_B_ins _op, string asmstr, + InstrItinClass itin>: + FEXT_RI16<_op, (outs), (ins CPU16Regs:$rx, brtarget:$imm), + !strconcat(asmstr, "\t$rx, $imm"), [], itin>; class FEXT_2RI16_ins _op, string asmstr, InstrItinClass itin>: @@ -51,6 +108,104 @@ class FEXT_2RI16_ins _op, string asmstr, } +// this has an explicit sp argument that we ignore to work around a problem +// in the compiler +class FEXT_RI16_SP_explicit_ins _op, string asmstr, + InstrItinClass itin>: + FEXT_RI16<_op, (outs CPU16Regs:$rx), (ins CPUSPReg:$ry, simm16:$imm), + !strconcat(asmstr, "\t$rx, $imm ( $ry ); "), [], itin>; + +// +// EXT-RRI instruction format +// + +class FEXT_RRI16_mem_ins op, string asmstr, Operand MemOpnd, + InstrItinClass itin>: + FEXT_RRI16; + +class FEXT_RRI16_mem2_ins op, string asmstr, Operand MemOpnd, + InstrItinClass itin>: + FEXT_RRI16; + +// +// +// EXT-RRI-A instruction format +// + +class FEXT_RRI_A16_mem_ins op, string asmstr, Operand MemOpnd, + InstrItinClass itin>: + FEXT_RRI_A16; + +// +// EXT-SHIFT instruction format +// +class FEXT_SHIFT16_ins _f, string asmstr, InstrItinClass itin>: + FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, shamt:$sa), + !strconcat(asmstr, "\t$rx, $ry, $sa"), [], itin>; + +// +// EXT-T8I8 +// +class FEXT_T8I816_ins _func, string asmstr, string asmstr2, + InstrItinClass itin>: + FEXT_I816<_func, (outs), + (ins CPU16Regs:$rx, CPU16Regs:$ry, brtarget:$imm), + !strconcat(asmstr2, !strconcat("\t$rx, $ry\n\t", + !strconcat(asmstr, "\t$imm"))),[], itin> { + let isCodeGenOnly=1; +} + +// +// EXT-T8I8I +// +class FEXT_T8I8I16_ins _func, string asmstr, string asmstr2, + InstrItinClass itin>: + FEXT_I816<_func, (outs), + (ins CPU16Regs:$rx, simm16:$imm, brtarget:$targ), + !strconcat(asmstr2, !strconcat("\t$rx, $imm\n\t", + !strconcat(asmstr, "\t$targ"))), [], itin> { + let isCodeGenOnly=1; +} +// + + +// +// I8_MOVR32 instruction format (used only by the MOVR32 instructio +// +class FI8_MOVR3216_ins: + FI8_MOVR3216<(outs CPU16Regs:$rz), (ins CPURegs:$r32), + !strconcat(asmstr, "\t$rz, $r32"), [], itin>; + +// +// I8_MOV32R instruction format (used only by MOV32R instruction) +// + +class FI8_MOV32R16_ins: + FI8_MOV32R16<(outs CPURegs:$r32), (ins CPU16Regs:$rz), + !strconcat(asmstr, "\t$r32, $rz"), [], itin>; + +// +// This are pseudo formats for multiply +// This first one can be changed to non pseudo now. 
+// +// MULT +// +class FMULT16_ins : + MipsPseudo16<(outs), (ins CPU16Regs:$rx, CPU16Regs:$ry), + !strconcat(asmstr, "\t$rx, $ry"), []>; + +// +// MULT-LO +// +class FMULT16_LO_ins : + MipsPseudo16<(outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry), + !strconcat(asmstr, "\t$rx, $ry\n\tmflo\t$rz"), []> { + let isCodeGenOnly=1; +} + // // RR-type instruction format // @@ -60,6 +215,27 @@ class FRR16_ins f, string asmstr, InstrItinClass itin> : !strconcat(asmstr, "\t$rx, $ry"), [], itin> { } +class FRRTR16_ins f, string asmstr, InstrItinClass itin> : + FRR16 ; + +// +// maybe refactor but need a $zero as a dummy first parameter +// +class FRR16_div_ins f, string asmstr, InstrItinClass itin> : + FRR16 ; + +class FUnaryRR16_ins f, string asmstr, InstrItinClass itin> : + FRR16 ; + + +class FRR16_M_ins f, string asmstr, + InstrItinClass itin> : + FRR16; + class FRxRxRy16_ins f, string asmstr, InstrItinClass itin> : FRR16 nd_, bits<1> l_, FRR16_JALRC ; + +class FRR16_JALRC_ins nd, bits<1> l, bits<1> ra, + string asmstr, InstrItinClass itin>: + FRR16_JALRC ; + // -// EXT-RRI instruction format +// RRR-type instruction format // -class FEXT_RRI16_mem_ins op, string asmstr, Operand MemOpnd, - InstrItinClass itin>: - FEXT_RRI16; +class FRRR16_ins _f, string asmstr, InstrItinClass itin> : + FRRR16<_f, (outs CPU16Regs:$rz), (ins CPU16Regs:$rx, CPU16Regs:$ry), + !strconcat(asmstr, "\t$rz, $rx, $ry"), [], itin>; -class FEXT_RRI16_mem2_ins op, string asmstr, Operand MemOpnd, - InstrItinClass itin>: - FEXT_RRI16; +// +// These Sel patterns support the generation of conditional move +// pseudo instructions. +// +// The nomenclature uses the components making up the pseudo and may +// be a bit counter intuitive when compared with the end result we seek. +// For example using a bqez in the example directly below results in the +// conditional move being done if the tested register is not zero. +// I considered in easier to check by keeping the pseudo consistent with +// it's components but it could have been done differently. +// +// The simplest case is when can test and operand directly and do the +// conditional move based on a simple mips16 conditional +// branch instruction. +// for example: +// if $op == beqz or bnez: +// +// $op1 $rt, .+4 +// move $rd, $rs +// +// if $op == beqz, then if $rt != 0, then the conditional assignment +// $rd = $rs is done. +// if $op == bnez, then if $rt == 0, then the conditional assignment +// $rd = $rs is done. // -// EXT-SHIFT instruction format +// So this pseudo class only has one operand, i.e. 
op // -class FEXT_SHIFT16_ins _f, string asmstr, InstrItinClass itin>: - FEXT_SHIFT16<_f, (outs CPU16Regs:$rx), (ins CPU16Regs:$ry, shamt:$sa), - !strconcat(asmstr, "\t$rx, $ry, $sa"), [], itin>; +class Sel f1, string op, InstrItinClass itin>: + MipsInst16_32<(outs CPU16Regs:$rd_), (ins CPU16Regs:$rd, CPU16Regs:$rs, + CPU16Regs:$rt), + !strconcat(op, "\t$rt, .+4\n\t\n\tmove $rd, $rs"), [], itin, + Pseudo16> { + let isCodeGenOnly=1; + let Constraints = "$rd = $rd_"; +} // -// Address operand -def mem16 : Operand { - let PrintMethod = "printMemOperand"; - let MIOperandInfo = (ops CPU16Regs, simm16); - let EncoderMethod = "getMemEncoding"; +// The next two instruction classes allow for an operand which tests +// two operands and returns a value in register T8 and +//then does a conditional branch based on the value of T8 +// + +// op2 can be cmpi or slti/sltiu +// op1 can bteqz or btnez +// the operands for op2 are a register and a signed constant +// +// $op2 $t, $imm ;test register t and branch conditionally +// $op1 .+4 ;op1 is a conditional branch +// move $rd, $rs +// +// +class SeliT f1, string op1, bits<5> f2, string op2, + InstrItinClass itin>: + MipsInst16_32<(outs CPU16Regs:$rd_), (ins CPU16Regs:$rd, CPU16Regs:$rs, + CPU16Regs:$rl, simm16:$imm), + !strconcat(op2, + !strconcat("\t$rl, $imm\n\t", + !strconcat(op1, "\t.+4\n\tmove $rd, $rs"))), [], itin, + Pseudo16> { + let isCodeGenOnly=1; + let Constraints = "$rd = $rd_"; +} + +// +// op2 can be cmp or slt/sltu +// op1 can be bteqz or btnez +// the operands for op2 are two registers +// op1 is a conditional branch +// +// +// $op2 $rl, $rr ;test registers rl,rr +// $op1 .+4 ;op2 is a conditional branch +// move $rd, $rs +// +// +class SelT f1, string op1, bits<5> f2, string op2, + InstrItinClass itin>: + MipsInst16_32<(outs CPU16Regs:$rd_), (ins CPU16Regs:$rd, CPU16Regs:$rs, + CPU16Regs:$rl, CPU16Regs:$rr), + !strconcat(op2, + !strconcat("\t$rl, $rr\n\t", + !strconcat(op1, "\t.+4\n\tmove $rd, $rs"))), [], itin, + Pseudo16> { + let isCodeGenOnly=1; + let Constraints = "$rd = $rd_"; } + // // Some general instruction class info // @@ -115,6 +365,24 @@ class ArithLogic16Defs { bit neverHasSideEffects = 1; } +class branch16 { + bit isBranch = 1; + bit isTerminator = 1; + bit isBarrier = 1; +} + +class cbranch16 { + bit isBranch = 1; + bit isTerminator = 1; +} + +class MayLoad { + bit mayLoad = 1; +} + +class MayStore { + bit mayStore = 1; +} // // Format: ADDIU rx, immediate MIPS16e @@ -126,6 +394,9 @@ def AddiuRxImmX16: FEXT_RI16_ins<0b01001, "addiu", IIAlu>; def AddiuRxRxImmX16: FEXT_2RI16_ins<0b01001, "addiu", IIAlu>, ArithLogic16Defs<0>; +def AddiuRxRyOffMemX16: + FEXT_RRI_A16_mem_ins<0, "addiu", mem16_ea, IIAlu>; + // // Format: ADDIU rx, pc, immediate MIPS16e @@ -148,6 +419,87 @@ def AdduRxRyRz16: FRRR16_ins<01, "addu", IIAlu>, ArithLogic16Defs<1>; def AndRxRxRy16: FRxRxRy16_ins<0b01100, "and", IIAlu>, ArithLogic16Defs<1>; + +// +// Format: BEQZ rx, offset MIPS16e +// Purpose: Branch on Equal to Zero (Extended) +// To test a GPR then do a PC-relative conditional branch. +// +def BeqzRxImmX16: FEXT_RI16_B_ins<0b00100, "beqz", IIAlu>, cbranch16; + +// Format: B offset MIPS16e +// Purpose: Unconditional Branch +// To do an unconditional PC-relative branch. +// +def BimmX16: FEXT_I16_ins<0b00010, "b", IIAlu>, branch16; + +// +// Format: BNEZ rx, offset MIPS16e +// Purpose: Branch on Not Equal to Zero (Extended) +// To test a GPR then do a PC-relative conditional branch. 
+// +def BnezRxImmX16: FEXT_RI16_B_ins<0b00101, "bnez", IIAlu>, cbranch16; + +// +// Format: BTEQZ offset MIPS16e +// Purpose: Branch on T Equal to Zero (Extended) +// To test special register T then do a PC-relative conditional branch. +// +def BteqzX16: FEXT_I816_ins<0b000, "bteqz", IIAlu>, cbranch16; + +def BteqzT8CmpX16: FEXT_T8I816_ins<0b000, "bteqz", "cmp", IIAlu>, cbranch16; + +def BteqzT8CmpiX16: FEXT_T8I8I16_ins<0b000, "bteqz", "cmpi", IIAlu>, + cbranch16; + +def BteqzT8SltX16: FEXT_T8I816_ins<0b000, "bteqz", "slt", IIAlu>, cbranch16; + +def BteqzT8SltuX16: FEXT_T8I816_ins<0b000, "bteqz", "sltu", IIAlu>, cbranch16; + +def BteqzT8SltiX16: FEXT_T8I8I16_ins<0b000, "bteqz", "slti", IIAlu>, cbranch16; + +def BteqzT8SltiuX16: FEXT_T8I8I16_ins<0b000, "bteqz", "sltiu", IIAlu>, + cbranch16; + +// +// Format: BTNEZ offset MIPS16e +// Purpose: Branch on T Not Equal to Zero (Extended) +// To test special register T then do a PC-relative conditional branch. +// +def BtnezX16: FEXT_I816_ins<0b001, "btnez", IIAlu> ,cbranch16; + +def BtnezT8CmpX16: FEXT_T8I816_ins<0b000, "btnez", "cmp", IIAlu>, cbranch16; + +def BtnezT8CmpiX16: FEXT_T8I8I16_ins<0b000, "btnez", "cmpi", IIAlu>, cbranch16; + +def BtnezT8SltX16: FEXT_T8I816_ins<0b000, "btnez", "slt", IIAlu>, cbranch16; + +def BtnezT8SltuX16: FEXT_T8I816_ins<0b000, "btnez", "sltu", IIAlu>, cbranch16; + +def BtnezT8SltiX16: FEXT_T8I8I16_ins<0b000, "btnez", "slti", IIAlu>, cbranch16; + +def BtnezT8SltiuX16: FEXT_T8I8I16_ins<0b000, "btnez", "sltiu", IIAlu>, + cbranch16; + +// +// Format: DIV rx, ry MIPS16e +// Purpose: Divide Word +// To divide 32-bit signed integers. +// +def DivRxRy16: FRR16_div_ins<0b11010, "div", IIAlu> { + let Defs = [HI, LO]; +} + +// +// Format: DIVU rx, ry MIPS16e +// Purpose: Divide Unsigned Word +// To divide 32-bit unsigned integers. +// +def DivuRxRy16: FRR16_div_ins<0b11011, "divu", IIAlu> { + let Defs = [HI, LO]; +} + + // // Format: JR ra MIPS16e // Purpose: Jump Register Through Register ra @@ -155,35 +507,56 @@ def AndRxRxRy16: FRxRxRy16_ins<0b01100, "and", IIAlu>, ArithLogic16Defs<1>; // address register. // -def JrRa16: FRR16_JALRC_RA_only_ins<0, 0, "jr", IIAlu>; +def JrRa16: FRR16_JALRC_RA_only_ins<0, 0, "jr", IIAlu> { + let isBranch = 1; + let isIndirectBranch = 1; + let hasDelaySlot = 1; + let isTerminator=1; + let isBarrier=1; +} + +def JrcRa16: FRR16_JALRC_RA_only_ins<0, 0, "jrc", IIAlu> { + let isBranch = 1; + let isIndirectBranch = 1; + let isTerminator=1; + let isBarrier=1; +} +def JrcRx16: FRR16_JALRC_ins<1, 1, 0, "jrc", IIAlu> { + let isBranch = 1; + let isIndirectBranch = 1; + let isTerminator=1; + let isBarrier=1; +} // // Format: LB ry, offset(rx) MIPS16e // Purpose: Load Byte (Extended) // To load a byte from memory as a signed value. // -def LbRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lb", mem16, IIAlu>; +def LbRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lb", mem16, IILoad>, MayLoad; // // Format: LBU ry, offset(rx) MIPS16e // Purpose: Load Byte Unsigned (Extended) // To load a byte from memory as a unsigned value. // -def LbuRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lbu", mem16, IIAlu>; +def LbuRxRyOffMemX16: + FEXT_RRI16_mem_ins<0b10100, "lbu", mem16, IILoad>, MayLoad; // // Format: LH ry, offset(rx) MIPS16e // Purpose: Load Halfword signed (Extended) // To load a halfword from memory as a signed value. 
// -def LhRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lh", mem16, IIAlu>; +def LhRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lh", mem16, IILoad>, MayLoad; // // Format: LHU ry, offset(rx) MIPS16e // Purpose: Load Halfword unsigned (Extended) // To load a halfword from memory as an unsigned value. // -def LhuRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10100, "lhu", mem16, IIAlu>; +def LhuRxRyOffMemX16: + FEXT_RRI16_mem_ins<0b10100, "lhu", mem16, IILoad>, MayLoad; // // Format: LI rx, immediate MIPS16e @@ -197,28 +570,98 @@ def LiRxImmX16: FEXT_RI16_ins<0b01101, "li", IIAlu>; // Purpose: Load Word (Extended) // To load a word from memory as a signed value. // -def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IIAlu>; +def LwRxRyOffMemX16: FEXT_RRI16_mem_ins<0b10011, "lw", mem16, IILoad>, MayLoad; + +// Format: LW rx, offset(sp) MIPS16e +// Purpose: Load Word (SP-Relative, Extended) +// To load an SP-relative word from memory as a signed value. +// +def LwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b10110, "lw", IILoad>, MayLoad; // // Format: MOVE r32, rz MIPS16e // Purpose: Move // To move the contents of a GPR to a GPR. // -def Mov32R16: FI8_MOV32R16_ins<"move", IIAlu>; +def Move32R16: FI8_MOV32R16_ins<"move", IIAlu>; + +// +// Format: MOVE ry, r32 MIPS16e +//Purpose: Move +// To move the contents of a GPR to a GPR. +// +def MoveR3216: FI8_MOVR3216_ins<"move", IIAlu>; + +// +// Format: MFHI rx MIPS16e +// Purpose: Move From HI Register +// To copy the special purpose HI register to a GPR. +// +def Mfhi16: FRR16_M_ins<0b10000, "mfhi", IIAlu> { + let Uses = [HI]; + let neverHasSideEffects = 1; +} + +// +// Format: MFLO rx MIPS16e +// Purpose: Move From LO Register +// To copy the special purpose LO register to a GPR. +// +def Mflo16: FRR16_M_ins<0b10010, "mflo", IIAlu> { + let Uses = [LO]; + let neverHasSideEffects = 1; +} + +// +// Pseudo Instruction for mult +// +def MultRxRy16: FMULT16_ins<"mult", IIAlu> { + let isCommutable = 1; + let neverHasSideEffects = 1; + let Defs = [HI, LO]; +} + +def MultuRxRy16: FMULT16_ins<"multu", IIAlu> { + let isCommutable = 1; + let neverHasSideEffects = 1; + let Defs = [HI, LO]; +} + +// +// Format: MULT rx, ry MIPS16e +// Purpose: Multiply Word +// To multiply 32-bit signed integers. +// +def MultRxRyRz16: FMULT16_LO_ins<"mult", IIAlu> { + let isCommutable = 1; + let neverHasSideEffects = 1; + let Defs = [HI, LO]; +} + +// +// Format: MULTU rx, ry MIPS16e +// Purpose: Multiply Unsigned Word +// To multiply 32-bit unsigned integers. +// +def MultuRxRyRz16: FMULT16_LO_ins<"multu", IIAlu> { + let isCommutable = 1; + let neverHasSideEffects = 1; + let Defs = [HI, LO]; +} // // Format: NEG rx, ry MIPS16e // Purpose: Negate // To negate an integer value. 
// -def NegRxRy16: FRR16_ins<0b11101, "neg", IIAlu>; +def NegRxRy16: FUnaryRR16_ins<0b11101, "neg", IIAlu>; // // Format: NOT rx, ry MIPS16e // Purpose: Not // To complement an integer value // -def NotRxRy16: FRR16_ins<0b01111, "not", IIAlu>; +def NotRxRy16: FUnaryRR16_ins<0b01111, "not", IIAlu>; // // Format: OR rx, ry MIPS16e @@ -240,10 +683,22 @@ def OrRxRxRy16: FRxRxRy16_ins<0b01101, "or", IIAlu>, ArithLogic16Defs<1>; // for direct object emitter, encoding needs to be adjusted for the // frame size // -let ra=1, s=0,s0=0,s1=0 in +let ra=1, s=0,s0=1,s1=1 in def RestoreRaF16: FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size), - "restore \t$$ra, $frame_size", [], IILoad >; + "restore\t$$ra, $$s0, $$s1, $frame_size", [], IILoad >, MayLoad { + let isCodeGenOnly = 1; +} + +// Use Restore to increment SP since SP is not a Mip 16 register, this +// is an easy way to do that which does not require a register. +// +let ra=0, s=0,s0=0,s1=0 in +def RestoreIncSpF16: + FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size), + "restore\t$frame_size", [], IILoad >, MayLoad { + let isCodeGenOnly = 1; +} // // Format: SAVE {ra,}{s0/s1/s0-1,}{framesize} (All arguments are optional) @@ -252,24 +707,152 @@ def RestoreRaF16: // To set up a stack frame on entry to a subroutine, // saving return address and static registers, and adjusting stack // -let ra=1, s=1,s0=0,s1=0 in +let ra=1, s=1,s0=1,s1=1 in def SaveRaF16: FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size), - "save \t$$ra, $frame_size", [], IILoad >; + "save\t$$ra, $$s0, $$s1, $frame_size", [], IIStore >, MayStore { + let isCodeGenOnly = 1; +} // +// Use Save to decrement the SP by a constant since SP is not +// a Mips16 register. +// +let ra=0, s=0,s0=0,s1=0 in +def SaveDecSpF16: + FI8_SVRS16<0b1, (outs), (ins uimm16:$frame_size), + "save\t$frame_size", [], IIStore >, MayStore { + let isCodeGenOnly = 1; +} +// // Format: SB ry, offset(rx) MIPS16e // Purpose: Store Byte (Extended) // To store a byte to memory. // -def SbRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, IIAlu>; +def SbRxRyOffMemX16: + FEXT_RRI16_mem2_ins<0b11000, "sb", mem16, IIStore>, MayStore; // +// The Sel(T) instructions are pseudos +// T means that they use T8 implicitly. +// +// +// Format: SelBeqZ rd, rs, rt +// Purpose: if rt==0, do nothing +// else rs = rt +// +def SelBeqZ: Sel<0b00100, "beqz", IIAlu>; + +// +// Format: SelTBteqZCmp rd, rs, rl, rr +// Purpose: b = Cmp rl, rr. +// If b==0 then do nothing. +// if b!=0 then rd = rs +// +def SelTBteqZCmp: SelT<0b000, "bteqz", 0b01010, "cmp", IIAlu>; + +// +// Format: SelTBteqZCmpi rd, rs, rl, rr +// Purpose: b = Cmpi rl, imm. +// If b==0 then do nothing. +// if b!=0 then rd = rs +// +def SelTBteqZCmpi: SeliT<0b000, "bteqz", 0b01110, "cmpi", IIAlu>; + +// +// Format: SelTBteqZSlt rd, rs, rl, rr +// Purpose: b = Slt rl, rr. +// If b==0 then do nothing. +// if b!=0 then rd = rs +// +def SelTBteqZSlt: SelT<0b000, "bteqz", 0b00010, "slt", IIAlu>; + +// +// Format: SelTBteqZSlti rd, rs, rl, rr +// Purpose: b = Slti rl, imm. +// If b==0 then do nothing. +// if b!=0 then rd = rs +// +def SelTBteqZSlti: SeliT<0b000, "bteqz", 0b01010, "slti", IIAlu>; + +// +// Format: SelTBteqZSltu rd, rs, rl, rr +// Purpose: b = Sltu rl, rr. +// If b==0 then do nothing. +// if b!=0 then rd = rs +// +def SelTBteqZSltu: SelT<0b000, "bteqz", 0b00011, "sltu", IIAlu>; + +// +// Format: SelTBteqZSltiu rd, rs, rl, rr +// Purpose: b = Sltiu rl, imm. +// If b==0 then do nothing. 
+// if b!=0 then rd = rs +// +def SelTBteqZSltiu: SeliT<0b000, "bteqz", 0b01011, "sltiu", IIAlu>; + +// +// Format: SelBnez rd, rs, rt +// Purpose: if rt!=0, do nothing +// else rs = rt +// +def SelBneZ: Sel<0b00101, "bnez", IIAlu>; + +// +// Format: SelTBtneZCmp rd, rs, rl, rr +// Purpose: b = Cmp rl, rr. +// If b!=0 then do nothing. +// if b0=0 then rd = rs +// +def SelTBtneZCmp: SelT<0b001, "btnez", 0b01010, "cmp", IIAlu>; + +// +// Format: SelTBtnezCmpi rd, rs, rl, rr +// Purpose: b = Cmpi rl, imm. +// If b!=0 then do nothing. +// if b==0 then rd = rs +// +def SelTBtneZCmpi: SeliT<0b000, "btnez", 0b01110, "cmpi", IIAlu>; + +// +// Format: SelTBtneZSlt rd, rs, rl, rr +// Purpose: b = Slt rl, rr. +// If b!=0 then do nothing. +// if b==0 then rd = rs +// +def SelTBtneZSlt: SelT<0b001, "btnez", 0b00010, "slt", IIAlu>; + +// +// Format: SelTBtneZSlti rd, rs, rl, rr +// Purpose: b = Slti rl, imm. +// If b!=0 then do nothing. +// if b==0 then rd = rs +// +def SelTBtneZSlti: SeliT<0b001, "btnez", 0b01010, "slti", IIAlu>; + +// +// Format: SelTBtneZSltu rd, rs, rl, rr +// Purpose: b = Sltu rl, rr. +// If b!=0 then do nothing. +// if b==0 then rd = rs +// +def SelTBtneZSltu: SelT<0b001, "btnez", 0b00011, "sltu", IIAlu>; + +// +// Format: SelTBtneZSltiu rd, rs, rl, rr +// Purpose: b = Slti rl, imm. +// If b!=0 then do nothing. +// if b==0 then rd = rs +// +def SelTBtneZSltiu: SeliT<0b001, "btnez", 0b01011, "sltiu", IIAlu>; +// +// // Format: SH ry, offset(rx) MIPS16e // Purpose: Store Halfword (Extended) // To store a halfword to memory. // -def ShRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11001, "sh", mem16, IIAlu>; +def ShRxRyOffMemX16: + FEXT_RRI16_mem2_ins<0b11001, "sh", mem16, IIStore>, MayStore; // // Format: SLL rx, ry, sa MIPS16e @@ -285,8 +868,40 @@ def SllX16: FEXT_SHIFT16_ins<0b00, "sll", IIAlu>; // def SllvRxRy16 : FRxRxRy16_ins<0b00100, "sllv", IIAlu>; +// +// Format: SLTI rx, immediate MIPS16e +// Purpose: Set on Less Than Immediate (Extended) +// To record the result of a less-than comparison with a constant. +// +def SltiCCRxImmX16: FEXT_CCRXI16_ins<0b01010, "slti", IIAlu>; // +// Format: SLTIU rx, immediate MIPS16e +// Purpose: Set on Less Than Immediate Unsigned (Extended) +// To record the result of a less-than comparison with a constant. +// +def SltiuCCRxImmX16: FEXT_CCRXI16_ins<0b01011, "sltiu", IIAlu>; + +// +// Format: SLT rx, ry MIPS16e +// Purpose: Set on Less Than +// To record the result of a less-than comparison. +// +def SltRxRy16: FRR16_ins<0b00010, "slt", IIAlu>; + +def SltCCRxRy16: FCCRR16_ins<0b00010, "slt", IIAlu>; + +// Format: SLTU rx, ry MIPS16e +// Purpose: Set on Less Than Unsigned +// To record the result of an unsigned less-than comparison. +// +def SltuRxRyRz16: FRRTR16_ins<0b00011, "sltu", IIAlu> { + let isCodeGenOnly=1; +} + + +def SltuCCRxRy16: FCCRR16_ins<0b00011, "sltu", IIAlu>; +// // Format: SRAV ry, rx MIPS16e // Purpose: Shift Word Right Arithmetic Variable // To execute an arithmetic right-shift of a word by a variable @@ -333,9 +948,18 @@ def SubuRxRyRz16: FRRR16_ins<0b11, "subu", IIAlu>, ArithLogic16Defs<0>; // Purpose: Store Word (Extended) // To store a word to memory. // -def SwRxRyOffMemX16: FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, IIAlu>; +def SwRxRyOffMemX16: + FEXT_RRI16_mem2_ins<0b11011, "sw", mem16, IIStore>, MayStore; // +// Format: SW rx, offset(sp) MIPS16e +// Purpose: Store Word rx (SP-Relative) +// To store an SP-relative word to memory. 
+// +def SwRxSpImmX16: FEXT_RI16_SP_explicit_ins<0b11010, "sw", IIStore>, MayStore; + +// +// // Format: XOR rx, ry MIPS16e // Purpose: Xor // To do a bitwise logical XOR. @@ -361,6 +985,7 @@ class ArithLogic16_pat : def: ArithLogic16_pat; def: ArithLogic16_pat; +def: ArithLogic16_pat; def: ArithLogic16_pat; def: ArithLogic16_pat; def: ArithLogic16_pat; @@ -385,35 +1010,533 @@ def: shift_rotate_reg16_pat; def: shift_rotate_reg16_pat; class LoadM16_pat : - Mips16Pat<(OpNode addr:$addr), (I addr:$addr)>; + Mips16Pat<(OpNode addr16:$addr), (I addr16:$addr)>; def: LoadM16_pat; def: LoadM16_pat; -def: LoadM16_pat; -def: LoadM16_pat; -def: LoadM16_pat; +def: LoadM16_pat; +def: LoadM16_pat; +def: LoadM16_pat; class StoreM16_pat : - Mips16Pat<(OpNode CPU16Regs:$r, addr:$addr), (I CPU16Regs:$r, addr:$addr)>; + Mips16Pat<(OpNode CPU16Regs:$r, addr16:$addr), + (I CPU16Regs:$r, addr16:$addr)>; def: StoreM16_pat; -def: StoreM16_pat; -def: StoreM16_pat; +def: StoreM16_pat; +def: StoreM16_pat; + +// Unconditional branch +class UncondBranch16_pat: + Mips16Pat<(OpNode bb:$imm16), (I bb:$imm16)> { + let Predicates = [RelocPIC, InMips16Mode]; + } + +// Indirect branch +def: Mips16Pat< + (brind CPU16Regs:$rs), + (JrcRx16 CPU16Regs:$rs)>; // Jump and Link (Call) -let isCall=1, hasDelaySlot=1 in +let isCall=1, hasDelaySlot=0 in def JumpLinkReg16: FRR16_JALRC<0, 0, 0, (outs), (ins CPU16Regs:$rs), - "jalr \t$rs", [(MipsJmpLink CPU16Regs:$rs)], IIBranch>; + "jalrc \t$rs", [(MipsJmpLink CPU16Regs:$rs)], IIBranch>; // Mips16 pseudos let isReturn=1, isTerminator=1, hasDelaySlot=1, isBarrier=1, hasCtrlDep=1, hasExtraSrcRegAllocReq = 1 in def RetRA16 : MipsPseudo16<(outs), (ins), "", [(MipsRet)]>; + +// setcc patterns + +class SetCC_R16: + Mips16Pat<(cond_op CPU16Regs:$rx, CPU16Regs:$ry), + (I CPU16Regs:$rx, CPU16Regs:$ry)>; + +class SetCC_I16: + Mips16Pat<(cond_op CPU16Regs:$rx, imm_type:$imm16), + (I CPU16Regs:$rx, imm_type:$imm16)>; + + +def: Mips16Pat<(i32 addr16:$addr), + (AddiuRxRyOffMemX16 addr16:$addr)>; + + +// Large (>16 bit) immediate loads +def : Mips16Pat<(i32 imm:$imm), + (OrRxRxRy16 (SllX16 (LiRxImmX16 (HI16 imm:$imm)), 16), + (LiRxImmX16 (LO16 imm:$imm)))>; + +// Carry MipsPatterns +def : Mips16Pat<(subc CPU16Regs:$lhs, CPU16Regs:$rhs), + (SubuRxRyRz16 CPU16Regs:$lhs, CPU16Regs:$rhs)>; +def : Mips16Pat<(addc CPU16Regs:$lhs, CPU16Regs:$rhs), + (AdduRxRyRz16 CPU16Regs:$lhs, CPU16Regs:$rhs)>; +def : Mips16Pat<(addc CPU16Regs:$src, immSExt16:$imm), + (AddiuRxRxImmX16 CPU16Regs:$src, imm:$imm)>; + +// +// Some branch conditional patterns are not generated by llvm at this time. +// Some are for seemingly arbitrary reasons not used: i.e. with signed number +// comparison they are used and for unsigned a different pattern is used. +// I am pushing upstream from the full mips16 port and it seemed that I needed +// these earlier and the mips32 port has these but now I cannot create test +// cases that use these patterns. While I sort this all out I will leave these +// extra patterns commented out and if I can be sure they are really not used, +// I will delete the code. I don't want to check the code in uncommented without +// a valid test case. In some cases, the compiler is generating patterns with +// setcc instead and earlier I had implemented setcc first so may have masked +// the problem. The setcc variants are suboptimal for mips16 so I may wantto +// figure out how to enable the brcond patterns or else possibly new +// combinations of of brcond and setcc. 
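A reading aid, not part of the imported patch: the bcond patterns that follow fire when a comparison feeds a branch directly, while a comparison whose result is kept as a value goes through the setcc patterns further down, which also have to copy the result out of T8 and are the suboptimal variants the comment above refers to. A minimal C++ sketch, with illustrative function names only:

    // Compare-and-branch: expected to match one of the slt-based bcond
    // patterns below (an slt/sltu paired with bteqz or btnez), so the
    // comparison result never needs to live in a general register.
    int branchy(int a, int b) {
      if (a < b)
        return 1;
      return 2;
    }

    // Comparison kept as a value: expected to go through the setcc
    // patterns instead, which also pay for a move out of T8.
    int valuey(unsigned c, unsigned d) {
      return c < d;
    }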
+// +// +// bcond-seteq +// +def: Mips16Pat + <(brcond (i32 (seteq CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), + (BteqzT8CmpX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16) + >; + + +def: Mips16Pat + <(brcond (i32 (seteq CPU16Regs:$rx, immZExt16:$imm)), bb:$targ16), + (BteqzT8CmpiX16 CPU16Regs:$rx, immSExt16:$imm, bb:$targ16) + >; + +def: Mips16Pat + <(brcond (i32 (seteq CPU16Regs:$rx, 0)), bb:$targ16), + (BeqzRxImmX16 CPU16Regs:$rx, bb:$targ16) + >; + +// +// bcond-setgt (do we need to have this pair of setlt, setgt??) +// +def: Mips16Pat + <(brcond (i32 (setgt CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), + (BtnezT8SltX16 CPU16Regs:$ry, CPU16Regs:$rx, bb:$imm16) + >; + +// +// bcond-setge +// +def: Mips16Pat + <(brcond (i32 (setge CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), + (BteqzT8SltX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16) + >; + +// +// never called because compiler transforms a >= k to a > (k-1) +def: Mips16Pat + <(brcond (i32 (setge CPU16Regs:$rx, immSExt16:$imm)), bb:$imm16), + (BteqzT8SltiX16 CPU16Regs:$rx, immSExt16:$imm, bb:$imm16) + >; + +// +// bcond-setlt +// +def: Mips16Pat + <(brcond (i32 (setlt CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), + (BtnezT8SltX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16) + >; + +def: Mips16Pat + <(brcond (i32 (setlt CPU16Regs:$rx, immSExt16:$imm)), bb:$imm16), + (BtnezT8SltiX16 CPU16Regs:$rx, immSExt16:$imm, bb:$imm16) + >; + +// +// bcond-setle +// +def: Mips16Pat + <(brcond (i32 (setle CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), + (BteqzT8SltX16 CPU16Regs:$ry, CPU16Regs:$rx, bb:$imm16) + >; + +// +// bcond-setne +// +def: Mips16Pat + <(brcond (i32 (setne CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), + (BtnezT8CmpX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16) + >; + +def: Mips16Pat + <(brcond (i32 (setne CPU16Regs:$rx, immZExt16:$imm)), bb:$targ16), + (BtnezT8CmpiX16 CPU16Regs:$rx, immSExt16:$imm, bb:$targ16) + >; + +def: Mips16Pat + <(brcond (i32 (setne CPU16Regs:$rx, 0)), bb:$targ16), + (BnezRxImmX16 CPU16Regs:$rx, bb:$targ16) + >; + +// +// This needs to be there but I forget which code will generate it +// +def: Mips16Pat + <(brcond CPU16Regs:$rx, bb:$targ16), + (BnezRxImmX16 CPU16Regs:$rx, bb:$targ16) + >; + +// + +// +// bcond-setugt +// +//def: Mips16Pat +// <(brcond (i32 (setugt CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), +// (BtnezT8SltuX16 CPU16Regs:$ry, CPU16Regs:$rx, bb:$imm16) +// >; + +// +// bcond-setuge +// +//def: Mips16Pat +// <(brcond (i32 (setuge CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), +// (BteqzT8SltuX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16) +// >; + + +// +// bcond-setult +// +//def: Mips16Pat +// <(brcond (i32 (setult CPU16Regs:$rx, CPU16Regs:$ry)), bb:$imm16), +// (BtnezT8SltuX16 CPU16Regs:$rx, CPU16Regs:$ry, bb:$imm16) +// >; + +def: UncondBranch16_pat; + // Small immediates +def: Mips16Pat<(i32 immSExt16:$in), + (AddiuRxRxImmX16 (Move32R16 ZERO), immSExt16:$in)>; + def: Mips16Pat<(i32 immZExt16:$in), (LiRxImmX16 immZExt16:$in)>; +// +// MipsDivRem +// +def: Mips16Pat + <(MipsDivRem CPU16Regs:$rx, CPU16Regs:$ry), + (DivRxRy16 CPU16Regs:$rx, CPU16Regs:$ry)>; + +// +// MipsDivRemU +// +def: Mips16Pat + <(MipsDivRemU CPU16Regs:$rx, CPU16Regs:$ry), + (DivuRxRy16 CPU16Regs:$rx, CPU16Regs:$ry)>; + +// signed a,b +// x = (a>=b)?x:y +// +// if !(a < b) x = y +// +def : Mips16Pat<(select (i32 (setge CPU16Regs:$a, CPU16Regs:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBteqZSlt CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$a, CPU16Regs:$b)>; + +// signed a,b +// x = (a>b)?x:y +// +// if (b < a) x = y +// +def : Mips16Pat<(select (i32 
(setgt CPU16Regs:$a, CPU16Regs:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBtneZSlt CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$b, CPU16Regs:$a)>; + +// unsigned a,b +// x = (a>=b)?x:y +// +// if !(a < b) x = y; +// +def : Mips16Pat< + (select (i32 (setuge CPU16Regs:$a, CPU16Regs:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBteqZSltu CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$a, CPU16Regs:$b)>; + +// unsigned a,b +// x = (a>b)?x:y +// +// if (b < a) x = y +// +def : Mips16Pat<(select (i32 (setugt CPU16Regs:$a, CPU16Regs:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBtneZSltu CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$b, CPU16Regs:$a)>; + +// signed +// x = (a >= k)?x:y +// due to an llvm optimization, i don't think that this will ever +// be used. This is transformed into x = (a > k-1)?x:y +// +// + +//def : Mips16Pat< +// (select (i32 (setge CPU16Regs:$lhs, immSExt16:$rhs)), +// CPU16Regs:$T, CPU16Regs:$F), +// (SelTBteqZSlti CPU16Regs:$T, CPU16Regs:$F, +// CPU16Regs:$lhs, immSExt16:$rhs)>; + +//def : Mips16Pat< +// (select (i32 (setuge CPU16Regs:$lhs, immSExt16:$rhs)), +// CPU16Regs:$T, CPU16Regs:$F), +// (SelTBteqZSltiu CPU16Regs:$T, CPU16Regs:$F, +// CPU16Regs:$lhs, immSExt16:$rhs)>; + +// signed +// x = (a < k)?x:y +// +// if !(a < k) x = y; +// +def : Mips16Pat< + (select (i32 (setlt CPU16Regs:$a, immSExt16:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBtneZSlti CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$a, immSExt16:$b)>; + + +// +// +// signed +// x = (a <= b)? x : y +// +// if (b < a) x = y +// +def : Mips16Pat<(select (i32 (setle CPU16Regs:$a, CPU16Regs:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBteqZSlt CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$b, CPU16Regs:$a)>; + +// +// unnsigned +// x = (a <= b)? x : y +// +// if (b < a) x = y +// +def : Mips16Pat<(select (i32 (setule CPU16Regs:$a, CPU16Regs:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBteqZSltu CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$b, CPU16Regs:$a)>; + +// +// signed/unsigned +// x = (a == b)? x : y +// +// if (a != b) x = y +// +def : Mips16Pat<(select (i32 (seteq CPU16Regs:$a, CPU16Regs:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBteqZCmp CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$b, CPU16Regs:$a)>; + +// +// signed/unsigned +// x = (a == 0)? x : y +// +// if (a != 0) x = y +// +def : Mips16Pat<(select (i32 (seteq CPU16Regs:$a, 0)), + CPU16Regs:$x, CPU16Regs:$y), + (SelBeqZ CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$a)>; + + +// +// signed/unsigned +// x = (a == k)? x : y +// +// if (a != k) x = y +// +def : Mips16Pat<(select (i32 (seteq CPU16Regs:$a, immZExt16:$k)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBteqZCmpi CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$a, immZExt16:$k)>; + + +// +// signed/unsigned +// x = (a != b)? x : y +// +// if (a == b) x = y +// +// +def : Mips16Pat<(select (i32 (setne CPU16Regs:$a, CPU16Regs:$b)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBtneZCmp CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$b, CPU16Regs:$a)>; + +// +// signed/unsigned +// x = (a != 0)? x : y +// +// if (a == 0) x = y +// +def : Mips16Pat<(select (i32 (setne CPU16Regs:$a, 0)), + CPU16Regs:$x, CPU16Regs:$y), + (SelBneZ CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$a)>; + +// signed/unsigned +// x = (a)? x : y +// +// if (!a) x = y +// +def : Mips16Pat<(select CPU16Regs:$a, + CPU16Regs:$x, CPU16Regs:$y), + (SelBneZ CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$a)>; + + +// +// signed/unsigned +// x = (a != k)? 
x : y +// +// if (a == k) x = y +// +def : Mips16Pat<(select (i32 (setne CPU16Regs:$a, immZExt16:$k)), + CPU16Regs:$x, CPU16Regs:$y), + (SelTBtneZCmpi CPU16Regs:$x, CPU16Regs:$y, + CPU16Regs:$a, immZExt16:$k)>; + +// +// When writing C code to test setxx these patterns, +// some will be transformed into +// other things. So we test using C code but using -O3 and -O0 +// +// seteq +// +def : Mips16Pat + <(seteq CPU16Regs:$lhs,CPU16Regs:$rhs), + (SltiuCCRxImmX16 (XorRxRxRy16 CPU16Regs:$lhs, CPU16Regs:$rhs), 1)>; + +def : Mips16Pat + <(seteq CPU16Regs:$lhs, 0), + (SltiuCCRxImmX16 CPU16Regs:$lhs, 1)>; + + +// +// setge +// + +def: Mips16Pat + <(setge CPU16Regs:$lhs, CPU16Regs:$rhs), + (XorRxRxRy16 (SltCCRxRy16 CPU16Regs:$lhs, CPU16Regs:$rhs), + (LiRxImmX16 1))>; + +// +// For constants, llvm transforms this to: +// x > (k -1) and then reverses the operands to use setlt. So this pattern +// is not used now by the compiler. (Presumably checking that k-1 does not +// overflow). The compiler never uses this at a the current time, due to +// other optimizations. +// +//def: Mips16Pat +// <(setge CPU16Regs:$lhs, immSExt16:$rhs), +// (XorRxRxRy16 (SltiCCRxImmX16 CPU16Regs:$lhs, immSExt16:$rhs), +// (LiRxImmX16 1))>; + +// This catches the x >= -32768 case by transforming it to x > -32769 +// +def: Mips16Pat + <(setgt CPU16Regs:$lhs, -32769), + (XorRxRxRy16 (SltiCCRxImmX16 CPU16Regs:$lhs, -32768), + (LiRxImmX16 1))>; + +// +// setgt +// +// + +def: Mips16Pat + <(setgt CPU16Regs:$lhs, CPU16Regs:$rhs), + (SltCCRxRy16 CPU16Regs:$rhs, CPU16Regs:$lhs)>; + +// +// setle +// +def: Mips16Pat + <(setle CPU16Regs:$lhs, CPU16Regs:$rhs), + (XorRxRxRy16 (SltCCRxRy16 CPU16Regs:$rhs, CPU16Regs:$lhs), (LiRxImmX16 1))>; + +// +// setlt +// +def: SetCC_R16; + +def: SetCC_I16; + +// +// setne +// +def : Mips16Pat + <(setne CPU16Regs:$lhs,CPU16Regs:$rhs), + (SltuCCRxRy16 (LiRxImmX16 0), + (XorRxRxRy16 CPU16Regs:$lhs, CPU16Regs:$rhs))>; + + +// +// setuge +// +def: Mips16Pat + <(setuge CPU16Regs:$lhs, CPU16Regs:$rhs), + (XorRxRxRy16 (SltuCCRxRy16 CPU16Regs:$lhs, CPU16Regs:$rhs), + (LiRxImmX16 1))>; + +// this pattern will never be used because the compiler will transform +// x >= k to x > (k - 1) and then use SLT +// +//def: Mips16Pat +// <(setuge CPU16Regs:$lhs, immZExt16:$rhs), +// (XorRxRxRy16 (SltiuCCRxImmX16 CPU16Regs:$lhs, immZExt16:$rhs), +// (LiRxImmX16 1))>; + +// +// setugt +// +def: Mips16Pat + <(setugt CPU16Regs:$lhs, CPU16Regs:$rhs), + (SltuCCRxRy16 CPU16Regs:$rhs, CPU16Regs:$lhs)>; + +// +// setule +// +def: Mips16Pat + <(setule CPU16Regs:$lhs, CPU16Regs:$rhs), + (XorRxRxRy16 (SltuCCRxRy16 CPU16Regs:$rhs, CPU16Regs:$lhs), (LiRxImmX16 1))>; + +// +// setult +// +def: SetCC_R16; + +def: SetCC_I16; + def: Mips16Pat<(add CPU16Regs:$hi, (MipsLo tglobaladdr:$lo)), (AddiuRxRxImmX16 CPU16Regs:$hi, tglobaladdr:$lo)>; + +// hi/lo relocs + +def : Mips16Pat<(MipsHi tglobaltlsaddr:$in), + (SllX16 (LiRxImmX16 tglobaltlsaddr:$in), 16)>; + +// wrapper_pic +class Wrapper16Pat: + Mips16Pat<(MipsWrapper RC:$gp, node:$in), + (ADDiuOp RC:$gp, node:$in)>; + + +def : Wrapper16Pat; +def : Wrapper16Pat; + +def : Mips16Pat<(i32 (extloadi8 addr16:$src)), + (LbuRxRyOffMemX16 addr16:$src)>; +def : Mips16Pat<(i32 (extloadi16 addr16:$src)), + (LhuRxRyOffMemX16 addr16:$src)>; \ No newline at end of file diff --git a/lib/Target/Mips/Mips16RegisterInfo.cpp b/lib/Target/Mips/Mips16RegisterInfo.cpp index c15d1bf..d7397a3 100644 --- a/lib/Target/Mips/Mips16RegisterInfo.cpp +++ b/lib/Target/Mips/Mips16RegisterInfo.cpp @@ -12,6 +12,7 @@ 
//===----------------------------------------------------------------------===// #include "Mips16RegisterInfo.h" +#include "Mips16InstrInfo.h" #include "Mips.h" #include "MipsAnalyzeImmediate.h" #include "MipsInstrInfo.h" @@ -39,15 +40,27 @@ using namespace llvm; Mips16RegisterInfo::Mips16RegisterInfo(const MipsSubtarget &ST, - const TargetInstrInfo &TII) - : MipsRegisterInfo(ST, TII) {} + const Mips16InstrInfo &I) + : MipsRegisterInfo(ST), TII(I) {} // This function eliminate ADJCALLSTACKDOWN, // ADJCALLSTACKUP pseudo instructions void Mips16RegisterInfo:: eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const { - // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions. + const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); + + if (!TFI->hasReservedCallFrame(MF)) { + int64_t Amount = I->getOperand(0).getImm(); + + if (I->getOpcode() == Mips::ADJCALLSTACKDOWN) + Amount = -Amount; + + const Mips16InstrInfo *II = static_cast(&TII); + + II->adjustStackPtr(Mips::SP, Amount, MBB, I); + } + MBB.erase(I); } @@ -55,57 +68,60 @@ void Mips16RegisterInfo::eliminateFI(MachineBasicBlock::iterator II, unsigned OpNo, int FrameIndex, uint64_t StackSize, int64_t SPOffset) const { - MachineInstr &MI = *II; - MachineFunction &MF = *MI.getParent()->getParent(); - MachineFrameInfo *MFI = MF.getFrameInfo(); - MipsFunctionInfo *MipsFI = MF.getInfo(); - - const std::vector &CSI = MFI->getCalleeSavedInfo(); - int MinCSFI = 0; - int MaxCSFI = -1; - - if (CSI.size()) { - MinCSFI = CSI[0].getFrameIdx(); - MaxCSFI = CSI[CSI.size() - 1].getFrameIdx(); - } - - // The following stack frame objects are always - // referenced relative to $sp: - // 1. Outgoing arguments. - // 2. Pointer to dynamically allocated stack space. - // 3. Locations for callee-saved registers. - // Everything else is referenced relative to whatever register - // getFrameRegister() returns. - unsigned FrameReg; - - if (MipsFI->isOutArgFI(FrameIndex) || - (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI)) - FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP; + MachineInstr &MI = *II; + MachineFunction &MF = *MI.getParent()->getParent(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + + const std::vector &CSI = MFI->getCalleeSavedInfo(); + int MinCSFI = 0; + int MaxCSFI = -1; + + if (CSI.size()) { + MinCSFI = CSI[0].getFrameIdx(); + MaxCSFI = CSI[CSI.size() - 1].getFrameIdx(); + } + + // The following stack frame objects are always + // referenced relative to $sp: + // 1. Outgoing arguments. + // 2. Pointer to dynamically allocated stack space. + // 3. Locations for callee-saved registers. + // Everything else is referenced relative to whatever register + // getFrameRegister() returns. + unsigned FrameReg; + + if (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI) + FrameReg = Mips::SP; + else { + const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); + if (TFI->hasFP(MF)) { + FrameReg = Mips::S0; + } + else { + if ((MI.getNumOperands()> OpNo+2) && MI.getOperand(OpNo+2).isReg()) + FrameReg = MI.getOperand(OpNo+2).getReg(); else - FrameReg = getFrameRegister(MF); - - // Calculate final offset. 
- // - There is no need to change the offset if the frame object - // is one of the - // following: an outgoing argument, pointer to a dynamically allocated - // stack space or a $gp restore location, - // - If the frame object is any of the following, - // its offset must be adjusted - // by adding the size of the stack: - // incoming argument, callee-saved register location or local variable. - int64_t Offset; - - if (MipsFI->isOutArgFI(FrameIndex)) - Offset = SPOffset; - else - Offset = SPOffset + (int64_t)StackSize; - - Offset += MI.getOperand(OpNo + 1).getImm(); - - DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n"); - - MI.getOperand(OpNo).ChangeToRegister(FrameReg, false); - MI.getOperand(OpNo + 1).ChangeToImmediate(Offset); + FrameReg = Mips::SP; + } + } + // Calculate final offset. + // - There is no need to change the offset if the frame object + // is one of the + // following: an outgoing argument, pointer to a dynamically allocated + // stack space or a $gp restore location, + // - If the frame object is any of the following, + // its offset must be adjusted + // by adding the size of the stack: + // incoming argument, callee-saved register location or local variable. + int64_t Offset; + Offset = SPOffset + (int64_t)StackSize; + Offset += MI.getOperand(OpNo + 1).getImm(); + + + DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n"); + + MI.getOperand(OpNo).ChangeToRegister(FrameReg, false); + MI.getOperand(OpNo + 1).ChangeToImmediate(Offset); } diff --git a/lib/Target/Mips/Mips16RegisterInfo.h b/lib/Target/Mips/Mips16RegisterInfo.h index 3f4b3a7..153def2 100644 --- a/lib/Target/Mips/Mips16RegisterInfo.h +++ b/lib/Target/Mips/Mips16RegisterInfo.h @@ -17,11 +17,12 @@ #include "MipsRegisterInfo.h" namespace llvm { +class Mips16InstrInfo; class Mips16RegisterInfo : public MipsRegisterInfo { + const Mips16InstrInfo &TII; public: - Mips16RegisterInfo(const MipsSubtarget &Subtarget, - const TargetInstrInfo &TII); + Mips16RegisterInfo(const MipsSubtarget &Subtarget, const Mips16InstrInfo &TII); void eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td index 20fc178..a611168 100644 --- a/lib/Target/Mips/Mips64InstrInfo.td +++ b/lib/Target/Mips/Mips64InstrInfo.td @@ -83,8 +83,10 @@ let usesCustomInserter = 1, Predicates = [HasMips64, HasStandardEncoding], //===----------------------------------------------------------------------===// let DecoderNamespace = "Mips64" in { /// Arithmetic Instructions (ALU Immediate) -def DADDiu : ArithLogicI<0x19, "daddiu", add, simm16_64, immSExt16, +def DADDi : ArithOverflowI<0x18, "daddi", add, simm16_64, immSExt16, CPU64Regs>; +def DADDiu : ArithLogicI<0x19, "daddiu", add, simm16_64, immSExt16, + CPU64Regs>, IsAsCheapAsAMove; def DANDi : ArithLogicI<0x0c, "andi", and, uimm16_64, immZExt16, CPU64Regs>; def SLTi64 : SetCC_I<0x0a, "slti", setlt, simm16_64, immSExt16, CPU64Regs>; def SLTiu64 : SetCC_I<0x0b, "sltiu", setult, simm16_64, immSExt16, CPU64Regs>; @@ -93,6 +95,7 @@ def XORi64 : ArithLogicI<0x0e, "xori", xor, uimm16_64, immZExt16, CPU64Regs>; def LUi64 : LoadUpper<0x0f, "lui", CPU64Regs, uimm16_64>; /// Arithmetic Instructions (3-Operand, R-Type) +def DADD : ArithOverflowR<0x00, 0x2C, "dadd", IIAlu, CPU64Regs, 1>; def DADDu : ArithLogicR<0x00, 0x2d, "daddu", add, IIAlu, CPU64Regs, 1>; def DSUBu : ArithLogicR<0x00, 0x2f, "dsubu", sub, IIAlu, CPU64Regs>; def SLT64 : SetCC_R<0x00, 0x2a, "slt", setlt, CPU64Regs>; @@ 
-110,9 +113,9 @@ def DSLLV : shift_rotate_reg<0x14, 0x00, "dsllv", shl, CPU64Regs>; def DSRLV : shift_rotate_reg<0x16, 0x00, "dsrlv", srl, CPU64Regs>; def DSRAV : shift_rotate_reg<0x17, 0x00, "dsrav", sra, CPU64Regs>; let Pattern = [] in { -def DSLL32 : shift_rotate_imm64<0x3c, 0x00, "dsll32", shl>; -def DSRL32 : shift_rotate_imm64<0x3e, 0x00, "dsrl32", srl>; -def DSRA32 : shift_rotate_imm64<0x3f, 0x00, "dsra32", sra>; + def DSLL32 : shift_rotate_imm64<0x3c, 0x00, "dsll32", shl>; + def DSRL32 : shift_rotate_imm64<0x3e, 0x00, "dsrl32", srl>; + def DSRA32 : shift_rotate_imm64<0x3f, 0x00, "dsra32", sra>; } } // Rotate Instructions @@ -127,24 +130,15 @@ let DecoderNamespace = "Mips64" in { /// aligned defm LB64 : LoadM64<0x20, "lb", sextloadi8>; defm LBu64 : LoadM64<0x24, "lbu", zextloadi8>; -defm LH64 : LoadM64<0x21, "lh", sextloadi16_a>; -defm LHu64 : LoadM64<0x25, "lhu", zextloadi16_a>; -defm LW64 : LoadM64<0x23, "lw", sextloadi32_a>; -defm LWu64 : LoadM64<0x27, "lwu", zextloadi32_a>; +defm LH64 : LoadM64<0x21, "lh", sextloadi16>; +defm LHu64 : LoadM64<0x25, "lhu", zextloadi16>; +defm LW64 : LoadM64<0x23, "lw", sextloadi32>; +defm LWu64 : LoadM64<0x27, "lwu", zextloadi32>; defm SB64 : StoreM64<0x28, "sb", truncstorei8>; -defm SH64 : StoreM64<0x29, "sh", truncstorei16_a>; -defm SW64 : StoreM64<0x2b, "sw", truncstorei32_a>; -defm LD : LoadM64<0x37, "ld", load_a>; -defm SD : StoreM64<0x3f, "sd", store_a>; - -/// unaligned -defm ULH64 : LoadM64<0x21, "ulh", sextloadi16_u, 1>; -defm ULHu64 : LoadM64<0x25, "ulhu", zextloadi16_u, 1>; -defm ULW64 : LoadM64<0x23, "ulw", sextloadi32_u, 1>; -defm USH64 : StoreM64<0x29, "ush", truncstorei16_u, 1>; -defm USW64 : StoreM64<0x2b, "usw", truncstorei32_u, 1>; -defm ULD : LoadM64<0x37, "uld", load_u, 1>; -defm USD : StoreM64<0x3f, "usd", store_u, 1>; +defm SH64 : StoreM64<0x29, "sh", truncstorei16>; +defm SW64 : StoreM64<0x2b, "sw", truncstorei32>; +defm LD : LoadM64<0x37, "ld", load>; +defm SD : StoreM64<0x3f, "sd", store>; /// load/store left/right let isCodeGenOnly = 1 in { @@ -183,6 +177,7 @@ def BLTZ64 : CBranchZero<0x01, 0, "bltz", setlt, CPU64Regs>; } let DecoderNamespace = "Mips64" in def JALR64 : JumpLinkReg<0x00, 0x09, "jalr", CPU64Regs>; +def TAILCALL64_R : JumpFR, IsTailCall; let DecoderNamespace = "Mips64" in { /// Multiply and Divide Instructions. 
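A reading aid, not part of the imported patch: the hunk above folds the separate aligned and unaligned load/store definitions (the _a/_u PatFrags and the ULH64/ULW64 family) into single defs keyed on the generic PatFrags, so the same LH64/LW64-style definitions are selected whether or not the access is known to be aligned; how an unaligned access is actually expanded is outside this hunk. A minimal C++ sketch, names illustrative only:

    #include <cstring>

    // A 16-bit signed load through a byte pointer: once the copy is folded,
    // the IR contains an align-1 i16 load, which previously had to go through
    // the unaligned (_u) patterns and now matches the single
    // sextloadi16-based definition.
    long read16(const char *buf, long off) {
      short v;
      std::memcpy(&v, buf + off, sizeof v);
      return v; // sign-extended to 64 bits on an LP64 target
    }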
@@ -217,7 +212,15 @@ let DecoderNamespace = "Mips64" in { def RDHWR64 : ReadHardware; def DEXT : ExtBase<3, "dext", CPU64Regs>; +let Pattern = [] in { + def DEXTU : ExtBase<2, "dextu", CPU64Regs>; + def DEXTM : ExtBase<1, "dextm", CPU64Regs>; +} def DINS : InsBase<7, "dins", CPU64Regs>; +let Pattern = [] in { + def DINSU : InsBase<6, "dinsu", CPU64Regs>; + def DINSM : InsBase<5, "dinsm", CPU64Regs>; +} let isCodeGenOnly = 1, rs = 0, shamt = 0 in { def DSLL64_32 : FR<0x00, 0x3c, (outs CPU64Regs:$rd), (ins CPURegs:$rt), @@ -236,21 +239,14 @@ let isCodeGenOnly = 1, rs = 0, shamt = 0 in { let Predicates = [NotN64, HasStandardEncoding] in { def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>; def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>; - def : MipsPat<(i64 (extloadi16_a addr:$src)), (LH64 addr:$src)>; - def : MipsPat<(i64 (extloadi16_u addr:$src)), (ULH64 addr:$src)>; - def : MipsPat<(i64 (extloadi32_a addr:$src)), (LW64 addr:$src)>; - def : MipsPat<(i64 (extloadi32_u addr:$src)), (ULW64 addr:$src)>; - def : MipsPat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64 addr:$a), 32), 32)>; + def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64 addr:$src)>; + def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64 addr:$src)>; } let Predicates = [IsN64, HasStandardEncoding] in { def : MipsPat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>; def : MipsPat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>; - def : MipsPat<(i64 (extloadi16_a addr:$src)), (LH64_P8 addr:$src)>; - def : MipsPat<(i64 (extloadi16_u addr:$src)), (ULH64_P8 addr:$src)>; - def : MipsPat<(i64 (extloadi32_a addr:$src)), (LW64_P8 addr:$src)>; - def : MipsPat<(i64 (extloadi32_u addr:$src)), (ULW64_P8 addr:$src)>; - def : MipsPat<(zextloadi32_u addr:$a), - (DSRL (DSLL (ULW64_P8 addr:$a), 32), 32)>; + def : MipsPat<(i64 (extloadi16 addr:$src)), (LH64_P8 addr:$src)>; + def : MipsPat<(i64 (extloadi32 addr:$src)), (LW64_P8 addr:$src)>; } // hi/lo relocs @@ -315,3 +311,38 @@ def : MipsPat<(i64 (sext_inreg CPU64Regs:$src, i32)), // bswap MipsPattern def : MipsPat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>; + +//===----------------------------------------------------------------------===// +// Instruction aliases +//===----------------------------------------------------------------------===// +def : InstAlias<"move $dst,$src", (DADD CPU64Regs:$dst,CPU64Regs:$src,ZERO_64)>; + +/// Move between CPU and coprocessor registers +let DecoderNamespace = "Mips64" in { +def MFC0_3OP64 : MFC3OP<0x10, 0, (outs CPU64Regs:$rt), + (ins CPU64Regs:$rd, uimm16:$sel),"mfc0\t$rt, $rd, $sel">; +def MTC0_3OP64 : MFC3OP<0x10, 4, (outs CPU64Regs:$rd, uimm16:$sel), + (ins CPU64Regs:$rt),"mtc0\t$rt, $rd, $sel">; +def MFC2_3OP64 : MFC3OP<0x12, 0, (outs CPU64Regs:$rt), + (ins CPU64Regs:$rd, uimm16:$sel),"mfc2\t$rt, $rd, $sel">; +def MTC2_3OP64 : MFC3OP<0x12, 4, (outs CPU64Regs:$rd, uimm16:$sel), + (ins CPU64Regs:$rt),"mtc2\t$rt, $rd, $sel">; +def DMFC0_3OP64 : MFC3OP<0x10, 1, (outs CPU64Regs:$rt), + (ins CPU64Regs:$rd, uimm16:$sel),"dmfc0\t$rt, $rd, $sel">; +def DMTC0_3OP64 : MFC3OP<0x10, 5, (outs CPU64Regs:$rd, uimm16:$sel), + (ins CPU64Regs:$rt),"dmtc0\t$rt, $rd, $sel">; +def DMFC2_3OP64 : MFC3OP<0x12, 1, (outs CPU64Regs:$rt), + (ins CPU64Regs:$rd, uimm16:$sel),"dmfc2\t$rt, $rd, $sel">; +def DMTC2_3OP64 : MFC3OP<0x12, 5, (outs CPU64Regs:$rd, uimm16:$sel), + (ins CPU64Regs:$rt),"dmtc2\t$rt, $rd, $sel">; +} +// Two operand (implicit 0 selector) versions: +def : InstAlias<"mfc0 $rt, $rd", (MFC0_3OP64 CPU64Regs:$rt, CPU64Regs:$rd, 0)>; +def : 
InstAlias<"mtc0 $rt, $rd", (MTC0_3OP64 CPU64Regs:$rd, 0, CPU64Regs:$rt)>; +def : InstAlias<"mfc2 $rt, $rd", (MFC2_3OP64 CPU64Regs:$rt, CPU64Regs:$rd, 0)>; +def : InstAlias<"mtc2 $rt, $rd", (MTC2_3OP64 CPU64Regs:$rd, 0, CPU64Regs:$rt)>; +def : InstAlias<"dmfc0 $rt, $rd", (DMFC0_3OP64 CPU64Regs:$rt, CPU64Regs:$rd, 0)>; +def : InstAlias<"dmtc0 $rt, $rd", (DMTC0_3OP64 CPU64Regs:$rd, 0, CPU64Regs:$rt)>; +def : InstAlias<"dmfc2 $rt, $rd", (DMFC2_3OP64 CPU64Regs:$rt, CPU64Regs:$rd, 0)>; +def : InstAlias<"dmtc2 $rt, $rd", (DMTC2_3OP64 CPU64Regs:$rd, 0, CPU64Regs:$rt)>; + diff --git a/lib/Target/Mips/MipsAnalyzeImmediate.cpp b/lib/Target/Mips/MipsAnalyzeImmediate.cpp index dc8fbd0..99b163e 100644 --- a/lib/Target/Mips/MipsAnalyzeImmediate.cpp +++ b/lib/Target/Mips/MipsAnalyzeImmediate.cpp @@ -91,7 +91,7 @@ void MipsAnalyzeImmediate::ReplaceADDiuSLLWithLUi(InstSeq &Seq) { // Sign-extend and shift operand of ADDiu and see if it still fits in 16-bit. int64_t Imm = SignExtend64<16>(Seq[0].ImmOpnd); - int64_t ShiftedImm = Imm << (Seq[1].ImmOpnd - 16); + int64_t ShiftedImm = (uint64_t)Imm << (Seq[1].ImmOpnd - 16); if (!isInt<16>(ShiftedImm)) return; diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index 00ff754..bf2818d 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -37,7 +37,7 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetOptions.h" @@ -49,6 +49,13 @@ bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) { return true; } +bool MipsAsmPrinter::lowerOperand(const MachineOperand &MO, MCOperand &MCOp) { + MCOp = MCInstLowering.LowerOperand(MO); + return MCOp.isValid(); +} + +#include "MipsGenMCPseudoLowering.inc" + void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) { if (MI->isDebugValue()) { SmallString<128> Str; @@ -58,24 +65,9 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) { return; } - // Direct object specific instruction lowering - if (!OutStreamer.hasRawTextSupport()) - switch (MI->getOpcode()) { - case Mips::DSLL: - case Mips::DSRL: - case Mips::DSRA: - assert(MI->getNumOperands() == 3 && - "Invalid no. of machine operands for shift!"); - assert(MI->getOperand(2).isImm()); - int64_t Shift = MI->getOperand(2).getImm(); - if (Shift > 31) { - MCInst TmpInst0; - MCInstLowering.LowerLargeShift(MI, TmpInst0, Shift - 32); - OutStreamer.EmitInstruction(TmpInst0); - return; - } - break; - } + // Do any auto-generated pseudo lowerings. 
+ if (emitPseudoExpansionLowering(OutStreamer, MI)) + return; MachineBasicBlock::const_instr_iterator I = MI; MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end(); @@ -83,8 +75,9 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) { do { MCInst TmpInst0; MCInstLowering.Lower(I++, TmpInst0); + OutStreamer.EmitInstruction(TmpInst0); - } while ((I != E) && I->isInsideBundle()); + } while ((I != E) && I->isInsideBundle()); // Delay slot check } //===----------------------------------------------------------------------===// @@ -214,7 +207,7 @@ const char *MipsAsmPrinter::getCurrentABIString() const { case MipsSubtarget::N32: return "abiN32"; case MipsSubtarget::N64: return "abi64"; case MipsSubtarget::EABI: return "eabi32"; // TODO: handle eabi64 - default: llvm_unreachable("Unknown Mips ABI");; + default: llvm_unreachable("Unknown Mips ABI"); } } @@ -246,8 +239,7 @@ void MipsAsmPrinter::EmitFunctionBodyStart() { OutStreamer.EmitRawText(StringRef("\t.set\tnoreorder")); OutStreamer.EmitRawText(StringRef("\t.set\tnomacro")); - if (MipsFI->getEmitNOAT()) - OutStreamer.EmitRawText(StringRef("\t.set\tnoat")); + OutStreamer.EmitRawText(StringRef("\t.set\tnoat")); } } @@ -258,9 +250,7 @@ void MipsAsmPrinter::EmitFunctionBodyEnd() { // always be at the function end, and we can't emit and // break with BB logic. if (OutStreamer.hasRawTextSupport()) { - if (MipsFI->getEmitNOAT()) - OutStreamer.EmitRawText(StringRef("\t.set\tat")); - + OutStreamer.EmitRawText(StringRef("\t.set\tat")); OutStreamer.EmitRawText(StringRef("\t.set\tmacro")); OutStreamer.EmitRawText(StringRef("\t.set\treorder")); OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName())); diff --git a/lib/Target/Mips/MipsAsmPrinter.h b/lib/Target/Mips/MipsAsmPrinter.h index 562bf9c..94d8bfa 100644 --- a/lib/Target/Mips/MipsAsmPrinter.h +++ b/lib/Target/Mips/MipsAsmPrinter.h @@ -32,6 +32,14 @@ class LLVM_LIBRARY_VISIBILITY MipsAsmPrinter : public AsmPrinter { void EmitInstrWithMacroNoAT(const MachineInstr *MI); +private: + // tblgen'erated function. + bool emitPseudoExpansionLowering(MCStreamer &OutStreamer, + const MachineInstr *MI); + + // lowerOperand - Convert a MachineOperand into the equivalent MCOperand. + bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp); + public: const MipsSubtarget *Subtarget; diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td index 19213fa..78cf140 100644 --- a/lib/Target/Mips/MipsCallingConv.td +++ b/lib/Target/Mips/MipsCallingConv.td @@ -35,9 +35,6 @@ def RetCC_MipsO32 : CallingConv<[ //===----------------------------------------------------------------------===// def CC_MipsN : CallingConv<[ - // Handles byval parameters. - CCIfByVal>, - // Promote i8/i16 arguments to i32. CCIfType<[i8, i16], CCPromoteToType>, @@ -72,9 +69,6 @@ def CC_MipsN : CallingConv<[ // N32/64 variable arguments. // All arguments are passed in integer registers. def CC_MipsN_VarArg : CallingConv<[ - // Handles byval parameters. - CCIfByVal>, - // Promote i8/i16 arguments to i32. 
CCIfType<[i8, i16], CCPromoteToType>, @@ -211,12 +205,6 @@ def CC_Mips_FastCC : CallingConv<[ // Mips Calling Convention Dispatch //===----------------------------------------------------------------------===// -def CC_Mips : CallingConv<[ - CCIfSubtarget<"isABI_EABI()", CCDelegateTo>, - CCIfSubtarget<"isABI_N32()", CCDelegateTo>, - CCIfSubtarget<"isABI_N64()", CCDelegateTo> -]>; - def RetCC_Mips : CallingConv<[ CCIfSubtarget<"isABI_EABI()", CCDelegateTo>, CCIfSubtarget<"isABI_N32()", CCDelegateTo>, diff --git a/lib/Target/Mips/MipsCodeEmitter.cpp b/lib/Target/Mips/MipsCodeEmitter.cpp index cb7022b..4bfccd8 100644 --- a/lib/Target/Mips/MipsCodeEmitter.cpp +++ b/lib/Target/Mips/MipsCodeEmitter.cpp @@ -30,7 +30,6 @@ #include "llvm/CodeGen/Passes.h" #include "llvm/Constants.h" #include "llvm/DerivedTypes.h" -#include "llvm/Function.h" #include "llvm/PassManager.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -48,7 +47,7 @@ namespace { class MipsCodeEmitter : public MachineFunctionPass { MipsJITInfo *JTI; const MipsInstrInfo *II; - const TargetData *TD; + const DataLayout *TD; const MipsSubtarget *Subtarget; TargetMachine &TM; JITCodeEmitter &MCE; @@ -67,7 +66,7 @@ class MipsCodeEmitter : public MachineFunctionPass { MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) : MachineFunctionPass(ID), JTI(0), II((const MipsInstrInfo *) tm.getInstrInfo()), - TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0), + TD(tm.getDataLayout()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0), IsPIC(TM.getRelocationModel() == Reloc::PIC_) { } @@ -129,7 +128,7 @@ char MipsCodeEmitter::ID = 0; bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) { JTI = ((MipsTargetMachine&) MF.getTarget()).getJITInfo(); II = ((const MipsTargetMachine&) MF.getTarget()).getInstrInfo(); - TD = ((const MipsTargetMachine&) MF.getTarget()).getTargetData(); + TD = ((const MipsTargetMachine&) MF.getTarget()).getDataLayout(); Subtarget = &TM.getSubtarget (); MCPEs = &MF.getConstantPool()->getConstants(); MJTEs = 0; @@ -139,7 +138,7 @@ bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) { do { DEBUG(errs() << "JITTing function '" - << MF.getFunction()->getName() << "'\n"); + << MF.getName() << "'\n"); MCE.startFunction(MF); for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); @@ -219,15 +218,9 @@ unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI, return getMipsRegisterNumbering(MO.getReg()); else if (MO.isImm()) return static_cast(MO.getImm()); - else if (MO.isGlobal()) { - if (MI.getOpcode() == Mips::ULW || MI.getOpcode() == Mips::USW || - MI.getOpcode() == Mips::ULH || MI.getOpcode() == Mips::ULHu) - emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 4); - else if (MI.getOpcode() == Mips::USH) - emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 8); - else - emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true); - } else if (MO.isSymbol()) + else if (MO.isGlobal()) + emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true); + else if (MO.isSymbol()) emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO)); else if (MO.isCPI()) emitConstPoolAddress(MO.getIndex(), getRelocation(MI, MO)); @@ -384,29 +377,8 @@ void MipsCodeEmitter::emitInstruction(const MachineInstr &MI) { if ((MI.getDesc().TSFlags & MipsII::FormMask) == MipsII::Pseudo) return; - - switch (MI.getOpcode()) { - case Mips::USW: - NumEmitted += emitUSW(MI); - break; - case Mips::ULW: - NumEmitted += emitULW(MI); - break; - case Mips::ULH: - 
NumEmitted += emitULH(MI); - break; - case Mips::ULHu: - NumEmitted += emitULHu(MI); - break; - case Mips::USH: - NumEmitted += emitUSH(MI); - break; - - default: - emitWordLE(getBinaryCodeForInstr(MI)); - ++NumEmitted; // Keep track of the # of mi's emitted - break; - } + emitWordLE(getBinaryCodeForInstr(MI)); + ++NumEmitted; // Keep track of the # of mi's emitted MCE.processDebugLoc(MI.getDebugLoc(), false); } diff --git a/lib/Target/Mips/MipsDSPInstrFormats.td b/lib/Target/Mips/MipsDSPInstrFormats.td new file mode 100644 index 0000000..8e01d06 --- /dev/null +++ b/lib/Target/Mips/MipsDSPInstrFormats.td @@ -0,0 +1,309 @@ +//===- MipsDSPInstrFormats.td - Mips Instruction Formats ---*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +def HasDSP : Predicate<"Subtarget.hasDSP()">, + AssemblerPredicate<"FeatureDSP">; +def HasDSPR2 : Predicate<"Subtarget.hasDSPR2()">, + AssemblerPredicate<"FeatureDSPR2">; + +// Fields. +class Field6 val> { + bits<6> V = val; +} + +def SPECIAL3_OPCODE : Field6<0b011111>; +def REGIMM_OPCODE : Field6<0b000001>; + +class DSPInst : MipsInst<(outs), (ins), "", [], NoItinerary, FrmOther> { + let Predicates = [HasDSP]; +} + +class PseudoDSP pattern>: + MipsPseudo { + let Predicates = [HasDSP]; +} + +// ADDU.QB sub-class format. +class ADDU_QB_FMT op> : DSPInst { + bits<5> rd; + bits<5> rs; + bits<5> rt; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b010000; +} + +class RADDU_W_QB_FMT op> : DSPInst { + bits<5> rd; + bits<5> rs; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = 0; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b010000; +} + +// CMPU.EQ.QB sub-class format. +class CMP_EQ_QB_R2_FMT op> : DSPInst { + bits<5> rs; + bits<5> rt; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-11} = 0; + let Inst{10-6} = op; + let Inst{5-0} = 0b010001; +} + +class CMP_EQ_QB_R3_FMT op> : DSPInst { + bits<5> rs; + bits<5> rt; + bits<5> rd; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b010001; +} + +class PRECR_SRA_PH_W_FMT op> : DSPInst { + bits<5> rs; + bits<5> rt; + bits<5> sa; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-11} = sa; + let Inst{10-6} = op; + let Inst{5-0} = 0b010001; +} + +// ABSQ_S.PH sub-class format. +class ABSQ_S_PH_R2_FMT op> : DSPInst { + bits<5> rd; + bits<5> rt; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = 0; + let Inst{20-16} = rt; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b010010; +} + + +class REPL_FMT op> : DSPInst { + bits<5> rd; + bits<10> imm; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-16} = imm; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b010010; +} + +// SHLL.QB sub-class format. +class SHLL_QB_FMT op> : DSPInst { + bits<5> rd; + bits<5> rt; + bits<5> rs_sa; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs_sa; + let Inst{20-16} = rt; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b010011; +} + +// LX sub-class format. 
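+// LX covers the indexed loads (lwx/lhx/lbux): the effective address is the
+// sum of the two GPR operands $base and $index, and $rd receives the loaded
+// value.  Like the other DSP formats above it lives under the SPECIAL3
+// major opcode, distinguished here by funct field 0b001010.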
+class LX_FMT op> : DSPInst { + bits<5> rd; + bits<5> base; + bits<5> index; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = base; + let Inst{20-16} = index; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b001010; +} + +// ADDUH.QB sub-class format. +class ADDUH_QB_FMT op> : DSPInst { + bits<5> rd; + bits<5> rs; + bits<5> rt; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b011000; +} + +// APPEND sub-class format. +class APPEND_FMT op> : DSPInst { + bits<5> rt; + bits<5> rs; + bits<5> sa; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-11} = sa; + let Inst{10-6} = op; + let Inst{5-0} = 0b110001; +} + +// DPA.W.PH sub-class format. +class DPA_W_PH_FMT op> : DSPInst { + bits<2> ac; + bits<5> rs; + bits<5> rt; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-13} = 0; + let Inst{12-11} = ac; + let Inst{10-6} = op; + let Inst{5-0} = 0b110000; +} + +// MULT sub-class format. +class MULT_FMT opcode, bits<6> funct> : DSPInst { + bits<2> ac; + bits<5> rs; + bits<5> rt; + + let Opcode = opcode; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-13} = 0; + let Inst{12-11} = ac; + let Inst{10-6} = 0; + let Inst{5-0} = funct; +} + +// EXTR.W sub-class format (type 1). +class EXTR_W_TY1_FMT op> : DSPInst { + bits<5> rt; + bits<2> ac; + bits<5> shift_rs; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = shift_rs; + let Inst{20-16} = rt; + let Inst{15-13} = 0; + let Inst{12-11} = ac; + let Inst{10-6} = op; + let Inst{5-0} = 0b111000; +} + +// SHILO sub-class format. +class SHILO_R1_FMT op> : DSPInst { + bits<2> ac; + bits<6> shift; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-20} = shift; + let Inst{19-13} = 0; + let Inst{12-11} = ac; + let Inst{10-6} = op; + let Inst{5-0} = 0b111000; +} + +class SHILO_R2_FMT op> : DSPInst { + bits<2> ac; + bits<5> rs; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-13} = 0; + let Inst{12-11} = ac; + let Inst{10-6} = op; + let Inst{5-0} = 0b111000; +} + +class RDDSP_FMT op> : DSPInst { + bits<5> rd; + bits<10> mask; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-16} = mask; + let Inst{15-11} = rd; + let Inst{10-6} = op; + let Inst{5-0} = 0b111000; +} + +class WRDSP_FMT op> : DSPInst { + bits<5> rs; + bits<10> mask; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-11} = mask; + let Inst{10-6} = op; + let Inst{5-0} = 0b111000; +} + +class BPOSGE32_FMT op> : DSPInst { + bits<16> offset; + + let Opcode = REGIMM_OPCODE.V; + + let Inst{25-21} = 0; + let Inst{20-16} = op; + let Inst{15-0} = offset; +} + +// INSV sub-class format. +class INSV_FMT op> : DSPInst { + bits<5> rt; + bits<5> rs; + + let Opcode = SPECIAL3_OPCODE.V; + + let Inst{25-21} = rs; + let Inst{20-16} = rt; + let Inst{15-6} = 0; + let Inst{5-0} = op; +} diff --git a/lib/Target/Mips/MipsDSPInstrInfo.td b/lib/Target/Mips/MipsDSPInstrInfo.td new file mode 100644 index 0000000..ef94028 --- /dev/null +++ b/lib/Target/Mips/MipsDSPInstrInfo.td @@ -0,0 +1,1319 @@ +//===- MipsDSPInstrInfo.td - DSP ASE instructions -*- tablegen ------------*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file describes Mips DSP ASE instructions. +// +//===----------------------------------------------------------------------===// + +// ImmLeaf +def immZExt2 : ImmLeaf(Imm);}]>; +def immZExt3 : ImmLeaf(Imm);}]>; +def immZExt4 : ImmLeaf(Imm);}]>; +def immZExt8 : ImmLeaf(Imm);}]>; +def immZExt10 : ImmLeaf(Imm);}]>; +def immSExt6 : ImmLeaf(Imm);}]>; + +// Mips-specific dsp nodes +def SDT_MipsExtr : SDTypeProfile<1, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>]>; +def SDT_MipsShilo : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>; +def SDT_MipsDPA : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>]>; + +class MipsDSPBase : + SDNode; + +class MipsDSPSideEffectBase : + SDNode; + +def MipsEXTP : MipsDSPSideEffectBase<"EXTP", SDT_MipsExtr>; +def MipsEXTPDP : MipsDSPSideEffectBase<"EXTPDP", SDT_MipsExtr>; +def MipsEXTR_S_H : MipsDSPSideEffectBase<"EXTR_S_H", SDT_MipsExtr>; +def MipsEXTR_W : MipsDSPSideEffectBase<"EXTR_W", SDT_MipsExtr>; +def MipsEXTR_R_W : MipsDSPSideEffectBase<"EXTR_R_W", SDT_MipsExtr>; +def MipsEXTR_RS_W : MipsDSPSideEffectBase<"EXTR_RS_W", SDT_MipsExtr>; + +def MipsSHILO : MipsDSPBase<"SHILO", SDT_MipsShilo>; +def MipsMTHLIP : MipsDSPBase<"MTHLIP", SDT_MipsShilo>; + +def MipsMULSAQ_S_W_PH : MipsDSPSideEffectBase<"MULSAQ_S_W_PH", SDT_MipsDPA>; +def MipsMAQ_S_W_PHL : MipsDSPSideEffectBase<"MAQ_S_W_PHL", SDT_MipsDPA>; +def MipsMAQ_S_W_PHR : MipsDSPSideEffectBase<"MAQ_S_W_PHR", SDT_MipsDPA>; +def MipsMAQ_SA_W_PHL : MipsDSPSideEffectBase<"MAQ_SA_W_PHL", SDT_MipsDPA>; +def MipsMAQ_SA_W_PHR : MipsDSPSideEffectBase<"MAQ_SA_W_PHR", SDT_MipsDPA>; + +def MipsDPAU_H_QBL : MipsDSPBase<"DPAU_H_QBL", SDT_MipsDPA>; +def MipsDPAU_H_QBR : MipsDSPBase<"DPAU_H_QBR", SDT_MipsDPA>; +def MipsDPSU_H_QBL : MipsDSPBase<"DPSU_H_QBL", SDT_MipsDPA>; +def MipsDPSU_H_QBR : MipsDSPBase<"DPSU_H_QBR", SDT_MipsDPA>; +def MipsDPAQ_S_W_PH : MipsDSPSideEffectBase<"DPAQ_S_W_PH", SDT_MipsDPA>; +def MipsDPSQ_S_W_PH : MipsDSPSideEffectBase<"DPSQ_S_W_PH", SDT_MipsDPA>; +def MipsDPAQ_SA_L_W : MipsDSPSideEffectBase<"DPAQ_SA_L_W", SDT_MipsDPA>; +def MipsDPSQ_SA_L_W : MipsDSPSideEffectBase<"DPSQ_SA_L_W", SDT_MipsDPA>; + +def MipsDPA_W_PH : MipsDSPBase<"DPA_W_PH", SDT_MipsDPA>; +def MipsDPS_W_PH : MipsDSPBase<"DPS_W_PH", SDT_MipsDPA>; +def MipsDPAQX_S_W_PH : MipsDSPSideEffectBase<"DPAQX_S_W_PH", SDT_MipsDPA>; +def MipsDPAQX_SA_W_PH : MipsDSPSideEffectBase<"DPAQX_SA_W_PH", SDT_MipsDPA>; +def MipsDPAX_W_PH : MipsDSPBase<"DPAX_W_PH", SDT_MipsDPA>; +def MipsDPSX_W_PH : MipsDSPBase<"DPSX_W_PH", SDT_MipsDPA>; +def MipsDPSQX_S_W_PH : MipsDSPSideEffectBase<"DPSQX_S_W_PH", SDT_MipsDPA>; +def MipsDPSQX_SA_W_PH : MipsDSPSideEffectBase<"DPSQX_SA_W_PH", SDT_MipsDPA>; +def MipsMULSA_W_PH : MipsDSPBase<"MULSA_W_PH", SDT_MipsDPA>; + +def MipsMULT : MipsDSPBase<"MULT", SDT_MipsDPA>; +def MipsMULTU : MipsDSPBase<"MULTU", SDT_MipsDPA>; +def MipsMADD_DSP : MipsDSPBase<"MADD_DSP", SDT_MipsDPA>; +def MipsMADDU_DSP : MipsDSPBase<"MADDU_DSP", SDT_MipsDPA>; +def MipsMSUB_DSP : MipsDSPBase<"MSUB_DSP", SDT_MipsDPA>; +def MipsMSUBU_DSP : MipsDSPBase<"MSUBU_DSP", SDT_MipsDPA>; + +// Flags. +class IsCommutable { + bit isCommutable = 1; +} + +class UseAC { + list Uses = [AC0]; +} + +class UseDSPCtrl { + list Uses = [DSPCtrl]; +} + +class ClearDefs { + list Defs = []; +} + +// Instruction encoding. 
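+// Each DSP instruction is assembled from two halves: an *_ENC class, which
+// plugs the minor-opcode bits into one of the formats defined in
+// MipsDSPInstrFormats.td, and an *_DESC class (further below), which
+// supplies the operand list, assembly string and selection pattern.  The
+// final definitions at the end of the file simply mix the two in, e.g.
+//
+//   def ADDU_QB : ADDU_QB_ENC, ADDU_QB_DESC;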
+class ADDU_QB_ENC : ADDU_QB_FMT<0b00000>; +class ADDU_S_QB_ENC : ADDU_QB_FMT<0b00100>; +class SUBU_QB_ENC : ADDU_QB_FMT<0b00001>; +class SUBU_S_QB_ENC : ADDU_QB_FMT<0b00101>; +class ADDQ_PH_ENC : ADDU_QB_FMT<0b01010>; +class ADDQ_S_PH_ENC : ADDU_QB_FMT<0b01110>; +class SUBQ_PH_ENC : ADDU_QB_FMT<0b01011>; +class SUBQ_S_PH_ENC : ADDU_QB_FMT<0b01111>; +class ADDQ_S_W_ENC : ADDU_QB_FMT<0b10110>; +class SUBQ_S_W_ENC : ADDU_QB_FMT<0b10111>; +class ADDSC_ENC : ADDU_QB_FMT<0b10000>; +class ADDWC_ENC : ADDU_QB_FMT<0b10001>; +class MODSUB_ENC : ADDU_QB_FMT<0b10010>; +class RADDU_W_QB_ENC : RADDU_W_QB_FMT<0b10100>; +class ABSQ_S_PH_ENC : ABSQ_S_PH_R2_FMT<0b01001>; +class ABSQ_S_W_ENC : ABSQ_S_PH_R2_FMT<0b10001>; +class PRECRQ_QB_PH_ENC : CMP_EQ_QB_R3_FMT<0b01100>; +class PRECRQ_PH_W_ENC : CMP_EQ_QB_R3_FMT<0b10100>; +class PRECRQ_RS_PH_W_ENC : CMP_EQ_QB_R3_FMT<0b10101>; +class PRECRQU_S_QB_PH_ENC : CMP_EQ_QB_R3_FMT<0b01111>; +class PRECEQ_W_PHL_ENC : ABSQ_S_PH_R2_FMT<0b01100>; +class PRECEQ_W_PHR_ENC : ABSQ_S_PH_R2_FMT<0b01101>; +class PRECEQU_PH_QBL_ENC : ABSQ_S_PH_R2_FMT<0b00100>; +class PRECEQU_PH_QBR_ENC : ABSQ_S_PH_R2_FMT<0b00101>; +class PRECEQU_PH_QBLA_ENC : ABSQ_S_PH_R2_FMT<0b00110>; +class PRECEQU_PH_QBRA_ENC : ABSQ_S_PH_R2_FMT<0b00111>; +class PRECEU_PH_QBL_ENC : ABSQ_S_PH_R2_FMT<0b11100>; +class PRECEU_PH_QBR_ENC : ABSQ_S_PH_R2_FMT<0b11101>; +class PRECEU_PH_QBLA_ENC : ABSQ_S_PH_R2_FMT<0b11110>; +class PRECEU_PH_QBRA_ENC : ABSQ_S_PH_R2_FMT<0b11111>; +class SHLL_QB_ENC : SHLL_QB_FMT<0b00000>; +class SHLLV_QB_ENC : SHLL_QB_FMT<0b00010>; +class SHRL_QB_ENC : SHLL_QB_FMT<0b00001>; +class SHRLV_QB_ENC : SHLL_QB_FMT<0b00011>; +class SHLL_PH_ENC : SHLL_QB_FMT<0b01000>; +class SHLLV_PH_ENC : SHLL_QB_FMT<0b01010>; +class SHLL_S_PH_ENC : SHLL_QB_FMT<0b01100>; +class SHLLV_S_PH_ENC : SHLL_QB_FMT<0b01110>; +class SHRA_PH_ENC : SHLL_QB_FMT<0b01001>; +class SHRAV_PH_ENC : SHLL_QB_FMT<0b01011>; +class SHRA_R_PH_ENC : SHLL_QB_FMT<0b01101>; +class SHRAV_R_PH_ENC : SHLL_QB_FMT<0b01111>; +class SHLL_S_W_ENC : SHLL_QB_FMT<0b10100>; +class SHLLV_S_W_ENC : SHLL_QB_FMT<0b10110>; +class SHRA_R_W_ENC : SHLL_QB_FMT<0b10101>; +class SHRAV_R_W_ENC : SHLL_QB_FMT<0b10111>; +class MULEU_S_PH_QBL_ENC : ADDU_QB_FMT<0b00110>; +class MULEU_S_PH_QBR_ENC : ADDU_QB_FMT<0b00111>; +class MULEQ_S_W_PHL_ENC : ADDU_QB_FMT<0b11100>; +class MULEQ_S_W_PHR_ENC : ADDU_QB_FMT<0b11101>; +class MULQ_RS_PH_ENC : ADDU_QB_FMT<0b11111>; +class MULSAQ_S_W_PH_ENC : DPA_W_PH_FMT<0b00110>; +class MAQ_S_W_PHL_ENC : DPA_W_PH_FMT<0b10100>; +class MAQ_S_W_PHR_ENC : DPA_W_PH_FMT<0b10110>; +class MAQ_SA_W_PHL_ENC : DPA_W_PH_FMT<0b10000>; +class MAQ_SA_W_PHR_ENC : DPA_W_PH_FMT<0b10010>; +class DPAU_H_QBL_ENC : DPA_W_PH_FMT<0b00011>; +class DPAU_H_QBR_ENC : DPA_W_PH_FMT<0b00111>; +class DPSU_H_QBL_ENC : DPA_W_PH_FMT<0b01011>; +class DPSU_H_QBR_ENC : DPA_W_PH_FMT<0b01111>; +class DPAQ_S_W_PH_ENC : DPA_W_PH_FMT<0b00100>; +class DPSQ_S_W_PH_ENC : DPA_W_PH_FMT<0b00101>; +class DPAQ_SA_L_W_ENC : DPA_W_PH_FMT<0b01100>; +class DPSQ_SA_L_W_ENC : DPA_W_PH_FMT<0b01101>; +class MULT_DSP_ENC : MULT_FMT<0b000000, 0b011000>; +class MULTU_DSP_ENC : MULT_FMT<0b000000, 0b011001>; +class MADD_DSP_ENC : MULT_FMT<0b011100, 0b000000>; +class MADDU_DSP_ENC : MULT_FMT<0b011100, 0b000001>; +class MSUB_DSP_ENC : MULT_FMT<0b011100, 0b000100>; +class MSUBU_DSP_ENC : MULT_FMT<0b011100, 0b000101>; +class CMPU_EQ_QB_ENC : CMP_EQ_QB_R2_FMT<0b00000>; +class CMPU_LT_QB_ENC : CMP_EQ_QB_R2_FMT<0b00001>; +class CMPU_LE_QB_ENC : CMP_EQ_QB_R2_FMT<0b00010>; +class CMPGU_EQ_QB_ENC : 
CMP_EQ_QB_R3_FMT<0b00100>; +class CMPGU_LT_QB_ENC : CMP_EQ_QB_R3_FMT<0b00101>; +class CMPGU_LE_QB_ENC : CMP_EQ_QB_R3_FMT<0b00110>; +class CMP_EQ_PH_ENC : CMP_EQ_QB_R2_FMT<0b01000>; +class CMP_LT_PH_ENC : CMP_EQ_QB_R2_FMT<0b01001>; +class CMP_LE_PH_ENC : CMP_EQ_QB_R2_FMT<0b01010>; +class BITREV_ENC : ABSQ_S_PH_R2_FMT<0b11011>; +class PACKRL_PH_ENC : CMP_EQ_QB_R3_FMT<0b01110>; +class REPL_QB_ENC : REPL_FMT<0b00010>; +class REPL_PH_ENC : REPL_FMT<0b01010>; +class REPLV_QB_ENC : ABSQ_S_PH_R2_FMT<0b00011>; +class REPLV_PH_ENC : ABSQ_S_PH_R2_FMT<0b01011>; +class PICK_QB_ENC : CMP_EQ_QB_R3_FMT<0b00011>; +class PICK_PH_ENC : CMP_EQ_QB_R3_FMT<0b01011>; +class LWX_ENC : LX_FMT<0b00000>; +class LHX_ENC : LX_FMT<0b00100>; +class LBUX_ENC : LX_FMT<0b00110>; +class BPOSGE32_ENC : BPOSGE32_FMT<0b11100>; +class INSV_ENC : INSV_FMT<0b001100>; + +class EXTP_ENC : EXTR_W_TY1_FMT<0b00010>; +class EXTPV_ENC : EXTR_W_TY1_FMT<0b00011>; +class EXTPDP_ENC : EXTR_W_TY1_FMT<0b01010>; +class EXTPDPV_ENC : EXTR_W_TY1_FMT<0b01011>; +class EXTR_W_ENC : EXTR_W_TY1_FMT<0b00000>; +class EXTRV_W_ENC : EXTR_W_TY1_FMT<0b00001>; +class EXTR_R_W_ENC : EXTR_W_TY1_FMT<0b00100>; +class EXTRV_R_W_ENC : EXTR_W_TY1_FMT<0b00101>; +class EXTR_RS_W_ENC : EXTR_W_TY1_FMT<0b00110>; +class EXTRV_RS_W_ENC : EXTR_W_TY1_FMT<0b00111>; +class EXTR_S_H_ENC : EXTR_W_TY1_FMT<0b01110>; +class EXTRV_S_H_ENC : EXTR_W_TY1_FMT<0b01111>; +class SHILO_ENC : SHILO_R1_FMT<0b11010>; +class SHILOV_ENC : SHILO_R2_FMT<0b11011>; +class MTHLIP_ENC : SHILO_R2_FMT<0b11111>; + +class RDDSP_ENC : RDDSP_FMT<0b10010>; +class WRDSP_ENC : WRDSP_FMT<0b10011>; +class ADDU_PH_ENC : ADDU_QB_FMT<0b01000>; +class ADDU_S_PH_ENC : ADDU_QB_FMT<0b01100>; +class SUBU_PH_ENC : ADDU_QB_FMT<0b01001>; +class SUBU_S_PH_ENC : ADDU_QB_FMT<0b01101>; +class CMPGDU_EQ_QB_ENC : CMP_EQ_QB_R3_FMT<0b11000>; +class CMPGDU_LT_QB_ENC : CMP_EQ_QB_R3_FMT<0b11001>; +class CMPGDU_LE_QB_ENC : CMP_EQ_QB_R3_FMT<0b11010>; +class ABSQ_S_QB_ENC : ABSQ_S_PH_R2_FMT<0b00001>; +class ADDUH_QB_ENC : ADDUH_QB_FMT<0b00000>; +class ADDUH_R_QB_ENC : ADDUH_QB_FMT<0b00010>; +class SUBUH_QB_ENC : ADDUH_QB_FMT<0b00001>; +class SUBUH_R_QB_ENC : ADDUH_QB_FMT<0b00011>; +class ADDQH_PH_ENC : ADDUH_QB_FMT<0b01000>; +class ADDQH_R_PH_ENC : ADDUH_QB_FMT<0b01010>; +class SUBQH_PH_ENC : ADDUH_QB_FMT<0b01001>; +class SUBQH_R_PH_ENC : ADDUH_QB_FMT<0b01011>; +class ADDQH_W_ENC : ADDUH_QB_FMT<0b10000>; +class ADDQH_R_W_ENC : ADDUH_QB_FMT<0b10010>; +class SUBQH_W_ENC : ADDUH_QB_FMT<0b10001>; +class SUBQH_R_W_ENC : ADDUH_QB_FMT<0b10011>; +class MUL_PH_ENC : ADDUH_QB_FMT<0b01100>; +class MUL_S_PH_ENC : ADDUH_QB_FMT<0b01110>; +class MULQ_S_W_ENC : ADDUH_QB_FMT<0b10110>; +class MULQ_RS_W_ENC : ADDUH_QB_FMT<0b10111>; +class MULQ_S_PH_ENC : ADDU_QB_FMT<0b11110>; +class DPA_W_PH_ENC : DPA_W_PH_FMT<0b00000>; +class DPS_W_PH_ENC : DPA_W_PH_FMT<0b00001>; +class DPAQX_S_W_PH_ENC : DPA_W_PH_FMT<0b11000>; +class DPAQX_SA_W_PH_ENC : DPA_W_PH_FMT<0b11010>; +class DPAX_W_PH_ENC : DPA_W_PH_FMT<0b01000>; +class DPSX_W_PH_ENC : DPA_W_PH_FMT<0b01001>; +class DPSQX_S_W_PH_ENC : DPA_W_PH_FMT<0b11001>; +class DPSQX_SA_W_PH_ENC : DPA_W_PH_FMT<0b11011>; +class MULSA_W_PH_ENC : DPA_W_PH_FMT<0b00010>; +class PRECR_QB_PH_ENC : CMP_EQ_QB_R3_FMT<0b01101>; +class PRECR_SRA_PH_W_ENC : PRECR_SRA_PH_W_FMT<0b11110>; +class PRECR_SRA_R_PH_W_ENC : PRECR_SRA_PH_W_FMT<0b11111>; +class SHRA_QB_ENC : SHLL_QB_FMT<0b00100>; +class SHRAV_QB_ENC : SHLL_QB_FMT<0b00110>; +class SHRA_R_QB_ENC : SHLL_QB_FMT<0b00101>; +class SHRAV_R_QB_ENC : SHLL_QB_FMT<0b00111>; +class SHRL_PH_ENC : 
SHLL_QB_FMT<0b11001>; +class SHRLV_PH_ENC : SHLL_QB_FMT<0b11011>; +class APPEND_ENC : APPEND_FMT<0b00000>; +class BALIGN_ENC : APPEND_FMT<0b10000>; +class PREPEND_ENC : APPEND_FMT<0b00001>; + +// Instruction desc. +class ADDU_QB_DESC_BASE { + dag OutOperandList = (outs RCD:$rd); + dag InOperandList = (ins RCS:$rs, RCT:$rt); + string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt"); + list Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class RADDU_W_QB_DESC_BASE { + dag OutOperandList = (outs RCD:$rd); + dag InOperandList = (ins RCS:$rs); + string AsmString = !strconcat(instr_asm, "\t$rd, $rs"); + list Pattern = [(set RCD:$rd, (OpNode RCS:$rs))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class CMP_EQ_QB_R2_DESC_BASE { + dag OutOperandList = (outs); + dag InOperandList = (ins RCS:$rs, RCT:$rt); + string AsmString = !strconcat(instr_asm, "\t$rs, $rt"); + list Pattern = [(OpNode RCS:$rs, RCT:$rt)]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class CMP_EQ_QB_R3_DESC_BASE { + dag OutOperandList = (outs RCD:$rd); + dag InOperandList = (ins RCS:$rs, RCT:$rt); + string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt"); + list Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class PRECR_SRA_PH_W_DESC_BASE { + dag OutOperandList = (outs RCT:$rt); + dag InOperandList = (ins RCS:$rs, shamt:$sa, RCS:$src); + string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa"); + list Pattern = [(set RCT:$rt, (OpNode RCS:$src, RCS:$rs, immZExt5:$sa))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; + string Constraints = "$src = $rt"; +} + +class ABSQ_S_PH_R2_DESC_BASE { + dag OutOperandList = (outs RCD:$rd); + dag InOperandList = (ins RCT:$rt); + string AsmString = !strconcat(instr_asm, "\t$rd, $rt"); + list Pattern = [(set RCD:$rd, (OpNode RCT:$rt))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class REPL_DESC_BASE { + dag OutOperandList = (outs RC:$rd); + dag InOperandList = (ins uimm16:$imm); + string AsmString = !strconcat(instr_asm, "\t$rd, $imm"); + list Pattern = [(set RC:$rd, (OpNode immPat:$imm))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class SHLL_QB_R3_DESC_BASE { + dag OutOperandList = (outs RC:$rd); + dag InOperandList = (ins RC:$rt, CPURegs:$rs_sa); + string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa"); + list Pattern = [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs_sa))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class SHLL_QB_R2_DESC_BASE { + dag OutOperandList = (outs RC:$rd); + dag InOperandList = (ins RC:$rt, uimm16:$rs_sa); + string AsmString = !strconcat(instr_asm, "\t$rd, $rt, $rs_sa"); + list Pattern = [(set RC:$rd, (OpNode RC:$rt, ImmPat:$rs_sa))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class LX_DESC_BASE { + dag OutOperandList = (outs CPURegs:$rd); + dag InOperandList = (ins CPURegs:$base, CPURegs:$index); + string AsmString = !strconcat(instr_asm, "\t$rd, ${index}(${base})"); + list Pattern = [(set CPURegs:$rd, + (OpNode CPURegs:$base, CPURegs:$index))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; + bit mayLoad = 1; +} + +class ADDUH_QB_DESC_BASE { + dag OutOperandList = (outs RCD:$rd); + dag InOperandList = (ins RCS:$rs, RCT:$rt); + string AsmString = !strconcat(instr_asm, "\t$rd, $rs, $rt"); + list Pattern = [(set RCD:$rd, (OpNode RCS:$rs, RCT:$rt))]; + 
InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class APPEND_DESC_BASE { + dag OutOperandList = (outs CPURegs:$rt); + dag InOperandList = (ins CPURegs:$rs, shamt:$sa, CPURegs:$src); + string AsmString = !strconcat(instr_asm, "\t$rt, $rs, $sa"); + list Pattern = [(set CPURegs:$rt, + (OpNode CPURegs:$src, CPURegs:$rs, ImmOp:$sa))]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; + string Constraints = "$src = $rt"; +} + +class EXTR_W_TY1_R2_DESC_BASE { + dag OutOperandList = (outs CPURegs:$rt); + dag InOperandList = (ins ACRegs:$ac, CPURegs:$shift_rs); + string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs"); + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class EXTR_W_TY1_R1_DESC_BASE { + dag OutOperandList = (outs CPURegs:$rt); + dag InOperandList = (ins ACRegs:$ac, uimm16:$shift_rs); + string AsmString = !strconcat(instr_asm, "\t$rt, $ac, $shift_rs"); + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class SHILO_R1_PSEUDO_BASE : + PseudoDSP<(outs), (ins simm16:$shift), [(OpNode immSExt6:$shift)]>, + PseudoInstExpansion<(realinst AC0, simm16:$shift)> { + list Defs = [DSPCtrl, AC0]; + list Uses = [AC0]; + InstrItinClass Itinerary = itin; +} + +class SHILO_R1_DESC_BASE { + dag OutOperandList = (outs ACRegs:$ac); + dag InOperandList = (ins simm16:$shift); + string AsmString = !strconcat(instr_asm, "\t$ac, $shift"); +} + +class SHILO_R2_PSEUDO_BASE : + PseudoDSP<(outs), (ins CPURegs:$rs), [(OpNode CPURegs:$rs)]>, + PseudoInstExpansion<(realinst AC0, CPURegs:$rs)> { + list Defs = [DSPCtrl, AC0]; + list Uses = [AC0]; + InstrItinClass Itinerary = itin; +} + +class SHILO_R2_DESC_BASE { + dag OutOperandList = (outs ACRegs:$ac); + dag InOperandList = (ins CPURegs:$rs); + string AsmString = !strconcat(instr_asm, "\t$ac, $rs"); +} + +class MTHLIP_DESC_BASE { + dag OutOperandList = (outs ACRegs:$ac); + dag InOperandList = (ins CPURegs:$rs); + string AsmString = !strconcat(instr_asm, "\t$rs, $ac"); +} + +class RDDSP_DESC_BASE { + dag OutOperandList = (outs CPURegs:$rd); + dag InOperandList = (ins uimm16:$mask); + string AsmString = !strconcat(instr_asm, "\t$rd, $mask"); + list Pattern = [(set CPURegs:$rd, (OpNode immZExt10:$mask))]; + InstrItinClass Itinerary = itin; + list Uses = [DSPCtrl]; +} + +class WRDSP_DESC_BASE { + dag OutOperandList = (outs); + dag InOperandList = (ins CPURegs:$rs, uimm16:$mask); + string AsmString = !strconcat(instr_asm, "\t$rs, $mask"); + list Pattern = [(OpNode CPURegs:$rs, immZExt10:$mask)]; + InstrItinClass Itinerary = itin; + list Defs = [DSPCtrl]; +} + +class DPA_W_PH_PSEUDO_BASE : + PseudoDSP<(outs), (ins CPURegs:$rs, CPURegs:$rt), + [(OpNode CPURegs:$rs, CPURegs:$rt)]>, + PseudoInstExpansion<(realinst AC0, CPURegs:$rs, CPURegs:$rt)> { + list Defs = [DSPCtrl, AC0]; + list Uses = [AC0]; + InstrItinClass Itinerary = itin; +} + +class DPA_W_PH_DESC_BASE { + dag OutOperandList = (outs ACRegs:$ac); + dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt); + string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); +} + +class MULT_PSEUDO_BASE : + PseudoDSP<(outs), (ins CPURegs:$rs, CPURegs:$rt), + [(OpNode CPURegs:$rs, CPURegs:$rt)]>, + PseudoInstExpansion<(realinst AC0, CPURegs:$rs, CPURegs:$rt)> { + list Defs = [DSPCtrl, AC0]; + InstrItinClass Itinerary = itin; +} + +class MULT_DESC_BASE { + dag OutOperandList = (outs ACRegs:$ac); + dag InOperandList = (ins CPURegs:$rs, CPURegs:$rt); + string AsmString = !strconcat(instr_asm, "\t$ac, $rs, $rt"); +} + +class BPOSGE32_PSEUDO_DESC_BASE : + 
MipsPseudo<(outs CPURegs:$dst), (ins), "", [(set CPURegs:$dst, (OpNode))]> { + list Uses = [DSPCtrl]; + bit usesCustomInserter = 1; +} + +class BPOSGE32_DESC_BASE { + dag OutOperandList = (outs); + dag InOperandList = (ins brtarget:$offset); + string AsmString = !strconcat(instr_asm, "\t$offset"); + InstrItinClass Itinerary = itin; + list Uses = [DSPCtrl]; + bit isBranch = 1; + bit isTerminator = 1; + bit hasDelaySlot = 1; +} + +class INSV_DESC_BASE { + dag OutOperandList = (outs CPURegs:$rt); + dag InOperandList = (ins CPURegs:$src, CPURegs:$rs); + string AsmString = !strconcat(instr_asm, "\t$rt, $rs"); + list Pattern = [(set CPURegs:$rt, (OpNode CPURegs:$src, CPURegs:$rs))]; + InstrItinClass Itinerary = itin; + list Uses = [DSPCtrl]; + string Constraints = "$src = $rt"; +} + +//===----------------------------------------------------------------------===// +// MIPS DSP Rev 1 +//===----------------------------------------------------------------------===// + +// Addition/subtraction +class ADDU_QB_DESC : ADDU_QB_DESC_BASE<"addu.qb", int_mips_addu_qb, NoItinerary, + DSPRegs, DSPRegs>, IsCommutable; + +class ADDU_S_QB_DESC : ADDU_QB_DESC_BASE<"addu_s.qb", int_mips_addu_s_qb, + NoItinerary, DSPRegs, DSPRegs>, + IsCommutable; + +class SUBU_QB_DESC : ADDU_QB_DESC_BASE<"subu.qb", int_mips_subu_qb, NoItinerary, + DSPRegs, DSPRegs>; + +class SUBU_S_QB_DESC : ADDU_QB_DESC_BASE<"subu_s.qb", int_mips_subu_s_qb, + NoItinerary, DSPRegs, DSPRegs>; + +class ADDQ_PH_DESC : ADDU_QB_DESC_BASE<"addq.ph", int_mips_addq_ph, NoItinerary, + DSPRegs, DSPRegs>, IsCommutable; + +class ADDQ_S_PH_DESC : ADDU_QB_DESC_BASE<"addq_s.ph", int_mips_addq_s_ph, + NoItinerary, DSPRegs, DSPRegs>, + IsCommutable; + +class SUBQ_PH_DESC : ADDU_QB_DESC_BASE<"subq.ph", int_mips_subq_ph, NoItinerary, + DSPRegs, DSPRegs>; + +class SUBQ_S_PH_DESC : ADDU_QB_DESC_BASE<"subq_s.ph", int_mips_subq_s_ph, + NoItinerary, DSPRegs, DSPRegs>; + +class ADDQ_S_W_DESC : ADDU_QB_DESC_BASE<"addq_s.w", int_mips_addq_s_w, + NoItinerary, CPURegs, CPURegs>, + IsCommutable; + +class SUBQ_S_W_DESC : ADDU_QB_DESC_BASE<"subq_s.w", int_mips_subq_s_w, + NoItinerary, CPURegs, CPURegs>; + +class ADDSC_DESC : ADDU_QB_DESC_BASE<"addsc", int_mips_addsc, NoItinerary, + CPURegs, CPURegs>, IsCommutable; + +class ADDWC_DESC : ADDU_QB_DESC_BASE<"addwc", int_mips_addwc, NoItinerary, + CPURegs, CPURegs>, + IsCommutable, UseDSPCtrl; + +class MODSUB_DESC : ADDU_QB_DESC_BASE<"modsub", int_mips_modsub, NoItinerary, + CPURegs, CPURegs>, ClearDefs; + +class RADDU_W_QB_DESC : RADDU_W_QB_DESC_BASE<"raddu.w.qb", int_mips_raddu_w_qb, + NoItinerary, CPURegs, DSPRegs>, + ClearDefs; + +// Absolute value +class ABSQ_S_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.ph", int_mips_absq_s_ph, + NoItinerary, DSPRegs>; + +class ABSQ_S_W_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.w", int_mips_absq_s_w, + NoItinerary, CPURegs>; + +// Precision reduce/expand +class PRECRQ_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.qb.ph", + int_mips_precrq_qb_ph, + NoItinerary, DSPRegs, DSPRegs>, + ClearDefs; + +class PRECRQ_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq.ph.w", + int_mips_precrq_ph_w, + NoItinerary, DSPRegs, CPURegs>, + ClearDefs; + +class PRECRQ_RS_PH_W_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrq_rs.ph.w", + int_mips_precrq_rs_ph_w, + NoItinerary, DSPRegs, + CPURegs>; + +class PRECRQU_S_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precrqu_s.qb.ph", + int_mips_precrqu_s_qb_ph, + NoItinerary, DSPRegs, + DSPRegs>; + +class PRECEQ_W_PHL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phl", + int_mips_preceq_w_phl, + NoItinerary, 
CPURegs, DSPRegs>, + ClearDefs; + +class PRECEQ_W_PHR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceq.w.phr", + int_mips_preceq_w_phr, + NoItinerary, CPURegs, DSPRegs>, + ClearDefs; + +class PRECEQU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbl", + int_mips_precequ_ph_qbl, + NoItinerary, DSPRegs>, + ClearDefs; + +class PRECEQU_PH_QBR_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbr", + int_mips_precequ_ph_qbr, + NoItinerary, DSPRegs>, + ClearDefs; + +class PRECEQU_PH_QBLA_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbla", + int_mips_precequ_ph_qbla, + NoItinerary, DSPRegs>, + ClearDefs; + +class PRECEQU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"precequ.ph.qbra", + int_mips_precequ_ph_qbra, + NoItinerary, DSPRegs>, + ClearDefs; + +class PRECEU_PH_QBL_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbl", + int_mips_preceu_ph_qbl, + NoItinerary, DSPRegs>, + ClearDefs; + +class PRECEU_PH_QBR_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbr", + int_mips_preceu_ph_qbr, + NoItinerary, DSPRegs>, + ClearDefs; + +class PRECEU_PH_QBLA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbla", + int_mips_preceu_ph_qbla, + NoItinerary, DSPRegs>, + ClearDefs; + +class PRECEU_PH_QBRA_DESC : ABSQ_S_PH_R2_DESC_BASE<"preceu.ph.qbra", + int_mips_preceu_ph_qbra, + NoItinerary, DSPRegs>, + ClearDefs; + +// Shift +class SHLL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shll.qb", int_mips_shll_qb, immZExt3, + NoItinerary, DSPRegs>; + +class SHLLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shllv.qb", int_mips_shll_qb, + NoItinerary, DSPRegs>; + +class SHRL_QB_DESC : SHLL_QB_R2_DESC_BASE<"shrl.qb", int_mips_shrl_qb, immZExt3, + NoItinerary, DSPRegs>, ClearDefs; + +class SHRLV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.qb", int_mips_shrl_qb, + NoItinerary, DSPRegs>, ClearDefs; + +class SHLL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll.ph", int_mips_shll_ph, immZExt4, + NoItinerary, DSPRegs>; + +class SHLLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv.ph", int_mips_shll_ph, + NoItinerary, DSPRegs>; + +class SHLL_S_PH_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.ph", int_mips_shll_s_ph, + immZExt4, NoItinerary, DSPRegs>; + +class SHLLV_S_PH_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.ph", int_mips_shll_s_ph, + NoItinerary, DSPRegs>; + +class SHRA_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra.ph", int_mips_shra_ph, immZExt4, + NoItinerary, DSPRegs>, ClearDefs; + +class SHRAV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav.ph", int_mips_shra_ph, + NoItinerary, DSPRegs>, ClearDefs; + +class SHRA_R_PH_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.ph", int_mips_shra_r_ph, + immZExt4, NoItinerary, DSPRegs>, + ClearDefs; + +class SHRAV_R_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.ph", int_mips_shra_r_ph, + NoItinerary, DSPRegs>, ClearDefs; + +class SHLL_S_W_DESC : SHLL_QB_R2_DESC_BASE<"shll_s.w", int_mips_shll_s_w, + immZExt5, NoItinerary, CPURegs>; + +class SHLLV_S_W_DESC : SHLL_QB_R3_DESC_BASE<"shllv_s.w", int_mips_shll_s_w, + NoItinerary, CPURegs>; + +class SHRA_R_W_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.w", int_mips_shra_r_w, + immZExt5, NoItinerary, CPURegs>, + ClearDefs; + +class SHRAV_R_W_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.w", int_mips_shra_r_w, + NoItinerary, CPURegs>; + +// Multiplication +class MULEU_S_PH_QBL_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbl", + int_mips_muleu_s_ph_qbl, + NoItinerary, DSPRegs, DSPRegs>; + +class MULEU_S_PH_QBR_DESC : ADDU_QB_DESC_BASE<"muleu_s.ph.qbr", + int_mips_muleu_s_ph_qbr, + NoItinerary, DSPRegs, DSPRegs>; + +class MULEQ_S_W_PHL_DESC : ADDU_QB_DESC_BASE<"muleq_s.w.phl", + int_mips_muleq_s_w_phl, + NoItinerary, CPURegs, DSPRegs>, + IsCommutable; + +class MULEQ_S_W_PHR_DESC : 
ADDU_QB_DESC_BASE<"muleq_s.w.phr", + int_mips_muleq_s_w_phr, + NoItinerary, CPURegs, DSPRegs>, + IsCommutable; + +class MULQ_RS_PH_DESC : ADDU_QB_DESC_BASE<"mulq_rs.ph", int_mips_mulq_rs_ph, + NoItinerary, DSPRegs, DSPRegs>, + IsCommutable; + +class MULSAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsaq_s.w.ph">; + +class MAQ_S_W_PHL_DESC : DPA_W_PH_DESC_BASE<"maq_s.w.phl">; + +class MAQ_S_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_s.w.phr">; + +class MAQ_SA_W_PHL_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phl">; + +class MAQ_SA_W_PHR_DESC : DPA_W_PH_DESC_BASE<"maq_sa.w.phr">; + +// Dot product with accumulate/subtract +class DPAU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbl">; + +class DPAU_H_QBR_DESC : DPA_W_PH_DESC_BASE<"dpau.h.qbr">; + +class DPSU_H_QBL_DESC : DPA_W_PH_DESC_BASE<"dpsu.h.qbl">; + +class DPSU_H_QBR_DESC : DPA_W_PH_DESC_BASE<"dpsu.h.qbr">; + +class DPAQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaq_s.w.ph">; + +class DPSQ_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsq_s.w.ph">; + +class DPAQ_SA_L_W_DESC : DPA_W_PH_DESC_BASE<"dpaq_sa.l.w">; + +class DPSQ_SA_L_W_DESC : DPA_W_PH_DESC_BASE<"dpsq_sa.l.w">; + +class MULT_DSP_DESC : MULT_DESC_BASE<"mult">; + +class MULTU_DSP_DESC : MULT_DESC_BASE<"multu">; + +class MADD_DSP_DESC : MULT_DESC_BASE<"madd">; + +class MADDU_DSP_DESC : MULT_DESC_BASE<"maddu">; + +class MSUB_DSP_DESC : MULT_DESC_BASE<"msub">; + +class MSUBU_DSP_DESC : MULT_DESC_BASE<"msubu">; + +// Comparison +class CMPU_EQ_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.eq.qb", + int_mips_cmpu_eq_qb, NoItinerary, + DSPRegs>, IsCommutable; + +class CMPU_LT_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.lt.qb", + int_mips_cmpu_lt_qb, NoItinerary, + DSPRegs>, IsCommutable; + +class CMPU_LE_QB_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmpu.le.qb", + int_mips_cmpu_le_qb, NoItinerary, + DSPRegs>, IsCommutable; + +class CMPGU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.eq.qb", + int_mips_cmpgu_eq_qb, + NoItinerary, CPURegs, DSPRegs>, + IsCommutable; + +class CMPGU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.lt.qb", + int_mips_cmpgu_lt_qb, + NoItinerary, CPURegs, DSPRegs>, + IsCommutable; + +class CMPGU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgu.le.qb", + int_mips_cmpgu_le_qb, + NoItinerary, CPURegs, DSPRegs>, + IsCommutable; + +class CMP_EQ_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.eq.ph", int_mips_cmp_eq_ph, + NoItinerary, DSPRegs>, + IsCommutable; + +class CMP_LT_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.lt.ph", int_mips_cmp_lt_ph, + NoItinerary, DSPRegs>, + IsCommutable; + +class CMP_LE_PH_DESC : CMP_EQ_QB_R2_DESC_BASE<"cmp.le.ph", int_mips_cmp_le_ph, + NoItinerary, DSPRegs>, + IsCommutable; + +// Misc +class BITREV_DESC : ABSQ_S_PH_R2_DESC_BASE<"bitrev", int_mips_bitrev, + NoItinerary, CPURegs>, ClearDefs; + +class PACKRL_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"packrl.ph", int_mips_packrl_ph, + NoItinerary, DSPRegs, DSPRegs>, + ClearDefs; + +class REPL_QB_DESC : REPL_DESC_BASE<"repl.qb", int_mips_repl_qb, immZExt8, + NoItinerary, DSPRegs>, ClearDefs; + +class REPL_PH_DESC : REPL_DESC_BASE<"repl.ph", int_mips_repl_ph, immZExt10, + NoItinerary, DSPRegs>, ClearDefs; + +class REPLV_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.qb", int_mips_repl_qb, + NoItinerary, DSPRegs, CPURegs>, + ClearDefs; + +class REPLV_PH_DESC : ABSQ_S_PH_R2_DESC_BASE<"replv.ph", int_mips_repl_ph, + NoItinerary, DSPRegs, CPURegs>, + ClearDefs; + +class PICK_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.qb", int_mips_pick_qb, + NoItinerary, DSPRegs, DSPRegs>, + ClearDefs, UseDSPCtrl; + +class PICK_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"pick.ph", int_mips_pick_ph, + NoItinerary, DSPRegs, DSPRegs>, 
+ ClearDefs, UseDSPCtrl; + +class LWX_DESC : LX_DESC_BASE<"lwx", int_mips_lwx, NoItinerary>, ClearDefs; + +class LHX_DESC : LX_DESC_BASE<"lhx", int_mips_lhx, NoItinerary>, ClearDefs; + +class LBUX_DESC : LX_DESC_BASE<"lbux", int_mips_lbux, NoItinerary>, ClearDefs; + +class BPOSGE32_DESC : BPOSGE32_DESC_BASE<"bposge32", NoItinerary>; + +// Extr +class EXTP_DESC : EXTR_W_TY1_R1_DESC_BASE<"extp", MipsEXTP, NoItinerary>; + +class EXTPV_DESC : EXTR_W_TY1_R2_DESC_BASE<"extpv", MipsEXTP, NoItinerary>; + +class EXTPDP_DESC : EXTR_W_TY1_R1_DESC_BASE<"extpdp", MipsEXTPDP, NoItinerary>; + +class EXTPDPV_DESC : EXTR_W_TY1_R2_DESC_BASE<"extpdpv", MipsEXTPDP, + NoItinerary>; + +class EXTR_W_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr.w", MipsEXTR_W, NoItinerary>; + +class EXTRV_W_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv.w", MipsEXTR_W, + NoItinerary>; + +class EXTR_R_W_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr_r.w", MipsEXTR_R_W, + NoItinerary>; + +class EXTRV_R_W_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv_r.w", MipsEXTR_R_W, + NoItinerary>; + +class EXTR_RS_W_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr_rs.w", MipsEXTR_RS_W, + NoItinerary>; + +class EXTRV_RS_W_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv_rs.w", MipsEXTR_RS_W, + NoItinerary>; + +class EXTR_S_H_DESC : EXTR_W_TY1_R1_DESC_BASE<"extr_s.h", MipsEXTR_S_H, + NoItinerary>; + +class EXTRV_S_H_DESC : EXTR_W_TY1_R2_DESC_BASE<"extrv_s.h", MipsEXTR_S_H, + NoItinerary>; + +class SHILO_DESC : SHILO_R1_DESC_BASE<"shilo">; + +class SHILOV_DESC : SHILO_R2_DESC_BASE<"shilov">; + +class MTHLIP_DESC : MTHLIP_DESC_BASE<"mthlip">; + +class RDDSP_DESC : RDDSP_DESC_BASE<"rddsp", int_mips_rddsp, NoItinerary>; + +class WRDSP_DESC : WRDSP_DESC_BASE<"wrdsp", int_mips_wrdsp, NoItinerary>; + +class INSV_DESC : INSV_DESC_BASE<"insv", int_mips_insv, NoItinerary>; + +//===----------------------------------------------------------------------===// +// MIPS DSP Rev 2 +// Addition/subtraction +class ADDU_PH_DESC : ADDU_QB_DESC_BASE<"addu.ph", int_mips_addu_ph, NoItinerary, + DSPRegs, DSPRegs>, IsCommutable; + +class ADDU_S_PH_DESC : ADDU_QB_DESC_BASE<"addu_s.ph", int_mips_addu_s_ph, + NoItinerary, DSPRegs, DSPRegs>, + IsCommutable; + +class SUBU_PH_DESC : ADDU_QB_DESC_BASE<"subu.ph", int_mips_subu_ph, NoItinerary, + DSPRegs, DSPRegs>; + +class SUBU_S_PH_DESC : ADDU_QB_DESC_BASE<"subu_s.ph", int_mips_subu_s_ph, + NoItinerary, DSPRegs, DSPRegs>; + +class ADDUH_QB_DESC : ADDUH_QB_DESC_BASE<"adduh.qb", int_mips_adduh_qb, + NoItinerary, DSPRegs>, + ClearDefs, IsCommutable; + +class ADDUH_R_QB_DESC : ADDUH_QB_DESC_BASE<"adduh_r.qb", int_mips_adduh_r_qb, + NoItinerary, DSPRegs>, + ClearDefs, IsCommutable; + +class SUBUH_QB_DESC : ADDUH_QB_DESC_BASE<"subuh.qb", int_mips_subuh_qb, + NoItinerary, DSPRegs>, ClearDefs; + +class SUBUH_R_QB_DESC : ADDUH_QB_DESC_BASE<"subuh_r.qb", int_mips_subuh_r_qb, + NoItinerary, DSPRegs>, ClearDefs; + +class ADDQH_PH_DESC : ADDUH_QB_DESC_BASE<"addqh.ph", int_mips_addqh_ph, + NoItinerary, DSPRegs>, + ClearDefs, IsCommutable; + +class ADDQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"addqh_r.ph", int_mips_addqh_r_ph, + NoItinerary, DSPRegs>, + ClearDefs, IsCommutable; + +class SUBQH_PH_DESC : ADDUH_QB_DESC_BASE<"subqh.ph", int_mips_subqh_ph, + NoItinerary, DSPRegs>, ClearDefs; + +class SUBQH_R_PH_DESC : ADDUH_QB_DESC_BASE<"subqh_r.ph", int_mips_subqh_r_ph, + NoItinerary, DSPRegs>, ClearDefs; + +class ADDQH_W_DESC : ADDUH_QB_DESC_BASE<"addqh.w", int_mips_addqh_w, + NoItinerary, CPURegs>, + ClearDefs, IsCommutable; + +class ADDQH_R_W_DESC : ADDUH_QB_DESC_BASE<"addqh_r.w", int_mips_addqh_r_w, + 
NoItinerary, CPURegs>, + ClearDefs, IsCommutable; + +class SUBQH_W_DESC : ADDUH_QB_DESC_BASE<"subqh.w", int_mips_subqh_w, + NoItinerary, CPURegs>, ClearDefs; + +class SUBQH_R_W_DESC : ADDUH_QB_DESC_BASE<"subqh_r.w", int_mips_subqh_r_w, + NoItinerary, CPURegs>, ClearDefs; + +// Comparison +class CMPGDU_EQ_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.eq.qb", + int_mips_cmpgdu_eq_qb, + NoItinerary, CPURegs, DSPRegs>, + IsCommutable; + +class CMPGDU_LT_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.lt.qb", + int_mips_cmpgdu_lt_qb, + NoItinerary, CPURegs, DSPRegs>, + IsCommutable; + +class CMPGDU_LE_QB_DESC : CMP_EQ_QB_R3_DESC_BASE<"cmpgdu.le.qb", + int_mips_cmpgdu_le_qb, + NoItinerary, CPURegs, DSPRegs>, + IsCommutable; + +// Absolute +class ABSQ_S_QB_DESC : ABSQ_S_PH_R2_DESC_BASE<"absq_s.qb", int_mips_absq_s_qb, + NoItinerary, DSPRegs>; + +// Multiplication +class MUL_PH_DESC : ADDUH_QB_DESC_BASE<"mul.ph", int_mips_mul_ph, NoItinerary, + DSPRegs>, IsCommutable; + +class MUL_S_PH_DESC : ADDUH_QB_DESC_BASE<"mul_s.ph", int_mips_mul_s_ph, + NoItinerary, DSPRegs>, IsCommutable; + +class MULQ_S_W_DESC : ADDUH_QB_DESC_BASE<"mulq_s.w", int_mips_mulq_s_w, + NoItinerary, CPURegs>, IsCommutable; + +class MULQ_RS_W_DESC : ADDUH_QB_DESC_BASE<"mulq_rs.w", int_mips_mulq_rs_w, + NoItinerary, CPURegs>, IsCommutable; + +class MULQ_S_PH_DESC : ADDU_QB_DESC_BASE<"mulq_s.ph", int_mips_mulq_s_ph, + NoItinerary, DSPRegs, DSPRegs>, + IsCommutable; + +// Dot product with accumulate/subtract +class DPA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpa.w.ph">; + +class DPS_W_PH_DESC : DPA_W_PH_DESC_BASE<"dps.w.ph">; + +class DPAQX_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaqx_s.w.ph">; + +class DPAQX_SA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpaqx_sa.w.ph">; + +class DPAX_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpax.w.ph">; + +class DPSX_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsx.w.ph">; + +class DPSQX_S_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsqx_s.w.ph">; + +class DPSQX_SA_W_PH_DESC : DPA_W_PH_DESC_BASE<"dpsqx_sa.w.ph">; + +class MULSA_W_PH_DESC : DPA_W_PH_DESC_BASE<"mulsa.w.ph">; + +// Precision reduce/expand +class PRECR_QB_PH_DESC : CMP_EQ_QB_R3_DESC_BASE<"precr.qb.ph", + int_mips_precr_qb_ph, + NoItinerary, DSPRegs, DSPRegs>; + +class PRECR_SRA_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra.ph.w", + int_mips_precr_sra_ph_w, + NoItinerary, DSPRegs, + CPURegs>, ClearDefs; + +class PRECR_SRA_R_PH_W_DESC : PRECR_SRA_PH_W_DESC_BASE<"precr_sra_r.ph.w", + int_mips_precr_sra_r_ph_w, + NoItinerary, DSPRegs, + CPURegs>, ClearDefs; + +// Shift +class SHRA_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra.qb", int_mips_shra_qb, immZExt3, + NoItinerary, DSPRegs>, ClearDefs; + +class SHRAV_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav.qb", int_mips_shra_qb, + NoItinerary, DSPRegs>, ClearDefs; + +class SHRA_R_QB_DESC : SHLL_QB_R2_DESC_BASE<"shra_r.qb", int_mips_shra_r_qb, + immZExt3, NoItinerary, DSPRegs>, + ClearDefs; + +class SHRAV_R_QB_DESC : SHLL_QB_R3_DESC_BASE<"shrav_r.qb", int_mips_shra_r_qb, + NoItinerary, DSPRegs>, ClearDefs; + +class SHRL_PH_DESC : SHLL_QB_R2_DESC_BASE<"shrl.ph", int_mips_shrl_ph, immZExt4, + NoItinerary, DSPRegs>, ClearDefs; + +class SHRLV_PH_DESC : SHLL_QB_R3_DESC_BASE<"shrlv.ph", int_mips_shrl_ph, + NoItinerary, DSPRegs>, ClearDefs; + +// Misc +class APPEND_DESC : APPEND_DESC_BASE<"append", int_mips_append, immZExt5, + NoItinerary>, ClearDefs; + +class BALIGN_DESC : APPEND_DESC_BASE<"balign", int_mips_balign, immZExt2, + NoItinerary>, ClearDefs; + +class PREPEND_DESC : APPEND_DESC_BASE<"prepend", int_mips_prepend, immZExt5, + NoItinerary>, ClearDefs; + +// Pseudos. 
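+// BPOSGE32_PSEUDO exists because the real BPOSGE32 defined above is a pure
+// branch (no outputs), while the selection pattern needs the pos>=32 test
+// from DSPCtrl as an i32 value; usesCustomInserter is set, so the pseudo is
+// expanded after instruction selection by target C++ code (presumably into
+// a small branch sequence around BPOSGE32 that produces 0 or 1).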
+def BPOSGE32_PSEUDO : BPOSGE32_PSEUDO_DESC_BASE; + +// Instruction defs. +// MIPS DSP Rev 1 +def ADDU_QB : ADDU_QB_ENC, ADDU_QB_DESC; +def ADDU_S_QB : ADDU_S_QB_ENC, ADDU_S_QB_DESC; +def SUBU_QB : SUBU_QB_ENC, SUBU_QB_DESC; +def SUBU_S_QB : SUBU_S_QB_ENC, SUBU_S_QB_DESC; +def ADDQ_PH : ADDQ_PH_ENC, ADDQ_PH_DESC; +def ADDQ_S_PH : ADDQ_S_PH_ENC, ADDQ_S_PH_DESC; +def SUBQ_PH : SUBQ_PH_ENC, SUBQ_PH_DESC; +def SUBQ_S_PH : SUBQ_S_PH_ENC, SUBQ_S_PH_DESC; +def ADDQ_S_W : ADDQ_S_W_ENC, ADDQ_S_W_DESC; +def SUBQ_S_W : SUBQ_S_W_ENC, SUBQ_S_W_DESC; +def ADDSC : ADDSC_ENC, ADDSC_DESC; +def ADDWC : ADDWC_ENC, ADDWC_DESC; +def MODSUB : MODSUB_ENC, MODSUB_DESC; +def RADDU_W_QB : RADDU_W_QB_ENC, RADDU_W_QB_DESC; +def ABSQ_S_PH : ABSQ_S_PH_ENC, ABSQ_S_PH_DESC; +def ABSQ_S_W : ABSQ_S_W_ENC, ABSQ_S_W_DESC; +def PRECRQ_QB_PH : PRECRQ_QB_PH_ENC, PRECRQ_QB_PH_DESC; +def PRECRQ_PH_W : PRECRQ_PH_W_ENC, PRECRQ_PH_W_DESC; +def PRECRQ_RS_PH_W : PRECRQ_RS_PH_W_ENC, PRECRQ_RS_PH_W_DESC; +def PRECRQU_S_QB_PH : PRECRQU_S_QB_PH_ENC, PRECRQU_S_QB_PH_DESC; +def PRECEQ_W_PHL : PRECEQ_W_PHL_ENC, PRECEQ_W_PHL_DESC; +def PRECEQ_W_PHR : PRECEQ_W_PHR_ENC, PRECEQ_W_PHR_DESC; +def PRECEQU_PH_QBL : PRECEQU_PH_QBL_ENC, PRECEQU_PH_QBL_DESC; +def PRECEQU_PH_QBR : PRECEQU_PH_QBR_ENC, PRECEQU_PH_QBR_DESC; +def PRECEQU_PH_QBLA : PRECEQU_PH_QBLA_ENC, PRECEQU_PH_QBLA_DESC; +def PRECEQU_PH_QBRA : PRECEQU_PH_QBRA_ENC, PRECEQU_PH_QBRA_DESC; +def PRECEU_PH_QBL : PRECEU_PH_QBL_ENC, PRECEU_PH_QBL_DESC; +def PRECEU_PH_QBR : PRECEU_PH_QBR_ENC, PRECEU_PH_QBR_DESC; +def PRECEU_PH_QBLA : PRECEU_PH_QBLA_ENC, PRECEU_PH_QBLA_DESC; +def PRECEU_PH_QBRA : PRECEU_PH_QBRA_ENC, PRECEU_PH_QBRA_DESC; +def SHLL_QB : SHLL_QB_ENC, SHLL_QB_DESC; +def SHLLV_QB : SHLLV_QB_ENC, SHLLV_QB_DESC; +def SHRL_QB : SHRL_QB_ENC, SHRL_QB_DESC; +def SHRLV_QB : SHRLV_QB_ENC, SHRLV_QB_DESC; +def SHLL_PH : SHLL_PH_ENC, SHLL_PH_DESC; +def SHLLV_PH : SHLLV_PH_ENC, SHLLV_PH_DESC; +def SHLL_S_PH : SHLL_S_PH_ENC, SHLL_S_PH_DESC; +def SHLLV_S_PH : SHLLV_S_PH_ENC, SHLLV_S_PH_DESC; +def SHRA_PH : SHRA_PH_ENC, SHRA_PH_DESC; +def SHRAV_PH : SHRAV_PH_ENC, SHRAV_PH_DESC; +def SHRA_R_PH : SHRA_R_PH_ENC, SHRA_R_PH_DESC; +def SHRAV_R_PH : SHRAV_R_PH_ENC, SHRAV_R_PH_DESC; +def SHLL_S_W : SHLL_S_W_ENC, SHLL_S_W_DESC; +def SHLLV_S_W : SHLLV_S_W_ENC, SHLLV_S_W_DESC; +def SHRA_R_W : SHRA_R_W_ENC, SHRA_R_W_DESC; +def SHRAV_R_W : SHRAV_R_W_ENC, SHRAV_R_W_DESC; +def MULEU_S_PH_QBL : MULEU_S_PH_QBL_ENC, MULEU_S_PH_QBL_DESC; +def MULEU_S_PH_QBR : MULEU_S_PH_QBR_ENC, MULEU_S_PH_QBR_DESC; +def MULEQ_S_W_PHL : MULEQ_S_W_PHL_ENC, MULEQ_S_W_PHL_DESC; +def MULEQ_S_W_PHR : MULEQ_S_W_PHR_ENC, MULEQ_S_W_PHR_DESC; +def MULQ_RS_PH : MULQ_RS_PH_ENC, MULQ_RS_PH_DESC; +def MULSAQ_S_W_PH : MULSAQ_S_W_PH_ENC, MULSAQ_S_W_PH_DESC; +def MAQ_S_W_PHL : MAQ_S_W_PHL_ENC, MAQ_S_W_PHL_DESC; +def MAQ_S_W_PHR : MAQ_S_W_PHR_ENC, MAQ_S_W_PHR_DESC; +def MAQ_SA_W_PHL : MAQ_SA_W_PHL_ENC, MAQ_SA_W_PHL_DESC; +def MAQ_SA_W_PHR : MAQ_SA_W_PHR_ENC, MAQ_SA_W_PHR_DESC; +def DPAU_H_QBL : DPAU_H_QBL_ENC, DPAU_H_QBL_DESC; +def DPAU_H_QBR : DPAU_H_QBR_ENC, DPAU_H_QBR_DESC; +def DPSU_H_QBL : DPSU_H_QBL_ENC, DPSU_H_QBL_DESC; +def DPSU_H_QBR : DPSU_H_QBR_ENC, DPSU_H_QBR_DESC; +def DPAQ_S_W_PH : DPAQ_S_W_PH_ENC, DPAQ_S_W_PH_DESC; +def DPSQ_S_W_PH : DPSQ_S_W_PH_ENC, DPSQ_S_W_PH_DESC; +def DPAQ_SA_L_W : DPAQ_SA_L_W_ENC, DPAQ_SA_L_W_DESC; +def DPSQ_SA_L_W : DPSQ_SA_L_W_ENC, DPSQ_SA_L_W_DESC; +def MULT_DSP : MULT_DSP_ENC, MULT_DSP_DESC; +def MULTU_DSP : MULTU_DSP_ENC, MULTU_DSP_DESC; +def MADD_DSP : MADD_DSP_ENC, MADD_DSP_DESC; +def MADDU_DSP : 
MADDU_DSP_ENC, MADDU_DSP_DESC; +def MSUB_DSP : MSUB_DSP_ENC, MSUB_DSP_DESC; +def MSUBU_DSP : MSUBU_DSP_ENC, MSUBU_DSP_DESC; +def CMPU_EQ_QB : CMPU_EQ_QB_ENC, CMPU_EQ_QB_DESC; +def CMPU_LT_QB : CMPU_LT_QB_ENC, CMPU_LT_QB_DESC; +def CMPU_LE_QB : CMPU_LE_QB_ENC, CMPU_LE_QB_DESC; +def CMPGU_EQ_QB : CMPGU_EQ_QB_ENC, CMPGU_EQ_QB_DESC; +def CMPGU_LT_QB : CMPGU_LT_QB_ENC, CMPGU_LT_QB_DESC; +def CMPGU_LE_QB : CMPGU_LE_QB_ENC, CMPGU_LE_QB_DESC; +def CMP_EQ_PH : CMP_EQ_PH_ENC, CMP_EQ_PH_DESC; +def CMP_LT_PH : CMP_LT_PH_ENC, CMP_LT_PH_DESC; +def CMP_LE_PH : CMP_LE_PH_ENC, CMP_LE_PH_DESC; +def BITREV : BITREV_ENC, BITREV_DESC; +def PACKRL_PH : PACKRL_PH_ENC, PACKRL_PH_DESC; +def REPL_QB : REPL_QB_ENC, REPL_QB_DESC; +def REPL_PH : REPL_PH_ENC, REPL_PH_DESC; +def REPLV_QB : REPLV_QB_ENC, REPLV_QB_DESC; +def REPLV_PH : REPLV_PH_ENC, REPLV_PH_DESC; +def PICK_QB : PICK_QB_ENC, PICK_QB_DESC; +def PICK_PH : PICK_PH_ENC, PICK_PH_DESC; +def LWX : LWX_ENC, LWX_DESC; +def LHX : LHX_ENC, LHX_DESC; +def LBUX : LBUX_ENC, LBUX_DESC; +def BPOSGE32 : BPOSGE32_ENC, BPOSGE32_DESC; +def INSV : INSV_ENC, INSV_DESC; +def EXTP : EXTP_ENC, EXTP_DESC; +def EXTPV : EXTPV_ENC, EXTPV_DESC; +def EXTPDP : EXTPDP_ENC, EXTPDP_DESC; +def EXTPDPV : EXTPDPV_ENC, EXTPDPV_DESC; +def EXTR_W : EXTR_W_ENC, EXTR_W_DESC; +def EXTRV_W : EXTRV_W_ENC, EXTRV_W_DESC; +def EXTR_R_W : EXTR_R_W_ENC, EXTR_R_W_DESC; +def EXTRV_R_W : EXTRV_R_W_ENC, EXTRV_R_W_DESC; +def EXTR_RS_W : EXTR_RS_W_ENC, EXTR_RS_W_DESC; +def EXTRV_RS_W : EXTRV_RS_W_ENC, EXTRV_RS_W_DESC; +def EXTR_S_H : EXTR_S_H_ENC, EXTR_S_H_DESC; +def EXTRV_S_H : EXTRV_S_H_ENC, EXTRV_S_H_DESC; +def SHILO : SHILO_ENC, SHILO_DESC; +def SHILOV : SHILOV_ENC, SHILOV_DESC; +def MTHLIP : MTHLIP_ENC, MTHLIP_DESC; +def RDDSP : RDDSP_ENC, RDDSP_DESC; +def WRDSP : WRDSP_ENC, WRDSP_DESC; + +// MIPS DSP Rev 2 +let Predicates = [HasDSPR2] in { + +def ADDU_PH : ADDU_PH_ENC, ADDU_PH_DESC; +def ADDU_S_PH : ADDU_S_PH_ENC, ADDU_S_PH_DESC; +def SUBU_PH : SUBU_PH_ENC, SUBU_PH_DESC; +def SUBU_S_PH : SUBU_S_PH_ENC, SUBU_S_PH_DESC; +def CMPGDU_EQ_QB : CMPGDU_EQ_QB_ENC, CMPGDU_EQ_QB_DESC; +def CMPGDU_LT_QB : CMPGDU_LT_QB_ENC, CMPGDU_LT_QB_DESC; +def CMPGDU_LE_QB : CMPGDU_LE_QB_ENC, CMPGDU_LE_QB_DESC; +def ABSQ_S_QB : ABSQ_S_QB_ENC, ABSQ_S_QB_DESC; +def ADDUH_QB : ADDUH_QB_ENC, ADDUH_QB_DESC; +def ADDUH_R_QB : ADDUH_R_QB_ENC, ADDUH_R_QB_DESC; +def SUBUH_QB : SUBUH_QB_ENC, SUBUH_QB_DESC; +def SUBUH_R_QB : SUBUH_R_QB_ENC, SUBUH_R_QB_DESC; +def ADDQH_PH : ADDQH_PH_ENC, ADDQH_PH_DESC; +def ADDQH_R_PH : ADDQH_R_PH_ENC, ADDQH_R_PH_DESC; +def SUBQH_PH : SUBQH_PH_ENC, SUBQH_PH_DESC; +def SUBQH_R_PH : SUBQH_R_PH_ENC, SUBQH_R_PH_DESC; +def ADDQH_W : ADDQH_W_ENC, ADDQH_W_DESC; +def ADDQH_R_W : ADDQH_R_W_ENC, ADDQH_R_W_DESC; +def SUBQH_W : SUBQH_W_ENC, SUBQH_W_DESC; +def SUBQH_R_W : SUBQH_R_W_ENC, SUBQH_R_W_DESC; +def MUL_PH : MUL_PH_ENC, MUL_PH_DESC; +def MUL_S_PH : MUL_S_PH_ENC, MUL_S_PH_DESC; +def MULQ_S_W : MULQ_S_W_ENC, MULQ_S_W_DESC; +def MULQ_RS_W : MULQ_RS_W_ENC, MULQ_RS_W_DESC; +def MULQ_S_PH : MULQ_S_PH_ENC, MULQ_S_PH_DESC; +def DPA_W_PH : DPA_W_PH_ENC, DPA_W_PH_DESC; +def DPS_W_PH : DPS_W_PH_ENC, DPS_W_PH_DESC; +def DPAQX_S_W_PH : DPAQX_S_W_PH_ENC, DPAQX_S_W_PH_DESC; +def DPAQX_SA_W_PH : DPAQX_SA_W_PH_ENC, DPAQX_SA_W_PH_DESC; +def DPAX_W_PH : DPAX_W_PH_ENC, DPAX_W_PH_DESC; +def DPSX_W_PH : DPSX_W_PH_ENC, DPSX_W_PH_DESC; +def DPSQX_S_W_PH : DPSQX_S_W_PH_ENC, DPSQX_S_W_PH_DESC; +def DPSQX_SA_W_PH : DPSQX_SA_W_PH_ENC, DPSQX_SA_W_PH_DESC; +def MULSA_W_PH : MULSA_W_PH_ENC, MULSA_W_PH_DESC; +def PRECR_QB_PH : PRECR_QB_PH_ENC, 
PRECR_QB_PH_DESC; +def PRECR_SRA_PH_W : PRECR_SRA_PH_W_ENC, PRECR_SRA_PH_W_DESC; +def PRECR_SRA_R_PH_W : PRECR_SRA_R_PH_W_ENC, PRECR_SRA_R_PH_W_DESC; +def SHRA_QB : SHRA_QB_ENC, SHRA_QB_DESC; +def SHRAV_QB : SHRAV_QB_ENC, SHRAV_QB_DESC; +def SHRA_R_QB : SHRA_R_QB_ENC, SHRA_R_QB_DESC; +def SHRAV_R_QB : SHRAV_R_QB_ENC, SHRAV_R_QB_DESC; +def SHRL_PH : SHRL_PH_ENC, SHRL_PH_DESC; +def SHRLV_PH : SHRLV_PH_ENC, SHRLV_PH_DESC; +def APPEND : APPEND_ENC, APPEND_DESC; +def BALIGN : BALIGN_ENC, BALIGN_DESC; +def PREPEND : PREPEND_ENC, PREPEND_DESC; + +} + +// Pseudos. +def MULSAQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def MAQ_S_W_PHL_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def MAQ_S_W_PHR_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def MAQ_SA_W_PHL_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def MAQ_SA_W_PHR_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPAU_H_QBL_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPAU_H_QBR_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPSU_H_QBL_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPSU_H_QBR_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPAQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPSQ_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPAQ_SA_L_W_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPSQ_SA_L_W_PSEUDO : DPA_W_PH_PSEUDO_BASE; + +def MULT_DSP_PSEUDO : MULT_PSEUDO_BASE, + IsCommutable; +def MULTU_DSP_PSEUDO : MULT_PSEUDO_BASE, + IsCommutable; +def MADD_DSP_PSEUDO : MULT_PSEUDO_BASE, + IsCommutable, UseAC; +def MADDU_DSP_PSEUDO : MULT_PSEUDO_BASE, + IsCommutable, UseAC; +def MSUB_DSP_PSEUDO : MULT_PSEUDO_BASE, + UseAC; +def MSUBU_DSP_PSEUDO : MULT_PSEUDO_BASE, + UseAC; + +def SHILO_PSEUDO : SHILO_R1_PSEUDO_BASE; +def SHILOV_PSEUDO : SHILO_R2_PSEUDO_BASE; +def MTHLIP_PSEUDO : SHILO_R2_PSEUDO_BASE; + +let Predicates = [HasDSPR2] in { + +def DPA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPS_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPAQX_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPAQX_SA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPAX_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPSX_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPSQX_S_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def DPSQX_SA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; +def MULSA_W_PH_PSEUDO : DPA_W_PH_PSEUDO_BASE; + +} + +// Patterns. +class DSPPat : + Pat, Requires<[pred]>; + +class BitconvertPat : + DSPPat<(DstVT (bitconvert (SrcVT SrcRC:$src))), + (COPY_TO_REGCLASS SrcRC:$src, DstRC)>; + +def : BitconvertPat; +def : BitconvertPat; +def : BitconvertPat; +def : BitconvertPat; + +def : DSPPat<(v2i16 (load addr:$a)), + (v2i16 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>; +def : DSPPat<(v4i8 (load addr:$a)), + (v4i8 (COPY_TO_REGCLASS (LW addr:$a), DSPRegs))>; +def : DSPPat<(store (v2i16 DSPRegs:$val), addr:$a), + (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>; +def : DSPPat<(store (v4i8 DSPRegs:$val), addr:$a), + (SW (COPY_TO_REGCLASS DSPRegs:$val, CPURegs), addr:$a)>; + +// Extr patterns. 
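+// The patterns below select the accumulator-extract nodes (MipsEXTP,
+// MipsEXTR_W, MipsEXTR_S_H, ...) onto the corresponding instructions with
+// the accumulator operand hard-wired to AC0; each node gets an immediate
+// (R1) and a register (R2, the ...V instruction) instantiation, e.g.
+// (illustrative, showing the intended template arguments):
+//
+//   def : EXTR_W_TY1_R1_Pat<MipsEXTR_W, EXTR_W>;
+//   def : EXTR_W_TY1_R2_Pat<MipsEXTR_W, EXTRV_W>;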
+class EXTR_W_TY1_R2_Pat : + DSPPat<(i32 (OpNode CPURegs:$rs)), (Instr AC0, CPURegs:$rs)>; + +class EXTR_W_TY1_R1_Pat : + DSPPat<(i32 (OpNode immZExt5:$shift)), (Instr AC0, immZExt5:$shift)>; + +def : EXTR_W_TY1_R1_Pat; +def : EXTR_W_TY1_R2_Pat; +def : EXTR_W_TY1_R1_Pat; +def : EXTR_W_TY1_R2_Pat; +def : EXTR_W_TY1_R1_Pat; +def : EXTR_W_TY1_R2_Pat; +def : EXTR_W_TY1_R1_Pat; +def : EXTR_W_TY1_R2_Pat; +def : EXTR_W_TY1_R1_Pat; +def : EXTR_W_TY1_R2_Pat; +def : EXTR_W_TY1_R1_Pat; +def : EXTR_W_TY1_R2_Pat; diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp index 2bba8a3..e3c8ed7 100644 --- a/lib/Target/Mips/MipsDelaySlotFiller.cpp +++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp @@ -30,10 +30,11 @@ STATISTIC(FilledSlots, "Number of delay slots filled"); STATISTIC(UsefulSlots, "Number of delay slots filled with instructions that" " are not NOP."); -static cl::opt EnableDelaySlotFiller( - "enable-mips-delay-filler", +static cl::opt DisableDelaySlotFiller( + "disable-mips-delay-filler", cl::init(false), - cl::desc("Fill the Mips delay slots useful instructions."), + cl::desc("Disable the delay slot filler, which attempts to fill the Mips" + "delay slots with useful instructions."), cl::Hidden); // This option can be used to silence complaints by machine verifier passes. @@ -114,7 +115,9 @@ runOnMachineBasicBlock(MachineBasicBlock &MBB) { InstrIter D; - if (EnableDelaySlotFiller && findDelayInstr(MBB, I, D)) { + // Delay slot filling is disabled at -O0. + if (!DisableDelaySlotFiller && (TM.getOptLevel() != CodeGenOpt::None) && + findDelayInstr(MBB, I, D)) { MBB.splice(llvm::next(I), &MBB, D); ++UsefulSlots; } else diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp index 8c0474b..2cad2a6 100644 --- a/lib/Target/Mips/MipsFrameLowering.cpp +++ b/lib/Target/Mips/MipsFrameLowering.cpp @@ -23,7 +23,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" @@ -98,3 +98,37 @@ bool MipsFrameLowering::hasFP(const MachineFunction &MF) const { return MF.getTarget().Options.DisableFramePointerElim(MF) || MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken(); } + +uint64_t MipsFrameLowering::estimateStackSize(const MachineFunction &MF) const { + const MachineFrameInfo *MFI = MF.getFrameInfo(); + const TargetRegisterInfo &TRI = *MF.getTarget().getRegisterInfo(); + + int64_t Offset = 0; + + // Iterate over fixed sized objects. + for (int I = MFI->getObjectIndexBegin(); I != 0; ++I) + Offset = std::max(Offset, -MFI->getObjectOffset(I)); + + // Conservatively assume all callee-saved registers will be saved. + for (const uint16_t *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) { + unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize(); + Offset = RoundUpToAlignment(Offset + Size, Size); + } + + unsigned MaxAlign = MFI->getMaxAlignment(); + + // Check that MaxAlign is not zero if there is a stack object that is not a + // callee-saved spill. + assert(!MFI->getObjectIndexEnd() || MaxAlign); + + // Iterate over other objects. + for (unsigned I = 0, E = MFI->getObjectIndexEnd(); I != E; ++I) + Offset = RoundUpToAlignment(Offset + MFI->getObjectSize(I), MaxAlign); + + // Call frame. 
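+  // The outgoing call-frame area is rounded up the same way; e.g. an Offset
+  // of 36 plus a 16-byte max call frame at 8-byte stack alignment yields
+  // RoundUpToAlignment(52, 8) == 56 (figures purely illustrative).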
+ if (MFI->adjustsStack() && hasReservedCallFrame(MF)) + Offset = RoundUpToAlignment(Offset + MFI->getMaxCallFrameSize(), + std::max(MaxAlign, getStackAlignment())); + + return RoundUpToAlignment(Offset, getStackAlignment()); +} diff --git a/lib/Target/Mips/MipsFrameLowering.h b/lib/Target/Mips/MipsFrameLowering.h index ed7b7fe..df52d92 100644 --- a/lib/Target/Mips/MipsFrameLowering.h +++ b/lib/Target/Mips/MipsFrameLowering.h @@ -34,6 +34,9 @@ public: const MipsSubtarget &ST); bool hasFP(const MachineFunction &MF) const; + +protected: + uint64_t estimateStackSize(const MachineFunction &MF) const; }; /// Create MipsInstrInfo objects. diff --git a/lib/Target/Mips/MipsISelDAGToDAG.cpp b/lib/Target/Mips/MipsISelDAGToDAG.cpp index 5a97c17..c5fca7f 100644 --- a/lib/Target/Mips/MipsISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -86,6 +86,10 @@ private: SDNode *getGlobalBaseReg(); + SDValue getMips16SPAliasReg(); + + void getMips16SPRefReg(SDNode *parent, SDValue &AliasReg); + std::pair SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty, bool HasLo, bool HasHi); @@ -94,6 +98,9 @@ private: // Complex Pattern. bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, SDValue &Offset); + bool SelectAddr16(SDNode *Parent, SDValue N, SDValue &Base, SDValue &Offset, + SDValue &Alias); + // getImm - Return a target constant with the specified value. inline SDValue getImm(const SDNode *Node, unsigned Imm) { return CurDAG->getTargetConstant(Imm, Node->getValueType(0)); @@ -102,6 +109,7 @@ private: void ProcessFunctionAfterISel(MachineFunction &MF); bool ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&); void InitGlobalBaseReg(MachineFunction &MF); + void InitMips16SPAliasReg(MachineFunction &MF); virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, @@ -220,6 +228,26 @@ void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) { .addReg(Mips::V0).addReg(Mips::T9); } +// Insert instructions to initialize the Mips16 SP Alias register in the +// first MBB of the function. +// +void MipsDAGToDAGISel::InitMips16SPAliasReg(MachineFunction &MF) { + MipsFunctionInfo *MipsFI = MF.getInfo(); + + if (!MipsFI->mips16SPAliasRegSet()) + return; + + MachineBasicBlock &MBB = MF.front(); + MachineBasicBlock::iterator I = MBB.begin(); + const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo(); + DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc(); + unsigned Mips16SPAliasReg = MipsFI->getMips16SPAliasReg(); + + BuildMI(MBB, I, DL, TII.get(Mips::MoveR3216), Mips16SPAliasReg) + .addReg(Mips::SP); +} + + bool MipsDAGToDAGISel::ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr& MI) { unsigned DstReg = 0, ZeroReg = 0; @@ -260,6 +288,7 @@ bool MipsDAGToDAGISel::ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI, void MipsDAGToDAGISel::ProcessFunctionAfterISel(MachineFunction &MF) { InitGlobalBaseReg(MF); + InitMips16SPAliasReg(MF); MachineRegisterInfo *MRI = &MF.getRegInfo(); @@ -284,6 +313,14 @@ SDNode *MipsDAGToDAGISel::getGlobalBaseReg() { return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode(); } +/// getMips16SPAliasReg - Output the instructions required to put the +/// SP into a Mips16 accessible aliased register. 
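+/// Mips16 byte and halfword loads/stores apparently have no SP-relative
+/// addressing form, so getMips16SPRefReg below routes 8- and 16-bit frame
+/// accesses through this alias (or through $s0 when a frame pointer is
+/// present); other accesses keep using $sp directly.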
+SDValue MipsDAGToDAGISel::getMips16SPAliasReg() { + unsigned Mips16SPAliasReg = + MF->getInfo()->getMips16SPAliasReg(); + return CurDAG->getRegister(Mips16SPAliasReg, TLI.getPointerTy()); +} + /// ComplexPattern used on MipsInstrInfo /// Used on Mips Load/Store instructions bool MipsDAGToDAGISel:: @@ -337,8 +374,9 @@ SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) { // Generate: // lui $2, %hi($CPI1_0) // lwc1 $f0, %lo($CPI1_0)($2) - if (Addr.getOperand(1).getOpcode() == MipsISD::Lo) { - SDValue LoVal = Addr.getOperand(1), Opnd0 = LoVal.getOperand(0); + if (Addr.getOperand(1).getOpcode() == MipsISD::Lo || + Addr.getOperand(1).getOpcode() == MipsISD::GPRel) { + SDValue Opnd0 = Addr.getOperand(1).getOperand(0); if (isa(Opnd0) || isa(Opnd0) || isa(Opnd0)) { Base = Addr.getOperand(0); @@ -361,6 +399,115 @@ SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) { return true; } +void MipsDAGToDAGISel::getMips16SPRefReg(SDNode *Parent, SDValue &AliasReg) { + SDValue AliasFPReg = CurDAG->getRegister(Mips::S0, TLI.getPointerTy()); + if (Parent) { + switch (Parent->getOpcode()) { + case ISD::LOAD: { + LoadSDNode *SD = dyn_cast(Parent); + switch (SD->getMemoryVT().getSizeInBits()) { + case 8: + case 16: + AliasReg = TM.getFrameLowering()->hasFP(*MF)? + AliasFPReg: getMips16SPAliasReg(); + return; + } + break; + } + case ISD::STORE: { + StoreSDNode *SD = dyn_cast(Parent); + switch (SD->getMemoryVT().getSizeInBits()) { + case 8: + case 16: + AliasReg = TM.getFrameLowering()->hasFP(*MF)? + AliasFPReg: getMips16SPAliasReg(); + return; + } + break; + } + } + } + AliasReg = CurDAG->getRegister(Mips::SP, TLI.getPointerTy()); + return; + +} +bool MipsDAGToDAGISel::SelectAddr16( + SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset, + SDValue &Alias) { + EVT ValTy = Addr.getValueType(); + + Alias = CurDAG->getTargetConstant(0, ValTy); + + // if Address is FI, get the TargetFrameIndex. + if (FrameIndexSDNode *FIN = dyn_cast(Addr)) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + Offset = CurDAG->getTargetConstant(0, ValTy); + getMips16SPRefReg(Parent, Alias); + return true; + } + // on PIC code Load GA + if (Addr.getOpcode() == MipsISD::Wrapper) { + Base = Addr.getOperand(0); + Offset = Addr.getOperand(1); + return true; + } + if (TM.getRelocationModel() != Reloc::PIC_) { + if ((Addr.getOpcode() == ISD::TargetExternalSymbol || + Addr.getOpcode() == ISD::TargetGlobalAddress)) + return false; + } + // Addresses of the form FI+const or FI|const + if (CurDAG->isBaseWithConstantOffset(Addr)) { + ConstantSDNode *CN = dyn_cast(Addr.getOperand(1)); + if (isInt<16>(CN->getSExtValue())) { + + // If the first operand is a FI, get the TargetFI Node + if (FrameIndexSDNode *FIN = dyn_cast + (Addr.getOperand(0))) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + getMips16SPRefReg(Parent, Alias); + } + else + Base = Addr.getOperand(0); + + Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy); + return true; + } + } + // Operand is a result from an ADD. + if (Addr.getOpcode() == ISD::ADD) { + // When loading from constant pools, load the lower address part in + // the instruction itself. 
Example, instead of: + // lui $2, %hi($CPI1_0) + // addiu $2, $2, %lo($CPI1_0) + // lwc1 $f0, 0($2) + // Generate: + // lui $2, %hi($CPI1_0) + // lwc1 $f0, %lo($CPI1_0)($2) + if (Addr.getOperand(1).getOpcode() == MipsISD::Lo || + Addr.getOperand(1).getOpcode() == MipsISD::GPRel) { + SDValue Opnd0 = Addr.getOperand(1).getOperand(0); + if (isa(Opnd0) || isa(Opnd0) || + isa(Opnd0)) { + Base = Addr.getOperand(0); + Offset = Opnd0; + return true; + } + } + + // If an indexed floating point load/store can be emitted, return false. + const LSBaseSDNode *LS = dyn_cast(Parent); + + if (LS && + (LS->getMemoryVT() == MVT::f32 || LS->getMemoryVT() == MVT::f64) && + Subtarget.hasMips32r2Or64()) + return false; + } + Base = Addr; + Offset = CurDAG->getTargetConstant(0, ValTy); + return true; +} + /// Select multiply instructions. std::pair MipsDAGToDAGISel::SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty, @@ -371,14 +518,16 @@ MipsDAGToDAGISel::SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty, SDValue InFlag = SDValue(Mul, 0); if (HasLo) { - Lo = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFLO : Mips::MFLO64, dl, - Ty, MVT::Glue, InFlag); + unsigned Opcode = Subtarget.inMips16Mode() ? Mips::Mflo16 : + (Ty == MVT::i32 ? Mips::MFLO : Mips::MFLO64); + Lo = CurDAG->getMachineNode(Opcode, dl, Ty, MVT::Glue, InFlag); InFlag = SDValue(Lo, 1); } - if (HasHi) - Hi = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFHI : Mips::MFHI64, dl, - Ty, InFlag); - + if (HasHi) { + unsigned Opcode = Subtarget.inMips16Mode() ? Mips::Mfhi16 : + (Ty == MVT::i32 ? Mips::MFHI : Mips::MFHI64); + Hi = CurDAG->getMachineNode(Opcode, dl, Ty, InFlag); + } return std::make_pair(Lo, Hi); } @@ -410,6 +559,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { case ISD::SUBE: case ISD::ADDE: { + bool inMips16Mode = Subtarget.inMips16Mode(); SDValue InFlag = Node->getOperand(2), CmpLHS; unsigned Opc = InFlag.getOpcode(); (void)Opc; assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) || @@ -419,10 +569,16 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { unsigned MOp; if (Opcode == ISD::ADDE) { CmpLHS = InFlag.getValue(0); - MOp = Mips::ADDu; + if (inMips16Mode) + MOp = Mips::AdduRxRyRz16; + else + MOp = Mips::ADDu; } else { CmpLHS = InFlag.getOperand(0); - MOp = Mips::SUBu; + if (inMips16Mode) + MOp = Mips::SubuRxRyRz16; + else + MOp = Mips::SUBu; } SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) }; @@ -431,8 +587,11 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { SDValue RHS = Node->getOperand(1); EVT VT = LHS.getValueType(); - SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, dl, VT, Ops, 2); - SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, dl, VT, + + unsigned Sltu_op = inMips16Mode? Mips::SltuRxRyRz16: Mips::SLTu; + SDNode *Carry = CurDAG->getMachineNode(Sltu_op, dl, VT, Ops, 2); + unsigned Addu_op = inMips16Mode? Mips::AdduRxRyRz16 : Mips::ADDu; + SDNode *AddCarry = CurDAG->getMachineNode(Addu_op, dl, VT, SDValue(Carry,0), RHS); return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, @@ -442,8 +601,13 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { /// Mul with two results case ISD::SMUL_LOHI: case ISD::UMUL_LOHI: { - if (NodeTy == MVT::i32) - MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT); + if (NodeTy == MVT::i32) { + if (Subtarget.inMips16Mode()) + MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MultuRxRy16 : + Mips::MultRxRy16); + else + MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT); + } else MultOpc = (Opcode == ISD::UMUL_LOHI ? 
Mips::DMULTu : Mips::DMULT); @@ -469,8 +633,13 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { } case ISD::MULHS: case ISD::MULHU: { - if (NodeTy == MVT::i32) - MultOpc = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT); + if (NodeTy == MVT::i32) { + if (Subtarget.inMips16Mode()) + MultOpc = (Opcode == ISD::MULHU ? + Mips::MultuRxRy16 : Mips::MultRxRy16); + else + MultOpc = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT); + } else MultOpc = (Opcode == ISD::MULHU ? Mips::DMULTu : Mips::DMULT); @@ -539,6 +708,15 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { return RegOpnd; } +#ifndef NDEBUG + case ISD::LOAD: + case ISD::STORE: + assert(cast(Node)->getMemoryVT().getSizeInBits() / 8 <= + cast(Node)->getAlignment() && + "Unexpected unaligned loads/stores."); + break; +#endif + case MipsISD::ThreadPointer: { EVT PtrVT = TLI.getPointerTy(); unsigned RdhwrOpc, SrcReg, DestReg; diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index c5207c6..e225b6c 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -25,6 +25,7 @@ #include "llvm/GlobalVariable.h" #include "llvm/Intrinsics.h" #include "llvm/CallingConv.h" +#include "llvm/ADT/Statistic.h" #include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" @@ -32,12 +33,33 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/CodeGen/ValueTypes.h" +#include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; +STATISTIC(NumTailCalls, "Number of tail calls"); + +static cl::opt +EnableMipsTailCalls("enable-mips-tail-calls", cl::Hidden, + cl::desc("MIPS: Enable tail calls."), cl::init(false)); + +static const uint16_t O32IntRegs[4] = { + Mips::A0, Mips::A1, Mips::A2, Mips::A3 +}; + +static const uint16_t Mips64IntRegs[8] = { + Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64, + Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64 +}; + +static const uint16_t Mips64DPRegs[8] = { + Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64, + Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64 +}; + // If I is a shifted mask, set the size (Size) and the first bit of the // mask (Pos), and return true. // For example, if I is 0x003ff800, (Pos, Size) = (11, 11). 
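The comment above describes the shifted-mask check used by the Mips lowering
code. As a rough sketch (not the patch's own helper; it assumes the
isShiftedMask_64, CountPopulation_64 and CountTrailingZeros_64 utilities from
llvm/Support/MathExtras.h), the (Pos, Size) pair can be computed like this:

  static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
    if (!isShiftedMask_64(I))          // reject values that are not one contiguous run of set bits
      return false;
    Size = CountPopulation_64(I);      // number of set bits, 11 for 0x003ff800
    Pos  = CountTrailingZeros_64(I);   // index of the lowest set bit, 11 for 0x003ff800
    return true;
  }

For I = 0x003ff800 this yields (Pos, Size) = (11, 11), matching the example in
the comment.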
@@ -58,6 +80,7 @@ static SDValue GetGlobalReg(SelectionDAG &DAG, EVT Ty) { const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const { switch (Opcode) { case MipsISD::JmpLink: return "MipsISD::JmpLink"; + case MipsISD::TailCall: return "MipsISD::TailCall"; case MipsISD::Hi: return "MipsISD::Hi"; case MipsISD::Lo: return "MipsISD::Lo"; case MipsISD::GPRel: return "MipsISD::GPRel"; @@ -89,6 +112,20 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const { case MipsISD::LDR: return "MipsISD::LDR"; case MipsISD::SDL: return "MipsISD::SDL"; case MipsISD::SDR: return "MipsISD::SDR"; + case MipsISD::EXTP: return "MipsISD::EXTP"; + case MipsISD::EXTPDP: return "MipsISD::EXTPDP"; + case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H"; + case MipsISD::EXTR_W: return "MipsISD::EXTR_W"; + case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W"; + case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W"; + case MipsISD::SHILO: return "MipsISD::SHILO"; + case MipsISD::MTHLIP: return "MipsISD::MTHLIP"; + case MipsISD::MULT: return "MipsISD::MULT"; + case MipsISD::MULTU: return "MipsISD::MULTU"; + case MipsISD::MADD_DSP: return "MipsISD::MADD_DSPDSP"; + case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP"; + case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP"; + case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP"; default: return NULL; } } @@ -113,7 +150,22 @@ MipsTargetLowering(MipsTargetMachine &TM) if (Subtarget->inMips16Mode()) { addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass); - addRegisterClass(MVT::i32, &Mips::CPURARegRegClass); + } + + if (Subtarget->hasDSP()) { + MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8}; + + for (unsigned i = 0; i < array_lengthof(VecTys); ++i) { + addRegisterClass(VecTys[i], &Mips::DSPRegsRegClass); + + // Expand all builtin opcodes. 
+ for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc) + setOperationAction(Opc, VecTys[i], Expand); + + setOperationAction(ISD::LOAD, VecTys[i], Legal); + setOperationAction(ISD::STORE, VecTys[i], Legal); + setOperationAction(ISD::BITCAST, VecTys[i], Legal); + } } if (!TM.Options.UseSoftFloat) { @@ -160,10 +212,18 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::VASTART, MVT::Other, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); - setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); - setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); - setOperationAction(ISD::LOAD, MVT::i32, Custom); - setOperationAction(ISD::STORE, MVT::i32, Custom); + if (Subtarget->inMips16Mode()) { + setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand); + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); + } + else { + setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); + } + if (!Subtarget->inMips16Mode()) { + setOperationAction(ISD::LOAD, MVT::i32, Custom); + setOperationAction(ISD::STORE, MVT::i32, Custom); + } if (!TM.Options.NoNaNsFPMath) { setOperationAction(ISD::FABS, MVT::f32, Custom); @@ -187,6 +247,10 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom); } + setOperationAction(ISD::ADD, MVT::i32, Custom); + if (HasMips64) + setOperationAction(ISD::ADD, MVT::i64, Custom); + setOperationAction(ISD::SDIV, MVT::i32, Expand); setOperationAction(ISD::SREM, MVT::i32, Expand); setOperationAction(ISD::UDIV, MVT::i32, Expand); @@ -254,6 +318,9 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::VACOPY, MVT::Other, Expand); setOperationAction(ISD::VAEND, MVT::Other, Expand); + setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); + setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); + // Use the default for now setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); @@ -263,6 +330,21 @@ MipsTargetLowering(MipsTargetMachine &TM) setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand); setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); + if (Subtarget->inMips16Mode()) { + setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand); + } + setInsertFencesForAtomic(true); if (!Subtarget->hasSEInReg()) { @@ -310,6 +392,9 @@ MipsTargetLowering(MipsTargetMachine &TM) bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const { MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy; + if (Subtarget->inMips16Mode()) + return false; + switch (SVT) { case MVT::i64: case MVT::i32: @@ -785,6 +870,26 @@ SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) return SDValue(); } +void 
+MipsTargetLowering::LowerOperationWrapper(SDNode *N, + SmallVectorImpl &Results, + SelectionDAG &DAG) const { + SDValue Res = LowerOperation(SDValue(N, 0), DAG); + + for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I) + Results.push_back(Res.getValue(I)); +} + +void +MipsTargetLowering::ReplaceNodeResults(SDNode *N, + SmallVectorImpl &Results, + SelectionDAG &DAG) const { + SDValue Res = LowerOperation(SDValue(N, 0), DAG); + + for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I) + Results.push_back(Res.getValue(I)); +} + SDValue MipsTargetLowering:: LowerOperation(SDValue Op, SelectionDAG &DAG) const { @@ -811,6 +916,9 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const case ISD::SRL_PARTS: return LowerShiftRightParts(Op, DAG, false); case ISD::LOAD: return LowerLOAD(Op, DAG); case ISD::STORE: return LowerSTORE(Op, DAG); + case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); + case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); + case ISD::ADD: return LowerADD(Op, DAG); } return SDValue(); } @@ -919,6 +1027,70 @@ static MachineBasicBlock* ExpandCondMov(MachineInstr *MI, MachineBasicBlock *BB, return BB; } */ + +MachineBasicBlock * +MipsTargetLowering::EmitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{ + // $bb: + // bposge32_pseudo $vr0 + // => + // $bb: + // bposge32 $tbb + // $fbb: + // li $vr2, 0 + // b $sink + // $tbb: + // li $vr1, 1 + // $sink: + // $vr0 = phi($vr2, $fbb, $vr1, $tbb) + + MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo(); + const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + const TargetRegisterClass *RC = &Mips::CPURegsRegClass; + DebugLoc DL = MI->getDebugLoc(); + const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB)); + MachineFunction *F = BB->getParent(); + MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *Sink = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, FBB); + F->insert(It, TBB); + F->insert(It, Sink); + + // Transfer the remainder of BB and its successor edges to Sink. + Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)), + BB->end()); + Sink->transferSuccessorsAndUpdatePHIs(BB); + + // Add successors. + BB->addSuccessor(FBB); + BB->addSuccessor(TBB); + FBB->addSuccessor(Sink); + TBB->addSuccessor(Sink); + + // Insert the real bposge32 instruction to $BB. + BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB); + + // Fill $FBB. + unsigned VR2 = RegInfo.createVirtualRegister(RC); + BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2) + .addReg(Mips::ZERO).addImm(0); + BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink); + + // Fill $TBB. + unsigned VR1 = RegInfo.createVirtualRegister(RC); + BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1) + .addReg(Mips::ZERO).addImm(1); + + // Insert phi function to $Sink. + BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI), + MI->getOperand(0).getReg()) + .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB); + + MI->eraseFromParent(); // The pseudo instruction is gone now. 
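+  // The diamond built above materializes a boolean: the PHI in $sink yields
+  // 1 when the bposge32 branch is taken and 0 otherwise. Sink is returned so
+  // that instruction emission continues in that block.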
+ return Sink; +} + MachineBasicBlock * MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *BB) const { @@ -1027,6 +1199,8 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, case Mips::ATOMIC_CMP_SWAP_I64: case Mips::ATOMIC_CMP_SWAP_I64_P8: return EmitAtomicCmpSwap(MI, BB, 8); + case Mips::BPOSGE32_PSEUDO: + return EmitBPOSGE32(MI, BB); } } @@ -1571,15 +1745,16 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op, if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) { SDVTList VTs = DAG.getVTList(MVT::i32); - MipsTargetObjectFile &TLOF = (MipsTargetObjectFile&)getObjFileLowering(); + const MipsTargetObjectFile &TLOF = + (const MipsTargetObjectFile&)getObjFileLowering(); // %gp_rel relocation if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) { SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0, MipsII::MO_GPREL); SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1); - SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32); - return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode); + SDValue GPReg = DAG.getRegister(Mips::GP, MVT::i32); + return DAG.getNode(ISD::ADD, dl, MVT::i32, GPReg, GPRelNode); } // %hi/%lo relocation SDValue GAHi = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0, @@ -1620,8 +1795,10 @@ SDValue MipsTargetLowering::LowerBlockAddress(SDValue Op, if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) { // %hi/%lo relocation - SDValue BAHi = DAG.getBlockAddress(BA, MVT::i32, true, MipsII::MO_ABS_HI); - SDValue BALo = DAG.getBlockAddress(BA, MVT::i32, true, MipsII::MO_ABS_LO); + SDValue BAHi = + DAG.getTargetBlockAddress(BA, MVT::i32, 0, MipsII::MO_ABS_HI); + SDValue BALo = + DAG.getTargetBlockAddress(BA, MVT::i32, 0, MipsII::MO_ABS_LO); SDValue Hi = DAG.getNode(MipsISD::Hi, dl, MVT::i32, BAHi); SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, BALo); return DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, Lo); @@ -1630,10 +1807,10 @@ SDValue MipsTargetLowering::LowerBlockAddress(SDValue Op, EVT ValTy = Op.getValueType(); unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT; unsigned OFSTFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO; - SDValue BAGOTOffset = DAG.getBlockAddress(BA, ValTy, true, GOTFlag); + SDValue BAGOTOffset = DAG.getTargetBlockAddress(BA, ValTy, 0, GOTFlag); BAGOTOffset = DAG.getNode(MipsISD::Wrapper, dl, ValTy, GetGlobalReg(DAG, ValTy), BAGOTOffset); - SDValue BALOOffset = DAG.getBlockAddress(BA, ValTy, true, OFSTFlag); + SDValue BALOOffset = DAG.getTargetBlockAddress(BA, ValTy, 0, OFSTFlag); SDValue Load = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), BAGOTOffset, MachinePointerInfo(), false, false, false, 0); SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, BALOOffset); @@ -2224,6 +2401,172 @@ SDValue MipsTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { return CreateStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7); } +// This function expands mips intrinsic nodes which have 64-bit input operands +// or output values. +// +// out64 = intrinsic-node in64 +// => +// lo = copy (extract-element (in64, 0)) +// hi = copy (extract-element (in64, 1)) +// mips-specific-node +// v0 = copy lo +// v1 = copy hi +// out64 = merge-values (v0, v1) +// +static SDValue LowerDSPIntr(SDValue Op, SelectionDAG &DAG, + unsigned Opc, bool HasI64In, bool HasI64Out) { + DebugLoc DL = Op.getDebugLoc(); + bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other; + SDValue Chain = HasChainIn ? 
Op->getOperand(0) : DAG.getEntryNode(); + SmallVector Ops; + + if (HasI64In) { + SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, + Op->getOperand(1 + HasChainIn), + DAG.getConstant(0, MVT::i32)); + SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, + Op->getOperand(1 + HasChainIn), + DAG.getConstant(1, MVT::i32)); + + Chain = DAG.getCopyToReg(Chain, DL, Mips::LO, InLo, SDValue()); + Chain = DAG.getCopyToReg(Chain, DL, Mips::HI, InHi, Chain.getValue(1)); + + Ops.push_back(Chain); + Ops.append(Op->op_begin() + HasChainIn + 2, Op->op_end()); + Ops.push_back(Chain.getValue(1)); + } else { + Ops.push_back(Chain); + Ops.append(Op->op_begin() + HasChainIn + 1, Op->op_end()); + } + + if (!HasI64Out) + return DAG.getNode(Opc, DL, Op->value_begin(), Op->getNumValues(), + Ops.begin(), Ops.size()); + + SDValue Intr = DAG.getNode(Opc, DL, DAG.getVTList(MVT::Other, MVT::Glue), + Ops.begin(), Ops.size()); + SDValue OutLo = DAG.getCopyFromReg(Intr.getValue(0), DL, Mips::LO, MVT::i32, + Intr.getValue(1)); + SDValue OutHi = DAG.getCopyFromReg(OutLo.getValue(1), DL, Mips::HI, MVT::i32, + OutLo.getValue(2)); + SDValue Out = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, OutLo, OutHi); + + if (!HasChainIn) + return Out; + + SDValue Vals[] = { Out, OutHi.getValue(1) }; + return DAG.getMergeValues(Vals, 2, DL); +} + +SDValue MipsTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, + SelectionDAG &DAG) const { + switch (cast(Op->getOperand(0))->getZExtValue()) { + default: + return SDValue(); + case Intrinsic::mips_shilo: + return LowerDSPIntr(Op, DAG, MipsISD::SHILO, true, true); + case Intrinsic::mips_dpau_h_qbl: + return LowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL, true, true); + case Intrinsic::mips_dpau_h_qbr: + return LowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR, true, true); + case Intrinsic::mips_dpsu_h_qbl: + return LowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL, true, true); + case Intrinsic::mips_dpsu_h_qbr: + return LowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR, true, true); + case Intrinsic::mips_dpa_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH, true, true); + case Intrinsic::mips_dps_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH, true, true); + case Intrinsic::mips_dpax_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH, true, true); + case Intrinsic::mips_dpsx_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH, true, true); + case Intrinsic::mips_mulsa_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH, true, true); + case Intrinsic::mips_mult: + return LowerDSPIntr(Op, DAG, MipsISD::MULT, false, true); + case Intrinsic::mips_multu: + return LowerDSPIntr(Op, DAG, MipsISD::MULTU, false, true); + case Intrinsic::mips_madd: + return LowerDSPIntr(Op, DAG, MipsISD::MADD_DSP, true, true); + case Intrinsic::mips_maddu: + return LowerDSPIntr(Op, DAG, MipsISD::MADDU_DSP, true, true); + case Intrinsic::mips_msub: + return LowerDSPIntr(Op, DAG, MipsISD::MSUB_DSP, true, true); + case Intrinsic::mips_msubu: + return LowerDSPIntr(Op, DAG, MipsISD::MSUBU_DSP, true, true); + } +} + +SDValue MipsTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, + SelectionDAG &DAG) const { + switch (cast(Op->getOperand(1))->getZExtValue()) { + default: + return SDValue(); + case Intrinsic::mips_extp: + return LowerDSPIntr(Op, DAG, MipsISD::EXTP, true, false); + case Intrinsic::mips_extpdp: + return LowerDSPIntr(Op, DAG, MipsISD::EXTPDP, true, false); + case Intrinsic::mips_extr_w: + return LowerDSPIntr(Op, DAG, MipsISD::EXTR_W, true, false); + case Intrinsic::mips_extr_r_w: + return 
LowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W, true, false); + case Intrinsic::mips_extr_rs_w: + return LowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W, true, false); + case Intrinsic::mips_extr_s_h: + return LowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H, true, false); + case Intrinsic::mips_mthlip: + return LowerDSPIntr(Op, DAG, MipsISD::MTHLIP, true, true); + case Intrinsic::mips_mulsaq_s_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH, true, true); + case Intrinsic::mips_maq_s_w_phl: + return LowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL, true, true); + case Intrinsic::mips_maq_s_w_phr: + return LowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR, true, true); + case Intrinsic::mips_maq_sa_w_phl: + return LowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL, true, true); + case Intrinsic::mips_maq_sa_w_phr: + return LowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR, true, true); + case Intrinsic::mips_dpaq_s_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH, true, true); + case Intrinsic::mips_dpsq_s_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH, true, true); + case Intrinsic::mips_dpaq_sa_l_w: + return LowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W, true, true); + case Intrinsic::mips_dpsq_sa_l_w: + return LowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W, true, true); + case Intrinsic::mips_dpaqx_s_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH, true, true); + case Intrinsic::mips_dpaqx_sa_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH, true, true); + case Intrinsic::mips_dpsqx_s_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH, true, true); + case Intrinsic::mips_dpsqx_sa_w_ph: + return LowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH, true, true); + } +} + +SDValue MipsTargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const { + if (Op->getOperand(0).getOpcode() != ISD::FRAMEADDR + || cast + (Op->getOperand(0).getOperand(0))->getZExtValue() != 0 + || Op->getOperand(1).getOpcode() != ISD::FRAME_TO_ARGS_OFFSET) + return SDValue(); + + // The pattern + // (add (frameaddr 0), (frame_to_args_offset)) + // results from lowering llvm.eh.dwarf.cfa intrinsic. Transform it to + // (add FrameObject, 0) + // where FrameObject is a fixed StackObject with offset 0 which points to + // the old stack pointer. + MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); + EVT ValTy = Op->getValueType(0); + int FI = MFI->CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false); + SDValue InArgsAddr = DAG.getFrameIndex(FI, ValTy); + return DAG.getNode(ISD::ADD, Op->getDebugLoc(), ValTy, InArgsAddr, + DAG.getConstant(0, ValTy)); +} + //===----------------------------------------------------------------------===// // Calling Convention Implementation //===----------------------------------------------------------------------===// @@ -2259,16 +2602,9 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, Mips::D6, Mips::D7 }; - // ByVal Args - if (ArgFlags.isByVal()) { - State.HandleByVal(ValNo, ValVT, LocVT, LocInfo, - 1 /*MinSize*/, 4 /*MinAlign*/, ArgFlags); - unsigned NextReg = (State.getNextStackOffset() + 3) / 4; - for (unsigned r = State.getFirstUnallocated(IntRegs, IntRegsSize); - r < std::min(IntRegsSize, NextReg); ++r) - State.AllocateReg(IntRegs[r]); - return false; - } + // Do not process byval args here. 
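+  // Byval arguments are assigned by MipsCC::handleByValArg instead;
+  // returning true here reports them as not handled by this routine.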
+ if (ArgFlags.isByVal()) + return true; // Promote i8 and i16 if (LocVT == MVT::i8 || LocVT == MVT::i16) { @@ -2323,279 +2659,72 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT, } else llvm_unreachable("Cannot handle this ValVT."); - unsigned SizeInBytes = ValVT.getSizeInBits() >> 3; - unsigned Offset = State.AllocateStack(SizeInBytes, OrigAlign); - - if (!Reg) + if (!Reg) { + unsigned Offset = State.AllocateStack(ValVT.getSizeInBits() >> 3, + OrigAlign); State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); - else + } else State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); - return false; // CC must always match -} - -static const uint16_t Mips64IntRegs[8] = - {Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64, - Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64}; -static const uint16_t Mips64DPRegs[8] = - {Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64, - Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64}; - -static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT, - CCValAssign::LocInfo LocInfo, - ISD::ArgFlagsTy ArgFlags, CCState &State) { - unsigned Align = std::max(ArgFlags.getByValAlign(), (unsigned)8); - unsigned Size = (ArgFlags.getByValSize() + 7) / 8 * 8; - unsigned FirstIdx = State.getFirstUnallocated(Mips64IntRegs, 8); - - assert(Align <= 16 && "Cannot handle alignments larger than 16."); - - // If byval is 16-byte aligned, the first arg register must be even. - if ((Align == 16) && (FirstIdx % 2)) { - State.AllocateReg(Mips64IntRegs[FirstIdx], Mips64DPRegs[FirstIdx]); - ++FirstIdx; - } - - // Mark the registers allocated. - for (unsigned I = FirstIdx; Size && (I < 8); Size -= 8, ++I) - State.AllocateReg(Mips64IntRegs[I], Mips64DPRegs[I]); - - // Allocate space on caller's stack. - unsigned Offset = State.AllocateStack(Size, Align); - - if (FirstIdx < 8) - State.addLoc(CCValAssign::getReg(ValNo, ValVT, Mips64IntRegs[FirstIdx], - LocVT, LocInfo)); - else - State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); - - return true; + return false; } #include "MipsGenCallingConv.inc" -static void -AnalyzeMips64CallOperands(CCState &CCInfo, - const SmallVectorImpl &Outs) { - unsigned NumOps = Outs.size(); - for (unsigned i = 0; i != NumOps; ++i) { - MVT ArgVT = Outs[i].VT; - ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; - bool R; - - if (Outs[i].IsFixed) - R = CC_MipsN(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); - else - R = CC_MipsN_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); - - if (R) { -#ifndef NDEBUG - dbgs() << "Call operand #" << i << " has unhandled type " - << EVT(ArgVT).getEVTString(); -#endif - llvm_unreachable(0); - } - } -} - //===----------------------------------------------------------------------===// // Call Calling Convention Implementation //===----------------------------------------------------------------------===// static const unsigned O32IntRegsSize = 4; -static const uint16_t O32IntRegs[] = { - Mips::A0, Mips::A1, Mips::A2, Mips::A3 -}; - // Return next O32 integer argument register. static unsigned getNextIntArgReg(unsigned Reg) { assert((Reg == Mips::A0) || (Reg == Mips::A2)); return (Reg == Mips::A0) ? Mips::A1 : Mips::A3; } -// Write ByVal Arg to arg registers and stack. 
-static void -WriteByValArg(SDValue Chain, DebugLoc dl, - SmallVector, 16> &RegsToPass, - SmallVector &MemOpChains, SDValue StackPtr, - MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, - const CCValAssign &VA, const ISD::ArgFlagsTy &Flags, - MVT PtrType, bool isLittle) { - unsigned LocMemOffset = VA.getLocMemOffset(); - unsigned Offset = 0; - uint32_t RemainingSize = Flags.getByValSize(); - unsigned ByValAlign = Flags.getByValAlign(); - - // Copy the first 4 words of byval arg to registers A0 - A3. - // FIXME: Use a stricter alignment if it enables better optimization in passes - // run later. - for (; RemainingSize >= 4 && LocMemOffset < 4 * 4; - Offset += 4, RemainingSize -= 4, LocMemOffset += 4) { - SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg, - DAG.getConstant(Offset, MVT::i32)); - SDValue LoadVal = DAG.getLoad(MVT::i32, dl, Chain, LoadPtr, - MachinePointerInfo(), false, false, false, - std::min(ByValAlign, (unsigned )4)); - MemOpChains.push_back(LoadVal.getValue(1)); - unsigned DstReg = O32IntRegs[LocMemOffset / 4]; - RegsToPass.push_back(std::make_pair(DstReg, LoadVal)); - } - - if (RemainingSize == 0) - return; +/// IsEligibleForTailCallOptimization - Check whether the call is eligible +/// for tail call optimization. +bool MipsTargetLowering:: +IsEligibleForTailCallOptimization(const MipsCC &MipsCCInfo, + unsigned NextStackOffset, + const MipsFunctionInfo& FI) const { + if (!EnableMipsTailCalls) + return false; - // If there still is a register available for argument passing, write the - // remaining part of the structure to it using subword loads and shifts. - if (LocMemOffset < 4 * 4) { - assert(RemainingSize <= 3 && RemainingSize >= 1 && - "There must be one to three bytes remaining."); - unsigned LoadSize = (RemainingSize == 3 ? 2 : RemainingSize); - SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg, - DAG.getConstant(Offset, MVT::i32)); - unsigned Alignment = std::min(ByValAlign, (unsigned )4); - SDValue LoadVal = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, - LoadPtr, MachinePointerInfo(), - MVT::getIntegerVT(LoadSize * 8), false, - false, Alignment); - MemOpChains.push_back(LoadVal.getValue(1)); - - // If target is big endian, shift it to the most significant half-word or - // byte. - if (!isLittle) - LoadVal = DAG.getNode(ISD::SHL, dl, MVT::i32, LoadVal, - DAG.getConstant(32 - LoadSize * 8, MVT::i32)); - - Offset += LoadSize; - RemainingSize -= LoadSize; - - // Read second subword if necessary. - if (RemainingSize != 0) { - assert(RemainingSize == 1 && "There must be one byte remaining."); - LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg, - DAG.getConstant(Offset, MVT::i32)); - unsigned Alignment = std::min(ByValAlign, (unsigned )2); - SDValue Subword = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain, - LoadPtr, MachinePointerInfo(), - MVT::i8, false, false, Alignment); - MemOpChains.push_back(Subword.getValue(1)); - // Insert the loaded byte to LoadVal. - // FIXME: Use INS if supported by target. - unsigned ShiftAmt = isLittle ? 16 : 8; - SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i32, Subword, - DAG.getConstant(ShiftAmt, MVT::i32)); - LoadVal = DAG.getNode(ISD::OR, dl, MVT::i32, LoadVal, Shift); - } + // No tail call optimization for mips16. + if (Subtarget->inMips16Mode()) + return false; - unsigned DstReg = O32IntRegs[LocMemOffset / 4]; - RegsToPass.push_back(std::make_pair(DstReg, LoadVal)); - return; - } + // Return false if either the callee or caller has a byval argument. 
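+  // (Byval arguments are written into the caller's outgoing argument area,
+  // see passByValArg below, which does not mix well with reusing the
+  // caller's frame for a tail call.)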
+ if (MipsCCInfo.hasByValArg() || FI.hasByvalArg()) + return false; - // Copy remaining part of byval arg using memcpy. - SDValue Src = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg, - DAG.getConstant(Offset, MVT::i32)); - SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, - DAG.getIntPtrConstant(LocMemOffset)); - Chain = DAG.getMemcpy(Chain, dl, Dst, Src, - DAG.getConstant(RemainingSize, MVT::i32), - std::min(ByValAlign, (unsigned)4), - /*isVolatile=*/false, /*AlwaysInline=*/false, - MachinePointerInfo(0), MachinePointerInfo(0)); - MemOpChains.push_back(Chain); + // Return true if the callee's argument area is no larger than the + // caller's. + return NextStackOffset <= FI.getIncomingArgSize(); } -// Copy Mips64 byVal arg to registers and stack. -void static -PassByValArg64(SDValue Chain, DebugLoc dl, - SmallVector, 16> &RegsToPass, - SmallVector &MemOpChains, SDValue StackPtr, - MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, - const CCValAssign &VA, const ISD::ArgFlagsTy &Flags, - EVT PtrTy, bool isLittle) { - unsigned ByValSize = Flags.getByValSize(); - unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8); - bool IsRegLoc = VA.isRegLoc(); - unsigned Offset = 0; // Offset in # of bytes from the beginning of struct. - unsigned LocMemOffset = 0; - unsigned MemCpySize = ByValSize; - - if (!IsRegLoc) - LocMemOffset = VA.getLocMemOffset(); - else { - const uint16_t *Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8, - VA.getLocReg()); - const uint16_t *RegEnd = Mips64IntRegs + 8; - - // Copy double words to registers. - for (; (Reg != RegEnd) && (ByValSize >= Offset + 8); ++Reg, Offset += 8) { - SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg, - DAG.getConstant(Offset, PtrTy)); - SDValue LoadVal = DAG.getLoad(MVT::i64, dl, Chain, LoadPtr, - MachinePointerInfo(), false, false, false, - Alignment); - MemOpChains.push_back(LoadVal.getValue(1)); - RegsToPass.push_back(std::make_pair(*Reg, LoadVal)); - } - - // Return if the struct has been fully copied. - if (!(MemCpySize = ByValSize - Offset)) - return; - - // If there is an argument register available, copy the remainder of the - // byval argument with sub-doubleword loads and shifts. - if (Reg != RegEnd) { - assert((ByValSize < Offset + 8) && - "Size of the remainder should be smaller than 8-byte."); - SDValue Val; - for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) { - unsigned RemSize = ByValSize - Offset; - - if (RemSize < LoadSize) - continue; - - SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg, - DAG.getConstant(Offset, PtrTy)); - SDValue LoadVal = - DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i64, Chain, LoadPtr, - MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8), - false, false, Alignment); - MemOpChains.push_back(LoadVal.getValue(1)); - - // Offset in number of bits from double word boundary. - unsigned OffsetDW = (Offset % 8) * 8; - unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8); - SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i64, LoadVal, - DAG.getConstant(Shamt, MVT::i32)); - - Val = Val.getNode() ? 
DAG.getNode(ISD::OR, dl, MVT::i64, Val, Shift) : - Shift; - Offset += LoadSize; - Alignment = std::min(Alignment, LoadSize); - } - - RegsToPass.push_back(std::make_pair(*Reg, Val)); - return; - } +SDValue +MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset, + SDValue Chain, SDValue Arg, DebugLoc DL, + bool IsTailCall, SelectionDAG &DAG) const { + if (!IsTailCall) { + SDValue PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr, + DAG.getIntPtrConstant(Offset)); + return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo(), false, + false, 0); } - assert(MemCpySize && "MemCpySize must not be zero."); - - // Copy remainder of byval arg to it with memcpy. - SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg, - DAG.getConstant(Offset, PtrTy)); - SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, - DAG.getIntPtrConstant(LocMemOffset)); - Chain = DAG.getMemcpy(Chain, dl, Dst, Src, - DAG.getConstant(MemCpySize, PtrTy), Alignment, - /*isVolatile=*/false, /*AlwaysInline=*/false, - MachinePointerInfo(0), MachinePointerInfo(0)); - MemOpChains.push_back(Chain); + MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); + int FI = MFI->CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false); + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); + return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(), + /*isVolatile=*/ true, false, 0); } /// LowerCall - functions arguments are copied from virtual regs to /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted. -/// TODO: isTailCall. SDValue MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl &InVals) const { @@ -2610,56 +2739,49 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, CallingConv::ID CallConv = CLI.CallConv; bool isVarArg = CLI.IsVarArg; - // MIPs target does not yet support tail call optimization. - isTailCall = false; - MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering(); bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_; - MipsFunctionInfo *MipsFI = MF.getInfo(); // Analyze operands of the call, assigning locations to each operand. SmallVector ArgLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), getTargetMachine(), ArgLocs, *DAG.getContext()); + MipsCC MipsCCInfo(CallConv, isVarArg, IsO32, CCInfo); - if (CallConv == CallingConv::Fast) - CCInfo.AnalyzeCallOperands(Outs, CC_Mips_FastCC); - else if (IsO32) - CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32); - else if (HasMips64) - AnalyzeMips64CallOperands(CCInfo, Outs); - else - CCInfo.AnalyzeCallOperands(Outs, CC_Mips); + MipsCCInfo.analyzeCallOperands(Outs); // Get a count of how many bytes are to be pushed on the stack. unsigned NextStackOffset = CCInfo.getNextStackOffset(); - unsigned StackAlignment = TFL->getStackAlignment(); - NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment); - // Update size of the maximum argument space. - // For O32, a minimum of four words (16 bytes) of argument space is - // allocated. - if (IsO32 && (CallConv != CallingConv::Fast)) - NextStackOffset = std::max(NextStackOffset, (unsigned)16); + // Check if it's really possible to do a tail call. + if (isTailCall) + isTailCall = + IsEligibleForTailCallOptimization(MipsCCInfo, NextStackOffset, + *MF.getInfo()); + + if (isTailCall) + ++NumTailCalls; // Chain is the output chain of the last Load/Store or CopyToReg node. 
// ByValChain is the output chain of the last Memcpy node created for copying // byval arguments to the stack. + unsigned StackAlignment = TFL->getStackAlignment(); + NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment); SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true); - Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal); + + if (!isTailCall) + Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal); SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, IsN64 ? Mips::SP_64 : Mips::SP, getPointerTy()); - if (MipsFI->getMaxCallFrameSize() < NextStackOffset) - MipsFI->setMaxCallFrameSize(NextStackOffset); - // With EABI is it possible to have 16 args on registers. SmallVector, 16> RegsToPass; SmallVector MemOpChains; + MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin(); // Walk the register/memloc assignments, inserting copies/loads. for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { @@ -2672,14 +2794,12 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (Flags.isByVal()) { assert(Flags.getByValSize() && "ByVal args of size 0 should have been ignored by front-end."); - if (IsO32) - WriteByValArg(Chain, dl, RegsToPass, MemOpChains, StackPtr, - MFI, DAG, Arg, VA, Flags, getPointerTy(), - Subtarget->isLittle()); - else - PassByValArg64(Chain, dl, RegsToPass, MemOpChains, StackPtr, - MFI, DAG, Arg, VA, Flags, getPointerTy(), - Subtarget->isLittle()); + assert(ByValArg != MipsCCInfo.byval_end()); + assert(!isTailCall && + "Do not tail-call optimize if there is a byval argument."); + passByValArg(Chain, dl, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg, + MipsCCInfo, *ByValArg, Flags, Subtarget->isLittle()); + ++ByValArg; continue; } @@ -2729,10 +2849,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // emit ISD::STORE whichs stores the // parameter value to a stack Location - SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, - DAG.getIntPtrConstant(VA.getLocMemOffset())); - MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, - MachinePointerInfo(), false, false, 0)); + MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(), + Chain, Arg, dl, isTailCall, DAG)); } // Transform all store nodes into one single node because all store @@ -2861,6 +2979,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (InFlag.getNode()) Ops.push_back(InFlag); + if (isTailCall) + return DAG.getNode(MipsISD::TailCall, dl, MVT::Other, &Ops[0], Ops.size()); + Chain = DAG.getNode(MipsISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size()); InFlag = Chain.getValue(1); @@ -2904,70 +3025,6 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, //===----------------------------------------------------------------------===// // Formal Arguments Calling Convention Implementation //===----------------------------------------------------------------------===// -static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl, - std::vector &OutChains, - SelectionDAG &DAG, unsigned NumWords, SDValue FIN, - const CCValAssign &VA, const ISD::ArgFlagsTy &Flags, - const Argument *FuncArg) { - unsigned LocMem = VA.getLocMemOffset(); - unsigned FirstWord = LocMem / 4; - - // copy register A0 - A3 to frame object - for (unsigned i = 0; i < NumWords; ++i) { - unsigned CurWord = FirstWord + i; - if (CurWord >= O32IntRegsSize) - break; - - unsigned SrcReg = O32IntRegs[CurWord]; - unsigned Reg = AddLiveIn(MF, SrcReg, &Mips::CPURegsRegClass); - SDValue StorePtr 
= DAG.getNode(ISD::ADD, dl, MVT::i32, FIN, - DAG.getConstant(i * 4, MVT::i32)); - SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(Reg, MVT::i32), - StorePtr, MachinePointerInfo(FuncArg, i * 4), - false, false, 0); - OutChains.push_back(Store); - } -} - -// Create frame object on stack and copy registers used for byval passing to it. -static unsigned -CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl, - std::vector &OutChains, SelectionDAG &DAG, - const CCValAssign &VA, const ISD::ArgFlagsTy &Flags, - MachineFrameInfo *MFI, bool IsRegLoc, - SmallVectorImpl &InVals, MipsFunctionInfo *MipsFI, - EVT PtrTy, const Argument *FuncArg) { - const uint16_t *Reg = Mips64IntRegs + 8; - int FOOffset; // Frame object offset from virtual frame pointer. - - if (IsRegLoc) { - Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8, VA.getLocReg()); - FOOffset = (Reg - Mips64IntRegs) * 8 - 8 * 8; - } - else - FOOffset = VA.getLocMemOffset(); - - // Create frame object. - unsigned NumRegs = (Flags.getByValSize() + 7) / 8; - unsigned LastFI = MFI->CreateFixedObject(NumRegs * 8, FOOffset, true); - SDValue FIN = DAG.getFrameIndex(LastFI, PtrTy); - InVals.push_back(FIN); - - // Copy arg registers. - for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs); - ++Reg, ++I) { - unsigned VReg = AddLiveIn(MF, *Reg, &Mips::CPU64RegsRegClass); - SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN, - DAG.getConstant(I * 8, PtrTy)); - SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64), - StorePtr, MachinePointerInfo(FuncArg, I * 8), - false, false, 0); - OutChains.push_back(Store); - } - - return LastFI; -} - /// LowerFormalArguments - transform physical registers into virtual registers /// and generate load operations for arguments places on the stack. SDValue @@ -2991,20 +3048,21 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, SmallVector ArgLocs; CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), getTargetMachine(), ArgLocs, *DAG.getContext()); + MipsCC MipsCCInfo(CallConv, isVarArg, IsO32, CCInfo); - if (CallConv == CallingConv::Fast) - CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FastCC); - else if (IsO32) - CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32); - else - CCInfo.AnalyzeFormalArguments(Ins, CC_Mips); + MipsCCInfo.analyzeFormalArguments(Ins); + MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(), + MipsCCInfo.hasByValArg()); Function::const_arg_iterator FuncArg = DAG.getMachineFunction().getFunction()->arg_begin(); - int LastFI = 0;// MipsFI->LastInArgFI is 0 at the entry of this function. 
+ unsigned CurArgIdx = 0; + MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin(); - for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++FuncArg) { + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; + std::advance(FuncArg, Ins[i].OrigArgIndex - CurArgIdx); + CurArgIdx = Ins[i].OrigArgIndex; EVT ValVT = VA.getValVT(); ISD::ArgFlagsTy Flags = Ins[i].Flags; bool IsRegLoc = VA.isRegLoc(); @@ -3012,18 +3070,10 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, if (Flags.isByVal()) { assert(Flags.getByValSize() && "ByVal args of size 0 should have been ignored by front-end."); - if (IsO32) { - unsigned NumWords = (Flags.getByValSize() + 3) / 4; - LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(), - true); - SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy()); - InVals.push_back(FIN); - ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags, - &*FuncArg); - } else // N32/64 - LastFI = CopyMips64ByValRegs(MF, Chain, dl, OutChains, DAG, VA, Flags, - MFI, IsRegLoc, InVals, MipsFI, - getPointerTy(), &*FuncArg); + assert(ByValArg != MipsCCInfo.byval_end()); + copyByValRegs(Chain, dl, OutChains, DAG, Flags, InVals, &*FuncArg, + MipsCCInfo, *ByValArg); + ++ByValArg; continue; } @@ -3085,13 +3135,13 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, assert(VA.isMemLoc()); // The stack pointer offset is relative to the caller stack frame. - LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, + int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, VA.getLocMemOffset(), true); // Create load nodes to retrieve arguments from the stack - SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy()); + SDValue FIN = DAG.getFrameIndex(FI, getPointerTy()); InVals.push_back(DAG.getLoad(ValVT, dl, Chain, FIN, - MachinePointerInfo::getFixedStack(LastFI), + MachinePointerInfo::getFixedStack(FI), false, false, false, 0)); } } @@ -3102,55 +3152,16 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { unsigned Reg = MipsFI->getSRetReturnReg(); if (!Reg) { - Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32)); + Reg = MF.getRegInfo(). + createVirtualRegister(getRegClassFor(IsN64 ? MVT::i64 : MVT::i32)); MipsFI->setSRetReturnReg(Reg); } SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); } - if (isVarArg) { - unsigned NumOfRegs = IsO32 ? 4 : 8; - const uint16_t *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs; - unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs); - int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot. - const TargetRegisterClass *RC = IsO32 ? - (const TargetRegisterClass*)&Mips::CPURegsRegClass : - (const TargetRegisterClass*)&Mips::CPU64RegsRegClass; - unsigned RegSize = RC->getSize(); - int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize; - - // Offset of the first variable argument from stack pointer. - int FirstVaArgOffset; - - if (IsO32 || (Idx == NumOfRegs)) { - FirstVaArgOffset = - (CCInfo.getNextStackOffset() + RegSize - 1) / RegSize * RegSize; - } else - FirstVaArgOffset = RegSlotOffset; - - // Record the frame index of the first variable argument - // which is a value necessary to VASTART. 
- LastFI = MFI->CreateFixedObject(RegSize, FirstVaArgOffset, true); - MipsFI->setVarArgsFrameIndex(LastFI); - - // Copy the integer registers that have not been used for argument passing - // to the argument register save area. For O32, the save area is allocated - // in the caller's stack frame, while for N32/64, it is allocated in the - // callee's stack frame. - for (int StackOffset = RegSlotOffset; - Idx < NumOfRegs; ++Idx, StackOffset += RegSize) { - unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegs[Idx], RC); - SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, - MVT::getIntegerVT(RegSize * 8)); - LastFI = MFI->CreateFixedObject(RegSize, StackOffset, true); - SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy()); - OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff, - MachinePointerInfo(), false, false, 0)); - } - } - - MipsFI->setLastInArgFI(LastFI); + if (isVarArg) + writeVarArgRegs(OutChains, MipsCCInfo, Chain, dl, DAG); // All stores are grouped in one node to allow the matching between // the size of Ins and InVals. This only happens when on varg functions @@ -3167,6 +3178,17 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, // Return Value Calling Convention Implementation //===----------------------------------------------------------------------===// +bool +MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv, + MachineFunction &MF, bool isVarArg, + const SmallVectorImpl &Outs, + LLVMContext &Context) const { + SmallVector RVLocs; + CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), + RVLocs, Context); + return CCInfo.CheckReturn(Outs, RetCC_Mips); +} + SDValue MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, @@ -3219,9 +3241,11 @@ MipsTargetLowering::LowerReturn(SDValue Chain, if (!Reg) llvm_unreachable("sret virtual register not created in the entry block"); SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); + unsigned V0 = IsN64 ? Mips::V0_64 : Mips::V0; - Chain = DAG.getCopyToReg(Chain, dl, Mips::V0, Val, Flag); + Chain = DAG.getCopyToReg(Chain, dl, V0, Val, Flag); Flag = Chain.getValue(1); + MF.getRegInfo().addLiveOut(V0); } // Return on Mips is always a "jr $ra" @@ -3325,8 +3349,11 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const case 'd': // Address register. Same as 'r' unless generating MIPS16 code. case 'y': // Same as 'r'. Exists for compatibility. 
case 'r': - if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) + if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) { + if (Subtarget->inMips16Mode()) + return std::make_pair(0U, &Mips::CPU16RegsRegClass); return std::make_pair(0U, &Mips::CPURegsRegClass); + } if (VT == MVT::i64 && !HasMips64) return std::make_pair(0U, &Mips::CPURegsRegClass); if (VT == MVT::i64 && HasMips64) @@ -3485,3 +3512,316 @@ unsigned MipsTargetLowering::getJumpTableEncoding() const { return TargetLowering::getJumpTableEncoding(); } + +MipsTargetLowering::MipsCC::MipsCC(CallingConv::ID CallConv, bool IsVarArg, + bool IsO32, CCState &Info) : CCInfo(Info) { + UseRegsForByval = true; + + if (IsO32) { + RegSize = 4; + NumIntArgRegs = array_lengthof(O32IntRegs); + ReservedArgArea = 16; + IntArgRegs = ShadowRegs = O32IntRegs; + FixedFn = VarFn = CC_MipsO32; + } else { + RegSize = 8; + NumIntArgRegs = array_lengthof(Mips64IntRegs); + ReservedArgArea = 0; + IntArgRegs = Mips64IntRegs; + ShadowRegs = Mips64DPRegs; + FixedFn = CC_MipsN; + VarFn = CC_MipsN_VarArg; + } + + if (CallConv == CallingConv::Fast) { + assert(!IsVarArg); + UseRegsForByval = false; + ReservedArgArea = 0; + FixedFn = VarFn = CC_Mips_FastCC; + } + + // Pre-allocate reserved argument area. + CCInfo.AllocateStack(ReservedArgArea, 1); +} + +void MipsTargetLowering::MipsCC:: +analyzeCallOperands(const SmallVectorImpl &Args) { + unsigned NumOpnds = Args.size(); + + for (unsigned I = 0; I != NumOpnds; ++I) { + MVT ArgVT = Args[I].VT; + ISD::ArgFlagsTy ArgFlags = Args[I].Flags; + bool R; + + if (ArgFlags.isByVal()) { + handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags); + continue; + } + + if (Args[I].IsFixed) + R = FixedFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); + else + R = VarFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); + + if (R) { +#ifndef NDEBUG + dbgs() << "Call operand #" << I << " has unhandled type " + << EVT(ArgVT).getEVTString(); +#endif + llvm_unreachable(0); + } + } +} + +void MipsTargetLowering::MipsCC:: +analyzeFormalArguments(const SmallVectorImpl &Args) { + unsigned NumArgs = Args.size(); + + for (unsigned I = 0; I != NumArgs; ++I) { + MVT ArgVT = Args[I].VT; + ISD::ArgFlagsTy ArgFlags = Args[I].Flags; + + if (ArgFlags.isByVal()) { + handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags); + continue; + } + + if (!FixedFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo)) + continue; + +#ifndef NDEBUG + dbgs() << "Formal Arg #" << I << " has unhandled type " + << EVT(ArgVT).getEVTString(); +#endif + llvm_unreachable(0); + } +} + +void +MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT, + MVT LocVT, + CCValAssign::LocInfo LocInfo, + ISD::ArgFlagsTy ArgFlags) { + assert(ArgFlags.getByValSize() && "Byval argument's size shouldn't be 0."); + + struct ByValArgInfo ByVal; + unsigned ByValSize = RoundUpToAlignment(ArgFlags.getByValSize(), RegSize); + unsigned Align = std::min(std::max(ArgFlags.getByValAlign(), RegSize), + RegSize * 2); + + if (UseRegsForByval) + allocateRegs(ByVal, ByValSize, Align); + + // Allocate space on caller's stack. 
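+  // Only the part of the byval argument that did not fit in registers
+  // consumes stack space here; ByVal.Address records the offset of that
+  // in-memory portion (or of the whole argument when no registers are used).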
+ ByVal.Address = CCInfo.AllocateStack(ByValSize - RegSize * ByVal.NumRegs, + Align); + CCInfo.addLoc(CCValAssign::getMem(ValNo, ValVT, ByVal.Address, LocVT, + LocInfo)); + ByValArgs.push_back(ByVal); +} + +void MipsTargetLowering::MipsCC::allocateRegs(ByValArgInfo &ByVal, + unsigned ByValSize, + unsigned Align) { + assert(!(ByValSize % RegSize) && !(Align % RegSize) && + "Byval argument's size and alignment should be a multiple of" + "RegSize."); + + ByVal.FirstIdx = CCInfo.getFirstUnallocated(IntArgRegs, NumIntArgRegs); + + // If Align > RegSize, the first arg register must be even. + if ((Align > RegSize) && (ByVal.FirstIdx % 2)) { + CCInfo.AllocateReg(IntArgRegs[ByVal.FirstIdx], ShadowRegs[ByVal.FirstIdx]); + ++ByVal.FirstIdx; + } + + // Mark the registers allocated. + for (unsigned I = ByVal.FirstIdx; ByValSize && (I < NumIntArgRegs); + ByValSize -= RegSize, ++I, ++ByVal.NumRegs) + CCInfo.AllocateReg(IntArgRegs[I], ShadowRegs[I]); +} + +void MipsTargetLowering:: +copyByValRegs(SDValue Chain, DebugLoc DL, std::vector &OutChains, + SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags, + SmallVectorImpl &InVals, const Argument *FuncArg, + const MipsCC &CC, const ByValArgInfo &ByVal) const { + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + unsigned RegAreaSize = ByVal.NumRegs * CC.regSize(); + unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize); + int FrameObjOffset; + + if (RegAreaSize) + FrameObjOffset = (int)CC.reservedArgArea() - + (int)((CC.numIntArgRegs() - ByVal.FirstIdx) * CC.regSize()); + else + FrameObjOffset = ByVal.Address; + + // Create frame object. + EVT PtrTy = getPointerTy(); + int FI = MFI->CreateFixedObject(FrameObjSize, FrameObjOffset, true); + SDValue FIN = DAG.getFrameIndex(FI, PtrTy); + InVals.push_back(FIN); + + if (!ByVal.NumRegs) + return; + + // Copy arg registers. + EVT RegTy = MVT::getIntegerVT(CC.regSize() * 8); + const TargetRegisterClass *RC = getRegClassFor(RegTy); + + for (unsigned I = 0; I < ByVal.NumRegs; ++I) { + unsigned ArgReg = CC.intArgRegs()[ByVal.FirstIdx + I]; + unsigned VReg = AddLiveIn(MF, ArgReg, RC); + unsigned Offset = I * CC.regSize(); + SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN, + DAG.getConstant(Offset, PtrTy)); + SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy), + StorePtr, MachinePointerInfo(FuncArg, Offset), + false, false, 0); + OutChains.push_back(Store); + } +} + +// Copy byVal arg to registers and stack. +void MipsTargetLowering:: +passByValArg(SDValue Chain, DebugLoc DL, + SmallVector, 16> &RegsToPass, + SmallVector &MemOpChains, SDValue StackPtr, + MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, + const MipsCC &CC, const ByValArgInfo &ByVal, + const ISD::ArgFlagsTy &Flags, bool isLittle) const { + unsigned ByValSize = Flags.getByValSize(); + unsigned Offset = 0; // Offset in # of bytes from the beginning of struct. + unsigned RegSize = CC.regSize(); + unsigned Alignment = std::min(Flags.getByValAlign(), RegSize); + EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSize * 8); + + if (ByVal.NumRegs) { + const uint16_t *ArgRegs = CC.intArgRegs(); + bool LeftoverBytes = (ByVal.NumRegs * RegSize > ByValSize); + unsigned I = 0; + + // Copy words to registers. 
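The register/stack split performed by handleByValArg and allocateRegs above can be summarised in a small standalone sketch (assumed O32 numbers, and only for conventions that allow registers for byval at all; splitByValO32 is an invented helper, not the patch's API): the byval size is rounded up to a register multiple, a doubleword-aligned argument starts at an even register, and whatever does not fit in the remaining argument registers goes on the caller's stack.

    #include <algorithm>
    #include <cstdio>

    struct ByValSplit { unsigned FirstIdx, NumRegs, StackBytes; };

    // O32: four 4-byte integer argument registers ($a0-$a3).
    ByValSplit splitByValO32(unsigned Size, unsigned Align, unsigned FirstFreeReg) {
      const unsigned RegSize = 4, NumIntArgRegs = 4;
      unsigned Rounded = (Size + RegSize - 1) / RegSize * RegSize;
      unsigned Idx = FirstFreeReg;
      if (Align > RegSize && (Idx % 2))        // 8-byte alignment: even register
        ++Idx;
      unsigned Regs = std::min(Rounded / RegSize, NumIntArgRegs - Idx);
      return { Idx, Regs, Rounded - Regs * RegSize };
    }

    int main() {
      ByValSplit S = splitByValO32(12, 4, 1);  // 12-byte struct, $a1 is free
      std::printf("first reg %u, regs %u, stack bytes %u\n",
                  S.FirstIdx, S.NumRegs, S.StackBytes);   // prints 1, 3, 0
    }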
+ for (; I < ByVal.NumRegs - LeftoverBytes; ++I, Offset += RegSize) { + SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg, + DAG.getConstant(Offset, PtrTy)); + SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr, + MachinePointerInfo(), false, false, false, + Alignment); + MemOpChains.push_back(LoadVal.getValue(1)); + unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I]; + RegsToPass.push_back(std::make_pair(ArgReg, LoadVal)); + } + + // Return if the struct has been fully copied. + if (ByValSize == Offset) + return; + + // Copy the remainder of the byval argument with sub-word loads and shifts. + if (LeftoverBytes) { + assert((ByValSize > Offset) && (ByValSize < Offset + RegSize) && + "Size of the remainder should be smaller than RegSize."); + SDValue Val; + + for (unsigned LoadSize = RegSize / 2, TotalSizeLoaded = 0; + Offset < ByValSize; LoadSize /= 2) { + unsigned RemSize = ByValSize - Offset; + + if (RemSize < LoadSize) + continue; + + // Load subword. + SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg, + DAG.getConstant(Offset, PtrTy)); + SDValue LoadVal = + DAG.getExtLoad(ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, + MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8), + false, false, Alignment); + MemOpChains.push_back(LoadVal.getValue(1)); + + // Shift the loaded value. + unsigned Shamt; + + if (isLittle) + Shamt = TotalSizeLoaded; + else + Shamt = (RegSize - (TotalSizeLoaded + LoadSize)) * 8; + + SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal, + DAG.getConstant(Shamt, MVT::i32)); + + if (Val.getNode()) + Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift); + else + Val = Shift; + + Offset += LoadSize; + TotalSizeLoaded += LoadSize; + Alignment = std::min(Alignment, LoadSize); + } + + unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I]; + RegsToPass.push_back(std::make_pair(ArgReg, Val)); + return; + } + } + + // Copy remainder of byval arg to it with memcpy. + unsigned MemCpySize = ByValSize - Offset; + SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg, + DAG.getConstant(Offset, PtrTy)); + SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr, + DAG.getIntPtrConstant(ByVal.Address)); + Chain = DAG.getMemcpy(Chain, DL, Dst, Src, + DAG.getConstant(MemCpySize, PtrTy), Alignment, + /*isVolatile=*/false, /*AlwaysInline=*/false, + MachinePointerInfo(0), MachinePointerInfo(0)); + MemOpChains.push_back(Chain); +} + +void +MipsTargetLowering::writeVarArgRegs(std::vector &OutChains, + const MipsCC &CC, SDValue Chain, + DebugLoc DL, SelectionDAG &DAG) const { + unsigned NumRegs = CC.numIntArgRegs(); + const uint16_t *ArgRegs = CC.intArgRegs(); + const CCState &CCInfo = CC.getCCInfo(); + unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumRegs); + unsigned RegSize = CC.regSize(); + EVT RegTy = MVT::getIntegerVT(RegSize * 8); + const TargetRegisterClass *RC = getRegClassFor(RegTy); + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + MipsFunctionInfo *MipsFI = MF.getInfo(); + + // Offset of the first variable argument from stack pointer. + int VaArgOffset; + + if (NumRegs == Idx) + VaArgOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), RegSize); + else + VaArgOffset = + (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx)); + + // Record the frame index of the first variable argument + // which is a value necessary to VASTART. 
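The leftover-bytes path above (zero-extending sub-word loads, shifted and OR-ed together, with the shift amount chosen by endianness and expressed in bits) is easier to see in a plain standalone form. This is only a sketch: packTail is an invented name, and the byte-wise "load" stands in for the ZEXTLOAD nodes the real code builds.

    #include <cstdint>

    // Pack the last RemSize (< 4) bytes of a byval argument into one 32-bit
    // register image, mirroring the loop above for a 4-byte register.
    uint32_t packTail(const uint8_t *Src, unsigned RemSize, bool IsLittle) {
      const unsigned RegSize = 4;
      uint32_t Val = 0;
      unsigned Loaded = 0;
      for (unsigned LoadSize = RegSize / 2; Loaded < RemSize; LoadSize /= 2) {
        if (RemSize - Loaded < LoadSize)
          continue;                                  // try the next smaller load
        uint32_t Piece = 0;                          // zero-extending sub-word load
        for (unsigned B = 0; B < LoadSize; ++B)
          Piece |= uint32_t(Src[Loaded + B])
                   << (IsLittle ? 8 * B : 8 * (LoadSize - 1 - B));
        unsigned Shamt = IsLittle ? Loaded * 8
                                  : (RegSize - (Loaded + LoadSize)) * 8;
        Val |= Piece << Shamt;                       // splice into the register
        Loaded += LoadSize;
      }
      return Val;
    }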
+ int FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true); + MipsFI->setVarArgsFrameIndex(FI); + + // Copy the integer registers that have not been used for argument passing + // to the argument register save area. For O32, the save area is allocated + // in the caller's stack frame, while for N32/64, it is allocated in the + // callee's stack frame. + for (unsigned I = Idx; I < NumRegs; ++I, VaArgOffset += RegSize) { + unsigned Reg = AddLiveIn(MF, ArgRegs[I], RC); + SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy); + FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true); + SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy()); + SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff, + MachinePointerInfo(), false, false, 0); + cast(Store.getNode())->getMemOperand()->setValue(0); + OutChains.push_back(Store); + } +} diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h index 95ea8fa..43f97e8 100644 --- a/lib/Target/Mips/MipsISelLowering.h +++ b/lib/Target/Mips/MipsISelLowering.h @@ -17,6 +17,7 @@ #include "Mips.h" #include "MipsSubtarget.h" +#include "llvm/CodeGen/CallingConvLower.h" #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/Target/TargetLowering.h" @@ -29,6 +30,9 @@ namespace llvm { // Jump and link (call) JmpLink, + // Tail call + TailCall, + // Get the Higher 16 bits from a 32-bit immediate // No relation with Mips Hi register Hi, @@ -81,6 +85,47 @@ namespace llvm { Ext, Ins, + // EXTR.W instrinsic nodes. + EXTP, + EXTPDP, + EXTR_S_H, + EXTR_W, + EXTR_R_W, + EXTR_RS_W, + SHILO, + MTHLIP, + + // DPA.W intrinsic nodes. + MULSAQ_S_W_PH, + MAQ_S_W_PHL, + MAQ_S_W_PHR, + MAQ_SA_W_PHL, + MAQ_SA_W_PHR, + DPAU_H_QBL, + DPAU_H_QBR, + DPSU_H_QBL, + DPSU_H_QBR, + DPAQ_S_W_PH, + DPSQ_S_W_PH, + DPAQ_SA_L_W, + DPSQ_SA_L_W, + DPA_W_PH, + DPS_W_PH, + DPAQX_S_W_PH, + DPAQX_SA_W_PH, + DPAX_W_PH, + DPSX_W_PH, + DPSQX_S_W_PH, + DPSQX_SA_W_PH, + MULSA_W_PH, + + MULT, + MULTU, + MADD_DSP, + MADDU_DSP, + MSUB_DSP, + MSUBU_DSP, + // Load/Store Left/Right nodes. LWL = ISD::FIRST_TARGET_MEMORY_OPCODE, LWR, @@ -96,6 +141,7 @@ namespace llvm { //===--------------------------------------------------------------------===// // TargetLowering Implementation //===--------------------------------------------------------------------===// + class MipsFunctionInfo; class MipsTargetLowering : public TargetLowering { public: @@ -105,9 +151,19 @@ namespace llvm { virtual bool allowsUnalignedMemoryAccesses (EVT VT) const; + virtual void LowerOperationWrapper(SDNode *N, + SmallVectorImpl &Results, + SelectionDAG &DAG) const; + /// LowerOperation - Provide custom lowering hooks for some operations. virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; + /// ReplaceNodeResults - Replace the results of node with an illegal result + /// type with new values built out of custom code. + /// + virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl&Results, + SelectionDAG &DAG) const; + /// getTargetNodeName - This method returns the name of a target specific // DAG node. virtual const char *getTargetNodeName(unsigned Opcode) const; @@ -117,6 +173,69 @@ namespace llvm { virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; private: + + /// ByValArgInfo - Byval argument information. + struct ByValArgInfo { + unsigned FirstIdx; // Index of the first register used. + unsigned NumRegs; // Number of registers used for this argument. + unsigned Address; // Offset of the stack area used to pass this argument. 
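A standalone restatement of the VaArgOffset computation in writeVarArgRegs above (a sketch; vaArgOffset is an invented helper): when the fixed arguments consumed every integer argument register, varargs begin at the next register-aligned stack offset; otherwise they begin inside the register save area, i.e. at reservedArgArea minus the space needed for the still-unused registers, which is negative under N32/N64 where the save area lives in the callee's frame.

    #include <cstdint>

    int64_t vaArgOffset(unsigned NumRegs, unsigned FirstUnallocated,
                        unsigned RegSize, unsigned ReservedArgArea,
                        uint64_t NextStackOffset) {
      if (FirstUnallocated == NumRegs)       // all arg registers already used
        return (NextStackOffset + RegSize - 1) / RegSize * RegSize;
      // Some registers remain: the save area starts this far from the incoming SP.
      return (int64_t)ReservedArgArea -
             (int64_t)(RegSize * (NumRegs - FirstUnallocated));
    }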
+ + ByValArgInfo() : FirstIdx(0), NumRegs(0), Address(0) {} + }; + + /// MipsCC - This class provides methods used to analyze formal and call + /// arguments and inquire about calling convention information. + class MipsCC { + public: + MipsCC(CallingConv::ID CallConv, bool IsVarArg, bool IsO32, + CCState &Info); + + void analyzeCallOperands(const SmallVectorImpl &Outs); + void analyzeFormalArguments(const SmallVectorImpl &Ins); + void handleByValArg(unsigned ValNo, MVT ValVT, MVT LocVT, + CCValAssign::LocInfo LocInfo, + ISD::ArgFlagsTy ArgFlags); + + const CCState &getCCInfo() const { return CCInfo; } + + /// hasByValArg - Returns true if function has byval arguments. + bool hasByValArg() const { return !ByValArgs.empty(); } + + /// useRegsForByval - Returns true if the calling convention allows the + /// use of registers to pass byval arguments. + bool useRegsForByval() const { return UseRegsForByval; } + + /// regSize - Size (in number of bits) of integer registers. + unsigned regSize() const { return RegSize; } + + /// numIntArgRegs - Number of integer registers available for calls. + unsigned numIntArgRegs() const { return NumIntArgRegs; } + + /// reservedArgArea - The size of the area the caller reserves for + /// register arguments. This is 16-byte if ABI is O32. + unsigned reservedArgArea() const { return ReservedArgArea; } + + /// intArgRegs - Pointer to array of integer registers. + const uint16_t *intArgRegs() const { return IntArgRegs; } + + typedef SmallVector::const_iterator byval_iterator; + byval_iterator byval_begin() const { return ByValArgs.begin(); } + byval_iterator byval_end() const { return ByValArgs.end(); } + + private: + void allocateRegs(ByValArgInfo &ByVal, unsigned ByValSize, + unsigned Align); + + CCState &CCInfo; + bool UseRegsForByval; + unsigned RegSize; + unsigned NumIntArgRegs; + unsigned ReservedArgArea; + const uint16_t *IntArgRegs, *ShadowRegs; + SmallVector ByValArgs; + llvm::CCAssignFn *FixedFn, *VarFn; + }; + // Subtarget Info const MipsSubtarget *Subtarget; @@ -151,6 +270,39 @@ namespace llvm { bool IsSRA) const; SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const; + + /// IsEligibleForTailCallOptimization - Check whether the call is eligible + /// for tail call optimization. + bool IsEligibleForTailCallOptimization(const MipsCC &MipsCCInfo, + unsigned NextStackOffset, + const MipsFunctionInfo& FI) const; + + /// copyByValArg - Copy argument registers which were used to pass a byval + /// argument to the stack. Create a stack frame object for the byval + /// argument. + void copyByValRegs(SDValue Chain, DebugLoc DL, + std::vector &OutChains, SelectionDAG &DAG, + const ISD::ArgFlagsTy &Flags, + SmallVectorImpl &InVals, + const Argument *FuncArg, + const MipsCC &CC, const ByValArgInfo &ByVal) const; + + /// passByValArg - Pass a byval argument in registers or on stack. + void passByValArg(SDValue Chain, DebugLoc DL, + SmallVector, 16> &RegsToPass, + SmallVector &MemOpChains, SDValue StackPtr, + MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, + const MipsCC &CC, const ByValArgInfo &ByVal, + const ISD::ArgFlagsTy &Flags, bool isLittle) const; + + /// writeVarArgRegs - Write variable function arguments passed in registers + /// to the stack. 
Also create a stack frame object for the first variable + /// argument. + void writeVarArgRegs(std::vector &OutChains, const MipsCC &CC, + SDValue Chain, DebugLoc DL, SelectionDAG &DAG) const; virtual SDValue LowerFormalArguments(SDValue Chain, @@ -159,10 +311,20 @@ namespace llvm { DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl &InVals) const; + SDValue passArgOnStack(SDValue StackPtr, unsigned Offset, SDValue Chain, + SDValue Arg, DebugLoc DL, bool IsTailCall, + SelectionDAG &DAG) const; + virtual SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI, SmallVectorImpl &InVals) const; + virtual bool + CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, + bool isVarArg, + const SmallVectorImpl &Outs, + LLVMContext &Context) const; + virtual SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, @@ -209,6 +371,8 @@ namespace llvm { virtual unsigned getJumpTableEncoding() const; + MachineBasicBlock *EmitBPOSGE32(MachineInstr *MI, + MachineBasicBlock *BB) const; MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned BinOpcode, bool Nand = false) const; MachineBasicBlock *EmitAtomicBinaryPartword(MachineInstr *MI, diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td index 3e78c45..33ee020 100644 --- a/lib/Target/Mips/MipsInstrFPU.td +++ b/lib/Target/Mips/MipsInstrFPU.td @@ -90,20 +90,20 @@ def fpimm0neg : PatLeaf<(fpimm), [{ let DecoderMethod = "DecodeFMem" in { class FPLoad op, string opstr, RegisterClass RC, Operand MemOpnd>: FMem; // FP store. class FPStore op, string opstr, RegisterClass RC, Operand MemOpnd>: FMem; } // FP indexed load. class FPIdxLoad funct, string opstr, RegisterClass DRC, RegisterClass PRC, SDPatternOperator FOp = null_frag>: FFMemIdx { let fs = 0; } @@ -112,7 +112,7 @@ class FPIdxLoad funct, string opstr, RegisterClass DRC, class FPIdxStore funct, string opstr, RegisterClass DRC, RegisterClass PRC, SDPatternOperator FOp= null_frag>: FFMemIdx { let fd = 0; } @@ -182,20 +182,21 @@ defm CEIL_W : FFR1_W_M<0xe, "ceil">; defm CEIL_L : FFR1_L_M<0xa, "ceil">; defm FLOOR_W : FFR1_W_M<0xf, "floor">; defm FLOOR_L : FFR1_L_M<0xb, "floor">; -defm CVT_W : FFR1_W_M<0x24, "cvt">; +defm CVT_W : FFR1_W_M<0x24, "cvt">, NeverHasSideEffects; //defm CVT_L : FFR1_L_M<0x25, "cvt">; -def CVT_S_W : FFR1<0x20, 20, "cvt", "s.w", FGR32, FGR32>; -def CVT_L_S : FFR1<0x25, 16, "cvt", "l.s", FGR64, FGR32>; -def CVT_L_D64: FFR1<0x25, 17, "cvt", "l.d", FGR64, FGR64>; +def CVT_S_W : FFR1<0x20, 20, "cvt", "s.w", FGR32, FGR32>, NeverHasSideEffects; +def CVT_L_S : FFR1<0x25, 16, "cvt", "l.s", FGR64, FGR32>, NeverHasSideEffects; +def CVT_L_D64: FFR1<0x25, 17, "cvt", "l.d", FGR64, FGR64>, NeverHasSideEffects; -let Predicates = [NotFP64bit, HasStandardEncoding] in { +let Predicates = [NotFP64bit, HasStandardEncoding], neverHasSideEffects = 1 in { def CVT_S_D32 : FFR1<0x20, 17, "cvt", "s.d", FGR32, AFGR64>; def CVT_D32_W : FFR1<0x21, 20, "cvt", "d.w", AFGR64, FGR32>; def CVT_D32_S : FFR1<0x21, 16, "cvt", "d.s", AFGR64, FGR32>; } -let Predicates = [IsFP64bit, HasStandardEncoding], DecoderNamespace = "Mips64" in { +let Predicates = [IsFP64bit, HasStandardEncoding], DecoderNamespace = "Mips64", + neverHasSideEffects = 1 in { def CVT_S_D64 : FFR1<0x20, 17, "cvt", "s.d", FGR32, FGR64>; def CVT_S_L : FFR1<0x20, 21, "cvt", "s.l", FGR32, FGR64>; def CVT_D64_W : FFR1<0x21, 20, "cvt", "d.w", FGR64, FGR32>; @@ -282,26 +283,26 @@ let Predicates = [NotN64, NotMips64, HasStandardEncoding] in { // Indexed loads and stores. 
let Predicates = [HasMips32r2Or64, HasStandardEncoding] in { - def LWXC1 : FPIdxLoad<0x0, "lwxc1", FGR32, CPURegs, load_a>; - def SWXC1 : FPIdxStore<0x8, "swxc1", FGR32, CPURegs, store_a>; + def LWXC1 : FPIdxLoad<0x0, "lwxc1", FGR32, CPURegs, load>; + def SWXC1 : FPIdxStore<0x8, "swxc1", FGR32, CPURegs, store>; } let Predicates = [HasMips32r2, NotMips64, HasStandardEncoding] in { - def LDXC1 : FPIdxLoad<0x1, "ldxc1", AFGR64, CPURegs, load_a>; - def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store_a>; + def LDXC1 : FPIdxLoad<0x1, "ldxc1", AFGR64, CPURegs, load>; + def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store>; } let Predicates = [HasMips64, NotN64, HasStandardEncoding], DecoderNamespace="Mips64" in { - def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load_a>; - def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, store_a>; + def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load>; + def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, store>; } // n64 let Predicates = [IsN64, HasStandardEncoding], isCodeGenOnly=1 in { - def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load_a>; - def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load_a>; - def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store_a>; - def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store_a>; + def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load>; + def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load>; + def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store>; + def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store>; } // Load/store doubleword indexed unaligned. diff --git a/lib/Target/Mips/MipsInstrFormats.td b/lib/Target/Mips/MipsInstrFormats.td index 8feb853..1ecbdc2 100644 --- a/lib/Target/Mips/MipsInstrFormats.td +++ b/lib/Target/Mips/MipsInstrFormats.td @@ -92,6 +92,14 @@ class PseudoSE pattern>: let Predicates = [HasStandardEncoding]; } +// Pseudo-instructions for alternate assembly syntax (never used by codegen). +// These are aliases that require C++ handling to convert to the target +// instruction, while InstAliases can be handled directly by tblgen. 
+class MipsAsmPseudoInst: + MipsInst { + let isPseudo = 1; + let Pattern = []; +} //===----------------------------------------------------------------------===// // Format R instruction class in Mips : <|opcode|rs|rt|rd|shamt|funct|> //===----------------------------------------------------------------------===// @@ -163,6 +171,27 @@ class FJ op, dag outs, dag ins, string asmstr, list pattern, let Inst{25-0} = addr; } + //===----------------------------------------------------------------------===// +// MFC instruction class in Mips : <|op|mf|rt|rd|0000000|sel|> +//===----------------------------------------------------------------------===// +class MFC3OP op, bits<5> _mfmt, dag outs, dag ins, string asmstr>: + InstSE +{ + bits<5> mfmt; + bits<5> rt; + bits<5> rd; + bits<3> sel; + + let Opcode = op; + let mfmt = _mfmt; + + let Inst{25-21} = mfmt; + let Inst{20-16} = rt; + let Inst{15-11} = rd; + let Inst{10-3} = 0; + let Inst{2-0} = sel; +} + //===----------------------------------------------------------------------===// // // FLOATING POINT INSTRUCTION FORMATS diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp index 50e3eb5..ca80d43 100644 --- a/lib/Target/Mips/MipsInstrInfo.cpp +++ b/lib/Target/Mips/MipsInstrInfo.cpp @@ -95,6 +95,7 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, SmallVectorImpl &Cond, bool AllowModify) const { + MachineBasicBlock::reverse_iterator I = MBB.rbegin(), REnd = MBB.rend(); // Skip all the debug instructions. @@ -177,9 +178,14 @@ void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB, const MCInstrDesc &MCID = get(Opc); MachineInstrBuilder MIB = BuildMI(&MBB, DL, MCID); - for (unsigned i = 1; i < Cond.size(); ++i) - MIB.addReg(Cond[i].getReg()); - + for (unsigned i = 1; i < Cond.size(); ++i) { + if (Cond[i].isReg()) + MIB.addReg(Cond[i].getReg()); + else if (Cond[i].isImm()) + MIB.addImm(Cond[i].getImm()); + else + assert(true && "Cannot copy operand"); + } MIB.addMBB(TBB); } @@ -262,46 +268,3 @@ unsigned MipsInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { } } } - -unsigned -llvm::Mips::loadImmediate(int64_t Imm, bool IsN64, const TargetInstrInfo &TII, - MachineBasicBlock& MBB, - MachineBasicBlock::iterator II, DebugLoc DL, - bool LastInstrIsADDiu, - MipsAnalyzeImmediate::Inst *LastInst) { - MipsAnalyzeImmediate AnalyzeImm; - unsigned Size = IsN64 ? 64 : 32; - unsigned LUi = IsN64 ? Mips::LUi64 : Mips::LUi; - unsigned ZEROReg = IsN64 ? Mips::ZERO_64 : Mips::ZERO; - unsigned ATReg = IsN64 ? Mips::AT_64 : Mips::AT; - - const MipsAnalyzeImmediate::InstSeq &Seq = - AnalyzeImm.Analyze(Imm, Size, LastInstrIsADDiu); - MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin(); - - if (LastInst && (Seq.size() == 1)) { - *LastInst = *Inst; - return 0; - } - - // The first instruction can be a LUi, which is different from other - // instructions (ADDiu, ORI and SLL) in that it does not have a register - // operand. - if (Inst->Opc == LUi) - BuildMI(MBB, II, DL, TII.get(LUi), ATReg) - .addImm(SignExtend64<16>(Inst->ImmOpnd)); - else - BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg) - .addImm(SignExtend64<16>(Inst->ImmOpnd)); - - // Build the remaining instructions in Seq. Skip the last instruction if - // LastInst is not 0. 
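Related background for the loadImmediate sequence above (and for its replacement in MipsSEInstrInfo later in this patch): for a 32-bit value that does not fit in a signed 16-bit immediate, MipsAnalyzeImmediate typically produces a LUi for the upper half followed by an ORI for the lower half. A minimal sketch of that two-instruction case, illustrative only; larger 64-bit values need additional SLL/ORI/ADDiu steps, and ORI zero-extends, so no rounding term is involved here (unlike the sign-extending %hi/%lo split used for addresses).

    #include <cassert>
    #include <cstdint>

    uint32_t materialize32(uint32_t Imm) {
      uint32_t Reg = (Imm >> 16) << 16;    // lui  $r, Imm[31:16]
      Reg |= Imm & 0xffff;                 // ori  $r, $r, Imm[15:0]
      return Reg;
    }

    int main() {
      assert(materialize32(0xdeadbeef) == 0xdeadbeef);
      assert(materialize32(0x00010000) == 0x00010000);
    }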
- for (++Inst; Inst != Seq.end() - !!LastInst; ++Inst) - BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg) - .addImm(SignExtend64<16>(Inst->ImmOpnd)); - - if (LastInst) - *LastInst = *Inst; - - return Seq.size() - !!LastInst; -} diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h index 7d56259..aca2bc7 100644 --- a/lib/Target/Mips/MipsInstrInfo.h +++ b/lib/Target/Mips/MipsInstrInfo.h @@ -88,18 +88,6 @@ private: const SmallVectorImpl& Cond) const; }; -namespace Mips { - /// Emit a series of instructions to load an immediate. All instructions - /// except for the last one are emitted. The function returns the number of - /// MachineInstrs generated. The opcode-immediate pair of the last - /// instruction is returned in LastInst, if it is not 0. - unsigned - loadImmediate(int64_t Imm, bool IsN64, const TargetInstrInfo &TII, - MachineBasicBlock& MBB, MachineBasicBlock::iterator II, - DebugLoc DL, bool LastInstrIsADDiu, - MipsAnalyzeImmediate::Inst *LastInst); -} - /// Create MipsInstrInfo objects. const MipsInstrInfo *createMips16InstrInfo(MipsTargetMachine &TM); const MipsInstrInfo *createMipsSEInstrInfo(MipsTargetMachine &TM); diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td index da15d4d..f16b5f9 100644 --- a/lib/Target/Mips/MipsInstrInfo.td +++ b/lib/Target/Mips/MipsInstrInfo.td @@ -52,6 +52,10 @@ def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink, [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue, SDNPVariadic]>; +// Tail call +def MipsTailCall : SDNode<"MipsISD::TailCall", SDT_MipsJmpLink, + [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; + // Hi and Lo nodes are used to handle global addresses. Used on // MipsISelLowering to lower stuff like GlobalAddress, ExternalSymbol // static model. (nothing to do with Mips Registers Hi and Lo) @@ -74,9 +78,10 @@ def MipsRet : SDNode<"MipsISD::Ret", SDTNone, [SDNPHasChain, SDNPOptInGlue]>; // These are target-independent nodes, but have target-specific formats. 
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart, - [SDNPHasChain, SDNPOutGlue]>; + [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>; def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd, - [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; + [SDNPHasChain, SDNPSideEffect, + SDNPOptInGlue, SDNPOutGlue]>; // MAdd*/MSub* nodes def MipsMAdd : SDNode<"MipsISD::MAdd", SDT_MipsMAddMSub, @@ -110,7 +115,7 @@ def MipsWrapper : SDNode<"MipsISD::Wrapper", SDTIntBinOp>; def MipsDynAlloc : SDNode<"MipsISD::DynAlloc", SDT_MipsDynAlloc, [SDNPHasChain, SDNPInGlue]>; -def MipsSync : SDNode<"MipsISD::Sync", SDT_Sync, [SDNPHasChain]>; +def MipsSync : SDNode<"MipsISD::Sync", SDT_Sync, [SDNPHasChain,SDNPSideEffect]>; def MipsExt : SDNode<"MipsISD::Ext", SDT_Ext>; def MipsIns : SDNode<"MipsISD::Ins", SDT_Ins>; @@ -174,6 +179,35 @@ class MipsPat : Pat { let Predicates = [HasStandardEncoding]; } +class IsBranch { + bit isBranch = 1; +} + +class IsReturn { + bit isReturn = 1; +} + +class IsCall { + bit isCall = 1; +} + +class IsTailCall { + bit isCall = 1; + bit isTerminator = 1; + bit isReturn = 1; + bit isBarrier = 1; + bit hasExtraSrcRegAllocReq = 1; + bit isCodeGenOnly = 1; +} + +class IsAsCheapAsAMove { + bit isAsCheapAsAMove = 1; +} + +class NeverHasSideEffects { + bit neverHasSideEffects = 1; +} + //===----------------------------------------------------------------------===// // Instruction format superclass //===----------------------------------------------------------------------===// @@ -208,17 +242,24 @@ def uimm16 : Operand { let PrintMethod = "printUnsignedImm"; } +def MipsMemAsmOperand : AsmOperandClass { + let Name = "Mem"; + let ParserMethod = "parseMemOperand"; +} + // Address operand def mem : Operand { let PrintMethod = "printMemOperand"; let MIOperandInfo = (ops CPURegs, simm16); let EncoderMethod = "getMemEncoding"; + let ParserMatchClass = MipsMemAsmOperand; } def mem64 : Operand { let PrintMethod = "printMemOperand"; let MIOperandInfo = (ops CPU64Regs, simm16_64); let EncoderMethod = "getMemEncoding"; + let ParserMatchClass = MipsMemAsmOperand; } def mem_ea : Operand { @@ -285,57 +326,25 @@ def addr : ComplexPattern; //===----------------------------------------------------------------------===// -// Pattern fragment for load/store +// Instructions specific format //===----------------------------------------------------------------------===// -class UnalignedLoad : - PatFrag<(ops node:$ptr), (Node node:$ptr), [{ - LoadSDNode *LD = cast(N); - return LD->getMemoryVT().getSizeInBits()/8 > LD->getAlignment(); -}]>; -class AlignedLoad : - PatFrag<(ops node:$ptr), (Node node:$ptr), [{ - LoadSDNode *LD = cast(N); - return LD->getMemoryVT().getSizeInBits()/8 <= LD->getAlignment(); -}]>; - -class UnalignedStore : - PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{ - StoreSDNode *SD = cast(N); - return SD->getMemoryVT().getSizeInBits()/8 > SD->getAlignment(); -}]>; +/// Move Control Registers From/To CPU Registers +def MFC0_3OP : MFC3OP<0x10, 0, (outs CPURegs:$rt), + (ins CPURegs:$rd, uimm16:$sel),"mfc0\t$rt, $rd, $sel">; +def : InstAlias<"mfc0 $rt, $rd", (MFC0_3OP CPURegs:$rt, CPURegs:$rd, 0)>; -class AlignedStore : - PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{ - StoreSDNode *SD = cast(N); - return SD->getMemoryVT().getSizeInBits()/8 <= SD->getAlignment(); -}]>; +def MTC0_3OP : MFC3OP<0x10, 4, (outs CPURegs:$rd, uimm16:$sel), + (ins CPURegs:$rt),"mtc0\t$rt, $rd, $sel">; +def : InstAlias<"mtc0 $rt, $rd", (MTC0_3OP CPURegs:$rd, 0, 
CPURegs:$rt)>; -// Load/Store PatFrags. -def sextloadi16_a : AlignedLoad; -def zextloadi16_a : AlignedLoad; -def extloadi16_a : AlignedLoad; -def load_a : AlignedLoad; -def sextloadi32_a : AlignedLoad; -def zextloadi32_a : AlignedLoad; -def extloadi32_a : AlignedLoad; -def truncstorei16_a : AlignedStore; -def store_a : AlignedStore; -def truncstorei32_a : AlignedStore; -def sextloadi16_u : UnalignedLoad; -def zextloadi16_u : UnalignedLoad; -def extloadi16_u : UnalignedLoad; -def load_u : UnalignedLoad; -def sextloadi32_u : UnalignedLoad; -def zextloadi32_u : UnalignedLoad; -def extloadi32_u : UnalignedLoad; -def truncstorei16_u : UnalignedStore; -def store_u : UnalignedStore; -def truncstorei32_u : UnalignedStore; +def MFC2_3OP : MFC3OP<0x12, 0, (outs CPURegs:$rt), + (ins CPURegs:$rd, uimm16:$sel),"mfc2\t$rt, $rd, $sel">; +def : InstAlias<"mfc2 $rt, $rd", (MFC2_3OP CPURegs:$rt, CPURegs:$rd, 0)>; -//===----------------------------------------------------------------------===// -// Instructions specific format -//===----------------------------------------------------------------------===// +def MTC2_3OP : MFC3OP<0x12, 4, (outs CPURegs:$rd, uimm16:$sel), + (ins CPURegs:$rt),"mtc2\t$rt, $rd, $sel">; +def : InstAlias<"mtc2 $rt, $rd", (MTC2_3OP CPURegs:$rd, 0, CPURegs:$rt)>; // Arithmetic and logical instructions with 3 register operands. class ArithLogicR op, bits<6> func, string instr_asm, SDNode OpNode, @@ -416,7 +425,7 @@ class shift_rotate_reg func, bits<5> isRotate, string instr_asm, // Load Upper Imediate class LoadUpper op, string instr_asm, RegisterClass RC, Operand Imm>: FI { + !strconcat(instr_asm, "\t$rt, $imm16"), [], IIAlu>, IsAsCheapAsAMove { let rs = 0; let neverHasSideEffects = 1; let isReMaterializable = 1; @@ -597,14 +606,13 @@ class SetCC_I op, string instr_asm, PatFrag cond_op, Operand Od, IIAlu>; // Jump -class JumpFJ op, string instr_asm>: - FJ { - let isBranch=1; +class JumpFJ op, DAGOperand opnd, string instr_asm, + SDPatternOperator operator, SDPatternOperator targetoperator>: + FJ { let isTerminator=1; let isBarrier=1; let hasDelaySlot = 1; - let Predicates = [RelocStatic, HasStandardEncoding]; let DecoderMethod = "DecodeJumpTarget"; let Defs = [AT]; } @@ -625,21 +633,21 @@ class UncondBranch op, string instr_asm>: // Base class for indirect branch and return instruction classes. 
let isTerminator=1, isBarrier=1, hasDelaySlot = 1 in -class JumpFR pattern>: - FR<0, 0x8, (outs), (ins RC:$rs), "jr\t$rs", pattern, IIBranch> { +class JumpFR: + FR<0, 0x8, (outs), (ins RC:$rs), "jr\t$rs", [(operator RC:$rs)], IIBranch> { let rt = 0; let rd = 0; let shamt = 0; } // Indirect branch -class IndirectBranch: JumpFR { +class IndirectBranch: JumpFR { let isBranch = 1; let isIndirectBranch = 1; } // Return instruction -class RetBase: JumpFR { +class RetBase: JumpFR { let isReturn = 1; let isCodeGenOnly = 1; let hasCtrlDep = 1; @@ -905,12 +913,28 @@ let usesCustomInserter = 1 in { // Instruction definition //===----------------------------------------------------------------------===// +class LoadImm32< string instr_asm, Operand Od, RegisterClass RC> : + MipsAsmPseudoInst<(outs RC:$rt), (ins Od:$imm32), + !strconcat(instr_asm, "\t$rt, $imm32")> ; +def LoadImm32Reg : LoadImm32<"li", shamt,CPURegs>; + +class LoadAddress : + MipsAsmPseudoInst<(outs RC:$rt), (ins MemOpnd:$addr), + !strconcat(instr_asm, "\t$rt, $addr")> ; +def LoadAddr32Reg : LoadAddress<"la", mem, CPURegs>; + +class LoadAddressImm : + MipsAsmPseudoInst<(outs RC:$rt), (ins Od:$imm32), + !strconcat(instr_asm, "\t$rt, $imm32")> ; +def LoadAddr32Imm : LoadAddressImm<"la", shamt,CPURegs>; + //===----------------------------------------------------------------------===// // MipsI Instructions //===----------------------------------------------------------------------===// /// Arithmetic Instructions (ALU Immediate) -def ADDiu : ArithLogicI<0x09, "addiu", add, simm16, immSExt16, CPURegs>; +def ADDiu : ArithLogicI<0x09, "addiu", add, simm16, immSExt16, CPURegs>, + IsAsCheapAsAMove; def ADDi : ArithOverflowI<0x08, "addi", add, simm16, immSExt16, CPURegs>; def SLTi : SetCC_I<0x0a, "slti", setlt, simm16, immSExt16, CPURegs>; def SLTiu : SetCC_I<0x0b, "sltiu", setult, simm16, immSExt16, CPURegs>; @@ -949,19 +973,12 @@ let Predicates = [HasMips32r2, HasStandardEncoding] in { /// aligned defm LB : LoadM32<0x20, "lb", sextloadi8>; defm LBu : LoadM32<0x24, "lbu", zextloadi8>; -defm LH : LoadM32<0x21, "lh", sextloadi16_a>; -defm LHu : LoadM32<0x25, "lhu", zextloadi16_a>; -defm LW : LoadM32<0x23, "lw", load_a>; +defm LH : LoadM32<0x21, "lh", sextloadi16>; +defm LHu : LoadM32<0x25, "lhu", zextloadi16>; +defm LW : LoadM32<0x23, "lw", load>; defm SB : StoreM32<0x28, "sb", truncstorei8>; -defm SH : StoreM32<0x29, "sh", truncstorei16_a>; -defm SW : StoreM32<0x2b, "sw", store_a>; - -/// unaligned -defm ULH : LoadM32<0x21, "ulh", sextloadi16_u, 1>; -defm ULHu : LoadM32<0x25, "ulhu", zextloadi16_u, 1>; -defm ULW : LoadM32<0x23, "ulw", load_u, 1>; -defm USH : StoreM32<0x29, "ush", truncstorei16_u, 1>; -defm USW : StoreM32<0x2b, "usw", store_u, 1>; +defm SH : StoreM32<0x29, "sh", truncstorei16>; +defm SW : StoreM32<0x2b, "sw", store>; /// load/store left/right defm LWL : LoadLeftRightM32<0x22, "lwl", MipsLWL>; @@ -996,7 +1013,8 @@ def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, } /// Jump and Branch Instructions -def J : JumpFJ<0x02, "j">; +def J : JumpFJ<0x02, jmptarget, "j", br, bb>, + Requires<[RelocStatic, HasStandardEncoding]>, IsBranch; def JR : IndirectBranch; def B : UncondBranch<0x04, "b">; def BEQ : CBranch<0x04, "beq", seteq, CPURegs>; @@ -1014,6 +1032,8 @@ def JAL : JumpLink<0x03, "jal">; def JALR : JumpLinkReg<0x00, 0x09, "jalr", CPURegs>; def BGEZAL : BranchLink<"bgezal", 0x11, CPURegs>; def BLTZAL : BranchLink<"bltzal", 0x10, CPURegs>; +def TAILCALL : JumpFJ<0x02, calltarget, "j", MipsTailCall, imm>, IsTailCall; +def TAILCALL_R : 
JumpFR, IsTailCall; def RET : RetBase; @@ -1072,6 +1092,26 @@ def EXT : ExtBase<0, "ext", CPURegs>; def INS : InsBase<4, "ins", CPURegs>; //===----------------------------------------------------------------------===// +// Instruction aliases +//===----------------------------------------------------------------------===// +def : InstAlias<"move $dst,$src", (ADD CPURegs:$dst,CPURegs:$src,ZERO)>; +def : InstAlias<"bal $offset", (BGEZAL RA,brtarget:$offset)>; +def : InstAlias<"addu $rs,$rt,$imm", + (ADDiu CPURegs:$rs,CPURegs:$rt,simm16:$imm)>; +def : InstAlias<"add $rs,$rt,$imm", + (ADDi CPURegs:$rs,CPURegs:$rt,simm16:$imm)>; +def : InstAlias<"and $rs,$rt,$imm", + (ANDi CPURegs:$rs,CPURegs:$rt,simm16:$imm)>; +def : InstAlias<"j $rs", (JR CPURegs:$rs)>; +def : InstAlias<"not $rt,$rs", (NOR CPURegs:$rt,CPURegs:$rs,ZERO)>; +def : InstAlias<"neg $rt,$rs", (SUB CPURegs:$rt,ZERO,CPURegs:$rs)>; +def : InstAlias<"negu $rt,$rs", (SUBu CPURegs:$rt,ZERO,CPURegs:$rs)>; +def : InstAlias<"slt $rs,$rt,$imm", + (SLTi CPURegs:$rs,CPURegs:$rt,simm16:$imm)>; +def : InstAlias<"xor $rs,$rt,$imm", + (XORi CPURegs:$rs,CPURegs:$rt,simm16:$imm)>; + +//===----------------------------------------------------------------------===// // Arbitrary patterns that map to one or more instructions //===----------------------------------------------------------------------===// @@ -1103,6 +1143,11 @@ def : MipsPat<(MipsJmpLink (i32 texternalsym:$dst)), //def : MipsPat<(MipsJmpLink CPURegs:$dst), // (JALR CPURegs:$dst)>; +// Tail call +def : MipsPat<(MipsTailCall (iPTR tglobaladdr:$dst)), + (TAILCALL tglobaladdr:$dst)>; +def : MipsPat<(MipsTailCall (iPTR texternalsym:$dst)), + (TAILCALL texternalsym:$dst)>; // hi/lo relocs def : MipsPat<(MipsHi tglobaladdr:$in), (LUi tglobaladdr:$in)>; def : MipsPat<(MipsHi tblockaddress:$in), (LUi tblockaddress:$in)>; @@ -1153,24 +1198,20 @@ def : MipsPat<(not CPURegs:$in), let Predicates = [NotN64, HasStandardEncoding] in { def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>; def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>; - def : MipsPat<(i32 (extloadi16_a addr:$src)), (LHu addr:$src)>; - def : MipsPat<(i32 (extloadi16_u addr:$src)), (ULHu addr:$src)>; + def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu addr:$src)>; } let Predicates = [IsN64, HasStandardEncoding] in { def : MipsPat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>; def : MipsPat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>; - def : MipsPat<(i32 (extloadi16_a addr:$src)), (LHu_P8 addr:$src)>; - def : MipsPat<(i32 (extloadi16_u addr:$src)), (ULHu_P8 addr:$src)>; + def : MipsPat<(i32 (extloadi16 addr:$src)), (LHu_P8 addr:$src)>; } // peepholes let Predicates = [NotN64, HasStandardEncoding] in { - def : MipsPat<(store_a (i32 0), addr:$dst), (SW ZERO, addr:$dst)>; - def : MipsPat<(store_u (i32 0), addr:$dst), (USW ZERO, addr:$dst)>; + def : MipsPat<(store (i32 0), addr:$dst), (SW ZERO, addr:$dst)>; } let Predicates = [IsN64, HasStandardEncoding] in { - def : MipsPat<(store_a (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>; - def : MipsPat<(store_u (i32 0), addr:$dst), (USW_P8 ZERO, addr:$dst)>; + def : MipsPat<(store (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>; } // brcond patterns @@ -1265,3 +1306,8 @@ include "MipsCondMov.td" include "Mips16InstrFormats.td" include "Mips16InstrInfo.td" + +// DSP +include "MipsDSPInstrFormats.td" +include "MipsDSPInstrInfo.td" + diff --git a/lib/Target/Mips/MipsLongBranch.cpp b/lib/Target/Mips/MipsLongBranch.cpp index f78203f..5d9f0cf 100644 --- 
a/lib/Target/Mips/MipsLongBranch.cpp +++ b/lib/Target/Mips/MipsLongBranch.cpp @@ -10,6 +10,10 @@ // This pass expands a branch or jump instruction into a long branch if its // offset is too large to fit into its immediate field. // +// FIXME: +// 1. Fix pc-region jump instructions which cross 256MB segment boundaries. +// 2. If program has inline assembly statements whose size cannot be +// determined accurately, load branch target addresses from the GOT. //===----------------------------------------------------------------------===// #define DEBUG_TYPE "mips-long-branch" @@ -48,7 +52,7 @@ namespace { typedef MachineBasicBlock::reverse_iterator ReverseIter; struct MBBInfo { - uint64_t Size; + uint64_t Size, Address; bool HasLongBranch; MachineInstr *Br; @@ -61,7 +65,10 @@ namespace { static char ID; MipsLongBranch(TargetMachine &tm) : MachineFunctionPass(ID), TM(tm), - TII(static_cast(tm.getInstrInfo())) {} + TII(static_cast(tm.getInstrInfo())), + IsPIC(TM.getRelocationModel() == Reloc::PIC_), + ABI(TM.getSubtarget().getTargetABI()), + LongBranchSeqSize(!IsPIC ? 2 : (ABI == MipsSubtarget::N64 ? 13 : 9)) {} virtual const char *getPassName() const { return "Mips Long Branch"; @@ -81,6 +88,9 @@ namespace { const MipsInstrInfo *TII; MachineFunction *MF; SmallVector MBBInfos; + bool IsPIC; + unsigned ABI; + unsigned LongBranchSeqSize; }; char MipsLongBranch::ID = 0; @@ -230,12 +240,6 @@ void MipsLongBranch::replaceBranch(MachineBasicBlock &MBB, Iter Br, // Expand branch instructions to long branches. void MipsLongBranch::expandToLongBranch(MBBInfo &I) { - I.HasLongBranch = true; - - bool IsPIC = TM.getRelocationModel() == Reloc::PIC_; - unsigned ABI = TM.getSubtarget().getTargetABI(); - bool N64 = ABI == MipsSubtarget::N64; - MachineBasicBlock::iterator Pos; MachineBasicBlock *MBB = I.Br->getParent(), *TgtMBB = getTargetMBB(*I.Br); DebugLoc DL = I.Br->getDebugLoc(); @@ -248,101 +252,105 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) { MBB->addSuccessor(LongBrMBB); if (IsPIC) { - // $longbr: - // addiu $sp, $sp, -regsize * 2 - // sw $ra, 0($sp) - // bal $baltgt - // sw $a3, regsize($sp) - // $baltgt: - // lui $a3, %hi($baltgt) - // lui $at, %hi($tgt) - // addiu $a3, $a3, %lo($baltgt) - // addiu $at, $at, %lo($tgt) - // subu $at, $at, $a3 - // addu $at, $ra, $at - // - // if n64: - // lui $a3, %highest($baltgt) - // lui $ra, %highest($tgt) - // addiu $a3, $a3, %higher($baltgt) - // addiu $ra, $ra, %higher($tgt) - // dsll $a3, $a3, 32 - // dsll $ra, $ra, 32 - // subu $at, $at, $a3 - // addu $at, $at, $ra - // - // lw $ra, 0($sp) - // lw $a3, regsize($sp) - // jr $at - // addiu $sp, $sp, regsize * 2 - // $fallthrough: - // - MF->getInfo()->setEmitNOAT(); MachineBasicBlock *BalTgtMBB = MF->CreateMachineBasicBlock(BB); MF->insert(FallThroughMBB, BalTgtMBB); LongBrMBB->addSuccessor(BalTgtMBB); BalTgtMBB->addSuccessor(TgtMBB); - int RegSize = N64 ? 8 : 4; - unsigned AT = N64 ? Mips::AT_64 : Mips::AT; - unsigned A3 = N64 ? Mips::A3_64 : Mips::A3; - unsigned SP = N64 ? Mips::SP_64 : Mips::SP; - unsigned RA = N64 ? Mips::RA_64 : Mips::RA; - unsigned Load = N64 ? Mips::LD_P8 : Mips::LW; - unsigned Store = N64 ? Mips::SD_P8 : Mips::SW; - unsigned LUi = N64 ? Mips::LUi64 : Mips::LUi; - unsigned ADDiu = N64 ? Mips::DADDiu : Mips::ADDiu; - unsigned ADDu = N64 ? Mips::DADDu : Mips::ADDu; - unsigned SUBu = N64 ? Mips::SUBu : Mips::SUBu; - unsigned JR = N64 ? 
Mips::JR64 : Mips::JR; - - Pos = LongBrMBB->begin(); - - BuildMI(*LongBrMBB, Pos, DL, TII->get(ADDiu), SP).addReg(SP) - .addImm(-RegSize * 2); - BuildMI(*LongBrMBB, Pos, DL, TII->get(Store)).addReg(RA).addReg(SP) - .addImm(0); - BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB); - BuildMI(*LongBrMBB, Pos, DL, TII->get(Store)).addReg(A3).addReg(SP) - .addImm(RegSize)->setIsInsideBundle(); - - Pos = BalTgtMBB->begin(); - - BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), A3) - .addMBB(BalTgtMBB, MipsII::MO_ABS_HI); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), AT) - .addMBB(TgtMBB, MipsII::MO_ABS_HI); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), A3).addReg(A3) - .addMBB(BalTgtMBB, MipsII::MO_ABS_LO); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), AT).addReg(AT) - .addMBB(TgtMBB, MipsII::MO_ABS_LO); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(SUBu), AT).addReg(AT).addReg(A3); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDu), AT).addReg(RA).addReg(AT); - - if (N64) { - BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), A3) - .addMBB(BalTgtMBB, MipsII::MO_HIGHEST); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(LUi), RA) - .addMBB(TgtMBB, MipsII::MO_HIGHEST); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), A3).addReg(A3) - .addMBB(BalTgtMBB, MipsII::MO_HIGHER); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), RA).addReg(RA) - .addMBB(TgtMBB, MipsII::MO_HIGHER); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DSLL), A3).addReg(A3) - .addImm(32); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DSLL), RA).addReg(RA) - .addImm(32); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(SUBu), AT).addReg(AT).addReg(A3); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDu), AT).addReg(AT).addReg(RA); - I.Size += 4 * 8; + int64_t TgtAddress = MBBInfos[TgtMBB->getNumber()].Address; + int64_t Offset = TgtAddress - (I.Address + I.Size - 20); + int64_t Lo = SignExtend64<16>(Offset & 0xffff); + int64_t Hi = SignExtend64<16>(((Offset + 0x8000) >> 16) & 0xffff); + + if (ABI != MipsSubtarget::N64) { + // $longbr: + // addiu $sp, $sp, -8 + // sw $ra, 0($sp) + // bal $baltgt + // lui $at, %hi($tgt - $baltgt) + // $baltgt: + // addiu $at, $at, %lo($tgt - $baltgt) + // addu $at, $ra, $at + // lw $ra, 0($sp) + // jr $at + // addiu $sp, $sp, 8 + // $fallthrough: + // + + Pos = LongBrMBB->begin(); + + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP) + .addReg(Mips::SP).addImm(-8); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::SW)).addReg(Mips::RA) + .addReg(Mips::SP).addImm(0); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LUi), Mips::AT).addImm(Hi) + ->setIsInsideBundle(); + + Pos = BalTgtMBB->begin(); + + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::AT) + .addReg(Mips::AT).addImm(Lo); + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDu), Mips::AT) + .addReg(Mips::RA).addReg(Mips::AT); + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LW), Mips::RA) + .addReg(Mips::SP).addImm(0); + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JR)).addReg(Mips::AT); + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP) + .addReg(Mips::SP).addImm(8)->setIsInsideBundle(); + } else { + // $longbr: + // daddiu $sp, $sp, -16 + // sd $ra, 0($sp) + // lui64 $at, %highest($tgt - $baltgt) + // daddiu $at, $at, %higher($tgt - $baltgt) + // dsll $at, $at, 16 + // daddiu $at, $at, %hi($tgt - $baltgt) + // bal $baltgt + // dsll $at, $at, 16 + // $baltgt: + // daddiu $at, $at, %lo($tgt - $baltgt) + // daddu $at, $ra, $at + // ld $ra, 0($sp) + // jr64 $at + // daddiu $sp, 
$sp, 16 + // $fallthrough: + // + + int64_t Higher = SignExtend64<16>(((Offset + 0x80008000) >> 32) & 0xffff); + int64_t Highest = + SignExtend64<16>(((Offset + 0x800080008000LL) >> 48) & 0xffff); + + Pos = LongBrMBB->begin(); + + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::SP_64) + .addReg(Mips::SP_64).addImm(-16); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::SD)).addReg(Mips::RA_64) + .addReg(Mips::SP_64).addImm(0); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LUi64), Mips::AT_64) + .addImm(Highest); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64) + .addReg(Mips::AT_64).addImm(Higher); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DSLL), Mips::AT_64) + .addReg(Mips::AT_64).addImm(16); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64) + .addReg(Mips::AT_64).addImm(Hi); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::BAL_BR)).addMBB(BalTgtMBB); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DSLL), Mips::AT_64) + .addReg(Mips::AT_64).addImm(16)->setIsInsideBundle(); + + Pos = BalTgtMBB->begin(); + + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::AT_64) + .addReg(Mips::AT_64).addImm(Lo); + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDu), Mips::AT_64) + .addReg(Mips::RA_64).addReg(Mips::AT_64); + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::LD), Mips::RA_64) + .addReg(Mips::SP_64).addImm(0); + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JR64)).addReg(Mips::AT_64); + BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::SP_64) + .addReg(Mips::SP_64).addImm(16)->setIsInsideBundle(); } - - BuildMI(*BalTgtMBB, Pos, DL, TII->get(Load), RA).addReg(SP).addImm(0); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(Load), A3).addReg(SP).addImm(RegSize); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(JR)).addReg(AT); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(ADDiu), SP).addReg(SP) - .addImm(RegSize * 2)->setIsInsideBundle(); - I.Size += 4 * 14; } else { // $longbr: // j $tgt @@ -353,7 +361,6 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) { LongBrMBB->addSuccessor(TgtMBB); BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::J)).addMBB(TgtMBB); BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::NOP))->setIsInsideBundle(); - I.Size += 4 * 2; } if (I.Br->isUnconditionalBranch()) { @@ -401,19 +408,34 @@ bool MipsLongBranch::runOnMachineFunction(MachineFunction &F) { if (!I->Br || I->HasLongBranch) continue; - if (!ForceLongBranch) - // Check if offset fits into 16-bit immediate field of branches. - if (isInt<16>(computeOffset(I->Br) / 4)) - continue; + // Check if offset fits into 16-bit immediate field of branches. + if (!ForceLongBranch && isInt<16>(computeOffset(I->Br) / 4)) + continue; - expandToLongBranch(*I); + I->HasLongBranch = true; + I->Size += LongBranchSeqSize * 4; ++LongBranches; EverMadeChange = MadeChange = true; } } - if (EverMadeChange) - MF->RenumberBlocks(); + if (!EverMadeChange) + return true; + + // Compute basic block addresses. + if (TM.getRelocationModel() == Reloc::PIC_) { + uint64_t Address = 0; + + for (I = MBBInfos.begin(); I != E; Address += I->Size, ++I) + I->Address = Address; + } + + // Do the expansion. + for (I = MBBInfos.begin(); I != E; ++I) + if (I->HasLongBranch) + expandToLongBranch(*I); + + MF->RenumberBlocks(); return true; } diff --git a/lib/Target/Mips/MipsMCInstLower.cpp b/lib/Target/Mips/MipsMCInstLower.cpp index d4c5e6d..5fa6339 100644 --- a/lib/Target/Mips/MipsMCInstLower.cpp +++ b/lib/Target/Mips/MipsMCInstLower.cpp @@ -11,7 +11,6 @@ // MCInst records. 
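The Lo/Hi (and, for N64, Higher/Highest) terms computed above follow the usual MIPS convention for splitting an offset into sign-extended 16-bit pieces: because each lower piece is sign-extended, the next piece is computed with a rounding term (0x8000, 0x80008000, ...) so that the pieces re-add to the original value. A small self-checking sketch of the 32-bit case; loPart/hiPart are invented names mirroring the expressions above, not code from the patch.

    #include <cassert>
    #include <cstdint>

    int16_t loPart(int64_t Off) { return (int16_t)(Off & 0xffff); }
    int16_t hiPart(int64_t Off) { return (int16_t)(((Off + 0x8000) >> 16) & 0xffff); }

    int main() {
      for (long long Off : {0x12345678LL, -0x1234568LL, 0x7fffLL, -0x8000LL}) {
        // lui $at, hi ; addiu $at, $at, lo  rebuilds Off modulo 2^32.
        uint32_t Rebuilt = ((uint32_t)(uint16_t)hiPart(Off) << 16) +
                           (uint32_t)(int32_t)loPart(Off);
        assert(Rebuilt == (uint32_t)Off);
      }
    }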
// //===----------------------------------------------------------------------===// - #include "MipsMCInstLower.h" #include "MipsAsmPrinter.h" #include "MipsInstrInfo.h" @@ -161,31 +160,3 @@ void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { } } -// If the D instruction has a shift amount that is greater -// than 31 (checked in calling routine), lower it to a D32 instruction -void MipsMCInstLower::LowerLargeShift(const MachineInstr *MI, - MCInst& Inst, - int64_t Shift) { - // rt - Inst.addOperand(LowerOperand(MI->getOperand(0))); - // rd - Inst.addOperand(LowerOperand(MI->getOperand(1))); - // saminus32 - Inst.addOperand(MCOperand::CreateImm(Shift)); - - switch (MI->getOpcode()) { - default: - // Calling function is not synchronized - llvm_unreachable("Unexpected shift instruction"); - break; - case Mips::DSLL: - Inst.setOpcode(Mips::DSLL32); - break; - case Mips::DSRL: - Inst.setOpcode(Mips::DSRL32); - break; - case Mips::DSRA: - Inst.setOpcode(Mips::DSRA32); - break; - } -} diff --git a/lib/Target/Mips/MipsMCInstLower.h b/lib/Target/Mips/MipsMCInstLower.h index 0abb996..c4a6016 100644 --- a/lib/Target/Mips/MipsMCInstLower.h +++ b/lib/Target/Mips/MipsMCInstLower.h @@ -33,12 +33,11 @@ public: MipsMCInstLower(MipsAsmPrinter &asmprinter); void Initialize(Mangler *mang, MCContext *C); void Lower(const MachineInstr *MI, MCInst &OutMI) const; - void LowerLargeShift(const MachineInstr *MI, MCInst &Inst, int64_t Shift); + MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const; private: MCOperand LowerSymbolOperand(const MachineOperand &MO, MachineOperandType MOTy, unsigned Offset) const; - MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const; }; } diff --git a/lib/Target/Mips/MipsMachineFunction.cpp b/lib/Target/Mips/MipsMachineFunction.cpp index 362173e..5ff19ab 100644 --- a/lib/Target/Mips/MipsMachineFunction.cpp +++ b/lib/Target/Mips/MipsMachineFunction.cpp @@ -43,4 +43,17 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() { return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC); } +bool MipsFunctionInfo::mips16SPAliasRegSet() const { + return Mips16SPAliasReg; +} +unsigned MipsFunctionInfo::getMips16SPAliasReg() { + // Return if it has already been initialized. + if (Mips16SPAliasReg) + return Mips16SPAliasReg; + + const TargetRegisterClass *RC; + RC=(const TargetRegisterClass*)&Mips::CPU16RegsRegClass; + return Mips16SPAliasReg = MF.getRegInfo().createVirtualRegister(RC); +} + void MipsFunctionInfo::anchor() { } diff --git a/lib/Target/Mips/MipsMachineFunction.h b/lib/Target/Mips/MipsMachineFunction.h index df3c4c0..bb45f92 100644 --- a/lib/Target/Mips/MipsMachineFunction.h +++ b/lib/Target/Mips/MipsMachineFunction.h @@ -39,55 +39,45 @@ class MipsFunctionInfo : public MachineFunctionInfo { /// relocation models. unsigned GlobalBaseReg; + /// Mips16SPAliasReg - keeps track of the virtual register initialized for + /// use as an alias for SP for use in load/store of halfword/byte from/to + /// the stack + unsigned Mips16SPAliasReg; + /// VarArgsFrameIndex - FrameIndex for start of varargs area. int VarArgsFrameIndex; - // Range of frame object indices. - // InArgFIRange: Range of indices of all frame objects created during call to - // LowerFormalArguments. - // OutArgFIRange: Range of indices of all frame objects created during call to - // LowerCall except for the frame object for restoring $gp. - std::pair InArgFIRange, OutArgFIRange; - unsigned MaxCallFrameSize; + /// True if function has a byval argument. 
+ bool HasByvalArg; - bool EmitNOAT; + /// Size of incoming argument area. + unsigned IncomingArgSize; public: MipsFunctionInfo(MachineFunction& MF) - : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), - VarArgsFrameIndex(0), InArgFIRange(std::make_pair(-1, 0)), - OutArgFIRange(std::make_pair(-1, 0)), MaxCallFrameSize(0), EmitNOAT(false) + : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), Mips16SPAliasReg(0), + VarArgsFrameIndex(0) {} - bool isInArgFI(int FI) const { - return FI <= InArgFIRange.first && FI >= InArgFIRange.second; - } - void setLastInArgFI(int FI) { InArgFIRange.second = FI; } - - bool isOutArgFI(int FI) const { - return FI <= OutArgFIRange.first && FI >= OutArgFIRange.second; - } - void extendOutArgFIRange(int FirstFI, int LastFI) { - if (!OutArgFIRange.second) - // this must be the first time this function was called. - OutArgFIRange.first = FirstFI; - OutArgFIRange.second = LastFI; - } - unsigned getSRetReturnReg() const { return SRetReturnReg; } void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; } bool globalBaseRegSet() const; unsigned getGlobalBaseReg(); + bool mips16SPAliasRegSet() const; + unsigned getMips16SPAliasReg(); + int getVarArgsFrameIndex() const { return VarArgsFrameIndex; } void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; } - unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; } - void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; } + bool hasByvalArg() const { return HasByvalArg; } + void setFormalArgInfo(unsigned Size, bool HasByval) { + IncomingArgSize = Size; + HasByvalArg = HasByval; + } - bool getEmitNOAT() const { return EmitNOAT; } - void setEmitNOAT() { EmitNOAT = true; } + unsigned getIncomingArgSize() const { return IncomingArgSize; } }; } // end of namespace llvm diff --git a/lib/Target/Mips/MipsRegisterInfo.cpp b/lib/Target/Mips/MipsRegisterInfo.cpp index ae6ae3a..d8e0dd4 100644 --- a/lib/Target/Mips/MipsRegisterInfo.cpp +++ b/lib/Target/Mips/MipsRegisterInfo.cpp @@ -22,7 +22,6 @@ #include "llvm/Constants.h" #include "llvm/DebugInfo.h" #include "llvm/Type.h" -#include "llvm/Function.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineFunction.h" @@ -43,9 +42,8 @@ using namespace llvm; -MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST, - const TargetInstrInfo &tii) - : MipsGenRegisterInfo(Mips::RA), Subtarget(ST), TII(tii) {} +MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST) + : MipsGenRegisterInfo(Mips::RA), Subtarget(ST) {} unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; } @@ -83,11 +81,11 @@ MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const { BitVector MipsRegisterInfo:: getReservedRegs(const MachineFunction &MF) const { static const uint16_t ReservedCPURegs[] = { - Mips::ZERO, Mips::AT, Mips::K0, Mips::K1, Mips::SP + Mips::ZERO, Mips::K0, Mips::K1, Mips::SP }; static const uint16_t ReservedCPU64Regs[] = { - Mips::ZERO_64, Mips::AT_64, Mips::K0_64, Mips::K1_64, Mips::SP_64 + Mips::ZERO_64, Mips::K0_64, Mips::K1_64, Mips::SP_64 }; BitVector Reserved(getNumRegs()); @@ -96,41 +94,49 @@ getReservedRegs(const MachineFunction &MF) const { for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I) Reserved.set(ReservedCPURegs[I]); - if (Subtarget.hasMips64()) { - for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I) - Reserved.set(ReservedCPU64Regs[I]); + for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I) + Reserved.set(ReservedCPU64Regs[I]); + if (Subtarget.hasMips64()) { 
// Reserve all registers in AFGR64. for (RegIter Reg = Mips::AFGR64RegClass.begin(), EReg = Mips::AFGR64RegClass.end(); Reg != EReg; ++Reg) Reserved.set(*Reg); } else { - // Reserve all registers in CPU64Regs & FGR64. - for (RegIter Reg = Mips::CPU64RegsRegClass.begin(), - EReg = Mips::CPU64RegsRegClass.end(); Reg != EReg; ++Reg) - Reserved.set(*Reg); - + // Reserve all registers in FGR64. for (RegIter Reg = Mips::FGR64RegClass.begin(), EReg = Mips::FGR64RegClass.end(); Reg != EReg; ++Reg) Reserved.set(*Reg); } - // Reserve FP if this function should have a dedicated frame pointer register. if (MF.getTarget().getFrameLowering()->hasFP(MF)) { - Reserved.set(Mips::FP); - Reserved.set(Mips::FP_64); + if (Subtarget.inMips16Mode()) + Reserved.set(Mips::S0); + else { + Reserved.set(Mips::FP); + Reserved.set(Mips::FP_64); + } } // Reserve hardware registers. Reserved.set(Mips::HWR29); Reserved.set(Mips::HWR29_64); + // Reserve DSP control register. + Reserved.set(Mips::DSPCtrl); + // Reserve RA if in mips16 mode. if (Subtarget.inMips16Mode()) { Reserved.set(Mips::RA); Reserved.set(Mips::RA_64); } + // Reserve GP if small section is used. + if (Subtarget.useSmallSection()) { + Reserved.set(Mips::GP); + Reserved.set(Mips::GP_64); + } + return Reserved; } @@ -160,7 +166,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, "Instr doesn't have FrameIndex operand!"); } - DEBUG(errs() << "\nFunction : " << MF.getFunction()->getName() << "\n"; + DEBUG(errs() << "\nFunction : " << MF.getName() << "\n"; errs() << "<--------->\n" << MI); int FrameIndex = MI.getOperand(i).getIndex(); @@ -179,8 +185,12 @@ getFrameRegister(const MachineFunction &MF) const { const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); bool IsN64 = Subtarget.isABI_N64(); - return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) : - (IsN64 ? Mips::SP_64 : Mips::SP); + if (Subtarget.inMips16Mode()) + return TFI->hasFP(MF) ? Mips::S0 : Mips::SP; + else + return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) : + (IsN64 ? Mips::SP_64 : Mips::SP); + } unsigned MipsRegisterInfo:: diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h index 9a05e94..78adf7f 100644 --- a/lib/Target/Mips/MipsRegisterInfo.h +++ b/lib/Target/Mips/MipsRegisterInfo.h @@ -22,16 +22,14 @@ namespace llvm { class MipsSubtarget; -class TargetInstrInfo; class Type; class MipsRegisterInfo : public MipsGenRegisterInfo { protected: const MipsSubtarget &Subtarget; - const TargetInstrInfo &TII; public: - MipsRegisterInfo(const MipsSubtarget &Subtarget, const TargetInstrInfo &tii); + MipsRegisterInfo(const MipsSubtarget &Subtarget); /// getRegisterNumbering - Given the enum value for some register, e.g. /// Mips::RA, return the number that it corresponds to (e.g. 31). diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td index b255e42..391c19e 100644 --- a/lib/Target/Mips/MipsRegisterInfo.td +++ b/lib/Target/Mips/MipsRegisterInfo.td @@ -14,6 +14,8 @@ let Namespace = "Mips" in { def sub_fpeven : SubRegIndex; def sub_fpodd : SubRegIndex; def sub_32 : SubRegIndex; +def sub_lo : SubRegIndex; +def sub_hi : SubRegIndex; } // We have banks of 32 registers each. 
@@ -71,7 +73,7 @@ class HWR num, string n> : MipsReg { let Namespace = "Mips" in { // General Purpose Registers def ZERO : MipsGPRReg< 0, "zero">, DwarfRegNum<[0]>; - def AT : MipsGPRReg< 1, "at">, DwarfRegNum<[1]>; + def AT : MipsGPRReg< 1, "1">, DwarfRegNum<[1]>; def V0 : MipsGPRReg< 2, "2">, DwarfRegNum<[2]>; def V1 : MipsGPRReg< 3, "3">, DwarfRegNum<[3]>; def A0 : MipsGPRReg< 4, "4">, DwarfRegNum<[4]>; @@ -105,7 +107,7 @@ let Namespace = "Mips" in { // General Purpose 64-bit Registers def ZERO_64 : Mips64GPRReg< 0, "zero", [ZERO]>, DwarfRegNum<[0]>; - def AT_64 : Mips64GPRReg< 1, "at", [AT]>, DwarfRegNum<[1]>; + def AT_64 : Mips64GPRReg< 1, "1", [AT]>, DwarfRegNum<[1]>; def V0_64 : Mips64GPRReg< 2, "2", [V0]>, DwarfRegNum<[2]>; def V1_64 : Mips64GPRReg< 3, "3", [V1]>, DwarfRegNum<[3]>; def A0_64 : Mips64GPRReg< 4, "4", [A0]>, DwarfRegNum<[4]>; @@ -239,16 +241,29 @@ let Namespace = "Mips" in { // fcc0 register def FCC0 : Register<"fcc0">; + // PC register + def PC : Register<"pc">; + // Hardware register $29 def HWR29 : Register<"29">; def HWR29_64 : Register<"29">; + + // Accum registers + let SubRegIndices = [sub_lo, sub_hi] in + def AC0 : RegisterWithSubRegs<"ac0", [LO, HI]>; + def AC1 : Register<"ac1">; + def AC2 : Register<"ac2">; + def AC3 : Register<"ac3">; + + def DSPCtrl : Register<"dspctrl">; } //===----------------------------------------------------------------------===// // Register Classes //===----------------------------------------------------------------------===// -def CPURegs : RegisterClass<"Mips", [i32], 32, (add +class CPURegsClass regTypes> : + RegisterClass<"Mips", regTypes, 32, (add // Reserved ZERO, AT, // Return Values and Arguments @@ -262,6 +277,9 @@ def CPURegs : RegisterClass<"Mips", [i32], 32, (add // Reserved K0, K1, GP, SP, FP, RA)>; +def CPURegs : CPURegsClass<[i32]>; +def DSPRegs : CPURegsClass<[v4i8, v2i16]>; + def CPU64Regs : RegisterClass<"Mips", [i64], 64, (add // Reserved ZERO_64, AT_64, @@ -284,6 +302,7 @@ def CPU16Regs : RegisterClass<"Mips", [i32], 32, (add def CPURAReg : RegisterClass<"Mips", [i32], 32, (add RA)>; +def CPUSPReg : RegisterClass<"Mips", [i32], 32, (add SP)>; // 64bit fp: // * FGR64 - 32 64-bit registers @@ -319,3 +338,5 @@ def HILO64 : RegisterClass<"Mips", [i64], 64, (add HI64, LO64)>; def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>; def HWRegs64 : RegisterClass<"Mips", [i64], 32, (add HWR29_64)>; +// Accumulator Registers +def ACRegs : RegisterClass<"Mips", [i64], 64, (sequence "AC%u", 0, 3)>; diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp index 1c59847..03f5176 100644 --- a/lib/Target/Mips/MipsSEFrameLowering.cpp +++ b/lib/Target/Mips/MipsSEFrameLowering.cpp @@ -22,7 +22,8 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/CodeGen/RegisterScavenging.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" @@ -202,6 +203,19 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF, // Mark $fp as used if function has dedicated frame pointer. if (hasFP(MF)) MRI.setPhysRegUsed(FP); + + // Set scavenging frame index if necessary. + uint64_t MaxSPOffset = MF.getInfo()->getIncomingArgSize() + + estimateStackSize(MF); + + if (isInt<16>(MaxSPOffset)) + return; + + const TargetRegisterClass *RC = STI.isABI_N64() ? 
+ &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + int FI = MF.getFrameInfo()->CreateStackObject(RC->getSize(), + RC->getAlignment(), false); + RS->setScavengingFrameIndex(FI); } const MipsFrameLowering * diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp index eeb1de3..fb0f9df 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.cpp +++ b/lib/Target/Mips/MipsSEInstrInfo.cpp @@ -260,14 +260,55 @@ void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount, if (isInt<16>(Amount))// addi sp, sp, amount BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount); else { // Expand immediate that doesn't fit in 16-bit. - unsigned ATReg = STI.isABI_N64() ? Mips::AT_64 : Mips::AT; - - MBB.getParent()->getInfo()->setEmitNOAT(); - Mips::loadImmediate(Amount, STI.isABI_N64(), *this, MBB, I, DL, false, 0); - BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(ATReg); + unsigned Reg = loadImmediate(Amount, MBB, I, DL, 0); + BuildMI(MBB, I, DL, get(ADDu), SP).addReg(SP).addReg(Reg, RegState::Kill); } } +/// This function generates the sequence of instructions needed to get the +/// result of adding register REG and immediate IMM. +unsigned +MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB, + MachineBasicBlock::iterator II, DebugLoc DL, + unsigned *NewImm) const { + MipsAnalyzeImmediate AnalyzeImm; + const MipsSubtarget &STI = TM.getSubtarget(); + MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo(); + unsigned Size = STI.isABI_N64() ? 64 : 32; + unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi; + unsigned ZEROReg = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO; + const TargetRegisterClass *RC = STI.isABI_N64() ? + &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass; + bool LastInstrIsADDiu = NewImm; + + const MipsAnalyzeImmediate::InstSeq &Seq = + AnalyzeImm.Analyze(Imm, Size, LastInstrIsADDiu); + MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin(); + + assert(Seq.size() && (!LastInstrIsADDiu || (Seq.size() > 1))); + + // The first instruction can be a LUi, which is different from other + // instructions (ADDiu, ORI and SLL) in that it does not have a register + // operand. + unsigned Reg = RegInfo.createVirtualRegister(RC); + + if (Inst->Opc == LUi) + BuildMI(MBB, II, DL, get(LUi), Reg).addImm(SignExtend64<16>(Inst->ImmOpnd)); + else + BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(ZEROReg) + .addImm(SignExtend64<16>(Inst->ImmOpnd)); + + // Build the remaining instructions in Seq. + for (++Inst; Inst != Seq.end() - LastInstrIsADDiu; ++Inst) + BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(Reg, RegState::Kill) + .addImm(SignExtend64<16>(Inst->ImmOpnd)); + + if (LastInstrIsADDiu) + *NewImm = Inst->ImmOpnd; + + return Reg; +} + unsigned MipsSEInstrInfo::GetAnalyzableBrOpc(unsigned Opc) const { return (Opc == Mips::BEQ || Opc == Mips::BNE || Opc == Mips::BGTZ || Opc == Mips::BGEZ || Opc == Mips::BLTZ || Opc == Mips::BLEZ || diff --git a/lib/Target/Mips/MipsSEInstrInfo.h b/lib/Target/Mips/MipsSEInstrInfo.h index 346e74d..55b78b2 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.h +++ b/lib/Target/Mips/MipsSEInstrInfo.h @@ -15,7 +15,6 @@ #define MIPSSEINSTRUCTIONINFO_H #include "MipsInstrInfo.h" -#include "MipsAnalyzeImmediate.h" #include "MipsSERegisterInfo.h" namespace llvm { @@ -70,6 +69,13 @@ public: void adjustStackPtr(unsigned SP, int64_t Amount, MachineBasicBlock &MBB, MachineBasicBlock::iterator I) const; + /// Emit a series of instructions to load an immediate. 
If NewImm is a + /// non-NULL parameter, the last instruction is not emitted, but instead + /// its immediate operand is returned in NewImm. + unsigned loadImmediate(int64_t Imm, MachineBasicBlock &MBB, + MachineBasicBlock::iterator II, DebugLoc DL, + unsigned *NewImm) const; + private: virtual unsigned GetAnalyzableBrOpc(unsigned Opc) const; diff --git a/lib/Target/Mips/MipsSERegisterInfo.cpp b/lib/Target/Mips/MipsSERegisterInfo.cpp index 043a1ef..56b9ba9 100644 --- a/lib/Target/Mips/MipsSERegisterInfo.cpp +++ b/lib/Target/Mips/MipsSERegisterInfo.cpp @@ -26,6 +26,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" @@ -40,8 +41,18 @@ using namespace llvm; MipsSERegisterInfo::MipsSERegisterInfo(const MipsSubtarget &ST, - const TargetInstrInfo &TII) - : MipsRegisterInfo(ST, TII) {} + const MipsSEInstrInfo &I) + : MipsRegisterInfo(ST), TII(I) {} + +bool MipsSERegisterInfo:: +requiresRegisterScavenging(const MachineFunction &MF) const { + return true; +} + +bool MipsSERegisterInfo:: +requiresFrameIndexScavenging(const MachineFunction &MF) const { + return true; +} // This function eliminate ADJCALLSTACKDOWN, // ADJCALLSTACKUP pseudo instructions @@ -72,7 +83,6 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, MachineInstr &MI = *II; MachineFunction &MF = *MI.getParent()->getParent(); MachineFrameInfo *MFI = MF.getFrameInfo(); - MipsFunctionInfo *MipsFI = MF.getInfo(); const std::vector &CSI = MFI->getCalleeSavedInfo(); int MinCSFI = 0; @@ -91,8 +101,7 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, // getFrameRegister() returns. unsigned FrameReg; - if (MipsFI->isOutArgFI(FrameIndex) || - (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI)) + if (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI) FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP; else FrameReg = getFrameRegister(MF); @@ -104,14 +113,11 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, // - If the frame object is any of the following, its offset must be adjusted // by adding the size of the stack: // incoming argument, callee-saved register location or local variable. + bool IsKill = false; int64_t Offset; - if (MipsFI->isOutArgFI(FrameIndex)) - Offset = SPOffset; - else - Offset = SPOffset + (int64_t)StackSize; - - Offset += MI.getOperand(OpNo + 1).getImm(); + Offset = SPOffset + (int64_t)StackSize; + Offset += MI.getOperand(OpNo + 1).getImm(); DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n"); @@ -121,18 +127,17 @@ void MipsSERegisterInfo::eliminateFI(MachineBasicBlock::iterator II, MachineBasicBlock &MBB = *MI.getParent(); DebugLoc DL = II->getDebugLoc(); unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu; - unsigned ATReg = Subtarget.isABI_N64() ? 
Mips::AT_64 : Mips::AT; - MipsAnalyzeImmediate::Inst LastInst(0, 0); + unsigned NewImm; - MipsFI->setEmitNOAT(); - Mips::loadImmediate(Offset, Subtarget.isABI_N64(), TII, MBB, II, DL, true, - &LastInst); - BuildMI(MBB, II, DL, TII.get(ADDu), ATReg).addReg(FrameReg).addReg(ATReg); + unsigned Reg = TII.loadImmediate(Offset, MBB, II, DL, &NewImm); + BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(FrameReg) + .addReg(Reg, RegState::Kill); - FrameReg = ATReg; - Offset = SignExtend64<16>(LastInst.ImmOpnd); + FrameReg = Reg; + Offset = SignExtend64<16>(NewImm); + IsKill = true; } - MI.getOperand(OpNo).ChangeToRegister(FrameReg, false); + MI.getOperand(OpNo).ChangeToRegister(FrameReg, false, false, IsKill); MI.getOperand(OpNo + 1).ChangeToImmediate(Offset); } diff --git a/lib/Target/Mips/MipsSERegisterInfo.h b/lib/Target/Mips/MipsSERegisterInfo.h index 4b17b33..7437bd3 100644 --- a/lib/Target/Mips/MipsSERegisterInfo.h +++ b/lib/Target/Mips/MipsSERegisterInfo.h @@ -18,11 +18,18 @@ #include "MipsRegisterInfo.h" namespace llvm { +class MipsSEInstrInfo; class MipsSERegisterInfo : public MipsRegisterInfo { + const MipsSEInstrInfo &TII; + public: MipsSERegisterInfo(const MipsSubtarget &Subtarget, - const TargetInstrInfo &TII); + const MipsSEInstrInfo &TII); + + bool requiresRegisterScavenging(const MachineFunction &MF) const; + + bool requiresFrameIndexScavenging(const MachineFunction &MF) const; void eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, diff --git a/lib/Target/Mips/MipsSubtarget.cpp b/lib/Target/Mips/MipsSubtarget.cpp index 11ff809..930af4d 100644 --- a/lib/Target/Mips/MipsSubtarget.cpp +++ b/lib/Target/Mips/MipsSubtarget.cpp @@ -25,12 +25,14 @@ using namespace llvm; void MipsSubtarget::anchor() { } MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU, - const std::string &FS, bool little) : + const std::string &FS, bool little, + Reloc::Model RM) : MipsGenSubtargetInfo(TT, CPU, FS), MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little), IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false), IsLinux(true), HasSEInReg(false), HasCondMov(false), HasMulDivAdd(false), - HasMinMax(false), HasSwap(false), HasBitCount(false), InMips16Mode(false) + HasMinMax(false), HasSwap(false), HasBitCount(false), InMips16Mode(false), + HasDSP(false), HasDSPR2(false), IsAndroid(false) { std::string CPUName = CPU; if (CPUName.empty()) @@ -54,6 +56,9 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU, // Is the target system Linux ? if (TT.find("linux") == std::string::npos) IsLinux = false; + + // Set UseSmallSection. + UseSmallSection = !IsLinux && (RM == Reloc::Static); } bool diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h index ba15362..ff69237 100644 --- a/lib/Target/Mips/MipsSubtarget.h +++ b/lib/Target/Mips/MipsSubtarget.h @@ -65,6 +65,9 @@ protected: // isLinux - Target system is Linux. Is false we consider ELFOS for now. bool IsLinux; + // UseSmallSection - Small section is used. + bool UseSmallSection; + /// Features related to the presence of specific instructions. // HasSEInReg - SEB and SEH (signext in register) instructions. @@ -89,6 +92,9 @@ protected: // InMips16 -- can process Mips16 instructions bool InMips16Mode; + // HasDSP, HasDSPR2 -- supports DSP ASE. + bool HasDSP, HasDSPR2; + // IsAndroid -- target is android bool IsAndroid; @@ -109,7 +115,7 @@ public: /// This constructor initializes the data members to match that /// of the specified triple. 
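In the eliminateFI() hunk above, an offset that no longer fits the signed 16-bit immediate field is rebuilt from a scratch register (materialized by loadImmediate()) plus a sign-extended low part kept in the instruction. A stand-alone sketch of the arithmetic behind that split (the type and function names here are illustrative, not part of the patch):

  #include <cstdint>

  // Split Offset so that RegPart + Low16 == Offset; RegPart is what
  // loadImmediate() would materialize, Low16 stays in the memory instruction.
  struct SplitOffset { int64_t RegPart; int16_t Low16; };

  SplitOffset splitForImm16(int64_t Offset) {
    int16_t Low16 = static_cast<int16_t>(Offset & 0xffff);  // sign-extended low half
    return { Offset - Low16, Low16 };
  }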
MipsSubtarget(const std::string &TT, const std::string &CPU, - const std::string &FS, bool little); + const std::string &FS, bool little, Reloc::Model RM); /// ParseSubtargetFeatures - Parses features string setting specified /// subtarget options. Definition of function is auto generated by tblgen. @@ -131,8 +137,11 @@ public: bool isNotSingleFloat() const { return !IsSingleFloat; } bool hasVFPU() const { return HasVFPU; } bool inMips16Mode() const { return InMips16Mode; } + bool hasDSP() const { return HasDSP; } + bool hasDSPR2() const { return HasDSPR2; } bool isAndroid() const { return IsAndroid; } bool isLinux() const { return IsLinux; } + bool useSmallSection() const { return UseSmallSection; } bool hasStandardEncoding() const { return !inMips16Mode(); } diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp index 03a024a..983ee21 100644 --- a/lib/Target/Mips/MipsTargetMachine.cpp +++ b/lib/Target/Mips/MipsTargetMachine.cpp @@ -42,8 +42,8 @@ MipsTargetMachine(const Target &T, StringRef TT, CodeGenOpt::Level OL, bool isLittle) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), - Subtarget(TT, CPU, FS, isLittle), - DataLayout(isLittle ? + Subtarget(TT, CPU, FS, isLittle, RM), + DL(isLittle ? (Subtarget.isABI_N64() ? "e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" : "e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32") : @@ -52,7 +52,8 @@ MipsTargetMachine(const Target &T, StringRef TT, "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32")), InstrInfo(MipsInstrInfo::create(*this)), FrameLowering(MipsFrameLowering::create(*this, Subtarget)), - TLInfo(*this), TSInfo(*this), JITInfo() { + TLInfo(*this), TSInfo(*this), JITInfo(), + STTI(&TLInfo), VTTI(&TLInfo) { } void MipsebTargetMachine::anchor() { } diff --git a/lib/Target/Mips/MipsTargetMachine.h b/lib/Target/Mips/MipsTargetMachine.h index 21b49e6..b54f5ce 100644 --- a/lib/Target/Mips/MipsTargetMachine.h +++ b/lib/Target/Mips/MipsTargetMachine.h @@ -21,8 +21,9 @@ #include "MipsSelectionDAGInfo.h" #include "MipsSubtarget.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" +#include "llvm/Target/TargetTransformImpl.h" namespace llvm { class formatted_raw_ostream; @@ -30,12 +31,14 @@ class MipsRegisterInfo; class MipsTargetMachine : public LLVMTargetMachine { MipsSubtarget Subtarget; - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment const MipsInstrInfo *InstrInfo; const MipsFrameLowering *FrameLowering; MipsTargetLowering TLInfo; MipsSelectionDAGInfo TSInfo; MipsJITInfo JITInfo; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: MipsTargetMachine(const Target &T, StringRef TT, @@ -52,8 +55,8 @@ public: { return FrameLowering; } virtual const MipsSubtarget *getSubtargetImpl() const { return &Subtarget; } - virtual const TargetData *getTargetData() const - { return &DataLayout;} + virtual const DataLayout *getDataLayout() const + { return &DL;} virtual MipsJITInfo *getJITInfo() { return &JITInfo; } @@ -69,6 +72,13 @@ public: return &TSInfo; } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } + // Pass Pipeline Configuration virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); virtual bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter 
&JCE); diff --git a/lib/Target/Mips/MipsTargetObjectFile.cpp b/lib/Target/Mips/MipsTargetObjectFile.cpp index 04dc60a..881908b 100644 --- a/lib/Target/Mips/MipsTargetObjectFile.cpp +++ b/lib/Target/Mips/MipsTargetObjectFile.cpp @@ -13,7 +13,7 @@ #include "llvm/GlobalVariable.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCSectionELF.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/ELF.h" @@ -26,6 +26,7 @@ SSThreshold("mips-ssection-threshold", cl::Hidden, void MipsTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){ TargetLoweringObjectFileELF::Initialize(Ctx, TM); + InitializeELF(TM.Options.UseInitArray); SmallDataSection = getContext().getELFSection(".sdata", ELF::SHT_PROGBITS, @@ -60,9 +61,10 @@ bool MipsTargetObjectFile:: IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM, SectionKind Kind) const { - // Only use small section for non linux targets. const MipsSubtarget &Subtarget = TM.getSubtarget(); - if (Subtarget.isLinux()) + + // Return if small section is not available. + if (!Subtarget.useSmallSection()) return false; // Only global variables, not functions. @@ -80,7 +82,7 @@ IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM, return false; Type *Ty = GV->getType()->getElementType(); - return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty)); + return IsInSmallSection(TM.getDataLayout()->getTypeAllocSize(Ty)); } diff --git a/lib/Target/NVPTX/NVPTX.td b/lib/Target/NVPTX/NVPTX.td index ae7710e..7aee359 100644 --- a/lib/Target/NVPTX/NVPTX.td +++ b/lib/Target/NVPTX/NVPTX.td @@ -24,7 +24,30 @@ include "NVPTXInstrInfo.td" // - Need at least one feature to avoid generating zero sized array by // TableGen in NVPTXGenSubtarget.inc. //===----------------------------------------------------------------------===// -def FeatureDummy : SubtargetFeature<"dummy", "dummy", "true", "">; + +// SM Versions +def SM10 : SubtargetFeature<"sm_10", "SmVersion", "10", + "Target SM 1.0">; +def SM11 : SubtargetFeature<"sm_11", "SmVersion", "11", + "Target SM 1.1">; +def SM12 : SubtargetFeature<"sm_12", "SmVersion", "12", + "Target SM 1.2">; +def SM13 : SubtargetFeature<"sm_13", "SmVersion", "13", + "Target SM 1.3">; +def SM20 : SubtargetFeature<"sm_20", "SmVersion", "20", + "Target SM 2.0">; +def SM21 : SubtargetFeature<"sm_21", "SmVersion", "21", + "Target SM 2.1">; +def SM30 : SubtargetFeature<"sm_30", "SmVersion", "30", + "Target SM 3.0">; +def SM35 : SubtargetFeature<"sm_35", "SmVersion", "35", + "Target SM 3.5">; + +// PTX Versions +def PTX30 : SubtargetFeature<"ptx30", "PTXVersion", "30", + "Use PTX version 3.0">; +def PTX31 : SubtargetFeature<"ptx31", "PTXVersion", "31", + "Use PTX version 3.1">; //===----------------------------------------------------------------------===// // NVPTX supported processors. 
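In the MipsTargetObjectFile hunk above, the decision now keys off useSmallSection() and then compares the type's allocation size against the -mips-ssection-threshold value. A hedged restatement of that size test (the real helper is IsInSmallSection()):

  #include <cstdint>

  // Objects at or below the threshold go into .sdata/.sbss so they stay
  // reachable through a single 16-bit $gp-relative offset.
  bool fitsInSmallSection(uint64_t TypeAllocSize, uint64_t Threshold) {
    return TypeAllocSize > 0 && TypeAllocSize <= Threshold;
  }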
@@ -33,7 +56,14 @@ def FeatureDummy : SubtargetFeature<"dummy", "dummy", "true", "">; class Proc Features> : Processor; -def : Proc<"sm_10", [FeatureDummy]>; +def : Proc<"sm_10", [SM10]>; +def : Proc<"sm_11", [SM11]>; +def : Proc<"sm_12", [SM12]>; +def : Proc<"sm_13", [SM13]>; +def : Proc<"sm_20", [SM20]>; +def : Proc<"sm_21", [SM21]>; +def : Proc<"sm_30", [SM30]>; +def : Proc<"sm_35", [SM35]>; def NVPTXInstrInfo : InstrInfo { diff --git a/lib/Target/NVPTX/NVPTXAllocaHoisting.h b/lib/Target/NVPTX/NVPTXAllocaHoisting.h index 24b3bd5..c7cabf6 100644 --- a/lib/Target/NVPTX/NVPTXAllocaHoisting.h +++ b/lib/Target/NVPTX/NVPTXAllocaHoisting.h @@ -16,7 +16,7 @@ #include "llvm/CodeGen/MachineFunctionAnalysis.h" #include "llvm/Pass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" namespace llvm { @@ -31,7 +31,7 @@ public: NVPTXAllocaHoisting() : FunctionPass(ID) {} void getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired(); + AU.addRequired(); AU.addPreserved(); } diff --git a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp index f2b9616..0a885ce 100644 --- a/lib/Target/NVPTX/NVPTXAsmPrinter.cpp +++ b/lib/Target/NVPTX/NVPTXAsmPrinter.cpp @@ -68,7 +68,54 @@ static cl::optInterleaveSrc("nvptx-emit-src", cl::location(llvm::InterleaveSrcInPtx)); +namespace { +/// DiscoverDependentGlobals - Return a set of GlobalVariables on which \p V +/// depends. +void DiscoverDependentGlobals(Value *V, + DenseSet &Globals) { + if (GlobalVariable *GV = dyn_cast(V)) + Globals.insert(GV); + else { + if (User *U = dyn_cast(V)) { + for (unsigned i = 0, e = U->getNumOperands(); i != e; ++i) { + DiscoverDependentGlobals(U->getOperand(i), Globals); + } + } + } +} +/// VisitGlobalVariableForEmission - Add \p GV to the list of GlobalVariable +/// instances to be emitted, but only after any dependents have been added +/// first. +void VisitGlobalVariableForEmission(GlobalVariable *GV, + SmallVectorImpl &Order, + DenseSet &Visited, + DenseSet &Visiting) { + // Have we already visited this one? + if (Visited.count(GV)) return; + + // Do we have a circular dependency? + if (Visiting.count(GV)) + report_fatal_error("Circular dependency found in global variable set"); + + // Start visiting this global + Visiting.insert(GV); + + // Make sure we visit all dependents first + DenseSet Others; + for (unsigned i = 0, e = GV->getNumOperands(); i != e; ++i) + DiscoverDependentGlobals(GV->getOperand(i), Others); + + for (DenseSet::iterator I = Others.begin(), + E = Others.end(); I != E; ++I) + VisitGlobalVariableForEmission(*I, Order, Visited, Visiting); + + // Now we can visit ourself + Order.push_back(GV); + Visited.insert(GV); + Visiting.erase(GV); +} +} // @TODO: This is a copy from AsmPrinter.cpp. The function is static, so we // cannot just link to the existing version. @@ -98,10 +145,10 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { switch (CE->getOpcode()) { default: // If the code isn't optimized, there may be outstanding folding - // opportunities. Attempt to fold the expression using TargetData as a + // opportunities. Attempt to fold the expression using DataLayout as a // last resort before giving up. 
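The DiscoverDependentGlobals()/VisitGlobalVariableForEmission() pair added above is a depth-first, cycle-checked ordering: every global is printed only after the globals it references, since ptxas cannot handle forward references. A self-contained sketch of the same scheme over generic nodes (Node and its Deps list are illustrative only):

  #include <set>
  #include <stdexcept>
  #include <vector>

  struct Node { std::vector<Node*> Deps; };

  void visit(Node *N, std::vector<Node*> &Order,
             std::set<Node*> &Visited, std::set<Node*> &Visiting) {
    if (Visited.count(N)) return;               // already ordered
    if (!Visiting.insert(N).second)             // back-edge: circular dependency
      throw std::runtime_error("circular dependency in global variable set");
    for (Node *Dep : N->Deps)                   // order all dependencies first
      visit(Dep, Order, Visited, Visiting);
    Order.push_back(N);                         // now N itself may be emitted
    Visited.insert(N);
    Visiting.erase(N);
  }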
if (Constant *C = - ConstantFoldConstantExpression(CE, AP.TM.getTargetData())) + ConstantFoldConstantExpression(CE, AP.TM.getDataLayout())) if (C != CE) return LowerConstant(C, AP); @@ -115,7 +162,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { report_fatal_error(OS.str()); } case Instruction::GetElementPtr: { - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); // Generate a symbolic expression for the byte address const Constant *PtrVal = CE->getOperand(0); SmallVector IdxVec(CE->op_begin()+1, CE->op_end()); @@ -145,7 +192,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { return LowerConstant(CE->getOperand(0), AP); case Instruction::IntToPtr: { - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); // Handle casts to pointers by changing them into casts to the appropriate // integer type. This promotes constant folding and simplifies this code. Constant *Op = CE->getOperand(0); @@ -155,7 +202,7 @@ const MCExpr *nvptx::LowerConstant(const Constant *CV, AsmPrinter &AP) { } case Instruction::PtrToInt: { - const TargetData &TD = *AP.TM.getTargetData(); + const DataLayout &TD = *AP.TM.getDataLayout(); // Support only foldable casts to/from pointers that can be eliminated by // changing the pointer to the appropriately sized integer type. Constant *Op = CE->getOperand(0); @@ -270,7 +317,7 @@ void NVPTXAsmPrinter::EmitInstruction(const MachineInstr *MI) { void NVPTXAsmPrinter::printReturnValStr(const Function *F, raw_ostream &O) { - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); const TargetLowering *TLI = TM.getTargetLowering(); Type *Ty = F->getReturnType(); @@ -874,7 +921,7 @@ bool NVPTXAsmPrinter::doInitialization (Module &M) { const_cast(getObjFileLowering()) .Initialize(OutContext, TM); - Mang = new Mangler(OutContext, *TM.getTargetData()); + Mang = new Mangler(OutContext, *TM.getDataLayout()); // Emit header before any dwarf directives are emitted below. emitHeader(M, OS1); @@ -893,10 +940,27 @@ bool NVPTXAsmPrinter::doInitialization (Module &M) { emitDeclarations(M, OS2); - // Print out module-level global variables here. + // As ptxas does not support forward references of globals, we need to first + // sort the list of module-level globals in def-use order. We visit each + // global variable in order, and ensure that we emit it *after* its dependent + // globals. We use a little extra memory maintaining both a set and a list to + // have fast searches while maintaining a strict ordering. + SmallVector Globals; + DenseSet GVVisited; + DenseSet GVVisiting; + + // Visit each global variable, in order for (Module::global_iterator I = M.global_begin(), E = M.global_end(); - I != E; ++I) - printModuleLevelGV(I, OS2); + I != E; ++I) + VisitGlobalVariableForEmission(I, Globals, GVVisited, GVVisiting); + + assert(GVVisited.size() == M.getGlobalList().size() && + "Missed a global variable"); + assert(GVVisiting.size() == 0 && "Did not fully process a global variable"); + + // Print out module-level global variables in proper order + for (unsigned i = 0, e = Globals.size(); i != e; ++i) + printModuleLevelGV(Globals[i], OS2); OS2 << '\n'; @@ -910,7 +974,8 @@ void NVPTXAsmPrinter::emitHeader (Module &M, raw_ostream &O) { O << "//\n"; O << "\n"; - O << ".version 3.0\n"; + unsigned PTXVersion = nvptxSubtarget.getPTXVersion(); + O << ".version " << (PTXVersion / 10) << "." 
<< (PTXVersion % 10) << "\n"; O << ".target "; O << nvptxSubtarget.getTargetName(); @@ -1023,7 +1088,7 @@ void NVPTXAsmPrinter::printModuleLevelGV(GlobalVariable* GVar, raw_ostream &O, return; } - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); // GlobalVariables are always constant pointers themselves. const PointerType *PTy = GVar->getType(); @@ -1296,7 +1361,7 @@ std::string NVPTXAsmPrinter::getPTXFundamentalTypeStr(const Type *Ty, void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar, raw_ostream &O) { - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); // GlobalVariables are always constant pointers themselves. const PointerType *PTy = GVar->getType(); @@ -1342,7 +1407,7 @@ void NVPTXAsmPrinter::emitPTXGlobalVariable(const GlobalVariable* GVar, static unsigned int -getOpenCLAlignment(const TargetData *TD, +getOpenCLAlignment(const DataLayout *TD, Type *Ty) { if (Ty->isPrimitiveType() || Ty->isIntegerTy() || isa(Ty)) return TD->getPrefTypeAlignment(Ty); @@ -1421,7 +1486,7 @@ void NVPTXAsmPrinter::printParamName(int paramIndex, raw_ostream &O) { void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, raw_ostream &O) { - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); const AttrListPtr &PAL = F->getAttributes(); const TargetLowering *TLI = TM.getTargetLowering(); Function::const_arg_iterator I, E; @@ -1456,7 +1521,8 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, continue; } - if (PAL.paramHasAttr(paramIndex+1, Attribute::ByVal) == false) { + if (PAL.getParamAttributes(paramIndex+1). + hasAttribute(Attributes::ByVal) == false) { // Just a scalar const PointerType *PTy = dyn_cast(Ty); if (isKernelFunc) { @@ -1524,6 +1590,9 @@ void NVPTXAsmPrinter::emitFunctionParamList(const Function *F, // = PAL.getparamalignment // size = typeallocsize of element type unsigned align = PAL.getParamAlignment(paramIndex+1); + if (align == 0) + align = TD->getABITypeAlignment(ETy); + unsigned sz = TD->getTypeAllocSize(ETy); O << "\t.param .align " << align << " .b8 "; @@ -1714,7 +1783,7 @@ void NVPTXAsmPrinter::printScalarConstant(Constant *CPV, raw_ostream &O) { void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes, AggBuffer *aggBuffer) { - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); if (isa(CPV) || CPV->isNullValue()) { int s = TD->getTypeAllocSize(CPV->getType()); @@ -1843,7 +1912,7 @@ void NVPTXAsmPrinter::bufferLEByte(Constant *CPV, int Bytes, void NVPTXAsmPrinter::bufferAggregateConstant(Constant *CPV, AggBuffer *aggBuffer) { - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); int Bytes; // Old constants diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp index 6ea10ea..f1a99d7 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -174,10 +174,11 @@ NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM) setTruncStoreAction(MVT::f64, MVT::f32, Expand); // PTX does not support load / store predicate registers - setOperationAction(ISD::LOAD, MVT::i1, Expand); + setOperationAction(ISD::LOAD, MVT::i1, Custom); + setOperationAction(ISD::STORE, MVT::i1, Custom); + setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote); - setOperationAction(ISD::STORE, MVT::i1, Expand); setTruncStoreAction(MVT::i64, MVT::i1, Expand); 
setTruncStoreAction(MVT::i32, MVT::i1, Expand); setTruncStoreAction(MVT::i16, MVT::i1, Expand); @@ -402,7 +403,7 @@ std::string NVPTXTargetLowering::getPrototype(Type *retTy, if (isABI) { unsigned align = Outs[i].Flags.getByValAlign(); - unsigned sz = getTargetData()->getTypeAllocSize(ETy); + unsigned sz = getDataLayout()->getTypeAllocSize(ETy); O << ".param .align " << align << " .b8 "; O << "_"; @@ -655,11 +656,11 @@ NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, else { if (Func) { // direct call if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment)) - retAlignment = getTargetData()->getABITypeAlignment(retTy); + retAlignment = getDataLayout()->getABITypeAlignment(retTy); } else { // indirect call const CallInst *CallI = dyn_cast(CS->getInstruction()); if (!llvm::getAlign(*CallI, 0, retAlignment)) - retAlignment = getTargetData()->getABITypeAlignment(retTy); + retAlignment = getDataLayout()->getABITypeAlignment(retTy); } SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue); SDValue DeclareRetOps[] = { Chain, DAG.getConstant(retAlignment, @@ -856,11 +857,64 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::EXTRACT_SUBVECTOR: return Op; case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); + case ISD::STORE: return LowerSTORE(Op, DAG); + case ISD::LOAD: return LowerLOAD(Op, DAG); default: llvm_unreachable("Custom lowering not defined for operation"); } } + +// v = ld i1* addr +// => +// v1 = ld i8* addr +// v = trunc v1 to i1 +SDValue NVPTXTargetLowering:: +LowerLOAD(SDValue Op, SelectionDAG &DAG) const { + SDNode *Node = Op.getNode(); + LoadSDNode *LD = cast(Node); + DebugLoc dl = Node->getDebugLoc(); + assert(LD->getExtensionType() == ISD::NON_EXTLOAD) ; + assert(Node->getValueType(0) == MVT::i1 && + "Custom lowering for i1 load only"); + SDValue newLD = DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(), + LD->getPointerInfo(), + LD->isVolatile(), LD->isNonTemporal(), + LD->isInvariant(), + LD->getAlignment()); + SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD); + // The legalizer (the caller) is expecting two values from the legalized + // load, so we build a MergeValues node for it. See ExpandUnalignedLoad() + // in LegalizeDAG.cpp which also uses MergeValues. 
+ SDValue Ops[] = {result, LD->getChain()}; + return DAG.getMergeValues(Ops, 2, dl); +} + +// st i1 v, addr +// => +// v1 = zxt v to i8 +// st i8, addr +SDValue NVPTXTargetLowering:: +LowerSTORE(SDValue Op, SelectionDAG &DAG) const { + SDNode *Node = Op.getNode(); + DebugLoc dl = Node->getDebugLoc(); + StoreSDNode *ST = cast(Node); + SDValue Tmp1 = ST->getChain(); + SDValue Tmp2 = ST->getBasePtr(); + SDValue Tmp3 = ST->getValue(); + assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only"); + unsigned Alignment = ST->getAlignment(); + bool isVolatile = ST->isVolatile(); + bool isNonTemporal = ST->isNonTemporal(); + Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, + MVT::i8, Tmp3); + SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, + ST->getPointerInfo(), isVolatile, + isNonTemporal, Alignment); + return Result; +} + + SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, int idx, EVT v) const { @@ -916,7 +970,7 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl &InVals) const { MachineFunction &MF = DAG.getMachineFunction(); - const TargetData *TD = getTargetData(); + const DataLayout *TD = getDataLayout(); const Function *F = MF.getFunction(); const AttrListPtr &PAL = F->getAttributes(); @@ -965,7 +1019,7 @@ NVPTXTargetLowering::LowerFormalArguments(SDValue Chain, // to newly created nodes. The SDNOdes for params have to // appear in the same order as their order of appearance // in the original function. "idx+1" holds that order. - if (PAL.paramHasAttr(i+1, Attribute::ByVal) == false) { + if (PAL.getParamAttributes(i+1).hasAttribute(Attributes::ByVal) == false) { // A plain scalar. if (isABI || isKernel) { // If ABI, load from the param symbol diff --git a/lib/Target/NVPTX/NVPTXISelLowering.h b/lib/Target/NVPTX/NVPTXISelLowering.h index 86246e6..94a177c 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.h +++ b/lib/Target/NVPTX/NVPTXISelLowering.h @@ -138,6 +138,9 @@ private: SDValue getParamHelpSymbol(SelectionDAG &DAG, int idx); SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const; }; } // namespace llvm diff --git a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp index 56b2372..9273931 100644 --- a/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp +++ b/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp @@ -21,7 +21,7 @@ #include "llvm/LLVMContext.h" #include "llvm/Module.h" #include "llvm/Support/InstIterator.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; @@ -110,7 +110,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) { SmallVector aggrMemcpys; SmallVector aggrMemsets; - TargetData *TD = &getAnalysis(); + DataLayout *TD = &getAnalysis(); LLVMContext &Context = F.getParent()->getContext(); // diff --git a/lib/Target/NVPTX/NVPTXLowerAggrCopies.h b/lib/Target/NVPTX/NVPTXLowerAggrCopies.h index ac7f150..b150c69 100644 --- a/lib/Target/NVPTX/NVPTXLowerAggrCopies.h +++ b/lib/Target/NVPTX/NVPTXLowerAggrCopies.h @@ -17,7 +17,7 @@ #include "llvm/Pass.h" #include "llvm/CodeGen/MachineFunctionAnalysis.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" namespace llvm { @@ -28,7 +28,7 @@ struct NVPTXLowerAggrCopies : public FunctionPass { NVPTXLowerAggrCopies() : FunctionPass(ID) {} void getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired(); + AU.addRequired(); 
AU.addPreserved(); } diff --git a/lib/Target/NVPTX/NVPTXSubtarget.cpp b/lib/Target/NVPTX/NVPTXSubtarget.cpp index 6aadd43..7b62cce 100644 --- a/lib/Target/NVPTX/NVPTXSubtarget.cpp +++ b/lib/Target/NVPTX/NVPTXSubtarget.cpp @@ -34,16 +34,18 @@ DriverInterface(cl::desc("Choose driver interface:"), NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU, const std::string &FS, bool is64Bit) -:NVPTXGenSubtargetInfo(TT, "", FS), // Don't pass CPU to subtarget, - // because we don't register all - // nvptx targets. - Is64Bit(is64Bit) { +: NVPTXGenSubtargetInfo(TT, CPU, FS), + Is64Bit(is64Bit), + PTXVersion(0), + SmVersion(10) { drvInterface = DriverInterface; // Provide the default CPU if none std::string defCPU = "sm_10"; + ParseSubtargetFeatures((CPU.empty() ? defCPU : CPU), FS); + // Get the TargetName from the FS if available if (FS.empty() && CPU.empty()) TargetName = defCPU; @@ -52,6 +54,12 @@ NVPTXSubtarget::NVPTXSubtarget(const std::string &TT, const std::string &CPU, else llvm_unreachable("we are not using FeatureStr"); - // Set up the SmVersion - SmVersion = atoi(TargetName.c_str()+3); + // We default to PTX 3.1, but we cannot just default to it in the initializer + // since the attribute parser checks if the given option is >= the default. + // So if we set ptx31 as the default, the ptx30 attribute would never match. + // Instead, we use 0 as the default and manually set 31 if the default is + // used. + if (PTXVersion == 0) { + PTXVersion = 31; + } } diff --git a/lib/Target/NVPTX/NVPTXSubtarget.h b/lib/Target/NVPTX/NVPTXSubtarget.h index 8f2a629..3cfd971 100644 --- a/lib/Target/NVPTX/NVPTXSubtarget.h +++ b/lib/Target/NVPTX/NVPTXSubtarget.h @@ -25,13 +25,17 @@ namespace llvm { class NVPTXSubtarget : public NVPTXGenSubtargetInfo { - - unsigned int SmVersion; + std::string TargetName; NVPTX::DrvInterface drvInterface; - bool dummy; // For the 'dummy' feature, see NVPTX.td bool Is64Bit; + // PTX version x.y is represented as 10*x+y, e.g. 3.1 == 31 + unsigned PTXVersion; + + // SM version x.y is represented as 10*x+y, e.g. 3.1 == 31 + unsigned int SmVersion; + public: /// This constructor initializes the data members to match that /// of the specified module. 
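The subtarget fields above store PTX and SM versions as 10*major + minor (PTX 3.1 becomes 31), which is what lets emitHeader() print the ".version" directive with plain division. A tiny illustrative sketch of that mapping:

  #include <cstdio>

  // PTXVersion is encoded as 10*major + minor, e.g. 31 for PTX 3.1.
  void printVersionDirective(unsigned PTXVersion) {
    std::printf(".version %u.%u\n", PTXVersion / 10, PTXVersion % 10);
  }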
@@ -69,6 +73,8 @@ public: NVPTX::DrvInterface getDrvInterface() const { return drvInterface; } std::string getTargetName() const { return TargetName; } + unsigned getPTXVersion() const { return PTXVersion; } + void ParseSubtargetFeatures(StringRef CPU, StringRef FS); std::string getDataLayout() const { diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp index 433f415..cbb4900 100644 --- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp +++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp @@ -32,7 +32,7 @@ #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" @@ -71,8 +71,9 @@ NVPTXTargetMachine::NVPTXTargetMachine(const Target &T, bool is64bit) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS, is64bit), - DataLayout(Subtarget.getDataLayout()), - InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit) + DL(Subtarget.getDataLayout()), + InstrInfo(*this), TLInfo(*this), TSInfo(*this), FrameLowering(*this,is64bit), + STTI(&TLInfo), VTTI(&TLInfo) /*FrameInfo(TargetFrameInfo::StackGrowsUp, 8, 0)*/ { } diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.h b/lib/Target/NVPTX/NVPTXTargetMachine.h index b3f9cac..11bc9d4 100644 --- a/lib/Target/NVPTX/NVPTXTargetMachine.h +++ b/lib/Target/NVPTX/NVPTXTargetMachine.h @@ -21,10 +21,11 @@ #include "NVPTXSubtarget.h" #include "NVPTXFrameLowering.h" #include "ManagedStringPool.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetSelectionDAGInfo.h" +#include "llvm/Target/TargetTransformImpl.h" namespace llvm { @@ -32,7 +33,7 @@ namespace llvm { /// class NVPTXTargetMachine : public LLVMTargetMachine { NVPTXSubtarget Subtarget; - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment NVPTXInstrInfo InstrInfo; NVPTXTargetLowering TLInfo; TargetSelectionDAGInfo TSInfo; @@ -44,6 +45,9 @@ class NVPTXTargetMachine : public LLVMTargetMachine { // Hold Strings that can be free'd all together with NVPTXTargetMachine ManagedStringPool ManagedStrPool; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; + //bool addCommonCodeGenPasses(PassManagerBase &, CodeGenOpt::Level, // bool DisableVerify, MCContext *&OutCtx); @@ -58,7 +62,7 @@ public: return &FrameLowering; } virtual const NVPTXInstrInfo *getInstrInfo() const { return &InstrInfo; } - virtual const TargetData *getTargetData() const { return &DataLayout;} + virtual const DataLayout *getDataLayout() const { return &DL;} virtual const NVPTXSubtarget *getSubtargetImpl() const { return &Subtarget;} virtual const NVPTXRegisterInfo *getRegisterInfo() const { @@ -72,6 +76,12 @@ public: virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const { return &TSInfo; } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } //virtual bool addInstSelector(PassManagerBase &PM, // CodeGenOpt::Level OptLevel); diff --git a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp index 
d175e3e..3d58306 100644 --- a/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp +++ b/lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp @@ -136,21 +136,21 @@ void PPCInstPrinter::printPredicateOperand(const MCInst *MI, unsigned OpNo, void PPCInstPrinter::printS5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - char Value = MI->getOperand(OpNo).getImm(); - Value = (Value << (32-5)) >> (32-5); + int Value = MI->getOperand(OpNo).getImm(); + Value = SignExtend32<5>(Value); O << (int)Value; } void PPCInstPrinter::printU5ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - unsigned char Value = MI->getOperand(OpNo).getImm(); + unsigned int Value = MI->getOperand(OpNo).getImm(); assert(Value <= 31 && "Invalid u5imm argument!"); O << (unsigned int)Value; } void PPCInstPrinter::printU6ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { - unsigned char Value = MI->getOperand(OpNo).getImm(); + unsigned int Value = MI->getOperand(OpNo).getImm(); assert(Value <= 63 && "Invalid u6imm argument!"); O << (unsigned int)Value; } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp index 48de583..87ecb13 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp @@ -29,9 +29,14 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) { case FK_Data_1: case FK_Data_2: case FK_Data_4: + case FK_Data_8: + case PPC::fixup_ppc_toc: return Value; + case PPC::fixup_ppc_lo14: + case PPC::fixup_ppc_toc16_ds: + return (Value & 0xffff) << 2; case PPC::fixup_ppc_brcond14: - return Value & 0x3ffc; + return Value & 0xfffc; case PPC::fixup_ppc_br24: return Value & 0x3fffffc; #if 0 @@ -41,6 +46,7 @@ static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) { case PPC::fixup_ppc_ha16: return ((Value >> 16) + ((Value & 0x8000) ? 
1 : 0)) & 0xffff; case PPC::fixup_ppc_lo16: + case PPC::fixup_ppc_toc16: return Value & 0xffff; } } @@ -72,7 +78,10 @@ public: { "fixup_ppc_brcond14", 16, 14, MCFixupKindInfo::FKF_IsPCRel }, { "fixup_ppc_lo16", 16, 16, 0 }, { "fixup_ppc_ha16", 16, 16, 0 }, - { "fixup_ppc_lo14", 16, 14, 0 } + { "fixup_ppc_lo14", 16, 14, 0 }, + { "fixup_ppc_toc", 0, 64, 0 }, + { "fixup_ppc_toc16", 16, 16, 0 }, + { "fixup_ppc_toc16_ds", 16, 14, 0 } }; if (Kind < FirstTargetFixupKind) @@ -181,7 +190,7 @@ namespace { -MCAsmBackend *llvm::createPPCAsmBackend(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createPPCAsmBackend(const Target &T, StringRef TT, StringRef CPU) { if (Triple(TT).isOSDarwin()) return new DarwinPPCAsmBackend(T); diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp index a197981..dc93f71 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp @@ -11,6 +11,8 @@ #include "MCTargetDesc/PPCMCTargetDesc.h" #include "llvm/MC/MCELFObjectWriter.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCValue.h" using namespace llvm; @@ -21,9 +23,15 @@ namespace { virtual ~PPCELFObjectWriter(); protected: + virtual unsigned getRelocTypeInner(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const; virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup, bool IsPCRel, bool IsRelocWithSymbol, int64_t Addend) const; + virtual const MCSymbol *undefinedExplicitRelSym(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const; virtual void adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset); }; } @@ -36,11 +44,13 @@ PPCELFObjectWriter::PPCELFObjectWriter(bool Is64Bit, uint8_t OSABI) PPCELFObjectWriter::~PPCELFObjectWriter() { } -unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target, - const MCFixup &Fixup, - bool IsPCRel, - bool IsRelocWithSymbol, - int64_t Addend) const { +unsigned PPCELFObjectWriter::getRelocTypeInner(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const +{ + MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ? 
+ MCSymbolRefExpr::VK_None : Target.getSymA()->getKind(); + // determine the type of the relocation unsigned Type; if (IsPCRel) { @@ -61,17 +71,53 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target, Type = ELF::R_PPC_ADDR24; break; case PPC::fixup_ppc_brcond14: - Type = ELF::R_PPC_ADDR14_BRTAKEN; // XXX: or BRNTAKEN?_ + Type = ELF::R_PPC_ADDR14; // XXX: or BRNTAKEN?_ break; case PPC::fixup_ppc_ha16: - Type = ELF::R_PPC_ADDR16_HA; + switch (Modifier) { + default: llvm_unreachable("Unsupported Modifier"); + case MCSymbolRefExpr::VK_PPC_TPREL16_HA: + Type = ELF::R_PPC_TPREL16_HA; + break; + case MCSymbolRefExpr::VK_None: + Type = ELF::R_PPC_ADDR16_HA; + break; + } break; case PPC::fixup_ppc_lo16: - Type = ELF::R_PPC_ADDR16_LO; + switch (Modifier) { + default: llvm_unreachable("Unsupported Modifier"); + case MCSymbolRefExpr::VK_PPC_TPREL16_LO: + Type = ELF::R_PPC_TPREL16_LO; + break; + case MCSymbolRefExpr::VK_None: + Type = ELF::R_PPC_ADDR16_LO; + break; + } break; case PPC::fixup_ppc_lo14: Type = ELF::R_PPC_ADDR14; break; + case PPC::fixup_ppc_toc: + Type = ELF::R_PPC64_TOC; + break; + case PPC::fixup_ppc_toc16: + Type = ELF::R_PPC64_TOC16; + break; + case PPC::fixup_ppc_toc16_ds: + Type = ELF::R_PPC64_TOC16_DS; + break; + case FK_Data_8: + switch (Modifier) { + default: llvm_unreachable("Unsupported Modifier"); + case MCSymbolRefExpr::VK_PPC_TOC: + Type = ELF::R_PPC64_TOC; + break; + case MCSymbolRefExpr::VK_None: + Type = ELF::R_PPC64_ADDR64; + break; + } + break; case FK_Data_4: Type = ELF::R_PPC_ADDR32; break; @@ -83,11 +129,41 @@ unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target, return Type; } +unsigned PPCELFObjectWriter::GetRelocType(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel, + bool IsRelocWithSymbol, + int64_t Addend) const { + return getRelocTypeInner(Target, Fixup, IsPCRel); +} + +const MCSymbol *PPCELFObjectWriter::undefinedExplicitRelSym(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel) const { + assert(Target.getSymA() && "SymA cannot be 0"); + const MCSymbol &Symbol = Target.getSymA()->getSymbol().AliasedSymbol(); + + unsigned RelocType = getRelocTypeInner(Target, Fixup, IsPCRel); + + // The .odp creation emits a relocation against the symbol ".TOC." which + // create a R_PPC64_TOC relocation. However the relocation symbol name + // in final object creation should be NULL, since the symbol does not + // really exist, it is just the reference to TOC base for the current + // object file. + bool EmitThisSym = RelocType != ELF::R_PPC64_TOC; + + if (EmitThisSym && !Symbol.isTemporary()) + return &Symbol; + return NULL; +} + void PPCELFObjectWriter:: adjustFixupOffset(const MCFixup &Fixup, uint64_t &RelocOffset) { switch ((unsigned)Fixup.getKind()) { case PPC::fixup_ppc_ha16: case PPC::fixup_ppc_lo16: + case PPC::fixup_ppc_toc16: + case PPC::fixup_ppc_toc16_ds: RelocOffset += 2; break; default: diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h index b3c889e..37b265e 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h +++ b/lib/Target/PowerPC/MCTargetDesc/PPCFixupKinds.h @@ -34,6 +34,16 @@ enum Fixups { /// fixup_ppc_lo14 - A 14-bit fixup corresponding to lo16(_foo) for instrs /// like 'std'. fixup_ppc_lo14, + + /// fixup_ppc_toc - Insert value of TOC base (.TOC.). + fixup_ppc_toc, + + /// fixup_ppc_toc16 - A 16-bit signed fixup relative to the TOC base. 
+ fixup_ppc_toc16, + + /// fixup_ppc_toc16_ds - A 14-bit signed fixup relative to the TOC base with + /// implied 2 zero bits + fixup_ppc_toc16_ds, // Marker LastTargetFixupKind, diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp index 245b457..215aa40 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCAsmInfo.cpp @@ -59,12 +59,10 @@ PPCLinuxMCAsmInfo::PPCLinuxMCAsmInfo(bool is64Bit) { HasLEB128 = true; // Target asm supports leb128 directives (little-endian) // Exceptions handling - if (!is64Bit) - ExceptionsType = ExceptionHandling::DwarfCFI; + ExceptionsType = ExceptionHandling::DwarfCFI; ZeroDirective = "\t.space\t"; Data64bitsDirective = is64Bit ? "\t.quad\t" : 0; - LCOMMDirectiveType = LCOMM::NoAlignment; AssemblerDialect = 0; // Old-Style mnemonics. } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp index f652422..2118302 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCCodeEmitter.cpp @@ -15,7 +15,9 @@ #include "MCTargetDesc/PPCBaseInfo.h" #include "MCTargetDesc/PPCFixupKinds.h" #include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" #include "llvm/ADT/Statistic.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/ErrorHandling.h" @@ -25,16 +27,28 @@ STATISTIC(MCNumEmitted, "Number of MC instructions emitted"); namespace { class PPCMCCodeEmitter : public MCCodeEmitter { - PPCMCCodeEmitter(const PPCMCCodeEmitter &); // DO NOT IMPLEMENT - void operator=(const PPCMCCodeEmitter &); // DO NOT IMPLEMENT - + PPCMCCodeEmitter(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION; + void operator=(const PPCMCCodeEmitter &) LLVM_DELETED_FUNCTION; + + const MCSubtargetInfo &STI; + Triple TT; + public: PPCMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti, - MCContext &ctx) { + MCContext &ctx) + : STI(sti), TT(STI.getTargetTriple()) { } ~PPCMCCodeEmitter() {} + bool is64BitMode() const { + return (STI.getFeatureBits() & PPC::Feature64Bit) != 0; + } + + bool isSVR4ABI() const { + return TT.isMacOSX() == 0; + } + unsigned getDirectBrEncoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups) const; unsigned getCondBrEncoding(const MCInst &MI, unsigned OpNo, @@ -61,11 +75,19 @@ public: SmallVectorImpl &Fixups) const; void EncodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl &Fixups) const { - unsigned Bits = getBinaryCodeForInstr(MI, Fixups); + uint64_t Bits = getBinaryCodeForInstr(MI, Fixups); + + // BL8_NOP_ELF and BLA8_NOP_ELF are both 8 bytes in size because of the + // following 'nop'. + unsigned Size = 4; // FIXME: Have Desc.getSize() return the correct value! + unsigned Opcode = MI.getOpcode(); + if (Opcode == PPC::BL8_NOP_ELF || Opcode == PPC::BLA8_NOP_ELF) + Size = 8; // Output the constant in big endian byte order. - for (unsigned i = 0; i != 4; ++i) { - OS << (char)(Bits >> 24); + int ShiftValue = (Size * 8) - 8; + for (unsigned i = 0; i != Size; ++i) { + OS << (char)(Bits >> ShiftValue); Bits <<= 8; } @@ -140,8 +162,12 @@ unsigned PPCMCCodeEmitter::getMemRIEncoding(const MCInst &MI, unsigned OpNo, return (getMachineOpValue(MI, MO, Fixups) & 0xFFFF) | RegBits; // Add a fixup for the displacement field.
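EncodeInstruction() above now emits either 4 or 8 bytes (BL8_NOP_ELF and BLA8_NOP_ELF carry a trailing nop), always most-significant byte first. A stand-alone restatement of that loop, assuming the encoding sits in the low Size*8 bits of Bits:

  #include <cstdint>
  #include <string>

  std::string emitBigEndian(uint64_t Bits, unsigned Size /* 4 or 8 */) {
    std::string Out;
    int Shift = static_cast<int>(Size * 8) - 8;    // position of the most significant byte
    for (unsigned i = 0; i != Size; ++i) {
      Out.push_back(static_cast<char>((Bits >> Shift) & 0xff));
      Bits <<= 8;                                  // promote the next byte to the top
    }
    return Out;
  }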
- Fixups.push_back(MCFixup::Create(0, MO.getExpr(), - (MCFixupKind)PPC::fixup_ppc_lo16)); + if (isSVR4ABI() && is64BitMode()) + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_toc16)); + else + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_lo16)); return RegBits; } @@ -158,8 +184,12 @@ unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo, return (getMachineOpValue(MI, MO, Fixups) & 0x3FFF) | RegBits; // Add a fixup for the branch target. - Fixups.push_back(MCFixup::Create(0, MO.getExpr(), - (MCFixupKind)PPC::fixup_ppc_lo14)); + if (isSVR4ABI() && is64BitMode()) + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_toc16_ds)); + else + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), + (MCFixupKind)PPC::fixup_ppc_lo14)); return RegBits; } @@ -168,7 +198,9 @@ unsigned PPCMCCodeEmitter:: get_crbitm_encoding(const MCInst &MI, unsigned OpNo, SmallVectorImpl &Fixups) const { const MCOperand &MO = MI.getOperand(OpNo); - assert((MI.getOpcode() == PPC::MTCRF || MI.getOpcode() == PPC::MFOCRF) && + assert((MI.getOpcode() == PPC::MTCRF || + MI.getOpcode() == PPC::MFOCRF || + MI.getOpcode() == PPC::MTCRF8) && (MO.getReg() >= PPC::CR0 && MO.getReg() <= PPC::CR7)); return 0x80 >> getPPCRegisterNumbering(MO.getReg()); } diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp index 6568e82..4c2578d 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.cpp @@ -70,7 +70,7 @@ static MCAsmInfo *createPPCMCAsmInfo(const Target &T, StringRef TT) { // Initial state of the frame pointer is R1. MachineLocation Dst(MachineLocation::VirtualFP); - MachineLocation Src(PPC::R1, 0); + MachineLocation Src(isPPC64? PPC::X1 : PPC::R1, 0); MAI->addInitialFrameState(0, Dst, Src); return MAI; diff --git a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h index 7162e15..a0e4cf3 100644 --- a/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h +++ b/lib/Target/PowerPC/MCTargetDesc/PPCMCTargetDesc.h @@ -36,7 +36,7 @@ MCCodeEmitter *createPPCMCCodeEmitter(const MCInstrInfo &MCII, const MCSubtargetInfo &STI, MCContext &Ctx); -MCAsmBackend *createPPCAsmBackend(const Target &T, StringRef TT); +MCAsmBackend *createPPCAsmBackend(const Target &T, StringRef TT, StringRef CPU); /// createPPCELFObjectWriter - Construct an PPC ELF object writer. 
MCObjectWriter *createPPCELFObjectWriter(raw_ostream &OS, diff --git a/lib/Target/PowerPC/PPC.td b/lib/Target/PowerPC/PPC.td index b7f1688..cb15dad 100644 --- a/lib/Target/PowerPC/PPC.td +++ b/lib/Target/PowerPC/PPC.td @@ -35,6 +35,10 @@ def Directive970 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_970", "">; def Directive32 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_32", "">; def Directive64 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_64", "">; def DirectiveA2 : SubtargetFeature<"", "DarwinDirective", "PPC::DIR_A2", "">; +def DirectiveE500mc : SubtargetFeature<"", "DarwinDirective", + "PPC::DIR_E500mc", "">; +def DirectiveE5500 : SubtargetFeature<"", "DarwinDirective", + "PPC::DIR_E5500", "">; def DirectivePwr6: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR6", "">; def DirectivePwr7: SubtargetFeature<"", "DarwinDirective", "PPC::DIR_PWR7", "">; @@ -94,6 +98,12 @@ def : Processor<"g5", G5Itineraries, [Directive970, FeatureAltivec, FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX, Feature64Bit /*, Feature64BitRegs */]>; +def : ProcessorModel<"e500mc", PPCE500mcModel, + [DirectiveE500mc, FeatureMFOCRF, + FeatureSTFIWX, FeatureBookE, FeatureISEL]>; +def : ProcessorModel<"e5500", PPCE5500Model, + [DirectiveE5500, FeatureMFOCRF, Feature64Bit, + FeatureSTFIWX, FeatureBookE, FeatureISEL]>; def : Processor<"a2", PPCA2Itineraries, [DirectiveA2, FeatureBookE, FeatureMFOCRF, FeatureFSqrt, FeatureSTFIWX, FeatureISEL, diff --git a/lib/Target/PowerPC/PPCAsmPrinter.cpp b/lib/Target/PowerPC/PPCAsmPrinter.cpp index f76b89c..15d690b 100644 --- a/lib/Target/PowerPC/PPCAsmPrinter.cpp +++ b/lib/Target/PowerPC/PPCAsmPrinter.cpp @@ -54,12 +54,13 @@ #include "llvm/Support/ELF.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/SmallString.h" +#include "llvm/ADT/MapVector.h" using namespace llvm; namespace { class PPCAsmPrinter : public AsmPrinter { protected: - DenseMap TOC; + MapVector TOC; const PPCSubtarget &Subtarget; uint64_t TOCLabelID; public: @@ -109,6 +110,8 @@ namespace { bool doFinalization(Module &M); virtual void EmitFunctionEntryLabel(); + + void EmitFunctionBodyEnd(); }; /// PPCDarwinAsmPrinter - PowerPC assembly printer, customized for Darwin/Mac @@ -282,8 +285,22 @@ bool PPCAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant, const char *ExtraCode, raw_ostream &O) { - if (ExtraCode && ExtraCode[0]) - return true; // Unknown modifier. + if (ExtraCode && ExtraCode[0]) { + if (ExtraCode[1] != 0) return true; // Unknown modifier. + + switch (ExtraCode[0]) { + default: return true; // Unknown modifier. + case 'y': // A memory reference for an X-form instruction + { + const char *RegName = "r0"; + if (!Subtarget.isDarwin()) RegName = stripRegisterPrefix(RegName); + O << RegName << ", "; + printOperand(MI, OpNo, O); + return false; + } + } + } + assert(MI->getOperand(OpNo).isReg()); O << "0("; printOperand(MI, OpNo, O); @@ -345,23 +362,37 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) { OutStreamer.EmitLabel(PICBase); return; } + case PPC::LDtocJTI: + case PPC::LDtocCPT: case PPC::LDtoc: { // Transform %X3 = LDtoc , %X2 LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin()); - + // Change the opcode to LD, and the global address operand to be a // reference to the TOC entry we will synthesize later. TmpInst.setOpcode(PPC::LD); const MachineOperand &MO = MI->getOperand(1); - assert(MO.isGlobal()); - - // Map symbol -> label of TOC entry. 
- MCSymbol *&TOCEntry = TOC[Mang->getSymbol(MO.getGlobal())]; - if (TOCEntry == 0) - TOCEntry = GetTempSymbol("C", TOCLabelID++); - + + // Map symbol -> label of TOC entry + assert(MO.isGlobal() || MO.isCPI() || MO.isJTI()); + MCSymbol *MOSymbol = 0; + if (MO.isGlobal()) + MOSymbol = Mang->getSymbol(MO.getGlobal()); + else if (MO.isCPI()) + MOSymbol = GetCPISymbol(MO.getIndex()); + else if (MO.isJTI()) + MOSymbol = GetJTISymbol(MO.getIndex()); + MCSymbol *&TOCEntry = TOC[MOSymbol]; + // To avoid name clash check if the name already exists. + while (TOCEntry == 0) { + if (OutContext.LookupSymbol(Twine(MAI->getPrivateGlobalPrefix()) + + "C" + Twine(TOCLabelID++)) == 0) { + TOCEntry = GetTempSymbol("C", TOCLabelID); + } + } + const MCExpr *Exp = - MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC, + MCSymbolRefExpr::Create(TOCEntry, MCSymbolRefExpr::VK_PPC_TOC_ENTRY, OutContext); TmpInst.getOperand(1) = MCOperand::CreateExpr(Exp); OutStreamer.EmitInstruction(TmpInst); @@ -404,11 +435,17 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() { OutStreamer.EmitValueToAlignment(8); MCSymbol *Symbol1 = OutContext.GetOrCreateSymbol(".L." + Twine(CurrentFnSym->getName())); - MCSymbol *Symbol2 = OutContext.GetOrCreateSymbol(StringRef(".TOC.@tocbase")); + // Generates a R_PPC64_ADDR64 (from FK_DATA_8) relocation for the function + // entry point. OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol1, OutContext), - Subtarget.isPPC64() ? 8 : 4/*size*/, 0/*addrspace*/); - OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol2, OutContext), - Subtarget.isPPC64() ? 8 : 4/*size*/, 0/*addrspace*/); + 8/*size*/, 0/*addrspace*/); + MCSymbol *Symbol2 = OutContext.GetOrCreateSymbol(StringRef(".TOC.")); + // Generates a R_PPC64_TOC relocation for TOC base insertion. + OutStreamer.EmitValue(MCSymbolRefExpr::Create(Symbol2, + MCSymbolRefExpr::VK_PPC_TOC, OutContext), + 8/*size*/, 0/*addrspace*/); + // Emit a null environment pointer. + OutStreamer.EmitIntValue(0, 8 /* size */, 0 /* addrspace */); OutStreamer.SwitchSection(Current); MCSymbol *RealFnSym = OutContext.GetOrCreateSymbol( @@ -419,7 +456,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() { bool PPCLinuxAsmPrinter::doFinalization(Module &M) { - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); bool isPPC64 = TD->getPointerSizeInBits() == 64; @@ -429,18 +466,34 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) { SectionKind::getReadOnly()); OutStreamer.SwitchSection(Section); - // FIXME: This is nondeterminstic! - for (DenseMap::iterator I = TOC.begin(), + for (MapVector::iterator I = TOC.begin(), E = TOC.end(); I != E; ++I) { OutStreamer.EmitLabel(I->second); - OutStreamer.EmitRawText("\t.tc " + Twine(I->first->getName()) + - "[TC]," + I->first->getName()); + MCSymbol *S = OutContext.GetOrCreateSymbol(I->first->getName()); + OutStreamer.EmitTCEntry(*S); } } return AsmPrinter::doFinalization(M); } +/// EmitFunctionBodyEnd - Print the traceback table before the .size +/// directive. +/// +void PPCLinuxAsmPrinter::EmitFunctionBodyEnd() { + // Only the 64-bit target requires a traceback table. For now, + // we only emit the word of zeroes that GDB requires to find + // the end of the function, and zeroes for the eight-byte + // mandatory fields. + // FIXME: We should fill in the eight-byte mandatory fields as described in + // the PPC64 ELF ABI (this is a low-priority item because GDB does not + // currently make use of these fields). 
+ if (Subtarget.isPPC64()) { + OutStreamer.EmitIntValue(0, 4/*size*/); + OutStreamer.EmitIntValue(0, 8/*size*/); + } +} + void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) { static const char *const CPUDirectives[] = { "", @@ -453,6 +506,8 @@ void PPCDarwinAsmPrinter::EmitStartOfAsmFile(Module &M) { "ppc750", "ppc970", "ppcA2", + "ppce500mc", + "ppce5500", "power6", "power7", "ppc64" @@ -508,7 +563,7 @@ static MCSymbol *GetAnonSym(MCSymbol *Sym, MCContext &Ctx) { void PPCDarwinAsmPrinter:: EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { - bool isPPC64 = TM.getTargetData()->getPointerSizeInBits() == 64; + bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64; const TargetLoweringObjectFileMachO &TLOFMacho = static_cast(getObjFileLowering()); @@ -603,7 +658,7 @@ EmitFunctionStubs(const MachineModuleInfoMachO::SymbolListTy &Stubs) { bool PPCDarwinAsmPrinter::doFinalization(Module &M) { - bool isPPC64 = TM.getTargetData()->getPointerSizeInBits() == 64; + bool isPPC64 = TM.getDataLayout()->getPointerSizeInBits() == 64; // Darwin/PPC always uses mach-o. const TargetLoweringObjectFileMachO &TLOFMacho = diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td index b2b5364..3f87e88 100644 --- a/lib/Target/PowerPC/PPCCallingConv.td +++ b/lib/Target/PowerPC/PPCCallingConv.td @@ -12,12 +12,19 @@ // //===----------------------------------------------------------------------===// +/// CCIfSubtarget - Match if the current subtarget has a feature F. +class CCIfSubtarget + : CCIf().", F), A>; + //===----------------------------------------------------------------------===// // Return Value Calling Convention //===----------------------------------------------------------------------===// // Return-value convention for PowerPC def RetCC_PPC : CallingConv<[ + // On PPC64, integer return values are always promoted to i64 + CCIfType<[i32], CCIfSubtarget<"isPPC64()", CCPromoteToType>>, + CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>, CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>, diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp index c24afa9..caf7bf2 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -13,6 +13,7 @@ #include "PPCFrameLowering.h" #include "PPCInstrInfo.h" +#include "PPCInstrBuilder.h" #include "PPCMachineFunctionInfo.h" #include "llvm/Function.h" #include "llvm/CodeGen/MachineFrameInfo.h" @@ -49,6 +50,11 @@ static const uint16_t VRRegNo[] = { /// to manipulate the VRSAVE register, even though it uses vector registers. /// This can happen when the only registers used are known to be live in or out /// of the function. Remove all of the VRSAVE related code from the function. +/// FIXME: The removal of the code results in a compile failure at -O0 when the +/// function contains a function call, as the GPR containing original VRSAVE +/// contents is spilled and reloaded around the call. Without the prolog code, +/// the spill instruction refers to an undefined register. This code needs +/// to account for all uses of that GPR. 
static void RemoveVRSaveCode(MachineInstr *MI) { MachineBasicBlock *Entry = MI->getParent(); MachineFunction *MF = Entry->getParent(); @@ -168,6 +174,11 @@ static void HandleVRSaveUpdate(MachineInstr *MI, const TargetInstrInfo &TII) { MI->eraseFromParent(); } +static bool spillsCR(const MachineFunction &MF) { + const PPCFunctionInfo *FuncInfo = MF.getInfo(); + return FuncInfo->isCRSpilled(); +} + /// determineFrameLayout - Determine the size of the frame and maximum call /// frame size. void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const { @@ -184,13 +195,22 @@ void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const { // If we are a leaf function, and use up to 224 bytes of stack space, // don't have a frame pointer, calls, or dynamic alloca then we do not need - // to adjust the stack pointer (we fit in the Red Zone). - bool DisableRedZone = MF.getFunction()->hasFnAttr(Attribute::NoRedZone); - // FIXME SVR4 The 32-bit SVR4 ABI has no red zone. + // to adjust the stack pointer (we fit in the Red Zone). For 64-bit + // SVR4, we also require a stack frame if we need to spill the CR, + // since this spill area is addressed relative to the stack pointer. + bool DisableRedZone = MF.getFunction()->getFnAttributes(). + hasAttribute(Attributes::NoRedZone); + // FIXME SVR4 The 32-bit SVR4 ABI has no red zone. However, it can + // still generate stackless code if all local vars are reg-allocated. + // Try: (FrameSize <= 224 + // || (FrameSize == 0 && Subtarget.isPPC32 && Subtarget.isSVR4ABI())) if (!DisableRedZone && FrameSize <= 224 && // Fits in red zone. !MFI->hasVarSizedObjects() && // No dynamic alloca. !MFI->adjustsStack() && // No calls. + !(Subtarget.isPPC64() && // No 64-bit SVR4 CRsave. + Subtarget.isSVR4ABI() + && spillsCR(MF)) && (!ALIGN_STACK || MaxAlign <= TargetAlign)) { // No special alignment. // No need for frame MFI->setStackSize(0); @@ -241,7 +261,7 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const { // Naked functions have no stack frame pushed, so we don't have a frame // pointer. - if (MF.getFunction()->hasFnAttr(Attribute::Naked)) + if (MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked)) return false; return MF.getTarget().Options.DisableFramePointerElim(MF) || @@ -268,12 +288,13 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { // Scan the prolog, looking for an UPDATE_VRSAVE instruction. If we find it, // process it. - for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) { - if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) { - HandleVRSaveUpdate(MBBI, TII); - break; + if (!Subtarget.isSVR4ABI()) + for (unsigned i = 0; MBBI != MBB.end(); ++i, ++MBBI) { + if (MBBI->getOpcode() == PPC::UPDATE_VRSAVE) { + HandleVRSaveUpdate(MBBI, TII); + break; + } } - } // Move MBBI back to the beginning of the function. MBBI = MBB.begin(); @@ -488,7 +509,6 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { // Add callee saved registers to move list. const std::vector &CSI = MFI->getCalleeSavedInfo(); for (unsigned I = 0, E = CSI.size(); I != E; ++I) { - int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx()); unsigned Reg = CSI[I].getReg(); if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue; @@ -497,6 +517,25 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const { if (PPC::CRBITRCRegClass.contains(Reg)) continue; + // For SVR4, don't emit a move for the CR spill slot if we haven't + // spilled CRs. 
+ if (Subtarget.isSVR4ABI() + && (PPC::CR2 <= Reg && Reg <= PPC::CR4) + && !spillsCR(MF)) + continue; + + // For 64-bit SVR4 when we have spilled CRs, the spill location + // is SP+8, not a frame-relative slot. + if (Subtarget.isSVR4ABI() + && Subtarget.isPPC64() + && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) { + MachineLocation CSDst(PPC::X1, 8); + MachineLocation CSSrc(PPC::CR2); + Moves.push_back(MachineMove(Label, CSDst, CSSrc)); + continue; + } + + int Offset = MFI->getObjectOffset(CSI[I].getFrameIdx()); MachineLocation CSDst(MachineLocation::VirtualFP, Offset); MachineLocation CSSrc(Reg); Moves.push_back(MachineMove(Label, CSDst, CSSrc)); @@ -714,11 +753,6 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF, } } -static bool spillsCR(const MachineFunction &MF) { - const PPCFunctionInfo *FuncInfo = MF.getInfo(); - return FuncInfo->isCRSpilled(); -} - /// MustSaveLR - Return true if this function requires that we save the LR /// register onto the stack in the prolog and restore it in the epilog of the /// function. @@ -808,7 +842,6 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF) bool HasGPSaveArea = false; bool HasG8SaveArea = false; bool HasFPSaveArea = false; - bool HasCRSaveArea = false; bool HasVRSAVESaveArea = false; bool HasVRSaveArea = false; @@ -843,10 +876,9 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF) if (Reg < MinFPR) { MinFPR = Reg; } -// FIXME SVR4: Disable CR save area for now. } else if (PPC::CRBITRCRegClass.contains(Reg) || PPC::CRRCRegClass.contains(Reg)) { -// HasCRSaveArea = true; + ; // do nothing, as we already know whether CRs are spilled } else if (PPC::VRSAVERCRegClass.contains(Reg)) { HasVRSAVESaveArea = true; } else if (PPC::VRRCRegClass.contains(Reg)) { @@ -926,16 +958,21 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF) } } - // The CR save area is below the general register save area. - if (HasCRSaveArea) { - // FIXME SVR4: Is it actually possible to have multiple elements in CSI - // which have the CR/CRBIT register class? + // For 32-bit only, the CR save area is below the general register + // save area. For 64-bit SVR4, the CR save area is addressed relative + // to the stack pointer and hence does not need an adjustment here. + // Only CR2 (the first nonvolatile spilled) has an associated frame + // index so that we have a single uniform save area. + if (spillsCR(MF) && !(Subtarget.isPPC64() && Subtarget.isSVR4ABI())) { // Adjust the frame index of the CR spill slot. for (unsigned i = 0, e = CSI.size(); i != e; ++i) { unsigned Reg = CSI[i].getReg(); - if (PPC::CRBITRCRegClass.contains(Reg) || - PPC::CRRCRegClass.contains(Reg)) { + if ((Subtarget.isSVR4ABI() && Reg == PPC::CR2) + // Leave Darwin logic as-is. + || (!Subtarget.isSVR4ABI() && + (PPC::CRBITRCRegClass.contains(Reg) || + PPC::CRRCRegClass.contains(Reg)))) { int FI = CSI[i].getFrameIdx(); FFI->setObjectOffset(FI, LowerBound + FFI->getObjectOffset(FI)); @@ -973,3 +1010,184 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF) } } } + +bool +PPCFrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const { + + // Currently, this function only handles SVR4 32- and 64-bit ABIs. + // Return false otherwise to maintain pre-existing behavior. 
+ if (!Subtarget.isSVR4ABI()) + return false; + + MachineFunction *MF = MBB.getParent(); + const PPCInstrInfo &TII = + *static_cast(MF->getTarget().getInstrInfo()); + DebugLoc DL; + bool CRSpilled = false; + + for (unsigned i = 0, e = CSI.size(); i != e; ++i) { + unsigned Reg = CSI[i].getReg(); + // CR2 through CR4 are the nonvolatile CR fields. + bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4; + + if (CRSpilled && IsCRField) + continue; + + // Add the callee-saved register as live-in; it's killed at the spill. + MBB.addLiveIn(Reg); + + // Insert the spill to the stack frame. + if (IsCRField) { + CRSpilled = true; + // The first time we see a CR field, store the whole CR into the + // save slot via GPR12 (available in the prolog for 32- and 64-bit). + if (Subtarget.isPPC64()) { + // 64-bit: SP+8 + MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::X12)); + MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::STW)) + .addReg(PPC::X12, + getKillRegState(true)) + .addImm(8) + .addReg(PPC::X1)); + } else { + // 32-bit: FP-relative. Note that we made sure CR2-CR4 all have + // the same frame index in PPCRegisterInfo::hasReservedSpillSlot. + MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12)); + MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW)) + .addReg(PPC::R12, + getKillRegState(true)), + CSI[i].getFrameIdx())); + } + + // Record that we spill the CR in this function. + PPCFunctionInfo *FuncInfo = MF->getInfo(); + FuncInfo->setSpillsCR(); + } else { + const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); + TII.storeRegToStackSlot(MBB, MI, Reg, true, + CSI[i].getFrameIdx(), RC, TRI); + } + } + return true; +} + +static void +restoreCRs(bool isPPC64, bool CR2Spilled, bool CR3Spilled, bool CR4Spilled, + MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, + const std::vector &CSI, unsigned CSIIndex) { + + MachineFunction *MF = MBB.getParent(); + const PPCInstrInfo &TII = + *static_cast(MF->getTarget().getInstrInfo()); + DebugLoc DL; + unsigned RestoreOp, MoveReg; + + if (isPPC64) { + // 64-bit: SP+8 + MBB.insert(MI, BuildMI(*MF, DL, TII.get(PPC::LWZ), PPC::X12) + .addImm(8) + .addReg(PPC::X1)); + RestoreOp = PPC::MTCRF8; + MoveReg = PPC::X12; + } else { + // 32-bit: FP-relative + MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ), + PPC::R12), + CSI[CSIIndex].getFrameIdx())); + RestoreOp = PPC::MTCRF; + MoveReg = PPC::R12; + } + + if (CR2Spilled) + MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR2) + .addReg(MoveReg)); + + if (CR3Spilled) + MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR3) + .addReg(MoveReg)); + + if (CR4Spilled) + MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR4) + .addReg(MoveReg)); +} + +bool +PPCFrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const { + + // Currently, this function only handles SVR4 32- and 64-bit ABIs. + // Return false otherwise to maintain pre-existing behavior. + if (!Subtarget.isSVR4ABI()) + return false; + + MachineFunction *MF = MBB.getParent(); + const PPCInstrInfo &TII = + *static_cast(MF->getTarget().getInstrInfo()); + bool CR2Spilled = false; + bool CR3Spilled = false; + bool CR4Spilled = false; + unsigned CSIIndex = 0; + + // Initialize insertion-point logic; we will be restoring in reverse + // order of spill. 
+ MachineBasicBlock::iterator I = MI, BeforeI = I; + bool AtStart = I == MBB.begin(); + + if (!AtStart) + --BeforeI; + + for (unsigned i = 0, e = CSI.size(); i != e; ++i) { + unsigned Reg = CSI[i].getReg(); + + if (Reg == PPC::CR2) { + CR2Spilled = true; + // The spill slot is associated only with CR2, which is the + // first nonvolatile spilled. Save it here. + CSIIndex = i; + continue; + } else if (Reg == PPC::CR3) { + CR3Spilled = true; + continue; + } else if (Reg == PPC::CR4) { + CR4Spilled = true; + continue; + } else { + // When we first encounter a non-CR register after seeing at + // least one CR register, restore all spilled CRs together. + if ((CR2Spilled || CR3Spilled || CR4Spilled) + && !(PPC::CR2 <= Reg && Reg <= PPC::CR4)) { + restoreCRs(Subtarget.isPPC64(), CR2Spilled, CR3Spilled, CR4Spilled, + MBB, I, CSI, CSIIndex); + CR2Spilled = CR3Spilled = CR4Spilled = false; + } + + // Default behavior for non-CR saves. + const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); + TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), + RC, TRI); + assert(I != MBB.begin() && + "loadRegFromStackSlot didn't insert any code!"); + } + + // Insert in reverse order. + if (AtStart) + I = MBB.begin(); + else { + I = BeforeI; + ++I; + } + } + + // If we haven't yet spilled the CRs, do so now. + if (CR2Spilled || CR3Spilled || CR4Spilled) + restoreCRs(Subtarget.isPPC64(), CR2Spilled, CR3Spilled, CR4Spilled, + MBB, I, CSI, CSIIndex); + + return true; +} + diff --git a/lib/Target/PowerPC/PPCFrameLowering.h b/lib/Target/PowerPC/PPCFrameLowering.h index d708541..4d957b9 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.h +++ b/lib/Target/PowerPC/PPCFrameLowering.h @@ -45,6 +45,16 @@ public: RegScavenger *RS = NULL) const; void processFunctionBeforeFrameFinalized(MachineFunction &MF) const; + bool spillCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const; + + bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB, + MachineBasicBlock::iterator MI, + const std::vector &CSI, + const TargetRegisterInfo *TRI) const; + /// targetHandlesStackFrameRounding - Returns true if the target is /// responsible for rounding up the stack frame (probably at emitPrologue /// time). @@ -170,23 +180,11 @@ public: {PPC::R15, -68}, {PPC::R14, -72}, - // CR save area offset. - // FIXME SVR4: Disable CR save area for now. -// {PPC::CR2, -4}, -// {PPC::CR3, -4}, -// {PPC::CR4, -4}, -// {PPC::CR2LT, -4}, -// {PPC::CR2GT, -4}, -// {PPC::CR2EQ, -4}, -// {PPC::CR2UN, -4}, -// {PPC::CR3LT, -4}, -// {PPC::CR3GT, -4}, -// {PPC::CR3EQ, -4}, -// {PPC::CR3UN, -4}, -// {PPC::CR4LT, -4}, -// {PPC::CR4GT, -4}, -// {PPC::CR4EQ, -4}, -// {PPC::CR4UN, -4}, + // CR save area offset. We map each of the nonvolatile CR fields + // to the slot for CR2, which is the first of the nonvolatile CR + // fields to be assigned, so that we only allocate one save slot. + // See PPCRegisterInfo::hasReservedSpillSlot() for more information. + {PPC::CR2, -4}, // VRSAVE save area offset. {PPC::VRSAVE, -4}, @@ -228,27 +226,6 @@ public: {PPC::F14, -144}, // General register save area offsets. - // FIXME 64-bit SVR4: Are 32-bit registers actually allocated in 64-bit - // mode? 
- {PPC::R31, -4}, - {PPC::R30, -12}, - {PPC::R29, -20}, - {PPC::R28, -28}, - {PPC::R27, -36}, - {PPC::R26, -44}, - {PPC::R25, -52}, - {PPC::R24, -60}, - {PPC::R23, -68}, - {PPC::R22, -76}, - {PPC::R21, -84}, - {PPC::R20, -92}, - {PPC::R19, -100}, - {PPC::R18, -108}, - {PPC::R17, -116}, - {PPC::R16, -124}, - {PPC::R15, -132}, - {PPC::R14, -140}, - {PPC::X31, -8}, {PPC::X30, -16}, {PPC::X29, -24}, @@ -268,24 +245,6 @@ public: {PPC::X15, -136}, {PPC::X14, -144}, - // CR save area offset. - // FIXME SVR4: Disable CR save area for now. -// {PPC::CR2, -4}, -// {PPC::CR3, -4}, -// {PPC::CR4, -4}, -// {PPC::CR2LT, -4}, -// {PPC::CR2GT, -4}, -// {PPC::CR2EQ, -4}, -// {PPC::CR2UN, -4}, -// {PPC::CR3LT, -4}, -// {PPC::CR3GT, -4}, -// {PPC::CR3EQ, -4}, -// {PPC::CR3UN, -4}, -// {PPC::CR4LT, -4}, -// {PPC::CR4GT, -4}, -// {PPC::CR4EQ, -4}, -// {PPC::CR4UN, -4}, - // VRSAVE save area offset. {PPC::VRSAVE, -4}, diff --git a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp index a00f686..254fea6 100644 --- a/lib/Target/PowerPC/PPCISelDAGToDAG.cpp +++ b/lib/Target/PowerPC/PPCISelDAGToDAG.cpp @@ -53,7 +53,9 @@ namespace { GlobalBaseReg = 0; SelectionDAGISel::runOnMachineFunction(MF); - InsertVRSaveCode(MF); + if (!PPCSubTarget.isSVR4ABI()) + InsertVRSaveCode(MF); + return true; } @@ -621,6 +623,88 @@ static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert, int &Other) { } } +// getVCmpInst: return the vector compare instruction for the specified +// vector type and condition code. Since this is for altivec specific code, +// only support the altivec types (v16i8, v8i16, v4i32, and v4f32). +static unsigned int getVCmpInst(MVT::SimpleValueType VecVT, ISD::CondCode CC) { + switch (CC) { + case ISD::SETEQ: + case ISD::SETUEQ: + case ISD::SETNE: + case ISD::SETUNE: + if (VecVT == MVT::v16i8) + return PPC::VCMPEQUB; + else if (VecVT == MVT::v8i16) + return PPC::VCMPEQUH; + else if (VecVT == MVT::v4i32) + return PPC::VCMPEQUW; + // v4f32 != v4f32 could be translate to unordered not equal + else if (VecVT == MVT::v4f32) + return PPC::VCMPEQFP; + break; + case ISD::SETLT: + case ISD::SETGT: + case ISD::SETLE: + case ISD::SETGE: + if (VecVT == MVT::v16i8) + return PPC::VCMPGTSB; + else if (VecVT == MVT::v8i16) + return PPC::VCMPGTSH; + else if (VecVT == MVT::v4i32) + return PPC::VCMPGTSW; + else if (VecVT == MVT::v4f32) + return PPC::VCMPGTFP; + break; + case ISD::SETULT: + case ISD::SETUGT: + case ISD::SETUGE: + case ISD::SETULE: + if (VecVT == MVT::v16i8) + return PPC::VCMPGTUB; + else if (VecVT == MVT::v8i16) + return PPC::VCMPGTUH; + else if (VecVT == MVT::v4i32) + return PPC::VCMPGTUW; + break; + case ISD::SETOEQ: + if (VecVT == MVT::v4f32) + return PPC::VCMPEQFP; + break; + case ISD::SETOLT: + case ISD::SETOGT: + case ISD::SETOLE: + if (VecVT == MVT::v4f32) + return PPC::VCMPGTFP; + break; + case ISD::SETOGE: + if (VecVT == MVT::v4f32) + return PPC::VCMPGEFP; + break; + default: + break; + } + llvm_unreachable("Invalid integer vector compare condition"); +} + +// getVCmpEQInst: return the equal compare instruction for the specified vector +// type. Since this is for altivec specific code, only support the altivec +// types (v16i8, v8i16, v4i32, and v4f32). 
+static unsigned int getVCmpEQInst(MVT::SimpleValueType VecVT) { + switch (VecVT) { + case MVT::v16i8: + return PPC::VCMPEQUB; + case MVT::v8i16: + return PPC::VCMPEQUH; + case MVT::v4i32: + return PPC::VCMPEQUW; + case MVT::v4f32: + return PPC::VCMPEQFP; + default: + llvm_unreachable("Invalid integer vector compare condition"); + } +} + + SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { DebugLoc dl = N->getDebugLoc(); unsigned Imm; @@ -701,10 +785,67 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { } } + SDValue LHS = N->getOperand(0); + SDValue RHS = N->getOperand(1); + + // Altivec Vector compare instructions do not set any CR register by default and + // vector compare operations return the same type as the operands. + if (LHS.getValueType().isVector()) { + EVT VecVT = LHS.getValueType(); + MVT::SimpleValueType VT = VecVT.getSimpleVT().SimpleTy; + unsigned int VCmpInst = getVCmpInst(VT, CC); + + switch (CC) { + case ISD::SETEQ: + case ISD::SETOEQ: + case ISD::SETUEQ: + return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS); + case ISD::SETNE: + case ISD::SETONE: + case ISD::SETUNE: { + SDValue VCmp(CurDAG->getMachineNode(VCmpInst, dl, VecVT, LHS, RHS), 0); + return CurDAG->SelectNodeTo(N, PPC::VNOR, VecVT, VCmp, VCmp); + } + case ISD::SETLT: + case ISD::SETOLT: + case ISD::SETULT: + return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, RHS, LHS); + case ISD::SETGT: + case ISD::SETOGT: + case ISD::SETUGT: + return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS); + case ISD::SETGE: + case ISD::SETOGE: + case ISD::SETUGE: { + // Small optimization: Altivec provides a 'Vector Compare Greater Than + // or Equal To' instruction (vcmpgefp), so in this case there is no + // need for extra logic for the equal compare. + if (VecVT.getSimpleVT().isFloatingPoint()) { + return CurDAG->SelectNodeTo(N, VCmpInst, VecVT, LHS, RHS); + } else { + SDValue VCmpGT(CurDAG->getMachineNode(VCmpInst, dl, VecVT, LHS, RHS), 0); + unsigned int VCmpEQInst = getVCmpEQInst(VT); + SDValue VCmpEQ(CurDAG->getMachineNode(VCmpEQInst, dl, VecVT, LHS, RHS), 0); + return CurDAG->SelectNodeTo(N, PPC::VOR, VecVT, VCmpGT, VCmpEQ); + } + } + case ISD::SETLE: + case ISD::SETOLE: + case ISD::SETULE: { + SDValue VCmpLE(CurDAG->getMachineNode(VCmpInst, dl, VecVT, RHS, LHS), 0); + unsigned int VCmpEQInst = getVCmpEQInst(VT); + SDValue VCmpEQ(CurDAG->getMachineNode(VCmpEQInst, dl, VecVT, LHS, RHS), 0); + return CurDAG->SelectNodeTo(N, PPC::VOR, VecVT, VCmpLE, VCmpEQ); + } + default: + llvm_unreachable("Invalid vector compare type: should be expanded by legalize"); + } + } + bool Inv; int OtherCondIdx; unsigned Idx = getCRIdxForSetCC(CC, Inv, OtherCondIdx); - SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl); + SDValue CCReg = SelectCC(LHS, RHS, CC, dl); SDValue IntCR; // Force the ccreg into CR7. 
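// --- Illustrative sketch, not part of the imported patch ---
// The SelectSETCC hunk above composes every AltiVec vector predicate from the
// "equal" and "greater-than" compares the hardware provides: NE is VNOR of the
// EQ result with itself, GE/LE are VOR of a GT result and an EQ result (except
// v4f32 GE, which maps directly to vcmpgefp), and LT is GT with the operands
// swapped. A minimal one-lane scalar sketch of the same identities, using
// hypothetical helper names that stand in for the vector instructions:
static bool vcmpeq(int a, int b) { return a == b; }                       // vcmpequw
static bool vcmpgt(int a, int b) { return a > b; }                        // vcmpgtsw
static bool setNE(int a, int b) { bool eq = vcmpeq(a, b); return !(eq || eq); } // VNOR(eq, eq)
static bool setLT(int a, int b) { return vcmpgt(b, a); }                  // operands swapped
static bool setGE(int a, int b) { return vcmpgt(a, b) || vcmpeq(a, b); }  // VOR(gt, eq)
static bool setLE(int a, int b) { return vcmpgt(b, a) || vcmpeq(a, b); }  // VOR(lt, eq)
// --- End illustrative sketch ---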
@@ -717,7 +858,7 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDNode *N) { if (PPCSubTarget.hasMFOCRF() && OtherCondIdx == -1) IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg, CCReg), 0); - else + else IntCR = SDValue(CurDAG->getMachineNode(PPC::MFCRpseud, dl, MVT::i32, CR7Reg, CCReg), 0); @@ -975,6 +1116,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { case ISD::AND: { unsigned Imm, Imm2, SH, MB, ME; + uint64_t Imm64; // If this is an and of a value rotated between 0 and 31 bits and then and'd // with a mask, emit rlwinm @@ -993,6 +1135,14 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) { SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) }; return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4); } + // If this is a 64-bit zero-extension mask, emit rldicl. + if (isInt64Immediate(N->getOperand(1).getNode(), Imm64) && + isMask_64(Imm64)) { + SDValue Val = N->getOperand(0); + MB = 64 - CountTrailingOnes_64(Imm64); + SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB) }; + return CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops, 3); + } // AND X, 0 -> 0, not "rlwinm 32". if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) { ReplaceUses(SDValue(N, 0), N->getOperand(1)); diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 61d44c5..adf78d5 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -361,6 +361,22 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand); setOperationAction(ISD::CTTZ, VT, Expand); setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand); + + for (unsigned j = (unsigned)MVT::FIRST_VECTOR_VALUETYPE; + j <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++j) { + MVT::SimpleValueType InnerVT = (MVT::SimpleValueType)j; + setTruncStoreAction(VT, InnerVT, Expand); + } + setLoadExtAction(ISD::SEXTLOAD, VT, Expand); + setLoadExtAction(ISD::ZEXTLOAD, VT, Expand); + setLoadExtAction(ISD::EXTLOAD, VT, Expand); + } + + for (unsigned i = (unsigned)MVT::FIRST_FP_VECTOR_VALUETYPE; + i <= (unsigned)MVT::LAST_FP_VECTOR_VALUETYPE; ++i) { + MVT::SimpleValueType VT = (MVT::SimpleValueType)i; + setOperationAction(ISD::FSQRT, VT, Expand); } // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle @@ -373,6 +389,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::LOAD , MVT::v4i32, Legal); setOperationAction(ISD::SELECT, MVT::v4i32, Expand); setOperationAction(ISD::STORE , MVT::v4i32, Legal); + setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); + setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); + setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); + setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass); addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass); @@ -392,6 +412,14 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom); setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom); setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); + + // Altivec does not contain unordered floating-point compare instructions + setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand); + setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand); + setCondCodeAction(ISD::SETUGT, MVT::v4f32, Expand); + setCondCodeAction(ISD::SETUGE, MVT::v4f32, Expand); + setCondCodeAction(ISD::SETULT, 
MVT::v4f32, Expand); + setCondCodeAction(ISD::SETULE, MVT::v4f32, Expand); } if (Subtarget->has64BitSupport()) { @@ -449,6 +477,21 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM) setSchedulingPreference(Sched::Hybrid); computeRegisterProperties(); + + // The Freescale cores do better with aggressive inlining of memcpy and + // friends. GCC uses the same threshold of 128 bytes (= 32 word stores). + if (Subtarget->getDarwinDirective() == PPC::DIR_E500mc || + Subtarget->getDarwinDirective() == PPC::DIR_E5500) { + maxStoresPerMemset = 32; + maxStoresPerMemsetOptSize = 16; + maxStoresPerMemcpy = 32; + maxStoresPerMemcpyOptSize = 8; + maxStoresPerMemmove = 32; + maxStoresPerMemmoveOptSize = 8; + + setPrefFunctionAlignment(4); + benefitFromCodePlacementOpt = true; + } } /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate @@ -517,11 +560,15 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const { case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ"; case PPCISD::MTFSF: return "PPCISD::MTFSF"; case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN"; + case PPCISD::CR6SET: return "PPCISD::CR6SET"; + case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET"; } } EVT PPCTargetLowering::getSetCCResultType(EVT VT) const { - return MVT::i32; + if (!VT.isVector()) + return MVT::i32; + return VT.changeVectorElementTypeToInteger(); } //===----------------------------------------------------------------------===// @@ -811,14 +858,13 @@ SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) { } // Properly sign extend the value. - int ShAmt = (4-ByteSize)*8; - int MaskVal = ((int)Value << ShAmt) >> ShAmt; + int MaskVal = SignExtend32(Value, ByteSize * 8); // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros. if (MaskVal == 0) return SDValue(); // Finally, if this value fits in a 5 bit sext field, return it - if (((MaskVal << (32-5)) >> (32-5)) == MaskVal) + if (SignExtend32<5>(MaskVal) == MaskVal) return DAG.getTargetConstant(MaskVal, MVT::i32); return SDValue(); } @@ -1204,6 +1250,14 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op, ConstantPoolSDNode *CP = cast(Op); const Constant *C = CP->getConstVal(); + // 64-bit SVR4 ABI code is always position-independent. + // The actual address of the GlobalValue is stored in the TOC. + if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { + SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment(), 0); + return DAG.getNode(PPCISD::TOC_ENTRY, CP->getDebugLoc(), MVT::i64, GA, + DAG.getRegister(PPC::X2, MVT::i64)); + } + unsigned MOHiFlag, MOLoFlag; bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); SDValue CPIHi = @@ -1217,6 +1271,14 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { EVT PtrVT = Op.getValueType(); JumpTableSDNode *JT = cast(Op); + // 64-bit SVR4 ABI code is always position-independent. + // The actual address of the GlobalValue is stored in the TOC.
+ if (PPCSubTarget.isSVR4ABI() && PPCSubTarget.isPPC64()) { + SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT); + return DAG.getNode(PPCISD::TOC_ENTRY, JT->getDebugLoc(), MVT::i64, GA, + DAG.getRegister(PPC::X2, MVT::i64)); + } + unsigned MOHiFlag, MOLoFlag; bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); @@ -1232,8 +1294,8 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op, unsigned MOHiFlag, MOLoFlag; bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); - SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag); - SDValue TgtBALo = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOLoFlag); + SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag); + SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag); return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG); } @@ -1441,7 +1503,7 @@ SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG, MachinePointerInfo(), MVT::i32, false, false, 0); - return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), + return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo(), false, false, false, 0); } @@ -1461,7 +1523,7 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = (PtrVT == MVT::i64); Type *IntPtrTy = - DAG.getTargetLoweringInfo().getTargetData()->getIntPtrType( + DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType( *DAG.getContext()); TargetLowering::ArgListTy Args; @@ -1684,9 +1746,13 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl &InVals) const { - if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) { - return LowerFormalArguments_SVR4(Chain, CallConv, isVarArg, Ins, - dl, DAG, InVals); + if (PPCSubTarget.isSVR4ABI()) { + if (PPCSubTarget.isPPC64()) + return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, + dl, DAG, InVals); + else + return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, + dl, DAG, InVals); } else { return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG, InVals); @@ -1694,7 +1760,7 @@ PPCTargetLowering::LowerFormalArguments(SDValue Chain, } SDValue -PPCTargetLowering::LowerFormalArguments_SVR4( +PPCTargetLowering::LowerFormalArguments_32SVR4( SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl @@ -1911,6 +1977,334 @@ PPCTargetLowering::LowerFormalArguments_SVR4( return Chain; } +// PPC64 passes i8, i16, and i32 values in i64 registers. Promote +// value to MVT::i64 and then truncate to the correct register size. +SDValue +PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, + SelectionDAG &DAG, SDValue ArgVal, + DebugLoc dl) const { + if (Flags.isSExt()) + ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, + DAG.getValueType(ObjectVT)); + else if (Flags.isZExt()) + ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, + DAG.getValueType(ObjectVT)); + + return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); +} + +// Set the size that is at least reserved in caller of this function. Tail +// call optimized functions' reserved stack space needs to be aligned so that +// taking the difference between two stack areas will result in an aligned +// stack. 
+void +PPCTargetLowering::setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG, + unsigned nAltivecParamsAtEnd, + unsigned MinReservedArea, + bool isPPC64) const { + PPCFunctionInfo *FI = MF.getInfo(); + // Add the Altivec parameters at the end, if needed. + if (nAltivecParamsAtEnd) { + MinReservedArea = ((MinReservedArea+15)/16)*16; + MinReservedArea += 16*nAltivecParamsAtEnd; + } + MinReservedArea = + std::max(MinReservedArea, + PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); + unsigned TargetAlign + = DAG.getMachineFunction().getTarget().getFrameLowering()-> + getStackAlignment(); + unsigned AlignMask = TargetAlign-1; + MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; + FI->setMinReservedArea(MinReservedArea); +} + +SDValue +PPCTargetLowering::LowerFormalArguments_64SVR4( + SDValue Chain, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl + &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const { + // TODO: add description of PPC stack frame format, or at least some docs. + // + MachineFunction &MF = DAG.getMachineFunction(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + PPCFunctionInfo *FuncInfo = MF.getInfo(); + + EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); + // Potential tail calls could cause overwriting of argument stack slots. + bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt && + (CallConv == CallingConv::Fast)); + unsigned PtrByteSize = 8; + + unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); + // Area that is at least reserved in caller of this function. + unsigned MinReservedArea = ArgOffset; + + static const uint16_t GPR[] = { + PPC::X3, PPC::X4, PPC::X5, PPC::X6, + PPC::X7, PPC::X8, PPC::X9, PPC::X10, + }; + + static const uint16_t *FPR = GetFPR(); + + static const uint16_t VR[] = { + PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, + PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 + }; + + const unsigned Num_GPR_Regs = array_lengthof(GPR); + const unsigned Num_FPR_Regs = 13; + const unsigned Num_VR_Regs = array_lengthof(VR); + + unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; + + // Add DAG nodes to load the arguments or copy them out of registers. On + // entry to a function on PPC, the arguments start after the linkage area, + // although the first ones are often in registers. + + SmallVector MemOps; + unsigned nAltivecParamsAtEnd = 0; + Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); + for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo, ++FuncArg) { + SDValue ArgVal; + bool needsLoad = false; + EVT ObjectVT = Ins[ArgNo].VT; + unsigned ObjSize = ObjectVT.getSizeInBits()/8; + unsigned ArgSize = ObjSize; + ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags; + + unsigned CurArgOffset = ArgOffset; + + // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary. + if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 || + ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) { + if (isVarArg) { + MinReservedArea = ((MinReservedArea+15)/16)*16; + MinReservedArea += CalculateStackSlotSize(ObjectVT, + Flags, + PtrByteSize); + } else + nAltivecParamsAtEnd++; + } else + // Calculate min reserved area. + MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT, + Flags, + PtrByteSize); + + // FIXME the codegen can be much improved in some cases. + // We do not have to keep everything in memory. + if (Flags.isByVal()) { + // ObjSize is the true size, ArgSize rounded up to multiple of registers. 
+ ObjSize = Flags.getByValSize(); + ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; + // Empty aggregate parameters do not take up registers. Examples: + // struct { } a; + // union { } b; + // int c[0]; + // etc. However, we have to provide a place-holder in InVals, so + // pretend we have an 8-byte item at the current address for that + // purpose. + if (!ObjSize) { + int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); + InVals.push_back(FIN); + continue; + } + // All aggregates smaller than 8 bytes must be passed right-justified. + if (ObjSize < PtrByteSize) + CurArgOffset = CurArgOffset + (PtrByteSize - ObjSize); + // The value of the object is its address. + int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); + InVals.push_back(FIN); + + if (ObjSize < 8) { + if (GPR_idx != Num_GPR_Regs) { + unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); + SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); + SDValue Store; + + if (ObjSize==1 || ObjSize==2 || ObjSize==4) { + EVT ObjType = (ObjSize == 1 ? MVT::i8 : + (ObjSize == 2 ? MVT::i16 : MVT::i32)); + Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, + MachinePointerInfo(FuncArg, CurArgOffset), + ObjType, false, false, 0); + } else { + // For sizes that don't fit a truncating store (3, 5, 6, 7), + // store the whole register as-is to the parameter save area + // slot. The address of the parameter was already calculated + // above (InVals.push_back(FIN)) to be the right-justified + // offset within the slot. For this store, we need a new + // frame index that points at the beginning of the slot. + int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); + Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, + MachinePointerInfo(FuncArg, ArgOffset), + false, false, 0); + } + + MemOps.push_back(Store); + ++GPR_idx; + } + // Whether we copied from a register or not, advance the offset + // into the parameter save area by a full doubleword. + ArgOffset += PtrByteSize; + continue; + } + + for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { + // Store whatever pieces of the object are in registers + // to memory. ArgOffset will be the address of the beginning + // of the object. + if (GPR_idx != Num_GPR_Regs) { + unsigned VReg; + VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); + int FI = MFI->CreateFixedObject(PtrByteSize, ArgOffset, true); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); + SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); + SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, + MachinePointerInfo(FuncArg, ArgOffset), + false, false, 0); + MemOps.push_back(Store); + ++GPR_idx; + ArgOffset += PtrByteSize; + } else { + ArgOffset += ArgSize - j; + break; + } + } + continue; + } + + switch (ObjectVT.getSimpleVT().SimpleTy) { + default: llvm_unreachable("Unhandled argument type!"); + case MVT::i32: + case MVT::i64: + if (GPR_idx != Num_GPR_Regs) { + unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); + ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); + + if (ObjectVT == MVT::i32) + // PPC64 passes i8, i16, and i32 values in i64 registers. Promote + // value to MVT::i64 and then truncate to the correct register size. 
+ ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); + + ++GPR_idx; + } else { + needsLoad = true; + ArgSize = PtrByteSize; + } + ArgOffset += 8; + break; + + case MVT::f32: + case MVT::f64: + // Every 8 bytes of argument space consumes one of the GPRs available for + // argument passing. + if (GPR_idx != Num_GPR_Regs) { + ++GPR_idx; + } + if (FPR_idx != Num_FPR_Regs) { + unsigned VReg; + + if (ObjectVT == MVT::f32) + VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass); + else + VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass); + + ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); + ++FPR_idx; + } else { + needsLoad = true; + ArgSize = PtrByteSize; + } + + ArgOffset += 8; + break; + case MVT::v4f32: + case MVT::v4i32: + case MVT::v8i16: + case MVT::v16i8: + // Note that vector arguments in registers don't reserve stack space, + // except in varargs functions. + if (VR_idx != Num_VR_Regs) { + unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass); + ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT); + if (isVarArg) { + while ((ArgOffset % 16) != 0) { + ArgOffset += PtrByteSize; + if (GPR_idx != Num_GPR_Regs) + GPR_idx++; + } + ArgOffset += 16; + GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME correct for ppc64? + } + ++VR_idx; + } else { + // Vectors are aligned. + ArgOffset = ((ArgOffset+15)/16)*16; + CurArgOffset = ArgOffset; + ArgOffset += 16; + needsLoad = true; + } + break; + } + + // We need to load the argument to a virtual register if we determined + // above that we ran out of physical registers of the appropriate type. + if (needsLoad) { + int FI = MFI->CreateFixedObject(ObjSize, + CurArgOffset + (ArgSize - ObjSize), + isImmutable); + SDValue FIN = DAG.getFrameIndex(FI, PtrVT); + ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo(), + false, false, false, 0); + } + + InVals.push_back(ArgVal); + } + + // Set the size that is at least reserved in caller of this function. Tail + // call optimized functions' reserved stack space needs to be aligned so that + // taking the difference between two stack areas will result in an aligned + // stack. + setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, true); + + // If the function takes variable number of arguments, make a frame index for + // the start of the first vararg value... for expansion of llvm.va_start. + if (isVarArg) { + int Depth = ArgOffset; + + FuncInfo->setVarArgsFrameIndex( + MFI->CreateFixedObject(PtrByteSize, Depth, true)); + SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); + + // If this function is vararg, store any remaining integer argument regs + // to their spots on the stack so that they may be loaded by deferencing the + // result of va_next. 
+ for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { + unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); + SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); + SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, + MachinePointerInfo(), false, false, 0); + MemOps.push_back(Store); + // Increment the address by four for the next argument to store + SDValue PtrOff = DAG.getConstant(PtrByteSize, PtrVT); + FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff); + } + } + + if (!MemOps.empty()) + Chain = DAG.getNode(ISD::TokenFactor, dl, + MVT::Other, &MemOps[0], MemOps.size()); + + return Chain; +} + SDValue PPCTargetLowering::LowerFormalArguments_Darwin( SDValue Chain, @@ -1987,10 +2381,12 @@ PPCTargetLowering::LowerFormalArguments_Darwin( default: llvm_unreachable("Unhandled argument type!"); case MVT::i32: case MVT::f32: - VecArgOffset += isPPC64 ? 8 : 4; + VecArgOffset += 4; break; case MVT::i64: // PPC64 case MVT::f64: + // FIXME: We are guaranteed to be !isPPC64 at this point. + // Does MVT::i64 apply? VecArgOffset += 8; break; case MVT::v4f32: @@ -2013,7 +2409,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin( SmallVector MemOps; unsigned nAltivecParamsAtEnd = 0; - for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) { + Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin(); + for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo, ++FuncArg) { SDValue ArgVal; bool needsLoad = false; EVT ObjectVT = Ins[ArgNo].VT; @@ -2061,10 +2458,11 @@ PPCTargetLowering::LowerFormalArguments_Darwin( else VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); + EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16; SDValue Store = DAG.getTruncStore(Val.getValue(1), dl, Val, FIN, - MachinePointerInfo(), - ObjSize==1 ? MVT::i8 : MVT::i16, - false, false, 0); + MachinePointerInfo(FuncArg, + CurArgOffset), + ObjType, false, false, 0); MemOps.push_back(Store); ++GPR_idx; } @@ -2075,8 +2473,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin( } for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { // Store whatever pieces of the object are in registers - // to memory. ArgVal will be address of the beginning of - // the object. + // to memory. ArgOffset will be the address of the beginning + // of the object. if (GPR_idx != Num_GPR_Regs) { unsigned VReg; if (isPPC64) @@ -2087,7 +2485,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin( SDValue FIN = DAG.getFrameIndex(FI, PtrVT); SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT); SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, - MachinePointerInfo(), + MachinePointerInfo(FuncArg, ArgOffset), false, false, 0); MemOps.push_back(Store); ++GPR_idx; @@ -2122,18 +2520,10 @@ PPCTargetLowering::LowerFormalArguments_Darwin( unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64); - if (ObjectVT == MVT::i32) { + if (ObjectVT == MVT::i32) // PPC64 passes i8, i16, and i32 values in i64 registers. Promote // value to MVT::i64 and then truncate to the correct register size. 
- if (Flags.isSExt()) - ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal, - DAG.getValueType(ObjectVT)); - else if (Flags.isZExt()) - ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal, - DAG.getValueType(ObjectVT)); - - ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal); - } + ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl); ++GPR_idx; } else { @@ -2220,23 +2610,10 @@ PPCTargetLowering::LowerFormalArguments_Darwin( } // Set the size that is at least reserved in caller of this function. Tail - // call optimized function's reserved stack space needs to be aligned so that + // call optimized functions' reserved stack space needs to be aligned so that // taking the difference between two stack areas will result in an aligned // stack. - PPCFunctionInfo *FI = MF.getInfo(); - // Add the Altivec parameters at the end, if needed. - if (nAltivecParamsAtEnd) { - MinReservedArea = ((MinReservedArea+15)/16)*16; - MinReservedArea += 16*nAltivecParamsAtEnd; - } - MinReservedArea = - std::max(MinReservedArea, - PPCFrameLowering::getMinCallFrameSize(isPPC64, true)); - unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameLowering()-> - getStackAlignment(); - unsigned AlignMask = TargetAlign-1; - MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; - FI->setMinReservedArea(MinReservedArea); + setMinReservedArea(MF, DAG, nAltivecParamsAtEnd, MinReservedArea, isPPC64); // If the function takes variable number of arguments, make a frame index for // the start of the first vararg value... for expansion of llvm.va_start. @@ -2276,8 +2653,8 @@ PPCTargetLowering::LowerFormalArguments_Darwin( return Chain; } -/// CalculateParameterAndLinkageAreaSize - Get the size of the paramter plus -/// linkage area for the Darwin ABI. +/// CalculateParameterAndLinkageAreaSize - Get the size of the parameter plus +/// linkage area for the Darwin ABI, or the 64-bit SVR4 ABI. static unsigned CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, bool isPPC64, @@ -2408,7 +2785,7 @@ static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) { int Addr = C->getZExtValue(); if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero. - (Addr << 6 >> 6) != Addr) + SignExtend32<26>(Addr) != Addr) return 0; // Top 6 bits have to be sext of immediate. return DAG.getConstant((int)C->getZExtValue() >> 2, @@ -2686,7 +3063,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, // Thus for a call through a function pointer, the following actions need // to be performed: // 1. Save the TOC of the caller in the TOC save area of its stack - // frame (this is done in LowerCall_Darwin()). + // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()). // 2. Load the address of the function entry point from the function // descriptor. // 3. Load the TOC of the callee from the function descriptor into r2. @@ -2776,6 +3153,15 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, return CallOpc; } +static +bool isLocalCall(const SDValue &Callee) +{ + if (GlobalAddressSDNode *G = dyn_cast(Callee)) + return !G->getGlobal()->isDeclaration() && + !G->getGlobal()->isWeakForLinker(); + return false; +} + SDValue PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, @@ -2791,12 +3177,32 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, // Copy all of the result registers out of their specified physreg. 
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { CCValAssign &VA = RVLocs[i]; - EVT VT = VA.getValVT(); assert(VA.isRegLoc() && "Can only return in registers!"); - Chain = DAG.getCopyFromReg(Chain, dl, - VA.getLocReg(), VT, InFlag).getValue(1); - InVals.push_back(Chain.getValue(0)); - InFlag = Chain.getValue(2); + + SDValue Val = DAG.getCopyFromReg(Chain, dl, + VA.getLocReg(), VA.getLocVT(), InFlag); + Chain = Val.getValue(1); + InFlag = Val.getValue(2); + + switch (VA.getLocInfo()) { + default: llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: break; + case CCValAssign::AExt: + Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); + break; + case CCValAssign::ZExt: + Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val, + DAG.getValueType(VA.getValVT())); + Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); + break; + case CCValAssign::SExt: + Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val, + DAG.getValueType(VA.getValVT())); + Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); + break; + } + + InVals.push_back(Val); } return Chain; @@ -2819,6 +3225,10 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, isTailCall, RegsToPass, Ops, NodeTys, PPCSubTarget); + // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls + if (isVarArg && PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) + Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32)); + // When performing tail call optimization the callee pops its arguments off // the stack. Account for this here so these bytes can be pushed back on in // PPCRegisterInfo::eliminateCallFramePseudoInstr. @@ -2880,8 +3290,8 @@ PPCTargetLowering::FinishCall(CallingConv::ID CallConv, DebugLoc dl, // from allocating it), resulting in an additional register being // allocated and an unnecessary move instruction being generated. needsTOCRestore = true; - } else if (CallOpc == PPCISD::CALL_SVR4) { - // Otherwise insert NOP. + } else if ((CallOpc == PPCISD::CALL_SVR4) && !isLocalCall(Callee)) { + // Otherwise insert NOP for non-local calls. 
CallOpc = PPCISD::CALL_NOP_SVR4; } } @@ -2923,10 +3333,16 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, Ins, DAG); - if (PPCSubTarget.isSVR4ABI() && !PPCSubTarget.isPPC64()) - return LowerCall_SVR4(Chain, Callee, CallConv, isVarArg, - isTailCall, Outs, OutVals, Ins, - dl, DAG, InVals); + if (PPCSubTarget.isSVR4ABI()) { + if (PPCSubTarget.isPPC64()) + return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg, + isTailCall, Outs, OutVals, Ins, + dl, DAG, InVals); + else + return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg, + isTailCall, Outs, OutVals, Ins, + dl, DAG, InVals); + } return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg, isTailCall, Outs, OutVals, Ins, @@ -2934,15 +3350,15 @@ PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } SDValue -PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee, - CallingConv::ID CallConv, bool isVarArg, - bool isTailCall, - const SmallVectorImpl &Outs, - const SmallVectorImpl &OutVals, - const SmallVectorImpl &Ins, - DebugLoc dl, SelectionDAG &DAG, - SmallVectorImpl &InVals) const { - // See PPCTargetLowering::LowerFormalArguments_SVR4() for a description +PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, bool isVarArg, + bool isTailCall, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SmallVectorImpl &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const { + // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description // of the 32-bit SVR4 ABI stack frame layout. assert((CallConv == CallingConv::C || @@ -3042,73 +3458,455 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee, SmallVector, 8> RegsToPass; SmallVector TailCallArguments; - SmallVector MemOpChains; + SmallVector MemOpChains; + + bool seenFloatArg = false; + // Walk the register/memloc assignments, inserting copies/loads. + for (unsigned i = 0, j = 0, e = ArgLocs.size(); + i != e; + ++i) { + CCValAssign &VA = ArgLocs[i]; + SDValue Arg = OutVals[i]; + ISD::ArgFlagsTy Flags = Outs[i].Flags; + + if (Flags.isByVal()) { + // Argument is an aggregate which is passed by value, thus we need to + // create a copy of it in the local variable space of the current stack + // frame (which is the stack frame of the caller) and pass the address of + // this copy to the callee. + assert((j < ByValArgLocs.size()) && "Index out of bounds!"); + CCValAssign &ByValVA = ByValArgLocs[j++]; + assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); + + // Memory reserved in the local variable space of the callers stack frame. + unsigned LocMemOffset = ByValVA.getLocMemOffset(); + + SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); + PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); + + // Create a copy of the argument in the local area of the current + // stack frame. + SDValue MemcpyCall = + CreateCopyOfByValArgument(Arg, PtrOff, + CallSeqStart.getNode()->getOperand(0), + Flags, DAG, dl); + + // This must go outside the CALLSEQ_START..END. + SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, + CallSeqStart.getNode()->getOperand(1)); + DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), + NewCallSeqStart.getNode()); + Chain = CallSeqStart = NewCallSeqStart; + + // Pass the address of the aggregate copy on the stack either in a + // physical register or in the parameter list area of the current stack + // frame to the callee. 
+      Arg = PtrOff;
+    }
+
+    if (VA.isRegLoc()) {
+      seenFloatArg |= VA.getLocVT().isFloatingPoint();
+      // Put argument in a physical register.
+      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+    } else {
+      // Put argument in the parameter list area of the current stack frame.
+      assert(VA.isMemLoc());
+      unsigned LocMemOffset = VA.getLocMemOffset();
+
+      if (!isTailCall) {
+        SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
+        PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
+
+        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+                                           MachinePointerInfo(),
+                                           false, false, 0));
+      } else {
+        // Calculate and remember argument location.
+        CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
+                                 TailCallArguments);
+      }
+    }
+  }
+
+  if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                        &MemOpChains[0], MemOpChains.size());
+
+  // Build a sequence of copy-to-reg nodes chained together with token chain
+  // and flag operands which copy the outgoing args into the appropriate regs.
+  SDValue InFlag;
+  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+                             RegsToPass[i].second, InFlag);
+    InFlag = Chain.getValue(1);
+  }
+
+  // Set CR bit 6 to true if this is a vararg call with floating args passed in
+  // registers.
+  if (isVarArg) {
+    SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
+    SDValue Ops[] = { Chain, InFlag };
+
+    Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
+                        dl, VTs, Ops, InFlag.getNode() ? 2 : 1);
+
+    InFlag = Chain.getValue(1);
+  }
+
+  if (isTailCall)
+    PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp,
+                    false, TailCallArguments);
+
+  return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG,
+                    RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes,
+                    Ins, InVals);
+}
+
+// Copy an argument into memory, being careful to do this outside the
+// call sequence for the call to which the argument belongs.
+SDValue
+PPCTargetLowering::createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
+                                              SDValue CallSeqStart,
+                                              ISD::ArgFlagsTy Flags,
+                                              SelectionDAG &DAG,
+                                              DebugLoc dl) const {
+  SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
+                        CallSeqStart.getNode()->getOperand(0),
+                        Flags, DAG, dl);
+  // The MEMCPY must go outside the CALLSEQ_START..END.
+  SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
+                             CallSeqStart.getNode()->getOperand(1));
+  DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
+                         NewCallSeqStart.getNode());
+  return NewCallSeqStart;
+}
+
+SDValue
+PPCTargetLowering::LowerCall_64SVR4(SDValue Chain, SDValue Callee,
+                                    CallingConv::ID CallConv, bool isVarArg,
+                                    bool isTailCall,
+                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
+                                    const SmallVectorImpl<SDValue> &OutVals,
+                                    const SmallVectorImpl<ISD::InputArg> &Ins,
+                                    DebugLoc dl, SelectionDAG &DAG,
+                                    SmallVectorImpl<SDValue> &InVals) const {
+
+  unsigned NumOps = Outs.size();
+
+  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
+  unsigned PtrByteSize = 8;
+
+  MachineFunction &MF = DAG.getMachineFunction();
+
+  // Mark this function as potentially containing a tail call. As a
+  // consequence, the frame pointer will be used for dynamic allocations and
+  // for restoring the caller's stack pointer in this function's epilogue.
+  // This is done because the tail-called function might overwrite the value
+  // in this function's (MF) stack pointer stack slot 0(SP).
+ if (getTargetMachine().Options.GuaranteedTailCallOpt && + CallConv == CallingConv::Fast) + MF.getInfo()->setHasFastCall(); + + unsigned nAltivecParamsAtEnd = 0; + + // Count how many bytes are to be pushed on the stack, including the linkage + // area, and parameter passing area. We start with at least 48 bytes, which + // is reserved space for [SP][CR][LR][3 x unused]. + // NOTE: For PPC64, nAltivecParamsAtEnd always remains zero as a result + // of this call. + unsigned NumBytes = + CalculateParameterAndLinkageAreaSize(DAG, true, isVarArg, CallConv, + Outs, OutVals, nAltivecParamsAtEnd); + + // Calculate by how many bytes the stack has to be adjusted in case of tail + // call optimization. + int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); + + // To protect arguments on the stack from being clobbered in a tail call, + // force all the loads to happen before doing any other lowering. + if (isTailCall) + Chain = DAG.getStackArgumentTokenFactor(Chain); + + // Adjust the stack pointer for the new arguments... + // These operations are automatically eliminated by the prolog/epilog pass + Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); + SDValue CallSeqStart = Chain; + + // Load the return address and frame pointer so it can be move somewhere else + // later. + SDValue LROp, FPOp; + Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, true, + dl); + + // Set up a copy of the stack pointer for use loading and storing any + // arguments that may not fit in the registers available for argument + // passing. + SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64); + + // Figure out which arguments are going to go in registers, and which in + // memory. Also, if this is a vararg function, floating point operations + // must be stored to our stack, and loaded into integer regs as well, if + // any integer regs are available for argument passing. + unsigned ArgOffset = PPCFrameLowering::getLinkageSize(true, true); + unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0; + + static const uint16_t GPR[] = { + PPC::X3, PPC::X4, PPC::X5, PPC::X6, + PPC::X7, PPC::X8, PPC::X9, PPC::X10, + }; + static const uint16_t *FPR = GetFPR(); + + static const uint16_t VR[] = { + PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8, + PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13 + }; + const unsigned NumGPRs = array_lengthof(GPR); + const unsigned NumFPRs = 13; + const unsigned NumVRs = array_lengthof(VR); + + SmallVector, 8> RegsToPass; + SmallVector TailCallArguments; - bool seenFloatArg = false; - // Walk the register/memloc assignments, inserting copies/loads. - for (unsigned i = 0, j = 0, e = ArgLocs.size(); - i != e; - ++i) { - CCValAssign &VA = ArgLocs[i]; + SmallVector MemOpChains; + for (unsigned i = 0; i != NumOps; ++i) { SDValue Arg = OutVals[i]; ISD::ArgFlagsTy Flags = Outs[i].Flags; + // PtrOff will be used to store the current argument to the stack if a + // register cannot be found for it. + SDValue PtrOff; + + PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType()); + + PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); + + // Promote integers to 64-bit values. + if (Arg.getValueType() == MVT::i32) { + // FIXME: Should this use ANY_EXTEND if neither sext nor zext? + unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; + Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg); + } + + // FIXME memcpy is used way more than necessary. Correctness first. 
+ // Note: "by value" is code for passing a structure by value, not + // basic types. if (Flags.isByVal()) { - // Argument is an aggregate which is passed by value, thus we need to - // create a copy of it in the local variable space of the current stack - // frame (which is the stack frame of the caller) and pass the address of - // this copy to the callee. - assert((j < ByValArgLocs.size()) && "Index out of bounds!"); - CCValAssign &ByValVA = ByValArgLocs[j++]; - assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); + // Note: Size includes alignment padding, so + // struct x { short a; char b; } + // will have Size = 4. With #pragma pack(1), it will have Size = 3. + // These are the proper values we need for right-justifying the + // aggregate in a parameter register. + unsigned Size = Flags.getByValSize(); - // Memory reserved in the local variable space of the callers stack frame. - unsigned LocMemOffset = ByValVA.getLocMemOffset(); + // An empty aggregate parameter takes up no storage and no + // registers. + if (Size == 0) + continue; - SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); - PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); + // All aggregates smaller than 8 bytes must be passed right-justified. + if (Size==1 || Size==2 || Size==4) { + EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32); + if (GPR_idx != NumGPRs) { + SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, + MachinePointerInfo(), VT, + false, false, 0); + MemOpChains.push_back(Load.getValue(1)); + RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); - // Create a copy of the argument in the local area of the current - // stack frame. - SDValue MemcpyCall = - CreateCopyOfByValArgument(Arg, PtrOff, - CallSeqStart.getNode()->getOperand(0), - Flags, DAG, dl); + ArgOffset += PtrByteSize; + continue; + } + } - // This must go outside the CALLSEQ_START..END. - SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, - CallSeqStart.getNode()->getOperand(1)); - DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), - NewCallSeqStart.getNode()); - Chain = CallSeqStart = NewCallSeqStart; + if (GPR_idx == NumGPRs && Size < 8) { + SDValue Const = DAG.getConstant(PtrByteSize - Size, + PtrOff.getValueType()); + SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); + Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, + CallSeqStart, + Flags, DAG, dl); + ArgOffset += PtrByteSize; + continue; + } + // Copy entire object into memory. There are cases where gcc-generated + // code assumes it is there, even if it could be put entirely into + // registers. (This is not what the doc says.) - // Pass the address of the aggregate copy on the stack either in a - // physical register or in the parameter list area of the current stack - // frame to the callee. - Arg = PtrOff; + // FIXME: The above statement is likely due to a misunderstanding of the + // documents. All arguments must be copied into the parameter area BY + // THE CALLEE in the event that the callee takes the address of any + // formal argument. That has not yet been implemented. However, it is + // reasonable to use the stack area as a staging area for the register + // load. + + // Skip this for small aggregates, as we will use the same slot for a + // right-justified copy, below. + if (Size >= 8) + Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, + CallSeqStart, + Flags, DAG, dl); + + // When a register is available, pass a small aggregate right-justified. 
+ if (Size < 8 && GPR_idx != NumGPRs) { + // The easiest way to get this right-justified in a register + // is to copy the structure into the rightmost portion of a + // local variable slot, then load the whole slot into the + // register. + // FIXME: The memcpy seems to produce pretty awful code for + // small aggregates, particularly for packed ones. + // FIXME: It would be preferable to use the slot in the + // parameter save area instead of a new local variable. + SDValue Const = DAG.getConstant(8 - Size, PtrOff.getValueType()); + SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); + Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, + CallSeqStart, + Flags, DAG, dl); + + // Load the slot into the register. + SDValue Load = DAG.getLoad(PtrVT, dl, Chain, PtrOff, + MachinePointerInfo(), + false, false, false, 0); + MemOpChains.push_back(Load.getValue(1)); + RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load)); + + // Done with this argument. + ArgOffset += PtrByteSize; + continue; + } + + // For aggregates larger than PtrByteSize, copy the pieces of the + // object that fit into registers from the parameter save area. + for (unsigned j=0; j(Callee) && + !dyn_cast(Callee) && + !isBLACompatibleAddress(Callee, DAG)) { + // Load r2 into a virtual register and store it to the TOC save area. + SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); + // TOC save area offset. + SDValue PtrOff = DAG.getIntPtrConstant(40); + SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); + Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), + false, false, 0); + // R12 must contain the address of an indirect callee. This does not + // mean the MTCTR instruction must use R12; it's easier to model this + // as an extra parameter, so do that. + RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee)); } // Build a sequence of copy-to-reg nodes chained together with token chain @@ -3134,8 +3944,8 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee, } if (isTailCall) - PrepareTailCall(DAG, InFlag, Chain, dl, false, SPDiff, NumBytes, LROp, FPOp, - false, TailCallArguments); + PrepareTailCall(DAG, InFlag, Chain, dl, true, SPDiff, NumBytes, LROp, + FPOp, true, TailCallArguments); return FinishCall(CallConv, dl, isTailCall, isVarArg, DAG, RegsToPass, InFlag, Chain, Callee, SPDiff, NumBytes, @@ -3152,7 +3962,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl &InVals) const { - unsigned NumOps = Outs.size(); + unsigned NumOps = Outs.size(); EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(); bool isPPC64 = PtrVT == MVT::i64; @@ -3259,11 +4069,13 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, } // FIXME memcpy is used way more than necessary. Correctness first. + // Note: "by value" is code for passing a structure by value, not + // basic types. if (Flags.isByVal()) { unsigned Size = Flags.getByValSize(); + // Very small objects are passed right-justified. Everything else is + // passed left-justified. if (Size==1 || Size==2) { - // Very small objects are passed right-justified. - // Everything else is passed left-justified. EVT VT = (Size==1) ? 
MVT::i8 : MVT::i16; if (GPR_idx != NumGPRs) { SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg, @@ -3274,17 +4086,12 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, ArgOffset += PtrByteSize; } else { - SDValue Const = DAG.getConstant(4 - Size, PtrOff.getValueType()); + SDValue Const = DAG.getConstant(PtrByteSize - Size, + PtrOff.getValueType()); SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const); - SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, AddPtr, - CallSeqStart.getNode()->getOperand(0), - Flags, DAG, dl); - // This must go outside the CALLSEQ_START..END. - SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, - CallSeqStart.getNode()->getOperand(1)); - DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), - NewCallSeqStart.getNode()); - Chain = CallSeqStart = NewCallSeqStart; + Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr, + CallSeqStart, + Flags, DAG, dl); ArgOffset += PtrByteSize; } continue; @@ -3292,15 +4099,13 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee, // Copy entire object into memory. There are cases where gcc-generated // code assumes it is there, even if it could be put entirely into // registers. (This is not what the doc says.) - SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff, - CallSeqStart.getNode()->getOperand(0), - Flags, DAG, dl); - // This must go outside the CALLSEQ_START..END. - SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, - CallSeqStart.getNode()->getOperand(1)); - DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), NewCallSeqStart.getNode()); - Chain = CallSeqStart = NewCallSeqStart; - // And copy the pieces of it that fit into registers. + Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff, + CallSeqStart, + Flags, DAG, dl); + + // For small aggregates (Darwin only) and aggregates >= PtrByteSize, + // copy the pieces of the object that fit into registers from the + // parameter save area. for (unsigned j=0; j(Callee) && - !dyn_cast(Callee) && - !isBLACompatibleAddress(Callee, DAG)) { - // Load r2 into a virtual register and store it to the TOC save area. - SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64); - // TOC save area offset. - SDValue PtrOff = DAG.getIntPtrConstant(40); - SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff); - Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr, MachinePointerInfo(), - false, false, 0); - } - // On Darwin, R12 must contain the address of an indirect callee. This does // not mean the MTCTR instruction must use R12; it's easier to model this as // an extra parameter, so do that. 
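For readers unfamiliar with the right-justification performed in the by-val handling above: on big-endian 64-bit PowerPC, an aggregate smaller than a doubleword is copied into the high-address (least-significant) bytes of its 8-byte parameter slot, so that loading the whole slot into a GPR leaves the aggregate in the register's low-order bytes. The following standalone sketch is illustrative only and not part of this patch (the struct and names are hypothetical); it models the memcpy to PtrOff + (PtrByteSize - Size) done via createMemcpyOutsideCallSeq:

#include <cstdio>
#include <cstring>

struct Small { short a; char b; };        // Size == 4 after tail padding

int main() {
  Small S = { 0x1234, 0x56 };
  unsigned char Slot[8] = {0};            // one doubleword of the parameter save area
  const unsigned Size = sizeof(Small);    // 4
  // Right-justify: copy the aggregate into the last Size bytes of the slot,
  // mirroring the AddPtr = PtrOff + (8 - Size) computation in the lowering.
  std::memcpy(Slot + (sizeof Slot - Size), &S, Size);
  for (unsigned i = 0; i != sizeof Slot; ++i)
    std::printf("%02x ", Slot[i]);        // aggregate bytes occupy Slot[4..7]
  std::printf("\n");
  return 0;
}

On a big-endian target, loading Slot as a single doubleword then yields a register whose low-order bytes hold the aggregate, which is the "right-justified" placement the comments above describe.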
@@ -3548,8 +4336,24 @@ PPCTargetLowering::LowerReturn(SDValue Chain, for (unsigned i = 0; i != RVLocs.size(); ++i) { CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); - Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), - OutVals[i], Flag); + + SDValue Arg = OutVals[i]; + + switch (VA.getLocInfo()) { + default: llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg); + break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg); + break; + } + + Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag); Flag = Chain.getValue(1); } @@ -3781,7 +4585,52 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op, return SDValue(); if (Op.getOperand(0).getValueType() == MVT::i64) { - SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0)); + SDValue SINT = Op.getOperand(0); + // When converting to single-precision, we actually need to convert + // to double-precision first and then round to single-precision. + // To avoid double-rounding effects during that operation, we have + // to prepare the input operand. Bits that might be truncated when + // converting to double-precision are replaced by a bit that won't + // be lost at this stage, but is below the single-precision rounding + // position. + // + // However, if -enable-unsafe-fp-math is in effect, accept double + // rounding to avoid the extra overhead. + if (Op.getValueType() == MVT::f32 && + !DAG.getTarget().Options.UnsafeFPMath) { + + // Twiddle input to make sure the low 11 bits are zero. (If this + // is the case, we are guaranteed the value will fit into the 53 bit + // mantissa of an IEEE double-precision value without rounding.) + // If any of those low 11 bits were not zero originally, make sure + // bit 12 (value 2048) is set instead, so that the final rounding + // to single-precision gets the correct result. + SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64, + SINT, DAG.getConstant(2047, MVT::i64)); + Round = DAG.getNode(ISD::ADD, dl, MVT::i64, + Round, DAG.getConstant(2047, MVT::i64)); + Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT); + Round = DAG.getNode(ISD::AND, dl, MVT::i64, + Round, DAG.getConstant(-2048, MVT::i64)); + + // However, we cannot use that value unconditionally: if the magnitude + // of the input value is small, the bit-twiddling we did above might + // end up visibly changing the output. Fortunately, in that case, we + // don't need to twiddle bits since the original input will convert + // exactly to double-precision floating-point already. Therefore, + // construct a conditional to use the original value if the top 11 + // bits are all sign-bit copies, and use the rounded value computed + // above otherwise. 
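      // [Illustrative note, not part of the original patch] In scalar terms,
      // the four nodes built above compute
      //   Round = (((SINT & 2047) + 2047) | SINT) & ~2047
      // i.e. the low 11 bits of SINT are cleared and bit 11 becomes a
      // "sticky" bit that is set whenever any of those low bits were nonzero.
      // The result always fits the 53-bit mantissa of a double without
      // rounding, so the i64 -> f64 conversion is exact and the later
      // f64 -> f32 rounding makes the same decision a single direct
      // i64 -> f32 rounding would have made. The SRA/SETCC/SELECT built
      // below keep the unmodified SINT when the top 11 bits are all
      // sign-bit copies, since such values already convert to double
      // exactly and need no twiddling.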
+ SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64, + SINT, DAG.getConstant(53, MVT::i32)); + Cond = DAG.getNode(ISD::ADD, dl, MVT::i64, + Cond, DAG.getConstant(1, MVT::i64)); + Cond = DAG.getSetCC(dl, MVT::i32, + Cond, DAG.getConstant(1, MVT::i64), ISD::SETUGT); + + SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT); + } + SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT); SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits); if (Op.getValueType() == MVT::f32) FP = DAG.getNode(ISD::FP_ROUND, dl, @@ -4126,7 +4975,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, unsigned TypeShiftAmt = i & (SplatBitSize-1); // vsplti + shl self. - if (SextVal == (i << (int)TypeShiftAmt)) { + if (SextVal == (int)((unsigned)i << TypeShiftAmt)) { SDValue Res = BuildSplatI(i, SplatSize, MVT::Other, DAG, dl); static const unsigned IIDs[] = { // Intrinsic to use for each size. Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0, @@ -4171,17 +5020,17 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op, } // t = vsplti c, result = vsldoi t, t, 1 - if (SextVal == ((i << 8) | (i < 0 ? 0xFF : 0))) { + if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) { SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); return BuildVSLDOI(T, T, 1, Op.getValueType(), DAG, dl); } // t = vsplti c, result = vsldoi t, t, 2 - if (SextVal == ((i << 16) | (i < 0 ? 0xFFFF : 0))) { + if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) { SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); return BuildVSLDOI(T, T, 2, Op.getValueType(), DAG, dl); } // t = vsplti c, result = vsldoi t, t, 3 - if (SextVal == ((i << 24) | (i < 0 ? 0xFFFFFF : 0))) { + if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) { SDValue T = BuildSplatI(i, SplatSize, MVT::v16i8, DAG, dl); return BuildVSLDOI(T, T, 3, Op.getValueType(), DAG, dl); } @@ -5630,6 +6479,14 @@ PPCTargetLowering::getConstraintType(const std::string &Constraint) const { case 'v': case 'y': return C_RegisterClass; + case 'Z': + // FIXME: While Z does indicate a memory constraint, it specifically + // indicates an r+r address (used in conjunction with the 'y' modifier + // in the replacement string). Currently, we're forcing the base + // register to be r0 in the asm printer (which is interpreted as zero) + // and forming the complete address in the second register. This is + // suboptimal. + return C_Memory; } } return TargetLowering::getConstraintType(Constraint); @@ -5672,6 +6529,9 @@ PPCTargetLowering::getSingleConstraintMatchWeight( case 'y': weight = CW_Register; break; + case 'Z': + weight = CW_Memory; + break; } return weight; } @@ -5688,9 +6548,9 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, return std::make_pair(0U, &PPC::G8RCRegClass); return std::make_pair(0U, &PPC::GPRCRegClass); case 'f': - if (VT == MVT::f32) + if (VT == MVT::f32 || VT == MVT::i32) return std::make_pair(0U, &PPC::F4RCRegClass); - if (VT == MVT::f64) + if (VT == MVT::f64 || VT == MVT::i64) return std::make_pair(0U, &PPC::F8RCRegClass); break; case 'v': @@ -5870,7 +6730,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) || MFI->hasVarSizedObjects()) && MFI->getStackSize() && - !MF.getFunction()->hasFnAttr(Attribute::Naked); + !MF.getFunction()->getFnAttributes(). + hasAttribute(Attributes::Naked); unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) : (is31 ? 
PPC::R31 : PPC::R1); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, diff --git a/lib/Target/PowerPC/PPCISelLowering.h b/lib/Target/PowerPC/PPCISelLowering.h index b0a013b..b3c7f9c 100644 --- a/lib/Target/PowerPC/PPCISelLowering.h +++ b/lib/Target/PowerPC/PPCISelLowering.h @@ -174,6 +174,10 @@ namespace llvm { /// operand #3 optional in flag TC_RETURN, + /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls + CR6SET, + CR6UNSET, + /// STD_32 - This is the STD instruction for use with "32-bit" registers. STD_32 = ISD::FIRST_TARGET_MEMORY_OPCODE, @@ -463,20 +467,41 @@ namespace llvm { DebugLoc dl, SelectionDAG &DAG) const; SDValue + extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT, SelectionDAG &DAG, + SDValue ArgVal, DebugLoc dl) const; + + void + setMinReservedArea(MachineFunction &MF, SelectionDAG &DAG, + unsigned nAltivecParamsAtEnd, + unsigned MinReservedArea, bool isPPC64) const; + + SDValue LowerFormalArguments_Darwin(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl &Ins, DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl &InVals) const; SDValue - LowerFormalArguments_SVR4(SDValue Chain, - CallingConv::ID CallConv, bool isVarArg, - const SmallVectorImpl &Ins, - DebugLoc dl, SelectionDAG &DAG, - SmallVectorImpl &InVals) const; + LowerFormalArguments_64SVR4(SDValue Chain, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const; + SDValue + LowerFormalArguments_32SVR4(SDValue Chain, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const; + + SDValue + createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff, + SDValue CallSeqStart, ISD::ArgFlagsTy Flags, + SelectionDAG &DAG, DebugLoc dl) const; SDValue - LowerCall_Darwin(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, + LowerCall_Darwin(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, bool isVarArg, bool isTailCall, const SmallVectorImpl &Outs, const SmallVectorImpl &OutVals, @@ -484,13 +509,22 @@ namespace llvm { DebugLoc dl, SelectionDAG &DAG, SmallVectorImpl &InVals) const; SDValue - LowerCall_SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, - bool isVarArg, bool isTailCall, - const SmallVectorImpl &Outs, - const SmallVectorImpl &OutVals, - const SmallVectorImpl &Ins, - DebugLoc dl, SelectionDAG &DAG, - SmallVectorImpl &InVals) const; + LowerCall_64SVR4(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, + bool isVarArg, bool isTailCall, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SmallVectorImpl &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const; + SDValue + LowerCall_32SVR4(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, + bool isVarArg, bool isTailCall, + const SmallVectorImpl &Outs, + const SmallVectorImpl &OutVals, + const SmallVectorImpl &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl &InVals) const; }; } diff --git a/lib/Target/PowerPC/PPCInstr64Bit.td b/lib/Target/PowerPC/PPCInstr64Bit.td index 39778a5..9711452 100644 --- a/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/lib/Target/PowerPC/PPCInstr64Bit.td @@ -29,6 +29,9 @@ def symbolLo64 : Operand { let PrintMethod = "printSymbolLo"; let EncoderMethod = "getLO16Encoding"; } +def tocentry : Operand { + let MIOperandInfo = (ops i32imm:$imm); +} //===----------------------------------------------------------------------===// // 64-bit 
transformation functions. @@ -60,7 +63,7 @@ def HI48_64 : SDNodeXForm, + def MovePCtoLR8 : Pseudo<(outs), (ins), "#MovePCtoLR8", []>, PPC970_Unit_BRU; // Darwin ABI Calls. @@ -138,31 +141,31 @@ def : Pat<(PPCnop), let usesCustomInserter = 1 in { let Defs = [CR0] in { def ATOMIC_LOAD_ADD_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "", + (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_ADD_I64", [(set G8RC:$dst, (atomic_load_add_64 xoaddr:$ptr, G8RC:$incr))]>; def ATOMIC_LOAD_SUB_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "", + (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_SUB_I64", [(set G8RC:$dst, (atomic_load_sub_64 xoaddr:$ptr, G8RC:$incr))]>; def ATOMIC_LOAD_OR_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "", + (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_OR_I64", [(set G8RC:$dst, (atomic_load_or_64 xoaddr:$ptr, G8RC:$incr))]>; def ATOMIC_LOAD_XOR_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "", + (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_XOR_I64", [(set G8RC:$dst, (atomic_load_xor_64 xoaddr:$ptr, G8RC:$incr))]>; def ATOMIC_LOAD_AND_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "", + (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_AND_i64", [(set G8RC:$dst, (atomic_load_and_64 xoaddr:$ptr, G8RC:$incr))]>; def ATOMIC_LOAD_NAND_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "", + (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$incr), "#ATOMIC_LOAD_NAND_I64", [(set G8RC:$dst, (atomic_load_nand_64 xoaddr:$ptr, G8RC:$incr))]>; def ATOMIC_CMP_SWAP_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new), "", + (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$old, G8RC:$new), "#ATOMIC_CMP_SWAP_I64", [(set G8RC:$dst, (atomic_cmp_swap_64 xoaddr:$ptr, G8RC:$old, G8RC:$new))]>; def ATOMIC_SWAP_I64 : Pseudo< - (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new), "", + (outs G8RC:$dst), (ins memrr:$ptr, G8RC:$new), "#ATOMIC_SWAP_I64", [(set G8RC:$dst, (atomic_swap_64 xoaddr:$ptr, G8RC:$new))]>; } } @@ -231,10 +234,10 @@ def : Pat<(PPCtc_return CTRRC8:$dst, imm:$imm), let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { let Defs = [CTR8], Uses = [CTR8] in { - def BDZ8 : IForm_ext<16, 18, 0, 0, (outs), (ins condbrtarget:$dst), - "bdz $dst", BrB, []>; - def BDNZ8 : IForm_ext<16, 16, 0, 0, (outs), (ins condbrtarget:$dst), - "bdnz $dst", BrB, []>; + def BDZ8 : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst), + "bdz $dst">; + def BDNZ8 : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst), + "bdnz $dst">; } } @@ -244,7 +247,7 @@ def MTCRF8 : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins G8RC:$rS), PPC970_MicroCode, PPC970_Unit_CRU; def MFCR8pseud: XFXForm_3<31, 19, (outs G8RC:$rT), (ins crbitm:$FXM), - "", SprMFCR>, + "#MFCR8pseud", SprMFCR>, PPC970_MicroCode, PPC970_Unit_CRU; def MFCR8 : XFXForm_3<31, 19, (outs G8RC:$rT), (ins), @@ -275,7 +278,7 @@ def MFTB8 : XFXForm_1_ext<31, 339, 268, (outs G8RC:$rT), (ins), // the POWER3. let Defs = [X1], Uses = [X1] in -def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi),"", +def DYNALLOC8 : Pseudo<(outs G8RC:$result), (ins G8RC:$negsize, memri:$fpsi),"#DYNALLOC8", [(set G8RC:$result, (PPCdynalloc G8RC:$negsize, iaddr:$fpsi))]>; @@ -296,12 +299,14 @@ def MFLR8 : XFXForm_1_ext<31, 339, 8, (outs G8RC:$rT), (ins), let PPC970_Unit = 1 in { // FXU Operations. 
+let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { def LI8 : DForm_2_r0<14, (outs G8RC:$rD), (ins symbolLo64:$imm), "li $rD, $imm", IntSimple, [(set G8RC:$rD, immSExt16:$imm)]>; def LIS8 : DForm_2_r0<15, (outs G8RC:$rD), (ins symbolHi64:$imm), "lis $rD, $imm", IntSimple, [(set G8RC:$rD, imm16ShiftedSExt:$imm)]>; +} // Logical ops. def NAND8: XForm_6<31, 476, (outs G8RC:$rA), (ins G8RC:$rS, G8RC:$rB), @@ -459,7 +464,7 @@ def EXTSW_32_64 : XForm_11<31, 986, (outs G8RC:$rA), (ins GPRC:$rS), let Defs = [CARRY] in { def SRADI : XSForm_1<31, 413, (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH), - "sradi $rA, $rS, $SH", IntRotateD, + "sradi $rA, $rS, $SH", IntRotateDI, [(set G8RC:$rA, (sra G8RC:$rS, (i32 imm:$SH)))]>, isPPC64; } def CNTLZD : XForm_11<31, 58, (outs G8RC:$rA), (ins G8RC:$rS), @@ -482,23 +487,23 @@ def MULLD : XOForm_1<31, 233, 0, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB), let isCommutable = 1 in { def RLDIMI : MDForm_1<30, 3, (outs G8RC:$rA), (ins G8RC:$rSi, G8RC:$rS, u6imm:$SH, u6imm:$MB), - "rldimi $rA, $rS, $SH, $MB", IntRotateD, + "rldimi $rA, $rS, $SH, $MB", IntRotateDI, []>, isPPC64, RegConstraint<"$rSi = $rA">, NoEncode<"$rSi">; } // Rotate instructions. def RLDCL : MDForm_1<30, 0, - (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB, u6imm:$MB), - "rldcl $rA, $rS, $rB, $MB", IntRotateD, + (outs G8RC:$rA), (ins G8RC:$rS, GPRC:$rB, u6imm:$MBE), + "rldcl $rA, $rS, $rB, $MBE", IntRotateD, []>, isPPC64; def RLDICL : MDForm_1<30, 0, - (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$MB), - "rldicl $rA, $rS, $SH, $MB", IntRotateD, + (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$MBE), + "rldicl $rA, $rS, $SH, $MBE", IntRotateDI, []>, isPPC64; def RLDICR : MDForm_1<30, 1, - (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$ME), - "rldicr $rA, $rS, $SH, $ME", IntRotateD, + (outs G8RC:$rA), (ins G8RC:$rS, u6imm:$SH, u6imm:$MBE), + "rldicr $rA, $rS, $SH, $MBE", IntRotateDI, []>, isPPC64; def RLWINM8 : MForm_2<21, @@ -506,7 +511,7 @@ def RLWINM8 : MForm_2<21, "rlwinm $rA, $rS, $SH, $MB, $ME", IntGeneral, []>; -def ISEL8 : AForm_1<31, 15, +def ISEL8 : AForm_4<31, 15, (outs G8RC:$rT), (ins G8RC:$rA, G8RC:$rB, pred:$cond), "isel $rT, $rA, $rB, $cond", IntGeneral, []>; @@ -541,19 +546,19 @@ def LWAX : XForm_1<31, 341, (outs G8RC:$rD), (ins memrr:$src), let mayLoad = 1 in def LHAU8 : DForm_1a<43, (outs G8RC:$rD, ptr_rc:$ea_result), (ins symbolLo:$disp, ptr_rc:$rA), - "lhau $rD, $disp($rA)", LdStLoad, + "lhau $rD, $disp($rA)", LdStLHAU, []>, RegConstraint<"$rA = $ea_result">, NoEncode<"$ea_result">; // NO LWAU! def LHAUX8 : XForm_1<31, 375, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lhaux $rD, $addr", LdStLoad, + "lhaux $rD, $addr", LdStLHAU, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; -def LWAUX : XForm_1<31, 375, (outs G8RC:$rD, ptr_rc:$ea_result), +def LWAUX : XForm_1<31, 373, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lwaux $rD, $addr", LdStLoad, + "lwaux $rD, $addr", LdStLHAU, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">, isPPC64; } @@ -584,31 +589,31 @@ def LWZX8 : XForm_1<31, 23, (outs G8RC:$rD), (ins memrr:$src), // Update forms. 
let mayLoad = 1 in { def LBZU8 : DForm_1<35, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lbzu $rD, $addr", LdStLoad, + "lbzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; def LHZU8 : DForm_1<41, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lhzu $rD, $addr", LdStLoad, + "lhzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; def LWZU8 : DForm_1<33, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lwzu $rD, $addr", LdStLoad, + "lwzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; def LBZUX8 : XForm_1<31, 119, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lbzux $rD, $addr", LdStLoad, + "lbzux $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; -def LHZUX8 : XForm_1<31, 331, (outs G8RC:$rD, ptr_rc:$ea_result), +def LHZUX8 : XForm_1<31, 311, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lhzux $rD, $addr", LdStLoad, + "lhzux $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; def LWZUX8 : XForm_1<31, 55, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lwzux $rD, $addr", LdStLoad, + "lwzux $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; } @@ -621,18 +626,26 @@ def LD : DSForm_1<58, 0, (outs G8RC:$rD), (ins memrix:$src), "ld $rD, $src", LdStLD, [(set G8RC:$rD, (load ixaddr:$src))]>, isPPC64; def LDtoc: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg), - "", + "#LDtoc", [(set G8RC:$rD, (PPCtoc_entry tglobaladdr:$disp, G8RC:$reg))]>, isPPC64; +def LDtocJTI: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg), + "#LDtocJTI", + [(set G8RC:$rD, + (PPCtoc_entry tjumptable:$disp, G8RC:$reg))]>, isPPC64; +def LDtocCPT: Pseudo<(outs G8RC:$rD), (ins tocentry:$disp, G8RC:$reg), + "#LDtocCPT", + [(set G8RC:$rD, + (PPCtoc_entry tconstpool:$disp, G8RC:$reg))]>, isPPC64; let hasSideEffects = 1 in { -let RST = 2, DS_RA = 0 in // FIXME: Should be a pseudo. -def LDinto_toc: DSForm_1<58, 0, (outs), (ins G8RC:$reg), +let RST = 2, DS = 2 in +def LDinto_toc: DSForm_1a<58, 0, (outs), (ins G8RC:$reg), "ld 2, 8($reg)", LdStLD, [(PPCload_toc G8RC:$reg)]>, isPPC64; -let RST = 2, DS_RA = 0 in // FIXME: Should be a pseudo. 
-def LDtoc_restore : DSForm_1<58, 0, (outs), (ins), +let RST = 2, DS = 10, RA = 1 in +def LDtoc_restore : DSForm_1a<58, 0, (outs), (ins), "ld 2, 40(1)", LdStLD, [(PPCtoc_restore)]>, isPPC64; } @@ -642,13 +655,13 @@ def LDX : XForm_1<31, 21, (outs G8RC:$rD), (ins memrr:$src), let mayLoad = 1 in def LDU : DSForm_1<58, 1, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrix:$addr), - "ldu $rD, $addr", LdStLD, + "ldu $rD, $addr", LdStLDU, []>, RegConstraint<"$addr.reg = $ea_result">, isPPC64, NoEncode<"$ea_result">; def LDUX : XForm_1<31, 53, (outs G8RC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "ldux $rD, $addr", LdStLoad, + "ldux $rD, $addr", LdStLDU, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">, isPPC64; } @@ -693,16 +706,16 @@ def STDX : XForm_8<31, 149, (outs), (ins G8RC:$rS, memrr:$dst), let PPC970_Unit = 2 in { -def STBU8 : DForm_1a<38, (outs ptr_rc:$ea_res), (ins G8RC:$rS, +def STBU8 : DForm_1a<39, (outs ptr_rc:$ea_res), (ins G8RC:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg), - "stbu $rS, $ptroff($ptrreg)", LdStStore, + "stbu $rS, $ptroff($ptrreg)", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti8 G8RC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; def STHU8 : DForm_1a<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg), - "sthu $rS, $ptroff($ptrreg)", LdStStore, + "sthu $rS, $ptroff($ptrreg)", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti16 G8RC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, @@ -710,7 +723,7 @@ def STHU8 : DForm_1a<45, (outs ptr_rc:$ea_res), (ins G8RC:$rS, def STWU8 : DForm_1a<37, (outs ptr_rc:$ea_res), (ins G8RC:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg), - "stwu $rS, $ptroff($ptrreg)", LdStStore, + "stwu $rS, $ptroff($ptrreg)", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti32 G8RC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, @@ -718,7 +731,7 @@ def STWU8 : DForm_1a<37, (outs ptr_rc:$ea_res), (ins G8RC:$rS, def STDU : DSForm_1a<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS, s16immX4:$ptroff, ptr_rc:$ptrreg), - "stdu $rS, $ptroff($ptrreg)", LdStSTD, + "stdu $rS, $ptroff($ptrreg)", LdStSTDU, [(set ptr_rc:$ea_res, (pre_store G8RC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">, @@ -727,7 +740,7 @@ def STDU : DSForm_1a<62, 1, (outs ptr_rc:$ea_res), (ins G8RC:$rS, def STBUX8 : XForm_8<31, 247, (outs ptr_rc:$ea_res), (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stbux $rS, $ptroff, $ptrreg", LdStStore, + "stbux $rS, $ptroff, $ptrreg", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti8 G8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, @@ -736,7 +749,7 @@ def STBUX8 : XForm_8<31, 247, (outs ptr_rc:$ea_res), def STHUX8 : XForm_8<31, 439, (outs ptr_rc:$ea_res), (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "sthux $rS, $ptroff, $ptrreg", LdStStore, + "sthux $rS, $ptroff, $ptrreg", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti16 G8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, @@ -745,7 +758,7 @@ def STHUX8 : XForm_8<31, 439, (outs ptr_rc:$ea_res), def STWUX8 : XForm_8<31, 183, (outs ptr_rc:$ea_res), (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stwux $rS, $ptroff, $ptrreg", LdStStore, + "stwux $rS, $ptroff, $ptrreg", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti32 G8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, @@ -754,7 +767,7 @@ def STWUX8 : XForm_8<31, 183, (outs ptr_rc:$ea_res), def STDUX : XForm_8<31, 181, (outs ptr_rc:$ea_res), (ins G8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stdux $rS, $ptroff, $ptrreg", 
LdStStore, + "stdux $rS, $ptroff, $ptrreg", LdStSTDU, [(set ptr_rc:$ea_res, (pre_store G8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td index b0b8423..ba58c3e 100644 --- a/lib/Target/PowerPC/PPCInstrAltivec.td +++ b/lib/Target/PowerPC/PPCInstrAltivec.td @@ -340,6 +340,28 @@ def VCTUXS : VXForm_1<906, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB), "vctuxs $vD, $vB, $UIMM", VecFP, [(set VRRC:$vD, (int_ppc_altivec_vctuxs VRRC:$vB, imm:$UIMM))]>; + +// Defines with the UIM field set to 0 for floating-point +// to integer (fp_to_sint/fp_to_uint) conversions and integer +// to floating-point (sint_to_fp/uint_to_fp) conversions. +let VA = 0 in { +def VCFSX_0 : VXForm_1<842, (outs VRRC:$vD), (ins VRRC:$vB), + "vcfsx $vD, $vB, 0", VecFP, + [(set VRRC:$vD, + (int_ppc_altivec_vcfsx VRRC:$vB, 0))]>; +def VCTUXS_0 : VXForm_1<906, (outs VRRC:$vD), (ins VRRC:$vB), + "vctuxs $vD, $vB, 0", VecFP, + [(set VRRC:$vD, + (int_ppc_altivec_vctuxs VRRC:$vB, 0))]>; +def VCFUX_0 : VXForm_1<778, (outs VRRC:$vD), (ins VRRC:$vB), + "vcfux $vD, $vB, 0", VecFP, + [(set VRRC:$vD, + (int_ppc_altivec_vcfux VRRC:$vB, 0))]>; +def VCTSXS_0 : VXForm_1<970, (outs VRRC:$vD), (ins VRRC:$vB), + "vctsxs $vD, $vB, 0", VecFP, + [(set VRRC:$vD, + (int_ppc_altivec_vctsxs VRRC:$vB, 0))]>; +} def VEXPTEFP : VX2_Int<394, "vexptefp", int_ppc_altivec_vexptefp>; def VLOGEFP : VX2_Int<458, "vlogefp", int_ppc_altivec_vlogefp>; @@ -689,3 +711,13 @@ def : Pat<(v8i16 (sra (v8i16 VRRC:$vA), (v8i16 VRRC:$vB))), (v8i16 (VSRAH VRRC:$vA, VRRC:$vB))>; def : Pat<(v4i32 (sra (v4i32 VRRC:$vA), (v4i32 VRRC:$vB))), (v4i32 (VSRAW VRRC:$vA, VRRC:$vB))>; + +// Float to integer and integer to float conversions +def : Pat<(v4i32 (fp_to_sint (v4f32 VRRC:$vA))), + (VCTSXS_0 VRRC:$vA)>; +def : Pat<(v4i32 (fp_to_uint (v4f32 VRRC:$vA))), + (VCTUXS_0 VRRC:$vA)>; +def : Pat<(v4f32 (sint_to_fp (v4i32 VRRC:$vA))), + (VCFSX_0 VRRC:$vA)>; +def : Pat<(v4f32 (uint_to_fp (v4i32 VRRC:$vA))), + (VCFUX_0 VRRC:$vA)>; diff --git a/lib/Target/PowerPC/PPCInstrFormats.td b/lib/Target/PowerPC/PPCInstrFormats.td index a41a027..c3c171c 100644 --- a/lib/Target/PowerPC/PPCInstrFormats.td +++ b/lib/Target/PowerPC/PPCInstrFormats.td @@ -94,12 +94,6 @@ class IForm opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr, let Inst{31} = lk; } -class IForm_ext opcode, bits<5> bo, bit aa, bit lk, dag OOL, dag IOL, - string asmstr, InstrItinClass itin, list pattern> - : IForm { - let LI{0-4} = bo; -} - // 1.7.2 B-Form class BForm opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr> : I { @@ -118,6 +112,13 @@ class BForm opcode, bit aa, bit lk, dag OOL, dag IOL, string asmstr> let Inst{31} = lk; } +class BForm_1 opcode, bits<5> bo, bit aa, bit lk, dag OOL, dag IOL, + string asmstr> + : BForm { + let BIBO{4-0} = bo; + let BIBO{6-5} = 0; + let CR = 0; +} // 1.7.4 D-Form class DForm_base opcode, dag OOL, dag IOL, string asmstr, @@ -625,9 +626,9 @@ class XFXForm_5 opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, InstrItinClass itin> : I { bits<8> FXM; - bits<5> ST; + bits<5> rS; - let Inst{6-10} = ST; + let Inst{6-10} = rS; let Inst{11} = 0; let Inst{12-19} = FXM; let Inst{20} = 0; @@ -666,7 +667,7 @@ class XFLForm opcode, bits<10> xo, dag OOL, dag IOL, string asmstr, string cstr, InstrItinClass itin, listpattern> : I { bits<8> FM; - bits<5> RT; + bits<5> rT; bit RC = 0; // set by isDOT let Pattern = pattern; @@ -675,7 +676,7 @@ class XFLForm opcode, 
bits<10> xo, dag OOL, dag IOL, string asmstr, let Inst{6} = 0; let Inst{7-14} = FM; let Inst{15} = 0; - let Inst{16-20} = RT; + let Inst{16-20} = rT; let Inst{21-30} = xo; let Inst{31} = RC; } @@ -758,6 +759,26 @@ class AForm_3 opcode, bits<5> xo, dag OOL, dag IOL, string asmstr, let FRB = 0; } +class AForm_4 opcode, bits<5> xo, dag OOL, dag IOL, string asmstr, + InstrItinClass itin, list pattern> + : I { + bits<5> RT; + bits<5> RA; + bits<5> RB; + bits<7> BIBO; // 2 bits of BI and 5 bits of BO (must be 12). + bits<3> CR; + + let Pattern = pattern; + + let Inst{6-10} = RT; + let Inst{11-15} = RA; + let Inst{16-20} = RB; + let Inst{21-23} = CR; + let Inst{24-25} = BIBO{6-5}; + let Inst{26-30} = xo; + let Inst{31} = 0; +} + // 1.7.13 M-Form class MForm_1 opcode, dag OOL, dag IOL, string asmstr, InstrItinClass itin, list pattern> diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp index 47f09dc..d9d6844 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -54,7 +54,8 @@ ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer( const TargetMachine *TM, const ScheduleDAG *DAG) const { unsigned Directive = TM->getSubtarget().getDarwinDirective(); - if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2) { + if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 || + Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) { const InstrItineraryData *II = TM->getInstrItineraryData(); return new PPCScoreboardHazardRecognizer(II, DAG); } @@ -70,7 +71,8 @@ ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer( unsigned Directive = TM.getSubtarget().getDarwinDirective(); // Most subtargets use a PPC970 recognizer. - if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2) { + if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 && + Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) { const TargetInstrInfo *TII = TM.getInstrInfo(); assert(TII && "No InstrInfo?"); @@ -568,12 +570,15 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF, // STVX VAL, 0, R0 // // FIXME: We use R0 here, because it isn't available for RA. - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::ADDI), PPC::R0), + bool Is64Bit = TM.getSubtargetImpl()->isPPC64(); + unsigned Instr = Is64Bit ? PPC::ADDI8 : PPC::ADDI; + unsigned GPR0 = Is64Bit ? PPC::X0 : PPC::R0; + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Instr), GPR0), FrameIdx, 0, 0)); NewMIs.push_back(BuildMI(MF, DL, get(PPC::STVX)) .addReg(SrcReg, getKillRegState(isKill)) - .addReg(PPC::R0) - .addReg(PPC::R0)); + .addReg(GPR0) + .addReg(GPR0)); } else { llvm_unreachable("Unknown regclass!"); } @@ -705,10 +710,13 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL, // Dest = LVX 0, R0 // // FIXME: We use R0 here, because it isn't available for RA. - NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::ADDI), PPC::R0), + bool Is64Bit = TM.getSubtargetImpl()->isPPC64(); + unsigned Instr = Is64Bit ? PPC::ADDI8 : PPC::ADDI; + unsigned GPR0 = Is64Bit ? 
PPC::X0 : PPC::R0; + NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Instr), GPR0), FrameIdx, 0, 0)); - NewMIs.push_back(BuildMI(MF, DL, get(PPC::LVX),DestReg).addReg(PPC::R0) - .addReg(PPC::R0)); + NewMIs.push_back(BuildMI(MF, DL, get(PPC::LVX),DestReg).addReg(GPR0) + .addReg(GPR0)); } else { llvm_unreachable("Unknown regclass!"); } diff --git a/lib/Target/PowerPC/PPCInstrInfo.td b/lib/Target/PowerPC/PPCInstrInfo.td index f57f0c9..6ee045a 100644 --- a/lib/Target/PowerPC/PPCInstrInfo.td +++ b/lib/Target/PowerPC/PPCInstrInfo.td @@ -123,9 +123,11 @@ def PPCnop : SDNode<"PPCISD::NOP", SDT_PPCnop, [SDNPInGlue, SDNPOutGlue]>; def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; def PPCload_toc : SDNode<"PPCISD::LOAD_TOC", SDTypeProfile<0, 1, []>, - [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>; + [SDNPHasChain, SDNPSideEffect, + SDNPInGlue, SDNPOutGlue]>; def PPCtoc_restore : SDNode<"PPCISD::TOC_RESTORE", SDTypeProfile<0, 0, []>, - [SDNPHasChain, SDNPInGlue, SDNPOutGlue]>; + [SDNPHasChain, SDNPSideEffect, + SDNPInGlue, SDNPOutGlue]>; def PPCmtctr : SDNode<"PPCISD::MTCTR", SDT_PPCCall, [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; def PPCbctrl_Darwin : SDNode<"PPCISD::BCTRL_Darwin", SDTNone, @@ -153,6 +155,12 @@ def PPClbrx : SDNode<"PPCISD::LBRX", SDT_PPClbrx, def PPCstbrx : SDNode<"PPCISD::STBRX", SDT_PPCstbrx, [SDNPHasChain, SDNPMayStore]>; +// Instructions to set/unset CR bit 6 for SVR4 vararg calls +def PPCcr6set : SDNode<"PPCISD::CR6SET", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; +def PPCcr6unset : SDNode<"PPCISD::CR6UNSET", SDTNone, + [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>; + // Instructions to support atomic operations def PPClarx : SDNode<"PPCISD::LARX", SDT_PPClarx, [SDNPHasChain, SDNPMayLoad]>; @@ -330,9 +338,6 @@ def memrix : Operand { // memri where the imm is shifted 2 bits. let MIOperandInfo = (ops i32imm:$imm, ptr_rc:$reg); let EncoderMethod = "getMemRIXEncoding"; } -def tocentry : Operand { - let MIOperandInfo = (ops i32imm:$imm); -} // PowerPC Predicate operand. 20 = (0<<5)|20 = always, CR0 is a dummy reg // that doesn't matter. @@ -364,9 +369,9 @@ def IsBookE : Predicate<"PPCSubTarget.isBookE()">; let hasCtrlDep = 1 in { let Defs = [R1], Uses = [R1] in { -def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt), "", +def ADJCALLSTACKDOWN : Pseudo<(outs), (ins u16imm:$amt), "#ADJCALLSTACKDOWN $amt", [(callseq_start timm:$amt)]>; -def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2), "", +def ADJCALLSTACKUP : Pseudo<(outs), (ins u16imm:$amt1, u16imm:$amt2), "#ADJCALLSTACKUP $amt1 $amt2", [(callseq_end timm:$amt1, timm:$amt2)]>; } @@ -375,7 +380,7 @@ def UPDATE_VRSAVE : Pseudo<(outs GPRC:$rD), (ins GPRC:$rS), } let Defs = [R1], Uses = [R1] in -def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "", +def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "#DYNALLOC", [(set GPRC:$result, (PPCdynalloc GPRC:$negsize, iaddr:$fpsi))]>; @@ -384,19 +389,19 @@ def DYNALLOC : Pseudo<(outs GPRC:$result), (ins GPRC:$negsize, memri:$fpsi), "", let usesCustomInserter = 1, // Expanded after instruction selection. 
PPC970_Single = 1 in { def SELECT_CC_I4 : Pseudo<(outs GPRC:$dst), (ins CRRC:$cond, GPRC:$T, GPRC:$F, - i32imm:$BROPC), "", + i32imm:$BROPC), "#SELECT_CC_I4", []>; def SELECT_CC_I8 : Pseudo<(outs G8RC:$dst), (ins CRRC:$cond, G8RC:$T, G8RC:$F, - i32imm:$BROPC), "", + i32imm:$BROPC), "#SELECT_CC_I8", []>; def SELECT_CC_F4 : Pseudo<(outs F4RC:$dst), (ins CRRC:$cond, F4RC:$T, F4RC:$F, - i32imm:$BROPC), "", + i32imm:$BROPC), "#SELECT_CC_F4", []>; def SELECT_CC_F8 : Pseudo<(outs F8RC:$dst), (ins CRRC:$cond, F8RC:$T, F8RC:$F, - i32imm:$BROPC), "", + i32imm:$BROPC), "#SELECT_CC_F8", []>; def SELECT_CC_VRRC: Pseudo<(outs VRRC:$dst), (ins CRRC:$cond, VRRC:$T, VRRC:$F, - i32imm:$BROPC), "", + i32imm:$BROPC), "#SELECT_CC_VRRC", []>; } @@ -404,16 +409,16 @@ let usesCustomInserter = 1, // Expanded after instruction selection. // scavenge a register for it. let mayStore = 1 in def SPILL_CR : Pseudo<(outs), (ins CRRC:$cond, memri:$F), - "", []>; + "#SPILL_CR", []>; // RESTORE_CR - Indicate that we're restoring the CR register (previously // spilled), so we'll need to scavenge a register for it. let mayLoad = 1 in def RESTORE_CR : Pseudo<(outs CRRC:$cond), (ins memri:$F), - "", []>; + "#RESTORE_CR", []>; let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in { - let isReturn = 1, Uses = [LR, RM] in + let isCodeGenOnly = 1, isReturn = 1, Uses = [LR, RM] in def BLR : XLForm_2_br<19, 16, 0, (outs), (ins pred:$p), "b${p:cc}lr ${p:reg}", BrB, [(retflag)]>; @@ -422,7 +427,7 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit = 7 in { } let Defs = [LR] in - def MovePCtoLR : Pseudo<(outs), (ins), "", []>, + def MovePCtoLR : Pseudo<(outs), (ins), "#MovePCtoLR", []>, PPC970_Unit_BRU; let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { @@ -434,16 +439,17 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { // BCC represents an arbitrary conditional branch on a predicate. // FIXME: should be able to write a pattern for PPCcondbranch, but can't use - // a two-value operand where a dag node expects two operands. :( - def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst), - "b${cond:cc} ${cond:reg}, $dst" - /*[(PPCcondbranch CRRC:$crS, imm:$opc, bb:$dst)]*/>; + // a two-value operand where a dag node expects two operands. 
:( + let isCodeGenOnly = 1 in + def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst), + "b${cond:cc} ${cond:reg}, $dst" + /*[(PPCcondbranch CRRC:$crS, imm:$opc, bb:$dst)]*/>; let Defs = [CTR], Uses = [CTR] in { - def BDZ : IForm_ext<16, 18, 0, 0, (outs), (ins condbrtarget:$dst), - "bdz $dst", BrB, []>; - def BDNZ : IForm_ext<16, 16, 0, 0, (outs), (ins condbrtarget:$dst), - "bdnz $dst", BrB, []>; + def BDZ : BForm_1<16, 18, 0, 0, (outs), (ins condbrtarget:$dst), + "bdz $dst">; + def BDNZ : BForm_1<16, 16, 0, 0, (outs), (ins condbrtarget:$dst), + "bdnz $dst">; } } @@ -559,81 +565,81 @@ def : Pat<(prefetch xoaddr:$dst, (i32 0), imm, (i32 1)), let usesCustomInserter = 1 in { let Defs = [CR0] in { def ATOMIC_LOAD_ADD_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I8", [(set GPRC:$dst, (atomic_load_add_8 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_SUB_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I8", [(set GPRC:$dst, (atomic_load_sub_8 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_AND_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I8", [(set GPRC:$dst, (atomic_load_and_8 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_OR_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I8", [(set GPRC:$dst, (atomic_load_or_8 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_XOR_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "ATOMIC_LOAD_XOR_I8", [(set GPRC:$dst, (atomic_load_xor_8 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_NAND_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I8", [(set GPRC:$dst, (atomic_load_nand_8 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_ADD_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I16", [(set GPRC:$dst, (atomic_load_add_16 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_SUB_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I16", [(set GPRC:$dst, (atomic_load_sub_16 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_AND_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I16", [(set GPRC:$dst, (atomic_load_and_16 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_OR_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I16", [(set GPRC:$dst, (atomic_load_or_16 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_XOR_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_XOR_I16", [(set GPRC:$dst, (atomic_load_xor_16 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_NAND_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I16", [(set GPRC:$dst, (atomic_load_nand_16 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_ADD_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins 
memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_ADD_I32", [(set GPRC:$dst, (atomic_load_add_32 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_SUB_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_SUB_I32", [(set GPRC:$dst, (atomic_load_sub_32 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_AND_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_AND_I32", [(set GPRC:$dst, (atomic_load_and_32 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_OR_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_OR_I32", [(set GPRC:$dst, (atomic_load_or_32 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_XOR_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_XOR_I32", [(set GPRC:$dst, (atomic_load_xor_32 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_LOAD_NAND_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$incr), "#ATOMIC_LOAD_NAND_I32", [(set GPRC:$dst, (atomic_load_nand_32 xoaddr:$ptr, GPRC:$incr))]>; def ATOMIC_CMP_SWAP_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I8", [(set GPRC:$dst, (atomic_cmp_swap_8 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>; def ATOMIC_CMP_SWAP_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I16 $dst $ptr $old $new", [(set GPRC:$dst, (atomic_cmp_swap_16 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>; def ATOMIC_CMP_SWAP_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$old, GPRC:$new), "#ATOMIC_CMP_SWAP_I32 $dst $ptr $old $new", [(set GPRC:$dst, (atomic_cmp_swap_32 xoaddr:$ptr, GPRC:$old, GPRC:$new))]>; def ATOMIC_SWAP_I8 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_i8", [(set GPRC:$dst, (atomic_swap_8 xoaddr:$ptr, GPRC:$new))]>; def ATOMIC_SWAP_I16 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_I16", [(set GPRC:$dst, (atomic_swap_16 xoaddr:$ptr, GPRC:$new))]>; def ATOMIC_SWAP_I32 : Pseudo< - (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "", + (outs GPRC:$dst), (ins memrr:$ptr, GPRC:$new), "#ATOMIC_SWAP_I32", [(set GPRC:$dst, (atomic_swap_32 xoaddr:$ptr, GPRC:$new))]>; } } @@ -673,7 +679,7 @@ def LWZ : DForm_1<32, (outs GPRC:$rD), (ins memri:$src), [(set GPRC:$rD, (load iaddr:$src))]>; def LFS : DForm_1<48, (outs F4RC:$rD), (ins memri:$src), - "lfs $rD, $src", LdStLFDU, + "lfs $rD, $src", LdStLFD, [(set F4RC:$rD, (load iaddr:$src))]>; def LFD : DForm_1<50, (outs F8RC:$rD), (ins memri:$src), "lfd $rD, $src", LdStLFD, @@ -683,32 +689,32 @@ def LFD : DForm_1<50, (outs F8RC:$rD), (ins memri:$src), // Unindexed (r+i) Loads with Update (preinc). 
let mayLoad = 1 in { def LBZU : DForm_1<35, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lbzu $rD, $addr", LdStLoad, + "lbzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; def LHAU : DForm_1<43, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lhau $rD, $addr", LdStLoad, + "lhau $rD, $addr", LdStLHAU, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; def LHZU : DForm_1<41, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lhzu $rD, $addr", LdStLoad, + "lhzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; def LWZU : DForm_1<33, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lwzu $rD, $addr", LdStLoad, + "lwzu $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; def LFSU : DForm_1<49, (outs F4RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lfs $rD, $addr", LdStLFDU, + "lfsu $rD, $addr", LdStLFDU, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), - "lfd $rD, $addr", LdStLFD, + "lfdu $rD, $addr", LdStLFDU, []>, RegConstraint<"$addr.reg = $ea_result">, NoEncode<"$ea_result">; @@ -716,37 +722,37 @@ def LFDU : DForm_1<51, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memri:$addr), // Indexed (r+r) Loads with Update (preinc). def LBZUX : XForm_1<31, 119, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lbzux $rD, $addr", LdStLoad, + "lbzux $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; def LHAUX : XForm_1<31, 375, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lhaux $rD, $addr", LdStLoad, + "lhaux $rD, $addr", LdStLHAU, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; -def LHZUX : XForm_1<31, 331, (outs GPRC:$rD, ptr_rc:$ea_result), +def LHZUX : XForm_1<31, 311, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lhzux $rD, $addr", LdStLoad, + "lhzux $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; def LWZUX : XForm_1<31, 55, (outs GPRC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lwzux $rD, $addr", LdStLoad, + "lwzux $rD, $addr", LdStLoadUpd, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; def LFSUX : XForm_1<31, 567, (outs F4RC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lfsux $rD, $addr", LdStLoad, + "lfsux $rD, $addr", LdStLFDU, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; def LFDUX : XForm_1<31, 631, (outs F8RC:$rD, ptr_rc:$ea_result), (ins memrr:$addr), - "lfdux $rD, $addr", LdStLoad, + "lfdux $rD, $addr", LdStLFDU, []>, RegConstraint<"$addr.offreg = $ea_result">, NoEncode<"$ea_result">; } @@ -778,10 +784,10 @@ def LWBRX : XForm_1<31, 534, (outs GPRC:$rD), (ins memrr:$src), [(set GPRC:$rD, (PPClbrx xoaddr:$src, i32))]>; def LFSX : XForm_25<31, 535, (outs F4RC:$frD), (ins memrr:$src), - "lfsx $frD, $src", LdStLFDU, + "lfsx $frD, $src", LdStLFD, [(set F4RC:$frD, (load xaddr:$src))]>; def LFDX : XForm_25<31, 599, (outs F8RC:$frD), (ins memrr:$src), - "lfdx $frD, $src", LdStLFDU, + "lfdx $frD, $src", LdStLFD, [(set F8RC:$frD, (load xaddr:$src))]>; } @@ -801,10 +807,10 @@ def STW : DForm_1<36, (outs), (ins GPRC:$rS, memri:$src), "stw $rS, $src", LdStStore, [(store GPRC:$rS, iaddr:$src)]>; def STFS : DForm_1<52, (outs), (ins F4RC:$rS, memri:$dst), - "stfs $rS, $dst", LdStUX, + "stfs $rS, $dst", 
LdStSTFD, [(store F4RC:$rS, iaddr:$dst)]>; def STFD : DForm_1<54, (outs), (ins F8RC:$rS, memri:$dst), - "stfd $rS, $dst", LdStUX, + "stfd $rS, $dst", LdStSTFD, [(store F8RC:$rS, iaddr:$dst)]>; } @@ -812,33 +818,33 @@ def STFD : DForm_1<54, (outs), (ins F8RC:$rS, memri:$dst), let PPC970_Unit = 2 in { def STBU : DForm_1a<39, (outs ptr_rc:$ea_res), (ins GPRC:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg), - "stbu $rS, $ptroff($ptrreg)", LdStStore, + "stbu $rS, $ptroff($ptrreg)", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti8 GPRC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; def STHU : DForm_1a<45, (outs ptr_rc:$ea_res), (ins GPRC:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg), - "sthu $rS, $ptroff($ptrreg)", LdStStore, + "sthu $rS, $ptroff($ptrreg)", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti16 GPRC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; def STWU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins GPRC:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg), - "stwu $rS, $ptroff($ptrreg)", LdStStore, + "stwu $rS, $ptroff($ptrreg)", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_store GPRC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; def STFSU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F4RC:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg), - "stfsu $rS, $ptroff($ptrreg)", LdStStore, + "stfsu $rS, $ptroff($ptrreg)", LdStSTFDU, [(set ptr_rc:$ea_res, (pre_store F4RC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; def STFDU : DForm_1a<37, (outs ptr_rc:$ea_res), (ins F8RC:$rS, symbolLo:$ptroff, ptr_rc:$ptrreg), - "stfdu $rS, $ptroff($ptrreg)", LdStStore, + "stfdu $rS, $ptroff($ptrreg)", LdStSTFDU, [(set ptr_rc:$ea_res, (pre_store F8RC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff))]>, RegConstraint<"$ptrreg = $ea_res">, NoEncode<"$ea_res">; @@ -863,7 +869,7 @@ def STWX : XForm_8<31, 151, (outs), (ins GPRC:$rS, memrr:$dst), def STBUX : XForm_8<31, 247, (outs ptr_rc:$ea_res), (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stbux $rS, $ptroff, $ptrreg", LdStStore, + "stbux $rS, $ptroff, $ptrreg", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti8 GPRC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, @@ -872,7 +878,7 @@ def STBUX : XForm_8<31, 247, (outs ptr_rc:$ea_res), def STHUX : XForm_8<31, 439, (outs ptr_rc:$ea_res), (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "sthux $rS, $ptroff, $ptrreg", LdStStore, + "sthux $rS, $ptroff, $ptrreg", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_truncsti16 GPRC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, @@ -881,7 +887,7 @@ def STHUX : XForm_8<31, 439, (outs ptr_rc:$ea_res), def STWUX : XForm_8<31, 183, (outs ptr_rc:$ea_res), (ins GPRC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stwux $rS, $ptroff, $ptrreg", LdStStore, + "stwux $rS, $ptroff, $ptrreg", LdStStoreUpd, [(set ptr_rc:$ea_res, (pre_store GPRC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, @@ -889,7 +895,7 @@ def STWUX : XForm_8<31, 183, (outs ptr_rc:$ea_res), def STFSUX : XForm_8<31, 695, (outs ptr_rc:$ea_res), (ins F4RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stfsux $rS, $ptroff, $ptrreg", LdStStore, + "stfsux $rS, $ptroff, $ptrreg", LdStSTFDU, [(set ptr_rc:$ea_res, (pre_store F4RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, @@ -897,7 +903,7 @@ def STFSUX : XForm_8<31, 695, (outs ptr_rc:$ea_res), def STFDUX : XForm_8<31, 759, (outs 
ptr_rc:$ea_res), (ins F8RC:$rS, ptr_rc:$ptroff, ptr_rc:$ptrreg), - "stfdux $rS, $ptroff, $ptrreg", LdStStore, + "stfdux $rS, $ptroff, $ptrreg", LdStSTFDU, [(set ptr_rc:$ea_res, (pre_store F8RC:$rS, ptr_rc:$ptrreg, xaddroff:$ptroff))]>, RegConstraint<"$ptroff = $ea_res">, NoEncode<"$ea_res">, @@ -913,14 +919,14 @@ def STWBRX: XForm_8<31, 662, (outs), (ins GPRC:$rS, memrr:$dst), PPC970_DGroup_Cracked; def STFIWX: XForm_28<31, 983, (outs), (ins F8RC:$frS, memrr:$dst), - "stfiwx $frS, $dst", LdStUX, + "stfiwx $frS, $dst", LdStSTFD, [(PPCstfiwx F8RC:$frS, xoaddr:$dst)]>; def STFSX : XForm_28<31, 663, (outs), (ins F4RC:$frS, memrr:$dst), - "stfsx $frS, $dst", LdStUX, + "stfsx $frS, $dst", LdStSTFD, [(store F4RC:$frS, xaddr:$dst)]>; def STFDX : XForm_28<31, 727, (outs), (ins F8RC:$frS, memrr:$dst), - "stfdx $frS, $dst", LdStUX, + "stfdx $frS, $dst", LdStSTFD, [(store F8RC:$frS, xaddr:$dst)]>; } @@ -964,7 +970,7 @@ def SUBFIC : DForm_2< 8, (outs GPRC:$rD), (ins GPRC:$rA, s16imm:$imm), [(set GPRC:$rD, (subc immSExt16:$imm, GPRC:$rA))]>; } -let isReMaterializable = 1 in { +let isReMaterializable = 1, isAsCheapAsAMove = 1, isMoveImm = 1 in { def LI : DForm_2_r0<14, (outs GPRC:$rD), (ins symbolLo:$imm), "li $rD, $imm", IntSimple, [(set GPRC:$rD, immSExt16:$imm)]>; @@ -1143,6 +1149,16 @@ def CRUNSET: XLForm_1_ext<19, 193, (outs CRBITRC:$dst), (ins), "crxor $dst, $dst, $dst", BrCR, []>; +let Defs = [CR1EQ], CRD = 6 in { +def CR6SET : XLForm_1_ext<19, 289, (outs), (ins), + "creqv 6, 6, 6", BrCR, + [(PPCcr6set)]>; + +def CR6UNSET: XLForm_1_ext<19, 193, (outs), (ins), + "crxor 6, 6, 6", BrCR, + [(PPCcr6unset)]>; +} + // XFX-Form instructions. Instructions that deal with SPRs. // let Uses = [CTR] in { @@ -1192,7 +1208,7 @@ def MTCRF : XFXForm_5<31, 144, (outs crbitm:$FXM), (ins GPRC:$rS), // // FIXME: Make this a real Pseudo instruction when the JIT switches to MC. 
def MFCRpseud: XFXForm_3<31, 19, (outs GPRC:$rT), (ins crbitm:$FXM), - "", SprMFCR>, + "#MFCRpseud", SprMFCR>, PPC970_MicroCode, PPC970_Unit_CRU; def MFCR : XFXForm_3<31, 19, (outs GPRC:$rT), (ins), @@ -1233,7 +1249,7 @@ let Uses = [RM] in { PPC970_DGroup_Single, PPC970_Unit_FPU; def FADDrtz: AForm_2<63, 21, (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB), - "fadd $FRT, $FRA, $FRB", FPGeneral, + "fadd $FRT, $FRA, $FRB", FPAddSub, [(set F8RC:$FRT, (PPCfaddrtz F8RC:$FRA, F8RC:$FRB))]>, PPC970_DGroup_Single, PPC970_Unit_FPU; } @@ -1364,7 +1380,7 @@ def FSELS : AForm_1<63, 23, let Uses = [RM] in { def FADD : AForm_2<63, 21, (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB), - "fadd $FRT, $FRA, $FRB", FPGeneral, + "fadd $FRT, $FRA, $FRB", FPAddSub, [(set F8RC:$FRT, (fadd F8RC:$FRA, F8RC:$FRB))]>; def FADDS : AForm_2<59, 21, (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB), @@ -1379,16 +1395,16 @@ let Uses = [RM] in { "fdivs $FRT, $FRA, $FRB", FPDivS, [(set F4RC:$FRT, (fdiv F4RC:$FRA, F4RC:$FRB))]>; def FMUL : AForm_3<63, 25, - (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB), - "fmul $FRT, $FRA, $FRB", FPFused, - [(set F8RC:$FRT, (fmul F8RC:$FRA, F8RC:$FRB))]>; + (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRC), + "fmul $FRT, $FRA, $FRC", FPFused, + [(set F8RC:$FRT, (fmul F8RC:$FRA, F8RC:$FRC))]>; def FMULS : AForm_3<59, 25, - (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB), - "fmuls $FRT, $FRA, $FRB", FPGeneral, - [(set F4RC:$FRT, (fmul F4RC:$FRA, F4RC:$FRB))]>; + (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRC), + "fmuls $FRT, $FRA, $FRC", FPGeneral, + [(set F4RC:$FRT, (fmul F4RC:$FRA, F4RC:$FRC))]>; def FSUB : AForm_2<63, 20, (outs F8RC:$FRT), (ins F8RC:$FRA, F8RC:$FRB), - "fsub $FRT, $FRA, $FRB", FPGeneral, + "fsub $FRT, $FRA, $FRB", FPAddSub, [(set F8RC:$FRT, (fsub F8RC:$FRA, F8RC:$FRB))]>; def FSUBS : AForm_2<59, 20, (outs F4RC:$FRT), (ins F4RC:$FRA, F4RC:$FRB), @@ -1398,7 +1414,7 @@ let Uses = [RM] in { } let PPC970_Unit = 1 in { // FXU Operations. - def ISEL : AForm_1<31, 15, + def ISEL : AForm_4<31, 15, (outs GPRC:$rT), (ins GPRC:$rA, GPRC:$rB, pred:$cond), "isel $rT, $rA, $rB, $cond", IntGeneral, []>; diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index ab8bf1f..459c358 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -71,7 +71,7 @@ PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST, : PPCGenRegisterInfo(ST.isPPC64() ? PPC::LR8 : PPC::LR, ST.isPPC64() ? 0 : 1, ST.isPPC64() ? 0 : 1), - Subtarget(ST), TII(tii) { + Subtarget(ST), TII(tii), CRSpillFrameIdx(0) { ImmToIdxMap[PPC::LD] = PPC::LDX; ImmToIdxMap[PPC::STD] = PPC::STDX; ImmToIdxMap[PPC::LBZ] = PPC::LBZX; ImmToIdxMap[PPC::STB] = PPC::STBX; ImmToIdxMap[PPC::LHZ] = PPC::LHZX; ImmToIdxMap[PPC::LHA] = PPC::LHAX; @@ -111,10 +111,15 @@ PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { return Subtarget.isPPC64() ? CSR_Darwin64_SaveList : CSR_Darwin32_SaveList; + // For 32-bit SVR4, also initialize the frame index associated with + // the CR spill slot. + if (!Subtarget.isPPC64()) + CRSpillFrameIdx = 0; + return Subtarget.isPPC64() ? CSR_SVR464_SaveList : CSR_SVR432_SaveList; } -const unsigned* +const uint32_t* PPCRegisterInfo::getCallPreservedMask(CallingConv::ID CC) const { if (Subtarget.isDarwinABI()) return Subtarget.isPPC64() ? 
CSR_Darwin64_RegMask : @@ -477,6 +482,31 @@ void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II, MBB.erase(II); } +bool +PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF, + unsigned Reg, int &FrameIdx) const { + + // For the nonvolatile condition registers (CR2, CR3, CR4) in an SVR4 + // ABI, return true to prevent allocating an additional frame slot. + // For 64-bit, the CR save area is at SP+8; the value of FrameIdx = 0 + // is arbitrary and will be subsequently ignored. For 32-bit, we must + // create exactly one stack slot and return its FrameIdx for all + // nonvolatiles. + if (Subtarget.isSVR4ABI() && PPC::CR2 <= Reg && Reg <= PPC::CR4) { + if (Subtarget.isPPC64()) { + FrameIdx = 0; + } else if (CRSpillFrameIdx) { + FrameIdx = CRSpillFrameIdx; + } else { + MachineFrameInfo *MFI = ((MachineFunction &)MF).getFrameInfo(); + FrameIdx = MFI->CreateFixedObject((uint64_t)4, (int64_t)-4, true); + CRSpillFrameIdx = FrameIdx; + } + return true; + } + return false; +} + void PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, RegScavenger *RS) const { @@ -566,7 +596,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // to Offset to get the correct offset. // Naked functions have stack size 0, although getStackSize may not reflect that // because we didn't call all the pieces that compute it for naked functions. - if (!MF.getFunction()->hasFnAttr(Attribute::Naked)) + if (!MF.getFunction()->getFnAttributes().hasAttribute(Attributes::Naked)) Offset += MFI->getStackSize(); // If we can, encode the offset directly into the instruction. If this is a diff --git a/lib/Target/PowerPC/PPCRegisterInfo.h b/lib/Target/PowerPC/PPCRegisterInfo.h index 152c36d..a8fd796 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.h +++ b/lib/Target/PowerPC/PPCRegisterInfo.h @@ -30,6 +30,7 @@ class PPCRegisterInfo : public PPCGenRegisterInfo { std::map ImmToIdxMap; const PPCSubtarget &Subtarget; const TargetInstrInfo &TII; + mutable int CRSpillFrameIdx; public: PPCRegisterInfo(const PPCSubtarget &SubTarget, const TargetInstrInfo &tii); @@ -43,7 +44,7 @@ public: /// Code Generation virtual methods... 
const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const; - const unsigned *getCallPreservedMask(CallingConv::ID CC) const; + const uint32_t *getCallPreservedMask(CallingConv::ID CC) const; BitVector getReservedRegs(const MachineFunction &MF) const; @@ -65,6 +66,8 @@ public: int SPAdj, RegScavenger *RS) const; void lowerCRRestore(MachineBasicBlock::iterator II, unsigned FrameIndex, int SPAdj, RegScavenger *RS) const; + bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg, + int &FrameIdx) const; void eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, RegScavenger *RS = NULL) const; diff --git a/lib/Target/PowerPC/PPCSchedule.td b/lib/Target/PowerPC/PPCSchedule.td index 6a6ccb9..660c0c3 100644 --- a/lib/Target/PowerPC/PPCSchedule.td +++ b/lib/Target/PowerPC/PPCSchedule.td @@ -40,6 +40,7 @@ def IntMulHWU : InstrItinClass; def IntMulLI : InstrItinClass; def IntRFID : InstrItinClass; def IntRotateD : InstrItinClass; +def IntRotateDI : InstrItinClass; def IntRotate : InstrItinClass; def IntShift : InstrItinClass; def IntTrapD : InstrItinClass; @@ -52,15 +53,18 @@ def LdStDCBA : InstrItinClass; def LdStDCBF : InstrItinClass; def LdStDCBI : InstrItinClass; def LdStLoad : InstrItinClass; +def LdStLoadUpd : InstrItinClass; def LdStStore : InstrItinClass; +def LdStStoreUpd : InstrItinClass; def LdStDSS : InstrItinClass; def LdStICBI : InstrItinClass; -def LdStUX : InstrItinClass; def LdStLD : InstrItinClass; +def LdStLDU : InstrItinClass; def LdStLDARX : InstrItinClass; def LdStLFD : InstrItinClass; def LdStLFDU : InstrItinClass; def LdStLHA : InstrItinClass; +def LdStLHAU : InstrItinClass; def LdStLMW : InstrItinClass; def LdStLVecX : InstrItinClass; def LdStLWA : InstrItinClass; @@ -69,6 +73,9 @@ def LdStSLBIA : InstrItinClass; def LdStSLBIE : InstrItinClass; def LdStSTD : InstrItinClass; def LdStSTDCX : InstrItinClass; +def LdStSTDU : InstrItinClass; +def LdStSTFD : InstrItinClass; +def LdStSTFDU : InstrItinClass; def LdStSTVEBX : InstrItinClass; def LdStSTWCX : InstrItinClass; def LdStSync : InstrItinClass; @@ -86,6 +93,7 @@ def SprMTSRIN : InstrItinClass; def SprRFI : InstrItinClass; def SprSC : InstrItinClass; def FPGeneral : InstrItinClass; +def FPAddSub : InstrItinClass; def FPCompare : InstrItinClass; def FPDivD : InstrItinClass; def FPDivS : InstrItinClass; @@ -110,6 +118,8 @@ include "PPCScheduleG4.td" include "PPCScheduleG4Plus.td" include "PPCScheduleG5.td" include "PPCScheduleA2.td" +include "PPCScheduleE500mc.td" +include "PPCScheduleE5500.td" //===----------------------------------------------------------------------===// // Instruction to itinerary class map - When add new opcodes to the supported @@ -171,7 +181,7 @@ include "PPCScheduleA2.td" // extsh IntSimple // extsw IntSimple // fabs FPGeneral -// fadd FPGeneral +// fadd FPAddSub // fadds FPGeneral // fcfid FPGeneral // fcmpo FPCompare @@ -201,35 +211,35 @@ include "PPCScheduleA2.td" // fsel FPGeneral // fsqrt FPSqrt // fsqrts FPSqrt -// fsub FPGeneral +// fsub FPAddSub // fsubs FPGeneral // icbi LdStICBI // isync SprISYNC // lbz LdStLoad -// lbzu LdStLoad -// lbzux LdStUX +// lbzu LdStLoadUpd +// lbzux LdStLoadUpd // lbzx LdStLoad // ld LdStLD // ldarx LdStLDARX -// ldu LdStLD -// ldux LdStLD +// ldu LdStLDU +// ldux LdStLDU // ldx LdStLD // lfd LdStLFD // lfdu LdStLFDU // lfdux LdStLFDU -// lfdx LdStLFDU -// lfs LdStLFDU +// lfdx LdStLFD +// lfs LdStLFD // lfsu LdStLFDU // lfsux LdStLFDU -// lfsx LdStLFDU +// lfsx LdStLFD // lha LdStLHA -// lhau LdStLHA -// lhaux LdStLHA +// lhau 
LdStLHAU +// lhaux LdStLHAU // lhax LdStLHA // lhbrx LdStLoad // lhz LdStLoad -// lhzu LdStLoad -// lhzux LdStUX +// lhzu LdStLoadUpd +// lhzux LdStLoadUpd // lhzx LdStLoad // lmw LdStLMW // lswi LdStLMW @@ -243,12 +253,12 @@ include "PPCScheduleA2.td" // lvxl LdStLVecX // lwa LdStLWA // lwarx LdStLWARX -// lwaux LdStLHA +// lwaux LdStLHAU // lwax LdStLHA // lwbrx LdStLoad // lwz LdStLoad -// lwzu LdStLoad -// lwzux LdStUX +// lwzu LdStLoadUpd +// lwzux LdStLoadUpd // lwzx LdStLoad // mcrf BrMCR // mcrfs FPGeneral @@ -292,10 +302,10 @@ include "PPCScheduleA2.td" // rfid IntRFID // rldcl IntRotateD // rldcr IntRotateD -// rldic IntRotateD -// rldicl IntRotateD -// rldicr IntRotateD -// rldimi IntRotateD +// rldic IntRotateDI +// rldicl IntRotateDI +// rldicr IntRotateDI +// rldimi IntRotateDI // rlwimi IntRotate // rlwinm IntGeneral // rlwnm IntGeneral @@ -305,33 +315,33 @@ include "PPCScheduleA2.td" // sld IntRotateD // slw IntGeneral // srad IntRotateD -// sradi IntRotateD +// sradi IntRotateDI // sraw IntShift // srawi IntShift // srd IntRotateD // srw IntGeneral // stb LdStStore -// stbu LdStStore -// stbux LdStStore +// stbu LdStStoreUpd +// stbux LdStStoreUpd // stbx LdStStore // std LdStSTD // stdcx. LdStSTDCX -// stdu LdStSTD -// stdux LdStSTD +// stdu LdStSTDU +// stdux LdStSTDU // stdx LdStSTD -// stfd LdStUX -// stfdu LdStUX -// stfdux LdStUX -// stfdx LdStUX -// stfiwx LdStUX -// stfs LdStUX -// stfsu LdStUX -// stfsux LdStUX -// stfsx LdStUX +// stfd LdStSTFD +// stfdu LdStSTFDU +// stfdux LdStSTFDU +// stfdx LdStSTFD +// stfiwx LdStSTFD +// stfs LdStSTFD +// stfsu LdStSTFDU +// stfsux LdStSTFDU +// stfsx LdStSTFD // sth LdStStore // sthbrx LdStStore -// sthu LdStStore -// sthux LdStStore +// sthu LdStStoreUpd +// sthux LdStStoreUpd // sthx LdStStore // stmw LdStLMW // stswi LdStLMW @@ -344,8 +354,8 @@ include "PPCScheduleA2.td" // stw LdStStore // stwbrx LdStStore // stwcx. 
LdStSTWCX -// stwu LdStStore -// stwux LdStStore +// stwu LdStStoreUpd +// stwux LdStStoreUpd // stwx LdStStore // subf IntGeneral // subfc IntGeneral diff --git a/lib/Target/PowerPC/PPCSchedule440.td b/lib/Target/PowerPC/PPCSchedule440.td index cd0fb70..37b6eac 100644 --- a/lib/Target/PowerPC/PPCSchedule440.td +++ b/lib/Target/PowerPC/PPCSchedule440.td @@ -288,6 +288,15 @@ def PPC440Itineraries : ProcessorItineraries< InstrStage<2, [LWB]>], [9, 5], [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [PDCD1, PDCD2]>, + InstrStage<1, [DISS1, DISS2]>, + InstrStage<1, [LRACC]>, + InstrStage<1, [AGEN]>, + InstrStage<1, [CRD]>, + InstrStage<2, [LWB]>], + [9, 5], + [GPR_Bypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [PDCD1, PDCD2]>, InstrStage<1, [DISS1, DISS2]>, @@ -297,6 +306,15 @@ def PPC440Itineraries : ProcessorItineraries< InstrStage<2, [LWB]>], [8, 5], [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [PDCD1, PDCD2]>, + InstrStage<1, [DISS1, DISS2]>, + InstrStage<1, [LRACC]>, + InstrStage<1, [AGEN]>, + InstrStage<1, [CRD]>, + InstrStage<2, [LWB]>], + [8, 5], + [NoBypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [PDCD1, PDCD2]>, InstrStage<1, [DISS1, DISS2]>, @@ -306,7 +324,7 @@ def PPC440Itineraries : ProcessorItineraries< InstrStage<1, [LWB]>], [8, 5], [NoBypass, GPR_Bypass]>, - InstrItinData, + InstrItinData, InstrStage<1, [PDCD1, PDCD2]>, InstrStage<1, [DISS1, DISS2]>, InstrStage<1, [LRACC]>, @@ -315,6 +333,15 @@ def PPC440Itineraries : ProcessorItineraries< InstrStage<1, [LWB]>], [8, 5, 5], [NoBypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [PDCD1, PDCD2]>, + InstrStage<1, [DISS1, DISS2]>, + InstrStage<1, [LRACC]>, + InstrStage<1, [AGEN]>, + InstrStage<1, [CRD]>, + InstrStage<1, [LWB]>], + [8, 5, 5], + [NoBypass, GPR_Bypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [PDCD1, PDCD2]>, InstrStage<1, [DISS1, DISS2]>, @@ -342,6 +369,15 @@ def PPC440Itineraries : ProcessorItineraries< InstrStage<1, [LWB]>], [8, 5], [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [PDCD1, PDCD2]>, + InstrStage<1, [DISS1, DISS2]>, + InstrStage<1, [LRACC]>, + InstrStage<1, [AGEN]>, + InstrStage<1, [CRD]>, + InstrStage<1, [LWB]>], + [8, 5], + [NoBypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [PDCD1, PDCD2]>, InstrStage<1, [DISS1, DISS2]>, @@ -371,6 +407,15 @@ def PPC440Itineraries : ProcessorItineraries< InstrStage<2, [LWB]>], [8, 5], [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [PDCD1, PDCD2]>, + InstrStage<1, [DISS1, DISS2]>, + InstrStage<1, [LRACC]>, + InstrStage<1, [AGEN]>, + InstrStage<1, [CRD]>, + InstrStage<2, [LWB]>], + [8, 5], + [NoBypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [PDCD1, PDCD2]>, InstrStage<1, [DISS1]>, @@ -537,6 +582,19 @@ def PPC440Itineraries : ProcessorItineraries< InstrStage<1, [FWB]>], [10, 4, 4], [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<1, [PDCD1, PDCD2]>, + InstrStage<1, [DISS1, DISS2]>, + InstrStage<1, [FRACC]>, + InstrStage<1, [FEXE1]>, + InstrStage<1, [FEXE2]>, + InstrStage<1, [FEXE3]>, + InstrStage<1, [FEXE4]>, + InstrStage<1, [FEXE5]>, + InstrStage<1, [FEXE6]>, + InstrStage<1, [FWB]>], + [10, 4, 4], + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, InstrItinData, InstrStage<1, [PDCD1, PDCD2]>, InstrStage<1, [DISS1, DISS2]>, diff --git a/lib/Target/PowerPC/PPCScheduleA2.td b/lib/Target/PowerPC/PPCScheduleA2.td index 4d4a5d0..ba63b5c 100644 --- a/lib/Target/PowerPC/PPCScheduleA2.td +++ b/lib/Target/PowerPC/PPCScheduleA2.td @@ -181,6 +181,17 @@ def PPCA2Itineraries : ProcessorItineraries< 
InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], [10, 7, 7], [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, + IU4_4, IU4_5, IU4_6, IU4_7]>, + InstrStage<1, [IU5]>, InstrStage<1, [IU6]>, + InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>, + InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>, + InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>, + InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], + [10, 7, 7], + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, @@ -302,7 +313,18 @@ def PPCA2Itineraries : ProcessorItineraries< InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], [14, 7], [GPR_Bypass, GPR_Bypass]>, - InstrItinData, + InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, + IU4_4, IU4_5, IU4_6, IU4_7]>, + InstrStage<1, [IU5]>, InstrStage<1, [IU6]>, + InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>, + InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>, + InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>, + InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], + [14, 7], + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, IU4_4, IU4_5, IU4_6, IU4_7]>, @@ -324,6 +346,17 @@ def PPCA2Itineraries : ProcessorItineraries< InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], [13, 7], [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, + IU4_4, IU4_5, IU4_6, IU4_7]>, + InstrStage<1, [IU5]>, InstrStage<1, [IU6]>, + InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>, + InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>, + InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>, + InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], + [13, 7], + [GPR_Bypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, @@ -335,7 +368,7 @@ def PPCA2Itineraries : ProcessorItineraries< InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], [14, 7], [NoBypass, GPR_Bypass]>, - InstrItinData, InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, IU4_4, IU4_5, IU4_6, IU4_7]>, @@ -346,6 +379,17 @@ def PPCA2Itineraries : ProcessorItineraries< InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], [14, 7, 7], [NoBypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, + IU4_4, IU4_5, IU4_6, IU4_7]>, + InstrStage<1, [IU5]>, InstrStage<1, [IU6]>, + InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>, + InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>, + InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>, + InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], + [14, 7, 7], + [NoBypass, FPR_Bypass, FPR_Bypass]>, InstrItinData, InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, @@ -379,6 +423,17 @@ def PPCA2Itineraries : ProcessorItineraries< InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], [14, 7], [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, + IU4_4, IU4_5, IU4_6, IU4_7]>, + InstrStage<1, [IU5]>, InstrStage<1, [IU6]>, + InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>, + InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>, + InstrStage<1, [XEX3]>, InstrStage<1, [XEX4]>, + InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], + [14, 7], + [NoBypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, @@ -412,6 +467,17 @@ def PPCA2Itineraries : ProcessorItineraries< InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], [13, 7], [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, + IU4_4, IU4_5, IU4_6, IU4_7]>, + InstrStage<1, [IU5]>, InstrStage<1, [IU6]>, + InstrStage<1, [RF0]>, InstrStage<1, [XRF1]>, + InstrStage<1, [XEX1]>, InstrStage<1, [XEX2]>, + InstrStage<1, 
[XEX3]>, InstrStage<1, [XEX4]>, + InstrStage<1, [XEX5]>, InstrStage<1, [XEX6]>], + [13, 7], + [GPR_Bypass, GPR_Bypass]>, InstrItinData, InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, @@ -593,6 +659,17 @@ def PPCA2Itineraries : ProcessorItineraries< InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>], [15, 7, 7], [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, + IU4_4, IU4_5, IU4_6, IU4_7]>, + InstrStage<1, [IU5]>, InstrStage<1, [IU6]>, + InstrStage<1, [RF0]>, InstrStage<1, [FRF1]>, + InstrStage<1, [FEX1]>, InstrStage<1, [FEX2]>, + InstrStage<1, [FEX3]>, InstrStage<1, [FEX4]>, + InstrStage<1, [FEX5]>, InstrStage<1, [FEX6]>], + [15, 7, 7], + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, InstrItinData, InstrStage<1, [IU4_0, IU4_1, IU4_2, IU4_3, diff --git a/lib/Target/PowerPC/PPCScheduleE500mc.td b/lib/Target/PowerPC/PPCScheduleE500mc.td new file mode 100644 index 0000000..9bb779a --- /dev/null +++ b/lib/Target/PowerPC/PPCScheduleE500mc.td @@ -0,0 +1,265 @@ +//===-- PPCScheduleE500mc.td - e500mc Scheduling Defs ------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the itinerary class data for the Freescale e500mc 32-bit +// Power processor. +// +// All information is derived from the "e500mc Core Reference Manual", +// Freescale Document Number E500MCRM, Rev. 1, 03/2012. +// +//===----------------------------------------------------------------------===// +// Relevant functional units in the Freescale e500mc core: +// +// * Decode & Dispatch +// Can dispatch up to 2 instructions per clock cycle to either the GPR Issue +// queues (GIQx), FP Issue Queue (FIQ), or Branch issue queue (BIQ). +def DIS0 : FuncUnit; // Dispatch stage - insn 1 +def DIS1 : FuncUnit; // Dispatch stage - insn 2 + +// * Execute +// 6 pipelined execution units: SFX0, SFX1, BU, FPU, LSU, CFX. +// Some instructions can only execute in SFX0 but not SFX1. +// The CFX has a bypass path, allowing non-divide instructions to execute +// while a divide instruction is executed. 
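The itinerary entries later in this file express that divide-bypass behavior as pipeline stages plus operand read/def cycles and bypass networks. As a minimal illustrative sketch of the form such an entry takes inside the ProcessorItineraries list (the itinerary class name and representative cycle counts are inferred for illustration, not quoted from the file): a divide holds the CFX_DivBypass path for many cycles while the main CFX pipeline is released after one cycle, so independent CFX instructions can keep issuing.

    // Sketch: integer word divide on the e500mc, as one ProcessorItineraries entry.
    InstrItinData<IntDivW, [InstrStage<1, [DIS0, DIS1], 0>,   // dispatch slot
                            InstrStage<1, [CFX_0], 0>,        // CFX issue, freed next cycle
                            InstrStage<14, [CFX_DivBypass]>], // divide occupies the bypass path
                  [17, 1, 1],                                 // result def cycle; source reads at cycle 1
                  [GPR_Bypass, GPR_Bypass, GPR_Bypass]>,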
+def SFX0 : FuncUnit; // Simple unit 0 +def SFX1 : FuncUnit; // Simple unit 1 +def BU : FuncUnit; // Branch unit +def CFX_DivBypass + : FuncUnit; // CFX divide bypass path +def CFX_0 : FuncUnit; // CFX pipeline +def LSU_0 : FuncUnit; // LSU pipeline +def FPU_0 : FuncUnit; // FPU pipeline + +def PPCE500mcItineraries : ProcessorItineraries< + [DIS0, DIS1, SFX0, SFX1, BU, CFX_DivBypass, CFX_0, LSU_0, FPU_0], + [CR_Bypass, GPR_Bypass, FPR_Bypass], [ + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [4, 1, 1], // Latency = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [4, 1, 1], // Latency = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [5, 1, 1], // Latency = 1 or 2 + [CR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0], 0>, + InstrStage<14, [CFX_DivBypass]>], + [17, 1, 1], // Latency=4..35, Repeat= 4..35 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<8, [FPU_0]>], + [11], // Latency = 8 + [FPR_Bypass]>, + InstrItinData, + InstrStage<8, [FPU_0]>], + [11, 1, 1], // Latency = 8 + [NoBypass, NoBypass, NoBypass]>, + InstrItinData, + InstrStage<1, [CFX_0]>], + [7, 1, 1], // Latency = 4, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0]>], + [7, 1, 1], // Latency = 4, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0]>], + [7, 1, 1], // Latency = 4, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [4, 1, 1], // Latency = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [4, 1, 1], // Latency = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<2, [SFX0]>], + [5, 1], // Latency = 2, Repeat rate = 2 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [BU]>], + [4, 1], // Latency = 1 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [BU]>], + [4, 1, 1], // Latency = 1 + [CR_Bypass, CR_Bypass, CR_Bypass]>, + InstrItinData, + InstrStage<1, [BU]>], + [4, 1], // Latency = 1 + [CR_Bypass, CR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [4, 1, 1], // Latency = 1 + [CR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [NoBypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1, 1], // Latency = 3 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [6, 1, 1], // Latency = 3 + [GPR_Bypass, GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 1, 1], // 
Latency = 4 + [FPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [7, 1, 1], // Latency = 4 + [FPR_Bypass, GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 1], // Latency = r+3 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<3, [LSU_0]>], + [6, 1, 1], // Latency = 3, Repeat rate = 3 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [6, 1], // Latency = 3 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>]>, + InstrItinData, + InstrStage<4, [SFX0]>], + [7, 1], + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<2, [SFX0, SFX1]>], + [5, 1], // Latency = 2, Repeat rate = 4 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0]>], + [5, 1], + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0], 0>]>, + InstrItinData, + InstrStage<5, [SFX0]>], + [8, 1], + [GPR_Bypass, CR_Bypass]>, + InstrItinData, + InstrStage<4, [SFX0]>], + [7, 1], // Latency = 4, Repeat rate = 4 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [4, 1], // Latency = 1, Repeat rate = 1 + [GPR_Bypass, CR_Bypass]>, + InstrItinData, + InstrStage<4, [SFX0]>], + [7, 1], // Latency = 4, Repeat rate = 4 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [4, 1], // Latency = 1, Repeat rate = 1 + [CR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0]>], + [4, 1], + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<2, [FPU_0]>], + [11, 1, 1], // Latency = 8, Repeat rate = 2 + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<4, [FPU_0]>], + [13, 1, 1], // Latency = 10, Repeat rate = 4 + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<2, [FPU_0]>], + [11, 1, 1], // Latency = 8, Repeat rate = 2 + [CR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<68, [FPU_0]>], + [71, 1, 1], // Latency = 68, Repeat rate = 68 + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<38, [FPU_0]>], + [41, 1, 1], // Latency = 38, Repeat rate = 38 + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<4, [FPU_0]>], + [13, 1, 1, 1], // Latency = 10, Repeat rate = 4 + [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<38, [FPU_0]>], + [41, 1], // Latency = 38, Repeat rate = 38 + [FPR_Bypass, FPR_Bypass]> +]>; + +// ===---------------------------------------------------------------------===// +// e500mc machine model for scheduling and other instruction cost heuristics. + +def PPCE500mcModel : SchedMachineModel { + let IssueWidth = 2; // 2 micro-ops are dispatched per cycle. + let MinLatency = -1; // OperandCycles are interpreted as MinLatency. + let LoadLatency = 5; // Optimistic load latency assuming bypass. + // This is overriden by OperandCycles if the + // Itineraries are queried instead. 
+ + let Itineraries = PPCE500mcItineraries; +} diff --git a/lib/Target/PowerPC/PPCScheduleE5500.td b/lib/Target/PowerPC/PPCScheduleE5500.td new file mode 100644 index 0000000..d7e11ac --- /dev/null +++ b/lib/Target/PowerPC/PPCScheduleE5500.td @@ -0,0 +1,309 @@ +//===-- PPCScheduleE500mc.td - e5500 Scheduling Defs -------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the itinerary class data for the Freescale e5500 64-bit +// Power processor. +// +// All information is derived from the "e5500 Core Reference Manual", +// Freescale Document Number e5500RM, Rev. 1, 03/2012. +// +//===----------------------------------------------------------------------===// +// Relevant functional units in the Freescale e5500 core +// (These are the same as for the e500mc) +// +// * Decode & Dispatch +// Can dispatch up to 2 instructions per clock cycle to either the GPR Issue +// queues (GIQx), FP Issue Queue (FIQ), or Branch issue queue (BIQ). +// def DIS0 : FuncUnit; +// def DIS1 : FuncUnit; + +// * Execute +// 6 pipelined execution units: SFX0, SFX1, BU, FPU, LSU, CFX. +// The CFX has a bypass path, allowing non-divide instructions to execute +// while a divide instruction is being executed. +// def SFX0 : FuncUnit; // Simple unit 0 +// def SFX1 : FuncUnit; // Simple unit 1 +// def BU : FuncUnit; // Branch unit +// def CFX_DivBypass +// : FuncUnit; // CFX divide bypass path +// def CFX_0 : FuncUnit; // CFX pipeline stage 0 + +def CFX_1 : FuncUnit; // CFX pipeline stage 1 + +// def LSU_0 : FuncUnit; // LSU pipeline +// def FPU_0 : FuncUnit; // FPU pipeline + + +def PPCE5500Itineraries : ProcessorItineraries< + [DIS0, DIS1, SFX0, SFX1, BU, CFX_DivBypass, CFX_0, CFX_1, + LSU_0, FPU_0], + [CR_Bypass, GPR_Bypass, FPR_Bypass], [ + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [5, 2, 2], // Latency = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [5, 2, 2], // Latency = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [6, 2, 2], // Latency = 1 or 2 + [CR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0], 0>, + InstrStage<26, [CFX_DivBypass]>], + [30, 2, 2], // Latency= 4..26, Repeat rate= 4..26 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0], 0>, + InstrStage<16, [CFX_DivBypass]>], + [20, 2, 2], // Latency= 4..16, Repeat rate= 4..16 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [FPU_0]>], + [11], // Latency = 7, Repeat rate = 1 + [FPR_Bypass]>, + InstrItinData, + InstrStage<7, [FPU_0]>], + [11, 2, 2], // Latency = 7, Repeat rate = 7 + [NoBypass, NoBypass, NoBypass]>, + InstrItinData, + InstrStage<1, [CFX_0], 0>, + InstrStage<2, [CFX_1]>], + [9, 2, 2], // Latency = 4..7, Repeat rate = 2..4 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0], 0>, + InstrStage<1, [CFX_1]>], + [8, 2, 2], // Latency = 4, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0], 0>, + InstrStage<1, [CFX_1]>], + [8, 2, 2], // Latency = 4, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0], 0>, + InstrStage<2, [CFX_1]>], + [8, 2, 2], // Latency = 4 or 5, Repeat = 2 + [GPR_Bypass, GPR_Bypass, 
GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [5, 2, 2], // Latency = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<2, [SFX0, SFX1]>], + [6, 2, 2], // Latency = 2, Repeat rate = 2 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [5, 2, 2], // Latency = 1, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<2, [SFX0, SFX1]>], + [6, 2, 2], // Latency = 2, Repeat rate = 2 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<2, [SFX0]>], + [6, 2], // Latency = 2, Repeat rate = 2 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [BU]>], + [5, 2], // Latency = 1 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [BU]>], + [5, 2, 2], // Latency = 1 + [CR_Bypass, CR_Bypass, CR_Bypass]>, + InstrItinData, + InstrStage<1, [BU]>], + [5, 2], // Latency = 1 + [CR_Bypass, CR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0]>], + [5, 2, 2], // Latency = 1 + [CR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<3, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 3 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [NoBypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [7, 2, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [8, 2, 2], // Latency = 4, Repeat rate = 1 + [FPR_Bypass, GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [8, 2, 2], // Latency = 4, Repeat rate = 1 + [FPR_Bypass, GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [GPR_Bypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<4, [LSU_0]>], + [8, 2], // Latency = r+3, Repeat rate = r+3 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<3, [LSU_0]>], + [7, 2, 2], // Latency = 3, 
Repeat rate = 3 + [GPR_Bypass, GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1], 0>, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [NoBypass, GPR_Bypass], + 2>, // 2 micro-ops + InstrItinData, + InstrStage<1, [LSU_0]>], + [7, 2], // Latency = 3, Repeat rate = 1 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0]>]>, + InstrItinData, + InstrStage<2, [CFX_0]>], + [6, 2], // Latency = 2, Repeat rate = 4 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [LSU_0], 0>]>, + InstrItinData, + InstrStage<5, [CFX_0]>], + [9, 2], // Latency = 5, Repeat rate = 5 + [GPR_Bypass, CR_Bypass]>, + InstrItinData, + InstrStage<4, [SFX0]>], + [8, 2], // Latency = 4, Repeat rate = 4 + [GPR_Bypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [CFX_0]>], + [5], // Latency = 1, Repeat rate = 1 + [GPR_Bypass]>, + InstrItinData, + InstrStage<4, [CFX_0]>], + [8, 2], // Latency = 4, Repeat rate = 4 + [NoBypass, GPR_Bypass]>, + InstrItinData, + InstrStage<1, [SFX0, SFX1]>], + [5], // Latency = 1, Repeat rate = 1 + [GPR_Bypass]>, + InstrItinData, + InstrStage<1, [FPU_0]>], + [11, 2, 2], // Latency = 7, Repeat rate = 1 + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<1, [FPU_0]>], + [11, 2, 2], // Latency = 7, Repeat rate = 1 + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<1, [FPU_0]>], + [11, 2, 2], // Latency = 7, Repeat rate = 1 + [CR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<31, [FPU_0]>], + [39, 2, 2], // Latency = 35, Repeat rate = 31 + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<16, [FPU_0]>], + [24, 2, 2], // Latency = 20, Repeat rate = 16 + [FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<1, [FPU_0]>], + [11, 2, 2, 2], // Latency = 7, Repeat rate = 1 + [FPR_Bypass, FPR_Bypass, FPR_Bypass, FPR_Bypass]>, + InstrItinData, + InstrStage<2, [FPU_0]>], + [12, 2], // Latency = 8, Repeat rate = 2 + [FPR_Bypass, FPR_Bypass]> +]>; + +// ===---------------------------------------------------------------------===// +// e5500 machine model for scheduling and other instruction cost heuristics. + +def PPCE5500Model : SchedMachineModel { + let IssueWidth = 2; // 2 micro-ops are dispatched per cycle. + let MinLatency = -1; // OperandCycles are interpreted as MinLatency. + let LoadLatency = 6; // Optimistic load latency assuming bypass. + // This is overriden by OperandCycles if the + // Itineraries are queried instead. 
+ + let Itineraries = PPCE5500Itineraries; +} diff --git a/lib/Target/PowerPC/PPCScheduleG3.td b/lib/Target/PowerPC/PPCScheduleG3.td index 61e89ed..72a0a39 100644 --- a/lib/Target/PowerPC/PPCScheduleG3.td +++ b/lib/Target/PowerPC/PPCScheduleG3.td @@ -34,12 +34,16 @@ def G3Itineraries : ProcessorItineraries< InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, - InstrItinData]>, + InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, @@ -58,6 +62,7 @@ def G3Itineraries : ProcessorItineraries< InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, diff --git a/lib/Target/PowerPC/PPCScheduleG4.td b/lib/Target/PowerPC/PPCScheduleG4.td index e19ddfa..fc9120d 100644 --- a/lib/Target/PowerPC/PPCScheduleG4.td +++ b/lib/Target/PowerPC/PPCScheduleG4.td @@ -33,13 +33,17 @@ def G4Itineraries : ProcessorItineraries< InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, - InstrItinData]>, + InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, @@ -60,6 +64,7 @@ def G4Itineraries : ProcessorItineraries< InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, diff --git a/lib/Target/PowerPC/PPCScheduleG4Plus.td b/lib/Target/PowerPC/PPCScheduleG4Plus.td index e7446cb..a4e82ce 100644 --- a/lib/Target/PowerPC/PPCScheduleG4Plus.td +++ b/lib/Target/PowerPC/PPCScheduleG4Plus.td @@ -36,19 +36,24 @@ def G4PlusItineraries : ProcessorItineraries< InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, - InstrItinData]>, + InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, @@ -66,6 +71,7 @@ def G4PlusItineraries : ProcessorItineraries< InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, diff --git a/lib/Target/PowerPC/PPCScheduleG5.td b/lib/Target/PowerPC/PPCScheduleG5.td index 1371499..7c02ea0 100644 --- a/lib/Target/PowerPC/PPCScheduleG5.td +++ b/lib/Target/PowerPC/PPCScheduleG5.td @@ -27,6 +27,7 @@ def G5Itineraries : ProcessorItineraries< InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, @@ -37,15 +38,20 @@ def G5Itineraries : ProcessorItineraries< InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, - InstrItinData]>, + InstrItinData]>, + InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, @@ -53,6 +59,7 @@ def G5Itineraries : ProcessorItineraries< InstrItinData]>, // needs work InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, @@ -69,6 +76,7 @@ def G5Itineraries : ProcessorItineraries< InstrItinData]>, 
InstrItinData]>, InstrItinData]>, + InstrItinData]>, InstrItinData]>, InstrItinData]>, InstrItinData]>, diff --git a/lib/Target/PowerPC/PPCSubtarget.cpp b/lib/Target/PowerPC/PPCSubtarget.cpp index bb193ac..9c8cb92 100644 --- a/lib/Target/PowerPC/PPCSubtarget.cpp +++ b/lib/Target/PowerPC/PPCSubtarget.cpp @@ -54,19 +54,26 @@ PPCSubtarget::PPCSubtarget(const std::string &TT, const std::string &CPU, CPUName = sys::getHostCPUName(); #endif - // Parse features string. - ParseSubtargetFeatures(CPUName, FS); - // Initialize scheduling itinerary for the specified CPU. InstrItins = getInstrItineraryForCPU(CPUName); + // Make sure 64-bit features are available when CPUname is generic + std::string FullFS = FS; + // If we are generating code for ppc64, verify that options make sense. if (is64Bit) { Has64BitSupport = true; // Silently force 64-bit register use on ppc64. Use64BitRegs = true; + if (!FullFS.empty()) + FullFS = "+64bit," + FullFS; + else + FullFS = "+64bit"; } - + + // Parse features string. + ParseSubtargetFeatures(CPUName, FullFS); + // If the user requested use of 64-bit regs, but the cpu selected doesn't // support it, ignore. if (use64BitRegs() && !has64BitSupport()) diff --git a/lib/Target/PowerPC/PPCSubtarget.h b/lib/Target/PowerPC/PPCSubtarget.h index 0207c83..b9e22f4 100644 --- a/lib/Target/PowerPC/PPCSubtarget.h +++ b/lib/Target/PowerPC/PPCSubtarget.h @@ -33,32 +33,34 @@ namespace PPC { enum { DIR_NONE, DIR_32, - DIR_440, - DIR_601, - DIR_602, - DIR_603, + DIR_440, + DIR_601, + DIR_602, + DIR_603, DIR_7400, - DIR_750, - DIR_970, + DIR_750, + DIR_970, DIR_A2, + DIR_E500mc, + DIR_E5500, DIR_PWR6, DIR_PWR7, - DIR_64 + DIR_64 }; } class GlobalValue; class TargetMachine; - + class PPCSubtarget : public PPCGenSubtargetInfo { protected: /// stackAlignment - The minimum alignment known to hold of the stack frame on /// entry to the function and which must be maintained by every function. unsigned StackAlignment; - + /// Selected instruction itineraries (one entry per itinerary class.) InstrItineraryData InstrItins; - + /// Which cpu directive was used. unsigned DarwinDirective; @@ -74,7 +76,7 @@ protected: bool IsBookE; bool HasLazyResolverStubs; bool IsJITCodeModel; - + /// TargetTriple - What processor and OS we're targeting. Triple TargetTriple; @@ -84,11 +86,11 @@ public: /// PPCSubtarget(const std::string &TT, const std::string &CPU, const std::string &FS, bool is64Bit); - - /// ParseSubtargetFeatures - Parses features string setting specified + + /// ParseSubtargetFeatures - Parses features string setting specified /// subtarget options. Definition of function is auto generated by tblgen. void ParseSubtargetFeatures(StringRef CPU, StringRef FS); - + /// SetJITMode - This is called to inform the subtarget info that we are /// producing code for the JIT. void SetJITMode(); @@ -97,20 +99,27 @@ public: /// stack frame on entry to the function and which must be maintained by every /// function for this subtarget. unsigned getStackAlignment() const { return StackAlignment; } - + /// getDarwinDirective - Returns the -m directive specified for the cpu. /// unsigned getDarwinDirective() const { return DarwinDirective; } - - /// getInstrItins - Return the instruction itineraies based on subtarget + + /// getInstrItins - Return the instruction itineraies based on subtarget /// selection. 
const InstrItineraryData &getInstrItineraryData() const { return InstrItins; } - /// getTargetDataString - Return the pointer size and type alignment + /// getDataLayoutString - Return the pointer size and type alignment /// properties of this subtarget. - const char *getTargetDataString() const { + const char *getDataLayoutString() const { // Note, the alignment values for f64 and i64 on ppc64 in Darwin // documentation are wrong; these are correct (i.e. "what gcc does"). + if (isPPC64() && isSVR4ABI()) { + if (TargetTriple.getOS() == llvm::Triple::FreeBSD) + return "E-p:64:64-f64:64:64-i64:64:64-f128:64:64-v128:128:128-n32:64"; + else + return "E-p:64:64-f64:64:64-i64:64:64-f128:128:128-v128:128:128-n32:64"; + } + return isPPC64() ? "E-p:64:64-f64:64:64-i64:64:64-f128:64:128-n32:64" : "E-p:32:32-f64:64:64-i64:64:64-f128:64:128-n32"; } @@ -118,22 +127,22 @@ public: /// isPPC64 - Return true if we are generating code for 64-bit pointer mode. /// bool isPPC64() const { return IsPPC64; } - + /// has64BitSupport - Return true if the selected CPU supports 64-bit /// instructions, regardless of whether we are in 32-bit or 64-bit mode. bool has64BitSupport() const { return Has64BitSupport; } - + /// use64BitRegs - Return true if in 64-bit mode or if we should use 64-bit /// registers in 32-bit mode when possible. This can only true if /// has64BitSupport() returns true. bool use64BitRegs() const { return Use64BitRegs; } - + /// hasLazyResolverStub - Return true if accesses to the specified global have /// to go through a dyld lazy resolution stub. This means that an extra load /// is required to get the address of the global. - bool hasLazyResolverStub(const GlobalValue *GV, + bool hasLazyResolverStub(const GlobalValue *GV, const TargetMachine &TM) const; - + // isJITCodeModel - True if we're generating code for the JIT bool isJITCodeModel() const { return IsJITCodeModel; } diff --git a/lib/Target/PowerPC/PPCTargetMachine.cpp b/lib/Target/PowerPC/PPCTargetMachine.cpp index 9805112..3fc977e 100644 --- a/lib/Target/PowerPC/PPCTargetMachine.cpp +++ b/lib/Target/PowerPC/PPCTargetMachine.cpp @@ -40,10 +40,11 @@ PPCTargetMachine::PPCTargetMachine(const Target &T, StringRef TT, bool is64Bit) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS, is64Bit), - DataLayout(Subtarget.getTargetDataString()), InstrInfo(*this), + DL(Subtarget.getDataLayoutString()), InstrInfo(*this), FrameLowering(Subtarget), JITInfo(*this, is64Bit), TLInfo(*this), TSInfo(*this), - InstrItins(Subtarget.getInstrItineraryData()) { + InstrItins(Subtarget.getInstrItineraryData()), + STTI(&TLInfo), VTTI(&TLInfo) { // The binutils for the BG/P are too old for CFI. 
if (Subtarget.isBGP()) diff --git a/lib/Target/PowerPC/PPCTargetMachine.h b/lib/Target/PowerPC/PPCTargetMachine.h index 7da2b0c..c168433 100644 --- a/lib/Target/PowerPC/PPCTargetMachine.h +++ b/lib/Target/PowerPC/PPCTargetMachine.h @@ -21,7 +21,8 @@ #include "PPCISelLowering.h" #include "PPCSelectionDAGInfo.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetTransformImpl.h" +#include "llvm/DataLayout.h" namespace llvm { @@ -29,13 +30,15 @@ namespace llvm { /// class PPCTargetMachine : public LLVMTargetMachine { PPCSubtarget Subtarget; - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment PPCInstrInfo InstrInfo; PPCFrameLowering FrameLowering; PPCJITInfo JITInfo; PPCTargetLowering TLInfo; PPCSelectionDAGInfo TSInfo; InstrItineraryData InstrItins; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: PPCTargetMachine(const Target &T, StringRef TT, @@ -58,11 +61,17 @@ public: return &InstrInfo.getRegisterInfo(); } - virtual const TargetData *getTargetData() const { return &DataLayout; } + virtual const DataLayout *getDataLayout() const { return &DL; } virtual const PPCSubtarget *getSubtargetImpl() const { return &Subtarget; } virtual const InstrItineraryData *getInstrItineraryData() const { return &InstrItins; } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } // Pass Pipeline Configuration virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); diff --git a/lib/Target/README.txt b/lib/Target/README.txt index cbfa4cf..8165f5b 100644 --- a/lib/Target/README.txt +++ b/lib/Target/README.txt @@ -152,7 +152,7 @@ stuff too. //===---------------------------------------------------------------------===// -For vector types, TargetData.cpp::getTypeInfo() returns alignment that is equal +For vector types, DataLayout.cpp::getTypeInfo() returns alignment that is equal to the type size. It works but can be overly conservative as the alignment of specific vector types are target dependent. @@ -2367,8 +2367,3 @@ unsigned foo(unsigned x, unsigned y) { return x > y && x != 0; } should fold to x > y. //===---------------------------------------------------------------------===// - -int f(double x) { return __builtin_fabs(x) < 0.0; } -should fold to false. 
- -//===---------------------------------------------------------------------===// diff --git a/lib/Target/Sparc/SparcFrameLowering.cpp b/lib/Target/Sparc/SparcFrameLowering.cpp index 1c5c89e..716c79f 100644 --- a/lib/Target/Sparc/SparcFrameLowering.cpp +++ b/lib/Target/Sparc/SparcFrameLowering.cpp @@ -20,7 +20,7 @@ #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 79f7ebd..8e5619e 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -637,7 +637,7 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const PointerType *Ty = cast(CalleeFn->arg_begin()->getType()); Type *ElementTy = Ty->getElementType(); - return getTargetData()->getTypeAllocSize(ElementTy); + return getDataLayout()->getTypeAllocSize(ElementTy); } //===----------------------------------------------------------------------===// diff --git a/lib/Target/Sparc/SparcInstrInfo.td b/lib/Target/Sparc/SparcInstrInfo.td index 15541ef..e64c140 100644 --- a/lib/Target/Sparc/SparcInstrInfo.td +++ b/lib/Target/Sparc/SparcInstrInfo.td @@ -129,7 +129,7 @@ def retflag : SDNode<"SPISD::RET_FLAG", SDT_SPRet, [SDNPHasChain, SDNPOptInGlue]>; def flushw : SDNode<"SPISD::FLUSHW", SDTNone, - [SDNPHasChain]>; + [SDNPHasChain, SDNPSideEffect, SDNPMayStore]>; def getPCX : Operand { let PrintMethod = "printGetPCX"; diff --git a/lib/Target/Sparc/SparcTargetMachine.cpp b/lib/Target/Sparc/SparcTargetMachine.cpp index 9ee12ed..45c9624 100644 --- a/lib/Target/Sparc/SparcTargetMachine.cpp +++ b/lib/Target/Sparc/SparcTargetMachine.cpp @@ -33,10 +33,10 @@ SparcTargetMachine::SparcTargetMachine(const Target &T, StringRef TT, bool is64bit) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS, is64bit), - DataLayout(Subtarget.getDataLayout()), + DL(Subtarget.getDataLayout()), InstrInfo(Subtarget), TLInfo(*this), TSInfo(*this), - FrameLowering(Subtarget) { + FrameLowering(Subtarget), STTI(&TLInfo), VTTI(&TLInfo) { } namespace { diff --git a/lib/Target/Sparc/SparcTargetMachine.h b/lib/Target/Sparc/SparcTargetMachine.h index b2cc624..0fbe2d7 100644 --- a/lib/Target/Sparc/SparcTargetMachine.h +++ b/lib/Target/Sparc/SparcTargetMachine.h @@ -20,18 +20,21 @@ #include "SparcSelectionDAGInfo.h" #include "SparcSubtarget.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" +#include "llvm/Target/TargetTransformImpl.h" namespace llvm { class SparcTargetMachine : public LLVMTargetMachine { SparcSubtarget Subtarget; - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment SparcInstrInfo InstrInfo; SparcTargetLowering TLInfo; SparcSelectionDAGInfo TSInfo; SparcFrameLowering FrameLowering; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: SparcTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options, @@ -52,7 +55,13 @@ public: virtual const SparcSelectionDAGInfo* getSelectionDAGInfo() const { return &TSInfo; } - virtual const TargetData *getTargetData() const { return &DataLayout; } + virtual const ScalarTargetTransformInfo 
*getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } + virtual const DataLayout *getDataLayout() const { return &DL; } // Pass Pipeline Configuration virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); diff --git a/lib/Target/Target.cpp b/lib/Target/Target.cpp index a2b83bc..393178a 100644 --- a/lib/Target/Target.cpp +++ b/lib/Target/Target.cpp @@ -16,7 +16,7 @@ #include "llvm-c/Initialization.h" #include "llvm/InitializePasses.h" #include "llvm/PassManager.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/LLVMContext.h" #include @@ -24,8 +24,9 @@ using namespace llvm; void llvm::initializeTarget(PassRegistry &Registry) { - initializeTargetDataPass(Registry); + initializeDataLayoutPass(Registry); initializeTargetLibraryInfoPass(Registry); + initializeTargetTransformInfoPass(Registry); } void LLVMInitializeTarget(LLVMPassRegistryRef R) { @@ -33,11 +34,11 @@ void LLVMInitializeTarget(LLVMPassRegistryRef R) { } LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep) { - return wrap(new TargetData(StringRep)); + return wrap(new DataLayout(StringRep)); } void LLVMAddTargetData(LLVMTargetDataRef TD, LLVMPassManagerRef PM) { - unwrap(PM)->add(new TargetData(*unwrap(TD))); + unwrap(PM)->add(new DataLayout(*unwrap(TD))); } void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef TLI, @@ -55,13 +56,21 @@ LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef TD) { } unsigned LLVMPointerSize(LLVMTargetDataRef TD) { - return unwrap(TD)->getPointerSize(); + return unwrap(TD)->getPointerSize(0); +} + +unsigned LLVMPointerSizeForAS(LLVMTargetDataRef TD, unsigned AS) { + return unwrap(TD)->getPointerSize(AS); } LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) { return wrap(unwrap(TD)->getIntPtrType(getGlobalContext())); } +LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) { + return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), AS)); +} + unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef TD, LLVMTypeRef Ty) { return unwrap(TD)->getTypeSizeInBits(unwrap(Ty)); } diff --git a/lib/Target/TargetData.cpp b/lib/Target/TargetData.cpp deleted file mode 100644 index cc6dc1e..0000000 --- a/lib/Target/TargetData.cpp +++ /dev/null @@ -1,663 +0,0 @@ -//===-- TargetData.cpp - Data size & alignment routines --------------------==// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines target properties related to datatype size/offset/alignment -// information. -// -// This structure should be created once, filled in if the defaults are not -// correct and then passed around by const&. None of the members functions -// require modification to the object. 
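
// Illustrative sketch of the address-space-aware C API entry points added in
// the Target.cpp hunk above, called from C++.  It assumes matching
// declarations exist in llvm-c/Target.h, which is not shown in this excerpt.
#include "llvm-c/Target.h"
#include <cassert>

static void queryPointerWidths() {
  LLVMTargetDataRef TD = LLVMCreateTargetData("e-p:64:64:64");
  assert(LLVMPointerSize(TD) == 8);          // default address space 0, bytes
  assert(LLVMPointerSizeForAS(TD, 0) == 8);  // explicit address space variant
  LLVMTypeRef IPT = LLVMIntPtrTypeForAS(TD, 0);
  (void)IPT;                                  // i64 in the global context
  LLVMDisposeTargetData(TD);
}
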
-// -//===----------------------------------------------------------------------===// - -#include "llvm/Target/TargetData.h" -#include "llvm/Constants.h" -#include "llvm/DerivedTypes.h" -#include "llvm/Module.h" -#include "llvm/Support/GetElementPtrTypeIterator.h" -#include "llvm/Support/MathExtras.h" -#include "llvm/Support/ManagedStatic.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Support/Mutex.h" -#include "llvm/ADT/DenseMap.h" -#include -#include -using namespace llvm; - -// Handle the Pass registration stuff necessary to use TargetData's. - -// Register the default SparcV9 implementation... -INITIALIZE_PASS(TargetData, "targetdata", "Target Data Layout", false, true) -char TargetData::ID = 0; - -//===----------------------------------------------------------------------===// -// Support for StructLayout -//===----------------------------------------------------------------------===// - -StructLayout::StructLayout(StructType *ST, const TargetData &TD) { - assert(!ST->isOpaque() && "Cannot get layout of opaque structs"); - StructAlignment = 0; - StructSize = 0; - NumElements = ST->getNumElements(); - - // Loop over each of the elements, placing them in memory. - for (unsigned i = 0, e = NumElements; i != e; ++i) { - Type *Ty = ST->getElementType(i); - unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty); - - // Add padding if necessary to align the data element properly. - if ((StructSize & (TyAlign-1)) != 0) - StructSize = TargetData::RoundUpAlignment(StructSize, TyAlign); - - // Keep track of maximum alignment constraint. - StructAlignment = std::max(TyAlign, StructAlignment); - - MemberOffsets[i] = StructSize; - StructSize += TD.getTypeAllocSize(Ty); // Consume space for this data item - } - - // Empty structures have alignment of 1 byte. - if (StructAlignment == 0) StructAlignment = 1; - - // Add padding to the end of the struct so that it could be put in an array - // and all array elements would be aligned correctly. - if ((StructSize & (StructAlignment-1)) != 0) - StructSize = TargetData::RoundUpAlignment(StructSize, StructAlignment); -} - - -/// getElementContainingOffset - Given a valid offset into the structure, -/// return the structure index that contains it. -unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const { - const uint64_t *SI = - std::upper_bound(&MemberOffsets[0], &MemberOffsets[NumElements], Offset); - assert(SI != &MemberOffsets[0] && "Offset not in structure type!"); - --SI; - assert(*SI <= Offset && "upper_bound didn't work"); - assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) && - (SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) && - "Upper bound didn't work!"); - - // Multiple fields can have the same offset if any of them are zero sized. - // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop - // at the i32 element, because it is the last element at that offset. This is - // the right one to return, because anything after it will have a higher - // offset, implying that this element is non-empty. 
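
// Illustrative sketch of the layout rules implemented by StructLayout above:
// {i8, i32} under the default alignment table gets 3 bytes of padding after
// the i8 and tail padding up to a total size of 8, and offset queries resolve
// to the element that contains them.
#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <cassert>
#include <vector>

static void structLayoutExample() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-p:32:32:32");          // defaults: i8 align 1, i32 align 4
  std::vector<llvm::Type*> Fields;
  Fields.push_back(llvm::Type::getInt8Ty(Ctx));
  Fields.push_back(llvm::Type::getInt32Ty(Ctx));
  llvm::StructType *ST = llvm::StructType::get(Ctx, Fields);
  const llvm::StructLayout *SL = DL.getStructLayout(ST);
  assert(SL->getElementOffset(1) == 4);            // 3 bytes of padding after the i8
  assert(SL->getSizeInBytes() == 8);               // tail padding so arrays stay aligned
  assert(SL->getElementContainingOffset(5) == 1);  // offset 5 falls inside the i32
}
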
- return SI-&MemberOffsets[0]; -} - -//===----------------------------------------------------------------------===// -// TargetAlignElem, TargetAlign support -//===----------------------------------------------------------------------===// - -TargetAlignElem -TargetAlignElem::get(AlignTypeEnum align_type, unsigned abi_align, - unsigned pref_align, uint32_t bit_width) { - assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); - TargetAlignElem retval; - retval.AlignType = align_type; - retval.ABIAlign = abi_align; - retval.PrefAlign = pref_align; - retval.TypeBitWidth = bit_width; - return retval; -} - -bool -TargetAlignElem::operator==(const TargetAlignElem &rhs) const { - return (AlignType == rhs.AlignType - && ABIAlign == rhs.ABIAlign - && PrefAlign == rhs.PrefAlign - && TypeBitWidth == rhs.TypeBitWidth); -} - -const TargetAlignElem -TargetData::InvalidAlignmentElem = { (AlignTypeEnum)0xFF, 0, 0, 0 }; - -//===----------------------------------------------------------------------===// -// TargetData Class Implementation -//===----------------------------------------------------------------------===// - -/// getInt - Get an integer ignoring errors. -static int getInt(StringRef R) { - int Result = 0; - R.getAsInteger(10, Result); - return Result; -} - -void TargetData::init() { - initializeTargetDataPass(*PassRegistry::getPassRegistry()); - - LayoutMap = 0; - LittleEndian = false; - PointerMemSize = 8; - PointerABIAlign = 8; - PointerPrefAlign = PointerABIAlign; - StackNaturalAlign = 0; - - // Default alignments - setAlignment(INTEGER_ALIGN, 1, 1, 1); // i1 - setAlignment(INTEGER_ALIGN, 1, 1, 8); // i8 - setAlignment(INTEGER_ALIGN, 2, 2, 16); // i16 - setAlignment(INTEGER_ALIGN, 4, 4, 32); // i32 - setAlignment(INTEGER_ALIGN, 4, 8, 64); // i64 - setAlignment(FLOAT_ALIGN, 2, 2, 16); // half - setAlignment(FLOAT_ALIGN, 4, 4, 32); // float - setAlignment(FLOAT_ALIGN, 8, 8, 64); // double - setAlignment(FLOAT_ALIGN, 16, 16, 128); // ppcf128, quad, ... - setAlignment(VECTOR_ALIGN, 8, 8, 64); // v2i32, v1i64, ... - setAlignment(VECTOR_ALIGN, 16, 16, 128); // v16i8, v8i16, v4i32, ... - setAlignment(AGGREGATE_ALIGN, 0, 8, 0); // struct -} - -std::string TargetData::parseSpecifier(StringRef Desc, TargetData *td) { - - if (td) - td->init(); - - while (!Desc.empty()) { - std::pair Split = Desc.split('-'); - StringRef Token = Split.first; - Desc = Split.second; - - if (Token.empty()) - continue; - - Split = Token.split(':'); - StringRef Specifier = Split.first; - Token = Split.second; - - assert(!Specifier.empty() && "Can't be empty here"); - - switch (Specifier[0]) { - case 'E': - if (td) - td->LittleEndian = false; - break; - case 'e': - if (td) - td->LittleEndian = true; - break; - case 'p': { - // Pointer size. - Split = Token.split(':'); - int PointerMemSizeBits = getInt(Split.first); - if (PointerMemSizeBits < 0 || PointerMemSizeBits % 8 != 0) - return "invalid pointer size, must be a positive 8-bit multiple"; - if (td) - td->PointerMemSize = PointerMemSizeBits / 8; - - // Pointer ABI alignment. - Split = Split.second.split(':'); - int PointerABIAlignBits = getInt(Split.first); - if (PointerABIAlignBits < 0 || PointerABIAlignBits % 8 != 0) { - return "invalid pointer ABI alignment, " - "must be a positive 8-bit multiple"; - } - if (td) - td->PointerABIAlign = PointerABIAlignBits / 8; - - // Pointer preferred alignment. 
- Split = Split.second.split(':'); - int PointerPrefAlignBits = getInt(Split.first); - if (PointerPrefAlignBits < 0 || PointerPrefAlignBits % 8 != 0) { - return "invalid pointer preferred alignment, " - "must be a positive 8-bit multiple"; - } - if (td) { - td->PointerPrefAlign = PointerPrefAlignBits / 8; - if (td->PointerPrefAlign == 0) - td->PointerPrefAlign = td->PointerABIAlign; - } - break; - } - case 'i': - case 'v': - case 'f': - case 'a': - case 's': { - AlignTypeEnum AlignType; - char field = Specifier[0]; - switch (field) { - default: - case 'i': AlignType = INTEGER_ALIGN; break; - case 'v': AlignType = VECTOR_ALIGN; break; - case 'f': AlignType = FLOAT_ALIGN; break; - case 'a': AlignType = AGGREGATE_ALIGN; break; - case 's': AlignType = STACK_ALIGN; break; - } - int Size = getInt(Specifier.substr(1)); - if (Size < 0) { - return std::string("invalid ") + field + "-size field, " - "must be positive"; - } - - Split = Token.split(':'); - int ABIAlignBits = getInt(Split.first); - if (ABIAlignBits < 0 || ABIAlignBits % 8 != 0) { - return std::string("invalid ") + field +"-abi-alignment field, " - "must be a positive 8-bit multiple"; - } - unsigned ABIAlign = ABIAlignBits / 8; - - Split = Split.second.split(':'); - - int PrefAlignBits = getInt(Split.first); - if (PrefAlignBits < 0 || PrefAlignBits % 8 != 0) { - return std::string("invalid ") + field +"-preferred-alignment field, " - "must be a positive 8-bit multiple"; - } - unsigned PrefAlign = PrefAlignBits / 8; - if (PrefAlign == 0) - PrefAlign = ABIAlign; - - if (td) - td->setAlignment(AlignType, ABIAlign, PrefAlign, Size); - break; - } - case 'n': // Native integer types. - Specifier = Specifier.substr(1); - do { - int Width = getInt(Specifier); - if (Width <= 0) { - return std::string("invalid native integer size \'") + Specifier.str() + - "\', must be a positive integer."; - } - if (td && Width != 0) - td->LegalIntWidths.push_back(Width); - Split = Token.split(':'); - Specifier = Split.first; - Token = Split.second; - } while (!Specifier.empty() || !Token.empty()); - break; - case 'S': { // Stack natural alignment. - int StackNaturalAlignBits = getInt(Specifier.substr(1)); - if (StackNaturalAlignBits < 0 || StackNaturalAlignBits % 8 != 0) { - return "invalid natural stack alignment (S-field), " - "must be a positive 8-bit multiple"; - } - if (td) - td->StackNaturalAlign = StackNaturalAlignBits / 8; - break; - } - default: - break; - } - } - - return ""; -} - -/// Default ctor. -/// -/// @note This has to exist, because this is a pass, but it should never be -/// used. -TargetData::TargetData() : ImmutablePass(ID) { - report_fatal_error("Bad TargetData ctor used. " - "Tool did not specify a TargetData to use?"); -} - -TargetData::TargetData(const Module *M) - : ImmutablePass(ID) { - std::string errMsg = parseSpecifier(M->getDataLayout(), this); - assert(errMsg == "" && "Module M has malformed target data layout string."); - (void)errMsg; -} - -void -TargetData::setAlignment(AlignTypeEnum align_type, unsigned abi_align, - unsigned pref_align, uint32_t bit_width) { - assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); - for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { - if (Alignments[i].AlignType == align_type && - Alignments[i].TypeBitWidth == bit_width) { - // Update the abi, preferred alignments. 
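
// Illustrative sketch of the distinction the specifier parser above records
// for each type: an "i64:32:64" field (typical of 32-bit x86 strings) means
// 4-byte ABI alignment but 8-byte preferred alignment, and the two query
// paths report them separately.
#include "llvm/DataLayout.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include <cassert>

static void abiVersusPreferredAlignment() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-p:32:32:32-i64:32:64-n8:16:32-S32");
  llvm::Type *I64 = llvm::Type::getInt64Ty(Ctx);
  assert(DL.getABITypeAlignment(I64) == 4);   // what the ABI requires
  assert(DL.getPrefTypeAlignment(I64) == 8);  // what codegen prefers to use
}
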
- Alignments[i].ABIAlign = abi_align; - Alignments[i].PrefAlign = pref_align; - return; - } - } - - Alignments.push_back(TargetAlignElem::get(align_type, abi_align, - pref_align, bit_width)); -} - -/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or -/// preferred if ABIInfo = false) the target wants for the specified datatype. -unsigned TargetData::getAlignmentInfo(AlignTypeEnum AlignType, - uint32_t BitWidth, bool ABIInfo, - Type *Ty) const { - // Check to see if we have an exact match and remember the best match we see. - int BestMatchIdx = -1; - int LargestInt = -1; - for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { - if (Alignments[i].AlignType == AlignType && - Alignments[i].TypeBitWidth == BitWidth) - return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign; - - // The best match so far depends on what we're looking for. - if (AlignType == INTEGER_ALIGN && - Alignments[i].AlignType == INTEGER_ALIGN) { - // The "best match" for integers is the smallest size that is larger than - // the BitWidth requested. - if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 || - Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth)) - BestMatchIdx = i; - // However, if there isn't one that's larger, then we must use the - // largest one we have (see below) - if (LargestInt == -1 || - Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth) - LargestInt = i; - } - } - - // Okay, we didn't find an exact solution. Fall back here depending on what - // is being looked for. - if (BestMatchIdx == -1) { - // If we didn't find an integer alignment, fall back on most conservative. - if (AlignType == INTEGER_ALIGN) { - BestMatchIdx = LargestInt; - } else { - assert(AlignType == VECTOR_ALIGN && "Unknown alignment type!"); - - // By default, use natural alignment for vector types. This is consistent - // with what clang and llvm-gcc do. - unsigned Align = getTypeAllocSize(cast(Ty)->getElementType()); - Align *= cast(Ty)->getNumElements(); - // If the alignment is not a power of 2, round up to the next power of 2. - // This happens for non-power-of-2 length vectors. - if (Align & (Align-1)) - Align = NextPowerOf2(Align); - return Align; - } - } - - // Since we got a "best match" index, just return it. - return ABIInfo ? Alignments[BestMatchIdx].ABIAlign - : Alignments[BestMatchIdx].PrefAlign; -} - -namespace { - -class StructLayoutMap { - typedef DenseMap LayoutInfoTy; - LayoutInfoTy LayoutInfo; - -public: - virtual ~StructLayoutMap() { - // Remove any layouts. - for (LayoutInfoTy::iterator I = LayoutInfo.begin(), E = LayoutInfo.end(); - I != E; ++I) { - StructLayout *Value = I->second; - Value->~StructLayout(); - free(Value); - } - } - - StructLayout *&operator[](StructType *STy) { - return LayoutInfo[STy]; - } - - // for debugging... - virtual void dump() const {} -}; - -} // end anonymous namespace - -TargetData::~TargetData() { - delete static_cast(LayoutMap); -} - -const StructLayout *TargetData::getStructLayout(StructType *Ty) const { - if (!LayoutMap) - LayoutMap = new StructLayoutMap(); - - StructLayoutMap *STM = static_cast(LayoutMap); - StructLayout *&SL = (*STM)[Ty]; - if (SL) return SL; - - // Otherwise, create the struct layout. Because it is variable length, we - // malloc it, then use placement new. - int NumElts = Ty->getNumElements(); - StructLayout *L = - (StructLayout *)malloc(sizeof(StructLayout)+(NumElts-1) * sizeof(uint64_t)); - - // Set SL before calling StructLayout's ctor. 
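
// Illustrative sketch of the integer "best match" rule in getAlignmentInfo()
// above: a width with no exact alignment entry (e.g. i24) borrows the
// alignment of the next larger integer that does have one (i32).
#include "llvm/DataLayout.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include <cassert>

static void oddIntegerAlignment() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e");   // default alignment table only
  assert(DL.getABITypeAlignment(llvm::Type::getIntNTy(Ctx, 24)) ==
         DL.getABITypeAlignment(llvm::Type::getInt32Ty(Ctx)));
}
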
The ctor could cause other - // entries to be added to TheMap, invalidating our reference. - SL = L; - - new (L) StructLayout(Ty, *this); - - return L; -} - -std::string TargetData::getStringRepresentation() const { - std::string Result; - raw_string_ostream OS(Result); - - OS << (LittleEndian ? "e" : "E") - << "-p:" << PointerMemSize*8 << ':' << PointerABIAlign*8 - << ':' << PointerPrefAlign*8 - << "-S" << StackNaturalAlign*8; - - for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { - const TargetAlignElem &AI = Alignments[i]; - OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':' - << AI.ABIAlign*8 << ':' << AI.PrefAlign*8; - } - - if (!LegalIntWidths.empty()) { - OS << "-n" << (unsigned)LegalIntWidths[0]; - - for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i) - OS << ':' << (unsigned)LegalIntWidths[i]; - } - return OS.str(); -} - - -uint64_t TargetData::getTypeSizeInBits(Type *Ty) const { - assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); - switch (Ty->getTypeID()) { - case Type::LabelTyID: - case Type::PointerTyID: - return getPointerSizeInBits(); - case Type::ArrayTyID: { - ArrayType *ATy = cast(Ty); - return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements(); - } - case Type::StructTyID: - // Get the layout annotation... which is lazily created on demand. - return getStructLayout(cast(Ty))->getSizeInBits(); - case Type::IntegerTyID: - return cast(Ty)->getBitWidth(); - case Type::VoidTyID: - return 8; - case Type::HalfTyID: - return 16; - case Type::FloatTyID: - return 32; - case Type::DoubleTyID: - case Type::X86_MMXTyID: - return 64; - case Type::PPC_FP128TyID: - case Type::FP128TyID: - return 128; - // In memory objects this is always aligned to a higher boundary, but - // only 80 bits contain information. - case Type::X86_FP80TyID: - return 80; - case Type::VectorTyID: - return cast(Ty)->getBitWidth(); - default: - llvm_unreachable("TargetData::getTypeSizeInBits(): Unsupported type"); - } -} - -/*! - \param abi_or_pref Flag that determines which alignment is returned. true - returns the ABI alignment, false returns the preferred alignment. - \param Ty The underlying type for which alignment is determined. - - Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref - == false) for the requested type \a Ty. - */ -unsigned TargetData::getAlignment(Type *Ty, bool abi_or_pref) const { - int AlignType = -1; - - assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); - switch (Ty->getTypeID()) { - // Early escape for the non-numeric types. - case Type::LabelTyID: - case Type::PointerTyID: - return (abi_or_pref - ? getPointerABIAlignment() - : getPointerPrefAlignment()); - case Type::ArrayTyID: - return getAlignment(cast(Ty)->getElementType(), abi_or_pref); - - case Type::StructTyID: { - // Packed structure types always have an ABI alignment of one. - if (cast(Ty)->isPacked() && abi_or_pref) - return 1; - - // Get the layout annotation... which is lazily created on demand. - const StructLayout *Layout = getStructLayout(cast(Ty)); - unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty); - return std::max(Align, Layout->getAlignment()); - } - case Type::IntegerTyID: - case Type::VoidTyID: - AlignType = INTEGER_ALIGN; - break; - case Type::HalfTyID: - case Type::FloatTyID: - case Type::DoubleTyID: - // PPC_FP128TyID and FP128TyID have different data contents, but the - // same size and alignment, so they look the same here. 
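
// Illustrative sketch of the size notions layered on the getTypeSizeInBits()
// switch above.  The derived getTypeStoreSize()/getTypeAllocSize() helpers
// live in the header rather than in this file, so their exact spelling here
// is an assumption.
#include "llvm/DataLayout.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include <cassert>

static void sizeNotions() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e");
  llvm::Type *I1 = llvm::Type::getInt1Ty(Ctx);
  assert(DL.getTypeSizeInBits(I1) == 1);  // exact bit width
  assert(DL.getTypeStoreSize(I1) == 1);   // rounded up to whole bytes
  assert(DL.getTypeAllocSize(I1) == 1);   // rounded up to the ABI alignment
}
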
- case Type::PPC_FP128TyID: - case Type::FP128TyID: - case Type::X86_FP80TyID: - AlignType = FLOAT_ALIGN; - break; - case Type::X86_MMXTyID: - case Type::VectorTyID: - AlignType = VECTOR_ALIGN; - break; - default: - llvm_unreachable("Bad type for getAlignment!!!"); - } - - return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty), - abi_or_pref, Ty); -} - -unsigned TargetData::getABITypeAlignment(Type *Ty) const { - return getAlignment(Ty, true); -} - -/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for -/// an integer type of the specified bitwidth. -unsigned TargetData::getABIIntegerTypeAlignment(unsigned BitWidth) const { - return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0); -} - - -unsigned TargetData::getCallFrameTypeAlignment(Type *Ty) const { - for (unsigned i = 0, e = Alignments.size(); i != e; ++i) - if (Alignments[i].AlignType == STACK_ALIGN) - return Alignments[i].ABIAlign; - - return getABITypeAlignment(Ty); -} - -unsigned TargetData::getPrefTypeAlignment(Type *Ty) const { - return getAlignment(Ty, false); -} - -unsigned TargetData::getPreferredTypeAlignmentShift(Type *Ty) const { - unsigned Align = getPrefTypeAlignment(Ty); - assert(!(Align & (Align-1)) && "Alignment is not a power of two!"); - return Log2_32(Align); -} - -/// getIntPtrType - Return an unsigned integer type that is the same size or -/// greater to the host pointer size. -IntegerType *TargetData::getIntPtrType(LLVMContext &C) const { - return IntegerType::get(C, getPointerSizeInBits()); -} - - -uint64_t TargetData::getIndexedOffset(Type *ptrTy, - ArrayRef Indices) const { - Type *Ty = ptrTy; - assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()"); - uint64_t Result = 0; - - generic_gep_type_iterator - TI = gep_type_begin(ptrTy, Indices); - for (unsigned CurIDX = 0, EndIDX = Indices.size(); CurIDX != EndIDX; - ++CurIDX, ++TI) { - if (StructType *STy = dyn_cast(*TI)) { - assert(Indices[CurIDX]->getType() == - Type::getInt32Ty(ptrTy->getContext()) && - "Illegal struct idx"); - unsigned FieldNo = cast(Indices[CurIDX])->getZExtValue(); - - // Get structure layout information... - const StructLayout *Layout = getStructLayout(STy); - - // Add in the offset, as calculated by the structure layout info... - Result += Layout->getElementOffset(FieldNo); - - // Update Ty to refer to current element - Ty = STy->getElementType(FieldNo); - } else { - // Update Ty to refer to current element - Ty = cast(Ty)->getElementType(); - - // Get the array index and the size of each array element. - if (int64_t arrayIdx = cast(Indices[CurIDX])->getSExtValue()) - Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty); - } - } - - return Result; -} - -/// getPreferredAlignment - Return the preferred alignment of the specified -/// global. This includes an explicitly requested alignment (if the global -/// has one). -unsigned TargetData::getPreferredAlignment(const GlobalVariable *GV) const { - Type *ElemType = GV->getType()->getElementType(); - unsigned Alignment = getPrefTypeAlignment(ElemType); - unsigned GVAlignment = GV->getAlignment(); - if (GVAlignment >= Alignment) { - Alignment = GVAlignment; - } else if (GVAlignment != 0) { - Alignment = std::max(GVAlignment, getABITypeAlignment(ElemType)); - } - - if (GV->hasInitializer() && GVAlignment == 0) { - if (Alignment < 16) { - // If the global is not external, see if it is large. If so, give it a - // larger alignment. - if (getTypeSizeInBits(ElemType) > 128) - Alignment = 16; // 16-byte alignment. 
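
// Illustrative sketch of what getIndexedOffset() above computes for a GEP
// into {i32, [4 x i16]} with indices 0, 1, 3: skip the leading i32 (4 bytes)
// and step to the fourth i16 (3 * 2 bytes), giving a byte offset of 10.
#include "llvm/Constants.h"
#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <cassert>
#include <vector>

static void gepOffsetExample() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("e-p:32:32:32");
  std::vector<llvm::Type*> Fields;
  Fields.push_back(llvm::Type::getInt32Ty(Ctx));
  Fields.push_back(llvm::ArrayType::get(llvm::Type::getInt16Ty(Ctx), 4));
  llvm::StructType *ST = llvm::StructType::get(Ctx, Fields);

  llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
  std::vector<llvm::Value*> Idx;
  Idx.push_back(llvm::ConstantInt::get(I32, 0)); // step over the pointer itself
  Idx.push_back(llvm::ConstantInt::get(I32, 1)); // select the array field
  Idx.push_back(llvm::ConstantInt::get(I32, 3)); // select element 3
  assert(DL.getIndexedOffset(llvm::PointerType::getUnqual(ST), Idx) == 10);
}
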
- } - } - return Alignment; -} - -/// getPreferredAlignmentLog - Return the preferred alignment of the -/// specified global, returned in log form. This includes an explicitly -/// requested alignment (if the global has one). -unsigned TargetData::getPreferredAlignmentLog(const GlobalVariable *GV) const { - return Log2_32(getPreferredAlignment(GV)); -} diff --git a/lib/Target/TargetELFWriterInfo.cpp b/lib/Target/TargetELFWriterInfo.cpp deleted file mode 100644 index a661ee9..0000000 --- a/lib/Target/TargetELFWriterInfo.cpp +++ /dev/null @@ -1,25 +0,0 @@ -//===-- lib/Target/TargetELFWriterInfo.cpp - ELF Writer Info --0-*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements the TargetELFWriterInfo class. -// -//===----------------------------------------------------------------------===// - -#include "llvm/Function.h" -#include "llvm/Target/TargetELFWriterInfo.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Target/TargetMachine.h" -using namespace llvm; - -TargetELFWriterInfo::TargetELFWriterInfo(bool is64Bit_, bool isLittleEndian_) : - is64Bit(is64Bit_), isLittleEndian(isLittleEndian_) { -} - -TargetELFWriterInfo::~TargetELFWriterInfo() {} - diff --git a/lib/Target/TargetLibraryInfo.cpp b/lib/Target/TargetLibraryInfo.cpp index 8e215a7..6d4eab1 100644 --- a/lib/Target/TargetLibraryInfo.cpp +++ b/lib/Target/TargetLibraryInfo.cpp @@ -24,6 +24,16 @@ void TargetLibraryInfo::anchor() { } const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = { + "_ZdaPv", + "_ZdlPv", + "_Znaj", + "_ZnajRKSt9nothrow_t", + "_Znam", + "_ZnamRKSt9nothrow_t", + "_Znwj", + "_ZnwjRKSt9nothrow_t", + "_Znwm", + "_ZnwmRKSt9nothrow_t", "__cxa_atexit", "__cxa_guard_abort", "__cxa_guard_acquire", @@ -31,16 +41,29 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "__memcpy_chk", "acos", "acosf", + "acosh", + "acoshf", + "acoshl", "acosl", "asin", "asinf", + "asinh", + "asinhf", + "asinhl", "asinl", "atan", "atan2", "atan2f", "atan2l", "atanf", + "atanh", + "atanhf", + "atanhl", "atanl", + "calloc", + "cbrt", + "cbrtf", + "cbrtl", "ceil", "ceilf", "ceill", @@ -54,6 +77,9 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "coshl", "cosl", "exp", + "exp10", + "exp10f", + "exp10l", "exp2", "exp2f", "exp2l", @@ -74,6 +100,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "fmodl", "fputc", "fputs", + "free", "fwrite", "iprintf", "log", @@ -86,8 +113,12 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "log2", "log2f", "log2l", + "logb", + "logbf", + "logbl", "logf", "logl", + "malloc", "memchr", "memcmp", "memcpy", @@ -97,11 +128,14 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "nearbyint", "nearbyintf", "nearbyintl", + "posix_memalign", "pow", "powf", "powl", "putchar", "puts", + "realloc", + "reallocf", "rint", "rintf", "rintl", @@ -118,14 +152,30 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "sqrt", "sqrtf", "sqrtl", + "stpcpy", "strcat", "strchr", + "strcmp", "strcpy", + "strcspn", + "strdup", "strlen", "strncat", "strncmp", "strncpy", + "strndup", "strnlen", + "strpbrk", + "strrchr", + "strspn", + "strstr", + "strtod", + "strtof", + "strtol", + "strtold", + "strtoll", + "strtoul", + "strtoull", "tan", "tanf", "tanh", @@ -134,7 +184,8 
@@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = "tanl", "trunc", "truncf", - "truncl" + "truncl", + "valloc" }; /// initialize - Initialize the set of available library functions based on the @@ -205,6 +256,21 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T, TLI.setUnavailable(LibFunc::tanhl); // Win32 only has C89 math + TLI.setUnavailable(LibFunc::acosh); + TLI.setUnavailable(LibFunc::acoshf); + TLI.setUnavailable(LibFunc::acoshl); + TLI.setUnavailable(LibFunc::asinh); + TLI.setUnavailable(LibFunc::asinhf); + TLI.setUnavailable(LibFunc::asinhl); + TLI.setUnavailable(LibFunc::atanh); + TLI.setUnavailable(LibFunc::atanhf); + TLI.setUnavailable(LibFunc::atanhl); + TLI.setUnavailable(LibFunc::cbrt); + TLI.setUnavailable(LibFunc::cbrtf); + TLI.setUnavailable(LibFunc::cbrtl); + TLI.setUnavailable(LibFunc::exp10); + TLI.setUnavailable(LibFunc::exp10f); + TLI.setUnavailable(LibFunc::exp10l); TLI.setUnavailable(LibFunc::exp2); TLI.setUnavailable(LibFunc::exp2f); TLI.setUnavailable(LibFunc::exp2l); @@ -217,6 +283,9 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T, TLI.setUnavailable(LibFunc::log1p); TLI.setUnavailable(LibFunc::log1pf); TLI.setUnavailable(LibFunc::log1pl); + TLI.setUnavailable(LibFunc::logb); + TLI.setUnavailable(LibFunc::logbf); + TLI.setUnavailable(LibFunc::logbl); TLI.setUnavailable(LibFunc::nearbyint); TLI.setUnavailable(LibFunc::nearbyintf); TLI.setUnavailable(LibFunc::nearbyintl); @@ -254,6 +323,10 @@ static void initialize(TargetLibraryInfo &TLI, const Triple &T, TLI.setUnavailable(LibFunc::tanf); TLI.setUnavailable(LibFunc::tanhf); } + + // Win32 does *not* provide stpcpy. It is provided on POSIX systems: + // http://pubs.opengroup.org/onlinepubs/9699919799/functions/stpcpy.html + TLI.setUnavailable(LibFunc::stpcpy); } } diff --git a/lib/Target/TargetLoweringObjectFile.cpp b/lib/Target/TargetLoweringObjectFile.cpp index b74a0bd..9d7e2b8 100644 --- a/lib/Target/TargetLoweringObjectFile.cpp +++ b/lib/Target/TargetLoweringObjectFile.cpp @@ -22,7 +22,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/Dwarf.h" @@ -184,7 +184,7 @@ SectionKind TargetLoweringObjectFile::getKindForGlobal(const GlobalValue *GV, // Otherwise, just drop it into a mergable constant section. If we have // a section for this size, use it, otherwise use the arbitrary sized // mergable section. 
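
// Illustrative sketch of how an optimization queries the availability table
// extended in the TargetLibraryInfo hunks above.  It assumes the LibFunc
// enumerators matching the new name-table entries (e.g. LibFunc::stpcpy)
// exist in TargetLibraryInfo.h, which is not shown in this excerpt.
#include "llvm/ADT/Triple.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <cassert>

static void queryLibFuncs() {
  llvm::TargetLibraryInfo LinuxTLI(llvm::Triple("x86_64-unknown-linux-gnu"));
  llvm::TargetLibraryInfo WinTLI(llvm::Triple("i686-pc-win32"));
  assert(LinuxTLI.has(llvm::LibFunc::stpcpy));   // POSIX provides stpcpy
  assert(!WinTLI.has(llvm::LibFunc::stpcpy));    // marked unavailable above
}
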
- switch (TM.getTargetData()->getTypeAllocSize(C->getType())) { + switch (TM.getDataLayout()->getTypeAllocSize(C->getType())) { case 4: return SectionKind::getMergeableConst4(); case 8: return SectionKind::getMergeableConst8(); case 16: return SectionKind::getMergeableConst16(); diff --git a/lib/Target/TargetMachineC.cpp b/lib/Target/TargetMachineC.cpp index d6bba8b..f69c2ab 100644 --- a/lib/Target/TargetMachineC.cpp +++ b/lib/Target/TargetMachineC.cpp @@ -14,7 +14,7 @@ #include "llvm-c/Core.h" #include "llvm-c/Target.h" #include "llvm-c/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/raw_ostream.h" @@ -146,7 +146,7 @@ char* LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T) { } LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T) { - return wrap(unwrap(T)->getTargetData()); + return wrap(unwrap(T)->getDataLayout()); } LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, @@ -158,14 +158,14 @@ LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, std::string error; - const TargetData* td = TM->getTargetData(); + const DataLayout* td = TM->getDataLayout(); if (!td) { - error = "No TargetData in TargetMachine"; + error = "No DataLayout in TargetMachine"; *ErrorMessage = strdup(error.c_str()); return true; } - pass.add(new TargetData(*td)); + pass.add(new DataLayout(*td)); TargetMachine::CodeGenFileType ft; switch (codegen) { @@ -184,7 +184,7 @@ LLVMBool LLVMTargetMachineEmitToFile(LLVMTargetMachineRef T, LLVMModuleRef M, } if (TM->addPassesToEmitFile(pass, destf, ft)) { - error = "No TargetData in TargetMachine"; + error = "No DataLayout in TargetMachine"; *ErrorMessage = strdup(error.c_str()); return true; } diff --git a/lib/Target/TargetRegisterInfo.cpp b/lib/Target/TargetRegisterInfo.cpp index 2395f2b..be8b582 100644 --- a/lib/Target/TargetRegisterInfo.cpp +++ b/lib/Target/TargetRegisterInfo.cpp @@ -20,8 +20,10 @@ using namespace llvm; TargetRegisterInfo::TargetRegisterInfo(const TargetRegisterInfoDesc *ID, regclass_iterator RCB, regclass_iterator RCE, - const char *const *subregindexnames) - : InfoDesc(ID), SubRegIndexNames(subregindexnames), + const char *const *SRINames, + const unsigned *SRILaneMasks) + : InfoDesc(ID), SubRegIndexNames(SRINames), + SubRegIndexLaneMasks(SRILaneMasks), RegClassBegin(RCB), RegClassEnd(RCE) { } diff --git a/lib/Target/TargetTransformImpl.cpp b/lib/Target/TargetTransformImpl.cpp new file mode 100644 index 0000000..b36e6f8 --- /dev/null +++ b/lib/Target/TargetTransformImpl.cpp @@ -0,0 +1,353 @@ +// llvm/Target/TargetTransformImpl.cpp - Target Loop Trans Info ---*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Target/TargetTransformImpl.h" +#include "llvm/Target/TargetLowering.h" +#include + +using namespace llvm; + +//===----------------------------------------------------------------------===// +// +// Calls used by scalar transformations. 
+// +//===----------------------------------------------------------------------===// + +bool ScalarTargetTransformImpl::isLegalAddImmediate(int64_t imm) const { + return TLI->isLegalAddImmediate(imm); +} + +bool ScalarTargetTransformImpl::isLegalICmpImmediate(int64_t imm) const { + return TLI->isLegalICmpImmediate(imm); +} + +bool ScalarTargetTransformImpl::isLegalAddressingMode(const AddrMode &AM, + Type *Ty) const { + return TLI->isLegalAddressingMode(AM, Ty); +} + +bool ScalarTargetTransformImpl::isTruncateFree(Type *Ty1, Type *Ty2) const { + return TLI->isTruncateFree(Ty1, Ty2); +} + +bool ScalarTargetTransformImpl::isTypeLegal(Type *Ty) const { + EVT T = TLI->getValueType(Ty); + return TLI->isTypeLegal(T); +} + +unsigned ScalarTargetTransformImpl::getJumpBufAlignment() const { + return TLI->getJumpBufAlignment(); +} + +unsigned ScalarTargetTransformImpl::getJumpBufSize() const { + return TLI->getJumpBufSize(); +} + +bool ScalarTargetTransformImpl::shouldBuildLookupTables() const { + return TLI->supportJumpTables() && + (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) || + TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other)); +} + +//===----------------------------------------------------------------------===// +// +// Calls used by the vectorizers. +// +//===----------------------------------------------------------------------===// +int VectorTargetTransformImpl::InstructionOpcodeToISD(unsigned Opcode) const { + enum InstructionOpcodes { +#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM, +#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM +#include "llvm/Instruction.def" + }; + switch (static_cast(Opcode)) { + case Ret: return 0; + case Br: return 0; + case Switch: return 0; + case IndirectBr: return 0; + case Invoke: return 0; + case Resume: return 0; + case Unreachable: return 0; + case Add: return ISD::ADD; + case FAdd: return ISD::FADD; + case Sub: return ISD::SUB; + case FSub: return ISD::FSUB; + case Mul: return ISD::MUL; + case FMul: return ISD::FMUL; + case UDiv: return ISD::UDIV; + case SDiv: return ISD::UDIV; + case FDiv: return ISD::FDIV; + case URem: return ISD::UREM; + case SRem: return ISD::SREM; + case FRem: return ISD::FREM; + case Shl: return ISD::SHL; + case LShr: return ISD::SRL; + case AShr: return ISD::SRA; + case And: return ISD::AND; + case Or: return ISD::OR; + case Xor: return ISD::XOR; + case Alloca: return 0; + case Load: return ISD::LOAD; + case Store: return ISD::STORE; + case GetElementPtr: return 0; + case Fence: return 0; + case AtomicCmpXchg: return 0; + case AtomicRMW: return 0; + case Trunc: return ISD::TRUNCATE; + case ZExt: return ISD::ZERO_EXTEND; + case SExt: return ISD::SIGN_EXTEND; + case FPToUI: return ISD::FP_TO_UINT; + case FPToSI: return ISD::FP_TO_SINT; + case UIToFP: return ISD::UINT_TO_FP; + case SIToFP: return ISD::SINT_TO_FP; + case FPTrunc: return ISD::FP_ROUND; + case FPExt: return ISD::FP_EXTEND; + case PtrToInt: return ISD::BITCAST; + case IntToPtr: return ISD::BITCAST; + case BitCast: return ISD::BITCAST; + case ICmp: return ISD::SETCC; + case FCmp: return ISD::SETCC; + case PHI: return 0; + case Call: return 0; + case Select: return ISD::SELECT; + case UserOp1: return 0; + case UserOp2: return 0; + case VAArg: return 0; + case ExtractElement: return ISD::EXTRACT_VECTOR_ELT; + case InsertElement: return ISD::INSERT_VECTOR_ELT; + case ShuffleVector: return ISD::VECTOR_SHUFFLE; + case ExtractValue: return ISD::MERGE_VALUES; + case InsertValue: return ISD::MERGE_VALUES; + case LandingPad: return 0; + } + + 
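
// Illustrative sketch of how a scalar transformation is expected to reach the
// forwarding hooks defined above, via the getScalarTargetTransformInfo()
// accessor the PPC and Sparc target machines in this patch now implement.
// The header path for the *Info interface classes is an assumption; only the
// Impl side appears in this excerpt.
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetTransformInfo.h"

// Decide whether an add of Imm can be folded directly into an instruction,
// answering conservatively when the target registers no hooks.
static bool canFoldAddImmediate(const llvm::TargetMachine &TM, int64_t Imm) {
  if (const llvm::ScalarTargetTransformInfo *STTI =
          TM.getScalarTargetTransformInfo())
    return STTI->isLegalAddImmediate(Imm);
  return false;
}
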
llvm_unreachable("Unknown instruction type encountered!"); +} + +std::pair +VectorTargetTransformImpl::getTypeLegalizationCost(Type *Ty) const { + + LLVMContext &C = Ty->getContext(); + EVT MTy = TLI->getValueType(Ty); + + unsigned Cost = 1; + // We keep legalizing the type until we find a legal kind. We assume that + // the only operation that costs anything is the split. After splitting + // we need to handle two types. + while (true) { + TargetLowering::LegalizeKind LK = TLI->getTypeConversion(C, MTy); + + if (LK.first == TargetLowering::TypeLegal) + return std::make_pair(Cost, MTy.getSimpleVT()); + + if (LK.first == TargetLowering::TypeSplitVector || + LK.first == TargetLowering::TypeExpandInteger) + Cost *= 2; + + // Keep legalizing the type. + MTy = LK.second; + } +} + +unsigned +VectorTargetTransformImpl::getScalarizationOverhead(Type *Ty, + bool Insert, + bool Extract) const { + assert (Ty->isVectorTy() && "Can only scalarize vectors"); + unsigned Cost = 0; + + for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) { + if (Insert) + Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i); + if (Extract) + Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i); + } + + return Cost; +} + +unsigned VectorTargetTransformImpl::getArithmeticInstrCost(unsigned Opcode, + Type *Ty) const { + // Check if any of the operands are vector operands. + int ISD = InstructionOpcodeToISD(Opcode); + assert(ISD && "Invalid opcode"); + + std::pair LT = getTypeLegalizationCost(Ty); + + if (!TLI->isOperationExpand(ISD, LT.second)) { + // The operation is legal. Assume it costs 1. Multiply + // by the type-legalization overhead. + return LT.first * 1; + } + + // Else, assume that we need to scalarize this op. + if (Ty->isVectorTy()) { + unsigned Num = Ty->getVectorNumElements(); + unsigned Cost = getArithmeticInstrCost(Opcode, Ty->getScalarType()); + // return the cost of multiple scalar invocation plus the cost of inserting + // and extracting the values. + return getScalarizationOverhead(Ty, true, true) + Num * Cost; + } + + // We don't know anything about this scalar instruction. + return 1; +} + +unsigned VectorTargetTransformImpl::getBroadcastCost(Type *Tp) const { + return 1; +} + +unsigned VectorTargetTransformImpl::getCastInstrCost(unsigned Opcode, Type *Dst, + Type *Src) const { + int ISD = InstructionOpcodeToISD(Opcode); + assert(ISD && "Invalid opcode"); + + std::pair SrcLT = getTypeLegalizationCost(Src); + std::pair DstLT = getTypeLegalizationCost(Dst); + + // Handle scalar conversions. + if (!Src->isVectorTy() && !Dst->isVectorTy()) { + + // Scalar bitcasts are usually free. + if (Opcode == Instruction::BitCast) + return 0; + + if (Opcode == Instruction::Trunc && + TLI->isTruncateFree(SrcLT.second, DstLT.second)) + return 0; + + if (Opcode == Instruction::ZExt && + TLI->isZExtFree(SrcLT.second, DstLT.second)) + return 0; + + // Just check the op cost. If the operation is legal then assume it costs 1. + if (!TLI->isOperationExpand(ISD, DstLT.second)) + return 1; + + // Assume that illegal scalar instruction are expensive. + return 4; + } + + // Check vector-to-vector casts. + if (Dst->isVectorTy() && Src->isVectorTy()) { + + // If the cast is between same-sized registers, then the check is simple. + if (SrcLT.first == DstLT.first && + SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) { + + // Bitcast between types that are legalized to the same type are free. 
+ if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc) + return 0; + + // Assume that Zext is done using AND. + if (Opcode == Instruction::ZExt) + return 1; + + // Assume that sext is done using SHL and SRA. + if (Opcode == Instruction::SExt) + return 2; + + // Just check the op cost. If the operation is legal then assume it costs + // 1 and multiply by the type-legalization overhead. + if (!TLI->isOperationExpand(ISD, DstLT.second)) + return SrcLT.first * 1; + } + + // If we are converting vectors and the operation is illegal, or + // if the vectors are legalized to different types, estimate the + // scalarization costs. + unsigned Num = Dst->getVectorNumElements(); + unsigned Cost = getCastInstrCost(Opcode, Dst->getScalarType(), + Src->getScalarType()); + + // Return the cost of multiple scalar invocation plus the cost of + // inserting and extracting the values. + return getScalarizationOverhead(Dst, true, true) + Num * Cost; + } + + // We already handled vector-to-vector and scalar-to-scalar conversions. This + // is where we handle bitcast between vectors and scalars. We need to assume + // that the conversion is scalarized in one way or another. + if (Opcode == Instruction::BitCast) + // Illegal bitcasts are done by storing and loading from a stack slot. + return (Src->isVectorTy()? getScalarizationOverhead(Src, false, true):0) + + (Dst->isVectorTy()? getScalarizationOverhead(Dst, true, false):0); + + llvm_unreachable("Unhandled cast"); + } + +unsigned VectorTargetTransformImpl::getCFInstrCost(unsigned Opcode) const { + return 1; +} + +unsigned VectorTargetTransformImpl::getCmpSelInstrCost(unsigned Opcode, + Type *ValTy, + Type *CondTy) const { + int ISD = InstructionOpcodeToISD(Opcode); + assert(ISD && "Invalid opcode"); + + // Selects on vectors are actually vector selects. + if (ISD == ISD::SELECT) { + assert(CondTy && "CondTy must exist"); + if (CondTy->isVectorTy()) + ISD = ISD::VSELECT; + } + + std::pair LT = getTypeLegalizationCost(ValTy); + + if (!TLI->isOperationExpand(ISD, LT.second)) { + // The operation is legal. Assume it costs 1. Multiply + // by the type-legalization overhead. + return LT.first * 1; + } + + // Otherwise, assume that the cast is scalarized. + if (ValTy->isVectorTy()) { + unsigned Num = ValTy->getVectorNumElements(); + if (CondTy) + CondTy = CondTy->getScalarType(); + unsigned Cost = getCmpSelInstrCost(Opcode, ValTy->getScalarType(), + CondTy); + + // Return the cost of multiple scalar invocation plus the cost of inserting + // and extracting the values. + return getScalarizationOverhead(ValTy, true, false) + Num * Cost; + } + + // Unknown scalar opcode. + return 1; +} + +unsigned VectorTargetTransformImpl::getVectorInstrCost(unsigned Opcode, + Type *Val, + unsigned Index) const { + return 1; +} + +unsigned +VectorTargetTransformImpl::getInstrCost(unsigned Opcode, Type *Ty1, + Type *Ty2) const { + return 1; +} + +unsigned +VectorTargetTransformImpl::getMemoryOpCost(unsigned Opcode, Type *Src, + unsigned Alignment, + unsigned AddressSpace) const { + std::pair LT = getTypeLegalizationCost(Src); + + // Assume that all loads of legal types cost 1. 
+ return LT.first; +} + +unsigned +VectorTargetTransformImpl::getNumberOfParts(Type *Tp) const { + std::pair LT = getTypeLegalizationCost(Tp); + return LT.first; +} diff --git a/lib/Target/X86/AsmParser/X86AsmLexer.cpp b/lib/Target/X86/AsmParser/X86AsmLexer.cpp index 2794e60..66ad353 100644 --- a/lib/Target/X86/AsmParser/X86AsmLexer.cpp +++ b/lib/Target/X86/AsmParser/X86AsmLexer.cpp @@ -18,19 +18,19 @@ using namespace llvm; namespace { - + class X86AsmLexer : public MCTargetAsmLexer { const MCAsmInfo &AsmInfo; - + bool tentativeIsValid; AsmToken tentativeToken; - + const AsmToken &lexTentative() { tentativeToken = getLexer()->Lex(); tentativeIsValid = true; return tentativeToken; } - + const AsmToken &lexDefinite() { if (tentativeIsValid) { tentativeIsValid = false; @@ -38,7 +38,7 @@ class X86AsmLexer : public MCTargetAsmLexer { } return getLexer()->Lex(); } - + AsmToken LexTokenATT(); AsmToken LexTokenIntel(); protected: @@ -47,7 +47,7 @@ protected: SetError(SMLoc(), "No MCAsmLexer installed"); return AsmToken(AsmToken::Error, "", 0); } - + switch (AsmInfo.getAssemblerDialect()) { default: SetError(SMLoc(), "Unhandled dialect"); @@ -71,33 +71,32 @@ public: AsmToken X86AsmLexer::LexTokenATT() { AsmToken lexedToken = lexDefinite(); - + switch (lexedToken.getKind()) { default: return lexedToken; case AsmToken::Error: SetError(Lexer->getErrLoc(), Lexer->getErr()); return lexedToken; - + case AsmToken::Percent: { const AsmToken &nextToken = lexTentative(); if (nextToken.getKind() != AsmToken::Identifier) return lexedToken; - if (unsigned regID = MatchRegisterName(nextToken.getString())) { lexDefinite(); - + // FIXME: This is completely wrong when there is a space or other // punctuation between the % and the register name. StringRef regStr(lexedToken.getString().data(), - lexedToken.getString().size() + + lexedToken.getString().size() + nextToken.getString().size()); - - return AsmToken(AsmToken::Register, regStr, + + return AsmToken(AsmToken::Register, regStr, static_cast(regID)); } - + // Match register name failed. If this is "db[0-7]", match it as an alias // for dr[0-7]. if (nextToken.getString().size() == 3 && @@ -113,29 +112,29 @@ AsmToken X86AsmLexer::LexTokenATT() { case '6': RegNo = X86::DR6; break; case '7': RegNo = X86::DR7; break; } - + if (RegNo != -1) { lexDefinite(); // FIXME: This is completely wrong when there is a space or other // punctuation between the % and the register name. 
StringRef regStr(lexedToken.getString().data(), - lexedToken.getString().size() + + lexedToken.getString().size() + nextToken.getString().size()); - return AsmToken(AsmToken::Register, regStr, + return AsmToken(AsmToken::Register, regStr, static_cast(RegNo)); } } - - + + return lexedToken; - } + } } } AsmToken X86AsmLexer::LexTokenIntel() { const AsmToken &lexedToken = lexDefinite(); - + switch(lexedToken.getKind()) { default: return lexedToken; @@ -144,7 +143,7 @@ AsmToken X86AsmLexer::LexTokenIntel() { return lexedToken; case AsmToken::Identifier: { unsigned regID = MatchRegisterName(lexedToken.getString().lower()); - + if (regID) return AsmToken(AsmToken::Register, lexedToken.getString(), diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp index fbbaa9500..ce446e7 100644 --- a/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -11,12 +11,14 @@ #include "llvm/MC/MCTargetAsmParser.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCSubtargetInfo.h" #include "llvm/MC/MCParser/MCAsmLexer.h" #include "llvm/MC/MCParser/MCAsmParser.h" #include "llvm/MC/MCParser/MCParsedAsmOperand.h" +#include "llvm/ADT/APFloat.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" @@ -33,13 +35,16 @@ struct X86Operand; class X86AsmParser : public MCTargetAsmParser { MCSubtargetInfo &STI; MCAsmParser &Parser; + ParseInstructionInfo *InstInfo; private: MCAsmParser &getParser() const { return Parser; } MCAsmLexer &getLexer() const { return Parser.getLexer(); } bool Error(SMLoc L, const Twine &Msg, - ArrayRef Ranges = ArrayRef()) { + ArrayRef Ranges = ArrayRef(), + bool MatchingInlineAsm = false) { + if (MatchingInlineAsm) return true; return Parser.Error(L, Msg, Ranges); } @@ -51,23 +56,25 @@ private: X86Operand *ParseOperand(); X86Operand *ParseATTOperand(); X86Operand *ParseIntelOperand(); - X86Operand *ParseIntelMemOperand(); + X86Operand *ParseIntelOffsetOfOperator(SMLoc StartLoc); + X86Operand *ParseIntelTypeOperator(SMLoc StartLoc); + X86Operand *ParseIntelMemOperand(unsigned SegReg, SMLoc StartLoc); X86Operand *ParseIntelBracExpression(unsigned SegReg, unsigned Size); X86Operand *ParseMemOperand(unsigned SegReg, SMLoc StartLoc); + bool ParseIntelDotOperator(const MCExpr *Disp, const MCExpr **NewDisp, + SmallString<64> &Err); + bool ParseDirectiveWord(unsigned Size, SMLoc L); bool ParseDirectiveCode(StringRef IDVal, SMLoc L); bool processInstruction(MCInst &Inst, const SmallVectorImpl &Ops); - bool MatchAndEmitInstruction(SMLoc IDLoc, + bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out); - - bool MatchInstruction(SMLoc IDLoc, - SmallVectorImpl &Operands, - SmallVectorImpl &MCInsts); + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm); /// isSrcOp - Returns true if operand is either (%rsi) or %ds:%(rsi) /// in 64bit mode or (%esi) or %es:(%esi) in 32bit mode. @@ -96,14 +103,15 @@ private: public: X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &parser) - : MCTargetAsmParser(), STI(sti), Parser(parser) { + : MCTargetAsmParser(), STI(sti), Parser(parser), InstInfo(0) { // Initialize the set of available features. 
setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); } virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); - virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc, + virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, + SMLoc NameLoc, SmallVectorImpl &Operands); virtual bool ParseDirective(AsmToken DirectiveID); @@ -159,6 +167,7 @@ struct X86Operand : public MCParsedAsmOperand { } Kind; SMLoc StartLoc, EndLoc; + SMLoc OffsetOfLoc; union { struct { @@ -172,6 +181,7 @@ struct X86Operand : public MCParsedAsmOperand { struct { const MCExpr *Val; + bool NeedAsmRewrite; } Imm; struct { @@ -181,6 +191,7 @@ struct X86Operand : public MCParsedAsmOperand { unsigned IndexReg; unsigned Scale; unsigned Size; + bool NeedSizeDir; } Mem; }; @@ -191,8 +202,11 @@ struct X86Operand : public MCParsedAsmOperand { SMLoc getStartLoc() const { return StartLoc; } /// getEndLoc - Get the location of the last token of this operand. SMLoc getEndLoc() const { return EndLoc; } - + /// getLocRange - Get the range between the first and last token of this + /// operand. SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); } + /// getOffsetOfLoc - Get the location of the offset operator. + SMLoc getOffsetOfLoc() const { return OffsetOfLoc; } virtual void print(raw_ostream &OS) const {} @@ -216,6 +230,11 @@ struct X86Operand : public MCParsedAsmOperand { return Imm.Val; } + bool needAsmRewrite() const { + assert(Kind == Immediate && "Invalid access!"); + return Imm.NeedAsmRewrite; + } + const MCExpr *getMemDisp() const { assert(Kind == Memory && "Invalid access!"); return Mem.Disp; @@ -312,6 +331,20 @@ struct X86Operand : public MCParsedAsmOperand { return isImmSExti64i32Value(CE->getValue()); } + unsigned getMemSize() const { + assert(Kind == Memory && "Invalid access!"); + return Mem.Size; + } + + bool isOffsetOf() const { + return OffsetOfLoc.getPointer(); + } + + bool needSizeDirective() const { + assert(Kind == Memory && "Invalid access!"); + return Mem.NeedSizeDir; + } + bool isMem() const { return Kind == Memory; } bool isMem8() const { return Kind == Memory && (!Mem.Size || Mem.Size == 8); @@ -437,21 +470,25 @@ struct X86Operand : public MCParsedAsmOperand { return Res; } - static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc) { + static X86Operand *CreateReg(unsigned RegNo, SMLoc StartLoc, SMLoc EndLoc, + SMLoc OffsetOfLoc = SMLoc()) { X86Operand *Res = new X86Operand(Register, StartLoc, EndLoc); Res->Reg.RegNo = RegNo; + Res->OffsetOfLoc = OffsetOfLoc; return Res; } - static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc){ + static X86Operand *CreateImm(const MCExpr *Val, SMLoc StartLoc, SMLoc EndLoc, + bool NeedRewrite = true){ X86Operand *Res = new X86Operand(Immediate, StartLoc, EndLoc); Res->Imm.Val = Val; + Res->Imm.NeedAsmRewrite = NeedRewrite; return Res; } /// Create an absolute memory operand. 
- static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, - SMLoc EndLoc, unsigned Size = 0) { + static X86Operand *CreateMem(const MCExpr *Disp, SMLoc StartLoc, SMLoc EndLoc, + unsigned Size = 0, bool NeedSizeDir = false){ X86Operand *Res = new X86Operand(Memory, StartLoc, EndLoc); Res->Mem.SegReg = 0; Res->Mem.Disp = Disp; @@ -459,6 +496,7 @@ struct X86Operand : public MCParsedAsmOperand { Res->Mem.IndexReg = 0; Res->Mem.Scale = 1; Res->Mem.Size = Size; + Res->Mem.NeedSizeDir = NeedSizeDir; return Res; } @@ -466,7 +504,7 @@ struct X86Operand : public MCParsedAsmOperand { static X86Operand *CreateMem(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, unsigned IndexReg, unsigned Scale, SMLoc StartLoc, SMLoc EndLoc, - unsigned Size = 0) { + unsigned Size = 0, bool NeedSizeDir = false) { // We should never just have a displacement, that should be parsed as an // absolute memory operand. assert((SegReg || BaseReg || IndexReg) && "Invalid memory operand!"); @@ -481,6 +519,7 @@ struct X86Operand : public MCParsedAsmOperand { Res->Mem.IndexReg = IndexReg; Res->Mem.Scale = Scale; Res->Mem.Size = Size; + Res->Mem.NeedSizeDir = NeedSizeDir; return Res; } }; @@ -510,12 +549,13 @@ bool X86AsmParser::isDstOp(X86Operand &Op) { bool X86AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) { RegNo = 0; - if (!isParsingIntelSyntax()) { - const AsmToken &TokPercent = Parser.getTok(); - assert(TokPercent.is(AsmToken::Percent) && "Invalid token kind!"); - StartLoc = TokPercent.getLoc(); + const AsmToken &PercentTok = Parser.getTok(); + StartLoc = PercentTok.getLoc(); + + // If we encounter a %, ignore it. This code handles registers with and + // without the prefix, unprefixed registers can occur in cfi directives. + if (!isParsingIntelSyntax() && PercentTok.is(AsmToken::Percent)) Parser.Lex(); // Eat percent token. - } const AsmToken &Tok = Parser.getTok(); if (Tok.isNot(AsmToken::Identifier)) { @@ -621,23 +661,25 @@ X86Operand *X86AsmParser::ParseOperand() { /// getIntelMemOperandSize - Return intel memory operand size. 
static unsigned getIntelMemOperandSize(StringRef OpStr) { - unsigned Size = 0; - if (OpStr == "BYTE") Size = 8; - if (OpStr == "WORD") Size = 16; - if (OpStr == "DWORD") Size = 32; - if (OpStr == "QWORD") Size = 64; - if (OpStr == "XWORD") Size = 80; - if (OpStr == "XMMWORD") Size = 128; - if (OpStr == "YMMWORD") Size = 256; + unsigned Size = StringSwitch(OpStr) + .Cases("BYTE", "byte", 8) + .Cases("WORD", "word", 16) + .Cases("DWORD", "dword", 32) + .Cases("QWORD", "qword", 64) + .Cases("XWORD", "xword", 80) + .Cases("XMMWORD", "xmmword", 128) + .Cases("YMMWORD", "ymmword", 256) + .Default(0); return Size; } -X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, +X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, unsigned Size) { unsigned BaseReg = 0, IndexReg = 0, Scale = 1; - SMLoc Start = Parser.getTok().getLoc(), End; + const AsmToken &Tok = Parser.getTok(); + SMLoc Start = Tok.getLoc(), End; - const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext()); + const MCExpr *Disp = MCConstantExpr::Create(0, getContext()); // Parse [ BaseReg + Scale*IndexReg + Disp ] or [ symbol ] // Eat '[' @@ -653,15 +695,17 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, if (getLexer().isNot(AsmToken::RBrac)) return ErrorOperand(Start, "Expected ']' token!"); Parser.Lex(); + End = Tok.getLoc(); return X86Operand::CreateMem(Disp, Start, End, Size); } } else if (getLexer().is(AsmToken::Integer)) { - int64_t Val = Parser.getTok().getIntVal(); + int64_t Val = Tok.getIntVal(); Parser.Lex(); - SMLoc Loc = Parser.getTok().getLoc(); + SMLoc Loc = Tok.getLoc(); if (getLexer().is(AsmToken::RBrac)) { // Handle '[' number ']' Parser.Lex(); + End = Tok.getLoc(); const MCExpr *Disp = MCConstantExpr::Create(Val, getContext()); if (SegReg) return X86Operand::CreateMem(SegReg, Disp, 0, 0, Scale, @@ -670,7 +714,7 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, } else if (getLexer().is(AsmToken::Star)) { // Handle '[' Scale*IndexReg ']' Parser.Lex(); - SMLoc IdxRegLoc = Parser.getTok().getLoc(); + SMLoc IdxRegLoc = Tok.getLoc(); if (ParseRegister(IndexReg, IdxRegLoc, End)) return ErrorOperand(IdxRegLoc, "Expected register"); Scale = Val; @@ -678,16 +722,27 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, return ErrorOperand(Loc, "Unexpected token"); } - if (getLexer().is(AsmToken::Plus) || getLexer().is(AsmToken::Minus)) { - bool isPlus = getLexer().is(AsmToken::Plus); + // Parse ][ as a plus. 
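For reference, the StringSwitch above maps the Intel size directives to bit widths exactly as the old if-chain did, but now also accepts the lower-case spellings. A slightly more permissive, case-insensitive sketch in plain C++ (standard library only, not the LLVM ADT):

#include <cctype>
#include <iostream>
#include <string>

// Case-insensitive version of the size-directive table from the patch.
static unsigned getIntelMemOperandSize(std::string Op) {
  for (char &C : Op) C = std::toupper(static_cast<unsigned char>(C));
  if (Op == "BYTE")    return 8;
  if (Op == "WORD")    return 16;
  if (Op == "DWORD")   return 32;
  if (Op == "QWORD")   return 64;
  if (Op == "XWORD")   return 80;
  if (Op == "XMMWORD") return 128;
  if (Op == "YMMWORD") return 256;
  return 0;                               // not a size directive
}

int main() {
  std::cout << getIntelMemOperandSize("dword") << "\n";    // 32
  std::cout << getIntelMemOperandSize("YMMWORD") << "\n";  // 256
}
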
+ bool ExpectRBrac = true; + if (getLexer().is(AsmToken::RBrac)) { + ExpectRBrac = false; Parser.Lex(); - SMLoc PlusLoc = Parser.getTok().getLoc(); + End = Tok.getLoc(); + } + + if (getLexer().is(AsmToken::Plus) || getLexer().is(AsmToken::Minus) || + getLexer().is(AsmToken::LBrac)) { + ExpectRBrac = true; + bool isPlus = getLexer().is(AsmToken::Plus) || + getLexer().is(AsmToken::LBrac); + Parser.Lex(); + SMLoc PlusLoc = Tok.getLoc(); if (getLexer().is(AsmToken::Integer)) { - int64_t Val = Parser.getTok().getIntVal(); + int64_t Val = Tok.getIntVal(); Parser.Lex(); if (getLexer().is(AsmToken::Star)) { Parser.Lex(); - SMLoc IdxRegLoc = Parser.getTok().getLoc(); + SMLoc IdxRegLoc = Tok.getLoc(); if (ParseRegister(IndexReg, IdxRegLoc, End)) return ErrorOperand(IdxRegLoc, "Expected register"); Scale = Val; @@ -698,21 +753,48 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, return ErrorOperand(PlusLoc, "unexpected token after +"); } else if (getLexer().is(AsmToken::Identifier)) { // This could be an index register or a displacement expression. - End = Parser.getTok().getLoc(); + End = Tok.getLoc(); if (!IndexReg) ParseRegister(IndexReg, Start, End); else if (getParser().ParseExpression(Disp, End)) return 0; } } + + // Parse ][ as a plus. + if (getLexer().is(AsmToken::RBrac)) { + ExpectRBrac = false; + Parser.Lex(); + End = Tok.getLoc(); + if (getLexer().is(AsmToken::LBrac)) { + ExpectRBrac = true; + Parser.Lex(); + if (getParser().ParseExpression(Disp, End)) + return 0; + } + } else if (ExpectRBrac) { + if (getParser().ParseExpression(Disp, End)) + return 0; + } - if (getLexer().isNot(AsmToken::RBrac)) - if (getParser().ParseExpression(Disp, End)) return 0; + if (ExpectRBrac) { + if (getLexer().isNot(AsmToken::RBrac)) + return ErrorOperand(End, "expected ']' token!"); + Parser.Lex(); + End = Tok.getLoc(); + } - End = Parser.getTok().getLoc(); - if (getLexer().isNot(AsmToken::RBrac)) - return ErrorOperand(End, "expected ']' token!"); - Parser.Lex(); - End = Parser.getTok().getLoc(); + // Parse the dot operator (e.g., [ebx].foo.bar). + if (Tok.getString().startswith(".")) { + SmallString<64> Err; + const MCExpr *NewDisp; + if (ParseIntelDotOperator(Disp, &NewDisp, Err)) + return ErrorOperand(Tok.getLoc(), Err); + + Parser.Lex(); // Eat the field. + Disp = NewDisp; + } + + End = Tok.getLoc(); // handle [-42] if (!BaseReg && !IndexReg) @@ -723,15 +805,15 @@ X86Operand *X86AsmParser::ParseIntelBracExpression(unsigned SegReg, } /// ParseIntelMemOperand - Parse intel style memory operand. 
-X86Operand *X86AsmParser::ParseIntelMemOperand() { +X86Operand *X86AsmParser::ParseIntelMemOperand(unsigned SegReg, SMLoc Start) { const AsmToken &Tok = Parser.getTok(); - SMLoc Start = Parser.getTok().getLoc(), End; - unsigned SegReg = 0; + SMLoc End; unsigned Size = getIntelMemOperandSize(Tok.getString()); if (Size) { Parser.Lex(); - assert (Tok.getString() == "PTR" && "Unexpected token!"); + assert ((Tok.getString() == "PTR" || Tok.getString() == "ptr") && + "Unexpected token!"); Parser.Lex(); } @@ -750,12 +832,164 @@ X86Operand *X86AsmParser::ParseIntelMemOperand() { const MCExpr *Disp = MCConstantExpr::Create(0, getParser().getContext()); if (getParser().ParseExpression(Disp, End)) return 0; - return X86Operand::CreateMem(Disp, Start, End, Size); + End = Parser.getTok().getLoc(); + + bool NeedSizeDir = false; + if (!Size && isParsingInlineAsm()) { + if (const MCSymbolRefExpr *SymRef = dyn_cast(Disp)) { + const MCSymbol &Sym = SymRef->getSymbol(); + // FIXME: The SemaLookup will fail if the name is anything other then an + // identifier. + // FIXME: Pass a valid SMLoc. + SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Size); + NeedSizeDir = Size > 0; + } + } + if (!isParsingInlineAsm()) + return X86Operand::CreateMem(Disp, Start, End, Size); + else + // When parsing inline assembly we set the base register to a non-zero value + // as we don't know the actual value at this time. This is necessary to + // get the matching correct in some cases. + return X86Operand::CreateMem(/*SegReg*/0, Disp, /*BaseReg*/1, /*IndexReg*/0, + /*Scale*/1, Start, End, Size, NeedSizeDir); +} + +/// Parse the '.' operator. +bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp, + const MCExpr **NewDisp, + SmallString<64> &Err) { + AsmToken Tok = *&Parser.getTok(); + uint64_t OrigDispVal, DotDispVal; + + // FIXME: Handle non-constant expressions. + if (const MCConstantExpr *OrigDisp = dyn_cast(Disp)) { + OrigDispVal = OrigDisp->getValue(); + } else { + Err = "Non-constant offsets are not supported!"; + return true; + } + + // Drop the '.'. + StringRef DotDispStr = Tok.getString().drop_front(1); + + // .Imm gets lexed as a real. + if (Tok.is(AsmToken::Real)) { + APInt DotDisp; + DotDispStr.getAsInteger(10, DotDisp); + DotDispVal = DotDisp.getZExtValue(); + } else if (Tok.is(AsmToken::Identifier)) { + // We should only see an identifier when parsing the original inline asm. + // The front-end should rewrite this in terms of immediates. + assert (isParsingInlineAsm() && "Unexpected field name!"); + + unsigned DotDisp; + std::pair BaseMember = DotDispStr.split('.'); + if (SemaCallback->LookupInlineAsmField(BaseMember.first, BaseMember.second, + DotDisp)) { + Err = "Unable to lookup field reference!"; + return true; + } + DotDispVal = DotDisp; + } else { + Err = "Unexpected token type!"; + return true; + } + + if (isParsingInlineAsm() && Tok.is(AsmToken::Identifier)) { + SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data()); + unsigned Len = DotDispStr.size(); + unsigned Val = OrigDispVal + DotDispVal; + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_DotOperator, Loc, Len, + Val)); + } + + *NewDisp = MCConstantExpr::Create(OrigDispVal + DotDispVal, getContext()); + return false; +} + +/// Parse the 'offset' operator. This operator is used to specify the +/// location rather then the content of a variable. +X86Operand *X86AsmParser::ParseIntelOffsetOfOperator(SMLoc Start) { + SMLoc OffsetOfLoc = Start; + Parser.Lex(); // Eat offset. 
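The dot operator folds a field offset into the constant displacement, so an expression like [ebx].foo.bar ends up as a plain [ebx + imm] once the field is resolved. A standalone sketch of that combination, using a hypothetical field-offset table in place of Sema's LookupInlineAsmField callback:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Hypothetical field-offset table standing in for the Sema lookup.
static const std::map<std::string, uint64_t> FieldOffsets = {
    {"Point.x", 0}, {"Point.y", 4}};

// Combine an existing constant displacement with a ".field" suffix,
// as ParseIntelDotOperator does for constant operands.
static bool addDotDisp(uint64_t OrigDisp, const std::string &DotExpr,
                       uint64_t &NewDisp) {
  auto It = FieldOffsets.find(DotExpr);
  if (It == FieldOffsets.end())
    return true;                          // "Unable to lookup field reference!"
  NewDisp = OrigDisp + It->second;
  return false;
}

int main() {
  uint64_t Disp = 0;
  if (!addDotDisp(/*OrigDisp=*/8, "Point.y", Disp))
    std::cout << "[ebx + " << Disp << "]\n";   // [ebx + 12]
}
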
+ Start = Parser.getTok().getLoc(); + assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier"); + + SMLoc End; + const MCExpr *Val; + if (getParser().ParseExpression(Val, End)) + return ErrorOperand(Start, "Unable to parse expression!"); + + End = Parser.getTok().getLoc(); + + // Don't emit the offset operator. + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7)); + + // The offset operator will have an 'r' constraint, thus we need to create + // register operand to ensure proper matching. Just pick a GPR based on + // the size of a pointer. + unsigned RegNo = is64BitMode() ? X86::RBX : X86::EBX; + return X86Operand::CreateReg(RegNo, Start, End, OffsetOfLoc); +} + +/// Parse the 'TYPE' operator. The TYPE operator returns the size of a C or +/// C++ type or variable. If the variable is an array, TYPE returns the size of +/// a single element of the array. +X86Operand *X86AsmParser::ParseIntelTypeOperator(SMLoc Start) { + SMLoc TypeLoc = Start; + Parser.Lex(); // Eat offset. + Start = Parser.getTok().getLoc(); + assert (Parser.getTok().is(AsmToken::Identifier) && "Expected an identifier"); + + SMLoc End; + const MCExpr *Val; + if (getParser().ParseExpression(Val, End)) + return 0; + + End = Parser.getTok().getLoc(); + + unsigned Size = 0; + if (const MCSymbolRefExpr *SymRef = dyn_cast(Val)) { + const MCSymbol &Sym = SymRef->getSymbol(); + // FIXME: The SemaLookup will fail if the name is anything other then an + // identifier. + // FIXME: Pass a valid SMLoc. + if (!SemaCallback->LookupInlineAsmIdentifier(Sym.getName(), NULL, Size)) + return ErrorOperand(Start, "Unable to lookup TYPE of expr!"); + + Size /= 8; // Size is in terms of bits, but we want bytes in the context. + } + + // Rewrite the type operator and the C or C++ type or variable in terms of an + // immediate. E.g. TYPE foo -> $$4 + unsigned Len = End.getPointer() - TypeLoc.getPointer(); + InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, Size)); + + const MCExpr *Imm = MCConstantExpr::Create(Size, getContext()); + return X86Operand::CreateImm(Imm, Start, End, /*NeedAsmRewrite*/false); } X86Operand *X86AsmParser::ParseIntelOperand() { SMLoc Start = Parser.getTok().getLoc(), End; + // offset operator. + StringRef AsmTokStr = Parser.getTok().getString(); + if ((AsmTokStr == "offset" || AsmTokStr == "OFFSET") && + isParsingInlineAsm()) + return ParseIntelOffsetOfOperator(Start); + + // Type directive. + if ((AsmTokStr == "type" || AsmTokStr == "TYPE") && + isParsingInlineAsm()) + return ParseIntelTypeOperator(Start); + + // Unsupported directives. + if (isParsingIntelSyntax() && + (AsmTokStr == "size" || AsmTokStr == "SIZE" || + AsmTokStr == "length" || AsmTokStr == "LENGTH")) + return ErrorOperand(Start, "Unsupported directive!"); + // immediate. if (getLexer().is(AsmToken::Integer) || getLexer().is(AsmToken::Real) || getLexer().is(AsmToken::Minus)) { @@ -769,12 +1003,17 @@ X86Operand *X86AsmParser::ParseIntelOperand() { // register unsigned RegNo = 0; if (!ParseRegister(RegNo, Start, End)) { - End = Parser.getTok().getLoc(); - return X86Operand::CreateReg(RegNo, Start, End); + // If this is a segment register followed by a ':', then this is the start + // of a memory reference, otherwise this is a normal register reference. + if (getLexer().isNot(AsmToken::Colon)) + return X86Operand::CreateReg(RegNo, Start, Parser.getTok().getLoc()); + + getParser().Lex(); // Eat the colon. 
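In other words, TYPE collapses to an immediate: the front end reports the entity's size in bits, the parser divides by eight, and an AOK_Imm rewrite replaces the original text. A rough sketch of the bookkeeping, with Rewrite standing in for the real AsmRewrite record and the offsets chosen for illustration:

#include <cstdint>
#include <iostream>
#include <string>

struct Rewrite { size_t Loc, Len; uint64_t Val; };

// Queue a rewrite that turns "TYPE foo" into a byte-sized immediate.
static Rewrite rewriteTypeOperator(size_t TypeLoc, size_t ExprEnd,
                                   uint64_t SizeInBits) {
  return {TypeLoc, ExprEnd - TypeLoc, SizeInBits / 8};   // bits -> bytes
}

int main() {
  std::string Asm = "mov eax, TYPE foo";
  Rewrite R = rewriteTypeOperator(/*TypeLoc=*/9, Asm.size(), /*int is*/ 32);
  std::cout << "replace " << R.Len << " chars at offset " << R.Loc
            << " with immediate " << R.Val << "\n";      // immediate 4
}
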
+ return ParseIntelMemOperand(RegNo, Start); } // mem operand - return ParseIntelMemOperand(); + return ParseIntelMemOperand(0, Start); } X86Operand *X86AsmParser::ParseATTOperand() { @@ -972,8 +1211,9 @@ X86Operand *X86AsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { } bool X86AsmParser:: -ParseInstruction(StringRef Name, SMLoc NameLoc, +ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, SmallVectorImpl &Operands) { + InstInfo = &Info; StringRef PatchedName = Name; // FIXME: Hack to recognize setneb as setne. @@ -1509,28 +1749,18 @@ processInstruction(MCInst &Inst, } bool X86AsmParser:: -MatchAndEmitInstruction(SMLoc IDLoc, +MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, SmallVectorImpl &Operands, - MCStreamer &Out) { - SmallVector Insts; - bool Error = MatchInstruction(IDLoc, Operands, Insts); - if (!Error) - for (unsigned i = 0, e = Insts.size(); i != e; ++i) - Out.EmitInstruction(Insts[i]); - return Error; -} - -bool X86AsmParser:: -MatchInstruction(SMLoc IDLoc, - SmallVectorImpl &Operands, - SmallVectorImpl &MCInsts) { + MCStreamer &Out, unsigned &ErrorInfo, + bool MatchingInlineAsm) { assert(!Operands.empty() && "Unexpect empty operand list!"); X86Operand *Op = static_cast(Operands[0]); assert(Op->isToken() && "Leading operand should always be a mnemonic!"); + ArrayRef EmptyRanges = ArrayRef(); // First, handle aliases that expand to multiple instructions. // FIXME: This should be replaced with a real .td file alias mechanism. - // Also, MatchInstructionImpl should do actually *do* the EmitInstruction + // Also, MatchInstructionImpl should actually *do* the EmitInstruction // call. if (Op->getToken() == "fstsw" || Op->getToken() == "fstcw" || Op->getToken() == "fstsww" || Op->getToken() == "fstcww" || @@ -1539,7 +1769,8 @@ MatchInstruction(SMLoc IDLoc, MCInst Inst; Inst.setOpcode(X86::WAIT); Inst.setLoc(IDLoc); - MCInsts.push_back(Inst); + if (!MatchingInlineAsm) + Out.EmitInstruction(Inst); const char *Repl = StringSwitch(Op->getToken()) @@ -1558,28 +1789,30 @@ MatchInstruction(SMLoc IDLoc, } bool WasOriginallyInvalidOperand = false; - unsigned OrigErrorInfo; MCInst Inst; // First, try a direct match. - switch (MatchInstructionImpl(Operands, Inst, OrigErrorInfo, + switch (MatchInstructionImpl(Operands, Inst, + ErrorInfo, MatchingInlineAsm, isParsingIntelSyntax())) { default: break; case Match_Success: // Some instructions need post-processing to, for example, tweak which // encoding is selected. Loop on it while changes happen so the // individual transformations can chain off each other. 
- while (processInstruction(Inst, Operands)) - ; + if (!MatchingInlineAsm) + while (processInstruction(Inst, Operands)) + ; Inst.setLoc(IDLoc); - MCInsts.push_back(Inst); + if (!MatchingInlineAsm) + Out.EmitInstruction(Inst); + Opcode = Inst.getOpcode(); return false; case Match_MissingFeature: - Error(IDLoc, "instruction requires a CPU feature not currently enabled"); + Error(IDLoc, "instruction requires a CPU feature not currently enabled", + EmptyRanges, MatchingInlineAsm); return true; - case Match_ConversionFail: - return Error(IDLoc, "unable to convert operands to instruction"); case Match_InvalidOperand: WasOriginallyInvalidOperand = true; break; @@ -1612,13 +1845,17 @@ MatchInstruction(SMLoc IDLoc, unsigned ErrorInfoIgnore; unsigned Match1, Match2, Match3, Match4; - Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore); + Match1 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore, + isParsingIntelSyntax()); Tmp[Base.size()] = Suffixes[1]; - Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore); + Match2 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore, + isParsingIntelSyntax()); Tmp[Base.size()] = Suffixes[2]; - Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore); + Match3 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore, + isParsingIntelSyntax()); Tmp[Base.size()] = Suffixes[3]; - Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore); + Match4 = MatchInstructionImpl(Operands, Inst, ErrorInfoIgnore, + isParsingIntelSyntax()); // Restore the old token. Op->setTokenValue(Base); @@ -1631,7 +1868,9 @@ MatchInstruction(SMLoc IDLoc, (Match3 == Match_Success) + (Match4 == Match_Success); if (NumSuccessfulMatches == 1) { Inst.setLoc(IDLoc); - MCInsts.push_back(Inst); + if (!MatchingInlineAsm) + Out.EmitInstruction(Inst); + Opcode = Inst.getOpcode(); return false; } @@ -1658,7 +1897,7 @@ MatchInstruction(SMLoc IDLoc, OS << "'" << Base << MatchChars[i] << "'"; } OS << ")"; - Error(IDLoc, OS.str()); + Error(IDLoc, OS.str(), EmptyRanges, MatchingInlineAsm); return true; } @@ -1669,31 +1908,36 @@ MatchInstruction(SMLoc IDLoc, if ((Match1 == Match_MnemonicFail) && (Match2 == Match_MnemonicFail) && (Match3 == Match_MnemonicFail) && (Match4 == Match_MnemonicFail)) { if (!WasOriginallyInvalidOperand) { + ArrayRef Ranges = MatchingInlineAsm ? EmptyRanges : + Op->getLocRange(); return Error(IDLoc, "invalid instruction mnemonic '" + Base + "'", - Op->getLocRange()); + Ranges, MatchingInlineAsm); } // Recover location info for the operand if we know which was the problem. - if (OrigErrorInfo != ~0U) { - if (OrigErrorInfo >= Operands.size()) - return Error(IDLoc, "too few operands for instruction"); + if (ErrorInfo != ~0U) { + if (ErrorInfo >= Operands.size()) + return Error(IDLoc, "too few operands for instruction", + EmptyRanges, MatchingInlineAsm); - X86Operand *Operand = (X86Operand*)Operands[OrigErrorInfo]; + X86Operand *Operand = (X86Operand*)Operands[ErrorInfo]; if (Operand->getStartLoc().isValid()) { SMRange OperandRange = Operand->getLocRange(); return Error(Operand->getStartLoc(), "invalid operand for instruction", - OperandRange); + OperandRange, MatchingInlineAsm); } } - return Error(IDLoc, "invalid operand for instruction"); + return Error(IDLoc, "invalid operand for instruction", EmptyRanges, + MatchingInlineAsm); } // If one instruction matched with a missing feature, report this as a // missing feature. 
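The retry loop above re-matches the mnemonic with each candidate size suffix and only accepts the result when exactly one variant succeeds. A toy illustration of that strategy; the mnemonic and the table below are made up, and the real candidates come from the generated MatchInstructionImpl:

#include <iostream>
#include <set>
#include <string>

static const std::set<std::string> Known = {"frobl"};   // toy matcher table

int main() {
  const std::string Base = "frob";          // ambiguous: no size suffix given
  const char Suffixes[] = {'b', 'w', 'l', 'q'};
  unsigned Matches = 0;
  std::string Hit;
  for (char S : Suffixes)
    if (Known.count(Base + S)) { ++Matches; Hit = Base + S; }
  if (Matches == 1)
    std::cout << "accept unique match: " << Hit << "\n";
  else
    std::cout << Matches << " matches; emit the ambiguity/failure diagnostic\n";
}
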
if ((Match1 == Match_MissingFeature) + (Match2 == Match_MissingFeature) + (Match3 == Match_MissingFeature) + (Match4 == Match_MissingFeature) == 1){ - Error(IDLoc, "instruction requires a CPU feature not currently enabled"); + Error(IDLoc, "instruction requires a CPU feature not currently enabled", + EmptyRanges, MatchingInlineAsm); return true; } @@ -1701,12 +1945,14 @@ MatchInstruction(SMLoc IDLoc, // operand failure. if ((Match1 == Match_InvalidOperand) + (Match2 == Match_InvalidOperand) + (Match3 == Match_InvalidOperand) + (Match4 == Match_InvalidOperand) == 1){ - Error(IDLoc, "invalid operand for instruction"); + Error(IDLoc, "invalid operand for instruction", EmptyRanges, + MatchingInlineAsm); return true; } // If all of these were an outright failure, report it in a useless way. - Error(IDLoc, "unknown use of instruction mnemonic without a size suffix"); + Error(IDLoc, "unknown use of instruction mnemonic without a size suffix", + EmptyRanges, MatchingInlineAsm); return true; } @@ -1717,7 +1963,10 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) { return ParseDirectiveWord(2, DirectiveID.getLoc()); else if (IDVal.startswith(".code")) return ParseDirectiveCode(IDVal, DirectiveID.getLoc()); - else if (IDVal.startswith(".intel_syntax")) { + else if (IDVal.startswith(".att_syntax")) { + getParser().setAssemblerDialect(0); + return false; + } else if (IDVal.startswith(".intel_syntax")) { getParser().setAssemblerDialect(1); if (getLexer().isNot(AsmToken::EndOfStatement)) { if(Parser.getTok().getString() == "noprefix") { diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt index b886d46..f4d03a6 100644 --- a/lib/Target/X86/CMakeLists.txt +++ b/lib/Target/X86/CMakeLists.txt @@ -17,7 +17,6 @@ set(sources X86AsmPrinter.cpp X86COFFMachineModuleInfo.cpp X86CodeEmitter.cpp - X86ELFWriterInfo.cpp X86FastISel.cpp X86FloatingPoint.cpp X86FrameLowering.cpp diff --git a/lib/Target/X86/Disassembler/X86Disassembler.cpp b/lib/Target/X86/Disassembler/X86Disassembler.cpp index 5039887..f136927 100644 --- a/lib/Target/X86/Disassembler/X86Disassembler.cpp +++ b/lib/Target/X86/Disassembler/X86Disassembler.cpp @@ -44,7 +44,7 @@ void x86DisassemblerDebug(const char *file, dbgs() << file << ":" << line << ": " << s; } -const char *x86DisassemblerGetInstrName(unsigned Opcode, void *mii) { +const char *x86DisassemblerGetInstrName(unsigned Opcode, const void *mii) { const MCInstrInfo *MII = static_cast(mii); return MII->getName(Opcode); } @@ -95,8 +95,8 @@ const EDInstInfo *X86GenericDisassembler::getEDInfo() const { /// be a pointer to a MemoryObject. /// @param byte - A pointer to the byte to be read. /// @param address - The address to be read. 
-static int regionReader(void* arg, uint8_t* byte, uint64_t address) { - MemoryObject* region = static_cast(arg); +static int regionReader(const void* arg, uint8_t* byte, uint64_t address) { + const MemoryObject* region = static_cast(arg); return region->readByte(address, byte); } @@ -135,10 +135,10 @@ X86GenericDisassembler::getInstruction(MCInst &instr, int ret = decodeInstruction(&internalInstr, regionReader, - (void*)®ion, + (const void*)®ion, loggerFn, (void*)&vStream, - (void*)MII, + (const void*)MII, address, fMode); @@ -379,6 +379,8 @@ static void translateImmediate(MCInst &mcInst, uint64_t immediate, } switch (type) { + case TYPE_XMM32: + case TYPE_XMM64: case TYPE_XMM128: mcInst.addOperand(MCOperand::CreateReg(X86::XMM0 + (immediate >> 4))); return; diff --git a/lib/Target/X86/Disassembler/X86Disassembler.h b/lib/Target/X86/Disassembler/X86Disassembler.h index 0dbfa26..981701f 100644 --- a/lib/Target/X86/Disassembler/X86Disassembler.h +++ b/lib/Target/X86/Disassembler/X86Disassembler.h @@ -78,7 +78,7 @@ uint16_t operands; #define INSTRUCTION_IDS \ - unsigned instructionIDs; + uint16_t instructionIDs; #include "X86DisassemblerDecoderCommon.h" diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c index 0c92912..85d8a99 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.c @@ -138,6 +138,10 @@ static InstrUID decode(OpcodeType type, if (modFromModRM(modRM) == 0x3) return modRMTable[dec->instructionIDs+((modRM & 0x38) >> 3)+8]; return modRMTable[dec->instructionIDs+((modRM & 0x38) >> 3)]; + case MODRM_SPLITMISC: + if (modFromModRM(modRM) == 0x3) + return modRMTable[dec->instructionIDs+(modRM & 0x3f)+8]; + return modRMTable[dec->instructionIDs+((modRM & 0x38) >> 3)]; case MODRM_FULL: return modRMTable[dec->instructionIDs+modRM]; } @@ -200,7 +204,7 @@ static void unconsumeByte(struct InternalInstruction* insn) { insn->readerCursor + offset); \ if (ret) \ return ret; \ - combined = combined | ((type)byte << ((type)offset * 8)); \ + combined = combined | ((uint64_t)byte << (offset * 8)); \ } \ *ptr = combined; \ insn->readerCursor += sizeof(type); \ @@ -690,7 +694,7 @@ static int getIDWithAttrMask(uint16_t* instructionID, * @param orig - The instruction that is not 16-bit * @param equiv - The instruction that is 16-bit */ -static BOOL is16BitEquvalent(const char* orig, const char* equiv) { +static BOOL is16BitEquivalent(const char* orig, const char* equiv) { off_t i; for (i = 0;; i++) { @@ -719,7 +723,7 @@ static BOOL is16BitEquvalent(const char* orig, const char* equiv) { * @return - 0 if the ModR/M could be read when needed or was not needed; * nonzero otherwise. 
*/ -static int getID(struct InternalInstruction* insn, void *miiArg) { +static int getID(struct InternalInstruction* insn, const void *miiArg) { uint8_t attrMask; uint16_t instructionID; @@ -856,7 +860,7 @@ static int getID(struct InternalInstruction* insn, void *miiArg) { specWithOpSizeName = x86DisassemblerGetInstrName(instructionIDWithOpsize, miiArg); - if (is16BitEquvalent(specName, specWithOpSizeName)) { + if (is16BitEquivalent(specName, specWithOpSizeName)) { insn->instructionID = instructionIDWithOpsize; insn->spec = specifierForUID(instructionIDWithOpsize); } else { @@ -1621,10 +1625,10 @@ static int readOperands(struct InternalInstruction* insn) { */ int decodeInstruction(struct InternalInstruction* insn, byteReader_t reader, - void* readerArg, + const void* readerArg, dlog_t logger, void* loggerArg, - void* miiArg, + const void* miiArg, uint64_t startLoc, DisassemblerMode mode) { memset(insn, 0, sizeof(struct InternalInstruction)); diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h index 797703f..407ead3 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h @@ -24,7 +24,7 @@ extern "C" { uint16_t operands; #define INSTRUCTION_IDS \ - unsigned instructionIDs; + uint16_t instructionIDs; #include "X86DisassemblerDecoderCommon.h" @@ -403,7 +403,7 @@ typedef uint8_t BOOL; * be read from. * @return - -1 if the byte cannot be read for any reason; 0 otherwise. */ -typedef int (*byteReader_t)(void* arg, uint8_t* byte, uint64_t address); +typedef int (*byteReader_t)(const void* arg, uint8_t* byte, uint64_t address); /* * dlog_t - Type for the logging function that the consumer can provide to @@ -422,7 +422,7 @@ struct InternalInstruction { /* Reader interface (C) */ byteReader_t reader; /* Opaque value passed to the reader */ - void* readerArg; + const void* readerArg; /* The address of the next byte to read via the reader */ uint64_t readerCursor; @@ -561,10 +561,10 @@ struct InternalInstruction { */ int decodeInstruction(struct InternalInstruction* insn, byteReader_t reader, - void* readerArg, + const void* readerArg, dlog_t logger, void* loggerArg, - void* miiArg, + const void* miiArg, uint64_t startLoc, DisassemblerMode mode); @@ -579,7 +579,7 @@ void x86DisassemblerDebug(const char *file, unsigned line, const char *s); -const char *x86DisassemblerGetInstrName(unsigned Opcode, void *mii); +const char *x86DisassemblerGetInstrName(unsigned Opcode, const void *mii); #ifdef __cplusplus } diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h index b0a0e1e..23dfe4b 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoderCommon.h @@ -160,6 +160,10 @@ typedef uint16_t InstrUID; * MODRM_SPLITRM - If the ModR/M byte is between 0x00 and 0xbf, the opcode * corresponds to one instruction; otherwise, it corresponds to * a different instruction. + * MODRM_SPLITMISC- If the ModR/M byte is between 0x00 and 0xbf, ModR/M byte + * divided by 8 is used to select instruction; otherwise, each + * value of the ModR/M byte could correspond to a different + * instruction. * MODRM_SPLITREG - ModR/M byte divided by 8 is used to select instruction. 
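Put differently, a MODRM_SPLITMISC table has eight reg-keyed entries for the memory forms followed by entries keyed by the full low six bits for the register forms. A small sketch of the index computation used in the decoder above:

#include <cstdint>
#include <iostream>

static uint8_t modFromModRM(uint8_t M) { return (M & 0xC0) >> 6; }
static uint8_t regFromModRM(uint8_t M) { return (M & 0x38) >> 3; }

// Memory forms (mod != 3) share an entry per reg field; register forms
// (mod == 3) get one entry per low-6-bit value, stored after the first 8.
static unsigned splitMiscIndex(uint8_t ModRM) {
  if (modFromModRM(ModRM) == 0x3)
    return (ModRM & 0x3f) + 8;
  return regFromModRM(ModRM);
}

int main() {
  std::cout << splitMiscIndex(0x10) << "\n";   // mod=00, reg=2 -> entry 2
  std::cout << splitMiscIndex(0xD1) << "\n";   // mod=11 -> 0x11 + 8 = entry 25
}
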
This corresponds to instructions that use reg field as opcode * MODRM_FULL - Potentially, each value of the ModR/M byte could correspond @@ -169,6 +173,7 @@ typedef uint16_t InstrUID; #define MODRMTYPES \ ENUM_ENTRY(MODRM_ONEENTRY) \ ENUM_ENTRY(MODRM_SPLITRM) \ + ENUM_ENTRY(MODRM_SPLITMISC) \ ENUM_ENTRY(MODRM_SPLITREG) \ ENUM_ENTRY(MODRM_FULL) diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp index 5118e4c..a4bd114 100644 --- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp +++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.cpp @@ -15,6 +15,7 @@ #define DEBUG_TYPE "asm-printer" #include "X86ATTInstPrinter.h" #include "X86InstComments.h" +#include "MCTargetDesc/X86BaseInfo.h" #include "MCTargetDesc/X86MCTargetDesc.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCAsmInfo.h" @@ -33,11 +34,19 @@ using namespace llvm; void X86ATTInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const { - OS << '%' << getRegisterName(RegNo); + OS << markup(""); } void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) { + const MCInstrDesc &Desc = MII.get(MI->getOpcode()); + uint64_t TSFlags = Desc.TSFlags; + + if (TSFlags & X86II::LOCK) + OS << "\tlock\n"; + // Try to print any aliases first. if (!printAliasInstr(MI, OS)) printInstruction(MI, OS); @@ -52,7 +61,8 @@ void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O) { - switch (MI->getOperand(Op).getImm()) { + int64_t Imm = MI->getOperand(Op).getImm() & 0xf; + switch (Imm) { default: llvm_unreachable("Invalid ssecc argument!"); case 0: O << "eq"; break; case 1: O << "lt"; break; @@ -70,6 +80,30 @@ void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op, case 0xd: O << "ge"; break; case 0xe: O << "gt"; break; case 0xf: O << "true"; break; + } +} + +void X86ATTInstPrinter::printAVXCC(const MCInst *MI, unsigned Op, + raw_ostream &O) { + int64_t Imm = MI->getOperand(Op).getImm() & 0x1f; + switch (Imm) { + default: llvm_unreachable("Invalid avxcc argument!"); + case 0: O << "eq"; break; + case 1: O << "lt"; break; + case 2: O << "le"; break; + case 3: O << "unord"; break; + case 4: O << "neq"; break; + case 5: O << "nlt"; break; + case 6: O << "nle"; break; + case 7: O << "ord"; break; + case 8: O << "eq_uq"; break; + case 9: O << "nge"; break; + case 0xa: O << "ngt"; break; + case 0xb: O << "false"; break; + case 0xc: O << "neq_oq"; break; + case 0xd: O << "ge"; break; + case 0xe: O << "gt"; break; + case 0xf: O << "true"; break; case 0x10: O << "eq_os"; break; case 0x11: O << "lt_oq"; break; case 0x12: O << "le_oq"; break; @@ -89,12 +123,12 @@ void X86ATTInstPrinter::printSSECC(const MCInst *MI, unsigned Op, } } -/// print_pcrel_imm - This is used to print an immediate value that ends up +/// printPCRelImm - This is used to print an immediate value that ends up /// being encoded as a pc-relative value (e.g. for jumps and calls). These /// print slightly differently than normal immediates. For example, a $ is not /// emitted. 
-void X86ATTInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { +void X86ATTInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isImm()) O << Op.getImm(); @@ -119,17 +153,21 @@ void X86ATTInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isReg()) { - O << '%' << getRegisterName(Op.getReg()); + printRegName(O, Op.getReg()); } else if (Op.isImm()) { // Print X86 immediates as signed values. - O << '$' << (int64_t)Op.getImm(); + O << markup(""); if (CommentStream && (Op.getImm() > 255 || Op.getImm() < -256)) *CommentStream << format("imm = 0x%" PRIX64 "\n", (uint64_t)Op.getImm()); } else { assert(Op.isExpr() && "unknown operand kind in printOperand"); - O << '$' << *Op.getExpr(); + O << markup(""); } } @@ -140,6 +178,8 @@ void X86ATTInstPrinter::printMemReference(const MCInst *MI, unsigned Op, const MCOperand &DispSpec = MI->getOperand(Op+3); const MCOperand &SegReg = MI->getOperand(Op+4); + O << markup("getOperand(Op+1).getImm(); - if (ScaleVal != 1) - O << ',' << ScaleVal; + if (ScaleVal != 1) { + O << ',' + << markup(""); + } } O << ')'; } + + O << markup(">"); } diff --git a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h index 2e00bff..8e09183 100644 --- a/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h +++ b/lib/Target/X86/InstPrinter/X86ATTInstPrinter.h @@ -40,7 +40,8 @@ public: void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &OS); void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &OS); void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &OS); - void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &OS); + void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &OS); + void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &OS); void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { printMemReference(MI, OpNo, O); diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp index 4ea662c..d67aec7 100644 --- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp +++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.cpp @@ -1,4 +1,4 @@ -//===-- X86IntelInstPrinter.cpp - AT&T assembly instruction printing ------===// +//===-- X86IntelInstPrinter.cpp - Intel assembly instruction printing -----===// // // The LLVM Compiler Infrastructure // @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This file includes code for rendering MCInst instances as AT&T-style +// This file includes code for rendering MCInst instances as Intel-style // assembly. // //===----------------------------------------------------------------------===// @@ -15,6 +15,7 @@ #define DEBUG_TYPE "asm-printer" #include "X86IntelInstPrinter.h" #include "X86InstComments.h" +#include "MCTargetDesc/X86BaseInfo.h" #include "MCTargetDesc/X86MCTargetDesc.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCExpr.h" @@ -32,6 +33,12 @@ void X86IntelInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const { void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) { + const MCInstrDesc &Desc = MII.get(MI->getOpcode()); + uint64_t TSFlags = Desc.TSFlags; + + if (TSFlags & X86II::LOCK) + OS << "\tlock\n"; + printInstruction(MI, OS); // Next always print the annotation. 
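Both printers now mask the comparison immediate before the lookup, so only the bits that actually select a predicate matter: four for the SSE forms, five for the AVX forms. A compact sketch of that masking with the first eight predicate names (the table is abbreviated for illustration):

#include <cstdint>
#include <iostream>
#include <string>

static std::string ccName(int64_t Imm, bool IsAVX) {
  static const char *Names[] = {"eq", "lt", "le", "unord",
                                "neq", "nlt", "nle", "ord"};
  int64_t Masked = Imm & (IsAVX ? 0x1f : 0xf);   // drop the irrelevant bits
  return Masked < 8 ? Names[Masked] : "<higher predicate>";
}

int main() {
  std::cout << ccName(0x02, false) << "\n";  // "le"
  std::cout << ccName(0x22, true) << "\n";   // 0x22 & 0x1f = 2 -> "le"
}
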
@@ -44,7 +51,8 @@ void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS, void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O) { - switch (MI->getOperand(Op).getImm()) { + int64_t Imm = MI->getOperand(Op).getImm() & 0xf; + switch (Imm) { default: llvm_unreachable("Invalid ssecc argument!"); case 0: O << "eq"; break; case 1: O << "lt"; break; @@ -62,6 +70,30 @@ void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op, case 0xd: O << "ge"; break; case 0xe: O << "gt"; break; case 0xf: O << "true"; break; + } +} + +void X86IntelInstPrinter::printAVXCC(const MCInst *MI, unsigned Op, + raw_ostream &O) { + int64_t Imm = MI->getOperand(Op).getImm() & 0x1f; + switch (Imm) { + default: llvm_unreachable("Invalid avxcc argument!"); + case 0: O << "eq"; break; + case 1: O << "lt"; break; + case 2: O << "le"; break; + case 3: O << "unord"; break; + case 4: O << "neq"; break; + case 5: O << "nlt"; break; + case 6: O << "nle"; break; + case 7: O << "ord"; break; + case 8: O << "eq_uq"; break; + case 9: O << "nge"; break; + case 0xa: O << "ngt"; break; + case 0xb: O << "false"; break; + case 0xc: O << "neq_oq"; break; + case 0xd: O << "ge"; break; + case 0xe: O << "gt"; break; + case 0xf: O << "true"; break; case 0x10: O << "eq_os"; break; case 0x11: O << "lt_oq"; break; case 0x12: O << "le_oq"; break; @@ -78,14 +110,13 @@ void X86IntelInstPrinter::printSSECC(const MCInst *MI, unsigned Op, case 0x1d: O << "ge_oq"; break; case 0x1e: O << "gt_oq"; break; case 0x1f: O << "true_us"; break; - } } -/// print_pcrel_imm - This is used to print an immediate value that ends up +/// printPCRelImm - This is used to print an immediate value that ends up /// being encoded as a pc-relative value. -void X86IntelInstPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo, - raw_ostream &O) { +void X86IntelInstPrinter::printPCRelImm(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { const MCOperand &Op = MI->getOperand(OpNo); if (Op.isImm()) O << Op.getImm(); @@ -153,8 +184,7 @@ void X86IntelInstPrinter::printMemReference(const MCInst *MI, unsigned Op, printOperand(MI, Op+2, O); NeedPlus = true; } - - + if (!DispSpec.isImm()) { if (NeedPlus) O << " + "; assert(DispSpec.isExpr() && "non-immediate displacement for LEA?"); diff --git a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h index 4f5938d..bb769eb 100644 --- a/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h +++ b/lib/Target/X86/InstPrinter/X86IntelInstPrinter.h @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This class prints an X86 MCInst to intel style .s file syntax. +// This class prints an X86 MCInst to Intel style .s file syntax. 
// //===----------------------------------------------------------------------===// @@ -37,7 +37,8 @@ public: void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printMemReference(const MCInst *MI, unsigned Op, raw_ostream &O); void printSSECC(const MCInst *MI, unsigned Op, raw_ostream &O); - void print_pcrel_imm(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printAVXCC(const MCInst *MI, unsigned Op, raw_ostream &O); + void printPCRelImm(const MCInst *MI, unsigned OpNo, raw_ostream &O); void printopaquemem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { O << "OPAQUE PTR "; diff --git a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp index 32e40fe..467edad 100644 --- a/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp +++ b/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp @@ -66,9 +66,10 @@ public: }; class X86AsmBackend : public MCAsmBackend { + StringRef CPU; public: - X86AsmBackend(const Target &T) - : MCAsmBackend() {} + X86AsmBackend(const Target &T, StringRef _CPU) + : MCAsmBackend(), CPU(_CPU) {} unsigned getNumFixupKinds() const { return X86::NumTargetFixupKinds; @@ -278,9 +279,9 @@ void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const { Res.setOpcode(RelaxedOp); } -/// writeNopData - Write optimal nops to the output file for the \arg Count +/// writeNopData - Write optimal nops to the output file for the \p Count /// bytes. This returns the number of bytes written. It may return 0 if -/// the \arg Count is more than the maximum optimal nops. +/// the \p Count is more than the maximum optimal nops. bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { static const uint8_t Nops[10][10] = { // nop @@ -305,6 +306,15 @@ bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, }; + // This CPU doesnt support long nops. If needed add more. + // FIXME: Can we get this from the subtarget somehow? + if (CPU == "generic" || CPU == "i386" || CPU == "i486" || CPU == "i586" || + CPU == "pentium" || CPU == "pentium-mmx" || CPU == "geode") { + for (uint64_t i = 0; i < Count; ++i) + OW->Write8(0x90); + return true; + } + // Write an optimal sequence for the first 15 bytes. const uint64_t OptimalCount = (Count < 16) ? Count : 15; const uint64_t Prefixes = OptimalCount <= 10 ? 
0 : OptimalCount - 10; @@ -327,8 +337,8 @@ namespace { class ELFX86AsmBackend : public X86AsmBackend { public: uint8_t OSABI; - ELFX86AsmBackend(const Target &T, uint8_t _OSABI) - : X86AsmBackend(T), OSABI(_OSABI) { + ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU) + : X86AsmBackend(T, CPU), OSABI(_OSABI) { HasReliableSymbolDifference = true; } @@ -340,21 +350,21 @@ public: class ELFX86_32AsmBackend : public ELFX86AsmBackend { public: - ELFX86_32AsmBackend(const Target &T, uint8_t OSABI) - : ELFX86AsmBackend(T, OSABI) {} + ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) + : ELFX86AsmBackend(T, OSABI, CPU) {} MCObjectWriter *createObjectWriter(raw_ostream &OS) const { - return createX86ELFObjectWriter(OS, /*Is64Bit*/ false, OSABI); + return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386); } }; class ELFX86_64AsmBackend : public ELFX86AsmBackend { public: - ELFX86_64AsmBackend(const Target &T, uint8_t OSABI) - : ELFX86AsmBackend(T, OSABI) {} + ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) + : ELFX86AsmBackend(T, OSABI, CPU) {} MCObjectWriter *createObjectWriter(raw_ostream &OS) const { - return createX86ELFObjectWriter(OS, /*Is64Bit*/ true, OSABI); + return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64); } }; @@ -362,8 +372,8 @@ class WindowsX86AsmBackend : public X86AsmBackend { bool Is64Bit; public: - WindowsX86AsmBackend(const Target &T, bool is64Bit) - : X86AsmBackend(T) + WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU) + : X86AsmBackend(T, CPU) , Is64Bit(is64Bit) { } @@ -374,14 +384,14 @@ public: class DarwinX86AsmBackend : public X86AsmBackend { public: - DarwinX86AsmBackend(const Target &T) - : X86AsmBackend(T) { } + DarwinX86AsmBackend(const Target &T, StringRef CPU) + : X86AsmBackend(T, CPU) { } }; class DarwinX86_32AsmBackend : public DarwinX86AsmBackend { public: - DarwinX86_32AsmBackend(const Target &T) - : DarwinX86AsmBackend(T) {} + DarwinX86_32AsmBackend(const Target &T, StringRef CPU) + : DarwinX86AsmBackend(T, CPU) {} MCObjectWriter *createObjectWriter(raw_ostream &OS) const { return createX86MachObjectWriter(OS, /*Is64Bit=*/false, @@ -392,8 +402,8 @@ public: class DarwinX86_64AsmBackend : public DarwinX86AsmBackend { public: - DarwinX86_64AsmBackend(const Target &T) - : DarwinX86AsmBackend(T) { + DarwinX86_64AsmBackend(const Target &T, StringRef CPU) + : DarwinX86AsmBackend(T, CPU) { HasReliableSymbolDifference = true; } @@ -439,28 +449,28 @@ public: } // end anonymous namespace -MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, StringRef TT, StringRef CPU) { Triple TheTriple(TT); if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) - return new DarwinX86_32AsmBackend(T); + return new DarwinX86_32AsmBackend(T, CPU); - if (TheTriple.isOSWindows()) - return new WindowsX86AsmBackend(T, false); + if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF) + return new WindowsX86AsmBackend(T, false, CPU); uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); - return new ELFX86_32AsmBackend(T, OSABI); + return new ELFX86_32AsmBackend(T, OSABI, CPU); } -MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT) { +MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT, StringRef CPU) { Triple TheTriple(TT); if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) - return new 
DarwinX86_64AsmBackend(T); + return new DarwinX86_64AsmBackend(T, CPU); - if (TheTriple.isOSWindows()) - return new WindowsX86AsmBackend(T, true); + if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF) + return new WindowsX86AsmBackend(T, true, CPU); uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); - return new ELFX86_64AsmBackend(T, OSABI); + return new ELFX86_64AsmBackend(T, OSABI, CPU); } diff --git a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h index db597fb..7ea1961 100644 --- a/lib/Target/X86/MCTargetDesc/X86BaseInfo.h +++ b/lib/Target/X86/MCTargetDesc/X86BaseInfo.h @@ -276,9 +276,9 @@ namespace X86II { MRM_C1 = 33, MRM_C2 = 34, MRM_C3 = 35, MRM_C4 = 36, MRM_C8 = 37, MRM_C9 = 38, MRM_E8 = 39, MRM_F0 = 40, MRM_F8 = 41, MRM_F9 = 42, MRM_D0 = 45, MRM_D1 = 46, - MRM_D4 = 47, MRM_D8 = 48, MRM_D9 = 49, MRM_DA = 50, - MRM_DB = 51, MRM_DC = 52, MRM_DD = 53, MRM_DE = 54, - MRM_DF = 55, + MRM_D4 = 47, MRM_D5 = 48, MRM_D8 = 49, MRM_D9 = 50, + MRM_DA = 51, MRM_DB = 52, MRM_DC = 53, MRM_DD = 54, + MRM_DE = 55, MRM_DF = 56, /// RawFrmImm8 - This is used for the ENTER instruction, which has two /// immediates, the first of which is a 16-bit immediate (specified by @@ -580,11 +580,11 @@ namespace X86II { case X86II::MRM_E8: case X86II::MRM_F0: case X86II::MRM_F8: case X86II::MRM_F9: case X86II::MRM_D0: case X86II::MRM_D1: - case X86II::MRM_D4: case X86II::MRM_D8: - case X86II::MRM_D9: case X86II::MRM_DA: - case X86II::MRM_DB: case X86II::MRM_DC: - case X86II::MRM_DD: case X86II::MRM_DE: - case X86II::MRM_DF: + case X86II::MRM_D4: case X86II::MRM_D5: + case X86II::MRM_D8: case X86II::MRM_D9: + case X86II::MRM_DA: case X86II::MRM_DB: + case X86II::MRM_DC: case X86II::MRM_DD: + case X86II::MRM_DE: case X86II::MRM_DF: return -1; } } diff --git a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp index 5a42a80..de80dd8 100644 --- a/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp @@ -20,7 +20,7 @@ using namespace llvm; namespace { class X86ELFObjectWriter : public MCELFObjectTargetWriter { public: - X86ELFObjectWriter(bool is64Bit, uint8_t OSABI); + X86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine); virtual ~X86ELFObjectWriter(); protected: @@ -30,10 +30,11 @@ namespace { }; } -X86ELFObjectWriter::X86ELFObjectWriter(bool Is64Bit, uint8_t OSABI) - : MCELFObjectTargetWriter(Is64Bit, OSABI, - Is64Bit ? ELF::EM_X86_64 : ELF::EM_386, - /*HasRelocationAddend*/ Is64Bit) {} +X86ELFObjectWriter::X86ELFObjectWriter(bool IsELF64, uint8_t OSABI, + uint16_t EMachine) + : MCELFObjectTargetWriter(IsELF64, OSABI, EMachine, + // Only i386 uses Rel instead of RelA. + /*HasRelocationAddend*/ EMachine != ELF::EM_386) {} X86ELFObjectWriter::~X86ELFObjectWriter() {} @@ -48,7 +49,7 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target, MCSymbolRefExpr::VariantKind Modifier = Target.isAbsolute() ? 
MCSymbolRefExpr::VK_None : Target.getSymA()->getKind(); unsigned Type; - if (is64Bit()) { + if (getEMachine() == ELF::EM_X86_64) { if (IsPCRel) { switch ((unsigned)Fixup.getKind()) { default: llvm_unreachable("invalid fixup kind!"); @@ -130,7 +131,7 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target, case FK_Data_1: Type = ELF::R_X86_64_8; break; } } - } else { + } else if (getEMachine() == ELF::EM_386) { if (IsPCRel) { switch ((unsigned)Fixup.getKind()) { default: llvm_unreachable("invalid fixup kind!"); @@ -210,15 +211,17 @@ unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target, case FK_Data_1: Type = ELF::R_386_8; break; } } - } + } else + llvm_unreachable("Unsupported ELF machine type."); return Type; } MCObjectWriter *llvm::createX86ELFObjectWriter(raw_ostream &OS, - bool Is64Bit, - uint8_t OSABI) { + bool IsELF64, + uint8_t OSABI, + uint16_t EMachine) { MCELFObjectTargetWriter *MOTW = - new X86ELFObjectWriter(Is64Bit, OSABI); + new X86ELFObjectWriter(IsELF64, OSABI, EMachine); return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true); } diff --git a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp index b0acd7d..16488eb 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp @@ -34,6 +34,10 @@ AsmWriterFlavor("x86-asm-syntax", cl::init(ATT), clEnumValN(Intel, "intel", "Emit Intel-style assembly"), clEnumValEnd)); +static cl::opt +MarkedJTDataRegions("mark-data-regions", cl::init(false), + cl::desc("Mark code section jump table data regions."), + cl::Hidden); void X86MCAsmInfoDarwin::anchor() { } @@ -59,6 +63,7 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &T) { SupportsDebugInformation = true; DwarfUsesInlineInfoSection = true; + UseDataRegionDirectives = MarkedJTDataRegions; // Exceptions handling ExceptionsType = ExceptionHandling::DwarfCFI; diff --git a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp index 4a38324..122204a 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp @@ -16,6 +16,7 @@ #include "MCTargetDesc/X86BaseInfo.h" #include "MCTargetDesc/X86FixupKinds.h" #include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCInstrInfo.h" @@ -28,8 +29,8 @@ using namespace llvm; namespace { class X86MCCodeEmitter : public MCCodeEmitter { - X86MCCodeEmitter(const X86MCCodeEmitter &); // DO NOT IMPLEMENT - void operator=(const X86MCCodeEmitter &); // DO NOT IMPLEMENT + X86MCCodeEmitter(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION; + void operator=(const X86MCCodeEmitter &) LLVM_DELETED_FUNCTION; const MCInstrInfo &MCII; const MCSubtargetInfo &STI; MCContext &Ctx; @@ -51,8 +52,8 @@ public: return (STI.getFeatureBits() & X86::Mode64Bit) == 0; } - static unsigned GetX86RegNum(const MCOperand &MO) { - return X86_MC::getX86RegNum(MO.getReg()); + unsigned GetX86RegNum(const MCOperand &MO) const { + return Ctx.getRegisterInfo().getEncodingValue(MO.getReg()) & 0x7; } // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range @@ -64,8 +65,8 @@ public: // VEX.VVVV => XMM9 => ~9 // // See table 4-35 of Intel AVX Programming Reference for details. 
- static unsigned char getVEXRegisterEncoding(const MCInst &MI, - unsigned OpNum) { + unsigned char getVEXRegisterEncoding(const MCInst &MI, + unsigned OpNum) const { unsigned SrcReg = MI.getOperand(OpNum).getReg(); unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum)); if (X86II::isX86_64ExtendedReg(SrcReg)) @@ -560,15 +561,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, } - // Set the vector length to 256-bit if YMM0-YMM15 is used - for (unsigned i = 0; i != MI.getNumOperands(); ++i) { - if (!MI.getOperand(i).isReg()) - continue; - unsigned SrcReg = MI.getOperand(i).getReg(); - if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15) - VEX_L = 1; - } - // Classify VEX_B, VEX_4V, VEX_R, VEX_X unsigned NumOps = Desc.getNumOperands(); unsigned CurOp = 0; @@ -1129,13 +1121,13 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8: case X86II::MRM_C9: case X86II::MRM_D0: case X86II::MRM_D1: - case X86II::MRM_D4: case X86II::MRM_D8: - case X86II::MRM_D9: case X86II::MRM_DA: - case X86II::MRM_DB: case X86II::MRM_DC: - case X86II::MRM_DD: case X86II::MRM_DE: - case X86II::MRM_DF: case X86II::MRM_E8: - case X86II::MRM_F0: case X86II::MRM_F8: - case X86II::MRM_F9: + case X86II::MRM_D4: case X86II::MRM_D5: + case X86II::MRM_D8: case X86II::MRM_D9: + case X86II::MRM_DA: case X86II::MRM_DB: + case X86II::MRM_DC: case X86II::MRM_DD: + case X86II::MRM_DE: case X86II::MRM_DF: + case X86II::MRM_E8: case X86II::MRM_F0: + case X86II::MRM_F8: case X86II::MRM_F9: EmitByte(BaseOpcode, CurByte, OS); unsigned char MRM; @@ -1150,6 +1142,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, case X86II::MRM_D0: MRM = 0xD0; break; case X86II::MRM_D1: MRM = 0xD1; break; case X86II::MRM_D4: MRM = 0xD4; break; + case X86II::MRM_D5: MRM = 0xD5; break; case X86II::MRM_D8: MRM = 0xD8; break; case X86II::MRM_D9: MRM = 0xD9; break; case X86II::MRM_DA: MRM = 0xDA; break; diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp index 3482363..287c9f1 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp @@ -209,117 +209,10 @@ unsigned X86_MC::getDwarfRegFlavour(StringRef TT, bool isEH) { return DWARFFlavour::X86_32_Generic; } -/// getX86RegNum - This function maps LLVM register identifiers to their X86 -/// specific numbering, which is used in various places encoding instructions. 
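As the comment above notes, VEX.VVVV stores the extra source register in inverted (one's complement) form; the encoder flips the register number and keeps the low four bits. A tiny sketch of that encoding:

#include <cstdio>

static unsigned char vexVVVV(unsigned RegNum) {
  return static_cast<unsigned char>(~RegNum) & 0xf;   // one's complement, 4 bits
}

int main() {
  std::printf("XMM9 -> vvvv = 0x%x\n", (unsigned)vexVVVV(9));   // ~9 & 0xf = 0x6
  std::printf("XMM0 -> vvvv = 0x%x\n", (unsigned)vexVVVV(0));   // 0xf
}
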
-unsigned X86_MC::getX86RegNum(unsigned RegNo) { - switch(RegNo) { - case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX; - case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX; - case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX; - case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX; - case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH: - return N86::ESP; - case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH: - return N86::EBP; - case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH: - return N86::ESI; - case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH: - return N86::EDI; - - case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B: - return N86::EAX; - case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B: - return N86::ECX; - case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B: - return N86::EDX; - case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B: - return N86::EBX; - case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B: - return N86::ESP; - case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B: - return N86::EBP; - case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B: - return N86::ESI; - case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B: - return N86::EDI; - - case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3: - case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7: - return RegNo-X86::ST0; - - case X86::XMM0: case X86::XMM8: - case X86::YMM0: case X86::YMM8: case X86::MM0: - return 0; - case X86::XMM1: case X86::XMM9: - case X86::YMM1: case X86::YMM9: case X86::MM1: - return 1; - case X86::XMM2: case X86::XMM10: - case X86::YMM2: case X86::YMM10: case X86::MM2: - return 2; - case X86::XMM3: case X86::XMM11: - case X86::YMM3: case X86::YMM11: case X86::MM3: - return 3; - case X86::XMM4: case X86::XMM12: - case X86::YMM4: case X86::YMM12: case X86::MM4: - return 4; - case X86::XMM5: case X86::XMM13: - case X86::YMM5: case X86::YMM13: case X86::MM5: - return 5; - case X86::XMM6: case X86::XMM14: - case X86::YMM6: case X86::YMM14: case X86::MM6: - return 6; - case X86::XMM7: case X86::XMM15: - case X86::YMM7: case X86::YMM15: case X86::MM7: - return 7; - - case X86::ES: return 0; - case X86::CS: return 1; - case X86::SS: return 2; - case X86::DS: return 3; - case X86::FS: return 4; - case X86::GS: return 5; - - case X86::CR0: case X86::CR8 : case X86::DR0: return 0; - case X86::CR1: case X86::CR9 : case X86::DR1: return 1; - case X86::CR2: case X86::CR10: case X86::DR2: return 2; - case X86::CR3: case X86::CR11: case X86::DR3: return 3; - case X86::CR4: case X86::CR12: case X86::DR4: return 4; - case X86::CR5: case X86::CR13: case X86::DR5: return 5; - case X86::CR6: case X86::CR14: case X86::DR6: return 6; - case X86::CR7: case X86::CR15: case X86::DR7: return 7; - - // Pseudo index registers are equivalent to a "none" - // scaled index (See Intel Manual 2A, table 2-3) - case X86::EIZ: - case X86::RIZ: - return 4; - - default: - assert((int(RegNo) > 0) && "Unknown physical register!"); - return 0; - } -} - void X86_MC::InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI) { // FIXME: TableGen these. 
for (unsigned Reg = X86::NoRegister+1; Reg < X86::NUM_TARGET_REGS; ++Reg) { - int SEH = X86_MC::getX86RegNum(Reg); - switch (Reg) { - case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B: - case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B: - case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B: - case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B: - case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B: - case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B: - case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B: - case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B: - case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11: - case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15: - case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11: - case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15: - SEH += 8; - break; - } + unsigned SEH = MRI->getEncodingValue(Reg); MRI->mapLLVMRegToSEHReg(Reg, SEH); } } @@ -379,11 +272,15 @@ static MCAsmInfo *createX86MCAsmInfo(const Target &T, StringRef TT) { MAI = new X86_64MCAsmInfoDarwin(TheTriple); else MAI = new X86MCAsmInfoDarwin(TheTriple); + } else if (TheTriple.getEnvironment() == Triple::ELF) { + // Force the use of an ELF container. + MAI = new X86ELFMCAsmInfo(TheTriple); } else if (TheTriple.getOS() == Triple::Win32) { MAI = new X86MCAsmInfoMicrosoft(TheTriple); } else if (TheTriple.getOS() == Triple::MinGW32 || TheTriple.getOS() == Triple::Cygwin) { MAI = new X86MCAsmInfoGNUCOFF(TheTriple); } else { + // The default is ELF. MAI = new X86ELFMCAsmInfo(TheTriple); } @@ -465,7 +362,7 @@ static MCStreamer *createMCStreamer(const Target &T, StringRef TT, if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) return createMachOStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll); - if (TheTriple.isOSWindows()) + if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF) return createWinCOFFStreamer(Ctx, MAB, *_Emitter, _OS, RelaxAll); return createELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack); diff --git a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h index 4650069..981aa1a 100644 --- a/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h +++ b/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h @@ -64,8 +64,6 @@ namespace X86_MC { unsigned getDwarfRegFlavour(StringRef TT, bool isEH); - unsigned getX86RegNum(unsigned RegNo); - void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI); /// createX86MCSubtargetInfo - Create a X86 MCSubtargetInfo instance. @@ -80,8 +78,8 @@ MCCodeEmitter *createX86MCCodeEmitter(const MCInstrInfo &MCII, const MCSubtargetInfo &STI, MCContext &Ctx); -MCAsmBackend *createX86_32AsmBackend(const Target &T, StringRef TT); -MCAsmBackend *createX86_64AsmBackend(const Target &T, StringRef TT); +MCAsmBackend *createX86_32AsmBackend(const Target &T, StringRef TT, StringRef CPU); +MCAsmBackend *createX86_64AsmBackend(const Target &T, StringRef TT, StringRef CPU); /// createX86MachObjectWriter - Construct an X86 Mach-O object writer. MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS, @@ -91,8 +89,9 @@ MCObjectWriter *createX86MachObjectWriter(raw_ostream &OS, /// createX86ELFObjectWriter - Construct an X86 ELF object writer. MCObjectWriter *createX86ELFObjectWriter(raw_ostream &OS, - bool Is64Bit, - uint8_t OSABI); + bool IsELF64, + uint8_t OSABI, + uint16_t EMachine); /// createX86WinCOFFObjectWriter - Construct an X86 Win COFF object writer. 
MCObjectWriter *createX86WinCOFFObjectWriter(raw_ostream &OS, bool Is64Bit); } // End llvm namespace diff --git a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp index f0f1982..7ff058e 100644 --- a/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp +++ b/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp @@ -11,11 +11,13 @@ #include "MCTargetDesc/X86MCTargetDesc.h" #include "llvm/MC/MCAssembler.h" #include "llvm/MC/MCAsmLayout.h" +#include "llvm/MC/MCContext.h" #include "llvm/MC/MCMachObjectWriter.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/MC/MCValue.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Format.h" #include "llvm/Object/MachOFormat.h" using namespace llvm; @@ -23,7 +25,7 @@ using namespace llvm::object; namespace { class X86MachObjectWriter : public MCMachObjectTargetWriter { - void RecordScatteredRelocation(MachObjectWriter *Writer, + bool RecordScatteredRelocation(MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFragment *Fragment, @@ -335,7 +337,7 @@ void X86MachObjectWriter::RecordX86_64Relocation(MachObjectWriter *Writer, Writer->addRelocation(Fragment->getParent(), MRE); } -void X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer, +bool X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer, const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFragment *Fragment, @@ -381,6 +383,19 @@ void X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer, // Relocations are written out in reverse order, so the PAIR comes first. if (Type == macho::RIT_Difference || Type == macho::RIT_Generic_LocalDifference) { + // If the offset is too large to fit in a scattered relocation, + // we're hosed. It's an unfortunate limitation of the MachO format. + if (FixupOffset > 0xffffff) { + char Buffer[32]; + format("0x%x", FixupOffset).print(Buffer, sizeof(Buffer)); + Asm.getContext().FatalError(Fixup.getLoc(), + Twine("Section too large, can't encode " + "r_address (") + Buffer + + ") into 24 bits of scattered " + "relocation entry."); + llvm_unreachable("fatal error returned?!"); + } + macho::RelocationEntry MRE; MRE.Word0 = ((0 << 0) | (macho::RIT_Pair << 24) | @@ -389,6 +404,16 @@ void X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer, macho::RF_Scattered); MRE.Word1 = Value2; Writer->addRelocation(Fragment->getParent(), MRE); + } else { + // If the offset is more than 24-bits, it won't fit in a scattered + // relocation offset field, so we fall back to using a non-scattered + // relocation. This is a bit risky, as if the offset reaches out of + // the block and the linker is doing scattered loading on this + // symbol, things can go badly. + // + // Required for 'as' compatibility. + if (FixupOffset > 0xffffff) + return false; } macho::RelocationEntry MRE; @@ -399,6 +424,7 @@ void X86MachObjectWriter::RecordScatteredRelocation(MachObjectWriter *Writer, macho::RF_Scattered); MRE.Word1 = Value; Writer->addRelocation(Fragment->getParent(), MRE); + return true; } void X86MachObjectWriter::RecordTLVPRelocation(MachObjectWriter *Writer, @@ -469,9 +495,11 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer, // If this is a difference or a defined symbol plus an offset, then we need a // scattered relocation entry. Differences always require scattered // relocations. 
- if (Target.getSymB()) - return RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, - Target, Log2Size, FixedValue); + if (Target.getSymB()) { + RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, + Target, Log2Size, FixedValue); + return; + } // Get the symbol data, if any. MCSymbolData *SD = 0; @@ -483,9 +511,13 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer, uint32_t Offset = Target.getConstant(); if (IsPCRel) Offset += 1 << Log2Size; - if (Offset && SD && !Writer->doesSymbolRequireExternRelocation(SD)) - return RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, - Target, Log2Size, FixedValue); + // Try to record the scattered relocation if needed. Fall back to non + // scattered if necessary (see comments in RecordScatteredRelocation() + // for details). + if (Offset && SD && !Writer->doesSymbolRequireExternRelocation(SD) && + RecordScatteredRelocation(Writer, Asm, Layout, Fragment, Fixup, + Target, Log2Size, FixedValue)) + return; // See . uint32_t FixupOffset = Layout.getFragmentOffset(Fragment)+Fixup.getOffset(); diff --git a/lib/Target/X86/README-SSE.txt b/lib/Target/X86/README-SSE.txt index 624e56f..4011035 100644 --- a/lib/Target/X86/README-SSE.txt +++ b/lib/Target/X86/README-SSE.txt @@ -941,3 +941,15 @@ and inversion with an rsqrtss instruction, which computes 1/sqrt faster at the cost of reduced accuracy. //===---------------------------------------------------------------------===// + +This function should be matched to haddpd when the appropriate CPU is enabled: + +#include +double f (__m128d p) { + return p[0] + p[1]; +} + +similarly, v[0]-v[1] should match to hsubpd, and {v[0]-v[1], w[0]-w[1]} should +turn into hsubpd also. + +//===---------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index 18e6b7c..8ad0bc0 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -118,8 +118,13 @@ def FeatureBMI : SubtargetFeature<"bmi", "HasBMI", "true", "Support BMI instructions">; def FeatureBMI2 : SubtargetFeature<"bmi2", "HasBMI2", "true", "Support BMI2 instructions">; +def FeatureRTM : SubtargetFeature<"rtm", "HasRTM", "true", + "Support RTM instructions">; def FeatureLeaForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true", "Use LEA for adjusting the stack pointer">; +def FeatureSlowDivide : SubtargetFeature<"idiv-to-divb", + "HasSlowDivide", "true", + "Use small divide for positive values less than 256">; //===----------------------------------------------------------------------===// // X86 processors supported. 
@@ -159,8 +164,9 @@ def : Proc<"core2", [FeatureSSSE3, FeatureCMPXCHG16B, FeatureSlowBTMem]>; def : Proc<"penryn", [FeatureSSE41, FeatureCMPXCHG16B, FeatureSlowBTMem]>; -def : AtomProc<"atom", [ProcIntelAtom, FeatureSSE3, FeatureCMPXCHG16B, - FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP]>; +def : AtomProc<"atom", [ProcIntelAtom, FeatureSSSE3, FeatureCMPXCHG16B, + FeatureMOVBE, FeatureSlowBTMem, FeatureLeaForSP, + FeatureSlowDivide]>; // "Arrandale" along with corei3 and corei5 def : Proc<"corei7", [FeatureSSE42, FeatureCMPXCHG16B, FeatureSlowBTMem, FeatureFastUAMem, @@ -188,7 +194,8 @@ def : Proc<"core-avx2", [FeatureAVX2, FeatureCMPXCHG16B, FeaturePOPCNT, FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C, FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI, - FeatureBMI2, FeatureFMA]>; + FeatureBMI2, FeatureFMA, + FeatureRTM]>; def : Proc<"k6", [FeatureMMX]>; def : Proc<"k6-2", [Feature3DNow]>; @@ -227,6 +234,7 @@ def : Proc<"bdver2", [FeatureXOP, FeatureFMA4, FeatureCMPXCHG16B, FeatureAES, FeaturePCLMUL, FeatureF16C, FeatureLZCNT, FeaturePOPCNT, FeatureBMI, FeatureFMA]>; +def : Proc<"geode", [Feature3DNowA]>; def : Proc<"winchip-c6", [FeatureMMX]>; def : Proc<"winchip2", [Feature3DNow]>; diff --git a/lib/Target/X86/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp index db71e27..fdd7125 100644 --- a/lib/Target/X86/X86AsmPrinter.cpp +++ b/lib/Target/X86/X86AsmPrinter.cpp @@ -13,7 +13,6 @@ //===----------------------------------------------------------------------===// #include "X86AsmPrinter.h" -#include "X86MCInstLower.h" #include "X86.h" #include "X86COFFMachineModuleInfo.h" #include "X86MachineFunctionInfo.h" @@ -206,10 +205,10 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO, } } -/// print_pcrel_imm - This is used to print an immediate value that ends up +/// printPCRelImm - This is used to print an immediate value that ends up /// being encoded as a pc-relative value. These print slightly differently, for /// example, a $ is not emitted. -void X86AsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo, +void X86AsmPrinter::printPCRelImm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O) { const MachineOperand &MO = MI->getOperand(OpNo); switch (MO.getType()) { @@ -233,15 +232,17 @@ void X86AsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo, void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, - raw_ostream &O, const char *Modifier) { + raw_ostream &O, const char *Modifier, + unsigned AsmVariant) { const MachineOperand &MO = MI->getOperand(OpNo); switch (MO.getType()) { default: llvm_unreachable("unknown operand type!"); case MachineOperand::MO_Register: { - O << '%'; + // FIXME: Enumerating AsmVariant, so we can remove magic number. + if (AsmVariant == 0) O << '%'; unsigned Reg = MO.getReg(); if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) { - EVT VT = (strcmp(Modifier+6,"64") == 0) ? + MVT::SimpleValueType VT = (strcmp(Modifier+6,"64") == 0) ? MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 : ((strcmp(Modifier+6,"16") == 0) ? 
MVT::i16 : MVT::i8)); Reg = getX86SubSuperRegister(Reg, VT); @@ -265,46 +266,6 @@ void X86AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, } } -void X86AsmPrinter::printSSECC(const MachineInstr *MI, unsigned Op, - raw_ostream &O) { - unsigned char value = MI->getOperand(Op).getImm(); - switch (value) { - default: llvm_unreachable("Invalid ssecc argument!"); - case 0: O << "eq"; break; - case 1: O << "lt"; break; - case 2: O << "le"; break; - case 3: O << "unord"; break; - case 4: O << "neq"; break; - case 5: O << "nlt"; break; - case 6: O << "nle"; break; - case 7: O << "ord"; break; - case 8: O << "eq_uq"; break; - case 9: O << "nge"; break; - case 0xa: O << "ngt"; break; - case 0xb: O << "false"; break; - case 0xc: O << "neq_oq"; break; - case 0xd: O << "ge"; break; - case 0xe: O << "gt"; break; - case 0xf: O << "true"; break; - case 0x10: O << "eq_os"; break; - case 0x11: O << "lt_oq"; break; - case 0x12: O << "le_oq"; break; - case 0x13: O << "unord_s"; break; - case 0x14: O << "neq_us"; break; - case 0x15: O << "nlt_uq"; break; - case 0x16: O << "nle_uq"; break; - case 0x17: O << "ord_s"; break; - case 0x18: O << "eq_us"; break; - case 0x19: O << "nge_uq"; break; - case 0x1a: O << "ngt_uq"; break; - case 0x1b: O << "false_os"; break; - case 0x1c: O << "neq_os"; break; - case 0x1d: O << "ge_oq"; break; - case 0x1e: O << "gt_oq"; break; - case 0x1f: O << "true_us"; break; - } -} - void X86AsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O, const char *Modifier) { const MachineOperand &BaseReg = MI->getOperand(Op); @@ -363,10 +324,51 @@ void X86AsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op, printLeaMemReference(MI, Op, O, Modifier); } -void X86AsmPrinter::printPICLabel(const MachineInstr *MI, unsigned Op, - raw_ostream &O) { - O << *MF->getPICBaseSymbol() << '\n'; - O << *MF->getPICBaseSymbol() << ':'; +void X86AsmPrinter::printIntelMemReference(const MachineInstr *MI, unsigned Op, + raw_ostream &O, const char *Modifier, + unsigned AsmVariant){ + const MachineOperand &BaseReg = MI->getOperand(Op); + unsigned ScaleVal = MI->getOperand(Op+1).getImm(); + const MachineOperand &IndexReg = MI->getOperand(Op+2); + const MachineOperand &DispSpec = MI->getOperand(Op+3); + const MachineOperand &SegReg = MI->getOperand(Op+4); + + // If this has a segment register, print it. + if (SegReg.getReg()) { + printOperand(MI, Op+4, O, Modifier, AsmVariant); + O << ':'; + } + + O << '['; + + bool NeedPlus = false; + if (BaseReg.getReg()) { + printOperand(MI, Op, O, Modifier, AsmVariant); + NeedPlus = true; + } + + if (IndexReg.getReg()) { + if (NeedPlus) O << " + "; + if (ScaleVal != 1) + O << ScaleVal << '*'; + printOperand(MI, Op+2, O, Modifier, AsmVariant); + NeedPlus = true; + } + + assert (DispSpec.isImm() && "Displacement is not an immediate!"); + int64_t DispVal = DispSpec.getImm(); + if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg())) { + if (NeedPlus) { + if (DispVal > 0) + O << " + "; + else { + O << " - "; + DispVal = -DispVal; + } + } + O << DispVal; + } + O << ']'; } bool X86AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode, @@ -457,7 +459,7 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, return false; case 'P': // This is the operand of a call, treat specially. - print_pcrel_imm(MI, OpNo, O); + printPCRelImm(MI, OpNo, O); return false; case 'n': // Negate the immediate or print a '-' before the operand. 
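The printIntelMemReference hunk above encodes the Intel-syntax operand layout: an optional segment override, then "[base + scale*index +/- disp]", with absent components omitted. A minimal standalone sketch of the same formatting rules follows; formatIntelMem and its string-based operands are illustrative only and are not LLVM API.

#include <cstdint>
#include <iostream>
#include <string>

// formatIntelMem is an illustrative name; it mirrors the printing rules of the
// printIntelMemReference hunk, using plain strings in place of MachineOperands.
static std::string formatIntelMem(const std::string &Seg,
                                  const std::string &Base, unsigned Scale,
                                  const std::string &Index, int64_t Disp) {
  std::string S;
  if (!Seg.empty())
    S += Seg + ":";                      // segment override, e.g. "fs:"
  S += "[";
  bool NeedPlus = false;
  if (!Base.empty()) { S += Base; NeedPlus = true; }
  if (!Index.empty()) {
    if (NeedPlus) S += " + ";
    if (Scale != 1) S += std::to_string(Scale) + "*";
    S += Index;
    NeedPlus = true;
  }
  // Print the displacement when it is non-zero, or when it is all there is.
  if (Disp || (Base.empty() && Index.empty())) {
    if (NeedPlus) {
      S += Disp > 0 ? " + " : " - ";
      S += std::to_string(Disp > 0 ? Disp : -Disp);
    } else {
      S += std::to_string(Disp);
    }
  }
  return S + "]";
}

int main() {
  std::cout << formatIntelMem("", "rbp", 4, "rcx", -16) << '\n';  // [rbp + 4*rcx - 16]
}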
@@ -471,7 +473,7 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, } } - printOperand(MI, OpNo, O); + printOperand(MI, OpNo, O, /*Modifier*/ 0, AsmVariant); return false; } @@ -479,6 +481,11 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, unsigned AsmVariant, const char *ExtraCode, raw_ostream &O) { + if (AsmVariant) { + printIntelMemReference(MI, OpNo, O); + return false; + } + if (ExtraCode && ExtraCode[0]) { if (ExtraCode[1] != 0) return true; // Unknown modifier. @@ -680,7 +687,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) { MachineModuleInfoELF::SymbolListTy Stubs = MMIELF.GetGVStubList(); if (!Stubs.empty()) { OutStreamer.SwitchSection(TLOFELF.getDataRelSection()); - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); for (unsigned i = 0, e = Stubs.size(); i != e; ++i) { OutStreamer.EmitLabel(Stubs[i].first); diff --git a/lib/Target/X86/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h index 35386cd..61eb14e 100644 --- a/lib/Target/X86/X86AsmPrinter.h +++ b/lib/Target/X86/X86AsmPrinter.h @@ -34,47 +34,48 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter { Subtarget = &TM.getSubtarget(); } - virtual const char *getPassName() const { + virtual const char *getPassName() const LLVM_OVERRIDE { return "X86 AT&T-Style Assembly Printer"; } const X86Subtarget &getSubtarget() const { return *Subtarget; } - virtual void EmitStartOfAsmFile(Module &M); + virtual void EmitStartOfAsmFile(Module &M) LLVM_OVERRIDE; - virtual void EmitEndOfAsmFile(Module &M); + virtual void EmitEndOfAsmFile(Module &M) LLVM_OVERRIDE; - virtual void EmitInstruction(const MachineInstr *MI); + virtual void EmitInstruction(const MachineInstr *MI) LLVM_OVERRIDE; void printSymbolOperand(const MachineOperand &MO, raw_ostream &O); // These methods are used by the tablegen'erated instruction printer. 
void printOperand(const MachineInstr *MI, unsigned OpNo, raw_ostream &O, - const char *Modifier = 0); - void print_pcrel_imm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O); + const char *Modifier = 0, unsigned AsmVariant = 0); + void printPCRelImm(const MachineInstr *MI, unsigned OpNo, raw_ostream &O); bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O); - bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, const char *ExtraCode, - raw_ostream &OS); - bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, - unsigned AsmVariant, const char *ExtraCode, - raw_ostream &OS); - - void printMachineInstruction(const MachineInstr *MI); - void printSSECC(const MachineInstr *MI, unsigned Op, raw_ostream &O); + virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + unsigned AsmVariant, const char *ExtraCode, + raw_ostream &OS) LLVM_OVERRIDE; + virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo, + unsigned AsmVariant, const char *ExtraCode, + raw_ostream &OS) LLVM_OVERRIDE; + void printMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O, const char *Modifier=NULL); void printLeaMemReference(const MachineInstr *MI, unsigned Op, raw_ostream &O, const char *Modifier=NULL); - void printPICLabel(const MachineInstr *MI, unsigned Op, raw_ostream &O); + void printIntelMemReference(const MachineInstr *MI, unsigned Op, + raw_ostream &O, const char *Modifier=NULL, + unsigned AsmVariant = 1); - bool runOnMachineFunction(MachineFunction &F); + virtual bool runOnMachineFunction(MachineFunction &F) LLVM_OVERRIDE; void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS); - MachineLocation getDebugValueLocation(const MachineInstr *MI) const; + virtual MachineLocation + getDebugValueLocation(const MachineInstr *MI) const LLVM_OVERRIDE; }; } // end namespace llvm diff --git a/lib/Target/X86/X86COFFMachineModuleInfo.h b/lib/Target/X86/X86COFFMachineModuleInfo.h index 471eb31..a5a8dc1 100644 --- a/lib/Target/X86/X86COFFMachineModuleInfo.h +++ b/lib/Target/X86/X86COFFMachineModuleInfo.h @@ -20,7 +20,7 @@ namespace llvm { class X86MachineFunctionInfo; - class TargetData; + class DataLayout; /// X86COFFMachineModuleInfo - This is a MachineModuleInfoImpl implementation /// for X86 COFF targets. diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td index a6d2709..6786756 100644 --- a/lib/Target/X86/X86CallingConv.td +++ b/lib/Target/X86/X86CallingConv.td @@ -88,6 +88,21 @@ def RetCC_X86_32_Fast : CallingConv<[ CCDelegateTo ]>; +// Intel_OCL_BI return-value convention. +def RetCC_Intel_OCL_BI : CallingConv<[ + // Vector types are returned in XMM0,XMM1,XMMM2 and XMM3. + CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64], + CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>, + + // 256-bit FP vectors + // No more than 4 registers + CCIfType<[v8f32, v4f64, v8i32, v4i64], + CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>, + + // i32, i64 in the standard way + CCDelegateTo +]>; + // X86-64 C return-value convention. def RetCC_X86_64_C : CallingConv<[ // The X86-64 calling convention always returns FP values in XMM0. @@ -128,6 +143,10 @@ def RetCC_X86_64 : CallingConv<[ // This is the return-value convention used for the entire X86 backend. 
def RetCC_X86 : CallingConv<[ + + // Check if this is the Intel OpenCL built-ins calling convention + CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo>, + CCIfSubtarget<"is64Bit()", CCDelegateTo>, CCDelegateTo ]>; @@ -235,6 +254,29 @@ def CC_X86_Win64_C : CallingConv<[ CCIfType<[f80], CCAssignToStack<0, 0>> ]>; +// X86-64 Intel OpenCL built-ins calling convention. +def CC_Intel_OCL_BI : CallingConv<[ + CCIfType<[i32], CCIfSubtarget<"isTargetWin32()", CCAssignToStack<4, 4>>>, + + CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>, + CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8, R9 ]>>>, + + CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX]>>, + CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX]>>, + + // The SSE vector arguments are passed in XMM registers. + CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64], + CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>, + + // The 256-bit vector arguments are passed in YMM registers. + CCIfType<[v8f32, v4f64, v8i32, v4i64], + CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>, + + CCIfSubtarget<"isTargetWin64()", CCDelegateTo>, + CCDelegateTo +]>; + + def CC_X86_64_GHC : CallingConv<[ // Promote i8/i16/i32 arguments to i64. CCIfType<[i8, i16, i32], CCPromoteToType>, @@ -324,7 +366,7 @@ def CC_X86_32_FastCall : CallingConv<[ CCIfNest>, // The first 2 integer arguments are passed in ECX/EDX - CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>, + CCIfInReg>>, // Otherwise, same as everything else. CCDelegateTo @@ -408,6 +450,7 @@ def CC_X86_64 : CallingConv<[ // This is the argument convention used for the entire X86 backend. def CC_X86 : CallingConv<[ + CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo>, CCIfSubtarget<"is64Bit()", CCDelegateTo>, CCDelegateTo ]>; @@ -426,3 +469,17 @@ def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>; def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15, (sequence "XMM%u", 6, 15))>; + + +// Standard C + YMM6-15 +def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, + R13, R14, R15, + (sequence "YMM%u", 6, 15))>; + +//Standard C + XMM 8-15 +def CSR_64_Intel_OCL_BI : CalleeSavedRegs<(add CSR_64, + (sequence "XMM%u", 8, 15))>; + +//Standard C + YMM 8-15 +def CSR_64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add CSR_64, + (sequence "YMM%u", 8, 15))>; diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp index d705049..44db563 100644 --- a/lib/Target/X86/X86CodeEmitter.cpp +++ b/lib/Target/X86/X86CodeEmitter.cpp @@ -26,7 +26,6 @@ #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/Passes.h" -#include "llvm/Function.h" #include "llvm/ADT/Statistic.h" #include "llvm/MC/MCCodeEmitter.h" #include "llvm/MC/MCExpr.h" @@ -43,7 +42,7 @@ namespace { template class Emitter : public MachineFunctionPass { const X86InstrInfo *II; - const TargetData *TD; + const DataLayout *TD; X86TargetMachine &TM; CodeEmitter &MCE; MachineModuleInfo *MMI; @@ -57,7 +56,7 @@ namespace { MCE(mce), PICBaseOffset(0), Is64BitMode(false), IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} Emitter(X86TargetMachine &tm, CodeEmitter &mce, - const X86InstrInfo &ii, const TargetData &td, bool is64) + const X86InstrInfo &ii, const DataLayout &td, bool is64) : MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm), MCE(mce), PICBaseOffset(0), Is64BitMode(is64), IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} @@ -110,6 +109,14 @@ namespace { void emitMemModRMByte(const MachineInstr &MI, unsigned Op, 
unsigned RegOpcodeField, intptr_t PCAdj = 0); + + unsigned getX86RegNum(unsigned RegNo) const { + const TargetRegisterInfo *TRI = TM.getRegisterInfo(); + return TRI->getEncodingValue(RegNo) & 0x7; + } + + unsigned char getVEXRegisterEncoding(const MachineInstr &MI, + unsigned OpNum) const; }; template @@ -129,13 +136,12 @@ bool Emitter::runOnMachineFunction(MachineFunction &MF) { MCE.setModuleInfo(MMI); II = TM.getInstrInfo(); - TD = TM.getTargetData(); + TD = TM.getDataLayout(); Is64BitMode = TM.getSubtarget().is64Bit(); IsPIC = TM.getRelocationModel() == Reloc::PIC_; do { - DEBUG(dbgs() << "JITTing function '" - << MF.getFunction()->getName() << "'\n"); + DEBUG(dbgs() << "JITTing function '" << MF.getName() << "'\n"); MCE.startFunction(MF); for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); MBB != E; ++MBB) { @@ -365,7 +371,7 @@ inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode, template void Emitter::emitRegModRMByte(unsigned ModRMReg, unsigned RegOpcodeFld){ - MCE.emitByte(ModRMByte(3, RegOpcodeFld, X86_MC::getX86RegNum(ModRMReg))); + MCE.emitByte(ModRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg))); } template @@ -503,7 +509,7 @@ void Emitter::emitMemModRMByte(const MachineInstr &MI, // 2-7) and absolute references. unsigned BaseRegNo = -1U; if (BaseReg != 0 && BaseReg != X86::RIP) - BaseRegNo = X86_MC::getX86RegNum(BaseReg); + BaseRegNo = getX86RegNum(BaseReg); if (// The SIB byte must be used if there is an index register. IndexReg.getReg() == 0 && @@ -579,15 +585,15 @@ void Emitter::emitMemModRMByte(const MachineInstr &MI, // Manual 2A, table 2-7. The displacement has already been output. unsigned IndexRegNo; if (IndexReg.getReg()) - IndexRegNo = X86_MC::getX86RegNum(IndexReg.getReg()); + IndexRegNo = getX86RegNum(IndexReg.getReg()); else // Examples: [ESP+1*+4] or [scaled idx]+disp32 (MOD=0,BASE=5) IndexRegNo = 4; emitSIBByte(SS, IndexRegNo, 5); } else { - unsigned BaseRegNo = X86_MC::getX86RegNum(BaseReg); + unsigned BaseRegNo = getX86RegNum(BaseReg); unsigned IndexRegNo; if (IndexReg.getReg()) - IndexRegNo = X86_MC::getX86RegNum(IndexReg.getReg()); + IndexRegNo = getX86RegNum(IndexReg.getReg()); else IndexRegNo = 4; // For example [ESP+1*+4] emitSIBByte(SS, IndexRegNo, BaseRegNo); @@ -758,10 +764,12 @@ void Emitter::emitOpcodePrefix(uint64_t TSFlags, // VEX.VVVV => XMM9 => ~9 // // See table 4-35 of Intel AVX Programming Reference for details. 
-static unsigned char getVEXRegisterEncoding(const MachineInstr &MI, - unsigned OpNum) { +template +unsigned char +Emitter::getVEXRegisterEncoding(const MachineInstr &MI, + unsigned OpNum) const { unsigned SrcReg = MI.getOperand(OpNum).getReg(); - unsigned SrcRegNum = X86_MC::getX86RegNum(MI.getOperand(OpNum).getReg()); + unsigned SrcRegNum = getX86RegNum(MI.getOperand(OpNum).getReg()); if (X86II::isX86_64ExtendedReg(SrcReg)) SrcRegNum |= 8; @@ -923,17 +931,6 @@ void Emitter::emitVEXOpcodePrefix(uint64_t TSFlags, } - // Set the vector length to 256-bit if YMM0-YMM15 is used - for (unsigned i = 0; i != MI.getNumOperands(); ++i) { - if (!MI.getOperand(i).isReg()) - continue; - if (MI.getOperand(i).isImplicit()) - continue; - unsigned SrcReg = MI.getOperand(i).getReg(); - if (SrcReg >= X86::YMM0 && SrcReg <= X86::YMM15) - VEX_L = 1; - } - // Classify VEX_B, VEX_4V, VEX_R, VEX_X unsigned NumOps = Desc->getNumOperands(); unsigned CurOp = 0; @@ -1248,7 +1245,7 @@ void Emitter::emitInstruction(MachineInstr &MI, case X86II::AddRegFrm: { MCE.emitByte(BaseOpcode + - X86_MC::getX86RegNum(MI.getOperand(CurOp++).getReg())); + getX86RegNum(MI.getOperand(CurOp++).getReg())); if (CurOp == NumOps) break; @@ -1283,7 +1280,7 @@ void Emitter::emitInstruction(MachineInstr &MI, case X86II::MRMDestReg: { MCE.emitByte(BaseOpcode); emitRegModRMByte(MI.getOperand(CurOp).getReg(), - X86_MC::getX86RegNum(MI.getOperand(CurOp+1).getReg())); + getX86RegNum(MI.getOperand(CurOp+1).getReg())); CurOp += 2; break; } @@ -1294,7 +1291,7 @@ void Emitter::emitInstruction(MachineInstr &MI, if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV) SrcRegNum++; emitMemModRMByte(MI, CurOp, - X86_MC::getX86RegNum(MI.getOperand(SrcRegNum).getReg())); + getX86RegNum(MI.getOperand(SrcRegNum).getReg())); CurOp = SrcRegNum + 1; break; } @@ -1310,7 +1307,7 @@ void Emitter::emitInstruction(MachineInstr &MI, ++SrcRegNum; emitRegModRMByte(MI.getOperand(SrcRegNum).getReg(), - X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg())); + getX86RegNum(MI.getOperand(CurOp).getReg())); // 2 operands skipped with HasMemOp4, compensate accordingly CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1; if (HasVEX_4VOp3) @@ -1332,7 +1329,7 @@ void Emitter::emitInstruction(MachineInstr &MI, intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ? X86II::getSizeOfImm(Desc->TSFlags) : 0; emitMemModRMByte(MI, FirstMemOp, - X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg()),PCAdj); + getX86RegNum(MI.getOperand(CurOp).getReg()),PCAdj); CurOp += AddrOperands + 1; if (HasVEX_4VOp3) ++CurOp; @@ -1422,7 +1419,7 @@ void Emitter::emitInstruction(MachineInstr &MI, MCE.emitByte(BaseOpcode); // Duplicate register, used by things like MOV8r0 (aka xor reg,reg). emitRegModRMByte(MI.getOperand(CurOp).getReg(), - X86_MC::getX86RegNum(MI.getOperand(CurOp).getReg())); + getX86RegNum(MI.getOperand(CurOp).getReg())); ++CurOp; break; @@ -1455,7 +1452,7 @@ void Emitter::emitInstruction(MachineInstr &MI, const MachineOperand &MO = MI.getOperand(HasMemOp4 ? 
MemOp4_I8IMMOperand : CurOp); ++CurOp; - unsigned RegNum = X86_MC::getX86RegNum(MO.getReg()) << 4; + unsigned RegNum = getX86RegNum(MO.getReg()) << 4; if (X86II::isX86_64ExtendedReg(MO.getReg())) RegNum |= 1 << 7; // If there is an additional 5th operand it must be an immediate, which diff --git a/lib/Target/X86/X86ELFWriterInfo.cpp b/lib/Target/X86/X86ELFWriterInfo.cpp deleted file mode 100644 index c1a49a7..0000000 --- a/lib/Target/X86/X86ELFWriterInfo.cpp +++ /dev/null @@ -1,147 +0,0 @@ -//===-- X86ELFWriterInfo.cpp - ELF Writer Info for the X86 backend --------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements ELF writer information for the X86 backend. -// -//===----------------------------------------------------------------------===// - -#include "X86ELFWriterInfo.h" -#include "X86Relocations.h" -#include "llvm/Function.h" -#include "llvm/Support/ELF.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Target/TargetData.h" -#include "llvm/Target/TargetMachine.h" - -using namespace llvm; - -//===----------------------------------------------------------------------===// -// Implementation of the X86ELFWriterInfo class -//===----------------------------------------------------------------------===// - -X86ELFWriterInfo::X86ELFWriterInfo(bool is64Bit_, bool isLittleEndian_) - : TargetELFWriterInfo(is64Bit_, isLittleEndian_) { - EMachine = is64Bit ? EM_X86_64 : EM_386; - } - -X86ELFWriterInfo::~X86ELFWriterInfo() {} - -unsigned X86ELFWriterInfo::getRelocationType(unsigned MachineRelTy) const { - if (is64Bit) { - switch(MachineRelTy) { - case X86::reloc_pcrel_word: - return ELF::R_X86_64_PC32; - case X86::reloc_absolute_word: - return ELF::R_X86_64_32; - case X86::reloc_absolute_word_sext: - return ELF::R_X86_64_32S; - case X86::reloc_absolute_dword: - return ELF::R_X86_64_64; - case X86::reloc_picrel_word: - default: - llvm_unreachable("unknown x86_64 machine relocation type"); - } - } else { - switch(MachineRelTy) { - case X86::reloc_pcrel_word: - return ELF::R_386_PC32; - case X86::reloc_absolute_word: - return ELF::R_386_32; - case X86::reloc_absolute_word_sext: - case X86::reloc_absolute_dword: - case X86::reloc_picrel_word: - default: - llvm_unreachable("unknown x86 machine relocation type"); - } - } -} - -long int X86ELFWriterInfo::getDefaultAddendForRelTy(unsigned RelTy, - long int Modifier) const { - if (is64Bit) { - switch(RelTy) { - case ELF::R_X86_64_PC32: return Modifier - 4; - case ELF::R_X86_64_32: - case ELF::R_X86_64_32S: - case ELF::R_X86_64_64: - return Modifier; - default: - llvm_unreachable("unknown x86_64 relocation type"); - } - } else { - switch(RelTy) { - case ELF::R_386_PC32: return Modifier - 4; - case ELF::R_386_32: return Modifier; - default: - llvm_unreachable("unknown x86 relocation type"); - } - } -} - -unsigned X86ELFWriterInfo::getRelocationTySize(unsigned RelTy) const { - if (is64Bit) { - switch(RelTy) { - case ELF::R_X86_64_PC32: - case ELF::R_X86_64_32: - case ELF::R_X86_64_32S: - return 32; - case ELF::R_X86_64_64: - return 64; - default: - llvm_unreachable("unknown x86_64 relocation type"); - } - } else { - switch(RelTy) { - case ELF::R_386_PC32: - case ELF::R_386_32: - return 32; - default: - llvm_unreachable("unknown x86 relocation type"); - } - } -} - -bool X86ELFWriterInfo::isPCRelativeRel(unsigned RelTy) const 
{ - if (is64Bit) { - switch(RelTy) { - case ELF::R_X86_64_PC32: - return true; - case ELF::R_X86_64_32: - case ELF::R_X86_64_32S: - case ELF::R_X86_64_64: - return false; - default: - llvm_unreachable("unknown x86_64 relocation type"); - } - } else { - switch(RelTy) { - case ELF::R_386_PC32: - return true; - case ELF::R_386_32: - return false; - default: - llvm_unreachable("unknown x86 relocation type"); - } - } -} - -unsigned X86ELFWriterInfo::getAbsoluteLabelMachineRelTy() const { - return is64Bit ? - X86::reloc_absolute_dword : X86::reloc_absolute_word; -} - -long int X86ELFWriterInfo::computeRelocation(unsigned SymOffset, - unsigned RelOffset, - unsigned RelTy) const { - - if (RelTy == ELF::R_X86_64_PC32 || RelTy == ELF::R_386_PC32) - return SymOffset - (RelOffset + 4); - - llvm_unreachable("computeRelocation unknown for this relocation type"); -} diff --git a/lib/Target/X86/X86ELFWriterInfo.h b/lib/Target/X86/X86ELFWriterInfo.h deleted file mode 100644 index a45b5bb..0000000 --- a/lib/Target/X86/X86ELFWriterInfo.h +++ /dev/null @@ -1,59 +0,0 @@ -//===-- X86ELFWriterInfo.h - ELF Writer Info for X86 ------------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements ELF writer information for the X86 backend. -// -//===----------------------------------------------------------------------===// - -#ifndef X86_ELF_WRITER_INFO_H -#define X86_ELF_WRITER_INFO_H - -#include "llvm/Target/TargetELFWriterInfo.h" - -namespace llvm { - - class X86ELFWriterInfo : public TargetELFWriterInfo { - - public: - X86ELFWriterInfo(bool is64Bit_, bool isLittleEndian_); - virtual ~X86ELFWriterInfo(); - - /// getRelocationType - Returns the target specific ELF Relocation type. - /// 'MachineRelTy' contains the object code independent relocation type - virtual unsigned getRelocationType(unsigned MachineRelTy) const; - - /// hasRelocationAddend - True if the target uses an addend in the - /// ELF relocation entry. - virtual bool hasRelocationAddend() const { return is64Bit ? true : false; } - - /// getDefaultAddendForRelTy - Gets the default addend value for a - /// relocation entry based on the target ELF relocation type. - virtual long int getDefaultAddendForRelTy(unsigned RelTy, - long int Modifier = 0) const; - - /// getRelTySize - Returns the size of relocatable field in bits - virtual unsigned getRelocationTySize(unsigned RelTy) const; - - /// isPCRelativeRel - True if the relocation type is pc relative - virtual bool isPCRelativeRel(unsigned RelTy) const; - - /// getJumpTableRelocationTy - Returns the machine relocation type used - /// to reference a jumptable. - virtual unsigned getAbsoluteLabelMachineRelTy() const; - - /// computeRelocation - Some relocatable fields could be relocated - /// directly, avoiding the relocation symbol emission, compute the - /// final relocation value for this symbol. - virtual long int computeRelocation(unsigned SymOffset, unsigned RelOffset, - unsigned RelTy) const; - }; - -} // end llvm namespace - -#endif // X86_ELF_WRITER_INFO_H diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index e5952aa..d4627c7 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -45,9 +45,9 @@ class X86FastISel : public FastISel { /// make the right decision when generating code for different targets. 
const X86Subtarget *Subtarget; - /// StackPtr - Register used as the stack pointer. + /// RegInfo - X86 register info. /// - unsigned StackPtr; + const X86RegisterInfo *RegInfo; /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 /// floating point ops. @@ -61,9 +61,9 @@ public: const TargetLibraryInfo *libInfo) : FastISel(funcInfo, libInfo) { Subtarget = &TM.getSubtarget(); - StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP; X86ScalarSSEf64 = Subtarget->hasSSE2(); X86ScalarSSEf32 = Subtarget->hasSSE1(); + RegInfo = static_cast(TM.getRegisterInfo()); } virtual bool TargetSelectInstruction(const Instruction *I); @@ -710,6 +710,8 @@ bool X86FastISel::X86SelectStore(const Instruction *I) { bool X86FastISel::X86SelectRet(const Instruction *I) { const ReturnInst *Ret = cast(I); const Function &F = *I->getParent()->getParent(); + const X86MachineFunctionInfo *X86MFInfo = + FuncInfo.MF->getInfo(); if (!FuncInfo.CanLowerReturn) return false; @@ -724,8 +726,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { return false; // Don't handle popping bytes on return for now. - if (FuncInfo.MF->getInfo() - ->getBytesToPopOnReturn() != 0) + if (X86MFInfo->getBytesToPopOnReturn() != 0) return 0; // fastcc with -tailcallopt is intended to provide a guaranteed @@ -809,6 +810,19 @@ bool X86FastISel::X86SelectRet(const Instruction *I) { MRI.addLiveOut(VA.getLocReg()); } + // The x86-64 ABI for returning structs by value requires that we copy + // the sret argument into %rax for the return. We saved the argument into + // a virtual register in the entry block, so now we copy the value out + // and into %rax. + if (Subtarget->is64Bit() && F.hasStructRetAttr()) { + unsigned Reg = X86MFInfo->getSRetReturnReg(); + assert(Reg && + "SRetReturnReg should have been set in LowerFormalArguments()!"); + BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), + X86::RAX).addReg(Reg); + MRI.addLiveOut(X86::RAX); + } + // Now emit the RET. 
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET)); return true; @@ -1527,9 +1541,9 @@ static unsigned computeBytesPoppedByCallee(const X86Subtarget &Subtarget, CallingConv::ID CC = CS.getCallingConv(); if (CC == CallingConv::Fast || CC == CallingConv::GHC) return 0; - if (!CS.paramHasAttr(1, Attribute::StructRet)) + if (!CS.paramHasAttr(1, Attributes::StructRet)) return 0; - if (CS.paramHasAttr(1, Attribute::InReg)) + if (CS.paramHasAttr(1, Attributes::InReg)) return 0; return 4; } @@ -1608,12 +1622,12 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { Value *ArgVal = *i; ISD::ArgFlagsTy Flags; unsigned AttrInd = i - CS.arg_begin() + 1; - if (CS.paramHasAttr(AttrInd, Attribute::SExt)) + if (CS.paramHasAttr(AttrInd, Attributes::SExt)) Flags.setSExt(); - if (CS.paramHasAttr(AttrInd, Attribute::ZExt)) + if (CS.paramHasAttr(AttrInd, Attributes::ZExt)) Flags.setZExt(); - if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) { + if (CS.paramHasAttr(AttrInd, Attributes::ByVal)) { PointerType *Ty = cast(ArgVal->getType()); Type *ElementTy = Ty->getElementType(); unsigned FrameSize = TD.getTypeAllocSize(ElementTy); @@ -1627,9 +1641,9 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { return false; } - if (CS.paramHasAttr(AttrInd, Attribute::InReg)) + if (CS.paramHasAttr(AttrInd, Attributes::InReg)) Flags.setInReg(); - if (CS.paramHasAttr(AttrInd, Attribute::Nest)) + if (CS.paramHasAttr(AttrInd, Attributes::Nest)) Flags.setNest(); // If this is an i1/i8/i16 argument, promote to i32 to avoid an extra @@ -1771,7 +1785,7 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { } else { unsigned LocMemOffset = VA.getLocMemOffset(); X86AddressMode AM; - AM.Base.Reg = StackPtr; + AM.Base.Reg = RegInfo->getStackRegister(); AM.Disp = LocMemOffset; const Value *ArgVal = ArgVals[VA.getValNo()]; ISD::ArgFlagsTy Flags = ArgFlags[VA.getValNo()]; @@ -1897,11 +1911,11 @@ bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) { ISD::InputArg MyFlags; MyFlags.VT = RegisterVT.getSimpleVT(); MyFlags.Used = !CS.getInstruction()->use_empty(); - if (CS.paramHasAttr(0, Attribute::SExt)) + if (CS.paramHasAttr(0, Attributes::SExt)) MyFlags.Flags.setSExt(); - if (CS.paramHasAttr(0, Attribute::ZExt)) + if (CS.paramHasAttr(0, Attributes::ZExt)) MyFlags.Flags.setZExt(); - if (CS.paramHasAttr(0, Attribute::InReg)) + if (CS.paramHasAttr(0, Attributes::InReg)) MyFlags.Flags.setInReg(); Ins.push_back(MyFlags); } @@ -2014,13 +2028,17 @@ X86FastISel::TargetSelectInstruction(const Instruction *I) { unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { MVT VT; if (!isTypeLegal(C->getType(), VT)) - return false; + return 0; + + // Can't handle alternate code models yet. + if (TM.getCodeModel() != CodeModel::Small) + return 0; // Get opcode and regclass of the output for the given load instruction. unsigned Opc = 0; const TargetRegisterClass *RC = NULL; switch (VT.SimpleTy) { - default: return false; + default: return 0; case MVT::i8: Opc = X86::MOV8rm; RC = &X86::GR8RegClass; @@ -2058,7 +2076,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) { break; case MVT::f80: // No f80 support yet. - return false; + return 0; } // Materialize addresses with LEA instructions. 
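The new sret block in X86SelectRet above exists because of an ABI rule rather than anything specific to FastISel: under the SysV x86-64 convention a struct returned in memory is written through a hidden pointer argument, and the callee must hand that same pointer back in %rax. A small illustration of the source pattern that triggers it; Big and makeBig are made-up names, and the register comments assume the SysV ABI.

// Big and makeBig are made-up names; register comments assume the SysV ABI.
struct Big { long a, b, c, d; };   // 32 bytes: returned in memory via sret

Big makeBig(long x) {              // effectively: Big *makeBig(Big *sret, long x)
  Big B = {x, x + 1, x + 2, x + 3};
  return B;                        // writes through the hidden pointer, then
                                   // must return that pointer in %rax
}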
diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index 955c75a..791f598 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -171,6 +171,7 @@ namespace { // Shuffle live registers to match the expectations of successor blocks. void finishBlockStack(); +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void dumpStack() const { dbgs() << "Stack contents:"; for (unsigned i = 0; i != StackTop; ++i) { @@ -181,6 +182,7 @@ namespace { dbgs() << ", ST" << i << " in FP" << unsigned(PendingST[i]); dbgs() << "\n"; } +#endif /// getSlot - Return the stack slot number a particular register number is /// in. @@ -575,8 +577,8 @@ namespace { friend bool operator<(const TableEntry &TE, unsigned V) { return TE.from < V; } - friend bool LLVM_ATTRIBUTE_USED operator<(unsigned V, - const TableEntry &TE) { + friend bool LLVM_ATTRIBUTE_UNUSED operator<(unsigned V, + const TableEntry &TE) { return V < TE.from; } }; diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 2238688..369589d 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -25,7 +25,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCSymbol.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/CommandLine.h" #include "llvm/ADT/SmallSet.h" @@ -313,11 +313,11 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF, if (CSI.empty()) return; std::vector &Moves = MMI.getFrameMoves(); - const TargetData *TD = TM.getTargetData(); + const X86RegisterInfo *RegInfo = TM.getRegisterInfo(); bool HasFP = hasFP(MF); // Calculate amount of bytes used for return address storing. - int stackGrowth = -TD->getPointerSize(); + int stackGrowth = -RegInfo->getSlotSize(); // FIXME: This is dirty hack. The code itself is pretty mess right now. // It should be rewritten from scratch and generalized sometimes. @@ -674,7 +674,7 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { // function, and use up to 128 bytes of stack space, don't have a frame // pointer, calls, or dynamic alloca then we do not need to adjust the // stack pointer (we fit in the Red Zone). - if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) && + if (Is64Bit && !Fn->getFnAttributes().hasAttribute(Attributes::NoRedZone) && !RegInfo->needsStackRealignment(MF) && !MFI->hasVarSizedObjects() && // No dynamic alloca. !MFI->adjustsStack() && // No calls. @@ -715,9 +715,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { // ELSE => DW_CFA_offset_extended std::vector &Moves = MMI.getFrameMoves(); - const TargetData *TD = MF.getTarget().getTargetData(); uint64_t NumBytes = 0; - int stackGrowth = -TD->getPointerSize(); + int stackGrowth = -SlotSize; if (HasFP) { // Calculate required stack adjustment. @@ -836,8 +835,6 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { MI->getOperand(3).setIsDead(); } - DL = MBB.findDebugLoc(MBBI); - // If there is an SUB32ri of ESP immediately before this instruction, merge // the two. This can be the case when tail call elimination is enabled and // the callee has more arguments then the caller. 
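The emitPrologue change above only swaps the attribute query (hasFnAttr(Attribute::NoRedZone) becomes getFnAttributes().hasAttribute(Attributes::NoRedZone)); the surrounding red-zone test is unchanged. As a rough paraphrase of the conditions visible in that hunk, here is a hypothetical helper; canUseRedZone is not an LLVM function, and the in-tree check carries additional clauses that the diff does not show.

#include <cstdint>

// Hypothetical restatement of the red-zone conditions named in the hunk above:
// a 64-bit leaf function that needs no realignment, has no dynamic alloca,
// makes no calls, fits in 128 bytes, and is not marked noredzone can leave
// %rsp untouched and use the red zone instead of emitting a stack adjustment.
static bool canUseRedZone(bool Is64Bit, bool HasNoRedZoneAttr,
                          bool NeedsRealignment, bool HasVarSizedObjects,
                          bool AdjustsStack, uint64_t StackSize) {
  return Is64Bit && !HasNoRedZoneAttr &&
         !NeedsRealignment &&
         !HasVarSizedObjects &&   // no dynamic alloca
         !AdjustsStack &&         // no calls
         StackSize <= 128;        // fits in the 128-byte red zone
}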
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 27195b4..99f5574 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -100,6 +100,7 @@ namespace { Base_Reg = Reg; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void dump() { dbgs() << "X86ISelAddressMode " << this << '\n'; dbgs() << "Base_Reg "; @@ -133,6 +134,7 @@ namespace { dbgs() << "nul"; dbgs() << " JT" << JT << " Align" << Align << '\n'; } +#endif }; } @@ -189,7 +191,6 @@ namespace { SDNode *Select(SDNode *N); SDNode *SelectGather(SDNode *N, unsigned Opc); SDNode *SelectAtomic64(SDNode *Node, unsigned Opc); - SDNode *SelectAtomicLoadAdd(SDNode *Node, EVT NVT); SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT); bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM); @@ -244,13 +245,15 @@ namespace { else if (AM.CP) Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Align, AM.Disp, AM.SymbolFlags); - else if (AM.ES) + else if (AM.ES) { + assert(!AM.Disp && "Non-zero displacement is ignored with ES."); Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags); - else if (AM.JT != -1) + } else if (AM.JT != -1) { + assert(!AM.Disp && "Non-zero displacement is ignored with JT."); Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags); - else if (AM.BlockAddr) - Disp = CurDAG->getBlockAddress(AM.BlockAddr, MVT::i32, - true, AM.SymbolFlags); + } else if (AM.BlockAddr) + Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp, + AM.SymbolFlags); else Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32); @@ -359,7 +362,7 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const { /// MoveBelowCallOrigChain - Replace the original chain operand of the call with /// load's chain operand and move load below the call's chain operand. static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, - SDValue Call, SDValue OrigChain) { + SDValue Call, SDValue OrigChain) { SmallVector Ops; SDValue Chain = OrigChain.getOperand(0); if (Chain.getNode() == Load.getNode()) @@ -383,11 +386,13 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size()); CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0), Load.getOperand(1), Load.getOperand(2)); + + unsigned NumOps = Call.getNode()->getNumOperands(); Ops.clear(); Ops.push_back(SDValue(Load.getNode(), 1)); - for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i) + for (unsigned i = 1, e = NumOps; i != e; ++i) Ops.push_back(Call.getOperand(i)); - CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], Ops.size()); + CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps); } /// isCalleeLoad - Return true if call address is a load and it can be @@ -396,6 +401,10 @@ static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load, /// In the case of a tail call, there isn't a callseq node between the call /// chain and the load. static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) { + // The transformation is somewhat dangerous if the call's chain was glued to + // the call. After MoveBelowOrigChain the load is moved between the call and + // the chain, this can create a cycle if the load is not folded. So it is + // *really* important that we are sure the load will be folded. 
if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse()) return false; LoadSDNode *LD = dyn_cast(Callee.getNode()); @@ -425,7 +434,8 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) { void X86DAGToDAGISel::PreprocessISelDAG() { // OptForSize is used in pattern predicates that isel is matching. - OptForSize = MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize); + OptForSize = MF->getFunction()->getFnAttributes(). + hasAttribute(Attributes::OptimizeForSize); for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(), E = CurDAG->allnodes_end(); I != E; ) { @@ -433,7 +443,10 @@ void X86DAGToDAGISel::PreprocessISelDAG() { if (OptLevel != CodeGenOpt::None && (N->getOpcode() == X86ISD::CALL || - N->getOpcode() == X86ISD::TC_RETURN)) { + (N->getOpcode() == X86ISD::TC_RETURN && + // Only does this if load can be foled into TC_RETURN. + (Subtarget->is64Bit() || + getTargetMachine().getRelocationModel() != Reloc::PIC_)))) { /// Also try moving call address load from outside callseq_start to just /// before the call to allow it to be folded. /// @@ -652,10 +665,16 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) { } else if (JumpTableSDNode *J = dyn_cast(N0)) { AM.JT = J->getIndex(); AM.SymbolFlags = J->getTargetFlags(); - } else { - AM.BlockAddr = cast(N0)->getBlockAddress(); - AM.SymbolFlags = cast(N0)->getTargetFlags(); - } + } else if (BlockAddressSDNode *BA = dyn_cast(N0)) { + X86ISelAddressMode Backup = AM; + AM.BlockAddr = BA->getBlockAddress(); + AM.SymbolFlags = BA->getTargetFlags(); + if (FoldOffsetIntoAddress(BA->getOffset(), AM)) { + AM = Backup; + return true; + } + } else + llvm_unreachable("Unhandled symbol reference node."); if (N.getOpcode() == X86ISD::WrapperRIP) AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64)); @@ -684,10 +703,12 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) { } else if (JumpTableSDNode *J = dyn_cast(N0)) { AM.JT = J->getIndex(); AM.SymbolFlags = J->getTargetFlags(); - } else { - AM.BlockAddr = cast(N0)->getBlockAddress(); - AM.SymbolFlags = cast(N0)->getTargetFlags(); - } + } else if (BlockAddressSDNode *BA = dyn_cast(N0)) { + AM.BlockAddr = BA->getBlockAddress(); + AM.Disp += BA->getOffset(); + AM.SymbolFlags = BA->getTargetFlags(); + } else + llvm_unreachable("Unhandled symbol reference node."); return false; } @@ -1011,7 +1032,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM, AM.IndexReg = ShVal.getNode()->getOperand(0); ConstantSDNode *AddVal = cast(ShVal.getNode()->getOperand(1)); - uint64_t Disp = AddVal->getSExtValue() << Val; + uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val; if (!FoldOffsetIntoAddress(Disp, AM)) return false; } @@ -1281,7 +1302,9 @@ bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, // that are not a MemSDNode, and thus don't have proper addrspace info. Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores - Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme + Parent->getOpcode() != X86ISD::TLSCALL && // Fixme + Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp + Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp unsigned AddrSpace = cast(Parent)->getPointerInfo().getAddrSpace(); // AddrSpace 256 -> GS, 257 -> FS. 
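One small fix in the MatchAddressRecursively hunk above is easy to miss: the added (uint64_t) cast before the shift. Left-shifting a negative signed value is undefined behaviour in C++, while the same shift on the unsigned representation is well defined, which is all the cast buys. A self-contained illustration, independent of the LLVM sources:

#include <cstdint>
#include <iostream>

int main() {
  int64_t Signed = -8;   // stands in for a negative AddVal->getSExtValue()
  unsigned Shift = 3;    // stands in for Val, the scale shift amount

  // uint64_t Bad  = Signed << Shift;           // undefined: negative LHS
  uint64_t Good = (uint64_t)Signed << Shift;    // well defined, wraps mod 2^64

  std::cout << std::hex << Good << '\n';        // prints ffffffffffffffc0
}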
@@ -1468,6 +1491,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { SDValue In1 = Node->getOperand(1); SDValue In2L = Node->getOperand(2); SDValue In2H = Node->getOperand(3); + SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) return NULL; @@ -1481,159 +1505,13 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { return ResNode; } -// FIXME: Figure out some way to unify this with the 'or' and other code -// below. -SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) { - if (Node->hasAnyUseOfValue(0)) - return 0; - - // Optimize common patterns for __sync_add_and_fetch and - // __sync_sub_and_fetch where the result is not used. This allows us - // to use "lock" version of add, sub, inc, dec instructions. - // FIXME: Do not use special instructions but instead add the "lock" - // prefix to the target node somehow. The extra information will then be - // transferred to machine instruction and it denotes the prefix. - SDValue Chain = Node->getOperand(0); - SDValue Ptr = Node->getOperand(1); - SDValue Val = Node->getOperand(2); - SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; - if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) - return 0; - - bool isInc = false, isDec = false, isSub = false, isCN = false; - ConstantSDNode *CN = dyn_cast(Val); - if (CN && CN->getSExtValue() == (int32_t)CN->getSExtValue()) { - isCN = true; - int64_t CNVal = CN->getSExtValue(); - if (CNVal == 1) - isInc = true; - else if (CNVal == -1) - isDec = true; - else if (CNVal >= 0) - Val = CurDAG->getTargetConstant(CNVal, NVT); - else { - isSub = true; - Val = CurDAG->getTargetConstant(-CNVal, NVT); - } - } else if (Val.hasOneUse() && - Val.getOpcode() == ISD::SUB && - X86::isZeroNode(Val.getOperand(0))) { - isSub = true; - Val = Val.getOperand(1); - } - - DebugLoc dl = Node->getDebugLoc(); - unsigned Opc = 0; - switch (NVT.getSimpleVT().SimpleTy) { - default: return 0; - case MVT::i8: - if (isInc) - Opc = X86::LOCK_INC8m; - else if (isDec) - Opc = X86::LOCK_DEC8m; - else if (isSub) { - if (isCN) - Opc = X86::LOCK_SUB8mi; - else - Opc = X86::LOCK_SUB8mr; - } else { - if (isCN) - Opc = X86::LOCK_ADD8mi; - else - Opc = X86::LOCK_ADD8mr; - } - break; - case MVT::i16: - if (isInc) - Opc = X86::LOCK_INC16m; - else if (isDec) - Opc = X86::LOCK_DEC16m; - else if (isSub) { - if (isCN) { - if (immSext8(Val.getNode())) - Opc = X86::LOCK_SUB16mi8; - else - Opc = X86::LOCK_SUB16mi; - } else - Opc = X86::LOCK_SUB16mr; - } else { - if (isCN) { - if (immSext8(Val.getNode())) - Opc = X86::LOCK_ADD16mi8; - else - Opc = X86::LOCK_ADD16mi; - } else - Opc = X86::LOCK_ADD16mr; - } - break; - case MVT::i32: - if (isInc) - Opc = X86::LOCK_INC32m; - else if (isDec) - Opc = X86::LOCK_DEC32m; - else if (isSub) { - if (isCN) { - if (immSext8(Val.getNode())) - Opc = X86::LOCK_SUB32mi8; - else - Opc = X86::LOCK_SUB32mi; - } else - Opc = X86::LOCK_SUB32mr; - } else { - if (isCN) { - if (immSext8(Val.getNode())) - Opc = X86::LOCK_ADD32mi8; - else - Opc = X86::LOCK_ADD32mi; - } else - Opc = X86::LOCK_ADD32mr; - } - break; - case MVT::i64: - if (isInc) - Opc = X86::LOCK_INC64m; - else if (isDec) - Opc = X86::LOCK_DEC64m; - else if (isSub) { - Opc = X86::LOCK_SUB64mr; - if (isCN) { - if (immSext8(Val.getNode())) - Opc = X86::LOCK_SUB64mi8; - else if (i64immSExt32(Val.getNode())) - Opc = X86::LOCK_SUB64mi32; - } - } else { - Opc = X86::LOCK_ADD64mr; - if (isCN) { - if (immSext8(Val.getNode())) - Opc = X86::LOCK_ADD64mi8; - else if 
(i64immSExt32(Val.getNode())) - Opc = X86::LOCK_ADD64mi32; - } - } - break; - } - - SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, - dl, NVT), 0); - MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); - MemOp[0] = cast(Node)->getMemOperand(); - if (isInc || isDec) { - SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain }; - SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 6), 0); - cast(Ret)->setMemRefs(MemOp, MemOp + 1); - SDValue RetVals[] = { Undef, Ret }; - return CurDAG->getMergeValues(RetVals, 2, dl).getNode(); - } else { - SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain }; - SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0); - cast(Ret)->setMemRefs(MemOp, MemOp + 1); - SDValue RetVals[] = { Undef, Ret }; - return CurDAG->getMergeValues(RetVals, 2, dl).getNode(); - } -} - +/// Atomic opcode table +/// enum AtomicOpc { + ADD, + SUB, + INC, + DEC, OR, AND, XOR, @@ -1657,6 +1535,58 @@ enum AtomicSz { static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = { { + X86::LOCK_ADD8mi, + X86::LOCK_ADD8mr, + X86::LOCK_ADD16mi8, + X86::LOCK_ADD16mi, + X86::LOCK_ADD16mr, + X86::LOCK_ADD32mi8, + X86::LOCK_ADD32mi, + X86::LOCK_ADD32mr, + X86::LOCK_ADD64mi8, + X86::LOCK_ADD64mi32, + X86::LOCK_ADD64mr, + }, + { + X86::LOCK_SUB8mi, + X86::LOCK_SUB8mr, + X86::LOCK_SUB16mi8, + X86::LOCK_SUB16mi, + X86::LOCK_SUB16mr, + X86::LOCK_SUB32mi8, + X86::LOCK_SUB32mi, + X86::LOCK_SUB32mr, + X86::LOCK_SUB64mi8, + X86::LOCK_SUB64mi32, + X86::LOCK_SUB64mr, + }, + { + 0, + X86::LOCK_INC8m, + 0, + 0, + X86::LOCK_INC16m, + 0, + 0, + X86::LOCK_INC32m, + 0, + 0, + X86::LOCK_INC64m, + }, + { + 0, + X86::LOCK_DEC8m, + 0, + 0, + X86::LOCK_DEC16m, + 0, + 0, + X86::LOCK_DEC32m, + 0, + 0, + X86::LOCK_DEC64m, + }, + { X86::LOCK_OR8mi, X86::LOCK_OR8mr, X86::LOCK_OR16mi8, @@ -1667,7 +1597,7 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = { X86::LOCK_OR32mr, X86::LOCK_OR64mi8, X86::LOCK_OR64mi32, - X86::LOCK_OR64mr + X86::LOCK_OR64mr, }, { X86::LOCK_AND8mi, @@ -1680,7 +1610,7 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = { X86::LOCK_AND32mr, X86::LOCK_AND64mi8, X86::LOCK_AND64mi32, - X86::LOCK_AND64mr + X86::LOCK_AND64mr, }, { X86::LOCK_XOR8mi, @@ -1693,18 +1623,74 @@ static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = { X86::LOCK_XOR32mr, X86::LOCK_XOR64mi8, X86::LOCK_XOR64mi32, - X86::LOCK_XOR64mr + X86::LOCK_XOR64mr, } }; +// Return the target constant operand for atomic-load-op and do simple +// translations, such as from atomic-load-add to lock-sub. The return value is +// one of the following 3 cases: +// + target-constant, the operand could be supported as a target constant. +// + empty, the operand is not needed any more with the new op selected. +// + non-empty, otherwise. +static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG, + DebugLoc dl, + enum AtomicOpc &Op, EVT NVT, + SDValue Val) { + if (ConstantSDNode *CN = dyn_cast(Val)) { + int64_t CNVal = CN->getSExtValue(); + // Quit if not 32-bit imm. + if ((int32_t)CNVal != CNVal) + return Val; + // For atomic-load-add, we could do some optimizations. + if (Op == ADD) { + // Translate to INC/DEC if ADD by 1 or -1. + if ((CNVal == 1) || (CNVal == -1)) { + Op = (CNVal == 1) ? INC : DEC; + // No more constant operand after being translated into INC/DEC. + return SDValue(); + } + // Translate to SUB if ADD by negative value. 
+ if (CNVal < 0) { + Op = SUB; + CNVal = -CNVal; + } + } + return CurDAG->getTargetConstant(CNVal, NVT); + } + + // If the value operand is single-used, try to optimize it. + if (Op == ADD && Val.hasOneUse()) { + // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x). + if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) { + Op = SUB; + return Val.getOperand(1); + } + // A special case for i16, which needs truncating as, in most cases, it's + // promoted to i32. We will translate + // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x)) + if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 && + Val.getOperand(0).getOpcode() == ISD::SUB && + X86::isZeroNode(Val.getOperand(0).getOperand(0))) { + Op = SUB; + Val = Val.getOperand(0); + return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT, + Val.getOperand(1)); + } + } + + return Val; +} + SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { if (Node->hasAnyUseOfValue(0)) return 0; + DebugLoc dl = Node->getDebugLoc(); + // Optimize common patterns for __sync_or_and_fetch and similar arith // operations where the result is not used. This allows us to use the "lock" // version of the arithmetic instruction. - // FIXME: Same as for 'add' and 'sub', try to merge those down here. SDValue Chain = Node->getOperand(0); SDValue Ptr = Node->getOperand(1); SDValue Val = Node->getOperand(2); @@ -1715,6 +1701,8 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { // Which index into the table. enum AtomicOpc Op; switch (Node->getOpcode()) { + default: + return 0; case ISD::ATOMIC_LOAD_OR: Op = OR; break; @@ -1724,16 +1712,14 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { case ISD::ATOMIC_LOAD_XOR: Op = XOR; break; - default: - return 0; - } - - bool isCN = false; - ConstantSDNode *CN = dyn_cast(Val); - if (CN && (int32_t)CN->getSExtValue() == CN->getSExtValue()) { - isCN = true; - Val = CurDAG->getTargetConstant(CN->getSExtValue(), NVT); + case ISD::ATOMIC_LOAD_ADD: + Op = ADD; + break; } + + Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val); + bool isUnOp = !Val.getNode(); + bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant); unsigned Opc = 0; switch (NVT.getSimpleVT().SimpleTy) { @@ -1775,13 +1761,20 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) { assert(Opc != 0 && "Invalid arith lock transform!"); - DebugLoc dl = Node->getDebugLoc(); + SDValue Ret; SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, dl, NVT), 0); MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1); MemOp[0] = cast(Node)->getMemOperand(); - SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain }; - SDValue Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, 7), 0); + if (isUnOp) { + SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain }; + Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, + array_lengthof(Ops)), 0); + } else { + SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain }; + Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops, + array_lengthof(Ops)), 0); + } cast(Ret)->setMemRefs(MemOp, MemOp + 1); SDValue RetVals[] = { Undef, Ret }; return CurDAG->getMergeValues(RetVals, 2, dl).getNode(); @@ -2059,6 +2052,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { case X86ISD::ATOMSUB64_DAG: case X86ISD::ATOMNAND64_DAG: case X86ISD::ATOMAND64_DAG: + case X86ISD::ATOMMAX64_DAG: + case X86ISD::ATOMMIN64_DAG: + case 
X86ISD::ATOMUMAX64_DAG: + case X86ISD::ATOMUMIN64_DAG: case X86ISD::ATOMSWAP64_DAG: { unsigned Opc; switch (Opcode) { @@ -2069,6 +2066,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { case X86ISD::ATOMSUB64_DAG: Opc = X86::ATOMSUB6432; break; case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break; case X86ISD::ATOMAND64_DAG: Opc = X86::ATOMAND6432; break; + case X86ISD::ATOMMAX64_DAG: Opc = X86::ATOMMAX6432; break; + case X86ISD::ATOMMIN64_DAG: Opc = X86::ATOMMIN6432; break; + case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break; + case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break; case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break; } SDNode *RetVal = SelectAtomic64(Node, Opc); @@ -2077,15 +2078,10 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { break; } - case ISD::ATOMIC_LOAD_ADD: { - SDNode *RetVal = SelectAtomicLoadAdd(Node, NVT); - if (RetVal) - return RetVal; - break; - } case ISD::ATOMIC_LOAD_XOR: case ISD::ATOMIC_LOAD_AND: - case ISD::ATOMIC_LOAD_OR: { + case ISD::ATOMIC_LOAD_OR: + case ISD::ATOMIC_LOAD_ADD: { SDNode *RetVal = SelectAtomicLoadArith(Node, NVT); if (RetVal) return RetVal; @@ -2116,7 +2112,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { // Make sure that we don't change the operation by removing bits. // This only matters for OR and XOR, AND is unaffected. - if (Opcode != ISD::AND && ((Val >> ShlVal) << ShlVal) != Val) + uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1; + if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0) break; unsigned ShlOp, Op; @@ -2199,13 +2196,16 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { SDValue N1 = Node->getOperand(1); bool isSigned = Opcode == ISD::SMUL_LOHI; + bool hasBMI2 = Subtarget->hasBMI2(); if (!isSigned) { switch (NVT.getSimpleVT().SimpleTy) { default: llvm_unreachable("Unsupported VT!"); case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break; case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break; - case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break; - case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break; + case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r; + MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break; + case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r; + MOpc = hasBMI2 ? 
X86::MULX64rm : X86::MUL64m; break; } } else { switch (NVT.getSimpleVT().SimpleTy) { @@ -2217,13 +2217,31 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { } } - unsigned LoReg, HiReg; - switch (NVT.getSimpleVT().SimpleTy) { - default: llvm_unreachable("Unsupported VT!"); - case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break; - case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break; - case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break; - case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break; + unsigned SrcReg, LoReg, HiReg; + switch (Opc) { + default: llvm_unreachable("Unknown MUL opcode!"); + case X86::IMUL8r: + case X86::MUL8r: + SrcReg = LoReg = X86::AL; HiReg = X86::AH; + break; + case X86::IMUL16r: + case X86::MUL16r: + SrcReg = LoReg = X86::AX; HiReg = X86::DX; + break; + case X86::IMUL32r: + case X86::MUL32r: + SrcReg = LoReg = X86::EAX; HiReg = X86::EDX; + break; + case X86::IMUL64r: + case X86::MUL64r: + SrcReg = LoReg = X86::RAX; HiReg = X86::RDX; + break; + case X86::MULX32rr: + SrcReg = X86::EDX; LoReg = HiReg = 0; + break; + case X86::MULX64rr: + SrcReg = X86::RDX; LoReg = HiReg = 0; + break; } SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4; @@ -2235,22 +2253,47 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { std::swap(N0, N1); } - SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg, + SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg, N0, SDValue()).getValue(1); + SDValue ResHi, ResLo; if (foldedLoad) { + SDValue Chain; SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0), InFlag }; - SDNode *CNode = - CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops, - array_lengthof(Ops)); - InFlag = SDValue(CNode, 1); + if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) { + SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue); + SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops, + array_lengthof(Ops)); + ResHi = SDValue(CNode, 0); + ResLo = SDValue(CNode, 1); + Chain = SDValue(CNode, 2); + InFlag = SDValue(CNode, 3); + } else { + SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue); + SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops, + array_lengthof(Ops)); + Chain = SDValue(CNode, 0); + InFlag = SDValue(CNode, 1); + } // Update the chain. - ReplaceUses(N1.getValue(1), SDValue(CNode, 0)); + ReplaceUses(N1.getValue(1), Chain); } else { - SDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag); - InFlag = SDValue(CNode, 0); + SDValue Ops[] = { N1, InFlag }; + if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) { + SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue); + SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, + array_lengthof(Ops)); + ResHi = SDValue(CNode, 0); + ResLo = SDValue(CNode, 1); + InFlag = SDValue(CNode, 2); + } else { + SDVTList VTs = CurDAG->getVTList(MVT::Glue); + SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, + array_lengthof(Ops)); + InFlag = SDValue(CNode, 0); + } } // Prevent use of AH in a REX instruction by referencing AX instead. @@ -2275,19 +2318,25 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { } // Copy the low half of the result, if it is needed. 
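Aside on the BMI2 path introduced above (an editorial sketch, not part of the patch): MULX takes one multiplicand implicitly in EDX/RDX, writes both halves of the product to two explicit destination registers, and leaves EFLAGS untouched, which is why the MULX cases set SrcReg to EDX/RDX, leave LoReg/HiReg at 0, and read ResLo/ResHi directly off the machine node instead of copying them out of fixed registers. A 64x64->128 multiply of the following shape is the kind of source that reaches this UMUL_LOHI selection and, with -mbmi2, may now come out as mulx:

    // High half of an unsigned 64x64 multiply (GNU __int128 extension assumed).
    unsigned long long umul_hi(unsigned long long a, unsigned long long b) {
      return (unsigned long long)(((unsigned __int128)a * b) >> 64);
    }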
if (!SDValue(Node, 0).use_empty()) { - SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, - LoReg, NVT, InFlag); - InFlag = Result.getValue(2); - ReplaceUses(SDValue(Node, 0), Result); - DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); + if (ResLo.getNode() == 0) { + assert(LoReg && "Register for low half is not defined!"); + ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT, + InFlag); + InFlag = ResLo.getValue(2); + } + ReplaceUses(SDValue(Node, 0), ResLo); + DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n'); } // Copy the high half of the result, if it is needed. if (!SDValue(Node, 1).use_empty()) { - SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, - HiReg, NVT, InFlag); - InFlag = Result.getValue(2); - ReplaceUses(SDValue(Node, 1), Result); - DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n'); + if (ResHi.getNode() == 0) { + assert(HiReg && "Register for high half is not defined!"); + ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT, + InFlag); + InFlag = ResHi.getValue(2); + } + ReplaceUses(SDValue(Node, 1), ResHi); + DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n'); } return NULL; @@ -2488,7 +2537,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { MVT::i8, Reg); // Emit a testb. - return CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, Subreg, Imm); + SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32, + Subreg, Imm); + // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has + // one, do not call ReplaceAllUsesWith. + ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), + SDValue(NewNode, 0)); + return NULL; } // For example, "testl %eax, $2048" to "testb %ah, $8". @@ -2519,8 +2574,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only // target GR8_NOREX registers, so make sure the register class is // forced. - return CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, MVT::i32, - Subreg, ShiftedImm); + SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl, + MVT::i32, Subreg, ShiftedImm); + // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has + // one, do not call ReplaceAllUsesWith. + ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), + SDValue(NewNode, 0)); + return NULL; } // For example, "testl %eax, $32776" to "testw %ax, $32776". @@ -2536,7 +2596,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { MVT::i16, Reg); // Emit a testw. - return CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, Subreg, Imm); + SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32, + Subreg, Imm); + // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has + // one, do not call ReplaceAllUsesWith. + ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)), + SDValue(NewNode, 0)); + return NULL; } // For example, "testq %rax, $268468232" to "testl %eax, $268468232". @@ -2552,7 +2618,13 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { MVT::i32, Reg); // Emit a testl. - return CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, Subreg, Imm); + SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32, + Subreg, Imm); + // Replace SUB|CMP with TEST, since SUB has two outputs while TEST has + // one, do not call ReplaceAllUsesWith. + ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 
1 : 0)), + SDValue(NewNode, 0)); + return NULL; } } break; @@ -2607,85 +2679,6 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { return Result; } - - // FIXME: Custom handling because TableGen doesn't support multiple implicit - // defs in an instruction pattern - case X86ISD::PCMPESTRI: { - SDValue N0 = Node->getOperand(0); - SDValue N1 = Node->getOperand(1); - SDValue N2 = Node->getOperand(2); - SDValue N3 = Node->getOperand(3); - SDValue N4 = Node->getOperand(4); - - // Make sure last argument is a constant - ConstantSDNode *Cst = dyn_cast(N4); - if (!Cst) - break; - - uint64_t Imm = Cst->getZExtValue(); - - SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, - X86::EAX, N1, SDValue()).getValue(1); - InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX, - N3, InFlag).getValue(1); - - SDValue Ops[] = { N0, N2, getI8Imm(Imm), InFlag }; - unsigned Opc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : - X86::PCMPESTRIrr; - InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops, - array_lengthof(Ops)), 0); - - if (!SDValue(Node, 0).use_empty()) { - SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, - X86::ECX, NVT, InFlag); - InFlag = Result.getValue(2); - ReplaceUses(SDValue(Node, 0), Result); - } - if (!SDValue(Node, 1).use_empty()) { - SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, - X86::EFLAGS, NVT, InFlag); - InFlag = Result.getValue(2); - ReplaceUses(SDValue(Node, 1), Result); - } - - return NULL; - } - - // FIXME: Custom handling because TableGen doesn't support multiple implicit - // defs in an instruction pattern - case X86ISD::PCMPISTRI: { - SDValue N0 = Node->getOperand(0); - SDValue N1 = Node->getOperand(1); - SDValue N2 = Node->getOperand(2); - - // Make sure last argument is a constant - ConstantSDNode *Cst = dyn_cast(N2); - if (!Cst) - break; - - uint64_t Imm = Cst->getZExtValue(); - - SDValue Ops[] = { N0, N1, getI8Imm(Imm) }; - unsigned Opc = Subtarget->hasAVX() ? 
X86::VPCMPISTRIrr : - X86::PCMPISTRIrr; - SDValue InFlag = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Ops, - array_lengthof(Ops)), 0); - - if (!SDValue(Node, 0).use_empty()) { - SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, - X86::ECX, NVT, InFlag); - InFlag = Result.getValue(2); - ReplaceUses(SDValue(Node, 0), Result); - } - if (!SDValue(Node, 1).use_empty()) { - SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, - X86::EFLAGS, NVT, InFlag); - InFlag = Result.getValue(2); - ReplaceUses(SDValue(Node, 1), Result); - } - - return NULL; - } } SDNode *ResNode = SelectCode(Node); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index c77355f..b35fb51 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -85,7 +85,7 @@ static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal, unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / 128) * ElemsPerChunk); - SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32); + SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); SDValue Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx); @@ -118,7 +118,7 @@ static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/128) * ElemsPerChunk); - SDValue VecIdx = DAG.getConstant(NormalizedIdxVal, MVT::i32); + SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal); return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx); } @@ -158,10 +158,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) Subtarget = &TM.getSubtarget(); X86ScalarSSEf64 = Subtarget->hasSSE2(); X86ScalarSSEf32 = Subtarget->hasSSE1(); - X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP; RegInfo = TM.getRegisterInfo(); - TD = getTargetData(); + TD = getDataLayout(); // Set up the TargetLowering object. static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }; @@ -180,7 +179,11 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setSchedulingPreference(Sched::ILP); else setSchedulingPreference(Sched::RegPressure); - setStackPointerRegisterToSaveRestore(X86StackPtr); + setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister()); + + // Bypass i32 with i8 on Atom when compiling with O2 + if (Subtarget->hasSlowDivide() && TM.getOptLevel() >= CodeGenOpt::Default) + addBypassSlowDiv(32, 8); if (Subtarget->isTargetWindows() && !Subtarget->isTargetCygMing()) { // Setup Windows compiler runtime calls. @@ -453,6 +456,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::SETCC , MVT::i64 , Custom); } setOperationAction(ISD::EH_RETURN , MVT::Other, Custom); + // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intened to support + // SjLj exception handling but a light-weight setjmp/longjmp replacement to + // support continuation, user-level threading, and etc.. As a result, no + // other SjLj exception interfaces are implemented and please don't build + // your own exception handling based on them. + // LLVM/Clang supports zero-cost DWARF exception handling. + setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom); + setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom); // Darwin ABI issue. 
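Illustration for the EH_SJLJ note above (assumed typical usage, not taken from the patch): these custom-lowered nodes back the llvm.eh.sjlj.setjmp/longjmp intrinsics that __builtin_setjmp/__builtin_longjmp compile to, i.e. the lightweight buffer-based jump, not full SjLj exception handling.

    // Minimal user-level shape these nodes support. __builtin_setjmp uses a
    // fixed five-pointer buffer; __builtin_longjmp may only pass the value 1.
    static void *jmpbuf[5];
    int run_guarded(void (*body)(void)) {
      if (__builtin_setjmp(jmpbuf))
        return -1;                 // reached via __builtin_longjmp(jmpbuf, 1)
      body();
      return 0;
    }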
setOperationAction(ISD::ConstantPool , MVT::i32 , Custom); @@ -510,6 +521,10 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i64, Custom); setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i64, Custom); setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i64, Custom); + setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i64, Custom); } if (Subtarget->hasCmpxchg16b()) { @@ -541,6 +556,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom); setOperationAction(ISD::TRAP, MVT::Other, Legal); + setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal); // VASTART needs to be custom lowered to use the VarArgsFrameIndex setOperationAction(ISD::VASTART , MVT::Other, Custom); @@ -643,7 +659,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); if (!TM.Options.UnsafeFPMath) { + setOperationAction(ISD::FSIN , MVT::f32 , Expand); setOperationAction(ISD::FSIN , MVT::f64 , Expand); + setOperationAction(ISD::FCOS , MVT::f32 , Expand); setOperationAction(ISD::FCOS , MVT::f64 , Expand); } addLegalFPImmediate(APFloat(+0.0)); // FLD0 @@ -735,6 +753,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand); + setOperationAction(ISD::FFLOOR, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand); setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand); @@ -824,6 +843,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FDIV, MVT::v4f32, Legal); setOperationAction(ISD::FSQRT, MVT::v4f32, Legal); setOperationAction(ISD::FNEG, MVT::v4f32, Custom); + setOperationAction(ISD::FABS, MVT::v4f32, Custom); setOperationAction(ISD::LOAD, MVT::v4f32, Legal); setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom); setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom); @@ -857,6 +877,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FDIV, MVT::v2f64, Legal); setOperationAction(ISD::FSQRT, MVT::v2f64, Legal); setOperationAction(ISD::FNEG, MVT::v2f64, Custom); + setOperationAction(ISD::FABS, MVT::v2f64, Custom); setOperationAction(ISD::SETCC, MVT::v2i64, Custom); setOperationAction(ISD::SETCC, MVT::v16i8, Custom); @@ -925,6 +946,18 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal); setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal); + + setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom); + setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom); + // As there is no 64-bit GPR available, we need build a special custom + // sequence to convert from v2i32 to v2f32. 
+ if (!Subtarget->is64Bit()) + setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom); + + setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom); + setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom); + + setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, Legal); } if (Subtarget->hasSSE41()) { @@ -939,6 +972,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FRINT, MVT::f64, Legal); setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal); + setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal); + setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal); + // FIXME: Do we need to handle scalar-to-vector here? setOperationAction(ISD::MUL, MVT::v4i32, Legal); @@ -1016,19 +1052,33 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FMUL, MVT::v8f32, Legal); setOperationAction(ISD::FDIV, MVT::v8f32, Legal); setOperationAction(ISD::FSQRT, MVT::v8f32, Legal); + setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal); setOperationAction(ISD::FNEG, MVT::v8f32, Custom); + setOperationAction(ISD::FABS, MVT::v8f32, Custom); setOperationAction(ISD::FADD, MVT::v4f64, Legal); setOperationAction(ISD::FSUB, MVT::v4f64, Legal); setOperationAction(ISD::FMUL, MVT::v4f64, Legal); setOperationAction(ISD::FDIV, MVT::v4f64, Legal); setOperationAction(ISD::FSQRT, MVT::v4f64, Legal); + setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal); setOperationAction(ISD::FNEG, MVT::v4f64, Custom); + setOperationAction(ISD::FABS, MVT::v4f64, Custom); + + setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom); + + setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom); setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); + setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom); + setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom); + setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom); + + setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, Legal); + setOperationAction(ISD::SRL, MVT::v16i16, Custom); setOperationAction(ISD::SRL, MVT::v32i8, Custom); @@ -1052,7 +1102,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::VSELECT, MVT::v8i32, Legal); setOperationAction(ISD::VSELECT, MVT::v8f32, Legal); - if (Subtarget->hasFMA()) { + if (Subtarget->hasFMA() || Subtarget->hasFMA4()) { setOperationAction(ISD::FMA, MVT::v8f32, Custom); setOperationAction(ISD::FMA, MVT::v4f64, Custom); setOperationAction(ISD::FMA, MVT::v4f32, Custom); @@ -1217,10 +1267,8 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setTargetDAGCombine(ISD::ANY_EXTEND); setTargetDAGCombine(ISD::SIGN_EXTEND); setTargetDAGCombine(ISD::TRUNCATE); - setTargetDAGCombine(ISD::UINT_TO_FP); setTargetDAGCombine(ISD::SINT_TO_FP); setTargetDAGCombine(ISD::SETCC); - setTargetDAGCombine(ISD::FP_TO_SINT); if (Subtarget->is64Bit()) setTargetDAGCombine(ISD::MUL); setTargetDAGCombine(ISD::XOR); @@ -1318,7 +1366,7 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size, // cases like PR2962. This should be removed when PR2962 is fixed. 
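A scalar model of the Custom FP_TO_SINT for v8i16 registered above (a sketch; the vector lowering appears further down in this diff as an fp_to_sint to v8i32 followed by a truncate):

    // Converting through 32 bits and then narrowing gives the same in-range
    // result, which is what the v8i16 path does vector-wide.
    short f_to_i16(float x) {
      int wide = (int)x;           // cvttss2si-style convert to 32 bits
      return (short)wide;          // then truncate to 16 bits
    }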
const Function *F = MF.getFunction(); if (IsZeroVal && - !F->hasFnAttr(Attribute::NoImplicitFloat)) { + !F->getFnAttributes().hasAttribute(Attributes::NoImplicitFloat)) { if (Size >= 16 && (Subtarget->isUnalignedMemAccessFast() || ((DstAlign == 0 || DstAlign >= 16) && @@ -1986,7 +2034,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, TotalNumIntRegs); - bool NoImplicitFloatOps = Fn->hasFnAttr(Attribute::NoImplicitFloat); + bool NoImplicitFloatOps = Fn->getFnAttributes(). + hasAttribute(Attributes::NoImplicitFloat); assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && "SSE register cannot be used when SSE is disabled!"); assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && @@ -2134,16 +2183,14 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, /// optimization is performed and it is required (FPDiff!=0). static SDValue EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, - SDValue Chain, SDValue RetAddrFrIdx, - bool Is64Bit, int FPDiff, DebugLoc dl) { + SDValue Chain, SDValue RetAddrFrIdx, EVT PtrVT, + unsigned SlotSize, int FPDiff, DebugLoc dl) { // Store the return address to the appropriate stack slot. if (!FPDiff) return Chain; // Calculate the new stack slot for the return address. - int SlotSize = Is64Bit ? 8 : 4; int NewReturnAddrFI = MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize, false); - EVT VT = Is64Bit ? MVT::i64 : MVT::i32; - SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT); + SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT); Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, MachinePointerInfo::getFixedStack(NewReturnAddrFI), false, false, 0); @@ -2178,7 +2225,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Check if it's really possible to do a tail call. isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg, SR != NotStructReturn, - MF.getFunction()->hasStructRetAttr(), + MF.getFunction()->hasStructRetAttr(), CLI.RetTy, Outs, OutVals, Ins, DAG); // Sibcalls are automatically detected tailcalls which do not require @@ -2218,14 +2265,15 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, int FPDiff = 0; if (isTailCall && !IsSibcall) { // Lower arguments at fp - stackoffset + fpdiff. - unsigned NumBytesCallerPushed = - MF.getInfo()->getBytesToPopOnReturn(); + X86MachineFunctionInfo *X86Info = MF.getInfo(); + unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn(); + FPDiff = NumBytesCallerPushed - NumBytes; // Set the delta of movement of the returnaddr stackslot. // But only set if delta is greater than previous delta. - if (FPDiff < (MF.getInfo()->getTCReturnAddrDelta())) - MF.getInfo()->setTCReturnAddrDelta(FPDiff); + if (FPDiff < X86Info->getTCReturnAddrDelta()) + X86Info->setTCReturnAddrDelta(FPDiff); } if (!IsSibcall) @@ -2302,7 +2350,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } else if (!IsSibcall && (!isTailCall || isByVal)) { assert(VA.isMemLoc()); if (StackPtr.getNode() == 0) - StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, getPointerTy()); + StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), + getPointerTy()); MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, dl, DAG, VA, Flags)); } @@ -2390,7 +2439,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Copy relative to framepointer. 
SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset()); if (StackPtr.getNode() == 0) - StackPtr = DAG.getCopyFromReg(Chain, dl, X86StackPtr, + StackPtr = DAG.getCopyFromReg(Chain, dl, + RegInfo->getStackRegister(), getPointerTy()); Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source); @@ -2412,7 +2462,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, &MemOpChains2[0], MemOpChains2.size()); // Store the return address to the appropriate stack slot. - Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, Is64Bit, + Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, + getPointerTy(), RegInfo->getSlotSize(), FPDiff, dl); } @@ -2462,7 +2513,8 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, OpFlags = X86II::MO_DARWIN_STUB; } else if (Subtarget->isPICStyleRIPRel() && isa(GV) && - cast(GV)->hasFnAttr(Attribute::NonLazyBind)) { + cast(GV)->getFnAttributes(). + hasAttribute(Attributes::NonLazyBind)) { // If the function is marked as non-lazy, generate an indirect call // which loads from the GOT directly. This avoids runtime overhead // at the cost of eager binding (and one extra byte of encoding). @@ -2623,7 +2675,7 @@ X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, unsigned StackAlignment = TFI.getStackAlignment(); uint64_t AlignMask = StackAlignment - 1; int64_t Offset = StackSize; - uint64_t SlotSize = TD->getPointerSize(); + unsigned SlotSize = RegInfo->getSlotSize(); if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { // Number smaller than 12 so just add the difference. Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); @@ -2698,6 +2750,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, + Type *RetTy, const SmallVectorImpl &Outs, const SmallVectorImpl &OutVals, const SmallVectorImpl &Ins, @@ -2709,6 +2762,13 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, // If -tailcallopt is specified, make fastcc functions tail-callable. const MachineFunction &MF = DAG.getMachineFunction(); const Function *CallerF = DAG.getMachineFunction().getFunction(); + + // If the function return type is x86_fp80 and the callee return type is not, + // then the FP_EXTEND of the call result is not a nop. It's not safe to + // perform a tailcall optimization here. + if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty()) + return false; + CallingConv::ID CallerCC = CallerF->getCallingConv(); bool CCMatch = CallerCC == CalleeCC; @@ -2832,7 +2892,7 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, MachineFrameInfo *MFI = MF.getFrameInfo(); const MachineRegisterInfo *MRI = &MF.getRegInfo(); const X86InstrInfo *TII = - ((X86TargetMachine&)getTargetMachine()).getInstrInfo(); + ((const X86TargetMachine&)getTargetMachine()).getInstrInfo(); for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; SDValue Arg = OutVals[i]; @@ -2983,7 +3043,7 @@ SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { if (ReturnAddrIndex == 0) { // Set up a frame object for the return address. 
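Example of the case the new RetTy check above rejects (illustrative code, not from the patch): when the caller returns x86_fp80 but the callee does not, the caller still has to extend the callee's result, so the FP_EXTEND after the call is not a nop and the call must not be turned into a tail call.

    double helper(double x) { return x * 0.5; }  // result comes back as f64
    long double wrapper(double x) {               // caller returns x86_fp80
      return helper(x);                           // needs FP_EXTEND after the call
    }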
- uint64_t SlotSize = TD->getPointerSize(); + unsigned SlotSize = RegInfo->getSlotSize(); ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize, -SlotSize, false); FuncInfo->setRAIndex(ReturnAddrIndex); @@ -3506,25 +3566,26 @@ SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp, if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i])) MatchOddMask = false; } - static const int CompactionMaskEven[] = {0, 2, -1, -1, 4, 6, -1, -1}; - static const int CompactionMaskOdd [] = {1, 3, -1, -1, 5, 7, -1, -1}; - const int *CompactionMask; - if (MatchEvenMask) - CompactionMask = CompactionMaskEven; - else if (MatchOddMask) - CompactionMask = CompactionMaskOdd; - else + if (!MatchEvenMask && !MatchOddMask) return SDValue(); SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT); - SDValue Op0 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(0), - UndefNode, CompactionMask); - SDValue Op1 = DAG.getVectorShuffle(VT, dl, SVOp->getOperand(1), - UndefNode, CompactionMask); - static const int UnpackMask[] = {0, 8, 1, 9, 4, 12, 5, 13}; - return DAG.getVectorShuffle(VT, dl, Op0, Op1, UnpackMask); + SDValue Op0 = SVOp->getOperand(0); + SDValue Op1 = SVOp->getOperand(1); + + if (MatchEvenMask) { + // Shift the second operand right to 32 bits. + static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 }; + Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask); + } else { + // Shift the first operand left to 32 bits. + static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 }; + Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask); + } + static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15}; + return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask); } /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand @@ -4575,7 +4636,6 @@ static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG, MVT ShufVT = V.getValueType().getSimpleVT(); unsigned NumElems = ShufVT.getVectorNumElements(); SmallVector ShuffleMask; - SDValue ImmN; bool IsUnary; if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary)) @@ -4977,6 +5037,18 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl &Elts, LDBase->getAlignment(), false/*isVolatile*/, true/*ReadMem*/, false/*WriteMem*/); + + // Make sure the newly-created LOAD is in the same position as LDBase in + // terms of dependency. We create a TokenFactor for LDBase and ResNode, and + // update uses of LDBase's output chain to use the TokenFactor. + if (LDBase->hasAnyUseOfValue(1)) { + SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, + SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1)); + DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain); + DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1), + SDValue(ResNode.getNode(), 1)); + } + return DAG.getNode(ISD::BITCAST, DL, VT, ResNode); } return SDValue(); @@ -4990,7 +5062,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl &Elts, /// The VBROADCAST node is returned when a pattern is found, /// or SDValue() otherwise. 
SDValue -X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { +X86TargetLowering::LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const { if (!Subtarget->hasAVX()) return SDValue(); @@ -5114,80 +5186,78 @@ X86TargetLowering::LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const { return SDValue(); } -// LowerVectorFpExtend - Recognize the scalarized FP_EXTEND from v2f32 to v2f64 -// and convert it into X86ISD::VFPEXT due to the current ISD::FP_EXTEND has the -// constraint of matching input/output vector elements. SDValue -X86TargetLowering::LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const { - DebugLoc DL = Op.getDebugLoc(); - SDNode *N = Op.getNode(); +X86TargetLowering::buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); - unsigned NumElts = Op.getNumOperands(); - // Check supported types and sub-targets. - // - // Only v2f32 -> v2f64 needs special handling. - if (VT != MVT::v2f64 || !Subtarget->hasSSE2()) + // Skip if insert_vec_elt is not supported. + if (!isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) return SDValue(); - SDValue VecIn; - EVT VecInVT; - SmallVector Mask; - EVT SrcVT = MVT::Other; + DebugLoc DL = Op.getDebugLoc(); + unsigned NumElems = Op.getNumOperands(); - // Check the patterns could be translated into X86vfpext. - for (unsigned i = 0; i < NumElts; ++i) { - SDValue In = N->getOperand(i); - unsigned Opcode = In.getOpcode(); + SDValue VecIn1; + SDValue VecIn2; + SmallVector InsertIndices; + SmallVector Mask(NumElems, -1); + + for (unsigned i = 0; i != NumElems; ++i) { + unsigned Opc = Op.getOperand(i).getOpcode(); - // Skip if the element is undefined. - if (Opcode == ISD::UNDEF) { - Mask.push_back(-1); + if (Opc == ISD::UNDEF) continue; - } - // Quit if one of the elements is not defined from 'fpext'. - if (Opcode != ISD::FP_EXTEND) - return SDValue(); + if (Opc != ISD::EXTRACT_VECTOR_ELT) { + // Quit if more than 1 elements need inserting. + if (InsertIndices.size() > 1) + return SDValue(); + + InsertIndices.push_back(i); + continue; + } - // Check how the source of 'fpext' is defined. - SDValue L2In = In.getOperand(0); - EVT L2InVT = L2In.getValueType(); + SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0); + SDValue ExtIdx = Op.getOperand(i).getOperand(1); - // Check the original type - if (SrcVT == MVT::Other) - SrcVT = L2InVT; - else if (SrcVT != L2InVT) // Quit if non-homogenous typed. + // Quit if extracted from vector of different type. + if (ExtractedFromVec.getValueType() != VT) return SDValue(); - // Check whether the value being 'fpext'ed is extracted from the same - // source. - Opcode = L2In.getOpcode(); - - // Quit if it's not extracted with a constant index. - if (Opcode != ISD::EXTRACT_VECTOR_ELT || - !isa(L2In.getOperand(1))) + // Quit if non-constant index. + if (!isa(ExtIdx)) return SDValue(); - SDValue ExtractedFromVec = L2In.getOperand(0); + if (VecIn1.getNode() == 0) + VecIn1 = ExtractedFromVec; + else if (VecIn1 != ExtractedFromVec) { + if (VecIn2.getNode() == 0) + VecIn2 = ExtractedFromVec; + else if (VecIn2 != ExtractedFromVec) + // Quit if more than 2 vectors to shuffle + return SDValue(); + } - if (VecIn.getNode() == 0) { - VecIn = ExtractedFromVec; - VecInVT = ExtractedFromVec.getValueType(); - } else if (VecIn != ExtractedFromVec) // Quit if built from more than 1 vec. 
- return SDValue(); + unsigned Idx = cast(ExtIdx)->getZExtValue(); - Mask.push_back(cast(L2In.getOperand(1))->getZExtValue()); + if (ExtractedFromVec == VecIn1) + Mask[i] = Idx; + else if (ExtractedFromVec == VecIn2) + Mask[i] = Idx + NumElems; } - // Fill the remaining mask as undef. - for (unsigned i = NumElts; i < VecInVT.getVectorNumElements(); ++i) - Mask.push_back(-1); + if (VecIn1.getNode() == 0) + return SDValue(); - return DAG.getNode(X86ISD::VFPEXT, DL, VT, - DAG.getVectorShuffle(VecInVT, DL, - VecIn, DAG.getUNDEF(VecInVT), - &Mask[0])); + VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); + SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]); + for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) { + unsigned Idx = InsertIndices[i]; + NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx), + DAG.getIntPtrConstant(Idx)); + } + + return NV; } SDValue @@ -5222,10 +5292,6 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { if (Broadcast.getNode()) return Broadcast; - SDValue FpExt = LowerVectorFpExtend(Op, DAG); - if (FpExt.getNode()) - return FpExt; - unsigned EVTBits = ExtVT.getSizeInBits(); unsigned NumZero = 0; @@ -5470,6 +5536,11 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { if (LD.getNode()) return LD; + // Check for a build vector from mostly shuffle plus few inserting. + SDValue Sh = buildFromShuffleMostly(Op, DAG); + if (Sh.getNode()) + return Sh; + // For SSE 4.1, use insertps to put the high elements into the low element. if (getSubtarget()->hasSSE41()) { SDValue Result; @@ -5536,8 +5607,7 @@ static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl); } -SDValue -X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) { assert(Op.getNumOperands() == 2); // 256-bit AVX can use the vinsertf128 instruction to create 256-bit vectors @@ -5546,9 +5616,9 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { } // Try to lower a shuffle node into a simple blend instruction. -static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, - const X86Subtarget *Subtarget, - SelectionDAG &DAG) { +static SDValue +LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, + const X86Subtarget *Subtarget, SelectionDAG &DAG) { SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); DebugLoc dl = SVOp->getDebugLoc(); @@ -5618,9 +5688,9 @@ static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp, // 2. [ssse3] 1 x pshufb // 3. [ssse3] 2 x pshufb + 1 x por // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) -SDValue -X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, - SelectionDAG &DAG) const { +static SDValue +LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { ShuffleVectorSDNode *SVOp = cast(Op); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); @@ -5877,8 +5947,6 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, DebugLoc dl = SVOp->getDebugLoc(); ArrayRef MaskVals = SVOp->getMask(); - bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; - // If we have SSSE3, case 1 is generated when all result bytes come from // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is // present, fall back to case 3. 
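Source-level shape that the new buildFromShuffleMostly above targets (a sketch using the Clang/GCC vector extension; names are illustrative): a build_vector whose lanes are mostly extracts from one or two source vectors, plus at most a couple of fresh scalars, is now emitted as one vector_shuffle followed by single-lane inserts rather than being assembled element by element.

    typedef int v4si __attribute__((vector_size(16)));
    // Three lanes come from 'a', one lane is the scalar 'x': mostly a shuffle.
    v4si mostly_shuffle(v4si a, int x) {
      v4si r = { a[2], a[0], x, a[3] };
      return r;
    }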
@@ -5902,7 +5970,11 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i8, &pshufbMask[0], 16)); - if (V2IsUndef) + + // As PSHUFB will zero elements with negative indices, it's safe to ignore + // the 2nd operand if it's undefined or zero. + if (V2.getOpcode() == ISD::UNDEF || + ISD::isBuildVectorAllZeros(V2.getNode())) return V1; // Calculate the shuffle mask for the second input, shuffle it, and @@ -5988,6 +6060,51 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp, return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV); } +// v32i8 shuffles - Translate to VPSHUFB if possible. +static +SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp, + const X86Subtarget *Subtarget, + SelectionDAG &DAG) { + EVT VT = SVOp->getValueType(0); + SDValue V1 = SVOp->getOperand(0); + SDValue V2 = SVOp->getOperand(1); + DebugLoc dl = SVOp->getDebugLoc(); + SmallVector MaskVals(SVOp->getMask().begin(), SVOp->getMask().end()); + + bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; + bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode()); + bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode()); + + // VPSHUFB may be generated if + // (1) one of input vector is undefined or zeroinitializer. + // The mask value 0x80 puts 0 in the corresponding slot of the vector. + // And (2) the mask indexes don't cross the 128-bit lane. + if (VT != MVT::v32i8 || !Subtarget->hasAVX2() || + (!V2IsUndef && !V2IsAllZero && !V1IsAllZero)) + return SDValue(); + + if (V1IsAllZero && !V2IsAllZero) { + CommuteVectorShuffleMask(MaskVals, 32); + V1 = V2; + } + SmallVector pshufbMask; + for (unsigned i = 0; i != 32; i++) { + int EltIdx = MaskVals[i]; + if (EltIdx < 0 || EltIdx >= 32) + EltIdx = 0x80; + else { + if ((EltIdx >= 16 && i < 16) || (EltIdx < 16 && i >= 16)) + // Cross lane is not allowed. + return SDValue(); + EltIdx &= 0xf; + } + pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); + } + return DAG.getNode(X86ISD::PSHUFB, dl, MVT::v32i8, V1, + DAG.getNode(ISD::BUILD_VECTOR, dl, + MVT::v32i8, &pshufbMask[0], 32)); +} + /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be /// done when every pair / quad of shuffle mask elements point to elements in @@ -6322,17 +6439,17 @@ LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { } static bool MayFoldVectorLoad(SDValue V) { - if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) + while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST) V = V.getOperand(0); + if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) V = V.getOperand(0); if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR && V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF) // BUILD_VECTOR (load), undef V = V.getOperand(0); - if (MayFoldLoad(V)) - return true; - return false; + + return MayFoldLoad(V); } // FIXME: the version above should always be used. Since there's @@ -6455,6 +6572,81 @@ SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { getShuffleSHUFImmediate(SVOp), DAG); } +// Reduce a vector shuffle to zext. +SDValue +X86TargetLowering::lowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const { + // PMOVZX is only available from SSE41. + if (!Subtarget->hasSSE41()) + return SDValue(); + + EVT VT = Op.getValueType(); + + // Only AVX2 support 256-bit vector integer extending. 
+ if (!Subtarget->hasAVX2() && VT.is256BitVector()) + return SDValue(); + + ShuffleVectorSDNode *SVOp = cast(Op); + DebugLoc DL = Op.getDebugLoc(); + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + unsigned NumElems = VT.getVectorNumElements(); + + // Extending is an unary operation and the element type of the source vector + // won't be equal to or larger than i64. + if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() || + VT.getVectorElementType() == MVT::i64) + return SDValue(); + + // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4. + unsigned Shift = 1; // Start from 2, i.e. 1 << 1. + while ((1U << Shift) < NumElems) { + if (SVOp->getMaskElt(1U << Shift) == 1) + break; + Shift += 1; + // The maximal ratio is 8, i.e. from i8 to i64. + if (Shift > 3) + return SDValue(); + } + + // Check the shuffle mask. + unsigned Mask = (1U << Shift) - 1; + for (unsigned i = 0; i != NumElems; ++i) { + int EltIdx = SVOp->getMaskElt(i); + if ((i & Mask) != 0 && EltIdx != -1) + return SDValue(); + if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift)) + return SDValue(); + } + + unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift; + EVT NeVT = EVT::getIntegerVT(*DAG.getContext(), NBits); + EVT NVT = EVT::getVectorVT(*DAG.getContext(), NeVT, NumElems >> Shift); + + if (!isTypeLegal(NVT)) + return SDValue(); + + // Simplify the operand as it's prepared to be fed into shuffle. + unsigned SignificantBits = NVT.getSizeInBits() >> Shift; + if (V1.getOpcode() == ISD::BITCAST && + V1.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR && + V1.getOperand(0).getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && + V1.getOperand(0) + .getOperand(0).getValueType().getSizeInBits() == SignificantBits) { + // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x) + SDValue V = V1.getOperand(0).getOperand(0).getOperand(0); + ConstantSDNode *CIdx = + dyn_cast(V1.getOperand(0).getOperand(0).getOperand(1)); + // If it's foldable, i.e. normal load with single use, we will let code + // selection to fold it. Otherwise, we will short the conversion sequence. + if (CIdx && CIdx->getZExtValue() == 0 && + (!ISD::isNormalLoad(V.getNode()) || !V.hasOneUse())) + V1 = DAG.getNode(ISD::BITCAST, DL, V1.getValueType(), V); + } + + return DAG.getNode(ISD::BITCAST, DL, VT, + DAG.getNode(X86ISD::VZEXT, DL, NVT, V1)); +} + SDValue X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { ShuffleVectorSDNode *SVOp = cast(Op); @@ -6485,6 +6677,11 @@ X86TargetLowering::NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const { return PromoteSplat(SVOp, DAG); } + // Check integer expanding shuffles. + SDValue NewOp = lowerVectorIntExtend(Op, DAG); + if (NewOp.getNode()) + return NewOp; + // If the shuffle can be profitably rewritten as a narrower shuffle, then // do it! if (VT == MVT::v8i16 || VT == MVT::v16i8 || @@ -6534,7 +6731,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { bool HasAVX = Subtarget->hasAVX(); bool HasAVX2 = Subtarget->hasAVX2(); MachineFunction &MF = DAG.getMachineFunction(); - bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize); + bool OptForSize = MF.getFunction()->getFnAttributes(). + hasAttribute(Attributes::OptimizeForSize); assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); @@ -6803,7 +7001,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { // Handle v8i16 specifically since SSE can do byte extraction and insertion. 
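A scalar model of the mask test in lowerVectorIntExtend above (an assumed standalone helper mirroring the checks in the patch): the shuffle is treated as a zero-extension only when every in-between lane is undef and every 2^Shift-th lane takes consecutive low elements of the source, which is the lane pattern PMOVZX produces.

    // True when Mask describes "lane i takes source element i >> Shift, all
    // other lanes are undef" - the shape that maps onto a PMOVZX-style VZEXT.
    static bool isZExtShuffleMask(const int *Mask, unsigned NumElems, unsigned Shift) {
      unsigned Low = (1U << Shift) - 1;
      for (unsigned i = 0; i != NumElems; ++i) {
        if ((i & Low) != 0 && Mask[i] != -1)
          return false;                        // in-between lane must be undef
        if ((i & Low) == 0 && Mask[i] != (int)(i >> Shift))
          return false;                        // extended lane must be consecutive
      }
      return true;
    }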
if (VT == MVT::v8i16) { - SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG); + SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG); if (NewOp.getNode()) return NewOp; } @@ -6814,6 +7012,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { return NewOp; } + if (VT == MVT::v32i8) { + SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG); + if (NewOp.getNode()) + return NewOp; + } + // Handle all 128-bit wide vectors with 4 elements, and match them with // several different shuffle types. if (NumElems == 4 && VT.is128BitVector()) @@ -6837,9 +7041,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, if (VT.getSizeInBits() == 8) { SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, - Op.getOperand(0), Op.getOperand(1)); + Op.getOperand(0), Op.getOperand(1)); SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, - DAG.getValueType(VT)); + DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); } @@ -6854,9 +7058,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, Op.getOperand(0)), Op.getOperand(1))); SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, - Op.getOperand(0), Op.getOperand(1)); + Op.getOperand(0), Op.getOperand(1)); SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract, - DAG.getValueType(VT)); + DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); } @@ -6940,9 +7144,9 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, // Transform it so it match pextrw which produces a 32-bit result. EVT EltVT = MVT::i32; SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT, - Op.getOperand(0), Op.getOperand(1)); + Op.getOperand(0), Op.getOperand(1)); SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract, - DAG.getValueType(VT)); + DAG.getValueType(VT)); return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert); } @@ -7085,8 +7289,7 @@ X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const { return SDValue(); } -SDValue -X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) { LLVMContext *Context = DAG.getContext(); DebugLoc dl = Op.getDebugLoc(); EVT OpVT = Op.getValueType(); @@ -7118,8 +7321,8 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const { // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in // a simple subregister reference or explicit instructions to grab // upper bits of a vector. -SDValue -X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { if (Subtarget->hasAVX()) { DebugLoc dl = Op.getNode()->getDebugLoc(); SDValue Vec = Op.getNode()->getOperand(0); @@ -7138,8 +7341,8 @@ X86TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a // simple superregister reference or explicit instructions to insert // the upper bits of a vector. 
-SDValue -X86TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { if (Subtarget->hasAVX()) { DebugLoc dl = Op.getNode()->getDebugLoc(); SDValue Vec = Op.getNode()->getOperand(0); @@ -7282,9 +7485,10 @@ X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { Subtarget->ClassifyBlockAddressReference(); CodeModel::Model M = getTargetMachine().getCodeModel(); const BlockAddress *BA = cast(Op)->getBlockAddress(); + int64_t Offset = cast(Op)->getOffset(); DebugLoc dl = Op.getDebugLoc(); - SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), - /*isTarget=*/true, OpFlags); + SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset, + OpFlags); if (Subtarget->isPICStyleRIPRel() && (M == CodeModel::Small || M == CodeModel::Kernel)) @@ -7393,8 +7597,8 @@ LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, SDValue InFlag; DebugLoc dl = GA->getDebugLoc(); // ? function entry point might be better SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, - DAG.getNode(X86ISD::GlobalBaseReg, - DebugLoc(), PtrVT), InFlag); + DAG.getNode(X86ISD::GlobalBaseReg, + DebugLoc(), PtrVT), InFlag); InFlag = Chain.getValue(1); return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); @@ -7895,11 +8099,29 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op, return Sub; } +SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op, + SelectionDAG &DAG) const { + SDValue N0 = Op.getOperand(0); + EVT SVT = N0.getValueType(); + DebugLoc dl = Op.getDebugLoc(); + + assert((SVT == MVT::v4i8 || SVT == MVT::v4i16 || + SVT == MVT::v8i8 || SVT == MVT::v8i16) && + "Custom UINT_TO_FP is not supported!"); + + EVT NVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, SVT.getVectorNumElements()); + return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), + DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0)); +} + SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { SDValue N0 = Op.getOperand(0); DebugLoc dl = Op.getDebugLoc(); + if (Op.getValueType().isVector()) + return lowerUINT_TO_FP_vec(Op, DAG); + // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform // the optimization here. @@ -8073,10 +8295,66 @@ FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG, bool IsSigned, bool IsReplace) co } } +SDValue X86TargetLowering::lowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + EVT VT = Op.getValueType(); + SDValue In = Op.getOperand(0); + EVT SVT = In.getValueType(); + + if (!VT.is256BitVector() || !SVT.is128BitVector() || + VT.getVectorNumElements() != SVT.getVectorNumElements()) + return SDValue(); + + assert(Subtarget->hasAVX() && "256-bit vector is observed without AVX!"); + + // AVX2 has better support of integer extending. 
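Why the custom lowerUINT_TO_FP_vec above may zero-extend to i32 and then use a signed convert (scalar illustration, not from the patch): after widening, the narrow unsigned value is guaranteed non-negative, so the signed and unsigned conversions agree.

    float u8_to_float(unsigned char x) {
      int wide = x;                // zero-extension, value is in [0, 255]
      return (float)wide;          // signed convert gives the same result
    }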
+ if (Subtarget->hasAVX2()) + return DAG.getNode(X86ISD::VZEXT, DL, VT, In); + + SDValue Lo = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, In); + static const int Mask[] = {4, 5, 6, 7, -1, -1, -1, -1}; + SDValue Hi = DAG.getNode(X86ISD::VZEXT, DL, MVT::v4i32, + DAG.getVectorShuffle(MVT::v8i16, DL, In, DAG.getUNDEF(MVT::v8i16), &Mask[0])); + + return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i32, Lo, Hi); +} + +SDValue X86TargetLowering::lowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + EVT VT = Op.getValueType(); + EVT SVT = Op.getOperand(0).getValueType(); + + if (!VT.is128BitVector() || !SVT.is256BitVector() || + VT.getVectorNumElements() != SVT.getVectorNumElements()) + return SDValue(); + + assert(Subtarget->hasAVX() && "256-bit vector is observed without AVX!"); + + unsigned NumElems = VT.getVectorNumElements(); + EVT NVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), + NumElems * 2); + + SDValue In = Op.getOperand(0); + SmallVector MaskVec(NumElems * 2, -1); + // Prepare truncation shuffle mask + for (unsigned i = 0; i != NumElems; ++i) + MaskVec[i] = i * 2; + SDValue V = DAG.getVectorShuffle(NVT, DL, + DAG.getNode(ISD::BITCAST, DL, NVT, In), + DAG.getUNDEF(NVT), &MaskVec[0]); + return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, + DAG.getIntPtrConstant(0)); +} + SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const { - if (Op.getValueType().isVector()) + if (Op.getValueType().isVector()) { + if (Op.getValueType() == MVT::v8i16) + return DAG.getNode(ISD::TRUNCATE, Op.getDebugLoc(), Op.getValueType(), + DAG.getNode(ISD::FP_TO_SINT, Op.getDebugLoc(), + MVT::v8i32, Op.getOperand(0))); return SDValue(); + } std::pair Vals = FP_TO_INTHelper(Op, DAG, /*IsSigned=*/ true, /*IsReplace=*/ false); @@ -8111,26 +8389,49 @@ SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op, return FIST; } -SDValue X86TargetLowering::LowerFABS(SDValue Op, - SelectionDAG &DAG) const { +SDValue X86TargetLowering::lowerFP_EXTEND(SDValue Op, + SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + EVT VT = Op.getValueType(); + SDValue In = Op.getOperand(0); + EVT SVT = In.getValueType(); + + assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!"); + + return DAG.getNode(X86ISD::VFPEXT, DL, VT, + DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, + In, DAG.getUNDEF(SVT))); +} + +SDValue X86TargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { LLVMContext *Context = DAG.getContext(); DebugLoc dl = Op.getDebugLoc(); EVT VT = Op.getValueType(); EVT EltVT = VT; - if (VT.isVector()) + unsigned NumElts = VT == MVT::f64 ? 
2 : 4; + if (VT.isVector()) { EltVT = VT.getVectorElementType(); - Constant *C; - if (EltVT == MVT::f64) { - C = ConstantVector::getSplat(2, - ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63))))); - } else { - C = ConstantVector::getSplat(4, - ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31))))); + NumElts = VT.getVectorNumElements(); } - SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); + Constant *C; + if (EltVT == MVT::f64) + C = ConstantFP::get(*Context, APFloat(APInt(64, ~(1ULL << 63)))); + else + C = ConstantFP::get(*Context, APFloat(APInt(32, ~(1U << 31)))); + C = ConstantVector::getSplat(NumElts, C); + SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); + unsigned Alignment = cast(CPIdx)->getAlignment(); SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, MachinePointerInfo::getConstantPool(), - false, false, false, 16); + false, false, false, Alignment); + if (VT.isVector()) { + MVT ANDVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; + return DAG.getNode(ISD::BITCAST, dl, VT, + DAG.getNode(ISD::AND, dl, ANDVT, + DAG.getNode(ISD::BITCAST, dl, ANDVT, + Op.getOperand(0)), + DAG.getNode(ISD::BITCAST, dl, ANDVT, Mask))); + } return DAG.getNode(X86ISD::FAND, dl, VT, Op.getOperand(0), Mask); } @@ -8150,10 +8451,11 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const { else C = ConstantFP::get(*Context, APFloat(APInt(32, 1U << 31))); C = ConstantVector::getSplat(NumElts, C); - SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); + SDValue CPIdx = DAG.getConstantPool(C, getPointerTy()); + unsigned Alignment = cast(CPIdx)->getAlignment(); SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, MachinePointerInfo::getConstantPool(), - false, false, false, 16); + false, false, false, Alignment); if (VT.isVector()) { MVT XORVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; return DAG.getNode(ISD::BITCAST, dl, VT, @@ -8239,7 +8541,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit); } -SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) { SDValue N0 = Op.getOperand(0); DebugLoc dl = Op.getDebugLoc(); EVT VT = Op.getValueType(); @@ -8250,6 +8552,98 @@ SDValue X86TargetLowering::LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const { return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT)); } +// LowerVectorAllZeroTest - Check whether an OR'd tree is PTEST-able. +// +SDValue X86TargetLowering::LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const { + assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree."); + + if (!Subtarget->hasSSE41()) + return SDValue(); + + if (!Op->hasOneUse()) + return SDValue(); + + SDNode *N = Op.getNode(); + DebugLoc DL = N->getDebugLoc(); + + SmallVector Opnds; + DenseMap VecInMap; + EVT VT = MVT::Other; + + // Recognize a special case where a vector is casted into wide integer to + // test all 0s. + Opnds.push_back(N->getOperand(0)); + Opnds.push_back(N->getOperand(1)); + + for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) { + SmallVector::const_iterator I = Opnds.begin() + Slot; + // BFS traverse all OR'd operands. + if (I->getOpcode() == ISD::OR) { + Opnds.push_back(I->getOperand(0)); + Opnds.push_back(I->getOperand(1)); + // Re-evaluate the number of nodes to be traversed. + e += 2; // 2 more nodes (LHS and RHS) are pushed. 
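The kind of source pattern LowerVectorAllZeroTest is recognizing (an illustrative guess at typical input; the intrinsics used here assume SSE4.1 and a 64-bit target): an all-zero test written as an OR of pieces extracted from one vector. The traversal above collects those pieces, and the PTEST that replaces the whole tree is emitted just below.

    #include <immintrin.h>
    // (lo | hi) == 0 over the two 64-bit halves of one SSE register: an OR-tree
    // of EXTRACT_VECTOR_ELTs feeding an equality test, foldable into one PTEST.
    int is_all_zero(__m128i v) {
      long long lo = _mm_cvtsi128_si64(v);       // element 0
      long long hi = _mm_extract_epi64(v, 1);    // element 1 (SSE4.1)
      return (lo | hi) == 0;
    }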
+ continue; + } + + // Quit if a non-EXTRACT_VECTOR_ELT + if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT) + return SDValue(); + + // Quit if without a constant index. + SDValue Idx = I->getOperand(1); + if (!isa(Idx)) + return SDValue(); + + SDValue ExtractedFromVec = I->getOperand(0); + DenseMap::iterator M = VecInMap.find(ExtractedFromVec); + if (M == VecInMap.end()) { + VT = ExtractedFromVec.getValueType(); + // Quit if not 128/256-bit vector. + if (!VT.is128BitVector() && !VT.is256BitVector()) + return SDValue(); + // Quit if not the same type. + if (VecInMap.begin() != VecInMap.end() && + VT != VecInMap.begin()->first.getValueType()) + return SDValue(); + M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first; + } + M->second |= 1U << cast(Idx)->getZExtValue(); + } + + assert((VT.is128BitVector() || VT.is256BitVector()) && + "Not extracted from 128-/256-bit vector."); + + unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U; + SmallVector VecIns; + + for (DenseMap::const_iterator + I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) { + // Quit if not all elements are used. + if (I->second != FullMask) + return SDValue(); + VecIns.push_back(I->first); + } + + EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64; + + // Cast all vectors into TestVT for PTEST. + for (unsigned i = 0, e = VecIns.size(); i < e; ++i) + VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]); + + // If more than one full vectors are evaluated, OR them first before PTEST. + for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) { + // Each iteration will OR 2 nodes and append the result until there is only + // 1 node left, i.e. the final OR'd value of all vectors. + SDValue LHS = VecIns[Slot]; + SDValue RHS = VecIns[Slot + 1]; + VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS)); + } + + return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, + VecIns.back(), VecIns.back()); +} + /// Emit nodes that will be selected as "test Op0,Op0", or something /// equivalent. SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, @@ -8283,7 +8677,33 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, unsigned Opcode = 0; unsigned NumOperands = 0; - switch (Op.getNode()->getOpcode()) { + + // Truncate operations may prevent the merge of the SETCC instruction + // and the arithmetic intruction before it. Attempt to truncate the operands + // of the arithmetic instruction and use a reduced bit-width instruction. + bool NeedTruncation = false; + SDValue ArithOp = Op; + if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) { + SDValue Arith = Op->getOperand(0); + // Both the trunc and the arithmetic op need to have one user each. + if (Arith->hasOneUse()) + switch (Arith.getOpcode()) { + default: break; + case ISD::ADD: + case ISD::SUB: + case ISD::AND: + case ISD::OR: + case ISD::XOR: { + NeedTruncation = true; + ArithOp = Arith; + } + } + } + + // NOTICE: In the code below we use ArithOp to hold the arithmetic operation + // which may be the result of a CAST. We use the variable 'Op', which is the + // non-casted variable when we check for possible users. + switch (ArithOp.getOpcode()) { case ISD::ADD: // Due to an isel shortcoming, be conservative if this add is likely to be // selected as part of a load-modify-store instruction. 
When the root node @@ -8303,7 +8723,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, goto default_case; if (ConstantSDNode *C = - dyn_cast(Op.getNode()->getOperand(1))) { + dyn_cast(ArithOp.getNode()->getOperand(1))) { // An add of one will be selected as an INC. if (C->getAPIntValue() == 1) { Opcode = X86ISD::INC; @@ -8339,7 +8759,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC && - (User->getOpcode() != ISD::SELECT || UOpNo != 0)) { + !(User->getOpcode() == ISD::SELECT && UOpNo == 0)) { NonFlagUse = true; break; } @@ -8360,14 +8780,20 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, goto default_case; // Otherwise use a regular EFLAGS-setting instruction. - switch (Op.getNode()->getOpcode()) { + switch (ArithOp.getOpcode()) { default: llvm_unreachable("unexpected operator!"); - case ISD::SUB: - Opcode = X86ISD::SUB; - break; - case ISD::OR: Opcode = X86ISD::OR; break; + case ISD::SUB: Opcode = X86ISD::SUB; break; case ISD::XOR: Opcode = X86ISD::XOR; break; case ISD::AND: Opcode = X86ISD::AND; break; + case ISD::OR: { + if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) { + SDValue EFLAGS = LowerVectorAllZeroTest(Op, DAG); + if (EFLAGS.getNode()) + return EFLAGS; + } + Opcode = X86ISD::OR; + break; + } } NumOperands = 2; @@ -8385,19 +8811,40 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, break; } + // If we found that truncation is beneficial, perform the truncation and + // update 'Op'. + if (NeedTruncation) { + EVT VT = Op.getValueType(); + SDValue WideVal = Op->getOperand(0); + EVT WideVT = WideVal.getValueType(); + unsigned ConvertedOp = 0; + // Use a target machine opcode to prevent further DAGCombine + // optimizations that may separate the arithmetic operations + // from the setcc node. + switch (WideVal.getOpcode()) { + default: break; + case ISD::ADD: ConvertedOp = X86ISD::ADD; break; + case ISD::SUB: ConvertedOp = X86ISD::SUB; break; + case ISD::AND: ConvertedOp = X86ISD::AND; break; + case ISD::OR: ConvertedOp = X86ISD::OR; break; + case ISD::XOR: ConvertedOp = X86ISD::XOR; break; + } + + if (ConvertedOp) { + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); + if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) { + SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0)); + SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1)); + Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1); + } + } + } + if (Opcode == 0) // Emit a CMP with 0, which is the TEST pattern. return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, DAG.getConstant(0, Op.getValueType())); - if (Opcode == X86ISD::CMP) { - SDValue New = DAG.getNode(Opcode, dl, MVT::i32, Op.getOperand(0), - Op.getOperand(1)); - // We can't replace usage of SUB with CMP. - // The SUB node will be removed later because there is no use of it. - return SDValue(New.getNode(), 0); - } - SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); SmallVector Ops; for (unsigned i = 0; i != NumOperands; ++i) @@ -8956,6 +9403,21 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { } } + // X86 doesn't have an i8 cmov. If both operands are the result of a truncate + // widen the cmov and push the truncate through. This avoids introducing a new + // branch during isel and doesn't add any extensions. 
+ if (Op.getValueType() == MVT::i8 && + Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) { + SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0); + if (T1.getValueType() == T2.getValueType() && + // Blacklist CopyFromReg to avoid partial register stalls. + T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){ + SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue); + SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond); + return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov); + } + } + // X86ISD::CMOV means set the result (which is operand 1) to the RHS if // condition is true. SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue); @@ -9310,7 +9772,8 @@ X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag); Flag = Chain.getValue(1); - Chain = DAG.getCopyFromReg(Chain, dl, X86StackPtr, SPTy).getValue(1); + Chain = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), + SPTy).getValue(1); SDValue Ops1[2] = { Chain.getValue(0), Chain }; return DAG.getMergeValues(Ops1, 2, dl); @@ -9393,7 +9856,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { EVT ArgVT = Op.getNode()->getValueType(0); Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); - uint32_t ArgSize = getTargetData()->getTypeAllocSize(ArgTy); + uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy); uint8_t ArgMode; // Decide which area this value should be read from. @@ -9413,7 +9876,8 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { // Sanity Check: Make sure using fp_offset makes sense. assert(!getTargetMachine().Options.UseSoftFloat && !(DAG.getMachineFunction() - .getFunction()->hasFnAttr(Attribute::NoImplicitFloat)) && + .getFunction()->getFnAttributes() + .hasAttribute(Attributes::NoImplicitFloat)) && Subtarget->hasSSE1()); } @@ -9444,7 +9908,8 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { false, false, false, 0); } -SDValue X86TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { // X86-64 va_list is a struct { i32, i32, i8*, i8* }. assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!"); SDValue Chain = Op.getOperand(0); @@ -9505,8 +9970,7 @@ static SDValue getTargetVShiftNode(unsigned Opc, DebugLoc dl, EVT VT, return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); } -SDValue -X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) { DebugLoc dl = Op.getDebugLoc(); unsigned IntNo = cast(Op.getOperand(0))->getZExtValue(); switch (IntNo) { @@ -9894,62 +10358,6 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const Op.getOperand(1), Op.getOperand(2), DAG); } - // Fix vector shift instructions where the last operand is a non-immediate - // i32 value. - case Intrinsic::x86_mmx_pslli_w: - case Intrinsic::x86_mmx_pslli_d: - case Intrinsic::x86_mmx_pslli_q: - case Intrinsic::x86_mmx_psrli_w: - case Intrinsic::x86_mmx_psrli_d: - case Intrinsic::x86_mmx_psrli_q: - case Intrinsic::x86_mmx_psrai_w: - case Intrinsic::x86_mmx_psrai_d: { - SDValue ShAmt = Op.getOperand(2); - if (isa(ShAmt)) - return SDValue(); - - unsigned NewIntNo; - switch (IntNo) { - default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
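The EmitTest changes above try to narrow the arithmetic feeding a compare when only a truncated result is tested, so the flag-setting form of the narrow operation can replace a separate TEST; the i8 CMOV widening serves a similar goal for selects. A hedged, source-level sketch of code that benefits (names are illustrative, this is not the patch's code):

#include <cstdint>

bool sumLowByteNonZero(int X, int Y) {
  // i32 add, truncated to i8, then compared with zero: the lowering above
  // prefers truncating the operands and emitting a narrow flag-setting add.
  uint8_t Sum = static_cast<uint8_t>(X + Y);
  return Sum != 0;
}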
- case Intrinsic::x86_mmx_pslli_w: - NewIntNo = Intrinsic::x86_mmx_psll_w; - break; - case Intrinsic::x86_mmx_pslli_d: - NewIntNo = Intrinsic::x86_mmx_psll_d; - break; - case Intrinsic::x86_mmx_pslli_q: - NewIntNo = Intrinsic::x86_mmx_psll_q; - break; - case Intrinsic::x86_mmx_psrli_w: - NewIntNo = Intrinsic::x86_mmx_psrl_w; - break; - case Intrinsic::x86_mmx_psrli_d: - NewIntNo = Intrinsic::x86_mmx_psrl_d; - break; - case Intrinsic::x86_mmx_psrli_q: - NewIntNo = Intrinsic::x86_mmx_psrl_q; - break; - case Intrinsic::x86_mmx_psrai_w: - NewIntNo = Intrinsic::x86_mmx_psra_w; - break; - case Intrinsic::x86_mmx_psrai_d: - NewIntNo = Intrinsic::x86_mmx_psra_d; - break; - } - - // The vector shift intrinsics with scalars uses 32b shift amounts but - // the sse2/mmx shift instructions reads 64 bits. Set the upper 32 bits - // to be zero. - ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, ShAmt, - DAG.getConstant(0, MVT::i32)); -// FIXME this must be lowered to get rid of the invalid type. - - EVT VT = Op.getValueType(); - ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt); - return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, - DAG.getConstant(NewIntNo, MVT::i32), - Op.getOperand(1), ShAmt); - } case Intrinsic::x86_sse42_pcmpistria128: case Intrinsic::x86_sse42_pcmpestria128: case Intrinsic::x86_sse42_pcmpistric128: @@ -10028,13 +10436,80 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32); return DAG.getNode(Opcode, dl, VTs, NewOps.data(), NewOps.size()); } - } -} - -SDValue -X86TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const { - DebugLoc dl = Op.getDebugLoc(); - unsigned IntNo = cast(Op.getOperand(1))->getZExtValue(); + case Intrinsic::x86_fma_vfmadd_ps: + case Intrinsic::x86_fma_vfmadd_pd: + case Intrinsic::x86_fma_vfmsub_ps: + case Intrinsic::x86_fma_vfmsub_pd: + case Intrinsic::x86_fma_vfnmadd_ps: + case Intrinsic::x86_fma_vfnmadd_pd: + case Intrinsic::x86_fma_vfnmsub_ps: + case Intrinsic::x86_fma_vfnmsub_pd: + case Intrinsic::x86_fma_vfmaddsub_ps: + case Intrinsic::x86_fma_vfmaddsub_pd: + case Intrinsic::x86_fma_vfmsubadd_ps: + case Intrinsic::x86_fma_vfmsubadd_pd: + case Intrinsic::x86_fma_vfmadd_ps_256: + case Intrinsic::x86_fma_vfmadd_pd_256: + case Intrinsic::x86_fma_vfmsub_ps_256: + case Intrinsic::x86_fma_vfmsub_pd_256: + case Intrinsic::x86_fma_vfnmadd_ps_256: + case Intrinsic::x86_fma_vfnmadd_pd_256: + case Intrinsic::x86_fma_vfnmsub_ps_256: + case Intrinsic::x86_fma_vfnmsub_pd_256: + case Intrinsic::x86_fma_vfmaddsub_ps_256: + case Intrinsic::x86_fma_vfmaddsub_pd_256: + case Intrinsic::x86_fma_vfmsubadd_ps_256: + case Intrinsic::x86_fma_vfmsubadd_pd_256: { + unsigned Opc; + switch (IntNo) { + default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
+ case Intrinsic::x86_fma_vfmadd_ps: + case Intrinsic::x86_fma_vfmadd_pd: + case Intrinsic::x86_fma_vfmadd_ps_256: + case Intrinsic::x86_fma_vfmadd_pd_256: + Opc = X86ISD::FMADD; + break; + case Intrinsic::x86_fma_vfmsub_ps: + case Intrinsic::x86_fma_vfmsub_pd: + case Intrinsic::x86_fma_vfmsub_ps_256: + case Intrinsic::x86_fma_vfmsub_pd_256: + Opc = X86ISD::FMSUB; + break; + case Intrinsic::x86_fma_vfnmadd_ps: + case Intrinsic::x86_fma_vfnmadd_pd: + case Intrinsic::x86_fma_vfnmadd_ps_256: + case Intrinsic::x86_fma_vfnmadd_pd_256: + Opc = X86ISD::FNMADD; + break; + case Intrinsic::x86_fma_vfnmsub_ps: + case Intrinsic::x86_fma_vfnmsub_pd: + case Intrinsic::x86_fma_vfnmsub_ps_256: + case Intrinsic::x86_fma_vfnmsub_pd_256: + Opc = X86ISD::FNMSUB; + break; + case Intrinsic::x86_fma_vfmaddsub_ps: + case Intrinsic::x86_fma_vfmaddsub_pd: + case Intrinsic::x86_fma_vfmaddsub_ps_256: + case Intrinsic::x86_fma_vfmaddsub_pd_256: + Opc = X86ISD::FMADDSUB; + break; + case Intrinsic::x86_fma_vfmsubadd_ps: + case Intrinsic::x86_fma_vfmsubadd_pd: + case Intrinsic::x86_fma_vfmsubadd_ps_256: + case Intrinsic::x86_fma_vfmsubadd_pd_256: + Opc = X86ISD::FMSUBADD; + break; + } + + return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1), + Op.getOperand(2), Op.getOperand(3)); + } + } +} + +static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) { + DebugLoc dl = Op.getDebugLoc(); + unsigned IntNo = cast(Op.getOperand(1))->getZExtValue(); switch (IntNo) { default: return SDValue(); // Don't custom lower most intrinsics. @@ -10070,21 +10545,21 @@ SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, unsigned Depth = cast(Op.getOperand(0))->getZExtValue(); DebugLoc dl = Op.getDebugLoc(); + EVT PtrVT = getPointerTy(); if (Depth > 0) { SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); SDValue Offset = - DAG.getConstant(TD->getPointerSize(), - Subtarget->is64Bit() ? MVT::i64 : MVT::i32); - return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), - DAG.getNode(ISD::ADD, dl, getPointerTy(), + DAG.getConstant(RegInfo->getSlotSize(), PtrVT); + return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), + DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), MachinePointerInfo(), false, false, false, 0); } // Just load the return address. SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); - return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), + return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, MachinePointerInfo(), false, false, false, 0); } @@ -10106,7 +10581,7 @@ SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const { - return DAG.getIntPtrConstant(2*TD->getPointerSize()); + return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize()); } SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { @@ -10121,7 +10596,7 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { unsigned StoreAddrReg = (Subtarget->is64Bit() ? 
X86::RCX : X86::ECX); SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, - DAG.getIntPtrConstant(TD->getPointerSize())); + DAG.getIntPtrConstant(RegInfo->getSlotSize())); StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(), false, false, 0); @@ -10132,8 +10607,22 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { Chain, DAG.getRegister(StoreAddrReg, getPointerTy())); } -SDValue X86TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op, - SelectionDAG &DAG) const { +SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, + SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, + DAG.getVTList(MVT::i32, MVT::Other), + Op.getOperand(0), Op.getOperand(1)); +} + +SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, + SelectionDAG &DAG) const { + DebugLoc DL = Op.getDebugLoc(); + return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, + Op.getOperand(0), Op.getOperand(1)); +} + +static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) { return Op.getOperand(0); } @@ -10146,6 +10635,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, DebugLoc dl = Op.getDebugLoc(); const Value *TrmpAddr = cast(Op.getOperand(4))->getValue(); + const TargetRegisterInfo* TRI = getTargetMachine().getRegisterInfo(); if (Subtarget->is64Bit()) { SDValue OutChains[6]; @@ -10154,8 +10644,8 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. - const unsigned char N86R10 = X86_MC::getX86RegNum(X86::R10); - const unsigned char N86R11 = X86_MC::getX86RegNum(X86::R11); + const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7; + const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7; const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix @@ -10228,7 +10718,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, for (FunctionType::param_iterator I = FTy->param_begin(), E = FTy->param_end(); I != E; ++I, ++Idx) - if (Attrs.paramHasAttr(Idx, Attribute::InReg)) + if (Attrs.getParamAttributes(Idx).hasAttribute(Attributes::InReg)) // FIXME: should only count parameters that are lowered to integers. InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; @@ -10257,7 +10747,7 @@ SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, // This is storing the opcode for MOV32ri. const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. 
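The trampoline lowering above now derives the low three register-encoding bits from TargetRegisterInfo::getEncodingValue instead of X86_MC::getX86RegNum. As a reminder of why "MOV32ri | N86Reg" works, a sketch of the byte layout being assembled, assuming the usual 10-byte 32-bit trampoline of "mov $nest, %reg" followed by "jmp rel32" (helper name and layout shown here are illustrative, the exact stores are in the surrounding hunk):

#include <cstdint>
#include <cstring>

void writeTrampolineBytes(uint8_t *Trmp, uint8_t RegNum /* low 3 encoding bits */,
                          uint32_t NestVal, int32_t JmpRel32) {
  Trmp[0] = uint8_t(0xB8 | (RegNum & 0x7)); // mov r32, imm32 (opcode carries reg)
  std::memcpy(Trmp + 1, &NestVal, 4);       // the nest parameter
  Trmp[5] = 0xE9;                           // jmp rel32
  std::memcpy(Trmp + 6, &JmpRel32, 4);      // displacement to the nested function
}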
- const unsigned char N86Reg = X86_MC::getX86RegNum(NestReg); + const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7; OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(MOV32ri|N86Reg, MVT::i8), Trmp, MachinePointerInfo(TrmpAddr), @@ -10356,7 +10846,7 @@ SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); } -SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); EVT OpVT = VT; unsigned NumBits = VT.getSizeInBits(); @@ -10390,8 +10880,7 @@ SDValue X86TargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { return Op; } -SDValue X86TargetLowering::LowerCTLZ_ZERO_UNDEF(SDValue Op, - SelectionDAG &DAG) const { +static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); EVT OpVT = VT; unsigned NumBits = VT.getSizeInBits(); @@ -10416,7 +10905,7 @@ SDValue X86TargetLowering::LowerCTLZ_ZERO_UNDEF(SDValue Op, return Op; } -SDValue X86TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) { EVT VT = Op.getValueType(); unsigned NumBits = VT.getSizeInBits(); DebugLoc dl = Op.getDebugLoc(); @@ -10465,21 +10954,22 @@ static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) { DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2)); } -SDValue X86TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) { assert(Op.getValueType().is256BitVector() && Op.getValueType().isInteger() && "Only handle AVX 256-bit vector integer operation"); return Lower256IntArith(Op, DAG); } -SDValue X86TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) { assert(Op.getValueType().is256BitVector() && Op.getValueType().isInteger() && "Only handle AVX 256-bit vector integer operation"); return Lower256IntArith(Op, DAG); } -SDValue X86TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { EVT VT = Op.getValueType(); // Decompose 256-bit ops into smaller 128-bit ops. @@ -10754,7 +11244,7 @@ SDValue X86TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) const { return SDValue(); } -SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { // Lower the "add/sub/mul with overflow" instruction into a regular ins plus // a "setcc" instruction that checks the overflow flag. The "brcond" lowering // looks for this combo and may remove the "setcc" instruction if the "setcc" @@ -10869,7 +11359,7 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra); LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra); - return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);; + return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2); } // fall through case MVT::v4i32: @@ -10882,7 +11372,8 @@ SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, } -SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{ +static SDValue LowerMEMBARRIER(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { DebugLoc dl = Op.getDebugLoc(); // Go ahead and emit the fence on x86-64 even if we asked for no-sse2. 
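LowerMEMBARRIER and LowerATOMIC_FENCE (now static helpers taking the Subtarget explicitly) exist to give full barriers a cheap x86 encoding. A small sketch of the source construct they serve; on x86-64 a sequentially consistent fence is typically emitted as MFENCE, or as a lock-prefixed RMW on a stack slot when SSE2 is unavailable (the exact choice is in the hunk above):

#include <atomic>

void fullBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst); // usually lowers to mfence
}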
@@ -10927,8 +11418,8 @@ SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{ return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); } -SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op, - SelectionDAG &DAG) const { +static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { DebugLoc dl = Op.getDebugLoc(); AtomicOrdering FenceOrdering = static_cast( cast(Op.getOperand(1))->getZExtValue()); @@ -10966,7 +11457,8 @@ SDValue X86TargetLowering::LowerATOMIC_FENCE(SDValue Op, } -SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { EVT T = Op.getValueType(); DebugLoc DL = Op.getDebugLoc(); unsigned Reg = 0; @@ -10997,8 +11489,8 @@ SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { return cpOut; } -SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op, - SelectionDAG &DAG) const { +static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget, + SelectionDAG &DAG) { assert(Subtarget->is64Bit() && "Result not type legalized?"); SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue TheChain = Op.getOperand(0); @@ -11016,8 +11508,7 @@ SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op, return DAG.getMergeValues(Ops, 2, dl); } -SDValue X86TargetLowering::LowerBITCAST(SDValue Op, - SelectionDAG &DAG) const { +SDValue X86TargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const { EVT SrcVT = Op.getOperand(0).getValueType(); EVT DstVT = Op.getValueType(); assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() && @@ -11037,7 +11528,7 @@ SDValue X86TargetLowering::LowerBITCAST(SDValue Op, return SDValue(); } -SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const { +static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) { SDNode *Node = Op.getNode(); DebugLoc dl = Node->getDebugLoc(); EVT T = Node->getValueType(0); @@ -11110,9 +11601,9 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { default: llvm_unreachable("Should not custom lower this!"); case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG); - case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); - case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op,DAG); - case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); + case ISD::MEMBARRIER: return LowerMEMBARRIER(Op, Subtarget, DAG); + case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG); + case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op, Subtarget, DAG); case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG); case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); @@ -11120,8 +11611,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG); case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG); case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG); - case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); - case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, DAG); + case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG); + case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG); case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG); case 
ISD::ConstantPool: return LowerConstantPool(Op, DAG); case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG); @@ -11133,8 +11624,11 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG); case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); + case ISD::TRUNCATE: return lowerTRUNCATE(Op, DAG); + case ISD::ZERO_EXTEND: return lowerZERO_EXTEND(Op, DAG); case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); + case ISD::FP_EXTEND: return lowerFP_EXTEND(Op, DAG); case ISD::FABS: return LowerFABS(Op, DAG); case ISD::FNEG: return LowerFNEG(Op, DAG); case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG); @@ -11145,7 +11639,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::JumpTable: return LowerJumpTable(Op, DAG); case ISD::VASTART: return LowerVASTART(Op, DAG); case ISD::VAARG: return LowerVAARG(Op, DAG); - case ISD::VACOPY: return LowerVACOPY(Op, DAG); + case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG); case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, DAG); case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); @@ -11154,13 +11648,15 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { return LowerFRAME_TO_ARGS_OFFSET(Op, DAG); case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG); + case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG); + case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG); case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG); case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG); case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG); case ISD::CTLZ: return LowerCTLZ(Op, DAG); case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG); case ISD::CTTZ: return LowerCTTZ(Op, DAG); - case ISD::MUL: return LowerMUL(Op, DAG); + case ISD::MUL: return LowerMUL(Op, Subtarget, DAG); case ISD::SRA: case ISD::SRL: case ISD::SHL: return LowerShift(Op, DAG); @@ -11170,7 +11666,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::USUBO: case ISD::SMULO: case ISD::UMULO: return LowerXALUO(Op, DAG); - case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); + case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG); case ISD::BITCAST: return LowerBITCAST(Op, DAG); case ISD::ADDC: case ISD::ADDE: @@ -11263,6 +11759,27 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, } return; } + case ISD::UINT_TO_FP: { + if (N->getOperand(0).getValueType() != MVT::v2i32 && + N->getValueType(0) != MVT::v2f32) + return; + SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, + N->getOperand(0)); + SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), + MVT::f64); + SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias); + SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn, + DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias)); + Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or); + SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias); + Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub)); + return; + } + case ISD::FP_ROUND: { + SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, 
MVT::v4f32, N->getOperand(0)); + Results.push_back(V); + return; + } case ISD::READCYCLECOUNTER: { SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue TheChain = N->getOperand(0); @@ -11330,6 +11847,10 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, case ISD::ATOMIC_LOAD_OR: case ISD::ATOMIC_LOAD_SUB: case ISD::ATOMIC_LOAD_XOR: + case ISD::ATOMIC_LOAD_MAX: + case ISD::ATOMIC_LOAD_MIN: + case ISD::ATOMIC_LOAD_UMAX: + case ISD::ATOMIC_LOAD_UMIN: case ISD::ATOMIC_SWAP: { unsigned Opc; switch (N->getOpcode()) { @@ -11352,6 +11873,18 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N, case ISD::ATOMIC_LOAD_XOR: Opc = X86ISD::ATOMXOR64_DAG; break; + case ISD::ATOMIC_LOAD_MAX: + Opc = X86ISD::ATOMMAX64_DAG; + break; + case ISD::ATOMIC_LOAD_MIN: + Opc = X86ISD::ATOMMIN64_DAG; + break; + case ISD::ATOMIC_LOAD_UMAX: + Opc = X86ISD::ATOMUMAX64_DAG; + break; + case ISD::ATOMIC_LOAD_UMIN: + Opc = X86ISD::ATOMUMIN64_DAG; + break; case ISD::ATOMIC_SWAP: Opc = X86ISD::ATOMSWAP64_DAG; break; @@ -11418,11 +11951,15 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::FHSUB: return "X86ISD::FHSUB"; case X86ISD::FMAX: return "X86ISD::FMAX"; case X86ISD::FMIN: return "X86ISD::FMIN"; + case X86ISD::FMAXC: return "X86ISD::FMAXC"; + case X86ISD::FMINC: return "X86ISD::FMINC"; case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; case X86ISD::FRCP: return "X86ISD::FRCP"; case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; + case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP"; + case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP"; case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; @@ -11438,7 +11975,10 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; case X86ISD::VSEXT_MOVL: return "X86ISD::VSEXT_MOVL"; case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; + case X86ISD::VZEXT: return "X86ISD::VZEXT"; + case X86ISD::VSEXT: return "X86ISD::VSEXT"; case X86ISD::VFPEXT: return "X86ISD::VFPEXT"; + case X86ISD::VFPROUND: return "X86ISD::VFPROUND"; case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; case X86ISD::VSHL: return "X86ISD::VSHL"; @@ -11505,6 +12045,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; + case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI"; + case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI"; } } @@ -11653,430 +12195,724 @@ X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl &Mask, // X86 Scheduler Hooks //===----------------------------------------------------------------------===// -// private utility function -MachineBasicBlock * -X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr, - MachineBasicBlock *MBB, - unsigned regOpc, - unsigned immOpc, - unsigned LoadOpc, - unsigned CXchgOpc, - unsigned notOpc, - unsigned EAXreg, - const TargetRegisterClass *RC, - bool Invert) const { - // For the atomic bitwise operator, we generate - // thisMBB: - // newMBB: - // ld t1 = [bitinstr.addr] - // op t2 = t1, [bitinstr.val] - // not t3 = t2 (if Invert) - // mov EAX = t1 - // lcs dest = [bitinstr.addr], t3 
[EAX is implicit] - // bz newMBB - // fallthrough -->nextMBB - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); - const BasicBlock *LLVM_BB = MBB->getBasicBlock(); - MachineFunction::iterator MBBIter = MBB; - ++MBBIter; +/// Utility function to emit xbegin specifying the start of an RTM region. +static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB, + const TargetInstrInfo *TII) { + DebugLoc DL = MI->getDebugLoc(); + + const BasicBlock *BB = MBB->getBasicBlock(); + MachineFunction::iterator I = MBB; + ++I; + + // For the v = xbegin(), we generate + // + // thisMBB: + // xbegin sinkMBB + // + // mainMBB: + // eax = -1 + // + // sinkMBB: + // v = eax - /// First build the CFG - MachineFunction *F = MBB->getParent(); MachineBasicBlock *thisMBB = MBB; - MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); - F->insert(MBBIter, newMBB); - F->insert(MBBIter, nextMBB); - - // Transfer the remainder of thisMBB and its successor edges to nextMBB. - nextMBB->splice(nextMBB->begin(), thisMBB, - llvm::next(MachineBasicBlock::iterator(bInstr)), - thisMBB->end()); - nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); - - // Update thisMBB to fall through to newMBB - thisMBB->addSuccessor(newMBB); - - // newMBB jumps to itself and fall through to nextMBB - newMBB->addSuccessor(nextMBB); - newMBB->addSuccessor(newMBB); - - // Insert instructions into newMBB based on incoming instruction - assert(bInstr->getNumOperands() < X86::AddrNumOperands + 4 && - "unexpected number of operands"); - DebugLoc dl = bInstr->getDebugLoc(); - MachineOperand& destOper = bInstr->getOperand(0); - MachineOperand* argOpers[2 + X86::AddrNumOperands]; - int numArgs = bInstr->getNumOperands() - 1; - for (int i=0; i < numArgs; ++i) - argOpers[i] = &bInstr->getOperand(i+1); - - // x86 address has 4 operands: base, index, scale, and displacement - int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] - int valArgIndx = lastAddrIndx + 1; - - unsigned t1 = F->getRegInfo().createVirtualRegister(RC); - MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(LoadOpc), t1); - for (int i=0; i <= lastAddrIndx; ++i) - (*MIB).addOperand(*argOpers[i]); - - unsigned t2 = F->getRegInfo().createVirtualRegister(RC); - assert((argOpers[valArgIndx]->isReg() || - argOpers[valArgIndx]->isImm()) && - "invalid operand"); - if (argOpers[valArgIndx]->isReg()) - MIB = BuildMI(newMBB, dl, TII->get(regOpc), t2); - else - MIB = BuildMI(newMBB, dl, TII->get(immOpc), t2); - MIB.addReg(t1); - (*MIB).addOperand(*argOpers[valArgIndx]); + MachineFunction *MF = MBB->getParent(); + MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); + MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); + MF->insert(I, mainMBB); + MF->insert(I, sinkMBB); - unsigned t3 = F->getRegInfo().createVirtualRegister(RC); - if (Invert) { - MIB = BuildMI(newMBB, dl, TII->get(notOpc), t3).addReg(t2); - } - else - t3 = t2; + // Transfer the remainder of BB and its successor edges to sinkMBB. 
+ sinkMBB->splice(sinkMBB->begin(), MBB, + llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); + + // thisMBB: + // xbegin sinkMBB + // # fallthrough to mainMBB + // # abortion to sinkMBB + BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB); + thisMBB->addSuccessor(mainMBB); + thisMBB->addSuccessor(sinkMBB); + + // mainMBB: + // EAX = -1 + BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1); + mainMBB->addSuccessor(sinkMBB); + + // sinkMBB: + // EAX is live into the sinkMBB + sinkMBB->addLiveIn(X86::EAX); + BuildMI(*sinkMBB, sinkMBB->begin(), DL, + TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) + .addReg(X86::EAX); - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), EAXreg); - MIB.addReg(t1); + MI->eraseFromParent(); + return sinkMBB; +} - MIB = BuildMI(newMBB, dl, TII->get(CXchgOpc)); - for (int i=0; i <= lastAddrIndx; ++i) - (*MIB).addOperand(*argOpers[i]); - MIB.addReg(t3); - assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); - (*MIB).setMemRefs(bInstr->memoperands_begin(), - bInstr->memoperands_end()); +// Get CMPXCHG opcode for the specified data type. +static unsigned getCmpXChgOpcode(EVT VT) { + switch (VT.getSimpleVT().SimpleTy) { + case MVT::i8: return X86::LCMPXCHG8; + case MVT::i16: return X86::LCMPXCHG16; + case MVT::i32: return X86::LCMPXCHG32; + case MVT::i64: return X86::LCMPXCHG64; + default: + break; + } + llvm_unreachable("Invalid operand size!"); +} - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg()); - MIB.addReg(EAXreg); +// Get LOAD opcode for the specified data type. +static unsigned getLoadOpcode(EVT VT) { + switch (VT.getSimpleVT().SimpleTy) { + case MVT::i8: return X86::MOV8rm; + case MVT::i16: return X86::MOV16rm; + case MVT::i32: return X86::MOV32rm; + case MVT::i64: return X86::MOV64rm; + default: + break; + } + llvm_unreachable("Invalid operand size!"); +} - // insert branch - BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); +// Get opcode of the non-atomic one from the specified atomic instruction. +static unsigned getNonAtomicOpcode(unsigned Opc) { + switch (Opc) { + case X86::ATOMAND8: return X86::AND8rr; + case X86::ATOMAND16: return X86::AND16rr; + case X86::ATOMAND32: return X86::AND32rr; + case X86::ATOMAND64: return X86::AND64rr; + case X86::ATOMOR8: return X86::OR8rr; + case X86::ATOMOR16: return X86::OR16rr; + case X86::ATOMOR32: return X86::OR32rr; + case X86::ATOMOR64: return X86::OR64rr; + case X86::ATOMXOR8: return X86::XOR8rr; + case X86::ATOMXOR16: return X86::XOR16rr; + case X86::ATOMXOR32: return X86::XOR32rr; + case X86::ATOMXOR64: return X86::XOR64rr; + } + llvm_unreachable("Unhandled atomic-load-op opcode!"); +} + +// Get opcode of the non-atomic one from the specified atomic instruction with +// extra opcode. 
+static unsigned getNonAtomicOpcodeWithExtraOpc(unsigned Opc, + unsigned &ExtraOpc) { + switch (Opc) { + case X86::ATOMNAND8: ExtraOpc = X86::NOT8r; return X86::AND8rr; + case X86::ATOMNAND16: ExtraOpc = X86::NOT16r; return X86::AND16rr; + case X86::ATOMNAND32: ExtraOpc = X86::NOT32r; return X86::AND32rr; + case X86::ATOMNAND64: ExtraOpc = X86::NOT64r; return X86::AND64rr; + case X86::ATOMMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVL32rr; + case X86::ATOMMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVL16rr; + case X86::ATOMMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVL32rr; + case X86::ATOMMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVL64rr; + case X86::ATOMMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVG32rr; + case X86::ATOMMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVG16rr; + case X86::ATOMMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVG32rr; + case X86::ATOMMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVG64rr; + case X86::ATOMUMAX8: ExtraOpc = X86::CMP8rr; return X86::CMOVB32rr; + case X86::ATOMUMAX16: ExtraOpc = X86::CMP16rr; return X86::CMOVB16rr; + case X86::ATOMUMAX32: ExtraOpc = X86::CMP32rr; return X86::CMOVB32rr; + case X86::ATOMUMAX64: ExtraOpc = X86::CMP64rr; return X86::CMOVB64rr; + case X86::ATOMUMIN8: ExtraOpc = X86::CMP8rr; return X86::CMOVA32rr; + case X86::ATOMUMIN16: ExtraOpc = X86::CMP16rr; return X86::CMOVA16rr; + case X86::ATOMUMIN32: ExtraOpc = X86::CMP32rr; return X86::CMOVA32rr; + case X86::ATOMUMIN64: ExtraOpc = X86::CMP64rr; return X86::CMOVA64rr; + } + llvm_unreachable("Unhandled atomic-load-op opcode!"); +} + +// Get opcode of the non-atomic one from the specified atomic instruction for +// 64-bit data type on 32-bit target. +static unsigned getNonAtomic6432Opcode(unsigned Opc, unsigned &HiOpc) { + switch (Opc) { + case X86::ATOMAND6432: HiOpc = X86::AND32rr; return X86::AND32rr; + case X86::ATOMOR6432: HiOpc = X86::OR32rr; return X86::OR32rr; + case X86::ATOMXOR6432: HiOpc = X86::XOR32rr; return X86::XOR32rr; + case X86::ATOMADD6432: HiOpc = X86::ADC32rr; return X86::ADD32rr; + case X86::ATOMSUB6432: HiOpc = X86::SBB32rr; return X86::SUB32rr; + case X86::ATOMSWAP6432: HiOpc = X86::MOV32rr; return X86::MOV32rr; + case X86::ATOMMAX6432: HiOpc = X86::SETLr; return X86::SETLr; + case X86::ATOMMIN6432: HiOpc = X86::SETGr; return X86::SETGr; + case X86::ATOMUMAX6432: HiOpc = X86::SETBr; return X86::SETBr; + case X86::ATOMUMIN6432: HiOpc = X86::SETAr; return X86::SETAr; + } + llvm_unreachable("Unhandled atomic-load-op opcode!"); +} + +// Get opcode of the non-atomic one from the specified atomic instruction for +// 64-bit data type on 32-bit target with extra opcode. +static unsigned getNonAtomic6432OpcodeWithExtraOpc(unsigned Opc, + unsigned &HiOpc, + unsigned &ExtraOpc) { + switch (Opc) { + case X86::ATOMNAND6432: + ExtraOpc = X86::NOT32r; + HiOpc = X86::AND32rr; + return X86::AND32rr; + } + llvm_unreachable("Unhandled atomic-load-op opcode!"); +} - bInstr->eraseFromParent(); // The pseudo instruction is gone now. - return nextMBB; +// Get pseudo CMOV opcode from the specified data type. +static unsigned getPseudoCMOVOpc(EVT VT) { + switch (VT.getSimpleVT().SimpleTy) { + case MVT::i8: return X86::CMOV_GR8; + case MVT::i16: return X86::CMOV_GR16; + case MVT::i32: return X86::CMOV_GR32; + default: + break; + } + llvm_unreachable("Unknown CMOV opcode!"); } -// private utility function: 64 bit atomics on 32 bit host. +// EmitAtomicLoadArith - emit the code sequence for pseudo atomic instructions. 
+// They will be translated into a spin-loop or compare-exchange loop from +// +// ... +// dst = atomic-fetch-op MI.addr, MI.val +// ... +// +// to +// +// ... +// EAX = LOAD MI.addr +// loop: +// t1 = OP MI.val, EAX +// LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] +// JNE loop +// sink: +// dst = EAX +// ... MachineBasicBlock * -X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr, - MachineBasicBlock *MBB, - unsigned regOpcL, - unsigned regOpcH, - unsigned immOpcL, - unsigned immOpcH, - bool Invert) const { - // For the atomic bitwise operator, we generate - // thisMBB (instructions are in pairs, except cmpxchg8b) - // ld t1,t2 = [bitinstr.addr] - // newMBB: - // out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4) - // op t5, t6 <- out1, out2, [bitinstr.val] - // (for SWAP, substitute: mov t5, t6 <- [bitinstr.val]) - // neg t7, t8 < t5, t6 (if Invert) - // mov ECX, EBX <- t5, t6 - // mov EAX, EDX <- t1, t2 - // cmpxchg8b [bitinstr.addr] [EAX, EDX, EBX, ECX implicit] - // mov t3, t4 <- EAX, EDX - // bz newMBB - // result in out1, out2 - // fallthrough -->nextMBB - - const TargetRegisterClass *RC = &X86::GR32RegClass; - const unsigned LoadOpc = X86::MOV32rm; - const unsigned NotOpc = X86::NOT32r; +X86TargetLowering::EmitAtomicLoadArith(MachineInstr *MI, + MachineBasicBlock *MBB) const { const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); - const BasicBlock *LLVM_BB = MBB->getBasicBlock(); - MachineFunction::iterator MBBIter = MBB; - ++MBBIter; + DebugLoc DL = MI->getDebugLoc(); + + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + + const BasicBlock *BB = MBB->getBasicBlock(); + MachineFunction::iterator I = MBB; + ++I; + + assert(MI->getNumOperands() <= X86::AddrNumOperands + 2 && + "Unexpected number of operands"); + + assert(MI->hasOneMemOperand() && + "Expected atomic-load-op to have one memoperand"); + + // Memory Reference + MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); + MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); + + unsigned DstReg, SrcReg; + unsigned MemOpndSlot; + + unsigned CurOp = 0; + + DstReg = MI->getOperand(CurOp++).getReg(); + MemOpndSlot = CurOp; + CurOp += X86::AddrNumOperands; + SrcReg = MI->getOperand(CurOp++).getReg(); + + const TargetRegisterClass *RC = MRI.getRegClass(DstReg); + MVT::SimpleValueType VT = *RC->vt_begin(); + unsigned AccPhyReg = getX86SubSuperRegister(X86::EAX, VT); + + unsigned LCMPXCHGOpc = getCmpXChgOpcode(VT); + unsigned LOADOpc = getLoadOpcode(VT); + + // For the atomic load-arith operator, we generate + // + // thisMBB: + // EAX = LOAD [MI.addr] + // mainMBB: + // t1 = OP MI.val, EAX + // LCMPXCHG [MI.addr], t1, [EAX is implicitly used & defined] + // JNE mainMBB + // sinkMBB: - /// First build the CFG - MachineFunction *F = MBB->getParent(); MachineBasicBlock *thisMBB = MBB; - MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); - F->insert(MBBIter, newMBB); - F->insert(MBBIter, nextMBB); - - // Transfer the remainder of thisMBB and its successor edges to nextMBB. 
- nextMBB->splice(nextMBB->begin(), thisMBB, - llvm::next(MachineBasicBlock::iterator(bInstr)), - thisMBB->end()); - nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); - - // Update thisMBB to fall through to newMBB - thisMBB->addSuccessor(newMBB); - - // newMBB jumps to itself and fall through to nextMBB - newMBB->addSuccessor(nextMBB); - newMBB->addSuccessor(newMBB); - - DebugLoc dl = bInstr->getDebugLoc(); - // Insert instructions into newMBB based on incoming instruction - // There are 8 "real" operands plus 9 implicit def/uses, ignored here. - assert(bInstr->getNumOperands() < X86::AddrNumOperands + 14 && - "unexpected number of operands"); - MachineOperand& dest1Oper = bInstr->getOperand(0); - MachineOperand& dest2Oper = bInstr->getOperand(1); - MachineOperand* argOpers[2 + X86::AddrNumOperands]; - for (int i=0; i < 2 + X86::AddrNumOperands; ++i) { - argOpers[i] = &bInstr->getOperand(i+2); - - // We use some of the operands multiple times, so conservatively just - // clear any kill flags that might be present. - if (argOpers[i]->isReg() && argOpers[i]->isUse()) - argOpers[i]->setIsKill(false); - } - - // x86 address has 5 operands: base, index, scale, displacement, and segment. - int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] - - unsigned t1 = F->getRegInfo().createVirtualRegister(RC); - MachineInstrBuilder MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t1); - for (int i=0; i <= lastAddrIndx; ++i) - (*MIB).addOperand(*argOpers[i]); - unsigned t2 = F->getRegInfo().createVirtualRegister(RC); - MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2); - // add 4 to displacement. - for (int i=0; i <= lastAddrIndx-2; ++i) - (*MIB).addOperand(*argOpers[i]); - MachineOperand newOp3 = *(argOpers[3]); - if (newOp3.isImm()) - newOp3.setImm(newOp3.getImm()+4); - else - newOp3.setOffset(newOp3.getOffset()+4); - (*MIB).addOperand(newOp3); - (*MIB).addOperand(*argOpers[lastAddrIndx]); - - // t3/4 are defined later, at the bottom of the loop - unsigned t3 = F->getRegInfo().createVirtualRegister(RC); - unsigned t4 = F->getRegInfo().createVirtualRegister(RC); - BuildMI(newMBB, dl, TII->get(X86::PHI), dest1Oper.getReg()) - .addReg(t1).addMBB(thisMBB).addReg(t3).addMBB(newMBB); - BuildMI(newMBB, dl, TII->get(X86::PHI), dest2Oper.getReg()) - .addReg(t2).addMBB(thisMBB).addReg(t4).addMBB(newMBB); - - // The subsequent operations should be using the destination registers of - // the PHI instructions. 
- t1 = dest1Oper.getReg(); - t2 = dest2Oper.getReg(); - - int valArgIndx = lastAddrIndx + 1; - assert((argOpers[valArgIndx]->isReg() || - argOpers[valArgIndx]->isImm()) && - "invalid operand"); - unsigned t5 = F->getRegInfo().createVirtualRegister(RC); - unsigned t6 = F->getRegInfo().createVirtualRegister(RC); - if (argOpers[valArgIndx]->isReg()) - MIB = BuildMI(newMBB, dl, TII->get(regOpcL), t5); - else - MIB = BuildMI(newMBB, dl, TII->get(immOpcL), t5); - if (regOpcL != X86::MOV32rr) - MIB.addReg(t1); - (*MIB).addOperand(*argOpers[valArgIndx]); - assert(argOpers[valArgIndx + 1]->isReg() == - argOpers[valArgIndx]->isReg()); - assert(argOpers[valArgIndx + 1]->isImm() == - argOpers[valArgIndx]->isImm()); - if (argOpers[valArgIndx + 1]->isReg()) - MIB = BuildMI(newMBB, dl, TII->get(regOpcH), t6); - else - MIB = BuildMI(newMBB, dl, TII->get(immOpcH), t6); - if (regOpcH != X86::MOV32rr) - MIB.addReg(t2); - (*MIB).addOperand(*argOpers[valArgIndx + 1]); - - unsigned t7, t8; - if (Invert) { - t7 = F->getRegInfo().createVirtualRegister(RC); - t8 = F->getRegInfo().createVirtualRegister(RC); - MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t7).addReg(t5); - MIB = BuildMI(newMBB, dl, TII->get(NotOpc), t8).addReg(t6); - } else { - t7 = t5; - t8 = t6; + MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); + MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); + MF->insert(I, mainMBB); + MF->insert(I, sinkMBB); + + MachineInstrBuilder MIB; + + // Transfer the remainder of BB and its successor edges to sinkMBB. + sinkMBB->splice(sinkMBB->begin(), MBB, + llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); + + // thisMBB: + MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), AccPhyReg); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) + MIB.addOperand(MI->getOperand(MemOpndSlot + i)); + MIB.setMemRefs(MMOBegin, MMOEnd); + + thisMBB->addSuccessor(mainMBB); + + // mainMBB: + MachineBasicBlock *origMainMBB = mainMBB; + mainMBB->addLiveIn(AccPhyReg); + + // Copy AccPhyReg as it is used more than once. 
+ unsigned AccReg = MRI.createVirtualRegister(RC); + BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccReg) + .addReg(AccPhyReg); + + unsigned t1 = MRI.createVirtualRegister(RC); + unsigned Opc = MI->getOpcode(); + switch (Opc) { + default: + llvm_unreachable("Unhandled atomic-load-op opcode!"); + case X86::ATOMAND8: + case X86::ATOMAND16: + case X86::ATOMAND32: + case X86::ATOMAND64: + case X86::ATOMOR8: + case X86::ATOMOR16: + case X86::ATOMOR32: + case X86::ATOMOR64: + case X86::ATOMXOR8: + case X86::ATOMXOR16: + case X86::ATOMXOR32: + case X86::ATOMXOR64: { + unsigned ARITHOpc = getNonAtomicOpcode(Opc); + BuildMI(mainMBB, DL, TII->get(ARITHOpc), t1).addReg(SrcReg) + .addReg(AccReg); + break; + } + case X86::ATOMNAND8: + case X86::ATOMNAND16: + case X86::ATOMNAND32: + case X86::ATOMNAND64: { + unsigned t2 = MRI.createVirtualRegister(RC); + unsigned NOTOpc; + unsigned ANDOpc = getNonAtomicOpcodeWithExtraOpc(Opc, NOTOpc); + BuildMI(mainMBB, DL, TII->get(ANDOpc), t2).addReg(SrcReg) + .addReg(AccReg); + BuildMI(mainMBB, DL, TII->get(NOTOpc), t1).addReg(t2); + break; + } + case X86::ATOMMAX8: + case X86::ATOMMAX16: + case X86::ATOMMAX32: + case X86::ATOMMAX64: + case X86::ATOMMIN8: + case X86::ATOMMIN16: + case X86::ATOMMIN32: + case X86::ATOMMIN64: + case X86::ATOMUMAX8: + case X86::ATOMUMAX16: + case X86::ATOMUMAX32: + case X86::ATOMUMAX64: + case X86::ATOMUMIN8: + case X86::ATOMUMIN16: + case X86::ATOMUMIN32: + case X86::ATOMUMIN64: { + unsigned CMPOpc; + unsigned CMOVOpc = getNonAtomicOpcodeWithExtraOpc(Opc, CMPOpc); + + BuildMI(mainMBB, DL, TII->get(CMPOpc)) + .addReg(SrcReg) + .addReg(AccReg); + + if (Subtarget->hasCMov()) { + if (VT != MVT::i8) { + // Native support + BuildMI(mainMBB, DL, TII->get(CMOVOpc), t1) + .addReg(SrcReg) + .addReg(AccReg); + } else { + // Promote i8 to i32 to use CMOV32 + const TargetRegisterClass *RC32 = getRegClassFor(MVT::i32); + unsigned SrcReg32 = MRI.createVirtualRegister(RC32); + unsigned AccReg32 = MRI.createVirtualRegister(RC32); + unsigned t2 = MRI.createVirtualRegister(RC32); + + unsigned Undef = MRI.createVirtualRegister(RC32); + BuildMI(mainMBB, DL, TII->get(TargetOpcode::IMPLICIT_DEF), Undef); + + BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), SrcReg32) + .addReg(Undef) + .addReg(SrcReg) + .addImm(X86::sub_8bit); + BuildMI(mainMBB, DL, TII->get(TargetOpcode::INSERT_SUBREG), AccReg32) + .addReg(Undef) + .addReg(AccReg) + .addImm(X86::sub_8bit); + + BuildMI(mainMBB, DL, TII->get(CMOVOpc), t2) + .addReg(SrcReg32) + .addReg(AccReg32); + + BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), t1) + .addReg(t2, 0, X86::sub_8bit); + } + } else { + // Use pseudo select and lower them. + assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && + "Invalid atomic-load-op transformation!"); + unsigned SelOpc = getPseudoCMOVOpc(VT); + X86::CondCode CC = X86::getCondFromCMovOpc(CMOVOpc); + assert(CC != X86::COND_INVALID && "Invalid atomic-load-op transformation!"); + MIB = BuildMI(mainMBB, DL, TII->get(SelOpc), t1) + .addReg(SrcReg).addReg(AccReg) + .addImm(CC); + mainMBB = EmitLoweredSelect(MIB, mainMBB); + } + break; + } } - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); - MIB.addReg(t1); - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EDX); - MIB.addReg(t2); + // Copy AccPhyReg back from virtual register. 
+ BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), AccPhyReg) + .addReg(AccReg); - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EBX); - MIB.addReg(t7); - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::ECX); - MIB.addReg(t8); + MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) + MIB.addOperand(MI->getOperand(MemOpndSlot + i)); + MIB.addReg(t1); + MIB.setMemRefs(MMOBegin, MMOEnd); - MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG8B)); - for (int i=0; i <= lastAddrIndx; ++i) - (*MIB).addOperand(*argOpers[i]); + BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); - assert(bInstr->hasOneMemOperand() && "Unexpected number of memoperand"); - (*MIB).setMemRefs(bInstr->memoperands_begin(), - bInstr->memoperands_end()); + mainMBB->addSuccessor(origMainMBB); + mainMBB->addSuccessor(sinkMBB); - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t3); - MIB.addReg(X86::EAX); - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t4); - MIB.addReg(X86::EDX); + // sinkMBB: + sinkMBB->addLiveIn(AccPhyReg); - // insert branch - BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); + BuildMI(*sinkMBB, sinkMBB->begin(), DL, + TII->get(TargetOpcode::COPY), DstReg) + .addReg(AccPhyReg); - bInstr->eraseFromParent(); // The pseudo instruction is gone now. - return nextMBB; + MI->eraseFromParent(); + return sinkMBB; } -// private utility function +// EmitAtomicLoadArith6432 - emit the code sequence for pseudo atomic +// instructions. They will be translated into a spin-loop or compare-exchange +// loop from +// +// ... +// dst = atomic-fetch-op MI.addr, MI.val +// ... +// +// to +// +// ... +// EAX = LOAD [MI.addr + 0] +// EDX = LOAD [MI.addr + 4] +// loop: +// EBX = OP MI.val.lo, EAX +// ECX = OP MI.val.hi, EDX +// LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] +// JNE loop +// sink: +// dst = EDX:EAX +// ... 
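The comment block above describes the expansion of a 64-bit atomic read-modify-write on a 32-bit target as a CMPXCHG8B retry loop. A source-level analogue of that loop, written with std::atomic so it is self-contained (the register annotations map it back to the described EDX:EAX / ECX:EBX roles):

#include <atomic>
#include <cstdint>

uint64_t fetchOr64(std::atomic<uint64_t> &A, uint64_t Val) {
  uint64_t Old = A.load(std::memory_order_relaxed); // EDX:EAX = LOAD [addr]
  uint64_t New;
  do {
    New = Old | Val;                                // ECX:EBX = OP val, old
  } while (!A.compare_exchange_weak(Old, New));     // LCMPXCHG8B ; JNE loop
  return Old;                                       // dst = EDX:EAX
}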
MachineBasicBlock * -X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, - MachineBasicBlock *MBB, - unsigned cmovOpc) const { - // For the atomic min/max operator, we generate - // thisMBB: - // newMBB: - // ld t1 = [min/max.addr] - // mov t2 = [min/max.val] - // cmp t1, t2 - // cmov[cond] t2 = t1 - // mov EAX = t1 - // lcs dest = [bitinstr.addr], t2 [EAX is implicit] - // bz newMBB - // fallthrough -->nextMBB - // +X86TargetLowering::EmitAtomicLoadArith6432(MachineInstr *MI, + MachineBasicBlock *MBB) const { const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); - const BasicBlock *LLVM_BB = MBB->getBasicBlock(); - MachineFunction::iterator MBBIter = MBB; - ++MBBIter; + DebugLoc DL = MI->getDebugLoc(); + + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + + const BasicBlock *BB = MBB->getBasicBlock(); + MachineFunction::iterator I = MBB; + ++I; + + assert(MI->getNumOperands() <= X86::AddrNumOperands + 4 && + "Unexpected number of operands"); + + assert(MI->hasOneMemOperand() && + "Expected atomic-load-op32 to have one memoperand"); + + // Memory Reference + MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); + MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); + + unsigned DstLoReg, DstHiReg; + unsigned SrcLoReg, SrcHiReg; + unsigned MemOpndSlot; + + unsigned CurOp = 0; + + DstLoReg = MI->getOperand(CurOp++).getReg(); + DstHiReg = MI->getOperand(CurOp++).getReg(); + MemOpndSlot = CurOp; + CurOp += X86::AddrNumOperands; + SrcLoReg = MI->getOperand(CurOp++).getReg(); + SrcHiReg = MI->getOperand(CurOp++).getReg(); + + const TargetRegisterClass *RC = &X86::GR32RegClass; + const TargetRegisterClass *RC8 = &X86::GR8RegClass; + + unsigned LCMPXCHGOpc = X86::LCMPXCHG8B; + unsigned LOADOpc = X86::MOV32rm; + + // For the atomic load-arith operator, we generate + // + // thisMBB: + // EAX = LOAD [MI.addr + 0] + // EDX = LOAD [MI.addr + 4] + // mainMBB: + // EBX = OP MI.vallo, EAX + // ECX = OP MI.valhi, EDX + // LCMPXCHG8B [MI.addr], [ECX:EBX & EDX:EAX are implicitly used and EDX:EAX is implicitly defined] + // JNE mainMBB + // sinkMBB: - /// First build the CFG - MachineFunction *F = MBB->getParent(); MachineBasicBlock *thisMBB = MBB; - MachineBasicBlock *newMBB = F->CreateMachineBasicBlock(LLVM_BB); - MachineBasicBlock *nextMBB = F->CreateMachineBasicBlock(LLVM_BB); - F->insert(MBBIter, newMBB); - F->insert(MBBIter, nextMBB); - - // Transfer the remainder of thisMBB and its successor edges to nextMBB. 
- nextMBB->splice(nextMBB->begin(), thisMBB, - llvm::next(MachineBasicBlock::iterator(mInstr)), - thisMBB->end()); - nextMBB->transferSuccessorsAndUpdatePHIs(thisMBB); - - // Update thisMBB to fall through to newMBB - thisMBB->addSuccessor(newMBB); - - // newMBB jumps to newMBB and fall through to nextMBB - newMBB->addSuccessor(nextMBB); - newMBB->addSuccessor(newMBB); - - DebugLoc dl = mInstr->getDebugLoc(); - // Insert instructions into newMBB based on incoming instruction - assert(mInstr->getNumOperands() < X86::AddrNumOperands + 4 && - "unexpected number of operands"); - MachineOperand& destOper = mInstr->getOperand(0); - MachineOperand* argOpers[2 + X86::AddrNumOperands]; - int numArgs = mInstr->getNumOperands() - 1; - for (int i=0; i < numArgs; ++i) - argOpers[i] = &mInstr->getOperand(i+1); - - // x86 address has 4 operands: base, index, scale, and displacement - int lastAddrIndx = X86::AddrNumOperands - 1; // [0,3] - int valArgIndx = lastAddrIndx + 1; - - unsigned t1 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass); - MachineInstrBuilder MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rm), t1); - for (int i=0; i <= lastAddrIndx; ++i) - (*MIB).addOperand(*argOpers[i]); - - // We only support register and immediate values - assert((argOpers[valArgIndx]->isReg() || - argOpers[valArgIndx]->isImm()) && - "invalid operand"); - - unsigned t2 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass); - if (argOpers[valArgIndx]->isReg()) - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), t2); - else - MIB = BuildMI(newMBB, dl, TII->get(X86::MOV32rr), t2); - (*MIB).addOperand(*argOpers[valArgIndx]); + MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); + MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); + MF->insert(I, mainMBB); + MF->insert(I, sinkMBB); - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), X86::EAX); - MIB.addReg(t1); + MachineInstrBuilder MIB; - MIB = BuildMI(newMBB, dl, TII->get(X86::CMP32rr)); - MIB.addReg(t1); - MIB.addReg(t2); + // Transfer the remainder of BB and its successor edges to sinkMBB. 
+ sinkMBB->splice(sinkMBB->begin(), MBB, + llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); + + // thisMBB: + // Lo + MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EAX); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) + MIB.addOperand(MI->getOperand(MemOpndSlot + i)); + MIB.setMemRefs(MMOBegin, MMOEnd); + // Hi + MIB = BuildMI(thisMBB, DL, TII->get(LOADOpc), X86::EDX); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { + if (i == X86::AddrDisp) + MIB.addDisp(MI->getOperand(MemOpndSlot + i), 4); // 4 == sizeof(i32) + else + MIB.addOperand(MI->getOperand(MemOpndSlot + i)); + } + MIB.setMemRefs(MMOBegin, MMOEnd); - // Generate movc - unsigned t3 = F->getRegInfo().createVirtualRegister(&X86::GR32RegClass); - MIB = BuildMI(newMBB, dl, TII->get(cmovOpc),t3); - MIB.addReg(t2); - MIB.addReg(t1); + thisMBB->addSuccessor(mainMBB); - // Cmp and exchange if none has modified the memory location - MIB = BuildMI(newMBB, dl, TII->get(X86::LCMPXCHG32)); - for (int i=0; i <= lastAddrIndx; ++i) - (*MIB).addOperand(*argOpers[i]); - MIB.addReg(t3); - assert(mInstr->hasOneMemOperand() && "Unexpected number of memoperand"); - (*MIB).setMemRefs(mInstr->memoperands_begin(), - mInstr->memoperands_end()); + // mainMBB: + MachineBasicBlock *origMainMBB = mainMBB; + mainMBB->addLiveIn(X86::EAX); + mainMBB->addLiveIn(X86::EDX); - MIB = BuildMI(newMBB, dl, TII->get(TargetOpcode::COPY), destOper.getReg()); - MIB.addReg(X86::EAX); + // Copy EDX:EAX as they are used more than once. + unsigned LoReg = MRI.createVirtualRegister(RC); + unsigned HiReg = MRI.createVirtualRegister(RC); + BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), LoReg).addReg(X86::EAX); + BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), HiReg).addReg(X86::EDX); - // insert branch - BuildMI(newMBB, dl, TII->get(X86::JNE_4)).addMBB(newMBB); + unsigned t1L = MRI.createVirtualRegister(RC); + unsigned t1H = MRI.createVirtualRegister(RC); - mInstr->eraseFromParent(); // The pseudo instruction is gone now. 
- return nextMBB; + unsigned Opc = MI->getOpcode(); + switch (Opc) { + default: + llvm_unreachable("Unhandled atomic-load-op6432 opcode!"); + case X86::ATOMAND6432: + case X86::ATOMOR6432: + case X86::ATOMXOR6432: + case X86::ATOMADD6432: + case X86::ATOMSUB6432: { + unsigned HiOpc; + unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); + BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(LoReg).addReg(SrcLoReg); + BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(HiReg).addReg(SrcHiReg); + break; + } + case X86::ATOMNAND6432: { + unsigned HiOpc, NOTOpc; + unsigned LoOpc = getNonAtomic6432OpcodeWithExtraOpc(Opc, HiOpc, NOTOpc); + unsigned t2L = MRI.createVirtualRegister(RC); + unsigned t2H = MRI.createVirtualRegister(RC); + BuildMI(mainMBB, DL, TII->get(LoOpc), t2L).addReg(SrcLoReg).addReg(LoReg); + BuildMI(mainMBB, DL, TII->get(HiOpc), t2H).addReg(SrcHiReg).addReg(HiReg); + BuildMI(mainMBB, DL, TII->get(NOTOpc), t1L).addReg(t2L); + BuildMI(mainMBB, DL, TII->get(NOTOpc), t1H).addReg(t2H); + break; + } + case X86::ATOMMAX6432: + case X86::ATOMMIN6432: + case X86::ATOMUMAX6432: + case X86::ATOMUMIN6432: { + unsigned HiOpc; + unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); + unsigned cL = MRI.createVirtualRegister(RC8); + unsigned cH = MRI.createVirtualRegister(RC8); + unsigned cL32 = MRI.createVirtualRegister(RC); + unsigned cH32 = MRI.createVirtualRegister(RC); + unsigned cc = MRI.createVirtualRegister(RC); + // cl := cmp src_lo, lo + BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) + .addReg(SrcLoReg).addReg(LoReg); + BuildMI(mainMBB, DL, TII->get(LoOpc), cL); + BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cL32).addReg(cL); + // ch := cmp src_hi, hi + BuildMI(mainMBB, DL, TII->get(X86::CMP32rr)) + .addReg(SrcHiReg).addReg(HiReg); + BuildMI(mainMBB, DL, TII->get(HiOpc), cH); + BuildMI(mainMBB, DL, TII->get(X86::MOVZX32rr8), cH32).addReg(cH); + // cc := if (src_hi == hi) ? 
cl : ch; + if (Subtarget->hasCMov()) { + BuildMI(mainMBB, DL, TII->get(X86::CMOVE32rr), cc) + .addReg(cH32).addReg(cL32); + } else { + MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), cc) + .addReg(cH32).addReg(cL32) + .addImm(X86::COND_E); + mainMBB = EmitLoweredSelect(MIB, mainMBB); + } + BuildMI(mainMBB, DL, TII->get(X86::TEST32rr)).addReg(cc).addReg(cc); + if (Subtarget->hasCMov()) { + BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1L) + .addReg(SrcLoReg).addReg(LoReg); + BuildMI(mainMBB, DL, TII->get(X86::CMOVNE32rr), t1H) + .addReg(SrcHiReg).addReg(HiReg); + } else { + MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1L) + .addReg(SrcLoReg).addReg(LoReg) + .addImm(X86::COND_NE); + mainMBB = EmitLoweredSelect(MIB, mainMBB); + MIB = BuildMI(mainMBB, DL, TII->get(X86::CMOV_GR32), t1H) + .addReg(SrcHiReg).addReg(HiReg) + .addImm(X86::COND_NE); + mainMBB = EmitLoweredSelect(MIB, mainMBB); + } + break; + } + case X86::ATOMSWAP6432: { + unsigned HiOpc; + unsigned LoOpc = getNonAtomic6432Opcode(Opc, HiOpc); + BuildMI(mainMBB, DL, TII->get(LoOpc), t1L).addReg(SrcLoReg); + BuildMI(mainMBB, DL, TII->get(HiOpc), t1H).addReg(SrcHiReg); + break; + } + } + + // Copy EDX:EAX back from HiReg:LoReg + BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EAX).addReg(LoReg); + BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EDX).addReg(HiReg); + // Copy ECX:EBX from t1H:t1L + BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::EBX).addReg(t1L); + BuildMI(mainMBB, DL, TII->get(TargetOpcode::COPY), X86::ECX).addReg(t1H); + + MIB = BuildMI(mainMBB, DL, TII->get(LCMPXCHGOpc)); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) + MIB.addOperand(MI->getOperand(MemOpndSlot + i)); + MIB.setMemRefs(MMOBegin, MMOEnd); + + BuildMI(mainMBB, DL, TII->get(X86::JNE_4)).addMBB(origMainMBB); + + mainMBB->addSuccessor(origMainMBB); + mainMBB->addSuccessor(sinkMBB); + + // sinkMBB: + sinkMBB->addLiveIn(X86::EAX); + sinkMBB->addLiveIn(X86::EDX); + + BuildMI(*sinkMBB, sinkMBB->begin(), DL, + TII->get(TargetOpcode::COPY), DstLoReg) + .addReg(X86::EAX); + BuildMI(*sinkMBB, sinkMBB->begin(), DL, + TII->get(TargetOpcode::COPY), DstHiReg) + .addReg(X86::EDX); + + MI->eraseFromParent(); + return sinkMBB; } // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 // or XMM0_V32I8 in AVX all of this code can be replaced with that // in the .td file. 
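// Editorial sketch, not part of the upstream patch: the EmitAtomicLoadArith6432
// expansion in the hunk above is a compare-and-exchange retry loop -- thisMBB
// loads the current 64-bit value into EDX:EAX, mainMBB computes the new lo/hi
// pair into EBX:ECX and issues LCMPXCHG8B, and JNE_4 branches back to mainMBB
// until the exchange succeeds; sinkMBB then copies the old value out of
// EDX:EAX. A hypothetical, self-contained C++ equivalent of that loop shape
// (std::atomic and the Op callback stand in for the memory operand and the
// per-opcode arithmetic; neither name comes from the patch):
#include <atomic>
#include <cstdint>
static uint64_t AtomicRMW64On32(std::atomic<uint64_t> &Mem, uint64_t Src,
                                uint64_t (*Op)(uint64_t, uint64_t)) {
  uint64_t Old = Mem.load();                        // thisMBB: EDX:EAX = [Mem]
  while (!Mem.compare_exchange_weak(Old, Op(Old, Src))) {
    // mainMBB: recompute the new lo/hi pair from the refreshed old value and
    // retry the compare-exchange (the JNE_4 above loops back here).
  }
  return Old;                                       // sinkMBB: old value from EDX:EAX
}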
-MachineBasicBlock * -X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB, - unsigned numArgs, bool memArg) const { - assert(Subtarget->hasSSE42() && - "Target must have SSE4.2 or AVX features enabled"); +static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB, + const TargetInstrInfo *TII) { + unsigned Opc; + switch (MI->getOpcode()) { + default: llvm_unreachable("illegal opcode!"); + case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break; + case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break; + case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break; + case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break; + case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break; + case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break; + case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break; + case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break; + } DebugLoc dl = MI->getDebugLoc(); - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); + + unsigned NumArgs = MI->getNumOperands(); + for (unsigned i = 1; i < NumArgs; ++i) { + MachineOperand &Op = MI->getOperand(i); + if (!(Op.isReg() && Op.isImplicit())) + MIB.addOperand(Op); + } + if (MI->hasOneMemOperand()) + MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + + BuildMI(*BB, MI, dl, + TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) + .addReg(X86::XMM0); + + MI->eraseFromParent(); + return BB; +} + +// FIXME: Custom handling because TableGen doesn't support multiple implicit +// defs in an instruction pattern +static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB, + const TargetInstrInfo *TII) { unsigned Opc; - if (!Subtarget->hasAVX()) { - if (memArg) - Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; - else - Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; - } else { - if (memArg) - Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm; - else - Opc = numArgs == 3 ? 
X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr; + switch (MI->getOpcode()) { + default: llvm_unreachable("illegal opcode!"); + case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break; + case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break; + case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break; + case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break; + case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break; + case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break; + case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break; + case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break; } + DebugLoc dl = MI->getDebugLoc(); MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc)); - for (unsigned i = 0; i < numArgs; ++i) { - MachineOperand &Op = MI->getOperand(i+1); + + unsigned NumArgs = MI->getNumOperands(); // remove the results + for (unsigned i = 1; i < NumArgs; ++i) { + MachineOperand &Op = MI->getOperand(i); if (!(Op.isReg() && Op.isImplicit())) MIB.addOperand(Op); } + if (MI->hasOneMemOperand()) + MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); + BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg()) - .addReg(X86::XMM0); + .addReg(X86::ECX); MI->eraseFromParent(); return BB; } -MachineBasicBlock * -X86TargetLowering::EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB) const { +static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB, + const TargetInstrInfo *TII, + const X86Subtarget* Subtarget) { DebugLoc dl = MI->getDebugLoc(); - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); // Address into RAX/EAX, other two args into ECX, EDX. unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r; @@ -12717,51 +13553,248 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, DebugLoc DL = MI->getDebugLoc(); MachineFunction *F = BB->getParent(); - assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); - assert(MI->getOperand(3).isGlobal() && "This should be a global"); + assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?"); + assert(MI->getOperand(3).isGlobal() && "This should be a global"); + + // Get a register mask for the lowered call. + // FIXME: The 32-bit calls have non-standard calling conventions. Use a + // proper register mask. 
+ const uint32_t *RegMask = + getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); + if (Subtarget->is64Bit()) { + MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, + TII->get(X86::MOV64rm), X86::RDI) + .addReg(X86::RIP) + .addImm(0).addReg(0) + .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, + MI->getOperand(3).getTargetFlags()) + .addReg(0); + MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); + addDirectMem(MIB, X86::RDI); + MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); + } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { + MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, + TII->get(X86::MOV32rm), X86::EAX) + .addReg(0) + .addImm(0).addReg(0) + .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, + MI->getOperand(3).getTargetFlags()) + .addReg(0); + MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); + addDirectMem(MIB, X86::EAX); + MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); + } else { + MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, + TII->get(X86::MOV32rm), X86::EAX) + .addReg(TII->getGlobalBaseReg(F)) + .addImm(0).addReg(0) + .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, + MI->getOperand(3).getTargetFlags()) + .addReg(0); + MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); + addDirectMem(MIB, X86::EAX); + MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); + } + + MI->eraseFromParent(); // The pseudo instruction is gone now. + return BB; +} + +MachineBasicBlock * +X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI, + MachineBasicBlock *MBB) const { + DebugLoc DL = MI->getDebugLoc(); + const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); + + const BasicBlock *BB = MBB->getBasicBlock(); + MachineFunction::iterator I = MBB; + ++I; + + // Memory Reference + MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); + MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); + + unsigned DstReg; + unsigned MemOpndSlot = 0; + + unsigned CurOp = 0; + + DstReg = MI->getOperand(CurOp++).getReg(); + const TargetRegisterClass *RC = MRI.getRegClass(DstReg); + assert(RC->hasType(MVT::i32) && "Invalid destination!"); + unsigned mainDstReg = MRI.createVirtualRegister(RC); + unsigned restoreDstReg = MRI.createVirtualRegister(RC); + + MemOpndSlot = CurOp; + + MVT PVT = getPointerTy(); + assert((PVT == MVT::i64 || PVT == MVT::i32) && + "Invalid Pointer Size!"); + + // For v = setjmp(buf), we generate + // + // thisMBB: + // buf[LabelOffset] = restoreMBB + // SjLjSetup restoreMBB + // + // mainMBB: + // v_main = 0 + // + // sinkMBB: + // v = phi(main, restore) + // + // restoreMBB: + // v_restore = 1 + + MachineBasicBlock *thisMBB = MBB; + MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); + MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); + MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB); + MF->insert(I, mainMBB); + MF->insert(I, sinkMBB); + MF->push_back(restoreMBB); + + MachineInstrBuilder MIB; + + // Transfer the remainder of BB and its successor edges to sinkMBB. 
+ sinkMBB->splice(sinkMBB->begin(), MBB, + llvm::next(MachineBasicBlock::iterator(MI)), MBB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); + + // thisMBB: + unsigned PtrStoreOpc = 0; + unsigned LabelReg = 0; + const int64_t LabelOffset = 1 * PVT.getStoreSize(); + Reloc::Model RM = getTargetMachine().getRelocationModel(); + bool UseImmLabel = (getTargetMachine().getCodeModel() == CodeModel::Small) && + (RM == Reloc::Static || RM == Reloc::DynamicNoPIC); + + // Prepare IP either in reg or imm. + if (!UseImmLabel) { + PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr; + const TargetRegisterClass *PtrRC = getRegClassFor(PVT); + LabelReg = MRI.createVirtualRegister(PtrRC); + if (Subtarget->is64Bit()) { + MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg) + .addReg(X86::RIP) + .addImm(0) + .addReg(0) + .addMBB(restoreMBB) + .addReg(0); + } else { + const X86InstrInfo *XII = static_cast(TII); + MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg) + .addReg(XII->getGlobalBaseReg(MF)) + .addImm(0) + .addReg(0) + .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference()) + .addReg(0); + } + } else + PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi; + // Store IP + MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc)); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { + if (i == X86::AddrDisp) + MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset); + else + MIB.addOperand(MI->getOperand(MemOpndSlot + i)); + } + if (!UseImmLabel) + MIB.addReg(LabelReg); + else + MIB.addMBB(restoreMBB); + MIB.setMemRefs(MMOBegin, MMOEnd); + // Setup + MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup)) + .addMBB(restoreMBB); + MIB.addRegMask(RegInfo->getNoPreservedMask()); + thisMBB->addSuccessor(mainMBB); + thisMBB->addSuccessor(restoreMBB); + + // mainMBB: + // EAX = 0 + BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg); + mainMBB->addSuccessor(sinkMBB); + + // sinkMBB: + BuildMI(*sinkMBB, sinkMBB->begin(), DL, + TII->get(X86::PHI), DstReg) + .addReg(mainDstReg).addMBB(mainMBB) + .addReg(restoreDstReg).addMBB(restoreMBB); + + // restoreMBB: + BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1); + BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB); + restoreMBB->addSuccessor(sinkMBB); + + MI->eraseFromParent(); + return sinkMBB; +} + +MachineBasicBlock * +X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI, + MachineBasicBlock *MBB) const { + DebugLoc DL = MI->getDebugLoc(); + const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + + MachineFunction *MF = MBB->getParent(); + MachineRegisterInfo &MRI = MF->getRegInfo(); - // Get a register mask for the lowered call. - // FIXME: The 32-bit calls have non-standard calling conventions. Use a - // proper register mask. 
- const uint32_t *RegMask = - getTargetMachine().getRegisterInfo()->getCallPreservedMask(CallingConv::C); - if (Subtarget->is64Bit()) { - MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, - TII->get(X86::MOV64rm), X86::RDI) - .addReg(X86::RIP) - .addImm(0).addReg(0) - .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, - MI->getOperand(3).getTargetFlags()) - .addReg(0); - MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); - addDirectMem(MIB, X86::RDI); - MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); - } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { - MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, - TII->get(X86::MOV32rm), X86::EAX) - .addReg(0) - .addImm(0).addReg(0) - .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, - MI->getOperand(3).getTargetFlags()) - .addReg(0); - MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); - addDirectMem(MIB, X86::EAX); - MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); - } else { - MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, - TII->get(X86::MOV32rm), X86::EAX) - .addReg(TII->getGlobalBaseReg(F)) - .addImm(0).addReg(0) - .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, - MI->getOperand(3).getTargetFlags()) - .addReg(0); - MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); - addDirectMem(MIB, X86::EAX); - MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); + // Memory Reference + MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); + MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end(); + + MVT PVT = getPointerTy(); + assert((PVT == MVT::i64 || PVT == MVT::i32) && + "Invalid Pointer Size!"); + + const TargetRegisterClass *RC = + (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass; + unsigned Tmp = MRI.createVirtualRegister(RC); + // Since FP is only updated here but NOT referenced, it's treated as GPR. + unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP; + unsigned SP = RegInfo->getStackRegister(); + + MachineInstrBuilder MIB; + + const int64_t LabelOffset = 1 * PVT.getStoreSize(); + const int64_t SPOffset = 2 * PVT.getStoreSize(); + + unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm; + unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r; + + // Reload FP + MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) + MIB.addOperand(MI->getOperand(i)); + MIB.setMemRefs(MMOBegin, MMOEnd); + // Reload IP + MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { + if (i == X86::AddrDisp) + MIB.addDisp(MI->getOperand(i), LabelOffset); + else + MIB.addOperand(MI->getOperand(i)); + } + MIB.setMemRefs(MMOBegin, MMOEnd); + // Reload SP + MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP); + for (unsigned i = 0; i < X86::AddrNumOperands; ++i) { + if (i == X86::AddrDisp) + MIB.addDisp(MI->getOperand(i), SPOffset); + else + MIB.addOperand(MI->getOperand(i)); } + MIB.setMemRefs(MMOBegin, MMOEnd); + // Jump + BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp); - MI->eraseFromParent(); // The pseudo instruction is gone now. 
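// Editorial sketch, not part of the upstream patch: emitEHSjLjSetJmp stores the
// address of restoreMBB at LabelOffset (one pointer size into the buffer), and
// the emitEHSjLjLongJmp code above reloads FP from offset 0, the resume IP from
// LabelOffset, and SP from SPOffset (two pointer sizes) before jumping through
// the reloaded IP. A hypothetical C++ view of that fixed slot layout for a
// 64-bit target (the struct and field names are illustrative only):
#include <cstdint>
struct SjLjJumpBuffer64 {
  uint64_t FramePointer;  // slot 0: reloaded into RBP first
  uint64_t ResumeAddress; // slot 1 (LabelOffset): restoreMBB, target of the indirect jump
  uint64_t StackPointer;  // slot 2 (SPOffset): reloaded into RSP last before the jump
};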
- return BB; + MI->eraseFromParent(); + return MBB; } MachineBasicBlock * @@ -12893,198 +13926,101 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, case X86::PCMPESTRM128REG: case X86::VPCMPESTRM128REG: case X86::PCMPESTRM128MEM: - case X86::VPCMPESTRM128MEM: { - unsigned NumArgs; - bool MemArg; - switch (MI->getOpcode()) { - default: llvm_unreachable("illegal opcode!"); - case X86::PCMPISTRM128REG: - case X86::VPCMPISTRM128REG: - NumArgs = 3; MemArg = false; break; - case X86::PCMPISTRM128MEM: - case X86::VPCMPISTRM128MEM: - NumArgs = 3; MemArg = true; break; - case X86::PCMPESTRM128REG: - case X86::VPCMPESTRM128REG: - NumArgs = 5; MemArg = false; break; - case X86::PCMPESTRM128MEM: - case X86::VPCMPESTRM128MEM: - NumArgs = 5; MemArg = true; break; - } - return EmitPCMP(MI, BB, NumArgs, MemArg); - } - - // Thread synchronization. + case X86::VPCMPESTRM128MEM: + assert(Subtarget->hasSSE42() && + "Target must have SSE4.2 or AVX features enabled"); + return EmitPCMPSTRM(MI, BB, getTargetMachine().getInstrInfo()); + + // String/text processing lowering. + case X86::PCMPISTRIREG: + case X86::VPCMPISTRIREG: + case X86::PCMPISTRIMEM: + case X86::VPCMPISTRIMEM: + case X86::PCMPESTRIREG: + case X86::VPCMPESTRIREG: + case X86::PCMPESTRIMEM: + case X86::VPCMPESTRIMEM: + assert(Subtarget->hasSSE42() && + "Target must have SSE4.2 or AVX features enabled"); + return EmitPCMPSTRI(MI, BB, getTargetMachine().getInstrInfo()); + + // Thread synchronization. case X86::MONITOR: - return EmitMonitor(MI, BB); + return EmitMonitor(MI, BB, getTargetMachine().getInstrInfo(), Subtarget); - // Atomic Lowering. - case X86::ATOMAND32: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, - X86::AND32ri, X86::MOV32rm, - X86::LCMPXCHG32, - X86::NOT32r, X86::EAX, - &X86::GR32RegClass); - case X86::ATOMOR32: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR32rr, - X86::OR32ri, X86::MOV32rm, - X86::LCMPXCHG32, - X86::NOT32r, X86::EAX, - &X86::GR32RegClass); - case X86::ATOMXOR32: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr, - X86::XOR32ri, X86::MOV32rm, - X86::LCMPXCHG32, - X86::NOT32r, X86::EAX, - &X86::GR32RegClass); - case X86::ATOMNAND32: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr, - X86::AND32ri, X86::MOV32rm, - X86::LCMPXCHG32, - X86::NOT32r, X86::EAX, - &X86::GR32RegClass, true); - case X86::ATOMMIN32: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr); - case X86::ATOMMAX32: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG32rr); - case X86::ATOMUMIN32: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB32rr); - case X86::ATOMUMAX32: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA32rr); + // xbegin + case X86::XBEGIN: + return EmitXBegin(MI, BB, getTargetMachine().getInstrInfo()); + // Atomic Lowering. 
+ case X86::ATOMAND8: case X86::ATOMAND16: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, - X86::AND16ri, X86::MOV16rm, - X86::LCMPXCHG16, - X86::NOT16r, X86::AX, - &X86::GR16RegClass); + case X86::ATOMAND32: + case X86::ATOMAND64: + // Fall through + case X86::ATOMOR8: case X86::ATOMOR16: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR16rr, - X86::OR16ri, X86::MOV16rm, - X86::LCMPXCHG16, - X86::NOT16r, X86::AX, - &X86::GR16RegClass); + case X86::ATOMOR32: + case X86::ATOMOR64: + // Fall through case X86::ATOMXOR16: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR16rr, - X86::XOR16ri, X86::MOV16rm, - X86::LCMPXCHG16, - X86::NOT16r, X86::AX, - &X86::GR16RegClass); - case X86::ATOMNAND16: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND16rr, - X86::AND16ri, X86::MOV16rm, - X86::LCMPXCHG16, - X86::NOT16r, X86::AX, - &X86::GR16RegClass, true); - case X86::ATOMMIN16: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL16rr); - case X86::ATOMMAX16: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG16rr); - case X86::ATOMUMIN16: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB16rr); - case X86::ATOMUMAX16: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA16rr); - - case X86::ATOMAND8: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, - X86::AND8ri, X86::MOV8rm, - X86::LCMPXCHG8, - X86::NOT8r, X86::AL, - &X86::GR8RegClass); - case X86::ATOMOR8: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR8rr, - X86::OR8ri, X86::MOV8rm, - X86::LCMPXCHG8, - X86::NOT8r, X86::AL, - &X86::GR8RegClass); case X86::ATOMXOR8: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR8rr, - X86::XOR8ri, X86::MOV8rm, - X86::LCMPXCHG8, - X86::NOT8r, X86::AL, - &X86::GR8RegClass); - case X86::ATOMNAND8: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND8rr, - X86::AND8ri, X86::MOV8rm, - X86::LCMPXCHG8, - X86::NOT8r, X86::AL, - &X86::GR8RegClass, true); - // FIXME: There are no CMOV8 instructions; MIN/MAX need some other way. - // This group is for 64-bit host. 
- case X86::ATOMAND64: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, - X86::AND64ri32, X86::MOV64rm, - X86::LCMPXCHG64, - X86::NOT64r, X86::RAX, - &X86::GR64RegClass); - case X86::ATOMOR64: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::OR64rr, - X86::OR64ri32, X86::MOV64rm, - X86::LCMPXCHG64, - X86::NOT64r, X86::RAX, - &X86::GR64RegClass); + case X86::ATOMXOR32: case X86::ATOMXOR64: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR64rr, - X86::XOR64ri32, X86::MOV64rm, - X86::LCMPXCHG64, - X86::NOT64r, X86::RAX, - &X86::GR64RegClass); + // Fall through + case X86::ATOMNAND8: + case X86::ATOMNAND16: + case X86::ATOMNAND32: case X86::ATOMNAND64: - return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND64rr, - X86::AND64ri32, X86::MOV64rm, - X86::LCMPXCHG64, - X86::NOT64r, X86::RAX, - &X86::GR64RegClass, true); - case X86::ATOMMIN64: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL64rr); + // Fall through + case X86::ATOMMAX8: + case X86::ATOMMAX16: + case X86::ATOMMAX32: case X86::ATOMMAX64: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVG64rr); - case X86::ATOMUMIN64: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVB64rr); + // Fall through + case X86::ATOMMIN8: + case X86::ATOMMIN16: + case X86::ATOMMIN32: + case X86::ATOMMIN64: + // Fall through + case X86::ATOMUMAX8: + case X86::ATOMUMAX16: + case X86::ATOMUMAX32: case X86::ATOMUMAX64: - return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVA64rr); + // Fall through + case X86::ATOMUMIN8: + case X86::ATOMUMIN16: + case X86::ATOMUMIN32: + case X86::ATOMUMIN64: + return EmitAtomicLoadArith(MI, BB); // This group does 64-bit operations on a 32-bit host. case X86::ATOMAND6432: - return EmitAtomicBit6432WithCustomInserter(MI, BB, - X86::AND32rr, X86::AND32rr, - X86::AND32ri, X86::AND32ri, - false); case X86::ATOMOR6432: - return EmitAtomicBit6432WithCustomInserter(MI, BB, - X86::OR32rr, X86::OR32rr, - X86::OR32ri, X86::OR32ri, - false); case X86::ATOMXOR6432: - return EmitAtomicBit6432WithCustomInserter(MI, BB, - X86::XOR32rr, X86::XOR32rr, - X86::XOR32ri, X86::XOR32ri, - false); case X86::ATOMNAND6432: - return EmitAtomicBit6432WithCustomInserter(MI, BB, - X86::AND32rr, X86::AND32rr, - X86::AND32ri, X86::AND32ri, - true); case X86::ATOMADD6432: - return EmitAtomicBit6432WithCustomInserter(MI, BB, - X86::ADD32rr, X86::ADC32rr, - X86::ADD32ri, X86::ADC32ri, - false); case X86::ATOMSUB6432: - return EmitAtomicBit6432WithCustomInserter(MI, BB, - X86::SUB32rr, X86::SBB32rr, - X86::SUB32ri, X86::SBB32ri, - false); + case X86::ATOMMAX6432: + case X86::ATOMMIN6432: + case X86::ATOMUMAX6432: + case X86::ATOMUMIN6432: case X86::ATOMSWAP6432: - return EmitAtomicBit6432WithCustomInserter(MI, BB, - X86::MOV32rr, X86::MOV32rr, - X86::MOV32ri, X86::MOV32ri, - false); + return EmitAtomicLoadArith6432(MI, BB); + case X86::VASTART_SAVE_XMM_REGS: return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB); case X86::VAARG_64: return EmitVAARG64WithCustomInserter(MI, BB); + + case X86::EH_SjLj_SetJmp32: + case X86::EH_SjLj_SetJmp64: + return emitEHSjLjSetJmp(MI, BB); + + case X86::EH_SjLj_LongJmp32: + case X86::EH_SjLj_LongJmp64: + return emitEHSjLjLongJmp(MI, BB); } } @@ -13331,12 +14267,12 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, } -/// DCI, PerformTruncateCombine - Converts truncate operation to +/// PerformTruncateCombine - Converts truncate operation to /// a sequence of vector shuffle operations. 
/// It is possible when we truncate 256-bit vector to 128-bit vector - -SDValue X86TargetLowering::PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, - DAGCombinerInfo &DCI) const { +static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const X86Subtarget *Subtarget) { if (!DCI.isBeforeLegalizeOps()) return SDValue(); @@ -13528,7 +14464,7 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, // alignment is valid. unsigned Align = LN0->getAlignment(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - unsigned NewAlign = TLI.getTargetData()-> + unsigned NewAlign = TLI.getDataLayout()-> getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VT)) @@ -13559,6 +14495,14 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, return NewOp; SDValue InputVector = N->getOperand(0); + // Detect whether we are trying to convert from mmx to i32 and the bitcast + // from mmx to v2i32 has a single usage. + if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST && + InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx && + InputVector.hasOneUse() && N->getValueType(0) == MVT::i32) + return DAG.getNode(X86ISD::MMX_MOVD2W, InputVector.getDebugLoc(), + N->getValueType(0), + InputVector.getNode()->getOperand(0)); // Only operate on vectors of 4 elements, where the alternative shuffling // gets to be more expensive. @@ -13959,7 +14903,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // // where Op could be BRCOND or CMOV. // -static SDValue BoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { +static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { // Quit if not CMP and SUB with its value result used. if (Cmp.getOpcode() != X86ISD::CMP && (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0))) @@ -13995,40 +14939,55 @@ static SDValue BoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { if (SetCC.getOpcode() == ISD::ZERO_EXTEND) SetCC = SetCC.getOperand(0); - // Quit if not SETCC. - // FIXME: So far we only handle the boolean value generated from SETCC. If - // there is other ways to generate boolean values, we need handle them here - // as well. - if (SetCC.getOpcode() != X86ISD::SETCC) - return SDValue(); - - // Set the condition code or opposite one if necessary. - CC = X86::CondCode(SetCC.getConstantOperandVal(0)); - if (needOppositeCond) - CC = X86::GetOppositeBranchCondition(CC); - - return SetCC.getOperand(1); -} - -static bool IsValidFCMOVCondition(X86::CondCode CC) { - switch (CC) { - default: - return false; - case X86::COND_B: - case X86::COND_BE: - case X86::COND_E: - case X86::COND_P: - case X86::COND_AE: - case X86::COND_A: - case X86::COND_NE: - case X86::COND_NP: - return true; + switch (SetCC.getOpcode()) { + case X86ISD::SETCC: + // Set the condition code or opposite one if necessary. + CC = X86::CondCode(SetCC.getConstantOperandVal(0)); + if (needOppositeCond) + CC = X86::GetOppositeBranchCondition(CC); + return SetCC.getOperand(1); + case X86ISD::CMOV: { + // Check whether false/true value has canonical one, i.e. 0 or 1. + ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0)); + ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1)); + // Quit if true value is not a constant. + if (!TVal) + return SDValue(); + // Quit if false value is not a constant.
+ if (!FVal) { + // A special case for rdrand, where 0 is set if false cond is found. + SDValue Op = SetCC.getOperand(0); + if (Op.getOpcode() != X86ISD::RDRAND) + return SDValue(); + } + // Quit if false value is not the constant 0 or 1. + bool FValIsFalse = true; + if (FVal && FVal->getZExtValue() != 0) { + if (FVal->getZExtValue() != 1) + return SDValue(); + // If FVal is 1, opposite cond is needed. + needOppositeCond = !needOppositeCond; + FValIsFalse = false; + } + // Quit if TVal is not the constant opposite of FVal. + if (FValIsFalse && TVal->getZExtValue() != 1) + return SDValue(); + if (!FValIsFalse && TVal->getZExtValue() != 0) + return SDValue(); + CC = X86::CondCode(SetCC.getConstantOperandVal(2)); + if (needOppositeCond) + CC = X86::GetOppositeBranchCondition(CC); + return SetCC.getOperand(3); } + } + + return SDValue(); } /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, - TargetLowering::DAGCombinerInfo &DCI) { + TargetLowering::DAGCombinerInfo &DCI, + const X86Subtarget *Subtarget) { DebugLoc DL = N->getDebugLoc(); // If the flag operand isn't dead, don't touch this CMOV. @@ -14053,10 +15012,10 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, SDValue Flags; - Flags = BoolTestSetCCCombine(Cond, CC); + Flags = checkBoolTestSetCCCombine(Cond, CC); if (Flags.getNode() && // Extra check as FCMOV only supports a subset of X86 cond. - (FalseOp.getValueType() != MVT::f80 || IsValidFCMOVCondition(CC))) { + (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) { SDValue Ops[] = { FalseOp, TrueOp, DAG.getConstant(CC, MVT::i8), Flags }; return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), @@ -14073,6 +15032,7 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { CC = X86::GetOppositeBranchCondition(CC); std::swap(TrueC, FalseC); + std::swap(TrueOp, FalseOp); } // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. @@ -14155,6 +15115,46 @@ static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG, } } } + + // Handle these cases: + // (select (x != c), e, c) -> select (x != c), e, x), + // (select (x == c), c, e) -> select (x == c), x, e) + // where the c is an integer constant, and the "select" is the combination + // of CMOV and CMP. + // + // The rationale for this change is that the conditional-move from a constant + // needs two instructions, however, conditional-move from a register needs + // only one instruction. + // + // CAVEAT: By replacing a constant with a symbolic value, it may obscure + // some instruction-combining opportunities. This opt needs to be + // postponed as late as possible. + // + if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) { + // the DCI.xxxx conditions are provided to postpone the optimization as + // late as possible. 
+ + ConstantSDNode *CmpAgainst = 0; + if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) && + (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) && + dyn_cast<ConstantSDNode>(Cond.getOperand(0)) == 0) { + + if (CC == X86::COND_NE && + CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) { + CC = X86::GetOppositeBranchCondition(CC); + std::swap(TrueOp, FalseOp); + } + + if (CC == X86::COND_E && + CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) { + SDValue Ops[] = { FalseOp, Cond.getOperand(0), + DAG.getConstant(CC, MVT::i8), Cond }; + return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops, + array_lengthof(Ops)); + } + } + } + + return SDValue(); } @@ -14811,11 +15811,11 @@ static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG, ISD::LoadExtType Ext = Ld->getExtensionType(); // If this is a vector EXT Load then attempt to optimize it using a - // shuffle. We need SSE4 for the shuffles. + // shuffle. We need SSSE3 shuffles. // TODO: It is possible to support ZExt by zeroing the undef values // during the shuffle phase or after the shuffle. if (RegVT.isVector() && RegVT.isInteger() && - Ext == ISD::EXTLOAD && Subtarget->hasSSE41()) { + Ext == ISD::EXTLOAD && Subtarget->hasSSSE3()) { assert(MemVT != RegVT && "Cannot extend to the same type"); assert(MemVT.isVector() && "Must load a vector from memory"); @@ -15041,7 +16041,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, return SDValue(); const Function *F = DAG.getMachineFunction().getFunction(); - bool NoImplicitFloatOps = F->hasFnAttr(Attribute::NoImplicitFloat); + bool NoImplicitFloatOps = F->getFnAttributes(). + hasAttribute(Attributes::NoImplicitFloat); bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps && Subtarget->hasSSE2(); if ((VT.isVector() || @@ -15313,6 +16314,29 @@ static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) { return SDValue(); } +/// PerformFMinFMaxCombine - Do target-specific dag combines on X86ISD::FMIN and +/// X86ISD::FMAX nodes. +static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) { + assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX); + + // Only perform optimizations if UnsafeMath is used. + if (!DAG.getTarget().Options.UnsafeFPMath) + return SDValue(); + + // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes + // into FMINC and FMAXC, which are Commutative operations. + unsigned NewOp = 0; + switch (N->getOpcode()) { + default: llvm_unreachable("unknown opcode"); + case X86ISD::FMIN: NewOp = X86ISD::FMINC; break; + case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; + } + + return DAG.getNode(NewOp, N->getDebugLoc(), N->getValueType(0), + N->getOperand(0), N->getOperand(1)); +} + + /// PerformFANDCombine - Do target-specific dag combines on X86ISD::FAND nodes. static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) { // FAND(0.0, x) -> 0.0 @@ -15418,8 +16442,13 @@ static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, DebugLoc dl = N->getDebugLoc(); EVT VT = N->getValueType(0); + // Let legalize expand this if it isn't a legal type yet. + if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) + return SDValue(); + EVT ScalarVT = VT.getScalarType(); - if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget->hasFMA()) + if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || + (!Subtarget->hasFMA() && !Subtarget->hasFMA4())) return SDValue(); SDValue A = N->getOperand(0); @@ -15441,9 +16470,10 @@ static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG, unsigned Opcode; if (!NegMul) - Opcode = (!NegC)?
X86ISD::FMADD : X86ISD::FMSUB; + Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB; else - Opcode = (!NegC)? X86ISD::FNMADD : X86ISD::FNMSUB; + Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB; + return DAG.getNode(Opcode, dl, VT, A, B, C); } @@ -15540,24 +16570,51 @@ static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG) { return SDValue(); } +// Helper function of PerformSETCCCombine. It is to materialize "setb reg" +// as "sbb reg,reg", since it can be extended without zext and produces +// an all-ones bit which is more useful than 0/1 in some cases. +static SDValue MaterializeSETB(DebugLoc DL, SDValue EFLAGS, SelectionDAG &DAG) { + return DAG.getNode(ISD::AND, DL, MVT::i8, + DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, + DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS), + DAG.getConstant(1, MVT::i8)); +} + // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT -static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG) { +static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const X86Subtarget *Subtarget) { DebugLoc DL = N->getDebugLoc(); X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); SDValue EFLAGS = N->getOperand(1); + if (CC == X86::COND_A) { + // Try to convert COND_A into COND_B in an attempt to facilitate + // materializing "setb reg". + // + // Do not flip "e > c", where "c" is a constant, because Cmp instruction + // cannot take an immediate as its first operand. + // + if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && + EFLAGS.getValueType().isInteger() && + !isa(EFLAGS.getOperand(1))) { + SDValue NewSub = DAG.getNode(X86ISD::SUB, EFLAGS.getDebugLoc(), + EFLAGS.getNode()->getVTList(), + EFLAGS.getOperand(1), EFLAGS.getOperand(0)); + SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); + return MaterializeSETB(DL, NewEFLAGS, DAG); + } + } + // Materialize "setb reg" as "sbb reg,reg", since it can be extended without // a zext and produces an all-ones bit which is more useful than 0/1 in some // cases. if (CC == X86::COND_B) - return DAG.getNode(ISD::AND, DL, MVT::i8, - DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, - DAG.getConstant(CC, MVT::i8), EFLAGS), - DAG.getConstant(1, MVT::i8)); + return MaterializeSETB(DL, EFLAGS, DAG); SDValue Flags; - Flags = BoolTestSetCCCombine(EFLAGS, CC); + Flags = checkBoolTestSetCCCombine(EFLAGS, CC); if (Flags.getNode()) { SDValue Cond = DAG.getConstant(CC, MVT::i8); return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags); @@ -15579,7 +16636,7 @@ static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, SDValue Flags; - Flags = BoolTestSetCCCombine(EFLAGS, CC); + Flags = checkBoolTestSetCCCombine(EFLAGS, CC); if (Flags.getNode()) { SDValue Cond = DAG.getConstant(CC, MVT::i8); return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond, @@ -15589,23 +16646,6 @@ static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } -static SDValue PerformUINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG) { - SDValue Op0 = N->getOperand(0); - EVT InVT = Op0->getValueType(0); - - // UINT_TO_FP(v4i8) -> SINT_TO_FP(ZEXT(v4i8 to v4i32)) - if (InVT == MVT::v8i8 || InVT == MVT::v4i8) { - DebugLoc dl = N->getDebugLoc(); - MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; - SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0); - // Notice that we use SINT_TO_FP because we know that the high bits - // are zero and SINT_TO_FP is better supported by the hardware. 
- return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P); - } - - return SDValue(); -} - static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, const X86TargetLowering *XTLI) { SDValue Op0 = N->getOperand(0); @@ -15637,20 +16677,6 @@ static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG, return SDValue(); } -static SDValue PerformFP_TO_SINTCombine(SDNode *N, SelectionDAG &DAG) { - EVT VT = N->getValueType(0); - - // v4i8 = FP_TO_SINT() -> v4i8 = TRUNCATE (V4i32 = FP_TO_SINT() - if (VT == MVT::v8i8 || VT == MVT::v4i8) { - DebugLoc dl = N->getDebugLoc(); - MVT DstVT = VT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32; - SDValue I = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, N->getOperand(0)); - return DAG.getNode(ISD::TRUNCATE, dl, VT, I); - } - - return SDValue(); -} - // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG, X86TargetLowering::DAGCombinerInfo &DCI) { @@ -15765,6 +16791,21 @@ static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG, return OptimizeConditionalInDecrement(N, DAG); } +/// performVZEXTCombine - Performs build vector combines +static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG, + TargetLowering::DAGCombinerInfo &DCI, + const X86Subtarget *Subtarget) { + // (vzext (bitcast (vzext (x)) -> (vzext x) + SDValue In = N->getOperand(0); + while (In.getOpcode() == ISD::BITCAST) + In = In.getOperand(0); + + if (In.getOpcode() != X86ISD::VZEXT) + return SDValue(); + + return DAG.getNode(X86ISD::VZEXT, N->getDebugLoc(), N->getValueType(0), In.getOperand(0)); +} + SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const { SelectionDAG &DAG = DCI.DAG; @@ -15774,7 +16815,7 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI); case ISD::VSELECT: case ISD::SELECT: return PerformSELECTCombine(N, DAG, DCI, Subtarget); - case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI); + case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget); case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget); case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget); case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI); @@ -15787,23 +16828,24 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget); case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget); case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget); - case ISD::UINT_TO_FP: return PerformUINT_TO_FPCombine(N, DAG); case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this); - case ISD::FP_TO_SINT: return PerformFP_TO_SINTCombine(N, DAG); case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget); case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget); case X86ISD::FXOR: case X86ISD::FOR: return PerformFORCombine(N, DAG); + case X86ISD::FMIN: + case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG); case X86ISD::FAND: return PerformFANDCombine(N, DAG); case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); case ISD::ANY_EXTEND: case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget); case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget); - case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI); + case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget); case ISD::SETCC: return PerformISDSETCCCombine(N, DAG); - 
case X86ISD::SETCC: return PerformSETCCCombine(N, DAG); + case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget); case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget); + case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget); case X86ISD::SHUFP: // Handle all target specific shuffles case X86ISD::PALIGN: case X86ISD::UNPCKH: @@ -16231,7 +17273,7 @@ void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, return; case 'K': if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { - if ((int8_t)C->getSExtValue() == C->getSExtValue()) { + if (isInt<8>(C->getSExtValue())) { Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType()); break; } @@ -16556,3 +17598,207 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, return Res; } + +//===----------------------------------------------------------------------===// +// +// X86 cost model. +// +//===----------------------------------------------------------------------===// + +struct X86CostTblEntry { + int ISD; + MVT Type; + unsigned Cost; +}; + +static int +FindInTable(const X86CostTblEntry *Tbl, unsigned len, int ISD, MVT Ty) { + for (unsigned int i = 0; i < len; ++i) + if (Tbl[i].ISD == ISD && Tbl[i].Type == Ty) + return i; + + // Could not find an entry. + return -1; + } + +struct X86TypeConversionCostTblEntry { + int ISD; + MVT Dst; + MVT Src; + unsigned Cost; +}; + +static int +FindInConvertTable(const X86TypeConversionCostTblEntry *Tbl, unsigned len, + int ISD, MVT Dst, MVT Src) { + for (unsigned int i = 0; i < len; ++i) + if (Tbl[i].ISD == ISD && Tbl[i].Src == Src && Tbl[i].Dst == Dst) + return i; + + // Could not find an entry. + return -1; +} + +unsigned +X86VectorTargetTransformInfo::getArithmeticInstrCost(unsigned Opcode, + Type *Ty) const { + // Legalize the type. + std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Ty); + + int ISD = InstructionOpcodeToISD(Opcode); + assert(ISD && "Invalid opcode"); + + const X86Subtarget &ST = TLI->getTargetMachine().getSubtarget<X86Subtarget>(); + + static const X86CostTblEntry AVX1CostTable[] = { + // We don't have to scalarize unsupported ops. We can issue two half-sized + // operations and we only need to extract the upper YMM half. + // Two ops + 1 extract + 1 insert = 4. + { ISD::MUL, MVT::v8i32, 4 }, + { ISD::SUB, MVT::v8i32, 4 }, + { ISD::ADD, MVT::v8i32, 4 }, + { ISD::MUL, MVT::v4i64, 4 }, + { ISD::SUB, MVT::v4i64, 4 }, + { ISD::ADD, MVT::v4i64, 4 }, + }; + + // Look for AVX1 lowering tricks. + if (ST.hasAVX()) { + int Idx = FindInTable(AVX1CostTable, array_lengthof(AVX1CostTable), ISD, + LT.second); + if (Idx != -1) + return LT.first * AVX1CostTable[Idx].Cost; + } + // Fallback to the default implementation. + return VectorTargetTransformImpl::getArithmeticInstrCost(Opcode, Ty); +} + +unsigned +X86VectorTargetTransformInfo::getVectorInstrCost(unsigned Opcode, Type *Val, + unsigned Index) const { + assert(Val->isVectorTy() && "This must be a vector type"); + + if (Index != -1U) { + // Legalize the type. + std::pair<unsigned, MVT> LT = getTypeLegalizationCost(Val); + + // This type is legalized to a scalar type. + if (!LT.second.isVector()) + return 0; + + // The type may be split. Normalize the index to the new type. + unsigned Width = LT.second.getVectorNumElements(); + Index = Index % Width; + + // Floating point scalars are already located in index #0.
+ if (Val->getScalarType()->isFloatingPointTy() && Index == 0) + return 0; + } + + return VectorTargetTransformImpl::getVectorInstrCost(Opcode, Val, Index); +} + +unsigned X86VectorTargetTransformInfo::getCmpSelInstrCost(unsigned Opcode, + Type *ValTy, + Type *CondTy) const { + // Legalize the type. + std::pair<unsigned, MVT> LT = getTypeLegalizationCost(ValTy); + + MVT MTy = LT.second; + + int ISD = InstructionOpcodeToISD(Opcode); + assert(ISD && "Invalid opcode"); + + const X86Subtarget &ST = + TLI->getTargetMachine().getSubtarget<X86Subtarget>(); + + static const X86CostTblEntry SSE42CostTbl[] = { + { ISD::SETCC, MVT::v2f64, 1 }, + { ISD::SETCC, MVT::v4f32, 1 }, + { ISD::SETCC, MVT::v2i64, 1 }, + { ISD::SETCC, MVT::v4i32, 1 }, + { ISD::SETCC, MVT::v8i16, 1 }, + { ISD::SETCC, MVT::v16i8, 1 }, + }; + + static const X86CostTblEntry AVX1CostTbl[] = { + { ISD::SETCC, MVT::v4f64, 1 }, + { ISD::SETCC, MVT::v8f32, 1 }, + // AVX1 does not support 8-wide integer compare. + { ISD::SETCC, MVT::v4i64, 4 }, + { ISD::SETCC, MVT::v8i32, 4 }, + { ISD::SETCC, MVT::v16i16, 4 }, + { ISD::SETCC, MVT::v32i8, 4 }, + }; + + static const X86CostTblEntry AVX2CostTbl[] = { + { ISD::SETCC, MVT::v4i64, 1 }, + { ISD::SETCC, MVT::v8i32, 1 }, + { ISD::SETCC, MVT::v16i16, 1 }, + { ISD::SETCC, MVT::v32i8, 1 }, + }; + + if (ST.hasSSE42()) { + int Idx = FindInTable(SSE42CostTbl, array_lengthof(SSE42CostTbl), ISD, MTy); + if (Idx != -1) + return LT.first * SSE42CostTbl[Idx].Cost; + } + + if (ST.hasAVX()) { + int Idx = FindInTable(AVX1CostTbl, array_lengthof(AVX1CostTbl), ISD, MTy); + if (Idx != -1) + return LT.first * AVX1CostTbl[Idx].Cost; + } + + if (ST.hasAVX2()) { + int Idx = FindInTable(AVX2CostTbl, array_lengthof(AVX2CostTbl), ISD, MTy); + if (Idx != -1) + return LT.first * AVX2CostTbl[Idx].Cost; + } + + return VectorTargetTransformImpl::getCmpSelInstrCost(Opcode, ValTy, CondTy); +} + +unsigned X86VectorTargetTransformInfo::getCastInstrCost(unsigned Opcode, + Type *Dst, + Type *Src) const { + int ISD = InstructionOpcodeToISD(Opcode); + assert(ISD && "Invalid opcode"); + + EVT SrcTy = TLI->getValueType(Src); + EVT DstTy = TLI->getValueType(Dst); + + if (!SrcTy.isSimple() || !DstTy.isSimple()) + return VectorTargetTransformImpl::getCastInstrCost(Opcode, Dst, Src); + + const X86Subtarget &ST = TLI->getTargetMachine().getSubtarget<X86Subtarget>(); + + static const X86TypeConversionCostTblEntry AVXConversionTbl[] = { + { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, + { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, + { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, + { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, + { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, + { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 1 }, + { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 1 }, + { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 1 }, + { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 1 }, + { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 1 }, + { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 1 }, + { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 }, + { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 6 }, + { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 9 }, + { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 }, + }; + + if (ST.hasAVX()) { + int Idx = FindInConvertTable(AVXConversionTbl, + array_lengthof(AVXConversionTbl), + ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT()); + if (Idx != -1) + return AVXConversionTbl[Idx].Cost; + } + + return VectorTargetTransformImpl::getCastInstrCost(Opcode, Dst, Src); +} + diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 896d067..465c603 100644 ---
a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -19,6 +19,7 @@ #include "X86RegisterInfo.h" #include "X86MachineFunctionInfo.h" #include "llvm/Target/TargetLowering.h" +#include "llvm/Target/TargetTransformImpl.h" #include "llvm/Target/TargetOptions.h" #include "llvm/CodeGen/FastISel.h" #include "llvm/CodeGen/SelectionDAG.h" @@ -142,6 +143,10 @@ namespace llvm { /// mnemonic, so do I; blame Intel. MOVDQ2Q, + /// MMX_MOVD2W - Copies a 32-bit value from the low word of a MMX + /// vector to a GPR. + MMX_MOVD2W, + /// PEXTRB - Extract an 8-bit value from a vector and zero extend it to /// i32, corresponds to X86::PEXTRB. PEXTRB, @@ -195,6 +200,9 @@ namespace llvm { /// FMAX, FMIN, + /// FMAXC, FMINC - Commutative FMIN and FMAX. + FMAXC, FMINC, + /// FRSQRT, FRCP - Floating point reciprocal-sqrt and reciprocal /// approximation. Note that these typically require refinement /// in order to obtain suitable precision. @@ -214,6 +222,12 @@ namespace llvm { // EH_RETURN - Exception Handling helpers. EH_RETURN, + // EH_SJLJ_SETJMP - SjLj exception handling setjmp. + EH_SJLJ_SETJMP, + + // EH_SJLJ_LONGJMP - SjLj exception handling longjmp. + EH_SJLJ_LONGJMP, + /// TC_RETURN - Tail call return. /// operand #0 chain /// operand #1 callee (register or absolute) @@ -227,9 +241,18 @@ namespace llvm { // VSEXT_MOVL - Vector move low and sign extend. VSEXT_MOVL, + // VZEXT - Vector integer zero-extend. + VZEXT, + + // VSEXT - Vector integer signed-extend. + VSEXT, + // VFPEXT - Vector FP extend. VFPEXT, + // VFPROUND - Vector FP round. + VFPROUND, + // VSHL, VSRL - 128-bit vector logical left / right shift VSHLDQ, VSRLDQ, @@ -345,6 +368,10 @@ namespace llvm { ATOMXOR64_DAG, ATOMAND64_DAG, ATOMNAND64_DAG, + ATOMMAX64_DAG, + ATOMMIN64_DAG, + ATOMUMAX64_DAG, + ATOMUMIN64_DAG, ATOMSWAP64_DAG, // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap. @@ -458,10 +485,6 @@ namespace llvm { getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const; - /// getStackPtrReg - Return the stack pointer register we are using: either - /// ESP or RSP. - unsigned getStackPtrReg() const { return X86StackPtr; } - /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate /// function arguments in the caller parameter area. For X86, aggregates /// that contains are placed at 16-byte boundaries while the rest are at @@ -694,10 +717,7 @@ namespace llvm { /// make the right decision when generating code for different targets. const X86Subtarget *Subtarget; const X86RegisterInfo *RegInfo; - const TargetData *TD; - - /// X86StackPtr - X86 physical register used as stack ptr. - unsigned X86StackPtr; + const DataLayout *TD; /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 /// floating point ops. 
@@ -741,6 +761,7 @@ namespace llvm { bool isVarArg, bool isCalleeStructRet, bool isCallerStructRet, + Type *RetTy, const SmallVectorImpl<ISD::OutputArg> &Outs, const SmallVectorImpl<SDValue> &OutVals, const SmallVectorImpl<ISD::InputArg> &Ins, @@ -760,15 +781,11 @@ namespace llvm { SDValue LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl, SelectionDAG &DAG) const; SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINSERT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; SDValue LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, @@ -782,12 +799,15 @@ namespace llvm { SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const; SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) const; SDValue LowerToBT(SDValue And, ISD::CondCode CC, DebugLoc dl, SelectionDAG &DAG) const; SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; @@ -799,39 +819,26 @@ namespace llvm { SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const; SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const; SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const; + SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerADD(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) const; - SDValue
LowerMUL(SDValue Op, SelectionDAG &DAG) const; SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const; - SDValue PerformTruncateCombine(SDNode* N, SelectionDAG &DAG, DAGCombinerInfo &DCI) const; - // Utility functions to help LowerVECTOR_SHUFFLE - SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerVectorBroadcast(SDValue &Op, SelectionDAG &DAG) const; + // Utility functions to help LowerVECTOR_SHUFFLE & LowerBUILD_VECTOR + SDValue LowerVectorBroadcast(SDValue Op, SelectionDAG &DAG) const; SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG) const; + SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) const; + + SDValue LowerVectorAllZeroTest(SDValue Op, SelectionDAG &DAG) const; - SDValue LowerVectorFpExtend(SDValue &Op, SelectionDAG &DAG) const; + SDValue lowerVectorIntExtend(SDValue Op, SelectionDAG &DAG) const; virtual SDValue LowerFormalArguments(SDValue Chain, @@ -864,51 +871,17 @@ namespace llvm { const SmallVectorImpl &Outs, LLVMContext &Context) const; - /// Utility function to emit string processing sse4.2 instructions - /// that return in xmm0. - /// This takes the instruction to expand, the associated machine basic - /// block, the number of args, and whether or not the second arg is - /// in memory or not. - MachineBasicBlock *EmitPCMP(MachineInstr *BInstr, MachineBasicBlock *BB, - unsigned argNum, bool inMem) const; - - /// Utility functions to emit monitor and mwait instructions. These - /// need to make sure that the arguments to the intrinsic are in the - /// correct registers. - MachineBasicBlock *EmitMonitor(MachineInstr *MI, - MachineBasicBlock *BB) const; - MachineBasicBlock *EmitMwait(MachineInstr *MI, MachineBasicBlock *BB) const; - - /// Utility function to emit atomic bitwise operations (and, or, xor). - /// It takes the bitwise instruction to expand, the associated machine basic - /// block, and the associated X86 opcodes for reg/reg and reg/imm. - MachineBasicBlock *EmitAtomicBitwiseWithCustomInserter( - MachineInstr *BInstr, - MachineBasicBlock *BB, - unsigned regOpc, - unsigned immOpc, - unsigned loadOpc, - unsigned cxchgOpc, - unsigned notOpc, - unsigned EAXreg, - const TargetRegisterClass *RC, - bool Invert = false) const; - - MachineBasicBlock *EmitAtomicBit6432WithCustomInserter( - MachineInstr *BInstr, - MachineBasicBlock *BB, - unsigned regOpcL, - unsigned regOpcH, - unsigned immOpcL, - unsigned immOpcH, - bool Invert = false) const; - - /// Utility function to emit atomic min and max. It takes the min/max - /// instruction to expand, the associated basic block, and the associated - /// cmov opcode for moving the min or max value. - MachineBasicBlock *EmitAtomicMinMaxWithCustomInserter(MachineInstr *BInstr, - MachineBasicBlock *BB, - unsigned cmovOpc) const; + /// Utility function to emit atomic-load-arith operations (and, or, xor, + /// nand, max, min, umax, umin). It takes the corresponding instruction to + /// expand, the associated machine basic block, and the associated X86 + /// opcodes for reg/reg. 
+ MachineBasicBlock *EmitAtomicLoadArith(MachineInstr *MI, + MachineBasicBlock *MBB) const; + + /// Utility function to emit atomic-load-arith operations (and, or, xor, + /// nand, add, sub, swap) for 64-bit operands on 32-bit target. + MachineBasicBlock *EmitAtomicLoadArith6432(MachineInstr *MI, + MachineBasicBlock *MBB) const; // Utility function to emit the low-level va_arg code for X86-64. MachineBasicBlock *EmitVAARG64WithCustomInserter( @@ -936,6 +909,12 @@ namespace llvm { MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI, MachineBasicBlock *BB) const; + MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI, + MachineBasicBlock *MBB) const; + + MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI, + MachineBasicBlock *MBB) const; + /// Emit nodes that will be selected as "test Op0,Op0", or something /// equivalent, for use with the given x86 condition code. SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const; @@ -953,6 +932,23 @@ namespace llvm { FastISel *createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo); } + + class X86VectorTargetTransformInfo : public VectorTargetTransformImpl { + public: + explicit X86VectorTargetTransformInfo(const TargetLowering *TL) : + VectorTargetTransformImpl(TL) {} + + virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty) const; + + virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val, + unsigned Index) const; + + unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, + Type *CondTy) const; + + virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, + Type *Src) const; + }; } #endif // X86ISELLOWERING_H diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td index 99c2b8f..9e6f279 100644 --- a/lib/Target/X86/X86InstrCompiler.td +++ b/lib/Target/X86/X86InstrCompiler.td @@ -165,6 +165,33 @@ def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr), } +let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1, + usesCustomInserter = 1 in { + def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf), + "#EH_SJLJ_SETJMP32", + [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>, + Requires<[In32BitMode]>; + def EH_SjLj_SetJmp64 : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf), + "#EH_SJLJ_SETJMP64", + [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>, + Requires<[In64BitMode]>; + let isTerminator = 1 in { + def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf), + "#EH_SJLJ_LONGJMP32", + [(X86eh_sjlj_longjmp addr:$buf)]>, + Requires<[In32BitMode]>; + def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf), + "#EH_SJLJ_LONGJMP64", + [(X86eh_sjlj_longjmp addr:$buf)]>, + Requires<[In64BitMode]>; + } +} + +let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in { + def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst), + "#EH_SjLj_Setup\t$dst", []>; +} + //===----------------------------------------------------------------------===// // Pseudo instructions used by segmented stacks. // @@ -230,25 +257,18 @@ def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src), IIC_ALU_NONMEM>; // Use sbb to materialize carry bit. -let Uses = [EFLAGS], Defs = [EFLAGS], isCodeGenOnly = 1 in { +let Uses = [EFLAGS], Defs = [EFLAGS], isPseudo = 1 in { // FIXME: These are pseudo ops that should be replaced with Pat<> patterns. // However, Pat<> can't replicate the destination reg into the inputs of the // result. -// FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces -// X86CodeEmitter. 
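The X86VectorTargetTransformInfo hooks declared above (getArithmeticInstrCost, getVectorInstrCost, getCmpSelInstrCost, getCastInstrCost) answer per-opcode cost queries for the vectorizer. A minimal, self-contained C++ sketch of the table-driven lookup such hooks typically perform; the opcode and type enums, the cost numbers, and the function name are illustrative only and are not taken from this patch:

    // Illustrative sketch only: a per-opcode/per-type cost lookup with a
    // cheap default, similar in spirit to what a cost hook returns.
    #include <cstdio>

    enum Opcode  { OpAdd, OpMul, OpDiv };
    enum VecType { V4I32, V8I16 };

    struct CostEntry { Opcode Op; VecType Ty; unsigned Cost; };

    static const CostEntry CostTable[] = {
      { OpAdd, V4I32, 1 },   // single cheap vector add
      { OpMul, V4I32, 3 },   // vector multiply is pricier
      { OpDiv, V4I32, 20 },  // no vector integer divide; assume scalarization
    };

    static unsigned getArithmeticCost(Opcode Op, VecType Ty) {
      for (const CostEntry &E : CostTable)
        if (E.Op == Op && E.Ty == Ty)
          return E.Cost;
      return 1; // default: assume one cheap instruction
    }

    int main() {
      std::printf("v4i32 mul cost: %u\n", getArithmeticCost(OpMul, V4I32));
      return 0;
    }
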
-def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "", - [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))], - IIC_ALU_NONMEM>; -def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "", - [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))], - IIC_ALU_NONMEM>, - OpSize; -def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "", - [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))], - IIC_ALU_NONMEM>; -def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "", - [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))], - IIC_ALU_NONMEM>; +def SETB_C8r : I<0, Pseudo, (outs GR8:$dst), (ins), "", + [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; +def SETB_C16r : I<0, Pseudo, (outs GR16:$dst), (ins), "", + [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; +def SETB_C32r : I<0, Pseudo, (outs GR32:$dst), (ins), "", + [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; +def SETB_C64r : I<0, Pseudo, (outs GR64:$dst), (ins), "", + [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>; } // isCodeGenOnly @@ -453,6 +473,11 @@ def CMOV_GR16 : I<0, Pseudo, "#CMOV_GR16* PSEUDO!", [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>; +} // Predicates = [NoCMov] + +// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no +// SSE1. +let Predicates = [FPStackf32] in def CMOV_RFP32 : I<0, Pseudo, (outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2, i8imm:$cond), @@ -460,6 +485,9 @@ def CMOV_RFP32 : I<0, Pseudo, [(set RFP32:$dst, (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond, EFLAGS))]>; +// fcmov doesn't handle all possible EFLAGS, provide a fallback if there is no +// SSE2. +let Predicates = [FPStackf64] in def CMOV_RFP64 : I<0, Pseudo, (outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2, i8imm:$cond), @@ -474,7 +502,6 @@ def CMOV_RFP80 : I<0, Pseudo, [(set RFP80:$dst, (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond, EFLAGS))]>; -} // Predicates = [NoCMov] } // UsesCustomInserter = 1, Uses = [EFLAGS] @@ -482,130 +509,74 @@ def CMOV_RFP80 : I<0, Pseudo, // Atomic Instruction Pseudo Instructions //===----------------------------------------------------------------------===// -// Atomic exchange, and, or, xor -let Constraints = "$val = $dst", Defs = [EFLAGS], - usesCustomInserter = 1 in { - -def ATOMAND8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val), - "#ATOMAND8 PSEUDO!", - [(set GR8:$dst, (atomic_load_and_8 addr:$ptr, GR8:$val))]>; -def ATOMOR8 : I<0, Pseudo, (outs GR8:$dst),(ins i8mem:$ptr, GR8:$val), - "#ATOMOR8 PSEUDO!", - [(set GR8:$dst, (atomic_load_or_8 addr:$ptr, GR8:$val))]>; -def ATOMXOR8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val), - "#ATOMXOR8 PSEUDO!", - [(set GR8:$dst, (atomic_load_xor_8 addr:$ptr, GR8:$val))]>; -def ATOMNAND8 : I<0, Pseudo,(outs GR8:$dst),(ins i8mem:$ptr, GR8:$val), - "#ATOMNAND8 PSEUDO!", - [(set GR8:$dst, (atomic_load_nand_8 addr:$ptr, GR8:$val))]>; - -def ATOMAND16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), - "#ATOMAND16 PSEUDO!", - [(set GR16:$dst, (atomic_load_and_16 addr:$ptr, GR16:$val))]>; -def ATOMOR16 : I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), - "#ATOMOR16 PSEUDO!", - [(set GR16:$dst, (atomic_load_or_16 addr:$ptr, GR16:$val))]>; -def ATOMXOR16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), - "#ATOMXOR16 PSEUDO!", - [(set GR16:$dst, (atomic_load_xor_16 addr:$ptr, GR16:$val))]>; -def ATOMNAND16 : I<0, Pseudo,(outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), - "#ATOMNAND16 PSEUDO!", - [(set GR16:$dst, (atomic_load_nand_16 addr:$ptr, 
GR16:$val))]>; -def ATOMMIN16: I<0, Pseudo, (outs GR16:$dst), (ins i16mem:$ptr, GR16:$val), - "#ATOMMIN16 PSEUDO!", - [(set GR16:$dst, (atomic_load_min_16 addr:$ptr, GR16:$val))]>; -def ATOMMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), - "#ATOMMAX16 PSEUDO!", - [(set GR16:$dst, (atomic_load_max_16 addr:$ptr, GR16:$val))]>; -def ATOMUMIN16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), - "#ATOMUMIN16 PSEUDO!", - [(set GR16:$dst, (atomic_load_umin_16 addr:$ptr, GR16:$val))]>; -def ATOMUMAX16: I<0, Pseudo, (outs GR16:$dst),(ins i16mem:$ptr, GR16:$val), - "#ATOMUMAX16 PSEUDO!", - [(set GR16:$dst, (atomic_load_umax_16 addr:$ptr, GR16:$val))]>; - - -def ATOMAND32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), - "#ATOMAND32 PSEUDO!", - [(set GR32:$dst, (atomic_load_and_32 addr:$ptr, GR32:$val))]>; -def ATOMOR32 : I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), - "#ATOMOR32 PSEUDO!", - [(set GR32:$dst, (atomic_load_or_32 addr:$ptr, GR32:$val))]>; -def ATOMXOR32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), - "#ATOMXOR32 PSEUDO!", - [(set GR32:$dst, (atomic_load_xor_32 addr:$ptr, GR32:$val))]>; -def ATOMNAND32 : I<0, Pseudo,(outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), - "#ATOMNAND32 PSEUDO!", - [(set GR32:$dst, (atomic_load_nand_32 addr:$ptr, GR32:$val))]>; -def ATOMMIN32: I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$ptr, GR32:$val), - "#ATOMMIN32 PSEUDO!", - [(set GR32:$dst, (atomic_load_min_32 addr:$ptr, GR32:$val))]>; -def ATOMMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), - "#ATOMMAX32 PSEUDO!", - [(set GR32:$dst, (atomic_load_max_32 addr:$ptr, GR32:$val))]>; -def ATOMUMIN32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), - "#ATOMUMIN32 PSEUDO!", - [(set GR32:$dst, (atomic_load_umin_32 addr:$ptr, GR32:$val))]>; -def ATOMUMAX32: I<0, Pseudo, (outs GR32:$dst),(ins i32mem:$ptr, GR32:$val), - "#ATOMUMAX32 PSEUDO!", - [(set GR32:$dst, (atomic_load_umax_32 addr:$ptr, GR32:$val))]>; - - - -def ATOMAND64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), - "#ATOMAND64 PSEUDO!", - [(set GR64:$dst, (atomic_load_and_64 addr:$ptr, GR64:$val))]>; -def ATOMOR64 : I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), - "#ATOMOR64 PSEUDO!", - [(set GR64:$dst, (atomic_load_or_64 addr:$ptr, GR64:$val))]>; -def ATOMXOR64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), - "#ATOMXOR64 PSEUDO!", - [(set GR64:$dst, (atomic_load_xor_64 addr:$ptr, GR64:$val))]>; -def ATOMNAND64 : I<0, Pseudo,(outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), - "#ATOMNAND64 PSEUDO!", - [(set GR64:$dst, (atomic_load_nand_64 addr:$ptr, GR64:$val))]>; -def ATOMMIN64: I<0, Pseudo, (outs GR64:$dst), (ins i64mem:$ptr, GR64:$val), - "#ATOMMIN64 PSEUDO!", - [(set GR64:$dst, (atomic_load_min_64 addr:$ptr, GR64:$val))]>; -def ATOMMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), - "#ATOMMAX64 PSEUDO!", - [(set GR64:$dst, (atomic_load_max_64 addr:$ptr, GR64:$val))]>; -def ATOMUMIN64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), - "#ATOMUMIN64 PSEUDO!", - [(set GR64:$dst, (atomic_load_umin_64 addr:$ptr, GR64:$val))]>; -def ATOMUMAX64: I<0, Pseudo, (outs GR64:$dst),(ins i64mem:$ptr, GR64:$val), - "#ATOMUMAX64 PSEUDO!", - [(set GR64:$dst, (atomic_load_umax_64 addr:$ptr, GR64:$val))]>; +// Pseudo atomic instructions + +multiclass PSEUDO_ATOMIC_LOAD_BINOP { + let usesCustomInserter = 1, mayLoad = 1, mayStore = 1 in { + def #NAME#8 : I<0, Pseudo, (outs GR8:$dst), + (ins i8mem:$ptr, 
GR8:$val), + !strconcat(mnemonic, "8 PSEUDO!"), []>; + def #NAME#16 : I<0, Pseudo,(outs GR16:$dst), + (ins i16mem:$ptr, GR16:$val), + !strconcat(mnemonic, "16 PSEUDO!"), []>; + def #NAME#32 : I<0, Pseudo, (outs GR32:$dst), + (ins i32mem:$ptr, GR32:$val), + !strconcat(mnemonic, "32 PSEUDO!"), []>; + def #NAME#64 : I<0, Pseudo, (outs GR64:$dst), + (ins i64mem:$ptr, GR64:$val), + !strconcat(mnemonic, "64 PSEUDO!"), []>; + } +} + +multiclass PSEUDO_ATOMIC_LOAD_BINOP_PATS { + def : Pat<(!cast(frag # "_8") addr:$ptr, GR8:$val), + (!cast(name # "8") addr:$ptr, GR8:$val)>; + def : Pat<(!cast(frag # "_16") addr:$ptr, GR16:$val), + (!cast(name # "16") addr:$ptr, GR16:$val)>; + def : Pat<(!cast(frag # "_32") addr:$ptr, GR32:$val), + (!cast(name # "32") addr:$ptr, GR32:$val)>; + def : Pat<(!cast(frag # "_64") addr:$ptr, GR64:$val), + (!cast(name # "64") addr:$ptr, GR64:$val)>; } -let Constraints = "$val1 = $dst1, $val2 = $dst2", - Defs = [EFLAGS, EAX, EBX, ECX, EDX], - Uses = [EAX, EBX, ECX, EDX], - mayLoad = 1, mayStore = 1, - usesCustomInserter = 1 in { -def ATOMAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), - (ins i64mem:$ptr, GR32:$val1, GR32:$val2), - "#ATOMAND6432 PSEUDO!", []>; -def ATOMOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), - (ins i64mem:$ptr, GR32:$val1, GR32:$val2), - "#ATOMOR6432 PSEUDO!", []>; -def ATOMXOR6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), - (ins i64mem:$ptr, GR32:$val1, GR32:$val2), - "#ATOMXOR6432 PSEUDO!", []>; -def ATOMNAND6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), - (ins i64mem:$ptr, GR32:$val1, GR32:$val2), - "#ATOMNAND6432 PSEUDO!", []>; -def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), - (ins i64mem:$ptr, GR32:$val1, GR32:$val2), - "#ATOMADD6432 PSEUDO!", []>; -def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), - (ins i64mem:$ptr, GR32:$val1, GR32:$val2), - "#ATOMSUB6432 PSEUDO!", []>; -def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), - (ins i64mem:$ptr, GR32:$val1, GR32:$val2), - "#ATOMSWAP6432 PSEUDO!", []>; +// Atomic exchange, and, or, xor +defm ATOMAND : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMAND">; +defm ATOMOR : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMOR">; +defm ATOMXOR : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMXOR">; +defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMNAND">; +defm ATOMMAX : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMAX">; +defm ATOMMIN : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMMIN">; +defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMAX">; +defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP<"#ATOMUMIN">; + +defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMAND", "atomic_load_and">; +defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMOR", "atomic_load_or">; +defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMXOR", "atomic_load_xor">; +defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMNAND", "atomic_load_nand">; +defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMAX", "atomic_load_max">; +defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMMIN", "atomic_load_min">; +defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMAX", "atomic_load_umax">; +defm : PSEUDO_ATOMIC_LOAD_BINOP_PATS<"ATOMUMIN", "atomic_load_umin">; + +multiclass PSEUDO_ATOMIC_LOAD_BINOP6432 { + let usesCustomInserter = 1, mayLoad = 1, mayStore = 1 in + def #NAME#6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), + (ins i64mem:$ptr, GR32:$val1, GR32:$val2), + !strconcat(mnemonic, "6432 PSEUDO!"), []>; } +defm ATOMAND : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMAND">; +defm ATOMOR : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMOR">; +defm ATOMXOR : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMXOR">; +defm ATOMNAND : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMNAND">; 
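As the earlier comment on EmitAtomicLoadArith notes, these ATOM* pseudos cover read-modify-write operations (and, or, xor, nand, max, min, umax, umin) that have no single lock-prefixed x86 instruction when the old value is needed, so the custom inserter expands them into a compare-exchange retry loop. A self-contained C++ sketch of roughly that shape, using std::atomic rather than the patch's MachineInstr-level expansion (the function name is mine):

    // Illustrative sketch only: the retry loop an atomic "fetch max" boils
    // down to on x86; the exchange itself becomes a lock cmpxchg.
    #include <atomic>
    #include <cstdio>

    static int atomic_fetch_max(std::atomic<int> &A, int Val) {
      int Old = A.load(std::memory_order_relaxed);
      while (!A.compare_exchange_weak(Old, Old > Val ? Old : Val,
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed)) {
        // Old now holds the current value; loop and retry.
      }
      return Old;
    }

    int main() {
      std::atomic<int> A(5);
      std::printf("previous: %d\n", atomic_fetch_max(A, 9));
      std::printf("new:      %d\n", A.load());
      return 0;
    }
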
+defm ATOMADD : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMADD">; +defm ATOMSUB : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSUB">; +defm ATOMMAX : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMAX">; +defm ATOMMIN : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMMIN">; +defm ATOMUMAX : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMAX">; +defm ATOMUMIN : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMUMIN">; +defm ATOMSWAP : PSEUDO_ATOMIC_LOAD_BINOP6432<"#ATOMSWAP">; + //===----------------------------------------------------------------------===// // Normal-Instructions-With-Lock-Prefix Pseudo Instructions //===----------------------------------------------------------------------===// @@ -617,7 +588,6 @@ def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2), // TODO: Get this to fold the constant into the instruction. let isCodeGenOnly = 1, Defs = [EFLAGS] in def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero), - "lock\n\t" "or{l}\t{$zero, $dst|$dst, $zero}", [], IIC_ALU_MEM>, Requires<[In32BitMode]>, LOCK; @@ -637,72 +607,72 @@ let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in { def #NAME#8mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, RegOpc{3}, RegOpc{2}, RegOpc{1}, 0 }, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src2), - !strconcat("lock\n\t", mnemonic, "{b}\t", + !strconcat(mnemonic, "{b}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_NONMEM>, LOCK; def #NAME#16mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 }, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src2), - !strconcat("lock\n\t", mnemonic, "{w}\t", + !strconcat(mnemonic, "{w}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_NONMEM>, OpSize, LOCK; def #NAME#32mr : I<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 }, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src2), - !strconcat("lock\n\t", mnemonic, "{l}\t", + !strconcat(mnemonic, "{l}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_NONMEM>, LOCK; def #NAME#64mr : RI<{RegOpc{7}, RegOpc{6}, RegOpc{5}, RegOpc{4}, RegOpc{3}, RegOpc{2}, RegOpc{1}, 1 }, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), - !strconcat("lock\n\t", mnemonic, "{q}\t", + !strconcat(mnemonic, "{q}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_NONMEM>, LOCK; def #NAME#8mi : Ii8<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4}, ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 0 }, ImmMod, (outs), (ins i8mem :$dst, i8imm :$src2), - !strconcat("lock\n\t", mnemonic, "{b}\t", + !strconcat(mnemonic, "{b}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_MEM>, LOCK; def #NAME#16mi : Ii16<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4}, ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 }, ImmMod, (outs), (ins i16mem :$dst, i16imm :$src2), - !strconcat("lock\n\t", mnemonic, "{w}\t", + !strconcat(mnemonic, "{w}\t", "{$src2, $dst|$dst, $src2}"), - [], IIC_ALU_MEM>, LOCK; + [], IIC_ALU_MEM>, OpSize, LOCK; def #NAME#32mi : Ii32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4}, ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 }, ImmMod, (outs), (ins i32mem :$dst, i32imm :$src2), - !strconcat("lock\n\t", mnemonic, "{l}\t", + !strconcat(mnemonic, "{l}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_MEM>, LOCK; def #NAME#64mi32 : RIi32<{ImmOpc{7}, ImmOpc{6}, ImmOpc{5}, ImmOpc{4}, ImmOpc{3}, ImmOpc{2}, ImmOpc{1}, 1 }, ImmMod, (outs), (ins i64mem :$dst, i64i32imm :$src2), - !strconcat("lock\n\t", mnemonic, "{q}\t", + !strconcat(mnemonic, "{q}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_MEM>, LOCK; def #NAME#16mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4}, ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 }, ImmMod, (outs), 
(ins i16mem :$dst, i16i8imm :$src2), - !strconcat("lock\n\t", mnemonic, "{w}\t", + !strconcat(mnemonic, "{w}\t", "{$src2, $dst|$dst, $src2}"), - [], IIC_ALU_MEM>, LOCK; + [], IIC_ALU_MEM>, OpSize, LOCK; def #NAME#32mi8 : Ii8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4}, ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 }, ImmMod, (outs), (ins i32mem :$dst, i32i8imm :$src2), - !strconcat("lock\n\t", mnemonic, "{l}\t", + !strconcat(mnemonic, "{l}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_MEM>, LOCK; def #NAME#64mi8 : RIi8<{ImmOpc8{7}, ImmOpc8{6}, ImmOpc8{5}, ImmOpc8{4}, ImmOpc8{3}, ImmOpc8{2}, ImmOpc8{1}, 1 }, ImmMod, (outs), (ins i64mem :$dst, i64i8imm :$src2), - !strconcat("lock\n\t", mnemonic, "{q}\t", + !strconcat(mnemonic, "{q}\t", "{$src2, $dst|$dst, $src2}"), [], IIC_ALU_MEM>, LOCK; @@ -717,107 +687,117 @@ defm LOCK_AND : LOCK_ArithBinOp<0x20, 0x80, 0x83, MRM4m, "and">; defm LOCK_XOR : LOCK_ArithBinOp<0x30, 0x80, 0x83, MRM6m, "xor">; // Optimized codegen when the non-memory output is not used. +multiclass LOCK_ArithUnOp Opc8, bits<8> Opc, Format Form, + string mnemonic> { let Defs = [EFLAGS], mayLoad = 1, mayStore = 1, isCodeGenOnly = 1 in { -def LOCK_INC8m : I<0xFE, MRM0m, (outs), (ins i8mem :$dst), - "lock\n\t" - "inc{b}\t$dst", [], IIC_UNARY_MEM>, LOCK; -def LOCK_INC16m : I<0xFF, MRM0m, (outs), (ins i16mem:$dst), - "lock\n\t" - "inc{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK; -def LOCK_INC32m : I<0xFF, MRM0m, (outs), (ins i32mem:$dst), - "lock\n\t" - "inc{l}\t$dst", [], IIC_UNARY_MEM>, LOCK; -def LOCK_INC64m : RI<0xFF, MRM0m, (outs), (ins i64mem:$dst), - "lock\n\t" - "inc{q}\t$dst", [], IIC_UNARY_MEM>, LOCK; - -def LOCK_DEC8m : I<0xFE, MRM1m, (outs), (ins i8mem :$dst), - "lock\n\t" - "dec{b}\t$dst", [], IIC_UNARY_MEM>, LOCK; -def LOCK_DEC16m : I<0xFF, MRM1m, (outs), (ins i16mem:$dst), - "lock\n\t" - "dec{w}\t$dst", [], IIC_UNARY_MEM>, OpSize, LOCK; -def LOCK_DEC32m : I<0xFF, MRM1m, (outs), (ins i32mem:$dst), - "lock\n\t" - "dec{l}\t$dst", [], IIC_UNARY_MEM>, LOCK; -def LOCK_DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), - "lock\n\t" - "dec{q}\t$dst", [], IIC_UNARY_MEM>, LOCK; +def #NAME#8m : I, LOCK; +def #NAME#16m : I, OpSize, LOCK; +def #NAME#32m : I, LOCK; +def #NAME#64m : RI, LOCK; +} } -// Atomic compare and swap. -let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX], - isCodeGenOnly = 1 in -def LCMPXCHG8B : I<0xC7, MRM1m, (outs), (ins i64mem:$ptr), - "lock\n\t" - "cmpxchg8b\t$ptr", - [(X86cas8 addr:$ptr)], IIC_CMPX_LOCK_8B>, TB, LOCK; +defm LOCK_INC : LOCK_ArithUnOp<0xFE, 0xFF, MRM0m, "inc">; +defm LOCK_DEC : LOCK_ArithUnOp<0xFE, 0xFF, MRM1m, "dec">; -let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX], - isCodeGenOnly = 1 in -def LCMPXCHG16B : RI<0xC7, MRM1m, (outs), (ins i128mem:$ptr), - "lock\n\t" - "cmpxchg16b\t$ptr", - [(X86cas16 addr:$ptr)], IIC_CMPX_LOCK_16B>, TB, LOCK, - Requires<[HasCmpxchg16b]>; - -let Defs = [AL, EFLAGS], Uses = [AL], isCodeGenOnly = 1 in { -def LCMPXCHG8 : I<0xB0, MRMDestMem, (outs), (ins i8mem:$ptr, GR8:$swap), - "lock\n\t" - "cmpxchg{b}\t{$swap, $ptr|$ptr, $swap}", - [(X86cas addr:$ptr, GR8:$swap, 1)], IIC_CMPX_LOCK_8>, TB, LOCK; +// Atomic compare and swap. 
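How the pieces in this file divide the work: the LOCK_ArithBinOp/LOCK_ArithUnOp patterns above cover the case where the old value is unused (a single lock-prefixed instruction suffices), LXADD covers add/sub where the old value is needed, and the ATOM* pseudos cover everything else via the cmpxchg loop. A hedged C++ illustration; exact codegen of course varies by compiler and version:

    // Illustrative sketch only: which source-level atomics land on which of
    // the patterns in this file, for typical codegen.
    #include <atomic>
    #include <cstdio>

    std::atomic<unsigned> Flags(0);
    std::atomic<unsigned> Counter(0);

    unsigned update() {
      // Result unused: typically a single "lock or" (LOCK_ArithBinOp).
      Flags.fetch_or(0x4, std::memory_order_relaxed);

      // Old value needed for an add: "lock xadd" returns it directly (LXADD).
      unsigned OldCount = Counter.fetch_add(1, std::memory_order_relaxed);

      // Old value needed for an or: no single instruction returns it, so this
      // is the cmpxchg-loop case the ATOMOR pseudos cover.
      unsigned OldFlags = Flags.fetch_or(0x8, std::memory_order_relaxed);

      return OldCount + OldFlags;
    }

    int main() { std::printf("%u\n", update()); return 0; }
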
+multiclass LCMPXCHG_UnOp Opc, Format Form, string mnemonic, + SDPatternOperator frag, X86MemOperand x86memop, + InstrItinClass itin> { +let isCodeGenOnly = 1 in { + def #NAME# : I, TB, LOCK; +} } -let Defs = [AX, EFLAGS], Uses = [AX], isCodeGenOnly = 1 in { -def LCMPXCHG16 : I<0xB1, MRMDestMem, (outs), (ins i16mem:$ptr, GR16:$swap), - "lock\n\t" - "cmpxchg{w}\t{$swap, $ptr|$ptr, $swap}", - [(X86cas addr:$ptr, GR16:$swap, 2)], IIC_CMPX_LOCK>, TB, OpSize, LOCK; +multiclass LCMPXCHG_BinOp Opc8, bits<8> Opc, Format Form, + string mnemonic, SDPatternOperator frag, + InstrItinClass itin8, InstrItinClass itin> { +let isCodeGenOnly = 1 in { + let Defs = [AL, EFLAGS], Uses = [AL] in + def #NAME#8 : I, TB, LOCK; + let Defs = [AX, EFLAGS], Uses = [AX] in + def #NAME#16 : I, TB, OpSize, LOCK; + let Defs = [EAX, EFLAGS], Uses = [EAX] in + def #NAME#32 : I, TB, LOCK; + let Defs = [RAX, EFLAGS], Uses = [RAX] in + def #NAME#64 : RI, TB, LOCK; +} } -let Defs = [EAX, EFLAGS], Uses = [EAX], isCodeGenOnly = 1 in { -def LCMPXCHG32 : I<0xB1, MRMDestMem, (outs), (ins i32mem:$ptr, GR32:$swap), - "lock\n\t" - "cmpxchg{l}\t{$swap, $ptr|$ptr, $swap}", - [(X86cas addr:$ptr, GR32:$swap, 4)], IIC_CMPX_LOCK>, TB, LOCK; +let Defs = [EAX, EDX, EFLAGS], Uses = [EAX, EBX, ECX, EDX] in { +defm LCMPXCHG8B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg8b", + X86cas8, i64mem, + IIC_CMPX_LOCK_8B>; } -let Defs = [RAX, EFLAGS], Uses = [RAX], isCodeGenOnly = 1 in { -def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap), - "lock\n\t" - "cmpxchg{q}\t{$swap, $ptr|$ptr, $swap}", - [(X86cas addr:$ptr, GR64:$swap, 8)], IIC_CMPX_LOCK>, TB, LOCK; +let Defs = [RAX, RDX, EFLAGS], Uses = [RAX, RBX, RCX, RDX], + Predicates = [HasCmpxchg16b] in { +defm LCMPXCHG16B : LCMPXCHG_UnOp<0xC7, MRM1m, "cmpxchg16b", + X86cas16, i128mem, + IIC_CMPX_LOCK_16B>, REX_W; } +defm LCMPXCHG : LCMPXCHG_BinOp<0xB0, 0xB1, MRMDestMem, "cmpxchg", + X86cas, IIC_CMPX_LOCK_8, IIC_CMPX_LOCK>; + // Atomic exchange and add -let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in { -def LXADD8 : I<0xC0, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr), - "lock\n\t" - "xadd{b}\t{$val, $ptr|$ptr, $val}", - [(set GR8:$dst, (atomic_load_add_8 addr:$ptr, GR8:$val))], - IIC_XADD_LOCK_MEM8>, - TB, LOCK; -def LXADD16 : I<0xC1, MRMSrcMem, (outs GR16:$dst), (ins GR16:$val, i16mem:$ptr), - "lock\n\t" - "xadd{w}\t{$val, $ptr|$ptr, $val}", - [(set GR16:$dst, (atomic_load_add_16 addr:$ptr, GR16:$val))], - IIC_XADD_LOCK_MEM>, - TB, OpSize, LOCK; -def LXADD32 : I<0xC1, MRMSrcMem, (outs GR32:$dst), (ins GR32:$val, i32mem:$ptr), - "lock\n\t" - "xadd{l}\t{$val, $ptr|$ptr, $val}", - [(set GR32:$dst, (atomic_load_add_32 addr:$ptr, GR32:$val))], - IIC_XADD_LOCK_MEM>, - TB, LOCK; -def LXADD64 : RI<0xC1, MRMSrcMem, (outs GR64:$dst), (ins GR64:$val,i64mem:$ptr), - "lock\n\t" - "xadd{q}\t{$val, $ptr|$ptr, $val}", - [(set GR64:$dst, (atomic_load_add_64 addr:$ptr, GR64:$val))], - IIC_XADD_LOCK_MEM>, - TB, LOCK; +multiclass ATOMIC_LOAD_BINOP opc8, bits<8> opc, string mnemonic, + string frag, + InstrItinClass itin8, InstrItinClass itin> { + let Constraints = "$val = $dst", Defs = [EFLAGS], isCodeGenOnly = 1 in { + def #NAME#8 : I(frag # "_8") addr:$ptr, GR8:$val))], + itin8>; + def #NAME#16 : I(frag # "_16") addr:$ptr, GR16:$val))], + itin>, OpSize; + def #NAME#32 : I(frag # "_32") addr:$ptr, GR32:$val))], + itin>; + def #NAME#64 : RI(frag # "_64") addr:$ptr, GR64:$val))], + itin>; + } } +defm LXADD : ATOMIC_LOAD_BINOP<0xc0, 0xc1, "xadd", "atomic_load_add", + 
IIC_XADD_LOCK_MEM8, IIC_XADD_LOCK_MEM>, + TB, LOCK; + def ACQUIRE_MOV8rm : I<0, Pseudo, (outs GR8 :$dst), (ins i8mem :$src), "#ACQUIRE_MOV PSEUDO!", [(set GR8:$dst, (atomic_load_8 addr:$src))]>; @@ -1017,7 +997,24 @@ def : Pat<(X86call (i64 tglobaladdr:$dst)), def : Pat<(X86call (i64 texternalsym:$dst)), (CALL64pcrel32 texternalsym:$dst)>; -// tailcall stuff +// Tailcall stuff. The TCRETURN instructions execute after the epilog, so they +// can never use callee-saved registers. That is the purpose of the GR64_TC +// register classes. +// +// The only volatile register that is never used by the calling convention is +// %r11. This happens when calling a vararg function with 6 arguments. +// +// Match an X86tcret that uses less than 7 volatile registers. +def X86tcret_6regs : PatFrag<(ops node:$ptr, node:$off), + (X86tcret node:$ptr, node:$off), [{ + // X86tcret args: (*chain, ptr, imm, regs..., glue) + unsigned NumRegs = 0; + for (unsigned i = 3, e = N->getNumOperands(); i != e; ++i) + if (isa(N->getOperand(i)) && ++NumRegs > 6) + return false; + return true; +}]>; + def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off), (TCRETURNri ptr_rc_tailcall:$dst, imm:$off)>, Requires<[In32BitMode]>; @@ -1041,7 +1038,9 @@ def : Pat<(X86tcret ptr_rc_tailcall:$dst, imm:$off), (TCRETURNri64 ptr_rc_tailcall:$dst, imm:$off)>, Requires<[In64BitMode]>; -def : Pat<(X86tcret (load addr:$dst), imm:$off), +// Don't fold loads into X86tcret requiring more than 6 regs. +// There wouldn't be enough scratch registers for base+index. +def : Pat<(X86tcret_6regs (load addr:$dst), imm:$off), (TCRETURNmi64 addr:$dst, imm:$off)>, Requires<[In64BitMode]>; diff --git a/lib/Target/X86/X86InstrControl.td b/lib/Target/X86/X86InstrControl.td index b0c27c8..bfe9541 100644 --- a/lib/Target/X86/X86InstrControl.td +++ b/lib/Target/X86/X86InstrControl.td @@ -16,15 +16,18 @@ // // Return instructions. +// +// The X86retflag return instructions are variadic because we may add ST0 and +// ST1 arguments when returning values on the x87 stack. 
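Regarding the X86tcret_6regs predicate added in the tail-call hunk above: with only the six argument registers plus %r11 volatile at the tail-call site, a folded callee-address load can run out of scratch registers for its base+index, hence the "at most six register operands" check. A standalone restatement of that budget; the function name and the self-contained form are mine, only the numbers come from the patch's comment:

    // Illustrative sketch only: the register budget behind X86tcret_6regs.
    #include <cstdio>

    static bool canFoldTailCallLoad(unsigned NumRegOperands) {
      // Six argument registers plus %r11 are the only volatile registers at
      // the tail-call site; past six register operands nothing is left for a
      // base+index address, so the load must not be folded.
      return NumRegOperands <= 6;
    }

    int main() {
      std::printf("%d %d\n", canFoldTailCallLoad(6), canFoldTailCallLoad(7));
      return 0;
    }
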
let isTerminator = 1, isReturn = 1, isBarrier = 1, hasCtrlDep = 1, FPForm = SpecialFP in { - def RET : I <0xC3, RawFrm, (outs), (ins), + def RET : I <0xC3, RawFrm, (outs), (ins variable_ops), "ret", [(X86retflag 0)], IIC_RET>; def RETW : I <0xC3, RawFrm, (outs), (ins), "ret{w}", [], IIC_RET>, OpSize; - def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt), + def RETI : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt, variable_ops), "ret\t$amt", [(X86retflag timm:$amt)], IIC_RET_IMM>; def RETIW : Ii16<0xC2, RawFrm, (outs), (ins i16imm:$amt), diff --git a/lib/Target/X86/X86InstrFMA.td b/lib/Target/X86/X86InstrFMA.td index 265b4bb..959d91a 100644 --- a/lib/Target/X86/X86InstrFMA.td +++ b/lib/Target/X86/X86InstrFMA.td @@ -16,243 +16,180 @@ //===----------------------------------------------------------------------===// let Constraints = "$src1 = $dst" in { -multiclass fma3p_rm opc, string OpcodeStr> { -let neverHasSideEffects = 1 in { - def r : FMA3; - let mayLoad = 1 in - def m : FMA3; - def rY : FMA3; - let mayLoad = 1 in - def mY : FMA3; -} // neverHasSideEffects = 1 -} - -// Intrinsic for 213 pattern -multiclass fma3p_rm_int opc, string OpcodeStr, - PatFrag MemFrag128, PatFrag MemFrag256, - Intrinsic Int128, Intrinsic Int256, SDNode Op213, - ValueType OpVT128, ValueType OpVT256> { - def r_Int : FMA3; - +multiclass fma3p_rm opc, string OpcodeStr, + PatFrag MemFrag128, PatFrag MemFrag256, + ValueType OpVT128, ValueType OpVT256, + SDPatternOperator Op = null_frag> { + let isCommutable = 1 in def r : FMA3; - def m_Int : FMA3; - + let mayLoad = 1 in def m : FMA3; - - def rY_Int : FMA3; - + let isCommutable = 1 in def rY : FMA3; - - def mY_Int : FMA3; + [(set VR256:$dst, (OpVT256 (Op VR256:$src2, VR256:$src1, + VR256:$src3)))]>, VEX_L; + let mayLoad = 1 in def mY : FMA3; + (OpVT256 (Op VR256:$src2, VR256:$src1, + (MemFrag256 addr:$src3))))]>, VEX_L; } } // Constraints = "$src1 = $dst" multiclass fma3p_forms opc132, bits<8> opc213, bits<8> opc231, string OpcodeStr, string PackTy, PatFrag MemFrag128, PatFrag MemFrag256, - Intrinsic Int128, Intrinsic Int256, SDNode Op, - ValueType OpTy128, ValueType OpTy256> { - defm r213 : fma3p_rm_int ; - defm r132 : fma3p_rm ; - defm r231 : fma3p_rm ; + SDNode Op, ValueType OpTy128, ValueType OpTy256> { + defm r213 : fma3p_rm; +let neverHasSideEffects = 1 in { + defm r132 : fma3p_rm; + defm r231 : fma3p_rm; +} // neverHasSideEffects = 1 } // Fused Multiply-Add let ExeDomain = SSEPackedSingle in { defm VFMADDPS : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", memopv4f32, - memopv8f32, int_x86_fma_vfmadd_ps, - int_x86_fma_vfmadd_ps_256, X86Fmadd, - v4f32, v8f32>; + memopv8f32, X86Fmadd, v4f32, v8f32>; defm VFMSUBPS : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", memopv4f32, - memopv8f32, int_x86_fma_vfmsub_ps, - int_x86_fma_vfmsub_ps_256, X86Fmsub, - v4f32, v8f32>; + memopv8f32, X86Fmsub, v4f32, v8f32>; defm VFMADDSUBPS : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", - memopv4f32, memopv8f32, - int_x86_fma_vfmaddsub_ps, - int_x86_fma_vfmaddsub_ps_256, X86Fmaddsub, + memopv4f32, memopv8f32, X86Fmaddsub, v4f32, v8f32>; defm VFMSUBADDPS : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps", - memopv4f32, memopv8f32, - int_x86_fma_vfmsubadd_ps, - int_x86_fma_vfmaddsub_ps_256, X86Fmsubadd, + memopv4f32, memopv8f32, X86Fmsubadd, v4f32, v8f32>; } let ExeDomain = SSEPackedDouble in { defm VFMADDPD : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", memopv2f64, - memopv4f64, int_x86_fma_vfmadd_pd, - int_x86_fma_vfmadd_pd_256, X86Fmadd, v2f64, - v4f64>, VEX_W; + memopv4f64, 
X86Fmadd, v2f64, v4f64>, VEX_W; defm VFMSUBPD : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", memopv2f64, - memopv4f64, int_x86_fma_vfmsub_pd, - int_x86_fma_vfmsub_pd_256, X86Fmsub, v2f64, - v4f64>, VEX_W; + memopv4f64, X86Fmsub, v2f64, v4f64>, VEX_W; defm VFMADDSUBPD : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", - memopv2f64, memopv4f64, - int_x86_fma_vfmaddsub_pd, - int_x86_fma_vfmaddsub_pd_256, X86Fmaddsub, + memopv2f64, memopv4f64, X86Fmaddsub, v2f64, v4f64>, VEX_W; defm VFMSUBADDPD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", - memopv2f64, memopv4f64, - int_x86_fma_vfmsubadd_pd, - int_x86_fma_vfmsubadd_pd_256, X86Fmsubadd, + memopv2f64, memopv4f64, X86Fmsubadd, v2f64, v4f64>, VEX_W; } // Fused Negative Multiply-Add let ExeDomain = SSEPackedSingle in { defm VFNMADDPS : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", memopv4f32, - memopv8f32, int_x86_fma_vfnmadd_ps, - int_x86_fma_vfnmadd_ps_256, X86Fnmadd, v4f32, - v8f32>; + memopv8f32, X86Fnmadd, v4f32, v8f32>; defm VFNMSUBPS : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", memopv4f32, - memopv8f32, int_x86_fma_vfnmsub_ps, - int_x86_fma_vfnmsub_ps_256, X86Fnmsub, v4f32, - v8f32>; + memopv8f32, X86Fnmsub, v4f32, v8f32>; } let ExeDomain = SSEPackedDouble in { defm VFNMADDPD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", memopv2f64, - memopv4f64, int_x86_fma_vfnmadd_pd, - int_x86_fma_vfnmadd_pd_256, X86Fnmadd, v2f64, - v4f64>, VEX_W; + memopv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W; defm VFNMSUBPD : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", - memopv2f64, - memopv4f64, int_x86_fma_vfnmsub_pd, - int_x86_fma_vfnmsub_pd_256, X86Fnmsub, v2f64, + memopv2f64, memopv4f64, X86Fnmsub, v2f64, v4f64>, VEX_W; } let Constraints = "$src1 = $dst" in { multiclass fma3s_rm opc, string OpcodeStr, X86MemOperand x86memop, - RegisterClass RC> { -let neverHasSideEffects = 1 in { - def r : FMA3; + RegisterClass RC, ValueType OpVT, PatFrag mem_frag, + SDPatternOperator OpNode = null_frag> { + let isCommutable = 1 in + def r : FMA3; let mayLoad = 1 in - def m : FMA3; -} // neverHasSideEffects = 1 + def m : FMA3; } multiclass fma3s_rm_int opc, string OpcodeStr, Operand memop, - ComplexPattern mem_cpat, Intrinsic IntId, - RegisterClass RC, SDNode OpNode, ValueType OpVT> { + ComplexPattern mem_cpat, Intrinsic IntId, + RegisterClass RC> { + let isCommutable = 1 in def r_Int : FMA3; def m_Int : FMA3; - def r : FMA3; - let mayLoad = 1 in - def m : FMA3; } } // Constraints = "$src1 = $dst" multiclass fma3s_forms opc132, bits<8> opc213, bits<8> opc231, - string OpStr, Intrinsic IntF32, Intrinsic IntF64, - SDNode OpNode> { - defm SSr132 : fma3s_rm; - defm SSr231 : fma3s_rm; - defm SDr132 : fma3s_rm, - VEX_W; - defm SDr231 : fma3s_rm, - VEX_W; - defm SSr213 : fma3s_rm_int ; - defm SDr213 : fma3s_rm_int , VEX_W; + string OpStr, string PackTy, Intrinsic Int, + SDNode OpNode, RegisterClass RC, ValueType OpVT, + X86MemOperand x86memop, Operand memop, PatFrag mem_frag, + ComplexPattern mem_cpat> { +let neverHasSideEffects = 1 in { + defm r132 : fma3s_rm; + defm r231 : fma3s_rm; +} + +defm r213 : fma3s_rm, + fma3s_rm_int; +} + +multiclass fma3s opc132, bits<8> opc213, bits<8> opc231, + string OpStr, Intrinsic IntF32, Intrinsic IntF64, + SDNode OpNode> { + defm SS : fma3s_forms; + defm SD : fma3s_forms, VEX_W; } -defm VFMADD : fma3s_forms<0x99, 0xA9, 0xB9, "vfmadd", int_x86_fma_vfmadd_ss, - int_x86_fma_vfmadd_sd, X86Fmadd>, VEX_LIG; -defm VFMSUB : fma3s_forms<0x9B, 0xAB, 0xBB, "vfmsub", int_x86_fma_vfmsub_ss, - int_x86_fma_vfmsub_sd, X86Fmsub>, VEX_LIG; +defm VFMADD 
: fma3s<0x99, 0xA9, 0xB9, "vfmadd", int_x86_fma_vfmadd_ss, + int_x86_fma_vfmadd_sd, X86Fmadd>, VEX_LIG; +defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", int_x86_fma_vfmsub_ss, + int_x86_fma_vfmsub_sd, X86Fmsub>, VEX_LIG; -defm VFNMADD : fma3s_forms<0x9D, 0xAD, 0xBD, "vfnmadd", int_x86_fma_vfnmadd_ss, - int_x86_fma_vfnmadd_sd, X86Fnmadd>, VEX_LIG; -defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss, - int_x86_fma_vfnmsub_sd, X86Fnmsub>, VEX_LIG; +defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", int_x86_fma_vfnmadd_ss, + int_x86_fma_vfnmadd_sd, X86Fnmadd>, VEX_LIG; +defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss, + int_x86_fma_vfnmsub_sd, X86Fnmsub>, VEX_LIG; //===----------------------------------------------------------------------===// @@ -260,73 +197,102 @@ defm VFNMSUB : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", int_x86_fma_vfnmsub_ss, //===----------------------------------------------------------------------===// -multiclass fma4s opc, string OpcodeStr, Operand memop, - ComplexPattern mem_cpat, Intrinsic Int> { - def rr : FMA4 opc, string OpcodeStr, RegisterClass RC, + X86MemOperand x86memop, ValueType OpVT, SDNode OpNode, + PatFrag mem_frag> { + let isCommutable = 1 in + def rr : FMA4, VEX_W, MemOp4; - def rm : FMA4, VEX_W, MemOp4; + def rm : FMA4, VEX_W, MemOp4; - def mr : FMA4, VEX_W, MemOp4; + def mr : FMA4; + [(set RC:$dst, + (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>; // For disassembler let isCodeGenOnly = 1 in - def rr_REV : FMA4; } -multiclass fma4p opc, string OpcodeStr, - Intrinsic Int128, Intrinsic Int256, +multiclass fma4s_int opc, string OpcodeStr, Operand memop, + ComplexPattern mem_cpat, Intrinsic Int> { + let isCommutable = 1 in + def rr_Int : FMA4, VEX_W, MemOp4; + def rm_Int : FMA4, VEX_W, MemOp4; + def mr_Int : FMA4; +} + +multiclass fma4p opc, string OpcodeStr, SDNode OpNode, + ValueType OpVT128, ValueType OpVT256, PatFrag ld_frag128, PatFrag ld_frag256> { + let isCommutable = 1 in def rr : FMA4, VEX_W, MemOp4; + (OpVT128 (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>, + VEX_W, MemOp4; def rm : FMA4, VEX_W, MemOp4; def mr : FMA4; + (OpNode VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>; + let isCommutable = 1 in def rrY : FMA4, VEX_W, MemOp4; + (OpVT256 (OpNode VR256:$src1, VR256:$src2, VR256:$src3)))]>, + VEX_W, MemOp4, VEX_L; def rmY : FMA4, VEX_W, MemOp4; + [(set VR256:$dst, (OpNode VR256:$src1, VR256:$src2, + (ld_frag256 addr:$src3)))]>, VEX_W, MemOp4, VEX_L; def mrY : FMA4; + [(set VR256:$dst, (OpNode VR256:$src1, + (ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L; // For disassembler let isCodeGenOnly = 1 in { def rr_REV : FMA4; + "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>, + VEX_L; } // isCodeGenOnly = 1 } let Predicates = [HasFMA4] in { -defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", ssmem, sse_load_f32, - int_x86_fma_vfmadd_ss>; -defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", sdmem, sse_load_f64, - int_x86_fma_vfmadd_sd>; -defm VFMADDPS4 : fma4p<0x68, "vfmaddps", int_x86_fma_vfmadd_ps, - int_x86_fma_vfmadd_ps_256, memopv4f32, memopv8f32>; -defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", int_x86_fma_vfmadd_pd, - int_x86_fma_vfmadd_pd_256, memopv2f64, memopv4f64>; -defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", ssmem, sse_load_f32, - int_x86_fma_vfmsub_ss>; -defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", sdmem, sse_load_f64, - int_x86_fma_vfmsub_sd>; -defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", int_x86_fma_vfmsub_ps, - int_x86_fma_vfmsub_ps_256, memopv4f32, memopv8f32>; -defm VFMSUBPD4 : 
fma4p<0x6D, "vfmsubpd", int_x86_fma_vfmsub_pd, - int_x86_fma_vfmsub_pd_256, memopv2f64, memopv4f64>; -defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", ssmem, sse_load_f32, - int_x86_fma_vfnmadd_ss>; -defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", sdmem, sse_load_f64, - int_x86_fma_vfnmadd_sd>; -defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", int_x86_fma_vfnmadd_ps, - int_x86_fma_vfnmadd_ps_256, memopv4f32, memopv8f32>; -defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", int_x86_fma_vfnmadd_pd, - int_x86_fma_vfnmadd_pd_256, memopv2f64, memopv4f64>; -defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", ssmem, sse_load_f32, - int_x86_fma_vfnmsub_ss>; -defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", sdmem, sse_load_f64, - int_x86_fma_vfnmsub_sd>; -defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", int_x86_fma_vfnmsub_ps, - int_x86_fma_vfnmsub_ps_256, memopv4f32, memopv8f32>; -defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", int_x86_fma_vfnmsub_pd, - int_x86_fma_vfnmsub_pd_256, memopv2f64, memopv4f64>; -defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", int_x86_fma_vfmaddsub_ps, - int_x86_fma_vfmaddsub_ps_256, memopv4f32, memopv8f32>; -defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", int_x86_fma_vfmaddsub_pd, - int_x86_fma_vfmaddsub_pd_256, memopv2f64, memopv4f64>; -defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", int_x86_fma_vfmsubadd_ps, - int_x86_fma_vfmsubadd_ps_256, memopv4f32, memopv8f32>; -defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", int_x86_fma_vfmsubadd_pd, - int_x86_fma_vfmsubadd_pd_256, memopv2f64, memopv4f64>; +defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", FR32, f32mem, f32, X86Fmadd, loadf32>, + fma4s_int<0x6A, "vfmaddss", ssmem, sse_load_f32, + int_x86_fma_vfmadd_ss>; +defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", FR64, f64mem, f64, X86Fmadd, loadf64>, + fma4s_int<0x6B, "vfmaddsd", sdmem, sse_load_f64, + int_x86_fma_vfmadd_sd>; +defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86Fmsub, loadf32>, + fma4s_int<0x6E, "vfmsubss", ssmem, sse_load_f32, + int_x86_fma_vfmsub_ss>; +defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86Fmsub, loadf64>, + fma4s_int<0x6F, "vfmsubsd", sdmem, sse_load_f64, + int_x86_fma_vfmsub_sd>; +defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32, + X86Fnmadd, loadf32>, + fma4s_int<0x7A, "vfnmaddss", ssmem, sse_load_f32, + int_x86_fma_vfnmadd_ss>; +defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64, + X86Fnmadd, loadf64>, + fma4s_int<0x7B, "vfnmaddsd", sdmem, sse_load_f64, + int_x86_fma_vfnmadd_sd>; +defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32, + X86Fnmsub, loadf32>, + fma4s_int<0x7E, "vfnmsubss", ssmem, sse_load_f32, + int_x86_fma_vfnmsub_ss>; +defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64, + X86Fnmsub, loadf64>, + fma4s_int<0x7F, "vfnmsubsd", sdmem, sse_load_f64, + int_x86_fma_vfnmsub_sd>; + +defm VFMADDPS4 : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32, + memopv4f32, memopv8f32>; +defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64, + memopv2f64, memopv4f64>; +defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32, + memopv4f32, memopv8f32>; +defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64, + memopv2f64, memopv4f64>; +defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32, + memopv4f32, memopv8f32>; +defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64, + memopv2f64, memopv4f64>; +defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32, + memopv4f32, memopv8f32>; +defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64, + memopv2f64, memopv4f64>; +defm VFMADDSUBPS4 
: fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32, + memopv4f32, memopv8f32>; +defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64, + memopv2f64, memopv4f64>; +defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32, + memopv4f32, memopv8f32>; +defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64, + memopv2f64, memopv4f64>; } // HasFMA4 diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td index 81b4f81..268e9fc 100644 --- a/lib/Target/X86/X86InstrFormats.td +++ b/lib/Target/X86/X86InstrFormats.td @@ -44,14 +44,15 @@ def RawFrmImm16 : Format<44>; def MRM_D0 : Format<45>; def MRM_D1 : Format<46>; def MRM_D4 : Format<47>; -def MRM_D8 : Format<48>; -def MRM_D9 : Format<49>; -def MRM_DA : Format<50>; -def MRM_DB : Format<51>; -def MRM_DC : Format<52>; -def MRM_DD : Format<53>; -def MRM_DE : Format<54>; -def MRM_DF : Format<55>; +def MRM_D5 : Format<48>; +def MRM_D8 : Format<49>; +def MRM_D9 : Format<50>; +def MRM_DA : Format<51>; +def MRM_DB : Format<52>; +def MRM_DC : Format<53>; +def MRM_DD : Format<54>; +def MRM_DE : Format<55>; +def MRM_DF : Format<56>; // ImmType - This specifies the immediate type used by an instruction. This is // part of the ad-hoc solution used to emit machine instruction encodings by our @@ -287,12 +288,14 @@ class Iseg32 o, Format f, dag outs, dag ins, string asm, let CodeSize = 3; } +def __xs : XS; + // SI - SSE 1 & 2 scalar instructions class SI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I { let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX], - !if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2])); + !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2])); // AVX instructions have a 'v' prefix in the mnemonic let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm); @@ -303,7 +306,7 @@ class SIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : Ii8 { let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX], - !if(!eq(Prefix, 12 /* XS */), [HasSSE1], [HasSSE2])); + !if(!eq(Prefix, __xs.Prefix), [UseSSE1], [UseSSE2])); // AVX instructions have a 'v' prefix in the mnemonic let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm); @@ -314,18 +317,25 @@ class PI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin, Domain d> : I { let Predicates = !if(hasVEXPrefix /* VEX */, [HasAVX], - !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1])); + !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1])); // AVX instructions have a 'v' prefix in the mnemonic let AsmString = !if(hasVEXPrefix, !strconcat("v", asm), asm); } +// MMXPI - SSE 1 & 2 packed instructions with MMX operands +class MMXPI o, Format F, dag outs, dag ins, string asm, list pattern, + InstrItinClass itin, Domain d> + : I { + let Predicates = !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1]); +} + // PIi8 - SSE 1 & 2 packed instructions with immediate class PIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin, Domain d> : Ii8 { let Predicates = !if(hasVEX_4VPrefix /* VEX */, [HasAVX], - !if(hasOpSizePrefix /* OpSize */, [HasSSE2], [HasSSE1])); + !if(hasOpSizePrefix /* OpSize */, [UseSSE2], [UseSSE1])); // AVX instructions have a 'v' prefix in the mnemonic let AsmString = !if(hasVEX_4VPrefix, !strconcat("v", asm), asm); @@ -341,18 +351,18 @@ class PIi8 o, Format F, dag outs, dag ins, string asm, class SSI o, Format F, dag outs, dag ins, string asm, list 
pattern, InstrItinClass itin = IIC_DEFAULT> - : I, XS, Requires<[HasSSE1]>; + : I, XS, Requires<[UseSSE1]>; class SSIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> - : Ii8, XS, Requires<[HasSSE1]>; + : Ii8, XS, Requires<[UseSSE1]>; class PSI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, TB, - Requires<[HasSSE1]>; + Requires<[UseSSE1]>; class PSIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : Ii8, TB, - Requires<[HasSSE1]>; + Requires<[UseSSE1]>; class VSSI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, XS, @@ -372,27 +382,31 @@ class VPSI o, Format F, dag outs, dag ins, string asm, // PDIi8 - SSE2 instructions with ImmT == Imm8 and TB and OpSize prefixes. // VSDI - SSE2 instructions with XD prefix in AVX form. // VPDI - SSE2 instructions with TB and OpSize prefixes in AVX form. +// MMXSDIi8 - SSE2 instructions with ImmT == Imm8 and XD prefix as well as +// MMX operands. +// MMXSSDIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix as well as +// MMX operands. class SDI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> - : I, XD, Requires<[HasSSE2]>; + : I, XD, Requires<[UseSSE2]>; class SDIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> - : Ii8, XD, Requires<[HasSSE2]>; + : Ii8, XD, Requires<[UseSSE2]>; class S2SI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> - : I, XS, Requires<[HasSSE2]>; + : I, XS, Requires<[UseSSE2]>; class S2SIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> - : Ii8, XS, Requires<[HasSSE2]>; + : Ii8, XS, Requires<[UseSSE2]>; class PDI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, TB, OpSize, - Requires<[HasSSE2]>; + Requires<[UseSSE2]>; class PDIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : Ii8, TB, OpSize, - Requires<[HasSSE2]>; + Requires<[UseSSE2]>; class VSDI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, XD, @@ -405,6 +419,12 @@ class VPDI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, TB, OpSize, Requires<[HasAVX]>; +class MMXSDIi8 o, Format F, dag outs, dag ins, string asm, + list pattern, InstrItinClass itin = IIC_DEFAULT> + : Ii8, XD, Requires<[HasSSE2]>; +class MMXS2SIi8 o, Format F, dag outs, dag ins, string asm, + list pattern, InstrItinClass itin = IIC_DEFAULT> + : Ii8, XS, Requires<[HasSSE2]>; // SSE3 Instruction Templates: // @@ -415,21 +435,23 @@ class VPDI o, Format F, dag outs, dag ins, string asm, class S3SI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, XS, - Requires<[HasSSE3]>; + Requires<[UseSSE3]>; class S3DI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, XD, - Requires<[HasSSE3]>; + Requires<[UseSSE3]>; class S3I o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, TB, OpSize, - Requires<[HasSSE3]>; + Requires<[UseSSE3]>; // SSSE3 Instruction Templates: // // SS38I - SSSE3 instructions with T8 prefix. // SS3AI - SSSE3 instructions with TA prefix. +// MMXSS38I - SSSE3 instructions with T8 prefix and MMX operands. 
+// MMXSS3AI - SSSE3 instructions with TA prefix and MMX operands. // // Note: SSSE3 instructions have 64-bit and 128-bit versions. The 64-bit version // uses the MMX registers. The 64-bit versions are grouped with the MMX @@ -438,10 +460,18 @@ class S3I o, Format F, dag outs, dag ins, string asm, class SS38I o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, T8, - Requires<[HasSSSE3]>; + Requires<[UseSSSE3]>; class SS3AI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : Ii8, TA, + Requires<[UseSSSE3]>; +class MMXSS38I o, Format F, dag outs, dag ins, string asm, + list pattern, InstrItinClass itin = IIC_DEFAULT> + : I, T8, + Requires<[HasSSSE3]>; +class MMXSS3AI o, Format F, dag outs, dag ins, string asm, + list pattern, InstrItinClass itin = IIC_DEFAULT> + : Ii8, TA, Requires<[HasSSSE3]>; // SSE4.1 Instruction Templates: @@ -452,11 +482,11 @@ class SS3AI o, Format F, dag outs, dag ins, string asm, class SS48I o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, T8, - Requires<[HasSSE41]>; + Requires<[UseSSE41]>; class SS4AIi8 o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : Ii8, TA, - Requires<[HasSSE41]>; + Requires<[UseSSE41]>; // SSE4.2 Instruction Templates: // @@ -464,9 +494,10 @@ class SS4AIi8 o, Format F, dag outs, dag ins, string asm, class SS428I o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, T8, - Requires<[HasSSE42]>; + Requires<[UseSSE42]>; // SS42FI - SSE 4.2 instructions with T8XD prefix. +// NOTE: 'HasSSE42' is used as SS42FI is only used for CRC32 insns. class SS42FI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : I, T8XD, Requires<[HasSSE42]>; @@ -475,7 +506,7 @@ class SS42FI o, Format F, dag outs, dag ins, string asm, class SS42AI o, Format F, dag outs, dag ins, string asm, list pattern, InstrItinClass itin = IIC_DEFAULT> : Ii8, TA, - Requires<[HasSSE42]>; + Requires<[UseSSE42]>; // AVX Instruction Templates: // Instructions introduced in AVX (no SSE equivalent forms) diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td index 1db68c8..73ba001 100644 --- a/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -29,6 +29,13 @@ def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>, def X86fmin : SDNode<"X86ISD::FMIN", SDTFPBinOp>; def X86fmax : SDNode<"X86ISD::FMAX", SDTFPBinOp>; + +// Commutative and Associative FMIN and FMAX. 
+def X86fminc : SDNode<"X86ISD::FMINC", SDTFPBinOp, + [SDNPCommutative, SDNPAssociative]>; +def X86fmaxc : SDNode<"X86ISD::FMAXC", SDTFPBinOp, + [SDNPCommutative, SDNPAssociative]>; + def X86fand : SDNode<"X86ISD::FAND", SDTFPBinOp, [SDNPCommutative, SDNPAssociative]>; def X86for : SDNode<"X86ISD::FOR", SDTFPBinOp, @@ -73,18 +80,30 @@ def X86vzmovl : SDNode<"X86ISD::VZEXT_MOVL", SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>; def X86vzmovly : SDNode<"X86ISD::VZEXT_MOVL", - SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>, + SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>, SDTCisOpSmallerThanOp<1, 0> ]>>; def X86vsmovl : SDNode<"X86ISD::VSEXT_MOVL", - SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>; + SDTypeProfile<1, 1, + [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>; def X86vzload : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>; +def X86vzext : SDNode<"X86ISD::VZEXT", + SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>, + SDTCisInt<0>, SDTCisInt<1>]>>; + +def X86vsext : SDNode<"X86ISD::VSEXT", + SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>, + SDTCisInt<0>, SDTCisInt<1>]>>; + def X86vfpext : SDNode<"X86ISD::VFPEXT", SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>, SDTCisFP<0>, SDTCisFP<1>]>>; +def X86vfpround: SDNode<"X86ISD::VFPROUND", + SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>, + SDTCisFP<0>, SDTCisFP<1>]>>; def X86vshldq : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>; def X86vshrdq : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>; @@ -175,8 +194,8 @@ def X86Fmadd : SDNode<"X86ISD::FMADD", SDTFma>; def X86Fnmadd : SDNode<"X86ISD::FNMADD", SDTFma>; def X86Fmsub : SDNode<"X86ISD::FMSUB", SDTFma>; def X86Fnmsub : SDNode<"X86ISD::FNMSUB", SDTFma>; -def X86Fmaddsub : SDNode<"X86ISD::FMSUBADD", SDTFma>; -def X86Fmsubadd : SDNode<"X86ISD::FMADDSUB", SDTFma>; +def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFma>; +def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFma>; def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>, @@ -232,6 +251,10 @@ def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>; def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>; def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>; +// 128-/256-bit extload pattern fragments +def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>; +def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>; + // Like 'store', but always requires 128-bit vector alignment. 
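On the X86fminc/X86fmaxc nodes introduced near the top of this hunk: unlike X86fmin/X86fmax they are tagged SDNPCommutative and SDNPAssociative, which is only sound when NaN and signed-zero ordering can be disregarded, since the strict SSE min/max semantics return the second operand in those cases. A self-contained C++ sketch of that asymmetry (the helper name is mine, and the select simply models the minss behavior):

    // Illustrative sketch only: why the strict FMIN node cannot be marked
    // commutative. (x < y ? x : y) is order-sensitive for NaNs and +/-0.
    #include <cmath>
    #include <cstdio>

    static float sse_like_min(float X, float Y) {
      // minss returns the second operand when the comparison is false or
      // unordered, which is exactly what this select models.
      return X < Y ? X : Y;
    }

    int main() {
      float NaN = std::nanf("");
      std::printf("min(NaN, 1) = %f\n", sse_like_min(NaN, 1.0f));   // 1.000000
      std::printf("min(1, NaN) = %f\n", sse_like_min(1.0f, NaN));   // nan
      std::printf("min(+0, -0) = %g\n", sse_like_min(0.0f, -0.0f)); // -0
      std::printf("min(-0, +0) = %g\n", sse_like_min(-0.0f, 0.0f)); // 0
      return 0;
    }
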
def alignedstore : PatFrag<(ops node:$val, node:$ptr), (store node:$val, node:$ptr), [{ diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index cca04e5..5a99ff0 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -561,6 +561,16 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VSQRTPSYr_Int, X86::VSQRTPSYm_Int, TB_ALIGN_32 }, { X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrm, TB_NO_REVERSE }, { X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrm, TB_NO_REVERSE }, + + // BMI/BMI2 foldable instructions + { X86::RORX32ri, X86::RORX32mi, 0 }, + { X86::RORX64ri, X86::RORX64mi, 0 }, + { X86::SARX32rr, X86::SARX32rm, 0 }, + { X86::SARX64rr, X86::SARX64rm, 0 }, + { X86::SHRX32rr, X86::SHRX32rm, 0 }, + { X86::SHRX64rr, X86::SHRX64rm, 0 }, + { X86::SHLX32rr, X86::SHLX32rm, 0 }, + { X86::SHLX64rr, X86::SHLX64rm, 0 }, }; for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) { @@ -1110,6 +1120,44 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VPUNPCKLWDYrr, X86::VPUNPCKLWDYrm, TB_ALIGN_32 }, { X86::VPXORYrr, X86::VPXORYrm, TB_ALIGN_32 }, // FIXME: add AVX 256-bit foldable instructions + + // FMA4 foldable patterns + { X86::VFMADDSS4rr, X86::VFMADDSS4mr, 0 }, + { X86::VFMADDSD4rr, X86::VFMADDSD4mr, 0 }, + { X86::VFMADDPS4rr, X86::VFMADDPS4mr, TB_ALIGN_16 }, + { X86::VFMADDPD4rr, X86::VFMADDPD4mr, TB_ALIGN_16 }, + { X86::VFMADDPS4rrY, X86::VFMADDPS4mrY, TB_ALIGN_32 }, + { X86::VFMADDPD4rrY, X86::VFMADDPD4mrY, TB_ALIGN_32 }, + { X86::VFNMADDSS4rr, X86::VFNMADDSS4mr, 0 }, + { X86::VFNMADDSD4rr, X86::VFNMADDSD4mr, 0 }, + { X86::VFNMADDPS4rr, X86::VFNMADDPS4mr, TB_ALIGN_16 }, + { X86::VFNMADDPD4rr, X86::VFNMADDPD4mr, TB_ALIGN_16 }, + { X86::VFNMADDPS4rrY, X86::VFNMADDPS4mrY, TB_ALIGN_32 }, + { X86::VFNMADDPD4rrY, X86::VFNMADDPD4mrY, TB_ALIGN_32 }, + { X86::VFMSUBSS4rr, X86::VFMSUBSS4mr, 0 }, + { X86::VFMSUBSD4rr, X86::VFMSUBSD4mr, 0 }, + { X86::VFMSUBPS4rr, X86::VFMSUBPS4mr, TB_ALIGN_16 }, + { X86::VFMSUBPD4rr, X86::VFMSUBPD4mr, TB_ALIGN_16 }, + { X86::VFMSUBPS4rrY, X86::VFMSUBPS4mrY, TB_ALIGN_32 }, + { X86::VFMSUBPD4rrY, X86::VFMSUBPD4mrY, TB_ALIGN_32 }, + { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4mr, 0 }, + { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4mr, 0 }, + { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4mr, TB_ALIGN_16 }, + { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4mr, TB_ALIGN_16 }, + { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4mrY, TB_ALIGN_32 }, + { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4mrY, TB_ALIGN_32 }, + { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4mr, TB_ALIGN_16 }, + { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4mr, TB_ALIGN_16 }, + { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4mrY, TB_ALIGN_32 }, + { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4mrY, TB_ALIGN_32 }, + { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4mr, TB_ALIGN_16 }, + { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4mr, TB_ALIGN_16 }, + { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4mrY, TB_ALIGN_32 }, + { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4mrY, TB_ALIGN_32 }, + + // BMI/BMI2 foldable instructions + { X86::MULX32rr, X86::MULX32rm, 0 }, + { X86::MULX64rr, X86::MULX64rm, 0 }, }; for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) { @@ -1145,10 +1193,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VFMADDPDr132rY, X86::VFMADDPDr132mY, TB_ALIGN_32 }, { X86::VFMADDPSr213rY, X86::VFMADDPSr213mY, TB_ALIGN_32 }, { X86::VFMADDPDr213rY, X86::VFMADDPDr213mY, TB_ALIGN_32 }, - { X86::VFMADDPSr213r_Int, X86::VFMADDPSr213m_Int, TB_ALIGN_16 }, - { X86::VFMADDPDr213r_Int, X86::VFMADDPDr213m_Int, TB_ALIGN_16 }, - { 
X86::VFMADDPSr213rY_Int, X86::VFMADDPSr213mY_Int, TB_ALIGN_32 }, - { X86::VFMADDPDr213rY_Int, X86::VFMADDPDr213mY_Int, TB_ALIGN_32 }, { X86::VFNMADDSSr231r, X86::VFNMADDSSr231m, 0 }, { X86::VFNMADDSDr231r, X86::VFNMADDSDr231m, 0 }, @@ -1171,10 +1215,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VFNMADDPDr132rY, X86::VFNMADDPDr132mY, TB_ALIGN_32 }, { X86::VFNMADDPSr213rY, X86::VFNMADDPSr213mY, TB_ALIGN_32 }, { X86::VFNMADDPDr213rY, X86::VFNMADDPDr213mY, TB_ALIGN_32 }, - { X86::VFNMADDPSr213r_Int, X86::VFNMADDPSr213m_Int, TB_ALIGN_16 }, - { X86::VFNMADDPDr213r_Int, X86::VFNMADDPDr213m_Int, TB_ALIGN_16 }, - { X86::VFNMADDPSr213rY_Int, X86::VFNMADDPSr213mY_Int, TB_ALIGN_32 }, - { X86::VFNMADDPDr213rY_Int, X86::VFNMADDPDr213mY_Int, TB_ALIGN_32 }, { X86::VFMSUBSSr231r, X86::VFMSUBSSr231m, 0 }, { X86::VFMSUBSDr231r, X86::VFMSUBSDr231m, 0 }, @@ -1197,10 +1237,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VFMSUBPDr132rY, X86::VFMSUBPDr132mY, TB_ALIGN_32 }, { X86::VFMSUBPSr213rY, X86::VFMSUBPSr213mY, TB_ALIGN_32 }, { X86::VFMSUBPDr213rY, X86::VFMSUBPDr213mY, TB_ALIGN_32 }, - { X86::VFMSUBPSr213r_Int, X86::VFMSUBPSr213m_Int, TB_ALIGN_16 }, - { X86::VFMSUBPDr213r_Int, X86::VFMSUBPDr213m_Int, TB_ALIGN_16 }, - { X86::VFMSUBPSr213rY_Int, X86::VFMSUBPSr213mY_Int, TB_ALIGN_32 }, - { X86::VFMSUBPDr213rY_Int, X86::VFMSUBPDr213mY_Int, TB_ALIGN_32 }, { X86::VFNMSUBSSr231r, X86::VFNMSUBSSr231m, 0 }, { X86::VFNMSUBSDr231r, X86::VFNMSUBSDr231m, 0 }, @@ -1223,10 +1259,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VFNMSUBPDr132rY, X86::VFNMSUBPDr132mY, TB_ALIGN_32 }, { X86::VFNMSUBPSr213rY, X86::VFNMSUBPSr213mY, TB_ALIGN_32 }, { X86::VFNMSUBPDr213rY, X86::VFNMSUBPDr213mY, TB_ALIGN_32 }, - { X86::VFNMSUBPSr213r_Int, X86::VFNMSUBPSr213m_Int, TB_ALIGN_16 }, - { X86::VFNMSUBPDr213r_Int, X86::VFNMSUBPDr213m_Int, TB_ALIGN_16 }, - { X86::VFNMSUBPSr213rY_Int, X86::VFNMSUBPSr213mY_Int, TB_ALIGN_32 }, - { X86::VFNMSUBPDr213rY_Int, X86::VFNMSUBPDr213mY_Int, TB_ALIGN_32 }, { X86::VFMADDSUBPSr231r, X86::VFMADDSUBPSr231m, TB_ALIGN_16 }, { X86::VFMADDSUBPDr231r, X86::VFMADDSUBPDr231m, TB_ALIGN_16 }, @@ -1240,10 +1272,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VFMADDSUBPDr132rY, X86::VFMADDSUBPDr132mY, TB_ALIGN_32 }, { X86::VFMADDSUBPSr213rY, X86::VFMADDSUBPSr213mY, TB_ALIGN_32 }, { X86::VFMADDSUBPDr213rY, X86::VFMADDSUBPDr213mY, TB_ALIGN_32 }, - { X86::VFMADDSUBPSr213r_Int, X86::VFMADDSUBPSr213m_Int, TB_ALIGN_16 }, - { X86::VFMADDSUBPDr213r_Int, X86::VFMADDSUBPDr213m_Int, TB_ALIGN_16 }, - { X86::VFMADDSUBPSr213rY_Int, X86::VFMADDSUBPSr213mY_Int, TB_ALIGN_32 }, - { X86::VFMADDSUBPDr213rY_Int, X86::VFMADDSUBPDr213mY_Int, TB_ALIGN_32 }, { X86::VFMSUBADDPSr231r, X86::VFMSUBADDPSr231m, TB_ALIGN_16 }, { X86::VFMSUBADDPDr231r, X86::VFMSUBADDPDr231m, TB_ALIGN_16 }, @@ -1257,10 +1285,40 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::VFMSUBADDPDr132rY, X86::VFMSUBADDPDr132mY, TB_ALIGN_32 }, { X86::VFMSUBADDPSr213rY, X86::VFMSUBADDPSr213mY, TB_ALIGN_32 }, { X86::VFMSUBADDPDr213rY, X86::VFMSUBADDPDr213mY, TB_ALIGN_32 }, - { X86::VFMSUBADDPSr213r_Int, X86::VFMSUBADDPSr213m_Int, TB_ALIGN_16 }, - { X86::VFMSUBADDPDr213r_Int, X86::VFMSUBADDPDr213m_Int, TB_ALIGN_16 }, - { X86::VFMSUBADDPSr213rY_Int, X86::VFMSUBADDPSr213mY_Int, TB_ALIGN_32 }, - { X86::VFMSUBADDPDr213rY_Int, X86::VFMSUBADDPDr213mY_Int, TB_ALIGN_32 }, + + // FMA4 foldable patterns + { X86::VFMADDSS4rr, X86::VFMADDSS4rm, 0 }, + { X86::VFMADDSD4rr, X86::VFMADDSD4rm, 0 }, + { X86::VFMADDPS4rr, X86::VFMADDPS4rm, 
TB_ALIGN_16 }, + { X86::VFMADDPD4rr, X86::VFMADDPD4rm, TB_ALIGN_16 }, + { X86::VFMADDPS4rrY, X86::VFMADDPS4rmY, TB_ALIGN_32 }, + { X86::VFMADDPD4rrY, X86::VFMADDPD4rmY, TB_ALIGN_32 }, + { X86::VFNMADDSS4rr, X86::VFNMADDSS4rm, 0 }, + { X86::VFNMADDSD4rr, X86::VFNMADDSD4rm, 0 }, + { X86::VFNMADDPS4rr, X86::VFNMADDPS4rm, TB_ALIGN_16 }, + { X86::VFNMADDPD4rr, X86::VFNMADDPD4rm, TB_ALIGN_16 }, + { X86::VFNMADDPS4rrY, X86::VFNMADDPS4rmY, TB_ALIGN_32 }, + { X86::VFNMADDPD4rrY, X86::VFNMADDPD4rmY, TB_ALIGN_32 }, + { X86::VFMSUBSS4rr, X86::VFMSUBSS4rm, 0 }, + { X86::VFMSUBSD4rr, X86::VFMSUBSD4rm, 0 }, + { X86::VFMSUBPS4rr, X86::VFMSUBPS4rm, TB_ALIGN_16 }, + { X86::VFMSUBPD4rr, X86::VFMSUBPD4rm, TB_ALIGN_16 }, + { X86::VFMSUBPS4rrY, X86::VFMSUBPS4rmY, TB_ALIGN_32 }, + { X86::VFMSUBPD4rrY, X86::VFMSUBPD4rmY, TB_ALIGN_32 }, + { X86::VFNMSUBSS4rr, X86::VFNMSUBSS4rm, 0 }, + { X86::VFNMSUBSD4rr, X86::VFNMSUBSD4rm, 0 }, + { X86::VFNMSUBPS4rr, X86::VFNMSUBPS4rm, TB_ALIGN_16 }, + { X86::VFNMSUBPD4rr, X86::VFNMSUBPD4rm, TB_ALIGN_16 }, + { X86::VFNMSUBPS4rrY, X86::VFNMSUBPS4rmY, TB_ALIGN_32 }, + { X86::VFNMSUBPD4rrY, X86::VFNMSUBPD4rmY, TB_ALIGN_32 }, + { X86::VFMADDSUBPS4rr, X86::VFMADDSUBPS4rm, TB_ALIGN_16 }, + { X86::VFMADDSUBPD4rr, X86::VFMADDSUBPD4rm, TB_ALIGN_16 }, + { X86::VFMADDSUBPS4rrY, X86::VFMADDSUBPS4rmY, TB_ALIGN_32 }, + { X86::VFMADDSUBPD4rrY, X86::VFMADDSUBPD4rmY, TB_ALIGN_32 }, + { X86::VFMSUBADDPS4rr, X86::VFMSUBADDPS4rm, TB_ALIGN_16 }, + { X86::VFMSUBADDPD4rr, X86::VFMSUBADDPD4rm, TB_ALIGN_16 }, + { X86::VFMSUBADDPS4rrY, X86::VFMSUBADDPS4rmY, TB_ALIGN_32 }, + { X86::VFMSUBADDPD4rrY, X86::VFMSUBADDPD4rmY, TB_ALIGN_32 }, }; for (unsigned i = 0, e = array_lengthof(OpTbl3); i != e; ++i) { @@ -1318,8 +1376,7 @@ X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, SrcReg = MI.getOperand(1).getReg(); DstReg = MI.getOperand(0).getReg(); switch (MI.getOpcode()) { - default: - llvm_unreachable(0); + default: llvm_unreachable("Unreachable!"); case X86::MOVSX16rr8: case X86::MOVZX16rr8: case X86::MOVSX32rr8: @@ -1483,69 +1540,69 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI, AliasAnalysis *AA) const { switch (MI->getOpcode()) { default: break; - case X86::MOV8rm: - case X86::MOV16rm: - case X86::MOV32rm: - case X86::MOV64rm: - case X86::LD_Fp64m: - case X86::MOVSSrm: - case X86::MOVSDrm: - case X86::MOVAPSrm: - case X86::MOVUPSrm: - case X86::MOVAPDrm: - case X86::MOVDQArm: - case X86::VMOVSSrm: - case X86::VMOVSDrm: - case X86::VMOVAPSrm: - case X86::VMOVUPSrm: - case X86::VMOVAPDrm: - case X86::VMOVDQArm: - case X86::VMOVAPSYrm: - case X86::VMOVUPSYrm: - case X86::VMOVAPDYrm: - case X86::VMOVDQAYrm: - case X86::MMX_MOVD64rm: - case X86::MMX_MOVQ64rm: - case X86::FsVMOVAPSrm: - case X86::FsVMOVAPDrm: - case X86::FsMOVAPSrm: - case X86::FsMOVAPDrm: { - // Loads from constant pools are trivially rematerializable. - if (MI->getOperand(1).isReg() && - MI->getOperand(2).isImm() && - MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 && - MI->isInvariantLoad(AA)) { - unsigned BaseReg = MI->getOperand(1).getReg(); - if (BaseReg == 0 || BaseReg == X86::RIP) - return true; - // Allow re-materialization of PIC load. 
- if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal()) - return false; - const MachineFunction &MF = *MI->getParent()->getParent(); - const MachineRegisterInfo &MRI = MF.getRegInfo(); - return regIsPICBase(BaseReg, MRI); - } - return false; + case X86::MOV8rm: + case X86::MOV16rm: + case X86::MOV32rm: + case X86::MOV64rm: + case X86::LD_Fp64m: + case X86::MOVSSrm: + case X86::MOVSDrm: + case X86::MOVAPSrm: + case X86::MOVUPSrm: + case X86::MOVAPDrm: + case X86::MOVDQArm: + case X86::VMOVSSrm: + case X86::VMOVSDrm: + case X86::VMOVAPSrm: + case X86::VMOVUPSrm: + case X86::VMOVAPDrm: + case X86::VMOVDQArm: + case X86::VMOVAPSYrm: + case X86::VMOVUPSYrm: + case X86::VMOVAPDYrm: + case X86::VMOVDQAYrm: + case X86::MMX_MOVD64rm: + case X86::MMX_MOVQ64rm: + case X86::FsVMOVAPSrm: + case X86::FsVMOVAPDrm: + case X86::FsMOVAPSrm: + case X86::FsMOVAPDrm: { + // Loads from constant pools are trivially rematerializable. + if (MI->getOperand(1).isReg() && + MI->getOperand(2).isImm() && + MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 && + MI->isInvariantLoad(AA)) { + unsigned BaseReg = MI->getOperand(1).getReg(); + if (BaseReg == 0 || BaseReg == X86::RIP) + return true; + // Allow re-materialization of PIC load. + if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal()) + return false; + const MachineFunction &MF = *MI->getParent()->getParent(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + return regIsPICBase(BaseReg, MRI); } + return false; + } - case X86::LEA32r: - case X86::LEA64r: { - if (MI->getOperand(2).isImm() && - MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 && - !MI->getOperand(4).isReg()) { - // lea fi#, lea GV, etc. are all rematerializable. - if (!MI->getOperand(1).isReg()) - return true; - unsigned BaseReg = MI->getOperand(1).getReg(); - if (BaseReg == 0) - return true; - // Allow re-materialization of lea PICBase + x. - const MachineFunction &MF = *MI->getParent()->getParent(); - const MachineRegisterInfo &MRI = MF.getRegInfo(); - return regIsPICBase(BaseReg, MRI); - } - return false; - } + case X86::LEA32r: + case X86::LEA64r: { + if (MI->getOperand(2).isImm() && + MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 && + !MI->getOperand(4).isReg()) { + // lea fi#, lea GV, etc. are all rematerializable. + if (!MI->getOperand(1).isReg()) + return true; + unsigned BaseReg = MI->getOperand(1).getReg(); + if (BaseReg == 0) + return true; + // Allow re-materialization of lea PICBase + x. 
+ const MachineFunction &MF = *MI->getParent()->getParent(); + const MachineRegisterInfo &MRI = MF.getRegInfo(); + return regIsPICBase(BaseReg, MRI); + } + return false; + } } // All other instructions marked M_REMATERIALIZABLE are always trivially @@ -1654,7 +1711,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB, case X86::MOV64r0: { if (!isSafeToClobberEFLAGS(MBB, I)) { switch (Opc) { - default: break; + default: llvm_unreachable("Unreachable!"); case X86::MOV8r0: Opc = X86::MOV8ri; break; case X86::MOV16r0: Opc = X86::MOV16ri; break; case X86::MOV32r0: Opc = X86::MOV32ri; break; @@ -1727,8 +1784,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc, MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(Opc), leaOutReg); switch (MIOpc) { - default: - llvm_unreachable(0); + default: llvm_unreachable("Unreachable!"); case X86::SHL16ri: { unsigned ShAmt = MI->getOperand(2).getImm(); MIB.addReg(0).addImm(1 << ShAmt) @@ -1812,10 +1868,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, MachineInstr *MI = MBBI; MachineFunction &MF = *MI->getParent()->getParent(); // All instructions input are two-addr instructions. Get the known operands. - unsigned Dest = MI->getOperand(0).getReg(); - unsigned Src = MI->getOperand(1).getReg(); - bool isDead = MI->getOperand(0).isDead(); - bool isKill = MI->getOperand(1).isKill(); + const MachineOperand &Dest = MI->getOperand(0); + const MachineOperand &Src = MI->getOperand(1); MachineInstr *NewMI = NULL; // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When @@ -1833,11 +1887,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, unsigned B = MI->getOperand(1).getReg(); unsigned C = MI->getOperand(2).getReg(); if (B != C) return 0; - unsigned A = MI->getOperand(0).getReg(); unsigned M = MI->getOperand(3).getImm(); NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri)) - .addReg(A, RegState::Define | getDeadRegState(isDead)) - .addReg(B, getKillRegState(isKill)).addImm(M); + .addOperand(Dest).addOperand(Src).addImm(M); break; } case X86::SHUFPDrri: { @@ -1847,15 +1899,13 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, unsigned B = MI->getOperand(1).getReg(); unsigned C = MI->getOperand(2).getReg(); if (B != C) return 0; - unsigned A = MI->getOperand(0).getReg(); unsigned M = MI->getOperand(3).getImm(); // Convert to PSHUFD mask. M = ((M & 1) << 1) | ((M & 1) << 3) | ((M & 2) << 4) | ((M & 2) << 6)| 0x44; NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri)) - .addReg(A, RegState::Define | getDeadRegState(isDead)) - .addReg(B, getKillRegState(isKill)).addImm(M); + .addOperand(Dest).addOperand(Src).addImm(M); break; } case X86::SHL64ri: { @@ -1866,15 +1916,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (ShAmt == 0 || ShAmt >= 4) return 0; // LEA can't handle RSP. 
- if (TargetRegisterInfo::isVirtualRegister(Src) && - !MF.getRegInfo().constrainRegClass(Src, &X86::GR64_NOSPRegClass)) + if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && + !MF.getRegInfo().constrainRegClass(Src.getReg(), + &X86::GR64_NOSPRegClass)) return 0; NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) - .addReg(Dest, RegState::Define | getDeadRegState(isDead)) - .addReg(0).addImm(1 << ShAmt) - .addReg(Src, getKillRegState(isKill)) - .addImm(0).addReg(0); + .addOperand(Dest) + .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); break; } case X86::SHL32ri: { @@ -1885,15 +1934,15 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (ShAmt == 0 || ShAmt >= 4) return 0; // LEA can't handle ESP. - if (TargetRegisterInfo::isVirtualRegister(Src) && - !MF.getRegInfo().constrainRegClass(Src, &X86::GR32_NOSPRegClass)) + if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && + !MF.getRegInfo().constrainRegClass(Src.getReg(), + &X86::GR32_NOSPRegClass)) return 0; unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addReg(Dest, RegState::Define | getDeadRegState(isDead)) - .addReg(0).addImm(1 << ShAmt) - .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0); + .addOperand(Dest) + .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); break; } case X86::SHL16ri: { @@ -1906,10 +1955,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (DisableLEA16) return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) - .addReg(Dest, RegState::Define | getDeadRegState(isDead)) - .addReg(0).addImm(1 << ShAmt) - .addReg(Src, getKillRegState(isKill)) - .addImm(0).addReg(0); + .addOperand(Dest) + .addReg(0).addImm(1 << ShAmt).addOperand(Src).addImm(0).addReg(0); break; } default: { @@ -1932,14 +1979,12 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, (const TargetRegisterClass*)&X86::GR32_NOSPRegClass; // LEA can't handle RSP. - if (TargetRegisterInfo::isVirtualRegister(Src) && - !MF.getRegInfo().constrainRegClass(Src, RC)) + if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && + !MF.getRegInfo().constrainRegClass(Src.getReg(), RC)) return 0; - NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, 1); + NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) + .addOperand(Dest).addOperand(Src), 1); break; } case X86::INC16r: @@ -1947,10 +1992,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (DisableLEA16) return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!"); - NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, 1); + NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) + .addOperand(Dest).addOperand(Src), 1); break; case X86::DEC64r: case X86::DEC32r: @@ -1962,14 +2005,12 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, (const TargetRegisterClass*)&X86::GR64_NOSPRegClass : (const TargetRegisterClass*)&X86::GR32_NOSPRegClass; // LEA can't handle RSP. 
- if (TargetRegisterInfo::isVirtualRegister(Src) && - !MF.getRegInfo().constrainRegClass(Src, RC)) + if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) && + !MF.getRegInfo().constrainRegClass(Src.getReg(), RC)) return 0; - NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, -1); + NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) + .addOperand(Dest).addOperand(Src), -1); break; } case X86::DEC16r: @@ -1977,10 +2018,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (DisableLEA16) return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!"); - NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, -1); + NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) + .addOperand(Dest).addOperand(Src), -1); break; case X86::ADD64rr: case X86::ADD64rr_DB: @@ -2007,9 +2046,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, return 0; NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, Src2, isKill2); + .addOperand(Dest), + Src.getReg(), Src.isKill(), Src2, isKill2); // Preserve undefness of the operands. bool isUndef = MI->getOperand(1).isUndef(); @@ -2029,9 +2067,15 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, unsigned Src2 = MI->getOperand(2).getReg(); bool isKill2 = MI->getOperand(2).isKill(); NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, Src2, isKill2); + .addOperand(Dest), + Src.getReg(), Src.isKill(), Src2, isKill2); + + // Preserve undefness of the operands. + bool isUndef = MI->getOperand(1).isUndef(); + bool isUndef2 = MI->getOperand(2).isUndef(); + NewMI->getOperand(1).setIsUndef(isUndef); + NewMI->getOperand(3).setIsUndef(isUndef2); + if (LV && isKill2) LV->replaceKillInstruction(Src2, MI, NewMI); break; @@ -2041,10 +2085,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::ADD64ri32_DB: case X86::ADD64ri8_DB: assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); - NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, MI->getOperand(2).getImm()); + NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r)) + .addOperand(Dest).addOperand(Src), + MI->getOperand(2).getImm()); break; case X86::ADD32ri: case X86::ADD32ri8: @@ -2052,10 +2095,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, case X86::ADD32ri8_DB: { assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r; - NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, MI->getOperand(2).getImm()); + NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc)) + .addOperand(Dest).addOperand(Src), + MI->getOperand(2).getImm()); break; } case X86::ADD16ri: @@ -2065,10 +2107,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (DisableLEA16) return is64Bit ? 
convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0; assert(MI->getNumOperands() >= 3 && "Unknown add instruction!"); - NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) - .addReg(Dest, RegState::Define | - getDeadRegState(isDead)), - Src, isKill, MI->getOperand(2).getImm()); + NewMI = addOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r)) + .addOperand(Dest).addOperand(Src), + MI->getOperand(2).getImm()); break; } } @@ -2077,10 +2118,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI, if (!NewMI) return 0; if (LV) { // Update live variables - if (isKill) - LV->replaceKillInstruction(Src, MI, NewMI); - if (isDead) - LV->replaceKillInstruction(Dest, MI, NewMI); + if (Src.isKill()) + LV->replaceKillInstruction(Src.getReg(), MI, NewMI); + if (Dest.isDead()) + LV->replaceKillInstruction(Dest.getReg(), MI, NewMI); } MFI->insert(MBBI, NewMI); // Insert the new inst @@ -2120,57 +2161,25 @@ X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const { MI->getOperand(3).setImm(Size-Amt); return TargetInstrInfoImpl::commuteInstruction(MI, NewMI); } - case X86::CMOVB16rr: - case X86::CMOVB32rr: - case X86::CMOVB64rr: - case X86::CMOVAE16rr: - case X86::CMOVAE32rr: - case X86::CMOVAE64rr: - case X86::CMOVE16rr: - case X86::CMOVE32rr: - case X86::CMOVE64rr: - case X86::CMOVNE16rr: - case X86::CMOVNE32rr: - case X86::CMOVNE64rr: - case X86::CMOVBE16rr: - case X86::CMOVBE32rr: - case X86::CMOVBE64rr: - case X86::CMOVA16rr: - case X86::CMOVA32rr: - case X86::CMOVA64rr: - case X86::CMOVL16rr: - case X86::CMOVL32rr: - case X86::CMOVL64rr: - case X86::CMOVGE16rr: - case X86::CMOVGE32rr: - case X86::CMOVGE64rr: - case X86::CMOVLE16rr: - case X86::CMOVLE32rr: - case X86::CMOVLE64rr: - case X86::CMOVG16rr: - case X86::CMOVG32rr: - case X86::CMOVG64rr: - case X86::CMOVS16rr: - case X86::CMOVS32rr: - case X86::CMOVS64rr: - case X86::CMOVNS16rr: - case X86::CMOVNS32rr: - case X86::CMOVNS64rr: - case X86::CMOVP16rr: - case X86::CMOVP32rr: - case X86::CMOVP64rr: - case X86::CMOVNP16rr: - case X86::CMOVNP32rr: - case X86::CMOVNP64rr: - case X86::CMOVO16rr: - case X86::CMOVO32rr: - case X86::CMOVO64rr: - case X86::CMOVNO16rr: - case X86::CMOVNO32rr: - case X86::CMOVNO64rr: { - unsigned Opc = 0; + case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr: + case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr: + case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr: + case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr: + case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr: + case X86::CMOVA16rr: case X86::CMOVA32rr: case X86::CMOVA64rr: + case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr: + case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr: + case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr: + case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr: + case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr: + case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr: + case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr: + case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr: + case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr: + case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: { + unsigned Opc; switch (MI->getOpcode()) { - default: break; + default: llvm_unreachable("Unreachable!"); case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break; case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break; case 
X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break; @@ -2279,7 +2288,7 @@ static X86::CondCode getCondFromSETOpc(unsigned Opc) { } /// getCondFromCmovOpc - return condition code of a CMov opcode. -static X86::CondCode getCondFromCMovOpc(unsigned Opc) { +X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) { switch (Opc) { default: return X86::COND_INVALID; case X86::CMOVA16rm: case X86::CMOVA16rr: case X86::CMOVA32rm: @@ -2402,7 +2411,7 @@ static X86::CondCode getSwappedCondition(X86::CondCode CC) { /// whether it has memory operand. static unsigned getSETFromCond(X86::CondCode CC, bool HasMemoryOperand) { - static const unsigned Opc[16][2] = { + static const uint16_t Opc[16][2] = { { X86::SETAr, X86::SETAm }, { X86::SETAEr, X86::SETAEm }, { X86::SETBr, X86::SETBm }, @@ -2429,7 +2438,7 @@ static unsigned getSETFromCond(X86::CondCode CC, /// register size in bytes, and operand type. static unsigned getCMovFromCond(X86::CondCode CC, unsigned RegBytes, bool HasMemoryOperand) { - static const unsigned Opc[32][3] = { + static const uint16_t Opc[32][3] = { { X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr }, { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr }, { X86::CMOVB16rr, X86::CMOVB32rr, X86::CMOVB64rr }, @@ -2762,19 +2771,18 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, // SrcReg(GR64) -> DestReg(VR64) if (X86::GR64RegClass.contains(DestReg)) { - if (X86::VR128RegClass.contains(SrcReg)) { + if (X86::VR128RegClass.contains(SrcReg)) // Copy from a VR128 register to a GR64 register. return HasAVX ? X86::VMOVPQIto64rr : X86::MOVPQIto64rr; - } else if (X86::VR64RegClass.contains(SrcReg)) { + if (X86::VR64RegClass.contains(SrcReg)) // Copy from a VR64 register to a GR64 register. return X86::MOVSDto64rr; - } } else if (X86::GR64RegClass.contains(SrcReg)) { // Copy from a GR64 register to a VR128 register. if (X86::VR128RegClass.contains(DestReg)) return HasAVX ? X86::VMOV64toPQIrr : X86::MOV64toPQIrr; // Copy from a GR64 register to a VR64 register. - else if (X86::VR64RegClass.contains(DestReg)) + if (X86::VR64RegClass.contains(DestReg)) return X86::MOV64toSDrr; } @@ -2782,12 +2790,12 @@ static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg, // SrcReg(GR32) -> DestReg(FR32) if (X86::GR32RegClass.contains(DestReg) && X86::FR32RegClass.contains(SrcReg)) - // Copy from a FR32 register to a GR32 register. - return HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr; + // Copy from a FR32 register to a GR32 register. + return HasAVX ? X86::VMOVSS2DIrr : X86::MOVSS2DIrr; if (X86::FR32RegClass.contains(DestReg) && X86::GR32RegClass.contains(SrcReg)) - // Copy from a GR32 register to a FR32 register. - return HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr; + // Copy from a GR32 register to a FR32 register. + return HasAVX ? X86::VMOVDI2SSrr : X86::MOVDI2SSrr; return 0; } @@ -2798,7 +2806,7 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, bool KillSrc) const { // First deal with the normal symmetric copies. 
bool HasAVX = TM.getSubtarget().hasAVX(); - unsigned Opc = 0; + unsigned Opc; if (X86::GR64RegClass.contains(DestReg, SrcReg)) Opc = X86::MOV64rr; else if (X86::GR32RegClass.contains(DestReg, SrcReg)) @@ -2837,7 +2845,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, BuildMI(MBB, MI, DL, get(X86::PUSHF64)); BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg); return; - } else if (X86::GR32RegClass.contains(DestReg)) { + } + if (X86::GR32RegClass.contains(DestReg)) { BuildMI(MBB, MI, DL, get(X86::PUSHF32)); BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg); return; @@ -2849,7 +2858,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, .addReg(SrcReg, getKillRegState(KillSrc)); BuildMI(MBB, MI, DL, get(X86::POPF64)); return; - } else if (X86::GR32RegClass.contains(SrcReg)) { + } + if (X86::GR32RegClass.contains(SrcReg)) { BuildMI(MBB, MI, DL, get(X86::PUSH32r)) .addReg(SrcReg, getKillRegState(KillSrc)); BuildMI(MBB, MI, DL, get(X86::POPF32)); @@ -3139,11 +3149,19 @@ inline static bool isDefConvertible(MachineInstr *MI) { case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr: case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm: case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm: + case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r: + case X86::DEC64m: case X86::DEC32m: case X86::DEC16m: case X86::DEC8m: + case X86::DEC64_32r: case X86::DEC64_16r: + case X86::DEC64_32m: case X86::DEC64_16m: case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri: case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8: case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr: case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm: case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm: + case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r: + case X86::INC64m: case X86::INC32m: case X86::INC16m: case X86::INC8m: + case X86::INC64_32r: case X86::INC64_16r: + case X86::INC64_32m: case X86::INC64_16m: case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri: case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8: case X86::AND8ri: case X86::AND64rr: case X86::AND32rr: @@ -3193,7 +3211,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, return false; // There is no use of the destination register, we can replace SUB with CMP. switch (CmpInstr->getOpcode()) { - default: llvm_unreachable(0); + default: llvm_unreachable("Unreachable!"); case X86::SUB64rm: NewOpcode = X86::CMP64rm; break; case X86::SUB32rm: NewOpcode = X86::CMP32rm; break; case X86::SUB16rm: NewOpcode = X86::CMP16rm; break; @@ -3318,7 +3336,7 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, if (OldCC != X86::COND_INVALID) OpcIsSET = true; else - OldCC = getCondFromCMovOpc(Instr.getOpcode()); + OldCC = X86::getCondFromCMovOpc(Instr.getOpcode()); } if (OldCC == X86::COND_INVALID) return false; } @@ -3383,12 +3401,14 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, Sub->getParent()->insert(MachineBasicBlock::iterator(Sub), Movr0Inst); } - // Make sure Sub instruction defines EFLAGS. + // Make sure Sub instruction defines EFLAGS and mark the def live. 
+ unsigned LastOperand = Sub->getNumOperands() - 1; assert(Sub->getNumOperands() >= 2 && - Sub->getOperand(Sub->getNumOperands()-1).isReg() && - Sub->getOperand(Sub->getNumOperands()-1).getReg() == X86::EFLAGS && + Sub->getOperand(LastOperand).isReg() && + Sub->getOperand(LastOperand).getReg() == X86::EFLAGS && "EFLAGS should be the last operand of SUB, ADD, OR, XOR, AND"); - Sub->getOperand(Sub->getNumOperands()-1).setIsDef(true); + Sub->getOperand(LastOperand).setIsDef(true); + Sub->getOperand(LastOperand).setIsDead(false); CmpInstr->eraseFromParent(); // Modify the condition code of instructions in OpsToUpdate. @@ -3497,10 +3517,25 @@ static bool Expand2AddrUndef(MachineInstr *MI, const MCInstrDesc &Desc) { bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const { bool HasAVX = TM.getSubtarget().hasAVX(); switch (MI->getOpcode()) { + case X86::SETB_C8r: + return Expand2AddrUndef(MI, get(X86::SBB8rr)); + case X86::SETB_C16r: + return Expand2AddrUndef(MI, get(X86::SBB16rr)); + case X86::SETB_C32r: + return Expand2AddrUndef(MI, get(X86::SBB32rr)); + case X86::SETB_C64r: + return Expand2AddrUndef(MI, get(X86::SBB64rr)); case X86::V_SET0: case X86::FsFLD0SS: case X86::FsFLD0SD: return Expand2AddrUndef(MI, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr)); + case X86::AVX_SET0: + assert(HasAVX && "AVX not supported"); + return Expand2AddrUndef(MI, get(X86::VXORPSYrr)); + case X86::V_SETALLONES: + return Expand2AddrUndef(MI, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr)); + case X86::AVX2_SETALLONES: + return Expand2AddrUndef(MI, get(X86::VPCMPEQDYrr)); case X86::TEST8ri_NOREX: MI->setDesc(get(X86::TEST8ri)); return true; @@ -3614,14 +3649,16 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, OpcodeTablePtr = &RegOp2MemOpTable2Addr; isTwoAddrFold = true; } else if (i == 0) { // If operand 0 - if (MI->getOpcode() == X86::MOV64r0) - NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI); - else if (MI->getOpcode() == X86::MOV32r0) - NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI); - else if (MI->getOpcode() == X86::MOV16r0) - NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI); - else if (MI->getOpcode() == X86::MOV8r0) - NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI); + unsigned Opc = 0; + switch (MI->getOpcode()) { + default: break; + case X86::MOV64r0: Opc = X86::MOV64mi32; break; + case X86::MOV32r0: Opc = X86::MOV32mi; break; + case X86::MOV16r0: Opc = X86::MOV16mi; break; + case X86::MOV8r0: Opc = X86::MOV8mi; break; + } + if (Opc) + NewMI = MakeM0Inst(*this, Opc, MOs, MI); if (NewMI) return NewMI; @@ -3799,7 +3836,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Unless optimizing for size, don't fold to avoid partial // register update stalls - if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) && + if (!MF.getFunction()->getFnAttributes(). + hasAttribute(Attributes::OptimizeForSize) && hasPartialRegUpdate(MI->getOpcode())) return 0; @@ -3840,7 +3878,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Unless optimizing for size, don't fold to avoid partial // register update stalls - if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize) && + if (!MF.getFunction()->getFnAttributes(). 
+ hasAttribute(Attributes::OptimizeForSize) && hasPartialRegUpdate(MI->getOpcode())) return 0; @@ -3850,15 +3889,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, Alignment = (*LoadMI->memoperands_begin())->getAlignment(); else switch (LoadMI->getOpcode()) { - case X86::AVX_SET0PSY: - case X86::AVX_SET0PDY: case X86::AVX2_SETALLONES: - case X86::AVX2_SET0: + case X86::AVX_SET0: Alignment = 32; break; case X86::V_SET0: case X86::V_SETALLONES: - case X86::AVX_SETALLONES: Alignment = 16; break; case X86::FsFLD0SD: @@ -3894,11 +3930,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, switch (LoadMI->getOpcode()) { case X86::V_SET0: case X86::V_SETALLONES: - case X86::AVX_SET0PSY: - case X86::AVX_SET0PDY: - case X86::AVX_SETALLONES: case X86::AVX2_SETALLONES: - case X86::AVX2_SET0: + case X86::AVX_SET0: case X86::FsFLD0SD: case X86::FsFLD0SS: { // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure. @@ -3930,15 +3963,12 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, Ty = Type::getFloatTy(MF.getFunction()->getContext()); else if (Opc == X86::FsFLD0SD) Ty = Type::getDoubleTy(MF.getFunction()->getContext()); - else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY) - Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8); - else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX2_SET0) + else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0) Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 8); else Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4); - bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX_SETALLONES || - Opc == X86::AVX2_SETALLONES); + bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES); const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) : Constant::getNullValue(Ty); unsigned CPI = MCP.getConstantPoolIndex(C, Alignment); @@ -4013,6 +4043,8 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI, OpcodeTablePtr = &RegOp2MemOpTable1; } else if (OpNum == 2) { OpcodeTablePtr = &RegOp2MemOpTable2; + } else if (OpNum == 3) { + OpcodeTablePtr = &RegOp2MemOpTable3; } if (OpcodeTablePtr && OpcodeTablePtr->count(Opc)) @@ -4102,7 +4134,6 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, getUndefRegState(MO.isUndef())); } // Change CMP32ri r, 0 back to TEST32rr r, r, etc. - unsigned NewOpc = 0; switch (DataMI->getOpcode()) { default: break; case X86::CMP64ri32: @@ -4115,8 +4146,9 @@ bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI, MachineOperand &MO0 = DataMI->getOperand(0); MachineOperand &MO1 = DataMI->getOperand(1); if (MO1.getImm() == 0) { + unsigned NewOpc; switch (DataMI->getOpcode()) { - default: break; + default: llvm_unreachable("Unreachable!"); case X86::CMP64ri8: case X86::CMP64ri32: NewOpc = X86::TEST64rr; break; case X86::CMP32ri8: diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h index b6f69af..260f054 100644 --- a/lib/Target/X86/X86InstrInfo.h +++ b/lib/Target/X86/X86InstrInfo.h @@ -61,6 +61,9 @@ namespace X86 { // Turn condition code into conditional branch opcode. unsigned GetCondBranchFromCond(CondCode CC); + // Turn CMov opcode into condition code. + CondCode getCondFromCMovOpc(unsigned Opc); + /// GetOppositeBranchCondition - Return the inverse of the specified cond, /// e.g. turning COND_E to COND_NE. 
CondCode GetOppositeBranchCondition(X86::CondCode CC); diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index d293156..650fa95 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -114,7 +114,7 @@ def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>; def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>; def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER, - [SDNPHasChain]>; + [SDNPHasChain,SDNPSideEffect]>; def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER, [SDNPHasChain]>; def X86SFence : SDNode<"X86ISD::SFENCE", SDT_X86MEMBARRIER, @@ -216,6 +216,14 @@ def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR, def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET, [SDNPHasChain]>; +def X86eh_sjlj_setjmp : SDNode<"X86ISD::EH_SJLJ_SETJMP", + SDTypeProfile<1, 1, [SDTCisInt<0>, + SDTCisPtrTy<1>]>, + [SDNPHasChain, SDNPSideEffect]>; +def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP", + SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>, + [SDNPHasChain, SDNPSideEffect]>; + def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET, [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>; @@ -397,7 +405,7 @@ def i64mem_TC : Operand { let OperandType = "OPERAND_PCREL", ParserMatchClass = X86AbsMemAsmOperand, - PrintMethod = "print_pcrel_imm" in { + PrintMethod = "printPCRelImm" in { def i32imm_pcrel : Operand; def i16imm_pcrel : Operand; @@ -418,7 +426,7 @@ def SSECC : Operand { } def AVXCC : Operand { - let PrintMethod = "printSSECC"; + let PrintMethod = "printAVXCC"; let OperandType = "OPERAND_IMMEDIATE"; } @@ -499,7 +507,7 @@ def i64i32imm : Operand { // 64-bits but only 32 bits are significant, and those bits are treated as being // pc relative. def i64i32imm_pcrel : Operand { - let PrintMethod = "print_pcrel_imm"; + let PrintMethod = "printPCRelImm"; let ParserMatchClass = X86AbsMemAsmOperand; let OperandType = "OPERAND_PCREL"; } @@ -552,14 +560,21 @@ def HasMMX : Predicate<"Subtarget->hasMMX()">; def Has3DNow : Predicate<"Subtarget->has3DNow()">; def Has3DNowA : Predicate<"Subtarget->has3DNowA()">; def HasSSE1 : Predicate<"Subtarget->hasSSE1()">; +def UseSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">; def HasSSE2 : Predicate<"Subtarget->hasSSE2()">; +def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">; def HasSSE3 : Predicate<"Subtarget->hasSSE3()">; +def UseSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">; def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">; +def UseSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">; def HasSSE41 : Predicate<"Subtarget->hasSSE41()">; +def UseSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">; def HasSSE42 : Predicate<"Subtarget->hasSSE42()">; +def UseSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">; def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">; def HasAVX : Predicate<"Subtarget->hasAVX()">; def HasAVX2 : Predicate<"Subtarget->hasAVX2()">; +def HasAVX1Only : Predicate<"Subtarget->hasAVX() && !Subtarget->hasAVX2()">; def HasPOPCNT : Predicate<"Subtarget->hasPOPCNT()">; def HasAES : Predicate<"Subtarget->hasAES()">; @@ -574,6 +589,7 @@ def HasFSGSBase : Predicate<"Subtarget->hasFSGSBase()">; def HasLZCNT : Predicate<"Subtarget->hasLZCNT()">; def HasBMI : Predicate<"Subtarget->hasBMI()">; def HasBMI2 : Predicate<"Subtarget->hasBMI2()">; +def HasRTM : Predicate<"Subtarget->hasRTM()">; def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">; def FPStackf64 : 
Predicate<"!Subtarget->hasSSE2()">; def HasCmpxchg16b: Predicate<"Subtarget->hasCmpxchg16b()">; @@ -1259,28 +1275,46 @@ def BTS64mi8 : RIi8<0xBA, MRM5m, (outs), (ins i64mem:$src1, i64i8imm:$src2), // Atomic support // - // Atomic swap. These are just normal xchg instructions. But since a memory // operand is referenced, the atomicity is ensured. +multiclass ATOMIC_SWAP opc8, bits<8> opc, string mnemonic, string frag, + InstrItinClass itin> { + let Constraints = "$val = $dst" in { + def #NAME#8rm : I(frag # "_8") addr:$ptr, GR8:$val))], + itin>; + def #NAME#16rm : I(frag # "_16") addr:$ptr, GR16:$val))], + itin>, OpSize; + def #NAME#32rm : I(frag # "_32") addr:$ptr, GR32:$val))], + itin>; + def #NAME#64rm : RI(frag # "_64") addr:$ptr, GR64:$val))], + itin>; + } +} + +defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap", IIC_XCHG_MEM>; + +// Swap between registers. let Constraints = "$val = $dst" in { -def XCHG8rm : I<0x86, MRMSrcMem, (outs GR8:$dst), (ins GR8:$val, i8mem:$ptr), - "xchg{b}\t{$val, $ptr|$ptr, $val}", - [(set GR8:$dst, (atomic_swap_8 addr:$ptr, GR8:$val))], - IIC_XCHG_MEM>; -def XCHG16rm : I<0x87, MRMSrcMem, (outs GR16:$dst),(ins GR16:$val, i16mem:$ptr), - "xchg{w}\t{$val, $ptr|$ptr, $val}", - [(set GR16:$dst, (atomic_swap_16 addr:$ptr, GR16:$val))], - IIC_XCHG_MEM>, - OpSize; -def XCHG32rm : I<0x87, MRMSrcMem, (outs GR32:$dst),(ins GR32:$val, i32mem:$ptr), - "xchg{l}\t{$val, $ptr|$ptr, $val}", - [(set GR32:$dst, (atomic_swap_32 addr:$ptr, GR32:$val))], - IIC_XCHG_MEM>; -def XCHG64rm : RI<0x87, MRMSrcMem, (outs GR64:$dst),(ins GR64:$val,i64mem:$ptr), - "xchg{q}\t{$val, $ptr|$ptr, $val}", - [(set GR64:$dst, (atomic_swap_64 addr:$ptr, GR64:$val))], - IIC_XCHG_MEM>; - def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src), "xchg{b}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>; def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src), @@ -1291,6 +1325,7 @@ def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src), "xchg{q}\t{$val, $src|$src, $val}", [], IIC_XCHG_REG>; } +// Swap between EAX and other registers. def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src), "xchg{w}\t{$src, %ax|AX, $src}", [], IIC_XCHG_REG>, OpSize; def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src), @@ -1672,6 +1707,8 @@ include "X86Instr3DNow.td" include "X86InstrVMX.td" include "X86InstrSVM.td" +include "X86InstrTSX.td" + // System instructions. include "X86InstrSystem.td" diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index c8f40bb..127af6f 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -118,11 +118,11 @@ let Constraints = "$src1 = $dst" in { /// Unary MMX instructions requiring SSSE3. multiclass SS3I_unop_rm_int_mm opc, string OpcodeStr, Intrinsic IntId64, OpndItins itins> { - def rr64 : SS38I; - def rm64 : SS38I opc, string OpcodeStr, Intrinsic IntId64, OpndItins itins> { let isCommutable = 0 in - def rr64 : SS38I; - def rm64 : SS38I opc, string OpcodeStr, /// PALIGN MMX instructions (require SSSE3). 
multiclass ssse3_palign_mm { - def R64irr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst), + def R64irr : MMXSS3AI<0x0F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src1, VR64:$src2, i8imm:$src3), !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), [(set VR64:$dst, (IntId VR64:$src1, VR64:$src2, (i8 imm:$src3)))]>; - def R64irm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst), + def R64irm : MMXSS3AI<0x0F, MRMSrcMem, (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2, i8imm:$src3), !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), [(set VR64:$dst, (IntId VR64:$src1, @@ -163,12 +163,10 @@ multiclass ssse3_palign_mm { multiclass sse12_cvt_pint opc, RegisterClass SrcRC, RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag, string asm, OpndItins itins, Domain d> { - def irr : PI; - def irm : PI; + def irr : MMXPI; + def irm : MMXPI; } multiclass sse12_cvt_pint_3addr opc, RegisterClass SrcRC, @@ -209,8 +207,14 @@ def MMX_MOVD64rm : MMXI<0x6E, MRMSrcMem, (outs VR64:$dst), (ins i32mem:$src), let mayStore = 1 in def MMX_MOVD64mr : MMXI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR64:$src), "movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_MM_RM>; -def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs), (ins GR32:$dst, VR64:$src), - "movd\t{$src, $dst|$dst, $src}", [], IIC_MMX_MOV_REG_MM>; + +// Low word of MMX to GPR. +def MMX_X86movd2w : SDNode<"X86ISD::MMX_MOVD2W", SDTypeProfile<1, 1, + [SDTCisVT<0, i32>, SDTCisVT<1, x86mmx>]>>; +def MMX_MOVD64grr : MMXI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR64:$src), + "movd\t{$src, $dst|$dst, $src}", + [(set GR32:$dst, + (MMX_X86movd2w (x86mmx VR64:$src)))], IIC_MMX_MOV_REG_MM>; let neverHasSideEffects = 1 in def MMX_MOVD64to64rr : MMXRI<0x6E, MRMSrcReg, (outs VR64:$dst), (ins GR64:$src), @@ -243,29 +247,30 @@ def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src), [(store (x86mmx VR64:$src), addr:$dst)], IIC_MMX_MOVQ_RM>; -def MMX_MOVDQ2Qrr : SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), - (ins VR128:$src), "movdq2q\t{$src, $dst|$dst, $src}", - [(set VR64:$dst, - (x86mmx (bitconvert - (i64 (vector_extract (v2i64 VR128:$src), - (iPTR 0))))))], - IIC_MMX_MOVQ_RR>; - -def MMX_MOVQ2DQrr : S2SIi8<0xD6, MRMSrcReg, (outs VR128:$dst), - (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", - [(set VR128:$dst, - (v2i64 (scalar_to_vector - (i64 (bitconvert (x86mmx VR64:$src))))))], - IIC_MMX_MOVQ_RR>; +def MMX_MOVDQ2Qrr : MMXSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), + (ins VR128:$src), "movdq2q\t{$src, $dst|$dst, $src}", + [(set VR64:$dst, + (x86mmx (bitconvert + (i64 (vector_extract (v2i64 VR128:$src), + (iPTR 0))))))], + IIC_MMX_MOVQ_RR>; + +def MMX_MOVQ2DQrr : MMXS2SIi8<0xD6, MRMSrcReg, (outs VR128:$dst), + (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", + [(set VR128:$dst, + (v2i64 + (scalar_to_vector + (i64 (bitconvert (x86mmx VR64:$src))))))], + IIC_MMX_MOVQ_RR>; let neverHasSideEffects = 1 in -def MMX_MOVQ2FR64rr: S2SIi8<0xD6, MRMSrcReg, (outs FR64:$dst), - (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", [], - IIC_MMX_MOVQ_RR>; +def MMX_MOVQ2FR64rr: MMXS2SIi8<0xD6, MRMSrcReg, (outs FR64:$dst), + (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", + [], IIC_MMX_MOVQ_RR>; -def MMX_MOVFR642Qrr: SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), - (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}", [], - IIC_MMX_MOVQ_RR>; +def MMX_MOVFR642Qrr: MMXSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), + (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}", + [], IIC_MMX_MOVQ_RR>; def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins 
i64mem:$dst, VR64:$src), "movntq\t{$src, $dst|$dst, $src}", @@ -577,6 +582,7 @@ def MMX_MASKMOVQ64: MMXI64<0xF7, MRMSrcReg, (outs), (ins VR64:$src, VR64:$mask), IIC_MMX_MASKMOV>; // 64-bit bit convert. +let Predicates = [HasSSE2] in { def : Pat<(x86mmx (bitconvert (i64 GR64:$src))), (MMX_MOVD64to64rr GR64:$src)>; def : Pat<(i64 (bitconvert (x86mmx VR64:$src))), @@ -585,5 +591,6 @@ def : Pat<(f64 (bitconvert (x86mmx VR64:$src))), (MMX_MOVQ2FR64rr VR64:$src)>; def : Pat<(x86mmx (bitconvert (f64 FR64:$src))), (MMX_MOVFR642Qrr FR64:$src)>; +} diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 20dc81e..6f48d7e 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -251,35 +251,37 @@ def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))), // A 128-bit subvector extract from the first 256-bit vector position // is a subregister copy that needs no instruction. -def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))), +def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (iPTR 0))), (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>; -def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))), +def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (iPTR 0))), (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>; -def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))), +def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (iPTR 0))), (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>; -def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))), +def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (iPTR 0))), (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>; -def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))), +def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (iPTR 0))), (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>; -def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))), +def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (iPTR 0))), (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>; // A 128-bit subvector insert to the first 256-bit vector position // is a subregister copy that needs no instruction. -def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)), +let AddedComplexity = 25 in { // to give priority over vinsertf128rm +def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)), (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>; -def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)), +def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)), (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>; -def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)), +def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)), (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>; -def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)), +def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)), (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>; -def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)), +def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (iPTR 0)), (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>; -def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)), +def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (iPTR 0)), (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>; +} // Implicitly promote a 32-bit scalar to a vector. 
def : Pat<(v4f32 (scalar_to_vector FR32:$src)), @@ -362,7 +364,7 @@ let Predicates = [HasAVX] in { def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>; } -// Alias instructions that map fld0 to pxor for sse. +// Alias instructions that map fld0 to xorps for sse or vxorps for avx. // This is expanded by ExpandPostRAPseudos. let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, isPseudo = 1 in { @@ -382,11 +384,11 @@ let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, // We set canFoldAsLoad because this can be converted to a constant-pool // load of an all-zeros value if folding it would be beneficial. let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, - isPseudo = 1, neverHasSideEffects = 1 in { -def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "", []>; + isPseudo = 1 in { +def V_SET0 : I<0, Pseudo, (outs VR128:$dst), (ins), "", + [(set VR128:$dst, (v4f32 immAllZerosV))]>; } -def : Pat<(v4f32 immAllZerosV), (V_SET0)>; def : Pat<(v2f64 immAllZerosV), (V_SET0)>; def : Pat<(v4i32 immAllZerosV), (V_SET0)>; def : Pat<(v2i64 immAllZerosV), (V_SET0)>; @@ -394,35 +396,29 @@ def : Pat<(v8i16 immAllZerosV), (V_SET0)>; def : Pat<(v16i8 immAllZerosV), (V_SET0)>; -// The same as done above but for AVX. The 256-bit ISA does not support PI, +// The same as done above but for AVX. The 256-bit AVX1 ISA doesn't support PI, // and doesn't need it because on sandy bridge the register is set to zero // at the rename stage without using any execution unit, so SET0PSY // and SET0PDY can be used for vector int instructions without penalty -// FIXME: Change encoding to pseudo! This is blocked right now by the x86 -// JIT implementatioan, it does not expand the instructions below like -// X86MCInstLower does. let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, - isCodeGenOnly = 1 in { -let Predicates = [HasAVX] in { -def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "", - [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V; -def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "", - [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V; -} -let Predicates = [HasAVX2], neverHasSideEffects = 1 in -def AVX2_SET0 : PDI<0xef, MRMInitReg, (outs VR256:$dst), (ins), "", - []>, VEX_4V; + isPseudo = 1, Predicates = [HasAVX] in { +def AVX_SET0 : I<0, Pseudo, (outs VR256:$dst), (ins), "", + [(set VR256:$dst, (v8f32 immAllZerosV))]>; } -let Predicates = [HasAVX2], AddedComplexity = 5 in { - def : Pat<(v4i64 immAllZerosV), (AVX2_SET0)>; - def : Pat<(v8i32 immAllZerosV), (AVX2_SET0)>; - def : Pat<(v16i16 immAllZerosV), (AVX2_SET0)>; - def : Pat<(v32i8 immAllZerosV), (AVX2_SET0)>; +let Predicates = [HasAVX] in + def : Pat<(v4f64 immAllZerosV), (AVX_SET0)>; + +let Predicates = [HasAVX2] in { + def : Pat<(v4i64 immAllZerosV), (AVX_SET0)>; + def : Pat<(v8i32 immAllZerosV), (AVX_SET0)>; + def : Pat<(v16i16 immAllZerosV), (AVX_SET0)>; + def : Pat<(v32i8 immAllZerosV), (AVX_SET0)>; } -// AVX has no support for 256-bit integer instructions, but since the 128-bit +// AVX1 has no support for 256-bit integer instructions, but since the 128-bit // VPXOR instruction writes zero to its upper part, it's safe build zeros. 
+let Predicates = [HasAVX1Only] in { def : Pat<(v32i8 immAllZerosV), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>; def : Pat<(bc_v32i8 (v8f32 immAllZerosV)), (SUBREG_TO_REG (i8 0), (V_SET0), sub_xmm)>; @@ -438,22 +434,17 @@ def : Pat<(bc_v8i32 (v8f32 immAllZerosV)), def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>; def : Pat<(bc_v4i64 (v8f32 immAllZerosV)), (SUBREG_TO_REG (i64 0), (V_SET0), sub_xmm)>; +} // We set canFoldAsLoad because this can be converted to a constant-pool // load of an all-ones value if folding it would be beneficial. -// FIXME: Change encoding to pseudo! This is blocked right now by the x86 -// JIT implementation, it does not expand the instructions below like -// X86MCInstLower does. let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, - isCodeGenOnly = 1, ExeDomain = SSEPackedInt in { - let Predicates = [HasAVX] in - def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "", - [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V; - def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "", - [(set VR128:$dst, (v4i32 immAllOnesV))]>; + isPseudo = 1 in { + def V_SETALLONES : I<0, Pseudo, (outs VR128:$dst), (ins), "", + [(set VR128:$dst, (v4i32 immAllOnesV))]>; let Predicates = [HasAVX2] in - def AVX2_SETALLONES : PDI<0x76, MRMInitReg, (outs VR256:$dst), (ins), "", - [(set VR256:$dst, (v8i32 immAllOnesV))]>, VEX_4V; + def AVX2_SETALLONES : I<0, Pseudo, (outs VR256:$dst), (ins), "", + [(set VR256:$dst, (v8i32 immAllOnesV))]>; } @@ -605,27 +596,27 @@ let Predicates = [HasAVX] in { // Represent the same patterns above but in the form they appear for // 256-bit types def : Pat<(v8i32 (X86vzmovl (insert_subvector undef, - (v4i32 (scalar_to_vector (loadi32 addr:$src))), (i32 0)))), + (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))), (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>; def : Pat<(v8f32 (X86vzmovl (insert_subvector undef, - (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))), + (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))), (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_xmm)>; def : Pat<(v4f64 (X86vzmovl (insert_subvector undef, - (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))), + (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))), (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_xmm)>; } def : Pat<(v8f32 (X86vzmovl (insert_subvector undef, - (v4f32 (scalar_to_vector FR32:$src)), (i32 0)))), + (v4f32 (scalar_to_vector FR32:$src)), (iPTR 0)))), (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)), sub_xmm)>; def : Pat<(v4f64 (X86vzmovl (insert_subvector undef, - (v2f64 (scalar_to_vector FR64:$src)), (i32 0)))), + (v2f64 (scalar_to_vector FR64:$src)), (iPTR 0)))), (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDrr (v2f64 (V_SET0)), FR64:$src)), sub_xmm)>; def : Pat<(v4i64 (X86vzmovl (insert_subvector undef, - (v2i64 (scalar_to_vector (loadi64 addr:$src))), (i32 0)))), + (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))), (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_xmm)>; // Move low f64 and clear high bits. @@ -704,7 +695,7 @@ let Predicates = [HasAVX] in { (VMOVSDrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR64))>; } -let Predicates = [HasSSE1] in { +let Predicates = [UseSSE1] in { let AddedComplexity = 15 in { // Move scalar to XMM zero-extended, zeroing a VR128 then do a // MOVSS to the lower bits. 
@@ -738,7 +729,7 @@ let Predicates = [HasSSE1] in { (MOVSSrr VR128:$src1, (COPY_TO_REGCLASS VR128:$src2, FR32))>; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { let AddedComplexity = 15 in { // Move scalar to XMM zero-extended, zeroing a VR128 then do a // MOVSD to the lower bits. @@ -822,16 +813,16 @@ defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64, defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32, "movaps", SSEPackedSingle, SSE_MOVA_ITINS>, - TB, VEX; + TB, VEX, VEX_L; defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64, "movapd", SSEPackedDouble, SSE_MOVA_ITINS>, - TB, OpSize, VEX; + TB, OpSize, VEX, VEX_L; defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32, "movups", SSEPackedSingle, SSE_MOVU_ITINS>, - TB, VEX; + TB, VEX, VEX_L; defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64, "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>, - TB, OpSize, VEX; + TB, OpSize, VEX, VEX_L; defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32, "movaps", SSEPackedSingle, SSE_MOVA_ITINS>, TB; @@ -864,19 +855,19 @@ def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src), "movaps\t{$src, $dst|$dst, $src}", [(alignedstore256 (v8f32 VR256:$src), addr:$dst)], - IIC_SSE_MOVA_P_MR>, VEX; + IIC_SSE_MOVA_P_MR>, VEX, VEX_L; def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src), "movapd\t{$src, $dst|$dst, $src}", [(alignedstore256 (v4f64 VR256:$src), addr:$dst)], - IIC_SSE_MOVA_P_MR>, VEX; + IIC_SSE_MOVA_P_MR>, VEX, VEX_L; def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src), "movups\t{$src, $dst|$dst, $src}", [(store (v8f32 VR256:$src), addr:$dst)], - IIC_SSE_MOVU_P_MR>, VEX; + IIC_SSE_MOVU_P_MR>, VEX, VEX_L; def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src), "movupd\t{$src, $dst|$dst, $src}", [(store (v4f64 VR256:$src), addr:$dst)], - IIC_SSE_MOVU_P_MR>, VEX; + IIC_SSE_MOVU_P_MR>, VEX, VEX_L; // For disassembler let isCodeGenOnly = 1 in { @@ -899,33 +890,33 @@ let isCodeGenOnly = 1 in { def VMOVAPSYrr_REV : VPSI<0x29, MRMDestReg, (outs VR256:$dst), (ins VR256:$src), "movaps\t{$src, $dst|$dst, $src}", [], - IIC_SSE_MOVA_P_RR>, VEX; + IIC_SSE_MOVA_P_RR>, VEX, VEX_L; def VMOVAPDYrr_REV : VPDI<0x29, MRMDestReg, (outs VR256:$dst), (ins VR256:$src), "movapd\t{$src, $dst|$dst, $src}", [], - IIC_SSE_MOVA_P_RR>, VEX; + IIC_SSE_MOVA_P_RR>, VEX, VEX_L; def VMOVUPSYrr_REV : VPSI<0x11, MRMDestReg, (outs VR256:$dst), (ins VR256:$src), "movups\t{$src, $dst|$dst, $src}", [], - IIC_SSE_MOVU_P_RR>, VEX; + IIC_SSE_MOVU_P_RR>, VEX, VEX_L; def VMOVUPDYrr_REV : VPDI<0x11, MRMDestReg, (outs VR256:$dst), (ins VR256:$src), "movupd\t{$src, $dst|$dst, $src}", [], - IIC_SSE_MOVU_P_RR>, VEX; + IIC_SSE_MOVU_P_RR>, VEX, VEX_L; } let Predicates = [HasAVX] in { def : Pat<(v8i32 (X86vzmovl - (insert_subvector undef, (v4i32 VR128:$src), (i32 0)))), + (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))), (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>; def : Pat<(v4i64 (X86vzmovl - (insert_subvector undef, (v2i64 VR128:$src), (i32 0)))), + (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))), (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>; def : Pat<(v8f32 (X86vzmovl - (insert_subvector undef, (v4f32 VR128:$src), (i32 0)))), + (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))), (SUBREG_TO_REG (i32 0), (VMOVAPSrr 
VR128:$src), sub_xmm)>; def : Pat<(v4f64 (X86vzmovl - (insert_subvector undef, (v2f64 VR128:$src), (i32 0)))), + (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))), (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>; } @@ -975,10 +966,10 @@ let Predicates = [HasAVX] in { (VMOVUPDmr addr:$dst, VR128:$src)>; } -let Predicates = [HasSSE1] in +let Predicates = [UseSSE1] in def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src), (MOVUPSmr addr:$dst, VR128:$src)>; -let Predicates = [HasSSE2] in +let Predicates = [UseSSE2] in def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src), (MOVUPDmr addr:$dst, VR128:$src)>; @@ -1028,12 +1019,52 @@ let Predicates = [HasAVX] in { (VMOVUPSYmr addr:$dst, VR256:$src)>; def : Pat<(store (v32i8 VR256:$src), addr:$dst), (VMOVUPSYmr addr:$dst, VR256:$src)>; + + // Special patterns for storing subvector extracts of lower 128-bits + // Its cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr + def : Pat<(alignedstore (v2f64 (extract_subvector + (v4f64 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVAPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(alignedstore (v4f32 (extract_subvector + (v8f32 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(alignedstore (v2i64 (extract_subvector + (v4i64 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(alignedstore (v4i32 (extract_subvector + (v8i32 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(alignedstore (v8i16 (extract_subvector + (v16i16 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(alignedstore (v16i8 (extract_subvector + (v32i8 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + + def : Pat<(store (v2f64 (extract_subvector + (v4f64 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVUPDmr addr:$dst, (v2f64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(store (v4f32 (extract_subvector + (v8f32 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(store (v2i64 (extract_subvector + (v4i64 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(store (v4i32 (extract_subvector + (v8i32 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(store (v8i16 (extract_subvector + (v16i16 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; + def : Pat<(store (v16i8 (extract_subvector + (v32i8 VR256:$src), (iPTR 0))), addr:$dst), + (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>; } // Use movaps / movups for SSE integer load / store (one byte shorter). // The instructions selected below are then converted to MOVDQA/MOVDQU // during the SSE domain pass. 
-let Predicates = [HasSSE1] in { +let Predicates = [UseSSE1] in { def : Pat<(alignedloadv2i64 addr:$src), (MOVAPSrm addr:$src)>; def : Pat<(loadv2i64 addr:$src), @@ -1180,7 +1211,7 @@ let Predicates = [HasAVX] in { (VMOVLPDmr addr:$src1, VR128:$src2)>; } -let Predicates = [HasSSE1] in { +let Predicates = [UseSSE1] in { // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS def : Pat<(store (i64 (vector_extract (bc_v2i64 (v4f32 VR128:$src2)), (iPTR 0))), addr:$src1), @@ -1205,7 +1236,7 @@ let Predicates = [HasSSE1] in { (MOVLPSmr addr:$src1, VR128:$src2)>; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { // Shuffle with MOVLPD def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))), (MOVLPDrm VR128:$src1, addr:$src2)>; @@ -1271,7 +1302,7 @@ let Predicates = [HasAVX] in { (VMOVHPSrm VR128:$src1, addr:$src2)>; // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem - // is during lowering, where it's not possible to recognize the load fold + // is during lowering, where it's not possible to recognize the load fold // cause it has two uses through a bitcast. One use disappears at isel time // and the fold opportunity reappears. def : Pat<(v2f64 (X86Unpckl VR128:$src1, @@ -1279,7 +1310,7 @@ let Predicates = [HasAVX] in { (VMOVHPDrm VR128:$src1, addr:$src2)>; } -let Predicates = [HasSSE1] in { +let Predicates = [UseSSE1] in { // MOVHPS patterns def : Pat<(X86Movlhps VR128:$src1, (bc_v4f32 (v2i64 (scalar_to_vector (loadi64 addr:$src2))))), @@ -1289,9 +1320,9 @@ let Predicates = [HasSSE1] in { (MOVHPSrm VR128:$src1, addr:$src2)>; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { // FIXME: Instead of X86Unpckl, there should be a X86Movlhpd here, the problem - // is during lowering, where it's not possible to recognize the load fold + // is during lowering, where it's not possible to recognize the load fold // cause it has two uses through a bitcast. One use disappears at isel time // and the fold opportunity reappears. 
def : Pat<(v2f64 (X86Unpckl VR128:$src1, @@ -1346,7 +1377,7 @@ let Predicates = [HasAVX] in { (VMOVHLPSrr VR128:$src1, VR128:$src2)>; } -let Predicates = [HasSSE1] in { +let Predicates = [UseSSE1] in { // MOVLHPS patterns def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)), (MOVLHPSrr VR128:$src1, VR128:$src2)>; @@ -1456,7 +1487,7 @@ def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}", def : InstAlias<"vcvtsi2sd{l}\t{$src, $src1, $dst|$dst, $src1, $src}", (VCVTSI2SDrm FR64:$dst, FR64:$src1, i32mem:$src)>; -let Predicates = [HasAVX], AddedComplexity = 1 in { +let Predicates = [HasAVX] in { def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))), (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>; def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))), @@ -1628,12 +1659,12 @@ defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem, defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem, "vcvtdq2ps\t{$src, $dst|$dst, $src}", SSEPackedSingle, SSE_CVT_PS>, - TB, VEX, Requires<[HasAVX]>; + TB, VEX, VEX_L, Requires<[HasAVX]>; defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem, "cvtdq2ps\t{$src, $dst|$dst, $src}", SSEPackedSingle, SSE_CVT_PS>, - TB, Requires<[HasSSE2]>; + TB, Requires<[UseSSE2]>; /// SSE 2 Only @@ -1663,7 +1694,7 @@ def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src), [(set FR32:$dst, (fround (loadf64 addr:$src)))], IIC_SSE_CVT_Scalar_RM>, XD, - Requires<[HasSSE2, OptForSize]>; + Requires<[UseSSE2, OptForSize]>; def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), @@ -1684,13 +1715,13 @@ def Int_CVTSD2SSrr: I<0x5A, MRMSrcReg, "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1, VR128:$src2))], - IIC_SSE_CVT_Scalar_RR>, XD, Requires<[HasSSE2]>; + IIC_SSE_CVT_Scalar_RR>, XD, Requires<[UseSSE2]>; def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2), "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtsd2ss VR128:$src1, sse_load_f64:$src2))], - IIC_SSE_CVT_Scalar_RM>, XD, Requires<[HasSSE2]>; + IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>; } // Convert scalar single to scalar double @@ -1709,30 +1740,28 @@ def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), XS, VEX_4V, VEX_LIG, Requires<[HasAVX, OptForSize]>; } -let AddedComplexity = 1 in { // give AVX priority - def : Pat<(f64 (fextend FR32:$src)), - (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>; - def : Pat<(fextend (loadf32 addr:$src)), - (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>; +def : Pat<(f64 (fextend FR32:$src)), + (VCVTSS2SDrr FR32:$src, FR32:$src)>, Requires<[HasAVX]>; +def : Pat<(fextend (loadf32 addr:$src)), + (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX]>; - def : Pat<(extloadf32 addr:$src), - (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, - Requires<[HasAVX, OptForSize]>; - def : Pat<(extloadf32 addr:$src), - (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>, - Requires<[HasAVX, OptForSpeed]>; -} // AddedComplexity = 1 +def : Pat<(extloadf32 addr:$src), + (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>, + Requires<[HasAVX, OptForSize]>; +def : Pat<(extloadf32 addr:$src), + (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (VMOVSSrm addr:$src))>, + Requires<[HasAVX, OptForSpeed]>; def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src), "cvtss2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (fextend FR32:$src))], IIC_SSE_CVT_Scalar_RR>, XS, - 
Requires<[HasSSE2]>; + Requires<[UseSSE2]>; def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src), "cvtss2sd\t{$src, $dst|$dst, $src}", [(set FR64:$dst, (extloadf32 addr:$src))], IIC_SSE_CVT_Scalar_RM>, XS, - Requires<[HasSSE2, OptForSize]>; + Requires<[UseSSE2, OptForSize]>; // extload f32 -> f64. This matches load+fextend because we have a hack in // the isel (PreprocessForFPConvert) that can introduce loads after dag @@ -1740,9 +1769,9 @@ def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src), // Since these loads aren't folded into the fextend, we have to match it // explicitly here. def : Pat<(fextend (loadf32 addr:$src)), - (CVTSS2SDrm addr:$src)>, Requires<[HasSSE2]>; + (CVTSS2SDrm addr:$src)>, Requires<[UseSSE2]>; def : Pat<(extloadf32 addr:$src), - (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[HasSSE2, OptForSpeed]>; + (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>; def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), @@ -1762,13 +1791,13 @@ def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg, "cvtss2sd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1, VR128:$src2))], - IIC_SSE_CVT_Scalar_RR>, XS, Requires<[HasSSE2]>; + IIC_SSE_CVT_Scalar_RR>, XS, Requires<[UseSSE2]>; def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2), "cvtss2sd\t{$src2, $dst|$dst, $src2}", [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1, sse_load_f32:$src2))], - IIC_SSE_CVT_Scalar_RM>, XS, Requires<[HasSSE2]>; + IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>; } // Convert packed single/double fp to doubleword @@ -1785,12 +1814,12 @@ def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvt_ps2dq_256 VR256:$src))], - IIC_SSE_CVT_PS_RR>, VEX; + IIC_SSE_CVT_PS_RR>, VEX, VEX_L; def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>, VEX; + IIC_SSE_CVT_PS_RM>, VEX, VEX_L; def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtps2dq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))], @@ -1824,7 +1853,7 @@ def VCVTPD2DQXrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), def VCVTPD2DQYrr : SDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src), "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, - (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX; + (int_x86_avx_cvt_pd2dq_256 VR256:$src))]>, VEX, VEX_L; def VCVTPD2DQYrm : SDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src), "vcvtpd2dq{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, @@ -1860,12 +1889,12 @@ def VCVTTPS2DQYrr : VS2SI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256 VR256:$src))], - IIC_SSE_CVT_PS_RR>, VEX; + IIC_SSE_CVT_PS_RR>, VEX, VEX_L; def VCVTTPS2DQYrm : VS2SI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)))], - IIC_SSE_CVT_PS_RM>, VEX; + IIC_SSE_CVT_PS_RM>, VEX, VEX_L; def CVTTPS2DQrr : S2SI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvttps2dq\t{$src, $dst|$dst, $src}", @@ -1904,7 +1933,7 @@ let Predicates = [HasAVX] in { (VCVTTPS2DQYrm addr:$src)>; } -let 
Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))), (CVTDQ2PSrr VR128:$src)>; def : Pat<(v4f32 (sint_to_fp (bc_v4i32 (memopv2i64 addr:$src)))), @@ -1945,7 +1974,7 @@ def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src), "cvttpd2dq{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_avx_cvtt_pd2dq_256 VR256:$src))], - IIC_SSE_CVT_PD_RR>, VEX; + IIC_SSE_CVT_PD_RR>, VEX, VEX_L; def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src), "cvttpd2dq{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, @@ -1978,31 +2007,31 @@ def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))], IIC_SSE_CVT_PD_RR>, TB, VEX; -let neverHasSideEffects = 1, mayLoad = 1 in def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), - "vcvtps2pd\t{$src, $dst|$dst, $src}", [], - IIC_SSE_CVT_PD_RM>, TB, VEX; + "vcvtps2pd\t{$src, $dst|$dst, $src}", + [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))], + IIC_SSE_CVT_PD_RM>, TB, VEX; def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src), "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvt_ps2_pd_256 VR128:$src))], - IIC_SSE_CVT_PD_RR>, TB, VEX; + IIC_SSE_CVT_PD_RR>, TB, VEX, VEX_L; def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src), "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)))], - IIC_SSE_CVT_PD_RM>, TB, VEX; + IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))], IIC_SSE_CVT_PD_RR>, TB; -let neverHasSideEffects = 1, mayLoad = 1 in def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), - "cvtps2pd\t{$src, $dst|$dst, $src}", [], - IIC_SSE_CVT_PD_RM>, TB; + "cvtps2pd\t{$src, $dst|$dst, $src}", + [(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))], + IIC_SSE_CVT_PD_RM>, TB; } // Convert Packed DW Integers to Packed Double FP @@ -2019,11 +2048,11 @@ def VCVTDQ2PDYrm : S2SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins i128mem:$src), "vcvtdq2pd\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx_cvtdq2_pd_256 - (bitconvert (memopv2i64 addr:$src))))]>, VEX; + (bitconvert (memopv2i64 addr:$src))))]>, VEX, VEX_L; def VCVTDQ2PDYrr : S2SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src), "vcvtdq2pd\t{$src, $dst|$dst, $src}", [(set VR256:$dst, - (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX; + (int_x86_avx_cvtdq2_pd_256 VR128:$src))]>, VEX, VEX_L; } let neverHasSideEffects = 1, mayLoad = 1 in @@ -2066,7 +2095,7 @@ def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src), "cvtpd2ps{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_avx_cvt_pd2_ps_256 VR256:$src))], - IIC_SSE_CVT_PD_RR>, VEX; + IIC_SSE_CVT_PD_RR>, VEX, VEX_L; def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src), "cvtpd2ps{y}\t{$src, $dst|$dst, $src}", [(set VR128:$dst, @@ -2096,6 +2125,10 @@ let Predicates = [HasAVX] in { (VCVTDQ2PSYrm addr:$src)>; // Match fround and fextend for 128/256-bit conversions + def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))), + (VCVTPD2PSrr VR128:$src)>; + def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))), + (VCVTPD2PSXrm addr:$src)>; 
def : Pat<(v4f32 (fround (v4f64 VR256:$src))), (VCVTPD2PSYrr VR256:$src)>; def : Pat<(v4f32 (fround (loadv4f64 addr:$src))), @@ -2105,12 +2138,17 @@ let Predicates = [HasAVX] in { (VCVTPS2PDrr VR128:$src)>; def : Pat<(v4f64 (fextend (v4f32 VR128:$src))), (VCVTPS2PDYrr VR128:$src)>; - def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))), + def : Pat<(v4f64 (extloadv4f32 addr:$src)), (VCVTPS2PDYrm addr:$src)>; } -let Predicates = [HasSSE2] in { - // Match fextend for 128 conversions +let Predicates = [UseSSE2] in { + // Match fround and fextend for 128 conversions + def : Pat<(v4f32 (X86vfpround (v2f64 VR128:$src))), + (CVTPD2PSrr VR128:$src)>; + def : Pat<(v4f32 (X86vfpround (memopv2f64 addr:$src))), + (CVTPD2PSrm addr:$src)>; + def : Pat<(v2f64 (X86vfpext (v4f32 VR128:$src))), (CVTPS2PDrr VR128:$src)>; } @@ -2121,7 +2159,7 @@ let Predicates = [HasSSE2] in { // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions multiclass sse12_cmp_scalar { def rr : SIi8<0xC2, MRMSrcReg, @@ -2267,7 +2305,7 @@ let Defs = [EFLAGS] in { // sse12_cmp_packed - sse 1 & 2 compare packed instructions multiclass sse12_cmp_packed { def rri : PIi8<0xC2, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm, @@ -2300,11 +2338,11 @@ defm VCMPPD : sse12_cmp_packed, TB, VEX_4V; + SSEPackedSingle>, TB, VEX_4V, VEX_L; defm VCMPPDY : sse12_cmp_packed, TB, OpSize, VEX_4V; + SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L; let Constraints = "$src1 = $dst" in { defm CMPPS : sse12_cmp_packed; } -let Predicates = [HasSSE1] in { +let Predicates = [UseSSE1] in { def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), VR128:$src2, imm:$cc)), (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>; def : Pat<(v4i32 (X86cmpp (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)), (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), VR128:$src2, imm:$cc)), (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>; def : Pat<(v2i64 (X86cmpp (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)), @@ -2374,13 +2412,13 @@ defm VSHUFPS : sse12_shuffle, TB, VEX_4V; defm VSHUFPSY : sse12_shuffle, TB, VEX_4V; + memopv8f32, SSEPackedSingle>, TB, VEX_4V, VEX_L; defm VSHUFPD : sse12_shuffle, TB, OpSize, VEX_4V; defm VSHUFPDY : sse12_shuffle, TB, OpSize, VEX_4V; + memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L; let Constraints = "$src1 = $dst" in { defm SHUFPS : sse12_shuffle; } -let Predicates = [HasSSE1] in { +let Predicates = [UseSSE1] in { def : Pat<(v4i32 (X86Shufp VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))), (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>; @@ -2428,7 +2466,7 @@ let Predicates = [HasSSE1] in { (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { // Generic SHUFPD patterns def : Pat<(v2i64 (X86Shufp VR128:$src1, (memopv2i64 addr:$src2), (i8 imm:$imm))), @@ -2474,16 +2512,16 @@ defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64, defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, memopv8f32, VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}", - SSEPackedSingle>, TB, VEX_4V; + SSEPackedSingle>, TB, VEX_4V, VEX_L; defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, memopv4f64, VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}", - SSEPackedDouble>, TB, OpSize, VEX_4V; + SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L; defm VUNPCKLPSY: 
sse12_unpack_interleave<0x14, X86Unpckl, v8f32, memopv8f32, VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}", - SSEPackedSingle>, TB, VEX_4V; + SSEPackedSingle>, TB, VEX_4V, VEX_L; defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, memopv4f64, VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}", - SSEPackedDouble>, TB, OpSize, VEX_4V; + SSEPackedDouble>, TB, OpSize, VEX_4V, VEX_L; let Constraints = "$src1 = $dst" in { defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32, @@ -2500,7 +2538,27 @@ let Constraints = "$src1 = $dst" in { SSEPackedDouble>, TB, OpSize; } // Constraints = "$src1 = $dst" -let Predicates = [HasAVX], AddedComplexity = 1 in { +let Predicates = [HasAVX1Only] in { + def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))), + (VUNPCKLPSYrm VR256:$src1, addr:$src2)>; + def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)), + (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>; + def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))), + (VUNPCKHPSYrm VR256:$src1, addr:$src2)>; + def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)), + (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>; + + def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))), + (VUNPCKLPDYrm VR256:$src1, addr:$src2)>; + def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)), + (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>; + def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))), + (VUNPCKHPDYrm VR256:$src1, addr:$src2)>; + def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)), + (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>; +} + +let Predicates = [HasAVX] in { // FIXME: Instead of X86Movddup, there should be a X86Unpckl here, the // problem is during lowering, where it's not possible to recognize the load // fold cause it has two uses through a bitcast. One use disappears at isel @@ -2509,7 +2567,7 @@ let Predicates = [HasAVX], AddedComplexity = 1 in { (VUNPCKLPDrr VR128:$src, VR128:$src)>; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { // FIXME: Instead of X86Movddup, there should be a X86Unpckl here, the // problem is during lowering, where it's not possible to recognize the load // fold cause it has two uses through a bitcast. 
One use disappears at isel @@ -2540,10 +2598,11 @@ let Predicates = [HasAVX] in { "movmskpd", SSEPackedDouble>, TB, OpSize, VEX; defm VMOVMSKPSY : sse12_extr_sign_mask, TB, VEX; + "movmskps", SSEPackedSingle>, TB, + VEX, VEX_L; defm VMOVMSKPDY : sse12_extr_sign_mask, TB, - OpSize, VEX; + OpSize, VEX, VEX_L; def : Pat<(i32 (X86fgetsign FR32:$src)), (VMOVMSKPSrr32 (COPY_TO_REGCLASS FR32:$src, VR128))>; @@ -2564,11 +2623,11 @@ let Predicates = [HasAVX] in { OpSize, VEX; def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src), "movmskps\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK, - SSEPackedSingle>, TB, VEX; + SSEPackedSingle>, TB, VEX, VEX_L; def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src), "movmskpd\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVMSK, SSEPackedDouble>, TB, - OpSize, VEX; + OpSize, VEX, VEX_L; } defm MOVMSKPS : sse12_extr_sign_mask, - Requires<[HasSSE1]>; + Requires<[UseSSE1]>; def : Pat<(i64 (X86fgetsign FR32:$src)), (MOVMSKPSrr64 (COPY_TO_REGCLASS FR32:$src, VR128))>, - Requires<[HasSSE1]>; + Requires<[UseSSE1]>; def : Pat<(i32 (X86fgetsign FR64:$src)), (MOVMSKPDrr32 (COPY_TO_REGCLASS FR64:$src, VR128))>, - Requires<[HasSSE2]>; + Requires<[UseSSE2]>; def : Pat<(i64 (X86fgetsign FR64:$src)), (MOVMSKPDrr64 (COPY_TO_REGCLASS FR64:$src, VR128))>, - Requires<[HasSSE2]>; + Requires<[UseSSE2]>; //===---------------------------------------------------------------------===// // SSE2 - Packed Integer Logical Instructions @@ -2646,13 +2705,13 @@ defm PANDN : PDI_binop_rm<0xDF, "pandn", X86andnp, v2i64, VR128, memopv2i64, let Predicates = [HasAVX2] in { defm VPANDY : PDI_binop_rm<0xDB, "vpand", and, v4i64, VR256, memopv4i64, - i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V; + i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPORY : PDI_binop_rm<0xEB, "vpor", or, v4i64, VR256, memopv4i64, - i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V; + i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPXORY : PDI_binop_rm<0xEF, "vpxor", xor, v4i64, VR256, memopv4i64, - i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V; + i256mem, SSE_BIT_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPANDNY : PDI_binop_rm<0xDF, "vpandn", X86andnp, v4i64, VR256, memopv4i64, - i256mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V; + i256mem, SSE_BIT_ITINS_P, 0, 0>, VEX_4V, VEX_L; } //===----------------------------------------------------------------------===// @@ -2683,14 +2742,12 @@ multiclass sse12_fp_alias_pack_logical opc, string OpcodeStr, } // Alias bitwise logical operations using SSE logical ops on packed FP values. 
-let mayLoad = 0 in { - defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand, - SSE_BIT_ITINS_P>; - defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for, - SSE_BIT_ITINS_P>; - defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor, - SSE_BIT_ITINS_P>; -} +defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand, + SSE_BIT_ITINS_P>; +defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for, + SSE_BIT_ITINS_P>; +defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor, + SSE_BIT_ITINS_P>; let neverHasSideEffects = 1, Pattern = [], isCommutable = 0 in defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef, @@ -2740,7 +2797,7 @@ multiclass sse12_fp_packed_logical_y opc, string OpcodeStr, !strconcat(OpcodeStr, "ps"), f256mem, [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))], [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)), - (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V; + (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V, VEX_L; defm PDY : sse12_fp_packed_logical_rm opc, string OpcodeStr, (bc_v4i64 (v4f64 VR256:$src2))))], [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)), (memopv4i64 addr:$src2)))], 0>, - TB, OpSize, VEX_4V; + TB, OpSize, VEX_4V, VEX_L; } // AVX 256-bit packed logical ops forms @@ -2794,27 +2851,23 @@ multiclass basic_sse12_fp_binop_s opc, string OpcodeStr, SDNode OpNode, multiclass basic_sse12_fp_binop_p opc, string OpcodeStr, SDNode OpNode, SizeItins itins, bit Is2Addr = 1> { - let mayLoad = 0 in { defm PS : sse12_fp_packed, TB; defm PD : sse12_fp_packed, TB, OpSize; - } } multiclass basic_sse12_fp_binop_p_y opc, string OpcodeStr, SDNode OpNode, SizeItins itins> { - let mayLoad = 0 in { - defm PSY : sse12_fp_packed, - TB; - defm PDY : sse12_fp_packed, - TB, OpSize; - } + TB, OpSize, VEX_L; } multiclass basic_sse12_fp_binop_s_int opc, string OpcodeStr, @@ -2846,11 +2899,11 @@ multiclass basic_sse12_fp_binop_p_y_int opc, string OpcodeStr, SizeItins itins> { defm PSY : sse12_fp_packed_int, TB; + SSEPackedSingle, itins.s, 0>, TB, VEX_L; defm PDY : sse12_fp_packed_int, TB, OpSize; + SSEPackedDouble, itins.d, 0>, TB, OpSize, VEX_L; } // Binary Arithmetic instructions @@ -2872,7 +2925,8 @@ let isCommutable = 0 in { basic_sse12_fp_binop_s_int<0x5C, "sub", SSE_ALU_ITINS_S, 0>, VEX_4V, VEX_LIG; defm VSUB : basic_sse12_fp_binop_p<0x5C, "sub", fsub, SSE_ALU_ITINS_P, 0>, - basic_sse12_fp_binop_p_y<0x5C, "sub", fsub, SSE_ALU_ITINS_P>, VEX_4V; + basic_sse12_fp_binop_p_y<0x5C, "sub", fsub, SSE_ALU_ITINS_P>, + VEX_4V; defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, SSE_DIV_ITINS_S, 0>, basic_sse12_fp_binop_s_int<0x5E, "div", SSE_DIV_ITINS_S, 0>, VEX_4V, VEX_LIG; @@ -2923,6 +2977,23 @@ let Constraints = "$src1 = $dst" in { } } +let isCodeGenOnly = 1 in { + defm VMAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S, 0>, + VEX_4V, VEX_LIG; + defm VMAXC: basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P, 0>, + basic_sse12_fp_binop_p_y<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>, VEX_4V; + defm VMINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S, 0>, + VEX_4V, VEX_LIG; + defm VMINC: basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P, 0>, + basic_sse12_fp_binop_p_y<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>, VEX_4V; + let Constraints = "$src1 = $dst" in { + defm MAXC: basic_sse12_fp_binop_s<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_S>, + basic_sse12_fp_binop_p<0x5F, "max", X86fmaxc, SSE_ALU_ITINS_P>; + defm MINC: basic_sse12_fp_binop_s<0x5D, "min", X86fminc, SSE_ALU_ITINS_S>, + 
basic_sse12_fp_binop_p<0x5D, "min", X86fminc, SSE_ALU_ITINS_P>; + } +} + /// Unop Arithmetic /// In addition, we also have a special variant of the scalar form here to /// represent the associated intrinsic operation. This form is unlike the @@ -2960,7 +3031,7 @@ multiclass sse1_fp_unop_s opc, string OpcodeStr, def SSm : I, XS, - Requires<[HasSSE1, OptForSize]>; + Requires<[UseSSE1, OptForSize]>; def SSr_Int : SSI; @@ -2974,7 +3045,7 @@ multiclass sse1_fp_unop_s_avx opc, string OpcodeStr> { def SSr : SSI; - let mayLoad = 1 in + let mayLoad = 1 in { def SSm : SSI; @@ -2982,6 +3053,7 @@ multiclass sse1_fp_unop_s_avx opc, string OpcodeStr> { (ins VR128:$src1, ssmem:$src2), !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; + } } /// sse1_fp_unop_p - SSE1 unops in packed form. @@ -3001,11 +3073,11 @@ multiclass sse1_fp_unop_p_y opc, string OpcodeStr, SDNode OpNode, def PSYr : PSI; + itins.rr>, VEX_L; def PSYm : PSI; + itins.rm>, VEX_L; } /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms. @@ -3027,11 +3099,11 @@ multiclass sse1_fp_unop_p_y_int opc, string OpcodeStr, def PSYr_Int : PSI; + itins.rr>, VEX_L; def PSYm_Int : PSI; + itins.rm>, VEX_L; } /// sse2_fp_unop_s - SSE2 unops in scalar form. @@ -3044,7 +3116,7 @@ multiclass sse2_fp_unop_s opc, string OpcodeStr, def SDm : I, XD, - Requires<[HasSSE2, OptForSize]>; + Requires<[UseSSE2, OptForSize]>; def SDr_Int : SDI; @@ -3054,20 +3126,20 @@ multiclass sse2_fp_unop_s opc, string OpcodeStr, } /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form. +let hasSideEffects = 0 in multiclass sse2_fp_unop_s_avx opc, string OpcodeStr> { - let neverHasSideEffects = 1 in { def SDr : SDI; - let mayLoad = 1 in + let mayLoad = 1 in { def SDm : SDI; - } def SDm_Int : SDI; + } } /// sse2_fp_unop_p - SSE2 unops in vector forms. @@ -3087,11 +3159,11 @@ multiclass sse2_fp_unop_p_y opc, string OpcodeStr, SDNode OpNode, def PDYr : PDI; + itins.rr>, VEX_L; def PDYm : PDI; + itins.rm>, VEX_L; } /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms. @@ -3113,11 +3185,11 @@ multiclass sse2_fp_unop_p_y_int opc, string OpcodeStr, def PDYr_Int : PDI; + itins.rr>, VEX_L; def PDYm_Int : PDI; + itins.rm>, VEX_L; } let Predicates = [HasAVX] in { @@ -3158,7 +3230,6 @@ let Predicates = [HasAVX] in { SSE_RCPP>, VEX; } -let AddedComplexity = 1 in { def : Pat<(f32 (fsqrt FR32:$src)), (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>; def : Pat<(f32 (fsqrt (load addr:$src))), @@ -3181,9 +3252,8 @@ def : Pat<(f32 (X86frcp FR32:$src)), def : Pat<(f32 (X86frcp (load addr:$src))), (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX, OptForSize]>; -} -let Predicates = [HasAVX], AddedComplexity = 1 in { +let Predicates = [HasAVX] in { def : Pat<(int_x86_sse_sqrt_ss VR128:$src), (COPY_TO_REGCLASS (VSQRTSSr (f32 (IMPLICIT_DEF)), (COPY_TO_REGCLASS VR128:$src, FR32)), @@ -3223,17 +3293,52 @@ defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss, sse2_fp_unop_p<0x51, "sqrt", fsqrt, SSE_SQRTS>, sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd, SSE_SQRTS>; +/// sse1_fp_unop_s_rw - SSE1 unops where vector form has a read-write operand. +multiclass sse1_fp_unop_rw opc, string OpcodeStr, SDNode OpNode, + Intrinsic F32Int, OpndItins itins> { + def SSr : SSI; + // For scalar unary operations, fold a load into the operation + // only in OptForSize mode. It eliminates an instruction, but it also + // eliminates a whole-register clobber (the load), so it introduces a + // partial register update condition. 
+ def SSm : I, XS, + Requires<[UseSSE1, OptForSize]>; + let Constraints = "$src1 = $dst" in { + def SSr_Int : SSI; + def SSm_Int : SSI; + } +} + // Reciprocal approximations. Note that these typically require refinement // in order to obtain suitable precision. -defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss, - SSE_SQRTS>, +defm RSQRT : sse1_fp_unop_rw<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss, + SSE_SQRTS>, sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt, SSE_SQRTS>, sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps, SSE_SQRTS>; -defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss, - SSE_RCPS>, +let Predicates = [UseSSE1] in { + def : Pat<(int_x86_sse_rsqrt_ss VR128:$src), + (RSQRTSSr_Int VR128:$src, VR128:$src)>; +} + +defm RCP : sse1_fp_unop_rw<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss, + SSE_RCPS>, sse1_fp_unop_p<0x53, "rcp", X86frcp, SSE_RCPS>, sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps, SSE_RCPS>; +let Predicates = [UseSSE1] in { + def : Pat<(int_x86_sse_rcp_ss VR128:$src), + (RCPSSr_Int VR128:$src, VR128:$src)>; +} // There is no f64 version of the reciprocal approximation instructions. @@ -3271,20 +3376,20 @@ let AddedComplexity = 400 in { // Prefer non-temporal versions "movntps\t{$src, $dst|$dst, $src}", [(alignednontemporalstore (v8f32 VR256:$src), addr:$dst)], - IIC_SSE_MOVNT>, VEX; + IIC_SSE_MOVNT>, VEX, VEX_L; def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src), "movntpd\t{$src, $dst|$dst, $src}", [(alignednontemporalstore (v4f64 VR256:$src), addr:$dst)], - IIC_SSE_MOVNT>, VEX; + IIC_SSE_MOVNT>, VEX, VEX_L; let ExeDomain = SSEPackedInt in def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src), "movntdq\t{$src, $dst|$dst, $src}", [(alignednontemporalstore (v4i64 VR256:$src), addr:$dst)], - IIC_SSE_MOVNT>, VEX; + IIC_SSE_MOVNT>, VEX, VEX_L; } let AddedComplexity = 400 in { // Prefer non-temporal versions @@ -3304,7 +3409,7 @@ def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), IIC_SSE_MOVNT>; def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst), - (MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasSSE2]>; + (MOVNTDQmr addr:$dst, VR128:$src)>, Requires<[UseSSE2]>; // There is no AVX form for instructions below this point def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), @@ -3393,14 +3498,14 @@ def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), VEX; def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RR>, - VEX; + VEX, VEX_L; } def VMOVDQUrr : VSSI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>, VEX; def VMOVDQUYrr : VSSI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>, - VEX; + VEX, VEX_L; // For Disassembler let isCodeGenOnly = 1 in { @@ -3410,16 +3515,14 @@ def VMOVDQArr_REV : VPDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src), VEX; def VMOVDQAYrr_REV : VPDI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src), "movdqa\t{$src, $dst|$dst, $src}", [], - IIC_SSE_MOVA_P_RR>, - VEX; + IIC_SSE_MOVA_P_RR>, VEX, VEX_L; def VMOVDQUrr_REV : VSSI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src), "movdqu\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVU_P_RR>, VEX; def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src), "movdqu\t{$src, $dst|$dst, $src}", [], - 
IIC_SSE_MOVU_P_RR>, - VEX; + IIC_SSE_MOVU_P_RR>, VEX, VEX_L; } let canFoldAsLoad = 1, mayLoad = 1 in { @@ -3428,14 +3531,14 @@ def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), VEX; def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src), "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>, - VEX; + VEX, VEX_L; let Predicates = [HasAVX] in { def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>, XS, VEX; def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src), "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>, - XS, VEX; + XS, VEX, VEX_L; } } @@ -3447,14 +3550,14 @@ def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs), def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src), "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>, - VEX; + VEX, VEX_L; let Predicates = [HasAVX] in { def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>, XS, VEX; def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src), "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>, - XS, VEX; + XS, VEX, VEX_L; } } @@ -3464,7 +3567,7 @@ def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movdqu\t{$src, $dst|$dst, $src}", - [], IIC_SSE_MOVU_P_RR>, XS, Requires<[HasSSE2]>; + [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>; // For Disassembler let isCodeGenOnly = 1 in { @@ -3474,7 +3577,7 @@ def MOVDQArr_REV : PDI<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src), def MOVDQUrr_REV : I<0x7F, MRMDestReg, (outs VR128:$dst), (ins VR128:$src), "movdqu\t{$src, $dst|$dst, $src}", - [], IIC_SSE_MOVU_P_RR>, XS, Requires<[HasSSE2]>; + [], IIC_SSE_MOVU_P_RR>, XS, Requires<[UseSSE2]>; } let canFoldAsLoad = 1, mayLoad = 1 in { @@ -3486,7 +3589,7 @@ def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "movdqu\t{$src, $dst|$dst, $src}", [/*(set VR128:$dst, (loadv2i64 addr:$src))*/], IIC_SSE_MOVU_P_RM>, - XS, Requires<[HasSSE2]>; + XS, Requires<[UseSSE2]>; } let mayStore = 1 in { @@ -3498,7 +3601,7 @@ def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), "movdqu\t{$src, $dst|$dst, $src}", [/*(store (v2i64 VR128:$src), addr:$dst)*/], IIC_SSE_MOVU_P_MR>, - XS, Requires<[HasSSE2]>; + XS, Requires<[UseSSE2]>; } // Intrinsic forms of MOVDQU load and store @@ -3512,7 +3615,7 @@ def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), "movdqu\t{$src, $dst|$dst, $src}", [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)], IIC_SSE_MOVU_P_MR>, - XS, Requires<[HasSSE2]>; + XS, Requires<[UseSSE2]>; } // ExeDomain = SSEPackedInt @@ -3690,82 +3793,82 @@ defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, let Predicates = [HasAVX2] in { defm VPADDBY : PDI_binop_rm<0xFC, "vpaddb", add, v32i8, VR256, memopv4i64, - i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPADDWY : PDI_binop_rm<0xFD, "vpaddw", add, v16i16, VR256, memopv4i64, - i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPADDDY : PDI_binop_rm<0xFE, "vpaddd", add, v8i32, VR256, memopv4i64, - i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + i256mem, SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPADDQY : PDI_binop_rm<0xD4, "vpaddq", add, v4i64, 
VR256, memopv4i64, - i256mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V; + i256mem, SSE_INTALUQ_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPMULLWY : PDI_binop_rm<0xD5, "vpmullw", mul, v16i16, VR256, memopv4i64, - i256mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V; + i256mem, SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPSUBBY : PDI_binop_rm<0xF8, "vpsubb", sub, v32i8, VR256, memopv4i64, - i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPSUBWY : PDI_binop_rm<0xF9, "vpsubw", sub, v16i16,VR256, memopv4i64, - i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPSUBDY : PDI_binop_rm<0xFA, "vpsubd", sub, v8i32, VR256, memopv4i64, - i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + i256mem, SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPSUBQY : PDI_binop_rm<0xFB, "vpsubq", sub, v4i64, VR256, memopv4i64, - i256mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V; + i256mem, SSE_INTALUQ_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPMULUDQY : PDI_binop_rm2<0xF4, "vpmuludq", X86pmuludq, v4i64, v8i32, VR256, memopv4i64, i256mem, - SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V; + SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L; // Intrinsic forms defm VPSUBSBY : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_avx2_psubs_b, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPSUBSWY : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_avx2_psubs_w, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPSUBUSBY : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_avx2_psubus_b, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPSUBUSWY : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_avx2_psubus_w, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPADDSBY : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_avx2_padds_b, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPADDSWY : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_avx2_padds_w, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPADDUSBY : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_avx2_paddus_b, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPADDUSWY : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_avx2_paddus_w, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPMULHUWY : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_avx2_pmulhu_w, VR256, memopv4i64, i256mem, - SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V; + SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPMULHWY : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_avx2_pmulh_w, VR256, memopv4i64, i256mem, - SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V; + SSE_INTMUL_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPMADDWDY : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_avx2_pmadd_wd, VR256, memopv4i64, i256mem, - SSE_PMADD, 1, 0>, VEX_4V; + SSE_PMADD, 1, 0>, VEX_4V, VEX_L; defm VPAVGBY : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_avx2_pavg_b, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPAVGWY : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_avx2_pavg_w, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm 
VPMINUBY : PDI_binop_rm_int<0xDA, "vpminub", int_x86_avx2_pminu_b, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPMINSWY : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_avx2_pmins_w, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPMAXUBY : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_avx2_pmaxu_b, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPMAXSWY : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_avx2_pmaxs_w, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPSADBWY : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_avx2_psad_bw, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; } let Constraints = "$src1 = $dst" in { @@ -3901,30 +4004,30 @@ let ExeDomain = SSEPackedInt in { let Predicates = [HasAVX2] in { defm VPSLLWY : PDI_binop_rmi<0xF1, 0x71, MRM6r, "vpsllw", X86vshl, X86vshli, VR256, v16i16, v8i16, bc_v8i16, - SSE_INTSHIFT_ITINS_P, 0>, VEX_4V; + SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L; defm VPSLLDY : PDI_binop_rmi<0xF2, 0x72, MRM6r, "vpslld", X86vshl, X86vshli, VR256, v8i32, v4i32, bc_v4i32, - SSE_INTSHIFT_ITINS_P, 0>, VEX_4V; + SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L; defm VPSLLQY : PDI_binop_rmi<0xF3, 0x73, MRM6r, "vpsllq", X86vshl, X86vshli, VR256, v4i64, v2i64, bc_v2i64, - SSE_INTSHIFT_ITINS_P, 0>, VEX_4V; + SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L; defm VPSRLWY : PDI_binop_rmi<0xD1, 0x71, MRM2r, "vpsrlw", X86vsrl, X86vsrli, VR256, v16i16, v8i16, bc_v8i16, - SSE_INTSHIFT_ITINS_P, 0>, VEX_4V; + SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L; defm VPSRLDY : PDI_binop_rmi<0xD2, 0x72, MRM2r, "vpsrld", X86vsrl, X86vsrli, VR256, v8i32, v4i32, bc_v4i32, - SSE_INTSHIFT_ITINS_P, 0>, VEX_4V; + SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L; defm VPSRLQY : PDI_binop_rmi<0xD3, 0x73, MRM2r, "vpsrlq", X86vsrl, X86vsrli, VR256, v4i64, v2i64, bc_v2i64, - SSE_INTSHIFT_ITINS_P, 0>, VEX_4V; + SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L; defm VPSRAWY : PDI_binop_rmi<0xE1, 0x71, MRM4r, "vpsraw", X86vsra, X86vsrai, VR256, v16i16, v8i16, bc_v8i16, - SSE_INTSHIFT_ITINS_P, 0>, VEX_4V; + SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L; defm VPSRADY : PDI_binop_rmi<0xE2, 0x72, MRM4r, "vpsrad", X86vsra, X86vsrai, VR256, v8i32, v4i32, bc_v4i32, - SSE_INTSHIFT_ITINS_P, 0>, VEX_4V; + SSE_INTSHIFT_ITINS_P, 0>, VEX_4V, VEX_L; let ExeDomain = SSEPackedInt in { // 256-bit logical shifts. @@ -3933,13 +4036,13 @@ let ExeDomain = SSEPackedInt in { "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR256:$dst, (int_x86_avx2_psll_dq_bs VR256:$src1, imm:$src2))]>, - VEX_4V; + VEX_4V, VEX_L; def VPSRLDQYri : PDIi8<0x73, MRM3r, (outs VR256:$dst), (ins VR256:$src1, i32i8imm:$src2), "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR256:$dst, (int_x86_avx2_psrl_dq_bs VR256:$src1, imm:$src2))]>, - VEX_4V; + VEX_4V, VEX_L; // PSRADQYri doesn't exist in SSE[1-3]. 
} } // Predicates = [HasAVX2] @@ -4010,7 +4113,7 @@ let Predicates = [HasAVX2] in { (VPSRLDQYri VR256:$src1, (BYTE_imm imm:$src2))>; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2), (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2))>; def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2), @@ -4053,22 +4156,22 @@ let Predicates = [HasAVX] in { let Predicates = [HasAVX2] in { defm VPCMPEQBY : PDI_binop_rm<0x74, "vpcmpeqb", X86pcmpeq, v32i8, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPCMPEQWY : PDI_binop_rm<0x75, "vpcmpeqw", X86pcmpeq, v16i16, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPCMPEQDY : PDI_binop_rm<0x76, "vpcmpeqd", X86pcmpeq, v8i32, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 1, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 1, 0>, VEX_4V, VEX_L; defm VPCMPGTBY : PDI_binop_rm<0x64, "vpcmpgtb", X86pcmpgt, v32i8, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPCMPGTWY : PDI_binop_rm<0x65, "vpcmpgtw", X86pcmpgt, v16i16, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPCMPGTDY : PDI_binop_rm<0x66, "vpcmpgtd", X86pcmpgt, v8i32, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; } let Constraints = "$src1 = $dst" in { @@ -4111,13 +4214,13 @@ defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128, let Predicates = [HasAVX2] in { defm VPACKSSWBY : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_avx2_packsswb, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPACKSSDWY : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_avx2_packssdw, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; defm VPACKUSWBY : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_avx2_packuswb, VR256, memopv4i64, i256mem, - SSE_INTALU_ITINS_P, 0, 0>, VEX_4V; + SSE_INTALU_ITINS_P, 0, 0>, VEX_4V, VEX_L; } let Constraints = "$src1 = $dst" in { @@ -4187,12 +4290,15 @@ let Predicates = [HasAVX] in { } let Predicates = [HasAVX2] in { - defm VPSHUFD : sse2_pshuffle_y<"vpshufd", v8i32, X86PShufd>, TB, OpSize, VEX; - defm VPSHUFHW : sse2_pshuffle_y<"vpshufhw", v16i16, X86PShufhw>, XS, VEX; - defm VPSHUFLW : sse2_pshuffle_y<"vpshuflw", v16i16, X86PShuflw>, XD, VEX; + defm VPSHUFD : sse2_pshuffle_y<"vpshufd", v8i32, X86PShufd>, + TB, OpSize, VEX,VEX_L; + defm VPSHUFHW : sse2_pshuffle_y<"vpshufhw", v16i16, X86PShufhw>, + XS, VEX, VEX_L; + defm VPSHUFLW : sse2_pshuffle_y<"vpshuflw", v16i16, X86PShuflw>, + XD, VEX, VEX_L; } -let Predicates = [HasSSE2] in { +let Predicates = [UseSSE2] in { let AddedComplexity = 5 in defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, X86PShufd>, TB, OpSize; @@ -4268,22 +4374,22 @@ let Predicates = [HasAVX] in { let Predicates = [HasAVX2] in { defm VPUNPCKLBW : sse2_unpack_y<0x60, "vpunpcklbw", v32i8, X86Unpckl, - bc_v32i8>, VEX_4V; + bc_v32i8>, VEX_4V, VEX_L; defm VPUNPCKLWD : sse2_unpack_y<0x61, "vpunpcklwd", v16i16, X86Unpckl, - bc_v16i16>, VEX_4V; + bc_v16i16>, VEX_4V, VEX_L; defm VPUNPCKLDQ : sse2_unpack_y<0x62, "vpunpckldq", v8i32, X86Unpckl, - bc_v8i32>, VEX_4V; + bc_v8i32>, VEX_4V, VEX_L; defm VPUNPCKLQDQ : sse2_unpack_y<0x6C, "vpunpcklqdq", v4i64, X86Unpckl, - bc_v4i64>, 
VEX_4V; + bc_v4i64>, VEX_4V, VEX_L; defm VPUNPCKHBW : sse2_unpack_y<0x68, "vpunpckhbw", v32i8, X86Unpckh, - bc_v32i8>, VEX_4V; + bc_v32i8>, VEX_4V, VEX_L; defm VPUNPCKHWD : sse2_unpack_y<0x69, "vpunpckhwd", v16i16, X86Unpckh, - bc_v16i16>, VEX_4V; + bc_v16i16>, VEX_4V, VEX_L; defm VPUNPCKHDQ : sse2_unpack_y<0x6A, "vpunpckhdq", v8i32, X86Unpckh, - bc_v8i32>, VEX_4V; + bc_v8i32>, VEX_4V, VEX_L; defm VPUNPCKHQDQ : sse2_unpack_y<0x6D, "vpunpckhqdq", v4i64, X86Unpckh, - bc_v4i64>, VEX_4V; + bc_v4i64>, VEX_4V, VEX_L; } let Constraints = "$src1 = $dst" in { @@ -4307,28 +4413,6 @@ let Constraints = "$src1 = $dst" in { } } // ExeDomain = SSEPackedInt -// Patterns for using AVX1 instructions with integer vectors -// Here to give AVX2 priority -let Predicates = [HasAVX] in { - def : Pat<(v8i32 (X86Unpckl VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))), - (VUNPCKLPSYrm VR256:$src1, addr:$src2)>; - def : Pat<(v8i32 (X86Unpckl VR256:$src1, VR256:$src2)), - (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>; - def : Pat<(v8i32 (X86Unpckh VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)))), - (VUNPCKHPSYrm VR256:$src1, addr:$src2)>; - def : Pat<(v8i32 (X86Unpckh VR256:$src1, VR256:$src2)), - (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>; - - def : Pat<(v4i64 (X86Unpckl VR256:$src1, (memopv4i64 addr:$src2))), - (VUNPCKLPDYrm VR256:$src1, addr:$src2)>; - def : Pat<(v4i64 (X86Unpckl VR256:$src1, VR256:$src2)), - (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>; - def : Pat<(v4i64 (X86Unpckh VR256:$src1, (memopv4i64 addr:$src2))), - (VUNPCKHPDYrm VR256:$src1, addr:$src2)>; - def : Pat<(v4i64 (X86Unpckh VR256:$src1, VR256:$src2)), - (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>; -} - //===---------------------------------------------------------------------===// // SSE2 - Packed Integer Extract and Insert //===---------------------------------------------------------------------===// @@ -4377,7 +4461,7 @@ let Predicates = [HasAVX] in { } let Constraints = "$src1 = $dst" in - defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>; + defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[UseSSE2]>; } // ExeDomain = SSEPackedInt @@ -4397,9 +4481,9 @@ def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), let Predicates = [HasAVX2] in { def VPMOVMSKBYrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src), "pmovmskb\t{$src, $dst|$dst, $src}", - [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX; + [(set GR32:$dst, (int_x86_avx2_pmovmskb VR256:$src))]>, VEX, VEX_L; def VPMOVMSKBYr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src), - "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX; + "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L; } def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), @@ -4538,7 +4622,7 @@ def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src), // Move Packed Doubleword Int first element to Doubleword Int // def VMOVPQIto64rr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), - "mov{d|q}\t{$src, $dst|$dst, $src}", + "vmov{d|q}\t{$src, $dst|$dst, $src}", [(set GR64:$dst, (vector_extract (v2i64 VR128:$src), (iPTR 0)))], IIC_SSE_MOVD_ToGP>, @@ -4654,14 +4738,14 @@ let Predicates = [HasAVX] in { } // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext. 
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef, - (v4i32 (scalar_to_vector GR32:$src)),(i32 0)))), + (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))), (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>; def : Pat<(v4i64 (X86vzmovl (insert_subvector undef, - (v2i64 (scalar_to_vector GR64:$src)),(i32 0)))), + (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))), (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>; } -let Predicates = [HasSSE2], AddedComplexity = 20 in { +let Predicates = [UseSSE2], AddedComplexity = 20 in { def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))), (MOVZDI2PDIrm addr:$src)>; def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))), @@ -4701,7 +4785,7 @@ def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), [(set VR128:$dst, (v2i64 (scalar_to_vector (loadi64 addr:$src))))], IIC_SSE_MOVDQ>, XS, - Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix + Requires<[UseSSE2]>; // SSE2 instruction with XS Prefix //===---------------------------------------------------------------------===// // Move Packed Quadword Int to Quadword Int @@ -4744,7 +4828,7 @@ def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), (v2i64 (X86vzmovl (v2i64 (scalar_to_vector (loadi64 addr:$src))))))], IIC_SSE_MOVDQ>, - XS, Requires<[HasSSE2]>; + XS, Requires<[UseSSE2]>; let Predicates = [HasAVX], AddedComplexity = 20 in { def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))), @@ -4755,7 +4839,7 @@ let Predicates = [HasAVX], AddedComplexity = 20 in { (VMOVZQI2PQIrm addr:$src)>; } -let Predicates = [HasSSE2], AddedComplexity = 20 in { +let Predicates = [UseSSE2], AddedComplexity = 20 in { def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))), (MOVZQI2PQIrm addr:$src)>; def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))), @@ -4785,7 +4869,7 @@ def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "movq\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))], IIC_SSE_MOVQ_RR>, - XS, Requires<[HasSSE2]>; + XS, Requires<[UseSSE2]>; let AddedComplexity = 20 in def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), @@ -4800,7 +4884,7 @@ def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), [(set VR128:$dst, (v2i64 (X86vzmovl (loadv2i64 addr:$src))))], IIC_SSE_MOVDQ>, - XS, Requires<[HasSSE2]>; + XS, Requires<[UseSSE2]>; } let AddedComplexity = 20 in { @@ -4810,7 +4894,7 @@ let AddedComplexity = 20 in { def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))), (VMOVZPQILo2PQIrr VR128:$src)>; } - let Predicates = [HasSSE2] in { + let Predicates = [UseSSE2] in { def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))), (MOVZPQILo2PQIrm addr:$src)>; def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))), @@ -4862,9 +4946,9 @@ let Predicates = [HasAVX] in { defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup", v4f32, VR128, memopv4f32, f128mem>, VEX; defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup", - v8f32, VR256, memopv8f32, f256mem>, VEX; + v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L; defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup", - v8f32, VR256, memopv8f32, f256mem>, VEX; + v8f32, VR256, memopv8f32, f256mem>, VEX, VEX_L; } defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128, memopv4f32, f128mem>; @@ -4890,7 +4974,7 @@ let Predicates = [HasAVX] in { (VMOVSLDUPYrm addr:$src)>; } -let Predicates = [HasSSE3] in { +let Predicates = [UseSSE3] in { def : 
Pat<(v4i32 (X86Movshdup VR128:$src)), (MOVSHDUPrr VR128:$src)>; def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))), @@ -4932,7 +5016,7 @@ def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), let Predicates = [HasAVX] in { defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX; - defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX; + defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX, VEX_L; } defm MOVDDUP : sse3_replicate_dfp<"movddup">; @@ -4959,7 +5043,7 @@ let Predicates = [HasAVX] in { (VMOVDDUPYrr VR256:$src)>; } -let Predicates = [HasSSE3] in { +let Predicates = [UseSSE3] in { def : Pat<(X86Movddup (memopv2f64 addr:$src)), (MOVDDUPrm addr:$src)>; def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))), @@ -4981,7 +5065,8 @@ let Predicates = [HasAVX] in { [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX; def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src), "vlddqu\t{$src, $dst|$dst, $src}", - [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX; + [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, + VEX, VEX_L; } def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "lddqu\t{$src, $dst|$dst, $src}", @@ -5014,16 +5099,16 @@ let Predicates = [HasAVX] in { defm VADDSUBPS : sse3_addsub, TB, XD, VEX_4V; defm VADDSUBPSY : sse3_addsub, TB, XD, VEX_4V; + f256mem, SSE_ALU_F32P, 0>, TB, XD, VEX_4V, VEX_L; } let ExeDomain = SSEPackedDouble in { defm VADDSUBPD : sse3_addsub, TB, OpSize, VEX_4V; defm VADDSUBPDY : sse3_addsub, TB, OpSize, VEX_4V; + f256mem, SSE_ALU_F64P, 0>, TB, OpSize, VEX_4V, VEX_L; } } -let Constraints = "$src1 = $dst", Predicates = [HasSSE3] in { +let Constraints = "$src1 = $dst", Predicates = [UseSSE3] in { let ExeDomain = SSEPackedSingle in defm ADDSUBPS : sse3_addsub, TB, XD; @@ -5075,9 +5160,9 @@ let Predicates = [HasAVX] in { defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem, X86fhsub, 0>, VEX_4V; defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem, - X86fhadd, 0>, VEX_4V; + X86fhadd, 0>, VEX_4V, VEX_L; defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem, - X86fhsub, 0>, VEX_4V; + X86fhsub, 0>, VEX_4V, VEX_L; } let ExeDomain = SSEPackedDouble in { defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem, @@ -5085,9 +5170,9 @@ let Predicates = [HasAVX] in { defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem, X86fhsub, 0>, VEX_4V; defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem, - X86fhadd, 0>, VEX_4V; + X86fhadd, 0>, VEX_4V, VEX_L; defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem, - X86fhsub, 0>, VEX_4V; + X86fhsub, 0>, VEX_4V, VEX_L; } } @@ -5153,11 +5238,11 @@ let Predicates = [HasAVX] in { let Predicates = [HasAVX2] in { defm VPABSB : SS3I_unop_rm_int_y<0x1C, "vpabsb", - int_x86_avx2_pabs_b>, VEX; + int_x86_avx2_pabs_b>, VEX, VEX_L; defm VPABSW : SS3I_unop_rm_int_y<0x1D, "vpabsw", - int_x86_avx2_pabs_w>, VEX; + int_x86_avx2_pabs_w>, VEX, VEX_L; defm VPABSD : SS3I_unop_rm_int_y<0x1E, "vpabsd", - int_x86_avx2_pabs_d>, VEX; + int_x86_avx2_pabs_d>, VEX, VEX_L; } defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", @@ -5296,37 +5381,37 @@ let ImmT = NoImm, Predicates = [HasAVX2] in { let isCommutable = 0 in { defm VPHADDWY : SS3I_binop_rm<0x01, "vphaddw", X86hadd, v16i16, VR256, memopv4i64, i256mem, - SSE_PHADDSUBW, 0>, VEX_4V; + SSE_PHADDSUBW, 0>, VEX_4V, VEX_L; defm VPHADDDY : SS3I_binop_rm<0x02, "vphaddd", X86hadd, v8i32, VR256, memopv4i64, i256mem, - SSE_PHADDSUBW, 0>, VEX_4V; + SSE_PHADDSUBW, 0>, 
VEX_4V, VEX_L; defm VPHSUBWY : SS3I_binop_rm<0x05, "vphsubw", X86hsub, v16i16, VR256, memopv4i64, i256mem, - SSE_PHADDSUBW, 0>, VEX_4V; + SSE_PHADDSUBW, 0>, VEX_4V, VEX_L; defm VPHSUBDY : SS3I_binop_rm<0x06, "vphsubd", X86hsub, v8i32, VR256, memopv4i64, i256mem, - SSE_PHADDSUBW, 0>, VEX_4V; + SSE_PHADDSUBW, 0>, VEX_4V, VEX_L; defm VPSIGNBY : SS3I_binop_rm<0x08, "vpsignb", X86psign, v32i8, VR256, memopv4i64, i256mem, - SSE_PHADDSUBW, 0>, VEX_4V; + SSE_PHADDSUBW, 0>, VEX_4V, VEX_L; defm VPSIGNWY : SS3I_binop_rm<0x09, "vpsignw", X86psign, v16i16, VR256, memopv4i64, i256mem, - SSE_PHADDSUBW, 0>, VEX_4V; + SSE_PHADDSUBW, 0>, VEX_4V, VEX_L; defm VPSIGNDY : SS3I_binop_rm<0x0A, "vpsignd", X86psign, v8i32, VR256, memopv4i64, i256mem, - SSE_PHADDSUBW, 0>, VEX_4V; + SSE_PHADDSUBW, 0>, VEX_4V, VEX_L; defm VPSHUFBY : SS3I_binop_rm<0x00, "vpshufb", X86pshufb, v32i8, VR256, memopv4i64, i256mem, - SSE_PHADDSUBW, 0>, VEX_4V; + SSE_PHADDSUBW, 0>, VEX_4V, VEX_L; defm VPHADDSW : SS3I_binop_rm_int_y<0x03, "vphaddsw", - int_x86_avx2_phadd_sw>, VEX_4V; + int_x86_avx2_phadd_sw>, VEX_4V, VEX_L; defm VPHSUBSW : SS3I_binop_rm_int_y<0x07, "vphsubsw", - int_x86_avx2_phsub_sw>, VEX_4V; + int_x86_avx2_phsub_sw>, VEX_4V, VEX_L; defm VPMADDUBSW : SS3I_binop_rm_int_y<0x04, "vpmaddubsw", - int_x86_avx2_pmadd_ub_sw>, VEX_4V; + int_x86_avx2_pmadd_ub_sw>, VEX_4V, VEX_L; } defm VPMULHRSW : SS3I_binop_rm_int_y<0x0B, "vpmulhrsw", - int_x86_avx2_pmul_hr_sw>, VEX_4V; + int_x86_avx2_pmul_hr_sw>, VEX_4V, VEX_L; } // None of these have i8 immediate fields. @@ -5405,8 +5490,8 @@ multiclass ssse3_palign_y { let Predicates = [HasAVX] in defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V; let Predicates = [HasAVX2] in - defm VPALIGN : ssse3_palign_y<"vpalignr", 0>, VEX_4V; -let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in + defm VPALIGN : ssse3_palign_y<"vpalignr", 0>, VEX_4V, VEX_L; +let Constraints = "$src1 = $dst", Predicates = [UseSSSE3] in defm PALIGN : ssse3_palign<"palignr">; let Predicates = [HasAVX2] in { @@ -5431,7 +5516,7 @@ def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))), (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>; } -let Predicates = [HasSSSE3] in { +let Predicates = [UseSSSE3] in { def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))), (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>; def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))), @@ -5512,17 +5597,17 @@ defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>, let Predicates = [HasAVX2] in { defm VPMOVSXBW : SS41I_binop_rm_int16_y<0x20, "vpmovsxbw", - int_x86_avx2_pmovsxbw>, VEX; + int_x86_avx2_pmovsxbw>, VEX, VEX_L; defm VPMOVSXWD : SS41I_binop_rm_int16_y<0x23, "vpmovsxwd", - int_x86_avx2_pmovsxwd>, VEX; + int_x86_avx2_pmovsxwd>, VEX, VEX_L; defm VPMOVSXDQ : SS41I_binop_rm_int16_y<0x25, "vpmovsxdq", - int_x86_avx2_pmovsxdq>, VEX; + int_x86_avx2_pmovsxdq>, VEX, VEX_L; defm VPMOVZXBW : SS41I_binop_rm_int16_y<0x30, "vpmovzxbw", - int_x86_avx2_pmovzxbw>, VEX; + int_x86_avx2_pmovzxbw>, VEX, VEX_L; defm VPMOVZXWD : SS41I_binop_rm_int16_y<0x33, "vpmovzxwd", - int_x86_avx2_pmovzxwd>, VEX; + int_x86_avx2_pmovzxwd>, VEX, VEX_L; defm VPMOVZXDQ : SS41I_binop_rm_int16_y<0x35, "vpmovzxdq", - int_x86_avx2_pmovzxdq>, VEX; + int_x86_avx2_pmovzxdq>, VEX, VEX_L; } defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>; @@ -5538,64 +5623,88 @@ let Predicates = [HasAVX] in { (VPMOVSXBWrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)), 
(VPMOVSXBWrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))), + (VPMOVSXBWrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)), (VPMOVSXWDrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)), (VPMOVSXWDrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))), + (VPMOVSXWDrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)), (VPMOVSXDQrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)), (VPMOVSXDQrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))), + (VPMOVSXDQrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)), (VPMOVZXBWrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)), (VPMOVZXBWrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))), + (VPMOVZXBWrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)), (VPMOVZXWDrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)), (VPMOVZXWDrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))), + (VPMOVZXWDrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)), (VPMOVZXDQrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)), (VPMOVZXDQrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))), + (VPMOVZXDQrm addr:$src)>; } -let Predicates = [HasSSE41] in { +let Predicates = [UseSSE41] in { // Common patterns involving scalar load. def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)), (PMOVSXBWrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)), (PMOVSXBWrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovsxbw (bc_v16i8 (loadv2i64 addr:$src))), + (PMOVSXBWrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)), (PMOVSXWDrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)), (PMOVSXWDrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovsxwd (bc_v8i16 (loadv2i64 addr:$src))), + (PMOVSXWDrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)), (PMOVSXDQrm addr:$src)>; def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)), (PMOVSXDQrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovsxdq (bc_v4i32 (loadv2i64 addr:$src))), + (PMOVSXDQrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)), (PMOVZXBWrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)), (PMOVZXBWrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovzxbw (bc_v16i8 (loadv2i64 addr:$src))), + (PMOVZXBWrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)), (PMOVZXWDrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)), (PMOVZXWDrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovzxwd (bc_v8i16 (loadv2i64 addr:$src))), + (PMOVZXWDrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)), (PMOVZXDQrm addr:$src)>; def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)), (PMOVZXDQrm addr:$src)>; + def : Pat<(int_x86_sse41_pmovzxdq (bc_v4i32 (loadv2i64 addr:$src))), + (PMOVZXDQrm addr:$src)>; } let Predicates = [HasAVX2] in { @@ -5615,7 +5724,7 @@ let Predicates = [HasAVX] in { def : Pat<(v4i32 (X86vsmovl (v8i16 VR128:$src))), (VPMOVSXWDrr VR128:$src)>; } -let Predicates = [HasSSE41] in { +let Predicates = [UseSSE41] in { def : Pat<(v2i64 (X86vsmovl (v4i32 VR128:$src))), (PMOVSXDQrr VR128:$src)>; def : Pat<(v4i32 
(X86vsmovl (v8i16 VR128:$src))), (PMOVSXWDrr VR128:$src)>; } @@ -5659,13 +5768,13 @@ defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>, let Predicates = [HasAVX2] in { defm VPMOVSXBD : SS41I_binop_rm_int8_y<0x21, "vpmovsxbd", - int_x86_avx2_pmovsxbd>, VEX; + int_x86_avx2_pmovsxbd>, VEX, VEX_L; defm VPMOVSXWQ : SS41I_binop_rm_int8_y<0x24, "vpmovsxwq", - int_x86_avx2_pmovsxwq>, VEX; + int_x86_avx2_pmovsxwq>, VEX, VEX_L; defm VPMOVZXBD : SS41I_binop_rm_int8_y<0x31, "vpmovzxbd", - int_x86_avx2_pmovzxbd>, VEX; + int_x86_avx2_pmovzxbd>, VEX, VEX_L; defm VPMOVZXWQ : SS41I_binop_rm_int8_y<0x34, "vpmovzxwq", - int_x86_avx2_pmovzxwq>, VEX; + int_x86_avx2_pmovzxwq>, VEX, VEX_L; } defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>; @@ -5686,7 +5795,7 @@ let Predicates = [HasAVX] in { (VPMOVZXWQrm addr:$src)>; } -let Predicates = [HasSSE41] in { +let Predicates = [UseSSE41] in { // Common patterns involving scalar load def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)), (PMOVSXBDrm addr:$src)>; @@ -5734,9 +5843,9 @@ defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>, } let Predicates = [HasAVX2] in { defm VPMOVSXBQ : SS41I_binop_rm_int4_y<0x22, "vpmovsxbq", - int_x86_avx2_pmovsxbq>, VEX; + int_x86_avx2_pmovsxbq>, VEX, VEX_L; defm VPMOVZXBQ : SS41I_binop_rm_int4_y<0x32, "vpmovzxbq", - int_x86_avx2_pmovzxbq>, VEX; + int_x86_avx2_pmovzxbq>, VEX, VEX_L; } defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>; defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>; @@ -5754,7 +5863,7 @@ let Predicates = [HasAVX] in { (VPMOVZXBQrm addr:$src)>; } -let Predicates = [HasSSE41] in { +let Predicates = [UseSSE41] in { // Common patterns involving scalar load def : Pat<(int_x86_sse41_pmovsxbq (bitconvert (v4i32 (X86vzmovl @@ -5767,6 +5876,100 @@ let Predicates = [HasSSE41] in { (PMOVZXBQrm addr:$src)>; } +let Predicates = [HasAVX2] in { + def : Pat<(v16i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWYrr VR128:$src)>; + def : Pat<(v8i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDYrr VR128:$src)>; + def : Pat<(v4i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQYrr VR128:$src)>; + + def : Pat<(v8i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDYrr VR128:$src)>; + def : Pat<(v4i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQYrr VR128:$src)>; + + def : Pat<(v4i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQYrr VR128:$src)>; + + def : Pat<(v16i16 (X86vzext (v32i8 VR256:$src))), + (VPMOVZXBWYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>; + def : Pat<(v8i32 (X86vzext (v32i8 VR256:$src))), + (VPMOVZXBDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>; + def : Pat<(v4i64 (X86vzext (v32i8 VR256:$src))), + (VPMOVZXBQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>; + + def : Pat<(v8i32 (X86vzext (v16i16 VR256:$src))), + (VPMOVZXWDYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>; + def : Pat<(v4i64 (X86vzext (v16i16 VR256:$src))), + (VPMOVZXWQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>; + + def : Pat<(v4i64 (X86vzext (v8i32 VR256:$src))), + (VPMOVZXDQYrr (EXTRACT_SUBREG VR256:$src, sub_xmm))>; +} + +let Predicates = [HasAVX] in { + def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBWrr VR128:$src)>; + def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBDrr VR128:$src)>; + def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (VPMOVZXBQrr VR128:$src)>; + + def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWDrr VR128:$src)>; + def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (VPMOVZXWQrr VR128:$src)>; + + def : 
Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (VPMOVZXDQrr VR128:$src)>; + + def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))), + (VPMOVZXBWrm addr:$src)>; + def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))), + (VPMOVZXBWrm addr:$src)>; + def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))), + (VPMOVZXBDrm addr:$src)>; + def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))), + (VPMOVZXBQrm addr:$src)>; + + def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))), + (VPMOVZXWDrm addr:$src)>; + def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))), + (VPMOVZXWDrm addr:$src)>; + def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))), + (VPMOVZXWQrm addr:$src)>; + + def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))), + (VPMOVZXDQrm addr:$src)>; + def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))), + (VPMOVZXDQrm addr:$src)>; + def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))), + (VPMOVZXDQrm addr:$src)>; +} + +let Predicates = [UseSSE41] in { + def : Pat<(v8i16 (X86vzext (v16i8 VR128:$src))), (PMOVZXBWrr VR128:$src)>; + def : Pat<(v4i32 (X86vzext (v16i8 VR128:$src))), (PMOVZXBDrr VR128:$src)>; + def : Pat<(v2i64 (X86vzext (v16i8 VR128:$src))), (PMOVZXBQrr VR128:$src)>; + + def : Pat<(v4i32 (X86vzext (v8i16 VR128:$src))), (PMOVZXWDrr VR128:$src)>; + def : Pat<(v2i64 (X86vzext (v8i16 VR128:$src))), (PMOVZXWQrr VR128:$src)>; + + def : Pat<(v2i64 (X86vzext (v4i32 VR128:$src))), (PMOVZXDQrr VR128:$src)>; + + def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))), + (PMOVZXBWrm addr:$src)>; + def : Pat<(v8i16 (X86vzext (v16i8 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))), + (PMOVZXBWrm addr:$src)>; + def : Pat<(v4i32 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))), + (PMOVZXBDrm addr:$src)>; + def : Pat<(v2i64 (X86vzext (v16i8 (bitconvert (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))), + (PMOVZXBQrm addr:$src)>; + + def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))), + (PMOVZXWDrm addr:$src)>; + def : Pat<(v4i32 (X86vzext (v8i16 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))), + (PMOVZXWDrm addr:$src)>; + def : Pat<(v2i64 (X86vzext (v8i16 (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))), + (PMOVZXWQrm addr:$src)>; + + def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))), + (PMOVZXDQrm addr:$src)>; + def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2f64 (scalar_to_vector (loadf64 addr:$src))))))), + (PMOVZXDQrm addr:$src)>; + def : Pat<(v2i64 (X86vzext (v4i32 (bitconvert (v2i64 (X86vzload addr:$src)))))), + (PMOVZXDQrm addr:$src)>; +} + //===----------------------------------------------------------------------===// // SSE4.1 - Extract Instructions //===----------------------------------------------------------------------===// @@ -5900,7 +6103,7 @@ def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))), addr:$dst), (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>, - Requires<[HasSSE41]>; + Requires<[UseSSE41]>; 
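// Editor's note (clarification, not part of the upstream patch): throughout this
// change, patterns previously guarded by HasSSE3/HasSSSE3/HasSSE41/HasSSE42 are
// re-guarded with the corresponding UseSSE* predicates.  The Use* predicates are
// intended to hold only when that SSE level is available *and* AVX is not, so the
// legacy-encoded SSE patterns and the VEX-encoded AVX patterns above can no longer
// both match the same DAG node.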
//===----------------------------------------------------------------------===// // SSE4.1 - Insert Instructions @@ -6147,7 +6350,7 @@ let Predicates = [HasAVX] in { defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256, memopv8f32, memopv4f64, int_x86_avx_round_ps_256, - int_x86_avx_round_pd_256>, VEX; + int_x86_avx_round_pd_256>, VEX, VEX_L; defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround", int_x86_sse41_round_ss, int_x86_sse41_round_sd, 0>, VEX_4V, VEX_LIG; @@ -6172,6 +6375,15 @@ let Predicates = [HasAVX] in { (VROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>; def : Pat<(f64 (ftrunc FR64:$src)), (VROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>; + + def : Pat<(v4f32 (ffloor VR128:$src)), + (VROUNDPSr VR128:$src, (i32 0x1))>; + def : Pat<(v2f64 (ffloor VR128:$src)), + (VROUNDPDr VR128:$src, (i32 0x1))>; + def : Pat<(v8f32 (ffloor VR256:$src)), + (VROUNDYPSr VR256:$src, (i32 0x1))>; + def : Pat<(v4f64 (ffloor VR256:$src)), + (VROUNDYPDr VR256:$src, (i32 0x1))>; } defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128, @@ -6181,26 +6393,33 @@ let Constraints = "$src1 = $dst" in defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round", int_x86_sse41_round_ss, int_x86_sse41_round_sd>; -def : Pat<(ffloor FR32:$src), - (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>; -def : Pat<(f64 (ffloor FR64:$src)), - (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>; -def : Pat<(f32 (fnearbyint FR32:$src)), - (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>; -def : Pat<(f64 (fnearbyint FR64:$src)), - (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>; -def : Pat<(f32 (fceil FR32:$src)), - (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>; -def : Pat<(f64 (fceil FR64:$src)), - (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>; -def : Pat<(f32 (frint FR32:$src)), - (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>; -def : Pat<(f64 (frint FR64:$src)), - (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>; -def : Pat<(f32 (ftrunc FR32:$src)), - (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>; -def : Pat<(f64 (ftrunc FR64:$src)), - (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>; +let Predicates = [UseSSE41] in { + def : Pat<(ffloor FR32:$src), + (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x1))>; + def : Pat<(f64 (ffloor FR64:$src)), + (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x1))>; + def : Pat<(f32 (fnearbyint FR32:$src)), + (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0xC))>; + def : Pat<(f64 (fnearbyint FR64:$src)), + (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0xC))>; + def : Pat<(f32 (fceil FR32:$src)), + (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x2))>; + def : Pat<(f64 (fceil FR64:$src)), + (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x2))>; + def : Pat<(f32 (frint FR32:$src)), + (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x4))>; + def : Pat<(f64 (frint FR64:$src)), + (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x4))>; + def : Pat<(f32 (ftrunc FR32:$src)), + (ROUNDSSr (f32 (IMPLICIT_DEF)), FR32:$src, (i32 0x3))>; + def : Pat<(f64 (ftrunc FR64:$src)), + (ROUNDSDr (f64 (IMPLICIT_DEF)), FR64:$src, (i32 0x3))>; + + def : Pat<(v4f32 (ffloor VR128:$src)), + (ROUNDPSr VR128:$src, (i32 0x1))>; + def : Pat<(v2f64 (ffloor VR128:$src)), + (ROUNDPDr VR128:$src, (i32 0x1))>; +} //===----------------------------------------------------------------------===// // SSE4.1 - Packed Bit Test @@ -6221,11 +6440,11 @@ def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), 
def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2), "vptest\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>, - OpSize, VEX; + OpSize, VEX, VEX_L; def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2), "vptest\t{$src2, $src1|$src1, $src2}", [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>, - OpSize, VEX; + OpSize, VEX, VEX_L; } let Defs = [EFLAGS] in { @@ -6254,11 +6473,13 @@ multiclass avx_bittest opc, string OpcodeStr, RegisterClass RC, let Defs = [EFLAGS], Predicates = [HasAVX] in { let ExeDomain = SSEPackedSingle in { defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>; -defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>; +defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>, + VEX_L; } let ExeDomain = SSEPackedDouble in { defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>; -defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>; +defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>, + VEX_L; } } @@ -6338,7 +6559,7 @@ multiclass SS41I_binop_rm_int opc, string OpcodeStr, (bitconvert (memopv2i64 addr:$src2))))]>, OpSize; } -/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator +/// SS41I_binop_rm_int_y - Simple SSE 4.1 binary operator multiclass SS41I_binop_rm_int_y opc, string OpcodeStr, Intrinsic IntId256> { let isCommutable = 1 in @@ -6381,25 +6602,25 @@ let Predicates = [HasAVX] in { let Predicates = [HasAVX2] in { let isCommutable = 0 in defm VPACKUSDW : SS41I_binop_rm_int_y<0x2B, "vpackusdw", - int_x86_avx2_packusdw>, VEX_4V; + int_x86_avx2_packusdw>, VEX_4V, VEX_L; defm VPMINSB : SS41I_binop_rm_int_y<0x38, "vpminsb", - int_x86_avx2_pmins_b>, VEX_4V; + int_x86_avx2_pmins_b>, VEX_4V, VEX_L; defm VPMINSD : SS41I_binop_rm_int_y<0x39, "vpminsd", - int_x86_avx2_pmins_d>, VEX_4V; + int_x86_avx2_pmins_d>, VEX_4V, VEX_L; defm VPMINUD : SS41I_binop_rm_int_y<0x3B, "vpminud", - int_x86_avx2_pminu_d>, VEX_4V; + int_x86_avx2_pminu_d>, VEX_4V, VEX_L; defm VPMINUW : SS41I_binop_rm_int_y<0x3A, "vpminuw", - int_x86_avx2_pminu_w>, VEX_4V; + int_x86_avx2_pminu_w>, VEX_4V, VEX_L; defm VPMAXSB : SS41I_binop_rm_int_y<0x3C, "vpmaxsb", - int_x86_avx2_pmaxs_b>, VEX_4V; + int_x86_avx2_pmaxs_b>, VEX_4V, VEX_L; defm VPMAXSD : SS41I_binop_rm_int_y<0x3D, "vpmaxsd", - int_x86_avx2_pmaxs_d>, VEX_4V; + int_x86_avx2_pmaxs_d>, VEX_4V, VEX_L; defm VPMAXUD : SS41I_binop_rm_int_y<0x3F, "vpmaxud", - int_x86_avx2_pmaxu_d>, VEX_4V; + int_x86_avx2_pmaxu_d>, VEX_4V, VEX_L; defm VPMAXUW : SS41I_binop_rm_int_y<0x3E, "vpmaxuw", - int_x86_avx2_pmaxu_w>, VEX_4V; + int_x86_avx2_pmaxu_w>, VEX_4V, VEX_L; defm VPMULDQ : SS41I_binop_rm_int_y<0x28, "vpmuldq", - int_x86_avx2_pmul_dq>, VEX_4V; + int_x86_avx2_pmul_dq>, VEX_4V, VEX_L; } let Constraints = "$src1 = $dst" in { @@ -6445,9 +6666,9 @@ let Predicates = [HasAVX] in { } let Predicates = [HasAVX2] in { defm VPMULLDY : SS48I_binop_rm<0x40, "vpmulld", mul, v8i32, VR256, - memopv4i64, i256mem, 0>, VEX_4V; + memopv4i64, i256mem, 0>, VEX_4V, VEX_L; defm VPCMPEQQY : SS48I_binop_rm<0x29, "vpcmpeqq", X86pcmpeq, v4i64, VR256, - memopv4i64, i256mem, 0>, VEX_4V; + memopv4i64, i256mem, 0>, VEX_4V, VEX_L; } let Constraints = "$src1 = $dst" in { @@ -6490,13 +6711,15 @@ let Predicates = [HasAVX] in { defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps, VR128, memopv4f32, f128mem, 0>, VEX_4V; defm 
VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps", - int_x86_avx_blend_ps_256, VR256, memopv8f32, f256mem, 0>, VEX_4V; + int_x86_avx_blend_ps_256, VR256, memopv8f32, + f256mem, 0>, VEX_4V, VEX_L; } let ExeDomain = SSEPackedDouble in { defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd, VR128, memopv2f64, f128mem, 0>, VEX_4V; defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd", - int_x86_avx_blend_pd_256, VR256, memopv4f64, f256mem, 0>, VEX_4V; + int_x86_avx_blend_pd_256,VR256, memopv4f64, + f256mem, 0>, VEX_4V, VEX_L; } defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw, VR128, memopv2i64, i128mem, 0>, VEX_4V; @@ -6511,15 +6734,15 @@ let Predicates = [HasAVX] in { VR128, memopv2f64, f128mem, 0>, VEX_4V; let ExeDomain = SSEPackedSingle in defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256, - VR256, memopv8f32, i256mem, 0>, VEX_4V; + VR256, memopv8f32, i256mem, 0>, VEX_4V, VEX_L; } let Predicates = [HasAVX2] in { let isCommutable = 0 in { defm VPBLENDWY : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_avx2_pblendw, - VR256, memopv4i64, i256mem, 0>, VEX_4V; + VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L; defm VMPSADBWY : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_avx2_mpsadbw, - VR256, memopv4i64, i256mem, 0>, VEX_4V; + VR256, memopv4i64, i256mem, 0>, VEX_4V, VEX_L; } } @@ -6570,13 +6793,13 @@ let ExeDomain = SSEPackedDouble in { defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, f128mem, memopv2f64, int_x86_sse41_blendvpd>; defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, f256mem, - memopv4f64, int_x86_avx_blendv_pd_256>; + memopv4f64, int_x86_avx_blendv_pd_256>, VEX_L; } // ExeDomain = SSEPackedDouble let ExeDomain = SSEPackedSingle in { defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, f128mem, memopv4f32, int_x86_sse41_blendvps>; defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, f256mem, - memopv8f32, int_x86_avx_blendv_ps_256>; + memopv8f32, int_x86_avx_blendv_ps_256>, VEX_L; } // ExeDomain = SSEPackedSingle defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem, memopv2i64, int_x86_sse41_pblendvb>; @@ -6584,7 +6807,7 @@ defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem, let Predicates = [HasAVX2] in { defm VPBLENDVBY : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR256, i256mem, - memopv4i64, int_x86_avx2_pblendvb>; + memopv4i64, int_x86_avx2_pblendvb>, VEX_L; } let Predicates = [HasAVX] in { @@ -6687,7 +6910,7 @@ def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}", def : InstAlias<"pblendvb\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}", (PBLENDVBrm0 VR128:$dst, i128mem:$src2)>; -let Predicates = [HasSSE41] in { +let Predicates = [UseSSE41] in { def : Pat<(v16i8 (vselect (v16i8 XMM0), (v16i8 VR128:$src1), (v16i8 VR128:$src2))), (PBLENDVBrr0 VR128:$src2, VR128:$src1)>; @@ -6725,7 +6948,7 @@ let Predicates = [HasAVX2] in def VMOVNTDQAYrm : SS48I<0x2A, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}", [(set VR256:$dst, (int_x86_avx2_movntdqa addr:$src))]>, - OpSize, VEX; + OpSize, VEX, VEX_L; def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "movntdqa\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>, @@ -6761,7 +6984,7 @@ let Predicates = [HasAVX] in let Predicates = [HasAVX2] in defm VPCMPGTQY : SS42I_binop_rm<0x37, "vpcmpgtq", X86pcmpgt, v4i64, VR256, - memopv4i64, i256mem, 0>, VEX_4V; 
+ memopv4i64, i256mem, 0>, VEX_4V, VEX_L; let Constraints = "$src1 = $dst" in defm PCMPGTQ : SS42I_binop_rm<0x37, "pcmpgtq", X86pcmpgt, v2i64, VR128, @@ -6779,34 +7002,31 @@ multiclass pseudo_pcmpistrm { imm:$src3))]>; def MEM : PseudoI<(outs VR128:$dst), (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 - VR128:$src1, (load addr:$src2), imm:$src3))]>; + [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, + (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>; } let Defs = [EFLAGS], usesCustomInserter = 1 in { - let AddedComplexity = 1 in - defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>; - defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>; + defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>; + defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[UseSSE42]>; } -let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1, Predicates = [HasAVX] in { - def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX; +multiclass pcmpistrm_SS42AI { + def rr : SS42AI<0x62, MRMSrcReg, (outs), + (ins VR128:$src1, VR128:$src2, i8imm:$src3), + !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"), + []>, OpSize; let mayLoad = 1 in - def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX; + def rm :SS42AI<0x62, MRMSrcMem, (outs), + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"), + []>, OpSize; } let Defs = [XMM0, EFLAGS], neverHasSideEffects = 1 in { - def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize; - let mayLoad = 1 in - def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize; + let Predicates = [HasAVX] in + defm VPCMPISTRM128 : pcmpistrm_SS42AI<"vpcmpistrm">, VEX; + defm PCMPISTRM128 : pcmpistrm_SS42AI<"pcmpistrm"> ; } // Packed Compare Explicit Length Strings, Return Mask @@ -6817,74 +7037,103 @@ multiclass pseudo_pcmpestrm { VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>; def MEM : PseudoI<(outs VR128:$dst), (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 - VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>; + [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 VR128:$src1, EAX, + (bc_v16i8 (memopv2i64 addr:$src3)), EDX, imm:$src5))]>; } let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in { - let AddedComplexity = 1 in - defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>; - defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>; + defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>; + defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[UseSSE42]>; } -let Predicates = [HasAVX], - Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in { - def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX; +multiclass SS42AI_pcmpestrm { + def rr : SS42AI<0x60, 
MRMSrcReg, (outs), + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"), + []>, OpSize; let mayLoad = 1 in - def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX; + def rm : SS42AI<0x60, MRMSrcMem, (outs), + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"), + []>, OpSize; } let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in { - def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize; - let mayLoad = 1 in - def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize; + let Predicates = [HasAVX] in + defm VPCMPESTRM128 : SS42AI_pcmpestrm<"vpcmpestrm">, VEX; + defm PCMPESTRM128 : SS42AI_pcmpestrm<"pcmpestrm">; } // Packed Compare Implicit Length Strings, Return Index -let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in { - multiclass SS42AI_pcmpistri { - def rr : SS42AI<0x63, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"), - []>, OpSize; - let mayLoad = 1 in - def rm : SS42AI<0x63, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"), - []>, OpSize; - } +multiclass pseudo_pcmpistri { + def REG : PseudoI<(outs GR32:$dst), + (ins VR128:$src1, VR128:$src2, i8imm:$src3), + [(set GR32:$dst, EFLAGS, + (X86pcmpistri VR128:$src1, VR128:$src2, imm:$src3))]>; + def MEM : PseudoI<(outs GR32:$dst), + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + [(set GR32:$dst, EFLAGS, (X86pcmpistri VR128:$src1, + (bc_v16i8 (memopv2i64 addr:$src2)), imm:$src3))]>; } -let Predicates = [HasAVX] in -defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX; -defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">; +let Defs = [EFLAGS], usesCustomInserter = 1 in { + defm VPCMPISTRI : pseudo_pcmpistri<"#VPCMPISTRI">, Requires<[HasAVX]>; + defm PCMPISTRI : pseudo_pcmpistri<"#PCMPISTRI">, Requires<[UseSSE42]>; +} + +multiclass SS42AI_pcmpistri { + def rr : SS42AI<0x63, MRMSrcReg, (outs), + (ins VR128:$src1, VR128:$src2, i8imm:$src3), + !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"), + []>, OpSize; + let mayLoad = 1 in + def rm : SS42AI<0x63, MRMSrcMem, (outs), + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"), + []>, OpSize; +} + +let Defs = [ECX, EFLAGS], neverHasSideEffects = 1 in { + let Predicates = [HasAVX] in + defm VPCMPISTRI : SS42AI_pcmpistri<"vpcmpistri">, VEX; + defm PCMPISTRI : SS42AI_pcmpistri<"pcmpistri">; +} // Packed Compare Explicit Length Strings, Return Index -let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in { - multiclass SS42AI_pcmpestri { - def rr : SS42AI<0x61, MRMSrcReg, (outs), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"), - []>, OpSize; - let mayLoad = 1 in - def rm : SS42AI<0x61, MRMSrcMem, (outs), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"), - []>, OpSize; - } +multiclass pseudo_pcmpestri { + def REG : PseudoI<(outs 
GR32:$dst), + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + [(set GR32:$dst, EFLAGS, + (X86pcmpestri VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>; + def MEM : PseudoI<(outs GR32:$dst), + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + [(set GR32:$dst, EFLAGS, + (X86pcmpestri VR128:$src1, EAX, (bc_v16i8 (memopv2i64 addr:$src3)), EDX, + imm:$src5))]>; } -let Predicates = [HasAVX] in -defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX; -defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">; +let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in { + defm VPCMPESTRI : pseudo_pcmpestri<"#VPCMPESTRI">, Requires<[HasAVX]>; + defm PCMPESTRI : pseudo_pcmpestri<"#PCMPESTRI">, Requires<[UseSSE42]>; +} + +multiclass SS42AI_pcmpestri { + def rr : SS42AI<0x61, MRMSrcReg, (outs), + (ins VR128:$src1, VR128:$src3, i8imm:$src5), + !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"), + []>, OpSize; + let mayLoad = 1 in + def rm : SS42AI<0x61, MRMSrcMem, (outs), + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), + !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"), + []>, OpSize; +} + +let Defs = [ECX, EFLAGS], Uses = [EAX, EDX], neverHasSideEffects = 1 in { + let Predicates = [HasAVX] in + defm VPCMPESTRI : SS42AI_pcmpestri<"vpcmpestri">, VEX; + defm PCMPESTRI : SS42AI_pcmpestri<"pcmpestri">; +} //===----------------------------------------------------------------------===// // SSE4.2 - CRC Instructions @@ -7175,27 +7424,27 @@ let ExeDomain = SSEPackedSingle in { def VBROADCASTSSrm : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem, int_x86_avx_vbroadcast_ss>; def VBROADCASTSSYrm : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem, - int_x86_avx_vbroadcast_ss_256>; + int_x86_avx_vbroadcast_ss_256>, VEX_L; } let ExeDomain = SSEPackedDouble in def VBROADCASTSDYrm : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem, - int_x86_avx_vbroadcast_sd_256>; + int_x86_avx_vbroadcast_sd_256>, VEX_L; def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem, - int_x86_avx_vbroadcastf128_pd_256>; + int_x86_avx_vbroadcastf128_pd_256>, VEX_L; let ExeDomain = SSEPackedSingle in { def VBROADCASTSSrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR128, int_x86_avx2_vbroadcast_ss_ps>; def VBROADCASTSSYrr : avx2_broadcast_reg<0x18, "vbroadcastss", VR256, - int_x86_avx2_vbroadcast_ss_ps_256>; + int_x86_avx2_vbroadcast_ss_ps_256>, VEX_L; } let ExeDomain = SSEPackedDouble in def VBROADCASTSDYrr : avx2_broadcast_reg<0x19, "vbroadcastsd", VR256, - int_x86_avx2_vbroadcast_sd_pd_256>; + int_x86_avx2_vbroadcast_sd_pd_256>, VEX_L; let Predicates = [HasAVX2] in def VBROADCASTI128 : avx_broadcast<0x5A, "vbroadcasti128", VR256, i128mem, - int_x86_avx2_vbroadcasti128>; + int_x86_avx2_vbroadcasti128>, VEX_L; let Predicates = [HasAVX] in def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src), @@ -7209,50 +7458,69 @@ let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in { def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src1, VR128:$src2, i8imm:$src3), "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", - []>, VEX_4V; + []>, VEX_4V, VEX_L; let mayLoad = 1 in def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst), (ins VR256:$src1, f128mem:$src2, i8imm:$src3), "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", - []>, VEX_4V; + []>, VEX_4V, VEX_L; } let Predicates = [HasAVX] in { def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTF128rr VR256:$src1, 
VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; + +def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (memopv4f32 addr:$src2), + (iPTR imm)), + (VINSERTF128rm VR256:$src1, addr:$src2, + (INSERT_get_vinsertf128_imm VR256:$ins))>; +def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (memopv2f64 addr:$src2), + (iPTR imm)), + (VINSERTF128rm VR256:$src1, addr:$src2, + (INSERT_get_vinsertf128_imm VR256:$ins))>; +} + +let Predicates = [HasAVX1Only] in { def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTF128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (loadv4f32 addr:$src2), - (i32 imm)), +def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2), + (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (loadv2f64 addr:$src2), - (i32 imm)), +def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), + (bc_v4i32 (memopv2i64 addr:$src2)), + (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; -def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (loadv2i64 addr:$src2), - (i32 imm)), +def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), + (bc_v16i8 (memopv2i64 addr:$src2)), + (iPTR imm)), + (VINSERTF128rm VR256:$src1, addr:$src2, + (INSERT_get_vinsertf128_imm VR256:$ins))>; +def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), + (bc_v8i16 (memopv2i64 addr:$src2)), + (iPTR imm)), (VINSERTF128rm VR256:$src1, addr:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; } @@ -7264,64 +7532,69 @@ let neverHasSideEffects = 1, ExeDomain = SSEPackedSingle in { def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst), (ins VR256:$src1, i8imm:$src2), "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}", - []>, VEX; + []>, VEX, VEX_L; let mayStore = 1 in def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs), (ins f128mem:$dst, VR256:$src1, i8imm:$src2), "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}", - []>, VEX; -} - -// Extract and store. 
-let Predicates = [HasAVX] in { - def : Pat<(alignedstore (int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2), addr:$dst), - (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>; - def : Pat<(alignedstore (int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2), addr:$dst), - (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>; - def : Pat<(alignedstore (int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2), addr:$dst), - (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>; - - def : Pat<(int_x86_sse_storeu_ps addr:$dst, (int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2)), - (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>; - def : Pat<(int_x86_sse2_storeu_pd addr:$dst, (int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2)), - (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>; - def : Pat<(int_x86_sse2_storeu_dq addr:$dst, (bc_v16i8 (int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2))), - (VEXTRACTF128mr addr:$dst, VR256:$src1, imm:$src2)>; + []>, VEX, VEX_L; } // AVX1 patterns let Predicates = [HasAVX] in { -def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2), - (VEXTRACTF128rr VR256:$src1, imm:$src2)>; -def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2), - (VEXTRACTF128rr VR256:$src1, imm:$src2)>; -def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2), - (VEXTRACTF128rr VR256:$src1, imm:$src2)>; - -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v4f32 (VEXTRACTF128rr (v8f32 VR256:$src1), (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v2f64 (VEXTRACTF128rr (v4f64 VR256:$src1), (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), + +def : Pat<(alignedstore (v4f32 (vextractf128_extract:$ext (v8f32 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTF128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; +def : Pat<(alignedstore (v2f64 (vextractf128_extract:$ext (v4f64 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTF128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; +} + +let Predicates = [HasAVX1Only] in { +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v2i64 (VEXTRACTF128rr - (v4i64 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), + (v4i64 VR256:$src1), + (EXTRACT_get_vextractf128_imm VR128:$ext)))>; +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v4i32 (VEXTRACTF128rr - (v8i32 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), + (v8i32 VR256:$src1), + (EXTRACT_get_vextractf128_imm VR128:$ext)))>; +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v8i16 (VEXTRACTF128rr - (v16i16 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), + (v16i16 VR256:$src1), + (EXTRACT_get_vextractf128_imm VR128:$ext)))>; +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v16i8 (VEXTRACTF128rr - (v32i8 VR256:$src1), - (EXTRACT_get_vextractf128_imm VR128:$ext)))>; + (v32i8 VR256:$src1), + (EXTRACT_get_vextractf128_imm VR128:$ext)))>; + +def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1), + (iPTR imm))), addr:$dst), + 
(VEXTRACTF128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; +def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTF128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; +def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTF128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; +def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTF128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; } //===----------------------------------------------------------------------===// @@ -7339,7 +7612,7 @@ multiclass avx_movmask_rm opc_rm, bits<8> opc_mr, string OpcodeStr, (ins VR256:$src1, f256mem:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>, - VEX_4V; + VEX_4V, VEX_L; def mr : AVX8I opc_rm, bits<8> opc_mr, string OpcodeStr, def Ymr : AVX8I, VEX_4V; + [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L; } let ExeDomain = SSEPackedSingle in @@ -7395,13 +7668,13 @@ let ExeDomain = SSEPackedSingle in { defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem, memopv2i64, int_x86_avx_vpermilvar_ps, v4f32>; defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem, - memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>; + memopv4i64, int_x86_avx_vpermilvar_ps_256, v8f32>, VEX_L; } let ExeDomain = SSEPackedDouble in { defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem, memopv2i64, int_x86_avx_vpermilvar_pd, v2f64>; defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem, - memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>; + memopv4i64, int_x86_avx_vpermilvar_pd_256, v4f64>, VEX_L; } let Predicates = [HasAVX] in { @@ -7429,38 +7702,38 @@ def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src1, VR256:$src2, i8imm:$src3), "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", [(set VR256:$dst, (v8f32 (X86VPerm2x128 VR256:$src1, VR256:$src2, - (i8 imm:$src3))))]>, VEX_4V; + (i8 imm:$src3))))]>, VEX_4V, VEX_L; def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst), (ins VR256:$src1, f256mem:$src2, i8imm:$src3), "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv8f32 addr:$src2), - (i8 imm:$src3)))]>, VEX_4V; + (i8 imm:$src3)))]>, VEX_4V, VEX_L; } let Predicates = [HasAVX] in { +def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))), + (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>; +def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, + (memopv4f64 addr:$src2), (i8 imm:$imm))), + (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>; +} + +let Predicates = [HasAVX1Only] in { def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))), (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>; def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))), (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>; -def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))), - (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>; def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))), (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>; def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, 
VR256:$src2, (i8 imm:$imm))), (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>; -def : Pat<(v8f32 (X86VPerm2x128 VR256:$src1, - (memopv8f32 addr:$src2), (i8 imm:$imm))), - (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>; def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))), (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>; def : Pat<(v4i64 (X86VPerm2x128 VR256:$src1, (memopv4i64 addr:$src2), (i8 imm:$imm))), (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>; -def : Pat<(v4f64 (X86VPerm2x128 VR256:$src1, - (memopv4f64 addr:$src2), (i8 imm:$imm))), - (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>; def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, (bc_v32i8 (memopv4i64 addr:$src2)), (i8 imm:$imm))), (VPERM2F128rm VR256:$src1, addr:$src2, imm:$imm)>; @@ -7511,9 +7784,9 @@ multiclass f16c_ps2ph { let Predicates = [HasAVX, HasF16C] in { defm VCVTPH2PS : f16c_ph2ps; - defm VCVTPH2PSY : f16c_ph2ps; + defm VCVTPH2PSY : f16c_ph2ps, VEX_L; defm VCVTPS2PH : f16c_ps2ph; - defm VCVTPS2PHY : f16c_ps2ph; + defm VCVTPS2PHY : f16c_ps2ph, VEX_L; } //===----------------------------------------------------------------------===// @@ -7545,7 +7818,7 @@ let isCommutable = 0 in { defm VPBLENDD : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_128, VR128, memopv2i64, i128mem>; defm VPBLENDDY : AVX2_binop_rmi_int<0x02, "vpblendd", int_x86_avx2_pblendd_256, - VR256, memopv4i64, i256mem>; + VR256, memopv4i64, i256mem>, VEX_L; } //===----------------------------------------------------------------------===// @@ -7564,11 +7837,12 @@ multiclass avx2_broadcast opc, string OpcodeStr, (Int128 (scalar_to_vector (ld_frag addr:$src))))]>, VEX; def Yrr : AVX28I, VEX; + [(set VR256:$dst, (Int256 VR128:$src))]>, VEX, VEX_L; def Yrm : AVX28I, VEX; + (Int256 (scalar_to_vector (ld_frag addr:$src))))]>, + VEX, VEX_L; } defm VPBROADCASTB : avx2_broadcast<0x78, "vpbroadcastb", i8mem, loadi8, @@ -7647,19 +7921,22 @@ let Predicates = [HasAVX2] in { } // AVX1 broadcast patterns -let Predicates = [HasAVX] in { +let Predicates = [HasAVX1Only] in { def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))), (VBROADCASTSSYrm addr:$src)>; def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))), (VBROADCASTSDYrm addr:$src)>; +def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))), + (VBROADCASTSSrm addr:$src)>; +} + +let Predicates = [HasAVX] in { def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))), (VBROADCASTSSYrm addr:$src)>; def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))), (VBROADCASTSDYrm addr:$src)>; def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))), (VBROADCASTSSrm addr:$src)>; -def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))), - (VBROADCASTSSrm addr:$src)>; // Provide fallback in case the load node that is used in the patterns above // is used by additional users, which prevents the pattern selection. 
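// Editor's note (illustrative sketch, not part of the upstream patch): the
// "additional users" case arises when the loaded scalar is reused elsewhere,
// e.g. in source terms
//   float s = *p;                    // the load has a second, scalar use
//   __m256 v = _mm256_set1_ps(s);    // broadcast of the same value
// The multi-use load cannot be folded into the memory-operand VBROADCASTSS*rm
// forms, which prevents those patterns from matching; the fallback patterns
// (not shown in this hunk) instead broadcast from the register that already
// holds the loaded value.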
@@ -7700,7 +7977,8 @@ multiclass avx2_perm opc, string OpcodeStr, PatFrag mem_frag, !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), [(set VR256:$dst, - (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>, VEX_4V; + (OpVT (X86VPermv VR256:$src1, VR256:$src2)))]>, + VEX_4V, VEX_L; def Yrm : AVX28I opc, string OpcodeStr, PatFrag mem_frag, [(set VR256:$dst, (OpVT (X86VPermv VR256:$src1, (bitconvert (mem_frag addr:$src2)))))]>, - VEX_4V; + VEX_4V, VEX_L; } defm VPERMD : avx2_perm<0x36, "vpermd", memopv4i64, v8i32>; @@ -7722,14 +8000,15 @@ multiclass avx2_perm_imm opc, string OpcodeStr, PatFrag mem_frag, !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), [(set VR256:$dst, - (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>, VEX; + (OpVT (X86VPermi VR256:$src1, (i8 imm:$src2))))]>, + VEX, VEX_L; def Ymi : AVX2AIi8, VEX; + (i8 imm:$src2))))]>, VEX, VEX_L; } defm VPERMQ : avx2_perm_imm<0x00, "vpermq", memopv4i64, v4i64>, VEX_W; @@ -7739,20 +8018,18 @@ defm VPERMPD : avx2_perm_imm<0x01, "vpermpd", memopv4f64, v4f64>, VEX_W; //===----------------------------------------------------------------------===// // VPERM2I128 - Permute Floating-Point Values in 128-bit chunks // -let AddedComplexity = 1 in { def VPERM2I128rr : AVX2AIi8<0x46, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src1, VR256:$src2, i8imm:$src3), "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", [(set VR256:$dst, (v4i64 (X86VPerm2x128 VR256:$src1, VR256:$src2, - (i8 imm:$src3))))]>, VEX_4V; + (i8 imm:$src3))))]>, VEX_4V, VEX_L; def VPERM2I128rm : AVX2AIi8<0x46, MRMSrcMem, (outs VR256:$dst), (ins VR256:$src1, f256mem:$src2, i8imm:$src3), "vperm2i128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", [(set VR256:$dst, (X86VPerm2x128 VR256:$src1, (memopv4i64 addr:$src2), - (i8 imm:$src3)))]>, VEX_4V; -} + (i8 imm:$src3)))]>, VEX_4V, VEX_L; -let Predicates = [HasAVX2], AddedComplexity = 1 in { +let Predicates = [HasAVX2] in { def : Pat<(v8i32 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))), (VPERM2I128rr VR256:$src1, VR256:$src2, imm:$imm)>; def : Pat<(v32i8 (X86VPerm2x128 VR256:$src1, VR256:$src2, (i8 imm:$imm))), @@ -7779,31 +8056,51 @@ let neverHasSideEffects = 1 in { def VINSERTI128rr : AVX2AIi8<0x38, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src1, VR128:$src2, i8imm:$src3), "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", - []>, VEX_4V; + []>, VEX_4V, VEX_L; let mayLoad = 1 in def VINSERTI128rm : AVX2AIi8<0x38, MRMSrcMem, (outs VR256:$dst), (ins VR256:$src1, i128mem:$src2, i8imm:$src3), "vinserti128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", - []>, VEX_4V; + []>, VEX_4V, VEX_L; } -let Predicates = [HasAVX2], AddedComplexity = 1 in { +let Predicates = [HasAVX2] in { def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTI128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTI128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTI128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2), - (i32 imm)), + (iPTR imm)), (VINSERTI128rr VR256:$src1, VR128:$src2, (INSERT_get_vinsertf128_imm VR256:$ins))>; + +def : 
Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (memopv2i64 addr:$src2), + (iPTR imm)), + (VINSERTI128rm VR256:$src1, addr:$src2, + (INSERT_get_vinsertf128_imm VR256:$ins))>; +def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), + (bc_v4i32 (memopv2i64 addr:$src2)), + (iPTR imm)), + (VINSERTI128rm VR256:$src1, addr:$src2, + (INSERT_get_vinsertf128_imm VR256:$ins))>; +def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), + (bc_v16i8 (memopv2i64 addr:$src2)), + (iPTR imm)), + (VINSERTI128rm VR256:$src1, addr:$src2, + (INSERT_get_vinsertf128_imm VR256:$ins))>; +def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), + (bc_v8i16 (memopv2i64 addr:$src2)), + (iPTR imm)), + (VINSERTI128rm VR256:$src1, addr:$src2, + (INSERT_get_vinsertf128_imm VR256:$ins))>; } //===----------------------------------------------------------------------===// @@ -7814,29 +8111,47 @@ def VEXTRACTI128rr : AVX2AIi8<0x39, MRMDestReg, (outs VR128:$dst), "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", [(set VR128:$dst, (int_x86_avx2_vextracti128 VR256:$src1, imm:$src2))]>, - VEX; + VEX, VEX_L; let neverHasSideEffects = 1, mayStore = 1 in def VEXTRACTI128mr : AVX2AIi8<0x39, MRMDestMem, (outs), (ins i128mem:$dst, VR256:$src1, i8imm:$src2), - "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, VEX; + "vextracti128\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, + VEX, VEX_L; -let Predicates = [HasAVX2], AddedComplexity = 1 in { -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), +let Predicates = [HasAVX2] in { +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v2i64 (VEXTRACTI128rr (v4i64 VR256:$src1), (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v4i32 (VEXTRACTI128rr (v8i32 VR256:$src1), (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v8i16 (VEXTRACTI128rr (v16i16 VR256:$src1), (EXTRACT_get_vextractf128_imm VR128:$ext)))>; -def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)), +def : Pat<(vextractf128_extract:$ext VR256:$src1, (iPTR imm)), (v16i8 (VEXTRACTI128rr (v32i8 VR256:$src1), (EXTRACT_get_vextractf128_imm VR128:$ext)))>; + +def : Pat<(alignedstore (v2i64 (vextractf128_extract:$ext (v4i64 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTI128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; +def : Pat<(alignedstore (v4i32 (vextractf128_extract:$ext (v8i32 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTI128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; +def : Pat<(alignedstore (v8i16 (vextractf128_extract:$ext (v16i16 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTI128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; +def : Pat<(alignedstore (v16i8 (vextractf128_extract:$ext (v32i8 VR256:$src1), + (iPTR imm))), addr:$dst), + (VEXTRACTI128mr addr:$dst, VR256:$src1, + (EXTRACT_get_vextractf128_imm VR128:$ext))>; } //===----------------------------------------------------------------------===// @@ -7852,7 +8167,8 @@ multiclass avx2_pmovmask, VEX_4V; + [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>, + VEX_4V, VEX_L; def mr : AVX28I<0x8e, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src1, VR128:$src2), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), @@ -7860,7 +8176,7 @@ multiclass avx2_pmovmask, 
VEX_4V; + [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V, VEX_L; } defm VPMASKMOVD : avx2_pmovmask<"vpmaskmovd", @@ -7898,14 +8214,14 @@ multiclass avx2_var_shift opc, string OpcodeStr, SDNode OpNode, !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), [(set VR256:$dst, (vt256 (OpNode VR256:$src1, (vt256 VR256:$src2))))]>, - VEX_4V; + VEX_4V, VEX_L; def Yrm : AVX28I, - VEX_4V; + VEX_4V, VEX_L; } defm VPSLLVD : avx2_var_shift<0x47, "vpsllvd", shl, v4i32, v8i32>; diff --git a/lib/Target/X86/X86InstrShiftRotate.td b/lib/Target/X86/X86InstrShiftRotate.td index bdeb63f..893488c 100644 --- a/lib/Target/X86/X86InstrShiftRotate.td +++ b/lib/Target/X86/X86InstrShiftRotate.td @@ -839,6 +839,16 @@ def SHRD64mri8 : RIi8<0xAC, MRMDestMem, } // Defs = [EFLAGS] +def ROT32L2R_imm8 : SDNodeXFormgetZExtValue()); +}]>; + +def ROT64L2R_imm8 : SDNodeXFormgetZExtValue()); +}]>; + multiclass bmi_rotate { let neverHasSideEffects = 1 in { def ri : Ii8<0xF0, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, i8imm:$src2), @@ -873,4 +883,72 @@ let Predicates = [HasBMI2] in { defm SHRX64 : bmi_shift<"shrx{q}", GR64, i64mem>, T8XD, VEX_W; defm SHLX32 : bmi_shift<"shlx{l}", GR32, i32mem>, T8, OpSize; defm SHLX64 : bmi_shift<"shlx{q}", GR64, i64mem>, T8, OpSize, VEX_W; + + // Prefer RORX which is non-destructive and doesn't update EFLAGS. + let AddedComplexity = 10 in { + def : Pat<(rotl GR32:$src, (i8 imm:$shamt)), + (RORX32ri GR32:$src, (ROT32L2R_imm8 imm:$shamt))>; + def : Pat<(rotl GR64:$src, (i8 imm:$shamt)), + (RORX64ri GR64:$src, (ROT64L2R_imm8 imm:$shamt))>; + } + + def : Pat<(rotl (loadi32 addr:$src), (i8 imm:$shamt)), + (RORX32mi addr:$src, (ROT32L2R_imm8 imm:$shamt))>; + def : Pat<(rotl (loadi64 addr:$src), (i8 imm:$shamt)), + (RORX64mi addr:$src, (ROT64L2R_imm8 imm:$shamt))>; + + // Prefer SARX/SHRX/SHLX over SAR/SHR/SHL with variable shift BUT not + // immedidate shift, i.e. the following code is considered better + // + // mov %edi, %esi + // shl $imm, %esi + // ... %edi, ... + // + // than + // + // movb $imm, %sil + // shlx %sil, %edi, %esi + // ... %edi, ... + // + let AddedComplexity = 1 in { + def : Pat<(sra GR32:$src1, GR8:$src2), + (SARX32rr GR32:$src1, + (INSERT_SUBREG + (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; + def : Pat<(sra GR64:$src1, GR8:$src2), + (SARX64rr GR64:$src1, + (INSERT_SUBREG + (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; + + def : Pat<(srl GR32:$src1, GR8:$src2), + (SHRX32rr GR32:$src1, + (INSERT_SUBREG + (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; + def : Pat<(srl GR64:$src1, GR8:$src2), + (SHRX64rr GR64:$src1, + (INSERT_SUBREG + (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; + + def : Pat<(shl GR32:$src1, GR8:$src2), + (SHLX32rr GR32:$src1, + (INSERT_SUBREG + (i32 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; + def : Pat<(shl GR64:$src1, GR8:$src2), + (SHLX64rr GR64:$src1, + (INSERT_SUBREG + (i64 (IMPLICIT_DEF)), GR8:$src2, sub_8bit))>; + } + + // Patterns on SARXrm/SHRXrm/SHLXrm are explicitly omitted to favor + // + // mov (%ecx), %esi + // shl $imm, $esi + // + // over + // + // movb $imm %al + // shlx %al, (%ecx), %esi + // + // As SARXrr/SHRXrr/SHLXrr is favored on variable shift, the peephole + // optimization will fold them into SARXrm/SHRXrm/SHLXrm if possible. 
} diff --git a/lib/Target/X86/X86InstrTSX.td b/lib/Target/X86/X86InstrTSX.td new file mode 100644 index 0000000..ad55058 --- /dev/null +++ b/lib/Target/X86/X86InstrTSX.td @@ -0,0 +1,32 @@ +//===-- X86InstrVMX.td - TSX Instruction Set Extension -----*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the instructions that make up the Intel TSX instruction +// set. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// TSX instructions + +let usesCustomInserter = 1 in +def XBEGIN : I<0, Pseudo, (outs GR32:$dst), (ins), + "# XBEGIN", [(set GR32:$dst, (int_x86_xbegin))]>, + Requires<[HasRTM]>; + +let isBranch = 1, isTerminator = 1, Defs = [EAX] in +def XBEGIN_4 : Ii32PCRel<0xc7, MRM_F8, (outs), (ins brtarget:$dst), + "xbegin\t$dst", []>; + +def XEND : I<0x01, MRM_D5, (outs), (ins), + "xend", [(int_x86_xend)]>, TB, Requires<[HasRTM]>; + +def XABORT : Ii8<0xc6, MRM_F8, (outs), (ins i8imm:$imm), + "xabort\t$imm", + [(int_x86_xabort imm:$imm)]>, Requires<[HasRTM]>; diff --git a/lib/Target/X86/X86InstrXOP.td b/lib/Target/X86/X86InstrXOP.td index 8ec2c68..2aa08fa 100644 --- a/lib/Target/X86/X86InstrXOP.td +++ b/lib/Target/X86/X86InstrXOP.td @@ -75,10 +75,10 @@ multiclass xop2op256 opc, string OpcodeStr, Intrinsic Int, PatFrag memop> { def rrY : IXOP, VEX; + [(set VR256:$dst, (Int VR256:$src))]>, VEX, VEX_L; def rmY : IXOP, VEX; + [(set VR256:$dst, (Int (bitconvert (memop addr:$src))))]>, VEX, VEX_L; } let isAsmParserOnly = 1 in { @@ -238,7 +238,7 @@ multiclass xop4op256 opc, string OpcodeStr, Intrinsic Int> { !strconcat(OpcodeStr, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), [(set VR256:$dst, (Int VR256:$src1, VR256:$src2, VR256:$src3))]>, - VEX_4V, VEX_I8IMM; + VEX_4V, VEX_I8IMM, VEX_L; def rmY : IXOPi8 opc, string OpcodeStr, Intrinsic Int> { [(set VR256:$dst, (Int VR256:$src1, VR256:$src2, (bitconvert (memopv4i64 addr:$src3))))]>, - VEX_4V, VEX_I8IMM, VEX_W, MemOp4; + VEX_4V, VEX_I8IMM, VEX_W, MemOp4, VEX_L; def mrY : IXOPi8 opc, string OpcodeStr, Intrinsic Int> { [(set VR256:$dst, (Int VR256:$src1, (bitconvert (memopv4i64 addr:$src2)), VR256:$src3))]>, - VEX_4V, VEX_I8IMM; + VEX_4V, VEX_I8IMM, VEX_L; } let isAsmParserOnly = 1 in { @@ -287,20 +287,21 @@ multiclass xop5op opc, string OpcodeStr, Intrinsic Int128, !strconcat(OpcodeStr, "\t{$src4, $src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3, $src4}"), [(set VR256:$dst, - (Int256 VR256:$src1, VR256:$src2, VR256:$src3, imm:$src4))]>; + (Int256 VR256:$src1, VR256:$src2, VR256:$src3, imm:$src4))]>, VEX_L; def rmY : IXOP5, - VEX_W, MemOp4; + VEX_W, MemOp4, VEX_L; def mrY : IXOP5; + (Int256 VR256:$src1, (ld_256 addr:$src2), VR256:$src3, imm:$src4))]>, + VEX_L; } defm VPERMIL2PD : xop5op<0x49, "vpermil2pd", int_x86_xop_vpermil2pd, diff --git a/lib/Target/X86/X86JITInfo.cpp b/lib/Target/X86/X86JITInfo.cpp index 0168d12..764aa5d 100644 --- a/lib/Target/X86/X86JITInfo.cpp +++ b/lib/Target/X86/X86JITInfo.cpp @@ -532,6 +532,15 @@ uintptr_t X86JITInfo::getPICJumpTableEntry(uintptr_t BB, uintptr_t Entry) { #endif } +template static void addUnaligned(void *Pos, T Delta) { + T Value; + std::memcpy(reinterpret_cast(&Value), reinterpret_cast(Pos), + sizeof(T)); + Value += Delta; + 
std::memcpy(reinterpret_cast(Pos), reinterpret_cast(&Value), + sizeof(T)); +} + /// relocate - Before the JIT can run a block of code that has been emitted, /// it must rewrite the code to contain the actual addresses of any /// referenced global symbols. @@ -545,24 +554,24 @@ void X86JITInfo::relocate(void *Function, MachineRelocation *MR, // PC relative relocation, add the relocated value to the value already in // memory, after we adjust it for where the PC is. ResultPtr = ResultPtr -(intptr_t)RelocPos - 4 - MR->getConstantVal(); - *((unsigned*)RelocPos) += (unsigned)ResultPtr; + addUnaligned(RelocPos, ResultPtr); break; } case X86::reloc_picrel_word: { // PIC base relative relocation, add the relocated value to the value // already in memory, after we adjust it for where the PIC base is. ResultPtr = ResultPtr - ((intptr_t)Function + MR->getConstantVal()); - *((unsigned*)RelocPos) += (unsigned)ResultPtr; + addUnaligned(RelocPos, ResultPtr); break; } case X86::reloc_absolute_word: case X86::reloc_absolute_word_sext: // Absolute relocation, just add the relocated value to the value already // in memory. - *((unsigned*)RelocPos) += (unsigned)ResultPtr; + addUnaligned(RelocPos, ResultPtr); break; case X86::reloc_absolute_dword: - *((intptr_t*)RelocPos) += ResultPtr; + addUnaligned(RelocPos, ResultPtr); break; } } diff --git a/lib/Target/X86/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp index 9c0ce4e..cfd68f7 100644 --- a/lib/Target/X86/X86MCInstLower.cpp +++ b/lib/Target/X86/X86MCInstLower.cpp @@ -12,7 +12,6 @@ // //===----------------------------------------------------------------------===// -#include "X86MCInstLower.h" #include "X86AsmPrinter.h" #include "X86COFFMachineModuleInfo.h" #include "InstPrinter/X86ATTInstPrinter.h" @@ -29,6 +28,31 @@ #include "llvm/ADT/SmallString.h" using namespace llvm; +namespace { + +/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst. +class X86MCInstLower { + MCContext &Ctx; + Mangler *Mang; + const MachineFunction &MF; + const TargetMachine &TM; + const MCAsmInfo &MAI; + X86AsmPrinter &AsmPrinter; +public: + X86MCInstLower(Mangler *mang, const MachineFunction &MF, + X86AsmPrinter &asmprinter); + + void Lower(const MachineInstr *MI, MCInst &OutMI) const; + + MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const; + MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const; + +private: + MachineModuleInfoMachO &getMachOMMI() const; +}; + +} // end anonymous namespace + X86MCInstLower::X86MCInstLower(Mangler *mang, const MachineFunction &mf, X86AsmPrinter &asmprinter) : Ctx(mf.getContext()), Mang(mang), MF(mf), TM(mf.getTarget()), @@ -43,15 +67,11 @@ MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const { /// operand to an MCSymbol. 
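The addUnaligned helper above replaces the old *(unsigned*)RelocPos += ... stores in relocate(); going through memcpy keeps the read-modify-write well defined even when the relocation target is not naturally aligned. A standalone sketch of the same pattern, with the code buffer and offset invented purely for illustration:

  #include <cstdint>
  #include <cstring>
  #include <cassert>

  template <typename T> static void addUnaligned(void *Pos, T Delta) {
    T Value;
    std::memcpy(&Value, Pos, sizeof(T));   // unaligned load
    Value += Delta;
    std::memcpy(Pos, &Value, sizeof(T));   // unaligned store
  }

  int main() {
    unsigned char Code[16] = {};           // stand-in for emitted machine code
    uint32_t Initial = 0x1000;
    std::memcpy(Code + 3, &Initial, sizeof(Initial));   // deliberately misaligned field
    addUnaligned<uint32_t>(Code + 3, 0x234);            // apply a relocation delta
    uint32_t After;
    std::memcpy(&After, Code + 3, sizeof(After));
    assert(After == 0x1234);
    return 0;
  }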
MCSymbol *X86MCInstLower:: GetSymbolFromOperand(const MachineOperand &MO) const { - assert((MO.isGlobal() || MO.isSymbol()) && "Isn't a symbol reference"); + assert((MO.isGlobal() || MO.isSymbol() || MO.isMBB()) && "Isn't a symbol reference"); SmallString<128> Name; - if (!MO.isGlobal()) { - assert(MO.isSymbol()); - Name += MAI.getGlobalPrefix(); - Name += MO.getSymbolName(); - } else { + if (MO.isGlobal()) { const GlobalValue *GV = MO.getGlobal(); bool isImplicitlyPrivate = false; if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB || @@ -61,6 +81,11 @@ GetSymbolFromOperand(const MachineOperand &MO) const { isImplicitlyPrivate = true; Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate); + } else if (MO.isSymbol()) { + Name += MAI.getGlobalPrefix(); + Name += MO.getSymbolName(); + } else if (MO.isMBB()) { + Name += MO.getMBB()->getSymbol()->getName(); } // If the target flags on the operand changes the name of the symbol, do that @@ -191,7 +216,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, if (Expr == 0) Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx); - if (!MO.isJTI() && MO.getOffset()) + if (!MO.isJTI() && !MO.isMBB() && MO.getOffset()) Expr = MCBinaryExpr::CreateAdd(Expr, MCConstantExpr::Create(MO.getOffset(), Ctx), Ctx); @@ -324,9 +349,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { MCOp = MCOperand::CreateImm(MO.getImm()); break; case MachineOperand::MO_MachineBasicBlock: - MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create( - MO.getMBB()->getSymbol(), Ctx)); - break; case MachineOperand::MO_GlobalAddress: case MachineOperand::MO_ExternalSymbol: MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO)); @@ -371,18 +393,8 @@ ReSimplify: case X86::MOVZX64rm8: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm8); break; case X86::MOVZX64rr16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rr16); break; case X86::MOVZX64rm16: LowerSubReg32_Op0(OutMI, X86::MOVZX32rm16); break; - case X86::SETB_C8r: LowerUnaryToTwoAddr(OutMI, X86::SBB8rr); break; - case X86::SETB_C16r: LowerUnaryToTwoAddr(OutMI, X86::SBB16rr); break; - case X86::SETB_C32r: LowerUnaryToTwoAddr(OutMI, X86::SBB32rr); break; - case X86::SETB_C64r: LowerUnaryToTwoAddr(OutMI, X86::SBB64rr); break; case X86::MOV8r0: LowerUnaryToTwoAddr(OutMI, X86::XOR8rr); break; case X86::MOV32r0: LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); break; - case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break; - case X86::AVX_SET0PSY: LowerUnaryToTwoAddr(OutMI, X86::VXORPSYrr); break; - case X86::AVX_SET0PDY: LowerUnaryToTwoAddr(OutMI, X86::VXORPDYrr); break; - case X86::AVX_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::VPCMPEQDrr); break; - case X86::AVX2_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::VPCMPEQDYrr);break; - case X86::AVX2_SET0: LowerUnaryToTwoAddr(OutMI, X86::VPXORYrr); break; case X86::MOV16r0: LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV16r0 -> MOV32r0 diff --git a/lib/Target/X86/X86MCInstLower.h b/lib/Target/X86/X86MCInstLower.h deleted file mode 100644 index b4d4cfd..0000000 --- a/lib/Target/X86/X86MCInstLower.h +++ /dev/null @@ -1,52 +0,0 @@ -//===-- X86MCInstLower.h - Lower MachineInstr to MCInst ---------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -#ifndef X86_MCINSTLOWER_H -#define X86_MCINSTLOWER_H - -#include "llvm/Support/Compiler.h" - -namespace llvm { - class MCAsmInfo; - class MCContext; - class MCInst; - class MCOperand; - class MCSymbol; - class MachineInstr; - class MachineFunction; - class MachineModuleInfoMachO; - class MachineOperand; - class Mangler; - class TargetMachine; - class X86AsmPrinter; - -/// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst. -class LLVM_LIBRARY_VISIBILITY X86MCInstLower { - MCContext &Ctx; - Mangler *Mang; - const MachineFunction &MF; - const TargetMachine &TM; - const MCAsmInfo &MAI; - X86AsmPrinter &AsmPrinter; -public: - X86MCInstLower(Mangler *mang, const MachineFunction &MF, - X86AsmPrinter &asmprinter); - - void Lower(const MachineInstr *MI, MCInst &OutMI) const; - - MCSymbol *GetSymbolFromOperand(const MachineOperand &MO) const; - MCOperand LowerSymbolOperand(const MachineOperand &MO, MCSymbol *Sym) const; - -private: - MachineModuleInfoMachO &getMachOMMI() const; -}; - -} - -#endif diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index 877b8f6..73ac747 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -106,23 +106,7 @@ X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const { int X86RegisterInfo::getSEHRegNum(unsigned i) const { - int reg = X86_MC::getX86RegNum(i); - switch (i) { - case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B: - case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B: - case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B: - case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B: - case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B: - case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B: - case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B: - case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B: - case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11: - case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15: - case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11: - case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15: - reg += 8; - } - return reg; + return getEncodingValue(i); } const TargetRegisterClass * @@ -245,15 +229,26 @@ const uint16_t * X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { bool callsEHReturn = false; bool ghcCall = false; + bool oclBiCall = false; + bool HasAVX = TM.getSubtarget().hasAVX(); if (MF) { callsEHReturn = MF->getMMI().callsEHReturn(); const Function *F = MF->getFunction(); ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false); + oclBiCall = (F ? 
F->getCallingConv() == CallingConv::Intel_OCL_BI : false); } if (ghcCall) return CSR_NoRegs_SaveList; + if (oclBiCall) { + if (HasAVX && IsWin64) + return CSR_Win64_Intel_OCL_BI_AVX_SaveList; + if (HasAVX && Is64Bit) + return CSR_64_Intel_OCL_BI_AVX_SaveList; + if (!HasAVX && !IsWin64 && Is64Bit) + return CSR_64_Intel_OCL_BI_SaveList; + } if (Is64Bit) { if (IsWin64) return CSR_Win64_SaveList; @@ -268,6 +263,16 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { const uint32_t* X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const { + bool HasAVX = TM.getSubtarget().hasAVX(); + + if (CC == CallingConv::Intel_OCL_BI) { + if (IsWin64 && HasAVX) + return CSR_Win64_Intel_OCL_BI_AVX_RegMask; + if (Is64Bit && HasAVX) + return CSR_64_Intel_OCL_BI_AVX_RegMask; + if (!HasAVX && !IsWin64 && Is64Bit) + return CSR_64_Intel_OCL_BI_RegMask; + } if (CC == CallingConv::GHC) return CSR_NoRegs_RegMask; if (!Is64Bit) @@ -277,6 +282,11 @@ X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const { return CSR_64_RegMask; } +const uint32_t* +X86RegisterInfo::getNoPreservedMask() const { + return CSR_NoRegs_RegMask; +} + BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const { BitVector Reserved(getNumRegs()); const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); @@ -398,8 +408,9 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); const Function *F = MF.getFunction(); unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); - bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) || - F->hasFnAttr(Attribute::StackAlignment)); + bool requiresRealignment = + ((MFI->getMaxAlignment() > StackAlign) || + F->getFnAttributes().hasAttribute(Attributes::StackAlignment)); // If we've requested that we force align the stack do so now. 
if (ForceStackAlign) @@ -522,7 +533,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, - int SPAdj, RegScavenger *RS) const{ + int SPAdj, RegScavenger *RS) const { assert(SPAdj == 0 && "Unexpected"); unsigned i = 0; @@ -590,9 +601,10 @@ unsigned X86RegisterInfo::getEHHandlerRegister() const { } namespace llvm { -unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) { - switch (VT.getSimpleVT().SimpleTy) { - default: return Reg; +unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT, + bool High) { + switch (VT) { + default: llvm_unreachable("Unexpected VT"); case MVT::i8: if (High) { switch (Reg) { @@ -608,7 +620,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) { } } else { switch (Reg) { - default: return 0; + default: llvm_unreachable("Unexpected register"); case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: return X86::AL; case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: @@ -645,7 +657,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) { } case MVT::i16: switch (Reg) { - default: return Reg; + default: llvm_unreachable("Unexpected register"); case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: return X86::AX; case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: @@ -681,7 +693,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) { } case MVT::i32: switch (Reg) { - default: return Reg; + default: llvm_unreachable("Unexpected register"); case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: return X86::EAX; case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: @@ -733,7 +745,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) { } } switch (Reg) { - default: return Reg; + default: llvm_unreachable("Unexpected register"); case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX: return X86::RAX; case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX: diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h index 1bc32cb..7932ede 100644 --- a/lib/Target/X86/X86RegisterInfo.h +++ b/lib/Target/X86/X86RegisterInfo.h @@ -58,10 +58,6 @@ private: public: X86RegisterInfo(X86TargetMachine &tm, const TargetInstrInfo &tii); - /// getX86RegNum - Returns the native X86 register number for the given LLVM - /// register identifier. - static unsigned getX86RegNum(unsigned RegNo); - // FIXME: This should be tablegen'd like getDwarfRegNum is int getSEHRegNum(unsigned i) const; @@ -104,6 +100,7 @@ public: /// callee-save registers on this target. const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const; const uint32_t *getCallPreservedMask(CallingConv::ID) const; + const uint32_t *getNoPreservedMask() const; /// getReservedRegs - Returns a bitset indexed by physical register number /// indicating if a register is a special register that has particular uses and @@ -141,8 +138,8 @@ public: // getX86SubSuperRegister - X86 utility function. It returns the sub or super // register of a specific X86 register. -// e.g. getX86SubSuperRegister(X86::EAX, EVT::i16) return X86:AX -unsigned getX86SubSuperRegister(unsigned, EVT, bool High=false); +// e.g. 
getX86SubSuperRegister(X86::EAX, MVT::i16) return X86:AX +unsigned getX86SubSuperRegister(unsigned, MVT::SimpleValueType, bool High=false); } // End llvm namespace diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td index edc7184..be6282a 100644 --- a/lib/Target/X86/X86RegisterInfo.td +++ b/lib/Target/X86/X86RegisterInfo.td @@ -13,258 +13,264 @@ // //===----------------------------------------------------------------------===// -//===----------------------------------------------------------------------===// -// Register definitions... -// -let Namespace = "X86" in { +class X86Reg Enc, list subregs = []> : Register { + let Namespace = "X86"; + let HWEncoding = Enc; + let SubRegs = subregs; +} - // Subregister indices. +// Subregister indices. +let Namespace = "X86" in { def sub_8bit : SubRegIndex; def sub_8bit_hi : SubRegIndex; def sub_16bit : SubRegIndex; def sub_32bit : SubRegIndex; - def sub_xmm : SubRegIndex; - - - // In the register alias definitions below, we define which registers alias - // which others. We only specify which registers the small registers alias, - // because the register file generator is smart enough to figure out that - // AL aliases AX if we tell it that AX aliased AL (for example). - - // Dwarf numbering is different for 32-bit and 64-bit, and there are - // variations by target as well. Currently the first entry is for X86-64, - // second - for EH on X86-32/Darwin and third is 'generic' one (X86-32/Linux - // and debug information on X86-32/Darwin) - - // 8-bit registers - // Low registers - def AL : Register<"al">; - def DL : Register<"dl">; - def CL : Register<"cl">; - def BL : Register<"bl">; - - // X86-64 only, requires REX. - let CostPerUse = 1 in { - def SIL : Register<"sil">; - def DIL : Register<"dil">; - def BPL : Register<"bpl">; - def SPL : Register<"spl">; - def R8B : Register<"r8b">; - def R9B : Register<"r9b">; - def R10B : Register<"r10b">; - def R11B : Register<"r11b">; - def R12B : Register<"r12b">; - def R13B : Register<"r13b">; - def R14B : Register<"r14b">; - def R15B : Register<"r15b">; - } - - // High registers. On x86-64, these cannot be used in any instruction - // with a REX prefix. - def AH : Register<"ah">; - def DH : Register<"dh">; - def CH : Register<"ch">; - def BH : Register<"bh">; - - // 16-bit registers - let SubRegIndices = [sub_8bit, sub_8bit_hi], CoveredBySubRegs = 1 in { - def AX : RegisterWithSubRegs<"ax", [AL,AH]>; - def DX : RegisterWithSubRegs<"dx", [DL,DH]>; - def CX : RegisterWithSubRegs<"cx", [CL,CH]>; - def BX : RegisterWithSubRegs<"bx", [BL,BH]>; - } - let SubRegIndices = [sub_8bit] in { - def SI : RegisterWithSubRegs<"si", [SIL]>; - def DI : RegisterWithSubRegs<"di", [DIL]>; - def BP : RegisterWithSubRegs<"bp", [BPL]>; - def SP : RegisterWithSubRegs<"sp", [SPL]>; - } - def IP : Register<"ip">; - - // X86-64 only, requires REX. 
- let SubRegIndices = [sub_8bit], CostPerUse = 1 in { - def R8W : RegisterWithSubRegs<"r8w", [R8B]>; - def R9W : RegisterWithSubRegs<"r9w", [R9B]>; - def R10W : RegisterWithSubRegs<"r10w", [R10B]>; - def R11W : RegisterWithSubRegs<"r11w", [R11B]>; - def R12W : RegisterWithSubRegs<"r12w", [R12B]>; - def R13W : RegisterWithSubRegs<"r13w", [R13B]>; - def R14W : RegisterWithSubRegs<"r14w", [R14B]>; - def R15W : RegisterWithSubRegs<"r15w", [R15B]>; - } - // 32-bit registers - let SubRegIndices = [sub_16bit] in { - def EAX : RegisterWithSubRegs<"eax", [AX]>, DwarfRegNum<[-2, 0, 0]>; - def EDX : RegisterWithSubRegs<"edx", [DX]>, DwarfRegNum<[-2, 2, 2]>; - def ECX : RegisterWithSubRegs<"ecx", [CX]>, DwarfRegNum<[-2, 1, 1]>; - def EBX : RegisterWithSubRegs<"ebx", [BX]>, DwarfRegNum<[-2, 3, 3]>; - def ESI : RegisterWithSubRegs<"esi", [SI]>, DwarfRegNum<[-2, 6, 6]>; - def EDI : RegisterWithSubRegs<"edi", [DI]>, DwarfRegNum<[-2, 7, 7]>; - def EBP : RegisterWithSubRegs<"ebp", [BP]>, DwarfRegNum<[-2, 4, 5]>; - def ESP : RegisterWithSubRegs<"esp", [SP]>, DwarfRegNum<[-2, 5, 4]>; - def EIP : RegisterWithSubRegs<"eip", [IP]>, DwarfRegNum<[-2, 8, 8]>; - - // X86-64 only, requires REX - let CostPerUse = 1 in { - def R8D : RegisterWithSubRegs<"r8d", [R8W]>; - def R9D : RegisterWithSubRegs<"r9d", [R9W]>; - def R10D : RegisterWithSubRegs<"r10d", [R10W]>; - def R11D : RegisterWithSubRegs<"r11d", [R11W]>; - def R12D : RegisterWithSubRegs<"r12d", [R12W]>; - def R13D : RegisterWithSubRegs<"r13d", [R13W]>; - def R14D : RegisterWithSubRegs<"r14d", [R14W]>; - def R15D : RegisterWithSubRegs<"r15d", [R15W]>; - }} - - // 64-bit registers, X86-64 only - let SubRegIndices = [sub_32bit] in { - def RAX : RegisterWithSubRegs<"rax", [EAX]>, DwarfRegNum<[0, -2, -2]>; - def RDX : RegisterWithSubRegs<"rdx", [EDX]>, DwarfRegNum<[1, -2, -2]>; - def RCX : RegisterWithSubRegs<"rcx", [ECX]>, DwarfRegNum<[2, -2, -2]>; - def RBX : RegisterWithSubRegs<"rbx", [EBX]>, DwarfRegNum<[3, -2, -2]>; - def RSI : RegisterWithSubRegs<"rsi", [ESI]>, DwarfRegNum<[4, -2, -2]>; - def RDI : RegisterWithSubRegs<"rdi", [EDI]>, DwarfRegNum<[5, -2, -2]>; - def RBP : RegisterWithSubRegs<"rbp", [EBP]>, DwarfRegNum<[6, -2, -2]>; - def RSP : RegisterWithSubRegs<"rsp", [ESP]>, DwarfRegNum<[7, -2, -2]>; - - // These also require REX. - let CostPerUse = 1 in { - def R8 : RegisterWithSubRegs<"r8", [R8D]>, DwarfRegNum<[8, -2, -2]>; - def R9 : RegisterWithSubRegs<"r9", [R9D]>, DwarfRegNum<[9, -2, -2]>; - def R10 : RegisterWithSubRegs<"r10", [R10D]>, DwarfRegNum<[10, -2, -2]>; - def R11 : RegisterWithSubRegs<"r11", [R11D]>, DwarfRegNum<[11, -2, -2]>; - def R12 : RegisterWithSubRegs<"r12", [R12D]>, DwarfRegNum<[12, -2, -2]>; - def R13 : RegisterWithSubRegs<"r13", [R13D]>, DwarfRegNum<[13, -2, -2]>; - def R14 : RegisterWithSubRegs<"r14", [R14D]>, DwarfRegNum<[14, -2, -2]>; - def R15 : RegisterWithSubRegs<"r15", [R15D]>, DwarfRegNum<[15, -2, -2]>; - def RIP : RegisterWithSubRegs<"rip", [EIP]>, DwarfRegNum<[16, -2, -2]>; - }} - - // MMX Registers. These are actually aliased to ST0 .. 
ST7 - def MM0 : Register<"mm0">, DwarfRegNum<[41, 29, 29]>; - def MM1 : Register<"mm1">, DwarfRegNum<[42, 30, 30]>; - def MM2 : Register<"mm2">, DwarfRegNum<[43, 31, 31]>; - def MM3 : Register<"mm3">, DwarfRegNum<[44, 32, 32]>; - def MM4 : Register<"mm4">, DwarfRegNum<[45, 33, 33]>; - def MM5 : Register<"mm5">, DwarfRegNum<[46, 34, 34]>; - def MM6 : Register<"mm6">, DwarfRegNum<[47, 35, 35]>; - def MM7 : Register<"mm7">, DwarfRegNum<[48, 36, 36]>; - - // Pseudo Floating Point registers - def FP0 : Register<"fp0">; - def FP1 : Register<"fp1">; - def FP2 : Register<"fp2">; - def FP3 : Register<"fp3">; - def FP4 : Register<"fp4">; - def FP5 : Register<"fp5">; - def FP6 : Register<"fp6">; - - // XMM Registers, used by the various SSE instruction set extensions. - def XMM0: Register<"xmm0">, DwarfRegNum<[17, 21, 21]>; - def XMM1: Register<"xmm1">, DwarfRegNum<[18, 22, 22]>; - def XMM2: Register<"xmm2">, DwarfRegNum<[19, 23, 23]>; - def XMM3: Register<"xmm3">, DwarfRegNum<[20, 24, 24]>; - def XMM4: Register<"xmm4">, DwarfRegNum<[21, 25, 25]>; - def XMM5: Register<"xmm5">, DwarfRegNum<[22, 26, 26]>; - def XMM6: Register<"xmm6">, DwarfRegNum<[23, 27, 27]>; - def XMM7: Register<"xmm7">, DwarfRegNum<[24, 28, 28]>; - - // X86-64 only - let CostPerUse = 1 in { - def XMM8: Register<"xmm8">, DwarfRegNum<[25, -2, -2]>; - def XMM9: Register<"xmm9">, DwarfRegNum<[26, -2, -2]>; - def XMM10: Register<"xmm10">, DwarfRegNum<[27, -2, -2]>; - def XMM11: Register<"xmm11">, DwarfRegNum<[28, -2, -2]>; - def XMM12: Register<"xmm12">, DwarfRegNum<[29, -2, -2]>; - def XMM13: Register<"xmm13">, DwarfRegNum<[30, -2, -2]>; - def XMM14: Register<"xmm14">, DwarfRegNum<[31, -2, -2]>; - def XMM15: Register<"xmm15">, DwarfRegNum<[32, -2, -2]>; - } // CostPerUse - - // YMM Registers, used by AVX instructions - let SubRegIndices = [sub_xmm] in { - def YMM0: RegisterWithSubRegs<"ymm0", [XMM0]>, DwarfRegAlias; - def YMM1: RegisterWithSubRegs<"ymm1", [XMM1]>, DwarfRegAlias; - def YMM2: RegisterWithSubRegs<"ymm2", [XMM2]>, DwarfRegAlias; - def YMM3: RegisterWithSubRegs<"ymm3", [XMM3]>, DwarfRegAlias; - def YMM4: RegisterWithSubRegs<"ymm4", [XMM4]>, DwarfRegAlias; - def YMM5: RegisterWithSubRegs<"ymm5", [XMM5]>, DwarfRegAlias; - def YMM6: RegisterWithSubRegs<"ymm6", [XMM6]>, DwarfRegAlias; - def YMM7: RegisterWithSubRegs<"ymm7", [XMM7]>, DwarfRegAlias; - def YMM8: RegisterWithSubRegs<"ymm8", [XMM8]>, DwarfRegAlias; - def YMM9: RegisterWithSubRegs<"ymm9", [XMM9]>, DwarfRegAlias; - def YMM10: RegisterWithSubRegs<"ymm10", [XMM10]>, DwarfRegAlias; - def YMM11: RegisterWithSubRegs<"ymm11", [XMM11]>, DwarfRegAlias; - def YMM12: RegisterWithSubRegs<"ymm12", [XMM12]>, DwarfRegAlias; - def YMM13: RegisterWithSubRegs<"ymm13", [XMM13]>, DwarfRegAlias; - def YMM14: RegisterWithSubRegs<"ymm14", [XMM14]>, DwarfRegAlias; - def YMM15: RegisterWithSubRegs<"ymm15", [XMM15]>, DwarfRegAlias; - } - - class STRegister A> : Register { - let Aliases = A; - } - - // Floating point stack registers. These don't map one-to-one to the FP - // pseudo registers, but we still mark them as aliasing FP registers. That - // way both kinds can be live without exceeding the stack depth. ST registers - // are only live around inline assembly. 
- def ST0 : STRegister<"st(0)", []>, DwarfRegNum<[33, 12, 11]>; - def ST1 : STRegister<"st(1)", [FP6]>, DwarfRegNum<[34, 13, 12]>; - def ST2 : STRegister<"st(2)", [FP5]>, DwarfRegNum<[35, 14, 13]>; - def ST3 : STRegister<"st(3)", [FP4]>, DwarfRegNum<[36, 15, 14]>; - def ST4 : STRegister<"st(4)", [FP3]>, DwarfRegNum<[37, 16, 15]>; - def ST5 : STRegister<"st(5)", [FP2]>, DwarfRegNum<[38, 17, 16]>; - def ST6 : STRegister<"st(6)", [FP1]>, DwarfRegNum<[39, 18, 17]>; - def ST7 : STRegister<"st(7)", [FP0]>, DwarfRegNum<[40, 19, 18]>; - - // Floating-point status word - def FPSW : Register<"fpsw">; - - // Status flags register - def EFLAGS : Register<"flags">; - - // Segment registers - def CS : Register<"cs">; - def DS : Register<"ds">; - def SS : Register<"ss">; - def ES : Register<"es">; - def FS : Register<"fs">; - def GS : Register<"gs">; - - // Debug registers - def DR0 : Register<"dr0">; - def DR1 : Register<"dr1">; - def DR2 : Register<"dr2">; - def DR3 : Register<"dr3">; - def DR4 : Register<"dr4">; - def DR5 : Register<"dr5">; - def DR6 : Register<"dr6">; - def DR7 : Register<"dr7">; - - // Control registers - def CR0 : Register<"cr0">; - def CR1 : Register<"cr1">; - def CR2 : Register<"cr2">; - def CR3 : Register<"cr3">; - def CR4 : Register<"cr4">; - def CR5 : Register<"cr5">; - def CR6 : Register<"cr6">; - def CR7 : Register<"cr7">; - def CR8 : Register<"cr8">; - def CR9 : Register<"cr9">; - def CR10 : Register<"cr10">; - def CR11 : Register<"cr11">; - def CR12 : Register<"cr12">; - def CR13 : Register<"cr13">; - def CR14 : Register<"cr14">; - def CR15 : Register<"cr15">; - - // Pseudo index registers - def EIZ : Register<"eiz">; - def RIZ : Register<"riz">; + def sub_xmm : SubRegIndex; } +//===----------------------------------------------------------------------===// +// Register definitions... +// + +// In the register alias definitions below, we define which registers alias +// which others. We only specify which registers the small registers alias, +// because the register file generator is smart enough to figure out that +// AL aliases AX if we tell it that AX aliased AL (for example). + +// Dwarf numbering is different for 32-bit and 64-bit, and there are +// variations by target as well. Currently the first entry is for X86-64, +// second - for EH on X86-32/Darwin and third is 'generic' one (X86-32/Linux +// and debug information on X86-32/Darwin) + +// 8-bit registers +// Low registers +def AL : X86Reg<"al", 0>; +def DL : X86Reg<"dl", 2>; +def CL : X86Reg<"cl", 1>; +def BL : X86Reg<"bl", 3>; + +// High registers. On x86-64, these cannot be used in any instruction +// with a REX prefix. +def AH : X86Reg<"ah", 4>; +def DH : X86Reg<"dh", 6>; +def CH : X86Reg<"ch", 5>; +def BH : X86Reg<"bh", 7>; + +// X86-64 only, requires REX. 
+let CostPerUse = 1 in { +def SIL : X86Reg<"sil", 6>; +def DIL : X86Reg<"dil", 7>; +def BPL : X86Reg<"bpl", 5>; +def SPL : X86Reg<"spl", 4>; +def R8B : X86Reg<"r8b", 8>; +def R9B : X86Reg<"r9b", 9>; +def R10B : X86Reg<"r10b", 10>; +def R11B : X86Reg<"r11b", 11>; +def R12B : X86Reg<"r12b", 12>; +def R13B : X86Reg<"r13b", 13>; +def R14B : X86Reg<"r14b", 14>; +def R15B : X86Reg<"r15b", 15>; +} + +// 16-bit registers +let SubRegIndices = [sub_8bit, sub_8bit_hi], CoveredBySubRegs = 1 in { +def AX : X86Reg<"ax", 0, [AL,AH]>; +def DX : X86Reg<"dx", 2, [DL,DH]>; +def CX : X86Reg<"cx", 1, [CL,CH]>; +def BX : X86Reg<"bx", 3, [BL,BH]>; +} +let SubRegIndices = [sub_8bit] in { +def SI : X86Reg<"si", 6, [SIL]>; +def DI : X86Reg<"di", 7, [DIL]>; +def BP : X86Reg<"bp", 5, [BPL]>; +def SP : X86Reg<"sp", 4, [SPL]>; +} +def IP : X86Reg<"ip", 0>; + +// X86-64 only, requires REX. +let SubRegIndices = [sub_8bit], CostPerUse = 1 in { +def R8W : X86Reg<"r8w", 8, [R8B]>; +def R9W : X86Reg<"r9w", 9, [R9B]>; +def R10W : X86Reg<"r10w", 10, [R10B]>; +def R11W : X86Reg<"r11w", 11, [R11B]>; +def R12W : X86Reg<"r12w", 12, [R12B]>; +def R13W : X86Reg<"r13w", 13, [R13B]>; +def R14W : X86Reg<"r14w", 14, [R14B]>; +def R15W : X86Reg<"r15w", 15, [R15B]>; +} + +// 32-bit registers +let SubRegIndices = [sub_16bit] in { +def EAX : X86Reg<"eax", 0, [AX]>, DwarfRegNum<[-2, 0, 0]>; +def EDX : X86Reg<"edx", 2, [DX]>, DwarfRegNum<[-2, 2, 2]>; +def ECX : X86Reg<"ecx", 1, [CX]>, DwarfRegNum<[-2, 1, 1]>; +def EBX : X86Reg<"ebx", 3, [BX]>, DwarfRegNum<[-2, 3, 3]>; +def ESI : X86Reg<"esi", 6, [SI]>, DwarfRegNum<[-2, 6, 6]>; +def EDI : X86Reg<"edi", 7, [DI]>, DwarfRegNum<[-2, 7, 7]>; +def EBP : X86Reg<"ebp", 5, [BP]>, DwarfRegNum<[-2, 4, 5]>; +def ESP : X86Reg<"esp", 4, [SP]>, DwarfRegNum<[-2, 5, 4]>; +def EIP : X86Reg<"eip", 0, [IP]>, DwarfRegNum<[-2, 8, 8]>; + +// X86-64 only, requires REX +let CostPerUse = 1 in { +def R8D : X86Reg<"r8d", 8, [R8W]>; +def R9D : X86Reg<"r9d", 9, [R9W]>; +def R10D : X86Reg<"r10d", 10, [R10W]>; +def R11D : X86Reg<"r11d", 11, [R11W]>; +def R12D : X86Reg<"r12d", 12, [R12W]>; +def R13D : X86Reg<"r13d", 13, [R13W]>; +def R14D : X86Reg<"r14d", 14, [R14W]>; +def R15D : X86Reg<"r15d", 15, [R15W]>; +}} + +// 64-bit registers, X86-64 only +let SubRegIndices = [sub_32bit] in { +def RAX : X86Reg<"rax", 0, [EAX]>, DwarfRegNum<[0, -2, -2]>; +def RDX : X86Reg<"rdx", 2, [EDX]>, DwarfRegNum<[1, -2, -2]>; +def RCX : X86Reg<"rcx", 1, [ECX]>, DwarfRegNum<[2, -2, -2]>; +def RBX : X86Reg<"rbx", 3, [EBX]>, DwarfRegNum<[3, -2, -2]>; +def RSI : X86Reg<"rsi", 6, [ESI]>, DwarfRegNum<[4, -2, -2]>; +def RDI : X86Reg<"rdi", 7, [EDI]>, DwarfRegNum<[5, -2, -2]>; +def RBP : X86Reg<"rbp", 5, [EBP]>, DwarfRegNum<[6, -2, -2]>; +def RSP : X86Reg<"rsp", 4, [ESP]>, DwarfRegNum<[7, -2, -2]>; + +// These also require REX. +let CostPerUse = 1 in { +def R8 : X86Reg<"r8", 8, [R8D]>, DwarfRegNum<[ 8, -2, -2]>; +def R9 : X86Reg<"r9", 9, [R9D]>, DwarfRegNum<[ 9, -2, -2]>; +def R10 : X86Reg<"r10", 10, [R10D]>, DwarfRegNum<[10, -2, -2]>; +def R11 : X86Reg<"r11", 11, [R11D]>, DwarfRegNum<[11, -2, -2]>; +def R12 : X86Reg<"r12", 12, [R12D]>, DwarfRegNum<[12, -2, -2]>; +def R13 : X86Reg<"r13", 13, [R13D]>, DwarfRegNum<[13, -2, -2]>; +def R14 : X86Reg<"r14", 14, [R14D]>, DwarfRegNum<[14, -2, -2]>; +def R15 : X86Reg<"r15", 15, [R15D]>, DwarfRegNum<[15, -2, -2]>; +def RIP : X86Reg<"rip", 0, [EIP]>, DwarfRegNum<[16, -2, -2]>; +}} + +// MMX Registers. These are actually aliased to ST0 .. 
ST7 +def MM0 : X86Reg<"mm0", 0>, DwarfRegNum<[41, 29, 29]>; +def MM1 : X86Reg<"mm1", 1>, DwarfRegNum<[42, 30, 30]>; +def MM2 : X86Reg<"mm2", 2>, DwarfRegNum<[43, 31, 31]>; +def MM3 : X86Reg<"mm3", 3>, DwarfRegNum<[44, 32, 32]>; +def MM4 : X86Reg<"mm4", 4>, DwarfRegNum<[45, 33, 33]>; +def MM5 : X86Reg<"mm5", 5>, DwarfRegNum<[46, 34, 34]>; +def MM6 : X86Reg<"mm6", 6>, DwarfRegNum<[47, 35, 35]>; +def MM7 : X86Reg<"mm7", 7>, DwarfRegNum<[48, 36, 36]>; + +// Pseudo Floating Point registers +def FP0 : X86Reg<"fp0", 0>; +def FP1 : X86Reg<"fp1", 0>; +def FP2 : X86Reg<"fp2", 0>; +def FP3 : X86Reg<"fp3", 0>; +def FP4 : X86Reg<"fp4", 0>; +def FP5 : X86Reg<"fp5", 0>; +def FP6 : X86Reg<"fp6", 0>; + +// XMM Registers, used by the various SSE instruction set extensions. +def XMM0: X86Reg<"xmm0", 0>, DwarfRegNum<[17, 21, 21]>; +def XMM1: X86Reg<"xmm1", 1>, DwarfRegNum<[18, 22, 22]>; +def XMM2: X86Reg<"xmm2", 2>, DwarfRegNum<[19, 23, 23]>; +def XMM3: X86Reg<"xmm3", 3>, DwarfRegNum<[20, 24, 24]>; +def XMM4: X86Reg<"xmm4", 4>, DwarfRegNum<[21, 25, 25]>; +def XMM5: X86Reg<"xmm5", 5>, DwarfRegNum<[22, 26, 26]>; +def XMM6: X86Reg<"xmm6", 6>, DwarfRegNum<[23, 27, 27]>; +def XMM7: X86Reg<"xmm7", 7>, DwarfRegNum<[24, 28, 28]>; + +// X86-64 only +let CostPerUse = 1 in { +def XMM8: X86Reg<"xmm8", 8>, DwarfRegNum<[25, -2, -2]>; +def XMM9: X86Reg<"xmm9", 9>, DwarfRegNum<[26, -2, -2]>; +def XMM10: X86Reg<"xmm10", 10>, DwarfRegNum<[27, -2, -2]>; +def XMM11: X86Reg<"xmm11", 11>, DwarfRegNum<[28, -2, -2]>; +def XMM12: X86Reg<"xmm12", 12>, DwarfRegNum<[29, -2, -2]>; +def XMM13: X86Reg<"xmm13", 13>, DwarfRegNum<[30, -2, -2]>; +def XMM14: X86Reg<"xmm14", 14>, DwarfRegNum<[31, -2, -2]>; +def XMM15: X86Reg<"xmm15", 15>, DwarfRegNum<[32, -2, -2]>; +} // CostPerUse + +// YMM Registers, used by AVX instructions +let SubRegIndices = [sub_xmm] in { +def YMM0: X86Reg<"ymm0", 0, [XMM0]>, DwarfRegAlias; +def YMM1: X86Reg<"ymm1", 1, [XMM1]>, DwarfRegAlias; +def YMM2: X86Reg<"ymm2", 2, [XMM2]>, DwarfRegAlias; +def YMM3: X86Reg<"ymm3", 3, [XMM3]>, DwarfRegAlias; +def YMM4: X86Reg<"ymm4", 4, [XMM4]>, DwarfRegAlias; +def YMM5: X86Reg<"ymm5", 5, [XMM5]>, DwarfRegAlias; +def YMM6: X86Reg<"ymm6", 6, [XMM6]>, DwarfRegAlias; +def YMM7: X86Reg<"ymm7", 7, [XMM7]>, DwarfRegAlias; +def YMM8: X86Reg<"ymm8", 8, [XMM8]>, DwarfRegAlias; +def YMM9: X86Reg<"ymm9", 9, [XMM9]>, DwarfRegAlias; +def YMM10: X86Reg<"ymm10", 10, [XMM10]>, DwarfRegAlias; +def YMM11: X86Reg<"ymm11", 11, [XMM11]>, DwarfRegAlias; +def YMM12: X86Reg<"ymm12", 12, [XMM12]>, DwarfRegAlias; +def YMM13: X86Reg<"ymm13", 13, [XMM13]>, DwarfRegAlias; +def YMM14: X86Reg<"ymm14", 14, [XMM14]>, DwarfRegAlias; +def YMM15: X86Reg<"ymm15", 15, [XMM15]>, DwarfRegAlias; +} + +class STRegister Enc, list A> : X86Reg { + let Aliases = A; +} + +// Floating point stack registers. These don't map one-to-one to the FP +// pseudo registers, but we still mark them as aliasing FP registers. That +// way both kinds can be live without exceeding the stack depth. ST registers +// are only live around inline assembly. 
+def ST0 : STRegister<"st(0)", 0, []>, DwarfRegNum<[33, 12, 11]>; +def ST1 : STRegister<"st(1)", 1, [FP6]>, DwarfRegNum<[34, 13, 12]>; +def ST2 : STRegister<"st(2)", 2, [FP5]>, DwarfRegNum<[35, 14, 13]>; +def ST3 : STRegister<"st(3)", 3, [FP4]>, DwarfRegNum<[36, 15, 14]>; +def ST4 : STRegister<"st(4)", 4, [FP3]>, DwarfRegNum<[37, 16, 15]>; +def ST5 : STRegister<"st(5)", 5, [FP2]>, DwarfRegNum<[38, 17, 16]>; +def ST6 : STRegister<"st(6)", 6, [FP1]>, DwarfRegNum<[39, 18, 17]>; +def ST7 : STRegister<"st(7)", 7, [FP0]>, DwarfRegNum<[40, 19, 18]>; + +// Floating-point status word +def FPSW : X86Reg<"fpsw", 0>; + +// Status flags register +def EFLAGS : X86Reg<"flags", 0>; + +// Segment registers +def CS : X86Reg<"cs", 1>; +def DS : X86Reg<"ds", 3>; +def SS : X86Reg<"ss", 2>; +def ES : X86Reg<"es", 0>; +def FS : X86Reg<"fs", 4>; +def GS : X86Reg<"gs", 5>; + +// Debug registers +def DR0 : X86Reg<"dr0", 0>; +def DR1 : X86Reg<"dr1", 1>; +def DR2 : X86Reg<"dr2", 2>; +def DR3 : X86Reg<"dr3", 3>; +def DR4 : X86Reg<"dr4", 4>; +def DR5 : X86Reg<"dr5", 5>; +def DR6 : X86Reg<"dr6", 6>; +def DR7 : X86Reg<"dr7", 7>; + +// Control registers +def CR0 : X86Reg<"cr0", 0>; +def CR1 : X86Reg<"cr1", 1>; +def CR2 : X86Reg<"cr2", 2>; +def CR3 : X86Reg<"cr3", 3>; +def CR4 : X86Reg<"cr4", 4>; +def CR5 : X86Reg<"cr5", 5>; +def CR6 : X86Reg<"cr6", 6>; +def CR7 : X86Reg<"cr7", 7>; +def CR8 : X86Reg<"cr8", 8>; +def CR9 : X86Reg<"cr9", 9>; +def CR10 : X86Reg<"cr10", 10>; +def CR11 : X86Reg<"cr11", 11>; +def CR12 : X86Reg<"cr12", 12>; +def CR13 : X86Reg<"cr13", 13>; +def CR14 : X86Reg<"cr14", 14>; +def CR15 : X86Reg<"cr15", 15>; + +// Pseudo index registers +def EIZ : X86Reg<"eiz", 4>; +def RIZ : X86Reg<"riz", 4>; + //===----------------------------------------------------------------------===// // Register Class Definitions... now that we have all of the pieces, define the diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp index 00edcbc..723e50c 100644 --- a/lib/Target/X86/X86SelectionDAGInfo.cpp +++ b/lib/Target/X86/X86SelectionDAGInfo.cpp @@ -54,7 +54,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemset(SelectionDAG &DAG, DebugLoc dl, if (const char *bzeroEntry = V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0) { EVT IntPtr = TLI.getPointerTy(); - Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext()); + Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext()); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; Entry.Node = Dst; diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index 9087852..d1ed680 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -163,17 +163,6 @@ bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const { return isTargetELF() || TM.getRelocationModel() == Reloc::Static; } -/// getSpecialAddressLatency - For targets where it is beneficial to -/// backschedule instructions that compute addresses, return a value -/// indicating the number of scheduling cycles of backscheduling that -/// should be attempted. -unsigned X86Subtarget::getSpecialAddressLatency() const { - // For x86 out-of-order targets, back-schedule address computations so - // that loads and stores aren't blocked. - // This value was chosen arbitrarily. 
- return 200; -} - void X86Subtarget::AutoDetectSubtargetFeatures() { unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0; unsigned MaxLevel; @@ -313,6 +302,10 @@ void X86Subtarget::AutoDetectSubtargetFeatures() { HasBMI2 = true; ToggleFeature(X86::FeatureBMI2); } + if (IsIntel && ((EBX >> 11) & 0x1)) { + HasRTM = true; + ToggleFeature(X86::FeatureRTM); + } } } } @@ -341,11 +334,13 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU, , HasLZCNT(false) , HasBMI(false) , HasBMI2(false) + , HasRTM(false) , IsBTMemSlow(false) , IsUAMemFast(false) , HasVectorUAMem(false) , HasCmpxchg16b(false) , UseLeaForSP(false) + , HasSlowDivide(false) , PostRAScheduler(false) , stackAlignment(4) // FIXME: this is a known good value for Yonah. How about others? @@ -400,6 +395,10 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU, } } + // CPUName may have been set by the CPU detection code. Make sure the + // new MCSchedModel is used. + InitMCProcessorInfo(CPUName, FS); + if (X86ProcFamily == IntelAtom) PostRAScheduler = true; @@ -416,12 +415,12 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU, assert((!In64BitMode || HasX86_64) && "64-bit code requested on a subtarget that doesn't support it!"); - // Stack alignment is 16 bytes on Darwin, FreeBSD, Linux and Solaris (both + // Stack alignment is 16 bytes on Darwin, Linux and Solaris (both // 32 and 64 bit) and for all 64-bit targets. if (StackAlignOverride) stackAlignment = StackAlignOverride; - else if (isTargetDarwin() || isTargetFreeBSD() || isTargetLinux() || - isTargetSolaris() || In64BitMode) + else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() || + In64BitMode) stackAlignment = 16; } diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h index 6841c5b..8bf4cc7 100644 --- a/lib/Target/X86/X86Subtarget.h +++ b/lib/Target/X86/X86Subtarget.h @@ -118,6 +118,9 @@ protected: /// HasBMI2 - Processor has BMI2 instructions. bool HasBMI2; + /// HasRTM - Processor has RTM instructions. + bool HasRTM; + /// IsBTMemSlow - True if BT (bit test) of memory instructions are slow. bool IsBTMemSlow; @@ -136,6 +139,10 @@ protected: /// the stack pointer. This is an optimization for Intel Atom processors. bool UseLeaForSP; + /// HasSlowDivide - True if smaller divides are significantly faster than + /// full divides and should be used when possible. + bool HasSlowDivide; + /// PostRAScheduler - True if using post-register-allocation scheduler. bool PostRAScheduler; @@ -205,7 +212,8 @@ public: bool hasAES() const { return HasAES; } bool hasPCLMUL() const { return HasPCLMUL; } bool hasFMA() const { return HasFMA; } - bool hasFMA4() const { return HasFMA4; } + // FIXME: Favor FMA when both are enabled. Is this the right thing to do? 
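AutoDetectSubtargetFeatures above sets HasRTM from bit 11 of EBX returned by CPUID leaf 7, subleaf 0. A minimal host-side sketch of the same check, assuming GCC/Clang's cpuid.h helpers (__get_cpuid_max, __cpuid_count); the hostHasRTM wrapper itself is made up for illustration:

  #include <cpuid.h>
  #include <cstdio>

  // Leaf 7, subleaf 0: EBX bit 11 reports RTM, matching the
  // (EBX >> 11) & 0x1 test in AutoDetectSubtargetFeatures().
  static bool hostHasRTM() {
    if (__get_cpuid_max(0, nullptr) < 7)
      return false;
    unsigned EAX, EBX, ECX, EDX;
    __cpuid_count(7, 0, EAX, EBX, ECX, EDX);
    return (EBX >> 11) & 0x1;
  }

  int main() {
    std::printf("RTM: %s\n", hostHasRTM() ? "yes" : "no");
    return 0;
  }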
+ bool hasFMA4() const { return HasFMA4 && !HasFMA; } bool hasXOP() const { return HasXOP; } bool hasMOVBE() const { return HasMOVBE; } bool hasRDRAND() const { return HasRDRAND; } @@ -214,11 +222,13 @@ public: bool hasLZCNT() const { return HasLZCNT; } bool hasBMI() const { return HasBMI; } bool hasBMI2() const { return HasBMI2; } + bool hasRTM() const { return HasRTM; } bool isBTMemSlow() const { return IsBTMemSlow; } bool isUnalignedMemAccessFast() const { return IsUAMemFast; } bool hasVectorUAMem() const { return HasVectorUAMem; } bool hasCmpxchg16b() const { return HasCmpxchg16b; } bool useLeaForSP() const { return UseLeaForSP; } + bool hasSlowDivide() const { return HasSlowDivide; } bool isAtom() const { return X86ProcFamily == IntelAtom; } @@ -231,10 +241,10 @@ public: bool isTargetSolaris() const { return TargetTriple.getOS() == Triple::Solaris; } - - // ELF is a reasonably sane default and the only other X86 targets we - // support are Darwin and Windows. Just use "not those". - bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); } + bool isTargetELF() const { + return (TargetTriple.getEnvironment() == Triple::ELF || + TargetTriple.isOSBinFormatELF()); + } bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; } bool isTargetNaCl() const { return TargetTriple.getOS() == Triple::NativeClient; @@ -245,7 +255,10 @@ public: bool isTargetMingw() const { return TargetTriple.getOS() == Triple::MinGW32; } bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; } bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); } - bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); } + bool isTargetCOFF() const { + return (TargetTriple.getEnvironment() != Triple::ELF && + TargetTriple.isOSBinFormatCOFF()); + } bool isTargetEnvMacho() const { return TargetTriple.isEnvironmentMachO(); } bool isTargetWin64() const { @@ -296,12 +309,6 @@ public: /// returns null. const char *getBZeroEntry() const; - /// getSpecialAddressLatency - For targets where it is beneficial to - /// backschedule instructions that compute addresses, return a value - /// indicating the number of scheduling cycles of backscheduling that - /// should be attempted. - unsigned getSpecialAddressLatency() const; - /// enablePostRAScheduler - run for Atom optimization. bool enablePostRAScheduler(CodeGenOpt::Level OptLevel, TargetSubtargetInfo::AntiDepBreakMode& Mode, diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index b7ba568..158f9dc 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -36,7 +36,7 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false), - DataLayout(getSubtargetImpl()->isTargetDarwin() ? + DL(getSubtargetImpl()->isTargetDarwin() ? 
"e-p:32:32-f64:32:64-i64:32:64-f80:128:128-f128:128:128-" "n8:16:32-S128" : (getSubtargetImpl()->isTargetCygMing() || @@ -48,7 +48,8 @@ X86_32TargetMachine::X86_32TargetMachine(const Target &T, StringRef TT, InstrInfo(*this), TSInfo(*this), TLInfo(*this), - JITInfo(*this) { + JITInfo(*this), + STTI(&TLInfo), VTTI(&TLInfo) { } void X86_64TargetMachine::anchor() { } @@ -59,12 +60,13 @@ X86_64TargetMachine::X86_64TargetMachine(const Target &T, StringRef TT, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL) : X86TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true), - DataLayout("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-" + DL("e-p:64:64-s:64-f64:64:64-i64:64:64-f80:128:128-f128:128:128-" "n8:16:32:64-S128"), InstrInfo(*this), TSInfo(*this), TLInfo(*this), - JITInfo(*this) { + JITInfo(*this), + STTI(&TLInfo), VTTI(&TLInfo){ } /// X86TargetMachine ctor - Create an X86 target. @@ -78,7 +80,6 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS, Options.StackAlignmentOverride, is64Bit), FrameLowering(*this, Subtarget), - ELFWriterInfo(is64Bit, true), InstrItins(Subtarget.getInstrItineraryData()){ // Determine the PICStyle based on the target selected. if (getRelocationModel() == Reloc::Static) { @@ -113,6 +114,12 @@ UseVZeroUpper("x86-use-vzeroupper", cl::desc("Minimize AVX to SSE transition penalty"), cl::init(true)); +// Temporary option to control early if-conversion for x86 while adding machine +// models. +static cl::opt +X86EarlyIfConv("x86-early-ifcvt", + cl::desc("Enable early if-conversion on X86")); + //===----------------------------------------------------------------------===// // Pass Pipeline Configuration //===----------------------------------------------------------------------===// @@ -142,7 +149,7 @@ public: TargetPassConfig *X86TargetMachine::createPassConfig(PassManagerBase &PM) { X86PassConfig *PC = new X86PassConfig(this, PM); - if (Subtarget.hasCMov()) + if (X86EarlyIfConv && Subtarget.hasCMov()) PC->enablePass(&EarlyIfConverterID); return PC; diff --git a/lib/Target/X86/X86TargetMachine.h b/lib/Target/X86/X86TargetMachine.h index 8e935af..12311a1 100644 --- a/lib/Target/X86/X86TargetMachine.h +++ b/lib/Target/X86/X86TargetMachine.h @@ -15,7 +15,6 @@ #define X86TARGETMACHINE_H #include "X86.h" -#include "X86ELFWriterInfo.h" #include "X86InstrInfo.h" #include "X86ISelLowering.h" #include "X86FrameLowering.h" @@ -23,8 +22,9 @@ #include "X86SelectionDAGInfo.h" #include "X86Subtarget.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetFrameLowering.h" +#include "llvm/Target/TargetTransformImpl.h" namespace llvm { @@ -33,7 +33,6 @@ class StringRef; class X86TargetMachine : public LLVMTargetMachine { X86Subtarget Subtarget; X86FrameLowering FrameLowering; - X86ELFWriterInfo ELFWriterInfo; InstrItineraryData InstrItins; public: @@ -62,9 +61,6 @@ public: virtual const X86RegisterInfo *getRegisterInfo() const { return &getInstrInfo()->getRegisterInfo(); } - virtual const X86ELFWriterInfo *getELFWriterInfo() const { - return Subtarget.isTargetELF() ? 
&ELFWriterInfo : 0; - } virtual const InstrItineraryData *getInstrItineraryData() const { return &InstrItins; } @@ -80,17 +76,19 @@ public: /// class X86_32TargetMachine : public X86TargetMachine { virtual void anchor(); - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment X86InstrInfo InstrInfo; X86SelectionDAGInfo TSInfo; X86TargetLowering TLInfo; X86JITInfo JITInfo; + ScalarTargetTransformImpl STTI; + X86VectorTargetTransformInfo VTTI; public: X86_32TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); - virtual const TargetData *getTargetData() const { return &DataLayout; } + virtual const DataLayout *getDataLayout() const { return &DL; } virtual const X86TargetLowering *getTargetLowering() const { return &TLInfo; } @@ -103,23 +101,31 @@ public: virtual X86JITInfo *getJITInfo() { return &JITInfo; } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } }; /// X86_64TargetMachine - X86 64-bit target machine. /// class X86_64TargetMachine : public X86TargetMachine { virtual void anchor(); - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment X86InstrInfo InstrInfo; X86SelectionDAGInfo TSInfo; X86TargetLowering TLInfo; X86JITInfo JITInfo; + ScalarTargetTransformImpl STTI; + X86VectorTargetTransformInfo VTTI; public: X86_64TargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options, Reloc::Model RM, CodeModel::Model CM, CodeGenOpt::Level OL); - virtual const TargetData *getTargetData() const { return &DataLayout; } + virtual const DataLayout *getDataLayout() const { return &DL; } virtual const X86TargetLowering *getTargetLowering() const { return &TLInfo; } @@ -132,6 +138,12 @@ public: virtual X86JITInfo *getJITInfo() { return &JITInfo; } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } }; } // End llvm namespace diff --git a/lib/Target/X86/X86VZeroUpper.cpp b/lib/Target/X86/X86VZeroUpper.cpp index 80b75dc..c4a5887 100644 --- a/lib/Target/X86/X86VZeroUpper.cpp +++ b/lib/Target/X86/X86VZeroUpper.cpp @@ -42,7 +42,6 @@ namespace { private: const TargetInstrInfo *TII; // Machine instruction info. - MachineBasicBlock *MBB; // Current basic block // Any YMM register live-in to this function? 
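The getDataLayout() accessors above, together with the llvm/DataLayout.h includes throughout this patch, replace the old TargetData/getTargetData() interface; the queries themselves are unchanged. A small sketch of the kind of query the lowering code performs, using an abbreviated 32-bit-style layout string in place of a TargetMachine (the function name is illustrative only):

  #include "llvm/DataLayout.h"      // replaces llvm/Target/TargetData.h
  #include "llvm/LLVMContext.h"
  #include "llvm/Type.h"
  #include <cstdint>

  // Same style of query LowerGlobalTLSAddress and EmitTargetCodeForMemset
  // make through TM.getDataLayout().
  uint64_t int64AllocSize(llvm::LLVMContext &Ctx) {
    llvm::DataLayout DL("e-p:32:32-i64:32:64");
    return DL.getTypeAllocSize(llvm::Type::getInt64Ty(Ctx));   // 8 bytes
  }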
bool FnHasLiveInYmm; @@ -84,7 +83,7 @@ namespace { // 2) All states must be clean for the result to be clean // 3) If none above and one unknown, the result state is also unknown // - unsigned computeState(unsigned PrevState, unsigned CurState) { + static unsigned computeState(unsigned PrevState, unsigned CurState) { if (PrevState == ST_INIT) return CurState; @@ -122,7 +121,7 @@ static bool checkFnHasLiveInYmm(MachineRegisterInfo &MRI) { } static bool hasYmmReg(MachineInstr *MI) { - for (int i = 0, e = MI->getNumOperands(); i != e; ++i) { + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { const MachineOperand &MO = MI->getOperand(i); if (!MO.isReg()) continue; @@ -148,7 +147,7 @@ bool VZeroUpperInserter::runOnMachineFunction(MachineFunction &MF) { const TargetRegisterClass *RC = &X86::VR256RegClass; for (TargetRegisterClass::iterator i = RC->begin(), e = RC->end(); i != e; i++) { - if (MRI.isPhysRegUsed(*i)) { + if (!MRI.reg_nodbg_empty(*i)) { YMMUsed = true; break; } @@ -189,7 +188,6 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) { bool Changed = false; unsigned BBNum = BB.getNumber(); - MBB = &BB; // Don't process already solved BBs if (BBSolved[BBNum]) @@ -207,7 +205,7 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF, // The entry MBB for the function may set the initial state to dirty if // the function receives any YMM incoming arguments - if (MBB == MF.begin()) { + if (&BB == MF.begin()) { EntryState = ST_CLEAN; if (FnHasLiveInYmm) EntryState = ST_DIRTY; @@ -253,7 +251,7 @@ bool VZeroUpperInserter::processBasicBlock(MachineFunction &MF, // When unknown, only compute the information within the block to have // it available in the exit if possible, but don't change the block. if (EntryState != ST_UNKNOWN) { - BuildMI(*MBB, I, dl, TII->get(X86::VZEROUPPER)); + BuildMI(BB, I, dl, TII->get(X86::VZEROUPPER)); ++NumVZU; } diff --git a/lib/Target/XCore/XCoreAsmPrinter.cpp b/lib/Target/XCore/XCoreAsmPrinter.cpp index c76866f..caae562 100644 --- a/lib/Target/XCore/XCoreAsmPrinter.cpp +++ b/lib/Target/XCore/XCoreAsmPrinter.cpp @@ -31,7 +31,7 @@ #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Target/Mangler.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringExtras.h" @@ -112,7 +112,7 @@ void XCoreAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) { EmitSpecialLLVMGlobal(GV)) return; - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); OutStreamer.SwitchSection(getObjFileLowering().SectionForGlobal(GV, Mang,TM)); diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp index a4e5647..e18d973 100644 --- a/lib/Target/XCore/XCoreFrameLowering.cpp +++ b/lib/Target/XCore/XCoreFrameLowering.cpp @@ -23,7 +23,7 @@ #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/RegisterScavenging.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Support/ErrorHandling.h" @@ -98,12 +98,13 @@ void XCoreFrameLowering::emitPrologue(MachineFunction &MF) const { DebugLoc dl = MBBI != MBB.end() ? 
MBBI->getDebugLoc() : DebugLoc(); bool FP = hasFP(MF); - bool Nested = MF.getFunction()-> - getAttributes().hasAttrSomewhere(Attribute::Nest); + const AttrListPtr &PAL = MF.getFunction()->getAttributes(); - if (Nested) { - loadFromStack(MBB, MBBI, XCore::R11, 0, dl, TII); - } + for (unsigned I = 0, E = PAL.getNumAttrs(); I != E; ++I) + if (PAL.getAttributesAtIndex(I).hasAttribute(Attributes::Nest)) { + loadFromStack(MBB, MBBI, XCore::R11, 0, dl, TII); + break; + } // Work out frame sizes. int FrameSize = MFI->getStackSize(); diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index 8643ffc..9e7816e 100644 --- a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -285,7 +285,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const llvm_unreachable(0); } SDValue base = getGlobalAddressWrapper(GA, GV, DAG); - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); unsigned Size = TD->getTypeAllocSize(Ty); SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl), DAG.getConstant(Size, MVT::i32)); @@ -298,7 +298,7 @@ LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const DebugLoc DL = Op.getDebugLoc(); const BlockAddress *BA = cast(Op)->getBlockAddress(); - SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true); + SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy()); return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result); } @@ -405,7 +405,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const { if (allowsUnalignedMemoryAccesses(LD->getMemoryVT())) return SDValue(); - unsigned ABIAlignment = getTargetData()-> + unsigned ABIAlignment = getDataLayout()-> getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext())); // Leave aligned load alone. if (LD->getAlignment() >= ABIAlignment) @@ -477,7 +477,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const { } // Lower to a call to __misaligned_load(BasePtr). - Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext()); + Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext()); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; @@ -507,7 +507,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { return SDValue(); } - unsigned ABIAlignment = getTargetData()-> + unsigned ABIAlignment = getDataLayout()-> getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext())); // Leave aligned store alone. if (ST->getAlignment() >= ABIAlignment) { @@ -536,7 +536,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const } // Lower to a call to __misaligned_store(BasePtr, Value). 
- Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext()); + Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext()); TargetLowering::ArgListTy Args; TargetLowering::ArgListEntry Entry; @@ -1499,7 +1499,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N, if (StoreBits % 8) { break; } - unsigned ABIAlignment = getTargetData()->getABITypeAlignment( + unsigned ABIAlignment = getDataLayout()->getABITypeAlignment( ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext())); unsigned Alignment = ST->getAlignment(); if (Alignment >= ABIAlignment) { @@ -1570,7 +1570,7 @@ XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM, if (Ty->getTypeID() == Type::VoidTyID) return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs); - const TargetData *TD = TM.getTargetData(); + const DataLayout *TD = TM.getDataLayout(); unsigned Size = TD->getTypeAllocSize(Ty); if (AM.BaseGV) { return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 && diff --git a/lib/Target/XCore/XCoreInstrInfo.td b/lib/Target/XCore/XCoreInstrInfo.td index ae646a2..3e7666b 100644 --- a/lib/Target/XCore/XCoreInstrInfo.td +++ b/lib/Target/XCore/XCoreInstrInfo.td @@ -33,7 +33,7 @@ def XCoreBranchLink : SDNode<"XCoreISD::BL",SDT_XCoreBranchLink, SDNPVariadic]>; def XCoreRetsp : SDNode<"XCoreISD::RETSP", SDTBrind, - [SDNPHasChain, SDNPOptInGlue]>; + [SDNPHasChain, SDNPOptInGlue, SDNPMayLoad]>; def SDT_XCoreBR_JT : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisVT<1, i32>]>; @@ -58,7 +58,7 @@ def cprelwrapper : SDNode<"XCoreISD::CPRelativeWrapper", SDT_XCoreAddress, def SDT_XCoreStwsp : SDTypeProfile<0, 2, [SDTCisInt<1>]>; def XCoreStwsp : SDNode<"XCoreISD::STWSP", SDT_XCoreStwsp, - [SDNPHasChain]>; + [SDNPHasChain, SDNPMayStore]>; // These are target-independent nodes, but have target-specific formats. 
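The XCore LowerLOAD/LowerSTORE paths above fall back to libcalls to __misaligned_load and __misaligned_store when an access is under-aligned for the ABI type alignment. The library bodies are not part of this patch; conceptually they reduce to byte-wise accesses, roughly like the stand-ins below (the names are changed to make clear these are illustrative, not the real XCore runtime routines, whose call shape the lowering comments describe):

  #include <cstdint>
  #include <cstring>

  // Illustrative stand-ins for __misaligned_load(BasePtr) and
  // __misaligned_store(BasePtr, Value).
  extern "C" uint32_t demo_misaligned_load(const void *BasePtr) {
    uint32_t Value;
    std::memcpy(&Value, BasePtr, sizeof(Value));   // no alignment assumed
    return Value;
  }

  extern "C" void demo_misaligned_store(void *BasePtr, uint32_t Value) {
    std::memcpy(BasePtr, &Value, sizeof(Value));
  }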
def SDT_XCoreCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32> ]>; diff --git a/lib/Target/XCore/XCoreRegisterInfo.cpp b/lib/Target/XCore/XCoreRegisterInfo.cpp index cdd0a08..be5855a 100644 --- a/lib/Target/XCore/XCoreRegisterInfo.cpp +++ b/lib/Target/XCore/XCoreRegisterInfo.cpp @@ -176,7 +176,7 @@ XCoreRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, #ifndef NDEBUG DEBUG(errs() << "\nFunction : " - << MF.getFunction()->getName() << "\n"); + << MF.getName() << "\n"); DEBUG(errs() << "<--------->\n"); DEBUG(MI.print(errs())); DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n"); diff --git a/lib/Target/XCore/XCoreTargetMachine.cpp b/lib/Target/XCore/XCoreTargetMachine.cpp index 11ec86b..d5a932c 100644 --- a/lib/Target/XCore/XCoreTargetMachine.cpp +++ b/lib/Target/XCore/XCoreTargetMachine.cpp @@ -27,12 +27,12 @@ XCoreTargetMachine::XCoreTargetMachine(const Target &T, StringRef TT, CodeGenOpt::Level OL) : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), Subtarget(TT, CPU, FS), - DataLayout("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-" + DL("e-p:32:32:32-a0:0:32-f32:32:32-f64:32:32-i1:8:32-i8:8:32-" "i16:16:32-i32:32:32-i64:32:32-n32"), InstrInfo(), FrameLowering(Subtarget), TLInfo(*this), - TSInfo(*this) { + TSInfo(*this), STTI(&TLInfo), VTTI(&TLInfo) { } namespace { diff --git a/lib/Target/XCore/XCoreTargetMachine.h b/lib/Target/XCore/XCoreTargetMachine.h index 2546681..c60c6a3 100644 --- a/lib/Target/XCore/XCoreTargetMachine.h +++ b/lib/Target/XCore/XCoreTargetMachine.h @@ -20,17 +20,20 @@ #include "XCoreISelLowering.h" #include "XCoreSelectionDAGInfo.h" #include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetTransformImpl.h" +#include "llvm/DataLayout.h" namespace llvm { class XCoreTargetMachine : public LLVMTargetMachine { XCoreSubtarget Subtarget; - const TargetData DataLayout; // Calculates type size & alignment + const DataLayout DL; // Calculates type size & alignment XCoreInstrInfo InstrInfo; XCoreFrameLowering FrameLowering; XCoreTargetLowering TLInfo; XCoreSelectionDAGInfo TSInfo; + ScalarTargetTransformImpl STTI; + VectorTargetTransformImpl VTTI; public: XCoreTargetMachine(const Target &T, StringRef TT, StringRef CPU, StringRef FS, const TargetOptions &Options, @@ -53,7 +56,13 @@ public: virtual const TargetRegisterInfo *getRegisterInfo() const { return &InstrInfo.getRegisterInfo(); } - virtual const TargetData *getTargetData() const { return &DataLayout; } + virtual const ScalarTargetTransformInfo *getScalarTargetTransformInfo()const { + return &STTI; + } + virtual const VectorTargetTransformInfo *getVectorTargetTransformInfo()const { + return &VTTI; + } + virtual const DataLayout *getDataLayout() const { return &DL; } // Pass Pipeline Configuration virtual TargetPassConfig *createPassConfig(PassManagerBase &PM); diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp index b94dd69..be48b20 100644 --- a/lib/Transforms/IPO/ArgumentPromotion.cpp +++ b/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -153,7 +153,8 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) { SmallPtrSet ArgsToPromote; SmallPtrSet ByValArgsToTransform; for (unsigned i = 0; i != PointerArgs.size(); ++i) { - bool isByVal = F->paramHasAttr(PointerArgs[i].second+1, Attribute::ByVal); + bool isByVal=F->getParamAttributes(PointerArgs[i].second+1). 
+ hasAttribute(Attributes::ByVal); Argument *PtrArg = PointerArgs[i].first; Type *AgTy = cast(PtrArg->getType())->getElementType(); @@ -517,8 +518,10 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, const AttrListPtr &PAL = F->getAttributes(); // Add any return attributes. - if (Attributes attrs = PAL.getRetAttributes()) - AttributesVec.push_back(AttributeWithIndex::get(0, attrs)); + Attributes attrs = PAL.getRetAttributes(); + if (attrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex, + attrs)); // First, determine the new argument list unsigned ArgIndex = 1; @@ -534,7 +537,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, } else if (!ArgsToPromote.count(I)) { // Unchanged argument Params.push_back(I->getType()); - if (Attributes attrs = PAL.getParamAttributes(ArgIndex)) + Attributes attrs = PAL.getParamAttributes(ArgIndex); + if (attrs.hasAttributes()) AttributesVec.push_back(AttributeWithIndex::get(Params.size(), attrs)); } else if (I->use_empty()) { // Dead argument (which are always marked as promotable) @@ -587,19 +591,13 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, } // Add any function attributes. - if (Attributes attrs = PAL.getFnAttributes()) - AttributesVec.push_back(AttributeWithIndex::get(~0, attrs)); + attrs = PAL.getFnAttributes(); + if (attrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex, + attrs)); Type *RetTy = FTy->getReturnType(); - // Work around LLVM bug PR56: the CWriter cannot emit varargs functions which - // have zero fixed arguments. - bool ExtraArgHack = false; - if (Params.empty() && FTy->isVarArg()) { - ExtraArgHack = true; - Params.push_back(Type::getInt32Ty(F->getContext())); - } - // Construct the new function type using the new arguments. FunctionType *NFTy = FunctionType::get(RetTy, Params, FTy->isVarArg()); @@ -613,7 +611,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, // Recompute the parameter attributes list based on the new arguments for // the function. - NF->setAttributes(AttrListPtr::get(AttributesVec)); + NF->setAttributes(AttrListPtr::get(F->getContext(), AttributesVec)); AttributesVec.clear(); F->getParent()->getFunctionList().insert(F, NF); @@ -641,8 +639,10 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, const AttrListPtr &CallPAL = CS.getAttributes(); // Add any return attributes. - if (Attributes attrs = CallPAL.getRetAttributes()) - AttributesVec.push_back(AttributeWithIndex::get(0, attrs)); + Attributes attrs = CallPAL.getRetAttributes(); + if (attrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex, + attrs)); // Loop over the operands, inserting GEP and loads in the caller as // appropriate. @@ -653,7 +653,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, if (!ArgsToPromote.count(I) && !ByValArgsToTransform.count(I)) { Args.push_back(*AI); // Unmodified argument - if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex)) + Attributes Attrs = CallPAL.getParamAttributes(ArgIndex); + if (Attrs.hasAttributes()) AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs)); } else if (ByValArgsToTransform.count(I)) { @@ -711,30 +712,32 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, } } - if (ExtraArgHack) - Args.push_back(Constant::getNullValue(Type::getInt32Ty(F->getContext()))); - // Push any varargs arguments on the list. 
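The attribute changes in this hunk follow the new Attributes API used throughout the rest of the patch: presence is tested with hasAttributes() instead of comparing a bitmask against zero, the return and function slots are named AttrListPtr::ReturnIndex and AttrListPtr::FunctionIndex rather than the magic 0 and ~0U, and building an AttrListPtr now requires the LLVMContext. A condensed sketch of the idiom, assuming F and NF are the old and new functions (copyRetAndFnAttrs is an illustrative helper):

    #include "llvm/Attributes.h"
    #include "llvm/Function.h"
    #include "llvm/ADT/SmallVector.h"
    using namespace llvm;

    static void copyRetAndFnAttrs(Function *F, Function *NF) {
      SmallVector<AttributeWithIndex, 8> AttrVec;
      const AttrListPtr &PAL = F->getAttributes();

      Attributes RetAttrs = PAL.getRetAttributes();
      if (RetAttrs.hasAttributes())          // was: if (Attributes attrs = ...)
        AttrVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex,
                                                  RetAttrs));

      Attributes FnAttrs = PAL.getFnAttributes();
      if (FnAttrs.hasAttributes())           // was: compared against Attribute::None
        AttrVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex,
                                                  FnAttrs));

      // AttrListPtr::get now also takes the LLVMContext.
      NF->setAttributes(AttrListPtr::get(F->getContext(), AttrVec));
    }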
for (; AI != CS.arg_end(); ++AI, ++ArgIndex) { Args.push_back(*AI); - if (Attributes Attrs = CallPAL.getParamAttributes(ArgIndex)) + Attributes Attrs = CallPAL.getParamAttributes(ArgIndex); + if (Attrs.hasAttributes()) AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs)); } // Add any function attributes. - if (Attributes attrs = CallPAL.getFnAttributes()) - AttributesVec.push_back(AttributeWithIndex::get(~0, attrs)); + attrs = CallPAL.getFnAttributes(); + if (attrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex, + attrs)); Instruction *New; if (InvokeInst *II = dyn_cast(Call)) { New = InvokeInst::Create(NF, II->getNormalDest(), II->getUnwindDest(), Args, "", Call); cast(New)->setCallingConv(CS.getCallingConv()); - cast(New)->setAttributes(AttrListPtr::get(AttributesVec)); + cast(New)->setAttributes(AttrListPtr::get(II->getContext(), + AttributesVec)); } else { New = CallInst::Create(NF, Args, "", Call); cast(New)->setCallingConv(CS.getCallingConv()); - cast(New)->setAttributes(AttrListPtr::get(AttributesVec)); + cast(New)->setAttributes(AttrListPtr::get(New->getContext(), + AttributesVec)); if (cast(Call)->isTailCall()) cast(New)->setTailCall(); } @@ -870,16 +873,9 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, } // Increment I2 past all of the arguments added for this promoted pointer. - for (unsigned i = 0, e = ArgIndices.size(); i != e; ++i) - ++I2; + std::advance(I2, ArgIndices.size()); } - // Notify the alias analysis implementation that we inserted a new argument. - if (ExtraArgHack) - AA.copyValue(Constant::getNullValue(Type::getInt32Ty(F->getContext())), - NF->arg_begin()); - - // Tell the alias analysis that the old function is about to disappear. AA.replaceWithNewValue(F, NF); diff --git a/lib/Transforms/IPO/BarrierNoopPass.cpp b/lib/Transforms/IPO/BarrierNoopPass.cpp new file mode 100644 index 0000000..2e32240 --- /dev/null +++ b/lib/Transforms/IPO/BarrierNoopPass.cpp @@ -0,0 +1,47 @@ +//===- BarrierNoopPass.cpp - A barrier pass for the pass manager ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// NOTE: DO NOT USE THIS IF AVOIDABLE +// +// This pass is a nonce pass intended to allow manipulation of the implicitly +// nesting pass manager. For example, it can be used to cause a CGSCC pass +// manager to be closed prior to running a new collection of function passes. +// +// FIXME: This is a huge HACK. This should be removed when the pass manager's +// nesting is made explicit instead of implicit. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Pass.h" +#include "llvm/Transforms/IPO.h" +using namespace llvm; + +namespace { +/// \brief A nonce module pass used to place a barrier in a pass manager. +/// +/// There is no mechanism for ending a CGSCC pass manager once one is started. +/// This prevents extension points from having clear deterministic ordering +/// when they are phrased as non-module passes. +class BarrierNoop : public ModulePass { +public: + static char ID; // Pass identification. 
+ + BarrierNoop() : ModulePass(ID) { + initializeBarrierNoopPass(*PassRegistry::getPassRegistry()); + } + + bool runOnModule(Module &M) { return false; } +}; +} + +ModulePass *llvm::createBarrierNoopPass() { return new BarrierNoop(); } + +char BarrierNoop::ID = 0; +INITIALIZE_PASS(BarrierNoop, "barrier", "A No-Op Barrier Pass", + false, false) diff --git a/lib/Transforms/IPO/CMakeLists.txt b/lib/Transforms/IPO/CMakeLists.txt index 3f6b1de..90c1c33 100644 --- a/lib/Transforms/IPO/CMakeLists.txt +++ b/lib/Transforms/IPO/CMakeLists.txt @@ -1,5 +1,6 @@ add_llvm_library(LLVMipo ArgumentPromotion.cpp + BarrierNoopPass.cpp ConstantMerge.cpp DeadArgumentElimination.cpp ExtractGV.cpp diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp index d8fae8a..e2f0126 100644 --- a/lib/Transforms/IPO/ConstantMerge.cpp +++ b/lib/Transforms/IPO/ConstantMerge.cpp @@ -23,7 +23,7 @@ #include "llvm/DerivedTypes.h" #include "llvm/Module.h" #include "llvm/Pass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" @@ -50,7 +50,7 @@ namespace { // alignment to a concrete value. unsigned getAlignment(GlobalVariable *GV) const; - const TargetData *TD; + const DataLayout *TD; }; } @@ -98,7 +98,7 @@ unsigned ConstantMerge::getAlignment(GlobalVariable *GV) const { } bool ConstantMerge::runOnModule(Module &M) { - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); // Find all the globals that are marked "used". These cannot be merged. SmallPtrSet UsedGlobals; @@ -107,7 +107,7 @@ bool ConstantMerge::runOnModule(Module &M) { // Map unique pairs to globals. We don't // want to merge globals of unknown alignment with those of explicit - // alignment. If we have TargetData, we always know the alignment. + // alignment. If we have DataLayout, we always know the alignment. DenseMap, GlobalVariable*> CMap; // Replacements - This vector contains a list of replacements to perform. diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp index fd23a93..4cfd0b2 100644 --- a/lib/Transforms/IPO/DeadArgumentElimination.cpp +++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp @@ -21,7 +21,9 @@ #include "llvm/Transforms/IPO.h" #include "llvm/CallingConv.h" #include "llvm/Constant.h" +#include "llvm/DebugInfo.h" #include "llvm/DerivedTypes.h" +#include "llvm/DIBuilder.h" #include "llvm/Instructions.h" #include "llvm/IntrinsicInst.h" #include "llvm/LLVMContext.h" @@ -30,6 +32,7 @@ #include "llvm/Support/CallSite.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringExtras.h" @@ -121,6 +124,15 @@ namespace { typedef SmallVector UseVector; + // Map each LLVM function to corresponding metadata with debug info. If + // the function is replaced with another one, we should patch the pointer + // to LLVM function in metadata. + // As the code generation for module is finished (and DIBuilder is + // finalized) we assume that subprogram descriptors won't be changed, and + // they are stored in map for short duration anyway. + typedef DenseMap FunctionDIMap; + FunctionDIMap FunctionDIs; + protected: // DAH uses this to specify a different ID. 
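The new BarrierNoop pass above exists only to force the implicitly created CGSCC pass manager to be closed; a later hunk in this patch adds it from PassManagerBuilder at -O0. A sketch of that use, assuming createBarrierNoopPass() is declared in llvm/Transforms/IPO.h next to the other creator functions:

    #include "llvm/Pass.h"
    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"
    using namespace llvm;

    void addInlinerWithBarrier(PassManager &MPM, Pass *Inliner) {
      MPM.add(Inliner);                   // implicitly opens a CGSCC pass manager
      MPM.add(createBarrierNoopPass());   // no-op module pass forces it closed
      // Extension passes added after this point run as module passes rather
      // than being pulled into the inliner's CGSCC pass manager.
    }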
explicit DAE(char &ID) : ModulePass(ID) {} @@ -141,6 +153,7 @@ namespace { unsigned RetValNum = 0); Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses); + void CollectFunctionDIs(Module &M); void SurveyFunction(const Function &F); void MarkValue(const RetOrArg &RA, Liveness L, const UseVector &MaybeLiveUses); @@ -180,6 +193,33 @@ INITIALIZE_PASS(DAH, "deadarghaX0r", ModulePass *llvm::createDeadArgEliminationPass() { return new DAE(); } ModulePass *llvm::createDeadArgHackingPass() { return new DAH(); } +/// CollectFunctionDIs - Map each function in the module to its debug info +/// descriptor. +void DAE::CollectFunctionDIs(Module &M) { + FunctionDIs.clear(); + + for (Module::named_metadata_iterator I = M.named_metadata_begin(), + E = M.named_metadata_end(); I != E; ++I) { + NamedMDNode &NMD = *I; + for (unsigned MDIndex = 0, MDNum = NMD.getNumOperands(); + MDIndex < MDNum; ++MDIndex) { + MDNode *Node = NMD.getOperand(MDIndex); + if (!DIDescriptor(Node).isCompileUnit()) + continue; + DICompileUnit CU(Node); + const DIArray &SPs = CU.getSubprograms(); + for (unsigned SPIndex = 0, SPNum = SPs.getNumElements(); + SPIndex < SPNum; ++SPIndex) { + DISubprogram SP(SPs.getElement(SPIndex)); + if (!SP.Verify()) + continue; + if (Function *F = SP.getFunction()) + FunctionDIs[F] = SP; + } + } + } +} + /// DeleteDeadVarargs - If this is an function that takes a ... list, and if /// llvm.vastart is never called, the varargs list is dead for the function. bool DAE::DeleteDeadVarargs(Function &Fn) { @@ -236,9 +276,11 @@ bool DAE::DeleteDeadVarargs(Function &Fn) { SmallVector AttributesVec; for (unsigned i = 0; PAL.getSlot(i).Index <= NumArgs; ++i) AttributesVec.push_back(PAL.getSlot(i)); - if (Attributes FnAttrs = PAL.getFnAttributes()) - AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs)); - PAL = AttrListPtr::get(AttributesVec); + Attributes FnAttrs = PAL.getFnAttributes(); + if (FnAttrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex, + FnAttrs)); + PAL = AttrListPtr::get(Fn.getContext(), AttributesVec); } Instruction *New; @@ -284,6 +326,11 @@ bool DAE::DeleteDeadVarargs(Function &Fn) { I2->takeName(I); } + // Patch the pointer to LLVM function in debug info descriptor. + FunctionDIMap::iterator DI = FunctionDIs.find(&Fn); + if (DI != FunctionDIs.end()) + DI->second.replaceFunction(NF); + // Finally, nuke the old function. Fn.eraseFromParent(); return true; @@ -717,13 +764,17 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) { // here. Currently, this should not be possible, but special handling might be // required when new return value attributes are added. if (NRetTy->isVoidTy()) - RAttrs &= ~Attribute::typeIncompatible(NRetTy); + RAttrs = + Attributes::get(NRetTy->getContext(), AttrBuilder(RAttrs). + removeAttributes(Attributes::typeIncompatible(NRetTy))); else - assert((RAttrs & Attribute::typeIncompatible(NRetTy)) == 0 - && "Return attributes no longer compatible?"); + assert(!AttrBuilder(RAttrs). + hasAttributes(Attributes::typeIncompatible(NRetTy)) && + "Return attributes no longer compatible?"); - if (RAttrs) - AttributesVec.push_back(AttributeWithIndex::get(0, RAttrs)); + if (RAttrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex, + RAttrs)); // Remember which arguments are still alive. 
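DeleteDeadVarargs, referenced above, targets functions whose "..." is provably unused, and the newly added FunctionDIs map lets the rewritten function be patched back into its DISubprogram so the debug info keeps pointing at live IR. A source-level illustration of the kind of function it rewrites (log_level is a made-up example, not from the patch):

    // Declares "..." but never calls va_start, so the variadic part is dead;
    // DAE can turn this into "int log_level(int level)", update every call
    // site, and then point the function's debug descriptor at the new body.
    int log_level(int level, ...) {
      return level > 2;
    }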
SmallVector ArgAlive(FTy->getNumParams(), false); @@ -740,7 +791,8 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) { // Get the original parameter attributes (skipping the first one, that is // for the return value. - if (Attributes Attrs = PAL.getParamAttributes(i + 1)) + Attributes Attrs = PAL.getParamAttributes(i + 1); + if (Attrs.hasAttributes()) AttributesVec.push_back(AttributeWithIndex::get(Params.size(), Attrs)); } else { ++NumArgumentsEliminated; @@ -749,11 +801,12 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) { } } - if (FnAttrs != Attribute::None) - AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs)); + if (FnAttrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex, + FnAttrs)); // Reconstruct the AttributesList based on the vector we constructed. - AttrListPtr NewPAL = AttrListPtr::get(AttributesVec); + AttrListPtr NewPAL = AttrListPtr::get(F->getContext(), AttributesVec); // Create the new function type based on the recomputed parameters. FunctionType *NFTy = FunctionType::get(NRetTy, Params, FTy->isVarArg()); @@ -786,9 +839,12 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) { Attributes RAttrs = CallPAL.getRetAttributes(); Attributes FnAttrs = CallPAL.getFnAttributes(); // Adjust in case the function was changed to return void. - RAttrs &= ~Attribute::typeIncompatible(NF->getReturnType()); - if (RAttrs) - AttributesVec.push_back(AttributeWithIndex::get(0, RAttrs)); + RAttrs = + Attributes::get(NF->getContext(), AttrBuilder(RAttrs). + removeAttributes(Attributes::typeIncompatible(NF->getReturnType()))); + if (RAttrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex, + RAttrs)); // Declare these outside of the loops, so we can reuse them for the second // loop, which loops the varargs. @@ -800,22 +856,25 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) { if (ArgAlive[i]) { Args.push_back(*I); // Get original parameter attributes, but skip return attributes. - if (Attributes Attrs = CallPAL.getParamAttributes(i + 1)) + Attributes Attrs = CallPAL.getParamAttributes(i + 1); + if (Attrs.hasAttributes()) AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs)); } // Push any varargs arguments on the list. Don't forget their attributes. for (CallSite::arg_iterator E = CS.arg_end(); I != E; ++I, ++i) { Args.push_back(*I); - if (Attributes Attrs = CallPAL.getParamAttributes(i + 1)) + Attributes Attrs = CallPAL.getParamAttributes(i + 1); + if (Attrs.hasAttributes()) AttributesVec.push_back(AttributeWithIndex::get(Args.size(), Attrs)); } - if (FnAttrs != Attribute::None) - AttributesVec.push_back(AttributeWithIndex::get(~0, FnAttrs)); + if (FnAttrs.hasAttributes()) + AttributesVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex, + FnAttrs)); // Reconstruct the AttributesList based on the vector we constructed. - AttrListPtr NewCallPAL = AttrListPtr::get(AttributesVec); + AttrListPtr NewCallPAL = AttrListPtr::get(F->getContext(), AttributesVec); Instruction *New; if (InvokeInst *II = dyn_cast(Call)) { @@ -952,6 +1011,11 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) { BB->getInstList().erase(RI); } + // Patch the pointer to LLVM function in debug info descriptor. + FunctionDIMap::iterator DI = FunctionDIs.find(F); + if (DI != FunctionDIs.end()) + DI->second.replaceFunction(NF); + // Now that the old function is dead, delete it. 
F->eraseFromParent(); @@ -961,6 +1025,9 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) { bool DAE::runOnModule(Module &M) { bool Changed = false; + // Collect debug info descriptors for functions. + CollectFunctionDIs(M); + // First pass: Do a simple check to see if any functions can have their "..." // removed. We can do this if they never call va_start. This loop cannot be // fused with the next loop, because deleting a function invalidates diff --git a/lib/Transforms/IPO/ExtractGV.cpp b/lib/Transforms/IPO/ExtractGV.cpp index 4c7f0ed..6716deb 100644 --- a/lib/Transforms/IPO/ExtractGV.cpp +++ b/lib/Transforms/IPO/ExtractGV.cpp @@ -51,32 +51,75 @@ namespace { // Visit the GlobalVariables. for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I) { - if (deleteStuff == (bool)Named.count(I) && !I->isDeclaration()) { - I->setInitializer(0); - } else { + bool Delete = + deleteStuff == (bool)Named.count(I) && !I->isDeclaration(); + if (!Delete) { if (I->hasAvailableExternallyLinkage()) continue; if (I->getName() == "llvm.global_ctors") continue; } - if (I->hasLocalLinkage()) + bool Local = I->hasLocalLinkage(); + if (Local) I->setVisibility(GlobalValue::HiddenVisibility); - I->setLinkage(GlobalValue::ExternalLinkage); + + if (Local || Delete) + I->setLinkage(GlobalValue::ExternalLinkage); + + if (Delete) + I->setInitializer(0); } // Visit the Functions. for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { - if (deleteStuff == (bool)Named.count(I) && !I->isDeclaration()) { - I->deleteBody(); - } else { + bool Delete = + deleteStuff == (bool)Named.count(I) && !I->isDeclaration(); + if (!Delete) { if (I->hasAvailableExternallyLinkage()) continue; } - if (I->hasLocalLinkage()) + bool Local = I->hasLocalLinkage(); + if (Local) I->setVisibility(GlobalValue::HiddenVisibility); - I->setLinkage(GlobalValue::ExternalLinkage); + + if (Local || Delete) + I->setLinkage(GlobalValue::ExternalLinkage); + + if (Delete) + I->deleteBody(); + } + + // Visit the Aliases. + for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end(); + I != E;) { + Module::alias_iterator CurI = I; + ++I; + + if (CurI->hasLocalLinkage()) { + CurI->setVisibility(GlobalValue::HiddenVisibility); + CurI->setLinkage(GlobalValue::ExternalLinkage); + } + + if (deleteStuff == (bool)Named.count(CurI)) { + Type *Ty = CurI->getType()->getElementType(); + + CurI->removeFromParent(); + llvm::Value *Declaration; + if (FunctionType *FTy = dyn_cast(Ty)) { + Declaration = Function::Create(FTy, GlobalValue::ExternalLinkage, + CurI->getName(), &M); + + } else { + Declaration = + new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, + 0, CurI->getName()); + + } + CurI->replaceAllUsesWith(Declaration); + delete CurI; + } } return true; diff --git a/lib/Transforms/IPO/FunctionAttrs.cpp b/lib/Transforms/IPO/FunctionAttrs.cpp index f3f6228..18409f7 100644 --- a/lib/Transforms/IPO/FunctionAttrs.cpp +++ b/lib/Transforms/IPO/FunctionAttrs.cpp @@ -28,9 +28,9 @@ #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CaptureTracking.h" #include "llvm/ADT/SCCIterator.h" +#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" -#include "llvm/ADT/UniqueVector.h" #include "llvm/Support/InstIterator.h" using namespace llvm; @@ -212,10 +212,17 @@ bool FunctionAttrs::AddReadAttrs(const CallGraphSCC &SCC) { MadeChange = true; // Clear out any existing attributes. 
- F->removeAttribute(~0, Attribute::ReadOnly | Attribute::ReadNone); + AttrBuilder B; + B.addAttribute(Attributes::ReadOnly) + .addAttribute(Attributes::ReadNone); + F->removeAttribute(AttrListPtr::FunctionIndex, + Attributes::get(F->getContext(), B)); // Add in the new attribute. - F->addAttribute(~0, ReadsMemory? Attribute::ReadOnly : Attribute::ReadNone); + B.clear(); + B.addAttribute(ReadsMemory ? Attributes::ReadOnly : Attributes::ReadNone); + F->addAttribute(AttrListPtr::FunctionIndex, + Attributes::get(F->getContext(), B)); if (ReadsMemory) ++NumReadOnly; @@ -276,8 +283,6 @@ namespace { void tooManyUses() { Captured = true; } - bool shouldExplore(Use *U) { return true; } - bool captured(Use *U) { CallSite CS(U->getUser()); if (!CS.getInstruction()) { Captured = true; return true; } @@ -352,6 +357,9 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) { ArgumentGraph AG; + AttrBuilder B; + B.addAttribute(Attributes::NoCapture); + // Check each function in turn, determining which pointer arguments are not // captured. for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { @@ -373,7 +381,7 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) { for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E; ++A) { if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) { - A->addAttr(Attribute::NoCapture); + A->addAttr(Attributes::get(F->getContext(), B)); ++NumNoCapture; Changed = true; } @@ -388,7 +396,7 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) { if (!Tracker.Captured) { if (Tracker.Uses.empty()) { // If it's trivially not captured, mark it nocapture now. - A->addAttr(Attribute::NoCapture); + A->addAttr(Attributes::get(F->getContext(), B)); ++NumNoCapture; Changed = true; } else { @@ -421,7 +429,9 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) { // eg. "void f(int* x) { if (...) f(x); }" if (ArgumentSCC[0]->Uses.size() == 1 && ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) { - ArgumentSCC[0]->Definition->addAttr(Attribute::NoCapture); + ArgumentSCC[0]-> + Definition-> + addAttr(Attributes::get(ArgumentSCC[0]->Definition->getContext(), B)); ++NumNoCapture; Changed = true; } @@ -463,7 +473,7 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) { for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) { Argument *A = ArgumentSCC[i]->Definition; - A->addAttr(Attribute::NoCapture); + A->addAttr(Attributes::get(A->getContext(), B)); ++NumNoCapture; Changed = true; } @@ -476,13 +486,13 @@ bool FunctionAttrs::AddNoCaptureAttrs(const CallGraphSCC &SCC) { /// or a pointer that doesn't alias any other pointer visible to the caller. bool FunctionAttrs::IsFunctionMallocLike(Function *F, SmallPtrSet &SCCNodes) const { - UniqueVector FlowsToReturn; + SmallSetVector FlowsToReturn; for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) if (ReturnInst *Ret = dyn_cast(I->getTerminator())) FlowsToReturn.insert(Ret->getReturnValue()); for (unsigned i = 0; i != FlowsToReturn.size(); ++i) { - Value *RetVal = FlowsToReturn[i+1]; // UniqueVector[0] is reserved. 
+ Value *RetVal = FlowsToReturn[i]; if (Constant *C = dyn_cast(RetVal)) { if (!C->isNullValue() && !isa(C)) @@ -520,7 +530,7 @@ bool FunctionAttrs::IsFunctionMallocLike(Function *F, case Instruction::Call: case Instruction::Invoke: { CallSite CS(RVI); - if (CS.paramHasAttr(0, Attribute::NoAlias)) + if (CS.paramHasAttr(0, Attributes::NoAlias)) break; if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction())) diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp index 6d950d2..591278f 100644 --- a/lib/Transforms/IPO/GlobalOpt.cpp +++ b/lib/Transforms/IPO/GlobalOpt.cpp @@ -25,7 +25,7 @@ #include "llvm/Pass.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/MemoryBuiltins.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Support/CallSite.h" #include "llvm/Support/Debug.h" @@ -83,7 +83,7 @@ namespace { const GlobalStatus &GS); bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn); - TargetData *TD; + DataLayout *TD; TargetLibraryInfo *TLI; }; } @@ -225,6 +225,7 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS, // Don't hack on volatile stores. if (SI->isVolatile()) return true; + GS.Ordering = StrongerOrdering(GS.Ordering, SI->getOrdering()); // If this is a direct store to the global (i.e., the global is a scalar @@ -234,6 +235,14 @@ static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS, if (const GlobalVariable *GV = dyn_cast( SI->getOperand(1))) { Value *StoredVal = SI->getOperand(0); + + if (Constant *C = dyn_cast(StoredVal)) { + if (C->isThreadDependent()) { + // The stored value changes between threads; don't track it. + return true; + } + } + if (StoredVal == GV->getInitializer()) { if (GS.StoredType < GlobalStatus::isInitializerStored) GS.StoredType = GlobalStatus::isInitializerStored; @@ -346,7 +355,7 @@ static bool isLeakCheckerRoot(GlobalVariable *GV) { /// Given a value that is stored to a global but never read, determine whether /// it's safe to remove the store and the chain of computation that feeds the /// store. -static bool IsSafeComputationToRemove(Value *V) { +static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) { do { if (isa(V)) return true; @@ -355,7 +364,7 @@ static bool IsSafeComputationToRemove(Value *V) { if (isa(V) || isa(V) || isa(V) || isa(V)) return false; - if (isAllocationFn(V)) + if (isAllocationFn(V, TLI)) return true; Instruction *I = cast(V); @@ -376,7 +385,8 @@ static bool IsSafeComputationToRemove(Value *V) { /// of the global and clean up any that obviously don't assign the global a /// value that isn't dynamically allocated. /// -static bool CleanupPointerRootUsers(GlobalVariable *GV) { +static bool CleanupPointerRootUsers(GlobalVariable *GV, + const TargetLibraryInfo *TLI) { // A brief explanation of leak checkers. The goal is to find bugs where // pointers are forgotten, causing an accumulating growth in memory // usage over time. The common strategy for leak checkers is to whitelist the @@ -432,18 +442,18 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV) { C->destroyConstant(); // This could have invalidated UI, start over from scratch. 
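The UniqueVector -> SmallSetVector swap above also changes the indexing convention: UniqueVector reserves slot 0 (hence the old FlowsToReturn[i+1]), while SetVector is plain 0-based. A small sketch of the replacement container's behaviour (countUniqueRoots is illustrative only):

    #include "llvm/ADT/SetVector.h"
    #include "llvm/Value.h"
    using namespace llvm;

    unsigned countUniqueRoots(Value *A, Value *B) {
      SmallSetVector<Value *, 8> FlowsToReturn;
      FlowsToReturn.insert(A);
      FlowsToReturn.insert(A);            // duplicate insert is ignored
      FlowsToReturn.insert(B);
      unsigned N = 0;
      for (unsigned i = 0, e = FlowsToReturn.size(); i != e; ++i)
        if (FlowsToReturn[i])             // elements are read with [i], not [i+1]
          ++N;
      return N;                           // 2 when A != B and both are non-null
    }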
Dead.clear(); - CleanupPointerRootUsers(GV); + CleanupPointerRootUsers(GV, TLI); return true; } } } for (int i = 0, e = Dead.size(); i != e; ++i) { - if (IsSafeComputationToRemove(Dead[i].first)) { + if (IsSafeComputationToRemove(Dead[i].first, TLI)) { Dead[i].second->eraseFromParent(); Instruction *I = Dead[i].first; do { - if (isAllocationFn(I)) + if (isAllocationFn(I, TLI)) break; Instruction *J = dyn_cast(I->getOperand(0)); if (!J) @@ -463,7 +473,7 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV) { /// quick scan over the use list to clean up the easy and obvious cruft. This /// returns true if it made a change. static bool CleanupConstantGlobalUsers(Value *V, Constant *Init, - TargetData *TD, TargetLibraryInfo *TLI) { + DataLayout *TD, TargetLibraryInfo *TLI) { bool Changed = false; for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) { User *U = *UI++; @@ -655,7 +665,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) { /// behavior of the program in a more fine-grained way. We have determined that /// this transformation is safe already. We return the first global variable we /// insert so that the caller can reprocess it. -static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) { +static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) { // Make sure this global only has simple uses that we can SRA. if (!GlobalUsersSafeToSRA(GV)) return 0; @@ -931,7 +941,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) { /// if the loaded value is dynamically null, then we know that they cannot be /// reachable with a null optimize away the load. static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, - TargetData *TD, + DataLayout *TD, TargetLibraryInfo *TLI) { bool Changed = false; @@ -961,7 +971,9 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, // If we get here we could have other crazy uses that are transitively // loaded. assert((isa(GlobalUser) || isa(GlobalUser) || - isa(GlobalUser) || isa(GlobalUser)) && + isa(GlobalUser) || isa(GlobalUser) || + isa(GlobalUser) || + isa(GlobalUser)) && "Only expect load and stores!"); } } @@ -975,7 +987,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, // nor is the global. if (AllNonStoreUsesGone) { if (isLeakCheckerRoot(GV)) { - Changed |= CleanupPointerRootUsers(GV); + Changed |= CleanupPointerRootUsers(GV, TLI); } else { Changed = true; CleanupConstantGlobalUsers(GV, 0, TD, TLI); @@ -993,7 +1005,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV, /// ConstantPropUsersOf - Walk the use list of V, constant folding all of the /// instructions that are foldable. static void ConstantPropUsersOf(Value *V, - TargetData *TD, TargetLibraryInfo *TLI) { + DataLayout *TD, TargetLibraryInfo *TLI) { for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) if (Instruction *I = dyn_cast(*UI++)) if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) { @@ -1016,7 +1028,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy, ConstantInt *NElements, - TargetData *TD, + DataLayout *TD, TargetLibraryInfo *TLI) { DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n'); @@ -1465,9 +1477,10 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load, /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. 
Break /// it up into multiple allocations of arrays of the fields. static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, - Value *NElems, TargetData *TD) { + Value *NElems, DataLayout *TD, + const TargetLibraryInfo *TLI) { DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); - Type *MAT = getMallocAllocatedType(CI); + Type *MAT = getMallocAllocatedType(CI, TLI); StructType *STy = cast(MAT); // There is guaranteed to be at least one use of the malloc (storing @@ -1656,7 +1669,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, Type *AllocTy, AtomicOrdering Ordering, Module::global_iterator &GVI, - TargetData *TD, + DataLayout *TD, TargetLibraryInfo *TLI) { if (!TD) return false; @@ -1688,7 +1701,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, // This eliminates dynamic allocation, avoids an indirection accessing the // data, and exposes the resultant global to further GlobalOpt. // We cannot optimize the malloc if we cannot determine malloc array size. - Value *NElems = getMallocArraySize(CI, TD, true); + Value *NElems = getMallocArraySize(CI, TD, TLI, true); if (!NElems) return false; @@ -1725,7 +1738,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, // If this is a fixed size array, transform the Malloc to be an alloc of // structs. malloc [100 x struct],1 -> malloc struct, 100 - if (ArrayType *AT = dyn_cast(getMallocAllocatedType(CI))) { + if (ArrayType *AT = dyn_cast(getMallocAllocatedType(CI, TLI))) { Type *IntPtrTy = TD->getIntPtrType(CI->getContext()); unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes(); Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize); @@ -1742,7 +1755,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CI = cast(Malloc); } - GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD); + GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true), + TD, TLI); return true; } @@ -1754,7 +1768,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, AtomicOrdering Ordering, Module::global_iterator &GVI, - TargetData *TD, TargetLibraryInfo *TLI) { + DataLayout *TD, TargetLibraryInfo *TLI) { // Ignore no-op GEPs and bitcasts. StoredOnceVal = StoredOnceVal->stripPointerCasts(); @@ -1771,8 +1785,8 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, // Optimize away any trapping uses of the loaded value. if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI)) return true; - } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) { - Type *MallocType = getMallocAllocatedType(CI); + } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) { + Type *MallocType = getMallocAllocatedType(CI, TLI); if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI, TD, TLI)) @@ -1964,7 +1978,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, bool Changed; if (isLeakCheckerRoot(GV)) { // Delete any constant stores to the global. - Changed = CleanupPointerRootUsers(GV); + Changed = CleanupPointerRootUsers(GV, TLI); } else { // Delete any stores we can find to the global. We may not be able to // make it completely dead though. 
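A recurring change in the GlobalOpt hunks above is that the MemoryBuiltins helpers (isAllocationFn, getMallocAllocatedType, getMallocArraySize, extractMallocCall, isFreeCall) now take the TargetLibraryInfo, so allocator recognition respects -fno-builtin. A sketch of the new call shapes, assuming TD and TLI come from the surrounding pass (looksLikeStructMalloc is illustrative):

    #include "llvm/Analysis/MemoryBuiltins.h"
    #include "llvm/DataLayout.h"
    #include "llvm/Instructions.h"
    #include "llvm/Target/TargetLibraryInfo.h"
    using namespace llvm;

    static bool looksLikeStructMalloc(CallInst *CI, const DataLayout *TD,
                                      const TargetLibraryInfo *TLI) {
      if (!isAllocationFn(CI, TLI))                          // was isAllocationFn(CI)
        return false;
      Type *MAT = getMallocAllocatedType(CI, TLI);           // was (CI)
      Value *NElems = getMallocArraySize(CI, TD, TLI, true); // was (CI, TD, true)
      return MAT && MAT->isStructTy() && NElems;
    }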
@@ -1997,7 +2011,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, ++NumMarked; return true; } else if (!GV->getInitializer()->getType()->isSingleValueType()) { - if (TargetData *TD = getAnalysisIfAvailable()) + if (DataLayout *TD = getAnalysisIfAvailable()) if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) { GVI = FirstNewGV; // Don't skip the newly produced globals! return true; @@ -2056,25 +2070,26 @@ static void ChangeCalleesToFastCall(Function *F) { } } -static AttrListPtr StripNest(const AttrListPtr &Attrs) { +static AttrListPtr StripNest(LLVMContext &C, const AttrListPtr &Attrs) { for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { - if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0) + if (!Attrs.getSlot(i).Attrs.hasAttribute(Attributes::Nest)) continue; // There can be only one. - return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest); + return Attrs.removeAttr(C, Attrs.getSlot(i).Index, + Attributes::get(C, Attributes::Nest)); } return Attrs; } static void RemoveNestAttribute(Function *F) { - F->setAttributes(StripNest(F->getAttributes())); + F->setAttributes(StripNest(F->getContext(), F->getAttributes())); for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ if (isa(*UI)) continue; CallSite User(cast(*UI)); - User.setAttributes(StripNest(User.getAttributes())); + User.setAttributes(StripNest(F->getContext(), User.getAttributes())); } } @@ -2103,7 +2118,7 @@ bool GlobalOpt::OptimizeFunctions(Module &M) { Changed = true; } - if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && + if (F->getAttributes().hasAttrSomewhere(Attributes::Nest) && !F->hasAddressTaken()) { // The function is not used by a trampoline intrinsic, so it is safe // to remove the 'nest' attribute. @@ -2251,7 +2266,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL, static inline bool isSimpleEnoughValueToCommit(Constant *C, SmallPtrSet &SimpleConstants, - const TargetData *TD); + const DataLayout *TD); /// isSimpleEnoughValueToCommit - Return true if the specified constant can be @@ -2264,7 +2279,7 @@ isSimpleEnoughValueToCommit(Constant *C, /// time. static bool isSimpleEnoughValueToCommitHelper(Constant *C, SmallPtrSet &SimpleConstants, - const TargetData *TD) { + const DataLayout *TD) { // Simple integer, undef, constant aggregate zero, global addresses, etc are // all supported. if (C->getNumOperands() == 0 || isa(C) || @@ -2319,7 +2334,7 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C, static inline bool isSimpleEnoughValueToCommit(Constant *C, SmallPtrSet &SimpleConstants, - const TargetData *TD) { + const DataLayout *TD) { // If we already checked this constant, we win. if (!SimpleConstants.insert(C)) return true; // Check the constant. @@ -2450,7 +2465,7 @@ namespace { /// Once an evaluation call fails, the evaluation object should not be reused. class Evaluator { public: - Evaluator(const TargetData *TD, const TargetLibraryInfo *TLI) + Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI) : TD(TD), TLI(TLI) { ValueStack.push_back(new DenseMap); } @@ -2531,7 +2546,7 @@ private: /// simple enough to live in a static initializer of a global. SmallPtrSet SimpleConstants; - const TargetData *TD; + const DataLayout *TD; const TargetLibraryInfo *TLI; }; @@ -2869,7 +2884,7 @@ bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal, /// EvaluateStaticConstructor - Evaluate static constructors in the function, if /// we can. Return true if we can, false otherwise. 
-static bool EvaluateStaticConstructor(Function *F, const TargetData *TD, +static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD, const TargetLibraryInfo *TLI) { // Call the function. Evaluator Eval(TD, TLI); @@ -3110,7 +3125,7 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) { bool GlobalOpt::runOnModule(Module &M) { bool Changed = false; - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); // Try to find the llvm.globalctors list. diff --git a/lib/Transforms/IPO/IPO.cpp b/lib/Transforms/IPO/IPO.cpp index 6233922..5d563d8 100644 --- a/lib/Transforms/IPO/IPO.cpp +++ b/lib/Transforms/IPO/IPO.cpp @@ -1,4 +1,4 @@ -//===-- Scalar.cpp --------------------------------------------------------===// +//===-- IPO.cpp -----------------------------------------------------------===// // // The LLVM Compiler Infrastructure // @@ -95,7 +95,10 @@ void LLVMAddIPSCCPPass(LLVMPassManagerRef PM) { } void LLVMAddInternalizePass(LLVMPassManagerRef PM, unsigned AllButMain) { - unwrap(PM)->add(createInternalizePass(AllButMain != 0)); + std::vector Export; + if (AllButMain) + Export.push_back("main"); + unwrap(PM)->add(createInternalizePass(Export)); } void LLVMAddStripDeadPrototypesPass(LLVMPassManagerRef PM) { diff --git a/lib/Transforms/IPO/InlineAlways.cpp b/lib/Transforms/IPO/InlineAlways.cpp index 664ddf6..b1c36c1 100644 --- a/lib/Transforms/IPO/InlineAlways.cpp +++ b/lib/Transforms/IPO/InlineAlways.cpp @@ -23,7 +23,7 @@ #include "llvm/Support/CallSite.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/IPO/InlinerPass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/ADT/SmallPtrSet.h" using namespace llvm; @@ -65,7 +65,7 @@ Pass *llvm::createAlwaysInlinerPass(bool InsertLifetime) { /// \brief Minimal filter to detect invalid constructs for inlining. static bool isInlineViable(Function &F) { - bool ReturnsTwice = F.hasFnAttr(Attribute::ReturnsTwice); + bool ReturnsTwice =F.getFnAttributes().hasAttribute(Attributes::ReturnsTwice); for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) { // Disallow inlining of functions which contain an indirect branch. if (isa(BI->getTerminator())) @@ -114,7 +114,7 @@ InlineCost AlwaysInliner::getInlineCost(CallSite CS) { if (Callee->isDeclaration()) return InlineCost::getNever(); // Return never for anything not marked as always inline. - if (!Callee->hasFnAttr(Attribute::AlwaysInline)) + if (!Callee->getFnAttributes().hasAttribute(Attributes::AlwaysInline)) return InlineCost::getNever(); // Do some minimal analysis to preclude non-viable functions. diff --git a/lib/Transforms/IPO/InlineSimple.cpp b/lib/Transforms/IPO/InlineSimple.cpp index 50038d8..bf0b1f9 100644 --- a/lib/Transforms/IPO/InlineSimple.cpp +++ b/lib/Transforms/IPO/InlineSimple.cpp @@ -22,7 +22,7 @@ #include "llvm/Support/CallSite.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/IPO/InlinerPass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; @@ -62,7 +62,7 @@ Pass *llvm::createFunctionInliningPass(int Threshold) { // doInitialization - Initializes the vector of functions that have been // annotated with the noinline attribute. 
bool SimpleInliner::doInitialization(CallGraph &CG) { - CA.setTargetData(getAnalysisIfAvailable()); + CA.setDataLayout(getAnalysisIfAvailable()); return false; } diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp index 712888a..abcb25f 100644 --- a/lib/Transforms/IPO/Inliner.cpp +++ b/lib/Transforms/IPO/Inliner.cpp @@ -19,7 +19,8 @@ #include "llvm/IntrinsicInst.h" #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/InlineCost.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/IPO/InlinerPass.h" #include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Local.h" @@ -92,11 +93,11 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI, // If the inlined function had a higher stack protection level than the // calling function, then bump up the caller's stack protection level. - if (Callee->hasFnAttr(Attribute::StackProtectReq)) - Caller->addFnAttr(Attribute::StackProtectReq); - else if (Callee->hasFnAttr(Attribute::StackProtect) && - !Caller->hasFnAttr(Attribute::StackProtectReq)) - Caller->addFnAttr(Attribute::StackProtect); + if (Callee->getFnAttributes().hasAttribute(Attributes::StackProtectReq)) + Caller->addFnAttr(Attributes::StackProtectReq); + else if (Callee->getFnAttributes().hasAttribute(Attributes::StackProtect) && + !Caller->getFnAttributes().hasAttribute(Attributes::StackProtectReq)) + Caller->addFnAttr(Attributes::StackProtect); // Look at all of the allocas that we inlined through this call site. If we // have already inlined other allocas through other calls into this function, @@ -208,14 +209,15 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const { // would decrease the threshold. Function *Caller = CS.getCaller(); bool OptSize = Caller && !Caller->isDeclaration() && - Caller->hasFnAttr(Attribute::OptimizeForSize); - if (!(InlineLimit.getNumOccurrences() > 0) && OptSize && OptSizeThreshold < thres) + Caller->getFnAttributes().hasAttribute(Attributes::OptimizeForSize); + if (!(InlineLimit.getNumOccurrences() > 0) && OptSize && + OptSizeThreshold < thres) thres = OptSizeThreshold; // Listen to the inlinehint attribute when it would increase the threshold. Function *Callee = CS.getCalledFunction(); bool InlineHint = Callee && !Callee->isDeclaration() && - Callee->hasFnAttr(Attribute::InlineHint); + Callee->getFnAttributes().hasAttribute(Attributes::InlineHint); if (InlineHint && HintThreshold > thres) thres = HintThreshold; @@ -338,7 +340,8 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID, bool Inliner::runOnSCC(CallGraphSCC &SCC) { CallGraph &CG = getAnalysis(); - const TargetData *TD = getAnalysisIfAvailable(); + const DataLayout *TD = getAnalysisIfAvailable(); + const TargetLibraryInfo *TLI = getAnalysisIfAvailable(); SmallPtrSet SCCFunctions; DEBUG(dbgs() << "Inliner visiting SCC:"); @@ -417,7 +420,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) { // just delete the call instead of trying to inline it, regardless of // size. This happens because IPSCCP propagates the result out of the // call and then we're left with the dead call. - if (isInstructionTriviallyDead(CS.getInstruction())) { + if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) { DEBUG(dbgs() << " -> Deleting dead call: " << *CS.getInstruction() << "\n"); // Update the call graph by deleting the edge from Callee to Caller. 
@@ -530,7 +533,8 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) { // Handle the case when this function is called and we only want to care // about always-inline functions. This is a bit of a hack to share code // between here and the InlineAlways pass. - if (AlwaysInlineOnly && !F->hasFnAttr(Attribute::AlwaysInline)) + if (AlwaysInlineOnly && + !F->getFnAttributes().hasAttribute(Attributes::AlwaysInline)) continue; // If the only remaining users of the function are dead constants, remove diff --git a/lib/Transforms/IPO/Internalize.cpp b/lib/Transforms/IPO/Internalize.cpp index fb5869e..aa629cc 100644 --- a/lib/Transforms/IPO/Internalize.cpp +++ b/lib/Transforms/IPO/Internalize.cpp @@ -7,9 +7,9 @@ // //===----------------------------------------------------------------------===// // -// This pass loops over all of the functions in the input module, looking for a -// main function. If a main function is found, all other functions and all -// global variables with initializers are marked as internal. +// This pass loops over all of the functions and variables in the input module. +// If the function or variable is not in the list of external names given to +// the pass it is marked as internal. // //===----------------------------------------------------------------------===// @@ -45,12 +45,9 @@ APIList("internalize-public-api-list", cl::value_desc("list"), namespace { class InternalizePass : public ModulePass { std::set ExternalNames; - /// If no api symbols were specified and a main function is defined, - /// assume the main function is the only API - bool AllButMain; public: static char ID; // Pass identification, replacement for typeid - explicit InternalizePass(bool AllButMain = true); + explicit InternalizePass(); explicit InternalizePass(const std::vector & exportList); void LoadFile(const char *Filename); virtual bool runOnModule(Module &M); @@ -66,8 +63,8 @@ char InternalizePass::ID = 0; INITIALIZE_PASS(InternalizePass, "internalize", "Internalize Global Symbols", false, false) -InternalizePass::InternalizePass(bool AllButMain) - : ModulePass(ID), AllButMain(AllButMain){ +InternalizePass::InternalizePass() + : ModulePass(ID) { initializeInternalizePassPass(*PassRegistry::getPassRegistry()); if (!APIFile.empty()) // If a filename is specified, use it. LoadFile(APIFile.c_str()); @@ -76,7 +73,7 @@ InternalizePass::InternalizePass(bool AllButMain) } InternalizePass::InternalizePass(const std::vector&exportList) - : ModulePass(ID), AllButMain(false){ + : ModulePass(ID){ initializeInternalizePassPass(*PassRegistry::getPassRegistry()); for(std::vector::const_iterator itr = exportList.begin(); itr != exportList.end(); itr++) { @@ -103,23 +100,6 @@ void InternalizePass::LoadFile(const char *Filename) { bool InternalizePass::runOnModule(Module &M) { CallGraph *CG = getAnalysisIfAvailable(); CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0; - - if (ExternalNames.empty()) { - // Return if we're not in 'all but main' mode and have no external api - if (!AllButMain) - return false; - // If no list or file of symbols was specified, check to see if there is a - // "main" symbol defined in the module. If so, use it, otherwise do not - // internalize the module, it must be a library or something. - // - Function *MainFunc = M.getFunction("main"); - if (MainFunc == 0 || MainFunc->isDeclaration()) - return false; // No main found, must be a library... - - // Preserve main, internalize all else. 
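With this change the Internalize pass no longer guesses that "main" is the API; callers state the externally visible symbols themselves, as the updated LLVMAddInternalizePass wrapper earlier in the patch does. A sketch of the C++-level usage (the const char * element type is inferred from the string literals pushed in this patch):

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO.h"
    #include <vector>
    using namespace llvm;

    void addInternalize(PassManagerBase &PM) {
      std::vector<const char *> Export;
      Export.push_back("main");              // symbols that must stay external
      PM.add(createInternalizePass(Export)); // everything else becomes internal
    }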
- ExternalNames.insert(MainFunc->getName()); - } - bool Changed = false; // Never internalize functions which code-gen might insert. @@ -189,8 +169,8 @@ bool InternalizePass::runOnModule(Module &M) { return Changed; } -ModulePass *llvm::createInternalizePass(bool AllButMain) { - return new InternalizePass(AllButMain); +ModulePass *llvm::createInternalizePass() { + return new InternalizePass(); } ModulePass *llvm::createInternalizePass(const std::vector &el) { diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp index 9f70f66..44283dd 100644 --- a/lib/Transforms/IPO/MergeFunctions.cpp +++ b/lib/Transforms/IPO/MergeFunctions.cpp @@ -63,7 +63,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ValueHandle.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include using namespace llvm; @@ -92,19 +92,19 @@ static unsigned profileFunction(const Function *F) { namespace { /// ComparableFunction - A struct that pairs together functions with a -/// TargetData so that we can keep them together as elements in the DenseSet. +/// DataLayout so that we can keep them together as elements in the DenseSet. class ComparableFunction { public: static const ComparableFunction EmptyKey; static const ComparableFunction TombstoneKey; - static TargetData * const LookupOnly; + static DataLayout * const LookupOnly; - ComparableFunction(Function *Func, TargetData *TD) + ComparableFunction(Function *Func, DataLayout *TD) : Func(Func), Hash(profileFunction(Func)), TD(TD) {} Function *getFunc() const { return Func; } unsigned getHash() const { return Hash; } - TargetData *getTD() const { return TD; } + DataLayout *getTD() const { return TD; } // Drops AssertingVH reference to the function. Outside of debug mode, this // does nothing. @@ -120,13 +120,13 @@ private: AssertingVH Func; unsigned Hash; - TargetData *TD; + DataLayout *TD; }; const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0); const ComparableFunction ComparableFunction::TombstoneKey = ComparableFunction(1); -TargetData *const ComparableFunction::LookupOnly = (TargetData*)(-1); +DataLayout *const ComparableFunction::LookupOnly = (DataLayout*)(-1); } @@ -150,12 +150,12 @@ namespace llvm { namespace { /// FunctionComparator - Compares two functions to determine whether or not -/// they will generate machine code with the same behaviour. TargetData is +/// they will generate machine code with the same behaviour. DataLayout is /// used if available. The comparator always fails conservatively (erring on the /// side of claiming that two functions are different). class FunctionComparator { public: - FunctionComparator(const TargetData *TD, const Function *F1, + FunctionComparator(const DataLayout *TD, const Function *F1, const Function *F2) : F1(F1), F2(F2), TD(TD) {} @@ -190,7 +190,7 @@ private: // The two functions undergoing comparison. const Function *F1, *F2; - const TargetData *TD; + const DataLayout *TD; DenseMap id_map; DenseSet seen_values; @@ -591,8 +591,8 @@ private: /// to modify it. FnSetType FnSet; - /// TargetData for more accurate GEP comparisons. May be NULL. - TargetData *TD; + /// DataLayout for more accurate GEP comparisons. May be NULL. + DataLayout *TD; /// Whether or not the target supports global aliases. 
bool HasGlobalAliases; @@ -609,7 +609,7 @@ ModulePass *llvm::createMergeFunctionsPass() { bool MergeFunctions::runOnModule(Module &M) { bool Changed = false; - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) { if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage()) diff --git a/lib/Transforms/IPO/PassManagerBuilder.cpp b/lib/Transforms/IPO/PassManagerBuilder.cpp index 43b4ab5..05253fc 100644 --- a/lib/Transforms/IPO/PassManagerBuilder.cpp +++ b/lib/Transforms/IPO/PassManagerBuilder.cpp @@ -33,13 +33,21 @@ using namespace llvm; static cl::opt -RunVectorization("vectorize", cl::desc("Run vectorization passes")); +RunLoopVectorization("vectorize-loops", + cl::desc("Run the Loop vectorization passes")); + +static cl::opt +RunBBVectorization("vectorize", cl::desc("Run the BB vectorization passes")); static cl::opt UseGVNAfterVectorization("use-gvn-after-vectorization", cl::init(false), cl::Hidden, cl::desc("Run GVN instead of Early CSE after vectorization passes")); +static cl::opt UseNewSROA("use-new-sroa", + cl::init(true), cl::Hidden, + cl::desc("Enable the new, experimental SROA pass")); + PassManagerBuilder::PassManagerBuilder() { OptLevel = 2; SizeLevel = 0; @@ -48,7 +56,8 @@ PassManagerBuilder::PassManagerBuilder() { DisableSimplifyLibCalls = false; DisableUnitAtATime = false; DisableUnrollLoops = false; - Vectorize = RunVectorization; + Vectorize = RunBBVectorization; + LoopVectorize = RunLoopVectorization; } PassManagerBuilder::~PassManagerBuilder() { @@ -100,7 +109,10 @@ void PassManagerBuilder::populateFunctionPassManager(FunctionPassManager &FPM) { addInitialAliasAnalysisPasses(FPM); FPM.add(createCFGSimplificationPass()); - FPM.add(createScalarReplAggregatesPass()); + if (UseNewSROA) + FPM.add(createSROAPass()); + else + FPM.add(createScalarReplAggregatesPass()); FPM.add(createEarlyCSEPass()); FPM.add(createLowerExpectIntrinsicPass()); } @@ -112,6 +124,14 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) { MPM.add(Inliner); Inliner = 0; } + + // FIXME: This is a HACK! The inliner pass above implicitly creates a CGSCC + // pass manager, but we don't want to add extensions into that pass manager. + // To prevent this we must insert a no-op module pass to reset the pass + // manager to get the same behavior as EP_OptimizerLast in non-O0 builds. + if (!GlobalExtensions->empty() || !Extensions.empty()) + MPM.add(createBarrierNoopPass()); + addExtensionsToPM(EP_EnabledOnOptLevel0, MPM); return; } @@ -147,7 +167,10 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) { // Start of function pass. // Break up aggregate allocas, using SSAUpdater. - MPM.add(createScalarReplAggregatesPass(-1, false)); + if (UseNewSROA) + MPM.add(createSROAPass(/*RequiresDomTree*/ false)); + else + MPM.add(createScalarReplAggregatesPass(-1, false)); MPM.add(createEarlyCSEPass()); // Catch trivial redundancies if (!DisableSimplifyLibCalls) MPM.add(createSimplifyLibCallsPass()); // Library Call Optimizations @@ -166,6 +189,12 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) { MPM.add(createIndVarSimplifyPass()); // Canonicalize indvars MPM.add(createLoopIdiomPass()); // Recognize idioms like memset. 
MPM.add(createLoopDeletionPass()); // Delete dead loops + + if (LoopVectorize) { + MPM.add(createLoopVectorizePass()); + MPM.add(createLICMPass()); + } + if (!DisableUnrollLoops) MPM.add(createLoopUnrollPass()); // Unroll small loops addExtensionsToPM(EP_LoopOptimizerEnd, MPM); @@ -201,13 +230,12 @@ void PassManagerBuilder::populateModulePassManager(PassManagerBase &MPM) { // FIXME: We shouldn't bother with this anymore. MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes - // GlobalOpt already deletes dead functions and globals, at -O3 try a + // GlobalOpt already deletes dead functions and globals, at -O2 try a // late pass of GlobalDCE. It is capable of deleting dead cycles. - if (OptLevel > 2) + if (OptLevel > 1) { MPM.add(createGlobalDCEPass()); // Remove dead fns and globals. - - if (OptLevel > 1) MPM.add(createConstantMergePass()); // Merge dup global constants + } } addExtensionsToPM(EP_OptimizerLast, MPM); } @@ -222,8 +250,11 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM, // Now that composite has been compiled, scan through the module, looking // for a main function. If main is defined, mark all other functions // internal. - if (Internalize) - PM.add(createInternalizePass(true)); + if (Internalize) { + std::vector E; + E.push_back("main"); + PM.add(createInternalizePass(E)); + } // Propagate constants at call sites into the functions they call. This // opens opportunities for globalopt (and inlining) by substituting function @@ -265,7 +296,10 @@ void PassManagerBuilder::populateLTOPassManager(PassManagerBase &PM, PM.add(createInstructionCombiningPass()); PM.add(createJumpThreadingPass()); // Break up allocas - PM.add(createScalarReplAggregatesPass()); + if (UseNewSROA) + PM.add(createSROAPass()); + else + PM.add(createScalarReplAggregatesPass()); // Run a few AA driven optimizations here and now, to cleanup the code. PM.add(createFunctionAttrsPass()); // Add nocapture. diff --git a/lib/Transforms/IPO/PruneEH.cpp b/lib/Transforms/IPO/PruneEH.cpp index c8cc8fd..fb4ecbf 100644 --- a/lib/Transforms/IPO/PruneEH.cpp +++ b/lib/Transforms/IPO/PruneEH.cpp @@ -137,16 +137,18 @@ bool PruneEH::runOnSCC(CallGraphSCC &SCC) { // If the SCC doesn't unwind or doesn't throw, note this fact. 
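Taken together, the PassManagerBuilder changes above add two knobs: -use-new-sroa (default true) chooses createSROAPass() over createScalarReplAggregatesPass(), and -vectorize-loops / the new LoopVectorize field gates createLoopVectorizePass() plus a follow-up LICM run. A sketch of how a frontend would opt in, assuming the usual PassManagerBuilder setup:

    #include "llvm/PassManager.h"
    #include "llvm/Transforms/IPO/PassManagerBuilder.h"
    using namespace llvm;

    void buildO2Pipeline(PassManager &MPM) {
      PassManagerBuilder PMB;
      PMB.OptLevel = 2;
      PMB.LoopVectorize = true;        // same effect as passing -vectorize-loops
      // The new SROA pass is already the default; -use-new-sroa=false falls
      // back to the old scalar replacement of aggregates.
      PMB.populateModulePassManager(MPM);
    }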
if (!SCCMightUnwind || !SCCMightReturn) for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) { - Attributes NewAttributes = Attribute::None; + AttrBuilder NewAttributes; if (!SCCMightUnwind) - NewAttributes |= Attribute::NoUnwind; + NewAttributes.addAttribute(Attributes::NoUnwind); if (!SCCMightReturn) - NewAttributes |= Attribute::NoReturn; + NewAttributes.addAttribute(Attributes::NoReturn); Function *F = (*I)->getFunction(); const AttrListPtr &PAL = F->getAttributes(); - const AttrListPtr &NPAL = PAL.addAttr(~0, NewAttributes); + const AttrListPtr &NPAL = PAL.addAttr(F->getContext(), ~0, + Attributes::get(F->getContext(), + NewAttributes)); if (PAL != NPAL) { MadeChange = true; F->setAttributes(NPAL); diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h index 0d5ef90..7467eca 100644 --- a/lib/Transforms/InstCombine/InstCombine.h +++ b/lib/Transforms/InstCombine/InstCombine.h @@ -18,10 +18,11 @@ #include "llvm/Analysis/ValueTracking.h" #include "llvm/Support/InstVisitor.h" #include "llvm/Support/TargetFolder.h" +#include "llvm/Transforms/Utils/SimplifyLibCalls.h" namespace llvm { class CallSite; - class TargetData; + class DataLayout; class TargetLibraryInfo; class DbgDeclareInst; class MemIntrinsic; @@ -71,9 +72,10 @@ public: class LLVM_LIBRARY_VISIBILITY InstCombiner : public FunctionPass, public InstVisitor { - TargetData *TD; + DataLayout *TD; TargetLibraryInfo *TLI; bool MadeIRChange; + LibCallSimplifier *Simplifier; public: /// Worklist - All of the instructions that need to be simplified. InstCombineWorklist Worklist; @@ -95,7 +97,7 @@ public: virtual void getAnalysisUsage(AnalysisUsage &AU) const; - TargetData *getTargetData() const { return TD; } + DataLayout *getDataLayout() const { return TD; } TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; } @@ -218,7 +220,7 @@ private: Type *Ty); Instruction *visitCallSite(CallSite CS); - Instruction *tryOptimizeCall(CallInst *CI, const TargetData *TD); + Instruction *tryOptimizeCall(CallInst *CI, const DataLayout *TD); bool transformConstExprCastCall(CallSite CS); Instruction *transformCallThroughTrampoline(CallSite CS, IntrinsicInst *Tramp); @@ -365,6 +367,10 @@ private: Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned); + + /// Descale - Return a value X such that Val = X * Scale, or null if none. If + /// the multiplication is known not to overflow then NoSignedWrap is set. 
+ Value *Descale(Value *Val, APInt Scale, bool &NoSignedWrap); }; diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp index 99b62f8..d8257e6 100644 --- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -13,7 +13,7 @@ #include "InstCombine.h" #include "llvm/Analysis/InstructionSimplify.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/PatternMatch.h" using namespace llvm; diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp index cbe1ca4..48f2704 100644 --- a/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -13,7 +13,7 @@ #include "InstCombine.h" #include "llvm/Support/CallSite.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Transforms/Utils/BuildLibCalls.h" #include "llvm/Transforms/Utils/Local.h" @@ -29,6 +29,26 @@ static Type *getPromotedType(Type *Ty) { return Ty; } +/// reduceToSingleValueType - Given an aggregate type which ultimately holds a +/// single scalar element, like {{{type}}} or [1 x type], return type. +static Type *reduceToSingleValueType(Type *T) { + while (!T->isSingleValueType()) { + if (StructType *STy = dyn_cast(T)) { + if (STy->getNumElements() == 1) + T = STy->getElementType(0); + else + break; + } else if (ArrayType *ATy = dyn_cast(T)) { + if (ATy->getNumElements() == 1) + T = ATy->getElementType(); + else + break; + } else + break; + } + + return T; +} Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD); @@ -74,35 +94,37 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { // dest address will be promotable. See if we can find a better type than the // integer datatype. Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts(); + MDNode *CopyMD = 0; if (StrippedDest != MI->getArgOperand(0)) { Type *SrcETy = cast(StrippedDest->getType()) ->getElementType(); if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) { // The SrcETy might be something like {{{double}}} or [1 x double]. Rip // down through these levels if so. - while (!SrcETy->isSingleValueType()) { - if (StructType *STy = dyn_cast(SrcETy)) { - if (STy->getNumElements() == 1) - SrcETy = STy->getElementType(0); - else - break; - } else if (ArrayType *ATy = dyn_cast(SrcETy)) { - if (ATy->getNumElements() == 1) - SrcETy = ATy->getElementType(); - else - break; - } else - break; - } + SrcETy = reduceToSingleValueType(SrcETy); if (SrcETy->isSingleValueType()) { NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp); NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp); + + // If the memcpy has metadata describing the members, see if we can + // get the TBAA tag describing our copy. + if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) { + if (M->getNumOperands() == 3 && + M->getOperand(0) && + isa(M->getOperand(0)) && + cast(M->getOperand(0))->isNullValue() && + M->getOperand(1) && + isa(M->getOperand(1)) && + cast(M->getOperand(1))->getValue() == Size && + M->getOperand(2) && + isa(M->getOperand(2))) + CopyMD = cast(M->getOperand(2)); + } } } } - // If the memcpy/memmove provides better alignment info than we can // infer, use it. 
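The memcpy rewrite above only fires for aggregates that ultimately hold a single scalar, and with this change it also forwards a !tbaa tag extracted from the call's !tbaa.struct metadata onto the new load/store pair. An illustrative source pattern that produces such a copy (an assumption about what the frontend typically emits, not part of the patch):

// A one-field struct is "{{double}}-like": the 8-byte assignment below is
// normally lowered to llvm.memcpy carrying !tbaa.struct metadata, which
// InstCombine can now turn into a double-typed load/store with a !tbaa tag.
struct Wrapper { double d; };

void copy(Wrapper *dst, const Wrapper *src) {
  *dst = *src;
}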
SrcAlign = std::max(SrcAlign, CopyAlign); @@ -112,8 +134,12 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy); LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile()); L->setAlignment(SrcAlign); + if (CopyMD) + L->setMetadata(LLVMContext::MD_tbaa, CopyMD); StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile()); S->setAlignment(DstAlign); + if (CopyMD) + S->setMetadata(LLVMContext::MD_tbaa, CopyMD); // Set the size of the copy to 0, it will be deleted on the next iteration. MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType())); @@ -168,7 +194,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) { /// the heavy lifting. /// Instruction *InstCombiner::visitCallInst(CallInst &CI) { - if (isFreeCall(&CI)) + if (isFreeCall(&CI, TLI)) return visitFree(CI); // If the caller function is nounwind, mark the call as nounwind, even if the @@ -243,7 +269,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { default: break; case Intrinsic::objectsize: { uint64_t Size; - if (getObjectSize(II->getArgOperand(0), Size, TD)) + if (getObjectSize(II->getArgOperand(0), Size, TD, TLI)) return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size)); return 0; } @@ -731,7 +757,7 @@ Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) { /// passed through the varargs area, we can eliminate the use of the cast. static bool isSafeToEliminateVarargsCast(const CallSite CS, const CastInst * const CI, - const TargetData * const TD, + const DataLayout * const TD, const int ix) { if (!CI->isLosslessCast()) return false; @@ -752,49 +778,17 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS, return true; } -namespace { -class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls { - InstCombiner *IC; -protected: - void replaceCall(Value *With) { - NewInstruction = IC->ReplaceInstUsesWith(*CI, With); - } - bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const { - if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp)) - return true; - if (ConstantInt *SizeCI = - dyn_cast(CI->getArgOperand(SizeCIOp))) { - if (SizeCI->isAllOnesValue()) - return true; - if (isString) { - uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp)); - // If the length is 0 we don't know how long it is and so we can't - // remove the check. - if (Len == 0) return false; - return SizeCI->getZExtValue() >= Len; - } - if (ConstantInt *Arg = dyn_cast( - CI->getArgOperand(SizeArgOp))) - return SizeCI->getZExtValue() >= Arg->getZExtValue(); - } - return false; - } -public: - InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { } - Instruction *NewInstruction; -}; -} // end anonymous namespace - // Try to fold some different type of calls here. // Currently we're only working with the checking functions, memcpy_chk, // mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk, // strcat_chk and strncat_chk. 
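tryOptimizeCall (next hunk) now hands the fortified "_chk" calls listed above to the new LibCallSimplifier instead of the removed InstCombineFortifiedLibCalls helper. As a rough illustration of the kind of fold involved (my example, assuming the usual __builtin___memcpy_chk/__builtin_object_size semantics; the exact folds live in the SimplifyLibCalls utilities):

#include <cstring>

// When the destination object size is known and covers the copy, the checked
// call can be simplified to a plain memcpy; an unknown size (-1) likewise
// lets the check be dropped.
void demo(char *src) {
  char buf[32];
  __builtin___memcpy_chk(buf, src, 16, __builtin_object_size(buf, 0));
  (void)buf;
}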
-Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) { +Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *TD) { if (CI->getCalledFunction() == 0) return 0; - InstCombineFortifiedLibCalls Simplifier(this); - Simplifier.fold(CI, TD, TLI); - return Simplifier.NewInstruction; + if (Value *With = Simplifier->optimizeCall(CI)) + return ReplaceInstUsesWith(*CI, With); + + return 0; } static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) { @@ -877,7 +871,7 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) { // visitCallSite - Improvements for call and invoke instructions. // Instruction *InstCombiner::visitCallSite(CallSite CS) { - if (isAllocLikeFn(CS.getInstruction())) + if (isAllocLikeFn(CS.getInstruction(), TLI)) return visitAllocSite(*CS.getInstruction()); bool Changed = false; @@ -961,7 +955,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) { Changed = true; } - // Try to optimize the call if possible, we require TargetData for most of + // Try to optimize the call if possible, we require DataLayout for most of // this. None of these calls are seen as possibly dead so go ahead and // delete the instruction now. if (CallInst *CI = dyn_cast(CS.getInstruction())) { @@ -1013,8 +1007,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { return false; // Cannot transform this return value. if (!CallerPAL.isEmpty() && !Caller->use_empty()) { - Attributes RAttrs = CallerPAL.getRetAttributes(); - if (RAttrs & Attribute::typeIncompatible(NewRetTy)) + AttrBuilder RAttrs = CallerPAL.getRetAttributes(); + if (RAttrs.hasAttributes(Attributes::typeIncompatible(NewRetTy))) return false; // Attribute not compatible with transformed value. } @@ -1044,12 +1038,13 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { return false; // Cannot transform this parameter value. Attributes Attrs = CallerPAL.getParamAttributes(i + 1); - if (Attrs & Attribute::typeIncompatible(ParamTy)) + if (AttrBuilder(Attrs). + hasAttributes(Attributes::typeIncompatible(ParamTy))) return false; // Attribute not compatible with transformed value. // If the parameter is passed as a byval argument, then we have to have a // sized type and the sized type has to have the same size as the old type. - if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) { + if (ParamTy != ActTy && Attrs.hasAttribute(Attributes::ByVal)) { PointerType *ParamPTy = dyn_cast(ParamTy); if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0) return false; @@ -1101,7 +1096,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams()) break; Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs; - if (PAttrs & Attribute::VarArgsIncompatible) + if (PAttrs.hasIncompatibleWithVarArgsAttrs()) return false; } @@ -1114,15 +1109,17 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { attrVec.reserve(NumCommonArgs); // Get any return attributes. - Attributes RAttrs = CallerPAL.getRetAttributes(); + AttrBuilder RAttrs = CallerPAL.getRetAttributes(); // If the return value is not being used, the type may not be compatible // with the existing attributes. Wipe out any problematic attributes. - RAttrs &= ~Attribute::typeIncompatible(NewRetTy); + RAttrs.removeAttributes(Attributes::typeIncompatible(NewRetTy)); // Add the new return attributes. 
- if (RAttrs) - attrVec.push_back(AttributeWithIndex::get(0, RAttrs)); + if (RAttrs.hasAttributes()) + attrVec.push_back( + AttributeWithIndex::get(AttrListPtr::ReturnIndex, + Attributes::get(FT->getContext(), RAttrs))); AI = CS.arg_begin(); for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) { @@ -1136,7 +1133,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { } // Add any parameter attributes. - if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1)) + Attributes PAttrs = CallerPAL.getParamAttributes(i + 1); + if (PAttrs.hasAttributes()) attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs)); } @@ -1164,19 +1162,23 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { } // Add any parameter attributes. - if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1)) + Attributes PAttrs = CallerPAL.getParamAttributes(i + 1); + if (PAttrs.hasAttributes()) attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs)); } } } - if (Attributes FnAttrs = CallerPAL.getFnAttributes()) - attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs)); + Attributes FnAttrs = CallerPAL.getFnAttributes(); + if (FnAttrs.hasAttributes()) + attrVec.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex, + FnAttrs)); if (NewRetTy->isVoidTy()) Caller->setName(""); // Void type should not have a name. - const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec); + const AttrListPtr &NewCallerPAL = AttrListPtr::get(Callee->getContext(), + attrVec); Instruction *NC; if (InvokeInst *II = dyn_cast(Caller)) { @@ -1240,8 +1242,9 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS, // If the call already has the 'nest' attribute somewhere then give up - // otherwise 'nest' would occur twice after splicing in the chain. - if (Attrs.hasAttrSomewhere(Attribute::Nest)) - return 0; + for (unsigned I = 0, E = Attrs.getNumAttrs(); I != E; ++I) + if (Attrs.getAttributesAtIndex(I).hasAttribute(Attributes::Nest)) + return 0; assert(Tramp && "transformCallThroughTrampoline called with incorrect CallSite."); @@ -1254,12 +1257,12 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS, if (!NestAttrs.isEmpty()) { unsigned NestIdx = 1; Type *NestTy = 0; - Attributes NestAttr = Attribute::None; + Attributes NestAttr; // Look for a parameter marked with the 'nest' attribute. for (FunctionType::param_iterator I = NestFTy->param_begin(), E = NestFTy->param_end(); I != E; ++NestIdx, ++I) - if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) { + if (NestAttrs.getParamAttributes(NestIdx).hasAttribute(Attributes::Nest)){ // Record the parameter type and any other attributes. NestTy = *I; NestAttr = NestAttrs.getParamAttributes(NestIdx); @@ -1278,8 +1281,10 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS, // mean appending it. Likewise for attributes. // Add any result attributes. - if (Attributes Attr = Attrs.getRetAttributes()) - NewAttrs.push_back(AttributeWithIndex::get(0, Attr)); + Attributes Attr = Attrs.getRetAttributes(); + if (Attr.hasAttributes()) + NewAttrs.push_back(AttributeWithIndex::get(AttrListPtr::ReturnIndex, + Attr)); { unsigned Idx = 1; @@ -1299,7 +1304,8 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS, // Add the original argument and attributes. 
NewArgs.push_back(*I); - if (Attributes Attr = Attrs.getParamAttributes(Idx)) + Attr = Attrs.getParamAttributes(Idx); + if (Attr.hasAttributes()) NewAttrs.push_back (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr)); @@ -1308,8 +1314,10 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS, } // Add any function attributes. - if (Attributes Attr = Attrs.getFnAttributes()) - NewAttrs.push_back(AttributeWithIndex::get(~0, Attr)); + Attr = Attrs.getFnAttributes(); + if (Attr.hasAttributes()) + NewAttrs.push_back(AttributeWithIndex::get(AttrListPtr::FunctionIndex, + Attr)); // The trampoline may have been bitcast to a bogus type (FTy). // Handle this by synthesizing a new function type, equal to FTy @@ -1348,7 +1356,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS, NestF->getType() == PointerType::getUnqual(NewFTy) ? NestF : ConstantExpr::getBitCast(NestF, PointerType::getUnqual(NewFTy)); - const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs); + const AttrListPtr &NewPAL = AttrListPtr::get(FTy->getContext(), NewAttrs); Instruction *NewCaller; if (InvokeInst *II = dyn_cast(Caller)) { diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp index 555b442..bb59db8 100644 --- a/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -13,7 +13,7 @@ #include "InstCombine.h" #include "llvm/Analysis/ConstantFolding.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Support/PatternMatch.h" using namespace llvm; @@ -78,7 +78,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale, /// try to eliminate the cast by moving the type information into the alloc. Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI) { - // This requires TargetData to get the alloca alignment and size information. + // This requires DataLayout to get the alloca alignment and size information. if (!TD) return 0; PointerType *PTy = cast(CI.getType()); @@ -229,7 +229,7 @@ isEliminableCastPair( const CastInst *CI, ///< The first cast instruction unsigned opcode, ///< The opcode of the second cast instruction Type *DstTy, ///< The target type for the second cast instruction - TargetData *TD ///< The target data for pointer size + DataLayout *TD ///< The target data for pointer size ) { Type *SrcTy = CI->getOperand(0)->getType(); // A from above @@ -238,17 +238,20 @@ isEliminableCastPair( // Get the opcodes of the two Cast instructions Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode()); Instruction::CastOps secondOp = Instruction::CastOps(opcode); - + Type *SrcIntPtrTy = TD && SrcTy->isPtrOrPtrVectorTy() ? + TD->getIntPtrType(SrcTy) : 0; + Type *MidIntPtrTy = TD && MidTy->isPtrOrPtrVectorTy() ? + TD->getIntPtrType(MidTy) : 0; + Type *DstIntPtrTy = TD && DstTy->isPtrOrPtrVectorTy() ? + TD->getIntPtrType(DstTy) : 0; unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, - DstTy, - TD ? TD->getIntPtrType(CI->getContext()) : 0); - + DstTy, SrcIntPtrTy, MidIntPtrTy, + DstIntPtrTy); + // We don't want to form an inttoptr or ptrtoint that converts to an integer // type that differs from the pointer size. 
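The extra SrcIntPtrTy/MidIntPtrTy/DstIntPtrTy operands in the cast-pair check below make the "pointer-sized integer" requirement explicit per pointer type. In source terms the rule is roughly the following (my illustration, not from the patch): a ptrtoint/inttoptr round trip folds away only when the intermediate integer is exactly pointer-sized.

#include <cstdint>

// Foldable: the intermediate integer has pointer width.
int *roundTrip(int *p) {
  return (int *)(uintptr_t)p;
}

// Not foldable on a 64-bit target: the value is truncated to 32 bits and
// widened back, so the cast pair cannot simply be eliminated.
int *truncatedRoundTrip(int *p) {
  return (int *)(uintptr_t)(uint32_t)(uintptr_t)p;
}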
- if ((Res == Instruction::IntToPtr && - (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) || - (Res == Instruction::PtrToInt && - (!TD || DstTy != TD->getIntPtrType(CI->getContext())))) + if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) || + (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy)) Res = 0; return Instruction::CastOps(Res); diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp index bdd310e..7c3f8fe 100644 --- a/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -16,7 +16,8 @@ #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/MemoryBuiltins.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Support/ConstantRange.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/PatternMatch.h" @@ -473,7 +474,7 @@ FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV, /// If we can't emit an optimized form for this expression, this returns null. /// static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC) { - TargetData &TD = *IC.getTargetData(); + DataLayout &TD = *IC.getDataLayout(); gep_type_iterator GTI = gep_type_begin(GEP); // Check to see if this gep only has a single variable index. If so, and if @@ -2355,8 +2356,25 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { // Try not to increase register pressure. BO0->hasOneUse() && BO1->hasOneUse()) { // Determine Y and Z in the form icmp (X+Y), (X+Z). - Value *Y = (A == C || A == D) ? B : A; - Value *Z = (C == A || C == B) ? D : C; + Value *Y, *Z; + if (A == C) { + // C + B == C + D -> B == D + Y = B; + Z = D; + } else if (A == D) { + // D + B == C + D -> B == C + Y = B; + Z = C; + } else if (B == C) { + // A + C == C + D -> A == D + Y = A; + Z = D; + } else { + assert(B == D); + // A + D == C + D -> A == C + Y = A; + Z = C; + } return new ICmpInst(Pred, Y, Z); } @@ -2894,10 +2912,6 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { if (!RHSF) break; - // We can't convert a PPC double double. - if (RHSF->getType()->isPPC_FP128Ty()) - break; - const fltSemantics *Sem; // FIXME: This shouldn't be here. if (LHSExt->getSrcTy()->isHalfTy()) @@ -2910,6 +2924,8 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { Sem = &APFloat::IEEEquad; else if (LHSExt->getSrcTy()->isX86_FP80Ty()) Sem = &APFloat::x87DoubleExtended; + else if (LHSExt->getSrcTy()->isPPC_FP128Ty()) + Sem = &APFloat::PPCDoubleDouble; else break; @@ -2985,6 +3001,44 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { return Res; } break; + case Instruction::Call: { + CallInst *CI = cast(LHSI); + LibFunc::Func Func; + // Various optimization for fabs compared with zero. 
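The fabs folds that follow only need the sign bit stripped, so comparisons against zero can be restated on the original operand. A standalone spot check of the equivalences used below, for ordered predicates (this is my own sanity check, not part of the patch):

#include <cassert>
#include <cmath>

int main() {
  const double vals[] = {0.0, -0.0, 1.5, -2.25, INFINITY, -INFINITY};
  for (unsigned i = 0; i != sizeof(vals) / sizeof(vals[0]); ++i) {
    double x = vals[i];
    assert(!(std::fabs(x) < 0.0));                    // fabs(x) <  0 --> false
    assert((std::fabs(x) > 0.0) == (x != 0.0));       // fabs(x) >  0 --> x != 0
    assert((std::fabs(x) <= 0.0) == (x == 0.0));      // fabs(x) <= 0 --> x == 0
    assert((std::fabs(x) >= 0.0) == !std::isnan(x));  // fabs(x) >= 0 --> !isnan(x)
    assert((std::fabs(x) == 0.0) == (x == 0.0));      // fabs(x) == 0 --> x == 0
  }
  double n = NAN;                                     // NaN: ordered compares fail
  assert(!(std::fabs(n) >= 0.0) && std::isnan(n));
  return 0;
}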
+ if (RHSC->isNullValue() && CI->getCalledFunction() && + TLI->getLibFunc(CI->getCalledFunction()->getName(), Func) && + TLI->has(Func)) { + if (Func == LibFunc::fabs || Func == LibFunc::fabsf || + Func == LibFunc::fabsl) { + switch (I.getPredicate()) { + default: break; + // fabs(x) < 0 --> false + case FCmpInst::FCMP_OLT: + return ReplaceInstUsesWith(I, Builder->getFalse()); + // fabs(x) > 0 --> x != 0 + case FCmpInst::FCMP_OGT: + return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), + RHSC); + // fabs(x) <= 0 --> x == 0 + case FCmpInst::FCMP_OLE: + return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), + RHSC); + // fabs(x) >= 0 --> !isnan(x) + case FCmpInst::FCMP_OGE: + return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), + RHSC); + // fabs(x) == 0 --> x == 0 + // fabs(x) != 0 --> x != 0 + case FCmpInst::FCMP_OEQ: + case FCmpInst::FCMP_UEQ: + case FCmpInst::FCMP_ONE: + case FCmpInst::FCMP_UNE: + return new FCmpInst(I.getPredicate(), CI->getArgOperand(0), + RHSC); + } + } + } + } } } diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index c485844..4d106fc 100644 --- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -14,13 +14,161 @@ #include "InstCombine.h" #include "llvm/IntrinsicInst.h" #include "llvm/Analysis/Loads.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/ADT/Statistic.h" using namespace llvm; -STATISTIC(NumDeadStore, "Number of dead stores eliminated"); +STATISTIC(NumDeadStore, "Number of dead stores eliminated"); +STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global"); + +/// pointsToConstantGlobal - Return true if V (possibly indirectly) points to +/// some part of a constant global variable. This intentionally only accepts +/// constant expressions because we can't rewrite arbitrary instructions. +static bool pointsToConstantGlobal(Value *V) { + if (GlobalVariable *GV = dyn_cast(V)) + return GV->isConstant(); + if (ConstantExpr *CE = dyn_cast(V)) + if (CE->getOpcode() == Instruction::BitCast || + CE->getOpcode() == Instruction::GetElementPtr) + return pointsToConstantGlobal(CE->getOperand(0)); + return false; +} + +/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived) +/// pointer to an alloca. Ignore any reads of the pointer, return false if we +/// see any stores or other unknown uses. If we see pointer arithmetic, keep +/// track of whether it moves the pointer (with IsOffset) but otherwise traverse +/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to +/// the alloca, and if the source pointer is a pointer to a constant global, we +/// can optimize this. +static bool +isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy, + SmallVectorImpl &ToDelete, + bool IsOffset = false) { + // We track lifetime intrinsics as we encounter them. If we decide to go + // ahead and replace the value with the global, this lets the caller quickly + // eliminate the markers. + + for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { + User *U = cast(*UI); + + if (LoadInst *LI = dyn_cast(U)) { + // Ignore non-volatile loads, they are always ok. 
+ if (!LI->isSimple()) return false; + continue; + } + + if (BitCastInst *BCI = dyn_cast(U)) { + // If uses of the bitcast are ok, we are ok. + if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, ToDelete, IsOffset)) + return false; + continue; + } + if (GetElementPtrInst *GEP = dyn_cast(U)) { + // If the GEP has all zero indices, it doesn't offset the pointer. If it + // doesn't, it does. + if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy, ToDelete, + IsOffset || !GEP->hasAllZeroIndices())) + return false; + continue; + } + + if (CallSite CS = U) { + // If this is the function being called then we treat it like a load and + // ignore it. + if (CS.isCallee(UI)) + continue; + + // If this is a readonly/readnone call site, then we know it is just a + // load (but one that potentially returns the value itself), so we can + // ignore it if we know that the value isn't captured. + unsigned ArgNo = CS.getArgumentNo(UI); + if (CS.onlyReadsMemory() && + (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo))) + continue; + + // If this is being passed as a byval argument, the caller is making a + // copy, so it is only a read of the alloca. + if (CS.isByValArgument(ArgNo)) + continue; + } + + // Lifetime intrinsics can be handled by the caller. + if (IntrinsicInst *II = dyn_cast(U)) { + if (II->getIntrinsicID() == Intrinsic::lifetime_start || + II->getIntrinsicID() == Intrinsic::lifetime_end) { + assert(II->use_empty() && "Lifetime markers have no result to use!"); + ToDelete.push_back(II); + continue; + } + } + + // If this is isn't our memcpy/memmove, reject it as something we can't + // handle. + MemTransferInst *MI = dyn_cast(U); + if (MI == 0) + return false; + + // If the transfer is using the alloca as a source of the transfer, then + // ignore it since it is a load (unless the transfer is volatile). + if (UI.getOperandNo() == 1) { + if (MI->isVolatile()) return false; + continue; + } + + // If we already have seen a copy, reject the second one. + if (TheCopy) return false; + + // If the pointer has been offset from the start of the alloca, we can't + // safely handle this. + if (IsOffset) return false; + + // If the memintrinsic isn't using the alloca as the dest, reject it. + if (UI.getOperandNo() != 0) return false; + + // If the source of the memcpy/move is not a constant global, reject it. + if (!pointsToConstantGlobal(MI->getSource())) + return false; + + // Otherwise, the transform is safe. Remember the copy instruction. + TheCopy = MI; + } + return true; +} + +/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only +/// modified by a copy from a constant global. If we can prove this, we can +/// replace any uses of the alloca with uses of the global directly. +static MemTransferInst * +isOnlyCopiedFromConstantGlobal(AllocaInst *AI, + SmallVectorImpl &ToDelete) { + MemTransferInst *TheCopy = 0; + if (isOnlyCopiedFromConstantGlobal(AI, TheCopy, ToDelete)) + return TheCopy; + return 0; +} + +/// getPointeeAlignment - Compute the minimum alignment of the value pointed +/// to by the given pointer. 
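isOnlyCopiedFromConstantGlobal (above and continuing here) is what lets visitAllocaInst, further down, replace a read-only alloca with the constant global it was memcpy'd from. The source-level pattern the accompanying comment alludes to looks like this (an illustrative example of code the frontend turns into an alloca plus a memcpy from a private constant global):

// 'A' is initialized once from a constant table and never written again, so
// after this change the loads can be redirected to the global initializer
// and the local copy (and its lifetime markers) deleted.
int lookup(int i) {
  int A[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55};
  return A[i & 7];
}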
+static unsigned getPointeeAlignment(Value *V, const DataLayout &TD) { + if (ConstantExpr *CE = dyn_cast(V)) + if (CE->getOpcode() == Instruction::BitCast || + (CE->getOpcode() == Instruction::GetElementPtr && + cast(CE)->hasAllZeroIndices())) + return getPointeeAlignment(CE->getOperand(0), TD); + + if (GlobalVariable *GV = dyn_cast(V)) + if (!GV->isDeclaration()) + return TD.getPreferredAlignment(GV); + + if (PointerType *PT = dyn_cast(V->getType())) + if (PT->getElementType()->isSized()) + return TD.getABITypeAlignment(PT->getElementType()); + + return 0; +} Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) { // Ensure that the alloca array size argument has type intptr_t, so that @@ -99,12 +247,16 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) { return &AI; } + // If the alignment of the entry block alloca is 0 (unspecified), + // assign it the preferred alignment. + if (EntryAI->getAlignment() == 0) + EntryAI->setAlignment( + TD->getPrefTypeAlignment(EntryAI->getAllocatedType())); // Replace this zero-sized alloca with the one at the start of the entry // block after ensuring that the address will be aligned enough for both // types. - unsigned MaxAlign = - std::max(TD->getPrefTypeAlignment(EntryAI->getAllocatedType()), - TD->getPrefTypeAlignment(AI.getAllocatedType())); + unsigned MaxAlign = std::max(EntryAI->getAlignment(), + AI.getAlignment()); EntryAI->setAlignment(MaxAlign); if (AI.getType() != EntryAI->getType()) return new BitCastInst(EntryAI, AI.getType()); @@ -113,6 +265,31 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) { } } + if (TD) { + // Check to see if this allocation is only modified by a memcpy/memmove from + // a constant global whose alignment is equal to or exceeds that of the + // allocation. If this is the case, we can change all users to use + // the constant global instead. This is commonly produced by the CFE by + // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' + // is only subsequently read. + SmallVector ToDelete; + if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) { + if (AI.getAlignment() <= getPointeeAlignment(Copy->getSource(), *TD)) { + DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n'); + DEBUG(dbgs() << " memcpy = " << *Copy << '\n'); + for (unsigned i = 0, e = ToDelete.size(); i != e; ++i) + EraseInstFromFunction(*ToDelete[i]); + Constant *TheSrc = cast(Copy->getSource()); + Instruction *NewI + = ReplaceInstUsesWith(AI, ConstantExpr::getBitCast(TheSrc, + AI.getType())); + EraseInstFromFunction(*Copy); + ++NumGlobalCopies; + return NewI; + } + } + } + // At last, use the generic allocation site handler to aggressively remove // unused allocas. return visitAllocSite(AI); @@ -121,7 +298,7 @@ Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) { /// InstCombineLoadCast - Fold 'load (cast P)' -> cast (load P)' when possible. static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI, - const TargetData *TD) { + const DataLayout *TD) { User *CI = cast(LI.getOperand(0)); Value *CastOp = CI->getOperand(0); @@ -151,14 +328,14 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI, SrcPTy = SrcTy->getElementType(); } - if (IC.getTargetData() && + if (IC.getDataLayout() && (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() || SrcPTy->isVectorTy()) && // Do not allow turning this into a load of an integer, which is then // casted to a pointer, this pessimizes pointer analysis a lot. 
(SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) && - IC.getTargetData()->getTypeSizeInBits(SrcPTy) == - IC.getTargetData()->getTypeSizeInBits(DestPTy)) { + IC.getDataLayout()->getTypeSizeInBits(SrcPTy) == + IC.getDataLayout()->getTypeSizeInBits(DestPTy)) { // Okay, we are casting from one integer or pointer type to another of // the same size. Instead of casting the pointer before the load, cast @@ -336,11 +513,11 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { // If the pointers point into different address spaces or if they point to // values with different sizes, we can't do the transformation. - if (!IC.getTargetData() || + if (!IC.getDataLayout() || SrcTy->getAddressSpace() != cast(CI->getType())->getAddressSpace() || - IC.getTargetData()->getTypeSizeInBits(SrcPTy) != - IC.getTargetData()->getTypeSizeInBits(DestPTy)) + IC.getDataLayout()->getTypeSizeInBits(SrcPTy) != + IC.getDataLayout()->getTypeSizeInBits(DestPTy)) return 0; // Okay, we are casting from one integer or pointer type to another of diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp index 35a0bbb..cefe45e 100644 --- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp +++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp @@ -37,7 +37,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) { if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(PowerOf2), m_Value(A))), m_Value(B))) && // The "1" can be any value known to be a power of 2. - isPowerOfTwo(PowerOf2, IC.getTargetData())) { + isPowerOfTwo(PowerOf2, IC.getDataLayout())) { A = IC.Builder->CreateSub(A, B); return IC.Builder->CreateShl(PowerOf2, A); } @@ -46,7 +46,7 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC) { // inexact. Similarly for <<. if (BinaryOperator *I = dyn_cast(V)) if (I->isLogicalShift() && - isPowerOfTwo(I->getOperand(0), IC.getTargetData())) { + isPowerOfTwo(I->getOperand(0), IC.getDataLayout())) { // We know that this is an exact/nuw shift and that the input is a // non-zero context as well. 
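The InstCombineMulDivRem hunk below adds the rewrite "(x lshr C1) udiv C2 --> x udiv (C2 << C1)". The underlying identity is nested flooring division, floor(floor(x / 2^C1) / C2) = floor(x / (C2 * 2^C1)), valid as long as C2 << C1 does not overflow; a quick standalone check of that identity (my own, not from the patch):

#include <cassert>
#include <cstdint>

int main() {
  for (uint64_t x = 0; x < 100000; x += 37)
    for (unsigned c1 = 0; c1 < 8; ++c1)
      for (uint64_t c2 = 1; c2 < 20; ++c2) {
        // No overflow for these small constants, so the rewrite is exact.
        assert(((x >> c1) / c2) == (x / (c2 << c1)));
      }
  return 0;
}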
if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC)) { @@ -462,12 +462,23 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) { } } + // (x lshr C1) udiv C2 --> x udiv (C2 << C1) + if (ConstantInt *C2 = dyn_cast(Op1)) { + Value *X; + ConstantInt *C1; + if (match(Op0, m_LShr(m_Value(X), m_ConstantInt(C1)))) { + APInt NC = C2->getValue().shl(C1->getLimitedValue(C1->getBitWidth()-1)); + return BinaryOperator::CreateUDiv(X, Builder->getInt(NC)); + } + } + // X udiv (C1 << N), where C1 is "1< X >> (N+C2) { const APInt *CI; Value *N; if (match(Op1, m_Shl(m_Power2(CI), m_Value(N))) || match(Op1, m_ZExt(m_Shl(m_Power2(CI), m_Value(N))))) { if (*CI != 1) - N = Builder->CreateAdd(N, ConstantInt::get(I.getType(),CI->logBase2())); + N = Builder->CreateAdd(N, + ConstantInt::get(N->getType(), CI->logBase2())); if (ZExtInst *Z = dyn_cast(Op1)) N = Builder->CreateZExt(N, Z->getDestTy()); if (I.isExact()) diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp index 664546c..de9c77e 100644 --- a/lib/Transforms/InstCombine/InstCombinePHI.cpp +++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp @@ -13,7 +13,7 @@ #include "InstCombine.h" #include "llvm/Analysis/InstructionSimplify.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/STLExtras.h" using namespace llvm; diff --git a/lib/Transforms/InstCombine/InstCombineSelect.cpp b/lib/Transforms/InstCombine/InstCombineSelect.cpp index 291e800..a2d4c88 100644 --- a/lib/Transforms/InstCombine/InstCombineSelect.cpp +++ b/lib/Transforms/InstCombine/InstCombineSelect.cpp @@ -287,7 +287,7 @@ Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal, /// SimplifyWithOpReplaced - See if V simplifies when its operand Op is /// replaced with RepOp. static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI) { // Trivial replacement. if (V == Op) @@ -333,6 +333,10 @@ static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, // All operands were constants, fold it. if (ConstOps.size() == I->getNumOperands()) { + if (CmpInst *C = dyn_cast(I)) + return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0], + ConstOps[1], TD, TLI); + if (LoadInst *LI = dyn_cast(I)) if (!LI->isVolatile()) return ConstantFoldLoadFromConstPtr(ConstOps[0], TD); @@ -903,7 +907,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { return &SI; } - if (VectorType* VecTy = dyn_cast(SI.getType())) { + if (VectorType *VecTy = dyn_cast(SI.getType())) { unsigned VWidth = VecTy->getNumElements(); APInt UndefElts(VWidth, 0); APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth)); @@ -912,6 +916,28 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { return ReplaceInstUsesWith(SI, V); return &SI; } + + if (ConstantVector *CV = dyn_cast(CondVal)) { + // Form a shufflevector instruction. + SmallVector Mask(VWidth); + Type *Int32Ty = Type::getInt32Ty(CV->getContext()); + for (unsigned i = 0; i != VWidth; ++i) { + Constant *Elem = cast(CV->getOperand(i)); + if (ConstantInt *E = dyn_cast(Elem)) + Mask[i] = ConstantInt::get(Int32Ty, i + (E->isZero() ? 
VWidth : 0)); + else if (isa(Elem)) + Mask[i] = UndefValue::get(Int32Ty); + else + return 0; + } + Constant *MaskVal = ConstantVector::get(Mask); + Value *V = Builder->CreateShuffleVector(TrueVal, FalseVal, MaskVal); + return ReplaceInstUsesWith(SI, V); + } + + if (isa(CondVal)) { + return ReplaceInstUsesWith(SI, FalseVal); + } } return 0; diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp index 4bb2403..57021f1 100644 --- a/lib/Transforms/InstCombine/InstCombineShifts.cpp +++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp @@ -190,7 +190,7 @@ static Value *GetShiftedValue(Value *V, unsigned NumBits, bool isLeftShift, V = IC.Builder->CreateLShr(C, NumBits); // If we got a constantexpr back, try to simplify it with TD info. if (ConstantExpr *CE = dyn_cast(V)) - V = ConstantFoldConstantExpression(CE, IC.getTargetData(), + V = ConstantFoldConstantExpression(CE, IC.getDataLayout(), IC.getTargetLibraryInfo()); return V; } diff --git a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp index 54be8ed..602b203 100644 --- a/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp +++ b/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp @@ -14,7 +14,7 @@ #include "InstCombine.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/IntrinsicInst.h" using namespace llvm; diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp index cf60f0f..dd7ea14 100644 --- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -636,8 +636,11 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) { // If LHS's width is changed, shift the mask value accordingly. // If newRHS == NULL, i.e. LHSOp0 == RHSOp0, we want to remap any - // references to RHSOp0 to LHSOp0, so we don't need to shift the mask. - if (eltMask >= 0 && newRHS != NULL) + // references from RHSOp0 to LHSOp0, so we don't need to shift the mask. + // If newRHS == newLHS, we want to remap any references from newRHS to + // newLHS so that we can properly identify splats that may occur due to + // obfuscation accross the two vectors. 
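The select rewrite above builds a shufflevector mask lane by lane: lane i reads element i of TrueVal when the constant condition lane is nonzero, and element i + VWidth of FalseVal when it is zero (undef condition lanes become undef mask entries). A small standalone simulation of that mask construction and of shuffle semantics (illustrative only; plain integers stand in for IR values):

#include <cassert>
#include <vector>

int main() {
  const unsigned VWidth = 4;
  const bool Cond[VWidth]   = {true, false, true, false};
  const int  TrueV[VWidth]  = {10, 11, 12, 13};
  const int  FalseV[VWidth] = {20, 21, 22, 23};

  // Mask construction as in the hunk: i, or i + VWidth for a zero lane.
  std::vector<unsigned> Mask(VWidth);
  for (unsigned i = 0; i != VWidth; ++i)
    Mask[i] = i + (Cond[i] ? 0 : VWidth);

  // Shuffle semantics: indices below VWidth pick from the first vector.
  for (unsigned i = 0; i != VWidth; ++i) {
    int Res = Mask[i] < VWidth ? TrueV[Mask[i]] : FalseV[Mask[i] - VWidth];
    assert(Res == (Cond[i] ? TrueV[i] : FalseV[i]));
  }
  return 0;
}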
+ if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS) eltMask += newLHSWidth; } diff --git a/lib/Transforms/InstCombine/InstCombineWorklist.h b/lib/Transforms/InstCombine/InstCombineWorklist.h index 99a02fc..ea654ae 100644 --- a/lib/Transforms/InstCombine/InstCombineWorklist.h +++ b/lib/Transforms/InstCombine/InstCombineWorklist.h @@ -26,8 +26,8 @@ class LLVM_LIBRARY_VISIBILITY InstCombineWorklist { SmallVector Worklist; DenseMap WorklistMap; - void operator=(const InstCombineWorklist&RHS); // DO NOT IMPLEMENT - InstCombineWorklist(const InstCombineWorklist&); // DO NOT IMPLEMENT + void operator=(const InstCombineWorklist&RHS) LLVM_DELETED_FUNCTION; + InstCombineWorklist(const InstCombineWorklist&) LLVM_DELETED_FUNCTION; public: InstCombineWorklist() {} diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp index 68ecd51..9a46f25 100644 --- a/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -40,7 +40,7 @@ #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/MemoryBuiltins.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Support/CFG.h" @@ -88,7 +88,7 @@ void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const { Value *InstCombiner::EmitGEPOffset(User *GEP) { - return llvm::EmitGEPOffset(Builder, *getTargetData(), GEP); + return llvm::EmitGEPOffset(Builder, *getDataLayout(), GEP); } /// ShouldChangeType - Return true if it is desirable to convert a computation @@ -805,6 +805,244 @@ static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) { return true; } +/// Descale - Return a value X such that Val = X * Scale, or null if none. If +/// the multiplication is known not to overflow then NoSignedWrap is set. +Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) { + assert(isa(Val->getType()) && "Can only descale integers!"); + assert(cast(Val->getType())->getBitWidth() == + Scale.getBitWidth() && "Scale not compatible with value!"); + + // If Val is zero or Scale is one then Val = Val * Scale. + if (match(Val, m_Zero()) || Scale == 1) { + NoSignedWrap = true; + return Val; + } + + // If Scale is zero then it does not divide Val. + if (Scale.isMinValue()) + return 0; + + // Look through chains of multiplications, searching for a constant that is + // divisible by Scale. For example, descaling X*(Y*(Z*4)) by a factor of 4 + // will find the constant factor 4 and produce X*(Y*Z). Descaling X*(Y*8) by + // a factor of 4 will produce X*(Y*2). The principle of operation is to bore + // down from Val: + // + // Val = M1 * X || Analysis starts here and works down + // M1 = M2 * Y || Doesn't descend into terms with more + // M2 = Z * 4 \/ than one use + // + // Then to modify a term at the bottom: + // + // Val = M1 * X + // M1 = Z * Y || Replaced M2 with Z + // + // Then to work back up correcting nsw flags. + + // Op - the term we are currently analyzing. Starts at Val then drills down. + // Replaced with its descaled value before exiting from the drill down loop. + Value *Op = Val; + + // Parent - initially null, but after drilling down notes where Op came from. + // In the example above, Parent is (Val, 0) when Op is M1, because M1 is the + // 0'th operand of Val. 
+ std::pair Parent; + + // RequireNoSignedWrap - Set if the transform requires a descaling at deeper + // levels that doesn't overflow. + bool RequireNoSignedWrap = false; + + // logScale - log base 2 of the scale. Negative if not a power of 2. + int32_t logScale = Scale.exactLogBase2(); + + for (;; Op = Parent.first->getOperand(Parent.second)) { // Drill down + + if (ConstantInt *CI = dyn_cast(Op)) { + // If Op is a constant divisible by Scale then descale to the quotient. + APInt Quotient(Scale), Remainder(Scale); // Init ensures right bitwidth. + APInt::sdivrem(CI->getValue(), Scale, Quotient, Remainder); + if (!Remainder.isMinValue()) + // Not divisible by Scale. + return 0; + // Replace with the quotient in the parent. + Op = ConstantInt::get(CI->getType(), Quotient); + NoSignedWrap = true; + break; + } + + if (BinaryOperator *BO = dyn_cast(Op)) { + + if (BO->getOpcode() == Instruction::Mul) { + // Multiplication. + NoSignedWrap = BO->hasNoSignedWrap(); + if (RequireNoSignedWrap && !NoSignedWrap) + return 0; + + // There are three cases for multiplication: multiplication by exactly + // the scale, multiplication by a constant different to the scale, and + // multiplication by something else. + Value *LHS = BO->getOperand(0); + Value *RHS = BO->getOperand(1); + + if (ConstantInt *CI = dyn_cast(RHS)) { + // Multiplication by a constant. + if (CI->getValue() == Scale) { + // Multiplication by exactly the scale, replace the multiplication + // by its left-hand side in the parent. + Op = LHS; + break; + } + + // Otherwise drill down into the constant. + if (!Op->hasOneUse()) + return 0; + + Parent = std::make_pair(BO, 1); + continue; + } + + // Multiplication by something else. Drill down into the left-hand side + // since that's where the reassociate pass puts the good stuff. + if (!Op->hasOneUse()) + return 0; + + Parent = std::make_pair(BO, 0); + continue; + } + + if (logScale > 0 && BO->getOpcode() == Instruction::Shl && + isa(BO->getOperand(1))) { + // Multiplication by a power of 2. + NoSignedWrap = BO->hasNoSignedWrap(); + if (RequireNoSignedWrap && !NoSignedWrap) + return 0; + + Value *LHS = BO->getOperand(0); + int32_t Amt = cast(BO->getOperand(1))-> + getLimitedValue(Scale.getBitWidth()); + // Op = LHS << Amt. + + if (Amt == logScale) { + // Multiplication by exactly the scale, replace the multiplication + // by its left-hand side in the parent. + Op = LHS; + break; + } + if (Amt < logScale || !Op->hasOneUse()) + return 0; + + // Multiplication by more than the scale. Reduce the multiplying amount + // by the scale in the parent. + Parent = std::make_pair(BO, 1); + Op = ConstantInt::get(BO->getType(), Amt - logScale); + break; + } + } + + if (!Op->hasOneUse()) + return 0; + + if (CastInst *Cast = dyn_cast(Op)) { + if (Cast->getOpcode() == Instruction::SExt) { + // Op is sign-extended from a smaller type, descale in the smaller type. + unsigned SmallSize = Cast->getSrcTy()->getPrimitiveSizeInBits(); + APInt SmallScale = Scale.trunc(SmallSize); + // Suppose Op = sext X, and we descale X as Y * SmallScale. We want to + // descale Op as (sext Y) * Scale. In order to have + // sext (Y * SmallScale) = (sext Y) * Scale + // some conditions need to hold however: SmallScale must sign-extend to + // Scale and the multiplication Y * SmallScale should not overflow. + if (SmallScale.sext(Scale.getBitWidth()) != Scale) + // SmallScale does not sign-extend to Scale. + return 0; + assert(SmallScale.exactLogBase2() == logScale); + // Require that Y * SmallScale must not overflow. 
+ RequireNoSignedWrap = true; + + // Drill down through the cast. + Parent = std::make_pair(Cast, 0); + Scale = SmallScale; + continue; + } + + if (Cast->getOpcode() == Instruction::Trunc) { + // Op is truncated from a larger type, descale in the larger type. + // Suppose Op = trunc X, and we descale X as Y * sext Scale. Then + // trunc (Y * sext Scale) = (trunc Y) * Scale + // always holds. However (trunc Y) * Scale may overflow even if + // trunc (Y * sext Scale) does not, so nsw flags need to be cleared + // from this point up in the expression (see later). + if (RequireNoSignedWrap) + return 0; + + // Drill down through the cast. + unsigned LargeSize = Cast->getSrcTy()->getPrimitiveSizeInBits(); + Parent = std::make_pair(Cast, 0); + Scale = Scale.sext(LargeSize); + if (logScale + 1 == (int32_t)Cast->getType()->getPrimitiveSizeInBits()) + logScale = -1; + assert(Scale.exactLogBase2() == logScale); + continue; + } + } + + // Unsupported expression, bail out. + return 0; + } + + // We know that we can successfully descale, so from here on we can safely + // modify the IR. Op holds the descaled version of the deepest term in the + // expression. NoSignedWrap is 'true' if multiplying Op by Scale is known + // not to overflow. + + if (!Parent.first) + // The expression only had one term. + return Op; + + // Rewrite the parent using the descaled version of its operand. + assert(Parent.first->hasOneUse() && "Drilled down when more than one use!"); + assert(Op != Parent.first->getOperand(Parent.second) && + "Descaling was a no-op?"); + Parent.first->setOperand(Parent.second, Op); + Worklist.Add(Parent.first); + + // Now work back up the expression correcting nsw flags. The logic is based + // on the following observation: if X * Y is known not to overflow as a signed + // multiplication, and Y is replaced by a value Z with smaller absolute value, + // then X * Z will not overflow as a signed multiplication either. As we work + // our way up, having NoSignedWrap 'true' means that the descaled value at the + // current level has strictly smaller absolute value than the original. + Instruction *Ancestor = Parent.first; + do { + if (BinaryOperator *BO = dyn_cast(Ancestor)) { + // If the multiplication wasn't nsw then we can't say anything about the + // value of the descaled multiplication, and we have to clear nsw flags + // from this point on up. + bool OpNoSignedWrap = BO->hasNoSignedWrap(); + NoSignedWrap &= OpNoSignedWrap; + if (NoSignedWrap != OpNoSignedWrap) { + BO->setHasNoSignedWrap(NoSignedWrap); + Worklist.Add(Ancestor); + } + } else if (Ancestor->getOpcode() == Instruction::Trunc) { + // The fact that the descaled input to the trunc has smaller absolute + // value than the original input doesn't tell us anything useful about + // the absolute values of the truncations. + NoSignedWrap = false; + } + assert((Ancestor->getOpcode() != Instruction::SExt || NoSignedWrap) && + "Failed to keep proper track of nsw flags while drilling down?"); + + if (Ancestor == Val) + // Got to the top, all done! + return Val; + + // Move up one level in the expression. + assert(Ancestor->hasOneUse() && "Drilled down when more than one use!"); + Ancestor = Ancestor->use_back(); + } while (1); +} + Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { SmallVector Ops(GEP.op_begin(), GEP.op_end()); @@ -817,7 +1055,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // by multiples of a zero size type with zero. 
if (TD) { bool MadeChange = false; - Type *IntPtrTy = TD->getIntPtrType(GEP.getContext()); + Type *IntPtrTy = TD->getIntPtrType(GEP.getPointerOperandType()); gep_type_iterator GTI = gep_type_begin(GEP); for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); @@ -836,7 +1074,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { } Type *IndexTy = (*I)->getType(); - if (IndexTy != IntPtrTy && !IndexTy->isVectorTy()) { + if (IndexTy != IntPtrTy) { // If we are using a wider index than needed for this platform, shrink // it to what we need. If narrower, sign-extend it to what we need. // This explicit cast can make subsequent optimizations more obvious. @@ -855,7 +1093,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { if (!shouldMergeGEPs(*cast(&GEP), *Src)) return 0; - // Note that if our source is a gep chain itself that we wait for that + // Note that if our source is a gep chain itself then we wait for that // chain to be resolved before we perform this transformation. This // avoids us creating a TON of code in some cases. if (GEPOperator *SrcGEP = @@ -987,63 +1225,74 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { } // Transform things like: + // %V = mul i64 %N, 4 + // %t = getelementptr i8* bitcast (i32* %arr to i8*), i32 %V + // into: %t1 = getelementptr i32* %arr, i32 %N; bitcast + if (TD && ResElTy->isSized() && SrcElTy->isSized()) { + // Check that changing the type amounts to dividing the index by a scale + // factor. + uint64_t ResSize = TD->getTypeAllocSize(ResElTy); + uint64_t SrcSize = TD->getTypeAllocSize(SrcElTy); + if (ResSize && SrcSize % ResSize == 0) { + Value *Idx = GEP.getOperand(1); + unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); + uint64_t Scale = SrcSize / ResSize; + + // Earlier transforms ensure that the index has type IntPtrType, which + // considerably simplifies the logic by eliminating implicit casts. + assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) && + "Index not cast to pointer width?"); + + bool NSW; + if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) { + // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. + // If the multiplication NewIdx * Scale may overflow then the new + // GEP may not be "inbounds". + Value *NewGEP = GEP.isInBounds() && NSW ? + Builder->CreateInBoundsGEP(StrippedPtr, NewIdx, GEP.getName()) : + Builder->CreateGEP(StrippedPtr, NewIdx, GEP.getName()); + // The NewGEP must be pointer typed, so must the old one -> BitCast + return new BitCastInst(NewGEP, GEP.getType()); + } + } + } + + // Similarly, transform things like: // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp // (where tmp = 8*tmp2) into: // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast - - if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) { + if (TD && ResElTy->isSized() && SrcElTy->isSized() && + SrcElTy->isArrayTy()) { + // Check that changing to the array element type amounts to dividing the + // index by a scale factor. + uint64_t ResSize = TD->getTypeAllocSize(ResElTy); uint64_t ArrayEltSize = - TD->getTypeAllocSize(cast(SrcElTy)->getElementType()); - - // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We - // allow either a mul, shift, or constant here. 
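Both GEP rewrites in this hunk (the "mul by 4" form above and the array form that follows) rest on the same byte-offset identity: indexing an i8* view of a typed pointer by Scale*N addresses the same byte as indexing the typed pointer by N. In source terms (my illustration; the scale 4 from the example assumes a 4-byte int, written portably with sizeof below):

#include <cassert>
#include <cstdint>

int main() {
  int arr[16] = {0};
  for (int64_t n = 0; n < 16; ++n) {
    // gep i8* (bitcast i32* %arr), Scale*n  ==  bitcast (gep i32* %arr, n)
    char *byByte = (char *)arr + n * (int64_t)sizeof(int);
    char *byElem = (char *)(arr + n);
    assert(byByte == byElem);
  }
  return 0;
}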
- Value *NewIdx = 0; - ConstantInt *Scale = 0; - if (ArrayEltSize == 1) { - NewIdx = GEP.getOperand(1); - Scale = ConstantInt::get(cast(NewIdx->getType()), 1); - } else if (ConstantInt *CI = dyn_cast(GEP.getOperand(1))) { - NewIdx = ConstantInt::get(CI->getType(), 1); - Scale = CI; - } else if (Instruction *Inst =dyn_cast(GEP.getOperand(1))){ - if (Inst->getOpcode() == Instruction::Shl && - isa(Inst->getOperand(1))) { - ConstantInt *ShAmt = cast(Inst->getOperand(1)); - uint32_t ShAmtVal = ShAmt->getLimitedValue(64); - Scale = ConstantInt::get(cast(Inst->getType()), - 1ULL << ShAmtVal); - NewIdx = Inst->getOperand(0); - } else if (Inst->getOpcode() == Instruction::Mul && - isa(Inst->getOperand(1))) { - Scale = cast(Inst->getOperand(1)); - NewIdx = Inst->getOperand(0); + TD->getTypeAllocSize(cast(SrcElTy)->getElementType()); + if (ResSize && ArrayEltSize % ResSize == 0) { + Value *Idx = GEP.getOperand(1); + unsigned BitWidth = Idx->getType()->getPrimitiveSizeInBits(); + uint64_t Scale = ArrayEltSize / ResSize; + + // Earlier transforms ensure that the index has type IntPtrType, which + // considerably simplifies the logic by eliminating implicit casts. + assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) && + "Index not cast to pointer width?"); + + bool NSW; + if (Value *NewIdx = Descale(Idx, APInt(BitWidth, Scale), NSW)) { + // Successfully decomposed Idx as NewIdx * Scale, form a new GEP. + // If the multiplication NewIdx * Scale may overflow then the new + // GEP may not be "inbounds". + Value *Off[2]; + Off[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext())); + Off[1] = NewIdx; + Value *NewGEP = GEP.isInBounds() && NSW ? + Builder->CreateInBoundsGEP(StrippedPtr, Off, GEP.getName()) : + Builder->CreateGEP(StrippedPtr, Off, GEP.getName()); + // The NewGEP must be pointer typed, so must the old one -> BitCast + return new BitCastInst(NewGEP, GEP.getType()); } } - - // If the index will be to exactly the right offset with the scale taken - // out, perform the transformation. Note, we don't know whether Scale is - // signed or not. We'll use unsigned version of division/modulo - // operation after making sure Scale doesn't have the sign bit set. - if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL && - Scale->getZExtValue() % ArrayEltSize == 0) { - Scale = ConstantInt::get(Scale->getType(), - Scale->getZExtValue() / ArrayEltSize); - if (Scale->getZExtValue() != 1) { - Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(), - false /*ZExt*/); - NewIdx = Builder->CreateMul(NewIdx, C, "idxscale"); - } - - // Insert the new GEP instruction. - Value *Idx[2]; - Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext())); - Idx[1] = NewIdx; - Value *NewGEP = GEP.isInBounds() ? - Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()): - Builder->CreateGEP(StrippedPtr, Idx, GEP.getName()); - // The NewGEP must be pointer typed, so must the old one -> BitCast - return new BitCastInst(NewGEP, GEP.getType()); - } } } } @@ -1068,7 +1317,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // If the bitcast is of an allocation, and the allocation will be // converted to match the type of the cast, don't touch this. if (isa(BCI->getOperand(0)) || - isAllocationFn(BCI->getOperand(0))) { + isAllocationFn(BCI->getOperand(0), TLI)) { // See if the bitcast simplifies, if so, don't nuke this GEP yet. 
if (Instruction *I = visitBitCast(*BCI)) { if (I != BCI) { @@ -1107,7 +1356,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { static bool -isAllocSiteRemovable(Instruction *AI, SmallVectorImpl &Users) { +isAllocSiteRemovable(Instruction *AI, SmallVectorImpl &Users, + const TargetLibraryInfo *TLI) { SmallVector Worklist; Worklist.push_back(AI); @@ -1163,7 +1413,7 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl &Users) { } } - if (isFreeCall(I)) { + if (isFreeCall(I, TLI)) { Users.push_back(I); continue; } @@ -1188,7 +1438,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) { // to null and free calls, delete the calls and replace the comparisons with // true or false as appropriate. SmallVector Users; - if (isAllocSiteRemovable(&MI, Users)) { + if (isAllocSiteRemovable(&MI, Users, TLI)) { for (unsigned i = 0, e = Users.size(); i != e; ++i) { Instruction *I = cast_or_null(&*Users[i]); if (!I) continue; @@ -1853,7 +2103,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { static bool AddReachableCodeToWorklist(BasicBlock *BB, SmallPtrSet &Visited, InstCombiner &IC, - const TargetData *TD, + const DataLayout *TD, const TargetLibraryInfo *TLI) { bool MadeIRChange = false; SmallVector Worklist; @@ -1872,7 +2122,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, Instruction *Inst = BBI++; // DCE instruction if trivially dead. - if (isInstructionTriviallyDead(Inst)) { + if (isInstructionTriviallyDead(Inst, TLI)) { ++NumDeadInst; DEBUG(errs() << "IC: DCE: " << *Inst << '\n'); Inst->eraseFromParent(); @@ -2002,7 +2252,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { if (I == 0) continue; // skip null values. // Check to see if we can DCE the instruction. - if (isInstructionTriviallyDead(I)) { + if (isInstructionTriviallyDead(I, TLI)) { DEBUG(errs() << "IC: DCE: " << *I << '\n'); EraseInstFromFunction(*I); ++NumDeadInst; @@ -2102,7 +2352,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { // If the instruction was modified, it's possible that it is now dead. // if so, remove it. - if (isInstructionTriviallyDead(I)) { + if (isInstructionTriviallyDead(I, TLI)) { EraseInstFromFunction(*I); } else { Worklist.Add(I); @@ -2117,9 +2367,27 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { return MadeIRChange; } +namespace { +class InstCombinerLibCallSimplifier : public LibCallSimplifier { + InstCombiner *IC; +public: + InstCombinerLibCallSimplifier(const DataLayout *TD, + const TargetLibraryInfo *TLI, + InstCombiner *IC) + : LibCallSimplifier(TD, TLI) { + this->IC = IC; + } + + /// replaceAllUsesWith - override so that instruction replacement + /// can be defined in terms of the instruction combiner framework. 
+ virtual void replaceAllUsesWith(Instruction *I, Value *With) const { + IC->ReplaceInstUsesWith(*I, With); + } +}; +} bool InstCombiner::runOnFunction(Function &F) { - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); /// Builder - This is an IRBuilder that automatically inserts new @@ -2129,6 +2397,9 @@ bool InstCombiner::runOnFunction(Function &F) { InstCombineIRInserter(Worklist)); Builder = &TheBuilder; + InstCombinerLibCallSimplifier TheSimplifier(TD, TLI, this); + Simplifier = &TheSimplifier; + bool EverMadeChange = false; // Lower dbg.declare intrinsics otherwise their value may be clobbered diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 17b83ce..b7be462 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -15,7 +15,7 @@ #define DEBUG_TYPE "asan" -#include "FunctionBlackList.h" +#include "BlackList.h" #include "llvm/Function.h" #include "llvm/IRBuilder.h" #include "llvm/InlineAsm.h" @@ -35,7 +35,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/system_error.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" @@ -61,6 +61,8 @@ static const int kAsanCtorAndCtorPriority = 1; static const char *kAsanReportErrorTemplate = "__asan_report_"; static const char *kAsanRegisterGlobalsName = "__asan_register_globals"; static const char *kAsanUnregisterGlobalsName = "__asan_unregister_globals"; +static const char *kAsanPoisonGlobalsName = "__asan_before_dynamic_init"; +static const char *kAsanUnpoisonGlobalsName = "__asan_after_dynamic_init"; static const char *kAsanInitName = "__asan_init"; static const char *kAsanHandleNoReturnName = "__asan_handle_no_return"; static const char *kAsanMappingOffsetName = "__asan_mapping_offset"; @@ -106,6 +108,8 @@ static cl::opt ClUseAfterReturn("asan-use-after-return", // This flag may need to be replaced with -f[no]asan-globals. static cl::opt ClGlobals("asan-globals", cl::desc("Handle global objects"), cl::Hidden, cl::init(true)); +static cl::opt ClInitializers("asan-initialization-order", + cl::desc("Handle C++ initializer order"), cl::Hidden, cl::init(false)); static cl::opt ClMemIntrin("asan-memintrin", cl::desc("Handle memset/memcpy/memmove"), cl::Hidden, cl::init(true)); // This flag may need to be replaced with -fasan-blacklist. @@ -144,41 +148,33 @@ static cl::opt ClDebugMax("asan-debug-max", cl::desc("Debug man inst"), cl::Hidden, cl::init(-1)); namespace { - -/// An object of this type is created while instrumenting every function. -struct AsanFunctionContext { - AsanFunctionContext(Function &Function) : F(Function) { } - - Function &F; -}; - /// AddressSanitizer: instrument the code in module to find memory bugs. 
-struct AddressSanitizer : public ModulePass { +struct AddressSanitizer : public FunctionPass { AddressSanitizer(); virtual const char *getPassName() const; - void instrumentMop(AsanFunctionContext &AFC, Instruction *I); - void instrumentAddress(AsanFunctionContext &AFC, - Instruction *OrigIns, IRBuilder<> &IRB, + void instrumentMop(Instruction *I); + void instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB, Value *Addr, uint32_t TypeSize, bool IsWrite); Value *createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, Value *ShadowValue, uint32_t TypeSize); Instruction *generateCrashCode(Instruction *InsertBefore, Value *Addr, bool IsWrite, size_t AccessSizeIndex); - bool instrumentMemIntrinsic(AsanFunctionContext &AFC, MemIntrinsic *MI); - void instrumentMemIntrinsicParam(AsanFunctionContext &AFC, - Instruction *OrigIns, Value *Addr, + bool instrumentMemIntrinsic(MemIntrinsic *MI); + void instrumentMemIntrinsicParam(Instruction *OrigIns, Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite); Value *memToShadow(Value *Shadow, IRBuilder<> &IRB); - bool handleFunction(Module &M, Function &F); + bool runOnFunction(Function &F); + void createInitializerPoisonCalls(Module &M, + Value *FirstAddr, Value *LastAddr); bool maybeInsertAsanInitAtFunctionEntry(Function &F); - bool poisonStackInFunction(Module &M, Function &F); - virtual bool runOnModule(Module &M); + bool poisonStackInFunction(Function &F); + virtual bool doInitialization(Module &M); + virtual bool doFinalization(Module &M); bool insertGlobalRedzones(Module &M); static char ID; // Pass identification, replacement for typeid private: - uint64_t getAllocaSizeInBytes(AllocaInst *AI) { Type *Ty = AI->getAllocatedType(); uint64_t SizeInBytes = TD->getTypeAllocSize(Ty); @@ -194,12 +190,15 @@ struct AddressSanitizer : public ModulePass { } Function *checkInterfaceFunction(Constant *FuncOrBitcast); + bool ShouldInstrumentGlobal(GlobalVariable *G); void PoisonStack(const ArrayRef &AllocaVec, IRBuilder<> IRB, Value *ShadowBase, bool DoPoison); bool LooksLikeCodeInBug11395(Instruction *I); + void FindDynamicInitializers(Module &M); + bool HasDynamicInitializer(GlobalVariable *G); LLVMContext *C; - TargetData *TD; + DataLayout *TD; uint64_t MappingOffset; int MappingScale; size_t RedzoneSize; @@ -208,11 +207,15 @@ struct AddressSanitizer : public ModulePass { Type *IntptrPtrTy; Function *AsanCtorFunction; Function *AsanInitFunction; + Function *AsanStackMallocFunc, *AsanStackFreeFunc; + Function *AsanHandleNoReturnFunc; Instruction *CtorInsertBefore; - OwningPtr BL; + OwningPtr BL; // This array is indexed by AccessIsWrite and log2(AccessSize). Function *AsanErrorCallback[2][kNumberOfAccessSizes]; InlineAsm *EmptyAsm; + SmallSet DynamicallyInitializedGlobals; + SmallSet GlobalsCreatedByAsan; }; } // namespace @@ -221,8 +224,8 @@ char AddressSanitizer::ID = 0; INITIALIZE_PASS(AddressSanitizer, "asan", "AddressSanitizer: detects use-after-free and out-of-bounds bugs.", false, false) -AddressSanitizer::AddressSanitizer() : ModulePass(ID) { } -ModulePass *llvm::createAddressSanitizerPass() { +AddressSanitizer::AddressSanitizer() : FunctionPass(ID) { } +FunctionPass *llvm::createAddressSanitizerPass() { return new AddressSanitizer(); } @@ -243,38 +246,6 @@ static GlobalVariable *createPrivateGlobalForString(Module &M, StringRef Str) { GlobalValue::PrivateLinkage, StrConst, ""); } -// Split the basic block and insert an if-then code. 
-// Before: -// Head -// Cmp -// Tail -// After: -// Head -// if (Cmp) -// ThenBlock -// Tail -// -// ThenBlock block is created and its terminator is returned. -// If Unreachable, ThenBlock is terminated with UnreachableInst, otherwise -// it is terminated with BranchInst to Tail. -static TerminatorInst *splitBlockAndInsertIfThen(Value *Cmp, bool Unreachable) { - Instruction *SplitBefore = cast(Cmp)->getNextNode(); - BasicBlock *Head = SplitBefore->getParent(); - BasicBlock *Tail = Head->splitBasicBlock(SplitBefore); - TerminatorInst *HeadOldTerm = Head->getTerminator(); - LLVMContext &C = Head->getParent()->getParent()->getContext(); - BasicBlock *ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail); - TerminatorInst *CheckTerm; - if (Unreachable) - CheckTerm = new UnreachableInst(C, ThenBlock); - else - CheckTerm = BranchInst::Create(Tail, ThenBlock); - BranchInst *HeadNewTerm = - BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cmp); - ReplaceInstWithInst(HeadOldTerm, HeadNewTerm); - return CheckTerm; -} - Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { // Shadow >> scale Shadow = IRB.CreateLShr(Shadow, MappingScale); @@ -286,12 +257,12 @@ Value *AddressSanitizer::memToShadow(Value *Shadow, IRBuilder<> &IRB) { } void AddressSanitizer::instrumentMemIntrinsicParam( - AsanFunctionContext &AFC, Instruction *OrigIns, + Instruction *OrigIns, Value *Addr, Value *Size, Instruction *InsertBefore, bool IsWrite) { // Check the first byte. { IRBuilder<> IRB(InsertBefore); - instrumentAddress(AFC, OrigIns, IRB, Addr, 8, IsWrite); + instrumentAddress(OrigIns, IRB, Addr, 8, IsWrite); } // Check the last byte. { @@ -301,13 +272,12 @@ void AddressSanitizer::instrumentMemIntrinsicParam( SizeMinusOne = IRB.CreateIntCast(SizeMinusOne, IntptrTy, false); Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); Value *AddrPlusSizeMinisOne = IRB.CreateAdd(AddrLong, SizeMinusOne); - instrumentAddress(AFC, OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite); + instrumentAddress(OrigIns, IRB, AddrPlusSizeMinisOne, 8, IsWrite); } } // Instrument memset/memmove/memcpy -bool AddressSanitizer::instrumentMemIntrinsic(AsanFunctionContext &AFC, - MemIntrinsic *MI) { +bool AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) { Value *Dst = MI->getDest(); MemTransferInst *MemTran = dyn_cast(MI); Value *Src = MemTran ? MemTran->getSource() : 0; @@ -323,12 +293,12 @@ bool AddressSanitizer::instrumentMemIntrinsic(AsanFunctionContext &AFC, Value *Cmp = IRB.CreateICmpNE(Length, Constant::getNullValue(Length->getType())); - InsertBefore = splitBlockAndInsertIfThen(Cmp, false); + InsertBefore = SplitBlockAndInsertIfThen(cast(Cmp), false); } - instrumentMemIntrinsicParam(AFC, MI, Dst, Length, InsertBefore, true); + instrumentMemIntrinsicParam(MI, Dst, Length, InsertBefore, true); if (Src) - instrumentMemIntrinsicParam(AFC, MI, Src, Length, InsertBefore, false); + instrumentMemIntrinsicParam(MI, Src, Length, InsertBefore, false); return true; } @@ -358,14 +328,50 @@ static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) { return NULL; } -void AddressSanitizer::instrumentMop(AsanFunctionContext &AFC, Instruction *I) { - bool IsWrite; +void AddressSanitizer::FindDynamicInitializers(Module& M) { + // Clang generates metadata identifying all dynamically initialized globals. 
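Stepping back to memToShadow() at the top of this hunk: it emits IR for the usual ASan shadow mapping, shadow = (addr >> MappingScale) + MappingOffset, with MappingScale and MappingOffset chosen later in doInitialization(). A plain C++ sketch of the same arithmetic follows; the default values shown are illustrative assumptions, not constants read from this patch.

#include <cstdint>

// shadow = (addr >> MappingScale) + MappingOffset, mirroring memToShadow().
// Scale 3 means one shadow byte describes an 8-byte application granule.
static uint64_t shadowFor(uint64_t Addr,
                          unsigned MappingScale = 3,            // assumed default
                          uint64_t MappingOffset = 1ULL << 44) { // assumed 64-bit offset
  return (Addr >> MappingScale) + MappingOffset;
}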
+ NamedMDNode *DynamicGlobals = + M.getNamedMetadata("llvm.asan.dynamically_initialized_globals"); + if (!DynamicGlobals) + return; + for (int i = 0, n = DynamicGlobals->getNumOperands(); i < n; ++i) { + MDNode *MDN = DynamicGlobals->getOperand(i); + assert(MDN->getNumOperands() == 1); + Value *VG = MDN->getOperand(0); + // The optimizer may optimize away a global entirely, in which case we + // cannot instrument access to it. + if (!VG) + continue; + + GlobalVariable *G = cast(VG); + DynamicallyInitializedGlobals.insert(G); + } +} +// Returns true if a global variable is initialized dynamically in this TU. +bool AddressSanitizer::HasDynamicInitializer(GlobalVariable *G) { + return DynamicallyInitializedGlobals.count(G); +} + +void AddressSanitizer::instrumentMop(Instruction *I) { + bool IsWrite = false; Value *Addr = isInterestingMemoryAccess(I, &IsWrite); assert(Addr); - if (ClOpt && ClOptGlobals && isa(Addr)) { - // We are accessing a global scalar variable. Nothing to catch here. - return; + if (ClOpt && ClOptGlobals) { + if (GlobalVariable *G = dyn_cast(Addr)) { + // If initialization order checking is disabled, a simple access to a + // dynamically initialized global is always valid. + if (!ClInitializers) + return; + // If a global variable does not have dynamic initialization we don't + // have to instrument it. However, if a global has external linkage, we + // assume it has dynamic initialization, as it may have an initializer + // in a different TU. + if (G->getLinkage() != GlobalVariable::ExternalLinkage && + !HasDynamicInitializer(G)) + return; + } } + Type *OrigPtrTy = Addr->getType(); Type *OrigTy = cast(OrigPtrTy)->getElementType(); @@ -379,7 +385,7 @@ void AddressSanitizer::instrumentMop(AsanFunctionContext &AFC, Instruction *I) { } IRBuilder<> IRB(I); - instrumentAddress(AFC, I, IRB, Addr, TypeSize, IsWrite); + instrumentAddress(I, IRB, Addr, TypeSize, IsWrite); } // Validate the result of Module::getOrInsertFunction called for an interface @@ -424,8 +430,7 @@ Value *AddressSanitizer::createSlowPathCmp(IRBuilder<> &IRB, Value *AddrLong, return IRB.CreateICmpSGE(LastAccessedByte, ShadowValue); } -void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC, - Instruction *OrigIns, +void AddressSanitizer::instrumentAddress(Instruction *OrigIns, IRBuilder<> &IRB, Value *Addr, uint32_t TypeSize, bool IsWrite) { Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy); @@ -444,17 +449,19 @@ void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC, TerminatorInst *CrashTerm = 0; if (ClAlwaysSlowPath || (TypeSize < 8 * Granularity)) { - TerminatorInst *CheckTerm = splitBlockAndInsertIfThen(Cmp, false); + TerminatorInst *CheckTerm = + SplitBlockAndInsertIfThen(cast(Cmp), false); assert(dyn_cast(CheckTerm)->isUnconditional()); BasicBlock *NextBB = CheckTerm->getSuccessor(0); IRB.SetInsertPoint(CheckTerm); Value *Cmp2 = createSlowPathCmp(IRB, AddrLong, ShadowValue, TypeSize); - BasicBlock *CrashBlock = BasicBlock::Create(*C, "", &AFC.F, NextBB); + BasicBlock *CrashBlock = + BasicBlock::Create(*C, "", NextBB->getParent(), NextBB); CrashTerm = new UnreachableInst(*C, CrashBlock); BranchInst *NewTerm = BranchInst::Create(CrashBlock, NextBB, Cmp2); ReplaceInstWithInst(CheckTerm, NewTerm); } else { - CrashTerm = splitBlockAndInsertIfThen(Cmp, true); + CrashTerm = SplitBlockAndInsertIfThen(cast(Cmp), true); } Instruction *Crash = @@ -462,68 +469,108 @@ void AddressSanitizer::instrumentAddress(AsanFunctionContext &AFC, Crash->setDebugLoc(OrigIns->getDebugLoc()); } +void 
AddressSanitizer::createInitializerPoisonCalls(Module &M, + Value *FirstAddr, + Value *LastAddr) { + // We do all of our poisoning and unpoisoning within _GLOBAL__I_a. + Function *GlobalInit = M.getFunction("_GLOBAL__I_a"); + // If that function is not present, this TU contains no globals, or they have + // all been optimized away + if (!GlobalInit) + return; + + // Set up the arguments to our poison/unpoison functions. + IRBuilder<> IRB(GlobalInit->begin()->getFirstInsertionPt()); + + // Declare our poisoning and unpoisoning functions. + Function *AsanPoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanPoisonGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + AsanPoisonGlobals->setLinkage(Function::ExternalLinkage); + Function *AsanUnpoisonGlobals = checkInterfaceFunction(M.getOrInsertFunction( + kAsanUnpoisonGlobalsName, IRB.getVoidTy(), NULL)); + AsanUnpoisonGlobals->setLinkage(Function::ExternalLinkage); + + // Add a call to poison all external globals before the given function starts. + IRB.CreateCall2(AsanPoisonGlobals, FirstAddr, LastAddr); + + // Add calls to unpoison all globals before each return instruction. + for (Function::iterator I = GlobalInit->begin(), E = GlobalInit->end(); + I != E; ++I) { + if (ReturnInst *RI = dyn_cast(I->getTerminator())) { + CallInst::Create(AsanUnpoisonGlobals, "", RI); + } + } +} + +bool AddressSanitizer::ShouldInstrumentGlobal(GlobalVariable *G) { + Type *Ty = cast(G->getType())->getElementType(); + DEBUG(dbgs() << "GLOBAL: " << *G << "\n"); + + if (BL->isIn(*G)) return false; + if (!Ty->isSized()) return false; + if (!G->hasInitializer()) return false; + if (GlobalsCreatedByAsan.count(G)) return false; // Our own global. + // Touch only those globals that will not be defined in other modules. + // Don't handle ODR type linkages since other modules may be built w/o asan. + if (G->getLinkage() != GlobalVariable::ExternalLinkage && + G->getLinkage() != GlobalVariable::PrivateLinkage && + G->getLinkage() != GlobalVariable::InternalLinkage) + return false; + // Two problems with thread-locals: + // - The address of the main thread's copy can't be computed at link-time. + // - Need to poison all copies, not just the main thread's one. + if (G->isThreadLocal()) + return false; + // For now, just ignore this Alloca if the alignment is large. + if (G->getAlignment() > RedzoneSize) return false; + + // Ignore all the globals with the names starting with "\01L_OBJC_". + // Many of those are put into the .cstring section. The linker compresses + // that section by removing the spare \0s after the string terminator, so + // our redzones get broken. + if ((G->getName().find("\01L_OBJC_") == 0) || + (G->getName().find("\01l_OBJC_") == 0)) { + DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G); + return false; + } + + if (G->hasSection()) { + StringRef Section(G->getSection()); + // Ignore the globals from the __OBJC section. The ObjC runtime assumes + // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to + // them. 
+ if ((Section.find("__OBJC,") == 0) || + (Section.find("__DATA, __objc_") == 0)) { + DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G); + return false; + } + // See http://code.google.com/p/address-sanitizer/issues/detail?id=32 + // Constant CFString instances are compiled in the following way: + // -- the string buffer is emitted into + // __TEXT,__cstring,cstring_literals + // -- the constant NSConstantString structure referencing that buffer + // is placed into __DATA,__cfstring + // Therefore there's no point in placing redzones into __DATA,__cfstring. + // Moreover, it causes the linker to crash on OS X 10.7 + if (Section.find("__DATA,__cfstring") == 0) { + DEBUG(dbgs() << "Ignoring CFString: " << *G); + return false; + } + } + + return true; +} + // This function replaces all global variables with new variables that have // trailing redzones. It also creates a function that poisons // redzones and inserts this function into llvm.global_ctors. bool AddressSanitizer::insertGlobalRedzones(Module &M) { SmallVector GlobalsToChange; - for (Module::GlobalListType::iterator G = M.getGlobalList().begin(), - E = M.getGlobalList().end(); G != E; ++G) { - Type *Ty = cast(G->getType())->getElementType(); - DEBUG(dbgs() << "GLOBAL: " << *G); - - if (!Ty->isSized()) continue; - if (!G->hasInitializer()) continue; - // Touch only those globals that will not be defined in other modules. - // Don't handle ODR type linkages since other modules may be built w/o asan. - if (G->getLinkage() != GlobalVariable::ExternalLinkage && - G->getLinkage() != GlobalVariable::PrivateLinkage && - G->getLinkage() != GlobalVariable::InternalLinkage) - continue; - // Two problems with thread-locals: - // - The address of the main thread's copy can't be computed at link-time. - // - Need to poison all copies, not just the main thread's one. - if (G->isThreadLocal()) - continue; - // For now, just ignore this Alloca if the alignment is large. - if (G->getAlignment() > RedzoneSize) continue; - - // Ignore all the globals with the names starting with "\01L_OBJC_". - // Many of those are put into the .cstring section. The linker compresses - // that section by removing the spare \0s after the string terminator, so - // our redzones get broken. - if ((G->getName().find("\01L_OBJC_") == 0) || - (G->getName().find("\01l_OBJC_") == 0)) { - DEBUG(dbgs() << "Ignoring \\01L_OBJC_* global: " << *G); - continue; - } - - if (G->hasSection()) { - StringRef Section(G->getSection()); - // Ignore the globals from the __OBJC section. The ObjC runtime assumes - // those conform to /usr/lib/objc/runtime.h, so we can't add redzones to - // them. - if ((Section.find("__OBJC,") == 0) || - (Section.find("__DATA, __objc_") == 0)) { - DEBUG(dbgs() << "Ignoring ObjC runtime global: " << *G); - continue; - } - // See http://code.google.com/p/address-sanitizer/issues/detail?id=32 - // Constant CFString instances are compiled in the following way: - // -- the string buffer is emitted into - // __TEXT,__cstring,cstring_literals - // -- the constant NSConstantString structure referencing that buffer - // is placed into __DATA,__cfstring - // Therefore there's no point in placing redzones into __DATA,__cfstring. 
- // Moreover, it causes the linker to crash on OS X 10.7 - if (Section.find("__DATA,__cfstring") == 0) { - DEBUG(dbgs() << "Ignoring CFString: " << *G); - continue; - } - } - - GlobalsToChange.push_back(G); + for (Module::GlobalListType::iterator G = M.global_begin(), + E = M.global_end(); G != E; ++G) { + if (ShouldInstrumentGlobal(G)) + GlobalsToChange.push_back(G); } size_t n = GlobalsToChange.size(); @@ -534,13 +581,22 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { // size_t size; // size_t size_with_redzone; // const char *name; + // size_t has_dynamic_init; // We initialize an array of such structures and pass it to a run-time call. StructType *GlobalStructTy = StructType::get(IntptrTy, IntptrTy, - IntptrTy, IntptrTy, NULL); - SmallVector Initializers(n); + IntptrTy, IntptrTy, + IntptrTy, NULL); + SmallVector Initializers(n), DynamicInit; IRBuilder<> IRB(CtorInsertBefore); + if (ClInitializers) + FindDynamicInitializers(M); + + // The addresses of the first and last dynamically initialized globals in + // this TU. Used in initialization order checking. + Value *FirstDynamic = 0, *LastDynamic = 0; + for (size_t i = 0; i < n; i++) { GlobalVariable *G = GlobalsToChange[i]; PointerType *PtrTy = cast(G->getType()); @@ -549,6 +605,10 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { uint64_t RightRedzoneSize = RedzoneSize + (RedzoneSize - (SizeInBytes % RedzoneSize)); Type *RightRedZoneTy = ArrayType::get(IRB.getInt8Ty(), RightRedzoneSize); + // Determine whether this global should be poisoned in initialization. + bool GlobalHasDynamicInitializer = HasDynamicInitializer(G); + // Don't check initialization order if this global is blacklisted. + GlobalHasDynamicInitializer &= !BL->isInInit(*G); StructType *NewTy = StructType::get(Ty, RightRedZoneTy, NULL); Constant *NewInitializer = ConstantStruct::get( @@ -583,8 +643,17 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { ConstantInt::get(IntptrTy, SizeInBytes), ConstantInt::get(IntptrTy, SizeInBytes + RightRedzoneSize), ConstantExpr::getPointerCast(Name, IntptrTy), + ConstantInt::get(IntptrTy, GlobalHasDynamicInitializer), NULL); - DEBUG(dbgs() << "NEW GLOBAL:\n" << *NewGlobal); + + // Populate the first and last globals declared in this TU. + if (ClInitializers && GlobalHasDynamicInitializer) { + LastDynamic = ConstantExpr::getPointerCast(NewGlobal, IntptrTy); + if (FirstDynamic == 0) + FirstDynamic = LastDynamic; + } + + DEBUG(dbgs() << "NEW GLOBAL: " << *NewGlobal << "\n"); } ArrayType *ArrayOfGlobalStructTy = ArrayType::get(GlobalStructTy, n); @@ -592,8 +661,13 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { M, ArrayOfGlobalStructTy, false, GlobalVariable::PrivateLinkage, ConstantArray::get(ArrayOfGlobalStructTy, Initializers), ""); + // Create calls for poisoning before initializers run and unpoisoning after. + if (ClInitializers && FirstDynamic && LastDynamic) + createInitializerPoisonCalls(M, FirstDynamic, LastDynamic); + Function *AsanRegisterGlobals = checkInterfaceFunction(M.getOrInsertFunction( - kAsanRegisterGlobalsName, IRB.getVoidTy(), IntptrTy, IntptrTy, NULL)); + kAsanRegisterGlobalsName, IRB.getVoidTy(), + IntptrTy, IntptrTy, NULL)); AsanRegisterGlobals->setLinkage(Function::ExternalLinkage); IRB.CreateCall2(AsanRegisterGlobals, @@ -623,12 +697,13 @@ bool AddressSanitizer::insertGlobalRedzones(Module &M) { } // virtual -bool AddressSanitizer::runOnModule(Module &M) { +bool AddressSanitizer::doInitialization(Module &M) { // Initialize the private fields. 
No one has accessed them before. - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); + if (!TD) return false; - BL.reset(new FunctionBlackList(ClBlackListFile)); + BL.reset(new BlackList(ClBlackListFile)); C = &(M.getContext()); LongSize = TD->getPointerSizeInBits(); @@ -656,17 +731,27 @@ bool AddressSanitizer::runOnModule(Module &M) { std::string FunctionName = std::string(kAsanReportErrorTemplate) + (AccessIsWrite ? "store" : "load") + itostr(1 << AccessSizeIndex); // If we are merging crash callbacks, they have two parameters. - AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = cast( - M.getOrInsertFunction(FunctionName, IRB.getVoidTy(), IntptrTy, NULL)); + AsanErrorCallback[AccessIsWrite][AccessSizeIndex] = + checkInterfaceFunction(M.getOrInsertFunction( + FunctionName, IRB.getVoidTy(), IntptrTy, NULL)); } } + + AsanStackMallocFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL)); + AsanStackFreeFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanStackFreeName, IRB.getVoidTy(), + IntptrTy, IntptrTy, IntptrTy, NULL)); + AsanHandleNoReturnFunc = checkInterfaceFunction(M.getOrInsertFunction( + kAsanHandleNoReturnName, IRB.getVoidTy(), NULL)); + // We insert an empty inline asm after __asan_report* to avoid callback merge. EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), StringRef(""), StringRef(""), /*hasSideEffects=*/true); llvm::Triple targetTriple(M.getTargetTriple()); - bool isAndroid = targetTriple.getEnvironment() == llvm::Triple::ANDROIDEABI; + bool isAndroid = targetTriple.getEnvironment() == llvm::Triple::Android; MappingOffset = isAndroid ? kDefaultShadowOffsetAndroid : (LongSize == 32 ? kDefaultShadowOffset32 : kDefaultShadowOffset64); @@ -686,10 +771,6 @@ bool AddressSanitizer::runOnModule(Module &M) { // For scales 6 and 7, the redzone has to be 64 and 128 bytes respectively. RedzoneSize = std::max(32, (int)(1 << MappingScale)); - bool Res = false; - - if (ClGlobals) - Res |= insertGlobalRedzones(M); if (ClMappingOffsetLog >= 0) { // Tell the run-time the current values of mapping offset and scale. @@ -709,17 +790,20 @@ bool AddressSanitizer::runOnModule(Module &M) { IRB.CreateLoad(asan_mapping_scale, true); } - - for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) { - if (F->isDeclaration()) continue; - Res |= handleFunction(M, *F); - } - appendToGlobalCtors(M, AsanCtorFunction, kAsanCtorAndCtorPriority); - return Res; + return true; +} + +bool AddressSanitizer::doFinalization(Module &M) { + // We transform the globals at the very end so that the optimization analysis + // works on the original globals. + if (ClGlobals) + return insertGlobalRedzones(M); + return false; } + bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { // For each NSObject descendant having a +load method, this method is invoked // by the ObjC runtime before any of the static constructors is called. @@ -736,19 +820,22 @@ bool AddressSanitizer::maybeInsertAsanInitAtFunctionEntry(Function &F) { return false; } -bool AddressSanitizer::handleFunction(Module &M, Function &F) { +bool AddressSanitizer::runOnFunction(Function &F) { if (BL->isIn(F)) return false; if (&F == AsanCtorFunction) return false; + DEBUG(dbgs() << "ASAN instrumenting:\n" << F << "\n"); // If needed, insert __asan_init before checking for AddressSafety attr. 
maybeInsertAsanInitAtFunctionEntry(F); - if (!F.hasFnAttr(Attribute::AddressSafety)) return false; + if (!F.getFnAttributes().hasAttribute(Attributes::AddressSafety)) + return false; if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) return false; - // We want to instrument every address only once per basic block - // (unless there are calls between uses). + + // We want to instrument every address only once per basic block (unless there + // are calls between uses). SmallSet TempsToInstrument; SmallVector ToInstrument; SmallVector NoReturnCalls; @@ -786,8 +873,6 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) { } } - AsanFunctionContext AFC(F); - // Instrument. int NumInstrumented = 0; for (size_t i = 0, n = ToInstrument.size(); i != n; i++) { @@ -795,25 +880,23 @@ bool AddressSanitizer::handleFunction(Module &M, Function &F) { if (ClDebugMin < 0 || ClDebugMax < 0 || (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) { if (isInterestingMemoryAccess(Inst, &IsWrite)) - instrumentMop(AFC, Inst); + instrumentMop(Inst); else - instrumentMemIntrinsic(AFC, cast(Inst)); + instrumentMemIntrinsic(cast(Inst)); } NumInstrumented++; } - DEBUG(dbgs() << F); - - bool ChangedStack = poisonStackInFunction(M, F); + bool ChangedStack = poisonStackInFunction(F); // We must unpoison the stack before every NoReturn call (throw, _exit, etc). // See e.g. http://code.google.com/p/address-sanitizer/issues/detail?id=37 for (size_t i = 0, n = NoReturnCalls.size(); i != n; i++) { Instruction *CI = NoReturnCalls[i]; IRBuilder<> IRB(CI); - IRB.CreateCall(M.getOrInsertFunction(kAsanHandleNoReturnName, - IRB.getVoidTy(), NULL)); + IRB.CreateCall(AsanHandleNoReturnFunc); } + DEBUG(dbgs() << "ASAN done instrumenting:\n" << F << "\n"); return NumInstrumented > 0 || ChangedStack || !NoReturnCalls.empty(); } @@ -926,7 +1009,7 @@ bool AddressSanitizer::LooksLikeCodeInBug11395(Instruction *I) { // compiler hoists the load of the shadow value somewhere too high. // This causes asan to report a non-existing bug on 453.povray. // It sounds like an LLVM bug. -bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { +bool AddressSanitizer::poisonStackInFunction(Function &F) { if (!ClStack) return false; SmallVector AllocaVec; SmallVector RetVec; @@ -976,8 +1059,6 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { Value *LocalStackBase = OrigStackBase; if (DoStackMalloc) { - Value *AsanStackMallocFunc = M.getOrInsertFunction( - kAsanStackMallocName, IntptrTy, IntptrTy, IntptrTy, NULL); LocalStackBase = IRB.CreateCall2(AsanStackMallocFunc, ConstantInt::get(IntptrTy, LocalStackSize), OrigStackBase); } @@ -1012,22 +1093,16 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { Value *BasePlus1 = IRB.CreateAdd(LocalStackBase, ConstantInt::get(IntptrTy, LongSize/8)); BasePlus1 = IRB.CreateIntToPtr(BasePlus1, IntptrPtrTy); - Value *Description = IRB.CreatePointerCast( - createPrivateGlobalForString(M, StackDescription.str()), - IntptrTy); + GlobalVariable *StackDescriptionGlobal = + createPrivateGlobalForString(*F.getParent(), StackDescription.str()); + GlobalsCreatedByAsan.insert(StackDescriptionGlobal); + Value *Description = IRB.CreatePointerCast(StackDescriptionGlobal, IntptrTy); IRB.CreateStore(Description, BasePlus1); // Poison the stack redzones at the entry. 
Value *ShadowBase = memToShadow(LocalStackBase, IRB); PoisonStack(ArrayRef(AllocaVec), IRB, ShadowBase, true); - Value *AsanStackFreeFunc = NULL; - if (DoStackMalloc) { - AsanStackFreeFunc = M.getOrInsertFunction( - kAsanStackFreeName, IRB.getVoidTy(), - IntptrTy, IntptrTy, IntptrTy, NULL); - } - // Unpoison the stack before all ret instructions. for (size_t i = 0, n = RetVec.size(); i < n; i++) { Instruction *Ret = RetVec[i]; @@ -1046,6 +1121,10 @@ bool AddressSanitizer::poisonStackInFunction(Module &M, Function &F) { } } + // We are done. Remove the old unused alloca instructions. + for (size_t i = 0, n = AllocaVec.size(); i < n; i++) + AllocaVec[i]->eraseFromParent(); + if (ClDebugStack) { DEBUG(dbgs() << F); } diff --git a/lib/Transforms/Instrumentation/BlackList.cpp b/lib/Transforms/Instrumentation/BlackList.cpp new file mode 100644 index 0000000..ef34b8a --- /dev/null +++ b/lib/Transforms/Instrumentation/BlackList.cpp @@ -0,0 +1,105 @@ +//===-- BlackList.cpp - blacklist for sanitizers --------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is a utility class for instrumentation passes (like AddressSanitizer +// or ThreadSanitizer) to avoid instrumenting some functions or global +// variables based on a user-supplied blacklist. +// +//===----------------------------------------------------------------------===// + +#include +#include + +#include "BlackList.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Module.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/Regex.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/system_error.h" + +namespace llvm { + +BlackList::BlackList(const StringRef Path) { + // Validate and open blacklist file. + if (!Path.size()) return; + OwningPtr File; + if (error_code EC = MemoryBuffer::getFile(Path, File)) { + report_fatal_error("Can't open blacklist file: " + Path + ": " + + EC.message()); + } + + // Iterate through each line in the blacklist file. + SmallVector Lines; + SplitString(File.take()->getBuffer(), Lines, "\n\r"); + StringMap Regexps; + for (SmallVector::iterator I = Lines.begin(), E = Lines.end(); + I != E; ++I) { + // Ignore empty lines and lines starting with "#" + if (I->empty() || I->startswith("#")) + continue; + // Get our prefix and unparsed regexp. + std::pair SplitLine = I->split(":"); + StringRef Prefix = SplitLine.first; + std::string Regexp = SplitLine.second; + + // Replace * with .* + for (size_t pos = 0; (pos = Regexp.find("*", pos)) != std::string::npos; + pos += strlen(".*")) { + Regexp.replace(pos, strlen("*"), ".*"); + } + + // Check that the regexp is valid. + Regex CheckRE(Regexp); + std::string Error; + if (!CheckRE.isValid(Error)) { + report_fatal_error("malformed blacklist regex: " + SplitLine.second + + ": " + Error); + } + + // Add this regexp into the proper group by its prefix. + if (Regexps[Prefix].size()) + Regexps[Prefix] += "|"; + Regexps[Prefix] += Regexp; + } + + // Iterate through each of the prefixes, and create Regexs for them. 
+ for (StringMap::iterator I = Regexps.begin(), E = Regexps.end(); + I != E; ++I) { + Entries[I->getKey()] = new Regex(I->getValue()); + } +} + +bool BlackList::isIn(const Function &F) { + return isIn(*F.getParent()) || inSection("fun", F.getName()); +} + +bool BlackList::isIn(const GlobalVariable &G) { + return isIn(*G.getParent()) || inSection("global", G.getName()); +} + +bool BlackList::isIn(const Module &M) { + return inSection("src", M.getModuleIdentifier()); +} + +bool BlackList::isInInit(const GlobalVariable &G) { + return isIn(*G.getParent()) || inSection("global-init", G.getName()); +} + +bool BlackList::inSection(const StringRef Section, + const StringRef Query) { + Regex *FunctionRegex = Entries[Section]; + return FunctionRegex ? FunctionRegex->match(Query) : false; +} + +} // namespace llvm diff --git a/lib/Transforms/Instrumentation/BlackList.h b/lib/Transforms/Instrumentation/BlackList.h new file mode 100644 index 0000000..f3c05a5 --- /dev/null +++ b/lib/Transforms/Instrumentation/BlackList.h @@ -0,0 +1,57 @@ +//===-- BlackList.h - blacklist for sanitizers ------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +//===----------------------------------------------------------------------===// +// +// This is a utility class for instrumentation passes (like AddressSanitizer +// or ThreadSanitizer) to avoid instrumenting some functions or global +// variables based on a user-supplied blacklist. +// +// The blacklist disables instrumentation of various functions and global +// variables. Each line contains a prefix, followed by a wild card expression. +// Empty lines and lines starting with "#" are ignored. +// --- +// # Blacklisted items: +// fun:*_ZN4base6subtle* +// global:*global_with_bad_access_or_initialization* +// global-init:*global_with_initialization_issues* +// src:file_with_tricky_code.cc +// --- +// Note that the wild card is in fact an llvm::Regex, but * is automatically +// replaced with .* +// This is similar to the "ignore" feature of ThreadSanitizer. +// http://code.google.com/p/data-race-test/wiki/ThreadSanitizerIgnores +// +//===----------------------------------------------------------------------===// +// + +#include "llvm/ADT/StringMap.h" + +namespace llvm { +class Function; +class GlobalVariable; +class Module; +class Regex; +class StringRef; + +class BlackList { + public: + BlackList(const StringRef Path); + // Returns whether either this function or it's source file are blacklisted. + bool isIn(const Function &F); + // Returns whether either this global or it's source file are blacklisted. + bool isIn(const GlobalVariable &G); + // Returns whether this module is blacklisted by filename. + bool isIn(const Module &M); + // Returns whether a global should be excluded from initialization checking. 
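A usage illustration for the BlackList class defined above (not part of this patch): a sanitizer pass builds one from a user-supplied file in the prefix:wildcard format documented in BlackList.h and consults it before instrumenting. The file name and helper below are hypothetical.

// Hypothetical blacklist file, e.g. "my_blacklist.txt":
//   # entries use prefix:wildcard, '*' becomes '.*'
//   fun:*_ZN4base6subtle*
//   global-init:*tricky_global*
//   src:third_party_file.cc
#include "BlackList.h"
#include "llvm/Function.h"

static bool shouldInstrument(const llvm::Function &F, llvm::BlackList &BL) {
  // isIn(F) also reports a match if F's whole source file is listed via "src:".
  return !BL.isIn(F);
}

// Construction, e.g. once per module:
//   llvm::BlackList BL("my_blacklist.txt");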
+ bool isInInit(const GlobalVariable &G); + private: + StringMap Entries; + + bool inSection(const StringRef Section, const StringRef Query); +}; + +} // namespace llvm diff --git a/lib/Transforms/Instrumentation/BoundsChecking.cpp b/lib/Transforms/Instrumentation/BoundsChecking.cpp index 09e0f14..7810b1b 100644 --- a/lib/Transforms/Instrumentation/BoundsChecking.cpp +++ b/lib/Transforms/Instrumentation/BoundsChecking.cpp @@ -23,7 +23,8 @@ #include "llvm/Support/InstIterator.h" #include "llvm/Support/TargetFolder.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Instrumentation.h" using namespace llvm; @@ -47,11 +48,13 @@ namespace { virtual bool runOnFunction(Function &F); virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired(); + AU.addRequired(); + AU.addRequired(); } private: - const TargetData *TD; + const DataLayout *TD; + const TargetLibraryInfo *TLI; ObjectSizeOffsetEvaluator *ObjSizeEval; BuilderTy *Builder; Instruction *Inst; @@ -140,7 +143,7 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) { Value *Offset = SizeOffset.second; ConstantInt *SizeCI = dyn_cast(Size); - IntegerType *IntTy = TD->getIntPtrType(Inst->getContext()); + Type *IntTy = TD->getIntPtrType(Ptr->getType()); Value *NeededSizeVal = ConstantInt::get(IntTy, NeededSize); // three checks are required to ensure safety: @@ -165,12 +168,13 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) { } bool BoundsChecking::runOnFunction(Function &F) { - TD = &getAnalysis(); + TD = &getAnalysis(); + TLI = &getAnalysis(); TrapBB = 0; BuilderTy TheBuilder(F.getContext(), TargetFolder(TD)); Builder = &TheBuilder; - ObjectSizeOffsetEvaluator TheObjSizeEval(TD, F.getContext()); + ObjectSizeOffsetEvaluator TheObjSizeEval(TD, TLI, F.getContext()); ObjSizeEval = &TheObjSizeEval; // check HANDLE_MEMORY_INST in include/llvm/Instruction.def for memory diff --git a/lib/Transforms/Instrumentation/CMakeLists.txt b/lib/Transforms/Instrumentation/CMakeLists.txt index 00de882..058f68c 100644 --- a/lib/Transforms/Instrumentation/CMakeLists.txt +++ b/lib/Transforms/Instrumentation/CMakeLists.txt @@ -1,8 +1,8 @@ add_llvm_library(LLVMInstrumentation AddressSanitizer.cpp + BlackList.cpp BoundsChecking.cpp EdgeProfiling.cpp - FunctionBlackList.cpp GCOVProfiling.cpp Instrumentation.cpp OptimalEdgeProfiling.cpp diff --git a/lib/Transforms/Instrumentation/FunctionBlackList.cpp b/lib/Transforms/Instrumentation/FunctionBlackList.cpp deleted file mode 100644 index 188ea4d..0000000 --- a/lib/Transforms/Instrumentation/FunctionBlackList.cpp +++ /dev/null @@ -1,79 +0,0 @@ -//===-- FunctionBlackList.cpp - blacklist of functions --------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This is a utility class for instrumentation passes (like AddressSanitizer -// or ThreadSanitizer) to avoid instrumenting some functions based on -// user-supplied blacklist. 
-// -//===----------------------------------------------------------------------===// - -#include "FunctionBlackList.h" -#include "llvm/ADT/OwningPtr.h" -#include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/StringExtras.h" -#include "llvm/Function.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/Regex.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Support/system_error.h" - -namespace llvm { - -FunctionBlackList::FunctionBlackList(const std::string &Path) { - Functions = NULL; - const char *kFunPrefix = "fun:"; - if (!Path.size()) return; - std::string Fun; - - OwningPtr File; - if (error_code EC = MemoryBuffer::getFile(Path.c_str(), File)) { - report_fatal_error("Can't open blacklist file " + Path + ": " + - EC.message()); - } - MemoryBuffer *Buff = File.take(); - const char *Data = Buff->getBufferStart(); - size_t DataLen = Buff->getBufferSize(); - SmallVector Lines; - SplitString(StringRef(Data, DataLen), Lines, "\n\r"); - for (size_t i = 0, numLines = Lines.size(); i < numLines; i++) { - if (Lines[i].startswith(kFunPrefix)) { - std::string ThisFunc = Lines[i].substr(strlen(kFunPrefix)); - std::string ThisFuncRE; - // add ThisFunc replacing * with .* - for (size_t j = 0, n = ThisFunc.size(); j < n; j++) { - if (ThisFunc[j] == '*') - ThisFuncRE += '.'; - ThisFuncRE += ThisFunc[j]; - } - // Check that the regexp is valid. - Regex CheckRE(ThisFuncRE); - std::string Error; - if (!CheckRE.isValid(Error)) - report_fatal_error("malformed blacklist regex: " + ThisFunc + - ": " + Error); - // Append to the final regexp. - if (Fun.size()) - Fun += "|"; - Fun += ThisFuncRE; - } - } - if (Fun.size()) { - Functions = new Regex(Fun); - } -} - -bool FunctionBlackList::isIn(const Function &F) { - if (Functions) { - bool Res = Functions->match(F.getName()); - return Res; - } - return false; -} - -} // namespace llvm diff --git a/lib/Transforms/Instrumentation/FunctionBlackList.h b/lib/Transforms/Instrumentation/FunctionBlackList.h deleted file mode 100644 index c1239b9..0000000 --- a/lib/Transforms/Instrumentation/FunctionBlackList.h +++ /dev/null @@ -1,37 +0,0 @@ -//===-- FunctionBlackList.cpp - blacklist of functions ----------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -//===----------------------------------------------------------------------===// -// -// This is a utility class for instrumentation passes (like AddressSanitizer -// or ThreadSanitizer) to avoid instrumenting some functions based on -// user-supplied blacklist. -// -//===----------------------------------------------------------------------===// -// - -#include - -namespace llvm { -class Function; -class Regex; - -// Blacklisted functions are not instrumented. -// The blacklist file contains one or more lines like this: -// --- -// fun:FunctionWildCard -// --- -// This is similar to the "ignore" feature of ThreadSanitizer. 
-// http://code.google.com/p/data-race-test/wiki/ThreadSanitizerIgnores -class FunctionBlackList { - public: - FunctionBlackList(const std::string &Path); - bool isIn(const Function &F); - private: - Regex *Functions; -}; - -} // namespace llvm diff --git a/lib/Transforms/Instrumentation/GCOVProfiling.cpp b/lib/Transforms/Instrumentation/GCOVProfiling.cpp index 264a6a6..e9192e5 100644 --- a/lib/Transforms/Instrumentation/GCOVProfiling.cpp +++ b/lib/Transforms/Instrumentation/GCOVProfiling.cpp @@ -88,11 +88,11 @@ namespace { // Add the function to write out all our counters to the global destructor // list. - void insertCounterWriteout(SmallVector, 8> &); + void insertCounterWriteout(ArrayRef >); void insertIndirectCounterIncrement(); + void insertFlush(ArrayRef >); - std::string mangleName(DICompileUnit CU, std::string NewStem); + std::string mangleName(DICompileUnit CU, const char *NewStem); bool EmitNotes; bool EmitData; @@ -329,7 +329,7 @@ namespace { }; } -std::string GCOVProfiler::mangleName(DICompileUnit CU, std::string NewStem) { +std::string GCOVProfiler::mangleName(DICompileUnit CU, const char *NewStem) { if (NamedMDNode *GCov = M->getNamedMetadata("llvm.gcov")) { for (int i = 0, e = GCov->getNumOperands(); i != e; ++i) { MDNode *N = GCov->getOperand(i); @@ -519,6 +519,7 @@ bool GCOVProfiler::emitProfileArcs() { } insertCounterWriteout(CountersBySP); + insertFlush(CountersBySP); } if (InsertIndCounterIncrCode) @@ -630,14 +631,15 @@ GlobalVariable *GCOVProfiler::getEdgeStateValue() { } void GCOVProfiler::insertCounterWriteout( - SmallVector, 8> &CountersBySP) { - FunctionType *WriteoutFTy = - FunctionType::get(Type::getVoidTy(*Ctx), false); - Function *WriteoutF = Function::Create(WriteoutFTy, - GlobalValue::InternalLinkage, - "__llvm_gcov_writeout", M); + ArrayRef > CountersBySP) { + FunctionType *WriteoutFTy = FunctionType::get(Type::getVoidTy(*Ctx), false); + Function *WriteoutF = M->getFunction("__llvm_gcov_writeout"); + if (!WriteoutF) + WriteoutF = Function::Create(WriteoutFTy, GlobalValue::InternalLinkage, + "__llvm_gcov_writeout", M); WriteoutF->setUnnamedAddr(true); - BasicBlock *BB = BasicBlock::Create(*Ctx, "", WriteoutF); + + BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", WriteoutF); IRBuilder<> Builder(BB); Constant *StartFile = getStartFileFunc(); @@ -648,11 +650,11 @@ void GCOVProfiler::insertCounterWriteout( NamedMDNode *CU_Nodes = M->getNamedMetadata("llvm.dbg.cu"); if (CU_Nodes) { for (unsigned i = 0, e = CU_Nodes->getNumOperands(); i != e; ++i) { - DICompileUnit compile_unit(CU_Nodes->getOperand(i)); - std::string FilenameGcda = mangleName(compile_unit, "gcda"); + DICompileUnit CU(CU_Nodes->getOperand(i)); + std::string FilenameGcda = mangleName(CU, "gcda"); Builder.CreateCall(StartFile, Builder.CreateGlobalStringPtr(FilenameGcda)); - for (SmallVector, 8>::iterator + for (ArrayRef >::iterator I = CountersBySP.begin(), E = CountersBySP.end(); I != E; ++I) { DISubprogram SP(I->second); @@ -680,7 +682,7 @@ void GCOVProfiler::insertCounterWriteout( "__llvm_gcov_init", M); F->setUnnamedAddr(true); F->setLinkage(GlobalValue::InternalLinkage); - F->addFnAttr(Attribute::NoInline); + F->addFnAttr(Attributes::NoInline); BB = BasicBlock::Create(*Ctx, "entry", F); Builder.SetInsertPoint(BB); @@ -699,7 +701,7 @@ void GCOVProfiler::insertIndirectCounterIncrement() { cast(GCOVProfiler::getIncrementIndirectCounterFunc()); Fn->setUnnamedAddr(true); Fn->setLinkage(GlobalValue::InternalLinkage); - Fn->addFnAttr(Attribute::NoInline); + Fn->addFnAttr(Attributes::NoInline); Type 
*Int32Ty = Type::getInt32Ty(*Ctx); Type *Int64Ty = Type::getInt64Ty(*Ctx); @@ -745,3 +747,42 @@ void GCOVProfiler::insertIndirectCounterIncrement() { Builder.SetInsertPoint(Exit); Builder.CreateRetVoid(); } + +void GCOVProfiler:: +insertFlush(ArrayRef > CountersBySP) { + FunctionType *FTy = FunctionType::get(Type::getVoidTy(*Ctx), false); + Function *FlushF = M->getFunction("__gcov_flush"); + if (!FlushF) + FlushF = Function::Create(FTy, GlobalValue::InternalLinkage, + "__gcov_flush", M); + else + FlushF->setLinkage(GlobalValue::InternalLinkage); + FlushF->setUnnamedAddr(true); + + BasicBlock *Entry = BasicBlock::Create(*Ctx, "entry", FlushF); + + // Write out the current counters. + Constant *WriteoutF = M->getFunction("__llvm_gcov_writeout"); + assert(WriteoutF && "Need to create the writeout function first!"); + + IRBuilder<> Builder(Entry); + Builder.CreateCall(WriteoutF); + + // Zero out the counters. + for (ArrayRef >::iterator + I = CountersBySP.begin(), E = CountersBySP.end(); + I != E; ++I) { + GlobalVariable *GV = I->first; + Constant *Null = Constant::getNullValue(GV->getType()->getElementType()); + Builder.CreateStore(Null, GV); + } + + Type *RetTy = FlushF->getReturnType(); + if (RetTy == Type::getVoidTy(*Ctx)) + Builder.CreateRetVoid(); + else if (RetTy->isIntegerTy()) + // Used if __gcov_flush was implicitly declared. + Builder.CreateRet(ConstantInt::get(RetTy, 0)); + else + report_fatal_error("invalid return type for __gcov_flush"); +} diff --git a/lib/Transforms/Instrumentation/MaximumSpanningTree.h b/lib/Transforms/Instrumentation/MaximumSpanningTree.h index f76c77e..a4bb5a6 100644 --- a/lib/Transforms/Instrumentation/MaximumSpanningTree.h +++ b/lib/Transforms/Instrumentation/MaximumSpanningTree.h @@ -26,30 +26,6 @@ namespace llvm { /// The type parameter T determines the type of the nodes of the graph. template class MaximumSpanningTree { - - // A comparing class for comparing weighted edges. - template - struct EdgeWeightCompare { - bool operator()(typename MaximumSpanningTree::EdgeWeight X, - typename MaximumSpanningTree::EdgeWeight Y) const { - if (X.second > Y.second) return true; - if (X.second < Y.second) return false; - if (const BasicBlock *BBX = dyn_cast(X.first.first)) { - if (const BasicBlock *BBY = dyn_cast(Y.first.first)) { - if (BBX->size() > BBY->size()) return true; - if (BBX->size() < BBY->size()) return false; - } - } - if (const BasicBlock *BBX = dyn_cast(X.first.second)) { - if (const BasicBlock *BBY = dyn_cast(Y.first.second)) { - if (BBX->size() > BBY->size()) return true; - if (BBX->size() < BBY->size()) return false; - } - } - return false; - } - }; - public: typedef std::pair Edge; typedef std::pair EdgeWeight; @@ -59,6 +35,33 @@ namespace llvm { MaxSpanTree MST; + private: + // A comparing class for comparing weighted edges. + struct EdgeWeightCompare { + static bool getBlockSize(const T *X) { + const BasicBlock *BB = dyn_cast_or_null(X); + return BB ? BB->size() : 0; + } + + bool operator()(EdgeWeight X, EdgeWeight Y) const { + if (X.second > Y.second) return true; + if (X.second < Y.second) return false; + + // Equal edge weights: break ties by comparing block sizes. 
+ size_t XSizeA = getBlockSize(X.first.first); + size_t YSizeA = getBlockSize(Y.first.first); + if (XSizeA > YSizeA) return true; + if (XSizeA < YSizeA) return false; + + size_t XSizeB = getBlockSize(X.first.second); + size_t YSizeB = getBlockSize(Y.first.second); + if (XSizeB > YSizeB) return true; + if (XSizeB < YSizeB) return false; + + return false; + } + }; + public: static char ID; // Class identification, replacement for typeinfo @@ -66,7 +69,7 @@ namespace llvm { /// spanning tree. MaximumSpanningTree(EdgeWeights &EdgeVector) { - std::stable_sort(EdgeVector.begin(), EdgeVector.end(), EdgeWeightCompare()); + std::stable_sort(EdgeVector.begin(), EdgeVector.end(), EdgeWeightCompare()); // Create spanning tree, Forest contains a special data structure // that makes checking if two nodes are already in a common (sub-)tree diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index dc0fa71..9e10fc4 100644 --- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -21,7 +21,7 @@ #define DEBUG_TYPE "tsan" -#include "FunctionBlackList.h" +#include "BlackList.h" #include "llvm/Function.h" #include "llvm/IRBuilder.h" #include "llvm/Intrinsics.h" @@ -38,7 +38,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/ModuleUtils.h" @@ -47,10 +47,19 @@ using namespace llvm; static cl::opt ClBlackListFile("tsan-blacklist", cl::desc("Blacklist file"), cl::Hidden); +static cl::opt ClInstrumentMemoryAccesses( + "tsan-instrument-memory-accesses", cl::init(true), + cl::desc("Instrument memory accesses"), cl::Hidden); +static cl::opt ClInstrumentFuncEntryExit( + "tsan-instrument-func-entry-exit", cl::init(true), + cl::desc("Instrument function entry and exit"), cl::Hidden); +static cl::opt ClInstrumentAtomics( + "tsan-instrument-atomics", cl::init(true), + cl::desc("Instrument atomics"), cl::Hidden); STATISTIC(NumInstrumentedReads, "Number of instrumented reads"); STATISTIC(NumInstrumentedWrites, "Number of instrumented writes"); -STATISTIC(NumOmittedReadsBeforeWrite, +STATISTIC(NumOmittedReadsBeforeWrite, "Number of reads ignored due to following writes"); STATISTIC(NumAccessesWithBadSize, "Number of accesses with bad size"); STATISTIC(NumInstrumentedVtableWrites, "Number of vtable ptr writes"); @@ -76,8 +85,8 @@ struct ThreadSanitizer : public FunctionPass { bool addrPointsToConstantData(Value *Addr); int getMemoryAccessFuncIndex(Value *Addr); - TargetData *TD; - OwningPtr BL; + DataLayout *TD; + OwningPtr BL; IntegerType *OrdTy; // Callbacks to run-time library are computed in doInitialization. 
Function *TsanFuncEntry; @@ -88,6 +97,10 @@ struct ThreadSanitizer : public FunctionPass { Function *TsanWrite[kNumberOfAccessSizes]; Function *TsanAtomicLoad[kNumberOfAccessSizes]; Function *TsanAtomicStore[kNumberOfAccessSizes]; + Function *TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1][kNumberOfAccessSizes]; + Function *TsanAtomicCAS[kNumberOfAccessSizes]; + Function *TsanAtomicThreadFence; + Function *TsanAtomicSignalFence; Function *TsanVptrUpdate; }; } // namespace @@ -118,10 +131,10 @@ static Function *checkInterfaceFunction(Constant *FuncOrBitcast) { } bool ThreadSanitizer::doInitialization(Module &M) { - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); if (!TD) return false; - BL.reset(new FunctionBlackList(ClBlackListFile)); + BL.reset(new BlackList(ClBlackListFile)); // Always insert a call to __tsan_init into the module's CTORs. IRBuilder<> IRB(M.getContext()); @@ -158,10 +171,42 @@ bool ThreadSanitizer::doInitialization(Module &M) { TsanAtomicStore[i] = checkInterfaceFunction(M.getOrInsertFunction( AtomicStoreName, IRB.getVoidTy(), PtrTy, Ty, OrdTy, NULL)); + + for (int op = AtomicRMWInst::FIRST_BINOP; + op <= AtomicRMWInst::LAST_BINOP; ++op) { + TsanAtomicRMW[op][i] = NULL; + const char *NamePart = NULL; + if (op == AtomicRMWInst::Xchg) + NamePart = "_exchange"; + else if (op == AtomicRMWInst::Add) + NamePart = "_fetch_add"; + else if (op == AtomicRMWInst::Sub) + NamePart = "_fetch_sub"; + else if (op == AtomicRMWInst::And) + NamePart = "_fetch_and"; + else if (op == AtomicRMWInst::Or) + NamePart = "_fetch_or"; + else if (op == AtomicRMWInst::Xor) + NamePart = "_fetch_xor"; + else + continue; + SmallString<32> RMWName("__tsan_atomic" + itostr(BitSize) + NamePart); + TsanAtomicRMW[op][i] = checkInterfaceFunction(M.getOrInsertFunction( + RMWName, Ty, PtrTy, Ty, OrdTy, NULL)); + } + + SmallString<32> AtomicCASName("__tsan_atomic" + itostr(BitSize) + + "_compare_exchange_val"); + TsanAtomicCAS[i] = checkInterfaceFunction(M.getOrInsertFunction( + AtomicCASName, Ty, PtrTy, Ty, Ty, OrdTy, NULL)); } TsanVptrUpdate = checkInterfaceFunction(M.getOrInsertFunction( "__tsan_vptr_update", IRB.getVoidTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), NULL)); + TsanAtomicThreadFence = checkInterfaceFunction(M.getOrInsertFunction( + "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL)); + TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction( + "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL)); return true; } @@ -186,7 +231,7 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) { NumOmittedReadsFromConstantGlobals++; return true; } - } else if(LoadInst *L = dyn_cast(Addr)) { + } else if (LoadInst *L = dyn_cast(Addr)) { if (isVtableAccess(L)) { // Reads from a vtable pointer can not race with any writes. NumOmittedReadsFromVtable++; @@ -244,8 +289,8 @@ static bool isAtomic(Instruction *I) { return true; if (isa(I)) return true; - if (FenceInst *FI = dyn_cast(I)) - return FI->getSynchScope() == CrossThread; + if (isa(I)) + return true; return false; } @@ -284,17 +329,19 @@ bool ThreadSanitizer::runOnFunction(Function &F) { // (e.g. variables that do not escape, etc). // Instrument memory accesses. - for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) { - Res |= instrumentLoadOrStore(AllLoadsAndStores[i]); - } + if (ClInstrumentMemoryAccesses) + for (size_t i = 0, n = AllLoadsAndStores.size(); i < n; ++i) { + Res |= instrumentLoadOrStore(AllLoadsAndStores[i]); + } // Instrument atomic memory accesses. 
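For orientation, the shape of the __tsan_atomic* runtime entry points bound in doInitialization() above can be read directly off the getOrInsertFunction() calls: each returns the N-bit integer type and takes the address, the value operand(s), and a 32-bit memory-order argument. Assumed C-level declarations for the 32-bit case, inferred only from those calls (the real runtime headers are not part of this patch):

#include <cstdint>

extern "C" {
int32_t __tsan_atomic32_fetch_add(int32_t *addr, int32_t val, int32_t mo);
int32_t __tsan_atomic32_exchange(int32_t *addr, int32_t val, int32_t mo);
int32_t __tsan_atomic32_compare_exchange_val(int32_t *addr, int32_t expected,
                                             int32_t desired, int32_t mo);
}

// 'mo' is the constant produced by createOrdering() further down; after this
// patch it is a plain enumeration (e.g. 5 for sequentially consistent).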
- for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) { - Res |= instrumentAtomic(AtomicAccesses[i]); - } + if (ClInstrumentAtomics) + for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) { + Res |= instrumentAtomic(AtomicAccesses[i]); + } // Instrument function entry/exit points if there were instrumented accesses. - if (Res || HasCalls) { + if ((Res || HasCalls) && ClInstrumentFuncEntryExit) { IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); Value *ReturnAddress = IRB.CreateCall( Intrinsic::getDeclaration(F.getParent(), Intrinsic::returnaddress), @@ -343,12 +390,12 @@ static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) { switch (ord) { case NotAtomic: assert(false); case Unordered: // Fall-through. - case Monotonic: v = 1 << 0; break; - // case Consume: v = 1 << 1; break; // Not specified yet. - case Acquire: v = 1 << 2; break; - case Release: v = 1 << 3; break; - case AcquireRelease: v = 1 << 4; break; - case SequentiallyConsistent: v = 1 << 5; break; + case Monotonic: v = 0; break; + // case Consume: v = 1; break; // Not specified yet. + case Acquire: v = 2; break; + case Release: v = 3; break; + case AcquireRelease: v = 4; break; + case SequentiallyConsistent: v = 5; break; } return IRB->getInt32(v); } @@ -385,12 +432,44 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I) { CallInst *C = CallInst::Create(TsanAtomicStore[Idx], ArrayRef(Args)); ReplaceInstWithInst(I, C); - } else if (isa(I)) { - // FIXME: Not yet supported. - } else if (isa(I)) { - // FIXME: Not yet supported. - } else if (isa(I)) { - // FIXME: Not yet supported. + } else if (AtomicRMWInst *RMWI = dyn_cast(I)) { + Value *Addr = RMWI->getPointerOperand(); + int Idx = getMemoryAccessFuncIndex(Addr); + if (Idx < 0) + return false; + Function *F = TsanAtomicRMW[RMWI->getOperation()][Idx]; + if (F == NULL) + return false; + const size_t ByteSize = 1 << Idx; + const size_t BitSize = ByteSize * 8; + Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); + Type *PtrTy = Ty->getPointerTo(); + Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), + IRB.CreateIntCast(RMWI->getValOperand(), Ty, false), + createOrdering(&IRB, RMWI->getOrdering())}; + CallInst *C = CallInst::Create(F, ArrayRef(Args)); + ReplaceInstWithInst(I, C); + } else if (AtomicCmpXchgInst *CASI = dyn_cast(I)) { + Value *Addr = CASI->getPointerOperand(); + int Idx = getMemoryAccessFuncIndex(Addr); + if (Idx < 0) + return false; + const size_t ByteSize = 1 << Idx; + const size_t BitSize = ByteSize * 8; + Type *Ty = Type::getIntNTy(IRB.getContext(), BitSize); + Type *PtrTy = Ty->getPointerTo(); + Value *Args[] = {IRB.CreatePointerCast(Addr, PtrTy), + IRB.CreateIntCast(CASI->getCompareOperand(), Ty, false), + IRB.CreateIntCast(CASI->getNewValOperand(), Ty, false), + createOrdering(&IRB, CASI->getOrdering())}; + CallInst *C = CallInst::Create(TsanAtomicCAS[Idx], ArrayRef(Args)); + ReplaceInstWithInst(I, C); + } else if (FenceInst *FI = dyn_cast(I)) { + Value *Args[] = {createOrdering(&IRB, FI->getOrdering())}; + Function *F = FI->getSynchScope() == SingleThread ? 
+ TsanAtomicSignalFence : TsanAtomicThreadFence; + CallInst *C = CallInst::Create(F, ArrayRef(Args)); + ReplaceInstWithInst(I, C); } return true; } diff --git a/lib/Transforms/Scalar/CMakeLists.txt b/lib/Transforms/Scalar/CMakeLists.txt index a01e066..b3fc6e3 100644 --- a/lib/Transforms/Scalar/CMakeLists.txt +++ b/lib/Transforms/Scalar/CMakeLists.txt @@ -25,6 +25,7 @@ add_llvm_library(LLVMScalarOpts Reassociate.cpp Reg2Mem.cpp SCCP.cpp + SROA.cpp Scalar.cpp ScalarReplAggregates.cpp SimplifyCFGPass.cpp diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp index a3c426a..123ed0f 100644 --- a/lib/Transforms/Scalar/CodeGenPrepare.cpp +++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp @@ -27,6 +27,7 @@ #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/Dominators.h" +#include "llvm/Analysis/DominatorInternals.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/ProfileInfo.h" #include "llvm/Assembly/Writer.h" @@ -37,12 +38,13 @@ #include "llvm/Support/PatternMatch.h" #include "llvm/Support/ValueHandle.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Transforms/Utils/AddrModeMatcher.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/BuildLibCalls.h" +#include "llvm/Transforms/Utils/BypassSlowDivision.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; using namespace llvm::PatternMatch; @@ -146,9 +148,18 @@ bool CodeGenPrepare::runOnFunction(Function &F) { TLInfo = &getAnalysis(); DT = getAnalysisIfAvailable(); PFI = getAnalysisIfAvailable(); - OptSize = F.hasFnAttr(Attribute::OptimizeForSize); + OptSize = F.getFnAttributes().hasAttribute(Attributes::OptimizeForSize); + + /// This optimization identifies DIV instructions that can be + /// profitably bypassed and carried out with a shorter, faster divide. + if (TLI && TLI->isSlowDivBypassed()) { + const DenseMap &BypassWidths = + TLI->getBypassSlowDivWidths(); + for (Function::iterator I = F.begin(); I != F.end(); I++) + EverMadeChange |= bypassSlowDivision(F, I, BypassWidths); + } - // First pass, eliminate blocks that contain only PHI nodes and an + // Eliminate blocks that contain only PHI nodes and an // unconditional branch. EverMadeChange |= EliminateMostlyEmptyBlocks(F); @@ -160,7 +171,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) { bool MadeChange = true; while (MadeChange) { MadeChange = false; - for (Function::iterator I = F.begin(), E = F.end(); I != E; ) { + for (Function::iterator I = F.begin(); I != F.end(); ) { BasicBlock *BB = I++; MadeChange |= OptimizeBlock(*BB); } @@ -215,11 +226,13 @@ bool CodeGenPrepare::EliminateFallThrough(Function &F) { // edge, just collapse it. BasicBlock *SinglePred = BB->getSinglePredecessor(); - if (!SinglePred || SinglePred == BB) continue; + // Don't merge if BB's address is taken. + if (!SinglePred || SinglePred == BB || BB->hasAddressTaken()) continue; BranchInst *Term = dyn_cast(SinglePred->getTerminator()); if (Term && !Term->isConditional()) { Changed = true; + DEBUG(dbgs() << "To merge:\n"<< *SinglePred << "\n\n\n"); // Remember if SinglePred was the entry block of the function. // If so, we will need to move BB back to the entry position. 
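Aside (not part of the patch): the bypassSlowDivision call wired into CodeGenPrepare::runOnFunction above guards wide divisions with a cheaper narrow divide on targets that report slow division. A hedged source-level sketch of the effect follows; the pass itself operates on IR, and the function name here is illustrative.

    #include <cstdint>

    // What a bypassed 64-bit unsigned divide behaves like after the rewrite:
    // if both operands fit in the narrower width, take the short, faster
    // divide; otherwise fall back to the full-width one.
    uint64_t div64_bypassed(uint64_t a, uint64_t b) {
      if (((a | b) >> 32) == 0)                  // both values fit in 32 bits
        return (uint64_t)((uint32_t)a / (uint32_t)b);
      return a / b;
    }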
bool isEntry = SinglePred == &SinglePred->getParent()->getEntryBlock(); @@ -230,7 +243,6 @@ bool CodeGenPrepare::EliminateFallThrough(Function &F) { // We have erased a block. Update the iterator. I = BB; - DEBUG(dbgs() << "Merged:\n"<< *SinglePred << "\n\n\n"); } } return Changed; @@ -610,7 +622,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) { // happens. WeakVH IterHandle(CurInstIterator); - replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getTargetData() : 0, + replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0, TLInfo, ModifiedDT ? 0 : DT); // If the iterator instruction was recursively deleted, start over at the @@ -634,8 +646,8 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) { // From here on out we're working with named functions. if (CI->getCalledFunction() == 0) return false; - // We'll need TargetData from here on out. - const TargetData *TD = TLI ? TLI->getTargetData() : 0; + // We'll need DataLayout from here on out. + const DataLayout *TD = TLI ? TLI->getDataLayout() : 0; if (!TD) return false; // Lower all default uses of _chk calls. This is very similar @@ -649,6 +661,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) { /// DupRetToEnableTailCallOpts - Look for opportunities to duplicate return /// instructions to the predecessor to enable tail call optimizations. The /// case it is currently looking for is: +/// @code /// bb0: /// %tmp0 = tail call i32 @f0() /// br label %return @@ -661,9 +674,11 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) { /// return: /// %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ] /// ret i32 %retval +/// @endcode /// /// => /// +/// @code /// bb0: /// %tmp0 = tail call i32 @f0() /// ret i32 %tmp0 @@ -673,7 +688,7 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) { /// bb2: /// %tmp2 = tail call i32 @f2() /// ret i32 %tmp2 -/// +/// @endcode bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) { if (!TLI) return false; @@ -699,7 +714,8 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) { // See llvm::isInTailCallPosition(). const Function *F = BB->getParent(); Attributes CallerRetAttr = F->getAttributes().getRetAttributes(); - if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt)) + if (CallerRetAttr.hasAttribute(Attributes::ZExt) || + CallerRetAttr.hasAttribute(Attributes::SExt)) return false; // Make sure there are no instructions between the PHI and return, or that the @@ -757,7 +773,10 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) { // Conservatively require the attributes of the call to match those of the // return. Ignore noalias because it doesn't affect the call sequence. Attributes CalleeRetAttr = CS.getAttributes().getRetAttributes(); - if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias) + if (AttrBuilder(CalleeRetAttr). + removeAttribute(Attributes::NoAlias) != + AttrBuilder(CallerRetAttr). + removeAttribute(Attributes::NoAlias)) continue; // Make sure the call instruction is followed by an unconditional branch to @@ -774,7 +793,7 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(ReturnInst *RI) { } // If we eliminated all predecessors of the block, delete the block now. 
- if (Changed && pred_begin(BB) == pred_end(BB)) + if (Changed && !BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB)) BB->eraseFromParent(); return Changed; @@ -914,7 +933,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for " << *MemoryInst); Type *IntPtrTy = - TLI->getTargetData()->getIntPtrType(AccessTy->getContext()); + TLI->getDataLayout()->getIntPtrType(AccessTy->getContext()); Value *Result = 0; @@ -988,7 +1007,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr, WeakVH IterHandle(CurInstIterator); BasicBlock *BB = CurInstIterator->getParent(); - RecursivelyDeleteTriviallyDeadInstructions(Repl); + RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo); if (IterHandle != CurInstIterator) { // If the iterator instruction was recursively deleted, start over at the @@ -1174,17 +1193,32 @@ static bool isFormingBranchFromSelectProfitable(SelectInst *SI) { } +/// If we have a SelectInst that will likely profit from branch prediction, +/// turn it into a branch. bool CodeGenPrepare::OptimizeSelectInst(SelectInst *SI) { - // If we have a SelectInst that will likely profit from branch prediction, - // turn it into a branch. - if (DisableSelectToBranch || OptSize || !TLI || - !TLI->isPredictableSelectExpensive()) - return false; + bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1); - if (!SI->getCondition()->getType()->isIntegerTy(1) || - !isFormingBranchFromSelectProfitable(SI)) + // Can we convert the 'select' to CF ? + if (DisableSelectToBranch || OptSize || !TLI || VectorCond) return false; + TargetLowering::SelectSupportKind SelectKind; + if (VectorCond) + SelectKind = TargetLowering::VectorMaskSelect; + else if (SI->getType()->isVectorTy()) + SelectKind = TargetLowering::ScalarCondVectorVal; + else + SelectKind = TargetLowering::ScalarValSelect; + + // Do we have efficient codegen support for this kind of 'selects' ? + if (TLI->isSelectSupported(SelectKind)) { + // We have efficient codegen support for the select instruction. + // Check if it is profitable to keep this 'select'. + if (!TLI->isPredictableSelectExpensive() || + !isFormingBranchFromSelectProfitable(SI)) + return false; + } + ModifiedDT = true; // First, we split the block containing the select into 2 blocks. 
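Aside (not part of the patch): the reworked OptimizeSelectInst above first asks the target whether it has efficient codegen for this kind of select (isSelectSupported) and only performs the branch rewrite when selects are expensive and the condition looks predictable. A hedged source-level picture of the rewrite follows; the pass works on IR 'select' instructions, and the function names are illustrative.

    // A 'select' is the IR analogue of a conditional move:
    int pick_select(bool c, int a, int b) { return c ? a : b; }

    // When selects are expensive and 'c' is highly predictable, splitting the
    // select into explicit control flow lets the branch predictor hide the
    // cost; this is the shape OptimizeSelectInst produces:
    int pick_branch(bool c, int a, int b) {
      if (c)
        return a;
      return b;
    }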
@@ -1302,7 +1336,7 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) { bool MadeChange = false; CurInstIterator = BB.begin(); - for (BasicBlock::iterator E = BB.end(); CurInstIterator != E; ) + while (CurInstIterator != BB.end()) MadeChange |= OptimizeInst(CurInstIterator++); return MadeChange; diff --git a/lib/Transforms/Scalar/ConstantProp.cpp b/lib/Transforms/Scalar/ConstantProp.cpp index 5430f62..369720b 100644 --- a/lib/Transforms/Scalar/ConstantProp.cpp +++ b/lib/Transforms/Scalar/ConstantProp.cpp @@ -24,7 +24,7 @@ #include "llvm/Constant.h" #include "llvm/Instruction.h" #include "llvm/Pass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Support/InstIterator.h" #include "llvm/ADT/Statistic.h" @@ -67,7 +67,7 @@ bool ConstantPropagation::runOnFunction(Function &F) { WorkList.insert(&*i); } bool Changed = false; - TargetData *TD = getAnalysisIfAvailable(); + DataLayout *TD = getAnalysisIfAvailable(); TargetLibraryInfo *TLI = &getAnalysis(); while (!WorkList.empty()) { diff --git a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp index 9b0aadb..3ec6f3d 100644 --- a/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp +++ b/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp @@ -235,6 +235,11 @@ bool CorrelatedValuePropagation::processSwitch(SwitchInst *SI) { // This case never fires - remove it. CI.getCaseSuccessor()->removePredecessor(BB); SI->removeCase(CI); // Does not invalidate the iterator. + + // The condition can be modified by removePredecessor's PHI simplification + // logic. + Cond = SI->getCondition(); + ++NumDeadCases; Changed = true; } else if (State == LazyValueInfo::True) { diff --git a/lib/Transforms/Scalar/DCE.cpp b/lib/Transforms/Scalar/DCE.cpp index 8dbcc23..a2e074f 100644 --- a/lib/Transforms/Scalar/DCE.cpp +++ b/lib/Transforms/Scalar/DCE.cpp @@ -22,6 +22,7 @@ #include "llvm/Instruction.h" #include "llvm/Pass.h" #include "llvm/Support/InstIterator.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/ADT/Statistic.h" using namespace llvm; @@ -38,10 +39,11 @@ namespace { initializeDeadInstEliminationPass(*PassRegistry::getPassRegistry()); } virtual bool runOnBasicBlock(BasicBlock &BB) { + TargetLibraryInfo *TLI = getAnalysisIfAvailable(); bool Changed = false; for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) { Instruction *Inst = DI++; - if (isInstructionTriviallyDead(Inst)) { + if (isInstructionTriviallyDead(Inst, TLI)) { Inst->eraseFromParent(); Changed = true; ++DIEEliminated; @@ -87,6 +89,8 @@ char DCE::ID = 0; INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false) bool DCE::runOnFunction(Function &F) { + TargetLibraryInfo *TLI = getAnalysisIfAvailable(); + // Start out with all of the instructions in the worklist... std::vector WorkList; for (inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i) @@ -101,7 +105,7 @@ bool DCE::runOnFunction(Function &F) { Instruction *I = WorkList.back(); WorkList.pop_back(); - if (isInstructionTriviallyDead(I)) { // If the instruction is dead. + if (isInstructionTriviallyDead(I, TLI)) { // If the instruction is dead. // Loop over all of the values that the instruction uses, if there are // instructions being used, add them to the worklist, because they might // go dead after this one is removed. @@ -114,13 +118,8 @@ bool DCE::runOnFunction(Function &F) { I->eraseFromParent(); // Remove the instruction from the worklist if it still exists in it. 
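Aside (not part of the patch): the DCE hunk that continues just below swaps a hand-written erase loop for the standard erase-remove idiom when dropping a just-deleted instruction from the worklist. A minimal, self-contained sketch of that idiom; container and element types here are placeholders.

    #include <algorithm>
    #include <vector>

    // Remove every occurrence of 'dead' from the worklist in one pass.
    void drop_from_worklist(std::vector<int> &worklist, int dead) {
      worklist.erase(std::remove(worklist.begin(), worklist.end(), dead),
                     worklist.end());
    }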
- for (std::vector::iterator WI = WorkList.begin(); - WI != WorkList.end(); ) { - if (*WI == I) - WI = WorkList.erase(WI); - else - ++WI; - } + WorkList.erase(std::remove(WorkList.begin(), WorkList.end(), I), + WorkList.end()); MadeChange = true; ++DCEEliminated; diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp index 8b1283f..736cc05 100644 --- a/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -29,7 +29,8 @@ #include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" #include "llvm/Analysis/ValueTracking.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Support/Debug.h" #include "llvm/ADT/SetVector.h" @@ -45,6 +46,7 @@ namespace { AliasAnalysis *AA; MemoryDependenceAnalysis *MD; DominatorTree *DT; + const TargetLibraryInfo *TLI; static char ID; // Pass identification, replacement for typeid DSE() : FunctionPass(ID), AA(0), MD(0), DT(0) { @@ -55,6 +57,7 @@ namespace { AA = &getAnalysis(); MD = &getAnalysis(); DT = &getAnalysis(); + TLI = AA->getTargetLibraryInfo(); bool Changed = false; for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) @@ -106,6 +109,7 @@ FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); } /// static void DeleteDeadInstruction(Instruction *I, MemoryDependenceAnalysis &MD, + const TargetLibraryInfo *TLI, SmallSetVector *ValueSet = 0) { SmallVector NowDeadInsts; @@ -130,7 +134,7 @@ static void DeleteDeadInstruction(Instruction *I, if (!Op->use_empty()) continue; if (Instruction *OpI = dyn_cast(Op)) - if (isInstructionTriviallyDead(OpI)) + if (isInstructionTriviallyDead(OpI, TLI)) NowDeadInsts.push_back(OpI); } @@ -143,7 +147,7 @@ static void DeleteDeadInstruction(Instruction *I, /// hasMemoryWrite - Does this instruction write some memory? This only returns /// true for things that we can analyze with other helpers below. -static bool hasMemoryWrite(Instruction *I) { +static bool hasMemoryWrite(Instruction *I, const TargetLibraryInfo *TLI) { if (isa(I)) return true; if (IntrinsicInst *II = dyn_cast(I)) { @@ -158,6 +162,26 @@ static bool hasMemoryWrite(Instruction *I) { return true; } } + if (CallSite CS = I) { + if (Function *F = CS.getCalledFunction()) { + if (TLI && TLI->has(LibFunc::strcpy) && + F->getName() == TLI->getName(LibFunc::strcpy)) { + return true; + } + if (TLI && TLI->has(LibFunc::strncpy) && + F->getName() == TLI->getName(LibFunc::strncpy)) { + return true; + } + if (TLI && TLI->has(LibFunc::strcat) && + F->getName() == TLI->getName(LibFunc::strcat)) { + return true; + } + if (TLI && TLI->has(LibFunc::strncat) && + F->getName() == TLI->getName(LibFunc::strncat)) { + return true; + } + } + } return false; } @@ -175,7 +199,7 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) { // If we don't have target data around, an unknown size in Location means // that we should use the size of the pointee type. This isn't valid for // memset/memcpy, which writes more than an i8. - if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0) + if (Loc.Size == AliasAnalysis::UnknownSize && AA.getDataLayout() == 0) return AliasAnalysis::Location(); return Loc; } @@ -189,7 +213,7 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) { // If we don't have target data around, an unknown size in Location means // that we should use the size of the pointee type. 
This isn't valid for // init.trampoline, which writes more than an i8. - if (AA.getTargetData() == 0) return AliasAnalysis::Location(); + if (AA.getDataLayout() == 0) return AliasAnalysis::Location(); // FIXME: We don't know the size of the trampoline, so we can't really // handle it here. @@ -205,7 +229,8 @@ getLocForWrite(Instruction *Inst, AliasAnalysis &AA) { /// instruction if any. static AliasAnalysis::Location getLocForRead(Instruction *Inst, AliasAnalysis &AA) { - assert(hasMemoryWrite(Inst) && "Unknown instruction case"); + assert(hasMemoryWrite(Inst, AA.getTargetLibraryInfo()) && + "Unknown instruction case"); // The only instructions that both read and write are the mem transfer // instructions (memcpy/memmove). @@ -222,23 +247,29 @@ static bool isRemovable(Instruction *I) { if (StoreInst *SI = dyn_cast(I)) return SI->isUnordered(); - IntrinsicInst *II = cast(I); - switch (II->getIntrinsicID()) { - default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate"); - case Intrinsic::lifetime_end: - // Never remove dead lifetime_end's, e.g. because it is followed by a - // free. - return false; - case Intrinsic::init_trampoline: - // Always safe to remove init_trampoline. - return true; + if (IntrinsicInst *II = dyn_cast(I)) { + switch (II->getIntrinsicID()) { + default: llvm_unreachable("doesn't pass 'hasMemoryWrite' predicate"); + case Intrinsic::lifetime_end: + // Never remove dead lifetime_end's, e.g. because it is followed by a + // free. + return false; + case Intrinsic::init_trampoline: + // Always safe to remove init_trampoline. + return true; - case Intrinsic::memset: - case Intrinsic::memmove: - case Intrinsic::memcpy: - // Don't remove volatile memory intrinsics. - return !cast(II)->isVolatile(); + case Intrinsic::memset: + case Intrinsic::memmove: + case Intrinsic::memcpy: + // Don't remove volatile memory intrinsics. + return !cast(II)->isVolatile(); + } } + + if (CallSite CS = I) + return CS.getInstruction()->use_empty(); + + return false; } @@ -249,14 +280,19 @@ static bool isShortenable(Instruction *I) { if (isa(I)) return false; - IntrinsicInst *II = cast(I); - switch (II->getIntrinsicID()) { - default: return false; - case Intrinsic::memset: - case Intrinsic::memcpy: - // Do shorten memory intrinsics. - return true; + if (IntrinsicInst *II = dyn_cast(I)) { + switch (II->getIntrinsicID()) { + default: return false; + case Intrinsic::memset: + case Intrinsic::memcpy: + // Do shorten memory intrinsics. + return true; + } } + + // Don't shorten libcalls calls for now. + + return false; } /// getStoredPointerOperand - Return the pointer that is being written to. @@ -266,17 +302,23 @@ static Value *getStoredPointerOperand(Instruction *I) { if (MemIntrinsic *MI = dyn_cast(I)) return MI->getDest(); - IntrinsicInst *II = cast(I); - switch (II->getIntrinsicID()) { - default: llvm_unreachable("Unexpected intrinsic!"); - case Intrinsic::init_trampoline: - return II->getArgOperand(0); + if (IntrinsicInst *II = dyn_cast(I)) { + switch (II->getIntrinsicID()) { + default: llvm_unreachable("Unexpected intrinsic!"); + case Intrinsic::init_trampoline: + return II->getArgOperand(0); + } } + + CallSite CS = I; + // All the supported functions so far happen to have dest as their first + // argument. 
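Aside (not part of the patch): with hasMemoryWrite, isRemovable, and getStoredPointerOperand now aware of strcpy/strncpy/strcat/strncat through TargetLibraryInfo, a string copy whose effect is wiped out before it can be observed becomes a dead-store candidate just like an ordinary store. A hedged example of the pattern this enables; whether a given case is actually deleted still depends on the usual aliasing and size checks, and the function name is illustrative.

    #include <cstring>

    // The first strcpy writes through 'buf' but is completely overwritten by
    // the second before any read, so DSE can now treat it as a dead store.
    void greet(char *buf /* assumed to hold at least 6 bytes */) {
      std::strcpy(buf, "hello");   // dead-store candidate
      std::strcpy(buf, "world");
    }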
+ return CS.getArgument(0); } static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) { uint64_t Size; - if (getObjectSize(V, Size, AA.getTargetData())) + if (getObjectSize(V, Size, AA.getDataLayout(), AA.getTargetLibraryInfo())) return Size; return AliasAnalysis::UnknownSize; } @@ -309,10 +351,10 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later, // comparison. if (Later.Size == AliasAnalysis::UnknownSize || Earlier.Size == AliasAnalysis::UnknownSize) { - // If we have no TargetData information around, then the size of the store + // If we have no DataLayout information around, then the size of the store // is inferrable from the pointee type. If they are the same type, then // we know that the store is safe. - if (AA.getTargetData() == 0 && + if (AA.getDataLayout() == 0 && Later.Ptr->getType() == Earlier.Ptr->getType()) return OverwriteComplete; @@ -328,13 +370,13 @@ static OverwriteResult isOverwrite(const AliasAnalysis::Location &Later, // larger than the earlier one. if (Later.Size == AliasAnalysis::UnknownSize || Earlier.Size == AliasAnalysis::UnknownSize || - AA.getTargetData() == 0) + AA.getDataLayout() == 0) return OverwriteUnknown; // Check to see if the later store is to the entire object (either a global, // an alloca, or a byval argument). If so, then it clearly overwrites any // other store to the same object. - const TargetData &TD = *AA.getTargetData(); + const DataLayout &TD = *AA.getDataLayout(); const Value *UO1 = GetUnderlyingObject(P1, &TD), *UO2 = GetUnderlyingObject(P2, &TD); @@ -454,13 +496,13 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) { Instruction *Inst = BBI++; // Handle 'free' calls specially. - if (CallInst *F = isFreeCall(Inst)) { + if (CallInst *F = isFreeCall(Inst, TLI)) { MadeChange |= HandleFree(F); continue; } // If we find something that writes memory, get its memory dependence. - if (!hasMemoryWrite(Inst)) + if (!hasMemoryWrite(Inst, TLI)) continue; MemDepResult InstDep = MD->getDependency(Inst); @@ -483,7 +525,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) { // in case we need it. WeakVH NextInst(BBI); - DeleteDeadInstruction(SI, *MD); + DeleteDeadInstruction(SI, *MD, TLI); if (NextInst == 0) // Next instruction deleted. BBI = BB.begin(); @@ -530,7 +572,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) { << *DepWrite << "\n KILLER: " << *Inst << '\n'); // Delete the store and now-dead instructions that feed it. - DeleteDeadInstruction(DepWrite, *MD); + DeleteDeadInstruction(DepWrite, *MD, TLI); ++NumFastStores; MadeChange = true; @@ -627,7 +669,7 @@ bool DSE::HandleFree(CallInst *F) { MemDepResult Dep = MD->getPointerDependencyFrom(Loc, false, InstPt, BB); while (Dep.isDef() || Dep.isClobber()) { Instruction *Dependency = Dep.getInst(); - if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency)) + if (!hasMemoryWrite(Dependency, TLI) || !isRemovable(Dependency)) break; Value *DepPointer = @@ -640,7 +682,7 @@ bool DSE::HandleFree(CallInst *F) { Instruction *Next = llvm::next(BasicBlock::iterator(Dependency)); // DCE instructions only used to calculate that store - DeleteDeadInstruction(Dependency, *MD); + DeleteDeadInstruction(Dependency, *MD, TLI); ++NumFastStores; MadeChange = true; @@ -659,6 +701,22 @@ bool DSE::HandleFree(CallInst *F) { return MadeChange; } +namespace { + struct CouldRef { + typedef Value *argument_type; + const CallSite CS; + AliasAnalysis *AA; + + bool operator()(Value *I) { + // See if the call site touches the value. 
+ AliasAnalysis::ModRefResult A = + AA->getModRefInfo(CS, I, getPointerSize(I, *AA)); + + return A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref; + } + }; +} + /// handleEndBlock - Remove dead stores to stack-allocated locations in the /// function end block. Ex: /// %A = alloca i32 @@ -680,7 +738,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) { // Okay, so these are dead heap objects, but if the pointer never escapes // then it's leaked by this function anyways. - else if (isAllocLikeFn(I) && !PointerMayBeCaptured(I, true, true)) + else if (isAllocLikeFn(I, TLI) && !PointerMayBeCaptured(I, true, true)) DeadStackObjects.insert(I); } @@ -696,7 +754,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) { --BBI; // If we find a store, check to see if it points into a dead stack value. - if (hasMemoryWrite(BBI) && isRemovable(BBI)) { + if (hasMemoryWrite(BBI, TLI) && isRemovable(BBI)) { // See through pointer-to-pointer bitcasts SmallVector Pointers; GetUnderlyingObjects(getStoredPointerOperand(BBI), Pointers); @@ -724,7 +782,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) { dbgs() << '\n'); // DCE instructions only used to calculate that store. - DeleteDeadInstruction(Dead, *MD, &DeadStackObjects); + DeleteDeadInstruction(Dead, *MD, TLI, &DeadStackObjects); ++NumFastStores; MadeChange = true; continue; @@ -732,9 +790,9 @@ bool DSE::handleEndBlock(BasicBlock &BB) { } // Remove any dead non-memory-mutating instructions. - if (isInstructionTriviallyDead(BBI)) { + if (isInstructionTriviallyDead(BBI, TLI)) { Instruction *Inst = BBI++; - DeleteDeadInstruction(Inst, *MD, &DeadStackObjects); + DeleteDeadInstruction(Inst, *MD, TLI, &DeadStackObjects); ++NumFastOther; MadeChange = true; continue; @@ -750,7 +808,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) { if (CallSite CS = cast(BBI)) { // Remove allocation function calls from the list of dead stack objects; // there can't be any references before the definition. - if (isAllocLikeFn(BBI)) + if (isAllocLikeFn(BBI, TLI)) DeadStackObjects.remove(BBI); // If this call does not access memory, it can't be loading any of our @@ -760,20 +818,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) { // If the call might load from any of our allocas, then any store above // the call is live. - SmallVector LiveAllocas; - for (SmallSetVector::iterator I = DeadStackObjects.begin(), - E = DeadStackObjects.end(); I != E; ++I) { - // See if the call site touches it. - AliasAnalysis::ModRefResult A = - AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA)); - - if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref) - LiveAllocas.push_back(*I); - } - - for (SmallVector::iterator I = LiveAllocas.begin(), - E = LiveAllocas.end(); I != E; ++I) - DeadStackObjects.remove(*I); + CouldRef Pred = { CS, AA }; + DeadStackObjects.remove_if(Pred); // If all of the allocas were clobbered by the call then we're not going // to find anything else to process. @@ -816,6 +862,20 @@ bool DSE::handleEndBlock(BasicBlock &BB) { return MadeChange; } +namespace { + struct CouldAlias { + typedef Value *argument_type; + const AliasAnalysis::Location &LoadedLoc; + AliasAnalysis *AA; + + bool operator()(Value *I) { + // See if the loaded location could alias the stack location. + AliasAnalysis::Location StackLoc(I, getPointerSize(I, *AA)); + return !AA->isNoAlias(StackLoc, LoadedLoc); + } + }; +} + /// RemoveAccessedObjects - Check to see if the specified location may alias any /// of the stack objects in the DeadStackObjects set. If so, they become live /// because the location is being loaded. 
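Aside (not part of the patch): handleEndBlock, touched above, removes stores into stack-allocated (and now also non-escaping heap-allocated) objects that can no longer be observed once the function returns. A hedged illustration of the kind of store it deletes; the function name is illustrative.

    // 'scratch' never escapes and is never read, so the store into it is dead
    // by the time the function returns and handleEndBlock may delete it.
    int end_block_example() {
      int scratch[4];
      scratch[0] = 42;   // dead store to a stack object
      return 7;
    }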
@@ -834,16 +894,7 @@ void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc, return; } - SmallVector NowLive; - for (SmallSetVector::iterator I = DeadStackObjects.begin(), - E = DeadStackObjects.end(); I != E; ++I) { - // See if the loaded location could alias the stack location. - AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA)); - if (!AA->isNoAlias(StackLoc, LoadedLoc)) - NowLive.push_back(*I); - } - - for (SmallVector::iterator I = NowLive.begin(), E = NowLive.end(); - I != E; ++I) - DeadStackObjects.remove(*I); + // Remove objects that could alias LoadedLoc. + CouldAlias Pred = { LoadedLoc, AA }; + DeadStackObjects.remove_if(Pred); } diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp index 9759549..101009d 100644 --- a/lib/Transforms/Scalar/EarlyCSE.cpp +++ b/lib/Transforms/Scalar/EarlyCSE.cpp @@ -18,11 +18,12 @@ #include "llvm/Pass.h" #include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/InstructionSimplify.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Support/Debug.h" #include "llvm/Support/RecyclingAllocator.h" +#include "llvm/ADT/Hashing.h" #include "llvm/ADT/ScopedHashTable.h" #include "llvm/ADT/Statistic.h" #include @@ -90,35 +91,56 @@ template<> struct DenseMapInfo { unsigned DenseMapInfo::getHashValue(SimpleValue Val) { Instruction *Inst = Val.Inst; - // Hash in all of the operands as pointers. - unsigned Res = 0; - for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) - Res ^= getHash(Inst->getOperand(i)) << (i & 0xF); + if (BinaryOperator* BinOp = dyn_cast(Inst)) { + Value *LHS = BinOp->getOperand(0); + Value *RHS = BinOp->getOperand(1); + if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1)) + std::swap(LHS, RHS); + + if (isa(BinOp)) { + // Hash the overflow behavior + unsigned Overflow = + BinOp->hasNoSignedWrap() * OverflowingBinaryOperator::NoSignedWrap | + BinOp->hasNoUnsignedWrap() * OverflowingBinaryOperator::NoUnsignedWrap; + return hash_combine(BinOp->getOpcode(), Overflow, LHS, RHS); + } - if (CastInst *CI = dyn_cast(Inst)) - Res ^= getHash(CI->getType()); - else if (CmpInst *CI = dyn_cast(Inst)) - Res ^= CI->getPredicate(); - else if (const ExtractValueInst *EVI = dyn_cast(Inst)) { - for (ExtractValueInst::idx_iterator I = EVI->idx_begin(), - E = EVI->idx_end(); I != E; ++I) - Res ^= *I; - } else if (const InsertValueInst *IVI = dyn_cast(Inst)) { - for (InsertValueInst::idx_iterator I = IVI->idx_begin(), - E = IVI->idx_end(); I != E; ++I) - Res ^= *I; - } else { - // nothing extra to hash in. 
- assert((isa(Inst) || - isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst) || - isa(Inst) || isa(Inst)) && - "Invalid/unknown instruction"); + return hash_combine(BinOp->getOpcode(), LHS, RHS); } + if (CmpInst *CI = dyn_cast(Inst)) { + Value *LHS = CI->getOperand(0); + Value *RHS = CI->getOperand(1); + CmpInst::Predicate Pred = CI->getPredicate(); + if (Inst->getOperand(0) > Inst->getOperand(1)) { + std::swap(LHS, RHS); + Pred = CI->getSwappedPredicate(); + } + return hash_combine(Inst->getOpcode(), Pred, LHS, RHS); + } + + if (CastInst *CI = dyn_cast(Inst)) + return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0)); + + if (const ExtractValueInst *EVI = dyn_cast(Inst)) + return hash_combine(EVI->getOpcode(), EVI->getOperand(0), + hash_combine_range(EVI->idx_begin(), EVI->idx_end())); + + if (const InsertValueInst *IVI = dyn_cast(Inst)) + return hash_combine(IVI->getOpcode(), IVI->getOperand(0), + IVI->getOperand(1), + hash_combine_range(IVI->idx_begin(), IVI->idx_end())); + + assert((isa(Inst) || isa(Inst) || + isa(Inst) || isa(Inst) || + isa(Inst) || isa(Inst) || + isa(Inst)) && "Invalid/unknown instruction"); + // Mix in the opcode. - return (Res << 1) ^ Inst->getOpcode(); + return hash_combine(Inst->getOpcode(), + hash_combine_range(Inst->value_op_begin(), + Inst->value_op_end())); } bool DenseMapInfo::isEqual(SimpleValue LHS, SimpleValue RHS) { @@ -128,7 +150,41 @@ bool DenseMapInfo::isEqual(SimpleValue LHS, SimpleValue RHS) { return LHSI == RHSI; if (LHSI->getOpcode() != RHSI->getOpcode()) return false; - return LHSI->isIdenticalTo(RHSI); + if (LHSI->isIdenticalTo(RHSI)) return true; + + // If we're not strictly identical, we still might be a commutable instruction + if (BinaryOperator *LHSBinOp = dyn_cast(LHSI)) { + if (!LHSBinOp->isCommutative()) + return false; + + assert(isa(RHSI) + && "same opcode, but different instruction type?"); + BinaryOperator *RHSBinOp = cast(RHSI); + + // Check overflow attributes + if (isa(LHSBinOp)) { + assert(isa(RHSBinOp) + && "same opcode, but different operator type?"); + if (LHSBinOp->hasNoUnsignedWrap() != RHSBinOp->hasNoUnsignedWrap() || + LHSBinOp->hasNoSignedWrap() != RHSBinOp->hasNoSignedWrap()) + return false; + } + + // Commuted equality + return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) && + LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0); + } + if (CmpInst *LHSCmp = dyn_cast(LHSI)) { + assert(isa(RHSI) + && "same opcode, but different instruction type?"); + CmpInst *RHSCmp = cast(RHSI); + // Commuted equality + return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) && + LHSCmp->getOperand(1) == RHSCmp->getOperand(0) && + LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate(); + } + + return false; } //===----------------------------------------------------------------------===// @@ -216,7 +272,7 @@ namespace { /// cases. 
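Aside (not part of the patch): the getHashValue/isEqual changes above canonicalize the operand order of commutative binary operators (and use the swapped predicate for reversed compares) so that, for example, 'a + b' and 'b + a' hash and compare as the same expression. A hedged sketch of the canonicalize-then-mix idea follows, with hash_mix standing in for llvm::hash_combine.

    #include <cstdint>
    #include <functional>
    #include <utility>

    // Mix two hash values; a stand-in for llvm::hash_combine.
    uint64_t hash_mix(uint64_t seed, uint64_t v) {
      return seed ^ (v + 0x9e3779b97f4a7c15ULL + (seed << 6) + (seed >> 2));
    }

    // Hash a commutative operation: order the operands first so that both
    // operand orders land in the same bucket, as getHashValue now does for
    // commutative BinaryOperators.
    uint64_t hash_commutative(unsigned opcode, const void *lhs, const void *rhs) {
      if (lhs > rhs)
        std::swap(lhs, rhs);
      uint64_t h = hash_mix(opcode, std::hash<const void *>()(lhs));
      return hash_mix(h, std::hash<const void *>()(rhs));
    }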
class EarlyCSE : public FunctionPass { public: - const TargetData *TD; + const DataLayout *TD; const TargetLibraryInfo *TLI; DominatorTree *DT; typedef RecyclingAllocatoreraseFromParent(); Changed = true; @@ -506,7 +564,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) { bool EarlyCSE::runOnFunction(Function &F) { std::deque nodesToProcess; - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); DT = &getAnalysis(); diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp index 4822fd0..f003e06 100644 --- a/lib/Transforms/Scalar/GVN.cpp +++ b/lib/Transforms/Scalar/GVN.cpp @@ -41,7 +41,7 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/PatternMatch.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/SSAUpdater.h" @@ -271,16 +271,16 @@ void ValueTable::add(Value *V, uint32_t num) { valueNumbering.insert(std::make_pair(V, num)); } -uint32_t ValueTable::lookup_or_add_call(CallInst* C) { +uint32_t ValueTable::lookup_or_add_call(CallInst *C) { if (AA->doesNotAccessMemory(C)) { Expression exp = create_expression(C); - uint32_t& e = expressionNumbering[exp]; + uint32_t &e = expressionNumbering[exp]; if (!e) e = nextValueNumber++; valueNumbering[C] = e; return e; } else if (AA->onlyReadsMemory(C)) { Expression exp = create_expression(C); - uint32_t& e = expressionNumbering[exp]; + uint32_t &e = expressionNumbering[exp]; if (!e) { e = nextValueNumber++; valueNumbering[C] = e; @@ -413,7 +413,7 @@ uint32_t ValueTable::lookup_or_add(Value *V) { case Instruction::LShr: case Instruction::AShr: case Instruction::And: - case Instruction::Or : + case Instruction::Or: case Instruction::Xor: case Instruction::ICmp: case Instruction::FCmp: @@ -503,7 +503,7 @@ namespace { bool NoLoads; MemoryDependenceAnalysis *MD; DominatorTree *DT; - const TargetData *TD; + const DataLayout *TD; const TargetLibraryInfo *TLI; ValueTable VN; @@ -535,7 +535,7 @@ namespace { InstrsToErase.push_back(I); } - const TargetData *getTargetData() const { return TD; } + const DataLayout *getDataLayout() const { return TD; } DominatorTree &getDominatorTree() const { return *DT; } AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); } MemoryDependenceAnalysis &getMemDep() const { return *MD; } @@ -632,6 +632,7 @@ INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false) +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void GVN::dump(DenseMap& d) { errs() << "{\n"; for (DenseMap::iterator I = d.begin(), @@ -641,6 +642,7 @@ void GVN::dump(DenseMap& d) { } errs() << "}\n"; } +#endif /// IsValueFullyAvailableInBlock - Return true if we can prove that the value /// we're analyzing is fully available in the specified block. As we go, keep @@ -728,7 +730,7 @@ SpeculationFailure: /// CoerceAvailableValueToLoadType will succeed. static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy, - const TargetData &TD) { + const DataLayout &TD) { // If the loaded or stored value is an first class array or struct, don't try // to transform them. We need to be able to bitcast to integer. 
if (LoadTy->isStructTy() || LoadTy->isArrayTy() || @@ -744,7 +746,6 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal, return true; } - /// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and /// then a load from a must-aliased pointer of a different type, try to coerce /// the stored value. LoadedTy is the type of the load we want to replace and @@ -754,7 +755,7 @@ static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal, static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy, Instruction *InsertPt, - const TargetData &TD) { + const DataLayout &TD) { if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD)) return 0; @@ -767,24 +768,25 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal, // If the store and reload are the same size, we can always reuse it. if (StoreSize == LoadSize) { // Pointer to Pointer -> use bitcast. - if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) + if (StoredValTy->getScalarType()->isPointerTy() && + LoadedTy->getScalarType()->isPointerTy()) return new BitCastInst(StoredVal, LoadedTy, "", InsertPt); // Convert source pointers to integers, which can be bitcast. - if (StoredValTy->isPointerTy()) { - StoredValTy = TD.getIntPtrType(StoredValTy->getContext()); + if (StoredValTy->getScalarType()->isPointerTy()) { + StoredValTy = TD.getIntPtrType(StoredValTy); StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt); } Type *TypeToCastTo = LoadedTy; - if (TypeToCastTo->isPointerTy()) - TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext()); + if (TypeToCastTo->getScalarType()->isPointerTy()) + TypeToCastTo = TD.getIntPtrType(TypeToCastTo); if (StoredValTy != TypeToCastTo) StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt); // Cast to pointer if the load needs a pointer type. - if (LoadedTy->isPointerTy()) + if (LoadedTy->getScalarType()->isPointerTy()) StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt); return StoredVal; @@ -796,8 +798,8 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal, assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail"); // Convert source pointers to integers, which can be manipulated. - if (StoredValTy->isPointerTy()) { - StoredValTy = TD.getIntPtrType(StoredValTy->getContext()); + if (StoredValTy->getScalarType()->isPointerTy()) { + StoredValTy = TD.getIntPtrType(StoredValTy); StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt); } @@ -822,7 +824,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal, return StoredVal; // If the result is a pointer, inttoptr. - if (LoadedTy->isPointerTy()) + if (LoadedTy->getScalarType()->isPointerTy()) return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt); // Otherwise, bitcast. @@ -840,7 +842,7 @@ static Value *CoerceAvailableValueToLoadType(Value *StoredVal, static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, Value *WritePtr, uint64_t WriteSizeInBits, - const TargetData &TD) { + const DataLayout &TD) { // If the loaded or stored value is a first class array or struct, don't try // to transform them. We need to be able to bitcast to integer. if (LoadTy->isStructTy() || LoadTy->isArrayTy()) @@ -913,7 +915,7 @@ static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr, /// memdep query of a load that ends up being a clobbering store. 
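Aside (not part of the patch): CoerceAvailableValueToLoadType, updated above to handle vectors of pointers via getScalarType(), is what lets GVN forward a stored value to a later load of a different but same-sized type by inserting bitcast/ptrtoint/inttoptr conversions instead of reloading. A hedged C++ illustration of the pattern at the source level; the pass itself works on IR, and the function name is illustrative.

    #include <cstdint>
    #include <cstring>

    // The memcpy stores 32 bits of integer data that the following float load
    // reads back; GVN can forward the stored bits to the load through a
    // bit-level conversion rather than keeping the memory round trip.
    float forward_store_to_load(float *p, uint32_t bits) {
      std::memcpy(p, &bits, sizeof bits);  // store of an i32-sized value
      return *p;                           // same-size load of a different type
    }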
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, StoreInst *DepSI, - const TargetData &TD) { + const DataLayout &TD) { // Cannot handle reading from store of first-class aggregate yet. if (DepSI->getValueOperand()->getType()->isStructTy() || DepSI->getValueOperand()->getType()->isArrayTy()) @@ -929,7 +931,7 @@ static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr, /// memdep query of a load that ends up being clobbered by another load. See if /// the other load can feed into the second load. static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, - LoadInst *DepLI, const TargetData &TD){ + LoadInst *DepLI, const DataLayout &TD){ // Cannot handle reading from store of first-class aggregate yet. if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy()) return -1; @@ -957,7 +959,7 @@ static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, MemIntrinsic *MI, - const TargetData &TD) { + const DataLayout &TD) { // If the mem operation is a non-constant size, we can't handle it. ConstantInt *SizeCst = dyn_cast(MI->getLength()); if (SizeCst == 0) return -1; @@ -1007,7 +1009,7 @@ static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr, /// before we give up. static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset, Type *LoadTy, - Instruction *InsertPt, const TargetData &TD){ + Instruction *InsertPt, const DataLayout &TD){ LLVMContext &Ctx = SrcVal->getType()->getContext(); uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8; @@ -1017,8 +1019,9 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset, // Compute which bits of the stored value are being used by the load. Convert // to an integer type to start with. - if (SrcVal->getType()->isPointerTy()) - SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx)); + if (SrcVal->getType()->getScalarType()->isPointerTy()) + SrcVal = Builder.CreatePtrToInt(SrcVal, + TD.getIntPtrType(SrcVal->getType())); if (!SrcVal->getType()->isIntegerTy()) SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8)); @@ -1046,7 +1049,7 @@ static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset, static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, Type *LoadTy, Instruction *InsertPt, GVN &gvn) { - const TargetData &TD = *gvn.getTargetData(); + const DataLayout &TD = *gvn.getDataLayout(); // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to // widen SrcVal out to a larger load. unsigned SrcValSize = TD.getTypeStoreSize(SrcVal->getType()); @@ -1105,7 +1108,7 @@ static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset, /// memdep query of a load that ends up being a clobbering mem intrinsic. 
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset, Type *LoadTy, Instruction *InsertPt, - const TargetData &TD){ + const DataLayout &TD){ LLVMContext &Ctx = LoadTy->getContext(); uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8; @@ -1229,7 +1232,7 @@ struct AvailableValueInBlock { if (isSimpleValue()) { Res = getSimpleValue(); if (Res->getType() != LoadTy) { - const TargetData *TD = gvn.getTargetData(); + const DataLayout *TD = gvn.getDataLayout(); assert(TD && "Need target data to handle type mismatch case"); Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), *TD); @@ -1251,7 +1254,7 @@ struct AvailableValueInBlock { << *Res << '\n' << "\n\n\n"); } } else { - const TargetData *TD = gvn.getTargetData(); + const DataLayout *TD = gvn.getDataLayout(); assert(TD && "Need target data to handle type mismatch case"); Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy, BB->getTerminator(), *TD); @@ -1299,7 +1302,7 @@ static Value *ConstructSSAForLoadSet(LoadInst *LI, Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent()); // If new PHI nodes were created, notify alias analysis. - if (V->getType()->isPointerTy()) { + if (V->getType()->getScalarType()->isPointerTy()) { AliasAnalysis *AA = gvn.getAliasAnalysis(); for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i) @@ -1436,7 +1439,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { Instruction *DepInst = DepInfo.getInst(); // Loading the allocation -> undef. - if (isa(DepInst) || isMallocLikeFn(DepInst) || + if (isa(DepInst) || isMallocLikeFn(DepInst, TLI) || // Loading immediately after lifetime begin -> undef. isLifetimeStart(DepInst)) { ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, @@ -1496,7 +1499,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { if (isa(V)) V->takeName(LI); - if (V->getType()->isPointerTy()) + if (V->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(V); markInstructionForDeletion(LI); ++NumGVNLoad; @@ -1728,7 +1731,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) { LI->replaceAllUsesWith(V); if (isa(V)) V->takeName(LI); - if (V->getType()->isPointerTy()) + if (V->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(V); markInstructionForDeletion(LI); ++NumPRELoad; @@ -1855,7 +1858,7 @@ bool GVN::processLoad(LoadInst *L) { // Replace the load! L->replaceAllUsesWith(AvailVal); - if (AvailVal->getType()->isPointerTy()) + if (AvailVal->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(AvailVal); markInstructionForDeletion(L); ++NumGVNLoad; @@ -1912,7 +1915,7 @@ bool GVN::processLoad(LoadInst *L) { // Remove it! L->replaceAllUsesWith(StoredVal); - if (StoredVal->getType()->isPointerTy()) + if (StoredVal->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(StoredVal); markInstructionForDeletion(L); ++NumGVNLoad; @@ -1941,7 +1944,7 @@ bool GVN::processLoad(LoadInst *L) { // Remove it! patchAndReplaceAllUsesWith(AvailableVal, L); - if (DepLI->getType()->isPointerTy()) + if (DepLI->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(DepLI); markInstructionForDeletion(L); ++NumGVNLoad; @@ -1951,7 +1954,7 @@ bool GVN::processLoad(LoadInst *L) { // If this load really doesn't depend on anything, then we must be loading an // undef value. This can happen when loading for a fresh allocation with no // intervening stores, for example. 
- if (isa(DepInst) || isMallocLikeFn(DepInst)) { + if (isa(DepInst) || isMallocLikeFn(DepInst, TLI)) { L->replaceAllUsesWith(UndefValue::get(L->getType())); markInstructionForDeletion(L); ++NumGVNLoad; @@ -2182,7 +2185,7 @@ bool GVN::processInstruction(Instruction *I) { // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify. if (Value *V = SimplifyInstruction(I, TD, TLI, DT)) { I->replaceAllUsesWith(V); - if (MD && V->getType()->isPointerTy()) + if (MD && V->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(V); markInstructionForDeletion(I); ++NumGVNSimpl; @@ -2231,12 +2234,20 @@ bool GVN::processInstruction(Instruction *I) { Value *SwitchCond = SI->getCondition(); BasicBlock *Parent = SI->getParent(); bool Changed = false; + + // Remember how many outgoing edges there are to every successor. + SmallDenseMap SwitchEdges; + for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i) + ++SwitchEdges[SI->getSuccessor(i)]; + for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) { BasicBlock *Dst = i.getCaseSuccessor(); - BasicBlockEdge E(Parent, Dst); - if (E.isSingleEdge()) + // If there is only a single edge, propagate the case value into it. + if (SwitchEdges.lookup(Dst) == 1) { + BasicBlockEdge E(Parent, Dst); Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E); + } } return Changed; } @@ -2274,7 +2285,7 @@ bool GVN::processInstruction(Instruction *I) { // Remove it! patchAndReplaceAllUsesWith(repl, I); - if (MD && repl->getType()->isPointerTy()) + if (MD && repl->getType()->getScalarType()->isPointerTy()) MD->invalidateCachedPointerInfo(repl); markInstructionForDeletion(I); return true; @@ -2285,7 +2296,7 @@ bool GVN::runOnFunction(Function& F) { if (!NoLoads) MD = &getAnalysis(); DT = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); VN.setAliasAnalysis(&getAnalysis()); VN.setMemDep(MD); @@ -2522,7 +2533,7 @@ bool GVN::performPRE(Function &F) { addToLeaderTable(ValNo, Phi, CurrentBlock); Phi->setDebugLoc(CurInst->getDebugLoc()); CurInst->replaceAllUsesWith(Phi); - if (Phi->getType()->isPointerTy()) { + if (Phi->getType()->getScalarType()->isPointerTy()) { // Because we have added a PHI-use of the pointer value, it has now // "escaped" from alias analysis' perspective. We need to inform // AA of this. 
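Aside (not part of the patch): the processInstruction change above counts the outgoing edges to each switch successor and propagates the case value only along successors reached by a single edge, replacing the per-case isSingleEdge() check. A hedged illustration of the equality being propagated; the function name is illustrative.

    // Inside 'case 3' there is exactly one incoming switch edge, so along that
    // edge GVN knows x == 3 and can fold uses of x in the block to a constant.
    int describe(int x) {
      switch (x) {
      case 3:
        return x * 7;   // becomes 'return 21'
      default:
        return 0;
      }
    }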
diff --git a/lib/Transforms/Scalar/GlobalMerge.cpp b/lib/Transforms/Scalar/GlobalMerge.cpp index b36a3cb..6301aad 100644 --- a/lib/Transforms/Scalar/GlobalMerge.cpp +++ b/lib/Transforms/Scalar/GlobalMerge.cpp @@ -62,7 +62,7 @@ #include "llvm/Intrinsics.h" #include "llvm/Module.h" #include "llvm/Pass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/ADT/Statistic.h" @@ -98,9 +98,9 @@ namespace { } struct GlobalCmp { - const TargetData *TD; + const DataLayout *TD; - GlobalCmp(const TargetData *td) : TD(td) { } + GlobalCmp(const DataLayout *td) : TD(td) { } bool operator()(const GlobalVariable *GV1, const GlobalVariable *GV2) { Type *Ty1 = cast(GV1->getType())->getElementType(); @@ -119,7 +119,7 @@ INITIALIZE_PASS(GlobalMerge, "global-merge", bool GlobalMerge::doMerge(SmallVectorImpl &Globals, Module &M, bool isConst) const { - const TargetData *TD = TLI->getTargetData(); + const DataLayout *TD = TLI->getDataLayout(); // FIXME: Infer the maximum possible offset depending on the actual users // (these max offsets are different for the users inside Thumb or ARM @@ -170,7 +170,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl &Globals, bool GlobalMerge::doInitialization(Module &M) { SmallVector Globals, ConstGlobals, BSSGlobals; - const TargetData *TD = TLI->getTargetData(); + const DataLayout *TD = TLI->getDataLayout(); unsigned MaxOffset = TLI->getMaximalGlobalOffset(); bool Changed = false; diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp index 37f8bdf..310fd61 100644 --- a/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -43,7 +43,8 @@ #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/SimplifyIndVar.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" @@ -67,7 +68,8 @@ namespace { LoopInfo *LI; ScalarEvolution *SE; DominatorTree *DT; - TargetData *TD; + DataLayout *TD; + TargetLibraryInfo *TLI; SmallVector DeadInsts; bool Changed; @@ -218,8 +220,6 @@ static Instruction *getInsertPointForUses(Instruction *User, Value *Def, /// ConvertToSInt - Convert APF to an integer, if possible. static bool ConvertToSInt(const APFloat &APF, int64_t &IntVal) { bool isExact = false; - if (&APF.getSemantics() == &APFloat::PPCDoubleDouble) - return false; // See if we can convert this to an int64_t uint64_t UIntVal; if (APF.convertToInteger(&UIntVal, 64, true, APFloat::rmTowardZero, @@ -414,11 +414,11 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { // new comparison. NewCompare->takeName(Compare); Compare->replaceAllUsesWith(NewCompare); - RecursivelyDeleteTriviallyDeadInstructions(Compare); + RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI); // Delete the old floating point increment. Incr->replaceAllUsesWith(UndefValue::get(Incr->getType())); - RecursivelyDeleteTriviallyDeadInstructions(Incr); + RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI); // If the FP induction variable still has uses, this is because something else // in the loop uses its value. 
In order to canonicalize the induction @@ -431,7 +431,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) { Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv", PN->getParent()->getFirstInsertionPt()); PN->replaceAllUsesWith(Conv); - RecursivelyDeleteTriviallyDeadInstructions(PN); + RecursivelyDeleteTriviallyDeadInstructions(PN, TLI); } Changed = true; } @@ -549,15 +549,17 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) { PN->setIncomingValue(i, ExitVal); - // If this instruction is dead now, delete it. - RecursivelyDeleteTriviallyDeadInstructions(Inst); + // If this instruction is dead now, delete it. Don't do it now to avoid + // invalidating iterators. + if (isInstructionTriviallyDead(Inst, TLI)) + DeadInsts.push_back(Inst); if (NumPreds == 1) { // Completely replace a single-pred PHI. This is safe, because the // NewVal won't be variant in the loop, so we don't need an LCSSA phi // node anymore. PN->replaceAllUsesWith(ExitVal); - RecursivelyDeleteTriviallyDeadInstructions(PN); + PN->eraseFromParent(); } } if (NumPreds != 1) { @@ -595,13 +597,13 @@ namespace { class WideIVVisitor : public IVVisitor { ScalarEvolution *SE; - const TargetData *TD; + const DataLayout *TD; public: WideIVInfo WI; WideIVVisitor(PHINode *NarrowIV, ScalarEvolution *SCEV, - const TargetData *TData) : + const DataLayout *TData) : SE(SCEV), TD(TData) { WI.NarrowIV = NarrowIV; } // Implement the interface used by simplifyUsersOfIV. @@ -1259,8 +1261,13 @@ static bool needsLFTR(Loop *L, DominatorTree *DT) { if (!Phi) return true; + // Do LFTR if PHI node is defined in the loop, but is *not* a counter. + int Idx = Phi->getBasicBlockIndex(L->getLoopLatch()); + if (Idx < 0) + return true; + // Do LFTR if the exit condition's IV is *not* a simple counter. - Value *IncV = Phi->getIncomingValueForBlock(L->getLoopLatch()); + Value *IncV = Phi->getIncomingValue(Idx); return Phi != getLoopPhiForCounter(IncV, L, DT); } @@ -1339,7 +1346,7 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) { /// could at least handle constant BECounts. static PHINode * FindLoopCounter(Loop *L, const SCEV *BECount, - ScalarEvolution *SE, DominatorTree *DT, const TargetData *TD) { + ScalarEvolution *SE, DominatorTree *DT, const DataLayout *TD) { uint64_t BCWidth = SE->getTypeSizeInBits(BECount->getType()); Value *Cond = @@ -1696,7 +1703,8 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { LI = &getAnalysis(); SE = &getAnalysis(); DT = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); + TLI = getAnalysisIfAvailable(); DeadInsts.clear(); Changed = false; @@ -1763,7 +1771,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { while (!DeadInsts.empty()) if (Instruction *Inst = dyn_cast_or_null(&*DeadInsts.pop_back_val())) - RecursivelyDeleteTriviallyDeadInstructions(Inst); + RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI); // The Rewriter may not be used from this point on. @@ -1772,7 +1780,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { SinkUnusedInvariants(L); // Clean up dead instructions. - Changed |= DeleteDeadPHIs(L->getHeader()); + Changed |= DeleteDeadPHIs(L->getHeader(), TLI); // Check a post-condition. 
assert(L->isLCSSAForm(*DT) && "Indvars did not leave the loop in lcssa form!"); diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp index dd42c59..e7ffa09 100644 --- a/lib/Transforms/Scalar/JumpThreading.cpp +++ b/lib/Transforms/Scalar/JumpThreading.cpp @@ -23,7 +23,7 @@ #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/SSAUpdater.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" @@ -75,7 +75,7 @@ namespace { /// revectored to the false side of the second if. /// class JumpThreading : public FunctionPass { - TargetData *TD; + DataLayout *TD; TargetLibraryInfo *TLI; LazyValueInfo *LVI; #ifdef NDEBUG @@ -147,7 +147,7 @@ FunctionPass *llvm::createJumpThreadingPass() { return new JumpThreading(); } /// bool JumpThreading::runOnFunction(Function &F) { DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n"); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); LVI = &getAnalysis(); @@ -1455,7 +1455,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB, // At this point, the IR is fully up to date and consistent. Do a quick scan // over the new instructions and zap any that are constants or dead. This // frequently happens because of phi translation. - SimplifyInstructionsInBlock(NewBB, TD); + SimplifyInstructionsInBlock(NewBB, TD, TLI); // Threaded an edge! ++NumThreads; diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp index 0192e92..4818437 100644 --- a/lib/Transforms/Scalar/LICM.cpp +++ b/lib/Transforms/Scalar/LICM.cpp @@ -46,7 +46,7 @@ #include "llvm/Analysis/ValueTracking.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/SSAUpdater.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Support/CFG.h" #include "llvm/Support/CommandLine.h" @@ -100,7 +100,7 @@ namespace { LoopInfo *LI; // Current LoopInfo DominatorTree *DT; // Dominator Tree for the current Loop. - TargetData *TD; // TargetData for constant folding. + DataLayout *TD; // DataLayout for constant folding. TargetLibraryInfo *TLI; // TargetLibraryInfo for constant folding. // State that is updated as we process loops. @@ -108,6 +108,9 @@ namespace { BasicBlock *Preheader; // The preheader block of the current loop... Loop *CurLoop; // The current loop we are working on... AliasSetTracker *CurAST; // AliasSet information for the current loop... + bool MayThrow; // The current loop contains an instruction which + // may throw, thus preventing code motion of + // instructions with side effects. DenseMap LoopToAliasSetMap; /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info. @@ -204,7 +207,7 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) { AA = &getAnalysis(); DT = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); CurAST = new AliasSetTracker(*AA); @@ -240,6 +243,15 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) { CurAST->add(*BB); // Incorporate the specified basic block } + MayThrow = false; + // TODO: We've already searched for instructions which may throw in subloops. + // We may want to reuse this information. 
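Aside (not part of the patch): the MayThrow flag introduced in the LICM hunk above, and filled in by the per-block scan that continues just below, records whether any instruction in the loop may throw; isGuaranteedToExecute then refuses to treat later instructions as guaranteed, which blocks hoisting of operations that could fault. A hedged example of the hazard being avoided; may_throw and sum_scaled are hypothetical names.

    void may_throw();   // hypothetical call that may unwind out of the loop

    // 'a / b' is loop-invariant, but it is not guaranteed to execute: if
    // may_throw() unwinds on the first iteration, the original program never
    // divides, so hoisting the division above the loop could introduce a trap
    // on b == 0. With MayThrow set, LICM now leaves it in place.
    int sum_scaled(const int *v, int n, int a, int b) {
      int s = 0;
      for (int i = 0; i < n; ++i) {
        may_throw();
        s += v[i] * (a / b);
      }
      return s;
    }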
+ for (Loop::block_iterator BB = L->block_begin(), BBE = L->block_end(); + (BB != BBE) && !MayThrow ; ++BB) + for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); + (I != E) && !MayThrow; ++I) + MayThrow |= I->mayThrow(); + // We want to visit all of the instructions in this loop... that are not parts // of our subloops (they have already had their invariants hoisted out of // their loop, into this loop, so there is no need to process the BODIES of @@ -307,7 +319,7 @@ void LICM::SinkRegion(DomTreeNode *N) { // If the instruction is dead, we would try to sink it because it isn't used // in the loop, instead, just delete it. - if (isInstructionTriviallyDead(&I)) { + if (isInstructionTriviallyDead(&I, TLI)) { DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n'); ++II; CurAST->deleteValue(&I); @@ -418,17 +430,22 @@ bool LICM::canSinkOrHoistInst(Instruction &I) { if (!FoundMod) return true; } - // FIXME: This should use mod/ref information to see if we can hoist or sink - // the call. + // FIXME: This should use mod/ref information to see if we can hoist or + // sink the call. return false; } - // Otherwise these instructions are hoistable/sinkable - return isa(I) || isa(I) || - isa(I) || isa(I) || isa(I) || - isa(I) || isa(I) || - isa(I); + // Only these instructions are hoistable/sinkable. + bool HoistableKind = (isa(I) || isa(I) || + isa(I) || isa(I) || + isa(I) || isa(I) || + isa(I) || + isa(I)); + if (!HoistableKind) + return false; + + return isSafeToExecuteUnconditionally(I); } /// isNotUsedInLoop - Return true if the only users of this instruction are @@ -604,6 +621,12 @@ bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) { } bool LICM::isGuaranteedToExecute(Instruction &Inst) { + + // Somewhere in this loop there is an instruction which may throw and make us + // exit the loop. + if (MayThrow) + return false; + // Otherwise we have to check to make sure that the instruction dominates all // of the exit blocks. If it doesn't, then there is a path out of the loop // which does not execute this instruction, so we can't hoist it. diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp index ac1082c..a44e798 100644 --- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -54,7 +54,7 @@ #include "llvm/Analysis/ValueTracking.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; @@ -65,7 +65,7 @@ STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores"); namespace { class LoopIdiomRecognize : public LoopPass { Loop *CurLoop; - const TargetData *TD; + const DataLayout *TD; DominatorTree *DT; ScalarEvolution *SE; TargetLibraryInfo *TLI; @@ -132,7 +132,8 @@ Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); } /// and zero out all the operands of this instruction. If any of them become /// dead, delete them and the computation tree that feeds them. 
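The LICM hunk above precomputes a MayThrow flag by scanning every instruction of the loop once, and isGuaranteedToExecute later refuses to treat anything as guaranteed when the flag is set. A rough standalone sketch of the early-exit double scan (the types and mayThrow() below are stand-ins, not the LLVM classes):

// maythrow_scan.cpp - early-exit scan over all blocks of a "loop" for an
// instruction that may throw, mirroring the shape of the LICM change above.
#include <iostream>
#include <vector>

struct Inst {
  bool ThrowFlag;
  bool mayThrow() const { return ThrowFlag; }
};
typedef std::vector<Inst> Block;

int main() {
  std::vector<Block> LoopBlocks = {
    {{false}, {false}},
    {{false}, {true}},   // one throwing instruction somewhere in the loop
    {{false}},
  };

  bool MayThrow = false;
  // Stop both loops as soon as the flag is set; no need to scan further.
  for (auto BB = LoopBlocks.begin(), BBE = LoopBlocks.end();
       BB != BBE && !MayThrow; ++BB)
    for (auto I = BB->begin(), E = BB->end(); I != E && !MayThrow; ++I)
      MayThrow |= I->mayThrow();

  // With MayThrow set, a hoisting pass would conservatively assume nothing
  // later in the loop is guaranteed to execute.
  std::cout << (MayThrow ? "loop may throw\n" : "loop cannot throw\n");
  return 0;
}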
/// -static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) { +static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE, + const TargetLibraryInfo *TLI) { SmallVector NowDeadInsts; NowDeadInsts.push_back(I); @@ -153,7 +154,7 @@ static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) { if (!Op->use_empty()) continue; if (Instruction *OpI = dyn_cast(Op)) - if (isInstructionTriviallyDead(OpI)) + if (isInstructionTriviallyDead(OpI, TLI)) NowDeadInsts.push_back(OpI); } @@ -164,15 +165,21 @@ static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) { /// deleteIfDeadInstruction - If the specified value is a dead instruction, /// delete it and any recursively used instructions. -static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE) { +static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE, + const TargetLibraryInfo *TLI) { if (Instruction *I = dyn_cast(V)) - if (isInstructionTriviallyDead(I)) - deleteDeadInstruction(I, SE); + if (isInstructionTriviallyDead(I, TLI)) + deleteDeadInstruction(I, SE, TLI); } bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) { CurLoop = L; + // If the loop could not be converted to canonical form, it must have an + // indirectbr in it, just give up. + if (!L->getLoopPreheader()) + return false; + // Disable loop idiom recognition if the function's name is a common idiom. StringRef Name = L->getHeader()->getParent()->getName(); if (Name == "memset" || Name == "memcpy") @@ -192,7 +199,7 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) { return false; // We require target data for now. - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); if (TD == 0) return false; DT = &getAnalysis(); @@ -401,7 +408,7 @@ static bool mayLoopAccessLocation(Value *Ptr,AliasAnalysis::ModRefResult Access, /// /// Note that we don't ever attempt to use memset_pattern8 or 4, because these /// just replicate their input array and then pass on to memset_pattern16. -static Constant *getMemSetPatternValue(Value *V, const TargetData &TD) { +static Constant *getMemSetPatternValue(Value *V, const DataLayout &TD) { // If the value isn't a constant, we can't promote it to being in a constant // array. We could theoretically do a store to an alloca or something, but // that doesn't seem worthwhile. @@ -490,7 +497,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize, StoreSize, getAnalysis(), TheStore)){ Expander.clear(); // If we generated new code for the base pointer, clean up. - deleteIfDeadInstruction(BasePtr, *SE); + deleteIfDeadInstruction(BasePtr, *SE, TLI); return false; } @@ -538,7 +545,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize, // Okay, the memset has been formed. Zap the original store and anything that // feeds into it. - deleteDeadInstruction(TheStore, *SE); + deleteDeadInstruction(TheStore, *SE, TLI); ++NumMemSet; return true; } @@ -579,7 +586,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize, getAnalysis(), SI)) { Expander.clear(); // If we generated new code for the base pointer, clean up. - deleteIfDeadInstruction(StoreBasePtr, *SE); + deleteIfDeadInstruction(StoreBasePtr, *SE, TLI); return false; } @@ -594,8 +601,8 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize, StoreSize, getAnalysis(), SI)) { Expander.clear(); // If we generated new code for the base pointer, clean up. 
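The deleteDeadInstruction hunk above keeps its existing worklist scheme while threading TargetLibraryInfo through it: removing one instruction drops a use from each operand, and any operand that becomes trivially dead is queued and removed in turn. A standalone sketch of that cascading worklist deletion over a tiny use graph (all names here are illustrative only):

// cascade_delete.cpp - worklist-style deletion: removing one node may make
// its operands dead, which are then queued and removed in turn.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct Node {
  std::vector<std::string> Operands; // values this node uses
  int UseCount;                      // how many other nodes still use it
};

int main() {
  std::map<std::string, Node> Graph;
  Graph["a"].UseCount = 1;                                      // used only by b
  Graph["b"].Operands = {"a"};     Graph["b"].UseCount = 1;     // used only by store
  Graph["store"].Operands = {"b"}; Graph["store"].UseCount = 0; // the root to delete

  std::vector<std::string> NowDead;
  NowDead.push_back("store");
  while (!NowDead.empty()) {
    std::string Name = NowDead.back();
    NowDead.pop_back();

    // Dropping this node removes one use from each operand; any operand whose
    // use count reaches zero becomes dead too and is queued for deletion.
    for (const std::string &Op : Graph[Name].Operands)
      if (--Graph[Op].UseCount == 0)
        NowDead.push_back(Op);

    Graph.erase(Name);
    std::cout << "deleted " << Name << '\n';
  }
  return 0;
}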
- deleteIfDeadInstruction(LoadBasePtr, *SE); - deleteIfDeadInstruction(StoreBasePtr, *SE); + deleteIfDeadInstruction(LoadBasePtr, *SE, TLI); + deleteIfDeadInstruction(StoreBasePtr, *SE, TLI); return false; } @@ -628,7 +635,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize, // Okay, the memset has been formed. Zap the original store and anything that // feeds into it. - deleteDeadInstruction(SI, *SE); + deleteDeadInstruction(SI, *SE, TLI); ++NumMemCpy; return true; } diff --git a/lib/Transforms/Scalar/LoopInstSimplify.cpp b/lib/Transforms/Scalar/LoopInstSimplify.cpp index 982400c..558f62e 100644 --- a/lib/Transforms/Scalar/LoopInstSimplify.cpp +++ b/lib/Transforms/Scalar/LoopInstSimplify.cpp @@ -18,7 +18,7 @@ #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/Support/Debug.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" @@ -66,7 +66,7 @@ Pass *llvm::createLoopInstSimplifyPass() { bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { DominatorTree *DT = getAnalysisIfAvailable(); LoopInfo *LI = &getAnalysis(); - const TargetData *TD = getAnalysisIfAvailable(); + const DataLayout *TD = getAnalysisIfAvailable(); const TargetLibraryInfo *TLI = &getAnalysis(); SmallVector ExitBlocks; @@ -120,7 +120,7 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { ++NumSimplified; } } - LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I); + LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI); if (IsSubloopHeader && !isa(I)) break; diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp index 7eeb152..abe07aa 100644 --- a/lib/Transforms/Scalar/LoopRotation.cpp +++ b/lib/Transforms/Scalar/LoopRotation.cpp @@ -24,6 +24,7 @@ #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/SSAUpdater.h" #include "llvm/Transforms/Utils/ValueMapper.h" +#include "llvm/Support/CFG.h" #include "llvm/Support/Debug.h" #include "llvm/ADT/Statistic.h" using namespace llvm; @@ -256,6 +257,7 @@ bool LoopRotate::rotateLoop(Loop *L) { return false; BasicBlock *OrigHeader = L->getHeader(); + BasicBlock *OrigLatch = L->getLoopLatch(); BranchInst *BI = dyn_cast(OrigHeader->getTerminator()); if (BI == 0 || BI->isUnconditional()) @@ -267,13 +269,9 @@ bool LoopRotate::rotateLoop(Loop *L) { if (!L->isLoopExiting(OrigHeader)) return false; - // Updating PHInodes in loops with multiple exits adds complexity. - // Keep it simple, and restrict loop rotation to loops with one exit only. - // In future, lift this restriction and support for multiple exits if - // required. - SmallVector ExitBlocks; - L->getExitBlocks(ExitBlocks); - if (ExitBlocks.size() > 1) + // If the loop latch already contains a branch that leaves the loop then the + // loop is already rotated. + if (OrigLatch == 0 || L->isLoopExiting(OrigLatch)) return false; // Check size of original header and reject loop if it is very big. @@ -286,11 +284,10 @@ bool LoopRotate::rotateLoop(Loop *L) { // Now, this loop is suitable for rotation. BasicBlock *OrigPreheader = L->getLoopPreheader(); - BasicBlock *OrigLatch = L->getLoopLatch(); // If the loop could not be converted to canonical form, it must have an // indirectbr in it, just give up. 
- if (OrigPreheader == 0 || OrigLatch == 0) + if (OrigPreheader == 0) return false; // Anything ScalarEvolution may know about this loop or the PHI nodes @@ -298,6 +295,8 @@ bool LoopRotate::rotateLoop(Loop *L) { if (ScalarEvolution *SE = getAnalysisIfAvailable()) SE->forgetLoop(L); + DEBUG(dbgs() << "LoopRotation: rotating "; L->dump()); + // Find new Loop header. NewHeader is a Header's one and only successor // that is inside loop. Header's other successor is outside the // loop. Otherwise loop is not suitable for rotation. @@ -408,10 +407,19 @@ bool LoopRotate::rotateLoop(Loop *L) { // Update DominatorTree to reflect the CFG change we just made. Then split // edges as necessary to preserve LoopSimplify form. if (DominatorTree *DT = getAnalysisIfAvailable()) { - // Since OrigPreheader now has the conditional branch to Exit block, it is - // the dominator of Exit. - DT->changeImmediateDominator(Exit, OrigPreheader); - DT->changeImmediateDominator(NewHeader, OrigPreheader); + // Everything that was dominated by the old loop header is now dominated + // by the original loop preheader. Conceptually the header was merged + // into the preheader, even though we reuse the actual block as a new + // loop latch. + DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader); + SmallVector HeaderChildren(OrigHeaderNode->begin(), + OrigHeaderNode->end()); + DomTreeNode *OrigPreheaderNode = DT->getNode(OrigPreheader); + for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I) + DT->changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode); + + assert(DT->getNode(Exit)->getIDom() == OrigPreheaderNode); + assert(DT->getNode(NewHeader)->getIDom() == OrigPreheaderNode); // Update OrigHeader to be dominated by the new header block. DT->changeImmediateDominator(OrigHeader, OrigLatch); @@ -440,6 +448,35 @@ bool LoopRotate::rotateLoop(Loop *L) { // Update OrigHeader to be dominated by the new header block. DT->changeImmediateDominator(NewHeader, OrigPreheader); DT->changeImmediateDominator(OrigHeader, OrigLatch); + + // Brute force incremental dominator tree update. Call + // findNearestCommonDominator on all CFG predecessors of each child of the + // original header. + DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader); + SmallVector HeaderChildren(OrigHeaderNode->begin(), + OrigHeaderNode->end()); + bool Changed; + do { + Changed = false; + for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I) { + DomTreeNode *Node = HeaderChildren[I]; + BasicBlock *BB = Node->getBlock(); + + pred_iterator PI = pred_begin(BB); + BasicBlock *NearestDom = *PI; + for (pred_iterator PE = pred_end(BB); PI != PE; ++PI) + NearestDom = DT->findNearestCommonDominator(NearestDom, *PI); + + // Remember if this changes the DomTree. + if (Node->getIDom()->getBlock() != NearestDom) { + DT->changeImmediateDominator(BB, NearestDom); + Changed = true; + } + } + + // If the dominator changed, this may have an effect on other + // predecessors, continue until we reach a fixpoint. + } while (Changed); } } @@ -452,6 +489,8 @@ bool LoopRotate::rotateLoop(Loop *L) { // emitted code isn't too gross in this common case. 
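The LoopRotation hunk above repairs the dominator tree by repeatedly setting each affected block's idom to the nearest common dominator of its CFG predecessors until nothing changes. A simplified standalone sketch of that fixpoint on a tiny CFG (the parent-pointer representation and names are illustrative, not LLVM's DomTree API):

// domtree_fixpoint.cpp - repair a stale immediate-dominator tree by repeatedly
// setting each block's idom to the nearest common dominator of its CFG
// predecessors, until a fixpoint is reached.
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <vector>

static std::map<std::string, std::string> IDom; // parent pointers; the root maps to itself

static std::string nearestCommonDom(const std::string &A, const std::string &B) {
  std::set<std::string> Chain;
  for (std::string N = A;; N = IDom[N]) {
    Chain.insert(N);
    if (IDom[N] == N) break;        // reached the root
  }
  for (std::string N = B;; N = IDom[N]) {
    if (Chain.count(N)) return N;   // first ancestor of B that also dominates A
    if (IDom[N] == N) break;
  }
  return B; // not reached for a well-formed tree
}

int main() {
  // CFG predecessors: entry->a, entry->b, a->c, b->c, c->d.
  std::map<std::string, std::vector<std::string>> Preds = {
      {"a", {"entry"}}, {"b", {"entry"}}, {"c", {"a", "b"}}, {"d", {"c"}}};
  // Stale tree: 'c' is wrongly recorded as dominated by 'a'.
  IDom = {{"entry", "entry"}, {"a", "entry"}, {"b", "entry"},
          {"c", "a"}, {"d", "c"}};

  bool Changed;
  do {
    Changed = false;
    for (auto &P : Preds) {
      std::string Nearest = P.second.front();
      for (size_t I = 1, E = P.second.size(); I != E; ++I)
        Nearest = nearestCommonDom(Nearest, P.second[I]);
      if (IDom[P.first] != Nearest) { // the idom changed; other blocks may now
        IDom[P.first] = Nearest;      // need updating too, so keep iterating.
        Changed = true;
      }
    }
  } while (Changed);

  for (auto &P : IDom)
    std::cout << P.first << " idom " << P.second << '\n'; // c idom entry now
  return 0;
}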
MergeBlockIntoPredecessor(OrigHeader, this); + DEBUG(dbgs() << "LoopRotation: into "; L->dump()); + ++NumRotated; return true; } diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index b14a713..958348d 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -54,7 +54,7 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "loop-reduce" -#include "llvm/Transforms/Scalar.h" +#include "llvm/AddressingMode.h" #include "llvm/Constants.h" #include "llvm/Instructions.h" #include "llvm/IntrinsicInst.h" @@ -64,6 +64,7 @@ #include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/ScalarEvolutionExpander.h" #include "llvm/Assembly/Writer.h" +#include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/ADT/SmallBitVector.h" @@ -121,9 +122,11 @@ void RegSortData::print(raw_ostream &OS) const { OS << "[NumUses=" << UsedByIndices.count() << ']'; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void RegSortData::dump() const { print(errs()); errs() << '\n'; } +#endif namespace { @@ -223,7 +226,7 @@ namespace { struct Formula { /// AM - This is used to represent complex addressing, as well as other kinds /// of interesting uses. - TargetLowering::AddrMode AM; + AddrMode AM; /// BaseRegs - The list of "base" registers for this use. When this is /// non-empty, AM.HasBaseReg should be set to true. @@ -414,9 +417,11 @@ void Formula::print(raw_ostream &OS) const { } } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Formula::dump() const { print(errs()); errs() << '\n'; } +#endif /// isAddRecSExtable - Return true if the given addrec can be sign-extended /// without changing its value. @@ -738,7 +743,8 @@ DeleteTriviallyDeadInstructions(SmallVectorImpl &DeadInsts) { bool Changed = false; while (!DeadInsts.empty()) { - Instruction *I = dyn_cast_or_null(&*DeadInsts.pop_back_val()); + Value *V = DeadInsts.pop_back_val(); + Instruction *I = dyn_cast_or_null(V); if (I == 0 || !isInstructionTriviallyDead(I)) continue; @@ -973,9 +979,11 @@ void Cost::print(raw_ostream &OS) const { OS << ", plus " << SetupCost << " setup cost"; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void Cost::dump() const { print(errs()); errs() << '\n'; } +#endif namespace { @@ -1059,9 +1067,11 @@ void LSRFixup::print(raw_ostream &OS) const { OS << ", Offset=" << Offset; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRFixup::dump() const { print(errs()); errs() << '\n'; } +#endif namespace { @@ -1251,14 +1261,16 @@ void LSRUse::print(raw_ostream &OS) const { OS << ", widest fixup type: " << *WidestFixupType; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRUse::dump() const { print(errs()); errs() << '\n'; } +#endif /// isLegalUse - Test whether the use described by AM is "legal", meaning it can /// be completely folded into the user instruction at isel time. This includes /// address-mode folding and special icmp tricks. 
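Several of the LoopStrengthReduce hunks below wrap dump() definitions in "#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)" so the helpers disappear from optimized builds unless dumps are explicitly requested. A minimal self-contained illustration of that preprocessor pattern:

// dump_guard.cpp - keep debug-only dump() helpers out of release builds
// unless a dedicated macro (here LLVM_ENABLE_DUMP) opts back in.
#include <iostream>

struct Cost {
  int Loads = 0, Stores = 0;

  void print(std::ostream &OS) const {
    OS << Loads << " loads, " << Stores << " stores";
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  // Compiled only in asserts builds, or when dumps are explicitly enabled.
  void dump() const { print(std::cerr); std::cerr << '\n'; }
#endif
};

int main() {
  Cost C;
  C.Loads = 2;
  C.Stores = 1;
  C.print(std::cout);
  std::cout << '\n';
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  C.dump(); // available here; an -DNDEBUG build without LLVM_ENABLE_DUMP omits it
#endif
  return 0;
}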
-static bool isLegalUse(const TargetLowering::AddrMode &AM, +static bool isLegalUse(const AddrMode &AM, LSRUse::KindType Kind, Type *AccessTy, const TargetLowering *TLI) { switch (Kind) { @@ -1315,7 +1327,7 @@ static bool isLegalUse(const TargetLowering::AddrMode &AM, llvm_unreachable("Invalid LSRUse Kind!"); } -static bool isLegalUse(TargetLowering::AddrMode AM, +static bool isLegalUse(AddrMode AM, int64_t MinOffset, int64_t MaxOffset, LSRUse::KindType Kind, Type *AccessTy, const TargetLowering *TLI) { @@ -1346,7 +1358,7 @@ static bool isAlwaysFoldable(int64_t BaseOffs, // Conservatively, create an address with an immediate and a // base and a scale. - TargetLowering::AddrMode AM; + AddrMode AM; AM.BaseOffs = BaseOffs; AM.BaseGV = BaseGV; AM.HasBaseReg = HasBaseReg; @@ -1384,7 +1396,7 @@ static bool isAlwaysFoldable(const SCEV *S, // Conservatively, create an address with an immediate and a // base and a scale. - TargetLowering::AddrMode AM; + AddrMode AM; AM.BaseOffs = BaseOffs; AM.BaseGV = BaseGV; AM.HasBaseReg = HasBaseReg; @@ -2009,7 +2021,7 @@ LSRInstance::OptimizeLoopTermCond() { goto decline_post_inc; // Check for possible scaled-address reuse. Type *AccessTy = getAccessType(UI->getUser()); - TargetLowering::AddrMode AM; + AddrMode AM; AM.Scale = C->getSExtValue(); if (TLI->isLegalAddressingMode(AM, AccessTy)) goto decline_post_inc; @@ -3435,9 +3447,11 @@ void WorkItem::print(raw_ostream &OS) const { << " , add offset " << Imm; } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void WorkItem::dump() const { print(errs()); errs() << '\n'; } +#endif /// GenerateCrossUseConstantOffsets - Look for registers which are a constant /// distance apart and try to form reuse opportunities between them. @@ -4451,17 +4465,21 @@ void LSRInstance::RewriteForPHI(PHINode *PN, SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs); NewBB = NewBBs[0]; } - - // If PN is outside of the loop and BB is in the loop, we want to - // move the block to be immediately before the PHI block, not - // immediately after BB. - if (L->contains(BB) && !L->contains(PN)) - NewBB->moveBefore(PN->getParent()); - - // Splitting the edge can reduce the number of PHI entries we have. - e = PN->getNumIncomingValues(); - BB = NewBB; - i = PN->getBasicBlockIndex(BB); + // If NewBB==NULL, then SplitCriticalEdge refused to split because all + // phi predecessors are identical. The simple thing to do is skip + // splitting in this case rather than complicate the API. + if (NewBB) { + // If PN is outside of the loop and BB is in the loop, we want to + // move the block to be immediately before the PHI block, not + // immediately after BB. + if (L->contains(BB) && !L->contains(PN)) + NewBB->moveBefore(PN->getParent()); + + // Splitting the edge can reduce the number of PHI entries we have. 
+ e = PN->getNumIncomingValues(); + BB = NewBB; + i = PN->getBasicBlockIndex(BB); + } } } @@ -4730,9 +4748,11 @@ void LSRInstance::print(raw_ostream &OS) const { print_uses(OS); } +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void LSRInstance::dump() const { print(errs()); errs() << '\n'; } +#endif namespace { diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp index 09a186f..0d781ac 100644 --- a/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -22,7 +22,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/UnrollLoop.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include using namespace llvm; @@ -113,7 +113,7 @@ Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial) { /// ApproximateLoopSize - Approximate the size of the loop. static unsigned ApproximateLoopSize(const Loop *L, unsigned &NumCalls, - const TargetData *TD) { + const DataLayout *TD) { CodeMetrics Metrics; for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E; ++I) @@ -145,7 +145,8 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) { // not user specified. unsigned Threshold = CurrentThreshold; if (!UserThreshold && - Header->getParent()->hasFnAttr(Attribute::OptimizeForSize)) + Header->getParent()->getFnAttributes(). + hasAttribute(Attributes::OptimizeForSize)) Threshold = OptSizeUnrollThreshold; // Find trip count and trip multiple if count is not available @@ -178,7 +179,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) { // Enforce the threshold. if (Threshold != NoThreshold) { - const TargetData *TD = getAnalysisIfAvailable(); + const DataLayout *TD = getAnalysisIfAvailable(); unsigned NumInlineCandidates; unsigned LoopSize = ApproximateLoopSize(L, NumInlineCandidates, TD); DEBUG(dbgs() << " Loop Size = " << LoopSize << "\n"); diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp index 58f7739..047b43e 100644 --- a/lib/Transforms/Scalar/LoopUnswitch.cpp +++ b/lib/Transforms/Scalar/LoopUnswitch.cpp @@ -638,7 +638,8 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) { // Check to see if it would be profitable to unswitch current loop. // Do not do non-trivial unswitch while optimizing for size. - if (OptimizeForSize || F->hasFnAttr(Attribute::OptimizeForSize)) + if (OptimizeForSize || + F->getFnAttributes().hasAttribute(Attributes::OptimizeForSize)) return false; UnswitchNontrivialCondition(LoopCond, Val, currentLoop); @@ -906,13 +907,9 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val, /// specified. 
static void RemoveFromWorklist(Instruction *I, std::vector &Worklist) { - std::vector::iterator WI = std::find(Worklist.begin(), - Worklist.end(), I); - while (WI != Worklist.end()) { - unsigned Offset = WI-Worklist.begin(); - Worklist.erase(WI); - WI = std::find(Worklist.begin()+Offset, Worklist.end(), I); - } + + Worklist.erase(std::remove(Worklist.begin(), Worklist.end(), I), + Worklist.end()); } /// ReplaceUsesOfWith - When we find that I really equals V, remove I from the diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp index 2a5ee33..517657cf 100644 --- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -27,7 +27,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Utils/Local.h" #include @@ -38,8 +38,8 @@ STATISTIC(NumMemSetInfer, "Number of memsets inferred"); STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy"); STATISTIC(NumCpyToSet, "Number of memcpys converted to memset"); -static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx, - bool &VariableIdxFound, const TargetData &TD){ +static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, + bool &VariableIdxFound, const DataLayout &TD){ // Skip over the first indices. gep_type_iterator GTI = gep_type_begin(GEP); for (unsigned i = 1; i != Idx; ++i, ++GTI) @@ -72,11 +72,11 @@ static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx, /// constant offset, and return that constant offset. For example, Ptr1 might /// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8. static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset, - const TargetData &TD) { + const DataLayout &TD) { Ptr1 = Ptr1->stripPointerCasts(); Ptr2 = Ptr2->stripPointerCasts(); - GetElementPtrInst *GEP1 = dyn_cast(Ptr1); - GetElementPtrInst *GEP2 = dyn_cast(Ptr2); + GEPOperator *GEP1 = dyn_cast(Ptr1); + GEPOperator *GEP2 = dyn_cast(Ptr2); bool VariableIdxFound = false; @@ -141,12 +141,12 @@ struct MemsetRange { /// TheStores - The actual stores that make up this range. SmallVector TheStores; - bool isProfitableToUseMemset(const TargetData &TD) const; + bool isProfitableToUseMemset(const DataLayout &TD) const; }; } // end anon namespace -bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const { +bool MemsetRange::isProfitableToUseMemset(const DataLayout &TD) const { // If we found more than 4 stores to merge or 16 bytes, use memset. if (TheStores.size() >= 4 || End-Start >= 16) return true; @@ -192,9 +192,9 @@ class MemsetRanges { /// because each element is relatively large and expensive to copy. 
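The RemoveFromWorklist hunk above replaces a hand-rolled find/erase loop with the standard erase-remove idiom, which drops every occurrence of the instruction in a single pass. A standalone illustration:

// erase_remove.cpp - the erase-remove idiom used by the RemoveFromWorklist
// change above: drop every occurrence of a value from a vector in one pass.
#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> Worklist = {1, 7, 3, 7, 5, 7};
  int I = 7;

  // std::remove shifts the kept elements to the front and returns the new
  // logical end; erase then trims the leftover tail.
  Worklist.erase(std::remove(Worklist.begin(), Worklist.end(), I),
                 Worklist.end());

  for (int V : Worklist)
    std::cout << V << ' ';
  std::cout << '\n'; // prints: 1 3 5
  return 0;
}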
std::list Ranges; typedef std::list::iterator range_iterator; - const TargetData &TD; + const DataLayout &TD; public: - MemsetRanges(const TargetData &td) : TD(td) {} + MemsetRanges(const DataLayout &td) : TD(td) {} typedef std::list::const_iterator const_iterator; const_iterator begin() const { return Ranges.begin(); } @@ -302,7 +302,7 @@ namespace { class MemCpyOpt : public FunctionPass { MemoryDependenceAnalysis *MD; TargetLibraryInfo *TLI; - const TargetData *TD; + const DataLayout *TD; public: static char ID; // Pass identification, replacement for typeid MemCpyOpt() : FunctionPass(ID) { @@ -332,7 +332,7 @@ namespace { bool processMemCpy(MemCpyInst *M); bool processMemMove(MemMoveInst *M); bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc, - uint64_t cpyLen, CallInst *C); + uint64_t cpyLen, unsigned cpyAlign, CallInst *C); bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep, uint64_t MSize); bool processByValArgument(CallSite CS, unsigned ArgNo); @@ -509,10 +509,18 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) { } if (C) { + unsigned storeAlign = SI->getAlignment(); + if (!storeAlign) + storeAlign = TD->getABITypeAlignment(SI->getOperand(0)->getType()); + unsigned loadAlign = LI->getAlignment(); + if (!loadAlign) + loadAlign = TD->getABITypeAlignment(LI->getType()); + bool changed = performCallSlotOptzn(LI, SI->getPointerOperand()->stripPointerCasts(), LI->getPointerOperand()->stripPointerCasts(), - TD->getTypeStoreSize(SI->getOperand(0)->getType()), C); + TD->getTypeStoreSize(SI->getOperand(0)->getType()), + std::min(storeAlign, loadAlign), C); if (changed) { MD->removeInstruction(SI); SI->eraseFromParent(); @@ -559,7 +567,8 @@ bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) { /// the call write its result directly into the destination of the memcpy. bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy, Value *cpyDest, Value *cpySrc, - uint64_t cpyLen, CallInst *C) { + uint64_t cpyLen, unsigned cpyAlign, + CallInst *C) { // The general transformation to keep in mind is // // call @func(..., src, ...) @@ -625,6 +634,16 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy, return false; } + // Check that dest points to memory that is at least as aligned as src. + unsigned srcAlign = srcAlloca->getAlignment(); + if (!srcAlign) + srcAlign = TD->getABITypeAlignment(srcAlloca->getAllocatedType()); + bool isDestSufficientlyAligned = srcAlign <= cpyAlign; + // If dest is not aligned enough and we can't increase its alignment then + // bail out. + if (!isDestSufficientlyAligned && !isa(cpyDest)) + return false; + // Check that src is not accessed except via the call and the memcpy. This // guarantees that it holds only undefined values when passed in (so the final // memcpy can be dropped), that it is not read or written between the call and @@ -673,20 +692,26 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy, bool changedArgument = false; for (unsigned i = 0; i < CS.arg_size(); ++i) if (CS.getArgument(i)->stripPointerCasts() == cpySrc) { - if (cpySrc->getType() != cpyDest->getType()) - cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), - cpyDest->getName(), C); + Value *Dest = cpySrc->getType() == cpyDest->getType() ? 
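The processStore hunk above computes an effective alignment for the call slot optimization: a recorded store or load alignment of 0 falls back to the type's ABI alignment, and the smaller of the two values is what gets passed to performCallSlotOptzn. A small standalone sketch of that computation (the ABI values below are made up for illustration):

// align_min.cpp - pick the effective alignment for a load/store pair: a
// recorded alignment of 0 means "use the type's ABI alignment", and the pair
// is only as aligned as its weaker member.
#include <algorithm>
#include <iostream>

// Stand-in for DataLayout::getABITypeAlignment(); the values are illustrative.
static unsigned abiTypeAlignment(const char *Ty) {
  return Ty[0] == 'd' ? 8u : 4u; // pretend doubles are 8-byte, ints 4-byte aligned
}

static unsigned effectiveAlign(unsigned Recorded, const char *Ty) {
  return Recorded ? Recorded : abiTypeAlignment(Ty);
}

int main() {
  unsigned StoreAlign = effectiveAlign(0, "double"); // 0 -> ABI alignment (8)
  unsigned LoadAlign = effectiveAlign(4, "double");  // explicitly 4
  unsigned CpyAlign = std::min(StoreAlign, LoadAlign);
  std::cout << "call slot alignment: " << CpyAlign << '\n'; // prints 4
  return 0;
}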
cpyDest + : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(), + cpyDest->getName(), C); changedArgument = true; - if (CS.getArgument(i)->getType() == cpyDest->getType()) - CS.setArgument(i, cpyDest); + if (CS.getArgument(i)->getType() == Dest->getType()) + CS.setArgument(i, Dest); else - CS.setArgument(i, CastInst::CreatePointerCast(cpyDest, - CS.getArgument(i)->getType(), cpyDest->getName(), C)); + CS.setArgument(i, CastInst::CreatePointerCast(Dest, + CS.getArgument(i)->getType(), Dest->getName(), C)); } if (!changedArgument) return false; + // If the destination wasn't sufficiently aligned then increase its alignment. + if (!isDestSufficientlyAligned) { + assert(isa(cpyDest) && "Can only increase alloca alignment!"); + cast(cpyDest)->setAlignment(srcAlign); + } + // Drop any cached information about the call, because we may have changed // its dependence information by changing its parameter. MD->removeInstruction(C); @@ -813,7 +838,8 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) { if (DepInfo.isClobber()) { if (CallInst *C = dyn_cast(DepInfo.getInst())) { if (performCallSlotOptzn(M, M->getDest(), M->getSource(), - CopySize->getZExtValue(), C)) { + CopySize->getZExtValue(), M->getAlignment(), + C)) { MD->removeInstruction(M); M->eraseFromParent(); return true; @@ -974,7 +1000,7 @@ bool MemCpyOpt::iterateOnFunction(Function &F) { bool MemCpyOpt::runOnFunction(Function &F) { bool MadeChange = false; MD = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); TLI = &getAnalysis(); // If we don't have at least memset and memcpy, there is little point of doing diff --git a/lib/Transforms/Scalar/ObjCARC.cpp b/lib/Transforms/Scalar/ObjCARC.cpp index 3222f20..dfdf505 100644 --- a/lib/Transforms/Scalar/ObjCARC.cpp +++ b/lib/Transforms/Scalar/ObjCARC.cpp @@ -29,6 +29,7 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "objc-arc" +#include "llvm/Support/raw_ostream.h" #include "llvm/Support/CommandLine.h" #include "llvm/ADT/DenseMap.h" using namespace llvm; @@ -1120,9 +1121,8 @@ namespace { bool relatedSelect(const SelectInst *A, const Value *B); bool relatedPHI(const PHINode *A, const Value *B); - // Do not implement. - void operator=(const ProvenanceAnalysis &); - ProvenanceAnalysis(const ProvenanceAnalysis &); + void operator=(const ProvenanceAnalysis &) LLVM_DELETED_FUNCTION; + ProvenanceAnalysis(const ProvenanceAnalysis &) LLVM_DELETED_FUNCTION; public: ProvenanceAnalysis() {} @@ -1236,16 +1236,19 @@ bool ProvenanceAnalysis::relatedCheck(const Value *A, const Value *B) { // An ObjC-Identified object can't alias a load if it is never locally stored. if (AIsIdentified) { + // Check for an obvious escape. + if (isa(B)) + return isStoredObjCPointer(A); if (BIsIdentified) { - // If both pointers have provenance, they can be directly compared. - if (A != B) - return false; - } else { - if (isa(B)) - return isStoredObjCPointer(A); + // Check for an obvious escape. + if (isa(A)) + return isStoredObjCPointer(B); + // Both pointers are identified and escapes aren't an evident problem. + return false; } - } else { - if (BIsIdentified && isa(A)) + } else if (BIsIdentified) { + // Check for an obvious escape. + if (isa(A)) return isStoredObjCPointer(B); } @@ -1381,9 +1384,6 @@ namespace { /// PtrState - This class summarizes several per-pointer runtime properties /// which are propogated through the flow graph. class PtrState { - /// NestCount - The known minimum level of retain+release nesting. 
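The ProvenanceAnalysis hunk above swaps the old "declare but never implement" trick for LLVM_DELETED_FUNCTION, which expands to "= delete" on compilers that support it, turning accidental copies into compile-time errors. A standalone sketch of the same idea (the macro and class below are local stand-ins, not the llvm/Support definitions):

// noncopyable.cpp - forbid copying a class that owns cached analysis state.
// LOCAL_DELETED_FUNCTION stands in for LLVM_DELETED_FUNCTION, which maps to
// "= delete" where the compiler supports C++11 deleted functions.
#include <iostream>
#include <map>

#define LOCAL_DELETED_FUNCTION = delete

class ProvenanceCache {
  std::map<int, bool> Cached; // per-query results we do not want copied around

  // Copying the analysis (and its cache) is almost certainly a bug; make any
  // attempt a compile-time error rather than a silent deep copy.
  ProvenanceCache(const ProvenanceCache &) LOCAL_DELETED_FUNCTION;
  void operator=(const ProvenanceCache &) LOCAL_DELETED_FUNCTION;

public:
  ProvenanceCache() {}

  bool related(int A, int B) {
    int Key = A * 131 + B;
    auto It = Cached.find(Key);
    if (It != Cached.end())
      return It->second;        // reuse a previously computed answer
    bool Result = (A == B);     // trivially: only identical keys are related
    Cached[Key] = Result;
    return Result;
  }
};

int main() {
  ProvenanceCache PA;
  std::cout << PA.related(1, 1) << ' ' << PA.related(1, 2) << '\n'; // 1 0
  // ProvenanceCache Copy(PA); // would not compile: copy constructor deleted
  return 0;
}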
- unsigned NestCount; - /// KnownPositiveRefCount - True if the reference count is known to /// be incremented. bool KnownPositiveRefCount; @@ -1401,7 +1401,7 @@ namespace { /// TODO: Encapsulate this better. RRInfo RRI; - PtrState() : NestCount(0), KnownPositiveRefCount(false), Partial(false), + PtrState() : KnownPositiveRefCount(false), Partial(false), Seq(S_None) {} void SetKnownPositiveRefCount() { @@ -1416,18 +1416,6 @@ namespace { return KnownPositiveRefCount; } - void IncrementNestCount() { - if (NestCount != UINT_MAX) ++NestCount; - } - - void DecrementNestCount() { - if (NestCount != 0) --NestCount; - } - - bool IsKnownNested() const { - return NestCount > 0; - } - void SetSeq(Sequence NewSeq) { Seq = NewSeq; } @@ -1454,7 +1442,6 @@ void PtrState::Merge(const PtrState &Other, bool TopDown) { Seq = MergeSeqs(Seq, Other.Seq, TopDown); KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount; - NestCount = std::min(NestCount, Other.NestCount); // We can't merge a plain objc_retain with an objc_retainBlock. if (RRI.IsRetainBlock != Other.RRI.IsRetainBlock) @@ -1610,6 +1597,12 @@ void BBState::MergePred(const BBState &Other) { // loop backedge. Loop backedges are special. TopDownPathCount += Other.TopDownPathCount; + // Check for overflow. If we have overflow, fall back to conservative behavior. + if (TopDownPathCount < Other.TopDownPathCount) { + clearTopDownPointers(); + return; + } + // For each entry in the other set, if our set has an entry with the same key, // merge the entries. Otherwise, copy the entry and merge it with an empty // entry. @@ -1635,6 +1628,12 @@ void BBState::MergeSucc(const BBState &Other) { // loop backedge. Loop backedges are special. BottomUpPathCount += Other.BottomUpPathCount; + // Check for overflow. If we have overflow, fall back to conservative behavior. + if (BottomUpPathCount < Other.BottomUpPathCount) { + clearBottomUpPointers(); + return; + } + // For each entry in the other set, if our set has an entry with the // same key, merge the entries. Otherwise, copy the entry and merge // it with an empty entry. 
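The MergePred/MergeSucc hunks above add an overflow check after summing path counts: with unsigned arithmetic, wraparound shows up as the sum coming out smaller than the value just added, and the code then falls back to conservative behavior by clearing its per-pointer state. A tiny standalone illustration of that check:

// overflow_check.cpp - detect unsigned wraparound after "Count += Other" by
// checking whether the sum ended up smaller than the value just added.
#include <iostream>
#include <limits>

int main() {
  unsigned TopDownPathCount = std::numeric_limits<unsigned>::max() - 1;
  unsigned OtherPathCount = 5;

  TopDownPathCount += OtherPathCount;
  if (TopDownPathCount < OtherPathCount) {
    // The addition wrapped around; give up on precise tracking.
    std::cout << "path count overflowed, falling back to conservative state\n";
    return 0;
  }
  std::cout << "merged path count: " << TopDownPathCount << '\n';
  return 0;
}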
@@ -1789,7 +1788,9 @@ Constant *ObjCARCOpt::getRetainRVCallee(Module *M) { Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C)); Type *Params[] = { I8X }; FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false); - AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind); + AttrListPtr Attributes = + AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(C, Attributes::NoUnwind)); RetainRVCallee = M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy, Attributes); @@ -1803,7 +1804,9 @@ Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) { Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C)); Type *Params[] = { I8X }; FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false); - AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind); + AttrListPtr Attributes = + AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(C, Attributes::NoUnwind)); AutoreleaseRVCallee = M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy, Attributes); @@ -1815,7 +1818,9 @@ Constant *ObjCARCOpt::getReleaseCallee(Module *M) { if (!ReleaseCallee) { LLVMContext &C = M->getContext(); Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) }; - AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind); + AttrListPtr Attributes = + AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(C, Attributes::NoUnwind)); ReleaseCallee = M->getOrInsertFunction( "objc_release", @@ -1829,7 +1834,9 @@ Constant *ObjCARCOpt::getRetainCallee(Module *M) { if (!RetainCallee) { LLVMContext &C = M->getContext(); Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) }; - AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind); + AttrListPtr Attributes = + AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(C, Attributes::NoUnwind)); RetainCallee = M->getOrInsertFunction( "objc_retain", @@ -1858,7 +1865,9 @@ Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) { if (!AutoreleaseCallee) { LLVMContext &C = M->getContext(); Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) }; - AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind); + AttrListPtr Attributes = + AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(C, Attributes::NoUnwind)); AutoreleaseCallee = M->getOrInsertFunction( "objc_autorelease", @@ -1868,6 +1877,26 @@ Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) { return AutoreleaseCallee; } +/// IsPotentialUse - Test whether the given value is possible a +/// reference-counted pointer, including tests which utilize AliasAnalysis. +static bool IsPotentialUse(const Value *Op, AliasAnalysis &AA) { + // First make the rudimentary check. + if (!IsPotentialUse(Op)) + return false; + + // Objects in constant memory are not reference-counted. + if (AA.pointsToConstantMemory(Op)) + return false; + + // Pointers in constant memory are not pointing to reference-counted objects. + if (const LoadInst *LI = dyn_cast(Op)) + if (AA.pointsToConstantMemory(LI->getPointerOperand())) + return false; + + // Otherwise assume the worst. + return true; +} + /// CanAlterRefCount - Test whether the given instruction can result in a /// reference count modification (positive or negative) for the pointer's /// object. 
@@ -1894,7 +1923,7 @@ CanAlterRefCount(const Instruction *Inst, const Value *Ptr, for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I) { const Value *Op = *I; - if (IsPotentialUse(Op) && PA.related(Ptr, Op)) + if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op)) return true; } return false; @@ -1919,14 +1948,14 @@ CanUse(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA, // Comparing a pointer with null, or any other constant, isn't really a use, // because we don't care what the pointer points to, or about the values // of any other dynamic reference-counted pointers. - if (!IsPotentialUse(ICI->getOperand(1))) + if (!IsPotentialUse(ICI->getOperand(1), *PA.getAA())) return false; } else if (ImmutableCallSite CS = static_cast(Inst)) { // For calls, just check the arguments (and not the callee operand). for (ImmutableCallSite::arg_iterator OI = CS.arg_begin(), OE = CS.arg_end(); OI != OE; ++OI) { const Value *Op = *OI; - if (IsPotentialUse(Op) && PA.related(Ptr, Op)) + if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op)) return true; } return false; @@ -1936,14 +1965,14 @@ CanUse(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA, const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand()); // If we can't tell what the underlying object was, assume there is a // dependence. - return IsPotentialUse(Op) && PA.related(Op, Ptr); + return IsPotentialUse(Op, *PA.getAA()) && PA.related(Op, Ptr); } // Check each operand for a match. for (User::const_op_iterator OI = Inst->op_begin(), OE = Inst->op_end(); OI != OE; ++OI) { const Value *Op = *OI; - if (IsPotentialUse(Op) && PA.related(Ptr, Op)) + if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op)) return true; } return false; @@ -2612,11 +2641,11 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind); S.ResetSequenceProgress(ReleaseMetadata ? S_MovableRelease : S_Release); S.RRI.ReleaseMetadata = ReleaseMetadata; - S.RRI.KnownSafe = S.IsKnownNested() || S.IsKnownIncremented(); + S.RRI.KnownSafe = S.IsKnownIncremented(); S.RRI.IsTailCallRelease = cast(Inst)->isTailCall(); S.RRI.Calls.insert(Inst); - S.IncrementNestCount(); + S.SetKnownPositiveRefCount(); break; } case IC_RetainBlock: @@ -2631,7 +2660,6 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, PtrState &S = MyStates.getPtrBottomUpState(Arg); S.SetKnownPositiveRefCount(); - S.DecrementNestCount(); switch (S.GetSeq()) { case S_Stop: @@ -2747,8 +2775,9 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB, // Merge the states from each successor to compute the initial state // for the current block. - for (BBState::edge_iterator SI(MyStates.succ_begin()), - SE(MyStates.succ_end()); SI != SE; ++SI) { + BBState::edge_iterator SI(MyStates.succ_begin()), + SE(MyStates.succ_end()); + if (SI != SE) { const BasicBlock *Succ = *SI; DenseMap::iterator I = BBStates.find(Succ); assert(I != BBStates.end()); @@ -2760,7 +2789,6 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB, assert(I != BBStates.end()); MyStates.MergeSucc(I->second); } - break; } // Visit all the instructions, bottom-up. @@ -2823,12 +2851,11 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, S.ResetSequenceProgress(S_Retain); S.RRI.IsRetainBlock = Class == IC_RetainBlock; - // Don't check S.IsKnownIncremented() here because it's not sufficient. 
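The VisitBottomUp/VisitTopDown hunks in this range rework "loop over all successors but break after the first" into "seed the state from the first successor, then merge the remaining ones", which makes the intent explicit. A standalone sketch of that seed-then-merge pattern over a list of sets (the intersection merge below is only an example of a merge rule):

// seed_then_merge.cpp - initialize a summary from the first element and merge
// the rest, mirroring the reshuffled successor/predecessor merging above.
#include <iostream>
#include <set>
#include <vector>

typedef std::set<int> State;

// Keep only facts that hold on every path (set intersection here).
static void mergeInto(State &Dest, const State &Src) {
  State Out;
  for (int V : Dest)
    if (Src.count(V))
      Out.insert(V);
  Dest = Out;
}

int main() {
  std::vector<State> SuccStates = {{1, 2, 3}, {2, 3, 4}, {2, 3}};
  State MyState;

  auto SI = SuccStates.begin(), SE = SuccStates.end();
  if (SI != SE) {              // seed from the first successor...
    MyState = *SI;
    for (++SI; SI != SE; ++SI) // ...then fold in each remaining one
      mergeInto(MyState, *SI);
  }

  for (int V : MyState)
    std::cout << V << ' ';
  std::cout << '\n'; // prints: 2 3
  return 0;
}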
- S.RRI.KnownSafe = S.IsKnownNested(); + S.RRI.KnownSafe = S.IsKnownIncremented(); S.RRI.Calls.insert(Inst); } - S.IncrementNestCount(); + S.SetKnownPositiveRefCount(); // A retain can be a potential use; procede to the generic checking // code below. @@ -2838,7 +2865,7 @@ ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst, Arg = GetObjCArg(Inst); PtrState &S = MyStates.getPtrTopDownState(Arg); - S.DecrementNestCount(); + S.ClearRefCount(); switch (S.GetSeq()) { case S_Retain: @@ -2935,8 +2962,9 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB, // Merge the states from each predecessor to compute the initial state // for the current block. - for (BBState::edge_iterator PI(MyStates.pred_begin()), - PE(MyStates.pred_end()); PI != PE; ++PI) { + BBState::edge_iterator PI(MyStates.pred_begin()), + PE(MyStates.pred_end()); + if (PI != PE) { const BasicBlock *Pred = *PI; DenseMap::iterator I = BBStates.find(Pred); assert(I != BBStates.end()); @@ -2948,7 +2976,6 @@ ObjCARCOpt::VisitTopDown(BasicBlock *BB, assert(I != BBStates.end()); MyStates.MergePred(I->second); } - break; } // Visit all the instructions, top-down. @@ -3532,19 +3559,19 @@ bool ObjCARCOpt::OptimizeSequences(Function &F) { } /// OptimizeReturns - Look for this pattern: -/// +/// \code /// %call = call i8* @something(...) /// %2 = call i8* @objc_retain(i8* %call) /// %3 = call i8* @objc_autorelease(i8* %2) /// ret i8* %3 -/// +/// \endcode /// And delete the retain and autorelease. /// /// Otherwise if it's just this: -/// +/// \code /// %3 = call i8* @objc_autorelease(i8* %2) /// ret i8* %3 -/// +/// \endcode /// convert the autorelease to autoreleaseRV. void ObjCARCOpt::OptimizeReturns(Function &F) { if (!F.getReturnType()->isPointerTy()) @@ -3814,8 +3841,9 @@ Constant *ObjCARCContract::getStoreStrongCallee(Module *M) { Type *Params[] = { I8XX, I8X }; AttrListPtr Attributes = AttrListPtr() - .addAttr(~0u, Attribute::NoUnwind) - .addAttr(1, Attribute::NoCapture); + .addAttr(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(C, Attributes::NoUnwind)) + .addAttr(M->getContext(), 1, Attributes::get(C, Attributes::NoCapture)); StoreStrongCallee = M->getOrInsertFunction( @@ -3832,7 +3860,9 @@ Constant *ObjCARCContract::getRetainAutoreleaseCallee(Module *M) { Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C)); Type *Params[] = { I8X }; FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false); - AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind); + AttrListPtr Attributes = + AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(C, Attributes::NoUnwind)); RetainAutoreleaseCallee = M->getOrInsertFunction("objc_retainAutorelease", FTy, Attributes); } @@ -3845,7 +3875,9 @@ Constant *ObjCARCContract::getRetainAutoreleaseRVCallee(Module *M) { Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C)); Type *Params[] = { I8X }; FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false); - AttrListPtr Attributes = AttrListPtr().addAttr(~0u, Attribute::NoUnwind); + AttrListPtr Attributes = + AttrListPtr().addAttr(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(C, Attributes::NoUnwind)); RetainAutoreleaseRVCallee = M->getOrInsertFunction("objc_retainAutoreleaseReturnValue", FTy, Attributes); diff --git a/lib/Transforms/Scalar/Reassociate.cpp b/lib/Transforms/Scalar/Reassociate.cpp index 09687d8..7a40797 100644 --- a/lib/Transforms/Scalar/Reassociate.cpp +++ b/lib/Transforms/Scalar/Reassociate.cpp @@ -339,36 +339,6 @@ static void 
IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) { } } -/// EvaluateRepeatedConstant - Compute C op C op ... op C where the constant C -/// is repeated Weight times. -static Constant *EvaluateRepeatedConstant(unsigned Opcode, Constant *C, - APInt Weight) { - // For addition the result can be efficiently computed as the product of the - // constant and the weight. - if (Opcode == Instruction::Add) - return ConstantExpr::getMul(C, ConstantInt::get(C->getContext(), Weight)); - - // The weight might be huge, so compute by repeated squaring to ensure that - // compile time is proportional to the logarithm of the weight. - Constant *Result = 0; - Constant *Power = C; // Successively C, C op C, (C op C) op (C op C) etc. - // Visit the bits in Weight. - while (Weight != 0) { - // If the current bit in Weight is non-zero do Result = Result op Power. - if (Weight[0]) - Result = Result ? ConstantExpr::get(Opcode, Result, Power) : Power; - // Move on to the next bit if any more are non-zero. - Weight = Weight.lshr(1); - if (Weight.isMinValue()) - break; - // Square the power. - Power = ConstantExpr::get(Opcode, Power, Power); - } - - assert(Result && "Only positive weights supported!"); - return Result; -} - typedef std::pair RepeatedValue; /// LinearizeExprTree - Given an associative binary expression, return the leaf @@ -382,9 +352,7 @@ typedef std::pair RepeatedValue; /// op /// (Ops[N].first op Ops[N].first op ... Ops[N].first) <- Ops[N].second times /// -/// Note that the values Ops[0].first, ..., Ops[N].first are all distinct, and -/// they are all non-constant except possibly for the last one, which if it is -/// constant will have weight one (Ops[N].second === 1). +/// Note that the values Ops[0].first, ..., Ops[N].first are all distinct. /// /// This routine may modify the function, in which case it returns 'true'. The /// changes it makes may well be destructive, changing the value computed by 'I' @@ -604,7 +572,6 @@ static bool LinearizeExprTree(BinaryOperator *I, // The leaves, repeated according to their weights, represent the linearized // form of the expression. - Constant *Cst = 0; // Accumulate constants here. for (unsigned i = 0, e = LeafOrder.size(); i != e; ++i) { Value *V = LeafOrder[i]; LeafMap::iterator It = Leaves.find(V); @@ -618,31 +585,14 @@ static bool LinearizeExprTree(BinaryOperator *I, continue; // Ensure the leaf is only output once. It->second = 0; - // Glob all constants together into Cst. - if (Constant *C = dyn_cast(V)) { - C = EvaluateRepeatedConstant(Opcode, C, Weight); - Cst = Cst ? ConstantExpr::get(Opcode, Cst, C) : C; - continue; - } - // Add non-constant Ops.push_back(std::make_pair(V, Weight)); } - // Add any constants back into Ops, all globbed together and reduced to having - // weight 1 for the convenience of users. - Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType()); - if (Cst && Cst != Identity) { - // If combining multiple constants resulted in the absorber then the entire - // expression must evaluate to the absorber. - if (Cst == Absorber) - Ops.clear(); - Ops.push_back(std::make_pair(Cst, APInt(Bitwidth, 1))); - } - // For nilpotent operations or addition there may be no operands, for example // because the expression was "X xor X" or consisted of 2^Bitwidth additions: // in both cases the weight reduces to 0 causing the value to be skipped. 
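The hunk above removes EvaluateRepeatedConstant, whose interesting part was computing "C op C op ... op C" (Weight copies) by repeated squaring, so the work grows with the number of bits in Weight rather than with Weight itself. A standalone sketch of that technique for plain integers, using multiplication as the operation:

// repeated_squaring.cpp - evaluate "C op C op ... op C" (Weight copies) in
// O(log Weight) steps by walking the bits of Weight, the scheme the removed
// EvaluateRepeatedConstant helper used for constant folding.
#include <cstdint>
#include <iostream>

static uint64_t repeatApply(uint64_t C, uint64_t Weight) {
  // Here "op" is multiplication, so the result is C raised to the power Weight.
  uint64_t Result = 1; // identity of the operation
  uint64_t Power = C;  // successively C, C*C, (C*C)*(C*C), ...
  while (Weight != 0) {
    if (Weight & 1)    // this bit of Weight contributes the current power
      Result *= Power;
    Weight >>= 1;
    if (Weight)        // square only while more bits remain
      Power *= Power;
  }
  return Result;
}

int main() {
  std::cout << repeatApply(3, 5) << '\n';  // 3*3*3*3*3 = 243
  std::cout << repeatApply(2, 10) << '\n'; // 1024
  return 0;
}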
if (Ops.empty()) { + Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType()); assert(Identity && "Associative operation without identity!"); Ops.push_back(std::make_pair(Identity, APInt(Bitwidth, 1))); } @@ -656,8 +606,8 @@ void Reassociate::RewriteExprTree(BinaryOperator *I, SmallVectorImpl &Ops) { assert(Ops.size() > 1 && "Single values should be used directly!"); - // Since our optimizations never increase the number of operations, the new - // expression can always be written by reusing the existing binary operators + // Since our optimizations should never increase the number of operations, the + // new expression can usually be written reusing the existing binary operators // from the original expression tree, without creating any new instructions, // though the rewritten expression may have a completely different topology. // We take care to not change anything if the new expression will be the same @@ -671,6 +621,20 @@ void Reassociate::RewriteExprTree(BinaryOperator *I, unsigned Opcode = I->getOpcode(); BinaryOperator *Op = I; + /// NotRewritable - The operands being written will be the leaves of the new + /// expression and must not be used as inner nodes (via NodesToRewrite) by + /// mistake. Inner nodes are always reassociable, and usually leaves are not + /// (if they were they would have been incorporated into the expression and so + /// would not be leaves), so most of the time there is no danger of this. But + /// in rare cases a leaf may become reassociable if an optimization kills uses + /// of it, or it may momentarily become reassociable during rewriting (below) + /// due it being removed as an operand of one of its uses. Ensure that misuse + /// of leaf nodes as inner nodes cannot occur by remembering all of the future + /// leaves and refusing to reuse any of them as inner nodes. + SmallPtrSet NotRewritable; + for (unsigned i = 0, e = Ops.size(); i != e; ++i) + NotRewritable.insert(Ops[i].Op); + // ExpressionChanged - Non-null if the rewritten expression differs from the // original in some non-trivial way, requiring the clearing of optional flags. // Flags are cleared from the operator in ExpressionChanged up to I inclusive. @@ -703,12 +667,14 @@ void Reassociate::RewriteExprTree(BinaryOperator *I, // the old operands with the new ones. DEBUG(dbgs() << "RA: " << *Op << '\n'); if (NewLHS != OldLHS) { - if (BinaryOperator *BO = isReassociableOp(OldLHS, Opcode)) + BinaryOperator *BO = isReassociableOp(OldLHS, Opcode); + if (BO && !NotRewritable.count(BO)) NodesToRewrite.push_back(BO); Op->setOperand(0, NewLHS); } if (NewRHS != OldRHS) { - if (BinaryOperator *BO = isReassociableOp(OldRHS, Opcode)) + BinaryOperator *BO = isReassociableOp(OldRHS, Opcode); + if (BO && !NotRewritable.count(BO)) NodesToRewrite.push_back(BO); Op->setOperand(1, NewRHS); } @@ -732,7 +698,8 @@ void Reassociate::RewriteExprTree(BinaryOperator *I, Op->swapOperands(); } else { // Overwrite with the new right-hand side. - if (BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode)) + BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode); + if (BO && !NotRewritable.count(BO)) NodesToRewrite.push_back(BO); Op->setOperand(1, NewRHS); ExpressionChanged = Op; @@ -745,7 +712,8 @@ void Reassociate::RewriteExprTree(BinaryOperator *I, // Now deal with the left-hand side. If this is already an operation node // from the original expression then just rewrite the rest of the expression // into it. 
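The RewriteExprTree hunk above records every future leaf in the NotRewritable set before rewriting starts and then refuses to reuse any of those values as inner nodes. A standalone sketch of that "remember the protected values, filter candidates against them" pattern (the names are illustrative only):

// leaf_guard.cpp - collect the values that must stay leaves, then reuse a
// candidate node only if it is not in that protected set; the shape of the
// NotRewritable check added above.
#include <iostream>
#include <string>
#include <unordered_set>
#include <vector>

int main() {
  // The operands that will become the leaves of the rewritten expression.
  std::vector<std::string> Ops = {"x", "y", "tmp3"};
  std::unordered_set<std::string> NotRewritable(Ops.begin(), Ops.end());

  // Old inner nodes we might recycle while rewriting the tree.
  std::vector<std::string> Candidates = {"tmp1", "tmp3", "tmp2"};

  std::vector<std::string> NodesToRewrite;
  for (const std::string &BO : Candidates)
    if (!NotRewritable.count(BO)) // never reuse a future leaf as an inner node
      NodesToRewrite.push_back(BO);

  for (const std::string &N : NodesToRewrite)
    std::cout << N << ' ';
  std::cout << '\n'; // prints: tmp1 tmp2
  return 0;
}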
- if (BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode)) { + BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode); + if (BO && !NotRewritable.count(BO)) { Op = BO; continue; } @@ -1446,9 +1414,26 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I, SmallVectorImpl &Ops) { // Now that we have the linearized expression tree, try to optimize it. // Start by folding any constants that we found. - if (Ops.size() == 1) return Ops[0].Op; - + Constant *Cst = 0; unsigned Opcode = I->getOpcode(); + while (!Ops.empty() && isa(Ops.back().Op)) { + Constant *C = cast(Ops.pop_back_val().Op); + Cst = Cst ? ConstantExpr::get(Opcode, C, Cst) : C; + } + // If there was nothing but constants then we are done. + if (Ops.empty()) + return Cst; + + // Put the combined constant back at the end of the operand list, except if + // there is no point. For example, an add of 0 gets dropped here, while a + // multiplication by zero turns the whole expression into zero. + if (Cst && Cst != ConstantExpr::getBinOpIdentity(Opcode, I->getType())) { + if (Cst == ConstantExpr::getBinOpAbsorber(Opcode, I->getType())) + return Cst; + Ops.push_back(ValueEntry(0, Cst)); + } + + if (Ops.size() == 1) return Ops[0].Op; // Handle destructive annihilation due to identities between elements in the // argument list here. diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp index 2c39aab..686520e 100644 --- a/lib/Transforms/Scalar/SCCP.cpp +++ b/lib/Transforms/Scalar/SCCP.cpp @@ -26,7 +26,7 @@ #include "llvm/Pass.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Transforms/Utils/Local.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Support/CallSite.h" #include "llvm/Support/Debug.h" @@ -153,7 +153,7 @@ namespace { /// Constant Propagation. /// class SCCPSolver : public InstVisitor { - const TargetData *TD; + const DataLayout *TD; const TargetLibraryInfo *TLI; SmallPtrSet BBExecutable; // The BBs that are executable. DenseMap ValueState; // The state each value is in. @@ -205,7 +205,7 @@ class SCCPSolver : public InstVisitor { typedef std::pair Edge; DenseSet KnownFeasibleEdges; public: - SCCPSolver(const TargetData *td, const TargetLibraryInfo *tli) + SCCPSolver(const DataLayout *td, const TargetLibraryInfo *tli) : TD(td), TLI(tli) {} /// MarkBlockExecutable - This method can be used by clients to mark all of @@ -1564,7 +1564,7 @@ static void DeleteInstructionInBlock(BasicBlock *BB) { // bool SCCP::runOnFunction(Function &F) { DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n"); - const TargetData *TD = getAnalysisIfAvailable(); + const DataLayout *TD = getAnalysisIfAvailable(); const TargetLibraryInfo *TLI = &getAnalysis(); SCCPSolver Solver(TD, TLI); @@ -1693,7 +1693,7 @@ static bool AddressIsTaken(const GlobalValue *GV) { } bool IPSCCP::runOnModule(Module &M) { - const TargetData *TD = getAnalysisIfAvailable(); + const DataLayout *TD = getAnalysisIfAvailable(); const TargetLibraryInfo *TLI = &getAnalysis(); SCCPSolver Solver(TD, TLI); diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp new file mode 100644 index 0000000..ccc2f7a --- /dev/null +++ b/lib/Transforms/Scalar/SROA.cpp @@ -0,0 +1,3697 @@ +//===- SROA.cpp - Scalar Replacement Of Aggregates ------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
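The OptimizeExpression hunk above folds all trailing constants into one, returns that constant directly if nothing else remains, drops it when it equals the operation's identity, and short-circuits the whole expression when it equals the absorber. A standalone sketch of that logic for integer multiplication (identity 1, absorber 0):

// fold_constants.cpp - fold the trailing constants of a linearized operand
// list for an integer multiply: combine them, drop the identity (1), and let
// the absorber (0) swallow the whole expression.
#include <iostream>
#include <vector>

struct Operand {
  bool IsConstant;
  long Value; // meaningful only when IsConstant (otherwise a placeholder)
};

int main() {
  // Linearized operand list; constants have been sorted to the end.
  std::vector<Operand> Ops = {{false, 0}, {false, 0}, {true, 3}, {true, 4}};

  const long Identity = 1, Absorber = 0; // for multiplication

  long Cst = Identity;
  bool SawConstant = false;
  while (!Ops.empty() && Ops.back().IsConstant) {
    Cst *= Ops.back().Value;
    SawConstant = true;
    Ops.pop_back();
  }

  if (Ops.empty()) {               // nothing but constants: we are done
    std::cout << "whole expression = " << Cst << '\n';
  } else if (SawConstant && Cst == Absorber) {
    std::cout << "whole expression = 0 (absorber)\n";
  } else {
    if (SawConstant && Cst != Identity)
      Ops.push_back({true, Cst});  // keep one combined constant at the end
    std::cout << Ops.size() << " operands remain, combined constant "
              << Cst << '\n';      // here: 3 operands, constant 12
  }
  return 0;
}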
+// +//===----------------------------------------------------------------------===// +/// \file +/// This transformation implements the well known scalar replacement of +/// aggregates transformation. It tries to identify promotable elements of an +/// aggregate alloca, and promote them to registers. It will also try to +/// convert uses of an element (or set of elements) of an alloca into a vector +/// or bitfield-style integer scalar if appropriate. +/// +/// It works to do this with minimal slicing of the alloca so that regions +/// which are merely transferred in and out of external memory remain unchanged +/// and are not decomposed to scalar code. +/// +/// Because this also performs alloca promotion, it can be thought of as also +/// serving the purpose of SSA formation. The algorithm iterates on the +/// function until all opportunities for promotion have been realized. +/// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "sroa" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Constants.h" +#include "llvm/DIBuilder.h" +#include "llvm/DebugInfo.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/IRBuilder.h" +#include "llvm/Instructions.h" +#include "llvm/IntrinsicInst.h" +#include "llvm/LLVMContext.h" +#include "llvm/Module.h" +#include "llvm/Operator.h" +#include "llvm/Pass.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Analysis/Dominators.h" +#include "llvm/Analysis/Loads.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/InstVisitor.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/DataLayout.h" +#include "llvm/Transforms/Utils/Local.h" +#include "llvm/Transforms/Utils/PromoteMemToReg.h" +#include "llvm/Transforms/Utils/SSAUpdater.h" +using namespace llvm; + +STATISTIC(NumAllocasAnalyzed, "Number of allocas analyzed for replacement"); +STATISTIC(NumNewAllocas, "Number of new, smaller allocas introduced"); +STATISTIC(NumPromoted, "Number of allocas promoted to SSA values"); +STATISTIC(NumLoadsSpeculated, "Number of loads speculated to allow promotion"); +STATISTIC(NumDeleted, "Number of instructions deleted"); +STATISTIC(NumVectorized, "Number of vectorized aggregates"); + +/// Hidden option to force the pass to not use DomTree and mem2reg, instead +/// forming SSA values through the SSAUpdater infrastructure. +static cl::opt +ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden); + +namespace { +/// \brief Alloca partitioning representation. +/// +/// This class represents a partitioning of an alloca into slices, and +/// information about the nature of uses of each slice of the alloca. The goal +/// is that this information is sufficient to decide if and how to split the +/// alloca apart and replace slices with scalars. It is also intended that this +/// structure can capture the relevant information needed both to decide about +/// and to enact these transformations. +class AllocaPartitioning { +public: + /// \brief A common base class for representing a half-open byte range. + struct ByteRange { + /// \brief The beginning offset of the range. + uint64_t BeginOffset; + + /// \brief The ending offset, not included in the range. 
+ uint64_t EndOffset; + + ByteRange() : BeginOffset(), EndOffset() {} + ByteRange(uint64_t BeginOffset, uint64_t EndOffset) + : BeginOffset(BeginOffset), EndOffset(EndOffset) {} + + /// \brief Support for ordering ranges. + /// + /// This provides an ordering over ranges such that start offsets are + /// always increasing, and within equal start offsets, the end offsets are + /// decreasing. Thus the spanning range comes first in a cluster with the + /// same start position. + bool operator<(const ByteRange &RHS) const { + if (BeginOffset < RHS.BeginOffset) return true; + if (BeginOffset > RHS.BeginOffset) return false; + if (EndOffset > RHS.EndOffset) return true; + return false; + } + + /// \brief Support comparison with a single offset to allow binary searches. + friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) { + return LHS.BeginOffset < RHSOffset; + } + + friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset, + const ByteRange &RHS) { + return LHSOffset < RHS.BeginOffset; + } + + bool operator==(const ByteRange &RHS) const { + return BeginOffset == RHS.BeginOffset && EndOffset == RHS.EndOffset; + } + bool operator!=(const ByteRange &RHS) const { return !operator==(RHS); } + }; + + /// \brief A partition of an alloca. + /// + /// This structure represents a contiguous partition of the alloca. These are + /// formed by examining the uses of the alloca. During formation, they may + /// overlap but once an AllocaPartitioning is built, the Partitions within it + /// are all disjoint. + struct Partition : public ByteRange { + /// \brief Whether this partition is splittable into smaller partitions. + /// + /// We flag partitions as splittable when they are formed entirely due to + /// accesses by trivially splittable operations such as memset and memcpy. + bool IsSplittable; + + /// \brief Test whether a partition has been marked as dead. + bool isDead() const { + if (BeginOffset == UINT64_MAX) { + assert(EndOffset == UINT64_MAX); + return true; + } + return false; + } + + /// \brief Kill a partition. + /// This is accomplished by setting both its beginning and end offset to + /// the maximum possible value. + void kill() { + assert(!isDead() && "He's Dead, Jim!"); + BeginOffset = EndOffset = UINT64_MAX; + } + + Partition() : ByteRange(), IsSplittable() {} + Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable) + : ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {} + }; + + /// \brief A particular use of a partition of the alloca. + /// + /// This structure is used to associate uses of a partition with it. They + /// mark the range of bytes which are referenced by a particular instruction, + /// and includes a handle to the user itself and the pointer value in use. + /// The bounds of these uses are determined by intersecting the bounds of the + /// memory use itself with a particular partition. As a consequence there is + /// intentionally overlap between various uses of the same partition. + struct PartitionUse : public ByteRange { + /// \brief The use in question. Provides access to both user and used value. + /// + /// Note that this may be null if the partition use is *dead*, that is, it + /// should be ignored. + Use *U; + + PartitionUse() : ByteRange(), U() {} + PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U) + : ByteRange(BeginOffset, EndOffset), U(U) {} + }; + + /// \brief Construct a partitioning of a particular alloca. + /// + /// Construction does most of the work for partitioning the alloca. 
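The ByteRange ordering defined above sorts begin offsets in increasing order and, at equal begin offsets, end offsets in decreasing order, so a spanning range sorts ahead of the slices it contains. A small self-contained illustration of that property, using an unrelated Range struct rather than the patch's types:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Range {
      unsigned Begin, End;
      bool operator<(const Range &RHS) const {
        if (Begin < RHS.Begin) return true;
        if (Begin > RHS.Begin) return false;
        return End > RHS.End;            // larger (spanning) range first
      }
      bool operator==(const Range &RHS) const {
        return Begin == RHS.Begin && End == RHS.End;
      }
    };

    int main() {
      std::vector<Range> Rs = {{0, 4}, {4, 8}, {0, 8}};
      std::sort(Rs.begin(), Rs.end());
      // The range spanning [0,8) clusters in front of the [0,4) slice it contains.
      assert((Rs[0] == Range{0, 8}));
      assert((Rs[1] == Range{0, 4}));
      assert((Rs[2] == Range{4, 8}));
      return 0;
    }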
This + /// performs the necessary walks of users and builds a partitioning from it. + AllocaPartitioning(const DataLayout &TD, AllocaInst &AI); + + /// \brief Test whether a pointer to the allocation escapes our analysis. + /// + /// If this is true, the partitioning is never fully built and should be + /// ignored. + bool isEscaped() const { return PointerEscapingInstr; } + + /// \brief Support for iterating over the partitions. + /// @{ + typedef SmallVectorImpl::iterator iterator; + iterator begin() { return Partitions.begin(); } + iterator end() { return Partitions.end(); } + + typedef SmallVectorImpl::const_iterator const_iterator; + const_iterator begin() const { return Partitions.begin(); } + const_iterator end() const { return Partitions.end(); } + /// @} + + /// \brief Support for iterating over and manipulating a particular + /// partition's uses. + /// + /// The iteration support provided for uses is more limited, but also + /// includes some manipulation routines to support rewriting the uses of + /// partitions during SROA. + /// @{ + typedef SmallVectorImpl::iterator use_iterator; + use_iterator use_begin(unsigned Idx) { return Uses[Idx].begin(); } + use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); } + use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); } + use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); } + + typedef SmallVectorImpl::const_iterator const_use_iterator; + const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); } + const_use_iterator use_begin(const_iterator I) const { + return Uses[I - begin()].begin(); + } + const_use_iterator use_end(unsigned Idx) const { return Uses[Idx].end(); } + const_use_iterator use_end(const_iterator I) const { + return Uses[I - begin()].end(); + } + + unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); } + unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); } + const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const { + return Uses[PIdx][UIdx]; + } + const PartitionUse &getUse(const_iterator I, unsigned UIdx) const { + return Uses[I - begin()][UIdx]; + } + + void use_push_back(unsigned Idx, const PartitionUse &PU) { + Uses[Idx].push_back(PU); + } + void use_push_back(const_iterator I, const PartitionUse &PU) { + Uses[I - begin()].push_back(PU); + } + /// @} + + /// \brief Allow iterating the dead users for this alloca. + /// + /// These are instructions which will never actually use the alloca as they + /// are outside the allocated range. They are safe to replace with undef and + /// delete. + /// @{ + typedef SmallVectorImpl::const_iterator dead_user_iterator; + dead_user_iterator dead_user_begin() const { return DeadUsers.begin(); } + dead_user_iterator dead_user_end() const { return DeadUsers.end(); } + /// @} + + /// \brief Allow iterating the dead expressions referring to this alloca. + /// + /// These are operands which have cannot actually be used to refer to the + /// alloca as they are outside its range and the user doesn't correct for + /// that. These mostly consist of PHI node inputs and the like which we just + /// need to replace with undef. + /// @{ + typedef SmallVectorImpl::const_iterator dead_op_iterator; + dead_op_iterator dead_op_begin() const { return DeadOperands.begin(); } + dead_op_iterator dead_op_end() const { return DeadOperands.end(); } + /// @} + + /// \brief MemTransferInst auxiliary data. 
+ /// This struct provides some auxiliary data about memory transfer + /// intrinsics such as memcpy and memmove. These intrinsics can use two + /// different ranges within the same alloca, and provide other challenges to + /// correctly represent. We stash extra data to help us untangle this + /// after the partitioning is complete. + struct MemTransferOffsets { + /// The destination begin and end offsets when the destination is within + /// this alloca. If the end offset is zero the destination is not within + /// this alloca. + uint64_t DestBegin, DestEnd; + + /// The source begin and end offsets when the source is within this alloca. + /// If the end offset is zero, the source is not within this alloca. + uint64_t SourceBegin, SourceEnd; + + /// Flag for whether an alloca is splittable. + bool IsSplittable; + }; + MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const { + return MemTransferInstData.lookup(&II); + } + + /// \brief Map from a PHI or select operand back to a partition. + /// + /// When manipulating PHI nodes or selects, they can use more than one + /// partition of an alloca. We store a special mapping to allow finding the + /// partition referenced by each of these operands, if any. + iterator findPartitionForPHIOrSelectOperand(Use *U) { + SmallDenseMap >::const_iterator MapIt + = PHIOrSelectOpMap.find(U); + if (MapIt == PHIOrSelectOpMap.end()) + return end(); + + return begin() + MapIt->second.first; + } + + /// \brief Map from a PHI or select operand back to the specific use of + /// a partition. + /// + /// Similar to mapping these operands back to the partitions, this maps + /// directly to the use structure of that partition. + use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) { + SmallDenseMap >::const_iterator MapIt + = PHIOrSelectOpMap.find(U); + assert(MapIt != PHIOrSelectOpMap.end()); + return Uses[MapIt->second.first].begin() + MapIt->second.second; + } + + /// \brief Compute a common type among the uses of a particular partition. + /// + /// This routines walks all of the uses of a particular partition and tries + /// to find a common type between them. Untyped operations such as memset and + /// memcpy are ignored. + Type *getCommonType(iterator I) const; + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const; + void printUsers(raw_ostream &OS, const_iterator I, + StringRef Indent = " ") const; + void print(raw_ostream &OS) const; + void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const; + void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const; +#endif + +private: + template class BuilderBase; + class PartitionBuilder; + friend class AllocaPartitioning::PartitionBuilder; + class UseBuilder; + friend class AllocaPartitioning::UseBuilder; + +#ifndef NDEBUG + /// \brief Handle to alloca instruction to simplify method interfaces. + AllocaInst &AI; +#endif + + /// \brief The instruction responsible for this alloca having no partitioning. + /// + /// When an instruction (potentially) escapes the pointer to the alloca, we + /// store a pointer to that here and abort trying to partition the alloca. + /// This will be null if the alloca is partitioned successfully. + Instruction *PointerEscapingInstr; + + /// \brief The partitions of the alloca. + /// + /// We store a vector of the partitions over the alloca here. This vector is + /// sorted by increasing begin offset, and then by decreasing end offset. 
See + /// the Partition inner class for more details. Initially (during + /// construction) there are overlaps, but we form a disjoint sequence of + /// partitions while finishing construction and a fully constructed object is + /// expected to always have this as a disjoint space. + SmallVector Partitions; + + /// \brief The uses of the partitions. + /// + /// This is essentially a mapping from each partition to a list of uses of + /// that partition. The mapping is done with a Uses vector that has the exact + /// same number of entries as the partition vector. Each entry is itself + /// a vector of the uses. + SmallVector, 8> Uses; + + /// \brief Instructions which will become dead if we rewrite the alloca. + /// + /// Note that these are not separated by partition. This is because we expect + /// a partitioned alloca to be completely rewritten or not rewritten at all. + /// If rewritten, all these instructions can simply be removed and replaced + /// with undef as they come from outside of the allocated space. + SmallVector DeadUsers; + + /// \brief Operands which will become dead if we rewrite the alloca. + /// + /// These are operands that in their particular use can be replaced with + /// undef when we rewrite the alloca. These show up in out-of-bounds inputs + /// to PHI nodes and the like. They aren't entirely dead (there might be + /// a GEP back into the bounds using it elsewhere) and nor is the PHI, but we + /// want to swap this particular input for undef to simplify the use lists of + /// the alloca. + SmallVector DeadOperands; + + /// \brief The underlying storage for auxiliary memcpy and memset info. + SmallDenseMap MemTransferInstData; + + /// \brief A side datastructure used when building up the partitions and uses. + /// + /// This mapping is only really used during the initial building of the + /// partitioning so that we can retain information about PHI and select nodes + /// processed. + SmallDenseMap > PHIOrSelectSizes; + + /// \brief Auxiliary information for particular PHI or select operands. + SmallDenseMap, 4> PHIOrSelectOpMap; + + /// \brief A utility routine called from the constructor. + /// + /// This does what it says on the tin. It is the key of the alloca partition + /// splitting and merging. After it is called we have the desired disjoint + /// collection of partitions. + void splitAndMergePartitions(); +}; +} + +template +class AllocaPartitioning::BuilderBase + : public InstVisitor { +public: + BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) + : TD(TD), + AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())), + P(P) { + enqueueUsers(AI, 0); + } + +protected: + const DataLayout &TD; + const uint64_t AllocSize; + AllocaPartitioning &P; + + SmallPtrSet VisitedUses; + + struct OffsetUse { + Use *U; + int64_t Offset; + }; + SmallVector Queue; + + // The active offset and use while visiting. 
+ Use *U; + int64_t Offset; + + void enqueueUsers(Instruction &I, int64_t UserOffset) { + for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); + UI != UE; ++UI) { + if (VisitedUses.insert(&UI.getUse())) { + OffsetUse OU = { &UI.getUse(), UserOffset }; + Queue.push_back(OU); + } + } + } + + bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) { + GEPOffset = Offset; + for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI); + GTI != GTE; ++GTI) { + ConstantInt *OpC = dyn_cast(GTI.getOperand()); + if (!OpC) + return false; + if (OpC->isZero()) + continue; + + // Handle a struct index, which adds its field offset to the pointer. + if (StructType *STy = dyn_cast(*GTI)) { + unsigned ElementIdx = OpC->getZExtValue(); + const StructLayout *SL = TD.getStructLayout(STy); + uint64_t ElementOffset = SL->getElementOffset(ElementIdx); + // Check that we can continue to model this GEP in a signed 64-bit offset. + if (ElementOffset > INT64_MAX || + (GEPOffset >= 0 && + ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) { + DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding " + << "what can be represented in an int64_t!\n" + << " alloca: " << P.AI << "\n"); + return false; + } + if (GEPOffset < 0) + GEPOffset = ElementOffset + (uint64_t)-GEPOffset; + else + GEPOffset += ElementOffset; + continue; + } + + APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits()); + Index *= APInt(Index.getBitWidth(), + TD.getTypeAllocSize(GTI.getIndexedType())); + Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset, + /*isSigned*/true); + // Check if the result can be stored in our int64_t offset. + if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) { + DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding " + << "what can be represented in an int64_t!\n" + << " alloca: " << P.AI << "\n"); + return false; + } + + GEPOffset = Index.getSExtValue(); + } + return true; + } + + Value *foldSelectInst(SelectInst &SI) { + // If the condition being selected on is a constant or the same value is + // being selected between, fold the select. Yes this does (rarely) happen + // early on. + if (ConstantInt *CI = dyn_cast(SI.getCondition())) + return SI.getOperand(1+CI->isZero()); + if (SI.getOperand(1) == SI.getOperand(2)) { + assert(*U == SI.getOperand(1)); + return SI.getOperand(1); + } + return 0; + } +}; + +/// \brief Builder for the alloca partitioning. +/// +/// This class builds an alloca partitioning by recursively visiting the uses +/// of an alloca and splitting the partitions for each load and store at each +/// offset. +class AllocaPartitioning::PartitionBuilder + : public BuilderBase { + friend class InstVisitor; + + SmallDenseMap MemTransferPartitionMap; + +public: + PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) + : BuilderBase(TD, AI, P) {} + + /// \brief Run the builder over the allocation. + bool operator()() { + // Note that we have to re-evaluate size on each trip through the loop as + // the queue grows at the tail. + for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) { + U = Queue[Idx].U; + Offset = Queue[Idx].Offset; + if (!visit(cast(U->getUser()))) + return false; + } + return true; + } + +private: + bool markAsEscaping(Instruction &I) { + P.PointerEscapingInstr = &I; + return false; + } + + void insertUse(Instruction &I, int64_t Offset, uint64_t Size, + bool IsSplittable = false) { + // Completely skip uses which have a zero size or don't overlap the + // allocation. 
+ if (Size == 0 || + (Offset >= 0 && (uint64_t)Offset >= AllocSize) || + (Offset < 0 && (uint64_t)-Offset >= Size)) { + DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset + << " which starts past the end of the " << AllocSize + << " byte alloca:\n" + << " alloca: " << P.AI << "\n" + << " use: " << I << "\n"); + return; + } + + // Clamp the start to the beginning of the allocation. + if (Offset < 0) { + DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset + << " to start at the beginning of the alloca:\n" + << " alloca: " << P.AI << "\n" + << " use: " << I << "\n"); + Size -= (uint64_t)-Offset; + Offset = 0; + } + + uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size; + + // Clamp the end offset to the end of the allocation. Note that this is + // formulated to handle even the case where "BeginOffset + Size" overflows. + // NOTE! This may appear superficially to be something we could ignore + // entirely, but that is not so! There may be PHI-node uses where some + // instructions are dead but not others. We can't completely ignore the + // PHI node, and so have to record at least the information here. + assert(AllocSize >= BeginOffset); // Established above. + if (Size > AllocSize - BeginOffset) { + DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset + << " to remain within the " << AllocSize << " byte alloca:\n" + << " alloca: " << P.AI << "\n" + << " use: " << I << "\n"); + EndOffset = AllocSize; + } + + Partition New(BeginOffset, EndOffset, IsSplittable); + P.Partitions.push_back(New); + } + + bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset, + bool IsVolatile) { + uint64_t Size = TD.getTypeStoreSize(Ty); + + // If this memory access can be shown to *statically* extend outside the + // bounds of the allocation, its behavior is undefined, so simply + // ignore it. Note that this is more strict than the generic clamping + // behavior of insertUse. We also try to handle cases which might run the + // risk of overflow. + // FIXME: We should instead consider the pointer to have escaped if this + // function is being instrumented for addressing bugs or race conditions. + if (Offset < 0 || (uint64_t)Offset >= AllocSize || + Size > (AllocSize - (uint64_t)Offset)) { + DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte " + << (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset + << " which extends past the end of the " << AllocSize + << " byte alloca:\n" + << " alloca: " << P.AI << "\n" + << " use: " << I << "\n"); + return true; + } + + // We allow splitting of loads and stores where the type is an integer type + // and which cover the entire alloca. Such integer loads and stores + // often require decomposition into fine-grained loads and stores.
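The clamping performed by insertUse above can be read as mapping a possibly out-of-range use [Offset, Offset + Size) onto the [0, AllocSize) window of the alloca while avoiding overflow in the end computation. A hedged sketch of just that arithmetic with the LLVM types stripped away (the helper name is illustrative):

    #include <cstdint>
    #include <utility>

    // Clamp a use at signed byte Offset of length Size to the [0, AllocSize)
    // window of the alloca, the way insertUse above does.  Returns {0, 0} for
    // a use that misses the allocation entirely.  The end is computed so that
    // "Offset + Size" never has to be formed when it would overflow.
    std::pair<uint64_t, uint64_t> clampUse(int64_t Offset, uint64_t Size,
                                           uint64_t AllocSize) {
      if (Size == 0 ||
          (Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
          (Offset < 0 && (uint64_t)-Offset >= Size))
        return {0, 0};
      if (Offset < 0) {                  // clamp the start to the allocation
        Size -= (uint64_t)-Offset;
        Offset = 0;
      }
      uint64_t Begin = (uint64_t)Offset;
      uint64_t End = Size > AllocSize - Begin ? AllocSize : Begin + Size;
      return {Begin, End};
    }

For instance, a use at offset -4 of 8 bytes against a 16-byte alloca clamps to [0, 4), and a use at offset 12 of 8 bytes clamps to [12, 16).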
+ bool IsSplittable = false; + if (IntegerType *ITy = dyn_cast(Ty)) + IsSplittable = !IsVolatile && ITy->getBitWidth() == AllocSize*8; + + insertUse(I, Offset, Size, IsSplittable); + return true; + } + + bool visitBitCastInst(BitCastInst &BC) { + enqueueUsers(BC, Offset); + return true; + } + + bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { + int64_t GEPOffset; + if (!computeConstantGEPOffset(GEPI, GEPOffset)) + return markAsEscaping(GEPI); + + enqueueUsers(GEPI, GEPOffset); + return true; + } + + bool visitLoadInst(LoadInst &LI) { + assert((!LI.isSimple() || LI.getType()->isSingleValueType()) && + "All simple FCA loads should have been pre-split"); + return handleLoadOrStore(LI.getType(), LI, Offset, LI.isVolatile()); + } + + bool visitStoreInst(StoreInst &SI) { + Value *ValOp = SI.getValueOperand(); + if (ValOp == *U) + return markAsEscaping(SI); + + assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) && + "All simple FCA stores should have been pre-split"); + return handleLoadOrStore(ValOp->getType(), SI, Offset, SI.isVolatile()); + } + + + bool visitMemSetInst(MemSetInst &II) { + assert(II.getRawDest() == *U && "Pointer use is not the destination?"); + ConstantInt *Length = dyn_cast(II.getLength()); + uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; + insertUse(II, Offset, Size, Length); + return true; + } + + bool visitMemTransferInst(MemTransferInst &II) { + ConstantInt *Length = dyn_cast(II.getLength()); + uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; + if (!Size) + // Zero-length mem transfer intrinsics can be ignored entirely. + return true; + + MemTransferOffsets &Offsets = P.MemTransferInstData[&II]; + + // Only intrinsics with a constant length can be split. + Offsets.IsSplittable = Length; + + if (*U == II.getRawDest()) { + Offsets.DestBegin = Offset; + Offsets.DestEnd = Offset + Size; + } + if (*U == II.getRawSource()) { + Offsets.SourceBegin = Offset; + Offsets.SourceEnd = Offset + Size; + } + + // If we have set up end offsets for both the source and the destination, + // we have found both sides of this transfer pointing at the same alloca. + bool SeenBothEnds = Offsets.SourceEnd && Offsets.DestEnd; + if (SeenBothEnds && II.getRawDest() != II.getRawSource()) { + unsigned PrevIdx = MemTransferPartitionMap[&II]; + + // Check if the begin offsets match and this is a non-volatile transfer. + // In that case, we can completely elide the transfer. + if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) { + P.Partitions[PrevIdx].kill(); + return true; + } + + // Otherwise we have an offset transfer within the same alloca. We can't + // split those. + P.Partitions[PrevIdx].IsSplittable = Offsets.IsSplittable = false; + } else if (SeenBothEnds) { + // Handle the case where this exact use provides both ends of the + // operation. + assert(II.getRawDest() == II.getRawSource()); + + // For non-volatile transfers this is a no-op. + if (!II.isVolatile()) + return true; + + // Otherwise just suppress splitting. + Offsets.IsSplittable = false; + } + + + // Insert the use now that we've fixed up the splittable nature. + insertUse(II, Offset, Size, Offsets.IsSplittable); + + // Setup the mapping from intrinsic to partition of we've not seen both + // ends of this transfer. 
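visitMemTransferInst above handles the case where both operands of a memcpy or memmove resolve into the same alloca: a non-volatile transfer whose source and destination ranges coincide is elided outright, while any other same-alloca transfer forces both partitions to be unsplittable. Source along the following lines, which typically lowers to a single llvm.memcpy intrinsic with both pointers derived from one alloca, is the kind of input that exercises this path (an illustrative example, not taken from the patch or its tests):

    #include <cstring>

    struct Buf { int Src[4]; int Dst[4]; };

    // Both memcpy operands below point into the same alloca 'B'.  The builder
    // above records begin/end offsets for each side; since the two ranges
    // differ, the corresponding partitions are marked unsplittable rather than
    // elided.
    int copyWithinOneAlloca() {
      Buf B = {};
      std::memcpy(B.Dst, B.Src, sizeof(B.Src));
      return B.Dst[0];
    }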
+ if (!SeenBothEnds) { + unsigned NewIdx = P.Partitions.size() - 1; + bool Inserted + = MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx)).second; + assert(Inserted && + "Already have intrinsic in map but haven't seen both ends"); + (void)Inserted; + } + + return true; + } + + // Disable SRoA for any intrinsics except for lifetime invariants. + // FIXME: What about debug instrinsics? This matches old behavior, but + // doesn't make sense. + bool visitIntrinsicInst(IntrinsicInst &II) { + if (II.getIntrinsicID() == Intrinsic::lifetime_start || + II.getIntrinsicID() == Intrinsic::lifetime_end) { + ConstantInt *Length = cast(II.getArgOperand(0)); + uint64_t Size = std::min(AllocSize - Offset, Length->getLimitedValue()); + insertUse(II, Offset, Size, true); + return true; + } + + return markAsEscaping(II); + } + + Instruction *hasUnsafePHIOrSelectUse(Instruction *Root, uint64_t &Size) { + // We consider any PHI or select that results in a direct load or store of + // the same offset to be a viable use for partitioning purposes. These uses + // are considered unsplittable and the size is the maximum loaded or stored + // size. + SmallPtrSet Visited; + SmallVector, 4> Uses; + Visited.insert(Root); + Uses.push_back(std::make_pair(cast(*U), Root)); + // If there are no loads or stores, the access is dead. We mark that as + // a size zero access. + Size = 0; + do { + Instruction *I, *UsedI; + llvm::tie(UsedI, I) = Uses.pop_back_val(); + + if (LoadInst *LI = dyn_cast(I)) { + Size = std::max(Size, TD.getTypeStoreSize(LI->getType())); + continue; + } + if (StoreInst *SI = dyn_cast(I)) { + Value *Op = SI->getOperand(0); + if (Op == UsedI) + return SI; + Size = std::max(Size, TD.getTypeStoreSize(Op->getType())); + continue; + } + + if (GetElementPtrInst *GEP = dyn_cast(I)) { + if (!GEP->hasAllZeroIndices()) + return GEP; + } else if (!isa(I) && !isa(I) && + !isa(I)) { + return I; + } + + for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE; + ++UI) + if (Visited.insert(cast(*UI))) + Uses.push_back(std::make_pair(I, cast(*UI))); + } while (!Uses.empty()); + + return 0; + } + + bool visitPHINode(PHINode &PN) { + // See if we already have computed info on this node. + std::pair &PHIInfo = P.PHIOrSelectSizes[&PN]; + if (PHIInfo.first) { + PHIInfo.second = true; + insertUse(PN, Offset, PHIInfo.first); + return true; + } + + // Check for an unsafe use of the PHI node. + if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first)) + return markAsEscaping(*EscapingI); + + insertUse(PN, Offset, PHIInfo.first); + return true; + } + + bool visitSelectInst(SelectInst &SI) { + if (Value *Result = foldSelectInst(SI)) { + if (Result == *U) + // If the result of the constant fold will be the pointer, recurse + // through the select as if we had RAUW'ed it. + enqueueUsers(SI, Offset); + + return true; + } + + // See if we already have computed info on this node. + std::pair &SelectInfo = P.PHIOrSelectSizes[&SI]; + if (SelectInfo.first) { + SelectInfo.second = true; + insertUse(SI, Offset, SelectInfo.first); + return true; + } + + // Check for an unsafe use of the PHI node. + if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first)) + return markAsEscaping(*EscapingI); + + insertUse(SI, Offset, SelectInfo.first); + return true; + } + + /// \brief Disable SROA entirely if there are unhandled users of the alloca. + bool visitInstruction(Instruction &I) { return markAsEscaping(I); } +}; + + +/// \brief Use adder for the alloca partitioning. 
+/// +/// This class adds the uses of an alloca to all of the partitions which they +/// use. For splittable partitions, this can end up doing essentially a linear +/// walk of the partitions, but the number of steps remains bounded by the +/// total result instruction size: +/// - The number of partitions is a result of the number unsplittable +/// instructions using the alloca. +/// - The number of users of each partition is at worst the total number of +/// splittable instructions using the alloca. +/// Thus we will produce N * M instructions in the end, where N are the number +/// of unsplittable uses and M are the number of splittable. This visitor does +/// the exact same number of updates to the partitioning. +/// +/// In the more common case, this visitor will leverage the fact that the +/// partition space is pre-sorted, and do a logarithmic search for the +/// partition needed, making the total visit a classical ((N + M) * log(N)) +/// complexity operation. +class AllocaPartitioning::UseBuilder : public BuilderBase { + friend class InstVisitor; + + /// \brief Set to de-duplicate dead instructions found in the use walk. + SmallPtrSet VisitedDeadInsts; + +public: + UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P) + : BuilderBase(TD, AI, P) {} + + /// \brief Run the builder over the allocation. + void operator()() { + // Note that we have to re-evaluate size on each trip through the loop as + // the queue grows at the tail. + for (unsigned Idx = 0; Idx < Queue.size(); ++Idx) { + U = Queue[Idx].U; + Offset = Queue[Idx].Offset; + this->visit(cast(U->getUser())); + } + } + +private: + void markAsDead(Instruction &I) { + if (VisitedDeadInsts.insert(&I)) + P.DeadUsers.push_back(&I); + } + + void insertUse(Instruction &User, int64_t Offset, uint64_t Size) { + // If the use has a zero size or extends outside of the allocation, record + // it as a dead use for elimination later. + if (Size == 0 || (uint64_t)Offset >= AllocSize || + (Offset < 0 && (uint64_t)-Offset >= Size)) + return markAsDead(User); + + // Clamp the start to the beginning of the allocation. + if (Offset < 0) { + Size -= (uint64_t)-Offset; + Offset = 0; + } + + uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size; + + // Clamp the end offset to the end of the allocation. Note that this is + // formulated to handle even the case where "BeginOffset + Size" overflows. + assert(AllocSize >= BeginOffset); // Established above. + if (Size > AllocSize - BeginOffset) + EndOffset = AllocSize; + + // NB: This only works if we have zero overlapping partitions. + iterator B = std::lower_bound(P.begin(), P.end(), BeginOffset); + if (B != P.begin() && llvm::prior(B)->EndOffset > BeginOffset) + B = llvm::prior(B); + for (iterator I = B, E = P.end(); I != E && I->BeginOffset < EndOffset; + ++I) { + PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset), + std::min(I->EndOffset, EndOffset), U); + P.use_push_back(I, NewPU); + if (isa(U->getUser()) || isa(U->getUser())) + P.PHIOrSelectOpMap[U] + = std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1); + } + } + + void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) { + uint64_t Size = TD.getTypeStoreSize(Ty); + + // If this memory access can be shown to *statically* extend outside the + // bounds of of the allocation, it's behavior is undefined, so simply + // ignore it. Note that this is more strict than the generic clamping + // behavior of insertUse. 
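UseBuilder::insertUse above relies on the partitions being sorted and disjoint: it locates the first candidate partition with std::lower_bound on the begin offset, steps back one slot when the preceding partition still overlaps the use, and then walks forward while partitions keep starting before the use's end. The same lookup in isolation, over a plain vector of ranges (names are illustrative, not the patch's types):

    #include <algorithm>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    struct Part { uint64_t Begin, End; };   // disjoint, sorted by Begin

    // Collect the indices of every partition overlapping [Begin, End), using
    // the same lower_bound-plus-step-back scheme as UseBuilder::insertUse.
    std::vector<unsigned> overlappingParts(const std::vector<Part> &Parts,
                                           uint64_t Begin, uint64_t End) {
      std::vector<unsigned> Out;
      auto B = std::lower_bound(Parts.begin(), Parts.end(), Begin,
                                [](const Part &P, uint64_t Off) {
                                  return P.Begin < Off;
                                });
      if (B != Parts.begin() && std::prev(B)->End > Begin)
        --B;                               // previous partition still overlaps
      for (auto I = B; I != Parts.end() && I->Begin < End; ++I)
        Out.push_back(static_cast<unsigned>(I - Parts.begin()));
      return Out;
    }

The step back by one slot is what catches a use beginning strictly inside the partition that precedes the lower_bound hit, which is why the routine above notes that it only works once the partitions are non-overlapping.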
+ if (Offset < 0 || (uint64_t)Offset >= AllocSize || + Size > (AllocSize - (uint64_t)Offset)) + return markAsDead(I); + + insertUse(I, Offset, Size); + } + + void visitBitCastInst(BitCastInst &BC) { + if (BC.use_empty()) + return markAsDead(BC); + + enqueueUsers(BC, Offset); + } + + void visitGetElementPtrInst(GetElementPtrInst &GEPI) { + if (GEPI.use_empty()) + return markAsDead(GEPI); + + int64_t GEPOffset; + if (!computeConstantGEPOffset(GEPI, GEPOffset)) + llvm_unreachable("Unable to compute constant offset for use"); + + enqueueUsers(GEPI, GEPOffset); + } + + void visitLoadInst(LoadInst &LI) { + handleLoadOrStore(LI.getType(), LI, Offset); + } + + void visitStoreInst(StoreInst &SI) { + handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset); + } + + void visitMemSetInst(MemSetInst &II) { + ConstantInt *Length = dyn_cast(II.getLength()); + uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; + insertUse(II, Offset, Size); + } + + void visitMemTransferInst(MemTransferInst &II) { + ConstantInt *Length = dyn_cast(II.getLength()); + uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset; + if (!Size) + return markAsDead(II); + + MemTransferOffsets &Offsets = P.MemTransferInstData[&II]; + if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd && + Offsets.DestBegin == Offsets.SourceBegin) + return markAsDead(II); // Skip identity transfers without side-effects. + + insertUse(II, Offset, Size); + } + + void visitIntrinsicInst(IntrinsicInst &II) { + assert(II.getIntrinsicID() == Intrinsic::lifetime_start || + II.getIntrinsicID() == Intrinsic::lifetime_end); + + ConstantInt *Length = cast(II.getArgOperand(0)); + insertUse(II, Offset, + std::min(AllocSize - Offset, Length->getLimitedValue())); + } + + void insertPHIOrSelect(Instruction &User, uint64_t Offset) { + uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first; + + // For PHI and select operands outside the alloca, we can't nuke the entire + // phi or select -- the other side might still be relevant, so we special + // case them here and use a separate structure to track the operands + // themselves which should be replaced with undef. + if (Offset >= AllocSize) { + P.DeadOperands.push_back(U); + return; + } + + insertUse(User, Offset, Size); + } + void visitPHINode(PHINode &PN) { + if (PN.use_empty()) + return markAsDead(PN); + + insertPHIOrSelect(PN, Offset); + } + void visitSelectInst(SelectInst &SI) { + if (SI.use_empty()) + return markAsDead(SI); + + if (Value *Result = foldSelectInst(SI)) { + if (Result == *U) + // If the result of the constant fold will be the pointer, recurse + // through the select as if we had RAUW'ed it. + enqueueUsers(SI, Offset); + else + // Otherwise the operand to the select is dead, and we can replace it + // with undef. + P.DeadOperands.push_back(U); + + return; + } + + insertPHIOrSelect(SI, Offset); + } + + /// \brief Unreachable, we've already visited the alloca once. + void visitInstruction(Instruction &I) { + llvm_unreachable("Unhandled instruction in use builder."); + } +}; + +void AllocaPartitioning::splitAndMergePartitions() { + size_t NumDeadPartitions = 0; + + // Track the range of splittable partitions that we pass when accumulating + // overlapping unsplittable partitions. 
+ uint64_t SplitEndOffset = 0ull; + + Partition New(0ull, 0ull, false); + + for (unsigned i = 0, j = i, e = Partitions.size(); i != e; i = j) { + ++j; + + if (!Partitions[i].IsSplittable || New.BeginOffset == New.EndOffset) { + assert(New.BeginOffset == New.EndOffset); + New = Partitions[i]; + } else { + assert(New.IsSplittable); + New.EndOffset = std::max(New.EndOffset, Partitions[i].EndOffset); + } + assert(New.BeginOffset != New.EndOffset); + + // Scan the overlapping partitions. + while (j != e && New.EndOffset > Partitions[j].BeginOffset) { + // If the new partition we are forming is splittable, stop at the first + // unsplittable partition. + if (New.IsSplittable && !Partitions[j].IsSplittable) + break; + + // Grow the new partition to include any equally splittable range. 'j' is + // always equally splittable when New is splittable, but when New is not + // splittable, we may subsume some (or part of some) splitable partition + // without growing the new one. + if (New.IsSplittable == Partitions[j].IsSplittable) { + New.EndOffset = std::max(New.EndOffset, Partitions[j].EndOffset); + } else { + assert(!New.IsSplittable); + assert(Partitions[j].IsSplittable); + SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset); + } + + Partitions[j].kill(); + ++NumDeadPartitions; + ++j; + } + + // If the new partition is splittable, chop off the end as soon as the + // unsplittable subsequent partition starts and ensure we eventually cover + // the splittable area. + if (j != e && New.IsSplittable) { + SplitEndOffset = std::max(SplitEndOffset, New.EndOffset); + New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset); + } + + // Add the new partition if it differs from the original one and is + // non-empty. We can end up with an empty partition here if it was + // splittable but there is an unsplittable one that starts at the same + // offset. + if (New != Partitions[i]) { + if (New.BeginOffset != New.EndOffset) + Partitions.push_back(New); + // Mark the old one for removal. + Partitions[i].kill(); + ++NumDeadPartitions; + } + + New.BeginOffset = New.EndOffset; + if (!New.IsSplittable) { + New.EndOffset = std::max(New.EndOffset, SplitEndOffset); + if (j != e && !Partitions[j].IsSplittable) + New.EndOffset = std::min(New.EndOffset, Partitions[j].BeginOffset); + New.IsSplittable = true; + // If there is a trailing splittable partition which won't be fused into + // the next splittable partition go ahead and add it onto the partitions + // list. + if (New.BeginOffset < New.EndOffset && + (j == e || !Partitions[j].IsSplittable || + New.EndOffset < Partitions[j].BeginOffset)) { + Partitions.push_back(New); + New.BeginOffset = New.EndOffset = 0ull; + } + } + } + + // Re-sort the partitions now that they have been split and merged into + // disjoint set of partitions. Also remove any of the dead partitions we've + // replaced in the process. + std::sort(Partitions.begin(), Partitions.end()); + if (NumDeadPartitions) { + assert(Partitions.back().isDead()); + assert((ptrdiff_t)NumDeadPartitions == + std::count(Partitions.begin(), Partitions.end(), Partitions.back())); + } + Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end()); +} + +AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI) + : +#ifndef NDEBUG + AI(AI), +#endif + PointerEscapingInstr(0) { + PartitionBuilder PB(TD, AI, *this); + if (!PB()) + return; + + // Sort the uses. This arranges for the offsets to be in ascending order, + // and the sizes to be in descending order. 
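A concrete reading of splitAndMergePartitions above, under the assumption of a 16-byte alloca touched by a splittable memset partition covering [0,16) and an unsplittable store partition covering [4,8): the unsplittable range is carved out and the splittable remainder is re-formed around it, yielding a disjoint cover of the used space. Expressed as data only; the expected result is derived by hand from the logic above rather than computed by this snippet:

    #include <vector>

    struct Piece { unsigned Begin, End; bool Splittable; };

    // Input partitions as the PartitionBuilder would record them.
    const std::vector<Piece> Input    = {{0, 16, true}, {4, 8, false}};
    // Disjoint partitions expected after splitAndMergePartitions.
    const std::vector<Piece> Expected = {{0, 4, true}, {4, 8, false}, {8, 16, true}};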
+ std::sort(Partitions.begin(), Partitions.end()); + + // Remove any partitions from the back which are marked as dead. + while (!Partitions.empty() && Partitions.back().isDead()) + Partitions.pop_back(); + + if (Partitions.size() > 1) { + // Intersect splittability for all partitions with equal offsets and sizes. + // Then remove all but the first so that we have a sequence of non-equal but + // potentially overlapping partitions. + for (iterator I = Partitions.begin(), J = I, E = Partitions.end(); I != E; + I = J) { + ++J; + while (J != E && *I == *J) { + I->IsSplittable &= J->IsSplittable; + ++J; + } + } + Partitions.erase(std::unique(Partitions.begin(), Partitions.end()), + Partitions.end()); + + // Split splittable and merge unsplittable partitions into a disjoint set + // of partitions over the used space of the allocation. + splitAndMergePartitions(); + } + + // Now build up the user lists for each of these disjoint partitions by + // re-walking the recursive users of the alloca. + Uses.resize(Partitions.size()); + UseBuilder UB(TD, AI, *this); + UB(); +} + +Type *AllocaPartitioning::getCommonType(iterator I) const { + Type *Ty = 0; + for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) { + if (!UI->U) + continue; // Skip dead uses. + if (isa(*UI->U->getUser())) + continue; + if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset) + continue; + + Type *UserTy = 0; + if (LoadInst *LI = dyn_cast(UI->U->getUser())) { + UserTy = LI->getType(); + } else if (StoreInst *SI = dyn_cast(UI->U->getUser())) { + UserTy = SI->getValueOperand()->getType(); + } else { + return 0; // Bail if we have weird uses. + } + + if (IntegerType *ITy = dyn_cast(UserTy)) { + // If the type is larger than the partition, skip it. We only encounter + // this for split integer operations where we want to use the type of the + // entity causing the split. + if (ITy->getBitWidth() > (I->EndOffset - I->BeginOffset)*8) + continue; + + // If we have found an integer type use covering the alloca, use that + // regardless of the other types, as integers are often used for a "bucket + // of bits" type. + return ITy; + } + + if (Ty && Ty != UserTy) + return 0; + + Ty = UserTy; + } + return Ty; +} + +#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + +void AllocaPartitioning::print(raw_ostream &OS, const_iterator I, + StringRef Indent) const { + OS << Indent << "partition #" << (I - begin()) + << " [" << I->BeginOffset << "," << I->EndOffset << ")" + << (I->IsSplittable ? " (splittable)" : "") + << (Uses[I - begin()].empty() ? " (zero uses)" : "") + << "\n"; +} + +void AllocaPartitioning::printUsers(raw_ostream &OS, const_iterator I, + StringRef Indent) const { + for (const_use_iterator UI = use_begin(I), UE = use_end(I); + UI != UE; ++UI) { + if (!UI->U) + continue; // Skip dead uses. + OS << Indent << " [" << UI->BeginOffset << "," << UI->EndOffset << ") " + << "used by: " << *UI->U->getUser() << "\n"; + if (MemTransferInst *II = dyn_cast(UI->U->getUser())) { + const MemTransferOffsets &MTO = MemTransferInstData.lookup(II); + bool IsDest; + if (!MTO.IsSplittable) + IsDest = UI->BeginOffset == MTO.DestBegin; + else + IsDest = MTO.DestBegin != 0u; + OS << Indent << " (original " << (IsDest ? "dest" : "source") << ": " + << "[" << (IsDest ? MTO.DestBegin : MTO.SourceBegin) + << "," << (IsDest ? 
MTO.DestEnd : MTO.SourceEnd) << ")\n"; + } + } +} + +void AllocaPartitioning::print(raw_ostream &OS) const { + if (PointerEscapingInstr) { + OS << "No partitioning for alloca: " << AI << "\n" + << " A pointer to this alloca escaped by:\n" + << " " << *PointerEscapingInstr << "\n"; + return; + } + + OS << "Partitioning of alloca: " << AI << "\n"; + unsigned Num = 0; + for (const_iterator I = begin(), E = end(); I != E; ++I, ++Num) { + print(OS, I); + printUsers(OS, I); + } +} + +void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); } +void AllocaPartitioning::dump() const { print(dbgs()); } + +#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) + + +namespace { +/// \brief Implementation of LoadAndStorePromoter for promoting allocas. +/// +/// This subclass of LoadAndStorePromoter adds overrides to handle promoting +/// the loads and stores of an alloca instruction, as well as updating its +/// debug information. This is used when a domtree is unavailable and thus +/// mem2reg in its full form can't be used to handle promotion of allocas to +/// scalar values. +class AllocaPromoter : public LoadAndStorePromoter { + AllocaInst &AI; + DIBuilder &DIB; + + SmallVector DDIs; + SmallVector DVIs; + +public: + AllocaPromoter(const SmallVectorImpl &Insts, SSAUpdater &S, + AllocaInst &AI, DIBuilder &DIB) + : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {} + + void run(const SmallVectorImpl &Insts) { + // Remember which alloca we're promoting (for isInstInList). + if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) { + for (Value::use_iterator UI = DebugNode->use_begin(), + UE = DebugNode->use_end(); + UI != UE; ++UI) + if (DbgDeclareInst *DDI = dyn_cast(*UI)) + DDIs.push_back(DDI); + else if (DbgValueInst *DVI = dyn_cast(*UI)) + DVIs.push_back(DVI); + } + + LoadAndStorePromoter::run(Insts); + AI.eraseFromParent(); + while (!DDIs.empty()) + DDIs.pop_back_val()->eraseFromParent(); + while (!DVIs.empty()) + DVIs.pop_back_val()->eraseFromParent(); + } + + virtual bool isInstInList(Instruction *I, + const SmallVectorImpl &Insts) const { + if (LoadInst *LI = dyn_cast(I)) + return LI->getOperand(0) == &AI; + return cast(I)->getPointerOperand() == &AI; + } + + virtual void updateDebugInfo(Instruction *Inst) const { + for (SmallVector::const_iterator I = DDIs.begin(), + E = DDIs.end(); I != E; ++I) { + DbgDeclareInst *DDI = *I; + if (StoreInst *SI = dyn_cast(Inst)) + ConvertDebugDeclareToDebugValue(DDI, SI, DIB); + else if (LoadInst *LI = dyn_cast(Inst)) + ConvertDebugDeclareToDebugValue(DDI, LI, DIB); + } + for (SmallVector::const_iterator I = DVIs.begin(), + E = DVIs.end(); I != E; ++I) { + DbgValueInst *DVI = *I; + Value *Arg = NULL; + if (StoreInst *SI = dyn_cast(Inst)) { + // If an argument is zero extended then use argument directly. The ZExt + // may be zapped by an optimization pass in future. + if (ZExtInst *ZExt = dyn_cast(SI->getOperand(0))) + Arg = dyn_cast(ZExt->getOperand(0)); + if (SExtInst *SExt = dyn_cast(SI->getOperand(0))) + Arg = dyn_cast(SExt->getOperand(0)); + if (!Arg) + Arg = SI->getOperand(0); + } else if (LoadInst *LI = dyn_cast(Inst)) { + Arg = LI->getOperand(0); + } else { + continue; + } + Instruction *DbgVal = + DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()), + Inst); + DbgVal->setDebugLoc(DVI->getDebugLoc()); + } + } +}; +} // end anon namespace + + +namespace { +/// \brief An optimization pass providing Scalar Replacement of Aggregates. 
+/// +/// This pass takes allocations which can be completely analyzed (that is, they +/// don't escape) and tries to turn them into scalar SSA values. There are +/// a few steps to this process. +/// +/// 1) It takes allocations of aggregates and analyzes the ways in which they +/// are used to try to split them into smaller allocations, ideally of +/// a single scalar data type. It will split up memcpy and memset accesses +/// as necessary and try to isolate invidual scalar accesses. +/// 2) It will transform accesses into forms which are suitable for SSA value +/// promotion. This can be replacing a memset with a scalar store of an +/// integer value, or it can involve speculating operations on a PHI or +/// select to be a PHI or select of the results. +/// 3) Finally, this will try to detect a pattern of accesses which map cleanly +/// onto insert and extract operations on a vector value, and convert them to +/// this form. By doing so, it will enable promotion of vector aggregates to +/// SSA vector values. +class SROA : public FunctionPass { + const bool RequiresDomTree; + + LLVMContext *C; + const DataLayout *TD; + DominatorTree *DT; + + /// \brief Worklist of alloca instructions to simplify. + /// + /// Each alloca in the function is added to this. Each new alloca formed gets + /// added to it as well to recursively simplify unless that alloca can be + /// directly promoted. Finally, each time we rewrite a use of an alloca other + /// the one being actively rewritten, we add it back onto the list if not + /// already present to ensure it is re-visited. + SetVector > Worklist; + + /// \brief A collection of instructions to delete. + /// We try to batch deletions to simplify code and make things a bit more + /// efficient. + SetVector > DeadInsts; + + /// \brief Post-promotion worklist. + /// + /// Sometimes we discover an alloca which has a high probability of becoming + /// viable for SROA after a round of promotion takes place. In those cases, + /// the alloca is enqueued here for re-processing. + /// + /// Note that we have to be very careful to clear allocas out of this list in + /// the event they are deleted. + SetVector > PostPromotionWorklist; + + /// \brief A collection of alloca instructions we can directly promote. + std::vector PromotableAllocas; + +public: + SROA(bool RequiresDomTree = true) + : FunctionPass(ID), RequiresDomTree(RequiresDomTree), + C(0), TD(0), DT(0) { + initializeSROAPass(*PassRegistry::getPassRegistry()); + } + bool runOnFunction(Function &F); + void getAnalysisUsage(AnalysisUsage &AU) const; + + const char *getPassName() const { return "SROA"; } + static char ID; + +private: + friend class PHIOrSelectSpeculator; + friend class AllocaPartitionRewriter; + friend class AllocaPartitionVectorRewriter; + + bool rewriteAllocaPartition(AllocaInst &AI, + AllocaPartitioning &P, + AllocaPartitioning::iterator PI); + bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P); + bool runOnAlloca(AllocaInst &AI); + void deleteDeadInstructions(SmallPtrSet &DeletedAllocas); + bool promoteAllocas(Function &F); +}; +} + +char SROA::ID = 0; + +FunctionPass *llvm::createSROAPass(bool RequiresDomTree) { + return new SROA(RequiresDomTree); +} + +INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates", + false, false) +INITIALIZE_PASS_DEPENDENCY(DominatorTree) +INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates", + false, false) + +namespace { +/// \brief Visitor to speculate PHIs and Selects where possible. 
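The registration above exposes the pass under the argument "sroa", and createSROAPass gives clients a factory for it. A minimal sketch of driving it through the legacy pass manager of this era; the headers and PassManager usage are the usual 3.x API and are assumptions of the sketch rather than part of the patch:

    #include "llvm/Module.h"
    #include "llvm/PassManager.h"
    #include "llvm/Transforms/Scalar.h"

    using namespace llvm;

    // Run the new SROA pass over a module that has already been parsed or
    // built elsewhere.  createSROAPass() is the factory added by this patch;
    // the pass manager schedules the DominatorTree dependency itself.
    void runSROA(Module &M) {
      PassManager PM;
      PM.add(createSROAPass(/*RequiresDomTree=*/true));
      PM.run(M);
    }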
+class PHIOrSelectSpeculator : public InstVisitor { + // Befriend the base class so it can delegate to private visit methods. + friend class llvm::InstVisitor; + + const DataLayout &TD; + AllocaPartitioning &P; + SROA &Pass; + +public: + PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass) + : TD(TD), P(P), Pass(Pass) {} + + /// \brief Visit the users of an alloca partition and rewrite them. + void visitUsers(AllocaPartitioning::const_iterator PI) { + // Note that we need to use an index here as the underlying vector of uses + // may be grown during speculation. However, we never need to re-visit the + // new uses, and so we can use the initial size bound. + for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) { + const AllocaPartitioning::PartitionUse &PU = P.getUse(PI, Idx); + if (!PU.U) + continue; // Skip dead use. + + visit(cast(PU.U->getUser())); + } + } + +private: + // By default, skip this instruction. + void visitInstruction(Instruction &I) {} + + /// PHI instructions that use an alloca and are subsequently loaded can be + /// rewritten to load both input pointers in the pred blocks and then PHI the + /// results, allowing the load of the alloca to be promoted. + /// From this: + /// %P2 = phi [i32* %Alloca, i32* %Other] + /// %V = load i32* %P2 + /// to: + /// %V1 = load i32* %Alloca -> will be mem2reg'd + /// ... + /// %V2 = load i32* %Other + /// ... + /// %V = phi [i32 %V1, i32 %V2] + /// + /// We can do this to a select if its only uses are loads and if the operands + /// to the select can be loaded unconditionally. + /// + /// FIXME: This should be hoisted into a generic utility, likely in + /// Transforms/Util/Local.h + bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl &Loads) { + // For now, we can only do this promotion if the load is in the same block + // as the PHI, and if there are no stores between the phi and load. + // TODO: Allow recursive phi users. + // TODO: Allow stores. + BasicBlock *BB = PN.getParent(); + unsigned MaxAlign = 0; + for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); + UI != UE; ++UI) { + LoadInst *LI = dyn_cast(*UI); + if (LI == 0 || !LI->isSimple()) return false; + + // For now we only allow loads in the same block as the PHI. This is + // a common case that happens when instcombine merges two loads through + // a PHI. + if (LI->getParent() != BB) return false; + + // Ensure that there are no instructions between the PHI and the load that + // could store. + for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI) + if (BBI->mayWriteToMemory()) + return false; + + MaxAlign = std::max(MaxAlign, LI->getAlignment()); + Loads.push_back(LI); + } + + // We can only transform this if it is safe to push the loads into the + // predecessor blocks. The only thing to watch out for is that we can't put + // a possibly trapping load in the predecessor if it is a critical edge. + for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; + ++Idx) { + TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator(); + Value *InVal = PN.getIncomingValue(Idx); + + // If the value is produced by the terminator of the predecessor (an + // invoke) or it has side-effects, there is no valid place to put a load + // in the predecessor. + if (TI == InVal || TI->mayHaveSideEffects()) + return false; + + // If the predecessor has a single successor, then the edge isn't + // critical. 
+ if (TI->getNumSuccessors() == 1) + continue; + + // If this pointer is always safe to load, or if we can prove that there + // is already a load in the block, then we can move the load to the pred + // block. + if (InVal->isDereferenceablePointer() || + isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD)) + continue; + + return false; + } + + return true; + } + + void visitPHINode(PHINode &PN) { + DEBUG(dbgs() << " original: " << PN << "\n"); + + SmallVector Loads; + if (!isSafePHIToSpeculate(PN, Loads)) + return; + + assert(!Loads.empty()); + + Type *LoadTy = cast(PN.getType())->getElementType(); + IRBuilder<> PHIBuilder(&PN); + PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(), + PN.getName() + ".sroa.speculated"); + + // Get the TBAA tag and alignment to use from one of the loads. It doesn't + // matter which one we get and if any differ, it doesn't matter. + LoadInst *SomeLoad = cast(Loads.back()); + MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa); + unsigned Align = SomeLoad->getAlignment(); + + // Rewrite all loads of the PN to use the new PHI. + do { + LoadInst *LI = Loads.pop_back_val(); + LI->replaceAllUsesWith(NewPN); + Pass.DeadInsts.insert(LI); + } while (!Loads.empty()); + + // Inject loads into all of the pred blocks. + for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) { + BasicBlock *Pred = PN.getIncomingBlock(Idx); + TerminatorInst *TI = Pred->getTerminator(); + Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx)); + Value *InVal = PN.getIncomingValue(Idx); + IRBuilder<> PredBuilder(TI); + + LoadInst *Load + = PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." + + Pred->getName())); + ++NumLoadsSpeculated; + Load->setAlignment(Align); + if (TBAATag) + Load->setMetadata(LLVMContext::MD_tbaa, TBAATag); + NewPN->addIncoming(Load, Pred); + + Instruction *Ptr = dyn_cast(InVal); + if (!Ptr) + // No uses to rewrite. + continue; + + // Try to lookup and rewrite any partition uses corresponding to this phi + // input. + AllocaPartitioning::iterator PI + = P.findPartitionForPHIOrSelectOperand(InUse); + if (PI == P.end()) + continue; + + // Replace the Use in the PartitionUse for this operand with the Use + // inside the load. + AllocaPartitioning::use_iterator UI + = P.findPartitionUseForPHIOrSelectOperand(InUse); + assert(isa(*UI->U->getUser())); + UI->U = &Load->getOperandUse(Load->getPointerOperandIndex()); + } + DEBUG(dbgs() << " speculated to: " << *NewPN << "\n"); + } + + /// Select instructions that use an alloca and are subsequently loaded can be + /// rewritten to load both input pointers and then select between the result, + /// allowing the load of the alloca to be promoted. + /// From this: + /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other + /// %V = load i32* %P2 + /// to: + /// %V1 = load i32* %Alloca -> will be mem2reg'd + /// %V2 = load i32* %Other + /// %V = select i1 %cond, i32 %V1, i32 %V2 + /// + /// We can do this to a select if its only uses are loads and if the operand + /// to the select can be loaded unconditionally. 
+ bool isSafeSelectToSpeculate(SelectInst &SI, + SmallVectorImpl &Loads) { + Value *TValue = SI.getTrueValue(); + Value *FValue = SI.getFalseValue(); + bool TDerefable = TValue->isDereferenceablePointer(); + bool FDerefable = FValue->isDereferenceablePointer(); + + for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); + UI != UE; ++UI) { + LoadInst *LI = dyn_cast(*UI); + if (LI == 0 || !LI->isSimple()) return false; + + // Both operands to the select need to be dereferencable, either + // absolutely (e.g. allocas) or at this point because we can see other + // accesses to it. + if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI, + LI->getAlignment(), &TD)) + return false; + if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI, + LI->getAlignment(), &TD)) + return false; + Loads.push_back(LI); + } + + return true; + } + + void visitSelectInst(SelectInst &SI) { + DEBUG(dbgs() << " original: " << SI << "\n"); + IRBuilder<> IRB(&SI); + + // If the select isn't safe to speculate, just use simple logic to emit it. + SmallVector Loads; + if (!isSafeSelectToSpeculate(SI, Loads)) + return; + + Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) }; + AllocaPartitioning::iterator PIs[2]; + AllocaPartitioning::PartitionUse PUs[2]; + for (unsigned i = 0, e = 2; i != e; ++i) { + PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]); + if (PIs[i] != P.end()) { + // If the pointer is within the partitioning, remove the select from + // its uses. We'll add in the new loads below. + AllocaPartitioning::use_iterator UI + = P.findPartitionUseForPHIOrSelectOperand(Ops[i]); + PUs[i] = *UI; + // Clear out the use here so that the offsets into the use list remain + // stable but this use is ignored when rewriting. + UI->U = 0; + } + } + + Value *TV = SI.getTrueValue(); + Value *FV = SI.getFalseValue(); + // Replace the loads of the select with a select of two loads. + while (!Loads.empty()) { + LoadInst *LI = Loads.pop_back_val(); + + IRB.SetInsertPoint(LI); + LoadInst *TL = + IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true"); + LoadInst *FL = + IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false"); + NumLoadsSpeculated += 2; + + // Transfer alignment and TBAA info if present. + TL->setAlignment(LI->getAlignment()); + FL->setAlignment(LI->getAlignment()); + if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) { + TL->setMetadata(LLVMContext::MD_tbaa, Tag); + FL->setMetadata(LLVMContext::MD_tbaa, Tag); + } + + Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL, + LI->getName() + ".sroa.speculated"); + + LoadInst *Loads[2] = { TL, FL }; + for (unsigned i = 0, e = 2; i != e; ++i) { + if (PIs[i] != P.end()) { + Use *LoadUse = &Loads[i]->getOperandUse(0); + assert(PUs[i].U->get() == LoadUse->get()); + PUs[i].U = LoadUse; + P.use_push_back(PIs[i], PUs[i]); + } + } + + DEBUG(dbgs() << " speculated to: " << *V << "\n"); + LI->replaceAllUsesWith(V); + Pass.DeadInsts.insert(LI); + } + } +}; +} + +/// \brief Accumulate the constant offsets in a GEP into a single APInt offset. +/// +/// If the provided GEP is all-constant, the total byte offset formed by the +/// GEP is computed and Offset is set to it. If the GEP has any non-constant +/// operands, the function returns false and the value of Offset is unmodified. 
+static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP, + APInt &Offset) { + APInt GEPOffset(Offset.getBitWidth(), 0); + for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP); + GTI != GTE; ++GTI) { + ConstantInt *OpC = dyn_cast(GTI.getOperand()); + if (!OpC) + return false; + if (OpC->isZero()) continue; + + // Handle a struct index, which adds its field offset to the pointer. + if (StructType *STy = dyn_cast(*GTI)) { + unsigned ElementIdx = OpC->getZExtValue(); + const StructLayout *SL = TD.getStructLayout(STy); + GEPOffset += APInt(Offset.getBitWidth(), + SL->getElementOffset(ElementIdx)); + continue; + } + + APInt TypeSize(Offset.getBitWidth(), + TD.getTypeAllocSize(GTI.getIndexedType())); + if (VectorType *VTy = dyn_cast(*GTI)) { + assert((VTy->getScalarSizeInBits() % 8) == 0 && + "vector element size is not a multiple of 8, cannot GEP over it"); + TypeSize = VTy->getScalarSizeInBits() / 8; + } + + GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize; + } + Offset = GEPOffset; + return true; +} + +/// \brief Build a GEP out of a base pointer and indices. +/// +/// This will return the BasePtr if that is valid, or build a new GEP +/// instruction using the IRBuilder if GEP-ing is needed. +static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr, + SmallVectorImpl &Indices, + const Twine &Prefix) { + if (Indices.empty()) + return BasePtr; + + // A single zero index is a no-op, so check for this and avoid building a GEP + // in that case. + if (Indices.size() == 1 && cast(Indices.back())->isZero()) + return BasePtr; + + return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx"); +} + +/// \brief Get a natural GEP off of the BasePtr walking through Ty toward +/// TargetTy without changing the offset of the pointer. +/// +/// This routine assumes we've already established a properly offset GEP with +/// Indices, and arrived at the Ty type. The goal is to continue to GEP with +/// zero-indices down through type layers until we find one the same as +/// TargetTy. If we can't find one with the same type, we at least try to use +/// one with the same size. If none of that works, we just produce the GEP as +/// indicated by Indices to have the correct offset. +static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD, + Value *BasePtr, Type *Ty, Type *TargetTy, + SmallVectorImpl &Indices, + const Twine &Prefix) { + if (Ty == TargetTy) + return buildGEP(IRB, BasePtr, Indices, Prefix); + + // See if we can descend into a struct and locate a field with the correct + // type. + unsigned NumLayers = 0; + Type *ElementTy = Ty; + do { + if (ElementTy->isPointerTy()) + break; + if (SequentialType *SeqTy = dyn_cast(ElementTy)) { + ElementTy = SeqTy->getElementType(); + // Note that we use the default address space as this index is over an + // array or a vector, not a pointer. + Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(0), 0))); + } else if (StructType *STy = dyn_cast(ElementTy)) { + if (STy->element_begin() == STy->element_end()) + break; // Nothing left to descend into. + ElementTy = *STy->element_begin(); + Indices.push_back(IRB.getInt32(0)); + } else { + break; + } + ++NumLayers; + } while (ElementTy != TargetTy); + if (ElementTy != TargetTy) + Indices.erase(Indices.end() - NumLayers, Indices.end()); + + return buildGEP(IRB, BasePtr, Indices, Prefix); +} + +/// \brief Recursively compute indices for a natural GEP. 
+/// +/// This is the recursive step for getNaturalGEPWithOffset that walks down the +/// element types adding appropriate indices for the GEP. +static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD, + Value *Ptr, Type *Ty, APInt &Offset, + Type *TargetTy, + SmallVectorImpl &Indices, + const Twine &Prefix) { + if (Offset == 0) + return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix); + + // We can't recurse through pointer types. + if (Ty->isPointerTy()) + return 0; + + // We try to analyze GEPs over vectors here, but note that these GEPs are + // extremely poorly defined currently. The long-term goal is to remove GEPing + // over a vector from the IR completely. + if (VectorType *VecTy = dyn_cast(Ty)) { + unsigned ElementSizeInBits = VecTy->getScalarSizeInBits(); + if (ElementSizeInBits % 8) + return 0; // GEPs over non-multiple of 8 size vector elements are invalid. + APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8); + APInt NumSkippedElements = Offset.sdiv(ElementSize); + if (NumSkippedElements.ugt(VecTy->getNumElements())) + return 0; + Offset -= NumSkippedElements * ElementSize; + Indices.push_back(IRB.getInt(NumSkippedElements)); + return getNaturalGEPRecursively(IRB, TD, Ptr, VecTy->getElementType(), + Offset, TargetTy, Indices, Prefix); + } + + if (ArrayType *ArrTy = dyn_cast(Ty)) { + Type *ElementTy = ArrTy->getElementType(); + APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy)); + APInt NumSkippedElements = Offset.sdiv(ElementSize); + if (NumSkippedElements.ugt(ArrTy->getNumElements())) + return 0; + + Offset -= NumSkippedElements * ElementSize; + Indices.push_back(IRB.getInt(NumSkippedElements)); + return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, + Indices, Prefix); + } + + StructType *STy = dyn_cast(Ty); + if (!STy) + return 0; + + const StructLayout *SL = TD.getStructLayout(STy); + uint64_t StructOffset = Offset.getZExtValue(); + if (StructOffset >= SL->getSizeInBytes()) + return 0; + unsigned Index = SL->getElementContainingOffset(StructOffset); + Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index)); + Type *ElementTy = STy->getElementType(Index); + if (Offset.uge(TD.getTypeAllocSize(ElementTy))) + return 0; // The offset points into alignment padding. + + Indices.push_back(IRB.getInt32(Index)); + return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, + Indices, Prefix); +} + +/// \brief Get a natural GEP from a base pointer to a particular offset and +/// resulting in a particular type. +/// +/// The goal is to produce a "natural" looking GEP that works with the existing +/// composite types to arrive at the appropriate offset and element type for +/// a pointer. TargetTy is the element type the returned GEP should point-to if +/// possible. We recurse by decreasing Offset, adding the appropriate index to +/// Indices, and setting Ty to the result subtype. +/// +/// If no natural GEP can be constructed, this function returns null. +static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD, + Value *Ptr, APInt Offset, Type *TargetTy, + SmallVectorImpl &Indices, + const Twine &Prefix) { + PointerType *Ty = cast(Ptr->getType()); + + // Don't consider any GEPs through an i8* as natural unless the TargetTy is + // an i8. + if (Ty == IRB.getInt8PtrTy() && TargetTy->isIntegerTy(8)) + return 0; + + Type *ElementTy = Ty->getElementType(); + if (!ElementTy->isSized()) + return 0; // We can't GEP through an unsized element. 
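As a standalone model of the recursion (a hypothetical [8 x Elem] layout, plain C++ in place of DataLayout), recovering indices from a byte offset amounts to the following arithmetic:

    // Standalone model: one array step and one struct step of the recursion.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Elem { int32_t X; int32_t Y; };   // stands in for the element type

    int main() {
      // Imagine an alloca of type [8 x Elem] and a use at byte offset 20.
      uint64_t Offset = 20;
      uint64_t ArrayIdx = Offset / sizeof(Elem);     // like Offset.sdiv(ElementSize)
      Offset -= ArrayIdx * sizeof(Elem);             // 4 bytes left inside one Elem
      unsigned FieldIdx = (Offset >= offsetof(Elem, Y)) ? 1u : 0u;
      Offset -= FieldIdx ? offsetof(Elem, Y) : 0;    // subtract the field offset

      // A remainder of zero means a GEP such as &arr[2].Y lands exactly on the
      // requested offset; a non-zero remainder makes the real routine give up.
      std::printf("indices [%llu, %u], remainder %llu\n",
                  (unsigned long long)ArrayIdx, FieldIdx,
                  (unsigned long long)Offset);
      return 0;
    }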
+ APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy)); + if (ElementSize == 0) + return 0; // Zero-length arrays can't help us build a natural GEP. + APInt NumSkippedElements = Offset.sdiv(ElementSize); + + Offset -= NumSkippedElements * ElementSize; + Indices.push_back(IRB.getInt(NumSkippedElements)); + return getNaturalGEPRecursively(IRB, TD, Ptr, ElementTy, Offset, TargetTy, + Indices, Prefix); +} + +/// \brief Compute an adjusted pointer from Ptr by Offset bytes where the +/// resulting pointer has PointerTy. +/// +/// This tries very hard to compute a "natural" GEP which arrives at the offset +/// and produces the pointer type desired. Where it cannot, it will try to use +/// the natural GEP to arrive at the offset and bitcast to the type. Where that +/// fails, it will try to use an existing i8* and GEP to the byte offset and +/// bitcast to the type. +/// +/// The strategy for finding the more natural GEPs is to peel off layers of the +/// pointer, walking back through bit casts and GEPs, searching for a base +/// pointer from which we can compute a natural GEP with the desired +/// properities. The algorithm tries to fold as many constant indices into +/// a single GEP as possible, thus making each GEP more independent of the +/// surrounding code. +static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD, + Value *Ptr, APInt Offset, Type *PointerTy, + const Twine &Prefix) { + // Even though we don't look through PHI nodes, we could be called on an + // instruction in an unreachable block, which may be on a cycle. + SmallPtrSet Visited; + Visited.insert(Ptr); + SmallVector Indices; + + // We may end up computing an offset pointer that has the wrong type. If we + // never are able to compute one directly that has the correct type, we'll + // fall back to it, so keep it around here. + Value *OffsetPtr = 0; + + // Remember any i8 pointer we come across to re-use if we need to do a raw + // byte offset. + Value *Int8Ptr = 0; + APInt Int8PtrOffset(Offset.getBitWidth(), 0); + + Type *TargetTy = PointerTy->getPointerElementType(); + + do { + // First fold any existing GEPs into the offset. + while (GEPOperator *GEP = dyn_cast(Ptr)) { + APInt GEPOffset(Offset.getBitWidth(), 0); + if (!accumulateGEPOffsets(TD, *GEP, GEPOffset)) + break; + Offset += GEPOffset; + Ptr = GEP->getPointerOperand(); + if (!Visited.insert(Ptr)) + break; + } + + // See if we can perform a natural GEP here. + Indices.clear(); + if (Value *P = getNaturalGEPWithOffset(IRB, TD, Ptr, Offset, TargetTy, + Indices, Prefix)) { + if (P->getType() == PointerTy) { + // Zap any offset pointer that we ended up computing in previous rounds. + if (OffsetPtr && OffsetPtr->use_empty()) + if (Instruction *I = dyn_cast(OffsetPtr)) + I->eraseFromParent(); + return P; + } + if (!OffsetPtr) { + OffsetPtr = P; + } + } + + // Stash this pointer if we've found an i8*. + if (Ptr->getType()->isIntegerTy(8)) { + Int8Ptr = Ptr; + Int8PtrOffset = Offset; + } + + // Peel off a layer of the pointer and update the offset appropriately. 
+ if (Operator::getOpcode(Ptr) == Instruction::BitCast) { + Ptr = cast(Ptr)->getOperand(0); + } else if (GlobalAlias *GA = dyn_cast(Ptr)) { + if (GA->mayBeOverridden()) + break; + Ptr = GA->getAliasee(); + } else { + break; + } + assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!"); + } while (Visited.insert(Ptr)); + + if (!OffsetPtr) { + if (!Int8Ptr) { + Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(), + Prefix + ".raw_cast"); + Int8PtrOffset = Offset; + } + + OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr : + IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset), + Prefix + ".raw_idx"); + } + Ptr = OffsetPtr; + + // On the off chance we were targeting i8*, guard the bitcast here. + if (Ptr->getType() != PointerTy) + Ptr = IRB.CreateBitCast(Ptr, PointerTy, Prefix + ".cast"); + + return Ptr; +} + +/// \brief Test whether we can convert a value from the old to the new type. +/// +/// This predicate should be used to guard calls to convertValue in order to +/// ensure that we only try to convert viable values. The strategy is that we +/// will peel off single element struct and array wrappings to get to an +/// underlying value, and convert that value. +static bool canConvertValue(const DataLayout &DL, Type *OldTy, Type *NewTy) { + if (OldTy == NewTy) + return true; + if (DL.getTypeSizeInBits(NewTy) != DL.getTypeSizeInBits(OldTy)) + return false; + if (!NewTy->isSingleValueType() || !OldTy->isSingleValueType()) + return false; + + if (NewTy->isPointerTy() || OldTy->isPointerTy()) { + if (NewTy->isPointerTy() && OldTy->isPointerTy()) + return true; + if (NewTy->isIntegerTy() || OldTy->isIntegerTy()) + return true; + return false; + } + + return true; +} + +/// \brief Generic routine to convert an SSA value to a value of a different +/// type. +/// +/// This will try various different casting techniques, such as bitcasts, +/// inttoptr, and ptrtoint casts. Use the \c canConvertValue predicate to test +/// two types for viability with this routine. +static Value *convertValue(const DataLayout &DL, IRBuilder<> &IRB, Value *V, + Type *Ty) { + assert(canConvertValue(DL, V->getType(), Ty) && + "Value not convertable to type"); + if (V->getType() == Ty) + return V; + if (V->getType()->isIntegerTy() && Ty->isPointerTy()) + return IRB.CreateIntToPtr(V, Ty); + if (V->getType()->isPointerTy() && Ty->isIntegerTy()) + return IRB.CreatePtrToInt(V, Ty); + + return IRB.CreateBitCast(V, Ty); +} + +/// \brief Test whether the given alloca partition can be promoted to a vector. +/// +/// This is a quick test to check whether we can rewrite a particular alloca +/// partition (and its newly formed alloca) into a vector alloca with only +/// whole-vector loads and stores such that it could be promoted to a vector +/// SSA value. We only can ensure this for a limited set of operations, and we +/// don't want to do the rewrites unless we are confident that the result will +/// be promotable, so we have an early test here. +static bool isVectorPromotionViable(const DataLayout &TD, + Type *AllocaTy, + AllocaPartitioning &P, + uint64_t PartitionBeginOffset, + uint64_t PartitionEndOffset, + AllocaPartitioning::const_use_iterator I, + AllocaPartitioning::const_use_iterator E) { + VectorType *Ty = dyn_cast(AllocaTy); + if (!Ty) + return false; + + uint64_t VecSize = TD.getTypeSizeInBits(Ty); + uint64_t ElementSize = Ty->getScalarSizeInBits(); + + // While the definition of LLVM vectors is bitpacked, we don't support sizes + // that aren't byte sized. 
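The per-use checks of isVectorPromotionViable, continued below, only accept accesses that cover whole lanes or the whole vector. A standalone sketch with hypothetical offsets models that index test:

    // Standalone model of the lane-alignment test applied to each partition use.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t ElementSize = 4;   // hypothetical <4 x i32>: 4-byte lanes
      const uint64_t NumElements = 4;
      const uint64_t VecSize = ElementSize * NumElements;

      struct Access { uint64_t Begin, End; } Accesses[] = {
        {0, 16},   // whole-vector access: viable
        {4, 8},    // exactly one lane: viable
        {6, 10},   // straddles two lanes: blocks promotion
      };
      for (const Access &A : Accesses) {
        uint64_t BeginIdx = A.Begin / ElementSize, EndIdx = A.End / ElementSize;
        bool OnLanes = BeginIdx * ElementSize == A.Begin && BeginIdx < NumElements &&
                       EndIdx * ElementSize == A.End && EndIdx <= NumElements;
        bool SizeOK = (A.End - A.Begin) == ElementSize ||
                      (A.End - A.Begin) == VecSize;
        std::printf("[%llu, %llu): %s\n", (unsigned long long)A.Begin,
                    (unsigned long long)A.End,
                    OnLanes && SizeOK ? "viable" : "not viable");
      }
      return 0;
    }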
+ if (ElementSize % 8) + return false; + assert((VecSize % 8) == 0 && "vector size not a multiple of element size?"); + VecSize /= 8; + ElementSize /= 8; + + for (; I != E; ++I) { + if (!I->U) + continue; // Skip dead use. + + uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset; + uint64_t BeginIndex = BeginOffset / ElementSize; + if (BeginIndex * ElementSize != BeginOffset || + BeginIndex >= Ty->getNumElements()) + return false; + uint64_t EndOffset = I->EndOffset - PartitionBeginOffset; + uint64_t EndIndex = EndOffset / ElementSize; + if (EndIndex * ElementSize != EndOffset || + EndIndex > Ty->getNumElements()) + return false; + + // FIXME: We should build shuffle vector instructions to handle + // non-element-sized accesses. + if ((EndOffset - BeginOffset) != ElementSize && + (EndOffset - BeginOffset) != VecSize) + return false; + + if (MemIntrinsic *MI = dyn_cast(I->U->getUser())) { + if (MI->isVolatile()) + return false; + if (MemTransferInst *MTI = dyn_cast(I->U->getUser())) { + const AllocaPartitioning::MemTransferOffsets &MTO + = P.getMemTransferOffsets(*MTI); + if (!MTO.IsSplittable) + return false; + } + } else if (I->U->get()->getType()->getPointerElementType()->isStructTy()) { + // Disable vector promotion when there are loads or stores of an FCA. + return false; + } else if (LoadInst *LI = dyn_cast(I->U->getUser())) { + if (LI->isVolatile()) + return false; + } else if (StoreInst *SI = dyn_cast(I->U->getUser())) { + if (SI->isVolatile()) + return false; + } else { + return false; + } + } + return true; +} + +/// \brief Test whether the given alloca partition's integer operations can be +/// widened to promotable ones. +/// +/// This is a quick test to check whether we can rewrite the integer loads and +/// stores to a particular alloca into wider loads and stores and be able to +/// promote the resulting alloca. +static bool isIntegerWideningViable(const DataLayout &TD, + Type *AllocaTy, + uint64_t AllocBeginOffset, + AllocaPartitioning &P, + AllocaPartitioning::const_use_iterator I, + AllocaPartitioning::const_use_iterator E) { + uint64_t SizeInBits = TD.getTypeSizeInBits(AllocaTy); + + // Don't try to handle allocas with bit-padding. + if (SizeInBits != TD.getTypeStoreSizeInBits(AllocaTy)) + return false; + + // We need to ensure that an integer type with the appropriate bitwidth can + // be converted to the alloca type, whatever that is. We don't want to force + // the alloca itself to have an integer type if there is a more suitable one. + Type *IntTy = Type::getIntNTy(AllocaTy->getContext(), SizeInBits); + if (!canConvertValue(TD, AllocaTy, IntTy) || + !canConvertValue(TD, IntTy, AllocaTy)) + return false; + + uint64_t Size = TD.getTypeStoreSize(AllocaTy); + + // Check the uses to ensure the uses are (likely) promoteable integer uses. + // Also ensure that the alloca has a covering load or store. We don't want + // to widen the integer operotains only to fail to promote due to some other + // unsplittable entry (which we may make splittable later). + bool WholeAllocaOp = false; + for (; I != E; ++I) { + if (!I->U) + continue; // Skip dead use. + + uint64_t RelBegin = I->BeginOffset - AllocBeginOffset; + uint64_t RelEnd = I->EndOffset - AllocBeginOffset; + + // We can't reasonably handle cases where the load or store extends past + // the end of the aloca's type and into its padding. 
+ if (RelEnd > Size) + return false; + + if (LoadInst *LI = dyn_cast(I->U->getUser())) { + if (LI->isVolatile()) + return false; + if (RelBegin == 0 && RelEnd == Size) + WholeAllocaOp = true; + if (IntegerType *ITy = dyn_cast(LI->getType())) { + if (ITy->getBitWidth() < TD.getTypeStoreSize(ITy)) + return false; + continue; + } + // Non-integer loads need to be convertible from the alloca type so that + // they are promotable. + if (RelBegin != 0 || RelEnd != Size || + !canConvertValue(TD, AllocaTy, LI->getType())) + return false; + } else if (StoreInst *SI = dyn_cast(I->U->getUser())) { + Type *ValueTy = SI->getValueOperand()->getType(); + if (SI->isVolatile()) + return false; + if (RelBegin == 0 && RelEnd == Size) + WholeAllocaOp = true; + if (IntegerType *ITy = dyn_cast(ValueTy)) { + if (ITy->getBitWidth() < TD.getTypeStoreSize(ITy)) + return false; + continue; + } + // Non-integer stores need to be convertible to the alloca type so that + // they are promotable. + if (RelBegin != 0 || RelEnd != Size || + !canConvertValue(TD, ValueTy, AllocaTy)) + return false; + } else if (MemIntrinsic *MI = dyn_cast(I->U->getUser())) { + if (MI->isVolatile()) + return false; + if (MemTransferInst *MTI = dyn_cast(I->U->getUser())) { + const AllocaPartitioning::MemTransferOffsets &MTO + = P.getMemTransferOffsets(*MTI); + if (!MTO.IsSplittable) + return false; + } + } else if (IntrinsicInst *II = dyn_cast(I->U->getUser())) { + if (II->getIntrinsicID() != Intrinsic::lifetime_start && + II->getIntrinsicID() != Intrinsic::lifetime_end) + return false; + } else { + return false; + } + } + return WholeAllocaOp; +} + +static Value *extractInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *V, + IntegerType *Ty, uint64_t Offset, + const Twine &Name) { + DEBUG(dbgs() << " start: " << *V << "\n"); + IntegerType *IntTy = cast(V->getType()); + assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && + "Element extends past full value"); + uint64_t ShAmt = 8*Offset; + if (DL.isBigEndian()) + ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); + if (ShAmt) { + V = IRB.CreateLShr(V, ShAmt, Name + ".shift"); + DEBUG(dbgs() << " shifted: " << *V << "\n"); + } + assert(Ty->getBitWidth() <= IntTy->getBitWidth() && + "Cannot extract to a larger integer!"); + if (Ty != IntTy) { + V = IRB.CreateTrunc(V, Ty, Name + ".trunc"); + DEBUG(dbgs() << " trunced: " << *V << "\n"); + } + return V; +} + +static Value *insertInteger(const DataLayout &DL, IRBuilder<> &IRB, Value *Old, + Value *V, uint64_t Offset, const Twine &Name) { + IntegerType *IntTy = cast(Old->getType()); + IntegerType *Ty = cast(V->getType()); + assert(Ty->getBitWidth() <= IntTy->getBitWidth() && + "Cannot insert a larger integer!"); + DEBUG(dbgs() << " start: " << *V << "\n"); + if (Ty != IntTy) { + V = IRB.CreateZExt(V, IntTy, Name + ".ext"); + DEBUG(dbgs() << " extended: " << *V << "\n"); + } + assert(DL.getTypeStoreSize(Ty) + Offset <= DL.getTypeStoreSize(IntTy) && + "Element store outside of alloca store"); + uint64_t ShAmt = 8*Offset; + if (DL.isBigEndian()) + ShAmt = 8*(DL.getTypeStoreSize(IntTy) - DL.getTypeStoreSize(Ty) - Offset); + if (ShAmt) { + V = IRB.CreateShl(V, ShAmt, Name + ".shift"); + DEBUG(dbgs() << " shifted: " << *V << "\n"); + } + + if (ShAmt || Ty->getBitWidth() < IntTy->getBitWidth()) { + APInt Mask = ~Ty->getMask().zext(IntTy->getBitWidth()).shl(ShAmt); + Old = IRB.CreateAnd(Old, Mask, Name + ".mask"); + DEBUG(dbgs() << " masked: " << *Old << "\n"); + V = IRB.CreateOr(Old, V, Name + ".insert"); + 
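extractInteger and insertInteger locate the element purely with shifts and masks; on big-endian targets the shift amount becomes 8 * (wide store size - narrow store size - offset). A standalone sketch with a hypothetical 8-byte widened value and a 2-byte element at byte offset 2 works through the arithmetic:

    // Standalone model, no LLVM types: extract and re-insert a narrow integer
    // inside a wider one with shifts and masks.
    #include <cstdint>
    #include <cstdio>

    static uint64_t shiftAmount(bool BigEndian, unsigned WideBytes,
                                unsigned NarrowBytes, unsigned Offset) {
      return BigEndian ? 8ull * (WideBytes - NarrowBytes - Offset) : 8ull * Offset;
    }

    int main() {
      uint64_t Wide = 0x1122334455667788ull;  // the widened, alloca-sized integer
      const unsigned Offset = 2;              // byte offset of the 16-bit element
      const uint64_t Sh = shiftAmount(/*BigEndian=*/false, 8, 2, Offset);

      // extractInteger: shift the element down, then truncate.
      uint16_t Element = (uint16_t)(Wide >> Sh);
      std::printf("extracted 0x%04x\n", Element);          // 0x5566

      // insertInteger: clear the slot with a mask, then OR in the shifted value.
      const uint64_t Mask = ~(0xffffull << Sh);
      Wide = (Wide & Mask) | ((uint64_t)0xabcd << Sh);
      std::printf("inserted  0x%016llx\n", (unsigned long long)Wide);
      return 0;
    }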
DEBUG(dbgs() << " inserted: " << *V << "\n"); + } + return V; +} + +namespace { +/// \brief Visitor to rewrite instructions using a partition of an alloca to +/// use a new alloca. +/// +/// Also implements the rewriting to vector-based accesses when the partition +/// passes the isVectorPromotionViable predicate. Most of the rewriting logic +/// lives here. +class AllocaPartitionRewriter : public InstVisitor { + // Befriend the base class so it can delegate to private visit methods. + friend class llvm::InstVisitor; + + const DataLayout &TD; + AllocaPartitioning &P; + SROA &Pass; + AllocaInst &OldAI, &NewAI; + const uint64_t NewAllocaBeginOffset, NewAllocaEndOffset; + Type *NewAllocaTy; + + // If we are rewriting an alloca partition which can be written as pure + // vector operations, we stash extra information here. When VecTy is + // non-null, we have some strict guarantees about the rewriten alloca: + // - The new alloca is exactly the size of the vector type here. + // - The accesses all either map to the entire vector or to a single + // element. + // - The set of accessing instructions is only one of those handled above + // in isVectorPromotionViable. Generally these are the same access kinds + // which are promotable via mem2reg. + VectorType *VecTy; + Type *ElementTy; + uint64_t ElementSize; + + // This is a convenience and flag variable that will be null unless the new + // alloca's integer operations should be widened to this integer type due to + // passing isIntegerWideningViable above. If it is non-null, the desired + // integer type will be stored here for easy access during rewriting. + IntegerType *IntTy; + + // The offset of the partition user currently being rewritten. + uint64_t BeginOffset, EndOffset; + Use *OldUse; + Instruction *OldPtr; + + // The name prefix to use when rewriting instructions for this alloca. + std::string NamePrefix; + +public: + AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P, + AllocaPartitioning::iterator PI, + SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI, + uint64_t NewBeginOffset, uint64_t NewEndOffset) + : TD(TD), P(P), Pass(Pass), + OldAI(OldAI), NewAI(NewAI), + NewAllocaBeginOffset(NewBeginOffset), + NewAllocaEndOffset(NewEndOffset), + NewAllocaTy(NewAI.getAllocatedType()), + VecTy(), ElementTy(), ElementSize(), IntTy(), + BeginOffset(), EndOffset() { + } + + /// \brief Visit the users of the alloca partition and rewrite them. + bool visitUsers(AllocaPartitioning::const_use_iterator I, + AllocaPartitioning::const_use_iterator E) { + if (isVectorPromotionViable(TD, NewAI.getAllocatedType(), P, + NewAllocaBeginOffset, NewAllocaEndOffset, + I, E)) { + ++NumVectorized; + VecTy = cast(NewAI.getAllocatedType()); + ElementTy = VecTy->getElementType(); + assert((VecTy->getScalarSizeInBits() % 8) == 0 && + "Only multiple-of-8 sized vector elements are viable"); + ElementSize = VecTy->getScalarSizeInBits() / 8; + } else if (isIntegerWideningViable(TD, NewAI.getAllocatedType(), + NewAllocaBeginOffset, P, I, E)) { + IntTy = Type::getIntNTy(NewAI.getContext(), + TD.getTypeSizeInBits(NewAI.getAllocatedType())); + } + bool CanSROA = true; + for (; I != E; ++I) { + if (!I->U) + continue; // Skip dead uses. + BeginOffset = I->BeginOffset; + EndOffset = I->EndOffset; + OldUse = I->U; + OldPtr = cast(I->U->get()); + NamePrefix = (Twine(NewAI.getName()) + "." 
+ Twine(BeginOffset)).str(); + CanSROA &= visit(cast(I->U->getUser())); + } + if (VecTy) { + assert(CanSROA); + VecTy = 0; + ElementTy = 0; + ElementSize = 0; + } + if (IntTy) { + assert(CanSROA); + IntTy = 0; + } + return CanSROA; + } + +private: + // Every instruction which can end up as a user must have a rewrite rule. + bool visitInstruction(Instruction &I) { + DEBUG(dbgs() << " !!!! Cannot rewrite: " << I << "\n"); + llvm_unreachable("No rewrite rule for this instruction!"); + } + + Twine getName(const Twine &Suffix) { + return NamePrefix + Suffix; + } + + Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) { + assert(BeginOffset >= NewAllocaBeginOffset); + APInt Offset(TD.getPointerSizeInBits(), BeginOffset - NewAllocaBeginOffset); + return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName("")); + } + + /// \brief Compute suitable alignment to access an offset into the new alloca. + unsigned getOffsetAlign(uint64_t Offset) { + unsigned NewAIAlign = NewAI.getAlignment(); + if (!NewAIAlign) + NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType()); + return MinAlign(NewAIAlign, Offset); + } + + /// \brief Compute suitable alignment to access this partition of the new + /// alloca. + unsigned getPartitionAlign() { + return getOffsetAlign(BeginOffset - NewAllocaBeginOffset); + } + + /// \brief Compute suitable alignment to access a type at an offset of the + /// new alloca. + /// + /// \returns zero if the type's ABI alignment is a suitable alignment, + /// otherwise returns the maximal suitable alignment. + unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) { + unsigned Align = getOffsetAlign(Offset); + return Align == TD.getABITypeAlignment(Ty) ? 0 : Align; + } + + /// \brief Compute suitable alignment to access a type at the beginning of + /// this partition of the new alloca. + /// + /// See \c getOffsetTypeAlign for details; this routine delegates to it. 
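getOffsetAlign above relies on MinAlign(alloca alignment, offset): an access at a byte offset into an aligned object may only assume the largest power of two dividing both numbers. A standalone sketch (hypothetical 16-byte-aligned alloca) tabulates a few offsets:

    // Standalone model of the alignment-at-offset rule (the role MinAlign plays).
    #include <cstdint>
    #include <cstdio>

    // Largest power of two dividing both A and B.
    static uint64_t minAlign(uint64_t A, uint64_t B) {
      uint64_t V = A | B;
      return V & (~V + 1);   // isolate the lowest set bit
    }

    int main() {
      const uint64_t AllocaAlign = 16;
      const uint64_t Offsets[] = {0, 4, 8, 12, 32};
      for (uint64_t Off : Offsets)
        std::printf("offset %2llu -> usable alignment %llu\n",
                    (unsigned long long)Off,
                    (unsigned long long)minAlign(AllocaAlign, Off));
      return 0;
    }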
+ unsigned getPartitionTypeAlign(Type *Ty) { + return getOffsetTypeAlign(Ty, BeginOffset - NewAllocaBeginOffset); + } + + ConstantInt *getIndex(IRBuilder<> &IRB, uint64_t Offset) { + assert(VecTy && "Can only call getIndex when rewriting a vector"); + uint64_t RelOffset = Offset - NewAllocaBeginOffset; + assert(RelOffset / ElementSize < UINT32_MAX && "Index out of bounds"); + uint32_t Index = RelOffset / ElementSize; + assert(Index * ElementSize == RelOffset); + return IRB.getInt32(Index); + } + + void deleteIfTriviallyDead(Value *V) { + Instruction *I = cast(V); + if (isInstructionTriviallyDead(I)) + Pass.DeadInsts.insert(I); + } + + Value *rewriteVectorizedLoadInst(IRBuilder<> &IRB, LoadInst &LI, Value *OldOp) { + Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + getName(".load")); + if (LI.getType() == VecTy->getElementType() || + BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) { + V = IRB.CreateExtractElement(V, getIndex(IRB, BeginOffset), + getName(".extract")); + } + return V; + } + + Value *rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) { + assert(IntTy && "We cannot insert an integer to the alloca"); + assert(!LI.isVolatile()); + Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + getName(".load")); + V = convertValue(TD, IRB, V, IntTy); + assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); + uint64_t Offset = BeginOffset - NewAllocaBeginOffset; + if (Offset > 0 || EndOffset < NewAllocaEndOffset) + V = extractInteger(TD, IRB, V, cast(LI.getType()), Offset, + getName(".extract")); + return V; + } + + bool visitLoadInst(LoadInst &LI) { + DEBUG(dbgs() << " original: " << LI << "\n"); + Value *OldOp = LI.getOperand(0); + assert(OldOp == OldPtr); + IRBuilder<> IRB(&LI); + + uint64_t Size = EndOffset - BeginOffset; + bool IsSplitIntLoad = Size < TD.getTypeStoreSize(LI.getType()); + + // If this memory access can be shown to *statically* extend outside the + // bounds of the original allocation it's behavior is undefined. Rather + // than trying to transform it, just replace it with undef. + // FIXME: We should do something more clever for functions being + // instrumented by asan. + // FIXME: Eventually, once ASan and friends can flush out bugs here, this + // should be transformed to a load of null making it unreachable. + uint64_t OldAllocSize = TD.getTypeAllocSize(OldAI.getAllocatedType()); + if (TD.getTypeStoreSize(LI.getType()) > OldAllocSize) { + LI.replaceAllUsesWith(UndefValue::get(LI.getType())); + Pass.DeadInsts.insert(&LI); + deleteIfTriviallyDead(OldOp); + DEBUG(dbgs() << " to: undef!!\n"); + return true; + } + + Type *TargetTy = IsSplitIntLoad ? 
Type::getIntNTy(LI.getContext(), Size * 8) + : LI.getType(); + bool IsPtrAdjusted = false; + Value *V; + if (VecTy) { + V = rewriteVectorizedLoadInst(IRB, LI, OldOp); + } else if (IntTy && LI.getType()->isIntegerTy()) { + V = rewriteIntegerLoad(IRB, LI); + } else if (BeginOffset == NewAllocaBeginOffset && + canConvertValue(TD, NewAllocaTy, LI.getType())) { + V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + LI.isVolatile(), getName(".load")); + } else { + Type *LTy = TargetTy->getPointerTo(); + V = IRB.CreateAlignedLoad(getAdjustedAllocaPtr(IRB, LTy), + getPartitionTypeAlign(TargetTy), + LI.isVolatile(), getName(".load")); + IsPtrAdjusted = true; + } + V = convertValue(TD, IRB, V, TargetTy); + + if (IsSplitIntLoad) { + assert(!LI.isVolatile()); + assert(LI.getType()->isIntegerTy() && + "Only integer type loads and stores are split"); + assert(LI.getType()->getIntegerBitWidth() == + TD.getTypeStoreSizeInBits(LI.getType()) && + "Non-byte-multiple bit width"); + assert(LI.getType()->getIntegerBitWidth() == + TD.getTypeAllocSizeInBits(OldAI.getAllocatedType()) && + "Only alloca-wide loads can be split and recomposed"); + // Move the insertion point just past the load so that we can refer to it. + IRB.SetInsertPoint(llvm::next(BasicBlock::iterator(&LI))); + // Create a placeholder value with the same type as LI to use as the + // basis for the new value. This allows us to replace the uses of LI with + // the computed value, and then replace the placeholder with LI, leaving + // LI only used for this computation. + Value *Placeholder + = new LoadInst(UndefValue::get(LI.getType()->getPointerTo())); + V = insertInteger(TD, IRB, Placeholder, V, BeginOffset, + getName(".insert")); + LI.replaceAllUsesWith(V); + Placeholder->replaceAllUsesWith(&LI); + delete Placeholder; + } else { + LI.replaceAllUsesWith(V); + } + + Pass.DeadInsts.insert(&LI); + deleteIfTriviallyDead(OldOp); + DEBUG(dbgs() << " to: " << *V << "\n"); + return !LI.isVolatile() && !IsPtrAdjusted; + } + + bool rewriteVectorizedStoreInst(IRBuilder<> &IRB, Value *V, + StoreInst &SI, Value *OldOp) { + if (V->getType() == ElementTy || + BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) { + if (V->getType() != ElementTy) + V = convertValue(TD, IRB, V, ElementTy); + LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + getName(".load")); + V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset), + getName(".insert")); + } else if (V->getType() != VecTy) { + V = convertValue(TD, IRB, V, VecTy); + } + StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); + Pass.DeadInsts.insert(&SI); + + (void)Store; + DEBUG(dbgs() << " to: " << *Store << "\n"); + return true; + } + + bool rewriteIntegerStore(IRBuilder<> &IRB, Value *V, StoreInst &SI) { + assert(IntTy && "We cannot extract an integer from the alloca"); + assert(!SI.isVolatile()); + if (TD.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) { + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + getName(".oldload")); + Old = convertValue(TD, IRB, Old, IntTy); + assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); + uint64_t Offset = BeginOffset - NewAllocaBeginOffset; + V = insertInteger(TD, IRB, Old, SI.getValueOperand(), Offset, + getName(".insert")); + } + V = convertValue(TD, IRB, V, NewAllocaTy); + StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment()); + Pass.DeadInsts.insert(&SI); + (void)Store; + DEBUG(dbgs() << " to: " << *Store << "\n"); + return true; + } + + 
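The memset rewrite in visitMemSetInst below widens the fill byte by multiplying its zero-extension with all-ones divided by 0xff, i.e. the 0x0101...01 pattern. A standalone sketch with a hypothetical 8-byte partition shows the arithmetic:

    // Standalone model of the byte-splat trick: AllOnes / 0xff is 0x0101...01,
    // and multiplying by it replicates the byte into every lane of the integer.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t  FillByte = 0xab;             // the memset value operand
      const uint64_t AllOnes  = ~0ull;            // all-ones of the wide type
      const uint64_t Splat    = AllOnes / 0xff;   // 0x0101010101010101
      const uint64_t Widened  = (uint64_t)FillByte * Splat;
      std::printf("0x%016llx\n", (unsigned long long)Widened);  // 0xabababababababab
      return 0;
    }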
bool visitStoreInst(StoreInst &SI) { + DEBUG(dbgs() << " original: " << SI << "\n"); + Value *OldOp = SI.getOperand(1); + assert(OldOp == OldPtr); + IRBuilder<> IRB(&SI); + + Value *V = SI.getValueOperand(); + + // Strip all inbounds GEPs and pointer casts to try to dig out any root + // alloca that should be re-examined after promoting this alloca. + if (V->getType()->isPointerTy()) + if (AllocaInst *AI = dyn_cast(V->stripInBoundsOffsets())) + Pass.PostPromotionWorklist.insert(AI); + + uint64_t Size = EndOffset - BeginOffset; + if (Size < TD.getTypeStoreSize(V->getType())) { + assert(!SI.isVolatile()); + assert(V->getType()->isIntegerTy() && + "Only integer type loads and stores are split"); + assert(V->getType()->getIntegerBitWidth() == + TD.getTypeStoreSizeInBits(V->getType()) && + "Non-byte-multiple bit width"); + assert(V->getType()->getIntegerBitWidth() == + TD.getTypeSizeInBits(OldAI.getAllocatedType()) && + "Only alloca-wide stores can be split and recomposed"); + IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8); + V = extractInteger(TD, IRB, V, NarrowTy, BeginOffset, + getName(".extract")); + } + + if (VecTy) + return rewriteVectorizedStoreInst(IRB, V, SI, OldOp); + if (IntTy && V->getType()->isIntegerTy()) + return rewriteIntegerStore(IRB, V, SI); + + StoreInst *NewSI; + if (BeginOffset == NewAllocaBeginOffset && + canConvertValue(TD, V->getType(), NewAllocaTy)) { + V = convertValue(TD, IRB, V, NewAllocaTy); + NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), + SI.isVolatile()); + } else { + Value *NewPtr = getAdjustedAllocaPtr(IRB, V->getType()->getPointerTo()); + NewSI = IRB.CreateAlignedStore(V, NewPtr, + getPartitionTypeAlign(V->getType()), + SI.isVolatile()); + } + (void)NewSI; + Pass.DeadInsts.insert(&SI); + deleteIfTriviallyDead(OldOp); + + DEBUG(dbgs() << " to: " << *NewSI << "\n"); + return NewSI->getPointerOperand() == &NewAI && !SI.isVolatile(); + } + + bool visitMemSetInst(MemSetInst &II) { + DEBUG(dbgs() << " original: " << II << "\n"); + IRBuilder<> IRB(&II); + assert(II.getRawDest() == OldPtr); + + // If the memset has a variable size, it cannot be split, just adjust the + // pointer to the new alloca. + if (!isa(II.getLength())) { + II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType())); + Type *CstTy = II.getAlignmentCst()->getType(); + II.setAlignment(ConstantInt::get(CstTy, getPartitionAlign())); + + deleteIfTriviallyDead(OldPtr); + return false; + } + + // Record this instruction for deletion. + Pass.DeadInsts.insert(&II); + + Type *AllocaTy = NewAI.getAllocatedType(); + Type *ScalarTy = AllocaTy->getScalarType(); + + // If this doesn't map cleanly onto the alloca type, and that type isn't + // a single value type, just emit a memset. + if (!VecTy && !IntTy && + (BeginOffset != NewAllocaBeginOffset || + EndOffset != NewAllocaEndOffset || + !AllocaTy->isSingleValueType() || + !TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)))) { + Type *SizeTy = II.getLength()->getType(); + Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset); + CallInst *New + = IRB.CreateMemSet(getAdjustedAllocaPtr(IRB, + II.getRawDest()->getType()), + II.getValue(), Size, getPartitionAlign(), + II.isVolatile()); + (void)New; + DEBUG(dbgs() << " to: " << *New << "\n"); + return false; + } + + // If we can represent this as a simple value, we have to build the actual + // value to store, which requires expanding the byte present in memset to + // a sensible representation for the alloca type. 
This is essentially + // splatting the byte to a sufficiently wide integer, bitcasting to the + // desired scalar type, and splatting it across any desired vector type. + uint64_t Size = EndOffset - BeginOffset; + Value *V = II.getValue(); + IntegerType *VTy = cast(V->getType()); + Type *SplatIntTy = Type::getIntNTy(VTy->getContext(), Size*8); + if (Size*8 > VTy->getBitWidth()) + V = IRB.CreateMul(IRB.CreateZExt(V, SplatIntTy, getName(".zext")), + ConstantExpr::getUDiv( + Constant::getAllOnesValue(SplatIntTy), + ConstantExpr::getZExt( + Constant::getAllOnesValue(V->getType()), + SplatIntTy)), + getName(".isplat")); + + // If this is an element-wide memset of a vectorizable alloca, insert it. + if (VecTy && (BeginOffset > NewAllocaBeginOffset || + EndOffset < NewAllocaEndOffset)) { + if (V->getType() != ScalarTy) + V = convertValue(TD, IRB, V, ScalarTy); + StoreInst *Store = IRB.CreateAlignedStore( + IRB.CreateInsertElement(IRB.CreateAlignedLoad(&NewAI, + NewAI.getAlignment(), + getName(".load")), + V, getIndex(IRB, BeginOffset), + getName(".insert")), + &NewAI, NewAI.getAlignment()); + (void)Store; + DEBUG(dbgs() << " to: " << *Store << "\n"); + return true; + } + + // If this is a memset on an alloca where we can widen stores, insert the + // set integer. + if (IntTy && (BeginOffset > NewAllocaBeginOffset || + EndOffset < NewAllocaEndOffset)) { + assert(!II.isVolatile()); + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + getName(".oldload")); + Old = convertValue(TD, IRB, Old, IntTy); + assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); + uint64_t Offset = BeginOffset - NewAllocaBeginOffset; + V = insertInteger(TD, IRB, Old, V, Offset, getName(".insert")); + } + + if (V->getType() != AllocaTy) + V = convertValue(TD, IRB, V, AllocaTy); + + Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(), + II.isVolatile()); + (void)New; + DEBUG(dbgs() << " to: " << *New << "\n"); + return !II.isVolatile(); + } + + bool visitMemTransferInst(MemTransferInst &II) { + // Rewriting of memory transfer instructions can be a bit tricky. We break + // them into two categories: split intrinsics and unsplit intrinsics. + + DEBUG(dbgs() << " original: " << II << "\n"); + IRBuilder<> IRB(&II); + + assert(II.getRawSource() == OldPtr || II.getRawDest() == OldPtr); + bool IsDest = II.getRawDest() == OldPtr; + + const AllocaPartitioning::MemTransferOffsets &MTO + = P.getMemTransferOffsets(II); + + // Compute the relative offset within the transfer. + unsigned IntPtrWidth = TD.getPointerSizeInBits(); + APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin + : MTO.SourceBegin)); + + unsigned Align = II.getAlignment(); + if (Align > 1) + Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(), + MinAlign(II.getAlignment(), getPartitionAlign())); + + // For unsplit intrinsics, we simply modify the source and destination + // pointers in place. This isn't just an optimization, it is a matter of + // correctness. With unsplit intrinsics we may be dealing with transfers + // within a single alloca before SROA ran, or with transfers that have + // a variable length. We may also be dealing with memmove instead of + // memcpy, and so simply updating the pointers is the necessary for us to + // update both source and dest of a single call. + if (!MTO.IsSplittable) { + Value *OldOp = IsDest ? 
II.getRawDest() : II.getRawSource(); + if (IsDest) + II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType())); + else + II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType())); + + Type *CstTy = II.getAlignmentCst()->getType(); + II.setAlignment(ConstantInt::get(CstTy, Align)); + + DEBUG(dbgs() << " to: " << II << "\n"); + deleteIfTriviallyDead(OldOp); + return false; + } + // For split transfer intrinsics we have an incredibly useful assurance: + // the source and destination do not reside within the same alloca, and at + // least one of them does not escape. This means that we can replace + // memmove with memcpy, and we don't need to worry about all manner of + // downsides to splitting and transforming the operations. + + // If this doesn't map cleanly onto the alloca type, and that type isn't + // a single value type, just emit a memcpy. + bool EmitMemCpy + = !VecTy && !IntTy && (BeginOffset != NewAllocaBeginOffset || + EndOffset != NewAllocaEndOffset || + !NewAI.getAllocatedType()->isSingleValueType()); + + // If we're just going to emit a memcpy, the alloca hasn't changed, and the + // size hasn't been shrunk based on analysis of the viable range, this is + // a no-op. + if (EmitMemCpy && &OldAI == &NewAI) { + uint64_t OrigBegin = IsDest ? MTO.DestBegin : MTO.SourceBegin; + uint64_t OrigEnd = IsDest ? MTO.DestEnd : MTO.SourceEnd; + // Ensure the start lines up. + assert(BeginOffset == OrigBegin); + (void)OrigBegin; + + // Rewrite the size as needed. + if (EndOffset != OrigEnd) + II.setLength(ConstantInt::get(II.getLength()->getType(), + EndOffset - BeginOffset)); + return false; + } + // Record this instruction for deletion. + Pass.DeadInsts.insert(&II); + + bool IsWholeAlloca = BeginOffset == NewAllocaBeginOffset && + EndOffset == NewAllocaEndOffset; + bool IsVectorElement = VecTy && !IsWholeAlloca; + uint64_t Size = EndOffset - BeginOffset; + IntegerType *SubIntTy + = IntTy ? Type::getIntNTy(IntTy->getContext(), Size*8) : 0; + + Type *OtherPtrTy = IsDest ? II.getRawSource()->getType() + : II.getRawDest()->getType(); + if (!EmitMemCpy) { + if (IsVectorElement) + OtherPtrTy = VecTy->getElementType()->getPointerTo(); + else if (IntTy && !IsWholeAlloca) + OtherPtrTy = SubIntTy->getPointerTo(); + else + OtherPtrTy = NewAI.getType(); + } + + // Compute the other pointer, folding as much as possible to produce + // a single, simple GEP in most cases. + Value *OtherPtr = IsDest ? II.getRawSource() : II.getRawDest(); + OtherPtr = getAdjustedPtr(IRB, TD, OtherPtr, RelOffset, OtherPtrTy, + getName("." + OtherPtr->getName())); + + // Strip all inbounds GEPs and pointer casts to try to dig out any root + // alloca that should be re-examined after rewriting this instruction. + if (AllocaInst *AI + = dyn_cast(OtherPtr->stripInBoundsOffsets())) + Pass.Worklist.insert(AI); + + if (EmitMemCpy) { + Value *OurPtr + = getAdjustedAllocaPtr(IRB, IsDest ? II.getRawDest()->getType() + : II.getRawSource()->getType()); + Type *SizeTy = II.getLength()->getType(); + Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset); + + CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr, + IsDest ? OtherPtr : OurPtr, + Size, Align, II.isVolatile()); + (void)New; + DEBUG(dbgs() << " to: " << *New << "\n"); + return false; + } + + // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy + // is equivalent to 1, but that isn't true if we end up rewriting this as + // a load or store. 
+ if (!Align) + Align = 1; + + Value *SrcPtr = OtherPtr; + Value *DstPtr = &NewAI; + if (!IsDest) + std::swap(SrcPtr, DstPtr); + + Value *Src; + if (IsVectorElement && !IsDest) { + // We have to extract rather than load. + Src = IRB.CreateExtractElement( + IRB.CreateAlignedLoad(SrcPtr, Align, getName(".copyload")), + getIndex(IRB, BeginOffset), + getName(".copyextract")); + } else if (IntTy && !IsWholeAlloca && !IsDest) { + Src = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + getName(".load")); + Src = convertValue(TD, IRB, Src, IntTy); + assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); + uint64_t Offset = BeginOffset - NewAllocaBeginOffset; + Src = extractInteger(TD, IRB, Src, SubIntTy, Offset, getName(".extract")); + } else { + Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(), + getName(".copyload")); + } + + if (IntTy && !IsWholeAlloca && IsDest) { + Value *Old = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), + getName(".oldload")); + Old = convertValue(TD, IRB, Old, IntTy); + assert(BeginOffset >= NewAllocaBeginOffset && "Out of bounds offset"); + uint64_t Offset = BeginOffset - NewAllocaBeginOffset; + Src = insertInteger(TD, IRB, Old, Src, Offset, getName(".insert")); + Src = convertValue(TD, IRB, Src, NewAllocaTy); + } + + if (IsVectorElement && IsDest) { + // We have to insert into a loaded copy before storing. + Src = IRB.CreateInsertElement( + IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")), + Src, getIndex(IRB, BeginOffset), + getName(".insert")); + } + + StoreInst *Store = cast( + IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile())); + (void)Store; + DEBUG(dbgs() << " to: " << *Store << "\n"); + return !II.isVolatile(); + } + + bool visitIntrinsicInst(IntrinsicInst &II) { + assert(II.getIntrinsicID() == Intrinsic::lifetime_start || + II.getIntrinsicID() == Intrinsic::lifetime_end); + DEBUG(dbgs() << " original: " << II << "\n"); + IRBuilder<> IRB(&II); + assert(II.getArgOperand(1) == OldPtr); + + // Record this instruction for deletion. + Pass.DeadInsts.insert(&II); + + ConstantInt *Size + = ConstantInt::get(cast(II.getArgOperand(0)->getType()), + EndOffset - BeginOffset); + Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType()); + Value *New; + if (II.getIntrinsicID() == Intrinsic::lifetime_start) + New = IRB.CreateLifetimeStart(Ptr, Size); + else + New = IRB.CreateLifetimeEnd(Ptr, Size); + + DEBUG(dbgs() << " to: " << *New << "\n"); + return true; + } + + bool visitPHINode(PHINode &PN) { + DEBUG(dbgs() << " original: " << PN << "\n"); + + // We would like to compute a new pointer in only one place, but have it be + // as local as possible to the PHI. To do that, we re-use the location of + // the old pointer, which necessarily must be in the right position to + // dominate the PHI. + IRBuilder<> PtrBuilder(cast(OldPtr)); + + Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType()); + // Replace the operands which were using the old pointer. + std::replace(PN.op_begin(), PN.op_end(), cast(OldPtr), NewPtr); + + DEBUG(dbgs() << " to: " << PN << "\n"); + deleteIfTriviallyDead(OldPtr); + return false; + } + + bool visitSelectInst(SelectInst &SI) { + DEBUG(dbgs() << " original: " << SI << "\n"); + IRBuilder<> IRB(&SI); + + // Find the operand we need to rewrite here. 
+ bool IsTrueVal = SI.getTrueValue() == OldPtr; + if (IsTrueVal) + assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!"); + else + assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!"); + + Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType()); + SI.setOperand(IsTrueVal ? 1 : 2, NewPtr); + DEBUG(dbgs() << " to: " << SI << "\n"); + deleteIfTriviallyDead(OldPtr); + return false; + } + +}; +} + +namespace { +/// \brief Visitor to rewrite aggregate loads and stores as scalar. +/// +/// This pass aggressively rewrites all aggregate loads and stores on +/// a particular pointer (or any pointer derived from it which we can identify) +/// with scalar loads and stores. +class AggLoadStoreRewriter : public InstVisitor { + // Befriend the base class so it can delegate to private visit methods. + friend class llvm::InstVisitor; + + const DataLayout &TD; + + /// Queue of pointer uses to analyze and potentially rewrite. + SmallVector Queue; + + /// Set to prevent us from cycling with phi nodes and loops. + SmallPtrSet Visited; + + /// The current pointer use being rewritten. This is used to dig up the used + /// value (as opposed to the user). + Use *U; + +public: + AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {} + + /// Rewrite loads and stores through a pointer and all pointers derived from + /// it. + bool rewrite(Instruction &I) { + DEBUG(dbgs() << " Rewriting FCA loads and stores...\n"); + enqueueUsers(I); + bool Changed = false; + while (!Queue.empty()) { + U = Queue.pop_back_val(); + Changed |= visit(cast(U->getUser())); + } + return Changed; + } + +private: + /// Enqueue all the users of the given instruction for further processing. + /// This uses a set to de-duplicate users. + void enqueueUsers(Instruction &I) { + for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE; + ++UI) + if (Visited.insert(*UI)) + Queue.push_back(&UI.getUse()); + } + + // Conservative default is to not rewrite anything. + bool visitInstruction(Instruction &I) { return false; } + + /// \brief Generic recursive split emission class. + template + class OpSplitter { + protected: + /// The builder used to form new instructions. + IRBuilder<> IRB; + /// The indices which to be used with insert- or extractvalue to select the + /// appropriate value within the aggregate. + SmallVector Indices; + /// The indices to a GEP instruction which will move Ptr to the correct slot + /// within the aggregate. + SmallVector GEPIndices; + /// The base pointer of the original op, used as a base for GEPing the + /// split operations. + Value *Ptr; + + /// Initialize the splitter with an insertion point, Ptr and start with a + /// single zero GEP index. + OpSplitter(Instruction *InsertionPoint, Value *Ptr) + : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {} + + public: + /// \brief Generic recursive split emission routine. + /// + /// This method recursively splits an aggregate op (load or store) into + /// scalar or vector ops. It splits recursively until it hits a single value + /// and emits that single value operation via the template argument. + /// + /// The logic of this routine relies on GEPs and insertvalue and + /// extractvalue all operating with the same fundamental index list, merely + /// formatted differently (GEPs need actual values). + /// + /// \param Ty The type being split recursively into smaller ops. + /// \param Agg The aggregate value being built up or stored, depending on + /// whether this is splitting a load or a store respectively. 
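The splitter documented here, driven by emitSplitOps just below, turns one aggregate load or store into a scalar operation per leaf, reassembling the aggregate with the corresponding insertvalue or extractvalue. Written as plain C++ over a hypothetical struct (not taken from the patch), the load side has roughly this shape:

    // Standalone analogy of splitting an aggregate (FCA) load into scalar loads.
    #include <cstdio>

    struct Pair { int A; float B[2]; };   // hypothetical aggregate type

    // Before: the aggregate is loaded as a single first-class value.
    Pair loadWhole(const Pair *P) { return *P; }

    // After: one load per scalar leaf, reassembled field by field, mirroring the
    // gep + load + insertvalue sequence the splitter emits.
    Pair loadSplit(const Pair *P) {
      Pair Agg;
      Agg.A    = P->A;      // indices [0, 0]
      Agg.B[0] = P->B[0];   // indices [0, 1, 0]
      Agg.B[1] = P->B[1];   // indices [0, 1, 1]
      return Agg;
    }

    int main() {
      Pair P = {7, {1.5f, 2.5f}};
      Pair Q = loadSplit(&P);
      std::printf("%d %.1f %.1f (same as loadWhole: %d)\n",
                  Q.A, Q.B[0], Q.B[1], loadWhole(&P).A);
      return 0;
    }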
+ void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) { + if (Ty->isSingleValueType()) + return static_cast(this)->emitFunc(Ty, Agg, Name); + + if (ArrayType *ATy = dyn_cast(Ty)) { + unsigned OldSize = Indices.size(); + (void)OldSize; + for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size; + ++Idx) { + assert(Indices.size() == OldSize && "Did not return to the old size"); + Indices.push_back(Idx); + GEPIndices.push_back(IRB.getInt32(Idx)); + emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx)); + GEPIndices.pop_back(); + Indices.pop_back(); + } + return; + } + + if (StructType *STy = dyn_cast(Ty)) { + unsigned OldSize = Indices.size(); + (void)OldSize; + for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size; + ++Idx) { + assert(Indices.size() == OldSize && "Did not return to the old size"); + Indices.push_back(Idx); + GEPIndices.push_back(IRB.getInt32(Idx)); + emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx)); + GEPIndices.pop_back(); + Indices.pop_back(); + } + return; + } + + llvm_unreachable("Only arrays and structs are aggregate loadable types"); + } + }; + + struct LoadOpSplitter : public OpSplitter { + LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr) + : OpSplitter(InsertionPoint, Ptr) {} + + /// Emit a leaf load of a single value. This is called at the leaves of the + /// recursive emission to actually load values. + void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { + assert(Ty->isSingleValueType()); + // Load the single value and insert it using the indices. + Value *Load = IRB.CreateLoad(IRB.CreateInBoundsGEP(Ptr, GEPIndices, + Name + ".gep"), + Name + ".load"); + Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert"); + DEBUG(dbgs() << " to: " << *Load << "\n"); + } + }; + + bool visitLoadInst(LoadInst &LI) { + assert(LI.getPointerOperand() == *U); + if (!LI.isSimple() || LI.getType()->isSingleValueType()) + return false; + + // We have an aggregate being loaded, split it apart. + DEBUG(dbgs() << " original: " << LI << "\n"); + LoadOpSplitter Splitter(&LI, *U); + Value *V = UndefValue::get(LI.getType()); + Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca"); + LI.replaceAllUsesWith(V); + LI.eraseFromParent(); + return true; + } + + struct StoreOpSplitter : public OpSplitter { + StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr) + : OpSplitter(InsertionPoint, Ptr) {} + + /// Emit a leaf store of a single value. This is called at the leaves of the + /// recursive emission to actually produce stores. + void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) { + assert(Ty->isSingleValueType()); + // Extract the single value and store it using the indices. + Value *Store = IRB.CreateStore( + IRB.CreateExtractValue(Agg, Indices, Name + ".extract"), + IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep")); + (void)Store; + DEBUG(dbgs() << " to: " << *Store << "\n"); + } + }; + + bool visitStoreInst(StoreInst &SI) { + if (!SI.isSimple() || SI.getPointerOperand() != *U) + return false; + Value *V = SI.getValueOperand(); + if (V->getType()->isSingleValueType()) + return false; + + // We have an aggregate being stored, split it apart. 
+ DEBUG(dbgs() << " original: " << SI << "\n"); + StoreOpSplitter Splitter(&SI, *U); + Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca"); + SI.eraseFromParent(); + return true; + } + + bool visitBitCastInst(BitCastInst &BC) { + enqueueUsers(BC); + return false; + } + + bool visitGetElementPtrInst(GetElementPtrInst &GEPI) { + enqueueUsers(GEPI); + return false; + } + + bool visitPHINode(PHINode &PN) { + enqueueUsers(PN); + return false; + } + + bool visitSelectInst(SelectInst &SI) { + enqueueUsers(SI); + return false; + } +}; +} + +/// \brief Strip aggregate type wrapping. +/// +/// This removes no-op aggregate types wrapping an underlying type. It will +/// strip as many layers of types as it can without changing either the type +/// size or the allocated size. +static Type *stripAggregateTypeWrapping(const DataLayout &DL, Type *Ty) { + if (Ty->isSingleValueType()) + return Ty; + + uint64_t AllocSize = DL.getTypeAllocSize(Ty); + uint64_t TypeSize = DL.getTypeSizeInBits(Ty); + + Type *InnerTy; + if (ArrayType *ArrTy = dyn_cast(Ty)) { + InnerTy = ArrTy->getElementType(); + } else if (StructType *STy = dyn_cast(Ty)) { + const StructLayout *SL = DL.getStructLayout(STy); + unsigned Index = SL->getElementContainingOffset(0); + InnerTy = STy->getElementType(Index); + } else { + return Ty; + } + + if (AllocSize > DL.getTypeAllocSize(InnerTy) || + TypeSize > DL.getTypeSizeInBits(InnerTy)) + return Ty; + + return stripAggregateTypeWrapping(DL, InnerTy); +} + +/// \brief Try to find a partition of the aggregate type passed in for a given +/// offset and size. +/// +/// This recurses through the aggregate type and tries to compute a subtype +/// based on the offset and size. When the offset and size span a sub-section +/// of an array, it will even compute a new array type for that sub-section, +/// and the same for structs. +/// +/// Note that this routine is very strict and tries to find a partition of the +/// type which produces the *exact* right offset and size. It is not forgiving +/// when the size or offset cause either end of type-based partition to be off. +/// Also, this is a best-effort routine. It is reasonable to give up and not +/// return a type if necessary. +static Type *getTypePartition(const DataLayout &TD, Type *Ty, + uint64_t Offset, uint64_t Size) { + if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size) + return stripAggregateTypeWrapping(TD, Ty); + if (Offset > TD.getTypeAllocSize(Ty) || + (TD.getTypeAllocSize(Ty) - Offset) < Size) + return 0; + + if (SequentialType *SeqTy = dyn_cast(Ty)) { + // We can't partition pointers... + if (SeqTy->isPointerTy()) + return 0; + + Type *ElementTy = SeqTy->getElementType(); + uint64_t ElementSize = TD.getTypeAllocSize(ElementTy); + uint64_t NumSkippedElements = Offset / ElementSize; + if (ArrayType *ArrTy = dyn_cast(SeqTy)) + if (NumSkippedElements >= ArrTy->getNumElements()) + return 0; + if (VectorType *VecTy = dyn_cast(SeqTy)) + if (NumSkippedElements >= VecTy->getNumElements()) + return 0; + Offset -= NumSkippedElements * ElementSize; + + // First check if we need to recurse. + if (Offset > 0 || Size < ElementSize) { + // Bail if the partition ends in a different array element. + if ((Offset + Size) > ElementSize) + return 0; + // Recurse through the element type trying to peel off offset bytes. 
+ return getTypePartition(TD, ElementTy, Offset, Size); + } + assert(Offset == 0); + + if (Size == ElementSize) + return stripAggregateTypeWrapping(TD, ElementTy); + assert(Size > ElementSize); + uint64_t NumElements = Size / ElementSize; + if (NumElements * ElementSize != Size) + return 0; + return ArrayType::get(ElementTy, NumElements); + } + + StructType *STy = dyn_cast(Ty); + if (!STy) + return 0; + + const StructLayout *SL = TD.getStructLayout(STy); + if (Offset >= SL->getSizeInBytes()) + return 0; + uint64_t EndOffset = Offset + Size; + if (EndOffset > SL->getSizeInBytes()) + return 0; + + unsigned Index = SL->getElementContainingOffset(Offset); + Offset -= SL->getElementOffset(Index); + + Type *ElementTy = STy->getElementType(Index); + uint64_t ElementSize = TD.getTypeAllocSize(ElementTy); + if (Offset >= ElementSize) + return 0; // The offset points into alignment padding. + + // See if any partition must be contained by the element. + if (Offset > 0 || Size < ElementSize) { + if ((Offset + Size) > ElementSize) + return 0; + return getTypePartition(TD, ElementTy, Offset, Size); + } + assert(Offset == 0); + + if (Size == ElementSize) + return stripAggregateTypeWrapping(TD, ElementTy); + + StructType::element_iterator EI = STy->element_begin() + Index, + EE = STy->element_end(); + if (EndOffset < SL->getSizeInBytes()) { + unsigned EndIndex = SL->getElementContainingOffset(EndOffset); + if (Index == EndIndex) + return 0; // Within a single element and its padding. + + // Don't try to form "natural" types if the elements don't line up with the + // expected size. + // FIXME: We could potentially recurse down through the last element in the + // sub-struct to find a natural end point. + if (SL->getElementOffset(EndIndex) != EndOffset) + return 0; + + assert(Index < EndIndex); + EE = STy->element_begin() + EndIndex; + } + + // Try to build up a sub-structure. + StructType *SubTy = StructType::get(STy->getContext(), makeArrayRef(EI, EE), + STy->isPacked()); + const StructLayout *SubSL = TD.getStructLayout(SubTy); + if (Size != SubSL->getSizeInBytes()) + return 0; // The sub-struct doesn't have quite the size needed. + + return SubTy; +} + +/// \brief Rewrite an alloca partition's users. +/// +/// This routine drives both of the rewriting goals of the SROA pass. It tries +/// to rewrite uses of an alloca partition to be conducive for SSA value +/// promotion. If the partition needs a new, more refined alloca, this will +/// build that new alloca, preserving as much type information as possible, and +/// rewrite the uses of the old alloca to point at the new one and have the +/// appropriate new offsets. It also evaluates how successful the rewrite was +/// at enabling promotion and if it was successful queues the alloca to be +/// promoted. +bool SROA::rewriteAllocaPartition(AllocaInst &AI, + AllocaPartitioning &P, + AllocaPartitioning::iterator PI) { + uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset; + bool IsLive = false; + for (AllocaPartitioning::use_iterator UI = P.use_begin(PI), + UE = P.use_end(PI); + UI != UE && !IsLive; ++UI) + if (UI->U) + IsLive = true; + if (!IsLive) + return false; // No live uses left of this partition. + + DEBUG(dbgs() << "Speculating PHIs and selects in partition " + << "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n"); + + PHIOrSelectSpeculator Speculator(*TD, P, *this); + DEBUG(dbgs() << " speculating "); + DEBUG(P.print(dbgs(), PI, "")); + Speculator.visitUsers(PI); + + // Try to compute a friendly type for this partition of the alloca. 
This + // won't always succeed, in which case we fall back to a legal integer type + // or an i8 array of an appropriate size. + Type *AllocaTy = 0; + if (Type *PartitionTy = P.getCommonType(PI)) + if (TD->getTypeAllocSize(PartitionTy) >= AllocaSize) + AllocaTy = PartitionTy; + if (!AllocaTy) + if (Type *PartitionTy = getTypePartition(*TD, AI.getAllocatedType(), + PI->BeginOffset, AllocaSize)) + AllocaTy = PartitionTy; + if ((!AllocaTy || + (AllocaTy->isArrayTy() && + AllocaTy->getArrayElementType()->isIntegerTy())) && + TD->isLegalInteger(AllocaSize * 8)) + AllocaTy = Type::getIntNTy(*C, AllocaSize * 8); + if (!AllocaTy) + AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize); + assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize); + + // Check for the case where we're going to rewrite to a new alloca of the + // exact same type as the original, and with the same access offsets. In that + // case, re-use the existing alloca, but still run through the rewriter to + // performe phi and select speculation. + AllocaInst *NewAI; + if (AllocaTy == AI.getAllocatedType()) { + assert(PI->BeginOffset == 0 && + "Non-zero begin offset but same alloca type"); + assert(PI == P.begin() && "Begin offset is zero on later partition"); + NewAI = &AI; + } else { + unsigned Alignment = AI.getAlignment(); + if (!Alignment) { + // The minimum alignment which users can rely on when the explicit + // alignment is omitted or zero is that required by the ABI for this + // type. + Alignment = TD->getABITypeAlignment(AI.getAllocatedType()); + } + Alignment = MinAlign(Alignment, PI->BeginOffset); + // If we will get at least this much alignment from the type alone, leave + // the alloca's alignment unconstrained. + if (Alignment <= TD->getABITypeAlignment(AllocaTy)) + Alignment = 0; + NewAI = new AllocaInst(AllocaTy, 0, Alignment, + AI.getName() + ".sroa." + Twine(PI - P.begin()), + &AI); + ++NumNewAllocas; + } + + DEBUG(dbgs() << "Rewriting alloca partition " + << "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: " + << *NewAI << "\n"); + + // Track the high watermark of the post-promotion worklist. We will reset it + // to this point if the alloca is not in fact scheduled for promotion. + unsigned PPWOldSize = PostPromotionWorklist.size(); + + AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI, + PI->BeginOffset, PI->EndOffset); + DEBUG(dbgs() << " rewriting "); + DEBUG(P.print(dbgs(), PI, "")); + bool Promotable = Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI)); + if (Promotable) { + DEBUG(dbgs() << " and queuing for promotion\n"); + PromotableAllocas.push_back(NewAI); + } else if (NewAI != &AI) { + // If we can't promote the alloca, iterate on it to check for new + // refinements exposed by splitting the current alloca. Don't iterate on an + // alloca which didn't actually change and didn't get promoted. + Worklist.insert(NewAI); + } + + // Drop any post-promotion work items if promotion didn't happen. + if (!Promotable) + while (PostPromotionWorklist.size() > PPWOldSize) + PostPromotionWorklist.pop_back(); + + return true; +} + +/// \brief Walks the partitioning of an alloca rewriting uses of each partition. +bool SROA::splitAlloca(AllocaInst &AI, AllocaPartitioning &P) { + bool Changed = false; + for (AllocaPartitioning::iterator PI = P.begin(), PE = P.end(); PI != PE; + ++PI) + Changed |= rewriteAllocaPartition(AI, P, PI); + + return Changed; +} + +/// \brief Analyze an alloca for SROA. 
+/// +/// This analyzes the alloca to ensure we can reason about it, builds +/// a partitioning of the alloca, and then hands it off to be split and +/// rewritten as needed. +bool SROA::runOnAlloca(AllocaInst &AI) { + DEBUG(dbgs() << "SROA alloca: " << AI << "\n"); + ++NumAllocasAnalyzed; + + // Special case dead allocas, as they're trivial. + if (AI.use_empty()) { + AI.eraseFromParent(); + return true; + } + + // Skip alloca forms that this analysis can't handle. + if (AI.isArrayAllocation() || !AI.getAllocatedType()->isSized() || + TD->getTypeAllocSize(AI.getAllocatedType()) == 0) + return false; + + bool Changed = false; + + // First, split any FCA loads and stores touching this alloca to promote + // better splitting and promotion opportunities. + AggLoadStoreRewriter AggRewriter(*TD); + Changed |= AggRewriter.rewrite(AI); + + // Build the partition set using a recursive instruction-visiting builder. + AllocaPartitioning P(*TD, AI); + DEBUG(P.print(dbgs())); + if (P.isEscaped()) + return Changed; + + // Delete all the dead users of this alloca before splitting and rewriting it. + for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(), + DE = P.dead_user_end(); + DI != DE; ++DI) { + Changed = true; + (*DI)->replaceAllUsesWith(UndefValue::get((*DI)->getType())); + DeadInsts.insert(*DI); + } + for (AllocaPartitioning::dead_op_iterator DO = P.dead_op_begin(), + DE = P.dead_op_end(); + DO != DE; ++DO) { + Value *OldV = **DO; + // Clobber the use with an undef value. + **DO = UndefValue::get(OldV->getType()); + if (Instruction *OldI = dyn_cast(OldV)) + if (isInstructionTriviallyDead(OldI)) { + Changed = true; + DeadInsts.insert(OldI); + } + } + + // No partitions to split. Leave the dead alloca for a later pass to clean up. + if (P.begin() == P.end()) + return Changed; + + return splitAlloca(AI, P) || Changed; +} + +/// \brief Delete the dead instructions accumulated in this run. +/// +/// Recursively deletes the dead instructions we've accumulated. This is done +/// at the very end to maximize locality of the recursive delete and to +/// minimize the problems of invalidated instruction pointers as such pointers +/// are used heavily in the intermediate stages of the algorithm. +/// +/// We also record the alloca instructions deleted here so that they aren't +/// subsequently handed to mem2reg to promote. +void SROA::deleteDeadInstructions(SmallPtrSet &DeletedAllocas) { + while (!DeadInsts.empty()) { + Instruction *I = DeadInsts.pop_back_val(); + DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n"); + + I->replaceAllUsesWith(UndefValue::get(I->getType())); + + for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) + if (Instruction *U = dyn_cast(*OI)) { + // Zero out the operand and see if it becomes trivially dead. + *OI = 0; + if (isInstructionTriviallyDead(U)) + DeadInsts.insert(U); + } + + if (AllocaInst *AI = dyn_cast(I)) + DeletedAllocas.insert(AI); + + ++NumDeleted; + I->eraseFromParent(); + } +} + +/// \brief Promote the allocas, using the best available technique. +/// +/// This attempts to promote whatever allocas have been identified as viable in +/// the PromotableAllocas list. If that list is empty, there is nothing to do. +/// If there is a domtree available, we attempt to promote using the full power +/// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is +/// based on the SSAUpdater utilities. This function returns whether any +/// promotion occured. 
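// [Editor's note: illustrative sketch only, not part of this patch.]
// The fallback chain rewriteAllocaPartition (above) uses to pick a type for a
// partition's new alloca, modelled with strings instead of llvm::Type.  The
// CommonUseTy / ExactSubTy parameters are hypothetical stand-ins for the
// AllocaPartitioning::getCommonType() and getTypePartition() results; the real
// code additionally prefers a legal integer over an integer-element array.
#include <sstream>
#include <string>

static bool isLegalInteger(unsigned Bits) {  // toy stand-in for DataLayout
  return Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64;
}

static std::string choosePartitionType(const std::string &CommonUseTy,
                                       const std::string &ExactSubTy,
                                       unsigned PartitionBytes) {
  if (!CommonUseTy.empty())
    return CommonUseTy;                      // 1. a type the uses agree on
  if (!ExactSubTy.empty())
    return ExactSubTy;                       // 2. an exact slice of the old type
  std::ostringstream OS;
  if (isLegalInteger(PartitionBytes * 8)) {
    OS << "i" << PartitionBytes * 8;         // 3. a target-legal integer
    return OS.str();
  }
  OS << "[" << PartitionBytes << " x i8]";   // 4. last resort: a byte array
  return OS.str();
}

// e.g. choosePartitionType("", "", 4) == "i32",
//      choosePartitionType("", "", 3) == "[3 x i8]".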
+bool SROA::promoteAllocas(Function &F) { + if (PromotableAllocas.empty()) + return false; + + NumPromoted += PromotableAllocas.size(); + + if (DT && !ForceSSAUpdater) { + DEBUG(dbgs() << "Promoting allocas with mem2reg...\n"); + PromoteMemToReg(PromotableAllocas, *DT); + PromotableAllocas.clear(); + return true; + } + + DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n"); + SSAUpdater SSA; + DIBuilder DIB(*F.getParent()); + SmallVector Insts; + + for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) { + AllocaInst *AI = PromotableAllocas[Idx]; + for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end(); + UI != UE;) { + Instruction *I = cast(*UI++); + // FIXME: Currently the SSAUpdater infrastructure doesn't reason about + // lifetime intrinsics and so we strip them (and the bitcasts+GEPs + // leading to them) here. Eventually it should use them to optimize the + // scalar values produced. + if (isa(I) || isa(I)) { + assert(onlyUsedByLifetimeMarkers(I) && + "Found a bitcast used outside of a lifetime marker."); + while (!I->use_empty()) + cast(*I->use_begin())->eraseFromParent(); + I->eraseFromParent(); + continue; + } + if (IntrinsicInst *II = dyn_cast(I)) { + assert(II->getIntrinsicID() == Intrinsic::lifetime_start || + II->getIntrinsicID() == Intrinsic::lifetime_end); + II->eraseFromParent(); + continue; + } + + Insts.push_back(I); + } + AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts); + Insts.clear(); + } + + PromotableAllocas.clear(); + return true; +} + +namespace { + /// \brief A predicate to test whether an alloca belongs to a set. + class IsAllocaInSet { + typedef SmallPtrSet SetType; + const SetType &Set; + + public: + typedef AllocaInst *argument_type; + + IsAllocaInSet(const SetType &Set) : Set(Set) {} + bool operator()(AllocaInst *AI) const { return Set.count(AI); } + }; +} + +bool SROA::runOnFunction(Function &F) { + DEBUG(dbgs() << "SROA function: " << F.getName() << "\n"); + C = &F.getContext(); + TD = getAnalysisIfAvailable(); + if (!TD) { + DEBUG(dbgs() << " Skipping SROA -- no target data!\n"); + return false; + } + DT = getAnalysisIfAvailable(); + + BasicBlock &EntryBB = F.getEntryBlock(); + for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end()); + I != E; ++I) + if (AllocaInst *AI = dyn_cast(I)) + Worklist.insert(AI); + + bool Changed = false; + // A set of deleted alloca instruction pointers which should be removed from + // the list of promotable allocas. + SmallPtrSet DeletedAllocas; + + do { + while (!Worklist.empty()) { + Changed |= runOnAlloca(*Worklist.pop_back_val()); + deleteDeadInstructions(DeletedAllocas); + + // Remove the deleted allocas from various lists so that we don't try to + // continue processing them. 
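// [Editor's note: illustrative sketch only, not part of this patch.]
// What promoteAllocas (above) ultimately achieves, shown at the source level:
// once every access to a stack slot is a simple load or store, the slot can be
// deleted and the value carried in SSA form, whether promotion is done by
// mem2reg or by the SSAUpdater-based AllocaPromoter.
int beforePromotion(bool C) {
  int X;                 // lives in an alloca
  if (C)
    X = 1;               // store
  else
    X = 2;               // store
  return X;              // load
}

int afterPromotion(bool C) {
  return C ? 1 : 2;      // the stack slot is gone; the value flows directly
}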
+ if (!DeletedAllocas.empty()) { + Worklist.remove_if(IsAllocaInSet(DeletedAllocas)); + PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas)); + PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(), + PromotableAllocas.end(), + IsAllocaInSet(DeletedAllocas)), + PromotableAllocas.end()); + DeletedAllocas.clear(); + } + } + + Changed |= promoteAllocas(F); + + Worklist = PostPromotionWorklist; + PostPromotionWorklist.clear(); + } while (!Worklist.empty()); + + return Changed; +} + +void SROA::getAnalysisUsage(AnalysisUsage &AU) const { + if (RequiresDomTree) + AU.addRequired(); + AU.setPreservesCFG(); +} diff --git a/lib/Transforms/Scalar/Scalar.cpp b/lib/Transforms/Scalar/Scalar.cpp index 48318c8..39630fd 100644 --- a/lib/Transforms/Scalar/Scalar.cpp +++ b/lib/Transforms/Scalar/Scalar.cpp @@ -19,7 +19,7 @@ #include "llvm/PassManager.h" #include "llvm/Analysis/Passes.h" #include "llvm/Analysis/Verifier.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Scalar.h" using namespace llvm; @@ -59,6 +59,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) { initializeRegToMemPass(Registry); initializeSCCPPass(Registry); initializeIPSCCPPass(Registry); + initializeSROAPass(Registry); initializeSROA_DTPass(Registry); initializeSROA_SSAUpPass(Registry); initializeCFGSimplifyPassPass(Registry); diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp index 6637126..a46d09c 100644 --- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp +++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp @@ -46,7 +46,7 @@ #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/PromoteMemToReg.h" #include "llvm/Transforms/Utils/SSAUpdater.h" @@ -56,7 +56,6 @@ STATISTIC(NumReplaced, "Number of allocas broken up"); STATISTIC(NumPromoted, "Number of allocas promoted"); STATISTIC(NumAdjusted, "Number of scalar allocas adjusted to allow promotion"); STATISTIC(NumConverted, "Number of aggregates converted to scalar"); -STATISTIC(NumGlobals, "Number of allocas copied from constant global"); namespace { struct SROA : public FunctionPass { @@ -88,7 +87,7 @@ namespace { private: bool HasDomTree; - TargetData *TD; + DataLayout *TD; /// DeadInsts - Keep track of instructions we have made dead, so that /// we can remove them after we are done working. @@ -183,9 +182,6 @@ namespace { void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, SmallVector &NewElts); bool ShouldAttemptScalarRepl(AllocaInst *AI); - - static MemTransferInst *isOnlyCopiedFromConstantGlobal( - AllocaInst *AI, SmallVector &ToDelete); }; // SROA_DT - SROA that uses DominatorTree. @@ -262,7 +258,7 @@ namespace { class ConvertToScalarInfo { /// AllocaSize - The size of the alloca being considered in bytes. 
unsigned AllocaSize; - const TargetData &TD; + const DataLayout &TD; unsigned ScalarLoadThreshold; /// IsNotTrivial - This is set to true if there is some access to the object @@ -305,7 +301,7 @@ class ConvertToScalarInfo { bool HadDynamicAccess; public: - explicit ConvertToScalarInfo(unsigned Size, const TargetData &td, + explicit ConvertToScalarInfo(unsigned Size, const DataLayout &td, unsigned SLT) : AllocaSize(Size), TD(td), ScalarLoadThreshold(SLT), IsNotTrivial(false), ScalarKind(Unknown), VectorTy(0), HadNonMemTransferAccess(false), @@ -1024,11 +1020,11 @@ ConvertScalar_InsertValue(Value *SV, Value *Old, bool SROA::runOnFunction(Function &F) { - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); bool Changed = performPromotion(F); - // FIXME: ScalarRepl currently depends on TargetData more than it + // FIXME: ScalarRepl currently depends on DataLayout more than it // theoretically needs to. It should be refactored in order to support // target-independent IR. Until this is done, just skip the actual // scalar-replacement portion of this pass. @@ -1138,7 +1134,7 @@ public: /// /// We can do this to a select if its only uses are loads and if the operand to /// the select can be loaded unconditionally. -static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) { +static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *TD) { bool TDerefable = SI->getTrueValue()->isDereferenceablePointer(); bool FDerefable = SI->getFalseValue()->isDereferenceablePointer(); @@ -1176,7 +1172,7 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) { /// /// We can do this to a select if its only uses are loads and if the operand to /// the select can be loaded unconditionally. -static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) { +static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *TD) { // For now, we can only do this promotion if the load is in the same block as // the PHI, and if there are no stores between the phi and load. // TODO: Allow recursive phi users. @@ -1240,7 +1236,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) { /// direct (non-volatile) loads and stores to it. If the alloca is close but /// not quite there, this will transform the code to allow promotion. As such, /// it is a non-pure predicate. -static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) { +static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *TD) { SetVector, SmallPtrSet > InstsToRewrite; @@ -1465,26 +1461,6 @@ bool SROA::ShouldAttemptScalarRepl(AllocaInst *AI) { return false; } -/// getPointeeAlignment - Compute the minimum alignment of the value pointed -/// to by the given pointer. -static unsigned getPointeeAlignment(Value *V, const TargetData &TD) { - if (ConstantExpr *CE = dyn_cast(V)) - if (CE->getOpcode() == Instruction::BitCast || - (CE->getOpcode() == Instruction::GetElementPtr && - cast(CE)->hasAllZeroIndices())) - return getPointeeAlignment(CE->getOperand(0), TD); - - if (GlobalVariable *GV = dyn_cast(V)) - if (!GV->isDeclaration()) - return TD.getPreferredAlignment(GV); - - if (PointerType *PT = dyn_cast(V->getType())) - return TD.getABITypeAlignment(PT->getElementType()); - - return 0; -} - - // performScalarRepl - This algorithm is a simple worklist driven algorithm, // which runs on all of the alloca instructions in the function, removing them // if they are only used by getelementptr instructions. 
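// [Editor's note: illustrative sketch only, not part of this patch.]
// What "speculating" a load over a select means, at the source level.  The
// rewrite below is only sound when both addresses can be loaded
// unconditionally (here: two locals), which is exactly what
// isSafeSelectToSpeculate / isSafePHIToSpeculate (above) check.
int beforeSpeculation(bool C, int A, int B) {
  int *P = C ? &A : &B;  // select of two addresses
  return *P;             // one conditional load through the select
}

int afterSpeculation(bool C, int A, int B) {
  int VA = A;            // both loads hoisted (speculated)...
  int VB = B;
  return C ? VA : VB;    // ...then the loaded values are selected
}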
@@ -1516,29 +1492,6 @@ bool SROA::performScalarRepl(Function &F) { if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized()) continue; - // Check to see if this allocation is only modified by a memcpy/memmove from - // a constant global whose alignment is equal to or exceeds that of the - // allocation. If this is the case, we can change all users to use - // the constant global instead. This is commonly produced by the CFE by - // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A' - // is only subsequently read. - SmallVector ToDelete; - if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(AI, ToDelete)) { - if (AI->getAlignment() <= getPointeeAlignment(Copy->getSource(), *TD)) { - DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n'); - DEBUG(dbgs() << " memcpy = " << *Copy << '\n'); - for (unsigned i = 0, e = ToDelete.size(); i != e; ++i) - ToDelete[i]->eraseFromParent(); - Constant *TheSrc = cast(Copy->getSource()); - AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType())); - Copy->eraseFromParent(); // Don't mutate the global. - AI->eraseFromParent(); - ++NumGlobals; - Changed = true; - continue; - } - } - // Check to see if we can perform the core SROA transformation. We cannot // transform the allocation instruction if it is an array allocation // (allocations OF arrays are ok though), and an allocation of a scalar @@ -2584,7 +2537,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI, /// HasPadding - Return true if the specified type has any structure or /// alignment padding in between the elements that would be split apart /// by SROA; return false otherwise. -static bool HasPadding(Type *Ty, const TargetData &TD) { +static bool HasPadding(Type *Ty, const DataLayout &TD) { if (ArrayType *ATy = dyn_cast(Ty)) { Ty = ATy->getElementType(); return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty); @@ -2656,134 +2609,3 @@ bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) { return true; } - - - -/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to -/// some part of a constant global variable. This intentionally only accepts -/// constant expressions because we don't can't rewrite arbitrary instructions. -static bool PointsToConstantGlobal(Value *V) { - if (GlobalVariable *GV = dyn_cast(V)) - return GV->isConstant(); - if (ConstantExpr *CE = dyn_cast(V)) - if (CE->getOpcode() == Instruction::BitCast || - CE->getOpcode() == Instruction::GetElementPtr) - return PointsToConstantGlobal(CE->getOperand(0)); - return false; -} - -/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived) -/// pointer to an alloca. Ignore any reads of the pointer, return false if we -/// see any stores or other unknown uses. If we see pointer arithmetic, keep -/// track of whether it moves the pointer (with isOffset) but otherwise traverse -/// the uses. If we see a memcpy/memmove that targets an unoffseted pointer to -/// the alloca, and if the source pointer is a pointer to a constant global, we -/// can optimize this. -static bool -isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy, - bool isOffset, - SmallVector &LifetimeMarkers) { - // We track lifetime intrinsics as we encounter them. If we decide to go - // ahead and replace the value with the global, this lets the caller quickly - // eliminate the markers. 
- - for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) { - User *U = cast(*UI); - - if (LoadInst *LI = dyn_cast(U)) { - // Ignore non-volatile loads, they are always ok. - if (!LI->isSimple()) return false; - continue; - } - - if (BitCastInst *BCI = dyn_cast(U)) { - // If uses of the bitcast are ok, we are ok. - if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset, - LifetimeMarkers)) - return false; - continue; - } - if (GetElementPtrInst *GEP = dyn_cast(U)) { - // If the GEP has all zero indices, it doesn't offset the pointer. If it - // doesn't, it does. - if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy, - isOffset || !GEP->hasAllZeroIndices(), - LifetimeMarkers)) - return false; - continue; - } - - if (CallSite CS = U) { - // If this is the function being called then we treat it like a load and - // ignore it. - if (CS.isCallee(UI)) - continue; - - // If this is a readonly/readnone call site, then we know it is just a - // load (but one that potentially returns the value itself), so we can - // ignore it if we know that the value isn't captured. - unsigned ArgNo = CS.getArgumentNo(UI); - if (CS.onlyReadsMemory() && - (CS.getInstruction()->use_empty() || CS.doesNotCapture(ArgNo))) - continue; - - // If this is being passed as a byval argument, the caller is making a - // copy, so it is only a read of the alloca. - if (CS.isByValArgument(ArgNo)) - continue; - } - - // Lifetime intrinsics can be handled by the caller. - if (IntrinsicInst *II = dyn_cast(U)) { - if (II->getIntrinsicID() == Intrinsic::lifetime_start || - II->getIntrinsicID() == Intrinsic::lifetime_end) { - assert(II->use_empty() && "Lifetime markers have no result to use!"); - LifetimeMarkers.push_back(II); - continue; - } - } - - // If this is isn't our memcpy/memmove, reject it as something we can't - // handle. - MemTransferInst *MI = dyn_cast(U); - if (MI == 0) - return false; - - // If the transfer is using the alloca as a source of the transfer, then - // ignore it since it is a load (unless the transfer is volatile). - if (UI.getOperandNo() == 1) { - if (MI->isVolatile()) return false; - continue; - } - - // If we already have seen a copy, reject the second one. - if (TheCopy) return false; - - // If the pointer has been offset from the start of the alloca, we can't - // safely handle this. - if (isOffset) return false; - - // If the memintrinsic isn't using the alloca as the dest, reject it. - if (UI.getOperandNo() != 0) return false; - - // If the source of the memcpy/move is not a constant global, reject it. - if (!PointsToConstantGlobal(MI->getSource())) - return false; - - // Otherwise, the transform is safe. Remember the copy instruction. - TheCopy = MI; - } - return true; -} - -/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only -/// modified by a copy from a constant global. If we can prove this, we can -/// replace any uses of the alloca with uses of the global directly. 
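// [Editor's note: illustrative sketch only, not part of this patch.]
// The rewrite that the removed isOnlyCopiedFromConstantGlobal machinery
// (above) enabled, shown at the source level: a local array whose only store
// is a full-size copy from a constant global, and which is otherwise only
// read, can be dropped in favour of reading the global directly.
#include <cstring>

static const int Table[4] = {10, 20, 30, 40};  // constant global

int beforeRewrite(int I) {
  int A[4];
  std::memcpy(A, Table, sizeof(A));            // the only write to A
  return A[I];                                 // A is otherwise only read
}

int afterRewrite(int I) {
  return Table[I];                             // read the global directly
}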
-MemTransferInst * -SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI, - SmallVector &ToDelete) { - MemTransferInst *TheCopy = 0; - if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false, ToDelete)) - return TheCopy; - return 0; -} diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp index d13e4ab..9f24bb6 100644 --- a/lib/Transforms/Scalar/SimplifyCFGPass.cpp +++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp @@ -31,10 +31,11 @@ #include "llvm/Attributes.h" #include "llvm/Support/CFG.h" #include "llvm/Pass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/Statistic.h" +#include "llvm/TargetTransformInfo.h" using namespace llvm; STATISTIC(NumSimpl, "Number of blocks simplified"); @@ -59,9 +60,9 @@ FunctionPass *llvm::createCFGSimplificationPass() { return new CFGSimplifyPass(); } -/// ChangeToUnreachable - Insert an unreachable instruction before the specified +/// changeToUnreachable - Insert an unreachable instruction before the specified /// instruction, making it and the rest of the code in the block dead. -static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) { +static void changeToUnreachable(Instruction *I, bool UseLLVMTrap) { BasicBlock *BB = I->getParent(); // Loop over all of the successors, removing BB's entry from any PHI // nodes. @@ -87,8 +88,8 @@ static void ChangeToUnreachable(Instruction *I, bool UseLLVMTrap) { } } -/// ChangeToCall - Convert the specified invoke into a normal call. -static void ChangeToCall(InvokeInst *II) { +/// changeToCall - Convert the specified invoke into a normal call. +static void changeToCall(InvokeInst *II) { SmallVector Args(II->op_begin(), II->op_end() - 3); CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, "", II); NewCall->takeName(II); @@ -105,7 +106,7 @@ static void ChangeToCall(InvokeInst *II) { II->eraseFromParent(); } -static bool MarkAliveBlocks(BasicBlock *BB, +static bool markAliveBlocks(BasicBlock *BB, SmallPtrSet &Reachable) { SmallVector Worklist; @@ -129,7 +130,7 @@ static bool MarkAliveBlocks(BasicBlock *BB, ++BBI; if (!isa(BBI)) { // Don't insert a call to llvm.trap right before the unreachable. - ChangeToUnreachable(BBI, false); + changeToUnreachable(BBI, false); Changed = true; } break; @@ -148,7 +149,7 @@ static bool MarkAliveBlocks(BasicBlock *BB, if (isa(Ptr) || (isa(Ptr) && SI->getPointerAddressSpace() == 0)) { - ChangeToUnreachable(SI, true); + changeToUnreachable(SI, true); Changed = true; break; } @@ -159,7 +160,7 @@ static bool MarkAliveBlocks(BasicBlock *BB, if (InvokeInst *II = dyn_cast(BB->getTerminator())) { Value *Callee = II->getCalledValue(); if (isa(Callee) || isa(Callee)) { - ChangeToUnreachable(II, true); + changeToUnreachable(II, true); Changed = true; } else if (II->doesNotThrow()) { if (II->use_empty() && II->onlyReadsMemory()) { @@ -168,7 +169,7 @@ static bool MarkAliveBlocks(BasicBlock *BB, II->getUnwindDest()->removePredecessor(II->getParent()); II->eraseFromParent(); } else - ChangeToCall(II); + changeToCall(II); Changed = true; } } @@ -180,12 +181,12 @@ static bool MarkAliveBlocks(BasicBlock *BB, return Changed; } -/// RemoveUnreachableBlocksFromFn - Remove blocks that are not reachable, even +/// removeUnreachableBlocksFromFn - Remove blocks that are not reachable, even /// if they are in a dead cycle. Return true if a change was made, false /// otherwise. 
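// [Editor's note: illustrative sketch only, not part of this patch.]
// The core of removeUnreachableBlocksFromFn (above): a worklist walk from the
// entry block marks the live blocks, and everything unmarked -- including
// blocks that only sit in dead cycles -- is erased.  A map of successor lists
// stands in for the real CFG.
#include <map>
#include <set>
#include <vector>

typedef std::map<int, std::vector<int> > CFG;

static void markAliveBlocks(const CFG &G, int Entry, std::set<int> &Reachable) {
  std::vector<int> Worklist(1, Entry);
  while (!Worklist.empty()) {
    int Cur = Worklist.back();
    Worklist.pop_back();
    if (!Reachable.insert(Cur).second)
      continue;                                // already visited
    CFG::const_iterator It = G.find(Cur);
    if (It == G.end())
      continue;
    for (unsigned i = 0, e = It->second.size(); i != e; ++i)
      Worklist.push_back(It->second[i]);
  }
}

static bool removeUnreachableBlocks(CFG &G, int Entry) {
  std::set<int> Reachable;
  markAliveBlocks(G, Entry, Reachable);
  bool Changed = false;
  for (CFG::iterator It = G.begin(); It != G.end();) {
    if (Reachable.count(It->first)) {
      ++It;
    } else {
      G.erase(It++);                           // safe for node-based maps
      Changed = true;
    }
  }
  return Changed;
}

// e.g. with G = {0->{1}, 1->{}, 2->{3}, 3->{2}} and Entry = 0, blocks 2 and 3
// form a dead cycle and are removed.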
-static bool RemoveUnreachableBlocksFromFn(Function &F) { +static bool removeUnreachableBlocksFromFn(Function &F) { SmallPtrSet Reachable; - bool Changed = MarkAliveBlocks(F.begin(), Reachable); + bool Changed = markAliveBlocks(F.begin(), Reachable); // If there are unreachable blocks in the CFG... if (Reachable.size() == F.size()) @@ -215,9 +216,9 @@ static bool RemoveUnreachableBlocksFromFn(Function &F) { return true; } -/// MergeEmptyReturnBlocks - If we have more than one empty (other than phi +/// mergeEmptyReturnBlocks - If we have more than one empty (other than phi /// node) return blocks, merge them together to promote recursive block merging. -static bool MergeEmptyReturnBlocks(Function &F) { +static bool mergeEmptyReturnBlocks(Function &F) { bool Changed = false; BasicBlock *RetBlock = 0; @@ -291,9 +292,10 @@ static bool MergeEmptyReturnBlocks(Function &F) { return Changed; } -/// IterativeSimplifyCFG - Call SimplifyCFG on all the blocks in the function, +/// iterativelySimplifyCFG - Call SimplifyCFG on all the blocks in the function, /// iterating until no more changes are made. -static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) { +static bool iterativelySimplifyCFG(Function &F, const DataLayout *TD, + const TargetTransformInfo *TTI) { bool Changed = false; bool LocalChange = true; while (LocalChange) { @@ -302,7 +304,7 @@ static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) { // Loop over all of the basic blocks and remove them if they are unneeded... // for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) { - if (SimplifyCFG(BBIt++, TD)) { + if (SimplifyCFG(BBIt++, TD, TTI)) { LocalChange = true; ++NumSimpl; } @@ -316,25 +318,27 @@ static bool IterativeSimplifyCFG(Function &F, const TargetData *TD) { // simplify the CFG. // bool CFGSimplifyPass::runOnFunction(Function &F) { - const TargetData *TD = getAnalysisIfAvailable(); - bool EverChanged = RemoveUnreachableBlocksFromFn(F); - EverChanged |= MergeEmptyReturnBlocks(F); - EverChanged |= IterativeSimplifyCFG(F, TD); + const DataLayout *TD = getAnalysisIfAvailable(); + const TargetTransformInfo *TTI = + getAnalysisIfAvailable(); + bool EverChanged = removeUnreachableBlocksFromFn(F); + EverChanged |= mergeEmptyReturnBlocks(F); + EverChanged |= iterativelySimplifyCFG(F, TD, TTI); // If neither pass changed anything, we're done. if (!EverChanged) return false; - // IterativeSimplifyCFG can (rarely) make some loops dead. If this happens, - // RemoveUnreachableBlocksFromFn is needed to nuke them, which means we should + // iterativelySimplifyCFG can (rarely) make some loops dead. If this happens, + // removeUnreachableBlocksFromFn is needed to nuke them, which means we should // iterate between the two optimizations. We structure the code like this to - // avoid reruning IterativeSimplifyCFG if the second pass of - // RemoveUnreachableBlocksFromFn doesn't do anything. - if (!RemoveUnreachableBlocksFromFn(F)) + // avoid reruning iterativelySimplifyCFG if the second pass of + // removeUnreachableBlocksFromFn doesn't do anything. 
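// [Editor's note: illustrative sketch only, not part of this patch.]
// The shape of CFGSimplifyPass::runOnFunction's driver loop (above): two
// transforms that can expose work for each other are re-run until neither
// reports a change.  The function pointers are hypothetical stand-ins for
// iterativelySimplifyCFG and removeUnreachableBlocksFromFn.
static bool runToFixedPoint(bool (*Simplify)(), bool (*RemoveDeadBlocks)()) {
  bool EverChanged = RemoveDeadBlocks();
  EverChanged |= Simplify();
  if (!EverChanged)
    return false;            // nothing changed at all; report "no change"

  // Simplification can (rarely) leave behind newly dead blocks, and removing
  // those can enable further simplification, so alternate until both settle.
  bool Changed;
  do {
    Changed = Simplify();
    Changed |= RemoveDeadBlocks();
  } while (Changed);
  return true;
}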
+ if (!removeUnreachableBlocksFromFn(F)) return true; do { - EverChanged = IterativeSimplifyCFG(F, TD); - EverChanged |= RemoveUnreachableBlocksFromFn(F); + EverChanged = iterativelySimplifyCFG(F, TD, TTI); + EverChanged |= removeUnreachableBlocksFromFn(F); } while (EverChanged); return true; diff --git a/lib/Transforms/Scalar/SimplifyLibCalls.cpp b/lib/Transforms/Scalar/SimplifyLibCalls.cpp index f110320..17d07cd 100644 --- a/lib/Transforms/Scalar/SimplifyLibCalls.cpp +++ b/lib/Transforms/Scalar/SimplifyLibCalls.cpp @@ -28,9 +28,10 @@ #include "llvm/ADT/Statistic.h" #include "llvm/ADT/StringMap.h" #include "llvm/Analysis/ValueTracking.h" +#include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Config/config.h" // FIXME: Shouldn't depend on host! using namespace llvm; @@ -38,6 +39,10 @@ using namespace llvm; STATISTIC(NumSimplified, "Number of library calls simplified"); STATISTIC(NumAnnotated, "Number of attributes added to library functions"); +static cl::opt UnsafeFPShrink("enable-double-float-shrink", cl::Hidden, + cl::init(false), + cl::desc("Enable unsafe double to float " + "shrinking for math lib calls")); //===----------------------------------------------------------------------===// // Optimizer Base Class //===----------------------------------------------------------------------===// @@ -48,7 +53,7 @@ namespace { class LibCallOptimization { protected: Function *Caller; - const TargetData *TD; + const DataLayout *TD; const TargetLibraryInfo *TLI; LLVMContext* Context; public: @@ -63,7 +68,7 @@ public: virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) =0; - Value *OptimizeCall(CallInst *CI, const TargetData *TD, + Value *OptimizeCall(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI, IRBuilder<> &B) { Caller = CI->getParent()->getParent(); this->TD = TD; @@ -85,22 +90,6 @@ public: // Helper Functions //===----------------------------------------------------------------------===// -/// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the -/// value is equal or not-equal to zero. -static bool IsOnlyUsedInZeroEqualityComparison(Value *V) { - for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); - UI != E; ++UI) { - if (ICmpInst *IC = dyn_cast(*UI)) - if (IC->isEquality()) - if (Constant *C = dyn_cast(IC->getOperand(1))) - if (C->isNullValue()) - continue; - // Unknown instruction. - return false; - } - return true; -} - static bool CallHasFloatingPointArgument(const CallInst *CI) { for (CallInst::const_op_iterator it = CI->op_begin(), e = CI->op_end(); it != e; ++it) { @@ -110,799 +99,62 @@ static bool CallHasFloatingPointArgument(const CallInst *CI) { return false; } -/// IsOnlyUsedInEqualityComparison - Return true if it is only used in equality -/// comparisons with With. -static bool IsOnlyUsedInEqualityComparison(Value *V, Value *With) { - for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); - UI != E; ++UI) { - if (ICmpInst *IC = dyn_cast(*UI)) - if (IC->isEquality() && IC->getOperand(1) == With) - continue; - // Unknown instruction. 
- return false; - } - return true; -} - +namespace { //===----------------------------------------------------------------------===// -// String and Memory LibCall Optimizations +// Math Library Optimizations //===----------------------------------------------------------------------===// //===---------------------------------------===// -// 'strcat' Optimizations -namespace { -struct StrCatOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // Verify the "strcat" function prototype. - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 2 || - FT->getReturnType() != B.getInt8PtrTy() || - FT->getParamType(0) != FT->getReturnType() || - FT->getParamType(1) != FT->getReturnType()) - return 0; - - // Extract some information from the instruction - Value *Dst = CI->getArgOperand(0); - Value *Src = CI->getArgOperand(1); - - // See if we can get the length of the input string. - uint64_t Len = GetStringLength(Src); - if (Len == 0) return 0; - --Len; // Unbias length. - - // Handle the simple, do-nothing case: strcat(x, "") -> x - if (Len == 0) - return Dst; - - // These optimizations require TargetData. - if (!TD) return 0; - - return EmitStrLenMemCpy(Src, Dst, Len, B); - } - - Value *EmitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B) { - // We need to find the end of the destination string. That's where the - // memory is to be moved to. We just generate a call to strlen. - Value *DstLen = EmitStrLen(Dst, B, TD, TLI); - if (!DstLen) - return 0; - - // Now that we have the destination's length, we must index into the - // destination's pointer to get the actual memcpy destination (end of - // the string .. we're concatenating). - Value *CpyDst = B.CreateGEP(Dst, DstLen, "endptr"); - - // We have enough information to now generate the memcpy call to do the - // concatenation for us. Make a memcpy to copy the nul byte with align = 1. - B.CreateMemCpy(CpyDst, Src, - ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1); - return Dst; - } -}; - -//===---------------------------------------===// -// 'strncat' Optimizations - -struct StrNCatOpt : public StrCatOpt { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // Verify the "strncat" function prototype. - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 3 || - FT->getReturnType() != B.getInt8PtrTy() || - FT->getParamType(0) != FT->getReturnType() || - FT->getParamType(1) != FT->getReturnType() || - !FT->getParamType(2)->isIntegerTy()) - return 0; - - // Extract some information from the instruction - Value *Dst = CI->getArgOperand(0); - Value *Src = CI->getArgOperand(1); - uint64_t Len; - - // We don't do anything if length is not constant - if (ConstantInt *LengthArg = dyn_cast(CI->getArgOperand(2))) - Len = LengthArg->getZExtValue(); - else - return 0; - - // See if we can get the length of the input string. - uint64_t SrcLen = GetStringLength(Src); - if (SrcLen == 0) return 0; - --SrcLen; // Unbias length. - - // Handle the simple, do-nothing cases: - // strncat(x, "", c) -> x - // strncat(x, c, 0) -> x - if (SrcLen == 0 || Len == 0) return Dst; - - // These optimizations require TargetData. 
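// [Editor's note: illustrative sketch only, not part of this patch.]
// What the (removed) StrCatOpt emitted for a strcat whose source has a known
// constant length: locate the end of the destination with strlen, then memcpy
// the source including its terminating nul byte.
#include <cstring>

void strcatBefore(char *Dst) {
  std::strcat(Dst, "abc");
}

void strcatAfter(char *Dst) {
  std::size_t DstLen = std::strlen(Dst);  // end of the destination string
  std::memcpy(Dst + DstLen, "abc", 4);    // 3 characters + the nul terminator
}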
- if (!TD) return 0; - - // We don't optimize this case - if (Len < SrcLen) return 0; - - // strncat(x, s, c) -> strcat(x, s) - // s is constant so the strcat can be optimized further - return EmitStrLenMemCpy(Src, Dst, SrcLen, B); - } -}; - -//===---------------------------------------===// -// 'strchr' Optimizations - -struct StrChrOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // Verify the "strchr" function prototype. - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 2 || - FT->getReturnType() != B.getInt8PtrTy() || - FT->getParamType(0) != FT->getReturnType() || - !FT->getParamType(1)->isIntegerTy(32)) - return 0; - - Value *SrcStr = CI->getArgOperand(0); - - // If the second operand is non-constant, see if we can compute the length - // of the input string and turn this into memchr. - ConstantInt *CharC = dyn_cast(CI->getArgOperand(1)); - if (CharC == 0) { - // These optimizations require TargetData. - if (!TD) return 0; - - uint64_t Len = GetStringLength(SrcStr); - if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32. - return 0; - - return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul. - ConstantInt::get(TD->getIntPtrType(*Context), Len), - B, TD, TLI); - } - - // Otherwise, the character is a constant, see if the first argument is - // a string literal. If so, we can constant fold. - StringRef Str; - if (!getConstantStringInfo(SrcStr, Str)) - return 0; - - // Compute the offset, make sure to handle the case when we're searching for - // zero (a weird way to spell strlen). - size_t I = CharC->getSExtValue() == 0 ? - Str.size() : Str.find(CharC->getSExtValue()); - if (I == StringRef::npos) // Didn't find the char. strchr returns null. - return Constant::getNullValue(CI->getType()); - - // strchr(s+n,c) -> gep(s+n+i,c) - return B.CreateGEP(SrcStr, B.getInt64(I), "strchr"); - } -}; - -//===---------------------------------------===// -// 'strrchr' Optimizations - -struct StrRChrOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // Verify the "strrchr" function prototype. - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 2 || - FT->getReturnType() != B.getInt8PtrTy() || - FT->getParamType(0) != FT->getReturnType() || - !FT->getParamType(1)->isIntegerTy(32)) - return 0; - - Value *SrcStr = CI->getArgOperand(0); - ConstantInt *CharC = dyn_cast(CI->getArgOperand(1)); - - // Cannot fold anything if we're not looking for a constant. - if (!CharC) - return 0; - - StringRef Str; - if (!getConstantStringInfo(SrcStr, Str)) { - // strrchr(s, 0) -> strchr(s, 0) - if (TD && CharC->isZero()) - return EmitStrChr(SrcStr, '\0', B, TD, TLI); - return 0; - } - - // Compute the offset. - size_t I = CharC->getSExtValue() == 0 ? - Str.size() : Str.rfind(CharC->getSExtValue()); - if (I == StringRef::npos) // Didn't find the char. Return null. - return Constant::getNullValue(CI->getType()); - - // strrchr(s+n,c) -> gep(s+n+i,c) - return B.CreateGEP(SrcStr, B.getInt64(I), "strrchr"); - } -}; - -//===---------------------------------------===// -// 'strcmp' Optimizations - -struct StrCmpOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // Verify the "strcmp" function prototype. 
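// [Editor's note: illustrative sketch only, not part of this patch.]
// The two StrChrOpt rewrites (above), at the C level: with a constant string
// and a constant character, the call folds to a pointer into the literal (or
// to null if the character is absent); with a non-constant character but a
// known string length, it is lowered to memchr over the string plus its nul
// byte, so that searching for '\0' still behaves like strchr.
#include <cstring>

static const char Str[] = "hello";

const char *strchrFolded() {
  return &Str[2];                      // strchr("hello", 'l') -> &"hello"[2]
}

const char *strchrAsMemChr(int C) {
  // strchr(Str, C) -> memchr(Str, C, strlen(Str) + 1)
  return static_cast<const char *>(std::memchr(Str, C, sizeof(Str)));
}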
- FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 2 || - !FT->getReturnType()->isIntegerTy(32) || - FT->getParamType(0) != FT->getParamType(1) || - FT->getParamType(0) != B.getInt8PtrTy()) - return 0; - - Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1); - if (Str1P == Str2P) // strcmp(x,x) -> 0 - return ConstantInt::get(CI->getType(), 0); - - StringRef Str1, Str2; - bool HasStr1 = getConstantStringInfo(Str1P, Str1); - bool HasStr2 = getConstantStringInfo(Str2P, Str2); - - // strcmp(x, y) -> cnst (if both x and y are constant strings) - if (HasStr1 && HasStr2) - return ConstantInt::get(CI->getType(), Str1.compare(Str2)); - - if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x - return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), - CI->getType())); - - if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x - return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType()); - - // strcmp(P, "x") -> memcmp(P, "x", 2) - uint64_t Len1 = GetStringLength(Str1P); - uint64_t Len2 = GetStringLength(Str2P); - if (Len1 && Len2) { - // These optimizations require TargetData. - if (!TD) return 0; - - return EmitMemCmp(Str1P, Str2P, - ConstantInt::get(TD->getIntPtrType(*Context), - std::min(Len1, Len2)), B, TD, TLI); - } - - return 0; - } -}; - -//===---------------------------------------===// -// 'strncmp' Optimizations - -struct StrNCmpOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // Verify the "strncmp" function prototype. - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 3 || - !FT->getReturnType()->isIntegerTy(32) || - FT->getParamType(0) != FT->getParamType(1) || - FT->getParamType(0) != B.getInt8PtrTy() || - !FT->getParamType(2)->isIntegerTy()) - return 0; - - Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1); - if (Str1P == Str2P) // strncmp(x,x,n) -> 0 - return ConstantInt::get(CI->getType(), 0); - - // Get the length argument if it is constant. - uint64_t Length; - if (ConstantInt *LengthArg = dyn_cast(CI->getArgOperand(2))) - Length = LengthArg->getZExtValue(); - else - return 0; - - if (Length == 0) // strncmp(x,y,0) -> 0 - return ConstantInt::get(CI->getType(), 0); - - if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1) - return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI); - - StringRef Str1, Str2; - bool HasStr1 = getConstantStringInfo(Str1P, Str1); - bool HasStr2 = getConstantStringInfo(Str2P, Str2); - - // strncmp(x, y) -> cnst (if both x and y are constant strings) - if (HasStr1 && HasStr2) { - StringRef SubStr1 = Str1.substr(0, Length); - StringRef SubStr2 = Str2.substr(0, Length); - return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2)); - } - - if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x - return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), - CI->getType())); - - if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x - return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType()); - - return 0; - } -}; - - -//===---------------------------------------===// -// 'strcpy' Optimizations - -struct StrCpyOpt : public LibCallOptimization { - bool OptChkCall; // True if it's optimizing a __strcpy_chk libcall. - - StrCpyOpt(bool c) : OptChkCall(c) {} - - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // Verify the "strcpy" function prototype. - unsigned NumParams = OptChkCall ? 
3 : 2; - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != NumParams || - FT->getReturnType() != FT->getParamType(0) || - FT->getParamType(0) != FT->getParamType(1) || - FT->getParamType(0) != B.getInt8PtrTy()) - return 0; - - Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); - if (Dst == Src) // strcpy(x,x) -> x - return Src; - - // These optimizations require TargetData. - if (!TD) return 0; - - // See if we can get the length of the input string. - uint64_t Len = GetStringLength(Src); - if (Len == 0) return 0; - - // We have enough information to now generate the memcpy call to do the - // concatenation for us. Make a memcpy to copy the nul byte with align = 1. - if (!OptChkCall || - !EmitMemCpyChk(Dst, Src, - ConstantInt::get(TD->getIntPtrType(*Context), Len), - CI->getArgOperand(2), B, TD, TLI)) - B.CreateMemCpy(Dst, Src, - ConstantInt::get(TD->getIntPtrType(*Context), Len), 1); - return Dst; - } -}; - -//===---------------------------------------===// -// 'stpcpy' Optimizations - -struct StpCpyOpt: public LibCallOptimization { - bool OptChkCall; // True if it's optimizing a __stpcpy_chk libcall. - - StpCpyOpt(bool c) : OptChkCall(c) {} - - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // Verify the "stpcpy" function prototype. - unsigned NumParams = OptChkCall ? 3 : 2; - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != NumParams || - FT->getReturnType() != FT->getParamType(0) || - FT->getParamType(0) != FT->getParamType(1) || - FT->getParamType(0) != B.getInt8PtrTy()) - return 0; - - // These optimizations require TargetData. - if (!TD) return 0; - - Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); - if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x) - Value *StrLen = EmitStrLen(Src, B, TD, TLI); - return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0; - } - - // See if we can get the length of the input string. - uint64_t Len = GetStringLength(Src); - if (Len == 0) return 0; - - Value *LenV = ConstantInt::get(TD->getIntPtrType(*Context), Len); - Value *DstEnd = B.CreateGEP(Dst, - ConstantInt::get(TD->getIntPtrType(*Context), - Len - 1)); - - // We have enough information to now generate the memcpy call to do the - // copy for us. Make a memcpy to copy the nul byte with align = 1. - if (!OptChkCall || !EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, - TD, TLI)) - B.CreateMemCpy(Dst, Src, LenV, 1); - return DstEnd; - } -}; - -//===---------------------------------------===// -// 'strncpy' Optimizations - -struct StrNCpyOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || - FT->getParamType(0) != FT->getParamType(1) || - FT->getParamType(0) != B.getInt8PtrTy() || - !FT->getParamType(2)->isIntegerTy()) - return 0; - - Value *Dst = CI->getArgOperand(0); - Value *Src = CI->getArgOperand(1); - Value *LenOp = CI->getArgOperand(2); - - // See if we can get the length of the input string. 
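// [Editor's note: illustrative sketch only, not part of this patch.]
// The strcpy/stpcpy rewrites (above) for a source string of known length: both
// become a memcpy that includes the nul byte; stpcpy additionally returns a
// pointer to the copied terminator instead of the destination's start.
#include <cstring>

char *strcpyLowered(char *Dst) {
  std::memcpy(Dst, "abc", 4);  // strcpy(Dst, "abc"): 3 characters + nul
  return Dst;
}

char *stpcpyLowered(char *Dst) {
  std::memcpy(Dst, "abc", 4);  // stpcpy(Dst, "abc")
  return Dst + 3;              // points at the copied nul terminator
}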
- uint64_t SrcLen = GetStringLength(Src); - if (SrcLen == 0) return 0; - --SrcLen; - - if (SrcLen == 0) { - // strncpy(x, "", y) -> memset(x, '\0', y, 1) - B.CreateMemSet(Dst, B.getInt8('\0'), LenOp, 1); - return Dst; - } - - uint64_t Len; - if (ConstantInt *LengthArg = dyn_cast(LenOp)) - Len = LengthArg->getZExtValue(); - else - return 0; - - if (Len == 0) return Dst; // strncpy(x, y, 0) -> x - - // These optimizations require TargetData. - if (!TD) return 0; - - // Let strncpy handle the zero padding - if (Len > SrcLen+1) return 0; - - // strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant] - B.CreateMemCpy(Dst, Src, - ConstantInt::get(TD->getIntPtrType(*Context), Len), 1); - - return Dst; - } -}; - -//===---------------------------------------===// -// 'strlen' Optimizations - -struct StrLenOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 1 || - FT->getParamType(0) != B.getInt8PtrTy() || - !FT->getReturnType()->isIntegerTy()) - return 0; - - Value *Src = CI->getArgOperand(0); - - // Constant folding: strlen("xyz") -> 3 - if (uint64_t Len = GetStringLength(Src)) - return ConstantInt::get(CI->getType(), Len-1); - - // strlen(x) != 0 --> *x != 0 - // strlen(x) == 0 --> *x == 0 - if (IsOnlyUsedInZeroEqualityComparison(CI)) - return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType()); - return 0; - } -}; - - -//===---------------------------------------===// -// 'strpbrk' Optimizations - -struct StrPBrkOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 2 || - FT->getParamType(0) != B.getInt8PtrTy() || - FT->getParamType(1) != FT->getParamType(0) || - FT->getReturnType() != FT->getParamType(0)) - return 0; - - StringRef S1, S2; - bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1); - bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2); - - // strpbrk(s, "") -> NULL - // strpbrk("", s) -> NULL - if ((HasS1 && S1.empty()) || (HasS2 && S2.empty())) - return Constant::getNullValue(CI->getType()); - - // Constant folding. - if (HasS1 && HasS2) { - size_t I = S1.find_first_of(S2); - if (I == std::string::npos) // No match. - return Constant::getNullValue(CI->getType()); - - return B.CreateGEP(CI->getArgOperand(0), B.getInt64(I), "strpbrk"); - } - - // strpbrk(s, "a") -> strchr(s, 'a') - if (TD && HasS2 && S2.size() == 1) - return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI); - - return 0; - } -}; - -//===---------------------------------------===// -// 'strto*' Optimizations. This handles strtol, strtod, strtof, strtoul, etc. - -struct StrToOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - FunctionType *FT = Callee->getFunctionType(); - if ((FT->getNumParams() != 2 && FT->getNumParams() != 3) || - !FT->getParamType(0)->isPointerTy() || - !FT->getParamType(1)->isPointerTy()) - return 0; - - Value *EndPtr = CI->getArgOperand(1); - if (isa(EndPtr)) { - // With a null EndPtr, this function won't capture the main argument. - // It would be readonly too, except that it still may write to errno. 
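// [Editor's note: illustrative sketch only, not part of this patch.]
// The two StrLenOpt folds (above): a call on a constant string folds to its
// length, and a strlen whose result is only compared against zero reduces to
// a test of the first character.
#include <cstring>

std::size_t strlenFolded() {
  return 3;                    // strlen("xyz") folds to the constant 3
}

bool emptyCheckBefore(const char *S) {
  return std::strlen(S) == 0;  // result only used in a zero-equality test...
}

bool emptyCheckAfter(const char *S) {
  return *S == '\0';           // ...so only the first byte needs to be read
}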
- CI->addAttribute(1, Attribute::NoCapture); - } - - return 0; - } -}; - -//===---------------------------------------===// -// 'strspn' Optimizations - -struct StrSpnOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 2 || - FT->getParamType(0) != B.getInt8PtrTy() || - FT->getParamType(1) != FT->getParamType(0) || - !FT->getReturnType()->isIntegerTy()) - return 0; - - StringRef S1, S2; - bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1); - bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2); - - // strspn(s, "") -> 0 - // strspn("", s) -> 0 - if ((HasS1 && S1.empty()) || (HasS2 && S2.empty())) - return Constant::getNullValue(CI->getType()); - - // Constant folding. - if (HasS1 && HasS2) { - size_t Pos = S1.find_first_not_of(S2); - if (Pos == StringRef::npos) Pos = S1.size(); - return ConstantInt::get(CI->getType(), Pos); - } - - return 0; - } -}; - -//===---------------------------------------===// -// 'strcspn' Optimizations - -struct StrCSpnOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 2 || - FT->getParamType(0) != B.getInt8PtrTy() || - FT->getParamType(1) != FT->getParamType(0) || - !FT->getReturnType()->isIntegerTy()) - return 0; - - StringRef S1, S2; - bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1); - bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2); - - // strcspn("", s) -> 0 - if (HasS1 && S1.empty()) - return Constant::getNullValue(CI->getType()); - - // Constant folding. - if (HasS1 && HasS2) { - size_t Pos = S1.find_first_of(S2); - if (Pos == StringRef::npos) Pos = S1.size(); - return ConstantInt::get(CI->getType(), Pos); - } - - // strcspn(s, "") -> strlen(s) - if (TD && HasS2 && S2.empty()) - return EmitStrLen(CI->getArgOperand(0), B, TD, TLI); - - return 0; - } -}; - -//===---------------------------------------===// -// 'strstr' Optimizations +// Double -> Float Shrinking Optimizations for Unary Functions like 'floor' -struct StrStrOpt : public LibCallOptimization { +struct UnaryDoubleFPOpt : public LibCallOptimization { + bool CheckRetType; + UnaryDoubleFPOpt(bool CheckReturnType): CheckRetType(CheckReturnType) {} virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 2 || - !FT->getParamType(0)->isPointerTy() || - !FT->getParamType(1)->isPointerTy() || - !FT->getReturnType()->isPointerTy()) + if (FT->getNumParams() != 1 || !FT->getReturnType()->isDoubleTy() || + !FT->getParamType(0)->isDoubleTy()) return 0; - // fold strstr(x, x) -> x. 
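// [Editor's note: illustrative sketch only, not part of this patch.]
// How the strspn/strcspn constant folds (above) compute their results when
// both operands are known strings: strspn maps to find_first_not_of and
// strcspn to find_first_of, with "not found" meaning the whole string matched.
#include <string>

std::string::size_type strspnFolded() {
  std::string S1 = "abcde", S2 = "abc";
  std::string::size_type Pos = S1.find_first_not_of(S2);   // strspn -> 3
  return Pos == std::string::npos ? S1.size() : Pos;
}

std::string::size_type strcspnFolded() {
  std::string S1 = "abcde", S2 = "dx";
  std::string::size_type Pos = S1.find_first_of(S2);       // strcspn -> 3
  return Pos == std::string::npos ? S1.size() : Pos;
}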
- if (CI->getArgOperand(0) == CI->getArgOperand(1)) - return B.CreateBitCast(CI->getArgOperand(0), CI->getType()); - - // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0 - if (TD && IsOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) { - Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI); - if (!StrLen) - return 0; - Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1), - StrLen, B, TD, TLI); - if (!StrNCmp) - return 0; - for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end(); - UI != UE; ) { - ICmpInst *Old = cast(*UI++); - Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp, - ConstantInt::getNullValue(StrNCmp->getType()), - "cmp"); - Old->replaceAllUsesWith(Cmp); - Old->eraseFromParent(); + if (CheckRetType) { + // Check if all the uses for function like 'sin' are converted to float. + for (Value::use_iterator UseI = CI->use_begin(); UseI != CI->use_end(); + ++UseI) { + FPTruncInst *Cast = dyn_cast(*UseI); + if (Cast == 0 || !Cast->getType()->isFloatTy()) + return 0; } - return CI; - } - - // See if either input string is a constant string. - StringRef SearchStr, ToFindStr; - bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr); - bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr); - - // fold strstr(x, "") -> x. - if (HasStr2 && ToFindStr.empty()) - return B.CreateBitCast(CI->getArgOperand(0), CI->getType()); - - // If both strings are known, constant fold it. - if (HasStr1 && HasStr2) { - std::string::size_type Offset = SearchStr.find(ToFindStr); - - if (Offset == StringRef::npos) // strstr("foo", "bar") -> null - return Constant::getNullValue(CI->getType()); - - // strstr("abcd", "bc") -> gep((char*)"abcd", 1) - Value *Result = CastToCStr(CI->getArgOperand(0), B); - Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr"); - return B.CreateBitCast(Result, CI->getType()); - } - - // fold strstr(x, "y") -> strchr(x, 'y'). - if (HasStr2 && ToFindStr.size() == 1) { - Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI); - return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0; } - return 0; - } -}; - - -//===---------------------------------------===// -// 'memcmp' Optimizations - -struct MemCmpOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 3 || !FT->getParamType(0)->isPointerTy() || - !FT->getParamType(1)->isPointerTy() || - !FT->getReturnType()->isIntegerTy(32)) - return 0; - Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1); - - if (LHS == RHS) // memcmp(s,s,x) -> 0 - return Constant::getNullValue(CI->getType()); - - // Make sure we have a constant length. 
- ConstantInt *LenC = dyn_cast(CI->getArgOperand(2)); - if (!LenC) return 0; - uint64_t Len = LenC->getZExtValue(); - - if (Len == 0) // memcmp(s1,s2,0) -> 0 - return Constant::getNullValue(CI->getType()); - - // memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS - if (Len == 1) { - Value *LHSV = B.CreateZExt(B.CreateLoad(CastToCStr(LHS, B), "lhsc"), - CI->getType(), "lhsv"); - Value *RHSV = B.CreateZExt(B.CreateLoad(CastToCStr(RHS, B), "rhsc"), - CI->getType(), "rhsv"); - return B.CreateSub(LHSV, RHSV, "chardiff"); - } - - // Constant folding: memcmp(x, y, l) -> cnst (all arguments are constant) - StringRef LHSStr, RHSStr; - if (getConstantStringInfo(LHS, LHSStr) && - getConstantStringInfo(RHS, RHSStr)) { - // Make sure we're not reading out-of-bounds memory. - if (Len > LHSStr.size() || Len > RHSStr.size()) - return 0; - uint64_t Ret = memcmp(LHSStr.data(), RHSStr.data(), Len); - return ConstantInt::get(CI->getType(), Ret); - } - - return 0; - } -}; - -//===---------------------------------------===// -// 'memcpy' Optimizations - -struct MemCpyOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // These optimizations require TargetData. - if (!TD) return 0; - - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || - !FT->getParamType(0)->isPointerTy() || - !FT->getParamType(1)->isPointerTy() || - FT->getParamType(2) != TD->getIntPtrType(*Context)) - return 0; - - // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1) - B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), - CI->getArgOperand(2), 1); - return CI->getArgOperand(0); - } -}; - -//===---------------------------------------===// -// 'memmove' Optimizations - -struct MemMoveOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // These optimizations require TargetData. - if (!TD) return 0; - - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || - !FT->getParamType(0)->isPointerTy() || - !FT->getParamType(1)->isPointerTy() || - FT->getParamType(2) != TD->getIntPtrType(*Context)) - return 0; - - // memmove(x, y, n) -> llvm.memmove(x, y, n, 1) - B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1), - CI->getArgOperand(2), 1); - return CI->getArgOperand(0); - } -}; - -//===---------------------------------------===// -// 'memset' Optimizations - -struct MemSetOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // These optimizations require TargetData. - if (!TD) return 0; - - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || - !FT->getParamType(0)->isPointerTy() || - !FT->getParamType(1)->isIntegerTy() || - FT->getParamType(2) != TD->getIntPtrType(*Context)) + // If this is something like 'floor((double)floatval)', convert to floorf. 
+ FPExtInst *Cast = dyn_cast(CI->getArgOperand(0)); + if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy()) return 0; - // memset(p, v, n) -> llvm.memset(p, v, n, 1) - Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false); - B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1); - return CI->getArgOperand(0); + // floor((double)floatval) -> (double)floorf(floatval) + Value *V = Cast->getOperand(0); + V = EmitUnaryFloatFnCall(V, Callee->getName(), B, Callee->getAttributes()); + return B.CreateFPExt(V, B.getDoubleTy()); } }; -//===----------------------------------------------------------------------===// -// Math Library Optimizations -//===----------------------------------------------------------------------===// - //===---------------------------------------===// // 'cos*' Optimizations - struct CosOpt : public LibCallOptimization { virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + Value *Ret = NULL; + if (UnsafeFPShrink && Callee->getName() == "cos" && + TLI->has(LibFunc::cosf)) { + UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true); + Ret = UnsafeUnaryDoubleFP.CallOptimizer(Callee, CI, B); + } + FunctionType *FT = Callee->getFunctionType(); // Just make sure this has 1 argument of FP type, which matches the // result type. if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) || !FT->getParamType(0)->isFloatingPointTy()) - return 0; + return Ret; // cos(-x) -> cos(x) Value *Op1 = CI->getArgOperand(0); @@ -910,7 +162,7 @@ struct CosOpt : public LibCallOptimization { BinaryOperator *BinExpr = cast(Op1); return B.CreateCall(Callee, BinExpr->getOperand(1), "cos"); } - return 0; + return Ret; } }; @@ -919,13 +171,20 @@ struct CosOpt : public LibCallOptimization { struct PowOpt : public LibCallOptimization { virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + Value *Ret = NULL; + if (UnsafeFPShrink && Callee->getName() == "pow" && + TLI->has(LibFunc::powf)) { + UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true); + Ret = UnsafeUnaryDoubleFP.CallOptimizer(Callee, CI, B); + } + FunctionType *FT = Callee->getFunctionType(); // Just make sure this has 2 arguments of the same FP type, which match the // result type. if (FT->getNumParams() != 2 || FT->getReturnType() != FT->getParamType(0) || FT->getParamType(0) != FT->getParamType(1) || !FT->getParamType(0)->isFloatingPointTy()) - return 0; + return Ret; Value *Op1 = CI->getArgOperand(0), *Op2 = CI->getArgOperand(1); if (ConstantFP *Op1C = dyn_cast(Op1)) { @@ -936,7 +195,7 @@ struct PowOpt : public LibCallOptimization { } ConstantFP *Op2C = dyn_cast(Op2); - if (Op2C == 0) return 0; + if (Op2C == 0) return Ret; if (Op2C->getValueAPF().isZero()) // pow(x, 0.0) -> 1.0 return ConstantFP::get(CI->getType(), 1.0); @@ -974,12 +233,19 @@ struct PowOpt : public LibCallOptimization { struct Exp2Opt : public LibCallOptimization { virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + Value *Ret = NULL; + if (UnsafeFPShrink && Callee->getName() == "exp2" && + TLI->has(LibFunc::exp2)) { + UnaryDoubleFPOpt UnsafeUnaryDoubleFP(true); + Ret = UnsafeUnaryDoubleFP.CallOptimizer(Callee, CI, B); + } + FunctionType *FT = Callee->getFunctionType(); // Just make sure this has 1 argument of FP type, which matches the // result type. 
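The split between the plain UnaryDoubleFPOpt shrink and the new UnsafeFPShrink path can be seen at the source level: shrinking the rounding-style functions is value-preserving, while shrinking something like sin changes the result by up to an ulp, which is why it is opt-in. A standalone illustration (the exact values printed are target-dependent):

    #include <cassert>
    #include <cmath>
    #include <cstdio>

    int main() {
      float F = 2.5f;

      // Safe shrink: floor of a float, computed in double, is always exactly
      // representable as a float, so floor((double)f) == (double)floorf(f).
      assert(std::floor((double)F) == (double)std::floor(F));

      // Unsafe shrink (gated behind UnsafeFPShrink above): (float)sin((double)f)
      // and the float overload of sin are both rounded approximations and may
      // disagree in the last bit, so the rewrite can change program output.
      float ViaDouble = (float)std::sin((double)F);
      float Direct = std::sin(F);   // conceptually sinf(F)
      std::printf("sin via double: %.9g  direct: %.9g\n", ViaDouble, Direct);
      return 0;
    }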
if (FT->getNumParams() != 1 || FT->getReturnType() != FT->getParamType(0) || !FT->getParamType(0)->isFloatingPointTy()) - return 0; + return Ret; Value *Op = CI->getArgOperand(0); // Turn exp2(sitofp(x)) -> ldexp(1.0, sext(x)) if sizeof(x) <= 32 @@ -1016,29 +282,7 @@ struct Exp2Opt : public LibCallOptimization { return CI; } - return 0; - } -}; - -//===---------------------------------------===// -// Double -> Float Shrinking Optimizations for Unary Functions like 'floor' - -struct UnaryDoubleFPOpt : public LibCallOptimization { - virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - FunctionType *FT = Callee->getFunctionType(); - if (FT->getNumParams() != 1 || !FT->getReturnType()->isDoubleTy() || - !FT->getParamType(0)->isDoubleTy()) - return 0; - - // If this is something like 'floor((double)floatval)', convert to floorf. - FPExtInst *Cast = dyn_cast(CI->getArgOperand(0)); - if (Cast == 0 || !Cast->getOperand(0)->getType()->isFloatTy()) - return 0; - - // floor((double)floatval) -> (double)floorf(floatval) - Value *V = Cast->getOperand(0); - V = EmitUnaryFloatFnCall(V, Callee->getName(), B, Callee->getAttributes()); - return B.CreateFPExt(V, B.getDoubleTy()); + return Ret; } }; @@ -1063,8 +307,8 @@ struct FFSOpt : public LibCallOptimization { // Constant fold. if (ConstantInt *CI = dyn_cast(Op)) { - if (CI->getValue() == 0) // ffs(0) -> 0. - return Constant::getNullValue(CI->getType()); + if (CI->isZero()) // ffs(0) -> 0. + return B.getInt32(0); // ffs(c) -> cttz(c)+1 return B.getInt32(CI->getValue().countTrailingZeros() + 1); } @@ -1267,7 +511,7 @@ struct SPrintFOpt : public LibCallOptimization { if (FormatStr[i] == '%') return 0; // we found a format specifier, bail out. - // These optimizations require TargetData. + // These optimizations require DataLayout. if (!TD) return 0; // sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1) @@ -1297,7 +541,7 @@ struct SPrintFOpt : public LibCallOptimization { } if (FormatStr[1] == 's') { - // These optimizations require TargetData. + // These optimizations require DataLayout. if (!TD) return 0; // sprintf(dest, "%s", str) -> llvm.memcpy(dest, str, strlen(str)+1, 1) @@ -1385,7 +629,7 @@ struct FWriteOpt : public LibCallOptimization { struct FPutsOpt : public LibCallOptimization { virtual Value *CallOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { - // These optimizations require TargetData. + // These optimizations require DataLayout. if (!TD) return 0; // Require two pointers. Also, we can't optimize if return value is used. @@ -1422,7 +666,7 @@ struct FPrintFOpt : public LibCallOptimization { if (FormatStr[i] == '%') // Could handle %% -> % if we cared. return 0; // We found a format specifier. - // These optimizations require TargetData. + // These optimizations require DataLayout. 
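The ffs fold above ("ffs(c) -> cttz(c) + 1", with ffs(0) -> 0) is easy to sanity-check with the GCC/Clang builtins, which this sketch assumes are available:

    #include <cassert>

    int main() {
      // ffs returns the 1-based index of the least significant set bit, or 0 for 0;
      // for non-zero inputs that is exactly count-trailing-zeros plus one.
      for (unsigned I = 1; I != 0x100; ++I)
        assert(__builtin_ffs((int)I) == __builtin_ctz(I) + 1);
      assert(__builtin_ffs(0) == 0);   // matches the ffs(0) -> 0 constant fold
      return 0;
    }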
if (!TD) return 0; Value *NewCI = EmitFWrite(CI->getArgOperand(1), @@ -1524,17 +768,9 @@ namespace { TargetLibraryInfo *TLI; StringMap Optimizations; - // String and Memory LibCall Optimizations - StrCatOpt StrCat; StrNCatOpt StrNCat; StrChrOpt StrChr; StrRChrOpt StrRChr; - StrCmpOpt StrCmp; StrNCmpOpt StrNCmp; - StrCpyOpt StrCpy; StrCpyOpt StrCpyChk; - StpCpyOpt StpCpy; StpCpyOpt StpCpyChk; - StrNCpyOpt StrNCpy; - StrLenOpt StrLen; StrPBrkOpt StrPBrk; - StrToOpt StrTo; StrSpnOpt StrSpn; StrCSpnOpt StrCSpn; StrStrOpt StrStr; - MemCmpOpt MemCmp; MemCpyOpt MemCpy; MemMoveOpt MemMove; MemSetOpt MemSet; // Math Library Optimizations - CosOpt Cos; PowOpt Pow; Exp2Opt Exp2; UnaryDoubleFPOpt UnaryDoubleFP; + CosOpt Cos; PowOpt Pow; Exp2Opt Exp2; + UnaryDoubleFPOpt UnaryDoubleFP, UnsafeUnaryDoubleFP; // Integer Optimizations FFSOpt FFS; AbsOpt Abs; IsDigitOpt IsDigit; IsAsciiOpt IsAscii; ToAsciiOpt ToAscii; @@ -1546,11 +782,13 @@ namespace { bool Modified; // This is only used by doInitialization. public: static char ID; // Pass identification - SimplifyLibCalls() : FunctionPass(ID), StrCpy(false), StrCpyChk(true), - StpCpy(false), StpCpyChk(true) { + SimplifyLibCalls() : FunctionPass(ID), UnaryDoubleFP(false), + UnsafeUnaryDoubleFP(true) { initializeSimplifyLibCallsPass(*PassRegistry::getPassRegistry()); } void AddOpt(LibFunc::Func F, LibCallOptimization* Opt); + void AddOpt(LibFunc::Func F1, LibFunc::Func F2, LibCallOptimization* Opt); + void InitOptimizations(); bool runOnFunction(Function &F); @@ -1586,40 +824,15 @@ void SimplifyLibCalls::AddOpt(LibFunc::Func F, LibCallOptimization* Opt) { Optimizations[TLI->getName(F)] = Opt; } +void SimplifyLibCalls::AddOpt(LibFunc::Func F1, LibFunc::Func F2, + LibCallOptimization* Opt) { + if (TLI->has(F1) && TLI->has(F2)) + Optimizations[TLI->getName(F1)] = Opt; +} + /// Optimizations - Populate the Optimizations map with all the optimizations /// we know. void SimplifyLibCalls::InitOptimizations() { - // String and Memory LibCall Optimizations - Optimizations["strcat"] = &StrCat; - Optimizations["strncat"] = &StrNCat; - Optimizations["strchr"] = &StrChr; - Optimizations["strrchr"] = &StrRChr; - Optimizations["strcmp"] = &StrCmp; - Optimizations["strncmp"] = &StrNCmp; - Optimizations["strcpy"] = &StrCpy; - Optimizations["strncpy"] = &StrNCpy; - Optimizations["stpcpy"] = &StpCpy; - Optimizations["strlen"] = &StrLen; - Optimizations["strpbrk"] = &StrPBrk; - Optimizations["strtol"] = &StrTo; - Optimizations["strtod"] = &StrTo; - Optimizations["strtof"] = &StrTo; - Optimizations["strtoul"] = &StrTo; - Optimizations["strtoll"] = &StrTo; - Optimizations["strtold"] = &StrTo; - Optimizations["strtoull"] = &StrTo; - Optimizations["strspn"] = &StrSpn; - Optimizations["strcspn"] = &StrCSpn; - Optimizations["strstr"] = &StrStr; - Optimizations["memcmp"] = &MemCmp; - AddOpt(LibFunc::memcpy, &MemCpy); - Optimizations["memmove"] = &MemMove; - AddOpt(LibFunc::memset, &MemSet); - - // _chk variants of String and Memory LibCall Optimizations. 
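The two-argument AddOpt overload introduced above registers a shrinking optimization only when both the double- and single-precision entry points exist in the target library, because the rewrite emits a call to the latter. The same gating pattern rendered with standard containers (the names below are stand-ins, not the pass's types):

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>

    static std::set<std::string> Available;                    // plays TLI->has()
    static std::map<std::string, std::string> Optimizations;   // plays the StringMap

    static void AddOpt(const std::string &F1, const std::string &F2,
                       const std::string &Opt) {
      if (Available.count(F1) && Available.count(F2))
        Optimizations[F1] = Opt;   // keyed by the double-precision name
    }

    int main() {
      Available.insert("floor");
      Available.insert("floorf");
      Available.insert("sin");     // note: no "sinf" in this hypothetical target

      AddOpt("floor", "floorf", "UnaryDoubleFP");       // both present -> registered
      AddOpt("sin",   "sinf",   "UnsafeUnaryDoubleFP"); // sinf missing -> skipped
      assert(Optimizations.count("floor") == 1);
      assert(Optimizations.count("sin") == 0);
      return 0;
    }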
- Optimizations["__strcpy_chk"] = &StrCpyChk; - Optimizations["__stpcpy_chk"] = &StpCpyChk; - // Math Library Optimizations Optimizations["cosf"] = &Cos; Optimizations["cos"] = &Cos; @@ -1641,16 +854,37 @@ void SimplifyLibCalls::InitOptimizations() { Optimizations["llvm.exp2.f64"] = &Exp2; Optimizations["llvm.exp2.f32"] = &Exp2; - if (TLI->has(LibFunc::floor) && TLI->has(LibFunc::floorf)) - Optimizations["floor"] = &UnaryDoubleFP; - if (TLI->has(LibFunc::ceil) && TLI->has(LibFunc::ceilf)) - Optimizations["ceil"] = &UnaryDoubleFP; - if (TLI->has(LibFunc::round) && TLI->has(LibFunc::roundf)) - Optimizations["round"] = &UnaryDoubleFP; - if (TLI->has(LibFunc::rint) && TLI->has(LibFunc::rintf)) - Optimizations["rint"] = &UnaryDoubleFP; - if (TLI->has(LibFunc::nearbyint) && TLI->has(LibFunc::nearbyintf)) - Optimizations["nearbyint"] = &UnaryDoubleFP; + AddOpt(LibFunc::ceil, LibFunc::ceilf, &UnaryDoubleFP); + AddOpt(LibFunc::fabs, LibFunc::fabsf, &UnaryDoubleFP); + AddOpt(LibFunc::floor, LibFunc::floorf, &UnaryDoubleFP); + AddOpt(LibFunc::rint, LibFunc::rintf, &UnaryDoubleFP); + AddOpt(LibFunc::round, LibFunc::roundf, &UnaryDoubleFP); + AddOpt(LibFunc::nearbyint, LibFunc::nearbyintf, &UnaryDoubleFP); + AddOpt(LibFunc::trunc, LibFunc::truncf, &UnaryDoubleFP); + + if(UnsafeFPShrink) { + AddOpt(LibFunc::acos, LibFunc::acosf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::acosh, LibFunc::acoshf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::asin, LibFunc::asinf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::asinh, LibFunc::asinhf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::atan, LibFunc::atanf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::atanh, LibFunc::atanhf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::cbrt, LibFunc::cbrtf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::cosh, LibFunc::coshf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::exp, LibFunc::expf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::exp10, LibFunc::exp10f, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::expm1, LibFunc::expm1f, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::log, LibFunc::logf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::log10, LibFunc::log10f, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::log1p, LibFunc::log1pf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::log2, LibFunc::log2f, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::logb, LibFunc::logbf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::sin, LibFunc::sinf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::sinh, LibFunc::sinhf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::sqrt, LibFunc::sqrtf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::tan, LibFunc::tanf, &UnsafeUnaryDoubleFP); + AddOpt(LibFunc::tanh, LibFunc::tanhf, &UnsafeUnaryDoubleFP); + } // Integer Optimizations Optimizations["ffs"] = &FFS; @@ -1681,7 +915,7 @@ bool SimplifyLibCalls::runOnFunction(Function &F) { if (Optimizations.empty()) InitOptimizations(); - const TargetData *TD = getAnalysisIfAvailable(); + const DataLayout *TD = getAnalysisIfAvailable(); IRBuilder<> Builder(F.getContext()); diff --git a/lib/Transforms/Utils/AddrModeMatcher.cpp b/lib/Transforms/Utils/AddrModeMatcher.cpp index d831452..6815e41 100644 --- a/lib/Transforms/Utils/AddrModeMatcher.cpp +++ b/lib/Transforms/Utils/AddrModeMatcher.cpp @@ -16,7 +16,7 @@ #include "llvm/GlobalValue.h" #include "llvm/Instruction.h" #include "llvm/Assembly/Writer.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Support/Debug.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/PatternMatch.h" @@ -55,10 +55,12 @@ void ExtAddrMode::print(raw_ostream &OS) const { OS << ']'; } +#if 
!defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) void ExtAddrMode::dump() const { print(dbgs()); dbgs() << '\n'; } +#endif /// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode. @@ -219,7 +221,7 @@ bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode, unsigned VariableScale = 0; int64_t ConstantOffset = 0; - const TargetData *TD = TLI.getTargetData(); + const DataLayout *TD = TLI.getDataLayout(); gep_type_iterator GTI = gep_type_begin(AddrInst); for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) { if (StructType *STy = dyn_cast(*GTI)) { diff --git a/lib/Transforms/Utils/BasicBlockUtils.cpp b/lib/Transforms/Utils/BasicBlockUtils.cpp index 2679b93..9fea113 100644 --- a/lib/Transforms/Utils/BasicBlockUtils.cpp +++ b/lib/Transforms/Utils/BasicBlockUtils.cpp @@ -22,7 +22,7 @@ #include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/MemoryDependenceAnalysis.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Support/ErrorHandling.h" @@ -94,7 +94,7 @@ void llvm::FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P) { /// is dead. Also recursively delete any operands that become dead as /// a result. This includes tracing the def-use list from the PHI to see if /// it is ultimately unused or if it reaches an unused cycle. -bool llvm::DeleteDeadPHIs(BasicBlock *BB) { +bool llvm::DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI) { // Recursively deleting a PHI may cause multiple PHIs to be deleted // or RAUW'd undef, so use an array of WeakVH for the PHIs to delete. SmallVector PHIs; @@ -105,7 +105,7 @@ bool llvm::DeleteDeadPHIs(BasicBlock *BB) { bool Changed = false; for (unsigned i = 0, e = PHIs.size(); i != e; ++i) if (PHINode *PN = dyn_cast_or_null(PHIs[i].operator Value*())) - Changed |= RecursivelyDeleteDeadPHINode(PN); + Changed |= RecursivelyDeleteDeadPHINode(PN, TLI); return Changed; } @@ -687,3 +687,42 @@ ReturnInst *llvm::FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB, return cast(NewRet); } +/// SplitBlockAndInsertIfThen - Split the containing block at the +/// specified instruction - everything before and including Cmp stays +/// in the old basic block, and everything after Cmp is moved to a +/// new block. The two blocks are connected by a conditional branch +/// (with value of Cmp being the condition). +/// Before: +/// Head +/// Cmp +/// Tail +/// After: +/// Head +/// Cmp +/// if (Cmp) +/// ThenBlock +/// Tail +/// +/// If Unreachable is true, then ThenBlock ends with +/// UnreachableInst, otherwise it branches to Tail. +/// Returns the NewBasicBlock's terminator. 
+ +TerminatorInst *llvm::SplitBlockAndInsertIfThen(Instruction *Cmp, + bool Unreachable, MDNode *BranchWeights) { + Instruction *SplitBefore = Cmp->getNextNode(); + BasicBlock *Head = SplitBefore->getParent(); + BasicBlock *Tail = Head->splitBasicBlock(SplitBefore); + TerminatorInst *HeadOldTerm = Head->getTerminator(); + LLVMContext &C = Head->getContext(); + BasicBlock *ThenBlock = BasicBlock::Create(C, "", Head->getParent(), Tail); + TerminatorInst *CheckTerm; + if (Unreachable) + CheckTerm = new UnreachableInst(C, ThenBlock); + else + CheckTerm = BranchInst::Create(Tail, ThenBlock); + BranchInst *HeadNewTerm = + BranchInst::Create(/*ifTrue*/ThenBlock, /*ifFalse*/Tail, Cmp); + HeadNewTerm->setMetadata(LLVMContext::MD_prof, BranchWeights); + ReplaceInstWithInst(HeadOldTerm, HeadNewTerm); + return CheckTerm; +} diff --git a/lib/Transforms/Utils/BuildLibCalls.cpp b/lib/Transforms/Utils/BuildLibCalls.cpp index e13fd71..74b2ee1 100644 --- a/lib/Transforms/Utils/BuildLibCalls.cpp +++ b/lib/Transforms/Utils/BuildLibCalls.cpp @@ -22,7 +22,7 @@ #include "llvm/Module.h" #include "llvm/Type.h" #include "llvm/ADT/SmallString.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" using namespace llvm; @@ -34,19 +34,22 @@ Value *llvm::CastToCStr(Value *V, IRBuilder<> &B) { /// EmitStrLen - Emit a call to the strlen function to the builder, for the /// specified pointer. This always returns an integer value of size intptr_t. -Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD, +Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::strlen)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[2]; - AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(~0u, Attribute::ReadOnly | - Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture); + Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind }; + AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + ArrayRef(AVs, 2)); LLVMContext &Context = B.GetInsertBlock()->getContext(); - Constant *StrLen = M->getOrInsertFunction("strlen", AttrListPtr::get(AWI), + Constant *StrLen = M->getOrInsertFunction("strlen", + AttrListPtr::get(M->getContext(), + AWI), TD->getIntPtrType(Context), B.getInt8PtrTy(), NULL); @@ -61,18 +64,21 @@ Value *llvm::EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD, /// specified pointer. Ptr is required to be some pointer type, MaxLen must /// be of size_t type, and the return value has 'intptr_t' type. 
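SplitBlockAndInsertIfThen is the kind of helper an instrumentation pass uses to guard a single instruction. The program below is a hypothetical, self-contained use of it to guard a load with a null check that traps; it assumes the 3.2-era headers and APIs used elsewhere in this patch (getGlobalContext, the top-level llvm/IRBuilder.h, and a matching declaration of the new helper in BasicBlockUtils.h), so treat it as a sketch rather than verified code:

    #include "llvm/LLVMContext.h"
    #include "llvm/Module.h"
    #include "llvm/Function.h"
    #include "llvm/BasicBlock.h"
    #include "llvm/Constants.h"
    #include "llvm/DerivedTypes.h"
    #include "llvm/Instructions.h"
    #include "llvm/Intrinsics.h"
    #include "llvm/IRBuilder.h"
    #include "llvm/Support/raw_ostream.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    #include <vector>
    using namespace llvm;

    int main() {
      LLVMContext &C = getGlobalContext();
      Module M("demo", C);

      // int guarded_load(int *P)
      Type *I32 = Type::getInt32Ty(C);
      std::vector<Type*> Params(1, PointerType::getUnqual(I32));
      Function *F = Function::Create(FunctionType::get(I32, Params, false),
                                     Function::ExternalLinkage, "guarded_load", &M);
      BasicBlock *Entry = BasicBlock::Create(C, "entry", F);

      IRBuilder<> B(Entry);
      Value *Ptr = &*F->arg_begin();
      Value *IsNull = B.CreateICmpEQ(Ptr, Constant::getNullValue(Ptr->getType()),
                                     "isnull");
      Value *Val = B.CreateLoad(Ptr, "val");
      B.CreateRet(Val);

      // Split after the compare: the load/ret become the tail block, and a new
      // "then" block (taken when isnull is true) is created, ending in unreachable.
      TerminatorInst *ThenTerm =
          SplitBlockAndInsertIfThen(cast<Instruction>(IsNull),
                                    /*Unreachable=*/true, /*BranchWeights=*/0);
      IRBuilder<> ThenB(ThenTerm);
      ThenB.CreateCall(Intrinsic::getDeclaration(&M, Intrinsic::trap));

      M.print(outs(), 0);
      return 0;
    }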
Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI) { + const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::strnlen)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[2]; - AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(~0u, Attribute::ReadOnly | - Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture); + Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind }; + AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + ArrayRef(AVs, 2)); LLVMContext &Context = B.GetInsertBlock()->getContext(); - Constant *StrNLen = M->getOrInsertFunction("strnlen", AttrListPtr::get(AWI), + Constant *StrNLen = M->getOrInsertFunction("strnlen", + AttrListPtr::get(M->getContext(), + AWI), TD->getIntPtrType(Context), B.getInt8PtrTy(), TD->getIntPtrType(Context), @@ -88,17 +94,21 @@ Value *llvm::EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B, /// specified pointer and character. Ptr is required to be some pointer type, /// and the return value has 'i8*' type. Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI) { + const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::strchr)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); + Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind }; AttributeWithIndex AWI = - AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind); + AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + ArrayRef(AVs, 2)); Type *I8Ptr = B.getInt8PtrTy(); Type *I32Ty = B.getInt32Ty(); - Constant *StrChr = M->getOrInsertFunction("strchr", AttrListPtr::get(AWI), + Constant *StrChr = M->getOrInsertFunction("strchr", + AttrListPtr::get(M->getContext(), + AWI), I8Ptr, I8Ptr, I32Ty, NULL); CallInst *CI = B.CreateCall2(StrChr, CastToCStr(Ptr, B), ConstantInt::get(I32Ty, C), "strchr"); @@ -109,20 +119,23 @@ Value *llvm::EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, /// EmitStrNCmp - Emit a call to the strncmp function to the builder. 
Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, - IRBuilder<> &B, const TargetData *TD, + IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::strncmp)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[3]; - AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture); - AWI[2] = AttributeWithIndex::get(~0u, Attribute::ReadOnly | - Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture); + AWI[1] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture); + Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind }; + AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + ArrayRef(AVs, 2)); LLVMContext &Context = B.GetInsertBlock()->getContext(); - Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI), + Value *StrNCmp = M->getOrInsertFunction("strncmp", + AttrListPtr::get(M->getContext(), + AWI), B.getInt32Ty(), B.getInt8PtrTy(), B.getInt8PtrTy(), @@ -139,17 +152,19 @@ Value *llvm::EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, /// EmitStrCpy - Emit a call to the strcpy function to the builder, for the /// specified pointer arguments. Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI, + const DataLayout *TD, const TargetLibraryInfo *TLI, StringRef Name) { if (!TLI->has(LibFunc::strcpy)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[2]; - AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture); + AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::NoUnwind); Type *I8Ptr = B.getInt8PtrTy(); - Value *StrCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI), + Value *StrCpy = M->getOrInsertFunction(Name, + AttrListPtr::get(M->getContext(), AWI), I8Ptr, I8Ptr, I8Ptr, NULL); CallInst *CI = B.CreateCall2(StrCpy, CastToCStr(Dst, B), CastToCStr(Src, B), Name); @@ -161,17 +176,20 @@ Value *llvm::EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B, /// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the /// specified pointer arguments. 
Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len, - IRBuilder<> &B, const TargetData *TD, + IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI, StringRef Name) { if (!TLI->has(LibFunc::strncpy)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[2]; - AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture); + AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::NoUnwind); Type *I8Ptr = B.getInt8PtrTy(); - Value *StrNCpy = M->getOrInsertFunction(Name, AttrListPtr::get(AWI), + Value *StrNCpy = M->getOrInsertFunction(Name, + AttrListPtr::get(M->getContext(), + AWI), I8Ptr, I8Ptr, I8Ptr, Len->getType(), NULL); CallInst *CI = B.CreateCall3(StrNCpy, CastToCStr(Dst, B), CastToCStr(Src, B), @@ -185,17 +203,18 @@ Value *llvm::EmitStrNCpy(Value *Dst, Value *Src, Value *Len, /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src /// are pointers. Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize, - IRBuilder<> &B, const TargetData *TD, + IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::memcpy_chk)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI; - AWI = AttributeWithIndex::get(~0u, Attribute::NoUnwind); + AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::NoUnwind); LLVMContext &Context = B.GetInsertBlock()->getContext(); Value *MemCpy = M->getOrInsertFunction("__memcpy_chk", - AttrListPtr::get(AWI), + AttrListPtr::get(M->getContext(), AWI), B.getInt8PtrTy(), B.getInt8PtrTy(), B.getInt8PtrTy(), @@ -212,16 +231,19 @@ Value *llvm::EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize, /// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value. Value *llvm::EmitMemChr(Value *Ptr, Value *Val, - Value *Len, IRBuilder<> &B, const TargetData *TD, + Value *Len, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::memchr)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI; - AWI = AttributeWithIndex::get(~0u, Attribute::ReadOnly | Attribute::NoUnwind); + Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind }; + AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + ArrayRef(AVs, 2)); LLVMContext &Context = B.GetInsertBlock()->getContext(); - Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(AWI), + Value *MemChr = M->getOrInsertFunction("memchr", + AttrListPtr::get(M->getContext(), AWI), B.getInt8PtrTy(), B.getInt8PtrTy(), B.getInt32Ty(), @@ -237,20 +259,22 @@ Value *llvm::EmitMemChr(Value *Ptr, Value *Val, /// EmitMemCmp - Emit a call to the memcmp function. 
Value *llvm::EmitMemCmp(Value *Ptr1, Value *Ptr2, - Value *Len, IRBuilder<> &B, const TargetData *TD, + Value *Len, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::memcmp)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[3]; - AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture); - AWI[2] = AttributeWithIndex::get(~0u, Attribute::ReadOnly | - Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture); + AWI[1] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture); + Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind }; + AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + ArrayRef(AVs, 2)); LLVMContext &Context = B.GetInsertBlock()->getContext(); - Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI), + Value *MemCmp = M->getOrInsertFunction("memcmp", + AttrListPtr::get(M->getContext(), AWI), B.getInt32Ty(), B.getInt8PtrTy(), B.getInt8PtrTy(), @@ -294,7 +318,7 @@ Value *llvm::EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B, /// EmitPutChar - Emit a call to the putchar function. This assumes that Char /// is an integer. -Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD, +Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::putchar)) return 0; @@ -316,17 +340,19 @@ Value *llvm::EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD, /// EmitPutS - Emit a call to the puts function. This assumes that Str is /// some pointer. -Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD, +Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::puts)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[2]; - AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture); + AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::NoUnwind); - Value *PutS = M->getOrInsertFunction("puts", AttrListPtr::get(AWI), + Value *PutS = M->getOrInsertFunction("puts", + AttrListPtr::get(M->getContext(), AWI), B.getInt32Ty(), B.getInt8PtrTy(), NULL); @@ -339,17 +365,19 @@ Value *llvm::EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD, /// EmitFPutC - Emit a call to the fputc function. This assumes that Char is /// an integer and File is a pointer to FILE. 
Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI) { + const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::fputc)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[2]; - AWI[0] = AttributeWithIndex::get(2, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(~0u, Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture); + AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::NoUnwind); Constant *F; if (File->getType()->isPointerTy()) - F = M->getOrInsertFunction("fputc", AttrListPtr::get(AWI), + F = M->getOrInsertFunction("fputc", + AttrListPtr::get(M->getContext(), AWI), B.getInt32Ty(), B.getInt32Ty(), File->getType(), NULL); @@ -370,19 +398,21 @@ Value *llvm::EmitFPutC(Value *Char, Value *File, IRBuilder<> &B, /// EmitFPutS - Emit a call to the puts function. Str is required to be a /// pointer and File is a pointer to FILE. Value *llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, - const TargetData *TD, const TargetLibraryInfo *TLI) { + const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::fputs)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[3]; - AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(2, Attribute::NoCapture); - AWI[2] = AttributeWithIndex::get(~0u, Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture); + AWI[1] = AttributeWithIndex::get(M->getContext(), 2, Attributes::NoCapture); + AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::NoUnwind); StringRef FPutsName = TLI->getName(LibFunc::fputs); Constant *F; if (File->getType()->isPointerTy()) - F = M->getOrInsertFunction(FPutsName, AttrListPtr::get(AWI), + F = M->getOrInsertFunction(FPutsName, + AttrListPtr::get(M->getContext(), AWI), B.getInt32Ty(), B.getInt8PtrTy(), File->getType(), NULL); @@ -400,21 +430,23 @@ Value *llvm::EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, /// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is /// a pointer, Size is an 'intptr_t', and File is a pointer to FILE. 
Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File, - IRBuilder<> &B, const TargetData *TD, + IRBuilder<> &B, const DataLayout *TD, const TargetLibraryInfo *TLI) { if (!TLI->has(LibFunc::fwrite)) return 0; Module *M = B.GetInsertBlock()->getParent()->getParent(); AttributeWithIndex AWI[3]; - AWI[0] = AttributeWithIndex::get(1, Attribute::NoCapture); - AWI[1] = AttributeWithIndex::get(4, Attribute::NoCapture); - AWI[2] = AttributeWithIndex::get(~0u, Attribute::NoUnwind); + AWI[0] = AttributeWithIndex::get(M->getContext(), 1, Attributes::NoCapture); + AWI[1] = AttributeWithIndex::get(M->getContext(), 4, Attributes::NoCapture); + AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex, + Attributes::NoUnwind); LLVMContext &Context = B.GetInsertBlock()->getContext(); StringRef FWriteName = TLI->getName(LibFunc::fwrite); Constant *F; if (File->getType()->isPointerTy()) - F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI), + F = M->getOrInsertFunction(FWriteName, + AttrListPtr::get(M->getContext(), AWI), TD->getIntPtrType(Context), B.getInt8PtrTy(), TD->getIntPtrType(Context), @@ -436,9 +468,9 @@ Value *llvm::EmitFWrite(Value *Ptr, Value *Size, Value *File, SimplifyFortifiedLibCalls::~SimplifyFortifiedLibCalls() { } -bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const TargetData *TD, +bool SimplifyFortifiedLibCalls::fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI) { - // We really need TargetData for later. + // We really need DataLayout for later. if (!TD) return false; this->CI = CI; diff --git a/lib/Transforms/Utils/BypassSlowDivision.cpp b/lib/Transforms/Utils/BypassSlowDivision.cpp new file mode 100644 index 0000000..bee2f7b --- /dev/null +++ b/lib/Transforms/Utils/BypassSlowDivision.cpp @@ -0,0 +1,262 @@ +//===-- BypassSlowDivision.cpp - Bypass slow division ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains an optimization for div and rem on architectures that +// execute short instructions significantly faster than longer instructions. +// For example, on Intel Atom 32-bit divides are slow enough that during +// runtime it is profitable to check the value of the operands, and if they are +// positive and less than 256 use an unsigned 8-bit divide. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "bypass-slow-division" +#include "llvm/Instructions.h" +#include "llvm/Function.h" +#include "llvm/IRBuilder.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/Transforms/Utils/BypassSlowDivision.h" + +using namespace llvm; + +namespace { + struct DivOpInfo { + bool SignedOp; + Value *Dividend; + Value *Divisor; + + DivOpInfo(bool InSignedOp, Value *InDividend, Value *InDivisor) + : SignedOp(InSignedOp), Dividend(InDividend), Divisor(InDivisor) {} + }; + + struct DivPhiNodes { + PHINode *Quotient; + PHINode *Remainder; + + DivPhiNodes(PHINode *InQuotient, PHINode *InRemainder) + : Quotient(InQuotient), Remainder(InRemainder) {} + }; +} + +namespace llvm { + template<> + struct DenseMapInfo { + static bool isEqual(const DivOpInfo &Val1, const DivOpInfo &Val2) { + return Val1.SignedOp == Val2.SignedOp && + Val1.Dividend == Val2.Dividend && + Val1.Divisor == Val2.Divisor; + } + + static DivOpInfo getEmptyKey() { + return DivOpInfo(false, 0, 0); + } + + static DivOpInfo getTombstoneKey() { + return DivOpInfo(true, 0, 0); + } + + static unsigned getHashValue(const DivOpInfo &Val) { + return (unsigned)(reinterpret_cast(Val.Dividend) ^ + reinterpret_cast(Val.Divisor)) ^ + (unsigned)Val.SignedOp; + } + }; + + typedef DenseMap DivCacheTy; +} + +// insertFastDiv - Substitutes the div/rem instruction with code that checks the +// value of the operands and uses a shorter-faster div/rem instruction when +// possible and the longer-slower div/rem instruction otherwise. +static bool insertFastDiv(Function &F, + Function::iterator &I, + BasicBlock::iterator &J, + IntegerType *BypassType, + bool UseDivOp, + bool UseSignedOp, + DivCacheTy &PerBBDivCache) { + // Get instruction operands + Instruction *Instr = J; + Value *Dividend = Instr->getOperand(0); + Value *Divisor = Instr->getOperand(1); + + if (isa(Divisor) || + (isa(Dividend) && isa(Divisor))) { + // Operations with immediate values should have + // been solved and replaced during compile time. 
+ return false; + } + + // Basic Block is split before divide + BasicBlock *MainBB = I; + BasicBlock *SuccessorBB = I->splitBasicBlock(J); + ++I; //advance iterator I to successorBB + + // Add new basic block for slow divide operation + BasicBlock *SlowBB = BasicBlock::Create(F.getContext(), "", + MainBB->getParent(), SuccessorBB); + SlowBB->moveBefore(SuccessorBB); + IRBuilder<> SlowBuilder(SlowBB, SlowBB->begin()); + Value *SlowQuotientV; + Value *SlowRemainderV; + if (UseSignedOp) { + SlowQuotientV = SlowBuilder.CreateSDiv(Dividend, Divisor); + SlowRemainderV = SlowBuilder.CreateSRem(Dividend, Divisor); + } else { + SlowQuotientV = SlowBuilder.CreateUDiv(Dividend, Divisor); + SlowRemainderV = SlowBuilder.CreateURem(Dividend, Divisor); + } + SlowBuilder.CreateBr(SuccessorBB); + + // Add new basic block for fast divide operation + BasicBlock *FastBB = BasicBlock::Create(F.getContext(), "", + MainBB->getParent(), SuccessorBB); + FastBB->moveBefore(SlowBB); + IRBuilder<> FastBuilder(FastBB, FastBB->begin()); + Value *ShortDivisorV = FastBuilder.CreateCast(Instruction::Trunc, Divisor, + BypassType); + Value *ShortDividendV = FastBuilder.CreateCast(Instruction::Trunc, Dividend, + BypassType); + + // udiv/urem because optimization only handles positive numbers + Value *ShortQuotientV = FastBuilder.CreateExactUDiv(ShortDividendV, + ShortDivisorV); + Value *ShortRemainderV = FastBuilder.CreateURem(ShortDividendV, + ShortDivisorV); + Value *FastQuotientV = FastBuilder.CreateCast(Instruction::ZExt, + ShortQuotientV, + Dividend->getType()); + Value *FastRemainderV = FastBuilder.CreateCast(Instruction::ZExt, + ShortRemainderV, + Dividend->getType()); + FastBuilder.CreateBr(SuccessorBB); + + // Phi nodes for result of div and rem + IRBuilder<> SuccessorBuilder(SuccessorBB, SuccessorBB->begin()); + PHINode *QuoPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2); + QuoPhi->addIncoming(SlowQuotientV, SlowBB); + QuoPhi->addIncoming(FastQuotientV, FastBB); + PHINode *RemPhi = SuccessorBuilder.CreatePHI(Instr->getType(), 2); + RemPhi->addIncoming(SlowRemainderV, SlowBB); + RemPhi->addIncoming(FastRemainderV, FastBB); + + // Replace Instr with appropriate phi node + if (UseDivOp) + Instr->replaceAllUsesWith(QuoPhi); + else + Instr->replaceAllUsesWith(RemPhi); + Instr->eraseFromParent(); + + // Combine operands into a single value with OR for value testing below + MainBB->getInstList().back().eraseFromParent(); + IRBuilder<> MainBuilder(MainBB, MainBB->end()); + Value *OrV = MainBuilder.CreateOr(Dividend, Divisor); + + // BitMask is inverted to check if the operands are + // larger than the bypass type + uint64_t BitMask = ~BypassType->getBitMask(); + Value *AndV = MainBuilder.CreateAnd(OrV, BitMask); + + // Compare operand values and branch + Value *ZeroV = MainBuilder.getInt32(0); + Value *CmpV = MainBuilder.CreateICmpEQ(AndV, ZeroV); + MainBuilder.CreateCondBr(CmpV, FastBB, SlowBB); + + // point iterator J at first instruction of successorBB + J = I->begin(); + + // Cache phi nodes to be used later in place of other instances + // of div or rem with the same sign, dividend, and divisor + DivOpInfo Key(UseSignedOp, Dividend, Divisor); + DivPhiNodes Value(QuoPhi, RemPhi); + PerBBDivCache.insert(std::pair(Key, Value)); + return true; +} + +// reuseOrInsertFastDiv - Reuses previously computed dividend or remainder if +// operands and operation are identical. Otherwise call insertFastDiv to perform +// the optimization and cache the resulting dividend and remainder. 
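What insertFastDiv emits corresponds to a simple source-level pattern: test whether both operands fit in the bypass type, and if so perform the divide and remainder at the narrow width and zero-extend the results. A plain C++ rendering of that pattern with an i8 bypass type, as in the Atom motivation (the names and test values are mine; the real pass also builds phis and caches them per block):

    #include <cassert>
    #include <stdint.h>

    static void fastDivRem(uint32_t Dividend, uint32_t Divisor,
                           uint32_t &Quo, uint32_t &Rem) {
      // ((Dividend | Divisor) & ~0xFF) == 0  <=>  both operands fit in 8 bits.
      if (((Dividend | Divisor) & ~0xFFu) == 0) {
        uint8_t D = (uint8_t)Dividend, V = (uint8_t)Divisor;  // trunc
        Quo = (uint8_t)(D / V);                               // short udiv, then zext
        Rem = (uint8_t)(D % V);                               // short urem, then zext
      } else {
        Quo = Dividend / Divisor;                             // full-width slow path
        Rem = Dividend % Divisor;
      }
    }

    int main() {
      uint32_t Q, R;
      fastDivRem(200, 7, Q, R);                    // takes the fast path
      assert(Q == 200u / 7 && R == 200u % 7);
      fastDivRem(100000, 7, Q, R);                 // operands too wide: slow path
      assert(Q == 100000u / 7 && R == 100000u % 7);
      return 0;
    }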
+static bool reuseOrInsertFastDiv(Function &F, + Function::iterator &I, + BasicBlock::iterator &J, + IntegerType *BypassType, + bool UseDivOp, + bool UseSignedOp, + DivCacheTy &PerBBDivCache) { + // Get instruction operands + Instruction *Instr = J; + DivOpInfo Key(UseSignedOp, Instr->getOperand(0), Instr->getOperand(1)); + DivCacheTy::iterator CacheI = PerBBDivCache.find(Key); + + if (CacheI == PerBBDivCache.end()) { + // If previous instance does not exist, insert fast div + return insertFastDiv(F, I, J, BypassType, UseDivOp, UseSignedOp, + PerBBDivCache); + } + + // Replace operation value with previously generated phi node + DivPhiNodes &Value = CacheI->second; + if (UseDivOp) { + // Replace all uses of div instruction with quotient phi node + J->replaceAllUsesWith(Value.Quotient); + } else { + // Replace all uses of rem instruction with remainder phi node + J->replaceAllUsesWith(Value.Remainder); + } + + // Advance to next operation + ++J; + + // Remove redundant operation + Instr->eraseFromParent(); + return true; +} + +// bypassSlowDivision - This optimization identifies DIV instructions that can +// be profitably bypassed and carried out with a shorter, faster divide. +bool llvm::bypassSlowDivision(Function &F, + Function::iterator &I, + const DenseMap &BypassWidths) { + DivCacheTy DivCache; + + bool MadeChange = false; + for (BasicBlock::iterator J = I->begin(); J != I->end(); J++) { + + // Get instruction details + unsigned Opcode = J->getOpcode(); + bool UseDivOp = Opcode == Instruction::SDiv || Opcode == Instruction::UDiv; + bool UseRemOp = Opcode == Instruction::SRem || Opcode == Instruction::URem; + bool UseSignedOp = Opcode == Instruction::SDiv || + Opcode == Instruction::SRem; + + // Only optimize div or rem ops + if (!UseDivOp && !UseRemOp) + continue; + + // Skip division on vector types, only optimize integer instructions + if (!J->getType()->isIntegerTy()) + continue; + + // Get bitwidth of div/rem instruction + IntegerType *T = cast(J->getType()); + int bitwidth = T->getBitWidth(); + + // Continue if bitwidth is not bypassed + DenseMap::const_iterator BI = BypassWidths.find(bitwidth); + if (BI == BypassWidths.end()) + continue; + + // Get type for div/rem instruction with bypass bitwidth + IntegerType *BT = IntegerType::get(J->getContext(), BI->second); + + MadeChange |= reuseOrInsertFastDiv(F, I, J, BT, UseDivOp, + UseSignedOp, DivCache); + } + + return MadeChange; +} diff --git a/lib/Transforms/Utils/CMakeLists.txt b/lib/Transforms/Utils/CMakeLists.txt index 4ff31ca..620209b 100644 --- a/lib/Transforms/Utils/CMakeLists.txt +++ b/lib/Transforms/Utils/CMakeLists.txt @@ -3,6 +3,7 @@ add_llvm_library(LLVMTransformUtils BasicBlockUtils.cpp BreakCriticalEdges.cpp BuildLibCalls.cpp + BypassSlowDivision.cpp CloneFunction.cpp CloneModule.cpp CmpInstAnalysis.cpp @@ -10,6 +11,7 @@ add_llvm_library(LLVMTransformUtils DemoteRegToStack.cpp InlineFunction.cpp InstructionNamer.cpp + IntegerDivision.cpp LCSSA.cpp Local.cpp LoopSimplify.cpp @@ -19,12 +21,14 @@ add_llvm_library(LLVMTransformUtils LowerInvoke.cpp LowerSwitch.cpp Mem2Reg.cpp + MetaRenamer.cpp ModuleUtils.cpp PromoteMemoryToRegister.cpp SSAUpdater.cpp SimplifyCFG.cpp SimplifyIndVar.cpp SimplifyInstructions.cpp + SimplifyLibCalls.cpp UnifyFunctionExitNodes.cpp Utils.cpp ValueMapper.cpp diff --git a/lib/Transforms/Utils/CloneFunction.cpp b/lib/Transforms/Utils/CloneFunction.cpp index 99237b8..7ba9f6d 100644 --- a/lib/Transforms/Utils/CloneFunction.cpp +++ b/lib/Transforms/Utils/CloneFunction.cpp @@ -98,10 +98,14 @@ void 
llvm::CloneFunctionInto(Function *NewFunc, const Function *OldFunc, Anew->addAttr( OldFunc->getAttributes() .getParamAttributes(I->getArgNo() + 1)); NewFunc->setAttributes(NewFunc->getAttributes() - .addAttr(0, OldFunc->getAttributes() + .addAttr(NewFunc->getContext(), + AttrListPtr::ReturnIndex, + OldFunc->getAttributes() .getRetAttributes())); NewFunc->setAttributes(NewFunc->getAttributes() - .addAttr(~0, OldFunc->getAttributes() + .addAttr(NewFunc->getContext(), + AttrListPtr::FunctionIndex, + OldFunc->getAttributes() .getFnAttributes())); } @@ -202,14 +206,14 @@ namespace { bool ModuleLevelChanges; const char *NameSuffix; ClonedCodeInfo *CodeInfo; - const TargetData *TD; + const DataLayout *TD; public: PruningFunctionCloner(Function *newFunc, const Function *oldFunc, ValueToValueMapTy &valueMap, bool moduleLevelChanges, const char *nameSuffix, ClonedCodeInfo *codeInfo, - const TargetData *td) + const DataLayout *td) : NewFunc(newFunc), OldFunc(oldFunc), VMap(valueMap), ModuleLevelChanges(moduleLevelChanges), NameSuffix(nameSuffix), CodeInfo(codeInfo), TD(td) { @@ -365,7 +369,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, SmallVectorImpl &Returns, const char *NameSuffix, ClonedCodeInfo *CodeInfo, - const TargetData *TD, + const DataLayout *TD, Instruction *TheCall) { assert(NameSuffix && "NameSuffix cannot be null!"); diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp index c545cd6..281714f 100644 --- a/lib/Transforms/Utils/CodeExtractor.cpp +++ b/lib/Transforms/Utils/CodeExtractor.cpp @@ -346,7 +346,7 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs, header->getName(), M); // If the old function is no-throw, so is the new one. if (oldFunction->doesNotThrow()) - newFunction->setDoesNotThrow(true); + newFunction->setDoesNotThrow(); newFunction->getBasicBlockList().push_back(newRootNode); diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp index 89e89e7..009847f 100644 --- a/lib/Transforms/Utils/InlineFunction.cpp +++ b/lib/Transforms/Utils/InlineFunction.cpp @@ -27,7 +27,7 @@ #include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Support/CallSite.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Transforms/Utils/Local.h" using namespace llvm; @@ -357,7 +357,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall, Type *VoidPtrTy = Type::getInt8PtrTy(Context); - // Create the alloca. If we have TargetData, use nice alignment. + // Create the alloca. If we have DataLayout, use nice alignment. unsigned Align = 1; if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy); diff --git a/lib/Transforms/Utils/IntegerDivision.cpp b/lib/Transforms/Utils/IntegerDivision.cpp new file mode 100644 index 0000000..55227e2 --- /dev/null +++ b/lib/Transforms/Utils/IntegerDivision.cpp @@ -0,0 +1,420 @@ +//===-- IntegerDivision.cpp - Expand integer division ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains an implementation of 32bit scalar integer division for +// targets that don't have native support. 
It's largely derived from +// compiler-rt's implementation of __udivsi3, but hand-tuned to reduce the +// amount of control flow +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "integer-division" +#include "llvm/Function.h" +#include "llvm/Instructions.h" +#include "llvm/Intrinsics.h" +#include "llvm/IRBuilder.h" +#include "llvm/Transforms/Utils/IntegerDivision.h" + +using namespace llvm; + +/// Generate code to compute the remainder of two signed integers. Returns the +/// remainder, which will have the sign of the dividend. Builder's insert point +/// should be pointing where the caller wants code generated, e.g. at the srem +/// instruction. This will generate a urem in the process, and Builder's insert +/// point will be pointing at the uren (if present, i.e. not folded), ready to +/// be expanded if the user wishes +static Value *generateSignedRemainderCode(Value *Dividend, Value *Divisor, + IRBuilder<> &Builder) { + ConstantInt *ThirtyOne = Builder.getInt32(31); + + // ; %dividend_sgn = ashr i32 %dividend, 31 + // ; %divisor_sgn = ashr i32 %divisor, 31 + // ; %dvd_xor = xor i32 %dividend, %dividend_sgn + // ; %dvs_xor = xor i32 %divisor, %divisor_sgn + // ; %u_dividend = sub i32 %dvd_xor, %dividend_sgn + // ; %u_divisor = sub i32 %dvs_xor, %divisor_sgn + // ; %urem = urem i32 %dividend, %divisor + // ; %xored = xor i32 %urem, %dividend_sgn + // ; %srem = sub i32 %xored, %dividend_sgn + Value *DividendSign = Builder.CreateAShr(Dividend, ThirtyOne); + Value *DivisorSign = Builder.CreateAShr(Divisor, ThirtyOne); + Value *DvdXor = Builder.CreateXor(Dividend, DividendSign); + Value *DvsXor = Builder.CreateXor(Divisor, DivisorSign); + Value *UDividend = Builder.CreateSub(DvdXor, DividendSign); + Value *UDivisor = Builder.CreateSub(DvsXor, DivisorSign); + Value *URem = Builder.CreateURem(UDividend, UDivisor); + Value *Xored = Builder.CreateXor(URem, DividendSign); + Value *SRem = Builder.CreateSub(Xored, DividendSign); + + if (Instruction *URemInst = dyn_cast(URem)) + Builder.SetInsertPoint(URemInst); + + return SRem; +} + + +/// Generate code to compute the remainder of two unsigned integers. Returns the +/// remainder. Builder's insert point should be pointing where the caller wants +/// code generated, e.g. at the urem instruction. This will generate a udiv in +/// the process, and Builder's insert point will be pointing at the udiv (if +/// present, i.e. not folded), ready to be expanded if the user wishes +static Value *generatedUnsignedRemainderCode(Value *Dividend, Value *Divisor, + IRBuilder<> &Builder) { + // Remainder = Dividend - Quotient*Divisor + + // ; %quotient = udiv i32 %dividend, %divisor + // ; %product = mul i32 %divisor, %quotient + // ; %remainder = sub i32 %dividend, %product + Value *Quotient = Builder.CreateUDiv(Dividend, Divisor); + Value *Product = Builder.CreateMul(Divisor, Quotient); + Value *Remainder = Builder.CreateSub(Dividend, Product); + + if (Instruction *UDiv = dyn_cast(Quotient)) + Builder.SetInsertPoint(UDiv); + + return Remainder; +} + +/// Generate code to divide two signed integers. Returns the quotient, rounded +/// towards 0. Builder's insert point should be pointing where the caller wants +/// code generated, e.g. at the sdiv instruction. This will generate a udiv in +/// the process, and Builder's insert point will be pointing at the udiv (if +/// present, i.e. not folded), ready to be expanded if the user wishes. 
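generateSignedRemainderCode reduces srem to urem by stripping the signs with the xor/subtract conditional-negate trick and then giving the result the sign of the dividend. The same computation in plain C++, checked against the native % operator (this assumes the usual arithmetic right shift for signed values and truncating division, which is what the IR's ashr/srem specify):

    #include <cassert>
    #include <stdint.h>

    static int32_t sremViaUrem(int32_t Dividend, int32_t Divisor) {
      int32_t DividendSign = Dividend >> 31;   // 0 or -1, like 'ashr i32 %dividend, 31'
      int32_t DivisorSign  = Divisor  >> 31;
      // (x ^ s) - s negates x when s == -1 and leaves it unchanged when s == 0.
      uint32_t UDividend =
          ((uint32_t)Dividend ^ (uint32_t)DividendSign) - (uint32_t)DividendSign;
      uint32_t UDivisor  =
          ((uint32_t)Divisor  ^ (uint32_t)DivisorSign)  - (uint32_t)DivisorSign;
      uint32_t URem = UDividend % UDivisor;    // the urem left behind for later expansion
      // Re-apply the dividend's sign: srem's result takes the sign of the dividend.
      return (int32_t)((URem ^ (uint32_t)DividendSign) - (uint32_t)DividendSign);
    }

    int main() {
      assert(sremViaUrem( 7,  3) ==  7 %  3);
      assert(sremViaUrem(-7,  3) == -7 %  3);
      assert(sremViaUrem( 7, -3) ==  7 % -3);
      assert(sremViaUrem(-7, -3) == -7 % -3);
      return 0;
    }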
+static Value *generateSignedDivisionCode(Value *Dividend, Value *Divisor, + IRBuilder<> &Builder) { + // Implementation taken from compiler-rt's __divsi3 + + ConstantInt *ThirtyOne = Builder.getInt32(31); + + // ; %tmp = ashr i32 %dividend, 31 + // ; %tmp1 = ashr i32 %divisor, 31 + // ; %tmp2 = xor i32 %tmp, %dividend + // ; %u_dvnd = sub nsw i32 %tmp2, %tmp + // ; %tmp3 = xor i32 %tmp1, %divisor + // ; %u_dvsr = sub nsw i32 %tmp3, %tmp1 + // ; %q_sgn = xor i32 %tmp1, %tmp + // ; %q_mag = udiv i32 %u_dvnd, %u_dvsr + // ; %tmp4 = xor i32 %q_mag, %q_sgn + // ; %q = sub i32 %tmp4, %q_sgn + Value *Tmp = Builder.CreateAShr(Dividend, ThirtyOne); + Value *Tmp1 = Builder.CreateAShr(Divisor, ThirtyOne); + Value *Tmp2 = Builder.CreateXor(Tmp, Dividend); + Value *U_Dvnd = Builder.CreateSub(Tmp2, Tmp); + Value *Tmp3 = Builder.CreateXor(Tmp1, Divisor); + Value *U_Dvsr = Builder.CreateSub(Tmp3, Tmp1); + Value *Q_Sgn = Builder.CreateXor(Tmp1, Tmp); + Value *Q_Mag = Builder.CreateUDiv(U_Dvnd, U_Dvsr); + Value *Tmp4 = Builder.CreateXor(Q_Mag, Q_Sgn); + Value *Q = Builder.CreateSub(Tmp4, Q_Sgn); + + if (Instruction *UDiv = dyn_cast(Q_Mag)) + Builder.SetInsertPoint(UDiv); + + return Q; +} + +/// Generates code to divide two unsigned scalar 32-bit integers. Returns the +/// quotient, rounded towards 0. Builder's insert point should be pointing where +/// the caller wants code generated, e.g. at the udiv instruction. +static Value *generateUnsignedDivisionCode(Value *Dividend, Value *Divisor, + IRBuilder<> &Builder) { + // The basic algorithm can be found in the compiler-rt project's + // implementation of __udivsi3.c. Here, we do a lower-level IR based approach + // that's been hand-tuned to lessen the amount of control flow involved. + + // Some helper values + IntegerType *I32Ty = Builder.getInt32Ty(); + + ConstantInt *Zero = Builder.getInt32(0); + ConstantInt *One = Builder.getInt32(1); + ConstantInt *ThirtyOne = Builder.getInt32(31); + ConstantInt *NegOne = ConstantInt::getSigned(I32Ty, -1); + ConstantInt *True = Builder.getTrue(); + + BasicBlock *IBB = Builder.GetInsertBlock(); + Function *F = IBB->getParent(); + Function *CTLZi32 = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, + I32Ty); + + // Our CFG is going to look like: + // +---------------------+ + // | special-cases | + // | ... | + // +---------------------+ + // | | + // | +----------+ + // | | bb1 | + // | | ... | + // | +----------+ + // | | | + // | | +------------+ + // | | | preheader | + // | | | ... | + // | | +------------+ + // | | | + // | | | +---+ + // | | | | | + // | | +------------+ | + // | | | do-while | | + // | | | ... | | + // | | +------------+ | + // | | | | | + // | +-----------+ +---+ + // | | loop-exit | + // | | ... | + // | +-----------+ + // | | + // +-------+ + // | ... 
| + // | end | + // +-------+ + BasicBlock *SpecialCases = Builder.GetInsertBlock(); + SpecialCases->setName(Twine(SpecialCases->getName(), "_udiv-special-cases")); + BasicBlock *End = SpecialCases->splitBasicBlock(Builder.GetInsertPoint(), + "udiv-end"); + BasicBlock *LoopExit = BasicBlock::Create(Builder.getContext(), + "udiv-loop-exit", F, End); + BasicBlock *DoWhile = BasicBlock::Create(Builder.getContext(), + "udiv-do-while", F, End); + BasicBlock *Preheader = BasicBlock::Create(Builder.getContext(), + "udiv-preheader", F, End); + BasicBlock *BB1 = BasicBlock::Create(Builder.getContext(), + "udiv-bb1", F, End); + + // We'll be overwriting the terminator to insert our extra blocks + SpecialCases->getTerminator()->eraseFromParent(); + + // First off, check for special cases: dividend or divisor is zero, divisor + // is greater than dividend, and divisor is 1. + // ; special-cases: + // ; %ret0_1 = icmp eq i32 %divisor, 0 + // ; %ret0_2 = icmp eq i32 %dividend, 0 + // ; %ret0_3 = or i1 %ret0_1, %ret0_2 + // ; %tmp0 = tail call i32 @llvm.ctlz.i32(i32 %divisor, i1 true) + // ; %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %dividend, i1 true) + // ; %sr = sub nsw i32 %tmp0, %tmp1 + // ; %ret0_4 = icmp ugt i32 %sr, 31 + // ; %ret0 = or i1 %ret0_3, %ret0_4 + // ; %retDividend = icmp eq i32 %sr, 31 + // ; %retVal = select i1 %ret0, i32 0, i32 %dividend + // ; %earlyRet = or i1 %ret0, %retDividend + // ; br i1 %earlyRet, label %end, label %bb1 + Builder.SetInsertPoint(SpecialCases); + Value *Ret0_1 = Builder.CreateICmpEQ(Divisor, Zero); + Value *Ret0_2 = Builder.CreateICmpEQ(Dividend, Zero); + Value *Ret0_3 = Builder.CreateOr(Ret0_1, Ret0_2); + Value *Tmp0 = Builder.CreateCall2(CTLZi32, Divisor, True); + Value *Tmp1 = Builder.CreateCall2(CTLZi32, Dividend, True); + Value *SR = Builder.CreateSub(Tmp0, Tmp1); + Value *Ret0_4 = Builder.CreateICmpUGT(SR, ThirtyOne); + Value *Ret0 = Builder.CreateOr(Ret0_3, Ret0_4); + Value *RetDividend = Builder.CreateICmpEQ(SR, ThirtyOne); + Value *RetVal = Builder.CreateSelect(Ret0, Zero, Dividend); + Value *EarlyRet = Builder.CreateOr(Ret0, RetDividend); + Builder.CreateCondBr(EarlyRet, End, BB1); + + // ; bb1: ; preds = %special-cases + // ; %sr_1 = add i32 %sr, 1 + // ; %tmp2 = sub i32 31, %sr + // ; %q = shl i32 %dividend, %tmp2 + // ; %skipLoop = icmp eq i32 %sr_1, 0 + // ; br i1 %skipLoop, label %loop-exit, label %preheader + Builder.SetInsertPoint(BB1); + Value *SR_1 = Builder.CreateAdd(SR, One); + Value *Tmp2 = Builder.CreateSub(ThirtyOne, SR); + Value *Q = Builder.CreateShl(Dividend, Tmp2); + Value *SkipLoop = Builder.CreateICmpEQ(SR_1, Zero); + Builder.CreateCondBr(SkipLoop, LoopExit, Preheader); + + // ; preheader: ; preds = %bb1 + // ; %tmp3 = lshr i32 %dividend, %sr_1 + // ; %tmp4 = add i32 %divisor, -1 + // ; br label %do-while + Builder.SetInsertPoint(Preheader); + Value *Tmp3 = Builder.CreateLShr(Dividend, SR_1); + Value *Tmp4 = Builder.CreateAdd(Divisor, NegOne); + Builder.CreateBr(DoWhile); + + // ; do-while: ; preds = %do-while, %preheader + // ; %carry_1 = phi i32 [ 0, %preheader ], [ %carry, %do-while ] + // ; %sr_3 = phi i32 [ %sr_1, %preheader ], [ %sr_2, %do-while ] + // ; %r_1 = phi i32 [ %tmp3, %preheader ], [ %r, %do-while ] + // ; %q_2 = phi i32 [ %q, %preheader ], [ %q_1, %do-while ] + // ; %tmp5 = shl i32 %r_1, 1 + // ; %tmp6 = lshr i32 %q_2, 31 + // ; %tmp7 = or i32 %tmp5, %tmp6 + // ; %tmp8 = shl i32 %q_2, 1 + // ; %q_1 = or i32 %carry_1, %tmp8 + // ; %tmp9 = sub i32 %tmp4, %tmp7 + // ; %tmp10 = ashr i32 %tmp9, 31 + // ; %carry = and 
i32 %tmp10, 1 + // ; %tmp11 = and i32 %tmp10, %divisor + // ; %r = sub i32 %tmp7, %tmp11 + // ; %sr_2 = add i32 %sr_3, -1 + // ; %tmp12 = icmp eq i32 %sr_2, 0 + // ; br i1 %tmp12, label %loop-exit, label %do-while + Builder.SetInsertPoint(DoWhile); + PHINode *Carry_1 = Builder.CreatePHI(I32Ty, 2); + PHINode *SR_3 = Builder.CreatePHI(I32Ty, 2); + PHINode *R_1 = Builder.CreatePHI(I32Ty, 2); + PHINode *Q_2 = Builder.CreatePHI(I32Ty, 2); + Value *Tmp5 = Builder.CreateShl(R_1, One); + Value *Tmp6 = Builder.CreateLShr(Q_2, ThirtyOne); + Value *Tmp7 = Builder.CreateOr(Tmp5, Tmp6); + Value *Tmp8 = Builder.CreateShl(Q_2, One); + Value *Q_1 = Builder.CreateOr(Carry_1, Tmp8); + Value *Tmp9 = Builder.CreateSub(Tmp4, Tmp7); + Value *Tmp10 = Builder.CreateAShr(Tmp9, 31); + Value *Carry = Builder.CreateAnd(Tmp10, One); + Value *Tmp11 = Builder.CreateAnd(Tmp10, Divisor); + Value *R = Builder.CreateSub(Tmp7, Tmp11); + Value *SR_2 = Builder.CreateAdd(SR_3, NegOne); + Value *Tmp12 = Builder.CreateICmpEQ(SR_2, Zero); + Builder.CreateCondBr(Tmp12, LoopExit, DoWhile); + + // ; loop-exit: ; preds = %do-while, %bb1 + // ; %carry_2 = phi i32 [ 0, %bb1 ], [ %carry, %do-while ] + // ; %q_3 = phi i32 [ %q, %bb1 ], [ %q_1, %do-while ] + // ; %tmp13 = shl i32 %q_3, 1 + // ; %q_4 = or i32 %carry_2, %tmp13 + // ; br label %end + Builder.SetInsertPoint(LoopExit); + PHINode *Carry_2 = Builder.CreatePHI(I32Ty, 2); + PHINode *Q_3 = Builder.CreatePHI(I32Ty, 2); + Value *Tmp13 = Builder.CreateShl(Q_3, One); + Value *Q_4 = Builder.CreateOr(Carry_2, Tmp13); + Builder.CreateBr(End); + + // ; end: ; preds = %loop-exit, %special-cases + // ; %q_5 = phi i32 [ %q_4, %loop-exit ], [ %retVal, %special-cases ] + // ; ret i32 %q_5 + Builder.SetInsertPoint(End, End->begin()); + PHINode *Q_5 = Builder.CreatePHI(I32Ty, 2); + + // Populate the Phis, since all values have now been created. Our Phis were: + // ; %carry_1 = phi i32 [ 0, %preheader ], [ %carry, %do-while ] + Carry_1->addIncoming(Zero, Preheader); + Carry_1->addIncoming(Carry, DoWhile); + // ; %sr_3 = phi i32 [ %sr_1, %preheader ], [ %sr_2, %do-while ] + SR_3->addIncoming(SR_1, Preheader); + SR_3->addIncoming(SR_2, DoWhile); + // ; %r_1 = phi i32 [ %tmp3, %preheader ], [ %r, %do-while ] + R_1->addIncoming(Tmp3, Preheader); + R_1->addIncoming(R, DoWhile); + // ; %q_2 = phi i32 [ %q, %preheader ], [ %q_1, %do-while ] + Q_2->addIncoming(Q, Preheader); + Q_2->addIncoming(Q_1, DoWhile); + // ; %carry_2 = phi i32 [ 0, %bb1 ], [ %carry, %do-while ] + Carry_2->addIncoming(Zero, BB1); + Carry_2->addIncoming(Carry, DoWhile); + // ; %q_3 = phi i32 [ %q, %bb1 ], [ %q_1, %do-while ] + Q_3->addIncoming(Q, BB1); + Q_3->addIncoming(Q_1, DoWhile); + // ; %q_5 = phi i32 [ %q_4, %loop-exit ], [ %retVal, %special-cases ] + Q_5->addIncoming(Q_4, LoopExit); + Q_5->addIncoming(RetVal, SpecialCases); + + return Q_5; +} + +/// Generate code to calculate the remainder of two integers, replacing Rem with +/// the generated code. This currently generates code using the udiv expansion, +/// but future work includes generating more specialized code, e.g. when more +/// information about the operands are known. Currently only implements 32bit +/// scalar division (due to udiv's limitation), but future work is removing this +/// limitation. +/// +/// @brief Replace Rem with generated code. 
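The do-while loop built above is a restoring shift-and-subtract divider: each iteration brings one more dividend bit down into a running remainder and produces one quotient bit. Stripped of the ctlz-based early exits, the scheme looks like this in plain C++ (checked against the native operators; names are mine):

    #include <cassert>
    #include <stdint.h>

    static uint32_t udiv32(uint32_t Dividend, uint32_t Divisor, uint32_t &Remainder) {
      uint32_t Quotient = 0, R = 0;
      for (int Bit = 31; Bit >= 0; --Bit) {
        R = (R << 1) | ((Dividend >> Bit) & 1);  // bring down the next dividend bit
        if (R >= Divisor) {                      // the carry/and/sub sequence in the IR
          R -= Divisor;
          Quotient |= 1u << Bit;
        }
      }
      Remainder = R;
      return Quotient;
    }

    int main() {
      uint32_t R;
      assert(udiv32(100000, 7, R) == 100000u / 7 && R == 100000u % 7);
      assert(udiv32(5, 9, R) == 0 && R == 5);
      assert(udiv32(0xFFFFFFFFu, 3, R) == 0xFFFFFFFFu / 3 && R == 0xFFFFFFFFu % 3);
      return 0;
    }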
+bool llvm::expandRemainder(BinaryOperator *Rem) {
+  assert((Rem->getOpcode() == Instruction::SRem ||
+          Rem->getOpcode() == Instruction::URem) &&
+         "Trying to expand remainder from a non-remainder function");
+
+  IRBuilder<> Builder(Rem);
+
+  // First prepare the sign if it's a signed remainder
+  if (Rem->getOpcode() == Instruction::SRem) {
+    Value *Remainder = generateSignedRemainderCode(Rem->getOperand(0),
+                                                   Rem->getOperand(1), Builder);
+
+    Rem->replaceAllUsesWith(Remainder);
+    Rem->dropAllReferences();
+    Rem->eraseFromParent();
+
+    // If we didn't actually generate a udiv instruction, we're done
+    BinaryOperator *BO = dyn_cast<BinaryOperator>(Builder.GetInsertPoint());
+    if (!BO || BO->getOpcode() != Instruction::URem)
+      return true;
+
+    Rem = BO;
+  }
+
+  Value *Remainder = generatedUnsignedRemainderCode(Rem->getOperand(0),
+                                                    Rem->getOperand(1),
+                                                    Builder);
+
+  Rem->replaceAllUsesWith(Remainder);
+  Rem->dropAllReferences();
+  Rem->eraseFromParent();
+
+  // Expand the udiv
+  if (BinaryOperator *UDiv = dyn_cast<BinaryOperator>(Builder.GetInsertPoint())) {
+    assert(UDiv->getOpcode() == Instruction::UDiv && "Non-udiv in expansion?");
+    expandDivision(UDiv);
+  }
+
+  return true;
+}
+
+
+/// Generate code to divide two integers, replacing Div with the generated
+/// code. This currently generates code similarly to compiler-rt's
+/// implementations, but future work includes generating more specialized code
+/// when more information about the operands are known. Currently only
+/// implements 32bit scalar division, but future work is removing this
+/// limitation.
+///
+/// @brief Replace Div with generated code.
+bool llvm::expandDivision(BinaryOperator *Div) {
+  assert((Div->getOpcode() == Instruction::SDiv ||
+          Div->getOpcode() == Instruction::UDiv) &&
+         "Trying to expand division from a non-division function");
+
+  IRBuilder<> Builder(Div);
+
+  if (Div->getType()->isVectorTy())
+    llvm_unreachable("Div over vectors not supported");
+
+  // First prepare the sign if it's a signed division
+  if (Div->getOpcode() == Instruction::SDiv) {
+    // Lower the code to unsigned division, and reset Div to point to the udiv.
+    Value *Quotient = generateSignedDivisionCode(Div->getOperand(0),
+                                                 Div->getOperand(1), Builder);
+    Div->replaceAllUsesWith(Quotient);
+    Div->dropAllReferences();
+    Div->eraseFromParent();
+
+    // If we didn't actually generate a udiv instruction, we're done
+    BinaryOperator *BO = dyn_cast<BinaryOperator>(Builder.GetInsertPoint());
+    if (!BO || BO->getOpcode() != Instruction::UDiv)
+      return true;
+
+    Div = BO;
+  }
+
+  // Insert the unsigned division code
+  Value *Quotient = generateUnsignedDivisionCode(Div->getOperand(0),
+                                                 Div->getOperand(1),
+                                                 Builder);
+  Div->replaceAllUsesWith(Quotient);
+  Div->dropAllReferences();
+  Div->eraseFromParent();
+
+  return true;
+}
diff --git a/lib/Transforms/Utils/LCSSA.cpp b/lib/Transforms/Utils/LCSSA.cpp
index b654111..5e05c83 100644
--- a/lib/Transforms/Utils/LCSSA.cpp
+++ b/lib/Transforms/Utils/LCSSA.cpp
@@ -53,6 +53,8 @@ namespace {
     // Cached analysis information for the current function.
     DominatorTree *DT;
+    LoopInfo *LI;
+    ScalarEvolution *SE;
    std::vector<BasicBlock*> LoopBlocks;
    PredIteratorCache PredCache;
    Loop *L;
@@ -117,6 +119,8 @@ bool LCSSA::runOnLoop(Loop *TheLoop, LPPassManager &LPM) {
   L = TheLoop;
 
   DT = &getAnalysis<DominatorTree>();
+  LI = &getAnalysis<LoopInfo>();
+  SE = getAnalysisIfAvailable<ScalarEvolution>();
 
   // Get the set of exiting blocks.
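A minimal sketch of how the two entry points above might be driven from a transform; the helper name expandAllIntegerDivisions and its worklist scaffolding are illustrative assumptions, and the declarations are assumed to be exposed through llvm/Transforms/Utils/IntegerDivision.h as in upstream LLVM. The div/rem instructions are collected first because expandDivision and expandRemainder split blocks and erase the instruction they expand.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Function.h"
    #include "llvm/Instructions.h"
    #include "llvm/Transforms/Utils/IntegerDivision.h"

    // Hypothetical helper (not part of this patch): expand every 32-bit integer
    // division/remainder in F using the routines above.
    static bool expandAllIntegerDivisions(llvm::Function &F) {
      using namespace llvm;
      SmallVector<BinaryOperator*, 8> Worklist;
      for (Function::iterator BB = F.begin(), BE = F.end(); BB != BE; ++BB)
        for (BasicBlock::iterator I = BB->begin(), IE = BB->end(); I != IE; ++I)
          if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
            if (BO->getType()->isIntegerTy(32))  // Expansion is 32-bit only for now.
              switch (BO->getOpcode()) {
              case Instruction::SDiv: case Instruction::UDiv:
              case Instruction::SRem: case Instruction::URem:
                Worklist.push_back(BO);
                break;
              default:
                break;
              }

      bool Changed = false;
      for (unsigned i = 0, e = Worklist.size(); i != e; ++i) {
        BinaryOperator *BO = Worklist[i];
        if (BO->getOpcode() == Instruction::SDiv ||
            BO->getOpcode() == Instruction::UDiv)
          Changed |= expandDivision(BO);       // BO is replaced and erased.
        else
          Changed |= expandRemainder(BO);
      }
      return Changed;
    }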
SmallVector ExitBlocks; @@ -156,6 +160,12 @@ bool LCSSA::runOnLoop(Loop *TheLoop, LPPassManager &LPM) { MadeChange |= ProcessInstruction(I, ExitBlocks); } } + + // If we modified the code, remove any caches about the loop from SCEV to + // avoid dangling entries. + // FIXME: This is a big hammer, can we clear the cache more selectively? + if (SE && MadeChange) + SE->forgetLoop(L); assert(L->isLCSSAForm(*DT)); PredCache.clear(); @@ -245,7 +255,7 @@ bool LCSSA::ProcessInstruction(Instruction *Inst, // Remember that this phi makes the value alive in this block. SSAUpdate.AddAvailableValue(ExitBB, PN); } - + // Rewrite all uses outside the loop in terms of the new PHIs we just // inserted. for (unsigned i = 0, e = UsesToRewrite.size(); i != e; ++i) { @@ -260,6 +270,9 @@ bool LCSSA::ProcessInstruction(Instruction *Inst, if (isa(UserBB->begin()) && isExitBlock(UserBB, ExitBlocks)) { + // Tell the VHs that the uses changed. This updates SCEV's caches. + if (UsesToRewrite[i]->get()->hasValueHandle()) + ValueHandleBase::ValueIsRAUWd(*UsesToRewrite[i], UserBB->begin()); UsesToRewrite[i]->set(UserBB->begin()); continue; } diff --git a/lib/Transforms/Utils/Local.cpp b/lib/Transforms/Utils/Local.cpp index bed7d72..a954d82 100644 --- a/lib/Transforms/Utils/Local.cpp +++ b/lib/Transforms/Utils/Local.cpp @@ -23,6 +23,7 @@ #include "llvm/Instructions.h" #include "llvm/IntrinsicInst.h" #include "llvm/Intrinsics.h" +#include "llvm/MDBuilder.h" #include "llvm/Metadata.h" #include "llvm/Operator.h" #include "llvm/ADT/DenseMap.h" @@ -38,7 +39,7 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Support/ValueHandle.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" using namespace llvm; //===----------------------------------------------------------------------===// @@ -52,7 +53,8 @@ using namespace llvm; /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch /// conditions and indirectbr addresses this might make dead if /// DeleteDeadConditions is true. -bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) { +bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions, + const TargetLibraryInfo *TLI) { TerminatorInst *T = BB->getTerminator(); IRBuilder<> Builder(T); @@ -96,7 +98,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) { Value *Cond = BI->getCondition(); BI->eraseFromParent(); if (DeleteDeadConditions) - RecursivelyDeleteTriviallyDeadInstructions(Cond); + RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI); return true; } return false; @@ -121,6 +123,27 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) { // Check to see if this branch is going to the same place as the default // dest. If so, eliminate it as an explicit compare. if (i.getCaseSuccessor() == DefaultDest) { + MDNode* MD = SI->getMetadata(LLVMContext::MD_prof); + // MD should have 2 + NumCases operands. + if (MD && MD->getNumOperands() == 2 + SI->getNumCases()) { + // Collect branch weights into a vector. + SmallVector Weights; + for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e; + ++MD_i) { + ConstantInt* CI = dyn_cast(MD->getOperand(MD_i)); + assert(CI); + Weights.push_back(CI->getValue().getZExtValue()); + } + // Merge weight of this case to the default weight. + unsigned idx = i.getCaseIndex(); + Weights[0] += Weights[idx+1]; + // Remove weight for this case. 
+ std::swap(Weights[idx+1], Weights.back()); + Weights.pop_back(); + SI->setMetadata(LLVMContext::MD_prof, + MDBuilder(BB->getContext()). + createBranchWeights(Weights)); + } // Remove this entry. DefaultDest->removePredecessor(SI->getParent()); SI->removeCase(i); @@ -161,7 +184,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) { Value *Cond = SI->getCondition(); SI->eraseFromParent(); if (DeleteDeadConditions) - RecursivelyDeleteTriviallyDeadInstructions(Cond); + RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI); return true; } @@ -177,8 +200,20 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) { "cond"); // Insert the new branch. - Builder.CreateCondBr(Cond, FirstCase.getCaseSuccessor(), - SI->getDefaultDest()); + BranchInst *NewBr = Builder.CreateCondBr(Cond, + FirstCase.getCaseSuccessor(), + SI->getDefaultDest()); + MDNode* MD = SI->getMetadata(LLVMContext::MD_prof); + if (MD && MD->getNumOperands() == 3) { + ConstantInt *SICase = dyn_cast(MD->getOperand(2)); + ConstantInt *SIDef = dyn_cast(MD->getOperand(1)); + assert(SICase && SIDef); + // The TrueWeight should be the weight for the single case of SI. + NewBr->setMetadata(LLVMContext::MD_prof, + MDBuilder(BB->getContext()). + createBranchWeights(SICase->getValue().getZExtValue(), + SIDef->getValue().getZExtValue())); + } // Delete the old switch. SI->eraseFromParent(); @@ -205,7 +240,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) { Value *Address = IBI->getAddress(); IBI->eraseFromParent(); if (DeleteDeadConditions) - RecursivelyDeleteTriviallyDeadInstructions(Address); + RecursivelyDeleteTriviallyDeadInstructions(Address, TLI); // If we didn't find our destination in the IBI successor list, then we // have undefined behavior. Replace the unconditional branch with an @@ -230,7 +265,8 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) { /// isInstructionTriviallyDead - Return true if the result produced by the /// instruction is not used, and the instruction has no side effects. /// -bool llvm::isInstructionTriviallyDead(Instruction *I) { +bool llvm::isInstructionTriviallyDead(Instruction *I, + const TargetLibraryInfo *TLI) { if (!I->use_empty() || isa(I)) return false; // We don't want the landingpad instruction removed by anything this general. @@ -265,9 +301,9 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) { return isa(II->getArgOperand(1)); } - if (isAllocLikeFn(I)) return true; + if (isAllocLikeFn(I, TLI)) return true; - if (CallInst *CI = isFreeCall(I)) + if (CallInst *CI = isFreeCall(I, TLI)) if (Constant *C = dyn_cast(CI->getArgOperand(0))) return C->isNullValue() || isa(C); @@ -278,9 +314,11 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) { /// trivially dead instruction, delete it. If that makes any of its operands /// trivially dead, delete them too, recursively. Return true if any /// instructions were deleted. -bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) { +bool +llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V, + const TargetLibraryInfo *TLI) { Instruction *I = dyn_cast(V); - if (!I || !I->use_empty() || !isInstructionTriviallyDead(I)) + if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI)) return false; SmallVector DeadInsts; @@ -301,7 +339,7 @@ bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) { // operand, and if it is 'trivially' dead, delete it in a future loop // iteration. 
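The weight updates above rely on the usual "branch_weights" MD_prof layout: operand 0 is the MDString tag and operand i+1 holds the weight of successor i, with a switch's default destination first. A stand-alone illustration of the same read-modify-write cycle; the helper name is invented, and unlike the SimplifyCFG changes later in this patch it simply assumes the merged weights still fit in 32 bits rather than halving oversized ones.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Constants.h"
    #include "llvm/Instructions.h"
    #include "llvm/LLVMContext.h"
    #include "llvm/MDBuilder.h"
    #include "llvm/Metadata.h"

    // Hypothetical helper: fold the weight of case CaseIdx into the default
    // weight of SI's "branch_weights" metadata, mirroring the hunk above.
    static void foldCaseWeightIntoDefault(llvm::SwitchInst *SI, unsigned CaseIdx) {
      using namespace llvm;
      MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
      if (!MD || MD->getNumOperands() != 2 + SI->getNumCases())
        return;                                  // No (or malformed) profile data.
      SmallVector<uint64_t, 8> Weights;
      for (unsigned i = 1, e = MD->getNumOperands(); i != e; ++i)
        Weights.push_back(cast<ConstantInt>(MD->getOperand(i))->getZExtValue());
      Weights[0] += Weights[CaseIdx + 1];        // Default absorbs the case weight.
      Weights.erase(Weights.begin() + CaseIdx + 1);
      SmallVector<uint32_t, 8> MDWeights(Weights.begin(), Weights.end());
      SI->setMetadata(LLVMContext::MD_prof,
                      MDBuilder(SI->getParent()->getContext())
                          .createBranchWeights(MDWeights));
    }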
if (Instruction *OpI = dyn_cast(OpV)) - if (isInstructionTriviallyDead(OpI)) + if (isInstructionTriviallyDead(OpI, TLI)) DeadInsts.push_back(OpI); } @@ -334,19 +372,20 @@ static bool areAllUsesEqual(Instruction *I) { /// either forms a cycle or is terminated by a trivially dead instruction, /// delete it. If that makes any of its operands trivially dead, delete them /// too, recursively. Return true if a change was made. -bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) { +bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN, + const TargetLibraryInfo *TLI) { SmallPtrSet Visited; for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects(); I = cast(*I->use_begin())) { if (I->use_empty()) - return RecursivelyDeleteTriviallyDeadInstructions(I); + return RecursivelyDeleteTriviallyDeadInstructions(I, TLI); // If we find an instruction more than once, we're on a cycle that // won't prove fruitful. if (!Visited.insert(I)) { // Break the cycle and delete the instruction and its operands. I->replaceAllUsesWith(UndefValue::get(I->getType())); - (void)RecursivelyDeleteTriviallyDeadInstructions(I); + (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI); return true; } } @@ -358,7 +397,8 @@ bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) { /// /// This returns true if it changed the code, note that it can delete /// instructions in other blocks as well in this block. -bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) { +bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD, + const TargetLibraryInfo *TLI) { bool MadeChange = false; #ifndef NDEBUG @@ -381,7 +421,7 @@ bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) { continue; } - MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst); + MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI); if (BIHandle != BI) BI = BB->begin(); } @@ -405,7 +445,7 @@ bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) { /// .. and delete the predecessor corresponding to the '1', this will attempt to /// recursively fold the and to 0. void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred, - TargetData *TD) { + DataLayout *TD) { // This only adjusts blocks with PHI nodes. if (!isa(BB->begin())) return; @@ -720,7 +760,7 @@ bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) { /// their preferred alignment from the beginning. /// static unsigned enforceKnownAlignment(Value *V, unsigned Align, - unsigned PrefAlign, const TargetData *TD) { + unsigned PrefAlign, const DataLayout *TD) { V = V->stripPointerCasts(); if (AllocaInst *AI = dyn_cast(V)) { @@ -763,7 +803,7 @@ static unsigned enforceKnownAlignment(Value *V, unsigned Align, /// and it is more than the alignment of the ultimate object, see if we can /// increase the alignment of the ultimate object, making this check succeed. unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, - const TargetData *TD) { + const DataLayout *TD) { assert(V->getType()->isPointerTy() && "getOrEnforceKnownAlignment expects a pointer!"); unsigned BitWidth = TD ? 
TD->getPointerSizeInBits() : 64;
diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp
index 0bc185d..9d9e201 100644
--- a/lib/Transforms/Utils/LoopSimplify.cpp
+++ b/lib/Transforms/Utils/LoopSimplify.cpp
@@ -46,6 +46,7 @@
 #include "llvm/LLVMContext.h"
 #include "llvm/Type.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/DependenceAnalysis.h"
 #include "llvm/Analysis/Dominators.h"
 #include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/LoopPass.h"
@@ -89,6 +90,7 @@ namespace {
       AU.addPreserved<AliasAnalysis>();
       AU.addPreserved<ScalarEvolution>();
+      AU.addPreserved<DependenceAnalysis>();
       AU.addPreservedID(BreakCriticalEdgesID);  // No critical edges added.
     }
@@ -194,6 +196,11 @@ ReprocessLoop:
         BI->setCondition(ConstantInt::get(Cond->getType(),
                                           !L->contains(BI->getSuccessor(0))));
+
+        // This may make the loop analyzable, force SCEV recomputation.
+        if (SE)
+          SE->forgetLoop(L);
+
         Changed = true;
       }
     }
diff --git a/lib/Transforms/Utils/MetaRenamer.cpp b/lib/Transforms/Utils/MetaRenamer.cpp
new file mode 100644
index 0000000..233bc12
--- /dev/null
+++ b/lib/Transforms/Utils/MetaRenamer.cpp
@@ -0,0 +1,132 @@
+//===- MetaRenamer.cpp - Rename everything with metasyntactic names ------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass renames everything with metasyntactic names. The intent is to use
+// this pass after bugpoint reduction to conceal the nature of the original
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Function.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Type.h"
+#include "llvm/TypeFinder.h"
+
+using namespace llvm;
+
+namespace {
+
+  // This PRNG is from the ISO C spec. It is intentionally simple and
+  // unsuitable for cryptographic use. We're just looking for enough
+  // variety to surprise and delight users.
+  struct PRNG {
+    unsigned long next;
+
+    void srand(unsigned int seed) {
+      next = seed;
+    }
+
+    int rand(void) {
+      next = next * 1103515245 + 12345;
+      return (unsigned int)(next / 65536) % 32768;
+    }
+  };
+
+  struct MetaRenamer : public ModulePass {
+    static char ID; // Pass identification, replacement for typeid
+    MetaRenamer() : ModulePass(ID) {
+      initializeMetaRenamerPass(*PassRegistry::getPassRegistry());
+    }
+
+    void getAnalysisUsage(AnalysisUsage &AU) const {
+      AU.setPreservesAll();
+    }
+
+    bool runOnModule(Module &M) {
+      static const char *metaNames[] = {
+        // See http://en.wikipedia.org/wiki/Metasyntactic_variable
+        "foo", "bar", "baz", "quux", "barney", "snork", "zot", "blam", "hoge",
+        "wibble", "wobble", "widget", "wombat", "ham", "eggs", "pluto", "spam"
+      };
+
+      // Seed our PRNG with simple additive sum of ModuleID. We're looking to
+      // simply avoid always having the same function names, and we need to
+      // remain deterministic.
+ unsigned int randSeed = 0; + for (std::string::const_iterator I = M.getModuleIdentifier().begin(), + E = M.getModuleIdentifier().end(); I != E; ++I) + randSeed += *I; + + PRNG prng; + prng.srand(randSeed); + + // Rename all aliases + for (Module::alias_iterator AI = M.alias_begin(), AE = M.alias_end(); + AI != AE; ++AI) + AI->setName("alias"); + + // Rename all global variables + for (Module::global_iterator GI = M.global_begin(), GE = M.global_end(); + GI != GE; ++GI) + GI->setName("global"); + + // Rename all struct types + TypeFinder StructTypes; + StructTypes.run(M, true); + for (unsigned i = 0, e = StructTypes.size(); i != e; ++i) { + StructType *STy = StructTypes[i]; + if (STy->isLiteral() || STy->getName().empty()) continue; + + SmallString<128> NameStorage; + STy->setName((Twine("struct.") + metaNames[prng.rand() % + array_lengthof(metaNames)]).toStringRef(NameStorage)); + } + + // Rename all functions + for (Module::iterator FI = M.begin(), FE = M.end(); + FI != FE; ++FI) { + FI->setName(metaNames[prng.rand() % array_lengthof(metaNames)]); + runOnFunction(*FI); + } + return true; + } + + bool runOnFunction(Function &F) { + for (Function::arg_iterator AI = F.arg_begin(), AE = F.arg_end(); + AI != AE; ++AI) + if (!AI->getType()->isVoidTy()) + AI->setName("arg"); + + for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { + BB->setName("bb"); + + for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) + if (!I->getType()->isVoidTy()) + I->setName("tmp"); + } + return true; + } + }; +} + +char MetaRenamer::ID = 0; +INITIALIZE_PASS(MetaRenamer, "metarenamer", + "Assign new names to everything", false, false) +//===----------------------------------------------------------------------===// +// +// MetaRenamer - Rename everything with metasyntactic names. +// +ModulePass *llvm::createMetaRenamerPass() { + return new MetaRenamer(); +} diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp index dd5e20e..558de9d 100644 --- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp +++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp @@ -212,9 +212,13 @@ namespace { /// DenseMap AllocaLookup; - /// NewPhiNodes - The PhiNodes we're adding. + /// NewPhiNodes - The PhiNodes we're adding. That map is used to simplify + /// some Phi nodes as we iterate over it, so it should have deterministic + /// iterators. We could use a MapVector, but since we already maintain a + /// map from BasicBlock* to a stable numbering (BBNumbers), the DenseMap is + /// more efficient (also supports removal). /// - DenseMap, PHINode*> NewPhiNodes; + DenseMap, PHINode*> NewPhiNodes; /// PhiToAllocaMap - For each PHI node, keep track of which entry in Allocas /// it corresponds to. @@ -588,7 +592,11 @@ void PromoteMem2Reg::run() { while (EliminatedAPHI) { EliminatedAPHI = false; - for (DenseMap, PHINode*>::iterator I = + // Iterating over NewPhiNodes is deterministic, so it is safe to try to + // simplify and RAUW them as we go. If it was not, we could add uses to + // the values we replace with in a non deterministic order, thus creating + // non deterministic def->use chains. + for (DenseMap, PHINode*>::iterator I = NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E;) { PHINode *PN = I->second; @@ -612,7 +620,7 @@ void PromoteMem2Reg::run() { // have incoming values for all predecessors. Loop over all PHI nodes we have // created, inserting undef values if they are missing any incoming values. 
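The MetaRenamer pass above is deterministic for a given module: the only state is the PRNG, and its seed is the additive byte sum of the module identifier, so running the pass twice over the same module produces the same names. A self-contained sketch of that property using the same recurrence (the "demo.ll" identifier is made up):

    #include <cassert>
    #include <string>

    // Same recurrence as the PRNG struct above (ISO C rand()).
    static int nextIndex(unsigned long &State) {
      State = State * 1103515245 + 12345;
      return (unsigned int)(State / 65536) % 32768;
    }

    int main() {
      std::string ModuleID = "demo.ll";     // Stand-in for Module::getModuleIdentifier().
      unsigned int Seed = 0;
      for (std::string::const_iterator I = ModuleID.begin(), E = ModuleID.end();
           I != E; ++I)
        Seed += *I;                         // Additive byte sum, as in runOnModule().

      unsigned long A = Seed, B = Seed;     // Two runs with the same seed...
      for (int i = 0; i != 4; ++i)
        assert(nextIndex(A) == nextIndex(B)); // ...select the same metaNames entries.
      return 0;
    }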
// - for (DenseMap, PHINode*>::iterator I = + for (DenseMap, PHINode*>::iterator I = NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E; ++I) { // We want to do this once per basic block. As such, only process a block // when we find the PHI that is the first entry in the block. @@ -992,7 +1000,7 @@ void PromoteMem2Reg::PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info, bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo, unsigned &Version) { // Look up the basic-block in question. - PHINode *&PN = NewPhiNodes[std::make_pair(BB, AllocaNo)]; + PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)]; // If the BB already has a phi node added for the i'th alloca then we're done! if (PN) return false; diff --git a/lib/Transforms/Utils/SSAUpdater.cpp b/lib/Transforms/Utils/SSAUpdater.cpp index e568a61..72d4199 100644 --- a/lib/Transforms/Utils/SSAUpdater.cpp +++ b/lib/Transforms/Utils/SSAUpdater.cpp @@ -39,7 +39,7 @@ SSAUpdater::SSAUpdater(SmallVectorImpl *NewPHI) : AV(0), ProtoType(0), ProtoName(), InsertedPHIs(NewPHI) {} SSAUpdater::~SSAUpdater() { - delete &getAvailableVals(AV); + delete static_cast(AV); } /// Initialize - Reset this object to get ready for a new set of SSA diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp index 518df7c..c767da6 100644 --- a/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/lib/Transforms/Utils/SimplifyCFG.cpp @@ -14,6 +14,7 @@ #define DEBUG_TYPE "simplifycfg" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Constants.h" +#include "llvm/DataLayout.h" #include "llvm/DerivedTypes.h" #include "llvm/GlobalVariable.h" #include "llvm/IRBuilder.h" @@ -22,6 +23,7 @@ #include "llvm/LLVMContext.h" #include "llvm/MDBuilder.h" #include "llvm/Metadata.h" +#include "llvm/Module.h" #include "llvm/Operator.h" #include "llvm/Type.h" #include "llvm/ADT/DenseMap.h" @@ -38,7 +40,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/NoFolder.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/Target/TargetData.h" +#include "llvm/TargetTransformInfo.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" #include #include @@ -53,6 +55,13 @@ static cl::opt DupRet("simplifycfg-dup-ret", cl::Hidden, cl::init(false), cl::desc("Duplicate return instructions into unconditional branches")); +static cl::opt +SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true), + cl::desc("Sink common instructions down to the end block")); + +STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps"); +STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables"); +STATISTIC(NumSinkCommons, "Number of common instructions sunk down to the end block"); STATISTIC(NumSpeculations, "Number of speculative executed instructions"); namespace { @@ -68,10 +77,13 @@ namespace { // Comparing pointers is ok as we only rely on the order for uniquing. 
return Value < RHS.Value; } + + bool operator==(BasicBlock *RHSDest) const { return Dest == RHSDest; } }; class SimplifyCFGOpt { - const TargetData *const TD; + const DataLayout *const TD; + const TargetTransformInfo *const TTI; Value *isValueEqualityComparison(TerminatorInst *TI); BasicBlock *GetValueEqualityComparisonCases(TerminatorInst *TI, @@ -91,7 +103,8 @@ class SimplifyCFGOpt { bool SimplifyCondBranch(BranchInst *BI, IRBuilder <>&Builder); public: - explicit SimplifyCFGOpt(const TargetData *td) : TD(td) {} + SimplifyCFGOpt(const DataLayout *td, const TargetTransformInfo *tti) + : TD(td), TTI(tti) {} bool run(BasicBlock *BB); }; } @@ -101,14 +114,14 @@ public: /// static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) { if (SI1 == SI2) return false; // Can't merge with self! - + // It is not safe to merge these two switch instructions if they have a common // successor, and if that successor has a PHI node, and if *that* PHI node has // conflicting incoming values from the two switch blocks. BasicBlock *SI1BB = SI1->getParent(); BasicBlock *SI2BB = SI2->getParent(); SmallPtrSet SI1Succs(succ_begin(SI1BB), succ_end(SI1BB)); - + for (succ_iterator I = succ_begin(SI2BB), E = succ_end(SI2BB); I != E; ++I) if (SI1Succs.count(*I)) for (BasicBlock::iterator BBI = (*I)->begin(); @@ -118,7 +131,7 @@ static bool SafeToMergeTerminators(TerminatorInst *SI1, TerminatorInst *SI2) { PN->getIncomingValueForBlock(SI2BB)) return false; } - + return true; } @@ -135,7 +148,7 @@ static bool isProfitableToFoldUnconditional(BranchInst *SI1, assert(SI1->isUnconditional() && SI2->isConditional()); // We fold the unconditional branch if we can easily update all PHI nodes in - // common successors: + // common successors: // 1> We have a constant incoming value for the conditional branch; // 2> We have "Cond" as the incoming value for the unconditional branch; // 3> SI2->getCondition() and Cond have same operands. @@ -170,7 +183,7 @@ static bool isProfitableToFoldUnconditional(BranchInst *SI1, static void AddPredecessorToBlock(BasicBlock *Succ, BasicBlock *NewPred, BasicBlock *ExistPred) { if (!isa(Succ->begin())) return; // Quick exit if nothing to do - + PHINode *PN; for (BasicBlock::iterator I = Succ->begin(); (PN = dyn_cast(I)); ++I) @@ -222,7 +235,7 @@ static Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue, // doesn't dominate BB. if (Pred2->getSinglePredecessor() == 0) return 0; - + // If we found a conditional branch predecessor, make sure that it branches // to BB and Pred2Br. If it doesn't, this isn't an "if statement". if (Pred1Br->getSuccessor(0) == BB && @@ -252,7 +265,7 @@ static Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue, // Otherwise, if this is a conditional branch, then we can use it! BranchInst *BI = dyn_cast(CommonPred->getTerminator()); if (BI == 0) return 0; - + assert(BI->isConditional() && "Two successors but not conditional?"); if (BI->getSuccessor(0) == Pred1) { IfTrue = Pred1; @@ -345,7 +358,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB, // If we aren't allowing aggressive promotion anymore, then don't consider // instructions in the 'if region'. if (AggressiveInsts == 0) return false; - + // If we have seen this instruction before, don't count it again. if (AggressiveInsts->count(I)) return true; @@ -374,7 +387,7 @@ static bool DominatesMergePoint(Value *V, BasicBlock *BB, /// GetConstantInt - Extract ConstantInt from value, looking through IntToPtr /// and PointerNullValue. Return NULL if value is not a constant int. 
-static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) { +static ConstantInt *GetConstantInt(Value *V, const DataLayout *TD) { // Normal constant int. ConstantInt *CI = dyn_cast(V); if (CI || !TD || !isa(V) || !V->getType()->isPointerTy()) @@ -382,7 +395,7 @@ static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) { // This is some kind of pointer constant. Turn it into a pointer-sized // ConstantInt if possible. - IntegerType *PtrTy = TD->getIntPtrType(V->getContext()); + IntegerType *PtrTy = cast(TD->getIntPtrType(V->getType())); // Null pointer means 0, see SelectionDAGBuilder::getValue(const Value*). if (isa(V)) @@ -408,10 +421,10 @@ static ConstantInt *GetConstantInt(Value *V, const TargetData *TD) { /// Values vector. static Value * GatherConstantCompares(Value *V, std::vector &Vals, Value *&Extra, - const TargetData *TD, bool isEQ, unsigned &UsedICmps) { + const DataLayout *TD, bool isEQ, unsigned &UsedICmps) { Instruction *I = dyn_cast(V); if (I == 0) return 0; - + // If this is an icmp against a constant, handle this as one of the cases. if (ICmpInst *ICI = dyn_cast(I)) { if (ConstantInt *C = GetConstantInt(I->getOperand(1), TD)) { @@ -420,21 +433,21 @@ GatherConstantCompares(Value *V, std::vector &Vals, Value *&Extra, Vals.push_back(C); return I->getOperand(0); } - + // If we have "x ult 3" comparison, for example, then we can add 0,1,2 to // the set. ConstantRange Span = ConstantRange::makeICmpRegion(ICI->getPredicate(), C->getValue()); - + // If this is an and/!= check then we want to optimize "x ugt 2" into // x != 0 && x != 1. if (!isEQ) Span = Span.inverse(); - + // If there are a ton of values, we don't want to make a ginormous switch. if (Span.getSetSize().ugt(8) || Span.isEmptySet()) return 0; - + for (APInt Tmp = Span.getLower(); Tmp != Span.getUpper(); ++Tmp) Vals.push_back(ConstantInt::get(V->getContext(), Tmp)); UsedICmps++; @@ -442,11 +455,11 @@ GatherConstantCompares(Value *V, std::vector &Vals, Value *&Extra, } return 0; } - + // Otherwise, we can only handle an | or &, depending on isEQ. if (I->getOpcode() != (isEQ ? Instruction::Or : Instruction::And)) return 0; - + unsigned NumValsBeforeLHS = Vals.size(); unsigned UsedICmpsBeforeLHS = UsedICmps; if (Value *LHS = GatherConstantCompares(I->getOperand(0), Vals, Extra, TD, @@ -467,12 +480,12 @@ GatherConstantCompares(Value *V, std::vector &Vals, Value *&Extra, Extra = I->getOperand(1); return LHS; } - + Vals.resize(NumValsBeforeLHS); UsedICmps = UsedICmpsBeforeLHS; return 0; } - + // If the LHS can't be folded in, but Extra is available and RHS can, try to // use LHS as Extra. if (Extra == 0 || Extra == I->getOperand(0)) { @@ -484,7 +497,7 @@ GatherConstantCompares(Value *V, std::vector &Vals, Value *&Extra, assert(Vals.size() == NumValsBeforeLHS); Extra = OldExtra; } - + return 0; } @@ -556,11 +569,7 @@ GetValueEqualityComparisonCases(TerminatorInst *TI, /// in the list that match the specified block. static void EliminateBlockCases(BasicBlock *BB, std::vector &Cases) { - for (unsigned i = 0, e = Cases.size(); i != e; ++i) - if (Cases[i].Dest == BB) { - Cases.erase(Cases.begin()+i); - --i; --e; - } + Cases.erase(std::remove(Cases.begin(), Cases.end(), BB), Cases.end()); } /// ValuesOverlap - Return true if there are any keys in C1 that exist in C2 as @@ -615,6 +624,9 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI, assert(ThisVal && "This isn't a value comparison!!"); if (ThisVal != PredVal) return false; // Different predicates. 
+ // TODO: Preserve branch weight metadata, similarly to how + // FoldValueComparisonIntoPredecessors preserves it. + // Find out information about when control will move from Pred to TI's block. std::vector PredCases; BasicBlock *PredDef = GetValueEqualityComparisonCases(Pred->getTerminator(), @@ -634,7 +646,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI, // can simplify TI. if (!ValuesOverlap(PredCases, ThisCases)) return false; - + if (isa(TI)) { // Okay, one of the successors of this condbr is dead. Convert it to a // uncond br. @@ -652,7 +664,7 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI, EraseTerminatorInstAndDCECond(TI); return true; } - + SwitchInst *SI = cast(TI); // Okay, TI has cases that are statically dead, prune them away. SmallPtrSet DeadCases; @@ -662,18 +674,37 @@ SimplifyEqualityComparisonWithOnlyPredecessor(TerminatorInst *TI, DEBUG(dbgs() << "Threading pred instr: " << *Pred->getTerminator() << "Through successor TI: " << *TI); + // Collect branch weights into a vector. + SmallVector Weights; + MDNode* MD = SI->getMetadata(LLVMContext::MD_prof); + bool HasWeight = MD && (MD->getNumOperands() == 2 + SI->getNumCases()); + if (HasWeight) + for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e; + ++MD_i) { + ConstantInt* CI = dyn_cast(MD->getOperand(MD_i)); + assert(CI); + Weights.push_back(CI->getValue().getZExtValue()); + } for (SwitchInst::CaseIt i = SI->case_end(), e = SI->case_begin(); i != e;) { --i; if (DeadCases.count(i.getCaseValue())) { + if (HasWeight) { + std::swap(Weights[i.getCaseIndex()+1], Weights.back()); + Weights.pop_back(); + } i.getCaseSuccessor()->removePredecessor(TI->getParent()); SI->removeCase(i); } } + if (HasWeight && Weights.size() >= 2) + SI->setMetadata(LLVMContext::MD_prof, + MDBuilder(SI->getParent()->getContext()). + createBranchWeights(Weights)); DEBUG(dbgs() << "Leaving: " << *TI << "\n"); return true; } - + // Otherwise, TI's block must correspond to some matched value. Find out // which value (or set of values) this is. ConstantInt *TIV = 0; @@ -729,8 +760,8 @@ namespace { } static int ConstantIntSortPredicate(const void *P1, const void *P2) { - const ConstantInt *LHS = *(const ConstantInt**)P1; - const ConstantInt *RHS = *(const ConstantInt**)P2; + const ConstantInt *LHS = *(const ConstantInt*const*)P1; + const ConstantInt *RHS = *(const ConstantInt*const*)P2; if (LHS->getValue().ult(RHS->getValue())) return 1; if (LHS->getValue() == RHS->getValue()) @@ -738,6 +769,56 @@ static int ConstantIntSortPredicate(const void *P1, const void *P2) { return -1; } +static inline bool HasBranchWeights(const Instruction* I) { + MDNode* ProfMD = I->getMetadata(LLVMContext::MD_prof); + if (ProfMD && ProfMD->getOperand(0)) + if (MDString* MDS = dyn_cast(ProfMD->getOperand(0))) + return MDS->getString().equals("branch_weights"); + + return false; +} + +/// Get Weights of a given TerminatorInst, the default weight is at the front +/// of the vector. If TI is a conditional eq, we need to swap the branch-weight +/// metadata. +static void GetBranchWeights(TerminatorInst *TI, + SmallVectorImpl &Weights) { + MDNode* MD = TI->getMetadata(LLVMContext::MD_prof); + assert(MD); + for (unsigned i = 1, e = MD->getNumOperands(); i < e; ++i) { + ConstantInt* CI = dyn_cast(MD->getOperand(i)); + assert(CI); + Weights.push_back(CI->getValue().getZExtValue()); + } + + // If TI is a conditional eq, the default case is the false case, + // and the corresponding branch-weight data is at index 2. 
We swap the + // default weight to be the first entry. + if (BranchInst* BI = dyn_cast(TI)) { + assert(Weights.size() == 2); + ICmpInst *ICI = cast(BI->getCondition()); + if (ICI->getPredicate() == ICmpInst::ICMP_EQ) + std::swap(Weights.front(), Weights.back()); + } +} + +/// Sees if any of the weights are too big for a uint32_t, and halves all the +/// weights if any are. +static void FitWeights(MutableArrayRef Weights) { + bool Halve = false; + for (unsigned i = 0; i < Weights.size(); ++i) + if (Weights[i] > UINT_MAX) { + Halve = true; + break; + } + + if (! Halve) + return; + + for (unsigned i = 0; i < Weights.size(); ++i) + Weights[i] /= 2; +} + /// FoldValueComparisonIntoPredecessors - The specified terminator is a value /// equality comparison instruction (either a switch or a branch on "X == c"). /// See if any of the predecessors of the terminator block are value comparisons @@ -770,6 +851,31 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, // build. SmallVector NewSuccessors; + // Update the branch weight metadata along the way + SmallVector Weights; + bool PredHasWeights = HasBranchWeights(PTI); + bool SuccHasWeights = HasBranchWeights(TI); + + if (PredHasWeights) { + GetBranchWeights(PTI, Weights); + // branch-weight metadata is inconsistant here. + if (Weights.size() != 1 + PredCases.size()) + PredHasWeights = SuccHasWeights = false; + } else if (SuccHasWeights) + // If there are no predecessor weights but there are successor weights, + // populate Weights with 1, which will later be scaled to the sum of + // successor's weights + Weights.assign(1 + PredCases.size(), 1); + + SmallVector SuccWeights; + if (SuccHasWeights) { + GetBranchWeights(TI, SuccWeights); + // branch-weight metadata is inconsistant here. + if (SuccWeights.size() != 1 + BBCases.size()) + PredHasWeights = SuccHasWeights = false; + } else if (PredHasWeights) + SuccWeights.assign(1 + BBCases.size(), 1); + if (PredDefault == BB) { // If this is the default destination from PTI, only the edges in TI // that don't occur in PTI, or that branch to BB will be activated. @@ -780,6 +886,14 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, else { // The default destination is BB, we don't need explicit targets. std::swap(PredCases[i], PredCases.back()); + + if (PredHasWeights || SuccHasWeights) { + // Increase weight for the default case. + Weights[0] += Weights[i+1]; + std::swap(Weights[i+1], Weights.back()); + Weights.pop_back(); + } + PredCases.pop_back(); --i; --e; } @@ -790,21 +904,47 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, PredDefault = BBDefault; NewSuccessors.push_back(BBDefault); } + + unsigned CasesFromPred = Weights.size(); + uint64_t ValidTotalSuccWeight = 0; for (unsigned i = 0, e = BBCases.size(); i != e; ++i) if (!PTIHandled.count(BBCases[i].Value) && BBCases[i].Dest != BBDefault) { PredCases.push_back(BBCases[i]); NewSuccessors.push_back(BBCases[i].Dest); + if (SuccHasWeights || PredHasWeights) { + // The default weight is at index 0, so weight for the ith case + // should be at index i+1. Scale the cases from successor by + // PredDefaultWeight (Weights[0]). + Weights.push_back(Weights[0] * SuccWeights[i+1]); + ValidTotalSuccWeight += SuccWeights[i+1]; + } } + if (SuccHasWeights || PredHasWeights) { + ValidTotalSuccWeight += SuccWeights[0]; + // Scale the cases from predecessor by ValidTotalSuccWeight. 
+ for (unsigned i = 1; i < CasesFromPred; ++i) + Weights[i] *= ValidTotalSuccWeight; + // Scale the default weight by SuccDefaultWeight (SuccWeights[0]). + Weights[0] *= SuccWeights[0]; + } } else { // If this is not the default destination from PSI, only the edges // in SI that occur in PSI with a destination of BB will be // activated. std::set PTIHandled; + std::map WeightsForHandled; for (unsigned i = 0, e = PredCases.size(); i != e; ++i) if (PredCases[i].Dest == BB) { PTIHandled.insert(PredCases[i].Value); + + if (PredHasWeights || SuccHasWeights) { + WeightsForHandled[PredCases[i].Value] = Weights[i+1]; + std::swap(Weights[i+1], Weights.back()); + Weights.pop_back(); + } + std::swap(PredCases[i], PredCases.back()); PredCases.pop_back(); --i; --e; @@ -815,6 +955,8 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, for (unsigned i = 0, e = BBCases.size(); i != e; ++i) if (PTIHandled.count(BBCases[i].Value)) { // If this is one we are capable of getting... + if (PredHasWeights || SuccHasWeights) + Weights.push_back(WeightsForHandled[BBCases[i].Value]); PredCases.push_back(BBCases[i]); NewSuccessors.push_back(BBCases[i].Dest); PTIHandled.erase(BBCases[i].Value);// This constant is taken care of @@ -822,9 +964,11 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, // If there are any constants vectored to BB that TI doesn't handle, // they must go to the default destination of TI. - for (std::set::iterator I = + for (std::set::iterator I = PTIHandled.begin(), E = PTIHandled.end(); I != E; ++I) { + if (PredHasWeights || SuccHasWeights) + Weights.push_back(WeightsForHandled[*I]); PredCases.push_back(ValueEqualityComparisonCase(*I, BBDefault)); NewSuccessors.push_back(BBDefault); } @@ -839,7 +983,7 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, Builder.SetInsertPoint(PTI); // Convert pointer to int before we switch. if (CV->getType()->isPointerTy()) { - assert(TD && "Cannot switch on pointer without TargetData"); + assert(TD && "Cannot switch on pointer without DataLayout"); CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getContext()), "magicptr"); } @@ -851,6 +995,17 @@ bool SimplifyCFGOpt::FoldValueComparisonIntoPredecessors(TerminatorInst *TI, for (unsigned i = 0, e = PredCases.size(); i != e; ++i) NewSI->addCase(PredCases[i].Value, PredCases[i].Dest); + if (PredHasWeights || SuccHasWeights) { + // Halve the weights if any of them cannot fit in an uint32_t + FitWeights(Weights); + + SmallVector MDWeights(Weights.begin(), Weights.end()); + + NewSI->setMetadata(LLVMContext::MD_prof, + MDBuilder(BB->getContext()). + createBranchWeights(MDWeights)); + } + EraseTerminatorInstAndDCECond(PTI); // Okay, last check. If BB is still a successor of PSI, then we must @@ -984,11 +1139,11 @@ HoistTerminator: Value *BB1V = PN->getIncomingValueForBlock(BB1); Value *BB2V = PN->getIncomingValueForBlock(BB2); if (BB1V == BB2V) continue; - + // These values do not agree. Insert a select instruction before NT // that determines the right value. SelectInst *&SI = InsertedSelects[std::make_pair(BB1V, BB2V)]; - if (SI == 0) + if (SI == 0) SI = cast (Builder.CreateSelect(BI->getCondition(), BB1V, BB2V, BB1V->getName()+"."+BB2V->getName())); @@ -1008,6 +1163,175 @@ HoistTerminator: return true; } +/// SinkThenElseCodeToEnd - Given an unconditional branch that goes to BBEnd, +/// check whether BBEnd has only two predecessors and the other predecessor +/// ends with an unconditional branch. 
If it is true, sink any common code +/// in the two predecessors to BBEnd. +static bool SinkThenElseCodeToEnd(BranchInst *BI1) { + assert(BI1->isUnconditional()); + BasicBlock *BB1 = BI1->getParent(); + BasicBlock *BBEnd = BI1->getSuccessor(0); + + // Check that BBEnd has two predecessors and the other predecessor ends with + // an unconditional branch. + pred_iterator PI = pred_begin(BBEnd), PE = pred_end(BBEnd); + BasicBlock *Pred0 = *PI++; + if (PI == PE) // Only one predecessor. + return false; + BasicBlock *Pred1 = *PI++; + if (PI != PE) // More than two predecessors. + return false; + BasicBlock *BB2 = (Pred0 == BB1) ? Pred1 : Pred0; + BranchInst *BI2 = dyn_cast(BB2->getTerminator()); + if (!BI2 || !BI2->isUnconditional()) + return false; + + // Gather the PHI nodes in BBEnd. + std::map > MapValueFromBB1ToBB2; + Instruction *FirstNonPhiInBBEnd = 0; + for (BasicBlock::iterator I = BBEnd->begin(), E = BBEnd->end(); + I != E; ++I) { + if (PHINode *PN = dyn_cast(I)) { + Value *BB1V = PN->getIncomingValueForBlock(BB1); + Value *BB2V = PN->getIncomingValueForBlock(BB2); + MapValueFromBB1ToBB2[BB1V] = std::make_pair(BB2V, PN); + } else { + FirstNonPhiInBBEnd = &*I; + break; + } + } + if (!FirstNonPhiInBBEnd) + return false; + + + // This does very trivial matching, with limited scanning, to find identical + // instructions in the two blocks. We scan backward for obviously identical + // instructions in an identical order. + BasicBlock::InstListType::reverse_iterator RI1 = BB1->getInstList().rbegin(), + RE1 = BB1->getInstList().rend(), RI2 = BB2->getInstList().rbegin(), + RE2 = BB2->getInstList().rend(); + // Skip debug info. + while (RI1 != RE1 && isa(&*RI1)) ++RI1; + if (RI1 == RE1) + return false; + while (RI2 != RE2 && isa(&*RI2)) ++RI2; + if (RI2 == RE2) + return false; + // Skip the unconditional branches. + ++RI1; + ++RI2; + + bool Changed = false; + while (RI1 != RE1 && RI2 != RE2) { + // Skip debug info. + while (RI1 != RE1 && isa(&*RI1)) ++RI1; + if (RI1 == RE1) + return Changed; + while (RI2 != RE2 && isa(&*RI2)) ++RI2; + if (RI2 == RE2) + return Changed; + + Instruction *I1 = &*RI1, *I2 = &*RI2; + // I1 and I2 should have a single use in the same PHI node, and they + // perform the same operation. + // Cannot move control-flow-involving, volatile loads, vaarg, etc. + if (isa(I1) || isa(I2) || + isa(I1) || isa(I2) || + isa(I1) || isa(I2) || + isa(I1) || isa(I2) || + I1->mayHaveSideEffects() || I2->mayHaveSideEffects() || + I1->mayReadOrWriteMemory() || I2->mayReadOrWriteMemory() || + !I1->hasOneUse() || !I2->hasOneUse() || + MapValueFromBB1ToBB2.find(I1) == MapValueFromBB1ToBB2.end() || + MapValueFromBB1ToBB2[I1].first != I2) + return Changed; + + // Check whether we should swap the operands of ICmpInst. + ICmpInst *ICmp1 = dyn_cast(I1), *ICmp2 = dyn_cast(I2); + bool SwapOpnds = false; + if (ICmp1 && ICmp2 && + ICmp1->getOperand(0) != ICmp2->getOperand(0) && + ICmp1->getOperand(1) != ICmp2->getOperand(1) && + (ICmp1->getOperand(0) == ICmp2->getOperand(1) || + ICmp1->getOperand(1) == ICmp2->getOperand(0))) { + ICmp2->swapOperands(); + SwapOpnds = true; + } + if (!I1->isSameOperationAs(I2)) { + if (SwapOpnds) + ICmp2->swapOperands(); + return Changed; + } + + // The operands should be either the same or they need to be generated + // with a PHI node after sinking. We only handle the case where there is + // a single pair of different operands. 
+ Value *DifferentOp1 = 0, *DifferentOp2 = 0; + unsigned Op1Idx = 0; + for (unsigned I = 0, E = I1->getNumOperands(); I != E; ++I) { + if (I1->getOperand(I) == I2->getOperand(I)) + continue; + // Early exit if we have more-than one pair of different operands or + // the different operand is already in MapValueFromBB1ToBB2. + // Early exit if we need a PHI node to replace a constant. + if (DifferentOp1 || + MapValueFromBB1ToBB2.find(I1->getOperand(I)) != + MapValueFromBB1ToBB2.end() || + isa(I1->getOperand(I)) || + isa(I2->getOperand(I))) { + // If we can't sink the instructions, undo the swapping. + if (SwapOpnds) + ICmp2->swapOperands(); + return Changed; + } + DifferentOp1 = I1->getOperand(I); + Op1Idx = I; + DifferentOp2 = I2->getOperand(I); + } + + // We insert the pair of different operands to MapValueFromBB1ToBB2 and + // remove (I1, I2) from MapValueFromBB1ToBB2. + if (DifferentOp1) { + PHINode *NewPN = PHINode::Create(DifferentOp1->getType(), 2, + DifferentOp1->getName() + ".sink", + BBEnd->begin()); + MapValueFromBB1ToBB2[DifferentOp1] = std::make_pair(DifferentOp2, NewPN); + // I1 should use NewPN instead of DifferentOp1. + I1->setOperand(Op1Idx, NewPN); + NewPN->addIncoming(DifferentOp1, BB1); + NewPN->addIncoming(DifferentOp2, BB2); + DEBUG(dbgs() << "Create PHI node " << *NewPN << "\n";); + } + PHINode *OldPN = MapValueFromBB1ToBB2[I1].second; + MapValueFromBB1ToBB2.erase(I1); + + DEBUG(dbgs() << "SINK common instructions " << *I1 << "\n";); + DEBUG(dbgs() << " " << *I2 << "\n";); + // We need to update RE1 and RE2 if we are going to sink the first + // instruction in the basic block down. + bool UpdateRE1 = (I1 == BB1->begin()), UpdateRE2 = (I2 == BB2->begin()); + // Sink the instruction. + BBEnd->getInstList().splice(FirstNonPhiInBBEnd, BB1->getInstList(), I1); + if (!OldPN->use_empty()) + OldPN->replaceAllUsesWith(I1); + OldPN->eraseFromParent(); + + if (!I2->use_empty()) + I2->replaceAllUsesWith(I1); + I1->intersectOptionalDataWith(I2); + I2->eraseFromParent(); + + if (UpdateRE1) + RE1 = BB1->getInstList().rend(); + if (UpdateRE2) + RE2 = BB2->getInstList().rend(); + FirstNonPhiInBBEnd = I1; + NumSinkCommons++; + Changed = true; + } + return Changed; +} + /// SpeculativelyExecuteBB - Given a conditional branch that goes to BB1 /// and an BB2 and the only successor of BB1 is BB2, hoist simple code /// (for now, restricted to a single instruction that's side effect free) from @@ -1056,7 +1380,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) { // Do not hoist the instruction if any of its operands are defined but not // used in this BB. The transformation will prevent the operand from // being sunk into the use block. - for (User::op_iterator i = HInst->op_begin(), e = HInst->op_end(); + for (User::op_iterator i = HInst->op_begin(), e = HInst->op_end(); i != e; ++i) { Instruction *OpI = dyn_cast(*i); if (OpI && OpI->getParent() == BIParent && @@ -1112,7 +1436,7 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) { // as well. if (PHIs.empty()) return false; - + // If we get here, we can hoist the instruction and if-convert. 
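Both the sinking code above and SpeculativelyExecuteBB end the same way: once the value computed on the conditional side has been made available in the dominating block, the merging PHI node collapses into a select on the branch condition. A reduced sketch of that last step; the helper name and parameters are invented, and it assumes ThenV and ElseV already dominate BI.

    #include "llvm/IRBuilder.h"
    #include "llvm/Instructions.h"

    // Hypothetical helper: rewrite PN (which merged ThenV/ElseV at the join
    // block) as a select on BI's condition, inserted before the branch.
    static void phiToSelect(llvm::PHINode *PN, llvm::BranchInst *BI,
                            llvm::Value *ThenV, llvm::Value *ElseV) {
      using namespace llvm;
      IRBuilder<> Builder(BI);              // Insert just before the cond. branch.
      Value *Sel = Builder.CreateSelect(BI->getCondition(), ThenV, ElseV,
                                        ThenV->getName() + "." + ElseV->getName());
      PN->replaceAllUsesWith(Sel);          // PHI users now read the select.
      PN->eraseFromParent();
    }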
DEBUG(dbgs() << "SPECULATIVELY EXECUTING BB" << *BB1 << "\n";); @@ -1162,13 +1486,13 @@ static bool SpeculativelyExecuteBB(BranchInst *BI, BasicBlock *BB1) { static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) { BranchInst *BI = cast(BB->getTerminator()); unsigned Size = 0; - + for (BasicBlock::iterator BBI = BB->begin(); &*BBI != BI; ++BBI) { if (isa(BBI)) continue; if (Size > 10) return false; // Don't clone large BB's. ++Size; - + // We can only support instructions that do not define values that are // live outside of the current basic block. for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end(); @@ -1176,7 +1500,7 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) { Instruction *U = cast(*UI); if (U->getParent() != BB || isa(U)) return false; } - + // Looks ok, continue checking. } @@ -1187,38 +1511,38 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) { /// that is defined in the same block as the branch and if any PHI entries are /// constants, thread edges corresponding to that entry to be branches to their /// ultimate destination. -static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) { +static bool FoldCondBranchOnPHI(BranchInst *BI, const DataLayout *TD) { BasicBlock *BB = BI->getParent(); PHINode *PN = dyn_cast(BI->getCondition()); // NOTE: we currently cannot transform this case if the PHI node is used // outside of the block. if (!PN || PN->getParent() != BB || !PN->hasOneUse()) return false; - + // Degenerate case of a single entry PHI. if (PN->getNumIncomingValues() == 1) { FoldSingleEntryPHINodes(PN->getParent()); - return true; + return true; } // Now we know that this block has multiple preds and two succs. if (!BlockIsSimpleEnoughToThreadThrough(BB)) return false; - + // Okay, this is a simple enough basic block. See if any phi values are // constants. for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { ConstantInt *CB = dyn_cast(PN->getIncomingValue(i)); if (CB == 0 || !CB->getType()->isIntegerTy(1)) continue; - + // Okay, we now know that all edges from PredBB should be revectored to // branch to RealDest. BasicBlock *PredBB = PN->getIncomingBlock(i); BasicBlock *RealDest = BI->getSuccessor(!CB->getZExtValue()); - + if (RealDest == BB) continue; // Skip self loops. // Skip if the predecessor's terminator is an indirect branch. if (isa(PredBB->getTerminator())) continue; - + // The dest block might have PHI nodes, other predecessors and other // difficult cases. Instead of being smart about this, just insert a new // block that jumps to the destination block, effectively splitting @@ -1227,7 +1551,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) { RealDest->getName()+".critedge", RealDest->getParent(), RealDest); BranchInst::Create(RealDest, EdgeBB); - + // Update PHI nodes. AddPredecessorToBlock(RealDest, EdgeBB, BB); @@ -1244,7 +1568,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) { // Clone the instruction. Instruction *N = BBI->clone(); if (BBI->hasName()) N->setName(BBI->getName()+".c"); - + // Update operands due to translation. for (User::op_iterator i = N->op_begin(), e = N->op_end(); i != e; ++i) { @@ -1252,7 +1576,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) { if (PI != TranslateMap.end()) *i = PI->second; } - + // Check for trivial simplification. 
if (Value *V = SimplifyInstruction(N, TD)) { TranslateMap[BBI] = V; @@ -1283,7 +1607,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI, const TargetData *TD) { /// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry /// PHI node, see if we can eliminate it. -static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { +static bool FoldTwoEntryPHINode(PHINode *PN, const DataLayout *TD) { // Ok, this is a two entry PHI node. Check to see if this is a simple "if // statement", which has a very simple dominance structure. Basically, we // are trying to find the condition that is being branched on, which @@ -1297,7 +1621,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { // Don't bother if the branch will be constant folded trivially. isa(IfCond)) return false; - + // Okay, we found that we can merge this two-entry phi node into a select. // Doing so would require us to fold *all* two entry phi nodes in this block. // At some point this becomes non-profitable (particularly if the target @@ -1307,14 +1631,14 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { for (BasicBlock::iterator I = BB->begin(); isa(I); ++NumPhis, ++I) if (NumPhis > 2) return false; - + // Loop over the PHI's seeing if we can promote them all to select // instructions. While we are at it, keep track of the instructions // that need to be moved to the dominating block. SmallPtrSet AggressiveInsts; unsigned MaxCostVal0 = PHINodeFoldingThreshold, MaxCostVal1 = PHINodeFoldingThreshold; - + for (BasicBlock::iterator II = BB->begin(); isa(II);) { PHINode *PN = cast(II++); if (Value *V = SimplifyInstruction(PN, TD)) { @@ -1322,19 +1646,19 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { PN->eraseFromParent(); continue; } - + if (!DominatesMergePoint(PN->getIncomingValue(0), BB, &AggressiveInsts, MaxCostVal0) || !DominatesMergePoint(PN->getIncomingValue(1), BB, &AggressiveInsts, MaxCostVal1)) return false; } - + // If we folded the first phi, PN dangles at this point. Refresh it. If // we ran out of PHIs then we simplified them all. PN = dyn_cast(BB->begin()); if (PN == 0) return true; - + // Don't fold i1 branches on PHIs which contain binary operators. These can // often be turned into switches and other things. if (PN->getType()->isIntegerTy(1) && @@ -1342,7 +1666,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { isa(PN->getIncomingValue(1)) || isa(IfCond))) return false; - + // If we all PHI nodes are promotable, check to make sure that all // instructions in the predecessor blocks can be promoted as well. If // not, we won't be able to get rid of the control flow, so it's not @@ -1362,7 +1686,7 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { return false; } } - + if (cast(IfBlock2->getTerminator())->isConditional()) { IfBlock2 = 0; } else { @@ -1375,15 +1699,15 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { return false; } } - + DEBUG(dbgs() << "FOUND IF CONDITION! " << *IfCond << " T: " << IfTrue->getName() << " F: " << IfFalse->getName() << "\n"); - + // If we can still promote the PHI nodes after this gauntlet of tests, // do all of the PHI's now. Instruction *InsertPt = DomBlock->getTerminator(); IRBuilder Builder(InsertPt); - + // Move all 'aggressive' instructions, which are defined in the // conditional parts of the if's up to the dominating block. 
if (IfBlock1) @@ -1394,19 +1718,19 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { DomBlock->getInstList().splice(InsertPt, IfBlock2->getInstList(), IfBlock2->begin(), IfBlock2->getTerminator()); - + while (PHINode *PN = dyn_cast(BB->begin())) { // Change the PHI node into a select instruction. Value *TrueVal = PN->getIncomingValue(PN->getIncomingBlock(0) == IfFalse); Value *FalseVal = PN->getIncomingValue(PN->getIncomingBlock(0) == IfTrue); - - SelectInst *NV = + + SelectInst *NV = cast(Builder.CreateSelect(IfCond, TrueVal, FalseVal, "")); PN->replaceAllUsesWith(NV); NV->takeName(PN); PN->eraseFromParent(); } - + // At this point, IfBlock1 and IfBlock2 are both empty, so our if statement // has been flattened. Change DomBlock to jump directly to our new block to // avoid other simplifycfg's kicking in on the diamond. @@ -1420,14 +1744,14 @@ static bool FoldTwoEntryPHINode(PHINode *PN, const TargetData *TD) { /// SimplifyCondBranchToTwoReturns - If we found a conditional branch that goes /// to two returning blocks, try to merge them together into one return, /// introducing a select if the return values disagree. -static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, +static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, IRBuilder<> &Builder) { assert(BI->isConditional() && "Must be a conditional branch"); BasicBlock *TrueSucc = BI->getSuccessor(0); BasicBlock *FalseSucc = BI->getSuccessor(1); ReturnInst *TrueRet = cast(TrueSucc->getTerminator()); ReturnInst *FalseRet = cast(FalseSucc->getTerminator()); - + // Check to ensure both blocks are empty (just a return) or optionally empty // with PHI nodes. If there are other instructions, merging would cause extra // computation on one path or the other. @@ -1447,12 +1771,12 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, EraseTerminatorInstAndDCECond(BI); return true; } - + // Otherwise, figure out what the true and false return values are // so we can insert a new select instruction. Value *TrueValue = TrueRet->getReturnValue(); Value *FalseValue = FalseRet->getReturnValue(); - + // Unwrap any PHI nodes in the return blocks. if (PHINode *TVPN = dyn_cast_or_null(TrueValue)) if (TVPN->getParent() == TrueSucc) @@ -1460,7 +1784,7 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, if (PHINode *FVPN = dyn_cast_or_null(FalseValue)) if (FVPN->getParent() == FalseSucc) FalseValue = FVPN->getIncomingValueForBlock(BI->getParent()); - + // In order for this transformation to be safe, we must be able to // unconditionally execute both operands to the return. This is // normally the case, but we could have a potentially-trapping @@ -1472,12 +1796,12 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, if (ConstantExpr *FCV = dyn_cast_or_null(FalseValue)) if (FCV->canTrap()) return false; - + // Okay, we collected all the mapped values and checked them for sanity, and // defined to really do this transformation. First, update the CFG. TrueSucc->removePredecessor(BI->getParent()); FalseSucc->removePredecessor(BI->getParent()); - + // Insert select instructions where needed. Value *BrCond = BI->getCondition(); if (TrueValue) { @@ -1491,15 +1815,15 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, } } - Value *RI = !TrueValue ? + Value *RI = !TrueValue ? 
Builder.CreateRetVoid() : Builder.CreateRet(TrueValue); (void) RI; - + DEBUG(dbgs() << "\nCHANGING BRANCH TO TWO RETURNS INTO SELECT:" << "\n " << *BI << "NewRet = " << *RI << "TRUEBLOCK: " << *TrueSucc << "FALSEBLOCK: "<< *FalseSucc); - + EraseTerminatorInstAndDCECond(BI); return true; @@ -1510,7 +1834,7 @@ static bool SimplifyCondBranchToTwoReturns(BranchInst *BI, /// parameters and return true, or returns false if no or invalid metadata was /// found. static bool ExtractBranchMetadata(BranchInst *BI, - APInt &ProbTrue, APInt &ProbFalse) { + uint64_t &ProbTrue, uint64_t &ProbFalse) { assert(BI->isConditional() && "Looking for probabilities on unconditional branch?"); MDNode *ProfileData = BI->getMetadata(LLVMContext::MD_prof); @@ -1518,35 +1842,11 @@ static bool ExtractBranchMetadata(BranchInst *BI, ConstantInt *CITrue = dyn_cast(ProfileData->getOperand(1)); ConstantInt *CIFalse = dyn_cast(ProfileData->getOperand(2)); if (!CITrue || !CIFalse) return false; - ProbTrue = CITrue->getValue(); - ProbFalse = CIFalse->getValue(); - assert(ProbTrue.getBitWidth() == 32 && ProbFalse.getBitWidth() == 32 && - "Branch probability metadata must be 32-bit integers"); + ProbTrue = CITrue->getValue().getZExtValue(); + ProbFalse = CIFalse->getValue().getZExtValue(); return true; } -/// MultiplyAndLosePrecision - Multiplies A and B, then returns the result. In -/// the event of overflow, logically-shifts all four inputs right until the -/// multiply fits. -static APInt MultiplyAndLosePrecision(APInt &A, APInt &B, APInt &C, APInt &D, - unsigned &BitsLost) { - BitsLost = 0; - bool Overflow = false; - APInt Result = A.umul_ov(B, Overflow); - if (Overflow) { - APInt MaxB = APInt::getMaxValue(A.getBitWidth()).udiv(A); - do { - B = B.lshr(1); - ++BitsLost; - } while (B.ugt(MaxB)); - A = A.lshr(BitsLost); - C = C.lshr(BitsLost); - D = D.lshr(BitsLost); - Result = A * B; - } - return Result; -} - /// checkCSEInPredecessor - Return true if the given instruction is available /// in its predecessor block. If yes, the instruction will be removed. /// @@ -1600,7 +1900,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { if (Cond == 0) return false; } - + if (Cond == 0 || (!isa(Cond) && !isa(Cond)) || Cond->getParent() != BB || !Cond->hasOneUse()) return false; @@ -1623,7 +1923,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { isSafeToSpeculativelyExecute(FrontIt)) { BonusInst = &*FrontIt; ++FrontIt; - + // Ignore dbg intrinsics. while (isa(FrontIt)) ++FrontIt; } @@ -1631,13 +1931,13 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { // Only a single bonus inst is allowed. if (&*FrontIt != Cond) return false; - + // Make sure the instruction after the condition is the cond branch. BasicBlock::iterator CondIt = Cond; ++CondIt; // Ingore dbg intrinsics. while (isa(CondIt)) ++CondIt; - + if (&*CondIt != BI) return false; @@ -1649,7 +1949,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { if (ConstantExpr *CE = dyn_cast(Cond->getOperand(1))) if (CE->canTrap()) return false; - + // Finally, don't infinitely unroll conditional loops. BasicBlock *TrueDest = BI->getSuccessor(0); BasicBlock *FalseDest = (BI->isConditional()) ? BI->getSuccessor(1) : 0; @@ -1659,22 +1959,22 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { BasicBlock *PredBlock = *PI; BranchInst *PBI = dyn_cast(PredBlock->getTerminator()); - + // Check that we have two conditional branches. 
If there is a PHI node in // the common successor, verify that the same value flows in from both // blocks. SmallVector PHIs; if (PBI == 0 || PBI->isUnconditional() || - (BI->isConditional() && + (BI->isConditional() && !SafeToMergeTerminators(BI, PBI)) || (!BI->isConditional() && !isProfitableToFoldUnconditional(BI, PBI, Cond, PHIs))) continue; - + // Determine if the two branches share a common destination. - Instruction::BinaryOps Opc; + Instruction::BinaryOps Opc = Instruction::BinaryOpsEnd; bool InvertPredCond = false; - + if (BI->isConditional()) { if (PBI->getSuccessor(0) == TrueDest) Opc = Instruction::Or; @@ -1693,7 +1993,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { // Ensure that any values used in the bonus instruction are also used // by the terminator of the predecessor. This means that those values - // must already have been resolved, so we won't be inhibiting the + // must already have been resolved, so we won't be inhibiting the // out-of-order core by speculating them earlier. if (BonusInst) { // Collect the values used by the bonus inst @@ -1707,47 +2007,47 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { SmallVector, 4> Worklist; Worklist.push_back(std::make_pair(PBI->getOperand(0), 0)); - + // Walk up to four levels back up the use-def chain of the predecessor's // terminator to see if all those values were used. The choice of four // levels is arbitrary, to provide a compile-time-cost bound. while (!Worklist.empty()) { std::pair Pair = Worklist.back(); Worklist.pop_back(); - + if (Pair.second >= 4) continue; UsedValues.erase(Pair.first); if (UsedValues.empty()) break; - + if (Instruction *I = dyn_cast(Pair.first)) { for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE; ++OI) Worklist.push_back(std::make_pair(OI->get(), Pair.second+1)); - } + } } - + if (!UsedValues.empty()) return false; } DEBUG(dbgs() << "FOLDING BRANCH TO COMMON DEST:\n" << *PBI << *BB); - IRBuilder<> Builder(PBI); + IRBuilder<> Builder(PBI); // If we need to invert the condition in the pred block to match, do so now. if (InvertPredCond) { Value *NewCond = PBI->getCondition(); - + if (NewCond->hasOneUse() && isa(NewCond)) { CmpInst *CI = cast(NewCond); CI->setPredicate(CI->getInversePredicate()); } else { - NewCond = Builder.CreateNot(NewCond, + NewCond = Builder.CreateNot(NewCond, PBI->getCondition()->getName()+".not"); } - + PBI->setCondition(NewCond); PBI->swapSuccessors(); } - + // If we have a bonus inst, clone it into the predecessor block. Instruction *NewBonus = 0; if (BonusInst) { @@ -1756,7 +2056,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { NewBonus->takeName(BonusInst); BonusInst->setName(BonusInst->getName()+".old"); } - + // Clone Cond into the predecessor basic block, and or/and the // two conditions together. 
Instruction *New = Cond->clone(); @@ -1764,21 +2064,60 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { PredBlock->getInstList().insert(PBI, New); New->takeName(Cond); Cond->setName(New->getName()+".old"); - + if (BI->isConditional()) { - Instruction *NewCond = + Instruction *NewCond = cast(Builder.CreateBinOp(Opc, PBI->getCondition(), New, "or.cond")); PBI->setCondition(NewCond); + uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight; + bool PredHasWeights = ExtractBranchMetadata(PBI, PredTrueWeight, + PredFalseWeight); + bool SuccHasWeights = ExtractBranchMetadata(BI, SuccTrueWeight, + SuccFalseWeight); + SmallVector NewWeights; + if (PBI->getSuccessor(0) == BB) { + if (PredHasWeights && SuccHasWeights) { + // PBI: br i1 %x, BB, FalseDest + // BI: br i1 %y, TrueDest, FalseDest + //TrueWeight is TrueWeight for PBI * TrueWeight for BI. + NewWeights.push_back(PredTrueWeight * SuccTrueWeight); + //FalseWeight is FalseWeight for PBI * TotalWeight for BI + + // TrueWeight for PBI * FalseWeight for BI. + // We assume that total weights of a BranchInst can fit into 32 bits. + // Therefore, we will not have overflow using 64-bit arithmetic. + NewWeights.push_back(PredFalseWeight * (SuccFalseWeight + + SuccTrueWeight) + PredTrueWeight * SuccFalseWeight); + } AddPredecessorToBlock(TrueDest, PredBlock, BB); PBI->setSuccessor(0, TrueDest); } if (PBI->getSuccessor(1) == BB) { + if (PredHasWeights && SuccHasWeights) { + // PBI: br i1 %x, TrueDest, BB + // BI: br i1 %y, TrueDest, FalseDest + //TrueWeight is TrueWeight for PBI * TotalWeight for BI + + // FalseWeight for PBI * TrueWeight for BI. + NewWeights.push_back(PredTrueWeight * (SuccFalseWeight + + SuccTrueWeight) + PredFalseWeight * SuccTrueWeight); + //FalseWeight is FalseWeight for PBI * FalseWeight for BI. + NewWeights.push_back(PredFalseWeight * SuccFalseWeight); + } AddPredecessorToBlock(FalseDest, PredBlock, BB); PBI->setSuccessor(1, FalseDest); } + if (NewWeights.size() == 2) { + // Halve the weights if any of them cannot fit in an uint32_t + FitWeights(NewWeights); + + SmallVector MDWeights(NewWeights.begin(),NewWeights.end()); + PBI->setMetadata(LLVMContext::MD_prof, + MDBuilder(BI->getContext()). + createBranchWeights(MDWeights)); + } else + PBI->setMetadata(LLVMContext::MD_prof, NULL); } else { // Update PHI nodes in the common successors. for (unsigned i = 0, e = PHIs.size(); i != e; ++i) { @@ -1806,7 +2145,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { // Create (PBI_Cond and BI_Value) or (!PBI_Cond and PBI_C) // PBI_C is true: (PBI_Cond and BI_Value) or (!PBI_Cond) // is false: PBI_Cond and BI_Value - MergedCond = + MergedCond = cast(Builder.CreateBinOp(Instruction::And, PBI->getCondition(), New, "and.cond")); @@ -1814,7 +2153,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { Instruction *NotCond = cast(Builder.CreateNot(PBI->getCondition(), "not.cond")); - MergedCond = + MergedCond = cast(Builder.CreateBinOp(Instruction::Or, NotCond, MergedCond, "or.cond")); @@ -1833,95 +2172,11 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) { // TODO: If BB is reachable from all paths through PredBlock, then we // could replace PBI's branch probabilities with BI's. - // Merge probability data into PredBlock's branch. 
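// A minimal standalone sketch of the weight math added above for the layout
//   PBI: br i1 %x, BB, FalseDest
//   BI:  br i1 %y, TrueDest, FalseDest
// Weights are per-edge counts; the products stay within 64 bits under the
// same assumption the patch makes, namely that each input fits in 32 bits.
#include <cstdint>
#include <utility>

static std::pair<uint64_t, uint64_t>
mergedWeights(uint64_t PredTrue, uint64_t PredFalse,
              uint64_t SuccTrue, uint64_t SuccFalse) {
  // TrueDest is reached only via PBI's true edge followed by BI's true edge.
  uint64_t NewTrue = PredTrue * SuccTrue;
  // FalseDest is reached either directly from PBI's false edge (times BI's
  // total weight) or via PBI's true edge followed by BI's false edge.
  uint64_t NewFalse = PredFalse * (SuccTrue + SuccFalse) + PredTrue * SuccFalse;
  return std::make_pair(NewTrue, NewFalse);
}
// Example: PredTrue=3, PredFalse=1, SuccTrue=4, SuccFalse=1
//   -> NewTrue = 12, NewFalse = 1*5 + 3*1 = 8.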
- APInt A, B, C, D; - if (PBI->isConditional() && BI->isConditional() && - ExtractBranchMetadata(PBI, C, D) && ExtractBranchMetadata(BI, A, B)) { - // Given IR which does: - // bbA: - // br i1 %x, label %bbB, label %bbC - // bbB: - // br i1 %y, label %bbD, label %bbC - // Let's call the probability that we take the edge from %bbA to %bbB - // 'a', from %bbA to %bbC, 'b', from %bbB to %bbD 'c' and from %bbB to - // %bbC probability 'd'. - // - // We transform the IR into: - // bbA: - // br i1 %z, label %bbD, label %bbC - // where the probability of going to %bbD is (a*c) and going to bbC is - // (b+a*d). - // - // Probabilities aren't stored as ratios directly. Using branch weights, - // we get: - // (a*c)% = A*C, (b+(a*d))% = A*D+B*C+B*D. - - // In the event of overflow, we want to drop the LSB of the input - // probabilities. - unsigned BitsLost; - - // Ignore overflow result on ProbTrue. - APInt ProbTrue = MultiplyAndLosePrecision(A, C, B, D, BitsLost); - - APInt Tmp1 = MultiplyAndLosePrecision(B, D, A, C, BitsLost); - if (BitsLost) { - ProbTrue = ProbTrue.lshr(BitsLost*2); - } - - APInt Tmp2 = MultiplyAndLosePrecision(A, D, C, B, BitsLost); - if (BitsLost) { - ProbTrue = ProbTrue.lshr(BitsLost*2); - Tmp1 = Tmp1.lshr(BitsLost*2); - } - - APInt Tmp3 = MultiplyAndLosePrecision(B, C, A, D, BitsLost); - if (BitsLost) { - ProbTrue = ProbTrue.lshr(BitsLost*2); - Tmp1 = Tmp1.lshr(BitsLost*2); - Tmp2 = Tmp2.lshr(BitsLost*2); - } - - bool Overflow1 = false, Overflow2 = false; - APInt Tmp4 = Tmp2.uadd_ov(Tmp3, Overflow1); - APInt ProbFalse = Tmp4.uadd_ov(Tmp1, Overflow2); - - if (Overflow1 || Overflow2) { - ProbTrue = ProbTrue.lshr(1); - Tmp1 = Tmp1.lshr(1); - Tmp2 = Tmp2.lshr(1); - Tmp3 = Tmp3.lshr(1); - Tmp4 = Tmp2 + Tmp3; - ProbFalse = Tmp4 + Tmp1; - } - - // The sum of branch weights must fit in 32-bits. - if (ProbTrue.isNegative() && ProbFalse.isNegative()) { - ProbTrue = ProbTrue.lshr(1); - ProbFalse = ProbFalse.lshr(1); - } - - if (ProbTrue != ProbFalse) { - // Normalize the result. - APInt GCD = APIntOps::GreatestCommonDivisor(ProbTrue, ProbFalse); - ProbTrue = ProbTrue.udiv(GCD); - ProbFalse = ProbFalse.udiv(GCD); - - MDBuilder MDB(BI->getContext()); - MDNode *N = MDB.createBranchWeights(ProbTrue.getZExtValue(), - ProbFalse.getZExtValue()); - PBI->setMetadata(LLVMContext::MD_prof, N); - } else { - PBI->setMetadata(LLVMContext::MD_prof, NULL); - } - } else { - PBI->setMetadata(LLVMContext::MD_prof, NULL); - } - // Copy any debug value intrinsics into the end of PredBlock. for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) if (isa(*I)) I->clone()->insertBefore(PBI); - + return true; } return false; @@ -1936,7 +2191,7 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { BasicBlock *BB = BI->getParent(); // If this block ends with a branch instruction, and if there is a - // predecessor that ends on a branch of the same condition, make + // predecessor that ends on a branch of the same condition, make // this conditional branch redundant. if (PBI->getCondition() == BI->getCondition() && PBI->getSuccessor(0) != PBI->getSuccessor(1)) { @@ -1945,11 +2200,11 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { if (BB->getSinglePredecessor()) { // Turn this into a branch on constant. 
bool CondIsTrue = PBI->getSuccessor(0) == BB; - BI->setCondition(ConstantInt::get(Type::getInt1Ty(BB->getContext()), + BI->setCondition(ConstantInt::get(Type::getInt1Ty(BB->getContext()), CondIsTrue)); return true; // Nuke the branch on constant. } - + // Otherwise, if there are multiple predecessors, insert a PHI that merges // in the constant and simplify the block result. Subsequent passes of // simplifycfg will thread the block. @@ -1969,18 +2224,18 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { PBI->getCondition() == BI->getCondition() && PBI->getSuccessor(0) != PBI->getSuccessor(1)) { bool CondIsTrue = PBI->getSuccessor(0) == BB; - NewPN->addIncoming(ConstantInt::get(Type::getInt1Ty(BB->getContext()), + NewPN->addIncoming(ConstantInt::get(Type::getInt1Ty(BB->getContext()), CondIsTrue), P); } else { NewPN->addIncoming(BI->getCondition(), P); } } - + BI->setCondition(NewPN); return true; } } - + // If this is a conditional branch in an empty block, and if any // predecessors is a conditional branch to one of our destinations, // fold the conditions into logical ops and one cond br. @@ -1991,11 +2246,11 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { if (&*BBI != BI) return false; - + if (ConstantExpr *CE = dyn_cast(BI->getCondition())) if (CE->canTrap()) return false; - + int PBIOp, BIOp; if (PBI->getSuccessor(0) == BI->getSuccessor(0)) PBIOp = BIOp = 0; @@ -2007,31 +2262,31 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { PBIOp = BIOp = 1; else return false; - + // Check to make sure that the other destination of this branch // isn't BB itself. If so, this is an infinite loop that will // keep getting unwound. if (PBI->getSuccessor(PBIOp) == BB) return false; - - // Do not perform this transformation if it would require + + // Do not perform this transformation if it would require // insertion of a large number of select instructions. For targets // without predication/cmovs, this is a big pessimization. BasicBlock *CommonDest = PBI->getSuccessor(PBIOp); - + unsigned NumPhis = 0; for (BasicBlock::iterator II = CommonDest->begin(); isa(II); ++II, ++NumPhis) if (NumPhis > 2) // Disable this xform. return false; - + // Finally, if everything is ok, fold the branches to logical ops. BasicBlock *OtherDest = BI->getSuccessor(BIOp ^ 1); - + DEBUG(dbgs() << "FOLDING BRs:" << *PBI->getParent() << "AND: " << *BI->getParent()); - - + + // If OtherDest *is* BB, then BB is a basic block with a single conditional // branch in it, where one edge (OtherDest) goes back to itself but the other // exits. We don't *know* that the program avoids the infinite loop @@ -2046,13 +2301,13 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { "infloop", BB->getParent()); BranchInst::Create(InfLoopBlock, InfLoopBlock); OtherDest = InfLoopBlock; - } - + } + DEBUG(dbgs() << *PBI->getParent()->getParent()); // BI may have other predecessors. Because of this, we leave // it alone, but modify PBI. - + // Make sure we get to CommonDest on True&True directions. Value *PBICond = PBI->getCondition(); IRBuilder Builder(PBI); @@ -2065,16 +2320,43 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { // Merge the conditions. Value *Cond = Builder.CreateOr(PBICond, BICond, "brmerge"); - + // Modify PBI to branch on the new condition to the new dests. 
PBI->setCondition(Cond); PBI->setSuccessor(0, CommonDest); PBI->setSuccessor(1, OtherDest); - + + // Update branch weight for PBI. + uint64_t PredTrueWeight, PredFalseWeight, SuccTrueWeight, SuccFalseWeight; + bool PredHasWeights = ExtractBranchMetadata(PBI, PredTrueWeight, + PredFalseWeight); + bool SuccHasWeights = ExtractBranchMetadata(BI, SuccTrueWeight, + SuccFalseWeight); + if (PredHasWeights && SuccHasWeights) { + uint64_t PredCommon = PBIOp ? PredFalseWeight : PredTrueWeight; + uint64_t PredOther = PBIOp ?PredTrueWeight : PredFalseWeight; + uint64_t SuccCommon = BIOp ? SuccFalseWeight : SuccTrueWeight; + uint64_t SuccOther = BIOp ? SuccTrueWeight : SuccFalseWeight; + // The weight to CommonDest should be PredCommon * SuccTotal + + // PredOther * SuccCommon. + // The weight to OtherDest should be PredOther * SuccOther. + SmallVector NewWeights; + NewWeights.push_back(PredCommon * (SuccCommon + SuccOther) + + PredOther * SuccCommon); + NewWeights.push_back(PredOther * SuccOther); + // Halve the weights if any of them cannot fit in an uint32_t + FitWeights(NewWeights); + + SmallVector MDWeights(NewWeights.begin(),NewWeights.end()); + PBI->setMetadata(LLVMContext::MD_prof, + MDBuilder(BI->getContext()). + createBranchWeights(MDWeights)); + } + // OtherDest may have phi nodes. If so, add an entry from PBI's // block that are identical to the entries for BI's block. AddPredecessorToBlock(OtherDest, PBI->getParent(), BB); - + // We know that the CommonDest already had an edge from PBI to // it. If it has PHIs though, the PHIs may have different // entries for BB and PBI's BB. If so, insert a select to make @@ -2092,10 +2374,10 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { PN->setIncomingValue(PBBIdx, NV); } } - + DEBUG(dbgs() << "INTO: " << *PBI->getParent()); DEBUG(dbgs() << *PBI->getParent()->getParent()); - + // This basic block is probably dead. We know it has at least // one fewer predecessor. return true; @@ -2107,7 +2389,9 @@ static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) { // Also makes sure not to introduce new successors by assuming that edges to // non-successor TrueBBs and FalseBBs aren't reachable. static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond, - BasicBlock *TrueBB, BasicBlock *FalseBB){ + BasicBlock *TrueBB, BasicBlock *FalseBB, + uint32_t TrueWeight, + uint32_t FalseWeight){ // Remove any superfluous successor edges from the CFG. // First, figure out which successors to preserve. // If TrueBB and FalseBB are equal, only try to preserve one copy of that @@ -2136,10 +2420,15 @@ static bool SimplifyTerminatorOnSelect(TerminatorInst *OldTerm, Value *Cond, // We were only looking for one successor, and it was present. // Create an unconditional branch to it. Builder.CreateBr(TrueBB); - else + else { // We found both of the successors we were looking for. // Create a conditional branch sharing the condition of the select. - Builder.CreateCondBr(Cond, TrueBB, FalseBB); + BranchInst *NewBI = Builder.CreateCondBr(Cond, TrueBB, FalseBB); + if (TrueWeight != FalseWeight) + NewBI->setMetadata(LLVMContext::MD_prof, + MDBuilder(OldTerm->getContext()). + createBranchWeights(TrueWeight, FalseWeight)); + } } else if (KeepEdge1 && (KeepEdge2 || TrueBB == FalseBB)) { // Neither of the selected blocks were successors, so this // terminator must be unreachable. 
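// FitWeights is defined elsewhere in this file and is not shown in this hunk;
// the following is only a sketch of behaviour consistent with the comment
// "Halve the weights if any of them cannot fit in an uint32_t": every weight
// is shifted right by the same amount, which keeps the relative
// probabilities while making the values storable as 32-bit metadata.
#include <cstdint>
#include <vector>

static void fitWeightsSketch(std::vector<uint64_t> &Weights) {
  uint64_t Max = 0;
  for (size_t I = 0, E = Weights.size(); I != E; ++I)
    if (Weights[I] > Max)
      Max = Weights[I];
  while (Max > UINT32_MAX) {
    for (size_t I = 0, E = Weights.size(); I != E; ++I)
      Weights[I] >>= 1;  // halve everything so the ratios are preserved
    Max >>= 1;
  }
}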
@@ -2176,8 +2465,23 @@ static bool SimplifySwitchOnSelect(SwitchInst *SI, SelectInst *Select) { BasicBlock *TrueBB = SI->findCaseValue(TrueVal).getCaseSuccessor(); BasicBlock *FalseBB = SI->findCaseValue(FalseVal).getCaseSuccessor(); + // Get weight for TrueBB and FalseBB. + uint32_t TrueWeight = 0, FalseWeight = 0; + SmallVector Weights; + bool HasWeights = HasBranchWeights(SI); + if (HasWeights) { + GetBranchWeights(SI, Weights); + if (Weights.size() == 1 + SI->getNumCases()) { + TrueWeight = (uint32_t)Weights[SI->findCaseValue(TrueVal). + getSuccessorIndex()]; + FalseWeight = (uint32_t)Weights[SI->findCaseValue(FalseVal). + getSuccessorIndex()]; + } + } + // Perform the actual simplification. - return SimplifyTerminatorOnSelect(SI, Condition, TrueBB, FalseBB); + return SimplifyTerminatorOnSelect(SI, Condition, TrueBB, FalseBB, + TrueWeight, FalseWeight); } // SimplifyIndirectBrOnSelect - Replaces @@ -2197,7 +2501,8 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) { BasicBlock *FalseBB = FBA->getBasicBlock(); // Perform the actual simplification. - return SimplifyTerminatorOnSelect(IBI, SI->getCondition(), TrueBB, FalseBB); + return SimplifyTerminatorOnSelect(IBI, SI->getCondition(), TrueBB, FalseBB, + 0, 0); } /// TryToSimplifyUncondBranchWithICmpInIt - This is called when we find an icmp @@ -2214,11 +2519,11 @@ static bool SimplifyIndirectBrOnSelect(IndirectBrInst *IBI, SelectInst *SI) { /// br label %end /// end: /// ... = phi i1 [ true, %entry ], [ %tmp, %DEFAULT ], [ true, %entry ] -/// +/// /// We prefer to split the edge to 'end' so that there is a true/false entry to /// the PHI, merging the third icmp into the switch. static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI, - const TargetData *TD, + const DataLayout *TD, IRBuilder<> &Builder) { BasicBlock *BB = ICI->getParent(); @@ -2228,17 +2533,17 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI, Value *V = ICI->getOperand(0); ConstantInt *Cst = cast(ICI->getOperand(1)); - + // The pattern we're looking for is where our only predecessor is a switch on // 'V' and this block is the default case for the switch. In this case we can // fold the compared value into the switch to simplify things. BasicBlock *Pred = BB->getSinglePredecessor(); if (Pred == 0 || !isa(Pred->getTerminator())) return false; - + SwitchInst *SI = cast(Pred->getTerminator()); if (SI->getCondition() != V) return false; - + // If BB is reachable on a non-default case, then we simply know the value of // V in this block. Substitute it and constant fold the icmp instruction // away. @@ -2246,7 +2551,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI, ConstantInt *VVal = SI->findCaseDest(BB); assert(VVal && "Should have a unique destination value"); ICI->setOperand(0, VVal); - + if (Value *V = SimplifyInstruction(ICI, TD)) { ICI->replaceAllUsesWith(V); ICI->eraseFromParent(); @@ -2254,7 +2559,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI, // BB is now empty, so it is likely to simplify away. return SimplifyCFG(BB) | true; } - + // Ok, the block is reachable from the default dest. If the constant we're // comparing exists in one of the other edges, then we can constant fold ICI // and zap it. 
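// A small standalone sketch of the weight layout assumed by the
// HasBranchWeights/GetBranchWeights lookups above: for a switch the
// "branch_weights" metadata carries one value per successor, the default
// destination first and each case at its successor index, so
// Weights.size() == 1 + NumCases.
#include <cstdint>
#include <vector>

static uint64_t caseWeight(const std::vector<uint64_t> &Weights,
                           unsigned CaseIndex) {
  // CaseIndex is the 0-based case number; +1 skips the default's slot.
  return Weights[CaseIndex + 1];
}
// Example: weights {10, 1, 4, 5} describe a 3-case switch whose default is
// taken about 10/20 of the time and whose third case about 5/20 of the time.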
@@ -2264,13 +2569,13 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI, V = ConstantInt::getFalse(BB->getContext()); else V = ConstantInt::getTrue(BB->getContext()); - + ICI->replaceAllUsesWith(V); ICI->eraseFromParent(); // BB is now empty, so it is likely to simplify away. return SimplifyCFG(BB) | true; } - + // The use of the icmp has to be in the 'end' block, by the only PHI node in // the block. BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0); @@ -2296,8 +2601,23 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI, // the switch to the merge point on the compared value. BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "switch.edge", BB->getParent(), BB); + SmallVector Weights; + bool HasWeights = HasBranchWeights(SI); + if (HasWeights) { + GetBranchWeights(SI, Weights); + if (Weights.size() == 1 + SI->getNumCases()) { + // Split weight for default case to case for "Cst". + Weights[0] = (Weights[0]+1) >> 1; + Weights.push_back(Weights[0]); + + SmallVector MDWeights(Weights.begin(), Weights.end()); + SI->setMetadata(LLVMContext::MD_prof, + MDBuilder(SI->getContext()). + createBranchWeights(MDWeights)); + } + } SI->addCase(Cst, NewBB); - + // NewBB branches to the phi block, add the uncond branch and the phi entry. Builder.SetInsertPoint(NewBB); Builder.SetCurrentDebugLocation(SI->getDebugLoc()); @@ -2309,12 +2629,12 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(ICmpInst *ICI, /// SimplifyBranchOnICmpChain - The specified branch is a conditional branch. /// Check to see if it is branching on an or/and chain of icmp instructions, and /// fold it into a switch instruction if so. -static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD, +static bool SimplifyBranchOnICmpChain(BranchInst *BI, const DataLayout *TD, IRBuilder<> &Builder) { Instruction *Cond = dyn_cast(BI->getCondition()); if (Cond == 0) return false; - - + + // Change br (X == 0 | X == 1), T, F into a switch instruction. // If this is a bunch of seteq's or'd together, or if it's a bunch of // 'setne's and'ed together, collect them. @@ -2323,7 +2643,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD, bool TrueWhenEqual = true; Value *ExtraCase = 0; unsigned UsedICmps = 0; - + if (Cond->getOpcode() == Instruction::Or) { CompVal = GatherConstantCompares(Cond, Values, ExtraCase, TD, true, UsedICmps); @@ -2332,7 +2652,7 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD, UsedICmps); TrueWhenEqual = false; } - + // If we didn't have a multiply compared value, fail. if (CompVal == 0) return false; @@ -2344,21 +2664,24 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD, // instruction can't handle, remove them now. array_pod_sort(Values.begin(), Values.end(), ConstantIntSortPredicate); Values.erase(std::unique(Values.begin(), Values.end()), Values.end()); - + // If Extra was used, we require at least two switch values to do the // transformation. A switch with one value is just an cond branch. if (ExtraCase && Values.size() < 2) return false; - + + // TODO: Preserve branch weight metadata, similarly to how + // FoldValueComparisonIntoPredecessors preserves it. + // Figure out which block is which destination. BasicBlock *DefaultBB = BI->getSuccessor(1); BasicBlock *EdgeBB = BI->getSuccessor(0); if (!TrueWhenEqual) std::swap(DefaultBB, EdgeBB); - + BasicBlock *BB = BI->getParent(); - + DEBUG(dbgs() << "Converting 'icmp' chain with " << Values.size() << " cases into SWITCH. 
BB is:\n" << *BB); - + // If there are any extra values that couldn't be folded into the switch // then we evaluate them with an explicit branch first. Split the block // right before the condbr to handle it. @@ -2372,13 +2695,13 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD, Builder.CreateCondBr(ExtraCase, EdgeBB, NewBB); else Builder.CreateCondBr(ExtraCase, NewBB, EdgeBB); - + OldTI->eraseFromParent(); - + // If there are PHI nodes in EdgeBB, then we need to add a new entry to them // for the edge we just added. AddPredecessorToBlock(EdgeBB, BB, NewBB); - + DEBUG(dbgs() << " ** 'icmp' chain unhandled condition: " << *ExtraCase << "\nEXTRABB = " << *BB); BB = NewBB; @@ -2387,19 +2710,19 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD, Builder.SetInsertPoint(BI); // Convert pointer to int before we switch. if (CompVal->getType()->isPointerTy()) { - assert(TD && "Cannot switch on pointer without TargetData"); + assert(TD && "Cannot switch on pointer without DataLayout"); CompVal = Builder.CreatePtrToInt(CompVal, TD->getIntPtrType(CompVal->getContext()), "magicptr"); } - + // Create the new switch instruction now. SwitchInst *New = Builder.CreateSwitch(CompVal, DefaultBB, Values.size()); // Add all of the 'cases' to the switch instruction. for (unsigned i = 0, e = Values.size(); i != e; ++i) New->addCase(Values[i], EdgeBB); - + // We added edges from PI to the EdgeBB. As such, if there were any // PHI nodes in EdgeBB, they need entries to be added corresponding to // the number of edges added. @@ -2410,10 +2733,10 @@ static bool SimplifyBranchOnICmpChain(BranchInst *BI, const TargetData *TD, for (unsigned i = 0, e = Values.size()-1; i != e; ++i) PN->addIncoming(InVal, BB); } - + // Erase the old branch instruction. EraseTerminatorInstAndDCECond(BI); - + DEBUG(dbgs() << " ** 'icmp' chain result is:\n" << *BB << '\n'); return true; } @@ -2467,7 +2790,7 @@ bool SimplifyCFGOpt::SimplifyResume(ResumeInst *RI, IRBuilder<> &Builder) { bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) { BasicBlock *BB = RI->getParent(); if (!BB->getFirstNonPHIOrDbg()->isTerminator()) return false; - + // Find predecessors that end with branches. SmallVector UncondBranchPreds; SmallVector CondBranchPreds; @@ -2481,7 +2804,7 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) { CondBranchPreds.push_back(BI); } } - + // If we found some, do the transformation! if (!UncondBranchPreds.empty() && DupRet) { while (!UncondBranchPreds.empty()) { @@ -2490,21 +2813,21 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) { << "INTO UNCOND BRANCH PRED: " << *Pred); (void)FoldReturnIntoUncondBranch(RI, BB, Pred); } - + // If we eliminated all predecessors of the block, delete the block now. if (pred_begin(BB) == pred_end(BB)) // We know there are no successors, so just nuke the block. BB->eraseFromParent(); - + return true; } - + // Check out all of the conditional branches going to this return // instruction. If any of them just select between returns, change the // branch itself into a select/return pair. while (!CondBranchPreds.empty()) { BranchInst *BI = CondBranchPreds.pop_back_val(); - + // Check to see if the non-BB successor is also a return block. 
if (isa(BI->getSuccessor(0)->getTerminator()) && isa(BI->getSuccessor(1)->getTerminator()) && @@ -2516,9 +2839,9 @@ bool SimplifyCFGOpt::SimplifyReturn(ReturnInst *RI, IRBuilder<> &Builder) { bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { BasicBlock *BB = UI->getParent(); - + bool Changed = false; - + // If there are any instructions immediately before the unreachable that can // be removed, do so. while (UI != BB->begin()) { @@ -2558,11 +2881,11 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { BBI->eraseFromParent(); Changed = true; } - + // If the unreachable instruction is the first in the block, take a gander // at all of the predecessors of this instruction, and simplify them. if (&BB->front() != UI) return Changed; - + SmallVector Preds(pred_begin(BB), pred_end(BB)); for (unsigned i = 0, e = Preds.size(); i != e; ++i) { TerminatorInst *TI = Preds[i]->getTerminator(); @@ -2615,7 +2938,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { BasicBlock *MaxBlock = 0; for (std::map >::iterator I = Popularity.begin(), E = Popularity.end(); I != E; ++I) { - if (I->second.first > MaxPop || + if (I->second.first > MaxPop || (I->second.first == MaxPop && MaxIndex > I->second.second)) { MaxPop = I->second.first; MaxIndex = I->second.second; @@ -2627,13 +2950,13 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { // edges to it. SI->setDefaultDest(MaxBlock); Changed = true; - + // If MaxBlock has phinodes in it, remove MaxPop-1 entries from // it. if (isa(MaxBlock->begin())) for (unsigned i = 0; i != MaxPop-1; ++i) MaxBlock->removePredecessor(SI->getParent()); - + for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); i != e; ++i) if (i.getCaseSuccessor() == MaxBlock) { @@ -2648,7 +2971,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { // place to note that the call does not throw though. BranchInst *BI = Builder.CreateBr(II->getNormalDest()); II->removeFromParent(); // Take out of symbol table - + // Insert the call now... SmallVector Args(II->op_begin(), II->op_end()-3); Builder.SetInsertPoint(BI); @@ -2663,7 +2986,7 @@ bool SimplifyCFGOpt::SimplifyUnreachable(UnreachableInst *UI) { } } } - + // If this block is now dead, remove it. if (pred_begin(BB) == pred_end(BB) && BB != &BB->getParent()->getEntryBlock()) { @@ -2706,9 +3029,28 @@ static bool TurnSwitchRangeIntoICmp(SwitchInst *SI, IRBuilder<> &Builder) { if (!Offset->isNullValue()) Sub = Builder.CreateAdd(Sub, Offset, Sub->getName()+".off"); Value *Cmp = Builder.CreateICmpULT(Sub, NumCases, "switch"); - Builder.CreateCondBr( + BranchInst *NewBI = Builder.CreateCondBr( Cmp, SI->case_begin().getCaseSuccessor(), SI->getDefaultDest()); + // Update weight for the newly-created conditional branch. + SmallVector Weights; + bool HasWeights = HasBranchWeights(SI); + if (HasWeights) { + GetBranchWeights(SI, Weights); + if (Weights.size() == 1 + SI->getNumCases()) { + // Combine all weights for the cases to be the true weight of NewBI. + // We assume that the sum of all weights for a Terminator can fit into 32 + // bits. + uint32_t NewTrueWeight = 0; + for (unsigned I = 1, E = Weights.size(); I != E; ++I) + NewTrueWeight += (uint32_t)Weights[I]; + NewBI->setMetadata(LLVMContext::MD_prof, + MDBuilder(SI->getContext()). + createBranchWeights(NewTrueWeight, + (uint32_t)Weights[0])); + } + } + // Prune obsolete incoming values off the successor's PHI nodes. 
for (BasicBlock::iterator BBI = SI->case_begin().getCaseSuccessor()->begin(); isa(BBI); ++BBI) { @@ -2739,15 +3081,33 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI) { } } + SmallVector Weights; + bool HasWeight = HasBranchWeights(SI); + if (HasWeight) { + GetBranchWeights(SI, Weights); + HasWeight = (Weights.size() == 1 + SI->getNumCases()); + } + // Remove dead cases from the switch. for (unsigned I = 0, E = DeadCases.size(); I != E; ++I) { SwitchInst::CaseIt Case = SI->findCaseValue(DeadCases[I]); assert(Case != SI->case_default() && "Case was not found. Probably mistake in DeadCases forming."); + if (HasWeight) { + std::swap(Weights[Case.getCaseIndex()+1], Weights.back()); + Weights.pop_back(); + } + // Prune unused values from PHI nodes. Case.getCaseSuccessor()->removePredecessor(SI->getParent()); SI->removeCase(Case); } + if (HasWeight) { + SmallVector MDWeights(Weights.begin(), Weights.end()); + SI->setMetadata(LLVMContext::MD_prof, + MDBuilder(SI->getParent()->getContext()). + createBranchWeights(MDWeights)); + } return !DeadCases.empty(); } @@ -2823,33 +3183,512 @@ static bool ForwardSwitchConditionToPHI(SwitchInst *SI) { return Changed; } -bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) { - // If this switch is too complex to want to look at, ignore it. - if (!isValueEqualityComparison(SI)) +/// ValidLookupTableConstant - Return true if the backend will be able to handle +/// initializing an array of constants like C. +static bool ValidLookupTableConstant(Constant *C) { + if (ConstantExpr *CE = dyn_cast(C)) + return CE->isGEPWithNoNotionalOverIndexing(); + + return isa(C) || + isa(C) || + isa(C) || + isa(C) || + isa(C); +} + +/// LookupConstant - If V is a Constant, return it. Otherwise, try to look up +/// its constant value in ConstantPool, returning 0 if it's not there. +static Constant *LookupConstant(Value *V, + const SmallDenseMap& ConstantPool) { + if (Constant *C = dyn_cast(V)) + return C; + return ConstantPool.lookup(V); +} + +/// ConstantFold - Try to fold instruction I into a constant. This works for +/// simple instructions such as binary operations where both operands are +/// constant or can be replaced by constants from the ConstantPool. Returns the +/// resulting constant on success, 0 otherwise. 
+static Constant *ConstantFold(Instruction *I, + const SmallDenseMap& ConstantPool) { + if (BinaryOperator *BO = dyn_cast(I)) { + Constant *A = LookupConstant(BO->getOperand(0), ConstantPool); + if (!A) + return 0; + Constant *B = LookupConstant(BO->getOperand(1), ConstantPool); + if (!B) + return 0; + return ConstantExpr::get(BO->getOpcode(), A, B); + } + + if (CmpInst *Cmp = dyn_cast(I)) { + Constant *A = LookupConstant(I->getOperand(0), ConstantPool); + if (!A) + return 0; + Constant *B = LookupConstant(I->getOperand(1), ConstantPool); + if (!B) + return 0; + return ConstantExpr::getCompare(Cmp->getPredicate(), A, B); + } + + if (SelectInst *Select = dyn_cast(I)) { + Constant *A = LookupConstant(Select->getCondition(), ConstantPool); + if (!A) + return 0; + if (A->isAllOnesValue()) + return LookupConstant(Select->getTrueValue(), ConstantPool); + if (A->isNullValue()) + return LookupConstant(Select->getFalseValue(), ConstantPool); + return 0; + } + + if (CastInst *Cast = dyn_cast(I)) { + Constant *A = LookupConstant(I->getOperand(0), ConstantPool); + if (!A) + return 0; + return ConstantExpr::getCast(Cast->getOpcode(), A, Cast->getDestTy()); + } + + return 0; +} + +/// GetCaseResults - Try to determine the resulting constant values in phi nodes +/// at the common destination basic block, *CommonDest, for one of the case +/// destionations CaseDest corresponding to value CaseVal (0 for the default +/// case), of a switch instruction SI. +static bool GetCaseResults(SwitchInst *SI, + ConstantInt *CaseVal, + BasicBlock *CaseDest, + BasicBlock **CommonDest, + SmallVector, 4> &Res) { + // The block from which we enter the common destination. + BasicBlock *Pred = SI->getParent(); + + // If CaseDest is empty except for some side-effect free instructions through + // which we can constant-propagate the CaseVal, continue to its successor. + SmallDenseMap ConstantPool; + ConstantPool.insert(std::make_pair(SI->getCondition(), CaseVal)); + for (BasicBlock::iterator I = CaseDest->begin(), E = CaseDest->end(); I != E; + ++I) { + if (TerminatorInst *T = dyn_cast(I)) { + // If the terminator is a simple branch, continue to the next block. + if (T->getNumSuccessors() != 1) + return false; + Pred = CaseDest; + CaseDest = T->getSuccessor(0); + } else if (isa(I)) { + // Skip debug intrinsic. + continue; + } else if (Constant *C = ConstantFold(I, ConstantPool)) { + // Instruction is side-effect free and constant. + ConstantPool.insert(std::make_pair(I, C)); + } else { + break; + } + } + + // If we did not have a CommonDest before, use the current one. + if (!*CommonDest) + *CommonDest = CaseDest; + // If the destination isn't the common one, abort. + if (CaseDest != *CommonDest) + return false; + + // Get the values for this case from phi nodes in the destination block. + BasicBlock::iterator I = (*CommonDest)->begin(); + while (PHINode *PHI = dyn_cast(I++)) { + int Idx = PHI->getBasicBlockIndex(Pred); + if (Idx == -1) + continue; + + Constant *ConstVal = LookupConstant(PHI->getIncomingValue(Idx), + ConstantPool); + if (!ConstVal) + return false; + + // Note: If the constant comes from constant-propagating the case value + // through the CaseDest basic block, it will be safe to remove the + // instructions in that block. They cannot be used (except in the phi nodes + // we visit) outside CaseDest, because that block does not dominate its + // successor. If it did, we would not be in this phi node. + + // Be conservative about which kinds of constants we support. 
+ if (!ValidLookupTableConstant(ConstVal)) + return false; + + Res.push_back(std::make_pair(PHI, ConstVal)); + } + + return true; +} + +namespace { + /// SwitchLookupTable - This class represents a lookup table that can be used + /// to replace a switch. + class SwitchLookupTable { + public: + /// SwitchLookupTable - Create a lookup table to use as a switch replacement + /// with the contents of Values, using DefaultValue to fill any holes in the + /// table. + SwitchLookupTable(Module &M, + uint64_t TableSize, + ConstantInt *Offset, + const SmallVector, 4>& Values, + Constant *DefaultValue, + const DataLayout *TD); + + /// BuildLookup - Build instructions with Builder to retrieve the value at + /// the position given by Index in the lookup table. + Value *BuildLookup(Value *Index, IRBuilder<> &Builder); + + /// WouldFitInRegister - Return true if a table with TableSize elements of + /// type ElementType would fit in a target-legal register. + static bool WouldFitInRegister(const DataLayout *TD, + uint64_t TableSize, + const Type *ElementType); + + private: + // Depending on the contents of the table, it can be represented in + // different ways. + enum { + // For tables where each element contains the same value, we just have to + // store that single value and return it for each lookup. + SingleValueKind, + + // For small tables with integer elements, we can pack them into a bitmap + // that fits into a target-legal register. Values are retrieved by + // shift and mask operations. + BitMapKind, + + // The table is stored as an array of values. Values are retrieved by load + // instructions from the table. + ArrayKind + } Kind; + + // For SingleValueKind, this is the single value. + Constant *SingleValue; + + // For BitMapKind, this is the bitmap. + ConstantInt *BitMap; + IntegerType *BitMapElementTy; + + // For ArrayKind, this is the array. + GlobalVariable *Array; + }; +} + +SwitchLookupTable::SwitchLookupTable(Module &M, + uint64_t TableSize, + ConstantInt *Offset, + const SmallVector, 4>& Values, + Constant *DefaultValue, + const DataLayout *TD) { + assert(Values.size() && "Can't build lookup table without values!"); + assert(TableSize >= Values.size() && "Can't fit values in table!"); + + // If all values in the table are equal, this is that value. + SingleValue = Values.begin()->second; + + // Build up the table contents. + SmallVector TableContents(TableSize); + for (size_t I = 0, E = Values.size(); I != E; ++I) { + ConstantInt *CaseVal = Values[I].first; + Constant *CaseRes = Values[I].second; + assert(CaseRes->getType() == DefaultValue->getType()); + + uint64_t Idx = (CaseVal->getValue() - Offset->getValue()) + .getLimitedValue(); + TableContents[Idx] = CaseRes; + + if (CaseRes != SingleValue) + SingleValue = 0; + } + + // Fill in any holes in the table with the default result. + if (Values.size() < TableSize) { + for (uint64_t I = 0; I < TableSize; ++I) { + if (!TableContents[I]) + TableContents[I] = DefaultValue; + } + + if (DefaultValue != SingleValue) + SingleValue = 0; + } + + // If each element in the table contains the same value, we only need to store + // that single value. + if (SingleValue) { + Kind = SingleValueKind; + return; + } + + // If the type is integer and the table fits in a register, build a bitmap. 
+ if (WouldFitInRegister(TD, TableSize, DefaultValue->getType())) { + IntegerType *IT = cast(DefaultValue->getType()); + APInt TableInt(TableSize * IT->getBitWidth(), 0); + for (uint64_t I = TableSize; I > 0; --I) { + TableInt <<= IT->getBitWidth(); + // Insert values into the bitmap. Undef values are set to zero. + if (!isa(TableContents[I - 1])) { + ConstantInt *Val = cast(TableContents[I - 1]); + TableInt |= Val->getValue().zext(TableInt.getBitWidth()); + } + } + BitMap = ConstantInt::get(M.getContext(), TableInt); + BitMapElementTy = IT; + Kind = BitMapKind; + ++NumBitMaps; + return; + } + + // Store the table in an array. + ArrayType *ArrayTy = ArrayType::get(DefaultValue->getType(), TableSize); + Constant *Initializer = ConstantArray::get(ArrayTy, TableContents); + + Array = new GlobalVariable(M, ArrayTy, /*constant=*/ true, + GlobalVariable::PrivateLinkage, + Initializer, + "switch.table"); + Array->setUnnamedAddr(true); + Kind = ArrayKind; +} + +Value *SwitchLookupTable::BuildLookup(Value *Index, IRBuilder<> &Builder) { + switch (Kind) { + case SingleValueKind: + return SingleValue; + case BitMapKind: { + // Type of the bitmap (e.g. i59). + IntegerType *MapTy = BitMap->getType(); + + // Cast Index to the same type as the bitmap. + // Note: The Index is <= the number of elements in the table, so + // truncating it to the width of the bitmask is safe. + Value *ShiftAmt = Builder.CreateZExtOrTrunc(Index, MapTy, "switch.cast"); + + // Multiply the shift amount by the element width. + ShiftAmt = Builder.CreateMul(ShiftAmt, + ConstantInt::get(MapTy, BitMapElementTy->getBitWidth()), + "switch.shiftamt"); + + // Shift down. + Value *DownShifted = Builder.CreateLShr(BitMap, ShiftAmt, + "switch.downshift"); + // Mask off. + return Builder.CreateTrunc(DownShifted, BitMapElementTy, + "switch.masked"); + } + case ArrayKind: { + Value *GEPIndices[] = { Builder.getInt32(0), Index }; + Value *GEP = Builder.CreateInBoundsGEP(Array, GEPIndices, + "switch.gep"); + return Builder.CreateLoad(GEP, "switch.load"); + } + } + llvm_unreachable("Unknown lookup table kind!"); +} + +bool SwitchLookupTable::WouldFitInRegister(const DataLayout *TD, + uint64_t TableSize, + const Type *ElementType) { + if (!TD) + return false; + const IntegerType *IT = dyn_cast(ElementType); + if (!IT) + return false; + // FIXME: If the type is wider than it needs to be, e.g. i8 but all values + // are <= 15, we could try to narrow the type. + + // Avoid overflow, fitsInLegalInteger uses unsigned int for the width. + if (TableSize >= UINT_MAX/IT->getBitWidth()) + return false; + return TD->fitsInLegalInteger(TableSize * IT->getBitWidth()); +} + +/// ShouldBuildLookupTable - Determine whether a lookup table should be built +/// for this switch, based on the number of caes, size of the table and the +/// types of the results. +static bool ShouldBuildLookupTable(SwitchInst *SI, + uint64_t TableSize, + const DataLayout *TD, + const SmallDenseMap& ResultTypes) { + // The table density should be at least 40%. This is the same criterion as for + // jump tables, see SelectionDAGBuilder::handleJTSwitchCase. + // FIXME: Find the best cut-off. + if (SI->getNumCases() > TableSize || TableSize >= UINT64_MAX / 10) + return false; // TableSize overflowed, or mul below might overflow. + if (SI->getNumCases() * 10 >= TableSize * 4) + return true; + + // If each table would fit in a register, we should build it anyway. 
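// A minimal standalone sketch of the BitMapKind encoding used above: the
// table's small integer results are packed into one wide integer, and a
// lookup shifts right by Index * ElementBits and then masks. Here the "wide
// integer" is just a uint64_t, so this sketch assumes
// TableSize * ElementBits <= 64 and ElementBits <= 8 (the real code packs
// into an APInt of whatever width is needed and masks by truncation).
#include <cstddef>
#include <cstdint>

static uint64_t packTable(const uint8_t *Values, size_t TableSize,
                          unsigned ElementBits) {
  uint64_t Packed = 0;
  for (size_t I = TableSize; I > 0; --I) { // highest index ends up at the top
    Packed <<= ElementBits;
    Packed |= (uint64_t)Values[I - 1];     // values assumed to fit ElementBits
  }
  return Packed;
}

static uint8_t lookupTable(uint64_t Packed, uint64_t Index,
                           unsigned ElementBits) {
  uint64_t Shifted = Packed >> (Index * ElementBits);      // "switch.downshift"
  return (uint8_t)(Shifted & ((1u << ElementBits) - 1));   // "switch.masked"
}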
+ for (SmallDenseMap::const_iterator I = ResultTypes.begin(), + E = ResultTypes.end(); I != E; ++I) { + if (!SwitchLookupTable::WouldFitInRegister(TD, TableSize, I->second)) + return false; + } + return true; +} + +/// SwitchToLookupTable - If the switch is only used to initialize one or more +/// phi nodes in a common successor block with different constant values, +/// replace the switch with lookup tables. +static bool SwitchToLookupTable(SwitchInst *SI, + IRBuilder<> &Builder, + const DataLayout* TD, + const TargetTransformInfo *TTI) { + assert(SI->getNumCases() > 1 && "Degenerate switch?"); + + // Only build lookup table when we have a target that supports it. + if (!TTI || !TTI->getScalarTargetTransformInfo() || + !TTI->getScalarTargetTransformInfo()->shouldBuildLookupTables()) return false; + // FIXME: If the switch is too sparse for a lookup table, perhaps we could + // split off a dense part and build a lookup table for that. + + // FIXME: This creates arrays of GEPs to constant strings, which means each + // GEP needs a runtime relocation in PIC code. We should just build one big + // string and lookup indices into that. + + // Ignore the switch if the number of cases is too small. + // This is similar to the check when building jump tables in + // SelectionDAGBuilder::handleJTSwitchCase. + // FIXME: Determine the best cut-off. + if (SI->getNumCases() < 4) + return false; + + // Figure out the corresponding result for each case value and phi node in the + // common destination, as well as the the min and max case values. + assert(SI->case_begin() != SI->case_end()); + SwitchInst::CaseIt CI = SI->case_begin(); + ConstantInt *MinCaseVal = CI.getCaseValue(); + ConstantInt *MaxCaseVal = CI.getCaseValue(); + + BasicBlock *CommonDest = 0; + typedef SmallVector, 4> ResultListTy; + SmallDenseMap ResultLists; + SmallDenseMap DefaultResults; + SmallDenseMap ResultTypes; + SmallVector PHIs; + + for (SwitchInst::CaseIt E = SI->case_end(); CI != E; ++CI) { + ConstantInt *CaseVal = CI.getCaseValue(); + if (CaseVal->getValue().slt(MinCaseVal->getValue())) + MinCaseVal = CaseVal; + if (CaseVal->getValue().sgt(MaxCaseVal->getValue())) + MaxCaseVal = CaseVal; + + // Resulting value at phi nodes for this case value. + typedef SmallVector, 4> ResultsTy; + ResultsTy Results; + if (!GetCaseResults(SI, CaseVal, CI.getCaseSuccessor(), &CommonDest, + Results)) + return false; + + // Append the result from this case to the list for each phi. + for (ResultsTy::iterator I = Results.begin(), E = Results.end(); I!=E; ++I) { + if (!ResultLists.count(I->first)) + PHIs.push_back(I->first); + ResultLists[I->first].push_back(std::make_pair(CaseVal, I->second)); + } + } + + // Get the resulting values for the default case. + SmallVector, 4> DefaultResultsList; + if (!GetCaseResults(SI, 0, SI->getDefaultDest(), &CommonDest, + DefaultResultsList)) + return false; + for (size_t I = 0, E = DefaultResultsList.size(); I != E; ++I) { + PHINode *PHI = DefaultResultsList[I].first; + Constant *Result = DefaultResultsList[I].second; + DefaultResults[PHI] = Result; + ResultTypes[PHI] = Result->getType(); + } + + APInt RangeSpread = MaxCaseVal->getValue() - MinCaseVal->getValue(); + uint64_t TableSize = RangeSpread.getLimitedValue() + 1; + if (!ShouldBuildLookupTable(SI, TableSize, TD, ResultTypes)) + return false; + + // Create the BB that does the lookups. 
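// A minimal standalone sketch of the ShouldBuildLookupTable density test
// shown above: a table is accepted when at least 40% of its slots correspond
// to real cases (NumCases * 10 >= TableSize * 4), mirroring the jump-table
// heuristic it cites. RegisterSized stands in for the "every result type
// fits in a legal register" fallback that is checked afterwards.
#include <cstdint>

static bool densityOK(uint64_t NumCases, uint64_t TableSize,
                      bool RegisterSized) {
  if (NumCases > TableSize || TableSize >= UINT64_MAX / 10)
    return false;              // degenerate, or the multiply below could overflow
  if (NumCases * 10 >= TableSize * 4)
    return true;               // at least 40% of the slots are populated
  return RegisterSized;        // sparse, but cheap enough to build anyway
}
// Example: 5 cases spread over values 0..9 give TableSize 10 and 50% density,
// so the table is built; 4 cases over 0..99 are only 4% dense and are built
// only if the whole table fits in a register.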
+ Module &Mod = *CommonDest->getParent()->getParent(); + BasicBlock *LookupBB = BasicBlock::Create(Mod.getContext(), + "switch.lookup", + CommonDest->getParent(), + CommonDest); + + // Check whether the condition value is within the case range, and branch to + // the new BB. + Builder.SetInsertPoint(SI); + Value *TableIndex = Builder.CreateSub(SI->getCondition(), MinCaseVal, + "switch.tableidx"); + Value *Cmp = Builder.CreateICmpULT(TableIndex, ConstantInt::get( + MinCaseVal->getType(), TableSize)); + Builder.CreateCondBr(Cmp, LookupBB, SI->getDefaultDest()); + + // Populate the BB that does the lookups. + Builder.SetInsertPoint(LookupBB); + bool ReturnedEarly = false; + for (size_t I = 0, E = PHIs.size(); I != E; ++I) { + PHINode *PHI = PHIs[I]; + + SwitchLookupTable Table(Mod, TableSize, MinCaseVal, ResultLists[PHI], + DefaultResults[PHI], TD); + + Value *Result = Table.BuildLookup(TableIndex, Builder); + + // If the result is used to return immediately from the function, we want to + // do that right here. + if (PHI->hasOneUse() && isa(*PHI->use_begin()) && + *PHI->use_begin() == CommonDest->getFirstNonPHIOrDbg()) { + Builder.CreateRet(Result); + ReturnedEarly = true; + break; + } + + PHI->addIncoming(Result, LookupBB); + } + + if (!ReturnedEarly) + Builder.CreateBr(CommonDest); + + // Remove the switch. + for (unsigned i = 0; i < SI->getNumSuccessors(); ++i) { + BasicBlock *Succ = SI->getSuccessor(i); + if (Succ == SI->getDefaultDest()) continue; + Succ->removePredecessor(SI->getParent()); + } + SI->eraseFromParent(); + + ++NumLookupTables; + return true; +} + +bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) { BasicBlock *BB = SI->getParent(); - // If we only have one predecessor, and if it is a branch on this value, - // see if that predecessor totally determines the outcome of this switch. - if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) - if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder)) - return SimplifyCFG(BB) | true; + if (isValueEqualityComparison(SI)) { + // If we only have one predecessor, and if it is a branch on this value, + // see if that predecessor totally determines the outcome of this switch. + if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) + if (SimplifyEqualityComparisonWithOnlyPredecessor(SI, OnlyPred, Builder)) + return SimplifyCFG(BB) | true; - Value *Cond = SI->getCondition(); - if (SelectInst *Select = dyn_cast(Cond)) - if (SimplifySwitchOnSelect(SI, Select)) - return SimplifyCFG(BB) | true; + Value *Cond = SI->getCondition(); + if (SelectInst *Select = dyn_cast(Cond)) + if (SimplifySwitchOnSelect(SI, Select)) + return SimplifyCFG(BB) | true; - // If the block only contains the switch, see if we can fold the block - // away into any preds. - BasicBlock::iterator BBI = BB->begin(); - // Ignore dbg intrinsics. - while (isa(BBI)) - ++BBI; - if (SI == &*BBI) - if (FoldValueComparisonIntoPredecessors(SI, Builder)) - return SimplifyCFG(BB) | true; + // If the block only contains the switch, see if we can fold the block + // away into any preds. + BasicBlock::iterator BBI = BB->begin(); + // Ignore dbg intrinsics. + while (isa(BBI)) + ++BBI; + if (SI == &*BBI) + if (FoldValueComparisonIntoPredecessors(SI, Builder)) + return SimplifyCFG(BB) | true; + } // Try to transform the switch into an icmp and a branch. 
if (TurnSwitchRangeIntoICmp(SI, Builder)) @@ -2862,13 +3701,16 @@ bool SimplifyCFGOpt::SimplifySwitch(SwitchInst *SI, IRBuilder<> &Builder) { if (ForwardSwitchConditionToPHI(SI)) return SimplifyCFG(BB) | true; + if (SwitchToLookupTable(SI, Builder, TD, TTI)) + return SimplifyCFG(BB) | true; + return false; } bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) { BasicBlock *BB = IBI->getParent(); bool Changed = false; - + // Eliminate redundant destinations. SmallPtrSet Succs; for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) { @@ -2879,7 +3721,7 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) { --i; --e; Changed = true; } - } + } if (IBI->getNumDestinations() == 0) { // If the indirectbr has no successors, change it to unreachable. @@ -2887,14 +3729,14 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) { EraseTerminatorInstAndDCECond(IBI); return true; } - + if (IBI->getNumDestinations() == 1) { // If the indirectbr has one successor, change it to a direct branch. BranchInst::Create(IBI->getDestination(0), IBI); EraseTerminatorInstAndDCECond(IBI); return true; } - + if (SelectInst *SI = dyn_cast(IBI->getAddress())) { if (SimplifyIndirectBrOnSelect(IBI, SI)) return SimplifyCFG(BB) | true; @@ -2904,13 +3746,16 @@ bool SimplifyCFGOpt::SimplifyIndirectBr(IndirectBrInst *IBI) { bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){ BasicBlock *BB = BI->getParent(); - + + if (SinkCommon && SinkThenElseCodeToEnd(BI)) + return true; + // If the Terminator is the only non-phi instruction, simplify the block. BasicBlock::iterator I = BB->getFirstNonPHIOrDbgOrLifetime(); if (I->isTerminator() && BB != &BB->getParent()->getEntryBlock() && TryToSimplifyUncondBranchFromEmptyBlock(BB)) return true; - + // If the only instruction in the block is a seteq/setne comparison // against a constant, try to simplify the block. if (ICmpInst *ICI = dyn_cast(I)) @@ -2921,7 +3766,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){ TryToSimplifyUncondBranchWithICmpInIt(ICI, TD, Builder)) return true; } - + // If this basic block is ONLY a compare and a branch, and if a predecessor // branches to us and our successor, fold the comparison into the // predecessor and use logical operations to update the incoming value @@ -2934,7 +3779,7 @@ bool SimplifyCFGOpt::SimplifyUncondBranch(BranchInst *BI, IRBuilder<> &Builder){ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { BasicBlock *BB = BI->getParent(); - + // Conditional branch if (isValueEqualityComparison(BI)) { // If we only have one predecessor, and if it is a branch on this value, @@ -2943,7 +3788,7 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { if (BasicBlock *OnlyPred = BB->getSinglePredecessor()) if (SimplifyEqualityComparisonWithOnlyPredecessor(BI, OnlyPred, Builder)) return SimplifyCFG(BB) | true; - + // This block must be empty, except for the setcond inst, if it exists. // Ignore dbg intrinsics. BasicBlock::iterator I = BB->begin(); @@ -2962,17 +3807,17 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { return SimplifyCFG(BB) | true; } } - + // Try to turn "br (X == 0 | X == 1), T, F" into a switch instruction. 
if (SimplifyBranchOnICmpChain(BI, TD, Builder)) return true; - + // If this basic block is ONLY a compare and a branch, and if a predecessor // branches to us and one of our successors, fold the comparison into the // predecessor and use logical operations to pick the right destination. if (FoldBranchToCommonDest(BI)) return SimplifyCFG(BB) | true; - + // We have a conditional branch to two blocks that are only reachable // from BI. We know that the condbr dominates the two blocks, so see if // there is any identical code in the "then" and "else" blocks. If so, we @@ -2999,14 +3844,14 @@ bool SimplifyCFGOpt::SimplifyCondBranch(BranchInst *BI, IRBuilder<> &Builder) { if (SpeculativelyExecuteBB(BI, BI->getSuccessor(1))) return SimplifyCFG(BB) | true; } - + // If this is a branch on a phi node in the current block, thread control // through this block if any PHI node entries are constants. if (PHINode *PN = dyn_cast(BI->getCondition())) if (PN->getParent() == BI->getParent()) if (FoldCondBranchOnPHI(BI, TD)) return SimplifyCFG(BB) | true; - + // Scan predecessor blocks for conditional branches. for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) if (BranchInst *PBI = dyn_cast((*PI)->getTerminator())) @@ -3023,11 +3868,12 @@ static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I) { if (!C) return false; - if (!I->hasOneUse()) // Only look at single-use instructions, for compile time + if (I->use_empty()) return false; if (C->isNullValue()) { - Instruction *Use = I->use_back(); + // Only look at the first use, avoid hurting compile time with long uselists + User *Use = *I->use_begin(); // Now make sure that there are no instructions in between that can alter // control flow (eg. calls) @@ -3114,7 +3960,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { // if (MergeBlockIntoPredecessor(BB)) return true; - + IRBuilder<> Builder(BB); // If there is a trivial two-entry PHI node in this basic block, and we can @@ -3152,6 +3998,7 @@ bool SimplifyCFGOpt::run(BasicBlock *BB) { /// eliminates unreachable basic blocks, and does other "peephole" optimization /// of the CFG. It returns true if a modification was made. 
/// -bool llvm::SimplifyCFG(BasicBlock *BB, const TargetData *TD) { - return SimplifyCFGOpt(TD).run(BB); +bool llvm::SimplifyCFG(BasicBlock *BB, const DataLayout *TD, + const TargetTransformInfo *TTI) { + return SimplifyCFGOpt(TD, TTI).run(BB); } diff --git a/lib/Transforms/Utils/SimplifyIndVar.cpp b/lib/Transforms/Utils/SimplifyIndVar.cpp index 5d673f1..110f3808 100644 --- a/lib/Transforms/Utils/SimplifyIndVar.cpp +++ b/lib/Transforms/Utils/SimplifyIndVar.cpp @@ -24,7 +24,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/SimplifyIndVar.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" @@ -44,7 +44,7 @@ namespace { Loop *L; LoopInfo *LI; ScalarEvolution *SE; - const TargetData *TD; // May be NULL + const DataLayout *TD; // May be NULL SmallVectorImpl &DeadInsts; @@ -56,7 +56,7 @@ namespace { L(Loop), LI(LPM->getAnalysisIfAvailable()), SE(SE), - TD(LPM->getAnalysisIfAvailable()), + TD(LPM->getAnalysisIfAvailable()), DeadInsts(Dead), Changed(false) { assert(LI && "IV simplification requires LoopInfo"); diff --git a/lib/Transforms/Utils/SimplifyInstructions.cpp b/lib/Transforms/Utils/SimplifyInstructions.cpp index 81eb9e0..65353dc 100644 --- a/lib/Transforms/Utils/SimplifyInstructions.cpp +++ b/lib/Transforms/Utils/SimplifyInstructions.cpp @@ -23,7 +23,7 @@ #include "llvm/ADT/Statistic.h" #include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/InstructionSimplify.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/Local.h" @@ -46,7 +46,7 @@ namespace { /// runOnFunction - Remove instructions that simplify. bool runOnFunction(Function &F) { const DominatorTree *DT = getAnalysisIfAvailable(); - const TargetData *TD = getAnalysisIfAvailable(); + const DataLayout *TD = getAnalysisIfAvailable(); const TargetLibraryInfo *TLI = &getAnalysis(); SmallPtrSet S1, S2, *ToSimplify = &S1, *Next = &S2; bool Changed = false; @@ -72,7 +72,7 @@ namespace { ++NumSimplified; Changed = true; } - Changed |= RecursivelyDeleteTriviallyDeadInstructions(I); + Changed |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI); } // Place the list of instructions to simplify on the next loop iteration diff --git a/lib/Transforms/Utils/SimplifyLibCalls.cpp b/lib/Transforms/Utils/SimplifyLibCalls.cpp new file mode 100644 index 0000000..c3ea638 --- /dev/null +++ b/lib/Transforms/Utils/SimplifyLibCalls.cpp @@ -0,0 +1,1149 @@ +//===------ SimplifyLibCalls.cpp - Library calls simplifier ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is a utility pass used for testing the InstructionSimplify analysis. +// The analysis is applied to every instruction, and if it simplifies then the +// instruction is replaced by the simplification. If you are looking for a pass +// that performs serious instruction folding, use the instcombine pass instead. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Transforms/Utils/SimplifyLibCalls.h" +#include "llvm/DataLayout.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/Function.h" +#include "llvm/IRBuilder.h" +#include "llvm/LLVMContext.h" +#include "llvm/Target/TargetLibraryInfo.h" +#include "llvm/Transforms/Utils/BuildLibCalls.h" + +using namespace llvm; + +/// This class is the abstract base class for the set of optimizations that +/// corresponds to one library call. +namespace { +class LibCallOptimization { +protected: + Function *Caller; + const DataLayout *TD; + const TargetLibraryInfo *TLI; + const LibCallSimplifier *LCS; + LLVMContext* Context; +public: + LibCallOptimization() { } + virtual ~LibCallOptimization() {} + + /// callOptimizer - This pure virtual method is implemented by base classes to + /// do various optimizations. If this returns null then no transformation was + /// performed. If it returns CI, then it transformed the call and CI is to be + /// deleted. If it returns something else, replace CI with the new value and + /// delete CI. + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) + =0; + + Value *optimizeCall(CallInst *CI, const DataLayout *TD, + const TargetLibraryInfo *TLI, + const LibCallSimplifier *LCS, IRBuilder<> &B) { + Caller = CI->getParent()->getParent(); + this->TD = TD; + this->TLI = TLI; + this->LCS = LCS; + if (CI->getCalledFunction()) + Context = &CI->getCalledFunction()->getContext(); + + // We never change the calling convention. + if (CI->getCallingConv() != llvm::CallingConv::C) + return NULL; + + return callOptimizer(CI->getCalledFunction(), CI, B); + } +}; + +//===----------------------------------------------------------------------===// +// Helper Functions +//===----------------------------------------------------------------------===// + +/// isOnlyUsedInZeroEqualityComparison - Return true if it only matters that the +/// value is equal or not-equal to zero. +static bool isOnlyUsedInZeroEqualityComparison(Value *V) { + for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); + UI != E; ++UI) { + if (ICmpInst *IC = dyn_cast(*UI)) + if (IC->isEquality()) + if (Constant *C = dyn_cast(IC->getOperand(1))) + if (C->isNullValue()) + continue; + // Unknown instruction. + return false; + } + return true; +} + +/// isOnlyUsedInEqualityComparison - Return true if it is only used in equality +/// comparisons with With. +static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) { + for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); + UI != E; ++UI) { + if (ICmpInst *IC = dyn_cast(*UI)) + if (IC->isEquality() && IC->getOperand(1) == With) + continue; + // Unknown instruction. 
+ return false; + } + return true; +} + +//===----------------------------------------------------------------------===// +// Fortified Library Call Optimizations +//===----------------------------------------------------------------------===// + +struct FortifiedLibCallOptimization : public LibCallOptimization { +protected: + virtual bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, + bool isString) const = 0; +}; + +struct InstFortifiedLibCallOptimization : public FortifiedLibCallOptimization { + CallInst *CI; + + bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const { + if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp)) + return true; + if (ConstantInt *SizeCI = + dyn_cast(CI->getArgOperand(SizeCIOp))) { + if (SizeCI->isAllOnesValue()) + return true; + if (isString) { + uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp)); + // If the length is 0 we don't know how long it is and so we can't + // remove the check. + if (Len == 0) return false; + return SizeCI->getZExtValue() >= Len; + } + if (ConstantInt *Arg = dyn_cast( + CI->getArgOperand(SizeArgOp))) + return SizeCI->getZExtValue() >= Arg->getZExtValue(); + } + return false; + } +}; + +struct MemCpyChkOpt : public InstFortifiedLibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + this->CI = CI; + FunctionType *FT = Callee->getFunctionType(); + LLVMContext &Context = CI->getParent()->getContext(); + + // Check if this has the right signature. + if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || + !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isPointerTy() || + FT->getParamType(2) != TD->getIntPtrType(Context) || + FT->getParamType(3) != TD->getIntPtrType(Context)) + return 0; + + if (isFoldable(3, 2, false)) { + B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), + CI->getArgOperand(2), 1); + return CI->getArgOperand(0); + } + return 0; + } +}; + +struct MemMoveChkOpt : public InstFortifiedLibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + this->CI = CI; + FunctionType *FT = Callee->getFunctionType(); + LLVMContext &Context = CI->getParent()->getContext(); + + // Check if this has the right signature. + if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || + !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isPointerTy() || + FT->getParamType(2) != TD->getIntPtrType(Context) || + FT->getParamType(3) != TD->getIntPtrType(Context)) + return 0; + + if (isFoldable(3, 2, false)) { + B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1), + CI->getArgOperand(2), 1); + return CI->getArgOperand(0); + } + return 0; + } +}; + +struct MemSetChkOpt : public InstFortifiedLibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + this->CI = CI; + FunctionType *FT = Callee->getFunctionType(); + LLVMContext &Context = CI->getParent()->getContext(); + + // Check if this has the right signature. 
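The isFoldable check above is the decision rule shared by all of the *_chk folds: the runtime overflow check can be dropped when the object-size argument is all-ones (typically produced when __builtin_object_size cannot determine a bound) or is known to be at least as large as the number of bytes the call will write. A standalone sketch of that rule, with made-up sizes:

#include <cassert>
#include <cstdint>

// Sketch of the fortified-call folding rule: drop the check when it can
// never fire. UINT64_MAX stands in for the "all ones" unknown-size value.
static bool canDropCheck(uint64_t ObjSize, uint64_t CopyLen) {
  if (ObjSize == UINT64_MAX) // no usable bound: checked call == plain call
    return true;
  return ObjSize >= CopyLen; // the write provably fits
}

int main() {
  assert(canDropCheck(UINT64_MAX, 100)); // __memcpy_chk -> memcpy
  assert(canDropCheck(64, 16));          // fits: fold to plain memcpy
  assert(!canDropCheck(8, 16));          // may overflow: keep the checked call
}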
+ if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || + !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isIntegerTy() || + FT->getParamType(2) != TD->getIntPtrType(Context) || + FT->getParamType(3) != TD->getIntPtrType(Context)) + return 0; + + if (isFoldable(3, 2, false)) { + Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), + false); + B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1); + return CI->getArgOperand(0); + } + return 0; + } +}; + +struct StrCpyChkOpt : public InstFortifiedLibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + this->CI = CI; + StringRef Name = Callee->getName(); + FunctionType *FT = Callee->getFunctionType(); + LLVMContext &Context = CI->getParent()->getContext(); + + // Check if this has the right signature. + if (FT->getNumParams() != 3 || + FT->getReturnType() != FT->getParamType(0) || + FT->getParamType(0) != FT->getParamType(1) || + FT->getParamType(0) != Type::getInt8PtrTy(Context) || + FT->getParamType(2) != TD->getIntPtrType(Context)) + return 0; + + Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); + if (Dst == Src) // __strcpy_chk(x,x) -> x + return Src; + + // If a) we don't have any length information, or b) we know this will + // fit then just lower to a plain strcpy. Otherwise we'll keep our + // strcpy_chk call which may fail at runtime if the size is too long. + // TODO: It might be nice to get a maximum length out of the possible + // string lengths for varying. + if (isFoldable(2, 1, true)) { + Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6)); + return Ret; + } else { + // Maybe we can stil fold __strcpy_chk to __memcpy_chk. + uint64_t Len = GetStringLength(Src); + if (Len == 0) return 0; + + // This optimization require DataLayout. + if (!TD) return 0; + + Value *Ret = + EmitMemCpyChk(Dst, Src, + ConstantInt::get(TD->getIntPtrType(Context), Len), + CI->getArgOperand(2), B, TD, TLI); + return Ret; + } + return 0; + } +}; + +struct StpCpyChkOpt : public InstFortifiedLibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + this->CI = CI; + StringRef Name = Callee->getName(); + FunctionType *FT = Callee->getFunctionType(); + LLVMContext &Context = CI->getParent()->getContext(); + + // Check if this has the right signature. + if (FT->getNumParams() != 3 || + FT->getReturnType() != FT->getParamType(0) || + FT->getParamType(0) != FT->getParamType(1) || + FT->getParamType(0) != Type::getInt8PtrTy(Context) || + FT->getParamType(2) != TD->getIntPtrType(FT->getParamType(0))) + return 0; + + Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); + if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x) + Value *StrLen = EmitStrLen(Src, B, TD, TLI); + return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0; + } + + // If a) we don't have any length information, or b) we know this will + // fit then just lower to a plain stpcpy. Otherwise we'll keep our + // stpcpy_chk call which may fail at runtime if the size is too long. + // TODO: It might be nice to get a maximum length out of the possible + // string lengths for varying. + if (isFoldable(2, 1, true)) { + Value *Ret = EmitStrCpy(Dst, Src, B, TD, TLI, Name.substr(2, 6)); + return Ret; + } else { + // Maybe we can stil fold __stpcpy_chk to __memcpy_chk. + uint64_t Len = GetStringLength(Src); + if (Len == 0) return 0; + + // This optimization require DataLayout. 
+ if (!TD) return 0; + + Type *PT = FT->getParamType(0); + Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len); + Value *DstEnd = B.CreateGEP(Dst, + ConstantInt::get(TD->getIntPtrType(PT), + Len - 1)); + if (!EmitMemCpyChk(Dst, Src, LenV, CI->getArgOperand(2), B, TD, TLI)) + return 0; + return DstEnd; + } + return 0; + } +}; + +struct StrNCpyChkOpt : public InstFortifiedLibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + this->CI = CI; + StringRef Name = Callee->getName(); + FunctionType *FT = Callee->getFunctionType(); + LLVMContext &Context = CI->getParent()->getContext(); + + // Check if this has the right signature. + if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) || + FT->getParamType(0) != FT->getParamType(1) || + FT->getParamType(0) != Type::getInt8PtrTy(Context) || + !FT->getParamType(2)->isIntegerTy() || + FT->getParamType(3) != TD->getIntPtrType(Context)) + return 0; + + if (isFoldable(3, 2, false)) { + Value *Ret = EmitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1), + CI->getArgOperand(2), B, TD, TLI, + Name.substr(2, 7)); + return Ret; + } + return 0; + } +}; + +//===----------------------------------------------------------------------===// +// String and Memory Library Call Optimizations +//===----------------------------------------------------------------------===// + +struct StrCatOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // Verify the "strcat" function prototype. + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + FT->getReturnType() != B.getInt8PtrTy() || + FT->getParamType(0) != FT->getReturnType() || + FT->getParamType(1) != FT->getReturnType()) + return 0; + + // Extract some information from the instruction + Value *Dst = CI->getArgOperand(0); + Value *Src = CI->getArgOperand(1); + + // See if we can get the length of the input string. + uint64_t Len = GetStringLength(Src); + if (Len == 0) return 0; + --Len; // Unbias length. + + // Handle the simple, do-nothing case: strcat(x, "") -> x + if (Len == 0) + return Dst; + + // These optimizations require DataLayout. + if (!TD) return 0; + + return emitStrLenMemCpy(Src, Dst, Len, B); + } + + Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, + IRBuilder<> &B) { + // We need to find the end of the destination string. That's where the + // memory is to be moved to. We just generate a call to strlen. + Value *DstLen = EmitStrLen(Dst, B, TD, TLI); + if (!DstLen) + return 0; + + // Now that we have the destination's length, we must index into the + // destination's pointer to get the actual memcpy destination (end of + // the string .. we're concatenating). + Value *CpyDst = B.CreateGEP(Dst, DstLen, "endptr"); + + // We have enough information to now generate the memcpy call to do the + // concatenation for us. Make a memcpy to copy the nul byte with align = 1. + B.CreateMemCpy(CpyDst, Src, + ConstantInt::get(TD->getIntPtrType(*Context), Len + 1), 1); + return Dst; + } +}; + +struct StrNCatOpt : public StrCatOpt { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // Verify the "strncat" function prototype. 
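The strcat lowering above (emitStrLenMemCpy) amounts to "find the end of the destination with strlen, then copy Len+1 bytes including the nul". A small sketch showing the library call and its folded form produce the same bytes; the buffers and literal here are illustrative only:

#include <cassert>
#include <cstring>

int main() {
  char a[16] = "foo";
  char b[16] = "foo";
  const char *src = "bar";                     // length known: 3

  std::strcat(a, src);                         // the library call
  std::memcpy(b + std::strlen(b), src, 3 + 1); // the fold: strlen + memcpy of Len+1
  assert(std::strcmp(a, b) == 0 && std::strcmp(a, "foobar") == 0);
}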
+ FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 3 || + FT->getReturnType() != B.getInt8PtrTy() || + FT->getParamType(0) != FT->getReturnType() || + FT->getParamType(1) != FT->getReturnType() || + !FT->getParamType(2)->isIntegerTy()) + return 0; + + // Extract some information from the instruction + Value *Dst = CI->getArgOperand(0); + Value *Src = CI->getArgOperand(1); + uint64_t Len; + + // We don't do anything if length is not constant + if (ConstantInt *LengthArg = dyn_cast(CI->getArgOperand(2))) + Len = LengthArg->getZExtValue(); + else + return 0; + + // See if we can get the length of the input string. + uint64_t SrcLen = GetStringLength(Src); + if (SrcLen == 0) return 0; + --SrcLen; // Unbias length. + + // Handle the simple, do-nothing cases: + // strncat(x, "", c) -> x + // strncat(x, c, 0) -> x + if (SrcLen == 0 || Len == 0) return Dst; + + // These optimizations require DataLayout. + if (!TD) return 0; + + // We don't optimize this case + if (Len < SrcLen) return 0; + + // strncat(x, s, c) -> strcat(x, s) + // s is constant so the strcat can be optimized further + return emitStrLenMemCpy(Src, Dst, SrcLen, B); + } +}; + +struct StrChrOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // Verify the "strchr" function prototype. + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + FT->getReturnType() != B.getInt8PtrTy() || + FT->getParamType(0) != FT->getReturnType() || + !FT->getParamType(1)->isIntegerTy(32)) + return 0; + + Value *SrcStr = CI->getArgOperand(0); + + // If the second operand is non-constant, see if we can compute the length + // of the input string and turn this into memchr. + ConstantInt *CharC = dyn_cast(CI->getArgOperand(1)); + if (CharC == 0) { + // These optimizations require DataLayout. + if (!TD) return 0; + + uint64_t Len = GetStringLength(SrcStr); + if (Len == 0 || !FT->getParamType(1)->isIntegerTy(32))// memchr needs i32. + return 0; + + return EmitMemChr(SrcStr, CI->getArgOperand(1), // include nul. + ConstantInt::get(TD->getIntPtrType(*Context), Len), + B, TD, TLI); + } + + // Otherwise, the character is a constant, see if the first argument is + // a string literal. If so, we can constant fold. + StringRef Str; + if (!getConstantStringInfo(SrcStr, Str)) + return 0; + + // Compute the offset, make sure to handle the case when we're searching for + // zero (a weird way to spell strlen). + size_t I = CharC->getSExtValue() == 0 ? + Str.size() : Str.find(CharC->getSExtValue()); + if (I == StringRef::npos) // Didn't find the char. strchr returns null. + return Constant::getNullValue(CI->getType()); + + // strchr(s+n,c) -> gep(s+n+i,c) + return B.CreateGEP(SrcStr, B.getInt64(I), "strchr"); + } +}; + +struct StrRChrOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // Verify the "strrchr" function prototype. + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + FT->getReturnType() != B.getInt8PtrTy() || + FT->getParamType(0) != FT->getReturnType() || + !FT->getParamType(1)->isIntegerTy(32)) + return 0; + + Value *SrcStr = CI->getArgOperand(0); + ConstantInt *CharC = dyn_cast(CI->getArgOperand(1)); + + // Cannot fold anything if we're not looking for a constant. 
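The strchr constant folds above reduce to pointer arithmetic once both arguments are known, with the special case that searching for '\0' locates the terminator (another way of spelling strlen). A short check of those identities against the C library:

#include <cassert>
#include <cstring>

int main() {
  const char *s = "abcd";
  assert(std::strchr(s, 'c') == s + 2);                // strchr(s, c) -> s + offset
  assert(std::strchr(s, 'x') == nullptr);              // not found -> null
  assert(std::strchr(s, '\0') == s + std::strlen(s));  // find the nul terminator
}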
+ if (!CharC) + return 0; + + StringRef Str; + if (!getConstantStringInfo(SrcStr, Str)) { + // strrchr(s, 0) -> strchr(s, 0) + if (TD && CharC->isZero()) + return EmitStrChr(SrcStr, '\0', B, TD, TLI); + return 0; + } + + // Compute the offset. + size_t I = CharC->getSExtValue() == 0 ? + Str.size() : Str.rfind(CharC->getSExtValue()); + if (I == StringRef::npos) // Didn't find the char. Return null. + return Constant::getNullValue(CI->getType()); + + // strrchr(s+n,c) -> gep(s+n+i,c) + return B.CreateGEP(SrcStr, B.getInt64(I), "strrchr"); + } +}; + +struct StrCmpOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // Verify the "strcmp" function prototype. + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + !FT->getReturnType()->isIntegerTy(32) || + FT->getParamType(0) != FT->getParamType(1) || + FT->getParamType(0) != B.getInt8PtrTy()) + return 0; + + Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1); + if (Str1P == Str2P) // strcmp(x,x) -> 0 + return ConstantInt::get(CI->getType(), 0); + + StringRef Str1, Str2; + bool HasStr1 = getConstantStringInfo(Str1P, Str1); + bool HasStr2 = getConstantStringInfo(Str2P, Str2); + + // strcmp(x, y) -> cnst (if both x and y are constant strings) + if (HasStr1 && HasStr2) + return ConstantInt::get(CI->getType(), Str1.compare(Str2)); + + if (HasStr1 && Str1.empty()) // strcmp("", x) -> -*x + return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), + CI->getType())); + + if (HasStr2 && Str2.empty()) // strcmp(x,"") -> *x + return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType()); + + // strcmp(P, "x") -> memcmp(P, "x", 2) + uint64_t Len1 = GetStringLength(Str1P); + uint64_t Len2 = GetStringLength(Str2P); + if (Len1 && Len2) { + // These optimizations require DataLayout. + if (!TD) return 0; + + return EmitMemCmp(Str1P, Str2P, + ConstantInt::get(TD->getIntPtrType(*Context), + std::min(Len1, Len2)), B, TD, TLI); + } + + return 0; + } +}; + +struct StrNCmpOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // Verify the "strncmp" function prototype. + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 3 || + !FT->getReturnType()->isIntegerTy(32) || + FT->getParamType(0) != FT->getParamType(1) || + FT->getParamType(0) != B.getInt8PtrTy() || + !FT->getParamType(2)->isIntegerTy()) + return 0; + + Value *Str1P = CI->getArgOperand(0), *Str2P = CI->getArgOperand(1); + if (Str1P == Str2P) // strncmp(x,x,n) -> 0 + return ConstantInt::get(CI->getType(), 0); + + // Get the length argument if it is constant. 
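The empty-string folds above (strcmp("", x) -> -*x and strcmp(x, "") -> *x) are valid because the C standard specifies only the sign of strcmp's result, so any replacement value with the correct sign is acceptable. A sketch verifying sign agreement rather than exact values:

#include <cassert>
#include <cstring>

static int sign(int v) { return (v > 0) - (v < 0); }

int main() {
  const char *x = "abc";
  const char *empty = "";
  // strcmp("", x) -> -*x : negative whenever x is non-empty.
  assert(sign(std::strcmp(empty, x)) == sign(-(int)(unsigned char)x[0]));
  // strcmp(x, "") -> *x : positive whenever x is non-empty.
  assert(sign(std::strcmp(x, empty)) == sign((unsigned char)x[0]));
  // strcmp(x, x) -> 0.
  assert(std::strcmp(x, x) == 0);
}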
+ uint64_t Length; + if (ConstantInt *LengthArg = dyn_cast(CI->getArgOperand(2))) + Length = LengthArg->getZExtValue(); + else + return 0; + + if (Length == 0) // strncmp(x,y,0) -> 0 + return ConstantInt::get(CI->getType(), 0); + + if (TD && Length == 1) // strncmp(x,y,1) -> memcmp(x,y,1) + return EmitMemCmp(Str1P, Str2P, CI->getArgOperand(2), B, TD, TLI); + + StringRef Str1, Str2; + bool HasStr1 = getConstantStringInfo(Str1P, Str1); + bool HasStr2 = getConstantStringInfo(Str2P, Str2); + + // strncmp(x, y) -> cnst (if both x and y are constant strings) + if (HasStr1 && HasStr2) { + StringRef SubStr1 = Str1.substr(0, Length); + StringRef SubStr2 = Str2.substr(0, Length); + return ConstantInt::get(CI->getType(), SubStr1.compare(SubStr2)); + } + + if (HasStr1 && Str1.empty()) // strncmp("", x, n) -> -*x + return B.CreateNeg(B.CreateZExt(B.CreateLoad(Str2P, "strcmpload"), + CI->getType())); + + if (HasStr2 && Str2.empty()) // strncmp(x, "", n) -> *x + return B.CreateZExt(B.CreateLoad(Str1P, "strcmpload"), CI->getType()); + + return 0; + } +}; + +struct StrCpyOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // Verify the "strcpy" function prototype. + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + FT->getReturnType() != FT->getParamType(0) || + FT->getParamType(0) != FT->getParamType(1) || + FT->getParamType(0) != B.getInt8PtrTy()) + return 0; + + Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); + if (Dst == Src) // strcpy(x,x) -> x + return Src; + + // These optimizations require DataLayout. + if (!TD) return 0; + + // See if we can get the length of the input string. + uint64_t Len = GetStringLength(Src); + if (Len == 0) return 0; + + // We have enough information to now generate the memcpy call to do the + // copy for us. Make a memcpy to copy the nul byte with align = 1. + B.CreateMemCpy(Dst, Src, + ConstantInt::get(TD->getIntPtrType(*Context), Len), 1); + return Dst; + } +}; + +struct StpCpyOpt: public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // Verify the "stpcpy" function prototype. + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + FT->getReturnType() != FT->getParamType(0) || + FT->getParamType(0) != FT->getParamType(1) || + FT->getParamType(0) != B.getInt8PtrTy()) + return 0; + + // These optimizations require DataLayout. + if (!TD) return 0; + + Value *Dst = CI->getArgOperand(0), *Src = CI->getArgOperand(1); + if (Dst == Src) { // stpcpy(x,x) -> x+strlen(x) + Value *StrLen = EmitStrLen(Src, B, TD, TLI); + return StrLen ? B.CreateInBoundsGEP(Dst, StrLen) : 0; + } + + // See if we can get the length of the input string. + uint64_t Len = GetStringLength(Src); + if (Len == 0) return 0; + + Type *PT = FT->getParamType(0); + Value *LenV = ConstantInt::get(TD->getIntPtrType(PT), Len); + Value *DstEnd = B.CreateGEP(Dst, + ConstantInt::get(TD->getIntPtrType(PT), + Len - 1)); + + // We have enough information to now generate the memcpy call to do the + // copy for us. Make a memcpy to copy the nul byte with align = 1. 
+ B.CreateMemCpy(Dst, Src, LenV, 1); + return DstEnd; + } +}; + +struct StrNCpyOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || + FT->getParamType(0) != FT->getParamType(1) || + FT->getParamType(0) != B.getInt8PtrTy() || + !FT->getParamType(2)->isIntegerTy()) + return 0; + + Value *Dst = CI->getArgOperand(0); + Value *Src = CI->getArgOperand(1); + Value *LenOp = CI->getArgOperand(2); + + // See if we can get the length of the input string. + uint64_t SrcLen = GetStringLength(Src); + if (SrcLen == 0) return 0; + --SrcLen; + + if (SrcLen == 0) { + // strncpy(x, "", y) -> memset(x, '\0', y, 1) + B.CreateMemSet(Dst, B.getInt8('\0'), LenOp, 1); + return Dst; + } + + uint64_t Len; + if (ConstantInt *LengthArg = dyn_cast(LenOp)) + Len = LengthArg->getZExtValue(); + else + return 0; + + if (Len == 0) return Dst; // strncpy(x, y, 0) -> x + + // These optimizations require DataLayout. + if (!TD) return 0; + + // Let strncpy handle the zero padding + if (Len > SrcLen+1) return 0; + + Type *PT = FT->getParamType(0); + // strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant] + B.CreateMemCpy(Dst, Src, + ConstantInt::get(TD->getIntPtrType(PT), Len), 1); + + return Dst; + } +}; + +struct StrLenOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 1 || + FT->getParamType(0) != B.getInt8PtrTy() || + !FT->getReturnType()->isIntegerTy()) + return 0; + + Value *Src = CI->getArgOperand(0); + + // Constant folding: strlen("xyz") -> 3 + if (uint64_t Len = GetStringLength(Src)) + return ConstantInt::get(CI->getType(), Len-1); + + // strlen(x) != 0 --> *x != 0 + // strlen(x) == 0 --> *x == 0 + if (isOnlyUsedInZeroEqualityComparison(CI)) + return B.CreateZExt(B.CreateLoad(Src, "strlenfirst"), CI->getType()); + return 0; + } +}; + +struct StrPBrkOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + FT->getParamType(0) != B.getInt8PtrTy() || + FT->getParamType(1) != FT->getParamType(0) || + FT->getReturnType() != FT->getParamType(0)) + return 0; + + StringRef S1, S2; + bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1); + bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2); + + // strpbrk(s, "") -> NULL + // strpbrk("", s) -> NULL + if ((HasS1 && S1.empty()) || (HasS2 && S2.empty())) + return Constant::getNullValue(CI->getType()); + + // Constant folding. + if (HasS1 && HasS2) { + size_t I = S1.find_first_of(S2); + if (I == std::string::npos) // No match. 
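The "Let strncpy handle the zero padding" bail-out above exists because strncpy pads the destination with nul bytes up to the given length, so only copies that need no padding beyond the source's terminator can become a single memcpy. A small demonstration of the distinction (buffers are illustrative):

#include <cassert>
#include <cstring>

int main() {
  char a[8], b[8];
  std::memset(a, 'A', sizeof a);
  std::memset(b, 'A', sizeof b);

  std::strncpy(a, "hi", 3);   // copies 'h','i','\0' -- no padding needed
  std::memcpy(b, "hi", 3);    // equivalent, so the fold is safe
  assert(std::memcmp(a, b, sizeof a) == 0);

  std::strncpy(a, "hi", 8);   // zero-pads a[3..7]
  std::memcpy(b, "hi", 3);    // not equivalent: b[3..7] still hold 'A'
  assert(std::memcmp(a, b, sizeof a) != 0);
}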
+ return Constant::getNullValue(CI->getType()); + + return B.CreateGEP(CI->getArgOperand(0), B.getInt64(I), "strpbrk"); + } + + // strpbrk(s, "a") -> strchr(s, 'a') + if (TD && HasS2 && S2.size() == 1) + return EmitStrChr(CI->getArgOperand(0), S2[0], B, TD, TLI); + + return 0; + } +}; + +struct StrToOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + FunctionType *FT = Callee->getFunctionType(); + if ((FT->getNumParams() != 2 && FT->getNumParams() != 3) || + !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isPointerTy()) + return 0; + + Value *EndPtr = CI->getArgOperand(1); + if (isa(EndPtr)) { + // With a null EndPtr, this function won't capture the main argument. + // It would be readonly too, except that it still may write to errno. + CI->addAttribute(1, Attributes::get(Callee->getContext(), + Attributes::NoCapture)); + } + + return 0; + } +}; + +struct StrSpnOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + FT->getParamType(0) != B.getInt8PtrTy() || + FT->getParamType(1) != FT->getParamType(0) || + !FT->getReturnType()->isIntegerTy()) + return 0; + + StringRef S1, S2; + bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1); + bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2); + + // strspn(s, "") -> 0 + // strspn("", s) -> 0 + if ((HasS1 && S1.empty()) || (HasS2 && S2.empty())) + return Constant::getNullValue(CI->getType()); + + // Constant folding. + if (HasS1 && HasS2) { + size_t Pos = S1.find_first_not_of(S2); + if (Pos == StringRef::npos) Pos = S1.size(); + return ConstantInt::get(CI->getType(), Pos); + } + + return 0; + } +}; + +struct StrCSpnOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + FT->getParamType(0) != B.getInt8PtrTy() || + FT->getParamType(1) != FT->getParamType(0) || + !FT->getReturnType()->isIntegerTy()) + return 0; + + StringRef S1, S2; + bool HasS1 = getConstantStringInfo(CI->getArgOperand(0), S1); + bool HasS2 = getConstantStringInfo(CI->getArgOperand(1), S2); + + // strcspn("", s) -> 0 + if (HasS1 && S1.empty()) + return Constant::getNullValue(CI->getType()); + + // Constant folding. + if (HasS1 && HasS2) { + size_t Pos = S1.find_first_of(S2); + if (Pos == StringRef::npos) Pos = S1.size(); + return ConstantInt::get(CI->getType(), Pos); + } + + // strcspn(s, "") -> strlen(s) + if (TD && HasS2 && S2.empty()) + return EmitStrLen(CI->getArgOperand(0), B, TD, TLI); + + return 0; + } +}; + +struct StrStrOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 2 || + !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isPointerTy() || + !FT->getReturnType()->isPointerTy()) + return 0; + + // fold strstr(x, x) -> x. 
+ if (CI->getArgOperand(0) == CI->getArgOperand(1)) + return B.CreateBitCast(CI->getArgOperand(0), CI->getType()); + + // fold strstr(a, b) == a -> strncmp(a, b, strlen(b)) == 0 + if (TD && isOnlyUsedInEqualityComparison(CI, CI->getArgOperand(0))) { + Value *StrLen = EmitStrLen(CI->getArgOperand(1), B, TD, TLI); + if (!StrLen) + return 0; + Value *StrNCmp = EmitStrNCmp(CI->getArgOperand(0), CI->getArgOperand(1), + StrLen, B, TD, TLI); + if (!StrNCmp) + return 0; + for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end(); + UI != UE; ) { + ICmpInst *Old = cast(*UI++); + Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp, + ConstantInt::getNullValue(StrNCmp->getType()), + "cmp"); + LCS->replaceAllUsesWith(Old, Cmp); + } + return CI; + } + + // See if either input string is a constant string. + StringRef SearchStr, ToFindStr; + bool HasStr1 = getConstantStringInfo(CI->getArgOperand(0), SearchStr); + bool HasStr2 = getConstantStringInfo(CI->getArgOperand(1), ToFindStr); + + // fold strstr(x, "") -> x. + if (HasStr2 && ToFindStr.empty()) + return B.CreateBitCast(CI->getArgOperand(0), CI->getType()); + + // If both strings are known, constant fold it. + if (HasStr1 && HasStr2) { + std::string::size_type Offset = SearchStr.find(ToFindStr); + + if (Offset == StringRef::npos) // strstr("foo", "bar") -> null + return Constant::getNullValue(CI->getType()); + + // strstr("abcd", "bc") -> gep((char*)"abcd", 1) + Value *Result = CastToCStr(CI->getArgOperand(0), B); + Result = B.CreateConstInBoundsGEP1_64(Result, Offset, "strstr"); + return B.CreateBitCast(Result, CI->getType()); + } + + // fold strstr(x, "y") -> strchr(x, 'y'). + if (HasStr2 && ToFindStr.size() == 1) { + Value *StrChr= EmitStrChr(CI->getArgOperand(0), ToFindStr[0], B, TD, TLI); + return StrChr ? B.CreateBitCast(StrChr, CI->getType()) : 0; + } + return 0; + } +}; + +struct MemCmpOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 3 || !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isPointerTy() || + !FT->getReturnType()->isIntegerTy(32)) + return 0; + + Value *LHS = CI->getArgOperand(0), *RHS = CI->getArgOperand(1); + + if (LHS == RHS) // memcmp(s,s,x) -> 0 + return Constant::getNullValue(CI->getType()); + + // Make sure we have a constant length. + ConstantInt *LenC = dyn_cast(CI->getArgOperand(2)); + if (!LenC) return 0; + uint64_t Len = LenC->getZExtValue(); + + if (Len == 0) // memcmp(s1,s2,0) -> 0 + return Constant::getNullValue(CI->getType()); + + // memcmp(S1,S2,1) -> *(unsigned char*)LHS - *(unsigned char*)RHS + if (Len == 1) { + Value *LHSV = B.CreateZExt(B.CreateLoad(CastToCStr(LHS, B), "lhsc"), + CI->getType(), "lhsv"); + Value *RHSV = B.CreateZExt(B.CreateLoad(CastToCStr(RHS, B), "rhsc"), + CI->getType(), "rhsv"); + return B.CreateSub(LHSV, RHSV, "chardiff"); + } + + // Constant folding: memcmp(x, y, l) -> cnst (all arguments are constant) + StringRef LHSStr, RHSStr; + if (getConstantStringInfo(LHS, LHSStr) && + getConstantStringInfo(RHS, RHSStr)) { + // Make sure we're not reading out-of-bounds memory. 
+ if (Len > LHSStr.size() || Len > RHSStr.size()) + return 0; + uint64_t Ret = memcmp(LHSStr.data(), RHSStr.data(), Len); + return ConstantInt::get(CI->getType(), Ret); + } + + return 0; + } +}; + +struct MemCpyOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // These optimizations require DataLayout. + if (!TD) return 0; + + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || + !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isPointerTy() || + FT->getParamType(2) != TD->getIntPtrType(*Context)) + return 0; + + // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1) + B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1), + CI->getArgOperand(2), 1); + return CI->getArgOperand(0); + } +}; + +struct MemMoveOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // These optimizations require DataLayout. + if (!TD) return 0; + + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || + !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isPointerTy() || + FT->getParamType(2) != TD->getIntPtrType(*Context)) + return 0; + + // memmove(x, y, n) -> llvm.memmove(x, y, n, 1) + B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1), + CI->getArgOperand(2), 1); + return CI->getArgOperand(0); + } +}; + +struct MemSetOpt : public LibCallOptimization { + virtual Value *callOptimizer(Function *Callee, CallInst *CI, IRBuilder<> &B) { + // These optimizations require DataLayout. + if (!TD) return 0; + + FunctionType *FT = Callee->getFunctionType(); + if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) || + !FT->getParamType(0)->isPointerTy() || + !FT->getParamType(1)->isIntegerTy() || + FT->getParamType(2) != TD->getIntPtrType(*Context)) + return 0; + + // memset(p, v, n) -> llvm.memset(p, v, n, 1) + Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false); + B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1); + return CI->getArgOperand(0); + } +}; + +} // End anonymous namespace. + +namespace llvm { + +class LibCallSimplifierImpl { + const DataLayout *TD; + const TargetLibraryInfo *TLI; + const LibCallSimplifier *LCS; + StringMap Optimizations; + + // Fortified library call optimizations. + MemCpyChkOpt MemCpyChk; + MemMoveChkOpt MemMoveChk; + MemSetChkOpt MemSetChk; + StrCpyChkOpt StrCpyChk; + StpCpyChkOpt StpCpyChk; + StrNCpyChkOpt StrNCpyChk; + + // String library call optimizations. + StrCatOpt StrCat; + StrNCatOpt StrNCat; + StrChrOpt StrChr; + StrRChrOpt StrRChr; + StrCmpOpt StrCmp; + StrNCmpOpt StrNCmp; + StrCpyOpt StrCpy; + StpCpyOpt StpCpy; + StrNCpyOpt StrNCpy; + StrLenOpt StrLen; + StrPBrkOpt StrPBrk; + StrToOpt StrTo; + StrSpnOpt StrSpn; + StrCSpnOpt StrCSpn; + StrStrOpt StrStr; + + // Memory library call optimizations. + MemCmpOpt MemCmp; + MemCpyOpt MemCpy; + MemMoveOpt MemMove; + MemSetOpt MemSet; + + void initOptimizations(); + void addOpt(LibFunc::Func F, LibCallOptimization* Opt); +public: + LibCallSimplifierImpl(const DataLayout *TD, const TargetLibraryInfo *TLI, + const LibCallSimplifier *LCS) { + this->TD = TD; + this->TLI = TLI; + this->LCS = LCS; + } + + Value *optimizeCall(CallInst *CI); +}; + +void LibCallSimplifierImpl::initOptimizations() { + // Fortified library call optimizations. 
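Like strcmp, memcmp's result is specified only by sign, which is why the length-1 case above can be folded to the difference of the first bytes read as unsigned char, and why the constant-string fold first verifies the constant length stays within both strings. A sketch checking sign agreement with the library:

#include <cassert>
#include <cstring>

static int sign(int v) { return (v > 0) - (v < 0); }

int main() {
  const unsigned char a[] = {0x10, 0xff};
  const unsigned char b[] = {0xf0, 0x00};
  int folded = (int)a[0] - (int)b[0];                 // the memcmp(a, b, 1) fold
  assert(sign(std::memcmp(a, b, 1)) == sign(folded));
  assert(std::memcmp(a, a, 2) == 0);                  // memcmp(s, s, n) -> 0
}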
+ Optimizations["__memcpy_chk"] = &MemCpyChk; + Optimizations["__memmove_chk"] = &MemMoveChk; + Optimizations["__memset_chk"] = &MemSetChk; + Optimizations["__strcpy_chk"] = &StrCpyChk; + Optimizations["__stpcpy_chk"] = &StpCpyChk; + Optimizations["__strncpy_chk"] = &StrNCpyChk; + Optimizations["__stpncpy_chk"] = &StrNCpyChk; + + // String library call optimizations. + addOpt(LibFunc::strcat, &StrCat); + addOpt(LibFunc::strncat, &StrNCat); + addOpt(LibFunc::strchr, &StrChr); + addOpt(LibFunc::strrchr, &StrRChr); + addOpt(LibFunc::strcmp, &StrCmp); + addOpt(LibFunc::strncmp, &StrNCmp); + addOpt(LibFunc::strcpy, &StrCpy); + addOpt(LibFunc::stpcpy, &StpCpy); + addOpt(LibFunc::strncpy, &StrNCpy); + addOpt(LibFunc::strlen, &StrLen); + addOpt(LibFunc::strpbrk, &StrPBrk); + addOpt(LibFunc::strtol, &StrTo); + addOpt(LibFunc::strtod, &StrTo); + addOpt(LibFunc::strtof, &StrTo); + addOpt(LibFunc::strtoul, &StrTo); + addOpt(LibFunc::strtoll, &StrTo); + addOpt(LibFunc::strtold, &StrTo); + addOpt(LibFunc::strtoull, &StrTo); + addOpt(LibFunc::strspn, &StrSpn); + addOpt(LibFunc::strcspn, &StrCSpn); + addOpt(LibFunc::strstr, &StrStr); + + // Memory library call optimizations. + addOpt(LibFunc::memcmp, &MemCmp); + addOpt(LibFunc::memcpy, &MemCpy); + addOpt(LibFunc::memmove, &MemMove); + addOpt(LibFunc::memset, &MemSet); +} + +Value *LibCallSimplifierImpl::optimizeCall(CallInst *CI) { + if (Optimizations.empty()) + initOptimizations(); + + Function *Callee = CI->getCalledFunction(); + LibCallOptimization *LCO = Optimizations.lookup(Callee->getName()); + if (LCO) { + IRBuilder<> Builder(CI); + return LCO->optimizeCall(CI, TD, TLI, LCS, Builder); + } + return 0; +} + +void LibCallSimplifierImpl::addOpt(LibFunc::Func F, LibCallOptimization* Opt) { + if (TLI->has(F)) + Optimizations[TLI->getName(F)] = Opt; +} + +LibCallSimplifier::LibCallSimplifier(const DataLayout *TD, + const TargetLibraryInfo *TLI) { + Impl = new LibCallSimplifierImpl(TD, TLI, this); +} + +LibCallSimplifier::~LibCallSimplifier() { + delete Impl; +} + +Value *LibCallSimplifier::optimizeCall(CallInst *CI) { + return Impl->optimizeCall(CI); +} + +void LibCallSimplifier::replaceAllUsesWith(Instruction *I, Value *With) const { + I->replaceAllUsesWith(With); + I->eraseFromParent(); +} + +} diff --git a/lib/Transforms/Utils/Utils.cpp b/lib/Transforms/Utils/Utils.cpp index 24e8c8f..5812d46 100644 --- a/lib/Transforms/Utils/Utils.cpp +++ b/lib/Transforms/Utils/Utils.cpp @@ -29,6 +29,7 @@ void llvm::initializeTransformUtils(PassRegistry &Registry) { initializePromotePassPass(Registry); initializeUnifyFunctionExitNodesPass(Registry); initializeInstSimplifierPass(Registry); + initializeMetaRenamerPass(Registry); } /// LLVMInitializeTransformUtils - C binding for initializeTransformUtilsPasses. diff --git a/lib/Transforms/Utils/ValueMapper.cpp b/lib/Transforms/Utils/ValueMapper.cpp index fc2538d..a30b093 100644 --- a/lib/Transforms/Utils/ValueMapper.cpp +++ b/lib/Transforms/Utils/ValueMapper.cpp @@ -21,7 +21,7 @@ using namespace llvm; // Out of line method to get vtable etc for class. 
-void ValueMapTypeRemapper::Anchor() {} +void ValueMapTypeRemapper::anchor() {} Value *llvm::MapValue(const Value *V, ValueToValueMapTy &VM, RemapFlags Flags, ValueMapTypeRemapper *TypeMapper) { diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp index 62d23cb..f7be3e3 100644 --- a/lib/Transforms/Vectorize/BBVectorize.cpp +++ b/lib/Transforms/Vectorize/BBVectorize.cpp @@ -28,12 +28,14 @@ #include "llvm/Type.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/StringExtras.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasSetTracker.h" +#include "llvm/Analysis/Dominators.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/ValueTracking.h" @@ -41,17 +43,27 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/ValueHandle.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" +#include "llvm/TargetTransformInfo.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Vectorize.h" #include #include using namespace llvm; +static cl::opt +IgnoreTargetInfo("bb-vectorize-ignore-target-info", cl::init(false), + cl::Hidden, cl::desc("Ignore target information")); + static cl::opt ReqChainDepth("bb-vectorize-req-chain-depth", cl::init(6), cl::Hidden, cl::desc("The required chain depth for vectorization")); +static cl::opt +UseChainDepthWithTI("bb-vectorize-use-chain-depth", cl::init(false), + cl::Hidden, cl::desc("Use the chain depth requirement with" + " target information")); + static cl::opt SearchLimit("bb-vectorize-search-limit", cl::init(400), cl::Hidden, cl::desc("The maximum search distance for instruction pairs")); @@ -93,8 +105,9 @@ static cl::opt NoFloats("bb-vectorize-no-floats", cl::init(false), cl::Hidden, cl::desc("Don't try to vectorize floating-point values")); +// FIXME: This should default to false once pointer vector support works. static cl::opt -NoPointers("bb-vectorize-no-pointers", cl::init(false), cl::Hidden, +NoPointers("bb-vectorize-no-pointers", cl::init(/*false*/ true), cl::Hidden, cl::desc("Don't try to vectorize pointer values")); static cl::opt @@ -159,6 +172,12 @@ DebugCycleCheck("bb-vectorize-debug-cycle-check", cl::init(false), cl::Hidden, cl::desc("When debugging is enabled, output information on the" " cycle-checking process")); + +static cl::opt +PrintAfterEveryPair("bb-vectorize-debug-print-after-every-pair", + cl::init(false), cl::Hidden, + cl::desc("When debugging is enabled, dump the basic block after" + " every pair is fused")); #endif STATISTIC(NumFusedOps, "Number of operations fused by bb-vectorize"); @@ -177,13 +196,19 @@ namespace { BBVectorize(Pass *P, const VectorizeConfig &C) : BasicBlockPass(ID), Config(C) { AA = &P->getAnalysis(); + DT = &P->getAnalysis(); SE = &P->getAnalysis(); - TD = P->getAnalysisIfAvailable(); + TD = P->getAnalysisIfAvailable(); + TTI = IgnoreTargetInfo ? 0 : + P->getAnalysisIfAvailable(); + VTTI = TTI ? 
TTI->getVectorTargetTransformInfo() : 0; } typedef std::pair ValuePair; + typedef std::pair ValuePairWithCost; typedef std::pair ValuePairWithDepth; typedef std::pair VPPair; // A ValuePair pair + typedef std::pair VPPairWithType; typedef std::pair::iterator, std::multimap::iterator> VPIteratorPair; typedef std::pair::iterator, @@ -191,8 +216,11 @@ namespace { VPPIteratorPair; AliasAnalysis *AA; + DominatorTree *DT; ScalarEvolution *SE; - TargetData *TD; + DataLayout *TD; + TargetTransformInfo *TTI; + const VectorTargetTransformInfo *VTTI; // FIXME: const correct? @@ -201,11 +229,23 @@ namespace { bool getCandidatePairs(BasicBlock &BB, BasicBlock::iterator &Start, std::multimap &CandidatePairs, + DenseSet &FixedOrderPairs, + DenseMap &CandidatePairCostSavings, std::vector &PairableInsts, bool NonPow2Len); + // FIXME: The current implementation does not account for pairs that + // are connected in multiple ways. For example: + // C1 = A1 / A2; C2 = A2 / A1 (which may be both direct and a swap) + enum PairConnectionType { + PairConnectionDirect, + PairConnectionSwap, + PairConnectionSplat + }; + void computeConnectedPairs(std::multimap &CandidatePairs, std::vector &PairableInsts, - std::multimap &ConnectedPairs); + std::multimap &ConnectedPairs, + DenseMap &PairConnectionTypes); void buildDepMap(BasicBlock &BB, std::multimap &CandidatePairs, @@ -213,19 +253,29 @@ namespace { DenseSet &PairableInstUsers); void choosePairs(std::multimap &CandidatePairs, + DenseMap &CandidatePairCostSavings, std::vector &PairableInsts, + DenseSet &FixedOrderPairs, + DenseMap &PairConnectionTypes, std::multimap &ConnectedPairs, + std::multimap &ConnectedPairDeps, DenseSet &PairableInstUsers, DenseMap& ChosenPairs); void fuseChosenPairs(BasicBlock &BB, std::vector &PairableInsts, - DenseMap& ChosenPairs); + DenseMap& ChosenPairs, + DenseSet &FixedOrderPairs, + DenseMap &PairConnectionTypes, + std::multimap &ConnectedPairs, + std::multimap &ConnectedPairDeps); + bool isInstVectorizable(Instruction *I, bool &IsSimpleLoadStore); bool areInstsCompatible(Instruction *I, Instruction *J, - bool IsSimpleLoadStore, bool NonPow2Len); + bool IsSimpleLoadStore, bool NonPow2Len, + int &CostSavings, int &FixedOrder); bool trackUsesOfI(DenseSet &Users, AliasSetTracker &WriteSet, Instruction *I, @@ -236,6 +286,7 @@ namespace { std::multimap &CandidatePairs, std::vector &PairableInsts, std::multimap &ConnectedPairs, + DenseMap &PairConnectionTypes, ValuePair P); bool pairsConflict(ValuePair P, ValuePair Q, @@ -267,17 +318,21 @@ namespace { void findBestTreeFor( std::multimap &CandidatePairs, + DenseMap &CandidatePairCostSavings, std::vector &PairableInsts, + DenseSet &FixedOrderPairs, + DenseMap &PairConnectionTypes, std::multimap &ConnectedPairs, + std::multimap &ConnectedPairDeps, DenseSet &PairableInstUsers, std::multimap &PairableInstUserMap, DenseMap &ChosenPairs, DenseSet &BestTree, size_t &BestMaxDepth, - size_t &BestEffSize, VPIteratorPair ChoiceRange, + int &BestEffSize, VPIteratorPair ChoiceRange, bool UseCycleCheck); Value *getReplacementPointerInput(LLVMContext& Context, Instruction *I, - Instruction *J, unsigned o, bool FlipMemInputs); + Instruction *J, unsigned o); void fillNewShuffleMask(LLVMContext& Context, Instruction *J, unsigned MaskOffset, unsigned NumInElem, @@ -289,20 +344,20 @@ namespace { bool expandIEChain(LLVMContext& Context, Instruction *I, Instruction *J, unsigned o, Value *&LOp, unsigned numElemL, - Type *ArgTypeL, Type *ArgTypeR, + Type *ArgTypeL, Type *ArgTypeR, bool IBeforeJ, unsigned IdxOff = 
0); Value *getReplacementInput(LLVMContext& Context, Instruction *I, - Instruction *J, unsigned o, bool FlipMemInputs); + Instruction *J, unsigned o, bool IBeforeJ); void getReplacementInputsForPair(LLVMContext& Context, Instruction *I, Instruction *J, SmallVector &ReplacedOperands, - bool FlipMemInputs); + bool IBeforeJ); void replaceOutputsOfPair(LLVMContext& Context, Instruction *I, Instruction *J, Instruction *K, Instruction *&InsertionPt, Instruction *&K1, - Instruction *&K2, bool FlipMemInputs); + Instruction *&K2); void collectPairLoadMoveSet(BasicBlock &BB, DenseMap &ChosenPairs, @@ -314,10 +369,6 @@ namespace { DenseMap &ChosenPairs, std::multimap &LoadMoveSet); - void collectPtrInfo(std::vector &PairableInsts, - DenseMap &ChosenPairs, - DenseSet &LowPtrInsts); - bool canMoveUsesOfIAfterJ(BasicBlock &BB, std::multimap &LoadMoveSet, Instruction *I, Instruction *J); @@ -330,13 +381,22 @@ namespace { void combineMetadata(Instruction *K, const Instruction *J); bool vectorizeBB(BasicBlock &BB) { + if (!DT->isReachableFromEntry(&BB)) { + DEBUG(dbgs() << "BBV: skipping unreachable " << BB.getName() << + " in " << BB.getParent()->getName() << "\n"); + return false; + } + + DEBUG(if (VTTI) dbgs() << "BBV: using target information\n"); + bool changed = false; // Iterate a sufficient number of times to merge types of size 1 bit, // then 2 bits, then 4, etc. up to half of the target vector width of the // target vector register. unsigned n = 1; for (unsigned v = 2; - v <= Config.VectorBits && (!Config.MaxIter || n <= Config.MaxIter); + (VTTI || v <= Config.VectorBits) && + (!Config.MaxIter || n <= Config.MaxIter); v *= 2, ++n) { DEBUG(dbgs() << "BBV: fusing loop #" << n << " for " << BB.getName() << " in " << @@ -363,8 +423,12 @@ namespace { virtual bool runOnBasicBlock(BasicBlock &BB) { AA = &getAnalysis(); + DT = &getAnalysis(); SE = &getAnalysis(); - TD = getAnalysisIfAvailable(); + TD = getAnalysisIfAvailable(); + TTI = IgnoreTargetInfo ? 0 : + getAnalysisIfAvailable(); + VTTI = TTI ? TTI->getVectorTargetTransformInfo() : 0; return vectorizeBB(BB); } @@ -372,8 +436,10 @@ namespace { virtual void getAnalysisUsage(AnalysisUsage &AU) const { BasicBlockPass::getAnalysisUsage(AU); AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.addPreserved(); + AU.addPreserved(); AU.addPreserved(); AU.setPreservesCFG(); } @@ -415,6 +481,14 @@ namespace { T2 = cast(I)->getSrcTy(); else T2 = T1; + + if (SelectInst *SI = dyn_cast(I)) { + T2 = SI->getCondition()->getType(); + } else if (ShuffleVectorInst *SI = dyn_cast(I)) { + T2 = SI->getOperand(0)->getType(); + } else if (CmpInst *CI = dyn_cast(I)) { + T2 = CI->getOperand(0)->getType(); + } } // Returns the weight associated with the provided value. A chain of @@ -446,6 +520,62 @@ namespace { return 1; } + // Returns the cost of the provided instruction using VTTI. + // This does not handle loads and stores. + unsigned getInstrCost(unsigned Opcode, Type *T1, Type *T2) { + switch (Opcode) { + default: break; + case Instruction::GetElementPtr: + // We mark this instruction as zero-cost because scalar GEPs are usually + // lowered to the intruction addressing mode. At the moment we don't + // generate vector GEPs. 
+ return 0; + case Instruction::Br: + return VTTI->getCFInstrCost(Opcode); + case Instruction::PHI: + return 0; + case Instruction::Add: + case Instruction::FAdd: + case Instruction::Sub: + case Instruction::FSub: + case Instruction::Mul: + case Instruction::FMul: + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::FDiv: + case Instruction::URem: + case Instruction::SRem: + case Instruction::FRem: + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: + return VTTI->getArithmeticInstrCost(Opcode, T1); + case Instruction::Select: + case Instruction::ICmp: + case Instruction::FCmp: + return VTTI->getCmpSelInstrCost(Opcode, T1, T2); + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::FPToUI: + case Instruction::FPToSI: + case Instruction::FPExt: + case Instruction::PtrToInt: + case Instruction::IntToPtr: + case Instruction::SIToFP: + case Instruction::UIToFP: + case Instruction::Trunc: + case Instruction::FPTrunc: + case Instruction::BitCast: + case Instruction::ShuffleVector: + return VTTI->getCastInstrCost(Opcode, T1, T2); + } + + return 1; + } + // This determines the relative offset of two loads or stores, returning // true if the offset could be determined to be some constant value. // For example, if OffsetInElmts == 1, then J accesses the memory directly @@ -453,20 +583,30 @@ namespace { // directly after J. bool getPairPtrInfo(Instruction *I, Instruction *J, Value *&IPtr, Value *&JPtr, unsigned &IAlignment, unsigned &JAlignment, - int64_t &OffsetInElmts) { + unsigned &IAddressSpace, unsigned &JAddressSpace, + int64_t &OffsetInElmts, bool ComputeOffset = true) { OffsetInElmts = 0; - if (isa(I)) { - IPtr = cast(I)->getPointerOperand(); - JPtr = cast(J)->getPointerOperand(); - IAlignment = cast(I)->getAlignment(); - JAlignment = cast(J)->getAlignment(); + if (LoadInst *LI = dyn_cast(I)) { + LoadInst *LJ = cast(J); + IPtr = LI->getPointerOperand(); + JPtr = LJ->getPointerOperand(); + IAlignment = LI->getAlignment(); + JAlignment = LJ->getAlignment(); + IAddressSpace = LI->getPointerAddressSpace(); + JAddressSpace = LJ->getPointerAddressSpace(); } else { - IPtr = cast(I)->getPointerOperand(); - JPtr = cast(J)->getPointerOperand(); - IAlignment = cast(I)->getAlignment(); - JAlignment = cast(J)->getAlignment(); + StoreInst *SI = cast(I), *SJ = cast(J); + IPtr = SI->getPointerOperand(); + JPtr = SJ->getPointerOperand(); + IAlignment = SI->getAlignment(); + JAlignment = SJ->getAlignment(); + IAddressSpace = SI->getPointerAddressSpace(); + JAddressSpace = SJ->getPointerAddressSpace(); } + if (!ComputeOffset) + return true; + const SCEV *IPtrSCEV = SE->getSCEV(IPtr); const SCEV *JPtrSCEV = SE->getSCEV(JPtr); @@ -536,6 +676,19 @@ namespace { return false; } + + bool isPureIEChain(InsertElementInst *IE) { + InsertElementInst *IENext = IE; + do { + if (!isa(IENext->getOperand(0)) && + !isa(IENext->getOperand(0))) { + return false; + } + } while ((IENext = + dyn_cast(IENext->getOperand(0)))); + + return true; + } }; // This function implements one vectorization iteration on the provided @@ -546,11 +699,18 @@ namespace { std::vector AllPairableInsts; DenseMap AllChosenPairs; + DenseSet AllFixedOrderPairs; + DenseMap AllPairConnectionTypes; + std::multimap AllConnectedPairs, AllConnectedPairDeps; do { std::vector PairableInsts; std::multimap CandidatePairs; + DenseSet FixedOrderPairs; + DenseMap CandidatePairCostSavings; ShouldContinue = getCandidatePairs(BB, Start, 
CandidatePairs, + FixedOrderPairs, + CandidatePairCostSavings, PairableInsts, NonPow2Len); if (PairableInsts.empty()) continue; @@ -563,10 +723,18 @@ namespace { // Note that it only matters that both members of the second pair use some // element of the first pair (to allow for splatting). - std::multimap ConnectedPairs; - computeConnectedPairs(CandidatePairs, PairableInsts, ConnectedPairs); + std::multimap ConnectedPairs, ConnectedPairDeps; + DenseMap PairConnectionTypes; + computeConnectedPairs(CandidatePairs, PairableInsts, ConnectedPairs, + PairConnectionTypes); if (ConnectedPairs.empty()) continue; + for (std::multimap::iterator + I = ConnectedPairs.begin(), IE = ConnectedPairs.end(); + I != IE; ++I) { + ConnectedPairDeps.insert(VPPair(I->second, I->first)); + } + // Build the pairable-instruction dependency map DenseSet PairableInstUsers; buildDepMap(BB, CandidatePairs, PairableInsts, PairableInstUsers); @@ -578,13 +746,48 @@ namespace { // variables. DenseMap ChosenPairs; - choosePairs(CandidatePairs, PairableInsts, ConnectedPairs, + choosePairs(CandidatePairs, CandidatePairCostSavings, + PairableInsts, FixedOrderPairs, PairConnectionTypes, + ConnectedPairs, ConnectedPairDeps, PairableInstUsers, ChosenPairs); if (ChosenPairs.empty()) continue; AllPairableInsts.insert(AllPairableInsts.end(), PairableInsts.begin(), PairableInsts.end()); AllChosenPairs.insert(ChosenPairs.begin(), ChosenPairs.end()); + + // Only for the chosen pairs, propagate information on fixed-order pairs, + // pair connections, and their types to the data structures used by the + // pair fusion procedures. + for (DenseMap::iterator I = ChosenPairs.begin(), + IE = ChosenPairs.end(); I != IE; ++I) { + if (FixedOrderPairs.count(*I)) + AllFixedOrderPairs.insert(*I); + else if (FixedOrderPairs.count(ValuePair(I->second, I->first))) + AllFixedOrderPairs.insert(ValuePair(I->second, I->first)); + + for (DenseMap::iterator J = ChosenPairs.begin(); + J != IE; ++J) { + DenseMap::iterator K = + PairConnectionTypes.find(VPPair(*I, *J)); + if (K != PairConnectionTypes.end()) { + AllPairConnectionTypes.insert(*K); + } else { + K = PairConnectionTypes.find(VPPair(*J, *I)); + if (K != PairConnectionTypes.end()) + AllPairConnectionTypes.insert(*K); + } + } + } + + for (std::multimap::iterator + I = ConnectedPairs.begin(), IE = ConnectedPairs.end(); + I != IE; ++I) { + if (AllPairConnectionTypes.count(*I)) { + AllConnectedPairs.insert(*I); + AllConnectedPairDeps.insert(VPPair(I->second, I->first)); + } + } } while (ShouldContinue); if (AllChosenPairs.empty()) return false; @@ -597,11 +800,13 @@ namespace { // replaced with a vector_extract on the result. Subsequent optimization // passes should coalesce the build/extract combinations. - fuseChosenPairs(BB, AllPairableInsts, AllChosenPairs); + fuseChosenPairs(BB, AllPairableInsts, AllChosenPairs, AllFixedOrderPairs, + AllPairConnectionTypes, + AllConnectedPairs, AllConnectedPairDeps); // It is important to cleanup here so that future iterations of this // function have less work to do. 
- (void) SimplifyInstructionsInBlock(&BB, TD); + (void) SimplifyInstructionsInBlock(&BB, TD, AA->getTargetLibraryInfo()); return true; } @@ -667,15 +872,22 @@ namespace { !(VectorType::isValidElementType(T2) || T2->isVectorTy())) return false; - if (T1->getScalarSizeInBits() == 1 && T2->getScalarSizeInBits() == 1) { + if (T1->getScalarSizeInBits() == 1) { if (!Config.VectorizeBools) return false; } else { - if (!Config.VectorizeInts - && (T1->isIntOrIntVectorTy() || T2->isIntOrIntVectorTy())) + if (!Config.VectorizeInts && T1->isIntOrIntVectorTy()) return false; } - + + if (T2->getScalarSizeInBits() == 1) { + if (!Config.VectorizeBools) + return false; + } else { + if (!Config.VectorizeInts && T2->isIntOrIntVectorTy()) + return false; + } + if (!Config.VectorizeFloats && (T1->isFPOrFPVectorTy() || T2->isFPOrFPVectorTy())) return false; @@ -691,8 +903,8 @@ namespace { T2->getScalarType()->isPointerTy())) return false; - if (T1->getPrimitiveSizeInBits() >= Config.VectorBits || - T2->getPrimitiveSizeInBits() >= Config.VectorBits) + if (!VTTI && (T1->getPrimitiveSizeInBits() >= Config.VectorBits || + T2->getPrimitiveSizeInBits() >= Config.VectorBits)) return false; return true; @@ -703,10 +915,14 @@ namespace { // that I has already been determined to be vectorizable and that J is not // in the use tree of I. bool BBVectorize::areInstsCompatible(Instruction *I, Instruction *J, - bool IsSimpleLoadStore, bool NonPow2Len) { + bool IsSimpleLoadStore, bool NonPow2Len, + int &CostSavings, int &FixedOrder) { DEBUG(if (DebugInstructionExamination) dbgs() << "BBV: looking at " << *I << " <-> " << *J << "\n"); + CostSavings = 0; + FixedOrder = 0; + // Loads and stores can be merged if they have different alignments, // but are otherwise the same. if (!J->isSameOperationAs(I, Instruction::CompareIgnoringAlignment | @@ -719,38 +935,84 @@ namespace { unsigned MaxTypeBits = std::max( IT1->getPrimitiveSizeInBits() + JT1->getPrimitiveSizeInBits(), IT2->getPrimitiveSizeInBits() + JT2->getPrimitiveSizeInBits()); - if (MaxTypeBits > Config.VectorBits) + if (!VTTI && MaxTypeBits > Config.VectorBits) return false; // FIXME: handle addsub-type operations! if (IsSimpleLoadStore) { Value *IPtr, *JPtr; - unsigned IAlignment, JAlignment; + unsigned IAlignment, JAlignment, IAddressSpace, JAddressSpace; int64_t OffsetInElmts = 0; if (getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment, + IAddressSpace, JAddressSpace, OffsetInElmts) && abs64(OffsetInElmts) == 1) { - if (Config.AlignedOnly) { - Type *aTypeI = isa(I) ? - cast(I)->getValueOperand()->getType() : I->getType(); - Type *aTypeJ = isa(J) ? - cast(J)->getValueOperand()->getType() : J->getType(); + FixedOrder = (int) OffsetInElmts; + unsigned BottomAlignment = IAlignment; + if (OffsetInElmts < 0) BottomAlignment = JAlignment; + + Type *aTypeI = isa(I) ? + cast(I)->getValueOperand()->getType() : I->getType(); + Type *aTypeJ = isa(J) ? + cast(J)->getValueOperand()->getType() : J->getType(); + Type *VType = getVecTypeForPair(aTypeI, aTypeJ); + if (Config.AlignedOnly) { // An aligned load or store is possible only if the instruction // with the lower offset has an alignment suitable for the // vector type. 
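A minimal standalone sketch of the AlignedOnly rule stated in the comment above, under the assumption that only the lower-addressed member's alignment matters for the fused access (the helper name and the constants are illustrative):

#include <cassert>
#include <cstdint>

bool alignmentAllowsFusion(int64_t OffsetInElmts, unsigned IAlign,
                           unsigned JAlign, unsigned VecPrefAlign) {
  // If J sits below I (offset < 0), J's alignment is the one that matters.
  unsigned BottomAlignment = (OffsetInElmts < 0) ? JAlign : IAlign;
  return BottomAlignment >= VecPrefAlign;
}

int main() {
  // Two 4-byte loads one element apart; the fused <2 x i32> prefers 8 bytes.
  assert(alignmentAllowsFusion(/*Offset=*/1, /*IAlign=*/8, /*JAlign=*/4, 8));
  assert(!alignmentAllowsFusion(/*Offset=*/-1, 8, 4, 8)); // bottom is J: 4 < 8
  return 0;
}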
- unsigned BottomAlignment = IAlignment; - if (OffsetInElmts < 0) BottomAlignment = JAlignment; - - Type *VType = getVecTypeForPair(aTypeI, aTypeJ); unsigned VecAlignment = TD->getPrefTypeAlignment(VType); if (BottomAlignment < VecAlignment) return false; } + + if (VTTI) { + unsigned ICost = VTTI->getMemoryOpCost(I->getOpcode(), I->getType(), + IAlignment, IAddressSpace); + unsigned JCost = VTTI->getMemoryOpCost(J->getOpcode(), J->getType(), + JAlignment, JAddressSpace); + unsigned VCost = VTTI->getMemoryOpCost(I->getOpcode(), VType, + BottomAlignment, + IAddressSpace); + if (VCost > ICost + JCost) + return false; + + // We don't want to fuse to a type that will be split, even + // if the two input types will also be split and there is no other + // associated cost. + unsigned VParts = VTTI->getNumberOfParts(VType); + if (VParts > 1) + return false; + else if (!VParts && VCost == ICost + JCost) + return false; + + CostSavings = ICost + JCost - VCost; + } } else { return false; } + } else if (VTTI) { + unsigned ICost = getInstrCost(I->getOpcode(), IT1, IT2); + unsigned JCost = getInstrCost(J->getOpcode(), JT1, JT2); + Type *VT1 = getVecTypeForPair(IT1, JT1), + *VT2 = getVecTypeForPair(IT2, JT2); + unsigned VCost = getInstrCost(I->getOpcode(), VT1, VT2); + + if (VCost > ICost + JCost) + return false; + + // We don't want to fuse to a type that will be split, even + // if the two input types will also be split and there is no other + // associated cost. + unsigned VParts1 = VTTI->getNumberOfParts(VT1), + VParts2 = VTTI->getNumberOfParts(VT2); + if (VParts1 > 1 || VParts2 > 1) + return false; + else if ((!VParts1 || !VParts2) && VCost == ICost + JCost) + return false; + + CostSavings = ICost + JCost - VCost; } // The powi intrinsic is special because only the first argument is @@ -833,6 +1095,8 @@ namespace { bool BBVectorize::getCandidatePairs(BasicBlock &BB, BasicBlock::iterator &Start, std::multimap &CandidatePairs, + DenseSet &FixedOrderPairs, + DenseMap &CandidatePairCostSavings, std::vector &PairableInsts, bool NonPow2Len) { BasicBlock::iterator E = BB.end(); if (Start == E) return false; @@ -869,7 +1133,9 @@ namespace { // J does not use I, and comes before the first use of I, so it can be // merged with I if the instructions are compatible. - if (!areInstsCompatible(I, J, IsSimpleLoadStore, NonPow2Len)) continue; + int CostSavings, FixedOrder; + if (!areInstsCompatible(I, J, IsSimpleLoadStore, NonPow2Len, + CostSavings, FixedOrder)) continue; // J is a candidate for merging with I. if (!PairableInsts.size() || @@ -878,6 +1144,14 @@ namespace { } CandidatePairs.insert(ValuePair(I, J)); + if (VTTI) + CandidatePairCostSavings.insert(ValuePairWithCost(ValuePair(I, J), + CostSavings)); + + if (FixedOrder == 1) + FixedOrderPairs.insert(ValuePair(I, J)); + else if (FixedOrder == -1) + FixedOrderPairs.insert(ValuePair(J, I)); // The next call to this function must start after the last instruction // selected during this invocation. 
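A minimal standalone sketch of the VTTI cost gate used in this hunk (the function name and the test values are illustrative): a pair is rejected when the fused operation would cost more than the two scalar ones, when the fused type would be split into several registers, or when an illegal fused type brings no saving; otherwise the saving recorded is ICost + JCost - VCost.

#include <cassert>

// Returns true and sets Savings when fusing the pair looks profitable.
bool passesCostGate(unsigned ICost, unsigned JCost, unsigned VCost,
                    unsigned VParts, int &Savings) {
  Savings = 0;
  if (VCost > ICost + JCost)
    return false;                 // fused op is outright more expensive
  if (VParts > 1)
    return false;                 // fused type would be split anyway
  if (VParts == 0 && VCost == ICost + JCost)
    return false;                 // type not legal and no saving to show
  Savings = int(ICost + JCost) - int(VCost);
  return true;
}

int main() {
  int S;
  assert(passesCostGate(1, 1, 1, 1, S) && S == 1); // one vector op replaces two
  assert(!passesCostGate(1, 1, 3, 1, S));          // vector op too expensive
  assert(!passesCostGate(1, 1, 2, 2, S));          // would be split in two
  return 0;
}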
@@ -887,7 +1161,8 @@ namespace { } DEBUG(if (DebugCandidateSelection) dbgs() << "BBV: candidate pair " - << *I << " <-> " << *J << "\n"); + << *I << " <-> " << *J << " (cost savings: " << + CostSavings << ")\n"); // If we have already found too many pairs, break here and this function // will be called again starting after the last instruction selected @@ -915,6 +1190,7 @@ namespace { std::multimap &CandidatePairs, std::vector &PairableInsts, std::multimap &ConnectedPairs, + DenseMap &PairConnectionTypes, ValuePair P) { StoreInst *SI, *SJ; @@ -946,12 +1222,18 @@ namespace { VPIteratorPair JPairRange = CandidatePairs.equal_range(*J); // Look for : - if (isSecondInIteratorPair(*J, IPairRange)) - ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J))); + if (isSecondInIteratorPair(*J, IPairRange)) { + VPPair VP(P, ValuePair(*I, *J)); + ConnectedPairs.insert(VP); + PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionDirect)); + } // Look for : - if (isSecondInIteratorPair(*I, JPairRange)) - ConnectedPairs.insert(VPPair(P, ValuePair(*J, *I))); + if (isSecondInIteratorPair(*I, JPairRange)) { + VPPair VP(P, ValuePair(*J, *I)); + ConnectedPairs.insert(VP); + PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSwap)); + } } if (Config.SplatBreaksChain) continue; @@ -962,8 +1244,11 @@ namespace { P.first == SJ->getPointerOperand()) continue; - if (isSecondInIteratorPair(*J, IPairRange)) - ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J))); + if (isSecondInIteratorPair(*J, IPairRange)) { + VPPair VP(P, ValuePair(*I, *J)); + ConnectedPairs.insert(VP); + PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSplat)); + } } } @@ -985,8 +1270,11 @@ namespace { P.second == SJ->getPointerOperand()) continue; - if (isSecondInIteratorPair(*J, IPairRange)) - ConnectedPairs.insert(VPPair(P, ValuePair(*I, *J))); + if (isSecondInIteratorPair(*J, IPairRange)) { + VPPair VP(P, ValuePair(*I, *J)); + ConnectedPairs.insert(VP); + PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSplat)); + } } } } @@ -997,7 +1285,8 @@ namespace { void BBVectorize::computeConnectedPairs( std::multimap &CandidatePairs, std::vector &PairableInsts, - std::multimap &ConnectedPairs) { + std::multimap &ConnectedPairs, + DenseMap &PairConnectionTypes) { for (std::vector::iterator PI = PairableInsts.begin(), PE = PairableInsts.end(); PI != PE; ++PI) { @@ -1006,7 +1295,7 @@ namespace { for (std::multimap::iterator P = choiceRange.first; P != choiceRange.second; ++P) computePairsConnectedTo(CandidatePairs, PairableInsts, - ConnectedPairs, *P); + ConnectedPairs, PairConnectionTypes, *P); } DEBUG(dbgs() << "BBV: found " << ConnectedPairs.size() @@ -1196,7 +1485,7 @@ namespace { PrunedTree.insert(QTop.first); // Visit each child, pruning as necessary... - DenseMap BestChildren; + SmallVector BestChildren; VPPIteratorPair QTopRange = ConnectedPairs.equal_range(QTop.first); for (std::multimap::iterator K = QTopRange.first; K != QTopRange.second; ++K) { @@ -1228,7 +1517,7 @@ namespace { DenseSet CurrentPairs; bool CanAdd = true; - for (DenseMap::iterator C2 + for (SmallVector::iterator C2 = BestChildren.begin(), E2 = BestChildren.end(); C2 != E2; ++C2) { if (C2->first.first == C->first.first || @@ -1313,22 +1602,22 @@ namespace { // to an already-selected child. Check for this here, and if a // conflict is found, then remove the previously-selected child // before adding this one in its place. 
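A minimal standalone sketch of the three connection kinds introduced here, using a deliberately simplified model in which each user reads exactly one member of the source pair (all names are illustrative): matching order gives a direct connection, crossed order a swap, and two uses of the same member a splat.

#include <cstdio>

enum PairConnectionType { PairConnectionDirect, PairConnectionSwap,
                          PairConnectionSplat };

// P = (A, B); the user pair is (FirstUser, SecondUser).
PairConnectionType classify(bool FirstUserReadsA, bool SecondUserReadsB) {
  if (FirstUserReadsA && SecondUserReadsB)
    return PairConnectionDirect;  // operands line up with the pair order
  if (!FirstUserReadsA && !SecondUserReadsB)
    return PairConnectionSwap;    // operands are crossed; a swap is needed
  return PairConnectionSplat;     // both users read the same member of P
}

int main() {
  std::printf("%d %d %d\n", classify(true, true),   // direct
              classify(false, false),               // swap
              classify(true, false));               // splat
  return 0;
}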
- for (DenseMap::iterator C2 + for (SmallVector::iterator C2 = BestChildren.begin(); C2 != BestChildren.end();) { if (C2->first.first == C->first.first || C2->first.first == C->first.second || C2->first.second == C->first.first || C2->first.second == C->first.second || pairsConflict(C2->first, C->first, PairableInstUsers)) - BestChildren.erase(C2++); + C2 = BestChildren.erase(C2); else ++C2; } - BestChildren.insert(ValuePairWithDepth(C->first, C->second)); + BestChildren.push_back(ValuePairWithDepth(C->first, C->second)); } - for (DenseMap::iterator C + for (SmallVector::iterator C = BestChildren.begin(), E2 = BestChildren.end(); C != E2; ++C) { size_t DepthF = getDepthFactor(C->first.first); @@ -1341,13 +1630,17 @@ namespace { // pairs, given the choice of root pairs as an iterator range. void BBVectorize::findBestTreeFor( std::multimap &CandidatePairs, + DenseMap &CandidatePairCostSavings, std::vector &PairableInsts, + DenseSet &FixedOrderPairs, + DenseMap &PairConnectionTypes, std::multimap &ConnectedPairs, + std::multimap &ConnectedPairDeps, DenseSet &PairableInstUsers, std::multimap &PairableInstUserMap, DenseMap &ChosenPairs, DenseSet &BestTree, size_t &BestMaxDepth, - size_t &BestEffSize, VPIteratorPair ChoiceRange, + int &BestEffSize, VPIteratorPair ChoiceRange, bool UseCycleCheck) { for (std::multimap::iterator J = ChoiceRange.first; J != ChoiceRange.second; ++J) { @@ -1397,17 +1690,289 @@ namespace { PairableInstUsers, PairableInstUserMap, ChosenPairs, Tree, PrunedTree, *J, UseCycleCheck); - size_t EffSize = 0; - for (DenseSet::iterator S = PrunedTree.begin(), - E = PrunedTree.end(); S != E; ++S) - EffSize += getDepthFactor(S->first); + int EffSize = 0; + if (VTTI) { + DenseSet PrunedTreeInstrs; + for (DenseSet::iterator S = PrunedTree.begin(), + E = PrunedTree.end(); S != E; ++S) { + PrunedTreeInstrs.insert(S->first); + PrunedTreeInstrs.insert(S->second); + } + + // The set of pairs that have already contributed to the total cost. + DenseSet IncomingPairs; + + // If the cost model were perfect, this might not be necessary; but we + // need to make sure that we don't get stuck vectorizing our own + // shuffle chains. + bool HasNontrivialInsts = false; + + // The node weights represent the cost savings associated with + // fusing the pair of instructions. + for (DenseSet::iterator S = PrunedTree.begin(), + E = PrunedTree.end(); S != E; ++S) { + if (!isa(S->first) && + !isa(S->first) && + !isa(S->first)) + HasNontrivialInsts = true; + + bool FlipOrder = false; + + if (getDepthFactor(S->first)) { + int ESContrib = CandidatePairCostSavings.find(*S)->second; + DEBUG(if (DebugPairSelection) dbgs() << "\tweight {" + << *S->first << " <-> " << *S->second << "} = " << + ESContrib << "\n"); + EffSize += ESContrib; + } + + // The edge weights contribute in a negative sense: they represent + // the cost of shuffles. + VPPIteratorPair IP = ConnectedPairDeps.equal_range(*S); + if (IP.first != ConnectedPairDeps.end()) { + unsigned NumDepsDirect = 0, NumDepsSwap = 0; + for (std::multimap::iterator Q = IP.first; + Q != IP.second; ++Q) { + if (!PrunedTree.count(Q->second)) + continue; + DenseMap::iterator R = + PairConnectionTypes.find(VPPair(Q->second, Q->first)); + assert(R != PairConnectionTypes.end() && + "Cannot find pair connection type"); + if (R->second == PairConnectionDirect) + ++NumDepsDirect; + else if (R->second == PairConnectionSwap) + ++NumDepsSwap; + } + + // If there are more swaps than direct connections, then + // the pair order will be flipped during fusion. 
So the real + // number of swaps is the minimum number. + FlipOrder = !FixedOrderPairs.count(*S) && + ((NumDepsSwap > NumDepsDirect) || + FixedOrderPairs.count(ValuePair(S->second, S->first))); + + for (std::multimap::iterator Q = IP.first; + Q != IP.second; ++Q) { + if (!PrunedTree.count(Q->second)) + continue; + DenseMap::iterator R = + PairConnectionTypes.find(VPPair(Q->second, Q->first)); + assert(R != PairConnectionTypes.end() && + "Cannot find pair connection type"); + Type *Ty1 = Q->second.first->getType(), + *Ty2 = Q->second.second->getType(); + Type *VTy = getVecTypeForPair(Ty1, Ty2); + if ((R->second == PairConnectionDirect && FlipOrder) || + (R->second == PairConnectionSwap && !FlipOrder) || + R->second == PairConnectionSplat) { + int ESContrib = (int) getInstrCost(Instruction::ShuffleVector, + VTy, VTy); + DEBUG(if (DebugPairSelection) dbgs() << "\tcost {" << + *Q->second.first << " <-> " << *Q->second.second << + "} -> {" << + *S->first << " <-> " << *S->second << "} = " << + ESContrib << "\n"); + EffSize -= ESContrib; + } + } + } + + // Compute the cost of outgoing edges. We assume that edges outgoing + // to shuffles, inserts or extracts can be merged, and so contribute + // no additional cost. + if (!S->first->getType()->isVoidTy()) { + Type *Ty1 = S->first->getType(), + *Ty2 = S->second->getType(); + Type *VTy = getVecTypeForPair(Ty1, Ty2); + + bool NeedsExtraction = false; + for (Value::use_iterator I = S->first->use_begin(), + IE = S->first->use_end(); I != IE; ++I) { + if (ShuffleVectorInst *SI = dyn_cast(*I)) { + // Shuffle can be folded if it has no other input + if (isa(SI->getOperand(1))) + continue; + } + if (isa(*I)) + continue; + if (PrunedTreeInstrs.count(*I)) + continue; + NeedsExtraction = true; + break; + } + + if (NeedsExtraction) { + int ESContrib; + if (Ty1->isVectorTy()) + ESContrib = (int) getInstrCost(Instruction::ShuffleVector, + Ty1, VTy); + else + ESContrib = (int) VTTI->getVectorInstrCost( + Instruction::ExtractElement, VTy, 0); + + DEBUG(if (DebugPairSelection) dbgs() << "\tcost {" << + *S->first << "} = " << ESContrib << "\n"); + EffSize -= ESContrib; + } + + NeedsExtraction = false; + for (Value::use_iterator I = S->second->use_begin(), + IE = S->second->use_end(); I != IE; ++I) { + if (ShuffleVectorInst *SI = dyn_cast(*I)) { + // Shuffle can be folded if it has no other input + if (isa(SI->getOperand(1))) + continue; + } + if (isa(*I)) + continue; + if (PrunedTreeInstrs.count(*I)) + continue; + NeedsExtraction = true; + break; + } + + if (NeedsExtraction) { + int ESContrib; + if (Ty2->isVectorTy()) + ESContrib = (int) getInstrCost(Instruction::ShuffleVector, + Ty2, VTy); + else + ESContrib = (int) VTTI->getVectorInstrCost( + Instruction::ExtractElement, VTy, 1); + DEBUG(if (DebugPairSelection) dbgs() << "\tcost {" << + *S->second << "} = " << ESContrib << "\n"); + EffSize -= ESContrib; + } + } + + // Compute the cost of incoming edges. + if (!isa(S->first) && !isa(S->first)) { + Instruction *S1 = cast(S->first), + *S2 = cast(S->second); + for (unsigned o = 0; o < S1->getNumOperands(); ++o) { + Value *O1 = S1->getOperand(o), *O2 = S2->getOperand(o); + + // Combining constants into vector constants (or small vector + // constants into larger ones are assumed free). + if (isa(O1) && isa(O2)) + continue; + + if (FlipOrder) + std::swap(O1, O2); + + ValuePair VP = ValuePair(O1, O2); + ValuePair VPR = ValuePair(O2, O1); + + // Internal edges are not handled here. 
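A minimal standalone sketch of the flip decision quoted above (the name and arguments are illustrative): the pair keeps its order when that order is fixed; otherwise it is flipped when the reversed order is fixed, or when more of its incoming connections would need a swap than line up directly.

#include <cassert>

bool shouldFlipOrder(bool OrderIJFixed, bool OrderJIFixed,
                     unsigned NumDepsDirect, unsigned NumDepsSwap) {
  return !OrderIJFixed && (NumDepsSwap > NumDepsDirect || OrderJIFixed);
}

int main() {
  assert(!shouldFlipOrder(true, false, 0, 5)); // (I,J) order is fixed: keep it
  assert(shouldFlipOrder(false, true, 5, 0));  // reversed order is required
  assert(shouldFlipOrder(false, false, 1, 3)); // flipping avoids two swaps
  return 0;
}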
+ if (PrunedTree.count(VP) || PrunedTree.count(VPR)) + continue; + + Type *Ty1 = O1->getType(), + *Ty2 = O2->getType(); + Type *VTy = getVecTypeForPair(Ty1, Ty2); + + // Combining vector operations of the same type is also assumed + // folded with other operations. + if (Ty1 == Ty2) { + // If both are insert elements, then both can be widened. + InsertElementInst *IEO1 = dyn_cast(O1), + *IEO2 = dyn_cast(O2); + if (IEO1 && IEO2 && isPureIEChain(IEO1) && isPureIEChain(IEO2)) + continue; + // If both are extract elements, and both have the same input + // type, then they can be replaced with a shuffle + ExtractElementInst *EIO1 = dyn_cast(O1), + *EIO2 = dyn_cast(O2); + if (EIO1 && EIO2 && + EIO1->getOperand(0)->getType() == + EIO2->getOperand(0)->getType()) + continue; + // If both are a shuffle with equal operand types and only two + // unqiue operands, then they can be replaced with a single + // shuffle + ShuffleVectorInst *SIO1 = dyn_cast(O1), + *SIO2 = dyn_cast(O2); + if (SIO1 && SIO2 && + SIO1->getOperand(0)->getType() == + SIO2->getOperand(0)->getType()) { + SmallSet SIOps; + SIOps.insert(SIO1->getOperand(0)); + SIOps.insert(SIO1->getOperand(1)); + SIOps.insert(SIO2->getOperand(0)); + SIOps.insert(SIO2->getOperand(1)); + if (SIOps.size() <= 2) + continue; + } + } + + int ESContrib; + // This pair has already been formed. + if (IncomingPairs.count(VP)) { + continue; + } else if (IncomingPairs.count(VPR)) { + ESContrib = (int) getInstrCost(Instruction::ShuffleVector, + VTy, VTy); + } else if (!Ty1->isVectorTy() && !Ty2->isVectorTy()) { + ESContrib = (int) VTTI->getVectorInstrCost( + Instruction::InsertElement, VTy, 0); + ESContrib += (int) VTTI->getVectorInstrCost( + Instruction::InsertElement, VTy, 1); + } else if (!Ty1->isVectorTy()) { + // O1 needs to be inserted into a vector of size O2, and then + // both need to be shuffled together. + ESContrib = (int) VTTI->getVectorInstrCost( + Instruction::InsertElement, Ty2, 0); + ESContrib += (int) getInstrCost(Instruction::ShuffleVector, + VTy, Ty2); + } else if (!Ty2->isVectorTy()) { + // O2 needs to be inserted into a vector of size O1, and then + // both need to be shuffled together. 
+ ESContrib = (int) VTTI->getVectorInstrCost( + Instruction::InsertElement, Ty1, 0); + ESContrib += (int) getInstrCost(Instruction::ShuffleVector, + VTy, Ty1); + } else { + Type *TyBig = Ty1, *TySmall = Ty2; + if (Ty2->getVectorNumElements() > Ty1->getVectorNumElements()) + std::swap(TyBig, TySmall); + + ESContrib = (int) getInstrCost(Instruction::ShuffleVector, + VTy, TyBig); + if (TyBig != TySmall) + ESContrib += (int) getInstrCost(Instruction::ShuffleVector, + TyBig, TySmall); + } + + DEBUG(if (DebugPairSelection) dbgs() << "\tcost {" + << *O1 << " <-> " << *O2 << "} = " << + ESContrib << "\n"); + EffSize -= ESContrib; + IncomingPairs.insert(VP); + } + } + } + + if (!HasNontrivialInsts) { + DEBUG(if (DebugPairSelection) dbgs() << + "\tNo non-trivial instructions in tree;" + " override to zero effective size\n"); + EffSize = 0; + } + } else { + for (DenseSet::iterator S = PrunedTree.begin(), + E = PrunedTree.end(); S != E; ++S) + EffSize += (int) getDepthFactor(S->first); + } DEBUG(if (DebugPairSelection) dbgs() << "BBV: found pruned Tree for pair {" << *J->first << " <-> " << *J->second << "} of depth " << MaxDepth << " and size " << PrunedTree.size() << " (effective size: " << EffSize << ")\n"); - if (MaxDepth >= Config.ReqChainDepth && EffSize > BestEffSize) { + if (((VTTI && !UseChainDepthWithTI) || + MaxDepth >= Config.ReqChainDepth) && + EffSize > 0 && EffSize > BestEffSize) { BestMaxDepth = MaxDepth; BestEffSize = EffSize; BestTree = PrunedTree; @@ -1419,8 +1984,12 @@ namespace { // that will be fused into vector instructions. void BBVectorize::choosePairs( std::multimap &CandidatePairs, + DenseMap &CandidatePairCostSavings, std::vector &PairableInsts, + DenseSet &FixedOrderPairs, + DenseMap &PairConnectionTypes, std::multimap &ConnectedPairs, + std::multimap &ConnectedPairDeps, DenseSet &PairableInstUsers, DenseMap& ChosenPairs) { bool UseCycleCheck = @@ -1435,9 +2004,12 @@ namespace { VPIteratorPair ChoiceRange = CandidatePairs.equal_range(*I); // The best pair to choose and its tree: - size_t BestMaxDepth = 0, BestEffSize = 0; + size_t BestMaxDepth = 0; + int BestEffSize = 0; DenseSet BestTree; - findBestTreeFor(CandidatePairs, PairableInsts, ConnectedPairs, + findBestTreeFor(CandidatePairs, CandidatePairCostSavings, + PairableInsts, FixedOrderPairs, PairConnectionTypes, + ConnectedPairs, ConnectedPairDeps, PairableInstUsers, PairableInstUserMap, ChosenPairs, BestTree, BestMaxDepth, BestEffSize, ChoiceRange, UseCycleCheck); @@ -1490,24 +2062,19 @@ namespace { // Returns the value that is to be used as the pointer input to the vector // instruction that fuses I with J. Value *BBVectorize::getReplacementPointerInput(LLVMContext& Context, - Instruction *I, Instruction *J, unsigned o, - bool FlipMemInputs) { + Instruction *I, Instruction *J, unsigned o) { Value *IPtr, *JPtr; - unsigned IAlignment, JAlignment; + unsigned IAlignment, JAlignment, IAddressSpace, JAddressSpace; int64_t OffsetInElmts; - // Note: the analysis might fail here, that is why FlipMemInputs has + // Note: the analysis might fail here, that is why the pair order has // been precomputed (OffsetInElmts must be unused here). (void) getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment, - OffsetInElmts); + IAddressSpace, JAddressSpace, + OffsetInElmts, false); // The pointer value is taken to be the one with the lowest offset. 
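A minimal standalone sketch of the acceptance test that closes this cost computation (names are illustrative): once the effective size is a signed net saving, a pruned tree is only taken when that saving is positive and beats the best tree found so far, with the chain-depth requirement waived when a cost model is available.

#include <cassert>

bool acceptTree(bool HaveCostModel, bool StillRequireChainDepth,
                unsigned MaxDepth, unsigned ReqChainDepth,
                int EffSize, int BestEffSize) {
  bool DepthOK = (HaveCostModel && !StillRequireChainDepth) ||
                 MaxDepth >= ReqChainDepth;
  return DepthOK && EffSize > 0 && EffSize > BestEffSize;
}

int main() {
  assert(acceptTree(true, false, /*MaxDepth=*/1, /*Req=*/6, /*Eff=*/3, 0));
  assert(!acceptTree(true, false, 1, 6, -2, 0)); // shuffles ate the saving
  assert(!acceptTree(false, true, 1, 6, 3, 0));  // chain too shallow, no VTTI
  return 0;
}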
- Value *VPtr; - if (!FlipMemInputs) { - VPtr = IPtr; - } else { - VPtr = JPtr; - } + Value *VPtr = IPtr; Type *ArgTypeI = cast(IPtr->getType())->getElementType(); Type *ArgTypeJ = cast(JPtr->getType())->getElementType(); @@ -1515,7 +2082,7 @@ namespace { Type *VArgPtrType = PointerType::get(VArgType, cast(IPtr->getType())->getAddressSpace()); return new BitCastInst(VPtr, VArgPtrType, getReplacementName(I, true, o), - /* insert before */ FlipMemInputs ? J : I); + /* insert before */ I); } void BBVectorize::fillNewShuffleMask(LLVMContext& Context, Instruction *J, @@ -1585,23 +2152,12 @@ namespace { Instruction *J, unsigned o, Value *&LOp, unsigned numElemL, Type *ArgTypeL, Type *ArgTypeH, - unsigned IdxOff) { + bool IBeforeJ, unsigned IdxOff) { bool ExpandedIEChain = false; if (InsertElementInst *LIE = dyn_cast(LOp)) { // If we have a pure insertelement chain, then this can be rewritten // into a chain that directly builds the larger type. - bool PureChain = true; - InsertElementInst *LIENext = LIE; - do { - if (!isa(LIENext->getOperand(0)) && - !isa(LIENext->getOperand(0))) { - PureChain = false; - break; - } - } while ((LIENext = - dyn_cast(LIENext->getOperand(0)))); - - if (PureChain) { + if (isPureIEChain(LIE)) { SmallVector VectElemts(numElemL, UndefValue::get(ArgTypeL->getScalarType())); InsertElementInst *LIENext = LIE; @@ -1619,8 +2175,9 @@ namespace { LIENext = InsertElementInst::Create(LIEPrev, VectElemts[i], ConstantInt::get(Type::getInt32Ty(Context), i + IdxOff), - getReplacementName(I, true, o, i+1)); - LIENext->insertBefore(J); + getReplacementName(IBeforeJ ? I : J, + true, o, i+1)); + LIENext->insertBefore(IBeforeJ ? J : I); LIEPrev = LIENext; } @@ -1635,7 +2192,7 @@ namespace { // Returns the value to be used as the specified operand of the vector // instruction that fuses I with J. Value *BBVectorize::getReplacementInput(LLVMContext& Context, Instruction *I, - Instruction *J, unsigned o, bool FlipMemInputs) { + Instruction *J, unsigned o, bool IBeforeJ) { Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0); Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), 1); @@ -1646,12 +2203,6 @@ namespace { Instruction *L = I, *H = J; Type *ArgTypeL = ArgTypeI, *ArgTypeH = ArgTypeJ; - if (FlipMemInputs) { - L = J; - H = I; - ArgTypeL = ArgTypeJ; - ArgTypeH = ArgTypeI; - } unsigned numElemL; if (ArgTypeL->isVectorTy()) @@ -1804,8 +2355,9 @@ namespace { Instruction *S = new ShuffleVectorInst(I1, UndefValue::get(I1T), ConstantVector::get(Mask), - getReplacementName(I, true, o)); - S->insertBefore(J); + getReplacementName(IBeforeJ ? I : J, + true, o)); + S->insertBefore(IBeforeJ ? J : I); return S; } @@ -1826,8 +2378,9 @@ namespace { Instruction *NewI1 = new ShuffleVectorInst(I1, UndefValue::get(I1T), ConstantVector::get(Mask), - getReplacementName(I, true, o, 1)); - NewI1->insertBefore(J); + getReplacementName(IBeforeJ ? I : J, + true, o, 1)); + NewI1->insertBefore(IBeforeJ ? J : I); I1 = NewI1; I1T = I2T; I1Elem = I2Elem; @@ -1842,8 +2395,9 @@ namespace { Instruction *NewI2 = new ShuffleVectorInst(I2, UndefValue::get(I2T), ConstantVector::get(Mask), - getReplacementName(I, true, o, 1)); - NewI2->insertBefore(J); + getReplacementName(IBeforeJ ? I : J, + true, o, 1)); + NewI2->insertBefore(IBeforeJ ? J : I); I2 = NewI2; I2T = I1T; I2Elem = I1Elem; @@ -1863,8 +2417,8 @@ namespace { Instruction *NewOp = new ShuffleVectorInst(I1, I2, ConstantVector::get(Mask), - getReplacementName(I, true, o)); - NewOp->insertBefore(J); + getReplacementName(IBeforeJ ? 
I : J, true, o)); + NewOp->insertBefore(IBeforeJ ? J : I); return NewOp; } } @@ -1872,17 +2426,17 @@ namespace { Type *ArgType = ArgTypeL; if (numElemL < numElemH) { if (numElemL == 1 && expandIEChain(Context, I, J, o, HOp, numElemH, - ArgTypeL, VArgType, 1)) { + ArgTypeL, VArgType, IBeforeJ, 1)) { // This is another short-circuit case: we're combining a scalar into // a vector that is formed by an IE chain. We've just expanded the IE // chain, now insert the scalar and we're done. Instruction *S = InsertElementInst::Create(HOp, LOp, CV0, - getReplacementName(I, true, o)); - S->insertBefore(J); + getReplacementName(IBeforeJ ? I : J, true, o)); + S->insertBefore(IBeforeJ ? J : I); return S; } else if (!expandIEChain(Context, I, J, o, LOp, numElemL, ArgTypeL, - ArgTypeH)) { + ArgTypeH, IBeforeJ)) { // The two vector inputs to the shuffle must be the same length, // so extend the smaller vector to be the same length as the larger one. Instruction *NLOp; @@ -1897,29 +2451,32 @@ namespace { NLOp = new ShuffleVectorInst(LOp, UndefValue::get(ArgTypeL), ConstantVector::get(Mask), - getReplacementName(I, true, o, 1)); + getReplacementName(IBeforeJ ? I : J, + true, o, 1)); } else { NLOp = InsertElementInst::Create(UndefValue::get(ArgTypeH), LOp, CV0, - getReplacementName(I, true, o, 1)); + getReplacementName(IBeforeJ ? I : J, + true, o, 1)); } - NLOp->insertBefore(J); + NLOp->insertBefore(IBeforeJ ? J : I); LOp = NLOp; } ArgType = ArgTypeH; } else if (numElemL > numElemH) { if (numElemH == 1 && expandIEChain(Context, I, J, o, LOp, numElemL, - ArgTypeH, VArgType)) { + ArgTypeH, VArgType, IBeforeJ)) { Instruction *S = InsertElementInst::Create(LOp, HOp, ConstantInt::get(Type::getInt32Ty(Context), numElemL), - getReplacementName(I, true, o)); - S->insertBefore(J); + getReplacementName(IBeforeJ ? I : J, + true, o)); + S->insertBefore(IBeforeJ ? J : I); return S; } else if (!expandIEChain(Context, I, J, o, HOp, numElemH, ArgTypeH, - ArgTypeL)) { + ArgTypeL, IBeforeJ)) { Instruction *NHOp; if (numElemH > 1) { std::vector Mask(numElemL); @@ -1931,13 +2488,15 @@ namespace { NHOp = new ShuffleVectorInst(HOp, UndefValue::get(ArgTypeH), ConstantVector::get(Mask), - getReplacementName(I, true, o, 1)); + getReplacementName(IBeforeJ ? I : J, + true, o, 1)); } else { NHOp = InsertElementInst::Create(UndefValue::get(ArgTypeL), HOp, CV0, - getReplacementName(I, true, o, 1)); + getReplacementName(IBeforeJ ? I : J, + true, o, 1)); } - NHOp->insertBefore(J); + NHOp->insertBefore(IBeforeJ ? J : I); HOp = NHOp; } } @@ -1955,19 +2514,21 @@ namespace { } Instruction *BV = new ShuffleVectorInst(LOp, HOp, - ConstantVector::get(Mask), - getReplacementName(I, true, o)); - BV->insertBefore(J); + ConstantVector::get(Mask), + getReplacementName(IBeforeJ ? I : J, true, o)); + BV->insertBefore(IBeforeJ ? J : I); return BV; } Instruction *BV1 = InsertElementInst::Create( UndefValue::get(VArgType), LOp, CV0, - getReplacementName(I, true, o, 1)); - BV1->insertBefore(I); + getReplacementName(IBeforeJ ? I : J, + true, o, 1)); + BV1->insertBefore(IBeforeJ ? J : I); Instruction *BV2 = InsertElementInst::Create(BV1, HOp, CV1, - getReplacementName(I, true, o, 2)); - BV2->insertBefore(J); + getReplacementName(IBeforeJ ? I : J, + true, o, 2)); + BV2->insertBefore(IBeforeJ ? 
J : I); return BV2; } @@ -1976,7 +2537,7 @@ namespace { void BBVectorize::getReplacementInputsForPair(LLVMContext& Context, Instruction *I, Instruction *J, SmallVector &ReplacedOperands, - bool FlipMemInputs) { + bool IBeforeJ) { unsigned NumOperands = I->getNumOperands(); for (unsigned p = 0, o = NumOperands-1; p < NumOperands; ++p, --o) { @@ -1985,8 +2546,7 @@ namespace { if (isa(I) || (o == 1 && isa(I))) { // This is the pointer for a load/store instruction. - ReplacedOperands[o] = getReplacementPointerInput(Context, I, J, o, - FlipMemInputs); + ReplacedOperands[o] = getReplacementPointerInput(Context, I, J, o); continue; } else if (isa(I)) { Function *F = cast(I)->getCalledFunction(); @@ -2014,8 +2574,7 @@ namespace { continue; } - ReplacedOperands[o] = - getReplacementInput(Context, I, J, o, FlipMemInputs); + ReplacedOperands[o] = getReplacementInput(Context, I, J, o, IBeforeJ); } } @@ -2026,8 +2585,7 @@ namespace { void BBVectorize::replaceOutputsOfPair(LLVMContext& Context, Instruction *I, Instruction *J, Instruction *K, Instruction *&InsertionPt, - Instruction *&K1, Instruction *&K2, - bool FlipMemInputs) { + Instruction *&K1, Instruction *&K2) { if (isa(I)) { AA->replaceWithNewValue(I, K); AA->replaceWithNewValue(J, K); @@ -2057,13 +2615,11 @@ namespace { } K1 = new ShuffleVectorInst(K, UndefValue::get(VType), - ConstantVector::get( - FlipMemInputs ? Mask2 : Mask1), + ConstantVector::get( Mask1), getReplacementName(K, false, 1)); } else { Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0); - Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), numElem-1); - K1 = ExtractElementInst::Create(K, FlipMemInputs ? CV1 : CV0, + K1 = ExtractElementInst::Create(K, CV0, getReplacementName(K, false, 1)); } @@ -2075,13 +2631,11 @@ namespace { } K2 = new ShuffleVectorInst(K, UndefValue::get(VType), - ConstantVector::get( - FlipMemInputs ? Mask1 : Mask2), + ConstantVector::get( Mask2), getReplacementName(K, false, 2)); } else { - Value *CV0 = ConstantInt::get(Type::getInt32Ty(Context), 0); Value *CV1 = ConstantInt::get(Type::getInt32Ty(Context), numElem-1); - K2 = ExtractElementInst::Create(K, FlipMemInputs ? CV0 : CV1, + K2 = ExtractElementInst::Create(K, CV1, getReplacementName(K, false, 2)); } @@ -2181,36 +2735,6 @@ namespace { } } - // As with the aliasing information, SCEV can also change because of - // vectorization. This information is used to compute relative pointer - // offsets; the necessary information will be cached here prior to - // fusion. - void BBVectorize::collectPtrInfo(std::vector &PairableInsts, - DenseMap &ChosenPairs, - DenseSet &LowPtrInsts) { - for (std::vector::iterator PI = PairableInsts.begin(), - PIE = PairableInsts.end(); PI != PIE; ++PI) { - DenseMap::iterator P = ChosenPairs.find(*PI); - if (P == ChosenPairs.end()) continue; - - Instruction *I = cast(P->first); - Instruction *J = cast(P->second); - - if (!isa(I) && !isa(I)) - continue; - - Value *IPtr, *JPtr; - unsigned IAlignment, JAlignment; - int64_t OffsetInElmts; - if (!getPairPtrInfo(I, J, IPtr, JPtr, IAlignment, JAlignment, - OffsetInElmts) || abs64(OffsetInElmts) != 1) - llvm_unreachable("Pre-fusion pointer analysis failed"); - - Value *LowPI = (OffsetInElmts > 0) ? I : J; - LowPtrInsts.insert(LowPI); - } - } - // When the first instruction in each pair is cloned, it will inherit its // parent's metadata. This metadata must be combined with that of the other // instruction in a safe way. @@ -2244,27 +2768,27 @@ namespace { // second member). 
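A minimal standalone sketch of the shuffle-mask convention the replacement-input code above relies on, assuming both inputs have already been padded to a common width W (the helper name is illustrative): the first operand's lanes are numbered 0..W-1 and the second operand's lanes W..2W-1, so concatenating an L-part and an H-part uses the mask [0..numElemL-1, W..W+numElemH-1].

#include <cstdio>
#include <vector>

std::vector<unsigned> concatMask(unsigned NumElemL, unsigned NumElemH,
                                 unsigned PaddedWidth) {
  std::vector<unsigned> Mask;
  for (unsigned v = 0; v < NumElemL; ++v)
    Mask.push_back(v);                 // lanes taken from the low input
  for (unsigned v = 0; v < NumElemH; ++v)
    Mask.push_back(PaddedWidth + v);   // lanes taken from the high input
  return Mask;
}

int main() {
  // A <3 x T> low part and a <1 x T> high part, both padded to width 3.
  for (unsigned Idx : concatMask(3, 1, 3))
    std::printf("%u ", Idx);           // prints: 0 1 2 3
  std::printf("\n");
  return 0;
}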
void BBVectorize::fuseChosenPairs(BasicBlock &BB, std::vector &PairableInsts, - DenseMap &ChosenPairs) { + DenseMap &ChosenPairs, + DenseSet &FixedOrderPairs, + DenseMap &PairConnectionTypes, + std::multimap &ConnectedPairs, + std::multimap &ConnectedPairDeps) { LLVMContext& Context = BB.getContext(); // During the vectorization process, the order of the pairs to be fused // could be flipped. So we'll add each pair, flipped, into the ChosenPairs // list. After a pair is fused, the flipped pair is removed from the list. - std::vector FlippedPairs; - FlippedPairs.reserve(ChosenPairs.size()); + DenseSet FlippedPairs; for (DenseMap::iterator P = ChosenPairs.begin(), E = ChosenPairs.end(); P != E; ++P) - FlippedPairs.push_back(ValuePair(P->second, P->first)); - for (std::vector::iterator P = FlippedPairs.begin(), + FlippedPairs.insert(ValuePair(P->second, P->first)); + for (DenseSet::iterator P = FlippedPairs.begin(), E = FlippedPairs.end(); P != E; ++P) ChosenPairs.insert(*P); std::multimap LoadMoveSet; collectLoadMoveSet(BB, PairableInsts, ChosenPairs, LoadMoveSet); - DenseSet LowPtrInsts; - collectPtrInfo(PairableInsts, ChosenPairs, LowPtrInsts); - DEBUG(dbgs() << "BBV: initial: \n" << BB << "\n"); for (BasicBlock::iterator PI = BB.getFirstInsertionPt(); PI != BB.end();) { @@ -2304,44 +2828,92 @@ namespace { continue; } - bool FlipMemInputs = false; - if (isa(I) || isa(I)) - FlipMemInputs = (LowPtrInsts.find(I) == LowPtrInsts.end()); + // If the pair must have the other order, then flip it. + bool FlipPairOrder = FixedOrderPairs.count(ValuePair(J, I)); + if (!FlipPairOrder && !FixedOrderPairs.count(ValuePair(I, J))) { + // This pair does not have a fixed order, and so we might want to + // flip it if that will yield fewer shuffles. We count the number + // of dependencies connected via swaps, and those directly connected, + // and flip the order if the number of swaps is greater. + bool OrigOrder = true; + VPPIteratorPair IP = ConnectedPairDeps.equal_range(ValuePair(I, J)); + if (IP.first == ConnectedPairDeps.end()) { + IP = ConnectedPairDeps.equal_range(ValuePair(J, I)); + OrigOrder = false; + } + if (IP.first != ConnectedPairDeps.end()) { + unsigned NumDepsDirect = 0, NumDepsSwap = 0; + for (std::multimap::iterator Q = IP.first; + Q != IP.second; ++Q) { + DenseMap::iterator R = + PairConnectionTypes.find(VPPair(Q->second, Q->first)); + assert(R != PairConnectionTypes.end() && + "Cannot find pair connection type"); + if (R->second == PairConnectionDirect) + ++NumDepsDirect; + else if (R->second == PairConnectionSwap) + ++NumDepsSwap; + } + + if (!OrigOrder) + std::swap(NumDepsDirect, NumDepsSwap); + + if (NumDepsSwap > NumDepsDirect) { + FlipPairOrder = true; + DEBUG(dbgs() << "BBV: reordering pair: " << *I << + " <-> " << *J << "\n"); + } + } + } + + Instruction *L = I, *H = J; + if (FlipPairOrder) + std::swap(H, L); + + // If the pair being fused uses the opposite order from that in the pair + // connection map, then we need to flip the types. 
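A minimal standalone sketch of how replaceOutputsOfPair recovers the two original results from the fused value (the helper name is illustrative): for vector inputs, two shuffles select the low and the high lane ranges; for scalar inputs, a single extractelement at lane 0 or at lane numElem-1 serves the same purpose.

#include <cstdio>
#include <vector>

void makeHalfMasks(unsigned NumLowLanes, unsigned NumHighLanes,
                   std::vector<unsigned> &Mask1,
                   std::vector<unsigned> &Mask2) {
  for (unsigned v = 0; v < NumLowLanes; ++v)
    Mask1.push_back(v);                 // lanes that came from the low input
  for (unsigned v = 0; v < NumHighLanes; ++v)
    Mask2.push_back(NumLowLanes + v);   // lanes that came from the high input
}

int main() {
  std::vector<unsigned> M1, M2;
  makeHalfMasks(2, 2, M1, M2);          // a fused <4 x T> from two <2 x T>
  for (unsigned I : M1) std::printf("%u ", I);  // 0 1
  std::printf("| ");
  for (unsigned I : M2) std::printf("%u ", I);  // 2 3
  std::printf("\n");
  return 0;
}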
+ VPPIteratorPair IP = ConnectedPairs.equal_range(ValuePair(H, L)); + for (std::multimap::iterator Q = IP.first; + Q != IP.second; ++Q) { + DenseMap::iterator R = PairConnectionTypes.find(*Q); + assert(R != PairConnectionTypes.end() && + "Cannot find pair connection type"); + if (R->second == PairConnectionDirect) + R->second = PairConnectionSwap; + else if (R->second == PairConnectionSwap) + R->second = PairConnectionDirect; + } + + bool LBeforeH = !FlipPairOrder; unsigned NumOperands = I->getNumOperands(); SmallVector ReplacedOperands(NumOperands); - getReplacementInputsForPair(Context, I, J, ReplacedOperands, - FlipMemInputs); + getReplacementInputsForPair(Context, L, H, ReplacedOperands, + LBeforeH); // Make a copy of the original operation, change its type to the vector // type and replace its operands with the vector operands. - Instruction *K = I->clone(); - if (I->hasName()) K->takeName(I); + Instruction *K = L->clone(); + if (L->hasName()) + K->takeName(L); + else if (H->hasName()) + K->takeName(H); if (!isa(K)) - K->mutateType(getVecTypeForPair(I->getType(), J->getType())); + K->mutateType(getVecTypeForPair(L->getType(), H->getType())); - combineMetadata(K, J); + combineMetadata(K, H); + K->intersectOptionalDataWith(H); for (unsigned o = 0; o < NumOperands; ++o) K->setOperand(o, ReplacedOperands[o]); - // If we've flipped the memory inputs, make sure that we take the correct - // alignment. - if (FlipMemInputs) { - if (isa(K)) - cast(K)->setAlignment(cast(J)->getAlignment()); - else - cast(K)->setAlignment(cast(J)->getAlignment()); - } - K->insertAfter(J); // Instruction insertion point: Instruction *InsertionPt = K; Instruction *K1 = 0, *K2 = 0; - replaceOutputsOfPair(Context, I, J, K, InsertionPt, K1, K2, - FlipMemInputs); + replaceOutputsOfPair(Context, L, H, K, InsertionPt, K1, K2); // The use tree of the first original instruction must be moved to after // the location of the second instruction. The entire use tree of the @@ -2351,10 +2923,10 @@ namespace { moveUsesOfIAfterJ(BB, LoadMoveSet, InsertionPt, I, J); if (!isa(I)) { - I->replaceAllUsesWith(K1); - J->replaceAllUsesWith(K2); - AA->replaceWithNewValue(I, K1); - AA->replaceWithNewValue(J, K2); + L->replaceAllUsesWith(K1); + H->replaceAllUsesWith(K2); + AA->replaceWithNewValue(L, K1); + AA->replaceWithNewValue(H, K2); } // Instructions that may read from memory may be in the load move set. 
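A minimal standalone sketch of the connection-type fix-up performed at the top of this hunk (the function name is illustrative): when a pair ends up fused in the reversed order, direct and swap connections recorded against it trade places, while splat connections are unaffected.

enum PairConnectionType { PairConnectionDirect, PairConnectionSwap,
                          PairConnectionSplat };

PairConnectionType toggleForFlippedFusion(PairConnectionType T) {
  if (T == PairConnectionDirect) return PairConnectionSwap;
  if (T == PairConnectionSwap)   return PairConnectionDirect;
  return T;                      // splat stays splat
}

int main() {
  return (toggleForFlippedFusion(PairConnectionDirect) == PairConnectionSwap &&
          toggleForFlippedFusion(PairConnectionSplat) == PairConnectionSplat)
             ? 0 : 1;
}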
@@ -2387,6 +2959,9 @@ namespace { SE->forgetValue(J); I->eraseFromParent(); J->eraseFromParent(); + + DEBUG(if (PrintAfterEveryPair) dbgs() << "BBV: block is now: \n" << + BB << "\n"); } DEBUG(dbgs() << "BBV: final: \n" << BB << "\n"); @@ -2397,6 +2972,7 @@ char BBVectorize::ID = 0; static const char bb_vectorize_name[] = "Basic-Block Vectorization"; INITIALIZE_PASS_BEGIN(BBVectorize, BBV_NAME, bb_vectorize_name, false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) +INITIALIZE_PASS_DEPENDENCY(DominatorTree) INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) INITIALIZE_PASS_END(BBVectorize, BBV_NAME, bb_vectorize_name, false, false) diff --git a/lib/Transforms/Vectorize/CMakeLists.txt b/lib/Transforms/Vectorize/CMakeLists.txt index 06cf1e4..e64034a 100644 --- a/lib/Transforms/Vectorize/CMakeLists.txt +++ b/lib/Transforms/Vectorize/CMakeLists.txt @@ -1,6 +1,7 @@ add_llvm_library(LLVMVectorize BBVectorize.cpp Vectorize.cpp + LoopVectorize.cpp ) add_dependencies(LLVMVectorize intrinsics_gen) diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp new file mode 100644 index 0000000..a7ef248 --- /dev/null +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -0,0 +1,1941 @@ +//===- LoopVectorize.cpp - A Loop Vectorizer ------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is the LLVM loop vectorizer. This pass modifies 'vectorizable' loops +// and generates target-independent LLVM-IR. Legalization of the IR is done +// in the codegen. However, the vectorizes uses (will use) the codegen +// interfaces to generate IR that is likely to result in an optimal binary. +// +// The loop vectorizer combines consecutive loop iteration into a single +// 'wide' iteration. After this transformation the index is incremented +// by the SIMD vector width, and not by one. +// +// This pass has three parts: +// 1. The main loop pass that drives the different parts. +// 2. LoopVectorizationLegality - A unit that checks for the legality +// of the vectorization. +// 3. SingleBlockLoopVectorizer - A unit that performs the actual +// widening of instructions. +// 4. LoopVectorizationCostModel - A unit that checks for the profitability +// of vectorization. It decides on the optimal vector width, which +// can be one, if vectorization is not profitable. +//===----------------------------------------------------------------------===// +// +// The reduction-variable vectorization is based on the paper: +// D. Nuzman and R. Henderson. Multi-platform Auto-vectorization. +// +// Variable uniformity checks are inspired by: +// Karrenberg, R. and Hack, S. Whole Function Vectorization. +// +// Other ideas/concepts are from: +// A. Zaks and D. Nuzman. Autovectorization in GCC-two years later. 
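A minimal sketch, in plain scalar C++, of the transformation the new pass describes above (the function names and the VF value are illustrative): consecutive iterations are folded into one wide iteration that advances the index by the vectorization factor, and a scalar remainder loop handles the iterations that do not fill a whole vector.

#include <cstddef>

void saxpy_scalar(float a, const float *x, float *y, std::size_t n) {
  for (std::size_t i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}

void saxpy_as_if_vectorized(float a, const float *x, float *y, std::size_t n) {
  const std::size_t VF = 4;                  // one "wide" iteration = 4 lanes
  std::size_t i = 0;
  for (; i + VF <= n; i += VF)               // the new vector loop body
    for (std::size_t lane = 0; lane < VF; ++lane)
      y[i + lane] = a * x[i + lane] + y[i + lane];
  for (; i < n; ++i)                         // the scalar epilogue loop
    y[i] = a * x[i] + y[i];
}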
+// +//===----------------------------------------------------------------------===// +#define LV_NAME "loop-vectorize" +#define DEBUG_TYPE LV_NAME +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Instructions.h" +#include "llvm/LLVMContext.h" +#include "llvm/Pass.h" +#include "llvm/Analysis/LoopPass.h" +#include "llvm/Value.h" +#include "llvm/Function.h" +#include "llvm/Analysis/Verifier.h" +#include "llvm/Module.h" +#include "llvm/Type.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/Analysis/AliasAnalysis.h" +#include "llvm/Analysis/AliasSetTracker.h" +#include "llvm/Analysis/ScalarEvolution.h" +#include "llvm/Analysis/Dominators.h" +#include "llvm/Analysis/ScalarEvolutionExpressions.h" +#include "llvm/Analysis/ScalarEvolutionExpander.h" +#include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/ValueTracking.h" +#include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Utils/BasicBlockUtils.h" +#include "llvm/TargetTransformInfo.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/DataLayout.h" +#include "llvm/Transforms/Utils/Local.h" +#include +using namespace llvm; + +static cl::opt +VectorizationFactor("force-vector-width", cl::init(0), cl::Hidden, + cl::desc("Set the default vectorization width. Zero is autoselect.")); + +/// We don't vectorize loops with a known constant trip count below this number. +const unsigned TinyTripCountThreshold = 16; + +/// When performing a runtime memory check, do not check more than this +/// number of pointers. Notice that the check is quadratic! +const unsigned RuntimeMemoryCheckThreshold = 2; + +namespace { + +// Forward declarations. +class LoopVectorizationLegality; +class LoopVectorizationCostModel; + +/// SingleBlockLoopVectorizer vectorizes loops which contain only one basic +/// block to a specified vectorization factor (VF). +/// This class performs the widening of scalars into vectors, or multiple +/// scalars. This class also implements the following features: +/// * It inserts an epilogue loop for handling loops that don't have iteration +/// counts that are known to be a multiple of the vectorization factor. +/// * It handles the code generation for reduction variables. +/// * Scalarization (implementation using scalars) of un-vectorizable +/// instructions. +/// SingleBlockLoopVectorizer does not perform any vectorization-legality +/// checks, and relies on the caller to check for the different legality +/// aspects. The SingleBlockLoopVectorizer relies on the +/// LoopVectorizationLegality class to provide information about the induction +/// and reduction variables that were found to a given vectorization factor. +class SingleBlockLoopVectorizer { +public: + /// Ctor. + SingleBlockLoopVectorizer(Loop *Orig, ScalarEvolution *Se, LoopInfo *Li, + DominatorTree *dt, LPPassManager *Lpm, + unsigned VecWidth): + OrigLoop(Orig), SE(Se), LI(Li), DT(dt), LPM(Lpm), VF(VecWidth), + Builder(Se->getContext()), Induction(0), OldInduction(0) { } + + // Perform the actual loop widening (vectorization). + void vectorize(LoopVectorizationLegality *Legal) { + ///Create a new empty loop. Unlink the old loop and connect the new one. + createEmptyLoop(Legal); + /// Widen each instruction in the old loop to a new one in the new loop. + /// Use the Legality module to find the induction and reduction variables. + vectorizeLoop(Legal); + // Register the new loop and update the analysis passes. 
+ updateAnalysis(); + } + +private: + /// Create an empty loop, based on the loop ranges of the old loop. + void createEmptyLoop(LoopVectorizationLegality *Legal); + /// Copy and widen the instructions from the old loop. + void vectorizeLoop(LoopVectorizationLegality *Legal); + /// Insert the new loop to the loop hierarchy and pass manager + /// and update the analysis passes. + void updateAnalysis(); + + /// This instruction is un-vectorizable. Implement it as a sequence + /// of scalars. + void scalarizeInstruction(Instruction *Instr); + + /// Create a broadcast instruction. This method generates a broadcast + /// instruction (shuffle) for loop invariant values and for the induction + /// value. If this is the induction variable then we extend it to N, N+1, ... + /// this is needed because each iteration in the loop corresponds to a SIMD + /// element. + Value *getBroadcastInstrs(Value *V); + + /// This is a helper function used by getBroadcastInstrs. It adds 0, 1, 2 .. + /// for each element in the vector. Starting from zero. + Value *getConsecutiveVector(Value* Val); + + /// When we go over instructions in the basic block we rely on previous + /// values within the current basic block or on loop invariant values. + /// When we widen (vectorize) values we place them in the map. If the values + /// are not within the map, they have to be loop invariant, so we simply + /// broadcast them into a vector. + Value *getVectorValue(Value *V); + + /// Get a uniform vector of constant integers. We use this to get + /// vectors of ones and zeros for the reduction code. + Constant* getUniformVector(unsigned Val, Type* ScalarTy); + + typedef DenseMap ValueMap; + + /// The original loop. + Loop *OrigLoop; + // Scev analysis to use. + ScalarEvolution *SE; + // Loop Info. + LoopInfo *LI; + // Dominator Tree. + DominatorTree *DT; + // Loop Pass Manager; + LPPassManager *LPM; + // The vectorization factor to use. + unsigned VF; + + // The builder that we use + IRBuilder<> Builder; + + // --- Vectorization state --- + + /// The vector-loop preheader. + BasicBlock *LoopVectorPreHeader; + /// The scalar-loop preheader. + BasicBlock *LoopScalarPreHeader; + /// Middle Block between the vector and the scalar. + BasicBlock *LoopMiddleBlock; + ///The ExitBlock of the scalar loop. + BasicBlock *LoopExitBlock; + ///The vector loop body. + BasicBlock *LoopVectorBody; + ///The scalar loop body. + BasicBlock *LoopScalarBody; + ///The first bypass block. + BasicBlock *LoopBypassBlock; + + /// The new Induction variable which was added to the new block. + PHINode *Induction; + /// The induction variable of the old basic block. + PHINode *OldInduction; + // Maps scalars to widened vectors. + ValueMap WidenMap; +}; + +/// LoopVectorizationLegality checks if it is legal to vectorize a loop, and +/// to what vectorization factor. +/// This class does not look at the profitability of vectorization, only the +/// legality. This class has two main kinds of checks: +/// * Memory checks - The code in canVectorizeMemory checks if vectorization +/// will change the order of memory accesses in a way that will change the +/// correctness of the program. +/// * Scalars checks - The code in canVectorizeBlock checks for a number +/// of different conditions, such as the availability of a single induction +/// variable, that all types are supported and vectorize-able, etc. +/// This code reflects the capabilities of SingleBlockLoopVectorizer. 
+/// This class is also used by SingleBlockLoopVectorizer for identifying +/// induction variable and the different reduction variables. +class LoopVectorizationLegality { +public: + LoopVectorizationLegality(Loop *Lp, ScalarEvolution *Se, DataLayout *Dl): + TheLoop(Lp), SE(Se), DL(Dl), Induction(0) { } + + /// This represents the kinds of reductions that we support. + enum ReductionKind { + NoReduction, /// Not a reduction. + IntegerAdd, /// Sum of numbers. + IntegerMult, /// Product of numbers. + IntegerOr, /// Bitwise or logical OR of numbers. + IntegerAnd, /// Bitwise or logical AND of numbers. + IntegerXor /// Bitwise or logical XOR of numbers. + }; + + /// This POD struct holds information about reduction variables. + struct ReductionDescriptor { + // Default C'tor + ReductionDescriptor(): + StartValue(0), LoopExitInstr(0), Kind(NoReduction) {} + + // C'tor. + ReductionDescriptor(Value *Start, Instruction *Exit, ReductionKind K): + StartValue(Start), LoopExitInstr(Exit), Kind(K) {} + + // The starting value of the reduction. + // It does not have to be zero! + Value *StartValue; + // The instruction who's value is used outside the loop. + Instruction *LoopExitInstr; + // The kind of the reduction. + ReductionKind Kind; + }; + + // This POD struct holds information about the memory runtime legality + // check that a group of pointers do not overlap. + struct RuntimePointerCheck { + /// This flag indicates if we need to add the runtime check. + bool Need; + /// Holds the pointers that we need to check. + SmallVector Pointers; + }; + + /// ReductionList contains the reduction descriptors for all + /// of the reductions that were found in the loop. + typedef DenseMap ReductionList; + + /// Returns true if it is legal to vectorize this loop. + /// This does not mean that it is profitable to vectorize this + /// loop, only that it is legal to do so. + bool canVectorize(); + + /// Returns the Induction variable. + PHINode *getInduction() {return Induction;} + + /// Returns the reduction variables found in the loop. + ReductionList *getReductionVars() { return &Reductions; } + + /// Check if the pointer returned by this GEP is consecutive + /// when the index is vectorized. This happens when the last + /// index of the GEP is consecutive, like the induction variable. + /// This check allows us to vectorize A[idx] into a wide load/store. + bool isConsecutiveGep(Value *Ptr); + + /// Returns true if the value V is uniform within the loop. + bool isUniform(Value *V); + + /// Returns true if this instruction will remain scalar after vectorization. + bool isUniformAfterVectorization(Instruction* I) {return Uniforms.count(I);} + + /// Returns the information that we collected about runtime memory check. + RuntimePointerCheck *getRuntimePointerCheck() {return &PtrRtCheck; } +private: + /// Check if a single basic block loop is vectorizable. + /// At this point we know that this is a loop with a constant trip count + /// and we only need to check individual instructions. + bool canVectorizeBlock(BasicBlock &BB); + + /// When we vectorize loops we may change the order in which + /// we read and write from memory. This method checks if it is + /// legal to vectorize the code, considering only memory constrains. + /// Returns true if BB is vectorizable + bool canVectorizeMemory(BasicBlock &BB); + + /// Returns True, if 'Phi' is the kind of reduction variable for type + /// 'Kind'. If this is a reduction variable, it adds it to ReductionList. 
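A minimal scalar example of the pattern a ReductionDescriptor records (the function is illustrative): the accumulator starts from a value defined before the loop, is updated by one reduction operation per iteration, and only its final value is used after the loop.

#include <cstddef>

int sumArray(const int *A, std::size_t N, int Start) {
  int Sum = Start;              // StartValue: it does not have to be zero
  for (std::size_t I = 0; I < N; ++I)
    Sum = Sum + A[I];           // the add is the loop-exit instruction;
                                // the kind would be IntegerAdd
  return Sum;                   // the only use of Sum outside the loop
}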
+ bool AddReductionVar(PHINode *Phi, ReductionKind Kind); + /// Returns true if the instruction I can be a reduction variable of type + /// 'Kind'. + bool isReductionInstr(Instruction *I, ReductionKind Kind); + /// Returns True, if 'Phi' is an induction variable. + bool isInductionVariable(PHINode *Phi); + /// Return true if can compute the address bounds of Ptr within the loop. + bool hasComputableBounds(Value *Ptr); + + /// The loop that we evaluate. + Loop *TheLoop; + /// Scev analysis. + ScalarEvolution *SE; + /// DataLayout analysis. + DataLayout *DL; + + // --- vectorization state --- // + + /// Holds the induction variable. + PHINode *Induction; + /// Holds the reduction variables. + ReductionList Reductions; + /// Allowed outside users. This holds the reduction + /// vars which can be accessed from outside the loop. + SmallPtrSet AllowedExit; + /// This set holds the variables which are known to be uniform after + /// vectorization. + SmallPtrSet Uniforms; + /// We need to check that all of the pointers in this list are disjoint + /// at runtime. + RuntimePointerCheck PtrRtCheck; +}; + +/// LoopVectorizationCostModel - estimates the expected speedups due to +/// vectorization. +/// In many cases vectorization is not profitable. This can happen because +/// of a number of reasons. In this class we mainly attempt to predict +/// the expected speedup/slowdowns due to the supported instruction set. +/// We use the VectorTargetTransformInfo to query the different backends +/// for the cost of different operations. +class LoopVectorizationCostModel { +public: + /// C'tor. + LoopVectorizationCostModel(Loop *Lp, ScalarEvolution *Se, + LoopVectorizationLegality *Leg, + const VectorTargetTransformInfo *Vtti): + TheLoop(Lp), SE(Se), Legal(Leg), VTTI(Vtti) { } + + /// Returns the most profitable vectorization factor for the loop that is + /// smaller or equal to the VF argument. This method checks every power + /// of two up to VF. + unsigned findBestVectorizationFactor(unsigned VF = 8); + +private: + /// Returns the expected execution cost. The unit of the cost does + /// not matter because we use the 'cost' units to compare different + /// vector widths. The cost that is returned is *not* normalized by + /// the factor width. + unsigned expectedCost(unsigned VF); + + /// Returns the execution time cost of an instruction for a given vector + /// width. Vector width of one means scalar. + unsigned getInstructionCost(Instruction *I, unsigned VF); + + /// A helper function for converting Scalar types to vector types. + /// If the incoming type is void, we return void. If the VF is 1, we return + /// the scalar type. + static Type* ToVectorTy(Type *Scalar, unsigned VF); + + /// The loop that we evaluate. + Loop *TheLoop; + /// Scev analysis. + ScalarEvolution *SE; + + /// Vectorization legality. + LoopVectorizationLegality *Legal; + /// Vector target information. + const VectorTargetTransformInfo *VTTI; +}; + +struct LoopVectorize : public LoopPass { + static char ID; // Pass identification, replacement for typeid + + LoopVectorize() : LoopPass(ID) { + initializeLoopVectorizePass(*PassRegistry::getPassRegistry()); + } + + ScalarEvolution *SE; + DataLayout *DL; + LoopInfo *LI; + TargetTransformInfo *TTI; + DominatorTree *DT; + + virtual bool runOnLoop(Loop *L, LPPassManager &LPM) { + // We only vectorize innermost loops. 
+ if (!L->empty()) + return false; + + SE = &getAnalysis(); + DL = getAnalysisIfAvailable(); + LI = &getAnalysis(); + TTI = getAnalysisIfAvailable(); + DT = &getAnalysis(); + + DEBUG(dbgs() << "LV: Checking a loop in \"" << + L->getHeader()->getParent()->getName() << "\"\n"); + + // Check if it is legal to vectorize the loop. + LoopVectorizationLegality LVL(L, SE, DL); + if (!LVL.canVectorize()) { + DEBUG(dbgs() << "LV: Not vectorizing.\n"); + return false; + } + + // Select the preffered vectorization factor. + unsigned VF = 1; + if (VectorizationFactor == 0) { + const VectorTargetTransformInfo *VTTI = 0; + if (TTI) + VTTI = TTI->getVectorTargetTransformInfo(); + // Use the cost model. + LoopVectorizationCostModel CM(L, SE, &LVL, VTTI); + VF = CM.findBestVectorizationFactor(); + + if (VF == 1) { + DEBUG(dbgs() << "LV: Vectorization is possible but not beneficial.\n"); + return false; + } + + } else { + // Use the user command flag. + VF = VectorizationFactor; + } + + DEBUG(dbgs() << "LV: Found a vectorizable loop ("<< VF << ") in "<< + L->getHeader()->getParent()->getParent()->getModuleIdentifier()<< + "\n"); + + // If we decided that it is *legal* to vectorizer the loop then do it. + SingleBlockLoopVectorizer LB(L, SE, LI, DT, &LPM, VF); + LB.vectorize(&LVL); + + DEBUG(verifyFunction(*L->getHeader()->getParent())); + return true; + } + + virtual void getAnalysisUsage(AnalysisUsage &AU) const { + LoopPass::getAnalysisUsage(AU); + AU.addRequiredID(LoopSimplifyID); + AU.addRequiredID(LCSSAID); + AU.addRequired(); + AU.addRequired(); + AU.addRequired(); + AU.addPreserved(); + AU.addPreserved(); + } + +}; + +Value *SingleBlockLoopVectorizer::getBroadcastInstrs(Value *V) { + // Instructions that access the old induction variable + // actually want to get the new one. + if (V == OldInduction) + V = Induction; + // Create the types. + LLVMContext &C = V->getContext(); + Type *VTy = VectorType::get(V->getType(), VF); + Type *I32 = IntegerType::getInt32Ty(C); + Constant *Zero = ConstantInt::get(I32, 0); + Value *Zeros = ConstantAggregateZero::get(VectorType::get(I32, VF)); + Value *UndefVal = UndefValue::get(VTy); + // Insert the value into a new vector. + Value *SingleElem = Builder.CreateInsertElement(UndefVal, V, Zero); + // Broadcast the scalar into all locations in the vector. + Value *Shuf = Builder.CreateShuffleVector(SingleElem, UndefVal, Zeros, + "broadcast"); + // We are accessing the induction variable. Make sure to promote the + // index for each consecutive SIMD lane. This adds 0,1,2 ... to all lanes. + if (V == Induction) + return getConsecutiveVector(Shuf); + return Shuf; +} + +Value *SingleBlockLoopVectorizer::getConsecutiveVector(Value* Val) { + assert(Val->getType()->isVectorTy() && "Must be a vector"); + assert(Val->getType()->getScalarType()->isIntegerTy() && + "Elem must be an integer"); + // Create the types. + Type *ITy = Val->getType()->getScalarType(); + VectorType *Ty = cast(Val->getType()); + unsigned VLen = Ty->getNumElements(); + SmallVector Indices; + + // Create a vector of consecutive numbers from zero to VF. + for (unsigned i = 0; i < VLen; ++i) + Indices.push_back(ConstantInt::get(ITy, i)); + + // Add the consecutive indices to the vector value. 
+ Constant *Cv = ConstantVector::get(Indices); + assert(Cv->getType() == Val->getType() && "Invalid consecutive vec"); + return Builder.CreateAdd(Val, Cv, "induction"); +} + +bool LoopVectorizationLegality::isConsecutiveGep(Value *Ptr) { + GetElementPtrInst *Gep = dyn_cast_or_null(Ptr); + if (!Gep) + return false; + + unsigned NumOperands = Gep->getNumOperands(); + Value *LastIndex = Gep->getOperand(NumOperands - 1); + + // Check that all of the gep indices are uniform except for the last. + for (unsigned i = 0; i < NumOperands - 1; ++i) + if (!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop)) + return false; + + // We can emit wide load/stores only of the last index is the induction + // variable. + const SCEV *Last = SE->getSCEV(LastIndex); + if (const SCEVAddRecExpr *AR = dyn_cast(Last)) { + const SCEV *Step = AR->getStepRecurrence(*SE); + + // The memory is consecutive because the last index is consecutive + // and all other indices are loop invariant. + if (Step->isOne()) + return true; + } + + return false; +} + +bool LoopVectorizationLegality::isUniform(Value *V) { + return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop)); +} + +Value *SingleBlockLoopVectorizer::getVectorValue(Value *V) { + assert(!V->getType()->isVectorTy() && "Can't widen a vector"); + // If we saved a vectorized copy of V, use it. + Value *&MapEntry = WidenMap[V]; + if (MapEntry) + return MapEntry; + + // Broadcast V and save the value for future uses. + Value *B = getBroadcastInstrs(V); + MapEntry = B; + return B; +} + +Constant* +SingleBlockLoopVectorizer::getUniformVector(unsigned Val, Type* ScalarTy) { + SmallVector Indices; + // Create a vector of consecutive numbers from zero to VF. + for (unsigned i = 0; i < VF; ++i) + Indices.push_back(ConstantInt::get(ScalarTy, Val, true)); + + // Add the consecutive indices to the vector value. + return ConstantVector::get(Indices); +} + +void SingleBlockLoopVectorizer::scalarizeInstruction(Instruction *Instr) { + assert(!Instr->getType()->isAggregateType() && "Can't handle vectors"); + // Holds vector parameters or scalars, in case of uniform vals. + SmallVector Params; + + // Find all of the vectorized parameters. + for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { + Value *SrcOp = Instr->getOperand(op); + + // If we are accessing the old induction variable, use the new one. + if (SrcOp == OldInduction) { + Params.push_back(getBroadcastInstrs(Induction)); + continue; + } + + // Try using previously calculated values. + Instruction *SrcInst = dyn_cast(SrcOp); + + // If the src is an instruction that appeared earlier in the basic block + // then it should already be vectorized. + if (SrcInst && SrcInst->getParent() == Instr->getParent()) { + assert(WidenMap.count(SrcInst) && "Source operand is unavailable"); + // The parameter is a vector value from earlier. + Params.push_back(WidenMap[SrcInst]); + } else { + // The parameter is a scalar from outside the loop. Maybe even a constant. + Params.push_back(SrcOp); + } + } + + assert(Params.size() == Instr->getNumOperands() && + "Invalid number of operands"); + + // Does this instruction return a value ? + bool IsVoidRetTy = Instr->getType()->isVoidTy(); + Value *VecResults = 0; + + // If we have a return value, create an empty vector. We place the scalarized + // instructions in this vector. 
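// Scalarization keeps the loop vectorizable even when one instruction has no
// vector form: each operand lane is extracted, VF scalar clones of the
// instruction are emitted, and the results are collected back into a vector.
// Conceptually (illustrative only), for VF == 4 this produces:
//
//   for (lane = 0; lane < 4; ++lane)
//     result[lane] = scalar_op(operand0[lane], operand1[lane], ...);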
+ if (!IsVoidRetTy) + VecResults = UndefValue::get(VectorType::get(Instr->getType(), VF)); + + // For each scalar that we create: + for (unsigned i = 0; i < VF; ++i) { + Instruction *Cloned = Instr->clone(); + if (!IsVoidRetTy) + Cloned->setName(Instr->getName() + ".cloned"); + // Replace the operands of the cloned instrucions with extracted scalars. + for (unsigned op = 0, e = Instr->getNumOperands(); op != e; ++op) { + Value *Op = Params[op]; + // Param is a vector. Need to extract the right lane. + if (Op->getType()->isVectorTy()) + Op = Builder.CreateExtractElement(Op, Builder.getInt32(i)); + Cloned->setOperand(op, Op); + } + + // Place the cloned scalar in the new loop. + Builder.Insert(Cloned); + + // If the original scalar returns a value we need to place it in a vector + // so that future users will be able to use it. + if (!IsVoidRetTy) + VecResults = Builder.CreateInsertElement(VecResults, Cloned, + Builder.getInt32(i)); + } + + if (!IsVoidRetTy) + WidenMap[Instr] = VecResults; +} + +void +SingleBlockLoopVectorizer::createEmptyLoop(LoopVectorizationLegality *Legal) { + /* + In this function we generate a new loop. The new loop will contain + the vectorized instructions while the old loop will continue to run the + scalar remainder. + + [ ] <-- vector loop bypass. + / | + / v +| [ ] <-- vector pre header. +| | +| v +| [ ] \ +| [ ]_| <-- vector loop. +| | + \ v + >[ ] <--- middle-block. + / | + / v +| [ ] <--- new preheader. +| | +| v +| [ ] \ +| [ ]_| <-- old scalar loop to handle remainder. + \ | + \ v + >[ ] <-- exit block. + ... + */ + + OldInduction = Legal->getInduction(); + assert(OldInduction && "We must have a single phi node."); + Type *IdxTy = OldInduction->getType(); + + // Find the loop boundaries. + const SCEV *ExitCount = SE->getExitCount(OrigLoop, OrigLoop->getHeader()); + assert(ExitCount != SE->getCouldNotCompute() && "Invalid loop count"); + + // Get the total trip count from the count by adding 1. + ExitCount = SE->getAddExpr(ExitCount, + SE->getConstant(ExitCount->getType(), 1)); + // We may need to extend the index in case there is a type mismatch. + // We know that the count starts at zero and does not overflow. + // We are using Zext because it should be less expensive. + if (ExitCount->getType() != IdxTy) + ExitCount = SE->getZeroExtendExpr(ExitCount, IdxTy); + + // This is the original scalar-loop preheader. + BasicBlock *BypassBlock = OrigLoop->getLoopPreheader(); + BasicBlock *ExitBlock = OrigLoop->getExitBlock(); + assert(ExitBlock && "Must have an exit block"); + + // The loop index does not have to start at Zero. It starts with this value. + Value *StartIdx = OldInduction->getIncomingValueForBlock(BypassBlock); + + assert(OrigLoop->getNumBlocks() == 1 && "Invalid loop"); + assert(BypassBlock && "Invalid loop structure"); + + BasicBlock *VectorPH = + BypassBlock->splitBasicBlock(BypassBlock->getTerminator(), "vector.ph"); + BasicBlock *VecBody = VectorPH->splitBasicBlock(VectorPH->getTerminator(), + "vector.body"); + + BasicBlock *MiddleBlock = VecBody->splitBasicBlock(VecBody->getTerminator(), + "middle.block"); + BasicBlock *ScalarPH = + MiddleBlock->splitBasicBlock(MiddleBlock->getTerminator(), + "scalar.preheader"); + // Find the induction variable. + BasicBlock *OldBasicBlock = OrigLoop->getHeader(); + + // Use this IR builder to create the loop instructions (Phi, Br, Cmp) + // inside the loop. + Builder.SetInsertPoint(VecBody->getFirstInsertionPt()); + + // Generate the induction variable. 
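// The arithmetic that follows computes CountRoundDown = N - (N % VF), the
// number of original iterations the vector body will cover. For example, with
// a trip count of N == 103 and VF == 8, the vector loop executes 12 iterations
// (covering 96 scalar iterations) and the old scalar loop runs the last 7.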
+ Induction = Builder.CreatePHI(IdxTy, 2, "index");
+ Constant *Step = ConstantInt::get(IdxTy, VF);
+
+ // Expand the trip count and place the new instructions in the preheader.
+ // Notice that the pre-header does not change, only the loop body.
+ SCEVExpander Exp(*SE, "induction");
+ Instruction *Loc = BypassBlock->getTerminator();
+
+ // Count holds the overall loop count (N).
+ Value *Count = Exp.expandCodeFor(ExitCount, Induction->getType(), Loc);
+
+ // Add the start index to the loop count to get the new end index.
+ Value *IdxEnd = BinaryOperator::CreateAdd(Count, StartIdx, "end.idx", Loc);
+
+ // Now we need to generate the expression for N - (N % VF), which is
+ // the part that the vectorized body will execute.
+ Constant *CIVF = ConstantInt::get(IdxTy, VF);
+ Value *R = BinaryOperator::CreateURem(Count, CIVF, "n.mod.vf", Loc);
+ Value *CountRoundDown = BinaryOperator::CreateSub(Count, R, "n.vec", Loc);
+ Value *IdxEndRoundDown = BinaryOperator::CreateAdd(CountRoundDown, StartIdx,
+ "end.idx.rnd.down", Loc);
+
+ // Now, compare the new count to zero. If it is zero, jump to the scalar part.
+ Value *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ,
+ IdxEndRoundDown,
+ StartIdx,
+ "cmp.zero", Loc);
+
+ LoopVectorizationLegality::RuntimePointerCheck *PtrRtCheck =
+ Legal->getRuntimePointerCheck();
+ Value *MemoryRuntimeCheck = 0;
+ if (PtrRtCheck->Need) {
+ unsigned NumPointers = PtrRtCheck->Pointers.size();
+ SmallVector<Value*, 2> Starts;
+ SmallVector<Value*, 2> Ends;
+
+ // Use this type for pointer arithmetic.
+ Type* PtrArithTy = PtrRtCheck->Pointers[0]->getType();
+
+ for (unsigned i=0; i < NumPointers; ++i) {
+ Value *Ptr = PtrRtCheck->Pointers[i];
+ const SCEV *Sc = SE->getSCEV(Ptr);
+
+ if (SE->isLoopInvariant(Sc, OrigLoop)) {
+ DEBUG(dbgs() << "LV: Adding RT check for a loop invariant ptr:" <<
+ *Ptr <<"\n");
+ Starts.push_back(Ptr);
+ Ends.push_back(Ptr);
+ } else {
+ DEBUG(dbgs() << "LV: Adding RT check for range:" << *Ptr <<"\n");
+ const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
+ Value *Start = Exp.expandCodeFor(AR->getStart(), PtrArithTy, Loc);
+ const SCEV *Ex = SE->getExitCount(OrigLoop, OrigLoop->getHeader());
+ const SCEV *ScEnd = AR->evaluateAtIteration(Ex, *SE);
+ assert(!isa<SCEVCouldNotCompute>(ScEnd) && "Invalid scev range.");
+ Value *End = Exp.expandCodeFor(ScEnd, PtrArithTy, Loc);
+ Starts.push_back(Start);
+ Ends.push_back(End);
+ }
+ }
+
+ for (unsigned i=0; i < NumPointers; ++i) {
+ for (unsigned j=i+1; j < NumPointers; ++j) {
+ Value *Cmp0 = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULE,
+ Starts[i], Ends[j], "bound0", Loc);
+ Value *Cmp1 = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULE,
+ Starts[j], Ends[i], "bound1", Loc);
+ Value *IsConflict = BinaryOperator::Create(Instruction::And, Cmp0, Cmp1,
+ "found.conflict", Loc);
+ if (MemoryRuntimeCheck) {
+ MemoryRuntimeCheck = BinaryOperator::Create(Instruction::Or,
+ MemoryRuntimeCheck,
+ IsConflict,
+ "conflict.rdx", Loc);
+ } else {
+ MemoryRuntimeCheck = IsConflict;
+ }
+ }
+ }
+ }// end of need-runtime-check code.
+
+ // If we are using memory runtime checks, include them in the bypass check.
+ if (MemoryRuntimeCheck) {
+ Cmp = BinaryOperator::Create(Instruction::Or, Cmp, MemoryRuntimeCheck,
+ "CntOrMem", Loc);
+ }
+
+ BranchInst::Create(MiddleBlock, VectorPH, Cmp, Loc);
+ // Remove the old terminator.
+ Loc->eraseFromParent();
+
+ // We are going to resume the execution of the scalar loop.
+ // This PHI decides on what number to start.
If we come from the + // vector loop then we need to start with the end index minus the + // index modulo VF. If we come from a bypass edge then we need to start + // from the real start. + PHINode* ResumeIndex = PHINode::Create(IdxTy, 2, "resume.idx", + MiddleBlock->getTerminator()); + ResumeIndex->addIncoming(StartIdx, BypassBlock); + ResumeIndex->addIncoming(IdxEndRoundDown, VecBody); + + // Add a check in the middle block to see if we have completed + // all of the iterations in the first vector loop. + // If (N - N%VF) == N, then we *don't* need to run the remainder. + Value *CmpN = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_EQ, IdxEnd, + ResumeIndex, "cmp.n", + MiddleBlock->getTerminator()); + + BranchInst::Create(ExitBlock, ScalarPH, CmpN, MiddleBlock->getTerminator()); + // Remove the old terminator. + MiddleBlock->getTerminator()->eraseFromParent(); + + // Create i+1 and fill the PHINode. + Value *NextIdx = Builder.CreateAdd(Induction, Step, "index.next"); + Induction->addIncoming(StartIdx, VectorPH); + Induction->addIncoming(NextIdx, VecBody); + // Create the compare. + Value *ICmp = Builder.CreateICmpEQ(NextIdx, IdxEndRoundDown); + Builder.CreateCondBr(ICmp, MiddleBlock, VecBody); + + // Now we have two terminators. Remove the old one from the block. + VecBody->getTerminator()->eraseFromParent(); + + // Fix the scalar body iteration count. + unsigned BlockIdx = OldInduction->getBasicBlockIndex(ScalarPH); + OldInduction->setIncomingValue(BlockIdx, ResumeIndex); + + // Get ready to start creating new instructions into the vectorized body. + Builder.SetInsertPoint(VecBody->getFirstInsertionPt()); + + // Register the new loop. + Loop* Lp = new Loop(); + LPM->insertLoop(Lp, OrigLoop->getParentLoop()); + + Lp->addBasicBlockToLoop(VecBody, LI->getBase()); + + Loop *ParentLoop = OrigLoop->getParentLoop(); + if (ParentLoop) { + ParentLoop->addBasicBlockToLoop(ScalarPH, LI->getBase()); + ParentLoop->addBasicBlockToLoop(VectorPH, LI->getBase()); + ParentLoop->addBasicBlockToLoop(MiddleBlock, LI->getBase()); + } + + // Save the state. + LoopVectorPreHeader = VectorPH; + LoopScalarPreHeader = ScalarPH; + LoopMiddleBlock = MiddleBlock; + LoopExitBlock = ExitBlock; + LoopVectorBody = VecBody; + LoopScalarBody = OldBasicBlock; + LoopBypassBlock = BypassBlock; +} + +/// This function returns the identity element (or neutral element) for +/// the operation K. +static unsigned +getReductionIdentity(LoopVectorizationLegality::ReductionKind K) { + switch (K) { + case LoopVectorizationLegality::IntegerXor: + case LoopVectorizationLegality::IntegerAdd: + case LoopVectorizationLegality::IntegerOr: + // Adding, Xoring, Oring zero to a number does not change it. + return 0; + case LoopVectorizationLegality::IntegerMult: + // Multiplying a number by 1 does not change it. + return 1; + case LoopVectorizationLegality::IntegerAnd: + // AND-ing a number with an all-1 value does not change it. + return -1; + default: + llvm_unreachable("Unknown reduction kind"); + } +} + +void +SingleBlockLoopVectorizer::vectorizeLoop(LoopVectorizationLegality *Legal) { + //===------------------------------------------------===// + // + // Notice: any optimization or new instruction that go + // into the code below should be also be implemented in + // the cost-model. 
+ // + //===------------------------------------------------===// + typedef SmallVector PhiVector; + BasicBlock &BB = *OrigLoop->getHeader(); + Constant *Zero = ConstantInt::get( + IntegerType::getInt32Ty(BB.getContext()), 0); + + // In order to support reduction variables we need to be able to vectorize + // Phi nodes. Phi nodes have cycles, so we need to vectorize them in two + // steages. First, we create a new vector PHI node with no incoming edges. + // We use this value when we vectorize all of the instructions that use the + // PHI. Next, after all of the instructions in the block are complete we + // add the new incoming edges to the PHI. At this point all of the + // instructions in the basic block are vectorized, so we can use them to + // construct the PHI. + PhiVector PHIsToFix; + + // For each instruction in the old loop. + for (BasicBlock::iterator it = BB.begin(), e = BB.end(); it != e; ++it) { + Instruction *Inst = it; + + switch (Inst->getOpcode()) { + case Instruction::Br: + // Nothing to do for PHIs and BR, since we already took care of the + // loop control flow instructions. + continue; + case Instruction::PHI:{ + PHINode* P = cast(Inst); + // Special handling for the induction var. + if (OldInduction == Inst) + continue; + // This is phase one of vectorizing PHIs. + // This has to be a reduction variable. + assert(Legal->getReductionVars()->count(P) && "Not a Reduction"); + Type *VecTy = VectorType::get(Inst->getType(), VF); + WidenMap[Inst] = Builder.CreatePHI(VecTy, 2, "vec.phi"); + PHIsToFix.push_back(P); + continue; + } + case Instruction::Add: + case Instruction::FAdd: + case Instruction::Sub: + case Instruction::FSub: + case Instruction::Mul: + case Instruction::FMul: + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::FDiv: + case Instruction::URem: + case Instruction::SRem: + case Instruction::FRem: + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: { + // Just widen binops. + BinaryOperator *BinOp = dyn_cast(Inst); + Value *A = getVectorValue(Inst->getOperand(0)); + Value *B = getVectorValue(Inst->getOperand(1)); + + // Use this vector value for all users of the original instruction. + Value *V = Builder.CreateBinOp(BinOp->getOpcode(), A, B); + WidenMap[Inst] = V; + + // Update the NSW, NUW and Exact flags. + BinaryOperator *VecOp = cast(V); + if (isa(BinOp)) { + VecOp->setHasNoSignedWrap(BinOp->hasNoSignedWrap()); + VecOp->setHasNoUnsignedWrap(BinOp->hasNoUnsignedWrap()); + } + if (isa(VecOp)) + VecOp->setIsExact(BinOp->isExact()); + break; + } + case Instruction::Select: { + // Widen selects. + // If the selector is loop invariant we can create a select + // instruction with a scalar condition. Otherwise, use vector-select. + Value *Cond = Inst->getOperand(0); + bool InvariantCond = SE->isLoopInvariant(SE->getSCEV(Cond), OrigLoop); + + // The condition can be loop invariant but still defined inside the + // loop. This means that we can't just use the original 'cond' value. + // We have to take the 'vectorized' value and pick the first lane. + // Instcombine will make this a no-op. 
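// In other words: the condition is widened like any other value, and when it
// is loop invariant we take lane 0 back out of the widened vector, since all
// lanes hold the same value, and feed that scalar to the select.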
+ Cond = getVectorValue(Cond); + if (InvariantCond) + Cond = Builder.CreateExtractElement(Cond, Builder.getInt32(0)); + + Value *Op0 = getVectorValue(Inst->getOperand(1)); + Value *Op1 = getVectorValue(Inst->getOperand(2)); + WidenMap[Inst] = Builder.CreateSelect(Cond, Op0, Op1); + break; + } + + case Instruction::ICmp: + case Instruction::FCmp: { + // Widen compares. Generate vector compares. + bool FCmp = (Inst->getOpcode() == Instruction::FCmp); + CmpInst *Cmp = dyn_cast(Inst); + Value *A = getVectorValue(Inst->getOperand(0)); + Value *B = getVectorValue(Inst->getOperand(1)); + if (FCmp) + WidenMap[Inst] = Builder.CreateFCmp(Cmp->getPredicate(), A, B); + else + WidenMap[Inst] = Builder.CreateICmp(Cmp->getPredicate(), A, B); + break; + } + + case Instruction::Store: { + // Attempt to issue a wide store. + StoreInst *SI = dyn_cast(Inst); + Type *StTy = VectorType::get(SI->getValueOperand()->getType(), VF); + Value *Ptr = SI->getPointerOperand(); + unsigned Alignment = SI->getAlignment(); + + assert(!Legal->isUniform(Ptr) && + "We do not allow storing to uniform addresses"); + + GetElementPtrInst *Gep = dyn_cast(Ptr); + + // This store does not use GEPs. + if (!Legal->isConsecutiveGep(Gep)) { + scalarizeInstruction(Inst); + break; + } + + // The last index does not have to be the induction. It can be + // consecutive and be a function of the index. For example A[I+1]; + unsigned NumOperands = Gep->getNumOperands(); + Value *LastIndex = getVectorValue(Gep->getOperand(NumOperands - 1)); + LastIndex = Builder.CreateExtractElement(LastIndex, Zero); + + // Create the new GEP with the new induction variable. + GetElementPtrInst *Gep2 = cast(Gep->clone()); + Gep2->setOperand(NumOperands - 1, LastIndex); + Ptr = Builder.Insert(Gep2); + Ptr = Builder.CreateBitCast(Ptr, StTy->getPointerTo()); + Value *Val = getVectorValue(SI->getValueOperand()); + Builder.CreateStore(Val, Ptr)->setAlignment(Alignment); + break; + } + case Instruction::Load: { + // Attempt to issue a wide load. + LoadInst *LI = dyn_cast(Inst); + Type *RetTy = VectorType::get(LI->getType(), VF); + Value *Ptr = LI->getPointerOperand(); + unsigned Alignment = LI->getAlignment(); + GetElementPtrInst *Gep = dyn_cast(Ptr); + + // If we don't have a gep, or that the pointer is loop invariant, + // scalarize the load. + if (!Gep || Legal->isUniform(Gep) || !Legal->isConsecutiveGep(Gep)) { + scalarizeInstruction(Inst); + break; + } + + // The last index does not have to be the induction. It can be + // consecutive and be a function of the index. For example A[I+1]; + unsigned NumOperands = Gep->getNumOperands(); + Value *LastIndex = getVectorValue(Gep->getOperand(NumOperands -1)); + LastIndex = Builder.CreateExtractElement(LastIndex, Zero); + + // Create the new GEP with the new induction variable. + GetElementPtrInst *Gep2 = cast(Gep->clone()); + Gep2->setOperand(NumOperands - 1, LastIndex); + Ptr = Builder.Insert(Gep2); + Ptr = Builder.CreateBitCast(Ptr, RetTy->getPointerTo()); + LI = Builder.CreateLoad(Ptr); + LI->setAlignment(Alignment); + // Use this vector value for all users of the load. + WidenMap[Inst] = LI; + break; + } + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::FPToUI: + case Instruction::FPToSI: + case Instruction::FPExt: + case Instruction::PtrToInt: + case Instruction::IntToPtr: + case Instruction::SIToFP: + case Instruction::UIToFP: + case Instruction::Trunc: + case Instruction::FPTrunc: + case Instruction::BitCast: { + /// Vectorize bitcasts. 
+ CastInst *CI = dyn_cast(Inst); + Value *A = getVectorValue(Inst->getOperand(0)); + Type *DestTy = VectorType::get(CI->getType()->getScalarType(), VF); + WidenMap[Inst] = Builder.CreateCast(CI->getOpcode(), A, DestTy); + break; + } + + default: + /// All other instructions are unsupported. Scalarize them. + scalarizeInstruction(Inst); + break; + }// end of switch. + }// end of for_each instr. + + // At this point every instruction in the original loop is widended to + // a vector form. We are almost done. Now, we need to fix the PHI nodes + // that we vectorized. The PHI nodes are currently empty because we did + // not want to introduce cycles. Notice that the remaining PHI nodes + // that we need to fix are reduction variables. + + // Create the 'reduced' values for each of the induction vars. + // The reduced values are the vector values that we scalarize and combine + // after the loop is finished. + for (PhiVector::iterator it = PHIsToFix.begin(), e = PHIsToFix.end(); + it != e; ++it) { + PHINode *RdxPhi = *it; + PHINode *VecRdxPhi = dyn_cast(WidenMap[RdxPhi]); + assert(RdxPhi && "Unable to recover vectorized PHI"); + + // Find the reduction variable descriptor. + assert(Legal->getReductionVars()->count(RdxPhi) && + "Unable to find the reduction variable"); + LoopVectorizationLegality::ReductionDescriptor RdxDesc = + (*Legal->getReductionVars())[RdxPhi]; + + // We need to generate a reduction vector from the incoming scalar. + // To do so, we need to generate the 'identity' vector and overide + // one of the elements with the incoming scalar reduction. We need + // to do it in the vector-loop preheader. + Builder.SetInsertPoint(LoopBypassBlock->getTerminator()); + + // This is the vector-clone of the value that leaves the loop. + Value *VectorExit = getVectorValue(RdxDesc.LoopExitInstr); + Type *VecTy = VectorExit->getType(); + + // Find the reduction identity variable. Zero for addition, or, xor, + // one for multiplication, -1 for And. + Constant *Identity = getUniformVector(getReductionIdentity(RdxDesc.Kind), + VecTy->getScalarType()); + + // This vector is the Identity vector where the first element is the + // incoming scalar reduction. + Value *VectorStart = Builder.CreateInsertElement(Identity, + RdxDesc.StartValue, Zero); + + + // Fix the vector-loop phi. + // We created the induction variable so we know that the + // preheader is the first entry. + BasicBlock *VecPreheader = Induction->getIncomingBlock(0); + + // Reductions do not have to start at zero. They can start with + // any loop invariant values. + VecRdxPhi->addIncoming(VectorStart, VecPreheader); + unsigned SelfEdgeIdx = (RdxPhi)->getBasicBlockIndex(LoopScalarBody); + Value *Val = getVectorValue(RdxPhi->getIncomingValue(SelfEdgeIdx)); + VecRdxPhi->addIncoming(Val, LoopVectorBody); + + // Before each round, move the insertion point right between + // the PHIs and the values we are going to write. + // This allows us to write both PHINodes and the extractelement + // instructions. + Builder.SetInsertPoint(LoopMiddleBlock->getFirstInsertionPt()); + + // This PHINode contains the vectorized reduction variable, or + // the initial value vector, if we bypass the vector loop. + PHINode *NewPhi = Builder.CreatePHI(VecTy, 2, "rdx.vec.exit.phi"); + NewPhi->addIncoming(VectorStart, LoopBypassBlock); + NewPhi->addIncoming(getVectorValue(RdxDesc.LoopExitInstr), LoopVectorBody); + + // Extract the first scalar. 
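// What follows is a horizontal reduction: the exit PHI holds VF partial
// results (lane 0 also carries the incoming scalar start value through the
// identity vector built above), and the lanes are folded into one scalar with
// the reduction operation. For an add reduction with VF == 4 this is simply
//   Scalar0 = lane0 + lane1 + lane2 + lane3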
+ Value *Scalar0 = + Builder.CreateExtractElement(NewPhi, Builder.getInt32(0)); + // Extract and reduce the remaining vector elements. + for (unsigned i=1; i < VF; ++i) { + Value *Scalar1 = + Builder.CreateExtractElement(NewPhi, Builder.getInt32(i)); + switch (RdxDesc.Kind) { + case LoopVectorizationLegality::IntegerAdd: + Scalar0 = Builder.CreateAdd(Scalar0, Scalar1); + break; + case LoopVectorizationLegality::IntegerMult: + Scalar0 = Builder.CreateMul(Scalar0, Scalar1); + break; + case LoopVectorizationLegality::IntegerOr: + Scalar0 = Builder.CreateOr(Scalar0, Scalar1); + break; + case LoopVectorizationLegality::IntegerAnd: + Scalar0 = Builder.CreateAnd(Scalar0, Scalar1); + break; + case LoopVectorizationLegality::IntegerXor: + Scalar0 = Builder.CreateXor(Scalar0, Scalar1); + break; + default: + llvm_unreachable("Unknown reduction operation"); + } + } + + // Now, we need to fix the users of the reduction variable + // inside and outside of the scalar remainder loop. + // We know that the loop is in LCSSA form. We need to update the + // PHI nodes in the exit blocks. + for (BasicBlock::iterator LEI = LoopExitBlock->begin(), + LEE = LoopExitBlock->end(); LEI != LEE; ++LEI) { + PHINode *LCSSAPhi = dyn_cast(LEI); + if (!LCSSAPhi) continue; + + // All PHINodes need to have a single entry edge, or two if + // we already fixed them. + assert(LCSSAPhi->getNumIncomingValues() < 3 && "Invalid LCSSA PHI"); + + // We found our reduction value exit-PHI. Update it with the + // incoming bypass edge. + if (LCSSAPhi->getIncomingValue(0) == RdxDesc.LoopExitInstr) { + // Add an edge coming from the bypass. + LCSSAPhi->addIncoming(Scalar0, LoopMiddleBlock); + break; + } + }// end of the LCSSA phi scan. + + // Fix the scalar loop reduction variable with the incoming reduction sum + // from the vector body and from the backedge value. + int IncomingEdgeBlockIdx = (RdxPhi)->getBasicBlockIndex(LoopScalarBody); + int SelfEdgeBlockIdx = (IncomingEdgeBlockIdx ? 0 : 1); // The other block. + (RdxPhi)->setIncomingValue(SelfEdgeBlockIdx, Scalar0); + (RdxPhi)->setIncomingValue(IncomingEdgeBlockIdx, RdxDesc.LoopExitInstr); + }// end of for each redux variable. +} + +void SingleBlockLoopVectorizer::updateAnalysis() { + // The original basic block. + SE->forgetLoop(OrigLoop); + + // Update the dominator tree information. + assert(DT->properlyDominates(LoopBypassBlock, LoopExitBlock) && + "Entry does not dominate exit."); + + DT->addNewBlock(LoopVectorPreHeader, LoopBypassBlock); + DT->addNewBlock(LoopVectorBody, LoopVectorPreHeader); + DT->addNewBlock(LoopMiddleBlock, LoopBypassBlock); + DT->addNewBlock(LoopScalarPreHeader, LoopMiddleBlock); + DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader); + DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock); + + DEBUG(DT->verifyAnalysis()); +} + +bool LoopVectorizationLegality::canVectorize() { + if (!TheLoop->getLoopPreheader()) { + assert(false && "No preheader!!"); + DEBUG(dbgs() << "LV: Loop not normalized." << "\n"); + return false; + } + + // We can only vectorize single basic block loops. + unsigned NumBlocks = TheLoop->getNumBlocks(); + if (NumBlocks != 1) { + DEBUG(dbgs() << "LV: Too many blocks:" << NumBlocks << "\n"); + return false; + } + + // We need to have a loop header. + BasicBlock *BB = TheLoop->getHeader(); + DEBUG(dbgs() << "LV: Found a loop: " << BB->getName() << "\n"); + + // ScalarEvolution needs to be able to find the exit count. 
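// That is, the trip count must be computable before the loop runs. A counted
// loop such as "for (i = 0; i < n; ++i)" qualifies; a loop that walks a list
// until it reaches a null pointer does not, and is rejected here.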
+ const SCEV *ExitCount = SE->getExitCount(TheLoop, BB); + if (ExitCount == SE->getCouldNotCompute()) { + DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n"); + return false; + } + + // Do not loop-vectorize loops with a tiny trip count. + unsigned TC = SE->getSmallConstantTripCount(TheLoop, BB); + if (TC > 0u && TC < TinyTripCountThreshold) { + DEBUG(dbgs() << "LV: Found a loop with a very small trip count. " << + "This loop is not worth vectorizing.\n"); + return false; + } + + // Go over each instruction and look at memory deps. + if (!canVectorizeBlock(*BB)) { + DEBUG(dbgs() << "LV: Can't vectorize this loop header\n"); + return false; + } + + DEBUG(dbgs() << "LV: We can vectorize this loop" << + (PtrRtCheck.Need ? " (with a runtime bound check)" : "") + <<"!\n"); + + // Okay! We can vectorize. At this point we don't have any other mem analysis + // which may limit our maximum vectorization factor, so just return true with + // no restrictions. + return true; +} + +bool LoopVectorizationLegality::canVectorizeBlock(BasicBlock &BB) { + // Scan the instructions in the block and look for hazards. + for (BasicBlock::iterator it = BB.begin(), e = BB.end(); it != e; ++it) { + Instruction *I = it; + + PHINode *Phi = dyn_cast(I); + if (Phi) { + // This should not happen because the loop should be normalized. + if (Phi->getNumIncomingValues() != 2) { + DEBUG(dbgs() << "LV: Found an invalid PHI.\n"); + return false; + } + // We only look at integer phi nodes. + if (!Phi->getType()->isIntegerTy()) { + DEBUG(dbgs() << "LV: Found an non-int PHI.\n"); + return false; + } + + if (isInductionVariable(Phi)) { + if (Induction) { + DEBUG(dbgs() << "LV: Found too many inductions."<< *Phi <<"\n"); + return false; + } + DEBUG(dbgs() << "LV: Found the induction PHI."<< *Phi <<"\n"); + Induction = Phi; + continue; + } + if (AddReductionVar(Phi, IntegerAdd)) { + DEBUG(dbgs() << "LV: Found an ADD reduction PHI."<< *Phi <<"\n"); + continue; + } + if (AddReductionVar(Phi, IntegerMult)) { + DEBUG(dbgs() << "LV: Found a MUL reduction PHI."<< *Phi <<"\n"); + continue; + } + if (AddReductionVar(Phi, IntegerOr)) { + DEBUG(dbgs() << "LV: Found an OR reduction PHI."<< *Phi <<"\n"); + continue; + } + if (AddReductionVar(Phi, IntegerAnd)) { + DEBUG(dbgs() << "LV: Found an AND reduction PHI."<< *Phi <<"\n"); + continue; + } + if (AddReductionVar(Phi, IntegerXor)) { + DEBUG(dbgs() << "LV: Found a XOR reduction PHI."<< *Phi <<"\n"); + continue; + } + + DEBUG(dbgs() << "LV: Found an unidentified PHI."<< *Phi <<"\n"); + return false; + }// end of PHI handling + + // We still don't handle functions. + CallInst *CI = dyn_cast(I); + if (CI) { + DEBUG(dbgs() << "LV: Found a call site.\n"); + return false; + } + + // We do not re-vectorize vectors. + if (!VectorType::isValidElementType(I->getType()) && + !I->getType()->isVoidTy()) { + DEBUG(dbgs() << "LV: Found unvectorizable type." << "\n"); + return false; + } + + // Reduction instructions are allowed to have exit users. + // All other instructions must not have external users. + if (!AllowedExit.count(I)) + //Check that all of the users of the loop are inside the BB. + for (Value::use_iterator it = I->use_begin(), e = I->use_end(); + it != e; ++it) { + Instruction *U = cast(*it); + // This user may be a reduction exit value. + BasicBlock *Parent = U->getParent(); + if (Parent != &BB) { + DEBUG(dbgs() << "LV: Found an outside user for : "<< *U << "\n"); + return false; + } + } + } // next instr. 
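// The uniform-value walk further down starts from the block's branch
// condition and follows the operands backwards within the block. Everything
// reached this way remains scalar ("uniform") after vectorization, typically
// the compare against the trip count and the increment feeding it, and the
// cost model later prices such instructions as scalar via
// isUniformAfterVectorization().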
+ + if (!Induction) { + DEBUG(dbgs() << "LV: Did not find an induction var.\n"); + return false; + } + + // Don't vectorize if the memory dependencies do not allow vectorization. + if (!canVectorizeMemory(BB)) + return false; + + // We now know that the loop is vectorizable! + // Collect variables that will remain uniform after vectorization. + std::vector Worklist; + + // Start with the conditional branch and walk up the block. + Worklist.push_back(BB.getTerminator()->getOperand(0)); + + while (Worklist.size()) { + Instruction *I = dyn_cast(Worklist.back()); + Worklist.pop_back(); + // Look at instructions inside this block. + if (!I) continue; + if (I->getParent() != &BB) continue; + + // Stop when reaching PHI nodes. + if (isa(I)) { + assert(I == Induction && "Found a uniform PHI that is not the induction"); + break; + } + + // This is a known uniform. + Uniforms.insert(I); + + // Insert all operands. + for (int i=0, Op = I->getNumOperands(); i < Op; ++i) { + Worklist.push_back(I->getOperand(i)); + } + } + + return true; +} + +bool LoopVectorizationLegality::canVectorizeMemory(BasicBlock &BB) { + typedef SmallVector ValueVector; + typedef SmallPtrSet ValueSet; + // Holds the Load and Store *instructions*. + ValueVector Loads; + ValueVector Stores; + PtrRtCheck.Pointers.clear(); + PtrRtCheck.Need = false; + + // Scan the BB and collect legal loads and stores. + for (BasicBlock::iterator it = BB.begin(), e = BB.end(); it != e; ++it) { + Instruction *I = it; + + // If this is a load, save it. If this instruction can read from memory + // but is not a load, then we quit. Notice that we don't handle function + // calls that read or write. + if (I->mayReadFromMemory()) { + LoadInst *Ld = dyn_cast(I); + if (!Ld) return false; + if (!Ld->isSimple()) { + DEBUG(dbgs() << "LV: Found a non-simple load.\n"); + return false; + } + Loads.push_back(Ld); + continue; + } + + // Save store instructions. Abort if other instructions write to memory. + if (I->mayWriteToMemory()) { + StoreInst *St = dyn_cast(I); + if (!St) return false; + if (!St->isSimple()) { + DEBUG(dbgs() << "LV: Found a non-simple store.\n"); + return false; + } + Stores.push_back(St); + } + } // next instr. + + // Now we have two lists that hold the loads and the stores. + // Next, we find the pointers that they use. + + // Check if we see any stores. If there are no stores, then we don't + // care if the pointers are *restrict*. + if (!Stores.size()) { + DEBUG(dbgs() << "LV: Found a read-only loop!\n"); + return true; + } + + // Holds the read and read-write *pointers* that we find. + ValueVector Reads; + ValueVector ReadWrites; + + // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects + // multiple times on the same object. If the ptr is accessed twice, once + // for read and once for write, it will only appear once (on the write + // list). This is okay, since we are going to check for conflicts between + // writes and between reads and writes, but not between reads and reads. + ValueSet Seen; + + ValueVector::iterator I, IE; + for (I = Stores.begin(), IE = Stores.end(); I != IE; ++I) { + StoreInst *ST = dyn_cast(*I); + assert(ST && "Bad StoreInst"); + Value* Ptr = ST->getPointerOperand(); + + if (isUniform(Ptr)) { + DEBUG(dbgs() << "LV: We don't allow storing to uniform addresses\n"); + return false; + } + + // If we did *not* see this pointer before, insert it to + // the read-write list. At this phase it is only a 'write' list. 
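// Seen.insert(Ptr) returns true only the first time a pointer value is seen,
// so each pointer is classified exactly once: a store claims it for the
// read-write list here, and a later load of the same pointer will not add it
// to the read-only list again.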
+ if (Seen.insert(Ptr)) + ReadWrites.push_back(Ptr); + } + + for (I = Loads.begin(), IE = Loads.end(); I != IE; ++I) { + LoadInst *LD = dyn_cast(*I); + assert(LD && "Bad LoadInst"); + Value* Ptr = LD->getPointerOperand(); + // If we did *not* see this pointer before, insert it to the + // read list. If we *did* see it before, then it is already in + // the read-write list. This allows us to vectorize expressions + // such as A[i] += x; Because the address of A[i] is a read-write + // pointer. This only works if the index of A[i] is consecutive. + // If the address of i is unknown (for example A[B[i]]) then we may + // read a few words, modify, and write a few words, and some of the + // words may be written to the same address. + if (Seen.insert(Ptr) || !isConsecutiveGep(Ptr)) + Reads.push_back(Ptr); + } + + // If we write (or read-write) to a single destination and there are no + // other reads in this loop then is it safe to vectorize. + if (ReadWrites.size() == 1 && Reads.size() == 0) { + DEBUG(dbgs() << "LV: Found a write-only loop!\n"); + return true; + } + + // Find pointers with computable bounds. We are going to use this information + // to place a runtime bound check. + bool RT = true; + for (I = ReadWrites.begin(), IE = ReadWrites.end(); I != IE; ++I) + if (hasComputableBounds(*I)) { + PtrRtCheck.Pointers.push_back(*I); + DEBUG(dbgs() << "LV: Found a runtime check ptr:" << **I <<"\n"); + } else { + RT = false; + break; + } + for (I = Reads.begin(), IE = Reads.end(); I != IE; ++I) + if (hasComputableBounds(*I)) { + PtrRtCheck.Pointers.push_back(*I); + DEBUG(dbgs() << "LV: Found a runtime check ptr:" << **I <<"\n"); + } else { + RT = false; + break; + } + + // Check that we did not collect too many pointers or found a + // unsizeable pointer. + if (!RT || PtrRtCheck.Pointers.size() > RuntimeMemoryCheckThreshold) { + PtrRtCheck.Pointers.clear(); + RT = false; + } + + PtrRtCheck.Need = RT; + + if (RT) { + DEBUG(dbgs() << "LV: We can perform a memory runtime check if needed.\n"); + } + + // Now that the pointers are in two lists (Reads and ReadWrites), we + // can check that there are no conflicts between each of the writes and + // between the writes to the reads. + ValueSet WriteObjects; + ValueVector TempObjects; + + // Check that the read-writes do not conflict with other read-write + // pointers. + for (I = ReadWrites.begin(), IE = ReadWrites.end(); I != IE; ++I) { + GetUnderlyingObjects(*I, TempObjects, DL); + for (ValueVector::iterator it=TempObjects.begin(), e=TempObjects.end(); + it != e; ++it) { + if (!isIdentifiedObject(*it)) { + DEBUG(dbgs() << "LV: Found an unidentified write ptr:"<< **it <<"\n"); + return RT; + } + if (!WriteObjects.insert(*it)) { + DEBUG(dbgs() << "LV: Found a possible write-write reorder:" + << **it <<"\n"); + return RT; + } + } + TempObjects.clear(); + } + + /// Check that the reads don't conflict with the read-writes. + for (I = Reads.begin(), IE = Reads.end(); I != IE; ++I) { + GetUnderlyingObjects(*I, TempObjects, DL); + for (ValueVector::iterator it=TempObjects.begin(), e=TempObjects.end(); + it != e; ++it) { + if (!isIdentifiedObject(*it)) { + DEBUG(dbgs() << "LV: Found an unidentified read ptr:"<< **it <<"\n"); + return RT; + } + if (WriteObjects.count(*it)) { + DEBUG(dbgs() << "LV: Found a possible read/write reorder:" + << **it <<"\n"); + return RT; + } + } + TempObjects.clear(); + } + + // It is safe to vectorize and we don't need any runtime checks. 
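// To recap the decision above: a read-only loop, or a loop with a single
// write and no reads, is always safe; otherwise the underlying objects of all
// pointers must be identified and pairwise distinct (for example two separate
// local arrays). When that proof fails but every pointer has computable
// bounds, we fall back to the runtime overlap checks collected in PtrRtCheck,
// which is the case for a loop like "a[i] = b[i] + k" when a and b are
// incoming, possibly aliasing, pointers.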
+ DEBUG(dbgs() << "LV: We don't need a runtime memory check.\n"); + PtrRtCheck.Pointers.clear(); + PtrRtCheck.Need = false; + return true; +} + +bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi, + ReductionKind Kind) { + if (Phi->getNumIncomingValues() != 2) + return false; + + // Find the possible incoming reduction variable. + BasicBlock *BB = Phi->getParent(); + int SelfEdgeIdx = Phi->getBasicBlockIndex(BB); + int InEdgeBlockIdx = (SelfEdgeIdx ? 0 : 1); // The other entry. + Value *RdxStart = Phi->getIncomingValue(InEdgeBlockIdx); + + // ExitInstruction is the single value which is used outside the loop. + // We only allow for a single reduction value to be used outside the loop. + // This includes users of the reduction, variables (which form a cycle + // which ends in the phi node). + Instruction *ExitInstruction = 0; + + // Iter is our iterator. We start with the PHI node and scan for all of the + // users of this instruction. All users must be instructions which can be + // used as reduction variables (such as ADD). We may have a single + // out-of-block user. They cycle must end with the original PHI. + // Also, we can't have multiple block-local users. + Instruction *Iter = Phi; + while (true) { + // Any reduction instr must be of one of the allowed kinds. + if (!isReductionInstr(Iter, Kind)) + return false; + + // Did we found a user inside this block ? + bool FoundInBlockUser = false; + // Did we reach the initial PHI node ? + bool FoundStartPHI = false; + + // If the instruction has no users then this is a broken + // chain and can't be a reduction variable. + if (Iter->use_empty()) + return false; + + // For each of the *users* of iter. + for (Value::use_iterator it = Iter->use_begin(), e = Iter->use_end(); + it != e; ++it) { + Instruction *U = cast(*it); + // We already know that the PHI is a user. + if (U == Phi) { + FoundStartPHI = true; + continue; + } + // Check if we found the exit user. + BasicBlock *Parent = U->getParent(); + if (Parent != BB) { + // We must have a single exit instruction. + if (ExitInstruction != 0) + return false; + ExitInstruction = Iter; + } + // We can't have multiple inside users. + if (FoundInBlockUser) + return false; + FoundInBlockUser = true; + Iter = U; + } + + // We found a reduction var if we have reached the original + // phi node and we only have a single instruction with out-of-loop + // users. + if (FoundStartPHI && ExitInstruction) { + // This instruction is allowed to have out-of-loop users. + AllowedExit.insert(ExitInstruction); + + // Save the description of this reduction variable. + ReductionDescriptor RD(RdxStart, ExitInstruction, Kind); + Reductions[Phi] = RD; + return true; + } + } +} + +bool +LoopVectorizationLegality::isReductionInstr(Instruction *I, + ReductionKind Kind) { + switch (I->getOpcode()) { + default: + return false; + case Instruction::PHI: + // possibly. + return true; + case Instruction::Add: + case Instruction::Sub: + return Kind == IntegerAdd; + case Instruction::Mul: + case Instruction::UDiv: + case Instruction::SDiv: + return Kind == IntegerMult; + case Instruction::And: + return Kind == IntegerAnd; + case Instruction::Or: + return Kind == IntegerOr; + case Instruction::Xor: + return Kind == IntegerXor; + } +} + +bool LoopVectorizationLegality::isInductionVariable(PHINode *Phi) { + // Check that the PHI is consecutive and starts at zero. 
+ const SCEV *PhiScev = SE->getSCEV(Phi); + const SCEVAddRecExpr *AR = dyn_cast(PhiScev); + if (!AR) { + DEBUG(dbgs() << "LV: PHI is not a poly recurrence.\n"); + return false; + } + const SCEV *Step = AR->getStepRecurrence(*SE); + + if (!Step->isOne()) { + DEBUG(dbgs() << "LV: PHI stride does not equal one.\n"); + return false; + } + return true; +} + +bool LoopVectorizationLegality::hasComputableBounds(Value *Ptr) { + const SCEV *PhiScev = SE->getSCEV(Ptr); + const SCEVAddRecExpr *AR = dyn_cast(PhiScev); + if (!AR) + return false; + + return AR->isAffine(); +} + +unsigned +LoopVectorizationCostModel::findBestVectorizationFactor(unsigned VF) { + if (!VTTI) { + DEBUG(dbgs() << "LV: No vector target information. Not vectorizing. \n"); + return 1; + } + + float Cost = expectedCost(1); + unsigned Width = 1; + DEBUG(dbgs() << "LV: Scalar loop costs: "<< (int)Cost << ".\n"); + for (unsigned i=2; i <= VF; i*=2) { + // Notice that the vector loop needs to be executed less times, so + // we need to divide the cost of the vector loops by the width of + // the vector elements. + float VectorCost = expectedCost(i) / (float)i; + DEBUG(dbgs() << "LV: Vector loop of width "<< i << " costs: " << + (int)VectorCost << ".\n"); + if (VectorCost < Cost) { + Cost = VectorCost; + Width = i; + } + } + + DEBUG(dbgs() << "LV: Selecting VF = : "<< Width << ".\n"); + return Width; +} + +unsigned LoopVectorizationCostModel::expectedCost(unsigned VF) { + // We can only estimate the cost of single basic block loops. + assert(1 == TheLoop->getNumBlocks() && "Too many blocks in loop"); + + BasicBlock *BB = TheLoop->getHeader(); + unsigned Cost = 0; + + // For each instruction in the old loop. + for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) { + Instruction *Inst = it; + unsigned C = getInstructionCost(Inst, VF); + Cost += C; + DEBUG(dbgs() << "LV: Found an estimated cost of "<< C <<" for VF "<< VF << + " For instruction: "<< *Inst << "\n"); + } + + return Cost; +} + +unsigned +LoopVectorizationCostModel::getInstructionCost(Instruction *I, unsigned VF) { + assert(VTTI && "Invalid vector target transformation info"); + + // If we know that this instruction will remain uniform, check the cost of + // the scalar version. + if (Legal->isUniformAfterVectorization(I)) + VF = 1; + + Type *RetTy = I->getType(); + Type *VectorTy = ToVectorTy(RetTy, VF); + + + // TODO: We need to estimate the cost of intrinsic calls. + switch (I->getOpcode()) { + case Instruction::GetElementPtr: + // We mark this instruction as zero-cost because scalar GEPs are usually + // lowered to the intruction addressing mode. At the moment we don't + // generate vector geps. 
+ return 0; + case Instruction::Br: { + return VTTI->getCFInstrCost(I->getOpcode()); + } + case Instruction::PHI: + return 0; + case Instruction::Add: + case Instruction::FAdd: + case Instruction::Sub: + case Instruction::FSub: + case Instruction::Mul: + case Instruction::FMul: + case Instruction::UDiv: + case Instruction::SDiv: + case Instruction::FDiv: + case Instruction::URem: + case Instruction::SRem: + case Instruction::FRem: + case Instruction::Shl: + case Instruction::LShr: + case Instruction::AShr: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: { + return VTTI->getArithmeticInstrCost(I->getOpcode(), VectorTy); + } + case Instruction::Select: { + SelectInst *SI = cast(I); + const SCEV *CondSCEV = SE->getSCEV(SI->getCondition()); + bool ScalarCond = (SE->isLoopInvariant(CondSCEV, TheLoop)); + Type *CondTy = SI->getCondition()->getType(); + if (ScalarCond) + CondTy = VectorType::get(CondTy, VF); + + return VTTI->getCmpSelInstrCost(I->getOpcode(), VectorTy, CondTy); + } + case Instruction::ICmp: + case Instruction::FCmp: { + Type *ValTy = I->getOperand(0)->getType(); + VectorTy = ToVectorTy(ValTy, VF); + return VTTI->getCmpSelInstrCost(I->getOpcode(), VectorTy); + } + case Instruction::Store: { + StoreInst *SI = cast(I); + Type *ValTy = SI->getValueOperand()->getType(); + VectorTy = ToVectorTy(ValTy, VF); + + if (VF == 1) + return VTTI->getMemoryOpCost(I->getOpcode(), ValTy, + SI->getAlignment(), SI->getPointerAddressSpace()); + + // Scalarized stores. + if (!Legal->isConsecutiveGep(SI->getPointerOperand())) { + unsigned Cost = 0; + unsigned ExtCost = VTTI->getInstrCost(Instruction::ExtractElement, + ValTy); + // The cost of extracting from the value vector. + Cost += VF * (ExtCost); + // The cost of the scalar stores. + Cost += VF * VTTI->getMemoryOpCost(I->getOpcode(), + ValTy->getScalarType(), + SI->getAlignment(), + SI->getPointerAddressSpace()); + return Cost; + } + + // Wide stores. + return VTTI->getMemoryOpCost(I->getOpcode(), VectorTy, SI->getAlignment(), + SI->getPointerAddressSpace()); + } + case Instruction::Load: { + LoadInst *LI = cast(I); + + if (VF == 1) + return VTTI->getMemoryOpCost(I->getOpcode(), RetTy, + LI->getAlignment(), + LI->getPointerAddressSpace()); + + // Scalarized loads. + if (!Legal->isConsecutiveGep(LI->getPointerOperand())) { + unsigned Cost = 0; + unsigned InCost = VTTI->getInstrCost(Instruction::InsertElement, RetTy); + // The cost of inserting the loaded value into the result vector. + Cost += VF * (InCost); + // The cost of the scalar stores. + Cost += VF * VTTI->getMemoryOpCost(I->getOpcode(), + RetTy->getScalarType(), + LI->getAlignment(), + LI->getPointerAddressSpace()); + return Cost; + } + + // Wide loads. + return VTTI->getMemoryOpCost(I->getOpcode(), VectorTy, LI->getAlignment(), + LI->getPointerAddressSpace()); + } + case Instruction::ZExt: + case Instruction::SExt: + case Instruction::FPToUI: + case Instruction::FPToSI: + case Instruction::FPExt: + case Instruction::PtrToInt: + case Instruction::IntToPtr: + case Instruction::SIToFP: + case Instruction::UIToFP: + case Instruction::Trunc: + case Instruction::FPTrunc: + case Instruction::BitCast: { + Type *SrcVecTy = ToVectorTy(I->getOperand(0)->getType(), VF); + return VTTI->getCastInstrCost(I->getOpcode(), VectorTy, SrcVecTy); + } + default: { + // We are scalarizing the instruction. Return the cost of the scalar + // instruction, plus the cost of insert and extract into vector + // elements, times the vector width. 
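// For example, scalarizing a two-operand instruction at VF == 4 with unit
// insert, extract and scalar costs (purely illustrative numbers) gives
// 4 * (1 insert + 2 extracts) + 4 * 1 = 16 cost units.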
+ unsigned Cost = 0; + + bool IsVoid = RetTy->isVoidTy(); + + unsigned InsCost = (IsVoid ? 0 : + VTTI->getInstrCost(Instruction::InsertElement, + VectorTy)); + + unsigned ExtCost = VTTI->getInstrCost(Instruction::ExtractElement, + VectorTy); + + // The cost of inserting the results plus extracting each one of the + // operands. + Cost += VF * (InsCost + ExtCost * I->getNumOperands()); + + // The cost of executing VF copies of the scalar instruction. + Cost += VF * VTTI->getInstrCost(I->getOpcode(), RetTy); + return Cost; + } + }// end of switch. +} + +Type* LoopVectorizationCostModel::ToVectorTy(Type *Scalar, unsigned VF) { + if (Scalar->isVoidTy() || VF == 1) + return Scalar; + return VectorType::get(Scalar, VF); +} + +} // namespace + +char LoopVectorize::ID = 0; +static const char lv_name[] = "Loop Vectorization"; +INITIALIZE_PASS_BEGIN(LoopVectorize, LV_NAME, lv_name, false, false) +INITIALIZE_AG_DEPENDENCY(AliasAnalysis) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(LoopSimplify) +INITIALIZE_PASS_END(LoopVectorize, LV_NAME, lv_name, false, false) + +namespace llvm { + Pass *createLoopVectorizePass() { + return new LoopVectorize(); + } +} + diff --git a/lib/Transforms/Vectorize/Vectorize.cpp b/lib/Transforms/Vectorize/Vectorize.cpp index 1ef6002..d26973a 100644 --- a/lib/Transforms/Vectorize/Vectorize.cpp +++ b/lib/Transforms/Vectorize/Vectorize.cpp @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This file implements common infrastructure for libLLVMVectorizeOpts.a, which +// This file implements common infrastructure for libLLVMVectorizeOpts.a, which // implements several vectorization transformations over the LLVM intermediate // representation, including the C bindings for that library. // @@ -23,10 +23,11 @@ using namespace llvm; -/// initializeVectorizationPasses - Initialize all passes linked into the +/// initializeVectorizationPasses - Initialize all passes linked into the /// Vectorization library. 
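// Once the loop vectorizer is registered below, it can be scheduled like the
// existing basic-block vectorizer, e.g. (illustrative only, assuming a pass
// manager PM and an LLVMPassManagerRef PMRef set up elsewhere):
//   PM.add(createLoopVectorizePass());   // C++ API
//   LLVMAddLoopVectorizePass(PMRef);     // C API, added later in this file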
void llvm::initializeVectorization(PassRegistry &Registry) { initializeBBVectorizePass(Registry); + initializeLoopVectorizePass(Registry); } void LLVMInitializeVectorization(LLVMPassRegistryRef R) { @@ -37,3 +38,6 @@ void LLVMAddBBVectorizePass(LLVMPassManagerRef PM) { unwrap(PM)->add(createBBVectorizePass()); } +void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM) { + unwrap(PM)->add(createLoopVectorizePass()); +} diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp index 7ef1131..b72c17f 100644 --- a/lib/VMCore/AsmWriter.cpp +++ b/lib/VMCore/AsmWriter.cpp @@ -66,6 +66,25 @@ static const Module *getModuleFromVal(const Value *V) { return 0; } +static void PrintCallingConv(unsigned cc, raw_ostream &Out) +{ + switch (cc) { + case CallingConv::Fast: Out << "fastcc"; break; + case CallingConv::Cold: Out << "coldcc"; break; + case CallingConv::X86_StdCall: Out << "x86_stdcallcc"; break; + case CallingConv::X86_FastCall: Out << "x86_fastcallcc"; break; + case CallingConv::X86_ThisCall: Out << "x86_thiscallcc"; break; + case CallingConv::Intel_OCL_BI: Out << "intel_ocl_bicc"; break; + case CallingConv::ARM_APCS: Out << "arm_apcscc"; break; + case CallingConv::ARM_AAPCS: Out << "arm_aapcscc"; break; + case CallingConv::ARM_AAPCS_VFP:Out << "arm_aapcs_vfpcc"; break; + case CallingConv::MSP430_INTR: Out << "msp430_intrcc"; break; + case CallingConv::PTX_Kernel: Out << "ptx_kernel"; break; + case CallingConv::PTX_Device: Out << "ptx_device"; break; + default: Out << "cc" << cc; break; + } +} + // PrintEscapedString - Print each character of the specified string, escaping // it if it is not printable or if it is an escape char. static void PrintEscapedString(StringRef Name, raw_ostream &Out) { @@ -141,8 +160,8 @@ static void PrintLLVMName(raw_ostream &OS, const Value *V) { /// TypePrinting - Type printing machinery. namespace { class TypePrinting { - TypePrinting(const TypePrinting &); // DO NOT IMPLEMENT - void operator=(const TypePrinting&); // DO NOT IMPLEMENT + TypePrinting(const TypePrinting &) LLVM_DELETED_FUNCTION; + void operator=(const TypePrinting&) LLVM_DELETED_FUNCTION; public: /// NamedTypes - The named types that are used by the current module. @@ -380,8 +399,8 @@ private: /// Add all of the functions arguments, basic blocks, and instructions. void processFunction(); - SlotTracker(const SlotTracker &); // DO NOT IMPLEMENT - void operator=(const SlotTracker &); // DO NOT IMPLEMENT + SlotTracker(const SlotTracker &) LLVM_DELETED_FUNCTION; + void operator=(const SlotTracker &) LLVM_DELETED_FUNCTION; }; } // end anonymous namespace @@ -1029,6 +1048,9 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, Out << "sideeffect "; if (IA->isAlignStack()) Out << "alignstack "; + // We don't emit the AD_ATT dialect as it's the assumed default. + if (IA->getDialect() == InlineAsm::AD_Intel) + Out << "inteldialect "; Out << '"'; PrintEscapedString(IA->getAsmString(), Out); Out << "\", \""; @@ -1222,8 +1244,8 @@ void AssemblyWriter::writeParamOperand(const Value *Operand, // Print the type TypePrinter.print(Operand->getType(), Out); // Print parameter attributes list - if (Attrs != Attribute::None) - Out << ' ' << Attribute::getAsString(Attrs); + if (Attrs.hasAttributes()) + Out << ' ' << Attrs.getAsString(); Out << ' '; // Print the operand WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule); @@ -1285,8 +1307,9 @@ void AssemblyWriter::printModule(const Module *M) { // Output all globals. 
if (!M->global_empty()) Out << '\n'; for (Module::const_global_iterator I = M->global_begin(), E = M->global_end(); - I != E; ++I) - printGlobal(I); + I != E; ++I) { + printGlobal(I); Out << '\n'; + } // Output all aliases. if (!M->alias_empty()) Out << "\n"; @@ -1353,12 +1376,12 @@ static void PrintLinkage(GlobalValue::LinkageTypes LT, case GlobalValue::LinkerPrivateWeakLinkage: Out << "linker_private_weak "; break; - case GlobalValue::LinkerPrivateWeakDefAutoLinkage: - Out << "linker_private_weak_def_auto "; - break; case GlobalValue::InternalLinkage: Out << "internal "; break; case GlobalValue::LinkOnceAnyLinkage: Out << "linkonce "; break; case GlobalValue::LinkOnceODRLinkage: Out << "linkonce_odr "; break; + case GlobalValue::LinkOnceODRAutoHideLinkage: + Out << "linkonce_odr_auto_hide "; + break; case GlobalValue::WeakAnyLinkage: Out << "weak "; break; case GlobalValue::WeakODRLinkage: Out << "weak_odr "; break; case GlobalValue::CommonLinkage: Out << "common "; break; @@ -1436,7 +1459,6 @@ void AssemblyWriter::printGlobal(const GlobalVariable *GV) { Out << ", align " << GV->getAlignment(); printInfoComment(*GV); - Out << '\n'; } void AssemblyWriter::printAlias(const GlobalAlias *GA) { @@ -1527,27 +1549,16 @@ void AssemblyWriter::printFunction(const Function *F) { PrintVisibility(F->getVisibility(), Out); // Print the calling convention. - switch (F->getCallingConv()) { - case CallingConv::C: break; // default - case CallingConv::Fast: Out << "fastcc "; break; - case CallingConv::Cold: Out << "coldcc "; break; - case CallingConv::X86_StdCall: Out << "x86_stdcallcc "; break; - case CallingConv::X86_FastCall: Out << "x86_fastcallcc "; break; - case CallingConv::X86_ThisCall: Out << "x86_thiscallcc "; break; - case CallingConv::ARM_APCS: Out << "arm_apcscc "; break; - case CallingConv::ARM_AAPCS: Out << "arm_aapcscc "; break; - case CallingConv::ARM_AAPCS_VFP:Out << "arm_aapcs_vfpcc "; break; - case CallingConv::MSP430_INTR: Out << "msp430_intrcc "; break; - case CallingConv::PTX_Kernel: Out << "ptx_kernel "; break; - case CallingConv::PTX_Device: Out << "ptx_device "; break; - default: Out << "cc" << F->getCallingConv() << " "; break; + if (F->getCallingConv() != CallingConv::C) { + PrintCallingConv(F->getCallingConv(), Out); + Out << " "; } FunctionType *FT = F->getFunctionType(); const AttrListPtr &Attrs = F->getAttributes(); Attributes RetAttrs = Attrs.getRetAttributes(); - if (RetAttrs != Attribute::None) - Out << Attribute::getAsString(Attrs.getRetAttributes()) << ' '; + if (RetAttrs.hasAttributes()) + Out << Attrs.getRetAttributes().getAsString() << ' '; TypePrinter.print(F->getReturnType(), Out); Out << ' '; WriteAsOperandInternal(Out, F, &TypePrinter, &Machine, F->getParent()); @@ -1576,8 +1587,8 @@ void AssemblyWriter::printFunction(const Function *F) { TypePrinter.print(FT->getParamType(i), Out); Attributes ArgAttrs = Attrs.getParamAttributes(i+1); - if (ArgAttrs != Attribute::None) - Out << ' ' << Attribute::getAsString(ArgAttrs); + if (ArgAttrs.hasAttributes()) + Out << ' ' << ArgAttrs.getAsString(); } } @@ -1590,8 +1601,8 @@ void AssemblyWriter::printFunction(const Function *F) { if (F->hasUnnamedAddr()) Out << " unnamed_addr"; Attributes FnAttrs = Attrs.getFnAttributes(); - if (FnAttrs != Attribute::None) - Out << ' ' << Attribute::getAsString(Attrs.getFnAttributes()); + if (FnAttrs.hasAttributes()) + Out << ' ' << Attrs.getFnAttributes().getAsString(); if (F->hasSection()) { Out << " section \""; PrintEscapedString(F->getSection(), Out); @@ -1624,8 +1635,8 @@ void 
AssemblyWriter::printArgument(const Argument *Arg, TypePrinter.print(Arg->getType(), Out); // Output parameter attributes list - if (Attrs != Attribute::None) - Out << ' ' << Attribute::getAsString(Attrs); + if (Attrs.hasAttributes()) + Out << ' ' << Attrs.getAsString(); // Output name, if available... if (Arg->hasName()) { @@ -1828,20 +1839,9 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Out << " void"; } else if (const CallInst *CI = dyn_cast(&I)) { // Print the calling convention being used. - switch (CI->getCallingConv()) { - case CallingConv::C: break; // default - case CallingConv::Fast: Out << " fastcc"; break; - case CallingConv::Cold: Out << " coldcc"; break; - case CallingConv::X86_StdCall: Out << " x86_stdcallcc"; break; - case CallingConv::X86_FastCall: Out << " x86_fastcallcc"; break; - case CallingConv::X86_ThisCall: Out << " x86_thiscallcc"; break; - case CallingConv::ARM_APCS: Out << " arm_apcscc "; break; - case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break; - case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break; - case CallingConv::MSP430_INTR: Out << " msp430_intrcc "; break; - case CallingConv::PTX_Kernel: Out << " ptx_kernel"; break; - case CallingConv::PTX_Device: Out << " ptx_device"; break; - default: Out << " cc" << CI->getCallingConv(); break; + if (CI->getCallingConv() != CallingConv::C) { + Out << " "; + PrintCallingConv(CI->getCallingConv(), Out); } Operand = CI->getCalledValue(); @@ -1850,8 +1850,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { Type *RetTy = FTy->getReturnType(); const AttrListPtr &PAL = CI->getAttributes(); - if (PAL.getRetAttributes() != Attribute::None) - Out << ' ' << Attribute::getAsString(PAL.getRetAttributes()); + if (PAL.getRetAttributes().hasAttributes()) + Out << ' ' << PAL.getRetAttributes().getAsString(); // If possible, print out the short form of the call instruction. We can // only do this if the first argument is a pointer to a nonvararg function, @@ -1874,8 +1874,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeParamOperand(CI->getArgOperand(op), PAL.getParamAttributes(op + 1)); } Out << ')'; - if (PAL.getFnAttributes() != Attribute::None) - Out << ' ' << Attribute::getAsString(PAL.getFnAttributes()); + if (PAL.getFnAttributes().hasAttributes()) + Out << ' ' << PAL.getFnAttributes().getAsString(); } else if (const InvokeInst *II = dyn_cast(&I)) { Operand = II->getCalledValue(); PointerType *PTy = cast(Operand->getType()); @@ -1884,24 +1884,13 @@ void AssemblyWriter::printInstruction(const Instruction &I) { const AttrListPtr &PAL = II->getAttributes(); // Print the calling convention being used. 
- switch (II->getCallingConv()) { - case CallingConv::C: break; // default - case CallingConv::Fast: Out << " fastcc"; break; - case CallingConv::Cold: Out << " coldcc"; break; - case CallingConv::X86_StdCall: Out << " x86_stdcallcc"; break; - case CallingConv::X86_FastCall: Out << " x86_fastcallcc"; break; - case CallingConv::X86_ThisCall: Out << " x86_thiscallcc"; break; - case CallingConv::ARM_APCS: Out << " arm_apcscc "; break; - case CallingConv::ARM_AAPCS: Out << " arm_aapcscc "; break; - case CallingConv::ARM_AAPCS_VFP:Out << " arm_aapcs_vfpcc "; break; - case CallingConv::MSP430_INTR: Out << " msp430_intrcc "; break; - case CallingConv::PTX_Kernel: Out << " ptx_kernel"; break; - case CallingConv::PTX_Device: Out << " ptx_device"; break; - default: Out << " cc" << II->getCallingConv(); break; + if (II->getCallingConv() != CallingConv::C) { + Out << " "; + PrintCallingConv(II->getCallingConv(), Out); } - if (PAL.getRetAttributes() != Attribute::None) - Out << ' ' << Attribute::getAsString(PAL.getRetAttributes()); + if (PAL.getRetAttributes().hasAttributes()) + Out << ' ' << PAL.getRetAttributes().getAsString(); // If possible, print out the short form of the invoke instruction. We can // only do this if the first argument is a pointer to a nonvararg function, @@ -1925,8 +1914,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } Out << ')'; - if (PAL.getFnAttributes() != Attribute::None) - Out << ' ' << Attribute::getAsString(PAL.getFnAttributes()); + if (PAL.getFnAttributes().hasAttributes()) + Out << ' ' << PAL.getFnAttributes().getAsString(); Out << "\n to "; writeOperand(II->getNormalDest(), true); diff --git a/lib/VMCore/Attributes.cpp b/lib/VMCore/Attributes.cpp index c8219eb..f1268e6 100644 --- a/lib/VMCore/Attributes.cpp +++ b/lib/VMCore/Attributes.cpp @@ -7,11 +7,14 @@ // //===----------------------------------------------------------------------===// // -// This file implements the AttributesList class and Attribute utilities. +// This file implements the Attributes, AttributeImpl, AttrBuilder, +// AttributeListImpl, and AttrListPtr classes. // //===----------------------------------------------------------------------===// #include "llvm/Attributes.h" +#include "AttributesImpl.h" +#include "LLVMContextImpl.h" #include "llvm/Type.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/FoldingSet.h" @@ -23,215 +26,382 @@ using namespace llvm; //===----------------------------------------------------------------------===// -// Attribute Function Definitions +// Attributes Implementation //===----------------------------------------------------------------------===// -std::string Attribute::getAsString(Attributes Attrs) { +Attributes Attributes::get(LLVMContext &Context, ArrayRef Vals) { + AttrBuilder B; + for (ArrayRef::iterator I = Vals.begin(), E = Vals.end(); + I != E; ++I) + B.addAttribute(*I); + return Attributes::get(Context, B); +} + +Attributes Attributes::get(LLVMContext &Context, AttrBuilder &B) { + // If there are no attributes, return an empty Attributes class. + if (!B.hasAttributes()) + return Attributes(); + + // Otherwise, build a key to look up the existing attributes. + LLVMContextImpl *pImpl = Context.pImpl; + FoldingSetNodeID ID; + ID.AddInteger(B.Raw()); + + void *InsertPoint; + AttributesImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint); + + if (!PA) { + // If we didn't find any existing attributes of the same shape then create a + // new one and insert it. 
+ PA = new AttributesImpl(B.Raw()); + pImpl->AttrsSet.InsertNode(PA, InsertPoint); + } + + // Return the AttributesList that we found or created. + return Attributes(PA); +} + +bool Attributes::hasAttribute(AttrVal Val) const { + return Attrs && Attrs->hasAttribute(Val); +} + +bool Attributes::hasAttributes() const { + return Attrs && Attrs->hasAttributes(); +} + +bool Attributes::hasAttributes(const Attributes &A) const { + return Attrs && Attrs->hasAttributes(A); +} + +/// This returns the alignment field of an attribute as a byte alignment value. +unsigned Attributes::getAlignment() const { + if (!hasAttribute(Attributes::Alignment)) + return 0; + return 1U << ((Attrs->getAlignment() >> 16) - 1); +} + +/// This returns the stack alignment field of an attribute as a byte alignment +/// value. +unsigned Attributes::getStackAlignment() const { + if (!hasAttribute(Attributes::StackAlignment)) + return 0; + return 1U << ((Attrs->getStackAlignment() >> 26) - 1); +} + +uint64_t Attributes::Raw() const { + return Attrs ? Attrs->Raw() : 0; +} + +Attributes Attributes::typeIncompatible(Type *Ty) { + AttrBuilder Incompatible; + + if (!Ty->isIntegerTy()) + // Attributes that only apply to integers. + Incompatible.addAttribute(Attributes::SExt) + .addAttribute(Attributes::ZExt); + + if (!Ty->isPointerTy()) + // Attributes that only apply to pointers. + Incompatible.addAttribute(Attributes::ByVal) + .addAttribute(Attributes::Nest) + .addAttribute(Attributes::NoAlias) + .addAttribute(Attributes::NoCapture) + .addAttribute(Attributes::StructRet); + + return Attributes::get(Ty->getContext(), Incompatible); +} + +/// encodeLLVMAttributesForBitcode - This returns an integer containing an +/// encoding of all the LLVM attributes found in the given attribute bitset. +/// Any change to this encoding is a breaking change to bitcode compatibility. +uint64_t Attributes::encodeLLVMAttributesForBitcode(Attributes Attrs) { + // FIXME: It doesn't make sense to store the alignment information as an + // expanded out value, we should store it as a log2 value. However, we can't + // just change that here without breaking bitcode compatibility. If this ever + // becomes a problem in practice, we should introduce new tag numbers in the + // bitcode file and have those tags use a more efficiently encoded alignment + // field. + + // Store the alignment in the bitcode as a 16-bit raw value instead of a 5-bit + // log2 encoded value. Shift the bits above the alignment up by 11 bits. + uint64_t EncodedAttrs = Attrs.Raw() & 0xffff; + if (Attrs.hasAttribute(Attributes::Alignment)) + EncodedAttrs |= Attrs.getAlignment() << 16; + EncodedAttrs |= (Attrs.Raw() & (0xffffULL << 21)) << 11; + return EncodedAttrs; +} + +/// decodeLLVMAttributesForBitcode - This returns an attribute bitset containing +/// the LLVM attributes that have been decoded from the given integer. This +/// function must stay in sync with 'encodeLLVMAttributesForBitcode'. +Attributes Attributes::decodeLLVMAttributesForBitcode(LLVMContext &C, + uint64_t EncodedAttrs) { + // The alignment is stored as a 16-bit raw value from bits 31--16. We shift + // the bits above 31 down by 11 bits. 
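// Editor's worked example (illustrative, not part of this patch): the encoder
// above keeps raw bits 0-15 in place, stores the expanded byte alignment in
// bits 16-31, and shifts raw bits 21 and up left by 11 so they land at bit 32
// and above; the mask-and-shift sequence below simply inverts that packing.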
+ unsigned Alignment = (EncodedAttrs & (0xffffULL << 16)) >> 16; + assert((!Alignment || isPowerOf2_32(Alignment)) && + "Alignment must be a power of two."); + + AttrBuilder B(EncodedAttrs & 0xffff); + if (Alignment) + B.addAlignmentAttr(Alignment); + B.addRawValue((EncodedAttrs & (0xffffULL << 32)) >> 11); + return Attributes::get(C, B); +} + +std::string Attributes::getAsString() const { std::string Result; - if (Attrs & Attribute::ZExt) + if (hasAttribute(Attributes::ZExt)) Result += "zeroext "; - if (Attrs & Attribute::SExt) + if (hasAttribute(Attributes::SExt)) Result += "signext "; - if (Attrs & Attribute::NoReturn) + if (hasAttribute(Attributes::NoReturn)) Result += "noreturn "; - if (Attrs & Attribute::NoUnwind) + if (hasAttribute(Attributes::NoUnwind)) Result += "nounwind "; - if (Attrs & Attribute::UWTable) + if (hasAttribute(Attributes::UWTable)) Result += "uwtable "; - if (Attrs & Attribute::ReturnsTwice) + if (hasAttribute(Attributes::ReturnsTwice)) Result += "returns_twice "; - if (Attrs & Attribute::InReg) + if (hasAttribute(Attributes::InReg)) Result += "inreg "; - if (Attrs & Attribute::NoAlias) + if (hasAttribute(Attributes::NoAlias)) Result += "noalias "; - if (Attrs & Attribute::NoCapture) + if (hasAttribute(Attributes::NoCapture)) Result += "nocapture "; - if (Attrs & Attribute::StructRet) + if (hasAttribute(Attributes::StructRet)) Result += "sret "; - if (Attrs & Attribute::ByVal) + if (hasAttribute(Attributes::ByVal)) Result += "byval "; - if (Attrs & Attribute::Nest) + if (hasAttribute(Attributes::Nest)) Result += "nest "; - if (Attrs & Attribute::ReadNone) + if (hasAttribute(Attributes::ReadNone)) Result += "readnone "; - if (Attrs & Attribute::ReadOnly) + if (hasAttribute(Attributes::ReadOnly)) Result += "readonly "; - if (Attrs & Attribute::OptimizeForSize) + if (hasAttribute(Attributes::OptimizeForSize)) Result += "optsize "; - if (Attrs & Attribute::NoInline) + if (hasAttribute(Attributes::NoInline)) Result += "noinline "; - if (Attrs & Attribute::InlineHint) + if (hasAttribute(Attributes::InlineHint)) Result += "inlinehint "; - if (Attrs & Attribute::AlwaysInline) + if (hasAttribute(Attributes::AlwaysInline)) Result += "alwaysinline "; - if (Attrs & Attribute::StackProtect) + if (hasAttribute(Attributes::StackProtect)) Result += "ssp "; - if (Attrs & Attribute::StackProtectReq) + if (hasAttribute(Attributes::StackProtectReq)) Result += "sspreq "; - if (Attrs & Attribute::NoRedZone) + if (hasAttribute(Attributes::NoRedZone)) Result += "noredzone "; - if (Attrs & Attribute::NoImplicitFloat) + if (hasAttribute(Attributes::NoImplicitFloat)) Result += "noimplicitfloat "; - if (Attrs & Attribute::Naked) + if (hasAttribute(Attributes::Naked)) Result += "naked "; - if (Attrs & Attribute::NonLazyBind) + if (hasAttribute(Attributes::NonLazyBind)) Result += "nonlazybind "; - if (Attrs & Attribute::AddressSafety) + if (hasAttribute(Attributes::AddressSafety)) Result += "address_safety "; - if (Attrs & Attribute::StackAlignment) { + if (hasAttribute(Attributes::MinSize)) + Result += "minsize "; + if (hasAttribute(Attributes::StackAlignment)) { Result += "alignstack("; - Result += utostr(Attribute::getStackAlignmentFromAttrs(Attrs)); + Result += utostr(getStackAlignment()); Result += ") "; } - if (Attrs & Attribute::Alignment) { + if (hasAttribute(Attributes::Alignment)) { Result += "align "; - Result += utostr(Attribute::getAlignmentFromAttrs(Attrs)); + Result += utostr(getAlignment()); Result += " "; } - if (Attrs & Attribute::IANSDialect) - Result += "ia_nsdialect "; - 
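// Editor's illustrative aside (not part of this patch): for attributes built
// with, say, NoUnwind, ReadOnly and a 16-byte stack alignment, the string
// assembled above reads "nounwind readonly alignstack(16) " at this point,
// with the trailing space removed just below.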
// Trim the trailing space. assert(!Result.empty() && "Unknown attribute!"); Result.erase(Result.end()-1); return Result; } -Attributes Attribute::typeIncompatible(Type *Ty) { - Attributes Incompatible = None; - - if (!Ty->isIntegerTy()) - // Attributes that only apply to integers. - Incompatible |= SExt | ZExt; - - if (!Ty->isPointerTy()) - // Attributes that only apply to pointers. - Incompatible |= ByVal | Nest | NoAlias | StructRet | NoCapture; - - return Incompatible; +//===----------------------------------------------------------------------===// +// AttrBuilder Implementation +//===----------------------------------------------------------------------===// + +AttrBuilder &AttrBuilder::addAttribute(Attributes::AttrVal Val){ + Bits |= AttributesImpl::getAttrMask(Val); + return *this; +} + +AttrBuilder &AttrBuilder::addRawValue(uint64_t Val) { + Bits |= Val; + return *this; +} + +AttrBuilder &AttrBuilder::addAlignmentAttr(unsigned Align) { + if (Align == 0) return *this; + assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); + assert(Align <= 0x40000000 && "Alignment too large."); + Bits |= (Log2_32(Align) + 1) << 16; + return *this; +} +AttrBuilder &AttrBuilder::addStackAlignmentAttr(unsigned Align){ + // Default alignment, allow the target to define how to align it. + if (Align == 0) return *this; + assert(isPowerOf2_32(Align) && "Alignment must be a power of two."); + assert(Align <= 0x100 && "Alignment too large."); + Bits |= (Log2_32(Align) + 1) << 26; + return *this; +} + +AttrBuilder &AttrBuilder::removeAttribute(Attributes::AttrVal Val) { + Bits &= ~AttributesImpl::getAttrMask(Val); + return *this; +} + +AttrBuilder &AttrBuilder::addAttributes(const Attributes &A) { + Bits |= A.Raw(); + return *this; +} + +AttrBuilder &AttrBuilder::removeAttributes(const Attributes &A){ + Bits &= ~A.Raw(); + return *this; +} + +bool AttrBuilder::hasAttribute(Attributes::AttrVal A) const { + return Bits & AttributesImpl::getAttrMask(A); +} + +bool AttrBuilder::hasAttributes() const { + return Bits != 0; +} +bool AttrBuilder::hasAttributes(const Attributes &A) const { + return Bits & A.Raw(); +} +bool AttrBuilder::hasAlignmentAttr() const { + return Bits & AttributesImpl::getAttrMask(Attributes::Alignment); +} + +uint64_t AttrBuilder::getAlignment() const { + if (!hasAlignmentAttr()) + return 0; + return 1U << + (((Bits & AttributesImpl::getAttrMask(Attributes::Alignment)) >> 16) - 1); +} + +uint64_t AttrBuilder::getStackAlignment() const { + if (!hasAlignmentAttr()) + return 0; + return 1U << + (((Bits & AttributesImpl::getAttrMask(Attributes::StackAlignment))>>26)-1); } //===----------------------------------------------------------------------===// -// AttributeListImpl Definition +// AttributeImpl Definition //===----------------------------------------------------------------------===// -namespace llvm { - class AttributeListImpl; +uint64_t AttributesImpl::getAttrMask(uint64_t Val) { + switch (Val) { + case Attributes::None: return 0; + case Attributes::ZExt: return 1 << 0; + case Attributes::SExt: return 1 << 1; + case Attributes::NoReturn: return 1 << 2; + case Attributes::InReg: return 1 << 3; + case Attributes::StructRet: return 1 << 4; + case Attributes::NoUnwind: return 1 << 5; + case Attributes::NoAlias: return 1 << 6; + case Attributes::ByVal: return 1 << 7; + case Attributes::Nest: return 1 << 8; + case Attributes::ReadNone: return 1 << 9; + case Attributes::ReadOnly: return 1 << 10; + case Attributes::NoInline: return 1 << 11; + case Attributes::AlwaysInline: 
return 1 << 12; + case Attributes::OptimizeForSize: return 1 << 13; + case Attributes::StackProtect: return 1 << 14; + case Attributes::StackProtectReq: return 1 << 15; + case Attributes::Alignment: return 31 << 16; + case Attributes::NoCapture: return 1 << 21; + case Attributes::NoRedZone: return 1 << 22; + case Attributes::NoImplicitFloat: return 1 << 23; + case Attributes::Naked: return 1 << 24; + case Attributes::InlineHint: return 1 << 25; + case Attributes::StackAlignment: return 7 << 26; + case Attributes::ReturnsTwice: return 1 << 29; + case Attributes::UWTable: return 1 << 30; + case Attributes::NonLazyBind: return 1U << 31; + case Attributes::AddressSafety: return 1ULL << 32; + case Attributes::MinSize: return 1ULL << 33; + } + llvm_unreachable("Unsupported attribute type"); } -static ManagedStatic > AttributesLists; +bool AttributesImpl::hasAttribute(uint64_t A) const { + return (Bits & getAttrMask(A)) != 0; +} -namespace llvm { -static ManagedStatic > ALMutex; +bool AttributesImpl::hasAttributes() const { + return Bits != 0; +} -class AttributeListImpl : public FoldingSetNode { - sys::cas_flag RefCount; - - // AttributesList is uniqued, these should not be publicly available. - void operator=(const AttributeListImpl &); // Do not implement - AttributeListImpl(const AttributeListImpl &); // Do not implement - ~AttributeListImpl(); // Private implementation -public: - SmallVector Attrs; - - AttributeListImpl(ArrayRef attrs) - : Attrs(attrs.begin(), attrs.end()) { - RefCount = 0; - } - - void AddRef() { - sys::SmartScopedLock Lock(*ALMutex); - ++RefCount; - } - void DropRef() { - sys::SmartScopedLock Lock(*ALMutex); - if (!AttributesLists.isConstructed()) - return; - sys::cas_flag new_val = --RefCount; - if (new_val == 0) - delete this; - } - - void Profile(FoldingSetNodeID &ID) const { - Profile(ID, Attrs); - } - static void Profile(FoldingSetNodeID &ID, ArrayRef Attrs){ - for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { - ID.AddInteger(Attrs[i].Attrs.Raw()); - ID.AddInteger(Attrs[i].Index); - } - } -}; +bool AttributesImpl::hasAttributes(const Attributes &A) const { + return Bits & A.Raw(); // FIXME: Raw() won't work here in the future. } -AttributeListImpl::~AttributeListImpl() { - // NOTE: Lock must be acquired by caller. - AttributesLists->RemoveNode(this); +uint64_t AttributesImpl::getAlignment() const { + return Bits & getAttrMask(Attributes::Alignment); } +uint64_t AttributesImpl::getStackAlignment() const { + return Bits & getAttrMask(Attributes::StackAlignment); +} -AttrListPtr AttrListPtr::get(ArrayRef Attrs) { +//===----------------------------------------------------------------------===// +// AttributeListImpl Definition +//===----------------------------------------------------------------------===// + +AttrListPtr AttrListPtr::get(LLVMContext &C, + ArrayRef Attrs) { // If there are no attributes then return a null AttributesList pointer. if (Attrs.empty()) return AttrListPtr(); - + #ifndef NDEBUG for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { - assert(Attrs[i].Attrs != Attribute::None && + assert(Attrs[i].Attrs.hasAttributes() && "Pointless attribute!"); assert((!i || Attrs[i-1].Index < Attrs[i].Index) && "Misordered AttributesList!"); } #endif - + // Otherwise, build a key to look up the existing attributes. 
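// Editor's note (illustrative, not part of this patch): the lookup key is the
// full (Index, Attrs.Raw()) sequence hashed by AttributeListImpl::Profile, so
// two attribute lists built from identical AttributeWithIndex arrays end up
// sharing a single AttributeListImpl in this context's FoldingSet.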
+ LLVMContextImpl *pImpl = C.pImpl; FoldingSetNodeID ID; AttributeListImpl::Profile(ID, Attrs); - void *InsertPos; - - sys::SmartScopedLock Lock(*ALMutex); - - AttributeListImpl *PAL = - AttributesLists->FindNodeOrInsertPos(ID, InsertPos); - + + void *InsertPoint; + AttributeListImpl *PA = pImpl->AttrsLists.FindNodeOrInsertPos(ID, + InsertPoint); + // If we didn't find any existing attributes of the same shape then // create a new one and insert it. - if (!PAL) { - PAL = new AttributeListImpl(Attrs); - AttributesLists->InsertNode(PAL, InsertPos); + if (!PA) { + PA = new AttributeListImpl(Attrs); + pImpl->AttrsLists.InsertNode(PA, InsertPoint); } - + // Return the AttributesList that we found or created. - return AttrListPtr(PAL); + return AttrListPtr(PA); } - //===----------------------------------------------------------------------===// // AttrListPtr Method Implementations //===----------------------------------------------------------------------===// -AttrListPtr::AttrListPtr(AttributeListImpl *LI) : AttrList(LI) { - if (LI) LI->AddRef(); -} - -AttrListPtr::AttrListPtr(const AttrListPtr &P) : AttrList(P.AttrList) { - if (AttrList) AttrList->AddRef(); -} - const AttrListPtr &AttrListPtr::operator=(const AttrListPtr &RHS) { - sys::SmartScopedLock Lock(*ALMutex); if (AttrList == RHS.AttrList) return *this; - if (AttrList) AttrList->DropRef(); + AttrList = RHS.AttrList; - if (AttrList) AttrList->AddRef(); return *this; } -AttrListPtr::~AttrListPtr() { - if (AttrList) AttrList->DropRef(); -} - -/// getNumSlots - Return the number of slots used in this attribute list. +/// getNumSlots - Return the number of slots used in this attribute list. /// This is the number of arguments that have an attribute set on them /// (including the function itself). unsigned AttrListPtr::getNumSlots() const { @@ -245,48 +415,60 @@ const AttributeWithIndex &AttrListPtr::getSlot(unsigned Slot) const { return AttrList->Attrs[Slot]; } - -/// getAttributes - The attributes for the specified index are -/// returned. Attributes for the result are denoted with Idx = 0. -/// Function notes are denoted with idx = ~0. +/// getAttributes - The attributes for the specified index are returned. +/// Attributes for the result are denoted with Idx = 0. Function notes are +/// denoted with idx = ~0. Attributes AttrListPtr::getAttributes(unsigned Idx) const { - if (AttrList == 0) return Attribute::None; - + if (AttrList == 0) return Attributes(); + const SmallVector &Attrs = AttrList->Attrs; for (unsigned i = 0, e = Attrs.size(); i != e && Attrs[i].Index <= Idx; ++i) if (Attrs[i].Index == Idx) return Attrs[i].Attrs; - return Attribute::None; + + return Attributes(); } /// hasAttrSomewhere - Return true if the specified attribute is set for at /// least one parameter or for the return value. -bool AttrListPtr::hasAttrSomewhere(Attributes Attr) const { +bool AttrListPtr::hasAttrSomewhere(Attributes::AttrVal Attr) const { if (AttrList == 0) return false; - + const SmallVector &Attrs = AttrList->Attrs; for (unsigned i = 0, e = Attrs.size(); i != e; ++i) - if (Attrs[i].Attrs & Attr) + if (Attrs[i].Attrs.hasAttribute(Attr)) return true; + return false; } +unsigned AttrListPtr::getNumAttrs() const { + return AttrList ? 
AttrList->Attrs.size() : 0; +} + +Attributes &AttrListPtr::getAttributesAtIndex(unsigned i) const { + assert(AttrList && "Trying to get an attribute from an empty list!"); + assert(i < AttrList->Attrs.size() && "Index out of range!"); + return AttrList->Attrs[i].Attrs; +} -AttrListPtr AttrListPtr::addAttr(unsigned Idx, Attributes Attrs) const { +AttrListPtr AttrListPtr::addAttr(LLVMContext &C, unsigned Idx, + Attributes Attrs) const { Attributes OldAttrs = getAttributes(Idx); #ifndef NDEBUG // FIXME it is not obvious how this should work for alignment. // For now, say we can't change a known alignment. - Attributes OldAlign = OldAttrs & Attribute::Alignment; - Attributes NewAlign = Attrs & Attribute::Alignment; + unsigned OldAlign = OldAttrs.getAlignment(); + unsigned NewAlign = Attrs.getAlignment(); assert((!OldAlign || !NewAlign || OldAlign == NewAlign) && "Attempt to change alignment!"); #endif - - Attributes NewAttrs = OldAttrs | Attrs; - if (NewAttrs == OldAttrs) + + AttrBuilder NewAttrs = + AttrBuilder(OldAttrs).addAttributes(Attrs); + if (NewAttrs == AttrBuilder(OldAttrs)) return *this; - + SmallVector NewAttrList; if (AttrList == 0) NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs)); @@ -299,61 +481,67 @@ AttrListPtr AttrListPtr::addAttr(unsigned Idx, Attributes Attrs) const { // If there are attributes already at this index, merge them in. if (i != e && OldAttrList[i].Index == Idx) { - Attrs |= OldAttrList[i].Attrs; + Attrs = + Attributes::get(C, AttrBuilder(Attrs). + addAttributes(OldAttrList[i].Attrs)); ++i; } - + NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs)); - + // Copy attributes for arguments after this one. - NewAttrList.insert(NewAttrList.end(), + NewAttrList.insert(NewAttrList.end(), OldAttrList.begin()+i, OldAttrList.end()); } - - return get(NewAttrList); + + return get(C, NewAttrList); } -AttrListPtr AttrListPtr::removeAttr(unsigned Idx, Attributes Attrs) const { +AttrListPtr AttrListPtr::removeAttr(LLVMContext &C, unsigned Idx, + Attributes Attrs) const { #ifndef NDEBUG // FIXME it is not obvious how this should work for alignment. // For now, say we can't pass in alignment, which no current use does. - assert(!(Attrs & Attribute::Alignment) && "Attempt to exclude alignment!"); + assert(!Attrs.hasAttribute(Attributes::Alignment) && + "Attempt to exclude alignment!"); #endif if (AttrList == 0) return AttrListPtr(); - + Attributes OldAttrs = getAttributes(Idx); - Attributes NewAttrs = OldAttrs & ~Attrs; - if (NewAttrs == OldAttrs) + AttrBuilder NewAttrs = + AttrBuilder(OldAttrs).removeAttributes(Attrs); + if (NewAttrs == AttrBuilder(OldAttrs)) return *this; SmallVector NewAttrList; const SmallVector &OldAttrList = AttrList->Attrs; unsigned i = 0, e = OldAttrList.size(); - + // Copy attributes for arguments before this one. for (; i != e && OldAttrList[i].Index < Idx; ++i) NewAttrList.push_back(OldAttrList[i]); - + // If there are attributes already at this index, merge them in. assert(OldAttrList[i].Index == Idx && "Attribute isn't set?"); - Attrs = OldAttrList[i].Attrs & ~Attrs; + Attrs = Attributes::get(C, AttrBuilder(OldAttrList[i].Attrs). + removeAttributes(Attrs)); ++i; - if (Attrs) // If any attributes left for this parameter, add them. + if (Attrs.hasAttributes()) // If any attributes left for this param, add them. NewAttrList.push_back(AttributeWithIndex::get(Idx, Attrs)); - + // Copy attributes for arguments after this one. 
- NewAttrList.insert(NewAttrList.end(), + NewAttrList.insert(NewAttrList.end(), OldAttrList.begin()+i, OldAttrList.end()); - - return get(NewAttrList); + + return get(C, NewAttrList); } void AttrListPtr::dump() const { dbgs() << "PAL[ "; for (unsigned i = 0; i < getNumSlots(); ++i) { const AttributeWithIndex &PAWI = getSlot(i); - dbgs() << "{" << PAWI.Index << "," << PAWI.Attrs << "} "; + dbgs() << "{" << PAWI.Index << "," << PAWI.Attrs.getAsString() << "} "; } - + dbgs() << "]\n"; } diff --git a/lib/VMCore/AttributesImpl.h b/lib/VMCore/AttributesImpl.h new file mode 100644 index 0000000..5c107e1 --- /dev/null +++ b/lib/VMCore/AttributesImpl.h @@ -0,0 +1,71 @@ +//===-- AttributesImpl.h - Attributes Internals -----------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines various helper methods and classes used by LLVMContextImpl +// for creating and managing attributes. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_ATTRIBUTESIMPL_H +#define LLVM_ATTRIBUTESIMPL_H + +#include "llvm/Attributes.h" +#include "llvm/ADT/FoldingSet.h" + +namespace llvm { + +class AttributesImpl : public FoldingSetNode { + uint64_t Bits; // FIXME: We will be expanding this. +public: + AttributesImpl(uint64_t bits) : Bits(bits) {} + + bool hasAttribute(uint64_t A) const; + + bool hasAttributes() const; + bool hasAttributes(const Attributes &A) const; + + uint64_t getAlignment() const; + uint64_t getStackAlignment() const; + + uint64_t Raw() const { return Bits; } // FIXME: Remove. + + static uint64_t getAttrMask(uint64_t Val); + + void Profile(FoldingSetNodeID &ID) const { + Profile(ID, Bits); + } + static void Profile(FoldingSetNodeID &ID, uint64_t Bits) { + ID.AddInteger(Bits); + } +}; + +class AttributeListImpl : public FoldingSetNode { + // AttributesList is uniqued, these should not be publicly available. 
+ void operator=(const AttributeListImpl &) LLVM_DELETED_FUNCTION; + AttributeListImpl(const AttributeListImpl &) LLVM_DELETED_FUNCTION; +public: + SmallVector Attrs; + + AttributeListImpl(ArrayRef attrs) + : Attrs(attrs.begin(), attrs.end()) {} + + void Profile(FoldingSetNodeID &ID) const { + Profile(ID, Attrs); + } + static void Profile(FoldingSetNodeID &ID, ArrayRef Attrs){ + for (unsigned i = 0, e = Attrs.size(); i != e; ++i) { + ID.AddInteger(Attrs[i].Attrs.Raw()); + ID.AddInteger(Attrs[i].Index); + } + } +}; + +} // end llvm namespace + +#endif diff --git a/lib/VMCore/AutoUpgrade.cpp b/lib/VMCore/AutoUpgrade.cpp index 094ca75..5fff460 100644 --- a/lib/VMCore/AutoUpgrade.cpp +++ b/lib/VMCore/AutoUpgrade.cpp @@ -148,7 +148,8 @@ bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) { if (NewFn) F = NewFn; if (unsigned id = F->getIntrinsicID()) - F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id)); + F->setAttributes(Intrinsic::getAttributes(F->getContext(), + (Intrinsic::ID)id)); return Upgraded; } diff --git a/lib/VMCore/CMakeLists.txt b/lib/VMCore/CMakeLists.txt index 6a20be6..06eab0e 100644 --- a/lib/VMCore/CMakeLists.txt +++ b/lib/VMCore/CMakeLists.txt @@ -1,5 +1,3 @@ -set(LLVM_REQUIRES_RTTI 1) - add_llvm_library(LLVMCore AsmWriter.cpp Attributes.cpp @@ -8,6 +6,7 @@ add_llvm_library(LLVMCore ConstantFold.cpp Constants.cpp Core.cpp + DataLayout.cpp DebugInfo.cpp DebugLoc.cpp DIBuilder.cpp @@ -32,6 +31,7 @@ add_llvm_library(LLVMCore PrintModulePass.cpp Type.cpp TypeFinder.cpp + TargetTransformInfo.cpp Use.cpp User.cpp Value.cpp @@ -42,7 +42,7 @@ add_llvm_library(LLVMCore # Workaround: It takes over 20 minutes to compile with msvc10. # FIXME: Suppressing optimizations to core libraries would not be good thing. -if( MSVC_VERSION EQUAL 1600 ) +if( MSVC_VERSION LESS 1700 ) set_property( SOURCE Function.cpp PROPERTY COMPILE_FLAGS "/Og-" diff --git a/lib/VMCore/ConstantFold.cpp b/lib/VMCore/ConstantFold.cpp index 8e82876..fe3edac 100644 --- a/lib/VMCore/ConstantFold.cpp +++ b/lib/VMCore/ConstantFold.cpp @@ -12,7 +12,7 @@ // ConstantExpr::get* methods to automatically fold constants when possible. // // The current constant folding implementation is implemented in two pieces: the -// pieces that don't need TargetData, and the pieces that do. This is to avoid +// pieces that don't need DataLayout, and the pieces that do. This is to avoid // a dependence in VMCore on Target. // //===----------------------------------------------------------------------===// @@ -87,9 +87,13 @@ foldConstantCastPair( Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode()); Instruction::CastOps secondOp = Instruction::CastOps(opc); + // Assume that pointers are never more than 64 bits wide. + IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext()); + // Let CastInst::isEliminableCastPair do the heavy lifting. return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy, - Type::getInt64Ty(DstTy->getContext())); + FakeIntPtrTy, FakeIntPtrTy, + FakeIntPtrTy); } static Constant *FoldBitCast(Constant *V, Type *DestTy) { @@ -514,10 +518,6 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V, return UndefValue::get(DestTy); } - // No compile-time operations on this type yet. 
- if (V->getType()->isPPC_FP128Ty() || DestTy->isPPC_FP128Ty()) - return 0; - if (V->isNullValue() && !DestTy->isX86_MMXTy()) return Constant::getNullValue(DestTy); @@ -576,6 +576,7 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V, DestTy->isDoubleTy() ? APFloat::IEEEdouble : DestTy->isX86_FP80Ty() ? APFloat::x87DoubleExtended : DestTy->isFP128Ty() ? APFloat::IEEEquad : + DestTy->isPPC_FP128Ty() ? APFloat::PPCDoubleDouble : APFloat::Bogus, APFloat::rmNearestTiesToEven, &ignored); return ConstantFP::get(V->getContext(), Val); @@ -646,7 +647,8 @@ Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V, case Instruction::SIToFP: if (ConstantInt *CI = dyn_cast(V)) { APInt api = CI->getValue(); - APFloat apf(APInt::getNullValue(DestTy->getPrimitiveSizeInBits()), true); + APFloat apf(APInt::getNullValue(DestTy->getPrimitiveSizeInBits()), + !DestTy->isPPC_FP128Ty() /* isEEEE */); (void)apf.convertFromAPInt(api, opc==Instruction::SIToFP, APFloat::rmNearestTiesToEven); @@ -867,10 +869,6 @@ Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg, Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1, Constant *C2) { - // No compile-time operations on this type yet. - if (C1->getType()->isPPC_FP128Ty()) - return 0; - // Handle UndefValue up front. if (isa(C1) || isa(C2)) { switch (Opcode) { @@ -1273,10 +1271,6 @@ static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) { assert(V1->getType() == V2->getType() && "Cannot compare values of different types!"); - // No compile-time operations on this type yet. - if (V1->getType()->isPPC_FP128Ty()) - return FCmpInst::BAD_FCMP_PREDICATE; - // Handle degenerate case quickly if (V1 == V2) return FCmpInst::FCMP_OEQ; @@ -1602,10 +1596,6 @@ Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred, return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(pred)); } - // No compile-time operations on this type yet. - if (C1->getType()->isPPC_FP128Ty()) - return 0; - // icmp eq/ne(null,GV) -> false/true if (C1->isNullValue()) { if (const GlobalValue *GV = dyn_cast(C2)) diff --git a/lib/VMCore/Constants.cpp b/lib/VMCore/Constants.cpp index a4e21e1..edd6a73 100644 --- a/lib/VMCore/Constants.cpp +++ b/lib/VMCore/Constants.cpp @@ -245,6 +245,33 @@ bool Constant::canTrap() const { } } +/// isThreadDependent - Return true if the value can vary between threads. +bool Constant::isThreadDependent() const { + SmallPtrSet Visited; + SmallVector WorkList; + WorkList.push_back(this); + Visited.insert(this); + + while (!WorkList.empty()) { + const Constant *C = WorkList.pop_back_val(); + + if (const GlobalVariable *GV = dyn_cast(C)) { + if (GV->isThreadLocal()) + return true; + } + + for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) { + const Constant *D = dyn_cast(C->getOperand(I)); + if (!D) + continue; + if (Visited.insert(D)) + WorkList.push_back(D); + } + } + + return false; +} + /// isConstantUsed - Return true if the constant has users other than constant /// exprs and other dangling things. bool Constant::isConstantUsed() const { diff --git a/lib/VMCore/ConstantsContext.h b/lib/VMCore/ConstantsContext.h index 8903a8f..996eb12 100644 --- a/lib/VMCore/ConstantsContext.h +++ b/lib/VMCore/ConstantsContext.h @@ -33,7 +33,7 @@ struct ConstantTraits; /// behind the scenes to implement unary constant exprs. 
class UnaryConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly one operand void *operator new(size_t s) { @@ -50,7 +50,7 @@ public: /// behind the scenes to implement binary constant exprs. class BinaryConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -71,7 +71,7 @@ public: /// behind the scenes to implement select constant exprs. class SelectConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly three operands void *operator new(size_t s) { @@ -92,7 +92,7 @@ public: /// extractelement constant exprs. class ExtractElementConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -113,7 +113,7 @@ public: /// insertelement constant exprs. class InsertElementConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly three operands void *operator new(size_t s) { @@ -135,7 +135,7 @@ public: /// shufflevector constant exprs. class ShuffleVectorConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly three operands void *operator new(size_t s) { @@ -160,7 +160,7 @@ public: /// extractvalue constant exprs. class ExtractValueConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly one operand void *operator new(size_t s) { @@ -186,7 +186,7 @@ public: /// insertvalue constant exprs. class InsertValueConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly one operand void *operator new(size_t s) { @@ -234,7 +234,7 @@ public: // needed in order to store the predicate value for these instructions. 
class CompareConstantExpr : public ConstantExpr { virtual void anchor(); - void *operator new(size_t, unsigned); // DO NOT IMPLEMENT + void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION; public: // allocate space for exactly two operands void *operator new(size_t s) { @@ -352,18 +352,21 @@ struct ExprMapKeyType { struct InlineAsmKeyType { InlineAsmKeyType(StringRef AsmString, StringRef Constraints, bool hasSideEffects, - bool isAlignStack) + bool isAlignStack, InlineAsm::AsmDialect asmDialect) : asm_string(AsmString), constraints(Constraints), - has_side_effects(hasSideEffects), is_align_stack(isAlignStack) {} + has_side_effects(hasSideEffects), is_align_stack(isAlignStack), + asm_dialect(asmDialect) {} std::string asm_string; std::string constraints; bool has_side_effects; bool is_align_stack; + InlineAsm::AsmDialect asm_dialect; bool operator==(const InlineAsmKeyType& that) const { return this->asm_string == that.asm_string && this->constraints == that.constraints && this->has_side_effects == that.has_side_effects && - this->is_align_stack == that.is_align_stack; + this->is_align_stack == that.is_align_stack && + this->asm_dialect == that.asm_dialect; } bool operator<(const InlineAsmKeyType& that) const { if (this->asm_string != that.asm_string) @@ -374,6 +377,8 @@ struct InlineAsmKeyType { return this->has_side_effects < that.has_side_effects; if (this->is_align_stack != that.is_align_stack) return this->is_align_stack < that.is_align_stack; + if (this->asm_dialect != that.asm_dialect) + return this->asm_dialect < that.asm_dialect; return false; } @@ -490,7 +495,8 @@ template<> struct ConstantCreator { static InlineAsm *create(PointerType *Ty, const InlineAsmKeyType &Key) { return new InlineAsm(Ty, Key.asm_string, Key.constraints, - Key.has_side_effects, Key.is_align_stack); + Key.has_side_effects, Key.is_align_stack, + Key.asm_dialect); } }; @@ -499,7 +505,8 @@ struct ConstantKeyData { typedef InlineAsmKeyType ValType; static ValType getValType(InlineAsm *Asm) { return InlineAsmKeyType(Asm->getAsmString(), Asm->getConstraintString(), - Asm->hasSideEffects(), Asm->isAlignStack()); + Asm->hasSideEffects(), Asm->isAlignStack(), + Asm->getDialect()); } }; diff --git a/lib/VMCore/Core.cpp b/lib/VMCore/Core.cpp index 972db3c..847bc13 100644 --- a/lib/VMCore/Core.cpp +++ b/lib/VMCore/Core.cpp @@ -568,6 +568,19 @@ const char *LLVMGetMDString(LLVMValueRef V, unsigned* Len) { return 0; } +unsigned LLVMGetMDNodeNumOperands(LLVMValueRef V) +{ + return cast(unwrap(V))->getNumOperands(); +} + +void LLVMGetMDNodeOperands(LLVMValueRef V, LLVMValueRef *Dest) +{ + const MDNode *N = cast(unwrap(V)); + const unsigned numOperands = N->getNumOperands(); + for (unsigned i = 0; i < numOperands; i++) + Dest[i] = wrap(N->getOperand(i)); +} + unsigned LLVMGetNamedMetadataNumOperands(LLVMModuleRef M, const char* name) { if (NamedMDNode *N = unwrap(M)->getNamedMetadata(name)) { @@ -1084,6 +1097,8 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) { return LLVMLinkOnceAnyLinkage; case GlobalValue::LinkOnceODRLinkage: return LLVMLinkOnceODRLinkage; + case GlobalValue::LinkOnceODRAutoHideLinkage: + return LLVMLinkOnceODRAutoHideLinkage; case GlobalValue::WeakAnyLinkage: return LLVMWeakAnyLinkage; case GlobalValue::WeakODRLinkage: @@ -1098,8 +1113,6 @@ LLVMLinkage LLVMGetLinkage(LLVMValueRef Global) { return LLVMLinkerPrivateLinkage; case GlobalValue::LinkerPrivateWeakLinkage: return LLVMLinkerPrivateWeakLinkage; - case GlobalValue::LinkerPrivateWeakDefAutoLinkage: - return LLVMLinkerPrivateWeakDefAutoLinkage; 
case GlobalValue::DLLImportLinkage: return LLVMDLLImportLinkage; case GlobalValue::DLLExportLinkage: @@ -1129,6 +1142,9 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) { case LLVMLinkOnceODRLinkage: GV->setLinkage(GlobalValue::LinkOnceODRLinkage); break; + case LLVMLinkOnceODRAutoHideLinkage: + GV->setLinkage(GlobalValue::LinkOnceODRAutoHideLinkage); + break; case LLVMWeakAnyLinkage: GV->setLinkage(GlobalValue::WeakAnyLinkage); break; @@ -1150,9 +1166,6 @@ void LLVMSetLinkage(LLVMValueRef Global, LLVMLinkage Linkage) { case LLVMLinkerPrivateWeakLinkage: GV->setLinkage(GlobalValue::LinkerPrivateWeakLinkage); break; - case LLVMLinkerPrivateWeakDefAutoLinkage: - GV->setLinkage(GlobalValue::LinkerPrivateWeakDefAutoLinkage); - break; case LLVMDLLImportLinkage: GV->setLinkage(GlobalValue::DLLImportLinkage); break; @@ -1368,14 +1381,20 @@ void LLVMSetGC(LLVMValueRef Fn, const char *GC) { void LLVMAddFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { Function *Func = unwrap(Fn); const AttrListPtr PAL = Func->getAttributes(); - const AttrListPtr PALnew = PAL.addAttr(~0U, Attributes(PA)); + AttrBuilder B(PA); + const AttrListPtr PALnew = + PAL.addAttr(Func->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(Func->getContext(), B)); Func->setAttributes(PALnew); } void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { Function *Func = unwrap(Fn); const AttrListPtr PAL = Func->getAttributes(); - const AttrListPtr PALnew = PAL.removeAttr(~0U, Attributes(PA)); + AttrBuilder B(PA); + const AttrListPtr PALnew = + PAL.removeAttr(Func->getContext(), AttrListPtr::FunctionIndex, + Attributes::get(Func->getContext(), B)); Func->setAttributes(PALnew); } @@ -1445,11 +1464,15 @@ LLVMValueRef LLVMGetPreviousParam(LLVMValueRef Arg) { } void LLVMAddAttribute(LLVMValueRef Arg, LLVMAttribute PA) { - unwrap(Arg)->addAttr(Attributes(PA)); + Argument *A = unwrap(Arg); + AttrBuilder B(PA); + A->addAttr(Attributes::get(A->getContext(), B)); } void LLVMRemoveAttribute(LLVMValueRef Arg, LLVMAttribute PA) { - unwrap(Arg)->removeAttr(Attributes(PA)); + Argument *A = unwrap(Arg); + AttrBuilder B(PA); + A->removeAttr(Attributes::get(A->getContext(), B)); } LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) { @@ -1461,8 +1484,10 @@ LLVMAttribute LLVMGetAttribute(LLVMValueRef Arg) { void LLVMSetParamAlignment(LLVMValueRef Arg, unsigned align) { - unwrap(Arg)->addAttr( - Attribute::constructAlignmentFromInt(align)); + AttrBuilder B; + B.addAlignmentAttr(align); + unwrap(Arg)->addAttr(Attributes:: + get(unwrap(Arg)->getContext(), B)); } /*--.. 
Operations on basic blocks ..........................................--*/ @@ -1651,23 +1676,28 @@ void LLVMSetInstructionCallConv(LLVMValueRef Instr, unsigned CC) { void LLVMAddInstrAttribute(LLVMValueRef Instr, unsigned index, LLVMAttribute PA) { CallSite Call = CallSite(unwrap(Instr)); + AttrBuilder B(PA); Call.setAttributes( - Call.getAttributes().addAttr(index, Attributes(PA))); + Call.getAttributes().addAttr(Call->getContext(), index, + Attributes::get(Call->getContext(), B))); } void LLVMRemoveInstrAttribute(LLVMValueRef Instr, unsigned index, LLVMAttribute PA) { CallSite Call = CallSite(unwrap(Instr)); + AttrBuilder B(PA); Call.setAttributes( - Call.getAttributes().removeAttr(index, Attributes(PA))); + Call.getAttributes().removeAttr(Call->getContext(), index, + Attributes::get(Call->getContext(), B))); } void LLVMSetInstrParamAlignment(LLVMValueRef Instr, unsigned index, unsigned align) { CallSite Call = CallSite(unwrap(Instr)); - Call.setAttributes( - Call.getAttributes().addAttr(index, - Attribute::constructAlignmentFromInt(align))); + AttrBuilder B; + B.addAlignmentAttr(align); + Call.setAttributes(Call.getAttributes().addAttr(Call->getContext(), index, + Attributes::get(Call->getContext(), B))); } /*--.. Operations on call instructions (only) ..............................--*/ diff --git a/lib/VMCore/DIBuilder.cpp b/lib/VMCore/DIBuilder.cpp index f5894e9..152b825 100644 --- a/lib/VMCore/DIBuilder.cpp +++ b/lib/VMCore/DIBuilder.cpp @@ -492,7 +492,8 @@ DIType DIBuilder::createStructType(DIDescriptor Context, StringRef Name, NULL, Elements, ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeLang), - Constant::getNullValue(Type::getInt32Ty(VMContext)) + ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), }; return DIType(MDNode::get(VMContext, Elts)); } @@ -550,7 +551,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name, uint64_t SizeInBits, uint64_t AlignInBits, DIArray Elements, - DIType ClassType, unsigned Flags) { + DIType ClassType) { // TAG_enumeration_type is encoded in DICompositeType format. Value *Elts[] = { GetTagConstant(VMContext, dwarf::DW_TAG_enumeration_type), @@ -561,7 +562,7 @@ DIType DIBuilder::createEnumerationType(DIDescriptor Scope, StringRef Name, ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), ConstantInt::get(Type::getInt32Ty(VMContext), 0), - ConstantInt::get(Type::getInt32Ty(VMContext), Flags), + ConstantInt::get(Type::getInt32Ty(VMContext), 0), ClassType, Elements, ConstantInt::get(Type::getInt32Ty(VMContext), 0), @@ -640,6 +641,30 @@ DIType DIBuilder::createArtificialType(DIType Ty) { return DIType(MDNode::get(VMContext, Elts)); } +/// createArtificialType - Create a new DIType with "artificial" flag set. +DIType DIBuilder::createObjectPointerType(DIType Ty) { + if (Ty.isObjectPointer()) + return Ty; + + SmallVector Elts; + MDNode *N = Ty; + assert (N && "Unexpected input DIType!"); + for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { + if (Value *V = N->getOperand(i)) + Elts.push_back(V); + else + Elts.push_back(Constant::getNullValue(Type::getInt32Ty(VMContext))); + } + + unsigned CurFlags = Ty.getFlags(); + CurFlags = CurFlags | (DIType::FlagObjectPointer | DIType::FlagArtificial); + + // Flags are stored at this slot. 
+ Elts[8] = ConstantInt::get(Type::getInt32Ty(VMContext), CurFlags); + + return DIType(MDNode::get(VMContext, Elts)); +} + /// retainType - Retain DIType in a module even if it is not referenced /// through debug info anchors. void DIBuilder::retainType(DIType T) { @@ -682,7 +707,9 @@ DIType DIBuilder::createTemporaryType(DIFile F) { /// can be RAUW'd if the full type is seen. DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, DIDescriptor Scope, DIFile F, - unsigned Line, unsigned RuntimeLang) { + unsigned Line, unsigned RuntimeLang, + uint64_t SizeInBits, + uint64_t AlignInBits) { // Create a temporary MDNode. Value *Elts[] = { GetTagConstant(VMContext, Tag), @@ -690,9 +717,8 @@ DIType DIBuilder::createForwardDecl(unsigned Tag, StringRef Name, MDString::get(VMContext, Name), F, ConstantInt::get(Type::getInt32Ty(VMContext), Line), - // To ease transition include sizes etc of 0. - ConstantInt::get(Type::getInt32Ty(VMContext), 0), - ConstantInt::get(Type::getInt32Ty(VMContext), 0), + ConstantInt::get(Type::getInt64Ty(VMContext), SizeInBits), + ConstantInt::get(Type::getInt64Ty(VMContext), AlignInBits), ConstantInt::get(Type::getInt32Ty(VMContext), 0), ConstantInt::get(Type::getInt32Ty(VMContext), DIDescriptor::FlagFwdDecl), diff --git a/lib/VMCore/DataLayout.cpp b/lib/VMCore/DataLayout.cpp new file mode 100644 index 0000000..19cf0f5 --- /dev/null +++ b/lib/VMCore/DataLayout.cpp @@ -0,0 +1,749 @@ +//===-- DataLayout.cpp - Data size & alignment routines --------------------==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines layout properties related to datatype size/offset/alignment +// information. +// +// This structure should be created once, filled in if the defaults are not +// correct and then passed around by const&. None of the members functions +// require modification to the object. +// +//===----------------------------------------------------------------------===// + +#include "llvm/DataLayout.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/MathExtras.h" +#include "llvm/Support/ManagedStatic.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Mutex.h" +#include "llvm/ADT/DenseMap.h" +#include +#include +using namespace llvm; + +// Handle the Pass registration stuff necessary to use DataLayout's. + +// Register the default SparcV9 implementation... +INITIALIZE_PASS(DataLayout, "datalayout", "Data Layout", false, true) +char DataLayout::ID = 0; + +//===----------------------------------------------------------------------===// +// Support for StructLayout +//===----------------------------------------------------------------------===// + +StructLayout::StructLayout(StructType *ST, const DataLayout &TD) { + assert(!ST->isOpaque() && "Cannot get layout of opaque structs"); + StructAlignment = 0; + StructSize = 0; + NumElements = ST->getNumElements(); + + // Loop over each of the elements, placing them in memory. + for (unsigned i = 0, e = NumElements; i != e; ++i) { + Type *Ty = ST->getElementType(i); + unsigned TyAlign = ST->isPacked() ? 1 : TD.getABITypeAlignment(Ty); + + // Add padding if necessary to align the data element properly. 
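// Editor's worked example (illustrative, not part of this patch): for a
// non-packed struct { i8, i32 } with the default i32 ABI alignment of 4, the
// i8 lands at offset 0, StructSize is padded from 1 up to 4 before the i32 is
// placed at offset 4, and the final size is 8 with StructAlignment 4.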
+ if ((StructSize & (TyAlign-1)) != 0) + StructSize = DataLayout::RoundUpAlignment(StructSize, TyAlign); + + // Keep track of maximum alignment constraint. + StructAlignment = std::max(TyAlign, StructAlignment); + + MemberOffsets[i] = StructSize; + StructSize += TD.getTypeAllocSize(Ty); // Consume space for this data item + } + + // Empty structures have alignment of 1 byte. + if (StructAlignment == 0) StructAlignment = 1; + + // Add padding to the end of the struct so that it could be put in an array + // and all array elements would be aligned correctly. + if ((StructSize & (StructAlignment-1)) != 0) + StructSize = DataLayout::RoundUpAlignment(StructSize, StructAlignment); +} + + +/// getElementContainingOffset - Given a valid offset into the structure, +/// return the structure index that contains it. +unsigned StructLayout::getElementContainingOffset(uint64_t Offset) const { + const uint64_t *SI = + std::upper_bound(&MemberOffsets[0], &MemberOffsets[NumElements], Offset); + assert(SI != &MemberOffsets[0] && "Offset not in structure type!"); + --SI; + assert(*SI <= Offset && "upper_bound didn't work"); + assert((SI == &MemberOffsets[0] || *(SI-1) <= Offset) && + (SI+1 == &MemberOffsets[NumElements] || *(SI+1) > Offset) && + "Upper bound didn't work!"); + + // Multiple fields can have the same offset if any of them are zero sized. + // For example, in { i32, [0 x i32], i32 }, searching for offset 4 will stop + // at the i32 element, because it is the last element at that offset. This is + // the right one to return, because anything after it will have a higher + // offset, implying that this element is non-empty. + return SI-&MemberOffsets[0]; +} + +//===----------------------------------------------------------------------===// +// LayoutAlignElem, LayoutAlign support +//===----------------------------------------------------------------------===// + +LayoutAlignElem +LayoutAlignElem::get(AlignTypeEnum align_type, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + LayoutAlignElem retval; + retval.AlignType = align_type; + retval.ABIAlign = abi_align; + retval.PrefAlign = pref_align; + retval.TypeBitWidth = bit_width; + return retval; +} + +bool +LayoutAlignElem::operator==(const LayoutAlignElem &rhs) const { + return (AlignType == rhs.AlignType + && ABIAlign == rhs.ABIAlign + && PrefAlign == rhs.PrefAlign + && TypeBitWidth == rhs.TypeBitWidth); +} + +const LayoutAlignElem +DataLayout::InvalidAlignmentElem = + LayoutAlignElem::get((AlignTypeEnum) -1, 0, 0, 0); + +//===----------------------------------------------------------------------===// +// PointerAlignElem, PointerAlign support +//===----------------------------------------------------------------------===// + +PointerAlignElem +PointerAlignElem::get(uint32_t addr_space, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + PointerAlignElem retval; + retval.AddressSpace = addr_space; + retval.ABIAlign = abi_align; + retval.PrefAlign = pref_align; + retval.TypeBitWidth = bit_width; + return retval; +} + +bool +PointerAlignElem::operator==(const PointerAlignElem &rhs) const { + return (ABIAlign == rhs.ABIAlign + && AddressSpace == rhs.AddressSpace + && PrefAlign == rhs.PrefAlign + && TypeBitWidth == rhs.TypeBitWidth); +} + +const PointerAlignElem +DataLayout::InvalidPointerElem = PointerAlignElem::get(~0U, 0U, 0U, 0U); + 
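// Editor's illustrative aside (not part of this patch; the concrete string is
// only an example): a module data layout string such as
//   "e-p:64:64:64-i32:32:32-i64:64:64-f64:64:64-n8:16:32:64-S128"
// declares little-endian byte order, 64-bit pointers with 8-byte ABI and
// preferred alignment, per-type alignments, the native integer widths, and a
// 16-byte natural stack alignment. parseSpecifier() below turns each
// dash-separated token into setAlignment()/setPointerAlignment() calls and
// the LegalIntWidths/StackNaturalAlign fields, overriding the defaults
// installed by init().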
+//===----------------------------------------------------------------------===// +// DataLayout Class Implementation +//===----------------------------------------------------------------------===// + +/// getInt - Get an integer ignoring errors. +static int getInt(StringRef R) { + int Result = 0; + R.getAsInteger(10, Result); + return Result; +} + +void DataLayout::init() { + initializeDataLayoutPass(*PassRegistry::getPassRegistry()); + + LayoutMap = 0; + LittleEndian = false; + StackNaturalAlign = 0; + + // Default alignments + setAlignment(INTEGER_ALIGN, 1, 1, 1); // i1 + setAlignment(INTEGER_ALIGN, 1, 1, 8); // i8 + setAlignment(INTEGER_ALIGN, 2, 2, 16); // i16 + setAlignment(INTEGER_ALIGN, 4, 4, 32); // i32 + setAlignment(INTEGER_ALIGN, 4, 8, 64); // i64 + setAlignment(FLOAT_ALIGN, 2, 2, 16); // half + setAlignment(FLOAT_ALIGN, 4, 4, 32); // float + setAlignment(FLOAT_ALIGN, 8, 8, 64); // double + setAlignment(FLOAT_ALIGN, 16, 16, 128); // ppcf128, quad, ... + setAlignment(VECTOR_ALIGN, 8, 8, 64); // v2i32, v1i64, ... + setAlignment(VECTOR_ALIGN, 16, 16, 128); // v16i8, v8i16, v4i32, ... + setAlignment(AGGREGATE_ALIGN, 0, 8, 0); // struct + setPointerAlignment(0, 8, 8, 8); +} + +std::string DataLayout::parseSpecifier(StringRef Desc, DataLayout *td) { + + if (td) + td->init(); + + while (!Desc.empty()) { + std::pair Split = Desc.split('-'); + StringRef Token = Split.first; + Desc = Split.second; + + if (Token.empty()) + continue; + + Split = Token.split(':'); + StringRef Specifier = Split.first; + Token = Split.second; + + assert(!Specifier.empty() && "Can't be empty here"); + + switch (Specifier[0]) { + case 'E': + if (td) + td->LittleEndian = false; + break; + case 'e': + if (td) + td->LittleEndian = true; + break; + case 'p': { + int AddrSpace = 0; + if (Specifier.size() > 1) { + AddrSpace = getInt(Specifier.substr(1)); + if (AddrSpace < 0 || AddrSpace > (1 << 24)) + return "Invalid address space, must be a positive 24bit integer"; + } + Split = Token.split(':'); + int PointerMemSizeBits = getInt(Split.first); + if (PointerMemSizeBits < 0 || PointerMemSizeBits % 8 != 0) + return "invalid pointer size, must be a positive 8-bit multiple"; + + // Pointer ABI alignment. + Split = Split.second.split(':'); + int PointerABIAlignBits = getInt(Split.first); + if (PointerABIAlignBits < 0 || PointerABIAlignBits % 8 != 0) { + return "invalid pointer ABI alignment, " + "must be a positive 8-bit multiple"; + } + + // Pointer preferred alignment. 
+ Split = Split.second.split(':'); + int PointerPrefAlignBits = getInt(Split.first); + if (PointerPrefAlignBits < 0 || PointerPrefAlignBits % 8 != 0) { + return "invalid pointer preferred alignment, " + "must be a positive 8-bit multiple"; + } + + if (PointerPrefAlignBits == 0) + PointerPrefAlignBits = PointerABIAlignBits; + if (td) + td->setPointerAlignment(AddrSpace, PointerABIAlignBits/8, + PointerPrefAlignBits/8, PointerMemSizeBits/8); + break; + } + case 'i': + case 'v': + case 'f': + case 'a': + case 's': { + AlignTypeEnum AlignType; + char field = Specifier[0]; + switch (field) { + default: + case 'i': AlignType = INTEGER_ALIGN; break; + case 'v': AlignType = VECTOR_ALIGN; break; + case 'f': AlignType = FLOAT_ALIGN; break; + case 'a': AlignType = AGGREGATE_ALIGN; break; + case 's': AlignType = STACK_ALIGN; break; + } + int Size = getInt(Specifier.substr(1)); + if (Size < 0) { + return std::string("invalid ") + field + "-size field, " + "must be positive"; + } + + Split = Token.split(':'); + int ABIAlignBits = getInt(Split.first); + if (ABIAlignBits < 0 || ABIAlignBits % 8 != 0) { + return std::string("invalid ") + field +"-abi-alignment field, " + "must be a positive 8-bit multiple"; + } + unsigned ABIAlign = ABIAlignBits / 8; + + Split = Split.second.split(':'); + + int PrefAlignBits = getInt(Split.first); + if (PrefAlignBits < 0 || PrefAlignBits % 8 != 0) { + return std::string("invalid ") + field +"-preferred-alignment field, " + "must be a positive 8-bit multiple"; + } + unsigned PrefAlign = PrefAlignBits / 8; + if (PrefAlign == 0) + PrefAlign = ABIAlign; + + if (td) + td->setAlignment(AlignType, ABIAlign, PrefAlign, Size); + break; + } + case 'n': // Native integer types. + Specifier = Specifier.substr(1); + do { + int Width = getInt(Specifier); + if (Width <= 0) { + return std::string("invalid native integer size \'") + + Specifier.str() + "\', must be a positive integer."; + } + if (td && Width != 0) + td->LegalIntWidths.push_back(Width); + Split = Token.split(':'); + Specifier = Split.first; + Token = Split.second; + } while (!Specifier.empty() || !Token.empty()); + break; + case 'S': { // Stack natural alignment. + int StackNaturalAlignBits = getInt(Specifier.substr(1)); + if (StackNaturalAlignBits < 0 || StackNaturalAlignBits % 8 != 0) { + return "invalid natural stack alignment (S-field), " + "must be a positive 8-bit multiple"; + } + if (td) + td->StackNaturalAlign = StackNaturalAlignBits / 8; + break; + } + default: + break; + } + } + + return ""; +} + +/// Default ctor. +/// +/// @note This has to exist, because this is a pass, but it should never be +/// used. +DataLayout::DataLayout() : ImmutablePass(ID) { + report_fatal_error("Bad DataLayout ctor used. " + "Tool did not specify a DataLayout to use?"); +} + +DataLayout::DataLayout(const Module *M) + : ImmutablePass(ID) { + std::string errMsg = parseSpecifier(M->getDataLayout(), this); + assert(errMsg == "" && "Module M has malformed data layout string."); + (void)errMsg; +} + +void +DataLayout::setAlignment(AlignTypeEnum align_type, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + assert(pref_align < (1 << 16) && "Alignment doesn't fit in bitfield"); + assert(bit_width < (1 << 24) && "Bit width doesn't fit in bitfield"); + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + if (Alignments[i].AlignType == (unsigned)align_type && + Alignments[i].TypeBitWidth == bit_width) { + // Update the abi, preferred alignments. 
+ Alignments[i].ABIAlign = abi_align; + Alignments[i].PrefAlign = pref_align; + return; + } + } + + Alignments.push_back(LayoutAlignElem::get(align_type, abi_align, + pref_align, bit_width)); +} + +void +DataLayout::setPointerAlignment(uint32_t addr_space, unsigned abi_align, + unsigned pref_align, uint32_t bit_width) { + assert(abi_align <= pref_align && "Preferred alignment worse than ABI!"); + DenseMap::iterator val = Pointers.find(addr_space); + if (val == Pointers.end()) { + Pointers[addr_space] = PointerAlignElem::get(addr_space, + abi_align, pref_align, bit_width); + } else { + val->second.ABIAlign = abi_align; + val->second.PrefAlign = pref_align; + val->second.TypeBitWidth = bit_width; + } +} + +/// getAlignmentInfo - Return the alignment (either ABI if ABIInfo = true or +/// preferred if ABIInfo = false) the layout wants for the specified datatype. +unsigned DataLayout::getAlignmentInfo(AlignTypeEnum AlignType, + uint32_t BitWidth, bool ABIInfo, + Type *Ty) const { + // Check to see if we have an exact match and remember the best match we see. + int BestMatchIdx = -1; + int LargestInt = -1; + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + if (Alignments[i].AlignType == (unsigned)AlignType && + Alignments[i].TypeBitWidth == BitWidth) + return ABIInfo ? Alignments[i].ABIAlign : Alignments[i].PrefAlign; + + // The best match so far depends on what we're looking for. + if (AlignType == INTEGER_ALIGN && + Alignments[i].AlignType == INTEGER_ALIGN) { + // The "best match" for integers is the smallest size that is larger than + // the BitWidth requested. + if (Alignments[i].TypeBitWidth > BitWidth && (BestMatchIdx == -1 || + Alignments[i].TypeBitWidth < Alignments[BestMatchIdx].TypeBitWidth)) + BestMatchIdx = i; + // However, if there isn't one that's larger, then we must use the + // largest one we have (see below) + if (LargestInt == -1 || + Alignments[i].TypeBitWidth > Alignments[LargestInt].TypeBitWidth) + LargestInt = i; + } + } + + // Okay, we didn't find an exact solution. Fall back here depending on what + // is being looked for. + if (BestMatchIdx == -1) { + // If we didn't find an integer alignment, fall back on most conservative. + if (AlignType == INTEGER_ALIGN) { + BestMatchIdx = LargestInt; + } else { + assert(AlignType == VECTOR_ALIGN && "Unknown alignment type!"); + + // By default, use natural alignment for vector types. This is consistent + // with what clang and llvm-gcc do. + unsigned Align = getTypeAllocSize(cast(Ty)->getElementType()); + Align *= cast(Ty)->getNumElements(); + // If the alignment is not a power of 2, round up to the next power of 2. + // This happens for non-power-of-2 length vectors. + if (Align & (Align-1)) + Align = NextPowerOf2(Align); + return Align; + } + } + + // Since we got a "best match" index, just return it. + return ABIInfo ? Alignments[BestMatchIdx].ABIAlign + : Alignments[BestMatchIdx].PrefAlign; +} + +namespace { + +class StructLayoutMap { + typedef DenseMap LayoutInfoTy; + LayoutInfoTy LayoutInfo; + +public: + virtual ~StructLayoutMap() { + // Remove any layouts. + for (LayoutInfoTy::iterator I = LayoutInfo.begin(), E = LayoutInfo.end(); + I != E; ++I) { + StructLayout *Value = I->second; + Value->~StructLayout(); + free(Value); + } + } + + StructLayout *&operator[](StructType *STy) { + return LayoutInfo[STy]; + } + + // for debugging... 
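For integer widths with no exact table entry, the loop in getAlignmentInfo() above picks the smallest wider integer entry and only falls back to the largest known entry when nothing wider exists. A small sketch against the default table installed by init(), assuming no overriding entries from the module's layout string:

void checkOddIntegerAlignments(const DataLayout &DL) {
  // i48 has no exact entry; the next wider entry is i64, whose default ABI
  // alignment is 4 bytes.
  unsigned A48 = DL.getABIIntegerTypeAlignment(48);
  // i128 is wider than every entry, so the largest integer entry (i64) is
  // reused, again giving 4 bytes with the defaults.
  unsigned A128 = DL.getABIIntegerTypeAlignment(128);
  assert(A48 == 4 && A128 == 4 && "defaults from init() expected");
  (void)A48; (void)A128;
}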
+ virtual void dump() const {} +}; + +} // end anonymous namespace + +DataLayout::~DataLayout() { + delete static_cast(LayoutMap); +} + +const StructLayout *DataLayout::getStructLayout(StructType *Ty) const { + if (!LayoutMap) + LayoutMap = new StructLayoutMap(); + + StructLayoutMap *STM = static_cast(LayoutMap); + StructLayout *&SL = (*STM)[Ty]; + if (SL) return SL; + + // Otherwise, create the struct layout. Because it is variable length, we + // malloc it, then use placement new. + int NumElts = Ty->getNumElements(); + StructLayout *L = + (StructLayout *)malloc(sizeof(StructLayout)+(NumElts-1) * sizeof(uint64_t)); + + // Set SL before calling StructLayout's ctor. The ctor could cause other + // entries to be added to TheMap, invalidating our reference. + SL = L; + + new (L) StructLayout(Ty, *this); + + return L; +} + +std::string DataLayout::getStringRepresentation() const { + std::string Result; + raw_string_ostream OS(Result); + + OS << (LittleEndian ? "e" : "E"); + SmallVector addrSpaces; + // Lets get all of the known address spaces and sort them + // into increasing order so that we can emit the string + // in a cleaner format. + for (DenseMap::const_iterator + pib = Pointers.begin(), pie = Pointers.end(); + pib != pie; ++pib) { + addrSpaces.push_back(pib->first); + } + std::sort(addrSpaces.begin(), addrSpaces.end()); + for (SmallVector::iterator asb = addrSpaces.begin(), + ase = addrSpaces.end(); asb != ase; ++asb) { + const PointerAlignElem &PI = Pointers.find(*asb)->second; + OS << "-p"; + if (PI.AddressSpace) { + OS << PI.AddressSpace; + } + OS << ":" << PI.TypeBitWidth*8 << ':' << PI.ABIAlign*8 + << ':' << PI.PrefAlign*8; + } + OS << "-S" << StackNaturalAlign*8; + + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) { + const LayoutAlignElem &AI = Alignments[i]; + OS << '-' << (char)AI.AlignType << AI.TypeBitWidth << ':' + << AI.ABIAlign*8 << ':' << AI.PrefAlign*8; + } + + if (!LegalIntWidths.empty()) { + OS << "-n" << (unsigned)LegalIntWidths[0]; + + for (unsigned i = 1, e = LegalIntWidths.size(); i != e; ++i) + OS << ':' << (unsigned)LegalIntWidths[i]; + } + return OS.str(); +} + + +uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const { + assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); + switch (Ty->getTypeID()) { + case Type::LabelTyID: + return getPointerSizeInBits(0); + case Type::PointerTyID: { + unsigned AS = dyn_cast(Ty)->getAddressSpace(); + return getPointerSizeInBits(AS); + } + case Type::ArrayTyID: { + ArrayType *ATy = cast(Ty); + return getTypeAllocSizeInBits(ATy->getElementType())*ATy->getNumElements(); + } + case Type::StructTyID: + // Get the layout annotation... which is lazily created on demand. + return getStructLayout(cast(Ty))->getSizeInBits(); + case Type::IntegerTyID: + return cast(Ty)->getBitWidth(); + case Type::VoidTyID: + return 8; + case Type::HalfTyID: + return 16; + case Type::FloatTyID: + return 32; + case Type::DoubleTyID: + case Type::X86_MMXTyID: + return 64; + case Type::PPC_FP128TyID: + case Type::FP128TyID: + return 128; + // In memory objects this is always aligned to a higher boundary, but + // only 80 bits contain information. + case Type::X86_FP80TyID: + return 80; + case Type::VectorTyID: { + VectorType *VTy = cast(Ty); + return VTy->getNumElements()*getTypeSizeInBits(VTy->getElementType()); + } + default: + llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type"); + } +} + +/*! + \param abi_or_pref Flag that determines which alignment is returned. 
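getStringRepresentation() above emits the same dash-separated grammar that parseSpecifier() consumes, so the two can be sanity-checked against each other. A sketch under the same assumptions as the earlier snippets:

void checkRoundTrip(const DataLayout &DL) {
  // parseSpecifier returns an empty string on success and a diagnostic
  // otherwise, so the emitted representation should always re-parse cleanly.
  std::string Repr = DL.getStringRepresentation();
  std::string Err = DataLayout::parseSpecifier(Repr, /*td=*/0);
  assert(Err.empty() && "own representation failed to re-parse");
  (void)Err;
}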
true + returns the ABI alignment, false returns the preferred alignment. + \param Ty The underlying type for which alignment is determined. + + Get the ABI (\a abi_or_pref == true) or preferred alignment (\a abi_or_pref + == false) for the requested type \a Ty. + */ +unsigned DataLayout::getAlignment(Type *Ty, bool abi_or_pref) const { + int AlignType = -1; + + assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!"); + switch (Ty->getTypeID()) { + // Early escape for the non-numeric types. + case Type::LabelTyID: + return (abi_or_pref + ? getPointerABIAlignment(0) + : getPointerPrefAlignment(0)); + case Type::PointerTyID: { + unsigned AS = dyn_cast(Ty)->getAddressSpace(); + return (abi_or_pref + ? getPointerABIAlignment(AS) + : getPointerPrefAlignment(AS)); + } + case Type::ArrayTyID: + return getAlignment(cast(Ty)->getElementType(), abi_or_pref); + + case Type::StructTyID: { + // Packed structure types always have an ABI alignment of one. + if (cast(Ty)->isPacked() && abi_or_pref) + return 1; + + // Get the layout annotation... which is lazily created on demand. + const StructLayout *Layout = getStructLayout(cast(Ty)); + unsigned Align = getAlignmentInfo(AGGREGATE_ALIGN, 0, abi_or_pref, Ty); + return std::max(Align, Layout->getAlignment()); + } + case Type::IntegerTyID: + case Type::VoidTyID: + AlignType = INTEGER_ALIGN; + break; + case Type::HalfTyID: + case Type::FloatTyID: + case Type::DoubleTyID: + // PPC_FP128TyID and FP128TyID have different data contents, but the + // same size and alignment, so they look the same here. + case Type::PPC_FP128TyID: + case Type::FP128TyID: + case Type::X86_FP80TyID: + AlignType = FLOAT_ALIGN; + break; + case Type::X86_MMXTyID: + case Type::VectorTyID: + AlignType = VECTOR_ALIGN; + break; + default: + llvm_unreachable("Bad type for getAlignment!!!"); + } + + return getAlignmentInfo((AlignTypeEnum)AlignType, getTypeSizeInBits(Ty), + abi_or_pref, Ty); +} + +unsigned DataLayout::getABITypeAlignment(Type *Ty) const { + return getAlignment(Ty, true); +} + +/// getABIIntegerTypeAlignment - Return the minimum ABI-required alignment for +/// an integer type of the specified bitwidth. +unsigned DataLayout::getABIIntegerTypeAlignment(unsigned BitWidth) const { + return getAlignmentInfo(INTEGER_ALIGN, BitWidth, true, 0); +} + + +unsigned DataLayout::getCallFrameTypeAlignment(Type *Ty) const { + for (unsigned i = 0, e = Alignments.size(); i != e; ++i) + if (Alignments[i].AlignType == STACK_ALIGN) + return Alignments[i].ABIAlign; + + return getABITypeAlignment(Ty); +} + +unsigned DataLayout::getPrefTypeAlignment(Type *Ty) const { + return getAlignment(Ty, false); +} + +unsigned DataLayout::getPreferredTypeAlignmentShift(Type *Ty) const { + unsigned Align = getPrefTypeAlignment(Ty); + assert(!(Align & (Align-1)) && "Alignment is not a power of two!"); + return Log2_32(Align); +} + +/// getIntPtrType - Return an integer type with size at least as big as that +/// of a pointer in the given address space. +IntegerType *DataLayout::getIntPtrType(LLVMContext &C, + unsigned AddressSpace) const { + return IntegerType::get(C, getPointerSizeInBits(AddressSpace)); +} + +/// getIntPtrType - Return an integer (vector of integer) type with size at +/// least as big as that of a pointer of the given pointer (vector of pointer) +/// type. 
+Type *DataLayout::getIntPtrType(Type *Ty) const { + assert(Ty->isPtrOrPtrVectorTy() && + "Expected a pointer or pointer vector type."); + unsigned NumBits = getTypeSizeInBits(Ty->getScalarType()); + IntegerType *IntTy = IntegerType::get(Ty->getContext(), NumBits); + if (VectorType *VecTy = dyn_cast(Ty)) + return VectorType::get(IntTy, VecTy->getNumElements()); + return IntTy; +} + +uint64_t DataLayout::getIndexedOffset(Type *ptrTy, + ArrayRef Indices) const { + Type *Ty = ptrTy; + assert(Ty->isPointerTy() && "Illegal argument for getIndexedOffset()"); + uint64_t Result = 0; + + generic_gep_type_iterator + TI = gep_type_begin(ptrTy, Indices); + for (unsigned CurIDX = 0, EndIDX = Indices.size(); CurIDX != EndIDX; + ++CurIDX, ++TI) { + if (StructType *STy = dyn_cast(*TI)) { + assert(Indices[CurIDX]->getType() == + Type::getInt32Ty(ptrTy->getContext()) && + "Illegal struct idx"); + unsigned FieldNo = cast(Indices[CurIDX])->getZExtValue(); + + // Get structure layout information... + const StructLayout *Layout = getStructLayout(STy); + + // Add in the offset, as calculated by the structure layout info... + Result += Layout->getElementOffset(FieldNo); + + // Update Ty to refer to current element + Ty = STy->getElementType(FieldNo); + } else { + // Update Ty to refer to current element + Ty = cast(Ty)->getElementType(); + + // Get the array index and the size of each array element. + if (int64_t arrayIdx = cast(Indices[CurIDX])->getSExtValue()) + Result += (uint64_t)arrayIdx * getTypeAllocSize(Ty); + } + } + + return Result; +} + +/// getPreferredAlignment - Return the preferred alignment of the specified +/// global. This includes an explicitly requested alignment (if the global +/// has one). +unsigned DataLayout::getPreferredAlignment(const GlobalVariable *GV) const { + Type *ElemType = GV->getType()->getElementType(); + unsigned Alignment = getPrefTypeAlignment(ElemType); + unsigned GVAlignment = GV->getAlignment(); + if (GVAlignment >= Alignment) { + Alignment = GVAlignment; + } else if (GVAlignment != 0) { + Alignment = std::max(GVAlignment, getABITypeAlignment(ElemType)); + } + + if (GV->hasInitializer() && GVAlignment == 0) { + if (Alignment < 16) { + // If the global is not external, see if it is large. If so, give it a + // larger alignment. + if (getTypeSizeInBits(ElemType) > 128) + Alignment = 16; // 16-byte alignment. + } + } + return Alignment; +} + +/// getPreferredAlignmentLog - Return the preferred alignment of the +/// specified global, returned in log form. This includes an explicitly +/// requested alignment (if the global has one). 
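getIndexedOffset() above walks the indices the way a getelementptr does: struct indices add the StructLayout element offset, sequential indices add a scaled allocation size. A worked sketch, assuming a simple {i32, double} struct, the default alignments from init(), and the usual Type/Constant headers:

uint64_t exampleGEPOffset(const DataLayout &DL, LLVMContext &C) {
  // struct { i32; double } -- with the defaults, the double needs 8-byte ABI
  // alignment, so the GEP indices "0, 1" resolve to an offset of 8 bytes.
  Type *Elts[] = { Type::getInt32Ty(C), Type::getDoubleTy(C) };
  StructType *STy = StructType::get(C, Elts);
  Value *Idx[] = {
    ConstantInt::get(Type::getInt32Ty(C), 0),
    ConstantInt::get(Type::getInt32Ty(C), 1)
  };
  return DL.getIndexedOffset(PointerType::getUnqual(STy), Idx);
}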
+unsigned DataLayout::getPreferredAlignmentLog(const GlobalVariable *GV) const { + return Log2_32(getPreferredAlignment(GV)); +} diff --git a/lib/VMCore/DebugInfo.cpp b/lib/VMCore/DebugInfo.cpp index c8f8f7d..3029ce2 100644 --- a/lib/VMCore/DebugInfo.cpp +++ b/lib/VMCore/DebugInfo.cpp @@ -111,6 +111,16 @@ Function *DIDescriptor::getFunctionField(unsigned Elt) const { return 0; } +void DIDescriptor::replaceFunctionField(unsigned Elt, Function *F) { + if (DbgNode == 0) + return; + + if (Elt < DbgNode->getNumOperands()) { + MDNode *Node = const_cast(DbgNode); + Node->replaceOperandWith(Elt, F); + } +} + unsigned DIVariable::getNumAddrElements() const { if (getVersion() <= LLVMDebugVersion8) return DbgNode->getNumOperands()-6; diff --git a/lib/VMCore/Dominators.cpp b/lib/VMCore/Dominators.cpp index 60bdeac..77b2403 100644 --- a/lib/VMCore/Dominators.cpp +++ b/lib/VMCore/Dominators.cpp @@ -161,6 +161,11 @@ bool DominatorTree::dominates(const Instruction *Def, bool DominatorTree::dominates(const BasicBlockEdge &BBE, const BasicBlock *UseBB) const { + // Assert that we have a single edge. We could handle them by simply + // returning false, but since isSingleEdge is linear on the number of + // edges, the callers can normally handle them more efficiently. + assert(BBE.isSingleEdge()); + // If the BB the edge ends in doesn't dominate the use BB, then the // edge also doesn't. const BasicBlock *Start = BBE.getStart(); @@ -207,6 +212,11 @@ bool DominatorTree::dominates(const BasicBlockEdge &BBE, bool DominatorTree::dominates(const BasicBlockEdge &BBE, const Use &U) const { + // Assert that we have a single edge. We could handle them by simply + // returning false, but since isSingleEdge is linear on the number of + // edges, the callers can normally handle them more efficiently. + assert(BBE.isSingleEdge()); + Instruction *UserInst = cast(U.getUser()); // A PHI in the end of the edge is dominated by it. PHINode *PN = dyn_cast(UserInst); diff --git a/lib/VMCore/Function.cpp b/lib/VMCore/Function.cpp index 2e0b316..9c4f2d9 100644 --- a/lib/VMCore/Function.cpp +++ b/lib/VMCore/Function.cpp @@ -78,7 +78,8 @@ unsigned Argument::getArgNo() const { /// in its containing function. bool Argument::hasByValAttr() const { if (!getType()->isPointerTy()) return false; - return getParent()->paramHasAttr(getArgNo()+1, Attribute::ByVal); + return getParent()->getParamAttributes(getArgNo()+1). + hasAttribute(Attributes::ByVal); } unsigned Argument::getParamAlignment() const { @@ -91,21 +92,24 @@ unsigned Argument::getParamAlignment() const { /// it in its containing function. bool Argument::hasNestAttr() const { if (!getType()->isPointerTy()) return false; - return getParent()->paramHasAttr(getArgNo()+1, Attribute::Nest); + return getParent()->getParamAttributes(getArgNo()+1). + hasAttribute(Attributes::Nest); } /// hasNoAliasAttr - Return true if this argument has the noalias attribute on /// it in its containing function. bool Argument::hasNoAliasAttr() const { if (!getType()->isPointerTy()) return false; - return getParent()->paramHasAttr(getArgNo()+1, Attribute::NoAlias); + return getParent()->getParamAttributes(getArgNo()+1). + hasAttribute(Attributes::NoAlias); } /// hasNoCaptureAttr - Return true if this argument has the nocapture attribute /// on it in its containing function. bool Argument::hasNoCaptureAttr() const { if (!getType()->isPointerTy()) return false; - return getParent()->paramHasAttr(getArgNo()+1, Attribute::NoCapture); + return getParent()->getParamAttributes(getArgNo()+1). 
+ hasAttribute(Attributes::NoCapture); } /// hasSRetAttr - Return true if this argument has the sret attribute on @@ -114,7 +118,8 @@ bool Argument::hasStructRetAttr() const { if (!getType()->isPointerTy()) return false; if (this != getParent()->arg_begin()) return false; // StructRet param must be first param - return getParent()->paramHasAttr(1, Attribute::StructRet); + return getParent()->getParamAttributes(1). + hasAttribute(Attributes::StructRet); } /// addAttr - Add a Attribute to an argument @@ -180,7 +185,7 @@ Function::Function(FunctionType *Ty, LinkageTypes Linkage, // Ensure intrinsics have the right parameter attributes. if (unsigned IID = getIntrinsicID()) - setAttributes(Intrinsic::getAttributes(Intrinsic::ID(IID))); + setAttributes(Intrinsic::getAttributes(getContext(), Intrinsic::ID(IID))); } @@ -244,13 +249,13 @@ void Function::dropAllReferences() { void Function::addAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.addAttr(i, attr); + PAL = PAL.addAttr(getContext(), i, attr); setAttributes(PAL); } void Function::removeAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.removeAttr(i, attr); + PAL = PAL.removeAttr(getContext(), i, attr); setAttributes(PAL); } diff --git a/lib/VMCore/GCOV.cpp b/lib/VMCore/GCOV.cpp index 003a5d4..ea2f0a6 100644 --- a/lib/VMCore/GCOV.cpp +++ b/lib/VMCore/GCOV.cpp @@ -28,19 +28,19 @@ GCOVFile::~GCOVFile() { } /// isGCDAFile - Return true if Format identifies a .gcda file. -static bool isGCDAFile(GCOVFormat Format) { - return Format == GCDA_402 || Format == GCDA_404; +static bool isGCDAFile(GCOV::GCOVFormat Format) { + return Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404; } /// isGCNOFile - Return true if Format identifies a .gcno file. -static bool isGCNOFile(GCOVFormat Format) { - return Format == GCNO_402 || Format == GCNO_404; +static bool isGCNOFile(GCOV::GCOVFormat Format) { + return Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404; } /// read - Read GCOV buffer. bool GCOVFile::read(GCOVBuffer &Buffer) { - GCOVFormat Format = Buffer.readGCOVFormat(); - if (Format == InvalidGCOV) + GCOV::GCOVFormat Format = Buffer.readGCOVFormat(); + if (Format == GCOV::InvalidGCOV) return false; unsigned i = 0; @@ -48,7 +48,7 @@ bool GCOVFile::read(GCOVBuffer &Buffer) { GCOVFunction *GFun = NULL; if (isGCDAFile(Format)) { // Use existing function while reading .gcda file. - assert (i < Functions.size() && ".gcda data does not match .gcno data"); + assert(i < Functions.size() && ".gcda data does not match .gcno data"); GFun = Functions[i]; } else if (isGCNOFile(Format)){ GFun = new GCOVFunction(); @@ -87,21 +87,21 @@ GCOVFunction::~GCOVFunction() { /// read - Read a aunction from the buffer. Return false if buffer cursor /// does not point to a function tag. 
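The Argument/Function changes above are a mechanical migration: the old bit-mask test paramHasAttr(N, Attribute::X) becomes a query on the parameter's attribute set, getParamAttributes(N).hasAttribute(Attributes::X). A hedged sketch of the same idiom in user code, assuming an Argument whose parent function is set:

// Returns true if the given argument is marked 'byval', using the new
// Attributes query style; parameter attribute slots are 1-based, with slot 0
// reserved for the return value.
bool argIsByVal(const Argument &A) {
  return A.getParent()->getParamAttributes(A.getArgNo() + 1)
           .hasAttribute(Attributes::ByVal);
}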
-bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) { +bool GCOVFunction::read(GCOVBuffer &Buff, GCOV::GCOVFormat Format) { if (!Buff.readFunctionTag()) return false; Buff.readInt(); // Function header length Ident = Buff.readInt(); Buff.readInt(); // Checksum #1 - if (Format != GCNO_402) + if (Format != GCOV::GCNO_402) Buff.readInt(); // Checksum #2 Name = Buff.readString(); - if (Format == GCNO_402 || Format == GCNO_404) + if (Format == GCOV::GCNO_402 || Format == GCOV::GCNO_404) Filename = Buff.readString(); - if (Format == GCDA_402 || Format == GCDA_404) { + if (Format == GCOV::GCDA_402 || Format == GCOV::GCDA_404) { Buff.readArcTag(); uint32_t Count = Buff.readInt() / 2; for (unsigned i = 0, e = Count; i != e; ++i) { @@ -113,7 +113,9 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) { LineNumber = Buff.readInt(); // read blocks. - assert (Buff.readBlockTag() && "Block Tag not found!"); + bool BlockTagFound = Buff.readBlockTag(); + (void)BlockTagFound; + assert(BlockTagFound && "Block Tag not found!"); uint32_t BlockCount = Buff.readInt(); for (int i = 0, e = BlockCount; i != e; ++i) { Buff.readInt(); // Block flags; @@ -124,7 +126,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) { while (Buff.readEdgeTag()) { uint32_t EdgeCount = (Buff.readInt() - 1) / 2; uint32_t BlockNo = Buff.readInt(); - assert (BlockNo < BlockCount && "Unexpected Block number!"); + assert(BlockNo < BlockCount && "Unexpected Block number!"); for (int i = 0, e = EdgeCount; i != e; ++i) { Blocks[BlockNo]->addEdge(Buff.readInt()); Buff.readInt(); // Edge flag @@ -136,7 +138,7 @@ bool GCOVFunction::read(GCOVBuffer &Buff, GCOVFormat Format) { uint32_t LineTableLength = Buff.readInt(); uint32_t Size = Buff.getCursor() + LineTableLength*4; uint32_t BlockNo = Buff.readInt(); - assert (BlockNo < BlockCount && "Unexpected Block number!"); + assert(BlockNo < BlockCount && "Unexpected Block number!"); GCOVBlock *Block = Blocks[BlockNo]; Buff.readInt(); // flag while (Buff.getCursor() != (Size - 4)) { diff --git a/lib/VMCore/IRBuilder.cpp b/lib/VMCore/IRBuilder.cpp index 5c4e6d9..04f08fe 100644 --- a/lib/VMCore/IRBuilder.cpp +++ b/lib/VMCore/IRBuilder.cpp @@ -80,7 +80,7 @@ CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align, CallInst *IRBuilderBase:: CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align, - bool isVolatile, MDNode *TBAATag) { + bool isVolatile, MDNode *TBAATag, MDNode *TBAAStructTag) { Dst = getCastedInt8PtrValue(Dst); Src = getCastedInt8PtrValue(Src); @@ -94,6 +94,10 @@ CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align, // Set the TBAA info if present. if (TBAATag) CI->setMetadata(LLVMContext::MD_tbaa, TBAATag); + + // Set the TBAA Struct info if present. 
+ if (TBAAStructTag) + CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag); return CI; } diff --git a/lib/VMCore/InlineAsm.cpp b/lib/VMCore/InlineAsm.cpp index 736e370..2e636aa 100644 --- a/lib/VMCore/InlineAsm.cpp +++ b/lib/VMCore/InlineAsm.cpp @@ -27,19 +27,20 @@ InlineAsm::~InlineAsm() { InlineAsm *InlineAsm::get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, - bool isAlignStack) { - InlineAsmKeyType Key(AsmString, Constraints, hasSideEffects, isAlignStack); + bool isAlignStack, AsmDialect asmDialect) { + InlineAsmKeyType Key(AsmString, Constraints, hasSideEffects, isAlignStack, + asmDialect); LLVMContextImpl *pImpl = Ty->getContext().pImpl; return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(Ty), Key); } InlineAsm::InlineAsm(PointerType *Ty, const std::string &asmString, const std::string &constraints, bool hasSideEffects, - bool isAlignStack) + bool isAlignStack, AsmDialect asmDialect) : Value(Ty, Value::InlineAsmVal), - AsmString(asmString), - Constraints(constraints), HasSideEffects(hasSideEffects), - IsAlignStack(isAlignStack) { + AsmString(asmString), Constraints(constraints), + HasSideEffects(hasSideEffects), IsAlignStack(isAlignStack), + Dialect(asmDialect) { // Do various checks on the constraint string and type. assert(Verify(getFunctionType(), constraints) && diff --git a/lib/VMCore/Instructions.cpp b/lib/VMCore/Instructions.cpp index 9af98e8..94bd2a1 100644 --- a/lib/VMCore/Instructions.cpp +++ b/lib/VMCore/Instructions.cpp @@ -332,21 +332,30 @@ CallInst::CallInst(const CallInst &CI) void CallInst::addAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.addAttr(i, attr); + PAL = PAL.addAttr(getContext(), i, attr); setAttributes(PAL); } void CallInst::removeAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.removeAttr(i, attr); + PAL = PAL.removeAttr(getContext(), i, attr); setAttributes(PAL); } -bool CallInst::paramHasAttr(unsigned i, Attributes attr) const { - if (AttributeList.paramHasAttr(i, attr)) +bool CallInst::hasFnAttr(Attributes::AttrVal A) const { + if (AttributeList.getParamAttributes(AttrListPtr::FunctionIndex) + .hasAttribute(A)) return true; if (const Function *F = getCalledFunction()) - return F->paramHasAttr(i, attr); + return F->getParamAttributes(AttrListPtr::FunctionIndex).hasAttribute(A); + return false; +} + +bool CallInst::paramHasAttr(unsigned i, Attributes::AttrVal A) const { + if (AttributeList.getParamAttributes(i).hasAttribute(A)) + return true; + if (const Function *F = getCalledFunction()) + return F->getParamAttributes(i).hasAttribute(A); return false; } @@ -562,23 +571,32 @@ void InvokeInst::setSuccessorV(unsigned idx, BasicBlock *B) { return setSuccessor(idx, B); } -bool InvokeInst::paramHasAttr(unsigned i, Attributes attr) const { - if (AttributeList.paramHasAttr(i, attr)) +bool InvokeInst::hasFnAttr(Attributes::AttrVal A) const { + if (AttributeList.getParamAttributes(AttrListPtr::FunctionIndex). 
+ hasAttribute(A)) return true; if (const Function *F = getCalledFunction()) - return F->paramHasAttr(i, attr); + return F->getParamAttributes(AttrListPtr::FunctionIndex).hasAttribute(A); + return false; +} + +bool InvokeInst::paramHasAttr(unsigned i, Attributes::AttrVal A) const { + if (AttributeList.getParamAttributes(i).hasAttribute(A)) + return true; + if (const Function *F = getCalledFunction()) + return F->getParamAttributes(i).hasAttribute(A); return false; } void InvokeInst::addAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.addAttr(i, attr); + PAL = PAL.addAttr(getContext(), i, attr); setAttributes(PAL); } void InvokeInst::removeAttribute(unsigned i, Attributes attr) { AttrListPtr PAL = getAttributes(); - PAL = PAL.removeAttr(i, attr); + PAL = PAL.removeAttr(getContext(), i, attr); setAttributes(PAL); } @@ -1381,18 +1399,6 @@ Type *GetElementPtrInst::getIndexedType(Type *Ptr, ArrayRef IdxList) { return getIndexedTypeInternal(Ptr, IdxList); } -unsigned GetElementPtrInst::getAddressSpace(Value *Ptr) { - Type *Ty = Ptr->getType(); - - if (VectorType *VTy = dyn_cast(Ty)) - Ty = VTy->getElementType(); - - if (PointerType *PTy = dyn_cast(Ty)) - return PTy->getAddressSpace(); - - llvm_unreachable("Invalid GEP pointer type"); -} - /// hasAllZeroIndices - Return true if all of the indices of this GEP are /// zeros. If so, the result pointer and the first operand have the same /// value, just potentially different types. @@ -2112,7 +2118,8 @@ bool CastInst::isNoopCast(Type *IntPtrTy) const { /// If no such cast is permited, the function returns 0. unsigned CastInst::isEliminableCastPair( Instruction::CastOps firstOp, Instruction::CastOps secondOp, - Type *SrcTy, Type *MidTy, Type *DstTy, Type *IntPtrTy) { + Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, + Type *DstIntPtrTy) { // Define the 144 possibilities for these two cast instructions. The values // in this matrix determine what to do in a given situation and select the // case in the switch below. 
The rows correspond to firstOp, the columns @@ -2215,9 +2222,9 @@ unsigned CastInst::isEliminableCastPair( return 0; case 7: { // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size - if (!IntPtrTy) + if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy) return 0; - unsigned PtrSize = IntPtrTy->getScalarSizeInBits(); + unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits(); unsigned MidSize = MidTy->getScalarSizeInBits(); if (MidSize >= PtrSize) return Instruction::BitCast; @@ -2256,9 +2263,9 @@ unsigned CastInst::isEliminableCastPair( return 0; case 13: { // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize - if (!IntPtrTy) + if (!MidIntPtrTy) return 0; - unsigned PtrSize = IntPtrTy->getScalarSizeInBits(); + unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits(); unsigned SrcSize = SrcTy->getScalarSizeInBits(); unsigned DstSize = DstTy->getScalarSizeInBits(); if (SrcSize <= PtrSize && SrcSize == DstSize) @@ -2836,7 +2843,7 @@ BitCastInst::BitCastInst( // CmpInst Classes //===----------------------------------------------------------------------===// -void CmpInst::Anchor() const {} +void CmpInst::anchor() {} CmpInst::CmpInst(Type *ty, OtherOps op, unsigned short predicate, Value *LHS, Value *RHS, const Twine &Name, diff --git a/lib/VMCore/LLVMContext.cpp b/lib/VMCore/LLVMContext.cpp index f07f0b3..2446ec9 100644 --- a/lib/VMCore/LLVMContext.cpp +++ b/lib/VMCore/LLVMContext.cpp @@ -53,6 +53,11 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) { unsigned RangeID = getMDKindID("range"); assert(RangeID == MD_range && "range kind id drifted"); (void)RangeID; + + // Create the 'tbaa.struct' metadata kind. + unsigned TBAAStructID = getMDKindID("tbaa.struct"); + assert(TBAAStructID == MD_tbaa_struct && "tbaa.struct kind id drifted"); + (void)TBAAStructID; } LLVMContext::~LLVMContext() { delete pImpl; } diff --git a/lib/VMCore/LLVMContextImpl.cpp b/lib/VMCore/LLVMContextImpl.cpp index 6279bb8..d35d284 100644 --- a/lib/VMCore/LLVMContextImpl.cpp +++ b/lib/VMCore/LLVMContextImpl.cpp @@ -12,6 +12,7 @@ //===----------------------------------------------------------------------===// #include "LLVMContextImpl.h" +#include "llvm/Attributes.h" #include "llvm/Module.h" #include "llvm/ADT/STLExtras.h" #include @@ -93,7 +94,21 @@ LLVMContextImpl::~LLVMContextImpl() { E = CDSConstants.end(); I != E; ++I) delete I->second; CDSConstants.clear(); - + + // Destroy attributes. + for (FoldingSetIterator I = AttrsSet.begin(), + E = AttrsSet.end(); I != E; ) { + FoldingSetIterator Elem = I++; + delete &*Elem; + } + + // Destroy attribute lists. + for (FoldingSetIterator I = AttrsLists.begin(), + E = AttrsLists.end(); I != E; ) { + FoldingSetIterator Elem = I++; + delete &*Elem; + } + // Destroy MDNodes. ~MDNode can move and remove nodes between the MDNodeSet // and the NonUniquedMDNodes sets, so copy the values out first. SmallVector MDNodes; @@ -107,6 +122,7 @@ LLVMContextImpl::~LLVMContextImpl() { (*I)->destroy(); assert(MDNodeSet.empty() && NonUniquedMDNodes.empty() && "Destroying all MDNodes didn't empty the Context's sets."); + // Destroy MDStrings. 
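The new LLVMContext::MD_tbaa_struct kind registered above pairs with the TBAAStructTag parameter that CreateMemCpy gained earlier in this patch. A sketch of emitting a struct-copy memcpy carrying both tags, assuming the tag MDNodes have already been built (for example by a front end) and the usual IRBuilder header is available:

CallInst *emitStructCopy(IRBuilder<> &B, Value *Dst, Value *Src,
                         uint64_t Bytes, MDNode *TBAATag,
                         MDNode *TBAAStructTag) {
  // The extra trailing argument attaches !tbaa.struct metadata to the
  // generated intrinsic call, alongside the ordinary !tbaa tag.
  return B.CreateMemCpy(Dst, Src, B.getInt64(Bytes), /*Align=*/1,
                        /*isVolatile=*/false, TBAATag, TBAAStructTag);
}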
DeleteContainerSeconds(MDStringCache); } diff --git a/lib/VMCore/LLVMContextImpl.h b/lib/VMCore/LLVMContextImpl.h index 2252028..90cf424 100644 --- a/lib/VMCore/LLVMContextImpl.h +++ b/lib/VMCore/LLVMContextImpl.h @@ -16,6 +16,7 @@ #define LLVM_LLVMCONTEXT_IMPL_H #include "llvm/LLVMContext.h" +#include "AttributesImpl.h" #include "ConstantsContext.h" #include "LeaksContext.h" #include "llvm/Constants.h" @@ -253,10 +254,14 @@ public: typedef DenseMap FPMapTy; FPMapTy FPConstants; - + + FoldingSet AttrsSet; + FoldingSet AttrsLists; + StringMap MDStringCache; - + FoldingSet MDNodeSet; + // MDNodes may be uniqued or not uniqued. When they're not uniqued, they // aren't in the MDNodeSet, but they're still shared between objects, so no // one object can destroy them. This set allows us to at least destroy them diff --git a/lib/VMCore/Makefile b/lib/VMCore/Makefile index 2b9b0f2..8b98651 100644 --- a/lib/VMCore/Makefile +++ b/lib/VMCore/Makefile @@ -9,7 +9,6 @@ LEVEL = ../.. LIBRARYNAME = LLVMCore BUILD_ARCHIVE = 1 -REQUIRES_RTTI = 1 BUILT_SOURCES = $(PROJ_OBJ_ROOT)/include/llvm/Intrinsics.gen diff --git a/lib/VMCore/PassManager.cpp b/lib/VMCore/PassManager.cpp index 4530c04..53f1149 100644 --- a/lib/VMCore/PassManager.cpp +++ b/lib/VMCore/PassManager.cpp @@ -1189,7 +1189,7 @@ void PMDataManager::dumpAnalysisUsage(StringRef Msg, const Pass *P, assert(PassDebugging >= Details); if (Set.empty()) return; - dbgs() << (void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:"; + dbgs() << (const void*)P << std::string(getDepth()*2+3, ' ') << Msg << " Analyses:"; for (unsigned i = 0; i != Set.size(); ++i) { if (i) dbgs() << ','; const PassInfo *PInf = PassRegistry::getPassRegistry()->getPassInfo(Set[i]); diff --git a/lib/VMCore/TargetTransformInfo.cpp b/lib/VMCore/TargetTransformInfo.cpp new file mode 100644 index 0000000..e91c29c --- /dev/null +++ b/lib/VMCore/TargetTransformInfo.cpp @@ -0,0 +1,31 @@ +//===- llvm/VMCore/TargetTransformInfo.cpp ----------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "llvm/TargetTransformInfo.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace llvm; + +/// Default ctor. +/// +/// @note This has to exist, because this is a pass, but it should never be +/// used. +TargetTransformInfo::TargetTransformInfo() : ImmutablePass(ID) { + /// You are seeing this error because your pass required the TTI + /// using a call to "getAnalysis()", and you did + /// not initialize a machine target which can provide the TTI. + /// You should use "getAnalysisIfAvailable()" instead. + report_fatal_error("Bad TargetTransformInfo ctor used. " + "Tool did not specify a TargetTransformInfo to use?"); +} + +INITIALIZE_PASS(TargetTransformInfo, "targettransforminfo", + "Target Transform Info", false, true) +char TargetTransformInfo::ID = 0; + diff --git a/lib/VMCore/Type.cpp b/lib/VMCore/Type.cpp index 5e9a00f..1656ab2 100644 --- a/lib/VMCore/Type.cpp +++ b/lib/VMCore/Type.cpp @@ -47,35 +47,17 @@ Type *Type::getScalarType() { return this; } +const Type *Type::getScalarType() const { + if (const VectorType *VTy = dyn_cast(this)) + return VTy->getElementType(); + return this; +} + /// isIntegerTy - Return true if this is an IntegerType of the specified width. 
bool Type::isIntegerTy(unsigned Bitwidth) const { return isIntegerTy() && cast(this)->getBitWidth() == Bitwidth; } -/// isIntOrIntVectorTy - Return true if this is an integer type or a vector of -/// integer types. -/// -bool Type::isIntOrIntVectorTy() const { - if (isIntegerTy()) - return true; - if (getTypeID() != Type::VectorTyID) return false; - - return cast(this)->getElementType()->isIntegerTy(); -} - -/// isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP types. -/// -bool Type::isFPOrFPVectorTy() const { - if (getTypeID() == Type::HalfTyID || getTypeID() == Type::FloatTyID || - getTypeID() == Type::DoubleTyID || - getTypeID() == Type::FP128TyID || getTypeID() == Type::X86_FP80TyID || - getTypeID() == Type::PPC_FP128TyID) - return true; - if (getTypeID() != Type::VectorTyID) return false; - - return cast(this)->getElementType()->isFloatingPointTy(); -} - // canLosslesslyBitCastTo - Return true if this type can be converted to // 'Ty' without any reinterpretation of bits. For example, i8* to i32*. // @@ -220,8 +202,6 @@ Type *Type::getStructElementType(unsigned N) const { return cast(this)->getElementType(N); } - - Type *Type::getSequentialElementType() const { return cast(this)->getElementType(); } @@ -235,12 +215,10 @@ unsigned Type::getVectorNumElements() const { } unsigned Type::getPointerAddressSpace() const { - return cast(this)->getAddressSpace(); + return cast(getScalarType())->getAddressSpace(); } - - //===----------------------------------------------------------------------===// // Primitive 'Type' data //===----------------------------------------------------------------------===// @@ -400,12 +378,10 @@ FunctionType *FunctionType::get(Type *ReturnType, return FT; } - FunctionType *FunctionType::get(Type *Result, bool isVarArg) { return get(Result, ArrayRef(), isVarArg); } - /// isValidReturnType - Return true if the specified type is valid as a return /// type. bool FunctionType::isValidReturnType(Type *RetTy) { @@ -553,7 +529,6 @@ StructType *StructType::create(LLVMContext &Context) { return create(Context, StringRef()); } - StructType *StructType::create(ArrayRef Elements, StringRef Name, bool isPacked) { assert(!Elements.empty() && @@ -637,7 +612,6 @@ bool StructType::isLayoutIdentical(StructType *Other) const { return std::equal(element_begin(), element_end(), Other->element_begin()); } - /// getTypeByName - Return the type with the specified name, or null if there /// is none by that name. 
StructType *Module::getTypeByName(StringRef Name) const { @@ -700,7 +674,6 @@ ArrayType::ArrayType(Type *ElType, uint64_t NumEl) NumElements = NumEl; } - ArrayType *ArrayType::get(Type *elementType, uint64_t NumElements) { Type *ElementType = const_cast(elementType); assert(isValidElementType(ElementType) && "Invalid type for array element!"); diff --git a/lib/VMCore/User.cpp b/lib/VMCore/User.cpp index 5f35ce4..e847ce6 100644 --- a/lib/VMCore/User.cpp +++ b/lib/VMCore/User.cpp @@ -10,6 +10,7 @@ #include "llvm/Constant.h" #include "llvm/GlobalValue.h" #include "llvm/User.h" +#include "llvm/Operator.h" namespace llvm { @@ -78,4 +79,12 @@ void User::operator delete(void *Usr) { ::operator delete(Storage); } +//===----------------------------------------------------------------------===// +// Operator Class +//===----------------------------------------------------------------------===// + +Operator::~Operator() { + llvm_unreachable("should never destroy an Operator"); +} + } // End llvm namespace diff --git a/lib/VMCore/Value.cpp b/lib/VMCore/Value.cpp index d871108..8d0720d 100644 --- a/lib/VMCore/Value.cpp +++ b/lib/VMCore/Value.cpp @@ -394,7 +394,7 @@ static bool isDereferenceablePointer(const Value *V, // It's also not always safe to follow a bitcast, for example: // bitcast i8* (alloca i8) to i32* // would result in a 4-byte load from a 1-byte alloca. Some cases could - // be handled using TargetData to check sizes and alignments though. + // be handled using DataLayout to check sizes and alignments though. // These are obviously ok. if (isa(V)) return true; diff --git a/lib/VMCore/ValueTypes.cpp b/lib/VMCore/ValueTypes.cpp index d1ca953..2ee9f0f 100644 --- a/lib/VMCore/ValueTypes.cpp +++ b/lib/VMCore/ValueTypes.cpp @@ -55,24 +55,32 @@ bool EVT::isExtendedVector() const { return LLVMTy->isVectorTy(); } +bool EVT::isExtended16BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 16; +} + +bool EVT::isExtended32BitVector() const { + return isExtendedVector() && getExtendedSizeInBits() == 32; +} + bool EVT::isExtended64BitVector() const { - return isExtendedVector() && getSizeInBits() == 64; + return isExtendedVector() && getExtendedSizeInBits() == 64; } bool EVT::isExtended128BitVector() const { - return isExtendedVector() && getSizeInBits() == 128; + return isExtendedVector() && getExtendedSizeInBits() == 128; } bool EVT::isExtended256BitVector() const { - return isExtendedVector() && getSizeInBits() == 256; + return isExtendedVector() && getExtendedSizeInBits() == 256; } bool EVT::isExtended512BitVector() const { - return isExtendedVector() && getSizeInBits() == 512; + return isExtendedVector() && getExtendedSizeInBits() == 512; } bool EVT::isExtended1024BitVector() const { - return isExtendedVector() && getSizeInBits() == 1024; + return isExtendedVector() && getExtendedSizeInBits() == 1024; } EVT EVT::getExtendedVectorElementType() const { @@ -120,15 +128,21 @@ std::string EVT::getEVTString() const { case MVT::Other: return "ch"; case MVT::Glue: return "glue"; case MVT::x86mmx: return "x86mmx"; + case MVT::v2i1: return "v2i1"; + case MVT::v4i1: return "v4i1"; + case MVT::v8i1: return "v8i1"; + case MVT::v16i1: return "v16i1"; case MVT::v2i8: return "v2i8"; case MVT::v4i8: return "v4i8"; case MVT::v8i8: return "v8i8"; case MVT::v16i8: return "v16i8"; case MVT::v32i8: return "v32i8"; + case MVT::v1i16: return "v1i16"; case MVT::v2i16: return "v2i16"; case MVT::v4i16: return "v4i16"; case MVT::v8i16: return "v8i16"; case MVT::v16i16: return "v16i16"; + case 
MVT::v1i32: return "v1i32"; case MVT::v2i32: return "v2i32"; case MVT::v4i32: return "v4i32"; case MVT::v8i32: return "v8i32"; @@ -171,15 +185,21 @@ Type *EVT::getTypeForEVT(LLVMContext &Context) const { case MVT::f128: return Type::getFP128Ty(Context); case MVT::ppcf128: return Type::getPPC_FP128Ty(Context); case MVT::x86mmx: return Type::getX86_MMXTy(Context); + case MVT::v2i1: return VectorType::get(Type::getInt1Ty(Context), 2); + case MVT::v4i1: return VectorType::get(Type::getInt1Ty(Context), 4); + case MVT::v8i1: return VectorType::get(Type::getInt1Ty(Context), 8); + case MVT::v16i1: return VectorType::get(Type::getInt1Ty(Context), 16); case MVT::v2i8: return VectorType::get(Type::getInt8Ty(Context), 2); case MVT::v4i8: return VectorType::get(Type::getInt8Ty(Context), 4); case MVT::v8i8: return VectorType::get(Type::getInt8Ty(Context), 8); case MVT::v16i8: return VectorType::get(Type::getInt8Ty(Context), 16); case MVT::v32i8: return VectorType::get(Type::getInt8Ty(Context), 32); + case MVT::v1i16: return VectorType::get(Type::getInt16Ty(Context), 1); case MVT::v2i16: return VectorType::get(Type::getInt16Ty(Context), 2); case MVT::v4i16: return VectorType::get(Type::getInt16Ty(Context), 4); case MVT::v8i16: return VectorType::get(Type::getInt16Ty(Context), 8); case MVT::v16i16: return VectorType::get(Type::getInt16Ty(Context), 16); + case MVT::v1i32: return VectorType::get(Type::getInt32Ty(Context), 1); case MVT::v2i32: return VectorType::get(Type::getInt32Ty(Context), 2); case MVT::v4i32: return VectorType::get(Type::getInt32Ty(Context), 4); case MVT::v8i32: return VectorType::get(Type::getInt32Ty(Context), 8); diff --git a/lib/VMCore/Verifier.cpp b/lib/VMCore/Verifier.cpp index 38914b3..eb40b09 100644 --- a/lib/VMCore/Verifier.cpp +++ b/lib/VMCore/Verifier.cpp @@ -400,8 +400,8 @@ void Verifier::visitGlobalValue(GlobalValue &GV) { "Only global arrays can have appending linkage!", GVar); } - Assert1(!GV.hasLinkerPrivateWeakDefAutoLinkage() || GV.hasDefaultVisibility(), - "linker_private_weak_def_auto can only have default visibility!", + Assert1(!GV.hasLinkOnceODRAutoHideLinkage() || GV.hasDefaultVisibility(), + "linkonce_odr_auto_hide can only have default visibility!", &GV); } @@ -526,40 +526,60 @@ void Verifier::visitMDNode(MDNode &MD, Function *F) { // value of the specified type. The value V is printed in error messages. 
void Verifier::VerifyParameterAttrs(Attributes Attrs, Type *Ty, bool isReturnValue, const Value *V) { - if (Attrs == Attribute::None) + if (!Attrs.hasAttributes()) return; - Attributes FnCheckAttr = Attrs & Attribute::FunctionOnly; - Assert1(!FnCheckAttr, "Attribute " + Attribute::getAsString(FnCheckAttr) + - " only applies to the function!", V); - - if (isReturnValue) { - Attributes RetI = Attrs & Attribute::ParameterOnly; - Assert1(!RetI, "Attribute " + Attribute::getAsString(RetI) + - " does not apply to return values!", V); - } - - for (unsigned i = 0; - i < array_lengthof(Attribute::MutuallyIncompatible); ++i) { - Attributes MutI = Attrs & Attribute::MutuallyIncompatible[i]; - Assert1(MutI.isEmptyOrSingleton(), "Attributes " + - Attribute::getAsString(MutI) + " are incompatible!", V); - } - - Attributes TypeI = Attrs & Attribute::typeIncompatible(Ty); - Assert1(!TypeI, "Wrong type for attribute " + - Attribute::getAsString(TypeI), V); - - Attributes ByValI = Attrs & Attribute::ByVal; - if (PointerType *PTy = dyn_cast(Ty)) { - Assert1(!ByValI || PTy->getElementType()->isSized(), - "Attribute " + Attribute::getAsString(ByValI) + - " does not support unsized types!", V); - } else { - Assert1(!ByValI, - "Attribute " + Attribute::getAsString(ByValI) + - " only applies to parameters with pointer type!", V); - } + Assert1(!Attrs.hasFunctionOnlyAttrs(), + "Some attributes in '" + Attrs.getAsString() + + "' only apply to functions!", V); + + if (isReturnValue) + Assert1(!Attrs.hasParameterOnlyAttrs(), + "Attributes 'byval', 'nest', 'sret', and 'nocapture' " + "do not apply to return values!", V); + + // Check for mutually incompatible attributes. + Assert1(!((Attrs.hasAttribute(Attributes::ByVal) && + Attrs.hasAttribute(Attributes::Nest)) || + (Attrs.hasAttribute(Attributes::ByVal) && + Attrs.hasAttribute(Attributes::StructRet)) || + (Attrs.hasAttribute(Attributes::Nest) && + Attrs.hasAttribute(Attributes::StructRet))), "Attributes " + "'byval, nest, and sret' are incompatible!", V); + + Assert1(!((Attrs.hasAttribute(Attributes::ByVal) && + Attrs.hasAttribute(Attributes::Nest)) || + (Attrs.hasAttribute(Attributes::ByVal) && + Attrs.hasAttribute(Attributes::InReg)) || + (Attrs.hasAttribute(Attributes::Nest) && + Attrs.hasAttribute(Attributes::InReg))), "Attributes " + "'byval, nest, and inreg' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Attributes::ZExt) && + Attrs.hasAttribute(Attributes::SExt)), "Attributes " + "'zeroext and signext' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Attributes::ReadNone) && + Attrs.hasAttribute(Attributes::ReadOnly)), "Attributes " + "'readnone and readonly' are incompatible!", V); + + Assert1(!(Attrs.hasAttribute(Attributes::NoInline) && + Attrs.hasAttribute(Attributes::AlwaysInline)), "Attributes " + "'noinline and alwaysinline' are incompatible!", V); + + Assert1(!AttrBuilder(Attrs). + hasAttributes(Attributes::typeIncompatible(Ty)), + "Wrong types for attribute: " + + Attributes::typeIncompatible(Ty).getAsString(), V); + + if (PointerType *PTy = dyn_cast(Ty)) + Assert1(!Attrs.hasAttribute(Attributes::ByVal) || + PTy->getElementType()->isSized(), + "Attribute 'byval' does not support unsized types!", V); + else + Assert1(!Attrs.hasAttribute(Attributes::ByVal), + "Attribute 'byval' only applies to parameters with pointer type!", + V); } // VerifyFunctionAttrs - Check parameter attributes against a function type. 
@@ -585,26 +605,50 @@ void Verifier::VerifyFunctionAttrs(FunctionType *FT, VerifyParameterAttrs(Attr.Attrs, Ty, Attr.Index == 0, V); - if (Attr.Attrs & Attribute::Nest) { + if (Attr.Attrs.hasAttribute(Attributes::Nest)) { Assert1(!SawNest, "More than one parameter has attribute nest!", V); SawNest = true; } - if (Attr.Attrs & Attribute::StructRet) + if (Attr.Attrs.hasAttribute(Attributes::StructRet)) Assert1(Attr.Index == 1, "Attribute sret not on first parameter!", V); } Attributes FAttrs = Attrs.getFnAttributes(); - Attributes NotFn = FAttrs & (~Attribute::FunctionOnly); - Assert1(!NotFn, "Attribute " + Attribute::getAsString(NotFn) + - " does not apply to the function!", V); - - for (unsigned i = 0; - i < array_lengthof(Attribute::MutuallyIncompatible); ++i) { - Attributes MutI = FAttrs & Attribute::MutuallyIncompatible[i]; - Assert1(MutI.isEmptyOrSingleton(), "Attributes " + - Attribute::getAsString(MutI) + " are incompatible!", V); - } + AttrBuilder NotFn(FAttrs); + NotFn.removeFunctionOnlyAttrs(); + Assert1(!NotFn.hasAttributes(), "Attributes '" + + Attributes::get(V->getContext(), NotFn).getAsString() + + "' do not apply to the function!", V); + + // Check for mutually incompatible attributes. + Assert1(!((FAttrs.hasAttribute(Attributes::ByVal) && + FAttrs.hasAttribute(Attributes::Nest)) || + (FAttrs.hasAttribute(Attributes::ByVal) && + FAttrs.hasAttribute(Attributes::StructRet)) || + (FAttrs.hasAttribute(Attributes::Nest) && + FAttrs.hasAttribute(Attributes::StructRet))), "Attributes " + "'byval, nest, and sret' are incompatible!", V); + + Assert1(!((FAttrs.hasAttribute(Attributes::ByVal) && + FAttrs.hasAttribute(Attributes::Nest)) || + (FAttrs.hasAttribute(Attributes::ByVal) && + FAttrs.hasAttribute(Attributes::InReg)) || + (FAttrs.hasAttribute(Attributes::Nest) && + FAttrs.hasAttribute(Attributes::InReg))), "Attributes " + "'byval, nest, and inreg' are incompatible!", V); + + Assert1(!(FAttrs.hasAttribute(Attributes::ZExt) && + FAttrs.hasAttribute(Attributes::SExt)), "Attributes " + "'zeroext and signext' are incompatible!", V); + + Assert1(!(FAttrs.hasAttribute(Attributes::ReadNone) && + FAttrs.hasAttribute(Attributes::ReadOnly)), "Attributes " + "'readnone and readonly' are incompatible!", V); + + Assert1(!(FAttrs.hasAttribute(Attributes::NoInline) && + FAttrs.hasAttribute(Attributes::AlwaysInline)), "Attributes " + "'noinline and alwaysinline' are incompatible!", V); } static bool VerifyAttributeCount(const AttrListPtr &Attrs, unsigned Params) { @@ -661,6 +705,7 @@ void Verifier::visitFunction(Function &F) { case CallingConv::Cold: case CallingConv::X86_FastCall: case CallingConv::X86_ThisCall: + case CallingConv::Intel_OCL_BI: case CallingConv::PTX_Kernel: case CallingConv::PTX_Device: Assert1(!F.isVarArg(), @@ -1170,9 +1215,8 @@ void Verifier::VerifyCallSite(CallSite CS) { VerifyParameterAttrs(Attr, CS.getArgument(Idx-1)->getType(), false, I); - Attributes VArgI = Attr & Attribute::VarArgsIncompatible; - Assert1(!VArgI, "Attribute " + Attribute::getAsString(VArgI) + - " cannot be used for vararg call arguments!", I); + Assert1(!Attr.hasIncompatibleWithVarArgsAttrs(), + "Attribute 'sret' cannot be used for vararg call arguments!", I); } // Verify that there's no metadata unless it's a direct call to an intrinsic. 
@@ -1378,6 +1422,15 @@ void Verifier::visitLoadInst(LoadInst &LI) { "Load cannot have Release ordering", &LI); Assert1(LI.getAlignment() != 0, "Atomic load must specify explicit alignment", &LI); + if (!ElTy->isPointerTy()) { + Assert2(ElTy->isIntegerTy(), + "atomic store operand must have integer type!", + &LI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "atomic store operand must be power-of-two byte-sized integer", + &LI, ElTy); + } } else { Assert1(LI.getSynchScope() == CrossThread, "Non-atomic load cannot have SynchronizationScope specified", &LI); @@ -1444,6 +1497,15 @@ void Verifier::visitStoreInst(StoreInst &SI) { "Store cannot have Acquire ordering", &SI); Assert1(SI.getAlignment() != 0, "Atomic store must specify explicit alignment", &SI); + if (!ElTy->isPointerTy()) { + Assert2(ElTy->isIntegerTy(), + "atomic store operand must have integer type!", + &SI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "atomic store operand must be power-of-two byte-sized integer", + &SI, ElTy); + } } else { Assert1(SI.getSynchScope() == CrossThread, "Non-atomic store cannot have SynchronizationScope specified", &SI); @@ -1471,6 +1533,13 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { PointerType *PTy = dyn_cast(CXI.getOperand(0)->getType()); Assert1(PTy, "First cmpxchg operand must be a pointer.", &CXI); Type *ElTy = PTy->getElementType(); + Assert2(ElTy->isIntegerTy(), + "cmpxchg operand must have integer type!", + &CXI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "cmpxchg operand must be power-of-two byte-sized integer", + &CXI, ElTy); Assert2(ElTy == CXI.getOperand(1)->getType(), "Expected value type does not match pointer operand type!", &CXI, ElTy); @@ -1488,6 +1557,13 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { PointerType *PTy = dyn_cast(RMWI.getOperand(0)->getType()); Assert1(PTy, "First atomicrmw operand must be a pointer.", &RMWI); Type *ElTy = PTy->getElementType(); + Assert2(ElTy->isIntegerTy(), + "atomicrmw operand must have integer type!", + &RMWI, ElTy); + unsigned Size = ElTy->getPrimitiveSizeInBits(); + Assert2(Size >= 8 && !(Size & (Size - 1)), + "atomicrmw operand must be power-of-two byte-sized integer", + &RMWI, ElTy); Assert2(ElTy == RMWI.getOperand(1)->getType(), "Argument value type does not match pointer operand type!", &RMWI, ElTy); @@ -1575,6 +1651,13 @@ void Verifier::visitLandingPadInst(LandingPadInst &LPI) { void Verifier::verifyDominatesUse(Instruction &I, unsigned i) { Instruction *Op = cast(I.getOperand(i)); + // If the we have an invalid invoke, don't try to compute the dominance. + // We already reject it in the invoke specific checks and the dominance + // computation doesn't handle multiple edges. + if (InvokeInst *II = dyn_cast(Op)) { + if (II->getNormalDest() == II->getUnwindDest()) + return; + } const Use &U = I.getOperandUse(i); Assert2(InstsInThisBlock.count(Op) || DT->dominates(Op, U), diff --git a/projects/CMakeLists.txt b/projects/CMakeLists.txt index dac6373..36751cd 100644 --- a/projects/CMakeLists.txt +++ b/projects/CMakeLists.txt @@ -10,3 +10,10 @@ foreach(entry ${entries}) endif() endif() endforeach(entry) + +# Also add in the compiler-rt tree if present and we have a sufficiently +# recent version of CMake. 
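The atomic checks added above all enforce the same operand rule: an integer whose bit width is at least 8 and a power of two. Stated as a standalone predicate of the same shape as the Verifier code, for illustration only:

// Mirrors the Size checks added to visitLoadInst/visitStoreInst/
// visitAtomicCmpXchgInst/visitAtomicRMWInst above: i8/i16/i32/i64 pass,
// while i1 (narrower than a byte) and i48 (48 & 47 != 0) are rejected.
static bool isValidAtomicIntegerWidth(unsigned SizeInBits) {
  return SizeInBits >= 8 && !(SizeInBits & (SizeInBits - 1));
}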
+if(${CMAKE_VERSION} VERSION_GREATER 2.8.7 AND + ${LLVM_BUILD_RUNTIME}) + add_llvm_external_project(compiler-rt) +endif() diff --git a/projects/sample/Makefile.llvm.rules b/projects/sample/Makefile.llvm.rules index a655302..7ed1c1b 100644 --- a/projects/sample/Makefile.llvm.rules +++ b/projects/sample/Makefile.llvm.rules @@ -1437,7 +1437,7 @@ install-local:: uninstall-local:: $(Echo) Uninstall circumvented with NO_INSTALL else -DestTool = $(DESTDIR)$(PROJ_bindir)/$(TOOLEXENAME) +DestTool = $(DESTDIR)$(PROJ_bindir)/$(program_prefix)$(TOOLEXENAME) install-local:: $(DestTool) @@ -1451,7 +1451,7 @@ uninstall-local:: # TOOLALIAS install. ifdef TOOLALIAS -DestToolAlias = $(DESTDIR)$(PROJ_bindir)/$(TOOLALIAS)$(EXEEXT) +DestToolAlias = $(DESTDIR)$(PROJ_bindir)/$(program_prefix)$(TOOLALIAS)$(EXEEXT) install-local:: $(DestToolAlias) diff --git a/projects/sample/autoconf/configure.ac b/projects/sample/autoconf/configure.ac index bd0b16a..8012c23 100644 --- a/projects/sample/autoconf/configure.ac +++ b/projects/sample/autoconf/configure.ac @@ -304,8 +304,8 @@ AC_CACHE_CHECK([target architecture],[llvm_cv_target_arch], sparc*-*) llvm_cv_target_arch="Sparc" ;; powerpc*-*) llvm_cv_target_arch="PowerPC" ;; arm*-*) llvm_cv_target_arch="ARM" ;; - mips-*) llvm_cv_target_arch="Mips" ;; - mipsel-*) llvm_cv_target_arch="Mips" ;; + mips-* | mips64-*) llvm_cv_target_arch="Mips" ;; + mipsel-* | mips64el-*) llvm_cv_target_arch="Mips" ;; xcore-*) llvm_cv_target_arch="XCore" ;; msp430-*) llvm_cv_target_arch="MSP430" ;; hexagon-*) llvm_cv_target_arch="Hexagon" ;; diff --git a/projects/sample/configure b/projects/sample/configure index df08c7c..cfbb6c6 100755 --- a/projects/sample/configure +++ b/projects/sample/configure @@ -3840,8 +3840,8 @@ else sparc*-*) llvm_cv_target_arch="Sparc" ;; powerpc*-*) llvm_cv_target_arch="PowerPC" ;; arm*-*) llvm_cv_target_arch="ARM" ;; - mips-*) llvm_cv_target_arch="Mips" ;; - mipsel-*) llvm_cv_target_arch="Mips" ;; + mips-* | mips64-*) llvm_cv_target_arch="Mips" ;; + mipsel-* | mips64el-*) llvm_cv_target_arch="Mips" ;; xcore-*) llvm_cv_target_arch="XCore" ;; msp430-*) llvm_cv_target_arch="MSP430" ;; hexagon-*) llvm_cv_target_arch="Hexagon" ;; diff --git a/runtime/libprofile/CMakeLists.txt b/runtime/libprofile/CMakeLists.txt index 414ad00..8609715 100644 --- a/runtime/libprofile/CMakeLists.txt +++ b/runtime/libprofile/CMakeLists.txt @@ -13,7 +13,8 @@ set_target_properties( profile_rt-static PROPERTIES OUTPUT_NAME "profile_rt" ) -add_llvm_loadable_module( profile_rt-shared ${SOURCES} ) +set(BUILD_SHARED_LIBS ON) +add_llvm_library( profile_rt-shared ${SOURCES} ) set_target_properties( profile_rt-shared PROPERTIES OUTPUT_NAME "profile_rt" ) diff --git a/runtime/libprofile/CommonProfiling.c b/runtime/libprofile/CommonProfiling.c index acc17ce..8f4119c 100644 --- a/runtime/libprofile/CommonProfiling.c +++ b/runtime/libprofile/CommonProfiling.c @@ -28,14 +28,35 @@ static char *SavedArgs = 0; static unsigned SavedArgsLength = 0; +static const char *SavedEnvVar = 0; static const char *OutputFilename = "llvmprof.out"; +/* check_environment_variable - Check to see if the LLVMPROF_OUTPUT environment + * variable is set. If it is then save it and set OutputFilename. + */ +static void check_environment_variable(void) { + const char *EnvVar; + if (SavedEnvVar) return; /* Guarantee that we can't leak memory. 
*/ + + if ((EnvVar = getenv("LLVMPROF_OUTPUT")) != NULL) { + /* The string that getenv returns is allowed to be statically allocated, + * which means it may be changed by future calls to getenv, so copy it. + */ + SavedEnvVar = strdup(EnvVar); + OutputFilename = SavedEnvVar; + } +} + /* save_arguments - Save argc and argv as passed into the program for the file * we output. + * If either the LLVMPROF_OUTPUT environment variable or the -llvmprof-output + * command line argument are set then change OutputFilename to the provided + * value. The command line argument value overrides the environment variable. */ int save_arguments(int argc, const char **argv) { unsigned Length, i; + if (!SavedEnvVar && !SavedArgs) check_environment_variable(); if (SavedArgs || !argv) return argc; /* This can be called multiple times */ /* Check to see if there are any arguments passed into the program for the @@ -54,6 +75,7 @@ int save_arguments(int argc, const char **argv) { puts("-llvmprof-output requires a filename argument!"); else { OutputFilename = strdup(argv[1]); + if (SavedEnvVar) { free((void *)SavedEnvVar); SavedEnvVar = 0; } memmove((char**)&argv[1], &argv[2], (argc-1)*sizeof(char*)); --argc; } diff --git a/runtime/libprofile/Makefile b/runtime/libprofile/Makefile index d851149..6e92253 100644 --- a/runtime/libprofile/Makefile +++ b/runtime/libprofile/Makefile @@ -44,8 +44,15 @@ ifeq ($(HOST_OS),Darwin) # command line. DARWIN_VERS := $(shell echo $(TARGET_TRIPLE) | sed 's/.*darwin\([0-9]*\).*/\1/') ifneq ($(DARWIN_VERS),8) - LLVMLibsOptions := $(LLVMLibsOptions) \ + LLVMLibsOptions := $(LLVMLibsOptions) \ -Wl,-install_name \ -Wl,"@executable_path/../lib/lib$(LIBRARYNAME)$(SHLIBEXT)" endif + + # If we're doing an Apple-style build, add the LTO object path. + ifeq ($(RC_BUILDIT),YES) + TempFile := $(shell mkdir -p ${OBJROOT}/dSYMs ; mktemp ${OBJROOT}/dSYMs/profile_rt-lto.XXXXXX) + LLVMLibsOptions := $(LLVMLibsOptions) \ + -Wl,-object_path_lto -Wl,$(TempFile) + endif endif diff --git a/runtime/libprofile/Profiling.h b/runtime/libprofile/Profiling.h index c6b9a4d..acc6399 100644 --- a/runtime/libprofile/Profiling.h +++ b/runtime/libprofile/Profiling.h @@ -15,7 +15,7 @@ #ifndef PROFILING_H #define PROFILING_H -#include "llvm/Analysis/ProfileInfoTypes.h" /* for enum ProfilingType */ +#include "llvm/Analysis/ProfileDataTypes.h" /* for enum ProfilingType */ /* save_arguments - Save argc and argv as passed into the program for the file * we output. diff --git a/test/Analysis/BasicAA/noalias-geps.ll b/test/Analysis/BasicAA/noalias-geps.ll new file mode 100644 index 0000000..a93d778 --- /dev/null +++ b/test/Analysis/BasicAA/noalias-geps.ll @@ -0,0 +1,54 @@ +; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s + +target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128" + +; Check that geps with equal base offsets of noalias base pointers stay noalias. 
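With the libprofile change above, the runtime picks its output file in two steps: LLVMPROF_OUTPUT is consulted first, and an explicit -llvmprof-output argument then overrides it (freeing the saved environment copy). A hypothetical usage sketch; the file and program names are placeholders:

/* Hypothetical invocations of an instrumented program:
 *   LLVMPROF_OUTPUT=run1.out ./prog                          -> writes run1.out
 *   LLVMPROF_OUTPUT=run1.out ./prog -llvmprof-output run2.out -> run2.out wins
 *   ./prog                                                   -> default llvmprof.out
 */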
+define i32 @test(i32* %p, i16 %i) { + %pi = getelementptr i32* %p, i32 0 + %pi.next = getelementptr i32* %p, i32 1 + %b = icmp eq i16 %i, 0 + br i1 %b, label %bb1, label %bb2 + +bb1: + %f = getelementptr i32* %pi, i32 1 + %g = getelementptr i32* %pi.next, i32 1 + br label %bb3 +bb2: + %f2 = getelementptr i32* %pi, i32 1 + %g2 = getelementptr i32* %pi.next, i32 1 + br label %bb3 + +bb3: + %ptr_phi = phi i32* [ %f, %bb1 ], [ %f2, %bb2 ] + %ptr_phi2 = phi i32* [ %g, %bb1 ], [ %g2, %bb2 ] +; CHECK: NoAlias: i32* %f1, i32* %g1 + %f1 = getelementptr i32* %ptr_phi , i32 1 + %g1 = getelementptr i32* %ptr_phi2 , i32 1 + +ret i32 0 +} + +; Check that geps with equal indices of noalias base pointers stay noalias. +define i32 @test2([2 x i32]* %p, i32 %i) { + %pi = getelementptr [2 x i32]* %p, i32 0 + %pi.next = getelementptr [2 x i32]* %p, i32 1 + %b = icmp eq i32 %i, 0 + br i1 %b, label %bb1, label %bb2 + +bb1: + %f = getelementptr [2 x i32]* %pi, i32 1 + %g = getelementptr [2 x i32]* %pi.next, i32 1 + br label %bb3 +bb2: + %f2 = getelementptr [2 x i32]* %pi, i32 1 + %g2 = getelementptr [2 x i32]* %pi.next, i32 1 + br label %bb3 +bb3: + %ptr_phi = phi [2 x i32]* [ %f, %bb1 ], [ %f2, %bb2 ] + %ptr_phi2 = phi [2 x i32]* [ %g, %bb1 ], [ %g2, %bb2 ] +; CHECK: NoAlias: i32* %f1, i32* %g1 + %f1 = getelementptr [2 x i32]* %ptr_phi , i32 1, i32 %i + %g1 = getelementptr [2 x i32]* %ptr_phi2 , i32 1, i32 %i + +ret i32 0 +} diff --git a/test/Analysis/BasicAA/nocapture.ll b/test/Analysis/BasicAA/nocapture.ll index a8658ec..ffc0a09a 100644 --- a/test/Analysis/BasicAA/nocapture.ll +++ b/test/Analysis/BasicAA/nocapture.ll @@ -13,3 +13,24 @@ define i32 @test2() { ret i32 %c } +declare void @test3(i32** %p, i32* %q) nounwind + +define i32 @test4(i32* noalias nocapture %p) nounwind { +; CHECK: call void @test3 +; CHECK: store i32 0, i32* %p +; CHECK: store i32 1, i32* %x +; CHECK: %y = load i32* %p +; CHECK: ret i32 %y +entry: + %q = alloca i32* + ; Here test3 might store %p to %q. This doesn't violate %p's nocapture + ; attribute since the copy doesn't outlive the function. + call void @test3(i32** %q, i32* %p) nounwind + store i32 0, i32* %p + %x = load i32** %q + ; This store might write to %p and so we can't eliminate the subsequent + ; load + store i32 1, i32* %x + %y = load i32* %p + ret i32 %y +} diff --git a/test/Analysis/BasicAA/phi-speculation.ll b/test/Analysis/BasicAA/phi-speculation.ll new file mode 100644 index 0000000..21c6592 --- /dev/null +++ b/test/Analysis/BasicAA/phi-speculation.ll @@ -0,0 +1,33 @@ +target datalayout = +"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" + +; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s + +; ptr_phi and ptr2_phi do not alias. 
+; CHECK: NoAlias: i32* %ptr2_phi, i32* %ptr_phi + +define i32 @test_noalias(i32* %ptr2, i32 %count, i32* %coeff) { +entry: + %ptr = getelementptr inbounds i32* %ptr2, i64 1 + br label %while.body + +while.body: + %num = phi i32 [ %count, %entry ], [ %dec, %while.body ] + %ptr_phi = phi i32* [ %ptr, %entry ], [ %ptr_inc, %while.body ] + %ptr2_phi = phi i32* [ %ptr2, %entry ], [ %ptr2_inc, %while.body ] + %result.09 = phi i32 [ 0 , %entry ], [ %add, %while.body ] + %dec = add nsw i32 %num, -1 + %0 = load i32* %ptr_phi, align 4 + store i32 %0, i32* %ptr2_phi, align 4 + %1 = load i32* %coeff, align 4 + %2 = load i32* %ptr_phi, align 4 + %mul = mul nsw i32 %1, %2 + %add = add nsw i32 %mul, %result.09 + %tobool = icmp eq i32 %dec, 0 + %ptr_inc = getelementptr inbounds i32* %ptr_phi, i64 1 + %ptr2_inc = getelementptr inbounds i32* %ptr2_phi, i64 1 + br i1 %tobool, label %the_exit, label %while.body + +the_exit: + ret i32 %add +} diff --git a/test/Analysis/BranchProbabilityInfo/basic.ll b/test/Analysis/BranchProbabilityInfo/basic.ll index 74d06a1..08adfa8 100644 --- a/test/Analysis/BranchProbabilityInfo/basic.ll +++ b/test/Analysis/BranchProbabilityInfo/basic.ll @@ -88,3 +88,30 @@ exit: } !1 = metadata !{metadata !"branch_weights", i32 4, i32 4, i32 64, i32 4, i32 4} + +define i32 @test4(i32 %x) nounwind uwtable readnone ssp { +; CHECK: Printing analysis {{.*}} for function 'test4' +entry: + %conv = sext i32 %x to i64 + switch i64 %conv, label %return [ + i64 0, label %sw.bb + i64 1, label %sw.bb + i64 2, label %sw.bb + i64 5, label %sw.bb1 + ], !prof !2 +; CHECK: edge entry -> return probability is 7 / 85 +; CHECK: edge entry -> sw.bb probability is 14 / 85 +; CHECK: edge entry -> sw.bb1 probability is 64 / 85 + +sw.bb: + br label %return + +sw.bb1: + br label %return + +return: + %retval.0 = phi i32 [ 5, %sw.bb1 ], [ 1, %sw.bb ], [ 0, %entry ] + ret i32 %retval.0 +} + +!2 = metadata !{metadata !"branch_weights", i32 7, i32 6, i32 4, i32 4, i32 64} diff --git a/test/Analysis/CallGraph/do-nothing-intrinsic.ll b/test/Analysis/CallGraph/do-nothing-intrinsic.ll new file mode 100644 index 0000000..f28ad10 --- /dev/null +++ b/test/Analysis/CallGraph/do-nothing-intrinsic.ll @@ -0,0 +1,13 @@ +; RUN: opt < %s -basiccg +; PR13903 + +define void @main() { + invoke void @llvm.donothing() + to label %ret unwind label %unw +unw: + %tmp = landingpad i8 personality i8 0 cleanup + br label %ret +ret: + ret void +} +declare void @llvm.donothing() nounwind readnone diff --git a/test/Analysis/CostModel/X86/arith.ll b/test/Analysis/CostModel/X86/arith.ll new file mode 100644 index 0000000..37cca8d --- /dev/null +++ b/test/Analysis/CostModel/X86/arith.ll @@ -0,0 +1,42 @@ +; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +define i32 @add(i32 %arg) { + ;CHECK: cost of 1 {{.*}} add + %A = add <4 x i32> undef, undef + ;CHECK: cost of 4 {{.*}} add + %B = add <8 x i32> undef, undef + ;CHECK: cost of 1 {{.*}} add + %C = add <2 x i64> undef, undef + ;CHECK: cost of 4 {{.*}} add + %D = add <4 x i64> undef, undef + ;CHECK: cost of 8 {{.*}} add + %E = add <8 x i64> undef, undef + ;CHECK: cost of 1 {{.*}} ret + ret i32 undef +} + + +define i32 @xor(i32 %arg) { + ;CHECK: cost of 1 {{.*}} xor + %A = xor <4 x i32> undef, undef + ;CHECK: cost of 1 {{.*}} xor + %B = 
xor <8 x i32> undef, undef + ;CHECK: cost of 1 {{.*}} xor + %C = xor <2 x i64> undef, undef + ;CHECK: cost of 1 {{.*}} xor + %D = xor <4 x i64> undef, undef + ;CHECK: cost of 1 {{.*}} ret + ret i32 undef +} + + +define i32 @fmul(i32 %arg) { + ;CHECK: cost of 1 {{.*}} fmul + %A = fmul <4 x float> undef, undef + ;CHECK: cost of 1 {{.*}} fmul + %B = fmul <8 x float> undef, undef + ret i32 undef +} diff --git a/test/Analysis/CostModel/X86/cast.ll b/test/Analysis/CostModel/X86/cast.ll new file mode 100644 index 0000000..75c97a7 --- /dev/null +++ b/test/Analysis/CostModel/X86/cast.ll @@ -0,0 +1,69 @@ +; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +define i32 @add(i32 %arg) { + + ; -- Same size registeres -- + ;CHECK: cost of 1 {{.*}} zext + %A = zext <4 x i1> undef to <4 x i32> + ;CHECK: cost of 2 {{.*}} sext + %B = sext <4 x i1> undef to <4 x i32> + ;CHECK: cost of 0 {{.*}} trunc + %C = trunc <4 x i32> undef to <4 x i1> + + ; -- Different size registers -- + ;CHECK-NOT: cost of 1 {{.*}} zext + %D = zext <8 x i1> undef to <8 x i32> + ;CHECK-NOT: cost of 2 {{.*}} sext + %E = sext <8 x i1> undef to <8 x i32> + ;CHECK-NOT: cost of 2 {{.*}} trunc + %F = trunc <8 x i32> undef to <8 x i1> + + ; -- scalars -- + + ;CHECK: cost of 1 {{.*}} zext + %G = zext i1 undef to i32 + ;CHECK: cost of 0 {{.*}} trunc + %H = trunc i32 undef to i1 + + ;CHECK: cost of 1 {{.*}} ret + ret i32 undef +} + +define i32 @zext_sext(<8 x i1> %in) { + ;CHECK: cost of 6 {{.*}} zext + %Z = zext <8 x i1> %in to <8 x i32> + ;CHECK: cost of 9 {{.*}} sext + %S = sext <8 x i1> %in to <8 x i32> + + ;CHECK: cost of 1 {{.*}} sext + %A = sext <8 x i16> undef to <8 x i32> + ;CHECK: cost of 1 {{.*}} zext + %B = zext <8 x i16> undef to <8 x i32> + ;CHECK: cost of 1 {{.*}} sext + %C = sext <4 x i32> undef to <4 x i64> + + ;CHECK: cost of 1 {{.*}} zext + %D = zext <4 x i32> undef to <4 x i64> + ;CHECK: cost of 1 {{.*}} trunc + + %E = trunc <4 x i64> undef to <4 x i32> + ;CHECK: cost of 1 {{.*}} trunc + %F = trunc <8 x i32> undef to <8 x i16> + + ;CHECK: cost of 3 {{.*}} trunc + %G = trunc <8 x i64> undef to <8 x i32> + + ret i32 undef +} + +define i32 @masks(<8 x i1> %in) { + ;CHECK: cost of 6 {{.*}} zext + %Z = zext <8 x i1> %in to <8 x i32> + ;CHECK: cost of 9 {{.*}} sext + %S = sext <8 x i1> %in to <8 x i32> + ret i32 undef +} + diff --git a/test/Analysis/CostModel/X86/cmp.ll b/test/Analysis/CostModel/X86/cmp.ll new file mode 100644 index 0000000..f868bd1 --- /dev/null +++ b/test/Analysis/CostModel/X86/cmp.ll @@ -0,0 +1,42 @@ +; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +define i32 @cmp(i32 %arg) { + ; -- floats -- + ;CHECK: cost of 1 {{.*}} fcmp + %A = fcmp olt <2 x float> undef, undef + ;CHECK: cost of 1 {{.*}} fcmp + %B = fcmp olt <4 x float> undef, undef + ;CHECK: cost of 1 {{.*}} fcmp + %C = fcmp olt <8 x float> undef, undef + ;CHECK: cost of 1 {{.*}} fcmp + %D = fcmp olt <2 x double> undef, undef + ;CHECK: cost of 1 {{.*}} fcmp + %E = fcmp olt <4 x double> undef, undef + + ; -- integers -- + + 
;CHECK: cost of 1 {{.*}} icmp + %F = icmp eq <16 x i8> undef, undef + ;CHECK: cost of 1 {{.*}} icmp + %G = icmp eq <8 x i16> undef, undef + ;CHECK: cost of 1 {{.*}} icmp + %H = icmp eq <4 x i32> undef, undef + ;CHECK: cost of 1 {{.*}} icmp + %I = icmp eq <2 x i64> undef, undef + ;CHECK: cost of 4 {{.*}} icmp + %J = icmp eq <4 x i64> undef, undef + ;CHECK: cost of 4 {{.*}} icmp + %K = icmp eq <8 x i32> undef, undef + ;CHECK: cost of 4 {{.*}} icmp + %L = icmp eq <16 x i16> undef, undef + ;CHECK: cost of 4 {{.*}} icmp + %M = icmp eq <32 x i8> undef, undef + + ;CHECK: cost of 1 {{.*}} ret + ret i32 undef +} + + diff --git a/test/Analysis/CostModel/X86/i32.ll b/test/Analysis/CostModel/X86/i32.ll new file mode 100644 index 0000000..4015e0b --- /dev/null +++ b/test/Analysis/CostModel/X86/i32.ll @@ -0,0 +1,9 @@ +; RUN: opt < %s -cost-model -analyze -mtriple=i386 -mcpu=corei7-avx | FileCheck %s + + +;CHECK: cost of 2 {{.*}} add +;CHECK: cost of 1 {{.*}} ret +define i32 @no_info(i32 %arg) { + %e = add i64 undef, undef + ret i32 undef +} diff --git a/test/Analysis/CostModel/X86/insert-extract-at-zero.ll b/test/Analysis/CostModel/X86/insert-extract-at-zero.ll new file mode 100644 index 0000000..87bf7c4 --- /dev/null +++ b/test/Analysis/CostModel/X86/insert-extract-at-zero.ll @@ -0,0 +1,40 @@ +; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +define i32 @insert-extract-at-zero-idx(i32 %arg, float %fl) { + ;CHECK: cost of 0 {{.*}} extract + %A = extractelement <4 x float> undef, i32 0 + ;CHECK: cost of 1 {{.*}} extract + %B = extractelement <4 x i32> undef, i32 0 + ;CHECK: cost of 1 {{.*}} extract + %C = extractelement <4 x float> undef, i32 1 + + ;CHECK: cost of 0 {{.*}} extract + %D = extractelement <8 x float> undef, i32 0 + ;CHECK: cost of 1 {{.*}} extract + %E = extractelement <8 x float> undef, i32 1 + + ;CHECK: cost of 1 {{.*}} extract + %F = extractelement <8 x float> undef, i32 %arg + + ;CHECK: cost of 0 {{.*}} insert + %G = insertelement <4 x float> undef, float %fl, i32 0 + ;CHECK: cost of 1 {{.*}} insert + %H = insertelement <4 x float> undef, float %fl, i32 1 + ;CHECK: cost of 1 {{.*}} insert + %I = insertelement <4 x i32> undef, i32 %arg, i32 0 + + ;CHECK: cost of 0 {{.*}} insert + %J = insertelement <4 x double> undef, double undef, i32 0 + + ;CHECK: cost of 0 {{.*}} insert + %K = insertelement <8 x double> undef, double undef, i32 4 + ;CHECK: cost of 0 {{.*}} insert + %L = insertelement <16 x double> undef, double undef, i32 8 + ;CHECK: cost of 1 {{.*}} insert + %M = insertelement <16 x double> undef, double undef, i32 9 + ret i32 0 +} + diff --git a/test/Analysis/CostModel/X86/lit.local.cfg b/test/Analysis/CostModel/X86/lit.local.cfg new file mode 100644 index 0000000..a8ad0f1 --- /dev/null +++ b/test/Analysis/CostModel/X86/lit.local.cfg @@ -0,0 +1,6 @@ +config.suffixes = ['.ll', '.c', '.cpp'] + +targets = set(config.root.targets_to_build.split()) +if not 'X86' in targets: + config.unsupported = True + diff --git a/test/Analysis/CostModel/X86/loop_v2.ll b/test/Analysis/CostModel/X86/loop_v2.ll new file mode 100644 index 0000000..260a606 --- /dev/null +++ b/test/Analysis/CostModel/X86/loop_v2.ll @@ -0,0 +1,43 @@ +; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s + 
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.9.0" + +define i32 @foo(i32* nocapture %A) nounwind uwtable readonly ssp { +vector.ph: + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %vec.phi = phi <2 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ] + %0 = getelementptr inbounds i32* %A, i64 %index + %1 = bitcast i32* %0 to <2 x i32>* + %2 = load <2 x i32>* %1, align 4 + %3 = sext <2 x i32> %2 to <2 x i64> + ;CHECK: cost of 1 {{.*}} extract + %4 = extractelement <2 x i64> %3, i32 0 + %5 = getelementptr inbounds i32* %A, i64 %4 + ;CHECK: cost of 1 {{.*}} extract + %6 = extractelement <2 x i64> %3, i32 1 + %7 = getelementptr inbounds i32* %A, i64 %6 + %8 = load i32* %5, align 4, !tbaa !0 + ;CHECK: cost of 1 {{.*}} insert + %9 = insertelement <2 x i32> undef, i32 %8, i32 0 + %10 = load i32* %7, align 4, !tbaa !0 + ;CHECK: cost of 1 {{.*}} insert + %11 = insertelement <2 x i32> %9, i32 %10, i32 1 + %12 = add nsw <2 x i32> %11, %vec.phi + %index.next = add i64 %index, 2 + %13 = icmp eq i64 %index.next, 192 + br i1 %13, label %for.end, label %vector.body + +for.end: ; preds = %vector.body + %14 = extractelement <2 x i32> %12, i32 0 + %15 = extractelement <2 x i32> %12, i32 1 + %16 = add i32 %14, %15 + ret i32 %16 +} + +!0 = metadata !{metadata !"int", metadata !1} +!1 = metadata !{metadata !"omnipotent char", metadata !2} +!2 = metadata !{metadata !"Simple C/C++ TBAA"} diff --git a/test/Analysis/CostModel/X86/tiny.ll b/test/Analysis/CostModel/X86/tiny.ll new file mode 100644 index 0000000..cc7b443 --- /dev/null +++ b/test/Analysis/CostModel/X86/tiny.ll @@ -0,0 +1,11 @@ +; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +;CHECK: cost of 1 {{.*}} add +;CHECK: cost of 1 {{.*}} ret +define i32 @no_info(i32 %arg) { + %e = add i32 %arg, %arg + ret i32 %e +} diff --git a/test/Analysis/CostModel/X86/vectorized-loop.ll b/test/Analysis/CostModel/X86/vectorized-loop.ll new file mode 100644 index 0000000..7919a9c --- /dev/null +++ b/test/Analysis/CostModel/X86/vectorized-loop.ll @@ -0,0 +1,78 @@ +; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s + +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +define i32 @foo(i32* noalias nocapture %A, i32* noalias nocapture %B, i32 %start, i32 %end) nounwind uwtable ssp { +entry: + ;CHECK: cost of 1 {{.*}} icmp + %cmp7 = icmp slt i32 %start, %end + br i1 %cmp7, label %for.body.lr.ph, label %for.end + +for.body.lr.ph: ; preds = %entry + ;CHECK: cost of 1 {{.*}} sext + %0 = sext i32 %start to i64 + %1 = sub i32 %end, %start + %2 = zext i32 %1 to i64 + %end.idx = add i64 %2, %0 + ;CHECK: cost of 1 {{.*}} add + %n.vec = and i64 %2, 4294967288 + %end.idx.rnd.down = add i64 %n.vec, %0 + ;CHECK: cost of 1 {{.*}} icmp + %cmp.zero = icmp eq i64 %n.vec, 0 + br i1 %cmp.zero, label %middle.block, label %vector.body + +vector.body: ; 
preds = %for.body.lr.ph, %vector.body + %index = phi i64 [ %index.next, %vector.body ], [ %0, %for.body.lr.ph ] + %3 = add i64 %index, 2 + %4 = getelementptr inbounds i32* %B, i64 %3 + ;CHECK: cost of 0 {{.*}} bitcast + %5 = bitcast i32* %4 to <8 x i32>* + ;CHECK: cost of 1 {{.*}} load + %6 = load <8 x i32>* %5, align 4 + ;CHECK: cost of 4 {{.*}} mul + %7 = mul nsw <8 x i32> %6, + %8 = getelementptr inbounds i32* %A, i64 %index + %9 = bitcast i32* %8 to <8 x i32>* + %10 = load <8 x i32>* %9, align 4 + ;CHECK: cost of 4 {{.*}} add + %11 = add nsw <8 x i32> %10, %7 + ;CHECK: cost of 1 {{.*}} store + store <8 x i32> %11, <8 x i32>* %9, align 4 + %index.next = add i64 %index, 8 + %12 = icmp eq i64 %index.next, %end.idx.rnd.down + ;CHECK: cost of 1 {{.*}} br + br i1 %12, label %middle.block, label %vector.body + +middle.block: ; preds = %vector.body, %for.body.lr.ph + %cmp.n = icmp eq i64 %end.idx, %end.idx.rnd.down + br i1 %cmp.n, label %for.end, label %for.body + +for.body: ; preds = %middle.block, %for.body + %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %end.idx.rnd.down, %middle.block ] + %13 = add nsw i64 %indvars.iv, 2 + %arrayidx = getelementptr inbounds i32* %B, i64 %13 + ;CHECK: cost of 1 {{.*}} load + %14 = load i32* %arrayidx, align 4, !tbaa !0 + ;CHECK: cost of 1 {{.*}} mul + %mul = mul nsw i32 %14, 5 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %indvars.iv + ;CHECK: cost of 1 {{.*}} load + %15 = load i32* %arrayidx2, align 4, !tbaa !0 + %add3 = add nsw i32 %15, %mul + store i32 %add3, i32* %arrayidx2, align 4, !tbaa !0 + %indvars.iv.next = add i64 %indvars.iv, 1 + ;CHECK: cost of 0 {{.*}} trunc + %16 = trunc i64 %indvars.iv.next to i32 + %cmp = icmp slt i32 %16, %end + ;CHECK: cost of 1 {{.*}} br + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %middle.block, %for.body, %entry + ;CHECK: cost of 1 {{.*}} ret + ret i32 undef +} + +!0 = metadata !{metadata !"int", metadata !1} +!1 = metadata !{metadata !"omnipotent char", metadata !2} +!2 = metadata !{metadata !"Simple C/C++ TBAA"} diff --git a/test/Analysis/CostModel/lit.local.cfg b/test/Analysis/CostModel/lit.local.cfg new file mode 100644 index 0000000..19eebc0 --- /dev/null +++ b/test/Analysis/CostModel/lit.local.cfg @@ -0,0 +1 @@ +config.suffixes = ['.ll', '.c', '.cpp'] diff --git a/test/Analysis/CostModel/no_info.ll b/test/Analysis/CostModel/no_info.ll new file mode 100644 index 0000000..d20d56b --- /dev/null +++ b/test/Analysis/CostModel/no_info.ll @@ -0,0 +1,15 @@ +; RUN: opt < %s -cost-model -analyze | FileCheck %s + +; The cost model does not have any target information so it can't make a decision. +; Notice that OPT does not read the triple information from the module itself, only through the command line. 
+ +; This info ignored: +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.8.0" + +;CHECK: Unknown cost {{.*}} add +;CHECK: Unknown cost {{.*}} ret +define i32 @no_info(i32 %arg) { + %e = add i32 %arg, %arg + ret i32 %e +} diff --git a/test/Analysis/DependenceAnalysis/Banerjee.ll b/test/Analysis/DependenceAnalysis/Banerjee.ll new file mode 100644 index 0000000..8865ee9 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/Banerjee.ll @@ -0,0 +1,595 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'Banerjee.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long int i = 1; i <= 10; i++) +;; for (long int j = 1; j <= 10; j++) { +;; A[10*i + j] = ... +;; ... = A[10*i + j - 1]; + +define void @banerjee0(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc7 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ] + %i.03 = phi i64 [ 1, %entry ], [ %inc8, %for.inc7 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 1, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 10 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 10 + %add5 = add nsw i64 %mul4, %j.02 + %sub = add nsw i64 %add5, -1 + %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub + %0 = load i64* %arrayidx6, align 8 +; CHECK: da analyze - flow [<= <>]! + %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 11 + br i1 %exitcond, label %for.body3, label %for.inc7 + +for.inc7: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 10 + %inc8 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc8, 11 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end9 + +for.end9: ; preds = %for.inc7 + ret void +} + + +;; for (long int i = 1; i <= n; i++) +;; for (long int j = 1; j <= m; j++) { +;; A[10*i + j] = ... +;; ... 
= A[10*i + j - 1]; + +define void @banerjee1(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + %cmp4 = icmp sgt i64 %n, 0 + br i1 %cmp4, label %for.cond1.preheader.preheader, label %for.end9 + +for.cond1.preheader.preheader: ; preds = %entry + %0 = add i64 %n, 1 + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc7 + %B.addr.06 = phi i64* [ %B.addr.1.lcssa, %for.inc7 ], [ %B, %for.cond1.preheader.preheader ] + %i.05 = phi i64 [ %inc8, %for.inc7 ], [ 1, %for.cond1.preheader.preheader ] + %1 = add i64 %m, 1 + %cmp21 = icmp sgt i64 %m, 0 + br i1 %cmp21, label %for.body3.preheader, label %for.inc7 + +for.body3.preheader: ; preds = %for.cond1.preheader + br label %for.body3 + +for.body3: ; preds = %for.body3.preheader, %for.body3 + %j.03 = phi i64 [ %inc, %for.body3 ], [ 1, %for.body3.preheader ] + %B.addr.12 = phi i64* [ %incdec.ptr, %for.body3 ], [ %B.addr.06, %for.body3.preheader ] + %mul = mul nsw i64 %i.05, 10 + %add = add nsw i64 %mul, %j.03 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.05, 10 + %add5 = add nsw i64 %mul4, %j.03 + %sub = add nsw i64 %add5, -1 + %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub + %2 = load i64* %arrayidx6, align 8 +; CHECK: da analyze - flow [* <>]! + %incdec.ptr = getelementptr inbounds i64* %B.addr.12, i64 1 + store i64 %2, i64* %B.addr.12, align 8 + %inc = add nsw i64 %j.03, 1 + %exitcond = icmp eq i64 %inc, %1 + br i1 %exitcond, label %for.inc7.loopexit, label %for.body3 + +for.inc7.loopexit: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.06, i64 %m + br label %for.inc7 + +for.inc7: ; preds = %for.inc7.loopexit, %for.cond1.preheader + %B.addr.1.lcssa = phi i64* [ %B.addr.06, %for.cond1.preheader ], [ %scevgep, %for.inc7.loopexit ] + %inc8 = add nsw i64 %i.05, 1 + %exitcond7 = icmp eq i64 %inc8, %0 + br i1 %exitcond7, label %for.end9.loopexit, label %for.cond1.preheader + +for.end9.loopexit: ; preds = %for.inc7 + br label %for.end9 + +for.end9: ; preds = %for.end9.loopexit, %entry + ret void +} + + +;; for (long int i = 0; i < 10; i++) +;; for (long int j = 0; j < 10; j++) { +;; A[10*i + j] = 0; +;; *B++ = A[10*i + j + 100]; + +define void @banerjee2(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc8 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 10 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 10 + %add5 = add nsw i64 %mul4, %j.02 + %add6 = add nsw i64 %add5, 100 + %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6 + %0 = load i64* %arrayidx7, align 8 +; CHECK: da analyze - none! 
+ %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 10 + br i1 %exitcond, label %for.body3, label %for.inc8 + +for.inc8: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 10 + %inc9 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc9, 10 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end10 + +for.end10: ; preds = %for.inc8 + ret void +} + + +;; for (long int i = 0; i < 10; i++) +;; for (long int j = 0; j < 10; j++) { +;; A[10*i + j] = ... +;; ... = A[10*i + j + 99]; + +define void @banerjee3(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc8 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 10 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 10 + %add5 = add nsw i64 %mul4, %j.02 + %add6 = add nsw i64 %add5, 99 + %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6 + %0 = load i64* %arrayidx7, align 8 +; CHECK: da analyze - flow [> >]! + %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 10 + br i1 %exitcond, label %for.body3, label %for.inc8 + +for.inc8: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 10 + %inc9 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc9, 10 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end10 + +for.end10: ; preds = %for.inc8 + ret void +} + + +;; for (long int i = 0; i < 10; i++) +;; for (long int j = 0; j < 10; j++) { +;; A[10*i + j] = ... +;; ... = A[10*i + j - 100]; + +define void @banerjee4(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc7 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 10 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 10 + %add5 = add nsw i64 %mul4, %j.02 + %sub = add nsw i64 %add5, -100 + %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub + %0 = load i64* %arrayidx6, align 8 +; CHECK: da analyze - none! 
+ %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 10 + br i1 %exitcond, label %for.body3, label %for.inc7 + +for.inc7: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 10 + %inc8 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc8, 10 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end9 + +for.end9: ; preds = %for.inc7 + ret void +} + + +;; for (long int i = 0; i < 10; i++) +;; for (long int j = 0; j < 10; j++) { +;; A[10*i + j] = ... +;; ... = A[10*i + j - 99]; + +define void @banerjee5(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc7 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 10 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 10 + %add5 = add nsw i64 %mul4, %j.02 + %sub = add nsw i64 %add5, -99 + %arrayidx6 = getelementptr inbounds i64* %A, i64 %sub + %0 = load i64* %arrayidx6, align 8 +; CHECK: da analyze - flow [< <]! + %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 10 + br i1 %exitcond, label %for.body3, label %for.inc7 + +for.inc7: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 10 + %inc8 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc8, 10 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end9 + +for.end9: ; preds = %for.inc7 + ret void +} + + +;; for (long int i = 0; i < 10; i++) +;; for (long int j = 0; j < 10; j++) { +;; A[10*i + j] = ... +;; ... = A[10*i + j + 9]; + +define void @banerjee6(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc8 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 10 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 10 + %add5 = add nsw i64 %mul4, %j.02 + %add6 = add nsw i64 %add5, 9 + %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6 + %0 = load i64* %arrayidx7, align 8 +; CHECK: da analyze - flow [=> <>]! 
+ %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 10 + br i1 %exitcond, label %for.body3, label %for.inc8 + +for.inc8: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 10 + %inc9 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc9, 10 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end10 + +for.end10: ; preds = %for.inc8 + ret void +} + + +;; for (long int i = 0; i < 10; i++) +;; for (long int j = 0; j < 10; j++) { +;; A[10*i + j] = ... +;; ... = A[10*i + j + 10]; + +define void @banerjee7(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc8 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 10 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 10 + %add5 = add nsw i64 %mul4, %j.02 + %add6 = add nsw i64 %add5, 10 + %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6 + %0 = load i64* %arrayidx7, align 8 +; CHECK: da analyze - flow [> <=]! + %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 10 + br i1 %exitcond, label %for.body3, label %for.inc8 + +for.inc8: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 10 + %inc9 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc9, 10 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end10 + +for.end10: ; preds = %for.inc8 + ret void +} + + +;; for (long int i = 0; i < 10; i++) +;; for (long int j = 0; j < 10; j++) { +;; A[10*i + j] = ... +;; ... = A[10*i + j + 11]; + +define void @banerjee8(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc8 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 10 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 10 + %add5 = add nsw i64 %mul4, %j.02 + %add6 = add nsw i64 %add5, 11 + %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6 + %0 = load i64* %arrayidx7, align 8 +; CHECK: da analyze - flow [> <>]! 
+ %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 10 + br i1 %exitcond, label %for.body3, label %for.inc8 + +for.inc8: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 10 + %inc9 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc9, 10 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end10 + +for.end10: ; preds = %for.inc8 + ret void +} + + +;; for (long int i = 0; i < 20; i++) +;; for (long int j = 0; j < 20; j++) { +;; A[30*i + 500*j] = ... +;; ... = A[i - 500*j + 11]; + +define void @banerjee9(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc8 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc8 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 30 + %mul4 = mul nsw i64 %j.02, 500 + %add = add nsw i64 %mul, %mul4 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %0 = mul i64 %j.02, -500 + %sub = add i64 %i.03, %0 + %add6 = add nsw i64 %sub, 11 + %arrayidx7 = getelementptr inbounds i64* %A, i64 %add6 + %1 = load i64* %arrayidx7, align 8 +; CHECK: da analyze - flow [<= =|<]! + %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %1, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 20 + br i1 %exitcond, label %for.body3, label %for.inc8 + +for.inc8: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 20 + %inc9 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc9, 20 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end10 + +for.end10: ; preds = %for.inc8 + ret void +} + + +;; for (long int i = 0; i < 20; i++) +;; for (long int j = 0; j < 20; j++) { +;; A[i + 500*j] = ... +;; ... = A[i - 500*j + 11]; + +define void @banerjee10(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc7 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %j.02, 500 + %add = add nsw i64 %i.03, %mul + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %0 = mul i64 %j.02, -500 + %sub = add i64 %i.03, %0 + %add5 = add nsw i64 %sub, 11 + %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5 + %1 = load i64* %arrayidx6, align 8 +; CHECK: da analyze - flow [<> =]! 
+ %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %1, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 20 + br i1 %exitcond, label %for.body3, label %for.inc7 + +for.inc7: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 20 + %inc8 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc8, 20 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end9 + +for.end9: ; preds = %for.inc7 + ret void +} + + +;; for (long int i = 0; i < 20; i++) +;; for (long int j = 0; j < 20; j++) { +;; A[300*i + j] = ... +;; ... = A[250*i - j + 11]; + +define void @banerjee11(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc7 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 300 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 250 + %sub = sub nsw i64 %mul4, %j.02 + %add5 = add nsw i64 %sub, 11 + %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5 + %0 = load i64* %arrayidx6, align 8 +; CHECK: da analyze - flow [<= <>]! + %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 20 + br i1 %exitcond, label %for.body3, label %for.inc7 + +for.inc7: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 20 + %inc8 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc8, 20 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end9 + +for.end9: ; preds = %for.inc7 + ret void +} + + +;; for (long int i = 0; i < 20; i++) +;; for (long int j = 0; j < 20; j++) { +;; A[100*i + j] = ... +;; ... = A[100*i - j + 11]; + +define void @banerjee12(i64* %A, i64* %B, i64 %m, i64 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc7 + %B.addr.04 = phi i64* [ %B, %entry ], [ %scevgep, %for.inc7 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i64* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %mul = mul nsw i64 %i.03, 100 + %add = add nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i64* %A, i64 %add + store i64 0, i64* %arrayidx, align 8 + %mul4 = mul nsw i64 %i.03, 100 + %sub = sub nsw i64 %mul4, %j.02 + %add5 = add nsw i64 %sub, 11 + %arrayidx6 = getelementptr inbounds i64* %A, i64 %add5 + %0 = load i64* %arrayidx6, align 8 +; CHECK: da analyze - flow [= <>]! 
+ %incdec.ptr = getelementptr inbounds i64* %B.addr.11, i64 1 + store i64 %0, i64* %B.addr.11, align 8 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 20 + br i1 %exitcond, label %for.body3, label %for.inc7 + +for.inc7: ; preds = %for.body3 + %scevgep = getelementptr i64* %B.addr.04, i64 20 + %inc8 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc8, 20 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end9 + +for.end9: ; preds = %for.inc7 + ret void +} diff --git a/test/Analysis/DependenceAnalysis/Coupled.ll b/test/Analysis/DependenceAnalysis/Coupled.ll new file mode 100644 index 0000000..60163fe --- /dev/null +++ b/test/Analysis/DependenceAnalysis/Coupled.ll @@ -0,0 +1,509 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'Coupled.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long int i = 0; i < 50; i++) +;; A[i][i] = ... +;; ... = A[i + 10][i + 9] + +define void @couple0([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + store i32 %conv, i32* %arrayidx1, align 4 + %add = add nsw i64 %i.02, 9 + %add2 = add nsw i64 %i.02, 10 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %add2, i64 %add + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 50 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; A[i][i] = ... +;; ... = A[i + 9][i + 9] + +define void @couple1([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + store i32 %conv, i32* %arrayidx1, align 4 + %add = add nsw i64 %i.02, 9 + %add2 = add nsw i64 %i.02, 9 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %add2, i64 %add + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - consistent flow [-9]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 50 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; A[3*i - 6][3*i - 6] = ... +;; ... 
= A[i][i] + +define void @couple2([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul nsw i64 %i.02, 3 + %sub = add nsw i64 %mul, -6 + %mul1 = mul nsw i64 %i.02, 3 + %sub2 = add nsw i64 %mul1, -6 + %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %sub2, i64 %sub + store i32 %conv, i32* %arrayidx3, align 4 + %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - flow [*|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 50 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; A[3*i - 6][3*i - 5] = ... +;; ... = A[i][i] + +define void @couple3([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul nsw i64 %i.02, 3 + %sub = add nsw i64 %mul, -5 + %mul1 = mul nsw i64 %i.02, 3 + %sub2 = add nsw i64 %mul1, -6 + %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %sub2, i64 %sub + store i32 %conv, i32* %arrayidx3, align 4 + %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 50 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; A[3*i - 6][3*i - n] = ... +;; ... = A[i][i] + +define void @couple4([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul nsw i64 %i.02, 3 + %conv1 = sext i32 %n to i64 + %sub = sub nsw i64 %mul, %conv1 + %mul2 = mul nsw i64 %i.02, 3 + %sub3 = add nsw i64 %mul2, -6 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %sub3, i64 %sub + store i32 %conv, i32* %arrayidx4, align 4 + %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx6, align 4 +; CHECK: da analyze - flow [*|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 50 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; A[3*i - n + 1][3*i - n] = ... +;; ... 
= A[i][i] + +define void @couple5([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul nsw i64 %i.02, 3 + %conv1 = sext i32 %n to i64 + %sub = sub nsw i64 %mul, %conv1 + %mul2 = mul nsw i64 %i.02, 3 + %conv3 = sext i32 %n to i64 + %sub4 = sub nsw i64 %mul2, %conv3 + %add = add nsw i64 %sub4, 1 + %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %add, i64 %sub + store i32 %conv, i32* %arrayidx5, align 4 + %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx7, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 50 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; A[i][3*i - 6] = ... +;; ... = A[i][i] + +define void @couple6([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul nsw i64 %i.02, 3 + %sub = add nsw i64 %mul, -6 + %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %sub + store i32 %conv, i32* %arrayidx1, align 4 + %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - flow [=|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 50 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; A[i][3*i - 5] = ... +;; ... = A[i][i] + +define void @couple7([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul nsw i64 %i.02, 3 + %sub = add nsw i64 %mul, -5 + %arrayidx1 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %sub + store i32 %conv, i32* %arrayidx1, align 4 + %arrayidx3 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 50 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i <= 15; i++) +;; A[3*i - 18][3 - i] = ... +;; ... 
= A[i][i] + +define void @couple8([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %sub = sub nsw i64 3, %i.02 + %mul = mul nsw i64 %i.02, 3 + %sub1 = add nsw i64 %mul, -18 + %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub + store i32 %conv, i32* %arrayidx2, align 4 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 16 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i <= 15; i++) +;; A[3*i - 18][2 - i] = ... +;; ... = A[i][i] + +define void @couple9([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %sub = sub nsw i64 2, %i.02 + %mul = mul nsw i64 %i.02, 3 + %sub1 = add nsw i64 %mul, -18 + %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub + store i32 %conv, i32* %arrayidx2, align 4 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 16 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i <= 15; i++) +;; A[3*i - 18][6 - i] = ... +;; ... = A[i][i] + +define void @couple10([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %sub = sub nsw i64 6, %i.02 + %mul = mul nsw i64 %i.02, 3 + %sub1 = add nsw i64 %mul, -18 + %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub + store i32 %conv, i32* %arrayidx2, align 4 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - flow [>] splitable! +; CHECK: da analyze - split level = 1, iteration = 3! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 16 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i <= 15; i++) +;; A[3*i - 18][18 - i] = ... +;; ... 
= A[i][i] + +define void @couple11([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %sub = sub nsw i64 18, %i.02 + %mul = mul nsw i64 %i.02, 3 + %sub1 = add nsw i64 %mul, -18 + %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub + store i32 %conv, i32* %arrayidx2, align 4 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - flow [=|<] splitable! +; CHECK: da analyze - split level = 1, iteration = 9! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 16 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i <= 12; i++) +;; A[3*i - 18][22 - i] = ... +;; ... = A[i][i] + +define void @couple12([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %sub = sub nsw i64 22, %i.02 + %mul = mul nsw i64 %i.02, 3 + %sub1 = add nsw i64 %mul, -18 + %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub + store i32 %conv, i32* %arrayidx2, align 4 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - flow [<] splitable! +; CHECK: da analyze - split level = 1, iteration = 11! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 13 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 12; i++) +;; A[3*i - 18][22 - i] = ... +;; ... = A[i][i] + +define void @couple13([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %sub = sub nsw i64 22, %i.02 + %mul = mul nsw i64 %i.02, 3 + %sub1 = add nsw i64 %mul, -18 + %arrayidx2 = getelementptr inbounds [100 x i32]* %A, i64 %sub1, i64 %sub + store i32 %conv, i32* %arrayidx2, align 4 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 12 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; A[3*i - 18][18 - i][i] = ... +;; ... 
= A[i][i][i] + +define void @couple14([100 x [100 x i32]]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %sub = sub nsw i64 18, %i.02 + %mul = mul nsw i64 %i.02, 3 + %sub1 = add nsw i64 %mul, -18 + %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02 + store i32 %conv, i32* %arrayidx3, align 4 + %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx6, align 4 +; CHECK: da analyze - flow [=|<] splitable! +; CHECK: da analyze - split level = 1, iteration = 9! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 100 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; A[3*i - 18][22 - i][i] = ... +;; ... = A[i][i][i] + +define void @couple15([100 x [100 x i32]]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %sub = sub nsw i64 22, %i.02 + %mul = mul nsw i64 %i.02, 3 + %sub1 = add nsw i64 %mul, -18 + %arrayidx3 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02 + store i32 %conv, i32* %arrayidx3, align 4 + %arrayidx6 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02 + %0 = load i32* %arrayidx6, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add nsw i64 %i.02, 1 + %cmp = icmp slt i64 %inc, 100 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} diff --git a/test/Analysis/DependenceAnalysis/ExactRDIV.ll b/test/Analysis/DependenceAnalysis/ExactRDIV.ll new file mode 100644 index 0000000..aa5d254 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/ExactRDIV.ll @@ -0,0 +1,508 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'ExactRDIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long int i = 0; i < 10; i++) +;; A[4*i + 10] = ... +;; for (long int j = 0; j < 10; j++) +;; ... 
= A[2*j + 1]; + +define void @rdiv0(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 2 + %add = add nsw i64 %mul, 10 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 10 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc9, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %mul5 = shl nsw i64 %j.02, 1 + %add64 = or i64 %mul5, 1 + %arrayidx7 = getelementptr inbounds i32* %A, i64 %add64 + %0 = load i32* %arrayidx7, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc9 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc9, 10 + br i1 %cmp2, label %for.body4, label %for.end10 + +for.end10: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i < 5; i++) +;; A[11*i - 45] = ... +;; for (long int j = 0; j < 10; j++) +;; ... = A[j]; + +define void @rdiv1(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 11 + %sub = add nsw i64 %mul, -45 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 5 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc7 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc7, 10 + br i1 %cmp2, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i <= 5; i++) +;; A[11*i - 45] = ... +;; for (long int j = 0; j < 10; j++) +;; ... = A[j]; + +define void @rdiv2(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 11 + %sub = add nsw i64 %mul, -45 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 6 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc7 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc7, 10 + br i1 %cmp2, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i < 5; i++) +;; A[11*i - 45] = ... 
+;; for (long int j = 0; j <= 10; j++) +;; ... = A[j]; + +define void @rdiv3(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 11 + %sub = add nsw i64 %mul, -45 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 5 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc7 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc7, 11 + br i1 %cmp2, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i <= 5; i++) +;; A[11*i - 45] = ... +;; for (long int j = 0; j <= 10; j++) +;; ... = A[j]; + +define void @rdiv4(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 11 + %sub = add nsw i64 %mul, -45 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 6 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %arrayidx5 = getelementptr inbounds i32* %A, i64 %j.02 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - flow! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc7 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc7, 11 + br i1 %cmp2, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i < 5; i++) +;; A[-11*i + 45] = ... +;; for (long int j = 0; j < 10; j++) +;; ... = A[-j]; + +define void @rdiv5(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, -11 + %add = add nsw i64 %mul, 45 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 5 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %sub = sub nsw i64 0, %j.02 + %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc7 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc7, 10 + br i1 %cmp2, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i <= 5; i++) +;; A[-11*i + 45] = ... 
+;; for (long int j = 0; j < 10; j++) +;; ... = A[-j]; + +define void @rdiv6(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, -11 + %add = add nsw i64 %mul, 45 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 6 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %sub = sub nsw i64 0, %j.02 + %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc7 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc7, 10 + br i1 %cmp2, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i < 5; i++) +;; A[-11*i + 45] = ... +;; for (long int j = 0; j <= 10; j++) +;; ... = A[-j]; + +define void @rdiv7(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, -11 + %add = add nsw i64 %mul, 45 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 5 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %sub = sub nsw i64 0, %j.02 + %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc7 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc7, 11 + br i1 %cmp2, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i <= 5; i++) +;; A[-11*i + 45] = ... +;; for (long int j = 0; j <= 10; j++) +;; ... = A[-j]; + +define void @rdiv8(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, -11 + %add = add nsw i64 %mul, 45 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, 6 + br i1 %cmp, label %for.body, label %for.body4 + +for.body4: ; preds = %for.body4, %for.body + %j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body ] + %B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body ] + %sub = sub nsw i64 0, %j.02 + %arrayidx5 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - flow! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc7 = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc7, 11 + br i1 %cmp2, label %for.body4, label %for.end8 + +for.end8: ; preds = %for.body4 + ret void +} + + +;; for (long int i = 0; i < 5; i++) +;; for (long int j = 0; j < 10; j++) +;; A[11*i - j] = ... +;; ... = A[45]; + +define void @rdiv9(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc5, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc5 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc6, %for.inc5 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 11 + %sub = sub nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx4 = getelementptr inbounds i32* %A, i64 45 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 10 + br i1 %cmp2, label %for.body3, label %for.inc5 + +for.inc5: ; preds = %for.body3 + %inc6 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc6, 5 + br i1 %cmp, label %for.cond1.preheader, label %for.end7 + +for.end7: ; preds = %for.inc5 + ret void +} + + +;; for (long int i = 0; i < 5; i++) +;; for (long int j = 0; j <= 10; j++) +;; A[11*i - j] = ... +;; ... = A[45]; + +define void @rdiv10(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc5, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc5 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc6, %for.inc5 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 11 + %sub = sub nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx4 = getelementptr inbounds i32* %A, i64 45 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 10 + br i1 %cmp2, label %for.body3, label %for.inc5 + +for.inc5: ; preds = %for.body3 + %inc6 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc6, 6 + br i1 %cmp, label %for.cond1.preheader, label %for.end7 + +for.end7: ; preds = %for.inc5 + ret void +} + + +;; for (long int i = 0; i <= 5; i++) +;; for (long int j = 0; j <= 10; j++) +;; A[11*i - j] = ... +;; ... 
= A[45]; + +define void @rdiv11(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc5, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc5 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc6, %for.inc5 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 11 + %sub = sub nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx4 = getelementptr inbounds i32* %A, i64 45 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 11 + br i1 %cmp2, label %for.body3, label %for.inc5 + +for.inc5: ; preds = %for.body3 + %inc6 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc6, 5 + br i1 %cmp, label %for.cond1.preheader, label %for.end7 + +for.end7: ; preds = %for.inc5 + ret void +} + + +;; for (long int i = 0; i < 5; i++) +;; for (long int j = 0; j < 10; j++) +;; A[11*i - j] = ... +;; ... = A[45]; + +define void @rdiv12(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc5, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc5 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc6, %for.inc5 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 11 + %sub = sub nsw i64 %mul, %j.02 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx4 = getelementptr inbounds i32* %A, i64 45 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - flow [* *|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 11 + br i1 %cmp2, label %for.body3, label %for.inc5 + +for.inc5: ; preds = %for.body3 + %inc6 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc6, 6 + br i1 %cmp, label %for.cond1.preheader, label %for.end7 + +for.end7: ; preds = %for.inc5 + ret void +} diff --git a/test/Analysis/DependenceAnalysis/ExactSIV.ll b/test/Analysis/DependenceAnalysis/ExactSIV.ll new file mode 100644 index 0000000..71e0502 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/ExactSIV.ll @@ -0,0 +1,428 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'ExactSIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long unsigned i = 0; i < 10; i++) { +;; A[i + 10] = ... +;; ... 
= A[2*i + 1]; + +define void @exact0(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %add = add i64 %i.02, 10 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul = shl i64 %i.02, 1 + %add13 = or i64 %mul, 1 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %add13 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [<=|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 10 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 10; i++) { +;; A[4*i + 10] = ... +;; ... = A[2*i + 1]; + +define void @exact1(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = shl i64 %i.02, 2 + %add = add i64 %mul, 10 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul1 = shl i64 %i.02, 1 + %add23 = or i64 %mul1, 1 + %arrayidx3 = getelementptr inbounds i32* %A, i64 %add23 + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 10 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 10; i++) { +;; A[6*i] = ... +;; ... = A[i + 60]; + +define void @exact2(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, 6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %add = add i64 %i.02, 60 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 10 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i <= 10; i++) { +;; A[6*i] = ... +;; ... = A[i + 60]; + +define void @exact3(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, 6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %add = add i64 %i.02, 60 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [>]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 11 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 12; i++) { +;; A[6*i] = ... +;; ... = A[i + 60]; + +define void @exact4(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, 6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %add = add i64 %i.02, 60 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [>]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 12 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i <= 12; i++) { +;; A[6*i] = ... +;; ... = A[i + 60]; + +define void @exact5(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, 6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %add = add i64 %i.02, 60 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [=>|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 13 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 18; i++) { +;; A[6*i] = ... +;; ... = A[i + 60]; + +define void @exact6(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, 6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %add = add i64 %i.02, 60 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [=>|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 18 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i <= 18; i++) { +;; A[6*i] = ... +;; ... 
= A[i + 60]; + +define void @exact7(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, 6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %add = add i64 %i.02, 60 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [*|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 19 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 10; i++) { +;; A[-6*i] = ... +;; ... = A[-i - 60]; + +define void @exact8(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, -6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %sub1 = sub i64 -60, %i.02 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 10 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i <= 10; i++) { +;; A[-6*i] = ... +;; ... = A[-i - 60]; + +define void @exact9(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, -6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %sub1 = sub i64 -60, %i.02 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [>]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 11 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 12; i++) { +;; A[-6*i] = ... +;; ... = A[-i - 60]; + +define void @exact10(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, -6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %sub1 = sub i64 -60, %i.02 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [>]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 12 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i <= 12; i++) { +;; A[-6*i] = ... +;; ... = A[-i - 60]; + +define void @exact11(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, -6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %sub1 = sub i64 -60, %i.02 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [=>|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 13 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 18; i++) { +;; A[-6*i] = ... +;; ... = A[-i - 60]; + +define void @exact12(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, -6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %sub1 = sub i64 -60, %i.02 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [=>|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 18 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i <= 18; i++) { +;; A[-6*i] = ... +;; ... = A[-i - 60]; + +define void @exact13(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, -6 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %sub1 = sub i64 -60, %i.02 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [*|<]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 19 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} diff --git a/test/Analysis/DependenceAnalysis/GCD.ll b/test/Analysis/DependenceAnalysis/GCD.ll new file mode 100644 index 0000000..94c93a8 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/GCD.ll @@ -0,0 +1,597 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'GCD.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[2*i - 4*j] = ... +;; ... = A[6*i + 8*j]; + +define void @gcd0(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc8 + %B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc8 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %mul4 = shl nsw i64 %j.02, 2 + %sub = sub nsw i64 %mul, %mul4 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %mul5 = mul nsw i64 %i.03, 6 + %mul6 = shl nsw i64 %j.02, 3 + %add = add nsw i64 %mul5, %mul6 + %arrayidx7 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx7, align 4 +; CHECK: da analyze - flow [=> *|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 100 + br i1 %exitcond, label %for.body3, label %for.inc8 + +for.inc8: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.04, i64 100 + %inc9 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc9, 100 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end10 + +for.end10: ; preds = %for.inc8 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[2*i - 4*j] = ... +;; ... = A[6*i + 8*j + 1]; + +define void @gcd1(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc9 + %B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc9 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc10, %for.inc9 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %mul4 = shl nsw i64 %j.02, 2 + %sub = sub nsw i64 %mul, %mul4 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %mul5 = mul nsw i64 %i.03, 6 + %mul6 = shl nsw i64 %j.02, 3 + %add = add nsw i64 %mul5, %mul6 + %add7 = or i64 %add, 1 + %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7 + %0 = load i32* %arrayidx8, align 4 +; CHECK: da analyze - none! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 100 + br i1 %exitcond, label %for.body3, label %for.inc9 + +for.inc9: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.04, i64 100 + %inc10 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc10, 100 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end11 + +for.end11: ; preds = %for.inc9 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[2*i - 4*j + 1] = ... +;; ... = A[6*i + 8*j]; + +define void @gcd2(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc9 + %B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc9 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc10, %for.inc9 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %mul4 = shl nsw i64 %j.02, 2 + %sub = sub nsw i64 %mul, %mul4 + %add5 = or i64 %sub, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add5 + store i32 %conv, i32* %arrayidx, align 4 + %mul5 = mul nsw i64 %i.03, 6 + %mul6 = shl nsw i64 %j.02, 3 + %add7 = add nsw i64 %mul5, %mul6 + %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7 + %0 = load i32* %arrayidx8, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 100 + br i1 %exitcond, label %for.body3, label %for.inc9 + +for.inc9: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.04, i64 100 + %inc10 = add nsw i64 %i.03, 1 + %exitcond6 = icmp ne i64 %inc10, 100 + br i1 %exitcond6, label %for.cond1.preheader, label %for.end11 + +for.end11: ; preds = %for.inc9 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[i + 2*j] = ... +;; ... = A[i + 2*j - 1]; + +define void @gcd3(i32* %A, i32* %B) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc7 + %B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc7 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc8, %for.inc7 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %j.02, 1 + %add = add nsw i64 %i.03, %mul + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul4 = shl nsw i64 %j.02, 1 + %add5 = add nsw i64 %i.03, %mul4 + %sub = add nsw i64 %add5, -1 + %arrayidx6 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx6, align 4 +; CHECK: da analyze - flow [<> *]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 100 + br i1 %exitcond, label %for.body3, label %for.inc7 + +for.inc7: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.04, i64 100 + %inc8 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc8, 100 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end9 + +for.end9: ; preds = %for.inc7 + ret void +} + + +;; void gcd4(int *A, int *B, long int M, long int N) { +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) { +;; A[5*i + 10*j*M + 9*M*N] = i; +;; *B++ = A[15*i + 20*j*M - 21*N*M + 4]; + +define void @gcd4(i32* %A, i32* %B, i64 %M, i64 %N) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc17 + %B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc17 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc18, %for.inc17 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 5 + %mul4 = mul nsw i64 %j.02, 10 + %mul5 = mul nsw i64 %mul4, %M + %add = add nsw i64 %mul, %mul5 + %mul6 = mul nsw i64 %M, 9 + %mul7 = mul nsw i64 %mul6, %N + %add8 = add nsw i64 %add, %mul7 + %arrayidx = getelementptr inbounds i32* %A, i64 %add8 + store i32 %conv, i32* %arrayidx, align 4 + %mul9 = mul nsw i64 %i.03, 15 + %mul10 = mul nsw i64 %j.02, 20 + %mul11 = mul nsw i64 %mul10, %M + %add12 = add nsw i64 %mul9, %mul11 + %mul13 = mul nsw i64 %N, 21 + %mul14 = mul nsw i64 %mul13, %M + %sub = sub nsw i64 %add12, %mul14 + %add15 = add nsw i64 %sub, 4 + %arrayidx16 = getelementptr inbounds i32* %A, i64 %add15 + %0 = load i32* %arrayidx16, align 4 +; CHECK: da analyze - none! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 100 + br i1 %exitcond, label %for.body3, label %for.inc17 + +for.inc17: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.04, i64 100 + %inc18 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc18, 100 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end19 + +for.end19: ; preds = %for.inc17 + ret void +} + + +;; void gcd5(int *A, int *B, long int M, long int N) { +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) { +;; A[5*i + 10*j*M + 9*M*N] = i; +;; *B++ = A[15*i + 20*j*M - 21*N*M + 5]; + +define void @gcd5(i32* %A, i32* %B, i64 %M, i64 %N) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %entry, %for.inc17 + %B.addr.04 = phi i32* [ %B, %entry ], [ %scevgep, %for.inc17 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc18, %for.inc17 ] + br label %for.body3 + +for.body3: ; preds = %for.cond1.preheader, %for.body3 + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 5 + %mul4 = mul nsw i64 %j.02, 10 + %mul5 = mul nsw i64 %mul4, %M + %add = add nsw i64 %mul, %mul5 + %mul6 = mul nsw i64 %M, 9 + %mul7 = mul nsw i64 %mul6, %N + %add8 = add nsw i64 %add, %mul7 + %arrayidx = getelementptr inbounds i32* %A, i64 %add8 + store i32 %conv, i32* %arrayidx, align 4 + %mul9 = mul nsw i64 %i.03, 15 + %mul10 = mul nsw i64 %j.02, 20 + %mul11 = mul nsw i64 %mul10, %M + %add12 = add nsw i64 %mul9, %mul11 + %mul13 = mul nsw i64 %N, 21 + %mul14 = mul nsw i64 %mul13, %M + %sub = sub nsw i64 %add12, %mul14 + %add15 = add nsw i64 %sub, 5 + %arrayidx16 = getelementptr inbounds i32* %A, i64 %add15 + %0 = load i32* %arrayidx16, align 4 +; CHECK: da analyze - flow [<> *]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %exitcond = icmp ne i64 %inc, 100 + br i1 %exitcond, label %for.body3, label %for.inc17 + +for.inc17: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.04, i64 100 + %inc18 = add nsw i64 %i.03, 1 + %exitcond5 = icmp ne i64 %inc18, 100 + br i1 %exitcond5, label %for.cond1.preheader, label %for.end19 + +for.end19: ; preds = %for.inc17 + ret void +} + + +;; void gcd6(long int n, int A[][n], int *B) { +;; for (long int i = 0; i < n; i++) +;; for (long int j = 0; j < n; j++) { +;; A[2*i][4*j] = i; +;; *B++ = A[8*i][6*j + 1]; + +define void @gcd6(i64 %n, i32* %A, i32* %B) nounwind uwtable ssp { +entry: + %cmp4 = icmp sgt i64 %n, 0 + br i1 %cmp4, label %for.cond1.preheader.preheader, label %for.end12 + +for.cond1.preheader.preheader: ; preds = %entry + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc10 + %i.06 = phi i64 [ %inc11, %for.inc10 ], [ 0, %for.cond1.preheader.preheader ] + %B.addr.05 = phi i32* [ %B.addr.1.lcssa, %for.inc10 ], [ %B, %for.cond1.preheader.preheader ] + %cmp21 = icmp sgt i64 %n, 0 + br i1 %cmp21, label %for.body3.preheader, label %for.inc10 + +for.body3.preheader: ; preds = %for.cond1.preheader + br label %for.body3 + +for.body3: ; preds = %for.body3.preheader, %for.body3 + %j.03 = phi i64 [ %inc, %for.body3 ], [ 0, %for.body3.preheader ] + %B.addr.12 = phi i32* [ %incdec.ptr, %for.body3 ], [ %B.addr.05, %for.body3.preheader ] + %conv = trunc i64 %i.06 to i32 + %mul = shl nsw i64 %j.03, 2 + %mul4 = shl nsw i64 %i.06, 1 + %0 = mul nsw i64 %mul4, %n + %arrayidx.sum = add i64 %0, %mul + %arrayidx5 = getelementptr inbounds i32* %A, i64 %arrayidx.sum + store i32 %conv, i32* %arrayidx5, align 4 + %mul6 = mul nsw i64 %j.03, 6 + %add7 = or i64 %mul6, 1 + %mul7 = shl nsw i64 %i.06, 3 + %1 = mul nsw i64 %mul7, %n + %arrayidx8.sum = add i64 %1, %add7 + %arrayidx9 = getelementptr inbounds i32* %A, i64 %arrayidx8.sum + %2 = load i32* %arrayidx9, align 4 +; CHECK: da analyze - none! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1 + store i32 %2, i32* %B.addr.12, align 4 + %inc = add nsw i64 %j.03, 1 + %exitcond = icmp ne i64 %inc, %n + br i1 %exitcond, label %for.body3, label %for.inc10.loopexit + +for.inc10.loopexit: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.05, i64 %n + br label %for.inc10 + +for.inc10: ; preds = %for.inc10.loopexit, %for.cond1.preheader + %B.addr.1.lcssa = phi i32* [ %B.addr.05, %for.cond1.preheader ], [ %scevgep, %for.inc10.loopexit ] + %inc11 = add nsw i64 %i.06, 1 + %exitcond8 = icmp ne i64 %inc11, %n + br i1 %exitcond8, label %for.cond1.preheader, label %for.end12.loopexit + +for.end12.loopexit: ; preds = %for.inc10 + br label %for.end12 + +for.end12: ; preds = %for.end12.loopexit, %entry + ret void +} + + +;; void gcd7(int n, int A[][n], int *B) { +;; for (int i = 0; i < n; i++) +;; for (int j = 0; j < n; j++) { +;; A[2*i][4*j] = i; +;; *B++ = A[8*i][6*j + 1]; + +define void @gcd7(i32 %n, i32* %A, i32* %B) nounwind uwtable ssp { +entry: + %0 = zext i32 %n to i64 + %cmp4 = icmp sgt i32 %n, 0 + br i1 %cmp4, label %for.cond1.preheader.preheader, label %for.end15 + +for.cond1.preheader.preheader: ; preds = %entry + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc13 + %indvars.iv8 = phi i64 [ 0, %for.cond1.preheader.preheader ], [ %indvars.iv.next9, %for.inc13 ] + %B.addr.05 = phi i32* [ %B.addr.1.lcssa, %for.inc13 ], [ %B, %for.cond1.preheader.preheader ] + %1 = add i32 %n, -1 + %2 = zext i32 %1 to i64 + %3 = add i64 %2, 1 + %cmp21 = icmp sgt i32 %n, 0 + br i1 %cmp21, label %for.body3.preheader, label %for.inc13 + +for.body3.preheader: ; preds = %for.cond1.preheader + br label %for.body3 + +for.body3: ; preds = %for.body3.preheader, %for.body3 + %indvars.iv = phi i64 [ 0, %for.body3.preheader ], [ %indvars.iv.next, %for.body3 ] + %B.addr.12 = phi i32* [ %incdec.ptr, %for.body3 ], [ %B.addr.05, %for.body3.preheader ] + %4 = trunc i64 %indvars.iv to i32 + %mul = shl nsw i32 %4, 2 + %idxprom = sext i32 %mul to i64 + %5 = trunc i64 %indvars.iv8 to i32 + %mul4 = shl nsw i32 %5, 1 + %idxprom5 = sext i32 %mul4 to i64 + %6 = mul nsw i64 %idxprom5, %0 + %arrayidx.sum = add i64 %6, %idxprom + %arrayidx6 = getelementptr inbounds i32* %A, i64 %arrayidx.sum + %7 = trunc i64 %indvars.iv8 to i32 + store i32 %7, i32* %arrayidx6, align 4 + %8 = trunc i64 %indvars.iv to i32 + %mul7 = mul nsw i32 %8, 6 + %add7 = or i32 %mul7, 1 + %idxprom8 = sext i32 %add7 to i64 + %9 = trunc i64 %indvars.iv8 to i32 + %mul9 = shl nsw i32 %9, 3 + %idxprom10 = sext i32 %mul9 to i64 + %10 = mul nsw i64 %idxprom10, %0 + %arrayidx11.sum = add i64 %10, %idxprom8 + %arrayidx12 = getelementptr inbounds i32* %A, i64 %arrayidx11.sum + %11 = load i32* %arrayidx12, align 4 +; CHECK: da analyze - flow [* *|<]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1 + store i32 %11, i32* %B.addr.12, align 4 + %indvars.iv.next = add i64 %indvars.iv, 1 + %lftr.wideiv = trunc i64 %indvars.iv.next to i32 + %exitcond = icmp ne i32 %lftr.wideiv, %n + br i1 %exitcond, label %for.body3, label %for.inc13.loopexit + +for.inc13.loopexit: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.05, i64 %3 + br label %for.inc13 + +for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader + %B.addr.1.lcssa = phi i32* [ %B.addr.05, %for.cond1.preheader ], [ %scevgep, %for.inc13.loopexit ] + %indvars.iv.next9 = add i64 %indvars.iv8, 1 + %lftr.wideiv10 = trunc i64 %indvars.iv.next9 to i32 + %exitcond11 = icmp ne i32 %lftr.wideiv10, %n + br i1 %exitcond11, label %for.cond1.preheader, label %for.end15.loopexit + +for.end15.loopexit: ; preds = %for.inc13 + br label %for.end15 + +for.end15: ; preds = %for.end15.loopexit, %entry + ret void +} + + +;; void gcd8(int n, int *A, int *B) { +;; for (int i = 0; i < n; i++) +;; for (int j = 0; j < n; j++) { +;; A[n*2*i + 4*j] = i; +;; *B++ = A[n*8*i + 6*j + 1]; + +define void @gcd8(i32 %n, i32* %A, i32* %B) nounwind uwtable ssp { +entry: + %cmp4 = icmp sgt i32 %n, 0 + br i1 %cmp4, label %for.cond1.preheader.preheader, label %for.end15 + +for.cond1.preheader.preheader: ; preds = %entry + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc13 + %i.06 = phi i32 [ %inc14, %for.inc13 ], [ 0, %for.cond1.preheader.preheader ] + %B.addr.05 = phi i32* [ %B.addr.1.lcssa, %for.inc13 ], [ %B, %for.cond1.preheader.preheader ] + %0 = add i32 %n, -1 + %1 = zext i32 %0 to i64 + %2 = add i64 %1, 1 + %cmp21 = icmp sgt i32 %n, 0 + br i1 %cmp21, label %for.body3.preheader, label %for.inc13 + +for.body3.preheader: ; preds = %for.cond1.preheader + br label %for.body3 + +for.body3: ; preds = %for.body3.preheader, %for.body3 + %indvars.iv = phi i64 [ 0, %for.body3.preheader ], [ %indvars.iv.next, %for.body3 ] + %B.addr.12 = phi i32* [ %incdec.ptr, %for.body3 ], [ %B.addr.05, %for.body3.preheader ] + %mul = shl nsw i32 %n, 1 + %mul4 = mul nsw i32 %mul, %i.06 + %3 = trunc i64 %indvars.iv to i32 + %mul5 = shl nsw i32 %3, 2 + %add = add nsw i32 %mul4, %mul5 + %idxprom = sext i32 %add to i64 + %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom + store i32 %i.06, i32* %arrayidx, align 4 + %mul6 = shl nsw i32 %n, 3 + %mul7 = mul nsw i32 %mul6, %i.06 + %4 = trunc i64 %indvars.iv to i32 + %mul8 = mul nsw i32 %4, 6 + %add9 = add nsw i32 %mul7, %mul8 + %add10 = or i32 %add9, 1 + %idxprom11 = sext i32 %add10 to i64 + %arrayidx12 = getelementptr inbounds i32* %A, i64 %idxprom11 + %5 = load i32* %arrayidx12, align 4 +; CHECK: da analyze - none! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1 + store i32 %5, i32* %B.addr.12, align 4 + %indvars.iv.next = add i64 %indvars.iv, 1 + %lftr.wideiv = trunc i64 %indvars.iv.next to i32 + %exitcond = icmp ne i32 %lftr.wideiv, %n + br i1 %exitcond, label %for.body3, label %for.inc13.loopexit + +for.inc13.loopexit: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.05, i64 %2 + br label %for.inc13 + +for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader + %B.addr.1.lcssa = phi i32* [ %B.addr.05, %for.cond1.preheader ], [ %scevgep, %for.inc13.loopexit ] + %inc14 = add nsw i32 %i.06, 1 + %exitcond7 = icmp ne i32 %inc14, %n + br i1 %exitcond7, label %for.cond1.preheader, label %for.end15.loopexit + +for.end15.loopexit: ; preds = %for.inc13 + br label %for.end15 + +for.end15: ; preds = %for.end15.loopexit, %entry + ret void +} + + +;; void gcd9(unsigned n, int A[][n], int *B) { +;; for (unsigned i = 0; i < n; i++) +;; for (unsigned j = 0; j < n; j++) { +;; A[2*i][4*j] = i; +;; *B++ = A[8*i][6*j + 1]; + +define void @gcd9(i32 %n, i32* %A, i32* %B) nounwind uwtable ssp { +entry: + %0 = zext i32 %n to i64 + %cmp4 = icmp eq i32 %n, 0 + br i1 %cmp4, label %for.end15, label %for.cond1.preheader.preheader + +for.cond1.preheader.preheader: ; preds = %entry + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.cond1.preheader.preheader, %for.inc13 + %indvars.iv8 = phi i64 [ 0, %for.cond1.preheader.preheader ], [ %indvars.iv.next9, %for.inc13 ] + %B.addr.05 = phi i32* [ %B.addr.1.lcssa, %for.inc13 ], [ %B, %for.cond1.preheader.preheader ] + %1 = add i32 %n, -1 + %2 = zext i32 %1 to i64 + %3 = add i64 %2, 1 + %cmp21 = icmp eq i32 %n, 0 + br i1 %cmp21, label %for.inc13, label %for.body3.preheader + +for.body3.preheader: ; preds = %for.cond1.preheader + br label %for.body3 + +for.body3: ; preds = %for.body3.preheader, %for.body3 + %indvars.iv = phi i64 [ 0, %for.body3.preheader ], [ %indvars.iv.next, %for.body3 ] + %B.addr.12 = phi i32* [ %incdec.ptr, %for.body3 ], [ %B.addr.05, %for.body3.preheader ] + %4 = trunc i64 %indvars.iv to i32 + %mul = shl i32 %4, 2 + %idxprom = zext i32 %mul to i64 + %5 = trunc i64 %indvars.iv8 to i32 + %mul4 = shl i32 %5, 1 + %idxprom5 = zext i32 %mul4 to i64 + %6 = mul nsw i64 %idxprom5, %0 + %arrayidx.sum = add i64 %6, %idxprom + %arrayidx6 = getelementptr inbounds i32* %A, i64 %arrayidx.sum + %7 = trunc i64 %indvars.iv8 to i32 + store i32 %7, i32* %arrayidx6, align 4 + %8 = trunc i64 %indvars.iv to i32 + %mul7 = mul i32 %8, 6 + %add7 = or i32 %mul7, 1 + %idxprom8 = zext i32 %add7 to i64 + %9 = trunc i64 %indvars.iv8 to i32 + %mul9 = shl i32 %9, 3 + %idxprom10 = zext i32 %mul9 to i64 + %10 = mul nsw i64 %idxprom10, %0 + %arrayidx11.sum = add i64 %10, %idxprom8 + %arrayidx12 = getelementptr inbounds i32* %A, i64 %arrayidx11.sum + %11 = load i32* %arrayidx12, align 4 +; CHECK: da analyze - flow [* *|<]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1 + store i32 %11, i32* %B.addr.12, align 4 + %indvars.iv.next = add i64 %indvars.iv, 1 + %lftr.wideiv = trunc i64 %indvars.iv.next to i32 + %exitcond = icmp ne i32 %lftr.wideiv, %n + br i1 %exitcond, label %for.body3, label %for.inc13.loopexit + +for.inc13.loopexit: ; preds = %for.body3 + %scevgep = getelementptr i32* %B.addr.05, i64 %3 + br label %for.inc13 + +for.inc13: ; preds = %for.inc13.loopexit, %for.cond1.preheader + %B.addr.1.lcssa = phi i32* [ %B.addr.05, %for.cond1.preheader ], [ %scevgep, %for.inc13.loopexit ] + %indvars.iv.next9 = add i64 %indvars.iv8, 1 + %lftr.wideiv10 = trunc i64 %indvars.iv.next9 to i32 + %exitcond11 = icmp ne i32 %lftr.wideiv10, %n + br i1 %exitcond11, label %for.cond1.preheader, label %for.end15.loopexit + +for.end15.loopexit: ; preds = %for.inc13 + br label %for.end15 + +for.end15: ; preds = %for.end15.loopexit, %entry + ret void +} diff --git a/test/Analysis/DependenceAnalysis/Preliminary.ll b/test/Analysis/DependenceAnalysis/Preliminary.ll new file mode 100644 index 0000000..3ef63fd --- /dev/null +++ b/test/Analysis/DependenceAnalysis/Preliminary.ll @@ -0,0 +1,469 @@ +; RUN: opt < %s -analyze -basicaa -indvars -da | FileCheck %s + +; This series of tests is more interesting when debugging is enabled. + +; ModuleID = 'Preliminary.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; may alias +;; int p0(int n, int *A, int *B) { +;; A[0] = n; +;; return B[1]; + +define i32 @p0(i32 %n, i32* %A, i32* %B) nounwind uwtable ssp { +entry: + store i32 %n, i32* %A, align 4 + %arrayidx1 = getelementptr inbounds i32* %B, i64 1 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - confused! + ret i32 %0 +} + + +;; no alias +;; int p1(int n, int *restrict A, int *restrict B) { +;; A[0] = n; +;; return B[1]; + +define i32 @p1(i32 %n, i32* noalias %A, i32* noalias %B) nounwind uwtable ssp { +entry: + store i32 %n, i32* %A, align 4 + %arrayidx1 = getelementptr inbounds i32* %B, i64 1 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + ret i32 %0 +} + +;; check loop nesting levels +;; for (long int i = 0; i < n; i++) +;; for (long int j = 0; j < n; j++) +;; for (long int k = 0; k < n; k++) +;; A[i][j][k] = ... +;; for (long int k = 0; k < n; k++) +;; ... 
= A[i + 3][j + 2][k + 1]; + +define void @p2(i64 %n, [100 x [100 x i64]]* %A, i64* %B) nounwind uwtable ssp { +entry: + %cmp10 = icmp sgt i64 %n, 0 + br i1 %cmp10, label %for.cond1.preheader, label %for.end26 + +for.cond1.preheader: ; preds = %for.inc24, %entry + %B.addr.012 = phi i64* [ %B.addr.1.lcssa, %for.inc24 ], [ %B, %entry ] + %i.011 = phi i64 [ %inc25, %for.inc24 ], [ 0, %entry ] + %cmp26 = icmp sgt i64 %n, 0 + br i1 %cmp26, label %for.cond4.preheader, label %for.inc24 + +for.cond4.preheader: ; preds = %for.inc21, %for.cond1.preheader + %B.addr.18 = phi i64* [ %B.addr.2.lcssa, %for.inc21 ], [ %B.addr.012, %for.cond1.preheader ] + %j.07 = phi i64 [ %inc22, %for.inc21 ], [ 0, %for.cond1.preheader ] + %cmp51 = icmp sgt i64 %n, 0 + br i1 %cmp51, label %for.body6, label %for.cond10.loopexit + +for.body6: ; preds = %for.body6, %for.cond4.preheader + %k.02 = phi i64 [ %inc, %for.body6 ], [ 0, %for.cond4.preheader ] + %arrayidx8 = getelementptr inbounds [100 x [100 x i64]]* %A, i64 %i.011, i64 %j.07, i64 %k.02 + store i64 %i.011, i64* %arrayidx8, align 8 + %inc = add nsw i64 %k.02, 1 + %cmp5 = icmp slt i64 %inc, %n + br i1 %cmp5, label %for.body6, label %for.cond10.loopexit + +for.cond10.loopexit: ; preds = %for.body6, %for.cond4.preheader + %cmp113 = icmp sgt i64 %n, 0 + br i1 %cmp113, label %for.body12, label %for.inc21 + +for.body12: ; preds = %for.body12, %for.cond10.loopexit + %k9.05 = phi i64 [ %inc19, %for.body12 ], [ 0, %for.cond10.loopexit ] + %B.addr.24 = phi i64* [ %incdec.ptr, %for.body12 ], [ %B.addr.18, %for.cond10.loopexit ] + %add = add nsw i64 %k9.05, 1 + %add13 = add nsw i64 %j.07, 2 + %add14 = add nsw i64 %i.011, 3 + %arrayidx17 = getelementptr inbounds [100 x [100 x i64]]* %A, i64 %add14, i64 %add13, i64 %add + %0 = load i64* %arrayidx17, align 8 +; CHECK: da analyze - flow [-3 -2]! + %incdec.ptr = getelementptr inbounds i64* %B.addr.24, i64 1 + store i64 %0, i64* %B.addr.24, align 8 + %inc19 = add nsw i64 %k9.05, 1 + %cmp11 = icmp slt i64 %inc19, %n + br i1 %cmp11, label %for.body12, label %for.inc21 + +for.inc21: ; preds = %for.body12, %for.cond10.loopexit + %B.addr.2.lcssa = phi i64* [ %B.addr.18, %for.cond10.loopexit ], [ %incdec.ptr, %for.body12 ] + %inc22 = add nsw i64 %j.07, 1 + %cmp2 = icmp slt i64 %inc22, %n + br i1 %cmp2, label %for.cond4.preheader, label %for.inc24 + +for.inc24: ; preds = %for.inc21, %for.cond1.preheader + %B.addr.1.lcssa = phi i64* [ %B.addr.012, %for.cond1.preheader ], [ %B.addr.2.lcssa, %for.inc21 ] + %inc25 = add nsw i64 %i.011, 1 + %cmp = icmp slt i64 %inc25, %n + br i1 %cmp, label %for.cond1.preheader, label %for.end26 + +for.end26: ; preds = %for.inc24, %entry + ret void +} + + +;; classify subscripts +;; for (long int i = 0; i < n; i++) +;; for (long int j = 0; j < n; j++) +;; for (long int k = 0; k < n; k++) +;; for (long int l = 0; l < n; l++) +;; for (long int m = 0; m < n; m++) +;; for (long int o = 0; o < n; o++) +;; for (long int p = 0; p < n; p++) +;; for (long int q = 0; q < n; q++) +;; for (long int r = 0; r < n; r++) +;; for (long int s = 0; s < n; s++) +;; for (long int u = 0; u < n; u++) +;; for (long int t = 0; t < n; t++) { +;; A[i - 3] [j] [2] [k-1] [2*l + 1] [m] [p + q] [r + s] = ... +;; ... 
= A[i + 3] [2] [u] [1-k] [3*l - 1] [o] [1 + n] [t + 2]; + +define void @p3(i64 %n, [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64* %B) nounwind uwtable ssp { +entry: + %cmp44 = icmp sgt i64 %n, 0 + br i1 %cmp44, label %for.cond1.preheader, label %for.end90 + +for.cond1.preheader: ; preds = %for.inc88, %entry + %B.addr.046 = phi i64* [ %B.addr.1.lcssa, %for.inc88 ], [ %B, %entry ] + %i.045 = phi i64 [ %inc89, %for.inc88 ], [ 0, %entry ] + %cmp240 = icmp sgt i64 %n, 0 + br i1 %cmp240, label %for.cond4.preheader, label %for.inc88 + +for.cond4.preheader: ; preds = %for.inc85, %for.cond1.preheader + %B.addr.142 = phi i64* [ %B.addr.2.lcssa, %for.inc85 ], [ %B.addr.046, %for.cond1.preheader ] + %j.041 = phi i64 [ %inc86, %for.inc85 ], [ 0, %for.cond1.preheader ] + %cmp536 = icmp sgt i64 %n, 0 + br i1 %cmp536, label %for.cond7.preheader, label %for.inc85 + +for.cond7.preheader: ; preds = %for.inc82, %for.cond4.preheader + %B.addr.238 = phi i64* [ %B.addr.3.lcssa, %for.inc82 ], [ %B.addr.142, %for.cond4.preheader ] + %k.037 = phi i64 [ %inc83, %for.inc82 ], [ 0, %for.cond4.preheader ] + %cmp832 = icmp sgt i64 %n, 0 + br i1 %cmp832, label %for.cond10.preheader, label %for.inc82 + +for.cond10.preheader: ; preds = %for.inc79, %for.cond7.preheader + %B.addr.334 = phi i64* [ %B.addr.4.lcssa, %for.inc79 ], [ %B.addr.238, %for.cond7.preheader ] + %l.033 = phi i64 [ %inc80, %for.inc79 ], [ 0, %for.cond7.preheader ] + %cmp1128 = icmp sgt i64 %n, 0 + br i1 %cmp1128, label %for.cond13.preheader, label %for.inc79 + +for.cond13.preheader: ; preds = %for.inc76, %for.cond10.preheader + %B.addr.430 = phi i64* [ %B.addr.5.lcssa, %for.inc76 ], [ %B.addr.334, %for.cond10.preheader ] + %m.029 = phi i64 [ %inc77, %for.inc76 ], [ 0, %for.cond10.preheader ] + %cmp1424 = icmp sgt i64 %n, 0 + br i1 %cmp1424, label %for.cond16.preheader, label %for.inc76 + +for.cond16.preheader: ; preds = %for.inc73, %for.cond13.preheader + %B.addr.526 = phi i64* [ %B.addr.6.lcssa, %for.inc73 ], [ %B.addr.430, %for.cond13.preheader ] + %o.025 = phi i64 [ %inc74, %for.inc73 ], [ 0, %for.cond13.preheader ] + %cmp1720 = icmp sgt i64 %n, 0 + br i1 %cmp1720, label %for.cond19.preheader, label %for.inc73 + +for.cond19.preheader: ; preds = %for.inc70, %for.cond16.preheader + %B.addr.622 = phi i64* [ %B.addr.7.lcssa, %for.inc70 ], [ %B.addr.526, %for.cond16.preheader ] + %p.021 = phi i64 [ %inc71, %for.inc70 ], [ 0, %for.cond16.preheader ] + %cmp2016 = icmp sgt i64 %n, 0 + br i1 %cmp2016, label %for.cond22.preheader, label %for.inc70 + +for.cond22.preheader: ; preds = %for.inc67, %for.cond19.preheader + %B.addr.718 = phi i64* [ %B.addr.8.lcssa, %for.inc67 ], [ %B.addr.622, %for.cond19.preheader ] + %q.017 = phi i64 [ %inc68, %for.inc67 ], [ 0, %for.cond19.preheader ] + %cmp2312 = icmp sgt i64 %n, 0 + br i1 %cmp2312, label %for.cond25.preheader, label %for.inc67 + +for.cond25.preheader: ; preds = %for.inc64, %for.cond22.preheader + %B.addr.814 = phi i64* [ %B.addr.9.lcssa, %for.inc64 ], [ %B.addr.718, %for.cond22.preheader ] + %r.013 = phi i64 [ %inc65, %for.inc64 ], [ 0, %for.cond22.preheader ] + %cmp268 = icmp sgt i64 %n, 0 + br i1 %cmp268, label %for.cond28.preheader, label %for.inc64 + +for.cond28.preheader: ; preds = %for.inc61, %for.cond25.preheader + %B.addr.910 = phi i64* [ %B.addr.10.lcssa, %for.inc61 ], [ %B.addr.814, %for.cond25.preheader ] + %s.09 = phi i64 [ %inc62, %for.inc61 ], [ 0, %for.cond25.preheader ] + %cmp294 = icmp sgt i64 %n, 0 + br i1 %cmp294, label %for.cond31.preheader, label %for.inc61 + 
+for.cond31.preheader: ; preds = %for.inc58, %for.cond28.preheader + %u.06 = phi i64 [ %inc59, %for.inc58 ], [ 0, %for.cond28.preheader ] + %B.addr.105 = phi i64* [ %B.addr.11.lcssa, %for.inc58 ], [ %B.addr.910, %for.cond28.preheader ] + %cmp321 = icmp sgt i64 %n, 0 + br i1 %cmp321, label %for.body33, label %for.inc58 + +for.body33: ; preds = %for.body33, %for.cond31.preheader + %t.03 = phi i64 [ %inc, %for.body33 ], [ 0, %for.cond31.preheader ] + %B.addr.112 = phi i64* [ %incdec.ptr, %for.body33 ], [ %B.addr.105, %for.cond31.preheader ] + %add = add nsw i64 %r.013, %s.09 + %add34 = add nsw i64 %p.021, %q.017 + %mul = shl nsw i64 %l.033, 1 + %add3547 = or i64 %mul, 1 + %sub = add nsw i64 %k.037, -1 + %sub36 = add nsw i64 %i.045, -3 + %arrayidx43 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %sub36, i64 %j.041, i64 2, i64 %sub, i64 %add3547, i64 %m.029, i64 %add34, i64 %add + store i64 %i.045, i64* %arrayidx43, align 8 + %add44 = add nsw i64 %t.03, 2 + %add45 = add nsw i64 %n, 1 + %mul46 = mul nsw i64 %l.033, 3 + %sub47 = add nsw i64 %mul46, -1 + %sub48 = sub nsw i64 1, %k.037 + %add49 = add nsw i64 %i.045, 3 + %arrayidx57 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %add49, i64 2, i64 %u.06, i64 %sub48, i64 %sub47, i64 %o.025, i64 %add45, i64 %add44 + %0 = load i64* %arrayidx57, align 8 +; CHECK: da analyze - flow [-6 * * => * * * * * * * *] splitable! +; CHECK: da analyze - split level = 3, iteration = 1! + %incdec.ptr = getelementptr inbounds i64* %B.addr.112, i64 1 + store i64 %0, i64* %B.addr.112, align 8 + %inc = add nsw i64 %t.03, 1 + %cmp32 = icmp slt i64 %inc, %n + br i1 %cmp32, label %for.body33, label %for.inc58 + +for.inc58: ; preds = %for.body33, %for.cond31.preheader + %B.addr.11.lcssa = phi i64* [ %B.addr.105, %for.cond31.preheader ], [ %incdec.ptr, %for.body33 ] + %inc59 = add nsw i64 %u.06, 1 + %cmp29 = icmp slt i64 %inc59, %n + br i1 %cmp29, label %for.cond31.preheader, label %for.inc61 + +for.inc61: ; preds = %for.inc58, %for.cond28.preheader + %B.addr.10.lcssa = phi i64* [ %B.addr.910, %for.cond28.preheader ], [ %B.addr.11.lcssa, %for.inc58 ] + %inc62 = add nsw i64 %s.09, 1 + %cmp26 = icmp slt i64 %inc62, %n + br i1 %cmp26, label %for.cond28.preheader, label %for.inc64 + +for.inc64: ; preds = %for.inc61, %for.cond25.preheader + %B.addr.9.lcssa = phi i64* [ %B.addr.814, %for.cond25.preheader ], [ %B.addr.10.lcssa, %for.inc61 ] + %inc65 = add nsw i64 %r.013, 1 + %cmp23 = icmp slt i64 %inc65, %n + br i1 %cmp23, label %for.cond25.preheader, label %for.inc67 + +for.inc67: ; preds = %for.inc64, %for.cond22.preheader + %B.addr.8.lcssa = phi i64* [ %B.addr.718, %for.cond22.preheader ], [ %B.addr.9.lcssa, %for.inc64 ] + %inc68 = add nsw i64 %q.017, 1 + %cmp20 = icmp slt i64 %inc68, %n + br i1 %cmp20, label %for.cond22.preheader, label %for.inc70 + +for.inc70: ; preds = %for.inc67, %for.cond19.preheader + %B.addr.7.lcssa = phi i64* [ %B.addr.622, %for.cond19.preheader ], [ %B.addr.8.lcssa, %for.inc67 ] + %inc71 = add nsw i64 %p.021, 1 + %cmp17 = icmp slt i64 %inc71, %n + br i1 %cmp17, label %for.cond19.preheader, label %for.inc73 + +for.inc73: ; preds = %for.inc70, %for.cond16.preheader + %B.addr.6.lcssa = phi i64* [ %B.addr.526, %for.cond16.preheader ], [ %B.addr.7.lcssa, %for.inc70 ] + %inc74 = add nsw i64 %o.025, 1 + %cmp14 = icmp slt i64 %inc74, %n + br i1 %cmp14, label %for.cond16.preheader, label %for.inc76 + +for.inc76: ; preds = %for.inc73, %for.cond13.preheader + 
%B.addr.5.lcssa = phi i64* [ %B.addr.430, %for.cond13.preheader ], [ %B.addr.6.lcssa, %for.inc73 ] + %inc77 = add nsw i64 %m.029, 1 + %cmp11 = icmp slt i64 %inc77, %n + br i1 %cmp11, label %for.cond13.preheader, label %for.inc79 + +for.inc79: ; preds = %for.inc76, %for.cond10.preheader + %B.addr.4.lcssa = phi i64* [ %B.addr.334, %for.cond10.preheader ], [ %B.addr.5.lcssa, %for.inc76 ] + %inc80 = add nsw i64 %l.033, 1 + %cmp8 = icmp slt i64 %inc80, %n + br i1 %cmp8, label %for.cond10.preheader, label %for.inc82 + +for.inc82: ; preds = %for.inc79, %for.cond7.preheader + %B.addr.3.lcssa = phi i64* [ %B.addr.238, %for.cond7.preheader ], [ %B.addr.4.lcssa, %for.inc79 ] + %inc83 = add nsw i64 %k.037, 1 + %cmp5 = icmp slt i64 %inc83, %n + br i1 %cmp5, label %for.cond7.preheader, label %for.inc85 + +for.inc85: ; preds = %for.inc82, %for.cond4.preheader + %B.addr.2.lcssa = phi i64* [ %B.addr.142, %for.cond4.preheader ], [ %B.addr.3.lcssa, %for.inc82 ] + %inc86 = add nsw i64 %j.041, 1 + %cmp2 = icmp slt i64 %inc86, %n + br i1 %cmp2, label %for.cond4.preheader, label %for.inc88 + +for.inc88: ; preds = %for.inc85, %for.cond1.preheader + %B.addr.1.lcssa = phi i64* [ %B.addr.046, %for.cond1.preheader ], [ %B.addr.2.lcssa, %for.inc85 ] + %inc89 = add nsw i64 %i.045, 1 + %cmp = icmp slt i64 %inc89, %n + br i1 %cmp, label %for.cond1.preheader, label %for.end90 + +for.end90: ; preds = %for.inc88, %entry + ret void +} + + +;; cleanup around chars, shorts, ints +;;void p4(int *A, int *B, long int n) +;; for (char i = 0; i < n; i++) +;; A[i + 2] = ... +;; ... = A[i]; + +define void @p4(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp sgt i64 %n, 0 + br i1 %cmp1, label %for.body, label %for.end + +for.body: ; preds = %for.body, %entry + %i.03 = phi i8 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv2 = sext i8 %i.03 to i32 + %conv3 = sext i8 %i.03 to i64 + %add = add i64 %conv3, 2 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv2, i32* %arrayidx, align 4 + %idxprom4 = sext i8 %i.03 to i64 + %arrayidx5 = getelementptr inbounds i32* %A, i64 %idxprom4 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - flow [*|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i8 %i.03, 1 + %conv = sext i8 %inc to i64 + %cmp = icmp slt i64 %conv, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;;void p5(int *A, int *B, long int n) +;; for (short i = 0; i < n; i++) +;; A[i + 2] = ... +;; ... = A[i]; + +define void @p5(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp sgt i64 %n, 0 + br i1 %cmp1, label %for.body, label %for.end + +for.body: ; preds = %for.body, %entry + %i.03 = phi i16 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv2 = sext i16 %i.03 to i32 + %conv3 = sext i16 %i.03 to i64 + %add = add i64 %conv3, 2 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv2, i32* %arrayidx, align 4 + %idxprom4 = sext i16 %i.03 to i64 + %arrayidx5 = getelementptr inbounds i32* %A, i64 %idxprom4 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - flow [*|<]! 
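+; Editor's note (not part of the imported test, added for clarity): the result above is
+; presumably conservative because the i16 induction variable is incremented without nsw
+; and only sign-extended at each use, so with an i64 bound %n the subscripts sext(i) + 2
+; and sext(i) need not form affine recurrences; compare @p6 below, where the nsw i32
+; increment lets the analysis report the exact distance [2].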
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i16 %i.03, 1 + %conv = sext i16 %inc to i64 + %cmp = icmp slt i64 %conv, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;;void p6(int *A, int *B, long int n) +;; for (int i = 0; i < n; i++) +;; A[i + 2] = ... +;; ... = A[i]; + +define void @p6(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp sgt i64 %n, 0 + br i1 %cmp1, label %for.body, label %for.end + +for.body: ; preds = %for.body, %entry + %i.03 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %add = add nsw i32 %i.03, 2 + %idxprom = sext i32 %add to i64 + %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom + store i32 %i.03, i32* %arrayidx, align 4 + %idxprom2 = sext i32 %i.03 to i64 + %arrayidx3 = getelementptr inbounds i32* %A, i64 %idxprom2 + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - consistent flow [2]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i32 %i.03, 1 + %conv = sext i32 %inc to i64 + %cmp = icmp slt i64 %conv, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;;void p7(unsigned *A, unsigned *B, char n) +;; A[n] = ... +;; ... = A[n + 1]; + +define void @p7(i32* %A, i32* %B, i8 signext %n) nounwind uwtable ssp { +entry: + %idxprom = sext i8 %n to i64 + %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom + store i32 0, i32* %arrayidx, align 4 + %conv = sext i8 %n to i64 + %add = add i64 %conv, 1 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - none! + store i32 %0, i32* %B, align 4 + ret void +} + + + +;;void p8(unsigned *A, unsigned *B, short n) +;; A[n] = ... +;; ... = A[n + 1]; + +define void @p8(i32* %A, i32* %B, i16 signext %n) nounwind uwtable ssp { +entry: + %idxprom = sext i16 %n to i64 + %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom + store i32 0, i32* %arrayidx, align 4 + %conv = sext i16 %n to i64 + %add = add i64 %conv, 1 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - none! + store i32 %0, i32* %B, align 4 + ret void +} + + +;;void p9(unsigned *A, unsigned *B, int n) +;; A[n] = ... +;; ... = A[n + 1]; + +define void @p9(i32* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + %idxprom = sext i32 %n to i64 + %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom + store i32 0, i32* %arrayidx, align 4 + %add = add nsw i32 %n, 1 + %idxprom1 = sext i32 %add to i64 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - none! + store i32 %0, i32* %B, align 4 + ret void +} + + +;;void p10(unsigned *A, unsigned *B, unsigned n) +;; A[n] = ... +;; ... = A[n + 1]; + +define void @p10(i32* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + %idxprom = zext i32 %n to i64 + %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom + store i32 0, i32* %arrayidx, align 4 + %add = add i32 %n, 1 + %idxprom1 = zext i32 %add to i64 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - none! 
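+; Editor's note (not part of the imported test, added for clarity): @p7 through @p10 have
+; no enclosing loop, so each subscript pair is a ZIV test; after the sign/zero extensions
+; the load index derived from n + 1 can never equal the store index derived from n, so the
+; two accesses never touch the same element and the expected answer is "none" in each case.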
+ store i32 %0, i32* %B, align 4 + ret void +} diff --git a/test/Analysis/DependenceAnalysis/Propagating.ll b/test/Analysis/DependenceAnalysis/Propagating.ll new file mode 100644 index 0000000..076348c --- /dev/null +++ b/test/Analysis/DependenceAnalysis/Propagating.ll @@ -0,0 +1,467 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'Propagating.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[i + 1][i + j] = i; +;; *B++ = A[i][i + j]; + +define void @prop0([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc9, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc9 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc10, %for.inc9 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %add = add nsw i64 %i.03, %j.02 + %add4 = add nsw i64 %i.03, 1 + %arrayidx5 = getelementptr inbounds [100 x i32]* %A, i64 %add4, i64 %add + store i32 %conv, i32* %arrayidx5, align 4 + %add6 = add nsw i64 %i.03, %j.02 + %arrayidx8 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add6 + %0 = load i32* %arrayidx8, align 4 +; CHECK: da analyze - consistent flow [1 -1]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc9 + +for.inc9: ; preds = %for.body3 + %inc10 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc10, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end11 + +for.end11: ; preds = %for.inc9 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; for (long int k = 0; k < 100; k++) +;; A[j - i][i + 1][j + k] = ... +;; ... 
= A[j - i][i][j + k]; + +define void @prop1([100 x [100 x i32]]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc18, %entry + %B.addr.06 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc18 ] + %i.05 = phi i64 [ 0, %entry ], [ %inc19, %for.inc18 ] + br label %for.cond4.preheader + +for.cond4.preheader: ; preds = %for.inc15, %for.cond1.preheader + %B.addr.14 = phi i32* [ %B.addr.06, %for.cond1.preheader ], [ %incdec.ptr, %for.inc15 ] + %j.03 = phi i64 [ 0, %for.cond1.preheader ], [ %inc16, %for.inc15 ] + br label %for.body6 + +for.body6: ; preds = %for.body6, %for.cond4.preheader + %k.02 = phi i64 [ 0, %for.cond4.preheader ], [ %inc, %for.body6 ] + %B.addr.21 = phi i32* [ %B.addr.14, %for.cond4.preheader ], [ %incdec.ptr, %for.body6 ] + %conv = trunc i64 %i.05 to i32 + %add = add nsw i64 %j.03, %k.02 + %add7 = add nsw i64 %i.05, 1 + %sub = sub nsw i64 %j.03, %i.05 + %arrayidx9 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub, i64 %add7, i64 %add + store i32 %conv, i32* %arrayidx9, align 4 + %add10 = add nsw i64 %j.03, %k.02 + %sub11 = sub nsw i64 %j.03, %i.05 + %arrayidx14 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub11, i64 %i.05, i64 %add10 + %0 = load i32* %arrayidx14, align 4 +; CHECK: da analyze - consistent flow [1 1 -1]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.21, i64 1 + store i32 %0, i32* %B.addr.21, align 4 + %inc = add nsw i64 %k.02, 1 + %cmp5 = icmp slt i64 %inc, 100 + br i1 %cmp5, label %for.body6, label %for.inc15 + +for.inc15: ; preds = %for.body6 + %inc16 = add nsw i64 %j.03, 1 + %cmp2 = icmp slt i64 %inc16, 100 + br i1 %cmp2, label %for.cond4.preheader, label %for.inc18 + +for.inc18: ; preds = %for.inc15 + %inc19 = add nsw i64 %i.05, 1 + %cmp = icmp slt i64 %inc19, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end20 + +for.end20: ; preds = %for.inc18 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[i - 1][2*i] = ... +;; ... = A[i][i + j + 110]; + +define void @prop2([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc8, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc8 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc9, %for.inc8 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %sub = add nsw i64 %i.03, -1 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %sub, i64 %mul + store i32 %conv, i32* %arrayidx4, align 4 + %add = add nsw i64 %i.03, %j.02 + %add5 = add nsw i64 %add, 110 + %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add5 + %0 = load i32* %arrayidx7, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc8 + +for.inc8: ; preds = %for.body3 + %inc9 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc9, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end10 + +for.end10: ; preds = %for.inc8 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[i][2*j + i] = ... +;; ... 
= A[i][2*j - i + 5]; + +define void @prop3([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc9, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc9 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc10, %for.inc9 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %j.02, 1 + %add = add nsw i64 %mul, %i.03 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add + store i32 %conv, i32* %arrayidx4, align 4 + %mul5 = shl nsw i64 %j.02, 1 + %sub = sub nsw i64 %mul5, %i.03 + %add6 = add nsw i64 %sub, 5 + %arrayidx8 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add6 + %0 = load i32* %arrayidx8, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc9 + +for.inc9: ; preds = %for.body3 + %inc10 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc10, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end11 + +for.end11: ; preds = %for.inc9 + ret void +} + + +;; propagate Distance +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[i + 2][2*i + j + 1] = ... +;; ... = A[i][2*i + j]; + +define void @prop4([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc11, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc11 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc12, %for.inc11 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %add = add nsw i64 %mul, %j.02 + %add4 = add nsw i64 %add, 1 + %add5 = add nsw i64 %i.03, 2 + %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %add5, i64 %add4 + store i32 %conv, i32* %arrayidx6, align 4 + %mul7 = shl nsw i64 %i.03, 1 + %add8 = add nsw i64 %mul7, %j.02 + %arrayidx10 = getelementptr inbounds [100 x i32]* %A, i64 %i.03, i64 %add8 + %0 = load i32* %arrayidx10, align 4 +; CHECK: da analyze - consistent flow [2 -3]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc11 + +for.inc11: ; preds = %for.body3 + %inc12 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc12, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end13 + +for.end13: ; preds = %for.inc11 + ret void +} + + +;; propagate Point +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[3*i - 18][22 - i][2*i + j] = ... +;; ... 
= A[i][i][3*i + j]; + +define void @prop5([100 x [100 x i32]]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc13, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc13 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc14, %for.inc13 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %add = add nsw i64 %mul, %j.02 + %sub = sub nsw i64 22, %i.03 + %mul4 = mul nsw i64 %i.03, 3 + %sub5 = add nsw i64 %mul4, -18 + %arrayidx7 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %sub5, i64 %sub, i64 %add + store i32 %conv, i32* %arrayidx7, align 4 + %mul8 = mul nsw i64 %i.03, 3 + %add9 = add nsw i64 %mul8, %j.02 + %arrayidx12 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.03, i64 %i.03, i64 %add9 + %0 = load i32* %arrayidx12, align 4 +; CHECK: da analyze - flow [< -16] splitable! +; CHECK: da analyze - split level = 1, iteration = 11! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc13 + +for.inc13: ; preds = %for.body3 + %inc14 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc14, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end15 + +for.end15: ; preds = %for.inc13 + ret void +} + + +;; propagate Line +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[i + 1][4*i + j + 2] = ... +;; ... = A[2*i][8*i + j]; + +define void @prop6([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc12, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc12 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc13, %for.inc12 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 2 + %add = add nsw i64 %mul, %j.02 + %add4 = add nsw i64 %add, 2 + %add5 = add nsw i64 %i.03, 1 + %arrayidx6 = getelementptr inbounds [100 x i32]* %A, i64 %add5, i64 %add4 + store i32 %conv, i32* %arrayidx6, align 4 + %mul7 = shl nsw i64 %i.03, 3 + %add8 = add nsw i64 %mul7, %j.02 + %mul9 = shl nsw i64 %i.03, 1 + %arrayidx11 = getelementptr inbounds [100 x i32]* %A, i64 %mul9, i64 %add8 + %0 = load i32* %arrayidx11, align 4 +; CHECK: da analyze - flow [=> -2]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc12 + +for.inc12: ; preds = %for.body3 + %inc13 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc13, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end14 + +for.end14: ; preds = %for.inc12 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[2*i + 4][-5*i + j + 2] = ... +;; ... 
= A[-2*i + 20][5*i + j]; + +define void @prop7([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc14, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc14 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc15, %for.inc14 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, -5 + %add = add nsw i64 %mul, %j.02 + %add4 = add nsw i64 %add, 2 + %mul5 = shl nsw i64 %i.03, 1 + %add6 = add nsw i64 %mul5, 4 + %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %add6, i64 %add4 + store i32 %conv, i32* %arrayidx7, align 4 + %mul8 = mul nsw i64 %i.03, 5 + %add9 = add nsw i64 %mul8, %j.02 + %mul10 = mul nsw i64 %i.03, -2 + %add11 = add nsw i64 %mul10, 20 + %arrayidx13 = getelementptr inbounds [100 x i32]* %A, i64 %add11, i64 %add9 + %0 = load i32* %arrayidx13, align 4 +; CHECK: da analyze - flow [* -38] splitable! +; CHECK: da analyze - split level = 1, iteration = 4! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc14 + +for.inc14: ; preds = %for.body3 + %inc15 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc15, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end16 + +for.end16: ; preds = %for.inc14 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[4][j + 2] = ... +;; ... = A[-2*i + 4][5*i + j]; + +define void @prop8([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc10, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc10 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc11, %for.inc10 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %add = add nsw i64 %j.02, 2 + %arrayidx4 = getelementptr inbounds [100 x i32]* %A, i64 4, i64 %add + store i32 %conv, i32* %arrayidx4, align 4 + %mul = mul nsw i64 %i.03, 5 + %add5 = add nsw i64 %mul, %j.02 + %mul6 = mul nsw i64 %i.03, -2 + %add7 = add nsw i64 %mul6, 4 + %arrayidx9 = getelementptr inbounds [100 x i32]* %A, i64 %add7, i64 %add5 + %0 = load i32* %arrayidx9, align 4 +; CHECK: da analyze - flow [p<= 2]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc10 + +for.inc10: ; preds = %for.body3 + %inc11 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc11, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end12 + +for.end12: ; preds = %for.inc10 + ret void +} + + +;; for (long int i = 0; i < 100; i++) +;; for (long int j = 0; j < 100; j++) +;; A[2*i + 4][5*i + j + 2] = ... +;; ... 
= A[4][j]; + +define void @prop9([100 x i32]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc10, %entry + %B.addr.04 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc10 ] + %i.03 = phi i64 [ 0, %entry ], [ %inc11, %for.inc10 ] + br label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.02 = phi i64 [ 0, %for.cond1.preheader ], [ %inc, %for.body3 ] + %B.addr.11 = phi i32* [ %B.addr.04, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, 5 + %add = add nsw i64 %mul, %j.02 + %add4 = add nsw i64 %add, 2 + %mul5 = shl nsw i64 %i.03, 1 + %add6 = add nsw i64 %mul5, 4 + %arrayidx7 = getelementptr inbounds [100 x i32]* %A, i64 %add6, i64 %add4 + store i32 %conv, i32* %arrayidx7, align 4 + %arrayidx9 = getelementptr inbounds [100 x i32]* %A, i64 4, i64 %j.02 + %0 = load i32* %arrayidx9, align 4 +; CHECK: da analyze - flow [p<= 2]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.11, i64 1 + store i32 %0, i32* %B.addr.11, align 4 + %inc = add nsw i64 %j.02, 1 + %cmp2 = icmp slt i64 %inc, 100 + br i1 %cmp2, label %for.body3, label %for.inc10 + +for.inc10: ; preds = %for.body3 + %inc11 = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc11, 100 + br i1 %cmp, label %for.cond1.preheader, label %for.end12 + +for.end12: ; preds = %for.inc10 + ret void +} diff --git a/test/Analysis/DependenceAnalysis/Separability.ll b/test/Analysis/DependenceAnalysis/Separability.ll new file mode 100644 index 0000000..d42d3cd --- /dev/null +++ b/test/Analysis/DependenceAnalysis/Separability.ll @@ -0,0 +1,267 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'Separability.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long int i = 0; i < 50; i++) +;; for (long int j = 0; j < 50; j++) +;; for (long int k = 0; k < 50; k++) +;; for (long int l = 0; l < 50; l++) +;; A[n][i][j + k] = ... +;; ... 
= A[10][i + 10][2*j - l]; + +define void @sep0([100 x [100 x i32]]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc22, %entry + %B.addr.08 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc22 ] + %i.07 = phi i64 [ 0, %entry ], [ %inc23, %for.inc22 ] + br label %for.cond4.preheader + +for.cond4.preheader: ; preds = %for.inc19, %for.cond1.preheader + %B.addr.16 = phi i32* [ %B.addr.08, %for.cond1.preheader ], [ %incdec.ptr, %for.inc19 ] + %j.05 = phi i64 [ 0, %for.cond1.preheader ], [ %inc20, %for.inc19 ] + br label %for.cond7.preheader + +for.cond7.preheader: ; preds = %for.inc16, %for.cond4.preheader + %B.addr.24 = phi i32* [ %B.addr.16, %for.cond4.preheader ], [ %incdec.ptr, %for.inc16 ] + %k.03 = phi i64 [ 0, %for.cond4.preheader ], [ %inc17, %for.inc16 ] + br label %for.body9 + +for.body9: ; preds = %for.body9, %for.cond7.preheader + %l.02 = phi i64 [ 0, %for.cond7.preheader ], [ %inc, %for.body9 ] + %B.addr.31 = phi i32* [ %B.addr.24, %for.cond7.preheader ], [ %incdec.ptr, %for.body9 ] + %conv = trunc i64 %i.07 to i32 + %add = add nsw i64 %j.05, %k.03 + %idxprom = sext i32 %n to i64 + %arrayidx11 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %idxprom, i64 %i.07, i64 %add + store i32 %conv, i32* %arrayidx11, align 4 + %mul = shl nsw i64 %j.05, 1 + %sub = sub nsw i64 %mul, %l.02 + %add12 = add nsw i64 %i.07, 10 + %arrayidx15 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub + %0 = load i32* %arrayidx15, align 4 +; CHECK: da analyze - flow [-10 * * *]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1 + store i32 %0, i32* %B.addr.31, align 4 + %inc = add nsw i64 %l.02, 1 + %cmp8 = icmp slt i64 %inc, 50 + br i1 %cmp8, label %for.body9, label %for.inc16 + +for.inc16: ; preds = %for.body9 + %inc17 = add nsw i64 %k.03, 1 + %cmp5 = icmp slt i64 %inc17, 50 + br i1 %cmp5, label %for.cond7.preheader, label %for.inc19 + +for.inc19: ; preds = %for.inc16 + %inc20 = add nsw i64 %j.05, 1 + %cmp2 = icmp slt i64 %inc20, 50 + br i1 %cmp2, label %for.cond4.preheader, label %for.inc22 + +for.inc22: ; preds = %for.inc19 + %inc23 = add nsw i64 %i.07, 1 + %cmp = icmp slt i64 %inc23, 50 + br i1 %cmp, label %for.cond1.preheader, label %for.end24 + +for.end24: ; preds = %for.inc22 + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; for (long int j = 0; j < 50; j++) +;; for (long int k = 0; k < 50; k++) +;; for (long int l = 0; l < 50; l++) +;; A[i][i][j + k] = ... +;; ... 
= A[10][i + 10][2*j - l]; + +define void @sep1([100 x [100 x i32]]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc22, %entry + %B.addr.08 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc22 ] + %i.07 = phi i64 [ 0, %entry ], [ %inc23, %for.inc22 ] + br label %for.cond4.preheader + +for.cond4.preheader: ; preds = %for.inc19, %for.cond1.preheader + %B.addr.16 = phi i32* [ %B.addr.08, %for.cond1.preheader ], [ %incdec.ptr, %for.inc19 ] + %j.05 = phi i64 [ 0, %for.cond1.preheader ], [ %inc20, %for.inc19 ] + br label %for.cond7.preheader + +for.cond7.preheader: ; preds = %for.inc16, %for.cond4.preheader + %B.addr.24 = phi i32* [ %B.addr.16, %for.cond4.preheader ], [ %incdec.ptr, %for.inc16 ] + %k.03 = phi i64 [ 0, %for.cond4.preheader ], [ %inc17, %for.inc16 ] + br label %for.body9 + +for.body9: ; preds = %for.body9, %for.cond7.preheader + %l.02 = phi i64 [ 0, %for.cond7.preheader ], [ %inc, %for.body9 ] + %B.addr.31 = phi i32* [ %B.addr.24, %for.cond7.preheader ], [ %incdec.ptr, %for.body9 ] + %conv = trunc i64 %i.07 to i32 + %add = add nsw i64 %j.05, %k.03 + %arrayidx11 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 %i.07, i64 %i.07, i64 %add + store i32 %conv, i32* %arrayidx11, align 4 + %mul = shl nsw i64 %j.05, 1 + %sub = sub nsw i64 %mul, %l.02 + %add12 = add nsw i64 %i.07, 10 + %arrayidx15 = getelementptr inbounds [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub + %0 = load i32* %arrayidx15, align 4 +; CHECK: da analyze - flow [> * * *]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1 + store i32 %0, i32* %B.addr.31, align 4 + %inc = add nsw i64 %l.02, 1 + %cmp8 = icmp slt i64 %inc, 50 + br i1 %cmp8, label %for.body9, label %for.inc16 + +for.inc16: ; preds = %for.body9 + %inc17 = add nsw i64 %k.03, 1 + %cmp5 = icmp slt i64 %inc17, 50 + br i1 %cmp5, label %for.cond7.preheader, label %for.inc19 + +for.inc19: ; preds = %for.inc16 + %inc20 = add nsw i64 %j.05, 1 + %cmp2 = icmp slt i64 %inc20, 50 + br i1 %cmp2, label %for.cond4.preheader, label %for.inc22 + +for.inc22: ; preds = %for.inc19 + %inc23 = add nsw i64 %i.07, 1 + %cmp = icmp slt i64 %inc23, 50 + br i1 %cmp, label %for.cond1.preheader, label %for.end24 + +for.end24: ; preds = %for.inc22 + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; for (long int j = 0; j < 50; j++) +;; for (long int k = 0; k < 50; k++) +;; for (long int l = 0; l < 50; l++) +;; A[i][i][i + k][l] = ... +;; ... 
= A[10][i + 10][j + k][l + 10]; + +define void @sep2([100 x [100 x [100 x i32]]]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc26, %entry + %B.addr.08 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc26 ] + %i.07 = phi i64 [ 0, %entry ], [ %inc27, %for.inc26 ] + br label %for.cond4.preheader + +for.cond4.preheader: ; preds = %for.inc23, %for.cond1.preheader + %B.addr.16 = phi i32* [ %B.addr.08, %for.cond1.preheader ], [ %incdec.ptr, %for.inc23 ] + %j.05 = phi i64 [ 0, %for.cond1.preheader ], [ %inc24, %for.inc23 ] + br label %for.cond7.preheader + +for.cond7.preheader: ; preds = %for.inc20, %for.cond4.preheader + %B.addr.24 = phi i32* [ %B.addr.16, %for.cond4.preheader ], [ %incdec.ptr, %for.inc20 ] + %k.03 = phi i64 [ 0, %for.cond4.preheader ], [ %inc21, %for.inc20 ] + br label %for.body9 + +for.body9: ; preds = %for.body9, %for.cond7.preheader + %l.02 = phi i64 [ 0, %for.cond7.preheader ], [ %inc, %for.body9 ] + %B.addr.31 = phi i32* [ %B.addr.24, %for.cond7.preheader ], [ %incdec.ptr, %for.body9 ] + %conv = trunc i64 %i.07 to i32 + %add = add nsw i64 %i.07, %k.03 + %arrayidx12 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add, i64 %l.02 + store i32 %conv, i32* %arrayidx12, align 4 + %add13 = add nsw i64 %l.02, 10 + %add14 = add nsw i64 %j.05, %k.03 + %add15 = add nsw i64 %i.07, 10 + %arrayidx19 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add15, i64 %add14, i64 %add13 + %0 = load i32* %arrayidx19, align 4 +; CHECK: da analyze - flow [> * * -10]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1 + store i32 %0, i32* %B.addr.31, align 4 + %inc = add nsw i64 %l.02, 1 + %cmp8 = icmp slt i64 %inc, 50 + br i1 %cmp8, label %for.body9, label %for.inc20 + +for.inc20: ; preds = %for.body9 + %inc21 = add nsw i64 %k.03, 1 + %cmp5 = icmp slt i64 %inc21, 50 + br i1 %cmp5, label %for.cond7.preheader, label %for.inc23 + +for.inc23: ; preds = %for.inc20 + %inc24 = add nsw i64 %j.05, 1 + %cmp2 = icmp slt i64 %inc24, 50 + br i1 %cmp2, label %for.cond4.preheader, label %for.inc26 + +for.inc26: ; preds = %for.inc23 + %inc27 = add nsw i64 %i.07, 1 + %cmp = icmp slt i64 %inc27, 50 + br i1 %cmp, label %for.cond1.preheader, label %for.end28 + +for.end28: ; preds = %for.inc26 + ret void +} + + +;; for (long int i = 0; i < 50; i++) +;; for (long int j = 0; j < 50; j++) +;; for (long int k = 0; k < 50; k++) +;; for (long int l = 0; l < 50; l++) +;; A[i][i][i + k][l + k] = ... +;; ... 
= A[10][i + 10][j + k][l + 10]; + +define void @sep3([100 x [100 x [100 x i32]]]* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + br label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc27, %entry + %B.addr.08 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.inc27 ] + %i.07 = phi i64 [ 0, %entry ], [ %inc28, %for.inc27 ] + br label %for.cond4.preheader + +for.cond4.preheader: ; preds = %for.inc24, %for.cond1.preheader + %B.addr.16 = phi i32* [ %B.addr.08, %for.cond1.preheader ], [ %incdec.ptr, %for.inc24 ] + %j.05 = phi i64 [ 0, %for.cond1.preheader ], [ %inc25, %for.inc24 ] + br label %for.cond7.preheader + +for.cond7.preheader: ; preds = %for.inc21, %for.cond4.preheader + %B.addr.24 = phi i32* [ %B.addr.16, %for.cond4.preheader ], [ %incdec.ptr, %for.inc21 ] + %k.03 = phi i64 [ 0, %for.cond4.preheader ], [ %inc22, %for.inc21 ] + br label %for.body9 + +for.body9: ; preds = %for.body9, %for.cond7.preheader + %l.02 = phi i64 [ 0, %for.cond7.preheader ], [ %inc, %for.body9 ] + %B.addr.31 = phi i32* [ %B.addr.24, %for.cond7.preheader ], [ %incdec.ptr, %for.body9 ] + %conv = trunc i64 %i.07 to i32 + %add = add nsw i64 %l.02, %k.03 + %add10 = add nsw i64 %i.07, %k.03 + %arrayidx13 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 %i.07, i64 %i.07, i64 %add10, i64 %add + store i32 %conv, i32* %arrayidx13, align 4 + %add14 = add nsw i64 %l.02, 10 + %add15 = add nsw i64 %j.05, %k.03 + %add16 = add nsw i64 %i.07, 10 + %arrayidx20 = getelementptr inbounds [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add16, i64 %add15, i64 %add14 + %0 = load i32* %arrayidx20, align 4 +; CHECK: da analyze - flow [> * * *]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.31, i64 1 + store i32 %0, i32* %B.addr.31, align 4 + %inc = add nsw i64 %l.02, 1 + %cmp8 = icmp slt i64 %inc, 50 + br i1 %cmp8, label %for.body9, label %for.inc21 + +for.inc21: ; preds = %for.body9 + %inc22 = add nsw i64 %k.03, 1 + %cmp5 = icmp slt i64 %inc22, 50 + br i1 %cmp5, label %for.cond7.preheader, label %for.inc24 + +for.inc24: ; preds = %for.inc21 + %inc25 = add nsw i64 %j.05, 1 + %cmp2 = icmp slt i64 %inc25, 50 + br i1 %cmp2, label %for.cond4.preheader, label %for.inc27 + +for.inc27: ; preds = %for.inc24 + %inc28 = add nsw i64 %i.07, 1 + %cmp = icmp slt i64 %inc28, 50 + br i1 %cmp, label %for.cond1.preheader, label %for.end29 + +for.end29: ; preds = %for.inc27 + ret void +} diff --git a/test/Analysis/DependenceAnalysis/StrongSIV.ll b/test/Analysis/DependenceAnalysis/StrongSIV.ll new file mode 100644 index 0000000..be336c3 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/StrongSIV.ll @@ -0,0 +1,342 @@ +; RUN: opt < %s -analyze -basicaa -indvars -da | FileCheck %s + +; ModuleID = 'StrongSIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (int i = 0; i < n; i++) +;; A[i + 2] = ... +;; ... 
= A[i]; + +define void @strong0(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp sgt i64 %n, 0 + br i1 %cmp1, label %for.body, label %for.end + +for.body: ; preds = %for.body, %entry + %i.03 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %add = add nsw i32 %i.03, 2 + %idxprom = sext i32 %add to i64 + %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom + store i32 %i.03, i32* %arrayidx, align 4 + %idxprom2 = sext i32 %i.03 to i64 + %arrayidx3 = getelementptr inbounds i32* %A, i64 %idxprom2 + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - consistent flow [2]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i32 %i.03, 1 + %conv = sext i32 %inc to i64 + %cmp = icmp slt i64 %conv, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long int i = 0; i < n; i++) +;; A[i + 2] = ... +;; ... = A[i]; + +define void @strong1(i32* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + %conv = sext i32 %n to i64 + %cmp1 = icmp sgt i32 %n, 0 + br i1 %cmp1, label %for.body, label %for.end + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv2 = trunc i64 %i.03 to i32 + %add = add nsw i64 %i.03, 2 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv2, i32* %arrayidx, align 4 + %arrayidx3 = getelementptr inbounds i32* %A, i64 %i.03 + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - consistent flow [2]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp slt i64 %inc, %conv + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long unsigned i = 0; i < n; i++) +;; A[i + 2] = ... +;; ... = A[i]; + +define void @strong2(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %add = add i64 %i.03, 2 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.03 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - consistent flow [2]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (int i = 0; i < n; i++) +;; A[i + 2] = ... +;; ... 
= A[i]; + +define void @strong3(i32* %A, i32* %B, i32 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp sgt i32 %n, 0 + br i1 %cmp1, label %for.body, label %for.end + +for.body: ; preds = %for.body, %entry + %i.03 = phi i32 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %add = add nsw i32 %i.03, 2 + %idxprom = sext i32 %add to i64 + %arrayidx = getelementptr inbounds i32* %A, i64 %idxprom + store i32 %i.03, i32* %arrayidx, align 4 + %idxprom1 = sext i32 %i.03 to i64 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %idxprom1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - consistent flow [2]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i32 %i.03, 1 + %cmp = icmp slt i32 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long unsigned i = 0; i < 19; i++) +;; A[i + 19] = ... +;; ... = A[i]; + +define void @strong4(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %add = add i64 %i.02, 19 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 19 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 20; i++) +;; A[i + 19] = ... +;; ... = A[i]; + +define void @strong5(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %add = add i64 %i.02, 19 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - consistent flow [19]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 20 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 20; i++) +;; A[2*i + 6] = ... +;; ... = A[2*i]; + +define void @strong6(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = shl i64 %i.02, 1 + %add = add i64 %mul, 6 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul1 = shl i64 %i.02, 1 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %mul1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - consistent flow [3]! 
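+; Editor's note (not part of the imported test, added for clarity): a quick check of the
+; distance above. The store writes A[2*i + 6] and the load reads A[2*i'], so a dependence
+; requires 2*i + 6 = 2*i', i.e. i' = i + 3, a constant distance of 3; @strong7 below uses
+; an offset of 7, for which 2*i + 7 = 2*i' has no integer solution, hence "none".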
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 20 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 20; i++) +;; A[2*i + 7] = ... +;; ... = A[2*i]; + +define void @strong7(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = shl i64 %i.02, 1 + %add = add i64 %mul, 7 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul1 = shl i64 %i.02, 1 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %mul1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 20 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 20; i++) +;; A[i + n] = ... +;; ... = A[i]; + +define void @strong8(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %add = add i64 %i.02, %n + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %i.02 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - consistent flow [%n|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 20 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < n; i++) +;; A[i + n] = ... +;; ... = A[i + 2*n]; + +define void @strong9(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %add = add i64 %i.03, %n + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul = shl i64 %n, 1 + %add1 = add i64 %i.03, %mul + %arrayidx2 = getelementptr inbounds i32* %A, i64 %add1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long unsigned i = 0; i < 1000; i++) +;; A[n*i + 5] = ... +;; ... 
= A[n*i + 5]; + +define void @strong10(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = mul i64 %i.02, %n + %add = add i64 %mul, 5 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul1 = mul i64 %i.02, %n + %add2 = add i64 %mul1, 5 + %arrayidx3 = getelementptr inbounds i32* %A, i64 %add2 + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - consistent flow [0|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 1000 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} diff --git a/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll b/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll new file mode 100644 index 0000000..2a1b4e7 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll @@ -0,0 +1,312 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'SymbolicRDIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long int i = 0; i < n1; i++) +;; A[2*i + n1] = ... +;; for (long int j = 0; j < n2; j++) +;; ... = A[3*j + 3*n1]; + +define void @symbolicrdiv0(i32* %A, i32* %B, i64 %n1, i64 %n2) nounwind uwtable ssp { +entry: + %cmp4 = icmp eq i64 %n1, 0 + br i1 %cmp4, label %for.cond1.preheader, label %for.body + +for.cond1.preheader: ; preds = %for.body, %entry + %cmp21 = icmp eq i64 %n2, 0 + br i1 %cmp21, label %for.end11, label %for.body4 + +for.body: ; preds = %for.body, %entry + %i.05 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %conv = trunc i64 %i.05 to i32 + %mul = shl nsw i64 %i.05, 1 + %add = add i64 %mul, %n1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.05, 1 + %cmp = icmp ult i64 %inc, %n1 + br i1 %cmp, label %for.body, label %for.cond1.preheader + +for.body4: ; preds = %for.body4, %for.cond1.preheader + %j.03 = phi i64 [ %inc10, %for.body4 ], [ 0, %for.cond1.preheader ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.cond1.preheader ] + %mul56 = add i64 %j.03, %n1 + %add7 = mul i64 %mul56, 3 + %arrayidx8 = getelementptr inbounds i32* %A, i64 %add7 + %0 = load i32* %arrayidx8, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc10 = add nsw i64 %j.03, 1 + %cmp2 = icmp ult i64 %inc10, %n2 + br i1 %cmp2, label %for.body4, label %for.end11 + +for.end11: ; preds = %for.body4, %for.cond1.preheader + ret void +} + + +;; for (long int i = 0; i < n1; i++) +;; A[2*i + 5*n2] = ... +;; for (long int j = 0; j < n2; j++) +;; ... 
= A[3*j + 2*n2]; + +define void @symbolicrdiv1(i32* %A, i32* %B, i64 %n1, i64 %n2) nounwind uwtable ssp { +entry: + %cmp4 = icmp eq i64 %n1, 0 + br i1 %cmp4, label %for.cond2.preheader, label %for.body + +for.cond2.preheader: ; preds = %for.body, %entry + %cmp31 = icmp eq i64 %n2, 0 + br i1 %cmp31, label %for.end12, label %for.body5 + +for.body: ; preds = %for.body, %entry + %i.05 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %conv = trunc i64 %i.05 to i32 + %mul = shl nsw i64 %i.05, 1 + %mul1 = mul i64 %n2, 5 + %add = add i64 %mul, %mul1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.05, 1 + %cmp = icmp ult i64 %inc, %n1 + br i1 %cmp, label %for.body, label %for.cond2.preheader + +for.body5: ; preds = %for.body5, %for.cond2.preheader + %j.03 = phi i64 [ %inc11, %for.body5 ], [ 0, %for.cond2.preheader ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body5 ], [ %B, %for.cond2.preheader ] + %mul6 = mul nsw i64 %j.03, 3 + %mul7 = shl i64 %n2, 1 + %add8 = add i64 %mul6, %mul7 + %arrayidx9 = getelementptr inbounds i32* %A, i64 %add8 + %0 = load i32* %arrayidx9, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc11 = add nsw i64 %j.03, 1 + %cmp3 = icmp ult i64 %inc11, %n2 + br i1 %cmp3, label %for.body5, label %for.end12 + +for.end12: ; preds = %for.body5, %for.cond2.preheader + ret void +} + + +;; for (long int i = 0; i < n1; i++) +;; A[2*i - n2] = ... +;; for (long int j = 0; j < n2; j++) +;; ... = A[-j + 2*n1]; + +define void @symbolicrdiv2(i32* %A, i32* %B, i64 %n1, i64 %n2) nounwind uwtable ssp { +entry: + %cmp4 = icmp eq i64 %n1, 0 + br i1 %cmp4, label %for.cond1.preheader, label %for.body + +for.cond1.preheader: ; preds = %for.body, %entry + %cmp21 = icmp eq i64 %n2, 0 + br i1 %cmp21, label %for.end10, label %for.body4 + +for.body: ; preds = %for.body, %entry + %i.05 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %conv = trunc i64 %i.05 to i32 + %mul = shl nsw i64 %i.05, 1 + %sub = sub i64 %mul, %n2 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.05, 1 + %cmp = icmp ult i64 %inc, %n1 + br i1 %cmp, label %for.body, label %for.cond1.preheader + +for.body4: ; preds = %for.body4, %for.cond1.preheader + %j.03 = phi i64 [ %inc9, %for.body4 ], [ 0, %for.cond1.preheader ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.cond1.preheader ] + %mul6 = shl i64 %n1, 1 + %add = sub i64 %mul6, %j.03 + %arrayidx7 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx7, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc9 = add nsw i64 %j.03, 1 + %cmp2 = icmp ult i64 %inc9, %n2 + br i1 %cmp2, label %for.body4, label %for.end10 + +for.end10: ; preds = %for.body4, %for.cond1.preheader + ret void +} + + +;; for (long int i = 0; i < n1; i++) +;; A[-i + n2] = ... +;; for (long int j = 0; j < n2; j++) +;; ... 
= A[j - n1]; + +define void @symbolicrdiv3(i32* %A, i32* %B, i64 %n1, i64 %n2) nounwind uwtable ssp { +entry: + %cmp4 = icmp eq i64 %n1, 0 + br i1 %cmp4, label %for.cond1.preheader, label %for.body + +for.cond1.preheader: ; preds = %for.body, %entry + %cmp21 = icmp eq i64 %n2, 0 + br i1 %cmp21, label %for.end9, label %for.body4 + +for.body: ; preds = %for.body, %entry + %i.05 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %conv = trunc i64 %i.05 to i32 + %add = sub i64 %n2, %i.05 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.05, 1 + %cmp = icmp ult i64 %inc, %n1 + br i1 %cmp, label %for.body, label %for.cond1.preheader + +for.body4: ; preds = %for.body4, %for.cond1.preheader + %j.03 = phi i64 [ %inc8, %for.body4 ], [ 0, %for.cond1.preheader ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.cond1.preheader ] + %sub5 = sub i64 %j.03, %n1 + %arrayidx6 = getelementptr inbounds i32* %A, i64 %sub5 + %0 = load i32* %arrayidx6, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc8 = add nsw i64 %j.03, 1 + %cmp2 = icmp ult i64 %inc8, %n2 + br i1 %cmp2, label %for.body4, label %for.end9 + +for.end9: ; preds = %for.body4, %for.cond1.preheader + ret void +} + + +;; for (long int i = 0; i < n1; i++) +;; A[-i + 2*n1] = ... +;; for (long int j = 0; j < n2; j++) +;; ... = A[-j + n1]; + +define void @symbolicrdiv4(i32* %A, i32* %B, i64 %n1, i64 %n2) nounwind uwtable ssp { +entry: + %cmp4 = icmp eq i64 %n1, 0 + br i1 %cmp4, label %for.cond1.preheader, label %for.body + +for.cond1.preheader: ; preds = %for.body, %entry + %cmp21 = icmp eq i64 %n2, 0 + br i1 %cmp21, label %for.end10, label %for.body4 + +for.body: ; preds = %for.body, %entry + %i.05 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %conv = trunc i64 %i.05 to i32 + %mul = shl i64 %n1, 1 + %add = sub i64 %mul, %i.05 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.05, 1 + %cmp = icmp ult i64 %inc, %n1 + br i1 %cmp, label %for.body, label %for.cond1.preheader + +for.body4: ; preds = %for.body4, %for.cond1.preheader + %j.03 = phi i64 [ %inc9, %for.body4 ], [ 0, %for.cond1.preheader ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.cond1.preheader ] + %add6 = sub i64 %n1, %j.03 + %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6 + %0 = load i32* %arrayidx7, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc9 = add nsw i64 %j.03, 1 + %cmp2 = icmp ult i64 %inc9, %n2 + br i1 %cmp2, label %for.body4, label %for.end10 + +for.end10: ; preds = %for.body4, %for.cond1.preheader + ret void +} + + +;; for (long int i = 0; i < n1; i++) +;; A[-i + n2] = ... +;; for (long int j = 0; j < n2; j++) +;; ... 
= A[-j + 2*n2]; + +define void @symbolicrdiv5(i32* %A, i32* %B, i64 %n1, i64 %n2) nounwind uwtable ssp { +entry: + %cmp4 = icmp eq i64 %n1, 0 + br i1 %cmp4, label %for.cond1.preheader, label %for.body + +for.cond1.preheader: ; preds = %for.body, %entry + %cmp21 = icmp eq i64 %n2, 0 + br i1 %cmp21, label %for.end10, label %for.body4 + +for.body: ; preds = %for.body, %entry + %i.05 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %conv = trunc i64 %i.05 to i32 + %add = sub i64 %n2, %i.05 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %inc = add nsw i64 %i.05, 1 + %cmp = icmp ult i64 %inc, %n1 + br i1 %cmp, label %for.body, label %for.cond1.preheader + +for.body4: ; preds = %for.body4, %for.cond1.preheader + %j.03 = phi i64 [ %inc9, %for.body4 ], [ 0, %for.cond1.preheader ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.cond1.preheader ] + %mul = shl i64 %n2, 1 + %add6 = sub i64 %mul, %j.03 + %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6 + %0 = load i32* %arrayidx7, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc9 = add nsw i64 %j.03, 1 + %cmp2 = icmp ult i64 %inc9, %n2 + br i1 %cmp2, label %for.body4, label %for.end10 + +for.end10: ; preds = %for.body4, %for.cond1.preheader + ret void +} + + +;; for (long int i = 0; i < n1; i++) +;; for (long int j = 0; j < n2; j++) +;; A[j -i + n2] = ... +;; ... = A[2*n2]; + +define void @symbolicrdiv6(i32* %A, i32* %B, i64 %n1, i64 %n2) nounwind uwtable ssp { +entry: + %cmp4 = icmp eq i64 %n1, 0 + br i1 %cmp4, label %for.end7, label %for.cond1.preheader + +for.cond1.preheader: ; preds = %for.inc5, %entry + %B.addr.06 = phi i32* [ %B.addr.1.lcssa, %for.inc5 ], [ %B, %entry ] + %i.05 = phi i64 [ %inc6, %for.inc5 ], [ 0, %entry ] + %cmp21 = icmp eq i64 %n2, 0 + br i1 %cmp21, label %for.inc5, label %for.body3 + +for.body3: ; preds = %for.body3, %for.cond1.preheader + %j.03 = phi i64 [ %inc, %for.body3 ], [ 0, %for.cond1.preheader ] + %B.addr.12 = phi i32* [ %incdec.ptr, %for.body3 ], [ %B.addr.06, %for.cond1.preheader ] + %conv = trunc i64 %i.05 to i32 + %sub = sub nsw i64 %j.03, %i.05 + %add = add i64 %sub, %n2 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul = shl i64 %n2, 1 + %arrayidx4 = getelementptr inbounds i32* %A, i64 %mul + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.12, i64 1 + store i32 %0, i32* %B.addr.12, align 4 + %inc = add nsw i64 %j.03, 1 + %cmp2 = icmp ult i64 %inc, %n2 + br i1 %cmp2, label %for.body3, label %for.inc5 + +for.inc5: ; preds = %for.body3, %for.cond1.preheader + %B.addr.1.lcssa = phi i32* [ %B.addr.06, %for.cond1.preheader ], [ %incdec.ptr, %for.body3 ] + %inc6 = add nsw i64 %i.05, 1 + %cmp = icmp ult i64 %inc6, %n1 + br i1 %cmp, label %for.cond1.preheader, label %for.end7 + +for.end7: ; preds = %for.inc5, %entry + ret void +} diff --git a/test/Analysis/DependenceAnalysis/SymbolicSIV.ll b/test/Analysis/DependenceAnalysis/SymbolicSIV.ll new file mode 100644 index 0000000..ee2343f --- /dev/null +++ b/test/Analysis/DependenceAnalysis/SymbolicSIV.ll @@ -0,0 +1,330 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'SymbolicSIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long int i = 0; i < n; i++) +;; A[2*i + n] = ... +;; ... = A[3*i + 3*n]; + +define void @symbolicsiv0(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %add = add i64 %mul, %n + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul14 = add i64 %i.03, %n + %add3 = mul i64 %mul14, 3 + %arrayidx4 = getelementptr inbounds i32* %A, i64 %add3 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long int i = 0; i < n; i++) +;; A[2*i + 5*n] = ... +;; ... = A[3*i + 2*n]; + +define void @symbolicsiv1(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %mul1 = mul i64 %n, 5 + %add = add i64 %mul, %mul1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul2 = mul nsw i64 %i.03, 3 + %mul3 = shl i64 %n, 1 + %add4 = add i64 %mul2, %mul3 + %arrayidx5 = getelementptr inbounds i32* %A, i64 %add4 + %0 = load i32* %arrayidx5, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long int i = 0; i < n; i++) +;; A[2*i - n] = ... +;; ... 
= A[-i + 2*n]; + +define void @symbolicsiv2(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = shl nsw i64 %i.03, 1 + %sub = sub i64 %mul, %n + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %mul2 = shl i64 %n, 1 + %add = sub i64 %mul2, %i.03 + %arrayidx3 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long int i = 0; i < n; i++) +;; A[-2*i + n + 1] = ... +;; ... = A[i - 2*n]; + +define void @symbolicsiv3(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, -2 + %add = add i64 %mul, %n + %add1 = add i64 %add, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add1 + store i32 %conv, i32* %arrayidx, align 4 + %mul2 = shl i64 %n, 1 + %sub = sub i64 %i.03, %mul2 + %arrayidx3 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long int i = 0; i < n; i++) +;; A[-2*i + 3*n] = ... +;; ... = A[-i + n]; + +define void @symbolicsiv4(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, -2 + %mul1 = mul i64 %n, 3 + %add = add i64 %mul, %mul1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %add2 = sub i64 %n, %i.03 + %arrayidx3 = getelementptr inbounds i32* %A, i64 %add2 + %0 = load i32* %arrayidx3, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long int i = 0; i < n; i++) +;; A[-2*i - 2*n] = ... +;; ... 
= A[-i - n]; + +define void @symbolicsiv5(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = mul nsw i64 %i.03, -2 + %mul1 = shl i64 %n, 1 + %sub = sub i64 %mul, %mul1 + %arrayidx = getelementptr inbounds i32* %A, i64 %sub + store i32 %conv, i32* %arrayidx, align 4 + %sub2 = sub nsw i64 0, %i.03 + %sub3 = sub i64 %sub2, %n + %arrayidx4 = getelementptr inbounds i32* %A, i64 %sub3 + %0 = load i32* %arrayidx4, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; why doesn't SCEV package understand that n >= 0? +;;void weaktest(int *A, int *B, long unsigned n) +;; for (long unsigned i = 0; i < n; i++) +;; A[i + n + 1] = ... +;; ... = A[-i]; + +define void @weaktest(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %add = add i64 %i.03, %n + %add1 = add i64 %add, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add1 + store i32 %conv, i32* %arrayidx, align 4 + %sub = sub i64 0, %i.03 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [*|<] splitable! +; CHECK: da analyze - split level = 1, iteration = ((0 smax (-1 + (-1 * %n))) /u 2)! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; void symbolicsiv6(int *A, int *B, long unsigned n, long unsigned N, long unsigned M) { +;; for (long int i = 0; i < n; i++) { +;; A[4*N*i + M] = i; +;; *B++ = A[4*N*i + 3*M + 1]; + +define void @symbolicsiv6(i32* %A, i32* %B, i64 %n, i64 %N, i64 %M) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.body: ; preds = %for.body.preheader, %for.body + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ] + %conv = trunc i64 %i.03 to i32 + %mul = shl i64 %N, 2 + %mul1 = mul i64 %mul, %i.03 + %add = add i64 %mul1, %M + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul2 = shl i64 %N, 2 + %mul3 = mul i64 %mul2, %i.03 + %mul4 = mul i64 %M, 3 + %add5 = add i64 %mul3, %mul4 + %add6 = add i64 %add5, 1 + %arrayidx7 = getelementptr inbounds i32* %A, i64 %add6 + %0 = load i32* %arrayidx7, align 4 + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 +; CHECK: da analyze - none! 
+ store i32 %0, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %exitcond = icmp ne i64 %inc, %n + br i1 %exitcond, label %for.body, label %for.end.loopexit + +for.end.loopexit: ; preds = %for.body + br label %for.end + +for.end: ; preds = %for.end.loopexit, %entry + ret void +} + + +;; void symbolicsiv7(int *A, int *B, long unsigned n, long unsigned N, long unsigned M) { +;; for (long int i = 0; i < n; i++) { +;; A[2*N*i + M] = i; +;; *B++ = A[2*N*i - 3*M + 2]; + +define void @symbolicsiv7(i32* %A, i32* %B, i64 %n, i64 %N, i64 %M) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body.preheader + +for.body.preheader: ; preds = %entry + br label %for.body + +for.body: ; preds = %for.body.preheader, %for.body + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %for.body.preheader ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %for.body.preheader ] + %conv = trunc i64 %i.03 to i32 + %mul = shl i64 %N, 1 + %mul1 = mul i64 %mul, %i.03 + %add = add i64 %mul1, %M + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul2 = shl i64 %N, 1 + %mul3 = mul i64 %mul2, %i.03 + %0 = mul i64 %M, -3 + %sub = add i64 %mul3, %0 + %add5 = add i64 %sub, 2 + %arrayidx6 = getelementptr inbounds i32* %A, i64 %add5 + %1 = load i32* %arrayidx6, align 4 + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 +; CHECK: da analyze - flow [<>]! + store i32 %1, i32* %B.addr.02, align 4 + %inc = add nsw i64 %i.03, 1 + %exitcond = icmp ne i64 %inc, %n + br i1 %exitcond, label %for.body, label %for.end.loopexit + +for.end.loopexit: ; preds = %for.body + br label %for.end + +for.end: ; preds = %for.end.loopexit, %entry + ret void +} diff --git a/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll b/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll new file mode 100644 index 0000000..343e8f4 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll @@ -0,0 +1,220 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'WeakCrossingSIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long unsigned i = 0; i < n; i++) +;; A[1 + n*i] = ... +;; ... = A[1 - n*i]; + +define void @weakcrossing0(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = mul i64 %i.03, %n + %add = add i64 %mul, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %mul1 = mul i64 %i.03, %n + %sub = sub i64 1, %mul1 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [0|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long unsigned i = 0; i < n; i++) +;; A[n + i] = ... +;; ... 
= A[1 + n - i]; + +define void @weakcrossing1(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %add = add i64 %i.03, %n + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %add1 = add i64 %n, 1 + %sub = sub i64 %add1, %i.03 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - flow [<>] splitable! +; CHECK: da analyze - split level = 1, iteration = 0! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long unsigned i = 0; i < 3; i++) +;; A[i] = ... +;; ... = A[6 - i]; + +define void @weakcrossing2(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 %i.02 + store i32 %conv, i32* %arrayidx, align 4 + %sub = sub i64 6, %i.02 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 3 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 4; i++) +;; A[i] = ... +;; ... = A[6 - i]; + +define void @weakcrossing3(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 %i.02 + store i32 %conv, i32* %arrayidx, align 4 + %sub = sub i64 6, %i.02 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [0|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 4 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 10; i++) +;; A[i] = ... +;; ... = A[-6 - i]; + +define void @weakcrossing4(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 %i.02 + store i32 %conv, i32* %arrayidx, align 4 + %sub = sub i64 -6, %i.02 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 10 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < n; i++) +;; A[3*i] = ... +;; ... = A[5 - 3*i]; + +define void @weakcrossing5(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = mul i64 %i.03, 3 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %0 = mul i64 %i.03, -3 + %sub = add i64 %0, 5 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %sub + %1 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %1, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long unsigned i = 0; i < 4; i++) +;; A[i] = ... +;; ... = A[5 - i]; + +define void @weakcrossing6(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 %i.02 + store i32 %conv, i32* %arrayidx, align 4 + %sub = sub i64 5, %i.02 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %sub + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [<>] splitable! +; CHECK: da analyze - split level = 1, iteration = 2! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 4 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} diff --git a/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll b/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll new file mode 100644 index 0000000..a598716 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll @@ -0,0 +1,212 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'WeakZeroDstSIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long unsigned i = 0; i < 30; i++) +;; A[2*i + 10] = ... +;; ... = A[10]; + +define void @weakzerodst0(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = shl i64 %i.02, 1 + %add = add i64 %mul, 10 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 10 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [p<=|<]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 30 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < n; i++) +;; A[n*i + 10] = ... +;; ... = A[10]; + +define void @weakzerodst1(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = mul i64 %i.03, %n + %add = add i64 %mul, 10 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 10 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [p<=|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long unsigned i = 0; i < 5; i++) +;; A[2*i] = ... +;; ... = A[10]; + +define void @weakzerodst2(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = shl i64 %i.02, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 10 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 5 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 6; i++) +;; A[2*i] = ... +;; ... = A[10]; + +define void @weakzerodst3(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = shl i64 %i.02, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 10 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [=>p|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 6 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 7; i++) +;; A[2*i] = ... +;; ... 
= A[10]; + +define void @weakzerodst4(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = shl i64 %i.02, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 10 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [*|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 7 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 7; i++) +;; A[2*i] = ... +;; ... = A[-10]; + +define void @weakzerodst5(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %mul = shl i64 %i.02, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 -10 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 7 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < n; i++) +;; A[3*i] = ... +;; ... = A[10]; + +define void @weakzerodst6(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %mul = mul i64 %i.03, 3 + %arrayidx = getelementptr inbounds i32* %A, i64 %mul + store i32 %conv, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 10 + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} diff --git a/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll b/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll new file mode 100644 index 0000000..fd4f462 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll @@ -0,0 +1,212 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'WeakZeroSrcSIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; for (long unsigned i = 0; i < 30; i++) +;; A[10] = ... +;; ... 
= A[2*i + 10]; + +define void @weakzerosrc0(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 10 + store i32 %conv, i32* %arrayidx, align 4 + %mul = shl i64 %i.02, 1 + %add = add i64 %mul, 10 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [p<=|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 30 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < n; i++) +;; A[10] = ... +;; ... = A[n*i + 10]; + +define void @weakzerosrc1(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 10 + store i32 %conv, i32* %arrayidx, align 4 + %mul = mul i64 %i.03, %n + %add = add i64 %mul, 10 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [p<=|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} + + +;; for (long unsigned i = 0; i < 5; i++) +;; A[10] = ... +;; ... = A[2*i]; + +define void @weakzerosrc2(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 10 + store i32 %conv, i32* %arrayidx, align 4 + %mul = shl i64 %i.02, 1 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 5 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 6; i++) +;; A[10] = ... +;; ... = A[2*i]; + +define void @weakzerosrc3(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 10 + store i32 %conv, i32* %arrayidx, align 4 + %mul = shl i64 %i.02, 1 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [=>p|<]! 
+ %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 6 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 7; i++) +;; A[10] = ... +;; ... = A[2*i]; + +define void @weakzerosrc4(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 10 + store i32 %conv, i32* %arrayidx, align 4 + %mul = shl i64 %i.02, 1 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow [*|<]! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 7 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < 7; i++) +;; A[-10] = ... +;; ... = A[2*i]; + +define void @weakzerosrc5(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + br label %for.body + +for.body: ; preds = %for.body, %entry + %i.02 = phi i64 [ 0, %entry ], [ %inc, %for.body ] + %B.addr.01 = phi i32* [ %B, %entry ], [ %incdec.ptr, %for.body ] + %conv = trunc i64 %i.02 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 -10 + store i32 %conv, i32* %arrayidx, align 4 + %mul = shl i64 %i.02, 1 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.01, i64 1 + store i32 %0, i32* %B.addr.01, align 4 + %inc = add i64 %i.02, 1 + %cmp = icmp ult i64 %inc, 7 + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body + ret void +} + + +;; for (long unsigned i = 0; i < n; i++) +;; A[10] = ... +;; ... = A[3*i]; + +define void @weakzerosrc6(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %cmp1 = icmp eq i64 %n, 0 + br i1 %cmp1, label %for.end, label %for.body + +for.body: ; preds = %for.body, %entry + %i.03 = phi i64 [ %inc, %for.body ], [ 0, %entry ] + %B.addr.02 = phi i32* [ %incdec.ptr, %for.body ], [ %B, %entry ] + %conv = trunc i64 %i.03 to i32 + %arrayidx = getelementptr inbounds i32* %A, i64 10 + store i32 %conv, i32* %arrayidx, align 4 + %mul = mul i64 %i.03, 3 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %mul + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + %incdec.ptr = getelementptr inbounds i32* %B.addr.02, i64 1 + store i32 %0, i32* %B.addr.02, align 4 + %inc = add i64 %i.03, 1 + %cmp = icmp ult i64 %inc, %n + br i1 %cmp, label %for.body, label %for.end + +for.end: ; preds = %for.body, %entry + ret void +} diff --git a/test/Analysis/DependenceAnalysis/ZIV.ll b/test/Analysis/DependenceAnalysis/ZIV.ll new file mode 100644 index 0000000..42b2389 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/ZIV.ll @@ -0,0 +1,53 @@ +; RUN: opt < %s -analyze -basicaa -da | FileCheck %s + +; ModuleID = 'ZIV.bc' +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" +target triple = "x86_64-apple-macosx10.6.0" + + +;; A[n + 1] = ... +;; ... 
= A[1 + n]; + +define void @z0(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %add = add i64 %n, 1 + %arrayidx = getelementptr inbounds i32* %A, i64 %add + store i32 0, i32* %arrayidx, align 4 + %add1 = add i64 %n, 1 + %arrayidx2 = getelementptr inbounds i32* %A, i64 %add1 + %0 = load i32* %arrayidx2, align 4 +; CHECK: da analyze - consistent flow! + store i32 %0, i32* %B, align 4 + ret void +} + + +;; A[n] = ... +;; ... = A[n + 1]; + +define void @z1(i32* %A, i32* %B, i64 %n) nounwind uwtable ssp { +entry: + %arrayidx = getelementptr inbounds i32* %A, i64 %n + store i32 0, i32* %arrayidx, align 4 + %add = add i64 %n, 1 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %add + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - none! + store i32 %0, i32* %B, align 4 + ret void +} + + +;; A[n] = ... +;; ... = A[m]; + +define void @z2(i32* %A, i32* %B, i64 %n, i64 %m) nounwind uwtable ssp { +entry: + %arrayidx = getelementptr inbounds i32* %A, i64 %n + store i32 0, i32* %arrayidx, align 4 + %arrayidx1 = getelementptr inbounds i32* %A, i64 %m + %0 = load i32* %arrayidx1, align 4 +; CHECK: da analyze - flow! + store i32 %0, i32* %B, align 4 + ret void +} diff --git a/test/Analysis/DependenceAnalysis/lit.local.cfg b/test/Analysis/DependenceAnalysis/lit.local.cfg new file mode 100644 index 0000000..c6106e4 --- /dev/null +++ b/test/Analysis/DependenceAnalysis/lit.local.cfg @@ -0,0 +1 @@ +config.suffixes = ['.ll'] diff --git a/test/Analysis/LoopDependenceAnalysis/alias.ll b/test/Analysis/LoopDependenceAnalysis/alias.ll deleted file mode 100644 index 78d0bf4..0000000 --- a/test/Analysis/LoopDependenceAnalysis/alias.ll +++ /dev/null @@ -1,44 +0,0 @@ -; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s - -;; x[5] = x[6] // with x being a pointer passed as argument - -define void @f1(i32* nocapture %xptr) nounwind { -entry: - %x.ld.addr = getelementptr i32* %xptr, i64 6 - %x.st.addr = getelementptr i32* %xptr, i64 5 - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %x = load i32* %x.ld.addr - store i32 %x, i32* %x.st.addr -; CHECK: 0,1: dep - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; x[5] = x[6] // with x being an array on the stack - -define void @foo(...) 
nounwind { -entry: - %xptr = alloca [256 x i32], align 4 - %x.ld.addr = getelementptr [256 x i32]* %xptr, i64 0, i64 6 - %x.st.addr = getelementptr [256 x i32]* %xptr, i64 0, i64 5 - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %x = load i32* %x.ld.addr - store i32 %x, i32* %x.st.addr -; CHECK: 0,1: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} diff --git a/test/Analysis/LoopDependenceAnalysis/lit.local.cfg b/test/Analysis/LoopDependenceAnalysis/lit.local.cfg deleted file mode 100644 index 19eebc0..0000000 --- a/test/Analysis/LoopDependenceAnalysis/lit.local.cfg +++ /dev/null @@ -1 +0,0 @@ -config.suffixes = ['.ll', '.c', '.cpp'] diff --git a/test/Analysis/LoopDependenceAnalysis/siv-strong.ll b/test/Analysis/LoopDependenceAnalysis/siv-strong.ll deleted file mode 100644 index 401e466..0000000 --- a/test/Analysis/LoopDependenceAnalysis/siv-strong.ll +++ /dev/null @@ -1,110 +0,0 @@ -; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s - -@x = common global [256 x i32] zeroinitializer, align 4 -@y = common global [256 x i32] zeroinitializer, align 4 - -;; for (i = 0; i < 256; i++) -;; x[i] = x[i] + y[i] - -define void @f1(...) nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %y.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i - %x.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i - %x = load i32* %x.addr ; 0 - %y = load i32* %y.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; for (i = 0; i < 256; i++) -;; x[i+1] = x[i] + y[i] - -define void @f2(...) nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %y.ld.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i - %x.ld.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i - %i.next = add i64 %i, 1 - %x.st.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i.next - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.ld.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.st.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; for (i = 0; i < 10; i++) -;; x[i+20] = x[i] + y[i] - -define void @f3(...) nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %y.ld.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i - %x.ld.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i - %i.20 = add i64 %i, 20 - %x.st.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i.20 - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.ld.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.st.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 10 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; for (i = 0; i < 10; i++) -;; x[10*i+1] = x[10*i] + y[i] - -define void @f4(...) 
nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %i.10 = mul i64 %i, 10 - %y.ld.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i.10 - %x.ld.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i.10 - %i.10.1 = add i64 %i.10, 1 - %x.st.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i.10.1 - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.ld.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.st.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 10 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} diff --git a/test/Analysis/LoopDependenceAnalysis/siv-weak-crossing.ll b/test/Analysis/LoopDependenceAnalysis/siv-weak-crossing.ll deleted file mode 100644 index 9d0128c..0000000 --- a/test/Analysis/LoopDependenceAnalysis/siv-weak-crossing.ll +++ /dev/null @@ -1,118 +0,0 @@ -; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s - -@x = common global [256 x i32] zeroinitializer, align 4 -@y = common global [256 x i32] zeroinitializer, align 4 - -;; for (i = 0; i < 256; i++) -;; x[i] = x[255 - i] + y[i] - -define void @f1(...) nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %i.255 = sub i64 255, %i - %y.ld.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i - %x.ld.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i.255 - %x.st.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.ld.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.st.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; for (i = 0; i < 100; i++) -;; x[i] = x[255 - i] + y[i] - -define void @f2(...) nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %i.255 = sub i64 255, %i - %y.ld.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i - %x.ld.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i.255 - %x.st.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.ld.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.st.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 100 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; // the first iteration (i=0) leads to an out-of-bounds access of x. as the -;; // result of this access is undefined, _any_ dependence result is safe. -;; for (i = 0; i < 256; i++) -;; x[i] = x[256 - i] + y[i] - -define void @f3(...) nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %i.256 = sub i64 0, %i - %y.ld.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i - %x.ld.addr = getelementptr [256 x i32]* @x, i64 1, i64 %i.256 - %x.st.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.ld.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.st.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; // slightly contrived but valid IR for the following loop, where all -;; // accesses in all iterations are within bounds. 
while this example's first -;; // (ZIV-)subscript is (0, 1), accesses are dependent. -;; for (i = 1; i < 256; i++) -;; x[i] = x[256 - i] + y[i] - -define void @f4(...) nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %i.1 = add i64 1, %i - %i.256 = sub i64 -1, %i - %y.ld.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i.1 - %x.ld.addr = getelementptr [256 x i32]* @x, i64 1, i64 %i.256 - %x.st.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i.1 - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.ld.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.st.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} diff --git a/test/Analysis/LoopDependenceAnalysis/siv-weak-zero.ll b/test/Analysis/LoopDependenceAnalysis/siv-weak-zero.ll deleted file mode 100644 index 1c5ae4c..0000000 --- a/test/Analysis/LoopDependenceAnalysis/siv-weak-zero.ll +++ /dev/null @@ -1,56 +0,0 @@ -; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s - -@x = common global [256 x i32] zeroinitializer, align 4 -@y = common global [256 x i32] zeroinitializer, align 4 - -;; for (i = 0; i < 256; i++) -;; x[i] = x[42] + y[i] - -define void @f1(...) nounwind { -entry: - %x.ld.addr = getelementptr [256 x i32]* @x, i64 0, i64 42 - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %x.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i - %y.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; for (i = 0; i < 250; i++) -;; x[i] = x[255] + y[i] - -define void @f2(...) nounwind { -entry: - %x.ld.addr = getelementptr [256 x i32]* @x, i64 0, i64 255 - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %x.addr = getelementptr [256 x i32]* @x, i64 0, i64 %i - %y.addr = getelementptr [256 x i32]* @y, i64 0, i64 %i - %x = load i32* %x.ld.addr ; 0 - %y = load i32* %y.addr ; 1 - %r = add i32 %y, %x - store i32 %r, i32* %x.addr ; 2 -; CHECK: 0,2: dep -; CHECK: 1,2: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 250 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} diff --git a/test/Analysis/LoopDependenceAnalysis/ziv.ll b/test/Analysis/LoopDependenceAnalysis/ziv.ll deleted file mode 100644 index 645ae7f..0000000 --- a/test/Analysis/LoopDependenceAnalysis/ziv.ll +++ /dev/null @@ -1,63 +0,0 @@ -; RUN: opt < %s -analyze -basicaa -lda | FileCheck %s - -@x = common global [256 x i32] zeroinitializer, align 4 - -;; x[5] = x[6] - -define void @f1(...) 
nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %x = load i32* getelementptr ([256 x i32]* @x, i32 0, i64 6) - store i32 %x, i32* getelementptr ([256 x i32]* @x, i32 0, i64 5) -; CHECK: 0,1: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; x[c] = x[c+1] // with c being a loop-invariant constant - -define void @f2(i64 %c0) nounwind { -entry: - %c1 = add i64 %c0, 1 - %x.ld.addr = getelementptr [256 x i32]* @x, i64 0, i64 %c0 - %x.st.addr = getelementptr [256 x i32]* @x, i64 0, i64 %c1 - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %x = load i32* %x.ld.addr - store i32 %x, i32* %x.st.addr -; CHECK: 0,1: ind - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} - -;; x[6] = x[6] - -define void @f3(...) nounwind { -entry: - br label %for.body - -for.body: - %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] - %x = load i32* getelementptr ([256 x i32]* @x, i32 0, i64 6) - store i32 %x, i32* getelementptr ([256 x i32]* @x, i32 0, i64 6) -; CHECK: 0,1: dep - %i.next = add i64 %i, 1 - %exitcond = icmp eq i64 %i.next, 256 - br i1 %exitcond, label %for.end, label %for.body - -for.end: - ret void -} diff --git a/test/Analysis/Profiling/load-branch-weights-ifs.ll b/test/Analysis/Profiling/load-branch-weights-ifs.ll new file mode 100644 index 0000000..7ed090b --- /dev/null +++ b/test/Analysis/Profiling/load-branch-weights-ifs.ll @@ -0,0 +1,122 @@ +; RUN: opt -insert-edge-profiling -o %t1 < %s +; RUN: rm -f %t1.prof_data +; RUN: lli %defaultjit -load %llvmshlibdir/libprofile_rt%shlibext %t1 \ +; RUN: -llvmprof-output %t1.prof_data +; RUN: opt -profile-file %t1.prof_data -profile-metadata-loader -S -o - < %s \ +; RUN: | FileCheck %s +; RUN: rm -f %t1.prof_data + +; FIXME: profile_rt.dll could be built on win32. +; REQUIRES: loadable_module + +;; func_mod - Branch taken 6 times in 7. +define i32 @func_mod(i32 %N) nounwind uwtable { +entry: + %retval = alloca i32, align 4 + %N.addr = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + %0 = load i32* %N.addr, align 4 + %rem = srem i32 %0, 7 + %tobool = icmp ne i32 %rem, 0 + br i1 %tobool, label %if.then, label %if.else +; CHECK: br i1 %tobool, label %if.then, label %if.else, !prof !0 + +if.then: + store i32 1, i32* %retval + br label %return + +if.else: + store i32 0, i32* %retval + br label %return + +return: + %1 = load i32* %retval + ret i32 %1 +} + +;; func_const_true - conditional branch which 100% taken probability. +define i32 @func_const_true(i32 %N) nounwind uwtable { +entry: + %retval = alloca i32, align 4 + %N.addr = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + %0 = load i32* %N.addr, align 4 + %cmp = icmp eq i32 %0, 1 + br i1 %cmp, label %if.then, label %if.end +; CHECK: br i1 %cmp, label %if.then, label %if.end, !prof !1 + +if.then: + store i32 1, i32* %retval + br label %return + +if.end: + store i32 0, i32* %retval + br label %return + +return: + %1 = load i32* %retval + ret i32 %1 +} + +;; func_const_true - conditional branch which 100% not-taken probability. 
+define i32 @func_const_false(i32 %N) nounwind uwtable { +entry: + %retval = alloca i32, align 4 + %N.addr = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + %0 = load i32* %N.addr, align 4 + %cmp = icmp eq i32 %0, 1 + br i1 %cmp, label %if.then, label %if.end +; CHECK: br i1 %cmp, label %if.then, label %if.end, !prof !2 + +if.then: + store i32 1, i32* %retval + br label %return + +if.end: + store i32 0, i32* %retval + br label %return + +return: + %1 = load i32* %retval + ret i32 %1 +} + +define i32 @main(i32 %argc, i8** %argv) nounwind uwtable { +entry: + %retval = alloca i32, align 4 + %argc.addr = alloca i32, align 4 + %argv.addr = alloca i8**, align 8 + %loop = alloca i32, align 4 + store i32 0, i32* %retval + store i32 0, i32* %loop, align 4 + br label %for.cond + +for.cond: + %0 = load i32* %loop, align 4 + %cmp = icmp slt i32 %0, 7000 + br i1 %cmp, label %for.body, label %for.end +; CHECK: br i1 %cmp, label %for.body, label %for.end, !prof !3 + +for.body: + %1 = load i32* %loop, align 4 + %call = call i32 @func_mod(i32 %1) + br label %for.inc + +for.inc: + %2 = load i32* %loop, align 4 + %inc = add nsw i32 %2, 1 + store i32 %inc, i32* %loop, align 4 + br label %for.cond + +for.end: + %call1 = call i32 @func_const_true(i32 1) + %call2 = call i32 @func_const_false(i32 0) + ret i32 0 +} + +; CHECK: !0 = metadata !{metadata !"branch_weights", i32 6000, i32 1000} +; CHECK: !1 = metadata !{metadata !"branch_weights", i32 1, i32 0} +; CHECK: !2 = metadata !{metadata !"branch_weights", i32 0, i32 1} +; CHECK: !3 = metadata !{metadata !"branch_weights", i32 7000, i32 1} +; CHECK-NOT: !4 diff --git a/test/Analysis/Profiling/load-branch-weights-loops.ll b/test/Analysis/Profiling/load-branch-weights-loops.ll new file mode 100644 index 0000000..9d1925a --- /dev/null +++ b/test/Analysis/Profiling/load-branch-weights-loops.ll @@ -0,0 +1,188 @@ +; RUN: opt -insert-edge-profiling -o %t1 < %s +; RUN: rm -f %t1.prof_data +; RUN: lli %defaultjit -load %llvmshlibdir/libprofile_rt%shlibext %t1 \ +; RUN: -llvmprof-output %t1.prof_data +; RUN: opt -profile-file %t1.prof_data -profile-metadata-loader -S -o - < %s \ +; RUN: | FileCheck %s +; RUN: rm -f %t1.prof_data + +; FIXME: profile_rt.dll could be built on win32. +; REQUIRES: loadable_module + +;; func_for - Test branch probabilities for a vanilla for loop. +define i32 @func_for(i32 %N) nounwind uwtable { +entry: + %N.addr = alloca i32, align 4 + %ret = alloca i32, align 4 + %loop = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + store i32 0, i32* %ret, align 4 + store i32 0, i32* %loop, align 4 + br label %for.cond + +for.cond: + %0 = load i32* %loop, align 4 + %1 = load i32* %N.addr, align 4 + %cmp = icmp slt i32 %0, %1 + br i1 %cmp, label %for.body, label %for.end +; CHECK: br i1 %cmp, label %for.body, label %for.end, !prof !0 + +for.body: + %2 = load i32* %N.addr, align 4 + %3 = load i32* %ret, align 4 + %add = add nsw i32 %3, %2 + store i32 %add, i32* %ret, align 4 + br label %for.inc + +for.inc: + %4 = load i32* %loop, align 4 + %inc = add nsw i32 %4, 1 + store i32 %inc, i32* %loop, align 4 + br label %for.cond + +for.end: + %5 = load i32* %ret, align 4 + ret i32 %5 +} + +;; func_for_odd - Test branch probabilities for a for loop with a continue and +;; a break. 
+define i32 @func_for_odd(i32 %N) nounwind uwtable { +entry: + %N.addr = alloca i32, align 4 + %ret = alloca i32, align 4 + %loop = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + store i32 0, i32* %ret, align 4 + store i32 0, i32* %loop, align 4 + br label %for.cond + +for.cond: + %0 = load i32* %loop, align 4 + %1 = load i32* %N.addr, align 4 + %cmp = icmp slt i32 %0, %1 + br i1 %cmp, label %for.body, label %for.end +; CHECK: br i1 %cmp, label %for.body, label %for.end, !prof !1 + +for.body: + %2 = load i32* %loop, align 4 + %rem = srem i32 %2, 10 + %tobool = icmp ne i32 %rem, 0 + br i1 %tobool, label %if.then, label %if.end +; CHECK: br i1 %tobool, label %if.then, label %if.end, !prof !2 + +if.then: + br label %for.inc + +if.end: + %3 = load i32* %loop, align 4 + %cmp1 = icmp eq i32 %3, 500 + br i1 %cmp1, label %if.then2, label %if.end3 +; CHECK: br i1 %cmp1, label %if.then2, label %if.end3, !prof !3 + +if.then2: + br label %for.end + +if.end3: + %4 = load i32* %N.addr, align 4 + %5 = load i32* %ret, align 4 + %add = add nsw i32 %5, %4 + store i32 %add, i32* %ret, align 4 + br label %for.inc + +for.inc: + %6 = load i32* %loop, align 4 + %inc = add nsw i32 %6, 1 + store i32 %inc, i32* %loop, align 4 + br label %for.cond + +for.end: + %7 = load i32* %ret, align 4 + ret i32 %7 +} + +;; func_while - Test branch probability in a vanilla while loop. +define i32 @func_while(i32 %N) nounwind uwtable { +entry: + %N.addr = alloca i32, align 4 + %ret = alloca i32, align 4 + %loop = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + store i32 0, i32* %ret, align 4 + store i32 0, i32* %loop, align 4 + br label %while.cond + +while.cond: + %0 = load i32* %loop, align 4 + %1 = load i32* %N.addr, align 4 + %cmp = icmp slt i32 %0, %1 + br i1 %cmp, label %while.body, label %while.end +; CHECK: br i1 %cmp, label %while.body, label %while.end, !prof !0 + +while.body: + %2 = load i32* %N.addr, align 4 + %3 = load i32* %ret, align 4 + %add = add nsw i32 %3, %2 + store i32 %add, i32* %ret, align 4 + %4 = load i32* %loop, align 4 + %inc = add nsw i32 %4, 1 + store i32 %inc, i32* %loop, align 4 + br label %while.cond + +while.end: + %5 = load i32* %ret, align 4 + ret i32 %5 +} + +;; func_while - Test branch probability in a vanilla do-while loop. 
+define i32 @func_do_while(i32 %N) nounwind uwtable { +entry: + %N.addr = alloca i32, align 4 + %ret = alloca i32, align 4 + %loop = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + store i32 0, i32* %ret, align 4 + store i32 0, i32* %loop, align 4 + br label %do.body + +do.body: + %0 = load i32* %N.addr, align 4 + %1 = load i32* %ret, align 4 + %add = add nsw i32 %1, %0 + store i32 %add, i32* %ret, align 4 + %2 = load i32* %loop, align 4 + %inc = add nsw i32 %2, 1 + store i32 %inc, i32* %loop, align 4 + br label %do.cond + +do.cond: + %3 = load i32* %loop, align 4 + %4 = load i32* %N.addr, align 4 + %cmp = icmp slt i32 %3, %4 + br i1 %cmp, label %do.body, label %do.end +; CHECK: br i1 %cmp, label %do.body, label %do.end, !prof !4 + +do.end: + %5 = load i32* %ret, align 4 + ret i32 %5 +} + +define i32 @main(i32 %argc, i8** %argv) nounwind uwtable { +entry: + %retval = alloca i32, align 4 + %argc.addr = alloca i32, align 4 + %argv.addr = alloca i8**, align 8 + store i32 0, i32* %retval + %call = call i32 @func_for(i32 1000) + %call1 = call i32 @func_for_odd(i32 1000) + %call2 = call i32 @func_while(i32 1000) + %call3 = call i32 @func_do_while(i32 1000) + ret i32 0 +} + +!0 = metadata !{metadata !"branch_weights", i32 1000, i32 1} +!1 = metadata !{metadata !"branch_weights", i32 501, i32 0} +!2 = metadata !{metadata !"branch_weights", i32 450, i32 51} +!3 = metadata !{metadata !"branch_weights", i32 1, i32 50} +!4 = metadata !{metadata !"branch_weights", i32 999, i32 1} +; CHECK-NOT: !5 diff --git a/test/Analysis/Profiling/load-branch-weights-switches.ll b/test/Analysis/Profiling/load-branch-weights-switches.ll new file mode 100644 index 0000000..5587c71 --- /dev/null +++ b/test/Analysis/Profiling/load-branch-weights-switches.ll @@ -0,0 +1,165 @@ +; RUN: opt -insert-edge-profiling -o %t1 < %s +; RUN: rm -f %t1.prof_data +; RUN: lli %defaultjit -load %llvmshlibdir/libprofile_rt%shlibext %t1 \ +; RUN: -llvmprof-output %t1.prof_data +; RUN: opt -profile-file %t1.prof_data -profile-metadata-loader -S -o - < %s \ +; RUN: | FileCheck %s +; RUN: rm -f %t1.prof_data + +; FIXME: profile_rt.dll could be built on win32. +; REQUIRES: loadable_module + +;; func_switch - Test branch probabilities for a switch instruction with an +;; even chance of taking each case (or no case). +define i32 @func_switch(i32 %N) nounwind uwtable { +entry: + %retval = alloca i32, align 4 + %N.addr = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + %0 = load i32* %N.addr, align 4 + %rem = srem i32 %0, 4 + switch i32 %rem, label %sw.epilog [ + i32 0, label %sw.bb + i32 1, label %sw.bb1 + i32 2, label %sw.bb2 + ] +; CHECK: ], !prof !0 + +sw.bb: + store i32 5, i32* %retval + br label %return + +sw.bb1: + store i32 6, i32* %retval + br label %return + +sw.bb2: + store i32 7, i32* %retval + br label %return + +sw.epilog: + store i32 8, i32* %retval + br label %return + +return: + %1 = load i32* %retval + ret i32 %1 +} + +;; func_switch_switch - Test branch probabilities in a switch-instruction that +;; leads to further switch instructions. The first-tier switch occludes some +;; possibilities in the second-tier switches, leading to some branches having a +;; 0 probability. 
+define i32 @func_switch_switch(i32 %N) nounwind uwtable { +entry: + %retval = alloca i32, align 4 + %N.addr = alloca i32, align 4 + store i32 %N, i32* %N.addr, align 4 + %0 = load i32* %N.addr, align 4 + %rem = srem i32 %0, 2 + switch i32 %rem, label %sw.default11 [ + i32 0, label %sw.bb + i32 1, label %sw.bb5 + ] +; CHECK: ], !prof !1 + +sw.bb: + %1 = load i32* %N.addr, align 4 + %rem1 = srem i32 %1, 4 + switch i32 %rem1, label %sw.default [ + i32 0, label %sw.bb2 + i32 1, label %sw.bb3 + i32 2, label %sw.bb4 + ] +; CHECK: ], !prof !2 + +sw.bb2: + store i32 5, i32* %retval + br label %return + +sw.bb3: + store i32 6, i32* %retval + br label %return + +sw.bb4: + store i32 7, i32* %retval + br label %return + +sw.default: + store i32 8, i32* %retval + br label %return + +sw.bb5: + %2 = load i32* %N.addr, align 4 + %rem6 = srem i32 %2, 4 + switch i32 %rem6, label %sw.default10 [ + i32 0, label %sw.bb7 + i32 1, label %sw.bb8 + i32 2, label %sw.bb9 + ] +; CHECK: ], !prof !3 + +sw.bb7: + store i32 9, i32* %retval + br label %return + +sw.bb8: + store i32 10, i32* %retval + br label %return + +sw.bb9: + store i32 11, i32* %retval + br label %return + +sw.default10: + store i32 12, i32* %retval + br label %return + +sw.default11: + store i32 13, i32* %retval + br label %return + +return: + %3 = load i32* %retval + ret i32 %3 +} + +define i32 @main(i32 %argc, i8** %argv) nounwind uwtable { +entry: + %retval = alloca i32, align 4 + %argc.addr = alloca i32, align 4 + %argv.addr = alloca i8**, align 8 + %loop = alloca i32, align 4 + store i32 0, i32* %retval + store i32 0, i32* %loop, align 4 + br label %for.cond + +for.cond: + %0 = load i32* %loop, align 4 + %cmp = icmp slt i32 %0, 4000 + br i1 %cmp, label %for.body, label %for.end +; CHECK: br i1 %cmp, label %for.body, label %for.end, !prof !4 + +for.body: + %1 = load i32* %loop, align 4 + %call = call i32 @func_switch(i32 %1) + %2 = load i32* %loop, align 4 + %call1 = call i32 @func_switch_switch(i32 %2) + br label %for.inc + +for.inc: + %3 = load i32* %loop, align 4 + %inc = add nsw i32 %3, 1 + store i32 %inc, i32* %loop, align 4 + br label %for.cond + +for.end: + ret i32 0 +} + +; CHECK: !0 = metadata !{metadata !"branch_weights", i32 1000, i32 1000, i32 1000, i32 1000} +; CHECK: !1 = metadata !{metadata !"branch_weights", i32 0, i32 2000, i32 2000} +; CHECK: !2 = metadata !{metadata !"branch_weights", i32 0, i32 1000, i32 0, i32 1000} +; CHECK: !3 = metadata !{metadata !"branch_weights", i32 1000, i32 0, i32 1000, i32 0} +; CHECK: !4 = metadata !{metadata !"branch_weights", i32 4000, i32 1} +; CHECK-NOT: !5 diff --git a/test/Assembler/2008-09-02-FunctionNotes2.ll b/test/Assembler/2008-09-02-FunctionNotes2.ll index 97351e2..47eb011 100644 --- a/test/Assembler/2008-09-02-FunctionNotes2.ll +++ b/test/Assembler/2008-09-02-FunctionNotes2.ll @@ -1,5 +1,5 @@ ; Test function notes -; RUN: not llvm-as %s -o /dev/null 2>&1 | grep "Attributes noinline alwaysinline are incompatible" +; RUN: not llvm-as %s -o /dev/null 2>&1 | grep "Attributes 'noinline and alwaysinline' are incompatible" define void @fn1() alwaysinline noinline { ret void } diff --git a/test/Assembler/global-addrspace-forwardref.ll b/test/Assembler/global-addrspace-forwardref.ll new file mode 100644 index 0000000..f0f094a --- /dev/null +++ b/test/Assembler/global-addrspace-forwardref.ll @@ -0,0 +1,8 @@ +; RUN: llvm-as < %s | llvm-dis | FileCheck %s + +; Make sure the address space of forward decls is preserved + +; CHECK: @a2 = global i8 addrspace(1)* @a +; CHECK: @a = addrspace(1) global 
i8 0 +@a2 = global i8 addrspace(1)* @a +@a = addrspace(1) global i8 0 diff --git a/test/Assembler/invalid-fwdref1.ll b/test/Assembler/invalid-fwdref1.ll new file mode 100644 index 0000000..ef8b16c --- /dev/null +++ b/test/Assembler/invalid-fwdref1.ll @@ -0,0 +1,4 @@ +; RUN: not llvm-as %s -disable-output 2>&1 | grep "invalid forward reference to function as global value!" + +define i8* @test1() { ret i8* @test1a } +define void @test1a() { } diff --git a/test/Bindings/Ocaml/ipo_opts.ml b/test/Bindings/Ocaml/ipo_opts.ml index 3a36231..d4537e4 100644 --- a/test/Bindings/Ocaml/ipo_opts.ml +++ b/test/Bindings/Ocaml/ipo_opts.ml @@ -43,10 +43,10 @@ let test_transforms () = ignore (build_ret (build_call fn [| |] "" b) b); end; - let td = TargetData.create (target_triple m) in + let td = DataLayout.create (target_triple m) in ignore (PassManager.create () - ++ TargetData.add td + ++ DataLayout.add td ++ add_argument_promotion ++ add_constant_merge ++ add_dead_arg_elimination @@ -63,7 +63,7 @@ let test_transforms () = ++ PassManager.run_module m ++ PassManager.dispose); - TargetData.dispose td + DataLayout.dispose td (*===-- Driver ------------------------------------------------------------===*) diff --git a/test/Bindings/Ocaml/scalar_opts.ml b/test/Bindings/Ocaml/scalar_opts.ml index 34a7a6a..0760dad 100644 --- a/test/Bindings/Ocaml/scalar_opts.ml +++ b/test/Bindings/Ocaml/scalar_opts.ml @@ -38,10 +38,10 @@ let test_transforms () = let fn = define_function "fn" fty m in ignore (build_ret_void (builder_at_end context (entry_block fn))); - let td = TargetData.create (target_triple m) in + let td = DataLayout.create (target_triple m) in ignore (PassManager.create_function m - ++ TargetData.add td + ++ DataLayout.add td ++ add_verifier ++ add_constant_propagation ++ add_sccp @@ -78,7 +78,7 @@ let test_transforms () = ++ PassManager.finalize ++ PassManager.dispose); - TargetData.dispose td + DataLayout.dispose td (*===-- Driver ------------------------------------------------------------===*) diff --git a/test/Bindings/Ocaml/target.ml b/test/Bindings/Ocaml/target.ml index 1b6b71e..7a35a790 100644 --- a/test/Bindings/Ocaml/target.ml +++ b/test/Bindings/Ocaml/target.ml @@ -33,10 +33,10 @@ let m = create_module context filename (*===-- Target Data -------------------------------------------------------===*) let test_target_data () = - let td = TargetData.create (target_triple m) in + let td = DataLayout.create (target_triple m) in let sty = struct_type context [| i32_type; i64_type |] in - ignore (TargetData.as_string td); + ignore (DataLayout.as_string td); ignore (byte_order td); ignore (pointer_size td); ignore (intptr_type td); @@ -49,7 +49,7 @@ let test_target_data () = ignore (element_at_offset td sty (Int64.of_int 1)); ignore (offset_of_element td sty 1); - TargetData.dispose td + DataLayout.dispose td (*===-- Driver ------------------------------------------------------------===*) diff --git a/test/Bindings/Ocaml/vmcore.ml b/test/Bindings/Ocaml/vmcore.ml index b8eb6d3..61be4b77 100644 --- a/test/Bindings/Ocaml/vmcore.ml +++ b/test/Bindings/Ocaml/vmcore.ml @@ -113,14 +113,14 @@ let test_constants () = ignore (define_global "const_int_string" c m); insist (i32_type = type_of c); - (* RUN: grep 'const_string.*"cruel\00world"' < %t.ll + (* RUN: grep 'const_string.*"cruel\\00world"' < %t.ll *) group "string"; let c = const_string context "cruel\000world" in ignore (define_global "const_string" c m); insist ((array_type i8_type 11) = type_of c); - (* RUN: grep 'const_stringz.*"hi\00again\00"' < %t.ll + 
(* RUN: grep 'const_stringz.*"hi\\00again\\00"' < %t.ll *) group "stringz"; let c = const_stringz context "hi\000again" in @@ -187,7 +187,7 @@ let test_constants () = ignore (define_global "const_all_ones" c m); group "pointer null"; begin - (* RUN: grep "const_pointer_null = global i64* null" < %t.ll + (* RUN: grep "const_pointer_null = global i64\* null" < %t.ll *) let c = const_pointer_null (pointer_type i64_type) in ignore (define_global "const_pointer_null" c m); @@ -542,7 +542,7 @@ let test_users () = (*===-- Aliases -----------------------------------------------------------===*) let test_aliases () = - (* RUN: grep "@alias = alias i32* @aliasee" < %t.ll + (* RUN: grep "@alias = alias i32\* @aliasee" < %t.ll *) let v = declare_global i32_type "aliasee" m in ignore (add_alias m (pointer_type i32_type) v "alias") @@ -554,7 +554,7 @@ let test_functions () = let ty = function_type i32_type [| i32_type; i64_type |] in let ty2 = function_type i8_type [| i8_type; i64_type |] in - (* RUN: grep "declare i32 @Fn1\(i32, i64\)" < %t.ll + (* RUN: grep 'declare i32 @Fn1(i32, i64)' < %t.ll *) begin group "declare"; insist (None = lookup_function "Fn1" m); @@ -935,7 +935,7 @@ let test_builder () = group "malloc/free"; begin (* RUN: grep "call.*@malloc(i32 ptrtoint" < %t.ll - * RUN: grep "call.*@free(i8*" < %t.ll + * RUN: grep "call.*@free(i8\*" < %t.ll * RUN: grep "call.*@malloc(i32 %" < %t.ll *) let bb1 = append_block context "MallocBlock1" fn in @@ -947,7 +947,7 @@ let test_builder () = end; group "indirectbr"; begin - (* RUN: grep "indirectbr i8* blockaddress(@X7, %IBRBlock2), [label %IBRBlock2, label %IBRBlock3]" < %t.ll + (* RUN: grep "indirectbr i8\* blockaddress(@X7, %IBRBlock2), \[label %IBRBlock2, label %IBRBlock3\]" < %t.ll *) let bb1 = append_block context "IBRBlock1" fn in @@ -1054,10 +1054,10 @@ let test_builder () = (* RUN: grep "%build_alloca = alloca i32" < %t.ll * RUN: grep "%build_array_alloca = alloca i32, i32 %P2" < %t.ll - * RUN: grep "%build_load = load i32* %build_array_alloca" < %t.ll - * RUN: grep "store i32 %P2, i32* %build_alloca" < %t.ll - * RUN: grep "%build_gep = getelementptr i32* %build_array_alloca, i32 %P2" < %t.ll - * RUN: grep "%build_in_bounds_gep = getelementptr inbounds i32* %build_array_alloca, i32 %P2" < %t.ll + * RUN: grep "%build_load = load i32\* %build_array_alloca" < %t.ll + * RUN: grep "store i32 %P2, i32\* %build_alloca" < %t.ll + * RUN: grep "%build_gep = getelementptr i32\* %build_array_alloca, i32 %P2" < %t.ll + * RUN: grep "%build_in_bounds_gep = getelementptr inbounds i32\* %build_array_alloca, i32 %P2" < %t.ll * RUN: grep "%build_struct_gep = getelementptr inbounds.*%build_alloca2, i32 0, i32 1" < %t.ll *) let alloca = build_alloca i32_type "build_alloca" b in @@ -1106,14 +1106,14 @@ let test_builder () = * RUN: grep "%build_fptrunc2 = fptrunc double %build_sitofp to float" < %t.ll * RUN: grep "%build_fpext = fpext float %build_fptrunc to double" < %t.ll * RUN: grep "%build_fpext2 = fpext float %build_fptrunc to double" < %t.ll - * RUN: grep "%build_inttoptr = inttoptr i32 %P1 to i8*" < %t.ll - * RUN: grep "%build_ptrtoint = ptrtoint i8* %build_inttoptr to i64" < %t.ll - * RUN: grep "%build_ptrtoint2 = ptrtoint i8* %build_inttoptr to i64" < %t.ll + * RUN: grep "%build_inttoptr = inttoptr i32 %P1 to i8\*" < %t.ll + * RUN: grep "%build_ptrtoint = ptrtoint i8\* %build_inttoptr to i64" < %t.ll + * RUN: grep "%build_ptrtoint2 = ptrtoint i8\* %build_inttoptr to i64" < %t.ll * RUN: grep "%build_bitcast = bitcast i64 %build_ptrtoint to double" < %t.ll * 
RUN: grep "%build_bitcast2 = bitcast i64 %build_ptrtoint to double" < %t.ll * RUN: grep "%build_bitcast3 = bitcast i64 %build_ptrtoint to double" < %t.ll * RUN: grep "%build_bitcast4 = bitcast i64 %build_ptrtoint to double" < %t.ll - * RUN: grep "%build_pointercast = bitcast i8* %build_inttoptr to i16*" < %t.ll + * RUN: grep "%build_pointercast = bitcast i8\* %build_inttoptr to i16*" < %t.ll *) let inst28 = build_trunc p1 i8_type "build_trunc" atentry in let inst29 = build_zext inst28 i32_type "build_zext" atentry in @@ -1148,7 +1148,7 @@ let test_builder () = * RUN: grep "%build_fcmp_false = fcmp false float %F1, %F2" < %t.ll * RUN: grep "%build_fcmp_true = fcmp true float %F2, %F1" < %t.ll * RUN: grep "%build_is_null.*= icmp eq.*%X0,.*null" < %t.ll - * RUN: grep "%build_is_not_null = icmp ne i8* %X1, null" < %t.ll + * RUN: grep "%build_is_not_null = icmp ne i8\* %X1, null" < %t.ll * RUN: grep "%build_ptrdiff" < %t.ll *) ignore (build_icmp Icmp.Ne p1 p2 "build_icmp_ne" atentry); @@ -1167,7 +1167,7 @@ let test_builder () = group "miscellaneous"; begin (* RUN: grep "%build_call = tail call cc63 i32 @.*(i32 signext %P2, i32 %P1)" < %t.ll * RUN: grep "%build_select = select i1 %build_icmp, i32 %P1, i32 %P2" < %t.ll - * RUN: grep "%build_va_arg = va_arg i8** null, i32" < %t.ll + * RUN: grep "%build_va_arg = va_arg i8\*\* null, i32" < %t.ll * RUN: grep "%build_extractelement = extractelement <4 x i32> %Vec1, i32 %P2" < %t.ll * RUN: grep "%build_insertelement = insertelement <4 x i32> %Vec1, i32 %P1, i32 %P2" < %t.ll * RUN: grep "%build_shufflevector = shufflevector <4 x i32> %Vec1, <4 x i32> %Vec2, <4 x i32> " < %t.ll @@ -1240,8 +1240,8 @@ let test_builder () = end; group "dbg"; begin - (* RUN: grep "%dbg = add i32 %P1, %P2, !dbg !1" < %t.ll - * RUN: grep "!1 = metadata !{i32 2, i32 3, metadata !2, metadata !2}" < %t.ll + (* RUN: grep '%dbg = add i32 %P1, %P2, !dbg !1' < %t.ll + * RUN: grep '!1 = metadata !{i32 2, i32 3, metadata !2, metadata !2}' < %t.ll *) insist ((current_debug_location atentry) = None); diff --git a/test/Bitcode/blockaddress.ll b/test/Bitcode/blockaddress.ll index b9f3341..8ac54be 100644 --- a/test/Bitcode/blockaddress.ll +++ b/test/Bitcode/blockaddress.ll @@ -28,3 +28,18 @@ here: end: ret void } + +; PR13895 +define void @doitagain(i8** nocapture %pptr) { +; CHECK: define void @doitagain +entry: + br label %here + +here: + store i8* blockaddress(@doit, %here), i8** %pptr, align 8 +; CHECK: blockaddress(@doit, %here) + br label %end + +end: + ret void +} diff --git a/test/Bitcode/function-encoding-rel-operands.ll b/test/Bitcode/function-encoding-rel-operands.ll new file mode 100644 index 0000000..aedb0c3 --- /dev/null +++ b/test/Bitcode/function-encoding-rel-operands.ll @@ -0,0 +1,49 @@ +; Basic sanity test to check that instruction operands are encoded with +; relative IDs. 
+; RUN: llvm-as < %s | llvm-bcanalyzer -dump | FileCheck %s + +; CHECK: FUNCTION_BLOCK +; CHECK: INST_BINOP {{.*}}op0=1 op1=1 +; CHECK: INST_BINOP {{.*}}op0=1 op1=1 +; CHECK: INST_BINOP {{.*}}op0=1 op1=1 +; CHECK: INST_RET {{.*}}op0=1 +define i32 @test_int_binops(i32 %a) nounwind { +entry: + %0 = add i32 %a, %a + %1 = sub i32 %0, %0 + %2 = mul i32 %1, %1 + ret i32 %2 +} + + +; CHECK: FUNCTION_BLOCK +; CHECK: INST_CAST {{.*}}op0=1 +; CHECK: INST_BINOP {{.*}}op0=1 op1=1 +; CHECK: INST_BINOP {{.*}}op0=1 op1=1 +; CHECK: INST_BINOP {{.*}}op0=1 op1=1 +; CHECK: INST_BINOP {{.*}}op0=1 op1=1 +; CHECK: INST_RET {{.*}}op0=1 +define double @test_float_binops(i32 %a) nounwind { + %1 = sitofp i32 %a to double + %2 = fadd double %1, %1 + %3 = fsub double %2, %2 + %4 = fmul double %3, %3 + %5 = fdiv double %4, %4 + ret double %5 +} + + +; CHECK: FUNCTION_BLOCK +; skip checking operands of INST_INBOUNDS_GEP since that depends on ordering +; between literals and the formal parameters. +; CHECK: INST_INBOUNDS_GEP {{.*}} +; CHECK: INST_LOAD {{.*}}op0=1 {{.*}} +; CHECK: INST_CMP2 op0=1 {{.*}} +; CHECK: INST_RET {{.*}}op0=1 +define i1 @test_load(i32 %a, {i32, i32}* %ptr) nounwind { +entry: + %0 = getelementptr inbounds {i32, i32}* %ptr, i32 %a, i32 0 + %1 = load i32* %0 + %2 = icmp eq i32 %1, %a + ret i1 %2 +} diff --git a/test/BugPoint/crash-narrowfunctiontest.ll b/test/BugPoint/crash-narrowfunctiontest.ll index d080d9d..c812836 100644 --- a/test/BugPoint/crash-narrowfunctiontest.ll +++ b/test/BugPoint/crash-narrowfunctiontest.ll @@ -2,6 +2,7 @@ ; ; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext %s -output-prefix %t -bugpoint-crashcalls -silence-passes > /dev/null ; REQUIRES: loadable_module +; XFAIL: lto_on_osx define i32 @foo() { ret i32 1 } diff --git a/test/BugPoint/metadata.ll b/test/BugPoint/metadata.ll index 0eda566..6dc9574 100644 --- a/test/BugPoint/metadata.ll +++ b/test/BugPoint/metadata.ll @@ -1,6 +1,7 @@ ; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext %s -output-prefix %t -bugpoint-crashcalls -silence-passes > /dev/null ; RUN: llvm-dis %t-reduced-simplified.bc -o - | FileCheck %s ; REQUIRES: loadable_module +; XFAIL: lto_on_osx ; Bugpoint should keep the call's metadata attached to the call. diff --git a/test/BugPoint/remove_arguments_test.ll b/test/BugPoint/remove_arguments_test.ll index 29a03b8..5a45f84 100644 --- a/test/BugPoint/remove_arguments_test.ll +++ b/test/BugPoint/remove_arguments_test.ll @@ -1,6 +1,7 @@ ; RUN: bugpoint -load %llvmshlibdir/BugpointPasses%shlibext %s -output-prefix %t -bugpoint-crashcalls -silence-passes ; RUN: llvm-dis %t-reduced-simplified.bc -o - | FileCheck %s ; REQUIRES: loadable_module +; XFAIL: lto_on_osx ; Test to make sure that arguments are removed from the function if they are ; unnecessary. And clean up any types that that frees up too. diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 991cc9d..e10a532 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -7,6 +7,11 @@ configure_lit_site_cfg( ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg ) +# Don't include check-llvm into check-all without LLVM_BUILD_TOOLS. 
+if(NOT LLVM_BUILD_TOOLS) + set(EXCLUDE_FROM_ALL ON) +endif() + add_lit_testsuite(check-llvm "Running the LLVM regression tests" ${CMAKE_CURRENT_BINARY_DIR} PARAMS llvm_site_config=${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg @@ -14,10 +19,16 @@ add_lit_testsuite(check-llvm "Running the LLVM regression tests" DEPENDS UnitTests BugpointPasses LLVMHello llc lli llvm-ar llvm-as - llvm-diff + llvm-bcanalyzer llvm-diff llvm-dis llvm-extract llvm-dwarfdump - llvm-link llvm-mc llvm-nm llvm-objdump llvm-readobj + llvm-link + llvm-mc + llvm-mcmarkup + llvm-nm + llvm-objdump + llvm-readobj macho-dump opt + profile_rt-shared FileCheck count not yaml2obj ) diff --git a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll index 99db637..36d1575 100644 --- a/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll +++ b/test/CodeGen/ARM/2010-10-19-mc-elf-objheader.ll @@ -13,12 +13,12 @@ ; BASIC-NEXT: 0x00000000 ; BASIC-NEXT: 0x00000000 ; BASIC-NEXT: 0x0000003c -; BASIC-NEXT: 0x00000020 +; BASIC-NEXT: 0x00000022 ; BASIC-NEXT: 0x00000000 ; BASIC-NEXT: 0x00000000 ; BASIC-NEXT: 0x00000001 ; BASIC-NEXT: 0x00000000 -; BASIC-NEXT: '411f0000 00616561 62690001 15000000 06020801 09011401 15011703 18011901' +; BASIC-NEXT: '41210000 00616561 62690001 17000000 060a0741 08010902 14011501 17031801 1901' ; CORTEXA8: .ARM.attributes ; CORTEXA8-NEXT: 0x70000003 diff --git a/test/CodeGen/ARM/2010-12-07-PEIBug.ll b/test/CodeGen/ARM/2010-12-07-PEIBug.ll index 770ad44..4879f4e 100644 --- a/test/CodeGen/ARM/2010-12-07-PEIBug.ll +++ b/test/CodeGen/ARM/2010-12-07-PEIBug.ll @@ -1,4 +1,4 @@ -; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a8 | FileCheck %s +; RUN: llc < %s -mtriple=thumbv7-apple-ios -mcpu=cortex-a9 | FileCheck %s ; rdar://8728956 define hidden void @foo() nounwind ssp { diff --git a/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll b/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll index 3e78c46..101a913 100644 --- a/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll +++ b/test/CodeGen/ARM/2011-06-16-TailCallByVal.ll @@ -1,4 +1,9 @@ ; RUN: llc < %s -arm-tail-calls=1 | FileCheck %s + +; A tail call inside a function where a byval argument is split between +; registers and the stack is currently unsupported. +; XFAIL: * + target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32" target triple = "thumbv7-apple-ios" diff --git a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll index 42b1491..6e0ef96 100644 --- a/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll +++ b/test/CodeGen/ARM/2011-10-26-memset-with-neon.ll @@ -9,8 +9,8 @@ entry: } ; Trigger multiple NEON stores. 
-; CHECK: vstmia -; CHECK-NEXT: vstmia +; CHECK: vst1.64 +; CHECK-NEXT: vst1.64 define void @f_0_40(i8* nocapture %c) nounwind optsize { entry: call void @llvm.memset.p0i8.i64(i8* %c, i8 0, i64 40, i32 16, i1 false) diff --git a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll index 89c01d5..f9ede74 100644 --- a/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll +++ b/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll @@ -8,12 +8,12 @@ define void @test_sqrt(<4 x float>* %X) nounwind { ; CHECK: movw r1, :lower16:{{.*}} ; CHECK: movt r1, :upper16:{{.*}} -; CHECK: vldmia r1 +; CHECK: vld1.64 {{.*}}, [r1, :128] ; CHECK: vsqrt.f32 {{s[0-9]+}}, {{s[0-9]+}} ; CHECK: vsqrt.f32 {{s[0-9]+}}, {{s[0-9]+}} ; CHECK: vsqrt.f32 {{s[0-9]+}}, {{s[0-9]+}} ; CHECK: vsqrt.f32 {{s[0-9]+}}, {{s[0-9]+}} -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 {{.*}} L.entry: %0 = load <4 x float>* @A, align 16 @@ -31,21 +31,21 @@ define void @test_cos(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}cosf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}cosf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}cosf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}cosf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -62,21 +62,21 @@ define void @test_exp(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}expf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}expf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}expf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}expf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -93,21 +93,21 @@ define void @test_exp2(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}exp2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}exp2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}exp2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}exp2f -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -124,21 +124,21 @@ define void @test_log10(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log10f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log10f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log10f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log10f -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -155,21 +155,21 @@ 
define void @test_log(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}logf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}logf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}logf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}logf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -186,21 +186,21 @@ define void @test_log2(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log2f -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}log2f -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -218,21 +218,21 @@ define void @test_pow(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}powf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}powf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}powf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}powf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: @@ -252,10 +252,10 @@ define void @test_powi(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia [[reg0]], {{.*}} +; CHECK: vld1.64 {{.*}}, :128 ; CHECK: vmul.f32 {{.*}} -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: @@ -275,21 +275,21 @@ define void @test_sin(<4 x float>* %X) nounwind { ; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} ; CHECK: movt [[reg0]], :upper16:{{.*}} -; CHECK: vldmia r{{[0-9][0-9]?}}, {{.*}} +; CHECK: vld1.64 -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}sinf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}sinf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}sinf -; CHECK: {{[mov|vmov.32]}} r0, +; CHECK: {{v?mov(.32)?}} r0, ; CHECK: bl {{.*}}sinf -; CHECK: vstmia {{.*}} +; CHECK: vst1.64 L.entry: %0 = load <4 x float>* @A, align 16 @@ -300,3 +300,34 @@ L.entry: declare <4 x float> @llvm.sin.v4f32(<4 x float>) nounwind readonly +define void @test_floor(<4 x float>* %X) nounwind { + +; CHECK: test_floor: + +; CHECK: movw [[reg0:r[0-9]+]], :lower16:{{.*}} +; CHECK: movt [[reg0]], :upper16:{{.*}} +; CHECK: vld1.64 + +; CHECK: {{v?mov(.32)?}} r0, +; CHECK: bl {{.*}}floorf + +; CHECK: {{v?mov(.32)?}} r0, +; CHECK: bl {{.*}}floorf + +; CHECK: {{v?mov(.32)?}} r0, +; CHECK: bl {{.*}}floorf + +; CHECK: {{v?mov(.32)?}} r0, +; CHECK: bl {{.*}}floorf + +; CHECK: vst1.64 + +L.entry: + %0 = load <4 x float>* @A, align 16 + %1 = call <4 x float> @llvm.floor.v4f32(<4 x float> %0) + store <4 x float> %1, <4 x float>* %X, align 16 + ret void +} + 
+declare <4 x float> @llvm.floor.v4f32(<4 x float>) nounwind readonly + diff --git a/test/CodeGen/ARM/2012-05-04-vmov.ll b/test/CodeGen/ARM/2012-05-04-vmov.ll new file mode 100644 index 0000000..d52ef2c --- /dev/null +++ b/test/CodeGen/ARM/2012-05-04-vmov.ll @@ -0,0 +1,11 @@ +; RUN: llc -O1 -march=arm -mcpu=cortex-a9 < %s | FileCheck -check-prefix=A9-CHECK %s +; RUN: llc -O1 -march=arm -mcpu=swift < %s | FileCheck -check-prefix=SWIFT-CHECK %s +; Check that swift doesn't use vmov.32. . + +define <2 x i32> @testuvec(<2 x i32> %A, <2 x i32> %B) nounwind { +entry: + %div = udiv <2 x i32> %A, %B + ret <2 x i32> %div +; A9-CHECK: vmov.32 +; SWIFT-CHECK-NOT: vmov.32 +} diff --git a/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll b/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll new file mode 100644 index 0000000..dd67843 --- /dev/null +++ b/test/CodeGen/ARM/2012-05-10-PreferVMOVtoVDUP32.ll @@ -0,0 +1,14 @@ +; RUN: llc -march=arm -mcpu=swift < %s | FileCheck %s +; + +define void @f(i32 %x, i32* %p) nounwind ssp { +entry: +; CHECK-NOT: vdup.32 + %vecinit.i = insertelement <2 x i32> undef, i32 %x, i32 0 + %vecinit1.i = insertelement <2 x i32> %vecinit.i, i32 %x, i32 1 + %0 = bitcast i32* %p to i8* + tail call void @llvm.arm.neon.vst1.v2i32(i8* %0, <2 x i32> %vecinit1.i, i32 4) + ret void +} + +declare void @llvm.arm.neon.vst1.v2i32(i8*, <2 x i32>, i32) nounwind diff --git a/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll b/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll new file mode 100644 index 0000000..ec7f72d --- /dev/null +++ b/test/CodeGen/ARM/2012-08-27-CopyPhysRegCrash.ll @@ -0,0 +1,129 @@ +; RUN: llc < %s -mcpu=cortex-a8 -march=thumb +; Test that this doesn't crash. +; + +target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32" +target triple = "thumbv7-apple-ios5.1.0" + +declare { <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.neon.vld3.v16i8(i8*, i32) nounwind readonly + +declare void @llvm.arm.neon.vst1.v16i8(i8*, <16 x i8>, i32) nounwind + +define void @findEdges(i8*) nounwind ssp { + %2 = icmp sgt i32 undef, 0 + br i1 %2, label %5, label %3 + +;
    ")); + } +}; + +namespace { + +// FIXME: In order to JIT an empty module, there needs to be +// an interface to ExecutionEngine that forces compilation but +// does require retrieval of a pointer to a function/global. +/* +TEST_F(MCJITTest, empty_module) { + createJIT(M.take()); + //EXPECT_NE(0, TheJIT->getObjectImage()) + // << "Unable to generate executable loaded object image"; +} +*/ + +TEST_F(MCJITTest, global_variable) { + SKIP_UNSUPPORTED_PLATFORM; + + int initialValue = 5; + GlobalValue *Global = insertGlobalInt32(M.get(), "test_global", initialValue); + createJIT(M.take()); + void *globalPtr = TheJIT->getPointerToGlobal(Global); + static_cast(MM)->invalidateInstructionCache(); + EXPECT_TRUE(0 != globalPtr) + << "Unable to get pointer to global value from JIT"; + + EXPECT_EQ(initialValue, *(int32_t*)globalPtr) + << "Unexpected initial value of global"; +} + +TEST_F(MCJITTest, add_function) { + SKIP_UNSUPPORTED_PLATFORM; + + Function *F = insertAddFunction(M.get()); + createJIT(M.take()); + void *addPtr = TheJIT->getPointerToFunction(F); + static_cast(MM)->invalidateInstructionCache(); + EXPECT_TRUE(0 != addPtr) + << "Unable to get pointer to function from JIT"; + + int (*AddPtrTy)(int, int) = (int(*)(int, int))(intptr_t)addPtr; + EXPECT_EQ(0, AddPtrTy(0, 0)); + EXPECT_EQ(3, AddPtrTy(1, 2)); + EXPECT_EQ(-5, AddPtrTy(-2, -3)); +} + +TEST_F(MCJITTest, run_main) { + SKIP_UNSUPPORTED_PLATFORM; + + int rc = 6; + Function *Main = insertMainFunction(M.get(), 6); + createJIT(M.take()); + void *vPtr = TheJIT->getPointerToFunction(Main); + static_cast(MM)->invalidateInstructionCache(); + EXPECT_TRUE(0 != vPtr) + << "Unable to get pointer to main() from JIT"; + + int (*FuncPtr)(void) = (int(*)(void))(intptr_t)vPtr; + int returnCode = FuncPtr(); + EXPECT_EQ(returnCode, rc); +} + +TEST_F(MCJITTest, return_global) { + SKIP_UNSUPPORTED_PLATFORM; + + int32_t initialNum = 7; + GlobalVariable *GV = insertGlobalInt32(M.get(), "myglob", initialNum); + + Function *ReturnGlobal = startFunction(M.get(), + "ReturnGlobal"); + Value *ReadGlobal = Builder.CreateLoad(GV); + endFunctionWithRet(ReturnGlobal, ReadGlobal); + + createJIT(M.take()); + void *rgvPtr = TheJIT->getPointerToFunction(ReturnGlobal); + static_cast(MM)->invalidateInstructionCache(); + EXPECT_TRUE(0 != rgvPtr); + + int32_t(*FuncPtr)(void) = (int32_t(*)(void))(intptr_t)rgvPtr; + EXPECT_EQ(initialNum, FuncPtr()) + << "Invalid value for global returned from JITted function"; +} + +// FIXME: This case fails due to a bug with getPointerToGlobal(). +// The bug is due to MCJIT not having an implementation of getPointerToGlobal() +// which results in falling back on the ExecutionEngine implementation that +// allocates a new memory block for the global instead of using the same +// global variable that is emitted by MCJIT. Hence, the pointer (gvPtr below) +// has the correct initial value, but updates to the real global (accessed by +// JITted code) are not propagated. Instead, getPointerToGlobal() should return +// a pointer into the loaded ObjectImage to reference the emitted global. 
+/* +TEST_F(MCJITTest, increment_global) { + SKIP_UNSUPPORTED_PLATFORM; + + int32_t initialNum = 5; + Function *IncrementGlobal = startFunction(M.get(), "IncrementGlobal"); + GlobalVariable *GV = insertGlobalInt32(M.get(), "my_global", initialNum); + Value *DerefGV = Builder.CreateLoad(GV); + Value *AddResult = Builder.CreateAdd(DerefGV, + ConstantInt::get(Context, APInt(32, 1))); + Builder.CreateStore(AddResult, GV); + endFunctionWithRet(IncrementGlobal, AddResult); + + createJIT(M.take()); + void *gvPtr = TheJIT->getPointerToGlobal(GV); + EXPECT_EQ(initialNum, *(int32_t*)gvPtr); + + void *vPtr = TheJIT->getPointerToFunction(IncrementGlobal); + EXPECT_TRUE(0 != vPtr) + << "Unable to get pointer to main() from JIT"; + + int32_t(*FuncPtr)(void) = (int32_t(*)(void))(intptr_t)vPtr; + + for(int i = 1; i < 3; ++i) { + int32_t result = FuncPtr(); + EXPECT_EQ(initialNum + i, result); // OK + EXPECT_EQ(initialNum + i, *(int32_t*)gvPtr); // FAILS + } +} +*/ + +TEST_F(MCJITTest, multiple_functions) { + SKIP_UNSUPPORTED_PLATFORM; + + unsigned int numLevels = 23; + int32_t innerRetVal= 5; + + Function *Inner = startFunction(M.get(), "Inner"); + endFunctionWithRet(Inner, ConstantInt::get(Context, APInt(32, innerRetVal))); + + Function *Outer; + for (unsigned int i = 0; i < numLevels; ++i) { + std::stringstream funcName; + funcName << "level_" << i; + Outer = startFunction(M.get(), funcName.str()); + Value *innerResult = Builder.CreateCall(Inner); + endFunctionWithRet(Outer, innerResult); + + Inner = Outer; + } + + createJIT(M.take()); + void *vPtr = TheJIT->getPointerToFunction(Outer); + static_cast(MM)->invalidateInstructionCache(); + EXPECT_TRUE(0 != vPtr) + << "Unable to get pointer to outer function from JIT"; + + int32_t(*FuncPtr)(void) = (int32_t(*)(void))(intptr_t)vPtr; + EXPECT_EQ(innerRetVal, FuncPtr()) + << "Incorrect result returned from function"; +} + +// FIXME: ExecutionEngine has no support empty modules +/* +TEST_F(MCJITTest, multiple_empty_modules) { + SKIP_UNSUPPORTED_PLATFORM; + + createJIT(M.take()); + // JIT-compile + EXPECT_NE(0, TheJIT->getObjectImage()) + << "Unable to generate executable loaded object image"; + + TheJIT->addModule(createEmptyModule("")); + TheJIT->addModule(createEmptyModule("")); + + // JIT again + EXPECT_NE(0, TheJIT->getObjectImage()) + << "Unable to generate executable loaded object image"; +} +*/ + +// FIXME: MCJIT must support multiple modules +/* +TEST_F(MCJITTest, multiple_modules) { + SKIP_UNSUPPORTED_PLATFORM; + + Function *Callee = insertAddFunction(M.get()); + createJIT(M.take()); + + // caller function is defined in a different module + M.reset(createEmptyModule("")); + + Function *CalleeRef = insertExternalReferenceToFunction(M.get(), Callee); + Function *Caller = insertSimpleCallFunction(M.get(), CalleeRef); + + TheJIT->addModule(M.take()); + + // get a function pointer in a module that was not used in EE construction + void *vPtr = TheJIT->getPointerToFunction(Caller); + EXPECT_NE(0, vPtr) + << "Unable to get pointer to caller function from JIT"; + + int(*FuncPtr)(int, int) = (int(*)(int, int))(intptr_t)vPtr; + EXPECT_EQ(0, FuncPtr(0, 0)); + EXPECT_EQ(30, FuncPtr(10, 20)); + EXPECT_EQ(-30, FuncPtr(-10, -20)); + + // ensure caller is destroyed before callee (free use before def) + M.reset(); +} +*/ + +} diff --git a/unittests/ExecutionEngine/MCJIT/MCJITTestBase.h b/unittests/ExecutionEngine/MCJIT/MCJITTestBase.h new file mode 100644 index 0000000..9b4a4ac --- /dev/null +++ b/unittests/ExecutionEngine/MCJIT/MCJITTestBase.h @@ -0,0 +1,245 @@ 
+//===- MCJITTestBase.h - Common base class for MCJIT Unit tests ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class implements common functionality required by the MCJIT unit tests, +// as well as logic to skip tests on unsupported architectures and operating +// systems. +// +//===----------------------------------------------------------------------===// + + +#ifndef MCJIT_TEST_BASE_H +#define MCJIT_TEST_BASE_H + +#include "llvm/ADT/Triple.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Config/config.h" +#include "llvm/ExecutionEngine/ExecutionEngine.h" +#include "llvm/Function.h" +#include "llvm/IRBuilder.h" +#include "llvm/LLVMContext.h" +#include "llvm/Module.h" +#include "llvm/Support/CodeGen.h" +#include "llvm/Support/Host.h" +#include "llvm/Support/TargetSelect.h" +#include "llvm/TypeBuilder.h" + +#include "SectionMemoryManager.h" + +// Used to skip tests on unsupported architectures and operating systems. +// To skip a test, add this macro at the top of a test-case in a suite that +// inherits from MCJITTestBase. See MCJITTest.cpp for examples. +#define SKIP_UNSUPPORTED_PLATFORM \ + do \ + if (!ArchSupportsMCJIT() || !OSSupportsMCJIT()) \ + return; \ + while(0); + +namespace llvm { + +class MCJITTestBase { +protected: + + MCJITTestBase() + : OptLevel(CodeGenOpt::None) + , RelocModel(Reloc::Default) + , CodeModel(CodeModel::Default) + , MArch("") + , Builder(Context) + , MM(new SectionMemoryManager) + , HostTriple(LLVM_HOSTTRIPLE) + { + InitializeNativeTarget(); + InitializeNativeTargetAsmPrinter(); + +#ifdef LLVM_ON_WIN32 + // On Windows, generate ELF objects by specifying "-elf" in triple + HostTriple += "-elf"; +#endif // LLVM_ON_WIN32 + HostTriple = Triple::normalize(HostTriple); + + // The architectures below are known to be compatible with MCJIT as they + // are copied from test/ExecutionEngine/MCJIT/lit.local.cfg and should be + // kept in sync. + SupportedArchs.push_back(Triple::arm); + SupportedArchs.push_back(Triple::mips); + SupportedArchs.push_back(Triple::x86); + SupportedArchs.push_back(Triple::x86_64); + + // The operating systems below are known to be incompatible with MCJIT as + // they are copied from the test/ExecutionEngine/MCJIT/lit.local.cfg and + // should be kept in sync. 
+ UnsupportedOSs.push_back(Triple::Cygwin); + UnsupportedOSs.push_back(Triple::Darwin); + } + + /// Returns true if the host architecture is known to support MCJIT + bool ArchSupportsMCJIT() { + Triple Host(HostTriple); + if (std::find(SupportedArchs.begin(), SupportedArchs.end(), Host.getArch()) + == SupportedArchs.end()) { + return false; + } + return true; + } + + /// Returns true if the host OS is known to support MCJIT + bool OSSupportsMCJIT() { + Triple Host(HostTriple); + if (std::find(UnsupportedOSs.begin(), UnsupportedOSs.end(), Host.getOS()) + == UnsupportedOSs.end()) { + return true; + } + return false; + } + + Module *createEmptyModule(StringRef Name) { + Module * M = new Module(Name, Context); + M->setTargetTriple(Triple::normalize(HostTriple)); + return M; + } + + template + Function *startFunction(Module *M, StringRef Name) { + Function *Result = Function::Create( + TypeBuilder::get(Context), + GlobalValue::ExternalLinkage, Name, M); + + BasicBlock *BB = BasicBlock::Create(Context, Name, Result); + Builder.SetInsertPoint(BB); + + return Result; + } + + void endFunctionWithRet(Function *Func, Value *RetValue) { + Builder.CreateRet(RetValue); + } + + // Inserts a simple function that invokes Callee and takes the same arguments: + // int Caller(...) { return Callee(...); } + template + Function *insertSimpleCallFunction(Module *M, Function *Callee) { + Function *Result = startFunction(M, "caller"); + + SmallVector CallArgs; + + Function::arg_iterator arg_iter = Result->arg_begin(); + for(;arg_iter != Result->arg_end(); ++arg_iter) + CallArgs.push_back(arg_iter); + + Value *ReturnCode = Builder.CreateCall(Callee, CallArgs); + Builder.CreateRet(ReturnCode); + return Result; + } + + // Inserts a function named 'main' that returns a uint32_t: + // int32_t main() { return X; } + // where X is given by returnCode + Function *insertMainFunction(Module *M, uint32_t returnCode) { + Function *Result = startFunction(M, "main"); + + Value *ReturnVal = ConstantInt::get(Context, APInt(32, returnCode)); + endFunctionWithRet(Result, ReturnVal); + + return Result; + } + + // Inserts a function + // int32_t add(int32_t a, int32_t b) { return a + b; } + // in the current module and returns a pointer to it. 
+ Function *insertAddFunction(Module *M, StringRef Name = "add") { + Function *Result = startFunction(M, Name); + + Function::arg_iterator args = Result->arg_begin(); + Value *Arg1 = args; + Value *Arg2 = ++args; + Value *AddResult = Builder.CreateAdd(Arg1, Arg2); + + endFunctionWithRet(Result, AddResult); + + return Result; + } + + // Inserts a declaration of a function defined elsewhere + Function *insertExternalReferenceToFunction(Module *M, StringRef Name, + FunctionType *FuncTy) { + Function *Result = Function::Create(FuncTy, + GlobalValue::ExternalLinkage, + Name, M); + return Result; + } + + // Inserts a declaration of a function defined elsewhere + Function *insertExternalReferenceToFunction(Module *M, Function *Func) { + Function *Result = Function::Create(Func->getFunctionType(), + GlobalValue::AvailableExternallyLinkage, + Func->getName(), M); + return Result; + } + + // Inserts a global variable of type int32 + GlobalVariable *insertGlobalInt32(Module *M, + StringRef name, + int32_t InitialValue) { + Type *GlobalTy = TypeBuilder, true>::get(Context); + Constant *IV = ConstantInt::get(Context, APInt(32, InitialValue)); + GlobalVariable *Global = new GlobalVariable(*M, + GlobalTy, + false, + GlobalValue::ExternalLinkage, + IV, + name); + return Global; + } + + void createJIT(Module *M) { + + // Due to the EngineBuilder constructor, it is required to have a Module + // in order to construct an ExecutionEngine (i.e. MCJIT) + assert(M != 0 && "a non-null Module must be provided to create MCJIT"); + + EngineBuilder EB(M); + std::string Error; + TheJIT.reset(EB.setEngineKind(EngineKind::JIT) + .setUseMCJIT(true) /* can this be folded into the EngineKind enum? */ + .setJITMemoryManager(MM) + .setErrorStr(&Error) + .setOptLevel(CodeGenOpt::None) + .setAllocateGVsWithCode(false) /*does this do anything?*/ + .setCodeModel(CodeModel::JITDefault) + .setRelocationModel(Reloc::Default) + .setMArch(MArch) + .setMCPU(sys::getHostCPUName()) + //.setMAttrs(MAttrs) + .create()); + // At this point, we cannot modify the module any more. + assert(TheJIT.get() != NULL && "error creating MCJIT with EngineBuilder"); + } + + LLVMContext Context; + CodeGenOpt::Level OptLevel; + Reloc::Model RelocModel; + CodeModel::Model CodeModel; + StringRef MArch; + SmallVector MAttrs; + OwningPtr TM; + OwningPtr TheJIT; + IRBuilder<> Builder; + JITMemoryManager *MM; + + std::string HostTriple; + SmallVector SupportedArchs; + SmallVector UnsupportedOSs; + + OwningPtr M; +}; + +} // namespace llvm + +#endif // MCJIT_TEST_H diff --git a/unittests/ExecutionEngine/MCJIT/MCJITTests.def b/unittests/ExecutionEngine/MCJIT/MCJITTests.def new file mode 100644 index 0000000..aabd224 --- /dev/null +++ b/unittests/ExecutionEngine/MCJIT/MCJITTests.def @@ -0,0 +1 @@ +EXPORTS diff --git a/unittests/ExecutionEngine/MCJIT/Makefile b/unittests/ExecutionEngine/MCJIT/Makefile new file mode 100644 index 0000000..454f830 --- /dev/null +++ b/unittests/ExecutionEngine/MCJIT/Makefile @@ -0,0 +1,18 @@ +##===- unittests/ExecutionEngine/MCJIT/Makefile ------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## + +LEVEL = ../../.. 
+TESTNAME = MCJIT +LINK_COMPONENTS := core jit mcjit native support + +include $(LEVEL)/Makefile.config +include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest + +# Permit these tests to use the MCJIT's symbolic lookup. +LD.Flags += $(RDYNAMIC) diff --git a/unittests/ExecutionEngine/MCJIT/SectionMemoryManager.cpp b/unittests/ExecutionEngine/MCJIT/SectionMemoryManager.cpp new file mode 100644 index 0000000..d6baf3c --- /dev/null +++ b/unittests/ExecutionEngine/MCJIT/SectionMemoryManager.cpp @@ -0,0 +1,143 @@ +//===-- SectionMemoryManager.cpp - The memory manager for MCJIT -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the implementation of the section-based memory manager +// used by MCJIT. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Config/config.h" +#include "llvm/Support/DynamicLibrary.h" +#include "llvm/Support/MathExtras.h" + +#include "SectionMemoryManager.h" + +#ifdef __linux__ +// These includes are used by SectionMemoryManager::getPointerToNamedFunction() +// for Glibc trickery. See the comments in this function for more information. +#ifdef HAVE_SYS_STAT_H +#include +#endif +#include +#include +#endif + +namespace llvm { + +uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size, + unsigned Alignment, + unsigned SectionID) { + if (!Alignment) + Alignment = 16; + // Ensure that enough memory is requested to allow aligning. + size_t NumElementsAligned = 1 + (Size + Alignment - 1)/Alignment; + uint8_t *Addr = (uint8_t*)calloc(NumElementsAligned, Alignment); + + // Honour the alignment requirement. + uint8_t *AlignedAddr = (uint8_t*)RoundUpToAlignment((uint64_t)Addr, Alignment); + + // Store the original address from calloc so we can free it later. + AllocatedDataMem.push_back(sys::MemoryBlock(Addr, NumElementsAligned*Alignment)); + return AlignedAddr; +} + +uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size, + unsigned Alignment, + unsigned SectionID) { + if (!Alignment) + Alignment = 16; + unsigned NeedAllocate = Alignment * ((Size + Alignment - 1)/Alignment + 1); + uintptr_t Addr = 0; + // Look in the list of free code memory regions and use a block there if one + // is available. + for (int i = 0, e = FreeCodeMem.size(); i != e; ++i) { + sys::MemoryBlock &MB = FreeCodeMem[i]; + if (MB.size() >= NeedAllocate) { + Addr = (uintptr_t)MB.base(); + uintptr_t EndOfBlock = Addr + MB.size(); + // Align the address. + Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1); + // Store the remaining free memory block. + FreeCodeMem[i] = sys::MemoryBlock((void*)(Addr + Size), + EndOfBlock - Addr - Size); + return (uint8_t*)Addr; + } + } + + // No pre-allocated free block was large enough. Allocate a new memory region. + sys::MemoryBlock MB = sys::Memory::AllocateRWX(NeedAllocate, 0, 0); + + AllocatedCodeMem.push_back(MB); + Addr = (uintptr_t)MB.base(); + uintptr_t EndOfBlock = Addr + MB.size(); + // Align the address. + Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1); + // The AllocateRWX may allocate much more memory than we need. In this case, + // we store the unused memory as a free memory block. 
+ unsigned FreeSize = EndOfBlock-Addr-Size; + if (FreeSize > 16) + FreeCodeMem.push_back(sys::MemoryBlock((void*)(Addr + Size), FreeSize)); + + // Return aligned address + return (uint8_t*)Addr; +} + +void SectionMemoryManager::invalidateInstructionCache() { + for (int i = 0, e = AllocatedCodeMem.size(); i != e; ++i) + sys::Memory::InvalidateInstructionCache(AllocatedCodeMem[i].base(), + AllocatedCodeMem[i].size()); +} + +void *SectionMemoryManager::getPointerToNamedFunction(const std::string &Name, + bool AbortOnFailure) { +#if defined(__linux__) + //===--------------------------------------------------------------------===// + // Function stubs that are invoked instead of certain library calls + // + // Force the following functions to be linked in to anything that uses the + // JIT. This is a hack designed to work around the all-too-clever Glibc + // strategy of making these functions work differently when inlined vs. when + // not inlined, and hiding their real definitions in a separate archive file + // that the dynamic linker can't see. For more info, search for + // 'libc_nonshared.a' on Google, or read http://llvm.org/PR274. + if (Name == "stat") return (void*)(intptr_t)&stat; + if (Name == "fstat") return (void*)(intptr_t)&fstat; + if (Name == "lstat") return (void*)(intptr_t)&lstat; + if (Name == "stat64") return (void*)(intptr_t)&stat64; + if (Name == "fstat64") return (void*)(intptr_t)&fstat64; + if (Name == "lstat64") return (void*)(intptr_t)&lstat64; + if (Name == "atexit") return (void*)(intptr_t)&atexit; + if (Name == "mknod") return (void*)(intptr_t)&mknod; +#endif // __linux__ + + const char *NameStr = Name.c_str(); + void *Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr); + if (Ptr) return Ptr; + + // If it wasn't found and if it starts with an underscore ('_') character, + // try again without the underscore. + if (NameStr[0] == '_') { + Ptr = sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr+1); + if (Ptr) return Ptr; + } + + if (AbortOnFailure) + report_fatal_error("Program used external function '" + Name + + "' which could not be resolved!"); + return 0; +} + +SectionMemoryManager::~SectionMemoryManager() { + for (unsigned i = 0, e = AllocatedCodeMem.size(); i != e; ++i) + sys::Memory::ReleaseRWX(AllocatedCodeMem[i]); + for (unsigned i = 0, e = AllocatedDataMem.size(); i != e; ++i) + free(AllocatedDataMem[i].base()); +} + +} // namespace llvm diff --git a/unittests/ExecutionEngine/MCJIT/SectionMemoryManager.h b/unittests/ExecutionEngine/MCJIT/SectionMemoryManager.h new file mode 100644 index 0000000..e44217c --- /dev/null +++ b/unittests/ExecutionEngine/MCJIT/SectionMemoryManager.h @@ -0,0 +1,118 @@ +//===-- SectionMemoryManager.h - Memory allocator for MCJIT -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of a section-based memory manager used by +// the MCJIT execution engine. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_EXECUTION_ENGINE_SECTION_MEMORY_MANAGER_H +#define LLVM_EXECUTION_ENGINE_SECTION_MEMORY_MANAGER_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/ExecutionEngine/JITMemoryManager.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/Memory.h" + +namespace llvm { + +// Section-based memory manager for MCJIT +class SectionMemoryManager : public JITMemoryManager { + +public: + + SectionMemoryManager() { } + ~SectionMemoryManager(); + + virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, + unsigned SectionID); + + virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, + unsigned SectionID); + + virtual void *getPointerToNamedFunction(const std::string &Name, + bool AbortOnFailure = true); + + // Invalidate instruction cache for code sections. Some platforms with + // separate data cache and instruction cache require explicit cache flush, + // otherwise JIT code manipulations (like resolved relocations) will get to + // the data cache but not to the instruction cache. + virtual void invalidateInstructionCache(); + +private: + + SmallVector AllocatedDataMem; + SmallVector AllocatedCodeMem; + SmallVector FreeCodeMem; + +public: + + /// + /// Functions below are not used by MCJIT, but must be implemented because + /// they are declared as pure virtuals in the base class. + /// + + virtual void setMemoryWritable() { + llvm_unreachable("Unexpected call!"); + } + virtual void setMemoryExecutable() { + llvm_unreachable("Unexpected call!"); + } + virtual void setPoisonMemory(bool poison) { + llvm_unreachable("Unexpected call!"); + } + virtual void AllocateGOT() { + llvm_unreachable("Unexpected call!"); + } + virtual uint8_t *getGOTBase() const { + llvm_unreachable("Unexpected call!"); + return 0; + } + virtual uint8_t *startFunctionBody(const Function *F, + uintptr_t &ActualSize){ + llvm_unreachable("Unexpected call!"); + return 0; + } + virtual uint8_t *allocateStub(const GlobalValue* F, unsigned StubSize, + unsigned Alignment) { + llvm_unreachable("Unexpected call!"); + return 0; + } + virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart, + uint8_t *FunctionEnd) { + llvm_unreachable("Unexpected call!"); + } + virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) { + llvm_unreachable("Unexpected call!"); + return 0; + } + virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) { + llvm_unreachable("Unexpected call!"); + return 0; + } + virtual void deallocateFunctionBody(void *Body) { + llvm_unreachable("Unexpected call!"); + } + virtual uint8_t *startExceptionTable(const Function *F, + uintptr_t &ActualSize) { + llvm_unreachable("Unexpected call!"); + return 0; + } + virtual void endExceptionTable(const Function *F, uint8_t *TableStart, + uint8_t *TableEnd, uint8_t *FrameRegister) { + llvm_unreachable("Unexpected call!"); + } + virtual void deallocateExceptionTable(void *ET) { + llvm_unreachable("Unexpected call!"); + } +}; + +} + +#endif // LLVM_EXECUTION_ENGINE_SECTION_MEMORY_MANAGER_H diff --git a/unittests/ExecutionEngine/Makefile b/unittests/ExecutionEngine/Makefile index 63508d2..ca11956 100644 --- a/unittests/ExecutionEngine/Makefile +++ b/unittests/ExecutionEngine/Makefile @@ -10,7 +10,7 @@ LEVEL = ../.. 
TESTNAME = ExecutionEngine LINK_COMPONENTS :=interpreter -PARALLEL_DIRS = JIT +PARALLEL_DIRS = JIT MCJIT include $(LEVEL)/Makefile.config include $(LLVM_SRC_ROOT)/unittests/Makefile.unittest diff --git a/unittests/Support/AlignOfTest.cpp b/unittests/Support/AlignOfTest.cpp index 6f57668..f01e660 100644 --- a/unittests/Support/AlignOfTest.cpp +++ b/unittests/Support/AlignOfTest.cpp @@ -1,4 +1,4 @@ -//===- llvm/unittest/Support/AlignOfTest.cpp - Alignment utility tests ----===// +//=== - llvm/unittest/Support/AlignOfTest.cpp - Alignment utility tests ----===// // // The LLVM Compiler Infrastructure // @@ -23,31 +23,25 @@ namespace { #endif // Define some fixed alignment types to use in these tests. -#if __cplusplus == 201103L || __has_feature(cxx_alignas) -typedef char alignas(1) A1; -typedef char alignas(2) A2; -typedef char alignas(4) A4; -typedef char alignas(8) A8; -#elif defined(__clang__) || defined(__GNUC__) -typedef char A1 __attribute__((aligned(1))); -typedef char A2 __attribute__((aligned(2))); -typedef char A4 __attribute__((aligned(4))); -typedef char A8 __attribute__((aligned(8))); +#if __has_feature(cxx_alignas) +struct alignas(1) A1 { }; +struct alignas(2) A2 { }; +struct alignas(4) A4 { }; +struct alignas(8) A8 { }; +#elif defined(__GNUC__) +struct A1 { } __attribute__((aligned(1))); +struct A2 { } __attribute__((aligned(2))); +struct A4 { } __attribute__((aligned(4))); +struct A8 { } __attribute__((aligned(8))); #elif defined(_MSC_VER) -typedef __declspec(align(1)) char A1; -typedef __declspec(align(2)) char A2; -typedef __declspec(align(4)) char A4; -typedef __declspec(align(8)) char A8; +__declspec(align(1)) struct A1 { }; +__declspec(align(2)) struct A2 { }; +__declspec(align(4)) struct A4 { }; +__declspec(align(8)) struct A8 { }; #else # error No supported align as directive. #endif -// Wrap the forced aligned types in structs to hack around compiler bugs. -struct SA1 { A1 a; }; -struct SA2 { A2 a; }; -struct SA4 { A4 a; }; -struct SA8 { A8 a; }; - struct S1 {}; struct S2 { char a; }; struct S3 { int x; }; @@ -72,6 +66,17 @@ struct V6 : S1 { virtual ~V6(); }; struct V7 : virtual V2, virtual V6 { virtual ~V7(); }; struct V8 : V5, virtual V6, V7 { double zz; virtual ~V8(); }; +double S6::f() { return 0.0; } +float D2::g() { return 0.0f; } +V1::~V1() {} +V2::~V2() {} +V3::~V3() {} +V4::~V4() {} +V5::~V5() {} +V6::~V6() {} +V7::~V7() {} +V8::~V8() {} + // Ensure alignment is a compile-time constant. char LLVM_ATTRIBUTE_UNUSED test_arr1 [AlignOf::Alignment > 0] @@ -90,11 +95,7 @@ char LLVM_ATTRIBUTE_UNUSED test_arr2 [AlignOf::Alignment > 0] [AlignOf::Alignment > 0] [AlignOf::Alignment > 0] - [AlignOf::Alignment > 0] - [AlignOf::Alignment > 0] - [AlignOf::Alignment > 0] - [AlignOf::Alignment > 0] - [AlignOf::Alignment > 0]; + [AlignOf::Alignment > 0]; char LLVM_ATTRIBUTE_UNUSED test_arr3 [AlignOf::Alignment > 0] [AlignOf::Alignment > 0] @@ -123,20 +124,10 @@ char LLVM_ATTRIBUTE_UNUSED test_arr5 [AlignOf::Alignment > 0]; TEST(AlignOfTest, BasicAlignmentInvariants) { - // For a very strange reason, many compilers do not support this. Both Clang - // and GCC fail to align these properly. - EXPECT_EQ(1u, alignOf()); -#if 0 - EXPECT_EQ(2u, alignOf()); - EXPECT_EQ(4u, alignOf()); - EXPECT_EQ(8u, alignOf()); -#endif - - // But once wrapped in structs, the alignment is correctly managed. 
- EXPECT_LE(1u, alignOf()); - EXPECT_LE(2u, alignOf()); - EXPECT_LE(4u, alignOf()); - EXPECT_LE(8u, alignOf()); + EXPECT_LE(1u, alignOf()); + EXPECT_LE(2u, alignOf()); + EXPECT_LE(4u, alignOf()); + EXPECT_LE(8u, alignOf()); EXPECT_EQ(1u, alignOf()); EXPECT_LE(alignOf(), alignOf()); @@ -174,42 +165,38 @@ TEST(AlignOfTest, BasicAlignmentInvariants) { } TEST(AlignOfTest, BasicAlignedArray) { - // Note: this code exclusively uses the struct-wrapped arbitrarily aligned - // types because of the bugs mentioned above where GCC and Clang both - // disregard the arbitrary alignment specifier until the type is used to - // declare a member of a struct. - EXPECT_LE(1u, alignOf >()); - EXPECT_LE(2u, alignOf >()); - EXPECT_LE(4u, alignOf >()); - EXPECT_LE(8u, alignOf >()); + EXPECT_LE(1u, alignOf >()); + EXPECT_LE(2u, alignOf >()); + EXPECT_LE(4u, alignOf >()); + EXPECT_LE(8u, alignOf >()); - EXPECT_LE(1u, sizeof(AlignedCharArrayUnion)); - EXPECT_LE(2u, sizeof(AlignedCharArrayUnion)); - EXPECT_LE(4u, sizeof(AlignedCharArrayUnion)); - EXPECT_LE(8u, sizeof(AlignedCharArrayUnion)); + EXPECT_LE(1u, sizeof(AlignedCharArrayUnion)); + EXPECT_LE(2u, sizeof(AlignedCharArrayUnion)); + EXPECT_LE(4u, sizeof(AlignedCharArrayUnion)); + EXPECT_LE(8u, sizeof(AlignedCharArrayUnion)); - EXPECT_EQ(1u, (alignOf >())); - EXPECT_EQ(2u, (alignOf >())); - EXPECT_EQ(4u, (alignOf >())); - EXPECT_EQ(8u, (alignOf >())); + EXPECT_EQ(1u, (alignOf >())); + EXPECT_EQ(2u, (alignOf >())); + EXPECT_EQ(4u, (alignOf >())); + EXPECT_EQ(8u, (alignOf >())); - EXPECT_EQ(1u, sizeof(AlignedCharArrayUnion)); - EXPECT_EQ(2u, sizeof(AlignedCharArrayUnion)); - EXPECT_EQ(4u, sizeof(AlignedCharArrayUnion)); - EXPECT_EQ(8u, sizeof(AlignedCharArrayUnion)); + EXPECT_EQ(1u, sizeof(AlignedCharArrayUnion)); + EXPECT_EQ(2u, sizeof(AlignedCharArrayUnion)); + EXPECT_EQ(4u, sizeof(AlignedCharArrayUnion)); + EXPECT_EQ(8u, sizeof(AlignedCharArrayUnion)); - EXPECT_EQ(1u, (alignOf >())); - EXPECT_EQ(2u, (alignOf >())); - EXPECT_EQ(4u, (alignOf >())); - EXPECT_EQ(8u, (alignOf >())); + EXPECT_EQ(1u, (alignOf >())); + EXPECT_EQ(2u, (alignOf >())); + EXPECT_EQ(4u, (alignOf >())); + EXPECT_EQ(8u, (alignOf >())); - EXPECT_EQ(1u, sizeof(AlignedCharArrayUnion)); - EXPECT_EQ(2u, sizeof(AlignedCharArrayUnion)); - EXPECT_EQ(4u, sizeof(AlignedCharArrayUnion)); - EXPECT_EQ(16u, sizeof(AlignedCharArrayUnion)); + EXPECT_EQ(1u, sizeof(AlignedCharArrayUnion)); + EXPECT_EQ(2u, sizeof(AlignedCharArrayUnion)); + EXPECT_EQ(4u, sizeof(AlignedCharArrayUnion)); + EXPECT_EQ(16u, sizeof(AlignedCharArrayUnion)); // For other tests we simply assert that the alignment of the union mathes // that of the fundamental type and hope that we have any weird type diff --git a/unittests/Support/CMakeLists.txt b/unittests/Support/CMakeLists.txt index 3b9bf84..09a0ea5 100644 --- a/unittests/Support/CMakeLists.txt +++ b/unittests/Support/CMakeLists.txt @@ -17,11 +17,14 @@ add_llvm_unittest(SupportTests LeakDetectorTest.cpp ManagedStatic.cpp MathExtrasTest.cpp + MemoryBufferTest.cpp + MemoryTest.cpp Path.cpp - raw_ostream_test.cpp RegexTest.cpp SwapByteOrderTest.cpp TimeValue.cpp ValueHandleTest.cpp YAMLParserTest.cpp + formatted_raw_ostream_test.cpp + raw_ostream_test.cpp ) diff --git a/unittests/Support/Casting.cpp b/unittests/Support/Casting.cpp index ca0b40b..ad564aa 100644 --- a/unittests/Support/Casting.cpp +++ b/unittests/Support/Casting.cpp @@ -95,8 +95,9 @@ TEST(CastingTest, cast) { EXPECT_NE(&F5, null_foo); const foo *F6 = cast(B4); EXPECT_NE(F6, null_foo); - foo *F7 = cast(fub()); - 
EXPECT_EQ(F7, null_foo); + // Can't pass null pointer to cast<>. + // foo *F7 = cast(fub()); + // EXPECT_EQ(F7, null_foo); foo *F8 = B1.baz(); EXPECT_NE(F8, null_foo); } @@ -121,7 +122,8 @@ TEST(CastingTest, dyn_cast) { EXPECT_NE(F2, null_foo); const foo *F3 = dyn_cast(B4); EXPECT_NE(F3, null_foo); - // foo *F4 = dyn_cast(fub()); // not permittible + // Can't pass null pointer to dyn_cast<>. + // foo *F4 = dyn_cast(fub()); // EXPECT_EQ(F4, null_foo); foo *F5 = B1.daz(); EXPECT_NE(F5, null_foo); @@ -151,3 +153,54 @@ const bar *B2 = &B; } // anonymous namespace bar *llvm::fub() { return 0; } + +namespace { +namespace inferred_upcasting { +// This test case verifies correct behavior of inferred upcasts when the +// types are statically known to be OK to upcast. This is the case when, +// for example, Derived inherits from Base, and we do `isa(Derived)`. + +// Note: This test will actually fail to compile without inferred +// upcasting. + +class Base { +public: + // No classof. We are testing that the upcast is inferred. + Base() {} +}; + +class Derived : public Base { +public: + Derived() {} +}; + +// Even with no explicit classof() in Base, we should still be able to cast +// Derived to its base class. +TEST(CastingTest, UpcastIsInferred) { + Derived D; + EXPECT_TRUE(isa(D)); + Base *BP = dyn_cast(&D); + EXPECT_TRUE(BP != NULL); +} + + +// This test verifies that the inferred upcast takes precedence over an +// explicitly written one. This is important because it verifies that the +// dynamic check gets optimized away. +class UseInferredUpcast { +public: + int Dummy; + static bool classof(const UseInferredUpcast *) { + return false; + } +}; + +TEST(CastingTest, InferredUpcastTakesPrecedence) { + UseInferredUpcast UIU; + // Since the explicit classof() returns false, this will fail if the + // explicit one is used. + EXPECT_TRUE(isa(&UIU)); +} + +} // end namespace inferred_upcasting +} // end anonymous namespace diff --git a/unittests/Support/DataExtractorTest.cpp b/unittests/Support/DataExtractorTest.cpp index 9813e46..ec8bd3d 100644 --- a/unittests/Support/DataExtractorTest.cpp +++ b/unittests/Support/DataExtractorTest.cpp @@ -16,6 +16,7 @@ namespace { const char numberData[] = "\x80\x90\xFF\xFF\x80\x00\x00\x00"; const char stringData[] = "hellohello\0hello"; const char leb128data[] = "\xA6\x49"; +const char bigleb128data[] = "\xAA\xA9\xFF\xAA\xFF\xAA\xFF\x4A"; TEST(DataExtractorTest, OffsetOverflow) { DataExtractor DE(StringRef(numberData, sizeof(numberData)-1), false, 8); @@ -106,6 +107,14 @@ TEST(DataExtractorTest, LEB128) { offset = 0; EXPECT_EQ(-7002LL, DE.getSLEB128(&offset)); EXPECT_EQ(2U, offset); + + DataExtractor BDE(StringRef(bigleb128data, sizeof(bigleb128data)-1), false,8); + offset = 0; + EXPECT_EQ(42218325750568106ULL, BDE.getULEB128(&offset)); + EXPECT_EQ(8U, offset); + offset = 0; + EXPECT_EQ(-29839268287359830LL, BDE.getSLEB128(&offset)); + EXPECT_EQ(8U, offset); } } diff --git a/unittests/Support/MemoryBufferTest.cpp b/unittests/Support/MemoryBufferTest.cpp new file mode 100644 index 0000000..6c78cd8 --- /dev/null +++ b/unittests/Support/MemoryBufferTest.cpp @@ -0,0 +1,99 @@ +//===- llvm/unittest/Support/MemoryBufferTest.cpp - MemoryBuffer tests ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
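// [Editorial aside; illustrative sketch, not part of the imported patch.]
// The Casting.cpp hunks above make two points: cast<>/dyn_cast<> must not
// be handed a null pointer, and an upcast to a statically known base class
// is now inferred even when the base declares no classof(). A minimal
// sketch of the inferred upcast, assuming only llvm/Support/Casting.h:
#include "llvm/Support/Casting.h"
namespace editor_sketch {
struct SketchBase { };                   // deliberately no classof()
struct SketchDerived : SketchBase { };
inline bool upcastIsInferred() {
  SketchDerived D;
  // isa<> and dyn_cast<> to the base compile and trivially succeed because
  // the conversion is statically known to be safe.
  return llvm::isa<SketchBase>(D) && llvm::dyn_cast<SketchBase>(&D) != 0;
}
} // namespace editor_sketch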
+// +//===----------------------------------------------------------------------===// +// +// This file implements unit tests for the MemoryBuffer support class. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/ADT/OwningPtr.h" + +#include "gtest/gtest.h" + +using namespace llvm; + +class MemoryBufferTest : public testing::Test { +protected: + MemoryBufferTest() + : data("this is some data") + { } + + virtual void SetUp() { } + + typedef OwningPtr OwningBuffer; + + std::string data; +}; + +namespace { + +TEST_F(MemoryBufferTest, get) { + // Default name and null-terminator flag + OwningBuffer MB1(MemoryBuffer::getMemBuffer(data)); + EXPECT_TRUE(0 != MB1.get()); + + // RequiresNullTerminator = false + OwningBuffer MB2(MemoryBuffer::getMemBuffer(data, "one", false)); + EXPECT_TRUE(0 != MB2.get()); + + // RequiresNullTerminator = true + OwningBuffer MB3(MemoryBuffer::getMemBuffer(data, "two", true)); + EXPECT_TRUE(0 != MB3.get()); + + // verify all 3 buffers point to the same address + EXPECT_EQ(MB1->getBufferStart(), MB2->getBufferStart()); + EXPECT_EQ(MB2->getBufferStart(), MB3->getBufferStart()); + + // verify the original data is unmodified after deleting the buffers + MB1.reset(); + MB2.reset(); + MB3.reset(); + EXPECT_EQ("this is some data", data); +} + +TEST_F(MemoryBufferTest, copy) { + // copy with no name + OwningBuffer MBC1(MemoryBuffer::getMemBufferCopy(data)); + EXPECT_TRUE(0 != MBC1.get()); + + // copy with a name + OwningBuffer MBC2(MemoryBuffer::getMemBufferCopy(data, "copy")); + EXPECT_TRUE(0 != MBC2.get()); + + // verify the two copies do not point to the same place + EXPECT_NE(MBC1->getBufferStart(), MBC2->getBufferStart()); +} + +TEST_F(MemoryBufferTest, make_new) { + // 0-sized buffer + OwningBuffer Zero(MemoryBuffer::getNewUninitMemBuffer(0)); + EXPECT_TRUE(0 != Zero.get()); + + // uninitialized buffer with no name + OwningBuffer One(MemoryBuffer::getNewUninitMemBuffer(321)); + EXPECT_TRUE(0 != One.get()); + + // uninitialized buffer with name + OwningBuffer Two(MemoryBuffer::getNewUninitMemBuffer(123, "bla")); + EXPECT_TRUE(0 != Two.get()); + + // 0-initialized buffer with no name + OwningBuffer Three(MemoryBuffer::getNewMemBuffer(321, data)); + EXPECT_TRUE(0 != Three.get()); + for (size_t i = 0; i < 321; ++i) + EXPECT_EQ(0, Three->getBufferStart()[0]); + + // 0-initialized buffer with name + OwningBuffer Four(MemoryBuffer::getNewMemBuffer(123, "zeros")); + EXPECT_TRUE(0 != Four.get()); + for (size_t i = 0; i < 123; ++i) + EXPECT_EQ(0, Four->getBufferStart()[0]); +} + +} diff --git a/unittests/Support/MemoryTest.cpp b/unittests/Support/MemoryTest.cpp new file mode 100644 index 0000000..21cb27e --- /dev/null +++ b/unittests/Support/MemoryTest.cpp @@ -0,0 +1,356 @@ +//===- llvm/unittest/Support/AllocatorTest.cpp - BumpPtrAllocator tests ---===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
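// [Editorial aside; illustrative sketch, not part of the imported patch.]
// MemoryBufferTest.cpp above exercises the MemoryBuffer factories. A small
// usage sketch with the same 2012-era API: getMemBuffer() wraps existing
// storage without copying, while getMemBufferCopy() takes its own copy.
#include "llvm/ADT/OwningPtr.h"
#include "llvm/Support/MemoryBuffer.h"
#include <string>
namespace editor_sketch {
inline bool memoryBufferUsage() {
  std::string Data = "this is some data";
  llvm::OwningPtr<llvm::MemoryBuffer> Wrap(
      llvm::MemoryBuffer::getMemBuffer(Data, "wrap"));
  llvm::OwningPtr<llvm::MemoryBuffer> Copy(
      llvm::MemoryBuffer::getMemBufferCopy(Data, "copy"));
  // The wrapping buffer aliases the original storage; the copy does not.
  return Wrap->getBufferStart() == Data.data() &&
         Copy->getBufferStart() != Data.data();
}
} // namespace editor_sketch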
+// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/Memory.h" +#include "llvm/Support/Process.h" + +#include "gtest/gtest.h" +#include + +using namespace llvm; +using namespace sys; + +namespace { + +class MappedMemoryTest : public ::testing::TestWithParam { +public: + MappedMemoryTest() { + Flags = GetParam(); + PageSize = sys::Process::GetPageSize(); + } + +protected: + // Adds RW flags to permit testing of the resulting memory + unsigned getTestableEquivalent(unsigned RequestedFlags) { + switch (RequestedFlags) { + case Memory::MF_READ: + case Memory::MF_WRITE: + case Memory::MF_READ|Memory::MF_WRITE: + return Memory::MF_READ|Memory::MF_WRITE; + case Memory::MF_READ|Memory::MF_EXEC: + case Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC: + case Memory::MF_EXEC: + return Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC; + } + // Default in case values are added to the enum, as required by some compilers + return Memory::MF_READ|Memory::MF_WRITE; + } + + // Returns true if the memory blocks overlap + bool doesOverlap(MemoryBlock M1, MemoryBlock M2) { + if (M1.base() == M2.base()) + return true; + + if (M1.base() > M2.base()) + return (unsigned char *)M2.base() + M2.size() > M1.base(); + + return (unsigned char *)M1.base() + M1.size() > M2.base(); + } + + unsigned Flags; + size_t PageSize; +}; + +TEST_P(MappedMemoryTest, AllocAndRelease) { + error_code EC; + MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(sizeof(int), M1.size()); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); +} + +TEST_P(MappedMemoryTest, MultipleAllocAndRelease) { + error_code EC; + MemoryBlock M1 = Memory::allocateMappedMemory(16, 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M2 = Memory::allocateMappedMemory(64, 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M3 = Memory::allocateMappedMemory(32, 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(16U, M1.size()); + EXPECT_NE((void*)0, M2.base()); + EXPECT_LE(64U, M2.size()); + EXPECT_NE((void*)0, M3.base()); + EXPECT_LE(32U, M3.size()); + + EXPECT_FALSE(doesOverlap(M1, M2)); + EXPECT_FALSE(doesOverlap(M2, M3)); + EXPECT_FALSE(doesOverlap(M1, M3)); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); + EXPECT_FALSE(Memory::releaseMappedMemory(M3)); + MemoryBlock M4 = Memory::allocateMappedMemory(16, 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + EXPECT_NE((void*)0, M4.base()); + EXPECT_LE(16U, M4.size()); + EXPECT_FALSE(Memory::releaseMappedMemory(M4)); + EXPECT_FALSE(Memory::releaseMappedMemory(M2)); +} + +TEST_P(MappedMemoryTest, BasicWrite) { + // This test applies only to writeable combinations + if (Flags && !(Flags & Memory::MF_WRITE)) + return; + + error_code EC; + MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(sizeof(int), M1.size()); + + int *a = (int*)M1.base(); + *a = 1; + EXPECT_EQ(1, *a); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); +} + +TEST_P(MappedMemoryTest, MultipleWrite) { + // This test applies only to writeable combinations + if (Flags && !(Flags & Memory::MF_WRITE)) + return; + error_code EC; + MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M2 = 
Memory::allocateMappedMemory(8 * sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_FALSE(doesOverlap(M1, M2)); + EXPECT_FALSE(doesOverlap(M2, M3)); + EXPECT_FALSE(doesOverlap(M1, M3)); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(1U * sizeof(int), M1.size()); + EXPECT_NE((void*)0, M2.base()); + EXPECT_LE(8U * sizeof(int), M2.size()); + EXPECT_NE((void*)0, M3.base()); + EXPECT_LE(4U * sizeof(int), M3.size()); + + int *x = (int*)M1.base(); + *x = 1; + + int *y = (int*)M2.base(); + for (int i = 0; i < 8; i++) { + y[i] = i; + } + + int *z = (int*)M3.base(); + *z = 42; + + EXPECT_EQ(1, *x); + EXPECT_EQ(7, y[7]); + EXPECT_EQ(42, *z); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); + EXPECT_FALSE(Memory::releaseMappedMemory(M3)); + + MemoryBlock M4 = Memory::allocateMappedMemory(64 * sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + EXPECT_NE((void*)0, M4.base()); + EXPECT_LE(64U * sizeof(int), M4.size()); + x = (int*)M4.base(); + *x = 4; + EXPECT_EQ(4, *x); + EXPECT_FALSE(Memory::releaseMappedMemory(M4)); + + // Verify that M2 remains unaffected by other activity + for (int i = 0; i < 8; i++) { + EXPECT_EQ(i, y[i]); + } + EXPECT_FALSE(Memory::releaseMappedMemory(M2)); +} + +TEST_P(MappedMemoryTest, EnabledWrite) { + error_code EC; + MemoryBlock M1 = Memory::allocateMappedMemory(2 * sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(2U * sizeof(int), M1.size()); + EXPECT_NE((void*)0, M2.base()); + EXPECT_LE(8U * sizeof(int), M2.size()); + EXPECT_NE((void*)0, M3.base()); + EXPECT_LE(4U * sizeof(int), M3.size()); + + EXPECT_FALSE(Memory::protectMappedMemory(M1, getTestableEquivalent(Flags))); + EXPECT_FALSE(Memory::protectMappedMemory(M2, getTestableEquivalent(Flags))); + EXPECT_FALSE(Memory::protectMappedMemory(M3, getTestableEquivalent(Flags))); + + EXPECT_FALSE(doesOverlap(M1, M2)); + EXPECT_FALSE(doesOverlap(M2, M3)); + EXPECT_FALSE(doesOverlap(M1, M3)); + + int *x = (int*)M1.base(); + *x = 1; + int *y = (int*)M2.base(); + for (unsigned int i = 0; i < 8; i++) { + y[i] = i; + } + int *z = (int*)M3.base(); + *z = 42; + + EXPECT_EQ(1, *x); + EXPECT_EQ(7, y[7]); + EXPECT_EQ(42, *z); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); + EXPECT_FALSE(Memory::releaseMappedMemory(M3)); + EXPECT_EQ(6, y[6]); + + MemoryBlock M4 = Memory::allocateMappedMemory(16, 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + EXPECT_NE((void*)0, M4.base()); + EXPECT_LE(16U, M4.size()); + EXPECT_EQ(error_code::success(), Memory::protectMappedMemory(M4, getTestableEquivalent(Flags))); + x = (int*)M4.base(); + *x = 4; + EXPECT_EQ(4, *x); + EXPECT_FALSE(Memory::releaseMappedMemory(M4)); + EXPECT_FALSE(Memory::releaseMappedMemory(M2)); +} + +TEST_P(MappedMemoryTest, SuccessiveNear) { + error_code EC; + MemoryBlock M1 = Memory::allocateMappedMemory(16, 0, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M2 = Memory::allocateMappedMemory(64, &M1, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M3 = Memory::allocateMappedMemory(32, &M2, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, 
M1.base()); + EXPECT_LE(16U, M1.size()); + EXPECT_NE((void*)0, M2.base()); + EXPECT_LE(64U, M2.size()); + EXPECT_NE((void*)0, M3.base()); + EXPECT_LE(32U, M3.size()); + + EXPECT_FALSE(doesOverlap(M1, M2)); + EXPECT_FALSE(doesOverlap(M2, M3)); + EXPECT_FALSE(doesOverlap(M1, M3)); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); + EXPECT_FALSE(Memory::releaseMappedMemory(M3)); + EXPECT_FALSE(Memory::releaseMappedMemory(M2)); +} + +TEST_P(MappedMemoryTest, DuplicateNear) { + error_code EC; + MemoryBlock Near((void*)(3*PageSize), 16); + MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(16U, M1.size()); + EXPECT_NE((void*)0, M2.base()); + EXPECT_LE(64U, M2.size()); + EXPECT_NE((void*)0, M3.base()); + EXPECT_LE(32U, M3.size()); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); + EXPECT_FALSE(Memory::releaseMappedMemory(M3)); + EXPECT_FALSE(Memory::releaseMappedMemory(M2)); +} + +TEST_P(MappedMemoryTest, ZeroNear) { + error_code EC; + MemoryBlock Near(0, 0); + MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(16U, M1.size()); + EXPECT_NE((void*)0, M2.base()); + EXPECT_LE(64U, M2.size()); + EXPECT_NE((void*)0, M3.base()); + EXPECT_LE(32U, M3.size()); + + EXPECT_FALSE(doesOverlap(M1, M2)); + EXPECT_FALSE(doesOverlap(M2, M3)); + EXPECT_FALSE(doesOverlap(M1, M3)); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); + EXPECT_FALSE(Memory::releaseMappedMemory(M3)); + EXPECT_FALSE(Memory::releaseMappedMemory(M2)); +} + +TEST_P(MappedMemoryTest, ZeroSizeNear) { + error_code EC; + MemoryBlock Near((void*)(4*PageSize), 0); + MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(16U, M1.size()); + EXPECT_NE((void*)0, M2.base()); + EXPECT_LE(64U, M2.size()); + EXPECT_NE((void*)0, M3.base()); + EXPECT_LE(32U, M3.size()); + + EXPECT_FALSE(doesOverlap(M1, M2)); + EXPECT_FALSE(doesOverlap(M2, M3)); + EXPECT_FALSE(doesOverlap(M1, M3)); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); + EXPECT_FALSE(Memory::releaseMappedMemory(M3)); + EXPECT_FALSE(Memory::releaseMappedMemory(M2)); +} + +TEST_P(MappedMemoryTest, UnalignedNear) { + error_code EC; + MemoryBlock Near((void*)(2*PageSize+5), 0); + MemoryBlock M1 = Memory::allocateMappedMemory(15, &Near, Flags, EC); + EXPECT_EQ(error_code::success(), EC); + + EXPECT_NE((void*)0, M1.base()); + EXPECT_LE(sizeof(int), M1.size()); + + EXPECT_FALSE(Memory::releaseMappedMemory(M1)); +} + +// Note that Memory::MF_WRITE is not supported exclusively across +// operating systems and architectures and can imply MF_READ|MF_WRITE +unsigned MemoryFlags[] = { + Memory::MF_READ, + Memory::MF_WRITE, + 
Memory::MF_READ|Memory::MF_WRITE, + Memory::MF_EXEC, + Memory::MF_READ|Memory::MF_EXEC, + Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC + }; + +INSTANTIATE_TEST_CASE_P(AllocationTests, + MappedMemoryTest, + ::testing::ValuesIn(MemoryFlags)); + +} // anonymous namespace diff --git a/unittests/Support/formatted_raw_ostream_test.cpp b/unittests/Support/formatted_raw_ostream_test.cpp new file mode 100644 index 0000000..4725ced --- /dev/null +++ b/unittests/Support/formatted_raw_ostream_test.cpp @@ -0,0 +1,33 @@ +//===- llvm/unittest/Support/formatted_raw_ostream_test.cpp ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "gtest/gtest.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/FormattedStream.h" + +using namespace llvm; + +namespace { + +TEST(formatted_raw_ostreamTest, Test_Tell) { + // Check offset when underlying stream has buffer contents. + SmallString<128> A; + raw_svector_ostream B(A); + formatted_raw_ostream C(B); + char tmp[100] = ""; + + for (unsigned i = 0; i != 3; ++i) { + C.write(tmp, 100); + + EXPECT_EQ(100*(i+1), (unsigned) C.tell()); + } +} + +} diff --git a/unittests/Transforms/Utils/CMakeLists.txt b/unittests/Transforms/Utils/CMakeLists.txt index 365bfbb..730d83b 100644 --- a/unittests/Transforms/Utils/CMakeLists.txt +++ b/unittests/Transforms/Utils/CMakeLists.txt @@ -4,5 +4,6 @@ set(LLVM_LINK_COMPONENTS add_llvm_unittest(UtilsTests Cloning.cpp + IntegerDivision.cpp Local.cpp ) diff --git a/unittests/Transforms/Utils/IntegerDivision.cpp b/unittests/Transforms/Utils/IntegerDivision.cpp new file mode 100644 index 0000000..a321139 --- /dev/null +++ b/unittests/Transforms/Utils/IntegerDivision.cpp @@ -0,0 +1,142 @@ +//===- IntegerDivision.cpp - Unit tests for the integer division code -----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
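// [Editorial aside; illustrative sketch, not part of the imported patch.]
// MemoryTest.cpp above drives the new sys::Memory::allocateMappedMemory /
// protectMappedMemory / releaseMappedMemory interface over every flag
// combination. A single-allocation sketch using the same calls, error_code
// handling, and MF_* flags as the test:
#include "llvm/Support/Memory.h"
#include "llvm/Support/system_error.h"
namespace editor_sketch {
inline bool mappedMemoryUsage() {
  llvm::error_code EC;
  llvm::sys::MemoryBlock MB = llvm::sys::Memory::allocateMappedMemory(
      4096, 0, llvm::sys::Memory::MF_READ | llvm::sys::Memory::MF_WRITE, EC);
  if (EC != llvm::error_code::success() || MB.base() == 0)
    return false;
  *static_cast<int *>(MB.base()) = 42;   // the mapping is readable/writable
  // releaseMappedMemory() returns an error_code; success converts to false.
  return !llvm::sys::Memory::releaseMappedMemory(MB);
}
} // namespace editor_sketch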
+// +//===----------------------------------------------------------------------===// + +#include "gtest/gtest.h" +#include "llvm/BasicBlock.h" +#include "llvm/GlobalValue.h" +#include "llvm/Function.h" +#include "llvm/IRBuilder.h" +#include "llvm/Module.h" +#include "llvm/Transforms/Utils/IntegerDivision.h" + +using namespace llvm; + +namespace { + +TEST(IntegerDivision, SDiv) { + LLVMContext &C(getGlobalContext()); + Module M("test division", C); + IRBuilder<> Builder(C); + + SmallVector ArgTys(2, Builder.getInt32Ty()); + Function *F = Function::Create(FunctionType::get(Builder.getInt32Ty(), + ArgTys, false), + GlobalValue::ExternalLinkage, "F", &M); + assert(F->getArgumentList().size() == 2); + + BasicBlock *BB = BasicBlock::Create(C, "", F); + Builder.SetInsertPoint(BB); + + Function::arg_iterator AI = F->arg_begin(); + Value *A = AI++; + Value *B = AI++; + + Value *Div = Builder.CreateSDiv(A, B); + EXPECT_TRUE(BB->front().getOpcode() == Instruction::SDiv); + + Value *Ret = Builder.CreateRet(Div); + + expandDivision(cast(Div)); + EXPECT_TRUE(BB->front().getOpcode() == Instruction::AShr); + + Instruction* Quotient = dyn_cast(cast(Ret)->getOperand(0)); + EXPECT_TRUE(Quotient && Quotient->getOpcode() == Instruction::Sub); +} + +TEST(IntegerDivision, UDiv) { + LLVMContext &C(getGlobalContext()); + Module M("test division", C); + IRBuilder<> Builder(C); + + SmallVector ArgTys(2, Builder.getInt32Ty()); + Function *F = Function::Create(FunctionType::get(Builder.getInt32Ty(), + ArgTys, false), + GlobalValue::ExternalLinkage, "F", &M); + assert(F->getArgumentList().size() == 2); + + BasicBlock *BB = BasicBlock::Create(C, "", F); + Builder.SetInsertPoint(BB); + + Function::arg_iterator AI = F->arg_begin(); + Value *A = AI++; + Value *B = AI++; + + Value *Div = Builder.CreateUDiv(A, B); + EXPECT_TRUE(BB->front().getOpcode() == Instruction::UDiv); + + Value *Ret = Builder.CreateRet(Div); + + expandDivision(cast(Div)); + EXPECT_TRUE(BB->front().getOpcode() == Instruction::ICmp); + + Instruction* Quotient = dyn_cast(cast(Ret)->getOperand(0)); + EXPECT_TRUE(Quotient && Quotient->getOpcode() == Instruction::PHI); +} + +TEST(IntegerDivision, SRem) { + LLVMContext &C(getGlobalContext()); + Module M("test remainder", C); + IRBuilder<> Builder(C); + + SmallVector ArgTys(2, Builder.getInt32Ty()); + Function *F = Function::Create(FunctionType::get(Builder.getInt32Ty(), + ArgTys, false), + GlobalValue::ExternalLinkage, "F", &M); + assert(F->getArgumentList().size() == 2); + + BasicBlock *BB = BasicBlock::Create(C, "", F); + Builder.SetInsertPoint(BB); + + Function::arg_iterator AI = F->arg_begin(); + Value *A = AI++; + Value *B = AI++; + + Value *Rem = Builder.CreateSRem(A, B); + EXPECT_TRUE(BB->front().getOpcode() == Instruction::SRem); + + Value *Ret = Builder.CreateRet(Rem); + + expandRemainder(cast(Rem)); + EXPECT_TRUE(BB->front().getOpcode() == Instruction::AShr); + + Instruction* Remainder = dyn_cast(cast(Ret)->getOperand(0)); + EXPECT_TRUE(Remainder && Remainder->getOpcode() == Instruction::Sub); +} + +TEST(IntegerDivision, URem) { + LLVMContext &C(getGlobalContext()); + Module M("test remainder", C); + IRBuilder<> Builder(C); + + SmallVector ArgTys(2, Builder.getInt32Ty()); + Function *F = Function::Create(FunctionType::get(Builder.getInt32Ty(), + ArgTys, false), + GlobalValue::ExternalLinkage, "F", &M); + assert(F->getArgumentList().size() == 2); + + BasicBlock *BB = BasicBlock::Create(C, "", F); + Builder.SetInsertPoint(BB); + + Function::arg_iterator AI = F->arg_begin(); + Value *A = AI++; + Value 
*B = AI++; + + Value *Rem = Builder.CreateURem(A, B); + EXPECT_TRUE(BB->front().getOpcode() == Instruction::URem); + + Value *Ret = Builder.CreateRet(Rem); + + expandRemainder(cast(Rem)); + EXPECT_TRUE(BB->front().getOpcode() == Instruction::ICmp); + + Instruction* Remainder = dyn_cast(cast(Ret)->getOperand(0)); + EXPECT_TRUE(Remainder && Remainder->getOpcode() == Instruction::Sub); +} + +} diff --git a/unittests/VMCore/IRBuilderTest.cpp b/unittests/VMCore/IRBuilderTest.cpp index b6a3795..9f26936 100644 --- a/unittests/VMCore/IRBuilderTest.cpp +++ b/unittests/VMCore/IRBuilderTest.cpp @@ -8,6 +8,7 @@ //===----------------------------------------------------------------------===// #include "llvm/BasicBlock.h" +#include "llvm/DataLayout.h" #include "llvm/Function.h" #include "llvm/IRBuilder.h" #include "llvm/IntrinsicInst.h" @@ -96,4 +97,15 @@ TEST_F(IRBuilderTest, CreateCondBr) { EXPECT_EQ(Weights, TI->getMetadata(LLVMContext::MD_prof)); } +TEST_F(IRBuilderTest, GetIntTy) { + IRBuilder<> Builder(BB); + IntegerType *Ty1 = Builder.getInt1Ty(); + EXPECT_EQ(Ty1, IntegerType::get(getGlobalContext(), 1)); + + DataLayout* DL = new DataLayout(M.get()); + IntegerType *IntPtrTy = Builder.getIntPtrTy(DL); + unsigned IntPtrBitSize = DL->getPointerSizeInBits(0); + EXPECT_EQ(IntPtrTy, IntegerType::get(getGlobalContext(), IntPtrBitSize)); +} + } diff --git a/unittests/VMCore/InstructionsTest.cpp b/unittests/VMCore/InstructionsTest.cpp index 72cdc8b..a3b13ce 100644 --- a/unittests/VMCore/InstructionsTest.cpp +++ b/unittests/VMCore/InstructionsTest.cpp @@ -9,6 +9,7 @@ #include "llvm/BasicBlock.h" #include "llvm/Constants.h" +#include "llvm/DataLayout.h" #include "llvm/DerivedTypes.h" #include "llvm/IRBuilder.h" #include "llvm/Instructions.h" @@ -17,7 +18,6 @@ #include "llvm/Operator.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Analysis/ValueTracking.h" -#include "llvm/Target/TargetData.h" #include "gtest/gtest.h" namespace llvm { @@ -183,7 +183,7 @@ TEST(InstructionsTest, VectorGep) { EXPECT_NE(S3, Gep3); int64_t Offset; - TargetData TD("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3" + DataLayout TD("e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3" "2:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80" ":128:128-n8:16:32:64-S128"); // Make sure we don't crash @@ -243,5 +243,42 @@ TEST(InstructionsTest, FPMathOperator) { delete I; } + +TEST(InstructionsTest, isEliminableCastPair) { + LLVMContext &C(getGlobalContext()); + + Type* Int32Ty = Type::getInt32Ty(C); + Type* Int64Ty = Type::getInt64Ty(C); + Type* Int64PtrTy = Type::getInt64PtrTy(C); + + // Source and destination pointers have same size -> bitcast. + EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt, + CastInst::IntToPtr, + Int64PtrTy, Int64Ty, Int64PtrTy, + Int32Ty, 0, Int32Ty), + CastInst::BitCast); + + // Source and destination pointers have different sizes -> fail. + EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::PtrToInt, + CastInst::IntToPtr, + Int64PtrTy, Int64Ty, Int64PtrTy, + Int32Ty, 0, Int64Ty), + 0U); + + // Middle pointer big enough -> bitcast. + EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr, + CastInst::PtrToInt, + Int64Ty, Int64PtrTy, Int64Ty, + 0, Int64Ty, 0), + CastInst::BitCast); + + // Middle pointer too small -> fail. 
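// [Editorial aside; illustrative sketch, not part of the imported patch.]
// The new isEliminableCastPair() checks above ask whether two chained casts
// fold into one. Assuming the argument order used by the test is
// (firstOp, secondOp, SrcTy, MidTy, DstTy, SrcIntPtrTy, MidIntPtrTy,
// DstIntPtrTy), the first case reads: ptrtoint i64* -> i64 followed by
// inttoptr i64 -> i64*, with 32-bit pointers on both ends, collapses to a
// single bitcast. A sketch mirroring that call with the 2012-era headers:
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
namespace editor_sketch {
inline bool castPairFoldsToBitCast(llvm::LLVMContext &C) {
  llvm::Type *I32 = llvm::Type::getInt32Ty(C);
  llvm::Type *I64 = llvm::Type::getInt64Ty(C);
  llvm::Type *I64Ptr = llvm::Type::getInt64PtrTy(C);
  // Width-preserving ptrtoint/inttoptr pair reduces to a bitcast.
  return llvm::CastInst::isEliminableCastPair(
             llvm::CastInst::PtrToInt, llvm::CastInst::IntToPtr,
             I64Ptr, I64, I64Ptr, I32, 0, I32) == llvm::CastInst::BitCast;
}
} // namespace editor_sketch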
+ EXPECT_EQ(CastInst::isEliminableCastPair(CastInst::IntToPtr, + CastInst::PtrToInt, + Int64Ty, Int64PtrTy, Int64Ty, + 0, Int32Ty, 0), + 0U); +} + } // end anonymous namespace } // end namespace llvm diff --git a/unittests/VMCore/PassManagerTest.cpp b/unittests/VMCore/PassManagerTest.cpp index 60d33c1..9c070c8 100644 --- a/unittests/VMCore/PassManagerTest.cpp +++ b/unittests/VMCore/PassManagerTest.cpp @@ -14,7 +14,7 @@ #include "llvm/Pass.h" #include "llvm/Analysis/LoopPass.h" #include "llvm/CallGraphSCCPass.h" -#include "llvm/Target/TargetData.h" +#include "llvm/DataLayout.h" #include "llvm/Support/raw_ostream.h" #include "llvm/DerivedTypes.h" #include "llvm/Constants.h" @@ -94,7 +94,7 @@ namespace llvm { initializeModuleNDMPass(*PassRegistry::getPassRegistry()); } virtual bool runOnModule(Module &M) { - EXPECT_TRUE(getAnalysisIfAvailable()); + EXPECT_TRUE(getAnalysisIfAvailable()); run++; return false; } @@ -167,7 +167,7 @@ namespace llvm { initializeCGPassPass(*PassRegistry::getPassRegistry()); } virtual bool runOnSCC(CallGraphSCC &SCMM) { - EXPECT_TRUE(getAnalysisIfAvailable()); + EXPECT_TRUE(getAnalysisIfAvailable()); run(); return false; } @@ -177,7 +177,7 @@ namespace llvm { public: virtual bool runOnFunction(Function &F) { // FIXME: PR4112 - // EXPECT_TRUE(getAnalysisIfAvailable()); + // EXPECT_TRUE(getAnalysisIfAvailable()); run(); return false; } @@ -204,7 +204,7 @@ namespace llvm { return false; } virtual bool runOnLoop(Loop *L, LPPassManager &LPM) { - EXPECT_TRUE(getAnalysisIfAvailable()); + EXPECT_TRUE(getAnalysisIfAvailable()); run(); return false; } @@ -241,7 +241,7 @@ namespace llvm { return false; } virtual bool runOnBasicBlock(BasicBlock &BB) { - EXPECT_TRUE(getAnalysisIfAvailable()); + EXPECT_TRUE(getAnalysisIfAvailable()); run(); return false; } @@ -266,7 +266,7 @@ namespace llvm { initializeFPassPass(*PassRegistry::getPassRegistry()); } virtual bool runOnModule(Module &M) { - EXPECT_TRUE(getAnalysisIfAvailable()); + EXPECT_TRUE(getAnalysisIfAvailable()); for (Module::iterator I=M.begin(),E=M.end(); I != E; ++I) { Function &F = *I; { @@ -292,7 +292,7 @@ namespace llvm { mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0; PassManager Passes; - Passes.add(new TargetData(&M)); + Passes.add(new DataLayout(&M)); Passes.add(mNDM2); Passes.add(mNDM); Passes.add(mNDNM); @@ -316,7 +316,7 @@ namespace llvm { mNDM->run = mNDNM->run = mDNM->run = mNDM2->run = 0; PassManager Passes; - Passes.add(new TargetData(&M)); + Passes.add(new DataLayout(&M)); Passes.add(mNDM); Passes.add(mNDNM); Passes.add(mNDM2);// invalidates mNDM needed by mDNM @@ -338,7 +338,7 @@ namespace llvm { OwningPtr M(makeLLVMModule()); T *P = new T(); PassManager Passes; - Passes.add(new TargetData(M.get())); + Passes.add(new DataLayout(M.get())); Passes.add(P); Passes.run(*M); T::finishedOK(run); @@ -349,7 +349,7 @@ namespace llvm { Module *M = makeLLVMModule(); T *P = new T(); PassManager Passes; - Passes.add(new TargetData(M)); + Passes.add(new DataLayout(M)); Passes.add(P); Passes.run(*M); T::finishedOK(run, N); @@ -387,7 +387,7 @@ namespace llvm { SCOPED_TRACE("Running OnTheFlyTest"); struct OnTheFlyTest *O = new OnTheFlyTest(); PassManager Passes; - Passes.add(new TargetData(M)); + Passes.add(new DataLayout(M)); Passes.add(O); Passes.run(*M); diff --git a/utils/FileCheck/FileCheck.cpp b/utils/FileCheck/FileCheck.cpp index 33f04ce..e791628 100644 --- a/utils/FileCheck/FileCheck.cpp +++ b/utils/FileCheck/FileCheck.cpp @@ -470,7 +470,7 @@ static MemoryBuffer *CanonicalizeInputFile(MemoryBuffer *MB) { continue; 
} - // If C is not a horizontal whitespace, skip it. + // If current char is not a horizontal whitespace, dump it to output as is. if (*Ptr != ' ' && *Ptr != '\t') { NewFile.push_back(*Ptr); continue; @@ -537,11 +537,11 @@ static bool ReadCheckFile(SourceMgr &SM, Buffer = Buffer.substr(CheckPrefix.size()+1); } else if (Buffer.size() > CheckPrefix.size()+6 && memcmp(Buffer.data()+CheckPrefix.size(), "-NEXT:", 6) == 0) { - Buffer = Buffer.substr(CheckPrefix.size()+7); + Buffer = Buffer.substr(CheckPrefix.size()+6); IsCheckNext = true; } else if (Buffer.size() > CheckPrefix.size()+5 && memcmp(Buffer.data()+CheckPrefix.size(), "-NOT:", 5) == 0) { - Buffer = Buffer.substr(CheckPrefix.size()+6); + Buffer = Buffer.substr(CheckPrefix.size()+5); IsCheckNot = true; } else { Buffer = Buffer.substr(1); diff --git a/utils/TableGen/AsmMatcherEmitter.cpp b/utils/TableGen/AsmMatcherEmitter.cpp index 026d47f..ee83311 100644 --- a/utils/TableGen/AsmMatcherEmitter.cpp +++ b/utils/TableGen/AsmMatcherEmitter.cpp @@ -77,7 +77,7 @@ // // Some targets need a custom way to parse operands, some specific instructions // can contain arguments that can represent processor flags and other kinds of -// identifiers that need to be mapped to specific valeus in the final encoded +// identifiers that need to be mapped to specific values in the final encoded // instructions. The target specific custom operand parsing works in the // following way: // @@ -199,7 +199,7 @@ public: return Kind >= UserClass0; } - /// isRelatedTo - Check whether this class is "related" to \arg RHS. Classes + /// isRelatedTo - Check whether this class is "related" to \p RHS. Classes /// are related if they are in the same class hierarchy. bool isRelatedTo(const ClassInfo &RHS) const { // Tokens are only related to tokens. @@ -238,7 +238,7 @@ public: return Root == RHSRoot; } - /// isSubsetOf - Test whether this class is a subset of \arg RHS; + /// isSubsetOf - Test whether this class is a subset of \p RHS. bool isSubsetOf(const ClassInfo &RHS) const { // This is a subset of RHS if it is the same class... if (this == &RHS) @@ -279,6 +279,15 @@ public: } }; +namespace { +/// Sort ClassInfo pointers independently of pointer value. +struct LessClassInfoPtr { + bool operator()(const ClassInfo *LHS, const ClassInfo *RHS) const { + return *LHS < *RHS; + } +}; +} + /// MatchableInfo - Helper class for storing the necessary information for an /// instruction or alias which is capable of being matched. struct MatchableInfo { @@ -416,7 +425,7 @@ struct MatchableInfo { SmallVector RequiredFeatures; /// ConversionFnKind - The enum value which is passed to the generated - /// ConvertToMCInst to convert parsed operands into an MCInst for this + /// convertToMCInst to convert parsed operands into an MCInst for this /// function. std::string ConversionFnKind; @@ -488,11 +497,20 @@ struct MatchableInfo { return false; } + // Give matches that require more features higher precedence. This is useful + // because we cannot define AssemblerPredicates with the negation of + // processor features. For example, ARM v6 "nop" may be either a HINT or + // MOV. With v6, we want to match HINT. The assembler has no way to + // predicate MOV under "NoV6", but HINT will always match first because it + // requires V6 while MOV does not. 
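// [Editorial aside; illustrative sketch, not part of the imported patch.]
// The precedence rule explained in the comment above can be pictured as a
// tie-breaker in the matchable ordering: when two candidates are otherwise
// equal, the one with more required subtarget features sorts first, so the
// feature-gated form (HINT on v6) is tried before the unconditional one
// (MOV). Hypothetical reduced comparator, names invented for illustration:
namespace editor_sketch {
struct CandidateOrderSketch {
  unsigned NumRequiredFeatures;
  bool operator<(const CandidateOrderSketch &RHS) const {
    // More required features => earlier in the match table.
    return NumRequiredFeatures > RHS.NumRequiredFeatures;
  }
};
} // namespace editor_sketch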
+ if (RequiredFeatures.size() != RHS.RequiredFeatures.size()) + return RequiredFeatures.size() > RHS.RequiredFeatures.size(); + return false; } /// couldMatchAmbiguouslyWith - Check whether this matchable could - /// ambiguously match the same set of operands as \arg RHS (without being a + /// ambiguously match the same set of operands as \p RHS (without being a /// strictly superior match). bool couldMatchAmbiguouslyWith(const MatchableInfo &RHS) { // The primary comparator is the instruction mnemonic. @@ -590,7 +608,8 @@ public: std::vector OperandMatchInfo; /// Map of Register records to their class information. - std::map RegisterClasses; + typedef std::map RegisterClassesTy; + RegisterClassesTy RegisterClasses; /// Map of Predicate records to their subtarget information. std::map SubtargetFeatures; @@ -666,22 +685,22 @@ void MatchableInfo::dump() { } static std::pair -parseTwoOperandConstraint(StringRef S, SMLoc Loc) { +parseTwoOperandConstraint(StringRef S, ArrayRef Loc) { // Split via the '='. std::pair Ops = S.split('='); if (Ops.second == "") - throw TGError(Loc, "missing '=' in two-operand alias constraint"); + PrintFatalError(Loc, "missing '=' in two-operand alias constraint"); // Trim whitespace and the leading '$' on the operand names. size_t start = Ops.first.find_first_of('$'); if (start == std::string::npos) - throw TGError(Loc, "expected '$' prefix on asm operand name"); + PrintFatalError(Loc, "expected '$' prefix on asm operand name"); Ops.first = Ops.first.slice(start + 1, std::string::npos); size_t end = Ops.first.find_last_of(" \t"); Ops.first = Ops.first.slice(0, end); // Now the second operand. start = Ops.second.find_first_of('$'); if (start == std::string::npos) - throw TGError(Loc, "expected '$' prefix on asm operand name"); + PrintFatalError(Loc, "expected '$' prefix on asm operand name"); Ops.second = Ops.second.slice(start + 1, std::string::npos); end = Ops.second.find_last_of(" \t"); Ops.first = Ops.first.slice(0, end); @@ -697,11 +716,11 @@ void MatchableInfo::formTwoOperandAlias(StringRef Constraint) { int SrcAsmOperand = findAsmOperandNamed(Ops.first); int DstAsmOperand = findAsmOperandNamed(Ops.second); if (SrcAsmOperand == -1) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "unknown source two-operand alias operand '" + Ops.first.str() + "'."); if (DstAsmOperand == -1) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "unknown destination two-operand alias operand '" + Ops.second.str() + "'."); @@ -833,15 +852,15 @@ void MatchableInfo::tokenizeAsmString(const AsmMatcherInfo &Info) { // The first token of the instruction is the mnemonic, which must be a // simple string, not a $foo variable or a singleton register. if (AsmOperands.empty()) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "Instruction '" + TheDef->getName() + "' has no tokens"); Mnemonic = AsmOperands[0].Token; if (Mnemonic.empty()) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "Missing instruction mnemonic"); // FIXME : Check and raise an error if it is a register. if (Mnemonic[0] == '$') - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "Invalid instruction mnemonic '" + Mnemonic.str() + "'!"); // Remove the first operand, it is tracked in the mnemonic field. @@ -851,12 +870,12 @@ void MatchableInfo::tokenizeAsmString(const AsmMatcherInfo &Info) { bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const { // Reject matchables with no .s string. 
if (AsmString.empty()) - throw TGError(TheDef->getLoc(), "instruction with empty asm string"); + PrintFatalError(TheDef->getLoc(), "instruction with empty asm string"); // Reject any matchables with a newline in them, they should be marked // isCodeGenOnly if they are pseudo instructions. if (AsmString.find('\n') != std::string::npos) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "multiline instruction is not valid for the asmparser, " "mark it isCodeGenOnly"); @@ -864,7 +883,7 @@ bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const { // has one line. if (!CommentDelimiter.empty() && StringRef(AsmString).find(CommentDelimiter) != StringRef::npos) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "asmstring for instruction has comment character in it, " "mark it isCodeGenOnly"); @@ -878,7 +897,7 @@ bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const { for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) { StringRef Tok = AsmOperands[i].Token; if (Tok[0] == '$' && Tok.find(':') != StringRef::npos) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "matchable with operand modifier '" + Tok.str() + "' not supported by asm matcher. Mark isCodeGenOnly!"); @@ -886,7 +905,7 @@ bool MatchableInfo::validate(StringRef CommentDelimiter, bool Hack) const { // We reject aliases and ignore instructions for now. if (Tok[0] == '$' && !OperandNames.insert(Tok).second) { if (!Hack) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "ERROR: matchable with tied operand '" + Tok.str() + "' can never be matched!"); // FIXME: Should reject these. The ARM backend hits this with $lane in a @@ -974,7 +993,7 @@ AsmMatcherInfo::getOperandClass(const CGIOperandList::OperandInfo &OI, int SubOpIdx) { Record *Rec = OI.Rec; if (SubOpIdx != -1) - Rec = dynamic_cast(OI.MIOperandInfo->getArg(SubOpIdx))->getDef(); + Rec = cast(OI.MIOperandInfo->getArg(SubOpIdx))->getDef(); return getOperandClass(Rec, SubOpIdx); } @@ -985,10 +1004,10 @@ AsmMatcherInfo::getOperandClass(Record *Rec, int SubOpIdx) { // use it, else just fall back to the underlying register class. const RecordVal *R = Rec->getValue("ParserMatchClass"); if (R == 0 || R->getValue() == 0) - throw "Record `" + Rec->getName() + - "' does not have a ParserMatchClass!\n"; + PrintFatalError("Record `" + Rec->getName() + + "' does not have a ParserMatchClass!\n"); - if (DefInit *DI= dynamic_cast(R->getValue())) { + if (DefInit *DI= dyn_cast(R->getValue())) { Record *MatchClass = DI->getDef(); if (ClassInfo *CI = AsmOperandClasses[MatchClass]) return CI; @@ -997,26 +1016,28 @@ AsmMatcherInfo::getOperandClass(Record *Rec, int SubOpIdx) { // No custom match class. Just use the register class. 
Record *ClassRec = Rec->getValueAsDef("RegClass"); if (!ClassRec) - throw TGError(Rec->getLoc(), "RegisterOperand `" + Rec->getName() + + PrintFatalError(Rec->getLoc(), "RegisterOperand `" + Rec->getName() + "' has no associated register class!\n"); if (ClassInfo *CI = RegisterClassClasses[ClassRec]) return CI; - throw TGError(Rec->getLoc(), "register class has no class info!"); + PrintFatalError(Rec->getLoc(), "register class has no class info!"); } if (Rec->isSubClassOf("RegisterClass")) { if (ClassInfo *CI = RegisterClassClasses[Rec]) return CI; - throw TGError(Rec->getLoc(), "register class has no class info!"); + PrintFatalError(Rec->getLoc(), "register class has no class info!"); } - assert(Rec->isSubClassOf("Operand") && "Unexpected operand!"); + if (!Rec->isSubClassOf("Operand")) + PrintFatalError(Rec->getLoc(), "Operand `" + Rec->getName() + + "' does not derive from class Operand!\n"); Record *MatchClass = Rec->getValueAsDef("ParserMatchClass"); if (ClassInfo *CI = AsmOperandClasses[MatchClass]) return CI; - throw TGError(Rec->getLoc(), "operand has no match class!"); + PrintFatalError(Rec->getLoc(), "operand has no match class!"); } void AsmMatcherInfo:: @@ -1164,7 +1185,7 @@ void AsmMatcherInfo::buildOperandClasses() { ListInit *Supers = (*it)->getValueAsListInit("SuperClasses"); for (unsigned i = 0, e = Supers->getSize(); i != e; ++i) { - DefInit *DI = dynamic_cast(Supers->getElement(i)); + DefInit *DI = dyn_cast(Supers->getElement(i)); if (!DI) { PrintError((*it)->getLoc(), "Invalid super class reference!"); continue; @@ -1182,33 +1203,31 @@ void AsmMatcherInfo::buildOperandClasses() { // Get or construct the predicate method name. Init *PMName = (*it)->getValueInit("PredicateMethod"); - if (StringInit *SI = dynamic_cast(PMName)) { + if (StringInit *SI = dyn_cast(PMName)) { CI->PredicateMethod = SI->getValue(); } else { - assert(dynamic_cast(PMName) && - "Unexpected PredicateMethod field!"); + assert(isa(PMName) && "Unexpected PredicateMethod field!"); CI->PredicateMethod = "is" + CI->ClassName; } // Get or construct the render method name. Init *RMName = (*it)->getValueInit("RenderMethod"); - if (StringInit *SI = dynamic_cast(RMName)) { + if (StringInit *SI = dyn_cast(RMName)) { CI->RenderMethod = SI->getValue(); } else { - assert(dynamic_cast(RMName) && - "Unexpected RenderMethod field!"); + assert(isa(RMName) && "Unexpected RenderMethod field!"); CI->RenderMethod = "add" + CI->ClassName + "Operands"; } // Get the parse method name or leave it as empty. Init *PRMName = (*it)->getValueInit("ParserMethod"); - if (StringInit *SI = dynamic_cast(PRMName)) + if (StringInit *SI = dyn_cast(PRMName)) CI->ParserMethod = SI->getValue(); // Get the diagnostic type or leave it as empty. // Get the parse method name or leave it as empty. Init *DiagnosticType = (*it)->getValueInit("DiagnosticType"); - if (StringInit *SI = dynamic_cast(DiagnosticType)) + if (StringInit *SI = dyn_cast(DiagnosticType)) CI->DiagnosticType = SI->getValue(); AsmOperandClasses[*it] = CI; @@ -1228,7 +1247,8 @@ void AsmMatcherInfo::buildOperandMatchInfo() { /// Map containing a mask with all operands indices that can be found for /// that class inside a instruction. - std::map OpClassMask; + typedef std::map OpClassMaskTy; + OpClassMaskTy OpClassMask; for (std::vector::const_iterator it = Matchables.begin(), ie = Matchables.end(); @@ -1247,7 +1267,7 @@ void AsmMatcherInfo::buildOperandMatchInfo() { } // Generate operand match info for each mnemonic/operand class pair. 
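// [Editorial aside; illustrative sketch, not part of the imported patch.]
// OpClassMask above maps each operand class to a bit mask of the operand
// positions where that class appears within a single instruction; the
// emitter later walks bits 0..30 of the mask when printing the entry.
// Hypothetical helpers showing how such a mask is built and queried (names
// invented for illustration):
namespace editor_sketch {
inline unsigned recordOperandPosition(unsigned Mask, unsigned OpIdx) {
  return Mask | (1u << OpIdx);          // class also parses operand OpIdx
}
inline bool parsesOperandAt(unsigned Mask, unsigned OpIdx) {
  return (Mask & (1u << OpIdx)) != 0;
}
} // namespace editor_sketch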
- for (std::map::iterator iit = OpClassMask.begin(), + for (OpClassMaskTy::iterator iit = OpClassMask.begin(), iie = OpClassMask.end(); iit != iie; ++iit) { unsigned OpMask = iit->second; ClassInfo *CI = iit->first; @@ -1267,7 +1287,7 @@ void AsmMatcherInfo::buildInfo() { continue; if (Pred->getName().empty()) - throw TGError(Pred->getLoc(), "Predicate has no name!"); + PrintFatalError(Pred->getLoc(), "Predicate has no name!"); unsigned FeatureNo = SubtargetFeatures.size(); SubtargetFeatures[Pred] = new SubtargetFeatureInfo(Pred, FeatureNo); @@ -1448,7 +1468,7 @@ void AsmMatcherInfo::buildInfo() { ClassInfo *FromClass = getTokenClass(Rec->getValueAsString("FromToken")); ClassInfo *ToClass = getTokenClass(Rec->getValueAsString("ToToken")); if (FromClass == ToClass) - throw TGError(Rec->getLoc(), + PrintFatalError(Rec->getLoc(), "error: Destination value identical to source value."); FromClass->SuperClasses.push_back(ToClass); } @@ -1470,7 +1490,7 @@ buildInstructionOperandReference(MatchableInfo *II, // Map this token to an operand. unsigned Idx; if (!Operands.hasOperandNamed(OperandName, Idx)) - throw TGError(II->TheDef->getLoc(), "error: unable to find operand: '" + + PrintFatalError(II->TheDef->getLoc(), "error: unable to find operand: '" + OperandName.str() + "'"); // If the instruction operand has multiple suboperands, but the parser @@ -1541,7 +1561,7 @@ void AsmMatcherInfo::buildAliasOperandReference(MatchableInfo *II, return; } - throw TGError(II->TheDef->getLoc(), "error: unable to find operand: '" + + PrintFatalError(II->TheDef->getLoc(), "error: unable to find operand: '" + OperandName.str() + "'"); } @@ -1563,7 +1583,7 @@ void MatchableInfo::buildInstructionResultOperands() { // Find out what operand from the asmparser this MCInst operand comes from. int SrcOperand = findAsmOperandNamed(OpInfo.Name); if (OpInfo.Name.empty() || SrcOperand == -1) - throw TGError(TheDef->getLoc(), "Instruction '" + + PrintFatalError(TheDef->getLoc(), "Instruction '" + TheDef->getName() + "' has operand '" + OpInfo.Name + "' that doesn't appear in asm string!"); @@ -1615,7 +1635,7 @@ void MatchableInfo::buildAliasResultOperands() { StringRef Name = CGA.ResultOperands[AliasOpNo].getName(); int SrcOperand = findAsmOperand(Name, SubIdx); if (SrcOperand == -1) - throw TGError(TheDef->getLoc(), "Instruction '" + + PrintFatalError(TheDef->getLoc(), "Instruction '" + TheDef->getName() + "' has operand '" + OpName + "' that doesn't appear in asm string!"); unsigned NumOperands = (SubIdx == -1 ? OpInfo->MINumOperands : 1); @@ -1638,35 +1658,85 @@ void MatchableInfo::buildAliasResultOperands() { } } -static void emitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName, - std::vector &Infos, - raw_ostream &OS) { - // Write the convert function to a separate stream, so we can drop it after - // the enum. - std::string ConvertFnBody; - raw_string_ostream CvtOS(ConvertFnBody); +static unsigned getConverterOperandID(const std::string &Name, + SetVector &Table, + bool &IsNew) { + IsNew = Table.insert(Name); - // Function we have already generated. - std::set GeneratedFns; + unsigned ID = IsNew ? Table.size() - 1 : + std::find(Table.begin(), Table.end(), Name) - Table.begin(); - // Start the unified conversion function. 
- CvtOS << "bool " << Target.getName() << ClassName << "::\n"; - CvtOS << "ConvertToMCInst(unsigned Kind, MCInst &Inst, " - << "unsigned Opcode,\n" - << " const SmallVectorImpl &Operands) {\n"; - CvtOS << " Inst.setOpcode(Opcode);\n"; - CvtOS << " switch (Kind) {\n"; - CvtOS << " default:\n"; + assert(ID < Table.size()); + + return ID; +} - // Start the enum, which we will generate inline. - OS << "// Unified function for converting operands to MCInst instances.\n\n"; - OS << "enum ConversionKind {\n"; +static void emitConvertFuncs(CodeGenTarget &Target, StringRef ClassName, + std::vector &Infos, + raw_ostream &OS) { + SetVector OperandConversionKinds; + SetVector InstructionConversionKinds; + std::vector > ConversionTable; + size_t MaxRowLength = 2; // minimum is custom converter plus terminator. // TargetOperandClass - This is the target's operand class, like X86Operand. std::string TargetOperandClass = Target.getName() + "Operand"; + // Write the convert function to a separate stream, so we can drop it after + // the enum. We'll build up the conversion handlers for the individual + // operand types opportunistically as we encounter them. + std::string ConvertFnBody; + raw_string_ostream CvtOS(ConvertFnBody); + // Start the unified conversion function. + CvtOS << "void " << Target.getName() << ClassName << "::\n" + << "convertToMCInst(unsigned Kind, MCInst &Inst, " + << "unsigned Opcode,\n" + << " const SmallVectorImpl &Operands) {\n" + << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n" + << " const uint8_t *Converter = ConversionTable[Kind];\n" + << " Inst.setOpcode(Opcode);\n" + << " for (const uint8_t *p = Converter; *p; p+= 2) {\n" + << " switch (*p) {\n" + << " default: llvm_unreachable(\"invalid conversion entry!\");\n" + << " case CVT_Reg:\n" + << " static_cast<" << TargetOperandClass + << "*>(Operands[*(p + 1)])->addRegOperands(Inst, 1);\n" + << " break;\n" + << " case CVT_Tied:\n" + << " Inst.addOperand(Inst.getOperand(*(p + 1)));\n" + << " break;\n"; + + std::string OperandFnBody; + raw_string_ostream OpOS(OperandFnBody); + // Start the operand number lookup function. + OpOS << "void " << Target.getName() << ClassName << "::\n" + << "convertToMapAndConstraints(unsigned Kind,\n"; + OpOS.indent(27); + OpOS << "const SmallVectorImpl &Operands) {\n" + << " assert(Kind < CVT_NUM_SIGNATURES && \"Invalid signature!\");\n" + << " unsigned NumMCOperands = 0;\n" + << " const uint8_t *Converter = ConversionTable[Kind];\n" + << " for (const uint8_t *p = Converter; *p; p+= 2) {\n" + << " switch (*p) {\n" + << " default: llvm_unreachable(\"invalid conversion entry!\");\n" + << " case CVT_Reg:\n" + << " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n" + << " Operands[*(p + 1)]->setConstraint(\"m\");\n" + << " ++NumMCOperands;\n" + << " break;\n" + << " case CVT_Tied:\n" + << " ++NumMCOperands;\n" + << " break;\n"; + + // Pre-populate the operand conversion kinds with the standard always + // available entries. + OperandConversionKinds.insert("CVT_Done"); + OperandConversionKinds.insert("CVT_Reg"); + OperandConversionKinds.insert("CVT_Tied"); + enum { CVT_Done, CVT_Reg, CVT_Tied }; + for (std::vector::const_iterator it = Infos.begin(), ie = Infos.end(); it != ie; ++it) { MatchableInfo &II = **it; @@ -1679,24 +1749,35 @@ static void emitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName, II.ConversionFnKind = Signature; // Check if we have already generated this signature. 
- if (!GeneratedFns.insert(Signature).second) + if (!InstructionConversionKinds.insert(Signature)) continue; - // If not, emit it now. Add to the enum list. - OS << " " << Signature << ",\n"; + // Remember this converter for the kind enum. + unsigned KindID = OperandConversionKinds.size(); + OperandConversionKinds.insert("CVT_" + AsmMatchConverter); + + // Add the converter row for this instruction. + ConversionTable.push_back(std::vector()); + ConversionTable.back().push_back(KindID); + ConversionTable.back().push_back(CVT_Done); + + // Add the handler to the conversion driver function. + CvtOS << " case CVT_" << AsmMatchConverter << ":\n" + << " " << AsmMatchConverter << "(Inst, Operands);\n" + << " break;\n"; - CvtOS << " case " << Signature << ":\n"; - CvtOS << " return " << AsmMatchConverter - << "(Inst, Opcode, Operands);\n"; + // FIXME: Handle the operand number lookup for custom match functions. continue; } // Build the conversion function signature. std::string Signature = "Convert"; - std::string CaseBody; - raw_string_ostream CaseOS(CaseBody); + + std::vector ConversionRow; // Compute the convert enum and the case body. + MaxRowLength = std::max(MaxRowLength, II.ResOperands.size()*2 + 1 ); + for (unsigned i = 0, e = II.ResOperands.size(); i != e; ++i) { const MatchableInfo::ResOperand &OpInfo = II.ResOperands[i]; @@ -1709,74 +1790,180 @@ static void emitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName, // Registers are always converted the same, don't duplicate the // conversion function based on them. Signature += "__"; - if (Op.Class->isRegisterClass()) - Signature += "Reg"; - else - Signature += Op.Class->ClassName; + std::string Class; + Class = Op.Class->isRegisterClass() ? "Reg" : Op.Class->ClassName; + Signature += Class; Signature += utostr(OpInfo.MINumOperands); Signature += "_" + itostr(OpInfo.AsmOperandNum); - CaseOS << " ((" << TargetOperandClass << "*)Operands[" - << (OpInfo.AsmOperandNum+1) << "])->" << Op.Class->RenderMethod - << "(Inst, " << OpInfo.MINumOperands << ");\n"; + // Add the conversion kind, if necessary, and get the associated ID + // the index of its entry in the vector). + std::string Name = "CVT_" + (Op.Class->isRegisterClass() ? "Reg" : + Op.Class->RenderMethod); + + bool IsNewConverter = false; + unsigned ID = getConverterOperandID(Name, OperandConversionKinds, + IsNewConverter); + + // Add the operand entry to the instruction kind conversion row. + ConversionRow.push_back(ID); + ConversionRow.push_back(OpInfo.AsmOperandNum + 1); + + if (!IsNewConverter) + break; + + // This is a new operand kind. Add a handler for it to the + // converter driver. + CvtOS << " case " << Name << ":\n" + << " static_cast<" << TargetOperandClass + << "*>(Operands[*(p + 1)])->" + << Op.Class->RenderMethod << "(Inst, " << OpInfo.MINumOperands + << ");\n" + << " break;\n"; + + // Add a handler for the operand number lookup. + OpOS << " case " << Name << ":\n" + << " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n" + << " Operands[*(p + 1)]->setConstraint(\"m\");\n" + << " NumMCOperands += " << OpInfo.MINumOperands << ";\n" + << " break;\n"; break; } - case MatchableInfo::ResOperand::TiedOperand: { // If this operand is tied to a previous one, just copy the MCInst // operand from the earlier one.We can only tie single MCOperand values. 
//assert(OpInfo.MINumOperands == 1 && "Not a singular MCOperand"); unsigned TiedOp = OpInfo.TiedOperandNum; assert(i > TiedOp && "Tied operand precedes its target!"); - CaseOS << " Inst.addOperand(Inst.getOperand(" << TiedOp << "));\n"; Signature += "__Tie" + utostr(TiedOp); + ConversionRow.push_back(CVT_Tied); + ConversionRow.push_back(TiedOp); + // FIXME: Handle the operand number lookup for tied operands. break; } case MatchableInfo::ResOperand::ImmOperand: { int64_t Val = OpInfo.ImmVal; - CaseOS << " Inst.addOperand(MCOperand::CreateImm(" << Val << "));\n"; - Signature += "__imm" + itostr(Val); + std::string Ty = "imm_" + itostr(Val); + Signature += "__" + Ty; + + std::string Name = "CVT_" + Ty; + bool IsNewConverter = false; + unsigned ID = getConverterOperandID(Name, OperandConversionKinds, + IsNewConverter); + // Add the operand entry to the instruction kind conversion row. + ConversionRow.push_back(ID); + ConversionRow.push_back(0); + + if (!IsNewConverter) + break; + + CvtOS << " case " << Name << ":\n" + << " Inst.addOperand(MCOperand::CreateImm(" << Val << "));\n" + << " break;\n"; + + OpOS << " case " << Name << ":\n" + << " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n" + << " Operands[*(p + 1)]->setConstraint(\"\");\n" + << " ++NumMCOperands;\n" + << " break;\n"; break; } case MatchableInfo::ResOperand::RegOperand: { + std::string Reg, Name; if (OpInfo.Register == 0) { - CaseOS << " Inst.addOperand(MCOperand::CreateReg(0));\n"; - Signature += "__reg0"; + Name = "reg0"; + Reg = "0"; } else { - std::string N = getQualifiedName(OpInfo.Register); - CaseOS << " Inst.addOperand(MCOperand::CreateReg(" << N << "));\n"; - Signature += "__reg" + OpInfo.Register->getName(); + Reg = getQualifiedName(OpInfo.Register); + Name = "reg" + OpInfo.Register->getName(); } + Signature += "__" + Name; + Name = "CVT_" + Name; + bool IsNewConverter = false; + unsigned ID = getConverterOperandID(Name, OperandConversionKinds, + IsNewConverter); + // Add the operand entry to the instruction kind conversion row. + ConversionRow.push_back(ID); + ConversionRow.push_back(0); + + if (!IsNewConverter) + break; + CvtOS << " case " << Name << ":\n" + << " Inst.addOperand(MCOperand::CreateReg(" << Reg << "));\n" + << " break;\n"; + + OpOS << " case " << Name << ":\n" + << " Operands[*(p + 1)]->setMCOperandNum(NumMCOperands);\n" + << " Operands[*(p + 1)]->setConstraint(\"m\");\n" + << " ++NumMCOperands;\n" + << " break;\n"; } } } + // If there were no operands, add to the signature to that effect + if (Signature == "Convert") + Signature += "_NoOperands"; + II.ConversionFnKind = Signature; - // Check if we have already generated this signature. - if (!GeneratedFns.insert(Signature).second) + // Save the signature. If we already have it, don't add a new row + // to the table. + if (!InstructionConversionKinds.insert(Signature)) continue; - // If not, emit it now. Add to the enum list. - OS << " " << Signature << ",\n"; - - CvtOS << " case " << Signature << ":\n"; - CvtOS << CaseOS.str(); - CvtOS << " return true;\n"; + // Add the row to the table. + ConversionTable.push_back(ConversionRow); } - // Finish the convert function. + // Finish up the converter driver function. + CvtOS << " }\n }\n}\n\n"; + + // Finish up the operand number lookup function. + OpOS << " }\n }\n}\n\n"; - CvtOS << " }\n"; - CvtOS << " return false;\n"; - CvtOS << "}\n\n"; + OS << "namespace {\n"; + + // Output the operand conversion kind enum. 
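// [Editorial aside; illustrative sketch, not part of the imported patch.]
// The ConversionTable emitted further below is decoded two bytes at a time
// by convertToMCInst(): an operand-conversion kind followed by an operand
// index, with a zero-valued terminator. A hypothetical stand-alone row with
// invented enumerator names, just to show the shape of one entry:
namespace editor_sketch {
enum { SK_Done = 0, SK_Reg, SK_Imm0 };  // SK_Done must be 0 to stop the loop
static const unsigned char ExampleConversionRow[] = {
  SK_Reg, 1,    // render asm operand 1 via its addRegOperands-style method
  SK_Imm0, 0,   // append a hard-coded immediate; the index byte is unused
  SK_Done       // terminator
};
} // namespace editor_sketch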
+ OS << "enum OperatorConversionKind {\n"; + for (unsigned i = 0, e = OperandConversionKinds.size(); i != e; ++i) + OS << " " << OperandConversionKinds[i] << ",\n"; + OS << " CVT_NUM_CONVERTERS\n"; + OS << "};\n\n"; + + // Output the instruction conversion kind enum. + OS << "enum InstructionConversionKind {\n"; + for (SetVector::const_iterator + i = InstructionConversionKinds.begin(), + e = InstructionConversionKinds.end(); i != e; ++i) + OS << " " << *i << ",\n"; + OS << " CVT_NUM_SIGNATURES\n"; + OS << "};\n\n"; + + + OS << "} // end anonymous namespace\n\n"; - // Finish the enum, and drop the convert function after it. + // Output the conversion table. + OS << "static const uint8_t ConversionTable[CVT_NUM_SIGNATURES][" + << MaxRowLength << "] = {\n"; + + for (unsigned Row = 0, ERow = ConversionTable.size(); Row != ERow; ++Row) { + assert(ConversionTable[Row].size() % 2 == 0 && "bad conversion row!"); + OS << " // " << InstructionConversionKinds[Row] << "\n"; + OS << " { "; + for (unsigned i = 0, e = ConversionTable[Row].size(); i != e; i += 2) + OS << OperandConversionKinds[ConversionTable[Row][i]] << ", " + << (unsigned)(ConversionTable[Row][i + 1]) << ", "; + OS << "CVT_Done },\n"; + } - OS << " NumConversionVariants\n"; OS << "};\n\n"; + // Spit out the conversion driver function. OS << CvtOS.str(); + + // Spit out the operand number lookup function. + OS << OpOS.str(); } /// emitMatchClassEnumeration - Emit the enumeration for match class kinds. @@ -1853,7 +2040,7 @@ static void emitValidateOperandClass(AsmMatcherInfo &Info, OS << " MatchClassKind OpKind;\n"; OS << " switch (Operand.getReg()) {\n"; OS << " default: OpKind = InvalidMatchClass; break;\n"; - for (std::map::iterator + for (AsmMatcherInfo::RegisterClassesTy::iterator it = Info.RegisterClasses.begin(), ie = Info.RegisterClasses.end(); it != ie; ++it) OS << " case " << Info.Target.getName() << "::" @@ -1874,7 +2061,7 @@ static void emitValidateOperandClass(AsmMatcherInfo &Info, static void emitIsSubclass(CodeGenTarget &Target, std::vector &Infos, raw_ostream &OS) { - OS << "/// isSubclass - Compute whether \\arg A is a subclass of \\arg B.\n"; + OS << "/// isSubclass - Compute whether \\p A is a subclass of \\p B.\n"; OS << "static bool isSubclass(MatchClassKind A, MatchClassKind B) {\n"; OS << " if (A == B)\n"; OS << " return true;\n\n"; @@ -2083,7 +2270,7 @@ static std::string GetAliasRequiredFeatures(Record *R, SubtargetFeatureInfo *F = Info.getSubtargetFeature(ReqFeatures[i]); if (F == 0) - throw TGError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() + + PrintFatalError(R->getLoc(), "Predicate '" + ReqFeatures[i]->getName() + "' is not marked as an AssemblerPredicate!"); if (NumFeatures) @@ -2146,14 +2333,14 @@ static bool emitMnemonicAliases(raw_ostream &OS, const AsmMatcherInfo &Info) { // We can't have two aliases from the same mnemonic with no predicate. 
PrintError(ToVec[AliasWithNoPredicate]->getLoc(), "two MnemonicAliases with the same 'from' mnemonic!"); - throw TGError(R->getLoc(), "this is the other MnemonicAlias."); + PrintFatalError(R->getLoc(), "this is the other MnemonicAlias."); } AliasWithNoPredicate = i; continue; } if (R->getValueAsString("ToMnemonic") == I->first) - throw TGError(R->getLoc(), "MnemonicAlias to the same string"); + PrintFatalError(R->getLoc(), "MnemonicAlias to the same string"); if (!MatchCode.empty()) MatchCode += "else "; @@ -2189,17 +2376,27 @@ static const char *getMinimalTypeForRange(uint64_t Range) { } static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target, - const AsmMatcherInfo &Info, StringRef ClassName) { + const AsmMatcherInfo &Info, StringRef ClassName, + StringToOffsetTable &StringTable, + unsigned MaxMnemonicIndex) { + unsigned MaxMask = 0; + for (std::vector::const_iterator it = + Info.OperandMatchInfo.begin(), ie = Info.OperandMatchInfo.end(); + it != ie; ++it) { + MaxMask |= it->OperandMask; + } + // Emit the static custom operand parsing table; OS << "namespace {\n"; OS << " struct OperandMatchEntry {\n"; - OS << " static const char *const MnemonicTable;\n"; - OS << " uint32_t OperandMask;\n"; - OS << " uint32_t Mnemonic;\n"; OS << " " << getMinimalTypeForRange(1ULL << Info.SubtargetFeatures.size()) << " RequiredFeatures;\n"; + OS << " " << getMinimalTypeForRange(MaxMnemonicIndex) + << " Mnemonic;\n"; OS << " " << getMinimalTypeForRange(Info.Classes.size()) - << " Class;\n\n"; + << " Class;\n"; + OS << " " << getMinimalTypeForRange(MaxMask) + << " OperandMask;\n\n"; OS << " StringRef getMnemonic() const {\n"; OS << " return StringRef(MnemonicTable + Mnemonic + 1,\n"; OS << " MnemonicTable[Mnemonic]);\n"; @@ -2222,8 +2419,6 @@ static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target, OS << "} // end anonymous namespace.\n\n"; - StringToOffsetTable StringTable; - OS << "static const OperandMatchEntry OperandMatchTable[" << Info.OperandMatchInfo.size() << "] = {\n"; @@ -2234,8 +2429,25 @@ static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target, const OperandMatchEntry &OMI = *it; const MatchableInfo &II = *OMI.MI; - OS << " { " << OMI.OperandMask; + OS << " { "; + // Write the required features mask. + if (!II.RequiredFeatures.empty()) { + for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) { + if (i) OS << "|"; + OS << II.RequiredFeatures[i]->getEnumName(); + } + } else + OS << "0"; + + // Store a pascal-style length byte in the mnemonic. + std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str(); + OS << ", " << StringTable.GetOrAddStringOffset(LenMnemonic, false) + << " /* " << II.Mnemonic << " */, "; + + OS << OMI.CI->Name; + + OS << ", " << OMI.OperandMask; OS << " /* "; bool printComma = false; for (int i = 0, e = 31; i !=e; ++i) @@ -2247,30 +2459,10 @@ static void emitCustomOperandParsing(raw_ostream &OS, CodeGenTarget &Target, } OS << " */"; - // Store a pascal-style length byte in the mnemonic. - std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str(); - OS << ", " << StringTable.GetOrAddStringOffset(LenMnemonic, false) - << " /* " << II.Mnemonic << " */, "; - - // Write the required features mask. 
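// Illustrative sketch (not from the patch): the idea behind a helper in the
// spirit of getMinimalTypeForRange, which the reworked OperandMatchEntry and
// MatchEntry structs use to size their fields. It picks the narrowest unsigned
// type name that can hold every value up to Range, so the emitted tables stay
// small. The exact thresholds in the real TableGen helper may differ; this is
// only the general shape.
#include <cstdint>
#include <cstdio>

static const char *demoMinimalTypeForRange(uint64_t Range) {
  if (Range <= 0xFF)       return "uint8_t";
  if (Range <= 0xFFFF)     return "uint16_t";
  if (Range <= 0xFFFFFFFF) return "uint32_t";
  return "uint64_t";
}

int main() {
  // A table indexed by roughly 300 distinct mnemonic offsets needs 16-bit fields.
  std::printf("%s\n", demoMinimalTypeForRange(300)); // prints uint16_t
  return 0;
}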
- if (!II.RequiredFeatures.empty()) { - for (unsigned i = 0, e = II.RequiredFeatures.size(); i != e; ++i) { - if (i) OS << "|"; - OS << II.RequiredFeatures[i]->getEnumName(); - } - } else - OS << "0"; - - OS << ", " << OMI.CI->Name; - OS << " },\n"; } OS << "};\n\n"; - OS << "const char *const OperandMatchEntry::MnemonicTable =\n"; - StringTable.EmitString(OS); - OS << ";\n\n"; - // Emit the operand class switch to call the correct custom parser for // the found operand class. OS << Target.getName() << ClassName << "::OperandMatchResultTy " @@ -2407,14 +2599,20 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { OS << " // This should be included into the middle of the declaration of\n"; OS << " // your subclasses implementation of MCTargetAsmParser.\n"; OS << " unsigned ComputeAvailableFeatures(uint64_t FeatureBits) const;\n"; - OS << " bool ConvertToMCInst(unsigned Kind, MCInst &Inst, " + OS << " void convertToMCInst(unsigned Kind, MCInst &Inst, " << "unsigned Opcode,\n" << " const SmallVectorImpl " << "&Operands);\n"; - OS << " bool MnemonicIsValid(StringRef Mnemonic);\n"; + OS << " void convertToMapAndConstraints(unsigned Kind,\n "; + OS << " const SmallVectorImpl &Operands);\n"; + OS << " bool mnemonicIsValid(StringRef Mnemonic);\n"; OS << " unsigned MatchInstructionImpl(\n"; - OS << " const SmallVectorImpl &Operands,\n"; - OS << " MCInst &Inst, unsigned &ErrorInfo, unsigned VariantID = 0);\n"; + OS.indent(27); + OS << "const SmallVectorImpl &Operands,\n" + << " MCInst &Inst,\n" + << " unsigned &ErrorInfo," + << " bool matchingInlineAsm,\n" + << " unsigned VariantID = 0);\n"; if (Info.OperandMatchInfo.size()) { OS << "\n enum OperandMatchResultTy {\n"; @@ -2447,7 +2645,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { emitSubtargetFeatureFlagEnumeration(Info, OS); // Emit the function to match a register name to number. - emitMatchRegisterName(Target, AsmParser, OS); + // This should be omitted for Mips target + if (AsmParser->getValueAsBit("ShouldEmitMatchRegisterName")) + emitMatchRegisterName(Target, AsmParser, OS); OS << "#endif // GET_REGISTER_MATCHER\n\n"; @@ -2465,8 +2665,10 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { // Generate the function that remaps for mnemonic aliases. bool HasMnemonicAliases = emitMnemonicAliases(OS, Info); - // Generate the unified function to convert operands into an MCInst. - emitConvertToMCInst(Target, ClassName, Info.Matchables, OS); + // Generate the convertToMCInst function to convert operands into an MCInst. + // Also, generate the convertToMapAndConstraints function for MS-style inline + // assembly. The latter doesn't actually generate a MCInst. + emitConvertFuncs(Target, ClassName, Info.Matchables, OS); // Emit the enumeration for classes which participate in matching. emitMatchClassEnumeration(Target, Info.Classes, OS); @@ -2484,11 +2686,25 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { emitComputeAvailableFeatures(Info, OS); + StringToOffsetTable StringTable; + size_t MaxNumOperands = 0; + unsigned MaxMnemonicIndex = 0; for (std::vector::const_iterator it = Info.Matchables.begin(), ie = Info.Matchables.end(); - it != ie; ++it) - MaxNumOperands = std::max(MaxNumOperands, (*it)->AsmOperands.size()); + it != ie; ++it) { + MatchableInfo &II = **it; + MaxNumOperands = std::max(MaxNumOperands, II.AsmOperands.size()); + + // Store a pascal-style length byte in the mnemonic. 
+ std::string LenMnemonic = char(II.Mnemonic.size()) + II.Mnemonic.str(); + MaxMnemonicIndex = std::max(MaxMnemonicIndex, + StringTable.GetOrAddStringOffset(LenMnemonic, false)); + } + + OS << "static const char *const MnemonicTable =\n"; + StringTable.EmitString(OS); + OS << ";\n\n"; // Emit the static match table; unused classes get initalized to 0 which is // guaranteed to be InvalidMatchClass. @@ -2502,8 +2718,8 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { // following the mnemonic. OS << "namespace {\n"; OS << " struct MatchEntry {\n"; - OS << " static const char *const MnemonicTable;\n"; - OS << " uint32_t Mnemonic;\n"; + OS << " " << getMinimalTypeForRange(MaxMnemonicIndex) + << " Mnemonic;\n"; OS << " uint16_t Opcode;\n"; OS << " " << getMinimalTypeForRange(Info.Matchables.size()) << " ConvertFn;\n"; @@ -2533,8 +2749,6 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { OS << "} // end anonymous namespace.\n\n"; - StringToOffsetTable StringTable; - OS << "static const MatchEntry MatchTable[" << Info.Matchables.size() << "] = {\n"; @@ -2573,13 +2787,9 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { OS << "};\n\n"; - OS << "const char *const MatchEntry::MnemonicTable =\n"; - StringTable.EmitString(OS); - OS << ";\n\n"; - // A method to determine if a mnemonic is in the list. OS << "bool " << Target.getName() << ClassName << "::\n" - << "MnemonicIsValid(StringRef Mnemonic) {\n"; + << "mnemonicIsValid(StringRef Mnemonic) {\n"; OS << " // Search the table.\n"; OS << " std::pair MnemonicRange =\n"; OS << " std::equal_range(MatchTable, MatchTable+" @@ -2592,8 +2802,14 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { << Target.getName() << ClassName << "::\n" << "MatchInstructionImpl(const SmallVectorImpl" << " &Operands,\n"; - OS << " MCInst &Inst, unsigned &ErrorInfo, "; - OS << "unsigned VariantID) {\n"; + OS << " MCInst &Inst,\n" + << "unsigned &ErrorInfo, bool matchingInlineAsm, unsigned VariantID) {\n"; + + OS << " // Eliminate obvious mismatches.\n"; + OS << " if (Operands.size() > " << (MaxNumOperands+1) << ") {\n"; + OS << " ErrorInfo = " << (MaxNumOperands+1) << ";\n"; + OS << " return Match_InvalidOperand;\n"; + OS << " }\n\n"; // Emit code to get the available features. OS << " // Get the current feature set.\n"; @@ -2611,12 +2827,6 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { } // Emit code to compute the class list for this operand vector. 
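// Illustrative sketch (not from the patch): the "pascal-style length byte"
// trick used for the shared MnemonicTable. Each entry is stored as a one-byte
// length followed by the characters, so a single table offset recovers both
// the string and its length without scanning for a terminator. The table
// contents and offsets below are invented for the example.
#include <cstdio>
#include <string>

// Two entries: length byte 3 + "add" at offset 0, length byte 3 + "sub" at offset 4.
static const char DemoMnemonicTable[] = "\003add\003sub";

static std::string demoGetMnemonic(unsigned Offset) {
  unsigned char Len = DemoMnemonicTable[Offset];        // leading length byte
  return std::string(DemoMnemonicTable + Offset + 1, Len);
}

int main() {
  std::printf("%s %s\n", demoGetMnemonic(0).c_str(), demoGetMnemonic(4).c_str());
  return 0;
}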
- OS << " // Eliminate obvious mismatches.\n"; - OS << " if (Operands.size() > " << (MaxNumOperands+1) << ") {\n"; - OS << " ErrorInfo = " << (MaxNumOperands+1) << ";\n"; - OS << " return Match_InvalidOperand;\n"; - OS << " }\n\n"; - OS << " // Some state to try to produce better error messages.\n"; OS << " bool HadMatchOtherThanFeatures = false;\n"; OS << " bool HadMatchOtherThanPredicate = false;\n"; @@ -2681,17 +2891,20 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { OS << " HadMatchOtherThanFeatures = true;\n"; OS << " unsigned NewMissingFeatures = it->RequiredFeatures & " "~AvailableFeatures;\n"; - OS << " if (CountPopulation_32(NewMissingFeatures) <= " - "CountPopulation_32(MissingFeatures))\n"; + OS << " if (CountPopulation_32(NewMissingFeatures) <=\n" + " CountPopulation_32(MissingFeatures))\n"; OS << " MissingFeatures = NewMissingFeatures;\n"; OS << " continue;\n"; OS << " }\n"; OS << "\n"; + OS << " if (matchingInlineAsm) {\n"; + OS << " Inst.setOpcode(it->Opcode);\n"; + OS << " convertToMapAndConstraints(it->ConvertFn, Operands);\n"; + OS << " return Match_Success;\n"; + OS << " }\n\n"; OS << " // We have selected a definite instruction, convert the parsed\n" << " // operands into the appropriate MCInst.\n"; - OS << " if (!ConvertToMCInst(it->ConvertFn, Inst,\n" - << " it->Opcode, Operands))\n"; - OS << " return Match_ConversionFail;\n"; + OS << " convertToMCInst(it->ConvertFn, Inst, it->Opcode, Operands);\n"; OS << "\n"; // Verify the instruction with the target-specific match predicate function. @@ -2716,15 +2929,16 @@ void AsmMatcherEmitter::run(raw_ostream &OS) { OS << " }\n\n"; OS << " // Okay, we had no match. Try to return a useful error code.\n"; - OS << " if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)"; - OS << " return RetCode;\n"; + OS << " if (HadMatchOtherThanPredicate || !HadMatchOtherThanFeatures)\n"; + OS << " return RetCode;\n\n"; OS << " // Missing feature matches return which features were missing\n"; OS << " ErrorInfo = MissingFeatures;\n"; OS << " return Match_MissingFeature;\n"; OS << "}\n\n"; if (Info.OperandMatchInfo.size()) - emitCustomOperandParsing(OS, Target, Info, ClassName); + emitCustomOperandParsing(OS, Target, Info, ClassName, StringTable, + MaxMnemonicIndex); OS << "#endif // GET_MATCHER_IMPLEMENTATION\n\n"; } diff --git a/utils/TableGen/AsmWriterEmitter.cpp b/utils/TableGen/AsmWriterEmitter.cpp index 57979b3..a4114d9 100644 --- a/utils/TableGen/AsmWriterEmitter.cpp +++ b/utils/TableGen/AsmWriterEmitter.cpp @@ -313,7 +313,9 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) { /// OpcodeInfo - This encodes the index of the string to use for the first /// chunk of the output as well as indices used for operand printing. - std::vector OpcodeInfo; + /// To reduce the number of unhandled cases, we expand the size from 32-bit + /// to 32+16 = 48-bit. + std::vector OpcodeInfo; // Add all strings to the string table upfront so it can generate an optimized // representation. @@ -362,7 +364,7 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) { // To reduce code size, we compactify common instructions into a few bits // in the opcode-indexed table. - unsigned BitsLeft = 32-AsmStrBits; + unsigned BitsLeft = 64-AsmStrBits; std::vector > TableDrivenOperandPrinters; @@ -388,10 +390,11 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) { } // Otherwise, we can include this in the initial lookup table. Add it in. 
- BitsLeft -= NumBits; for (unsigned i = 0, e = InstIdxs.size(); i != e; ++i) - if (InstIdxs[i] != ~0U) - OpcodeInfo[i] |= InstIdxs[i] << (BitsLeft+AsmStrBits); + if (InstIdxs[i] != ~0U) { + OpcodeInfo[i] |= (uint64_t)InstIdxs[i] << (64-BitsLeft); + } + BitsLeft -= NumBits; // Remove the info about this operand. for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) { @@ -410,16 +413,32 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) { } - - O<<" static const unsigned OpInfo[] = {\n"; + // We always emit at least one 32-bit table. A second table is emitted if + // more bits are needed. + O<<" static const uint32_t OpInfo[] = {\n"; for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) { - O << " " << OpcodeInfo[i] << "U,\t// " + O << " " << (OpcodeInfo[i] & 0xffffffff) << "U,\t// " << NumberedInstructions[i]->TheDef->getName() << "\n"; } // Add a dummy entry so the array init doesn't end with a comma. O << " 0U\n"; O << " };\n\n"; + if (BitsLeft < 32) { + // Add a second OpInfo table only when it is necessary. + // Adjust the type of the second table based on the number of bits needed. + O << " static const uint" + << ((BitsLeft < 16) ? "32" : (BitsLeft < 24) ? "16" : "8") + << "_t OpInfo2[] = {\n"; + for (unsigned i = 0, e = NumberedInstructions.size(); i != e; ++i) { + O << " " << (OpcodeInfo[i] >> 32) << "U,\t// " + << NumberedInstructions[i]->TheDef->getName() << "\n"; + } + // Add a dummy entry so the array init doesn't end with a comma. + O << " 0U\n"; + O << " };\n\n"; + } + // Emit the string itself. O << " const char AsmStrs[] = {\n"; StringTable.emit(O, printChar); @@ -427,13 +446,22 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) { O << " O << \"\\t\";\n\n"; - O << " // Emit the opcode for the instruction.\n" - << " unsigned Bits = OpInfo[MI->getOpcode()];\n" - << " assert(Bits != 0 && \"Cannot print this instruction.\");\n" + O << " // Emit the opcode for the instruction.\n"; + if (BitsLeft < 32) { + // If we have two tables then we need to perform two lookups and combine + // the results into a single 64-bit value. + O << " uint64_t Bits1 = OpInfo[MI->getOpcode()];\n" + << " uint64_t Bits2 = OpInfo2[MI->getOpcode()];\n" + << " uint64_t Bits = (Bits2 << 32) | Bits1;\n"; + } else { + // If only one table is used we just need to perform a single lookup. + O << " uint32_t Bits = OpInfo[MI->getOpcode()];\n"; + } + O << " assert(Bits != 0 && \"Cannot print this instruction.\");\n" << " O << AsmStrs+(Bits & " << (1 << AsmStrBits)-1 << ")-1;\n\n"; // Output the table driven operand information. - BitsLeft = 32-AsmStrBits; + BitsLeft = 64-AsmStrBits; for (unsigned i = 0, e = TableDrivenOperandPrinters.size(); i != e; ++i) { std::vector &Commands = TableDrivenOperandPrinters[i]; @@ -443,14 +471,13 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) { assert(NumBits <= BitsLeft && "consistency error"); // Emit code to extract this field from Bits. - BitsLeft -= NumBits; - O << "\n // Fragment " << i << " encoded into " << NumBits << " bits for " << Commands.size() << " unique commands.\n"; if (Commands.size() == 2) { // Emit two possibilitys with if/else. - O << " if ((Bits >> " << (BitsLeft+AsmStrBits) << ") & " + O << " if ((Bits >> " + << (64-BitsLeft) << ") & " << ((1 << NumBits)-1) << ") {\n" << Commands[1] << " } else {\n" @@ -460,7 +487,8 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) { // Emit a single possibility. 
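// Illustrative sketch (not from the patch): why EmitPrintInstruction now emits
// a 32-bit OpInfo table plus an optional second table. The per-opcode payload
// can exceed 32 bits, so the generated printer looks up both halves, glues
// them back into one 64-bit value, and then extracts the asm-string offset and
// the per-fragment operand-printing selectors from it. The table contents and
// field widths in this toy layout are made up.
#include <cstdint>
#include <cstdio>

static const uint32_t DemoOpInfo[]  = { 0x0000002A };  // low 32 bits per opcode
static const uint16_t DemoOpInfo2[] = { 0x0001 };      // remaining high bits per opcode

int main() {
  unsigned Opcode = 0;
  uint64_t Bits1 = DemoOpInfo[Opcode];
  uint64_t Bits2 = DemoOpInfo2[Opcode];
  uint64_t Bits  = (Bits2 << 32) | Bits1;   // recombine the two lookups

  // In this toy layout the low 12 bits are the asm-string offset and the next
  // 2 bits select one of up to four printing methods for the first fragment.
  unsigned AsmStrBits = 12;
  unsigned StrOffset  = Bits & ((1u << AsmStrBits) - 1);
  unsigned Fragment0  = (Bits >> AsmStrBits) & 3;
  std::printf("offset=%u fragment0=%u high-half=%llu\n",
              StrOffset, Fragment0, (unsigned long long)(Bits >> 32));
  return 0;
}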
O << Commands[0] << "\n\n"; } else { - O << " switch ((Bits >> " << (BitsLeft+AsmStrBits) << ") & " + O << " switch ((Bits >> " + << (64-BitsLeft) << ") & " << ((1 << NumBits)-1) << ") {\n" << " default: // unreachable.\n"; @@ -472,6 +500,7 @@ void AsmWriterEmitter::EmitPrintInstruction(raw_ostream &O) { } O << " }\n\n"; } + BitsLeft -= NumBits; } // Okay, delete instructions with no operand info left. @@ -537,9 +566,9 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName, std::vector AltNames = Reg.TheDef->getValueAsListOfStrings("AltNames"); if (AltNames.size() <= Idx) - throw TGError(Reg.TheDef->getLoc(), - (Twine("Register definition missing alt name for '") + - AltName + "'.").str()); + PrintFatalError(Reg.TheDef->getLoc(), + (Twine("Register definition missing alt name for '") + + AltName + "'.").str()); AsmName = AltNames[Idx]; } } @@ -551,7 +580,7 @@ emitRegisterNameString(raw_ostream &O, StringRef AltName, StringTable.emit(O, printChar); O << " };\n\n"; - O << " static const unsigned RegAsmOffset" << AltName << "[] = {"; + O << " static const uint32_t RegAsmOffset" << AltName << "[] = {"; for (unsigned i = 0, e = Registers.size(); i != e; ++i) { if ((i % 14) == 0) O << "\n "; @@ -590,7 +619,7 @@ void AsmWriterEmitter::EmitGetRegisterName(raw_ostream &O) { emitRegisterNameString(O, "", Registers); if (hasAltNames) { - O << " const unsigned *RegAsmOffset;\n" + O << " const uint32_t *RegAsmOffset;\n" << " const char *AsmStrs;\n" << " switch(AltIdx) {\n" << " default: llvm_unreachable(\"Invalid register alt name index!\");\n"; @@ -763,7 +792,7 @@ void AsmWriterEmitter::EmitPrintAliasInstruction(raw_ostream &O) { if (!R->getValueAsBit("EmitAlias")) continue; // We were told not to emit the alias, but to emit the aliasee. const DagInit *DI = R->getValueAsDag("ResultInst"); - const DefInit *Op = dynamic_cast(DI->getOperator()); + const DefInit *Op = cast(DI->getOperator()); AliasMap[getQualifiedName(Op->getDef())].push_back(Alias); } diff --git a/utils/TableGen/AsmWriterInst.cpp b/utils/TableGen/AsmWriterInst.cpp index 350a2cc..fe1f756 100644 --- a/utils/TableGen/AsmWriterInst.cpp +++ b/utils/TableGen/AsmWriterInst.cpp @@ -14,6 +14,7 @@ #include "AsmWriterInst.h" #include "CodeGenTarget.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" using namespace llvm; @@ -123,8 +124,8 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI, != std::string::npos) { AddLiteralString(std::string(1, AsmString[DollarPos+1])); } else { - throw "Non-supported escaped character found in instruction '" + - CGI.TheDef->getName() + "'!"; + PrintFatalError("Non-supported escaped character found in instruction '" + + CGI.TheDef->getName() + "'!"); } LastEmitted = DollarPos+2; continue; @@ -162,15 +163,15 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI, // brace. if (hasCurlyBraces) { if (VarEnd >= AsmString.size()) - throw "Reached end of string before terminating curly brace in '" - + CGI.TheDef->getName() + "'"; + PrintFatalError("Reached end of string before terminating curly brace in '" + + CGI.TheDef->getName() + "'"); // Look for a modifier string. 
if (AsmString[VarEnd] == ':') { ++VarEnd; if (VarEnd >= AsmString.size()) - throw "Reached end of string before terminating curly brace in '" - + CGI.TheDef->getName() + "'"; + PrintFatalError("Reached end of string before terminating curly brace in '" + + CGI.TheDef->getName() + "'"); unsigned ModifierStart = VarEnd; while (VarEnd < AsmString.size() && isIdentChar(AsmString[VarEnd])) @@ -178,17 +179,17 @@ AsmWriterInst::AsmWriterInst(const CodeGenInstruction &CGI, Modifier = std::string(AsmString.begin()+ModifierStart, AsmString.begin()+VarEnd); if (Modifier.empty()) - throw "Bad operand modifier name in '"+ CGI.TheDef->getName() + "'"; + PrintFatalError("Bad operand modifier name in '"+ CGI.TheDef->getName() + "'"); } if (AsmString[VarEnd] != '}') - throw "Variable name beginning with '{' did not end with '}' in '" - + CGI.TheDef->getName() + "'"; + PrintFatalError("Variable name beginning with '{' did not end with '}' in '" + + CGI.TheDef->getName() + "'"); ++VarEnd; } if (VarName.empty() && Modifier.empty()) - throw "Stray '$' in '" + CGI.TheDef->getName() + - "' asm string, maybe you want $$?"; + PrintFatalError("Stray '$' in '" + CGI.TheDef->getName() + + "' asm string, maybe you want $$?"); if (VarName.empty()) { // Just a modifier, pass this into PrintSpecial. diff --git a/utils/TableGen/CMakeLists.txt b/utils/TableGen/CMakeLists.txt index 0e14cba..d0416c9 100644 --- a/utils/TableGen/CMakeLists.txt +++ b/utils/TableGen/CMakeLists.txt @@ -1,5 +1,3 @@ -set(LLVM_REQUIRES_EH 1) -set(LLVM_REQUIRES_RTTI 1) set(LLVM_LINK_COMPONENTS Support) add_tablegen(llvm-tblgen LLVM @@ -10,6 +8,7 @@ add_tablegen(llvm-tblgen LLVM CodeEmitterGen.cpp CodeGenDAGPatterns.cpp CodeGenInstruction.cpp + CodeGenMapTable.cpp CodeGenRegisters.cpp CodeGenSchedule.cpp CodeGenTarget.cpp diff --git a/utils/TableGen/CallingConvEmitter.cpp b/utils/TableGen/CallingConvEmitter.cpp index e9c4bd3..94f3c65 100644 --- a/utils/TableGen/CallingConvEmitter.cpp +++ b/utils/TableGen/CallingConvEmitter.cpp @@ -13,6 +13,7 @@ //===----------------------------------------------------------------------===// #include "CodeGenTarget.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include "llvm/TableGen/TableGenBackend.h" #include @@ -93,7 +94,7 @@ void CallingConvEmitter::EmitAction(Record *Action, O << Action->getValueAsString("Predicate"); } else { Action->dump(); - throw "Unknown CCPredicateAction!"; + PrintFatalError("Unknown CCPredicateAction!"); } O << ") {\n"; @@ -131,7 +132,7 @@ void CallingConvEmitter::EmitAction(Record *Action, ListInit *ShadowRegList = Action->getValueAsListInit("ShadowRegList"); if (ShadowRegList->getSize() >0 && ShadowRegList->getSize() != RegList->getSize()) - throw "Invalid length of list of shadowed registers"; + PrintFatalError("Invalid length of list of shadowed registers"); if (RegList->getSize() == 1) { O << IndentStr << "if (unsigned Reg = State.AllocateReg("; @@ -177,12 +178,12 @@ void CallingConvEmitter::EmitAction(Record *Action, if (Size) O << Size << ", "; else - O << "\n" << IndentStr << " State.getTarget().getTargetData()" + O << "\n" << IndentStr << " State.getTarget().getDataLayout()" "->getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())), "; if (Align) O << Align; else - O << "\n" << IndentStr << " State.getTarget().getTargetData()" + O << "\n" << IndentStr << " State.getTarget().getDataLayout()" "->getABITypeAlignment(EVT(LocVT).getTypeForEVT(State.getContext()))"; if (Action->isSubClassOf("CCAssignToStackWithShadow")) O << ", " << 
getQualifiedName(Action->getValueAsDef("ShadowReg")); @@ -221,7 +222,7 @@ void CallingConvEmitter::EmitAction(Record *Action, O << IndentStr << IndentStr << "return false;\n"; } else { Action->dump(); - throw "Unknown CCAction!"; + PrintFatalError("Unknown CCAction!"); } } } diff --git a/utils/TableGen/CodeEmitterGen.cpp b/utils/TableGen/CodeEmitterGen.cpp index 31a39b1..3e4f626 100644 --- a/utils/TableGen/CodeEmitterGen.cpp +++ b/utils/TableGen/CodeEmitterGen.cpp @@ -91,11 +91,11 @@ void CodeEmitterGen::reverseBits(std::vector &Insts) { // return the variable bit position. Otherwise return -1. int CodeEmitterGen::getVariableBit(const std::string &VarName, BitsInit *BI, int bit) { - if (VarBitInit *VBI = dynamic_cast(BI->getBit(bit))) { - if (VarInit *VI = dynamic_cast(VBI->getVariable())) + if (VarBitInit *VBI = dyn_cast(BI->getBit(bit))) { + if (VarInit *VI = dyn_cast(VBI->getBitVar())) if (VI->getName() == VarName) return VBI->getBitNum(); - } else if (VarInit *VI = dynamic_cast(BI->getBit(bit))) { + } else if (VarInit *VI = dyn_cast(BI->getBit(bit))) { if (VI->getName() == VarName) return 0; } @@ -134,10 +134,13 @@ AddCodeToMergeInOperand(Record *R, BitsInit *BI, const std::string &VarName, assert(!CGI.Operands.isFlatOperandNotEmitted(OpIdx) && "Explicitly used operand also marked as not emitted!"); } else { + unsigned NumberOps = CGI.Operands.size(); /// If this operand is not supposed to be emitted by the /// generated emitter, skip it. - while (CGI.Operands.isFlatOperandNotEmitted(NumberedOp)) + while (NumberedOp < NumberOps && + CGI.Operands.isFlatOperandNotEmitted(NumberedOp)) ++NumberedOp; + OpIdx = NumberedOp++; } @@ -269,7 +272,7 @@ void CodeEmitterGen::run(raw_ostream &o) { // Start by filling in fixed values. uint64_t Value = 0; for (unsigned i = 0, e = BI->getNumBits(); i != e; ++i) { - if (BitInit *B = dynamic_cast(BI->getBit(e-i-1))) + if (BitInit *B = dyn_cast(BI->getBit(e-i-1))) Value |= (uint64_t)B->getValue() << (e-i-1); } o << " UINT64_C(" << Value << ")," << '\t' << "// " << R->getName() << "\n"; diff --git a/utils/TableGen/CodeGenDAGPatterns.cpp b/utils/TableGen/CodeGenDAGPatterns.cpp index 34f8a34..d5b581b 100644 --- a/utils/TableGen/CodeGenDAGPatterns.cpp +++ b/utils/TableGen/CodeGenDAGPatterns.cpp @@ -79,14 +79,19 @@ bool EEVT::TypeSet::FillWithPossibleTypes(TreePattern &TP, const std::vector &LegalTypes = TP.getDAGPatterns().getTargetInfo().getLegalValueTypes(); + if (TP.hasError()) + return false; + for (unsigned i = 0, e = LegalTypes.size(); i != e; ++i) if (Pred == 0 || Pred(LegalTypes[i])) TypeVec.push_back(LegalTypes[i]); // If we have nothing that matches the predicate, bail out. - if (TypeVec.empty()) + if (TypeVec.empty()) { TP.error("Type inference contradiction found, no " + std::string(PredicateName) + " types found"); + return false; + } // No need to sort with one element. if (TypeVec.size() == 1) return true; @@ -146,9 +151,9 @@ std::string EEVT::TypeSet::getName() const { /// MergeInTypeInfo - This merges in type information from the specified /// argument. If 'this' changes, it returns true. If the two types are -/// contradictory (e.g. merge f32 into i32) then this throws an exception. +/// contradictory (e.g. merge f32 into i32) then this flags an error. 
bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){ - if (InVT.isCompletelyUnknown() || *this == InVT) + if (InVT.isCompletelyUnknown() || *this == InVT || TP.hasError()) return false; if (isCompletelyUnknown()) { @@ -224,11 +229,13 @@ bool EEVT::TypeSet::MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP){ // FIXME: Really want an SMLoc here! TP.error("Type inference contradiction found, merging '" + InVT.getName() + "' into '" + InputSet.getName() + "'"); - return true; // unreachable + return false; } /// EnforceInteger - Remove all non-integer types from this set. bool EEVT::TypeSet::EnforceInteger(TreePattern &TP) { + if (TP.hasError()) + return false; // If we know nothing, then get the full set. if (TypeVec.empty()) return FillWithPossibleTypes(TP, isInteger, "integer"); @@ -242,14 +249,18 @@ bool EEVT::TypeSet::EnforceInteger(TreePattern &TP) { if (!isInteger(TypeVec[i])) TypeVec.erase(TypeVec.begin()+i--); - if (TypeVec.empty()) + if (TypeVec.empty()) { TP.error("Type inference contradiction found, '" + InputSet.getName() + "' needs to be integer"); + return false; + } return true; } /// EnforceFloatingPoint - Remove all integer types from this set. bool EEVT::TypeSet::EnforceFloatingPoint(TreePattern &TP) { + if (TP.hasError()) + return false; // If we know nothing, then get the full set. if (TypeVec.empty()) return FillWithPossibleTypes(TP, isFloatingPoint, "floating point"); @@ -264,14 +275,19 @@ bool EEVT::TypeSet::EnforceFloatingPoint(TreePattern &TP) { if (!isFloatingPoint(TypeVec[i])) TypeVec.erase(TypeVec.begin()+i--); - if (TypeVec.empty()) + if (TypeVec.empty()) { TP.error("Type inference contradiction found, '" + InputSet.getName() + "' needs to be floating point"); + return false; + } return true; } /// EnforceScalar - Remove all vector types from this. bool EEVT::TypeSet::EnforceScalar(TreePattern &TP) { + if (TP.hasError()) + return false; + // If we know nothing, then get the full set. if (TypeVec.empty()) return FillWithPossibleTypes(TP, isScalar, "scalar"); @@ -286,14 +302,19 @@ bool EEVT::TypeSet::EnforceScalar(TreePattern &TP) { if (!isScalar(TypeVec[i])) TypeVec.erase(TypeVec.begin()+i--); - if (TypeVec.empty()) + if (TypeVec.empty()) { TP.error("Type inference contradiction found, '" + InputSet.getName() + "' needs to be scalar"); + return false; + } return true; } /// EnforceVector - Remove all vector types from this. bool EEVT::TypeSet::EnforceVector(TreePattern &TP) { + if (TP.hasError()) + return false; + // If we know nothing, then get the full set. if (TypeVec.empty()) return FillWithPossibleTypes(TP, isVector, "vector"); @@ -308,9 +329,11 @@ bool EEVT::TypeSet::EnforceVector(TreePattern &TP) { MadeChange = true; } - if (TypeVec.empty()) + if (TypeVec.empty()) { TP.error("Type inference contradiction found, '" + InputSet.getName() + "' needs to be a vector"); + return false; + } return MadeChange; } @@ -319,6 +342,9 @@ bool EEVT::TypeSet::EnforceVector(TreePattern &TP) { /// EnforceSmallerThan - 'this' must be a smaller VT than Other. Update /// this an other based on this information. bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) { + if (TP.hasError()) + return false; + // Both operands must be integer or FP, but we don't care which. 
bool MadeChange = false; @@ -365,19 +391,22 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) { if (hasVectorTypes() && Other.hasVectorTypes()) { if (Type.getSizeInBits() >= OtherType.getSizeInBits()) if (Type.getVectorElementType().getSizeInBits() - >= OtherType.getVectorElementType().getSizeInBits()) + >= OtherType.getVectorElementType().getSizeInBits()) { TP.error("Type inference contradiction found, '" + getName() + "' element type not smaller than '" + Other.getName() +"'!"); + return false; + } } else // For scalar types, the bitsize of this type must be larger // than that of the other. - if (Type.getSizeInBits() >= OtherType.getSizeInBits()) + if (Type.getSizeInBits() >= OtherType.getSizeInBits()) { TP.error("Type inference contradiction found, '" + getName() + "' is not smaller than '" + Other.getName() +"'!"); - + return false; + } } @@ -437,9 +466,11 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) { // If this is the only type in the large set, the constraint can never be // satisfied. if ((Other.hasIntegerTypes() && OtherIntSize == 0) - || (Other.hasFloatingPointTypes() && OtherFPSize == 0)) + || (Other.hasFloatingPointTypes() && OtherFPSize == 0)) { TP.error("Type inference contradiction found, '" + Other.getName() + "' has nothing larger than '" + getName() +"'!"); + return false; + } // Okay, find the largest type in the Other set and remove it from the // current set. @@ -493,9 +524,11 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) { // If this is the only type in the small set, the constraint can never be // satisfied. if ((hasIntegerTypes() && IntSize == 0) - || (hasFloatingPointTypes() && FPSize == 0)) + || (hasFloatingPointTypes() && FPSize == 0)) { TP.error("Type inference contradiction found, '" + getName() + "' has nothing smaller than '" + Other.getName()+"'!"); + return false; + } return MadeChange; } @@ -504,6 +537,9 @@ bool EEVT::TypeSet::EnforceSmallerThan(EEVT::TypeSet &Other, TreePattern &TP) { /// whose element is specified by VTOperand. bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand, TreePattern &TP) { + if (TP.hasError()) + return false; + // "This" must be a vector and "VTOperand" must be a scalar. bool MadeChange = false; MadeChange |= EnforceVector(TP); @@ -535,9 +571,11 @@ bool EEVT::TypeSet::EnforceVectorEltTypeIs(EEVT::TypeSet &VTOperand, } } - if (TypeVec.empty()) // FIXME: Really want an SMLoc here! + if (TypeVec.empty()) { // FIXME: Really want an SMLoc here! TP.error("Type inference contradiction found, forcing '" + InputSet.getName() + "' to have a vector element"); + return false; + } return MadeChange; } @@ -574,10 +612,6 @@ bool EEVT::TypeSet::EnforceVectorSubVectorTypeIs(EEVT::TypeSet &VTOperand, //===----------------------------------------------------------------------===// // Helpers for working with extended types. 
-bool RecordPtrCmp::operator()(const Record *LHS, const Record *RHS) const { - return LHS->getID() < RHS->getID(); -} - /// Dependent variable map for CodeGenDAGPattern variant generation typedef std::map DepVarMap; @@ -586,7 +620,7 @@ typedef DepVarMap::const_iterator DepVarMap_citer; static void FindDepVarsOf(TreePatternNode *N, DepVarMap &DepMap) { if (N->isLeaf()) { - if (dynamic_cast(N->getLeafValue()) != NULL) + if (isa(N->getLeafValue())) DepMap[N->getName()]++; } else { for (size_t i = 0, e = N->getNumChildren(); i != e; ++i) @@ -695,7 +729,7 @@ static unsigned getPatternSize(const TreePatternNode *P, unsigned Size = 3; // The node itself. // If the root node is a ConstantSDNode, increases its size. // e.g. (set R32:$dst, 0). - if (P->isLeaf() && dynamic_cast(P->getLeafValue())) + if (P->isLeaf() && isa(P->getLeafValue())) Size += 2; // FIXME: This is a hack to statically increase the priority of patterns @@ -719,7 +753,7 @@ static unsigned getPatternSize(const TreePatternNode *P, Child->getType(0) != MVT::Other) Size += getPatternSize(Child, CGP); else if (Child->isLeaf()) { - if (dynamic_cast(Child->getLeafValue())) + if (isa(Child->getLeafValue())) Size += 5; // Matches a ConstantSDNode (+3) and a specific value (+2). else if (Child->getComplexPatternInfo(CGP)) Size += getPatternSize(Child, CGP); @@ -745,7 +779,7 @@ getPatternComplexity(const CodeGenDAGPatterns &CGP) const { std::string PatternToMatch::getPredicateCheck() const { std::string PredicateCheck; for (unsigned i = 0, e = Predicates->getSize(); i != e; ++i) { - if (DefInit *Pred = dynamic_cast(Predicates->getElement(i))) { + if (DefInit *Pred = dyn_cast(Predicates->getElement(i))) { Record *Def = Pred->getDef(); if (!Def->isSubClassOf("Predicate")) { #ifndef NDEBUG @@ -773,7 +807,7 @@ SDTypeConstraint::SDTypeConstraint(Record *R) { ConstraintType = SDTCisVT; x.SDTCisVT_Info.VT = getValueType(R->getValueAsDef("VT")); if (x.SDTCisVT_Info.VT == MVT::isVoid) - throw TGError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT"); + PrintFatalError(R->getLoc(), "Cannot use 'Void' as type to SDTCisVT"); } else if (R->isSubClassOf("SDTCisPtrTy")) { ConstraintType = SDTCisPtrTy; @@ -833,11 +867,13 @@ static TreePatternNode *getOperandNum(unsigned OpNo, TreePatternNode *N, /// ApplyTypeConstraint - Given a node in a pattern, apply this type /// constraint to the nodes operands. This returns true if it makes a -/// change, false otherwise. If a type contradiction is found, throw an -/// exception. +/// change, false otherwise. If a type contradiction is found, flag an error. bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N, const SDNodeInfo &NodeInfo, TreePattern &TP) const { + if (TP.hasError()) + return false; + unsigned ResNo = 0; // The result number being referenced. TreePatternNode *NodeToApply = getOperandNum(OperandNo, N, NodeInfo, ResNo); @@ -868,10 +904,12 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N, // The NodeToApply must be a leaf node that is a VT. OtherOperandNum must // have an integer type that is smaller than the VT. 
if (!NodeToApply->isLeaf() || - !dynamic_cast(NodeToApply->getLeafValue()) || + !isa(NodeToApply->getLeafValue()) || !static_cast(NodeToApply->getLeafValue())->getDef() - ->isSubClassOf("ValueType")) + ->isSubClassOf("ValueType")) { TP.error(N->getOperator()->getName() + " expects a VT operand!"); + return false; + } MVT::SimpleValueType VT = getValueType(static_cast(NodeToApply->getLeafValue())->getDef()); @@ -1025,8 +1063,9 @@ static unsigned GetNumNodeResults(Record *Operator, CodeGenDAGPatterns &CDP) { // Get the result tree. DagInit *Tree = Operator->getValueAsDag("Fragment"); Record *Op = 0; - if (Tree && dynamic_cast(Tree->getOperator())) - Op = dynamic_cast(Tree->getOperator())->getDef(); + if (Tree) + if (DefInit *DI = dyn_cast(Tree->getOperator())) + Op = DI->getDef(); assert(Op && "Invalid Fragment"); return GetNumNodeResults(Op, CDP); } @@ -1100,8 +1139,8 @@ bool TreePatternNode::isIsomorphicTo(const TreePatternNode *N, return false; if (isLeaf()) { - if (DefInit *DI = dynamic_cast(getLeafValue())) { - if (DefInit *NDI = dynamic_cast(N->getLeafValue())) { + if (DefInit *DI = dyn_cast(getLeafValue())) { + if (DefInit *NDI = dyn_cast(N->getLeafValue())) { return ((DI->getDef() == NDI->getDef()) && (DepVars.find(getName()) == DepVars.end() || getName() == N->getName())); @@ -1158,8 +1197,8 @@ SubstituteFormalArguments(std::map &ArgMap) { TreePatternNode *Child = getChild(i); if (Child->isLeaf()) { Init *Val = Child->getLeafValue(); - if (dynamic_cast(Val) && - static_cast(Val)->getDef()->getName() == "node") { + if (isa(Val) && + cast(Val)->getDef()->getName() == "node") { // We found a use of a formal argument, replace it with its value. TreePatternNode *NewChild = ArgMap[Child->getName()]; assert(NewChild && "Couldn't find formal argument!"); @@ -1179,7 +1218,11 @@ SubstituteFormalArguments(std::map &ArgMap) { /// fragments, inline them into place, giving us a pattern without any /// PatFrag references. TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) { - if (isLeaf()) return this; // nothing to do. + if (TP.hasError()) + return 0; + + if (isLeaf()) + return this; // nothing to do. Record *Op = getOperator(); if (!Op->isSubClassOf("PatFrag")) { @@ -1202,9 +1245,11 @@ TreePatternNode *TreePatternNode::InlinePatternFragments(TreePattern &TP) { TreePattern *Frag = TP.getDAGPatterns().getPatternFragment(Op); // Verify that we are passing the right number of operands. 
- if (Frag->getNumArgs() != Children.size()) + if (Frag->getNumArgs() != Children.size()) { TP.error("'" + Op->getName() + "' fragment requires " + utostr(Frag->getNumArgs()) + " operands!"); + return 0; + } TreePatternNode *FragTree = Frag->getOnlyTree()->clone(); @@ -1320,8 +1365,7 @@ getIntrinsicInfo(const CodeGenDAGPatterns &CDP) const { getOperator() != CDP.get_intrinsic_wo_chain_sdnode()) return 0; - unsigned IID = - dynamic_cast(getChild(0)->getLeafValue())->getValue(); + unsigned IID = cast(getChild(0)->getLeafValue())->getValue(); return &CDP.getIntrinsicInfo(IID); } @@ -1331,7 +1375,7 @@ const ComplexPattern * TreePatternNode::getComplexPatternInfo(const CodeGenDAGPatterns &CGP) const { if (!isLeaf()) return 0; - DefInit *DI = dynamic_cast(getLeafValue()); + DefInit *DI = dyn_cast(getLeafValue()); if (DI && DI->getDef()->isSubClassOf("ComplexPattern")) return &CGP.getComplexPattern(DI->getDef()); return 0; @@ -1379,12 +1423,14 @@ TreePatternNode::isCommutativeIntrinsic(const CodeGenDAGPatterns &CDP) const { /// ApplyTypeConstraints - Apply all of the type constraints relevant to /// this node and its children in the tree. This returns true if it makes a -/// change, false otherwise. If a type contradiction is found, throw an -/// exception. +/// change, false otherwise. If a type contradiction is found, flag an error. bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { + if (TP.hasError()) + return false; + CodeGenDAGPatterns &CDP = TP.getDAGPatterns(); if (isLeaf()) { - if (DefInit *DI = dynamic_cast(getLeafValue())) { + if (DefInit *DI = dyn_cast(getLeafValue())) { // If it's a regclass or something else known, include the type. bool MadeChange = false; for (unsigned i = 0, e = Types.size(); i != e; ++i) @@ -1393,7 +1439,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { return MadeChange; } - if (IntInit *II = dynamic_cast(getLeafValue())) { + if (IntInit *II = dyn_cast(getLeafValue())) { assert(Types.size() == 1 && "Invalid IntInit"); // Int inits are always integers. :) @@ -1410,21 +1456,15 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { // Make sure that the value is representable for this type. if (Size >= 32) return MadeChange; - int Val = (II->getValue() << (32-Size)) >> (32-Size); - if (Val == II->getValue()) return MadeChange; - - // If sign-extended doesn't fit, does it fit as unsigned? - unsigned ValueMask; - unsigned UnsignedVal; - ValueMask = unsigned(~uint32_t(0UL) >> (32-Size)); - UnsignedVal = unsigned(II->getValue()); - - if ((ValueMask & UnsignedVal) == UnsignedVal) + // Check that the value doesn't use more bits than we have. It must either + // be a sign- or zero-extended equivalent of the original. 
+ int64_t SignBitAndAbove = II->getValue() >> (Size - 1); + if (SignBitAndAbove == -1 || SignBitAndAbove == 0 || SignBitAndAbove == 1) return MadeChange; - TP.error("Integer value '" + itostr(II->getValue())+ + TP.error("Integer value '" + itostr(II->getValue()) + "' is out of range for type '" + getEnumName(getType(0)) + "'!"); - return MadeChange; + return false; } return false; } @@ -1487,10 +1527,12 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { for (unsigned i = 0, e = NumRetVTs; i != e; ++i) MadeChange |= UpdateNodeType(i, Int->IS.RetVTs[i], TP); - if (getNumChildren() != NumParamVTs + 1) + if (getNumChildren() != NumParamVTs + 1) { TP.error("Intrinsic '" + Int->Name + "' expects " + utostr(NumParamVTs) + " operands, not " + utostr(getNumChildren() - 1) + " operands!"); + return false; + } // Apply type info to the intrinsic ID. MadeChange |= getChild(0)->UpdateNodeType(0, MVT::iPTR, TP); @@ -1510,9 +1552,11 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { // Check that the number of operands is sane. Negative operands -> varargs. if (NI.getNumOperands() >= 0 && - getNumChildren() != (unsigned)NI.getNumOperands()) + getNumChildren() != (unsigned)NI.getNumOperands()) { TP.error(getOperator()->getName() + " node requires exactly " + itostr(NI.getNumOperands()) + " operands!"); + return false; + } bool MadeChange = NI.ApplyTypeConstraints(this, TP); for (unsigned i = 0, e = getNumChildren(); i != e; ++i) @@ -1541,7 +1585,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { const CodeGenRegisterClass &RC = CDP.getTargetInfo().getRegisterClass(RegClass); MadeChange |= UpdateNodeType(ResNo, RC.getValueTypes(), TP); - } else if (ResultNode->getName() == "unknown") { + } else if (ResultNode->isSubClassOf("unknown_class")) { // Nothing to do. } else { assert(ResultNode->isSubClassOf("RegisterClass") && @@ -1581,15 +1625,16 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { // If the instruction expects a predicate or optional def operand, we // codegen this by setting the operand to it's default value if it has a // non-empty DefaultOps field. - if ((OperandNode->isSubClassOf("PredicateOperand") || - OperandNode->isSubClassOf("OptionalDefOperand")) && + if (OperandNode->isSubClassOf("OperandWithDefaultOps") && !CDP.getDefaultOperand(OperandNode).DefaultOps.empty()) continue; // Verify that we didn't run out of provided operands. - if (ChildNo >= getNumChildren()) + if (ChildNo >= getNumChildren()) { TP.error("Instruction '" + getOperator()->getName() + "' expects more operands than were provided."); + return false; + } MVT::SimpleValueType VT; TreePatternNode *Child = getChild(ChildNo++); @@ -1609,7 +1654,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { MadeChange |= Child->UpdateNodeType(ChildResNo, VT, TP); } else if (OperandNode->isSubClassOf("PointerLikeRegClass")) { MadeChange |= Child->UpdateNodeType(ChildResNo, MVT::iPTR, TP); - } else if (OperandNode->getName() == "unknown") { + } else if (OperandNode->isSubClassOf("unknown_class")) { // Nothing to do. 
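// Illustrative sketch (not from the patch): the idea behind the new
// "SignBitAndAbove" immediate check. Shifting the value arithmetically right
// by (Size - 1) leaves only the sign bit and everything above it: the result
// is 0 for a non-negative value that fits, -1 for a sign-extended negative
// value, and 1 for a value that only fits when treated as zero-extended (top
// bit set). Anything else needs more than Size bits. The helper name and the
// sample values are invented.
#include <cstdint>
#include <cstdio>

static bool demoFitsInBits(int64_t Val, unsigned Size) {
  if (Size >= 64)
    return true;
  int64_t SignBitAndAbove = Val >> (Size - 1);  // arithmetic shift on mainstream targets
  return SignBitAndAbove == -1 || SignBitAndAbove == 0 || SignBitAndAbove == 1;
}

int main() {
  std::printf("%d %d %d %d\n",
              demoFitsInBits(100, 8),   // 1: plain 8-bit value
              demoFitsInBits(-1, 8),    // 1: sign-extended -1 (0xFF)
              demoFitsInBits(255, 8),   // 1: zero-extended 0xFF
              demoFitsInBits(256, 8));  // 0: needs 9 bits
  return 0;
}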
} else llvm_unreachable("Unknown operand type!"); @@ -1617,9 +1662,11 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { MadeChange |= Child->ApplyTypeConstraints(TP, NotRegisters); } - if (ChildNo != getNumChildren()) + if (ChildNo != getNumChildren()) { TP.error("Instruction '" + getOperator()->getName() + "' was provided too many operands!"); + return false; + } return MadeChange; } @@ -1627,9 +1674,11 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { assert(getOperator()->isSubClassOf("SDNodeXForm") && "Unknown node type!"); // Node transforms always take one operand. - if (getNumChildren() != 1) + if (getNumChildren() != 1) { TP.error("Node transform '" + getOperator()->getName() + "' requires one operand!"); + return false; + } bool MadeChange = getChild(0)->ApplyTypeConstraints(TP, NotRegisters); @@ -1652,7 +1701,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) { static bool OnlyOnRHSOfCommutative(TreePatternNode *N) { if (!N->isLeaf() && N->getOperator()->getName() == "imm") return true; - if (N->isLeaf() && dynamic_cast(N->getLeafValue())) + if (N->isLeaf() && isa(N->getLeafValue())) return true; return false; } @@ -1703,27 +1752,30 @@ bool TreePatternNode::canPatternMatch(std::string &Reason, // TreePattern::TreePattern(Record *TheRec, ListInit *RawPat, bool isInput, - CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){ - isInputPattern = isInput; + CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp), + isInputPattern(isInput), HasError(false) { for (unsigned i = 0, e = RawPat->getSize(); i != e; ++i) Trees.push_back(ParseTreePattern(RawPat->getElement(i), "")); } TreePattern::TreePattern(Record *TheRec, DagInit *Pat, bool isInput, - CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){ - isInputPattern = isInput; + CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp), + isInputPattern(isInput), HasError(false) { Trees.push_back(ParseTreePattern(Pat, "")); } TreePattern::TreePattern(Record *TheRec, TreePatternNode *Pat, bool isInput, - CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp){ - isInputPattern = isInput; + CodeGenDAGPatterns &cdp) : TheRecord(TheRec), CDP(cdp), + isInputPattern(isInput), HasError(false) { Trees.push_back(Pat); } -void TreePattern::error(const std::string &Msg) const { +void TreePattern::error(const std::string &Msg) { + if (HasError) + return; dump(); - throw TGError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg); + PrintError(TheRecord->getLoc(), "In " + TheRecord->getName() + ": " + Msg); + HasError = true; } void TreePattern::ComputeNamedNodes() { @@ -1741,7 +1793,7 @@ void TreePattern::ComputeNamedNodes(TreePatternNode *N) { TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){ - if (DefInit *DI = dynamic_cast(TheInit)) { + if (DefInit *DI = dyn_cast(TheInit)) { Record *R = DI->getDef(); // Direct reference to a leaf DagNode or PatFrag? Turn it into a @@ -1765,26 +1817,26 @@ TreePatternNode *TreePattern::ParseTreePattern(Init *TheInit, StringRef OpName){ return Res; } - if (IntInit *II = dynamic_cast(TheInit)) { + if (IntInit *II = dyn_cast(TheInit)) { if (!OpName.empty()) error("Constant int argument should not have a name!"); return new TreePatternNode(II, 1); } - if (BitsInit *BI = dynamic_cast(TheInit)) { + if (BitsInit *BI = dyn_cast(TheInit)) { // Turn this into an IntInit. 
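// Illustrative sketch (not from the patch): the error-handling style the
// CodeGenDAGPatterns changes move to. Instead of throwing, error() records the
// first diagnostic and latches a flag, and the type-inference helpers bail out
// early with "return false" once hasError() is set, so callers can keep a
// plain "made a change" return value. Class and member names here are
// stand-ins, not the real TreePattern interface.
#include <cstdio>
#include <string>

class DemoPattern {
  bool HasError = false;
public:
  bool hasError() const { return HasError; }
  void error(const std::string &Msg) {
    if (HasError)                       // only the first problem is reported
      return;
    std::fprintf(stderr, "error: %s\n", Msg.c_str());
    HasError = true;
  }
};

// Returns true if it changed anything; false on no change or after an error.
static bool demoEnforceInteger(DemoPattern &TP, bool TypeIsFloat) {
  if (TP.hasError())
    return false;
  if (TypeIsFloat) {
    TP.error("type inference contradiction: expected an integer type");
    return false;
  }
  return true;
}

int main() {
  DemoPattern TP;
  demoEnforceInteger(TP, /*TypeIsFloat=*/true);   // reports and latches the error
  demoEnforceInteger(TP, /*TypeIsFloat=*/true);   // silently returns false now
  return TP.hasError() ? 1 : 0;
}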
Init *II = BI->convertInitializerTo(IntRecTy::get()); - if (II == 0 || !dynamic_cast(II)) + if (II == 0 || !isa(II)) error("Bits value must be constants!"); return ParseTreePattern(II, OpName); } - DagInit *Dag = dynamic_cast(TheInit); + DagInit *Dag = dyn_cast(TheInit); if (!Dag) { TheInit->dump(); error("Pattern has unexpected init kind!"); } - DefInit *OpDef = dynamic_cast(Dag->getOperator()); + DefInit *OpDef = dyn_cast(Dag->getOperator()); if (!OpDef) error("Pattern has unexpected operator type!"); Record *Operator = OpDef->getDef(); @@ -1912,7 +1964,7 @@ static bool SimplifyTree(TreePatternNode *&N) { /// InferAllTypes - Infer/propagate as many types throughout the expression /// patterns as possible. Return true if all types are inferred, false -/// otherwise. Throw an exception if a type contradiction is found. +/// otherwise. Flags an error if a type contradiction is found. bool TreePattern:: InferAllTypes(const StringMap > *InNamedTypes) { if (NamedNodes.empty()) @@ -1949,7 +2001,7 @@ InferAllTypes(const StringMap > *InNamedTypes) { // us to match things like: // def : Pat<(v1i64 (bitconvert(v2i32 DPR:$src))), (v1i64 DPR:$src)>; if (Nodes[i] == Trees[0] && Nodes[i]->isLeaf()) { - DefInit *DI = dynamic_cast(Nodes[i]->getLeafValue()); + DefInit *DI = dyn_cast(Nodes[i]->getLeafValue()); if (DI && (DI->getDef()->isSubClassOf("RegisterClass") || DI->getDef()->isSubClassOf("RegisterOperand"))) continue; @@ -2033,6 +2085,9 @@ CodeGenDAGPatterns::CodeGenDAGPatterns(RecordKeeper &R) : // stores, and side effects in many cases by examining an // instruction's pattern. InferInstructionFlags(); + + // Verify that instruction flags match the patterns. + VerifyInstructionFlags(); } CodeGenDAGPatterns::~CodeGenDAGPatterns() { @@ -2111,7 +2166,7 @@ void CodeGenDAGPatterns::ParsePatternFragments() { // Parse the operands list. DagInit *OpsList = Fragments[i]->getValueAsDag("Operands"); - DefInit *OpsOp = dynamic_cast(OpsList->getOperator()); + DefInit *OpsOp = dyn_cast(OpsList->getOperator()); // Special cases: ops == outs == ins. Different names are used to // improve readability. if (!OpsOp || @@ -2123,9 +2178,8 @@ void CodeGenDAGPatterns::ParsePatternFragments() { // Copy over the arguments. Args.clear(); for (unsigned j = 0, e = OpsList->getNumArgs(); j != e; ++j) { - if (!dynamic_cast(OpsList->getArg(j)) || - static_cast(OpsList->getArg(j))-> - getDef()->getName() != "node") + if (!isa(OpsList->getArg(j)) || + cast(OpsList->getArg(j))->getDef()->getName() != "node") P->error("Operands list should all be 'node' values."); if (OpsList->getArgName(j).empty()) P->error("Operands list should have names for each operand!"); @@ -2161,14 +2215,8 @@ void CodeGenDAGPatterns::ParsePatternFragments() { // Infer as many types as possible. Don't worry about it if we don't infer // all of them, some may depend on the inputs of the pattern. - try { - ThePat->InferAllTypes(); - } catch (...) { - // If this pattern fragment is not supported by this target (no types can - // satisfy its constraints), just ignore it. If the bogus pattern is - // actually used by instructions, the type consistency error will be - // reported there. - } + ThePat->InferAllTypes(); + ThePat->resetError(); // If debugging, print out the pattern fragment result. 
DEBUG(ThePat->dump()); @@ -2176,53 +2224,46 @@ void CodeGenDAGPatterns::ParsePatternFragments() { } void CodeGenDAGPatterns::ParseDefaultOperands() { - std::vector DefaultOps[2]; - DefaultOps[0] = Records.getAllDerivedDefinitions("PredicateOperand"); - DefaultOps[1] = Records.getAllDerivedDefinitions("OptionalDefOperand"); + std::vector DefaultOps; + DefaultOps = Records.getAllDerivedDefinitions("OperandWithDefaultOps"); // Find some SDNode. assert(!SDNodes.empty() && "No SDNodes parsed?"); Init *SomeSDNode = DefInit::get(SDNodes.begin()->first); - for (unsigned iter = 0; iter != 2; ++iter) { - for (unsigned i = 0, e = DefaultOps[iter].size(); i != e; ++i) { - DagInit *DefaultInfo = DefaultOps[iter][i]->getValueAsDag("DefaultOps"); - - // Clone the DefaultInfo dag node, changing the operator from 'ops' to - // SomeSDnode so that we can parse this. - std::vector > Ops; - for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op) - Ops.push_back(std::make_pair(DefaultInfo->getArg(op), - DefaultInfo->getArgName(op))); - DagInit *DI = DagInit::get(SomeSDNode, "", Ops); - - // Create a TreePattern to parse this. - TreePattern P(DefaultOps[iter][i], DI, false, *this); - assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!"); - - // Copy the operands over into a DAGDefaultOperand. - DAGDefaultOperand DefaultOpInfo; - - TreePatternNode *T = P.getTree(0); - for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) { - TreePatternNode *TPN = T->getChild(op); - while (TPN->ApplyTypeConstraints(P, false)) - /* Resolve all types */; - - if (TPN->ContainsUnresolvedType()) { - if (iter == 0) - throw "Value #" + utostr(i) + " of PredicateOperand '" + - DefaultOps[iter][i]->getName() +"' doesn't have a concrete type!"; - else - throw "Value #" + utostr(i) + " of OptionalDefOperand '" + - DefaultOps[iter][i]->getName() +"' doesn't have a concrete type!"; - } - DefaultOpInfo.DefaultOps.push_back(TPN); + for (unsigned i = 0, e = DefaultOps.size(); i != e; ++i) { + DagInit *DefaultInfo = DefaultOps[i]->getValueAsDag("DefaultOps"); + + // Clone the DefaultInfo dag node, changing the operator from 'ops' to + // SomeSDnode so that we can parse this. + std::vector > Ops; + for (unsigned op = 0, e = DefaultInfo->getNumArgs(); op != e; ++op) + Ops.push_back(std::make_pair(DefaultInfo->getArg(op), + DefaultInfo->getArgName(op))); + DagInit *DI = DagInit::get(SomeSDNode, "", Ops); + + // Create a TreePattern to parse this. + TreePattern P(DefaultOps[i], DI, false, *this); + assert(P.getNumTrees() == 1 && "This ctor can only produce one tree!"); + + // Copy the operands over into a DAGDefaultOperand. + DAGDefaultOperand DefaultOpInfo; + + TreePatternNode *T = P.getTree(0); + for (unsigned op = 0, e = T->getNumChildren(); op != e; ++op) { + TreePatternNode *TPN = T->getChild(op); + while (TPN->ApplyTypeConstraints(P, false)) + /* Resolve all types */; + + if (TPN->ContainsUnresolvedType()) { + PrintFatalError("Value #" + utostr(i) + " of OperandWithDefaultOps '" + + DefaultOps[i]->getName() +"' doesn't have a concrete type!"); } - - // Insert it into the DefaultOperands map so we can find it later. - DefaultOperands[DefaultOps[iter][i]] = DefaultOpInfo; + DefaultOpInfo.DefaultOps.push_back(TPN); } + + // Insert it into the DefaultOperands map so we can find it later. + DefaultOperands[DefaultOps[i]] = DefaultOpInfo; } } @@ -2233,7 +2274,7 @@ static bool HandleUse(TreePattern *I, TreePatternNode *Pat, // No name -> not interesting. 
if (Pat->getName().empty()) { if (Pat->isLeaf()) { - DefInit *DI = dynamic_cast(Pat->getLeafValue()); + DefInit *DI = dyn_cast(Pat->getLeafValue()); if (DI && (DI->getDef()->isSubClassOf("RegisterClass") || DI->getDef()->isSubClassOf("RegisterOperand"))) I->error("Input " + DI->getDef()->getName() + " must be named!"); @@ -2243,7 +2284,7 @@ static bool HandleUse(TreePattern *I, TreePatternNode *Pat, Record *Rec; if (Pat->isLeaf()) { - DefInit *DI = dynamic_cast(Pat->getLeafValue()); + DefInit *DI = dyn_cast(Pat->getLeafValue()); if (!DI) I->error("Input $" + Pat->getName() + " must be an identifier!"); Rec = DI->getDef(); } else { @@ -2261,7 +2302,7 @@ static bool HandleUse(TreePattern *I, TreePatternNode *Pat, } Record *SlotRec; if (Slot->isLeaf()) { - SlotRec = dynamic_cast(Slot->getLeafValue())->getDef(); + SlotRec = cast(Slot->getLeafValue())->getDef(); } else { assert(Slot->getNumChildren() == 0 && "can't be a use with children!"); SlotRec = Slot->getOperator(); @@ -2296,7 +2337,7 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat, if (!Dest->isLeaf()) I->error("implicitly defined value should be a register!"); - DefInit *Val = dynamic_cast(Dest->getLeafValue()); + DefInit *Val = dyn_cast(Dest->getLeafValue()); if (!Val || !Val->getDef()->isSubClassOf("Register")) I->error("implicitly defined value should be a register!"); InstImpResults.push_back(Val->getDef()); @@ -2337,7 +2378,7 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat, if (!Dest->isLeaf()) I->error("set destination should be a register!"); - DefInit *Val = dynamic_cast(Dest->getLeafValue()); + DefInit *Val = dyn_cast(Dest->getLeafValue()); if (!Val) I->error("set destination should be a register!"); @@ -2367,43 +2408,36 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat, class InstAnalyzer { const CodeGenDAGPatterns &CDP; - bool &mayStore; - bool &mayLoad; - bool &IsBitcast; - bool &HasSideEffects; - bool &IsVariadic; public: - InstAnalyzer(const CodeGenDAGPatterns &cdp, - bool &maystore, bool &mayload, bool &isbc, bool &hse, bool &isv) - : CDP(cdp), mayStore(maystore), mayLoad(mayload), IsBitcast(isbc), - HasSideEffects(hse), IsVariadic(isv) { - } + bool hasSideEffects; + bool mayStore; + bool mayLoad; + bool isBitcast; + bool isVariadic; - /// Analyze - Analyze the specified instruction, returning true if the - /// instruction had a pattern. - bool Analyze(Record *InstRecord) { - const TreePattern *Pattern = CDP.getInstruction(InstRecord).getPattern(); - if (Pattern == 0) { - HasSideEffects = 1; - return false; // No pattern. - } + InstAnalyzer(const CodeGenDAGPatterns &cdp) + : CDP(cdp), hasSideEffects(false), mayStore(false), mayLoad(false), + isBitcast(false), isVariadic(false) {} - // FIXME: Assume only the first tree is the pattern. The others are clobber - // nodes. - AnalyzeNode(Pattern->getTree(0)); - return true; + void Analyze(const TreePattern *Pat) { + // Assume only the first tree is the pattern. The others are clobber nodes. 
+ AnalyzeNode(Pat->getTree(0)); + } + + void Analyze(const PatternToMatch *Pat) { + AnalyzeNode(Pat->getSrcPattern()); } private: bool IsNodeBitcast(const TreePatternNode *N) const { - if (HasSideEffects || mayLoad || mayStore || IsVariadic) + if (hasSideEffects || mayLoad || mayStore || isVariadic) return false; if (N->getNumChildren() != 2) return false; const TreePatternNode *N0 = N->getChild(0); - if (!N0->isLeaf() || !dynamic_cast(N0->getLeafValue())) + if (!N0->isLeaf() || !isa(N0->getLeafValue())) return false; const TreePatternNode *N1 = N->getChild(1); @@ -2418,16 +2452,17 @@ private: return OpInfo.getEnumName() == "ISD::BITCAST"; } +public: void AnalyzeNode(const TreePatternNode *N) { if (N->isLeaf()) { - if (DefInit *DI = dynamic_cast(N->getLeafValue())) { + if (DefInit *DI = dyn_cast(N->getLeafValue())) { Record *LeafRec = DI->getDef(); // Handle ComplexPattern leaves. if (LeafRec->isSubClassOf("ComplexPattern")) { const ComplexPattern &CP = CDP.getComplexPattern(LeafRec); if (CP.hasProperty(SDNPMayStore)) mayStore = true; if (CP.hasProperty(SDNPMayLoad)) mayLoad = true; - if (CP.hasProperty(SDNPSideEffect)) HasSideEffects = true; + if (CP.hasProperty(SDNPSideEffect)) hasSideEffects = true; } } return; @@ -2439,7 +2474,7 @@ private: // Ignore set nodes, which are not SDNodes. if (N->getOperator()->getName() == "set") { - IsBitcast = IsNodeBitcast(N); + isBitcast = IsNodeBitcast(N); return; } @@ -2449,8 +2484,8 @@ private: // Notice properties of the node. if (OpInfo.hasProperty(SDNPMayStore)) mayStore = true; if (OpInfo.hasProperty(SDNPMayLoad)) mayLoad = true; - if (OpInfo.hasProperty(SDNPSideEffect)) HasSideEffects = true; - if (OpInfo.hasProperty(SDNPVariadic)) IsVariadic = true; + if (OpInfo.hasProperty(SDNPSideEffect)) hasSideEffects = true; + if (OpInfo.hasProperty(SDNPVariadic)) isVariadic = true; if (const CodeGenIntrinsic *IntInfo = N->getIntrinsicInfo(CDP)) { // If this is an intrinsic, analyze it. @@ -2462,68 +2497,70 @@ private: if (IntInfo->ModRef >= CodeGenIntrinsic::ReadWriteMem) // WriteMem intrinsics can have other strange effects. - HasSideEffects = true; + hasSideEffects = true; } } }; -static void InferFromPattern(const CodeGenInstruction &Inst, - bool &MayStore, bool &MayLoad, - bool &IsBitcast, - bool &HasSideEffects, bool &IsVariadic, - const CodeGenDAGPatterns &CDP) { - MayStore = MayLoad = IsBitcast = HasSideEffects = IsVariadic = false; - - bool HadPattern = - InstAnalyzer(CDP, MayStore, MayLoad, IsBitcast, HasSideEffects, IsVariadic) - .Analyze(Inst.TheDef); - - // InstAnalyzer only correctly analyzes mayStore/mayLoad so far. - if (Inst.mayStore) { // If the .td file explicitly sets mayStore, use it. - // If we decided that this is a store from the pattern, then the .td file - // entry is redundant. - if (MayStore) - PrintWarning(Inst.TheDef->getLoc(), - "mayStore flag explicitly set on " - "instruction, but flag already inferred from pattern."); - MayStore = true; +static bool InferFromPattern(CodeGenInstruction &InstInfo, + const InstAnalyzer &PatInfo, + Record *PatDef) { + bool Error = false; + + // Remember where InstInfo got its flags. + if (InstInfo.hasUndefFlags()) + InstInfo.InferredFrom = PatDef; + + // Check explicitly set flags for consistency. + if (InstInfo.hasSideEffects != PatInfo.hasSideEffects && + !InstInfo.hasSideEffects_Unset) { + // Allow explicitly setting hasSideEffects = 1 on instructions, even when + // the pattern has no side effects. That could be useful for div/rem + // instructions that may trap. 
+ if (!InstInfo.hasSideEffects) { + Error = true; + PrintError(PatDef->getLoc(), "Pattern doesn't match hasSideEffects = " + + Twine(InstInfo.hasSideEffects)); + } } - if (Inst.mayLoad) { // If the .td file explicitly sets mayLoad, use it. - // If we decided that this is a load from the pattern, then the .td file - // entry is redundant. - if (MayLoad) - PrintWarning(Inst.TheDef->getLoc(), - "mayLoad flag explicitly set on " - "instruction, but flag already inferred from pattern."); - MayLoad = true; + if (InstInfo.mayStore != PatInfo.mayStore && !InstInfo.mayStore_Unset) { + Error = true; + PrintError(PatDef->getLoc(), "Pattern doesn't match mayStore = " + + Twine(InstInfo.mayStore)); } - if (Inst.neverHasSideEffects) { - if (HadPattern) - PrintWarning(Inst.TheDef->getLoc(), - "neverHasSideEffects flag explicitly set on " - "instruction, but flag already inferred from pattern."); - HasSideEffects = false; + if (InstInfo.mayLoad != PatInfo.mayLoad && !InstInfo.mayLoad_Unset) { + // Allow explicitly setting mayLoad = 1, even when the pattern has no loads. + // Some targets translate imediates to loads. + if (!InstInfo.mayLoad) { + Error = true; + PrintError(PatDef->getLoc(), "Pattern doesn't match mayLoad = " + + Twine(InstInfo.mayLoad)); + } } - if (Inst.hasSideEffects) { - if (HasSideEffects) - PrintWarning(Inst.TheDef->getLoc(), - "hasSideEffects flag explicitly set on " - "instruction, but flag already inferred from pattern."); - HasSideEffects = true; - } + // Transfer inferred flags. + InstInfo.hasSideEffects |= PatInfo.hasSideEffects; + InstInfo.mayStore |= PatInfo.mayStore; + InstInfo.mayLoad |= PatInfo.mayLoad; + + // These flags are silently added without any verification. + InstInfo.isBitcast |= PatInfo.isBitcast; + + // Don't infer isVariadic. This flag means something different on SDNodes and + // instructions. For example, a CALL SDNode is variadic because it has the + // call arguments as operands, but a CALL instruction is not variadic - it + // has argument registers as implicit, not explicit uses. - if (Inst.Operands.isVariadic) - IsVariadic = true; // Can warn if we want. + return Error; } /// hasNullFragReference - Return true if the DAG has any reference to the /// null_frag operator. static bool hasNullFragReference(DagInit *DI) { - DefInit *OpDef = dynamic_cast(DI->getOperator()); + DefInit *OpDef = dyn_cast(DI->getOperator()); if (!OpDef) return false; Record *Operator = OpDef->getDef(); @@ -2531,7 +2568,7 @@ static bool hasNullFragReference(DagInit *DI) { if (Operator->getName() == "null_frag") return true; // If any of the arguments reference the null fragment, return true. for (unsigned i = 0, e = DI->getNumArgs(); i != e; ++i) { - DagInit *Arg = dynamic_cast(DI->getArg(i)); + DagInit *Arg = dyn_cast(DI->getArg(i)); if (Arg && hasNullFragReference(Arg)) return true; } @@ -2543,7 +2580,7 @@ static bool hasNullFragReference(DagInit *DI) { /// the null_frag operator. static bool hasNullFragReference(ListInit *LI) { for (unsigned i = 0, e = LI->getSize(); i != e; ++i) { - DagInit *DI = dynamic_cast(LI->getElement(i)); + DagInit *DI = dyn_cast(LI->getElement(i)); assert(DI && "non-dag in an instruction Pattern list?!"); if (hasNullFragReference(DI)) return true; @@ -2551,6 +2588,17 @@ static bool hasNullFragReference(ListInit *LI) { return false; } +/// Get all the instructions in a tree. 
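The InferFromPattern rewrite above checks explicitly written flags against what InstAnalyzer inferred from the pattern instead of merely warning about redundancy: an explicit hasSideEffects or mayLoad may be stronger than the pattern implies (trapping div/rem, immediates lowered to loads), an explicit mayStore must agree exactly, and the inferred bits are then OR'ed in. A self-contained sketch of that merge rule, with a simplified TriState pair standing in for the bit/_Unset members that getValueAsBitOrUnset fills in (names here are illustrative, not the CodeGenInstruction fields):

  // A flag that is either explicitly set in the .td file (Unset == false,
  // Value meaningful) or left unspecified (Unset == true).
  struct TriState {
    bool Value;
    bool Unset;
  };

  // What the pattern analysis concluded.
  struct PatternFlags {
    bool hasSideEffects, mayStore, mayLoad;
  };

  // Returns true on a conflict, mirroring the rules above.
  static bool mergeFlags(TriState &SideFx, TriState &Store, TriState &Load,
                         const PatternFlags &Pat) {
    bool Error = false;
    if (!SideFx.Unset && SideFx.Value != Pat.hasSideEffects && !SideFx.Value)
      Error = true;                   // explicit 0, but pattern has side effects
    if (!Store.Unset && Store.Value != Pat.mayStore)
      Error = true;                   // any explicit mayStore mismatch
    if (!Load.Unset && Load.Value != Pat.mayLoad && !Load.Value)
      Error = true;                   // explicit 0, but pattern may load
    // Transfer inferred flags (the real code also records InferredFrom).
    SideFx.Value |= Pat.hasSideEffects;
    Store.Value  |= Pat.mayStore;
    Load.Value   |= Pat.mayLoad;
    return Error;
  }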
+static void +getInstructionsInTree(TreePatternNode *Tree, SmallVectorImpl &Instrs) { + if (Tree->isLeaf()) + return; + if (Tree->getOperator()->isSubClassOf("Instruction")) + Instrs.push_back(Tree->getOperator()); + for (unsigned i = 0, e = Tree->getNumChildren(); i != e; ++i) + getInstructionsInTree(Tree->getChild(i), Instrs); +} + /// ParseInstructions - Parse all of the instructions, inlining and resolving /// any fragments involved. This populates the Instructions list with fully /// resolved instructions. @@ -2560,7 +2608,7 @@ void CodeGenDAGPatterns::ParseInstructions() { for (unsigned i = 0, e = Instrs.size(); i != e; ++i) { ListInit *LI = 0; - if (dynamic_cast(Instrs[i]->getValueInit("Pattern"))) + if (isa(Instrs[i]->getValueInit("Pattern"))) LI = Instrs[i]->getValueAsListInit("Pattern"); // If there is no pattern, only collect minimal information about the @@ -2655,7 +2703,7 @@ void CodeGenDAGPatterns::ParseInstructions() { if (i == 0) Res0Node = RNode; - Record *R = dynamic_cast(RNode->getLeafValue())->getDef(); + Record *R = cast(RNode->getLeafValue())->getDef(); if (R == 0) I->error("Operand $" + OpName + " should be a set destination: all " "outputs must occur before inputs in operand list!"); @@ -2683,11 +2731,9 @@ void CodeGenDAGPatterns::ParseInstructions() { I->error("Operand #" + utostr(i) + " in operands list has no name!"); if (!InstInputsCheck.count(OpName)) { - // If this is an predicate operand or optional def operand with an - // DefaultOps set filled in, we can ignore this. When we codegen it, - // we will do so as always executed. - if (Op.Rec->isSubClassOf("PredicateOperand") || - Op.Rec->isSubClassOf("OptionalDefOperand")) { + // If this is an operand with a DefaultOps set filled in, we can ignore + // this. When we codegen it, we will do so as always executed. + if (Op.Rec->isSubClassOf("OperandWithDefaultOps")) { // Does it have a non-empty DefaultOps field? If so, ignore this // operand. if (!getDefaultOperand(Op.Rec).DefaultOps.empty()) @@ -2699,8 +2745,7 @@ void CodeGenDAGPatterns::ParseInstructions() { TreePatternNode *InVal = InstInputsCheck[OpName]; InstInputsCheck.erase(OpName); // It occurred, remove from map. - if (InVal->isLeaf() && - dynamic_cast(InVal->getLeafValue())) { + if (InVal->isLeaf() && isa(InVal->getLeafValue())) { Record *InRec = static_cast(InVal->getLeafValue())->getDef(); if (Op.Rec != InRec && !InRec->isSubClassOf("ComplexPattern")) I->error("Operand $" + OpName + "'s register class disagrees" @@ -2754,11 +2799,11 @@ void CodeGenDAGPatterns::ParseInstructions() { } // If we can, convert the instructions to be patterns that are matched! - for (std::map::iterator II = + for (std::map::iterator II = Instructions.begin(), E = Instructions.end(); II != E; ++II) { DAGInstruction &TheInst = II->second; - const TreePattern *I = TheInst.getPattern(); + TreePattern *I = TheInst.getPattern(); if (I == 0) continue; // No pattern. // FIXME: Assume only the first tree is the pattern. The others are clobber @@ -2789,7 +2834,7 @@ typedef std::pair NameRecord; static void FindNames(const TreePatternNode *P, std::map &Names, - const TreePattern *PatternTop) { + TreePattern *PatternTop) { if (!P->getName().empty()) { NameRecord &Rec = Names[P->getName()]; // If this is the first instance of the name, remember the node. 
@@ -2806,12 +2851,15 @@ static void FindNames(const TreePatternNode *P, } } -void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern, +void CodeGenDAGPatterns::AddPatternToMatch(TreePattern *Pattern, const PatternToMatch &PTM) { // Do some sanity checking on the pattern we're about to match. std::string Reason; - if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this)) - Pattern->error("Pattern can never match: " + Reason); + if (!PTM.getSrcPattern()->canPatternMatch(Reason, *this)) { + PrintWarning(Pattern->getRecord()->getLoc(), + Twine("Pattern can never match: ") + Reason); + return; + } // If the source pattern's root is a complex pattern, that complex pattern // must specify the nodes it can potentially match. @@ -2852,25 +2900,156 @@ void CodeGenDAGPatterns::AddPatternToMatch(const TreePattern *Pattern, void CodeGenDAGPatterns::InferInstructionFlags() { const std::vector &Instructions = Target.getInstructionsByEnumValue(); + + // First try to infer flags from the primary instruction pattern, if any. + SmallVector Revisit; + unsigned Errors = 0; for (unsigned i = 0, e = Instructions.size(); i != e; ++i) { CodeGenInstruction &InstInfo = const_cast(*Instructions[i]); - // Determine properties of the instruction from its pattern. - bool MayStore, MayLoad, IsBitcast, HasSideEffects, IsVariadic; - InferFromPattern(InstInfo, MayStore, MayLoad, IsBitcast, - HasSideEffects, IsVariadic, *this); - InstInfo.mayStore = MayStore; - InstInfo.mayLoad = MayLoad; - InstInfo.isBitcast = IsBitcast; - InstInfo.hasSideEffects = HasSideEffects; - InstInfo.Operands.isVariadic = IsVariadic; - // Sanity checks. - if (InstInfo.isReMaterializable && InstInfo.hasSideEffects) - throw TGError(InstInfo.TheDef->getLoc(), "The instruction " + - InstInfo.TheDef->getName() + - " is rematerializable AND has unmodeled side effects?"); + // Treat neverHasSideEffects = 1 as the equivalent of hasSideEffects = 0. + // This flag is obsolete and will be removed. + if (InstInfo.neverHasSideEffects) { + assert(!InstInfo.hasSideEffects); + InstInfo.hasSideEffects_Unset = false; + } + + // Get the primary instruction pattern. + const TreePattern *Pattern = getInstruction(InstInfo.TheDef).getPattern(); + if (!Pattern) { + if (InstInfo.hasUndefFlags()) + Revisit.push_back(&InstInfo); + continue; + } + InstAnalyzer PatInfo(*this); + PatInfo.Analyze(Pattern); + Errors += InferFromPattern(InstInfo, PatInfo, InstInfo.TheDef); + } + + // Second, look for single-instruction patterns defined outside the + // instruction. + for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) { + const PatternToMatch &PTM = *I; + + // We can only infer from single-instruction patterns, otherwise we won't + // know which instruction should get the flags. + SmallVector PatInstrs; + getInstructionsInTree(PTM.getDstPattern(), PatInstrs); + if (PatInstrs.size() != 1) + continue; + + // Get the single instruction. + CodeGenInstruction &InstInfo = Target.getInstruction(PatInstrs.front()); + + // Only infer properties from the first pattern. We'll verify the others. + if (InstInfo.InferredFrom) + continue; + + InstAnalyzer PatInfo(*this); + PatInfo.Analyze(&PTM); + Errors += InferFromPattern(InstInfo, PatInfo, PTM.getSrcRecord()); + } + + if (Errors) + PrintFatalError("pattern conflicts"); + + // Revisit instructions with undefined flags and no pattern. 
+ if (Target.guessInstructionProperties()) { + for (unsigned i = 0, e = Revisit.size(); i != e; ++i) { + CodeGenInstruction &InstInfo = *Revisit[i]; + if (InstInfo.InferredFrom) + continue; + // The mayLoad and mayStore flags default to false. + // Conservatively assume hasSideEffects if it wasn't explicit. + if (InstInfo.hasSideEffects_Unset) + InstInfo.hasSideEffects = true; + } + return; } + + // Complain about any flags that are still undefined. + for (unsigned i = 0, e = Revisit.size(); i != e; ++i) { + CodeGenInstruction &InstInfo = *Revisit[i]; + if (InstInfo.InferredFrom) + continue; + if (InstInfo.hasSideEffects_Unset) + PrintError(InstInfo.TheDef->getLoc(), + "Can't infer hasSideEffects from patterns"); + if (InstInfo.mayStore_Unset) + PrintError(InstInfo.TheDef->getLoc(), + "Can't infer mayStore from patterns"); + if (InstInfo.mayLoad_Unset) + PrintError(InstInfo.TheDef->getLoc(), + "Can't infer mayLoad from patterns"); + } +} + + +/// Verify instruction flags against pattern node properties. +void CodeGenDAGPatterns::VerifyInstructionFlags() { + unsigned Errors = 0; + for (ptm_iterator I = ptm_begin(), E = ptm_end(); I != E; ++I) { + const PatternToMatch &PTM = *I; + SmallVector Instrs; + getInstructionsInTree(PTM.getDstPattern(), Instrs); + if (Instrs.empty()) + continue; + + // Count the number of instructions with each flag set. + unsigned NumSideEffects = 0; + unsigned NumStores = 0; + unsigned NumLoads = 0; + for (unsigned i = 0, e = Instrs.size(); i != e; ++i) { + const CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]); + NumSideEffects += InstInfo.hasSideEffects; + NumStores += InstInfo.mayStore; + NumLoads += InstInfo.mayLoad; + } + + // Analyze the source pattern. + InstAnalyzer PatInfo(*this); + PatInfo.Analyze(&PTM); + + // Collect error messages. + SmallVector Msgs; + + // Check for missing flags in the output. + // Permit extra flags for now at least. + if (PatInfo.hasSideEffects && !NumSideEffects) + Msgs.push_back("pattern has side effects, but hasSideEffects isn't set"); + + // Don't verify store flags on instructions with side effects. At least for + // intrinsics, side effects implies mayStore. + if (!PatInfo.hasSideEffects && PatInfo.mayStore && !NumStores) + Msgs.push_back("pattern may store, but mayStore isn't set"); + + // Similarly, mayStore implies mayLoad on intrinsics. + if (!PatInfo.mayStore && PatInfo.mayLoad && !NumLoads) + Msgs.push_back("pattern may load, but mayLoad isn't set"); + + // Print error messages. + if (Msgs.empty()) + continue; + ++Errors; + + for (unsigned i = 0, e = Msgs.size(); i != e; ++i) + PrintError(PTM.getSrcRecord()->getLoc(), Twine(Msgs[i]) + " on the " + + (Instrs.size() == 1 ? + "instruction" : "output instructions")); + // Provide the location of the relevant instruction definitions. 
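The checks in VerifyInstructionFlags above only require a flag to appear on some instruction in the destination pattern, and a stronger property subsumes the weaker one (for intrinsics, side effects imply mayStore, and mayStore implies mayLoad). A boiled-down version of that predicate, with invented struct names, just to make the subsumption order explicit:

  #include <string>
  #include <vector>

  // Number of output instructions already carrying each flag, and the flags
  // inferred from the source pattern.
  struct DstCounts { unsigned SideFx, Stores, Loads; };
  struct SrcFlags  { bool hasSideEffects, mayStore, mayLoad; };

  static std::vector<std::string> missingFlags(const SrcFlags &S,
                                               const DstCounts &D) {
    std::vector<std::string> Msgs;
    if (S.hasSideEffects && !D.SideFx)
      Msgs.push_back("pattern has side effects, but hasSideEffects isn't set");
    if (!S.hasSideEffects && S.mayStore && !D.Stores)
      Msgs.push_back("pattern may store, but mayStore isn't set");
    if (!S.mayStore && S.mayLoad && !D.Loads)
      Msgs.push_back("pattern may load, but mayLoad isn't set");
    return Msgs;
  }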
+ for (unsigned i = 0, e = Instrs.size(); i != e; ++i) { + if (Instrs[i] != PTM.getSrcRecord()) + PrintError(Instrs[i]->getLoc(), "defined here"); + const CodeGenInstruction &InstInfo = Target.getInstruction(Instrs[i]); + if (InstInfo.InferredFrom && + InstInfo.InferredFrom != InstInfo.TheDef && + InstInfo.InferredFrom != PTM.getSrcRecord()) + PrintError(InstInfo.InferredFrom->getLoc(), "inferred from patttern"); + } + } + if (Errors) + PrintFatalError("Errors in DAG patterns"); } /// Given a pattern result with an unresolved type, see if we can find one @@ -3230,7 +3409,7 @@ static void GenerateVariantsOf(TreePatternNode *N, for (unsigned i = 0, e = N->getNumChildren(); i != e; ++i) { TreePatternNode *Child = N->getChild(i); if (Child->isLeaf()) - if (DefInit *DI = dynamic_cast(Child->getLeafValue())) { + if (DefInit *DI = dyn_cast(Child->getLeafValue())) { Record *RR = DI->getDef(); if (RR->isSubClassOf("Register")) continue; @@ -3330,4 +3509,3 @@ void CodeGenDAGPatterns::GenerateVariants() { DEBUG(errs() << "\n"); } } - diff --git a/utils/TableGen/CodeGenDAGPatterns.h b/utils/TableGen/CodeGenDAGPatterns.h index 5a2d40a..9be763f 100644 --- a/utils/TableGen/CodeGenDAGPatterns.h +++ b/utils/TableGen/CodeGenDAGPatterns.h @@ -105,7 +105,7 @@ namespace EEVT { /// MergeInTypeInfo - This merges in type information from the specified /// argument. If 'this' changes, it returns true. If the two types are - /// contradictory (e.g. merge f32 into i32) then this throws an exception. + /// contradictory (e.g. merge f32 into i32) then this flags an error. bool MergeInTypeInfo(const EEVT::TypeSet &InVT, TreePattern &TP); bool MergeInTypeInfo(MVT::SimpleValueType InVT, TreePattern &TP) { @@ -187,8 +187,8 @@ struct SDTypeConstraint { /// ApplyTypeConstraint - Given a node in a pattern, apply this type /// constraint to the nodes operands. This returns true if it makes a - /// change, false otherwise. If a type contradiction is found, throw an - /// exception. + /// change, false otherwise. If a type contradiction is found, an error + /// is flagged. bool ApplyTypeConstraint(TreePatternNode *N, const SDNodeInfo &NodeInfo, TreePattern &TP) const; }; @@ -232,7 +232,7 @@ public: /// ApplyTypeConstraints - Given a node in a pattern, apply the type /// constraints for this node to the operands of the node. This returns /// true if it makes a change, false otherwise. If a type contradiction is - /// found, throw an exception. + /// found, an error is flagged. bool ApplyTypeConstraints(TreePatternNode *N, TreePattern &TP) const { bool MadeChange = false; for (unsigned i = 0, e = TypeConstraints.size(); i != e; ++i) @@ -446,13 +446,12 @@ public: // Higher level manipulation routines. /// ApplyTypeConstraints - Apply all of the type constraints relevant to /// this node and its children in the tree. This returns true if it makes a - /// change, false otherwise. If a type contradiction is found, throw an - /// exception. + /// change, false otherwise. If a type contradiction is found, flag an error. bool ApplyTypeConstraints(TreePattern &TP, bool NotRegisters); /// UpdateNodeType - Set the node type of N to VT if VT contains - /// information. If N already contains a conflicting type, then throw an - /// exception. This returns true if any information was updated. + /// information. If N already contains a conflicting type, then flag an + /// error. This returns true if any information was updated. 
/// bool UpdateNodeType(unsigned ResNo, const EEVT::TypeSet &InTy, TreePattern &TP) { @@ -514,6 +513,10 @@ class TreePattern { /// isInputPattern - True if this is an input pattern, something to match. /// False if this is an output pattern, something to emit. bool isInputPattern; + + /// hasError - True if the currently processed nodes have unresolvable types + /// or other non-fatal errors + bool HasError; public: /// TreePattern constructor - Parse the specified DagInits into the @@ -565,13 +568,19 @@ public: /// InferAllTypes - Infer/propagate as many types throughout the expression /// patterns as possible. Return true if all types are inferred, false - /// otherwise. Throw an exception if a type contradiction is found. + /// otherwise. Bail out if a type contradiction is found. bool InferAllTypes(const StringMap > *NamedTypes=0); - /// error - Throw an exception, prefixing it with information about this - /// pattern. - void error(const std::string &Msg) const; + /// error - If this is the first error in the current resolution step, + /// print it and set the error flag. Otherwise, continue silently. + void error(const std::string &Msg); + bool hasError() const { + return HasError; + } + void resetError() { + HasError = false; + } void print(raw_ostream &OS) const; void dump() const; @@ -582,8 +591,8 @@ private: void ComputeNamedNodes(TreePatternNode *N); }; -/// DAGDefaultOperand - One of these is created for each PredicateOperand -/// or OptionalDefOperand that has a set ExecuteAlways / DefaultOps field. +/// DAGDefaultOperand - One of these is created for each OperandWithDefaultOps +/// that has a set ExecuteAlways / DefaultOps field. struct DAGDefaultOperand { std::vector DefaultOps; }; @@ -602,7 +611,7 @@ public: : Pattern(TP), Results(results), Operands(operands), ImpResults(impresults), ResultPattern(0) {} - const TreePattern *getPattern() const { return Pattern; } + TreePattern *getPattern() const { return Pattern; } unsigned getNumResults() const { return Results.size(); } unsigned getNumOperands() const { return Operands.size(); } unsigned getNumImpResults() const { return ImpResults.size(); } @@ -661,23 +670,18 @@ public: unsigned getPatternComplexity(const CodeGenDAGPatterns &CGP) const; }; -// Deterministic comparison of Record*. 
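The TreePattern changes here replace thrown exceptions with a latched error flag: error() prints the first problem in a resolution step and sets HasError, and callers poll hasError()/resetError() instead of catching. A minimal sketch of that accumulate-then-check style, independent of the real TreePattern class (PatternResolver and inferAllTypes are stand-ins for illustration):

  #include <cstdio>
  #include <string>

  class PatternResolver {
    bool HasError;
  public:
    PatternResolver() : HasError(false) {}

    void error(const std::string &Msg) {
      if (HasError) return;                 // report only the first problem
      std::fprintf(stderr, "error: %s\n", Msg.c_str());
      HasError = true;
    }
    bool hasError() const { return HasError; }
    void resetError() { HasError = false; }

    bool inferAllTypes() {
      // ... apply type constraints, calling error() on contradictions ...
      return !HasError;
    }
  };

  // Typical caller shape: finish the step, then escalate once.
  //   P.resetError();
  //   if (!P.inferAllTypes() || P.hasError())
  //     /* PrintFatalError(...) in the real code */;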
-struct RecordPtrCmp { - bool operator()(const Record *LHS, const Record *RHS) const; -}; - class CodeGenDAGPatterns { RecordKeeper &Records; CodeGenTarget Target; std::vector Intrinsics; std::vector TgtIntrinsics; - std::map SDNodes; - std::map, RecordPtrCmp> SDNodeXForms; - std::map ComplexPatterns; - std::map PatternFragments; - std::map DefaultOperands; - std::map Instructions; + std::map SDNodes; + std::map, LessRecordByID> SDNodeXForms; + std::map ComplexPatterns; + std::map PatternFragments; + std::map DefaultOperands; + std::map Instructions; // Specific SDNode definitions: Record *intrinsic_void_sdnode; @@ -708,7 +712,7 @@ public: return SDNodeXForms.find(R)->second; } - typedef std::map::const_iterator + typedef std::map::const_iterator nx_iterator; nx_iterator nx_begin() const { return SDNodeXForms.begin(); } nx_iterator nx_end() const { return SDNodeXForms.end(); } @@ -758,7 +762,7 @@ public: return PatternFragments.find(R)->second; } - typedef std::map::const_iterator + typedef std::map::const_iterator pf_iterator; pf_iterator pf_begin() const { return PatternFragments.begin(); } pf_iterator pf_end() const { return PatternFragments.end(); } @@ -797,8 +801,9 @@ private: void ParsePatterns(); void InferInstructionFlags(); void GenerateVariants(); + void VerifyInstructionFlags(); - void AddPatternToMatch(const TreePattern *Pattern, const PatternToMatch &PTM); + void AddPatternToMatch(TreePattern *Pattern, const PatternToMatch &PTM); void FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat, std::map &InstInputs, diff --git a/utils/TableGen/CodeGenInstruction.cpp b/utils/TableGen/CodeGenInstruction.cpp index 12e153a..0a8684d 100644 --- a/utils/TableGen/CodeGenInstruction.cpp +++ b/utils/TableGen/CodeGenInstruction.cpp @@ -32,20 +32,20 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) { DagInit *OutDI = R->getValueAsDag("OutOperandList"); - if (DefInit *Init = dynamic_cast(OutDI->getOperator())) { + if (DefInit *Init = dyn_cast(OutDI->getOperator())) { if (Init->getDef()->getName() != "outs") - throw R->getName() + ": invalid def name for output list: use 'outs'"; + PrintFatalError(R->getName() + ": invalid def name for output list: use 'outs'"); } else - throw R->getName() + ": invalid output list: use 'outs'"; + PrintFatalError(R->getName() + ": invalid output list: use 'outs'"); NumDefs = OutDI->getNumArgs(); DagInit *InDI = R->getValueAsDag("InOperandList"); - if (DefInit *Init = dynamic_cast(InDI->getOperator())) { + if (DefInit *Init = dyn_cast(InDI->getOperator())) { if (Init->getDef()->getName() != "ins") - throw R->getName() + ": invalid def name for input list: use 'ins'"; + PrintFatalError(R->getName() + ": invalid def name for input list: use 'ins'"); } else - throw R->getName() + ": invalid input list: use 'ins'"; + PrintFatalError(R->getName() + ": invalid input list: use 'ins'"); unsigned MIOperandNo = 0; std::set OperandNames; @@ -60,9 +60,9 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) { ArgName = InDI->getArgName(i-NumDefs); } - DefInit *Arg = dynamic_cast(ArgInit); + DefInit *Arg = dyn_cast(ArgInit); if (!Arg) - throw "Illegal operand for the '" + R->getName() + "' instruction!"; + PrintFatalError("Illegal operand for the '" + R->getName() + "' instruction!"); Record *Rec = Arg->getDef(); std::string PrintMethod = "printOperand"; @@ -80,11 +80,10 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) { MIOpInfo = Rec->getValueAsDag("MIOperandInfo"); // Verify that MIOpInfo has an 'ops' root value. 
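The CodeGenDAGPatterns member maps above drop the local RecordPtrCmp comparator in favour of LessRecordByID, so iteration order follows record creation order rather than pointer values and the generated output stays deterministic across runs. LessRecordByID itself lives in the TableGen Record support code; the sketch below only shows the shape of the comparator under that assumption:

  #include <map>
  #include <string>

  // Stand-in for llvm::Record: each record carries a unique, monotonically
  // increasing ID assigned at creation time.
  struct Rec {
    unsigned ID;
    std::string Name;
  };

  // Ordering by ID rather than by pointer keeps std::map iteration stable.
  struct LessRecByID {
    bool operator()(const Rec *L, const Rec *R) const { return L->ID < R->ID; }
  };

  // e.g. the SDNodes / PatternFragments style maps become:
  //   std::map<Rec *, NodeInfo, LessRecByID> SDNodes;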
- if (!dynamic_cast(MIOpInfo->getOperator()) || - dynamic_cast(MIOpInfo->getOperator()) - ->getDef()->getName() != "ops") - throw "Bad value for MIOperandInfo in operand '" + Rec->getName() + - "'\n"; + if (!isa(MIOpInfo->getOperator()) || + cast(MIOpInfo->getOperator())->getDef()->getName() != "ops") + PrintFatalError("Bad value for MIOperandInfo in operand '" + Rec->getName() + + "'\n"); // If we have MIOpInfo, then we have #operands equal to number of entries // in MIOperandInfo. @@ -101,17 +100,17 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) { } else if (Rec->isSubClassOf("RegisterClass")) { OperandType = "OPERAND_REGISTER"; } else if (!Rec->isSubClassOf("PointerLikeRegClass") && - Rec->getName() != "unknown") - throw "Unknown operand class '" + Rec->getName() + - "' in '" + R->getName() + "' instruction!"; + !Rec->isSubClassOf("unknown_class")) + PrintFatalError("Unknown operand class '" + Rec->getName() + + "' in '" + R->getName() + "' instruction!"); // Check that the operand has a name and that it's unique. if (ArgName.empty()) - throw "In instruction '" + R->getName() + "', operand #" + utostr(i) + - " has no name!"; + PrintFatalError("In instruction '" + R->getName() + "', operand #" + utostr(i) + + " has no name!"); if (!OperandNames.insert(ArgName).second) - throw "In instruction '" + R->getName() + "', operand #" + utostr(i) + - " has the same name as a previous operand!"; + PrintFatalError("In instruction '" + R->getName() + "', operand #" + utostr(i) + + " has the same name as a previous operand!"); OperandList.push_back(OperandInfo(Rec, ArgName, PrintMethod, EncoderMethod, OperandType, MIOperandNo, NumOps, @@ -129,13 +128,13 @@ CGIOperandList::CGIOperandList(Record *R) : TheDef(R) { /// getOperandNamed - Return the index of the operand with the specified /// non-empty name. If the instruction does not have an operand with the -/// specified name, throw an exception. +/// specified name, abort. /// unsigned CGIOperandList::getOperandNamed(StringRef Name) const { unsigned OpIdx; if (hasOperandNamed(Name, OpIdx)) return OpIdx; - throw "'" + TheDef->getName() + "' does not have an operand named '$" + - Name.str() + "'!"; + PrintFatalError("'" + TheDef->getName() + "' does not have an operand named '$" + + Name.str() + "'!"); } /// hasOperandNamed - Query whether the instruction has an operand of the @@ -154,7 +153,7 @@ bool CGIOperandList::hasOperandNamed(StringRef Name, unsigned &OpIdx) const { std::pair CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) { if (Op.empty() || Op[0] != '$') - throw TheDef->getName() + ": Illegal operand name: '" + Op + "'"; + PrintFatalError(TheDef->getName() + ": Illegal operand name: '" + Op + "'"); std::string OpName = Op.substr(1); std::string SubOpName; @@ -164,7 +163,7 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) { if (DotIdx != std::string::npos) { SubOpName = OpName.substr(DotIdx+1); if (SubOpName.empty()) - throw TheDef->getName() + ": illegal empty suboperand name in '" +Op +"'"; + PrintFatalError(TheDef->getName() + ": illegal empty suboperand name in '" +Op +"'"); OpName = OpName.substr(0, DotIdx); } @@ -174,8 +173,8 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) { // If one was needed, throw. 
if (OperandList[OpIdx].MINumOperands > 1 && !AllowWholeOp && SubOpName.empty()) - throw TheDef->getName() + ": Illegal to refer to" - " whole operand part of complex operand '" + Op + "'"; + PrintFatalError(TheDef->getName() + ": Illegal to refer to" + " whole operand part of complex operand '" + Op + "'"); // Otherwise, return the operand. return std::make_pair(OpIdx, 0U); @@ -184,7 +183,7 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) { // Find the suboperand number involved. DagInit *MIOpInfo = OperandList[OpIdx].MIOperandInfo; if (MIOpInfo == 0) - throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'"; + PrintFatalError(TheDef->getName() + ": unknown suboperand name in '" + Op + "'"); // Find the operand with the right name. for (unsigned i = 0, e = MIOpInfo->getNumArgs(); i != e; ++i) @@ -192,7 +191,7 @@ CGIOperandList::ParseOperandName(const std::string &Op, bool AllowWholeOp) { return std::make_pair(OpIdx, i); // Otherwise, didn't find it! - throw TheDef->getName() + ": unknown suboperand name in '" + Op + "'"; + PrintFatalError(TheDef->getName() + ": unknown suboperand name in '" + Op + "'"); } static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) { @@ -204,13 +203,13 @@ static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) { std::string Name = CStr.substr(wpos+1); wpos = Name.find_first_not_of(" \t"); if (wpos == std::string::npos) - throw "Illegal format for @earlyclobber constraint: '" + CStr + "'"; + PrintFatalError("Illegal format for @earlyclobber constraint: '" + CStr + "'"); Name = Name.substr(wpos); std::pair Op = Ops.ParseOperandName(Name, false); // Build the string for the operand if (!Ops[Op.first].Constraints[Op.second].isNone()) - throw "Operand '" + Name + "' cannot have multiple constraints!"; + PrintFatalError("Operand '" + Name + "' cannot have multiple constraints!"); Ops[Op.first].Constraints[Op.second] = CGIOperandList::ConstraintInfo::getEarlyClobber(); return; @@ -225,25 +224,27 @@ static void ParseConstraint(const std::string &CStr, CGIOperandList &Ops) { // TIED_TO: $src1 = $dst wpos = Name.find_first_of(" \t"); if (wpos == std::string::npos) - throw "Illegal format for tied-to constraint: '" + CStr + "'"; + PrintFatalError("Illegal format for tied-to constraint: '" + CStr + "'"); std::string DestOpName = Name.substr(0, wpos); std::pair DestOp = Ops.ParseOperandName(DestOpName, false); Name = CStr.substr(pos+1); wpos = Name.find_first_not_of(" \t"); if (wpos == std::string::npos) - throw "Illegal format for tied-to constraint: '" + CStr + "'"; - - std::pair SrcOp = - Ops.ParseOperandName(Name.substr(wpos), false); - if (SrcOp > DestOp) - throw "Illegal tied-to operand constraint '" + CStr + "'"; + PrintFatalError("Illegal format for tied-to constraint: '" + CStr + "'"); + std::string SrcOpName = Name.substr(wpos); + std::pair SrcOp = Ops.ParseOperandName(SrcOpName, false); + if (SrcOp > DestOp) { + std::swap(SrcOp, DestOp); + std::swap(SrcOpName, DestOpName); + } unsigned FlatOpNo = Ops.getFlattenedOperandNumber(SrcOp); if (!Ops[DestOp.first].Constraints[DestOp.second].isNone()) - throw "Operand '" + DestOpName + "' cannot have multiple constraints!"; + PrintFatalError("Operand '" + DestOpName + + "' cannot have multiple constraints!"); Ops[DestOp.first].Constraints[DestOp.second] = CGIOperandList::ConstraintInfo::getTied(FlatOpNo); } @@ -287,7 +288,8 @@ void CGIOperandList::ProcessDisableEncoding(std::string DisableEncoding) { // CodeGenInstruction Implementation 
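ParseConstraint above used to reject a tied-to constraint whose source operand came after the destination; it now swaps the pair so the tie is always recorded on the later operand, pointing back at the earlier one. A rough sketch of that normalization with plain pairs (the real code resolves the names through ParseOperandName and flattens sub-operand indices before storing the constraint):

  #include <utility>

  typedef std::pair<unsigned, unsigned> OpRef;  // (operand index, sub-operand)

  // Order the two operands of a "$a = $b" constraint so that .first is the
  // earlier operand (the tie target) and .second is the later operand, which
  // carries the TIED_TO constraint.
  static std::pair<OpRef, OpRef> normalizeTie(OpRef Src, OpRef Dest) {
    if (Src > Dest)
      std::swap(Src, Dest);
    return std::make_pair(Src, Dest);
  }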
//===----------------------------------------------------------------------===// -CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) { +CodeGenInstruction::CodeGenInstruction(Record *R) + : TheDef(R), Operands(R), InferredFrom(0) { Namespace = R->getValueAsString("Namespace"); AsmString = R->getValueAsString("AsmString"); @@ -301,8 +303,6 @@ CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) { isBarrier = R->getValueAsBit("isBarrier"); isCall = R->getValueAsBit("isCall"); canFoldAsLoad = R->getValueAsBit("canFoldAsLoad"); - mayLoad = R->getValueAsBit("mayLoad"); - mayStore = R->getValueAsBit("mayStore"); isPredicable = Operands.isPredicable || R->getValueAsBit("isPredicable"); isConvertibleToThreeAddress = R->getValueAsBit("isConvertibleToThreeAddress"); isCommutable = R->getValueAsBit("isCommutable"); @@ -313,8 +313,13 @@ CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) { hasPostISelHook = R->getValueAsBit("hasPostISelHook"); hasCtrlDep = R->getValueAsBit("hasCtrlDep"); isNotDuplicable = R->getValueAsBit("isNotDuplicable"); - hasSideEffects = R->getValueAsBit("hasSideEffects"); + + mayLoad = R->getValueAsBitOrUnset("mayLoad", mayLoad_Unset); + mayStore = R->getValueAsBitOrUnset("mayStore", mayStore_Unset); + hasSideEffects = R->getValueAsBitOrUnset("hasSideEffects", + hasSideEffects_Unset); neverHasSideEffects = R->getValueAsBit("neverHasSideEffects"); + isAsCheapAsAMove = R->getValueAsBit("isAsCheapAsAMove"); hasExtraSrcRegAllocReq = R->getValueAsBit("hasExtraSrcRegAllocReq"); hasExtraDefRegAllocReq = R->getValueAsBit("hasExtraDefRegAllocReq"); @@ -324,7 +329,7 @@ CodeGenInstruction::CodeGenInstruction(Record *R) : TheDef(R), Operands(R) { ImplicitUses = R->getValueAsListOfDefs("Uses"); if (neverHasSideEffects + hasSideEffects > 1) - throw R->getName() + ": multiple conflicting side-effect flags set!"; + PrintFatalError(R->getName() + ": multiple conflicting side-effect flags set!"); // Parse Constraints. ParseConstraints(R->getValueAsString("Constraints"), Operands); @@ -409,16 +414,16 @@ FlattenAsmStringVariants(StringRef Cur, unsigned Variant) { /// successful match, with ResOp set to the result operand to be used. bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo, Record *InstOpRec, bool hasSubOps, - SMLoc Loc, CodeGenTarget &T, + ArrayRef Loc, CodeGenTarget &T, ResultOperand &ResOp) { Init *Arg = Result->getArg(AliasOpNo); - DefInit *ADI = dynamic_cast(Arg); + DefInit *ADI = dyn_cast(Arg); if (ADI && ADI->getDef() == InstOpRec) { // If the operand is a record, it must have a name, and the record type // must match up with the instruction's argument type. if (Result->getArgName(AliasOpNo).empty()) - throw TGError(Loc, "result argument #" + utostr(AliasOpNo) + + PrintFatalError(Loc, "result argument #" + utostr(AliasOpNo) + " must have a name!"); ResOp = ResultOperand(Result->getArgName(AliasOpNo), ADI->getDef()); return true; @@ -442,7 +447,7 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo, DagInit *DI = InstOpRec->getValueAsDag("MIOperandInfo"); // The operand info should only have a single (register) entry. We // want the register class of it. 
- InstOpRec = dynamic_cast(DI->getArg(0))->getDef(); + InstOpRec = cast(DI->getArg(0))->getDef(); } if (InstOpRec->isSubClassOf("RegisterOperand")) @@ -453,13 +458,13 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo, if (!T.getRegisterClass(InstOpRec) .contains(T.getRegBank().getReg(ADI->getDef()))) - throw TGError(Loc, "fixed register " + ADI->getDef()->getName() + - " is not a member of the " + InstOpRec->getName() + - " register class!"); + PrintFatalError(Loc, "fixed register " + ADI->getDef()->getName() + + " is not a member of the " + InstOpRec->getName() + + " register class!"); if (!Result->getArgName(AliasOpNo).empty()) - throw TGError(Loc, "result fixed register argument must " - "not have a name!"); + PrintFatalError(Loc, "result fixed register argument must " + "not have a name!"); ResOp = ResultOperand(ADI->getDef()); return true; @@ -482,13 +487,13 @@ bool CodeGenInstAlias::tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo, } // Literal integers. - if (IntInit *II = dynamic_cast(Arg)) { + if (IntInit *II = dyn_cast(Arg)) { if (hasSubOps || !InstOpRec->isSubClassOf("Operand")) return false; // Integer arguments can't have names. if (!Result->getArgName(AliasOpNo).empty()) - throw TGError(Loc, "result argument #" + utostr(AliasOpNo) + - " must not have a name!"); + PrintFatalError(Loc, "result argument #" + utostr(AliasOpNo) + + " must not have a name!"); ResOp = ResultOperand(II->getValue()); return true; } @@ -514,9 +519,10 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) { Result = R->getValueAsDag("ResultInst"); // Verify that the root of the result is an instruction. - DefInit *DI = dynamic_cast(Result->getOperator()); + DefInit *DI = dyn_cast(Result->getOperator()); if (DI == 0 || !DI->getDef()->isSubClassOf("Instruction")) - throw TGError(R->getLoc(), "result of inst alias should be an instruction"); + PrintFatalError(R->getLoc(), + "result of inst alias should be an instruction"); ResultInst = &T.getInstruction(DI->getDef()); @@ -524,7 +530,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) { // the same class. StringMap NameClass; for (unsigned i = 0, e = Result->getNumArgs(); i != e; ++i) { - DefInit *ADI = dynamic_cast(Result->getArg(i)); + DefInit *ADI = dyn_cast(Result->getArg(i)); if (!ADI || Result->getArgName(i).empty()) continue; // Verify we don't have something like: (someinst GR16:$foo, GR32:$foo) @@ -532,9 +538,9 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) { // same type. 
Record *&Entry = NameClass[Result->getArgName(i)]; if (Entry && Entry != ADI->getDef()) - throw TGError(R->getLoc(), "result value $" + Result->getArgName(i) + - " is both " + Entry->getName() + " and " + - ADI->getDef()->getName() + "!"); + PrintFatalError(R->getLoc(), "result value $" + Result->getArgName(i) + + " is both " + Entry->getName() + " and " + + ADI->getDef()->getName() + "!"); Entry = ADI->getDef(); } @@ -550,7 +556,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) { continue; if (AliasOpNo >= Result->getNumArgs()) - throw TGError(R->getLoc(), "not enough arguments for instruction!"); + PrintFatalError(R->getLoc(), "not enough arguments for instruction!"); Record *InstOpRec = ResultInst->Operands[i].Rec; unsigned NumSubOps = ResultInst->Operands[i].MINumOperands; @@ -571,7 +577,7 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) { } else { DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo; for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) { - Record *SubRec = dynamic_cast(MIOI->getArg(SubOp))->getDef(); + Record *SubRec = cast(MIOI->getArg(SubOp))->getDef(); // Take care to instantiate each of the suboperands with the correct // nomenclature: $foo.bar @@ -591,26 +597,26 @@ CodeGenInstAlias::CodeGenInstAlias(Record *R, CodeGenTarget &T) : TheDef(R) { DagInit *MIOI = ResultInst->Operands[i].MIOperandInfo; for (unsigned SubOp = 0; SubOp != NumSubOps; ++SubOp) { if (AliasOpNo >= Result->getNumArgs()) - throw TGError(R->getLoc(), "not enough arguments for instruction!"); - Record *SubRec = dynamic_cast(MIOI->getArg(SubOp))->getDef(); + PrintFatalError(R->getLoc(), "not enough arguments for instruction!"); + Record *SubRec = cast(MIOI->getArg(SubOp))->getDef(); if (tryAliasOpMatch(Result, AliasOpNo, SubRec, false, R->getLoc(), T, ResOp)) { ResultOperands.push_back(ResOp); ResultInstOperandIndex.push_back(std::make_pair(i, SubOp)); ++AliasOpNo; } else { - throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) + + PrintFatalError(R->getLoc(), "result argument #" + utostr(AliasOpNo) + " does not match instruction operand class " + (SubOp == 0 ? InstOpRec->getName() :SubRec->getName())); } } continue; } - throw TGError(R->getLoc(), "result argument #" + utostr(AliasOpNo) + - " does not match instruction operand class " + - InstOpRec->getName()); + PrintFatalError(R->getLoc(), "result argument #" + utostr(AliasOpNo) + + " does not match instruction operand class " + + InstOpRec->getName()); } if (AliasOpNo != Result->getNumArgs()) - throw TGError(R->getLoc(), "too many operands for instruction!"); + PrintFatalError(R->getLoc(), "too many operands for instruction!"); } diff --git a/utils/TableGen/CodeGenInstruction.h b/utils/TableGen/CodeGenInstruction.h index 95b572d..55d4439 100644 --- a/utils/TableGen/CodeGenInstruction.h +++ b/utils/TableGen/CodeGenInstruction.h @@ -152,7 +152,7 @@ namespace llvm { /// getOperandNamed - Return the index of the operand with the specified /// non-empty name. If the instruction does not have an operand with the - /// specified name, throw an exception. + /// specified name, abort. unsigned getOperandNamed(StringRef Name) const; /// hasOperandNamed - Query whether the instruction has an operand of the @@ -162,9 +162,8 @@ namespace llvm { /// ParseOperandName - Parse an operand name like "$foo" or "$foo.bar", /// where $foo is a whole operand and $foo.bar refers to a suboperand. - /// This throws an exception if the name is invalid. 
If AllowWholeOp is - /// true, references to operands with suboperands are allowed, otherwise - /// not. + /// This aborts if the name is invalid. If AllowWholeOp is true, references + /// to operands with suboperands are allowed, otherwise not. std::pair ParseOperandName(const std::string &Op, bool AllowWholeOp = true); @@ -226,7 +225,10 @@ namespace llvm { bool isBarrier; bool isCall; bool canFoldAsLoad; - bool mayLoad, mayStore; + bool mayLoad; + bool mayLoad_Unset; + bool mayStore; + bool mayStore_Unset; bool isPredicable; bool isConvertibleToThreeAddress; bool isCommutable; @@ -238,6 +240,7 @@ namespace llvm { bool hasCtrlDep; bool isNotDuplicable; bool hasSideEffects; + bool hasSideEffects_Unset; bool neverHasSideEffects; bool isAsCheapAsAMove; bool hasExtraSrcRegAllocReq; @@ -245,6 +248,14 @@ namespace llvm { bool isCodeGenOnly; bool isPseudo; + /// Are there any undefined flags? + bool hasUndefFlags() const { + return mayLoad_Unset || mayStore_Unset || hasSideEffects_Unset; + } + + // The record used to infer instruction flags, or NULL if no flag values + // have been inferred. + Record *InferredFrom; CodeGenInstruction(Record *R); @@ -319,7 +330,7 @@ namespace llvm { CodeGenInstAlias(Record *R, CodeGenTarget &T); bool tryAliasOpMatch(DagInit *Result, unsigned AliasOpNo, - Record *InstOpRec, bool hasSubOps, SMLoc Loc, + Record *InstOpRec, bool hasSubOps, ArrayRef Loc, CodeGenTarget &T, ResultOperand &ResOp); }; } diff --git a/utils/TableGen/CodeGenMapTable.cpp b/utils/TableGen/CodeGenMapTable.cpp new file mode 100644 index 0000000..1653d67 --- /dev/null +++ b/utils/TableGen/CodeGenMapTable.cpp @@ -0,0 +1,606 @@ +//===- CodeGenMapTable.cpp - Instruction Mapping Table Generator ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// CodeGenMapTable provides functionality for the TabelGen to create +// relation mapping between instructions. Relation models are defined using +// InstrMapping as a base class. This file implements the functionality which +// parses these definitions and generates relation maps using the information +// specified there. These maps are emitted as tables in the XXXGenInstrInfo.inc +// file along with the functions to query them. +// +// A relationship model to relate non-predicate instructions with their +// predicated true/false forms can be defined as follows: +// +// def getPredOpcode : InstrMapping { +// let FilterClass = "PredRel"; +// let RowFields = ["BaseOpcode"]; +// let ColFields = ["PredSense"]; +// let KeyCol = ["none"]; +// let ValueCols = [["true"], ["false"]]; } +// +// CodeGenMapTable parses this map and generates a table in XXXGenInstrInfo.inc +// file that contains the instructions modeling this relationship. This table +// is defined in the function +// "int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense)" +// that can be used to retrieve the predicated form of the instruction by +// passing its opcode value and the predicate sense (true/false) of the desired +// instruction as arguments. +// +// Short description of the algorithm: +// +// 1) Iterate through all the records that derive from "InstrMapping" class. +// 2) For each record, filter out instructions based on the FilterClass value. +// 3) Iterate through this set of instructions and insert them into +// RowInstrMap map based on their RowFields values. 
RowInstrMap is keyed by the +// vector of RowFields values and contains vectors of Records (instructions) as +// values. RowFields is a list of fields that are required to have the same +// values for all the instructions appearing in the same row of the relation +// table. All the instructions in a given row of the relation table have some +// sort of relationship with the key instruction defined by the corresponding +// relationship model. +// +// Ex: RowInstrMap(RowVal1, RowVal2, ...) -> [Instr1, Instr2, Instr3, ... ] +// Here Instr1, Instr2, Instr3 have same values (RowVal1, RowVal2) for +// RowFields. These groups of instructions are later matched against ValueCols +// to determine the column they belong to, if any. +// +// While building the RowInstrMap map, collect all the key instructions in +// KeyInstrVec. These are the instructions having the same values as KeyCol +// for all the fields listed in ColFields. +// +// For Example: +// +// Relate non-predicate instructions with their predicated true/false forms. +// +// def getPredOpcode : InstrMapping { +// let FilterClass = "PredRel"; +// let RowFields = ["BaseOpcode"]; +// let ColFields = ["PredSense"]; +// let KeyCol = ["none"]; +// let ValueCols = [["true"], ["false"]]; } +// +// Here, only instructions that have "none" as PredSense will be selected as key +// instructions. +// +// 4) For each key instruction, get the group of instructions that share the +// same key-value as the key instruction from RowInstrMap. Iterate over the list +// of columns in ValueCols (it is defined as a list >. Therefore, +// it can specify multi-column relationships). For each column, find the +// instruction from the group that matches all the values for the column. +// Multiple matches are not allowed. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenTarget.h" +#include "llvm/Support/Format.h" +#include "llvm/TableGen/Error.h" +using namespace llvm; +typedef std::map > InstrRelMapTy; + +typedef std::map, std::vector > RowInstrMapTy; + +namespace { + +//===----------------------------------------------------------------------===// +// This class is used to represent InstrMapping class defined in Target.td file. +class InstrMap { +private: + std::string Name; + std::string FilterClass; + ListInit *RowFields; + ListInit *ColFields; + ListInit *KeyCol; + std::vector ValueCols; + +public: + InstrMap(Record* MapRec) { + Name = MapRec->getName(); + + // FilterClass - It's used to reduce the search space only to the + // instructions that define the kind of relationship modeled by + // this InstrMapping object/record. + const RecordVal *Filter = MapRec->getValue("FilterClass"); + FilterClass = Filter->getValue()->getAsUnquotedString(); + + // List of fields/attributes that need to be same across all the + // instructions in a row of the relation table. + RowFields = MapRec->getValueAsListInit("RowFields"); + + // List of fields/attributes that are constant across all the instruction + // in a column of the relation table. Ex: ColFields = 'predSense' + ColFields = MapRec->getValueAsListInit("ColFields"); + + // Values for the fields/attributes listed in 'ColFields'. + // Ex: KeyCol = 'noPred' -- key instruction is non predicated + KeyCol = MapRec->getValueAsListInit("KeyCol"); + + // List of values for the fields/attributes listed in 'ColFields', one for + // each column in the relation table. + // + // Ex: ValueCols = [['true'],['false']] -- it results two columns in the + // table. 
First column requires all the instructions to have predSense + // set to 'true' and second column requires it to be 'false'. + ListInit *ColValList = MapRec->getValueAsListInit("ValueCols"); + + // Each instruction map must specify at least one column for it to be valid. + if (ColValList->getSize() == 0) + PrintFatalError(MapRec->getLoc(), "InstrMapping record `" + + MapRec->getName() + "' has empty " + "`ValueCols' field!"); + + for (unsigned i = 0, e = ColValList->getSize(); i < e; i++) { + ListInit *ColI = dyn_cast(ColValList->getElement(i)); + + // Make sure that all the sub-lists in 'ValueCols' have same number of + // elements as the fields in 'ColFields'. + if (ColI->getSize() != ColFields->getSize()) + PrintFatalError(MapRec->getLoc(), "Record `" + MapRec->getName() + + "', field `ValueCols' entries don't match with " + + " the entries in 'ColFields'!"); + ValueCols.push_back(ColI); + } + } + + std::string getName() const { + return Name; + } + + std::string getFilterClass() { + return FilterClass; + } + + ListInit *getRowFields() const { + return RowFields; + } + + ListInit *getColFields() const { + return ColFields; + } + + ListInit *getKeyCol() const { + return KeyCol; + } + + const std::vector &getValueCols() const { + return ValueCols; + } +}; +} // End anonymous namespace. + + +//===----------------------------------------------------------------------===// +// class MapTableEmitter : It builds the instruction relation maps using +// the information provided in InstrMapping records. It outputs these +// relationship maps as tables into XXXGenInstrInfo.inc file along with the +// functions to query them. + +namespace { +class MapTableEmitter { +private: +// std::string TargetName; + const CodeGenTarget &Target; + // InstrMapDesc - InstrMapping record to be processed. + InstrMap InstrMapDesc; + + // InstrDefs - list of instructions filtered using FilterClass defined + // in InstrMapDesc. + std::vector InstrDefs; + + // RowInstrMap - maps RowFields values to the instructions. It's keyed by the + // values of the row fields and contains vector of records as values. + RowInstrMapTy RowInstrMap; + + // KeyInstrVec - list of key instructions. + std::vector KeyInstrVec; + DenseMap > MapTable; + +public: + MapTableEmitter(CodeGenTarget &Target, RecordKeeper &Records, Record *IMRec): + Target(Target), InstrMapDesc(IMRec) { + const std::string FilterClass = InstrMapDesc.getFilterClass(); + InstrDefs = Records.getAllDerivedDefinitions(FilterClass); + } + + void buildRowInstrMap(); + + // Returns true if an instruction is a key instruction, i.e., its ColFields + // have same values as KeyCol. + bool isKeyColInstr(Record* CurInstr); + + // Find column instruction corresponding to a key instruction based on the + // constraints for that column. + Record *getInstrForColumn(Record *KeyInstr, ListInit *CurValueCol); + + // Find column instructions for each key instruction based + // on ValueCols and store them into MapTable. + void buildMapTable(); + + void emitBinSearch(raw_ostream &OS, unsigned TableSize); + void emitTablesWithFunc(raw_ostream &OS); + unsigned emitBinSearchTable(raw_ostream &OS); + + // Lookup functions to query binary search tables. + void emitMapFuncBody(raw_ostream &OS, unsigned TableSize); + +}; +} // End anonymous namespace. 
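MapTableEmitter, declared above, implements the algorithm described in the file header: group the filtered instructions by their RowFields values, pick the key instruction of each row (the one whose ColFields match KeyCol), then resolve one instruction per ValueCols column. A compact, self-contained model of that grouping, using strings in place of Init values and the getPredOpcode example's field names (everything below is invented for illustration):

  #include <map>
  #include <string>
  #include <vector>

  struct Instr {
    std::string Name;
    std::string BaseOpcode;   // RowFields = ["BaseOpcode"]
    std::string PredSense;    // ColFields = ["PredSense"], KeyCol = ["none"]
  };

  // All instructions sharing the same RowFields values land in one row.
  typedef std::map<std::string, std::vector<const Instr *> > RowMap;

  static const Instr *findColumn(const std::vector<const Instr *> &Row,
                                 const std::string &Sense) {
    for (size_t i = 0; i != Row.size(); ++i)
      if (Row[i]->PredSense == Sense)
        return Row[i];            // the real emitter errors on duplicates
    return 0;
  }

  static void buildTable(const std::vector<Instr> &Defs) {
    RowMap Rows;
    std::vector<const Instr *> Keys;
    for (size_t i = 0; i != Defs.size(); ++i) {
      Rows[Defs[i].BaseOpcode].push_back(&Defs[i]);
      if (Defs[i].PredSense == "none")        // KeyCol match
        Keys.push_back(&Defs[i]);
    }
    for (size_t i = 0; i != Keys.size(); ++i) {
      const std::vector<const Instr *> &Row = Rows[Keys[i]->BaseOpcode];
      const Instr *TrueForm  = findColumn(Row, "true");   // ValueCols[0]
      const Instr *FalseForm = findColumn(Row, "false");  // ValueCols[1]
      (void)TrueForm; (void)FalseForm;   // one emitted table row per key
    }
  }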
+ + +//===----------------------------------------------------------------------===// +// Process all the instructions that model this relation (alreday present in +// InstrDefs) and insert them into RowInstrMap which is keyed by the values of +// the fields listed as RowFields. It stores vectors of records as values. +// All the related instructions have the same values for the RowFields thus are +// part of the same key-value pair. +//===----------------------------------------------------------------------===// + +void MapTableEmitter::buildRowInstrMap() { + for (unsigned i = 0, e = InstrDefs.size(); i < e; i++) { + std::vector InstrList; + Record *CurInstr = InstrDefs[i]; + std::vector KeyValue; + ListInit *RowFields = InstrMapDesc.getRowFields(); + for (unsigned j = 0, endRF = RowFields->getSize(); j < endRF; j++) { + Init *RowFieldsJ = RowFields->getElement(j); + Init *CurInstrVal = CurInstr->getValue(RowFieldsJ)->getValue(); + KeyValue.push_back(CurInstrVal); + } + + // Collect key instructions into KeyInstrVec. Later, these instructions are + // processed to assign column position to the instructions sharing + // their KeyValue in RowInstrMap. + if (isKeyColInstr(CurInstr)) + KeyInstrVec.push_back(CurInstr); + + RowInstrMap[KeyValue].push_back(CurInstr); + } +} + +//===----------------------------------------------------------------------===// +// Return true if an instruction is a KeyCol instruction. +//===----------------------------------------------------------------------===// + +bool MapTableEmitter::isKeyColInstr(Record* CurInstr) { + ListInit *ColFields = InstrMapDesc.getColFields(); + ListInit *KeyCol = InstrMapDesc.getKeyCol(); + + // Check if the instruction is a KeyCol instruction. + bool MatchFound = true; + for (unsigned j = 0, endCF = ColFields->getSize(); + (j < endCF) && MatchFound; j++) { + RecordVal *ColFieldName = CurInstr->getValue(ColFields->getElement(j)); + std::string CurInstrVal = ColFieldName->getValue()->getAsUnquotedString(); + std::string KeyColValue = KeyCol->getElement(j)->getAsUnquotedString(); + MatchFound = (CurInstrVal == KeyColValue); + } + return MatchFound; +} + +//===----------------------------------------------------------------------===// +// Build a map to link key instructions with the column instructions arranged +// according to their column positions. +//===----------------------------------------------------------------------===// + +void MapTableEmitter::buildMapTable() { + // Find column instructions for a given key based on the ColField + // constraints. + const std::vector &ValueCols = InstrMapDesc.getValueCols(); + unsigned NumOfCols = ValueCols.size(); + for (unsigned j = 0, endKI = KeyInstrVec.size(); j < endKI; j++) { + Record *CurKeyInstr = KeyInstrVec[j]; + std::vector ColInstrVec(NumOfCols); + + // Find the column instruction based on the constraints for the column. + for (unsigned ColIdx = 0; ColIdx < NumOfCols; ColIdx++) { + ListInit *CurValueCol = ValueCols[ColIdx]; + Record *ColInstr = getInstrForColumn(CurKeyInstr, CurValueCol); + ColInstrVec[ColIdx] = ColInstr; + } + MapTable[CurKeyInstr] = ColInstrVec; + } +} + +//===----------------------------------------------------------------------===// +// Find column instruction based on the constraints for that column. 
+//===----------------------------------------------------------------------===// + +Record *MapTableEmitter::getInstrForColumn(Record *KeyInstr, + ListInit *CurValueCol) { + ListInit *RowFields = InstrMapDesc.getRowFields(); + std::vector KeyValue; + + // Construct KeyValue using KeyInstr's values for RowFields. + for (unsigned j = 0, endRF = RowFields->getSize(); j < endRF; j++) { + Init *RowFieldsJ = RowFields->getElement(j); + Init *KeyInstrVal = KeyInstr->getValue(RowFieldsJ)->getValue(); + KeyValue.push_back(KeyInstrVal); + } + + // Get all the instructions that share the same KeyValue as the KeyInstr + // in RowInstrMap. We search through these instructions to find a match + // for the current column, i.e., the instruction which has the same values + // as CurValueCol for all the fields in ColFields. + const std::vector &RelatedInstrVec = RowInstrMap[KeyValue]; + + ListInit *ColFields = InstrMapDesc.getColFields(); + Record *MatchInstr = NULL; + + for (unsigned i = 0, e = RelatedInstrVec.size(); i < e; i++) { + bool MatchFound = true; + Record *CurInstr = RelatedInstrVec[i]; + for (unsigned j = 0, endCF = ColFields->getSize(); + (j < endCF) && MatchFound; j++) { + Init *ColFieldJ = ColFields->getElement(j); + Init *CurInstrInit = CurInstr->getValue(ColFieldJ)->getValue(); + std::string CurInstrVal = CurInstrInit->getAsUnquotedString(); + Init *ColFieldJVallue = CurValueCol->getElement(j); + MatchFound = (CurInstrVal == ColFieldJVallue->getAsUnquotedString()); + } + + if (MatchFound) { + if (MatchInstr) // Already had a match + // Error if multiple matches are found for a column. + PrintFatalError("Multiple matches found for `" + KeyInstr->getName() + + "', for the relation `" + InstrMapDesc.getName()); + MatchInstr = CurInstr; + } + } + return MatchInstr; +} + +//===----------------------------------------------------------------------===// +// Emit one table per relation. Only instructions with a valid relation of a +// given type are included in the table sorted by their enum values (opcodes). +// Binary search is used for locating instructions in the table. +//===----------------------------------------------------------------------===// + +unsigned MapTableEmitter::emitBinSearchTable(raw_ostream &OS) { + + const std::vector &NumberedInstructions = + Target.getInstructionsByEnumValue(); + std::string TargetName = Target.getName(); + const std::vector &ValueCols = InstrMapDesc.getValueCols(); + unsigned NumCol = ValueCols.size(); + unsigned TotalNumInstr = NumberedInstructions.size(); + unsigned TableSize = 0; + + OS << "static const uint16_t "<TheDef; + std::vector ColInstrs = MapTable[CurInstr]; + std::string OutStr(""); + unsigned RelExists = 0; + if (ColInstrs.size()) { + for (unsigned j = 0; j < NumCol; j++) { + if (ColInstrs[j] != NULL) { + RelExists = 1; + OutStr += ", "; + OutStr += TargetName; + OutStr += "::"; + OutStr += ColInstrs[j]->getName(); + } else { OutStr += ", -1";} + } + + if (RelExists) { + OS << " { " << TargetName << "::" << CurInstr->getName(); + OS << OutStr <<" },\n"; + TableSize++; + } + } + } + if (!TableSize) { + OS << " { " << TargetName << "::" << "INSTRUCTION_LIST_END, "; + OS << TargetName << "::" << "INSTRUCTION_LIST_END }"; + } + OS << "}; // End of " << InstrMapDesc.getName() << "Table\n\n"; + return TableSize; +} + +//===----------------------------------------------------------------------===// +// Emit binary search algorithm as part of the functions used to query +// relation tables. 
+
+//===----------------------------------------------------------------------===//
+// Emit binary search algorithm as part of the functions used to query
+// relation tables.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::emitBinSearch(raw_ostream &OS, unsigned TableSize) {
+  OS << "  unsigned mid;\n";
+  OS << "  unsigned start = 0;\n";
+  OS << "  unsigned end = " << TableSize << ";\n";
+  OS << "  while (start < end) {\n";
+  OS << "    mid = start + (end - start)/2;\n";
+  OS << "    if (Opcode == " << InstrMapDesc.getName() << "Table[mid][0]) {\n";
+  OS << "      break;\n";
+  OS << "    }\n";
+  OS << "    if (Opcode < " << InstrMapDesc.getName() << "Table[mid][0])\n";
+  OS << "      end = mid;\n";
+  OS << "    else\n";
+  OS << "      start = mid + 1;\n";
+  OS << "  }\n";
+  OS << "  if (start == end)\n";
+  OS << "    return -1; // Instruction doesn't exist in this table.\n\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Emit functions to query relation tables.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::emitMapFuncBody(raw_ostream &OS,
+                                      unsigned TableSize) {
+
+  ListInit *ColFields = InstrMapDesc.getColFields();
+  const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
+
+  // Emit binary search algorithm to locate instructions in the
+  // relation table. If found, return opcode value from the appropriate column
+  // of the table.
+  emitBinSearch(OS, TableSize);
+
+  if (ValueCols.size() > 1) {
+    for (unsigned i = 0, e = ValueCols.size(); i < e; i++) {
+      ListInit *ColumnI = ValueCols[i];
+      for (unsigned j = 0, ColSize = ColumnI->getSize(); j < ColSize; j++) {
+        std::string ColName = ColFields->getElement(j)->getAsUnquotedString();
+        OS << "  if (in" << ColName;
+        OS << " == ";
+        OS << ColName << "_" << ColumnI->getElement(j)->getAsUnquotedString();
+        if (j < ColumnI->getSize() - 1) OS << " && ";
+        else OS << ")\n";
+      }
+      OS << "    return " << InstrMapDesc.getName();
+      OS << "Table[mid][" << i + 1 << "];\n";
+    }
+    OS << "  return -1;";
+  }
+  else
+    OS << "  return " << InstrMapDesc.getName() << "Table[mid][1];\n";
+
+  OS << "}\n\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Emit the relation table and the function to query it.
+//===----------------------------------------------------------------------===//
+
+void MapTableEmitter::emitTablesWithFunc(raw_ostream &OS) {
+
+  // Emit the function signature: the query takes the opcode as input. If the
+  // relation has more than one value column, it also takes an enum value for
+  // each column field to select the desired column.
+  ListInit *ColFields = InstrMapDesc.getColFields();
+  const std::vector<ListInit*> &ValueCols = InstrMapDesc.getValueCols();
+  OS << "// " << InstrMapDesc.getName() << "\n";
+  OS << "int " << InstrMapDesc.getName() << "(uint16_t Opcode";
+  if (ValueCols.size() > 1) {
+    for (unsigned i = 0, e = ColFields->getSize(); i < e; i++) {
+      std::string ColName = ColFields->getElement(i)->getAsUnquotedString();
+      OS << ", enum " << ColName << " in" << ColName << ") {\n";
+    }
+  } else { OS << ") {\n"; }
+
+  // Emit map table.
+  unsigned TableSize = emitBinSearchTable(OS);
+
+  // Emit rest of the function body.
+  emitMapFuncBody(OS, TableSize);
+}
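Putting emitTablesWithFunc(), emitBinSearchTable() and emitMapFuncBody() together, the generated query comes out roughly as below for the same hypothetical getPredOpcode relation, assuming a single column field PredSense with two value columns (so the signature takes an extra enum parameter). This is a compilable approximation, not verbatim TableGen output: the opcode numbers, the PredSense values and the two-row table are made up, and the table is inlined so the snippet runs on its own.

// Approximation of a generated relation query (hypothetical names).
#include <cstdint>
#include <cstdio>

namespace Foo {
  enum { ADD = 10, ADD_pf = 11, ADD_pt = 12, LDW = 20, LDW_pt = 21 };
}
enum PredSense { PredSense_true, PredSense_false };

int getPredOpcode(uint16_t Opcode, enum PredSense inPredSense) {
  // Rows are { key, column 1, column 2 }, sorted by the key opcode; 0xFFFF
  // (the emitted "-1") marks a missing entry.
  static const uint16_t getPredOpcodeTable[][3] = {
    { Foo::ADD, Foo::ADD_pt, Foo::ADD_pf },
    { Foo::LDW, Foo::LDW_pt, 0xFFFF      },
  };
  unsigned mid;
  unsigned start = 0;
  unsigned end = 2; // number of rows emitted into the table
  while (start < end) {
    mid = start + (end - start)/2;
    if (Opcode == getPredOpcodeTable[mid][0])
      break;
    if (Opcode < getPredOpcodeTable[mid][0])
      end = mid;
    else
      start = mid + 1;
  }
  if (start == end)
    return -1; // Instruction doesn't exist in this table.

  // Select the column requested by the extra enum argument.
  if (inPredSense == PredSense_true)
    return getPredOpcodeTable[mid][1];
  if (inPredSense == PredSense_false)
    return getPredOpcodeTable[mid][2];
  return -1;
}

int main() {
  std::printf("%d\n", getPredOpcode(Foo::ADD, PredSense_false)); // 11
  std::printf("%d\n", getPredOpcode(Foo::LDW, PredSense_false)); // 65535 (no entry)
  std::printf("%d\n", getPredOpcode(99, PredSense_true));        // -1 (not in table)
  return 0;
}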
+
+//===----------------------------------------------------------------------===//
+// Emit enums for the column fields across all the instruction maps.
+//===----------------------------------------------------------------------===//
+
+static void emitEnums(raw_ostream &OS, RecordKeeper &Records) {
+
+  std::vector<Record*> InstrMapVec;
+  InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
+  std::map<std::string, std::vector<Init*> > ColFieldValueMap;
+
+  // Iterate over all InstrMapping records and create a map between column
+  // fields and their possible values across all records.
+  for (unsigned i = 0, e = InstrMapVec.size(); i < e; i++) {
+    Record *CurMap = InstrMapVec[i];
+    ListInit *ColFields;
+    ColFields = CurMap->getValueAsListInit("ColFields");
+    ListInit *List = CurMap->getValueAsListInit("ValueCols");
+    std::vector<ListInit*> ValueCols;
+    unsigned ListSize = List->getSize();
+
+    for (unsigned j = 0; j < ListSize; j++) {
+      ListInit *ListJ = dyn_cast<ListInit>(List->getElement(j));
+
+      if (ListJ->getSize() != ColFields->getSize())
+        PrintFatalError("Record `" + CurMap->getName() + "', field "
+          "`ValueCols' entries don't match with the entries in 'ColFields' !");
+      ValueCols.push_back(ListJ);
+    }
+
+    for (unsigned j = 0, endCF = ColFields->getSize(); j < endCF; j++) {
+      for (unsigned k = 0; k < ListSize; k++) {
+        std::string ColName = ColFields->getElement(j)->getAsUnquotedString();
+        ColFieldValueMap[ColName].push_back((ValueCols[k])->getElement(j));
+      }
+    }
+  }
+
+  for (std::map<std::string, std::vector<Init*> >::iterator
+       II = ColFieldValueMap.begin(), IE = ColFieldValueMap.end();
+       II != IE; II++) {
+    std::vector<Init*> FieldValues = (*II).second;
+
+    // Delete duplicate entries from ColFieldValueMap. Erasing shrinks the
+    // vector, so the bounds are re-read from FieldValues itself.
+    for (unsigned i = 0; i + 1 < FieldValues.size(); i++) {
+      Init *CurVal = FieldValues[i];
+      for (unsigned j = i + 1; j < FieldValues.size(); j++) {
+        if (CurVal == FieldValues[j]) {
+          FieldValues.erase(FieldValues.begin() + j);
+          --j;
+        }
+      }
+    }
+
+    // Emit enumerated values for the column fields.
+    OS << "enum " << (*II).first << " {\n";
+    for (unsigned i = 0, e = FieldValues.size(); i < e; i++) {
+      OS << "\t" << (*II).first << "_" << FieldValues[i]->getAsUnquotedString();
+      if (i != e - 1)
+        OS << ",\n";
+      else
+        OS << "\n};\n\n";
+    }
+  }
+}
+
+namespace llvm {
+//===----------------------------------------------------------------------===//
+// Parse 'InstrMapping' records and use the information to form relationships
+// between instructions. These relations are emitted as tables along with the
+// functions to query them.
+//===----------------------------------------------------------------------===//
+void EmitMapTable(RecordKeeper &Records, raw_ostream &OS) {
+  CodeGenTarget Target(Records);
+  std::string TargetName = Target.getName();
+  std::vector<Record*> InstrMapVec;
+  InstrMapVec = Records.getAllDerivedDefinitions("InstrMapping");
+
+  if (!InstrMapVec.size())
+    return;
+
+  OS << "#ifdef GET_INSTRMAP_INFO\n";
+  OS << "#undef GET_INSTRMAP_INFO\n";
+  OS << "namespace llvm {\n\n";
+  OS << "namespace " << TargetName << " {\n\n";
+
+  // Emit column field names and their values as enums.
+  emitEnums(OS, Records);
+
+  // Iterate over all instruction mapping records and construct relationship
+  // maps based on the information specified there.
+  //
+  for (unsigned i = 0, e = InstrMapVec.size(); i < e; i++) {
+    MapTableEmitter IMap(Target, Records, InstrMapVec[i]);
+
+    // Build RowInstrMap to group instructions based on their values for
+    // RowFields. In the process, also collect key instructions into
+    // KeyInstrVec.
+    IMap.buildRowInstrMap();
+
+    // Build MapTable to map key instructions with the corresponding column
+    // instructions.
+    IMap.buildMapTable();
+
+    // Emit map tables and the functions to query them.
+ IMap.emitTablesWithFunc(OS); + } + OS << "} // End " << TargetName << " namespace\n"; + OS << "} // End llvm namespace\n"; + OS << "#endif // GET_INSTRMAP_INFO\n\n"; +} + +} // End llvm namespace diff --git a/utils/TableGen/CodeGenRegisters.cpp b/utils/TableGen/CodeGenRegisters.cpp index 011f4b7..580e319 100644 --- a/utils/TableGen/CodeGenRegisters.cpp +++ b/utils/TableGen/CodeGenRegisters.cpp @@ -28,7 +28,7 @@ using namespace llvm; //===----------------------------------------------------------------------===// CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum) - : TheDef(R), EnumValue(Enum) { + : TheDef(R), EnumValue(Enum), LaneMask(0) { Name = R->getName(); if (R->getValue("Namespace")) Namespace = R->getValueAsString("Namespace"); @@ -36,7 +36,7 @@ CodeGenSubRegIndex::CodeGenSubRegIndex(Record *R, unsigned Enum) CodeGenSubRegIndex::CodeGenSubRegIndex(StringRef N, StringRef Nspace, unsigned Enum) - : TheDef(0), Name(N), Namespace(Nspace), EnumValue(Enum) { + : TheDef(0), Name(N), Namespace(Nspace), EnumValue(Enum), LaneMask(0) { } std::string CodeGenSubRegIndex::getQualifiedName() const { @@ -54,19 +54,20 @@ void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) { std::vector Comps = TheDef->getValueAsListOfDefs("ComposedOf"); if (!Comps.empty()) { if (Comps.size() != 2) - throw TGError(TheDef->getLoc(), "ComposedOf must have exactly two entries"); + PrintFatalError(TheDef->getLoc(), + "ComposedOf must have exactly two entries"); CodeGenSubRegIndex *A = RegBank.getSubRegIdx(Comps[0]); CodeGenSubRegIndex *B = RegBank.getSubRegIdx(Comps[1]); CodeGenSubRegIndex *X = A->addComposite(B, this); if (X) - throw TGError(TheDef->getLoc(), "Ambiguous ComposedOf entries"); + PrintFatalError(TheDef->getLoc(), "Ambiguous ComposedOf entries"); } std::vector Parts = TheDef->getValueAsListOfDefs("CoveringSubRegIndices"); if (!Parts.empty()) { if (Parts.size() < 2) - throw TGError(TheDef->getLoc(), + PrintFatalError(TheDef->getLoc(), "CoveredBySubRegs must have two or more entries"); SmallVector IdxParts; for (unsigned i = 0, e = Parts.size(); i != e; ++i) @@ -75,14 +76,21 @@ void CodeGenSubRegIndex::updateComponents(CodeGenRegBank &RegBank) { } } -void CodeGenSubRegIndex::cleanComposites() { - // Clean out redundant mappings of the form this+X -> X. - for (CompMap::iterator i = Composed.begin(), e = Composed.end(); i != e;) { - CompMap::iterator j = i; - ++i; - if (j->first == j->second) - Composed.erase(j); - } +unsigned CodeGenSubRegIndex::computeLaneMask() { + // Already computed? + if (LaneMask) + return LaneMask; + + // Recursion guard, shouldn't be required. + LaneMask = ~0u; + + // The lane mask is simply the union of all sub-indices. 
+ unsigned M = 0; + for (CompMap::iterator I = Composed.begin(), E = Composed.end(); I != E; ++I) + M |= I->second->computeLaneMask(); + assert(M && "Missing lane mask, sub-register cycle?"); + LaneMask = M; + return LaneMask; } //===----------------------------------------------------------------------===// @@ -105,8 +113,8 @@ void CodeGenRegister::buildObjectGraph(CodeGenRegBank &RegBank) { std::vector SRs = TheDef->getValueAsListOfDefs("SubRegs"); if (SRIs.size() != SRs.size()) - throw TGError(TheDef->getLoc(), - "SubRegs and SubRegIndices must have the same size"); + PrintFatalError(TheDef->getLoc(), + "SubRegs and SubRegIndices must have the same size"); for (unsigned i = 0, e = SRIs.size(); i != e; ++i) { ExplicitSubRegIndices.push_back(RegBank.getSubRegIdx(SRIs[i])); @@ -217,8 +225,8 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) { CodeGenRegister *SR = ExplicitSubRegs[i]; CodeGenSubRegIndex *Idx = ExplicitSubRegIndices[i]; if (!SubRegs.insert(std::make_pair(Idx, SR)).second) - throw TGError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() + - " appears twice in Register " + getName()); + PrintFatalError(TheDef->getLoc(), "SubRegIndex " + Idx->getName() + + " appears twice in Register " + getName()); // Map explicit sub-registers first, so the names take precedence. // The inherited sub-registers are mapped below. SubReg2Idx.insert(std::make_pair(SR, Idx)); @@ -298,11 +306,11 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) { for (SubRegMap::const_iterator SI = SubRegs.begin(), SE = SubRegs.end(); SI != SE; ++SI) { if (SI->second == this) { - SMLoc Loc; + ArrayRef Loc; if (TheDef) Loc = TheDef->getLoc(); - throw TGError(Loc, "Register " + getName() + - " has itself as a sub-register"); + PrintFatalError(Loc, "Register " + getName() + + " has itself as a sub-register"); } // Ensure that every sub-register has a unique name. DenseMap::iterator Ins = @@ -310,10 +318,10 @@ CodeGenRegister::computeSubRegs(CodeGenRegBank &RegBank) { if (Ins->second == SI->first) continue; // Trouble: Two different names for SI->second. - SMLoc Loc; + ArrayRef Loc; if (TheDef) Loc = TheDef->getLoc(); - throw TGError(Loc, "Sub-register can't have two names: " + + PrintFatalError(Loc, "Sub-register can't have two names: " + SI->second->getName() + " available as " + SI->first->getName() + " and " + Ins->second->getName()); } @@ -460,8 +468,8 @@ void CodeGenRegister::computeSecondarySubRegs(CodeGenRegBank &RegBank) { SE = NewSubReg->SubRegs.end(); SI != SE; ++SI) { CodeGenSubRegIndex *SubIdx = getSubRegIndex(SI->second); if (!SubIdx) - throw TGError(TheDef->getLoc(), "No SubRegIndex for " + - SI->second->getName() + " in " + getName()); + PrintFatalError(TheDef->getLoc(), "No SubRegIndex for " + + SI->second->getName() + " in " + getName()); NewIdx->addComposite(SI->first, SubIdx); } } @@ -585,15 +593,16 @@ struct TupleExpander : SetTheory::Expander { unsigned Dim = Indices.size(); ListInit *SubRegs = Def->getValueAsListInit("SubRegs"); if (Dim != SubRegs->getSize()) - throw TGError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch"); + PrintFatalError(Def->getLoc(), "SubRegIndices and SubRegs size mismatch"); if (Dim < 2) - throw TGError(Def->getLoc(), "Tuples must have at least 2 sub-registers"); + PrintFatalError(Def->getLoc(), + "Tuples must have at least 2 sub-registers"); // Evaluate the sub-register lists to be zipped. 
unsigned Length = ~0u; SmallVector Lists(Dim); for (unsigned i = 0; i != Dim; ++i) { - ST.evaluate(SubRegs->getElement(i), Lists[i]); + ST.evaluate(SubRegs->getElement(i), Lists[i], Def->getLoc()); Length = std::min(Length, unsigned(Lists[i].size())); } @@ -699,8 +708,8 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R) for (unsigned i = 0, e = TypeList.size(); i != e; ++i) { Record *Type = TypeList[i]; if (!Type->isSubClassOf("ValueType")) - throw "RegTypes list member '" + Type->getName() + - "' does not derive from the ValueType class!"; + PrintFatalError("RegTypes list member '" + Type->getName() + + "' does not derive from the ValueType class!"); VTs.push_back(getValueType(Type)); } assert(!VTs.empty() && "RegisterClass must contain at least one ValueType!"); @@ -721,14 +730,14 @@ CodeGenRegisterClass::CodeGenRegisterClass(CodeGenRegBank &RegBank, Record *R) // Alternative allocation orders may be subsets. SetTheory::RecSet Order; for (unsigned i = 0, e = AltOrders->size(); i != e; ++i) { - RegBank.getSets().evaluate(AltOrders->getElement(i), Order); + RegBank.getSets().evaluate(AltOrders->getElement(i), Order, R->getLoc()); Orders[1 + i].append(Order.begin(), Order.end()); // Verify that all altorder members are regclass members. while (!Order.empty()) { CodeGenRegister *Reg = RegBank.getReg(Order.back()); Order.pop_back(); if (!contains(Reg)) - throw TGError(R->getLoc(), " AltOrder register " + Reg->getName() + + PrintFatalError(R->getLoc(), " AltOrder register " + Reg->getName() + " is not a class member"); } } @@ -986,6 +995,12 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) { for (unsigned i = 0, e = Registers.size(); i != e; ++i) Registers[i]->buildObjectGraph(*this); + // Compute register name map. + for (unsigned i = 0, e = Registers.size(); i != e; ++i) + RegistersByName.GetOrCreateValue( + Registers[i]->TheDef->getValueAsString("AsmName"), + Registers[i]); + // Precompute all sub-register maps. // This will create Composite entries for all inferred sub-register indices. for (unsigned i = 0, e = Registers.size(); i != e; ++i) @@ -1008,7 +1023,7 @@ CodeGenRegBank::CodeGenRegBank(RecordKeeper &Records) { // Read in register class definitions. std::vector RCs = Records.getAllDerivedDefinitions("RegisterClass"); if (RCs.empty()) - throw std::string("No 'RegisterClass' subclasses defined!"); + PrintFatalError(std::string("No 'RegisterClass' subclasses defined!")); // Allocate user-defined register classes. RegClasses.reserve(RCs.size()); @@ -1085,7 +1100,7 @@ CodeGenRegisterClass *CodeGenRegBank::getRegClass(Record *Def) { if (CodeGenRegisterClass *RC = Def2RC[Def]) return RC; - throw TGError(Def->getLoc(), "Not a known RegisterClass!"); + PrintFatalError(Def->getLoc(), "Not a known RegisterClass!"); } CodeGenSubRegIndex* @@ -1164,11 +1179,35 @@ void CodeGenRegBank::computeComposites() { } } } +} + +// Compute lane masks. This is similar to register units, but at the +// sub-register index level. Each bit in the lane mask is like a register unit +// class, and two lane masks will have a bit in common if two sub-register +// indices overlap in some register. +// +// Conservatively share a lane mask bit if two sub-register indices overlap in +// some registers, but not in others. That shouldn't happen a lot. +void CodeGenRegBank::computeSubRegIndexLaneMasks() { + // First assign individual bits to all the leaf indices. 
+ unsigned Bit = 0; + for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) { + CodeGenSubRegIndex *Idx = SubRegIndices[i]; + if (Idx->getComposites().empty()) { + Idx->LaneMask = 1u << Bit; + // Share bit 31 in the unlikely case there are more than 32 leafs. + if (Bit < 31) ++Bit; + } else { + Idx->LaneMask = 0; + } + } + + // FIXME: What if ad-hoc aliasing introduces overlaps that aren't represented + // by the sub-register graph? This doesn't occur in any known targets. - // We don't care about the difference between (Idx1, Idx2) -> Idx2 and invalid - // compositions, so remove any mappings of that form. + // Inherit lanes from composites. for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) - SubRegIndices[i]->cleanComposites(); + SubRegIndices[i]->computeLaneMask(); } namespace { @@ -1554,6 +1593,7 @@ void CodeGenRegBank::computeRegUnitSets() { void CodeGenRegBank::computeDerivedInfo() { computeComposites(); + computeSubRegIndexLaneMasks(); // Compute a weight for each register unit created during getSubRegs. // This may create adopted register units (with unit # >= NumNativeRegUnits). diff --git a/utils/TableGen/CodeGenRegisters.h b/utils/TableGen/CodeGenRegisters.h index 827063e..e411074 100644 --- a/utils/TableGen/CodeGenRegisters.h +++ b/utils/TableGen/CodeGenRegisters.h @@ -40,6 +40,7 @@ namespace llvm { public: const unsigned EnumValue; + unsigned LaneMask; CodeGenSubRegIndex(Record *R, unsigned Enum); CodeGenSubRegIndex(StringRef N, StringRef Nspace, unsigned Enum); @@ -80,12 +81,12 @@ namespace llvm { // Update the composite maps of components specified in 'ComposedOf'. void updateComponents(CodeGenRegBank&); - // Clean out redundant composite mappings. - void cleanComposites(); - // Return the map of composites. const CompMap &getComposites() const { return Composed; } + // Compute LaneMask from Composed. Return LaneMask. + unsigned computeLaneMask(); + private: CompMap Composed; }; @@ -439,6 +440,7 @@ namespace llvm { // Registers. std::vector Registers; + StringMap RegistersByName; DenseMap Def2Reg; unsigned NumNativeRegUnits; @@ -489,6 +491,9 @@ namespace llvm { // Populate the Composite map from sub-register relationships. void computeComposites(); + // Compute a lane mask for each sub-register index. + void computeSubRegIndexLaneMasks(); + public: CodeGenRegBank(RecordKeeper&); @@ -518,6 +523,9 @@ namespace llvm { } const std::vector &getRegisters() { return Registers; } + const StringMap &getRegistersByName() { + return RegistersByName; + } // Find a register from its Record def. CodeGenRegister *getReg(Record*); diff --git a/utils/TableGen/CodeGenSchedule.cpp b/utils/TableGen/CodeGenSchedule.cpp index f57fd18..63cc97a 100644 --- a/utils/TableGen/CodeGenSchedule.cpp +++ b/utils/TableGen/CodeGenSchedule.cpp @@ -16,41 +16,505 @@ #include "CodeGenSchedule.h" #include "CodeGenTarget.h" +#include "llvm/TableGen/Error.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/Regex.h" +#include "llvm/ADT/STLExtras.h" using namespace llvm; -// CodeGenModels ctor interprets machine model records and populates maps. +#ifndef NDEBUG +static void dumpIdxVec(const IdxVec &V) { + for (unsigned i = 0, e = V.size(); i < e; ++i) { + dbgs() << V[i] << ", "; + } +} +static void dumpIdxVec(const SmallVectorImpl &V) { + for (unsigned i = 0, e = V.size(); i < e; ++i) { + dbgs() << V[i] << ", "; + } +} +#endif + +// (instrs a, b, ...) Evaluate and union all arguments. Identical to AddOp. 
+struct InstrsOp : public SetTheory::Operator { + void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts, + ArrayRef Loc) { + ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc); + } +}; + +// (instregex "OpcPat",...) Find all instructions matching an opcode pattern. +// +// TODO: Since this is a prefix match, perform a binary search over the +// instruction names using lower_bound. Note that the predefined instrs must be +// scanned linearly first. However, this is only safe if the regex pattern has +// no top-level bars. The DAG already has a list of patterns, so there's no +// reason to use top-level bars, but we need a way to verify they don't exist +// before implementing the optimization. +struct InstRegexOp : public SetTheory::Operator { + const CodeGenTarget &Target; + InstRegexOp(const CodeGenTarget &t): Target(t) {} + + void apply(SetTheory &ST, DagInit *Expr, SetTheory::RecSet &Elts, + ArrayRef Loc) { + SmallVector RegexList; + for (DagInit::const_arg_iterator + AI = Expr->arg_begin(), AE = Expr->arg_end(); AI != AE; ++AI) { + StringInit *SI = dyn_cast(*AI); + if (!SI) + PrintFatalError(Loc, "instregex requires pattern string: " + + Expr->getAsString()); + std::string pat = SI->getValue(); + // Implement a python-style prefix match. + if (pat[0] != '^') { + pat.insert(0, "^("); + pat.insert(pat.end(), ')'); + } + RegexList.push_back(new Regex(pat)); + } + for (CodeGenTarget::inst_iterator I = Target.inst_begin(), + E = Target.inst_end(); I != E; ++I) { + for (SmallVectorImpl::iterator + RI = RegexList.begin(), RE = RegexList.end(); RI != RE; ++RI) { + if ((*RI)->match((*I)->TheDef->getName())) + Elts.insert((*I)->TheDef); + } + } + DeleteContainerPointers(RegexList); + } +}; + +/// CodeGenModels ctor interprets machine model records and populates maps. CodeGenSchedModels::CodeGenSchedModels(RecordKeeper &RK, const CodeGenTarget &TGT): - Records(RK), Target(TGT), NumItineraryClasses(0), HasProcItineraries(false) { + Records(RK), Target(TGT), NumItineraryClasses(0) { + + Sets.addFieldExpander("InstRW", "Instrs"); + + // Allow Set evaluation to recognize the dags used in InstRW records: + // (instrs Op1, Op1...) + Sets.addOperator("instrs", new InstrsOp); + Sets.addOperator("instregex", new InstRegexOp(Target)); + + // Instantiate a CodeGenProcModel for each SchedMachineModel with the values + // that are explicitly referenced in tablegen records. Resources associated + // with each processor will be derived later. Populate ProcModelMap with the + // CodeGenProcModel instances. + collectProcModels(); + + // Instantiate a CodeGenSchedRW for each SchedReadWrite record explicitly + // defined, and populate SchedReads and SchedWrites vectors. Implicit + // SchedReadWrites that represent sequences derived from expanded variant will + // be inferred later. + collectSchedRW(); + + // Instantiate a CodeGenSchedClass for each unique SchedRW signature directly + // required by an instruction definition, and populate SchedClassIdxMap. Set + // NumItineraryClasses to the number of explicit itinerary classes referenced + // by instructions. Set NumInstrSchedClasses to the number of itinerary + // classes plus any classes implied by instructions that derive from class + // Sched and provide SchedRW list. This does not infer any new classes from + // SchedVariant. + collectSchedClasses(); + + // Find instruction itineraries for each processor. Sort and populate + // CodeGenProcModel::ItinDefList. (Cycle-to-cycle itineraries). 
This requires + // all itinerary classes to be discovered. + collectProcItins(); + + // Find ItinRW records for each processor and itinerary class. + // (For per-operand resources mapped to itinerary classes). + collectProcItinRW(); + + // Infer new SchedClasses from SchedVariant. + inferSchedClasses(); + + // Populate each CodeGenProcModel's WriteResDefs, ReadAdvanceDefs, and + // ProcResourceDefs. + collectProcResources(); +} + +/// Gather all processor models. +void CodeGenSchedModels::collectProcModels() { + RecVec ProcRecords = Records.getAllDerivedDefinitions("Processor"); + std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName()); + + // Reserve space because we can. Reallocation would be ok. + ProcModels.reserve(ProcRecords.size()+1); + + // Use idx=0 for NoModel/NoItineraries. + Record *NoModelDef = Records.getDef("NoSchedModel"); + Record *NoItinsDef = Records.getDef("NoItineraries"); + ProcModels.push_back(CodeGenProcModel(0, "NoSchedModel", + NoModelDef, NoItinsDef)); + ProcModelMap[NoModelDef] = 0; + + // For each processor, find a unique machine model. + for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i) + addProcModel(ProcRecords[i]); +} + +/// Get a unique processor model based on the defined MachineModel and +/// ProcessorItineraries. +void CodeGenSchedModels::addProcModel(Record *ProcDef) { + Record *ModelKey = getModelOrItinDef(ProcDef); + if (!ProcModelMap.insert(std::make_pair(ModelKey, ProcModels.size())).second) + return; + + std::string Name = ModelKey->getName(); + if (ModelKey->isSubClassOf("SchedMachineModel")) { + Record *ItinsDef = ModelKey->getValueAsDef("Itineraries"); + ProcModels.push_back( + CodeGenProcModel(ProcModels.size(), Name, ModelKey, ItinsDef)); + } + else { + // An itinerary is defined without a machine model. Infer a new model. + if (!ModelKey->getValueAsListOfDefs("IID").empty()) + Name = Name + "Model"; + ProcModels.push_back( + CodeGenProcModel(ProcModels.size(), Name, + ProcDef->getValueAsDef("SchedModel"), ModelKey)); + } + DEBUG(ProcModels.back().dump()); +} + +// Recursively find all reachable SchedReadWrite records. +static void scanSchedRW(Record *RWDef, RecVec &RWDefs, + SmallPtrSet &RWSet) { + if (!RWSet.insert(RWDef)) + return; + RWDefs.push_back(RWDef); + // Reads don't current have sequence records, but it can be added later. + if (RWDef->isSubClassOf("WriteSequence")) { + RecVec Seq = RWDef->getValueAsListOfDefs("Writes"); + for (RecIter I = Seq.begin(), E = Seq.end(); I != E; ++I) + scanSchedRW(*I, RWDefs, RWSet); + } + else if (RWDef->isSubClassOf("SchedVariant")) { + // Visit each variant (guarded by a different predicate). + RecVec Vars = RWDef->getValueAsListOfDefs("Variants"); + for (RecIter VI = Vars.begin(), VE = Vars.end(); VI != VE; ++VI) { + // Visit each RW in the sequence selected by the current variant. + RecVec Selected = (*VI)->getValueAsListOfDefs("Selected"); + for (RecIter I = Selected.begin(), E = Selected.end(); I != E; ++I) + scanSchedRW(*I, RWDefs, RWSet); + } + } +} + +// Collect and sort all SchedReadWrites reachable via tablegen records. +// More may be inferred later when inferring new SchedClasses from variants. +void CodeGenSchedModels::collectSchedRW() { + // Reserve idx=0 for invalid writes/reads. + SchedWrites.resize(1); + SchedReads.resize(1); + + SmallPtrSet RWSet; + + // Find all SchedReadWrites referenced by instruction defs. 
+ RecVec SWDefs, SRDefs; + for (CodeGenTarget::inst_iterator I = Target.inst_begin(), + E = Target.inst_end(); I != E; ++I) { + Record *SchedDef = (*I)->TheDef; + if (!SchedDef->isSubClassOf("Sched")) + continue; + RecVec RWs = SchedDef->getValueAsListOfDefs("SchedRW"); + for (RecIter RWI = RWs.begin(), RWE = RWs.end(); RWI != RWE; ++RWI) { + if ((*RWI)->isSubClassOf("SchedWrite")) + scanSchedRW(*RWI, SWDefs, RWSet); + else { + assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); + scanSchedRW(*RWI, SRDefs, RWSet); + } + } + } + // Find all ReadWrites referenced by InstRW. + RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW"); + for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI) { + // For all OperandReadWrites. + RecVec RWDefs = (*OI)->getValueAsListOfDefs("OperandReadWrites"); + for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); + RWI != RWE; ++RWI) { + if ((*RWI)->isSubClassOf("SchedWrite")) + scanSchedRW(*RWI, SWDefs, RWSet); + else { + assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); + scanSchedRW(*RWI, SRDefs, RWSet); + } + } + } + // Find all ReadWrites referenced by ItinRW. + RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW"); + for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) { + // For all OperandReadWrites. + RecVec RWDefs = (*II)->getValueAsListOfDefs("OperandReadWrites"); + for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); + RWI != RWE; ++RWI) { + if ((*RWI)->isSubClassOf("SchedWrite")) + scanSchedRW(*RWI, SWDefs, RWSet); + else { + assert((*RWI)->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); + scanSchedRW(*RWI, SRDefs, RWSet); + } + } + } + // Find all ReadWrites referenced by SchedAlias. AliasDefs needs to be sorted + // for the loop below that initializes Alias vectors. + RecVec AliasDefs = Records.getAllDerivedDefinitions("SchedAlias"); + std::sort(AliasDefs.begin(), AliasDefs.end(), LessRecord()); + for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) { + Record *MatchDef = (*AI)->getValueAsDef("MatchRW"); + Record *AliasDef = (*AI)->getValueAsDef("AliasRW"); + if (MatchDef->isSubClassOf("SchedWrite")) { + if (!AliasDef->isSubClassOf("SchedWrite")) + PrintFatalError((*AI)->getLoc(), "SchedWrite Alias must be SchedWrite"); + scanSchedRW(AliasDef, SWDefs, RWSet); + } + else { + assert(MatchDef->isSubClassOf("SchedRead") && "Unknown SchedReadWrite"); + if (!AliasDef->isSubClassOf("SchedRead")) + PrintFatalError((*AI)->getLoc(), "SchedRead Alias must be SchedRead"); + scanSchedRW(AliasDef, SRDefs, RWSet); + } + } + // Sort and add the SchedReadWrites directly referenced by instructions or + // itinerary resources. Index reads and writes in separate domains. + std::sort(SWDefs.begin(), SWDefs.end(), LessRecord()); + for (RecIter SWI = SWDefs.begin(), SWE = SWDefs.end(); SWI != SWE; ++SWI) { + assert(!getSchedRWIdx(*SWI, /*IsRead=*/false) && "duplicate SchedWrite"); + SchedWrites.push_back(CodeGenSchedRW(SchedWrites.size(), *SWI)); + } + std::sort(SRDefs.begin(), SRDefs.end(), LessRecord()); + for (RecIter SRI = SRDefs.begin(), SRE = SRDefs.end(); SRI != SRE; ++SRI) { + assert(!getSchedRWIdx(*SRI, /*IsRead-*/true) && "duplicate SchedWrite"); + SchedReads.push_back(CodeGenSchedRW(SchedReads.size(), *SRI)); + } + // Initialize WriteSequence vectors. 
+ for (std::vector::iterator WI = SchedWrites.begin(), + WE = SchedWrites.end(); WI != WE; ++WI) { + if (!WI->IsSequence) + continue; + findRWs(WI->TheDef->getValueAsListOfDefs("Writes"), WI->Sequence, + /*IsRead=*/false); + } + // Initialize Aliases vectors. + for (RecIter AI = AliasDefs.begin(), AE = AliasDefs.end(); AI != AE; ++AI) { + Record *AliasDef = (*AI)->getValueAsDef("AliasRW"); + getSchedRW(AliasDef).IsAlias = true; + Record *MatchDef = (*AI)->getValueAsDef("MatchRW"); + CodeGenSchedRW &RW = getSchedRW(MatchDef); + if (RW.IsAlias) + PrintFatalError((*AI)->getLoc(), "Cannot Alias an Alias"); + RW.Aliases.push_back(*AI); + } + DEBUG( + for (unsigned WIdx = 0, WEnd = SchedWrites.size(); WIdx != WEnd; ++WIdx) { + dbgs() << WIdx << ": "; + SchedWrites[WIdx].dump(); + dbgs() << '\n'; + } + for (unsigned RIdx = 0, REnd = SchedReads.size(); RIdx != REnd; ++RIdx) { + dbgs() << RIdx << ": "; + SchedReads[RIdx].dump(); + dbgs() << '\n'; + } + RecVec RWDefs = Records.getAllDerivedDefinitions("SchedReadWrite"); + for (RecIter RI = RWDefs.begin(), RE = RWDefs.end(); + RI != RE; ++RI) { + if (!getSchedRWIdx(*RI, (*RI)->isSubClassOf("SchedRead"))) { + const std::string &Name = (*RI)->getName(); + if (Name != "NoWrite" && Name != "ReadDefault") + dbgs() << "Unused SchedReadWrite " << (*RI)->getName() << '\n'; + } + }); +} + +/// Compute a SchedWrite name from a sequence of writes. +std::string CodeGenSchedModels::genRWName(const IdxVec& Seq, bool IsRead) { + std::string Name("("); + for (IdxIter I = Seq.begin(), E = Seq.end(); I != E; ++I) { + if (I != Seq.begin()) + Name += '_'; + Name += getSchedRW(*I, IsRead).Name; + } + Name += ')'; + return Name; +} + +unsigned CodeGenSchedModels::getSchedRWIdx(Record *Def, bool IsRead, + unsigned After) const { + const std::vector &RWVec = IsRead ? SchedReads : SchedWrites; + assert(After < RWVec.size() && "start position out of bounds"); + for (std::vector::const_iterator I = RWVec.begin() + After, + E = RWVec.end(); I != E; ++I) { + if (I->TheDef == Def) + return I - RWVec.begin(); + } + return 0; +} + +bool CodeGenSchedModels::hasReadOfWrite(Record *WriteDef) const { + for (unsigned i = 0, e = SchedReads.size(); i < e; ++i) { + Record *ReadDef = SchedReads[i].TheDef; + if (!ReadDef || !ReadDef->isSubClassOf("ProcReadAdvance")) + continue; + + RecVec ValidWrites = ReadDef->getValueAsListOfDefs("ValidWrites"); + if (std::find(ValidWrites.begin(), ValidWrites.end(), WriteDef) + != ValidWrites.end()) { + return true; + } + } + return false; +} - // Populate SchedClassIdxMap and set NumItineraryClasses. - CollectSchedClasses(); +namespace llvm { +void splitSchedReadWrites(const RecVec &RWDefs, + RecVec &WriteDefs, RecVec &ReadDefs) { + for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) { + if ((*RWI)->isSubClassOf("SchedWrite")) + WriteDefs.push_back(*RWI); + else { + assert((*RWI)->isSubClassOf("SchedRead") && "unknown SchedReadWrite"); + ReadDefs.push_back(*RWI); + } + } +} +} // namespace llvm + +// Split the SchedReadWrites defs and call findRWs for each list. +void CodeGenSchedModels::findRWs(const RecVec &RWDefs, + IdxVec &Writes, IdxVec &Reads) const { + RecVec WriteDefs; + RecVec ReadDefs; + splitSchedReadWrites(RWDefs, WriteDefs, ReadDefs); + findRWs(WriteDefs, Writes, false); + findRWs(ReadDefs, Reads, true); +} + +// Call getSchedRWIdx for all elements in a sequence of SchedRW defs. 
+void CodeGenSchedModels::findRWs(const RecVec &RWDefs, IdxVec &RWs, + bool IsRead) const { + for (RecIter RI = RWDefs.begin(), RE = RWDefs.end(); RI != RE; ++RI) { + unsigned Idx = getSchedRWIdx(*RI, IsRead); + assert(Idx && "failed to collect SchedReadWrite"); + RWs.push_back(Idx); + } +} + +void CodeGenSchedModels::expandRWSequence(unsigned RWIdx, IdxVec &RWSeq, + bool IsRead) const { + const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead); + if (!SchedRW.IsSequence) { + RWSeq.push_back(RWIdx); + return; + } + int Repeat = + SchedRW.TheDef ? SchedRW.TheDef->getValueAsInt("Repeat") : 1; + for (int i = 0; i < Repeat; ++i) { + for (IdxIter I = SchedRW.Sequence.begin(), E = SchedRW.Sequence.end(); + I != E; ++I) { + expandRWSequence(*I, RWSeq, IsRead); + } + } +} + +// Expand a SchedWrite as a sequence following any aliases that coincide with +// the given processor model. +void CodeGenSchedModels::expandRWSeqForProc( + unsigned RWIdx, IdxVec &RWSeq, bool IsRead, + const CodeGenProcModel &ProcModel) const { + + const CodeGenSchedRW &SchedWrite = getSchedRW(RWIdx, IsRead); + Record *AliasDef = 0; + for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end(); + AI != AE; ++AI) { + const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW")); + if ((*AI)->getValueInit("SchedModel")->isComplete()) { + Record *ModelDef = (*AI)->getValueAsDef("SchedModel"); + if (&getProcModel(ModelDef) != &ProcModel) + continue; + } + if (AliasDef) + PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases " + "defined for processor " + ProcModel.ModelName + + " Ensure only one SchedAlias exists per RW."); + AliasDef = AliasRW.TheDef; + } + if (AliasDef) { + expandRWSeqForProc(getSchedRWIdx(AliasDef, IsRead), + RWSeq, IsRead,ProcModel); + return; + } + if (!SchedWrite.IsSequence) { + RWSeq.push_back(RWIdx); + return; + } + int Repeat = + SchedWrite.TheDef ? SchedWrite.TheDef->getValueAsInt("Repeat") : 1; + for (int i = 0; i < Repeat; ++i) { + for (IdxIter I = SchedWrite.Sequence.begin(), E = SchedWrite.Sequence.end(); + I != E; ++I) { + expandRWSeqForProc(*I, RWSeq, IsRead, ProcModel); + } + } +} + +// Find the existing SchedWrite that models this sequence of writes. +unsigned CodeGenSchedModels::findRWForSequence(const IdxVec &Seq, + bool IsRead) { + std::vector &RWVec = IsRead ? SchedReads : SchedWrites; - // Populate ProcModelMap. - CollectProcModels(); + for (std::vector::iterator I = RWVec.begin(), E = RWVec.end(); + I != E; ++I) { + if (I->Sequence == Seq) + return I - RWVec.begin(); + } + // Index zero reserved for invalid RW. + return 0; } -// Visit all the instruction definitions for this target to gather and enumerate -// the itinerary classes. These are the explicitly specified SchedClasses. More -// SchedClasses may be inferred. -void CodeGenSchedModels::CollectSchedClasses() { +/// Add this ReadWrite if it doesn't already exist. +unsigned CodeGenSchedModels::findOrInsertRW(ArrayRef Seq, + bool IsRead) { + assert(!Seq.empty() && "cannot insert empty sequence"); + if (Seq.size() == 1) + return Seq.back(); - // NoItinerary is always the first class at Index=0 + unsigned Idx = findRWForSequence(Seq, IsRead); + if (Idx) + return Idx; + + unsigned RWIdx = IsRead ? 
SchedReads.size() : SchedWrites.size(); + CodeGenSchedRW SchedRW(RWIdx, IsRead, Seq, genRWName(Seq, IsRead)); + if (IsRead) + SchedReads.push_back(SchedRW); + else + SchedWrites.push_back(SchedRW); + return RWIdx; +} + +/// Visit all the instruction definitions for this target to gather and +/// enumerate the itinerary classes. These are the explicitly specified +/// SchedClasses. More SchedClasses may be inferred. +void CodeGenSchedModels::collectSchedClasses() { + + // NoItinerary is always the first class at Idx=0 SchedClasses.resize(1); SchedClasses.back().Name = "NoItinerary"; + SchedClasses.back().ProcIndices.push_back(0); SchedClassIdxMap[SchedClasses.back().Name] = 0; // Gather and sort all itinerary classes used by instruction descriptions. - std::vector ItinClassList; + RecVec ItinClassList; for (CodeGenTarget::inst_iterator I = Target.inst_begin(), E = Target.inst_end(); I != E; ++I) { - Record *SchedDef = (*I)->TheDef->getValueAsDef("Itinerary"); + Record *ItinDef = (*I)->TheDef->getValueAsDef("Itinerary"); // Map a new SchedClass with no index. - if (!SchedClassIdxMap.count(SchedDef->getName())) { - SchedClassIdxMap[SchedDef->getName()] = 0; - ItinClassList.push_back(SchedDef); + if (!SchedClassIdxMap.count(ItinDef->getName())) { + SchedClassIdxMap[ItinDef->getName()] = 0; + ItinClassList.push_back(ItinDef); } } // Assign each itinerary class unique number, skipping NoItinerary==0 @@ -61,91 +525,1139 @@ void CodeGenSchedModels::CollectSchedClasses() { SchedClassIdxMap[ItinDef->getName()] = SchedClasses.size(); SchedClasses.push_back(CodeGenSchedClass(ItinDef)); } + // Infer classes from SchedReadWrite resources listed for each + // instruction definition that inherits from class Sched. + for (CodeGenTarget::inst_iterator I = Target.inst_begin(), + E = Target.inst_end(); I != E; ++I) { + if (!(*I)->TheDef->isSubClassOf("Sched")) + continue; + IdxVec Writes, Reads; + findRWs((*I)->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads); + // ProcIdx == 0 indicates the class applies to all processors. + IdxVec ProcIndices(1, 0); + addSchedClass(Writes, Reads, ProcIndices); + } + // Create classes for InstRW defs. 
+ RecVec InstRWDefs = Records.getAllDerivedDefinitions("InstRW"); + std::sort(InstRWDefs.begin(), InstRWDefs.end(), LessRecord()); + for (RecIter OI = InstRWDefs.begin(), OE = InstRWDefs.end(); OI != OE; ++OI) + createInstRWClass(*OI); + + NumInstrSchedClasses = SchedClasses.size(); + + bool EnableDump = false; + DEBUG(EnableDump = true); + if (!EnableDump) + return; + for (CodeGenTarget::inst_iterator I = Target.inst_begin(), + E = Target.inst_end(); I != E; ++I) { + Record *SchedDef = (*I)->TheDef; + std::string InstName = (*I)->TheDef->getName(); + if (SchedDef->isSubClassOf("Sched")) { + IdxVec Writes; + IdxVec Reads; + findRWs((*I)->TheDef->getValueAsListOfDefs("SchedRW"), Writes, Reads); + dbgs() << "SchedRW machine model for " << InstName; + for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI) + dbgs() << " " << SchedWrites[*WI].Name; + for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI) + dbgs() << " " << SchedReads[*RI].Name; + dbgs() << '\n'; + } + unsigned SCIdx = InstrClassMap.lookup((*I)->TheDef); + if (SCIdx) { + const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs; + for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); + RWI != RWE; ++RWI) { + const CodeGenProcModel &ProcModel = + getProcModel((*RWI)->getValueAsDef("SchedModel")); + dbgs() << "InstRW on " << ProcModel.ModelName << " for " << InstName; + IdxVec Writes; + IdxVec Reads; + findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"), + Writes, Reads); + for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI) + dbgs() << " " << SchedWrites[*WI].Name; + for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI) + dbgs() << " " << SchedReads[*RI].Name; + dbgs() << '\n'; + } + continue; + } + if (!SchedDef->isSubClassOf("Sched") + && (SchedDef->getValueAsDef("Itinerary")->getName() == "NoItinerary")) { + dbgs() << "No machine model for " << (*I)->TheDef->getName() << '\n'; + } + } +} + +unsigned CodeGenSchedModels::getSchedClassIdx( + const RecVec &RWDefs) const { - // TODO: Infer classes from non-itinerary scheduler resources. + IdxVec Writes, Reads; + findRWs(RWDefs, Writes, Reads); + return findSchedClassIdx(Writes, Reads); } -// Gather all processor models. -void CodeGenSchedModels::CollectProcModels() { - std::vector ProcRecords = - Records.getAllDerivedDefinitions("Processor"); - std::sort(ProcRecords.begin(), ProcRecords.end(), LessRecordFieldName()); +/// Find an SchedClass that has been inferred from a per-operand list of +/// SchedWrites and SchedReads. +unsigned CodeGenSchedModels::findSchedClassIdx(const IdxVec &Writes, + const IdxVec &Reads) const { + for (SchedClassIter I = schedClassBegin(), E = schedClassEnd(); I != E; ++I) { + // Classes with InstRWs may have the same Writes/Reads as a class originally + // produced by a SchedRW definition. We need to be able to recover the + // original class index for processors that don't match any InstRWs. + if (I->ItinClassDef || !I->InstRWs.empty()) + continue; - // Reserve space because we can. Reallocation would be ok. - ProcModels.reserve(ProcRecords.size()); + if (I->Writes == Writes && I->Reads == Reads) { + return I - schedClassBegin(); + } + } + return 0; +} - // For each processor, find a unique machine model. - for (unsigned i = 0, N = ProcRecords.size(); i < N; ++i) - addProcModel(ProcRecords[i]); +// Get the SchedClass index for an instruction. 
+unsigned CodeGenSchedModels::getSchedClassIdx( + const CodeGenInstruction &Inst) const { + + unsigned SCIdx = InstrClassMap.lookup(Inst.TheDef); + if (SCIdx) + return SCIdx; + + // If this opcode isn't mapped by the subtarget fallback to the instruction + // definition's SchedRW or ItinDef values. + if (Inst.TheDef->isSubClassOf("Sched")) { + RecVec RWs = Inst.TheDef->getValueAsListOfDefs("SchedRW"); + return getSchedClassIdx(RWs); + } + Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary"); + assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass"); + unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName()); + assert(Idx <= NumItineraryClasses && "bad ItinClass index"); + return Idx; } -// Get a unique processor model based on the defined MachineModel and -// ProcessorItineraries. -void CodeGenSchedModels::addProcModel(Record *ProcDef) { - unsigned Idx = getProcModelIdx(ProcDef); - if (Idx < ProcModels.size()) - return; +std::string CodeGenSchedModels::createSchedClassName( + const IdxVec &OperWrites, const IdxVec &OperReads) { + + std::string Name; + for (IdxIter WI = OperWrites.begin(), WE = OperWrites.end(); WI != WE; ++WI) { + if (WI != OperWrites.begin()) + Name += '_'; + Name += SchedWrites[*WI].Name; + } + for (IdxIter RI = OperReads.begin(), RE = OperReads.end(); RI != RE; ++RI) { + Name += '_'; + Name += SchedReads[*RI].Name; + } + return Name; +} + +std::string CodeGenSchedModels::createSchedClassName(const RecVec &InstDefs) { + + std::string Name; + for (RecIter I = InstDefs.begin(), E = InstDefs.end(); I != E; ++I) { + if (I != InstDefs.begin()) + Name += '_'; + Name += (*I)->getName(); + } + return Name; +} + +/// Add an inferred sched class from a per-operand list of SchedWrites and +/// SchedReads. ProcIndices contains the set of IDs of processors that may +/// utilize this class. +unsigned CodeGenSchedModels::addSchedClass(const IdxVec &OperWrites, + const IdxVec &OperReads, + const IdxVec &ProcIndices) +{ + assert(!ProcIndices.empty() && "expect at least one ProcIdx"); + + unsigned Idx = findSchedClassIdx(OperWrites, OperReads); + if (Idx) { + IdxVec PI; + std::set_union(SchedClasses[Idx].ProcIndices.begin(), + SchedClasses[Idx].ProcIndices.end(), + ProcIndices.begin(), ProcIndices.end(), + std::back_inserter(PI)); + SchedClasses[Idx].ProcIndices.swap(PI); + return Idx; + } + Idx = SchedClasses.size(); + SchedClasses.resize(Idx+1); + CodeGenSchedClass &SC = SchedClasses.back(); + SC.Name = createSchedClassName(OperWrites, OperReads); + SC.Writes = OperWrites; + SC.Reads = OperReads; + SC.ProcIndices = ProcIndices; + + return Idx; +} + +// Create classes for each set of opcodes that are in the same InstReadWrite +// definition across all processors. +void CodeGenSchedModels::createInstRWClass(Record *InstRWDef) { + // ClassInstrs will hold an entry for each subset of Instrs in InstRWDef that + // intersects with an existing class via a previous InstRWDef. Instrs that do + // not intersect with an existing class refer back to their former class as + // determined from ItinDef or SchedRW. + SmallVector >, 4> ClassInstrs; + // Sort Instrs into sets. 
+ const RecVec *InstDefs = Sets.expand(InstRWDef); + if (InstDefs->empty()) + PrintFatalError(InstRWDef->getLoc(), "No matching instruction opcodes"); + + for (RecIter I = InstDefs->begin(), E = InstDefs->end(); I != E; ++I) { + unsigned SCIdx = 0; + InstClassMapTy::const_iterator Pos = InstrClassMap.find(*I); + if (Pos != InstrClassMap.end()) + SCIdx = Pos->second; + else { + // This instruction has not been mapped yet. Get the original class. All + // instructions in the same InstrRW class must be from the same original + // class because that is the fall-back class for other processors. + Record *ItinDef = (*I)->getValueAsDef("Itinerary"); + SCIdx = SchedClassIdxMap.lookup(ItinDef->getName()); + if (!SCIdx && (*I)->isSubClassOf("Sched")) + SCIdx = getSchedClassIdx((*I)->getValueAsListOfDefs("SchedRW")); + } + unsigned CIdx = 0, CEnd = ClassInstrs.size(); + for (; CIdx != CEnd; ++CIdx) { + if (ClassInstrs[CIdx].first == SCIdx) + break; + } + if (CIdx == CEnd) { + ClassInstrs.resize(CEnd + 1); + ClassInstrs[CIdx].first = SCIdx; + } + ClassInstrs[CIdx].second.push_back(*I); + } + // For each set of Instrs, create a new class if necessary, and map or remap + // the Instrs to it. + unsigned CIdx = 0, CEnd = ClassInstrs.size(); + for (; CIdx != CEnd; ++CIdx) { + unsigned OldSCIdx = ClassInstrs[CIdx].first; + ArrayRef InstDefs = ClassInstrs[CIdx].second; + // If the all instrs in the current class are accounted for, then leave + // them mapped to their old class. + if (SchedClasses[OldSCIdx].InstRWs.size() == InstDefs.size()) { + assert(SchedClasses[OldSCIdx].ProcIndices[0] == 0 && + "expected a generic SchedClass"); + continue; + } + unsigned SCIdx = SchedClasses.size(); + SchedClasses.resize(SCIdx+1); + CodeGenSchedClass &SC = SchedClasses.back(); + SC.Name = createSchedClassName(InstDefs); + // Preserve ItinDef and Writes/Reads for processors without an InstRW entry. + SC.ItinClassDef = SchedClasses[OldSCIdx].ItinClassDef; + SC.Writes = SchedClasses[OldSCIdx].Writes; + SC.Reads = SchedClasses[OldSCIdx].Reads; + SC.ProcIndices.push_back(0); + // Map each Instr to this new class. + // Note that InstDefs may be a smaller list than InstRWDef's "Instrs". + Record *RWModelDef = InstRWDef->getValueAsDef("SchedModel"); + SmallSet RemappedClassIDs; + for (ArrayRef::const_iterator + II = InstDefs.begin(), IE = InstDefs.end(); II != IE; ++II) { + unsigned OldSCIdx = InstrClassMap[*II]; + if (OldSCIdx && RemappedClassIDs.insert(OldSCIdx)) { + for (RecIter RI = SchedClasses[OldSCIdx].InstRWs.begin(), + RE = SchedClasses[OldSCIdx].InstRWs.end(); RI != RE; ++RI) { + if ((*RI)->getValueAsDef("SchedModel") == RWModelDef) { + PrintFatalError(InstRWDef->getLoc(), "Overlapping InstRW def " + + (*II)->getName() + " also matches " + + (*RI)->getValue("Instrs")->getValue()->getAsString()); + } + assert(*RI != InstRWDef && "SchedClass has duplicate InstRW def"); + SC.InstRWs.push_back(*RI); + } + } + InstrClassMap[*II] = SCIdx; + } + SC.InstRWs.push_back(InstRWDef); + } +} + +// Gather the processor itineraries. +void CodeGenSchedModels::collectProcItins() { + for (std::vector::iterator PI = ProcModels.begin(), + PE = ProcModels.end(); PI != PE; ++PI) { + CodeGenProcModel &ProcModel = *PI; + RecVec ItinRecords = ProcModel.ItinsDef->getValueAsListOfDefs("IID"); + // Skip empty itinerary. + if (ItinRecords.empty()) + continue; + + ProcModel.ItinDefList.resize(NumItineraryClasses+1); + + // Insert each itinerary data record in the correct position within + // the processor model's ItinDefList. 
+ for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) { + Record *ItinData = ItinRecords[i]; + Record *ItinDef = ItinData->getValueAsDef("TheClass"); + if (!SchedClassIdxMap.count(ItinDef->getName())) { + DEBUG(dbgs() << ProcModel.ItinsDef->getName() + << " has unused itinerary class " << ItinDef->getName() << '\n'); + continue; + } + assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass"); + unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName()); + assert(Idx <= NumItineraryClasses && "bad ItinClass index"); + ProcModel.ItinDefList[Idx] = ItinData; + } + // Check for missing itinerary entries. + assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec"); + DEBUG( + for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) { + if (!ProcModel.ItinDefList[i]) + dbgs() << ProcModel.ItinsDef->getName() + << " missing itinerary for class " + << SchedClasses[i].Name << '\n'; + }); + } +} + +// Gather the read/write types for each itinerary class. +void CodeGenSchedModels::collectProcItinRW() { + RecVec ItinRWDefs = Records.getAllDerivedDefinitions("ItinRW"); + std::sort(ItinRWDefs.begin(), ItinRWDefs.end(), LessRecord()); + for (RecIter II = ItinRWDefs.begin(), IE = ItinRWDefs.end(); II != IE; ++II) { + if (!(*II)->getValueInit("SchedModel")->isComplete()) + PrintFatalError((*II)->getLoc(), "SchedModel is undefined"); + Record *ModelDef = (*II)->getValueAsDef("SchedModel"); + ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef); + if (I == ProcModelMap.end()) { + PrintFatalError((*II)->getLoc(), "Undefined SchedMachineModel " + + ModelDef->getName()); + } + ProcModels[I->second].ItinRWDefs.push_back(*II); + } +} + +/// Infer new classes from existing classes. In the process, this may create new +/// SchedWrites from sequences of existing SchedWrites. +void CodeGenSchedModels::inferSchedClasses() { + // Visit all existing classes and newly created classes. + for (unsigned Idx = 0; Idx != SchedClasses.size(); ++Idx) { + if (SchedClasses[Idx].ItinClassDef) + inferFromItinClass(SchedClasses[Idx].ItinClassDef, Idx); + else if (!SchedClasses[Idx].InstRWs.empty()) + inferFromInstRWs(Idx); + else { + inferFromRW(SchedClasses[Idx].Writes, SchedClasses[Idx].Reads, + Idx, SchedClasses[Idx].ProcIndices); + } + assert(SchedClasses.size() < (NumInstrSchedClasses*6) && + "too many SchedVariants"); + } +} + +/// Infer classes from per-processor itinerary resources. +void CodeGenSchedModels::inferFromItinClass(Record *ItinClassDef, + unsigned FromClassIdx) { + for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) { + const CodeGenProcModel &PM = ProcModels[PIdx]; + // For all ItinRW entries. + bool HasMatch = false; + for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end(); + II != IE; ++II) { + RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses"); + if (!std::count(Matched.begin(), Matched.end(), ItinClassDef)) + continue; + if (HasMatch) + PrintFatalError((*II)->getLoc(), "Duplicate itinerary class " + + ItinClassDef->getName() + + " in ItinResources for " + PM.ModelName); + HasMatch = true; + IdxVec Writes, Reads; + findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads); + IdxVec ProcIndices(1, PIdx); + inferFromRW(Writes, Reads, FromClassIdx, ProcIndices); + } + } +} + +/// Infer classes from per-processor InstReadWrite definitions. 
+void CodeGenSchedModels::inferFromInstRWs(unsigned SCIdx) { + const RecVec &RWDefs = SchedClasses[SCIdx].InstRWs; + for (RecIter RWI = RWDefs.begin(), RWE = RWDefs.end(); RWI != RWE; ++RWI) { + const RecVec *InstDefs = Sets.expand(*RWI); + RecIter II = InstDefs->begin(), IE = InstDefs->end(); + for (; II != IE; ++II) { + if (InstrClassMap[*II] == SCIdx) + break; + } + // If this class no longer has any instructions mapped to it, it has become + // irrelevant. + if (II == IE) + continue; + IdxVec Writes, Reads; + findRWs((*RWI)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads); + unsigned PIdx = getProcModel((*RWI)->getValueAsDef("SchedModel")).Index; + IdxVec ProcIndices(1, PIdx); + inferFromRW(Writes, Reads, SCIdx, ProcIndices); + } +} + +namespace { +// Helper for substituteVariantOperand. +struct TransVariant { + Record *VarOrSeqDef; // Variant or sequence. + unsigned RWIdx; // Index of this variant or sequence's matched type. + unsigned ProcIdx; // Processor model index or zero for any. + unsigned TransVecIdx; // Index into PredTransitions::TransVec. + + TransVariant(Record *def, unsigned rwi, unsigned pi, unsigned ti): + VarOrSeqDef(def), RWIdx(rwi), ProcIdx(pi), TransVecIdx(ti) {} +}; + +// Associate a predicate with the SchedReadWrite that it guards. +// RWIdx is the index of the read/write variant. +struct PredCheck { + bool IsRead; + unsigned RWIdx; + Record *Predicate; + + PredCheck(bool r, unsigned w, Record *p): IsRead(r), RWIdx(w), Predicate(p) {} +}; + +// A Predicate transition is a list of RW sequences guarded by a PredTerm. +struct PredTransition { + // A predicate term is a conjunction of PredChecks. + SmallVector PredTerm; + SmallVector, 16> WriteSequences; + SmallVector, 16> ReadSequences; + SmallVector ProcIndices; +}; + +// Encapsulate a set of partially constructed transitions. +// The results are built by repeated calls to substituteVariants. +class PredTransitions { + CodeGenSchedModels &SchedModels; + +public: + std::vector TransVec; + + PredTransitions(CodeGenSchedModels &sm): SchedModels(sm) {} + + void substituteVariantOperand(const SmallVectorImpl &RWSeq, + bool IsRead, unsigned StartIdx); + + void substituteVariants(const PredTransition &Trans); + +#ifndef NDEBUG + void dump() const; +#endif + +private: + bool mutuallyExclusive(Record *PredDef, ArrayRef Term); + void getIntersectingVariants( + const CodeGenSchedRW &SchedRW, unsigned TransIdx, + std::vector &IntersectingVariants); + void pushVariant(const TransVariant &VInfo, bool IsRead); +}; +} // anonymous + +// Return true if this predicate is mutually exclusive with a PredTerm. This +// degenerates into checking if the predicate is mutually exclusive with any +// predicate in the Term's conjunction. +// +// All predicates associated with a given SchedRW are considered mutually +// exclusive. This should work even if the conditions expressed by the +// predicates are not exclusive because the predicates for a given SchedWrite +// are always checked in the order they are defined in the .td file. Later +// conditions implicitly negate any prior condition. 
+bool PredTransitions::mutuallyExclusive(Record *PredDef, + ArrayRef Term) { + + for (ArrayRef::iterator I = Term.begin(), E = Term.end(); + I != E; ++I) { + if (I->Predicate == PredDef) + return false; - Record *ModelDef = ProcDef->getValueAsDef("SchedModel"); - Record *ItinsDef = ProcDef->getValueAsDef("ProcItin"); + const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(I->RWIdx, I->IsRead); + assert(SchedRW.HasVariants && "PredCheck must refer to a SchedVariant"); + RecVec Variants = SchedRW.TheDef->getValueAsListOfDefs("Variants"); + for (RecIter VI = Variants.begin(), VE = Variants.end(); VI != VE; ++VI) { + if ((*VI)->getValueAsDef("Predicate") == PredDef) + return true; + } + } + return false; +} - std::string ModelName = ModelDef->getName(); - const std::string &ItinName = ItinsDef->getName(); +static bool hasAliasedVariants(const CodeGenSchedRW &RW, + CodeGenSchedModels &SchedModels) { + if (RW.HasVariants) + return true; - bool NoModel = ModelDef->getValueAsBit("NoModel"); - bool hasTopLevelItin = !ItinsDef->getValueAsListOfDefs("IID").empty(); - if (NoModel) { - // If an itinerary is defined without a machine model, infer a new model. - if (NoModel && hasTopLevelItin) { - ModelName = ItinName + "Model"; - ModelDef = NULL; + for (RecIter I = RW.Aliases.begin(), E = RW.Aliases.end(); I != E; ++I) { + const CodeGenSchedRW &AliasRW = + SchedModels.getSchedRW((*I)->getValueAsDef("AliasRW")); + if (AliasRW.HasVariants) + return true; + if (AliasRW.IsSequence) { + IdxVec ExpandedRWs; + SchedModels.expandRWSequence(AliasRW.Index, ExpandedRWs, AliasRW.IsRead); + for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end(); + SI != SE; ++SI) { + if (hasAliasedVariants(SchedModels.getSchedRW(*SI, AliasRW.IsRead), + SchedModels)) { + return true; + } + } + } + } + return false; +} + +static bool hasVariant(ArrayRef Transitions, + CodeGenSchedModels &SchedModels) { + for (ArrayRef::iterator + PTI = Transitions.begin(), PTE = Transitions.end(); + PTI != PTE; ++PTI) { + for (SmallVectorImpl >::const_iterator + WSI = PTI->WriteSequences.begin(), WSE = PTI->WriteSequences.end(); + WSI != WSE; ++WSI) { + for (SmallVectorImpl::const_iterator + WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) { + if (hasAliasedVariants(SchedModels.getSchedWrite(*WI), SchedModels)) + return true; + } + } + for (SmallVectorImpl >::const_iterator + RSI = PTI->ReadSequences.begin(), RSE = PTI->ReadSequences.end(); + RSI != RSE; ++RSI) { + for (SmallVectorImpl::const_iterator + RI = RSI->begin(), RE = RSI->end(); RI != RE; ++RI) { + if (hasAliasedVariants(SchedModels.getSchedRead(*RI), SchedModels)) + return true; + } + } + } + return false; +} + +// Populate IntersectingVariants with any variants or aliased sequences of the +// given SchedRW whose processor indices and predicates are not mutually +// exclusive with the given transition, +void PredTransitions::getIntersectingVariants( + const CodeGenSchedRW &SchedRW, unsigned TransIdx, + std::vector &IntersectingVariants) { + + std::vector Variants; + if (SchedRW.HasVariants) { + unsigned VarProcIdx = 0; + if (SchedRW.TheDef->getValueInit("SchedModel")->isComplete()) { + Record *ModelDef = SchedRW.TheDef->getValueAsDef("SchedModel"); + VarProcIdx = SchedModels.getProcModel(ModelDef).Index; + } + // Push each variant. Assign TransVecIdx later. 
+ const RecVec VarDefs = SchedRW.TheDef->getValueAsListOfDefs("Variants"); + for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI) + Variants.push_back(TransVariant(*RI, SchedRW.Index, VarProcIdx, 0)); + } + for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end(); + AI != AE; ++AI) { + // If either the SchedAlias itself or the SchedReadWrite that it aliases + // to is defined within a processor model, constrain all variants to + // that processor. + unsigned AliasProcIdx = 0; + if ((*AI)->getValueInit("SchedModel")->isComplete()) { + Record *ModelDef = (*AI)->getValueAsDef("SchedModel"); + AliasProcIdx = SchedModels.getProcModel(ModelDef).Index; + } + const CodeGenSchedRW &AliasRW = + SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW")); + + if (AliasRW.HasVariants) { + const RecVec VarDefs = AliasRW.TheDef->getValueAsListOfDefs("Variants"); + for (RecIter RI = VarDefs.begin(), RE = VarDefs.end(); RI != RE; ++RI) + Variants.push_back(TransVariant(*RI, AliasRW.Index, AliasProcIdx, 0)); + } + if (AliasRW.IsSequence) { + Variants.push_back( + TransVariant(AliasRW.TheDef, SchedRW.Index, AliasProcIdx, 0)); + } + } + for (unsigned VIdx = 0, VEnd = Variants.size(); VIdx != VEnd; ++VIdx) { + TransVariant &Variant = Variants[VIdx]; + // Don't expand variants if the processor models don't intersect. + // A zero processor index means any processor. + SmallVector &ProcIndices = TransVec[TransIdx].ProcIndices; + if (ProcIndices[0] && Variants[VIdx].ProcIdx) { + unsigned Cnt = std::count(ProcIndices.begin(), ProcIndices.end(), + Variant.ProcIdx); + if (!Cnt) + continue; + if (Cnt > 1) { + const CodeGenProcModel &PM = + *(SchedModels.procModelBegin() + Variant.ProcIdx); + PrintFatalError(Variant.VarOrSeqDef->getLoc(), + "Multiple variants defined for processor " + + PM.ModelName + + " Ensure only one SchedAlias exists per RW."); + } + } + if (Variant.VarOrSeqDef->isSubClassOf("SchedVar")) { + Record *PredDef = Variant.VarOrSeqDef->getValueAsDef("Predicate"); + if (mutuallyExclusive(PredDef, TransVec[TransIdx].PredTerm)) + continue; + } + if (IntersectingVariants.empty()) { + // The first variant builds on the existing transition. + Variant.TransVecIdx = TransIdx; + IntersectingVariants.push_back(Variant); + } + else { + // Push another copy of the current transition for more variants. + Variant.TransVecIdx = TransVec.size(); + IntersectingVariants.push_back(Variant); + TransVec.push_back(TransVec[TransIdx]); } } +} + +// Push the Reads/Writes selected by this variant onto the PredTransition +// specified by VInfo. +void PredTransitions:: +pushVariant(const TransVariant &VInfo, bool IsRead) { + + PredTransition &Trans = TransVec[VInfo.TransVecIdx]; + + // If this operand transition is reached through a processor-specific alias, + // then the whole transition is specific to this processor. + if (VInfo.ProcIdx != 0) + Trans.ProcIndices.assign(1, VInfo.ProcIdx); + + IdxVec SelectedRWs; + if (VInfo.VarOrSeqDef->isSubClassOf("SchedVar")) { + Record *PredDef = VInfo.VarOrSeqDef->getValueAsDef("Predicate"); + Trans.PredTerm.push_back(PredCheck(IsRead, VInfo.RWIdx,PredDef)); + RecVec SelectedDefs = VInfo.VarOrSeqDef->getValueAsListOfDefs("Selected"); + SchedModels.findRWs(SelectedDefs, SelectedRWs, IsRead); + } else { - // If a machine model is defined, the itinerary must be defined within it - // rather than in the Processor definition itself. 
- assert(!hasTopLevelItin && "Itinerary must be defined in SchedModel"); - ItinsDef = ModelDef->getValueAsDef("Itineraries"); + assert(VInfo.VarOrSeqDef->isSubClassOf("WriteSequence") && + "variant must be a SchedVariant or aliased WriteSequence"); + SelectedRWs.push_back(SchedModels.getSchedRWIdx(VInfo.VarOrSeqDef, IsRead)); } - ProcModelMap[getProcModelKey(ProcDef)]= ProcModels.size(); + const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(VInfo.RWIdx, IsRead); - ProcModels.push_back(CodeGenProcModel(ModelName, ModelDef, ItinsDef)); + SmallVectorImpl > &RWSequences = IsRead + ? Trans.ReadSequences : Trans.WriteSequences; + if (SchedRW.IsVariadic) { + unsigned OperIdx = RWSequences.size()-1; + // Make N-1 copies of this transition's last sequence. + for (unsigned i = 1, e = SelectedRWs.size(); i != e; ++i) { + RWSequences.push_back(RWSequences[OperIdx]); + } + // Push each of the N elements of the SelectedRWs onto a copy of the last + // sequence (split the current operand into N operands). + // Note that write sequences should be expanded within this loop--the entire + // sequence belongs to a single operand. + for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end(); + RWI != RWE; ++RWI, ++OperIdx) { + IdxVec ExpandedRWs; + if (IsRead) + ExpandedRWs.push_back(*RWI); + else + SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead); + RWSequences[OperIdx].insert(RWSequences[OperIdx].end(), + ExpandedRWs.begin(), ExpandedRWs.end()); + } + assert(OperIdx == RWSequences.size() && "missed a sequence"); + } + else { + // Push this transition's expanded sequence onto this transition's last + // sequence (add to the current operand's sequence). + SmallVectorImpl &Seq = RWSequences.back(); + IdxVec ExpandedRWs; + for (IdxIter RWI = SelectedRWs.begin(), RWE = SelectedRWs.end(); + RWI != RWE; ++RWI) { + if (IsRead) + ExpandedRWs.push_back(*RWI); + else + SchedModels.expandRWSequence(*RWI, ExpandedRWs, IsRead); + } + Seq.insert(Seq.end(), ExpandedRWs.begin(), ExpandedRWs.end()); + } +} + +// RWSeq is a sequence of all Reads or all Writes for the next read or write +// operand. StartIdx is an index into TransVec where partial results +// starts. RWSeq must be applied to all transitions between StartIdx and the end +// of TransVec. +void PredTransitions::substituteVariantOperand( + const SmallVectorImpl &RWSeq, bool IsRead, unsigned StartIdx) { - std::vector ItinRecords = ItinsDef->getValueAsListOfDefs("IID"); - CollectProcItin(ProcModels.back(), ItinRecords); + // Visit each original RW within the current sequence. + for (SmallVectorImpl::const_iterator + RWI = RWSeq.begin(), RWE = RWSeq.end(); RWI != RWE; ++RWI) { + const CodeGenSchedRW &SchedRW = SchedModels.getSchedRW(*RWI, IsRead); + // Push this RW on all partial PredTransitions or distribute variants. + // New PredTransitions may be pushed within this loop which should not be + // revisited (TransEnd must be loop invariant). + for (unsigned TransIdx = StartIdx, TransEnd = TransVec.size(); + TransIdx != TransEnd; ++TransIdx) { + // In the common case, push RW onto the current operand's sequence. + if (!hasAliasedVariants(SchedRW, SchedModels)) { + if (IsRead) + TransVec[TransIdx].ReadSequences.back().push_back(*RWI); + else + TransVec[TransIdx].WriteSequences.back().push_back(*RWI); + continue; + } + // Distribute this partial PredTransition across intersecting variants. + // This will push a copies of TransVec[TransIdx] on the back of TransVec. 
+ std::vector IntersectingVariants; + getIntersectingVariants(SchedRW, TransIdx, IntersectingVariants); + if (IntersectingVariants.empty()) + PrintFatalError(SchedRW.TheDef->getLoc(), + "No variant of this type has " + "a matching predicate on any processor"); + // Now expand each variant on top of its copy of the transition. + for (std::vector::const_iterator + IVI = IntersectingVariants.begin(), + IVE = IntersectingVariants.end(); + IVI != IVE; ++IVI) { + pushVariant(*IVI, IsRead); + } + } + } } -// Gather the processor itineraries. -void CodeGenSchedModels::CollectProcItin(CodeGenProcModel &ProcModel, - std::vector ItinRecords) { - // Skip empty itinerary. - if (ItinRecords.empty()) +// For each variant of a Read/Write in Trans, substitute the sequence of +// Read/Writes guarded by the variant. This is exponential in the number of +// variant Read/Writes, but in practice detection of mutually exclusive +// predicates should result in linear growth in the total number variants. +// +// This is one step in a breadth-first search of nested variants. +void PredTransitions::substituteVariants(const PredTransition &Trans) { + // Build up a set of partial results starting at the back of + // PredTransitions. Remember the first new transition. + unsigned StartIdx = TransVec.size(); + TransVec.resize(TransVec.size() + 1); + TransVec.back().PredTerm = Trans.PredTerm; + TransVec.back().ProcIndices = Trans.ProcIndices; + + // Visit each original write sequence. + for (SmallVectorImpl >::const_iterator + WSI = Trans.WriteSequences.begin(), WSE = Trans.WriteSequences.end(); + WSI != WSE; ++WSI) { + // Push a new (empty) write sequence onto all partial Transitions. + for (std::vector::iterator I = + TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) { + I->WriteSequences.resize(I->WriteSequences.size() + 1); + } + substituteVariantOperand(*WSI, /*IsRead=*/false, StartIdx); + } + // Visit each original read sequence. + for (SmallVectorImpl >::const_iterator + RSI = Trans.ReadSequences.begin(), RSE = Trans.ReadSequences.end(); + RSI != RSE; ++RSI) { + // Push a new (empty) read sequence onto all partial Transitions. + for (std::vector::iterator I = + TransVec.begin() + StartIdx, E = TransVec.end(); I != E; ++I) { + I->ReadSequences.resize(I->ReadSequences.size() + 1); + } + substituteVariantOperand(*RSI, /*IsRead=*/true, StartIdx); + } +} + +// Create a new SchedClass for each variant found by inferFromRW. Pass +static void inferFromTransitions(ArrayRef LastTransitions, + unsigned FromClassIdx, + CodeGenSchedModels &SchedModels) { + // For each PredTransition, create a new CodeGenSchedTransition, which usually + // requires creating a new SchedClass. + for (ArrayRef::iterator + I = LastTransitions.begin(), E = LastTransitions.end(); I != E; ++I) { + IdxVec OperWritesVariant; + for (SmallVectorImpl >::const_iterator + WSI = I->WriteSequences.begin(), WSE = I->WriteSequences.end(); + WSI != WSE; ++WSI) { + // Create a new write representing the expanded sequence. + OperWritesVariant.push_back( + SchedModels.findOrInsertRW(*WSI, /*IsRead=*/false)); + } + IdxVec OperReadsVariant; + for (SmallVectorImpl >::const_iterator + RSI = I->ReadSequences.begin(), RSE = I->ReadSequences.end(); + RSI != RSE; ++RSI) { + // Create a new read representing the expanded sequence. 
+ OperReadsVariant.push_back( + SchedModels.findOrInsertRW(*RSI, /*IsRead=*/true)); + } + IdxVec ProcIndices(I->ProcIndices.begin(), I->ProcIndices.end()); + CodeGenSchedTransition SCTrans; + SCTrans.ToClassIdx = + SchedModels.addSchedClass(OperWritesVariant, OperReadsVariant, + ProcIndices); + SCTrans.ProcIndices = ProcIndices; + // The final PredTerm is unique set of predicates guarding the transition. + RecVec Preds; + for (SmallVectorImpl::const_iterator + PI = I->PredTerm.begin(), PE = I->PredTerm.end(); PI != PE; ++PI) { + Preds.push_back(PI->Predicate); + } + RecIter PredsEnd = std::unique(Preds.begin(), Preds.end()); + Preds.resize(PredsEnd - Preds.begin()); + SCTrans.PredTerm = Preds; + SchedModels.getSchedClass(FromClassIdx).Transitions.push_back(SCTrans); + } +} + +// Create new SchedClasses for the given ReadWrite list. If any of the +// ReadWrites refers to a SchedVariant, create a new SchedClass for each variant +// of the ReadWrite list, following Aliases if necessary. +void CodeGenSchedModels::inferFromRW(const IdxVec &OperWrites, + const IdxVec &OperReads, + unsigned FromClassIdx, + const IdxVec &ProcIndices) { + DEBUG(dbgs() << "INFER RW: "); + + // Create a seed transition with an empty PredTerm and the expanded sequences + // of SchedWrites for the current SchedClass. + std::vector LastTransitions; + LastTransitions.resize(1); + LastTransitions.back().ProcIndices.append(ProcIndices.begin(), + ProcIndices.end()); + + for (IdxIter I = OperWrites.begin(), E = OperWrites.end(); I != E; ++I) { + IdxVec WriteSeq; + expandRWSequence(*I, WriteSeq, /*IsRead=*/false); + unsigned Idx = LastTransitions[0].WriteSequences.size(); + LastTransitions[0].WriteSequences.resize(Idx + 1); + SmallVectorImpl &Seq = LastTransitions[0].WriteSequences[Idx]; + for (IdxIter WI = WriteSeq.begin(), WE = WriteSeq.end(); WI != WE; ++WI) + Seq.push_back(*WI); + DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") "); + } + DEBUG(dbgs() << " Reads: "); + for (IdxIter I = OperReads.begin(), E = OperReads.end(); I != E; ++I) { + IdxVec ReadSeq; + expandRWSequence(*I, ReadSeq, /*IsRead=*/true); + unsigned Idx = LastTransitions[0].ReadSequences.size(); + LastTransitions[0].ReadSequences.resize(Idx + 1); + SmallVectorImpl &Seq = LastTransitions[0].ReadSequences[Idx]; + for (IdxIter RI = ReadSeq.begin(), RE = ReadSeq.end(); RI != RE; ++RI) + Seq.push_back(*RI); + DEBUG(dbgs() << "("; dumpIdxVec(Seq); dbgs() << ") "); + } + DEBUG(dbgs() << '\n'); + + // Collect all PredTransitions for individual operands. + // Iterate until no variant writes remain. + while (hasVariant(LastTransitions, *this)) { + PredTransitions Transitions(*this); + for (std::vector::const_iterator + I = LastTransitions.begin(), E = LastTransitions.end(); + I != E; ++I) { + Transitions.substituteVariants(*I); + } + DEBUG(Transitions.dump()); + LastTransitions.swap(Transitions.TransVec); + } + // If the first transition has no variants, nothing to do. + if (LastTransitions[0].PredTerm.empty()) return; - HasProcItineraries = true; + // WARNING: We are about to mutate the SchedClasses vector. Do not refer to + // OperWrites, OperReads, or ProcIndices after calling inferFromTransitions. + inferFromTransitions(LastTransitions, FromClassIdx, *this); +} - ProcModel.ItinDefList.resize(NumItineraryClasses+1); +// Collect and sort WriteRes, ReadAdvance, and ProcResources. +void CodeGenSchedModels::collectProcResources() { + // Add any subtarget-specific SchedReadWrites that are directly associated + // with processor resources. 
Refer to the parent SchedClass's ProcIndices to + // determine which processors they apply to. + for (SchedClassIter SCI = schedClassBegin(), SCE = schedClassEnd(); + SCI != SCE; ++SCI) { + if (SCI->ItinClassDef) + collectItinProcResources(SCI->ItinClassDef); + else + collectRWResources(SCI->Writes, SCI->Reads, SCI->ProcIndices); + } + // Add resources separately defined by each subtarget. + RecVec WRDefs = Records.getAllDerivedDefinitions("WriteRes"); + for (RecIter WRI = WRDefs.begin(), WRE = WRDefs.end(); WRI != WRE; ++WRI) { + Record *ModelDef = (*WRI)->getValueAsDef("SchedModel"); + addWriteRes(*WRI, getProcModel(ModelDef).Index); + } + RecVec RADefs = Records.getAllDerivedDefinitions("ReadAdvance"); + for (RecIter RAI = RADefs.begin(), RAE = RADefs.end(); RAI != RAE; ++RAI) { + Record *ModelDef = (*RAI)->getValueAsDef("SchedModel"); + addReadAdvance(*RAI, getProcModel(ModelDef).Index); + } + // Finalize each ProcModel by sorting the record arrays. + for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) { + CodeGenProcModel &PM = ProcModels[PIdx]; + std::sort(PM.WriteResDefs.begin(), PM.WriteResDefs.end(), + LessRecord()); + std::sort(PM.ReadAdvanceDefs.begin(), PM.ReadAdvanceDefs.end(), + LessRecord()); + std::sort(PM.ProcResourceDefs.begin(), PM.ProcResourceDefs.end(), + LessRecord()); + DEBUG( + PM.dump(); + dbgs() << "WriteResDefs: "; + for (RecIter RI = PM.WriteResDefs.begin(), + RE = PM.WriteResDefs.end(); RI != RE; ++RI) { + if ((*RI)->isSubClassOf("WriteRes")) + dbgs() << (*RI)->getValueAsDef("WriteType")->getName() << " "; + else + dbgs() << (*RI)->getName() << " "; + } + dbgs() << "\nReadAdvanceDefs: "; + for (RecIter RI = PM.ReadAdvanceDefs.begin(), + RE = PM.ReadAdvanceDefs.end(); RI != RE; ++RI) { + if ((*RI)->isSubClassOf("ReadAdvance")) + dbgs() << (*RI)->getValueAsDef("ReadType")->getName() << " "; + else + dbgs() << (*RI)->getName() << " "; + } + dbgs() << "\nProcResourceDefs: "; + for (RecIter RI = PM.ProcResourceDefs.begin(), + RE = PM.ProcResourceDefs.end(); RI != RE; ++RI) { + dbgs() << (*RI)->getName() << " "; + } + dbgs() << '\n'); + } +} - // Insert each itinerary data record in the correct position within - // the processor model's ItinDefList. - for (unsigned i = 0, N = ItinRecords.size(); i < N; i++) { - Record *ItinData = ItinRecords[i]; - Record *ItinDef = ItinData->getValueAsDef("TheClass"); - if (!SchedClassIdxMap.count(ItinDef->getName())) { - DEBUG(dbgs() << ProcModel.ItinsDef->getName() - << " has unused itinerary class " << ItinDef->getName() << '\n'); - continue; +// Collect itinerary class resources for each processor. +void CodeGenSchedModels::collectItinProcResources(Record *ItinClassDef) { + for (unsigned PIdx = 0, PEnd = ProcModels.size(); PIdx != PEnd; ++PIdx) { + const CodeGenProcModel &PM = ProcModels[PIdx]; + // For all ItinRW entries. 
+ bool HasMatch = false; + for (RecIter II = PM.ItinRWDefs.begin(), IE = PM.ItinRWDefs.end(); + II != IE; ++II) { + RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses"); + if (!std::count(Matched.begin(), Matched.end(), ItinClassDef)) + continue; + if (HasMatch) + PrintFatalError((*II)->getLoc(), "Duplicate itinerary class " + + ItinClassDef->getName() + + " in ItinResources for " + PM.ModelName); + HasMatch = true; + IdxVec Writes, Reads; + findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), Writes, Reads); + IdxVec ProcIndices(1, PIdx); + collectRWResources(Writes, Reads, ProcIndices); + } + } +} + +void CodeGenSchedModels::collectRWResources(unsigned RWIdx, bool IsRead, + const IdxVec &ProcIndices) { + const CodeGenSchedRW &SchedRW = getSchedRW(RWIdx, IsRead); + if (SchedRW.TheDef) { + if (!IsRead && SchedRW.TheDef->isSubClassOf("SchedWriteRes")) { + for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end(); + PI != PE; ++PI) { + addWriteRes(SchedRW.TheDef, *PI); + } + } + else if (IsRead && SchedRW.TheDef->isSubClassOf("SchedReadAdvance")) { + for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end(); + PI != PE; ++PI) { + addReadAdvance(SchedRW.TheDef, *PI); + } + } + } + for (RecIter AI = SchedRW.Aliases.begin(), AE = SchedRW.Aliases.end(); + AI != AE; ++AI) { + IdxVec AliasProcIndices; + if ((*AI)->getValueInit("SchedModel")->isComplete()) { + AliasProcIndices.push_back( + getProcModel((*AI)->getValueAsDef("SchedModel")).Index); + } + else + AliasProcIndices = ProcIndices; + const CodeGenSchedRW &AliasRW = getSchedRW((*AI)->getValueAsDef("AliasRW")); + assert(AliasRW.IsRead == IsRead && "cannot alias reads to writes"); + + IdxVec ExpandedRWs; + expandRWSequence(AliasRW.Index, ExpandedRWs, IsRead); + for (IdxIter SI = ExpandedRWs.begin(), SE = ExpandedRWs.end(); + SI != SE; ++SI) { + collectRWResources(*SI, IsRead, AliasProcIndices); + } + } +} + +// Collect resources for a set of read/write types and processor indices. +void CodeGenSchedModels::collectRWResources(const IdxVec &Writes, + const IdxVec &Reads, + const IdxVec &ProcIndices) { + + for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI) + collectRWResources(*WI, /*IsRead=*/false, ProcIndices); + + for (IdxIter RI = Reads.begin(), RE = Reads.end(); RI != RE; ++RI) + collectRWResources(*RI, /*IsRead=*/true, ProcIndices); +} + + +// Find the processor's resource units for this kind of resource. +Record *CodeGenSchedModels::findProcResUnits(Record *ProcResKind, + const CodeGenProcModel &PM) const { + if (ProcResKind->isSubClassOf("ProcResourceUnits")) + return ProcResKind; + + Record *ProcUnitDef = 0; + RecVec ProcResourceDefs = + Records.getAllDerivedDefinitions("ProcResourceUnits"); + + for (RecIter RI = ProcResourceDefs.begin(), RE = ProcResourceDefs.end(); + RI != RE; ++RI) { + + if ((*RI)->getValueAsDef("Kind") == ProcResKind + && (*RI)->getValueAsDef("SchedModel") == PM.ModelDef) { + if (ProcUnitDef) { + PrintFatalError((*RI)->getLoc(), + "Multiple ProcessorResourceUnits associated with " + + ProcResKind->getName()); + } + ProcUnitDef = *RI; } - ProcModel.ItinDefList[getItinClassIdx(ItinDef)] = ItinData; } + if (!ProcUnitDef) { + PrintFatalError(ProcResKind->getLoc(), + "No ProcessorResources associated with " + + ProcResKind->getName()); + } + return ProcUnitDef; +} + +// Iteratively add a resource and its super resources. 
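The function that follows repeatedly resolves a resource's "Super" field until the chain ends or the resource is already known for the processor. A simplified standalone sketch of the same idea, with plain strings standing in for the ProcResourceUnits records used by the real code (names here are illustrative):

#include <map>
#include <set>
#include <string>

// Add Kind and its transitive super resources to Added, stopping when a
// resource is already recorded or has no super.
static void addWithSupers(std::string Kind,
                          const std::map<std::string, std::string> &SuperOf,
                          std::set<std::string> &Added) {
  for (;;) {
    if (!Added.insert(Kind).second)
      return; // already associated with this processor
    std::map<std::string, std::string>::const_iterator I = SuperOf.find(Kind);
    if (I == SuperOf.end())
      return; // no Super field: end of the chain
    Kind = I->second;
  }
}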
+void CodeGenSchedModels::addProcResource(Record *ProcResKind, + CodeGenProcModel &PM) { + for (;;) { + Record *ProcResUnits = findProcResUnits(ProcResKind, PM); + + // See if this ProcResource is already associated with this processor. + RecIter I = std::find(PM.ProcResourceDefs.begin(), + PM.ProcResourceDefs.end(), ProcResUnits); + if (I != PM.ProcResourceDefs.end()) + return; + + PM.ProcResourceDefs.push_back(ProcResUnits); + if (!ProcResUnits->getValueInit("Super")->isComplete()) + return; + + ProcResKind = ProcResUnits->getValueAsDef("Super"); + } +} + +// Add resources for a SchedWrite to this processor if they don't exist. +void CodeGenSchedModels::addWriteRes(Record *ProcWriteResDef, unsigned PIdx) { + assert(PIdx && "don't add resources to an invalid Processor model"); + + RecVec &WRDefs = ProcModels[PIdx].WriteResDefs; + RecIter WRI = std::find(WRDefs.begin(), WRDefs.end(), ProcWriteResDef); + if (WRI != WRDefs.end()) + return; + WRDefs.push_back(ProcWriteResDef); + + // Visit ProcResourceKinds referenced by the newly discovered WriteRes. + RecVec ProcResDefs = ProcWriteResDef->getValueAsListOfDefs("ProcResources"); + for (RecIter WritePRI = ProcResDefs.begin(), WritePRE = ProcResDefs.end(); + WritePRI != WritePRE; ++WritePRI) { + addProcResource(*WritePRI, ProcModels[PIdx]); + } +} + +// Add resources for a ReadAdvance to this processor if they don't exist. +void CodeGenSchedModels::addReadAdvance(Record *ProcReadAdvanceDef, + unsigned PIdx) { + RecVec &RADefs = ProcModels[PIdx].ReadAdvanceDefs; + RecIter I = std::find(RADefs.begin(), RADefs.end(), ProcReadAdvanceDef); + if (I != RADefs.end()) + return; + RADefs.push_back(ProcReadAdvanceDef); +} + +unsigned CodeGenProcModel::getProcResourceIdx(Record *PRDef) const { + RecIter PRPos = std::find(ProcResourceDefs.begin(), ProcResourceDefs.end(), + PRDef); + if (PRPos == ProcResourceDefs.end()) + PrintFatalError(PRDef->getLoc(), "ProcResource def is not included in " + "the ProcResources list for " + ModelName); + // Idx=0 is reserved for invalid. + return 1 + (PRPos - ProcResourceDefs.begin()); +} + #ifndef NDEBUG - // Check for missing itinerary entries. - assert(!ProcModel.ItinDefList[0] && "NoItinerary class can't have rec"); - for (unsigned i = 1, N = ProcModel.ItinDefList.size(); i < N; ++i) { - if (!ProcModel.ItinDefList[i]) - DEBUG(dbgs() << ProcModel.ItinsDef->getName() - << " missing itinerary for class " << SchedClasses[i].Name << '\n'); +void CodeGenProcModel::dump() const { + dbgs() << Index << ": " << ModelName << " " + << (ModelDef ? ModelDef->getName() : "inferred") << " " + << (ItinsDef ? ItinsDef->getName() : "no itinerary") << '\n'; +} + +void CodeGenSchedRW::dump() const { + dbgs() << Name << (IsVariadic ? 
" (V) " : " "); + if (IsSequence) { + dbgs() << "("; + dumpIdxVec(Sequence); + dbgs() << ")"; + } +} + +void CodeGenSchedClass::dump(const CodeGenSchedModels* SchedModels) const { + dbgs() << "SCHEDCLASS " << Name << '\n' + << " Writes: "; + for (unsigned i = 0, N = Writes.size(); i < N; ++i) { + SchedModels->getSchedWrite(Writes[i]).dump(); + if (i < N-1) { + dbgs() << '\n'; + dbgs().indent(10); + } + } + dbgs() << "\n Reads: "; + for (unsigned i = 0, N = Reads.size(); i < N; ++i) { + SchedModels->getSchedRead(Reads[i]).dump(); + if (i < N-1) { + dbgs() << '\n'; + dbgs().indent(10); + } + } + dbgs() << "\n ProcIdx: "; dumpIdxVec(ProcIndices); dbgs() << '\n'; +} + +void PredTransitions::dump() const { + dbgs() << "Expanded Variants:\n"; + for (std::vector::const_iterator + TI = TransVec.begin(), TE = TransVec.end(); TI != TE; ++TI) { + dbgs() << "{"; + for (SmallVectorImpl::const_iterator + PCI = TI->PredTerm.begin(), PCE = TI->PredTerm.end(); + PCI != PCE; ++PCI) { + if (PCI != TI->PredTerm.begin()) + dbgs() << ", "; + dbgs() << SchedModels.getSchedRW(PCI->RWIdx, PCI->IsRead).Name + << ":" << PCI->Predicate->getName(); + } + dbgs() << "},\n => {"; + for (SmallVectorImpl >::const_iterator + WSI = TI->WriteSequences.begin(), WSE = TI->WriteSequences.end(); + WSI != WSE; ++WSI) { + dbgs() << "("; + for (SmallVectorImpl::const_iterator + WI = WSI->begin(), WE = WSI->end(); WI != WE; ++WI) { + if (WI != WSI->begin()) + dbgs() << ", "; + dbgs() << SchedModels.getSchedWrite(*WI).Name; + } + dbgs() << "),"; + } + dbgs() << "}\n"; } -#endif } +#endif // NDEBUG diff --git a/utils/TableGen/CodeGenSchedule.h b/utils/TableGen/CodeGenSchedule.h index 9da0145..eed0589 100644 --- a/utils/TableGen/CodeGenSchedule.h +++ b/utils/TableGen/CodeGenSchedule.h @@ -15,6 +15,7 @@ #ifndef CODEGEN_SCHEDULE_H #define CODEGEN_SCHEDULE_H +#include "SetTheory.h" #include "llvm/TableGen/Record.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/ADT/DenseMap.h" @@ -23,21 +24,131 @@ namespace llvm { class CodeGenTarget; +class CodeGenSchedModels; +class CodeGenInstruction; -// Scheduling class. -// -// Each instruction description will be mapped to a scheduling class. It may be -// an explicitly defined itinerary class, or an inferred class in which case -// ItinClassDef == NULL. +typedef std::vector RecVec; +typedef std::vector::const_iterator RecIter; + +typedef std::vector IdxVec; +typedef std::vector::const_iterator IdxIter; + +void splitSchedReadWrites(const RecVec &RWDefs, + RecVec &WriteDefs, RecVec &ReadDefs); + +/// We have two kinds of SchedReadWrites. Explicitly defined and inferred +/// sequences. TheDef is nonnull for explicit SchedWrites, but Sequence may or +/// may not be empty. TheDef is null for inferred sequences, and Sequence must +/// be nonempty. +/// +/// IsVariadic controls whether the variants are expanded into multiple operands +/// or a sequence of writes on one operand. 
+struct CodeGenSchedRW { + unsigned Index; + std::string Name; + Record *TheDef; + bool IsRead; + bool IsAlias; + bool HasVariants; + bool IsVariadic; + bool IsSequence; + IdxVec Sequence; + RecVec Aliases; + + CodeGenSchedRW(): Index(0), TheDef(0), IsAlias(false), HasVariants(false), + IsVariadic(false), IsSequence(false) {} + CodeGenSchedRW(unsigned Idx, Record *Def): Index(Idx), TheDef(Def), + IsAlias(false), IsVariadic(false) { + Name = Def->getName(); + IsRead = Def->isSubClassOf("SchedRead"); + HasVariants = Def->isSubClassOf("SchedVariant"); + if (HasVariants) + IsVariadic = Def->getValueAsBit("Variadic"); + + // Read records don't currently have sequences, but it can be easily + // added. Note that implicit Reads (from ReadVariant) may have a Sequence + // (but no record). + IsSequence = Def->isSubClassOf("WriteSequence"); + } + + CodeGenSchedRW(unsigned Idx, bool Read, const IdxVec &Seq, + const std::string &Name): + Index(Idx), Name(Name), TheDef(0), IsRead(Read), IsAlias(false), + HasVariants(false), IsVariadic(false), IsSequence(true), Sequence(Seq) { + assert(Sequence.size() > 1 && "implied sequence needs >1 RWs"); + } + + bool isValid() const { + assert((!HasVariants || TheDef) && "Variant write needs record def"); + assert((!IsVariadic || HasVariants) && "Variadic write needs variants"); + assert((!IsSequence || !HasVariants) && "Sequence can't have variant"); + assert((!IsSequence || !Sequence.empty()) && "Sequence should be nonempty"); + assert((!IsAlias || Aliases.empty()) && "Alias cannot have aliases"); + return TheDef || !Sequence.empty(); + } + +#ifndef NDEBUG + void dump() const; +#endif +}; + +/// Represent a transition between SchedClasses induced by SchedVariant. +struct CodeGenSchedTransition { + unsigned ToClassIdx; + IdxVec ProcIndices; + RecVec PredTerm; +}; + +/// Scheduling class. +/// +/// Each instruction description will be mapped to a scheduling class. There are +/// four types of classes: +/// +/// 1) An explicitly defined itinerary class with ItinClassDef set. +/// Writes and ReadDefs are empty. ProcIndices contains 0 for any processor. +/// +/// 2) An implied class with a list of SchedWrites and SchedReads that are +/// defined in an instruction definition and which are common across all +/// subtargets. ProcIndices contains 0 for any processor. +/// +/// 3) An implied class with a list of InstRW records that map instructions to +/// SchedWrites and SchedReads per-processor. InstrClassMap should map the same +/// instructions to this class. ProcIndices contains all the processors that +/// provided InstrRW records for this class. ItinClassDef or Writes/Reads may +/// still be defined for processors with no InstRW entry. +/// +/// 4) An inferred class represents a variant of another class that may be +/// resolved at runtime. ProcIndices contains the set of processors that may +/// require the class. ProcIndices are propagated through SchedClasses as +/// variants are expanded. Multiple SchedClasses may be inferred from an +/// itinerary class. Each inherits the processor index from the ItinRW record +/// that mapped the itinerary class to the variant Writes or Reads. struct CodeGenSchedClass { std::string Name; - unsigned Index; Record *ItinClassDef; - CodeGenSchedClass(): Index(0), ItinClassDef(0) {} - CodeGenSchedClass(Record *rec): Index(0), ItinClassDef(rec) { + IdxVec Writes; + IdxVec Reads; + // Sorted list of ProcIdx, where ProcIdx==0 implies any processor. 
+ IdxVec ProcIndices; + + std::vector Transitions; + + // InstRW records associated with this class. These records may refer to an + // Instruction no longer mapped to this class by InstrClassMap. These + // Instructions should be ignored by this class because they have been split + // off to join another inferred class. + RecVec InstRWs; + + CodeGenSchedClass(): ItinClassDef(0) {} + CodeGenSchedClass(Record *rec): ItinClassDef(rec) { Name = rec->getName(); + ProcIndices.push_back(0); } + +#ifndef NDEBUG + void dump(const CodeGenSchedModels *SchedModels) const; +#endif }; // Processor model. @@ -55,28 +166,69 @@ struct CodeGenSchedClass { // // ItinDefList orders this processor's InstrItinData records by SchedClass idx. struct CodeGenProcModel { + unsigned Index; std::string ModelName; Record *ModelDef; Record *ItinsDef; - // Array of InstrItinData records indexed by CodeGenSchedClass::Index. - // The list is empty if the subtarget has no itineraries. - std::vector ItinDefList; + // Derived members... - CodeGenProcModel(const std::string &Name, Record *MDef, Record *IDef): - ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {} + // Array of InstrItinData records indexed by a CodeGenSchedClass index. + // This list is empty if the Processor has no value for Itineraries. + // Initialized by collectProcItins(). + RecVec ItinDefList; + + // Map itinerary classes to per-operand resources. + // This list is empty if no ItinRW refers to this Processor. + RecVec ItinRWDefs; + + // All read/write resources associated with this processor. + RecVec WriteResDefs; + RecVec ReadAdvanceDefs; + + // Per-operand machine model resources associated with this processor. + RecVec ProcResourceDefs; + + CodeGenProcModel(unsigned Idx, const std::string &Name, Record *MDef, + Record *IDef) : + Index(Idx), ModelName(Name), ModelDef(MDef), ItinsDef(IDef) {} + + bool hasInstrSchedModel() const { + return !WriteResDefs.empty() || !ItinRWDefs.empty(); + } + + unsigned getProcResourceIdx(Record *PRDef) const; + +#ifndef NDEBUG + void dump() const; +#endif }; -// Top level container for machine model data. +/// Top level container for machine model data. class CodeGenSchedModels { RecordKeeper &Records; const CodeGenTarget &Target; + // Map dag expressions to Instruction lists. + SetTheory Sets; + + // List of unique processor models. + std::vector ProcModels; + + // Map Processor's MachineModel or ProcItin to a CodeGenProcModel index. + typedef DenseMap ProcModelMapTy; + ProcModelMapTy ProcModelMap; + + // Per-operand SchedReadWrite types. + std::vector SchedWrites; + std::vector SchedReads; + // List of unique SchedClasses. std::vector SchedClasses; // Map SchedClass name to itinerary index. - // These are either explicit itinerary classes or inferred classes. + // These are either explicit itinerary classes or classes implied by + // instruction definitions with SchedReadWrite lists. StringMap SchedClassIdxMap; // SchedClass indices 1 up to and including NumItineraryClasses identify @@ -84,22 +236,80 @@ class CodeGenSchedModels { // definitions. NoItinerary always has index 0 regardless of whether it is // explicitly referenced. // - // Any inferred SchedClass have a index greater than NumItineraryClasses. + // Any implied SchedClass has an index greater than NumItineraryClasses. unsigned NumItineraryClasses; - // List of unique processor models. - std::vector ProcModels; - - // Map Processor's MachineModel + ProcItin fields to a CodeGenProcModel index. 
- typedef DenseMap, unsigned> ProcModelMapTy; - ProcModelMapTy ProcModelMap; + // Any inferred SchedClass has an index greater than NumInstrSchedClassses. + unsigned NumInstrSchedClasses; - // True if any processors have nonempty itineraries. - bool HasProcItineraries; + // Map Instruction to SchedClass index. Only for Instructions mentioned in + // InstRW records. + typedef DenseMap InstClassMapTy; + InstClassMapTy InstrClassMap; public: CodeGenSchedModels(RecordKeeper& RK, const CodeGenTarget &TGT); + Record *getModelOrItinDef(Record *ProcDef) const { + Record *ModelDef = ProcDef->getValueAsDef("SchedModel"); + Record *ItinsDef = ProcDef->getValueAsDef("ProcItin"); + if (!ItinsDef->getValueAsListOfDefs("IID").empty()) { + assert(ModelDef->getValueAsBit("NoModel") + && "Itineraries must be defined within SchedMachineModel"); + return ItinsDef; + } + return ModelDef; + } + + const CodeGenProcModel &getModelForProc(Record *ProcDef) const { + Record *ModelDef = getModelOrItinDef(ProcDef); + ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef); + assert(I != ProcModelMap.end() && "missing machine model"); + return ProcModels[I->second]; + } + + const CodeGenProcModel &getProcModel(Record *ModelDef) const { + ProcModelMapTy::const_iterator I = ProcModelMap.find(ModelDef); + assert(I != ProcModelMap.end() && "missing machine model"); + return ProcModels[I->second]; + } + + // Iterate over the unique processor models. + typedef std::vector::const_iterator ProcIter; + ProcIter procModelBegin() const { return ProcModels.begin(); } + ProcIter procModelEnd() const { return ProcModels.end(); } + + // Get a SchedWrite from its index. + const CodeGenSchedRW &getSchedWrite(unsigned Idx) const { + assert(Idx < SchedWrites.size() && "bad SchedWrite index"); + assert(SchedWrites[Idx].isValid() && "invalid SchedWrite"); + return SchedWrites[Idx]; + } + // Get a SchedWrite from its index. + const CodeGenSchedRW &getSchedRead(unsigned Idx) const { + assert(Idx < SchedReads.size() && "bad SchedRead index"); + assert(SchedReads[Idx].isValid() && "invalid SchedRead"); + return SchedReads[Idx]; + } + + const CodeGenSchedRW &getSchedRW(unsigned Idx, bool IsRead) const { + return IsRead ? getSchedRead(Idx) : getSchedWrite(Idx); + } + CodeGenSchedRW &getSchedRW(Record *Def) { + bool IsRead = Def->isSubClassOf("SchedRead"); + unsigned Idx = getSchedRWIdx(Def, IsRead); + return const_cast( + IsRead ? getSchedRead(Idx) : getSchedWrite(Idx)); + } + const CodeGenSchedRW &getSchedRW(Record*Def) const { + return const_cast(*this).getSchedRW(Def); + } + + unsigned getSchedRWIdx(Record *Def, bool IsRead, unsigned After = 0) const; + + // Return true if the given write record is referenced by a ReadAdvance. + bool hasReadOfWrite(Record *WriteDef) const; + // Check if any instructions are assigned to an explicit itinerary class other // than NoItinerary. bool hasItineraryClasses() const { return NumItineraryClasses > 0; } @@ -111,60 +321,90 @@ public: } // Get a SchedClass from its index. - const CodeGenSchedClass &getSchedClass(unsigned Idx) { + CodeGenSchedClass &getSchedClass(unsigned Idx) { assert(Idx < SchedClasses.size() && "bad SchedClass index"); return SchedClasses[Idx]; } - - // Get an itinerary class's index. Value indices are '0' for NoItinerary up to - // and including numItineraryClasses(). 
- unsigned getItinClassIdx(Record *ItinDef) const { - assert(SchedClassIdxMap.count(ItinDef->getName()) && "missing ItinClass"); - unsigned Idx = SchedClassIdxMap.lookup(ItinDef->getName()); - assert(Idx <= NumItineraryClasses && "bad ItinClass index"); - return Idx; + const CodeGenSchedClass &getSchedClass(unsigned Idx) const { + assert(Idx < SchedClasses.size() && "bad SchedClass index"); + return SchedClasses[Idx]; } - bool hasProcessorItineraries() const { - return HasProcItineraries; - } + // Get the SchedClass index for an instruction. Instructions with no + // itinerary, no SchedReadWrites, and no InstrReadWrites references return 0 + // for NoItinerary. + unsigned getSchedClassIdx(const CodeGenInstruction &Inst) const; + + unsigned getSchedClassIdx(const RecVec &RWDefs) const; - // Get an existing machine model for a processor definition. - const CodeGenProcModel &getProcModel(Record *ProcDef) const { - unsigned idx = getProcModelIdx(ProcDef); - assert(idx < ProcModels.size() && "missing machine model"); - return ProcModels[idx]; + unsigned getSchedClassIdxForItin(const Record *ItinDef) { + return SchedClassIdxMap[ItinDef->getName()]; } - // Iterate over the unique processor models. - typedef std::vector::const_iterator ProcIter; - ProcIter procModelBegin() const { return ProcModels.begin(); } - ProcIter procModelEnd() const { return ProcModels.end(); } + typedef std::vector::const_iterator SchedClassIter; + SchedClassIter schedClassBegin() const { return SchedClasses.begin(); } + SchedClassIter schedClassEnd() const { return SchedClasses.end(); } -private: - // Get a key that can uniquely identify a machine model. - ProcModelMapTy::key_type getProcModelKey(Record *ProcDef) const { - Record *ModelDef = ProcDef->getValueAsDef("SchedModel"); - Record *ItinsDef = ProcDef->getValueAsDef("ProcItin"); - return std::make_pair(ModelDef, ItinsDef); - } + void findRWs(const RecVec &RWDefs, IdxVec &Writes, IdxVec &Reads) const; + void findRWs(const RecVec &RWDefs, IdxVec &RWs, bool IsRead) const; + void expandRWSequence(unsigned RWIdx, IdxVec &RWSeq, bool IsRead) const; + void expandRWSeqForProc(unsigned RWIdx, IdxVec &RWSeq, bool IsRead, + const CodeGenProcModel &ProcModel) const; - // Get the unique index of a machine model. - unsigned getProcModelIdx(Record *ProcDef) const { - ProcModelMapTy::const_iterator I = - ProcModelMap.find(getProcModelKey(ProcDef)); - if (I == ProcModelMap.end()) - return ProcModels.size(); - return I->second; - } + unsigned addSchedClass(const IdxVec &OperWrites, const IdxVec &OperReads, + const IdxVec &ProcIndices); + + unsigned findOrInsertRW(ArrayRef Seq, bool IsRead); + + unsigned findSchedClassIdx(const IdxVec &Writes, const IdxVec &Reads) const; + + Record *findProcResUnits(Record *ProcResKind, + const CodeGenProcModel &PM) const; + +private: + void collectProcModels(); // Initialize a new processor model if it is unique. 
void addProcModel(Record *ProcDef); - void CollectSchedClasses(); - void CollectProcModels(); - void CollectProcItin(CodeGenProcModel &ProcModel, - std::vector ItinRecords); + void collectSchedRW(); + + std::string genRWName(const IdxVec& Seq, bool IsRead); + unsigned findRWForSequence(const IdxVec &Seq, bool IsRead); + + void collectSchedClasses(); + + std::string createSchedClassName(const IdxVec &OperWrites, + const IdxVec &OperReads); + std::string createSchedClassName(const RecVec &InstDefs); + void createInstRWClass(Record *InstRWDef); + + void collectProcItins(); + + void collectProcItinRW(); + + void inferSchedClasses(); + + void inferFromRW(const IdxVec &OperWrites, const IdxVec &OperReads, + unsigned FromClassIdx, const IdxVec &ProcIndices); + void inferFromItinClass(Record *ItinClassDef, unsigned FromClassIdx); + void inferFromInstRWs(unsigned SCIdx); + + void collectProcResources(); + + void collectItinProcResources(Record *ItinClassDef); + + void collectRWResources(unsigned RWIdx, bool IsRead, + const IdxVec &ProcIndices); + + void collectRWResources(const IdxVec &Writes, const IdxVec &Reads, + const IdxVec &ProcIndices); + + void addProcResource(Record *ProcResourceKind, CodeGenProcModel &PM); + + void addWriteRes(Record *ProcWriteResDef, unsigned PIdx); + + void addReadAdvance(Record *ProcReadAdvanceDef, unsigned PIdx); }; } // namespace llvm diff --git a/utils/TableGen/CodeGenTarget.cpp b/utils/TableGen/CodeGenTarget.cpp index 1dd2efc..c9992eb 100644 --- a/utils/TableGen/CodeGenTarget.cpp +++ b/utils/TableGen/CodeGenTarget.cpp @@ -10,13 +10,14 @@ // This class wraps target description classes used by the various code // generation TableGen backends. This makes it easier to access the data and // provides a single place that needs to check it for validity. All of these -// classes throw exceptions on error conditions. +// classes abort on error conditions. 
// //===----------------------------------------------------------------------===// #include "CodeGenTarget.h" #include "CodeGenIntrinsics.h" #include "CodeGenSchedule.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/STLExtras.h" @@ -68,22 +69,30 @@ std::string llvm::getEnumName(MVT::SimpleValueType T) { case MVT::x86mmx: return "MVT::x86mmx"; case MVT::Glue: return "MVT::Glue"; case MVT::isVoid: return "MVT::isVoid"; + case MVT::v2i1: return "MVT::v2i1"; + case MVT::v4i1: return "MVT::v4i1"; + case MVT::v8i1: return "MVT::v8i1"; + case MVT::v16i1: return "MVT::v16i1"; case MVT::v2i8: return "MVT::v2i8"; case MVT::v4i8: return "MVT::v4i8"; case MVT::v8i8: return "MVT::v8i8"; case MVT::v16i8: return "MVT::v16i8"; case MVT::v32i8: return "MVT::v32i8"; + case MVT::v1i16: return "MVT::v1i16"; case MVT::v2i16: return "MVT::v2i16"; case MVT::v4i16: return "MVT::v4i16"; case MVT::v8i16: return "MVT::v8i16"; case MVT::v16i16: return "MVT::v16i16"; + case MVT::v1i32: return "MVT::v1i32"; case MVT::v2i32: return "MVT::v2i32"; case MVT::v4i32: return "MVT::v4i32"; case MVT::v8i32: return "MVT::v8i32"; + case MVT::v16i32: return "MVT::v16i32"; case MVT::v1i64: return "MVT::v1i64"; case MVT::v2i64: return "MVT::v2i64"; case MVT::v4i64: return "MVT::v4i64"; case MVT::v8i64: return "MVT::v8i64"; + case MVT::v16i64: return "MVT::v16i64"; case MVT::v2f16: return "MVT::v2f16"; case MVT::v2f32: return "MVT::v2f32"; case MVT::v4f32: return "MVT::v4f32"; @@ -116,9 +125,9 @@ CodeGenTarget::CodeGenTarget(RecordKeeper &records) : Records(records), RegBank(0), SchedModels(0) { std::vector Targets = Records.getAllDerivedDefinitions("Target"); if (Targets.size() == 0) - throw std::string("ERROR: No 'Target' subclasses defined!"); + PrintFatalError("ERROR: No 'Target' subclasses defined!"); if (Targets.size() != 1) - throw std::string("ERROR: Multiple subclasses of Target defined!"); + PrintFatalError("ERROR: Multiple subclasses of Target defined!"); TargetRec = Targets[0]; } @@ -152,7 +161,7 @@ Record *CodeGenTarget::getInstructionSet() const { Record *CodeGenTarget::getAsmParser() const { std::vector LI = TargetRec->getValueAsListOfDefs("AssemblyParsers"); if (AsmParserNum >= LI.size()) - throw "Target does not have an AsmParser #" + utostr(AsmParserNum) + "!"; + PrintFatalError("Target does not have an AsmParser #" + utostr(AsmParserNum) + "!"); return LI[AsmParserNum]; } @@ -163,7 +172,7 @@ Record *CodeGenTarget::getAsmParserVariant(unsigned i) const { std::vector LI = TargetRec->getValueAsListOfDefs("AssemblyParserVariants"); if (i >= LI.size()) - throw "Target does not have an AsmParserVariant #" + utostr(i) + "!"; + PrintFatalError("Target does not have an AsmParserVariant #" + utostr(i) + "!"); return LI[i]; } @@ -181,7 +190,7 @@ unsigned CodeGenTarget::getAsmParserVariantCount() const { Record *CodeGenTarget::getAsmWriter() const { std::vector LI = TargetRec->getValueAsListOfDefs("AssemblyWriters"); if (AsmWriterNum >= LI.size()) - throw "Target does not have an AsmWriter #" + utostr(AsmWriterNum) + "!"; + PrintFatalError("Target does not have an AsmWriter #" + utostr(AsmWriterNum) + "!"); return LI[AsmWriterNum]; } @@ -199,12 +208,11 @@ void CodeGenTarget::ReadRegAltNameIndices() const { /// getRegisterByName - If there is a register with the specific AsmName, /// return it. 
const CodeGenRegister *CodeGenTarget::getRegisterByName(StringRef Name) const { - const std::vector &Regs = getRegBank().getRegisters(); - for (unsigned i = 0, e = Regs.size(); i != e; ++i) - if (Regs[i]->TheDef->getValueAsString("AsmName") == Name) - return Regs[i]; - - return 0; + const StringMap &Regs = getRegBank().getRegistersByName(); + StringMap::const_iterator I = Regs.find(Name); + if (I == Regs.end()) + return 0; + return I->second; } std::vector CodeGenTarget:: @@ -249,7 +257,7 @@ CodeGenSchedModels &CodeGenTarget::getSchedModels() const { void CodeGenTarget::ReadInstructions() const { std::vector Insts = Records.getAllDerivedDefinitions("Instruction"); if (Insts.size() <= 2) - throw std::string("No 'Instruction' subclasses defined!"); + PrintFatalError("No 'Instruction' subclasses defined!"); // Parse the instructions defined in the .td file. for (unsigned i = 0, e = Insts.size(); i != e; ++i) @@ -265,7 +273,7 @@ GetInstByName(const char *Name, DenseMap::const_iterator I = Insts.find(Rec); if (Rec == 0 || I == Insts.end()) - throw std::string("Could not find '") + Name + "' instruction!"; + PrintFatalError(std::string("Could not find '") + Name + "' instruction!"); return I->second; } @@ -300,6 +308,8 @@ void CodeGenTarget::ComputeInstrsByEnum() const { "REG_SEQUENCE", "COPY", "BUNDLE", + "LIFETIME_START", + "LIFETIME_END", 0 }; const DenseMap &Insts = getInstructions(); @@ -334,6 +344,15 @@ bool CodeGenTarget::isLittleEndianEncoding() const { return getInstructionSet()->getValueAsBit("isLittleEndianEncoding"); } +/// guessInstructionProperties - Return true if it's OK to guess instruction +/// properties instead of raising an error. +/// +/// This is configurable as a temporary migration aid. It will eventually be +/// permanently false. +bool CodeGenTarget::guessInstructionProperties() const { + return getInstructionSet()->getValueAsBit("guessInstructionProperties"); +} + //===----------------------------------------------------------------------===// // ComplexPattern implementation // @@ -401,7 +420,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) { if (DefName.size() <= 4 || std::string(DefName.begin(), DefName.begin() + 4) != "int_") - throw "Intrinsic '" + DefName + "' does not start with 'int_'!"; + PrintFatalError("Intrinsic '" + DefName + "' does not start with 'int_'!"); EnumName = std::string(DefName.begin()+4, DefName.end()); @@ -421,7 +440,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) { // Verify it starts with "llvm.". if (Name.size() <= 5 || std::string(Name.begin(), Name.begin() + 5) != "llvm.") - throw "Intrinsic '" + DefName + "'s name does not start with 'llvm.'!"; + PrintFatalError("Intrinsic '" + DefName + "'s name does not start with 'llvm.'!"); } // If TargetPrefix is specified, make sure that Name starts with @@ -430,8 +449,8 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) { if (Name.size() < 6+TargetPrefix.size() || std::string(Name.begin() + 5, Name.begin() + 6 + TargetPrefix.size()) != (TargetPrefix + ".")) - throw "Intrinsic '" + DefName + "' does not start with 'llvm." + - TargetPrefix + ".'!"; + PrintFatalError("Intrinsic '" + DefName + "' does not start with 'llvm." + + TargetPrefix + ".'!"); } // Parse the list of return types. @@ -463,7 +482,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) { // Reject invalid types. 
if (VT == MVT::isVoid) - throw "Intrinsic '" + DefName + " has void in result type list!"; + PrintFatalError("Intrinsic '" + DefName + " has void in result type list!"); IS.RetVTs.push_back(VT); IS.RetTypeDefs.push_back(TyEl); @@ -497,7 +516,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) { // Reject invalid types. if (VT == MVT::isVoid && i != e-1 /*void at end means varargs*/) - throw "Intrinsic '" + DefName + " has void in result type list!"; + PrintFatalError("Intrinsic '" + DefName + " has void in result type list!"); IS.ParamVTs.push_back(VT); IS.ParamTypeDefs.push_back(TyEl); diff --git a/utils/TableGen/CodeGenTarget.h b/utils/TableGen/CodeGenTarget.h index 2f8cee4..ddeecee 100644 --- a/utils/TableGen/CodeGenTarget.h +++ b/utils/TableGen/CodeGenTarget.h @@ -9,8 +9,8 @@ // // This file defines wrappers for the Target class and related global // functionality. This makes it easier to access the data and provides a single -// place that needs to check it for validity. All of these classes throw -// exceptions on error conditions. +// place that needs to check it for validity. All of these classes abort +// on error conditions. // //===----------------------------------------------------------------------===// @@ -177,6 +177,10 @@ public: /// bool isLittleEndianEncoding() const; + /// guessInstructionProperties - should we just guess unset instruction + /// properties? + bool guessInstructionProperties() const; + private: void ComputeInstrsByEnum() const; }; diff --git a/utils/TableGen/DAGISelMatcher.h b/utils/TableGen/DAGISelMatcher.h index 3ca16f0..7c6ce3b 100644 --- a/utils/TableGen/DAGISelMatcher.h +++ b/utils/TableGen/DAGISelMatcher.h @@ -99,8 +99,6 @@ public: OwningPtr &getNextPtr() { return Next; } - static inline bool classof(const Matcher *) { return true; } - bool isEqual(const Matcher *M) const { if (getKind() != M->getKind()) return false; return isEqualImpl(M); diff --git a/utils/TableGen/DAGISelMatcherEmitter.cpp b/utils/TableGen/DAGISelMatcherEmitter.cpp index 1445edb..713f174 100644 --- a/utils/TableGen/DAGISelMatcherEmitter.cpp +++ b/utils/TableGen/DAGISelMatcherEmitter.cpp @@ -598,7 +598,7 @@ EmitMatcherList(const Matcher *N, unsigned Indent, unsigned CurrentIdx, void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) { // Emit pattern predicates. if (!PatternPredicates.empty()) { - OS << "bool CheckPatternPredicate(unsigned PredNo) const {\n"; + OS << "virtual bool CheckPatternPredicate(unsigned PredNo) const {\n"; OS << " switch (PredNo) {\n"; OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n"; for (unsigned i = 0, e = PatternPredicates.size(); i != e; ++i) @@ -616,7 +616,8 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) { PFsByName[I->first->getName()] = I->second; if (!NodePredicates.empty()) { - OS << "bool CheckNodePredicate(SDNode *Node, unsigned PredNo) const {\n"; + OS << "virtual bool CheckNodePredicate(SDNode *Node,\n"; + OS << " unsigned PredNo) const {\n"; OS << " switch (PredNo) {\n"; OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n"; for (unsigned i = 0, e = NodePredicates.size(); i != e; ++i) { @@ -635,8 +636,8 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) { // Emit CompletePattern matchers. // FIXME: This should be const. 
if (!ComplexPatterns.empty()) { - OS << "bool CheckComplexPattern(SDNode *Root, SDNode *Parent, SDValue N,\n"; - OS << " unsigned PatternNo,\n"; + OS << "virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent,\n"; + OS << " SDValue N, unsigned PatternNo,\n"; OS << " SmallVectorImpl > &Result) {\n"; OS << " unsigned NextRes = Result.size();\n"; OS << " switch (PatternNo) {\n"; @@ -676,7 +677,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) { // Emit SDNodeXForm handlers. // FIXME: This should be const. if (!NodeXForms.empty()) { - OS << "SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {\n"; + OS << "virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {\n"; OS << " switch (XFormNo) {\n"; OS << " default: llvm_unreachable(\"Invalid xform # in table?\");\n"; diff --git a/utils/TableGen/DAGISelMatcherGen.cpp b/utils/TableGen/DAGISelMatcherGen.cpp index aed222c..573f558 100644 --- a/utils/TableGen/DAGISelMatcherGen.cpp +++ b/utils/TableGen/DAGISelMatcherGen.cpp @@ -10,6 +10,7 @@ #include "DAGISelMatcher.h" #include "CodeGenDAGPatterns.h" #include "CodeGenRegisters.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallVector.h" @@ -172,15 +173,10 @@ void MatcherGen::InferPossibleTypes() { // diagnostics, which we know are impossible at this point. TreePattern &TP = *CGP.pf_begin()->second; - try { - bool MadeChange = true; - while (MadeChange) - MadeChange = PatWithNoTypes->ApplyTypeConstraints(TP, - true/*Ignore reg constraints*/); - } catch (...) { - errs() << "Type constraint application shouldn't fail!"; - abort(); - } + bool MadeChange = true; + while (MadeChange) + MadeChange = PatWithNoTypes->ApplyTypeConstraints(TP, + true/*Ignore reg constraints*/); } @@ -203,7 +199,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) { assert(N->isLeaf() && "Not a leaf?"); // Direct match against an integer constant. - if (IntInit *II = dynamic_cast(N->getLeafValue())) { + if (IntInit *II = dyn_cast(N->getLeafValue())) { // If this is the root of the dag we're matching, we emit a redundant opcode // check to ensure that this gets folded into the normal top-level // OpcodeSwitch. @@ -215,7 +211,7 @@ void MatcherGen::EmitLeafMatchCode(const TreePatternNode *N) { return AddMatcher(new CheckIntegerMatcher(II->getValue())); } - DefInit *DI = dynamic_cast(N->getLeafValue()); + DefInit *DI = dyn_cast(N->getLeafValue()); if (DI == 0) { errs() << "Unknown leaf kind: " << *N << "\n"; abort(); @@ -283,7 +279,7 @@ void MatcherGen::EmitOperatorMatchCode(const TreePatternNode *N, N->getOperator()->getName() == "or") && N->getChild(1)->isLeaf() && N->getChild(1)->getPredicateFns().empty() && N->getPredicateFns().empty()) { - if (IntInit *II = dynamic_cast(N->getChild(1)->getLeafValue())) { + if (IntInit *II = dyn_cast(N->getChild(1)->getLeafValue())) { if (!isPowerOf2_32(II->getValue())) { // Don't bother with single bits. 
// If this is at the root of the pattern, we emit a redundant // CheckOpcode so that the following checks get factored properly under @@ -572,14 +568,14 @@ void MatcherGen::EmitResultLeafAsOperand(const TreePatternNode *N, SmallVectorImpl &ResultOps) { assert(N->isLeaf() && "Must be a leaf"); - if (IntInit *II = dynamic_cast(N->getLeafValue())) { + if (IntInit *II = dyn_cast(N->getLeafValue())) { AddMatcher(new EmitIntegerMatcher(II->getValue(), N->getType(0))); ResultOps.push_back(NextRecordedOperandNo++); return; } // If this is an explicit register reference, handle it. - if (DefInit *DI = dynamic_cast(N->getLeafValue())) { + if (DefInit *DI = dyn_cast(N->getLeafValue())) { Record *Def = DI->getDef(); if (Def->isSubClassOf("Register")) { const CodeGenRegister *Reg = @@ -727,8 +723,7 @@ EmitResultInstructionAsOperand(const TreePatternNode *N, // Determine what to emit for this operand. Record *OperandNode = II.Operands[InstOpNo].Rec; - if ((OperandNode->isSubClassOf("PredicateOperand") || - OperandNode->isSubClassOf("OptionalDefOperand")) && + if (OperandNode->isSubClassOf("OperandWithDefaultOps") && !CGP.getDefaultOperand(OperandNode).DefaultOps.empty()) { // This is a predicate or optional def operand; emit the // 'default ops' operands. @@ -877,7 +872,7 @@ void MatcherGen::EmitResultOperand(const TreePatternNode *N, if (OpRec->isSubClassOf("SDNodeXForm")) return EmitResultSDNodeXFormAsOperand(N, ResultOps); errs() << "Unknown result node to emit code for: " << *N << '\n'; - throw std::string("Unknown node in result pattern!"); + PrintFatalError("Unknown node in result pattern!"); } void MatcherGen::EmitResultCode() { diff --git a/utils/TableGen/DFAPacketizerEmitter.cpp b/utils/TableGen/DFAPacketizerEmitter.cpp index 8bfecea..0ad25a5 100644 --- a/utils/TableGen/DFAPacketizerEmitter.cpp +++ b/utils/TableGen/DFAPacketizerEmitter.cpp @@ -17,6 +17,7 @@ #include "CodeGenTarget.h" #include "llvm/ADT/DenseSet.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/TableGen/Record.h" #include "llvm/TableGen/TableGenBackend.h" #include @@ -74,6 +75,8 @@ public: // Another way of thinking about this transition is we are mapping a NDFA with // two states [0x01] and [0x10] into a DFA with a single state [0x01, 0x10]. // +// A State instance also contains a collection of transitions from that state: +// a map from inputs to new states. // namespace { class State { @@ -82,10 +85,16 @@ class State { int stateNum; bool isInitial; std::set stateInfo; + typedef std::map TransitionMap; + TransitionMap Transitions; State(); State(const State &S); + bool operator<(const State &s) const { + return stateNum < s.stateNum; + } + // // canAddInsnClass - Returns true if an instruction of type InsnClass is a // valid transition from this state, i.e., can an instruction of type InsnClass @@ -100,38 +109,18 @@ class State { // which are possible from this state (PossibleStates). // void AddInsnClass(unsigned InsnClass, std::set &PossibleStates); + // + // addTransition - Add a transition from this state given the input InsnClass + // + void addTransition(unsigned InsnClass, State *To); + // + // hasTransition - Returns true if there is a transition from this state + // given the input InsnClass + // + bool hasTransition(unsigned InsnClass); }; } // End anonymous namespace. - -namespace { -struct Transition { - public: - static int currentTransitionNum; - int transitionNum; - State *from; - unsigned input; - State *to; - - Transition(State *from_, unsigned input_, State *to_); -}; -} // End anonymous namespace. 
- - -// -// Comparators to keep set of states sorted. -// -namespace { -struct ltState { - bool operator()(const State *s1, const State *s2) const; -}; - -struct ltTransition { - bool operator()(const Transition *s1, const Transition *s2) const; -}; -} // End anonymous namespace. - - // // class DFA: deterministic finite automaton for processor resource tracking. // @@ -139,36 +128,19 @@ namespace { class DFA { public: DFA(); + ~DFA(); // Set of states. Need to keep this sorted to emit the transition table. - std::set states; + typedef std::set > StateSet; + StateSet states; - // Map from a state to the list of transitions with that state as source. - std::map, ltState> - stateTransitions; State *currentState; - // Highest valued Input seen. - unsigned LargestInput; - // // Modify the DFA. // void initialize(); void addState(State *); - void addTransition(Transition *); - - // - // getTransition - Return the state when a transition is made from - // State From with Input I. If a transition is not found, return NULL. - // - State *getTransition(State *, unsigned); - - // - // isValidTransition: Predicate that checks if there is a valid transition - // from state From on input InsnClass. - // - bool isValidTransition(State *From, unsigned InsnClass); // // writeTable: Print out a table representing the DFA. @@ -179,7 +151,7 @@ public: // -// Constructors for State, Transition, and DFA +// Constructors and destructors for State and DFA // State::State() : stateNum(currentStateNum++), isInitial(false) {} @@ -189,22 +161,27 @@ State::State(const State &S) : stateNum(currentStateNum++), isInitial(S.isInitial), stateInfo(S.stateInfo) {} +DFA::DFA(): currentState(NULL) {} -Transition::Transition(State *from_, unsigned input_, State *to_) : - transitionNum(currentTransitionNum++), from(from_), input(input_), - to(to_) {} - - -DFA::DFA() : - LargestInput(0) {} - +DFA::~DFA() { + DeleteContainerPointers(states); +} -bool ltState::operator()(const State *s1, const State *s2) const { - return (s1->stateNum < s2->stateNum); +// +// addTransition - Add a transition from this state given the input InsnClass +// +void State::addTransition(unsigned InsnClass, State *To) { + assert(!Transitions.count(InsnClass) && + "Cannot have multiple transitions for the same input"); + Transitions[InsnClass] = To; } -bool ltTransition::operator()(const Transition *s1, const Transition *s2) const { - return (s1->input < s2->input); +// +// hasTransition - Returns true if there is a transition from this state +// given the input InsnClass +// +bool State::hasTransition(unsigned InsnClass) { + return Transitions.count(InsnClass) > 0; } // @@ -272,6 +249,7 @@ bool State::canAddInsnClass(unsigned InsnClass) const { void DFA::initialize() { + assert(currentState && "Missing current state"); currentState->isInitial = true; } @@ -282,47 +260,7 @@ void DFA::addState(State *S) { } -void DFA::addTransition(Transition *T) { - // Update LargestInput. - if (T->input > LargestInput) - LargestInput = T->input; - - // Add the new transition. - bool Added = stateTransitions[T->from].insert(T).second; - assert(Added && "Cannot have multiple states for the same input"); - (void)Added; -} - - -// -// getTransition - Return the state when a transition is made from -// State From with Input I. If a transition is not found, return NULL. -// -State *DFA::getTransition(State *From, unsigned I) { - // Do we have a transition from state From? 
- if (!stateTransitions.count(From)) - return NULL; - - // Do we have a transition from state From with Input I? - Transition TVal(NULL, I, NULL); - // Do not count this temporal instance - Transition::currentTransitionNum--; - std::set::iterator T = - stateTransitions[From].find(&TVal); - if (T != stateTransitions[From].end()) - return (*T)->to; - - return NULL; -} - - -bool DFA::isValidTransition(State *From, unsigned InsnClass) { - return (getTransition(From, InsnClass) != NULL); -} - - int State::currentStateNum = 0; -int Transition::currentTransitionNum = 0; DFAPacketizerEmitter::DFAPacketizerEmitter(RecordKeeper &R): TargetName(CodeGenTarget(R).getName()), @@ -341,7 +279,7 @@ DFAPacketizerEmitter::DFAPacketizerEmitter(RecordKeeper &R): // // void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) { - std::set::iterator SI = states.begin(); + DFA::StateSet::iterator SI = states.begin(); // This table provides a map to the beginning of the transitions for State s // in DFAStateInputTable. std::vector StateEntry(states.size()); @@ -353,18 +291,16 @@ void DFA::writeTableAndAPI(raw_ostream &OS, const std::string &TargetName) { // to construct the StateEntry table. int ValidTransitions = 0; for (unsigned i = 0; i < states.size(); ++i, ++SI) { + assert (((*SI)->stateNum == (int) i) && "Mismatch in state numbers"); StateEntry[i] = ValidTransitions; - for (unsigned j = 0; j <= LargestInput; ++j) { - assert (((*SI)->stateNum == (int) i) && "Mismatch in state numbers"); - State *To = getTransition(*SI, j); - if (To == NULL) - continue; - - OS << "{" << j << ", " - << To->stateNum + for (State::TransitionMap::iterator + II = (*SI)->Transitions.begin(), IE = (*SI)->Transitions.end(); + II != IE; ++II) { + OS << "{" << II->first << ", " + << II->second->stateNum << "}, "; - ++ValidTransitions; } + ValidTransitions += (*SI)->Transitions.size(); // If there are no valid transitions from this stage, we need a sentinel // transition. @@ -539,7 +475,7 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) { // If we haven't already created a transition for this input // and the state can accommodate this InsnClass, create a transition. // - if (!D.getTransition(current, InsnClass) && + if (!current->hasTransition(InsnClass) && current->canAddInsnClass(InsnClass)) { State *NewState = NULL; current->AddInsnClass(InsnClass, NewStateResources); @@ -559,10 +495,8 @@ void DFAPacketizerEmitter::run(raw_ostream &OS) { Visited[NewStateResources] = NewState; WorkList.push_back(NewState); } - - Transition *NewTransition = new Transition(current, InsnClass, - NewState); - D.addTransition(NewTransition); + + current->addTransition(InsnClass, NewState); } } } diff --git a/utils/TableGen/DisassemblerEmitter.cpp b/utils/TableGen/DisassemblerEmitter.cpp index 826465a..2d11d24 100644 --- a/utils/TableGen/DisassemblerEmitter.cpp +++ b/utils/TableGen/DisassemblerEmitter.cpp @@ -117,11 +117,9 @@ void EmitDisassembler(RecordKeeper &Records, raw_ostream &OS) { for (unsigned i = 0, e = numberedInstructions.size(); i != e; ++i) RecognizableInstr::processInstr(Tables, *numberedInstructions[i], i); - // FIXME: As long as we are using exceptions, might as well drop this to the - // actual conflict site. 
if (Tables.hasConflicts()) - throw TGError(Target.getTargetRecord()->getLoc(), - "Primary decode conflict"); + PrintFatalError(Target.getTargetRecord()->getLoc(), + "Primary decode conflict"); Tables.emit(OS); return; diff --git a/utils/TableGen/EDEmitter.cpp b/utils/TableGen/EDEmitter.cpp index 0c8b28d..ea25450 100644 --- a/utils/TableGen/EDEmitter.cpp +++ b/utils/TableGen/EDEmitter.cpp @@ -19,6 +19,7 @@ #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" #include "llvm/Support/raw_ostream.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include "llvm/TableGen/TableGenBackend.h" #include @@ -358,8 +359,8 @@ static int X86TypeFromOpName(LiteralConstantEmitter *type, /// X86PopulateOperands - Handles all the operands in an X86 instruction, adding /// the appropriate flags to their descriptors /// -/// @operandFlags - A reference the array of operand flag objects -/// @inst - The instruction to use as a source of information +/// \param operandTypes A reference the array of operand type objects +/// \param inst The instruction to use as a source of information static void X86PopulateOperands( LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS], const CodeGenInstruction &inst) { @@ -385,11 +386,12 @@ static void X86PopulateOperands( /// decorate1 - Decorates a named operand with a new flag /// -/// @operandFlags - The array of operand flag objects, which don't have names -/// @inst - The CodeGenInstruction, which provides a way to translate -/// between names and operand indices -/// @opName - The name of the operand -/// @flag - The name of the flag to add +/// \param operandFlags The array of operand flag objects, which don't have +/// names +/// \param inst The CodeGenInstruction, which provides a way to +// translate between names and operand indices +/// \param opName The name of the operand +/// \param opFlag The name of the flag to add static inline void decorate1( FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS], const CodeGenInstruction &inst, @@ -438,9 +440,9 @@ static inline void decorate1( /// instruction to determine what sort of an instruction it is and then adds /// the appropriate flags to the instruction and its operands /// -/// @arg instType - A reference to the type for the instruction as a whole -/// @arg operandFlags - A reference to the array of operand flag object pointers -/// @arg inst - A reference to the original instruction +/// \param instType A reference to the type for the instruction as a whole +/// \param operandFlags A reference to the array of operand flag object pointers +/// \param inst A reference to the original instruction static void X86ExtractSemantics( LiteralConstantEmitter &instType, FlagsConstantEmitter *(&operandFlags)[EDIS_MAX_OPERANDS], @@ -567,8 +569,8 @@ static void X86ExtractSemantics( /// ARMFlagFromOpName - Processes the name of a single ARM operand (which is /// actually its type) and translates it into an operand type /// -/// @arg type - The type object to set -/// @arg name - The name of the operand +/// \param type The type object to set +/// \param name The name of the operand static int ARMFlagFromOpName(LiteralConstantEmitter *type, const std::string &name) { REG("GPR"); @@ -750,8 +752,8 @@ static int ARMFlagFromOpName(LiteralConstantEmitter *type, /// ARMPopulateOperands - Handles all the operands in an ARM instruction, adding /// the appropriate flags to their descriptors /// -/// @operandFlags - A reference the array of operand flag objects -/// @inst - The instruction 
to use as a source of information +/// \param operandTypes A reference the array of operand type objects +/// \param inst The instruction to use as a source of information static void ARMPopulateOperands( LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS], const CodeGenInstruction &inst) { @@ -776,7 +778,7 @@ static void ARMPopulateOperands( errs() << "Operand type: " << rec.getName() << '\n'; errs() << "Operand name: " << operandInfo.Name << '\n'; errs() << "Instruction name: " << inst.TheDef->getName() << '\n'; - throw("Unhandled type in EDEmitter"); + PrintFatalError("Unhandled type in EDEmitter"); } } } @@ -790,10 +792,10 @@ static void ARMPopulateOperands( /// instruction to determine what sort of an instruction it is and then adds /// the appropriate flags to the instruction and its operands /// -/// @arg instType - A reference to the type for the instruction as a whole -/// @arg operandTypes - A reference to the array of operand type object pointers -/// @arg operandFlags - A reference to the array of operand flag object pointers -/// @arg inst - A reference to the original instruction +/// \param instType A reference to the type for the instruction as a whole +/// \param operandTypes A reference to the array of operand type object pointers +/// \param operandFlags A reference to the array of operand flag object pointers +/// \param inst A reference to the original instruction static void ARMExtractSemantics( LiteralConstantEmitter &instType, LiteralConstantEmitter *(&operandTypes)[EDIS_MAX_OPERANDS], @@ -831,8 +833,8 @@ static void ARMExtractSemantics( /// populateInstInfo - Fills an array of InstInfos with information about each /// instruction in a target /// -/// @arg infoArray - The array of InstInfo objects to populate -/// @arg target - The CodeGenTarget to use as a source of instructions +/// \param infoArray The array of InstInfo objects to populate +/// \param target The CodeGenTarget to use as a source of instructions static void populateInstInfo(CompoundConstantEmitter &infoArray, CodeGenTarget &target) { const std::vector &numberedInstructions = diff --git a/utils/TableGen/FastISelEmitter.cpp b/utils/TableGen/FastISelEmitter.cpp index ca784d0..8b1e7f9 100644 --- a/utils/TableGen/FastISelEmitter.cpp +++ b/utils/TableGen/FastISelEmitter.cpp @@ -245,7 +245,7 @@ struct OperandsSignature { if (Op->getType(0) != VT) return false; - DefInit *OpDI = dynamic_cast(Op->getLeafValue()); + DefInit *OpDI = dyn_cast(Op->getLeafValue()); if (!OpDI) return false; Record *OpLeafRec = OpDI->getDef(); @@ -406,13 +406,12 @@ static std::string PhyRegForNode(TreePatternNode *Op, if (!Op->isLeaf()) return PhysReg; - DefInit *OpDI = dynamic_cast(Op->getLeafValue()); - Record *OpLeafRec = OpDI->getDef(); + Record *OpLeafRec = cast(Op->getLeafValue())->getDef(); if (!OpLeafRec->isSubClassOf("Register")) return PhysReg; - PhysReg += static_cast(OpLeafRec->getValue( \ - "Namespace")->getValue())->getValue(); + PhysReg += cast(OpLeafRec->getValue("Namespace")->getValue()) + ->getValue(); PhysReg += "::"; PhysReg += Target.getRegBank().getReg(OpLeafRec)->getName(); return PhysReg; @@ -473,7 +472,7 @@ void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) { // a bit too complicated for now. 
if (!Dst->getChild(1)->isLeaf()) continue; - DefInit *SR = dynamic_cast(Dst->getChild(1)->getLeafValue()); + DefInit *SR = dyn_cast(Dst->getChild(1)->getLeafValue()); if (SR) SubRegNo = getQualifiedName(SR->getDef()); else @@ -550,7 +549,7 @@ void FastISelMap::collectPatterns(CodeGenDAGPatterns &CGP) { }; if (SimplePatterns[Operands][OpcodeName][VT][RetVT].count(PredicateCheck)) - throw TGError(Pattern.getSrcRecord()->getLoc(), + PrintFatalError(Pattern.getSrcRecord()->getLoc(), "Duplicate record in FastISel table!"); SimplePatterns[Operands][OpcodeName][VT][RetVT][PredicateCheck] = Memo; diff --git a/utils/TableGen/FixedLenDecoderEmitter.cpp b/utils/TableGen/FixedLenDecoderEmitter.cpp index e89c393..5cabcad 100644 --- a/utils/TableGen/FixedLenDecoderEmitter.cpp +++ b/utils/TableGen/FixedLenDecoderEmitter.cpp @@ -15,6 +15,7 @@ #define DEBUG_TYPE "decoder-emitter" #include "CodeGenTarget.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/SmallString.h" @@ -142,7 +143,7 @@ static int Value(bit_value_t V) { return ValueNotSet(V) ? -1 : (V == BIT_FALSE ? 0 : 1); } static bit_value_t bitFromBits(const BitsInit &bits, unsigned index) { - if (BitInit *bit = dynamic_cast(bits.getBit(index))) + if (BitInit *bit = dyn_cast(bits.getBit(index))) return bit->getValue() ? BIT_TRUE : BIT_FALSE; // The bit is uninitialized. @@ -741,7 +742,7 @@ void FixedLenDecoderEmitter::emitTable(formatted_raw_ostream &OS, switch (*I) { default: - throw "invalid decode table opcode"; + PrintFatalError("invalid decode table opcode"); case MCD::OPC_ExtractField: { ++I; unsigned Start = *I++; @@ -1757,8 +1758,8 @@ static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc, // for decoding register classes. // FIXME: This need to be extended to handle instructions with custom // decoder methods, and operands with (simple) MIOperandInfo's. - TypedInit *TI = dynamic_cast(NI->first); - RecordRecTy *Type = dynamic_cast(TI->getType()); + TypedInit *TI = cast(NI->first); + RecordRecTy *Type = cast(TI->getType()); Record *TypeRecord = Type->getRecord(); bool isReg = false; if (TypeRecord->isSubClassOf("RegisterOperand")) @@ -1770,7 +1771,7 @@ static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc, RecordVal *DecoderString = TypeRecord->getValue("DecoderMethod"); StringInit *String = DecoderString ? 
- dynamic_cast(DecoderString->getValue()) : 0; + dyn_cast(DecoderString->getValue()) : 0; if (!isReg && String && String->getValue() != "") Decoder = String->getValue(); @@ -1781,11 +1782,11 @@ static bool populateInstruction(const CodeGenInstruction &CGI, unsigned Opc, for (unsigned bi = 0; bi < Bits.getNumBits(); ++bi) { VarInit *Var = 0; - VarBitInit *BI = dynamic_cast(Bits.getBit(bi)); + VarBitInit *BI = dyn_cast(Bits.getBit(bi)); if (BI) - Var = dynamic_cast(BI->getVariable()); + Var = dyn_cast(BI->getBitVar()); else - Var = dynamic_cast(Bits.getBit(bi)); + Var = dyn_cast(Bits.getBit(bi)); if (!Var) { if (Base != ~0U) { @@ -1882,7 +1883,7 @@ static void emitDecodeInstruction(formatted_raw_ostream &OS) { << " uint64_t Bits = STI.getFeatureBits();\n" << "\n" << " const uint8_t *Ptr = DecodeTable;\n" - << " uint32_t CurFieldValue;\n" + << " uint32_t CurFieldValue = 0;\n" << " DecodeStatus S = MCDisassembler::Success;\n" << " for (;;) {\n" << " ptrdiff_t Loc = Ptr - DecodeTable;\n" diff --git a/utils/TableGen/InstrInfoEmitter.cpp b/utils/TableGen/InstrInfoEmitter.cpp index b41ad94..48d41d7 100644 --- a/utils/TableGen/InstrInfoEmitter.cpp +++ b/utils/TableGen/InstrInfoEmitter.cpp @@ -16,8 +16,10 @@ #include "CodeGenDAGPatterns.h" #include "CodeGenSchedule.h" #include "CodeGenTarget.h" +#include "TableGenBackends.h" #include "SequenceToOffsetTable.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include "llvm/TableGen/TableGenBackend.h" #include @@ -89,7 +91,7 @@ InstrInfoEmitter::GetOperandInfo(const CodeGenInstruction &Inst) { for (unsigned j = 0, e = Inst.Operands[i].MINumOperands; j != e; ++j) { OperandList.push_back(Inst.Operands[i]); - Record *OpR = dynamic_cast(MIOI->getArg(j))->getDef(); + Record *OpR = cast(MIOI->getArg(j))->getDef(); OperandList.back().Rec = OpR; } } @@ -299,16 +301,15 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num, const OperandInfoMapTy &OpInfo, raw_ostream &OS) { int MinOperands = 0; - if (!Inst.Operands.size() == 0) + if (!Inst.Operands.empty()) // Each logical operand can be multiple MI operands. MinOperands = Inst.Operands.back().MIOperandNo + Inst.Operands.back().MINumOperands; - Record *ItinDef = Inst.TheDef->getValueAsDef("Itinerary"); OS << " { "; OS << Num << ",\t" << MinOperands << ",\t" << Inst.Operands.NumDefs << ",\t" - << SchedModels.getItinClassIdx(ItinDef) << ",\t" + << SchedModels.getSchedClassIdx(Inst) << ",\t" << Inst.TheDef->getValueAsInt("Size") << ",\t0"; // Emit all of the target indepedent flags... @@ -343,13 +344,14 @@ void InstrInfoEmitter::emitRecord(const CodeGenInstruction &Inst, unsigned Num, // Emit all of the target-specific flags... 
BitsInit *TSF = Inst.TheDef->getValueAsBitsInit("TSFlags"); - if (!TSF) throw "no TSFlags?"; + if (!TSF) + PrintFatalError("no TSFlags?"); uint64_t Value = 0; for (unsigned i = 0, e = TSF->getNumBits(); i != e; ++i) { - if (BitInit *Bit = dynamic_cast(TSF->getBit(i))) + if (BitInit *Bit = dyn_cast(TSF->getBit(i))) Value |= uint64_t(Bit->getValue()) << i; else - throw "Invalid TSFlags bit in " + Inst.TheDef->getName(); + PrintFatalError("Invalid TSFlags bit in " + Inst.TheDef->getName()); } OS << ", 0x"; OS.write_hex(Value); @@ -416,6 +418,7 @@ namespace llvm { void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS) { InstrInfoEmitter(RK).run(OS); + EmitMapTable(RK, OS); } } // End llvm namespace diff --git a/utils/TableGen/IntrinsicEmitter.cpp b/utils/TableGen/IntrinsicEmitter.cpp index 155d1ab..fe55242 100644 --- a/utils/TableGen/IntrinsicEmitter.cpp +++ b/utils/TableGen/IntrinsicEmitter.cpp @@ -15,6 +15,7 @@ #include "CodeGenTarget.h" #include "SequenceToOffsetTable.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include "llvm/TableGen/StringMatcher.h" #include "llvm/TableGen/TableGenBackend.h" @@ -249,7 +250,7 @@ static void EncodeFixedValueType(MVT::SimpleValueType VT, if (EVT(VT).isInteger()) { unsigned BitWidth = EVT(VT).getSizeInBits(); switch (BitWidth) { - default: throw "unhandled integer type width in intrinsic!"; + default: PrintFatalError("unhandled integer type width in intrinsic!"); case 1: return Sig.push_back(IIT_I1); case 8: return Sig.push_back(IIT_I8); case 16: return Sig.push_back(IIT_I16); @@ -259,7 +260,7 @@ static void EncodeFixedValueType(MVT::SimpleValueType VT, } switch (VT) { - default: throw "unhandled MVT in intrinsic!"; + default: PrintFatalError("unhandled MVT in intrinsic!"); case MVT::f32: return Sig.push_back(IIT_F32); case MVT::f64: return Sig.push_back(IIT_F64); case MVT::Metadata: return Sig.push_back(IIT_METADATA); @@ -328,7 +329,7 @@ static void EncodeFixedType(Record *R, std::vector &ArgCodes, if (EVT(VT).isVector()) { EVT VVT = VT; switch (VVT.getVectorNumElements()) { - default: throw "unhandled vector type width in intrinsic!"; + default: PrintFatalError("unhandled vector type width in intrinsic!"); case 2: Sig.push_back(IIT_V2); break; case 4: Sig.push_back(IIT_V4); break; case 8: Sig.push_back(IIT_V8); break; @@ -510,10 +511,10 @@ EmitAttributes(const std::vector &Ints, raw_ostream &OS) { OS << "// Add parameter attributes that are not common to all intrinsics.\n"; OS << "#ifdef GET_INTRINSIC_ATTRIBUTES\n"; if (TargetOnly) - OS << "static AttrListPtr getAttributes(" << TargetPrefix + OS << "static AttrListPtr getAttributes(LLVMContext &C, " << TargetPrefix << "Intrinsic::ID id) {\n"; else - OS << "AttrListPtr Intrinsic::getAttributes(ID id) {\n"; + OS << "AttrListPtr Intrinsic::getAttributes(LLVMContext &C, ID id) {\n"; // Compute the maximum number of attribute arguments and the map typedef std::map &Ints, raw_ostream &OS) { OS << " AttributeWithIndex AWI[" << maxArgAttrs+1 << "];\n"; OS << " unsigned NumAttrs = 0;\n"; OS << " if (id != 0) {\n"; + OS << " SmallVector AttrVec;\n"; OS << " switch(IntrinsicsToAttributesMap[id - "; if (TargetOnly) OS << "Intrinsic::num_intrinsics"; @@ -564,58 +566,49 @@ EmitAttributes(const std::vector &Ints, raw_ostream &OS) { unsigned numAttrs = 0; // The argument attributes are alreadys sorted by argument index. 
- for (unsigned ai = 0, ae = intrinsic.ArgumentAttributes.size(); ai != ae;) { - unsigned argNo = intrinsic.ArgumentAttributes[ai].first; + unsigned ai = 0, ae = intrinsic.ArgumentAttributes.size(); + if (ae) { + while (ai != ae) { + unsigned argNo = intrinsic.ArgumentAttributes[ai].first; - OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(" - << argNo+1 << ", "; + OS << " AttrVec.clear();\n"; - bool moreThanOne = false; + do { + switch (intrinsic.ArgumentAttributes[ai].second) { + case CodeGenIntrinsic::NoCapture: + OS << " AttrVec.push_back(Attributes::NoCapture);\n"; + break; + } - do { - if (moreThanOne) OS << '|'; + ++ai; + } while (ai != ae && intrinsic.ArgumentAttributes[ai].first == argNo); - switch (intrinsic.ArgumentAttributes[ai].second) { - case CodeGenIntrinsic::NoCapture: - OS << "Attribute::NoCapture"; - break; - } - - ++ai; - moreThanOne = true; - } while (ai != ae && intrinsic.ArgumentAttributes[ai].first == argNo); - - OS << ");\n"; + OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(C, " + << argNo+1 << ", AttrVec);\n"; + } } ModRefKind modRef = getModRefKind(intrinsic); if (!intrinsic.canThrow || modRef || intrinsic.isNoReturn) { - OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(~0, "; - bool Emitted = false; - if (!intrinsic.canThrow) { - OS << "Attribute::NoUnwind"; - Emitted = true; - } - - if (intrinsic.isNoReturn) { - if (Emitted) OS << '|'; - OS << "Attribute::NoReturn"; - Emitted = true; - } + OS << " AttrVec.clear();\n"; + + if (!intrinsic.canThrow) + OS << " AttrVec.push_back(Attributes::NoUnwind);\n"; + if (intrinsic.isNoReturn) + OS << " AttrVec.push_back(Attributes::NoReturn);\n"; switch (modRef) { case MRK_none: break; case MRK_readonly: - if (Emitted) OS << '|'; - OS << "Attribute::ReadOnly"; + OS << " AttrVec.push_back(Attributes::ReadOnly);\n"; break; case MRK_readnone: - if (Emitted) OS << '|'; - OS << "Attribute::ReadNone"; + OS << " AttrVec.push_back(Attributes::ReadNone);\n"; break; } - OS << ");\n"; + OS << " AWI[" << numAttrs++ << "] = AttributeWithIndex::get(C, " + << "AttrListPtr::FunctionIndex, AttrVec);\n"; } if (numAttrs) { @@ -628,7 +621,7 @@ EmitAttributes(const std::vector &Ints, raw_ostream &OS) { OS << " }\n"; OS << " }\n"; - OS << " return AttrListPtr::get(ArrayRef(AWI, " + OS << " return AttrListPtr::get(C, ArrayRef(AWI, " "NumAttrs));\n"; OS << "}\n"; OS << "#endif // GET_INTRINSIC_ATTRIBUTES\n\n"; @@ -700,8 +693,8 @@ EmitIntrinsicToGCCBuiltinMap(const std::vector &Ints, if (!BIM.insert(std::make_pair(Ints[i].GCCBuiltinName, Ints[i].EnumName)).second) - throw "Intrinsic '" + Ints[i].TheDef->getName() + - "': duplicate GCC builtin name!"; + PrintFatalError("Intrinsic '" + Ints[i].TheDef->getName() + + "': duplicate GCC builtin name!"); } } diff --git a/utils/TableGen/Makefile b/utils/TableGen/Makefile index 0c4619d..9bfd94b 100644 --- a/utils/TableGen/Makefile +++ b/utils/TableGen/Makefile @@ -10,8 +10,6 @@ LEVEL = ../.. TOOLNAME = llvm-tblgen USEDLIBS = LLVMTableGen.a LLVMSupport.a -REQUIRES_EH := 1 -REQUIRES_RTTI := 1 # This tool has no plugins, optimize startup time. 
TOOL_NO_EXPORTS = 1 diff --git a/utils/TableGen/PseudoLoweringEmitter.cpp b/utils/TableGen/PseudoLoweringEmitter.cpp index 8d9d419..64aaee7 100644 --- a/utils/TableGen/PseudoLoweringEmitter.cpp +++ b/utils/TableGen/PseudoLoweringEmitter.cpp @@ -74,7 +74,7 @@ addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn, IndexedMap &OperandMap, unsigned BaseIdx) { unsigned OpsAdded = 0; for (unsigned i = 0, e = Dag->getNumArgs(); i != e; ++i) { - if (DefInit *DI = dynamic_cast(Dag->getArg(i))) { + if (DefInit *DI = dyn_cast(Dag->getArg(i))) { // Physical register reference. Explicit check for the special case // "zero_reg" definition. if (DI->getDef()->isSubClassOf("Register") || @@ -90,7 +90,7 @@ addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn, // FIXME: We probably shouldn't ever get a non-zero BaseIdx here. assert(BaseIdx == 0 && "Named subargument in pseudo expansion?!"); if (DI->getDef() != Insn.Operands[BaseIdx + i].Rec) - throw TGError(Rec->getLoc(), + PrintFatalError(Rec->getLoc(), "Pseudo operand type '" + DI->getDef()->getName() + "' does not match expansion operand type '" + Insn.Operands[BaseIdx + i].Rec->getName() + "'"); @@ -100,11 +100,11 @@ addDagOperandMapping(Record *Rec, DagInit *Dag, CodeGenInstruction &Insn, for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I) OperandMap[BaseIdx + i + I].Kind = OpData::Operand; OpsAdded += Insn.Operands[i].MINumOperands; - } else if (IntInit *II = dynamic_cast(Dag->getArg(i))) { + } else if (IntInit *II = dyn_cast(Dag->getArg(i))) { OperandMap[BaseIdx + i].Kind = OpData::Imm; OperandMap[BaseIdx + i].Data.Imm = II->getValue(); ++OpsAdded; - } else if (DagInit *SubDag = dynamic_cast(Dag->getArg(i))) { + } else if (DagInit *SubDag = dyn_cast(Dag->getArg(i))) { // Just add the operands recursively. This is almost certainly // a constant value for a complex operand (> 1 MI operand). unsigned NewOps = @@ -127,24 +127,24 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) { assert(Dag && "Missing result instruction in pseudo expansion!"); DEBUG(dbgs() << " Result: " << *Dag << "\n"); - DefInit *OpDef = dynamic_cast(Dag->getOperator()); + DefInit *OpDef = dyn_cast(Dag->getOperator()); if (!OpDef) - throw TGError(Rec->getLoc(), Rec->getName() + + PrintFatalError(Rec->getLoc(), Rec->getName() + " has unexpected operator type!"); Record *Operator = OpDef->getDef(); if (!Operator->isSubClassOf("Instruction")) - throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() + - "' is not an instruction!"); + PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() + + "' is not an instruction!"); CodeGenInstruction Insn(Operator); if (Insn.isCodeGenOnly || Insn.isPseudo) - throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() + - "' cannot be another pseudo instruction!"); + PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() + + "' cannot be another pseudo instruction!"); if (Insn.Operands.size() != Dag->getNumArgs()) - throw TGError(Rec->getLoc(), "Pseudo result '" + Operator->getName() + - "' operand count mismatch"); + PrintFatalError(Rec->getLoc(), "Pseudo result '" + Operator->getName() + + "' operand count mismatch"); unsigned NumMIOperands = 0; for (unsigned i = 0, e = Insn.Operands.size(); i != e; ++i) @@ -156,7 +156,7 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) { // If there are more operands that weren't in the DAG, they have to // be operands that have default values, or we have an error. 
Currently, - // PredicateOperand and OptionalDefOperand both have default values. + // Operands that are a sublass of OperandWithDefaultOp have default values. // Validate that each result pattern argument has a matching (by name) @@ -179,9 +179,9 @@ void PseudoLoweringEmitter::evaluateExpansion(Record *Rec) { StringMap::iterator SourceOp = SourceOperands.find(Dag->getArgName(i)); if (SourceOp == SourceOperands.end()) - throw TGError(Rec->getLoc(), - "Pseudo output operand '" + Dag->getArgName(i) + - "' has no matching source operand."); + PrintFatalError(Rec->getLoc(), + "Pseudo output operand '" + Dag->getArgName(i) + + "' has no matching source operand."); // Map the source operand to the destination operand index for each // MachineInstr operand. for (unsigned I = 0, E = Insn.Operands[i].MINumOperands; I != E; ++I) @@ -267,7 +267,7 @@ void PseudoLoweringEmitter::emitLoweringEmitter(raw_ostream &o) { void PseudoLoweringEmitter::run(raw_ostream &o) { Record *ExpansionClass = Records.getClass("PseudoInstExpansion"); - Record *InstructionClass = Records.getClass("PseudoInstExpansion"); + Record *InstructionClass = Records.getClass("Instruction"); assert(ExpansionClass && "PseudoInstExpansion class definition missing!"); assert(InstructionClass && "Instruction class definition missing!"); diff --git a/utils/TableGen/RegisterInfoEmitter.cpp b/utils/TableGen/RegisterInfoEmitter.cpp index 02546df..95b6267 100644 --- a/utils/TableGen/RegisterInfoEmitter.cpp +++ b/utils/TableGen/RegisterInfoEmitter.cpp @@ -62,6 +62,8 @@ private: void EmitRegUnitPressure(raw_ostream &OS, const CodeGenRegBank &RegBank, const std::string &ClassName); + void emitComposeSubRegIndices(raw_ostream &OS, CodeGenRegBank &RegBank, + const std::string &ClassName); }; } // End anonymous namespace @@ -325,7 +327,7 @@ RegisterInfoEmitter::EmitRegMappingTables(raw_ostream &OS, if (!V || !V->getValue()) continue; - DefInit *DI = dynamic_cast(V->getValue()); + DefInit *DI = cast(V->getValue()); Record *Alias = DI->getDef(); DwarfRegNums[Reg] = DwarfRegNums[Alias]; } @@ -530,6 +532,102 @@ static void printDiff16(raw_ostream &OS, uint16_t Val) { OS << Val; } +// Try to combine Idx's compose map into Vec if it is compatible. +// Return false if it's not possible. +static bool combine(const CodeGenSubRegIndex *Idx, + SmallVectorImpl &Vec) { + const CodeGenSubRegIndex::CompMap &Map = Idx->getComposites(); + for (CodeGenSubRegIndex::CompMap::const_iterator + I = Map.begin(), E = Map.end(); I != E; ++I) { + CodeGenSubRegIndex *&Entry = Vec[I->first->EnumValue - 1]; + if (Entry && Entry != I->second) + return false; + } + + // All entries are compatible. Make it so. + for (CodeGenSubRegIndex::CompMap::const_iterator + I = Map.begin(), E = Map.end(); I != E; ++I) + Vec[I->first->EnumValue - 1] = I->second; + return true; +} + +static const char *getMinimalTypeForRange(uint64_t Range) { + assert(Range < 0xFFFFFFFFULL && "Enum too large"); + if (Range > 0xFFFF) + return "uint32_t"; + if (Range > 0xFF) + return "uint16_t"; + return "uint8_t"; +} + +void +RegisterInfoEmitter::emitComposeSubRegIndices(raw_ostream &OS, + CodeGenRegBank &RegBank, + const std::string &ClName) { + ArrayRef SubRegIndices = RegBank.getSubRegIndices(); + OS << "unsigned " << ClName + << "::composeSubRegIndicesImpl(unsigned IdxA, unsigned IdxB) const {\n"; + + // Many sub-register indexes are composition-compatible, meaning that + // + // compose(IdxA, IdxB) == compose(IdxA', IdxB) + // + // for many IdxA, IdxA' pairs. Not all sub-register indexes can be composed. 
+ // The illegal entries can be use as wildcards to compress the table further. + + // Map each Sub-register index to a compatible table row. + SmallVector RowMap; + SmallVector, 4> Rows; + + for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) { + unsigned Found = ~0u; + for (unsigned r = 0, re = Rows.size(); r != re; ++r) { + if (combine(SubRegIndices[i], Rows[r])) { + Found = r; + break; + } + } + if (Found == ~0u) { + Found = Rows.size(); + Rows.resize(Found + 1); + Rows.back().resize(SubRegIndices.size()); + combine(SubRegIndices[i], Rows.back()); + } + RowMap.push_back(Found); + } + + // Output the row map if there is multiple rows. + if (Rows.size() > 1) { + OS << " static const " << getMinimalTypeForRange(Rows.size()) + << " RowMap[" << SubRegIndices.size() << "] = {\n "; + for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) + OS << RowMap[i] << ", "; + OS << "\n };\n"; + } + + // Output the rows. + OS << " static const " << getMinimalTypeForRange(SubRegIndices.size()+1) + << " Rows[" << Rows.size() << "][" << SubRegIndices.size() << "] = {\n"; + for (unsigned r = 0, re = Rows.size(); r != re; ++r) { + OS << " { "; + for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) + if (Rows[r][i]) + OS << Rows[r][i]->EnumValue << ", "; + else + OS << "0, "; + OS << "},\n"; + } + OS << " };\n\n"; + + OS << " --IdxA; assert(IdxA < " << SubRegIndices.size() << ");\n" + << " --IdxB; assert(IdxB < " << SubRegIndices.size() << ");\n"; + if (Rows.size() > 1) + OS << " return Rows[RowMap[IdxA]][IdxB];\n"; + else + OS << " return Rows[0][IdxB];\n"; + OS << "}\n\n"; +} + // // runMCDesc - Print out MC register descriptions. // @@ -751,7 +849,7 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target, BitsInit *BI = Reg->getValueAsBitsInit("HWEncoding"); uint64_t Value = 0; for (unsigned b = 0, be = BI->getNumBits(); b != be; ++b) { - if (BitInit *B = dynamic_cast(BI->getBit(b))) + if (BitInit *B = dyn_cast(BI->getBit(b))) Value |= (uint64_t)B->getValue() << b; } OS << " " << Value << ",\n"; @@ -770,7 +868,7 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target, << TargetName << "RegDiffLists, " << TargetName << "RegStrings, " << TargetName << "SubRegIdxLists, " - << SubRegIndices.size() << ",\n" + << (SubRegIndices.size() + 1) << ",\n" << " " << TargetName << "RegEncodingTable);\n\n"; EmitRegMapping(OS, Regs, false); @@ -802,16 +900,17 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target, << " virtual bool needsStackRealignment(const MachineFunction &) const\n" << " { return false; }\n"; if (!RegBank.getSubRegIndices().empty()) { - OS << " unsigned composeSubRegIndices(unsigned, unsigned) const;\n" - << " const TargetRegisterClass *" + OS << " virtual unsigned composeSubRegIndicesImpl" + << "(unsigned, unsigned) const;\n" + << " virtual const TargetRegisterClass *" "getSubClassWithSubReg(const TargetRegisterClass*, unsigned) const;\n"; } - OS << " const RegClassWeight &getRegClassWeight(" + OS << " virtual const RegClassWeight &getRegClassWeight(" << "const TargetRegisterClass *RC) const;\n" - << " unsigned getNumRegPressureSets() const;\n" - << " const char *getRegPressureSetName(unsigned Idx) const;\n" - << " unsigned getRegPressureSetLimit(unsigned Idx) const;\n" - << " const int *getRegClassPressureSets(" + << " virtual unsigned getNumRegPressureSets() const;\n" + << " virtual const char *getRegPressureSetName(unsigned Idx) const;\n" + << " virtual unsigned getRegPressureSetLimit(unsigned Idx) const;\n" + << " 
virtual const int *getRegClassPressureSets(" << "const TargetRegisterClass *RC) const;\n" << "};\n\n"; @@ -876,15 +975,23 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target, VTSeqs.emit(OS, printSimpleValueType, "MVT::Other"); OS << "};\n"; - // Emit SubRegIndex names, skipping 0 - OS << "\nstatic const char *const SubRegIndexTable[] = { \""; + // Emit SubRegIndex names, skipping 0. + OS << "\nstatic const char *const SubRegIndexNameTable[] = { \""; for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) { OS << SubRegIndices[i]->getName(); - if (i+1 != e) + if (i + 1 != e) OS << "\", \""; } OS << "\" };\n\n"; + // Emit SubRegIndex lane masks, including 0. + OS << "\nstatic const unsigned SubRegIndexLaneMaskTable[] = {\n ~0u,\n"; + for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) { + OS << format(" 0x%08x, // ", SubRegIndices[i]->LaneMask) + << SubRegIndices[i]->getName() << '\n'; + } + OS << " };\n\n"; + OS << "\n"; // Now that all of the structs have been emitted, emit the instances. @@ -1046,31 +1153,8 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target, std::string ClassName = Target.getName() + "GenRegisterInfo"; - // Emit composeSubRegIndices - if (!SubRegIndices.empty()) { - OS << "unsigned " << ClassName - << "::composeSubRegIndices(unsigned IdxA, unsigned IdxB) const {\n" - << " switch (IdxA) {\n" - << " default:\n return IdxB;\n"; - for (unsigned i = 0, e = SubRegIndices.size(); i != e; ++i) { - bool Open = false; - for (unsigned j = 0; j != e; ++j) { - if (CodeGenSubRegIndex *Comp = - SubRegIndices[i]->compose(SubRegIndices[j])) { - if (!Open) { - OS << " case " << SubRegIndices[i]->getQualifiedName() - << ": switch(IdxB) {\n default: return IdxB;\n"; - Open = true; - } - OS << " case " << SubRegIndices[j]->getQualifiedName() - << ": return " << Comp->getQualifiedName() << ";\n"; - } - } - if (Open) - OS << " }\n"; - } - OS << " }\n}\n\n"; - } + if (!SubRegIndices.empty()) + emitComposeSubRegIndices(OS, RegBank, ClassName); // Emit getSubClassWithSubReg. 
if (!SubRegIndices.empty()) { @@ -1084,7 +1168,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target, else if (RegisterClasses.size() < UINT16_MAX) OS << " static const uint16_t Table["; else - throw "Too many register classes."; + PrintFatalError("Too many register classes."); OS << RegisterClasses.size() << "][" << SubRegIndices.size() << "] = {\n"; for (unsigned rci = 0, rce = RegisterClasses.size(); rci != rce; ++rci) { const CodeGenRegisterClass &RC = *RegisterClasses[rci]; @@ -1122,7 +1206,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target, << "(unsigned RA, unsigned DwarfFlavour, unsigned EHFlavour)\n" << " : TargetRegisterInfo(" << TargetName << "RegInfoDesc" << ", RegisterClasses, RegisterClasses+" << RegisterClasses.size() <<",\n" - << " SubRegIndexTable) {\n" + << " SubRegIndexNameTable, SubRegIndexLaneMaskTable) {\n" << " InitMCRegisterInfo(" << TargetName << "RegDesc, " << Regs.size()+1 << ", RA,\n " << TargetName << "MCRegisterClasses, " << RegisterClasses.size() << ",\n" @@ -1131,7 +1215,7 @@ RegisterInfoEmitter::runTargetDesc(raw_ostream &OS, CodeGenTarget &Target, << " " << TargetName << "RegDiffLists,\n" << " " << TargetName << "RegStrings,\n" << " " << TargetName << "SubRegIdxLists,\n" - << " " << SubRegIndices.size() << ",\n" + << " " << SubRegIndices.size() + 1 << ",\n" << " " << TargetName << "RegEncodingTable);\n\n"; EmitRegMapping(OS, Regs, true); diff --git a/utils/TableGen/SequenceToOffsetTable.h b/utils/TableGen/SequenceToOffsetTable.h index d8ab2ee..d4db152 100644 --- a/utils/TableGen/SequenceToOffsetTable.h +++ b/utils/TableGen/SequenceToOffsetTable.h @@ -29,8 +29,8 @@ namespace llvm { /// Compute the layout of a table that contains all the sequences, possibly by /// reusing entries. /// -/// @param SeqT The sequence container. (vector or string). -/// @param Less A stable comparator for SeqT elements. +/// @tparam SeqT The sequence container. (vector or string). +/// @tparam Less A stable comparator for SeqT elements. template > class SequenceToOffsetTable { typedef typename SeqT::value_type ElemT; @@ -82,7 +82,7 @@ public: } bool empty() const { return Seqs.empty(); } - + /// layout - Computes the final table layout. void layout() { assert(Entries == 0 && "Can only call layout() once"); diff --git a/utils/TableGen/SetTheory.cpp b/utils/TableGen/SetTheory.cpp index 46e6db1..0dd9853 100644 --- a/utils/TableGen/SetTheory.cpp +++ b/utils/TableGen/SetTheory.cpp @@ -27,20 +27,20 @@ typedef SetTheory::RecVec RecVec; // (add a, b, ...) Evaluate and union all arguments. struct AddOp : public SetTheory::Operator { - void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) { - ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts); + void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef Loc) { + ST.evaluate(Expr->arg_begin(), Expr->arg_end(), Elts, Loc); } }; // (sub Add, Sub, ...) Set difference. 
struct SubOp : public SetTheory::Operator { - void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) { + void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef Loc) { if (Expr->arg_size() < 2) - throw "Set difference needs at least two arguments: " + - Expr->getAsString(); + PrintFatalError(Loc, "Set difference needs at least two arguments: " + + Expr->getAsString()); RecSet Add, Sub; - ST.evaluate(*Expr->arg_begin(), Add); - ST.evaluate(Expr->arg_begin() + 1, Expr->arg_end(), Sub); + ST.evaluate(*Expr->arg_begin(), Add, Loc); + ST.evaluate(Expr->arg_begin() + 1, Expr->arg_end(), Sub, Loc); for (RecSet::iterator I = Add.begin(), E = Add.end(); I != E; ++I) if (!Sub.count(*I)) Elts.insert(*I); @@ -49,12 +49,13 @@ struct SubOp : public SetTheory::Operator { // (and S1, S2) Set intersection. struct AndOp : public SetTheory::Operator { - void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) { + void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef Loc) { if (Expr->arg_size() != 2) - throw "Set intersection requires two arguments: " + Expr->getAsString(); + PrintFatalError(Loc, "Set intersection requires two arguments: " + + Expr->getAsString()); RecSet S1, S2; - ST.evaluate(Expr->arg_begin()[0], S1); - ST.evaluate(Expr->arg_begin()[1], S2); + ST.evaluate(Expr->arg_begin()[0], S1, Loc); + ST.evaluate(Expr->arg_begin()[1], S2, Loc); for (RecSet::iterator I = S1.begin(), E = S1.end(); I != E; ++I) if (S2.count(*I)) Elts.insert(*I); @@ -65,17 +66,19 @@ struct AndOp : public SetTheory::Operator { struct SetIntBinOp : public SetTheory::Operator { virtual void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N, - RecSet &Elts) =0; + RecSet &Elts, ArrayRef Loc) =0; - void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) { + void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef Loc) { if (Expr->arg_size() != 2) - throw "Operator requires (Op Set, Int) arguments: " + Expr->getAsString(); + PrintFatalError(Loc, "Operator requires (Op Set, Int) arguments: " + + Expr->getAsString()); RecSet Set; - ST.evaluate(Expr->arg_begin()[0], Set); - IntInit *II = dynamic_cast(Expr->arg_begin()[1]); + ST.evaluate(Expr->arg_begin()[0], Set, Loc); + IntInit *II = dyn_cast(Expr->arg_begin()[1]); if (!II) - throw "Second argument must be an integer: " + Expr->getAsString(); - apply2(ST, Expr, Set, II->getValue(), Elts); + PrintFatalError(Loc, "Second argument must be an integer: " + + Expr->getAsString()); + apply2(ST, Expr, Set, II->getValue(), Elts, Loc); } }; @@ -83,9 +86,10 @@ struct SetIntBinOp : public SetTheory::Operator { struct ShlOp : public SetIntBinOp { void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N, - RecSet &Elts) { + RecSet &Elts, ArrayRef Loc) { if (N < 0) - throw "Positive shift required: " + Expr->getAsString(); + PrintFatalError(Loc, "Positive shift required: " + + Expr->getAsString()); if (unsigned(N) < Set.size()) Elts.insert(Set.begin() + N, Set.end()); } @@ -95,9 +99,10 @@ struct ShlOp : public SetIntBinOp { struct TruncOp : public SetIntBinOp { void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N, - RecSet &Elts) { + RecSet &Elts, ArrayRef Loc) { if (N < 0) - throw "Positive length required: " + Expr->getAsString(); + PrintFatalError(Loc, "Positive length required: " + + Expr->getAsString()); if (unsigned(N) > Set.size()) N = Set.size(); Elts.insert(Set.begin(), Set.begin() + N); @@ -112,7 +117,7 @@ struct RotOp : public SetIntBinOp { void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N, - RecSet &Elts) { + RecSet 
&Elts, ArrayRef Loc) { if (Reverse) N = -N; // N > 0 -> rotate left, N < 0 -> rotate right. @@ -131,9 +136,10 @@ struct RotOp : public SetIntBinOp { struct DecimateOp : public SetIntBinOp { void apply2(SetTheory &ST, DagInit *Expr, RecSet &Set, int64_t N, - RecSet &Elts) { + RecSet &Elts, ArrayRef Loc) { if (N <= 0) - throw "Positive stride required: " + Expr->getAsString(); + PrintFatalError(Loc, "Positive stride required: " + + Expr->getAsString()); for (unsigned I = 0; I < Set.size(); I += N) Elts.insert(Set[I]); } @@ -141,12 +147,12 @@ struct DecimateOp : public SetIntBinOp { // (interleave S1, S2, ...) Interleave elements of the arguments. struct InterleaveOp : public SetTheory::Operator { - void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) { + void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef Loc) { // Evaluate the arguments individually. SmallVector Args(Expr->getNumArgs()); unsigned MaxSize = 0; for (unsigned i = 0, e = Expr->getNumArgs(); i != e; ++i) { - ST.evaluate(Expr->getArg(i), Args[i]); + ST.evaluate(Expr->getArg(i), Args[i], Loc); MaxSize = std::max(MaxSize, unsigned(Args[i].size())); } // Interleave arguments into Elts. @@ -159,41 +165,42 @@ struct InterleaveOp : public SetTheory::Operator { // (sequence "Format", From, To) Generate a sequence of records by name. struct SequenceOp : public SetTheory::Operator { - void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts) { + void apply(SetTheory &ST, DagInit *Expr, RecSet &Elts, ArrayRef Loc) { int Step = 1; if (Expr->arg_size() > 4) - throw "Bad args to (sequence \"Format\", From, To): " + - Expr->getAsString(); + PrintFatalError(Loc, "Bad args to (sequence \"Format\", From, To): " + + Expr->getAsString()); else if (Expr->arg_size() == 4) { - if (IntInit *II = dynamic_cast(Expr->arg_begin()[3])) { + if (IntInit *II = dyn_cast(Expr->arg_begin()[3])) { Step = II->getValue(); } else - throw "Stride must be an integer: " + Expr->getAsString(); + PrintFatalError(Loc, "Stride must be an integer: " + + Expr->getAsString()); } std::string Format; - if (StringInit *SI = dynamic_cast(Expr->arg_begin()[0])) + if (StringInit *SI = dyn_cast(Expr->arg_begin()[0])) Format = SI->getValue(); else - throw "Format must be a string: " + Expr->getAsString(); + PrintFatalError(Loc, "Format must be a string: " + Expr->getAsString()); int64_t From, To; - if (IntInit *II = dynamic_cast(Expr->arg_begin()[1])) + if (IntInit *II = dyn_cast(Expr->arg_begin()[1])) From = II->getValue(); else - throw "From must be an integer: " + Expr->getAsString(); + PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString()); if (From < 0 || From >= (1 << 30)) - throw "From out of range"; + PrintFatalError(Loc, "From out of range"); - if (IntInit *II = dynamic_cast(Expr->arg_begin()[2])) + if (IntInit *II = dyn_cast(Expr->arg_begin()[2])) To = II->getValue(); else - throw "From must be an integer: " + Expr->getAsString(); + PrintFatalError(Loc, "From must be an integer: " + Expr->getAsString()); if (To < 0 || To >= (1 << 30)) - throw "To out of range"; + PrintFatalError(Loc, "To out of range"); RecordKeeper &Records = - dynamic_cast(*Expr->getOperator()).getDef()->getRecords(); + cast(Expr->getOperator())->getDef()->getRecords(); Step *= From <= To ? 
1 : -1; while (true) { @@ -206,7 +213,8 @@ struct SequenceOp : public SetTheory::Operator { OS << format(Format.c_str(), unsigned(From)); Record *Rec = Records.getDef(OS.str()); if (!Rec) - throw "No def named '" + Name + "': " + Expr->getAsString(); + PrintFatalError(Loc, "No def named '" + Name + "': " + + Expr->getAsString()); // Try to reevaluate Rec in case it is a set. if (const RecVec *Result = ST.expand(Rec)) Elts.insert(Result->begin(), Result->end()); @@ -225,7 +233,7 @@ struct FieldExpander : public SetTheory::Expander { FieldExpander(StringRef fn) : FieldName(fn) {} void expand(SetTheory &ST, Record *Def, RecSet &Elts) { - ST.evaluate(Def->getValueInit(FieldName), Elts); + ST.evaluate(Def->getValueInit(FieldName), Elts, Def->getLoc()); } }; } // end anonymous namespace @@ -259,9 +267,9 @@ void SetTheory::addFieldExpander(StringRef ClassName, StringRef FieldName) { addExpander(ClassName, new FieldExpander(FieldName)); } -void SetTheory::evaluate(Init *Expr, RecSet &Elts) { +void SetTheory::evaluate(Init *Expr, RecSet &Elts, ArrayRef Loc) { // A def in a list can be a just an element, or it may expand. - if (DefInit *Def = dynamic_cast(Expr)) { + if (DefInit *Def = dyn_cast(Expr)) { if (const RecVec *Result = expand(Def->getDef())) return Elts.insert(Result->begin(), Result->end()); Elts.insert(Def->getDef()); @@ -269,20 +277,20 @@ void SetTheory::evaluate(Init *Expr, RecSet &Elts) { } // Lists simply expand. - if (ListInit *LI = dynamic_cast(Expr)) - return evaluate(LI->begin(), LI->end(), Elts); + if (ListInit *LI = dyn_cast(Expr)) + return evaluate(LI->begin(), LI->end(), Elts, Loc); // Anything else must be a DAG. - DagInit *DagExpr = dynamic_cast(Expr); + DagInit *DagExpr = dyn_cast(Expr); if (!DagExpr) - throw "Invalid set element: " + Expr->getAsString(); - DefInit *OpInit = dynamic_cast(DagExpr->getOperator()); + PrintFatalError(Loc, "Invalid set element: " + Expr->getAsString()); + DefInit *OpInit = dyn_cast(DagExpr->getOperator()); if (!OpInit) - throw "Bad set expression: " + Expr->getAsString(); + PrintFatalError(Loc, "Bad set expression: " + Expr->getAsString()); Operator *Op = Operators.lookup(OpInit->getDef()->getName()); if (!Op) - throw "Unknown set operator: " + Expr->getAsString(); - Op->apply(*this, DagExpr, Elts); + PrintFatalError(Loc, "Unknown set operator: " + Expr->getAsString()); + Op->apply(*this, DagExpr, Elts, Loc); } const RecVec *SetTheory::expand(Record *Set) { @@ -292,19 +300,19 @@ const RecVec *SetTheory::expand(Record *Set) { return &I->second; // This is the first time we see Set. Find a suitable expander. - try { - const std::vector &SC = Set->getSuperClasses(); - for (unsigned i = 0, e = SC.size(); i != e; ++i) - if (Expander *Exp = Expanders.lookup(SC[i]->getName())) { - // This breaks recursive definitions. - RecVec &EltVec = Expansions[Set]; - RecSet Elts; - Exp->expand(*this, Set, Elts); - EltVec.assign(Elts.begin(), Elts.end()); - return &EltVec; - } - } catch (const std::string &Error) { - throw TGError(Set->getLoc(), Error); + const std::vector &SC = Set->getSuperClasses(); + for (unsigned i = 0, e = SC.size(); i != e; ++i) { + // Skip unnamed superclasses. + if (!dyn_cast(SC[i]->getNameInit())) + continue; + if (Expander *Exp = Expanders.lookup(SC[i]->getName())) { + // This breaks recursive definitions. + RecVec &EltVec = Expansions[Set]; + RecSet Elts; + Exp->expand(*this, Set, Elts); + EltVec.assign(Elts.begin(), Elts.end()); + return &EltVec; + } } // Set is not expandable. 
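The SetTheory changes above replace thrown string errors with PrintFatalError and thread the originating record's source location through evaluate() and every Operator::apply(), so a diagnostic deep inside a set expression still points at the record that triggered the expansion. A self-contained sketch of that pattern, using placeholder Loc/Expr types rather than the real TableGen classes:

#include <cstdio>
#include <cstdlib>
#include <string>
#include <vector>

// Stand-in for a source location; the real code forwards the location of the
// record being expanded into every evaluation step.
struct Loc {
  std::string File;
  unsigned Line;
  Loc(const std::string &F, unsigned L) : File(F), Line(L) {}
};

// In the spirit of PrintFatalError: report a located error and exit, rather
// than throwing a string, so the tool can be built without EH or RTTI.
static void fatal(const Loc &L, const std::string &Msg) {
  std::fprintf(stderr, "%s:%u: error: %s\n", L.File.c_str(), L.Line,
               Msg.c_str());
  std::exit(1);
}

// A toy set expression: either a leaf element or an operator with arguments.
struct Expr {
  bool IsLeaf;
  int Value;
  std::vector<Expr *> Args;
};

// evaluate - Append the elements denoted by E to Elts. The location travels
// with the recursion, so errors in nested arguments are reported against the
// originating record.
static void evaluate(const Expr &E, std::vector<int> &Elts, const Loc &L) {
  if (E.IsLeaf) {
    Elts.push_back(E.Value);
    return;
  }
  if (E.Args.size() < 2)
    fatal(L, "set operator needs at least two arguments");
  for (unsigned i = 0, e = E.Args.size(); i != e; ++i)
    evaluate(*E.Args[i], Elts, L);
}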
diff --git a/utils/TableGen/SetTheory.h b/utils/TableGen/SetTheory.h index b394058..122372a 100644 --- a/utils/TableGen/SetTheory.h +++ b/utils/TableGen/SetTheory.h @@ -49,6 +49,7 @@ #include "llvm/ADT/StringMap.h" #include "llvm/ADT/SetVector.h" +#include "llvm/Support/SourceMgr.h" #include #include @@ -72,7 +73,8 @@ public: /// apply - Apply this operator to Expr's arguments and insert the result /// in Elts. - virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts) =0; + virtual void apply(SetTheory&, DagInit *Expr, RecSet &Elts, + ArrayRef Loc) =0; }; /// Expander - A callback function that can transform a Record representing a @@ -119,13 +121,13 @@ public: void addOperator(StringRef Name, Operator*); /// evaluate - Evaluate Expr and append the resulting set to Elts. - void evaluate(Init *Expr, RecSet &Elts); + void evaluate(Init *Expr, RecSet &Elts, ArrayRef Loc); /// evaluate - Evaluate a sequence of Inits and append to Elts. template - void evaluate(Iter begin, Iter end, RecSet &Elts) { + void evaluate(Iter begin, Iter end, RecSet &Elts, ArrayRef Loc) { while (begin != end) - evaluate(*begin++, Elts); + evaluate(*begin++, Elts, Loc); } /// expand - Expand a record into a set of elements if possible. Return a diff --git a/utils/TableGen/SubtargetEmitter.cpp b/utils/TableGen/SubtargetEmitter.cpp index 3472343..f1a06bb 100644 --- a/utils/TableGen/SubtargetEmitter.cpp +++ b/utils/TableGen/SubtargetEmitter.cpp @@ -11,13 +11,18 @@ // //===----------------------------------------------------------------------===// +#define DEBUG_TYPE "subtarget-emitter" + #include "CodeGenTarget.h" #include "CodeGenSchedule.h" #include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/MC/MCInstrItineraries.h" -#include "llvm/Support/Debug.h" +#include "llvm/TableGen/Error.h" #include "llvm/TableGen/Record.h" #include "llvm/TableGen/TableGenBackend.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/Format.h" #include #include #include @@ -26,6 +31,32 @@ using namespace llvm; namespace { class SubtargetEmitter { + // Each processor has a SchedClassDesc table with an entry for each SchedClass. + // The SchedClassDesc table indexes into a global write resource table, write + // latency table, and read advance table. 
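The comment above summarizes the new scheduling tables: each processor gets its own array of scheduling-class descriptors, and every descriptor refers by index into write-resource, write-latency, and read-advance tables that are shared across processors, with slot 0 reserved as an invalid sentinel. A simplified sketch of that layout, using placeholder struct names rather than the real MC types:

#include <string>
#include <vector>

// Simplified stand-ins for the per-entry records the emitter generates.
struct WriteProcResSketch { unsigned ProcResourceIdx; unsigned Cycles; };
struct WriteLatencySketch { unsigned Cycles; unsigned WriteResourceID; };
struct ReadAdvanceSketch  { unsigned UseIdx; unsigned WriteResourceID; int Cycles; };

// One descriptor per SchedClass per processor. The *Idx members are offsets
// into the shared tables below; the descriptor owns no entry data itself.
struct SchedClassDescSketch {
  unsigned NumMicroOps;
  bool BeginGroup;
  bool EndGroup;
  unsigned WriteProcResIdx;
  unsigned WriteLatencyIdx;
  unsigned ReadAdvanceIdx;
};

// Per-processor descriptor tables plus the global tables they index.
// Index 0 of every table is reserved as the invalid entry.
struct SchedTablesSketch {
  std::vector<std::vector<SchedClassDescSketch> > ProcSchedClasses;
  std::vector<WriteProcResSketch> WriteProcResources;
  std::vector<WriteLatencySketch> WriteLatencies;
  std::vector<std::string> WriterNames;
  std::vector<ReadAdvanceSketch> ReadAdvanceEntries;

  SchedTablesSketch() {
    ProcSchedClasses.resize(1);
    WriteProcResources.resize(1);
    WriteLatencies.resize(1);
    WriterNames.push_back("InvalidWrite");
    ReadAdvanceEntries.resize(1);
  }
};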
+ struct SchedClassTables { + std::vector > ProcSchedClasses; + std::vector WriteProcResources; + std::vector WriteLatencies; + std::vector WriterNames; + std::vector ReadAdvanceEntries; + + // Reserve an invalid entry at index 0 + SchedClassTables() { + ProcSchedClasses.resize(1); + WriteProcResources.resize(1); + WriteLatencies.resize(1); + WriterNames.push_back("InvalidWrite"); + ReadAdvanceEntries.resize(1); + } + }; + + struct LessWriteProcResources { + bool operator()(const MCWriteProcResEntry &LHS, + const MCWriteProcResEntry &RHS) { + return LHS.ProcResourceIdx < RHS.ProcResourceIdx; + } + }; RecordKeeper &Records; CodeGenSchedModels &SchedModels; @@ -50,8 +81,18 @@ class SubtargetEmitter { &ProcItinLists); void EmitProcessorProp(raw_ostream &OS, const Record *R, const char *Name, char Separator); + void EmitProcessorResources(const CodeGenProcModel &ProcModel, + raw_ostream &OS); + Record *FindWriteResources(const CodeGenSchedRW &SchedWrite, + const CodeGenProcModel &ProcModel); + Record *FindReadAdvance(const CodeGenSchedRW &SchedRead, + const CodeGenProcModel &ProcModel); + void GenSchedClassTables(const CodeGenProcModel &ProcModel, + SchedClassTables &SchedTables); + void EmitSchedClassTables(SchedClassTables &SchedTables, raw_ostream &OS); void EmitProcessorModels(raw_ostream &OS); void EmitProcessorLookup(raw_ostream &OS); + void EmitSchedModelHelpers(std::string ClassName, raw_ostream &OS); void EmitSchedModel(raw_ostream &OS); void ParseFeaturesFunction(raw_ostream &OS, unsigned NumFeatures, unsigned NumProcs); @@ -521,7 +562,7 @@ EmitItineraries(raw_ostream &OS, std::vector >::iterator ProcItinListsIter = ProcItinLists.begin(); for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(), - PE = SchedModels.procModelEnd(); PI != PE; ++PI) { + PE = SchedModels.procModelEnd(); PI != PE; ++PI, ++ProcItinListsIter) { Record *ItinsDef = PI->ItinsDef; if (!ItinsDefSet.insert(ItinsDef)) @@ -532,7 +573,7 @@ EmitItineraries(raw_ostream &OS, // Get the itinerary list for the processor. assert(ProcItinListsIter != ProcItinLists.end() && "bad iterator"); - std::vector &ItinList = *ProcItinListsIter++; + std::vector &ItinList = *ProcItinListsIter; OS << "\n"; OS << "static const llvm::InstrItinerary "; @@ -578,11 +619,488 @@ void SubtargetEmitter::EmitProcessorProp(raw_ostream &OS, const Record *R, OS << '\n'; } +void SubtargetEmitter::EmitProcessorResources(const CodeGenProcModel &ProcModel, + raw_ostream &OS) { + char Sep = ProcModel.ProcResourceDefs.empty() ? 
' ' : ','; + + OS << "\n// {Name, NumUnits, SuperIdx, IsBuffered}\n"; + OS << "static const llvm::MCProcResourceDesc " + << ProcModel.ModelName << "ProcResources" << "[] = {\n" + << " {DBGFIELD(\"InvalidUnit\") 0, 0, 0}" << Sep << "\n"; + + for (unsigned i = 0, e = ProcModel.ProcResourceDefs.size(); i < e; ++i) { + Record *PRDef = ProcModel.ProcResourceDefs[i]; + + // Find the SuperIdx + unsigned SuperIdx = 0; + Record *SuperDef = 0; + if (PRDef->getValueInit("Super")->isComplete()) { + SuperDef = + SchedModels.findProcResUnits(PRDef->getValueAsDef("Super"), ProcModel); + SuperIdx = ProcModel.getProcResourceIdx(SuperDef); + } + // Emit the ProcResourceDesc + if (i+1 == e) + Sep = ' '; + OS << " {DBGFIELD(\"" << PRDef->getName() << "\") "; + if (PRDef->getName().size() < 15) + OS.indent(15 - PRDef->getName().size()); + OS << PRDef->getValueAsInt("NumUnits") << ", " << SuperIdx << ", " + << PRDef->getValueAsBit("Buffered") << "}" << Sep << " // #" << i+1; + if (SuperDef) + OS << ", Super=" << SuperDef->getName(); + OS << "\n"; + } + OS << "};\n"; +} + +// Find the WriteRes Record that defines processor resources for this +// SchedWrite. +Record *SubtargetEmitter::FindWriteResources( + const CodeGenSchedRW &SchedWrite, const CodeGenProcModel &ProcModel) { + + // Check if the SchedWrite is already subtarget-specific and directly + // specifies a set of processor resources. + if (SchedWrite.TheDef->isSubClassOf("SchedWriteRes")) + return SchedWrite.TheDef; + + Record *AliasDef = 0; + for (RecIter AI = SchedWrite.Aliases.begin(), AE = SchedWrite.Aliases.end(); + AI != AE; ++AI) { + const CodeGenSchedRW &AliasRW = + SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW")); + if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) { + Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel"); + if (&SchedModels.getProcModel(ModelDef) != &ProcModel) + continue; + } + if (AliasDef) + PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases " + "defined for processor " + ProcModel.ModelName + + " Ensure only one SchedAlias exists per RW."); + AliasDef = AliasRW.TheDef; + } + if (AliasDef && AliasDef->isSubClassOf("SchedWriteRes")) + return AliasDef; + + // Check this processor's list of write resources. + Record *ResDef = 0; + for (RecIter WRI = ProcModel.WriteResDefs.begin(), + WRE = ProcModel.WriteResDefs.end(); WRI != WRE; ++WRI) { + if (!(*WRI)->isSubClassOf("WriteRes")) + continue; + if (AliasDef == (*WRI)->getValueAsDef("WriteType") + || SchedWrite.TheDef == (*WRI)->getValueAsDef("WriteType")) { + if (ResDef) { + PrintFatalError((*WRI)->getLoc(), "Resources are defined for both " + "SchedWrite and its alias on processor " + + ProcModel.ModelName); + } + ResDef = *WRI; + } + } + // TODO: If ProcModel has a base model (previous generation processor), + // then call FindWriteResources recursively with that model here. + if (!ResDef) { + PrintFatalError(ProcModel.ModelDef->getLoc(), + std::string("Processor does not define resources for ") + + SchedWrite.TheDef->getName()); + } + return ResDef; +} + +/// Find the ReadAdvance record for the given SchedRead on this processor or +/// return NULL. +Record *SubtargetEmitter::FindReadAdvance(const CodeGenSchedRW &SchedRead, + const CodeGenProcModel &ProcModel) { + // Check for SchedReads that directly specify a ReadAdvance. + if (SchedRead.TheDef->isSubClassOf("SchedReadAdvance")) + return SchedRead.TheDef; + + // Check this processor's list of aliases for SchedRead. 
+ Record *AliasDef = 0; + for (RecIter AI = SchedRead.Aliases.begin(), AE = SchedRead.Aliases.end(); + AI != AE; ++AI) { + const CodeGenSchedRW &AliasRW = + SchedModels.getSchedRW((*AI)->getValueAsDef("AliasRW")); + if (AliasRW.TheDef->getValueInit("SchedModel")->isComplete()) { + Record *ModelDef = AliasRW.TheDef->getValueAsDef("SchedModel"); + if (&SchedModels.getProcModel(ModelDef) != &ProcModel) + continue; + } + if (AliasDef) + PrintFatalError(AliasRW.TheDef->getLoc(), "Multiple aliases " + "defined for processor " + ProcModel.ModelName + + " Ensure only one SchedAlias exists per RW."); + AliasDef = AliasRW.TheDef; + } + if (AliasDef && AliasDef->isSubClassOf("SchedReadAdvance")) + return AliasDef; + + // Check this processor's ReadAdvanceList. + Record *ResDef = 0; + for (RecIter RAI = ProcModel.ReadAdvanceDefs.begin(), + RAE = ProcModel.ReadAdvanceDefs.end(); RAI != RAE; ++RAI) { + if (!(*RAI)->isSubClassOf("ReadAdvance")) + continue; + if (AliasDef == (*RAI)->getValueAsDef("ReadType") + || SchedRead.TheDef == (*RAI)->getValueAsDef("ReadType")) { + if (ResDef) { + PrintFatalError((*RAI)->getLoc(), "Resources are defined for both " + "SchedRead and its alias on processor " + + ProcModel.ModelName); + } + ResDef = *RAI; + } + } + // TODO: If ProcModel has a base model (previous generation processor), + // then call FindReadAdvance recursively with that model here. + if (!ResDef && SchedRead.TheDef->getName() != "ReadDefault") { + PrintFatalError(ProcModel.ModelDef->getLoc(), + std::string("Processor does not define resources for ") + + SchedRead.TheDef->getName()); + } + return ResDef; +} + +// Generate the SchedClass table for this processor and update global +// tables. Must be called for each processor in order. +void SubtargetEmitter::GenSchedClassTables(const CodeGenProcModel &ProcModel, + SchedClassTables &SchedTables) { + SchedTables.ProcSchedClasses.resize(SchedTables.ProcSchedClasses.size() + 1); + if (!ProcModel.hasInstrSchedModel()) + return; + + std::vector &SCTab = SchedTables.ProcSchedClasses.back(); + for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(), + SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) { + DEBUG(SCI->dump(&SchedModels)); + + SCTab.resize(SCTab.size() + 1); + MCSchedClassDesc &SCDesc = SCTab.back(); + // SCDesc.Name is guarded by NDEBUG + SCDesc.NumMicroOps = 0; + SCDesc.BeginGroup = false; + SCDesc.EndGroup = false; + SCDesc.WriteProcResIdx = 0; + SCDesc.WriteLatencyIdx = 0; + SCDesc.ReadAdvanceIdx = 0; + + // A Variant SchedClass has no resources of its own. + if (!SCI->Transitions.empty()) { + SCDesc.NumMicroOps = MCSchedClassDesc::VariantNumMicroOps; + continue; + } + + // Determine if the SchedClass is actually reachable on this processor. If + // not don't try to locate the processor resources, it will fail. + // If ProcIndices contains 0, this class applies to all processors. + assert(!SCI->ProcIndices.empty() && "expect at least one procidx"); + if (SCI->ProcIndices[0] != 0) { + IdxIter PIPos = std::find(SCI->ProcIndices.begin(), + SCI->ProcIndices.end(), ProcModel.Index); + if (PIPos == SCI->ProcIndices.end()) + continue; + } + IdxVec Writes = SCI->Writes; + IdxVec Reads = SCI->Reads; + if (SCI->ItinClassDef) { + assert(SCI->InstRWs.empty() && "ItinClass should not have InstRWs"); + // Check this processor's itinerary class resources. 
+ for (RecIter II = ProcModel.ItinRWDefs.begin(), + IE = ProcModel.ItinRWDefs.end(); II != IE; ++II) { + RecVec Matched = (*II)->getValueAsListOfDefs("MatchedItinClasses"); + if (std::find(Matched.begin(), Matched.end(), SCI->ItinClassDef) + != Matched.end()) { + SchedModels.findRWs((*II)->getValueAsListOfDefs("OperandReadWrites"), + Writes, Reads); + break; + } + } + if (Writes.empty()) { + DEBUG(dbgs() << ProcModel.ItinsDef->getName() + << " does not have resources for itinerary class " + << SCI->ItinClassDef->getName() << '\n'); + } + } + else if (!SCI->InstRWs.empty()) { + // This class may have a default ReadWrite list which can be overriden by + // InstRW definitions. + Record *RWDef = 0; + for (RecIter RWI = SCI->InstRWs.begin(), RWE = SCI->InstRWs.end(); + RWI != RWE; ++RWI) { + Record *RWModelDef = (*RWI)->getValueAsDef("SchedModel"); + if (&ProcModel == &SchedModels.getProcModel(RWModelDef)) { + RWDef = *RWI; + break; + } + } + if (RWDef) { + Writes.clear(); + Reads.clear(); + SchedModels.findRWs(RWDef->getValueAsListOfDefs("OperandReadWrites"), + Writes, Reads); + } + } + // Sum resources across all operand writes. + std::vector WriteProcResources; + std::vector WriteLatencies; + std::vector WriterNames; + std::vector ReadAdvanceEntries; + for (IdxIter WI = Writes.begin(), WE = Writes.end(); WI != WE; ++WI) { + IdxVec WriteSeq; + SchedModels.expandRWSeqForProc(*WI, WriteSeq, /*IsRead=*/false, + ProcModel); + + // For each operand, create a latency entry. + MCWriteLatencyEntry WLEntry; + WLEntry.Cycles = 0; + unsigned WriteID = WriteSeq.back(); + WriterNames.push_back(SchedModels.getSchedWrite(WriteID).Name); + // If this Write is not referenced by a ReadAdvance, don't distinguish it + // from other WriteLatency entries. + if (!SchedModels.hasReadOfWrite(SchedModels.getSchedWrite(WriteID).TheDef)) { + WriteID = 0; + } + WLEntry.WriteResourceID = WriteID; + + for (IdxIter WSI = WriteSeq.begin(), WSE = WriteSeq.end(); + WSI != WSE; ++WSI) { + + Record *WriteRes = + FindWriteResources(SchedModels.getSchedWrite(*WSI), ProcModel); + + // Mark the parent class as invalid for unsupported write types. + if (WriteRes->getValueAsBit("Unsupported")) { + SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps; + break; + } + WLEntry.Cycles += WriteRes->getValueAsInt("Latency"); + SCDesc.NumMicroOps += WriteRes->getValueAsInt("NumMicroOps"); + SCDesc.BeginGroup |= WriteRes->getValueAsBit("BeginGroup"); + SCDesc.EndGroup |= WriteRes->getValueAsBit("EndGroup"); + + // Create an entry for each ProcResource listed in WriteRes. + RecVec PRVec = WriteRes->getValueAsListOfDefs("ProcResources"); + std::vector Cycles = + WriteRes->getValueAsListOfInts("ResourceCycles"); + for (unsigned PRIdx = 0, PREnd = PRVec.size(); + PRIdx != PREnd; ++PRIdx) { + MCWriteProcResEntry WPREntry; + WPREntry.ProcResourceIdx = ProcModel.getProcResourceIdx(PRVec[PRIdx]); + assert(WPREntry.ProcResourceIdx && "Bad ProcResourceIdx"); + if (Cycles.size() > PRIdx) + WPREntry.Cycles = Cycles[PRIdx]; + else + WPREntry.Cycles = 1; + WriteProcResources.push_back(WPREntry); + } + } + WriteLatencies.push_back(WLEntry); + } + // Create an entry for each operand Read in this SchedClass. + // Entries must be sorted first by UseIdx then by WriteResourceID. + for (unsigned UseIdx = 0, EndIdx = Reads.size(); + UseIdx != EndIdx; ++UseIdx) { + Record *ReadAdvance = + FindReadAdvance(SchedModels.getSchedRead(Reads[UseIdx]), ProcModel); + if (!ReadAdvance) + continue; + + // Mark the parent class as invalid for unsupported write types. 
+ if (ReadAdvance->getValueAsBit("Unsupported")) { + SCDesc.NumMicroOps = MCSchedClassDesc::InvalidNumMicroOps; + break; + } + RecVec ValidWrites = ReadAdvance->getValueAsListOfDefs("ValidWrites"); + IdxVec WriteIDs; + if (ValidWrites.empty()) + WriteIDs.push_back(0); + else { + for (RecIter VWI = ValidWrites.begin(), VWE = ValidWrites.end(); + VWI != VWE; ++VWI) { + WriteIDs.push_back(SchedModels.getSchedRWIdx(*VWI, /*IsRead=*/false)); + } + } + std::sort(WriteIDs.begin(), WriteIDs.end()); + for(IdxIter WI = WriteIDs.begin(), WE = WriteIDs.end(); WI != WE; ++WI) { + MCReadAdvanceEntry RAEntry; + RAEntry.UseIdx = UseIdx; + RAEntry.WriteResourceID = *WI; + RAEntry.Cycles = ReadAdvance->getValueAsInt("Cycles"); + ReadAdvanceEntries.push_back(RAEntry); + } + } + if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) { + WriteProcResources.clear(); + WriteLatencies.clear(); + ReadAdvanceEntries.clear(); + } + // Add the information for this SchedClass to the global tables using basic + // compression. + // + // WritePrecRes entries are sorted by ProcResIdx. + std::sort(WriteProcResources.begin(), WriteProcResources.end(), + LessWriteProcResources()); + + SCDesc.NumWriteProcResEntries = WriteProcResources.size(); + std::vector::iterator WPRPos = + std::search(SchedTables.WriteProcResources.begin(), + SchedTables.WriteProcResources.end(), + WriteProcResources.begin(), WriteProcResources.end()); + if (WPRPos != SchedTables.WriteProcResources.end()) + SCDesc.WriteProcResIdx = WPRPos - SchedTables.WriteProcResources.begin(); + else { + SCDesc.WriteProcResIdx = SchedTables.WriteProcResources.size(); + SchedTables.WriteProcResources.insert(WPRPos, WriteProcResources.begin(), + WriteProcResources.end()); + } + // Latency entries must remain in operand order. + SCDesc.NumWriteLatencyEntries = WriteLatencies.size(); + std::vector::iterator WLPos = + std::search(SchedTables.WriteLatencies.begin(), + SchedTables.WriteLatencies.end(), + WriteLatencies.begin(), WriteLatencies.end()); + if (WLPos != SchedTables.WriteLatencies.end()) { + unsigned idx = WLPos - SchedTables.WriteLatencies.begin(); + SCDesc.WriteLatencyIdx = idx; + for (unsigned i = 0, e = WriteLatencies.size(); i < e; ++i) + if (SchedTables.WriterNames[idx + i].find(WriterNames[i]) == + std::string::npos) { + SchedTables.WriterNames[idx + i] += std::string("_") + WriterNames[i]; + } + } + else { + SCDesc.WriteLatencyIdx = SchedTables.WriteLatencies.size(); + SchedTables.WriteLatencies.insert(SchedTables.WriteLatencies.end(), + WriteLatencies.begin(), + WriteLatencies.end()); + SchedTables.WriterNames.insert(SchedTables.WriterNames.end(), + WriterNames.begin(), WriterNames.end()); + } + // ReadAdvanceEntries must remain in operand order. + SCDesc.NumReadAdvanceEntries = ReadAdvanceEntries.size(); + std::vector::iterator RAPos = + std::search(SchedTables.ReadAdvanceEntries.begin(), + SchedTables.ReadAdvanceEntries.end(), + ReadAdvanceEntries.begin(), ReadAdvanceEntries.end()); + if (RAPos != SchedTables.ReadAdvanceEntries.end()) + SCDesc.ReadAdvanceIdx = RAPos - SchedTables.ReadAdvanceEntries.begin(); + else { + SCDesc.ReadAdvanceIdx = SchedTables.ReadAdvanceEntries.size(); + SchedTables.ReadAdvanceEntries.insert(RAPos, ReadAdvanceEntries.begin(), + ReadAdvanceEntries.end()); + } + } +} + +// Emit SchedClass tables for all processors and associated global tables. +void SubtargetEmitter::EmitSchedClassTables(SchedClassTables &SchedTables, + raw_ostream &OS) { + // Emit global WriteProcResTable. 
+ OS << "\n// {ProcResourceIdx, Cycles}\n" + << "extern const llvm::MCWriteProcResEntry " + << Target << "WriteProcResTable[] = {\n" + << " { 0, 0}, // Invalid\n"; + for (unsigned WPRIdx = 1, WPREnd = SchedTables.WriteProcResources.size(); + WPRIdx != WPREnd; ++WPRIdx) { + MCWriteProcResEntry &WPREntry = SchedTables.WriteProcResources[WPRIdx]; + OS << " {" << format("%2d", WPREntry.ProcResourceIdx) << ", " + << format("%2d", WPREntry.Cycles) << "}"; + if (WPRIdx + 1 < WPREnd) + OS << ','; + OS << " // #" << WPRIdx << '\n'; + } + OS << "}; // " << Target << "WriteProcResTable\n"; + + // Emit global WriteLatencyTable. + OS << "\n// {Cycles, WriteResourceID}\n" + << "extern const llvm::MCWriteLatencyEntry " + << Target << "WriteLatencyTable[] = {\n" + << " { 0, 0}, // Invalid\n"; + for (unsigned WLIdx = 1, WLEnd = SchedTables.WriteLatencies.size(); + WLIdx != WLEnd; ++WLIdx) { + MCWriteLatencyEntry &WLEntry = SchedTables.WriteLatencies[WLIdx]; + OS << " {" << format("%2d", WLEntry.Cycles) << ", " + << format("%2d", WLEntry.WriteResourceID) << "}"; + if (WLIdx + 1 < WLEnd) + OS << ','; + OS << " // #" << WLIdx << " " << SchedTables.WriterNames[WLIdx] << '\n'; + } + OS << "}; // " << Target << "WriteLatencyTable\n"; + + // Emit global ReadAdvanceTable. + OS << "\n// {UseIdx, WriteResourceID, Cycles}\n" + << "extern const llvm::MCReadAdvanceEntry " + << Target << "ReadAdvanceTable[] = {\n" + << " {0, 0, 0}, // Invalid\n"; + for (unsigned RAIdx = 1, RAEnd = SchedTables.ReadAdvanceEntries.size(); + RAIdx != RAEnd; ++RAIdx) { + MCReadAdvanceEntry &RAEntry = SchedTables.ReadAdvanceEntries[RAIdx]; + OS << " {" << RAEntry.UseIdx << ", " + << format("%2d", RAEntry.WriteResourceID) << ", " + << format("%2d", RAEntry.Cycles) << "}"; + if (RAIdx + 1 < RAEnd) + OS << ','; + OS << " // #" << RAIdx << '\n'; + } + OS << "}; // " << Target << "ReadAdvanceTable\n"; + + // Emit a SchedClass table for each processor. + for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(), + PE = SchedModels.procModelEnd(); PI != PE; ++PI) { + if (!PI->hasInstrSchedModel()) + continue; + + std::vector &SCTab = + SchedTables.ProcSchedClasses[1 + (PI - SchedModels.procModelBegin())]; + + OS << "\n// {Name, NumMicroOps, BeginGroup, EndGroup," + << " WriteProcResIdx,#, WriteLatencyIdx,#, ReadAdvanceIdx,#}\n"; + OS << "static const llvm::MCSchedClassDesc " + << PI->ModelName << "SchedClasses[] = {\n"; + + // The first class is always invalid. We no way to distinguish it except by + // name and position. 
+ assert(SchedModels.getSchedClass(0).Name == "NoItinerary" + && "invalid class not first"); + OS << " {DBGFIELD(\"InvalidSchedClass\") " + << MCSchedClassDesc::InvalidNumMicroOps + << ", 0, 0, 0, 0, 0, 0, 0, 0},\n"; + + for (unsigned SCIdx = 1, SCEnd = SCTab.size(); SCIdx != SCEnd; ++SCIdx) { + MCSchedClassDesc &MCDesc = SCTab[SCIdx]; + const CodeGenSchedClass &SchedClass = SchedModels.getSchedClass(SCIdx); + OS << " {DBGFIELD(\"" << SchedClass.Name << "\") "; + if (SchedClass.Name.size() < 18) + OS.indent(18 - SchedClass.Name.size()); + OS << MCDesc.NumMicroOps + << ", " << MCDesc.BeginGroup << ", " << MCDesc.EndGroup + << ", " << format("%2d", MCDesc.WriteProcResIdx) + << ", " << MCDesc.NumWriteProcResEntries + << ", " << format("%2d", MCDesc.WriteLatencyIdx) + << ", " << MCDesc.NumWriteLatencyEntries + << ", " << format("%2d", MCDesc.ReadAdvanceIdx) + << ", " << MCDesc.NumReadAdvanceEntries << "}"; + if (SCIdx + 1 < SCEnd) + OS << ','; + OS << " // #" << SCIdx << '\n'; + } + OS << "}; // " << PI->ModelName << "SchedClasses\n"; + } +} + void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) { // For each processor model. for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(), PE = SchedModels.procModelEnd(); PI != PE; ++PI) { - // Skip default + // Emit processor resource table. + if (PI->hasInstrSchedModel()) + EmitProcessorResources(*PI, OS); + else if(!PI->ProcResourceDefs.empty()) + PrintFatalError(PI->ModelDef->getLoc(), "SchedMachineModel defines " + "ProcResources without defining WriteRes SchedWriteRes"); + // Begin processor itinerary properties OS << "\n"; OS << "static const llvm::MCSchedModel " << PI->ModelName << "(\n"; @@ -591,11 +1109,19 @@ void SubtargetEmitter::EmitProcessorModels(raw_ostream &OS) { EmitProcessorProp(OS, PI->ModelDef, "LoadLatency", ','); EmitProcessorProp(OS, PI->ModelDef, "HighLatency", ','); EmitProcessorProp(OS, PI->ModelDef, "MispredictPenalty", ','); + OS << " " << PI->Index << ", // Processor ID\n"; + if (PI->hasInstrSchedModel()) + OS << " " << PI->ModelName << "ProcResources" << ",\n" + << " " << PI->ModelName << "SchedClasses" << ",\n" + << " " << PI->ProcResourceDefs.size()+1 << ",\n" + << " " << (SchedModels.schedClassEnd() + - SchedModels.schedClassBegin()) << ",\n"; + else + OS << " 0, 0, 0, 0, // No instruction-level machine model.\n"; if (SchedModels.hasItineraryClasses()) - OS << " " << PI->ItinsDef->getName(); + OS << " " << PI->ItinsDef->getName() << ");\n"; else - OS << " 0"; - OS << ");\n"; + OS << " 0); // No Itinerary\n"; } } @@ -621,14 +1147,10 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) { const std::string &Name = Processor->getValueAsString("Name"); const std::string &ProcModelName = - SchedModels.getProcModel(Processor).ModelName; + SchedModels.getModelForProc(Processor).ModelName; // Emit as { "cpu", procinit }, - OS << " { " - << "\"" << Name << "\", " - << "(void *)&" << ProcModelName; - - OS << " }"; + OS << " { \"" << Name << "\", (const void *)&" << ProcModelName << " }"; // Depending on ''if more in the list'' emit comma if (++i < N) OS << ","; @@ -644,16 +1166,116 @@ void SubtargetEmitter::EmitProcessorLookup(raw_ostream &OS) { // EmitSchedModel - Emits all scheduling model tables, folding common patterns. 
// void SubtargetEmitter::EmitSchedModel(raw_ostream &OS) { + OS << "#ifdef DBGFIELD\n" + << "#error \"GenSubtargetInfo.inc requires a DBGFIELD macro\"\n" + << "#endif\n" + << "#ifndef NDEBUG\n" + << "#define DBGFIELD(x) x,\n" + << "#else\n" + << "#define DBGFIELD(x)\n" + << "#endif\n"; + if (SchedModels.hasItineraryClasses()) { std::vector > ProcItinLists; // Emit the stage data EmitStageAndOperandCycleData(OS, ProcItinLists); EmitItineraries(OS, ProcItinLists); } + OS << "\n// ===============================================================\n" + << "// Data tables for the new per-operand machine model.\n"; + + SchedClassTables SchedTables; + for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(), + PE = SchedModels.procModelEnd(); PI != PE; ++PI) { + GenSchedClassTables(*PI, SchedTables); + } + EmitSchedClassTables(SchedTables, OS); + // Emit the processor machine model EmitProcessorModels(OS); // Emit the processor lookup data EmitProcessorLookup(OS); + + OS << "#undef DBGFIELD"; +} + +void SubtargetEmitter::EmitSchedModelHelpers(std::string ClassName, + raw_ostream &OS) { + OS << "unsigned " << ClassName + << "\n::resolveSchedClass(unsigned SchedClass, const MachineInstr *MI," + << " const TargetSchedModel *SchedModel) const {\n"; + + std::vector Prologs = Records.getAllDerivedDefinitions("PredicateProlog"); + std::sort(Prologs.begin(), Prologs.end(), LessRecord()); + for (std::vector::const_iterator + PI = Prologs.begin(), PE = Prologs.end(); PI != PE; ++PI) { + OS << (*PI)->getValueAsString("Code") << '\n'; + } + IdxVec VariantClasses; + for (CodeGenSchedModels::SchedClassIter SCI = SchedModels.schedClassBegin(), + SCE = SchedModels.schedClassEnd(); SCI != SCE; ++SCI) { + if (SCI->Transitions.empty()) + continue; + VariantClasses.push_back(SCI - SchedModels.schedClassBegin()); + } + if (!VariantClasses.empty()) { + OS << " switch (SchedClass) {\n"; + for (IdxIter VCI = VariantClasses.begin(), VCE = VariantClasses.end(); + VCI != VCE; ++VCI) { + const CodeGenSchedClass &SC = SchedModels.getSchedClass(*VCI); + OS << " case " << *VCI << ": // " << SC.Name << '\n'; + IdxVec ProcIndices; + for (std::vector::const_iterator + TI = SC.Transitions.begin(), TE = SC.Transitions.end(); + TI != TE; ++TI) { + IdxVec PI; + std::set_union(TI->ProcIndices.begin(), TI->ProcIndices.end(), + ProcIndices.begin(), ProcIndices.end(), + std::back_inserter(PI)); + ProcIndices.swap(PI); + } + for (IdxIter PI = ProcIndices.begin(), PE = ProcIndices.end(); + PI != PE; ++PI) { + OS << " "; + if (*PI != 0) + OS << "if (SchedModel->getProcessorID() == " << *PI << ") "; + OS << "{ // " << (SchedModels.procModelBegin() + *PI)->ModelName + << '\n'; + for (std::vector::const_iterator + TI = SC.Transitions.begin(), TE = SC.Transitions.end(); + TI != TE; ++TI) { + OS << " if ("; + if (*PI != 0 && !std::count(TI->ProcIndices.begin(), + TI->ProcIndices.end(), *PI)) { + continue; + } + for (RecIter RI = TI->PredTerm.begin(), RE = TI->PredTerm.end(); + RI != RE; ++RI) { + if (RI != TI->PredTerm.begin()) + OS << "\n && "; + OS << "(" << (*RI)->getValueAsString("Predicate") << ")"; + } + OS << ")\n" + << " return " << TI->ToClassIdx << "; // " + << SchedModels.getSchedClass(TI->ToClassIdx).Name << '\n'; + } + OS << " }\n"; + if (*PI == 0) + break; + } + unsigned SCIdx = 0; + if (SC.ItinClassDef) + SCIdx = SchedModels.getSchedClassIdxForItin(SC.ItinClassDef); + else + SCIdx = SchedModels.findSchedClassIdx(SC.Writes, SC.Reads); + if (SCIdx != *VCI) + OS << " return " << SCIdx << ";\n"; + OS << " break;\n"; + } + OS 
<< " };\n"; + } + OS << " report_fatal_error(\"Expected a variant SchedClass\");\n" + << "} // " << ClassName << "::resolveSchedClass\n"; } // @@ -680,7 +1302,8 @@ void SubtargetEmitter::ParseFeaturesFunction(raw_ostream &OS, return; } - OS << " uint64_t Bits = ReInitMCSubtargetInfo(CPU, FS);\n"; + OS << " InitMCProcessorInfo(CPU, FS);\n" + << " uint64_t Bits = getFeatureBits();\n"; for (unsigned i = 0; i < Features.size(); i++) { // Next record @@ -747,13 +1370,18 @@ void SubtargetEmitter::run(raw_ostream &OS) { OS << Target << "SubTypeKV, "; else OS << "0, "; + OS << '\n'; OS.indent(22); + OS << Target << "ProcSchedKV, " + << Target << "WriteProcResTable, " + << Target << "WriteLatencyTable, " + << Target << "ReadAdvanceTable, "; if (SchedModels.hasItineraryClasses()) { - OS << Target << "ProcSchedKV, " - << Target << "Stages, " + OS << '\n'; OS.indent(22); + OS << Target << "Stages, " << Target << "OperandCycles, " << Target << "ForwardingPaths, "; } else - OS << "0, 0, 0, 0, "; + OS << "0, 0, 0, "; OS << NumFeatures << ", " << NumProcs << ");\n}\n\n"; OS << "} // End llvm namespace \n"; @@ -780,6 +1408,8 @@ void SubtargetEmitter::run(raw_ostream &OS) { << " explicit " << ClassName << "(StringRef TT, StringRef CPU, " << "StringRef FS);\n" << "public:\n" + << " unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *DefMI," + << " const TargetSchedModel *SchedModel) const;\n" << " DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)" << " const;\n" << "};\n"; @@ -790,11 +1420,19 @@ void SubtargetEmitter::run(raw_ostream &OS) { OS << "\n#ifdef GET_SUBTARGETINFO_CTOR\n"; OS << "#undef GET_SUBTARGETINFO_CTOR\n"; + OS << "#include \"llvm/CodeGen/TargetSchedule.h\"\n"; OS << "namespace llvm {\n"; OS << "extern const llvm::SubtargetFeatureKV " << Target << "FeatureKV[];\n"; OS << "extern const llvm::SubtargetFeatureKV " << Target << "SubTypeKV[];\n"; + OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n"; + OS << "extern const llvm::MCWriteProcResEntry " + << Target << "WriteProcResTable[];\n"; + OS << "extern const llvm::MCWriteLatencyEntry " + << Target << "WriteLatencyTable[];\n"; + OS << "extern const llvm::MCReadAdvanceEntry " + << Target << "ReadAdvanceTable[];\n"; + if (SchedModels.hasItineraryClasses()) { - OS << "extern const llvm::SubtargetInfoKV " << Target << "ProcSchedKV[];\n"; OS << "extern const llvm::InstrStage " << Target << "Stages[];\n"; OS << "extern const unsigned " << Target << "OperandCycles[];\n"; OS << "extern const unsigned " << Target << "ForwardingPaths[];\n"; @@ -812,14 +1450,22 @@ void SubtargetEmitter::run(raw_ostream &OS) { OS << Target << "SubTypeKV, "; else OS << "0, "; + OS << '\n'; OS.indent(22); + OS << Target << "ProcSchedKV, " + << Target << "WriteProcResTable, " + << Target << "WriteLatencyTable, " + << Target << "ReadAdvanceTable, "; + OS << '\n'; OS.indent(22); if (SchedModels.hasItineraryClasses()) { - OS << Target << "ProcSchedKV, " - << Target << "Stages, " + OS << Target << "Stages, " << Target << "OperandCycles, " << Target << "ForwardingPaths, "; } else - OS << "0, 0, 0, 0, "; + OS << "0, 0, 0, "; OS << NumFeatures << ", " << NumProcs << ");\n}\n\n"; + + EmitSchedModelHelpers(ClassName, OS); + OS << "} // End llvm namespace \n"; OS << "#endif // GET_SUBTARGETINFO_CTOR\n\n"; diff --git a/utils/TableGen/TGValueTypes.cpp b/utils/TableGen/TGValueTypes.cpp index af0d9f4..3ac71a4 100644 --- a/utils/TableGen/TGValueTypes.cpp +++ b/utils/TableGen/TGValueTypes.cpp @@ -15,13 +15,25 @@ 
//===----------------------------------------------------------------------===// #include "llvm/CodeGen/ValueTypes.h" +#include "llvm/Support/Casting.h" #include using namespace llvm; namespace llvm { class Type { +protected: + enum TypeKind { + TK_ExtendedIntegerType, + TK_ExtendedVectorType + }; +private: + TypeKind Kind; public: + TypeKind getKind() const { + return Kind; + } + Type(TypeKind K) : Kind(K) {} virtual unsigned getSizeInBits() const = 0; virtual ~Type() {} }; @@ -32,7 +44,10 @@ class ExtendedIntegerType : public Type { unsigned BitWidth; public: explicit ExtendedIntegerType(unsigned bits) - : BitWidth(bits) {} + : Type(TK_ExtendedIntegerType), BitWidth(bits) {} + static bool classof(const Type *T) { + return T->getKind() == TK_ExtendedIntegerType; + } unsigned getSizeInBits() const { return getBitWidth(); } @@ -46,7 +61,10 @@ class ExtendedVectorType : public Type { unsigned NumElements; public: ExtendedVectorType(EVT elty, unsigned num) - : ElementType(elty), NumElements(num) {} + : Type(TK_ExtendedVectorType), ElementType(elty), NumElements(num) {} + static bool classof(const Type *T) { + return T->getKind() == TK_ExtendedVectorType; + } unsigned getSizeInBits() const { return getNumElements() * getElementType().getSizeInBits(); } @@ -71,12 +89,12 @@ bool EVT::isExtendedFloatingPoint() const { bool EVT::isExtendedInteger() const { assert(isExtended() && "Type is not extended!"); - return dynamic_cast(LLVMTy) != 0; + return isa(LLVMTy); } bool EVT::isExtendedVector() const { assert(isExtended() && "Type is not extended!"); - return dynamic_cast(LLVMTy) != 0; + return isa(LLVMTy); } bool EVT::isExtended64BitVector() const { diff --git a/utils/TableGen/TableGen.cpp b/utils/TableGen/TableGen.cpp index 9695b4a..49efe7e 100644 --- a/utils/TableGen/TableGen.cpp +++ b/utils/TableGen/TableGen.cpp @@ -20,7 +20,6 @@ #include "llvm/TableGen/Error.h" #include "llvm/TableGen/Main.h" #include "llvm/TableGen/Record.h" -#include "llvm/TableGen/TableGenAction.h" using namespace llvm; @@ -90,86 +89,83 @@ namespace { Class("class", cl::desc("Print Enum list for this class"), cl::value_desc("class name")); - class LLVMTableGenAction : public TableGenAction { - public: - bool operator()(raw_ostream &OS, RecordKeeper &Records) { - switch (Action) { - case PrintRecords: - OS << Records; // No argument, dump all contents - break; - case GenEmitter: - EmitCodeEmitter(Records, OS); - break; - case GenRegisterInfo: - EmitRegisterInfo(Records, OS); - break; - case GenInstrInfo: - EmitInstrInfo(Records, OS); - break; - case GenCallingConv: - EmitCallingConv(Records, OS); - break; - case GenAsmWriter: - EmitAsmWriter(Records, OS); - break; - case GenAsmMatcher: - EmitAsmMatcher(Records, OS); - break; - case GenDisassembler: - EmitDisassembler(Records, OS); - break; - case GenPseudoLowering: - EmitPseudoLowering(Records, OS); - break; - case GenDAGISel: - EmitDAGISel(Records, OS); - break; - case GenDFAPacketizer: - EmitDFAPacketizer(Records, OS); - break; - case GenFastISel: - EmitFastISel(Records, OS); - break; - case GenSubtarget: - EmitSubtarget(Records, OS); - break; - case GenIntrinsic: - EmitIntrinsics(Records, OS); - break; - case GenTgtIntrinsic: - EmitIntrinsics(Records, OS, true); - break; - case GenEDInfo: - EmitEnhancedDisassemblerInfo(Records, OS); - break; - case PrintEnums: - { - std::vector Recs = Records.getAllDerivedDefinitions(Class); - for (unsigned i = 0, e = Recs.size(); i != e; ++i) - OS << Recs[i]->getName() << ", "; - OS << "\n"; - break; - } - case PrintSets: - { - SetTheory 
Sets; - Sets.addFieldExpander("Set", "Elements"); - std::vector Recs = Records.getAllDerivedDefinitions("Set"); - for (unsigned i = 0, e = Recs.size(); i != e; ++i) { - OS << Recs[i]->getName() << " = ["; - const std::vector *Elts = Sets.expand(Recs[i]); - assert(Elts && "Couldn't expand Set instance"); - for (unsigned ei = 0, ee = Elts->size(); ei != ee; ++ei) - OS << ' ' << (*Elts)[ei]->getName(); - OS << " ]\n"; - } - break; - } - } - - return false; +bool LLVMTableGenMain(raw_ostream &OS, RecordKeeper &Records) { + switch (Action) { + case PrintRecords: + OS << Records; // No argument, dump all contents + break; + case GenEmitter: + EmitCodeEmitter(Records, OS); + break; + case GenRegisterInfo: + EmitRegisterInfo(Records, OS); + break; + case GenInstrInfo: + EmitInstrInfo(Records, OS); + break; + case GenCallingConv: + EmitCallingConv(Records, OS); + break; + case GenAsmWriter: + EmitAsmWriter(Records, OS); + break; + case GenAsmMatcher: + EmitAsmMatcher(Records, OS); + break; + case GenDisassembler: + EmitDisassembler(Records, OS); + break; + case GenPseudoLowering: + EmitPseudoLowering(Records, OS); + break; + case GenDAGISel: + EmitDAGISel(Records, OS); + break; + case GenDFAPacketizer: + EmitDFAPacketizer(Records, OS); + break; + case GenFastISel: + EmitFastISel(Records, OS); + break; + case GenSubtarget: + EmitSubtarget(Records, OS); + break; + case GenIntrinsic: + EmitIntrinsics(Records, OS); + break; + case GenTgtIntrinsic: + EmitIntrinsics(Records, OS, true); + break; + case GenEDInfo: + EmitEnhancedDisassemblerInfo(Records, OS); + break; + case PrintEnums: + { + std::vector Recs = Records.getAllDerivedDefinitions(Class); + for (unsigned i = 0, e = Recs.size(); i != e; ++i) + OS << Recs[i]->getName() << ", "; + OS << "\n"; + break; + } + case PrintSets: + { + SetTheory Sets; + Sets.addFieldExpander("Set", "Elements"); + std::vector Recs = Records.getAllDerivedDefinitions("Set"); + for (unsigned i = 0, e = Recs.size(); i != e; ++i) { + OS << Recs[i]->getName() << " = ["; + const std::vector *Elts = Sets.expand(Recs[i]); + assert(Elts && "Couldn't expand Set instance"); + for (unsigned ei = 0, ee = Elts->size(); ei != ee; ++ei) + OS << ' ' << (*Elts)[ei]->getName(); + OS << " ]\n"; } - }; + break; + } + } + + return false; +} } int main(int argc, char **argv) { @@ -177,6 +173,5 @@ int main(int argc, char **argv) { PrettyStackTraceProgram X(argc, argv); cl::ParseCommandLineOptions(argc, argv); - LLVMTableGenAction Action; - return TableGenMain(argv[0], Action); + return TableGenMain(argv[0], &LLVMTableGenMain); } diff --git a/utils/TableGen/TableGenBackends.h b/utils/TableGen/TableGenBackends.h index 2c00c40..f0d25d8 100644 --- a/utils/TableGen/TableGenBackends.h +++ b/utils/TableGen/TableGenBackends.h @@ -74,5 +74,6 @@ void EmitInstrInfo(RecordKeeper &RK, raw_ostream &OS); void EmitPseudoLowering(RecordKeeper &RK, raw_ostream &OS); void EmitRegisterInfo(RecordKeeper &RK, raw_ostream &OS); void EmitSubtarget(RecordKeeper &RK, raw_ostream &OS); +void EmitMapTable(RecordKeeper &RK, raw_ostream &OS); } // End llvm namespace diff --git a/utils/TableGen/X86DisassemblerTables.cpp b/utils/TableGen/X86DisassemblerTables.cpp index f3bd373..468a1f8 100644 --- a/utils/TableGen/X86DisassemblerTables.cpp +++ b/utils/TableGen/X86DisassemblerTables.cpp @@ -209,6 +209,7 @@ static ModRMDecisionType getDecisionType(ModRMDecision &decision) { bool satisfiesOneEntry = true; bool satisfiesSplitRM = true; bool satisfiesSplitReg = true; + bool satisfiesSplitMisc = true; for (unsigned index = 0; index < 
256; ++index) { if (decision.instructionIDs[index] != decision.instructionIDs[0]) @@ -228,7 +229,7 @@ static ModRMDecisionType getDecisionType(ModRMDecision &decision) { if (((index & 0xc0) != 0xc0) && (decision.instructionIDs[index] != decision.instructionIDs[index&0x38])) - satisfiesSplitReg = false; + satisfiesSplitMisc = false; } if (satisfiesOneEntry) @@ -237,9 +238,12 @@ static ModRMDecisionType getDecisionType(ModRMDecision &decision) { if (satisfiesSplitRM) return MODRM_SPLITRM; - if (satisfiesSplitReg) + if (satisfiesSplitReg && satisfiesSplitMisc) return MODRM_SPLITREG; + if (satisfiesSplitMisc) + return MODRM_SPLITMISC; + return MODRM_FULL; } @@ -332,6 +336,12 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1, raw_ostream &o2, for (unsigned index = 0xc0; index < 256; index += 8) emitOneID(o1, i1, decision.instructionIDs[index], true); break; + case MODRM_SPLITMISC: + for (unsigned index = 0; index < 64; index += 8) + emitOneID(o1, i1, decision.instructionIDs[index], true); + for (unsigned index = 0xc0; index < 256; ++index) + emitOneID(o1, i1, decision.instructionIDs[index], true); + break; case MODRM_FULL: for (unsigned index = 0; index < 256; ++index) emitOneID(o1, i1, decision.instructionIDs[index], true); @@ -361,11 +371,18 @@ void DisassemblerTables::emitModRMDecision(raw_ostream &o1, raw_ostream &o2, case MODRM_SPLITREG: sEntryNumber += 16; break; + case MODRM_SPLITMISC: + sEntryNumber += 8 + 64; + break; case MODRM_FULL: sEntryNumber += 256; break; } + // We assume that the index can fit into uint16_t. + assert(sEntryNumber < 65536U && + "Index into ModRMDecision is too large for uint16_t!"); + ++sTableNumber; } diff --git a/utils/TableGen/X86ModRMFilters.h b/utils/TableGen/X86ModRMFilters.h index 19fecbc..2cbaf79 100644 --- a/utils/TableGen/X86ModRMFilters.h +++ b/utils/TableGen/X86ModRMFilters.h @@ -70,7 +70,7 @@ class ModFilter : public ModRMFilter { public: /// Constructor /// - /// @r - True if the mod bits of the ModR/M byte must be 11; false + /// \param r True if the mod bits of the ModR/M byte must be 11; false /// otherwise. The name r derives from the fact that the mod /// bits indicate whether the R/M bits [bits 2-0] signify a /// register or a memory operand. @@ -98,11 +98,12 @@ class EscapeFilter : public ModRMFilter { public: /// Constructor /// - /// @c0_ff - True if the ModR/M byte must fall between 0xc0 and 0xff; - /// false otherwise. - /// @nnn_or_modRM - If c0_ff is true, the required value of the entire ModR/M - /// byte. If c0_ff is false, the required value of the nnn - /// field. + /// \param c0_ff True if the ModR/M byte must fall between 0xc0 and 0xff; + /// false otherwise. + /// + /// \param nnn_or_modRM If c0_ff is true, the required value of the entire + /// ModR/M byte. If c0_ff is false, the required value + /// of the nnn field. EscapeFilter(bool c0_ff, uint8_t nnn_or_modRM) : ModRMFilter(), C0_FF(c0_ff), @@ -128,8 +129,8 @@ class AddRegEscapeFilter : public ModRMFilter { public: /// Constructor /// - /// @modRM - The value of the ModR/M byte when the register operand - /// refers to the first register in the register set. + /// \param modRM The value of the ModR/M byte when the register operand + /// refers to the first register in the register set. AddRegEscapeFilter(uint8_t modRM) : ModRM(modRM) { } @@ -150,9 +151,9 @@ class ExtendedFilter : public ModRMFilter { public: /// Constructor /// - /// @r - True if the mod field must be set to 11; false otherwise. - /// The name is explained at ModFilter. 
- /// @nnn - The required value of the nnn field. + /// \param r True if the mod field must be set to 11; false otherwise. + /// The name is explained at ModFilter. + /// \param nnn The required value of the nnn field. ExtendedFilter(bool r, uint8_t nnn) : ModRMFilter(), R(r), @@ -177,7 +178,7 @@ class ExactFilter : public ModRMFilter { public: /// Constructor /// - /// @modRM - The required value of the full ModR/M byte. + /// \param modRM The required value of the full ModR/M byte. ExactFilter(uint8_t modRM) : ModRMFilter(), ModRM(modRM) { diff --git a/utils/TableGen/X86RecognizableInstr.cpp b/utils/TableGen/X86RecognizableInstr.cpp index 7ac2336..d6ed2fe 100644 --- a/utils/TableGen/X86RecognizableInstr.cpp +++ b/utils/TableGen/X86RecognizableInstr.cpp @@ -38,14 +38,15 @@ using namespace llvm; MAP(D0, 45) \ MAP(D1, 46) \ MAP(D4, 47) \ - MAP(D8, 48) \ - MAP(D9, 49) \ - MAP(DA, 50) \ - MAP(DB, 51) \ - MAP(DC, 52) \ - MAP(DD, 53) \ - MAP(DE, 54) \ - MAP(DF, 55) + MAP(D5, 48) \ + MAP(D8, 49) \ + MAP(D9, 50) \ + MAP(DA, 51) \ + MAP(DB, 52) \ + MAP(DC, 53) \ + MAP(DD, 54) \ + MAP(DE, 55) \ + MAP(DF, 56) // A clone of X86 since we can't depend on something that is generated. namespace X86Local { @@ -244,7 +245,7 @@ RecognizableInstr::RecognizableInstr(DisassemblerTables &tables, IsSSE = (HasOpSizePrefix && (Name.find("16") == Name.npos)) || (Name.find("CRC32") != Name.npos); HasFROperands = hasFROperands(); - HasVEX_LPrefix = has256BitOperands() || Rec->getValueAsBit("hasVEX_L"); + HasVEX_LPrefix = Rec->getValueAsBit("hasVEX_L"); // Check for 64-bit inst which does not require REX Is32Bit = false; @@ -479,20 +480,6 @@ bool RecognizableInstr::hasFROperands() const { return false; } -bool RecognizableInstr::has256BitOperands() const { - const std::vector &OperandList = *Operands; - unsigned numOperands = OperandList.size(); - - for (unsigned operandIndex = 0; operandIndex < numOperands; ++operandIndex) { - const std::string &recName = OperandList[operandIndex].Rec->getName(); - - if (!recName.compare("VR256")) { - return true; - } - } - return false; -} - void RecognizableInstr::handleOperand(bool optional, unsigned &operandIndex, unsigned &physicalOperandIndex, unsigned &numPhysicalOperands, @@ -1145,6 +1132,8 @@ OperandEncoding RecognizableInstr::immediateEncodingFromString // register IDs in 8-bit immediates nowadays. ENCODING("VR256", ENCODING_IB) ENCODING("VR128", ENCODING_IB) + ENCODING("FR32", ENCODING_IB) + ENCODING("FR64", ENCODING_IB) errs() << "Unhandled immediate encoding " << s << "\n"; llvm_unreachable("Unhandled immediate encoding"); } diff --git a/utils/TableGen/X86RecognizableInstr.h b/utils/TableGen/X86RecognizableInstr.h index 542e510..9feb3c3 100644 --- a/utils/TableGen/X86RecognizableInstr.h +++ b/utils/TableGen/X86RecognizableInstr.h @@ -127,10 +127,7 @@ private: /// hasFROperands - Returns true if any operand is a FR operand. bool hasFROperands() const; - - /// has256BitOperands - Returns true if any operand is a 256-bit SSE operand. - bool has256BitOperands() const; - + /// typeFromString - Translates an operand type from the string provided in /// the LLVM tables to an OperandType for use in the operand specifier. /// @@ -143,7 +140,7 @@ private: /// @param hasREX_WPrefix - Indicates whether the instruction has a REX.W /// prefix. If it does, 32-bit register operands stay /// 32-bit regardless of the operand size. 
- /// @param hasOpSizePrefix- Indicates whether the instruction has an OpSize + /// @param hasOpSizePrefix Indicates whether the instruction has an OpSize /// prefix. If it does not, then 16-bit register /// operands stay 16-bit. /// @return - The operand's type. @@ -225,23 +222,23 @@ private: /// emitInstructionSpecifier - Loads the instruction specifier for the current /// instruction into a DisassemblerTables. /// - /// @arg tables - The DisassemblerTables to populate with the specifier for + /// \param tables The DisassemblerTables to populate with the specifier for /// the current instruction. void emitInstructionSpecifier(DisassemblerTables &tables); /// emitDecodePath - Populates the proper fields in the decode tables /// corresponding to the decode paths for this instruction. /// - /// @arg tables - The DisassemblerTables to populate with the decode + /// \param tables The DisassemblerTables to populate with the decode /// decode information for the current instruction. void emitDecodePath(DisassemblerTables &tables) const; /// Constructor - Initializes a RecognizableInstr with the appropriate fields /// from a CodeGenInstruction. /// - /// @arg tables - The DisassemblerTables that the specifier will be added to. - /// @arg insn - The CodeGenInstruction to extract information from. - /// @arg uid - The unique ID of the current instruction. + /// \param tables The DisassemblerTables that the specifier will be added to. + /// \param insn The CodeGenInstruction to extract information from. + /// \param uid The unique ID of the current instruction. RecognizableInstr(DisassemblerTables &tables, const CodeGenInstruction &insn, InstrUID uid); @@ -249,11 +246,11 @@ public: /// processInstr - Accepts a CodeGenInstruction and loads decode information /// for it into a DisassemblerTables if appropriate. /// - /// @arg tables - The DiassemblerTables to be populated with decode + /// \param tables The DiassemblerTables to be populated with decode /// information. - /// @arg insn - The CodeGenInstruction to be used as a source for this + /// \param insn The CodeGenInstruction to be used as a source for this /// information. - /// @uid - The unique ID of the instruction. + /// \param uid The unique ID of the instruction. static void processInstr(DisassemblerTables &tables, const CodeGenInstruction &insn, InstrUID uid); diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll b/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll deleted file mode 100644 index 3017b13..0000000 --- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/Bar/bar-test.ll +++ /dev/null @@ -1,3 +0,0 @@ -; RUN: true -; XFAIL: * -; XTARGET: darwin diff --git a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg index e9df1e5..3fdd63c 100644 --- a/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg +++ b/utils/lit/lit/ExampleTests/LLVM.InTree/test/lit.cfg @@ -77,7 +77,7 @@ for line in open(os.path.join(config.llvm_obj_root, 'test', 'site.exp')): excludes = [] -# Provide target_triple for use in XFAIL and XTARGET. +# Provide target_triple for use in XFAIL. config.target_triple = site_exp['target_triplet'] # Provide llvm_supports_target for use in local configs. 
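Reader's note (not part of the patch): the deleted bar-test.ll above was a user of the old XTARGET directive, which this patch retires in favour of XFAIL entries matched against the target triple and against a set of named features; the TestRunner.py and TestingConfig.py hunks further below implement the matching, and the ExampleTests changes below show the usage. As a sketch only, a lit local config can advertise such a feature, which tests may then name in an XFAIL line instead of pairing "XFAIL: *" with "XTARGET: ...":

    # Sketch only, mirroring the ExampleTests/lit.cfg change in this patch:
    # publish a feature name that "XFAIL: some-feature-name" lines can match.
    config.available_features.add('some-feature-name')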
diff --git a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg index e9df1e5..3fdd63c 100644 --- a/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg +++ b/utils/lit/lit/ExampleTests/LLVM.OutOfTree/src/test/lit.cfg @@ -77,7 +77,7 @@ for line in open(os.path.join(config.llvm_obj_root, 'test', 'site.exp')): excludes = [] -# Provide target_triple for use in XFAIL and XTARGET. +# Provide target_triple for use in XFAIL. config.target_triple = site_exp['target_triplet'] # Provide llvm_supports_target for use in local configs. diff --git a/utils/lit/lit/ExampleTests/lit.cfg b/utils/lit/lit/ExampleTests/lit.cfg index 20ee37d..2629918 100644 --- a/utils/lit/lit/ExampleTests/lit.cfg +++ b/utils/lit/lit/ExampleTests/lit.cfg @@ -23,4 +23,4 @@ config.test_exec_root = None config.target_triple = 'foo' # available_features: Used by ShTest and TclTest formats for REQUIRES checks. -config.available_features = ['some-feature-name'] +config.available_features.add('some-feature-name') diff --git a/utils/lit/lit/ExampleTests/vg-fail.c b/utils/lit/lit/ExampleTests/vg-fail.c new file mode 100644 index 0000000..e3339ff --- /dev/null +++ b/utils/lit/lit/ExampleTests/vg-fail.c @@ -0,0 +1,4 @@ +// This test should XPASS, when run without valgrind. + +// RUN: true +// XFAIL: valgrind diff --git a/utils/lit/lit/ExampleTests/xfail-feature.c b/utils/lit/lit/ExampleTests/xfail-feature.c new file mode 100644 index 0000000..3444bf8 --- /dev/null +++ b/utils/lit/lit/ExampleTests/xfail-feature.c @@ -0,0 +1,4 @@ +// This test should XPASS. + +// RUN: true +// XFAIL: some-feature-name diff --git a/utils/lit/lit/LitConfig.py b/utils/lit/lit/LitConfig.py index c71c0cc..0a359a3 100644 --- a/utils/lit/lit/LitConfig.py +++ b/utils/lit/lit/LitConfig.py @@ -42,14 +42,11 @@ class LitConfig: self.numWarnings = 0 self.valgrindArgs = [] - self.valgrindTriple = "" if self.useValgrind: - self.valgrindTriple = "-vg" self.valgrindArgs = ['valgrind', '-q', '--run-libc-freeres=no', '--tool=memcheck', '--trace-children=yes', '--error-exitcode=123'] if self.valgrindLeakCheck: - self.valgrindTriple += "_leak" self.valgrindArgs.append('--leak-check=full') else: # The default is 'summary'. diff --git a/utils/lit/lit/TestRunner.py b/utils/lit/lit/TestRunner.py index 71882b7..0c1911e 100644 --- a/utils/lit/lit/TestRunner.py +++ b/utils/lit/lit/TestRunner.py @@ -370,27 +370,27 @@ def executeScript(test, litConfig, tmpBase, commands, cwd): return executeCommand(command, cwd=cwd, env=test.config.environment) -def isExpectedFail(xfails, xtargets, target_triple): - # Check if any xfail matches this target. +def isExpectedFail(test, xfails): + # Check if any of the xfails match an available feature or the target. for item in xfails: - if item == '*' or item in target_triple: - break - else: - return False + # If this is the wildcard, it always fails. + if item == '*': + return True - # If so, see if it is expected to pass on this target. - # - # FIXME: Rename XTARGET to something that makes sense, like XPASS. - for item in xtargets: - if item == '*' or item in target_triple: - return False + # If this is an exact match for one of the features, it fails. + if item in test.config.available_features: + return True + + # If this is a part of the target triple, it fails. 
+ if item in test.suite.config.target_triple: + return True - return True + return False def parseIntegratedTestScript(test, normalize_slashes=False, extra_substitutions=[]): """parseIntegratedTestScript - Scan an LLVM/Clang style integrated test - script and extract the lines to 'RUN' as well as 'XFAIL' and 'XTARGET' + script and extract the lines to 'RUN' as well as 'XFAIL' and 'REQUIRES' information. The RUN lines also will have variable substitution performed. """ @@ -431,7 +431,6 @@ def parseIntegratedTestScript(test, normalize_slashes=False, # Collect the test lines from the script. script = [] xfails = [] - xtargets = [] requires = [] for ln in open(sourcepath): if 'RUN:' in ln: @@ -450,9 +449,6 @@ def parseIntegratedTestScript(test, normalize_slashes=False, elif 'XFAIL:' in ln: items = ln[ln.index('XFAIL:') + 6:].split(',') xfails.extend([s.strip() for s in items]) - elif 'XTARGET:' in ln: - items = ln[ln.index('XTARGET:') + 8:].split(',') - xtargets.extend([s.strip() for s in items]) elif 'REQUIRES:' in ln: items = ln[ln.index('REQUIRES:') + 9:].split(',') requires.extend([s.strip() for s in items]) @@ -491,7 +487,7 @@ def parseIntegratedTestScript(test, normalize_slashes=False, return (Test.UNSUPPORTED, "Test requires the following features: %s" % msg) - isXFail = isExpectedFail(xfails, xtargets, test.suite.config.target_triple) + isXFail = isExpectedFail(test, xfails) return script,isXFail,tmpBase,execdir def formatTestOutput(status, out, err, exitCode, failDueToStderr, script): diff --git a/utils/lit/lit/TestingConfig.py b/utils/lit/lit/TestingConfig.py index 223120c..a1f79a3 100644 --- a/utils/lit/lit/TestingConfig.py +++ b/utils/lit/lit/TestingConfig.py @@ -16,6 +16,7 @@ class TestingConfig: 'PATH' : os.pathsep.join(litConfig.path + [os.environ.get('PATH','')]), 'SYSTEMROOT' : os.environ.get('SYSTEMROOT',''), + 'TERM' : os.environ.get('TERM',''), 'LLVM_DISABLE_CRASH_REPORT' : '1', } @@ -28,6 +29,13 @@ class TestingConfig: 'TMP' : os.environ.get('TMP',''), }) + # Set the default available features based on the LitConfig. + available_features = [] + if litConfig.useValgrind: + available_features.append('valgrind') + if litConfig.valgrindLeakCheck: + available_features.append('vg_leak') + config = TestingConfig(parent, name = '', suffixes = set(), @@ -39,7 +47,7 @@ class TestingConfig: test_exec_root = None, test_source_root = None, excludes = [], - available_features = []) + available_features = available_features) if os.path.exists(path): # FIXME: Improve detection and error reporting of errors in the diff --git a/utils/lit/lit/Util.py b/utils/lit/lit/Util.py index 226e453..f294809 100644 --- a/utils/lit/lit/Util.py +++ b/utils/lit/lit/Util.py @@ -56,7 +56,7 @@ def which(command, paths = None): paths = os.environ.get('PATH','') # Check for absolute match first. - if os.path.exists(command): + if os.path.isfile(command): return command # Would be nice if Python had a lib function for this. 
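Reader's note (not part of the patch): with the rewritten isExpectedFail above and the default features added in TestingConfig.py ('valgrind', plus 'vg_leak' when leak checking is enabled), an XFAIL entry now fires if it is the wildcard '*', names an available feature, or is a substring of the target triple. A condensed restatement of that logic, for reference only:

    # Sketch only: condensed form of the isExpectedFail logic added above.
    def is_expected_fail(test, xfails):
        for item in xfails:
            if item == '*':                             # wildcard: always an expected failure
                return True
            if item in test.config.available_features:  # feature name, e.g. 'valgrind'
                return True
            if item in test.suite.config.target_triple: # substring of the target triple
                return True
        return False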
diff --git a/utils/lldbDataFormatters.py b/utils/lldbDataFormatters.py index 18b407a..1baf398 100644 --- a/utils/lldbDataFormatters.py +++ b/utils/lldbDataFormatters.py @@ -2,6 +2,7 @@ Load into LLDB with: script import lldbDataFormatters type synthetic add -x "^llvm::SmallVectorImpl<.+>$" -l lldbDataFormatters.SmallVectorSynthProvider +type synthetic add -x "^llvm::SmallVector<.+,.+>$" -l lldbDataFormatters.SmallVectorSynthProvider """ # Pretty printer for llvm::SmallVector/llvm::SmallVectorImpl @@ -32,22 +33,15 @@ class SmallVectorSynthProvider: return self.begin.CreateChildAtOffset('['+str(index)+']', offset, self.data_type) - def get_type_from_name(self): - import re - name = self.valobj.GetType().GetName() - # This class works with both SmallVectors and SmallVectorImpls. - res = re.match("^(llvm::)?SmallVectorImpl<(.+)>$", name) - if res: - return res.group(2) - res = re.match("^(llvm::)?SmallVector<(.+), \d+>$", name) - if res: - return res.group(2) - return None - def update(self): self.begin = self.valobj.GetChildMemberWithName('BeginX') self.end = self.valobj.GetChildMemberWithName('EndX') - data_type = self.get_type_from_name() - # FIXME: this sometimes returns an invalid type. - self.data_type = self.valobj.GetTarget().FindFirstType(data_type) + the_type = self.valobj.GetType() + # If this is a reference type we have to dereference it to get to the + # template parameter. + if the_type.IsReferenceType(): + the_type = the_type.GetDereferencedType() + + self.data_type = the_type.GetTemplateArgumentType(0) self.type_size = self.data_type.GetByteSize() + assert self.type_size != 0 diff --git a/utils/llvm-lit/llvm-lit.in b/utils/llvm-lit/llvm-lit.in index 879d18b..768dc51 100644 --- a/utils/llvm-lit/llvm-lit.in +++ b/utils/llvm-lit/llvm-lit.in @@ -18,10 +18,15 @@ builtin_parameters = { 'llvm_site_config' : os.path.join(llvm_obj_root, 'test', 'lit.site.cfg') } -clang_site_config = os.path.join(llvm_obj_root, 'tools', 'clang', 'test', - 'lit.site.cfg') -if os.path.exists(clang_site_config): - builtin_parameters['clang_site_config'] = clang_site_config +clang_obj_root = os.path.join(llvm_obj_root, 'tools', 'clang') + +if os.path.exists(clang_obj_root): + builtin_parameters['clang_site_config'] = \ + os.path.join(clang_obj_root, 'test', 'lit.site.cfg') + clang_tools_extra_obj_root = os.path.join(clang_obj_root, 'tools', 'extra') + if os.path.exists(clang_tools_extra_obj_root): + builtin_parameters['clang_tools_extra_site_config'] = \ + os.path.join(clang_tools_extra_obj_root, 'test', 'lit.site.cfg') if __name__=='__main__': import lit diff --git a/utils/llvm.grm b/utils/llvm.grm index ad2799f..322036b 100644 --- a/utils/llvm.grm +++ b/utils/llvm.grm @@ -175,7 +175,6 @@ FuncAttr ::= noreturn | returns_twice | nonlazybind | address_safety - | ia_nsdialect ; OptFuncAttrs ::= + _ | OptFuncAttrs FuncAttr ; diff --git a/utils/unittest/googletest/gtest-port.cc b/utils/unittest/googletest/gtest-port.cc index 07e5bb3..3c32ff1 100644 --- a/utils/unittest/googletest/gtest-port.cc +++ b/utils/unittest/googletest/gtest-port.cc @@ -505,6 +505,10 @@ class CapturedStream { GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file " << temp_file_path; filename_ = temp_file_path; +#elif GTEST_OS_LINUX_ANDROID + char name_template[] = "/sdcard/captured_stderr.XXXXXX"; + const int captured_fd = mkstemp(name_template); + filename_ = name_template; # else // There's no guarantee that a test has write access to the // current directory, so we create the temporary file in the /tmp diff --git 
a/utils/unittest/googletest/include/gtest/internal/gtest-port.h b/utils/unittest/googletest/include/gtest/internal/gtest-port.h index 8ef5d7d..58f6caf 100644 --- a/utils/unittest/googletest/include/gtest/internal/gtest-port.h +++ b/utils/unittest/googletest/include/gtest/internal/gtest-port.h @@ -230,7 +230,7 @@ # define GTEST_OS_MAC 1 #elif defined __linux__ # define GTEST_OS_LINUX 1 -# ifdef ANDROID +# if defined(ANDROID) || defined(__ANDROID__) # define GTEST_OS_LINUX_ANDROID 1 # endif // ANDROID #elif defined __MVS__ diff --git a/utils/vim/llvm.vim b/utils/vim/llvm.vim index c83e8ca..c16274b 100644 --- a/utils/vim/llvm.vim +++ b/utils/vim/llvm.vim @@ -1,7 +1,7 @@ " Vim syntax file " Language: llvm " Maintainer: The LLVM team, http://llvm.org/ -" Version: $Revision: 156080 $ +" Version: $Revision: 166305 $ if version < 600 syntax clear @@ -79,7 +79,6 @@ syn match llvmSpecialComment /;\s*RUN:.*$/ syn match llvmSpecialComment /;\s*PR\d*\s*$/ syn match llvmSpecialComment /;\s*END\.\s*$/ syn match llvmSpecialComment /;\s*XFAIL:.*$/ -syn match llvmSpecialComment /;\s*XTARGET:.*$/ if version >= 508 || !exists("did_c_syn_inits") if version < 508 diff --git a/utils/yaml2obj/yaml2obj.cpp b/utils/yaml2obj/yaml2obj.cpp index c3b3e54..4fc620f 100644 --- a/utils/yaml2obj/yaml2obj.cpp +++ b/utils/yaml2obj/yaml2obj.cpp @@ -148,7 +148,7 @@ struct COFFParser { return false; } if (KeyValue == "Machine") { - uint16_t Machine; + uint16_t Machine = COFF::MT_Invalid; if (!getAs(Value, Machine)) { // It's not a raw number, try matching the string. StringRef ValueValue = Value->getValue(Storage); -- cgit v1.1 From 056abd2059c65a3e908193aeae16fad98017437c Mon Sep 17 00:00:00 2001 From: dim Date: Sun, 2 Dec 2012 13:20:44 +0000 Subject: Vendor import of clang release_32 branch r168974 (effectively, 3.2 RC2): http://llvm.org/svn/llvm-project/cfe/branches/release_32@168974 --- .gitignore | 1 + CMakeLists.txt | 30 +- Makefile | 4 + NOTES.txt | 2 +- bindings/python/clang/cindex.py | 1330 +- .../python/tests/cindex/test_code_completion.py | 75 + bindings/python/tests/cindex/test_cursor.py | 9 + bindings/xml/comment-xml-schema.rng | 94 +- docs/AddressSanitizer.html | 49 +- docs/AutomaticReferenceCounting.html | 43 +- docs/BlockLanguageSpec.txt | 24 +- docs/ClangTools.html | 18 +- docs/HowToSetupToolingForLLVM.html | 34 +- docs/InternalsManual.html | 4 +- docs/LanguageExtensions.html | 8 +- docs/LibASTMatchers.html | 130 + docs/LibASTMatchersReference.html | 1938 ++ docs/LibTooling.html | 125 +- docs/ObjectiveCLiterals.html | 2 +- docs/PCHInternals.html | 320 +- docs/ReleaseNotes.html | 10 + docs/ThreadSanitizer.html | 15 +- docs/UsersManual.html | 82 +- docs/analyzer/IPA.txt | 363 +- docs/analyzer/debug-checks.txt | 89 + docs/tools/clang.pod | 2 +- docs/tools/dump_ast_matchers.py | 264 + examples/analyzer-plugin/MainCallChecker.cpp | 2 +- examples/clang-interpreter/CMakeLists.txt | 3 +- examples/clang-interpreter/Makefile | 2 +- examples/clang-interpreter/main.cpp | 7 +- include/clang-c/Index.h | 170 +- include/clang/ARCMigrate/ARCMT.h | 5 +- include/clang/AST/ASTConsumer.h | 15 +- include/clang/AST/ASTContext.h | 616 +- include/clang/AST/ASTMutationListener.h | 2 + include/clang/AST/Attr.h | 5 - include/clang/AST/BuiltinTypes.def | 2 + include/clang/AST/CMakeLists.txt | 12 + include/clang/AST/CXXInheritance.h | 17 +- include/clang/AST/CanonicalType.h | 1 + include/clang/AST/CharUnits.h | 4 +- include/clang/AST/Comment.h | 189 +- include/clang/AST/CommentBriefParser.h | 3 +- include/clang/AST/CommentCommandTraits.h | 
186 +- include/clang/AST/CommentCommands.td | 156 + include/clang/AST/CommentHTMLTags.td | 54 + include/clang/AST/CommentLexer.h | 116 +- include/clang/AST/CommentParser.h | 4 +- include/clang/AST/CommentSema.h | 48 +- include/clang/AST/Decl.h | 52 +- include/clang/AST/DeclBase.h | 27 +- include/clang/AST/DeclCXX.h | 46 +- include/clang/AST/DeclFriend.h | 12 +- include/clang/AST/DeclObjC.h | 106 +- include/clang/AST/DeclTemplate.h | 130 +- include/clang/AST/DeclarationName.h | 4 +- include/clang/AST/Expr.h | 249 +- include/clang/AST/ExprCXX.h | 214 +- include/clang/AST/ExprObjC.h | 33 +- include/clang/AST/ExternalASTSource.h | 2 +- include/clang/AST/Makefile | 19 +- include/clang/AST/NSAPI.h | 6 +- include/clang/AST/NestedNameSpecifier.h | 3 +- include/clang/AST/OperationKinds.h | 6 +- include/clang/AST/PrettyPrinter.h | 12 + include/clang/AST/RawCommentList.h | 6 +- include/clang/AST/RecordLayout.h | 4 +- include/clang/AST/RecursiveASTVisitor.h | 13 +- include/clang/AST/SelectorLocationsKind.h | 4 +- include/clang/AST/Stmt.h | 377 +- include/clang/AST/StmtCXX.h | 5 - include/clang/AST/StmtObjC.h | 7 - include/clang/AST/TemplateBase.h | 92 +- include/clang/AST/Type.h | 163 +- include/clang/AST/TypeLoc.h | 42 +- include/clang/AST/UnresolvedSet.h | 2 +- include/clang/AST/VTableBuilder.h | 5 +- include/clang/ASTMatchers/ASTMatchFinder.h | 29 +- include/clang/ASTMatchers/ASTMatchers.h | 1445 +- include/clang/ASTMatchers/ASTMatchersInternal.h | 621 +- include/clang/ASTMatchers/ASTMatchersMacros.h | 65 + include/clang/ASTMatchers/ASTTypeTraits.h | 209 + include/clang/Analysis/Analyses/FormatString.h | 24 +- include/clang/Analysis/Analyses/ThreadSafety.h | 3 +- include/clang/Analysis/AnalysisContext.h | 35 +- include/clang/Analysis/CFG.h | 6 +- .../clang/Analysis/DomainSpecific/ObjCNoReturn.h | 46 + include/clang/Analysis/ProgramPoint.h | 7 +- include/clang/Basic/Attr.td | 16 +- include/clang/Basic/Builtins.def | 65 +- include/clang/Basic/BuiltinsMips.def | 63 + include/clang/Basic/BuiltinsNVPTX.def | 246 + include/clang/Basic/BuiltinsX86.def | 9 + include/clang/Basic/ConvertUTF.h | 12 +- include/clang/Basic/Diagnostic.h | 27 +- include/clang/Basic/DiagnosticASTKinds.td | 2 +- include/clang/Basic/DiagnosticCommentKinds.td | 16 + include/clang/Basic/DiagnosticCommonKinds.td | 5 +- include/clang/Basic/DiagnosticDriverKinds.td | 24 +- include/clang/Basic/DiagnosticFrontendKinds.td | 7 + include/clang/Basic/DiagnosticGroups.td | 83 +- include/clang/Basic/DiagnosticLexKinds.td | 35 +- include/clang/Basic/DiagnosticOptions.def | 93 + include/clang/Basic/DiagnosticOptions.h | 85 + include/clang/Basic/DiagnosticParseKinds.td | 50 +- include/clang/Basic/DiagnosticSemaKinds.td | 361 +- .../clang/Basic/DiagnosticSerializationKinds.td | 43 +- include/clang/Basic/FileManager.h | 4 + include/clang/Basic/IdentifierTable.h | 33 +- include/clang/Basic/LangOptions.def | 22 +- include/clang/Basic/Module.h | 30 +- include/clang/Basic/ObjCRuntime.h | 45 +- include/clang/Basic/OnDiskHashTable.h | 5 +- include/clang/Basic/Sanitizers.def | 69 + include/clang/Basic/SourceLocation.h | 2 + include/clang/Basic/SourceManager.h | 21 +- include/clang/Basic/Specifiers.h | 15 + include/clang/Basic/StmtNodes.td | 10 +- include/clang/Basic/TargetInfo.h | 59 +- include/clang/Basic/TargetOptions.h | 6 +- include/clang/Basic/TokenKinds.def | 41 +- include/clang/Basic/TokenKinds.h | 25 + include/clang/Basic/TypeTraits.h | 1 + include/clang/Basic/arm_neon.td | 8 +- include/clang/CodeGen/CodeGenAction.h | 2 +- 
include/clang/Driver/Action.h | 15 - include/clang/Driver/Arg.h | 26 +- include/clang/Driver/ArgList.h | 72 +- include/clang/Driver/CC1AsOptions.h | 4 +- include/clang/Driver/CC1AsOptions.td | 43 +- include/clang/Driver/CC1Options.td | 419 +- include/clang/Driver/Compilation.h | 11 +- include/clang/Driver/Driver.h | 43 +- include/clang/Driver/Job.h | 4 - include/clang/Driver/OptParser.td | 31 +- include/clang/Driver/OptTable.h | 61 +- include/clang/Driver/Option.h | 324 +- include/clang/Driver/Options.h | 4 +- include/clang/Driver/Options.td | 1645 +- include/clang/Driver/Tool.h | 4 +- include/clang/Driver/ToolChain.h | 40 +- include/clang/Driver/Types.h | 4 - include/clang/Frontend/ASTUnit.h | 86 +- include/clang/Frontend/Analyses.def | 67 - include/clang/Frontend/AnalyzerOptions.h | 135 - include/clang/Frontend/CodeGenOptions.def | 132 + include/clang/Frontend/CodeGenOptions.h | 182 +- include/clang/Frontend/CompilerInstance.h | 44 +- include/clang/Frontend/CompilerInvocation.h | 79 +- include/clang/Frontend/DiagnosticOptions.h | 111 - include/clang/Frontend/DiagnosticRenderer.h | 8 +- include/clang/Frontend/FrontendAction.h | 16 +- include/clang/Frontend/FrontendOptions.h | 34 +- include/clang/Frontend/HeaderSearchOptions.h | 146 - include/clang/Frontend/LangStandard.h | 18 +- include/clang/Frontend/LangStandards.def | 58 +- include/clang/Frontend/LogDiagnosticPrinter.h | 4 +- include/clang/Frontend/MultiplexConsumer.h | 1 - include/clang/Frontend/PreprocessorOptions.h | 224 - .../clang/Frontend/SerializedDiagnosticPrinter.h | 2 +- include/clang/Frontend/TextDiagnostic.h | 2 +- include/clang/Frontend/TextDiagnosticPrinter.h | 5 +- include/clang/Frontend/VerifyDiagnosticConsumer.h | 86 +- include/clang/Lex/ExternalPreprocessorSource.h | 3 - include/clang/Lex/HeaderMap.h | 5 +- include/clang/Lex/HeaderSearch.h | 25 +- include/clang/Lex/HeaderSearchOptions.h | 147 + include/clang/Lex/Lexer.h | 91 +- include/clang/Lex/LiteralSupport.h | 10 +- include/clang/Lex/MacroInfo.h | 60 +- include/clang/Lex/ModuleMap.h | 74 +- include/clang/Lex/PPCallbacks.h | 53 +- include/clang/Lex/PPMutationListener.h | 43 + include/clang/Lex/PTHLexer.h | 4 +- include/clang/Lex/PTHManager.h | 5 +- include/clang/Lex/PreprocessingRecord.h | 84 +- include/clang/Lex/Preprocessor.h | 128 +- include/clang/Lex/PreprocessorLexer.h | 4 +- include/clang/Lex/PreprocessorOptions.h | 221 + include/clang/Lex/Token.h | 18 +- include/clang/Lex/TokenLexer.h | 17 +- include/clang/Parse/Parser.h | 197 +- include/clang/Rewrite/ASTConsumers.h | 48 - include/clang/Rewrite/Core/DeltaTree.h | 50 + include/clang/Rewrite/Core/HTMLRewrite.h | 81 + include/clang/Rewrite/Core/RewriteRope.h | 239 + include/clang/Rewrite/Core/Rewriter.h | 295 + include/clang/Rewrite/Core/TokenRewriter.h | 79 + include/clang/Rewrite/DeltaTree.h | 48 - include/clang/Rewrite/FixItRewriter.h | 130 - include/clang/Rewrite/Frontend/ASTConsumers.h | 48 + include/clang/Rewrite/Frontend/FixItRewriter.h | 130 + include/clang/Rewrite/Frontend/FrontendActions.h | 83 + include/clang/Rewrite/Frontend/Rewriters.h | 35 + include/clang/Rewrite/FrontendActions.h | 83 - include/clang/Rewrite/HTMLRewrite.h | 81 - include/clang/Rewrite/RewriteRope.h | 231 - include/clang/Rewrite/Rewriter.h | 295 - include/clang/Rewrite/Rewriters.h | 35 - include/clang/Rewrite/TokenRewriter.h | 79 - include/clang/Sema/AttributeList.h | 8 +- include/clang/Sema/CodeCompleteConsumer.h | 20 +- include/clang/Sema/DeclSpec.h | 54 +- include/clang/Sema/DelayedDiagnostic.h | 11 +- 
include/clang/Sema/ExternalSemaSource.h | 1 - include/clang/Sema/Initialization.h | 4 +- include/clang/Sema/LocInfoType.h | 1 - include/clang/Sema/MultiplexExternalSemaSource.h | 367 + include/clang/Sema/Overload.h | 12 +- include/clang/Sema/Ownership.h | 216 +- include/clang/Sema/ParsedTemplate.h | 5 - include/clang/Sema/Scope.h | 8 +- include/clang/Sema/ScopeInfo.h | 226 +- include/clang/Sema/Sema.h | 249 +- include/clang/Sema/SemaConsumer.h | 1 - include/clang/Sema/Template.h | 5 +- include/clang/Sema/TemplateDeduction.h | 18 +- include/clang/Sema/TypoCorrection.h | 12 + include/clang/Serialization/ASTBitCodes.h | 234 +- .../Serialization/ASTDeserializationListener.h | 6 +- include/clang/Serialization/ASTReader.h | 476 +- include/clang/Serialization/ASTWriter.h | 72 +- include/clang/Serialization/ContinuousRangeMap.h | 4 +- include/clang/Serialization/Module.h | 66 +- include/clang/Serialization/ModuleManager.h | 9 +- .../StaticAnalyzer/Checkers/DereferenceChecker.h | 35 - include/clang/StaticAnalyzer/Core/Analyses.def | 67 + .../clang/StaticAnalyzer/Core/AnalyzerOptions.h | 308 + .../StaticAnalyzer/Core/BugReporter/BugReporter.h | 89 +- .../Core/BugReporter/BugReporterVisitor.h | 58 +- .../Core/BugReporter/PathDiagnostic.h | 129 +- include/clang/StaticAnalyzer/Core/Checker.h | 17 - include/clang/StaticAnalyzer/Core/CheckerManager.h | 12 +- .../StaticAnalyzer/Core/PathSensitive/APSIntType.h | 4 + .../Core/PathSensitive/AnalysisManager.h | 80 +- .../Core/PathSensitive/BasicValueFactory.h | 5 +- .../StaticAnalyzer/Core/PathSensitive/CallEvent.h | 167 +- .../Core/PathSensitive/CheckerContext.h | 130 +- .../Core/PathSensitive/ConstraintManager.h | 111 +- .../StaticAnalyzer/Core/PathSensitive/CoreEngine.h | 70 +- .../Core/PathSensitive/DynamicTypeInfo.h | 52 + .../Core/PathSensitive/Environment.h | 11 +- .../Core/PathSensitive/ExplodedGraph.h | 74 +- .../StaticAnalyzer/Core/PathSensitive/ExprEngine.h | 76 +- .../StaticAnalyzer/Core/PathSensitive/MemRegion.h | 43 +- .../Core/PathSensitive/ProgramState.h | 95 +- .../Core/PathSensitive/ProgramStateTrait.h | 31 + .../Core/PathSensitive/SValBuilder.h | 57 +- .../StaticAnalyzer/Core/PathSensitive/SVals.h | 5 +- .../StaticAnalyzer/Core/PathSensitive/Store.h | 45 +- .../StaticAnalyzer/Core/PathSensitive/SubEngine.h | 6 +- .../Core/PathSensitive/SymbolManager.h | 52 +- .../Core/PathSensitive/TaintManager.h | 2 + include/clang/Tooling/CommandLineClangTool.h | 80 - include/clang/Tooling/CommonOptionsParser.h | 89 + include/clang/Tooling/CompilationDatabase.h | 95 +- .../Tooling/CompilationDatabasePluginRegistry.h | 27 + include/clang/Tooling/FileMatchTrie.h | 90 + include/clang/Tooling/JSONCompilationDatabase.h | 107 + include/clang/Tooling/Refactoring.h | 1 + include/clang/Tooling/Tooling.h | 48 +- lib/ARCMigrate/ARCMT.cpp | 75 +- lib/ARCMigrate/CMakeLists.txt | 3 +- lib/ARCMigrate/FileRemapper.cpp | 2 +- lib/ARCMigrate/Internals.h | 2 + lib/ARCMigrate/ObjCMT.cpp | 2 +- lib/ARCMigrate/Transforms.cpp | 4 +- lib/AST/ASTConsumer.cpp | 5 + lib/AST/ASTContext.cpp | 325 +- lib/AST/ASTDiagnostic.cpp | 277 +- lib/AST/ASTImporter.cpp | 478 +- lib/AST/CMakeLists.txt | 3 + lib/AST/CXXInheritance.cpp | 32 +- lib/AST/Comment.cpp | 93 +- lib/AST/CommentBriefParser.cpp | 51 +- lib/AST/CommentCommandTraits.cpp | 141 +- lib/AST/CommentDumper.cpp | 48 +- lib/AST/CommentLexer.cpp | 62 +- lib/AST/CommentParser.cpp | 72 +- lib/AST/CommentSema.cpp | 314 +- lib/AST/Decl.cpp | 88 +- lib/AST/DeclBase.cpp | 2 +- lib/AST/DeclCXX.cpp | 30 +- lib/AST/DeclObjC.cpp | 269 +- 
lib/AST/DeclPrinter.cpp | 18 +- lib/AST/DeclTemplate.cpp | 86 +- lib/AST/DumpXML.cpp | 23 +- lib/AST/Expr.cpp | 299 +- lib/AST/ExprCXX.cpp | 121 +- lib/AST/ExprClassification.cpp | 1 + lib/AST/ExprConstant.cpp | 61 +- lib/AST/ItaniumMangle.cpp | 133 +- lib/AST/MicrosoftMangle.cpp | 242 +- lib/AST/NSAPI.cpp | 1 + lib/AST/ParentMap.cpp | 69 +- lib/AST/RawCommentList.cpp | 69 +- lib/AST/RecordLayoutBuilder.cpp | 84 +- lib/AST/Stmt.cpp | 167 +- lib/AST/StmtDumper.cpp | 115 +- lib/AST/StmtPrinter.cpp | 68 +- lib/AST/StmtProfile.cpp | 16 +- lib/AST/TemplateBase.cpp | 63 +- lib/AST/Type.cpp | 55 +- lib/AST/TypeLoc.cpp | 55 +- lib/AST/TypePrinter.cpp | 14 +- lib/AST/VTableBuilder.cpp | 3 + lib/ASTMatchers/ASTMatchFinder.cpp | 411 +- lib/ASTMatchers/ASTMatchersInternal.cpp | 66 +- lib/Analysis/AnalysisDeclContext.cpp | 41 +- lib/Analysis/BodyFarm.cpp | 374 + lib/Analysis/BodyFarm.h | 43 + lib/Analysis/CFG.cpp | 83 +- lib/Analysis/CMakeLists.txt | 4 +- lib/Analysis/FormatString.cpp | 57 +- lib/Analysis/ObjCNoReturn.cpp | 67 + lib/Analysis/PrintfFormatString.cpp | 43 +- lib/Analysis/ReachableCode.cpp | 4 +- lib/Analysis/ScanfFormatString.cpp | 36 +- lib/Analysis/ThreadSafety.cpp | 340 +- lib/Analysis/UninitializedValues.cpp | 64 +- lib/Basic/ConvertUTF.c | 24 +- lib/Basic/ConvertUTFWrapper.cpp | 18 +- lib/Basic/Diagnostic.cpp | 25 +- lib/Basic/DiagnosticIDs.cpp | 5 +- lib/Basic/FileManager.cpp | 7 + lib/Basic/IdentifierTable.cpp | 4 +- lib/Basic/Module.cpp | 11 +- lib/Basic/SourceLocation.cpp | 7 + lib/Basic/SourceManager.cpp | 133 +- lib/Basic/TargetInfo.cpp | 8 +- lib/Basic/Targets.cpp | 406 +- lib/Basic/Version.cpp | 2 +- lib/CodeGen/ABIInfo.h | 38 +- lib/CodeGen/BackendUtil.cpp | 109 +- lib/CodeGen/CGBlocks.cpp | 138 +- lib/CodeGen/CGBlocks.h | 26 +- lib/CodeGen/CGBuiltin.cpp | 430 +- lib/CodeGen/CGCXXABI.cpp | 2 +- lib/CodeGen/CGCXXABI.h | 12 + lib/CodeGen/CGCall.cpp | 223 +- lib/CodeGen/CGClass.cpp | 31 +- lib/CodeGen/CGDebugInfo.cpp | 257 +- lib/CodeGen/CGDebugInfo.h | 3 + lib/CodeGen/CGDecl.cpp | 53 +- lib/CodeGen/CGDeclCXX.cpp | 77 +- lib/CodeGen/CGException.cpp | 17 +- lib/CodeGen/CGExpr.cpp | 568 +- lib/CodeGen/CGExprAgg.cpp | 32 +- lib/CodeGen/CGExprCXX.cpp | 71 +- lib/CodeGen/CGExprComplex.cpp | 5 +- lib/CodeGen/CGExprConstant.cpp | 41 +- lib/CodeGen/CGExprScalar.cpp | 482 +- lib/CodeGen/CGObjC.cpp | 86 +- lib/CodeGen/CGObjCGNU.cpp | 156 +- lib/CodeGen/CGObjCMac.cpp | 798 +- lib/CodeGen/CGObjCRuntime.cpp | 7 + lib/CodeGen/CGObjCRuntime.h | 8 + lib/CodeGen/CGRTTI.cpp | 17 +- lib/CodeGen/CGRecordLayout.h | 4 +- lib/CodeGen/CGRecordLayoutBuilder.cpp | 26 +- lib/CodeGen/CGStmt.cpp | 128 +- lib/CodeGen/CGVTables.cpp | 60 +- lib/CodeGen/CodeGenAction.cpp | 6 +- lib/CodeGen/CodeGenFunction.cpp | 66 +- lib/CodeGen/CodeGenFunction.h | 133 +- lib/CodeGen/CodeGenModule.cpp | 227 +- lib/CodeGen/CodeGenModule.h | 36 +- lib/CodeGen/CodeGenTBAA.cpp | 57 + lib/CodeGen/CodeGenTBAA.h | 15 + lib/CodeGen/CodeGenTypes.cpp | 4 +- lib/CodeGen/CodeGenTypes.h | 6 +- lib/CodeGen/ItaniumCXXABI.cpp | 58 +- lib/CodeGen/MicrosoftCXXABI.cpp | 65 +- lib/CodeGen/ModuleBuilder.cpp | 6 +- lib/CodeGen/TargetInfo.cpp | 752 +- lib/Driver/Arg.cpp | 34 +- lib/Driver/ArgList.cpp | 41 +- lib/Driver/CC1AsOptions.cpp | 14 +- lib/Driver/Compilation.cpp | 100 + lib/Driver/Driver.cpp | 248 +- lib/Driver/DriverOptions.cpp | 14 +- lib/Driver/OptTable.cpp | 158 +- lib/Driver/Option.cpp | 333 +- lib/Driver/SanitizerArgs.h | 106 + lib/Driver/ToolChain.cpp | 61 +- lib/Driver/ToolChains.cpp | 481 +- lib/Driver/ToolChains.h | 43 +- 
lib/Driver/Tools.cpp | 980 +- lib/Driver/Tools.h | 5 +- lib/Driver/Types.cpp | 14 - lib/Driver/WindowsToolChain.cpp | 14 +- lib/Edit/RewriteObjCFoundationAPI.cpp | 1 + lib/Frontend/ASTConsumers.cpp | 5 +- lib/Frontend/ASTMerge.cpp | 5 +- lib/Frontend/ASTUnit.cpp | 306 +- lib/Frontend/ChainedDiagnosticConsumer.cpp | 2 +- lib/Frontend/ChainedIncludesSource.cpp | 22 +- lib/Frontend/CompilerInstance.cpp | 139 +- lib/Frontend/CompilerInvocation.cpp | 1261 +- lib/Frontend/CreateInvocationFromCommandLine.cpp | 11 +- lib/Frontend/DependencyFile.cpp | 10 +- lib/Frontend/DependencyGraph.cpp | 10 +- lib/Frontend/DiagnosticRenderer.cpp | 94 +- lib/Frontend/FrontendAction.cpp | 76 +- lib/Frontend/FrontendActions.cpp | 133 +- lib/Frontend/InitHeaderSearch.cpp | 6 +- lib/Frontend/InitPreprocessor.cpp | 86 +- lib/Frontend/LogDiagnosticPrinter.cpp | 7 +- lib/Frontend/PrintPreprocessedOutput.cpp | 8 +- lib/Frontend/SerializedDiagnosticPrinter.cpp | 232 +- lib/Frontend/TextDiagnostic.cpp | 203 +- lib/Frontend/TextDiagnosticPrinter.cpp | 10 +- lib/Frontend/VerifyDiagnosticConsumer.cpp | 213 +- lib/Frontend/Warnings.cpp | 5 +- lib/FrontendTool/CMakeLists.txt | 3 +- lib/FrontendTool/ExecuteCompilerInvocation.cpp | 9 +- lib/Headers/CMakeLists.txt | 4 + lib/Headers/__wmmintrin_aes.h | 67 + lib/Headers/__wmmintrin_pclmul.h | 34 + lib/Headers/altivec.h | 26 +- lib/Headers/bmi2intrin.h | 19 + lib/Headers/cpuid.h | 2 +- lib/Headers/f16cintrin.h | 58 + lib/Headers/immintrin.h | 4 + lib/Headers/module.map | 37 + lib/Headers/rtmintrin.h | 49 + lib/Headers/unwind.h | 2 +- lib/Headers/wmmintrin.h | 41 +- lib/Headers/x86intrin.h | 4 + lib/Headers/xmmintrin.h | 9 +- lib/Lex/HeaderMap.cpp | 2 +- lib/Lex/HeaderSearch.cpp | 47 +- lib/Lex/Lexer.cpp | 75 +- lib/Lex/LiteralSupport.cpp | 263 +- lib/Lex/MacroArgs.cpp | 2 +- lib/Lex/MacroInfo.cpp | 90 +- lib/Lex/ModuleMap.cpp | 524 +- lib/Lex/PPDirectives.cpp | 56 +- lib/Lex/PPExpressions.cpp | 20 +- lib/Lex/PPLexerChange.cpp | 6 +- lib/Lex/PPMacroExpansion.cpp | 333 +- lib/Lex/PTHLexer.cpp | 11 +- lib/Lex/Pragma.cpp | 22 +- lib/Lex/PreprocessingRecord.cpp | 56 +- lib/Lex/Preprocessor.cpp | 66 +- lib/Lex/TokenLexer.cpp | 88 +- lib/Parse/ParseAST.cpp | 1 - lib/Parse/ParseCXXInlineMethods.cpp | 6 +- lib/Parse/ParseDecl.cpp | 224 +- lib/Parse/ParseDeclCXX.cpp | 190 +- lib/Parse/ParseExpr.cpp | 179 +- lib/Parse/ParseExprCXX.cpp | 203 +- lib/Parse/ParseInit.cpp | 11 +- lib/Parse/ParseObjc.cpp | 67 +- lib/Parse/ParsePragma.cpp | 248 +- lib/Parse/ParsePragma.h | 37 +- lib/Parse/ParseStmt.cpp | 297 +- lib/Parse/ParseTemplate.cpp | 10 +- lib/Parse/ParseTentative.cpp | 131 +- lib/Parse/Parser.cpp | 272 +- lib/Parse/RAIIObjectsForParser.h | 10 +- lib/Rewrite/CMakeLists.txt | 35 +- lib/Rewrite/Core/CMakeLists.txt | 24 + lib/Rewrite/Core/DeltaTree.cpp | 464 + lib/Rewrite/Core/HTMLRewrite.cpp | 584 + lib/Rewrite/Core/Makefile | 18 + lib/Rewrite/Core/RewriteRope.cpp | 807 + lib/Rewrite/Core/Rewriter.cpp | 486 + lib/Rewrite/Core/TokenRewriter.cpp | 99 + lib/Rewrite/DeltaTree.cpp | 467 - lib/Rewrite/FixItRewriter.cpp | 205 - lib/Rewrite/Frontend/CMakeLists.txt | 28 + lib/Rewrite/Frontend/FixItRewriter.cpp | 205 + lib/Rewrite/Frontend/FrontendActions.cpp | 192 + lib/Rewrite/Frontend/HTMLPrint.cpp | 94 + lib/Rewrite/Frontend/InclusionRewriter.cpp | 363 + lib/Rewrite/Frontend/Makefile | 18 + lib/Rewrite/Frontend/RewriteMacros.cpp | 217 + lib/Rewrite/Frontend/RewriteModernObjC.cpp | 7607 ++++++ lib/Rewrite/Frontend/RewriteObjC.cpp | 6029 +++++ lib/Rewrite/Frontend/RewriteTest.cpp | 39 + 
lib/Rewrite/FrontendActions.cpp | 192 - lib/Rewrite/HTMLPrint.cpp | 94 - lib/Rewrite/HTMLRewrite.cpp | 583 - lib/Rewrite/InclusionRewriter.cpp | 361 - lib/Rewrite/Makefile | 10 +- lib/Rewrite/RewriteMacros.cpp | 217 - lib/Rewrite/RewriteModernObjC.cpp | 7543 ------ lib/Rewrite/RewriteObjC.cpp | 6035 ----- lib/Rewrite/RewriteRope.cpp | 811 - lib/Rewrite/RewriteTest.cpp | 39 - lib/Rewrite/Rewriter.cpp | 486 - lib/Rewrite/TokenRewriter.cpp | 99 - lib/Sema/AnalysisBasedWarnings.cpp | 290 +- lib/Sema/CMakeLists.txt | 3 + lib/Sema/CodeCompleteConsumer.cpp | 7 +- lib/Sema/DeclSpec.cpp | 38 +- lib/Sema/DelayedDiagnostic.cpp | 2 + lib/Sema/IdentifierResolver.cpp | 10 +- lib/Sema/JumpDiagnostics.cpp | 138 +- lib/Sema/MultiplexExternalSemaSource.cpp | 271 + lib/Sema/ScopeInfo.cpp | 189 + lib/Sema/Sema.cpp | 64 +- lib/Sema/SemaAccess.cpp | 17 +- lib/Sema/SemaAttr.cpp | 23 +- lib/Sema/SemaCXXScopeSpec.cpp | 3 +- lib/Sema/SemaCast.cpp | 68 +- lib/Sema/SemaChecking.cpp | 706 +- lib/Sema/SemaCodeComplete.cpp | 86 +- lib/Sema/SemaDecl.cpp | 840 +- lib/Sema/SemaDeclAttr.cpp | 135 +- lib/Sema/SemaDeclCXX.cpp | 687 +- lib/Sema/SemaDeclObjC.cpp | 145 +- lib/Sema/SemaExceptionSpec.cpp | 64 +- lib/Sema/SemaExpr.cpp | 475 +- lib/Sema/SemaExprCXX.cpp | 163 +- lib/Sema/SemaExprMember.cpp | 34 +- lib/Sema/SemaExprObjC.cpp | 124 +- lib/Sema/SemaInit.cpp | 120 +- lib/Sema/SemaLambda.cpp | 31 +- lib/Sema/SemaLookup.cpp | 43 +- lib/Sema/SemaObjCProperty.cpp | 227 +- lib/Sema/SemaOverload.cpp | 430 +- lib/Sema/SemaPseudoObject.cpp | 87 +- lib/Sema/SemaStmt.cpp | 930 +- lib/Sema/SemaStmtAsm.cpp | 661 + lib/Sema/SemaStmtAttr.cpp | 9 +- lib/Sema/SemaTemplate.cpp | 370 +- lib/Sema/SemaTemplateDeduction.cpp | 160 +- lib/Sema/SemaTemplateInstantiate.cpp | 126 +- lib/Sema/SemaTemplateInstantiateDecl.cpp | 172 +- lib/Sema/SemaTemplateVariadic.cpp | 1 + lib/Sema/SemaType.cpp | 176 +- lib/Sema/TreeTransform.h | 360 +- lib/Serialization/ASTCommon.cpp | 3 + lib/Serialization/ASTReader.cpp | 2337 +- lib/Serialization/ASTReaderDecl.cpp | 151 +- lib/Serialization/ASTReaderStmt.cpp | 35 +- lib/Serialization/ASTWriter.cpp | 781 +- lib/Serialization/ASTWriterDecl.cpp | 58 +- lib/Serialization/ASTWriterStmt.cpp | 28 +- lib/Serialization/GeneratePCH.cpp | 13 +- lib/Serialization/Module.cpp | 18 +- lib/Serialization/ModuleManager.cpp | 42 +- .../Checkers/AdjustedReturnValueChecker.cpp | 92 - lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp | 2 +- .../Checkers/ArrayBoundCheckerV2.cpp | 2 +- lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp | 4 +- .../Checkers/BasicObjCFoundationChecks.cpp | 109 +- .../Checkers/BoolAssignmentChecker.cpp | 2 +- .../Checkers/BuiltinFunctionChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/CMakeLists.txt | 6 +- lib/StaticAnalyzer/Checkers/CStringChecker.cpp | 77 +- .../Checkers/CStringSyntaxChecker.cpp | 9 +- .../Checkers/CallAndMessageChecker.cpp | 22 +- lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp | 2 +- .../Checkers/CastToStructChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 2 +- .../Checkers/CheckSecuritySyntaxOnly.cpp | 1 + .../Checkers/CheckerDocumentation.cpp | 59 +- lib/StaticAnalyzer/Checkers/Checkers.td | 83 +- lib/StaticAnalyzer/Checkers/ChrootChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp | 72 +- lib/StaticAnalyzer/Checkers/DebugCheckers.cpp | 35 + lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp | 60 +- .../Checkers/DirectIvarAssignment.cpp | 180 + lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp | 10 +- .../Checkers/DynamicTypePropagation.cpp | 9 +- 
.../Checkers/ExprInspectionChecker.cpp | 4 +- .../Checkers/FixedAddressChecker.cpp | 2 +- .../Checkers/GenericTaintChecker.cpp | 12 +- .../Checkers/IdempotentOperationChecker.cpp | 2 +- .../Checkers/IvarInvalidationChecker.cpp | 550 + .../Checkers/MacOSKeychainAPIChecker.cpp | 41 +- lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp | 16 +- lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 259 +- .../Checkers/MallocSizeofChecker.cpp | 87 +- .../Checkers/NSAutoreleasePoolChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp | 20 +- lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp | 218 - lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp | 9 +- .../Checkers/ObjCContainersASTChecker.cpp | 21 +- .../Checkers/ObjCContainersChecker.cpp | 14 +- .../Checkers/ObjCMissingSuperCallChecker.cpp | 203 + .../Checkers/ObjCSelfInitChecker.cpp | 94 +- .../Checkers/PointerArithChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp | 16 +- lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp | 356 +- .../Checkers/ReturnPointerRangeChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp | 19 +- .../Checkers/SimpleStreamChecker.cpp | 348 + .../Checkers/StackAddrEscapeChecker.cpp | 23 +- lib/StaticAnalyzer/Checkers/StreamChecker.cpp | 46 +- lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp | 5 +- .../Checkers/UndefCapturedBlockVarChecker.cpp | 2 +- lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp | 7 +- .../Checkers/UndefinedArraySubscriptChecker.cpp | 4 +- .../Checkers/UndefinedAssignmentChecker.cpp | 5 +- lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp | 18 +- lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp | 4 +- lib/StaticAnalyzer/Core/AnalysisManager.cpp | 36 +- lib/StaticAnalyzer/Core/AnalyzerOptions.cpp | 138 + lib/StaticAnalyzer/Core/BasicConstraintManager.cpp | 446 - lib/StaticAnalyzer/Core/BasicValueFactory.cpp | 6 +- lib/StaticAnalyzer/Core/BugReporter.cpp | 517 +- lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 602 +- lib/StaticAnalyzer/Core/CMakeLists.txt | 7 +- lib/StaticAnalyzer/Core/CallEvent.cpp | 166 +- lib/StaticAnalyzer/Core/CheckerContext.cpp | 27 +- lib/StaticAnalyzer/Core/CheckerManager.cpp | 54 +- lib/StaticAnalyzer/Core/ConstraintManager.cpp | 39 + lib/StaticAnalyzer/Core/CoreEngine.cpp | 15 +- lib/StaticAnalyzer/Core/Environment.cpp | 204 +- lib/StaticAnalyzer/Core/ExplodedGraph.cpp | 155 +- lib/StaticAnalyzer/Core/ExprEngine.cpp | 414 +- lib/StaticAnalyzer/Core/ExprEngineC.cpp | 133 +- lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 79 +- .../Core/ExprEngineCallAndReturn.cpp | 351 +- lib/StaticAnalyzer/Core/ExprEngineObjC.cpp | 82 +- lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 15 +- lib/StaticAnalyzer/Core/MemRegion.cpp | 34 +- lib/StaticAnalyzer/Core/PathDiagnostic.cpp | 306 +- lib/StaticAnalyzer/Core/PlistDiagnostics.cpp | 47 +- lib/StaticAnalyzer/Core/ProgramState.cpp | 68 +- lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 66 +- lib/StaticAnalyzer/Core/RegionStore.cpp | 308 +- lib/StaticAnalyzer/Core/SValBuilder.cpp | 37 +- lib/StaticAnalyzer/Core/SVals.cpp | 3 +- .../Core/SimpleConstraintManager.cpp | 18 +- lib/StaticAnalyzer/Core/SimpleConstraintManager.h | 4 +- lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp | 25 +- lib/StaticAnalyzer/Core/Store.cpp | 86 + lib/StaticAnalyzer/Core/SymbolManager.cpp | 109 +- lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp | 1 - lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 154 +- 
lib/StaticAnalyzer/Frontend/AnalysisConsumer.h | 4 +- lib/StaticAnalyzer/Frontend/CMakeLists.txt | 3 +- .../Frontend/CheckerRegistration.cpp | 2 +- lib/Tooling/CMakeLists.txt | 7 +- lib/Tooling/CommandLineClangTool.cpp | 80 - lib/Tooling/CommonOptionsParser.cpp | 79 + lib/Tooling/CompilationDatabase.cpp | 297 +- lib/Tooling/CustomCompilationDatabase.h | 42 - lib/Tooling/FileMatchTrie.cpp | 188 + lib/Tooling/JSONCompilationDatabase.cpp | 303 + lib/Tooling/Refactoring.cpp | 11 +- lib/Tooling/Tooling.cpp | 30 +- runtime/compiler-rt/Makefile | 61 +- runtime/compiler-rt/clang_linux_test_input.c | 4 + test/ARCMT/cxx-checking.mm | 27 +- test/ARCMT/verify.m | 5 +- test/ASTMerge/exprs.c | 1 + test/Analysis/CFContainers-invalid.c | 20 + test/Analysis/CFContainers.mm | 16 +- test/Analysis/CFDateGC.m | 1 - test/Analysis/CFNumber.c | 1 - test/Analysis/CFRetainRelease_NSAssertionHandler.m | 4 +- test/Analysis/CGColorSpace.c | 1 - test/Analysis/CheckNSError.m | 1 - test/Analysis/Inputs/system-header-simulator-cxx.h | 57 + .../system-header-simulator-for-simple-stream.h | 11 + .../Analysis/Inputs/system-header-simulator-objc.h | 130 + test/Analysis/Inputs/system-header-simulator.h | 64 + test/Analysis/MissingDealloc.m | 3 +- test/Analysis/NSPanel.m | 4 +- test/Analysis/NSString.m | 6 +- test/Analysis/NSWindow.m | 3 +- test/Analysis/NoReturn.m | 4 +- test/Analysis/OSAtomic_mac.cpp | 1 + test/Analysis/ObjCProperties.m | 4 +- test/Analysis/ObjCRetSigs.m | 2 +- test/Analysis/PR2599.m | 2 +- test/Analysis/PR2978.m | 2 +- test/Analysis/PR3991.m | 3 +- test/Analysis/PR9741.cpp | 1 + test/Analysis/additive-folding.cpp | 9 +- test/Analysis/analyzer-config.c | 13 + test/Analysis/analyzer-config.cpp | 22 + test/Analysis/array-struct-region.c | 110 +- test/Analysis/array-struct-region.cpp | 176 + test/Analysis/array-struct.c | 11 +- test/Analysis/auto-obj-dtors-cfg-output.cpp | 2 +- test/Analysis/base-init.cpp | 3 +- test/Analysis/bitwise-ops.c | 14 + test/Analysis/blocks.m | 14 +- test/Analysis/bool-assignment.cpp | 2 +- test/Analysis/bool-assignment2.c | 2 +- test/Analysis/bstring.c | 8 +- test/Analysis/casts.c | 5 +- test/Analysis/casts.m | 3 +- test/Analysis/cfref_PR2519.c | 4 +- test/Analysis/cfref_rdar6080742.c | 4 +- test/Analysis/chroot.c | 2 +- test/Analysis/comparison-implicit-casts.cpp | 2 - test/Analysis/complex-init-list.cpp | 7 + test/Analysis/complex.c | 1 - test/Analysis/concrete-address.c | 3 +- test/Analysis/conditional-operator-path-notes.c | 1083 + test/Analysis/coverage.c | 2 +- test/Analysis/cstring-syntax-cxx.cpp | 1 + test/Analysis/ctor-inlining.mm | 32 +- test/Analysis/cxx-crashes.cpp | 1 + test/Analysis/cxx-method-names.cpp | 3 +- test/Analysis/cxx11-crashes.cpp | 1 + test/Analysis/dead-stores.c | 5 +- test/Analysis/dead-stores.cpp | 41 +- test/Analysis/dead-stores.m | 3 +- test/Analysis/delegates.m | 1 + test/Analysis/derived-to-base.cpp | 2 +- .../diagnostics/deref-track-symbolic-region.c | 354 + .../diagnostics/deref-track-symbolic-region.cpp | 16 + test/Analysis/diagnostics/undef-value-caller.c | 379 +- test/Analysis/diagnostics/undef-value-param.c | 1183 + test/Analysis/diagnostics/undef-value-param.m | 476 + test/Analysis/domtest.c | 2 +- test/Analysis/dtor.cpp | 78 +- test/Analysis/dtors-in-dtor-cfg-output.cpp | 2 +- test/Analysis/elementtype.c | 2 +- test/Analysis/engine/replay-without-inlining.c | 1 + test/Analysis/exceptions.mm | 38 + test/Analysis/exercise-ps.c | 2 +- test/Analysis/fields.c | 12 +- test/Analysis/free.c | 2 +- test/Analysis/func.c | 2 +- 
test/Analysis/global-region-invalidation.c | 4 +- .../Analysis/idempotent-operations-limited-loops.c | 6 +- test/Analysis/idempotent-operations.c | 10 +- test/Analysis/idempotent-operations.cpp | 2 +- test/Analysis/idempotent-operations.m | 3 +- test/Analysis/initializer.cpp | 22 +- test/Analysis/inline-not-supported.c | 4 +- test/Analysis/inline-plist.c | 2988 ++- test/Analysis/inline-unique-reports.c | 2 +- test/Analysis/inline.c | 2 +- test/Analysis/inline.cpp | 176 +- test/Analysis/inline2.c | 3 +- test/Analysis/inline3.c | 3 +- test/Analysis/inline4.c | 3 +- test/Analysis/inlining/DynDispatchBifurcate.m | 12 +- test/Analysis/inlining/InlineObjCInstanceMethod.m | 66 +- test/Analysis/inlining/RetainCountExamples.m | 96 +- .../assume-super-init-does-not-return-nil.m | 41 + test/Analysis/inlining/dyn-dispatch-bifurcate.cpp | 16 + .../inlining/eager-reclamation-path-notes.c | 788 + .../Analysis/inlining/false-positive-suppression.c | 184 + test/Analysis/inlining/path-notes.c | 4413 +++- test/Analysis/inlining/path-notes.m | 469 + test/Analysis/inlining/retain-count-self-init.m | 68 + test/Analysis/inlining/stl.cpp | 29 + .../inlining/test-always-inline-size-option.c | 48 + test/Analysis/inlining/test_objc_inlining_option.m | 34 + test/Analysis/ivars.m | 8 + test/Analysis/keychainAPI.m | 2 +- test/Analysis/logical-ops.c | 27 + test/Analysis/lvalue.cpp | 1 + test/Analysis/malloc-annotations.c | 2 +- test/Analysis/malloc-interprocedural.c | 41 +- test/Analysis/malloc-overflow.c | 2 +- test/Analysis/malloc-overflow.cpp | 3 +- test/Analysis/malloc-plist.c | 8604 +++---- test/Analysis/malloc-sizeof.c | 16 + test/Analysis/malloc.c | 35 +- test/Analysis/malloc.cpp | 27 +- test/Analysis/malloc.m | 2 +- test/Analysis/malloc.mm | 60 +- test/Analysis/member-expr.cpp | 23 + test/Analysis/method-call-intra-p.cpp | 1 + test/Analysis/method-call-path-notes.cpp | 1470 +- test/Analysis/method-call.cpp | 30 +- test/Analysis/misc-ps-64.m | 4 +- test/Analysis/misc-ps-arm.m | 1 + test/Analysis/misc-ps-eager-assume.m | 3 +- test/Analysis/misc-ps-ranges.m | 2 +- test/Analysis/misc-ps-region-store-i386.m | 3 +- test/Analysis/misc-ps-region-store-x86_64.m | 3 +- test/Analysis/misc-ps-region-store.cpp | 10 +- test/Analysis/misc-ps-region-store.m | 7 +- test/Analysis/misc-ps-region-store.mm | 5 +- test/Analysis/misc-ps.c | 18 + test/Analysis/misc-ps.m | 10 +- test/Analysis/new-with-exceptions.cpp | 71 + test/Analysis/new.cpp | 12 + ...iver-undefined-larger-than-voidptr-ret-region.m | 2 +- ...il-receiver-undefined-larger-than-voidptr-ret.m | 9 +- test/Analysis/no-exit-cfg.c | 3 +- test/Analysis/no-outofbounds.c | 2 +- test/Analysis/null-deref-path-notes.m | 488 + test/Analysis/null-deref-ps-region.c | 3 +- test/Analysis/null-deref-ps.c | 4 +- test/Analysis/objc-bool.m | 1 + test/Analysis/objc-for.m | 2 +- test/Analysis/objc-method-coverage.m | 2 +- test/Analysis/objc-properties.m | 72 + test/Analysis/objc_invalidation.m | 153 + test/Analysis/operator-calls.cpp | 21 +- test/Analysis/out-of-bounds.c | 2 +- test/Analysis/outofbound-notwork.c | 2 +- test/Analysis/outofbound.c | 2 +- test/Analysis/override-werror.c | 2 +- test/Analysis/plist-html-macros.c | 31 + test/Analysis/plist-output-alternate.m | 2137 +- test/Analysis/plist-output.m | 3659 +-- test/Analysis/pointer-to-member.cpp | 44 + test/Analysis/pr4209.m | 2 +- test/Analysis/pr_2542_rdar_6793404.m | 2 +- test/Analysis/pr_4164.c | 3 +- test/Analysis/pthreadlock.c | 2 +- test/Analysis/ptr-arith.c | 4 +- test/Analysis/rdar-6442306-1.m | 3 +- 
test/Analysis/rdar-6540084.m | 2 +- test/Analysis/rdar-6541136-region.c | 2 +- test/Analysis/rdar-6562655.m | 3 +- ...dar-6600344-nil-receiver-undefined-struct-ret.m | 3 +- test/Analysis/rdar-7168531.m | 2 +- test/Analysis/redefined_system.c | 3 +- test/Analysis/refcnt_naming.m | 2 +- test/Analysis/reference.cpp | 28 +- test/Analysis/region-1.m | 3 +- test/Analysis/region-store.c | 1 + test/Analysis/retain-release-gc-only.m | 53 +- test/Analysis/retain-release-inline.m | 18 +- test/Analysis/retain-release-path-notes-gc.m | 2662 ++- test/Analysis/retain-release-path-notes.m | 8833 ++++--- test/Analysis/retain-release.m | 22839 ++++++++++++++++++- test/Analysis/retain-release.mm | 19 + test/Analysis/security-syntax-checks-no-emit.c | 1 + test/Analysis/simple-stream-checks.c | 78 + test/Analysis/sizeofpointer.c | 2 +- test/Analysis/stack-addr-ps.cpp | 2 +- test/Analysis/static_local.m | 19 + test/Analysis/stream.c | 2 +- test/Analysis/string.c | 8 +- test/Analysis/svalbuilder-logic.c | 1 + test/Analysis/system-header-simulator-objc.h | 130 - test/Analysis/system-header-simulator.h | 62 - test/Analysis/taint-generic.c | 2 +- test/Analysis/taint-tester.c | 2 +- test/Analysis/taint-tester.cpp | 3 +- test/Analysis/taint-tester.m | 3 +- test/Analysis/temp-obj-dtors-cfg-output.cpp | 2 +- test/Analysis/templates.cpp | 8 +- test/Analysis/temporaries.cpp | 30 + .../test-objc-non-nil-return-value-checker.m | 50 + test/Analysis/traversal-path-unification.c | 28 + test/Analysis/undef-buffers.c | 2 +- test/Analysis/uninit-vals-ps-region.m | 2 +- test/Analysis/uninit-vals-ps.c | 12 + test/Analysis/uninit-vals.m | 1 + test/Analysis/unions-region.m | 1 + test/Analysis/unions.cpp | 51 + test/Analysis/unix-fns.c | 1857 +- test/Analysis/unreachable-code-path.c | 2 +- test/Analysis/viewcontroller.m | 135 + test/Analysis/virtualcall.cpp | 8 +- test/Analysis/virtualcall.h | 28 + .../basic.lookup.argdep/p2-template-id.cpp | 1 + .../basic/basic.lookup/basic.lookup.argdep/p2.cpp | 24 + .../basic.lookup/basic.lookup.classref/p3.cpp | 1 + .../basic.lookup.classref/p4-cxx11.cpp | 1 + .../basic.lookup.qual/namespace.qual/p3.cpp | 1 + .../basic.lookup.qual/namespace.qual/p4.cpp | 1 + .../basic/basic.lookup/basic.lookup.udir/p1.cpp | 1 + .../basic/basic.lookup/basic.lookup.unqual/p12.cpp | 1 + .../basic/basic.lookup/basic.lookup.unqual/p13.cpp | 1 + .../basic/basic.lookup/basic.lookup.unqual/p14.cpp | 1 + .../basic/basic.lookup/basic.lookup.unqual/p3.cpp | 1 + .../CXX/basic/basic.scope/basic.scope.local/p2.cpp | 37 + .../CXX/basic/basic.scope/basic.scope.pdecl/p9.cpp | 1 + .../CXX/basic/basic.start/basic.start.main/p2a.cpp | 1 + .../CXX/basic/basic.start/basic.start.main/p2b.cpp | 1 + .../CXX/basic/basic.start/basic.start.main/p2c.cpp | 1 + .../CXX/basic/basic.start/basic.start.main/p2g.cpp | 1 + .../basic/basic.stc/basic.stc.dynamic/p2-nodef.cpp | 1 + .../basic.stc.dynamic/p2-noexceptions.cpp | 1 + test/CXX/class.access/class.friend/p1.cpp | 24 +- test/CXX/class.access/class.friend/p3-cxx0x.cpp | 26 + test/CXX/class.access/class.protected/p1-cxx11.cpp | 1 + test/CXX/class.access/class.protected/p1.cpp | 2 +- test/CXX/class.derived/class.abstract/p16.cpp | 16 + test/CXX/class.derived/p2.cpp | 1 + test/CXX/class/class.friend/p1-ambiguous.cpp | 1 + test/CXX/class/class.friend/p1-cxx11.cpp | 1 + test/CXX/class/class.nest/p3.cpp | 1 + test/CXX/class/p1-0x.cpp | 1 + test/CXX/class/p2-0x.cpp | 8 + test/CXX/class/p6-0x.cpp | 1 + test/CXX/conv/conv.prom/p2.cpp | 5 +- test/CXX/conv/conv.prom/p4.cpp | 19 + 
test/CXX/conv/conv.ptr/p2.cpp | 1 + test/CXX/conv/conv.qual/pr6089.cpp | 1 + .../dcl.dcl/basic.namespace/namespace.def/p2.cpp | 1 + .../dcl.dcl/basic.namespace/namespace.def/p7.cpp | 4 +- .../basic.namespace/namespace.udecl/p10.cpp | 1 + .../basic.namespace/namespace.udecl/p13.cpp | 1 + .../dcl.dcl/basic.namespace/namespace.udir/p6.cpp | 1 + test/CXX/dcl.dcl/dcl.attr/dcl.attr.grammar/p6.cpp | 3 +- test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p5.cpp | 6 +- .../dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p3.cpp | 3 + test/CXX/dcl.dcl/dcl.spec/dcl.type/p3-0x.cpp | 2 +- test/CXX/dcl.decl/dcl.init/dcl.init.aggr/p4.cpp | 6 + test/CXX/dcl.decl/dcl.init/dcl.init.ref/basic.cpp | 1 + test/CXX/dcl.decl/dcl.init/dcl.init.ref/p1.cpp | 1 + test/CXX/dcl.decl/dcl.init/dcl.init.string/p1.cpp | 1 + test/CXX/dcl.decl/dcl.init/p7.cpp | 14 + .../dcl.decl/dcl.meaning/dcl.fct.default/p2.cpp | 1 + .../dcl.decl/dcl.meaning/dcl.fct.default/p3.cpp | 6 +- test/CXX/dcl.decl/dcl.meaning/dcl.fct/p14.cpp | 1 + test/CXX/dcl.decl/dcl.meaning/dcl.ref/p6-0x.cpp | 1 + test/CXX/dcl.decl/dcl.meaning/p1.cpp | 14 +- test/CXX/dcl.decl/dcl.name/p1.cpp | 1 + test/CXX/dcl.decl/p4-0x.cpp | 1 + test/CXX/except/except.spec/canonical.cpp | 1 + test/CXX/except/except.spec/p11.cpp | 1 + test/CXX/except/except.spec/p14.cpp | 38 + test/CXX/except/except.spec/p15.cpp | 14 +- test/CXX/except/except.spec/p4.cpp | 36 + test/CXX/expr/expr.cast/p4-0x.cpp | 1 + test/CXX/expr/expr.const/p3-0x-nowarn.cpp | 1 + test/CXX/expr/expr.const/p5-0x.cpp | 8 +- test/CXX/expr/expr.post/expr.const.cast/p1-0x.cpp | 1 + test/CXX/expr/expr.post/expr.ref/p3.cpp | 1 + test/CXX/expr/expr.post/expr.static.cast/p3-0x.cpp | 1 + test/CXX/expr/expr.post/expr.static.cast/p9-0x.cpp | 1 + test/CXX/expr/expr.post/expr.type.conv/p1-0x.cpp | 1 + .../CXX/expr/expr.prim/expr.prim.general/p3-0x.cpp | 4 +- test/CXX/expr/expr.prim/expr.prim.lambda/p14.cpp | 15 + test/CXX/expr/expr.prim/expr.prim.lambda/p15.cpp | 1 + test/CXX/expr/expr.prim/expr.prim.lambda/p18.cpp | 1 + test/CXX/expr/expr.prim/expr.prim.lambda/p20.cpp | 1 + test/CXX/expr/expr.prim/expr.prim.lambda/p21.cpp | 1 + .../expr/expr.unary/expr.unary.noexcept/sema.cpp | 1 + test/CXX/expr/expr.unary/expr.unary.op/p3.cpp | 1 + test/CXX/expr/p8.cpp | 1 + test/CXX/expr/p9.cpp | 1 + test/CXX/lex/lex.literal/lex.ccon/p1.cpp | 1 + test/CXX/lex/lex.trigraph/p3.cpp | 1 + test/CXX/over/over.built/p1.cpp | 16 - test/CXX/over/over.built/p23.cpp | 1 + test/CXX/over/over.built/p25.cpp | 1 + .../over.match.best/over.ics.rank/p3-0x.cpp | 1 + test/CXX/over/over.match/over.match.best/p1.cpp | 1 + .../over.match.funcs/over.match.oper/p3.cpp | 29 + .../CXX/over/over.match/over.match.funcs/p4-0x.cpp | 1 + test/CXX/over/over.oper/over.literal/p7.cpp | 1 + test/CXX/over/over.oper/over.literal/p8.cpp | 5 +- test/CXX/special/class.conv/class.conv.ctor/p1.cpp | 1 + test/CXX/special/class.copy/p15-0x.cpp | 1 + test/CXX/special/class.copy/p8-cxx11.cpp | 1 + test/CXX/special/class.ctor/p1.cpp | 1 + test/CXX/special/class.dtor/p2.cpp | 1 + test/CXX/special/class.dtor/p3-0x.cpp | 2 +- test/CXX/special/class.dtor/p3.cpp | 17 + test/CXX/stmt.stmt/stmt.iter/stmt.ranged/p1.cpp | 104 +- .../stmt.stmt/stmt.select/stmt.switch/p2-0x.cpp | 1 + test/CXX/temp/p3.cpp | 2 +- test/CXX/temp/temp.arg/temp.arg.type/p2-cxx0x.cpp | 1 + test/CXX/temp/temp.decls/temp.alias/p1.cpp | 1 + test/CXX/temp/temp.decls/temp.class.spec/p9.cpp | 1 + .../temp.class.spec/temp.class.order/p2.cpp | 1 + .../temp.class.spec/temp.class.spec.mfunc/p1.cpp | 1 + .../temp.class/temp.mem.func/p1-retmem.cpp | 
1 + .../temp.decls/temp.class/temp.mem.func/pr5056.cpp | 1 + .../temp.decls/temp.fct/temp.func.order/p3-0x.cpp | 28 + .../temp.decls/temp.fct/temp.func.order/p3.cpp | 13 + .../temp.decls/temp.fct/temp.func.order/p5.cpp | 1 + .../temp/temp.decls/temp.fct/temp.over.link/p4.cpp | 1 + test/CXX/temp/temp.decls/temp.friend/p5.cpp | 1 + test/CXX/temp/temp.decls/temp.mem/p1.cpp | 1 + .../temp/temp.decls/temp.variadic/deduction.cpp | 1 + .../temp/temp.decls/temp.variadic/example-bind.cpp | 1 + .../temp.decls/temp.variadic/example-function.cpp | 1 + .../temp.decls/temp.variadic/example-tuple.cpp | 1 + .../temp.variadic/injected-class-name.cpp | 1 + .../temp.variadic/multi-level-substitution.cpp | 24 + .../temp.decls/temp.variadic/partial-ordering.cpp | 1 + .../temp/temp.fct.spec/temp.arg.explicit/p3-0x.cpp | 1 + .../temp/temp.fct.spec/temp.arg.explicit/p9-0x.cpp | 1 + .../CXX/temp/temp.fct.spec/temp.deduct/cwg1170.cpp | 1 + .../temp/temp.fct.spec/temp.deduct/sfinae-1.cpp | 1 + .../temp.deduct/temp.deduct.call/p2.cpp | 1 + .../temp.deduct/temp.deduct.call/p4.cpp | 1 + .../temp.deduct/temp.deduct.conv/p2.cpp | 1 + .../temp.deduct/temp.deduct.conv/p3.cpp | 1 + .../temp.deduct/temp.deduct.partial/p12.cpp | 1 + .../temp.deduct/temp.deduct.partial/p9-0x.cpp | 1 + .../temp.deduct/temp.deduct.type/p10-0x.cpp | 1 + .../temp.deduct/temp.deduct.type/p2-0x.cpp | 1 + .../temp.deduct/temp.deduct.type/p21.cpp | 1 + .../temp.deduct/temp.deduct.type/p22.cpp | 1 + .../temp.deduct/temp.deduct.type/p5-0x.cpp | 1 + .../temp.deduct/temp.deduct.type/p8-0x.cpp | 1 + test/CXX/temp/temp.names/p2.cpp | 1 + test/CXX/temp/temp.names/p4.cpp | 1 + test/CXX/temp/temp.param/p10-0x.cpp | 1 + test/CXX/temp/temp.param/p10.cpp | 1 + test/CXX/temp/temp.param/p13.cpp | 1 + test/CXX/temp/temp.param/p15-cxx0x.cpp | 154 + test/CXX/temp/temp.param/p2.cpp | 1 + test/CXX/temp/temp.param/p5.cpp | 1 + test/CXX/temp/temp.param/p8.cpp | 1 + test/CXX/temp/temp.res/temp.dep/p3.cpp | 1 + .../temp.res/temp.dep/temp.dep.constexpr/p2-0x.cpp | 1 + test/CXX/temp/temp.res/temp.local/p1.cpp | 1 + test/CXX/temp/temp.res/temp.local/p7.cpp | 1 + test/CXX/temp/temp.res/temp.local/p8.cpp | 1 + test/CXX/temp/temp.spec/temp.expl.spec/p1.cpp | 1 + test/CXX/temp/temp.spec/temp.expl.spec/p11.cpp | 1 + test/CXX/temp/temp.spec/temp.expl.spec/p9.cpp | 1 + test/CXX/temp/temp.spec/temp.explicit/p11.cpp | 1 + test/CXX/temp/temp.spec/temp.explicit/p3-0x.cpp | 1 + test/CXX/temp/temp.spec/temp.explicit/p6.cpp | 1 + test/CodeCompletion/Inputs/macros.h | 7 + test/CodeCompletion/macros.c | 12 + test/CodeCompletion/preamble.c | 2 +- test/CodeGen/2004-03-16-AsmRegisterCrash.c | 5 +- test/CodeGen/2008-01-25-ByValReadNone.c | 8 +- test/CodeGen/2008-01-25-ZeroSizedAggregate.c | 1 + test/CodeGen/2008-12-23-AsmIntPointerTie.c | 1 + test/CodeGen/2009-06-01-addrofknr.c | 1 + test/CodeGen/2010-06-17-asmcrash.c | 5 +- test/CodeGen/PR3589-freestanding-libcalls.c | 6 +- test/CodeGen/a15.c | 5 + test/CodeGen/address-safety-attr.cpp | 2 +- test/CodeGen/alias.c | 44 +- test/CodeGen/arm-aapcs-vfp.c | 6 + test/CodeGen/arm-aapcs-zerolength-bitfield.c | 1 + test/CodeGen/arm-abi-vector.c | 263 + test/CodeGen/arm-apcs-zerolength-bitfield.c | 1 + test/CodeGen/arm-arguments.c | 45 + test/CodeGen/arm-asm-warn.c | 18 + test/CodeGen/arm-homogenous.c | 41 + test/CodeGen/arm-pnaclcall.c | 33 + test/CodeGen/asm.c | 9 + test/CodeGen/atomic-ops.c | 9 + test/CodeGen/attr-minsize.cpp | 75 + test/CodeGen/attr-weakref.c | 6 + test/CodeGen/attributes.c | 2 +- test/CodeGen/bitfield-promote.c | 10 +- 
test/CodeGen/bmi2-builtins.c | 17 + test/CodeGen/builtin-memfns.c | 20 + test/CodeGen/builtin-ms-noop.cpp | 14 + test/CodeGen/builtins-mips-args.c | 23 + test/CodeGen/builtins-mips.c | 210 + test/CodeGen/builtins-nvptx.c | 74 +- test/CodeGen/builtins.c | 1 + test/CodeGen/catch-undef-behavior.c | 241 +- test/CodeGen/const-init.c | 15 + test/CodeGen/const-label-addr.c | 15 +- test/CodeGen/debug-info-iv.c | 2 +- test/CodeGen/debug-info-line3.c | 4 +- test/CodeGen/debug-info-line4.c | 11 + test/CodeGen/debug-info.c | 5 +- test/CodeGen/debug-line-1.c | 2 +- test/CodeGen/decl-in-prototype.c | 2 +- test/CodeGen/dostmt.c | 10 +- test/CodeGen/exprs.c | 10 + test/CodeGen/extern-inline.c | 4 +- test/CodeGen/f16c-builtins.c | 26 + test/CodeGen/ffp-contract-option.c | 9 + test/CodeGen/fold-const-declref.c | 4 +- test/CodeGen/fp-contract-pragma.cpp | 64 + test/CodeGen/fp-contract.c | 9 - test/CodeGen/func-ptr-cast-decl.c | 1 + test/CodeGen/init.c | 2 + test/CodeGen/inline.c | 6 +- test/CodeGen/integer-overflow.c | 9 + test/CodeGen/le32-arguments.c | 61 + test/CodeGen/le32-regparm.c | 41 + test/CodeGen/libcall-declarations.c | 191 + test/CodeGen/libcalls-fno-builtin.c | 97 + test/CodeGen/long-double-x86-nacl.c | 7 + test/CodeGen/microsoft-call-conv-x64.c | 39 + test/CodeGen/microsoft-call-conv.c | 2 +- test/CodeGen/mips-byval-arg.c | 4 +- test/CodeGen/mips-clobber-reg.c | 2 +- test/CodeGen/mips-constraint-regs.c | 7 +- test/CodeGen/mips-vector-arg.c | 4 +- test/CodeGen/mips-vector-return.c | 4 +- test/CodeGen/mips64-class-return.cpp | 2 +- test/CodeGen/mips64-f128-literal.c | 2 +- test/CodeGen/mips64-nontrivial-return.cpp | 2 +- test/CodeGen/mips64-padding-arg.c | 2 +- test/CodeGen/ms-inline-asm-64.c | 16 + test/CodeGen/ms-inline-asm.c | 170 +- test/CodeGen/ppc-atomics.c | 35 + test/CodeGen/ppc64-align-long-double.c | 18 + test/CodeGen/ppc64-extend.c | 15 + test/CodeGen/ppc64-struct-onefloat.c | 49 + test/CodeGen/ppc64-varargs-struct.c | 30 + test/CodeGen/pragma-weak.c | 9 + test/CodeGen/rtm-builtins.c | 23 + test/CodeGen/sse-builtins.c | 31 + test/CodeGen/statements.c | 1 + test/CodeGen/stdcall-fastcall.c | 98 +- test/CodeGen/tbaa-for-vptr.cpp | 10 +- test/CodeGen/tbaa-struct.cpp | 17 + test/CodeGen/trapv.c | 18 +- test/CodeGen/unwind-attr.c | 4 +- test/CodeGen/x86_64-arguments-nacl.c | 120 + test/CodeGenCUDA/address-spaces.cu | 12 + test/CodeGenCXX/2005-01-03-StaticInitializers.cpp | 1 + test/CodeGenCXX/2009-05-04-PureConstNounwind.cpp | 2 +- test/CodeGenCXX/assign-construct-memcpy.cpp | 89 + test/CodeGenCXX/attr.cpp | 10 +- test/CodeGenCXX/builtins.cpp | 12 - test/CodeGenCXX/catch-undef-behavior.cpp | 134 + test/CodeGenCXX/compound-literals.cpp | 6 +- test/CodeGenCXX/const-global-linkage.cpp | 4 + test/CodeGenCXX/const-init-cxx11.cpp | 6 +- test/CodeGenCXX/conversion-operator-base.cpp | 1 + test/CodeGenCXX/copy-assign-synthesis-3.cpp | 1 + test/CodeGenCXX/copy-constructor-elim-2.cpp | 4 +- test/CodeGenCXX/cxx0x-delegating-ctors.cpp | 2 +- test/CodeGenCXX/cxx0x-initializer-array.cpp | 2 +- test/CodeGenCXX/cxx0x-initializer-constructors.cpp | 2 +- test/CodeGenCXX/cxx0x-initializer-references.cpp | 6 +- ...x0x-initializer-stdinitializerlist-startend.cpp | 2 +- .../cxx0x-initializer-stdinitializerlist.cpp | 2 +- test/CodeGenCXX/cxx11-special-members.cpp | 32 + test/CodeGenCXX/debug-info-artificial-arg.cpp | 2 +- test/CodeGenCXX/debug-info-blocks.cpp | 14 + test/CodeGenCXX/debug-info-class.cpp | 18 +- test/CodeGenCXX/debug-info-enum-class.cpp | 16 +- test/CodeGenCXX/debug-info-fwd-ref.cpp | 11 +- 
test/CodeGenCXX/debug-info-global-ctor-dtor.cpp | 27 + test/CodeGenCXX/debug-info-globalinit.cpp | 2 +- test/CodeGenCXX/debug-info-pubtypes.cpp | 2 +- test/CodeGenCXX/debug-info-template-member.cpp | 2 +- test/CodeGenCXX/debug-info-template-quals.cpp | 2 +- test/CodeGenCXX/debug-info-thunk.cpp | 17 + test/CodeGenCXX/debug-info-user-def.cpp | 14 - test/CodeGenCXX/debug-lambda-expressions.cpp | 18 +- test/CodeGenCXX/debug-lambda-this.cpp | 15 + test/CodeGenCXX/delete.cpp | 23 +- test/CodeGenCXX/dependent-type-member-pointer.cpp | 1 + test/CodeGenCXX/destructor-debug-info.cpp | 2 +- .../devirtualize-virtual-function-calls-final.cpp | 5 +- .../devirtualize-virtual-function-calls.cpp | 4 +- test/CodeGenCXX/enum.cpp | 1 + test/CodeGenCXX/exceptions.cpp | 1 + test/CodeGenCXX/fastcall.cpp | 20 + test/CodeGenCXX/for-range.cpp | 12 +- test/CodeGenCXX/implicit-copy-constructor.cpp | 28 +- test/CodeGenCXX/incomplete-types.cpp | 1 + test/CodeGenCXX/init-priority-attr.cpp | 46 + test/CodeGenCXX/instantiate-init-list.cpp | 1 + test/CodeGenCXX/lambda-expressions.cpp | 9 + test/CodeGenCXX/mangle-exprs.cpp | 7 + test/CodeGenCXX/mangle-extern-local.cpp | 2 +- test/CodeGenCXX/mangle-lambdas.cpp | 27 + test/CodeGenCXX/mangle-ms-arg-qualifiers.cpp | 78 + test/CodeGenCXX/mangle-ms-return-qualifiers.cpp | 5 +- test/CodeGenCXX/mangle-ms-template-callback.cpp | 72 + test/CodeGenCXX/mangle-ms-templates.cpp | 12 + test/CodeGenCXX/mangle-ms.cpp | 39 +- test/CodeGenCXX/mangle-nullptr-arg.cpp | 3 + test/CodeGenCXX/mangle-template.cpp | 16 +- test/CodeGenCXX/mangle-valist.cpp | 44 + test/CodeGenCXX/member-alignment.cpp | 1 - test/CodeGenCXX/member-call-parens.cpp | 1 + test/CodeGenCXX/member-functions.cpp | 3 + test/CodeGenCXX/member-init-assignment.cpp | 2 +- test/CodeGenCXX/member-init-struct.cpp | 1 + test/CodeGenCXX/member-init-union.cpp | 1 + test/CodeGenCXX/microsoft-abi-constructors.cpp | 11 +- test/CodeGenCXX/microsoft-abi-default-cc.cpp | 47 + test/CodeGenCXX/microsoft-abi-methods.cpp | 6 +- .../microsoft-abi-static-initializers.cpp | 6 +- test/CodeGenCXX/microsoft-interface.cpp | 43 + .../microsoft-uuidof-unsupported-target.cpp | 13 + test/CodeGenCXX/microsoft-uuidof.cpp | 72 + test/CodeGenCXX/new-operator-phi.cpp | 1 + test/CodeGenCXX/new.cpp | 10 + test/CodeGenCXX/nrvo.cpp | 4 +- test/CodeGenCXX/override-layout.cpp | 15 +- test/CodeGenCXX/pr12251.cpp | 4 +- test/CodeGenCXX/pragma-visibility.cpp | 2 +- .../CodeGenCXX/reference-bind-default-argument.cpp | 1 + test/CodeGenCXX/reference-init.cpp | 1 + test/CodeGenCXX/regparm.cpp | 32 + test/CodeGenCXX/reinterpret-cast.cpp | 2 + test/CodeGenCXX/return.cpp | 12 + test/CodeGenCXX/static-assert.cpp | 1 + test/CodeGenCXX/static-init-2.cpp | 1 + test/CodeGenCXX/switch-case-folding-2.cpp | 2 +- test/CodeGenCXX/throw-expression-cleanup.cpp | 2 +- test/CodeGenCXX/throw-expression-dtor.cpp | 1 + test/CodeGenCXX/throw-expressions.cpp | 1 + test/CodeGenCXX/thunk-linkonce-odr.cpp | 2 +- test/CodeGenCXX/thunks.cpp | 45 +- test/CodeGenCXX/typeid-cxx11.cpp | 8 +- test/CodeGenCXX/unary-type-trait.cpp | 1 + test/CodeGenCXX/virtual-operator-call.cpp | 2 +- test/CodeGenCXX/visibility-inlines-hidden.cpp | 11 +- test/CodeGenCXX/vtable-layout.cpp | 21 + test/CodeGenCXX/vtt-layout.cpp | 24 +- test/CodeGenObjC/2008-10-3-EhValue.m | 2 +- test/CodeGenObjC/arc-arm.m | 20 +- test/CodeGenObjC/arc-block-ivar-layout.m | 60 - test/CodeGenObjC/arc-blocks.m | 133 +- .../arc-captured-32bit-block-var-layout.m | 425 + .../arc-captured-block-var-inlined-layout.m | 112 + 
test/CodeGenObjC/arc-captured-block-var-layout.m | 425 + test/CodeGenObjC/arc-exceptions.m | 5 +- test/CodeGenObjC/arc-foreach.m | 8 +- test/CodeGenObjC/arc-ivar-layout.m | 1 + test/CodeGenObjC/arc-no-runtime.m | 4 + test/CodeGenObjC/arc-property.m | 76 +- test/CodeGenObjC/arc-related-result-type.m | 10 +- test/CodeGenObjC/arc.m | 9 +- test/CodeGenObjC/atomic-aggregate-property.m | 11 + test/CodeGenObjC/attr-minsize.m | 12 + test/CodeGenObjC/block-var-layout.m | 6 +- test/CodeGenObjC/builtin-memfns.m | 10 + test/CodeGenObjC/category-super-class-meth.m | 32 +- test/CodeGenObjC/debug-info-crash-2.m | 2 + test/CodeGenObjC/debug-info-fwddecl.m | 2 +- test/CodeGenObjC/debug-info-impl.m | 2 +- test/CodeGenObjC/debug-info-ivars.m | 24 + test/CodeGenObjC/debug-info-pubtypes.m | 4 +- test/CodeGenObjC/debug-info-self.m | 8 +- test/CodeGenObjC/exceptions.m | 4 +- test/CodeGenObjC/gnu-exceptions.m | 2 +- test/CodeGenObjC/ivar-layout-64.m | 52 +- .../mrr-captured-block-var-inlined-layout.m | 65 + test/CodeGenObjC/newproperty-nested-synthesis-1.m | 1 + test/CodeGenObjC/objc-arc-container-subscripting.m | 5 +- test/CodeGenObjC/objc2-ivar-assign.m | 3 + test/CodeGenObjC/optimized-setter-ios-device.m | 33 + test/CodeGenObjC/optimized-setter.m | 3 +- test/CodeGenObjC/prop-metadata-gnu.m | 15 + test/CodeGenObjC/property.m | 5 +- test/CodeGenObjC/synchronized.m | 8 +- test/CodeGenObjC/synthesize_ivar-cont-class.m | 3 + test/CodeGenObjC/synthesize_ivar.m | 3 + test/CodeGenObjC/undefined-protocol.m | 3 + test/CodeGenObjC/unoptimized-setter.m | 32 + test/CodeGenObjCXX/address-safety-attr.mm | 2 +- test/CodeGenObjCXX/arc-exceptions.mm | 10 +- test/CodeGenObjCXX/arc-new-delete.mm | 5 +- test/CodeGenObjCXX/arc-references.mm | 2 +- test/CodeGenObjCXX/arc-special-member-functions.mm | 7 +- test/CodeGenObjCXX/block-var-layout.mm | 6 +- test/CodeGenObjCXX/implementation-in-extern-c.mm | 17 + .../CodeGenObjCXX/implicit-copy-assign-operator.mm | 95 +- test/CodeGenObjCXX/property-objects.mm | 22 +- test/CodeGenOpenCL/single-precision-constant.cl | 3 +- test/Coverage/targets.c | 1 - test/Driver/B-opt.c | 22 + .../Inputs/B_opt_tree/dir1/i386-unknown-linux-ld | 0 test/Driver/Inputs/B_opt_tree/dir1/ld | 0 test/Driver/Inputs/B_opt_tree/dir2/ld | 0 test/Driver/Inputs/B_opt_tree/dir3/prefix-ld | 0 .../arm-linux-androideabi/bin/.keep | 0 .../arm-linux-androideabi/include/c++/4.4.3/.keep | 0 .../arm-linux-androideabi/lib/.keep | 0 .../lib/gcc/arm-linux-androideabi/4.4.3/crtbegin.o | 0 .../gcc/arm-linux-androideabi/4.4.3/crtbeginS.o | 0 .../gcc/arm-linux-androideabi/4.4.3/crtbeginT.o | 0 .../lib/gcc/arm-linux-androideabi/4.4.3/crtend.o | 0 .../lib/gcc/arm-linux-androideabi/4.4.3/crtendS.o | 0 .../lib/gcc/mipsel-linux-android/4.4.3/crtbegin.o | 0 .../lib/gcc/mipsel-linux-android/4.4.3/crtbeginS.o | 0 .../lib/gcc/mipsel-linux-android/4.4.3/crtbeginT.o | 0 .../lib/gcc/mipsel-linux-android/4.4.3/crtend.o | 0 .../lib/gcc/mipsel-linux-android/4.4.3/crtendS.o | 0 .../mipsel-linux-android/4.4.3/mips-r2/crtbegin.o | 0 .../mipsel-linux-android/4.4.3/mips-r2/crtbeginS.o | 0 .../mipsel-linux-android/4.4.3/mips-r2/crtbeginT.o | 0 .../mipsel-linux-android/4.4.3/mips-r2/crtend.o | 0 .../mipsel-linux-android/4.4.3/mips-r2/crtendS.o | 0 .../mipsel-linux-android/bin/.keep | 0 .../mipsel-linux-android/include/c++/4.4.3/.keep | 0 .../mipsel-linux-android/lib/.keep | 0 .../sysroot/usr/lib/crtbegin_dynamic.o | 0 .../sysroot/usr/lib/crtbegin_so.o | 0 .../sysroot/usr/lib/crtbegin_static.o | 0 .../sysroot/usr/lib/crtend_android.o | 0 
.../basic_android_tree/sysroot/usr/lib/crtend_so.o | 0 .../basic_android_tree/usr/lib/crtbegin_dynamic.o | 0 .../basic_android_tree/usr/lib/crtbegin_so.o | 0 .../basic_android_tree/usr/lib/crtbegin_static.o | 0 .../basic_android_tree/usr/lib/crtend_android.o | 0 .../Inputs/basic_android_tree/usr/lib/crtend_so.o | 0 .../gcc/x86_64-unknown-linux/4.6.0/crtfastmath.o | 0 test/Driver/Inputs/debian_6_mips_tree/lib/.keep | 0 test/Driver/Inputs/debian_6_mips_tree/lib32/.keep | 0 test/Driver/Inputs/debian_6_mips_tree/lib64/.keep | 0 .../Inputs/debian_6_mips_tree/usr/lib/crt1.o | 0 .../Inputs/debian_6_mips_tree/usr/lib/crti.o | 0 .../usr/lib/gcc/mipsel-linux-gnu/4.4/64/crtbegin.o | 0 .../usr/lib/gcc/mipsel-linux-gnu/4.4/crtbegin.o | 0 .../lib/gcc/mipsel-linux-gnu/4.4/n32/crtbegin.o | 0 .../Inputs/debian_6_mips_tree/usr/lib32/crt1.o | 0 .../Inputs/debian_6_mips_tree/usr/lib32/crti.o | 0 .../Inputs/debian_6_mips_tree/usr/lib64/crt1.o | 0 .../Inputs/debian_6_mips_tree/usr/lib64/crti.o | 0 .../Driver/Inputs/freescale_ppc64_tree/lib64/.keep | 0 .../Inputs/freescale_ppc64_tree/usr/lib64/crt1.o | 0 .../Inputs/freescale_ppc64_tree/usr/lib64/crti.o | 0 .../Inputs/freescale_ppc64_tree/usr/lib64/crtn.o | 0 .../usr/lib64/powerpc64-fsl-linux/4.6.2/crtbegin.o | 0 .../usr/lib64/powerpc64-fsl-linux/4.6.2/crtend.o | 0 test/Driver/Inputs/freescale_ppc_tree/lib/.keep | 0 .../Inputs/freescale_ppc_tree/usr/lib/crt1.o | 0 .../Inputs/freescale_ppc_tree/usr/lib/crti.o | 0 .../Inputs/freescale_ppc_tree/usr/lib/crtn.o | 0 .../usr/lib/powerpc-fsl-linux/4.6.2/crtbegin.o | 0 .../usr/lib/powerpc-fsl-linux/4.6.2/crtend.o | 0 test/Driver/Wp-args.c | 8 + test/Driver/altivec.cpp | 12 +- test/Driver/android-standalone.cpp | 65 + test/Driver/apple-kext-i386.cpp | 55 - test/Driver/arc.c | 6 +- test/Driver/arm-darwin-builtin.c | 2 +- test/Driver/asan-ld.c | 31 +- test/Driver/asan.c | 1 + test/Driver/bindings.c | 58 +- test/Driver/bitrig.c | 29 + test/Driver/clang-translation.c | 107 +- test/Driver/cpath.c | 4 +- test/Driver/crash-report.c | 3 +- test/Driver/darwin-arch-default.c | 7 + test/Driver/darwin-asan-nofortify.c | 6 + test/Driver/darwin-cc.c | 4 - test/Driver/darwin-ld.c | 6 +- test/Driver/darwin-sdkroot.c | 22 + test/Driver/fast-math.c | 49 + test/Driver/freebsd-mips-as.c | 81 + test/Driver/freebsd.c | 58 +- test/Driver/fsanitize.c | 23 + test/Driver/gcc_forward.c | 2 +- test/Driver/hello.c | 18 - test/Driver/immediate-options.c | 6 +- test/Driver/ios-simulator-arcruntime.c | 8 +- test/Driver/le32-unknown-nacl.cpp | 6 +- test/Driver/linker-opts.c | 2 +- test/Driver/linux-header-search.cpp | 4 +- test/Driver/linux-ld.c | 210 +- test/Driver/mips-as.c | 27 + test/Driver/mips-features.c | 6 + test/Driver/mips-float.c | 12 +- test/Driver/no-objc-arr.m | 1 + test/Driver/objc++-cpp-output.mm | 3 + test/Driver/objc-cpp-output.m | 3 + test/Driver/openbsd.c | 12 +- test/Driver/pic.c | 162 +- test/Driver/retain-comments-from-system-headers.c | 9 + test/Driver/rewrite-legacy-objc.m | 10 +- test/Driver/rewrite-objc.m | 11 +- test/Driver/stack-protector.c | 11 + test/Driver/std.cpp | 8 + test/Driver/tsan.c | 11 +- test/Driver/ubsan-ld.c | 10 + test/Driver/warning-options.cpp | 5 + test/Driver/working-directory-and-abs.c | 1 + test/Driver/x86_64-nacl-defines.cpp | 45 + test/Driver/x86_64-nacl-types.cpp | 37 + test/FixIt/fixit-cxx0x.cpp | 2 - test/FixIt/fixit-include.c | 2 + test/FixIt/fixit-missing-self-in-block.m | 20 + test/FixIt/fixit-objc.m | 2 +- test/FixIt/fixit.c | 7 + test/FixIt/fixit.cpp | 9 +- test/FixIt/format-darwin.m | 198 + 
test/FixIt/no-fixit.cpp | 6 + test/FixIt/typo.cpp | 42 +- test/Frontend/ast-codegen.c | 4 +- test/Frontend/iframework.c | 3 +- test/Frontend/macros.c | 9 + test/Frontend/unknown-pragmas.c | 1 + test/Frontend/verify.c | 7 +- test/Frontend/verify2.c | 7 +- test/Frontend/verify3.c | 41 + test/Frontend/warning-mapping-1.c | 1 + test/Frontend/warning-mapping-4.c | 1 + test/Headers/Inputs/include/stdint.h | 18 + test/Headers/altivec-header.c | 12 + test/Headers/c89.c | 1 + test/Headers/int64-type.c | 1 + test/Headers/typedef_guards.c | 1 + test/Headers/unwind.c | 19 + test/Headers/wchar_limits.cpp | 1 + test/Headers/wmmintrin.c | 1 + .../CommentXML/valid-availability-attr-01.xml | 11 + .../CommentXML/valid-availability-attr-02.xml | 11 + .../Inputs/CommentXML/valid-deprecated-attr.xml | 6 + .../Inputs/CommentXML/valid-unavailable-attr.xml | 7 + test/Index/Inputs/Frameworks/module.map | 1 + .../Inputs/retain-comments-from-system-headers.h | 8 + .../Index/annotate-comments-availability-attrs.cpp | 44 + test/Index/annotate-comments.cpp | 177 +- test/Index/annotate-deep-statements.cpp | 95 + test/Index/annotate-macro-args.m | 4 +- test/Index/annotate-module.m | 42 + test/Index/arc-annotate.m | 21 +- test/Index/c-index-getCursor-pp.c | 4 + test/Index/code-completion-skip-bodies.cpp | 20 + test/Index/comment-xml-schema.c | 5 + test/Index/complete-cxx-inline-methods.cpp | 4 +- test/Index/complete-documentation-templates.cpp | 151 + test/Index/complete-documentation.cpp | 4 +- test/Index/complete-exprs.cpp | 6 +- test/Index/complete-lambdas.mm | 14 +- test/Index/complete-macros.c | 5 +- test/Index/complete-method-decls.m | 16 +- test/Index/complete-objc-message.m | 12 +- test/Index/complete-preamble.cpp | 2 +- test/Index/complete-preprocessor.m | 4 +- test/Index/complete-property-flags.m | 20 +- test/Index/complete-qualified.cpp | 8 +- test/Index/cursor-dynamic-call.mm | 8 +- test/Index/get-cursor-macro-args.h | 4 +- test/Index/get-cursor-macro-args.m | 8 +- test/Index/get-cursor.m | 13 +- test/Index/index-decls.m | 13 + test/Index/index-file.cpp | 6 + test/Index/index-module.m | 54 + test/Index/index-pch-with-module.m | 31 + test/Index/index-pch.cpp | 6 + test/Index/index-with-working-dir.c | 5 + test/Index/overrides.cpp | 3 + test/Index/overrides.m | 4 +- test/Index/overriding-ftemplate-comments.cpp | 79 + test/Index/overriding-method-comments.mm | 124 + test/Index/preamble-reparse-with-BOM.m | 6 + test/Index/rdar12316296-codecompletion.m | 23 + test/Index/recursive-cxx-member-calls.cpp | 2 +- test/Index/retain-comments-from-system-headers.c | 19 + test/Index/usrs.m | 2 - test/Lexer/clang-keywords.cpp | 1 + test/Lexer/digraph.c | 1 + test/Lexer/dollar-idents.c | 8 +- test/Lexer/eof-char.c | 8 + test/Lexer/eof-file.c | 8 + test/Lexer/eof-string.c | 8 + test/Lexer/gnu_keywords.c | 1 + test/Lexer/has_feature_address_sanitizer.cpp | 2 +- test/Lexer/long-long.cpp | 24 + test/Lexer/msdos-cpm-eof.c | 1 + test/Lexer/newline-eof-c++11.cpp | 1 + test/Lexer/numeric-literal-trash.c | 2 +- test/Lexer/pragma-mark.c | 1 + test/Lexer/rdr-6096838.c | 1 + test/Lexer/string-literal-errors.cpp | 25 + test/Misc/ast-dump-stmt.c | 35 + test/Misc/ast-dump-stmt.m | 36 + test/Misc/caret-diags-macros.c | 79 +- test/Misc/diag-template-diffing-color.cpp | 53 + test/Misc/diag-template-diffing.cpp | 366 + test/Misc/predefines.c | 1 + test/Misc/unnecessary-elipses.cpp | 15 + test/Misc/unprintable.c | 41 +- test/Misc/warning-flags-enabled.c | 16 + test/Misc/warning-flags.c | 15 +- test/Misc/wrong-encoding.c | 33 +- 
test/Misc/wrong-encoding2.c | 8 + test/Modules/Inputs/Modified/A.h | 1 + test/Modules/Inputs/Modified/B.h | 2 + test/Modules/Inputs/Modified/module.map | 2 + .../Inputs/Module.framework/Headers/Module.h | 2 + .../Modules/Inputs/NoUmbrella.framework/module.map | 5 +- .../NotAModule.framework/Headers/NotAModule.h | 2 + test/Modules/Inputs/lookup_right.hpp | 1 + test/Modules/Inputs/macros.h | 9 + test/Modules/Inputs/macros_left.h | 14 + test/Modules/Inputs/macros_other.h | 1 + test/Modules/Inputs/macros_right.h | 17 + test/Modules/Inputs/macros_right_undef.h | 1 + test/Modules/Inputs/macros_top.h | 16 + test/Modules/Inputs/module.map | 30 + .../Inputs/normal-module-map/nested_umbrella/1.h | 1 + .../normal-module-map/nested_umbrella/a-extras.h | 1 + .../normal-module-map/nested_umbrella/decltype.h | 2 + test/Modules/Inputs/redecl-merge-bottom.h | 8 - test/Modules/Inputs/redecl-merge-left.h | 8 +- test/Modules/Inputs/redecl-merge-right.h | 9 +- test/Modules/Inputs/redecl-merge-top.h | 4 +- test/Modules/Inputs/templates-left.h | 29 + test/Modules/Inputs/templates-right.h | 27 + test/Modules/Inputs/templates-top.h | 17 + test/Modules/compiler_builtins.m | 2 + test/Modules/direct-module-import.m | 7 + test/Modules/header-import.m | 1 + test/Modules/import-decl.cpp | 2 +- test/Modules/inferred-frameworks.m | 8 + test/Modules/inferred-submodules.m | 1 + test/Modules/macros.c | 107 + test/Modules/modify-module.m | 23 + test/Modules/module-private.cpp | 7 +- test/Modules/normal-module-map.cpp | 10 + test/Modules/on-demand-macros.m | 1 + test/Modules/redecl-merge.m | 20 +- test/Modules/redeclarations.m | 1 + test/Modules/submodules.m | 1 + test/Modules/templates.mm | 36 + test/PCH/Inputs/badpch-dir.h.gch/.keep | 0 test/PCH/Inputs/badpch-empty.h.gch | 0 test/PCH/Inputs/case-insensitive-include.h | 5 + test/PCH/Inputs/chain-macro-override1.h | 4 + test/PCH/Inputs/chain-macro-override2.h | 3 + test/PCH/Inputs/cxx11-statement-attributes.h | 14 + test/PCH/__va_list_tag.c | 2 + test/PCH/asm.c | 1 + test/PCH/badpch-dir.h.gch/.keep | 0 test/PCH/badpch-empty.h.gch | 0 test/PCH/badpch.c | 6 +- test/PCH/builtins.c | 2 + test/PCH/case-insensitive-include.c | 29 + test/PCH/chain-categories.m | 2 + test/PCH/chain-class-extension.m | 2 + test/PCH/chain-cxx.cpp | 2 + test/PCH/chain-decls.c | 2 + test/PCH/chain-macro-override.c | 2 + test/PCH/chain-macro.c | 1 + test/PCH/chain-remap-types.m | 1 + test/PCH/cmdline-include.c | 1 + test/PCH/cxx-exprs.cpp | 2 + test/PCH/cxx-for-range.h | 5 +- test/PCH/cxx-friends.cpp | 2 + test/PCH/cxx-functions.cpp | 2 + test/PCH/cxx-implicit-moves.cpp | 1 + test/PCH/cxx-method.cpp | 1 + .../cxx-ms-function-specialization-class-scope.cpp | 1 + test/PCH/cxx-namespaces.cpp | 2 + test/PCH/cxx-templates.cpp | 21 +- test/PCH/cxx-templates.h | 5 + test/PCH/cxx-traits.cpp | 2 + test/PCH/cxx-typeid.cpp | 2 + test/PCH/cxx-variadic-templates.cpp | 6 + test/PCH/cxx-variadic-templates.h | 7 + test/PCH/cxx11-constexpr.cpp | 9 + test/PCH/cxx11-exception-spec.cpp | 1 + test/PCH/cxx11-statement-attributes.cpp | 12 + test/PCH/cxx_exprs.cpp | 8 + test/PCH/cxx_exprs.h | 5 + test/PCH/empty-with-headers.c | 2 +- test/PCH/enum.c | 2 + test/PCH/exprs.c | 6 +- test/PCH/field-designator.c | 35 + test/PCH/friend-template.cpp | 46 + test/PCH/fuzzy-pch.c | 16 +- test/PCH/missing-file.cpp | 17 +- test/PCH/objc_container.m | 6 +- test/PCH/objc_import.m | 2 + test/PCH/objc_literals.m | 10 +- test/PCH/objc_literals.mm | 10 +- test/PCH/objc_methods.m | 2 + test/PCH/objc_property.m | 2 + test/PCH/pch-dir.c | 28 + 
test/PCH/pch-dir.h | 5 + test/PCH/pending-ids.m | 2 + test/PCH/pragma-diag-section.cpp | 19 +- test/PCH/pragma-diag.c | 2 + test/PCH/rdar8852495.c | 2 + test/PCH/reinclude.cpp | 2 + test/PCH/single-token-macro.c | 2 + test/PCH/target-options.c | 5 + test/PCH/target-options.h | 2 + test/Parser/MicrosoftExtensions.cpp | 56 +- test/Parser/block-block-storageclass.c | 1 + test/Parser/block-pointer-decl.c | 1 + test/Parser/builtin_classify_type.c | 2 +- test/Parser/check-objc2-syntax-1.m | 1 + test/Parser/colon-colon-parentheses.cpp | 22 + test/Parser/compound_literal.c | 1 + test/Parser/cxx-ambig-decl-expr.cpp | 3 + test/Parser/cxx-attributes.cpp | 1 + test/Parser/cxx-casting.cpp | 12 +- test/Parser/cxx-class.cpp | 14 + test/Parser/cxx-decl.cpp | 5 + test/Parser/cxx-extern-c-array.cpp | 1 + test/Parser/cxx-stmt.cpp | 8 + test/Parser/cxx-template-argument.cpp | 17 + test/Parser/cxx0x-attributes.cpp | 26 +- test/Parser/cxx0x-condition.cpp | 9 +- test/Parser/cxx0x-decl.cpp | 14 +- test/Parser/cxx0x-lambda-expressions.cpp | 3 +- test/Parser/cxx0x-override-control-keywords.cpp | 1 + test/Parser/cxx11-brace-initializers.cpp | 16 + test/Parser/cxx11-stmt-attributes.cpp | 77 +- test/Parser/cxx11-user-defined-literals.cpp | 3 +- test/Parser/empty-translation-unit.c | 2 +- test/Parser/encode.m | 1 + test/Parser/enhanced-proto-1.m | 1 + test/Parser/if-scope-c90.c | 1 + test/Parser/knr_parameter_attributes.c | 1 + test/Parser/ms-inline-asm.c | 8 +- test/Parser/namelookup-bug-1.c | 1 + test/Parser/namelookup-bug-2.c | 1 + test/Parser/namespaces.cpp | 4 + test/Parser/objcxx11-attributes.mm | 18 +- test/Parser/opencl-kernel.cl | 1 + test/Parser/parmvardecl_conversion.c | 1 + test/Parser/pragma-fp-contract.c | 12 + test/Parser/pragma-options.cpp | 6 + test/Parser/recovery.cpp | 7 +- test/Parser/recursion-limits.cpp | 1 + test/Parser/selector-1.m | 1 + test/Parser/statements.c | 15 + test/Parser/top-level-semi-cxx0x.cpp | 1 + test/Parser/types.c | 1 + test/Preprocessor/comment_save_if.c | 1 + test/Preprocessor/cxx_true.cpp | 2 + test/Preprocessor/expr_define_expansion.c | 1 + test/Preprocessor/expr_multichar.c | 1 + test/Preprocessor/has_include.c | 32 +- test/Preprocessor/import_self.c | 2 +- test/Preprocessor/init.c | 293 +- test/Preprocessor/macro_arg_directive.c | 7 + test/Preprocessor/macro_fn_comma_swallow2.c | 64 + test/Preprocessor/macro_paste_identifier_error.c | 1 + test/Preprocessor/microsoft-ext.c | 24 + test/Preprocessor/objc-pp.m | 1 + test/Preprocessor/optimize.c | 3 + test/Preprocessor/pr13851.c | 11 + test/Preprocessor/pragma-pushpop-macro.c | 19 +- test/Preprocessor/pragma_sysheader.c | 3 +- test/Preprocessor/predefined-arch-macros.c | 2 + test/Preprocessor/user_defined_system_framework.c | 3 +- test/Rewriter/no-integrated-preprocessing-64bit.m | 26 + test/Rewriter/no-integrated-preprocessing.m | 26 + test/Rewriter/objc-modern-StretAPI-2.mm | 30 + test/Rewriter/objc-modern-boxing.mm | 2 +- test/Rewriter/objc-modern-numeric-literal.mm | 2 +- test/Sema/MicrosoftCompatibility-x64.c | 8 + test/Sema/MicrosoftCompatibility-x86.c | 6 + test/Sema/MicrosoftCompatibility.c | 2 +- test/Sema/PR2727.c | 1 + test/Sema/PR2728.c | 1 + test/Sema/PR2923.c | 1 + test/Sema/address-constant.c | 1 + test/Sema/align-arm-apcs.c | 1 + test/Sema/align-x86-64.c | 1 + test/Sema/align-x86.c | 1 + test/Sema/arg-scope-c99.c | 1 + test/Sema/arg-scope.c | 1 + test/Sema/arm-layout.c | 1 + test/Sema/array-init.c | 2 + test/Sema/asm.c | 2 +- test/Sema/assign-null.c | 1 + test/Sema/atomic-ops.c | 7 +- 
test/Sema/attr-availability-macosx.c | 6 +- test/Sema/attr-availability.c | 6 +- test/Sema/attr-minsize.c | 5 + test/Sema/attr-used.c | 2 +- test/Sema/bitfield-layout.c | 1 + test/Sema/bitfield-promote.c | 1 + test/Sema/block-return.c | 2 +- test/Sema/block-storageclass.c | 1 + test/Sema/builtin_objc_msgSend.c | 1 + test/Sema/builtins-arm.c | 20 +- test/Sema/builtins-decl.c | 1 + test/Sema/builtins.c | 16 +- test/Sema/c89-2.c | 5 - test/Sema/c89.c | 6 + test/Sema/callingconv.c | 8 +- test/Sema/cast-to-union.c | 6 +- test/Sema/cast.c | 15 +- test/Sema/check-increment.c | 1 + test/Sema/compare.c | 32 +- test/Sema/complex-promotion.c | 1 + test/Sema/compound-literal.c | 19 +- test/Sema/const-eval-64.c | 1 + test/Sema/const-ptr-int-ptr-cast.c | 1 + test/Sema/constant-builtins-2.c | 3 + test/Sema/constant-builtins.c | 4 + test/Sema/darwin-align-cast.c | 1 + test/Sema/enum-packed.c | 1 + test/Sema/expr-comma-c99.c | 1 + test/Sema/expr-comma.c | 1 + test/Sema/exprs.c | 2 +- test/Sema/format-string-percentm.c | 1 + test/Sema/format-strings-darwin.c | 64 + test/Sema/format-strings-gnu.c | 55 + test/Sema/format-strings-non-iso.c | 16 +- test/Sema/format-strings-scanf.c | 16 +- test/Sema/format-strings.c | 12 +- test/Sema/function-redecl.c | 4 + test/Sema/heinous-extensions-on.c | 4 +- test/Sema/i-c-e.c | 2 +- test/Sema/implicit-builtin-freestanding.c | 1 + test/Sema/init-struct-qualified.c | 1 + test/Sema/init-vector.c | 1 + test/Sema/int-arith-convert.c | 1 + test/Sema/invalid-decl.c | 9 + test/Sema/knr-variadic-def.c | 1 + test/Sema/many-logical-ops.c | 1 + test/Sema/member-reference.c | 1 + test/Sema/mms-bitfields.c | 13 + test/Sema/ms-inline-asm.c | 35 + test/Sema/ms_wide_predefined_expr.cpp | 1 + test/Sema/no-format-y2k-turnsoff-format.c | 2 +- test/Sema/outof-range-constant-compare.c | 149 + test/Sema/overloaded-func-transparent-union.c | 1 + test/Sema/parentheses.c | 6 +- test/Sema/parentheses.cpp | 12 + test/Sema/pragma-align-mac68k.c | 13 + test/Sema/pragma-align-packed.c | 1 + test/Sema/pragma-ms_struct.c | 6 + test/Sema/pragma-pack-2.c | 1 + test/Sema/pragma-pack-3.c | 1 + test/Sema/pragma-pack-4.c | 1 + test/Sema/pragma-pack-5.c | 1 + test/Sema/pragma-pack-6.c | 1 + test/Sema/pragma-pack-and-options-align.c | 11 + test/Sema/private-extern.c | 2 +- test/Sema/return-silent.c | 1 + test/Sema/short-enums.c | 1 + test/Sema/stdcall-fastcall-x64.c | 20 + test/Sema/stdcall-fastcall.c | 1 - test/Sema/struct-cast.c | 1 + test/Sema/struct-packed-align.c | 1 + test/Sema/surpress-deprecated.c | 1 + test/Sema/template-specialization.cpp | 21 + test/Sema/tentative-decls.c | 3 +- test/Sema/thread-specifier.c | 9 +- test/Sema/tls.c | 3 + test/Sema/transparent-union-pointer.c | 1 + test/Sema/typedef-prototype.c | 1 + test/Sema/types.c | 2 +- test/Sema/uninit-variables.c | 23 + test/Sema/unnamed-bitfield-init.c | 1 + test/Sema/unused-expr.c | 11 + test/Sema/va_arg_x86_64.c | 1 + test/Sema/variadic-block.c | 1 + test/Sema/vector-cast.c | 12 +- test/Sema/vfprintf-valid-redecl.c | 1 + test/Sema/warn-bad-function-cast.c | 47 + test/Sema/warn-documentation-fixits.cpp | 44 + test/Sema/warn-documentation.cpp | 136 +- test/Sema/warn-documentation.m | 8 +- test/Sema/warn-gnu-designators.c | 1 + test/Sema/warn-missing-variable-declarations.c | 18 + test/Sema/warn-type-safety-mpi-hdf5.c | 4 + test/Sema/warn-type-safety.c | 8 + test/Sema/warn-type-safety.cpp | 4 +- test/Sema/warn-unreachable.c | 9 + test/Sema/warn-unused-function.c | 14 +- test/Sema/wchar.c | 2 + test/Sema/weak-import-on-enum.c | 1 + 
test/SemaCXX/2008-01-11-BadWarning.cpp | 1 + .../SemaCXX/MicrosoftCompatibilityNoExceptions.cpp | 1 + test/SemaCXX/MicrosoftExtensions.cpp | 7 +- test/SemaCXX/PR10447.cpp | 1 + test/SemaCXX/PR5086-ambig-resolution-enum.cpp | 1 + test/SemaCXX/PR6562.cpp | 1 + test/SemaCXX/PR9884.cpp | 1 + test/SemaCXX/PR9902.cpp | 1 + test/SemaCXX/PR9908.cpp | 1 + test/SemaCXX/__try.cpp | 1 + .../SemaCXX/ambiguous-conversion-show-overload.cpp | 21 + test/SemaCXX/anonymous-union-cxx11.cpp | 1 + test/SemaCXX/ast-print.cpp | 83 + test/SemaCXX/attr-format.cpp | 8 + test/SemaCXX/attr-nodebug.cpp | 7 + test/SemaCXX/attr-noreturn.cpp | 26 + test/SemaCXX/attr-unused.cpp | 9 + test/SemaCXX/blocks-1.cpp | 1 + test/SemaCXX/blocks.cpp | 1 + test/SemaCXX/borland-extensions.cpp | 1 + test/SemaCXX/builtin-exception-spec.cpp | 1 + test/SemaCXX/builtin-ptrtomember-overload.cpp | 1 + test/SemaCXX/builtin_objc_msgSend.cpp | 1 + test/SemaCXX/builtins-arm.cpp | 6 + test/SemaCXX/builtins-va_arg.cpp | 52 + test/SemaCXX/builtins.cpp | 13 + test/SemaCXX/cast-conversion.cpp | 22 +- test/SemaCXX/cast-explicit-ctor.cpp | 1 + test/SemaCXX/class-layout.cpp | 1 + test/SemaCXX/class.cpp | 2 +- test/SemaCXX/comma.cpp | 1 + test/SemaCXX/compare.cpp | 32 +- test/SemaCXX/complex-init-list.cpp | 1 + test/SemaCXX/conditional-expr.cpp | 13 + test/SemaCXX/constant-expression-cxx11.cpp | 83 +- test/SemaCXX/constant-expression.cpp | 2 +- test/SemaCXX/constexpr-turing.cpp | 1 + test/SemaCXX/constructor-initializer.cpp | 16 +- test/SemaCXX/crashes.cpp | 35 + test/SemaCXX/cstyle-cast.cpp | 1 + test/SemaCXX/cxx0x-delegating-ctors.cpp | 4 +- test/SemaCXX/cxx0x-initializer-constructor.cpp | 16 + .../cxx0x-initializer-stdinitializerlist.cpp | 4 + test/SemaCXX/cxx11-crashes.cpp | 76 + test/SemaCXX/cxx98-compat-pedantic.cpp | 6 + test/SemaCXX/cxx98-compat.cpp | 13 +- test/SemaCXX/dcl_ambig_res.cpp | 3 + test/SemaCXX/decl-expr-ambiguity.cpp | 2 +- test/SemaCXX/decltype-98.cpp | 1 + test/SemaCXX/decltype-overloaded-functions.cpp | 25 +- test/SemaCXX/decltype-pr4444.cpp | 1 + test/SemaCXX/decltype-pr4448.cpp | 1 + test/SemaCXX/decltype-this.cpp | 1 + test/SemaCXX/decltype.cpp | 15 + test/SemaCXX/default-argument-temporaries.cpp | 1 + test/SemaCXX/defaulted-ctor-loop.cpp | 2 +- test/SemaCXX/do-while-scope.cpp | 1 + test/SemaCXX/empty-class-layout.cpp | 1 + test/SemaCXX/exception-spec-no-exceptions.cpp | 1 + test/SemaCXX/explicit.cpp | 22 +- test/SemaCXX/for-range-dereference.cpp | 89 + test/SemaCXX/for-range-examples.cpp | 2 +- test/SemaCXX/for-range-no-std.cpp | 4 +- test/SemaCXX/format-strings.cpp | 60 +- test/SemaCXX/friend-out-of-line.cpp | 1 + test/SemaCXX/function-type-qual.cpp | 3 + test/SemaCXX/functional-cast.cpp | 1 + test/SemaCXX/gnu-case-ranges.cpp | 1 + test/SemaCXX/goto2.cpp | 1 + test/SemaCXX/implicit-exception-spec.cpp | 11 +- test/SemaCXX/indirect-goto.cpp | 1 + test/SemaCXX/issue547.cpp | 1 + test/SemaCXX/lambda-expressions.cpp | 29 +- test/SemaCXX/libstdcxx_atomic_ns_hack.cpp | 35 + test/SemaCXX/libstdcxx_common_type_hack.cpp | 33 + test/SemaCXX/libstdcxx_is_pod_hack.cpp | 12 +- test/SemaCXX/local-classes.cpp | 1 + test/SemaCXX/lookup-member.cpp | 1 + test/SemaCXX/member-expr-anonymous-union.cpp | 1 + test/SemaCXX/member-expr-static.cpp | 1 + test/SemaCXX/member-pointer-size.cpp | 1 + test/SemaCXX/missing-header.cpp | 2 +- test/SemaCXX/ms-exception-spec.cpp | 1 + test/SemaCXX/ms-interface.cpp | 77 + test/SemaCXX/nested-name-spec.cpp | 11 +- test/SemaCXX/new-delete-0x.cpp | 9 +- test/SemaCXX/new-delete-predefined-decl-2.cpp | 1 + 
test/SemaCXX/new-delete-predefined-decl.cpp | 1 + test/SemaCXX/no-warn-composite-pointer-type.cpp | 9 + test/SemaCXX/no-wchar.cpp | 9 + test/SemaCXX/null_in_arithmetic_ops.cpp | 2 + test/SemaCXX/nullptr-98.cpp | 1 + test/SemaCXX/overload-value-dep-arg.cpp | 1 + test/SemaCXX/overloaded-builtin-operators-0x.cpp | 1 + test/SemaCXX/overloaded-builtin-operators.cpp | 2 + test/SemaCXX/overloaded-operator-decl.cpp | 10 + test/SemaCXX/pragma-pack.cpp | 1 + test/SemaCXX/pragma-unused.cpp | 1 + test/SemaCXX/pragma-visibility.cpp | 7 + test/SemaCXX/prefetch-enum.cpp | 1 + test/SemaCXX/primary-base.cpp | 1 + test/SemaCXX/ptrtomember-overload-resolution.cpp | 1 + test/SemaCXX/qualified-member-enum.cpp | 1 + test/SemaCXX/references.cpp | 2 +- test/SemaCXX/scope-check.cpp | 103 +- test/SemaCXX/short-wchar-sign.cpp | 1 + test/SemaCXX/static-initializers.cpp | 1 + test/SemaCXX/switch-implicit-fallthrough-cxx98.cpp | 3 + test/SemaCXX/switch-implicit-fallthrough-macro.cpp | 139 + .../switch-implicit-fallthrough-per-method.cpp | 2 +- test/SemaCXX/tag-ambig.cpp | 1 + test/SemaCXX/trailing-return-0x.cpp | 16 + test/SemaCXX/trivial-constructor.cpp | 1 + test/SemaCXX/trivial-destructor.cpp | 1 + test/SemaCXX/typo-correction.cpp | 12 +- test/SemaCXX/uninitialized.cpp | 151 +- test/SemaCXX/unknown-type-name.cpp | 23 + test/SemaCXX/unused-functions.cpp | 1 + test/SemaCXX/unused.cpp | 27 + test/SemaCXX/using-decl-pr4441.cpp | 1 + test/SemaCXX/using-decl-pr4450.cpp | 1 + test/SemaCXX/value-dependent-exprs.cpp | 1 + test/SemaCXX/vararg-default-arg.cpp | 1 + test/SemaCXX/vararg-non-pod.cpp | 18 + test/SemaCXX/vector.cpp | 11 + test/SemaCXX/warn-assignment-condition.cpp | 6 +- test/SemaCXX/warn-c++11-extensions.cpp | 7 + test/SemaCXX/warn-enum-compare.cpp | 4 +- ...-implicit-conversion-floating-point-to-bool.cpp | 24 + .../SemaCXX/warn-missing-variable-declarations.cpp | 43 + test/SemaCXX/warn-new-overaligned-2.cpp | 1 + test/SemaCXX/warn-overloaded-virtual.cpp | 56 + test/SemaCXX/warn-self-comparisons.cpp | 1 + test/SemaCXX/warn-thread-safety-analysis.cpp | 669 +- test/SemaCXX/warn-thread-safety-parsing.cpp | 49 +- test/SemaCXX/warn-unique-enum.cpp | 27 - test/SemaCXX/warn-unused-filescoped.cpp | 16 +- test/SemaCXX/warn-unused-variables.cpp | 17 +- test/SemaCXX/warn-using-namespace-in-header.cpp | 96 +- test/SemaCXX/warn-using-namespace-in-header.h | 50 - test/SemaCXX/zero-length-arrays.cpp | 1 + test/SemaObjC/ClassPropertyNotObject.m | 1 + test/SemaObjC/ContClassPropertyLookup.m | 1 + test/SemaObjC/arc-decls.m | 2 + test/SemaObjC/arc-objc-lifetime.m | 31 +- test/SemaObjC/arc-property-decl-attrs.m | 6 +- test/SemaObjC/arc-property-lifetime.m | 22 +- test/SemaObjC/arc-property.m | 12 +- test/SemaObjC/arc-readonly-property-ivar-1.m | 1 + test/SemaObjC/arc-readonly-property-ivar.m | 1 + test/SemaObjC/arc-repeated-weak.mm | 386 + test/SemaObjC/arc-setter-property-match.m | 1 + test/SemaObjC/arc-system-header.m | 2 +- test/SemaObjC/arc-unavailable-for-weakref.m | 2 +- test/SemaObjC/arc-unsafe_unretained.m | 1 + test/SemaObjC/attr-availability.m | 8 +- test/SemaObjC/attr-cleanup.m | 1 + test/SemaObjC/attr-deprecated.m | 3 +- test/SemaObjC/block-as-object.m | 1 + test/SemaObjC/block-ivar.m | 1 + test/SemaObjC/block-return.m | 1 + test/SemaObjC/builtin_objc_assign_ivar.m | 1 + test/SemaObjC/builtin_objc_msgSend.m | 1 + test/SemaObjC/category-method-lookup-2.m | 1 + test/SemaObjC/category-method-lookup.m | 1 + test/SemaObjC/class-getter-using-dotsyntax.m | 1 + test/SemaObjC/class-property-access.m | 1 + 
test/SemaObjC/class-protocol.m | 1 + test/SemaObjC/comptypes-2.m | 1 + test/SemaObjC/comptypes-8.m | 1 + test/SemaObjC/conditional-expr-5.m | 1 + test/SemaObjC/conditional-expr-6.m | 1 + test/SemaObjC/conditional-expr-7.m | 1 + test/SemaObjC/conditional-expr-8.m | 1 + test/SemaObjC/conflict-nonfragile-abi2.m | 1 + test/SemaObjC/continuation-class-err.m | 4 +- test/SemaObjC/crash-on-objc-bool-literal.m | 12 + test/SemaObjC/default-synthesize-2.m | 4 +- test/SemaObjC/delay-parsing-cfunctions.m | 1 + test/SemaObjC/direct-synthesized-ivar-access.m | 1 + test/SemaObjC/enhanced-proto-2.m | 1 + test/SemaObjC/enum-fixed-type.m | 1 + test/SemaObjC/error-property-gc-attr.m | 8 +- .../getter-setter-defined-in-category-of-parent.m | 1 + test/SemaObjC/iboutletcollection-attr.m | 4 +- test/SemaObjC/id_builtin.m | 1 + test/SemaObjC/idiomatic-parentheses.m | 11 + test/SemaObjC/ignore-qualifier-on-qualified-id.m | 1 + test/SemaObjC/ignore-weakimport-method.m | 1 + test/SemaObjC/interface-layout-2.m | 1 + test/SemaObjC/interface-layout.m | 1 + test/SemaObjC/interface-scope-2.m | 1 + test/SemaObjC/interface-scope.m | 1 + test/SemaObjC/ivar-access-package.m | 1 + test/SemaObjC/ivar-in-class-extension-error.m | 4 +- test/SemaObjC/ivar-in-class-extension.m | 2 +- test/SemaObjC/ivar-sem-check-2.m | 4 +- test/SemaObjC/method-attributes.m | 29 + test/SemaObjC/method-conflict-1.m | 1 + test/SemaObjC/method-in-class-extension-impl.m | 1 + test/SemaObjC/method-lookup-2.m | 1 + test/SemaObjC/method-lookup-4.m | 1 + test/SemaObjC/method-typecheck-1.m | 15 + test/SemaObjC/nested-typedef-decl.m | 1 + test/SemaObjC/no-gc-weak-test.m | 1 + test/SemaObjC/no-ivar-access-control.m | 1 + test/SemaObjC/no-ivar-in-interface-block.m | 6 +- test/SemaObjC/no-warn-qual-mismatch.m | 1 + test/SemaObjC/no-warn-synth-protocol-meth.m | 1 + test/SemaObjC/no-warn-unimpl-method.m | 1 + test/SemaObjC/no-warning-unavail-unimp.m | 1 + test/SemaObjC/nonarc-weak.m | 16 + test/SemaObjC/nonnull.m | 1 + test/SemaObjC/nowarn-superclass-method-mismatch.m | 1 + test/SemaObjC/nsobject-attribute-1.m | 1 + test/SemaObjC/nsobject-attribute.m | 11 +- test/SemaObjC/objc-buffered-methods.m | 1 + test/SemaObjC/objc-literal-comparison.m | 4 + test/SemaObjC/objc-qualified-property-lookup.m | 1 + .../overriding-property-in-class-extension.m | 27 + test/SemaObjC/pedantic-dynamic-test.m | 1 + test/SemaObjC/pragma-pack.m | 1 + test/SemaObjC/property-11.m | 1 + test/SemaObjC/property-13.m | 1 + test/SemaObjC/property-2.m | 1 + test/SemaObjC/property-6.m | 1 + test/SemaObjC/property-7.m | 1 + test/SemaObjC/property-8.m | 1 + test/SemaObjC/property-9-impl-method.m | 1 + test/SemaObjC/property-and-class-extension.m | 2 +- test/SemaObjC/property-and-ivar-use.m | 1 + test/SemaObjC/property-deprecated-warning.m | 64 + test/SemaObjC/property-dot-receiver.m | 1 + test/SemaObjC/property-impl-misuse.m | 2 +- test/SemaObjC/property-in-class-extension-1.m | 59 + test/SemaObjC/property-ivar-mismatch.m | 8 +- test/SemaObjC/property-method-lookup-impl.m | 1 + test/SemaObjC/property-nonfragile-abi.m | 1 + test/SemaObjC/property-noprotocol-warning.m | 1 + test/SemaObjC/property-redundant-decl-accessor.m | 1 + test/SemaObjC/property-weak.m | 1 + test/SemaObjC/property.m | 4 +- test/SemaObjC/props-on-prots.m | 1 + test/SemaObjC/protocol-expr-1.m | 1 + test/SemaObjC/protocol-implementation-inherited.m | 1 + test/SemaObjC/protocol-lookup-2.m | 1 + test/SemaObjC/protocol-lookup.m | 1 + .../protocol-qualified-class-unsupported.m | 1 + test/SemaObjC/rdar6248119.m | 1 + 
test/SemaObjC/restrict-id-type.m | 1 + test/SemaObjC/selector-1.m | 1 + test/SemaObjC/selector-2.m | 1 + test/SemaObjC/self-declared-in-block.m | 1 + test/SemaObjC/self-in-function.m | 1 + test/SemaObjC/setter-dotsyntax.m | 1 + test/SemaObjC/super-cat-prot.m | 1 + test/SemaObjC/super-dealloc-attribute.m | 87 + test/SemaObjC/super-property-message-expr.m | 1 + test/SemaObjC/super-property-notation.m | 1 + test/SemaObjC/synth-provisional-ivars-1.m | 1 + test/SemaObjC/synthesize-setter-contclass.m | 1 + test/SemaObjC/transparent-union.m | 1 + test/SemaObjC/ucn-objc-string.m | 1 + test/SemaObjC/uninit-variables.m | 29 + test/SemaObjC/unused.m | 27 +- test/SemaObjC/va-method-1.m | 1 + test/SemaObjC/warn-direct-ivar-access.m | 2 +- test/SemaObjC/warn-implicit-self-in-block.m | 18 + test/SemaObjC/warn-isa-ref.m | 2 +- test/SemaObjC/warn-retain-cycle.m | 63 +- test/SemaObjC/warning-missing-selector-name.m | 32 + test/SemaObjC/weak-property.m | 2 +- test/SemaObjC/weak-receiver-warn.m | 48 +- test/SemaObjC/writable-property-in-superclass.m | 1 + test/SemaObjCXX/abstract-class-type-ivar.mm | 2 +- test/SemaObjCXX/arc-0x.mm | 12 + test/SemaObjCXX/arc-bool-conversion.mm | 1 + test/SemaObjCXX/arc-libstdcxx.mm | 1 + test/SemaObjCXX/arc-memfunc.mm | 1 + test/SemaObjCXX/arc-non-pod.mm | 116 - test/SemaObjCXX/arc-objc-lifetime.mm | 68 + test/SemaObjCXX/arc-object-init-destroy.mm | 52 - test/SemaObjCXX/arc-type-traits.mm | 1 + test/SemaObjCXX/argument-dependent-lookup.mm | 1 + test/SemaObjCXX/category-lookup.mm | 2 +- test/SemaObjCXX/composite-objc-pointertype.mm | 1 + test/SemaObjCXX/conversion-ranking.mm | 1 + test/SemaObjCXX/conversion-to-objc-pointer-2.mm | 1 + test/SemaObjCXX/conversion-to-objc-pointer.mm | 1 + test/SemaObjCXX/debugger-support.mm | 8 + test/SemaObjCXX/decltype.mm | 25 + test/SemaObjCXX/delay-parsing-cfunctions.mm | 1 + test/SemaObjCXX/delay-parsing-cplusfuncs.mm | 1 + test/SemaObjCXX/delay-parsing-func-tryblock.mm | 1 + test/SemaObjCXX/expr-objcxx.mm | 1 + test/SemaObjCXX/format-strings.mm | 81 + test/SemaObjCXX/function-pointer-void-star.mm | 1 + test/SemaObjCXX/instantiate-method-return.mm | 1 + test/SemaObjCXX/ivar-lookup.mm | 1 + test/SemaObjCXX/ivar-struct.mm | 1 + test/SemaObjCXX/linkage-spec.mm | 1 + test/SemaObjCXX/namespace-lookup.mm | 1 + test/SemaObjCXX/null_objc_pointer.mm | 1 + test/SemaObjCXX/nullptr.mm | 1 + test/SemaObjCXX/overload-gc.mm | 1 + test/SemaObjCXX/pointer-to-objc-pointer-conv.mm | 3 + test/SemaObjCXX/propert-dot-error.mm | 2 +- test/SemaObjCXX/properties.mm | 23 + test/SemaObjCXX/property-synthesis-error.mm | 23 +- test/SemaObjCXX/property-type-mismatch.mm | 1 + test/SemaObjCXX/references.mm | 1 + .../reinterpret-cast-objc-pointertype.mm | 1 + test/SemaObjCXX/reserved-keyword-methods.mm | 1 + test/SemaObjCXX/standard-conversion-to-bool.mm | 1 + test/SemaObjCXX/static-cast.mm | 1 + test/SemaObjCXX/vla.mm | 1 + test/SemaOpenCL/cond.cl | 1 + test/SemaOpenCL/init.cl | 1 + test/SemaOpenCL/vec_compare.cl | 1 + test/SemaOpenCL/vector_literals_const.cl | 1 + test/SemaTemplate/ackermann.cpp | 1 + test/SemaTemplate/alias-church-numerals.cpp | 1 + .../SemaTemplate/alias-template-template-param.cpp | 1 + test/SemaTemplate/array-to-pointer-decay.cpp | 1 + test/SemaTemplate/atomics.cpp | 1 + .../class-template-ctor-initializer.cpp | 17 + test/SemaTemplate/class-template-id.cpp | 2 +- test/SemaTemplate/constexpr-instantiate.cpp | 133 + test/SemaTemplate/current-instantiation.cpp | 14 +- test/SemaTemplate/deduction-crash.cpp | 25 +- 
test/SemaTemplate/default-arguments-cxx0x.cpp | 1 + test/SemaTemplate/dependent-base-member-init.cpp | 1 + test/SemaTemplate/dependent-expr.cpp | 1 + test/SemaTemplate/dependent-names.cpp | 26 +- test/SemaTemplate/dependent-sized_array.cpp | 11 + test/SemaTemplate/derived.cpp | 12 + test/SemaTemplate/enum-argument.cpp | 1 + test/SemaTemplate/example-typelist.cpp | 1 + test/SemaTemplate/inject-templated-friend-post.cpp | 8 +- test/SemaTemplate/instantiate-array.cpp | 1 + test/SemaTemplate/instantiate-attr.cpp | 10 + test/SemaTemplate/instantiate-decl-init.cpp | 1 + test/SemaTemplate/instantiate-declref-ice.cpp | 1 + test/SemaTemplate/instantiate-deeply.cpp | 1 + .../instantiate-dependent-nested-name.cpp | 1 + .../instantiate-elab-type-specifier.cpp | 1 + test/SemaTemplate/instantiate-enum-2.cpp | 1 + .../instantiate-exception-spec-cxx11.cpp | 12 - test/SemaTemplate/instantiate-friend-class.cpp | 1 + test/SemaTemplate/instantiate-local-class.cpp | 1 + test/SemaTemplate/instantiate-member-expr.cpp | 17 +- .../instantiate-non-type-template-parameter.cpp | 1 + .../instantiate-overload-candidates.cpp | 30 + test/SemaTemplate/instantiate-overloaded-arrow.cpp | 1 + test/SemaTemplate/instantiate-sizeof.cpp | 1 + test/SemaTemplate/instantiation-default-3.cpp | 1 + .../instantiation-depth-exception-spec.cpp | 11 + test/SemaTemplate/instantiation-depth-subst-2.cpp | 7 +- test/SemaTemplate/instantiation-depth-subst.cpp | 6 +- test/SemaTemplate/issue150.cpp | 1 + test/SemaTemplate/lookup-dependent-bases.cpp | 1 + test/SemaTemplate/member-access-ambig.cpp | 21 +- test/SemaTemplate/member-initializers.cpp | 1 + test/SemaTemplate/nested-linkage.cpp | 1 + test/SemaTemplate/nested-template.cpp | 5 + .../SemaTemplate/operator-function-id-template.cpp | 1 + test/SemaTemplate/overload-uneval.cpp | 1 + test/SemaTemplate/pragma-ms_struct.cpp | 1 + test/SemaTemplate/temp_class_spec_blocks.cpp | 1 + test/SemaTemplate/template-class-traits.cpp | 1 + test/SemaTemplate/typo-dependent-name.cpp | 1 + test/SemaTemplate/unresolved-construct.cpp | 1 + test/Tooling/clang-check-ast-dump.cpp | 9 + test/lit.cfg | 29 +- tools/CMakeLists.txt | 6 +- tools/Makefile | 5 + tools/arcmt-test/CMakeLists.txt | 2 +- tools/arcmt-test/Makefile | 2 +- tools/arcmt-test/arcmt-test.cpp | 10 +- tools/c-arcmt-test/Makefile | 8 +- tools/c-index-test/CMakeLists.txt | 7 + tools/c-index-test/Makefile | 3 - tools/c-index-test/c-index-test.c | 173 +- tools/clang-check/CMakeLists.txt | 1 + tools/clang-check/ClangCheck.cpp | 130 +- tools/clang-check/Makefile | 4 +- tools/diagtool/ShowEnabledWarnings.cpp | 4 +- tools/diagtool/TreeView.cpp | 51 +- tools/driver/CMakeLists.txt | 3 +- tools/driver/Makefile | 3 +- tools/driver/cc1_main.cpp | 78 +- tools/driver/cc1as_main.cpp | 17 +- tools/driver/driver.cpp | 12 +- tools/libclang/CIndex.cpp | 202 +- tools/libclang/CIndexCXX.cpp | 3 +- tools/libclang/CIndexCodeCompletion.cpp | 8 +- tools/libclang/CIndexDiagnostic.cpp | 8 +- tools/libclang/CIndexUSRs.cpp | 44 +- tools/libclang/CMakeLists.txt | 3 +- tools/libclang/CXComment.cpp | 197 +- tools/libclang/CXComment.h | 23 +- tools/libclang/CXCursor.cpp | 204 +- tools/libclang/CXType.cpp | 3 +- tools/libclang/CursorVisitor.h | 20 +- tools/libclang/IndexBody.cpp | 14 +- tools/libclang/IndexDecl.cpp | 15 +- tools/libclang/Indexing.cpp | 104 +- tools/libclang/IndexingContext.cpp | 68 +- tools/libclang/IndexingContext.h | 26 +- tools/libclang/Makefile | 15 +- tools/libclang/RecursiveASTVisitor.h | 7 +- tools/libclang/libclang.exports | 7 + tools/scan-build/ccc-analyzer | 
2 + tools/scan-build/scan-build | 338 +- tools/scan-build/scan-build.1 | 15 +- tools/scan-build/set-xcode-analyzer | 22 +- tools/scan-view/ScanView.py | 5 + unittests/AST/CMakeLists.txt | 5 +- unittests/AST/CommentLexer.cpp | 327 +- unittests/AST/CommentParser.cpp | 125 +- unittests/AST/DeclPrinterTest.cpp | 1248 + unittests/AST/Makefile | 8 +- unittests/AST/SourceLocationTest.cpp | 289 + unittests/AST/StmtPrinterTest.cpp | 172 + unittests/ASTMatchers/ASTMatchersTest.cpp | 1859 +- unittests/ASTMatchers/ASTMatchersTest.h | 20 +- unittests/ASTMatchers/Makefile | 3 +- unittests/Basic/SourceManagerTest.cpp | 32 +- unittests/Frontend/Makefile | 3 +- unittests/Lex/CMakeLists.txt | 1 + unittests/Lex/LexerTest.cpp | 21 +- unittests/Lex/PPCallbacksTest.cpp | 246 + unittests/Lex/PreprocessingRecordTest.cpp | 21 +- unittests/Tooling/CMakeLists.txt | 2 +- unittests/Tooling/CompilationDatabaseTest.cpp | 123 +- unittests/Tooling/Makefile | 3 +- unittests/Tooling/RecursiveASTVisitorTest.cpp | 68 + unittests/Tooling/RefactoringCallbacksTest.cpp | 16 +- unittests/Tooling/RefactoringTest.cpp | 4 +- unittests/Tooling/RewriterTestContext.h | 11 +- unittests/Tooling/TestVisitor.h | 161 +- unittests/Tooling/ToolingTest.cpp | 32 + utils/ClangDataFormat.py | 113 + utils/TableGen/CMakeLists.txt | 4 +- utils/TableGen/ClangAttrEmitter.cpp | 4 +- utils/TableGen/ClangCommentCommandInfoEmitter.cpp | 72 + utils/TableGen/ClangCommentHTMLTagsEmitter.cpp | 69 + utils/TableGen/ClangDiagnosticsEmitter.cpp | 20 +- utils/TableGen/ClangSACheckersEmitter.cpp | 23 +- utils/TableGen/Makefile | 2 - utils/TableGen/NeonEmitter.cpp | 28 +- utils/TableGen/OptParserEmitter.cpp | 101 +- utils/TableGen/TableGen.cpp | 175 +- utils/TableGen/TableGenBackends.h | 5 + utils/analyzer/CmpRuns.py | 92 +- utils/analyzer/SATestAdd.py | 18 +- utils/analyzer/SATestBuild.py | 163 +- utils/analyzer/SumTimerInfo.py | 17 +- utils/analyzer/update_plist_test.pl | 51 + www/analyzer/available_checks.html | 3 +- www/analyzer/checker_dev_manual.html | 2 +- www/analyzer/content.css | 32 + www/analyzer/latest_checker.html.incl | 2 +- www/analyzer/menu.html.incl | 1 + www/analyzer/potential_checkers.html | 1651 ++ www/analyzer/release_notes.html | 23 + www/cxx_status.html | 8 +- www/libstdc++4.6-clang11.patch | 11 + www/menu.css | 4 +- www/timing-data/2008-10-31/176.gcc-01.txt | 135 - www/timing-data/2008-10-31/176.gcc-02.txt | 135 - www/timing-data/2008-10-31/176.gcc.png | Bin 20395 -> 0 bytes www/timing-data/2008-10-31/sketch-01.txt | 187 - www/timing-data/2008-10-31/sketch-02.txt | 187 - www/timing-data/2008-10-31/sketch.png | Bin 23482 -> 0 bytes www/timing-data/2009-03-02/176.gcc.pdf | Bin 34547 -> 0 bytes www/timing-data/2009-03-02/176.gcc.png | Bin 77003 -> 0 bytes www/timing-data/2009-03-02/176.gcc.txt | 1120 - www/timing-data/2009-03-02/sketch.pdf | Bin 36086 -> 0 bytes www/timing-data/2009-03-02/sketch.png | Bin 78278 -> 0 bytes www/timing-data/2009-03-02/sketch.txt | 2368 -- www/timing-data/2009-06-26/176.gcc.pdf | Bin 33680 -> 0 bytes www/timing-data/2009-06-26/176.gcc.png | Bin 61190 -> 0 bytes www/timing-data/2009-06-26/176.gcc.txt | 699 - www/timing-data/2009-06-26/sketch.pdf | Bin 34528 -> 0 bytes www/timing-data/2009-06-26/sketch.png | Bin 62222 -> 0 bytes www/timing-data/2009-06-26/sketch.txt | 803 - 2231 files changed, 136963 insertions(+), 65030 deletions(-) create mode 100644 bindings/python/tests/cindex/test_code_completion.py create mode 100644 docs/LibASTMatchers.html create mode 100644 docs/LibASTMatchersReference.html create mode 100644 
docs/analyzer/debug-checks.txt create mode 100644 docs/tools/dump_ast_matchers.py create mode 100644 include/clang/AST/CommentCommands.td create mode 100644 include/clang/AST/CommentHTMLTags.td create mode 100644 include/clang/ASTMatchers/ASTTypeTraits.h create mode 100644 include/clang/Analysis/DomainSpecific/ObjCNoReturn.h create mode 100644 include/clang/Basic/DiagnosticOptions.def create mode 100644 include/clang/Basic/DiagnosticOptions.h create mode 100644 include/clang/Basic/Sanitizers.def delete mode 100644 include/clang/Frontend/Analyses.def delete mode 100644 include/clang/Frontend/AnalyzerOptions.h create mode 100644 include/clang/Frontend/CodeGenOptions.def delete mode 100644 include/clang/Frontend/DiagnosticOptions.h delete mode 100644 include/clang/Frontend/HeaderSearchOptions.h delete mode 100644 include/clang/Frontend/PreprocessorOptions.h create mode 100644 include/clang/Lex/HeaderSearchOptions.h create mode 100644 include/clang/Lex/PPMutationListener.h create mode 100644 include/clang/Lex/PreprocessorOptions.h delete mode 100644 include/clang/Rewrite/ASTConsumers.h create mode 100644 include/clang/Rewrite/Core/DeltaTree.h create mode 100644 include/clang/Rewrite/Core/HTMLRewrite.h create mode 100644 include/clang/Rewrite/Core/RewriteRope.h create mode 100644 include/clang/Rewrite/Core/Rewriter.h create mode 100644 include/clang/Rewrite/Core/TokenRewriter.h delete mode 100644 include/clang/Rewrite/DeltaTree.h delete mode 100644 include/clang/Rewrite/FixItRewriter.h create mode 100644 include/clang/Rewrite/Frontend/ASTConsumers.h create mode 100644 include/clang/Rewrite/Frontend/FixItRewriter.h create mode 100644 include/clang/Rewrite/Frontend/FrontendActions.h create mode 100644 include/clang/Rewrite/Frontend/Rewriters.h delete mode 100644 include/clang/Rewrite/FrontendActions.h delete mode 100644 include/clang/Rewrite/HTMLRewrite.h delete mode 100644 include/clang/Rewrite/RewriteRope.h delete mode 100644 include/clang/Rewrite/Rewriter.h delete mode 100644 include/clang/Rewrite/Rewriters.h delete mode 100644 include/clang/Rewrite/TokenRewriter.h create mode 100644 include/clang/Sema/MultiplexExternalSemaSource.h delete mode 100644 include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h create mode 100644 include/clang/StaticAnalyzer/Core/Analyses.def create mode 100644 include/clang/StaticAnalyzer/Core/AnalyzerOptions.h create mode 100644 include/clang/StaticAnalyzer/Core/PathSensitive/DynamicTypeInfo.h delete mode 100644 include/clang/Tooling/CommandLineClangTool.h create mode 100644 include/clang/Tooling/CommonOptionsParser.h create mode 100644 include/clang/Tooling/CompilationDatabasePluginRegistry.h create mode 100644 include/clang/Tooling/FileMatchTrie.h create mode 100644 include/clang/Tooling/JSONCompilationDatabase.h create mode 100644 lib/Analysis/BodyFarm.cpp create mode 100644 lib/Analysis/BodyFarm.h create mode 100644 lib/Analysis/ObjCNoReturn.cpp create mode 100644 lib/Driver/SanitizerArgs.h create mode 100644 lib/Headers/__wmmintrin_aes.h create mode 100644 lib/Headers/__wmmintrin_pclmul.h create mode 100644 lib/Headers/f16cintrin.h create mode 100644 lib/Headers/rtmintrin.h create mode 100644 lib/Rewrite/Core/CMakeLists.txt create mode 100644 lib/Rewrite/Core/DeltaTree.cpp create mode 100644 lib/Rewrite/Core/HTMLRewrite.cpp create mode 100644 lib/Rewrite/Core/Makefile create mode 100644 lib/Rewrite/Core/RewriteRope.cpp create mode 100644 lib/Rewrite/Core/Rewriter.cpp create mode 100644 lib/Rewrite/Core/TokenRewriter.cpp delete mode 100644 
lib/Rewrite/DeltaTree.cpp delete mode 100644 lib/Rewrite/FixItRewriter.cpp create mode 100644 lib/Rewrite/Frontend/CMakeLists.txt create mode 100644 lib/Rewrite/Frontend/FixItRewriter.cpp create mode 100644 lib/Rewrite/Frontend/FrontendActions.cpp create mode 100644 lib/Rewrite/Frontend/HTMLPrint.cpp create mode 100644 lib/Rewrite/Frontend/InclusionRewriter.cpp create mode 100644 lib/Rewrite/Frontend/Makefile create mode 100644 lib/Rewrite/Frontend/RewriteMacros.cpp create mode 100644 lib/Rewrite/Frontend/RewriteModernObjC.cpp create mode 100644 lib/Rewrite/Frontend/RewriteObjC.cpp create mode 100644 lib/Rewrite/Frontend/RewriteTest.cpp delete mode 100644 lib/Rewrite/FrontendActions.cpp delete mode 100644 lib/Rewrite/HTMLPrint.cpp delete mode 100644 lib/Rewrite/HTMLRewrite.cpp delete mode 100644 lib/Rewrite/InclusionRewriter.cpp delete mode 100644 lib/Rewrite/RewriteMacros.cpp delete mode 100644 lib/Rewrite/RewriteModernObjC.cpp delete mode 100644 lib/Rewrite/RewriteObjC.cpp delete mode 100644 lib/Rewrite/RewriteRope.cpp delete mode 100644 lib/Rewrite/RewriteTest.cpp delete mode 100644 lib/Rewrite/Rewriter.cpp delete mode 100644 lib/Rewrite/TokenRewriter.cpp create mode 100644 lib/Sema/MultiplexExternalSemaSource.cpp create mode 100644 lib/Sema/ScopeInfo.cpp create mode 100644 lib/Sema/SemaStmtAsm.cpp delete mode 100644 lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp create mode 100644 lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp create mode 100644 lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp delete mode 100644 lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp create mode 100644 lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp create mode 100644 lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp create mode 100644 lib/StaticAnalyzer/Core/AnalyzerOptions.cpp delete mode 100644 lib/StaticAnalyzer/Core/BasicConstraintManager.cpp create mode 100644 lib/StaticAnalyzer/Core/ConstraintManager.cpp delete mode 100644 lib/Tooling/CommandLineClangTool.cpp create mode 100644 lib/Tooling/CommonOptionsParser.cpp delete mode 100644 lib/Tooling/CustomCompilationDatabase.h create mode 100644 lib/Tooling/FileMatchTrie.cpp create mode 100644 lib/Tooling/JSONCompilationDatabase.cpp create mode 100644 runtime/compiler-rt/clang_linux_test_input.c create mode 100644 test/Analysis/CFContainers-invalid.c create mode 100644 test/Analysis/Inputs/system-header-simulator-cxx.h create mode 100644 test/Analysis/Inputs/system-header-simulator-for-simple-stream.h create mode 100644 test/Analysis/Inputs/system-header-simulator-objc.h create mode 100644 test/Analysis/Inputs/system-header-simulator.h create mode 100644 test/Analysis/analyzer-config.c create mode 100644 test/Analysis/analyzer-config.cpp create mode 100644 test/Analysis/array-struct-region.cpp create mode 100644 test/Analysis/bitwise-ops.c create mode 100644 test/Analysis/complex-init-list.cpp create mode 100644 test/Analysis/conditional-operator-path-notes.c create mode 100644 test/Analysis/diagnostics/deref-track-symbolic-region.c create mode 100644 test/Analysis/diagnostics/deref-track-symbolic-region.cpp create mode 100644 test/Analysis/diagnostics/undef-value-param.c create mode 100644 test/Analysis/diagnostics/undef-value-param.m create mode 100644 test/Analysis/exceptions.mm create mode 100644 test/Analysis/inlining/assume-super-init-does-not-return-nil.m create mode 100644 test/Analysis/inlining/eager-reclamation-path-notes.c create mode 100644 test/Analysis/inlining/false-positive-suppression.c create mode 
100644 test/Analysis/inlining/path-notes.m create mode 100644 test/Analysis/inlining/retain-count-self-init.m create mode 100644 test/Analysis/inlining/stl.cpp create mode 100644 test/Analysis/inlining/test-always-inline-size-option.c create mode 100644 test/Analysis/inlining/test_objc_inlining_option.m create mode 100644 test/Analysis/logical-ops.c create mode 100644 test/Analysis/member-expr.cpp create mode 100644 test/Analysis/new-with-exceptions.cpp create mode 100644 test/Analysis/null-deref-path-notes.m create mode 100644 test/Analysis/objc-properties.m create mode 100644 test/Analysis/objc_invalidation.m create mode 100644 test/Analysis/plist-html-macros.c create mode 100644 test/Analysis/pointer-to-member.cpp create mode 100644 test/Analysis/simple-stream-checks.c create mode 100644 test/Analysis/static_local.m delete mode 100644 test/Analysis/system-header-simulator-objc.h delete mode 100644 test/Analysis/system-header-simulator.h create mode 100644 test/Analysis/temporaries.cpp create mode 100644 test/Analysis/test-objc-non-nil-return-value-checker.m create mode 100644 test/Analysis/traversal-path-unification.c create mode 100644 test/Analysis/unions.cpp create mode 100644 test/Analysis/viewcontroller.m create mode 100644 test/Analysis/virtualcall.h create mode 100644 test/CXX/basic/basic.scope/basic.scope.local/p2.cpp create mode 100644 test/CXX/class.derived/class.abstract/p16.cpp create mode 100644 test/CXX/dcl.decl/dcl.init/p7.cpp create mode 100644 test/CXX/except/except.spec/p4.cpp delete mode 100644 test/CXX/over/over.built/p1.cpp create mode 100644 test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp create mode 100644 test/CXX/special/class.dtor/p3.cpp create mode 100644 test/CodeGen/a15.c create mode 100644 test/CodeGen/arm-abi-vector.c create mode 100644 test/CodeGen/arm-asm-warn.c create mode 100644 test/CodeGen/arm-pnaclcall.c create mode 100644 test/CodeGen/attr-minsize.cpp create mode 100644 test/CodeGen/builtin-ms-noop.cpp create mode 100644 test/CodeGen/debug-info-line4.c create mode 100644 test/CodeGen/f16c-builtins.c create mode 100644 test/CodeGen/ffp-contract-option.c create mode 100644 test/CodeGen/fp-contract-pragma.cpp delete mode 100644 test/CodeGen/fp-contract.c create mode 100644 test/CodeGen/le32-arguments.c create mode 100644 test/CodeGen/le32-regparm.c create mode 100644 test/CodeGen/libcall-declarations.c create mode 100644 test/CodeGen/long-double-x86-nacl.c create mode 100644 test/CodeGen/microsoft-call-conv-x64.c create mode 100644 test/CodeGen/ms-inline-asm-64.c create mode 100644 test/CodeGen/ppc-atomics.c create mode 100644 test/CodeGen/ppc64-align-long-double.c create mode 100644 test/CodeGen/ppc64-extend.c create mode 100644 test/CodeGen/ppc64-struct-onefloat.c create mode 100644 test/CodeGen/ppc64-varargs-struct.c create mode 100644 test/CodeGen/rtm-builtins.c create mode 100644 test/CodeGen/tbaa-struct.cpp create mode 100644 test/CodeGen/x86_64-arguments-nacl.c create mode 100644 test/CodeGenCXX/assign-construct-memcpy.cpp create mode 100644 test/CodeGenCXX/catch-undef-behavior.cpp create mode 100644 test/CodeGenCXX/cxx11-special-members.cpp create mode 100644 test/CodeGenCXX/debug-info-blocks.cpp create mode 100644 test/CodeGenCXX/debug-info-global-ctor-dtor.cpp create mode 100644 test/CodeGenCXX/debug-info-thunk.cpp delete mode 100644 test/CodeGenCXX/debug-info-user-def.cpp create mode 100644 test/CodeGenCXX/debug-lambda-this.cpp create mode 100644 test/CodeGenCXX/fastcall.cpp create mode 100644 
test/CodeGenCXX/init-priority-attr.cpp create mode 100644 test/CodeGenCXX/mangle-ms-arg-qualifiers.cpp create mode 100644 test/CodeGenCXX/mangle-ms-template-callback.cpp create mode 100644 test/CodeGenCXX/mangle-valist.cpp create mode 100644 test/CodeGenCXX/microsoft-abi-default-cc.cpp create mode 100644 test/CodeGenCXX/microsoft-interface.cpp create mode 100644 test/CodeGenCXX/microsoft-uuidof-unsupported-target.cpp create mode 100644 test/CodeGenCXX/microsoft-uuidof.cpp create mode 100644 test/CodeGenCXX/return.cpp delete mode 100644 test/CodeGenObjC/arc-block-ivar-layout.m create mode 100644 test/CodeGenObjC/arc-captured-32bit-block-var-layout.m create mode 100644 test/CodeGenObjC/arc-captured-block-var-inlined-layout.m create mode 100644 test/CodeGenObjC/arc-captured-block-var-layout.m create mode 100644 test/CodeGenObjC/attr-minsize.m create mode 100644 test/CodeGenObjC/builtin-memfns.m create mode 100644 test/CodeGenObjC/debug-info-ivars.m create mode 100644 test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m create mode 100644 test/CodeGenObjC/optimized-setter-ios-device.m create mode 100644 test/CodeGenObjC/prop-metadata-gnu.m create mode 100644 test/CodeGenObjC/unoptimized-setter.m create mode 100644 test/CodeGenObjCXX/implementation-in-extern-c.mm create mode 100644 test/Driver/B-opt.c create mode 100755 test/Driver/Inputs/B_opt_tree/dir1/i386-unknown-linux-ld create mode 100755 test/Driver/Inputs/B_opt_tree/dir1/ld create mode 100755 test/Driver/Inputs/B_opt_tree/dir2/ld create mode 100755 test/Driver/Inputs/B_opt_tree/dir3/prefix-ld create mode 100644 test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/bin/.keep create mode 100644 test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/include/c++/4.4.3/.keep create mode 100644 test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/lib/.keep create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbegin.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbeginS.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbeginT.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtend.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtendS.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbegin.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbeginS.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbeginT.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtend.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtendS.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbegin.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbeginS.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbeginT.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtend.o create mode 100644 test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtendS.o create mode 100644 test/Driver/Inputs/basic_android_tree/mipsel-linux-android/bin/.keep create mode 100644 
test/Driver/Inputs/basic_android_tree/mipsel-linux-android/include/c++/4.4.3/.keep create mode 100644 test/Driver/Inputs/basic_android_tree/mipsel-linux-android/lib/.keep create mode 100644 test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_dynamic.o create mode 100644 test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_so.o create mode 100644 test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_static.o create mode 100644 test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtend_android.o create mode 100644 test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtend_so.o delete mode 100644 test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_dynamic.o delete mode 100644 test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_so.o delete mode 100644 test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_static.o delete mode 100644 test/Driver/Inputs/basic_android_tree/usr/lib/crtend_android.o delete mode 100644 test/Driver/Inputs/basic_android_tree/usr/lib/crtend_so.o create mode 100644 test/Driver/Inputs/basic_linux_tree/usr/lib/gcc/x86_64-unknown-linux/4.6.0/crtfastmath.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/lib/.keep create mode 100644 test/Driver/Inputs/debian_6_mips_tree/lib32/.keep create mode 100644 test/Driver/Inputs/debian_6_mips_tree/lib64/.keep create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib/crt1.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib/crti.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/64/crtbegin.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/crtbegin.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/n32/crtbegin.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib32/crt1.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib32/crti.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib64/crt1.o create mode 100644 test/Driver/Inputs/debian_6_mips_tree/usr/lib64/crti.o create mode 100644 test/Driver/Inputs/freescale_ppc64_tree/lib64/.keep create mode 100644 test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crt1.o create mode 100644 test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crti.o create mode 100644 test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crtn.o create mode 100644 test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/powerpc64-fsl-linux/4.6.2/crtbegin.o create mode 100644 test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/powerpc64-fsl-linux/4.6.2/crtend.o create mode 100644 test/Driver/Inputs/freescale_ppc_tree/lib/.keep create mode 100644 test/Driver/Inputs/freescale_ppc_tree/usr/lib/crt1.o create mode 100644 test/Driver/Inputs/freescale_ppc_tree/usr/lib/crti.o create mode 100644 test/Driver/Inputs/freescale_ppc_tree/usr/lib/crtn.o create mode 100644 test/Driver/Inputs/freescale_ppc_tree/usr/lib/powerpc-fsl-linux/4.6.2/crtbegin.o create mode 100644 test/Driver/Inputs/freescale_ppc_tree/usr/lib/powerpc-fsl-linux/4.6.2/crtend.o create mode 100644 test/Driver/android-standalone.cpp delete mode 100644 test/Driver/apple-kext-i386.cpp create mode 100644 test/Driver/bitrig.c create mode 100644 test/Driver/darwin-arch-default.c create mode 100644 test/Driver/darwin-asan-nofortify.c delete mode 100644 test/Driver/darwin-cc.c create mode 100644 test/Driver/darwin-sdkroot.c create mode 100644 test/Driver/freebsd-mips-as.c create mode 100644 test/Driver/fsanitize.c delete mode 100644 test/Driver/hello.c 
create mode 100644 test/Driver/retain-comments-from-system-headers.c create mode 100644 test/Driver/stack-protector.c create mode 100644 test/Driver/ubsan-ld.c create mode 100644 test/Driver/working-directory-and-abs.c create mode 100644 test/Driver/x86_64-nacl-defines.cpp create mode 100644 test/Driver/x86_64-nacl-types.cpp create mode 100644 test/FixIt/fixit-missing-self-in-block.m create mode 100644 test/FixIt/format-darwin.m create mode 100644 test/Frontend/verify3.c create mode 100644 test/Headers/Inputs/include/stdint.h create mode 100644 test/Headers/altivec-header.c create mode 100644 test/Headers/unwind.c create mode 100644 test/Index/Inputs/CommentXML/valid-availability-attr-01.xml create mode 100644 test/Index/Inputs/CommentXML/valid-availability-attr-02.xml create mode 100644 test/Index/Inputs/CommentXML/valid-deprecated-attr.xml create mode 100644 test/Index/Inputs/CommentXML/valid-unavailable-attr.xml create mode 100644 test/Index/Inputs/Frameworks/module.map create mode 100644 test/Index/Inputs/retain-comments-from-system-headers.h create mode 100644 test/Index/annotate-comments-availability-attrs.cpp create mode 100644 test/Index/annotate-deep-statements.cpp create mode 100644 test/Index/annotate-module.m create mode 100644 test/Index/code-completion-skip-bodies.cpp create mode 100644 test/Index/complete-documentation-templates.cpp create mode 100644 test/Index/index-file.cpp create mode 100644 test/Index/index-module.m create mode 100644 test/Index/index-pch-with-module.m create mode 100644 test/Index/index-pch.cpp create mode 100644 test/Index/index-with-working-dir.c create mode 100644 test/Index/overriding-ftemplate-comments.cpp create mode 100644 test/Index/overriding-method-comments.mm create mode 100644 test/Index/preamble-reparse-with-BOM.m create mode 100644 test/Index/rdar12316296-codecompletion.m create mode 100644 test/Index/retain-comments-from-system-headers.c create mode 100644 test/Lexer/eof-char.c create mode 100644 test/Lexer/eof-file.c create mode 100644 test/Lexer/eof-string.c create mode 100644 test/Lexer/long-long.cpp create mode 100644 test/Lexer/string-literal-errors.cpp create mode 100644 test/Misc/ast-dump-stmt.c create mode 100644 test/Misc/ast-dump-stmt.m create mode 100644 test/Misc/unnecessary-elipses.cpp create mode 100644 test/Misc/wrong-encoding2.c create mode 100644 test/Modules/Inputs/Modified/A.h create mode 100644 test/Modules/Inputs/Modified/B.h create mode 100644 test/Modules/Inputs/Modified/module.map create mode 100644 test/Modules/Inputs/NotAModule.framework/Headers/NotAModule.h create mode 100644 test/Modules/Inputs/macros_left.h create mode 100644 test/Modules/Inputs/macros_other.h create mode 100644 test/Modules/Inputs/macros_right.h create mode 100644 test/Modules/Inputs/macros_right_undef.h create mode 100644 test/Modules/Inputs/macros_top.h create mode 100644 test/Modules/Inputs/normal-module-map/nested_umbrella/1.h create mode 100644 test/Modules/Inputs/normal-module-map/nested_umbrella/a-extras.h create mode 100644 test/Modules/Inputs/normal-module-map/nested_umbrella/decltype.h create mode 100644 test/Modules/Inputs/templates-left.h create mode 100644 test/Modules/Inputs/templates-right.h create mode 100644 test/Modules/Inputs/templates-top.h create mode 100644 test/Modules/direct-module-import.m create mode 100644 test/Modules/inferred-frameworks.m create mode 100644 test/Modules/modify-module.m create mode 100644 test/Modules/templates.mm create mode 100644 test/PCH/Inputs/badpch-dir.h.gch/.keep create mode 100644 
test/PCH/Inputs/badpch-empty.h.gch create mode 100644 test/PCH/Inputs/case-insensitive-include.h create mode 100644 test/PCH/Inputs/cxx11-statement-attributes.h delete mode 100644 test/PCH/badpch-dir.h.gch/.keep delete mode 100644 test/PCH/badpch-empty.h.gch create mode 100644 test/PCH/case-insensitive-include.c create mode 100644 test/PCH/cxx11-statement-attributes.cpp create mode 100644 test/PCH/field-designator.c create mode 100644 test/PCH/friend-template.cpp create mode 100644 test/PCH/pch-dir.c create mode 100644 test/PCH/pch-dir.h create mode 100644 test/PCH/target-options.c create mode 100644 test/PCH/target-options.h create mode 100644 test/Parser/colon-colon-parentheses.cpp create mode 100644 test/Parser/cxx11-brace-initializers.cpp create mode 100644 test/Parser/pragma-fp-contract.c create mode 100644 test/Parser/pragma-options.cpp create mode 100644 test/Preprocessor/macro_fn_comma_swallow2.c create mode 100644 test/Preprocessor/microsoft-ext.c create mode 100644 test/Preprocessor/pr13851.c create mode 100644 test/Rewriter/no-integrated-preprocessing-64bit.m create mode 100644 test/Rewriter/no-integrated-preprocessing.m create mode 100644 test/Rewriter/objc-modern-StretAPI-2.mm create mode 100644 test/Sema/MicrosoftCompatibility-x64.c create mode 100644 test/Sema/MicrosoftCompatibility-x86.c create mode 100644 test/Sema/attr-minsize.c delete mode 100644 test/Sema/c89-2.c create mode 100644 test/Sema/format-strings-darwin.c create mode 100644 test/Sema/format-strings-gnu.c create mode 100644 test/Sema/mms-bitfields.c create mode 100644 test/Sema/ms-inline-asm.c create mode 100644 test/Sema/outof-range-constant-compare.c create mode 100644 test/Sema/stdcall-fastcall-x64.c create mode 100644 test/Sema/template-specialization.cpp create mode 100644 test/Sema/warn-bad-function-cast.c create mode 100644 test/Sema/warn-missing-variable-declarations.c create mode 100644 test/SemaCXX/ambiguous-conversion-show-overload.cpp create mode 100644 test/SemaCXX/ast-print.cpp create mode 100644 test/SemaCXX/attr-nodebug.cpp create mode 100644 test/SemaCXX/attr-unused.cpp create mode 100644 test/SemaCXX/builtins-arm.cpp create mode 100644 test/SemaCXX/builtins-va_arg.cpp create mode 100644 test/SemaCXX/cxx11-crashes.cpp create mode 100644 test/SemaCXX/for-range-dereference.cpp create mode 100644 test/SemaCXX/libstdcxx_atomic_ns_hack.cpp create mode 100644 test/SemaCXX/libstdcxx_common_type_hack.cpp create mode 100644 test/SemaCXX/ms-interface.cpp create mode 100644 test/SemaCXX/no-warn-composite-pointer-type.cpp create mode 100644 test/SemaCXX/no-wchar.cpp create mode 100644 test/SemaCXX/switch-implicit-fallthrough-macro.cpp create mode 100644 test/SemaCXX/warn-c++11-extensions.cpp create mode 100644 test/SemaCXX/warn-implicit-conversion-floating-point-to-bool.cpp create mode 100644 test/SemaCXX/warn-missing-variable-declarations.cpp delete mode 100644 test/SemaCXX/warn-unique-enum.cpp delete mode 100644 test/SemaCXX/warn-using-namespace-in-header.h create mode 100644 test/SemaObjC/arc-repeated-weak.mm create mode 100644 test/SemaObjC/crash-on-objc-bool-literal.m create mode 100644 test/SemaObjC/nonarc-weak.m create mode 100644 test/SemaObjC/overriding-property-in-class-extension.m create mode 100644 test/SemaObjC/property-deprecated-warning.m create mode 100644 test/SemaObjC/property-in-class-extension-1.m create mode 100644 test/SemaObjC/super-dealloc-attribute.m create mode 100644 test/SemaObjC/warn-implicit-self-in-block.m create mode 100644 test/SemaObjC/warning-missing-selector-name.m 
delete mode 100644 test/SemaObjCXX/arc-non-pod.mm create mode 100644 test/SemaObjCXX/arc-objc-lifetime.mm delete mode 100644 test/SemaObjCXX/arc-object-init-destroy.mm create mode 100644 test/SemaObjCXX/debugger-support.mm create mode 100644 test/SemaObjCXX/decltype.mm create mode 100644 test/SemaObjCXX/format-strings.mm create mode 100644 test/SemaTemplate/derived.cpp create mode 100644 test/SemaTemplate/instantiation-depth-exception-spec.cpp create mode 100644 unittests/AST/DeclPrinterTest.cpp create mode 100644 unittests/AST/SourceLocationTest.cpp create mode 100644 unittests/AST/StmtPrinterTest.cpp create mode 100644 unittests/Lex/PPCallbacksTest.cpp create mode 100644 utils/ClangDataFormat.py create mode 100644 utils/TableGen/ClangCommentCommandInfoEmitter.cpp create mode 100644 utils/TableGen/ClangCommentHTMLTagsEmitter.cpp create mode 100755 utils/analyzer/update_plist_test.pl create mode 100644 www/analyzer/potential_checkers.html create mode 100644 www/libstdc++4.6-clang11.patch delete mode 100644 www/timing-data/2008-10-31/176.gcc-01.txt delete mode 100644 www/timing-data/2008-10-31/176.gcc-02.txt delete mode 100644 www/timing-data/2008-10-31/176.gcc.png delete mode 100644 www/timing-data/2008-10-31/sketch-01.txt delete mode 100644 www/timing-data/2008-10-31/sketch-02.txt delete mode 100644 www/timing-data/2008-10-31/sketch.png delete mode 100644 www/timing-data/2009-03-02/176.gcc.pdf delete mode 100644 www/timing-data/2009-03-02/176.gcc.png delete mode 100644 www/timing-data/2009-03-02/176.gcc.txt delete mode 100644 www/timing-data/2009-03-02/sketch.pdf delete mode 100644 www/timing-data/2009-03-02/sketch.png delete mode 100644 www/timing-data/2009-03-02/sketch.txt delete mode 100644 www/timing-data/2009-06-26/176.gcc.pdf delete mode 100644 www/timing-data/2009-06-26/176.gcc.png delete mode 100644 www/timing-data/2009-06-26/176.gcc.txt delete mode 100644 www/timing-data/2009-06-26/sketch.pdf delete mode 100644 www/timing-data/2009-06-26/sketch.png delete mode 100644 www/timing-data/2009-06-26/sketch.txt diff --git a/.gitignore b/.gitignore index b906ab9..6be9976 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,7 @@ *.pyc # vim swap files .*.swp +.sw? #==============================================================================# # Explicit files to ignore (only matches one). 
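Illustrative sketch, not part of the patch: the new ".sw?" ignore entry complements the existing ".*.swp" rule. In gitignore syntax "?" matches exactly one character, so ".sw?" catches basenames such as ".swp" or ".swo" themselves, which ".*.swp" (a dotted prefix plus the .swp suffix) does not. Python's fnmatch approximates gitignore's per-basename matching closely enough to show the difference:

from fnmatch import fnmatch

patterns = [".sw?", ".*.swp"]
names = [".swp", ".swo", ".foo.c.swp", ".foo.c.swo"]
for name in names:
    # Show which of the two ignore rules would catch each basename.
    print(name, [p for p in patterns if fnmatch(name, p)])
# .swp        ['.sw?']     <- only the new rule matches
# .swo        ['.sw?']     <- only the new rule matches
# .foo.c.swp  ['.*.swp']   <- already ignored before this change
# .foo.c.swo  []           <- still not ignored by either rule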
diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f4fa1c..53d4165 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -181,16 +181,26 @@ endfunction(clang_tablegen) macro(add_clang_library name) llvm_process_sources(srcs ${ARGN}) if(MSVC_IDE OR XCODE) - string( REGEX MATCHALL "/[^/]+" split_path ${CMAKE_CURRENT_SOURCE_DIR}) - list( GET split_path -1 dir) - file( GLOB_RECURSE headers - ../../../include/clang/StaticAnalyzer${dir}/*.h - ../../../include/clang/StaticAnalyzer${dir}/*.td - ../../../include/clang/StaticAnalyzer${dir}/*.def - ../../include/clang${dir}/*.h - ../../include/clang${dir}/*.td - ../../include/clang${dir}/*.def) - set(srcs ${srcs} ${headers}) + # Add public headers + file(RELATIVE_PATH lib_path + ${CLANG_SOURCE_DIR}/lib/ + ${CMAKE_CURRENT_SOURCE_DIR} + ) + if(NOT lib_path MATCHES "^[.][.]") + file( GLOB_RECURSE headers + ${CLANG_SOURCE_DIR}/include/clang/${lib_path}/*.h + ${CLANG_SOURCE_DIR}/include/clang/${lib_path}/*.def + ) + set_source_files_properties(${headers} PROPERTIES HEADER_FILE_ONLY ON) + + file( GLOB_RECURSE tds + ${CLANG_SOURCE_DIR}/include/clang/${lib_path}/*.td + ) + source_group("TableGen descriptions" FILES ${tds}) + set_source_files_properties(${tds}} PROPERTIES HEADER_FILE_ONLY ON) + + set(srcs ${srcs} ${headers} ${tds}) + endif() endif(MSVC_IDE OR XCODE) if (MODULE) set(libkind MODULE) diff --git a/Makefile b/Makefile index bf1a772..cb2566a 100644 --- a/Makefile +++ b/Makefile @@ -27,6 +27,10 @@ ifeq ($(MAKECMDGOALS),libs-only) DIRS := $(filter-out tools docs, $(DIRS)) OPTIONAL_DIRS := endif +ifeq ($(BUILD_CLANG_ONLY),YES) + DIRS := $(filter-out docs unittests, $(DIRS)) + OPTIONAL_DIRS := +endif ### # Common Makefile code, shared by all Clang Makefiles. diff --git a/NOTES.txt b/NOTES.txt index ca46d1c..1c89d68 100644 --- a/NOTES.txt +++ b/NOTES.txt @@ -44,7 +44,7 @@ TODO: File Manager Speedup: 2. Instead of stat'ing the file in FileManager::getFile, check to see if the dir has been read. If so, fail immediately, if not, read the dir, then retry. - 3. Reading the dir uses the getdirentries syscall, creating an FileEntry + 3. Reading the dir uses the getdirentries syscall, creating a FileEntry for all files found. //===---------------------------------------------------------------------===// diff --git a/bindings/python/clang/cindex.py b/bindings/python/clang/cindex.py index fc0a2a1..5e162c0 100644 --- a/bindings/python/clang/cindex.py +++ b/bindings/python/clang/cindex.py @@ -67,26 +67,12 @@ import collections import clang.enumerations -def get_cindex_library(): - # FIXME: It's probably not the case that the library is actually found in - # this location. We need a better system of identifying and loading the - # CIndex library. It could be on path or elsewhere, or versioned, etc. - import platform - name = platform.system() - if name == 'Darwin': - return cdll.LoadLibrary('libclang.dylib') - elif name == 'Windows': - return cdll.LoadLibrary('libclang.dll') - else: - return cdll.LoadLibrary('libclang.so') - # ctypes doesn't implicitly convert c_void_p to the appropriate wrapper # object. This is a problem, because it means that from_parameter will see an # integer and pass the wrong value on platforms where int != void*. Work around # this by marshalling object arguments as void**. 
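# --- Illustrative sketch, not from cindex.py: the ctypes behaviour the
# comment above works around. When a foreign function's restype is
# c_void_p, ctypes flattens the result to a plain Python int; passed back
# through a default (C int-sized) argument slot, that value can be
# truncated on platforms where int != void*. Declaring the type as
# POINTER(c_void_p) keeps a genuine pointer object instead. Uses libc's
# malloc/free purely for demonstration.
from ctypes import CDLL, POINTER, c_void_p, cast
from ctypes.util import find_library

libc = CDLL(find_library("c"))

libc.malloc.restype = c_void_p
raw = libc.malloc(16)              # plain int, no longer typed as a pointer

libc.malloc.restype = POINTER(c_void_p)
wrapped = libc.malloc(16)          # LP_c_void_p instance, still pointer-typed

libc.free.argtypes = [c_void_p]    # declare argtypes so neither call truncates
libc.free(raw)
libc.free(cast(wrapped, c_void_p))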
c_object_p = POINTER(c_void_p) -lib = get_cindex_library() callbacks = {} ### Exception Classes ### @@ -133,18 +119,43 @@ class TranslationUnitSaveError(Exception): ### Structures and Utility Classes ### +class CachedProperty(object): + """Decorator that lazy-loads the value of a property. + + The first time the property is accessed, the original property function is + executed. The value it returns is set as the new value of that instance's + property, replacing the original method. + """ + + def __init__(self, wrapped): + self.wrapped = wrapped + try: + self.__doc__ = wrapped.__doc__ + except: + pass + + def __get__(self, instance, instance_type=None): + if instance is None: + return self + + value = self.wrapped(instance) + setattr(instance, self.wrapped.__name__, value) + + return value + + class _CXString(Structure): """Helper for transforming CXString results.""" _fields_ = [("spelling", c_char_p), ("free", c_int)] def __del__(self): - lib.clang_disposeString(self) + conf.lib.clang_disposeString(self) @staticmethod def from_result(res, fn, args): assert isinstance(res, _CXString) - return lib.clang_getCString(res) + return conf.lib.clang_getCString(res) class SourceLocation(Structure): """ @@ -156,7 +167,7 @@ class SourceLocation(Structure): def _get_instantiation(self): if self._data is None: f, l, c, o = c_object_p(), c_uint(), c_uint(), c_uint() - lib.clang_getInstantiationLocation(self, byref(f), byref(l), + conf.lib.clang_getInstantiationLocation(self, byref(f), byref(l), byref(c), byref(o)) if f: f = File(f) @@ -171,7 +182,7 @@ class SourceLocation(Structure): Retrieve the source location associated with a given file/line/column in a particular translation unit. """ - return lib.clang_getLocation(tu, file, line, column) + return conf.lib.clang_getLocation(tu, file, line, column) @staticmethod def from_offset(tu, file, offset): @@ -181,7 +192,7 @@ class SourceLocation(Structure): file -- File instance to obtain offset from offset -- Integer character offset within file """ - return lib.clang_getLocationForOffset(tu, file, offset) + return conf.lib.clang_getLocationForOffset(tu, file, offset) @property def file(self): @@ -204,7 +215,7 @@ class SourceLocation(Structure): return self._get_instantiation()[3] def __eq__(self, other): - return lib.clang_equalLocations(self, other) + return conf.lib.clang_equalLocations(self, other) def __ne__(self, other): return not self.__eq__(other) @@ -231,7 +242,7 @@ class SourceRange(Structure): # object. @staticmethod def from_locations(start, end): - return lib.clang_getRange(start, end) + return conf.lib.clang_getRange(start, end) @property def start(self): @@ -239,7 +250,7 @@ class SourceRange(Structure): Return a SourceLocation representing the first character within a source range. """ - return lib.clang_getRangeStart(self) + return conf.lib.clang_getRangeStart(self) @property def end(self): @@ -247,10 +258,10 @@ class SourceRange(Structure): Return a SourceLocation representing the last character within a source range. 
""" - return lib.clang_getRangeEnd(self) + return conf.lib.clang_getRangeEnd(self) def __eq__(self, other): - return lib.clang_equalRanges(self, other) + return conf.lib.clang_equalRanges(self, other) def __ne__(self, other): return not self.__eq__(other) @@ -275,19 +286,19 @@ class Diagnostic(object): self.ptr = ptr def __del__(self): - lib.clang_disposeDiagnostic(self) + conf.lib.clang_disposeDiagnostic(self) @property def severity(self): - return lib.clang_getDiagnosticSeverity(self) + return conf.lib.clang_getDiagnosticSeverity(self) @property def location(self): - return lib.clang_getDiagnosticLocation(self) + return conf.lib.clang_getDiagnosticLocation(self) @property def spelling(self): - return lib.clang_getDiagnosticSpelling(self) + return conf.lib.clang_getDiagnosticSpelling(self) @property def ranges(self): @@ -296,12 +307,12 @@ class Diagnostic(object): self.diag = diag def __len__(self): - return int(lib.clang_getDiagnosticNumRanges(self.diag)) + return int(conf.lib.clang_getDiagnosticNumRanges(self.diag)) def __getitem__(self, key): if (key >= len(self)): raise IndexError - return lib.clang_getDiagnosticRange(self.diag, key) + return conf.lib.clang_getDiagnosticRange(self.diag, key) return RangeIterator(self) @@ -312,11 +323,11 @@ class Diagnostic(object): self.diag = diag def __len__(self): - return int(lib.clang_getDiagnosticNumFixIts(self.diag)) + return int(conf.lib.clang_getDiagnosticNumFixIts(self.diag)) def __getitem__(self, key): range = SourceRange() - value = lib.clang_getDiagnosticFixIt(self.diag, key, + value = conf.lib.clang_getDiagnosticFixIt(self.diag, key, byref(range)) if len(value) == 0: raise IndexError @@ -328,25 +339,25 @@ class Diagnostic(object): @property def category_number(self): """The category number for this diagnostic.""" - return lib.clang_getDiagnosticCategory(self) + return conf.lib.clang_getDiagnosticCategory(self) @property def category_name(self): """The string name of the category for this diagnostic.""" - return lib.clang_getDiagnosticCategoryName(self.category_number) + return conf.lib.clang_getDiagnosticCategoryName(self.category_number) @property def option(self): """The command-line option that enables this diagnostic.""" - return lib.clang_getDiagnosticOption(self, None) + return conf.lib.clang_getDiagnosticOption(self, None) @property def disable_option(self): """The command-line option that disables this diagnostic.""" disable = _CXString() - lib.clang_getDiagnosticOption(self, byref(disable)) + conf.lib.clang_getDiagnosticOption(self, byref(disable)) - return lib.clang_getCString(disable) + return conf.lib.clang_getCString(disable) def __repr__(self): return "" % ( @@ -389,7 +400,7 @@ class TokenGroup(object): self._count = count def __del__(self): - lib.clang_disposeTokens(self._tu, self._memory, self._count) + conf.lib.clang_disposeTokens(self._tu, self._memory, self._count) @staticmethod def get_tokens(tu, extent): @@ -401,7 +412,7 @@ class TokenGroup(object): tokens_memory = POINTER(Token)() tokens_count = c_uint() - lib.clang_tokenize(tu, extent, byref(tokens_memory), + conf.lib.clang_tokenize(tu, extent, byref(tokens_memory), byref(tokens_count)) count = int(tokens_count.value) @@ -507,39 +518,39 @@ class CursorKind(object): def is_declaration(self): """Test if this is a declaration kind.""" - return lib.clang_isDeclaration(self) + return conf.lib.clang_isDeclaration(self) def is_reference(self): """Test if this is a reference kind.""" - return lib.clang_isReference(self) + return conf.lib.clang_isReference(self) def 
is_expression(self): """Test if this is an expression kind.""" - return lib.clang_isExpression(self) + return conf.lib.clang_isExpression(self) def is_statement(self): """Test if this is a statement kind.""" - return lib.clang_isStatement(self) + return conf.lib.clang_isStatement(self) def is_attribute(self): """Test if this is an attribute kind.""" - return lib.clang_isAttribute(self) + return conf.lib.clang_isAttribute(self) def is_invalid(self): """Test if this is an invalid kind.""" - return lib.clang_isInvalid(self) + return conf.lib.clang_isInvalid(self) def is_translation_unit(self): """Test if this is a translation unit kind.""" - return lib.clang_isTranslationUnit(self) + return conf.lib.clang_isTranslationUnit(self) def is_preprocessing(self): """Test if this is a preprocessing kind.""" - return lib.clang_isPreprocessing(self) + return conf.lib.clang_isPreprocessing(self) def is_unexposed(self): """Test if this is an unexposed kind.""" - return lib.clang_isUnexposed(self) + return conf.lib.clang_isUnexposed(self) def __repr__(self): return 'CursorKind.%s' % (self.name,) @@ -643,7 +654,7 @@ CursorKind.TEMPLATE_TYPE_PARAMETER = CursorKind(27) CursorKind.TEMPLATE_NON_TYPE_PARAMETER = CursorKind(28) # A C++ template template parameter. -CursorKind.TEMPLATE_TEMPLATE_PARAMTER = CursorKind(29) +CursorKind.TEMPLATE_TEMPLATE_PARAMETER = CursorKind(29) # A C++ function template. CursorKind.FUNCTION_TEMPLATE = CursorKind(30) @@ -1038,13 +1049,13 @@ class Cursor(Structure): def from_location(tu, location): # We store a reference to the TU in the instance so the TU won't get # collected before the cursor. - cursor = lib.clang_getCursor(tu, location) + cursor = conf.lib.clang_getCursor(tu, location) cursor._tu = tu return cursor def __eq__(self, other): - return lib.clang_equalCursors(self, other) + return conf.lib.clang_equalCursors(self, other) def __ne__(self, other): return not self.__eq__(other) @@ -1054,13 +1065,13 @@ class Cursor(Structure): Returns true if the declaration pointed at by the cursor is also a definition of that entity. """ - return lib.clang_isCursorDefinition(self) + return conf.lib.clang_isCursorDefinition(self) def is_static_method(self): """Returns True if the cursor refers to a C++ member function or member function template that is declared 'static'. """ - return lib.clang_CXXMethod_isStatic(self) + return conf.lib.clang_CXXMethod_isStatic(self) def get_definition(self): """ @@ -1070,7 +1081,7 @@ class Cursor(Structure): """ # TODO: Should probably check that this is either a reference or # declaration prior to issuing the lookup. - return lib.clang_getCursorDefinition(self) + return conf.lib.clang_getCursorDefinition(self) def get_usr(self): """Return the Unified Symbol Resultion (USR) for the entity referenced @@ -1081,7 +1092,7 @@ class Cursor(Structure): program. USRs can be compared across translation units to determine, e.g., when references in one translation refer to an entity defined in another translation unit.""" - return lib.clang_getCursorUSR(self) + return conf.lib.clang_getCursorUSR(self) @property def kind(self): @@ -1096,7 +1107,7 @@ class Cursor(Structure): # this, for consistency with clang_getCursorUSR. return None if not hasattr(self, '_spelling'): - self._spelling = lib.clang_getCursorSpelling(self) + self._spelling = conf.lib.clang_getCursorSpelling(self) return self._spelling @@ -1110,7 +1121,7 @@ class Cursor(Structure): class template specialization. 
""" if not hasattr(self, '_displayname'): - self._displayname = lib.clang_getCursorDisplayName(self) + self._displayname = conf.lib.clang_getCursorDisplayName(self) return self._displayname @@ -1121,7 +1132,7 @@ class Cursor(Structure): pointed at by the cursor. """ if not hasattr(self, '_loc'): - self._loc = lib.clang_getCursorLocation(self) + self._loc = conf.lib.clang_getCursorLocation(self) return self._loc @@ -1132,7 +1143,7 @@ class Cursor(Structure): pointed at by the cursor. """ if not hasattr(self, '_extent'): - self._extent = lib.clang_getCursorExtent(self) + self._extent = conf.lib.clang_getCursorExtent(self) return self._extent @@ -1142,7 +1153,7 @@ class Cursor(Structure): Retrieve the Type (if any) of the entity pointed at by the cursor. """ if not hasattr(self, '_type'): - self._type = lib.clang_getCursorType(self) + self._type = conf.lib.clang_getCursorType(self) return self._type @@ -1156,7 +1167,7 @@ class Cursor(Structure): declarations will be identical. """ if not hasattr(self, '_canonical'): - self._canonical = lib.clang_getCanonicalCursor(self) + self._canonical = conf.lib.clang_getCanonicalCursor(self) return self._canonical @@ -1164,7 +1175,7 @@ class Cursor(Structure): def result_type(self): """Retrieve the Type of the result for this Cursor.""" if not hasattr(self, '_result_type'): - self._result_type = lib.clang_getResultType(self.type) + self._result_type = conf.lib.clang_getResultType(self.type) return self._result_type @@ -1177,7 +1188,8 @@ class Cursor(Structure): """ if not hasattr(self, '_underlying_type'): assert self.kind.is_declaration() - self._underlying_type = lib.clang_getTypedefDeclUnderlyingType(self) + self._underlying_type = \ + conf.lib.clang_getTypedefDeclUnderlyingType(self) return self._underlying_type @@ -1190,7 +1202,7 @@ class Cursor(Structure): """ if not hasattr(self, '_enum_type'): assert self.kind == CursorKind.ENUM_DECL - self._enum_type = lib.clang_getEnumDeclIntegerType(self) + self._enum_type = conf.lib.clang_getEnumDeclIntegerType(self) return self._enum_type @@ -1213,16 +1225,18 @@ class Cursor(Structure): TypeKind.ULONG, TypeKind.ULONGLONG, TypeKind.UINT128): - self._enum_value = lib.clang_getEnumConstantDeclUnsignedValue(self) + self._enum_value = \ + conf.lib.clang_getEnumConstantDeclUnsignedValue(self) else: - self._enum_value = lib.clang_getEnumConstantDeclValue(self) + self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self) return self._enum_value @property def objc_type_encoding(self): """Return the Objective-C type encoding as a str.""" if not hasattr(self, '_objc_type_encoding'): - self._objc_type_encoding = lib.clang_getDeclObjCTypeEncoding(self) + self._objc_type_encoding = \ + conf.lib.clang_getDeclObjCTypeEncoding(self) return self._objc_type_encoding @@ -1230,7 +1244,7 @@ class Cursor(Structure): def hash(self): """Returns a hash of the cursor as an int.""" if not hasattr(self, '_hash'): - self._hash = lib.clang_hashCursor(self) + self._hash = conf.lib.clang_hashCursor(self) return self._hash @@ -1238,7 +1252,7 @@ class Cursor(Structure): def semantic_parent(self): """Return the semantic parent for this cursor.""" if not hasattr(self, '_semantic_parent'): - self._semantic_parent = lib.clang_getCursorSemanticParent(self) + self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self) return self._semantic_parent @@ -1246,7 +1260,7 @@ class Cursor(Structure): def lexical_parent(self): """Return the lexical parent for this cursor.""" if not hasattr(self, '_lexical_parent'): - self._lexical_parent = 
lib.clang_getCursorLexicalParent(self) + self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self) return self._lexical_parent @@ -1257,6 +1271,12 @@ class Cursor(Structure): # created. return self._tu + def get_arguments(self): + """Return an iterator for accessing the arguments of this cursor.""" + num_args = conf.lib.clang_Cursor_getNumArguments(self) + for i in range(0, num_args): + yield conf.lib.clang_Cursor_getArgument(self, i) + def get_children(self): """Return an iterator for accessing the children of this cursor.""" @@ -1264,14 +1284,14 @@ class Cursor(Structure): def visitor(child, parent, children): # FIXME: Document this assertion in API. # FIXME: There should just be an isNull method. - assert child != lib.clang_getNullCursor() + assert child != conf.lib.clang_getNullCursor() # Create reference to TU so it isn't GC'd before Cursor. child._tu = self._tu children.append(child) return 1 # continue children = [] - lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor), + conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor), children) return iter(children) @@ -1287,7 +1307,7 @@ class Cursor(Structure): def from_result(res, fn, args): assert isinstance(res, Cursor) # FIXME: There should just be an isNull method. - if res == lib.clang_getNullCursor(): + if res == conf.lib.clang_getNullCursor(): return None # Store a reference to the TU in the Python object so it won't get GC'd @@ -1310,7 +1330,7 @@ class Cursor(Structure): @staticmethod def from_cursor_result(res, fn, args): assert isinstance(res, Cursor) - if res == lib.clang_getNullCursor(): + if res == conf.lib.clang_getNullCursor(): return None res._tu = args[0]._tu @@ -1352,7 +1372,7 @@ class TypeKind(object): @property def spelling(self): """Retrieve the spelling of this TypeKind.""" - return lib.clang_getTypeKindSpelling(self.value) + return conf.lib.clang_getTypeKindSpelling(self.value) @staticmethod def from_id(id): @@ -1432,7 +1452,7 @@ class Type(Structure): def __len__(self): if self.length is None: - self.length = lib.clang_getNumArgTypes(self.parent) + self.length = conf.lib.clang_getNumArgTypes(self.parent) return self.length @@ -1448,7 +1468,7 @@ class Type(Structure): raise IndexError("Index greater than container length: " "%d > %d" % ( key, len(self) )) - result = lib.clang_getArgType(self.parent, key) + result = conf.lib.clang_getArgType(self.parent, key) if result.kind == TypeKind.INVALID: raise IndexError("Argument could not be retrieved.") @@ -1464,7 +1484,7 @@ class Type(Structure): If accessed on a type that is not an array, complex, or vector type, an exception will be raised. """ - result = lib.clang_getElementType(self) + result = conf.lib.clang_getElementType(self) if result.kind == TypeKind.INVALID: raise Exception('Element type not available on this type.') @@ -1478,7 +1498,7 @@ class Type(Structure): If the Type is not an array or vector, this raises. """ - result = lib.clang_getNumElements(self) + result = conf.lib.clang_getNumElements(self) if result < 0: raise Exception('Type does not have elements.') @@ -1516,7 +1536,7 @@ class Type(Structure): example, if 'T' is a typedef for 'int', the canonical type for 'T' would be 'int'. """ - return lib.clang_getCanonicalType(self) + return conf.lib.clang_getCanonicalType(self) def is_const_qualified(self): """Determine whether a Type has the "const" qualifier set. @@ -1524,7 +1544,7 @@ class Type(Structure): This does not look through typedefs that may have added "const" at a different level. 
""" - return lib.clang_isConstQualifiedType(self) + return conf.lib.clang_isConstQualifiedType(self) def is_volatile_qualified(self): """Determine whether a Type has the "volatile" qualifier set. @@ -1532,7 +1552,7 @@ class Type(Structure): This does not look through typedefs that may have added "volatile" at a different level. """ - return lib.clang_isVolatileQualifiedType(self) + return conf.lib.clang_isVolatileQualifiedType(self) def is_restrict_qualified(self): """Determine whether a Type has the "restrict" qualifier set. @@ -1540,53 +1560,53 @@ class Type(Structure): This does not look through typedefs that may have added "restrict" at a different level. """ - return lib.clang_isRestrictQualifiedType(self) + return conf.lib.clang_isRestrictQualifiedType(self) def is_function_variadic(self): """Determine whether this function Type is a variadic function type.""" assert self.kind == TypeKind.FUNCTIONPROTO - return lib.clang_isFunctionTypeVariadic(self) + return conf.lib.clang_isFunctionTypeVariadic(self) def is_pod(self): """Determine whether this Type represents plain old data (POD).""" - return lib.clang_isPODType(self) + return conf.lib.clang_isPODType(self) def get_pointee(self): """ For pointer types, returns the type of the pointee. """ - return lib.clang_getPointeeType(self) + return conf.lib.clang_getPointeeType(self) def get_declaration(self): """ Return the cursor for the declaration of the given type. """ - return lib.clang_getTypeDeclaration(self) + return conf.lib.clang_getTypeDeclaration(self) def get_result(self): """ Retrieve the result type associated with a function type. """ - return lib.clang_getResultType(self) + return conf.lib.clang_getResultType(self) def get_array_element_type(self): """ Retrieve the type of the elements of the array type. """ - return lib.clang_getArrayElementType(self) + return conf.lib.clang_getArrayElementType(self) def get_array_size(self): """ Retrieve the size of the constant array. 
""" - return lib.clang_getArraySize(self) + return conf.lib.clang_getArraySize(self) def __eq__(self, other): if type(other) != type(self): return False - return lib.clang_equalTypes(self, other) + return conf.lib.clang_equalTypes(self, other) def __ne__(self, other): return not self.__eq__(other) @@ -1632,18 +1652,19 @@ class CompletionChunk: def __repr__(self): return "{'" + self.spelling + "', " + str(self.kind) + "}" - @property + @CachedProperty def spelling(self): - return lib.clang_getCompletionChunkText(self.cs, self.key).spelling + return conf.lib.clang_getCompletionChunkText(self.cs, self.key).spelling - @property + @CachedProperty def kind(self): - res = lib.clang_getCompletionChunkKind(self.cs, self.key) + res = conf.lib.clang_getCompletionChunkKind(self.cs, self.key) return completionChunkKindMap[res] - @property + @CachedProperty def string(self): - res = lib.clang_getCompletionChunkCompletionString(self.cs, self.key) + res = conf.lib.clang_getCompletionChunkCompletionString(self.cs, + self.key) if (res): return CompletionString(res) @@ -1700,31 +1721,43 @@ class CompletionString(ClangObject): return "" % self def __len__(self): - return lib.clang_getNumCompletionChunks(self.obj) + self.num_chunks + + @CachedProperty + def num_chunks(self): + return conf.lib.clang_getNumCompletionChunks(self.obj) def __getitem__(self, key): - if len(self) <= key: + if self.num_chunks <= key: raise IndexError return CompletionChunk(self.obj, key) @property def priority(self): - return lib.clang_getCompletionPriority(self.obj) + return conf.lib.clang_getCompletionPriority(self.obj) @property def availability(self): - res = lib.clang_getCompletionAvailability(self.obj) + res = conf.lib.clang_getCompletionAvailability(self.obj) return availabilityKinds[res] + @property + def briefComment(self): + if conf.function_exists("clang_getCompletionBriefComment"): + return conf.lib.clang_getCompletionBriefComment(self.obj) + return _CXString() + def __repr__(self): return " | ".join([str(a) for a in self]) \ + " || Priority: " + str(self.priority) \ - + " || Availability: " + str(self.availability) + + " || Availability: " + str(self.availability) \ + + " || Brief comment: " + str(self.briefComment.spelling) availabilityKinds = { 0: CompletionChunk.Kind("Available"), 1: CompletionChunk.Kind("Deprecated"), - 2: CompletionChunk.Kind("NotAvailable")} + 2: CompletionChunk.Kind("NotAvailable"), + 3: CompletionChunk.Kind("NotAccessible")} class CodeCompletionResult(Structure): _fields_ = [('cursorKind', c_int), ('completionString', c_object_p)] @@ -1762,7 +1795,7 @@ class CodeCompletionResults(ClangObject): return self._as_parameter_ def __del__(self): - CodeCompletionResults_dispose(self) + conf.lib.clang_disposeCodeCompleteResults(self) @property def results(self): @@ -1775,10 +1808,11 @@ class CodeCompletionResults(ClangObject): self.ccr= ccr def __len__(self): - return int(lib.clang_codeCompleteGetNumDiagnostics(self.ccr)) + return int(\ + conf.lib.clang_codeCompleteGetNumDiagnostics(self.ccr)) def __getitem__(self, key): - return lib.clang_codeCompleteGetDiagnostic(self.ccr, key) + return conf.lib.clang_codeCompleteGetDiagnostic(self.ccr, key) return DiagnosticsItr(self) @@ -1797,10 +1831,10 @@ class Index(ClangObject): Parameters: excludeDecls -- Exclude local declarations from translation units. 
""" - return Index(lib.clang_createIndex(excludeDecls, 0)) + return Index(conf.lib.clang_createIndex(excludeDecls, 0)) def __del__(self): - lib.clang_disposeIndex(self) + conf.lib.clang_disposeIndex(self) def read(self, path): """Load a TranslationUnit from the given AST file.""" @@ -1857,6 +1891,10 @@ class TranslationUnit(ClangObject): # searching for declarations/definitions. PARSE_SKIP_FUNCTION_BODIES = 64 + # Used to indicate that brief documentation comments should be included + # into the set of code completions returned from this translation unit. + PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION = 128 + @classmethod def from_source(cls, filename, args=None, unsaved_files=None, options=0, index=None): @@ -1923,7 +1961,7 @@ class TranslationUnit(ClangObject): unsaved_array[i].contents = contents unsaved_array[i].length = len(contents) - ptr = lib.clang_parseTranslationUnit(index, filename, args_array, + ptr = conf.lib.clang_parseTranslationUnit(index, filename, args_array, len(args), unsaved_array, len(unsaved_files), options) @@ -1948,7 +1986,7 @@ class TranslationUnit(ClangObject): if index is None: index = Index.create() - ptr = lib.clang_createTranslationUnit(index, filename) + ptr = conf.lib.clang_createTranslationUnit(index, filename) if ptr is None: raise TranslationUnitLoadError(filename) @@ -1965,17 +2003,17 @@ class TranslationUnit(ClangObject): ClangObject.__init__(self, ptr) def __del__(self): - lib.clang_disposeTranslationUnit(self) + conf.lib.clang_disposeTranslationUnit(self) @property def cursor(self): """Retrieve the cursor that represents the given translation unit.""" - return lib.clang_getTranslationUnitCursor(self) + return conf.lib.clang_getTranslationUnitCursor(self) @property def spelling(self): """Get the original translation unit source file name.""" - return lib.clang_getTranslationUnitSpelling(self) + return conf.lib.clang_getTranslationUnitSpelling(self) def get_includes(self): """ @@ -1992,7 +2030,7 @@ class TranslationUnit(ClangObject): # Automatically adapt CIndex/ctype pointers to python objects includes = [] - lib.clang_getInclusions(self, + conf.lib.clang_getInclusions(self, callbacks['translation_unit_includes'](visitor), includes) return iter(includes) @@ -2068,10 +2106,10 @@ class TranslationUnit(ClangObject): self.tu = tu def __len__(self): - return int(lib.clang_getNumDiagnostics(self.tu)) + return int(conf.lib.clang_getNumDiagnostics(self.tu)) def __getitem__(self, key): - diag = lib.clang_getDiagnostic(self.tu, key) + diag = conf.lib.clang_getDiagnostic(self.tu, key) if not diag: raise IndexError return Diagnostic(diag) @@ -2104,7 +2142,7 @@ class TranslationUnit(ClangObject): unsaved_files_array[i].name = name unsaved_files_array[i].contents = value unsaved_files_array[i].length = len(value) - ptr = lib.clang_reparseTranslationUnit(self, len(unsaved_files), + ptr = conf.lib.clang_reparseTranslationUnit(self, len(unsaved_files), unsaved_files_array, options) def save(self, filename): @@ -2122,13 +2160,16 @@ class TranslationUnit(ClangObject): filename -- The path to save the translation unit to. 
""" - options = lib.clang_defaultSaveOptions(self) - result = int(lib.clang_saveTranslationUnit(self, filename, options)) + options = conf.lib.clang_defaultSaveOptions(self) + result = int(conf.lib.clang_saveTranslationUnit(self, filename, + options)) if result != 0: raise TranslationUnitSaveError(result, 'Error saving TranslationUnit.') - def codeComplete(self, path, line, column, unsaved_files=None, options=0): + def codeComplete(self, path, line, column, unsaved_files=None, + include_macros=False, include_code_patterns=False, + include_brief_comments=False): """ Code complete in this translation unit. @@ -2137,6 +2178,17 @@ class TranslationUnit(ClangObject): and the second should be the contents to be substituted for the file. The contents may be passed as strings or file objects. """ + options = 0 + + if include_macros: + options += 1 + + if include_code_patterns: + options += 2 + + if include_brief_comments: + options += 4 + if unsaved_files is None: unsaved_files = [] @@ -2154,7 +2206,7 @@ class TranslationUnit(ClangObject): unsaved_files_array[i].name = name unsaved_files_array[i].contents = value unsaved_files_array[i].length = len(value) - ptr = lib.clang_codeCompleteAt(self, path, line, column, + ptr = conf.lib.clang_codeCompleteAt(self, path, line, column, unsaved_files_array, len(unsaved_files), options) if ptr: return CodeCompletionResults(ptr) @@ -2182,17 +2234,17 @@ class File(ClangObject): @staticmethod def from_name(translation_unit, file_name): """Retrieve a file handle within the given translation unit.""" - return File(lib.clang_getFile(translation_unit, file_name)) + return File(conf.lib.clang_getFile(translation_unit, file_name)) @property def name(self): """Return the complete file and path name of the file.""" - return lib.clang_getCString(lib.clang_getFileName(self)) + return conf.lib.clang_getCString(conf.lib.clang_getFileName(self)) @property def time(self): """Return the last modification time of the file.""" - return lib.clang_getFileTime(self) + return conf.lib.clang_getFileTime(self) def __str__(self): return self.name @@ -2264,7 +2316,7 @@ class CompileCommand(object): @property def directory(self): """Get the working directory for this CompileCommand""" - return lib.clang_CompileCommand_getDirectory(self.cmd) + return conf.lib.clang_CompileCommand_getDirectory(self.cmd) @property def arguments(self): @@ -2274,9 +2326,9 @@ class CompileCommand(object): Invariant : the first argument is the compiler executable """ - length = lib.clang_CompileCommand_getNumArgs(self.cmd) + length = conf.lib.clang_CompileCommand_getNumArgs(self.cmd) for i in xrange(length): - yield lib.clang_CompileCommand_getArg(self.cmd, i) + yield conf.lib.clang_CompileCommand_getArg(self.cmd, i) class CompileCommands(object): """ @@ -2287,13 +2339,13 @@ class CompileCommands(object): self.ccmds = ccmds def __del__(self): - lib.clang_CompileCommands_dispose(self.ccmds) + conf.lib.clang_CompileCommands_dispose(self.ccmds) def __len__(self): - return int(lib.clang_CompileCommands_getSize(self.ccmds)) + return int(conf.lib.clang_CompileCommands_getSize(self.ccmds)) def __getitem__(self, i): - cc = lib.clang_CompileCommands_getCommand(self.ccmds, i) + cc = conf.lib.clang_CompileCommands_getCommand(self.ccmds, i) if not cc: raise IndexError return CompileCommand(cc, self) @@ -2313,7 +2365,7 @@ class CompilationDatabase(ClangObject): """ def __del__(self): - lib.clang_CompilationDatabase_dispose(self) + conf.lib.clang_CompilationDatabase_dispose(self) @staticmethod def from_result(res, fn, args): 
@@ -2327,7 +2379,7 @@ class CompilationDatabase(ClangObject): """Builds a CompilationDatabase from the database found in buildDir""" errorCode = c_uint() try: - cdb = lib.clang_CompilationDatabase_fromDirectory(buildDir, + cdb = conf.lib.clang_CompilationDatabase_fromDirectory(buildDir, byref(errorCode)) except CompilationDatabaseError as e: raise CompilationDatabaseError(int(errorCode.value), @@ -2339,7 +2391,8 @@ class CompilationDatabase(ClangObject): Get an iterable object providing all the CompileCommands available to build filename. Returns None if filename is not found in the database. """ - return lib.clang_CompilationDatabase_getCompileCommands(self, filename) + return conf.lib.clang_CompilationDatabase_getCompileCommands(self, + filename) class Token(Structure): """Represents a single token from the preprocessor. @@ -2361,29 +2414,29 @@ class Token(Structure): This is the textual representation of the token in source. """ - return lib.clang_getTokenSpelling(self._tu, self) + return conf.lib.clang_getTokenSpelling(self._tu, self) @property def kind(self): """Obtain the TokenKind of the current token.""" - return TokenKind.from_value(lib.clang_getTokenKind(self)) + return TokenKind.from_value(conf.lib.clang_getTokenKind(self)) @property def location(self): """The SourceLocation this Token occurs at.""" - return lib.clang_getTokenLocation(self._tu, self) + return conf.lib.clang_getTokenLocation(self._tu, self) @property def extent(self): """The SourceRange this Token occupies.""" - return lib.clang_getTokenExtent(self._tu, self) + return conf.lib.clang_getTokenExtent(self._tu, self) @property def cursor(self): """The Cursor this Token corresponds to.""" cursor = Cursor() - lib.clang_annotateTokens(self._tu, byref(self), 1, byref(cursor)) + conf.lib.clang_annotateTokens(self._tu, byref(self), 1, byref(cursor)) return cursor @@ -2394,434 +2447,691 @@ callbacks['translation_unit_includes'] = CFUNCTYPE(None, c_object_p, POINTER(SourceLocation), c_uint, py_object) callbacks['cursor_visit'] = CFUNCTYPE(c_int, Cursor, Cursor, py_object) -def register_functions(lib): - """Register function prototypes with a libclang library instance. - - This must be called as part of library instantiation so Python knows how - to call out to the shared library. - """ - # Functions are registered in strictly alphabetical order. 
- lib.clang_annotateTokens.argtype = [TranslationUnit, POINTER(Token), - c_uint, POINTER(Cursor)] - - lib.clang_CompilationDatabase_dispose.argtypes = [c_object_p] - - lib.clang_CompilationDatabase_fromDirectory.argtypes = [c_char_p, - POINTER(c_uint)] - lib.clang_CompilationDatabase_fromDirectory.restype = c_object_p - lib.clang_CompilationDatabase_fromDirectory.errcheck = CompilationDatabase.from_result - - lib.clang_CompilationDatabase_getCompileCommands.argtypes = [c_object_p, c_char_p] - lib.clang_CompilationDatabase_getCompileCommands.restype = c_object_p - lib.clang_CompilationDatabase_getCompileCommands.errcheck = CompileCommands.from_result - - lib.clang_CompileCommands_dispose.argtypes = [c_object_p] - - lib.clang_CompileCommands_getCommand.argtypes = [c_object_p, c_uint] - lib.clang_CompileCommands_getCommand.restype = c_object_p - - lib.clang_CompileCommands_getSize.argtypes = [c_object_p] - lib.clang_CompileCommands_getSize.restype = c_uint - - lib.clang_CompileCommand_getArg.argtypes = [c_object_p, c_uint] - lib.clang_CompileCommand_getArg.restype = _CXString - lib.clang_CompileCommand_getArg.errcheck = _CXString.from_result - - lib.clang_CompileCommand_getDirectory.argtypes = [c_object_p] - lib.clang_CompileCommand_getDirectory.restype = _CXString - lib.clang_CompileCommand_getDirectory.errcheck = _CXString.from_result - - lib.clang_CompileCommand_getNumArgs.argtypes = [c_object_p] - lib.clang_CompileCommand_getNumArgs.restype = c_uint - - lib.clang_codeCompleteAt.argtypes = [TranslationUnit, c_char_p, c_int, - c_int, c_void_p, c_int, c_int] - lib.clang_codeCompleteAt.restype = POINTER(CCRStructure) - - lib.clang_codeCompleteGetDiagnostic.argtypes = [CodeCompletionResults, - c_int] - lib.clang_codeCompleteGetDiagnostic.restype = Diagnostic - - lib.clang_codeCompleteGetNumDiagnostics.argtypes = [CodeCompletionResults] - lib.clang_codeCompleteGetNumDiagnostics.restype = c_int - - lib.clang_createIndex.argtypes = [c_int, c_int] - lib.clang_createIndex.restype = c_object_p - - lib.clang_createTranslationUnit.argtypes = [Index, c_char_p] - lib.clang_createTranslationUnit.restype = c_object_p - - lib.clang_CXXMethod_isStatic.argtypes = [Cursor] - lib.clang_CXXMethod_isStatic.restype = bool - - lib.clang_CXXMethod_isVirtual.argtypes = [Cursor] - lib.clang_CXXMethod_isVirtual.restype = bool - - lib.clang_defaultSaveOptions.argtypes = [TranslationUnit] - lib.clang_defaultSaveOptions.restype = c_uint - - lib.clang_disposeCodeCompleteResults.argtypes = [CodeCompletionResults] - - #lib.clang_disposeCXTUResourceUsage.argtypes = [CXTUResourceUsage] - - lib.clang_disposeDiagnostic.argtypes = [Diagnostic] - - lib.clang_disposeIndex.argtypes = [Index] - - lib.clang_disposeString.argtypes = [_CXString] - - lib.clang_disposeTokens.argtype = [TranslationUnit, POINTER(Token), c_uint] - - lib.clang_disposeTranslationUnit.argtypes = [TranslationUnit] - - lib.clang_equalCursors.argtypes = [Cursor, Cursor] - lib.clang_equalCursors.restype = bool - - lib.clang_equalLocations.argtypes = [SourceLocation, SourceLocation] - lib.clang_equalLocations.restype = bool - - lib.clang_equalRanges.argtypes = [SourceRange, SourceRange] - lib.clang_equalRanges.restype = bool - - lib.clang_equalTypes.argtypes = [Type, Type] - lib.clang_equalTypes.restype = bool - - lib.clang_getArgType.argtypes = [Type, c_uint] - lib.clang_getArgType.restype = Type - lib.clang_getArgType.errcheck = Type.from_result - - lib.clang_getArrayElementType.argtypes = [Type] - lib.clang_getArrayElementType.restype = Type - 
lib.clang_getArrayElementType.errcheck = Type.from_result - - lib.clang_getArraySize.argtypes = [Type] - lib.clang_getArraySize.restype = c_longlong - - lib.clang_getCanonicalCursor.argtypes = [Cursor] - lib.clang_getCanonicalCursor.restype = Cursor - lib.clang_getCanonicalCursor.errcheck = Cursor.from_cursor_result +# Functions strictly alphabetical order. +functionList = [ + ("clang_annotateTokens", + [TranslationUnit, POINTER(Token), c_uint, POINTER(Cursor)]), - lib.clang_getCanonicalType.argtypes = [Type] - lib.clang_getCanonicalType.restype = Type - lib.clang_getCanonicalType.errcheck = Type.from_result + ("clang_CompilationDatabase_dispose", + [c_object_p]), - lib.clang_getCompletionAvailability.argtypes = [c_void_p] - lib.clang_getCompletionAvailability.restype = c_int + ("clang_CompilationDatabase_fromDirectory", + [c_char_p, POINTER(c_uint)], + c_object_p, + CompilationDatabase.from_result), - lib.clang_getCompletionChunkCompletionString.argtypes = [c_void_p, c_int] - lib.clang_getCompletionChunkCompletionString.restype = c_object_p + ("clang_CompilationDatabase_getCompileCommands", + [c_object_p, c_char_p], + c_object_p, + CompileCommands.from_result), - lib.clang_getCompletionChunkKind.argtypes = [c_void_p, c_int] - lib.clang_getCompletionChunkKind.restype = c_int + ("clang_CompileCommands_dispose", + [c_object_p]), - lib.clang_getCompletionChunkText.argtypes = [c_void_p, c_int] - lib.clang_getCompletionChunkText.restype = _CXString + ("clang_CompileCommands_getCommand", + [c_object_p, c_uint], + c_object_p), - lib.clang_getCompletionPriority.argtypes = [c_void_p] - lib.clang_getCompletionPriority.restype = c_int + ("clang_CompileCommands_getSize", + [c_object_p], + c_uint), - lib.clang_getCString.argtypes = [_CXString] - lib.clang_getCString.restype = c_char_p + ("clang_CompileCommand_getArg", + [c_object_p, c_uint], + _CXString, + _CXString.from_result), - lib.clang_getCursor.argtypes = [TranslationUnit, SourceLocation] - lib.clang_getCursor.restype = Cursor + ("clang_CompileCommand_getDirectory", + [c_object_p], + _CXString, + _CXString.from_result), - lib.clang_getCursorDefinition.argtypes = [Cursor] - lib.clang_getCursorDefinition.restype = Cursor - lib.clang_getCursorDefinition.errcheck = Cursor.from_result + ("clang_CompileCommand_getNumArgs", + [c_object_p], + c_uint), - lib.clang_getCursorDisplayName.argtypes = [Cursor] - lib.clang_getCursorDisplayName.restype = _CXString - lib.clang_getCursorDisplayName.errcheck = _CXString.from_result + ("clang_codeCompleteAt", + [TranslationUnit, c_char_p, c_int, c_int, c_void_p, c_int, c_int], + POINTER(CCRStructure)), - lib.clang_getCursorExtent.argtypes = [Cursor] - lib.clang_getCursorExtent.restype = SourceRange + ("clang_codeCompleteGetDiagnostic", + [CodeCompletionResults, c_int], + Diagnostic), - lib.clang_getCursorLexicalParent.argtypes = [Cursor] - lib.clang_getCursorLexicalParent.restype = Cursor - lib.clang_getCursorLexicalParent.errcheck = Cursor.from_cursor_result + ("clang_codeCompleteGetNumDiagnostics", + [CodeCompletionResults], + c_int), - lib.clang_getCursorLocation.argtypes = [Cursor] - lib.clang_getCursorLocation.restype = SourceLocation + ("clang_createIndex", + [c_int, c_int], + c_object_p), - lib.clang_getCursorReferenced.argtypes = [Cursor] - lib.clang_getCursorReferenced.restype = Cursor - lib.clang_getCursorReferenced.errcheck = Cursor.from_result + ("clang_createTranslationUnit", + [Index, c_char_p], + c_object_p), - lib.clang_getCursorReferenceNameRange.argtypes = [Cursor, c_uint, c_uint] - 
lib.clang_getCursorReferenceNameRange.restype = SourceRange + ("clang_CXXMethod_isStatic", + [Cursor], + bool), - lib.clang_getCursorSemanticParent.argtypes = [Cursor] - lib.clang_getCursorSemanticParent.restype = Cursor - lib.clang_getCursorSemanticParent.errcheck = Cursor.from_cursor_result + ("clang_CXXMethod_isVirtual", + [Cursor], + bool), - lib.clang_getCursorSpelling.argtypes = [Cursor] - lib.clang_getCursorSpelling.restype = _CXString - lib.clang_getCursorSpelling.errcheck = _CXString.from_result + ("clang_defaultSaveOptions", + [TranslationUnit], + c_uint), - lib.clang_getCursorType.argtypes = [Cursor] - lib.clang_getCursorType.restype = Type - lib.clang_getCursorType.errcheck = Type.from_result + ("clang_disposeCodeCompleteResults", + [CodeCompletionResults]), - lib.clang_getCursorUSR.argtypes = [Cursor] - lib.clang_getCursorUSR.restype = _CXString - lib.clang_getCursorUSR.errcheck = _CXString.from_result +# ("clang_disposeCXTUResourceUsage", +# [CXTUResourceUsage]), - #lib.clang_getCXTUResourceUsage.argtypes = [TranslationUnit] - #lib.clang_getCXTUResourceUsage.restype = CXTUResourceUsage + ("clang_disposeDiagnostic", + [Diagnostic]), - lib.clang_getCXXAccessSpecifier.argtypes = [Cursor] - lib.clang_getCXXAccessSpecifier.restype = c_uint + ("clang_disposeIndex", + [Index]), - lib.clang_getDeclObjCTypeEncoding.argtypes = [Cursor] - lib.clang_getDeclObjCTypeEncoding.restype = _CXString - lib.clang_getDeclObjCTypeEncoding.errcheck = _CXString.from_result + ("clang_disposeString", + [_CXString]), - lib.clang_getDiagnostic.argtypes = [c_object_p, c_uint] - lib.clang_getDiagnostic.restype = c_object_p + ("clang_disposeTokens", + [TranslationUnit, POINTER(Token), c_uint]), - lib.clang_getDiagnosticCategory.argtypes = [Diagnostic] - lib.clang_getDiagnosticCategory.restype = c_uint + ("clang_disposeTranslationUnit", + [TranslationUnit]), - lib.clang_getDiagnosticCategoryName.argtypes = [c_uint] - lib.clang_getDiagnosticCategoryName.restype = _CXString - lib.clang_getDiagnosticCategoryName.errcheck = _CXString.from_result + ("clang_equalCursors", + [Cursor, Cursor], + bool), - lib.clang_getDiagnosticFixIt.argtypes = [Diagnostic, c_uint, - POINTER(SourceRange)] - lib.clang_getDiagnosticFixIt.restype = _CXString - lib.clang_getDiagnosticFixIt.errcheck = _CXString.from_result + ("clang_equalLocations", + [SourceLocation, SourceLocation], + bool), + + ("clang_equalRanges", + [SourceRange, SourceRange], + bool), + + ("clang_equalTypes", + [Type, Type], + bool), + + ("clang_getArgType", + [Type, c_uint], + Type, + Type.from_result), + + ("clang_getArrayElementType", + [Type], + Type, + Type.from_result), + + ("clang_getArraySize", + [Type], + c_longlong), + + ("clang_getCanonicalCursor", + [Cursor], + Cursor, + Cursor.from_cursor_result), + + ("clang_getCanonicalType", + [Type], + Type, + Type.from_result), + + ("clang_getCompletionAvailability", + [c_void_p], + c_int), + + ("clang_getCompletionBriefComment", + [c_void_p], + _CXString), + + ("clang_getCompletionChunkCompletionString", + [c_void_p, c_int], + c_object_p), + + ("clang_getCompletionChunkKind", + [c_void_p, c_int], + c_int), + + ("clang_getCompletionChunkText", + [c_void_p, c_int], + _CXString), + + ("clang_getCompletionPriority", + [c_void_p], + c_int), + + ("clang_getCString", + [_CXString], + c_char_p), + + ("clang_getCursor", + [TranslationUnit, SourceLocation], + Cursor), + + ("clang_getCursorDefinition", + [Cursor], + Cursor, + Cursor.from_result), + + ("clang_getCursorDisplayName", + [Cursor], + _CXString, + 
_CXString.from_result), + + ("clang_getCursorExtent", + [Cursor], + SourceRange), + + ("clang_getCursorLexicalParent", + [Cursor], + Cursor, + Cursor.from_cursor_result), + + ("clang_getCursorLocation", + [Cursor], + SourceLocation), + + ("clang_getCursorReferenced", + [Cursor], + Cursor, + Cursor.from_result), + + ("clang_getCursorReferenceNameRange", + [Cursor, c_uint, c_uint], + SourceRange), + + ("clang_getCursorSemanticParent", + [Cursor], + Cursor, + Cursor.from_cursor_result), + + ("clang_getCursorSpelling", + [Cursor], + _CXString, + _CXString.from_result), + + ("clang_getCursorType", + [Cursor], + Type, + Type.from_result), + + ("clang_getCursorUSR", + [Cursor], + _CXString, + _CXString.from_result), + +# ("clang_getCXTUResourceUsage", +# [TranslationUnit], +# CXTUResourceUsage), + + ("clang_getCXXAccessSpecifier", + [Cursor], + c_uint), + + ("clang_getDeclObjCTypeEncoding", + [Cursor], + _CXString, + _CXString.from_result), + + ("clang_getDiagnostic", + [c_object_p, c_uint], + c_object_p), + + ("clang_getDiagnosticCategory", + [Diagnostic], + c_uint), + + ("clang_getDiagnosticCategoryName", + [c_uint], + _CXString, + _CXString.from_result), + + ("clang_getDiagnosticFixIt", + [Diagnostic, c_uint, POINTER(SourceRange)], + _CXString, + _CXString.from_result), + + ("clang_getDiagnosticLocation", + [Diagnostic], + SourceLocation), + + ("clang_getDiagnosticNumFixIts", + [Diagnostic], + c_uint), + + ("clang_getDiagnosticNumRanges", + [Diagnostic], + c_uint), + + ("clang_getDiagnosticOption", + [Diagnostic, POINTER(_CXString)], + _CXString, + _CXString.from_result), + + ("clang_getDiagnosticRange", + [Diagnostic, c_uint], + SourceRange), + + ("clang_getDiagnosticSeverity", + [Diagnostic], + c_int), + + ("clang_getDiagnosticSpelling", + [Diagnostic], + _CXString, + _CXString.from_result), + + ("clang_getElementType", + [Type], + Type, + Type.from_result), + + ("clang_getEnumConstantDeclUnsignedValue", + [Cursor], + c_ulonglong), + + ("clang_getEnumConstantDeclValue", + [Cursor], + c_longlong), + + ("clang_getEnumDeclIntegerType", + [Cursor], + Type, + Type.from_result), + + ("clang_getFile", + [TranslationUnit, c_char_p], + c_object_p), + + ("clang_getFileName", + [File], + _CXString), # TODO go through _CXString.from_result? 
+ + ("clang_getFileTime", + [File], + c_uint), + + ("clang_getIBOutletCollectionType", + [Cursor], + Type, + Type.from_result), + + ("clang_getIncludedFile", + [Cursor], + File, + File.from_cursor_result), + + ("clang_getInclusions", + [TranslationUnit, callbacks['translation_unit_includes'], py_object]), + + ("clang_getInstantiationLocation", + [SourceLocation, POINTER(c_object_p), POINTER(c_uint), POINTER(c_uint), + POINTER(c_uint)]), + + ("clang_getLocation", + [TranslationUnit, File, c_uint, c_uint], + SourceLocation), + + ("clang_getLocationForOffset", + [TranslationUnit, File, c_uint], + SourceLocation), + + ("clang_getNullCursor", + None, + Cursor), + + ("clang_getNumArgTypes", + [Type], + c_uint), + + ("clang_getNumCompletionChunks", + [c_void_p], + c_int), + + ("clang_getNumDiagnostics", + [c_object_p], + c_uint), + + ("clang_getNumElements", + [Type], + c_longlong), + + ("clang_getNumOverloadedDecls", + [Cursor], + c_uint), + + ("clang_getOverloadedDecl", + [Cursor, c_uint], + Cursor, + Cursor.from_cursor_result), + + ("clang_getPointeeType", + [Type], + Type, + Type.from_result), + + ("clang_getRange", + [SourceLocation, SourceLocation], + SourceRange), + + ("clang_getRangeEnd", + [SourceRange], + SourceLocation), + + ("clang_getRangeStart", + [SourceRange], + SourceLocation), + + ("clang_getResultType", + [Type], + Type, + Type.from_result), + + ("clang_getSpecializedCursorTemplate", + [Cursor], + Cursor, + Cursor.from_cursor_result), + + ("clang_getTemplateCursorKind", + [Cursor], + c_uint), + + ("clang_getTokenExtent", + [TranslationUnit, Token], + SourceRange), + + ("clang_getTokenKind", + [Token], + c_uint), + + ("clang_getTokenLocation", + [TranslationUnit, Token], + SourceLocation), + + ("clang_getTokenSpelling", + [TranslationUnit, Token], + _CXString, + _CXString.from_result), + + ("clang_getTranslationUnitCursor", + [TranslationUnit], + Cursor, + Cursor.from_result), + + ("clang_getTranslationUnitSpelling", + [TranslationUnit], + _CXString, + _CXString.from_result), + + ("clang_getTUResourceUsageName", + [c_uint], + c_char_p), + + ("clang_getTypeDeclaration", + [Type], + Cursor, + Cursor.from_result), + + ("clang_getTypedefDeclUnderlyingType", + [Cursor], + Type, + Type.from_result), + + ("clang_getTypeKindSpelling", + [c_uint], + _CXString, + _CXString.from_result), + + ("clang_hashCursor", + [Cursor], + c_uint), + + ("clang_isAttribute", + [CursorKind], + bool), + + ("clang_isConstQualifiedType", + [Type], + bool), + + ("clang_isCursorDefinition", + [Cursor], + bool), + + ("clang_isDeclaration", + [CursorKind], + bool), + + ("clang_isExpression", + [CursorKind], + bool), + + ("clang_isFileMultipleIncludeGuarded", + [TranslationUnit, File], + bool), + + ("clang_isFunctionTypeVariadic", + [Type], + bool), + + ("clang_isInvalid", + [CursorKind], + bool), + + ("clang_isPODType", + [Type], + bool), + + ("clang_isPreprocessing", + [CursorKind], + bool), + + ("clang_isReference", + [CursorKind], + bool), + + ("clang_isRestrictQualifiedType", + [Type], + bool), + + ("clang_isStatement", + [CursorKind], + bool), + + ("clang_isTranslationUnit", + [CursorKind], + bool), + + ("clang_isUnexposed", + [CursorKind], + bool), + + ("clang_isVirtualBase", + [Cursor], + bool), + + ("clang_isVolatileQualifiedType", + [Type], + bool), + + ("clang_parseTranslationUnit", + [Index, c_char_p, c_void_p, c_int, c_void_p, c_int, c_int], + c_object_p), + + ("clang_reparseTranslationUnit", + [TranslationUnit, c_int, c_void_p, c_int], + c_int), + + ("clang_saveTranslationUnit", + [TranslationUnit, 
c_char_p, c_uint], + c_int), + + ("clang_tokenize", + [TranslationUnit, SourceRange, POINTER(POINTER(Token)), POINTER(c_uint)]), + + ("clang_visitChildren", + [Cursor, callbacks['cursor_visit'], py_object], + c_uint), + + ("clang_Cursor_getNumArguments", + [Cursor], + c_int), - lib.clang_getDiagnosticLocation.argtypes = [Diagnostic] - lib.clang_getDiagnosticLocation.restype = SourceLocation - - lib.clang_getDiagnosticNumFixIts.argtypes = [Diagnostic] - lib.clang_getDiagnosticNumFixIts.restype = c_uint - - lib.clang_getDiagnosticNumRanges.argtypes = [Diagnostic] - lib.clang_getDiagnosticNumRanges.restype = c_uint - - lib.clang_getDiagnosticOption.argtypes = [Diagnostic, POINTER(_CXString)] - lib.clang_getDiagnosticOption.restype = _CXString - lib.clang_getDiagnosticOption.errcheck = _CXString.from_result - - lib.clang_getDiagnosticRange.argtypes = [Diagnostic, c_uint] - lib.clang_getDiagnosticRange.restype = SourceRange - - lib.clang_getDiagnosticSeverity.argtypes = [Diagnostic] - lib.clang_getDiagnosticSeverity.restype = c_int - - lib.clang_getDiagnosticSpelling.argtypes = [Diagnostic] - lib.clang_getDiagnosticSpelling.restype = _CXString - lib.clang_getDiagnosticSpelling.errcheck = _CXString.from_result - - lib.clang_getElementType.argtypes = [Type] - lib.clang_getElementType.restype = Type - lib.clang_getElementType.errcheck = Type.from_result - - lib.clang_getEnumConstantDeclUnsignedValue.argtypes = [Cursor] - lib.clang_getEnumConstantDeclUnsignedValue.restype = c_ulonglong - - lib.clang_getEnumConstantDeclValue.argtypes = [Cursor] - lib.clang_getEnumConstantDeclValue.restype = c_longlong - - lib.clang_getEnumDeclIntegerType.argtypes = [Cursor] - lib.clang_getEnumDeclIntegerType.restype = Type - lib.clang_getEnumDeclIntegerType.errcheck = Type.from_result - - lib.clang_getFile.argtypes = [TranslationUnit, c_char_p] - lib.clang_getFile.restype = c_object_p - - lib.clang_getFileName.argtypes = [File] - lib.clang_getFileName.restype = _CXString - # TODO go through _CXString.from_result? 
- - lib.clang_getFileTime.argtypes = [File] - lib.clang_getFileTime.restype = c_uint - - lib.clang_getIBOutletCollectionType.argtypes = [Cursor] - lib.clang_getIBOutletCollectionType.restype = Type - lib.clang_getIBOutletCollectionType.errcheck = Type.from_result - - lib.clang_getIncludedFile.argtypes = [Cursor] - lib.clang_getIncludedFile.restype = File - lib.clang_getIncludedFile.errcheck = File.from_cursor_result - - lib.clang_getInclusions.argtypes = [TranslationUnit, - callbacks['translation_unit_includes'], py_object] - - lib.clang_getInstantiationLocation.argtypes = [SourceLocation, - POINTER(c_object_p), POINTER(c_uint), POINTER(c_uint), POINTER(c_uint)] - - lib.clang_getLocation.argtypes = [TranslationUnit, File, c_uint, c_uint] - lib.clang_getLocation.restype = SourceLocation - - lib.clang_getLocationForOffset.argtypes = [TranslationUnit, File, c_uint] - lib.clang_getLocationForOffset.restype = SourceLocation - - lib.clang_getNullCursor.restype = Cursor - - lib.clang_getNumArgTypes.argtypes = [Type] - lib.clang_getNumArgTypes.restype = c_uint - - lib.clang_getNumCompletionChunks.argtypes = [c_void_p] - lib.clang_getNumCompletionChunks.restype = c_int - - lib.clang_getNumDiagnostics.argtypes = [c_object_p] - lib.clang_getNumDiagnostics.restype = c_uint - - lib.clang_getNumElements.argtypes = [Type] - lib.clang_getNumElements.restype = c_longlong - - lib.clang_getNumOverloadedDecls.argtypes = [Cursor] - lib.clang_getNumOverloadedDecls.restyp = c_uint - - lib.clang_getOverloadedDecl.argtypes = [Cursor, c_uint] - lib.clang_getOverloadedDecl.restype = Cursor - lib.clang_getOverloadedDecl.errcheck = Cursor.from_cursor_result - - lib.clang_getPointeeType.argtypes = [Type] - lib.clang_getPointeeType.restype = Type - lib.clang_getPointeeType.errcheck = Type.from_result - - lib.clang_getRange.argtypes = [SourceLocation, SourceLocation] - lib.clang_getRange.restype = SourceRange - - lib.clang_getRangeEnd.argtypes = [SourceRange] - lib.clang_getRangeEnd.restype = SourceLocation - - lib.clang_getRangeStart.argtypes = [SourceRange] - lib.clang_getRangeStart.restype = SourceLocation - - lib.clang_getResultType.argtypes = [Type] - lib.clang_getResultType.restype = Type - lib.clang_getResultType.errcheck = Type.from_result - - lib.clang_getSpecializedCursorTemplate.argtypes = [Cursor] - lib.clang_getSpecializedCursorTemplate.restype = Cursor - lib.clang_getSpecializedCursorTemplate.errcheck = Cursor.from_cursor_result - - lib.clang_getTemplateCursorKind.argtypes = [Cursor] - lib.clang_getTemplateCursorKind.restype = c_uint - - lib.clang_getTokenExtent.argtypes = [TranslationUnit, Token] - lib.clang_getTokenExtent.restype = SourceRange - - lib.clang_getTokenKind.argtypes = [Token] - lib.clang_getTokenKind.restype = c_uint - - lib.clang_getTokenLocation.argtype = [TranslationUnit, Token] - lib.clang_getTokenLocation.restype = SourceLocation - - lib.clang_getTokenSpelling.argtype = [TranslationUnit, Token] - lib.clang_getTokenSpelling.restype = _CXString - lib.clang_getTokenSpelling.errcheck = _CXString.from_result - - lib.clang_getTranslationUnitCursor.argtypes = [TranslationUnit] - lib.clang_getTranslationUnitCursor.restype = Cursor - lib.clang_getTranslationUnitCursor.errcheck = Cursor.from_result - - lib.clang_getTranslationUnitSpelling.argtypes = [TranslationUnit] - lib.clang_getTranslationUnitSpelling.restype = _CXString - lib.clang_getTranslationUnitSpelling.errcheck = _CXString.from_result - - lib.clang_getTUResourceUsageName.argtypes = [c_uint] - lib.clang_getTUResourceUsageName.restype = 
c_char_p - - lib.clang_getTypeDeclaration.argtypes = [Type] - lib.clang_getTypeDeclaration.restype = Cursor - lib.clang_getTypeDeclaration.errcheck = Cursor.from_result - - lib.clang_getTypedefDeclUnderlyingType.argtypes = [Cursor] - lib.clang_getTypedefDeclUnderlyingType.restype = Type - lib.clang_getTypedefDeclUnderlyingType.errcheck = Type.from_result - - lib.clang_getTypeKindSpelling.argtypes = [c_uint] - lib.clang_getTypeKindSpelling.restype = _CXString - lib.clang_getTypeKindSpelling.errcheck = _CXString.from_result - - lib.clang_hashCursor.argtypes = [Cursor] - lib.clang_hashCursor.restype = c_uint - - lib.clang_isAttribute.argtypes = [CursorKind] - lib.clang_isAttribute.restype = bool - - lib.clang_isConstQualifiedType.argtypes = [Type] - lib.clang_isConstQualifiedType.restype = bool + ("clang_Cursor_getArgument", + [Cursor, c_uint], + Cursor, + Cursor.from_result), +] - lib.clang_isCursorDefinition.argtypes = [Cursor] - lib.clang_isCursorDefinition.restype = bool +class LibclangError(Exception): + def __init__(self, message): + self.m = message - lib.clang_isDeclaration.argtypes = [CursorKind] - lib.clang_isDeclaration.restype = bool + def __str__(self): + return self.m + +def register_function(lib, item, ignore_errors): + # A function may not exist, if these bindings are used with an older or + # incompatible version of libclang.so. + try: + func = getattr(lib, item[0]) + except AttributeError as e: + msg = str(e) + ". Please ensure that your python bindings are "\ + "compatible with your libclang.so version." + if ignore_errors: + return + raise LibclangError(msg) - lib.clang_isExpression.argtypes = [CursorKind] - lib.clang_isExpression.restype = bool + if len(item) >= 2: + func.argtypes = item[1] - lib.clang_isFileMultipleIncludeGuarded.argtypes = [TranslationUnit, File] - lib.clang_isFileMultipleIncludeGuarded.restype = bool + if len(item) >= 3: + func.restype = item[2] - lib.clang_isFunctionTypeVariadic.argtypes = [Type] - lib.clang_isFunctionTypeVariadic.restype = bool + if len(item) == 4: + func.errcheck = item[3] - lib.clang_isInvalid.argtypes = [CursorKind] - lib.clang_isInvalid.restype = bool +def register_functions(lib, ignore_errors): + """Register function prototypes with a libclang library instance. - lib.clang_isPODType.argtypes = [Type] - lib.clang_isPODType.restype = bool + This must be called as part of library instantiation so Python knows how + to call out to the shared library. 
+ """ - lib.clang_isPreprocessing.argtypes = [CursorKind] - lib.clang_isPreprocessing.restype = bool + def register(item): + return register_function(lib, item, ignore_errors) - lib.clang_isReference.argtypes = [CursorKind] - lib.clang_isReference.restype = bool + map(register, functionList) - lib.clang_isRestrictQualifiedType.argtypes = [Type] - lib.clang_isRestrictQualifiedType.restype = bool +class Config: + library_path = None + library_file = None + compatibility_check = True + loaded = False - lib.clang_isStatement.argtypes = [CursorKind] - lib.clang_isStatement.restype = bool + @staticmethod + def set_library_path(path): + """Set the path in which to search for libclang""" + if Config.loaded: + raise Exception("library path must be set before before using " \ + "any other functionalities in libclang.") - lib.clang_isTranslationUnit.argtypes = [CursorKind] - lib.clang_isTranslationUnit.restype = bool + Config.library_path = path - lib.clang_isUnexposed.argtypes = [CursorKind] - lib.clang_isUnexposed.restype = bool + @staticmethod + def set_library_file(file): + """Set the exact location of libclang from""" + if Config.loaded: + raise Exception("library file must be set before before using " \ + "any other functionalities in libclang.") - lib.clang_isVirtualBase.argtypes = [Cursor] - lib.clang_isVirtualBase.restype = bool + Config.library_file = path - lib.clang_isVolatileQualifiedType.argtypes = [Type] - lib.clang_isVolatileQualifiedType.restype = bool + @staticmethod + def set_compatibility_check(check_status): + """ Perform compatibility check when loading libclang + + The python bindings are only tested and evaluated with the version of + libclang they are provided with. To ensure correct behavior a (limited) + compatibility check is performed when loading the bindings. This check + will throw an exception, as soon as it fails. + + In case these bindings are used with an older version of libclang, parts + that have been stable between releases may still work. Users of the + python bindings can disable the compatibility check. This will cause + the python bindings to load, even though they are written for a newer + version of libclang. Failures now arise if unsupported or incompatible + features are accessed. The user is required to test himself if the + features he is using are available and compatible between different + libclang versions. 
+ """ + if Config.loaded: + raise Exception("compatibility_check must be set before before " \ + "using any other functionalities in libclang.") + + Config.compatibility_check = check_status + + @CachedProperty + def lib(self): + lib = self.get_cindex_library() + register_functions(lib, not Config.compatibility_check) + Config.loaded = True + return lib + + def get_filename(self): + if Config.library_file: + return Config.library_file + + import platform + name = platform.system() + + if name == 'Darwin': + file = 'libclang.dylib' + elif name == 'Windows': + file = 'libclang.dll' + else: + file = 'libclang.so' - lib.clang_parseTranslationUnit.argypes = [Index, c_char_p, c_void_p, c_int, - c_void_p, c_int, c_int] - lib.clang_parseTranslationUnit.restype = c_object_p + if Config.library_path: + file = Config.library_path + '/' + file - lib.clang_reparseTranslationUnit.argtypes = [TranslationUnit, c_int, - c_void_p, c_int] - lib.clang_reparseTranslationUnit.restype = c_int + return file - lib.clang_saveTranslationUnit.argtypes = [TranslationUnit, c_char_p, - c_uint] - lib.clang_saveTranslationUnit.restype = c_int + def get_cindex_library(self): + try: + library = cdll.LoadLibrary(self.get_filename()) + except OSError as e: + msg = str(e) + ". To provide a path to libclang use " \ + "Config.set_library_path() or " \ + "Config.set_library_file()." + raise LibclangError(msg) - lib.clang_tokenize.argtypes = [TranslationUnit, SourceRange, - POINTER(POINTER(Token)), POINTER(c_uint)] + return library - lib.clang_visitChildren.argtypes = [Cursor, callbacks['cursor_visit'], - py_object] - lib.clang_visitChildren.restype = c_uint + def function_exists(self, name): + try: + getattr(self.lib, name) + except AttributeError: + return False -register_functions(lib) + return True def register_enumerations(): for name, value in clang.enumerations.TokenKinds: TokenKind.register(value, name) +conf = Config() register_enumerations() __all__ = [ + 'Config', 'CodeCompletionResults', 'CompilationDatabase', 'CompileCommands', diff --git a/bindings/python/tests/cindex/test_code_completion.py b/bindings/python/tests/cindex/test_code_completion.py new file mode 100644 index 0000000..357d50d --- /dev/null +++ b/bindings/python/tests/cindex/test_code_completion.py @@ -0,0 +1,75 @@ +from clang.cindex import TranslationUnit + +def check_completion_results(cr, expected): + assert cr is not None + assert len(cr.diagnostics) == 0 + + completions = [str(c) for c in cr.results] + + for c in expected: + assert c in completions + +def test_code_complete(): + files = [('fake.c', """ +/// Aaa. +int test1; + +/// Bbb. 
+void test2(void); + +void f() { + +} +""")] + + tu = TranslationUnit.from_source('fake.c', ['-std=c99'], unsaved_files=files, + options=TranslationUnit.PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION) + + cr = tu.codeComplete('fake.c', 9, 1, unsaved_files=files, include_brief_comments=True) + + expected = [ + "{'int', ResultType} | {'test1', TypedText} || Priority: 50 || Availability: Available || Brief comment: Aaa.", + "{'void', ResultType} | {'test2', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 50 || Availability: Available || Brief comment: Bbb.", + "{'return', TypedText} || Priority: 40 || Availability: Available || Brief comment: None" + ] + check_completion_results(cr, expected) + +def test_code_complete_availability(): + files = [('fake.cpp', """ +class P { +protected: + int member; +}; + +class Q : public P { +public: + using P::member; +}; + +void f(P x, Q y) { + x.; // member is inaccessible + y.; // member is accessible +} +""")] + + tu = TranslationUnit.from_source('fake.cpp', ['-std=c++98'], unsaved_files=files) + + cr = tu.codeComplete('fake.cpp', 12, 5, unsaved_files=files) + + expected = [ + "{'const', TypedText} || Priority: 40 || Availability: Available || Brief comment: None", + "{'volatile', TypedText} || Priority: 40 || Availability: Available || Brief comment: None", + "{'operator', TypedText} || Priority: 40 || Availability: Available || Brief comment: None", + "{'P', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None", + "{'Q', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None" + ] + check_completion_results(cr, expected) + + cr = tu.codeComplete('fake.cpp', 13, 5, unsaved_files=files) + expected = [ + "{'P', TypedText} | {'::', Text} || Priority: 75 || Availability: Available || Brief comment: None", + "{'P &', ResultType} | {'operator=', TypedText} | {'(', LeftParen} | {'const P &', Placeholder} | {')', RightParen} || Priority: 34 || Availability: Available || Brief comment: None", + "{'int', ResultType} | {'member', TypedText} || Priority: 35 || Availability: NotAccessible || Brief comment: None", + "{'void', ResultType} | {'~P', TypedText} | {'(', LeftParen} | {')', RightParen} || Priority: 34 || Availability: Available || Brief comment: None" + ] + check_completion_results(cr, expected) diff --git a/bindings/python/tests/cindex/test_cursor.py b/bindings/python/tests/cindex/test_cursor.py index 51695e2..edb209b 100644 --- a/bindings/python/tests/cindex/test_cursor.py +++ b/bindings/python/tests/cindex/test_cursor.py @@ -241,3 +241,12 @@ def test_get_tokens(): assert len(tokens) == 7 assert tokens[0].spelling == 'int' assert tokens[1].spelling == 'foo' + +def test_get_arguments(): + tu = get_tu('void foo(int i, int j);') + foo = get_cursor(tu, 'foo') + arguments = list(foo.get_arguments()) + + assert len(arguments) == 2 + assert arguments[0].spelling == "i" + assert arguments[1].spelling == "j" diff --git a/bindings/xml/comment-xml-schema.rng b/bindings/xml/comment-xml-schema.rng index a0329f8..d98f405 100644 --- a/bindings/xml/comment-xml-schema.rng +++ b/bindings/xml/comment-xml-schema.rng @@ -25,6 +25,9 @@ + + + @@ -71,6 +74,9 @@ + + + @@ -79,6 +85,15 @@ + + + + + + + + + @@ -106,6 +121,9 @@ + + + @@ -135,6 +153,9 @@ + + + @@ -165,6 +186,9 @@ + + + @@ -195,6 +219,9 @@ + + + @@ -225,6 +252,9 @@ + + + @@ -292,11 +322,18 @@ + + + + + + + - + - + @@ -369,6 +406,59 @@ + + + + + + + + + \d+|\d+\.\d+|\d+\.\d+.\d+ + + + + + + + \d+|\d+\.\d+|\d+\.\d+.\d+ + + + + + + 
+ \d+|\d+\.\d+|\d+\.\d+.\d+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/AddressSanitizer.html b/docs/AddressSanitizer.html index 98ea934..397eafc 100644 --- a/docs/AddressSanitizer.html +++ b/docs/AddressSanitizer.html @@ -45,17 +45,21 @@ The tool can detect the following types of bugs: Typical slowdown introduced by AddressSanitizer is 2x.

    How to build

    -Follow the
    clang build instructions.
    -Note: CMake build does not work yet. -See bug 12272. +Follow the clang build instructions. +CMake build is supported.

    Usage

    -Simply compile and link your program with -faddress-sanitizer flag.
+Simply compile and link your program with the -fsanitize=address flag.
    +The AddressSanitizer run-time library should be linked to the final executable, +so make sure to use clang (not ld) for the final link step.
    +When linking shared libraries, the AddressSanitizer run-time is not linked, +so -Wl,-z,defs may cause link errors (don't use it with AddressSanitizer).
+ To get reasonable performance, add -O1 or higher.
    To get nicer stack traces in error messages add -fno-omit-frame-pointer.
    To get perfect stack traces you may need to disable inlining (just use -O1) and tail call -elimination (
    -fno-optimize-sibling-calls). +elimination (-fno-optimize-sibling-calls).
     % cat example_UseAfterFree.cc
    @@ -67,7 +71,15 @@ int main(int argc, char **argv) {
     
    -% clang -O1 -g -faddress-sanitizer -fno-omit-frame-pointer example_UseAfterFree.cc
    +# Compile and link
    +% clang -O1 -g -fsanitize=address -fno-omit-frame-pointer example_UseAfterFree.cc
    +
    +OR +
    +# Compile
    +% clang -O1 -g -fsanitize=address -fno-omit-frame-pointer -c example_UseAfterFree.cc
    +# Link
    +% clang -g -fsanitize=address example_UseAfterFree.o
     
    If a bug is detected, the program will print an error message to stderr and exit with a @@ -93,6 +105,13 @@ previously allocated by thread T0 here: ==9442== ABORTING
    +AddressSanitizer exits on the first detected error. This is by design. +One reason: it makes the generated code smaller and faster (both by ~5%). +Another reason: this makes fixing bugs unavoidable. With Valgrind, it is often +the case that users treat Valgrind warnings as false positives +(which they are not) and don't fix them. + +

    __has_feature(address_sanitizer)

    In some cases one may need to execute different code depending on whether AddressSanitizer is enabled. @@ -107,8 +126,8 @@ can be used for this purpose.
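For instance, a minimal sketch of guarding code on this feature test (the guarded body is illustrative):

  #if defined(__has_feature)
  #  if __has_feature(address_sanitizer)
       // Compiled only when this file is built with AddressSanitizer enabled.
  #  endif
  #endif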

    __attribute__((no_address_safety_analysis))

-Some code should not be instrumentated by AddressSanitizer. -One may use the function attribute +Some code should not be instrumented by AddressSanitizer. +One may use the function attribute no_address_safety_analysis to disable instrumentation of a particular function. @@ -118,18 +137,18 @@ Note: currently, this attribute will be lost if the function is inlined.
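A minimal sketch of applying the attribute (the function name is illustrative):

  __attribute__((no_address_safety_analysis))
  void uninstrumented_function(char *p) {
    // Memory accesses in this function are not instrumented by AddressSanitizer.
    *p = 0;
  }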

    Supported Platforms

    AddressSanitizer is supported on -
    • Linux x86_64 (tested on Ubuntu 10.04). -
    • MacOS 10.6 and 10.7 (i386/x86_64). +
      • Linux i386/x86_64 (tested on Ubuntu 10.04 and 12.04). +
      • MacOS 10.6, 10.7 and 10.8 (i386/x86_64).
-Support for Linux i386/ARM is in progress +Support for Linux ARM (and Android ARM) is in progress (it may work, but is not guaranteed to).

      Limitations

• AddressSanitizer uses more real memory than a native run. -How much -- depends on the allocations sizes. The smaller the -allocations you make the bigger the overhead. +Exact overhead depends on the allocation sizes: the smaller the +allocations you make, the bigger the overhead is.
      • AddressSanitizer uses more stack memory. We have seen up to 3x increase.
      • On 64-bit platforms AddressSanitizer maps (but not reserves) 16+ Terabytes of virtual address space. @@ -140,8 +159,8 @@ This means that tools like ulimit may not work as usually expected.

        Current Status

AddressSanitizer is fully functional on supported platforms starting from LLVM 3.1. -However, the test suite is not fully integrated yet and we lack the testing -process (buildbots). +The test suite is integrated into the CMake build and can be run with the +make check-asan command.

        More Information

        http://code.google.com/p/address-sanitizer. diff --git a/docs/AutomaticReferenceCounting.html b/docs/AutomaticReferenceCounting.html index 3f1ccaf..5354f8a 100644 --- a/docs/AutomaticReferenceCounting.html +++ b/docs/AutomaticReferenceCounting.html @@ -888,6 +888,15 @@ from non-ARC practice was acceptable because we had conservatively banned the synthesis in order to give ourselves exactly this leeway.

    +

    Applying __attribute__((NSObject)) to a property not of +retainable object pointer type has the same behavior it does outside +of ARC: it requires the property type to be some sort of pointer and +permits the use of modifiers other than assign. These +modifiers only affect the synthesized getter and setter; direct +accesses to the ivar (even if synthesized) still have primitive +semantics, and the value in the ivar will not be automatically +released during deallocation.

    +
    @@ -1602,6 +1611,36 @@ implementation must be very careful to do all the other work that NSObject's dealloc would, which is outside the scope of this document to describe.

    +

    The instance variables for an ARC-compiled class will be destroyed +at some point after control enters the dealloc method for the +root class of the class. The ordering of the destruction of instance +variables is unspecified, both within a single class and between +subclasses and superclasses.

    + +

    Rationale: the traditional, non-ARC pattern +for destroying instance variables is to destroy them immediately +before calling [super dealloc]. Unfortunately, message +sends from the superclass are quite capable of reaching methods in the +subclass, and those methods may well read or write to those instance +variables. Making such message sends from dealloc is generally +discouraged, since the subclass may well rely on other invariants that +were broken during dealloc, but it's not so inescapably +dangerous that we felt comfortable calling it undefined behavior. +Therefore we chose to delay destroying the instance variables to a +point at which message sends are clearly disallowed: the point at +which the root class's deallocation routines take over.

    + +

    In most code, the difference is not observable. It can, however, +be observed if an instance variable holds a strong reference to an +object whose deallocation will trigger a side-effect which must be +carefully ordered with respect to the destruction of the super class. +Such code violates the design principle that semantically important +behavior should be explicit. A simple fix is to clear the instance +variable manually during dealloc; a more holistic solution is +to move semantically important side-effects out of +dealloc and into a separate teardown phase which can rely on +working with well-formed objects.

    + @@ -1865,9 +1904,9 @@ and cf_unknown_transfer.

    A pragma is provided to facilitate the mass annotation of interfaces:

    -
    #pragma arc_cf_code_audited begin
    +
    #pragma clang arc_cf_code_audited begin
     ...
    -#pragma arc_cf_code_audited end
    +#pragma clang arc_cf_code_audited end

    All C functions declared within the extent of this pragma are treated as if annotated with the cf_audited_transfer diff --git a/docs/BlockLanguageSpec.txt b/docs/BlockLanguageSpec.txt index f7bbda3..4cdf75a 100644 --- a/docs/BlockLanguageSpec.txt +++ b/docs/BlockLanguageSpec.txt @@ -81,6 +81,10 @@ The compound statement body establishes a new lexical scope within that of its p Local automatic (stack) variables referenced within the compound statement of a Block are imported and captured by the Block as const copies. The capture (binding) is performed at the time of the Block literal expression evaluation. +The compiler is not required to capture a variable if it can prove that no references to the variable will actually be evaluated. Programmers can force a variable to be captured by referencing it in a statement at the beginning of the Block, like so: + (void) foo; +This matters when capturing the variable has side-effects, as it can in Objective-C or C++. + The lifetime of variables declared in a Block is that of a function; each activation frame contains a new copy of variables declared within the local scope of the Block. Such variable declarations should be allowed anywhere [testme] rather than only when C99 parsing is requested, including for statements. [testme] Block literal expressions may occur within Block literal expressions (nest) and all variables captured by any nested blocks are implicitly also captured in the scopes of their enclosing Blocks. @@ -143,23 +147,25 @@ C++ Extensions Block literal expressions within functions are extended to allow const use of C++ objects, pointers, or references held in automatic storage. -For example, given class Foo with member function fighter(void): +As usual, within the block, references to captured variables become const-qualified, as if they were references to members of a const object. Note that this does not change the type of a variable of reference type. + +For example, given a class Foo: Foo foo; Foo &fooRef = foo; Foo *fooPtr = &foo; -...a Block that used foo would import the variables as const variations: - const Foo block_foo = foo; // const copy constructor - const Foo &block_fooRef = fooRef; - Foo *const block_fooPtr = fooPtr; +A Block that referenced these variables would import the variables as const variations: + const Foo block_foo = foo; + Foo &block_fooRef = fooRef; + Foo *const block_fooPtr = fooPtr; -Stack-local objects are copied into a Block via a copy const constructor. If no such constructor exists, it is considered an error to reference such objects from within the Block compound statements. A destructor is run as control leaves the compound statement that contains the Block literal expression. +Captured variables are copied into the Block at the instant of evaluating the Block literal expression. They are also copied when calling Block_copy() on a Block allocated on the stack. In both cases, they are copied as if the variable were const-qualified, and it's an error if there's no such constructor. -If a Block originates on the stack, a const copy constructor of the stack-based Block const copy is performed when a Block_copy operation is called; when the last Block_release (or subsequently GC) occurs, a destructor is run on the heap copy. +Captured variables in Blocks on the stack are destroyed when control leaves the compound statement that contains the Block literal expression. Captured variables in Blocks on the heap are destroyed when the reference count of the Block drops to zero. 
-Variables declared as residing in __block storage may be initially allocated in the heap or may first appear on the stack and be copied to the heap as a result of a Block_copy() operation. When copied from the stack, a normal copy constructor is used to initialize the heap-based version from the original stack version. The destructor for a const copied object is run at the normal end of scope. The destructor for any initial stack based version is also called at normal end of scope. +Variables declared as residing in __block storage may be initially allocated in the heap or may first appear on the stack and be copied to the heap as a result of a Block_copy() operation. When copied from the stack, __block variables are copied using their normal qualification (i.e. without adding const). In C++11, __block variables are copied as x-values if that is possible, then as l-values if not; if both fail, it's an error. The destructor for any initial stack-based version is called at the variable's normal end of scope. -Within a member function, access to member functions and variables is done via an implicit const copy of a this pointer. +References to 'this', as well as references to non-static members of any enclosing class, are evaluated by capturing 'this' just like a normal variable of C pointer type. Member variables that are Blocks may not be overloaded by the types of their arguments. diff --git a/docs/ClangTools.html b/docs/ClangTools.html index 0dfdc6a..4de57bd 100644 --- a/docs/ClangTools.html +++ b/docs/ClangTools.html @@ -87,22 +87,14 @@ specific functionality.

    clang-check

    This tool combines the LibTooling framework for running a Clang tool with the -basic Clang diagnostics by syntax checking specific files in a fast, command line -interface. It can also accept flags to re-display the diagnostics in different -formats with different flags, suitable for use driving an IDE or editor.

    +basic Clang diagnostics by syntax checking specific files in a fast, command +line interface. It can also accept flags to re-display the diagnostics in +different formats with different flags, suitable for use driving an IDE or +editor. Furthermore, it can be used in fixit-mode to directly apply fixit-hints +offered by clang.

    FIXME: Link to user-oriented clang-check documentation.

    -

    clang-fixit (Not yet implemented!)

    -

    A tool which specifically applies the Clang fix-it hint diagnostic technology -on top of a dedicated tool. It is designed to explore alternative interfaces for -applying fix-it hints, including automatic application, prompting users with -options for different fixes, etc.

    - -

    NB: The clang-fixit tool is planned, but not yet implemented.

    - -

    FIXME: Link to user-oriented clang-fixit documentation.

    -

    Extra Clang Tools

    diff --git a/docs/HowToSetupToolingForLLVM.html b/docs/HowToSetupToolingForLLVM.html index 493c882..022ed9c 100644 --- a/docs/HowToSetupToolingForLLVM.html +++ b/docs/HowToSetupToolingForLLVM.html @@ -77,12 +77,38 @@ $PATH. Try to run it on any .cpp file inside the LLVM source tree:

    If you're using vim, it's convenient to have clang-check integrated. Put this into your .vimrc:

    -  set makeprg=clang-check\ %
    -  map <F5> :make<CR><CR>
    +function! ClangCheckImpl(cmd)
    +  if &autowrite | wall | endif
    +  echo "Running " . a:cmd . " ..."
    +  let l:output = system(a:cmd)
    +  cexpr l:output
    +  cwindow
    +  let w:quickfix_title = a:cmd
    +  if v:shell_error != 0
    +    cc
    +  endif
    +  let g:clang_check_last_cmd = a:cmd
    +endfunction
    +
    +function! ClangCheck()
    +  let l:filename = expand('%')
    +  if l:filename =~ '\.\(cpp\|cxx\|cc\|c\)$'
    +    call ClangCheckImpl("clang-check " . l:filename)
    +  elseif exists("g:clang_check_last_cmd")
    +    call ClangCheckImpl(g:clang_check_last_cmd)
    +  else
    +    echo "Can't detect file's compilation arguments and no previous clang-check invocation!"
    +  endif
    +endfunction
    +
    +nmap <silent> <F5> :call ClangCheck()<CR><CR>
     
    -

    When editing C++ code, hit F5 to reparse the current buffer. The output will -go into the error window, which you can enable with :cope.

    +

    When editing a .cpp/.cxx/.cc/.c file, hit F5 to reparse the file. In case +the current file has a different extension (for example, .h), F5 will re-run +the last clang-check invocation made from this vim instance (if any). The +output will go into the error window, which is opened automatically when +clang-check finds errors, and can be re-opened with :cope.

    Other clang-check options that can be useful when working with clang AST:

    diff --git a/docs/InternalsManual.html b/docs/InternalsManual.html index 3f3e124..57f0631 100644 --- a/docs/InternalsManual.html +++ b/docs/InternalsManual.html @@ -502,7 +502,9 @@ code, the source ranges, and the caret. However, this behavior isn't required. Instead of formatting and printing out the diagnostics, this implementation just captures and remembers the diagnostics as they fly by. Then -verify compares the list of produced diagnostics to the list of expected ones. If they disagree, -it prints out its own output. +it prints out its own output. Full documentation for the -verify mode can be +found in the Clang API documentation for VerifyDiagnosticConsumer, here.
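As an aside on the -verify mode mentioned in this hunk: a hypothetical test file (not part of this patch) pairs each expected diagnostic with a comment on the line that should produce it, e.g.:

  // RUN: %clang_cc1 -fsyntax-only -verify %s
  #error unexpected configuration  // expected-error {{unexpected configuration}}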

    There are many other possible implementations of this interface, and this is diff --git a/docs/LanguageExtensions.html b/docs/LanguageExtensions.html index 40477b8..8c0e5b7 100644 --- a/docs/LanguageExtensions.html +++ b/docs/LanguageExtensions.html @@ -1007,6 +1007,7 @@ struct is_convertible_to {

  • __is_convertible_to (Microsoft)
  • __is_empty (GNU, Microsoft)
  • __is_enum (GNU, Microsoft)
  • +
  • __is_interface_class (Microsoft)
  • __is_pod (GNU, Microsoft)
  • __is_polymorphic (GNU, Microsoft)
  • __is_union (GNU, Microsoft)
  • @@ -1582,7 +1583,8 @@ path between it and the next switch label.

     // compile with -Wimplicit-fallthrough
     switch (n) {
    -case 33:
    +case 22:
    +case 33:  // no warning: no statements between case labels
       f();
     case 44:  // warning: unannotated fall-through
       g();
    @@ -1981,8 +1983,8 @@ int fcntl(int fd, int cmd, ...)
     
     

    Use __attribute__((pointer_with_type_tag(ptr_kind, ptr_idx, type_tag_idx))) on a function declaration to specify that the -function a type tag that determines the pointee type of some other pointer -argument.

    +function accepts a type tag that determines the pointee type of some other +pointer argument.

    For example:
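The example itself lies outside this hunk; a hypothetical sketch of such a declaration (the 'mpi' type-tag kind, MPI_Send, and MPI_Datatype are illustrative stand-ins, not defined by this patch) could look like:

  typedef int MPI_Datatype;   /* illustrative stand-in for the real MPI type */

  /* Argument 1 is the annotated pointer; argument 3 carries its type tag. */
  int MPI_Send(void *buf, int count, MPI_Datatype datatype)
      __attribute__(( pointer_with_type_tag(mpi,1,3) ));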

    diff --git a/docs/LibASTMatchers.html b/docs/LibASTMatchers.html new file mode 100644 index 0000000..8142c19 --- /dev/null +++ b/docs/LibASTMatchers.html @@ -0,0 +1,130 @@ + + + +Matching the Clang AST + + + + + + + +
    + +

    Matching the Clang AST

    +

    This document explains how to use Clang's LibASTMatchers to match interesting +nodes of the AST and execute code that uses the matched nodes. Combined with +LibTooling, LibASTMatchers helps to write +code-to-code transformation tools or query tools.

    + +

    We assume basic knowledge about the Clang AST. See the +Introduction to the Clang AST if +you want to learn more about how the AST is structured.

    + + + + +

    Introduction

    + + +

    LibASTMatchers provides a domain specific language to create predicates on Clang's +AST. This DSL is written in and can be used from C++, allowing users to write +a single program to both match AST nodes and access the node's C++ interface +to extract attributes, source locations, or any other information provided on +the AST level.

    + +

    AST matchers are predicates on nodes in the AST. Matchers are created +by calling creator functions that allow building up a tree of matchers, where +inner matchers are used to make the match more specific.

    + +

    For example, to create a matcher that matches all class or union declarations +in the AST of a translation unit, you can call +recordDecl(). +To narrow the match down, for example to find all class or union declarations with the name "Foo", +insert a hasName +matcher: the call recordDecl(hasName("Foo")) returns a matcher that matches classes +or unions that are named "Foo", in any namespace. By default, matchers that accept +multiple inner matchers use an implicit allOf(). +This allows further narrowing down the match, for example to match all classes +that are derived from "Bar": recordDecl(hasName("Foo"), isDerivedFrom("Bar")).
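As a minimal sketch (assuming the ASTMatchers headers and the clang::ast_matchers namespace; the callback class name is illustrative), such a matcher can be stored in a DeclarationMatcher and registered with a MatchFinder:

  #include "clang/ASTMatchers/ASTMatchers.h"
  #include "clang/ASTMatchers/ASTMatchFinder.h"

  using namespace clang::ast_matchers;

  // All classes or unions named "Foo" that are derived from "Bar".
  DeclarationMatcher FooMatcher =
      recordDecl(hasName("Foo"), isDerivedFrom("Bar"));

  class FooCallback : public MatchFinder::MatchCallback {
    virtual void run(const MatchFinder::MatchResult &Result) {
      // React to each match here.
    }
  };

  // Elsewhere: MatchFinder Finder; FooCallback Callback;
  //            Finder.addMatcher(FooMatcher, &Callback);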

    + + +

    How to create a matcher

    + + +

    With more than a thousand classes in the Clang AST, one can quickly get lost +when trying to figure out how to create a matcher for a specific pattern. This +section will teach you how to use a rigorous step-by-step pattern to build the +matcher you are interested in. Note that there will always be matchers missing +for some part of the AST. See the section about how to write +your own AST matchers later in this document.

    + +

The precondition to using the matchers is to understand what the AST +for the code you want to match looks like. The Introduction to the Clang AST +teaches you how to dump a translation unit's AST into a human-readable format.

    + + + + +

    In general, the strategy to create the right matchers is:

    +
      +
1. Find the outermost class in Clang's AST you want to match.
2. Look at the AST Matcher Reference for matchers that either match the +node you're interested in or narrow down attributes on the node.
3. Create your outer match expression. Verify that it works as expected.
4. Examine the matchers for what the next inner node you want to match is.
5. Repeat until the matcher is finished.
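Applying these steps to a small, hypothetical goal, say finding for-loops that contain a '<' comparison somewhere inside them, might look like this sketch (all matcher names below appear in the AST Matcher Reference):

  // 1. Outermost node: the for-loop itself          -> forStmt()
  // 2./3. Outer expression, verified on sample code -> forStmt()
  // 4. Inner node: a BinaryOperator spelled '<'     -> binaryOperator(hasOperatorName("<"))
  // 5. Combine them with a generic traversal matcher:
  StatementMatcher LoopMatcher =
      forStmt(hasDescendant(binaryOperator(hasOperatorName("<"))));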
    + + +

    Binding nodes in match expressions

    + + +

    Matcher expressions allow you to specify which parts of the AST are interesting +for a certain task. Often you will want to then do something with the nodes +that were matched, like building source code transformations.

    + +

    To that end, matchers that match specific AST nodes (so called node matchers) +are bindable; for example, recordDecl(hasName("MyClass")).bind("id") will bind +the matched recordDecl node to the string "id", to be later retrieved in the +match callback.
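A minimal sketch of that round trip (the callback class name is illustrative, and the bound-node accessor getDeclAs is assumed from the BoundNodes interface of this snapshot):

  DeclarationMatcher ClassMatcher = recordDecl(hasName("MyClass")).bind("id");

  class ClassCallback : public MatchFinder::MatchCallback {
    virtual void run(const MatchFinder::MatchResult &Result) {
      // Retrieve the node that the matcher above bound to "id".
      if (const clang::CXXRecordDecl *D =
              Result.Nodes.getDeclAs<clang::CXXRecordDecl>("id")) {
        (void)D;  // work with the matched class declaration
      }
    }
  };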

    + + + + + +

    Writing your own matchers

    + + +

    There are multiple different ways to define a matcher, depending on its +type and flexibility.

    +
      +
    • VariadicDynCastAllOfMatcher<Base, Derived>

      Those match all nodes +of type Base if they can be dynamically casted to Derived. The +names of those matchers are nouns, which closely resemble Derived. +VariadicDynCastAllOfMatchers are the backbone of the matcher hierarchy. Most +often, your match expression will start with one of them, and you can +bind the node they represent to ids for later processing.

      +

VariadicDynCastAllOfMatchers are callable classes that model variadic +template functions in C++03. They take an arbitrary number of Matcher<Derived> +and return a Matcher<Base>.

    • +
    • AST_MATCHER_P(Type, Name, ParamType, Param)

      Most matcher definitions +use the matcher creation macros. Those define both the matcher of type Matcher<Type> +itself, and a matcher-creation function named Name that takes a parameter +of type ParamType and returns the corresponding matcher.

      +

      There are multiple matcher definition macros that deal with polymorphic return +values and different parameter counts. See ASTMatchersMacros.h. +

    • +
    • Matcher creation functions

      Matchers are generated by nesting +calls to matcher creation functions. Most of the time those functions are either +created by using VariadicDynCastAllOfMatcher or the matcher creation macros +(see below). The free-standing functions are an indication that this matcher +is just a combination of other matchers, as is for example the case with +callee.

    • +
    + +
    + + + diff --git a/docs/LibASTMatchersReference.html b/docs/LibASTMatchersReference.html new file mode 100644 index 0000000..ea038e3 --- /dev/null +++ b/docs/LibASTMatchersReference.html @@ -0,0 +1,1938 @@ + + + +AST Matcher Reference + + + + + + + + + +
    + +

    AST Matcher Reference

    + +

    This document shows all currently implemented matchers. The matchers are grouped +by category and node type they match. You can click on matcher names to show the +matcher's source documentation.

    + +

    There are three different basic categories of matchers: +

    +

    + +

Within each category the matchers are ordered by the node type they match on. +Note that if a matcher can match multiple node types, it will appear +multiple times. This means that by searching for Matcher<Stmt> you can +find all matchers that can be used to match on Stmt nodes.

    + +

    The exception to that rule are matchers that can match on any node. Those +are marked with a * and are listed in the beginning of each category.

    + + +

    Node Matchers

    + + +

    Node matchers are at the core of matcher expressions - they specify the type +of node that is expected. Every match expression starts with a node matcher, +which can then be further refined with a narrowing or traversal matcher. All +traversal matchers take node matchers as their arguments.

    + +

    For convenience, all node matchers take an arbitrary number of arguments +and implicitly act as allOf matchers.

    + +

    Node matchers are the only matchers that support the bind("id") call to +bind the matched node to the given string, to be later retrieved from the +match callback.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Return typeNameParameters
    Matcher<Decl>classTemplateDeclMatcher<ClassTemplateDecl>...
    Matches C++ class template declarations.
    +
    +Example matches Z
    +  template<class T> class Z {};
    +
    Matcher<Decl>classTemplateSpecializationDeclMatcher<ClassTemplateSpecializationDecl>...
    Matches C++ class template specializations.
    +
    +Given
    +  template<typename T> class A {};
    +  template<> class A<double> {};
    +  A<int> a;
    +classTemplateSpecializationDecl()
    +  matches the specializations A<int> and A<double>
    +
    Matcher<Decl>constructorDeclMatcher<CXXConstructorDecl>...
    Matches C++ constructor declarations.
    +
    +Example matches Foo::Foo() and Foo::Foo(int)
    +  class Foo {
    +   public:
    +    Foo();
    +    Foo(int);
    +    int DoSomething();
    +  };
    +
    Matcher<Decl>declMatcher<Decl>...
    Matches declarations.
    +
    +Examples matches X, C, and the friend declaration inside C;
    +  void X();
    +  class C {
    +    friend X;
    +  };
    +
    Matcher<Decl>destructorDeclMatcher<CXXDestructorDecl>...
    Matches explicit C++ destructor declarations.
    +
    +Example matches Foo::~Foo()
    +  class Foo {
    +   public:
    +    virtual ~Foo();
    +  };
    +
    Matcher<Decl>enumConstantDeclMatcher<EnumConstantDecl>...
    Matches enum constants.
    +
    +Example matches A, B, C
    +  enum X {
    +    A, B, C
    +  };
    +
    Matcher<Decl>enumDeclMatcher<EnumDecl>...
    Matches enum declarations.
    +
    +Example matches X
    +  enum X {
    +    A, B, C
    +  };
    +
    Matcher<Decl>fieldDeclMatcher<FieldDecl>...
    Matches field declarations.
    +
    +Given
    +  class X { int m; };
    +fieldDecl()
    +  matches 'm'.
    +
    Matcher<Decl>functionDeclMatcher<FunctionDecl>...
    Matches function declarations.
    +
    +Example matches f
    +  void f();
    +
    Matcher<Decl>functionTemplateDeclMatcher<FunctionTemplateDecl>...
    Matches C++ function template declarations.
    +
    +Example matches f
    +  template<class T> void f(T t) {}
    +
    Matcher<Decl>methodDeclMatcher<CXXMethodDecl>...
    Matches method declarations.
    +
    +Example matches y
    +  class X { void y() };
    +
    Matcher<Decl>namedDeclMatcher<NamedDecl>...
    Matches a declaration of anything that could have a name.
    +
    +Example matches X, S, the anonymous union type, i, and U;
    +  typedef int X;
    +  struct S {
    +    union {
    +      int i;
    +    } U;
    +  };
    +
    Matcher<Decl>recordDeclMatcher<CXXRecordDecl>...
    Matches C++ class declarations.
    +
    +Example matches X, Z
    +  class X;
    +  template<class T> class Z {};
    +
    Matcher<Decl>usingDeclMatcher<UsingDecl>...
    Matches using declarations.
    +
    +Given
    +  namespace X { int x; }
    +  using X::x;
    +usingDecl()
    +  matches using X::x 
    Matcher<Decl>varDeclMatcher<VarDecl>...
    Matches variable declarations.
    +
    +Note: this does not match declarations of member variables, which are
    +"field" declarations in Clang parlance.
    +
    +Example matches a
    +  int a;
    +
    Matcher<Expr>boolLiteralMatcher<CXXBoolLiteralExpr>...
    Matches bool literals.
    +
    +Example matches true
    +  true
    +
    Matcher<Expr>castExprMatcher<CastExpr>...
    Matches any cast nodes of Clang's AST.
    +
    +Example: castExpr() matches each of the following:
    +  (int) 3;
    +  const_cast<Expr *>(SubExpr);
    +  char c = 0;
    +but does not match
    +  int i = (0);
    +  int k = 0;
    +
    Matcher<Expr>characterLiteralMatcher<CharacterLiteral>...
    Matches character literals (also matches wchar_t).
    +
+Not matching Hex-encoded chars (e.g. 0x1234, which is an IntegerLiteral),
    +though.
    +
    +Example matches 'a', L'a'
    +  char ch = 'a'; wchar_t chw = L'a';
    +
    Matcher<Expr>constCastExprMatcher<CXXConstCastExpr>...
    Matches a const_cast expression.
    +
    +Example: Matches const_cast<int*>(&r) in
    +  int n = 42;
    +  const int &r(n);
    +  int* p = const_cast<int*>(&r);
    +
    Matcher<Expr>dynamicCastExprMatcher<CXXDynamicCastExpr>...
    Matches a dynamic_cast expression.
    +
    +Example:
    +  dynamicCastExpr()
    +matches
    +  dynamic_cast<D*>(&b);
    +in
    +  struct B { virtual ~B() {} }; struct D : B {};
    +  B b;
    +  D* p = dynamic_cast<D*>(&b);
    +
    Matcher<Expr>explicitCastExprMatcher<ExplicitCastExpr>...
    Matches explicit cast expressions.
    +
    +Matches any cast expression written in user code, whether it be a
    +C-style cast, a functional-style cast, or a keyword cast.
    +
    +Does not match implicit conversions.
    +
    +Note: the name "explicitCast" is chosen to match Clang's terminology, as
    +Clang uses the term "cast" to apply to implicit conversions as well as to
    +actual cast expressions.
    +
    +hasDestinationType.
    +
    +Example: matches all five of the casts in
    +  int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42)))))
    +but does not match the implicit conversion in
    +  long ell = 42;
    +
    Matcher<Expr>functionalCastExprMatcher<CXXFunctionalCastExpr>...
    Matches functional cast expressions
    +
    +Example: Matches Foo(bar);
    +  Foo f = bar;
    +  Foo g = (Foo) bar;
    +  Foo h = Foo(bar);
    +
    Matcher<Expr>implicitCastExprMatcher<ImplicitCastExpr>...
    Matches the implicit cast nodes of Clang's AST.
    +
    +This matches many different places, including function call return value
    +eliding, as well as any type conversions.
    +
    Matcher<Expr>integerLiteralMatcher<IntegerLiteral>...
Matches integer literals of all sizes and encodings.
    +
    +Not matching character-encoded integers such as L'a'.
    +
    +Example matches 1, 1L, 0x1, 1U
    +
    Matcher<Expr>reinterpretCastExprMatcher<CXXReinterpretCastExpr>...
    Matches a reinterpret_cast expression.
    +
    +Either the source expression or the destination type can be matched
    +using has(), but hasDestinationType() is more specific and can be
    +more readable.
    +
    +Example matches reinterpret_cast<char*>(&p) in
    +  void* p = reinterpret_cast<char*>(&p);
    +
    Matcher<Expr>staticCastExprMatcher<CXXStaticCastExpr>...
    Matches a C++ static_cast expression.
    +
    +hasDestinationType
    +reinterpretCast
    +
    +Example:
    +  staticCastExpr()
    +matches
    +  static_cast<long>(8)
    +in
    +  long eight(static_cast<long>(8));
    +
    Matcher<Expr>stringLiteralMatcher<StringLiteral>...
    Matches string literals (also matches wide string literals).
    +
    +Example matches "abcd", L"abcd"
    +  char *s = "abcd"; wchar_t *ws = L"abcd"
    +
    Matcher<Stmt>arraySubscriptExprMatcher<ArraySubscriptExpr>...
    Matches array subscript expressions.
    +
    +Given
    +  int i = a[1];
    +arraySubscriptExpr()
    +  matches "a[1]"
    +
    Matcher<Stmt>binaryOperatorMatcher<BinaryOperator>...
    Matches binary operator expressions.
    +
    +Example matches a || b
    +  !(a || b)
    +
    Matcher<Stmt>bindTemporaryExprMatcher<CXXBindTemporaryExpr>...
    Matches nodes where temporaries are created.
    +
    +Example matches FunctionTakesString(GetStringByValue())
    +    (matcher = bindTemporaryExpr())
    +  FunctionTakesString(GetStringByValue());
    +  FunctionTakesStringByPointer(GetStringPointer());
    +
    Matcher<Stmt>callExprMatcher<CallExpr>...
    Matches call expressions.
    +
    +Example matches x.y() and y()
    +  X x;
    +  x.y();
    +  y();
    +
    Matcher<Stmt>compoundStmtMatcher<CompoundStmt>...
    Matches compound statements.
    +
+Example matches '{}' and '{{}}' in 'for (;;) {{}}'
    +  for (;;) {{}}
    +
    Matcher<Stmt>conditionalOperatorMatcher<ConditionalOperator>...
    Matches conditional operator expressions.
    +
    +Example matches a ? b : c
    +  (a ? b : c) + 42
    +
    Matcher<Stmt>constructExprMatcher<CXXConstructExpr>...
    Matches constructor call expressions (including implicit ones).
    +
    +Example matches string(ptr, n) and ptr within arguments of f
    +    (matcher = constructExpr())
    +  void f(const string &a, const string &b);
    +  char *ptr;
    +  int n;
    +  f(string(ptr, n), ptr);
    +
    Matcher<Stmt>declRefExprMatcher<DeclRefExpr>...
    Matches expressions that refer to declarations.
    +
    +Example matches x in if (x)
    +  bool x;
    +  if (x) {}
    +
    Matcher<Stmt>declStmtMatcher<DeclStmt>...
    Matches declaration statements.
    +
    +Given
    +  int a;
    +declStmt()
    +  matches 'int a'.
    +
    Matcher<Stmt>defaultArgExprMatcher<CXXDefaultArgExpr>...
    Matches the value of a default argument at the call site.
    +
    +Example matches the CXXDefaultArgExpr placeholder inserted for the
    +    default value of the second parameter in the call expression f(42)
    +    (matcher = defaultArgExpr())
    +  void f(int x, int y = 0);
    +  f(42);
    +
    Matcher<Stmt>deleteExprMatcher<CXXDeleteExpr>...
    Matches delete expressions.
    +
    +Given
    +  delete X;
    +deleteExpr()
    +  matches 'delete X'.
    +
    Matcher<Stmt>doStmtMatcher<DoStmt>...
    Matches do statements.
    +
    +Given
    +  do {} while (true);
    +doStmt()
    +  matches 'do {} while(true)'
    +
    Matcher<Stmt>exprMatcher<Expr>...
    Matches expressions.
    +
    +Example matches x()
    +  void f() { x(); }
    +
    Matcher<Stmt>forStmtMatcher<ForStmt>...
    Matches for statements.
    +
    +Example matches 'for (;;) {}'
    +  for (;;) {}
    +
    Matcher<Stmt>ifStmtMatcher<IfStmt>...
    Matches if statements.
    +
    +Example matches 'if (x) {}'
    +  if (x) {}
    +
    Matcher<Stmt>initListExprMatcher<InitListExpr>...
    Matches init list expressions.
    +
    +Given
    +  int a[] = { 1, 2 };
    +  struct B { int x, y; };
    +  B b = { 5, 6 };
+initListExpr()
    +  matches "{ 1, 2 }" and "{ 5, 6 }"
    +
    Matcher<Stmt>materializeTemporaryExprMatcher<MaterializeTemporaryExpr>...
    Matches nodes where temporaries are materialized.
    +
    +Example: Given
    +  struct T {void func()};
    +  T f();
    +  void g(T);
    +materializeTemporaryExpr() matches 'f()' in these statements
    +  T u(f());
    +  g(f());
    +but does not match
    +  f();
    +  f().func();
    +
    Matcher<Stmt>memberCallExprMatcher<CXXMemberCallExpr>...
    Matches member call expressions.
    +
    +Example matches x.y()
    +  X x;
    +  x.y();
    +
    Matcher<Stmt>memberExprMatcher<MemberExpr>...
    Matches member expressions.
    +
    +Given
    +  class Y {
    +    void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
    +    int a; static int b;
    +  };
    +memberExpr()
    +  matches this->x, x, y.x, a, this->b
    +
    Matcher<Stmt>newExprMatcher<CXXNewExpr>...
    Matches new expressions.
    +
    +Given
    +  new X;
    +newExpr()
    +  matches 'new X'.
    +
    Matcher<Stmt>operatorCallExprMatcher<CXXOperatorCallExpr>...
    Matches overloaded operator calls.
    +
    +Note that if an operator isn't overloaded, it won't match. Instead, use
    +binaryOperator matcher.
    +Currently it does not match operators such as new delete.
    +FIXME: figure out why these do not match?
    +
    +Example matches both operator<<((o << b), c) and operator<<(o, b)
    +    (matcher = operatorCallExpr())
    +  ostream &operator<< (ostream &out, int i) { };
    +  ostream &o; int b = 1, c = 1;
    +  o << b << c;
    +
    Matcher<Stmt>stmtMatcher<Stmt>...
    Matches statements.
    +
    +Given
    +  { ++a; }
    +stmt()
    +  matches both the compound statement '{ ++a; }' and '++a'.
    +
    Matcher<Stmt>switchCaseMatcher<SwitchCase>...
    Matches case and default statements inside switch statements.
    +
    +Given
    +  switch(a) { case 42: break; default: break; }
    +switchCase()
    +  matches 'case 42: break;' and 'default: break;'.
    +
    Matcher<Stmt>unaryExprOrTypeTraitExprMatcher<UnaryExprOrTypeTraitExpr>...
    Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
    +
    +Given
    +  Foo x = bar;
    +  int y = sizeof(x) + alignof(x);
    +unaryExprOrTypeTraitExpr()
    +  matches sizeof(x) and alignof(x)
    +
    Matcher<Stmt>unaryOperatorMatcher<UnaryOperator>...
    Matches unary operator expressions.
    +
    +Example matches !a
    +  !a || b
    +
    Matcher<Stmt>whileStmtMatcher<WhileStmt>...
    Matches while statements.
    +
    +Given
    +  while (true) {}
    +whileStmt()
    +  matches 'while (true) {}'.
    +
    + + +

    Narrowing Matchers

    + + +

    Narrowing matchers match certain attributes on the current node, thus +narrowing down the set of nodes of the current type to match on.

    + +

    There are special logical narrowing matchers (allOf, anyOf, anything and unless) +which allow users to create more powerful match expressions.
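For instance, two small sketches built only from matchers listed in this reference:

  // Classes named "Foo" that are not template instantiations:
  recordDecl(hasName("Foo"), unless(isTemplateInstantiation()))

  // Classes named either "Foo" or "Bar":
  recordDecl(anyOf(hasName("Foo"), hasName("Bar")))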

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Return typeNameParameters
    Matcher<*>allOfMatcher<*> P1, Matcher<*> P2
    Matches if all given matchers match.
    +
    +Usable as: Any Matcher
    +
    Matcher<*>anyOfMatcher<*> P1, Matcher<*> P2
    Matches if any of the given matchers matches.
    +
    +Usable as: Any Matcher
    +
    Matcher<*>anything
    Matches any node.
    +
    +Useful when another matcher requires a child matcher, but there's no
    +additional constraint. This will often be used with an explicit conversion
    +to an internal::Matcher<> type such as TypeMatcher.
    +
    +Example: DeclarationMatcher(anything()) matches all declarations, e.g.,
    +"int* p" and "void f()" in
    +  int* p;
    +  void f();
    +
    +Usable as: Any Matcher
    +
    Matcher<*>unlessMatcher<*> InnerMatcher
    Matches if the provided matcher does not match.
    +
    +Example matches Y (matcher = recordDecl(unless(hasName("X"))))
    +  class X {};
    +  class Y {};
    +
    +Usable as: Any Matcher
    +
    Matcher<BinaryOperator>hasOperatorNamestd::string Name
    Matches the operator Name of operator expressions (binary or
    +unary).
    +
    +Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
    +  !(a || b)
    +
    Matcher<CXXBoolLiteral>equalsValueT Value
    Matches literals that are equal to the given value.
    +
    +Example matches true (matcher = boolLiteral(equals(true)))
    +  true
    +
    +Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteral>,
    +           Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
    +
    Matcher<CXXConstructorDecl>isImplicit
    Matches a constructor declaration that has been implicitly added
+by the compiler (e.g. implicit default/copy constructors).
    +
    Matcher<CXXCtorInitializer>isWritten
Matches a constructor initializer if it is explicitly written in
    +code (as opposed to implicitly added by the compiler).
    +
    +Given
    +  struct Foo {
    +    Foo() { }
    +    Foo(int) : foo_("A") { }
    +    string foo_;
    +  };
    +constructorDecl(hasAnyConstructorInitializer(isWritten()))
    +  will match Foo(int), but not Foo()
    +
    Matcher<CXXOperatorCallExpr>hasOverloadedOperatorNamestd::string Name
    Matches overloaded operator names.
    +
    +Matches overloaded operator names specified in strings without the
    +"operator" prefix, such as "<<", for OverloadedOperatorCall's.
    +
    +Example matches a << b
    +    (matcher == operatorCallExpr(hasOverloadedOperatorName("<<")))
    +  a << b;
    +  c && d;  assuming both operator<<
    +           and operator&& are overloaded somewhere.
    +
    Matcher<CXXRecordDecl>isAStringRef BaseName
    Overloaded method as shortcut for isA(hasName(...)).
    +
    Matcher<CXXRecordDecl>isDerivedFromStringRef BaseName
    Overloaded method as shortcut for isDerivedFrom(hasName(...)).
    +
    Matcher<CXXRecordDecl>isExplicitTemplateSpecialization
    Matches explicit template specializations of function, class, or
    +static member variable template instantiations.
    +
    +Given
    +  template<typename T> void A(T t) { }
    +  template<> void A(int N) { }
    +functionDecl(isExplicitTemplateSpecialization())
    +  matches the specialization A<int>().
    +
    +Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
    +
    Matcher<CXXRecordDecl>isTemplateInstantiation
    Matches template instantiations of function, class, or static
    +member variable template instantiations.
    +
    +Given
    +  template <typename T> class X {}; class A {}; X<A> x;
    +or
    +  template <typename T> class X {}; class A {}; template class X<A>;
    +recordDecl(hasName("::X"), isTemplateInstantiation())
    +  matches the template instantiation of X<A>.
    +
    +But given
    +  template <typename T>  class X {}; class A {};
    +  template <> class X<A> {}; X<A> x;
    +recordDecl(hasName("::X"), isTemplateInstantiation())
    +  does not match, as X<A> is an explicit template specialization.
    +
    +Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
    +
    Matcher<CallExpr>argumentCountIsunsigned N
    Checks that a call expression or a constructor call expression has
    +a specific number of arguments (including absent default arguments).
    +
    +Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
    +  void f(int x, int y);
    +  f(0, 0);
    +
    Matcher<CharacterLiteral>equalsValueT Value
    Matches literals that are equal to the given value.
    +
    +Example matches true (matcher = boolLiteral(equals(true)))
    +  true
    +
    +Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteral>,
    +           Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
    +
    Matcher<CompoundStmt>statementCountIsunsigned N
    Checks that a compound statement contains a specific number of
    +child statements.
    +
    +Example: Given
    +  { for (;;) {} }
    +compoundStmt(statementCountIs(0)))
    +  matches '{}'
    +  but does not match the outer compound statement.
    +
    Matcher<DeclStmt>declCountIsunsigned N
    Matches declaration statements that contain a specific number of
    +declarations.
    +
    +Example: Given
    +  int a, b;
    +  int c;
    +  int d = 2, e;
    +declCountIs(2)
    +  matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'.
    +
    Matcher<FloatingLiteral>equalsValueT Value
    Matches literals that are equal to the given value.
    +
    +Example matches true (matcher = boolLiteral(equals(true)))
    +  true
    +
    +Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteral>,
    +           Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
    +
    Matcher<FunctionDecl>isDefinition
    Matches if a declaration has a body attached.
    +
    +Example matches A, va, fa
    +  class A {};
    +  class B;  Doesn't match, as it has no body.
    +  int va;
    +  extern int vb;  Doesn't match, as it doesn't define the variable.
    +  void fa() {}
    +  void fb();  Doesn't match, as it has no body.
    +
    +Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>
    +
    Matcher<FunctionDecl>isExplicitTemplateSpecialization
    Matches explicit template specializations of function, class, or
    +static member variable template instantiations.
    +
    +Given
    +  template<typename T> void A(T t) { }
    +  template<> void A(int N) { }
    +functionDecl(isExplicitTemplateSpecialization())
    +  matches the specialization A<int>().
    +
    +Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
    +
    Matcher<FunctionDecl>isExternC
    Matches extern "C" function declarations.
    +
    +Given:
    +  extern "C" void f() {}
    +  extern "C" { void g() {} }
    +  void h() {}
    +functionDecl(isExternC())
    +  matches the declaration of f and g, but not the declaration h
    +
    Matcher<FunctionDecl>isTemplateInstantiation
    Matches template instantiations of function, class, or static
    +member variable template instantiations.
    +
    +Given
    +  template <typename T> class X {}; class A {}; X<A> x;
    +or
    +  template <typename T> class X {}; class A {}; template class X<A>;
    +recordDecl(hasName("::X"), isTemplateInstantiation())
    +  matches the template instantiation of X<A>.
    +
    +But given
    +  template <typename T>  class X {}; class A {};
    +  template <> class X<A> {}; X<A> x;
    +recordDecl(hasName("::X"), isTemplateInstantiation())
    +  does not match, as X<A> is an explicit template specialization.
    +
    +Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
    +
    Matcher<IntegerLiteral>equalsValueT Value
    Matches literals that are equal to the given value.
    +
    +Example matches true (matcher = boolLiteral(equals(true)))
    +  true
    +
    +Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteral>,
    +           Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
    +
    Matcher<MemberExpr>isArrow
    Matches member expressions that are called with '->' as opposed
    +to '.'.
    +
    +Member calls on the implicit this pointer match as called with '->'.
    +
    +Given
    +  class Y {
    +    void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
    +    int a;
    +    static int b;
    +  };
    +memberExpr(isArrow())
    +  matches this->x, x, y.x, a, this->b
    +
    Matcher<NamedDecl>hasNamestd::string Name
    Matches NamedDecl nodes that have the specified name.
    +
    +Supports specifying enclosing namespaces or classes by prefixing the name
    +with '<enclosing>::'.
    +Does not match typedefs of an underlying type with the given name.
    +
    +Example matches X (Name == "X")
    +  class X;
    +
    +Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
    +  namespace a { namespace b { class X; } }
    +
    Matcher<NamedDecl>matchesNamestd::string RegExp
    Matches NamedDecl nodes whose full names partially match the
    +given RegExp.
    +
    +Supports specifying enclosing namespaces or classes by
    +prefixing the name with '<enclosing>::'.  Does not match typedefs
    +of an underlying type with the given name.
    +
    +Example matches X (regexp == "::X")
    +  class X;
    +
    +Example matches X (regexp is one of "::X", "^foo::.*X", among others)
    +  namespace foo { namespace bar { class X; } }
    +
    Matcher<QualType>asStringstd::string Name
    Matches if the matched type is represented by the given string.
    +
    +Given
    +  class Y { public: void x(); };
    +  void z() { Y* y; y->x(); }
    +callExpr(on(hasType(asString("class Y *"))))
    +  matches y->x()
    +
    Matcher<QualType>isConstQualified
    Matches QualType nodes that are const-qualified, i.e., that
    +include "top-level" const.
    +
    +Given
    +  void a(int);
    +  void b(int const);
    +  void c(const int);
    +  void d(const int*);
    +  void e(int const) {};
    +functionDecl(hasAnyParameter(hasType(isConstQualified())))
    +  matches "void b(int const)", "void c(const int)" and
    +  "void e(int const) {}". It does not match d as there
    +  is no top-level const on the parameter type "const int *".
    +
    Matcher<QualType>isInteger
    Matches QualType nodes that are of integer type.
    +
    +Given
    +  void a(int);
    +  void b(long);
    +  void c(double);
    +functionDecl(hasAnyParameter(hasType(isInteger())))
    +matches "a(int)", "b(long)", but not "c(double)".
    +
    Matcher<TagDecl>isDefinition
    Matches if a declaration has a body attached.
    +
    +Example matches A, va, fa
    +  class A {};
    +  class B;  Doesn't match, as it has no body.
    +  int va;
    +  extern int vb;  Doesn't match, as it doesn't define the variable.
    +  void fa() {}
    +  void fb();  Doesn't match, as it has no body.
    +
    +Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>
    +
    Matcher<UnaryExprOrTypeTraitExpr>ofKindUnaryExprOrTypeTrait Kind
    Matches unary expressions of a certain kind.
    +
    +Given
    +  int x;
    +  int s = sizeof(x) + alignof(x)
    +unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
    +  matches sizeof(x)
    +
    Matcher<UnaryOperator>hasOperatorNamestd::string Name
    Matches the operator Name of operator expressions (binary or
    +unary).
    +
    +Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
    +  !(a || b)
    +
    Matcher<VarDecl>isDefinition
    Matches if a declaration has a body attached.
    +
    +Example matches A, va, fa
    +  class A {};
    +  class B;  Doesn't match, as it has no body.
    +  int va;
    +  extern int vb;  Doesn't match, as it doesn't define the variable.
    +  void fa() {}
    +  void fb();  Doesn't match, as it has no body.
    +
    +Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>
    +
    Matcher<VarDecl>isExplicitTemplateSpecialization
    Matches explicit template specializations of function, class, or
    +static member variable template instantiations.
    +
    +Given
    +  template<typename T> void A(T t) { }
    +  template<> void A(int N) { }
    +functionDecl(isExplicitTemplateSpecialization())
    +  matches the specialization A<int>().
    +
    +Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
    +
Matcher<VarDecl> isTemplateInstantiation()
    Matches template instantiations of function, class, or static
    +member variable template instantiations.
    +
    +Given
    +  template <typename T> class X {}; class A {}; X<A> x;
    +or
    +  template <typename T> class X {}; class A {}; template class X<A>;
    +recordDecl(hasName("::X"), isTemplateInstantiation())
    +  matches the template instantiation of X<A>.
    +
    +But given
    +  template <typename T>  class X {}; class A {};
    +  template <> class X<A> {}; X<A> x;
    +recordDecl(hasName("::X"), isTemplateInstantiation())
    +  does not match, as X<A> is an explicit template specialization.
    +
    +Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl>
    +

    AST Traversal Matchers


    Traversal matchers specify the relationship to other nodes that are +reachable from the current node.


    Note that there are special traversal matchers (has, hasDescendant, forEach and +forEachDescendant) which work on all nodes and allow users to write more generic +match expressions.

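As a rough illustration of how a traversal matcher is driven from code (the IfCounter callback and the test snippet below are made up for this sketch, not taken from the reference), a matcher built with hasDescendant can be registered with a MatchFinder and run over a snippet using the Tooling helpers shown later in this patch:

  #include "clang/ASTMatchers/ASTMatchFinder.h"
  #include "clang/ASTMatchers/ASTMatchers.h"
  #include "clang/Tooling/Tooling.h"

  using namespace clang::ast_matchers;

  // Counts functions that contain an if statement anywhere below them.
  class IfCounter : public MatchFinder::MatchCallback {
  public:
    IfCounter() : Count(0) {}
    virtual void run(const MatchFinder::MatchResult &Result) { ++Count; }
    unsigned Count;
  };

  int main() {
    IfCounter Counter;
    MatchFinder Finder;
    // hasDescendant relates the functionDecl node to any node reachable
    // below it in the AST.
    Finder.addMatcher(functionDecl(hasDescendant(ifStmt())), &Counter);
    clang::tooling::runToolOnCode(
        clang::tooling::newFrontendActionFactory(&Finder)->create(),
        "void f() { if (true) {} }");
    return Counter.Count == 1 ? 0 : 1;
  }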
Return type  Name  Parameters
Matcher<*> forEach(Matcher<ChildT> ChildMatcher)
    Matches AST nodes that have child AST nodes that match the
    +provided matcher.
    +
+Example matches X, Y (matcher = recordDecl(forEach(recordDecl(hasName("X")))))
    +  class X {};  Matches X, because X::X is a class of name X inside X.
    +  class Y { class X {}; };
    +  class Z { class Y { class X {}; }; };  Does not match Z.
    +
    +ChildT must be an AST base type.
    +
    +As opposed to 'has', 'forEach' will cause a match for each result that
    +matches instead of only on the first one.
    +
    +Usable as: Any Matcher
    +
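To make the contrast with has() concrete, here is an illustrative matcher (EachField and the binding id "m" are invented for this sketch): registered with a MatchFinder as in the sketch above and run over "class Outer { int a; int b; };", the callback is invoked once per matched field instead of once per Outer.

  using namespace clang::ast_matchers;

  // Fires the callback for 'a' and again for 'b'; with has() in place of
  // forEach() it would fire only for the first matching child.
  DeclarationMatcher EachField =
      recordDecl(hasName("Outer"), forEach(fieldDecl().bind("m")));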
Matcher<*> forEachDescendant(Matcher<DescendantT> DescendantMatcher)
    Matches AST nodes that have descendant AST nodes that match the
    +provided matcher.
    +
    +Example matches X, A, B, C
    +    (matcher = recordDecl(forEachDescendant(recordDecl(hasName("X")))))
    +  class X {};  Matches X, because X::X is a class of name X inside X.
    +  class A { class X {}; };
    +  class B { class C { class X {}; }; };
    +
    +DescendantT must be an AST base type.
    +
    +As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for
    +each result that matches instead of only on the first one.
    +
    +Note: Recursively combined ForEachDescendant can cause many matches:
    +  recordDecl(forEachDescendant(recordDecl(forEachDescendant(recordDecl()))))
    +will match 10 times (plus injected class name matches) on:
    +  class A { class B { class C { class D { class E {}; }; }; }; };
    +
    +Usable as: Any Matcher
    +
Matcher<*> has(Matcher<ChildT> ChildMatcher)
    Matches AST nodes that have child AST nodes that match the
    +provided matcher.
    +
+Example matches X, Y (matcher = recordDecl(has(recordDecl(hasName("X")))))
    +  class X {};  Matches X, because X::X is a class of name X inside X.
    +  class Y { class X {}; };
    +  class Z { class Y { class X {}; }; };  Does not match Z.
    +
    +ChildT must be an AST base type.
    +
    +Usable as: Any Matcher
    +
Matcher<*> hasAncestor(Matcher<AncestorT> AncestorMatcher)
    Matches AST nodes that have an ancestor that matches the provided
    +matcher.
    +
    +Given
    +void f() { if (true) { int x = 42; } }
    +void g() { for (;;) { int x = 43; } }
+expr(integerLiteral(hasAncestor(ifStmt()))) matches 42, but not 43.
    +
    +Usable as: Any Matcher
    +
Matcher<*> hasDescendant(Matcher<DescendantT> DescendantMatcher)
    Matches AST nodes that have descendant AST nodes that match the
    +provided matcher.
    +
    +Example matches X, Y, Z
    +    (matcher = recordDecl(hasDescendant(recordDecl(hasName("X")))))
    +  class X {};  Matches X, because X::X is a class of name X inside X.
    +  class Y { class X {}; };
    +  class Z { class Y { class X {}; }; };
    +
    +DescendantT must be an AST base type.
    +
    +Usable as: Any Matcher
    +
Matcher<ArraySubscriptExpr> hasBase(Matcher<Expr> InnerMatcher)
    Matches the base expression of an array subscript expression.
    +
    +Given
    +  int i[5];
    +  void f() { i[1] = 42; }
+arraySubscriptExpr(hasBase(implicitCastExpr(
    +    hasSourceExpression(declRefExpr()))))
    +  matches i[1] with the declRefExpr() matching i
    +
Matcher<ArraySubscriptExpr> hasIndex(Matcher<Expr> InnerMatcher)
    Matches the index expression of an array subscript expression.
    +
    +Given
    +  int i[5];
    +  void f() { i[1] = 42; }
+arraySubscriptExpr(hasIndex(integerLiteral()))
    +  matches i[1] with the integerLiteral() matching 1
    +
Matcher<BinaryOperator> hasEitherOperand(Matcher<Expr> InnerMatcher)
    Matches if either the left hand side or the right hand side of a
    +binary operator matches.
    +
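No example is generated for hasEitherOperand; an illustrative one in the style of the other entries would be:

Example matches a + 1 and 1 + a
    (matcher = binaryOperator(hasEitherOperand(integerLiteral(equals(1)))))
  void f(int a) { a + 1; 1 + a; }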
Matcher<BinaryOperator> hasLHS(Matcher<Expr> InnerMatcher)
    Matches the left hand side of binary operator expressions.
    +
    +Example matches a (matcher = binaryOperator(hasLHS()))
    +  a || b
    +
Matcher<BinaryOperator> hasRHS(Matcher<Expr> InnerMatcher)
    Matches the right hand side of binary operator expressions.
    +
    +Example matches b (matcher = binaryOperator(hasRHS()))
    +  a || b
    +
Matcher<CXXConstructExpr> hasDeclaration(Matcher<Decl> InnerMatcher)
    Matches a type if the declaration of the type matches the given
    +matcher.
    +
    +Usable as: Matcher<QualType>, Matcher<CallExpr>, Matcher<CXXConstructExpr>
    +
Matcher<CXXConstructorDecl> hasAnyConstructorInitializer(Matcher<CXXCtorInitializer> InnerMatcher)
    Matches a constructor initializer.
    +
    +Given
    +  struct Foo {
    +    Foo() : foo_(1) { }
    +    int foo_;
    +  };
    +recordDecl(has(constructorDecl(hasAnyConstructorInitializer(anything()))))
    +  record matches Foo, hasAnyConstructorInitializer matches foo_(1)
    +
Matcher<CXXCtorInitializer> forField(Matcher<FieldDecl> InnerMatcher)
    Matches the field declaration of a constructor initializer.
    +
    +Given
    +  struct Foo {
    +    Foo() : foo_(1) { }
    +    int foo_;
    +  };
    +recordDecl(has(constructorDecl(hasAnyConstructorInitializer(
    +    forField(hasName("foo_"))))))
    +  matches Foo
    +with forField matching foo_
    +
Matcher<CXXCtorInitializer> withInitializer(Matcher<Expr> InnerMatcher)
    Matches the initializer expression of a constructor initializer.
    +
    +Given
    +  struct Foo {
    +    Foo() : foo_(1) { }
    +    int foo_;
    +  };
    +recordDecl(has(constructorDecl(hasAnyConstructorInitializer(
    +    withInitializer(integerLiteral(equals(1)))))))
    +  matches Foo
    +with withInitializer matching (1)
    +
Matcher<CXXMemberCallExpr> on(Matcher<Expr> InnerMatcher)
    Matches on the implicit object argument of a member call expression.
    +
    +Example matches y.x() (matcher = callExpr(on(hasType(recordDecl(hasName("Y"))))))
    +  class Y { public: void x(); };
    +  void z() { Y y; y.x(); }",
    +
    +FIXME: Overload to allow directly matching types?
    +
Matcher<CXXMemberCallExpr> onImplicitObjectArgument(Matcher<Expr> InnerMatcher)
Matcher<CXXMemberCallExpr> thisPointerType(Matcher<Decl> InnerMatcher)
    Overloaded to match the type's declaration.
    +
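thisPointerType comes without an example; an illustrative one (assuming the memberCallExpr node matcher of this release) would be:

Given
  class Y { public: void x(); };
  void z() { Y y; y.x(); }
memberCallExpr(thisPointerType(recordDecl(hasName("Y"))))
  matches y.x()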
Matcher<CXXMethodDecl> ofClass(Matcher<CXXRecordDecl> InnerMatcher)
    Matches the class declaration that the given method declaration
    +belongs to.
    +
    +FIXME: Generalize this for other kinds of declarations.
    +FIXME: What other kind of declarations would we need to generalize
    +this to?
    +
    +Example matches A() in the last line
    +    (matcher = constructExpr(hasDeclaration(methodDecl(
    +        ofClass(hasName("A"))))))
    +  class A {
    +   public:
    +    A();
    +  };
    +  A a = A();
    +
Matcher<CXXRecordDecl> isA(Matcher<NamedDecl> Base)
    Similar to isDerivedFrom(), but also matches classes that directly
    +match Base.
    +
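isA also lacks a generated example; an illustrative one would be:

Example matches X, Y (Base == hasName("X"))
  class X {};
  class Y : public X {};
recordDecl(isA(hasName("X")))
  matches both X itself and the derived class Y.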
Matcher<CXXRecordDecl> isDerivedFrom(Matcher<NamedDecl> Base)
    Matches C++ classes that are directly or indirectly derived from
    +a class matching Base.
    +
    +Note that a class is not considered to be derived from itself.
    +
    +Example matches Y, Z, C (Base == hasName("X"))
    +  class X;
    +  class Y : public X {};  directly derived
    +  class Z : public Y {};  indirectly derived
    +  typedef X A;
    +  typedef A B;
    +  class C : public B {};  derived from a typedef of X
    +
    +In the following example, Bar matches isDerivedFrom(hasName("X")):
    +  class Foo;
    +  typedef Foo X;
    +  class Bar : public Foo {};  derived from a type that X is a typedef of
    +
Matcher<CallExpr> callee(Matcher<Decl> InnerMatcher)
    Matches if the call expression's callee's declaration matches the
    +given matcher.
    +
    +Example matches y.x() (matcher = callExpr(callee(methodDecl(hasName("x")))))
    +  class Y { public: void x(); };
+  void z() { Y y; y.x(); }
    +
Matcher<CallExpr> hasAnyArgument(Matcher<Expr> InnerMatcher)
    Matches any argument of a call expression or a constructor call
    +expression.
    +
    +Given
    +  void x(int, int, int) { int y; x(1, y, 42); }
    +callExpr(hasAnyArgument(declRefExpr()))
    +  matches x(1, y, 42)
    +with hasAnyArgument(...)
    +  matching y
    +
Matcher<CallExpr> hasArgument(unsigned N, Matcher<Expr> InnerMatcher)
    Matches the n'th argument of a call expression or a constructor
    +call expression.
    +
    +Example matches y in x(y)
    +    (matcher = callExpr(hasArgument(0, declRefExpr())))
    +  void x(int) { int y; x(y); }
    +
Matcher<CallExpr> hasDeclaration(Matcher<Decl> InnerMatcher)
    Matches a type if the declaration of the type matches the given
    +matcher.
    +
    +Usable as: Matcher<QualType>, Matcher<CallExpr>, Matcher<CXXConstructExpr>
    +
Matcher<CastExpr> hasSourceExpression(Matcher<Expr> InnerMatcher)
    Matches if the cast's source expression matches the given matcher.
    +
    +Example: matches "a string" (matcher =
    +                                 hasSourceExpression(constructExpr()))
    +class URL { URL(string); };
    +URL url = "a string";
    +
Matcher<ClassTemplateSpecializationDecl> hasAnyTemplateArgument(Matcher<TemplateArgument> InnerMatcher)
    Matches classTemplateSpecializations that have at least one
    +TemplateArgument matching the given InnerMatcher.
    +
    +Given
    +  template<typename T> class A {};
    +  template<> class A<double> {};
    +  A<int> a;
    +classTemplateSpecializationDecl(hasAnyTemplateArgument(
    +    refersToType(asString("int"))))
    +  matches the specialization A<int>
    +
Matcher<ClassTemplateSpecializationDecl> hasTemplateArgument(unsigned N, Matcher<TemplateArgument> InnerMatcher)
    Matches classTemplateSpecializations where the n'th TemplateArgument
    +matches the given InnerMatcher.
    +
    +Given
    +  template<typename T, typename U> class A {};
    +  A<bool, int> b;
    +  A<int, bool> c;
    +classTemplateSpecializationDecl(hasTemplateArgument(
    +    1, refersToType(asString("int"))))
    +  matches the specialization A<bool, int>
    +
Matcher<CompoundStmt> hasAnySubstatement(Matcher<Stmt> InnerMatcher)
    Matches compound statements where at least one substatement matches
    +a given matcher.
    +
    +Given
    +  { {}; 1+2; }
    +hasAnySubstatement(compoundStmt())
    +  matches '{ {}; 1+2; }'
    +with compoundStmt()
    +  matching '{}'
    +
Matcher<ConditionalOperator> hasCondition(Matcher<Expr> InnerMatcher)
    Matches the condition expression of an if statement, for loop,
    +or conditional operator.
    +
    +Example matches true (matcher = hasCondition(boolLiteral(equals(true))))
    +  if (true) {}
    +
Matcher<ConditionalOperator> hasFalseExpression(Matcher<Expr> InnerMatcher)
    Matches the false branch expression of a conditional operator.
    +
    +Example matches b
    +  condition ? a : b
    +
Matcher<ConditionalOperator> hasTrueExpression(Matcher<Expr> InnerMatcher)
    Matches the true branch expression of a conditional operator.
    +
    +Example matches a
    +  condition ? a : b
    +
Matcher<DeclRefExpr> throughUsingDecl(Matcher<UsingShadowDecl> InnerMatcher)
    Matches a DeclRefExpr that refers to a declaration through a
    +specific using shadow declaration.
    +
    +FIXME: This currently only works for functions. Fix.
    +
    +Given
    +  namespace a { void f() {} }
    +  using a::f;
    +  void g() {
    +    f();     Matches this ..
    +    a::f();  .. but not this.
    +  }
+declRefExpr(throughUsingDecl(anything()))
    +  matches f()
    +
Matcher<DeclRefExpr> to(Matcher<Decl> InnerMatcher)
    Matches a DeclRefExpr that refers to a declaration that matches the
    +specified matcher.
    +
    +Example matches x in if(x)
    +    (matcher = declRefExpr(to(varDecl(hasName("x")))))
    +  bool x;
    +  if (x) {}
    +
Matcher<DeclStmt> containsDeclaration(unsigned N, Matcher<Decl> InnerMatcher)
    Matches the n'th declaration of a declaration statement.
    +
    +Note that this does not work for global declarations because the AST
    +breaks up multiple-declaration DeclStmt's into multiple single-declaration
    +DeclStmt's.
    +Example: Given non-global declarations
    +  int a, b = 0;
    +  int c;
    +  int d = 2, e;
    +declStmt(containsDeclaration(
    +      0, varDecl(hasInitializer(anything()))))
    +  matches only 'int d = 2, e;', and
    +declStmt(containsDeclaration(1, varDecl()))
    +  matches 'int a, b = 0' as well as 'int d = 2, e;'
    +  but 'int c;' is not matched.
    +
Matcher<DeclStmt> hasSingleDecl(Matcher<Decl> InnerMatcher)
    Matches the Decl of a DeclStmt which has a single declaration.
    +
    +Given
    +  int a, b;
    +  int c;
    +declStmt(hasSingleDecl(anything()))
    +  matches 'int c;' but not 'int a, b;'.
    +
Matcher<DoStmt> hasBody(Matcher<Stmt> InnerMatcher)
    Matches a 'for', 'while', or 'do while' statement that has
    +a given body.
    +
    +Given
    +  for (;;) {}
    +hasBody(compoundStmt())
    +  matches 'for (;;) {}'
    +with compoundStmt()
    +  matching '{}'
    +
Matcher<DoStmt> hasCondition(Matcher<Expr> InnerMatcher)
    Matches the condition expression of an if statement, for loop,
    +or conditional operator.
    +
    +Example matches true (matcher = hasCondition(boolLiteral(equals(true))))
    +  if (true) {}
    +
Matcher<ExplicitCastExpr> hasDestinationType(Matcher<QualType> InnerMatcher)
    Matches casts whose destination type matches a given matcher.
    +
    +(Note: Clang's AST refers to other conversions as "casts" too, and calls
    +actual casts "explicit" casts.)
    +
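An illustrative example (not from the generated reference; it assumes the explicitCastExpr node matcher) would be:

Given
  int i = 7;
  char c = (char) i;
explicitCastExpr(hasDestinationType(asString("char")))
  matches (char) i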
Matcher<Expr> hasType(Matcher<Decl> InnerMatcher)
    Overloaded to match the declaration of the expression's or value
    +declaration's type.
    +
    +In case of a value declaration (for example a variable declaration),
    +this resolves one layer of indirection. For example, in the value
    +declaration "X x;", recordDecl(hasName("X")) matches the declaration of X,
    +while varDecl(hasType(recordDecl(hasName("X")))) matches the declaration
+of x.
    +
    +Example matches x (matcher = expr(hasType(recordDecl(hasName("X")))))
    +            and z (matcher = varDecl(hasType(recordDecl(hasName("X")))))
    + class X {};
    + void y(X &x) { x; X z; }
    +
    +Usable as: Matcher<Expr>, Matcher<ValueDecl>
    +
Matcher<Expr> ignoringImpCasts(Matcher<Expr> InnerMatcher)
    Matches expressions that match InnerMatcher after any implicit casts
    +are stripped off.
    +
    +Parentheses and explicit casts are not discarded.
    +Given
    +  int arr[5];
    +  int a = 0;
    +  char b = 0;
    +  const int c = a;
    +  int *d = arr;
    +  long e = (long) 0l;
    +The matchers
    +   varDecl(hasInitializer(ignoringImpCasts(integerLiteral())))
    +   varDecl(hasInitializer(ignoringImpCasts(declRefExpr())))
    +would match the declarations for a, b, c, and d, but not e.
    +While
    +   varDecl(hasInitializer(integerLiteral()))
    +   varDecl(hasInitializer(declRefExpr()))
+would only match the declaration for a.
    +
Matcher<Expr> ignoringParenCasts(Matcher<Expr> InnerMatcher)
    Matches expressions that match InnerMatcher after parentheses and
    +casts are stripped off.
    +
    +Implicit and non-C Style casts are also discarded.
    +Given
    +  int a = 0;
    +  char b = (0);
    +  void* c = reinterpret_cast<char*>(0);
    +  char d = char(0);
    +The matcher
    +   varDecl(hasInitializer(ignoringParenCasts(integerLiteral())))
    +would match the declarations for a, b, c, and d.
    +while
    +   varDecl(hasInitializer(integerLiteral()))
    +only match the declaration for a.
    +
Matcher<Expr> ignoringParenImpCasts(Matcher<Expr> InnerMatcher)
    Matches expressions that match InnerMatcher after implicit casts and
    +parentheses are stripped off.
    +
    +Explicit casts are not discarded.
    +Given
    +  int arr[5];
    +  int a = 0;
    +  char b = (0);
    +  const int c = a;
    +  int *d = (arr);
    +  long e = ((long) 0l);
    +The matchers
    +   varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral())))
    +   varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr())))
    +would match the declarations for a, b, c, and d, but not e.
    +while
    +   varDecl(hasInitializer(integerLiteral()))
    +   varDecl(hasInitializer(declRefExpr()))
    +would only match the declaration for a.
    +
Matcher<ForStmt> hasBody(Matcher<Stmt> InnerMatcher)
    Matches a 'for', 'while', or 'do while' statement that has
    +a given body.
    +
    +Given
    +  for (;;) {}
    +hasBody(compoundStmt())
    +  matches 'for (;;) {}'
    +with compoundStmt()
    +  matching '{}'
    +
Matcher<ForStmt> hasCondition(Matcher<Expr> InnerMatcher)
    Matches the condition expression of an if statement, for loop,
    +or conditional operator.
    +
    +Example matches true (matcher = hasCondition(boolLiteral(equals(true))))
    +  if (true) {}
    +
Matcher<ForStmt> hasIncrement(Matcher<Stmt> InnerMatcher)
    Matches the increment statement of a for loop.
    +
    +Example:
    +    forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
    +matches '++x' in
    +    for (x; x < N; ++x) { }
    +
Matcher<ForStmt> hasLoopInit(Matcher<Stmt> InnerMatcher)
    Matches the initialization statement of a for loop.
    +
    +Example:
    +    forStmt(hasLoopInit(declStmt()))
    +matches 'int x = 0' in
    +    for (int x = 0; x < N; ++x) { }
    +
Matcher<FunctionDecl> hasAnyParameter(Matcher<ParmVarDecl> InnerMatcher)
    Matches any parameter of a function declaration.
    +
    +Does not match the 'this' parameter of a method.
    +
    +Given
    +  class X { void f(int x, int y, int z) {} };
    +methodDecl(hasAnyParameter(hasName("y")))
    +  matches f(int x, int y, int z) {}
    +with hasAnyParameter(...)
    +  matching int y
    +
Matcher<FunctionDecl> hasParameter(unsigned N, Matcher<ParmVarDecl> InnerMatcher)
    Matches the n'th parameter of a function declaration.
    +
    +Given
    +  class X { void f(int x) {} };
    +methodDecl(hasParameter(0, hasType(varDecl())))
    +  matches f(int x) {}
    +with hasParameter(...)
    +  matching int x
    +
Matcher<FunctionDecl> returns(Matcher<QualType> InnerMatcher)
    Matches the return type of a function declaration.
    +
    +Given:
    +  class X { int f() { return 1; } };
    +methodDecl(returns(asString("int")))
    +  matches int f() { return 1; }
    +
Matcher<IfStmt> hasCondition(Matcher<Expr> InnerMatcher)
    Matches the condition expression of an if statement, for loop,
    +or conditional operator.
    +
    +Example matches true (matcher = hasCondition(boolLiteral(equals(true))))
    +  if (true) {}
    +
Matcher<IfStmt> hasConditionVariableStatement(Matcher<DeclStmt> InnerMatcher)
    Matches the condition variable statement in an if statement.
    +
    +Given
    +  if (A* a = GetAPointer()) {}
+hasConditionVariableStatement(...)
    +  matches 'A* a = GetAPointer()'.
    +
Matcher<ImplicitCastExpr> hasImplicitDestinationType(Matcher<QualType> InnerMatcher)
    Matches implicit casts whose destination type matches a given
    +matcher.
    +
    +FIXME: Unit test this matcher
    +
Matcher<MemberExpr> hasObjectExpression(Matcher<Expr> InnerMatcher)
    Matches a member expression where the object expression is
    +matched by a given matcher.
    +
    +Given
    +  struct X { int m; };
    +  void f(X x) { x.m; m; }
+memberExpr(hasObjectExpression(hasType(recordDecl(hasName("X")))))
    +  matches "x.m" and "m"
    +with hasObjectExpression(...)
    +  matching "x" and the implicit object expression of "m" which has type X*.
    +
Matcher<MemberExpr> member(Matcher<ValueDecl> InnerMatcher)
    Matches a member expression where the member is matched by a
    +given matcher.
    +
    +Given
    +  struct { int first, second; } first, second;
    +  int i(second.first);
    +  int j(first.second);
    +memberExpr(member(hasName("first")))
    +  matches second.first
    +  but not first.second (because the member name there is "second").
    +
Matcher<QualType> hasDeclaration(Matcher<Decl> InnerMatcher)
    Matches a type if the declaration of the type matches the given
    +matcher.
    +
    +Usable as: Matcher<QualType>, Matcher<CallExpr>, Matcher<CXXConstructExpr>
    +
Matcher<QualType> pointsTo(Matcher<Decl> InnerMatcher)
    Overloaded to match the pointee type's declaration.
    +
Matcher<QualType> references(Matcher<Decl> InnerMatcher)
    Overloaded to match the referenced type's declaration.
    +
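Neither overload above comes with an example; illustrative ones would be:

Given
  class Y {}; void f() { Y *y; }
varDecl(hasType(pointsTo(recordDecl(hasName("Y")))))
  matches y

Given
  class X {}; void a(X b) { X &x = b; }
varDecl(hasType(references(recordDecl(hasName("X")))))
  matches x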
Matcher<Stmt> alignOfExpr(Matcher<UnaryExprOrTypeTraitExpr> InnerMatcher)
    Same as unaryExprOrTypeTraitExpr, but only matching
    +alignof.
    +
Matcher<Stmt> sizeOfExpr(Matcher<UnaryExprOrTypeTraitExpr> InnerMatcher)
    Same as unaryExprOrTypeTraitExpr, but only matching
    +sizeof.
    +
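An illustrative example for sizeOfExpr (not part of the generated table) would be:

Given
  int x;
  int s = sizeof(x) + alignof(x);
sizeOfExpr(hasArgumentOfType(asString("int")))
  matches sizeof(x) but not alignof(x)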
Matcher<TemplateArgument> refersToDeclaration(Matcher<Decl> InnerMatcher)
    Matches a TemplateArgument that refers to a certain declaration.
    +
    +Given
    +  template<typename T> struct A {};
    +  struct B { B* next; };
    +  A<&B::next> a;
    +classTemplateSpecializationDecl(hasAnyTemplateArgument(
+    refersToDeclaration(fieldDecl(hasName("next")))))
    +  matches the specialization A<&B::next> with fieldDecl(...) matching
    +    B::next
    +
Matcher<TemplateArgument> refersToType(Matcher<QualType> InnerMatcher)
    Matches a TemplateArgument that refers to a certain type.
    +
    +Given
    +  struct X {};
    +  template<typename T> struct A {};
    +  A<X> a;
    +classTemplateSpecializationDecl(hasAnyTemplateArgument(
+    refersToType(recordDecl(hasName("X")))))
    +  matches the specialization A<X>
    +
Matcher<UnaryExprOrTypeTraitExpr> hasArgumentOfType(Matcher<QualType> InnerMatcher)
    Matches unary expressions that have a specific type of argument.
    +
    +Given
    +  int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
+unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
    +  matches sizeof(a) and alignof(c)
    +
Matcher<UnaryOperator> hasUnaryOperand(Matcher<Expr> InnerMatcher)
    Matches if the operand of a unary operator matches.
    +
+Example matches true (matcher = hasUnaryOperand(boolLiteral(equals(true))))
    +  !true
    +
Matcher<UsingDecl> hasAnyUsingShadowDecl(Matcher<UsingShadowDecl> InnerMatcher)
    Matches any using shadow declaration.
    +
    +Given
    +  namespace X { void b(); }
    +  using X::b;
+usingDecl(hasAnyUsingShadowDecl(hasName("b")))
+  matches using X::b
+
Matcher<UsingShadowDecl> hasTargetDecl(Matcher<NamedDecl> InnerMatcher)
    Matches a using shadow declaration where the target declaration is
    +matched by the given matcher.
    +
    +Given
    +  namespace X { int a; void b(); }
    +  using X::a;
    +  using X::b;
    +usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
+  matches using X::b but not using X::a
+
Matcher<ValueDecl> hasType(Matcher<Decl> InnerMatcher)
    Overloaded to match the declaration of the expression's or value
    +declaration's type.
    +
    +In case of a value declaration (for example a variable declaration),
    +this resolves one layer of indirection. For example, in the value
    +declaration "X x;", recordDecl(hasName("X")) matches the declaration of X,
    +while varDecl(hasType(recordDecl(hasName("X")))) matches the declaration
+of x.
    +
    +Example matches x (matcher = expr(hasType(recordDecl(hasName("X")))))
    +            and z (matcher = varDecl(hasType(recordDecl(hasName("X")))))
    + class X {};
    + void y(X &x) { x; X z; }
    +
    +Usable as: Matcher<Expr>, Matcher<ValueDecl>
    +
Matcher<VarDecl> hasInitializer(Matcher<Expr> InnerMatcher)
    Matches a variable declaration that has an initializer expression
    +that matches the given matcher.
    +
    +Example matches x (matcher = varDecl(hasInitializer(callExpr())))
    +  bool y() { return true; }
    +  bool x = y();
    +
Matcher<WhileStmt> hasBody(Matcher<Stmt> InnerMatcher)
    Matches a 'for', 'while', or 'do while' statement that has
    +a given body.
    +
    +Given
    +  for (;;) {}
    +hasBody(compoundStmt())
    +  matches 'for (;;) {}'
    +with compoundStmt()
    +  matching '{}'
    +
Matcher<WhileStmt> hasCondition(Matcher<Expr> InnerMatcher)
    Matches the condition expression of an if statement, for loop,
    +or conditional operator.
    +
    +Example matches true (matcher = hasCondition(boolLiteral(equals(true))))
    +  if (true) {}
    +
diff --git a/docs/LibTooling.html b/docs/LibTooling.html
index ed0ad45..163d24a 100644
--- a/docs/LibTooling.html
+++ b/docs/LibTooling.html
@@ -16,24 +16,27 @@

    LibTooling is a library to support writing standalone tools based on Clang. This document will provide a basic walkthrough of how to write a tool using LibTooling.

    +

For information on how to set up Clang Tooling for LLVM, see
HowToSetupToolingForLLVM.html.

    Introduction

    -

Tools built with LibTooling, like Clang Plugins, run FrontendActions over
code.  In this tutorial, we'll demonstrate the different ways of running
clang's SyntaxOnlyAction, which runs a quick syntax check, over a bunch of
code.

    Parsing a code snippet in memory.

    -

If you ever wanted to run a FrontendAction over some sample code, for example
to unit test parts of the Clang AST, runToolOnCode is what you looked for.
Let me give you an example:

       #include "clang/Tooling/Tooling.h"
     
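(The body of this example is unchanged context that the hunk below elides. As a rough sketch, assuming only the runToolOnCode and SyntaxOnlyAction APIs already mentioned in this document, such a call can look like this:)

  #include "clang/Frontend/FrontendActions.h"
  #include "clang/Tooling/Tooling.h"

  int main() {
    // Parses the snippet entirely in memory and runs a plain syntax check
    // over it; runToolOnCode takes ownership of the action.
    bool Ok = clang::tooling::runToolOnCode(new clang::SyntaxOnlyAction,
                                            "class X {};");
    return Ok ? 0 : 1;
  }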
    @@ -48,53 +51,44 @@ me give you an example:
     

    Writing a standalone tool.

    -

Once you unit tested your FrontendAction to the point where it cannot
possibly break, it's time to create a standalone tool. For a standalone tool
to run clang, it first needs to figure out what command line arguments to use
for a specified file. To that end we create a CompilationDatabase. There are
different ways to create a compilation database, and we need to support all
of them depending on command-line options. The CommonOptionsParser class
takes responsibility for parsing command-line parameters related to
compilation databases and inputs, so that all tools share the implementation.

    -

    Creating a compilation database.

    -

CompilationDatabase provides static factory functions to help with parsing
compile commands from a build directory or the command line. The following
code allows for both explicit specification of a compile command line, as
well as retrieving the compile command lines from a database.

    Parsing common tools options.

    +

CompilationDatabase can be read from a build directory or the command line.
Using CommonOptionsParser allows for explicit specification of a compile
command line, specification of the build path using the -p command-line
option, and automatic location of the compilation database using source file
paths.

    +#include "clang/Tooling/CommonOptionsParser.h"
    +
    +using namespace clang::tooling;
    +
     int main(int argc, const char **argv) {
    -  // First, try to create a fixed compile command database from the command line
    -  // arguments.
    -  llvm::OwningPtr<CompilationDatabase> Compilations(
    -    FixedCompilationDatabase::loadFromCommandLine(argc, argv));
    -
    -  // Next, use normal llvm command line parsing to get the tool specific
    -  // parameters.
    -  cl::ParseCommandLineOptions(argc, argv);
    -
    -  if (!Compilations) {
    -    // In case the user did not specify the compile command line via positional
    -    // command line arguments after "--", try to load the compile commands from
    -    // a database in the specified build directory or auto-detect it from a
    -    // source file.
    -    std::string ErrorMessage;
    -    if (!BuildPath.empty()) {
    -      Compilations.reset(
    -         CompilationDatabase::autoDetectFromDirectory(BuildPath, ErrorMessage));
    -    } else {
    -      Compilations.reset(CompilationDatabase::autoDetectFromSource(
    -          SourcePaths[0], ErrorMessage));
    -    }
    -    // If there is still no valid compile command database, we don't know how
    -    // to run the tool.
    -    if (!Compilations)
    -      llvm::report_fatal_error(ErrorMessage);
    -  }
    +  // CommonOptionsParser constructor will parse arguments and create a
    +  // CompilationDatabase. In case of error it will terminate the program.
    +  CommonOptionsParser OptionsParser(argc, argv);
    +
    +  // Use OptionsParser.GetCompilations() and OptionsParser.GetSourcePathList()
    +  // to retrieve CompilationDatabase and the list of input file paths.
     }
     

    Creating and running a ClangTool.

    -

Once we have a CompilationDatabase, we can create a ClangTool and run our
FrontendAction over some code. For example, to run the SyntaxOnlyAction over
the files "a.cc" and "b.cc" one would write:

       // A clang tool can run over a number of sources in the same process...
       std::vector<std::string> Sources;
    @@ -103,7 +97,7 @@ the files "a.cc" and "b.cc" one would write:
     
       // We hand the CompilationDatabase we created and the sources to run over into
       // the tool constructor.
    -  ClangTool Tool(*Compilations, Sources);
    +  ClangTool Tool(OptionsParser.GetCompilations(), Sources);
     
       // The ClangTool needs a new FrontendAction for each translation unit we run
       // on. Thus, it takes a FrontendActionFactory as parameter. To create a
    @@ -117,40 +111,29 @@ the files "a.cc" and "b.cc" one would write:
     

    Now we combine the two previous steps into our first real tool. This example tool is also checked into the clang tree at tools/clang-check/ClangCheck.cpp.

    -#include "llvm/Support/CommandLine.h"
    +// Declares clang::SyntaxOnlyAction.
     #include "clang/Frontend/FrontendActions.h"
    -#include "clang/Tooling/CompilationDatabase.h"
    +#include "clang/Tooling/CommonOptionsParser.h"
     #include "clang/Tooling/Tooling.h"
    +// Declares llvm::cl::extrahelp.
    +#include "llvm/Support/CommandLine.h"
     
     using namespace clang::tooling;
     using namespace llvm;
     
    -cl::opt<std::string> BuildPath(
    -  "p",
    -  cl::desc("<build-path>"),
    -  cl::Optional);
    +// CommonOptionsParser declares HelpMessage with a description of the common
    +// command-line options related to the compilation database and input files.
    +// It's nice to have this help message in all tools.
    +static cl::extrahelp CommonHelp(CommonOptionsParser::HelpMessage);
     
    -cl::list<std::string> SourcePaths(
    -  cl::Positional,
    -  cl::desc("<source0> [... <sourceN>]"),
    -  cl::OneOrMore);
    +// A help message for this specific tool can be added afterwards.
    +static cl::extrahelp MoreHelp("\nMore help text...");
     
     int main(int argc, const char **argv) {
    -  llvm::OwningPtr<CompilationDatabase> Compilations(
    -    FixedCompilationDatabase::loadFromCommandLine(argc, argv));
    -  cl::ParseCommandLineOptions(argc, argv);
    -  if (!Compilations) {
    -    std::string ErrorMessage;
    -    Compilations.reset(
    -      !BuildPath.empty() ?
    -        CompilationDatabase::autoDetectFromDirectory(BuildPath, ErrorMessage) :
    -        CompilationDatabase::autoDetectFromSource(SourcePaths[0], ErrorMessage)
    -      );
    -    if (!Compilations)
    -      llvm::report_fatal_error(ErrorMessage);
    -  }
    -  ClangTool Tool(*Compilations, SourcePaths);
    -  return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
    +  CommonOptionsParser OptionsParser(argc, argv);
    +  ClangTool Tool(OptionsParser.GetCompilations(),
    +                 OptionsParser.GetSourcePathList());
    +  return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());
     }
     

diff --git a/docs/ObjectiveCLiterals.html b/docs/ObjectiveCLiterals.html
index 11751a6..d5a8a9e 100644
--- a/docs/ObjectiveCLiterals.html
+++ b/docs/ObjectiveCLiterals.html
@@ -178,7 +178,7 @@ A C string literal prefixed by the '@' token denotes an NSStr
     // Partition command line arguments into positional and option arguments.
     NSMutableArray *args = [NSMutableArray new];
    -NSMutableDictionary *options = [NSMutableArray new];
    +NSMutableDictionary *options = [NSMutableDictionary new];
     while (--argc) {
         const char *arg = *++argv;
         if (strncmp(arg, "--", 2) == 0) {
    diff --git a/docs/PCHInternals.html b/docs/PCHInternals.html
    index 28ce1ce..7fed5ba 100644
    --- a/docs/PCHInternals.html
    +++ b/docs/PCHInternals.html
    @@ -2,7 +2,7 @@
               "http://www.w3.org/TR/html4/strict.dtd">
     
     
    -  Precompiled Headers (PCH)
    +  Precompiled Header and Modules Internals
       
       
       \n\n";
    +
    +  // Generate header
    +  R.InsertTextBefore(StartLoc, os.str());
    +  // Generate footer
    +
+  R.InsertTextAfter(EndLoc, "</body></html>\n");
    +}
    +
    +/// SyntaxHighlight - Relex the specified FileID and annotate the HTML with
    +/// information about keywords, macro expansions etc.  This uses the macro
    +/// table state from the end of the file, so it won't be perfectly perfect,
    +/// but it will be reasonably close.
    +void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
    +  RewriteBuffer &RB = R.getEditBuffer(FID);
    +
    +  const SourceManager &SM = PP.getSourceManager();
    +  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
    +  Lexer L(FID, FromFile, SM, PP.getLangOpts());
    +  const char *BufferStart = L.getBufferStart();
    +
    +  // Inform the preprocessor that we want to retain comments as tokens, so we
    +  // can highlight them.
    +  L.SetCommentRetentionState(true);
    +
    +  // Lex all the tokens in raw mode, to avoid entering #includes or expanding
    +  // macros.
    +  Token Tok;
    +  L.LexFromRawLexer(Tok);
    +
    +  while (Tok.isNot(tok::eof)) {
    +    // Since we are lexing unexpanded tokens, all tokens are from the main
    +    // FileID.
    +    unsigned TokOffs = SM.getFileOffset(Tok.getLocation());
    +    unsigned TokLen = Tok.getLength();
    +    switch (Tok.getKind()) {
    +    default: break;
    +    case tok::identifier:
    +      llvm_unreachable("tok::identifier in raw lexing mode!");
    +    case tok::raw_identifier: {
    +      // Fill in Result.IdentifierInfo and update the token kind,
    +      // looking up the identifier in the identifier table.
    +      PP.LookUpIdentifierInfo(Tok);
    +
    +      // If this is a pp-identifier, for a keyword, highlight it as such.
    +      if (Tok.isNot(tok::identifier))
    +        HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+                       "<span class='keyword'>", "</span>");
    +      break;
    +    }
    +    case tok::comment:
    +      HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+                     "<span class='comment'>", "</span>");
    +      break;
    +    case tok::utf8_string_literal:
    +      // Chop off the u part of u8 prefix
    +      ++TokOffs;
    +      --TokLen;
    +      // FALL THROUGH to chop the 8
    +    case tok::wide_string_literal:
    +    case tok::utf16_string_literal:
    +    case tok::utf32_string_literal:
    +      // Chop off the L, u, U or 8 prefix
    +      ++TokOffs;
    +      --TokLen;
    +      // FALL THROUGH.
    +    case tok::string_literal:
    +      // FIXME: Exclude the optional ud-suffix from the highlighted range.
    +      HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+                     "<span class='string_literal'>", "</span>");
    +      break;
    +    case tok::hash: {
    +      // If this is a preprocessor directive, all tokens to end of line are too.
    +      if (!Tok.isAtStartOfLine())
    +        break;
    +
    +      // Eat all of the tokens until we get to the next one at the start of
    +      // line.
    +      unsigned TokEnd = TokOffs+TokLen;
    +      L.LexFromRawLexer(Tok);
    +      while (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof)) {
    +        TokEnd = SM.getFileOffset(Tok.getLocation())+Tok.getLength();
    +        L.LexFromRawLexer(Tok);
    +      }
    +
    +      // Find end of line.  This is a hack.
    +      HighlightRange(RB, TokOffs, TokEnd, BufferStart,
+                     "<span class='directive'>", "</span>");
    +
    +      // Don't skip the next token.
    +      continue;
    +    }
    +    }
    +
    +    L.LexFromRawLexer(Tok);
    +  }
    +}
    +
    +/// HighlightMacros - This uses the macro table state from the end of the
    +/// file, to re-expand macros and insert (into the HTML) information about the
    +/// macro expansions.  This won't be perfectly perfect, but it will be
    +/// reasonably close.
    +void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
    +  // Re-lex the raw token stream into a token buffer.
    +  const SourceManager &SM = PP.getSourceManager();
+  std::vector<Token> TokenStream;
    +
    +  const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
    +  Lexer L(FID, FromFile, SM, PP.getLangOpts());
    +
    +  // Lex all the tokens in raw mode, to avoid entering #includes or expanding
    +  // macros.
    +  while (1) {
    +    Token Tok;
    +    L.LexFromRawLexer(Tok);
    +
    +    // If this is a # at the start of a line, discard it from the token stream.
    +    // We don't want the re-preprocess step to see #defines, #includes or other
    +    // preprocessor directives.
    +    if (Tok.is(tok::hash) && Tok.isAtStartOfLine())
    +      continue;
    +
    +    // If this is a ## token, change its kind to unknown so that repreprocessing
    +    // it will not produce an error.
    +    if (Tok.is(tok::hashhash))
    +      Tok.setKind(tok::unknown);
    +
    +    // If this raw token is an identifier, the raw lexer won't have looked up
    +    // the corresponding identifier info for it.  Do this now so that it will be
    +    // macro expanded when we re-preprocess it.
    +    if (Tok.is(tok::raw_identifier))
    +      PP.LookUpIdentifierInfo(Tok);
    +
    +    TokenStream.push_back(Tok);
    +
    +    if (Tok.is(tok::eof)) break;
    +  }
    +
    +  // Temporarily change the diagnostics object so that we ignore any generated
    +  // diagnostics from this pass.
    +  DiagnosticsEngine TmpDiags(PP.getDiagnostics().getDiagnosticIDs(),
    +                             &PP.getDiagnostics().getDiagnosticOptions(),
    +                      new IgnoringDiagConsumer);
    +
    +  // FIXME: This is a huge hack; we reuse the input preprocessor because we want
    +  // its state, but we aren't actually changing it (we hope). This should really
    +  // construct a copy of the preprocessor.
+  Preprocessor &TmpPP = const_cast<Preprocessor&>(PP);
    +  DiagnosticsEngine *OldDiags = &TmpPP.getDiagnostics();
    +  TmpPP.setDiagnostics(TmpDiags);
    +
    +  // Inform the preprocessor that we don't want comments.
    +  TmpPP.SetCommentRetentionState(false, false);
    +
    +  // We don't want pragmas either. Although we filtered out #pragma, removing
    +  // _Pragma and __pragma is much harder.
    +  bool PragmasPreviouslyEnabled = TmpPP.getPragmasEnabled();
    +  TmpPP.setPragmasEnabled(false);
    +
    +  // Enter the tokens we just lexed.  This will cause them to be macro expanded
    +  // but won't enter sub-files (because we removed #'s).
    +  TmpPP.EnterTokenStream(&TokenStream[0], TokenStream.size(), false, false);
    +
    +  TokenConcatenation ConcatInfo(TmpPP);
    +
    +  // Lex all the tokens.
    +  Token Tok;
    +  TmpPP.Lex(Tok);
    +  while (Tok.isNot(tok::eof)) {
    +    // Ignore non-macro tokens.
    +    if (!Tok.getLocation().isMacroID()) {
    +      TmpPP.Lex(Tok);
    +      continue;
    +    }
    +
    +    // Okay, we have the first token of a macro expansion: highlight the
    +    // expansion by inserting a start tag before the macro expansion and
    +    // end tag after it.
+    std::pair<SourceLocation, SourceLocation> LLoc =
    +      SM.getExpansionRange(Tok.getLocation());
    +
    +    // Ignore tokens whose instantiation location was not the main file.
    +    if (SM.getFileID(LLoc.first) != FID) {
    +      TmpPP.Lex(Tok);
    +      continue;
    +    }
    +
    +    assert(SM.getFileID(LLoc.second) == FID &&
    +           "Start and end of expansion must be in the same ultimate file!");
    +
    +    std::string Expansion = EscapeText(TmpPP.getSpelling(Tok));
    +    unsigned LineLen = Expansion.size();
    +
    +    Token PrevPrevTok;
    +    Token PrevTok = Tok;
    +    // Okay, eat this token, getting the next one.
    +    TmpPP.Lex(Tok);
    +
    +    // Skip all the rest of the tokens that are part of this macro
    +    // instantiation.  It would be really nice to pop up a window with all the
    +    // spelling of the tokens or something.
    +    while (!Tok.is(tok::eof) &&
    +           SM.getExpansionLoc(Tok.getLocation()) == LLoc.first) {
    +      // Insert a newline if the macro expansion is getting large.
    +      if (LineLen > 60) {
    +        Expansion += "
    "; + LineLen = 0; + } + + LineLen -= Expansion.size(); + + // If the tokens were already space separated, or if they must be to avoid + // them being implicitly pasted, add a space between them. + if (Tok.hasLeadingSpace() || + ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok)) + Expansion += ' '; + + // Escape any special characters in the token text. + Expansion += EscapeText(TmpPP.getSpelling(Tok)); + LineLen += Expansion.size(); + + PrevPrevTok = PrevTok; + PrevTok = Tok; + TmpPP.Lex(Tok); + } + + + // Insert the expansion as the end tag, so that multi-line macros all get + // highlighted. + Expansion = "" + Expansion + ""; + + HighlightRange(R, LLoc.first, LLoc.second, + "", Expansion.c_str()); + } + + // Restore the preprocessor's old state. + TmpPP.setDiagnostics(*OldDiags); + TmpPP.setPragmasEnabled(PragmasPreviouslyEnabled); +} diff --git a/lib/Rewrite/Core/Makefile b/lib/Rewrite/Core/Makefile new file mode 100644 index 0000000..8c8d2e4 --- /dev/null +++ b/lib/Rewrite/Core/Makefile @@ -0,0 +1,18 @@ +##===- clang/lib/Rewrite/Makefile --------------------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## +# +# This implements code transformation / rewriting facilities. +# +##===----------------------------------------------------------------------===## + +CLANG_LEVEL := ../../.. +LIBRARYNAME := clangRewriteCore + +include $(CLANG_LEVEL)/Makefile + diff --git a/lib/Rewrite/Core/RewriteRope.cpp b/lib/Rewrite/Core/RewriteRope.cpp new file mode 100644 index 0000000..fe7aa2d --- /dev/null +++ b/lib/Rewrite/Core/RewriteRope.cpp @@ -0,0 +1,807 @@ +//===--- RewriteRope.cpp - Rope specialized for rewriter --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the RewriteRope class, which is a powerful string. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Core/RewriteRope.h" +#include "clang/Basic/LLVM.h" +#include +using namespace clang; + +/// RewriteRope is a "strong" string class, designed to make insertions and +/// deletions in the middle of the string nearly constant time (really, they are +/// O(log N), but with a very low constant factor). +/// +/// The implementation of this datastructure is a conceptual linear sequence of +/// RopePiece elements. Each RopePiece represents a view on a separately +/// allocated and reference counted string. This means that splitting a very +/// long string can be done in constant time by splitting a RopePiece that +/// references the whole string into two rope pieces that reference each half. +/// Once split, another string can be inserted in between the two halves by +/// inserting a RopePiece in between the two others. All of this is very +/// inexpensive: it takes time proportional to the number of RopePieces, not the +/// length of the strings they represent. +/// +/// While a linear sequences of RopePieces is the conceptual model, the actual +/// implementation captures them in an adapted B+ Tree. 
Using a B+ tree (which +/// is a tree that keeps the values in the leaves and has where each node +/// contains a reasonable number of pointers to children/values) allows us to +/// maintain efficient operation when the RewriteRope contains a *huge* number +/// of RopePieces. The basic idea of the B+ Tree is that it allows us to find +/// the RopePiece corresponding to some offset very efficiently, and it +/// automatically balances itself on insertions of RopePieces (which can happen +/// for both insertions and erases of string ranges). +/// +/// The one wrinkle on the theory is that we don't attempt to keep the tree +/// properly balanced when erases happen. Erases of string data can both insert +/// new RopePieces (e.g. when the middle of some other rope piece is deleted, +/// which results in two rope pieces, which is just like an insert) or it can +/// reduce the number of RopePieces maintained by the B+Tree. In the case when +/// the number of RopePieces is reduced, we don't attempt to maintain the +/// standard 'invariant' that each node in the tree contains at least +/// 'WidthFactor' children/values. For our use cases, this doesn't seem to +/// matter. +/// +/// The implementation below is primarily implemented in terms of three classes: +/// RopePieceBTreeNode - Common base class for: +/// +/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece +/// nodes. This directly represents a chunk of the string with those +/// RopePieces contatenated. +/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages +/// up to '2*WidthFactor' other nodes in the tree. + + +//===----------------------------------------------------------------------===// +// RopePieceBTreeNode Class +//===----------------------------------------------------------------------===// + +namespace { + /// RopePieceBTreeNode - Common base class of RopePieceBTreeLeaf and + /// RopePieceBTreeInterior. This provides some 'virtual' dispatching methods + /// and a flag that determines which subclass the instance is. Also + /// important, this node knows the full extend of the node, including any + /// children that it has. This allows efficient skipping over entire subtrees + /// when looking for an offset in the BTree. + class RopePieceBTreeNode { + protected: + /// WidthFactor - This controls the number of K/V slots held in the BTree: + /// how wide it is. Each level of the BTree is guaranteed to have at least + /// 'WidthFactor' elements in it (either ropepieces or children), (except + /// the root, which may have less) and may have at most 2*WidthFactor + /// elements. + enum { WidthFactor = 8 }; + + /// Size - This is the number of bytes of file this node (including any + /// potential children) covers. + unsigned Size; + + /// IsLeaf - True if this is an instance of RopePieceBTreeLeaf, false if it + /// is an instance of RopePieceBTreeInterior. + bool IsLeaf; + + RopePieceBTreeNode(bool isLeaf) : Size(0), IsLeaf(isLeaf) {} + ~RopePieceBTreeNode() {} + public: + + bool isLeaf() const { return IsLeaf; } + unsigned size() const { return Size; } + + void Destroy(); + + /// split - Split the range containing the specified offset so that we are + /// guaranteed that there is a place to do an insertion at the specified + /// offset. The offset is relative, so "0" is the start of the node. + /// + /// If there is no space in this subtree for the extra piece, the extra tree + /// node is returned and must be inserted into a parent. 
+ RopePieceBTreeNode *split(unsigned Offset); + + /// insert - Insert the specified ropepiece into this tree node at the + /// specified offset. The offset is relative, so "0" is the start of the + /// node. + /// + /// If there is no space in this subtree for the extra piece, the extra tree + /// node is returned and must be inserted into a parent. + RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R); + + /// erase - Remove NumBytes from this node at the specified offset. We are + /// guaranteed that there is a split at Offset. + void erase(unsigned Offset, unsigned NumBytes); + + }; +} // end anonymous namespace + +//===----------------------------------------------------------------------===// +// RopePieceBTreeLeaf Class +//===----------------------------------------------------------------------===// + +namespace { + /// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece + /// nodes. This directly represents a chunk of the string with those + /// RopePieces contatenated. Since this is a B+Tree, all values (in this case + /// instances of RopePiece) are stored in leaves like this. To make iteration + /// over the leaves efficient, they maintain a singly linked list through the + /// NextLeaf field. This allows the B+Tree forward iterator to be constant + /// time for all increments. + class RopePieceBTreeLeaf : public RopePieceBTreeNode { + /// NumPieces - This holds the number of rope pieces currently active in the + /// Pieces array. + unsigned char NumPieces; + + /// Pieces - This tracks the file chunks currently in this leaf. + /// + RopePiece Pieces[2*WidthFactor]; + + /// NextLeaf - This is a pointer to the next leaf in the tree, allowing + /// efficient in-order forward iteration of the tree without traversal. + RopePieceBTreeLeaf **PrevLeaf, *NextLeaf; + public: + RopePieceBTreeLeaf() : RopePieceBTreeNode(true), NumPieces(0), + PrevLeaf(0), NextLeaf(0) {} + ~RopePieceBTreeLeaf() { + if (PrevLeaf || NextLeaf) + removeFromLeafInOrder(); + clear(); + } + + bool isFull() const { return NumPieces == 2*WidthFactor; } + + /// clear - Remove all rope pieces from this leaf. + void clear() { + while (NumPieces) + Pieces[--NumPieces] = RopePiece(); + Size = 0; + } + + unsigned getNumPieces() const { return NumPieces; } + + const RopePiece &getPiece(unsigned i) const { + assert(i < getNumPieces() && "Invalid piece ID"); + return Pieces[i]; + } + + const RopePieceBTreeLeaf *getNextLeafInOrder() const { return NextLeaf; } + void insertAfterLeafInOrder(RopePieceBTreeLeaf *Node) { + assert(PrevLeaf == 0 && NextLeaf == 0 && "Already in ordering"); + + NextLeaf = Node->NextLeaf; + if (NextLeaf) + NextLeaf->PrevLeaf = &NextLeaf; + PrevLeaf = &Node->NextLeaf; + Node->NextLeaf = this; + } + + void removeFromLeafInOrder() { + if (PrevLeaf) { + *PrevLeaf = NextLeaf; + if (NextLeaf) + NextLeaf->PrevLeaf = PrevLeaf; + } else if (NextLeaf) { + NextLeaf->PrevLeaf = 0; + } + } + + /// FullRecomputeSizeLocally - This method recomputes the 'Size' field by + /// summing the size of all RopePieces. + void FullRecomputeSizeLocally() { + Size = 0; + for (unsigned i = 0, e = getNumPieces(); i != e; ++i) + Size += getPiece(i).size(); + } + + /// split - Split the range containing the specified offset so that we are + /// guaranteed that there is a place to do an insertion at the specified + /// offset. The offset is relative, so "0" is the start of the node. 
+ /// + /// If there is no space in this subtree for the extra piece, the extra tree + /// node is returned and must be inserted into a parent. + RopePieceBTreeNode *split(unsigned Offset); + + /// insert - Insert the specified ropepiece into this tree node at the + /// specified offset. The offset is relative, so "0" is the start of the + /// node. + /// + /// If there is no space in this subtree for the extra piece, the extra tree + /// node is returned and must be inserted into a parent. + RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R); + + + /// erase - Remove NumBytes from this node at the specified offset. We are + /// guaranteed that there is a split at Offset. + void erase(unsigned Offset, unsigned NumBytes); + + static inline bool classof(const RopePieceBTreeNode *N) { + return N->isLeaf(); + } + }; +} // end anonymous namespace + +/// split - Split the range containing the specified offset so that we are +/// guaranteed that there is a place to do an insertion at the specified +/// offset. The offset is relative, so "0" is the start of the node. +/// +/// If there is no space in this subtree for the extra piece, the extra tree +/// node is returned and must be inserted into a parent. +RopePieceBTreeNode *RopePieceBTreeLeaf::split(unsigned Offset) { + // Find the insertion point. We are guaranteed that there is a split at the + // specified offset so find it. + if (Offset == 0 || Offset == size()) { + // Fastpath for a common case. There is already a splitpoint at the end. + return 0; + } + + // Find the piece that this offset lands in. + unsigned PieceOffs = 0; + unsigned i = 0; + while (Offset >= PieceOffs+Pieces[i].size()) { + PieceOffs += Pieces[i].size(); + ++i; + } + + // If there is already a split point at the specified offset, just return + // success. + if (PieceOffs == Offset) + return 0; + + // Otherwise, we need to split piece 'i' at Offset-PieceOffs. Convert Offset + // to being Piece relative. + unsigned IntraPieceOffset = Offset-PieceOffs; + + // We do this by shrinking the RopePiece and then doing an insert of the tail. + RopePiece Tail(Pieces[i].StrData, Pieces[i].StartOffs+IntraPieceOffset, + Pieces[i].EndOffs); + Size -= Pieces[i].size(); + Pieces[i].EndOffs = Pieces[i].StartOffs+IntraPieceOffset; + Size += Pieces[i].size(); + + return insert(Offset, Tail); +} + + +/// insert - Insert the specified RopePiece into this tree node at the +/// specified offset. The offset is relative, so "0" is the start of the node. +/// +/// If there is no space in this subtree for the extra piece, the extra tree +/// node is returned and must be inserted into a parent. +RopePieceBTreeNode *RopePieceBTreeLeaf::insert(unsigned Offset, + const RopePiece &R) { + // If this node is not full, insert the piece. + if (!isFull()) { + // Find the insertion point. We are guaranteed that there is a split at the + // specified offset so find it. + unsigned i = 0, e = getNumPieces(); + if (Offset == size()) { + // Fastpath for a common case. + i = e; + } else { + unsigned SlotOffs = 0; + for (; Offset > SlotOffs; ++i) + SlotOffs += getPiece(i).size(); + assert(SlotOffs == Offset && "Split didn't occur before insertion!"); + } + + // For an insertion into a non-full leaf node, just insert the value in + // its sorted position. This requires moving later values over. + for (; i != e; --e) + Pieces[e] = Pieces[e-1]; + Pieces[i] = R; + ++NumPieces; + Size += R.size(); + return 0; + } + + // Otherwise, if this is leaf is full, split it in two halves. 
Since this + // node is full, it contains 2*WidthFactor values. We move the first + // 'WidthFactor' values to the LHS child (which we leave in this node) and + // move the last 'WidthFactor' values into the RHS child. + + // Create the new node. + RopePieceBTreeLeaf *NewNode = new RopePieceBTreeLeaf(); + + // Move over the last 'WidthFactor' values from here to NewNode. + std::copy(&Pieces[WidthFactor], &Pieces[2*WidthFactor], + &NewNode->Pieces[0]); + // Replace old pieces with null RopePieces to drop refcounts. + std::fill(&Pieces[WidthFactor], &Pieces[2*WidthFactor], RopePiece()); + + // Decrease the number of values in the two nodes. + NewNode->NumPieces = NumPieces = WidthFactor; + + // Recompute the two nodes' size. + NewNode->FullRecomputeSizeLocally(); + FullRecomputeSizeLocally(); + + // Update the list of leaves. + NewNode->insertAfterLeafInOrder(this); + + // These insertions can't fail. + if (this->size() >= Offset) + this->insert(Offset, R); + else + NewNode->insert(Offset - this->size(), R); + return NewNode; +} + +/// erase - Remove NumBytes from this node at the specified offset. We are +/// guaranteed that there is a split at Offset. +void RopePieceBTreeLeaf::erase(unsigned Offset, unsigned NumBytes) { + // Since we are guaranteed that there is a split at Offset, we start by + // finding the Piece that starts there. + unsigned PieceOffs = 0; + unsigned i = 0; + for (; Offset > PieceOffs; ++i) + PieceOffs += getPiece(i).size(); + assert(PieceOffs == Offset && "Split didn't occur before erase!"); + + unsigned StartPiece = i; + + // Figure out how many pieces completely cover 'NumBytes'. We want to remove + // all of them. + for (; Offset+NumBytes > PieceOffs+getPiece(i).size(); ++i) + PieceOffs += getPiece(i).size(); + + // If we exactly include the last one, include it in the region to delete. + if (Offset+NumBytes == PieceOffs+getPiece(i).size()) + PieceOffs += getPiece(i).size(), ++i; + + // If we completely cover some RopePieces, erase them now. + if (i != StartPiece) { + unsigned NumDeleted = i-StartPiece; + for (; i != getNumPieces(); ++i) + Pieces[i-NumDeleted] = Pieces[i]; + + // Drop references to dead rope pieces. + std::fill(&Pieces[getNumPieces()-NumDeleted], &Pieces[getNumPieces()], + RopePiece()); + NumPieces -= NumDeleted; + + unsigned CoverBytes = PieceOffs-Offset; + NumBytes -= CoverBytes; + Size -= CoverBytes; + } + + // If we completely removed some stuff, we could be done. + if (NumBytes == 0) return; + + // Okay, now might be erasing part of some Piece. If this is the case, then + // move the start point of the piece. + assert(getPiece(StartPiece).size() > NumBytes); + Pieces[StartPiece].StartOffs += NumBytes; + + // The size of this node just shrunk by NumBytes. + Size -= NumBytes; +} + +//===----------------------------------------------------------------------===// +// RopePieceBTreeInterior Class +//===----------------------------------------------------------------------===// + +namespace { + /// RopePieceBTreeInterior - This represents an interior node in the B+Tree, + /// which holds up to 2*WidthFactor pointers to child nodes. + class RopePieceBTreeInterior : public RopePieceBTreeNode { + /// NumChildren - This holds the number of children currently active in the + /// Children array. 
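// A minimal stand-alone version (hypothetical names, WidthFactor fixed at 4)
// of the overflow handling in the full-leaf path of insert() above: the low
// half of the pieces stays in the existing leaf, the high half moves into a
// freshly created sibling, and the insertion is then retried in whichever
// half covers the offset.
#include <cstdio>
#include <vector>

int main() {
  const unsigned WidthFactor = 4;
  std::vector<int> Leaf;                          // stands in for Pieces[]
  for (int p = 1; p <= 8; ++p)
    Leaf.push_back(p);                            // full: 2*WidthFactor entries
  // Move the upper half into the new sibling, then shrink the original.
  std::vector<int> Sibling(Leaf.begin() + WidthFactor, Leaf.end());
  Leaf.resize(WidthFactor);
  std::printf("left keeps %zu pieces, right sibling gets %zu pieces\n",
              Leaf.size(), Sibling.size());
}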
+ unsigned char NumChildren; + RopePieceBTreeNode *Children[2*WidthFactor]; + public: + RopePieceBTreeInterior() : RopePieceBTreeNode(false), NumChildren(0) {} + + RopePieceBTreeInterior(RopePieceBTreeNode *LHS, RopePieceBTreeNode *RHS) + : RopePieceBTreeNode(false) { + Children[0] = LHS; + Children[1] = RHS; + NumChildren = 2; + Size = LHS->size() + RHS->size(); + } + + ~RopePieceBTreeInterior() { + for (unsigned i = 0, e = getNumChildren(); i != e; ++i) + Children[i]->Destroy(); + } + + bool isFull() const { return NumChildren == 2*WidthFactor; } + + unsigned getNumChildren() const { return NumChildren; } + const RopePieceBTreeNode *getChild(unsigned i) const { + assert(i < NumChildren && "invalid child #"); + return Children[i]; + } + RopePieceBTreeNode *getChild(unsigned i) { + assert(i < NumChildren && "invalid child #"); + return Children[i]; + } + + /// FullRecomputeSizeLocally - Recompute the Size field of this node by + /// summing up the sizes of the child nodes. + void FullRecomputeSizeLocally() { + Size = 0; + for (unsigned i = 0, e = getNumChildren(); i != e; ++i) + Size += getChild(i)->size(); + } + + + /// split - Split the range containing the specified offset so that we are + /// guaranteed that there is a place to do an insertion at the specified + /// offset. The offset is relative, so "0" is the start of the node. + /// + /// If there is no space in this subtree for the extra piece, the extra tree + /// node is returned and must be inserted into a parent. + RopePieceBTreeNode *split(unsigned Offset); + + + /// insert - Insert the specified ropepiece into this tree node at the + /// specified offset. The offset is relative, so "0" is the start of the + /// node. + /// + /// If there is no space in this subtree for the extra piece, the extra tree + /// node is returned and must be inserted into a parent. + RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R); + + /// HandleChildPiece - A child propagated an insertion result up to us. + /// Insert the new child, and/or propagate the result further up the tree. + RopePieceBTreeNode *HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS); + + /// erase - Remove NumBytes from this node at the specified offset. We are + /// guaranteed that there is a split at Offset. + void erase(unsigned Offset, unsigned NumBytes); + + static inline bool classof(const RopePieceBTreeNode *N) { + return !N->isLeaf(); + } + }; +} // end anonymous namespace + +/// split - Split the range containing the specified offset so that we are +/// guaranteed that there is a place to do an insertion at the specified +/// offset. The offset is relative, so "0" is the start of the node. +/// +/// If there is no space in this subtree for the extra piece, the extra tree +/// node is returned and must be inserted into a parent. +RopePieceBTreeNode *RopePieceBTreeInterior::split(unsigned Offset) { + // Figure out which child to split. + if (Offset == 0 || Offset == size()) + return 0; // If we have an exact offset, we're already split. + + unsigned ChildOffset = 0; + unsigned i = 0; + for (; Offset >= ChildOffset+getChild(i)->size(); ++i) + ChildOffset += getChild(i)->size(); + + // If already split there, we're done. + if (ChildOffset == Offset) + return 0; + + // Otherwise, recursively split the child. + if (RopePieceBTreeNode *RHS = getChild(i)->split(Offset-ChildOffset)) + return HandleChildPiece(i, RHS); + return 0; // Done! +} + +/// insert - Insert the specified ropepiece into this tree node at the +/// specified offset. 
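// Illustrative sketch (hypothetical names): interior nodes hold no text
// themselves, so split() above and insert()/erase() below all start by
// locating the child that covers an offset, accumulating child sizes and
// recursing with a child-relative offset. The exact boundary test (> vs. >=)
// differs slightly between those callers; this shows the erase-style descent.
#include <cstdio>
#include <vector>

// Sizes[] plays the role of getChild(i)->size(). Returns the chosen child
// index and rewrites Offset to be relative to that child.
static unsigned pickChild(const std::vector<unsigned> &Sizes, unsigned &Offset) {
  unsigned i = 0;
  while (Offset >= Sizes[i]) {                    // skip children wholly before Offset
    Offset -= Sizes[i];
    ++i;
  }
  return i;
}

int main() {
  std::vector<unsigned> Sizes;
  Sizes.push_back(10); Sizes.push_back(5); Sizes.push_back(8);
  unsigned Offset = 13;                           // falls 3 bytes into child 1
  unsigned Child = pickChild(Sizes, Offset);
  std::printf("child %u, child-relative offset %u\n", Child, Offset);
}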
The offset is relative, so "0" is the start of the +/// node. +/// +/// If there is no space in this subtree for the extra piece, the extra tree +/// node is returned and must be inserted into a parent. +RopePieceBTreeNode *RopePieceBTreeInterior::insert(unsigned Offset, + const RopePiece &R) { + // Find the insertion point. We are guaranteed that there is a split at the + // specified offset so find it. + unsigned i = 0, e = getNumChildren(); + + unsigned ChildOffs = 0; + if (Offset == size()) { + // Fastpath for a common case. Insert at end of last child. + i = e-1; + ChildOffs = size()-getChild(i)->size(); + } else { + for (; Offset > ChildOffs+getChild(i)->size(); ++i) + ChildOffs += getChild(i)->size(); + } + + Size += R.size(); + + // Insert at the end of this child. + if (RopePieceBTreeNode *RHS = getChild(i)->insert(Offset-ChildOffs, R)) + return HandleChildPiece(i, RHS); + + return 0; +} + +/// HandleChildPiece - A child propagated an insertion result up to us. +/// Insert the new child, and/or propagate the result further up the tree. +RopePieceBTreeNode * +RopePieceBTreeInterior::HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS) { + // Otherwise the child propagated a subtree up to us as a new child. See if + // we have space for it here. + if (!isFull()) { + // Insert RHS after child 'i'. + if (i + 1 != getNumChildren()) + memmove(&Children[i+2], &Children[i+1], + (getNumChildren()-i-1)*sizeof(Children[0])); + Children[i+1] = RHS; + ++NumChildren; + return 0; + } + + // Okay, this node is full. Split it in half, moving WidthFactor children to + // a newly allocated interior node. + + // Create the new node. + RopePieceBTreeInterior *NewNode = new RopePieceBTreeInterior(); + + // Move over the last 'WidthFactor' values from here to NewNode. + memcpy(&NewNode->Children[0], &Children[WidthFactor], + WidthFactor*sizeof(Children[0])); + + // Decrease the number of values in the two nodes. + NewNode->NumChildren = NumChildren = WidthFactor; + + // Finally, insert the two new children in the side the can (now) hold them. + // These insertions can't fail. + if (i < WidthFactor) + this->HandleChildPiece(i, RHS); + else + NewNode->HandleChildPiece(i-WidthFactor, RHS); + + // Recompute the two nodes' size. + NewNode->FullRecomputeSizeLocally(); + FullRecomputeSizeLocally(); + return NewNode; +} + +/// erase - Remove NumBytes from this node at the specified offset. We are +/// guaranteed that there is a split at Offset. +void RopePieceBTreeInterior::erase(unsigned Offset, unsigned NumBytes) { + // This will shrink this node by NumBytes. + Size -= NumBytes; + + // Find the first child that overlaps with Offset. + unsigned i = 0; + for (; Offset >= getChild(i)->size(); ++i) + Offset -= getChild(i)->size(); + + // Propagate the delete request into overlapping children, or completely + // delete the children as appropriate. + while (NumBytes) { + RopePieceBTreeNode *CurChild = getChild(i); + + // If we are deleting something contained entirely in the child, pass on the + // request. + if (Offset+NumBytes < CurChild->size()) { + CurChild->erase(Offset, NumBytes); + return; + } + + // If this deletion request starts somewhere in the middle of the child, it + // must be deleting to the end of the child. + if (Offset) { + unsigned BytesFromChild = CurChild->size()-Offset; + CurChild->erase(Offset, BytesFromChild); + NumBytes -= BytesFromChild; + // Start at the beginning of the next child. 
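// The slot-opening idiom from HandleChildPiece above in isolation
// (hypothetical names): the fixed Children[] array makes room for a new
// child by sliding the tail of the array up one position with memmove
// before storing the new pointer.
#include <cstdio>
#include <cstring>

int main() {
  int Children[8] = {10, 20, 30, 40};             // 4 of 8 slots in use
  unsigned NumChildren = 4;
  unsigned i = 1;                                 // insert the new child after slot 1
  int NewChild = 25;
  if (i + 1 != NumChildren)                       // slide [i+1, NumChildren) up by one
    std::memmove(&Children[i + 2], &Children[i + 1],
                 (NumChildren - i - 1) * sizeof(Children[0]));
  Children[i + 1] = NewChild;
  ++NumChildren;
  for (unsigned j = 0; j != NumChildren; ++j)
    std::printf("%d ", Children[j]);              // 10 20 25 30 40
  std::printf("\n");
}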
+ Offset = 0; + ++i; + continue; + } + + // If the deletion request completely covers the child, delete it and move + // the rest down. + NumBytes -= CurChild->size(); + CurChild->Destroy(); + --NumChildren; + if (i != getNumChildren()) + memmove(&Children[i], &Children[i+1], + (getNumChildren()-i)*sizeof(Children[0])); + } +} + +//===----------------------------------------------------------------------===// +// RopePieceBTreeNode Implementation +//===----------------------------------------------------------------------===// + +void RopePieceBTreeNode::Destroy() { + if (RopePieceBTreeLeaf *Leaf = dyn_cast(this)) + delete Leaf; + else + delete cast(this); +} + +/// split - Split the range containing the specified offset so that we are +/// guaranteed that there is a place to do an insertion at the specified +/// offset. The offset is relative, so "0" is the start of the node. +/// +/// If there is no space in this subtree for the extra piece, the extra tree +/// node is returned and must be inserted into a parent. +RopePieceBTreeNode *RopePieceBTreeNode::split(unsigned Offset) { + assert(Offset <= size() && "Invalid offset to split!"); + if (RopePieceBTreeLeaf *Leaf = dyn_cast(this)) + return Leaf->split(Offset); + return cast(this)->split(Offset); +} + +/// insert - Insert the specified ropepiece into this tree node at the +/// specified offset. The offset is relative, so "0" is the start of the +/// node. +/// +/// If there is no space in this subtree for the extra piece, the extra tree +/// node is returned and must be inserted into a parent. +RopePieceBTreeNode *RopePieceBTreeNode::insert(unsigned Offset, + const RopePiece &R) { + assert(Offset <= size() && "Invalid offset to insert!"); + if (RopePieceBTreeLeaf *Leaf = dyn_cast(this)) + return Leaf->insert(Offset, R); + return cast(this)->insert(Offset, R); +} + +/// erase - Remove NumBytes from this node at the specified offset. We are +/// guaranteed that there is a split at Offset. +void RopePieceBTreeNode::erase(unsigned Offset, unsigned NumBytes) { + assert(Offset+NumBytes <= size() && "Invalid offset to erase!"); + if (RopePieceBTreeLeaf *Leaf = dyn_cast(this)) + return Leaf->erase(Offset, NumBytes); + return cast(this)->erase(Offset, NumBytes); +} + + +//===----------------------------------------------------------------------===// +// RopePieceBTreeIterator Implementation +//===----------------------------------------------------------------------===// + +static const RopePieceBTreeLeaf *getCN(const void *P) { + return static_cast(P); +} + +// begin iterator. +RopePieceBTreeIterator::RopePieceBTreeIterator(const void *n) { + const RopePieceBTreeNode *N = static_cast(n); + + // Walk down the left side of the tree until we get to a leaf. + while (const RopePieceBTreeInterior *IN = dyn_cast(N)) + N = IN->getChild(0); + + // We must have at least one leaf. + CurNode = cast(N); + + // If we found a leaf that happens to be empty, skip over it until we get + // to something full. + while (CurNode && getCN(CurNode)->getNumPieces() == 0) + CurNode = getCN(CurNode)->getNextLeafInOrder(); + + if (CurNode != 0) + CurPiece = &getCN(CurNode)->getPiece(0); + else // Empty tree, this is an end() iterator. + CurPiece = 0; + CurChar = 0; +} + +void RopePieceBTreeIterator::MoveToNextPiece() { + if (CurPiece != &getCN(CurNode)->getPiece(getCN(CurNode)->getNumPieces()-1)) { + CurChar = 0; + ++CurPiece; + return; + } + + // Find the next non-empty leaf node. 
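// A stand-alone sketch (hypothetical names) of the iteration strategy used
// by RopePieceBTreeIterator above: the constructor descends to the leftmost
// leaf once, and from then on the iterator only follows the NextLeaf chain,
// skipping empty leaves, which keeps each increment constant time.
#include <cstdio>
#include <vector>

struct MiniLeaf {
  std::vector<int> Pieces;        // stands in for the RopePiece array
  MiniLeaf *NextLeaf;             // singly linked list of leaves, left to right
  MiniLeaf() : NextLeaf(0) {}
};

int main() {
  MiniLeaf A, B, C;               // B stays empty on purpose
  A.Pieces.push_back(1); A.Pieces.push_back(2);
  C.Pieces.push_back(3);
  A.NextLeaf = &B; B.NextLeaf = &C;

  for (MiniLeaf *L = &A; L; L = L->NextLeaf) {
    if (L->Pieces.empty())
      continue;                   // mirrors the "skip empty leaves" loops above
    for (unsigned i = 0; i != L->Pieces.size(); ++i)
      std::printf("%d ", L->Pieces[i]);
  }
  std::printf("\n");              // 1 2 3
}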
+ do + CurNode = getCN(CurNode)->getNextLeafInOrder(); + while (CurNode && getCN(CurNode)->getNumPieces() == 0); + + if (CurNode != 0) + CurPiece = &getCN(CurNode)->getPiece(0); + else // Hit end(). + CurPiece = 0; + CurChar = 0; +} + +//===----------------------------------------------------------------------===// +// RopePieceBTree Implementation +//===----------------------------------------------------------------------===// + +static RopePieceBTreeNode *getRoot(void *P) { + return static_cast(P); +} + +RopePieceBTree::RopePieceBTree() { + Root = new RopePieceBTreeLeaf(); +} +RopePieceBTree::RopePieceBTree(const RopePieceBTree &RHS) { + assert(RHS.empty() && "Can't copy non-empty tree yet"); + Root = new RopePieceBTreeLeaf(); +} +RopePieceBTree::~RopePieceBTree() { + getRoot(Root)->Destroy(); +} + +unsigned RopePieceBTree::size() const { + return getRoot(Root)->size(); +} + +void RopePieceBTree::clear() { + if (RopePieceBTreeLeaf *Leaf = dyn_cast(getRoot(Root))) + Leaf->clear(); + else { + getRoot(Root)->Destroy(); + Root = new RopePieceBTreeLeaf(); + } +} + +void RopePieceBTree::insert(unsigned Offset, const RopePiece &R) { + // #1. Split at Offset. + if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset)) + Root = new RopePieceBTreeInterior(getRoot(Root), RHS); + + // #2. Do the insertion. + if (RopePieceBTreeNode *RHS = getRoot(Root)->insert(Offset, R)) + Root = new RopePieceBTreeInterior(getRoot(Root), RHS); +} + +void RopePieceBTree::erase(unsigned Offset, unsigned NumBytes) { + // #1. Split at Offset. + if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset)) + Root = new RopePieceBTreeInterior(getRoot(Root), RHS); + + // #2. Do the erasing. + getRoot(Root)->erase(Offset, NumBytes); +} + +//===----------------------------------------------------------------------===// +// RewriteRope Implementation +//===----------------------------------------------------------------------===// + +/// MakeRopeString - This copies the specified byte range into some instance of +/// RopeRefCountString, and return a RopePiece that represents it. This uses +/// the AllocBuffer object to aggregate requests for small strings into one +/// allocation instead of doing tons of tiny allocations. +RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) { + unsigned Len = End-Start; + assert(Len && "Zero length RopePiece is invalid!"); + + // If we have space for this string in the current alloc buffer, use it. + if (AllocOffs+Len <= AllocChunkSize) { + memcpy(AllocBuffer->Data+AllocOffs, Start, Len); + AllocOffs += Len; + return RopePiece(AllocBuffer, AllocOffs-Len, AllocOffs); + } + + // If we don't have enough room because this specific allocation is huge, + // just allocate a new rope piece for it alone. + if (Len > AllocChunkSize) { + unsigned Size = End-Start+sizeof(RopeRefCountString)-1; + RopeRefCountString *Res = + reinterpret_cast(new char[Size]); + Res->RefCount = 0; + memcpy(Res->Data, Start, End-Start); + return RopePiece(Res, 0, End-Start); + } + + // Otherwise, this was a small request but we just don't have space for it + // Make a new chunk and share it with later allocations. + + // If we had an old allocation, drop our reference to it. 
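// Simplified stand-alone version (hypothetical names, no reference counting)
// of the allocation policy MakeRopeString implements here: small copies are
// packed into a shared fixed-size chunk so they don't each pay for a heap
// allocation, while oversized requests get a dedicated buffer of their own.
#include <cstdio>
#include <list>
#include <string>

class ChunkAllocator {
  enum { ChunkSize = 4096 };
  std::list<std::string> Chunks;  // list nodes never move, so pointers stay valid
  std::string *Cur;               // chunk currently being filled, if any
public:
  ChunkAllocator() : Cur(0) {}

  // Copy [Start, End) somewhere stable and return a pointer to the copy.
  const char *copy(const char *Start, const char *End) {
    size_t Len = End - Start;
    if (Len > ChunkSize) {                        // oversized: dedicated buffer
      Chunks.push_back(std::string(Start, End));
      return Chunks.back().data();
    }
    if (!Cur || Cur->size() + Len > ChunkSize) {  // current chunk full: start another
      Chunks.push_back(std::string());
      Chunks.back().reserve(ChunkSize);           // keeps data() stable while appending
      Cur = &Chunks.back();
    }
    size_t Off = Cur->size();
    Cur->append(Start, End);
    return Cur->data() + Off;
  }
};

int main() {
  ChunkAllocator A;
  const char Src[] = "hello world";
  const char *P1 = A.copy(Src, Src + 5);          // "hello" shares a chunk...
  const char *P2 = A.copy(Src + 6, Src + 11);     // ...with "world"
  std::printf("%.5s %.5s\n", P1, P2);
}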
+ if (AllocBuffer && --AllocBuffer->RefCount == 0) + delete [] (char*)AllocBuffer; + + unsigned AllocSize = offsetof(RopeRefCountString, Data) + AllocChunkSize; + AllocBuffer = reinterpret_cast(new char[AllocSize]); + AllocBuffer->RefCount = 0; + memcpy(AllocBuffer->Data, Start, Len); + AllocOffs = Len; + + // Start out the new allocation with a refcount of 1, since we have an + // internal reference to it. + AllocBuffer->addRef(); + return RopePiece(AllocBuffer, 0, Len); +} + + diff --git a/lib/Rewrite/Core/Rewriter.cpp b/lib/Rewrite/Core/Rewriter.cpp new file mode 100644 index 0000000..4df967f --- /dev/null +++ b/lib/Rewrite/Core/Rewriter.cpp @@ -0,0 +1,486 @@ +//===--- Rewriter.cpp - Code rewriting interface --------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the Rewriter class, which is used for code +// transformations. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Core/Rewriter.h" +#include "clang/AST/Stmt.h" +#include "clang/AST/Decl.h" +#include "clang/Basic/DiagnosticIDs.h" +#include "clang/Basic/FileManager.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Lex/Lexer.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/FileSystem.h" +using namespace clang; + +raw_ostream &RewriteBuffer::write(raw_ostream &os) const { + // FIXME: eliminate the copy by writing out each chunk at a time + os << std::string(begin(), end()); + return os; +} + +/// \brief Return true if this character is non-new-line whitespace: +/// ' ', '\\t', '\\f', '\\v', '\\r'. +static inline bool isWhitespace(unsigned char c) { + switch (c) { + case ' ': + case '\t': + case '\f': + case '\v': + case '\r': + return true; + default: + return false; + } +} + +void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size, + bool removeLineIfEmpty) { + // Nothing to remove, exit early. + if (Size == 0) return; + + unsigned RealOffset = getMappedOffset(OrigOffset, true); + assert(RealOffset+Size < Buffer.size() && "Invalid location"); + + // Remove the dead characters. + Buffer.erase(RealOffset, Size); + + // Add a delta so that future changes are offset correctly. + AddReplaceDelta(OrigOffset, -Size); + + if (removeLineIfEmpty) { + // Find the line that the remove occurred and if it is completely empty + // remove the line as well. + + iterator curLineStart = begin(); + unsigned curLineStartOffs = 0; + iterator posI = begin(); + for (unsigned i = 0; i != RealOffset; ++i) { + if (*posI == '\n') { + curLineStart = posI; + ++curLineStart; + curLineStartOffs = i + 1; + } + ++posI; + } + + unsigned lineSize = 0; + posI = curLineStart; + while (posI != end() && isWhitespace(*posI)) { + ++posI; + ++lineSize; + } + if (posI != end() && *posI == '\n') { + Buffer.erase(curLineStartOffs, lineSize + 1/* + '\n'*/); + AddReplaceDelta(curLineStartOffs, -(lineSize + 1/* + '\n'*/)); + } + } +} + +void RewriteBuffer::InsertText(unsigned OrigOffset, StringRef Str, + bool InsertAfter) { + + // Nothing to insert, exit early. + if (Str.empty()) return; + + unsigned RealOffset = getMappedOffset(OrigOffset, InsertAfter); + Buffer.insert(RealOffset, Str.begin(), Str.end()); + + // Add a delta so that future changes are offset correctly. 
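// The offset bookkeeping used by RemoveText/InsertText here, reduced to a
// stand-alone sketch (hypothetical names): every edit records an
// (original offset, delta) pair, and an original offset is later mapped into
// the rewritten buffer by summing the deltas that apply before it. The real
// getMappedOffset additionally distinguishes inserts placed before vs. after
// the same offset.
#include <cstdio>
#include <vector>

struct Delta {
  unsigned OrigOffset;            // where in the *original* buffer the edit happened
  int Change;                     // bytes added (positive) or removed (negative)
};

static unsigned getMappedOffset(const std::vector<Delta> &Deltas,
                                unsigned OrigOffset) {
  int Accum = 0;
  for (size_t i = 0, e = Deltas.size(); i != e; ++i)
    if (Deltas[i].OrigOffset <= OrigOffset)
      Accum += Deltas[i].Change;
  return OrigOffset + Accum;
}

int main() {
  std::vector<Delta> Deltas;
  Delta D1 = {10, +3};            // inserted 3 bytes at original offset 10
  Delta D2 = {20, -5};            // removed 5 bytes at original offset 20
  Deltas.push_back(D1);
  Deltas.push_back(D2);
  std::printf("original offset 30 -> rewritten offset %u\n",
              getMappedOffset(Deltas, 30));       // 28
}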
+ AddInsertDelta(OrigOffset, Str.size()); +} + +/// ReplaceText - This method replaces a range of characters in the input +/// buffer with a new string. This is effectively a combined "remove+insert" +/// operation. +void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength, + StringRef NewStr) { + unsigned RealOffset = getMappedOffset(OrigOffset, true); + Buffer.erase(RealOffset, OrigLength); + Buffer.insert(RealOffset, NewStr.begin(), NewStr.end()); + if (OrigLength != NewStr.size()) + AddReplaceDelta(OrigOffset, NewStr.size() - OrigLength); +} + + +//===----------------------------------------------------------------------===// +// Rewriter class +//===----------------------------------------------------------------------===// + +/// getRangeSize - Return the size in bytes of the specified range if they +/// are in the same file. If not, this returns -1. +int Rewriter::getRangeSize(const CharSourceRange &Range, + RewriteOptions opts) const { + if (!isRewritable(Range.getBegin()) || + !isRewritable(Range.getEnd())) return -1; + + FileID StartFileID, EndFileID; + unsigned StartOff, EndOff; + + StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID); + EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID); + + if (StartFileID != EndFileID) + return -1; + + // If edits have been made to this buffer, the delta between the range may + // have changed. + std::map::const_iterator I = + RewriteBuffers.find(StartFileID); + if (I != RewriteBuffers.end()) { + const RewriteBuffer &RB = I->second; + EndOff = RB.getMappedOffset(EndOff, opts.IncludeInsertsAtEndOfRange); + StartOff = RB.getMappedOffset(StartOff, !opts.IncludeInsertsAtBeginOfRange); + } + + + // Adjust the end offset to the end of the last token, instead of being the + // start of the last token if this is a token range. + if (Range.isTokenRange()) + EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts); + + return EndOff-StartOff; +} + +int Rewriter::getRangeSize(SourceRange Range, RewriteOptions opts) const { + return getRangeSize(CharSourceRange::getTokenRange(Range), opts); +} + + +/// getRewrittenText - Return the rewritten form of the text in the specified +/// range. If the start or end of the range was unrewritable or if they are +/// in different buffers, this returns an empty string. +/// +/// Note that this method is not particularly efficient. +/// +std::string Rewriter::getRewrittenText(SourceRange Range) const { + if (!isRewritable(Range.getBegin()) || + !isRewritable(Range.getEnd())) + return ""; + + FileID StartFileID, EndFileID; + unsigned StartOff, EndOff; + StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID); + EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID); + + if (StartFileID != EndFileID) + return ""; // Start and end in different buffers. + + // If edits have been made to this buffer, the delta between the range may + // have changed. + std::map::const_iterator I = + RewriteBuffers.find(StartFileID); + if (I == RewriteBuffers.end()) { + // If the buffer hasn't been rewritten, just return the text from the input. + const char *Ptr = SourceMgr->getCharacterData(Range.getBegin()); + + // Adjust the end offset to the end of the last token, instead of being the + // start of the last token. 
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts); + return std::string(Ptr, Ptr+EndOff-StartOff); + } + + const RewriteBuffer &RB = I->second; + EndOff = RB.getMappedOffset(EndOff, true); + StartOff = RB.getMappedOffset(StartOff); + + // Adjust the end offset to the end of the last token, instead of being the + // start of the last token. + EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts); + + // Advance the iterators to the right spot, yay for linear time algorithms. + RewriteBuffer::iterator Start = RB.begin(); + std::advance(Start, StartOff); + RewriteBuffer::iterator End = Start; + std::advance(End, EndOff-StartOff); + + return std::string(Start, End); +} + +unsigned Rewriter::getLocationOffsetAndFileID(SourceLocation Loc, + FileID &FID) const { + assert(Loc.isValid() && "Invalid location"); + std::pair V = SourceMgr->getDecomposedLoc(Loc); + FID = V.first; + return V.second; +} + + +/// getEditBuffer - Get or create a RewriteBuffer for the specified FileID. +/// +RewriteBuffer &Rewriter::getEditBuffer(FileID FID) { + std::map::iterator I = + RewriteBuffers.lower_bound(FID); + if (I != RewriteBuffers.end() && I->first == FID) + return I->second; + I = RewriteBuffers.insert(I, std::make_pair(FID, RewriteBuffer())); + + StringRef MB = SourceMgr->getBufferData(FID); + I->second.Initialize(MB.begin(), MB.end()); + + return I->second; +} + +/// InsertText - Insert the specified string at the specified location in the +/// original buffer. +bool Rewriter::InsertText(SourceLocation Loc, StringRef Str, + bool InsertAfter, bool indentNewLines) { + if (!isRewritable(Loc)) return true; + FileID FID; + unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID); + + SmallString<128> indentedStr; + if (indentNewLines && Str.find('\n') != StringRef::npos) { + StringRef MB = SourceMgr->getBufferData(FID); + + unsigned lineNo = SourceMgr->getLineNumber(FID, StartOffs) - 1; + const SrcMgr::ContentCache * + Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache(); + unsigned lineOffs = Content->SourceLineCache[lineNo]; + + // Find the whitespace at the start of the line. + StringRef indentSpace; + { + unsigned i = lineOffs; + while (isWhitespace(MB[i])) + ++i; + indentSpace = MB.substr(lineOffs, i-lineOffs); + } + + SmallVector lines; + Str.split(lines, "\n"); + + for (unsigned i = 0, e = lines.size(); i != e; ++i) { + indentedStr += lines[i]; + if (i < e-1) { + indentedStr += '\n'; + indentedStr += indentSpace; + } + } + Str = indentedStr.str(); + } + + getEditBuffer(FID).InsertText(StartOffs, Str, InsertAfter); + return false; +} + +bool Rewriter::InsertTextAfterToken(SourceLocation Loc, StringRef Str) { + if (!isRewritable(Loc)) return true; + FileID FID; + unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID); + RewriteOptions rangeOpts; + rangeOpts.IncludeInsertsAtBeginOfRange = false; + StartOffs += getRangeSize(SourceRange(Loc, Loc), rangeOpts); + getEditBuffer(FID).InsertText(StartOffs, Str, /*InsertAfter*/true); + return false; +} + +/// RemoveText - Remove the specified text region. +bool Rewriter::RemoveText(SourceLocation Start, unsigned Length, + RewriteOptions opts) { + if (!isRewritable(Start)) return true; + FileID FID; + unsigned StartOffs = getLocationOffsetAndFileID(Start, FID); + getEditBuffer(FID).RemoveText(StartOffs, Length, opts.RemoveLineIfEmpty); + return false; +} + +/// ReplaceText - This method replaces a range of characters in the input +/// buffer with a new string. 
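// The lazy-creation idiom from getEditBuffer above, shown on its own
// (hypothetical names): lower_bound either finds the existing entry or the
// exact position a new one belongs at, and that iterator then serves as the
// insertion hint so no second lookup is needed.
#include <cstdio>
#include <map>
#include <string>

static std::string &getOrCreate(std::map<int, std::string> &M, int Key) {
  std::map<int, std::string>::iterator I = M.lower_bound(Key);
  if (I != M.end() && I->first == Key)
    return I->second;                                   // already present
  I = M.insert(I, std::make_pair(Key, std::string()));  // hinted insert
  I->second = "initialized lazily";
  return I->second;
}

int main() {
  std::map<int, std::string> Buffers;
  std::printf("%s\n", getOrCreate(Buffers, 42).c_str());
  std::printf("%s\n", getOrCreate(Buffers, 42).c_str()); // reuses the same entry
}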
This is effectively a combined "remove/insert" +/// operation. +bool Rewriter::ReplaceText(SourceLocation Start, unsigned OrigLength, + StringRef NewStr) { + if (!isRewritable(Start)) return true; + FileID StartFileID; + unsigned StartOffs = getLocationOffsetAndFileID(Start, StartFileID); + + getEditBuffer(StartFileID).ReplaceText(StartOffs, OrigLength, NewStr); + return false; +} + +bool Rewriter::ReplaceText(SourceRange range, SourceRange replacementRange) { + if (!isRewritable(range.getBegin())) return true; + if (!isRewritable(range.getEnd())) return true; + if (replacementRange.isInvalid()) return true; + SourceLocation start = range.getBegin(); + unsigned origLength = getRangeSize(range); + unsigned newLength = getRangeSize(replacementRange); + FileID FID; + unsigned newOffs = getLocationOffsetAndFileID(replacementRange.getBegin(), + FID); + StringRef MB = SourceMgr->getBufferData(FID); + return ReplaceText(start, origLength, MB.substr(newOffs, newLength)); +} + +/// ReplaceStmt - This replaces a Stmt/Expr with another, using the pretty +/// printer to generate the replacement code. This returns true if the input +/// could not be rewritten, or false if successful. +bool Rewriter::ReplaceStmt(Stmt *From, Stmt *To) { + // Measaure the old text. + int Size = getRangeSize(From->getSourceRange()); + if (Size == -1) + return true; + + // Get the new text. + std::string SStr; + llvm::raw_string_ostream S(SStr); + To->printPretty(S, 0, PrintingPolicy(*LangOpts)); + const std::string &Str = S.str(); + + ReplaceText(From->getLocStart(), Size, Str); + return false; +} + +std::string Rewriter::ConvertToString(Stmt *From) { + std::string SStr; + llvm::raw_string_ostream S(SStr); + From->printPretty(S, 0, PrintingPolicy(*LangOpts)); + return S.str(); +} + +bool Rewriter::IncreaseIndentation(CharSourceRange range, + SourceLocation parentIndent) { + if (range.isInvalid()) return true; + if (!isRewritable(range.getBegin())) return true; + if (!isRewritable(range.getEnd())) return true; + if (!isRewritable(parentIndent)) return true; + + FileID StartFileID, EndFileID, parentFileID; + unsigned StartOff, EndOff, parentOff; + + StartOff = getLocationOffsetAndFileID(range.getBegin(), StartFileID); + EndOff = getLocationOffsetAndFileID(range.getEnd(), EndFileID); + parentOff = getLocationOffsetAndFileID(parentIndent, parentFileID); + + if (StartFileID != EndFileID || StartFileID != parentFileID) + return true; + if (StartOff > EndOff) + return true; + + FileID FID = StartFileID; + StringRef MB = SourceMgr->getBufferData(FID); + + unsigned parentLineNo = SourceMgr->getLineNumber(FID, parentOff) - 1; + unsigned startLineNo = SourceMgr->getLineNumber(FID, StartOff) - 1; + unsigned endLineNo = SourceMgr->getLineNumber(FID, EndOff) - 1; + + const SrcMgr::ContentCache * + Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache(); + + // Find where the lines start. + unsigned parentLineOffs = Content->SourceLineCache[parentLineNo]; + unsigned startLineOffs = Content->SourceLineCache[startLineNo]; + + // Find the whitespace at the start of each line. 
+ StringRef parentSpace, startSpace; + { + unsigned i = parentLineOffs; + while (isWhitespace(MB[i])) + ++i; + parentSpace = MB.substr(parentLineOffs, i-parentLineOffs); + + i = startLineOffs; + while (isWhitespace(MB[i])) + ++i; + startSpace = MB.substr(startLineOffs, i-startLineOffs); + } + if (parentSpace.size() >= startSpace.size()) + return true; + if (!startSpace.startswith(parentSpace)) + return true; + + StringRef indent = startSpace.substr(parentSpace.size()); + + // Indent the lines between start/end offsets. + RewriteBuffer &RB = getEditBuffer(FID); + for (unsigned lineNo = startLineNo; lineNo <= endLineNo; ++lineNo) { + unsigned offs = Content->SourceLineCache[lineNo]; + unsigned i = offs; + while (isWhitespace(MB[i])) + ++i; + StringRef origIndent = MB.substr(offs, i-offs); + if (origIndent.startswith(startSpace)) + RB.InsertText(offs, indent, /*InsertAfter=*/false); + } + + return false; +} + +// A wrapper for a file stream that atomically overwrites the target. +// +// Creates a file output stream for a temporary file in the constructor, +// which is later accessible via getStream() if ok() return true. +// Flushes the stream and moves the temporary file to the target location +// in the destructor. +class AtomicallyMovedFile { +public: + AtomicallyMovedFile(DiagnosticsEngine &Diagnostics, StringRef Filename, + bool &AllWritten) + : Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) { + TempFilename = Filename; + TempFilename += "-%%%%%%%%"; + int FD; + if (llvm::sys::fs::unique_file(TempFilename.str(), FD, TempFilename, + /*makeAbsolute=*/true, 0664)) { + AllWritten = false; + Diagnostics.Report(clang::diag::err_unable_to_make_temp) + << TempFilename; + } else { + FileStream.reset(new llvm::raw_fd_ostream(FD, /*shouldClose=*/true)); + } + } + + ~AtomicallyMovedFile() { + if (!ok()) return; + + FileStream->flush(); +#ifdef _WIN32 + // Win32 does not allow rename/removing opened files. + FileStream.reset(); +#endif + if (llvm::error_code ec = + llvm::sys::fs::rename(TempFilename.str(), Filename)) { + AllWritten = false; + Diagnostics.Report(clang::diag::err_unable_to_rename_temp) + << TempFilename << Filename << ec.message(); + bool existed; + // If the remove fails, there's not a lot we can do - this is already an + // error. + llvm::sys::fs::remove(TempFilename.str(), existed); + } + } + + bool ok() { return FileStream; } + llvm::raw_ostream &getStream() { return *FileStream; } + +private: + DiagnosticsEngine &Diagnostics; + StringRef Filename; + SmallString<128> TempFilename; + OwningPtr FileStream; + bool &AllWritten; +}; + +bool Rewriter::overwriteChangedFiles() { + bool AllWritten = true; + for (buffer_iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) { + const FileEntry *Entry = + getSourceMgr().getFileEntryForID(I->first); + AtomicallyMovedFile File(getSourceMgr().getDiagnostics(), Entry->getName(), + AllWritten); + if (File.ok()) { + I->second.write(File.getStream()); + } + } + return !AllWritten; +} diff --git a/lib/Rewrite/Core/TokenRewriter.cpp b/lib/Rewrite/Core/TokenRewriter.cpp new file mode 100644 index 0000000..940ece2 --- /dev/null +++ b/lib/Rewrite/Core/TokenRewriter.cpp @@ -0,0 +1,99 @@ +//===--- TokenRewriter.cpp - Token-based code rewriting interface ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
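// A bare-bones portable sketch (hypothetical names; no unique temp-name
// generation, error reporting, or Windows rename handling) of the pattern
// AtomicallyMovedFile above relies on: write everything to a temporary file
// first and only rename it over the target once the write succeeded, so a
// failed run never leaves a half-written output behind.
#include <cstdio>
#include <string>

static bool writeFileAtomically(const std::string &Target,
                                const std::string &Contents) {
  std::string Temp = Target + ".tmp";
  std::FILE *F = std::fopen(Temp.c_str(), "wb");
  if (!F)
    return false;
  bool Ok =
      std::fwrite(Contents.data(), 1, Contents.size(), F) == Contents.size();
  Ok = std::fclose(F) == 0 && Ok;
  if (!Ok) {                                      // keep the old file intact on failure
    std::remove(Temp.c_str());
    return false;
  }
  return std::rename(Temp.c_str(), Target.c_str()) == 0;
}

int main() {
  std::printf("wrote: %d\n", (int)writeFileAtomically("out.txt", "hello\n"));
}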
+// +//===----------------------------------------------------------------------===// +// +// This file implements the TokenRewriter class, which is used for code +// transformations. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Core/TokenRewriter.h" +#include "clang/Lex/Lexer.h" +#include "clang/Lex/ScratchBuffer.h" +#include "clang/Basic/SourceManager.h" +using namespace clang; + +TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM, + const LangOptions &LangOpts) { + ScratchBuf.reset(new ScratchBuffer(SM)); + + // Create a lexer to lex all the tokens of the main file in raw mode. + const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID); + Lexer RawLex(FID, FromFile, SM, LangOpts); + + // Return all comments and whitespace as tokens. + RawLex.SetKeepWhitespaceMode(true); + + // Lex the file, populating our datastructures. + Token RawTok; + RawLex.LexFromRawLexer(RawTok); + while (RawTok.isNot(tok::eof)) { +#if 0 + if (Tok.is(tok::raw_identifier)) { + // Look up the identifier info for the token. This should use + // IdentifierTable directly instead of PP. + PP.LookUpIdentifierInfo(Tok); + } +#endif + + AddToken(RawTok, TokenList.end()); + RawLex.LexFromRawLexer(RawTok); + } +} + +TokenRewriter::~TokenRewriter() { +} + + +/// RemapIterator - Convert from token_iterator (a const iterator) to +/// TokenRefTy (a non-const iterator). +TokenRewriter::TokenRefTy TokenRewriter::RemapIterator(token_iterator I) { + if (I == token_end()) return TokenList.end(); + + // FIXME: This is horrible, we should use our own list or something to avoid + // this. + std::map::iterator MapIt = + TokenAtLoc.find(I->getLocation()); + assert(MapIt != TokenAtLoc.end() && "iterator not in rewriter?"); + return MapIt->second; +} + + +/// AddToken - Add the specified token into the Rewriter before the other +/// position. +TokenRewriter::TokenRefTy +TokenRewriter::AddToken(const Token &T, TokenRefTy Where) { + Where = TokenList.insert(Where, T); + + bool InsertSuccess = TokenAtLoc.insert(std::make_pair(T.getLocation(), + Where)).second; + assert(InsertSuccess && "Token location already in rewriter!"); + (void)InsertSuccess; + return Where; +} + + +TokenRewriter::token_iterator +TokenRewriter::AddTokenBefore(token_iterator I, const char *Val) { + unsigned Len = strlen(Val); + + // Plop the string into the scratch buffer, then create a token for this + // string. + Token Tok; + Tok.startToken(); + const char *Spelling; + Tok.setLocation(ScratchBuf->getToken(Val, Len, Spelling)); + Tok.setLength(Len); + + // TODO: Form a whole lexer around this and relex the token! For now, just + // set kind to tok::unknown. + Tok.setKind(tok::unknown); + + return AddToken(Tok, RemapIterator(I)); +} + diff --git a/lib/Rewrite/DeltaTree.cpp b/lib/Rewrite/DeltaTree.cpp deleted file mode 100644 index 4297dc8..0000000 --- a/lib/Rewrite/DeltaTree.cpp +++ /dev/null @@ -1,467 +0,0 @@ -//===--- DeltaTree.cpp - B-Tree for Rewrite Delta tracking ----------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements the DeltaTree and related classes. 
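// The data-structure pairing TokenRewriter above is built on, in isolation
// (hypothetical names; plain ints stand in for SourceLocations): tokens live
// in a std::list so insertions never invalidate other positions, and a map
// from each token's location to its list iterator lets AddTokenBefore turn a
// location back into an insertable position with little extra work.
#include <cstdio>
#include <list>
#include <map>
#include <string>

int main() {
  std::list<std::string> Tokens;
  std::map<int, std::list<std::string>::iterator> AtLoc;

  // Append a token and remember where it lives.
  std::list<std::string>::iterator Where = Tokens.insert(Tokens.end(), "foo");
  AtLoc.insert(std::make_pair(100, Where));

  // Later: insert a new token *before* the token at "location" 100.
  std::list<std::string>::iterator Pos = AtLoc.find(100)->second;
  Tokens.insert(Pos, "bar");

  for (std::list<std::string>::iterator I = Tokens.begin(), E = Tokens.end();
       I != E; ++I)
    std::printf("%s ", I->c_str());
  std::printf("\n");                              // bar foo
}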
-// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/DeltaTree.h" -#include "clang/Basic/LLVM.h" -#include -#include -using namespace clang; - -/// The DeltaTree class is a multiway search tree (BTree) structure with some -/// fancy features. B-Trees are generally more memory and cache efficient -/// than binary trees, because they store multiple keys/values in each node. -/// -/// DeltaTree implements a key/value mapping from FileIndex to Delta, allowing -/// fast lookup by FileIndex. However, an added (important) bonus is that it -/// can also efficiently tell us the full accumulated delta for a specific -/// file offset as well, without traversing the whole tree. -/// -/// The nodes of the tree are made up of instances of two classes: -/// DeltaTreeNode and DeltaTreeInteriorNode. The later subclasses the -/// former and adds children pointers. Each node knows the full delta of all -/// entries (recursively) contained inside of it, which allows us to get the -/// full delta implied by a whole subtree in constant time. - -namespace { - /// SourceDelta - As code in the original input buffer is added and deleted, - /// SourceDelta records are used to keep track of how the input SourceLocation - /// object is mapped into the output buffer. - struct SourceDelta { - unsigned FileLoc; - int Delta; - - static SourceDelta get(unsigned Loc, int D) { - SourceDelta Delta; - Delta.FileLoc = Loc; - Delta.Delta = D; - return Delta; - } - }; - - /// DeltaTreeNode - The common part of all nodes. - /// - class DeltaTreeNode { - public: - struct InsertResult { - DeltaTreeNode *LHS, *RHS; - SourceDelta Split; - }; - - private: - friend class DeltaTreeInteriorNode; - - /// WidthFactor - This controls the number of K/V slots held in the BTree: - /// how wide it is. Each level of the BTree is guaranteed to have at least - /// WidthFactor-1 K/V pairs (except the root) and may have at most - /// 2*WidthFactor-1 K/V pairs. - enum { WidthFactor = 8 }; - - /// Values - This tracks the SourceDelta's currently in this node. - /// - SourceDelta Values[2*WidthFactor-1]; - - /// NumValuesUsed - This tracks the number of values this node currently - /// holds. - unsigned char NumValuesUsed; - - /// IsLeaf - This is true if this is a leaf of the btree. If false, this is - /// an interior node, and is actually an instance of DeltaTreeInteriorNode. - bool IsLeaf; - - /// FullDelta - This is the full delta of all the values in this node and - /// all children nodes. - int FullDelta; - public: - DeltaTreeNode(bool isLeaf = true) - : NumValuesUsed(0), IsLeaf(isLeaf), FullDelta(0) {} - - bool isLeaf() const { return IsLeaf; } - int getFullDelta() const { return FullDelta; } - bool isFull() const { return NumValuesUsed == 2*WidthFactor-1; } - - unsigned getNumValuesUsed() const { return NumValuesUsed; } - const SourceDelta &getValue(unsigned i) const { - assert(i < NumValuesUsed && "Invalid value #"); - return Values[i]; - } - SourceDelta &getValue(unsigned i) { - assert(i < NumValuesUsed && "Invalid value #"); - return Values[i]; - } - - /// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into - /// this node. If insertion is easy, do it and return false. Otherwise, - /// split the node, populate InsertRes with info about the split, and return - /// true. 
- bool DoInsertion(unsigned FileIndex, int Delta, InsertResult *InsertRes); - - void DoSplit(InsertResult &InsertRes); - - - /// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a - /// local walk over our contained deltas. - void RecomputeFullDeltaLocally(); - - void Destroy(); - - //static inline bool classof(const DeltaTreeNode *) { return true; } - }; -} // end anonymous namespace - -namespace { - /// DeltaTreeInteriorNode - When isLeaf = false, a node has child pointers. - /// This class tracks them. - class DeltaTreeInteriorNode : public DeltaTreeNode { - DeltaTreeNode *Children[2*WidthFactor]; - ~DeltaTreeInteriorNode() { - for (unsigned i = 0, e = NumValuesUsed+1; i != e; ++i) - Children[i]->Destroy(); - } - friend class DeltaTreeNode; - public: - DeltaTreeInteriorNode() : DeltaTreeNode(false /*nonleaf*/) {} - - DeltaTreeInteriorNode(const InsertResult &IR) - : DeltaTreeNode(false /*nonleaf*/) { - Children[0] = IR.LHS; - Children[1] = IR.RHS; - Values[0] = IR.Split; - FullDelta = IR.LHS->getFullDelta()+IR.RHS->getFullDelta()+IR.Split.Delta; - NumValuesUsed = 1; - } - - const DeltaTreeNode *getChild(unsigned i) const { - assert(i < getNumValuesUsed()+1 && "Invalid child"); - return Children[i]; - } - DeltaTreeNode *getChild(unsigned i) { - assert(i < getNumValuesUsed()+1 && "Invalid child"); - return Children[i]; - } - - //static inline bool classof(const DeltaTreeInteriorNode *) { return true; } - static inline bool classof(const DeltaTreeNode *N) { return !N->isLeaf(); } - }; -} - - -/// Destroy - A 'virtual' destructor. -void DeltaTreeNode::Destroy() { - if (isLeaf()) - delete this; - else - delete cast(this); -} - -/// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a -/// local walk over our contained deltas. -void DeltaTreeNode::RecomputeFullDeltaLocally() { - int NewFullDelta = 0; - for (unsigned i = 0, e = getNumValuesUsed(); i != e; ++i) - NewFullDelta += Values[i].Delta; - if (DeltaTreeInteriorNode *IN = dyn_cast(this)) - for (unsigned i = 0, e = getNumValuesUsed()+1; i != e; ++i) - NewFullDelta += IN->getChild(i)->getFullDelta(); - FullDelta = NewFullDelta; -} - -/// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into -/// this node. If insertion is easy, do it and return false. Otherwise, -/// split the node, populate InsertRes with info about the split, and return -/// true. -bool DeltaTreeNode::DoInsertion(unsigned FileIndex, int Delta, - InsertResult *InsertRes) { - // Maintain full delta for this node. - FullDelta += Delta; - - // Find the insertion point, the first delta whose index is >= FileIndex. - unsigned i = 0, e = getNumValuesUsed(); - while (i != e && FileIndex > getValue(i).FileLoc) - ++i; - - // If we found an a record for exactly this file index, just merge this - // value into the pre-existing record and finish early. - if (i != e && getValue(i).FileLoc == FileIndex) { - // NOTE: Delta could drop to zero here. This means that the delta entry is - // useless and could be removed. Supporting erases is more complex than - // leaving an entry with Delta=0, so we just leave an entry with Delta=0 in - // the tree. - Values[i].Delta += Delta; - return false; - } - - // Otherwise, we found an insertion point, and we know that the value at the - // specified index is > FileIndex. Handle the leaf case first. - if (isLeaf()) { - if (!isFull()) { - // For an insertion into a non-full leaf node, just insert the value in - // its sorted position. This requires moving later values over. 
- if (i != e) - memmove(&Values[i+1], &Values[i], sizeof(Values[0])*(e-i)); - Values[i] = SourceDelta::get(FileIndex, Delta); - ++NumValuesUsed; - return false; - } - - // Otherwise, if this is leaf is full, split the node at its median, insert - // the value into one of the children, and return the result. - assert(InsertRes && "No result location specified"); - DoSplit(*InsertRes); - - if (InsertRes->Split.FileLoc > FileIndex) - InsertRes->LHS->DoInsertion(FileIndex, Delta, 0 /*can't fail*/); - else - InsertRes->RHS->DoInsertion(FileIndex, Delta, 0 /*can't fail*/); - return true; - } - - // Otherwise, this is an interior node. Send the request down the tree. - DeltaTreeInteriorNode *IN = cast(this); - if (!IN->Children[i]->DoInsertion(FileIndex, Delta, InsertRes)) - return false; // If there was space in the child, just return. - - // Okay, this split the subtree, producing a new value and two children to - // insert here. If this node is non-full, we can just insert it directly. - if (!isFull()) { - // Now that we have two nodes and a new element, insert the perclated value - // into ourself by moving all the later values/children down, then inserting - // the new one. - if (i != e) - memmove(&IN->Children[i+2], &IN->Children[i+1], - (e-i)*sizeof(IN->Children[0])); - IN->Children[i] = InsertRes->LHS; - IN->Children[i+1] = InsertRes->RHS; - - if (e != i) - memmove(&Values[i+1], &Values[i], (e-i)*sizeof(Values[0])); - Values[i] = InsertRes->Split; - ++NumValuesUsed; - return false; - } - - // Finally, if this interior node was full and a node is percolated up, split - // ourself and return that up the chain. Start by saving all our info to - // avoid having the split clobber it. - IN->Children[i] = InsertRes->LHS; - DeltaTreeNode *SubRHS = InsertRes->RHS; - SourceDelta SubSplit = InsertRes->Split; - - // Do the split. - DoSplit(*InsertRes); - - // Figure out where to insert SubRHS/NewSplit. - DeltaTreeInteriorNode *InsertSide; - if (SubSplit.FileLoc < InsertRes->Split.FileLoc) - InsertSide = cast(InsertRes->LHS); - else - InsertSide = cast(InsertRes->RHS); - - // We now have a non-empty interior node 'InsertSide' to insert - // SubRHS/SubSplit into. Find out where to insert SubSplit. - - // Find the insertion point, the first delta whose index is >SubSplit.FileLoc. - i = 0; e = InsertSide->getNumValuesUsed(); - while (i != e && SubSplit.FileLoc > InsertSide->getValue(i).FileLoc) - ++i; - - // Now we know that i is the place to insert the split value into. Insert it - // and the child right after it. - if (i != e) - memmove(&InsertSide->Children[i+2], &InsertSide->Children[i+1], - (e-i)*sizeof(IN->Children[0])); - InsertSide->Children[i+1] = SubRHS; - - if (e != i) - memmove(&InsertSide->Values[i+1], &InsertSide->Values[i], - (e-i)*sizeof(Values[0])); - InsertSide->Values[i] = SubSplit; - ++InsertSide->NumValuesUsed; - InsertSide->FullDelta += SubSplit.Delta + SubRHS->getFullDelta(); - return true; -} - -/// DoSplit - Split the currently full node (which has 2*WidthFactor-1 values) -/// into two subtrees each with "WidthFactor-1" values and a pivot value. -/// Return the pieces in InsertRes. -void DeltaTreeNode::DoSplit(InsertResult &InsertRes) { - assert(isFull() && "Why split a non-full node?"); - - // Since this node is full, it contains 2*WidthFactor-1 values. We move - // the first 'WidthFactor-1' values to the LHS child (which we leave in this - // node), propagate one value up, and move the last 'WidthFactor-1' values - // into the RHS child. - - // Create the new child node. 
- DeltaTreeNode *NewNode; - if (DeltaTreeInteriorNode *IN = dyn_cast(this)) { - // If this is an interior node, also move over 'WidthFactor' children - // into the new node. - DeltaTreeInteriorNode *New = new DeltaTreeInteriorNode(); - memcpy(&New->Children[0], &IN->Children[WidthFactor], - WidthFactor*sizeof(IN->Children[0])); - NewNode = New; - } else { - // Just create the new leaf node. - NewNode = new DeltaTreeNode(); - } - - // Move over the last 'WidthFactor-1' values from here to NewNode. - memcpy(&NewNode->Values[0], &Values[WidthFactor], - (WidthFactor-1)*sizeof(Values[0])); - - // Decrease the number of values in the two nodes. - NewNode->NumValuesUsed = NumValuesUsed = WidthFactor-1; - - // Recompute the two nodes' full delta. - NewNode->RecomputeFullDeltaLocally(); - RecomputeFullDeltaLocally(); - - InsertRes.LHS = this; - InsertRes.RHS = NewNode; - InsertRes.Split = Values[WidthFactor-1]; -} - - - -//===----------------------------------------------------------------------===// -// DeltaTree Implementation -//===----------------------------------------------------------------------===// - -//#define VERIFY_TREE - -#ifdef VERIFY_TREE -/// VerifyTree - Walk the btree performing assertions on various properties to -/// verify consistency. This is useful for debugging new changes to the tree. -static void VerifyTree(const DeltaTreeNode *N) { - const DeltaTreeInteriorNode *IN = dyn_cast(N); - if (IN == 0) { - // Verify leaves, just ensure that FullDelta matches up and the elements - // are in proper order. - int FullDelta = 0; - for (unsigned i = 0, e = N->getNumValuesUsed(); i != e; ++i) { - if (i) - assert(N->getValue(i-1).FileLoc < N->getValue(i).FileLoc); - FullDelta += N->getValue(i).Delta; - } - assert(FullDelta == N->getFullDelta()); - return; - } - - // Verify interior nodes: Ensure that FullDelta matches up and the - // elements are in proper order and the children are in proper order. - int FullDelta = 0; - for (unsigned i = 0, e = IN->getNumValuesUsed(); i != e; ++i) { - const SourceDelta &IVal = N->getValue(i); - const DeltaTreeNode *IChild = IN->getChild(i); - if (i) - assert(IN->getValue(i-1).FileLoc < IVal.FileLoc); - FullDelta += IVal.Delta; - FullDelta += IChild->getFullDelta(); - - // The largest value in child #i should be smaller than FileLoc. - assert(IChild->getValue(IChild->getNumValuesUsed()-1).FileLoc < - IVal.FileLoc); - - // The smallest value in child #i+1 should be larger than FileLoc. - assert(IN->getChild(i+1)->getValue(0).FileLoc > IVal.FileLoc); - VerifyTree(IChild); - } - - FullDelta += IN->getChild(IN->getNumValuesUsed())->getFullDelta(); - - assert(FullDelta == N->getFullDelta()); -} -#endif // VERIFY_TREE - -static DeltaTreeNode *getRoot(void *Root) { - return (DeltaTreeNode*)Root; -} - -DeltaTree::DeltaTree() { - Root = new DeltaTreeNode(); -} -DeltaTree::DeltaTree(const DeltaTree &RHS) { - // Currently we only support copying when the RHS is empty. - assert(getRoot(RHS.Root)->getNumValuesUsed() == 0 && - "Can only copy empty tree"); - Root = new DeltaTreeNode(); -} - -DeltaTree::~DeltaTree() { - getRoot(Root)->Destroy(); -} - -/// getDeltaAt - Return the accumulated delta at the specified file offset. -/// This includes all insertions or delections that occurred *before* the -/// specified file index. -int DeltaTree::getDeltaAt(unsigned FileIndex) const { - const DeltaTreeNode *Node = getRoot(Root); - - int Result = 0; - - // Walk down the tree. 
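// What the tree walk that follows computes, reduced to a flat O(n) sketch
// (hypothetical names): the accumulated delta at a file offset is simply the
// sum of all deltas recorded at earlier offsets. The B-tree version gets the
// same answer faster by adding a whole subtree's FullDelta in one step
// instead of visiting every entry, and it treats an exact-offset match
// specially.
#include <cstdio>
#include <vector>

struct SourceDeltaEntry {
  unsigned FileLoc;               // original file offset the edit applied at
  int Delta;                      // bytes added or removed there
};

static int accumulatedDeltaAt(const std::vector<SourceDeltaEntry> &Entries,
                              unsigned FileIndex) {
  int Result = 0;
  for (size_t i = 0, e = Entries.size(); i != e; ++i)
    if (Entries[i].FileLoc < FileIndex)
      Result += Entries[i].Delta;
  return Result;
}

int main() {
  std::vector<SourceDeltaEntry> Entries;
  SourceDeltaEntry E1 = {5, +4};  // 4 bytes inserted at offset 5
  SourceDeltaEntry E2 = {12, -2}; // 2 bytes removed at offset 12
  Entries.push_back(E1);
  Entries.push_back(E2);
  std::printf("accumulated delta before offset 10: %d\n",
              accumulatedDeltaAt(Entries, 10));   // 4
}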
- while (1) { - // For all nodes, include any local deltas before the specified file - // index by summing them up directly. Keep track of how many were - // included. - unsigned NumValsGreater = 0; - for (unsigned e = Node->getNumValuesUsed(); NumValsGreater != e; - ++NumValsGreater) { - const SourceDelta &Val = Node->getValue(NumValsGreater); - - if (Val.FileLoc >= FileIndex) - break; - Result += Val.Delta; - } - - // If we have an interior node, include information about children and - // recurse. Otherwise, if we have a leaf, we're done. - const DeltaTreeInteriorNode *IN = dyn_cast(Node); - if (!IN) return Result; - - // Include any children to the left of the values we skipped, all of - // their deltas should be included as well. - for (unsigned i = 0; i != NumValsGreater; ++i) - Result += IN->getChild(i)->getFullDelta(); - - // If we found exactly the value we were looking for, break off the - // search early. There is no need to search the RHS of the value for - // partial results. - if (NumValsGreater != Node->getNumValuesUsed() && - Node->getValue(NumValsGreater).FileLoc == FileIndex) - return Result+IN->getChild(NumValsGreater)->getFullDelta(); - - // Otherwise, traverse down the tree. The selected subtree may be - // partially included in the range. - Node = IN->getChild(NumValsGreater); - } - // NOT REACHED. -} - -/// AddDelta - When a change is made that shifts around the text buffer, -/// this method is used to record that info. It inserts a delta of 'Delta' -/// into the current DeltaTree at offset FileIndex. -void DeltaTree::AddDelta(unsigned FileIndex, int Delta) { - assert(Delta && "Adding a noop?"); - DeltaTreeNode *MyRoot = getRoot(Root); - - DeltaTreeNode::InsertResult InsertRes; - if (MyRoot->DoInsertion(FileIndex, Delta, &InsertRes)) { - Root = MyRoot = new DeltaTreeInteriorNode(InsertRes); - } - -#ifdef VERIFY_TREE - VerifyTree(MyRoot); -#endif -} - diff --git a/lib/Rewrite/FixItRewriter.cpp b/lib/Rewrite/FixItRewriter.cpp deleted file mode 100644 index 3863adb..0000000 --- a/lib/Rewrite/FixItRewriter.cpp +++ /dev/null @@ -1,205 +0,0 @@ -//===--- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This is a diagnostic client adaptor that performs rewrites as -// suggested by code modification hints attached to diagnostics. It -// then forwards any diagnostics to the adapted diagnostic client. 
-// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/FixItRewriter.h" -#include "clang/Edit/Commit.h" -#include "clang/Edit/EditsReceiver.h" -#include "clang/Basic/FileManager.h" -#include "clang/Basic/SourceLocation.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Frontend/FrontendDiagnostic.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Support/Path.h" -#include "llvm/ADT/OwningPtr.h" -#include - -using namespace clang; - -FixItRewriter::FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr, - const LangOptions &LangOpts, - FixItOptions *FixItOpts) - : Diags(Diags), - Editor(SourceMgr, LangOpts), - Rewrite(SourceMgr, LangOpts), - FixItOpts(FixItOpts), - NumFailures(0), - PrevDiagSilenced(false) { - OwnsClient = Diags.ownsClient(); - Client = Diags.takeClient(); - Diags.setClient(this); -} - -FixItRewriter::~FixItRewriter() { - Diags.takeClient(); - Diags.setClient(Client, OwnsClient); -} - -bool FixItRewriter::WriteFixedFile(FileID ID, raw_ostream &OS) { - const RewriteBuffer *RewriteBuf = Rewrite.getRewriteBufferFor(ID); - if (!RewriteBuf) return true; - RewriteBuf->write(OS); - OS.flush(); - return false; -} - -namespace { - -class RewritesReceiver : public edit::EditsReceiver { - Rewriter &Rewrite; - -public: - RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { } - - virtual void insert(SourceLocation loc, StringRef text) { - Rewrite.InsertText(loc, text); - } - virtual void replace(CharSourceRange range, StringRef text) { - Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text); - } -}; - -} - -bool FixItRewriter::WriteFixedFiles( - std::vector > *RewrittenFiles) { - if (NumFailures > 0 && !FixItOpts->FixWhatYouCan) { - Diag(FullSourceLoc(), diag::warn_fixit_no_changes); - return true; - } - - RewritesReceiver Rec(Rewrite); - Editor.applyRewrites(Rec); - - for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) { - const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first); - int fd; - std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd); - std::string Err; - OwningPtr OS; - if (fd != -1) { - OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true)); - } else { - OS.reset(new llvm::raw_fd_ostream(Filename.c_str(), Err, - llvm::raw_fd_ostream::F_Binary)); - } - if (!Err.empty()) { - Diags.Report(clang::diag::err_fe_unable_to_open_output) - << Filename << Err; - continue; - } - RewriteBuffer &RewriteBuf = I->second; - RewriteBuf.write(*OS); - OS->flush(); - - if (RewrittenFiles) - RewrittenFiles->push_back(std::make_pair(Entry->getName(), Filename)); - } - - return false; -} - -bool FixItRewriter::IncludeInDiagnosticCounts() const { - return Client ? Client->IncludeInDiagnosticCounts() : true; -} - -void FixItRewriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel, - const Diagnostic &Info) { - // Default implementation (Warnings/errors count). - DiagnosticConsumer::HandleDiagnostic(DiagLevel, Info); - - if (!FixItOpts->Silent || - DiagLevel >= DiagnosticsEngine::Error || - (DiagLevel == DiagnosticsEngine::Note && !PrevDiagSilenced) || - (DiagLevel > DiagnosticsEngine::Note && Info.getNumFixItHints())) { - Client->HandleDiagnostic(DiagLevel, Info); - PrevDiagSilenced = false; - } else { - PrevDiagSilenced = true; - } - - // Skip over any diagnostics that are ignored or notes. - if (DiagLevel <= DiagnosticsEngine::Note) - return; - // Skip over errors if we are only fixing warnings. 
- if (DiagLevel >= DiagnosticsEngine::Error && FixItOpts->FixOnlyWarnings) { - ++NumFailures; - return; - } - - // Make sure that we can perform all of the modifications we - // in this diagnostic. - edit::Commit commit(Editor); - for (unsigned Idx = 0, Last = Info.getNumFixItHints(); - Idx < Last; ++Idx) { - const FixItHint &Hint = Info.getFixItHint(Idx); - - if (Hint.CodeToInsert.empty()) { - if (Hint.InsertFromRange.isValid()) - commit.insertFromRange(Hint.RemoveRange.getBegin(), - Hint.InsertFromRange, /*afterToken=*/false, - Hint.BeforePreviousInsertions); - else - commit.remove(Hint.RemoveRange); - } else { - if (Hint.RemoveRange.isTokenRange() || - Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd()) - commit.replace(Hint.RemoveRange, Hint.CodeToInsert); - else - commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert, - /*afterToken=*/false, Hint.BeforePreviousInsertions); - } - } - bool CanRewrite = Info.getNumFixItHints() > 0 && commit.isCommitable(); - - if (!CanRewrite) { - if (Info.getNumFixItHints() > 0) - Diag(Info.getLocation(), diag::note_fixit_in_macro); - - // If this was an error, refuse to perform any rewriting. - if (DiagLevel >= DiagnosticsEngine::Error) { - if (++NumFailures == 1) - Diag(Info.getLocation(), diag::note_fixit_unfixed_error); - } - return; - } - - if (!Editor.commit(commit)) { - ++NumFailures; - Diag(Info.getLocation(), diag::note_fixit_failed); - return; - } - - Diag(Info.getLocation(), diag::note_fixit_applied); -} - -/// \brief Emit a diagnostic via the adapted diagnostic client. -void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) { - // When producing this diagnostic, we temporarily bypass ourselves, - // clear out any current diagnostic, and let the downstream client - // format the diagnostic. - Diags.takeClient(); - Diags.setClient(Client); - Diags.Clear(); - Diags.Report(Loc, DiagID); - Diags.takeClient(); - Diags.setClient(this); -} - -DiagnosticConsumer *FixItRewriter::clone(DiagnosticsEngine &Diags) const { - return new FixItRewriter(Diags, Diags.getSourceManager(), - Rewrite.getLangOpts(), FixItOpts); -} - -FixItOptions::~FixItOptions() {} diff --git a/lib/Rewrite/Frontend/CMakeLists.txt b/lib/Rewrite/Frontend/CMakeLists.txt new file mode 100644 index 0000000..9017e47 --- /dev/null +++ b/lib/Rewrite/Frontend/CMakeLists.txt @@ -0,0 +1,28 @@ +add_clang_library(clangRewriteFrontend + FixItRewriter.cpp + FrontendActions.cpp + HTMLPrint.cpp + InclusionRewriter.cpp + RewriteMacros.cpp + RewriteModernObjC.cpp + RewriteObjC.cpp + RewriteTest.cpp + ) + +add_dependencies(clangRewriteFrontend + ClangAttrClasses + ClangAttrList + ClangAttrParsedAttrList + ClangCommentNodes + ClangDeclNodes + ClangDiagnosticCommon + ClangDiagnosticFrontend + ClangStmtNodes + ) + +target_link_libraries(clangRewriteFrontend + clangBasic + clangAST + clangParse + clangFrontend + ) diff --git a/lib/Rewrite/Frontend/FixItRewriter.cpp b/lib/Rewrite/Frontend/FixItRewriter.cpp new file mode 100644 index 0000000..43a1ab1 --- /dev/null +++ b/lib/Rewrite/Frontend/FixItRewriter.cpp @@ -0,0 +1,205 @@ +//===--- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is a diagnostic client adaptor that performs rewrites as +// suggested by code modification hints attached to diagnostics. 
It +// then forwards any diagnostics to the adapted diagnostic client. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Frontend/FixItRewriter.h" +#include "clang/Edit/Commit.h" +#include "clang/Edit/EditsReceiver.h" +#include "clang/Basic/FileManager.h" +#include "clang/Basic/SourceLocation.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Path.h" +#include "llvm/ADT/OwningPtr.h" +#include + +using namespace clang; + +FixItRewriter::FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr, + const LangOptions &LangOpts, + FixItOptions *FixItOpts) + : Diags(Diags), + Editor(SourceMgr, LangOpts), + Rewrite(SourceMgr, LangOpts), + FixItOpts(FixItOpts), + NumFailures(0), + PrevDiagSilenced(false) { + OwnsClient = Diags.ownsClient(); + Client = Diags.takeClient(); + Diags.setClient(this); +} + +FixItRewriter::~FixItRewriter() { + Diags.takeClient(); + Diags.setClient(Client, OwnsClient); +} + +bool FixItRewriter::WriteFixedFile(FileID ID, raw_ostream &OS) { + const RewriteBuffer *RewriteBuf = Rewrite.getRewriteBufferFor(ID); + if (!RewriteBuf) return true; + RewriteBuf->write(OS); + OS.flush(); + return false; +} + +namespace { + +class RewritesReceiver : public edit::EditsReceiver { + Rewriter &Rewrite; + +public: + RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { } + + virtual void insert(SourceLocation loc, StringRef text) { + Rewrite.InsertText(loc, text); + } + virtual void replace(CharSourceRange range, StringRef text) { + Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text); + } +}; + +} + +bool FixItRewriter::WriteFixedFiles( + std::vector > *RewrittenFiles) { + if (NumFailures > 0 && !FixItOpts->FixWhatYouCan) { + Diag(FullSourceLoc(), diag::warn_fixit_no_changes); + return true; + } + + RewritesReceiver Rec(Rewrite); + Editor.applyRewrites(Rec); + + for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) { + const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first); + int fd; + std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd); + std::string Err; + OwningPtr OS; + if (fd != -1) { + OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true)); + } else { + OS.reset(new llvm::raw_fd_ostream(Filename.c_str(), Err, + llvm::raw_fd_ostream::F_Binary)); + } + if (!Err.empty()) { + Diags.Report(clang::diag::err_fe_unable_to_open_output) + << Filename << Err; + continue; + } + RewriteBuffer &RewriteBuf = I->second; + RewriteBuf.write(*OS); + OS->flush(); + + if (RewrittenFiles) + RewrittenFiles->push_back(std::make_pair(Entry->getName(), Filename)); + } + + return false; +} + +bool FixItRewriter::IncludeInDiagnosticCounts() const { + return Client ? Client->IncludeInDiagnosticCounts() : true; +} + +void FixItRewriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel, + const Diagnostic &Info) { + // Default implementation (Warnings/errors count). + DiagnosticConsumer::HandleDiagnostic(DiagLevel, Info); + + if (!FixItOpts->Silent || + DiagLevel >= DiagnosticsEngine::Error || + (DiagLevel == DiagnosticsEngine::Note && !PrevDiagSilenced) || + (DiagLevel > DiagnosticsEngine::Note && Info.getNumFixItHints())) { + Client->HandleDiagnostic(DiagLevel, Info); + PrevDiagSilenced = false; + } else { + PrevDiagSilenced = true; + } + + // Skip over any diagnostics that are ignored or notes. 
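As a concrete usage sketch (not part of this patch), a tool would typically subclass FixItOptions and hand it to the rewriter before running whatever action produces the diagnostics. The MyFixItOptions policy and the ".fixed" suffix below are purely hypothetical; the FixItRewriter and FixItOptions interfaces are the ones added in this file.

#include "clang/Frontend/CompilerInstance.h"
#include "clang/Rewrite/Frontend/FixItRewriter.h"

namespace {
// Hypothetical rewrite policy: write the fixed output next to the original file.
class MyFixItOptions : public clang::FixItOptions {
public:
  MyFixItOptions() { FixWhatYouCan = true; }   // apply whatever hints survive
  virtual std::string RewriteFilename(const std::string &Filename, int &fd) {
    fd = -1;                     // -1 => let the rewriter open the file by name
    return Filename + ".fixed";  // illustrative naming scheme
  }
};
} // namespace

// Assuming CI is an already-initialized CompilerInstance (hypothetical caller).
static void applyFixIts(clang::CompilerInstance &CI) {
  MyFixItOptions Opts;
  clang::FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(),
                                CI.getLangOpts(), &Opts);
  // ... run the action that emits diagnostics carrying fix-it hints ...
  Rewriter.WriteFixedFiles();    // returns true if nothing could be written
}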
+ if (DiagLevel <= DiagnosticsEngine::Note) + return; + // Skip over errors if we are only fixing warnings. + if (DiagLevel >= DiagnosticsEngine::Error && FixItOpts->FixOnlyWarnings) { + ++NumFailures; + return; + } + + // Make sure that we can perform all of the modifications we + // in this diagnostic. + edit::Commit commit(Editor); + for (unsigned Idx = 0, Last = Info.getNumFixItHints(); + Idx < Last; ++Idx) { + const FixItHint &Hint = Info.getFixItHint(Idx); + + if (Hint.CodeToInsert.empty()) { + if (Hint.InsertFromRange.isValid()) + commit.insertFromRange(Hint.RemoveRange.getBegin(), + Hint.InsertFromRange, /*afterToken=*/false, + Hint.BeforePreviousInsertions); + else + commit.remove(Hint.RemoveRange); + } else { + if (Hint.RemoveRange.isTokenRange() || + Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd()) + commit.replace(Hint.RemoveRange, Hint.CodeToInsert); + else + commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert, + /*afterToken=*/false, Hint.BeforePreviousInsertions); + } + } + bool CanRewrite = Info.getNumFixItHints() > 0 && commit.isCommitable(); + + if (!CanRewrite) { + if (Info.getNumFixItHints() > 0) + Diag(Info.getLocation(), diag::note_fixit_in_macro); + + // If this was an error, refuse to perform any rewriting. + if (DiagLevel >= DiagnosticsEngine::Error) { + if (++NumFailures == 1) + Diag(Info.getLocation(), diag::note_fixit_unfixed_error); + } + return; + } + + if (!Editor.commit(commit)) { + ++NumFailures; + Diag(Info.getLocation(), diag::note_fixit_failed); + return; + } + + Diag(Info.getLocation(), diag::note_fixit_applied); +} + +/// \brief Emit a diagnostic via the adapted diagnostic client. +void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) { + // When producing this diagnostic, we temporarily bypass ourselves, + // clear out any current diagnostic, and let the downstream client + // format the diagnostic. + Diags.takeClient(); + Diags.setClient(Client); + Diags.Clear(); + Diags.Report(Loc, DiagID); + Diags.takeClient(); + Diags.setClient(this); +} + +DiagnosticConsumer *FixItRewriter::clone(DiagnosticsEngine &Diags) const { + return new FixItRewriter(Diags, Diags.getSourceManager(), + Rewrite.getLangOpts(), FixItOpts); +} + +FixItOptions::~FixItOptions() {} diff --git a/lib/Rewrite/Frontend/FrontendActions.cpp b/lib/Rewrite/Frontend/FrontendActions.cpp new file mode 100644 index 0000000..7d29b6d --- /dev/null +++ b/lib/Rewrite/Frontend/FrontendActions.cpp @@ -0,0 +1,192 @@ +//===--- FrontendActions.cpp ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
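Read in isolation, the hint handling in HandleDiagnostic above reduces to the mapping below. This is only an editorial paraphrase of the loop in this file (assuming the same headers), not an additional patch hunk.

// How one FixItHint becomes an edit::Commit operation (paraphrase of the
// loop in FixItRewriter::HandleDiagnostic above).
static void addHintToCommit(clang::edit::Commit &commit,
                            const clang::FixItHint &Hint) {
  if (Hint.CodeToInsert.empty()) {
    if (Hint.InsertFromRange.isValid())
      commit.insertFromRange(Hint.RemoveRange.getBegin(), Hint.InsertFromRange,
                             /*afterToken=*/false,
                             Hint.BeforePreviousInsertions);
    else
      commit.remove(Hint.RemoveRange);                    // pure removal
  } else if (Hint.RemoveRange.isTokenRange() ||
             Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd()) {
    commit.replace(Hint.RemoveRange, Hint.CodeToInsert);  // replacement
  } else {
    commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
                  /*afterToken=*/false,
                  Hint.BeforePreviousInsertions);          // pure insertion
  }
}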
+// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Frontend/FrontendActions.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/Lex/Preprocessor.h" +#include "clang/Parse/Parser.h" +#include "clang/Basic/FileManager.h" +#include "clang/Frontend/FrontendActions.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/Frontend/FrontendDiagnostic.h" +#include "clang/Frontend/Utils.h" +#include "clang/Rewrite/Frontend/ASTConsumers.h" +#include "clang/Rewrite/Frontend/FixItRewriter.h" +#include "clang/Rewrite/Frontend/Rewriters.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Path.h" +#include "llvm/Support/FileSystem.h" + +using namespace clang; + +//===----------------------------------------------------------------------===// +// AST Consumer Actions +//===----------------------------------------------------------------------===// + +ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI, + StringRef InFile) { + if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile)) + return CreateHTMLPrinter(OS, CI.getPreprocessor()); + return 0; +} + +FixItAction::FixItAction() {} +FixItAction::~FixItAction() {} + +ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI, + StringRef InFile) { + return new ASTConsumer(); +} + +namespace { +class FixItRewriteInPlace : public FixItOptions { +public: + std::string RewriteFilename(const std::string &Filename, int &fd) { + fd = -1; + return Filename; + } +}; + +class FixItActionSuffixInserter : public FixItOptions { + std::string NewSuffix; + +public: + FixItActionSuffixInserter(std::string NewSuffix, bool FixWhatYouCan) + : NewSuffix(NewSuffix) { + this->FixWhatYouCan = FixWhatYouCan; + } + + std::string RewriteFilename(const std::string &Filename, int &fd) { + fd = -1; + SmallString<128> Path(Filename); + llvm::sys::path::replace_extension(Path, + NewSuffix + llvm::sys::path::extension(Path)); + return Path.str(); + } +}; + +class FixItRewriteToTemp : public FixItOptions { +public: + std::string RewriteFilename(const std::string &Filename, int &fd) { + SmallString<128> Path; + Path = llvm::sys::path::filename(Filename); + Path += "-%%%%%%%%"; + Path += llvm::sys::path::extension(Filename); + SmallString<128> NewPath; + llvm::sys::fs::unique_file(Path.str(), fd, NewPath); + return NewPath.str(); + } +}; +} // end anonymous namespace + +bool FixItAction::BeginSourceFileAction(CompilerInstance &CI, + StringRef Filename) { + const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts(); + if (!FEOpts.FixItSuffix.empty()) { + FixItOpts.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix, + FEOpts.FixWhatYouCan)); + } else { + FixItOpts.reset(new FixItRewriteInPlace); + FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan; + } + Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(), + CI.getLangOpts(), FixItOpts.get())); + return true; +} + +void FixItAction::EndSourceFileAction() { + // Otherwise rewrite all files. 
+ Rewriter->WriteFixedFiles(); +} + +bool FixItRecompile::BeginInvocation(CompilerInstance &CI) { + + std::vector > RewrittenFiles; + bool err = false; + { + const FrontendOptions &FEOpts = CI.getFrontendOpts(); + OwningPtr FixAction(new SyntaxOnlyAction()); + if (FixAction->BeginSourceFile(CI, FEOpts.Inputs[0])) { + OwningPtr FixItOpts; + if (FEOpts.FixToTemporaries) + FixItOpts.reset(new FixItRewriteToTemp()); + else + FixItOpts.reset(new FixItRewriteInPlace()); + FixItOpts->Silent = true; + FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan; + FixItOpts->FixOnlyWarnings = FEOpts.FixOnlyWarnings; + FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(), + CI.getLangOpts(), FixItOpts.get()); + FixAction->Execute(); + + err = Rewriter.WriteFixedFiles(&RewrittenFiles); + + FixAction->EndSourceFile(); + CI.setSourceManager(0); + CI.setFileManager(0); + } else { + err = true; + } + } + if (err) + return false; + CI.getDiagnosticClient().clear(); + CI.getDiagnostics().Reset(); + + PreprocessorOptions &PPOpts = CI.getPreprocessorOpts(); + PPOpts.RemappedFiles.insert(PPOpts.RemappedFiles.end(), + RewrittenFiles.begin(), RewrittenFiles.end()); + PPOpts.RemappedFilesKeepOriginalName = false; + + return true; +} + +//===----------------------------------------------------------------------===// +// Preprocessor Actions +//===----------------------------------------------------------------------===// + +ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, + StringRef InFile) { + if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) { + if (CI.getLangOpts().ObjCRuntime.isNonFragile()) + return CreateModernObjCRewriter(InFile, OS, + CI.getDiagnostics(), CI.getLangOpts(), + CI.getDiagnosticOpts().NoRewriteMacros); + return CreateObjCRewriter(InFile, OS, + CI.getDiagnostics(), CI.getLangOpts(), + CI.getDiagnosticOpts().NoRewriteMacros); + } + return 0; +} + +void RewriteMacrosAction::ExecuteAction() { + CompilerInstance &CI = getCompilerInstance(); + raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile()); + if (!OS) return; + + RewriteMacrosInInput(CI.getPreprocessor(), OS); +} + +void RewriteTestAction::ExecuteAction() { + CompilerInstance &CI = getCompilerInstance(); + raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile()); + if (!OS) return; + + DoRewriteTest(CI.getPreprocessor(), OS); +} + +void RewriteIncludesAction::ExecuteAction() { + CompilerInstance &CI = getCompilerInstance(); + raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile()); + if (!OS) return; + + RewriteIncludesInInput(CI.getPreprocessor(), OS, + CI.getPreprocessorOutputOpts()); +} diff --git a/lib/Rewrite/Frontend/HTMLPrint.cpp b/lib/Rewrite/Frontend/HTMLPrint.cpp new file mode 100644 index 0000000..79e4447 --- /dev/null +++ b/lib/Rewrite/Frontend/HTMLPrint.cpp @@ -0,0 +1,94 @@ +//===--- HTMLPrint.cpp - Source code -> HTML pretty-printing --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Pretty-printing of source code to HTML. 
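The fix-and-recompile flow in FixItRecompile::BeginInvocation above hinges on one step: every file that was rewritten gets remapped so that the subsequent real compilation sees the fixed text. A rough sketch of just that step (the helper name is hypothetical; the member accesses mirror the code above):

// Remap each (original, rewritten) pair before recompiling.
static void remapRewrittenFiles(clang::CompilerInstance &CI,
    const std::vector<std::pair<std::string, std::string> > &RewrittenFiles) {
  clang::PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
  PPOpts.RemappedFiles.insert(PPOpts.RemappedFiles.end(),
                              RewrittenFiles.begin(), RewrittenFiles.end());
  // Diagnostics from the second run should point at the rewritten copies.
  PPOpts.RemappedFilesKeepOriginalName = false;
}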
+// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Frontend/ASTConsumers.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/Basic/Diagnostic.h" +#include "clang/Basic/FileManager.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Lex/Preprocessor.h" +#include "clang/Rewrite/Core/HTMLRewrite.h" +#include "clang/Rewrite/Core/Rewriter.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +using namespace clang; + +//===----------------------------------------------------------------------===// +// Functional HTML pretty-printing. +//===----------------------------------------------------------------------===// + +namespace { + class HTMLPrinter : public ASTConsumer { + Rewriter R; + raw_ostream *Out; + Preprocessor &PP; + bool SyntaxHighlight, HighlightMacros; + + public: + HTMLPrinter(raw_ostream *OS, Preprocessor &pp, + bool _SyntaxHighlight, bool _HighlightMacros) + : Out(OS), PP(pp), SyntaxHighlight(_SyntaxHighlight), + HighlightMacros(_HighlightMacros) {} + + void Initialize(ASTContext &context); + void HandleTranslationUnit(ASTContext &Ctx); + }; +} + +ASTConsumer* clang::CreateHTMLPrinter(raw_ostream *OS, + Preprocessor &PP, + bool SyntaxHighlight, + bool HighlightMacros) { + return new HTMLPrinter(OS, PP, SyntaxHighlight, HighlightMacros); +} + +void HTMLPrinter::Initialize(ASTContext &context) { + R.setSourceMgr(context.getSourceManager(), context.getLangOpts()); +} + +void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) { + if (PP.getDiagnostics().hasErrorOccurred()) + return; + + // Format the file. + FileID FID = R.getSourceMgr().getMainFileID(); + const FileEntry* Entry = R.getSourceMgr().getFileEntryForID(FID); + const char* Name; + // In some cases, in particular the case where the input is from stdin, + // there is no entry. Fall back to the memory buffer for a name in those + // cases. + if (Entry) + Name = Entry->getName(); + else + Name = R.getSourceMgr().getBuffer(FID)->getBufferIdentifier(); + + html::AddLineNumbers(R, FID); + html::AddHeaderFooterInternalBuiltinCSS(R, FID, Name); + + // If we have a preprocessor, relex the file and syntax highlight. + // We might not have a preprocessor if we come from a deserialized AST file, + // for example. + + if (SyntaxHighlight) html::SyntaxHighlight(R, FID, PP); + if (HighlightMacros) html::HighlightMacros(R, FID, PP); + html::EscapeText(R, FID, false, true); + + // Emit the HTML. + const RewriteBuffer &RewriteBuf = R.getEditBuffer(FID); + char *Buffer = (char*)malloc(RewriteBuf.size()); + std::copy(RewriteBuf.begin(), RewriteBuf.end(), Buffer); + Out->write(Buffer, RewriteBuf.size()); + free(Buffer); +} diff --git a/lib/Rewrite/Frontend/InclusionRewriter.cpp b/lib/Rewrite/Frontend/InclusionRewriter.cpp new file mode 100644 index 0000000..9d1bec9 --- /dev/null +++ b/lib/Rewrite/Frontend/InclusionRewriter.cpp @@ -0,0 +1,363 @@ +//===--- InclusionRewriter.cpp - Rewrite includes into their expansions ---===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This code rewrites include invocations into their expansions. This gives you +// a file with all included files merged into it. 
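A small aside on HTMLPrinter::HandleTranslationUnit above: the final malloc/std::copy/free sequence only exists to push the rewritten bytes into the output stream; the RewriteBuffer::write interface used elsewhere in this patch (e.g. FixItRewriter::WriteFixedFile) does the same thing directly. A possible, untested simplification:

  // Equivalent to the manual copy, streaming the edit buffer directly.
  const RewriteBuffer &RewriteBuf = R.getEditBuffer(FID);
  RewriteBuf.write(*Out);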
+// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Frontend/Rewriters.h" +#include "clang/Lex/Preprocessor.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Frontend/PreprocessorOutputOptions.h" +#include "llvm/Support/raw_ostream.h" + +using namespace clang; +using namespace llvm; + +namespace { + +class InclusionRewriter : public PPCallbacks { + /// Information about which #includes were actually performed, + /// created by preprocessor callbacks. + struct FileChange { + SourceLocation From; + FileID Id; + SrcMgr::CharacteristicKind FileType; + FileChange(SourceLocation From) : From(From) { + } + }; + Preprocessor &PP; ///< Used to find inclusion directives. + SourceManager &SM; ///< Used to read and manage source files. + raw_ostream &OS; ///< The destination stream for rewritten contents. + bool ShowLineMarkers; ///< Show #line markers. + bool UseLineDirective; ///< Use of line directives or line markers. + typedef std::map FileChangeMap; + FileChangeMap FileChanges; /// Tracks which files were included where. + /// Used transitively for building up the FileChanges mapping over the + /// various \c PPCallbacks callbacks. + FileChangeMap::iterator LastInsertedFileChange; +public: + InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers); + bool Process(FileID FileId, SrcMgr::CharacteristicKind FileType); +private: + virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason, + SrcMgr::CharacteristicKind FileType, + FileID PrevFID); + virtual void FileSkipped(const FileEntry &ParentFile, + const Token &FilenameTok, + SrcMgr::CharacteristicKind FileType); + virtual void InclusionDirective(SourceLocation HashLoc, + const Token &IncludeTok, + StringRef FileName, + bool IsAngled, + CharSourceRange FilenameRange, + const FileEntry *File, + StringRef SearchPath, + StringRef RelativePath, + const Module *Imported); + void WriteLineInfo(const char *Filename, int Line, + SrcMgr::CharacteristicKind FileType, + StringRef EOL, StringRef Extra = StringRef()); + void OutputContentUpTo(const MemoryBuffer &FromFile, + unsigned &WriteFrom, unsigned WriteTo, + StringRef EOL, int &lines, + bool EnsureNewline = false); + void CommentOutDirective(Lexer &DirectivesLex, const Token &StartToken, + const MemoryBuffer &FromFile, StringRef EOL, + unsigned &NextToWrite, int &Lines); + const FileChange *FindFileChangeLocation(SourceLocation Loc) const; + StringRef NextIdentifierName(Lexer &RawLex, Token &RawToken); +}; + +} // end anonymous namespace + +/// Initializes an InclusionRewriter with a \p PP source and \p OS destination. +InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS, + bool ShowLineMarkers) + : PP(PP), SM(PP.getSourceManager()), OS(OS), + ShowLineMarkers(ShowLineMarkers), + LastInsertedFileChange(FileChanges.end()) { + // If we're in microsoft mode, use normal #line instead of line markers. + UseLineDirective = PP.getLangOpts().MicrosoftExt; +} + +/// Write appropriate line information as either #line directives or GNU line +/// markers depending on what mode we're in, including the \p Filename and +/// \p Line we are located at, using the specified \p EOL line separator, and +/// any \p Extra context specifiers in GNU line directives. 
+void InclusionRewriter::WriteLineInfo(const char *Filename, int Line, + SrcMgr::CharacteristicKind FileType, + StringRef EOL, StringRef Extra) { + if (!ShowLineMarkers) + return; + if (UseLineDirective) { + OS << "#line" << ' ' << Line << ' ' << '"' << Filename << '"'; + } else { + // Use GNU linemarkers as described here: + // http://gcc.gnu.org/onlinedocs/cpp/Preprocessor-Output.html + OS << '#' << ' ' << Line << ' ' << '"' << Filename << '"'; + if (!Extra.empty()) + OS << Extra; + if (FileType == SrcMgr::C_System) + // "`3' This indicates that the following text comes from a system header + // file, so certain warnings should be suppressed." + OS << " 3"; + else if (FileType == SrcMgr::C_ExternCSystem) + // as above for `3', plus "`4' This indicates that the following text + // should be treated as being wrapped in an implicit extern "C" block." + OS << " 3 4"; + } + OS << EOL; +} + +/// FileChanged - Whenever the preprocessor enters or exits a #include file +/// it invokes this handler. +void InclusionRewriter::FileChanged(SourceLocation Loc, + FileChangeReason Reason, + SrcMgr::CharacteristicKind NewFileType, + FileID) { + if (Reason != EnterFile) + return; + if (LastInsertedFileChange == FileChanges.end()) + // we didn't reach this file (eg: the main file) via an inclusion directive + return; + LastInsertedFileChange->second.Id = FullSourceLoc(Loc, SM).getFileID(); + LastInsertedFileChange->second.FileType = NewFileType; + LastInsertedFileChange = FileChanges.end(); +} + +/// Called whenever an inclusion is skipped due to canonical header protection +/// macros. +void InclusionRewriter::FileSkipped(const FileEntry &/*ParentFile*/, + const Token &/*FilenameTok*/, + SrcMgr::CharacteristicKind /*FileType*/) { + assert(LastInsertedFileChange != FileChanges.end() && "A file, that wasn't " + "found via an inclusion directive, was skipped"); + FileChanges.erase(LastInsertedFileChange); + LastInsertedFileChange = FileChanges.end(); +} + +/// This should be called whenever the preprocessor encounters include +/// directives. It does not say whether the file has been included, but it +/// provides more information about the directive (hash location instead +/// of location inside the included file). It is assumed that the matching +/// FileChanged() or FileSkipped() is called after this. +void InclusionRewriter::InclusionDirective(SourceLocation HashLoc, + const Token &/*IncludeTok*/, + StringRef /*FileName*/, + bool /*IsAngled*/, + CharSourceRange /*FilenameRange*/, + const FileEntry * /*File*/, + StringRef /*SearchPath*/, + StringRef /*RelativePath*/, + const Module * /*Imported*/) { + assert(LastInsertedFileChange == FileChanges.end() && "Another inclusion " + "directive was found before the previous one was processed"); + std::pair p = FileChanges.insert( + std::make_pair(HashLoc.getRawEncoding(), FileChange(HashLoc))); + assert(p.second && "Unexpected revisitation of the same include directive"); + LastInsertedFileChange = p.first; +} + +/// Simple lookup for a SourceLocation (specifically one denoting the hash in +/// an inclusion directive) in the map of inclusion information, FileChanges. +const InclusionRewriter::FileChange * +InclusionRewriter::FindFileChangeLocation(SourceLocation Loc) const { + FileChangeMap::const_iterator I = FileChanges.find(Loc.getRawEncoding()); + if (I != FileChanges.end()) + return &I->second; + return NULL; +} + +/// Detect the likely line ending style of \p FromFile by examining the first +/// newline found within it. 
+static StringRef DetectEOL(const MemoryBuffer &FromFile) { + // detect what line endings the file uses, so that added content does not mix + // the style + const char *Pos = strchr(FromFile.getBufferStart(), '\n'); + if (Pos == NULL) + return "\n"; + if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r') + return "\n\r"; + if (Pos - 1 >= FromFile.getBufferStart() && Pos[-1] == '\r') + return "\r\n"; + return "\n"; +} + +/// Writes out bytes from \p FromFile, starting at \p NextToWrite and ending at +/// \p WriteTo - 1. +void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile, + unsigned &WriteFrom, unsigned WriteTo, + StringRef EOL, int &Line, + bool EnsureNewline) { + if (WriteTo <= WriteFrom) + return; + OS.write(FromFile.getBufferStart() + WriteFrom, WriteTo - WriteFrom); + // count lines manually, it's faster than getPresumedLoc() + Line += std::count(FromFile.getBufferStart() + WriteFrom, + FromFile.getBufferStart() + WriteTo, '\n'); + if (EnsureNewline) { + char LastChar = FromFile.getBufferStart()[WriteTo - 1]; + if (LastChar != '\n' && LastChar != '\r') + OS << EOL; + } + WriteFrom = WriteTo; +} + +/// Print characters from \p FromFile starting at \p NextToWrite up until the +/// inclusion directive at \p StartToken, then print out the inclusion +/// inclusion directive disabled by a #if directive, updating \p NextToWrite +/// and \p Line to track the number of source lines visited and the progress +/// through the \p FromFile buffer. +void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex, + const Token &StartToken, + const MemoryBuffer &FromFile, + StringRef EOL, + unsigned &NextToWrite, int &Line) { + OutputContentUpTo(FromFile, NextToWrite, + SM.getFileOffset(StartToken.getLocation()), EOL, Line); + Token DirectiveToken; + do { + DirectiveLex.LexFromRawLexer(DirectiveToken); + } while (!DirectiveToken.is(tok::eod) && DirectiveToken.isNot(tok::eof)); + OS << "#if 0 /* expanded by -frewrite-includes */" << EOL; + OutputContentUpTo(FromFile, NextToWrite, + SM.getFileOffset(DirectiveToken.getLocation()) + DirectiveToken.getLength(), + EOL, Line); + OS << "#endif /* expanded by -frewrite-includes */" << EOL; +} + +/// Find the next identifier in the pragma directive specified by \p RawToken. +StringRef InclusionRewriter::NextIdentifierName(Lexer &RawLex, + Token &RawToken) { + RawLex.LexFromRawLexer(RawToken); + if (RawToken.is(tok::raw_identifier)) + PP.LookUpIdentifierInfo(RawToken); + if (RawToken.is(tok::identifier)) + return RawToken.getIdentifierInfo()->getName(); + return StringRef(); +} + +/// Use a raw lexer to analyze \p FileId, inccrementally copying parts of it +/// and including content of included files recursively. +bool InclusionRewriter::Process(FileID FileId, + SrcMgr::CharacteristicKind FileType) +{ + bool Invalid; + const MemoryBuffer &FromFile = *SM.getBuffer(FileId, &Invalid); + if (Invalid) // invalid inclusion + return true; + const char *FileName = FromFile.getBufferIdentifier(); + Lexer RawLex(FileId, &FromFile, PP.getSourceManager(), PP.getLangOpts()); + RawLex.SetCommentRetentionState(false); + + StringRef EOL = DetectEOL(FromFile); + + // Per the GNU docs: "1" indicates the start of a new file. + WriteLineInfo(FileName, 1, FileType, EOL, " 1"); + + if (SM.getFileIDSize(FileId) == 0) + return true; + + // The next byte to be copied from the source file + unsigned NextToWrite = 0; + int Line = 1; // The current input file line number. 
+ + Token RawToken; + RawLex.LexFromRawLexer(RawToken); + + // TODO: Consider adding a switch that strips possibly unimportant content, + // such as comments, to reduce the size of repro files. + while (RawToken.isNot(tok::eof)) { + if (RawToken.is(tok::hash) && RawToken.isAtStartOfLine()) { + RawLex.setParsingPreprocessorDirective(true); + Token HashToken = RawToken; + RawLex.LexFromRawLexer(RawToken); + if (RawToken.is(tok::raw_identifier)) + PP.LookUpIdentifierInfo(RawToken); + if (RawToken.is(tok::identifier)) { + switch (RawToken.getIdentifierInfo()->getPPKeywordID()) { + case tok::pp_include: + case tok::pp_include_next: + case tok::pp_import: { + CommentOutDirective(RawLex, HashToken, FromFile, EOL, NextToWrite, + Line); + if (const FileChange *Change = FindFileChangeLocation( + HashToken.getLocation())) { + // now include and recursively process the file + if (Process(Change->Id, Change->FileType)) + // and set lineinfo back to this file, if the nested one was + // actually included + // `2' indicates returning to a file (after having included + // another file. + WriteLineInfo(FileName, Line, FileType, EOL, " 2"); + } else + // fix up lineinfo (since commented out directive changed line + // numbers) for inclusions that were skipped due to header guards + WriteLineInfo(FileName, Line, FileType, EOL); + break; + } + case tok::pp_pragma: { + StringRef Identifier = NextIdentifierName(RawLex, RawToken); + if (Identifier == "clang" || Identifier == "GCC") { + if (NextIdentifierName(RawLex, RawToken) == "system_header") { + // keep the directive in, commented out + CommentOutDirective(RawLex, HashToken, FromFile, EOL, + NextToWrite, Line); + // update our own type + FileType = SM.getFileCharacteristic(RawToken.getLocation()); + WriteLineInfo(FileName, Line, FileType, EOL); + } + } else if (Identifier == "once") { + // keep the directive in, commented out + CommentOutDirective(RawLex, HashToken, FromFile, EOL, + NextToWrite, Line); + WriteLineInfo(FileName, Line, FileType, EOL); + } + break; + } + default: + break; + } + } + RawLex.setParsingPreprocessorDirective(false); + } + RawLex.LexFromRawLexer(RawToken); + } + OutputContentUpTo(FromFile, NextToWrite, + SM.getFileOffset(SM.getLocForEndOfFile(FileId)) + 1, EOL, Line, + /*EnsureNewline*/true); + return true; +} + +/// InclusionRewriterInInput - Implement -frewrite-includes mode. +void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS, + const PreprocessorOutputOptions &Opts) { + SourceManager &SM = PP.getSourceManager(); + InclusionRewriter *Rewrite = new InclusionRewriter(PP, *OS, + Opts.ShowLineMarkers); + PP.addPPCallbacks(Rewrite); + + // First let the preprocessor process the entire file and call callbacks. + // Callbacks will record which #include's were actually performed. + PP.EnterMainSourceFile(); + Token Tok; + // Only preprocessor directives matter here, so disable macro expansion + // everywhere else as an optimization. + // TODO: It would be even faster if the preprocessor could be switched + // to a mode where it would parse only preprocessor directives and comments, + // nothing else matters for parsing or processing. 
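To make the transformation above concrete: for a translation unit containing #include "a.h", the -frewrite-includes output looks roughly like the snippet below. Line numbers and paths are illustrative; the #if 0 markers and the GNU line-marker format are the strings emitted by CommentOutDirective and WriteLineInfo above.

#if 0 /* expanded by -frewrite-includes */
#include "a.h"
#endif /* expanded by -frewrite-includes */
# 1 "./a.h" 1
/* ... contents of a.h, recursively processed the same way ... */
# 4 "main.c" 2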
+ PP.SetMacroExpansionOnlyInDirectives(); + do { + PP.Lex(Tok); + } while (Tok.isNot(tok::eof)); + Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User); + OS->flush(); +} diff --git a/lib/Rewrite/Frontend/Makefile b/lib/Rewrite/Frontend/Makefile new file mode 100644 index 0000000..ac97d40 --- /dev/null +++ b/lib/Rewrite/Frontend/Makefile @@ -0,0 +1,18 @@ +##===- clang/lib/Rewrite/Makefile --------------------------*- Makefile -*-===## +# +# The LLVM Compiler Infrastructure +# +# This file is distributed under the University of Illinois Open Source +# License. See LICENSE.TXT for details. +# +##===----------------------------------------------------------------------===## +# +# This implements code transformation / rewriting facilities. +# +##===----------------------------------------------------------------------===## + +CLANG_LEVEL := ../../.. +LIBRARYNAME := clangRewriteFrontend + +include $(CLANG_LEVEL)/Makefile + diff --git a/lib/Rewrite/Frontend/RewriteMacros.cpp b/lib/Rewrite/Frontend/RewriteMacros.cpp new file mode 100644 index 0000000..f399dd5 --- /dev/null +++ b/lib/Rewrite/Frontend/RewriteMacros.cpp @@ -0,0 +1,217 @@ +//===--- RewriteMacros.cpp - Rewrite macros into their expansions ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This code rewrites macro invocations into their expansions. This gives you +// a macro expanded file that retains comments and #includes. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Frontend/Rewriters.h" +#include "clang/Rewrite/Core/Rewriter.h" +#include "clang/Lex/Preprocessor.h" +#include "clang/Basic/SourceManager.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Path.h" +#include "llvm/ADT/OwningPtr.h" +#include + +using namespace clang; + +/// isSameToken - Return true if the two specified tokens start have the same +/// content. +static bool isSameToken(Token &RawTok, Token &PPTok) { + // If two tokens have the same kind and the same identifier info, they are + // obviously the same. + if (PPTok.getKind() == RawTok.getKind() && + PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo()) + return true; + + // Otherwise, if they are different but have the same identifier info, they + // are also considered to be the same. This allows keywords and raw lexed + // identifiers with the same name to be treated the same. + if (PPTok.getIdentifierInfo() && + PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo()) + return true; + + return false; +} + + +/// GetNextRawTok - Return the next raw token in the stream, skipping over +/// comments if ReturnComment is false. +static const Token &GetNextRawTok(const std::vector &RawTokens, + unsigned &CurTok, bool ReturnComment) { + assert(CurTok < RawTokens.size() && "Overran eof!"); + + // If the client doesn't want comments and we have one, skip it. + if (!ReturnComment && RawTokens[CurTok].is(tok::comment)) + ++CurTok; + + return RawTokens[CurTok++]; +} + + +/// LexRawTokensFromMainFile - Lets all the raw tokens from the main file into +/// the specified vector. +static void LexRawTokensFromMainFile(Preprocessor &PP, + std::vector &RawTokens) { + SourceManager &SM = PP.getSourceManager(); + + // Create a lexer to lex all the tokens of the main file in raw mode. 
Even + // though it is in raw mode, it will not return comments. + const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID()); + Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts()); + + // Switch on comment lexing because we really do want them. + RawLex.SetCommentRetentionState(true); + + Token RawTok; + do { + RawLex.LexFromRawLexer(RawTok); + + // If we have an identifier with no identifier info for our raw token, look + // up the indentifier info. This is important for equality comparison of + // identifier tokens. + if (RawTok.is(tok::raw_identifier)) + PP.LookUpIdentifierInfo(RawTok); + + RawTokens.push_back(RawTok); + } while (RawTok.isNot(tok::eof)); +} + + +/// RewriteMacrosInInput - Implement -rewrite-macros mode. +void clang::RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS) { + SourceManager &SM = PP.getSourceManager(); + + Rewriter Rewrite; + Rewrite.setSourceMgr(SM, PP.getLangOpts()); + RewriteBuffer &RB = Rewrite.getEditBuffer(SM.getMainFileID()); + + std::vector RawTokens; + LexRawTokensFromMainFile(PP, RawTokens); + unsigned CurRawTok = 0; + Token RawTok = GetNextRawTok(RawTokens, CurRawTok, false); + + + // Get the first preprocessing token. + PP.EnterMainSourceFile(); + Token PPTok; + PP.Lex(PPTok); + + // Preprocess the input file in parallel with raw lexing the main file. Ignore + // all tokens that are preprocessed from a file other than the main file (e.g. + // a header). If we see tokens that are in the preprocessed file but not the + // lexed file, we have a macro expansion. If we see tokens in the lexed file + // that aren't in the preprocessed view, we have macros that expand to no + // tokens, or macro arguments etc. + while (RawTok.isNot(tok::eof) || PPTok.isNot(tok::eof)) { + SourceLocation PPLoc = SM.getExpansionLoc(PPTok.getLocation()); + + // If PPTok is from a different source file, ignore it. + if (!SM.isFromMainFile(PPLoc)) { + PP.Lex(PPTok); + continue; + } + + // If the raw file hits a preprocessor directive, they will be extra tokens + // in the raw file that don't exist in the preprocsesed file. However, we + // choose to preserve them in the output file and otherwise handle them + // specially. + if (RawTok.is(tok::hash) && RawTok.isAtStartOfLine()) { + // If this is a #warning directive or #pragma mark (GNU extensions), + // comment the line out. + if (RawTokens[CurRawTok].is(tok::identifier)) { + const IdentifierInfo *II = RawTokens[CurRawTok].getIdentifierInfo(); + if (II->getName() == "warning") { + // Comment out #warning. + RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//"); + } else if (II->getName() == "pragma" && + RawTokens[CurRawTok+1].is(tok::identifier) && + (RawTokens[CurRawTok+1].getIdentifierInfo()->getName() == + "mark")) { + // Comment out #pragma mark. + RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//"); + } + } + + // Otherwise, if this is a #include or some other directive, just leave it + // in the file by skipping over the line. + RawTok = GetNextRawTok(RawTokens, CurRawTok, false); + while (!RawTok.isAtStartOfLine() && RawTok.isNot(tok::eof)) + RawTok = GetNextRawTok(RawTokens, CurRawTok, false); + continue; + } + + // Okay, both tokens are from the same file. Get their offsets from the + // start of the file. + unsigned PPOffs = SM.getFileOffset(PPLoc); + unsigned RawOffs = SM.getFileOffset(RawTok.getLocation()); + + // If the offsets are the same and the token kind is the same, ignore them. 
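One concrete effect of the directive handling above (the input is illustrative, not taken from the patch's tests): a #warning directive survives -rewrite-macros, but commented out, because it exists in the raw token stream while the preprocessed stream drops it.

  // Input:
  #warning fix me
  // Output of -rewrite-macros (the directive is kept, commented out):
  //#warning fix me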
+ if (PPOffs == RawOffs && isSameToken(RawTok, PPTok)) { + RawTok = GetNextRawTok(RawTokens, CurRawTok, false); + PP.Lex(PPTok); + continue; + } + + // If the PP token is farther along than the raw token, something was + // deleted. Comment out the raw token. + if (RawOffs <= PPOffs) { + // Comment out a whole run of tokens instead of bracketing each one with + // comments. Add a leading space if RawTok didn't have one. + bool HasSpace = RawTok.hasLeadingSpace(); + RB.InsertTextAfter(RawOffs, &" /*"[HasSpace]); + unsigned EndPos; + + do { + EndPos = RawOffs+RawTok.getLength(); + + RawTok = GetNextRawTok(RawTokens, CurRawTok, true); + RawOffs = SM.getFileOffset(RawTok.getLocation()); + + if (RawTok.is(tok::comment)) { + // Skip past the comment. + RawTok = GetNextRawTok(RawTokens, CurRawTok, false); + break; + } + + } while (RawOffs <= PPOffs && !RawTok.isAtStartOfLine() && + (PPOffs != RawOffs || !isSameToken(RawTok, PPTok))); + + RB.InsertTextBefore(EndPos, "*/"); + continue; + } + + // Otherwise, there was a replacement an expansion. Insert the new token + // in the output buffer. Insert the whole run of new tokens at once to get + // them in the right order. + unsigned InsertPos = PPOffs; + std::string Expansion; + while (PPOffs < RawOffs) { + Expansion += ' ' + PP.getSpelling(PPTok); + PP.Lex(PPTok); + PPLoc = SM.getExpansionLoc(PPTok.getLocation()); + PPOffs = SM.getFileOffset(PPLoc); + } + Expansion += ' '; + RB.InsertTextBefore(InsertPos, Expansion); + } + + // Get the buffer corresponding to MainFileID. If we haven't changed it, then + // we are done. + if (const RewriteBuffer *RewriteBuf = + Rewrite.getRewriteBufferFor(SM.getMainFileID())) { + //printf("Changed:\n"); + *OS << std::string(RewriteBuf->begin(), RewriteBuf->end()); + } else { + fprintf(stderr, "No changes\n"); + } + OS->flush(); +} diff --git a/lib/Rewrite/Frontend/RewriteModernObjC.cpp b/lib/Rewrite/Frontend/RewriteModernObjC.cpp new file mode 100644 index 0000000..4b56b37 --- /dev/null +++ b/lib/Rewrite/Frontend/RewriteModernObjC.cpp @@ -0,0 +1,7607 @@ +//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Hacks and fun related to the code rewriter. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Frontend/ASTConsumers.h" +#include "clang/Rewrite/Core/Rewriter.h" +#include "clang/AST/AST.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/ParentMap.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/IdentifierTable.h" +#include "clang/Basic/Diagnostic.h" +#include "clang/Lex/Lexer.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/ADT/DenseSet.h" + +using namespace clang; +using llvm::utostr; + +namespace { + class RewriteModernObjC : public ASTConsumer { + protected: + + enum { + BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)), + block, ... 
*/ + BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */ + BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the + __block variable */ + BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy + helpers */ + BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose + support routines */ + BLOCK_BYREF_CURRENT_MAX = 256 + }; + + enum { + BLOCK_NEEDS_FREE = (1 << 24), + BLOCK_HAS_COPY_DISPOSE = (1 << 25), + BLOCK_HAS_CXX_OBJ = (1 << 26), + BLOCK_IS_GC = (1 << 27), + BLOCK_IS_GLOBAL = (1 << 28), + BLOCK_HAS_DESCRIPTOR = (1 << 29) + }; + static const int OBJC_ABI_VERSION = 7; + + Rewriter Rewrite; + DiagnosticsEngine &Diags; + const LangOptions &LangOpts; + ASTContext *Context; + SourceManager *SM; + TranslationUnitDecl *TUDecl; + FileID MainFileID; + const char *MainFileStart, *MainFileEnd; + Stmt *CurrentBody; + ParentMap *PropParentMap; // created lazily. + std::string InFileName; + raw_ostream* OutFile; + std::string Preamble; + + TypeDecl *ProtocolTypeDecl; + VarDecl *GlobalVarDecl; + Expr *GlobalConstructionExp; + unsigned RewriteFailedDiag; + unsigned GlobalBlockRewriteFailedDiag; + // ObjC string constant support. + unsigned NumObjCStringLiterals; + VarDecl *ConstantStringClassReference; + RecordDecl *NSStringRecord; + + // ObjC foreach break/continue generation support. + int BcLabelCount; + + unsigned TryFinallyContainsReturnDiag; + // Needed for super. + ObjCMethodDecl *CurMethodDef; + RecordDecl *SuperStructDecl; + RecordDecl *ConstantStringDecl; + + FunctionDecl *MsgSendFunctionDecl; + FunctionDecl *MsgSendSuperFunctionDecl; + FunctionDecl *MsgSendStretFunctionDecl; + FunctionDecl *MsgSendSuperStretFunctionDecl; + FunctionDecl *MsgSendFpretFunctionDecl; + FunctionDecl *GetClassFunctionDecl; + FunctionDecl *GetMetaClassFunctionDecl; + FunctionDecl *GetSuperClassFunctionDecl; + FunctionDecl *SelGetUidFunctionDecl; + FunctionDecl *CFStringFunctionDecl; + FunctionDecl *SuperContructorFunctionDecl; + FunctionDecl *CurFunctionDef; + + /* Misc. containers needed for meta-data rewrite. */ + SmallVector ClassImplementation; + SmallVector CategoryImplementation; + llvm::SmallPtrSet ObjCSynthesizedStructs; + llvm::SmallPtrSet ObjCSynthesizedProtocols; + llvm::SmallPtrSet ObjCWrittenInterfaces; + llvm::SmallPtrSet GlobalDefinedTags; + SmallVector ObjCInterfacesSeen; + /// DefinedNonLazyClasses - List of defined "non-lazy" classes. + SmallVector DefinedNonLazyClasses; + + /// DefinedNonLazyCategories - List of defined "non-lazy" categories. + llvm::SmallVector DefinedNonLazyCategories; + + SmallVector Stmts; + SmallVector ObjCBcLabelNo; + // Remember all the @protocol() expressions. + llvm::SmallPtrSet ProtocolExprDecls; + + llvm::DenseSet CopyDestroyCache; + + // Block expressions. + SmallVector Blocks; + SmallVector InnerDeclRefsCount; + SmallVector InnerDeclRefs; + + SmallVector BlockDeclRefs; + + // Block related declarations. + SmallVector BlockByCopyDecls; + llvm::SmallPtrSet BlockByCopyDeclsPtrSet; + SmallVector BlockByRefDecls; + llvm::SmallPtrSet BlockByRefDeclsPtrSet; + llvm::DenseMap BlockByRefDeclNo; + llvm::SmallPtrSet ImportedBlockDecls; + llvm::SmallPtrSet ImportedLocalExternalDecls; + + llvm::DenseMap RewrittenBlockExprs; + llvm::DenseMap > ReferencedIvars; + + // This maps an original source AST to it's rewritten form. This allows + // us to avoid rewriting the same node twice (which is very uncommon). + // This is needed to support some of the exotic property rewriting. 
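The ReplacedNodes comment above is worth spelling out, since it drives ReplaceStmt further down: once an AST node has been rewritten, later rewrite attempts become no-ops. An editorial paraphrase of that guard (not an additional hunk):

  // First rewrite wins; a node already recorded in the map is never touched again.
  if (ReplacedNodes[Old])
    return;   // we can't rewrite the same node twice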
+ llvm::DenseMap ReplacedNodes; + + // Needed for header files being rewritten + bool IsHeader; + bool SilenceRewriteMacroWarning; + bool objc_impl_method; + + bool DisableReplaceStmt; + class DisableReplaceStmtScope { + RewriteModernObjC &R; + bool SavedValue; + + public: + DisableReplaceStmtScope(RewriteModernObjC &R) + : R(R), SavedValue(R.DisableReplaceStmt) { + R.DisableReplaceStmt = true; + } + ~DisableReplaceStmtScope() { + R.DisableReplaceStmt = SavedValue; + } + }; + void InitializeCommon(ASTContext &context); + + public: + llvm::DenseMap MethodInternalNames; + // Top Level Driver code. + virtual bool HandleTopLevelDecl(DeclGroupRef D) { + for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { + if (ObjCInterfaceDecl *Class = dyn_cast(*I)) { + if (!Class->isThisDeclarationADefinition()) { + RewriteForwardClassDecl(D); + break; + } else { + // Keep track of all interface declarations seen. + ObjCInterfacesSeen.push_back(Class); + break; + } + } + + if (ObjCProtocolDecl *Proto = dyn_cast(*I)) { + if (!Proto->isThisDeclarationADefinition()) { + RewriteForwardProtocolDecl(D); + break; + } + } + + HandleTopLevelSingleDecl(*I); + } + return true; + } + void HandleTopLevelSingleDecl(Decl *D); + void HandleDeclInMainFile(Decl *D); + RewriteModernObjC(std::string inFile, raw_ostream *OS, + DiagnosticsEngine &D, const LangOptions &LOpts, + bool silenceMacroWarn); + + ~RewriteModernObjC() {} + + virtual void HandleTranslationUnit(ASTContext &C); + + void ReplaceStmt(Stmt *Old, Stmt *New) { + Stmt *ReplacingStmt = ReplacedNodes[Old]; + + if (ReplacingStmt) + return; // We can't rewrite the same node twice. + + if (DisableReplaceStmt) + return; + + // If replacement succeeded or warning disabled return with no warning. + if (!Rewrite.ReplaceStmt(Old, New)) { + ReplacedNodes[Old] = New; + return; + } + if (SilenceRewriteMacroWarning) + return; + Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) + << Old->getSourceRange(); + } + + void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) { + if (DisableReplaceStmt) + return; + + // Measure the old text. + int Size = Rewrite.getRangeSize(SrcRange); + if (Size == -1) { + Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) + << Old->getSourceRange(); + return; + } + // Get the new text. + std::string SStr; + llvm::raw_string_ostream S(SStr); + New->printPretty(S, 0, PrintingPolicy(LangOpts)); + const std::string &Str = S.str(); + + // If replacement succeeded or warning disabled return with no warning. + if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) { + ReplacedNodes[Old] = New; + return; + } + if (SilenceRewriteMacroWarning) + return; + Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) + << Old->getSourceRange(); + } + + void InsertText(SourceLocation Loc, StringRef Str, + bool InsertAfter = true) { + // If insertion succeeded or warning disabled return with no warning. + if (!Rewrite.InsertText(Loc, Str, InsertAfter) || + SilenceRewriteMacroWarning) + return; + + Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag); + } + + void ReplaceText(SourceLocation Start, unsigned OrigLength, + StringRef Str) { + // If removal succeeded or warning disabled return with no warning. + if (!Rewrite.ReplaceText(Start, OrigLength, Str) || + SilenceRewriteMacroWarning) + return; + + Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag); + } + + // Syntactic Rewriting. 
+ void RewriteRecordBody(RecordDecl *RD); + void RewriteInclude(); + void RewriteLineDirective(const Decl *D); + void ConvertSourceLocationToLineDirective(SourceLocation Loc, + std::string &LineString); + void RewriteForwardClassDecl(DeclGroupRef D); + void RewriteForwardClassDecl(const llvm::SmallVector &DG); + void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl, + const std::string &typedefString); + void RewriteImplementations(); + void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, + ObjCImplementationDecl *IMD, + ObjCCategoryImplDecl *CID); + void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl); + void RewriteImplementationDecl(Decl *Dcl); + void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl, + ObjCMethodDecl *MDecl, std::string &ResultStr); + void RewriteTypeIntoString(QualType T, std::string &ResultStr, + const FunctionType *&FPRetType); + void RewriteByRefString(std::string &ResultStr, const std::string &Name, + ValueDecl *VD, bool def=false); + void RewriteCategoryDecl(ObjCCategoryDecl *Dcl); + void RewriteProtocolDecl(ObjCProtocolDecl *Dcl); + void RewriteForwardProtocolDecl(DeclGroupRef D); + void RewriteForwardProtocolDecl(const llvm::SmallVector &DG); + void RewriteMethodDeclaration(ObjCMethodDecl *Method); + void RewriteProperty(ObjCPropertyDecl *prop); + void RewriteFunctionDecl(FunctionDecl *FD); + void RewriteBlockPointerType(std::string& Str, QualType Type); + void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD); + void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD); + void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl); + void RewriteTypeOfDecl(VarDecl *VD); + void RewriteObjCQualifiedInterfaceTypes(Expr *E); + + std::string getIvarAccessString(ObjCIvarDecl *D); + + // Expression Rewriting. + Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S); + Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp); + Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo); + Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo); + Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp); + Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp); + Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp); + Stmt *RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp); + Stmt *RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp); + Stmt *RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp); + Stmt *RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp); + Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp); + Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S); + Stmt *RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S); + Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S); + Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S); + Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, + SourceLocation OrigEnd); + Stmt *RewriteBreakStmt(BreakStmt *S); + Stmt *RewriteContinueStmt(ContinueStmt *S); + void RewriteCastExpr(CStyleCastExpr *CE); + void RewriteImplicitCastObjCExpr(CastExpr *IE); + void RewriteLinkageSpec(LinkageSpecDecl *LSD); + + // Block rewriting. + void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D); + + // Block specific rewrite rules. 
+ void RewriteBlockPointerDecl(NamedDecl *VD); + void RewriteByRefVar(VarDecl *VD, bool firstDecl, bool lastDecl); + Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD); + Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE); + void RewriteBlockPointerFunctionArgs(FunctionDecl *FD); + + void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl, + std::string &Result); + + void RewriteObjCFieldDecl(FieldDecl *fieldDecl, std::string &Result); + bool IsTagDefinedInsideClass(ObjCContainerDecl *IDecl, TagDecl *Tag, + bool &IsNamedDefinition); + void RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl, + std::string &Result); + + bool RewriteObjCFieldDeclType(QualType &Type, std::string &Result); + + void RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl, + std::string &Result); + + virtual void Initialize(ASTContext &context); + + // Misc. AST transformation routines. Sometimes they end up calling + // rewriting routines on the new ASTs. + CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD, + Expr **args, unsigned nargs, + SourceLocation StartLoc=SourceLocation(), + SourceLocation EndLoc=SourceLocation()); + + Expr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor, + QualType msgSendType, + QualType returnType, + SmallVectorImpl &ArgTypes, + SmallVectorImpl &MsgExprs, + ObjCMethodDecl *Method); + + Stmt *SynthMessageExpr(ObjCMessageExpr *Exp, + SourceLocation StartLoc=SourceLocation(), + SourceLocation EndLoc=SourceLocation()); + + void SynthCountByEnumWithState(std::string &buf); + void SynthMsgSendFunctionDecl(); + void SynthMsgSendSuperFunctionDecl(); + void SynthMsgSendStretFunctionDecl(); + void SynthMsgSendFpretFunctionDecl(); + void SynthMsgSendSuperStretFunctionDecl(); + void SynthGetClassFunctionDecl(); + void SynthGetMetaClassFunctionDecl(); + void SynthGetSuperClassFunctionDecl(); + void SynthSelGetUidFunctionDecl(); + void SynthSuperContructorFunctionDecl(); + + // Rewriting metadata + template + void RewriteObjCMethodsMetaData(MethodIterator MethodBegin, + MethodIterator MethodEnd, + bool IsInstanceMethod, + StringRef prefix, + StringRef ClassName, + std::string &Result); + void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol, + std::string &Result); + void RewriteObjCProtocolListMetaData( + const ObjCList &Prots, + StringRef prefix, StringRef ClassName, std::string &Result); + void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, + std::string &Result); + void RewriteClassSetupInitHook(std::string &Result); + + void RewriteMetaDataIntoBuffer(std::string &Result); + void WriteImageInfo(std::string &Result); + void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl, + std::string &Result); + void RewriteCategorySetupInitHook(std::string &Result); + + // Rewriting ivar + void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, + std::string &Result); + Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV); + + + std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag); + std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i, + StringRef funcName, std::string Tag); + std::string SynthesizeBlockFunc(BlockExpr *CE, int i, + StringRef funcName, std::string Tag); + std::string SynthesizeBlockImpl(BlockExpr *CE, + std::string Tag, std::string Desc); + std::string SynthesizeBlockDescriptor(std::string DescTag, + std::string ImplTag, + int i, StringRef funcName, + unsigned hasCopy); + Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp); + void SynthesizeBlockLiterals(SourceLocation FunLocStart, + StringRef FunName); + FunctionDecl 
*SynthBlockInitFunctionDecl(StringRef name); + Stmt *SynthBlockInitExpr(BlockExpr *Exp, + const SmallVector &InnerBlockDeclRefs); + + // Misc. helper routines. + QualType getProtocolType(); + void WarnAboutReturnGotoStmts(Stmt *S); + void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND); + void InsertBlockLiteralsWithinFunction(FunctionDecl *FD); + void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD); + + bool IsDeclStmtInForeachHeader(DeclStmt *DS); + void CollectBlockDeclRefInfo(BlockExpr *Exp); + void GetBlockDeclRefExprs(Stmt *S); + void GetInnerBlockDeclRefExprs(Stmt *S, + SmallVector &InnerBlockDeclRefs, + llvm::SmallPtrSet &InnerContexts); + + // We avoid calling Type::isBlockPointerType(), since it operates on the + // canonical type. We only care if the top-level type is a closure pointer. + bool isTopLevelBlockPointerType(QualType T) { + return isa(T); + } + + /// convertBlockPointerToFunctionPointer - Converts a block-pointer type + /// to a function pointer type and upon success, returns true; false + /// otherwise. + bool convertBlockPointerToFunctionPointer(QualType &T) { + if (isTopLevelBlockPointerType(T)) { + const BlockPointerType *BPT = T->getAs(); + T = Context->getPointerType(BPT->getPointeeType()); + return true; + } + return false; + } + + bool convertObjCTypeToCStyleType(QualType &T); + + bool needToScanForQualifiers(QualType T); + QualType getSuperStructType(); + QualType getConstantStringStructType(); + QualType convertFunctionTypeOfBlocks(const FunctionType *FT); + bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf); + + void convertToUnqualifiedObjCType(QualType &T) { + if (T->isObjCQualifiedIdType()) { + bool isConst = T.isConstQualified(); + T = isConst ? Context->getObjCIdType().withConst() + : Context->getObjCIdType(); + } + else if (T->isObjCQualifiedClassType()) + T = Context->getObjCClassType(); + else if (T->isObjCObjectPointerType() && + T->getPointeeType()->isObjCQualifiedInterfaceType()) { + if (const ObjCObjectPointerType * OBJPT = + T->getAsObjCInterfacePointerType()) { + const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType(); + T = QualType(IFaceT, 0); + T = Context->getPointerType(T); + } + } + } + + // FIXME: This predicate seems like it would be useful to add to ASTContext. 
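// A minimal sketch (block type assumed, not from this commit): the
// convertBlockPointerToFunctionPointer() helper above takes a top-level
// block pointer such as
//     int (^)(float)
// and rewrites it to the corresponding C function pointer
//     int (*)(float)
// by unwrapping the BlockPointerType's pointee and re-wrapping it in a
// plain PointerType.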
+ bool isObjCType(QualType T) { + if (!LangOpts.ObjC1 && !LangOpts.ObjC2) + return false; + + QualType OCT = Context->getCanonicalType(T).getUnqualifiedType(); + + if (OCT == Context->getCanonicalType(Context->getObjCIdType()) || + OCT == Context->getCanonicalType(Context->getObjCClassType())) + return true; + + if (const PointerType *PT = OCT->getAs()) { + if (isa(PT->getPointeeType()) || + PT->getPointeeType()->isObjCQualifiedIdType()) + return true; + } + return false; + } + bool PointerTypeTakesAnyBlockArguments(QualType QT); + bool PointerTypeTakesAnyObjCQualifiedType(QualType QT); + void GetExtentOfArgList(const char *Name, const char *&LParen, + const char *&RParen); + + void QuoteDoublequotes(std::string &From, std::string &To) { + for (unsigned i = 0; i < From.length(); i++) { + if (From[i] == '"') + To += "\\\""; + else + To += From[i]; + } + } + + QualType getSimpleFunctionType(QualType result, + const QualType *args, + unsigned numArgs, + bool variadic = false) { + if (result == Context->getObjCInstanceType()) + result = Context->getObjCIdType(); + FunctionProtoType::ExtProtoInfo fpi; + fpi.Variadic = variadic; + return Context->getFunctionType(result, args, numArgs, fpi); + } + + // Helper function: create a CStyleCastExpr with trivial type source info. + CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty, + CastKind Kind, Expr *E) { + TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation()); + return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo, + SourceLocation(), SourceLocation()); + } + + bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const { + IdentifierInfo* II = &Context->Idents.get("load"); + Selector LoadSel = Context->Selectors.getSelector(0, &II); + return OD->getClassMethod(LoadSel) != 0; + } + }; + +} + +void RewriteModernObjC::RewriteBlocksInFunctionProtoType(QualType funcType, + NamedDecl *D) { + if (const FunctionProtoType *fproto + = dyn_cast(funcType.IgnoreParens())) { + for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(), + E = fproto->arg_type_end(); I && (I != E); ++I) + if (isTopLevelBlockPointerType(*I)) { + // All the args are checked/rewritten. Don't call twice! + RewriteBlockPointerDecl(D); + break; + } + } +} + +void RewriteModernObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) { + const PointerType *PT = funcType->getAs(); + if (PT && PointerTypeTakesAnyBlockArguments(funcType)) + RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND); +} + +static bool IsHeaderFile(const std::string &Filename) { + std::string::size_type DotPos = Filename.rfind('.'); + + if (DotPos == std::string::npos) { + // no file extension + return false; + } + + std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end()); + // C header: .h + // C++ header: .hh or .H; + return Ext == "h" || Ext == "hh" || Ext == "H"; +} + +RewriteModernObjC::RewriteModernObjC(std::string inFile, raw_ostream* OS, + DiagnosticsEngine &D, const LangOptions &LOpts, + bool silenceMacroWarn) + : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS), + SilenceRewriteMacroWarning(silenceMacroWarn) { + IsHeader = IsHeaderFile(inFile); + RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning, + "rewriting sub-expression within a macro (may not be correct)"); + // FIXME. This should be an error. But if block is not called, it is OK. And it + // may break including some headers. 
+ GlobalBlockRewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning, + "rewriting block literal declared in global scope is not implemented"); + + TryFinallyContainsReturnDiag = Diags.getCustomDiagID( + DiagnosticsEngine::Warning, + "rewriter doesn't support user-specified control flow semantics " + "for @try/@finally (code may not execute properly)"); +} + +ASTConsumer *clang::CreateModernObjCRewriter(const std::string& InFile, + raw_ostream* OS, + DiagnosticsEngine &Diags, + const LangOptions &LOpts, + bool SilenceRewriteMacroWarning) { + return new RewriteModernObjC(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning); +} + +void RewriteModernObjC::InitializeCommon(ASTContext &context) { + Context = &context; + SM = &Context->getSourceManager(); + TUDecl = Context->getTranslationUnitDecl(); + MsgSendFunctionDecl = 0; + MsgSendSuperFunctionDecl = 0; + MsgSendStretFunctionDecl = 0; + MsgSendSuperStretFunctionDecl = 0; + MsgSendFpretFunctionDecl = 0; + GetClassFunctionDecl = 0; + GetMetaClassFunctionDecl = 0; + GetSuperClassFunctionDecl = 0; + SelGetUidFunctionDecl = 0; + CFStringFunctionDecl = 0; + ConstantStringClassReference = 0; + NSStringRecord = 0; + CurMethodDef = 0; + CurFunctionDef = 0; + GlobalVarDecl = 0; + GlobalConstructionExp = 0; + SuperStructDecl = 0; + ProtocolTypeDecl = 0; + ConstantStringDecl = 0; + BcLabelCount = 0; + SuperContructorFunctionDecl = 0; + NumObjCStringLiterals = 0; + PropParentMap = 0; + CurrentBody = 0; + DisableReplaceStmt = false; + objc_impl_method = false; + + // Get the ID and start/end of the main file. + MainFileID = SM->getMainFileID(); + const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID); + MainFileStart = MainBuf->getBufferStart(); + MainFileEnd = MainBuf->getBufferEnd(); + + Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts()); +} + +//===----------------------------------------------------------------------===// +// Top Level Driver Code +//===----------------------------------------------------------------------===// + +void RewriteModernObjC::HandleTopLevelSingleDecl(Decl *D) { + if (Diags.hasErrorOccurred()) + return; + + // Two cases: either the decl could be in the main file, or it could be in a + // #included file. If the former, rewrite it now. If the later, check to see + // if we rewrote the #include/#import. + SourceLocation Loc = D->getLocation(); + Loc = SM->getExpansionLoc(Loc); + + // If this is for a builtin, ignore it. + if (Loc.isInvalid()) return; + + // Look for built-in declarations that we need to refer during the rewrite. + if (FunctionDecl *FD = dyn_cast(D)) { + RewriteFunctionDecl(FD); + } else if (VarDecl *FVD = dyn_cast(D)) { + // declared in + if (FVD->getName() == "_NSConstantStringClassReference") { + ConstantStringClassReference = FVD; + return; + } + } else if (ObjCCategoryDecl *CD = dyn_cast(D)) { + RewriteCategoryDecl(CD); + } else if (ObjCProtocolDecl *PD = dyn_cast(D)) { + if (PD->isThisDeclarationADefinition()) + RewriteProtocolDecl(PD); + } else if (LinkageSpecDecl *LSD = dyn_cast(D)) { + // FIXME. This will not work in all situations and leaving it out + // is harmless. 
+ // RewriteLinkageSpec(LSD); + + // Recurse into linkage specifications + for (DeclContext::decl_iterator DI = LSD->decls_begin(), + DIEnd = LSD->decls_end(); + DI != DIEnd; ) { + if (ObjCInterfaceDecl *IFace = dyn_cast((*DI))) { + if (!IFace->isThisDeclarationADefinition()) { + SmallVector DG; + SourceLocation StartLoc = IFace->getLocStart(); + do { + if (isa(*DI) && + !cast(*DI)->isThisDeclarationADefinition() && + StartLoc == (*DI)->getLocStart()) + DG.push_back(*DI); + else + break; + + ++DI; + } while (DI != DIEnd); + RewriteForwardClassDecl(DG); + continue; + } + else { + // Keep track of all interface declarations seen. + ObjCInterfacesSeen.push_back(IFace); + ++DI; + continue; + } + } + + if (ObjCProtocolDecl *Proto = dyn_cast((*DI))) { + if (!Proto->isThisDeclarationADefinition()) { + SmallVector DG; + SourceLocation StartLoc = Proto->getLocStart(); + do { + if (isa(*DI) && + !cast(*DI)->isThisDeclarationADefinition() && + StartLoc == (*DI)->getLocStart()) + DG.push_back(*DI); + else + break; + + ++DI; + } while (DI != DIEnd); + RewriteForwardProtocolDecl(DG); + continue; + } + } + + HandleTopLevelSingleDecl(*DI); + ++DI; + } + } + // If we have a decl in the main file, see if we should rewrite it. + if (SM->isFromMainFile(Loc)) + return HandleDeclInMainFile(D); +} + +//===----------------------------------------------------------------------===// +// Syntactic (non-AST) Rewriting Code +//===----------------------------------------------------------------------===// + +void RewriteModernObjC::RewriteInclude() { + SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID); + StringRef MainBuf = SM->getBufferData(MainFileID); + const char *MainBufStart = MainBuf.begin(); + const char *MainBufEnd = MainBuf.end(); + size_t ImportLen = strlen("import"); + + // Loop over the whole file, looking for includes. + for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) { + if (*BufPtr == '#') { + if (++BufPtr == MainBufEnd) + return; + while (*BufPtr == ' ' || *BufPtr == '\t') + if (++BufPtr == MainBufEnd) + return; + if (!strncmp(BufPtr, "import", ImportLen)) { + // replace import with include + SourceLocation ImportLoc = + LocStart.getLocWithOffset(BufPtr-MainBufStart); + ReplaceText(ImportLoc, ImportLen, "include"); + BufPtr += ImportLen; + } + } + } +} + +static void WriteInternalIvarName(const ObjCInterfaceDecl *IDecl, + ObjCIvarDecl *IvarDecl, std::string &Result) { + Result += "OBJC_IVAR_$_"; + Result += IDecl->getName(); + Result += "$"; + Result += IvarDecl->getName(); +} + +std::string +RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) { + const ObjCInterfaceDecl *ClassDecl = D->getContainingInterface(); + + // Build name of symbol holding ivar offset. + std::string IvarOffsetName; + WriteInternalIvarName(ClassDecl, D, IvarOffsetName); + + + std::string S = "(*("; + QualType IvarT = D->getType(); + + if (!isa(IvarT) && IvarT->isRecordType()) { + RecordDecl *RD = IvarT->getAs()->getDecl(); + RD = RD->getDefinition(); + if (RD && !RD->getDeclName().getAsIdentifierInfo()) { + // decltype(((Foo_IMPL*)0)->bar) * + ObjCContainerDecl *CDecl = + dyn_cast(D->getDeclContext()); + // ivar in class extensions requires special treatment. 
+ if (ObjCCategoryDecl *CatDecl = dyn_cast(CDecl)) + CDecl = CatDecl->getClassInterface(); + std::string RecName = CDecl->getName(); + RecName += "_IMPL"; + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get(RecName.c_str())); + QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD)); + unsigned UnsignedIntSize = + static_cast(Context->getTypeSize(Context->UnsignedIntTy)); + Expr *Zero = IntegerLiteral::Create(*Context, + llvm::APInt(UnsignedIntSize, 0), + Context->UnsignedIntTy, SourceLocation()); + Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero); + ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), + Zero); + FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get(D->getNameAsString()), + IvarT, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), + FD->getType(), VK_LValue, + OK_Ordinary); + IvarT = Context->getDecltypeType(ME, ME->getType()); + } + } + convertObjCTypeToCStyleType(IvarT); + QualType castT = Context->getPointerType(IvarT); + std::string TypeString(castT.getAsString(Context->getPrintingPolicy())); + S += TypeString; + S += ")"; + + // ((char *)self + IVAR_OFFSET_SYMBOL_NAME) + S += "((char *)self + "; + S += IvarOffsetName; + S += "))"; + ReferencedIvars[const_cast(ClassDecl)].insert(D); + return S; +} + +/// mustSynthesizeSetterGetterMethod - returns true if setter or getter has not +/// been found in the class implementation. In this case, it must be synthesized. +static bool mustSynthesizeSetterGetterMethod(ObjCImplementationDecl *IMP, + ObjCPropertyDecl *PD, + bool getter) { + return getter ? !IMP->getInstanceMethod(PD->getGetterName()) + : !IMP->getInstanceMethod(PD->getSetterName()); + +} + +void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, + ObjCImplementationDecl *IMD, + ObjCCategoryImplDecl *CID) { + static bool objcGetPropertyDefined = false; + static bool objcSetPropertyDefined = false; + SourceLocation startGetterSetterLoc; + + if (PID->getLocStart().isValid()) { + SourceLocation startLoc = PID->getLocStart(); + InsertText(startLoc, "// "); + const char *startBuf = SM->getCharacterData(startLoc); + assert((*startBuf == '@') && "bogus @synthesize location"); + const char *semiBuf = strchr(startBuf, ';'); + assert((*semiBuf == ';') && "@synthesize: can't find ';'"); + startGetterSetterLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1); + } + else + startGetterSetterLoc = IMD ? IMD->getLocEnd() : CID->getLocEnd(); + + if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) + return; // FIXME: is this correct? + + // Generate the 'getter' function. + ObjCPropertyDecl *PD = PID->getPropertyDecl(); + ObjCIvarDecl *OID = PID->getPropertyIvarDecl(); + + if (!OID) + return; + unsigned Attributes = PD->getPropertyAttributes(); + if (mustSynthesizeSetterGetterMethod(IMD, PD, true /*getter*/)) { + bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) && + (Attributes & (ObjCPropertyDecl::OBJC_PR_retain | + ObjCPropertyDecl::OBJC_PR_copy)); + std::string Getr; + if (GenGetProperty && !objcGetPropertyDefined) { + objcGetPropertyDefined = true; + // FIXME. Is this attribute correct in all cases? 
+ Getr = "\nextern \"C\" __declspec(dllimport) " + "id objc_getProperty(id, SEL, long, bool);\n"; + } + RewriteObjCMethodDecl(OID->getContainingInterface(), + PD->getGetterMethodDecl(), Getr); + Getr += "{ "; + // Synthesize an explicit cast to gain access to the ivar. + // See objc-act.c:objc_synthesize_new_getter() for details. + if (GenGetProperty) { + // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1) + Getr += "typedef "; + const FunctionType *FPRetType = 0; + RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr, + FPRetType); + Getr += " _TYPE"; + if (FPRetType) { + Getr += ")"; // close the precedence "scope" for "*". + + // Now, emit the argument types (if any). + if (const FunctionProtoType *FT = dyn_cast(FPRetType)){ + Getr += "("; + for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { + if (i) Getr += ", "; + std::string ParamStr = FT->getArgType(i).getAsString( + Context->getPrintingPolicy()); + Getr += ParamStr; + } + if (FT->isVariadic()) { + if (FT->getNumArgs()) Getr += ", "; + Getr += "..."; + } + Getr += ")"; + } else + Getr += "()"; + } + Getr += ";\n"; + Getr += "return (_TYPE)"; + Getr += "objc_getProperty(self, _cmd, "; + RewriteIvarOffsetComputation(OID, Getr); + Getr += ", 1)"; + } + else + Getr += "return " + getIvarAccessString(OID); + Getr += "; }"; + InsertText(startGetterSetterLoc, Getr); + } + + if (PD->isReadOnly() || + !mustSynthesizeSetterGetterMethod(IMD, PD, false /*setter*/)) + return; + + // Generate the 'setter' function. + std::string Setr; + bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain | + ObjCPropertyDecl::OBJC_PR_copy); + if (GenSetProperty && !objcSetPropertyDefined) { + objcSetPropertyDefined = true; + // FIXME. Is this attribute correct in all cases? + Setr = "\nextern \"C\" __declspec(dllimport) " + "void objc_setProperty (id, SEL, long, id, bool, bool);\n"; + } + + RewriteObjCMethodDecl(OID->getContainingInterface(), + PD->getSetterMethodDecl(), Setr); + Setr += "{ "; + // Synthesize an explicit cast to initialize the ivar. + // See objc-act.c:objc_synthesize_new_setter() for details. 
+ if (GenSetProperty) { + Setr += "objc_setProperty (self, _cmd, "; + RewriteIvarOffsetComputation(OID, Setr); + Setr += ", (id)"; + Setr += PD->getName(); + Setr += ", "; + if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) + Setr += "0, "; + else + Setr += "1, "; + if (Attributes & ObjCPropertyDecl::OBJC_PR_copy) + Setr += "1)"; + else + Setr += "0)"; + } + else { + Setr += getIvarAccessString(OID) + " = "; + Setr += PD->getName(); + } + Setr += "; }\n"; + InsertText(startGetterSetterLoc, Setr); +} + +static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl, + std::string &typedefString) { + typedefString += "#ifndef _REWRITER_typedef_"; + typedefString += ForwardDecl->getNameAsString(); + typedefString += "\n"; + typedefString += "#define _REWRITER_typedef_"; + typedefString += ForwardDecl->getNameAsString(); + typedefString += "\n"; + typedefString += "typedef struct objc_object "; + typedefString += ForwardDecl->getNameAsString(); + // typedef struct { } _objc_exc_Classname; + typedefString += ";\ntypedef struct {} _objc_exc_"; + typedefString += ForwardDecl->getNameAsString(); + typedefString += ";\n#endif\n"; +} + +void RewriteModernObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl, + const std::string &typedefString) { + SourceLocation startLoc = ClassDecl->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + const char *semiPtr = strchr(startBuf, ';'); + // Replace the @class with typedefs corresponding to the classes. + ReplaceText(startLoc, semiPtr-startBuf+1, typedefString); +} + +void RewriteModernObjC::RewriteForwardClassDecl(DeclGroupRef D) { + std::string typedefString; + for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { + ObjCInterfaceDecl *ForwardDecl = cast(*I); + if (I == D.begin()) { + // Translate to typedef's that forward reference structs with the same name + // as the class. As a convenience, we include the original declaration + // as a comment. + typedefString += "// @class "; + typedefString += ForwardDecl->getNameAsString(); + typedefString += ";\n"; + } + RewriteOneForwardClassDecl(ForwardDecl, typedefString); + } + DeclGroupRef::iterator I = D.begin(); + RewriteForwardClassEpilogue(cast(*I), typedefString); +} + +void RewriteModernObjC::RewriteForwardClassDecl( + const llvm::SmallVector &D) { + std::string typedefString; + for (unsigned i = 0; i < D.size(); i++) { + ObjCInterfaceDecl *ForwardDecl = cast(D[i]); + if (i == 0) { + typedefString += "// @class "; + typedefString += ForwardDecl->getNameAsString(); + typedefString += ";\n"; + } + RewriteOneForwardClassDecl(ForwardDecl, typedefString); + } + RewriteForwardClassEpilogue(cast(D[0]), typedefString); +} + +void RewriteModernObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) { + // When method is a synthesized one, such as a getter/setter there is + // nothing to rewrite. + if (Method->isImplicit()) + return; + SourceLocation LocStart = Method->getLocStart(); + SourceLocation LocEnd = Method->getLocEnd(); + + if (SM->getExpansionLineNumber(LocEnd) > + SM->getExpansionLineNumber(LocStart)) { + InsertText(LocStart, "#if 0\n"); + ReplaceText(LocEnd, 1, ";\n#endif\n"); + } else { + InsertText(LocStart, "// "); + } +} + +void RewriteModernObjC::RewriteProperty(ObjCPropertyDecl *prop) { + SourceLocation Loc = prop->getAtLoc(); + + ReplaceText(Loc, 0, "// "); + // FIXME: handle properties that are declared across multiple lines. 
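// A minimal sketch (class name "Foo" assumed) of what the
// RewriteOneForwardClassDecl() helper above emits for a forward
// declaration "@class Foo;":
//     #ifndef _REWRITER_typedef_Foo
//     #define _REWRITER_typedef_Foo
//     typedef struct objc_object Foo;
//     typedef struct {} _objc_exc_Foo;
//     #endif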
+} + +void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) { + SourceLocation LocStart = CatDecl->getLocStart(); + + // FIXME: handle category headers that are declared across multiple lines. + if (CatDecl->getIvarRBraceLoc().isValid()) { + ReplaceText(LocStart, 1, "/** "); + ReplaceText(CatDecl->getIvarRBraceLoc(), 1, "**/ "); + } + else { + ReplaceText(LocStart, 0, "// "); + } + + for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(), + E = CatDecl->prop_end(); I != E; ++I) + RewriteProperty(*I); + + for (ObjCCategoryDecl::instmeth_iterator + I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + for (ObjCCategoryDecl::classmeth_iterator + I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + + // Lastly, comment out the @end. + ReplaceText(CatDecl->getAtEndRange().getBegin(), + strlen("@end"), "/* @end */"); +} + +void RewriteModernObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) { + SourceLocation LocStart = PDecl->getLocStart(); + assert(PDecl->isThisDeclarationADefinition()); + + // FIXME: handle protocol headers that are declared across multiple lines. + ReplaceText(LocStart, 0, "// "); + + for (ObjCProtocolDecl::instmeth_iterator + I = PDecl->instmeth_begin(), E = PDecl->instmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + for (ObjCProtocolDecl::classmeth_iterator + I = PDecl->classmeth_begin(), E = PDecl->classmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + + for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(), + E = PDecl->prop_end(); I != E; ++I) + RewriteProperty(*I); + + // Lastly, comment out the @end. + SourceLocation LocEnd = PDecl->getAtEndRange().getBegin(); + ReplaceText(LocEnd, strlen("@end"), "/* @end */"); + + // Must comment out @optional/@required + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + for (const char *p = startBuf; p < endBuf; p++) { + if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) { + SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf); + ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */"); + + } + else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) { + SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf); + ReplaceText(OptionalLoc, strlen("@required"), "/* @required */"); + + } + } +} + +void RewriteModernObjC::RewriteForwardProtocolDecl(DeclGroupRef D) { + SourceLocation LocStart = (*D.begin())->getLocStart(); + if (LocStart.isInvalid()) + llvm_unreachable("Invalid SourceLocation"); + // FIXME: handle forward protocol that are declared across multiple lines. + ReplaceText(LocStart, 0, "// "); +} + +void +RewriteModernObjC::RewriteForwardProtocolDecl(const llvm::SmallVector &DG) { + SourceLocation LocStart = DG[0]->getLocStart(); + if (LocStart.isInvalid()) + llvm_unreachable("Invalid SourceLocation"); + // FIXME: handle forward protocol that are declared across multiple lines. + ReplaceText(LocStart, 0, "// "); +} + +void +RewriteModernObjC::RewriteLinkageSpec(LinkageSpecDecl *LSD) { + SourceLocation LocStart = LSD->getExternLoc(); + if (LocStart.isInvalid()) + llvm_unreachable("Invalid extern SourceLocation"); + + ReplaceText(LocStart, 0, "// "); + if (!LSD->hasBraces()) + return; + // FIXME. We don't rewrite well if '{' is not on same line as 'extern'. 
+ SourceLocation LocRBrace = LSD->getRBraceLoc(); + if (LocRBrace.isInvalid()) + llvm_unreachable("Invalid rbrace SourceLocation"); + ReplaceText(LocRBrace, 0, "// "); +} + +void RewriteModernObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr, + const FunctionType *&FPRetType) { + if (T->isObjCQualifiedIdType()) + ResultStr += "id"; + else if (T->isFunctionPointerType() || + T->isBlockPointerType()) { + // needs special handling, since pointer-to-functions have special + // syntax (where a decaration models use). + QualType retType = T; + QualType PointeeTy; + if (const PointerType* PT = retType->getAs()) + PointeeTy = PT->getPointeeType(); + else if (const BlockPointerType *BPT = retType->getAs()) + PointeeTy = BPT->getPointeeType(); + if ((FPRetType = PointeeTy->getAs())) { + ResultStr += FPRetType->getResultType().getAsString( + Context->getPrintingPolicy()); + ResultStr += "(*"; + } + } else + ResultStr += T.getAsString(Context->getPrintingPolicy()); +} + +void RewriteModernObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl, + ObjCMethodDecl *OMD, + std::string &ResultStr) { + //fprintf(stderr,"In RewriteObjCMethodDecl\n"); + const FunctionType *FPRetType = 0; + ResultStr += "\nstatic "; + RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType); + ResultStr += " "; + + // Unique method name + std::string NameStr; + + if (OMD->isInstanceMethod()) + NameStr += "_I_"; + else + NameStr += "_C_"; + + NameStr += IDecl->getNameAsString(); + NameStr += "_"; + + if (ObjCCategoryImplDecl *CID = + dyn_cast(OMD->getDeclContext())) { + NameStr += CID->getNameAsString(); + NameStr += "_"; + } + // Append selector names, replacing ':' with '_' + { + std::string selString = OMD->getSelector().getAsString(); + int len = selString.size(); + for (int i = 0; i < len; i++) + if (selString[i] == ':') + selString[i] = '_'; + NameStr += selString; + } + // Remember this name for metadata emission + MethodInternalNames[OMD] = NameStr; + ResultStr += NameStr; + + // Rewrite arguments + ResultStr += "("; + + // invisible arguments + if (OMD->isInstanceMethod()) { + QualType selfTy = Context->getObjCInterfaceType(IDecl); + selfTy = Context->getPointerType(selfTy); + if (!LangOpts.MicrosoftExt) { + if (ObjCSynthesizedStructs.count(const_cast(IDecl))) + ResultStr += "struct "; + } + // When rewriting for Microsoft, explicitly omit the structure name. + ResultStr += IDecl->getNameAsString(); + ResultStr += " *"; + } + else + ResultStr += Context->getObjCClassType().getAsString( + Context->getPrintingPolicy()); + + ResultStr += " self, "; + ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy()); + ResultStr += " _cmd"; + + // Method arguments. + for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), + E = OMD->param_end(); PI != E; ++PI) { + ParmVarDecl *PDecl = *PI; + ResultStr += ", "; + if (PDecl->getType()->isObjCQualifiedIdType()) { + ResultStr += "id "; + ResultStr += PDecl->getNameAsString(); + } else { + std::string Name = PDecl->getNameAsString(); + QualType QT = PDecl->getType(); + // Make sure we convert "t (^)(...)" to "t (*)(...)". + (void)convertBlockPointerToFunctionPointer(QT); + QT.getAsStringInternal(Name, Context->getPrintingPolicy()); + ResultStr += Name; + } + } + if (OMD->isVariadic()) + ResultStr += ", ..."; + ResultStr += ") "; + + if (FPRetType) { + ResultStr += ")"; // close the precedence "scope" for "*". + + // Now, emit the argument types (if any). 
+ if (const FunctionProtoType *FT = dyn_cast(FPRetType)) { + ResultStr += "("; + for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { + if (i) ResultStr += ", "; + std::string ParamStr = FT->getArgType(i).getAsString( + Context->getPrintingPolicy()); + ResultStr += ParamStr; + } + if (FT->isVariadic()) { + if (FT->getNumArgs()) ResultStr += ", "; + ResultStr += "..."; + } + ResultStr += ")"; + } else { + ResultStr += "()"; + } + } +} +void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) { + ObjCImplementationDecl *IMD = dyn_cast(OID); + ObjCCategoryImplDecl *CID = dyn_cast(OID); + + if (IMD) { + if (IMD->getIvarRBraceLoc().isValid()) { + ReplaceText(IMD->getLocStart(), 1, "/** "); + ReplaceText(IMD->getIvarRBraceLoc(), 1, "**/ "); + } + else { + InsertText(IMD->getLocStart(), "// "); + } + } + else + InsertText(CID->getLocStart(), "// "); + + for (ObjCCategoryImplDecl::instmeth_iterator + I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(), + E = IMD ? IMD->instmeth_end() : CID->instmeth_end(); + I != E; ++I) { + std::string ResultStr; + ObjCMethodDecl *OMD = *I; + RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr); + SourceLocation LocStart = OMD->getLocStart(); + SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart(); + + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + ReplaceText(LocStart, endBuf-startBuf, ResultStr); + } + + for (ObjCCategoryImplDecl::classmeth_iterator + I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(), + E = IMD ? IMD->classmeth_end() : CID->classmeth_end(); + I != E; ++I) { + std::string ResultStr; + ObjCMethodDecl *OMD = *I; + RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr); + SourceLocation LocStart = OMD->getLocStart(); + SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart(); + + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + ReplaceText(LocStart, endBuf-startBuf, ResultStr); + } + for (ObjCCategoryImplDecl::propimpl_iterator + I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(), + E = IMD ? IMD->propimpl_end() : CID->propimpl_end(); + I != E; ++I) { + RewritePropertyImplDecl(*I, IMD, CID); + } + + InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// "); +} + +void RewriteModernObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) { + // Do not synthesize more than once. + if (ObjCSynthesizedStructs.count(ClassDecl)) + return; + // Make sure super class's are written before current class is written. + ObjCInterfaceDecl *SuperClass = ClassDecl->getSuperClass(); + while (SuperClass) { + RewriteInterfaceDecl(SuperClass); + SuperClass = SuperClass->getSuperClass(); + } + std::string ResultStr; + if (!ObjCWrittenInterfaces.count(ClassDecl->getCanonicalDecl())) { + // we haven't seen a forward decl - generate a typedef. + RewriteOneForwardClassDecl(ClassDecl, ResultStr); + RewriteIvarOffsetSymbols(ClassDecl, ResultStr); + + RewriteObjCInternalStruct(ClassDecl, ResultStr); + // Mark this typedef as having been written into its c++ equivalent. 
+ ObjCWrittenInterfaces.insert(ClassDecl->getCanonicalDecl()); + + for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(), + E = ClassDecl->prop_end(); I != E; ++I) + RewriteProperty(*I); + for (ObjCInterfaceDecl::instmeth_iterator + I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + for (ObjCInterfaceDecl::classmeth_iterator + I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + + // Lastly, comment out the @end. + ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"), + "/* @end */"); + } +} + +Stmt *RewriteModernObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) { + SourceRange OldRange = PseudoOp->getSourceRange(); + + // We just magically know some things about the structure of this + // expression. + ObjCMessageExpr *OldMsg = + cast(PseudoOp->getSemanticExpr( + PseudoOp->getNumSemanticExprs() - 1)); + + // Because the rewriter doesn't allow us to rewrite rewritten code, + // we need to suppress rewriting the sub-statements. + Expr *Base; + SmallVector Args; + { + DisableReplaceStmtScope S(*this); + + // Rebuild the base expression if we have one. + Base = 0; + if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) { + Base = OldMsg->getInstanceReceiver(); + Base = cast(Base)->getSourceExpr(); + Base = cast(RewriteFunctionBodyOrGlobalInitializer(Base)); + } + + unsigned numArgs = OldMsg->getNumArgs(); + for (unsigned i = 0; i < numArgs; i++) { + Expr *Arg = OldMsg->getArg(i); + if (isa(Arg)) + Arg = cast(Arg)->getSourceExpr(); + Arg = cast(RewriteFunctionBodyOrGlobalInitializer(Arg)); + Args.push_back(Arg); + } + } + + // TODO: avoid this copy. + SmallVector SelLocs; + OldMsg->getSelectorLocs(SelLocs); + + ObjCMessageExpr *NewMsg = 0; + switch (OldMsg->getReceiverKind()) { + case ObjCMessageExpr::Class: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + OldMsg->getClassReceiverTypeInfo(), + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + + case ObjCMessageExpr::Instance: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + Base, + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + + case ObjCMessageExpr::SuperClass: + case ObjCMessageExpr::SuperInstance: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + OldMsg->getSuperLoc(), + OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance, + OldMsg->getSuperType(), + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + } + + Stmt *Replacement = SynthMessageExpr(NewMsg); + ReplaceStmtWithRange(PseudoOp, Replacement, OldRange); + return Replacement; +} + +Stmt *RewriteModernObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) { + SourceRange OldRange = PseudoOp->getSourceRange(); + + // We just magically know some things about the structure of this + // expression. + ObjCMessageExpr *OldMsg = + cast(PseudoOp->getResultExpr()->IgnoreImplicit()); + + // Because the rewriter doesn't allow us to rewrite rewritten code, + // we need to suppress rewriting the sub-statements. 
+ Expr *Base = 0; + SmallVector Args; + { + DisableReplaceStmtScope S(*this); + // Rebuild the base expression if we have one. + if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) { + Base = OldMsg->getInstanceReceiver(); + Base = cast(Base)->getSourceExpr(); + Base = cast(RewriteFunctionBodyOrGlobalInitializer(Base)); + } + unsigned numArgs = OldMsg->getNumArgs(); + for (unsigned i = 0; i < numArgs; i++) { + Expr *Arg = OldMsg->getArg(i); + if (isa(Arg)) + Arg = cast(Arg)->getSourceExpr(); + Arg = cast(RewriteFunctionBodyOrGlobalInitializer(Arg)); + Args.push_back(Arg); + } + } + + // Intentionally empty. + SmallVector SelLocs; + + ObjCMessageExpr *NewMsg = 0; + switch (OldMsg->getReceiverKind()) { + case ObjCMessageExpr::Class: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + OldMsg->getClassReceiverTypeInfo(), + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + + case ObjCMessageExpr::Instance: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + Base, + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + + case ObjCMessageExpr::SuperClass: + case ObjCMessageExpr::SuperInstance: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + OldMsg->getSuperLoc(), + OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance, + OldMsg->getSuperType(), + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + } + + Stmt *Replacement = SynthMessageExpr(NewMsg); + ReplaceStmtWithRange(PseudoOp, Replacement, OldRange); + return Replacement; +} + +/// SynthCountByEnumWithState - To print: +/// ((unsigned int (*) +/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int)) +/// (void *)objc_msgSend)((id)l_collection, +/// sel_registerName( +/// "countByEnumeratingWithState:objects:count:"), +/// &enumState, +/// (id *)__rw_items, (unsigned int)16) +/// +void RewriteModernObjC::SynthCountByEnumWithState(std::string &buf) { + buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, " + "id *, unsigned int))(void *)objc_msgSend)"; + buf += "\n\t\t"; + buf += "((id)l_collection,\n\t\t"; + buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),"; + buf += "\n\t\t"; + buf += "&enumState, " + "(id *)__rw_items, (unsigned int)16)"; +} + +/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach +/// statement to exit to its outer synthesized loop. 
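// A minimal sketch of the break rewrite described above (label number
// assumed): inside a rewritten foreach body,
//     break;
// becomes
//     goto __break_label_1;
// so that control exits past the outer synthesized do/while loop.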
+/// +Stmt *RewriteModernObjC::RewriteBreakStmt(BreakStmt *S) { + if (Stmts.empty() || !isa(Stmts.back())) + return S; + // replace break with goto __break_label + std::string buf; + + SourceLocation startLoc = S->getLocStart(); + buf = "goto __break_label_"; + buf += utostr(ObjCBcLabelNo.back()); + ReplaceText(startLoc, strlen("break"), buf); + + return 0; +} + +void RewriteModernObjC::ConvertSourceLocationToLineDirective( + SourceLocation Loc, + std::string &LineString) { + if (Loc.isFileID()) { + LineString += "\n#line "; + PresumedLoc PLoc = SM->getPresumedLoc(Loc); + LineString += utostr(PLoc.getLine()); + LineString += " \""; + LineString += Lexer::Stringify(PLoc.getFilename()); + LineString += "\"\n"; + } +} + +/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach +/// statement to continue with its inner synthesized loop. +/// +Stmt *RewriteModernObjC::RewriteContinueStmt(ContinueStmt *S) { + if (Stmts.empty() || !isa(Stmts.back())) + return S; + // replace continue with goto __continue_label + std::string buf; + + SourceLocation startLoc = S->getLocStart(); + buf = "goto __continue_label_"; + buf += utostr(ObjCBcLabelNo.back()); + ReplaceText(startLoc, strlen("continue"), buf); + + return 0; +} + +/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement. +/// It rewrites: +/// for ( type elem in collection) { stmts; } + +/// Into: +/// { +/// type elem; +/// struct __objcFastEnumerationState enumState = { 0 }; +/// id __rw_items[16]; +/// id l_collection = (id)collection; +/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState +/// objects:__rw_items count:16]; +/// if (limit) { +/// unsigned long startMutations = *enumState.mutationsPtr; +/// do { +/// unsigned long counter = 0; +/// do { +/// if (startMutations != *enumState.mutationsPtr) +/// objc_enumerationMutation(l_collection); +/// elem = (type)enumState.itemsPtr[counter++]; +/// stmts; +/// __continue_label: ; +/// } while (counter < limit); +/// } while (limit = [l_collection countByEnumeratingWithState:&enumState +/// objects:__rw_items count:16]); +/// elem = nil; +/// __break_label: ; +/// } +/// else +/// elem = nil; +/// } +/// +Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, + SourceLocation OrigEnd) { + assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty"); + assert(isa(Stmts.back()) && + "ObjCForCollectionStmt Statement stack mismatch"); + assert(!ObjCBcLabelNo.empty() && + "ObjCForCollectionStmt - Label No stack empty"); + + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + StringRef elementName; + std::string elementTypeAsString; + std::string buf; + // line directive first. + SourceLocation ForEachLoc = S->getForLoc(); + ConvertSourceLocationToLineDirective(ForEachLoc, buf); + buf += "{\n\t"; + if (DeclStmt *DS = dyn_cast(S->getElement())) { + // type elem; + NamedDecl* D = cast(DS->getSingleDecl()); + QualType ElementType = cast(D)->getType(); + if (ElementType->isObjCQualifiedIdType() || + ElementType->isObjCQualifiedInterfaceType()) + // Simply use 'id' for all qualified types. 
+ elementTypeAsString = "id"; + else + elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy()); + buf += elementTypeAsString; + buf += " "; + elementName = D->getName(); + buf += elementName; + buf += ";\n\t"; + } + else { + DeclRefExpr *DR = cast(S->getElement()); + elementName = DR->getDecl()->getName(); + ValueDecl *VD = cast(DR->getDecl()); + if (VD->getType()->isObjCQualifiedIdType() || + VD->getType()->isObjCQualifiedInterfaceType()) + // Simply use 'id' for all qualified types. + elementTypeAsString = "id"; + else + elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy()); + } + + // struct __objcFastEnumerationState enumState = { 0 }; + buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t"; + // id __rw_items[16]; + buf += "id __rw_items[16];\n\t"; + // id l_collection = (id) + buf += "id l_collection = (id)"; + // Find start location of 'collection' the hard way! + const char *startCollectionBuf = startBuf; + startCollectionBuf += 3; // skip 'for' + startCollectionBuf = strchr(startCollectionBuf, '('); + startCollectionBuf++; // skip '(' + // find 'in' and skip it. + while (*startCollectionBuf != ' ' || + *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' || + (*(startCollectionBuf+3) != ' ' && + *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '(')) + startCollectionBuf++; + startCollectionBuf += 3; + + // Replace: "for (type element in" with string constructed thus far. + ReplaceText(startLoc, startCollectionBuf - startBuf, buf); + // Replace ')' in for '(' type elem in collection ')' with ';' + SourceLocation rightParenLoc = S->getRParenLoc(); + const char *rparenBuf = SM->getCharacterData(rightParenLoc); + SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf); + buf = ";\n\t"; + + // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState + // objects:__rw_items count:16]; + // which is synthesized into: + // unsigned int limit = + // ((unsigned int (*) + // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int)) + // (void *)objc_msgSend)((id)l_collection, + // sel_registerName( + // "countByEnumeratingWithState:objects:count:"), + // (struct __objcFastEnumerationState *)&state, + // (id *)__rw_items, (unsigned int)16); + buf += "unsigned long limit =\n\t\t"; + SynthCountByEnumWithState(buf); + buf += ";\n\t"; + /// if (limit) { + /// unsigned long startMutations = *enumState.mutationsPtr; + /// do { + /// unsigned long counter = 0; + /// do { + /// if (startMutations != *enumState.mutationsPtr) + /// objc_enumerationMutation(l_collection); + /// elem = (type)enumState.itemsPtr[counter++]; + buf += "if (limit) {\n\t"; + buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t"; + buf += "do {\n\t\t"; + buf += "unsigned long counter = 0;\n\t\t"; + buf += "do {\n\t\t\t"; + buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t"; + buf += "objc_enumerationMutation(l_collection);\n\t\t\t"; + buf += elementName; + buf += " = ("; + buf += elementTypeAsString; + buf += ")enumState.itemsPtr[counter++];"; + // Replace ')' in for '(' type elem in collection ')' with all of these. 
+ ReplaceText(lparenLoc, 1, buf); + + /// __continue_label: ; + /// } while (counter < limit); + /// } while (limit = [l_collection countByEnumeratingWithState:&enumState + /// objects:__rw_items count:16]); + /// elem = nil; + /// __break_label: ; + /// } + /// else + /// elem = nil; + /// } + /// + buf = ";\n\t"; + buf += "__continue_label_"; + buf += utostr(ObjCBcLabelNo.back()); + buf += ": ;"; + buf += "\n\t\t"; + buf += "} while (counter < limit);\n\t"; + buf += "} while (limit = "; + SynthCountByEnumWithState(buf); + buf += ");\n\t"; + buf += elementName; + buf += " = (("; + buf += elementTypeAsString; + buf += ")0);\n\t"; + buf += "__break_label_"; + buf += utostr(ObjCBcLabelNo.back()); + buf += ": ;\n\t"; + buf += "}\n\t"; + buf += "else\n\t\t"; + buf += elementName; + buf += " = (("; + buf += elementTypeAsString; + buf += ")0);\n\t"; + buf += "}\n"; + + // Insert all these *after* the statement body. + // FIXME: If this should support Obj-C++, support CXXTryStmt + if (isa(S->getBody())) { + SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1); + InsertText(endBodyLoc, buf); + } else { + /* Need to treat single statements specially. For example: + * + * for (A *a in b) if (stuff()) break; + * for (A *a in b) xxxyy; + * + * The following code simply scans ahead to the semi to find the actual end. + */ + const char *stmtBuf = SM->getCharacterData(OrigEnd); + const char *semiBuf = strchr(stmtBuf, ';'); + assert(semiBuf && "Can't find ';'"); + SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1); + InsertText(endBodyLoc, buf); + } + Stmts.pop_back(); + ObjCBcLabelNo.pop_back(); + return 0; +} + +static void Write_RethrowObject(std::string &buf) { + buf += "{ struct _FIN { _FIN(id reth) : rethrow(reth) {}\n"; + buf += "\t~_FIN() { if (rethrow) objc_exception_throw(rethrow); }\n"; + buf += "\tid rethrow;\n"; + buf += "\t} _fin_force_rethow(_rethrow);"; +} + +/// RewriteObjCSynchronizedStmt - +/// This routine rewrites @synchronized(expr) stmt; +/// into: +/// objc_sync_enter(expr); +/// @try stmt @finally { objc_sync_exit(expr); } +/// +Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) { + // Get the start location and compute the semi location. + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '@') && "bogus @synchronized location"); + + std::string buf; + SourceLocation SynchLoc = S->getAtSynchronizedLoc(); + ConvertSourceLocationToLineDirective(SynchLoc, buf); + buf += "{ id _rethrow = 0; id _sync_obj = "; + + const char *lparenBuf = startBuf; + while (*lparenBuf != '(') lparenBuf++; + ReplaceText(startLoc, lparenBuf-startBuf+1, buf); + + buf = "; objc_sync_enter(_sync_obj);\n"; + buf += "try {\n\tstruct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}"; + buf += "\n\t~_SYNC_EXIT() {objc_sync_exit(sync_exit);}"; + buf += "\n\tid sync_exit;"; + buf += "\n\t} _sync_exit(_sync_obj);\n"; + + // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since + // the sync expression is typically a message expression that's already + // been rewritten! (which implies the SourceLocation's are invalid). 
+ SourceLocation RParenExprLoc = S->getSynchBody()->getLocStart(); + const char *RParenExprLocBuf = SM->getCharacterData(RParenExprLoc); + while (*RParenExprLocBuf != ')') RParenExprLocBuf--; + RParenExprLoc = startLoc.getLocWithOffset(RParenExprLocBuf-startBuf); + + SourceLocation LBranceLoc = S->getSynchBody()->getLocStart(); + const char *LBraceLocBuf = SM->getCharacterData(LBranceLoc); + assert (*LBraceLocBuf == '{'); + ReplaceText(RParenExprLoc, (LBraceLocBuf - SM->getCharacterData(RParenExprLoc) + 1), buf); + + SourceLocation startRBraceLoc = S->getSynchBody()->getLocEnd(); + assert((*SM->getCharacterData(startRBraceLoc) == '}') && + "bogus @synchronized block"); + + buf = "} catch (id e) {_rethrow = e;}\n"; + Write_RethrowObject(buf); + buf += "}\n"; + buf += "}\n"; + + ReplaceText(startRBraceLoc, 1, buf); + + return 0; +} + +void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S) +{ + // Perform a bottom up traversal of all children. + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) + WarnAboutReturnGotoStmts(*CI); + + if (isa(S) || isa(S)) { + Diags.Report(Context->getFullLoc(S->getLocStart()), + TryFinallyContainsReturnDiag); + } + return; +} + +Stmt *RewriteModernObjC::RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) { + SourceLocation startLoc = S->getAtLoc(); + ReplaceText(startLoc, strlen("@autoreleasepool"), "/* @autoreleasepool */"); + ReplaceText(S->getSubStmt()->getLocStart(), 1, + "{ __AtAutoreleasePool __autoreleasepool; "); + + return 0; +} + +Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) { + ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt(); + bool noCatch = S->getNumCatchStmts() == 0; + std::string buf; + SourceLocation TryLocation = S->getAtTryLoc(); + ConvertSourceLocationToLineDirective(TryLocation, buf); + + if (finalStmt) { + if (noCatch) + buf += "{ id volatile _rethrow = 0;\n"; + else { + buf += "{ id volatile _rethrow = 0;\ntry {\n"; + } + } + // Get the start location and compute the semi location. + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '@') && "bogus @try location"); + if (finalStmt) + ReplaceText(startLoc, 1, buf); + else + // @try -> try + ReplaceText(startLoc, 1, ""); + + for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) { + ObjCAtCatchStmt *Catch = S->getCatchStmt(I); + VarDecl *catchDecl = Catch->getCatchParamDecl(); + + startLoc = Catch->getLocStart(); + bool AtRemoved = false; + if (catchDecl) { + QualType t = catchDecl->getType(); + if (const ObjCObjectPointerType *Ptr = t->getAs()) { + // Should be a pointer to a class. + ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface(); + if (IDecl) { + std::string Result; + ConvertSourceLocationToLineDirective(Catch->getLocStart(), Result); + + startBuf = SM->getCharacterData(startLoc); + assert((*startBuf == '@') && "bogus @catch location"); + SourceLocation rParenLoc = Catch->getRParenLoc(); + const char *rParenBuf = SM->getCharacterData(rParenLoc); + + // _objc_exc_Foo *_e as argument to catch. 
+ Result += "catch (_objc_exc_"; Result += IDecl->getNameAsString(); + Result += " *_"; Result += catchDecl->getNameAsString(); + Result += ")"; + ReplaceText(startLoc, rParenBuf-startBuf+1, Result); + // Foo *e = (Foo *)_e; + Result.clear(); + Result = "{ "; + Result += IDecl->getNameAsString(); + Result += " *"; Result += catchDecl->getNameAsString(); + Result += " = ("; Result += IDecl->getNameAsString(); Result += "*)"; + Result += "_"; Result += catchDecl->getNameAsString(); + + Result += "; "; + SourceLocation lBraceLoc = Catch->getCatchBody()->getLocStart(); + ReplaceText(lBraceLoc, 1, Result); + AtRemoved = true; + } + } + } + if (!AtRemoved) + // @catch -> catch + ReplaceText(startLoc, 1, ""); + + } + if (finalStmt) { + buf.clear(); + SourceLocation FinallyLoc = finalStmt->getLocStart(); + + if (noCatch) { + ConvertSourceLocationToLineDirective(FinallyLoc, buf); + buf += "catch (id e) {_rethrow = e;}\n"; + } + else { + buf += "}\n"; + ConvertSourceLocationToLineDirective(FinallyLoc, buf); + buf += "catch (id e) {_rethrow = e;}\n"; + } + + SourceLocation startFinalLoc = finalStmt->getLocStart(); + ReplaceText(startFinalLoc, 8, buf); + Stmt *body = finalStmt->getFinallyBody(); + SourceLocation startFinalBodyLoc = body->getLocStart(); + buf.clear(); + Write_RethrowObject(buf); + ReplaceText(startFinalBodyLoc, 1, buf); + + SourceLocation endFinalBodyLoc = body->getLocEnd(); + ReplaceText(endFinalBodyLoc, 1, "}\n}"); + // Now check for any return/continue/go statements within the @try. + WarnAboutReturnGotoStmts(S->getTryBody()); + } + + return 0; +} + +// This can't be done with ReplaceStmt(S, ThrowExpr), since +// the throw expression is typically a message expression that's already +// been rewritten! (which implies the SourceLocation's are invalid). +Stmt *RewriteModernObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) { + // Get the start location and compute the semi location. + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '@') && "bogus @throw location"); + + std::string buf; + /* void objc_exception_throw(id) __attribute__((noreturn)); */ + if (S->getThrowExpr()) + buf = "objc_exception_throw("; + else + buf = "throw"; + + // handle "@ throw" correctly. + const char *wBuf = strchr(startBuf, 'w'); + assert((*wBuf == 'w') && "@throw: can't find 'w'"); + ReplaceText(startLoc, wBuf-startBuf+1, buf); + + const char *semiBuf = strchr(startBuf, ';'); + assert((*semiBuf == ';') && "@throw: can't find ';'"); + SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf); + if (S->getThrowExpr()) + ReplaceText(semiLoc, 1, ");"); + return 0; +} + +Stmt *RewriteModernObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) { + // Create a new string expression. + QualType StrType = Context->getPointerType(Context->CharTy); + std::string StrEncoding; + Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding); + Expr *Replacement = StringLiteral::Create(*Context, StrEncoding, + StringLiteral::Ascii, false, + StrType, SourceLocation()); + ReplaceStmt(Exp, Replacement); + + // Replace this subexpr in the parent. + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return Replacement; +} + +Stmt *RewriteModernObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) { + if (!SelGetUidFunctionDecl) + SynthSelGetUidFunctionDecl(); + assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl"); + // Create a call to sel_registerName("selName"). 
+ SmallVector SelExprs; + QualType argType = Context->getPointerType(Context->CharTy); + SelExprs.push_back(StringLiteral::Create(*Context, + Exp->getSelector().getAsString(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, + &SelExprs[0], SelExprs.size()); + ReplaceStmt(Exp, SelExp); + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return SelExp; +} + +CallExpr *RewriteModernObjC::SynthesizeCallToFunctionDecl( + FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc, + SourceLocation EndLoc) { + // Get the type, we will need to reference it in a couple spots. + QualType msgSendType = FD->getType(); + + // Create a reference to the objc_msgSend() declaration. + DeclRefExpr *DRE = + new (Context) DeclRefExpr(FD, false, msgSendType, VK_LValue, SourceLocation()); + + // Now, we cast the reference to a pointer to the objc_msgSend type. + QualType pToFunc = Context->getPointerType(msgSendType); + ImplicitCastExpr *ICE = + ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay, + DRE, 0, VK_RValue); + + const FunctionType *FT = msgSendType->getAs(); + + CallExpr *Exp = + new (Context) CallExpr(*Context, ICE, llvm::makeArrayRef(args, nargs), + FT->getCallResultType(*Context), + VK_RValue, EndLoc); + return Exp; +} + +static bool scanForProtocolRefs(const char *startBuf, const char *endBuf, + const char *&startRef, const char *&endRef) { + while (startBuf < endBuf) { + if (*startBuf == '<') + startRef = startBuf; // mark the start. + if (*startBuf == '>') { + if (startRef && *startRef == '<') { + endRef = startBuf; // mark the end. + return true; + } + return false; + } + startBuf++; + } + return false; +} + +static void scanToNextArgument(const char *&argRef) { + int angle = 0; + while (*argRef != ')' && (*argRef != ',' || angle > 0)) { + if (*argRef == '<') + angle++; + else if (*argRef == '>') + angle--; + argRef++; + } + assert(angle == 0 && "scanToNextArgument - bad protocol type syntax"); +} + +bool RewriteModernObjC::needToScanForQualifiers(QualType T) { + if (T->isObjCQualifiedIdType()) + return true; + if (const PointerType *PT = T->getAs()) { + if (PT->getPointeeType()->isObjCQualifiedIdType()) + return true; + } + if (T->isObjCObjectPointerType()) { + T = T->getPointeeType(); + return T->isObjCQualifiedInterfaceType(); + } + if (T->isArrayType()) { + QualType ElemTy = Context->getBaseElementType(T); + return needToScanForQualifiers(ElemTy); + } + return false; +} + +void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) { + QualType Type = E->getType(); + if (needToScanForQualifiers(Type)) { + SourceLocation Loc, EndLoc; + + if (const CStyleCastExpr *ECE = dyn_cast(E)) { + Loc = ECE->getLParenLoc(); + EndLoc = ECE->getRParenLoc(); + } else { + Loc = E->getLocStart(); + EndLoc = E->getLocEnd(); + } + // This will defend against trying to rewrite synthesized expressions. + if (Loc.isInvalid() || EndLoc.isInvalid()) + return; + + const char *startBuf = SM->getCharacterData(Loc); + const char *endBuf = SM->getCharacterData(EndLoc); + const char *startRef = 0, *endRef = 0; + if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { + // Get the locations of the startRef, endRef. + SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf); + SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1); + // Comment out the protocol references. 
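// A minimal sketch (protocol name assumed): for a declaration such as
//     id <NSCopying> obj;
// the two InsertText() calls below comment out the protocol qualifier, giving
//     id /*<NSCopying>*/ obj;
// which is valid C/C++ once the rewrite is complete.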
+ InsertText(LessLoc, "/*"); + InsertText(GreaterLoc, "*/"); + } + } +} + +void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) { + SourceLocation Loc; + QualType Type; + const FunctionProtoType *proto = 0; + if (VarDecl *VD = dyn_cast(Dcl)) { + Loc = VD->getLocation(); + Type = VD->getType(); + } + else if (FunctionDecl *FD = dyn_cast(Dcl)) { + Loc = FD->getLocation(); + // Check for ObjC 'id' and class types that have been adorned with protocol + // information (id
<p>, C<p>
    *). The protocol references need to be rewritten! + const FunctionType *funcType = FD->getType()->getAs(); + assert(funcType && "missing function type"); + proto = dyn_cast(funcType); + if (!proto) + return; + Type = proto->getResultType(); + } + else if (FieldDecl *FD = dyn_cast(Dcl)) { + Loc = FD->getLocation(); + Type = FD->getType(); + } + else + return; + + if (needToScanForQualifiers(Type)) { + // Since types are unique, we need to scan the buffer. + + const char *endBuf = SM->getCharacterData(Loc); + const char *startBuf = endBuf; + while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart) + startBuf--; // scan backward (from the decl location) for return type. + const char *startRef = 0, *endRef = 0; + if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { + // Get the locations of the startRef, endRef. + SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf); + SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1); + // Comment out the protocol references. + InsertText(LessLoc, "/*"); + InsertText(GreaterLoc, "*/"); + } + } + if (!proto) + return; // most likely, was a variable + // Now check arguments. + const char *startBuf = SM->getCharacterData(Loc); + const char *startFuncBuf = startBuf; + for (unsigned i = 0; i < proto->getNumArgs(); i++) { + if (needToScanForQualifiers(proto->getArgType(i))) { + // Since types are unique, we need to scan the buffer. + + const char *endBuf = startBuf; + // scan forward (from the decl location) for argument types. + scanToNextArgument(endBuf); + const char *startRef = 0, *endRef = 0; + if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { + // Get the locations of the startRef, endRef. + SourceLocation LessLoc = + Loc.getLocWithOffset(startRef-startFuncBuf); + SourceLocation GreaterLoc = + Loc.getLocWithOffset(endRef-startFuncBuf+1); + // Comment out the protocol references. + InsertText(LessLoc, "/*"); + InsertText(GreaterLoc, "*/"); + } + startBuf = ++endBuf; + } + else { + // If the function name is derived from a macro expansion, then the + // argument buffer will not follow the name. Need to speak with Chris. + while (*startBuf && *startBuf != ')' && *startBuf != ',') + startBuf++; // scan forward (from the decl location) for argument types. + startBuf++; + } + } +} + +void RewriteModernObjC::RewriteTypeOfDecl(VarDecl *ND) { + QualType QT = ND->getType(); + const Type* TypePtr = QT->getAs(); + if (!isa(TypePtr)) + return; + while (isa(TypePtr)) { + const TypeOfExprType *TypeOfExprTypePtr = cast(TypePtr); + QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType(); + TypePtr = QT->getAs(); + } + // FIXME. 
This will not work for multiple declarators; as in: + // __typeof__(a) b,c,d; + std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy())); + SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); + const char *startBuf = SM->getCharacterData(DeclLoc); + if (ND->getInit()) { + std::string Name(ND->getNameAsString()); + TypeAsString += " " + Name + " = "; + Expr *E = ND->getInit(); + SourceLocation startLoc; + if (const CStyleCastExpr *ECE = dyn_cast(E)) + startLoc = ECE->getLParenLoc(); + else + startLoc = E->getLocStart(); + startLoc = SM->getExpansionLoc(startLoc); + const char *endBuf = SM->getCharacterData(startLoc); + ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString); + } + else { + SourceLocation X = ND->getLocEnd(); + X = SM->getExpansionLoc(X); + const char *endBuf = SM->getCharacterData(X); + ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString); + } +} + +// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str); +void RewriteModernObjC::SynthSelGetUidFunctionDecl() { + IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName"); + SmallVector ArgTys; + ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); + QualType getFuncType = + getSimpleFunctionType(Context->getObjCSelType(), &ArgTys[0], ArgTys.size()); + SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + SelGetUidIdent, getFuncType, 0, + SC_Extern, + SC_None, false); +} + +void RewriteModernObjC::RewriteFunctionDecl(FunctionDecl *FD) { + // declared in + if (FD->getIdentifier() && + FD->getName() == "sel_registerName") { + SelGetUidFunctionDecl = FD; + return; + } + RewriteObjCQualifiedInterfaceTypes(FD); +} + +void RewriteModernObjC::RewriteBlockPointerType(std::string& Str, QualType Type) { + std::string TypeString(Type.getAsString(Context->getPrintingPolicy())); + const char *argPtr = TypeString.c_str(); + if (!strchr(argPtr, '^')) { + Str += TypeString; + return; + } + while (*argPtr) { + Str += (*argPtr == '^' ? '*' : *argPtr); + argPtr++; + } +} + +// FIXME. Consolidate this routine with RewriteBlockPointerType. +void RewriteModernObjC::RewriteBlockPointerTypeVariable(std::string& Str, + ValueDecl *VD) { + QualType Type = VD->getType(); + std::string TypeString(Type.getAsString(Context->getPrintingPolicy())); + const char *argPtr = TypeString.c_str(); + int paren = 0; + while (*argPtr) { + switch (*argPtr) { + case '(': + Str += *argPtr; + paren++; + break; + case ')': + Str += *argPtr; + paren--; + break; + case '^': + Str += '*'; + if (paren == 1) + Str += VD->getNameAsString(); + break; + default: + Str += *argPtr; + break; + } + argPtr++; + } +} + +void RewriteModernObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) { + SourceLocation FunLocStart = FD->getTypeSpecStartLoc(); + const FunctionType *funcType = FD->getType()->getAs(); + const FunctionProtoType *proto = dyn_cast(funcType); + if (!proto) + return; + QualType Type = proto->getResultType(); + std::string FdStr = Type.getAsString(Context->getPrintingPolicy()); + FdStr += " "; + FdStr += FD->getName(); + FdStr += "("; + unsigned numArgs = proto->getNumArgs(); + for (unsigned i = 0; i < numArgs; i++) { + QualType ArgType = proto->getArgType(i); + RewriteBlockPointerType(FdStr, ArgType); + if (i+1 < numArgs) + FdStr += ", "; + } + if (FD->isVariadic()) { + FdStr += (numArgs > 0) ? 
", ...);\n" : "...);\n"; + } + else + FdStr += ");\n"; + InsertText(FunLocStart, FdStr); +} + +// SynthSuperContructorFunctionDecl - id __rw_objc_super(id obj, id super); +void RewriteModernObjC::SynthSuperContructorFunctionDecl() { + if (SuperContructorFunctionDecl) + return; + IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super"); + SmallVector ArgTys; + QualType argT = Context->getObjCIdType(); + assert(!argT.isNull() && "Can't find 'id' type"); + ArgTys.push_back(argT); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size()); + SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...); +void RewriteModernObjC::SynthMsgSendFunctionDecl() { + IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend"); + SmallVector ArgTys; + QualType argT = Context->getObjCIdType(); + assert(!argT.isNull() && "Can't find 'id' type"); + ArgTys.push_back(argT); + argT = Context->getObjCSelType(); + assert(!argT.isNull() && "Can't find 'SEL' type"); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size(), + true /*isVariadic*/); + MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(void); +void RewriteModernObjC::SynthMsgSendSuperFunctionDecl() { + IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper"); + SmallVector ArgTys; + ArgTys.push_back(Context->VoidTy); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], 1, + true /*isVariadic*/); + MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...); +void RewriteModernObjC::SynthMsgSendStretFunctionDecl() { + IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret"); + SmallVector ArgTys; + QualType argT = Context->getObjCIdType(); + assert(!argT.isNull() && "Can't find 'id' type"); + ArgTys.push_back(argT); + argT = Context->getObjCSelType(); + assert(!argT.isNull() && "Can't find 'SEL' type"); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size(), + true /*isVariadic*/); + MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendSuperStretFunctionDecl - +// id objc_msgSendSuper_stret(void); +void RewriteModernObjC::SynthMsgSendSuperStretFunctionDecl() { + IdentifierInfo *msgSendIdent = + &Context->Idents.get("objc_msgSendSuper_stret"); + SmallVector ArgTys; + ArgTys.push_back(Context->VoidTy); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], 1, + true /*isVariadic*/); + MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...); 
+void RewriteModernObjC::SynthMsgSendFpretFunctionDecl() { + IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret"); + SmallVector ArgTys; + QualType argT = Context->getObjCIdType(); + assert(!argT.isNull() && "Can't find 'id' type"); + ArgTys.push_back(argT); + argT = Context->getObjCSelType(); + assert(!argT.isNull() && "Can't find 'SEL' type"); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->DoubleTy, + &ArgTys[0], ArgTys.size(), + true /*isVariadic*/); + MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthGetClassFunctionDecl - Class objc_getClass(const char *name); +void RewriteModernObjC::SynthGetClassFunctionDecl() { + IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass"); + SmallVector ArgTys; + ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); + QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(), + &ArgTys[0], ArgTys.size()); + GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + getClassIdent, getClassType, 0, + SC_Extern, + SC_None, false); +} + +// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls); +void RewriteModernObjC::SynthGetSuperClassFunctionDecl() { + IdentifierInfo *getSuperClassIdent = + &Context->Idents.get("class_getSuperclass"); + SmallVector ArgTys; + ArgTys.push_back(Context->getObjCClassType()); + QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(), + &ArgTys[0], ArgTys.size()); + GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + getSuperClassIdent, + getClassType, 0, + SC_Extern, + SC_None, + false); +} + +// SynthGetMetaClassFunctionDecl - Class objc_getMetaClass(const char *name); +void RewriteModernObjC::SynthGetMetaClassFunctionDecl() { + IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass"); + SmallVector ArgTys; + ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); + QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(), + &ArgTys[0], ArgTys.size()); + GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + getClassIdent, getClassType, 0, + SC_Extern, + SC_None, false); +} + +Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) { + QualType strType = getConstantStringStructType(); + + std::string S = "__NSConstantStringImpl_"; + + std::string tmpName = InFileName; + unsigned i; + for (i=0; i < tmpName.length(); i++) { + char c = tmpName.at(i); + // replace any non alphanumeric characters with '_'. + if (!isalpha(c) && (c < '0' || c > '9')) + tmpName[i] = '_'; + } + S += tmpName; + S += "_"; + S += utostr(NumObjCStringLiterals++); + + Preamble += "static __NSConstantStringImpl " + S; + Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,"; + Preamble += "0x000007c8,"; // utf8_str + // The pretty printer for StringLiteral handles escape characters properly. 
+ std::string prettyBufS; + llvm::raw_string_ostream prettyBuf(prettyBufS); + Exp->getString()->printPretty(prettyBuf, 0, PrintingPolicy(LangOpts)); + Preamble += prettyBuf.str(); + Preamble += ","; + Preamble += utostr(Exp->getString()->getByteLength()) + "};\n"; + + VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(), + SourceLocation(), &Context->Idents.get(S), + strType, 0, SC_Static, SC_None); + DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue, + SourceLocation()); + Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf, + Context->getPointerType(DRE->getType()), + VK_RValue, OK_Ordinary, + SourceLocation()); + // cast to NSConstantString * + CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(), + CK_CPointerToObjCPointerCast, Unop); + ReplaceStmt(Exp, cast); + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return cast; +} + +Stmt *RewriteModernObjC::RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp) { + unsigned IntSize = + static_cast(Context->getTypeSize(Context->IntTy)); + + Expr *FlagExp = IntegerLiteral::Create(*Context, + llvm::APInt(IntSize, Exp->getValue()), + Context->IntTy, Exp->getLocation()); + CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Context->ObjCBuiltinBoolTy, + CK_BitCast, FlagExp); + ParenExpr *PE = new (Context) ParenExpr(Exp->getLocation(), Exp->getExprLoc(), + cast); + ReplaceStmt(Exp, PE); + return PE; +} + +Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) { + // synthesize declaration of helper functions needed in this routine. + if (!SelGetUidFunctionDecl) + SynthSelGetUidFunctionDecl(); + // use objc_msgSend() for all. + if (!MsgSendFunctionDecl) + SynthMsgSendFunctionDecl(); + if (!GetClassFunctionDecl) + SynthGetClassFunctionDecl(); + + FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; + SourceLocation StartLoc = Exp->getLocStart(); + SourceLocation EndLoc = Exp->getLocEnd(); + + // Synthesize a call to objc_msgSend(). + SmallVector MsgExprs; + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + + // Create a call to objc_getClass(""). It will be the 1st argument. + ObjCMethodDecl *BoxingMethod = Exp->getBoxingMethod(); + ObjCInterfaceDecl *BoxingClass = BoxingMethod->getClassInterface(); + + IdentifierInfo *clsName = BoxingClass->getIdentifier(); + ClsExprs.push_back(StringLiteral::Create(*Context, + clsName->getName(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, EndLoc); + MsgExprs.push_back(Cls); + + // Create a call to sel_registerName(":"), etc. + // it will be the 2nd argument. + SmallVector SelExprs; + SelExprs.push_back(StringLiteral::Create(*Context, + BoxingMethod->getSelector().getAsString(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, + &SelExprs[0], SelExprs.size(), + StartLoc, EndLoc); + MsgExprs.push_back(SelExp); + + // User provided sub-expression is the 3rd, and last, argument. 
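(Illustrative sketch, not part of the imported patch: RewriteObjCStringLiteral() above emits one file-static __NSConstantStringImpl record per @"..." literal into the __DATA,__cfstring section and replaces the literal with that record's address, cast back to the expression's NSString-flavored pointer type. Field types are simplified here so the sketch stands alone; the 0x000007c8 flag value is copied verbatim from the code above, where it is tagged "utf8_str". For a hypothetical @"hi" in test.m:)

    // Mirrors the 4-field layout built by getConstantStringStructType() later
    // in this file (isa/receiver, flags, str, length), with simplified types.
    struct __NSConstantStringImpl {
      void      **isa;
      int         flags;
      const char *str;
      long        length;
    };
    extern "C" void *__CFConstantStringClassReference[];   // provided by the runtime

    static __NSConstantStringImpl __NSConstantStringImpl_test_m_0
        __attribute__((section("__DATA, __cfstring"))) =
        {__CFConstantStringClassReference, 0x000007c8, "hi", 2};
    // The use site then reads, roughly:
    //   (NSConstantString *)&__NSConstantStringImpl_test_m_0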
+ Expr *subExpr = Exp->getSubExpr(); + if (ImplicitCastExpr *ICE = dyn_cast(subExpr)) { + QualType type = ICE->getType(); + const Expr *SubExpr = ICE->IgnoreParenImpCasts(); + CastKind CK = CK_BitCast; + if (SubExpr->getType()->isIntegralType(*Context) && type->isBooleanType()) + CK = CK_IntegralToBoolean; + subExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, subExpr); + } + MsgExprs.push_back(subExpr); + + SmallVector ArgTypes; + ArgTypes.push_back(Context->getObjCIdType()); + ArgTypes.push_back(Context->getObjCSelType()); + for (ObjCMethodDecl::param_iterator PI = BoxingMethod->param_begin(), + E = BoxingMethod->param_end(); PI != E; ++PI) + ArgTypes.push_back((*PI)->getType()); + + QualType returnType = Exp->getType(); + // Get the type, we will need to reference it in a couple spots. + QualType msgSendType = MsgSendFlavor->getType(); + + // Create a reference to the objc_msgSend() declaration. + DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, + VK_LValue, SourceLocation()); + + CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(Context->VoidTy), + CK_BitCast, DRE); + + // Now do the "normal" pointer to function cast. + QualType castType = + getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), + BoxingMethod->isVariadic()); + castType = Context->getPointerType(castType); + cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, + cast); + + // Don't forget the parens to enforce the proper binding. + ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); + + const FunctionType *FT = msgSendType->getAs(); + CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs, + FT->getResultType(), VK_RValue, + EndLoc); + ReplaceStmt(Exp, CE); + return CE; +} + +Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) { + // synthesize declaration of helper functions needed in this routine. + if (!SelGetUidFunctionDecl) + SynthSelGetUidFunctionDecl(); + // use objc_msgSend() for all. 
+ if (!MsgSendFunctionDecl) + SynthMsgSendFunctionDecl(); + if (!GetClassFunctionDecl) + SynthGetClassFunctionDecl(); + + FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; + SourceLocation StartLoc = Exp->getLocStart(); + SourceLocation EndLoc = Exp->getLocEnd(); + + // Build the expression: __NSContainer_literal(int, ...).arr + QualType IntQT = Context->IntTy; + QualType NSArrayFType = + getSimpleFunctionType(Context->VoidTy, &IntQT, 1, true); + std::string NSArrayFName("__NSContainer_literal"); + FunctionDecl *NSArrayFD = SynthBlockInitFunctionDecl(NSArrayFName); + DeclRefExpr *NSArrayDRE = + new (Context) DeclRefExpr(NSArrayFD, false, NSArrayFType, VK_RValue, + SourceLocation()); + + SmallVector InitExprs; + unsigned NumElements = Exp->getNumElements(); + unsigned UnsignedIntSize = + static_cast(Context->getTypeSize(Context->UnsignedIntTy)); + Expr *count = IntegerLiteral::Create(*Context, + llvm::APInt(UnsignedIntSize, NumElements), + Context->UnsignedIntTy, SourceLocation()); + InitExprs.push_back(count); + for (unsigned i = 0; i < NumElements; i++) + InitExprs.push_back(Exp->getElement(i)); + Expr *NSArrayCallExpr = + new (Context) CallExpr(*Context, NSArrayDRE, InitExprs, + NSArrayFType, VK_LValue, SourceLocation()); + + FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get("arr"), + Context->getPointerType(Context->VoidPtrTy), 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *ArrayLiteralME = + new (Context) MemberExpr(NSArrayCallExpr, false, ARRFD, + SourceLocation(), + ARRFD->getType(), VK_LValue, + OK_Ordinary); + QualType ConstIdT = Context->getObjCIdType().withConst(); + CStyleCastExpr * ArrayLiteralObjects = + NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(ConstIdT), + CK_BitCast, + ArrayLiteralME); + + // Synthesize a call to objc_msgSend(). + SmallVector MsgExprs; + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + QualType expType = Exp->getType(); + + // Create a call to objc_getClass("NSArray"). It will be th 1st argument. + ObjCInterfaceDecl *Class = + expType->getPointeeType()->getAs()->getInterface(); + + IdentifierInfo *clsName = Class->getIdentifier(); + ClsExprs.push_back(StringLiteral::Create(*Context, + clsName->getName(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, EndLoc); + MsgExprs.push_back(Cls); + + // Create a call to sel_registerName("arrayWithObjects:count:"). + // it will be the 2nd argument. 
+ SmallVector SelExprs; + ObjCMethodDecl *ArrayMethod = Exp->getArrayWithObjectsMethod(); + SelExprs.push_back(StringLiteral::Create(*Context, + ArrayMethod->getSelector().getAsString(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, + &SelExprs[0], SelExprs.size(), + StartLoc, EndLoc); + MsgExprs.push_back(SelExp); + + // (const id [])objects + MsgExprs.push_back(ArrayLiteralObjects); + + // (NSUInteger)cnt + Expr *cnt = IntegerLiteral::Create(*Context, + llvm::APInt(UnsignedIntSize, NumElements), + Context->UnsignedIntTy, SourceLocation()); + MsgExprs.push_back(cnt); + + + SmallVector ArgTypes; + ArgTypes.push_back(Context->getObjCIdType()); + ArgTypes.push_back(Context->getObjCSelType()); + for (ObjCMethodDecl::param_iterator PI = ArrayMethod->param_begin(), + E = ArrayMethod->param_end(); PI != E; ++PI) + ArgTypes.push_back((*PI)->getType()); + + QualType returnType = Exp->getType(); + // Get the type, we will need to reference it in a couple spots. + QualType msgSendType = MsgSendFlavor->getType(); + + // Create a reference to the objc_msgSend() declaration. + DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, + VK_LValue, SourceLocation()); + + CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(Context->VoidTy), + CK_BitCast, DRE); + + // Now do the "normal" pointer to function cast. + QualType castType = + getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), + ArrayMethod->isVariadic()); + castType = Context->getPointerType(castType); + cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, + cast); + + // Don't forget the parens to enforce the proper binding. + ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); + + const FunctionType *FT = msgSendType->getAs(); + CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs, + FT->getResultType(), VK_RValue, + EndLoc); + ReplaceStmt(Exp, CE); + return CE; +} + +Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp) { + // synthesize declaration of helper functions needed in this routine. + if (!SelGetUidFunctionDecl) + SynthSelGetUidFunctionDecl(); + // use objc_msgSend() for all. 
+ if (!MsgSendFunctionDecl) + SynthMsgSendFunctionDecl(); + if (!GetClassFunctionDecl) + SynthGetClassFunctionDecl(); + + FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; + SourceLocation StartLoc = Exp->getLocStart(); + SourceLocation EndLoc = Exp->getLocEnd(); + + // Build the expression: __NSContainer_literal(int, ...).arr + QualType IntQT = Context->IntTy; + QualType NSDictFType = + getSimpleFunctionType(Context->VoidTy, &IntQT, 1, true); + std::string NSDictFName("__NSContainer_literal"); + FunctionDecl *NSDictFD = SynthBlockInitFunctionDecl(NSDictFName); + DeclRefExpr *NSDictDRE = + new (Context) DeclRefExpr(NSDictFD, false, NSDictFType, VK_RValue, + SourceLocation()); + + SmallVector KeyExprs; + SmallVector ValueExprs; + + unsigned NumElements = Exp->getNumElements(); + unsigned UnsignedIntSize = + static_cast(Context->getTypeSize(Context->UnsignedIntTy)); + Expr *count = IntegerLiteral::Create(*Context, + llvm::APInt(UnsignedIntSize, NumElements), + Context->UnsignedIntTy, SourceLocation()); + KeyExprs.push_back(count); + ValueExprs.push_back(count); + for (unsigned i = 0; i < NumElements; i++) { + ObjCDictionaryElement Element = Exp->getKeyValueElement(i); + KeyExprs.push_back(Element.Key); + ValueExprs.push_back(Element.Value); + } + + // (const id [])objects + Expr *NSValueCallExpr = + new (Context) CallExpr(*Context, NSDictDRE, ValueExprs, + NSDictFType, VK_LValue, SourceLocation()); + + FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get("arr"), + Context->getPointerType(Context->VoidPtrTy), 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *DictLiteralValueME = + new (Context) MemberExpr(NSValueCallExpr, false, ARRFD, + SourceLocation(), + ARRFD->getType(), VK_LValue, + OK_Ordinary); + QualType ConstIdT = Context->getObjCIdType().withConst(); + CStyleCastExpr * DictValueObjects = + NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(ConstIdT), + CK_BitCast, + DictLiteralValueME); + // (const id [])keys + Expr *NSKeyCallExpr = + new (Context) CallExpr(*Context, NSDictDRE, KeyExprs, + NSDictFType, VK_LValue, SourceLocation()); + + MemberExpr *DictLiteralKeyME = + new (Context) MemberExpr(NSKeyCallExpr, false, ARRFD, + SourceLocation(), + ARRFD->getType(), VK_LValue, + OK_Ordinary); + + CStyleCastExpr * DictKeyObjects = + NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(ConstIdT), + CK_BitCast, + DictLiteralKeyME); + + + + // Synthesize a call to objc_msgSend(). + SmallVector MsgExprs; + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + QualType expType = Exp->getType(); + + // Create a call to objc_getClass("NSArray"). It will be th 1st argument. + ObjCInterfaceDecl *Class = + expType->getPointeeType()->getAs()->getInterface(); + + IdentifierInfo *clsName = Class->getIdentifier(); + ClsExprs.push_back(StringLiteral::Create(*Context, + clsName->getName(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, EndLoc); + MsgExprs.push_back(Cls); + + // Create a call to sel_registerName("arrayWithObjects:count:"). + // it will be the 2nd argument. 
+ SmallVector SelExprs; + ObjCMethodDecl *DictMethod = Exp->getDictWithObjectsMethod(); + SelExprs.push_back(StringLiteral::Create(*Context, + DictMethod->getSelector().getAsString(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, + &SelExprs[0], SelExprs.size(), + StartLoc, EndLoc); + MsgExprs.push_back(SelExp); + + // (const id [])objects + MsgExprs.push_back(DictValueObjects); + + // (const id [])keys + MsgExprs.push_back(DictKeyObjects); + + // (NSUInteger)cnt + Expr *cnt = IntegerLiteral::Create(*Context, + llvm::APInt(UnsignedIntSize, NumElements), + Context->UnsignedIntTy, SourceLocation()); + MsgExprs.push_back(cnt); + + + SmallVector ArgTypes; + ArgTypes.push_back(Context->getObjCIdType()); + ArgTypes.push_back(Context->getObjCSelType()); + for (ObjCMethodDecl::param_iterator PI = DictMethod->param_begin(), + E = DictMethod->param_end(); PI != E; ++PI) { + QualType T = (*PI)->getType(); + if (const PointerType* PT = T->getAs()) { + QualType PointeeTy = PT->getPointeeType(); + convertToUnqualifiedObjCType(PointeeTy); + T = Context->getPointerType(PointeeTy); + } + ArgTypes.push_back(T); + } + + QualType returnType = Exp->getType(); + // Get the type, we will need to reference it in a couple spots. + QualType msgSendType = MsgSendFlavor->getType(); + + // Create a reference to the objc_msgSend() declaration. + DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, + VK_LValue, SourceLocation()); + + CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(Context->VoidTy), + CK_BitCast, DRE); + + // Now do the "normal" pointer to function cast. + QualType castType = + getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), + DictMethod->isVariadic()); + castType = Context->getPointerType(castType); + cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, + cast); + + // Don't forget the parens to enforce the proper binding. 
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); + + const FunctionType *FT = msgSendType->getAs(); + CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs, + FT->getResultType(), VK_RValue, + EndLoc); + ReplaceStmt(Exp, CE); + return CE; +} + +// struct __rw_objc_super { +// struct objc_object *object; struct objc_object *superClass; +// }; +QualType RewriteModernObjC::getSuperStructType() { + if (!SuperStructDecl) { + SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("__rw_objc_super")); + QualType FieldTypes[2]; + + // struct objc_object *object; + FieldTypes[0] = Context->getObjCIdType(); + // struct objc_object *superClass; + FieldTypes[1] = Context->getObjCIdType(); + + // Create fields + for (unsigned i = 0; i < 2; ++i) { + SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl, + SourceLocation(), + SourceLocation(), 0, + FieldTypes[i], 0, + /*BitWidth=*/0, + /*Mutable=*/false, + ICIS_NoInit)); + } + + SuperStructDecl->completeDefinition(); + } + return Context->getTagDeclType(SuperStructDecl); +} + +QualType RewriteModernObjC::getConstantStringStructType() { + if (!ConstantStringDecl) { + ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("__NSConstantStringImpl")); + QualType FieldTypes[4]; + + // struct objc_object *receiver; + FieldTypes[0] = Context->getObjCIdType(); + // int flags; + FieldTypes[1] = Context->IntTy; + // char *str; + FieldTypes[2] = Context->getPointerType(Context->CharTy); + // long length; + FieldTypes[3] = Context->LongTy; + + // Create fields + for (unsigned i = 0; i < 4; ++i) { + ConstantStringDecl->addDecl(FieldDecl::Create(*Context, + ConstantStringDecl, + SourceLocation(), + SourceLocation(), 0, + FieldTypes[i], 0, + /*BitWidth=*/0, + /*Mutable=*/true, + ICIS_NoInit)); + } + + ConstantStringDecl->completeDefinition(); + } + return Context->getTagDeclType(ConstantStringDecl); +} + +/// getFunctionSourceLocation - returns start location of a function +/// definition. Complication arises when function has declared as +/// extern "C" or extern "C" {...} +static SourceLocation getFunctionSourceLocation (RewriteModernObjC &R, + FunctionDecl *FD) { + if (FD->isExternC() && !FD->isMain()) { + const DeclContext *DC = FD->getDeclContext(); + if (const LinkageSpecDecl *LSD = dyn_cast(DC)) + // if it is extern "C" {...}, return function decl's own location. + if (!LSD->getRBraceLoc().isValid()) + return LSD->getExternLoc(); + } + if (FD->getStorageClassAsWritten() != SC_None) + R.RewriteBlockLiteralFunctionDecl(FD); + return FD->getTypeSpecStartLoc(); +} + +void RewriteModernObjC::RewriteLineDirective(const Decl *D) { + + SourceLocation Location = D->getLocation(); + + if (Location.isFileID()) { + std::string LineString("\n#line "); + PresumedLoc PLoc = SM->getPresumedLoc(Location); + LineString += utostr(PLoc.getLine()); + LineString += " \""; + LineString += Lexer::Stringify(PLoc.getFilename()); + if (isa(D)) + LineString += "\""; + else LineString += "\"\n"; + + Location = D->getLocStart(); + if (const FunctionDecl *FD = dyn_cast(D)) { + if (FD->isExternC() && !FD->isMain()) { + const DeclContext *DC = FD->getDeclContext(); + if (const LinkageSpecDecl *LSD = dyn_cast(DC)) + // if it is extern "C" {...}, return function decl's own location. 
+ if (!LSD->getRBraceLoc().isValid()) + Location = LSD->getExternLoc(); + } + } + InsertText(Location, LineString); + } +} + +/// SynthMsgSendStretCallExpr - This routine translates message expression +/// into a call to objc_msgSend_stret() entry point. Tricky part is that +/// nil check on receiver must be performed before calling objc_msgSend_stret. +/// MsgSendStretFlavor - function declaration objc_msgSend_stret(...) +/// msgSendType - function type of objc_msgSend_stret(...) +/// returnType - Result type of the method being synthesized. +/// ArgTypes - type of the arguments passed to objc_msgSend_stret, starting with receiver type. +/// MsgExprs - list of argument expressions being passed to objc_msgSend_stret, +/// starting with receiver. +/// Method - Method being rewritten. +Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor, + QualType msgSendType, + QualType returnType, + SmallVectorImpl &ArgTypes, + SmallVectorImpl &MsgExprs, + ObjCMethodDecl *Method) { + // Now do the "normal" pointer to function cast. + QualType castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), + Method ? Method->isVariadic() : false); + castType = Context->getPointerType(castType); + + // build type for containing the objc_msgSend_stret object. + static unsigned stretCount=0; + std::string name = "__Stret"; name += utostr(stretCount); + std::string str = + "extern \"C\" void * __cdecl memset(void *_Dst, int _Val, size_t _Size);\n"; + str += "struct "; str += name; + str += " {\n\t"; + str += name; + str += "(id receiver, SEL sel"; + for (unsigned i = 2; i < ArgTypes.size(); i++) { + std::string ArgName = "arg"; ArgName += utostr(i); + ArgTypes[i].getAsStringInternal(ArgName, Context->getPrintingPolicy()); + str += ", "; str += ArgName; + } + // could be vararg. + for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) { + std::string ArgName = "arg"; ArgName += utostr(i); + MsgExprs[i]->getType().getAsStringInternal(ArgName, + Context->getPrintingPolicy()); + str += ", "; str += ArgName; + } + + str += ") {\n"; + str += "\t if (receiver == 0)\n"; + str += "\t memset((void*)&s, 0, sizeof(s));\n"; + str += "\t else\n"; + str += "\t s = (("; str += castType.getAsString(Context->getPrintingPolicy()); + str += ")(void *)objc_msgSend_stret)(receiver, sel"; + for (unsigned i = 2; i < ArgTypes.size(); i++) { + str += ", arg"; str += utostr(i); + } + // could be vararg. 
+ for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) { + str += ", arg"; str += utostr(i); + } + + str += ");\n"; + str += "\t}\n"; + str += "\t"; str += returnType.getAsString(Context->getPrintingPolicy()); + str += " s;\n"; + str += "};\n\n"; + SourceLocation FunLocStart; + if (CurFunctionDef) + FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef); + else { + assert(CurMethodDef && "SynthMsgSendStretCallExpr - CurMethodDef is null"); + FunLocStart = CurMethodDef->getLocStart(); + } + + InsertText(FunLocStart, str); + ++stretCount; + + // AST for __Stretn(receiver, args).s; + IdentifierInfo *ID = &Context->Idents.get(name); + FunctionDecl *FD = FunctionDecl::Create(*Context, TUDecl, SourceLocation(), + SourceLocation(), ID, castType, 0, SC_Extern, + SC_None, false, false); + DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, castType, VK_RValue, + SourceLocation()); + CallExpr *STCE = new (Context) CallExpr(*Context, DRE, MsgExprs, + castType, VK_LValue, SourceLocation()); + + FieldDecl *FieldD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get("s"), + returnType, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *ME = new (Context) MemberExpr(STCE, false, FieldD, SourceLocation(), + FieldD->getType(), VK_LValue, + OK_Ordinary); + + return ME; +} + +Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp, + SourceLocation StartLoc, + SourceLocation EndLoc) { + if (!SelGetUidFunctionDecl) + SynthSelGetUidFunctionDecl(); + if (!MsgSendFunctionDecl) + SynthMsgSendFunctionDecl(); + if (!MsgSendSuperFunctionDecl) + SynthMsgSendSuperFunctionDecl(); + if (!MsgSendStretFunctionDecl) + SynthMsgSendStretFunctionDecl(); + if (!MsgSendSuperStretFunctionDecl) + SynthMsgSendSuperStretFunctionDecl(); + if (!MsgSendFpretFunctionDecl) + SynthMsgSendFpretFunctionDecl(); + if (!GetClassFunctionDecl) + SynthGetClassFunctionDecl(); + if (!GetSuperClassFunctionDecl) + SynthGetSuperClassFunctionDecl(); + if (!GetMetaClassFunctionDecl) + SynthGetMetaClassFunctionDecl(); + + // default to objc_msgSend(). + FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; + // May need to use objc_msgSend_stret() as well. + FunctionDecl *MsgSendStretFlavor = 0; + if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) { + QualType resultType = mDecl->getResultType(); + if (resultType->isRecordType()) + MsgSendStretFlavor = MsgSendStretFunctionDecl; + else if (resultType->isRealFloatingType()) + MsgSendFlavor = MsgSendFpretFunctionDecl; + } + + // Synthesize a call to objc_msgSend(). + SmallVector MsgExprs; + switch (Exp->getReceiverKind()) { + case ObjCMessageExpr::SuperClass: { + MsgSendFlavor = MsgSendSuperFunctionDecl; + if (MsgSendStretFlavor) + MsgSendStretFlavor = MsgSendSuperStretFunctionDecl; + assert(MsgSendFlavor && "MsgSendFlavor is NULL!"); + + ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface(); + + SmallVector InitExprs; + + // set the receiver to self, the first argument to all methods. + InitExprs.push_back( + NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK_BitCast, + new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(), + false, + Context->getObjCIdType(), + VK_RValue, + SourceLocation())) + ); // set the 'receiver'. 
+ + // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + ClsExprs.push_back(StringLiteral::Create(*Context, + ClassDecl->getIdentifier()->getName(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + // (Class)objc_getClass("CurrentClass") + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, + EndLoc); + ClsExprs.clear(); + ClsExprs.push_back(Cls); + Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, + &ClsExprs[0], ClsExprs.size(), + StartLoc, EndLoc); + + // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) + // To turn off a warning, type-cast to 'id' + InitExprs.push_back( // set 'super class', using class_getSuperclass(). + NoTypeInfoCStyleCastExpr(Context, + Context->getObjCIdType(), + CK_BitCast, Cls)); + // struct __rw_objc_super + QualType superType = getSuperStructType(); + Expr *SuperRep; + + if (LangOpts.MicrosoftExt) { + SynthSuperContructorFunctionDecl(); + // Simulate a contructor call... + DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl, + false, superType, VK_LValue, + SourceLocation()); + SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs, + superType, VK_LValue, + SourceLocation()); + // The code for super is a little tricky to prevent collision with + // the structure definition in the header. The rewriter has it's own + // internal definition (__rw_objc_super) that is uses. This is why + // we need the cast below. For example: + // (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER")) + // + SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, + Context->getPointerType(SuperRep->getType()), + VK_RValue, OK_Ordinary, + SourceLocation()); + SuperRep = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(superType), + CK_BitCast, SuperRep); + } else { + // (struct __rw_objc_super) { } + InitListExpr *ILE = + new (Context) InitListExpr(*Context, SourceLocation(), InitExprs, + SourceLocation()); + TypeSourceInfo *superTInfo + = Context->getTrivialTypeSourceInfo(superType); + SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo, + superType, VK_LValue, + ILE, false); + // struct __rw_objc_super * + SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, + Context->getPointerType(SuperRep->getType()), + VK_RValue, OK_Ordinary, + SourceLocation()); + } + MsgExprs.push_back(SuperRep); + break; + } + + case ObjCMessageExpr::Class: { + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + ObjCInterfaceDecl *Class + = Exp->getClassReceiver()->getAs()->getInterface(); + IdentifierInfo *clsName = Class->getIdentifier(); + ClsExprs.push_back(StringLiteral::Create(*Context, + clsName->getName(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, EndLoc); + CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context, + Context->getObjCIdType(), + CK_BitCast, Cls); + MsgExprs.push_back(ArgExpr); + break; + } + + case ObjCMessageExpr::SuperInstance:{ + MsgSendFlavor = MsgSendSuperFunctionDecl; + if (MsgSendStretFlavor) + MsgSendStretFlavor = MsgSendSuperStretFunctionDecl; + assert(MsgSendFlavor && "MsgSendFlavor is NULL!"); + ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface(); + SmallVector InitExprs; + + InitExprs.push_back( 
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK_BitCast, + new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(), + false, + Context->getObjCIdType(), + VK_RValue, SourceLocation())) + ); // set the 'receiver'. + + // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + ClsExprs.push_back(StringLiteral::Create(*Context, + ClassDecl->getIdentifier()->getName(), + StringLiteral::Ascii, false, argType, + SourceLocation())); + // (Class)objc_getClass("CurrentClass") + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, EndLoc); + ClsExprs.clear(); + ClsExprs.push_back(Cls); + Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, + &ClsExprs[0], ClsExprs.size(), + StartLoc, EndLoc); + + // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) + // To turn off a warning, type-cast to 'id' + InitExprs.push_back( + // set 'super class', using class_getSuperclass(). + NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK_BitCast, Cls)); + // struct __rw_objc_super + QualType superType = getSuperStructType(); + Expr *SuperRep; + + if (LangOpts.MicrosoftExt) { + SynthSuperContructorFunctionDecl(); + // Simulate a contructor call... + DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl, + false, superType, VK_LValue, + SourceLocation()); + SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs, + superType, VK_LValue, SourceLocation()); + // The code for super is a little tricky to prevent collision with + // the structure definition in the header. The rewriter has it's own + // internal definition (__rw_objc_super) that is uses. This is why + // we need the cast below. For example: + // (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER")) + // + SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, + Context->getPointerType(SuperRep->getType()), + VK_RValue, OK_Ordinary, + SourceLocation()); + SuperRep = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(superType), + CK_BitCast, SuperRep); + } else { + // (struct __rw_objc_super) { } + InitListExpr *ILE = + new (Context) InitListExpr(*Context, SourceLocation(), InitExprs, + SourceLocation()); + TypeSourceInfo *superTInfo + = Context->getTrivialTypeSourceInfo(superType); + SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo, + superType, VK_RValue, ILE, + false); + } + MsgExprs.push_back(SuperRep); + break; + } + + case ObjCMessageExpr::Instance: { + // Remove all type-casts because it may contain objc-style types; e.g. + // Foo *. + Expr *recExpr = Exp->getInstanceReceiver(); + while (CStyleCastExpr *CE = dyn_cast(recExpr)) + recExpr = CE->getSubExpr(); + CastKind CK = recExpr->getType()->isObjCObjectPointerType() + ? CK_BitCast : recExpr->getType()->isBlockPointerType() + ? CK_BlockPointerToObjCPointerCast + : CK_CPointerToObjCPointerCast; + + recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK, recExpr); + MsgExprs.push_back(recExpr); + break; + } + } + + // Create a call to sel_registerName("selName"), it will be the 2nd argument. 
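(Illustrative sketch, not part of the imported patch: for "super" dispatch the code above switches to objc_msgSendSuper(), fills the rewriter-internal struct __rw_objc_super with the pair (self, class_getSuperclass(objc_getClass("CurrentClass"))), and passes its address as the receiver; under -fms-extensions it routes through the synthesized __rw_objc_super() constructor helper shown in the comments instead. Rough shape of the non-Microsoft output for [super init] in an instance method of a hypothetical class Foo, with simplified typedefs so the sketch stands alone:)

    typedef void *id;
    typedef void *SEL;
    typedef void *Class;
    struct __rw_objc_super { id object; id superClass; };  // see getSuperStructType() above
    extern "C" id    objc_msgSendSuper(void);
    extern "C" id    objc_getClass(const char *name);
    extern "C" Class class_getSuperclass(Class cls);
    extern "C" SEL   sel_registerName(const char *str);

    static id sketch_super_init(id self) {
      // The rewriter builds this receiver as a compound literal; a named local
      // is used here only to keep the sketch valid standalone C++.
      __rw_objc_super s = { (id)self,
                            (id)class_getSuperclass((Class)objc_getClass("Foo")) };
      return ((id (*)(__rw_objc_super *, SEL))(void *)objc_msgSendSuper)(
          &s, sel_registerName("init"));
    }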
+ SmallVector SelExprs; + QualType argType = Context->getPointerType(Context->CharTy); + SelExprs.push_back(StringLiteral::Create(*Context, + Exp->getSelector().getAsString(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, + &SelExprs[0], SelExprs.size(), + StartLoc, + EndLoc); + MsgExprs.push_back(SelExp); + + // Now push any user supplied arguments. + for (unsigned i = 0; i < Exp->getNumArgs(); i++) { + Expr *userExpr = Exp->getArg(i); + // Make all implicit casts explicit...ICE comes in handy:-) + if (ImplicitCastExpr *ICE = dyn_cast(userExpr)) { + // Reuse the ICE type, it is exactly what the doctor ordered. + QualType type = ICE->getType(); + if (needToScanForQualifiers(type)) + type = Context->getObjCIdType(); + // Make sure we convert "type (^)(...)" to "type (*)(...)". + (void)convertBlockPointerToFunctionPointer(type); + const Expr *SubExpr = ICE->IgnoreParenImpCasts(); + CastKind CK; + if (SubExpr->getType()->isIntegralType(*Context) && + type->isBooleanType()) { + CK = CK_IntegralToBoolean; + } else if (type->isObjCObjectPointerType()) { + if (SubExpr->getType()->isBlockPointerType()) { + CK = CK_BlockPointerToObjCPointerCast; + } else if (SubExpr->getType()->isPointerType()) { + CK = CK_CPointerToObjCPointerCast; + } else { + CK = CK_BitCast; + } + } else { + CK = CK_BitCast; + } + + userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr); + } + // Make id cast into an 'id' cast. + else if (CStyleCastExpr *CE = dyn_cast(userExpr)) { + if (CE->getType()->isObjCQualifiedIdType()) { + while ((CE = dyn_cast(userExpr))) + userExpr = CE->getSubExpr(); + CastKind CK; + if (userExpr->getType()->isIntegralType(*Context)) { + CK = CK_IntegralToPointer; + } else if (userExpr->getType()->isBlockPointerType()) { + CK = CK_BlockPointerToObjCPointerCast; + } else if (userExpr->getType()->isPointerType()) { + CK = CK_CPointerToObjCPointerCast; + } else { + CK = CK_BitCast; + } + userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK, userExpr); + } + } + MsgExprs.push_back(userExpr); + // We've transferred the ownership to MsgExprs. For now, we *don't* null + // out the argument in the original expression (since we aren't deleting + // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info. + //Exp->setArg(i, 0); + } + // Generate the funky cast. + CastExpr *cast; + SmallVector ArgTypes; + QualType returnType; + + // Push 'id' and 'SEL', the 2 implicit arguments. + if (MsgSendFlavor == MsgSendSuperFunctionDecl) + ArgTypes.push_back(Context->getPointerType(getSuperStructType())); + else + ArgTypes.push_back(Context->getObjCIdType()); + ArgTypes.push_back(Context->getObjCSelType()); + if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) { + // Push any user argument types. + for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), + E = OMD->param_end(); PI != E; ++PI) { + QualType t = (*PI)->getType()->isObjCQualifiedIdType() + ? Context->getObjCIdType() + : (*PI)->getType(); + // Make sure we convert "t (^)(...)" to "t (*)(...)". + (void)convertBlockPointerToFunctionPointer(t); + ArgTypes.push_back(t); + } + returnType = Exp->getType(); + convertToUnqualifiedObjCType(returnType); + (void)convertBlockPointerToFunctionPointer(returnType); + } else { + returnType = Context->getObjCIdType(); + } + // Get the type, we will need to reference it in a couple spots. 
+ QualType msgSendType = MsgSendFlavor->getType(); + + // Create a reference to the objc_msgSend() declaration. + DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, + VK_LValue, SourceLocation()); + + // Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid). + // If we don't do this cast, we get the following bizarre warning/note: + // xx.m:13: warning: function called through a non-compatible type + // xx.m:13: note: if this code is reached, the program will abort + cast = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(Context->VoidTy), + CK_BitCast, DRE); + + // Now do the "normal" pointer to function cast. + QualType castType = + getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), + // If we don't have a method decl, force a variadic cast. + Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true); + castType = Context->getPointerType(castType); + cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, + cast); + + // Don't forget the parens to enforce the proper binding. + ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); + + const FunctionType *FT = msgSendType->getAs(); + CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs, + FT->getResultType(), VK_RValue, EndLoc); + Stmt *ReplacingStmt = CE; + if (MsgSendStretFlavor) { + // We have the method which returns a struct/union. Must also generate + // call to objc_msgSend_stret and hang both varieties on a conditional + // expression which dictate which one to envoke depending on size of + // method's return type. + + Expr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor, + msgSendType, returnType, + ArgTypes, MsgExprs, + Exp->getMethodDecl()); + + // Build sizeof(returnType) + UnaryExprOrTypeTraitExpr *sizeofExpr = + new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf, + Context->getTrivialTypeSourceInfo(returnType), + Context->getSizeType(), SourceLocation(), + SourceLocation()); + // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...)) + // FIXME: Value of 8 is base on ppc32/x86 ABI for the most common cases. + // For X86 it is more complicated and some kind of target specific routine + // is needed to decide what to do. + unsigned IntSize = + static_cast(Context->getTypeSize(Context->IntTy)); + IntegerLiteral *limit = IntegerLiteral::Create(*Context, + llvm::APInt(IntSize, 8), + Context->IntTy, + SourceLocation()); + BinaryOperator *lessThanExpr = + new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy, + VK_RValue, OK_Ordinary, SourceLocation(), + false); + // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...)) + ConditionalOperator *CondExpr = + new (Context) ConditionalOperator(lessThanExpr, + SourceLocation(), CE, + SourceLocation(), STCE, + returnType, VK_RValue, OK_Ordinary); + ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(), + CondExpr); + } + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return ReplacingStmt; +} + +Stmt *RewriteModernObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) { + Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(), + Exp->getLocEnd()); + + // Now do the actual rewrite. + ReplaceStmt(Exp, ReplacingStmt); + + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. 
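(Illustrative sketch, not part of the imported patch: when the method returns an aggregate, SynthMessageExpr() above keeps both call forms and picks one at the call site with the "sizeof(returnType) <= 8" test flagged by the FIXME; the stret side goes through a synthesized __Stret<N> struct whose constructor zero-fills the result for a nil receiver and otherwise calls objc_msgSend_stret through a cast. Rough shape for a hypothetical method returning 'struct Point' on receiver 'r'; the typedefs and the Point type are assumptions made so the sketch stands alone:)

    #include <cstring>
    typedef void *id;
    typedef void *SEL;
    extern "C" id   objc_msgSend(id self, SEL op, ...);
    extern "C" void objc_msgSend_stret(void);            // only ever called through a cast
    extern "C" SEL  sel_registerName(const char *str);
    struct Point { double x, y; };

    // In the spirit of the synthesized __Stret<N> helper emitted above.
    struct __Stret0 {
      __Stret0(id receiver, SEL sel) {
        if (receiver == 0)
          std::memset((void *)&s, 0, sizeof(s));
        else
          s = ((Point (*)(id, SEL))(void *)objc_msgSend_stret)(receiver, sel);
      }
      Point s;
    };

    static Point sketch_origin(id r) {
      // (sizeof(returnType) <= 8 ? objc_msgSend(...) : __Stret0(...).s)
      return sizeof(Point) <= 8
                 ? ((Point (*)(id, SEL))(void *)objc_msgSend)(r, sel_registerName("origin"))
                 : __Stret0(r, sel_registerName("origin")).s;
    }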
+ return ReplacingStmt; +} + +// typedef struct objc_object Protocol; +QualType RewriteModernObjC::getProtocolType() { + if (!ProtocolTypeDecl) { + TypeSourceInfo *TInfo + = Context->getTrivialTypeSourceInfo(Context->getObjCIdType()); + ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("Protocol"), + TInfo); + } + return Context->getTypeDeclType(ProtocolTypeDecl); +} + +/// RewriteObjCProtocolExpr - Rewrite a protocol expression into +/// a synthesized/forward data reference (to the protocol's metadata). +/// The forward references (and metadata) are generated in +/// RewriteModernObjC::HandleTranslationUnit(). +Stmt *RewriteModernObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) { + std::string Name = "_OBJC_PROTOCOL_REFERENCE_$_" + + Exp->getProtocol()->getNameAsString(); + IdentifierInfo *ID = &Context->Idents.get(Name); + VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(), + SourceLocation(), ID, getProtocolType(), 0, + SC_Extern, SC_None); + DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(), + VK_LValue, SourceLocation()); + Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf, + Context->getPointerType(DRE->getType()), + VK_RValue, OK_Ordinary, SourceLocation()); + CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(), + CK_BitCast, + DerefExpr); + ReplaceStmt(Exp, castExpr); + ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl()); + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return castExpr; + +} + +bool RewriteModernObjC::BufferContainsPPDirectives(const char *startBuf, + const char *endBuf) { + while (startBuf < endBuf) { + if (*startBuf == '#') { + // Skip whitespace. + for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf) + ; + if (!strncmp(startBuf, "if", strlen("if")) || + !strncmp(startBuf, "ifdef", strlen("ifdef")) || + !strncmp(startBuf, "ifndef", strlen("ifndef")) || + !strncmp(startBuf, "define", strlen("define")) || + !strncmp(startBuf, "undef", strlen("undef")) || + !strncmp(startBuf, "else", strlen("else")) || + !strncmp(startBuf, "elif", strlen("elif")) || + !strncmp(startBuf, "endif", strlen("endif")) || + !strncmp(startBuf, "pragma", strlen("pragma")) || + !strncmp(startBuf, "include", strlen("include")) || + !strncmp(startBuf, "import", strlen("import")) || + !strncmp(startBuf, "include_next", strlen("include_next"))) + return true; + } + startBuf++; + } + return false; +} + +/// IsTagDefinedInsideClass - This routine checks that a named tagged type +/// is defined inside an objective-c class. If so, it returns true. 
+bool RewriteModernObjC::IsTagDefinedInsideClass(ObjCContainerDecl *IDecl, + TagDecl *Tag, + bool &IsNamedDefinition) { + if (!IDecl) + return false; + SourceLocation TagLocation; + if (RecordDecl *RD = dyn_cast(Tag)) { + RD = RD->getDefinition(); + if (!RD || !RD->getDeclName().getAsIdentifierInfo()) + return false; + IsNamedDefinition = true; + TagLocation = RD->getLocation(); + return Context->getSourceManager().isBeforeInTranslationUnit( + IDecl->getLocation(), TagLocation); + } + if (EnumDecl *ED = dyn_cast(Tag)) { + if (!ED || !ED->getDeclName().getAsIdentifierInfo()) + return false; + IsNamedDefinition = true; + TagLocation = ED->getLocation(); + return Context->getSourceManager().isBeforeInTranslationUnit( + IDecl->getLocation(), TagLocation); + + } + return false; +} + +/// RewriteObjCFieldDeclType - This routine rewrites a type into the buffer. +/// It handles elaborated types, as well as enum types in the process. +bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type, + std::string &Result) { + if (isa(Type)) { + Result += "\t"; + return false; + } + + if (Type->isArrayType()) { + QualType ElemTy = Context->getBaseElementType(Type); + return RewriteObjCFieldDeclType(ElemTy, Result); + } + else if (Type->isRecordType()) { + RecordDecl *RD = Type->getAs()->getDecl(); + if (RD->isCompleteDefinition()) { + if (RD->isStruct()) + Result += "\n\tstruct "; + else if (RD->isUnion()) + Result += "\n\tunion "; + else + assert(false && "class not allowed as an ivar type"); + + Result += RD->getName(); + if (GlobalDefinedTags.count(RD)) { + // struct/union is defined globally, use it. + Result += " "; + return true; + } + Result += " {\n"; + for (RecordDecl::field_iterator i = RD->field_begin(), + e = RD->field_end(); i != e; ++i) { + FieldDecl *FD = *i; + RewriteObjCFieldDecl(FD, Result); + } + Result += "\t} "; + return true; + } + } + else if (Type->isEnumeralType()) { + EnumDecl *ED = Type->getAs()->getDecl(); + if (ED->isCompleteDefinition()) { + Result += "\n\tenum "; + Result += ED->getName(); + if (GlobalDefinedTags.count(ED)) { + // Enum is globall defined, use it. + Result += " "; + return true; + } + + Result += " {\n"; + for (EnumDecl::enumerator_iterator EC = ED->enumerator_begin(), + ECEnd = ED->enumerator_end(); EC != ECEnd; ++EC) { + Result += "\t"; Result += EC->getName(); Result += " = "; + llvm::APSInt Val = EC->getInitVal(); + Result += Val.toString(10); + Result += ",\n"; + } + Result += "\t} "; + return true; + } + } + + Result += "\t"; + convertObjCTypeToCStyleType(Type); + return false; +} + + +/// RewriteObjCFieldDecl - This routine rewrites a field into the buffer. +/// It handles elaborated types, as well as enum types in the process. 
+void RewriteModernObjC::RewriteObjCFieldDecl(FieldDecl *fieldDecl, + std::string &Result) { + QualType Type = fieldDecl->getType(); + std::string Name = fieldDecl->getNameAsString(); + + bool EleboratedType = RewriteObjCFieldDeclType(Type, Result); + if (!EleboratedType) + Type.getAsStringInternal(Name, Context->getPrintingPolicy()); + Result += Name; + if (fieldDecl->isBitField()) { + Result += " : "; Result += utostr(fieldDecl->getBitWidthValue(*Context)); + } + else if (EleboratedType && Type->isArrayType()) { + CanQualType CType = Context->getCanonicalType(Type); + while (isa(CType)) { + if (const ConstantArrayType *CAT = Context->getAsConstantArrayType(CType)) { + Result += "["; + llvm::APInt Dim = CAT->getSize(); + Result += utostr(Dim.getZExtValue()); + Result += "]"; + } + CType = CType->getAs()->getElementType(); + } + } + + Result += ";\n"; +} + +/// RewriteLocallyDefinedNamedAggregates - This routine rewrites locally defined +/// named aggregate types into the input buffer. +void RewriteModernObjC::RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl, + std::string &Result) { + QualType Type = fieldDecl->getType(); + if (isa(Type)) + return; + if (Type->isArrayType()) + Type = Context->getBaseElementType(Type); + ObjCContainerDecl *IDecl = + dyn_cast(fieldDecl->getDeclContext()); + + TagDecl *TD = 0; + if (Type->isRecordType()) { + TD = Type->getAs()->getDecl(); + } + else if (Type->isEnumeralType()) { + TD = Type->getAs()->getDecl(); + } + + if (TD) { + if (GlobalDefinedTags.count(TD)) + return; + + bool IsNamedDefinition = false; + if (IsTagDefinedInsideClass(IDecl, TD, IsNamedDefinition)) { + RewriteObjCFieldDeclType(Type, Result); + Result += ";"; + } + if (IsNamedDefinition) + GlobalDefinedTags.insert(TD); + } + +} + +/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to +/// an objective-c class with ivars. +void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl, + std::string &Result) { + assert(CDecl && "Class missing in SynthesizeObjCInternalStruct"); + assert(CDecl->getName() != "" && + "Name missing in SynthesizeObjCInternalStruct"); + ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass(); + SmallVector IVars; + for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin(); + IVD; IVD = IVD->getNextIvar()) + IVars.push_back(IVD); + + SourceLocation LocStart = CDecl->getLocStart(); + SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc(); + + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + + // If no ivars and no root or if its root, directly or indirectly, + // have no ivars (thus not synthesized) then no need to synthesize this class. + if ((!CDecl->isThisDeclarationADefinition() || IVars.size() == 0) && + (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) { + endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts); + ReplaceText(LocStart, endBuf-startBuf, Result); + return; + } + + // Insert named struct/union definitions inside class to + // outer scope. This follows semantics of locally defined + // struct/unions in objective-c classes. 
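  // A rough sketch of the intended hoisting (the class and struct names below
  // are illustrative, not taken from this patch): given
  //
  //   @interface Foo : NSObject {
  //     struct Bar { int x; } bar;   // named tag defined inside the class
  //   }
  //   @end
  //
  // the loop below emits "struct Bar { int x; };" into Result ahead of the
  // generated "struct Foo_IMPL { ... }", so the synthesized ivar can refer to
  // the tag at file scope.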
+ for (unsigned i = 0, e = IVars.size(); i < e; i++) + RewriteLocallyDefinedNamedAggregates(IVars[i], Result); + + Result += "\nstruct "; + Result += CDecl->getNameAsString(); + Result += "_IMPL {\n"; + + if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) { + Result += "\tstruct "; Result += RCDecl->getNameAsString(); + Result += "_IMPL "; Result += RCDecl->getNameAsString(); + Result += "_IVARS;\n"; + } + + for (unsigned i = 0, e = IVars.size(); i < e; i++) + RewriteObjCFieldDecl(IVars[i], Result); + + Result += "};\n"; + endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts); + ReplaceText(LocStart, endBuf-startBuf, Result); + // Mark this struct as having been generated. + if (!ObjCSynthesizedStructs.insert(CDecl)) + llvm_unreachable("struct already synthesize- RewriteObjCInternalStruct"); +} + +/// RewriteIvarOffsetSymbols - Rewrite ivar offset symbols of those ivars which +/// have been referenced in an ivar access expression. +void RewriteModernObjC::RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl, + std::string &Result) { + // write out ivar offset symbols which have been referenced in an ivar + // access expression. + llvm::SmallPtrSet Ivars = ReferencedIvars[CDecl]; + if (Ivars.empty()) + return; + for (llvm::SmallPtrSet::iterator i = Ivars.begin(), + e = Ivars.end(); i != e; i++) { + ObjCIvarDecl *IvarDecl = (*i); + Result += "\n"; + if (LangOpts.MicrosoftExt) + Result += "__declspec(allocate(\".objc_ivar$B\")) "; + Result += "extern \"C\" "; + if (LangOpts.MicrosoftExt && + IvarDecl->getAccessControl() != ObjCIvarDecl::Private && + IvarDecl->getAccessControl() != ObjCIvarDecl::Package) + Result += "__declspec(dllimport) "; + + Result += "unsigned long "; + WriteInternalIvarName(CDecl, IvarDecl, Result); + Result += ";"; + } +} + +//===----------------------------------------------------------------------===// +// Meta Data Emission +//===----------------------------------------------------------------------===// + + +/// RewriteImplementations - This routine rewrites all method implementations +/// and emits meta-data. 
+ +void RewriteModernObjC::RewriteImplementations() { + int ClsDefCount = ClassImplementation.size(); + int CatDefCount = CategoryImplementation.size(); + + // Rewrite implemented methods + for (int i = 0; i < ClsDefCount; i++) { + ObjCImplementationDecl *OIMP = ClassImplementation[i]; + ObjCInterfaceDecl *CDecl = OIMP->getClassInterface(); + if (CDecl->isImplicitInterfaceDecl()) + assert(false && + "Legacy implicit interface rewriting not supported in moder abi"); + RewriteImplementationDecl(OIMP); + } + + for (int i = 0; i < CatDefCount; i++) { + ObjCCategoryImplDecl *CIMP = CategoryImplementation[i]; + ObjCInterfaceDecl *CDecl = CIMP->getClassInterface(); + if (CDecl->isImplicitInterfaceDecl()) + assert(false && + "Legacy implicit interface rewriting not supported in moder abi"); + RewriteImplementationDecl(CIMP); + } +} + +void RewriteModernObjC::RewriteByRefString(std::string &ResultStr, + const std::string &Name, + ValueDecl *VD, bool def) { + assert(BlockByRefDeclNo.count(VD) && + "RewriteByRefString: ByRef decl missing"); + if (def) + ResultStr += "struct "; + ResultStr += "__Block_byref_" + Name + + "_" + utostr(BlockByRefDeclNo[VD]) ; +} + +static bool HasLocalVariableExternalStorage(ValueDecl *VD) { + if (VarDecl *Var = dyn_cast(VD)) + return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage()); + return false; +} + +std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i, + StringRef funcName, + std::string Tag) { + const FunctionType *AFT = CE->getFunctionType(); + QualType RT = AFT->getResultType(); + std::string StructRef = "struct " + Tag; + SourceLocation BlockLoc = CE->getExprLoc(); + std::string S; + ConvertSourceLocationToLineDirective(BlockLoc, S); + + S += "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" + + funcName.str() + "_block_func_" + utostr(i); + + BlockDecl *BD = CE->getBlockDecl(); + + if (isa(AFT)) { + // No user-supplied arguments. Still need to pass in a pointer to the + // block (to reference imported block decl refs). + S += "(" + StructRef + " *__cself)"; + } else if (BD->param_empty()) { + S += "(" + StructRef + " *__cself)"; + } else { + const FunctionProtoType *FT = cast(AFT); + assert(FT && "SynthesizeBlockFunc: No function proto"); + S += '('; + // first add the implicit argument. + S += StructRef + " *__cself, "; + std::string ParamStr; + for (BlockDecl::param_iterator AI = BD->param_begin(), + E = BD->param_end(); AI != E; ++AI) { + if (AI != BD->param_begin()) S += ", "; + ParamStr = (*AI)->getNameAsString(); + QualType QT = (*AI)->getType(); + (void)convertBlockPointerToFunctionPointer(QT); + QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy()); + S += ParamStr; + } + if (FT->isVariadic()) { + if (!BD->param_empty()) S += ", "; + S += "..."; + } + S += ')'; + } + S += " {\n"; + + // Create local declarations to avoid rewriting all closure decl ref exprs. + // First, emit a declaration for all "by ref" decls. + for (SmallVector::iterator I = BlockByRefDecls.begin(), + E = BlockByRefDecls.end(); I != E; ++I) { + S += " "; + std::string Name = (*I)->getNameAsString(); + std::string TypeString; + RewriteByRefString(TypeString, Name, (*I)); + TypeString += " *"; + Name = TypeString + Name; + S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n"; + } + // Next, emit a declaration for all "by copy" declarations. + for (SmallVector::iterator I = BlockByCopyDecls.begin(), + E = BlockByCopyDecls.end(); I != E; ++I) { + S += " "; + // Handle nested closure invocation. 
For example: + // + // void (^myImportedClosure)(void); + // myImportedClosure = ^(void) { setGlobalInt(x + y); }; + // + // void (^anotherClosure)(void); + // anotherClosure = ^(void) { + // myImportedClosure(); // import and invoke the closure + // }; + // + if (isTopLevelBlockPointerType((*I)->getType())) { + RewriteBlockPointerTypeVariable(S, (*I)); + S += " = ("; + RewriteBlockPointerType(S, (*I)->getType()); + S += ")"; + S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n"; + } + else { + std::string Name = (*I)->getNameAsString(); + QualType QT = (*I)->getType(); + if (HasLocalVariableExternalStorage(*I)) + QT = Context->getPointerType(QT); + QT.getAsStringInternal(Name, Context->getPrintingPolicy()); + S += Name + " = __cself->" + + (*I)->getNameAsString() + "; // bound by copy\n"; + } + } + std::string RewrittenStr = RewrittenBlockExprs[CE]; + const char *cstr = RewrittenStr.c_str(); + while (*cstr++ != '{') ; + S += cstr; + S += "\n"; + return S; +} + +std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i, + StringRef funcName, + std::string Tag) { + std::string StructRef = "struct " + Tag; + std::string S = "static void __"; + + S += funcName; + S += "_block_copy_" + utostr(i); + S += "(" + StructRef; + S += "*dst, " + StructRef; + S += "*src) {"; + for (llvm::SmallPtrSet::iterator I = ImportedBlockDecls.begin(), + E = ImportedBlockDecls.end(); I != E; ++I) { + ValueDecl *VD = (*I); + S += "_Block_object_assign((void*)&dst->"; + S += (*I)->getNameAsString(); + S += ", (void*)src->"; + S += (*I)->getNameAsString(); + if (BlockByRefDeclsPtrSet.count((*I))) + S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);"; + else if (VD->getType()->isBlockPointerType()) + S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);"; + else + S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);"; + } + S += "}\n"; + + S += "\nstatic void __"; + S += funcName; + S += "_block_dispose_" + utostr(i); + S += "(" + StructRef; + S += "*src) {"; + for (llvm::SmallPtrSet::iterator I = ImportedBlockDecls.begin(), + E = ImportedBlockDecls.end(); I != E; ++I) { + ValueDecl *VD = (*I); + S += "_Block_object_dispose((void*)src->"; + S += (*I)->getNameAsString(); + if (BlockByRefDeclsPtrSet.count((*I))) + S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);"; + else if (VD->getType()->isBlockPointerType()) + S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);"; + else + S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);"; + } + S += "}\n"; + return S; +} + +std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag, + std::string Desc) { + std::string S = "\nstruct " + Tag; + std::string Constructor = " " + Tag; + + S += " {\n struct __block_impl impl;\n"; + S += " struct " + Desc; + S += "* Desc;\n"; + + Constructor += "(void *fp, "; // Invoke function pointer. + Constructor += "struct " + Desc; // Descriptor pointer. + Constructor += " *desc"; + + if (BlockDeclRefs.size()) { + // Output all "by copy" declarations. + for (SmallVector::iterator I = BlockByCopyDecls.begin(), + E = BlockByCopyDecls.end(); I != E; ++I) { + S += " "; + std::string FieldName = (*I)->getNameAsString(); + std::string ArgName = "_" + FieldName; + // Handle nested closure invocation. 
For example: + // + // void (^myImportedBlock)(void); + // myImportedBlock = ^(void) { setGlobalInt(x + y); }; + // + // void (^anotherBlock)(void); + // anotherBlock = ^(void) { + // myImportedBlock(); // import and invoke the closure + // }; + // + if (isTopLevelBlockPointerType((*I)->getType())) { + S += "struct __block_impl *"; + Constructor += ", void *" + ArgName; + } else { + QualType QT = (*I)->getType(); + if (HasLocalVariableExternalStorage(*I)) + QT = Context->getPointerType(QT); + QT.getAsStringInternal(FieldName, Context->getPrintingPolicy()); + QT.getAsStringInternal(ArgName, Context->getPrintingPolicy()); + Constructor += ", " + ArgName; + } + S += FieldName + ";\n"; + } + // Output all "by ref" declarations. + for (SmallVector::iterator I = BlockByRefDecls.begin(), + E = BlockByRefDecls.end(); I != E; ++I) { + S += " "; + std::string FieldName = (*I)->getNameAsString(); + std::string ArgName = "_" + FieldName; + { + std::string TypeString; + RewriteByRefString(TypeString, FieldName, (*I)); + TypeString += " *"; + FieldName = TypeString + FieldName; + ArgName = TypeString + ArgName; + Constructor += ", " + ArgName; + } + S += FieldName + "; // by ref\n"; + } + // Finish writing the constructor. + Constructor += ", int flags=0)"; + // Initialize all "by copy" arguments. + bool firsTime = true; + for (SmallVector::iterator I = BlockByCopyDecls.begin(), + E = BlockByCopyDecls.end(); I != E; ++I) { + std::string Name = (*I)->getNameAsString(); + if (firsTime) { + Constructor += " : "; + firsTime = false; + } + else + Constructor += ", "; + if (isTopLevelBlockPointerType((*I)->getType())) + Constructor += Name + "((struct __block_impl *)_" + Name + ")"; + else + Constructor += Name + "(_" + Name + ")"; + } + // Initialize all "by ref" arguments. + for (SmallVector::iterator I = BlockByRefDecls.begin(), + E = BlockByRefDecls.end(); I != E; ++I) { + std::string Name = (*I)->getNameAsString(); + if (firsTime) { + Constructor += " : "; + firsTime = false; + } + else + Constructor += ", "; + Constructor += Name + "(_" + Name + "->__forwarding)"; + } + + Constructor += " {\n"; + if (GlobalVarDecl) + Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n"; + else + Constructor += " impl.isa = &_NSConcreteStackBlock;\n"; + Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n"; + + Constructor += " Desc = desc;\n"; + } else { + // Finish writing the constructor. 
+ Constructor += ", int flags=0) {\n"; + if (GlobalVarDecl) + Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n"; + else + Constructor += " impl.isa = &_NSConcreteStackBlock;\n"; + Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n"; + Constructor += " Desc = desc;\n"; + } + Constructor += " "; + Constructor += "}\n"; + S += Constructor; + S += "};\n"; + return S; +} + +std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag, + std::string ImplTag, int i, + StringRef FunName, + unsigned hasCopy) { + std::string S = "\nstatic struct " + DescTag; + + S += " {\n size_t reserved;\n"; + S += " size_t Block_size;\n"; + if (hasCopy) { + S += " void (*copy)(struct "; + S += ImplTag; S += "*, struct "; + S += ImplTag; S += "*);\n"; + + S += " void (*dispose)(struct "; + S += ImplTag; S += "*);\n"; + } + S += "} "; + + S += DescTag + "_DATA = { 0, sizeof(struct "; + S += ImplTag + ")"; + if (hasCopy) { + S += ", __" + FunName.str() + "_block_copy_" + utostr(i); + S += ", __" + FunName.str() + "_block_dispose_" + utostr(i); + } + S += "};\n"; + return S; +} + +void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart, + StringRef FunName) { + bool RewriteSC = (GlobalVarDecl && + !Blocks.empty() && + GlobalVarDecl->getStorageClass() == SC_Static && + GlobalVarDecl->getType().getCVRQualifiers()); + if (RewriteSC) { + std::string SC(" void __"); + SC += GlobalVarDecl->getNameAsString(); + SC += "() {}"; + InsertText(FunLocStart, SC); + } + + // Insert closures that were part of the function. + for (unsigned i = 0, count=0; i < Blocks.size(); i++) { + CollectBlockDeclRefInfo(Blocks[i]); + // Need to copy-in the inner copied-in variables not actually used in this + // block. + for (int j = 0; j < InnerDeclRefsCount[i]; j++) { + DeclRefExpr *Exp = InnerDeclRefs[count++]; + ValueDecl *VD = Exp->getDecl(); + BlockDeclRefs.push_back(Exp); + if (!VD->hasAttr()) { + if (!BlockByCopyDeclsPtrSet.count(VD)) { + BlockByCopyDeclsPtrSet.insert(VD); + BlockByCopyDecls.push_back(VD); + } + continue; + } + + if (!BlockByRefDeclsPtrSet.count(VD)) { + BlockByRefDeclsPtrSet.insert(VD); + BlockByRefDecls.push_back(VD); + } + + // imported objects in the inner blocks not used in the outer + // blocks must be copied/disposed in the outer block as well. + if (VD->getType()->isObjCObjectPointerType() || + VD->getType()->isBlockPointerType()) + ImportedBlockDecls.insert(VD); + } + + std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i); + std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i); + + std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag); + + InsertText(FunLocStart, CI); + + std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag); + + InsertText(FunLocStart, CF); + + if (ImportedBlockDecls.size()) { + std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag); + InsertText(FunLocStart, HF); + } + std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName, + ImportedBlockDecls.size() > 0); + InsertText(FunLocStart, BD); + + BlockDeclRefs.clear(); + BlockByRefDecls.clear(); + BlockByRefDeclsPtrSet.clear(); + BlockByCopyDecls.clear(); + BlockByCopyDeclsPtrSet.clear(); + ImportedBlockDecls.clear(); + } + if (RewriteSC) { + // Must insert any 'const/volatile/static here. Since it has been + // removed as result of rewriting of block literals. 
+ std::string SC; + if (GlobalVarDecl->getStorageClass() == SC_Static) + SC = "static "; + if (GlobalVarDecl->getType().isConstQualified()) + SC += "const "; + if (GlobalVarDecl->getType().isVolatileQualified()) + SC += "volatile "; + if (GlobalVarDecl->getType().isRestrictQualified()) + SC += "restrict "; + InsertText(FunLocStart, SC); + } + if (GlobalConstructionExp) { + // extra fancy dance for global literal expression. + + // Always the latest block expression on the block stack. + std::string Tag = "__"; + Tag += FunName; + Tag += "_block_impl_"; + Tag += utostr(Blocks.size()-1); + std::string globalBuf = "static "; + globalBuf += Tag; globalBuf += " "; + std::string SStr; + + llvm::raw_string_ostream constructorExprBuf(SStr); + GlobalConstructionExp->printPretty(constructorExprBuf, 0, + PrintingPolicy(LangOpts)); + globalBuf += constructorExprBuf.str(); + globalBuf += ";\n"; + InsertText(FunLocStart, globalBuf); + GlobalConstructionExp = 0; + } + + Blocks.clear(); + InnerDeclRefsCount.clear(); + InnerDeclRefs.clear(); + RewrittenBlockExprs.clear(); +} + +void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) { + SourceLocation FunLocStart = + (!Blocks.empty()) ? getFunctionSourceLocation(*this, FD) + : FD->getTypeSpecStartLoc(); + StringRef FuncName = FD->getName(); + + SynthesizeBlockLiterals(FunLocStart, FuncName); +} + +static void BuildUniqueMethodName(std::string &Name, + ObjCMethodDecl *MD) { + ObjCInterfaceDecl *IFace = MD->getClassInterface(); + Name = IFace->getName(); + Name += "__" + MD->getSelector().getAsString(); + // Convert colons to underscores. + std::string::size_type loc = 0; + while ((loc = Name.find(":", loc)) != std::string::npos) + Name.replace(loc, 1, "_"); +} + +void RewriteModernObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) { + //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n"); + //SourceLocation FunLocStart = MD->getLocStart(); + SourceLocation FunLocStart = MD->getLocStart(); + std::string FuncName; + BuildUniqueMethodName(FuncName, MD); + SynthesizeBlockLiterals(FunLocStart, FuncName); +} + +void RewriteModernObjC::GetBlockDeclRefExprs(Stmt *S) { + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) { + if (BlockExpr *CBE = dyn_cast(*CI)) + GetBlockDeclRefExprs(CBE->getBody()); + else + GetBlockDeclRefExprs(*CI); + } + // Handle specific things. + if (DeclRefExpr *DRE = dyn_cast(S)) { + if (DRE->refersToEnclosingLocal()) { + // FIXME: Handle enums. + if (!isa(DRE->getDecl())) + BlockDeclRefs.push_back(DRE); + if (HasLocalVariableExternalStorage(DRE->getDecl())) + BlockDeclRefs.push_back(DRE); + } + } + + return; +} + +void RewriteModernObjC::GetInnerBlockDeclRefExprs(Stmt *S, + SmallVector &InnerBlockDeclRefs, + llvm::SmallPtrSet &InnerContexts) { + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) { + if (BlockExpr *CBE = dyn_cast(*CI)) { + InnerContexts.insert(cast(CBE->getBlockDecl())); + GetInnerBlockDeclRefExprs(CBE->getBody(), + InnerBlockDeclRefs, + InnerContexts); + } + else + GetInnerBlockDeclRefExprs(*CI, + InnerBlockDeclRefs, + InnerContexts); + + } + // Handle specific things. 
+ if (DeclRefExpr *DRE = dyn_cast(S)) { + if (DRE->refersToEnclosingLocal()) { + if (!isa(DRE->getDecl()) && + !InnerContexts.count(DRE->getDecl()->getDeclContext())) + InnerBlockDeclRefs.push_back(DRE); + if (VarDecl *Var = dyn_cast(DRE->getDecl())) + if (Var->isFunctionOrMethodVarDecl()) + ImportedLocalExternalDecls.insert(Var); + } + } + + return; +} + +/// convertObjCTypeToCStyleType - This routine converts such objc types +/// as qualified objects, and blocks to their closest c/c++ types that +/// it can. It returns true if input type was modified. +bool RewriteModernObjC::convertObjCTypeToCStyleType(QualType &T) { + QualType oldT = T; + convertBlockPointerToFunctionPointer(T); + if (T->isFunctionPointerType()) { + QualType PointeeTy; + if (const PointerType* PT = T->getAs()) { + PointeeTy = PT->getPointeeType(); + if (const FunctionType *FT = PointeeTy->getAs()) { + T = convertFunctionTypeOfBlocks(FT); + T = Context->getPointerType(T); + } + } + } + + convertToUnqualifiedObjCType(T); + return T != oldT; +} + +/// convertFunctionTypeOfBlocks - This routine converts a function type +/// whose result type may be a block pointer or whose argument type(s) +/// might be block pointers to an equivalent function type replacing +/// all block pointers to function pointers. +QualType RewriteModernObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) { + const FunctionProtoType *FTP = dyn_cast(FT); + // FTP will be null for closures that don't take arguments. + // Generate a funky cast. + SmallVector ArgTypes; + QualType Res = FT->getResultType(); + bool modified = convertObjCTypeToCStyleType(Res); + + if (FTP) { + for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), + E = FTP->arg_type_end(); I && (I != E); ++I) { + QualType t = *I; + // Make sure we convert "t (^)(...)" to "t (*)(...)". + if (convertObjCTypeToCStyleType(t)) + modified = true; + ArgTypes.push_back(t); + } + } + QualType FuncType; + if (modified) + FuncType = getSimpleFunctionType(Res, &ArgTypes[0], ArgTypes.size()); + else FuncType = QualType(FT, 0); + return FuncType; +} + +Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) { + // Navigate to relevant type information. 
+ const BlockPointerType *CPT = 0; + + if (const DeclRefExpr *DRE = dyn_cast(BlockExp)) { + CPT = DRE->getType()->getAs(); + } else if (const MemberExpr *MExpr = dyn_cast(BlockExp)) { + CPT = MExpr->getType()->getAs(); + } + else if (const ParenExpr *PRE = dyn_cast(BlockExp)) { + return SynthesizeBlockCall(Exp, PRE->getSubExpr()); + } + else if (const ImplicitCastExpr *IEXPR = dyn_cast(BlockExp)) + CPT = IEXPR->getType()->getAs(); + else if (const ConditionalOperator *CEXPR = + dyn_cast(BlockExp)) { + Expr *LHSExp = CEXPR->getLHS(); + Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp); + Expr *RHSExp = CEXPR->getRHS(); + Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp); + Expr *CONDExp = CEXPR->getCond(); + ConditionalOperator *CondExpr = + new (Context) ConditionalOperator(CONDExp, + SourceLocation(), cast(LHSStmt), + SourceLocation(), cast(RHSStmt), + Exp->getType(), VK_RValue, OK_Ordinary); + return CondExpr; + } else if (const ObjCIvarRefExpr *IRE = dyn_cast(BlockExp)) { + CPT = IRE->getType()->getAs(); + } else if (const PseudoObjectExpr *POE + = dyn_cast(BlockExp)) { + CPT = POE->getType()->castAs(); + } else { + assert(1 && "RewriteBlockClass: Bad type"); + } + assert(CPT && "RewriteBlockClass: Bad type"); + const FunctionType *FT = CPT->getPointeeType()->getAs(); + assert(FT && "RewriteBlockClass: Bad type"); + const FunctionProtoType *FTP = dyn_cast(FT); + // FTP will be null for closures that don't take arguments. + + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("__block_impl")); + QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD)); + + // Generate a funky cast. + SmallVector ArgTypes; + + // Push the block argument type. + ArgTypes.push_back(PtrBlock); + if (FTP) { + for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), + E = FTP->arg_type_end(); I && (I != E); ++I) { + QualType t = *I; + // Make sure we convert "t (^)(...)" to "t (*)(...)". + if (!convertBlockPointerToFunctionPointer(t)) + convertToUnqualifiedObjCType(t); + ArgTypes.push_back(t); + } + } + // Now do the pointer to function cast. + QualType PtrToFuncCastType + = getSimpleFunctionType(Exp->getType(), &ArgTypes[0], ArgTypes.size()); + + PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType); + + CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock, + CK_BitCast, + const_cast(BlockExp)); + // Don't forget the parens to enforce the proper binding. + ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), + BlkCast); + //PE->dump(); + + FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get("FuncPtr"), + Context->VoidPtrTy, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), + FD->getType(), VK_LValue, + OK_Ordinary); + + + CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType, + CK_BitCast, ME); + PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast); + + SmallVector BlkExprs; + // Add the implicit argument. + BlkExprs.push_back(BlkCast); + // Add the user arguments. 
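  // Taken together, the casts and member access built above yield, roughly
  // (a sketch; "blk", "RetTy" and the int argument are illustrative):
  //   ((RetTy (*)(struct __block_impl *, int))
  //       ((struct __block_impl *)blk)->FuncPtr)((struct __block_impl *)blk, 10)
  // i.e. the block pointer is cast to struct __block_impl *, its FuncPtr is
  // cast to the appropriate function-pointer type, and the block itself is
  // passed as the implicit first argument followed by the user arguments
  // collected below.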
+ for (CallExpr::arg_iterator I = Exp->arg_begin(), + E = Exp->arg_end(); I != E; ++I) { + BlkExprs.push_back(*I); + } + CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs, + Exp->getType(), VK_RValue, + SourceLocation()); + return CE; +} + +// We need to return the rewritten expression to handle cases where the +// DeclRefExpr is embedded in another expression being rewritten. +// For example: +// +// int main() { +// __block Foo *f; +// __block int i; +// +// void (^myblock)() = ^() { +// [f test]; // f is a DeclRefExpr embedded in a message (which is being rewritten). +// i = 77; +// }; +//} +Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) { + // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR + // for each DeclRefExp where BYREFVAR is name of the variable. + ValueDecl *VD = DeclRefExp->getDecl(); + bool isArrow = DeclRefExp->refersToEnclosingLocal(); + + FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get("__forwarding"), + Context->VoidPtrTy, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow, + FD, SourceLocation(), + FD->getType(), VK_LValue, + OK_Ordinary); + + StringRef Name = VD->getName(); + FD = FieldDecl::Create(*Context, 0, SourceLocation(), SourceLocation(), + &Context->Idents.get(Name), + Context->VoidPtrTy, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(), + DeclRefExp->getType(), VK_LValue, OK_Ordinary); + + + + // Need parens to enforce precedence. + ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(), + DeclRefExp->getExprLoc(), + ME); + ReplaceStmt(DeclRefExp, PE); + return PE; +} + +// Rewrites the imported local variable V with external storage +// (static, extern, etc.) as *V +// +Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) { + ValueDecl *VD = DRE->getDecl(); + if (VarDecl *Var = dyn_cast(VD)) + if (!ImportedLocalExternalDecls.count(Var)) + return DRE; + Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(), + VK_LValue, OK_Ordinary, + DRE->getLocation()); + // Need parens to enforce precedence. + ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), + Exp); + ReplaceStmt(DRE, PE); + return PE; +} + +void RewriteModernObjC::RewriteCastExpr(CStyleCastExpr *CE) { + SourceLocation LocStart = CE->getLParenLoc(); + SourceLocation LocEnd = CE->getRParenLoc(); + + // Need to avoid trying to rewrite synthesized casts. + if (LocStart.isInvalid()) + return; + // Need to avoid trying to rewrite casts contained in macros. + if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd)) + return; + + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + QualType QT = CE->getType(); + const Type* TypePtr = QT->getAs(); + if (isa(TypePtr)) { + const TypeOfExprType *TypeOfExprTypePtr = cast(TypePtr); + QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType(); + std::string TypeAsString = "("; + RewriteBlockPointerType(TypeAsString, QT); + TypeAsString += ")"; + ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString); + return; + } + // advance the location to startArgList. + const char *argPtr = startBuf; + + while (*argPtr++ && (argPtr < endBuf)) { + switch (*argPtr) { + case '^': + // Replace the '^' with '*'. 
+ LocStart = LocStart.getLocWithOffset(argPtr-startBuf); + ReplaceText(LocStart, 1, "*"); + break; + } + } + return; +} + +void RewriteModernObjC::RewriteImplicitCastObjCExpr(CastExpr *IC) { + CastKind CastKind = IC->getCastKind(); + if (CastKind != CK_BlockPointerToObjCPointerCast && + CastKind != CK_AnyPointerToBlockPointerCast) + return; + + QualType QT = IC->getType(); + (void)convertBlockPointerToFunctionPointer(QT); + std::string TypeString(QT.getAsString(Context->getPrintingPolicy())); + std::string Str = "("; + Str += TypeString; + Str += ")"; + InsertText(IC->getSubExpr()->getLocStart(), &Str[0], Str.size()); + + return; +} + +void RewriteModernObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) { + SourceLocation DeclLoc = FD->getLocation(); + unsigned parenCount = 0; + + // We have 1 or more arguments that have closure pointers. + const char *startBuf = SM->getCharacterData(DeclLoc); + const char *startArgList = strchr(startBuf, '('); + + assert((*startArgList == '(') && "Rewriter fuzzy parser confused"); + + parenCount++; + // advance the location to startArgList. + DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf); + assert((DeclLoc.isValid()) && "Invalid DeclLoc"); + + const char *argPtr = startArgList; + + while (*argPtr++ && parenCount) { + switch (*argPtr) { + case '^': + // Replace the '^' with '*'. + DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList); + ReplaceText(DeclLoc, 1, "*"); + break; + case '(': + parenCount++; + break; + case ')': + parenCount--; + break; + } + } + return; +} + +bool RewriteModernObjC::PointerTypeTakesAnyBlockArguments(QualType QT) { + const FunctionProtoType *FTP; + const PointerType *PT = QT->getAs(); + if (PT) { + FTP = PT->getPointeeType()->getAs(); + } else { + const BlockPointerType *BPT = QT->getAs(); + assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type"); + FTP = BPT->getPointeeType()->getAs(); + } + if (FTP) { + for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), + E = FTP->arg_type_end(); I != E; ++I) + if (isTopLevelBlockPointerType(*I)) + return true; + } + return false; +} + +bool RewriteModernObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) { + const FunctionProtoType *FTP; + const PointerType *PT = QT->getAs(); + if (PT) { + FTP = PT->getPointeeType()->getAs(); + } else { + const BlockPointerType *BPT = QT->getAs(); + assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type"); + FTP = BPT->getPointeeType()->getAs(); + } + if (FTP) { + for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), + E = FTP->arg_type_end(); I != E; ++I) { + if ((*I)->isObjCQualifiedIdType()) + return true; + if ((*I)->isObjCObjectPointerType() && + (*I)->getPointeeType()->isObjCQualifiedInterfaceType()) + return true; + } + + } + return false; +} + +void RewriteModernObjC::GetExtentOfArgList(const char *Name, const char *&LParen, + const char *&RParen) { + const char *argPtr = strchr(Name, '('); + assert((*argPtr == '(') && "Rewriter fuzzy parser confused"); + + LParen = argPtr; // output the start. + argPtr++; // skip past the left paren. 
+ unsigned parenCount = 1; + + while (*argPtr && parenCount) { + switch (*argPtr) { + case '(': parenCount++; break; + case ')': parenCount--; break; + default: break; + } + if (parenCount) argPtr++; + } + assert((*argPtr == ')') && "Rewriter fuzzy parser confused"); + RParen = argPtr; // output the end +} + +void RewriteModernObjC::RewriteBlockPointerDecl(NamedDecl *ND) { + if (FunctionDecl *FD = dyn_cast(ND)) { + RewriteBlockPointerFunctionArgs(FD); + return; + } + // Handle Variables and Typedefs. + SourceLocation DeclLoc = ND->getLocation(); + QualType DeclT; + if (VarDecl *VD = dyn_cast(ND)) + DeclT = VD->getType(); + else if (TypedefNameDecl *TDD = dyn_cast(ND)) + DeclT = TDD->getUnderlyingType(); + else if (FieldDecl *FD = dyn_cast(ND)) + DeclT = FD->getType(); + else + llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled"); + + const char *startBuf = SM->getCharacterData(DeclLoc); + const char *endBuf = startBuf; + // scan backward (from the decl location) for the end of the previous decl. + while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart) + startBuf--; + SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf); + std::string buf; + unsigned OrigLength=0; + // *startBuf != '^' if we are dealing with a pointer to function that + // may take block argument types (which will be handled below). + if (*startBuf == '^') { + // Replace the '^' with '*', computing a negative offset. + buf = '*'; + startBuf++; + OrigLength++; + } + while (*startBuf != ')') { + buf += *startBuf; + startBuf++; + OrigLength++; + } + buf += ')'; + OrigLength++; + + if (PointerTypeTakesAnyBlockArguments(DeclT) || + PointerTypeTakesAnyObjCQualifiedType(DeclT)) { + // Replace the '^' with '*' for arguments. + // Replace id
<P>
    with id/*<>*/ + DeclLoc = ND->getLocation(); + startBuf = SM->getCharacterData(DeclLoc); + const char *argListBegin, *argListEnd; + GetExtentOfArgList(startBuf, argListBegin, argListEnd); + while (argListBegin < argListEnd) { + if (*argListBegin == '^') + buf += '*'; + else if (*argListBegin == '<') { + buf += "/*"; + buf += *argListBegin++; + OrigLength++; + while (*argListBegin != '>') { + buf += *argListBegin++; + OrigLength++; + } + buf += *argListBegin; + buf += "*/"; + } + else + buf += *argListBegin; + argListBegin++; + OrigLength++; + } + buf += ')'; + OrigLength++; + } + ReplaceText(Start, OrigLength, buf); + + return; +} + + +/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes: +/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst, +/// struct Block_byref_id_object *src) { +/// _Block_object_assign (&_dest->object, _src->object, +/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT +/// [|BLOCK_FIELD_IS_WEAK]) // object +/// _Block_object_assign(&_dest->object, _src->object, +/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK +/// [|BLOCK_FIELD_IS_WEAK]) // block +/// } +/// And: +/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) { +/// _Block_object_dispose(_src->object, +/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT +/// [|BLOCK_FIELD_IS_WEAK]) // object +/// _Block_object_dispose(_src->object, +/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK +/// [|BLOCK_FIELD_IS_WEAK]) // block +/// } + +std::string RewriteModernObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD, + int flag) { + std::string S; + if (CopyDestroyCache.count(flag)) + return S; + CopyDestroyCache.insert(flag); + S = "static void __Block_byref_id_object_copy_"; + S += utostr(flag); + S += "(void *dst, void *src) {\n"; + + // offset into the object pointer is computed as: + // void * + void* + int + int + void* + void * + unsigned IntSize = + static_cast(Context->getTypeSize(Context->IntTy)); + unsigned VoidPtrSize = + static_cast(Context->getTypeSize(Context->VoidPtrTy)); + + unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth(); + S += " _Block_object_assign((char*)dst + "; + S += utostr(offset); + S += ", *(void * *) ((char*)src + "; + S += utostr(offset); + S += "), "; + S += utostr(flag); + S += ");\n}\n"; + + S += "static void __Block_byref_id_object_dispose_"; + S += utostr(flag); + S += "(void *src) {\n"; + S += " _Block_object_dispose(*(void * *) ((char*)src + "; + S += utostr(offset); + S += "), "; + S += utostr(flag); + S += ");\n}\n"; + return S; +} + +/// RewriteByRefVar - For each __block typex ND variable this routine transforms +/// the declaration into: +/// struct __Block_byref_ND { +/// void *__isa; // NULL for everything except __weak pointers +/// struct __Block_byref_ND *__forwarding; +/// int32_t __flags; +/// int32_t __size; +/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object +/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object +/// typex ND; +/// }; +/// +/// It then replaces declaration of ND variable with: +/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag, +/// __size=sizeof(struct __Block_byref_ND), +/// ND=initializer-if-any}; +/// +/// +void RewriteModernObjC::RewriteByRefVar(VarDecl *ND, bool firstDecl, + bool lastDecl) { + int flag = 0; + int isa = 0; + SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); + if (DeclLoc.isInvalid()) + // If type location is missing, it is because of missing type (a warning). 
+ // Use variable's location which is good for this case. + DeclLoc = ND->getLocation(); + const char *startBuf = SM->getCharacterData(DeclLoc); + SourceLocation X = ND->getLocEnd(); + X = SM->getExpansionLoc(X); + const char *endBuf = SM->getCharacterData(X); + std::string Name(ND->getNameAsString()); + std::string ByrefType; + RewriteByRefString(ByrefType, Name, ND, true); + ByrefType += " {\n"; + ByrefType += " void *__isa;\n"; + RewriteByRefString(ByrefType, Name, ND); + ByrefType += " *__forwarding;\n"; + ByrefType += " int __flags;\n"; + ByrefType += " int __size;\n"; + // Add void *__Block_byref_id_object_copy; + // void *__Block_byref_id_object_dispose; if needed. + QualType Ty = ND->getType(); + bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty); + if (HasCopyAndDispose) { + ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n"; + ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n"; + } + + QualType T = Ty; + (void)convertBlockPointerToFunctionPointer(T); + T.getAsStringInternal(Name, Context->getPrintingPolicy()); + + ByrefType += " " + Name + ";\n"; + ByrefType += "};\n"; + // Insert this type in global scope. It is needed by helper function. + SourceLocation FunLocStart; + if (CurFunctionDef) + FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef); + else { + assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null"); + FunLocStart = CurMethodDef->getLocStart(); + } + InsertText(FunLocStart, ByrefType); + + if (Ty.isObjCGCWeak()) { + flag |= BLOCK_FIELD_IS_WEAK; + isa = 1; + } + if (HasCopyAndDispose) { + flag = BLOCK_BYREF_CALLER; + QualType Ty = ND->getType(); + // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well. + if (Ty->isBlockPointerType()) + flag |= BLOCK_FIELD_IS_BLOCK; + else + flag |= BLOCK_FIELD_IS_OBJECT; + std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag); + if (!HF.empty()) + InsertText(FunLocStart, HF); + } + + // struct __Block_byref_ND ND = + // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND), + // initializer-if-any}; + bool hasInit = (ND->getInit() != 0); + // FIXME. rewriter does not support __block c++ objects which + // require construction. + if (hasInit) + if (CXXConstructExpr *CExp = dyn_cast(ND->getInit())) { + CXXConstructorDecl *CXXDecl = CExp->getConstructor(); + if (CXXDecl && CXXDecl->isDefaultConstructor()) + hasInit = false; + } + + unsigned flags = 0; + if (HasCopyAndDispose) + flags |= BLOCK_HAS_COPY_DISPOSE; + Name = ND->getNameAsString(); + ByrefType.clear(); + RewriteByRefString(ByrefType, Name, ND); + std::string ForwardingCastType("("); + ForwardingCastType += ByrefType + " *)"; + ByrefType += " " + Name + " = {(void*)"; + ByrefType += utostr(isa); + ByrefType += "," + ForwardingCastType + "&" + Name + ", "; + ByrefType += utostr(flags); + ByrefType += ", "; + ByrefType += "sizeof("; + RewriteByRefString(ByrefType, Name, ND); + ByrefType += ")"; + if (HasCopyAndDispose) { + ByrefType += ", __Block_byref_id_object_copy_"; + ByrefType += utostr(flag); + ByrefType += ", __Block_byref_id_object_dispose_"; + ByrefType += utostr(flag); + } + + if (!firstDecl) { + // In multiple __block declarations, and for all but 1st declaration, + // find location of the separating comma. This would be start location + // where new text is to be inserted. 
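  // e.g. in "__block int a = 1, b = 2;" the rewrite of "b" is anchored at the
  // comma that terminates the declarator for "a" (names are illustrative).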
+ DeclLoc = ND->getLocation(); + const char *startDeclBuf = SM->getCharacterData(DeclLoc); + const char *commaBuf = startDeclBuf; + while (*commaBuf != ',') + commaBuf--; + assert((*commaBuf == ',') && "RewriteByRefVar: can't find ','"); + DeclLoc = DeclLoc.getLocWithOffset(commaBuf - startDeclBuf); + startBuf = commaBuf; + } + + if (!hasInit) { + ByrefType += "};\n"; + unsigned nameSize = Name.size(); + // for block or function pointer declaration. Name is aleady + // part of the declaration. + if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) + nameSize = 1; + ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType); + } + else { + ByrefType += ", "; + SourceLocation startLoc; + Expr *E = ND->getInit(); + if (const CStyleCastExpr *ECE = dyn_cast(E)) + startLoc = ECE->getLParenLoc(); + else + startLoc = E->getLocStart(); + startLoc = SM->getExpansionLoc(startLoc); + endBuf = SM->getCharacterData(startLoc); + ReplaceText(DeclLoc, endBuf-startBuf, ByrefType); + + const char separator = lastDecl ? ';' : ','; + const char *startInitializerBuf = SM->getCharacterData(startLoc); + const char *separatorBuf = strchr(startInitializerBuf, separator); + assert((*separatorBuf == separator) && + "RewriteByRefVar: can't find ';' or ','"); + SourceLocation separatorLoc = + startLoc.getLocWithOffset(separatorBuf-startInitializerBuf); + + InsertText(separatorLoc, lastDecl ? "}" : "};\n"); + } + return; +} + +void RewriteModernObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) { + // Add initializers for any closure decl refs. + GetBlockDeclRefExprs(Exp->getBody()); + if (BlockDeclRefs.size()) { + // Unique all "by copy" declarations. + for (unsigned i = 0; i < BlockDeclRefs.size(); i++) + if (!BlockDeclRefs[i]->getDecl()->hasAttr()) { + if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) { + BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl()); + BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl()); + } + } + // Unique all "by ref" declarations. + for (unsigned i = 0; i < BlockDeclRefs.size(); i++) + if (BlockDeclRefs[i]->getDecl()->hasAttr()) { + if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) { + BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl()); + BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl()); + } + } + // Find any imported blocks...they will need special attention. + for (unsigned i = 0; i < BlockDeclRefs.size(); i++) + if (BlockDeclRefs[i]->getDecl()->hasAttr() || + BlockDeclRefs[i]->getType()->isObjCObjectPointerType() || + BlockDeclRefs[i]->getType()->isBlockPointerType()) + ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl()); + } +} + +FunctionDecl *RewriteModernObjC::SynthBlockInitFunctionDecl(StringRef name) { + IdentifierInfo *ID = &Context->Idents.get(name); + QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy); + return FunctionDecl::Create(*Context, TUDecl, SourceLocation(), + SourceLocation(), ID, FType, 0, SC_Extern, + SC_None, false, false); +} + +Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp, + const SmallVector &InnerBlockDeclRefs) { + + const BlockDecl *block = Exp->getBlockDecl(); + + Blocks.push_back(Exp); + + CollectBlockDeclRefInfo(Exp); + + // Add inner imported variables now used in current block. 
+ int countOfInnerDecls = 0; + if (!InnerBlockDeclRefs.empty()) { + for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) { + DeclRefExpr *Exp = InnerBlockDeclRefs[i]; + ValueDecl *VD = Exp->getDecl(); + if (!VD->hasAttr() && !BlockByCopyDeclsPtrSet.count(VD)) { + // We need to save the copied-in variables in nested + // blocks because it is needed at the end for some of the API generations. + // See SynthesizeBlockLiterals routine. + InnerDeclRefs.push_back(Exp); countOfInnerDecls++; + BlockDeclRefs.push_back(Exp); + BlockByCopyDeclsPtrSet.insert(VD); + BlockByCopyDecls.push_back(VD); + } + if (VD->hasAttr() && !BlockByRefDeclsPtrSet.count(VD)) { + InnerDeclRefs.push_back(Exp); countOfInnerDecls++; + BlockDeclRefs.push_back(Exp); + BlockByRefDeclsPtrSet.insert(VD); + BlockByRefDecls.push_back(VD); + } + } + // Find any imported blocks...they will need special attention. + for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) + if (InnerBlockDeclRefs[i]->getDecl()->hasAttr() || + InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() || + InnerBlockDeclRefs[i]->getType()->isBlockPointerType()) + ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl()); + } + InnerDeclRefsCount.push_back(countOfInnerDecls); + + std::string FuncName; + + if (CurFunctionDef) + FuncName = CurFunctionDef->getNameAsString(); + else if (CurMethodDef) + BuildUniqueMethodName(FuncName, CurMethodDef); + else if (GlobalVarDecl) + FuncName = std::string(GlobalVarDecl->getNameAsString()); + + bool GlobalBlockExpr = + block->getDeclContext()->getRedeclContext()->isFileContext(); + + if (GlobalBlockExpr && !GlobalVarDecl) { + Diags.Report(block->getLocation(), GlobalBlockRewriteFailedDiag); + GlobalBlockExpr = false; + } + + std::string BlockNumber = utostr(Blocks.size()-1); + + std::string Func = "__" + FuncName + "_block_func_" + BlockNumber; + + // Get a pointer to the function type so we can cast appropriately. + QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType()); + QualType FType = Context->getPointerType(BFT); + + FunctionDecl *FD; + Expr *NewRep; + + // Simulate a contructor call... + std::string Tag; + + if (GlobalBlockExpr) + Tag = "__global_"; + else + Tag = "__"; + Tag += FuncName + "_block_impl_" + BlockNumber; + + FD = SynthBlockInitFunctionDecl(Tag); + DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue, + SourceLocation()); + + SmallVector InitExprs; + + // Initialize the block function. + FD = SynthBlockInitFunctionDecl(Func); + DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), + VK_LValue, SourceLocation()); + CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, + CK_BitCast, Arg); + InitExprs.push_back(castExpr); + + // Initialize the block descriptor. + std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA"; + + VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get(DescData.c_str()), + Context->VoidPtrTy, 0, + SC_Static, SC_None); + UnaryOperator *DescRefExpr = + new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false, + Context->VoidPtrTy, + VK_LValue, + SourceLocation()), + UO_AddrOf, + Context->getPointerType(Context->VoidPtrTy), + VK_RValue, OK_Ordinary, + SourceLocation()); + InitExprs.push_back(DescRefExpr); + + // Add initializers for any closure decl refs. + if (BlockDeclRefs.size()) { + Expr *Exp; + // Output all "by copy" declarations. 
+ for (SmallVector::iterator I = BlockByCopyDecls.begin(), + E = BlockByCopyDecls.end(); I != E; ++I) { + if (isObjCType((*I)->getType())) { + // FIXME: Conform to ABI ([[obj retain] autorelease]). + FD = SynthBlockInitFunctionDecl((*I)->getName()); + Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), + VK_LValue, SourceLocation()); + if (HasLocalVariableExternalStorage(*I)) { + QualType QT = (*I)->getType(); + QT = Context->getPointerType(QT); + Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue, + OK_Ordinary, SourceLocation()); + } + } else if (isTopLevelBlockPointerType((*I)->getType())) { + FD = SynthBlockInitFunctionDecl((*I)->getName()); + Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), + VK_LValue, SourceLocation()); + Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, + CK_BitCast, Arg); + } else { + FD = SynthBlockInitFunctionDecl((*I)->getName()); + Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), + VK_LValue, SourceLocation()); + if (HasLocalVariableExternalStorage(*I)) { + QualType QT = (*I)->getType(); + QT = Context->getPointerType(QT); + Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue, + OK_Ordinary, SourceLocation()); + } + + } + InitExprs.push_back(Exp); + } + // Output all "by ref" declarations. + for (SmallVector::iterator I = BlockByRefDecls.begin(), + E = BlockByRefDecls.end(); I != E; ++I) { + ValueDecl *ND = (*I); + std::string Name(ND->getNameAsString()); + std::string RecName; + RewriteByRefString(RecName, Name, ND, true); + IdentifierInfo *II = &Context->Idents.get(RecName.c_str() + + sizeof("struct")); + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + II); + assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl"); + QualType castT = Context->getPointerType(Context->getTagDeclType(RD)); + + FD = SynthBlockInitFunctionDecl((*I)->getName()); + Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, + SourceLocation()); + bool isNestedCapturedVar = false; + if (block) + for (BlockDecl::capture_const_iterator ci = block->capture_begin(), + ce = block->capture_end(); ci != ce; ++ci) { + const VarDecl *variable = ci->getVariable(); + if (variable == ND && ci->isNested()) { + assert (ci->isByRef() && + "SynthBlockInitExpr - captured block variable is not byref"); + isNestedCapturedVar = true; + break; + } + } + // captured nested byref variable has its address passed. Do not take + // its address again. 
+ if (!isNestedCapturedVar) + Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, + Context->getPointerType(Exp->getType()), + VK_RValue, OK_Ordinary, SourceLocation()); + Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp); + InitExprs.push_back(Exp); + } + } + if (ImportedBlockDecls.size()) { + // generate BLOCK_HAS_COPY_DISPOSE(have helper funcs) | BLOCK_HAS_DESCRIPTOR + int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR); + unsigned IntSize = + static_cast(Context->getTypeSize(Context->IntTy)); + Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag), + Context->IntTy, SourceLocation()); + InitExprs.push_back(FlagExp); + } + NewRep = new (Context) CallExpr(*Context, DRE, InitExprs, + FType, VK_LValue, SourceLocation()); + + if (GlobalBlockExpr) { + assert (GlobalConstructionExp == 0 && + "SynthBlockInitExpr - GlobalConstructionExp must be null"); + GlobalConstructionExp = NewRep; + NewRep = DRE; + } + + NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf, + Context->getPointerType(NewRep->getType()), + VK_RValue, OK_Ordinary, SourceLocation()); + NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast, + NewRep); + BlockDeclRefs.clear(); + BlockByRefDecls.clear(); + BlockByRefDeclsPtrSet.clear(); + BlockByCopyDecls.clear(); + BlockByCopyDeclsPtrSet.clear(); + ImportedBlockDecls.clear(); + return NewRep; +} + +bool RewriteModernObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) { + if (const ObjCForCollectionStmt * CS = + dyn_cast(Stmts.back())) + return CS->getElement() == DS; + return false; +} + +//===----------------------------------------------------------------------===// +// Function Body / Expression rewriting +//===----------------------------------------------------------------------===// + +Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) { + if (isa(S) || isa(S) || + isa(S) || isa(S)) + Stmts.push_back(S); + else if (isa(S)) { + Stmts.push_back(S); + ObjCBcLabelNo.push_back(++BcLabelCount); + } + + // Pseudo-object operations and ivar references need special + // treatment because we're going to recursively rewrite them. + if (PseudoObjectExpr *PseudoOp = dyn_cast(S)) { + if (isa(PseudoOp->getSyntacticForm())) { + return RewritePropertyOrImplicitSetter(PseudoOp); + } else { + return RewritePropertyOrImplicitGetter(PseudoOp); + } + } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast(S)) { + return RewriteObjCIvarRefExpr(IvarRefExpr); + } + + SourceRange OrigStmtRange = S->getSourceRange(); + + // Perform a bottom up rewrite of all children. + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) { + Stmt *childStmt = (*CI); + Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt); + if (newStmt) { + *CI = newStmt; + } + } + + if (BlockExpr *BE = dyn_cast(S)) { + SmallVector InnerBlockDeclRefs; + llvm::SmallPtrSet InnerContexts; + InnerContexts.insert(BE->getBlockDecl()); + ImportedLocalExternalDecls.clear(); + GetInnerBlockDeclRefExprs(BE->getBody(), + InnerBlockDeclRefs, InnerContexts); + // Rewrite the block body in place. + Stmt *SaveCurrentBody = CurrentBody; + CurrentBody = BE->getBody(); + PropParentMap = 0; + // block literal on rhs of a property-dot-sytax assignment + // must be replaced by its synthesize ast so getRewrittenText + // works as expected. 
In this case, what actually ends up on RHS + // is the blockTranscribed which is the helper function for the + // block literal; as in: self.c = ^() {[ace ARR];}; + bool saveDisableReplaceStmt = DisableReplaceStmt; + DisableReplaceStmt = false; + RewriteFunctionBodyOrGlobalInitializer(BE->getBody()); + DisableReplaceStmt = saveDisableReplaceStmt; + CurrentBody = SaveCurrentBody; + PropParentMap = 0; + ImportedLocalExternalDecls.clear(); + // Now we snarf the rewritten text and stash it away for later use. + std::string Str = Rewrite.getRewrittenText(BE->getSourceRange()); + RewrittenBlockExprs[BE] = Str; + + Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs); + + //blockTranscribed->dump(); + ReplaceStmt(S, blockTranscribed); + return blockTranscribed; + } + // Handle specific things. + if (ObjCEncodeExpr *AtEncode = dyn_cast(S)) + return RewriteAtEncode(AtEncode); + + if (ObjCSelectorExpr *AtSelector = dyn_cast(S)) + return RewriteAtSelector(AtSelector); + + if (ObjCStringLiteral *AtString = dyn_cast(S)) + return RewriteObjCStringLiteral(AtString); + + if (ObjCBoolLiteralExpr *BoolLitExpr = dyn_cast(S)) + return RewriteObjCBoolLiteralExpr(BoolLitExpr); + + if (ObjCBoxedExpr *BoxedExpr = dyn_cast(S)) + return RewriteObjCBoxedExpr(BoxedExpr); + + if (ObjCArrayLiteral *ArrayLitExpr = dyn_cast(S)) + return RewriteObjCArrayLiteralExpr(ArrayLitExpr); + + if (ObjCDictionaryLiteral *DictionaryLitExpr = + dyn_cast(S)) + return RewriteObjCDictionaryLiteralExpr(DictionaryLitExpr); + + if (ObjCMessageExpr *MessExpr = dyn_cast(S)) { +#if 0 + // Before we rewrite it, put the original message expression in a comment. + SourceLocation startLoc = MessExpr->getLocStart(); + SourceLocation endLoc = MessExpr->getLocEnd(); + + const char *startBuf = SM->getCharacterData(startLoc); + const char *endBuf = SM->getCharacterData(endLoc); + + std::string messString; + messString += "// "; + messString.append(startBuf, endBuf-startBuf+1); + messString += "\n"; + + // FIXME: Missing definition of + // InsertText(clang::SourceLocation, char const*, unsigned int). + // InsertText(startLoc, messString.c_str(), messString.size()); + // Tried this, but it didn't work either... + // ReplaceText(startLoc, 0, messString.c_str(), messString.size()); +#endif + return RewriteMessageExpr(MessExpr); + } + + if (ObjCAutoreleasePoolStmt *StmtAutoRelease = + dyn_cast(S)) { + return RewriteObjCAutoreleasePoolStmt(StmtAutoRelease); + } + + if (ObjCAtTryStmt *StmtTry = dyn_cast(S)) + return RewriteObjCTryStmt(StmtTry); + + if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast(S)) + return RewriteObjCSynchronizedStmt(StmtTry); + + if (ObjCAtThrowStmt *StmtThrow = dyn_cast(S)) + return RewriteObjCThrowStmt(StmtThrow); + + if (ObjCProtocolExpr *ProtocolExp = dyn_cast(S)) + return RewriteObjCProtocolExpr(ProtocolExp); + + if (ObjCForCollectionStmt *StmtForCollection = + dyn_cast(S)) + return RewriteObjCForCollectionStmt(StmtForCollection, + OrigStmtRange.getEnd()); + if (BreakStmt *StmtBreakStmt = + dyn_cast(S)) + return RewriteBreakStmt(StmtBreakStmt); + if (ContinueStmt *StmtContinueStmt = + dyn_cast(S)) + return RewriteContinueStmt(StmtContinueStmt); + + // Need to check for protocol refs (id
<foo>, Foo <foo>
    *) in variable decls + // and cast exprs. + if (DeclStmt *DS = dyn_cast(S)) { + // FIXME: What we're doing here is modifying the type-specifier that + // precedes the first Decl. In the future the DeclGroup should have + // a separate type-specifier that we can rewrite. + // NOTE: We need to avoid rewriting the DeclStmt if it is within + // the context of an ObjCForCollectionStmt. For example: + // NSArray *someArray; + // for (id index in someArray) ; + // This is because RewriteObjCForCollectionStmt() does textual rewriting + // and it depends on the original text locations/positions. + if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS)) + RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin()); + + // Blocks rewrite rules. + for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end(); + DI != DE; ++DI) { + Decl *SD = *DI; + if (ValueDecl *ND = dyn_cast(SD)) { + if (isTopLevelBlockPointerType(ND->getType())) + RewriteBlockPointerDecl(ND); + else if (ND->getType()->isFunctionPointerType()) + CheckFunctionPointerDecl(ND->getType(), ND); + if (VarDecl *VD = dyn_cast(SD)) { + if (VD->hasAttr()) { + static unsigned uniqueByrefDeclCount = 0; + assert(!BlockByRefDeclNo.count(ND) && + "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl"); + BlockByRefDeclNo[ND] = uniqueByrefDeclCount++; + RewriteByRefVar(VD, (DI == DS->decl_begin()), ((DI+1) == DE)); + } + else + RewriteTypeOfDecl(VD); + } + } + if (TypedefNameDecl *TD = dyn_cast(SD)) { + if (isTopLevelBlockPointerType(TD->getUnderlyingType())) + RewriteBlockPointerDecl(TD); + else if (TD->getUnderlyingType()->isFunctionPointerType()) + CheckFunctionPointerDecl(TD->getUnderlyingType(), TD); + } + } + } + + if (CStyleCastExpr *CE = dyn_cast(S)) + RewriteObjCQualifiedInterfaceTypes(CE); + + if (isa(S) || isa(S) || + isa(S) || isa(S)) { + assert(!Stmts.empty() && "Statement stack is empty"); + assert ((isa(Stmts.back()) || isa(Stmts.back()) || + isa(Stmts.back()) || isa(Stmts.back())) + && "Statement stack mismatch"); + Stmts.pop_back(); + } + // Handle blocks rewriting. + if (DeclRefExpr *DRE = dyn_cast(S)) { + ValueDecl *VD = DRE->getDecl(); + if (VD->hasAttr()) + return RewriteBlockDeclRefExpr(DRE); + if (HasLocalVariableExternalStorage(VD)) + return RewriteLocalVariableExternalStorage(DRE); + } + + if (CallExpr *CE = dyn_cast(S)) { + if (CE->getCallee()->getType()->isBlockPointerType()) { + Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee()); + ReplaceStmt(S, BlockCall); + return BlockCall; + } + } + if (CStyleCastExpr *CE = dyn_cast(S)) { + RewriteCastExpr(CE); + } + if (ImplicitCastExpr *ICE = dyn_cast(S)) { + RewriteImplicitCastObjCExpr(ICE); + } +#if 0 + + if (ImplicitCastExpr *ICE = dyn_cast(S)) { + CastExpr *Replacement = new (Context) CastExpr(ICE->getType(), + ICE->getSubExpr(), + SourceLocation()); + // Get the new text. + std::string SStr; + llvm::raw_string_ostream Buf(SStr); + Replacement->printPretty(Buf); + const std::string &Str = Buf.str(); + + printf("CAST = %s\n", &Str[0]); + InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size()); + delete S; + return Replacement; + } +#endif + // Return this stmt unmodified. 
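  // For orientation only (an illustrative sketch, not generated verbatim by
  // this hunk): RewriteMessageExpr, dispatched above, turns a message send
  // such as
  //   [obj doSomething];
  // into a cast-and-call through the untyped objc_msgSend prototype that the
  // preamble declares, roughly
  //   ((id (*)(id, SEL))(void *)objc_msgSend)((id)obj, sel_registerName("doSomething"));
  // "obj" and "doSomething" are hypothetical names; the exact casts depend on
  // the receiver and on the method's signature.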
+ return S; +} + +void RewriteModernObjC::RewriteRecordBody(RecordDecl *RD) { + for (RecordDecl::field_iterator i = RD->field_begin(), + e = RD->field_end(); i != e; ++i) { + FieldDecl *FD = *i; + if (isTopLevelBlockPointerType(FD->getType())) + RewriteBlockPointerDecl(FD); + if (FD->getType()->isObjCQualifiedIdType() || + FD->getType()->isObjCQualifiedInterfaceType()) + RewriteObjCQualifiedInterfaceTypes(FD); + } +} + +/// HandleDeclInMainFile - This is called for each top-level decl defined in the +/// main file of the input. +void RewriteModernObjC::HandleDeclInMainFile(Decl *D) { + switch (D->getKind()) { + case Decl::Function: { + FunctionDecl *FD = cast(D); + if (FD->isOverloadedOperator()) + return; + + // Since function prototypes don't have ParmDecl's, we check the function + // prototype. This enables us to rewrite function declarations and + // definitions using the same code. + RewriteBlocksInFunctionProtoType(FD->getType(), FD); + + if (!FD->isThisDeclarationADefinition()) + break; + + // FIXME: If this should support Obj-C++, support CXXTryStmt + if (CompoundStmt *Body = dyn_cast_or_null(FD->getBody())) { + CurFunctionDef = FD; + CurrentBody = Body; + Body = + cast_or_null(RewriteFunctionBodyOrGlobalInitializer(Body)); + FD->setBody(Body); + CurrentBody = 0; + if (PropParentMap) { + delete PropParentMap; + PropParentMap = 0; + } + // This synthesizes and inserts the block "impl" struct, invoke function, + // and any copy/dispose helper functions. + InsertBlockLiteralsWithinFunction(FD); + RewriteLineDirective(D); + CurFunctionDef = 0; + } + break; + } + case Decl::ObjCMethod: { + ObjCMethodDecl *MD = cast(D); + if (CompoundStmt *Body = MD->getCompoundBody()) { + CurMethodDef = MD; + CurrentBody = Body; + Body = + cast_or_null(RewriteFunctionBodyOrGlobalInitializer(Body)); + MD->setBody(Body); + CurrentBody = 0; + if (PropParentMap) { + delete PropParentMap; + PropParentMap = 0; + } + InsertBlockLiteralsWithinMethod(MD); + RewriteLineDirective(D); + CurMethodDef = 0; + } + break; + } + case Decl::ObjCImplementation: { + ObjCImplementationDecl *CI = cast(D); + ClassImplementation.push_back(CI); + break; + } + case Decl::ObjCCategoryImpl: { + ObjCCategoryImplDecl *CI = cast(D); + CategoryImplementation.push_back(CI); + break; + } + case Decl::Var: { + VarDecl *VD = cast(D); + RewriteObjCQualifiedInterfaceTypes(VD); + if (isTopLevelBlockPointerType(VD->getType())) + RewriteBlockPointerDecl(VD); + else if (VD->getType()->isFunctionPointerType()) { + CheckFunctionPointerDecl(VD->getType(), VD); + if (VD->getInit()) { + if (CStyleCastExpr *CE = dyn_cast(VD->getInit())) { + RewriteCastExpr(CE); + } + } + } else if (VD->getType()->isRecordType()) { + RecordDecl *RD = VD->getType()->getAs()->getDecl(); + if (RD->isCompleteDefinition()) + RewriteRecordBody(RD); + } + if (VD->getInit()) { + GlobalVarDecl = VD; + CurrentBody = VD->getInit(); + RewriteFunctionBodyOrGlobalInitializer(VD->getInit()); + CurrentBody = 0; + if (PropParentMap) { + delete PropParentMap; + PropParentMap = 0; + } + SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName()); + GlobalVarDecl = 0; + + // This is needed for blocks. 
+ if (CStyleCastExpr *CE = dyn_cast(VD->getInit())) { + RewriteCastExpr(CE); + } + } + break; + } + case Decl::TypeAlias: + case Decl::Typedef: { + if (TypedefNameDecl *TD = dyn_cast(D)) { + if (isTopLevelBlockPointerType(TD->getUnderlyingType())) + RewriteBlockPointerDecl(TD); + else if (TD->getUnderlyingType()->isFunctionPointerType()) + CheckFunctionPointerDecl(TD->getUnderlyingType(), TD); + } + break; + } + case Decl::CXXRecord: + case Decl::Record: { + RecordDecl *RD = cast(D); + if (RD->isCompleteDefinition()) + RewriteRecordBody(RD); + break; + } + default: + break; + } + // Nothing yet. +} + +/// Write_ProtocolExprReferencedMetadata - This routine writer out the +/// protocol reference symbols in the for of: +/// struct _protocol_t *PROTOCOL_REF = &PROTOCOL_METADATA. +static void Write_ProtocolExprReferencedMetadata(ASTContext *Context, + ObjCProtocolDecl *PDecl, + std::string &Result) { + // Also output .objc_protorefs$B section and its meta-data. + if (Context->getLangOpts().MicrosoftExt) + Result += "static "; + Result += "struct _protocol_t *"; + Result += "_OBJC_PROTOCOL_REFERENCE_$_"; + Result += PDecl->getNameAsString(); + Result += " = &"; + Result += "_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString(); + Result += ";\n"; +} + +void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) { + if (Diags.hasErrorOccurred()) + return; + + RewriteInclude(); + + // Here's a great place to add any extra declarations that may be needed. + // Write out meta data for each @protocol(). + for (llvm::SmallPtrSet::iterator I = ProtocolExprDecls.begin(), + E = ProtocolExprDecls.end(); I != E; ++I) { + RewriteObjCProtocolMetaData(*I, Preamble); + Write_ProtocolExprReferencedMetadata(Context, (*I), Preamble); + } + + InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false); + + if (ClassImplementation.size() || CategoryImplementation.size()) + RewriteImplementations(); + + for (unsigned i = 0, e = ObjCInterfacesSeen.size(); i < e; i++) { + ObjCInterfaceDecl *CDecl = ObjCInterfacesSeen[i]; + // Write struct declaration for the class matching its ivar declarations. + // Note that for modern abi, this is postponed until the end of TU + // because class extensions and the implementation might declare their own + // private ivars. + RewriteInterfaceDecl(CDecl); + } + + // Get the buffer corresponding to MainFileID. If we haven't changed it, then + // we are done. + if (const RewriteBuffer *RewriteBuf = + Rewrite.getRewriteBufferFor(MainFileID)) { + //printf("Changed:\n"); + *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end()); + } else { + llvm::errs() << "No changes\n"; + } + + if (ClassImplementation.size() || CategoryImplementation.size() || + ProtocolExprDecls.size()) { + // Rewrite Objective-c meta data* + std::string ResultStr; + RewriteMetaDataIntoBuffer(ResultStr); + // Emit metadata. + *OutFile << ResultStr; + } + // Emit ImageInfo; + { + std::string ResultStr; + WriteImageInfo(ResultStr); + *OutFile << ResultStr; + } + OutFile->flush(); +} + +void RewriteModernObjC::Initialize(ASTContext &context) { + InitializeCommon(context); + + Preamble += "#ifndef __OBJC2__\n"; + Preamble += "#define __OBJC2__\n"; + Preamble += "#endif\n"; + + // declaring objc_selector outside the parameter list removes a silly + // scope related warning... 
+ if (IsHeader) + Preamble = "#pragma once\n"; + Preamble += "struct objc_selector; struct objc_class;\n"; + Preamble += "struct __rw_objc_super { \n\tstruct objc_object *object; "; + Preamble += "\n\tstruct objc_object *superClass; "; + // Add a constructor for creating temporary objects. + Preamble += "\n\t__rw_objc_super(struct objc_object *o, struct objc_object *s) "; + Preamble += ": object(o), superClass(s) {} "; + Preamble += "\n};\n"; + + if (LangOpts.MicrosoftExt) { + // Define all sections using syntax that makes sense. + // These are currently generated. + Preamble += "\n#pragma section(\".objc_classlist$B\", long, read, write)\n"; + Preamble += "#pragma section(\".objc_catlist$B\", long, read, write)\n"; + Preamble += "#pragma section(\".objc_imageinfo$B\", long, read, write)\n"; + Preamble += "#pragma section(\".objc_nlclslist$B\", long, read, write)\n"; + Preamble += "#pragma section(\".objc_nlcatlist$B\", long, read, write)\n"; + // These are generated but not necessary for functionality. + Preamble += "#pragma section(\".cat_cls_meth$B\", long, read, write)\n"; + Preamble += "#pragma section(\".inst_meth$B\", long, read, write)\n"; + Preamble += "#pragma section(\".cls_meth$B\", long, read, write)\n"; + Preamble += "#pragma section(\".objc_ivar$B\", long, read, write)\n"; + + // These need be generated for performance. Currently they are not, + // using API calls instead. + Preamble += "#pragma section(\".objc_selrefs$B\", long, read, write)\n"; + Preamble += "#pragma section(\".objc_classrefs$B\", long, read, write)\n"; + Preamble += "#pragma section(\".objc_superrefs$B\", long, read, write)\n"; + + } + Preamble += "#ifndef _REWRITER_typedef_Protocol\n"; + Preamble += "typedef struct objc_object Protocol;\n"; + Preamble += "#define _REWRITER_typedef_Protocol\n"; + Preamble += "#endif\n"; + if (LangOpts.MicrosoftExt) { + Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n"; + Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n"; + } + else + Preamble += "#define __OBJC_RW_DLLIMPORT extern\n"; + + Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend(void);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper(void);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_stret(void);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper_stret(void);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_fpret(void);\n"; + + Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getClass"; + Preamble += "(const char *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass"; + Preamble += "(struct objc_class *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getMetaClass"; + Preamble += "(const char *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw( struct objc_object *);\n"; + // @synchronized hooks. 
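  // Illustrative sketch (assumed shape, not verbatim output): the two hooks
  // declared just below are what a rewritten @synchronized statement calls.
  // Conceptually,
  //   @synchronized (obj) { body }
  // ends up bracketed as
  //   objc_sync_enter((struct objc_object *)obj);
  //   { body }
  //   objc_sync_exit((struct objc_object *)obj);
  // The real rewrite, done by RewriteObjCSynchronizedStmt elsewhere in this
  // file, is more involved, but this is why both imports are emitted here.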
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_enter( struct objc_object *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_exit( struct objc_object *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n"; + Preamble += "#ifndef __FASTENUMERATIONSTATE\n"; + Preamble += "struct __objcFastEnumerationState {\n\t"; + Preamble += "unsigned long state;\n\t"; + Preamble += "void **itemsPtr;\n\t"; + Preamble += "unsigned long *mutationsPtr;\n\t"; + Preamble += "unsigned long extra[5];\n};\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n"; + Preamble += "#define __FASTENUMERATIONSTATE\n"; + Preamble += "#endif\n"; + Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n"; + Preamble += "struct __NSConstantStringImpl {\n"; + Preamble += " int *isa;\n"; + Preamble += " int flags;\n"; + Preamble += " char *str;\n"; + Preamble += " long length;\n"; + Preamble += "};\n"; + Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n"; + Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n"; + Preamble += "#else\n"; + Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n"; + Preamble += "#endif\n"; + Preamble += "#define __NSCONSTANTSTRINGIMPL\n"; + Preamble += "#endif\n"; + // Blocks preamble. + Preamble += "#ifndef BLOCK_IMPL\n"; + Preamble += "#define BLOCK_IMPL\n"; + Preamble += "struct __block_impl {\n"; + Preamble += " void *isa;\n"; + Preamble += " int Flags;\n"; + Preamble += " int Reserved;\n"; + Preamble += " void *FuncPtr;\n"; + Preamble += "};\n"; + Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n"; + Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n"; + Preamble += "extern \"C\" __declspec(dllexport) " + "void _Block_object_assign(void *, const void *, const int);\n"; + Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n"; + Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n"; + Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n"; + Preamble += "#else\n"; + Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n"; + Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n"; + Preamble += "#endif\n"; + Preamble += "#endif\n"; + if (LangOpts.MicrosoftExt) { + Preamble += "#undef __OBJC_RW_DLLIMPORT\n"; + Preamble += "#undef __OBJC_RW_STATICIMPORT\n"; + Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests. + Preamble += "#define __attribute__(X)\n"; + Preamble += "#endif\n"; + Preamble += "#ifndef __weak\n"; + Preamble += "#define __weak\n"; + Preamble += "#endif\n"; + Preamble += "#ifndef __block\n"; + Preamble += "#define __block\n"; + Preamble += "#endif\n"; + } + else { + Preamble += "#define __block\n"; + Preamble += "#define __weak\n"; + } + + // Declarations required for modern objective-c array and dictionary literals. + Preamble += "\n#include \n"; + Preamble += "struct __NSContainer_literal {\n"; + Preamble += " void * *arr;\n"; + Preamble += " __NSContainer_literal (unsigned int count, ...) 
{\n"; + Preamble += "\tva_list marker;\n"; + Preamble += "\tva_start(marker, count);\n"; + Preamble += "\tarr = new void *[count];\n"; + Preamble += "\tfor (unsigned i = 0; i < count; i++)\n"; + Preamble += "\t arr[i] = va_arg(marker, void *);\n"; + Preamble += "\tva_end( marker );\n"; + Preamble += " };\n"; + Preamble += " ~__NSContainer_literal() {\n"; + Preamble += "\tdelete[] arr;\n"; + Preamble += " }\n"; + Preamble += "};\n"; + + // Declaration required for implementation of @autoreleasepool statement. + Preamble += "extern \"C\" __declspec(dllimport) void * objc_autoreleasePoolPush(void);\n"; + Preamble += "extern \"C\" __declspec(dllimport) void objc_autoreleasePoolPop(void *);\n\n"; + Preamble += "struct __AtAutoreleasePool {\n"; + Preamble += " __AtAutoreleasePool() {atautoreleasepoolobj = objc_autoreleasePoolPush();}\n"; + Preamble += " ~__AtAutoreleasePool() {objc_autoreleasePoolPop(atautoreleasepoolobj);}\n"; + Preamble += " void * atautoreleasepoolobj;\n"; + Preamble += "};\n"; + + // NOTE! Windows uses LLP64 for 64bit mode. So, cast pointer to long long + // as this avoids warning in any 64bit/32bit compilation model. + Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n"; +} + +/// RewriteIvarOffsetComputation - This rutine synthesizes computation of +/// ivar offset. +void RewriteModernObjC::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, + std::string &Result) { + if (ivar->isBitField()) { + // FIXME: The hack below doesn't work for bitfields. For now, we simply + // place all bitfields at offset 0. + Result += "0"; + } else { + Result += "__OFFSETOFIVAR__(struct "; + Result += ivar->getContainingInterface()->getNameAsString(); + if (LangOpts.MicrosoftExt) + Result += "_IMPL"; + Result += ", "; + Result += ivar->getNameAsString(); + Result += ")"; + } +} + +/// WriteModernMetadataDeclarations - Writes out metadata declarations for modern ABI. 
+/// struct _prop_t { +/// const char *name; +/// char *attributes; +/// } + +/// struct _prop_list_t { +/// uint32_t entsize; // sizeof(struct _prop_t) +/// uint32_t count_of_properties; +/// struct _prop_t prop_list[count_of_properties]; +/// } + +/// struct _protocol_t; + +/// struct _protocol_list_t { +/// long protocol_count; // Note, this is 32/64 bit +/// struct _protocol_t * protocol_list[protocol_count]; +/// } + +/// struct _objc_method { +/// SEL _cmd; +/// const char *method_type; +/// char *_imp; +/// } + +/// struct _method_list_t { +/// uint32_t entsize; // sizeof(struct _objc_method) +/// uint32_t method_count; +/// struct _objc_method method_list[method_count]; +/// } + +/// struct _protocol_t { +/// id isa; // NULL +/// const char *protocol_name; +/// const struct _protocol_list_t * protocol_list; // super protocols +/// const struct method_list_t *instance_methods; +/// const struct method_list_t *class_methods; +/// const struct method_list_t *optionalInstanceMethods; +/// const struct method_list_t *optionalClassMethods; +/// const struct _prop_list_t * properties; +/// const uint32_t size; // sizeof(struct _protocol_t) +/// const uint32_t flags; // = 0 +/// const char ** extendedMethodTypes; +/// } + +/// struct _ivar_t { +/// unsigned long int *offset; // pointer to ivar offset location +/// const char *name; +/// const char *type; +/// uint32_t alignment; +/// uint32_t size; +/// } + +/// struct _ivar_list_t { +/// uint32 entsize; // sizeof(struct _ivar_t) +/// uint32 count; +/// struct _ivar_t list[count]; +/// } + +/// struct _class_ro_t { +/// uint32_t flags; +/// uint32_t instanceStart; +/// uint32_t instanceSize; +/// uint32_t reserved; // only when building for 64bit targets +/// const uint8_t *ivarLayout; +/// const char *name; +/// const struct _method_list_t *baseMethods; +/// const struct _protocol_list_t *baseProtocols; +/// const struct _ivar_list_t *ivars; +/// const uint8_t *weakIvarLayout; +/// const struct _prop_list_t *properties; +/// } + +/// struct _class_t { +/// struct _class_t *isa; +/// struct _class_t *superclass; +/// void *cache; +/// IMP *vtable; +/// struct _class_ro_t *ro; +/// } + +/// struct _category_t { +/// const char *name; +/// struct _class_t *cls; +/// const struct _method_list_t *instance_methods; +/// const struct _method_list_t *class_methods; +/// const struct _protocol_list_t *protocols; +/// const struct _prop_list_t *properties; +/// } + +/// MessageRefTy - LLVM for: +/// struct _message_ref_t { +/// IMP messenger; +/// SEL name; +/// }; + +/// SuperMessageRefTy - LLVM for: +/// struct _super_message_ref_t { +/// SUPER_IMP messenger; +/// SEL name; +/// }; + +static void WriteModernMetadataDeclarations(ASTContext *Context, std::string &Result) { + static bool meta_data_declared = false; + if (meta_data_declared) + return; + + Result += "\nstruct _prop_t {\n"; + Result += "\tconst char *name;\n"; + Result += "\tconst char *attributes;\n"; + Result += "};\n"; + + Result += "\nstruct _protocol_t;\n"; + + Result += "\nstruct _objc_method {\n"; + Result += "\tstruct objc_selector * _cmd;\n"; + Result += "\tconst char *method_type;\n"; + Result += "\tvoid *_imp;\n"; + Result += "};\n"; + + Result += "\nstruct _protocol_t {\n"; + Result += "\tvoid * isa; // NULL\n"; + Result += "\tconst char *protocol_name;\n"; + Result += "\tconst struct _protocol_list_t * protocol_list; // super protocols\n"; + Result += "\tconst struct method_list_t *instance_methods;\n"; + Result += "\tconst struct method_list_t *class_methods;\n"; + Result 
+= "\tconst struct method_list_t *optionalInstanceMethods;\n"; + Result += "\tconst struct method_list_t *optionalClassMethods;\n"; + Result += "\tconst struct _prop_list_t * properties;\n"; + Result += "\tconst unsigned int size; // sizeof(struct _protocol_t)\n"; + Result += "\tconst unsigned int flags; // = 0\n"; + Result += "\tconst char ** extendedMethodTypes;\n"; + Result += "};\n"; + + Result += "\nstruct _ivar_t {\n"; + Result += "\tunsigned long int *offset; // pointer to ivar offset location\n"; + Result += "\tconst char *name;\n"; + Result += "\tconst char *type;\n"; + Result += "\tunsigned int alignment;\n"; + Result += "\tunsigned int size;\n"; + Result += "};\n"; + + Result += "\nstruct _class_ro_t {\n"; + Result += "\tunsigned int flags;\n"; + Result += "\tunsigned int instanceStart;\n"; + Result += "\tunsigned int instanceSize;\n"; + const llvm::Triple &Triple(Context->getTargetInfo().getTriple()); + if (Triple.getArch() == llvm::Triple::x86_64) + Result += "\tunsigned int reserved;\n"; + Result += "\tconst unsigned char *ivarLayout;\n"; + Result += "\tconst char *name;\n"; + Result += "\tconst struct _method_list_t *baseMethods;\n"; + Result += "\tconst struct _objc_protocol_list *baseProtocols;\n"; + Result += "\tconst struct _ivar_list_t *ivars;\n"; + Result += "\tconst unsigned char *weakIvarLayout;\n"; + Result += "\tconst struct _prop_list_t *properties;\n"; + Result += "};\n"; + + Result += "\nstruct _class_t {\n"; + Result += "\tstruct _class_t *isa;\n"; + Result += "\tstruct _class_t *superclass;\n"; + Result += "\tvoid *cache;\n"; + Result += "\tvoid *vtable;\n"; + Result += "\tstruct _class_ro_t *ro;\n"; + Result += "};\n"; + + Result += "\nstruct _category_t {\n"; + Result += "\tconst char *name;\n"; + Result += "\tstruct _class_t *cls;\n"; + Result += "\tconst struct _method_list_t *instance_methods;\n"; + Result += "\tconst struct _method_list_t *class_methods;\n"; + Result += "\tconst struct _protocol_list_t *protocols;\n"; + Result += "\tconst struct _prop_list_t *properties;\n"; + Result += "};\n"; + + Result += "extern \"C\" __declspec(dllimport) struct objc_cache _objc_empty_cache;\n"; + Result += "#pragma warning(disable:4273)\n"; + meta_data_declared = true; +} + +static void Write_protocol_list_t_TypeDecl(std::string &Result, + long super_protocol_count) { + Result += "struct /*_protocol_list_t*/"; Result += " {\n"; + Result += "\tlong protocol_count; // Note, this is 32/64 bit\n"; + Result += "\tstruct _protocol_t *super_protocols["; + Result += utostr(super_protocol_count); Result += "];\n"; + Result += "}"; +} + +static void Write_method_list_t_TypeDecl(std::string &Result, + unsigned int method_count) { + Result += "struct /*_method_list_t*/"; Result += " {\n"; + Result += "\tunsigned int entsize; // sizeof(struct _objc_method)\n"; + Result += "\tunsigned int method_count;\n"; + Result += "\tstruct _objc_method method_list["; + Result += utostr(method_count); Result += "];\n"; + Result += "}"; +} + +static void Write__prop_list_t_TypeDecl(std::string &Result, + unsigned int property_count) { + Result += "struct /*_prop_list_t*/"; Result += " {\n"; + Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n"; + Result += "\tunsigned int count_of_properties;\n"; + Result += "\tstruct _prop_t prop_list["; + Result += utostr(property_count); Result += "];\n"; + Result += "}"; +} + +static void Write__ivar_list_t_TypeDecl(std::string &Result, + unsigned int ivar_count) { + Result += "struct /*_ivar_list_t*/"; Result += " {\n"; + Result += 
"\tunsigned int entsize; // sizeof(struct _prop_t)\n"; + Result += "\tunsigned int count;\n"; + Result += "\tstruct _ivar_t ivar_list["; + Result += utostr(ivar_count); Result += "];\n"; + Result += "}"; +} + +static void Write_protocol_list_initializer(ASTContext *Context, std::string &Result, + ArrayRef SuperProtocols, + StringRef VarName, + StringRef ProtocolName) { + if (SuperProtocols.size() > 0) { + Result += "\nstatic "; + Write_protocol_list_t_TypeDecl(Result, SuperProtocols.size()); + Result += " "; Result += VarName; + Result += ProtocolName; + Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; + Result += "\t"; Result += utostr(SuperProtocols.size()); Result += ",\n"; + for (unsigned i = 0, e = SuperProtocols.size(); i < e; i++) { + ObjCProtocolDecl *SuperPD = SuperProtocols[i]; + Result += "\t&"; Result += "_OBJC_PROTOCOL_"; + Result += SuperPD->getNameAsString(); + if (i == e-1) + Result += "\n};\n"; + else + Result += ",\n"; + } + } +} + +static void Write_method_list_t_initializer(RewriteModernObjC &RewriteObj, + ASTContext *Context, std::string &Result, + ArrayRef Methods, + StringRef VarName, + StringRef TopLevelDeclName, + bool MethodImpl) { + if (Methods.size() > 0) { + Result += "\nstatic "; + Write_method_list_t_TypeDecl(Result, Methods.size()); + Result += " "; Result += VarName; + Result += TopLevelDeclName; + Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; + Result += "\t"; Result += "sizeof(_objc_method)"; Result += ",\n"; + Result += "\t"; Result += utostr(Methods.size()); Result += ",\n"; + for (unsigned i = 0, e = Methods.size(); i < e; i++) { + ObjCMethodDecl *MD = Methods[i]; + if (i == 0) + Result += "\t{{(struct objc_selector *)\""; + else + Result += "\t{(struct objc_selector *)\""; + Result += (MD)->getSelector().getAsString(); Result += "\""; + Result += ", "; + std::string MethodTypeString; + Context->getObjCEncodingForMethodDecl(MD, MethodTypeString); + Result += "\""; Result += MethodTypeString; Result += "\""; + Result += ", "; + if (!MethodImpl) + Result += "0"; + else { + Result += "(void *)"; + Result += RewriteObj.MethodInternalNames[MD]; + } + if (i == e-1) + Result += "}}\n"; + else + Result += "},\n"; + } + Result += "};\n"; + } +} + +static void Write_prop_list_t_initializer(RewriteModernObjC &RewriteObj, + ASTContext *Context, std::string &Result, + ArrayRef Properties, + const Decl *Container, + StringRef VarName, + StringRef ProtocolName) { + if (Properties.size() > 0) { + Result += "\nstatic "; + Write__prop_list_t_TypeDecl(Result, Properties.size()); + Result += " "; Result += VarName; + Result += ProtocolName; + Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; + Result += "\t"; Result += "sizeof(_prop_t)"; Result += ",\n"; + Result += "\t"; Result += utostr(Properties.size()); Result += ",\n"; + for (unsigned i = 0, e = Properties.size(); i < e; i++) { + ObjCPropertyDecl *PropDecl = Properties[i]; + if (i == 0) + Result += "\t{{\""; + else + Result += "\t{\""; + Result += PropDecl->getName(); Result += "\","; + std::string PropertyTypeString, QuotePropertyTypeString; + Context->getObjCEncodingForPropertyDecl(PropDecl, Container, PropertyTypeString); + RewriteObj.QuoteDoublequotes(PropertyTypeString, QuotePropertyTypeString); + Result += "\""; Result += QuotePropertyTypeString; Result += "\""; + if (i == e-1) + Result += "}}\n"; + else + Result += "},\n"; + } + Result += "};\n"; + } +} + +// Metadata flags +enum MetaDataDlags { + CLS = 0x0, + CLS_META = 
0x1, + CLS_ROOT = 0x2, + OBJC2_CLS_HIDDEN = 0x10, + CLS_EXCEPTION = 0x20, + + /// (Obsolete) ARC-specific: this class has a .release_ivars method + CLS_HAS_IVAR_RELEASER = 0x40, + /// class was compiled with -fobjc-arr + CLS_COMPILED_BY_ARC = 0x80 // (1<<7) +}; + +static void Write__class_ro_t_initializer(ASTContext *Context, std::string &Result, + unsigned int flags, + const std::string &InstanceStart, + const std::string &InstanceSize, + ArrayRefbaseMethods, + ArrayRefbaseProtocols, + ArrayRefivars, + ArrayRefProperties, + StringRef VarName, + StringRef ClassName) { + Result += "\nstatic struct _class_ro_t "; + Result += VarName; Result += ClassName; + Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; + Result += "\t"; + Result += llvm::utostr(flags); Result += ", "; + Result += InstanceStart; Result += ", "; + Result += InstanceSize; Result += ", \n"; + Result += "\t"; + const llvm::Triple &Triple(Context->getTargetInfo().getTriple()); + if (Triple.getArch() == llvm::Triple::x86_64) + // uint32_t const reserved; // only when building for 64bit targets + Result += "(unsigned int)0, \n\t"; + // const uint8_t * const ivarLayout; + Result += "0, \n\t"; + Result += "\""; Result += ClassName; Result += "\",\n\t"; + bool metaclass = ((flags & CLS_META) != 0); + if (baseMethods.size() > 0) { + Result += "(const struct _method_list_t *)&"; + if (metaclass) + Result += "_OBJC_$_CLASS_METHODS_"; + else + Result += "_OBJC_$_INSTANCE_METHODS_"; + Result += ClassName; + Result += ",\n\t"; + } + else + Result += "0, \n\t"; + + if (!metaclass && baseProtocols.size() > 0) { + Result += "(const struct _objc_protocol_list *)&"; + Result += "_OBJC_CLASS_PROTOCOLS_$_"; Result += ClassName; + Result += ",\n\t"; + } + else + Result += "0, \n\t"; + + if (!metaclass && ivars.size() > 0) { + Result += "(const struct _ivar_list_t *)&"; + Result += "_OBJC_$_INSTANCE_VARIABLES_"; Result += ClassName; + Result += ",\n\t"; + } + else + Result += "0, \n\t"; + + // weakIvarLayout + Result += "0, \n\t"; + if (!metaclass && Properties.size() > 0) { + Result += "(const struct _prop_list_t *)&"; + Result += "_OBJC_$_PROP_LIST_"; Result += ClassName; + Result += ",\n"; + } + else + Result += "0, \n"; + + Result += "};\n"; +} + +static void Write_class_t(ASTContext *Context, std::string &Result, + StringRef VarName, + const ObjCInterfaceDecl *CDecl, bool metaclass) { + bool rootClass = (!CDecl->getSuperClass()); + const ObjCInterfaceDecl *RootClass = CDecl; + + if (!rootClass) { + // Find the Root class + RootClass = CDecl->getSuperClass(); + while (RootClass->getSuperClass()) { + RootClass = RootClass->getSuperClass(); + } + } + + if (metaclass && rootClass) { + // Need to handle a case of use of forward declaration. + Result += "\n"; + Result += "extern \"C\" "; + if (CDecl->getImplementation()) + Result += "__declspec(dllexport) "; + else + Result += "__declspec(dllimport) "; + + Result += "struct _class_t OBJC_CLASS_$_"; + Result += CDecl->getNameAsString(); + Result += ";\n"; + } + // Also, for possibility of 'super' metadata class not having been defined yet. 
+ if (!rootClass) { + ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass(); + Result += "\n"; + Result += "extern \"C\" "; + if (SuperClass->getImplementation()) + Result += "__declspec(dllexport) "; + else + Result += "__declspec(dllimport) "; + + Result += "struct _class_t "; + Result += VarName; + Result += SuperClass->getNameAsString(); + Result += ";\n"; + + if (metaclass && RootClass != SuperClass) { + Result += "extern \"C\" "; + if (RootClass->getImplementation()) + Result += "__declspec(dllexport) "; + else + Result += "__declspec(dllimport) "; + + Result += "struct _class_t "; + Result += VarName; + Result += RootClass->getNameAsString(); + Result += ";\n"; + } + } + + Result += "\nextern \"C\" __declspec(dllexport) struct _class_t "; + Result += VarName; Result += CDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__DATA,__objc_data\"))) = {\n"; + Result += "\t"; + if (metaclass) { + if (!rootClass) { + Result += "0, // &"; Result += VarName; + Result += RootClass->getNameAsString(); + Result += ",\n\t"; + Result += "0, // &"; Result += VarName; + Result += CDecl->getSuperClass()->getNameAsString(); + Result += ",\n\t"; + } + else { + Result += "0, // &"; Result += VarName; + Result += CDecl->getNameAsString(); + Result += ",\n\t"; + Result += "0, // &OBJC_CLASS_$_"; Result += CDecl->getNameAsString(); + Result += ",\n\t"; + } + } + else { + Result += "0, // &OBJC_METACLASS_$_"; + Result += CDecl->getNameAsString(); + Result += ",\n\t"; + if (!rootClass) { + Result += "0, // &"; Result += VarName; + Result += CDecl->getSuperClass()->getNameAsString(); + Result += ",\n\t"; + } + else + Result += "0,\n\t"; + } + Result += "0, // (void *)&_objc_empty_cache,\n\t"; + Result += "0, // unused, was (void *)&_objc_empty_vtable,\n\t"; + if (metaclass) + Result += "&_OBJC_METACLASS_RO_$_"; + else + Result += "&_OBJC_CLASS_RO_$_"; + Result += CDecl->getNameAsString(); + Result += ",\n};\n"; + + // Add static function to initialize some of the meta-data fields. + // avoid doing it twice. + if (metaclass) + return; + + const ObjCInterfaceDecl *SuperClass = + rootClass ? 
CDecl : CDecl->getSuperClass(); + + Result += "static void OBJC_CLASS_SETUP_$_"; + Result += CDecl->getNameAsString(); + Result += "(void ) {\n"; + Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString(); + Result += ".isa = "; Result += "&OBJC_METACLASS_$_"; + Result += RootClass->getNameAsString(); Result += ";\n"; + + Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString(); + Result += ".superclass = "; + if (rootClass) + Result += "&OBJC_CLASS_$_"; + else + Result += "&OBJC_METACLASS_$_"; + + Result += SuperClass->getNameAsString(); Result += ";\n"; + + Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString(); + Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n"; + + Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString(); + Result += ".isa = "; Result += "&OBJC_METACLASS_$_"; + Result += CDecl->getNameAsString(); Result += ";\n"; + + if (!rootClass) { + Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString(); + Result += ".superclass = "; Result += "&OBJC_CLASS_$_"; + Result += SuperClass->getNameAsString(); Result += ";\n"; + } + + Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString(); + Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n"; + Result += "}\n"; +} + +static void Write_category_t(RewriteModernObjC &RewriteObj, ASTContext *Context, + std::string &Result, + ObjCCategoryDecl *CatDecl, + ObjCInterfaceDecl *ClassDecl, + ArrayRef InstanceMethods, + ArrayRef ClassMethods, + ArrayRef RefedProtocols, + ArrayRef ClassProperties) { + StringRef CatName = CatDecl->getName(); + StringRef ClassName = ClassDecl->getName(); + // must declare an extern class object in case this class is not implemented + // in this TU. + Result += "\n"; + Result += "extern \"C\" "; + if (ClassDecl->getImplementation()) + Result += "__declspec(dllexport) "; + else + Result += "__declspec(dllimport) "; + + Result += "struct _class_t "; + Result += "OBJC_CLASS_$_"; Result += ClassName; + Result += ";\n"; + + Result += "\nstatic struct _category_t "; + Result += "_OBJC_$_CATEGORY_"; + Result += ClassName; Result += "_$_"; Result += CatName; + Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n"; + Result += "{\n"; + Result += "\t\""; Result += ClassName; Result += "\",\n"; + Result += "\t0, // &"; Result += "OBJC_CLASS_$_"; Result += ClassName; + Result += ",\n"; + if (InstanceMethods.size() > 0) { + Result += "\t(const struct _method_list_t *)&"; + Result += "_OBJC_$_CATEGORY_INSTANCE_METHODS_"; + Result += ClassName; Result += "_$_"; Result += CatName; + Result += ",\n"; + } + else + Result += "\t0,\n"; + + if (ClassMethods.size() > 0) { + Result += "\t(const struct _method_list_t *)&"; + Result += "_OBJC_$_CATEGORY_CLASS_METHODS_"; + Result += ClassName; Result += "_$_"; Result += CatName; + Result += ",\n"; + } + else + Result += "\t0,\n"; + + if (RefedProtocols.size() > 0) { + Result += "\t(const struct _protocol_list_t *)&"; + Result += "_OBJC_CATEGORY_PROTOCOLS_$_"; + Result += ClassName; Result += "_$_"; Result += CatName; + Result += ",\n"; + } + else + Result += "\t0,\n"; + + if (ClassProperties.size() > 0) { + Result += "\t(const struct _prop_list_t *)&"; Result += "_OBJC_$_PROP_LIST_"; + Result += ClassName; Result += "_$_"; Result += CatName; + Result += ",\n"; + } + else + Result += "\t0,\n"; + + Result += "};\n"; + + // Add static function to initialize the class pointer in the category structure. 
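  // For a hypothetical class Foo with category Bar, the strings appended
  // below produce roughly this setup hook:
  //   static void OBJC_CATEGORY_SETUP_$_Foo_$_Bar(void ) {
  //     _OBJC_$_CATEGORY_Foo_$_Bar.cls = &OBJC_CLASS_$_Foo;
  //   }
  // RewriteCategorySetupInitHook later records its address in the
  // .objc_inithooks$B section so the category's class pointer is patched at
  // startup. Foo and Bar are illustrative names only.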
+ Result += "static void OBJC_CATEGORY_SETUP_$_"; + Result += ClassDecl->getNameAsString(); + Result += "_$_"; + Result += CatName; + Result += "(void ) {\n"; + Result += "\t_OBJC_$_CATEGORY_"; + Result += ClassDecl->getNameAsString(); + Result += "_$_"; + Result += CatName; + Result += ".cls = "; Result += "&OBJC_CLASS_$_"; Result += ClassName; + Result += ";\n}\n"; +} + +static void Write__extendedMethodTypes_initializer(RewriteModernObjC &RewriteObj, + ASTContext *Context, std::string &Result, + ArrayRef Methods, + StringRef VarName, + StringRef ProtocolName) { + if (Methods.size() == 0) + return; + + Result += "\nstatic const char *"; + Result += VarName; Result += ProtocolName; + Result += " [] __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n"; + Result += "{\n"; + for (unsigned i = 0, e = Methods.size(); i < e; i++) { + ObjCMethodDecl *MD = Methods[i]; + std::string MethodTypeString, QuoteMethodTypeString; + Context->getObjCEncodingForMethodDecl(MD, MethodTypeString, true); + RewriteObj.QuoteDoublequotes(MethodTypeString, QuoteMethodTypeString); + Result += "\t\""; Result += QuoteMethodTypeString; Result += "\""; + if (i == e-1) + Result += "\n};\n"; + else { + Result += ",\n"; + } + } +} + +static void Write_IvarOffsetVar(RewriteModernObjC &RewriteObj, + ASTContext *Context, + std::string &Result, + ArrayRef Ivars, + ObjCInterfaceDecl *CDecl) { + // FIXME. visibilty of offset symbols may have to be set; for Darwin + // this is what happens: + /** + if (Ivar->getAccessControl() == ObjCIvarDecl::Private || + Ivar->getAccessControl() == ObjCIvarDecl::Package || + Class->getVisibility() == HiddenVisibility) + Visibility shoud be: HiddenVisibility; + else + Visibility shoud be: DefaultVisibility; + */ + + Result += "\n"; + for (unsigned i =0, e = Ivars.size(); i < e; i++) { + ObjCIvarDecl *IvarDecl = Ivars[i]; + if (Context->getLangOpts().MicrosoftExt) + Result += "__declspec(allocate(\".objc_ivar$B\")) "; + + if (!Context->getLangOpts().MicrosoftExt || + IvarDecl->getAccessControl() == ObjCIvarDecl::Private || + IvarDecl->getAccessControl() == ObjCIvarDecl::Package) + Result += "extern \"C\" unsigned long int "; + else + Result += "extern \"C\" __declspec(dllexport) unsigned long int "; + WriteInternalIvarName(CDecl, IvarDecl, Result); + Result += " __attribute__ ((used, section (\"__DATA,__objc_ivar\")))"; + Result += " = "; + RewriteObj.RewriteIvarOffsetComputation(IvarDecl, Result); + Result += ";\n"; + } +} + +static void Write__ivar_list_t_initializer(RewriteModernObjC &RewriteObj, + ASTContext *Context, std::string &Result, + ArrayRef Ivars, + StringRef VarName, + ObjCInterfaceDecl *CDecl) { + if (Ivars.size() > 0) { + Write_IvarOffsetVar(RewriteObj, Context, Result, Ivars, CDecl); + + Result += "\nstatic "; + Write__ivar_list_t_TypeDecl(Result, Ivars.size()); + Result += " "; Result += VarName; + Result += CDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; + Result += "\t"; Result += "sizeof(_ivar_t)"; Result += ",\n"; + Result += "\t"; Result += utostr(Ivars.size()); Result += ",\n"; + for (unsigned i =0, e = Ivars.size(); i < e; i++) { + ObjCIvarDecl *IvarDecl = Ivars[i]; + if (i == 0) + Result += "\t{{"; + else + Result += "\t {"; + Result += "(unsigned long int *)&"; + WriteInternalIvarName(CDecl, IvarDecl, Result); + Result += ", "; + + Result += "\""; Result += IvarDecl->getName(); Result += "\", "; + std::string IvarTypeString, QuoteIvarTypeString; + Context->getObjCEncodingForType(IvarDecl->getType(), 
IvarTypeString, + IvarDecl); + RewriteObj.QuoteDoublequotes(IvarTypeString, QuoteIvarTypeString); + Result += "\""; Result += QuoteIvarTypeString; Result += "\", "; + + // FIXME. this alignment represents the host alignment and need be changed to + // represent the target alignment. + unsigned Align = Context->getTypeAlign(IvarDecl->getType())/8; + Align = llvm::Log2_32(Align); + Result += llvm::utostr(Align); Result += ", "; + CharUnits Size = Context->getTypeSizeInChars(IvarDecl->getType()); + Result += llvm::utostr(Size.getQuantity()); + if (i == e-1) + Result += "}}\n"; + else + Result += "},\n"; + } + Result += "};\n"; + } +} + +/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data. +void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl, + std::string &Result) { + + // Do not synthesize the protocol more than once. + if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl())) + return; + WriteModernMetadataDeclarations(Context, Result); + + if (ObjCProtocolDecl *Def = PDecl->getDefinition()) + PDecl = Def; + // Must write out all protocol definitions in current qualifier list, + // and in their nested qualifiers before writing out current definition. + for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(), + E = PDecl->protocol_end(); I != E; ++I) + RewriteObjCProtocolMetaData(*I, Result); + + // Construct method lists. + std::vector InstanceMethods, ClassMethods; + std::vector OptInstanceMethods, OptClassMethods; + for (ObjCProtocolDecl::instmeth_iterator + I = PDecl->instmeth_begin(), E = PDecl->instmeth_end(); + I != E; ++I) { + ObjCMethodDecl *MD = *I; + if (MD->getImplementationControl() == ObjCMethodDecl::Optional) { + OptInstanceMethods.push_back(MD); + } else { + InstanceMethods.push_back(MD); + } + } + + for (ObjCProtocolDecl::classmeth_iterator + I = PDecl->classmeth_begin(), E = PDecl->classmeth_end(); + I != E; ++I) { + ObjCMethodDecl *MD = *I; + if (MD->getImplementationControl() == ObjCMethodDecl::Optional) { + OptClassMethods.push_back(MD); + } else { + ClassMethods.push_back(MD); + } + } + std::vector AllMethods; + for (unsigned i = 0, e = InstanceMethods.size(); i < e; i++) + AllMethods.push_back(InstanceMethods[i]); + for (unsigned i = 0, e = ClassMethods.size(); i < e; i++) + AllMethods.push_back(ClassMethods[i]); + for (unsigned i = 0, e = OptInstanceMethods.size(); i < e; i++) + AllMethods.push_back(OptInstanceMethods[i]); + for (unsigned i = 0, e = OptClassMethods.size(); i < e; i++) + AllMethods.push_back(OptClassMethods[i]); + + Write__extendedMethodTypes_initializer(*this, Context, Result, + AllMethods, + "_OBJC_PROTOCOL_METHOD_TYPES_", + PDecl->getNameAsString()); + // Protocol's super protocol list + std::vector SuperProtocols; + for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(), + E = PDecl->protocol_end(); I != E; ++I) + SuperProtocols.push_back(*I); + + Write_protocol_list_initializer(Context, Result, SuperProtocols, + "_OBJC_PROTOCOL_REFS_", + PDecl->getNameAsString()); + + Write_method_list_t_initializer(*this, Context, Result, InstanceMethods, + "_OBJC_PROTOCOL_INSTANCE_METHODS_", + PDecl->getNameAsString(), false); + + Write_method_list_t_initializer(*this, Context, Result, ClassMethods, + "_OBJC_PROTOCOL_CLASS_METHODS_", + PDecl->getNameAsString(), false); + + Write_method_list_t_initializer(*this, Context, Result, OptInstanceMethods, + "_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_", + PDecl->getNameAsString(), false); + + Write_method_list_t_initializer(*this, Context, Result, 
OptClassMethods, + "_OBJC_PROTOCOL_OPT_CLASS_METHODS_", + PDecl->getNameAsString(), false); + + // Protocol's property metadata. + std::vector ProtocolProperties; + for (ObjCContainerDecl::prop_iterator I = PDecl->prop_begin(), + E = PDecl->prop_end(); I != E; ++I) + ProtocolProperties.push_back(*I); + + Write_prop_list_t_initializer(*this, Context, Result, ProtocolProperties, + /* Container */0, + "_OBJC_PROTOCOL_PROPERTIES_", + PDecl->getNameAsString()); + + // Writer out root metadata for current protocol: struct _protocol_t + Result += "\n"; + if (LangOpts.MicrosoftExt) + Result += "static "; + Result += "struct _protocol_t _OBJC_PROTOCOL_"; + Result += PDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__DATA,__datacoal_nt,coalesced\"))) = {\n"; + Result += "\t0,\n"; // id is; is null + Result += "\t\""; Result += PDecl->getNameAsString(); Result += "\",\n"; + if (SuperProtocols.size() > 0) { + Result += "\t(const struct _protocol_list_t *)&"; Result += "_OBJC_PROTOCOL_REFS_"; + Result += PDecl->getNameAsString(); Result += ",\n"; + } + else + Result += "\t0,\n"; + if (InstanceMethods.size() > 0) { + Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_INSTANCE_METHODS_"; + Result += PDecl->getNameAsString(); Result += ",\n"; + } + else + Result += "\t0,\n"; + + if (ClassMethods.size() > 0) { + Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_CLASS_METHODS_"; + Result += PDecl->getNameAsString(); Result += ",\n"; + } + else + Result += "\t0,\n"; + + if (OptInstanceMethods.size() > 0) { + Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_"; + Result += PDecl->getNameAsString(); Result += ",\n"; + } + else + Result += "\t0,\n"; + + if (OptClassMethods.size() > 0) { + Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_CLASS_METHODS_"; + Result += PDecl->getNameAsString(); Result += ",\n"; + } + else + Result += "\t0,\n"; + + if (ProtocolProperties.size() > 0) { + Result += "\t(const struct _prop_list_t *)&_OBJC_PROTOCOL_PROPERTIES_"; + Result += PDecl->getNameAsString(); Result += ",\n"; + } + else + Result += "\t0,\n"; + + Result += "\t"; Result += "sizeof(_protocol_t)"; Result += ",\n"; + Result += "\t0,\n"; + + if (AllMethods.size() > 0) { + Result += "\t(const char **)&"; Result += "_OBJC_PROTOCOL_METHOD_TYPES_"; + Result += PDecl->getNameAsString(); + Result += "\n};\n"; + } + else + Result += "\t0\n};\n"; + + if (LangOpts.MicrosoftExt) + Result += "static "; + Result += "struct _protocol_t *"; + Result += "_OBJC_LABEL_PROTOCOL_$_"; Result += PDecl->getNameAsString(); + Result += " = &_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString(); + Result += ";\n"; + + // Mark this protocol as having been generated. + if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl())) + llvm_unreachable("protocol already synthesized"); + +} + +void RewriteModernObjC::RewriteObjCProtocolListMetaData( + const ObjCList &Protocols, + StringRef prefix, StringRef ClassName, + std::string &Result) { + if (Protocols.empty()) return; + + for (unsigned i = 0; i != Protocols.size(); i++) + RewriteObjCProtocolMetaData(Protocols[i], Result); + + // Output the top lovel protocol meta-data for the class. 
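  // Putting the pieces emitted by RewriteObjCProtocolMetaData (above)
  // together: for a hypothetical protocol P with one required instance
  // method, the generated metadata looks roughly like
  //   static struct _protocol_t _OBJC_PROTOCOL_P
  //     __attribute__ ((used, section ("__DATA,__datacoal_nt,coalesced"))) = {
  //       0, "P", 0,
  //       (const struct method_list_t *)&_OBJC_PROTOCOL_INSTANCE_METHODS_P,
  //       0, 0, 0, 0,
  //       sizeof(_protocol_t), 0,
  //       (const char **)&_OBJC_PROTOCOL_METHOD_TYPES_P
  //   };
  //   static struct _protocol_t *_OBJC_LABEL_PROTOCOL_$_P = &_OBJC_PROTOCOL_P;
  // This is an illustration only; the name P is hypothetical and the exact
  // initializer follows the code above.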
+ /* struct _objc_protocol_list { + struct _objc_protocol_list *next; + int protocol_count; + struct _objc_protocol *class_protocols[]; + } + */ + Result += "\n"; + if (LangOpts.MicrosoftExt) + Result += "__declspec(allocate(\".cat_cls_meth$B\")) "; + Result += "static struct {\n"; + Result += "\tstruct _objc_protocol_list *next;\n"; + Result += "\tint protocol_count;\n"; + Result += "\tstruct _objc_protocol *class_protocols["; + Result += utostr(Protocols.size()); + Result += "];\n} _OBJC_"; + Result += prefix; + Result += "_PROTOCOLS_"; + Result += ClassName; + Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= " + "{\n\t0, "; + Result += utostr(Protocols.size()); + Result += "\n"; + + Result += "\t,{&_OBJC_PROTOCOL_"; + Result += Protocols[0]->getNameAsString(); + Result += " \n"; + + for (unsigned i = 1; i != Protocols.size(); i++) { + Result += "\t ,&_OBJC_PROTOCOL_"; + Result += Protocols[i]->getNameAsString(); + Result += "\n"; + } + Result += "\t }\n};\n"; +} + +/// hasObjCExceptionAttribute - Return true if this class or any super +/// class has the __objc_exception__ attribute. +/// FIXME. Move this to ASTContext.cpp as it is also used for IRGen. +static bool hasObjCExceptionAttribute(ASTContext &Context, + const ObjCInterfaceDecl *OID) { + if (OID->hasAttr()) + return true; + if (const ObjCInterfaceDecl *Super = OID->getSuperClass()) + return hasObjCExceptionAttribute(Context, Super); + return false; +} + +void RewriteModernObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, + std::string &Result) { + ObjCInterfaceDecl *CDecl = IDecl->getClassInterface(); + + // Explicitly declared @interface's are already synthesized. + if (CDecl->isImplicitInterfaceDecl()) + assert(false && + "Legacy implicit interface rewriting not supported in moder abi"); + + WriteModernMetadataDeclarations(Context, Result); + SmallVector IVars; + + for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin(); + IVD; IVD = IVD->getNextIvar()) { + // Ignore unnamed bit-fields. + if (!IVD->getDeclName()) + continue; + IVars.push_back(IVD); + } + + Write__ivar_list_t_initializer(*this, Context, Result, IVars, + "_OBJC_$_INSTANCE_VARIABLES_", + CDecl); + + // Build _objc_method_list for class's instance methods if needed + SmallVector + InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end()); + + // If any of our property implementations have associated getters or + // setters, produce metadata for them as well. 
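  // Note (illustration, names assumed): for a synthesized property such as
  //   @property (nonatomic) int count;
  // the loop below appends the previously synthesized getter/setter method
  // decls to InstanceMethods, so they are listed in
  // _OBJC_$_INSTANCE_METHODS_<Class> exactly like hand-written methods;
  // dynamic properties and properties without a backing ivar are skipped.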
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(), + PropEnd = IDecl->propimpl_end(); + Prop != PropEnd; ++Prop) { + if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) + continue; + if (!Prop->getPropertyIvarDecl()) + continue; + ObjCPropertyDecl *PD = Prop->getPropertyDecl(); + if (!PD) + continue; + if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl()) + if (mustSynthesizeSetterGetterMethod(IDecl, PD, true /*getter*/)) + InstanceMethods.push_back(Getter); + if (PD->isReadOnly()) + continue; + if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl()) + if (mustSynthesizeSetterGetterMethod(IDecl, PD, false /*setter*/)) + InstanceMethods.push_back(Setter); + } + + Write_method_list_t_initializer(*this, Context, Result, InstanceMethods, + "_OBJC_$_INSTANCE_METHODS_", + IDecl->getNameAsString(), true); + + SmallVector + ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end()); + + Write_method_list_t_initializer(*this, Context, Result, ClassMethods, + "_OBJC_$_CLASS_METHODS_", + IDecl->getNameAsString(), true); + + // Protocols referenced in class declaration? + // Protocol's super protocol list + std::vector RefedProtocols; + const ObjCList &Protocols = CDecl->getReferencedProtocols(); + for (ObjCList::iterator I = Protocols.begin(), + E = Protocols.end(); + I != E; ++I) { + RefedProtocols.push_back(*I); + // Must write out all protocol definitions in current qualifier list, + // and in their nested qualifiers before writing out current definition. + RewriteObjCProtocolMetaData(*I, Result); + } + + Write_protocol_list_initializer(Context, Result, + RefedProtocols, + "_OBJC_CLASS_PROTOCOLS_$_", + IDecl->getNameAsString()); + + // Protocol's property metadata. + std::vector ClassProperties; + for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(), + E = CDecl->prop_end(); I != E; ++I) + ClassProperties.push_back(*I); + + Write_prop_list_t_initializer(*this, Context, Result, ClassProperties, + /* Container */IDecl, + "_OBJC_$_PROP_LIST_", + CDecl->getNameAsString()); + + + // Data for initializing _class_ro_t metaclass meta-data + uint32_t flags = CLS_META; + std::string InstanceSize; + std::string InstanceStart; + + + bool classIsHidden = CDecl->getVisibility() == HiddenVisibility; + if (classIsHidden) + flags |= OBJC2_CLS_HIDDEN; + + if (!CDecl->getSuperClass()) + // class is root + flags |= CLS_ROOT; + InstanceSize = "sizeof(struct _class_t)"; + InstanceStart = InstanceSize; + Write__class_ro_t_initializer(Context, Result, flags, + InstanceStart, InstanceSize, + ClassMethods, + 0, + 0, + 0, + "_OBJC_METACLASS_RO_$_", + CDecl->getNameAsString()); + + + // Data for initializing _class_ro_t meta-data + flags = CLS; + if (classIsHidden) + flags |= OBJC2_CLS_HIDDEN; + + if (hasObjCExceptionAttribute(*Context, CDecl)) + flags |= CLS_EXCEPTION; + + if (!CDecl->getSuperClass()) + // class is root + flags |= CLS_ROOT; + + InstanceSize.clear(); + InstanceStart.clear(); + if (!ObjCSynthesizedStructs.count(CDecl)) { + InstanceSize = "0"; + InstanceStart = "0"; + } + else { + InstanceSize = "sizeof(struct "; + InstanceSize += CDecl->getNameAsString(); + InstanceSize += "_IMPL)"; + + ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin(); + if (IVD) { + RewriteIvarOffsetComputation(IVD, InstanceStart); + } + else + InstanceStart = InstanceSize; + } + Write__class_ro_t_initializer(Context, Result, flags, + InstanceStart, InstanceSize, + InstanceMethods, + RefedProtocols, + IVars, + ClassProperties, + "_OBJC_CLASS_RO_$_", + CDecl->getNameAsString()); + + 
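  // Worked example of the flag arithmetic above (constants from the
  // metadata-flags enum earlier in this file): for a hypothetical root class
  // that has hidden visibility and the __objc_exception__ attribute,
  //   metaclass _class_ro_t flags = CLS_META | OBJC2_CLS_HIDDEN | CLS_ROOT
  //                               = 0x1 | 0x10 | 0x2        = 0x13
  //   class     _class_ro_t flags = CLS | OBJC2_CLS_HIDDEN | CLS_EXCEPTION | CLS_ROOT
  //                               = 0x0 | 0x10 | 0x20 | 0x2 = 0x32
  // i.e. llvm::utostr writes the decimal strings "19" and "50" into the two
  // initializers emitted above.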
Write_class_t(Context, Result, + "OBJC_METACLASS_$_", + CDecl, /*metaclass*/true); + + Write_class_t(Context, Result, + "OBJC_CLASS_$_", + CDecl, /*metaclass*/false); + + if (ImplementationIsNonLazy(IDecl)) + DefinedNonLazyClasses.push_back(CDecl); + +} + +void RewriteModernObjC::RewriteClassSetupInitHook(std::string &Result) { + int ClsDefCount = ClassImplementation.size(); + if (!ClsDefCount) + return; + Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n"; + Result += "__declspec(allocate(\".objc_inithooks$B\")) "; + Result += "static void *OBJC_CLASS_SETUP[] = {\n"; + for (int i = 0; i < ClsDefCount; i++) { + ObjCImplementationDecl *IDecl = ClassImplementation[i]; + ObjCInterfaceDecl *CDecl = IDecl->getClassInterface(); + Result += "\t(void *)&OBJC_CLASS_SETUP_$_"; + Result += CDecl->getName(); Result += ",\n"; + } + Result += "};\n"; +} + +void RewriteModernObjC::RewriteMetaDataIntoBuffer(std::string &Result) { + int ClsDefCount = ClassImplementation.size(); + int CatDefCount = CategoryImplementation.size(); + + // For each implemented class, write out all its meta data. + for (int i = 0; i < ClsDefCount; i++) + RewriteObjCClassMetaData(ClassImplementation[i], Result); + + RewriteClassSetupInitHook(Result); + + // For each implemented category, write out all its meta data. + for (int i = 0; i < CatDefCount; i++) + RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result); + + RewriteCategorySetupInitHook(Result); + + if (ClsDefCount > 0) { + if (LangOpts.MicrosoftExt) + Result += "__declspec(allocate(\".objc_classlist$B\")) "; + Result += "static struct _class_t *L_OBJC_LABEL_CLASS_$ ["; + Result += llvm::utostr(ClsDefCount); Result += "]"; + Result += + " __attribute__((used, section (\"__DATA, __objc_classlist," + "regular,no_dead_strip\")))= {\n"; + for (int i = 0; i < ClsDefCount; i++) { + Result += "\t&OBJC_CLASS_$_"; + Result += ClassImplementation[i]->getNameAsString(); + Result += ",\n"; + } + Result += "};\n"; + + if (!DefinedNonLazyClasses.empty()) { + if (LangOpts.MicrosoftExt) + Result += "__declspec(allocate(\".objc_nlclslist$B\")) \n"; + Result += "static struct _class_t *_OBJC_LABEL_NONLAZY_CLASS_$[] = {\n\t"; + for (unsigned i = 0, e = DefinedNonLazyClasses.size(); i < e; i++) { + Result += "\t&OBJC_CLASS_$_"; Result += DefinedNonLazyClasses[i]->getNameAsString(); + Result += ",\n"; + } + Result += "};\n"; + } + } + + if (CatDefCount > 0) { + if (LangOpts.MicrosoftExt) + Result += "__declspec(allocate(\".objc_catlist$B\")) "; + Result += "static struct _category_t *L_OBJC_LABEL_CATEGORY_$ ["; + Result += llvm::utostr(CatDefCount); Result += "]"; + Result += + " __attribute__((used, section (\"__DATA, __objc_catlist," + "regular,no_dead_strip\")))= {\n"; + for (int i = 0; i < CatDefCount; i++) { + Result += "\t&_OBJC_$_CATEGORY_"; + Result += + CategoryImplementation[i]->getClassInterface()->getNameAsString(); + Result += "_$_"; + Result += CategoryImplementation[i]->getNameAsString(); + Result += ",\n"; + } + Result += "};\n"; + } + + if (!DefinedNonLazyCategories.empty()) { + if (LangOpts.MicrosoftExt) + Result += "__declspec(allocate(\".objc_nlcatlist$B\")) \n"; + Result += "static struct _category_t *_OBJC_LABEL_NONLAZY_CATEGORY_$[] = {\n\t"; + for (unsigned i = 0, e = DefinedNonLazyCategories.size(); i < e; i++) { + Result += "\t&_OBJC_$_CATEGORY_"; + Result += + DefinedNonLazyCategories[i]->getClassInterface()->getNameAsString(); + Result += "_$_"; + Result += DefinedNonLazyCategories[i]->getNameAsString(); + Result += ",\n"; + } + Result 
+= "};\n"; + } +} + +void RewriteModernObjC::WriteImageInfo(std::string &Result) { + if (LangOpts.MicrosoftExt) + Result += "__declspec(allocate(\".objc_imageinfo$B\")) \n"; + + Result += "static struct IMAGE_INFO { unsigned version; unsigned flag; } "; + // version 0, ObjCABI is 2 + Result += "_OBJC_IMAGE_INFO = { 0, 2 };\n"; +} + +/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category +/// implementation. +void RewriteModernObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl, + std::string &Result) { + WriteModernMetadataDeclarations(Context, Result); + ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface(); + // Find category declaration for this implementation. + ObjCCategoryDecl *CDecl=0; + for (CDecl = ClassDecl->getCategoryList(); CDecl; + CDecl = CDecl->getNextClassCategory()) + if (CDecl->getIdentifier() == IDecl->getIdentifier()) + break; + + std::string FullCategoryName = ClassDecl->getNameAsString(); + FullCategoryName += "_$_"; + FullCategoryName += CDecl->getNameAsString(); + + // Build _objc_method_list for class's instance methods if needed + SmallVector + InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end()); + + // If any of our property implementations have associated getters or + // setters, produce metadata for them as well. + for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(), + PropEnd = IDecl->propimpl_end(); + Prop != PropEnd; ++Prop) { + if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) + continue; + if (!Prop->getPropertyIvarDecl()) + continue; + ObjCPropertyDecl *PD = Prop->getPropertyDecl(); + if (!PD) + continue; + if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl()) + InstanceMethods.push_back(Getter); + if (PD->isReadOnly()) + continue; + if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl()) + InstanceMethods.push_back(Setter); + } + + Write_method_list_t_initializer(*this, Context, Result, InstanceMethods, + "_OBJC_$_CATEGORY_INSTANCE_METHODS_", + FullCategoryName, true); + + SmallVector + ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end()); + + Write_method_list_t_initializer(*this, Context, Result, ClassMethods, + "_OBJC_$_CATEGORY_CLASS_METHODS_", + FullCategoryName, true); + + // Protocols referenced in class declaration? + // Protocol's super protocol list + std::vector RefedProtocols; + for (ObjCInterfaceDecl::protocol_iterator I = CDecl->protocol_begin(), + E = CDecl->protocol_end(); + + I != E; ++I) { + RefedProtocols.push_back(*I); + // Must write out all protocol definitions in current qualifier list, + // and in their nested qualifiers before writing out current definition. + RewriteObjCProtocolMetaData(*I, Result); + } + + Write_protocol_list_initializer(Context, Result, + RefedProtocols, + "_OBJC_CATEGORY_PROTOCOLS_$_", + FullCategoryName); + + // Protocol's property metadata. + std::vector ClassProperties; + for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(), + E = CDecl->prop_end(); I != E; ++I) + ClassProperties.push_back(*I); + + Write_prop_list_t_initializer(*this, Context, Result, ClassProperties, + /* Container */IDecl, + "_OBJC_$_PROP_LIST_", + FullCategoryName); + + Write_category_t(*this, Context, Result, + CDecl, + ClassDecl, + InstanceMethods, + ClassMethods, + RefedProtocols, + ClassProperties); + + // Determine if this category is also "non-lazy". 
+ if (ImplementationIsNonLazy(IDecl)) + DefinedNonLazyCategories.push_back(CDecl); + +} + +void RewriteModernObjC::RewriteCategorySetupInitHook(std::string &Result) { + int CatDefCount = CategoryImplementation.size(); + if (!CatDefCount) + return; + Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n"; + Result += "__declspec(allocate(\".objc_inithooks$B\")) "; + Result += "static void *OBJC_CATEGORY_SETUP[] = {\n"; + for (int i = 0; i < CatDefCount; i++) { + ObjCCategoryImplDecl *IDecl = CategoryImplementation[i]; + ObjCCategoryDecl *CatDecl= IDecl->getCategoryDecl(); + ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface(); + Result += "\t(void *)&OBJC_CATEGORY_SETUP_$_"; + Result += ClassDecl->getName(); + Result += "_$_"; + Result += CatDecl->getName(); + Result += ",\n"; + } + Result += "};\n"; +} + +// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or +/// class methods. +template +void RewriteModernObjC::RewriteObjCMethodsMetaData(MethodIterator MethodBegin, + MethodIterator MethodEnd, + bool IsInstanceMethod, + StringRef prefix, + StringRef ClassName, + std::string &Result) { + if (MethodBegin == MethodEnd) return; + + if (!objc_impl_method) { + /* struct _objc_method { + SEL _cmd; + char *method_types; + void *_imp; + } + */ + Result += "\nstruct _objc_method {\n"; + Result += "\tSEL _cmd;\n"; + Result += "\tchar *method_types;\n"; + Result += "\tvoid *_imp;\n"; + Result += "};\n"; + + objc_impl_method = true; + } + + // Build _objc_method_list for class's methods if needed + + /* struct { + struct _objc_method_list *next_method; + int method_count; + struct _objc_method method_list[]; + } + */ + unsigned NumMethods = std::distance(MethodBegin, MethodEnd); + Result += "\n"; + if (LangOpts.MicrosoftExt) { + if (IsInstanceMethod) + Result += "__declspec(allocate(\".inst_meth$B\")) "; + else + Result += "__declspec(allocate(\".cls_meth$B\")) "; + } + Result += "static struct {\n"; + Result += "\tstruct _objc_method_list *next_method;\n"; + Result += "\tint method_count;\n"; + Result += "\tstruct _objc_method method_list["; + Result += utostr(NumMethods); + Result += "];\n} _OBJC_"; + Result += prefix; + Result += IsInstanceMethod ? "INSTANCE" : "CLASS"; + Result += "_METHODS_"; + Result += ClassName; + Result += " __attribute__ ((used, section (\"__OBJC, __"; + Result += IsInstanceMethod ? "inst" : "cls"; + Result += "_meth\")))= "; + Result += "{\n\t0, " + utostr(NumMethods) + "\n"; + + Result += "\t,{{(SEL)\""; + Result += (*MethodBegin)->getSelector().getAsString().c_str(); + std::string MethodTypeString; + Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString); + Result += "\", \""; + Result += MethodTypeString; + Result += "\", (void *)"; + Result += MethodInternalNames[*MethodBegin]; + Result += "}\n"; + for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) { + Result += "\t ,{(SEL)\""; + Result += (*MethodBegin)->getSelector().getAsString().c_str(); + std::string MethodTypeString; + Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString); + Result += "\", \""; + Result += MethodTypeString; + Result += "\", (void *)"; + Result += MethodInternalNames[*MethodBegin]; + Result += "}\n"; + } + Result += "\t }\n};\n"; +} + +Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) { + SourceRange OldRange = IV->getSourceRange(); + Expr *BaseExpr = IV->getBase(); + + // Rewrite the base, but without actually doing replaces. 
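// Illustrative sketch (editorial; hypothetical class "Foo" and an illustrative
// type-encoding string, not taken from the imported sources): the shape of the
// fragile-ABI method-list metadata whose layout RewriteObjCMethodsMetaData
// prints above, instantiated for a single instance method "doSomething".
typedef struct objc_selector *SEL;

struct _objc_method {
  SEL _cmd;             // selector, emitted as a casted string literal
  char *method_types;   // @encode-style signature string
  void *_imp;           // address of the rewritten static C function
};

// Hypothetical rewritten method body; the real name comes from MethodInternalNames.
static void _I_Foo_doSomething(void *self, SEL _cmd) { (void)self; (void)_cmd; }

static struct {
  struct _objc_method_list *next_method;
  int method_count;
  struct _objc_method method_list[1];
} _OBJC_INSTANCE_METHODS_Foo = {
  0, 1,
  { { (SEL)"doSomething", (char *)"v8@0:4", (void *)_I_Foo_doSomething } }
};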
+ { + DisableReplaceStmtScope S(*this); + BaseExpr = cast(RewriteFunctionBodyOrGlobalInitializer(BaseExpr)); + IV->setBase(BaseExpr); + } + + ObjCIvarDecl *D = IV->getDecl(); + + Expr *Replacement = IV; + + if (BaseExpr->getType()->isObjCObjectPointerType()) { + const ObjCInterfaceType *iFaceDecl = + dyn_cast(BaseExpr->getType()->getPointeeType()); + assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null"); + // lookup which class implements the instance variable. + ObjCInterfaceDecl *clsDeclared = 0; + iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(), + clsDeclared); + assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class"); + + // Build name of symbol holding ivar offset. + std::string IvarOffsetName; + WriteInternalIvarName(clsDeclared, D, IvarOffsetName); + + ReferencedIvars[clsDeclared].insert(D); + + // cast offset to "char *". + CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(Context->CharTy), + CK_BitCast, + BaseExpr); + VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(), + SourceLocation(), &Context->Idents.get(IvarOffsetName), + Context->UnsignedLongTy, 0, SC_Extern, SC_None); + DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, + Context->UnsignedLongTy, VK_LValue, + SourceLocation()); + BinaryOperator *addExpr = + new (Context) BinaryOperator(castExpr, DRE, BO_Add, + Context->getPointerType(Context->CharTy), + VK_RValue, OK_Ordinary, SourceLocation(), false); + // Don't forget the parens to enforce the proper binding. + ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), + SourceLocation(), + addExpr); + QualType IvarT = D->getType(); + + if (!isa(IvarT) && IvarT->isRecordType()) { + RecordDecl *RD = IvarT->getAs()->getDecl(); + RD = RD->getDefinition(); + if (RD && !RD->getDeclName().getAsIdentifierInfo()) { + // decltype(((Foo_IMPL*)0)->bar) * + ObjCContainerDecl *CDecl = + dyn_cast(D->getDeclContext()); + // ivar in class extensions requires special treatment. 
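// Illustrative sketch (editorial; hypothetical struct, not taken from the
// imported sources) of the "decltype(((Foo_IMPL *)0)->bar)" device the
// surrounding code builds for ivars of unnamed record types: a member access
// on a null Foo_IMPL pointer inside decltype (an unevaluated context) spells
// the ivar's exact type without having to name it.
struct Foo_IMPL {
  struct { int x; int y; } bar;   // ivar whose type is an unnamed struct
};

// A pointer to the ivar's otherwise-unnamable type, recovered via decltype.
static decltype(((Foo_IMPL *)0)->bar) *bar_ptr = 0;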
+ if (ObjCCategoryDecl *CatDecl = dyn_cast(CDecl)) + CDecl = CatDecl->getClassInterface(); + std::string RecName = CDecl->getName(); + RecName += "_IMPL"; + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get(RecName.c_str())); + QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD)); + unsigned UnsignedIntSize = + static_cast(Context->getTypeSize(Context->UnsignedIntTy)); + Expr *Zero = IntegerLiteral::Create(*Context, + llvm::APInt(UnsignedIntSize, 0), + Context->UnsignedIntTy, SourceLocation()); + Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero); + ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), + Zero); + FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get(D->getNameAsString()), + IvarT, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), + FD->getType(), VK_LValue, + OK_Ordinary); + IvarT = Context->getDecltypeType(ME, ME->getType()); + } + } + convertObjCTypeToCStyleType(IvarT); + QualType castT = Context->getPointerType(IvarT); + + castExpr = NoTypeInfoCStyleCastExpr(Context, + castT, + CK_BitCast, + PE); + + + Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT, + VK_LValue, OK_Ordinary, + SourceLocation()); + PE = new (Context) ParenExpr(OldRange.getBegin(), + OldRange.getEnd(), + Exp); + + Replacement = PE; + } + + ReplaceStmtWithRange(IV, Replacement, OldRange); + return Replacement; +} diff --git a/lib/Rewrite/Frontend/RewriteObjC.cpp b/lib/Rewrite/Frontend/RewriteObjC.cpp new file mode 100644 index 0000000..a6dcc6b --- /dev/null +++ b/lib/Rewrite/Frontend/RewriteObjC.cpp @@ -0,0 +1,6029 @@ +//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Hacks and fun related to the code rewriter. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Frontend/ASTConsumers.h" +#include "clang/Rewrite/Core/Rewriter.h" +#include "clang/AST/AST.h" +#include "clang/AST/ASTConsumer.h" +#include "clang/AST/ParentMap.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/IdentifierTable.h" +#include "clang/Basic/Diagnostic.h" +#include "clang/Lex/Lexer.h" +#include "llvm/Support/MemoryBuffer.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/OwningPtr.h" +#include "llvm/ADT/DenseSet.h" + +using namespace clang; +using llvm::utostr; + +namespace { + class RewriteObjC : public ASTConsumer { + protected: + + enum { + BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)), + block, ... 
*/ + BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */ + BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the + __block variable */ + BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy + helpers */ + BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose + support routines */ + BLOCK_BYREF_CURRENT_MAX = 256 + }; + + enum { + BLOCK_NEEDS_FREE = (1 << 24), + BLOCK_HAS_COPY_DISPOSE = (1 << 25), + BLOCK_HAS_CXX_OBJ = (1 << 26), + BLOCK_IS_GC = (1 << 27), + BLOCK_IS_GLOBAL = (1 << 28), + BLOCK_HAS_DESCRIPTOR = (1 << 29) + }; + static const int OBJC_ABI_VERSION = 7; + + Rewriter Rewrite; + DiagnosticsEngine &Diags; + const LangOptions &LangOpts; + ASTContext *Context; + SourceManager *SM; + TranslationUnitDecl *TUDecl; + FileID MainFileID; + const char *MainFileStart, *MainFileEnd; + Stmt *CurrentBody; + ParentMap *PropParentMap; // created lazily. + std::string InFileName; + raw_ostream* OutFile; + std::string Preamble; + + TypeDecl *ProtocolTypeDecl; + VarDecl *GlobalVarDecl; + unsigned RewriteFailedDiag; + // ObjC string constant support. + unsigned NumObjCStringLiterals; + VarDecl *ConstantStringClassReference; + RecordDecl *NSStringRecord; + + // ObjC foreach break/continue generation support. + int BcLabelCount; + + unsigned TryFinallyContainsReturnDiag; + // Needed for super. + ObjCMethodDecl *CurMethodDef; + RecordDecl *SuperStructDecl; + RecordDecl *ConstantStringDecl; + + FunctionDecl *MsgSendFunctionDecl; + FunctionDecl *MsgSendSuperFunctionDecl; + FunctionDecl *MsgSendStretFunctionDecl; + FunctionDecl *MsgSendSuperStretFunctionDecl; + FunctionDecl *MsgSendFpretFunctionDecl; + FunctionDecl *GetClassFunctionDecl; + FunctionDecl *GetMetaClassFunctionDecl; + FunctionDecl *GetSuperClassFunctionDecl; + FunctionDecl *SelGetUidFunctionDecl; + FunctionDecl *CFStringFunctionDecl; + FunctionDecl *SuperContructorFunctionDecl; + FunctionDecl *CurFunctionDef; + FunctionDecl *CurFunctionDeclToDeclareForBlock; + + /* Misc. containers needed for meta-data rewrite. */ + SmallVector ClassImplementation; + SmallVector CategoryImplementation; + llvm::SmallPtrSet ObjCSynthesizedStructs; + llvm::SmallPtrSet ObjCSynthesizedProtocols; + llvm::SmallPtrSet ObjCForwardDecls; + llvm::DenseMap MethodInternalNames; + SmallVector Stmts; + SmallVector ObjCBcLabelNo; + // Remember all the @protocol() expressions. + llvm::SmallPtrSet ProtocolExprDecls; + + llvm::DenseSet CopyDestroyCache; + + // Block expressions. + SmallVector Blocks; + SmallVector InnerDeclRefsCount; + SmallVector InnerDeclRefs; + + SmallVector BlockDeclRefs; + + // Block related declarations. + SmallVector BlockByCopyDecls; + llvm::SmallPtrSet BlockByCopyDeclsPtrSet; + SmallVector BlockByRefDecls; + llvm::SmallPtrSet BlockByRefDeclsPtrSet; + llvm::DenseMap BlockByRefDeclNo; + llvm::SmallPtrSet ImportedBlockDecls; + llvm::SmallPtrSet ImportedLocalExternalDecls; + + llvm::DenseMap RewrittenBlockExprs; + + // This maps an original source AST to it's rewritten form. This allows + // us to avoid rewriting the same node twice (which is very uncommon). + // This is needed to support some of the exotic property rewriting. 
+ llvm::DenseMap ReplacedNodes; + + // Needed for header files being rewritten + bool IsHeader; + bool SilenceRewriteMacroWarning; + bool objc_impl_method; + + bool DisableReplaceStmt; + class DisableReplaceStmtScope { + RewriteObjC &R; + bool SavedValue; + + public: + DisableReplaceStmtScope(RewriteObjC &R) + : R(R), SavedValue(R.DisableReplaceStmt) { + R.DisableReplaceStmt = true; + } + ~DisableReplaceStmtScope() { + R.DisableReplaceStmt = SavedValue; + } + }; + void InitializeCommon(ASTContext &context); + + public: + + // Top Level Driver code. + virtual bool HandleTopLevelDecl(DeclGroupRef D) { + for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { + if (ObjCInterfaceDecl *Class = dyn_cast(*I)) { + if (!Class->isThisDeclarationADefinition()) { + RewriteForwardClassDecl(D); + break; + } + } + + if (ObjCProtocolDecl *Proto = dyn_cast(*I)) { + if (!Proto->isThisDeclarationADefinition()) { + RewriteForwardProtocolDecl(D); + break; + } + } + + HandleTopLevelSingleDecl(*I); + } + return true; + } + void HandleTopLevelSingleDecl(Decl *D); + void HandleDeclInMainFile(Decl *D); + RewriteObjC(std::string inFile, raw_ostream *OS, + DiagnosticsEngine &D, const LangOptions &LOpts, + bool silenceMacroWarn); + + ~RewriteObjC() {} + + virtual void HandleTranslationUnit(ASTContext &C); + + void ReplaceStmt(Stmt *Old, Stmt *New) { + Stmt *ReplacingStmt = ReplacedNodes[Old]; + + if (ReplacingStmt) + return; // We can't rewrite the same node twice. + + if (DisableReplaceStmt) + return; + + // If replacement succeeded or warning disabled return with no warning. + if (!Rewrite.ReplaceStmt(Old, New)) { + ReplacedNodes[Old] = New; + return; + } + if (SilenceRewriteMacroWarning) + return; + Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) + << Old->getSourceRange(); + } + + void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) { + if (DisableReplaceStmt) + return; + + // Measure the old text. + int Size = Rewrite.getRangeSize(SrcRange); + if (Size == -1) { + Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) + << Old->getSourceRange(); + return; + } + // Get the new text. + std::string SStr; + llvm::raw_string_ostream S(SStr); + New->printPretty(S, 0, PrintingPolicy(LangOpts)); + const std::string &Str = S.str(); + + // If replacement succeeded or warning disabled return with no warning. + if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) { + ReplacedNodes[Old] = New; + return; + } + if (SilenceRewriteMacroWarning) + return; + Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) + << Old->getSourceRange(); + } + + void InsertText(SourceLocation Loc, StringRef Str, + bool InsertAfter = true) { + // If insertion succeeded or warning disabled return with no warning. + if (!Rewrite.InsertText(Loc, Str, InsertAfter) || + SilenceRewriteMacroWarning) + return; + + Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag); + } + + void ReplaceText(SourceLocation Start, unsigned OrigLength, + StringRef Str) { + // If removal succeeded or warning disabled return with no warning. + if (!Rewrite.ReplaceText(Start, OrigLength, Str) || + SilenceRewriteMacroWarning) + return; + + Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag); + } + + // Syntactic Rewriting. 
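// Illustrative, self-contained sketch (editorial; hypothetical "FlagOwner"
// type, not taken from the imported sources) of the save/force/restore pattern
// that DisableReplaceStmtScope above implements: while the scope object is
// alive, ReplaceStmt()/ReplaceStmtWithRange() bail out early, and the previous
// flag value is restored on destruction, even on early returns.
#include <cassert>

struct FlagOwner {
  bool DisableReplaceStmt;
  FlagOwner() : DisableReplaceStmt(false) {}
};

class DisableScope {
  FlagOwner &R;
  bool SavedValue;
public:
  explicit DisableScope(FlagOwner &R) : R(R), SavedValue(R.DisableReplaceStmt) {
    R.DisableReplaceStmt = true;                         // suppress replacements in this scope
  }
  ~DisableScope() { R.DisableReplaceStmt = SavedValue; } // restore the old value
};

int main() {
  FlagOwner R;
  {
    DisableScope S(R);
    assert(R.DisableReplaceStmt);                        // replacements suppressed here
  }
  assert(!R.DisableReplaceStmt);                         // restored once the scope ends
  return 0;
}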
+ void RewriteRecordBody(RecordDecl *RD); + void RewriteInclude(); + void RewriteForwardClassDecl(DeclGroupRef D); + void RewriteForwardClassDecl(const llvm::SmallVector &DG); + void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl, + const std::string &typedefString); + void RewriteImplementations(); + void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, + ObjCImplementationDecl *IMD, + ObjCCategoryImplDecl *CID); + void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl); + void RewriteImplementationDecl(Decl *Dcl); + void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl, + ObjCMethodDecl *MDecl, std::string &ResultStr); + void RewriteTypeIntoString(QualType T, std::string &ResultStr, + const FunctionType *&FPRetType); + void RewriteByRefString(std::string &ResultStr, const std::string &Name, + ValueDecl *VD, bool def=false); + void RewriteCategoryDecl(ObjCCategoryDecl *Dcl); + void RewriteProtocolDecl(ObjCProtocolDecl *Dcl); + void RewriteForwardProtocolDecl(DeclGroupRef D); + void RewriteForwardProtocolDecl(const llvm::SmallVector &DG); + void RewriteMethodDeclaration(ObjCMethodDecl *Method); + void RewriteProperty(ObjCPropertyDecl *prop); + void RewriteFunctionDecl(FunctionDecl *FD); + void RewriteBlockPointerType(std::string& Str, QualType Type); + void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD); + void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD); + void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl); + void RewriteTypeOfDecl(VarDecl *VD); + void RewriteObjCQualifiedInterfaceTypes(Expr *E); + + // Expression Rewriting. + Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S); + Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp); + Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo); + Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo); + Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp); + Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp); + Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp); + Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp); + void RewriteTryReturnStmts(Stmt *S); + void RewriteSyncReturnStmts(Stmt *S, std::string buf); + Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S); + Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S); + Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S); + Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, + SourceLocation OrigEnd); + Stmt *RewriteBreakStmt(BreakStmt *S); + Stmt *RewriteContinueStmt(ContinueStmt *S); + void RewriteCastExpr(CStyleCastExpr *CE); + + // Block rewriting. + void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D); + + // Block specific rewrite rules. + void RewriteBlockPointerDecl(NamedDecl *VD); + void RewriteByRefVar(VarDecl *VD); + Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD); + Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE); + void RewriteBlockPointerFunctionArgs(FunctionDecl *FD); + + void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl, + std::string &Result); + + virtual void Initialize(ASTContext &context) = 0; + + // Metadata Rewriting. 
+ virtual void RewriteMetaDataIntoBuffer(std::string &Result) = 0; + virtual void RewriteObjCProtocolListMetaData(const ObjCList &Prots, + StringRef prefix, + StringRef ClassName, + std::string &Result) = 0; + virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl, + std::string &Result) = 0; + virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol, + StringRef prefix, + StringRef ClassName, + std::string &Result) = 0; + virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, + std::string &Result) = 0; + + // Rewriting ivar access + virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) = 0; + virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, + std::string &Result) = 0; + + // Misc. AST transformation routines. Sometimes they end up calling + // rewriting routines on the new ASTs. + CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD, + Expr **args, unsigned nargs, + SourceLocation StartLoc=SourceLocation(), + SourceLocation EndLoc=SourceLocation()); + CallExpr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor, + QualType msgSendType, + QualType returnType, + SmallVectorImpl &ArgTypes, + SmallVectorImpl &MsgExprs, + ObjCMethodDecl *Method); + Stmt *SynthMessageExpr(ObjCMessageExpr *Exp, + SourceLocation StartLoc=SourceLocation(), + SourceLocation EndLoc=SourceLocation()); + + void SynthCountByEnumWithState(std::string &buf); + void SynthMsgSendFunctionDecl(); + void SynthMsgSendSuperFunctionDecl(); + void SynthMsgSendStretFunctionDecl(); + void SynthMsgSendFpretFunctionDecl(); + void SynthMsgSendSuperStretFunctionDecl(); + void SynthGetClassFunctionDecl(); + void SynthGetMetaClassFunctionDecl(); + void SynthGetSuperClassFunctionDecl(); + void SynthSelGetUidFunctionDecl(); + void SynthSuperContructorFunctionDecl(); + + std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag); + std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i, + StringRef funcName, std::string Tag); + std::string SynthesizeBlockFunc(BlockExpr *CE, int i, + StringRef funcName, std::string Tag); + std::string SynthesizeBlockImpl(BlockExpr *CE, + std::string Tag, std::string Desc); + std::string SynthesizeBlockDescriptor(std::string DescTag, + std::string ImplTag, + int i, StringRef funcName, + unsigned hasCopy); + Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp); + void SynthesizeBlockLiterals(SourceLocation FunLocStart, + StringRef FunName); + FunctionDecl *SynthBlockInitFunctionDecl(StringRef name); + Stmt *SynthBlockInitExpr(BlockExpr *Exp, + const SmallVector &InnerBlockDeclRefs); + + // Misc. helper routines. + QualType getProtocolType(); + void WarnAboutReturnGotoStmts(Stmt *S); + void HasReturnStmts(Stmt *S, bool &hasReturns); + void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND); + void InsertBlockLiteralsWithinFunction(FunctionDecl *FD); + void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD); + + bool IsDeclStmtInForeachHeader(DeclStmt *DS); + void CollectBlockDeclRefInfo(BlockExpr *Exp); + void GetBlockDeclRefExprs(Stmt *S); + void GetInnerBlockDeclRefExprs(Stmt *S, + SmallVector &InnerBlockDeclRefs, + llvm::SmallPtrSet &InnerContexts); + + // We avoid calling Type::isBlockPointerType(), since it operates on the + // canonical type. We only care if the top-level type is a closure pointer. 
+ bool isTopLevelBlockPointerType(QualType T) { + return isa(T); + } + + /// convertBlockPointerToFunctionPointer - Converts a block-pointer type + /// to a function pointer type and upon success, returns true; false + /// otherwise. + bool convertBlockPointerToFunctionPointer(QualType &T) { + if (isTopLevelBlockPointerType(T)) { + const BlockPointerType *BPT = T->getAs(); + T = Context->getPointerType(BPT->getPointeeType()); + return true; + } + return false; + } + + bool needToScanForQualifiers(QualType T); + QualType getSuperStructType(); + QualType getConstantStringStructType(); + QualType convertFunctionTypeOfBlocks(const FunctionType *FT); + bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf); + + void convertToUnqualifiedObjCType(QualType &T) { + if (T->isObjCQualifiedIdType()) + T = Context->getObjCIdType(); + else if (T->isObjCQualifiedClassType()) + T = Context->getObjCClassType(); + else if (T->isObjCObjectPointerType() && + T->getPointeeType()->isObjCQualifiedInterfaceType()) { + if (const ObjCObjectPointerType * OBJPT = + T->getAsObjCInterfacePointerType()) { + const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType(); + T = QualType(IFaceT, 0); + T = Context->getPointerType(T); + } + } + } + + // FIXME: This predicate seems like it would be useful to add to ASTContext. + bool isObjCType(QualType T) { + if (!LangOpts.ObjC1 && !LangOpts.ObjC2) + return false; + + QualType OCT = Context->getCanonicalType(T).getUnqualifiedType(); + + if (OCT == Context->getCanonicalType(Context->getObjCIdType()) || + OCT == Context->getCanonicalType(Context->getObjCClassType())) + return true; + + if (const PointerType *PT = OCT->getAs()) { + if (isa(PT->getPointeeType()) || + PT->getPointeeType()->isObjCQualifiedIdType()) + return true; + } + return false; + } + bool PointerTypeTakesAnyBlockArguments(QualType QT); + bool PointerTypeTakesAnyObjCQualifiedType(QualType QT); + void GetExtentOfArgList(const char *Name, const char *&LParen, + const char *&RParen); + + void QuoteDoublequotes(std::string &From, std::string &To) { + for (unsigned i = 0; i < From.length(); i++) { + if (From[i] == '"') + To += "\\\""; + else + To += From[i]; + } + } + + QualType getSimpleFunctionType(QualType result, + const QualType *args, + unsigned numArgs, + bool variadic = false) { + if (result == Context->getObjCInstanceType()) + result = Context->getObjCIdType(); + FunctionProtoType::ExtProtoInfo fpi; + fpi.Variadic = variadic; + return Context->getFunctionType(result, args, numArgs, fpi); + } + + // Helper function: create a CStyleCastExpr with trivial type source info. 
+ CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty, + CastKind Kind, Expr *E) { + TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation()); + return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo, + SourceLocation(), SourceLocation()); + } + }; + + class RewriteObjCFragileABI : public RewriteObjC { + public: + + RewriteObjCFragileABI(std::string inFile, raw_ostream *OS, + DiagnosticsEngine &D, const LangOptions &LOpts, + bool silenceMacroWarn) : RewriteObjC(inFile, OS, + D, LOpts, + silenceMacroWarn) {} + + ~RewriteObjCFragileABI() {} + virtual void Initialize(ASTContext &context); + + // Rewriting metadata + template + void RewriteObjCMethodsMetaData(MethodIterator MethodBegin, + MethodIterator MethodEnd, + bool IsInstanceMethod, + StringRef prefix, + StringRef ClassName, + std::string &Result); + virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol, + StringRef prefix, + StringRef ClassName, + std::string &Result); + virtual void RewriteObjCProtocolListMetaData( + const ObjCList &Prots, + StringRef prefix, StringRef ClassName, std::string &Result); + virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, + std::string &Result); + virtual void RewriteMetaDataIntoBuffer(std::string &Result); + virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl, + std::string &Result); + + // Rewriting ivar + virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, + std::string &Result); + virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV); + }; +} + +void RewriteObjC::RewriteBlocksInFunctionProtoType(QualType funcType, + NamedDecl *D) { + if (const FunctionProtoType *fproto + = dyn_cast(funcType.IgnoreParens())) { + for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(), + E = fproto->arg_type_end(); I && (I != E); ++I) + if (isTopLevelBlockPointerType(*I)) { + // All the args are checked/rewritten. Don't call twice! 
+ RewriteBlockPointerDecl(D); + break; + } + } +} + +void RewriteObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) { + const PointerType *PT = funcType->getAs(); + if (PT && PointerTypeTakesAnyBlockArguments(funcType)) + RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND); +} + +static bool IsHeaderFile(const std::string &Filename) { + std::string::size_type DotPos = Filename.rfind('.'); + + if (DotPos == std::string::npos) { + // no file extension + return false; + } + + std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end()); + // C header: .h + // C++ header: .hh or .H; + return Ext == "h" || Ext == "hh" || Ext == "H"; +} + +RewriteObjC::RewriteObjC(std::string inFile, raw_ostream* OS, + DiagnosticsEngine &D, const LangOptions &LOpts, + bool silenceMacroWarn) + : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS), + SilenceRewriteMacroWarning(silenceMacroWarn) { + IsHeader = IsHeaderFile(inFile); + RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning, + "rewriting sub-expression within a macro (may not be correct)"); + TryFinallyContainsReturnDiag = Diags.getCustomDiagID( + DiagnosticsEngine::Warning, + "rewriter doesn't support user-specified control flow semantics " + "for @try/@finally (code may not execute properly)"); +} + +ASTConsumer *clang::CreateObjCRewriter(const std::string& InFile, + raw_ostream* OS, + DiagnosticsEngine &Diags, + const LangOptions &LOpts, + bool SilenceRewriteMacroWarning) { + return new RewriteObjCFragileABI(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning); +} + +void RewriteObjC::InitializeCommon(ASTContext &context) { + Context = &context; + SM = &Context->getSourceManager(); + TUDecl = Context->getTranslationUnitDecl(); + MsgSendFunctionDecl = 0; + MsgSendSuperFunctionDecl = 0; + MsgSendStretFunctionDecl = 0; + MsgSendSuperStretFunctionDecl = 0; + MsgSendFpretFunctionDecl = 0; + GetClassFunctionDecl = 0; + GetMetaClassFunctionDecl = 0; + GetSuperClassFunctionDecl = 0; + SelGetUidFunctionDecl = 0; + CFStringFunctionDecl = 0; + ConstantStringClassReference = 0; + NSStringRecord = 0; + CurMethodDef = 0; + CurFunctionDef = 0; + CurFunctionDeclToDeclareForBlock = 0; + GlobalVarDecl = 0; + SuperStructDecl = 0; + ProtocolTypeDecl = 0; + ConstantStringDecl = 0; + BcLabelCount = 0; + SuperContructorFunctionDecl = 0; + NumObjCStringLiterals = 0; + PropParentMap = 0; + CurrentBody = 0; + DisableReplaceStmt = false; + objc_impl_method = false; + + // Get the ID and start/end of the main file. + MainFileID = SM->getMainFileID(); + const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID); + MainFileStart = MainBuf->getBufferStart(); + MainFileEnd = MainBuf->getBufferEnd(); + + Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts()); +} + +//===----------------------------------------------------------------------===// +// Top Level Driver Code +//===----------------------------------------------------------------------===// + +void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) { + if (Diags.hasErrorOccurred()) + return; + + // Two cases: either the decl could be in the main file, or it could be in a + // #included file. If the former, rewrite it now. If the later, check to see + // if we rewrote the #include/#import. + SourceLocation Loc = D->getLocation(); + Loc = SM->getExpansionLoc(Loc); + + // If this is for a builtin, ignore it. + if (Loc.isInvalid()) return; + + // Look for built-in declarations that we need to refer during the rewrite. 
+ if (FunctionDecl *FD = dyn_cast(D)) { + RewriteFunctionDecl(FD); + } else if (VarDecl *FVD = dyn_cast(D)) { + // declared in + if (FVD->getName() == "_NSConstantStringClassReference") { + ConstantStringClassReference = FVD; + return; + } + } else if (ObjCInterfaceDecl *ID = dyn_cast(D)) { + if (ID->isThisDeclarationADefinition()) + RewriteInterfaceDecl(ID); + } else if (ObjCCategoryDecl *CD = dyn_cast(D)) { + RewriteCategoryDecl(CD); + } else if (ObjCProtocolDecl *PD = dyn_cast(D)) { + if (PD->isThisDeclarationADefinition()) + RewriteProtocolDecl(PD); + } else if (LinkageSpecDecl *LSD = dyn_cast(D)) { + // Recurse into linkage specifications + for (DeclContext::decl_iterator DI = LSD->decls_begin(), + DIEnd = LSD->decls_end(); + DI != DIEnd; ) { + if (ObjCInterfaceDecl *IFace = dyn_cast((*DI))) { + if (!IFace->isThisDeclarationADefinition()) { + SmallVector DG; + SourceLocation StartLoc = IFace->getLocStart(); + do { + if (isa(*DI) && + !cast(*DI)->isThisDeclarationADefinition() && + StartLoc == (*DI)->getLocStart()) + DG.push_back(*DI); + else + break; + + ++DI; + } while (DI != DIEnd); + RewriteForwardClassDecl(DG); + continue; + } + } + + if (ObjCProtocolDecl *Proto = dyn_cast((*DI))) { + if (!Proto->isThisDeclarationADefinition()) { + SmallVector DG; + SourceLocation StartLoc = Proto->getLocStart(); + do { + if (isa(*DI) && + !cast(*DI)->isThisDeclarationADefinition() && + StartLoc == (*DI)->getLocStart()) + DG.push_back(*DI); + else + break; + + ++DI; + } while (DI != DIEnd); + RewriteForwardProtocolDecl(DG); + continue; + } + } + + HandleTopLevelSingleDecl(*DI); + ++DI; + } + } + // If we have a decl in the main file, see if we should rewrite it. + if (SM->isFromMainFile(Loc)) + return HandleDeclInMainFile(D); +} + +//===----------------------------------------------------------------------===// +// Syntactic (non-AST) Rewriting Code +//===----------------------------------------------------------------------===// + +void RewriteObjC::RewriteInclude() { + SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID); + StringRef MainBuf = SM->getBufferData(MainFileID); + const char *MainBufStart = MainBuf.begin(); + const char *MainBufEnd = MainBuf.end(); + size_t ImportLen = strlen("import"); + + // Loop over the whole file, looking for includes. 
+ for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) { + if (*BufPtr == '#') { + if (++BufPtr == MainBufEnd) + return; + while (*BufPtr == ' ' || *BufPtr == '\t') + if (++BufPtr == MainBufEnd) + return; + if (!strncmp(BufPtr, "import", ImportLen)) { + // replace import with include + SourceLocation ImportLoc = + LocStart.getLocWithOffset(BufPtr-MainBufStart); + ReplaceText(ImportLoc, ImportLen, "include"); + BufPtr += ImportLen; + } + } + } +} + +static std::string getIvarAccessString(ObjCIvarDecl *OID) { + const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface(); + std::string S; + S = "((struct "; + S += ClassDecl->getIdentifier()->getName(); + S += "_IMPL *)self)->"; + S += OID->getName(); + return S; +} + +void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, + ObjCImplementationDecl *IMD, + ObjCCategoryImplDecl *CID) { + static bool objcGetPropertyDefined = false; + static bool objcSetPropertyDefined = false; + SourceLocation startLoc = PID->getLocStart(); + InsertText(startLoc, "// "); + const char *startBuf = SM->getCharacterData(startLoc); + assert((*startBuf == '@') && "bogus @synthesize location"); + const char *semiBuf = strchr(startBuf, ';'); + assert((*semiBuf == ';') && "@synthesize: can't find ';'"); + SourceLocation onePastSemiLoc = + startLoc.getLocWithOffset(semiBuf-startBuf+1); + + if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) + return; // FIXME: is this correct? + + // Generate the 'getter' function. + ObjCPropertyDecl *PD = PID->getPropertyDecl(); + ObjCIvarDecl *OID = PID->getPropertyIvarDecl(); + + if (!OID) + return; + unsigned Attributes = PD->getPropertyAttributes(); + if (!PD->getGetterMethodDecl()->isDefined()) { + bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) && + (Attributes & (ObjCPropertyDecl::OBJC_PR_retain | + ObjCPropertyDecl::OBJC_PR_copy)); + std::string Getr; + if (GenGetProperty && !objcGetPropertyDefined) { + objcGetPropertyDefined = true; + // FIXME. Is this attribute correct in all cases? + Getr = "\nextern \"C\" __declspec(dllimport) " + "id objc_getProperty(id, SEL, long, bool);\n"; + } + RewriteObjCMethodDecl(OID->getContainingInterface(), + PD->getGetterMethodDecl(), Getr); + Getr += "{ "; + // Synthesize an explicit cast to gain access to the ivar. + // See objc-act.c:objc_synthesize_new_getter() for details. + if (GenGetProperty) { + // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1) + Getr += "typedef "; + const FunctionType *FPRetType = 0; + RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr, + FPRetType); + Getr += " _TYPE"; + if (FPRetType) { + Getr += ")"; // close the precedence "scope" for "*". + + // Now, emit the argument types (if any). 
+ if (const FunctionProtoType *FT = dyn_cast(FPRetType)){ + Getr += "("; + for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { + if (i) Getr += ", "; + std::string ParamStr = FT->getArgType(i).getAsString( + Context->getPrintingPolicy()); + Getr += ParamStr; + } + if (FT->isVariadic()) { + if (FT->getNumArgs()) Getr += ", "; + Getr += "..."; + } + Getr += ")"; + } else + Getr += "()"; + } + Getr += ";\n"; + Getr += "return (_TYPE)"; + Getr += "objc_getProperty(self, _cmd, "; + RewriteIvarOffsetComputation(OID, Getr); + Getr += ", 1)"; + } + else + Getr += "return " + getIvarAccessString(OID); + Getr += "; }"; + InsertText(onePastSemiLoc, Getr); + } + + if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined()) + return; + + // Generate the 'setter' function. + std::string Setr; + bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain | + ObjCPropertyDecl::OBJC_PR_copy); + if (GenSetProperty && !objcSetPropertyDefined) { + objcSetPropertyDefined = true; + // FIXME. Is this attribute correct in all cases? + Setr = "\nextern \"C\" __declspec(dllimport) " + "void objc_setProperty (id, SEL, long, id, bool, bool);\n"; + } + + RewriteObjCMethodDecl(OID->getContainingInterface(), + PD->getSetterMethodDecl(), Setr); + Setr += "{ "; + // Synthesize an explicit cast to initialize the ivar. + // See objc-act.c:objc_synthesize_new_setter() for details. + if (GenSetProperty) { + Setr += "objc_setProperty (self, _cmd, "; + RewriteIvarOffsetComputation(OID, Setr); + Setr += ", (id)"; + Setr += PD->getName(); + Setr += ", "; + if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) + Setr += "0, "; + else + Setr += "1, "; + if (Attributes & ObjCPropertyDecl::OBJC_PR_copy) + Setr += "1)"; + else + Setr += "0)"; + } + else { + Setr += getIvarAccessString(OID) + " = "; + Setr += PD->getName(); + } + Setr += "; }"; + InsertText(onePastSemiLoc, Setr); +} + +static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl, + std::string &typedefString) { + typedefString += "#ifndef _REWRITER_typedef_"; + typedefString += ForwardDecl->getNameAsString(); + typedefString += "\n"; + typedefString += "#define _REWRITER_typedef_"; + typedefString += ForwardDecl->getNameAsString(); + typedefString += "\n"; + typedefString += "typedef struct objc_object "; + typedefString += ForwardDecl->getNameAsString(); + typedefString += ";\n#endif\n"; +} + +void RewriteObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl, + const std::string &typedefString) { + SourceLocation startLoc = ClassDecl->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + const char *semiPtr = strchr(startBuf, ';'); + // Replace the @class with typedefs corresponding to the classes. + ReplaceText(startLoc, semiPtr-startBuf+1, typedefString); +} + +void RewriteObjC::RewriteForwardClassDecl(DeclGroupRef D) { + std::string typedefString; + for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { + ObjCInterfaceDecl *ForwardDecl = cast(*I); + if (I == D.begin()) { + // Translate to typedef's that forward reference structs with the same name + // as the class. As a convenience, we include the original declaration + // as a comment. 
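// Illustrative sketch (editorial; hypothetical class "Foo" and property
// "count", not taken from the imported sources): the shape of the plain
// accessors that RewritePropertyImplDecl pastes after the commented-out
// @synthesize when objc_getProperty/objc_setProperty are not needed, built
// around getIvarAccessString()'s "((struct Foo_IMPL *)self)->count" expression.
typedef struct objc_object Foo;
typedef struct objc_selector *SEL;

struct Foo_IMPL { int count; };   // rewritten ivar layout for class Foo

static int _I_Foo_count(Foo *self, SEL _cmd) {
  (void)_cmd;
  return ((struct Foo_IMPL *)self)->count;
}
static void _I_Foo_setCount_(Foo *self, SEL _cmd, int count) {
  (void)_cmd;
  ((struct Foo_IMPL *)self)->count = count;
}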
+ typedefString += "// @class "; + typedefString += ForwardDecl->getNameAsString(); + typedefString += ";\n"; + } + RewriteOneForwardClassDecl(ForwardDecl, typedefString); + } + DeclGroupRef::iterator I = D.begin(); + RewriteForwardClassEpilogue(cast(*I), typedefString); +} + +void RewriteObjC::RewriteForwardClassDecl( + const llvm::SmallVector &D) { + std::string typedefString; + for (unsigned i = 0; i < D.size(); i++) { + ObjCInterfaceDecl *ForwardDecl = cast(D[i]); + if (i == 0) { + typedefString += "// @class "; + typedefString += ForwardDecl->getNameAsString(); + typedefString += ";\n"; + } + RewriteOneForwardClassDecl(ForwardDecl, typedefString); + } + RewriteForwardClassEpilogue(cast(D[0]), typedefString); +} + +void RewriteObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) { + // When method is a synthesized one, such as a getter/setter there is + // nothing to rewrite. + if (Method->isImplicit()) + return; + SourceLocation LocStart = Method->getLocStart(); + SourceLocation LocEnd = Method->getLocEnd(); + + if (SM->getExpansionLineNumber(LocEnd) > + SM->getExpansionLineNumber(LocStart)) { + InsertText(LocStart, "#if 0\n"); + ReplaceText(LocEnd, 1, ";\n#endif\n"); + } else { + InsertText(LocStart, "// "); + } +} + +void RewriteObjC::RewriteProperty(ObjCPropertyDecl *prop) { + SourceLocation Loc = prop->getAtLoc(); + + ReplaceText(Loc, 0, "// "); + // FIXME: handle properties that are declared across multiple lines. +} + +void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) { + SourceLocation LocStart = CatDecl->getLocStart(); + + // FIXME: handle category headers that are declared across multiple lines. + ReplaceText(LocStart, 0, "// "); + + for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(), + E = CatDecl->prop_end(); I != E; ++I) + RewriteProperty(*I); + + for (ObjCCategoryDecl::instmeth_iterator + I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + for (ObjCCategoryDecl::classmeth_iterator + I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + + // Lastly, comment out the @end. + ReplaceText(CatDecl->getAtEndRange().getBegin(), + strlen("@end"), "/* @end */"); +} + +void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) { + SourceLocation LocStart = PDecl->getLocStart(); + assert(PDecl->isThisDeclarationADefinition()); + + // FIXME: handle protocol headers that are declared across multiple lines. + ReplaceText(LocStart, 0, "// "); + + for (ObjCProtocolDecl::instmeth_iterator + I = PDecl->instmeth_begin(), E = PDecl->instmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + for (ObjCProtocolDecl::classmeth_iterator + I = PDecl->classmeth_begin(), E = PDecl->classmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + + for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(), + E = PDecl->prop_end(); I != E; ++I) + RewriteProperty(*I); + + // Lastly, comment out the @end. 
+ SourceLocation LocEnd = PDecl->getAtEndRange().getBegin(); + ReplaceText(LocEnd, strlen("@end"), "/* @end */"); + + // Must comment out @optional/@required + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + for (const char *p = startBuf; p < endBuf; p++) { + if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) { + SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf); + ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */"); + + } + else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) { + SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf); + ReplaceText(OptionalLoc, strlen("@required"), "/* @required */"); + + } + } +} + +void RewriteObjC::RewriteForwardProtocolDecl(DeclGroupRef D) { + SourceLocation LocStart = (*D.begin())->getLocStart(); + if (LocStart.isInvalid()) + llvm_unreachable("Invalid SourceLocation"); + // FIXME: handle forward protocol that are declared across multiple lines. + ReplaceText(LocStart, 0, "// "); +} + +void +RewriteObjC::RewriteForwardProtocolDecl(const llvm::SmallVector &DG) { + SourceLocation LocStart = DG[0]->getLocStart(); + if (LocStart.isInvalid()) + llvm_unreachable("Invalid SourceLocation"); + // FIXME: handle forward protocol that are declared across multiple lines. + ReplaceText(LocStart, 0, "// "); +} + +void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr, + const FunctionType *&FPRetType) { + if (T->isObjCQualifiedIdType()) + ResultStr += "id"; + else if (T->isFunctionPointerType() || + T->isBlockPointerType()) { + // needs special handling, since pointer-to-functions have special + // syntax (where a decaration models use). + QualType retType = T; + QualType PointeeTy; + if (const PointerType* PT = retType->getAs()) + PointeeTy = PT->getPointeeType(); + else if (const BlockPointerType *BPT = retType->getAs()) + PointeeTy = BPT->getPointeeType(); + if ((FPRetType = PointeeTy->getAs())) { + ResultStr += FPRetType->getResultType().getAsString( + Context->getPrintingPolicy()); + ResultStr += "(*"; + } + } else + ResultStr += T.getAsString(Context->getPrintingPolicy()); +} + +void RewriteObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl, + ObjCMethodDecl *OMD, + std::string &ResultStr) { + //fprintf(stderr,"In RewriteObjCMethodDecl\n"); + const FunctionType *FPRetType = 0; + ResultStr += "\nstatic "; + RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType); + ResultStr += " "; + + // Unique method name + std::string NameStr; + + if (OMD->isInstanceMethod()) + NameStr += "_I_"; + else + NameStr += "_C_"; + + NameStr += IDecl->getNameAsString(); + NameStr += "_"; + + if (ObjCCategoryImplDecl *CID = + dyn_cast(OMD->getDeclContext())) { + NameStr += CID->getNameAsString(); + NameStr += "_"; + } + // Append selector names, replacing ':' with '_' + { + std::string selString = OMD->getSelector().getAsString(); + int len = selString.size(); + for (int i = 0; i < len; i++) + if (selString[i] == ':') + selString[i] = '_'; + NameStr += selString; + } + // Remember this name for metadata emission + MethodInternalNames[OMD] = NameStr; + ResultStr += NameStr; + + // Rewrite arguments + ResultStr += "("; + + // invisible arguments + if (OMD->isInstanceMethod()) { + QualType selfTy = Context->getObjCInterfaceType(IDecl); + selfTy = Context->getPointerType(selfTy); + if (!LangOpts.MicrosoftExt) { + if (ObjCSynthesizedStructs.count(const_cast(IDecl))) + ResultStr += "struct "; + } + // When rewriting for 
Microsoft, explicitly omit the structure name. + ResultStr += IDecl->getNameAsString(); + ResultStr += " *"; + } + else + ResultStr += Context->getObjCClassType().getAsString( + Context->getPrintingPolicy()); + + ResultStr += " self, "; + ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy()); + ResultStr += " _cmd"; + + // Method arguments. + for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), + E = OMD->param_end(); PI != E; ++PI) { + ParmVarDecl *PDecl = *PI; + ResultStr += ", "; + if (PDecl->getType()->isObjCQualifiedIdType()) { + ResultStr += "id "; + ResultStr += PDecl->getNameAsString(); + } else { + std::string Name = PDecl->getNameAsString(); + QualType QT = PDecl->getType(); + // Make sure we convert "t (^)(...)" to "t (*)(...)". + (void)convertBlockPointerToFunctionPointer(QT); + QT.getAsStringInternal(Name, Context->getPrintingPolicy()); + ResultStr += Name; + } + } + if (OMD->isVariadic()) + ResultStr += ", ..."; + ResultStr += ") "; + + if (FPRetType) { + ResultStr += ")"; // close the precedence "scope" for "*". + + // Now, emit the argument types (if any). + if (const FunctionProtoType *FT = dyn_cast(FPRetType)) { + ResultStr += "("; + for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { + if (i) ResultStr += ", "; + std::string ParamStr = FT->getArgType(i).getAsString( + Context->getPrintingPolicy()); + ResultStr += ParamStr; + } + if (FT->isVariadic()) { + if (FT->getNumArgs()) ResultStr += ", "; + ResultStr += "..."; + } + ResultStr += ")"; + } else { + ResultStr += "()"; + } + } +} +void RewriteObjC::RewriteImplementationDecl(Decl *OID) { + ObjCImplementationDecl *IMD = dyn_cast(OID); + ObjCCategoryImplDecl *CID = dyn_cast(OID); + + InsertText(IMD ? IMD->getLocStart() : CID->getLocStart(), "// "); + + for (ObjCCategoryImplDecl::instmeth_iterator + I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(), + E = IMD ? IMD->instmeth_end() : CID->instmeth_end(); + I != E; ++I) { + std::string ResultStr; + ObjCMethodDecl *OMD = *I; + RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr); + SourceLocation LocStart = OMD->getLocStart(); + SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart(); + + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + ReplaceText(LocStart, endBuf-startBuf, ResultStr); + } + + for (ObjCCategoryImplDecl::classmeth_iterator + I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(), + E = IMD ? IMD->classmeth_end() : CID->classmeth_end(); + I != E; ++I) { + std::string ResultStr; + ObjCMethodDecl *OMD = *I; + RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr); + SourceLocation LocStart = OMD->getLocStart(); + SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart(); + + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + ReplaceText(LocStart, endBuf-startBuf, ResultStr); + } + for (ObjCCategoryImplDecl::propimpl_iterator + I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(), + E = IMD ? IMD->propimpl_end() : CID->propimpl_end(); + I != E; ++I) { + RewritePropertyImplDecl(*I, IMD, CID); + } + + InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// "); +} + +void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) { + std::string ResultStr; + if (!ObjCForwardDecls.count(ClassDecl->getCanonicalDecl())) { + // we haven't seen a forward decl - generate a typedef. 
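// Illustrative sketch (editorial; hypothetical class and selector, not taken
// from the imported sources) of the naming scheme RewriteObjCMethodDecl uses
// above: "_I_" for instance methods (or "_C_" for class methods), the class
// name, an optional category name, then the selector with every ':' replaced
// by '_'. For "- (void)setValue:(int)v forKey:(id)k" in class Foo this yields:
typedef struct objc_object Foo;   // forward-class typedef, as emitted elsewhere
typedef struct objc_object *id;
typedef struct objc_selector *SEL;

static void _I_Foo_setValue_forKey_(Foo *self, SEL _cmd, int v, id k) {
  // rewritten method body would go here
  (void)self; (void)_cmd; (void)v; (void)k;
}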
+ ResultStr = "#ifndef _REWRITER_typedef_"; + ResultStr += ClassDecl->getNameAsString(); + ResultStr += "\n"; + ResultStr += "#define _REWRITER_typedef_"; + ResultStr += ClassDecl->getNameAsString(); + ResultStr += "\n"; + ResultStr += "typedef struct objc_object "; + ResultStr += ClassDecl->getNameAsString(); + ResultStr += ";\n#endif\n"; + // Mark this typedef as having been generated. + ObjCForwardDecls.insert(ClassDecl->getCanonicalDecl()); + } + RewriteObjCInternalStruct(ClassDecl, ResultStr); + + for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(), + E = ClassDecl->prop_end(); I != E; ++I) + RewriteProperty(*I); + for (ObjCInterfaceDecl::instmeth_iterator + I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + for (ObjCInterfaceDecl::classmeth_iterator + I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end(); + I != E; ++I) + RewriteMethodDeclaration(*I); + + // Lastly, comment out the @end. + ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"), + "/* @end */"); +} + +Stmt *RewriteObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) { + SourceRange OldRange = PseudoOp->getSourceRange(); + + // We just magically know some things about the structure of this + // expression. + ObjCMessageExpr *OldMsg = + cast(PseudoOp->getSemanticExpr( + PseudoOp->getNumSemanticExprs() - 1)); + + // Because the rewriter doesn't allow us to rewrite rewritten code, + // we need to suppress rewriting the sub-statements. + Expr *Base, *RHS; + { + DisableReplaceStmtScope S(*this); + + // Rebuild the base expression if we have one. + Base = 0; + if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) { + Base = OldMsg->getInstanceReceiver(); + Base = cast(Base)->getSourceExpr(); + Base = cast(RewriteFunctionBodyOrGlobalInitializer(Base)); + } + + // Rebuild the RHS. + RHS = cast(PseudoOp->getSyntacticForm())->getRHS(); + RHS = cast(RHS)->getSourceExpr(); + RHS = cast(RewriteFunctionBodyOrGlobalInitializer(RHS)); + } + + // TODO: avoid this copy. 
+ SmallVector SelLocs; + OldMsg->getSelectorLocs(SelLocs); + + ObjCMessageExpr *NewMsg = 0; + switch (OldMsg->getReceiverKind()) { + case ObjCMessageExpr::Class: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + OldMsg->getClassReceiverTypeInfo(), + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + RHS, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + + case ObjCMessageExpr::Instance: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + Base, + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + RHS, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + + case ObjCMessageExpr::SuperClass: + case ObjCMessageExpr::SuperInstance: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + OldMsg->getSuperLoc(), + OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance, + OldMsg->getSuperType(), + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + RHS, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + } + + Stmt *Replacement = SynthMessageExpr(NewMsg); + ReplaceStmtWithRange(PseudoOp, Replacement, OldRange); + return Replacement; +} + +Stmt *RewriteObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) { + SourceRange OldRange = PseudoOp->getSourceRange(); + + // We just magically know some things about the structure of this + // expression. + ObjCMessageExpr *OldMsg = + cast(PseudoOp->getResultExpr()->IgnoreImplicit()); + + // Because the rewriter doesn't allow us to rewrite rewritten code, + // we need to suppress rewriting the sub-statements. + Expr *Base = 0; + { + DisableReplaceStmtScope S(*this); + + // Rebuild the base expression if we have one. + if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) { + Base = OldMsg->getInstanceReceiver(); + Base = cast(Base)->getSourceExpr(); + Base = cast(RewriteFunctionBodyOrGlobalInitializer(Base)); + } + } + + // Intentionally empty. 
+ SmallVector SelLocs; + SmallVector Args; + + ObjCMessageExpr *NewMsg = 0; + switch (OldMsg->getReceiverKind()) { + case ObjCMessageExpr::Class: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + OldMsg->getClassReceiverTypeInfo(), + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + + case ObjCMessageExpr::Instance: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + Base, + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + + case ObjCMessageExpr::SuperClass: + case ObjCMessageExpr::SuperInstance: + NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), + OldMsg->getValueKind(), + OldMsg->getLeftLoc(), + OldMsg->getSuperLoc(), + OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance, + OldMsg->getSuperType(), + OldMsg->getSelector(), + SelLocs, + OldMsg->getMethodDecl(), + Args, + OldMsg->getRightLoc(), + OldMsg->isImplicit()); + break; + } + + Stmt *Replacement = SynthMessageExpr(NewMsg); + ReplaceStmtWithRange(PseudoOp, Replacement, OldRange); + return Replacement; +} + +/// SynthCountByEnumWithState - To print: +/// ((unsigned int (*) +/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int)) +/// (void *)objc_msgSend)((id)l_collection, +/// sel_registerName( +/// "countByEnumeratingWithState:objects:count:"), +/// &enumState, +/// (id *)__rw_items, (unsigned int)16) +/// +void RewriteObjC::SynthCountByEnumWithState(std::string &buf) { + buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, " + "id *, unsigned int))(void *)objc_msgSend)"; + buf += "\n\t\t"; + buf += "((id)l_collection,\n\t\t"; + buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),"; + buf += "\n\t\t"; + buf += "&enumState, " + "(id *)__rw_items, (unsigned int)16)"; +} + +/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach +/// statement to exit to its outer synthesized loop. +/// +Stmt *RewriteObjC::RewriteBreakStmt(BreakStmt *S) { + if (Stmts.empty() || !isa(Stmts.back())) + return S; + // replace break with goto __break_label + std::string buf; + + SourceLocation startLoc = S->getLocStart(); + buf = "goto __break_label_"; + buf += utostr(ObjCBcLabelNo.back()); + ReplaceText(startLoc, strlen("break"), buf); + + return 0; +} + +/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach +/// statement to continue with its inner synthesized loop. +/// +Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) { + if (Stmts.empty() || !isa(Stmts.back())) + return S; + // replace continue with goto __continue_label + std::string buf; + + SourceLocation startLoc = S->getLocStart(); + buf = "goto __continue_label_"; + buf += utostr(ObjCBcLabelNo.back()); + ReplaceText(startLoc, strlen("continue"), buf); + + return 0; +} + +/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement. 
+/// It rewrites: +/// for ( type elem in collection) { stmts; } + +/// Into: +/// { +/// type elem; +/// struct __objcFastEnumerationState enumState = { 0 }; +/// id __rw_items[16]; +/// id l_collection = (id)collection; +/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState +/// objects:__rw_items count:16]; +/// if (limit) { +/// unsigned long startMutations = *enumState.mutationsPtr; +/// do { +/// unsigned long counter = 0; +/// do { +/// if (startMutations != *enumState.mutationsPtr) +/// objc_enumerationMutation(l_collection); +/// elem = (type)enumState.itemsPtr[counter++]; +/// stmts; +/// __continue_label: ; +/// } while (counter < limit); +/// } while (limit = [l_collection countByEnumeratingWithState:&enumState +/// objects:__rw_items count:16]); +/// elem = nil; +/// __break_label: ; +/// } +/// else +/// elem = nil; +/// } +/// +Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, + SourceLocation OrigEnd) { + assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty"); + assert(isa(Stmts.back()) && + "ObjCForCollectionStmt Statement stack mismatch"); + assert(!ObjCBcLabelNo.empty() && + "ObjCForCollectionStmt - Label No stack empty"); + + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + StringRef elementName; + std::string elementTypeAsString; + std::string buf; + buf = "\n{\n\t"; + if (DeclStmt *DS = dyn_cast(S->getElement())) { + // type elem; + NamedDecl* D = cast(DS->getSingleDecl()); + QualType ElementType = cast(D)->getType(); + if (ElementType->isObjCQualifiedIdType() || + ElementType->isObjCQualifiedInterfaceType()) + // Simply use 'id' for all qualified types. + elementTypeAsString = "id"; + else + elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy()); + buf += elementTypeAsString; + buf += " "; + elementName = D->getName(); + buf += elementName; + buf += ";\n\t"; + } + else { + DeclRefExpr *DR = cast(S->getElement()); + elementName = DR->getDecl()->getName(); + ValueDecl *VD = cast(DR->getDecl()); + if (VD->getType()->isObjCQualifiedIdType() || + VD->getType()->isObjCQualifiedInterfaceType()) + // Simply use 'id' for all qualified types. + elementTypeAsString = "id"; + else + elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy()); + } + + // struct __objcFastEnumerationState enumState = { 0 }; + buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t"; + // id __rw_items[16]; + buf += "id __rw_items[16];\n\t"; + // id l_collection = (id) + buf += "id l_collection = (id)"; + // Find start location of 'collection' the hard way! + const char *startCollectionBuf = startBuf; + startCollectionBuf += 3; // skip 'for' + startCollectionBuf = strchr(startCollectionBuf, '('); + startCollectionBuf++; // skip '(' + // find 'in' and skip it. + while (*startCollectionBuf != ' ' || + *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' || + (*(startCollectionBuf+3) != ' ' && + *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '(')) + startCollectionBuf++; + startCollectionBuf += 3; + + // Replace: "for (type element in" with string constructed thus far. 
+ ReplaceText(startLoc, startCollectionBuf - startBuf, buf); + // Replace ')' in for '(' type elem in collection ')' with ';' + SourceLocation rightParenLoc = S->getRParenLoc(); + const char *rparenBuf = SM->getCharacterData(rightParenLoc); + SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf); + buf = ";\n\t"; + + // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState + // objects:__rw_items count:16]; + // which is synthesized into: + // unsigned int limit = + // ((unsigned int (*) + // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int)) + // (void *)objc_msgSend)((id)l_collection, + // sel_registerName( + // "countByEnumeratingWithState:objects:count:"), + // (struct __objcFastEnumerationState *)&state, + // (id *)__rw_items, (unsigned int)16); + buf += "unsigned long limit =\n\t\t"; + SynthCountByEnumWithState(buf); + buf += ";\n\t"; + /// if (limit) { + /// unsigned long startMutations = *enumState.mutationsPtr; + /// do { + /// unsigned long counter = 0; + /// do { + /// if (startMutations != *enumState.mutationsPtr) + /// objc_enumerationMutation(l_collection); + /// elem = (type)enumState.itemsPtr[counter++]; + buf += "if (limit) {\n\t"; + buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t"; + buf += "do {\n\t\t"; + buf += "unsigned long counter = 0;\n\t\t"; + buf += "do {\n\t\t\t"; + buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t"; + buf += "objc_enumerationMutation(l_collection);\n\t\t\t"; + buf += elementName; + buf += " = ("; + buf += elementTypeAsString; + buf += ")enumState.itemsPtr[counter++];"; + // Replace ')' in for '(' type elem in collection ')' with all of these. + ReplaceText(lparenLoc, 1, buf); + + /// __continue_label: ; + /// } while (counter < limit); + /// } while (limit = [l_collection countByEnumeratingWithState:&enumState + /// objects:__rw_items count:16]); + /// elem = nil; + /// __break_label: ; + /// } + /// else + /// elem = nil; + /// } + /// + buf = ";\n\t"; + buf += "__continue_label_"; + buf += utostr(ObjCBcLabelNo.back()); + buf += ": ;"; + buf += "\n\t\t"; + buf += "} while (counter < limit);\n\t"; + buf += "} while (limit = "; + SynthCountByEnumWithState(buf); + buf += ");\n\t"; + buf += elementName; + buf += " = (("; + buf += elementTypeAsString; + buf += ")0);\n\t"; + buf += "__break_label_"; + buf += utostr(ObjCBcLabelNo.back()); + buf += ": ;\n\t"; + buf += "}\n\t"; + buf += "else\n\t\t"; + buf += elementName; + buf += " = (("; + buf += elementTypeAsString; + buf += ")0);\n\t"; + buf += "}\n"; + + // Insert all these *after* the statement body. + // FIXME: If this should support Obj-C++, support CXXTryStmt + if (isa(S->getBody())) { + SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1); + InsertText(endBodyLoc, buf); + } else { + /* Need to treat single statements specially. For example: + * + * for (A *a in b) if (stuff()) break; + * for (A *a in b) xxxyy; + * + * The following code simply scans ahead to the semi to find the actual end. 
+ */ + const char *stmtBuf = SM->getCharacterData(OrigEnd); + const char *semiBuf = strchr(stmtBuf, ';'); + assert(semiBuf && "Can't find ';'"); + SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1); + InsertText(endBodyLoc, buf); + } + Stmts.pop_back(); + ObjCBcLabelNo.pop_back(); + return 0; +} + +/// RewriteObjCSynchronizedStmt - +/// This routine rewrites @synchronized(expr) stmt; +/// into: +/// objc_sync_enter(expr); +/// @try stmt @finally { objc_sync_exit(expr); } +/// +Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) { + // Get the start location and compute the semi location. + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '@') && "bogus @synchronized location"); + + std::string buf; + buf = "objc_sync_enter((id)"; + const char *lparenBuf = startBuf; + while (*lparenBuf != '(') lparenBuf++; + ReplaceText(startLoc, lparenBuf-startBuf+1, buf); + // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since + // the sync expression is typically a message expression that's already + // been rewritten! (which implies the SourceLocation's are invalid). + SourceLocation endLoc = S->getSynchBody()->getLocStart(); + const char *endBuf = SM->getCharacterData(endLoc); + while (*endBuf != ')') endBuf--; + SourceLocation rparenLoc = startLoc.getLocWithOffset(endBuf-startBuf); + buf = ");\n"; + // declare a new scope with two variables, _stack and _rethrow. + buf += "/* @try scope begin */ \n{ struct _objc_exception_data {\n"; + buf += "int buf[18/*32-bit i386*/];\n"; + buf += "char *pointers[4];} _stack;\n"; + buf += "id volatile _rethrow = 0;\n"; + buf += "objc_exception_try_enter(&_stack);\n"; + buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n"; + ReplaceText(rparenLoc, 1, buf); + startLoc = S->getSynchBody()->getLocEnd(); + startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '}') && "bogus @synchronized block"); + SourceLocation lastCurlyLoc = startLoc; + buf = "}\nelse {\n"; + buf += " _rethrow = objc_exception_extract(&_stack);\n"; + buf += "}\n"; + buf += "{ /* implicit finally clause */\n"; + buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n"; + + std::string syncBuf; + syncBuf += " objc_sync_exit("; + + Expr *syncExpr = S->getSynchExpr(); + CastKind CK = syncExpr->getType()->isObjCObjectPointerType() + ? CK_BitCast : + syncExpr->getType()->isBlockPointerType() + ? CK_BlockPointerToObjCPointerCast + : CK_CPointerToObjCPointerCast; + syncExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK, syncExpr); + std::string syncExprBufS; + llvm::raw_string_ostream syncExprBuf(syncExprBufS); + syncExpr->printPretty(syncExprBuf, 0, PrintingPolicy(LangOpts)); + syncBuf += syncExprBuf.str(); + syncBuf += ");"; + + buf += syncBuf; + buf += "\n if (_rethrow) objc_exception_throw(_rethrow);\n"; + buf += "}\n"; + buf += "}"; + + ReplaceText(lastCurlyLoc, 1, buf); + + bool hasReturns = false; + HasReturnStmts(S->getSynchBody(), hasReturns); + if (hasReturns) + RewriteSyncReturnStmts(S->getSynchBody(), syncBuf); + + return 0; +} + +void RewriteObjC::WarnAboutReturnGotoStmts(Stmt *S) +{ + // Perform a bottom up traversal of all children. 
+ for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) + WarnAboutReturnGotoStmts(*CI); + + if (isa(S) || isa(S)) { + Diags.Report(Context->getFullLoc(S->getLocStart()), + TryFinallyContainsReturnDiag); + } + return; +} + +void RewriteObjC::HasReturnStmts(Stmt *S, bool &hasReturns) +{ + // Perform a bottom up traversal of all children. + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) + HasReturnStmts(*CI, hasReturns); + + if (isa(S)) + hasReturns = true; + return; +} + +void RewriteObjC::RewriteTryReturnStmts(Stmt *S) { + // Perform a bottom up traversal of all children. + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) { + RewriteTryReturnStmts(*CI); + } + if (isa(S)) { + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + + const char *semiBuf = strchr(startBuf, ';'); + assert((*semiBuf == ';') && "RewriteTryReturnStmts: can't find ';'"); + SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1); + + std::string buf; + buf = "{ objc_exception_try_exit(&_stack); return"; + + ReplaceText(startLoc, 6, buf); + InsertText(onePastSemiLoc, "}"); + } + return; +} + +void RewriteObjC::RewriteSyncReturnStmts(Stmt *S, std::string syncExitBuf) { + // Perform a bottom up traversal of all children. + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) { + RewriteSyncReturnStmts(*CI, syncExitBuf); + } + if (isa(S)) { + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + + const char *semiBuf = strchr(startBuf, ';'); + assert((*semiBuf == ';') && "RewriteSyncReturnStmts: can't find ';'"); + SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1); + + std::string buf; + buf = "{ objc_exception_try_exit(&_stack);"; + buf += syncExitBuf; + buf += " return"; + + ReplaceText(startLoc, 6, buf); + InsertText(onePastSemiLoc, "}"); + } + return; +} + +Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) { + // Get the start location and compute the semi location. + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '@') && "bogus @try location"); + + std::string buf; + // declare a new scope with two variables, _stack and _rethrow. 
+ buf = "/* @try scope begin */ { struct _objc_exception_data {\n"; + buf += "int buf[18/*32-bit i386*/];\n"; + buf += "char *pointers[4];} _stack;\n"; + buf += "id volatile _rethrow = 0;\n"; + buf += "objc_exception_try_enter(&_stack);\n"; + buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n"; + + ReplaceText(startLoc, 4, buf); + + startLoc = S->getTryBody()->getLocEnd(); + startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '}') && "bogus @try block"); + + SourceLocation lastCurlyLoc = startLoc; + if (S->getNumCatchStmts()) { + startLoc = startLoc.getLocWithOffset(1); + buf = " /* @catch begin */ else {\n"; + buf += " id _caught = objc_exception_extract(&_stack);\n"; + buf += " objc_exception_try_enter (&_stack);\n"; + buf += " if (_setjmp(_stack.buf))\n"; + buf += " _rethrow = objc_exception_extract(&_stack);\n"; + buf += " else { /* @catch continue */"; + + InsertText(startLoc, buf); + } else { /* no catch list */ + buf = "}\nelse {\n"; + buf += " _rethrow = objc_exception_extract(&_stack);\n"; + buf += "}"; + ReplaceText(lastCurlyLoc, 1, buf); + } + Stmt *lastCatchBody = 0; + for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) { + ObjCAtCatchStmt *Catch = S->getCatchStmt(I); + VarDecl *catchDecl = Catch->getCatchParamDecl(); + + if (I == 0) + buf = "if ("; // we are generating code for the first catch clause + else + buf = "else if ("; + startLoc = Catch->getLocStart(); + startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '@') && "bogus @catch location"); + + const char *lParenLoc = strchr(startBuf, '('); + + if (Catch->hasEllipsis()) { + // Now rewrite the body... + lastCatchBody = Catch->getCatchBody(); + SourceLocation bodyLoc = lastCatchBody->getLocStart(); + const char *bodyBuf = SM->getCharacterData(bodyLoc); + assert(*SM->getCharacterData(Catch->getRParenLoc()) == ')' && + "bogus @catch paren location"); + assert((*bodyBuf == '{') && "bogus @catch body location"); + + buf += "1) { id _tmp = _caught;"; + Rewrite.ReplaceText(startLoc, bodyBuf-startBuf+1, buf); + } else if (catchDecl) { + QualType t = catchDecl->getType(); + if (t == Context->getObjCIdType()) { + buf += "1) { "; + ReplaceText(startLoc, lParenLoc-startBuf+1, buf); + } else if (const ObjCObjectPointerType *Ptr = + t->getAs()) { + // Should be a pointer to a class. + ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface(); + if (IDecl) { + buf += "objc_exception_match((struct objc_class *)objc_getClass(\""; + buf += IDecl->getNameAsString(); + buf += "\"), (struct objc_object *)_caught)) { "; + ReplaceText(startLoc, lParenLoc-startBuf+1, buf); + } + } + // Now rewrite the body... + lastCatchBody = Catch->getCatchBody(); + SourceLocation rParenLoc = Catch->getRParenLoc(); + SourceLocation bodyLoc = lastCatchBody->getLocStart(); + const char *bodyBuf = SM->getCharacterData(bodyLoc); + const char *rParenBuf = SM->getCharacterData(rParenLoc); + assert((*rParenBuf == ')') && "bogus @catch paren location"); + assert((*bodyBuf == '{') && "bogus @catch body location"); + + // Here we replace ") {" with "= _caught;" (which initializes and + // declares the @catch parameter). + ReplaceText(rParenLoc, bodyBuf-rParenBuf+1, " = _caught;"); + } else { + llvm_unreachable("@catch rewrite bug"); + } + } + // Complete the catch list... + if (lastCatchBody) { + SourceLocation bodyLoc = lastCatchBody->getLocEnd(); + assert(*SM->getCharacterData(bodyLoc) == '}' && + "bogus @catch body location"); + + // Insert the last (implicit) else clause *before* the right curly brace. 
+ bodyLoc = bodyLoc.getLocWithOffset(-1); + buf = "} /* last catch end */\n"; + buf += "else {\n"; + buf += " _rethrow = _caught;\n"; + buf += " objc_exception_try_exit(&_stack);\n"; + buf += "} } /* @catch end */\n"; + if (!S->getFinallyStmt()) + buf += "}\n"; + InsertText(bodyLoc, buf); + + // Set lastCurlyLoc + lastCurlyLoc = lastCatchBody->getLocEnd(); + } + if (ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt()) { + startLoc = finalStmt->getLocStart(); + startBuf = SM->getCharacterData(startLoc); + assert((*startBuf == '@') && "bogus @finally start"); + + ReplaceText(startLoc, 8, "/* @finally */"); + + Stmt *body = finalStmt->getFinallyBody(); + SourceLocation startLoc = body->getLocStart(); + SourceLocation endLoc = body->getLocEnd(); + assert(*SM->getCharacterData(startLoc) == '{' && + "bogus @finally body location"); + assert(*SM->getCharacterData(endLoc) == '}' && + "bogus @finally body location"); + + startLoc = startLoc.getLocWithOffset(1); + InsertText(startLoc, " if (!_rethrow) objc_exception_try_exit(&_stack);\n"); + endLoc = endLoc.getLocWithOffset(-1); + InsertText(endLoc, " if (_rethrow) objc_exception_throw(_rethrow);\n"); + + // Set lastCurlyLoc + lastCurlyLoc = body->getLocEnd(); + + // Now check for any return/continue/go statements within the @try. + WarnAboutReturnGotoStmts(S->getTryBody()); + } else { /* no finally clause - make sure we synthesize an implicit one */ + buf = "{ /* implicit finally clause */\n"; + buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n"; + buf += " if (_rethrow) objc_exception_throw(_rethrow);\n"; + buf += "}"; + ReplaceText(lastCurlyLoc, 1, buf); + + // Now check for any return/continue/go statements within the @try. + // The implicit finally clause won't called if the @try contains any + // jump statements. + bool hasReturns = false; + HasReturnStmts(S->getTryBody(), hasReturns); + if (hasReturns) + RewriteTryReturnStmts(S->getTryBody()); + } + // Now emit the final closing curly brace... + lastCurlyLoc = lastCurlyLoc.getLocWithOffset(1); + InsertText(lastCurlyLoc, " } /* @try scope end */\n"); + return 0; +} + +// This can't be done with ReplaceStmt(S, ThrowExpr), since +// the throw expression is typically a message expression that's already +// been rewritten! (which implies the SourceLocation's are invalid). +Stmt *RewriteObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) { + // Get the start location and compute the semi location. + SourceLocation startLoc = S->getLocStart(); + const char *startBuf = SM->getCharacterData(startLoc); + + assert((*startBuf == '@') && "bogus @throw location"); + + std::string buf; + /* void objc_exception_throw(id) __attribute__((noreturn)); */ + if (S->getThrowExpr()) + buf = "objc_exception_throw("; + else // add an implicit argument + buf = "objc_exception_throw(_caught"; + + // handle "@ throw" correctly. + const char *wBuf = strchr(startBuf, 'w'); + assert((*wBuf == 'w') && "@throw: can't find 'w'"); + ReplaceText(startLoc, wBuf-startBuf+1, buf); + + const char *semiBuf = strchr(startBuf, ';'); + assert((*semiBuf == ';') && "@throw: can't find ';'"); + SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf); + ReplaceText(semiLoc, 1, ");"); + return 0; +} + +Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) { + // Create a new string expression. 
+ QualType StrType = Context->getPointerType(Context->CharTy); + std::string StrEncoding; + Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding); + Expr *Replacement = StringLiteral::Create(*Context, StrEncoding, + StringLiteral::Ascii, false, + StrType, SourceLocation()); + ReplaceStmt(Exp, Replacement); + + // Replace this subexpr in the parent. + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return Replacement; +} + +Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) { + if (!SelGetUidFunctionDecl) + SynthSelGetUidFunctionDecl(); + assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl"); + // Create a call to sel_registerName("selName"). + SmallVector SelExprs; + QualType argType = Context->getPointerType(Context->CharTy); + SelExprs.push_back(StringLiteral::Create(*Context, + Exp->getSelector().getAsString(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, + &SelExprs[0], SelExprs.size()); + ReplaceStmt(Exp, SelExp); + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return SelExp; +} + +CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl( + FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc, + SourceLocation EndLoc) { + // Get the type, we will need to reference it in a couple spots. + QualType msgSendType = FD->getType(); + + // Create a reference to the objc_msgSend() declaration. + DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, msgSendType, + VK_LValue, SourceLocation()); + + // Now, we cast the reference to a pointer to the objc_msgSend type. + QualType pToFunc = Context->getPointerType(msgSendType); + ImplicitCastExpr *ICE = + ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay, + DRE, 0, VK_RValue); + + const FunctionType *FT = msgSendType->getAs(); + + CallExpr *Exp = + new (Context) CallExpr(*Context, ICE, llvm::makeArrayRef(args, nargs), + FT->getCallResultType(*Context), + VK_RValue, EndLoc); + return Exp; +} + +static bool scanForProtocolRefs(const char *startBuf, const char *endBuf, + const char *&startRef, const char *&endRef) { + while (startBuf < endBuf) { + if (*startBuf == '<') + startRef = startBuf; // mark the start. + if (*startBuf == '>') { + if (startRef && *startRef == '<') { + endRef = startBuf; // mark the end. 
+ return true; + } + return false; + } + startBuf++; + } + return false; +} + +static void scanToNextArgument(const char *&argRef) { + int angle = 0; + while (*argRef != ')' && (*argRef != ',' || angle > 0)) { + if (*argRef == '<') + angle++; + else if (*argRef == '>') + angle--; + argRef++; + } + assert(angle == 0 && "scanToNextArgument - bad protocol type syntax"); +} + +bool RewriteObjC::needToScanForQualifiers(QualType T) { + if (T->isObjCQualifiedIdType()) + return true; + if (const PointerType *PT = T->getAs()) { + if (PT->getPointeeType()->isObjCQualifiedIdType()) + return true; + } + if (T->isObjCObjectPointerType()) { + T = T->getPointeeType(); + return T->isObjCQualifiedInterfaceType(); + } + if (T->isArrayType()) { + QualType ElemTy = Context->getBaseElementType(T); + return needToScanForQualifiers(ElemTy); + } + return false; +} + +void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) { + QualType Type = E->getType(); + if (needToScanForQualifiers(Type)) { + SourceLocation Loc, EndLoc; + + if (const CStyleCastExpr *ECE = dyn_cast(E)) { + Loc = ECE->getLParenLoc(); + EndLoc = ECE->getRParenLoc(); + } else { + Loc = E->getLocStart(); + EndLoc = E->getLocEnd(); + } + // This will defend against trying to rewrite synthesized expressions. + if (Loc.isInvalid() || EndLoc.isInvalid()) + return; + + const char *startBuf = SM->getCharacterData(Loc); + const char *endBuf = SM->getCharacterData(EndLoc); + const char *startRef = 0, *endRef = 0; + if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { + // Get the locations of the startRef, endRef. + SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf); + SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1); + // Comment out the protocol references. + InsertText(LessLoc, "/*"); + InsertText(GreaterLoc, "*/"); + } + } +} + +void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) { + SourceLocation Loc; + QualType Type; + const FunctionProtoType *proto = 0; + if (VarDecl *VD = dyn_cast(Dcl)) { + Loc = VD->getLocation(); + Type = VD->getType(); + } + else if (FunctionDecl *FD = dyn_cast(Dcl)) { + Loc = FD->getLocation(); + // Check for ObjC 'id' and class types that have been adorned with protocol + // information (id

<p>, C<p>
    *). The protocol references need to be rewritten! + const FunctionType *funcType = FD->getType()->getAs(); + assert(funcType && "missing function type"); + proto = dyn_cast(funcType); + if (!proto) + return; + Type = proto->getResultType(); + } + else if (FieldDecl *FD = dyn_cast(Dcl)) { + Loc = FD->getLocation(); + Type = FD->getType(); + } + else + return; + + if (needToScanForQualifiers(Type)) { + // Since types are unique, we need to scan the buffer. + + const char *endBuf = SM->getCharacterData(Loc); + const char *startBuf = endBuf; + while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart) + startBuf--; // scan backward (from the decl location) for return type. + const char *startRef = 0, *endRef = 0; + if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { + // Get the locations of the startRef, endRef. + SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf); + SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1); + // Comment out the protocol references. + InsertText(LessLoc, "/*"); + InsertText(GreaterLoc, "*/"); + } + } + if (!proto) + return; // most likely, was a variable + // Now check arguments. + const char *startBuf = SM->getCharacterData(Loc); + const char *startFuncBuf = startBuf; + for (unsigned i = 0; i < proto->getNumArgs(); i++) { + if (needToScanForQualifiers(proto->getArgType(i))) { + // Since types are unique, we need to scan the buffer. + + const char *endBuf = startBuf; + // scan forward (from the decl location) for argument types. + scanToNextArgument(endBuf); + const char *startRef = 0, *endRef = 0; + if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { + // Get the locations of the startRef, endRef. + SourceLocation LessLoc = + Loc.getLocWithOffset(startRef-startFuncBuf); + SourceLocation GreaterLoc = + Loc.getLocWithOffset(endRef-startFuncBuf+1); + // Comment out the protocol references. + InsertText(LessLoc, "/*"); + InsertText(GreaterLoc, "*/"); + } + startBuf = ++endBuf; + } + else { + // If the function name is derived from a macro expansion, then the + // argument buffer will not follow the name. Need to speak with Chris. + while (*startBuf && *startBuf != ')' && *startBuf != ',') + startBuf++; // scan forward (from the decl location) for argument types. + startBuf++; + } + } +} + +void RewriteObjC::RewriteTypeOfDecl(VarDecl *ND) { + QualType QT = ND->getType(); + const Type* TypePtr = QT->getAs(); + if (!isa(TypePtr)) + return; + while (isa(TypePtr)) { + const TypeOfExprType *TypeOfExprTypePtr = cast(TypePtr); + QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType(); + TypePtr = QT->getAs(); + } + // FIXME. 
This will not work for multiple declarators; as in: + // __typeof__(a) b,c,d; + std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy())); + SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); + const char *startBuf = SM->getCharacterData(DeclLoc); + if (ND->getInit()) { + std::string Name(ND->getNameAsString()); + TypeAsString += " " + Name + " = "; + Expr *E = ND->getInit(); + SourceLocation startLoc; + if (const CStyleCastExpr *ECE = dyn_cast(E)) + startLoc = ECE->getLParenLoc(); + else + startLoc = E->getLocStart(); + startLoc = SM->getExpansionLoc(startLoc); + const char *endBuf = SM->getCharacterData(startLoc); + ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString); + } + else { + SourceLocation X = ND->getLocEnd(); + X = SM->getExpansionLoc(X); + const char *endBuf = SM->getCharacterData(X); + ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString); + } +} + +// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str); +void RewriteObjC::SynthSelGetUidFunctionDecl() { + IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName"); + SmallVector ArgTys; + ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); + QualType getFuncType = + getSimpleFunctionType(Context->getObjCSelType(), &ArgTys[0], ArgTys.size()); + SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + SelGetUidIdent, getFuncType, 0, + SC_Extern, + SC_None, false); +} + +void RewriteObjC::RewriteFunctionDecl(FunctionDecl *FD) { + // declared in + if (FD->getIdentifier() && + FD->getName() == "sel_registerName") { + SelGetUidFunctionDecl = FD; + return; + } + RewriteObjCQualifiedInterfaceTypes(FD); +} + +void RewriteObjC::RewriteBlockPointerType(std::string& Str, QualType Type) { + std::string TypeString(Type.getAsString(Context->getPrintingPolicy())); + const char *argPtr = TypeString.c_str(); + if (!strchr(argPtr, '^')) { + Str += TypeString; + return; + } + while (*argPtr) { + Str += (*argPtr == '^' ? '*' : *argPtr); + argPtr++; + } +} + +// FIXME. Consolidate this routine with RewriteBlockPointerType. 
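
// A minimal standalone sketch of the textual rewrite performed by
// RewriteBlockPointerType above (and by the variable-aware variant below):
// every '^' in the printed type is turned into '*', so "T (^)(args)" becomes
// "T (*)(args)". The helper name here is hypothetical.
#include <cassert>
#include <string>

static std::string blockTypeToFunctionPointerType(const std::string &TypeString) {
  // No block marker: return the type string unchanged, as the rewriter does.
  if (TypeString.find('^') == std::string::npos)
    return TypeString;
  std::string Result;
  for (char c : TypeString)
    Result += (c == '^' ? '*' : c); // rewrite the block marker only
  return Result;
}

int main() {
  assert(blockTypeToFunctionPointerType("int (^)(int, char *)") ==
         "int (*)(int, char *)");
  assert(blockTypeToFunctionPointerType("double") == "double");
  return 0;
}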
+void RewriteObjC::RewriteBlockPointerTypeVariable(std::string& Str, + ValueDecl *VD) { + QualType Type = VD->getType(); + std::string TypeString(Type.getAsString(Context->getPrintingPolicy())); + const char *argPtr = TypeString.c_str(); + int paren = 0; + while (*argPtr) { + switch (*argPtr) { + case '(': + Str += *argPtr; + paren++; + break; + case ')': + Str += *argPtr; + paren--; + break; + case '^': + Str += '*'; + if (paren == 1) + Str += VD->getNameAsString(); + break; + default: + Str += *argPtr; + break; + } + argPtr++; + } +} + + +void RewriteObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) { + SourceLocation FunLocStart = FD->getTypeSpecStartLoc(); + const FunctionType *funcType = FD->getType()->getAs(); + const FunctionProtoType *proto = dyn_cast(funcType); + if (!proto) + return; + QualType Type = proto->getResultType(); + std::string FdStr = Type.getAsString(Context->getPrintingPolicy()); + FdStr += " "; + FdStr += FD->getName(); + FdStr += "("; + unsigned numArgs = proto->getNumArgs(); + for (unsigned i = 0; i < numArgs; i++) { + QualType ArgType = proto->getArgType(i); + RewriteBlockPointerType(FdStr, ArgType); + if (i+1 < numArgs) + FdStr += ", "; + } + FdStr += ");\n"; + InsertText(FunLocStart, FdStr); + CurFunctionDeclToDeclareForBlock = 0; +} + +// SynthSuperContructorFunctionDecl - id objc_super(id obj, id super); +void RewriteObjC::SynthSuperContructorFunctionDecl() { + if (SuperContructorFunctionDecl) + return; + IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super"); + SmallVector ArgTys; + QualType argT = Context->getObjCIdType(); + assert(!argT.isNull() && "Can't find 'id' type"); + ArgTys.push_back(argT); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size()); + SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...); +void RewriteObjC::SynthMsgSendFunctionDecl() { + IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend"); + SmallVector ArgTys; + QualType argT = Context->getObjCIdType(); + assert(!argT.isNull() && "Can't find 'id' type"); + ArgTys.push_back(argT); + argT = Context->getObjCSelType(); + assert(!argT.isNull() && "Can't find 'SEL' type"); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size(), + true /*isVariadic*/); + MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(struct objc_super *, SEL op, ...); +void RewriteObjC::SynthMsgSendSuperFunctionDecl() { + IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper"); + SmallVector ArgTys; + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("objc_super")); + QualType argT = Context->getPointerType(Context->getTagDeclType(RD)); + assert(!argT.isNull() && "Can't build 'struct objc_super *' type"); + ArgTys.push_back(argT); + argT = Context->getObjCSelType(); + assert(!argT.isNull() && "Can't find 'SEL' type"); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size(), + true /*isVariadic*/); + 
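
// For reference, a compilable sketch of the message-send and class-lookup
// prototypes that the Synth*FunctionDecl routines in this file synthesize
// declarations for (each prototype is quoted in the comment above its Synth
// routine). 'id', 'SEL' and 'Class' are modelled here as opaque pointers,
// and objc_super mirrors the two-field struct built by getSuperStructType()
// further down.
typedef void *id;
typedef void *SEL;
typedef void *Class;
struct objc_super { id receiver; Class super; };

id     objc_msgSend(id self, SEL op, ...);
id     objc_msgSendSuper(struct objc_super *super_ctx, SEL op, ...);
id     objc_msgSend_stret(id self, SEL op, ...);
id     objc_msgSendSuper_stret(struct objc_super *super_ctx, SEL op, ...);
double objc_msgSend_fpret(id self, SEL op, ...);
id     objc_getClass(const char *name);
Class  class_getSuperclass(Class cls);
id     objc_getMetaClass(const char *name);
SEL    sel_registerName(const char *str);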
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...); +void RewriteObjC::SynthMsgSendStretFunctionDecl() { + IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret"); + SmallVector ArgTys; + QualType argT = Context->getObjCIdType(); + assert(!argT.isNull() && "Can't find 'id' type"); + ArgTys.push_back(argT); + argT = Context->getObjCSelType(); + assert(!argT.isNull() && "Can't find 'SEL' type"); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size(), + true /*isVariadic*/); + MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendSuperStretFunctionDecl - +// id objc_msgSendSuper_stret(struct objc_super *, SEL op, ...); +void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() { + IdentifierInfo *msgSendIdent = + &Context->Idents.get("objc_msgSendSuper_stret"); + SmallVector ArgTys; + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("objc_super")); + QualType argT = Context->getPointerType(Context->getTagDeclType(RD)); + assert(!argT.isNull() && "Can't build 'struct objc_super *' type"); + ArgTys.push_back(argT); + argT = Context->getObjCSelType(); + assert(!argT.isNull() && "Can't find 'SEL' type"); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size(), + true /*isVariadic*/); + MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...); +void RewriteObjC::SynthMsgSendFpretFunctionDecl() { + IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret"); + SmallVector ArgTys; + QualType argT = Context->getObjCIdType(); + assert(!argT.isNull() && "Can't find 'id' type"); + ArgTys.push_back(argT); + argT = Context->getObjCSelType(); + assert(!argT.isNull() && "Can't find 'SEL' type"); + ArgTys.push_back(argT); + QualType msgSendType = getSimpleFunctionType(Context->DoubleTy, + &ArgTys[0], ArgTys.size(), + true /*isVariadic*/); + MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + msgSendIdent, msgSendType, 0, + SC_Extern, + SC_None, false); +} + +// SynthGetClassFunctionDecl - id objc_getClass(const char *name); +void RewriteObjC::SynthGetClassFunctionDecl() { + IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass"); + SmallVector ArgTys; + ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); + QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size()); + GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + getClassIdent, getClassType, 0, + SC_Extern, + SC_None, false); +} + +// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls); +void RewriteObjC::SynthGetSuperClassFunctionDecl() { + IdentifierInfo *getSuperClassIdent = + &Context->Idents.get("class_getSuperclass"); + SmallVector ArgTys; + 
ArgTys.push_back(Context->getObjCClassType()); + QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(), + &ArgTys[0], ArgTys.size()); + GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + getSuperClassIdent, + getClassType, 0, + SC_Extern, + SC_None, + false); +} + +// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name); +void RewriteObjC::SynthGetMetaClassFunctionDecl() { + IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass"); + SmallVector ArgTys; + ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); + QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(), + &ArgTys[0], ArgTys.size()); + GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, + SourceLocation(), + SourceLocation(), + getClassIdent, getClassType, 0, + SC_Extern, + SC_None, false); +} + +Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) { + QualType strType = getConstantStringStructType(); + + std::string S = "__NSConstantStringImpl_"; + + std::string tmpName = InFileName; + unsigned i; + for (i=0; i < tmpName.length(); i++) { + char c = tmpName.at(i); + // replace any non alphanumeric characters with '_'. + if (!isalpha(c) && (c < '0' || c > '9')) + tmpName[i] = '_'; + } + S += tmpName; + S += "_"; + S += utostr(NumObjCStringLiterals++); + + Preamble += "static __NSConstantStringImpl " + S; + Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,"; + Preamble += "0x000007c8,"; // utf8_str + // The pretty printer for StringLiteral handles escape characters properly. + std::string prettyBufS; + llvm::raw_string_ostream prettyBuf(prettyBufS); + Exp->getString()->printPretty(prettyBuf, 0, PrintingPolicy(LangOpts)); + Preamble += prettyBuf.str(); + Preamble += ","; + Preamble += utostr(Exp->getString()->getByteLength()) + "};\n"; + + VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(), + SourceLocation(), &Context->Idents.get(S), + strType, 0, SC_Static, SC_None); + DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue, + SourceLocation()); + Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf, + Context->getPointerType(DRE->getType()), + VK_RValue, OK_Ordinary, + SourceLocation()); + // cast to NSConstantString * + CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(), + CK_CPointerToObjCPointerCast, Unop); + ReplaceStmt(Exp, cast); + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. 
+ return cast; +} + +// struct objc_super { struct objc_object *receiver; struct objc_class *super; }; +QualType RewriteObjC::getSuperStructType() { + if (!SuperStructDecl) { + SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("objc_super")); + QualType FieldTypes[2]; + + // struct objc_object *receiver; + FieldTypes[0] = Context->getObjCIdType(); + // struct objc_class *super; + FieldTypes[1] = Context->getObjCClassType(); + + // Create fields + for (unsigned i = 0; i < 2; ++i) { + SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl, + SourceLocation(), + SourceLocation(), 0, + FieldTypes[i], 0, + /*BitWidth=*/0, + /*Mutable=*/false, + ICIS_NoInit)); + } + + SuperStructDecl->completeDefinition(); + } + return Context->getTagDeclType(SuperStructDecl); +} + +QualType RewriteObjC::getConstantStringStructType() { + if (!ConstantStringDecl) { + ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("__NSConstantStringImpl")); + QualType FieldTypes[4]; + + // struct objc_object *receiver; + FieldTypes[0] = Context->getObjCIdType(); + // int flags; + FieldTypes[1] = Context->IntTy; + // char *str; + FieldTypes[2] = Context->getPointerType(Context->CharTy); + // long length; + FieldTypes[3] = Context->LongTy; + + // Create fields + for (unsigned i = 0; i < 4; ++i) { + ConstantStringDecl->addDecl(FieldDecl::Create(*Context, + ConstantStringDecl, + SourceLocation(), + SourceLocation(), 0, + FieldTypes[i], 0, + /*BitWidth=*/0, + /*Mutable=*/true, + ICIS_NoInit)); + } + + ConstantStringDecl->completeDefinition(); + } + return Context->getTagDeclType(ConstantStringDecl); +} + +CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor, + QualType msgSendType, + QualType returnType, + SmallVectorImpl &ArgTypes, + SmallVectorImpl &MsgExprs, + ObjCMethodDecl *Method) { + // Create a reference to the objc_msgSend_stret() declaration. + DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor, + false, msgSendType, + VK_LValue, SourceLocation()); + // Need to cast objc_msgSend_stret to "void *" (see above comment). + CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(Context->VoidTy), + CK_BitCast, STDRE); + // Now do the "normal" pointer to function cast. + QualType castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), + Method ? Method->isVariadic() : false); + castType = Context->getPointerType(castType); + cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, + cast); + + // Don't forget the parens to enforce the proper binding. 
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast); + + const FunctionType *FT = msgSendType->getAs(); + CallExpr *STCE = new (Context) CallExpr(*Context, PE, MsgExprs, + FT->getResultType(), VK_RValue, + SourceLocation()); + return STCE; + +} + + +Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp, + SourceLocation StartLoc, + SourceLocation EndLoc) { + if (!SelGetUidFunctionDecl) + SynthSelGetUidFunctionDecl(); + if (!MsgSendFunctionDecl) + SynthMsgSendFunctionDecl(); + if (!MsgSendSuperFunctionDecl) + SynthMsgSendSuperFunctionDecl(); + if (!MsgSendStretFunctionDecl) + SynthMsgSendStretFunctionDecl(); + if (!MsgSendSuperStretFunctionDecl) + SynthMsgSendSuperStretFunctionDecl(); + if (!MsgSendFpretFunctionDecl) + SynthMsgSendFpretFunctionDecl(); + if (!GetClassFunctionDecl) + SynthGetClassFunctionDecl(); + if (!GetSuperClassFunctionDecl) + SynthGetSuperClassFunctionDecl(); + if (!GetMetaClassFunctionDecl) + SynthGetMetaClassFunctionDecl(); + + // default to objc_msgSend(). + FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; + // May need to use objc_msgSend_stret() as well. + FunctionDecl *MsgSendStretFlavor = 0; + if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) { + QualType resultType = mDecl->getResultType(); + if (resultType->isRecordType()) + MsgSendStretFlavor = MsgSendStretFunctionDecl; + else if (resultType->isRealFloatingType()) + MsgSendFlavor = MsgSendFpretFunctionDecl; + } + + // Synthesize a call to objc_msgSend(). + SmallVector MsgExprs; + switch (Exp->getReceiverKind()) { + case ObjCMessageExpr::SuperClass: { + MsgSendFlavor = MsgSendSuperFunctionDecl; + if (MsgSendStretFlavor) + MsgSendStretFlavor = MsgSendSuperStretFunctionDecl; + assert(MsgSendFlavor && "MsgSendFlavor is NULL!"); + + ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface(); + + SmallVector InitExprs; + + // set the receiver to self, the first argument to all methods. + InitExprs.push_back( + NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK_BitCast, + new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(), + false, + Context->getObjCIdType(), + VK_RValue, + SourceLocation())) + ); // set the 'receiver'. + + // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + ClsExprs.push_back(StringLiteral::Create(*Context, + ClassDecl->getIdentifier()->getName(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, + EndLoc); + // (Class)objc_getClass("CurrentClass") + CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context, + Context->getObjCClassType(), + CK_BitCast, Cls); + ClsExprs.clear(); + ClsExprs.push_back(ArgExpr); + Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, + &ClsExprs[0], ClsExprs.size(), + StartLoc, EndLoc); + + // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) + // To turn off a warning, type-cast to 'id' + InitExprs.push_back( // set 'super class', using class_getSuperclass(). + NoTypeInfoCStyleCastExpr(Context, + Context->getObjCIdType(), + CK_BitCast, Cls)); + // struct objc_super + QualType superType = getSuperStructType(); + Expr *SuperRep; + + if (LangOpts.MicrosoftExt) { + SynthSuperContructorFunctionDecl(); + // Simulate a contructor call... 
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl, + false, superType, VK_LValue, + SourceLocation()); + SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs, + superType, VK_LValue, + SourceLocation()); + // The code for super is a little tricky to prevent collision with + // the structure definition in the header. The rewriter has it's own + // internal definition (__rw_objc_super) that is uses. This is why + // we need the cast below. For example: + // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER")) + // + SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, + Context->getPointerType(SuperRep->getType()), + VK_RValue, OK_Ordinary, + SourceLocation()); + SuperRep = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(superType), + CK_BitCast, SuperRep); + } else { + // (struct objc_super) { } + InitListExpr *ILE = + new (Context) InitListExpr(*Context, SourceLocation(), InitExprs, + SourceLocation()); + TypeSourceInfo *superTInfo + = Context->getTrivialTypeSourceInfo(superType); + SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo, + superType, VK_LValue, + ILE, false); + // struct objc_super * + SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, + Context->getPointerType(SuperRep->getType()), + VK_RValue, OK_Ordinary, + SourceLocation()); + } + MsgExprs.push_back(SuperRep); + break; + } + + case ObjCMessageExpr::Class: { + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + ObjCInterfaceDecl *Class + = Exp->getClassReceiver()->getAs()->getInterface(); + IdentifierInfo *clsName = Class->getIdentifier(); + ClsExprs.push_back(StringLiteral::Create(*Context, + clsName->getName(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, EndLoc); + MsgExprs.push_back(Cls); + break; + } + + case ObjCMessageExpr::SuperInstance:{ + MsgSendFlavor = MsgSendSuperFunctionDecl; + if (MsgSendStretFlavor) + MsgSendStretFlavor = MsgSendSuperStretFunctionDecl; + assert(MsgSendFlavor && "MsgSendFlavor is NULL!"); + ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface(); + SmallVector InitExprs; + + InitExprs.push_back( + NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK_BitCast, + new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(), + false, + Context->getObjCIdType(), + VK_RValue, SourceLocation())) + ); // set the 'receiver'. + + // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) + SmallVector ClsExprs; + QualType argType = Context->getPointerType(Context->CharTy); + ClsExprs.push_back(StringLiteral::Create(*Context, + ClassDecl->getIdentifier()->getName(), + StringLiteral::Ascii, false, argType, + SourceLocation())); + CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, + &ClsExprs[0], + ClsExprs.size(), + StartLoc, EndLoc); + // (Class)objc_getClass("CurrentClass") + CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context, + Context->getObjCClassType(), + CK_BitCast, Cls); + ClsExprs.clear(); + ClsExprs.push_back(ArgExpr); + Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, + &ClsExprs[0], ClsExprs.size(), + StartLoc, EndLoc); + + // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) + // To turn off a warning, type-cast to 'id' + InitExprs.push_back( + // set 'super class', using class_getSuperclass(). 
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK_BitCast, Cls)); + // struct objc_super + QualType superType = getSuperStructType(); + Expr *SuperRep; + + if (LangOpts.MicrosoftExt) { + SynthSuperContructorFunctionDecl(); + // Simulate a contructor call... + DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl, + false, superType, VK_LValue, + SourceLocation()); + SuperRep = new (Context) CallExpr(*Context, DRE, InitExprs, + superType, VK_LValue, SourceLocation()); + // The code for super is a little tricky to prevent collision with + // the structure definition in the header. The rewriter has it's own + // internal definition (__rw_objc_super) that is uses. This is why + // we need the cast below. For example: + // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER")) + // + SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, + Context->getPointerType(SuperRep->getType()), + VK_RValue, OK_Ordinary, + SourceLocation()); + SuperRep = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(superType), + CK_BitCast, SuperRep); + } else { + // (struct objc_super) { } + InitListExpr *ILE = + new (Context) InitListExpr(*Context, SourceLocation(), InitExprs, + SourceLocation()); + TypeSourceInfo *superTInfo + = Context->getTrivialTypeSourceInfo(superType); + SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo, + superType, VK_RValue, ILE, + false); + } + MsgExprs.push_back(SuperRep); + break; + } + + case ObjCMessageExpr::Instance: { + // Remove all type-casts because it may contain objc-style types; e.g. + // Foo *. + Expr *recExpr = Exp->getInstanceReceiver(); + while (CStyleCastExpr *CE = dyn_cast(recExpr)) + recExpr = CE->getSubExpr(); + CastKind CK = recExpr->getType()->isObjCObjectPointerType() + ? CK_BitCast : recExpr->getType()->isBlockPointerType() + ? CK_BlockPointerToObjCPointerCast + : CK_CPointerToObjCPointerCast; + + recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK, recExpr); + MsgExprs.push_back(recExpr); + break; + } + } + + // Create a call to sel_registerName("selName"), it will be the 2nd argument. + SmallVector SelExprs; + QualType argType = Context->getPointerType(Context->CharTy); + SelExprs.push_back(StringLiteral::Create(*Context, + Exp->getSelector().getAsString(), + StringLiteral::Ascii, false, + argType, SourceLocation())); + CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, + &SelExprs[0], SelExprs.size(), + StartLoc, + EndLoc); + MsgExprs.push_back(SelExp); + + // Now push any user supplied arguments. + for (unsigned i = 0; i < Exp->getNumArgs(); i++) { + Expr *userExpr = Exp->getArg(i); + // Make all implicit casts explicit...ICE comes in handy:-) + if (ImplicitCastExpr *ICE = dyn_cast(userExpr)) { + // Reuse the ICE type, it is exactly what the doctor ordered. + QualType type = ICE->getType(); + if (needToScanForQualifiers(type)) + type = Context->getObjCIdType(); + // Make sure we convert "type (^)(...)" to "type (*)(...)". 
+ (void)convertBlockPointerToFunctionPointer(type); + const Expr *SubExpr = ICE->IgnoreParenImpCasts(); + CastKind CK; + if (SubExpr->getType()->isIntegralType(*Context) && + type->isBooleanType()) { + CK = CK_IntegralToBoolean; + } else if (type->isObjCObjectPointerType()) { + if (SubExpr->getType()->isBlockPointerType()) { + CK = CK_BlockPointerToObjCPointerCast; + } else if (SubExpr->getType()->isPointerType()) { + CK = CK_CPointerToObjCPointerCast; + } else { + CK = CK_BitCast; + } + } else { + CK = CK_BitCast; + } + + userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr); + } + // Make id cast into an 'id' cast. + else if (CStyleCastExpr *CE = dyn_cast(userExpr)) { + if (CE->getType()->isObjCQualifiedIdType()) { + while ((CE = dyn_cast(userExpr))) + userExpr = CE->getSubExpr(); + CastKind CK; + if (userExpr->getType()->isIntegralType(*Context)) { + CK = CK_IntegralToPointer; + } else if (userExpr->getType()->isBlockPointerType()) { + CK = CK_BlockPointerToObjCPointerCast; + } else if (userExpr->getType()->isPointerType()) { + CK = CK_CPointerToObjCPointerCast; + } else { + CK = CK_BitCast; + } + userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), + CK, userExpr); + } + } + MsgExprs.push_back(userExpr); + // We've transferred the ownership to MsgExprs. For now, we *don't* null + // out the argument in the original expression (since we aren't deleting + // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info. + //Exp->setArg(i, 0); + } + // Generate the funky cast. + CastExpr *cast; + SmallVector ArgTypes; + QualType returnType; + + // Push 'id' and 'SEL', the 2 implicit arguments. + if (MsgSendFlavor == MsgSendSuperFunctionDecl) + ArgTypes.push_back(Context->getPointerType(getSuperStructType())); + else + ArgTypes.push_back(Context->getObjCIdType()); + ArgTypes.push_back(Context->getObjCSelType()); + if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) { + // Push any user argument types. + for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), + E = OMD->param_end(); PI != E; ++PI) { + QualType t = (*PI)->getType()->isObjCQualifiedIdType() + ? Context->getObjCIdType() + : (*PI)->getType(); + // Make sure we convert "t (^)(...)" to "t (*)(...)". + (void)convertBlockPointerToFunctionPointer(t); + ArgTypes.push_back(t); + } + returnType = Exp->getType(); + convertToUnqualifiedObjCType(returnType); + (void)convertBlockPointerToFunctionPointer(returnType); + } else { + returnType = Context->getObjCIdType(); + } + // Get the type, we will need to reference it in a couple spots. + QualType msgSendType = MsgSendFlavor->getType(); + + // Create a reference to the objc_msgSend() declaration. + DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, + VK_LValue, SourceLocation()); + + // Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid). + // If we don't do this cast, we get the following bizarre warning/note: + // xx.m:13: warning: function called through a non-compatible type + // xx.m:13: note: if this code is reached, the program will abort + cast = NoTypeInfoCStyleCastExpr(Context, + Context->getPointerType(Context->VoidTy), + CK_BitCast, DRE); + + // Now do the "normal" pointer to function cast. + QualType castType = + getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), + // If we don't have a method decl, force a variadic cast. + Exp->getMethodDecl() ? 
Exp->getMethodDecl()->isVariadic() : true); + castType = Context->getPointerType(castType); + cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, + cast); + + // Don't forget the parens to enforce the proper binding. + ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); + + const FunctionType *FT = msgSendType->getAs(); + CallExpr *CE = new (Context) CallExpr(*Context, PE, MsgExprs, + FT->getResultType(), VK_RValue, + EndLoc); + Stmt *ReplacingStmt = CE; + if (MsgSendStretFlavor) { + // We have the method which returns a struct/union. Must also generate + // call to objc_msgSend_stret and hang both varieties on a conditional + // expression which dictate which one to envoke depending on size of + // method's return type. + + CallExpr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor, + msgSendType, returnType, + ArgTypes, MsgExprs, + Exp->getMethodDecl()); + + // Build sizeof(returnType) + UnaryExprOrTypeTraitExpr *sizeofExpr = + new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf, + Context->getTrivialTypeSourceInfo(returnType), + Context->getSizeType(), SourceLocation(), + SourceLocation()); + // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...)) + // FIXME: Value of 8 is base on ppc32/x86 ABI for the most common cases. + // For X86 it is more complicated and some kind of target specific routine + // is needed to decide what to do. + unsigned IntSize = + static_cast(Context->getTypeSize(Context->IntTy)); + IntegerLiteral *limit = IntegerLiteral::Create(*Context, + llvm::APInt(IntSize, 8), + Context->IntTy, + SourceLocation()); + BinaryOperator *lessThanExpr = + new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy, + VK_RValue, OK_Ordinary, SourceLocation(), + false); + // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...)) + ConditionalOperator *CondExpr = + new (Context) ConditionalOperator(lessThanExpr, + SourceLocation(), CE, + SourceLocation(), STCE, + returnType, VK_RValue, OK_Ordinary); + ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(), + CondExpr); + } + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return ReplacingStmt; +} + +Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) { + Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(), + Exp->getLocEnd()); + + // Now do the actual rewrite. + ReplaceStmt(Exp, ReplacingStmt); + + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return ReplacingStmt; +} + +// typedef struct objc_object Protocol; +QualType RewriteObjC::getProtocolType() { + if (!ProtocolTypeDecl) { + TypeSourceInfo *TInfo + = Context->getTrivialTypeSourceInfo(Context->getObjCIdType()); + ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("Protocol"), + TInfo); + } + return Context->getTypeDeclType(ProtocolTypeDecl); +} + +/// RewriteObjCProtocolExpr - Rewrite a protocol expression into +/// a synthesized/forward data reference (to the protocol's metadata). +/// The forward references (and metadata) are generated in +/// RewriteObjC::HandleTranslationUnit(). 
+Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) { + std::string Name = "_OBJC_PROTOCOL_" + Exp->getProtocol()->getNameAsString(); + IdentifierInfo *ID = &Context->Idents.get(Name); + VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(), + SourceLocation(), ID, getProtocolType(), 0, + SC_Extern, SC_None); + DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(), + VK_LValue, SourceLocation()); + Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf, + Context->getPointerType(DRE->getType()), + VK_RValue, OK_Ordinary, SourceLocation()); + CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(), + CK_BitCast, + DerefExpr); + ReplaceStmt(Exp, castExpr); + ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl()); + // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. + return castExpr; + +} + +bool RewriteObjC::BufferContainsPPDirectives(const char *startBuf, + const char *endBuf) { + while (startBuf < endBuf) { + if (*startBuf == '#') { + // Skip whitespace. + for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf) + ; + if (!strncmp(startBuf, "if", strlen("if")) || + !strncmp(startBuf, "ifdef", strlen("ifdef")) || + !strncmp(startBuf, "ifndef", strlen("ifndef")) || + !strncmp(startBuf, "define", strlen("define")) || + !strncmp(startBuf, "undef", strlen("undef")) || + !strncmp(startBuf, "else", strlen("else")) || + !strncmp(startBuf, "elif", strlen("elif")) || + !strncmp(startBuf, "endif", strlen("endif")) || + !strncmp(startBuf, "pragma", strlen("pragma")) || + !strncmp(startBuf, "include", strlen("include")) || + !strncmp(startBuf, "import", strlen("import")) || + !strncmp(startBuf, "include_next", strlen("include_next"))) + return true; + } + startBuf++; + } + return false; +} + +/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to +/// an objective-c class with ivars. +void RewriteObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl, + std::string &Result) { + assert(CDecl && "Class missing in SynthesizeObjCInternalStruct"); + assert(CDecl->getName() != "" && + "Name missing in SynthesizeObjCInternalStruct"); + // Do not synthesize more than once. + if (ObjCSynthesizedStructs.count(CDecl)) + return; + ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass(); + int NumIvars = CDecl->ivar_size(); + SourceLocation LocStart = CDecl->getLocStart(); + SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc(); + + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + + // If no ivars and no root or if its root, directly or indirectly, + // have no ivars (thus not synthesized) then no need to synthesize this class. + if ((!CDecl->isThisDeclarationADefinition() || NumIvars == 0) && + (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) { + endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts); + ReplaceText(LocStart, endBuf-startBuf, Result); + return; + } + + // FIXME: This has potential of causing problem. If + // SynthesizeObjCInternalStruct is ever called recursively. + Result += "\nstruct "; + Result += CDecl->getNameAsString(); + if (LangOpts.MicrosoftExt) + Result += "_IMPL"; + + if (NumIvars > 0) { + const char *cursor = strchr(startBuf, '{'); + assert((cursor && endBuf) + && "SynthesizeObjCInternalStruct - malformed @interface"); + // If the buffer contains preprocessor directives, we do more fine-grained + // rewrites. 
This is intended to fix code that looks like (which occurs in + // NSURL.h, for example): + // + // #ifdef XYZ + // @interface Foo : NSObject + // #else + // @interface FooBar : NSObject + // #endif + // { + // int i; + // } + // @end + // + // This clause is segregated to avoid breaking the common case. + if (BufferContainsPPDirectives(startBuf, cursor)) { + SourceLocation L = RCDecl ? CDecl->getSuperClassLoc() : + CDecl->getAtStartLoc(); + const char *endHeader = SM->getCharacterData(L); + endHeader += Lexer::MeasureTokenLength(L, *SM, LangOpts); + + if (CDecl->protocol_begin() != CDecl->protocol_end()) { + // advance to the end of the referenced protocols. + while (endHeader < cursor && *endHeader != '>') endHeader++; + endHeader++; + } + // rewrite the original header + ReplaceText(LocStart, endHeader-startBuf, Result); + } else { + // rewrite the original header *without* disturbing the '{' + ReplaceText(LocStart, cursor-startBuf, Result); + } + if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) { + Result = "\n struct "; + Result += RCDecl->getNameAsString(); + Result += "_IMPL "; + Result += RCDecl->getNameAsString(); + Result += "_IVARS;\n"; + + // insert the super class structure definition. + SourceLocation OnePastCurly = + LocStart.getLocWithOffset(cursor-startBuf+1); + InsertText(OnePastCurly, Result); + } + cursor++; // past '{' + + // Now comment out any visibility specifiers. + while (cursor < endBuf) { + if (*cursor == '@') { + SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf); + // Skip whitespace. + for (++cursor; cursor[0] == ' ' || cursor[0] == '\t'; ++cursor) + /*scan*/; + + // FIXME: presence of @public, etc. inside comment results in + // this transformation as well, which is still correct c-code. + if (!strncmp(cursor, "public", strlen("public")) || + !strncmp(cursor, "private", strlen("private")) || + !strncmp(cursor, "package", strlen("package")) || + !strncmp(cursor, "protected", strlen("protected"))) + InsertText(atLoc, "// "); + } + // FIXME: If there are cases where '<' is used in ivar declaration part + // of user code, then scan the ivar list and use needToScanForQualifiers + // for type checking. + else if (*cursor == '<') { + SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf); + InsertText(atLoc, "/* "); + cursor = strchr(cursor, '>'); + cursor++; + atLoc = LocStart.getLocWithOffset(cursor-startBuf); + InsertText(atLoc, " */"); + } else if (*cursor == '^') { // rewrite block specifier. + SourceLocation caretLoc = LocStart.getLocWithOffset(cursor-startBuf); + ReplaceText(caretLoc, 1, "*"); + } + cursor++; + } + // Don't forget to add a ';'!! + InsertText(LocEnd.getLocWithOffset(1), ";"); + } else { // we don't have any instance variables - insert super struct. + endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts); + Result += " {\n struct "; + Result += RCDecl->getNameAsString(); + Result += "_IMPL "; + Result += RCDecl->getNameAsString(); + Result += "_IVARS;\n};\n"; + ReplaceText(LocStart, endBuf-startBuf, Result); + } + // Mark this struct as having been generated. + if (!ObjCSynthesizedStructs.insert(CDecl)) + llvm_unreachable("struct already synthesize- SynthesizeObjCInternalStruct"); +} + +//===----------------------------------------------------------------------===// +// Meta Data Emission +//===----------------------------------------------------------------------===// + + +/// RewriteImplementations - This routine rewrites all method implementations +/// and emits meta-data. 
+ +void RewriteObjC::RewriteImplementations() { + int ClsDefCount = ClassImplementation.size(); + int CatDefCount = CategoryImplementation.size(); + + // Rewrite implemented methods + for (int i = 0; i < ClsDefCount; i++) + RewriteImplementationDecl(ClassImplementation[i]); + + for (int i = 0; i < CatDefCount; i++) + RewriteImplementationDecl(CategoryImplementation[i]); +} + +void RewriteObjC::RewriteByRefString(std::string &ResultStr, + const std::string &Name, + ValueDecl *VD, bool def) { + assert(BlockByRefDeclNo.count(VD) && + "RewriteByRefString: ByRef decl missing"); + if (def) + ResultStr += "struct "; + ResultStr += "__Block_byref_" + Name + + "_" + utostr(BlockByRefDeclNo[VD]) ; +} + +static bool HasLocalVariableExternalStorage(ValueDecl *VD) { + if (VarDecl *Var = dyn_cast(VD)) + return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage()); + return false; +} + +std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i, + StringRef funcName, + std::string Tag) { + const FunctionType *AFT = CE->getFunctionType(); + QualType RT = AFT->getResultType(); + std::string StructRef = "struct " + Tag; + std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" + + funcName.str() + "_" + "block_func_" + utostr(i); + + BlockDecl *BD = CE->getBlockDecl(); + + if (isa(AFT)) { + // No user-supplied arguments. Still need to pass in a pointer to the + // block (to reference imported block decl refs). + S += "(" + StructRef + " *__cself)"; + } else if (BD->param_empty()) { + S += "(" + StructRef + " *__cself)"; + } else { + const FunctionProtoType *FT = cast(AFT); + assert(FT && "SynthesizeBlockFunc: No function proto"); + S += '('; + // first add the implicit argument. + S += StructRef + " *__cself, "; + std::string ParamStr; + for (BlockDecl::param_iterator AI = BD->param_begin(), + E = BD->param_end(); AI != E; ++AI) { + if (AI != BD->param_begin()) S += ", "; + ParamStr = (*AI)->getNameAsString(); + QualType QT = (*AI)->getType(); + (void)convertBlockPointerToFunctionPointer(QT); + QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy()); + S += ParamStr; + } + if (FT->isVariadic()) { + if (!BD->param_empty()) S += ", "; + S += "..."; + } + S += ')'; + } + S += " {\n"; + + // Create local declarations to avoid rewriting all closure decl ref exprs. + // First, emit a declaration for all "by ref" decls. + for (SmallVector::iterator I = BlockByRefDecls.begin(), + E = BlockByRefDecls.end(); I != E; ++I) { + S += " "; + std::string Name = (*I)->getNameAsString(); + std::string TypeString; + RewriteByRefString(TypeString, Name, (*I)); + TypeString += " *"; + Name = TypeString + Name; + S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n"; + } + // Next, emit a declaration for all "by copy" declarations. + for (SmallVector::iterator I = BlockByCopyDecls.begin(), + E = BlockByCopyDecls.end(); I != E; ++I) { + S += " "; + // Handle nested closure invocation. 
For example: + // + // void (^myImportedClosure)(void); + // myImportedClosure = ^(void) { setGlobalInt(x + y); }; + // + // void (^anotherClosure)(void); + // anotherClosure = ^(void) { + // myImportedClosure(); // import and invoke the closure + // }; + // + if (isTopLevelBlockPointerType((*I)->getType())) { + RewriteBlockPointerTypeVariable(S, (*I)); + S += " = ("; + RewriteBlockPointerType(S, (*I)->getType()); + S += ")"; + S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n"; + } + else { + std::string Name = (*I)->getNameAsString(); + QualType QT = (*I)->getType(); + if (HasLocalVariableExternalStorage(*I)) + QT = Context->getPointerType(QT); + QT.getAsStringInternal(Name, Context->getPrintingPolicy()); + S += Name + " = __cself->" + + (*I)->getNameAsString() + "; // bound by copy\n"; + } + } + std::string RewrittenStr = RewrittenBlockExprs[CE]; + const char *cstr = RewrittenStr.c_str(); + while (*cstr++ != '{') ; + S += cstr; + S += "\n"; + return S; +} + +std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i, + StringRef funcName, + std::string Tag) { + std::string StructRef = "struct " + Tag; + std::string S = "static void __"; + + S += funcName; + S += "_block_copy_" + utostr(i); + S += "(" + StructRef; + S += "*dst, " + StructRef; + S += "*src) {"; + for (llvm::SmallPtrSet::iterator I = ImportedBlockDecls.begin(), + E = ImportedBlockDecls.end(); I != E; ++I) { + ValueDecl *VD = (*I); + S += "_Block_object_assign((void*)&dst->"; + S += (*I)->getNameAsString(); + S += ", (void*)src->"; + S += (*I)->getNameAsString(); + if (BlockByRefDeclsPtrSet.count((*I))) + S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);"; + else if (VD->getType()->isBlockPointerType()) + S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);"; + else + S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);"; + } + S += "}\n"; + + S += "\nstatic void __"; + S += funcName; + S += "_block_dispose_" + utostr(i); + S += "(" + StructRef; + S += "*src) {"; + for (llvm::SmallPtrSet::iterator I = ImportedBlockDecls.begin(), + E = ImportedBlockDecls.end(); I != E; ++I) { + ValueDecl *VD = (*I); + S += "_Block_object_dispose((void*)src->"; + S += (*I)->getNameAsString(); + if (BlockByRefDeclsPtrSet.count((*I))) + S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);"; + else if (VD->getType()->isBlockPointerType()) + S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);"; + else + S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);"; + } + S += "}\n"; + return S; +} + +std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag, + std::string Desc) { + std::string S = "\nstruct " + Tag; + std::string Constructor = " " + Tag; + + S += " {\n struct __block_impl impl;\n"; + S += " struct " + Desc; + S += "* Desc;\n"; + + Constructor += "(void *fp, "; // Invoke function pointer. + Constructor += "struct " + Desc; // Descriptor pointer. + Constructor += " *desc"; + + if (BlockDeclRefs.size()) { + // Output all "by copy" declarations. + for (SmallVector::iterator I = BlockByCopyDecls.begin(), + E = BlockByCopyDecls.end(); I != E; ++I) { + S += " "; + std::string FieldName = (*I)->getNameAsString(); + std::string ArgName = "_" + FieldName; + // Handle nested closure invocation. 
For example: + // + // void (^myImportedBlock)(void); + // myImportedBlock = ^(void) { setGlobalInt(x + y); }; + // + // void (^anotherBlock)(void); + // anotherBlock = ^(void) { + // myImportedBlock(); // import and invoke the closure + // }; + // + if (isTopLevelBlockPointerType((*I)->getType())) { + S += "struct __block_impl *"; + Constructor += ", void *" + ArgName; + } else { + QualType QT = (*I)->getType(); + if (HasLocalVariableExternalStorage(*I)) + QT = Context->getPointerType(QT); + QT.getAsStringInternal(FieldName, Context->getPrintingPolicy()); + QT.getAsStringInternal(ArgName, Context->getPrintingPolicy()); + Constructor += ", " + ArgName; + } + S += FieldName + ";\n"; + } + // Output all "by ref" declarations. + for (SmallVector::iterator I = BlockByRefDecls.begin(), + E = BlockByRefDecls.end(); I != E; ++I) { + S += " "; + std::string FieldName = (*I)->getNameAsString(); + std::string ArgName = "_" + FieldName; + { + std::string TypeString; + RewriteByRefString(TypeString, FieldName, (*I)); + TypeString += " *"; + FieldName = TypeString + FieldName; + ArgName = TypeString + ArgName; + Constructor += ", " + ArgName; + } + S += FieldName + "; // by ref\n"; + } + // Finish writing the constructor. + Constructor += ", int flags=0)"; + // Initialize all "by copy" arguments. + bool firsTime = true; + for (SmallVector::iterator I = BlockByCopyDecls.begin(), + E = BlockByCopyDecls.end(); I != E; ++I) { + std::string Name = (*I)->getNameAsString(); + if (firsTime) { + Constructor += " : "; + firsTime = false; + } + else + Constructor += ", "; + if (isTopLevelBlockPointerType((*I)->getType())) + Constructor += Name + "((struct __block_impl *)_" + Name + ")"; + else + Constructor += Name + "(_" + Name + ")"; + } + // Initialize all "by ref" arguments. + for (SmallVector::iterator I = BlockByRefDecls.begin(), + E = BlockByRefDecls.end(); I != E; ++I) { + std::string Name = (*I)->getNameAsString(); + if (firsTime) { + Constructor += " : "; + firsTime = false; + } + else + Constructor += ", "; + Constructor += Name + "(_" + Name + "->__forwarding)"; + } + + Constructor += " {\n"; + if (GlobalVarDecl) + Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n"; + else + Constructor += " impl.isa = &_NSConcreteStackBlock;\n"; + Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n"; + + Constructor += " Desc = desc;\n"; + } else { + // Finish writing the constructor. 
+ Constructor += ", int flags=0) {\n"; + if (GlobalVarDecl) + Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n"; + else + Constructor += " impl.isa = &_NSConcreteStackBlock;\n"; + Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n"; + Constructor += " Desc = desc;\n"; + } + Constructor += " "; + Constructor += "}\n"; + S += Constructor; + S += "};\n"; + return S; +} + +std::string RewriteObjC::SynthesizeBlockDescriptor(std::string DescTag, + std::string ImplTag, int i, + StringRef FunName, + unsigned hasCopy) { + std::string S = "\nstatic struct " + DescTag; + + S += " {\n unsigned long reserved;\n"; + S += " unsigned long Block_size;\n"; + if (hasCopy) { + S += " void (*copy)(struct "; + S += ImplTag; S += "*, struct "; + S += ImplTag; S += "*);\n"; + + S += " void (*dispose)(struct "; + S += ImplTag; S += "*);\n"; + } + S += "} "; + + S += DescTag + "_DATA = { 0, sizeof(struct "; + S += ImplTag + ")"; + if (hasCopy) { + S += ", __" + FunName.str() + "_block_copy_" + utostr(i); + S += ", __" + FunName.str() + "_block_dispose_" + utostr(i); + } + S += "};\n"; + return S; +} + +void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart, + StringRef FunName) { + // Insert declaration for the function in which block literal is used. + if (CurFunctionDeclToDeclareForBlock && !Blocks.empty()) + RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock); + bool RewriteSC = (GlobalVarDecl && + !Blocks.empty() && + GlobalVarDecl->getStorageClass() == SC_Static && + GlobalVarDecl->getType().getCVRQualifiers()); + if (RewriteSC) { + std::string SC(" void __"); + SC += GlobalVarDecl->getNameAsString(); + SC += "() {}"; + InsertText(FunLocStart, SC); + } + + // Insert closures that were part of the function. + for (unsigned i = 0, count=0; i < Blocks.size(); i++) { + CollectBlockDeclRefInfo(Blocks[i]); + // Need to copy-in the inner copied-in variables not actually used in this + // block. + for (int j = 0; j < InnerDeclRefsCount[i]; j++) { + DeclRefExpr *Exp = InnerDeclRefs[count++]; + ValueDecl *VD = Exp->getDecl(); + BlockDeclRefs.push_back(Exp); + if (!VD->hasAttr() && !BlockByCopyDeclsPtrSet.count(VD)) { + BlockByCopyDeclsPtrSet.insert(VD); + BlockByCopyDecls.push_back(VD); + } + if (VD->hasAttr() && !BlockByRefDeclsPtrSet.count(VD)) { + BlockByRefDeclsPtrSet.insert(VD); + BlockByRefDecls.push_back(VD); + } + // imported objects in the inner blocks not used in the outer + // blocks must be copied/disposed in the outer block as well. + if (VD->hasAttr() || + VD->getType()->isObjCObjectPointerType() || + VD->getType()->isBlockPointerType()) + ImportedBlockDecls.insert(VD); + } + + std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i); + std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i); + + std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag); + + InsertText(FunLocStart, CI); + + std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag); + + InsertText(FunLocStart, CF); + + if (ImportedBlockDecls.size()) { + std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag); + InsertText(FunLocStart, HF); + } + std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName, + ImportedBlockDecls.size() > 0); + InsertText(FunLocStart, BD); + + BlockDeclRefs.clear(); + BlockByRefDecls.clear(); + BlockByRefDeclsPtrSet.clear(); + BlockByCopyDecls.clear(); + BlockByCopyDeclsPtrSet.clear(); + ImportedBlockDecls.clear(); + } + if (RewriteSC) { + // Must insert any 'const/volatile/static here. 
Since it has been + // removed as result of rewriting of block literals. + std::string SC; + if (GlobalVarDecl->getStorageClass() == SC_Static) + SC = "static "; + if (GlobalVarDecl->getType().isConstQualified()) + SC += "const "; + if (GlobalVarDecl->getType().isVolatileQualified()) + SC += "volatile "; + if (GlobalVarDecl->getType().isRestrictQualified()) + SC += "restrict "; + InsertText(FunLocStart, SC); + } + + Blocks.clear(); + InnerDeclRefsCount.clear(); + InnerDeclRefs.clear(); + RewrittenBlockExprs.clear(); +} + +void RewriteObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) { + SourceLocation FunLocStart = FD->getTypeSpecStartLoc(); + StringRef FuncName = FD->getName(); + + SynthesizeBlockLiterals(FunLocStart, FuncName); +} + +static void BuildUniqueMethodName(std::string &Name, + ObjCMethodDecl *MD) { + ObjCInterfaceDecl *IFace = MD->getClassInterface(); + Name = IFace->getName(); + Name += "__" + MD->getSelector().getAsString(); + // Convert colons to underscores. + std::string::size_type loc = 0; + while ((loc = Name.find(":", loc)) != std::string::npos) + Name.replace(loc, 1, "_"); +} + +void RewriteObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) { + //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n"); + //SourceLocation FunLocStart = MD->getLocStart(); + SourceLocation FunLocStart = MD->getLocStart(); + std::string FuncName; + BuildUniqueMethodName(FuncName, MD); + SynthesizeBlockLiterals(FunLocStart, FuncName); +} + +void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) { + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) { + if (BlockExpr *CBE = dyn_cast(*CI)) + GetBlockDeclRefExprs(CBE->getBody()); + else + GetBlockDeclRefExprs(*CI); + } + // Handle specific things. + if (DeclRefExpr *DRE = dyn_cast(S)) { + if (DRE->refersToEnclosingLocal()) { + // FIXME: Handle enums. + if (!isa(DRE->getDecl())) + BlockDeclRefs.push_back(DRE); + if (HasLocalVariableExternalStorage(DRE->getDecl())) + BlockDeclRefs.push_back(DRE); + } + } + + return; +} + +void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S, + SmallVector &InnerBlockDeclRefs, + llvm::SmallPtrSet &InnerContexts) { + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) { + if (BlockExpr *CBE = dyn_cast(*CI)) { + InnerContexts.insert(cast(CBE->getBlockDecl())); + GetInnerBlockDeclRefExprs(CBE->getBody(), + InnerBlockDeclRefs, + InnerContexts); + } + else + GetInnerBlockDeclRefExprs(*CI, + InnerBlockDeclRefs, + InnerContexts); + + } + // Handle specific things. + if (DeclRefExpr *DRE = dyn_cast(S)) { + if (DRE->refersToEnclosingLocal()) { + if (!isa(DRE->getDecl()) && + !InnerContexts.count(DRE->getDecl()->getDeclContext())) + InnerBlockDeclRefs.push_back(DRE); + if (VarDecl *Var = dyn_cast(DRE->getDecl())) + if (Var->isFunctionOrMethodVarDecl()) + ImportedLocalExternalDecls.insert(Var); + } + } + + return; +} + +/// convertFunctionTypeOfBlocks - This routine converts a function type +/// whose result type may be a block pointer or whose argument type(s) +/// might be block pointers to an equivalent function type replacing +/// all block pointers to function pointers. +QualType RewriteObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) { + const FunctionProtoType *FTP = dyn_cast(FT); + // FTP will be null for closures that don't take arguments. + // Generate a funky cast. 
+ SmallVector ArgTypes; + QualType Res = FT->getResultType(); + bool HasBlockType = convertBlockPointerToFunctionPointer(Res); + + if (FTP) { + for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), + E = FTP->arg_type_end(); I && (I != E); ++I) { + QualType t = *I; + // Make sure we convert "t (^)(...)" to "t (*)(...)". + if (convertBlockPointerToFunctionPointer(t)) + HasBlockType = true; + ArgTypes.push_back(t); + } + } + QualType FuncType; + // FIXME. Does this work if block takes no argument but has a return type + // which is of block type? + if (HasBlockType) + FuncType = getSimpleFunctionType(Res, &ArgTypes[0], ArgTypes.size()); + else FuncType = QualType(FT, 0); + return FuncType; +} + +Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) { + // Navigate to relevant type information. + const BlockPointerType *CPT = 0; + + if (const DeclRefExpr *DRE = dyn_cast(BlockExp)) { + CPT = DRE->getType()->getAs(); + } else if (const MemberExpr *MExpr = dyn_cast(BlockExp)) { + CPT = MExpr->getType()->getAs(); + } + else if (const ParenExpr *PRE = dyn_cast(BlockExp)) { + return SynthesizeBlockCall(Exp, PRE->getSubExpr()); + } + else if (const ImplicitCastExpr *IEXPR = dyn_cast(BlockExp)) + CPT = IEXPR->getType()->getAs(); + else if (const ConditionalOperator *CEXPR = + dyn_cast(BlockExp)) { + Expr *LHSExp = CEXPR->getLHS(); + Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp); + Expr *RHSExp = CEXPR->getRHS(); + Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp); + Expr *CONDExp = CEXPR->getCond(); + ConditionalOperator *CondExpr = + new (Context) ConditionalOperator(CONDExp, + SourceLocation(), cast(LHSStmt), + SourceLocation(), cast(RHSStmt), + Exp->getType(), VK_RValue, OK_Ordinary); + return CondExpr; + } else if (const ObjCIvarRefExpr *IRE = dyn_cast(BlockExp)) { + CPT = IRE->getType()->getAs(); + } else if (const PseudoObjectExpr *POE + = dyn_cast(BlockExp)) { + CPT = POE->getType()->castAs(); + } else { + assert(1 && "RewriteBlockClass: Bad type"); + } + assert(CPT && "RewriteBlockClass: Bad type"); + const FunctionType *FT = CPT->getPointeeType()->getAs(); + assert(FT && "RewriteBlockClass: Bad type"); + const FunctionProtoType *FTP = dyn_cast(FT); + // FTP will be null for closures that don't take arguments. + + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get("__block_impl")); + QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD)); + + // Generate a funky cast. + SmallVector ArgTypes; + + // Push the block argument type. + ArgTypes.push_back(PtrBlock); + if (FTP) { + for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), + E = FTP->arg_type_end(); I && (I != E); ++I) { + QualType t = *I; + // Make sure we convert "t (^)(...)" to "t (*)(...)". + if (!convertBlockPointerToFunctionPointer(t)) + convertToUnqualifiedObjCType(t); + ArgTypes.push_back(t); + } + } + // Now do the pointer to function cast. + QualType PtrToFuncCastType + = getSimpleFunctionType(Exp->getType(), &ArgTypes[0], ArgTypes.size()); + + PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType); + + CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock, + CK_BitCast, + const_cast(BlockExp)); + // Don't forget the parens to enforce the proper binding. 
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), + BlkCast); + //PE->dump(); + + FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get("FuncPtr"), + Context->VoidPtrTy, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), + FD->getType(), VK_LValue, + OK_Ordinary); + + + CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType, + CK_BitCast, ME); + PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast); + + SmallVector BlkExprs; + // Add the implicit argument. + BlkExprs.push_back(BlkCast); + // Add the user arguments. + for (CallExpr::arg_iterator I = Exp->arg_begin(), + E = Exp->arg_end(); I != E; ++I) { + BlkExprs.push_back(*I); + } + CallExpr *CE = new (Context) CallExpr(*Context, PE, BlkExprs, + Exp->getType(), VK_RValue, + SourceLocation()); + return CE; +} + +// We need to return the rewritten expression to handle cases where the +// BlockDeclRefExpr is embedded in another expression being rewritten. +// For example: +// +// int main() { +// __block Foo *f; +// __block int i; +// +// void (^myblock)() = ^() { +// [f test]; // f is a BlockDeclRefExpr embedded in a message (which is being rewritten). +// i = 77; +// }; +//} +Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) { + // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR + // for each DeclRefExp where BYREFVAR is name of the variable. + ValueDecl *VD = DeclRefExp->getDecl(); + bool isArrow = DeclRefExp->refersToEnclosingLocal(); + + FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), + SourceLocation(), + &Context->Idents.get("__forwarding"), + Context->VoidPtrTy, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow, + FD, SourceLocation(), + FD->getType(), VK_LValue, + OK_Ordinary); + + StringRef Name = VD->getName(); + FD = FieldDecl::Create(*Context, 0, SourceLocation(), SourceLocation(), + &Context->Idents.get(Name), + Context->VoidPtrTy, 0, + /*BitWidth=*/0, /*Mutable=*/true, + ICIS_NoInit); + ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(), + DeclRefExp->getType(), VK_LValue, OK_Ordinary); + + + + // Need parens to enforce precedence. + ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(), + DeclRefExp->getExprLoc(), + ME); + ReplaceStmt(DeclRefExp, PE); + return PE; +} + +// Rewrites the imported local variable V with external storage +// (static, extern, etc.) as *V +// +Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) { + ValueDecl *VD = DRE->getDecl(); + if (VarDecl *Var = dyn_cast(VD)) + if (!ImportedLocalExternalDecls.count(Var)) + return DRE; + Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(), + VK_LValue, OK_Ordinary, + DRE->getLocation()); + // Need parens to enforce precedence. + ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), + Exp); + ReplaceStmt(DRE, PE); + return PE; +} + +void RewriteObjC::RewriteCastExpr(CStyleCastExpr *CE) { + SourceLocation LocStart = CE->getLParenLoc(); + SourceLocation LocEnd = CE->getRParenLoc(); + + // Need to avoid trying to rewrite synthesized casts. + if (LocStart.isInvalid()) + return; + // Need to avoid trying to rewrite casts contained in macros. 
+ if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd)) + return; + + const char *startBuf = SM->getCharacterData(LocStart); + const char *endBuf = SM->getCharacterData(LocEnd); + QualType QT = CE->getType(); + const Type* TypePtr = QT->getAs(); + if (isa(TypePtr)) { + const TypeOfExprType *TypeOfExprTypePtr = cast(TypePtr); + QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType(); + std::string TypeAsString = "("; + RewriteBlockPointerType(TypeAsString, QT); + TypeAsString += ")"; + ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString); + return; + } + // advance the location to startArgList. + const char *argPtr = startBuf; + + while (*argPtr++ && (argPtr < endBuf)) { + switch (*argPtr) { + case '^': + // Replace the '^' with '*'. + LocStart = LocStart.getLocWithOffset(argPtr-startBuf); + ReplaceText(LocStart, 1, "*"); + break; + } + } + return; +} + +void RewriteObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) { + SourceLocation DeclLoc = FD->getLocation(); + unsigned parenCount = 0; + + // We have 1 or more arguments that have closure pointers. + const char *startBuf = SM->getCharacterData(DeclLoc); + const char *startArgList = strchr(startBuf, '('); + + assert((*startArgList == '(') && "Rewriter fuzzy parser confused"); + + parenCount++; + // advance the location to startArgList. + DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf); + assert((DeclLoc.isValid()) && "Invalid DeclLoc"); + + const char *argPtr = startArgList; + + while (*argPtr++ && parenCount) { + switch (*argPtr) { + case '^': + // Replace the '^' with '*'. + DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList); + ReplaceText(DeclLoc, 1, "*"); + break; + case '(': + parenCount++; + break; + case ')': + parenCount--; + break; + } + } + return; +} + +bool RewriteObjC::PointerTypeTakesAnyBlockArguments(QualType QT) { + const FunctionProtoType *FTP; + const PointerType *PT = QT->getAs(); + if (PT) { + FTP = PT->getPointeeType()->getAs(); + } else { + const BlockPointerType *BPT = QT->getAs(); + assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type"); + FTP = BPT->getPointeeType()->getAs(); + } + if (FTP) { + for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), + E = FTP->arg_type_end(); I != E; ++I) + if (isTopLevelBlockPointerType(*I)) + return true; + } + return false; +} + +bool RewriteObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) { + const FunctionProtoType *FTP; + const PointerType *PT = QT->getAs(); + if (PT) { + FTP = PT->getPointeeType()->getAs(); + } else { + const BlockPointerType *BPT = QT->getAs(); + assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type"); + FTP = BPT->getPointeeType()->getAs(); + } + if (FTP) { + for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), + E = FTP->arg_type_end(); I != E; ++I) { + if ((*I)->isObjCQualifiedIdType()) + return true; + if ((*I)->isObjCObjectPointerType() && + (*I)->getPointeeType()->isObjCQualifiedInterfaceType()) + return true; + } + + } + return false; +} + +void RewriteObjC::GetExtentOfArgList(const char *Name, const char *&LParen, + const char *&RParen) { + const char *argPtr = strchr(Name, '('); + assert((*argPtr == '(') && "Rewriter fuzzy parser confused"); + + LParen = argPtr; // output the start. + argPtr++; // skip past the left paren. 
+ unsigned parenCount = 1; + + while (*argPtr && parenCount) { + switch (*argPtr) { + case '(': parenCount++; break; + case ')': parenCount--; break; + default: break; + } + if (parenCount) argPtr++; + } + assert((*argPtr == ')') && "Rewriter fuzzy parser confused"); + RParen = argPtr; // output the end +} + +void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) { + if (FunctionDecl *FD = dyn_cast(ND)) { + RewriteBlockPointerFunctionArgs(FD); + return; + } + // Handle Variables and Typedefs. + SourceLocation DeclLoc = ND->getLocation(); + QualType DeclT; + if (VarDecl *VD = dyn_cast(ND)) + DeclT = VD->getType(); + else if (TypedefNameDecl *TDD = dyn_cast(ND)) + DeclT = TDD->getUnderlyingType(); + else if (FieldDecl *FD = dyn_cast(ND)) + DeclT = FD->getType(); + else + llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled"); + + const char *startBuf = SM->getCharacterData(DeclLoc); + const char *endBuf = startBuf; + // scan backward (from the decl location) for the end of the previous decl. + while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart) + startBuf--; + SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf); + std::string buf; + unsigned OrigLength=0; + // *startBuf != '^' if we are dealing with a pointer to function that + // may take block argument types (which will be handled below). + if (*startBuf == '^') { + // Replace the '^' with '*', computing a negative offset. + buf = '*'; + startBuf++; + OrigLength++; + } + while (*startBuf != ')') { + buf += *startBuf; + startBuf++; + OrigLength++; + } + buf += ')'; + OrigLength++; + + if (PointerTypeTakesAnyBlockArguments(DeclT) || + PointerTypeTakesAnyObjCQualifiedType(DeclT)) { + // Replace the '^' with '*' for arguments. + // Replace id
    with id/*<>*/ + DeclLoc = ND->getLocation(); + startBuf = SM->getCharacterData(DeclLoc); + const char *argListBegin, *argListEnd; + GetExtentOfArgList(startBuf, argListBegin, argListEnd); + while (argListBegin < argListEnd) { + if (*argListBegin == '^') + buf += '*'; + else if (*argListBegin == '<') { + buf += "/*"; + buf += *argListBegin++; + OrigLength++; + while (*argListBegin != '>') { + buf += *argListBegin++; + OrigLength++; + } + buf += *argListBegin; + buf += "*/"; + } + else + buf += *argListBegin; + argListBegin++; + OrigLength++; + } + buf += ')'; + OrigLength++; + } + ReplaceText(Start, OrigLength, buf); + + return; +} + + +/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes: +/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst, +/// struct Block_byref_id_object *src) { +/// _Block_object_assign (&_dest->object, _src->object, +/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT +/// [|BLOCK_FIELD_IS_WEAK]) // object +/// _Block_object_assign(&_dest->object, _src->object, +/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK +/// [|BLOCK_FIELD_IS_WEAK]) // block +/// } +/// And: +/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) { +/// _Block_object_dispose(_src->object, +/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT +/// [|BLOCK_FIELD_IS_WEAK]) // object +/// _Block_object_dispose(_src->object, +/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK +/// [|BLOCK_FIELD_IS_WEAK]) // block +/// } + +std::string RewriteObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD, + int flag) { + std::string S; + if (CopyDestroyCache.count(flag)) + return S; + CopyDestroyCache.insert(flag); + S = "static void __Block_byref_id_object_copy_"; + S += utostr(flag); + S += "(void *dst, void *src) {\n"; + + // offset into the object pointer is computed as: + // void * + void* + int + int + void* + void * + unsigned IntSize = + static_cast(Context->getTypeSize(Context->IntTy)); + unsigned VoidPtrSize = + static_cast(Context->getTypeSize(Context->VoidPtrTy)); + + unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth(); + S += " _Block_object_assign((char*)dst + "; + S += utostr(offset); + S += ", *(void * *) ((char*)src + "; + S += utostr(offset); + S += "), "; + S += utostr(flag); + S += ");\n}\n"; + + S += "static void __Block_byref_id_object_dispose_"; + S += utostr(flag); + S += "(void *src) {\n"; + S += " _Block_object_dispose(*(void * *) ((char*)src + "; + S += utostr(offset); + S += "), "; + S += utostr(flag); + S += ");\n}\n"; + return S; +} + +/// RewriteByRefVar - For each __block typex ND variable this routine transforms +/// the declaration into: +/// struct __Block_byref_ND { +/// void *__isa; // NULL for everything except __weak pointers +/// struct __Block_byref_ND *__forwarding; +/// int32_t __flags; +/// int32_t __size; +/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object +/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object +/// typex ND; +/// }; +/// +/// It then replaces declaration of ND variable with: +/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag, +/// __size=sizeof(struct __Block_byref_ND), +/// ND=initializer-if-any}; +/// +/// +void RewriteObjC::RewriteByRefVar(VarDecl *ND) { + // Insert declaration for the function in which block literal is + // used. 
+ if (CurFunctionDeclToDeclareForBlock) + RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock); + int flag = 0; + int isa = 0; + SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); + if (DeclLoc.isInvalid()) + // If type location is missing, it is because of missing type (a warning). + // Use variable's location which is good for this case. + DeclLoc = ND->getLocation(); + const char *startBuf = SM->getCharacterData(DeclLoc); + SourceLocation X = ND->getLocEnd(); + X = SM->getExpansionLoc(X); + const char *endBuf = SM->getCharacterData(X); + std::string Name(ND->getNameAsString()); + std::string ByrefType; + RewriteByRefString(ByrefType, Name, ND, true); + ByrefType += " {\n"; + ByrefType += " void *__isa;\n"; + RewriteByRefString(ByrefType, Name, ND); + ByrefType += " *__forwarding;\n"; + ByrefType += " int __flags;\n"; + ByrefType += " int __size;\n"; + // Add void *__Block_byref_id_object_copy; + // void *__Block_byref_id_object_dispose; if needed. + QualType Ty = ND->getType(); + bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty); + if (HasCopyAndDispose) { + ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n"; + ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n"; + } + + QualType T = Ty; + (void)convertBlockPointerToFunctionPointer(T); + T.getAsStringInternal(Name, Context->getPrintingPolicy()); + + ByrefType += " " + Name + ";\n"; + ByrefType += "};\n"; + // Insert this type in global scope. It is needed by helper function. + SourceLocation FunLocStart; + if (CurFunctionDef) + FunLocStart = CurFunctionDef->getTypeSpecStartLoc(); + else { + assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null"); + FunLocStart = CurMethodDef->getLocStart(); + } + InsertText(FunLocStart, ByrefType); + if (Ty.isObjCGCWeak()) { + flag |= BLOCK_FIELD_IS_WEAK; + isa = 1; + } + + if (HasCopyAndDispose) { + flag = BLOCK_BYREF_CALLER; + QualType Ty = ND->getType(); + // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well. + if (Ty->isBlockPointerType()) + flag |= BLOCK_FIELD_IS_BLOCK; + else + flag |= BLOCK_FIELD_IS_OBJECT; + std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag); + if (!HF.empty()) + InsertText(FunLocStart, HF); + } + + // struct __Block_byref_ND ND = + // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND), + // initializer-if-any}; + bool hasInit = (ND->getInit() != 0); + unsigned flags = 0; + if (HasCopyAndDispose) + flags |= BLOCK_HAS_COPY_DISPOSE; + Name = ND->getNameAsString(); + ByrefType.clear(); + RewriteByRefString(ByrefType, Name, ND); + std::string ForwardingCastType("("); + ForwardingCastType += ByrefType + " *)"; + if (!hasInit) { + ByrefType += " " + Name + " = {(void*)"; + ByrefType += utostr(isa); + ByrefType += "," + ForwardingCastType + "&" + Name + ", "; + ByrefType += utostr(flags); + ByrefType += ", "; + ByrefType += "sizeof("; + RewriteByRefString(ByrefType, Name, ND); + ByrefType += ")"; + if (HasCopyAndDispose) { + ByrefType += ", __Block_byref_id_object_copy_"; + ByrefType += utostr(flag); + ByrefType += ", __Block_byref_id_object_dispose_"; + ByrefType += utostr(flag); + } + ByrefType += "};\n"; + unsigned nameSize = Name.size(); + // for block or function pointer declaration. Name is aleady + // part of the declaration. 
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) + nameSize = 1; + ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType); + } + else { + SourceLocation startLoc; + Expr *E = ND->getInit(); + if (const CStyleCastExpr *ECE = dyn_cast(E)) + startLoc = ECE->getLParenLoc(); + else + startLoc = E->getLocStart(); + startLoc = SM->getExpansionLoc(startLoc); + endBuf = SM->getCharacterData(startLoc); + ByrefType += " " + Name; + ByrefType += " = {(void*)"; + ByrefType += utostr(isa); + ByrefType += "," + ForwardingCastType + "&" + Name + ", "; + ByrefType += utostr(flags); + ByrefType += ", "; + ByrefType += "sizeof("; + RewriteByRefString(ByrefType, Name, ND); + ByrefType += "), "; + if (HasCopyAndDispose) { + ByrefType += "__Block_byref_id_object_copy_"; + ByrefType += utostr(flag); + ByrefType += ", __Block_byref_id_object_dispose_"; + ByrefType += utostr(flag); + ByrefType += ", "; + } + ReplaceText(DeclLoc, endBuf-startBuf, ByrefType); + + // Complete the newly synthesized compound expression by inserting a right + // curly brace before the end of the declaration. + // FIXME: This approach avoids rewriting the initializer expression. It + // also assumes there is only one declarator. For example, the following + // isn't currently supported by this routine (in general): + // + // double __block BYREFVAR = 1.34, BYREFVAR2 = 1.37; + // + const char *startInitializerBuf = SM->getCharacterData(startLoc); + const char *semiBuf = strchr(startInitializerBuf, ';'); + assert((*semiBuf == ';') && "RewriteByRefVar: can't find ';'"); + SourceLocation semiLoc = + startLoc.getLocWithOffset(semiBuf-startInitializerBuf); + + InsertText(semiLoc, "}"); + } + return; +} + +void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) { + // Add initializers for any closure decl refs. + GetBlockDeclRefExprs(Exp->getBody()); + if (BlockDeclRefs.size()) { + // Unique all "by copy" declarations. + for (unsigned i = 0; i < BlockDeclRefs.size(); i++) + if (!BlockDeclRefs[i]->getDecl()->hasAttr()) { + if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) { + BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl()); + BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl()); + } + } + // Unique all "by ref" declarations. + for (unsigned i = 0; i < BlockDeclRefs.size(); i++) + if (BlockDeclRefs[i]->getDecl()->hasAttr()) { + if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) { + BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl()); + BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl()); + } + } + // Find any imported blocks...they will need special attention. + for (unsigned i = 0; i < BlockDeclRefs.size(); i++) + if (BlockDeclRefs[i]->getDecl()->hasAttr() || + BlockDeclRefs[i]->getType()->isObjCObjectPointerType() || + BlockDeclRefs[i]->getType()->isBlockPointerType()) + ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl()); + } +} + +FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(StringRef name) { + IdentifierInfo *ID = &Context->Idents.get(name); + QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy); + return FunctionDecl::Create(*Context, TUDecl, SourceLocation(), + SourceLocation(), ID, FType, 0, SC_Extern, + SC_None, false, false); +} + +Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp, + const SmallVector &InnerBlockDeclRefs) { + const BlockDecl *block = Exp->getBlockDecl(); + Blocks.push_back(Exp); + + CollectBlockDeclRefInfo(Exp); + + // Add inner imported variables now used in current block. 
+ int countOfInnerDecls = 0; + if (!InnerBlockDeclRefs.empty()) { + for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) { + DeclRefExpr *Exp = InnerBlockDeclRefs[i]; + ValueDecl *VD = Exp->getDecl(); + if (!VD->hasAttr() && !BlockByCopyDeclsPtrSet.count(VD)) { + // We need to save the copied-in variables in nested + // blocks because it is needed at the end for some of the API generations. + // See SynthesizeBlockLiterals routine. + InnerDeclRefs.push_back(Exp); countOfInnerDecls++; + BlockDeclRefs.push_back(Exp); + BlockByCopyDeclsPtrSet.insert(VD); + BlockByCopyDecls.push_back(VD); + } + if (VD->hasAttr() && !BlockByRefDeclsPtrSet.count(VD)) { + InnerDeclRefs.push_back(Exp); countOfInnerDecls++; + BlockDeclRefs.push_back(Exp); + BlockByRefDeclsPtrSet.insert(VD); + BlockByRefDecls.push_back(VD); + } + } + // Find any imported blocks...they will need special attention. + for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) + if (InnerBlockDeclRefs[i]->getDecl()->hasAttr() || + InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() || + InnerBlockDeclRefs[i]->getType()->isBlockPointerType()) + ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl()); + } + InnerDeclRefsCount.push_back(countOfInnerDecls); + + std::string FuncName; + + if (CurFunctionDef) + FuncName = CurFunctionDef->getNameAsString(); + else if (CurMethodDef) + BuildUniqueMethodName(FuncName, CurMethodDef); + else if (GlobalVarDecl) + FuncName = std::string(GlobalVarDecl->getNameAsString()); + + std::string BlockNumber = utostr(Blocks.size()-1); + + std::string Tag = "__" + FuncName + "_block_impl_" + BlockNumber; + std::string Func = "__" + FuncName + "_block_func_" + BlockNumber; + + // Get a pointer to the function type so we can cast appropriately. + QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType()); + QualType FType = Context->getPointerType(BFT); + + FunctionDecl *FD; + Expr *NewRep; + + // Simulate a contructor call... + FD = SynthBlockInitFunctionDecl(Tag); + DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue, + SourceLocation()); + + SmallVector InitExprs; + + // Initialize the block function. + FD = SynthBlockInitFunctionDecl(Func); + DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), + VK_LValue, SourceLocation()); + CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, + CK_BitCast, Arg); + InitExprs.push_back(castExpr); + + // Initialize the block descriptor. + std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA"; + + VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, + SourceLocation(), SourceLocation(), + &Context->Idents.get(DescData.c_str()), + Context->VoidPtrTy, 0, + SC_Static, SC_None); + UnaryOperator *DescRefExpr = + new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false, + Context->VoidPtrTy, + VK_LValue, + SourceLocation()), + UO_AddrOf, + Context->getPointerType(Context->VoidPtrTy), + VK_RValue, OK_Ordinary, + SourceLocation()); + InitExprs.push_back(DescRefExpr); + + // Add initializers for any closure decl refs. + if (BlockDeclRefs.size()) { + Expr *Exp; + // Output all "by copy" declarations. + for (SmallVector::iterator I = BlockByCopyDecls.begin(), + E = BlockByCopyDecls.end(); I != E; ++I) { + if (isObjCType((*I)->getType())) { + // FIXME: Conform to ABI ([[obj retain] autorelease]). 
+ FD = SynthBlockInitFunctionDecl((*I)->getName()); + Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, + SourceLocation()); + if (HasLocalVariableExternalStorage(*I)) { + QualType QT = (*I)->getType(); + QT = Context->getPointerType(QT); + Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue, + OK_Ordinary, SourceLocation()); + } + } else if (isTopLevelBlockPointerType((*I)->getType())) { + FD = SynthBlockInitFunctionDecl((*I)->getName()); + Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, + SourceLocation()); + Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, + CK_BitCast, Arg); + } else { + FD = SynthBlockInitFunctionDecl((*I)->getName()); + Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, + SourceLocation()); + if (HasLocalVariableExternalStorage(*I)) { + QualType QT = (*I)->getType(); + QT = Context->getPointerType(QT); + Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue, + OK_Ordinary, SourceLocation()); + } + + } + InitExprs.push_back(Exp); + } + // Output all "by ref" declarations. + for (SmallVector::iterator I = BlockByRefDecls.begin(), + E = BlockByRefDecls.end(); I != E; ++I) { + ValueDecl *ND = (*I); + std::string Name(ND->getNameAsString()); + std::string RecName; + RewriteByRefString(RecName, Name, ND, true); + IdentifierInfo *II = &Context->Idents.get(RecName.c_str() + + sizeof("struct")); + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + II); + assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl"); + QualType castT = Context->getPointerType(Context->getTagDeclType(RD)); + + FD = SynthBlockInitFunctionDecl((*I)->getName()); + Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, + SourceLocation()); + bool isNestedCapturedVar = false; + if (block) + for (BlockDecl::capture_const_iterator ci = block->capture_begin(), + ce = block->capture_end(); ci != ce; ++ci) { + const VarDecl *variable = ci->getVariable(); + if (variable == ND && ci->isNested()) { + assert (ci->isByRef() && + "SynthBlockInitExpr - captured block variable is not byref"); + isNestedCapturedVar = true; + break; + } + } + // captured nested byref variable has its address passed. Do not take + // its address again. 
+ if (!isNestedCapturedVar) + Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, + Context->getPointerType(Exp->getType()), + VK_RValue, OK_Ordinary, SourceLocation()); + Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp); + InitExprs.push_back(Exp); + } + } + if (ImportedBlockDecls.size()) { + // generate BLOCK_HAS_COPY_DISPOSE(have helper funcs) | BLOCK_HAS_DESCRIPTOR + int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR); + unsigned IntSize = + static_cast(Context->getTypeSize(Context->IntTy)); + Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag), + Context->IntTy, SourceLocation()); + InitExprs.push_back(FlagExp); + } + NewRep = new (Context) CallExpr(*Context, DRE, InitExprs, + FType, VK_LValue, SourceLocation()); + NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf, + Context->getPointerType(NewRep->getType()), + VK_RValue, OK_Ordinary, SourceLocation()); + NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast, + NewRep); + BlockDeclRefs.clear(); + BlockByRefDecls.clear(); + BlockByRefDeclsPtrSet.clear(); + BlockByCopyDecls.clear(); + BlockByCopyDeclsPtrSet.clear(); + ImportedBlockDecls.clear(); + return NewRep; +} + +bool RewriteObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) { + if (const ObjCForCollectionStmt * CS = + dyn_cast(Stmts.back())) + return CS->getElement() == DS; + return false; +} + +//===----------------------------------------------------------------------===// +// Function Body / Expression rewriting +//===----------------------------------------------------------------------===// + +Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) { + if (isa(S) || isa(S) || + isa(S) || isa(S)) + Stmts.push_back(S); + else if (isa(S)) { + Stmts.push_back(S); + ObjCBcLabelNo.push_back(++BcLabelCount); + } + + // Pseudo-object operations and ivar references need special + // treatment because we're going to recursively rewrite them. + if (PseudoObjectExpr *PseudoOp = dyn_cast(S)) { + if (isa(PseudoOp->getSyntacticForm())) { + return RewritePropertyOrImplicitSetter(PseudoOp); + } else { + return RewritePropertyOrImplicitGetter(PseudoOp); + } + } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast(S)) { + return RewriteObjCIvarRefExpr(IvarRefExpr); + } + + SourceRange OrigStmtRange = S->getSourceRange(); + + // Perform a bottom up rewrite of all children. + for (Stmt::child_range CI = S->children(); CI; ++CI) + if (*CI) { + Stmt *childStmt = (*CI); + Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt); + if (newStmt) { + *CI = newStmt; + } + } + + if (BlockExpr *BE = dyn_cast(S)) { + SmallVector InnerBlockDeclRefs; + llvm::SmallPtrSet InnerContexts; + InnerContexts.insert(BE->getBlockDecl()); + ImportedLocalExternalDecls.clear(); + GetInnerBlockDeclRefExprs(BE->getBody(), + InnerBlockDeclRefs, InnerContexts); + // Rewrite the block body in place. + Stmt *SaveCurrentBody = CurrentBody; + CurrentBody = BE->getBody(); + PropParentMap = 0; + // block literal on rhs of a property-dot-sytax assignment + // must be replaced by its synthesize ast so getRewrittenText + // works as expected. 
In this case, what actually ends up on RHS + // is the blockTranscribed which is the helper function for the + // block literal; as in: self.c = ^() {[ace ARR];}; + bool saveDisableReplaceStmt = DisableReplaceStmt; + DisableReplaceStmt = false; + RewriteFunctionBodyOrGlobalInitializer(BE->getBody()); + DisableReplaceStmt = saveDisableReplaceStmt; + CurrentBody = SaveCurrentBody; + PropParentMap = 0; + ImportedLocalExternalDecls.clear(); + // Now we snarf the rewritten text and stash it away for later use. + std::string Str = Rewrite.getRewrittenText(BE->getSourceRange()); + RewrittenBlockExprs[BE] = Str; + + Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs); + + //blockTranscribed->dump(); + ReplaceStmt(S, blockTranscribed); + return blockTranscribed; + } + // Handle specific things. + if (ObjCEncodeExpr *AtEncode = dyn_cast(S)) + return RewriteAtEncode(AtEncode); + + if (ObjCSelectorExpr *AtSelector = dyn_cast(S)) + return RewriteAtSelector(AtSelector); + + if (ObjCStringLiteral *AtString = dyn_cast(S)) + return RewriteObjCStringLiteral(AtString); + + if (ObjCMessageExpr *MessExpr = dyn_cast(S)) { +#if 0 + // Before we rewrite it, put the original message expression in a comment. + SourceLocation startLoc = MessExpr->getLocStart(); + SourceLocation endLoc = MessExpr->getLocEnd(); + + const char *startBuf = SM->getCharacterData(startLoc); + const char *endBuf = SM->getCharacterData(endLoc); + + std::string messString; + messString += "// "; + messString.append(startBuf, endBuf-startBuf+1); + messString += "\n"; + + // FIXME: Missing definition of + // InsertText(clang::SourceLocation, char const*, unsigned int). + // InsertText(startLoc, messString.c_str(), messString.size()); + // Tried this, but it didn't work either... + // ReplaceText(startLoc, 0, messString.c_str(), messString.size()); +#endif + return RewriteMessageExpr(MessExpr); + } + + if (ObjCAtTryStmt *StmtTry = dyn_cast(S)) + return RewriteObjCTryStmt(StmtTry); + + if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast(S)) + return RewriteObjCSynchronizedStmt(StmtTry); + + if (ObjCAtThrowStmt *StmtThrow = dyn_cast(S)) + return RewriteObjCThrowStmt(StmtThrow); + + if (ObjCProtocolExpr *ProtocolExp = dyn_cast(S)) + return RewriteObjCProtocolExpr(ProtocolExp); + + if (ObjCForCollectionStmt *StmtForCollection = + dyn_cast(S)) + return RewriteObjCForCollectionStmt(StmtForCollection, + OrigStmtRange.getEnd()); + if (BreakStmt *StmtBreakStmt = + dyn_cast(S)) + return RewriteBreakStmt(StmtBreakStmt); + if (ContinueStmt *StmtContinueStmt = + dyn_cast(S)) + return RewriteContinueStmt(StmtContinueStmt); + + // Need to check for protocol refs (id
, Foo
    *) in variable decls + // and cast exprs. + if (DeclStmt *DS = dyn_cast(S)) { + // FIXME: What we're doing here is modifying the type-specifier that + // precedes the first Decl. In the future the DeclGroup should have + // a separate type-specifier that we can rewrite. + // NOTE: We need to avoid rewriting the DeclStmt if it is within + // the context of an ObjCForCollectionStmt. For example: + // NSArray *someArray; + // for (id index in someArray) ; + // This is because RewriteObjCForCollectionStmt() does textual rewriting + // and it depends on the original text locations/positions. + if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS)) + RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin()); + + // Blocks rewrite rules. + for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end(); + DI != DE; ++DI) { + Decl *SD = *DI; + if (ValueDecl *ND = dyn_cast(SD)) { + if (isTopLevelBlockPointerType(ND->getType())) + RewriteBlockPointerDecl(ND); + else if (ND->getType()->isFunctionPointerType()) + CheckFunctionPointerDecl(ND->getType(), ND); + if (VarDecl *VD = dyn_cast(SD)) { + if (VD->hasAttr()) { + static unsigned uniqueByrefDeclCount = 0; + assert(!BlockByRefDeclNo.count(ND) && + "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl"); + BlockByRefDeclNo[ND] = uniqueByrefDeclCount++; + RewriteByRefVar(VD); + } + else + RewriteTypeOfDecl(VD); + } + } + if (TypedefNameDecl *TD = dyn_cast(SD)) { + if (isTopLevelBlockPointerType(TD->getUnderlyingType())) + RewriteBlockPointerDecl(TD); + else if (TD->getUnderlyingType()->isFunctionPointerType()) + CheckFunctionPointerDecl(TD->getUnderlyingType(), TD); + } + } + } + + if (CStyleCastExpr *CE = dyn_cast(S)) + RewriteObjCQualifiedInterfaceTypes(CE); + + if (isa(S) || isa(S) || + isa(S) || isa(S)) { + assert(!Stmts.empty() && "Statement stack is empty"); + assert ((isa(Stmts.back()) || isa(Stmts.back()) || + isa(Stmts.back()) || isa(Stmts.back())) + && "Statement stack mismatch"); + Stmts.pop_back(); + } + // Handle blocks rewriting. + if (DeclRefExpr *DRE = dyn_cast(S)) { + ValueDecl *VD = DRE->getDecl(); + if (VD->hasAttr()) + return RewriteBlockDeclRefExpr(DRE); + if (HasLocalVariableExternalStorage(VD)) + return RewriteLocalVariableExternalStorage(DRE); + } + + if (CallExpr *CE = dyn_cast(S)) { + if (CE->getCallee()->getType()->isBlockPointerType()) { + Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee()); + ReplaceStmt(S, BlockCall); + return BlockCall; + } + } + if (CStyleCastExpr *CE = dyn_cast(S)) { + RewriteCastExpr(CE); + } +#if 0 + if (ImplicitCastExpr *ICE = dyn_cast(S)) { + CastExpr *Replacement = new (Context) CastExpr(ICE->getType(), + ICE->getSubExpr(), + SourceLocation()); + // Get the new text. + std::string SStr; + llvm::raw_string_ostream Buf(SStr); + Replacement->printPretty(Buf); + const std::string &Str = Buf.str(); + + printf("CAST = %s\n", &Str[0]); + InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size()); + delete S; + return Replacement; + } +#endif + // Return this stmt unmodified. 
+ return S; +} + +void RewriteObjC::RewriteRecordBody(RecordDecl *RD) { + for (RecordDecl::field_iterator i = RD->field_begin(), + e = RD->field_end(); i != e; ++i) { + FieldDecl *FD = *i; + if (isTopLevelBlockPointerType(FD->getType())) + RewriteBlockPointerDecl(FD); + if (FD->getType()->isObjCQualifiedIdType() || + FD->getType()->isObjCQualifiedInterfaceType()) + RewriteObjCQualifiedInterfaceTypes(FD); + } +} + +/// HandleDeclInMainFile - This is called for each top-level decl defined in the +/// main file of the input. +void RewriteObjC::HandleDeclInMainFile(Decl *D) { + switch (D->getKind()) { + case Decl::Function: { + FunctionDecl *FD = cast(D); + if (FD->isOverloadedOperator()) + return; + + // Since function prototypes don't have ParmDecl's, we check the function + // prototype. This enables us to rewrite function declarations and + // definitions using the same code. + RewriteBlocksInFunctionProtoType(FD->getType(), FD); + + if (!FD->isThisDeclarationADefinition()) + break; + + // FIXME: If this should support Obj-C++, support CXXTryStmt + if (CompoundStmt *Body = dyn_cast_or_null(FD->getBody())) { + CurFunctionDef = FD; + CurFunctionDeclToDeclareForBlock = FD; + CurrentBody = Body; + Body = + cast_or_null(RewriteFunctionBodyOrGlobalInitializer(Body)); + FD->setBody(Body); + CurrentBody = 0; + if (PropParentMap) { + delete PropParentMap; + PropParentMap = 0; + } + // This synthesizes and inserts the block "impl" struct, invoke function, + // and any copy/dispose helper functions. + InsertBlockLiteralsWithinFunction(FD); + CurFunctionDef = 0; + CurFunctionDeclToDeclareForBlock = 0; + } + break; + } + case Decl::ObjCMethod: { + ObjCMethodDecl *MD = cast(D); + if (CompoundStmt *Body = MD->getCompoundBody()) { + CurMethodDef = MD; + CurrentBody = Body; + Body = + cast_or_null(RewriteFunctionBodyOrGlobalInitializer(Body)); + MD->setBody(Body); + CurrentBody = 0; + if (PropParentMap) { + delete PropParentMap; + PropParentMap = 0; + } + InsertBlockLiteralsWithinMethod(MD); + CurMethodDef = 0; + } + break; + } + case Decl::ObjCImplementation: { + ObjCImplementationDecl *CI = cast(D); + ClassImplementation.push_back(CI); + break; + } + case Decl::ObjCCategoryImpl: { + ObjCCategoryImplDecl *CI = cast(D); + CategoryImplementation.push_back(CI); + break; + } + case Decl::Var: { + VarDecl *VD = cast(D); + RewriteObjCQualifiedInterfaceTypes(VD); + if (isTopLevelBlockPointerType(VD->getType())) + RewriteBlockPointerDecl(VD); + else if (VD->getType()->isFunctionPointerType()) { + CheckFunctionPointerDecl(VD->getType(), VD); + if (VD->getInit()) { + if (CStyleCastExpr *CE = dyn_cast(VD->getInit())) { + RewriteCastExpr(CE); + } + } + } else if (VD->getType()->isRecordType()) { + RecordDecl *RD = VD->getType()->getAs()->getDecl(); + if (RD->isCompleteDefinition()) + RewriteRecordBody(RD); + } + if (VD->getInit()) { + GlobalVarDecl = VD; + CurrentBody = VD->getInit(); + RewriteFunctionBodyOrGlobalInitializer(VD->getInit()); + CurrentBody = 0; + if (PropParentMap) { + delete PropParentMap; + PropParentMap = 0; + } + SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName()); + GlobalVarDecl = 0; + + // This is needed for blocks. 
+ if (CStyleCastExpr *CE = dyn_cast(VD->getInit())) { + RewriteCastExpr(CE); + } + } + break; + } + case Decl::TypeAlias: + case Decl::Typedef: { + if (TypedefNameDecl *TD = dyn_cast(D)) { + if (isTopLevelBlockPointerType(TD->getUnderlyingType())) + RewriteBlockPointerDecl(TD); + else if (TD->getUnderlyingType()->isFunctionPointerType()) + CheckFunctionPointerDecl(TD->getUnderlyingType(), TD); + } + break; + } + case Decl::CXXRecord: + case Decl::Record: { + RecordDecl *RD = cast(D); + if (RD->isCompleteDefinition()) + RewriteRecordBody(RD); + break; + } + default: + break; + } + // Nothing yet. +} + +void RewriteObjC::HandleTranslationUnit(ASTContext &C) { + if (Diags.hasErrorOccurred()) + return; + + RewriteInclude(); + + // Here's a great place to add any extra declarations that may be needed. + // Write out meta data for each @protocol(). + for (llvm::SmallPtrSet::iterator I = ProtocolExprDecls.begin(), + E = ProtocolExprDecls.end(); I != E; ++I) + RewriteObjCProtocolMetaData(*I, "", "", Preamble); + + InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false); + if (ClassImplementation.size() || CategoryImplementation.size()) + RewriteImplementations(); + + // Get the buffer corresponding to MainFileID. If we haven't changed it, then + // we are done. + if (const RewriteBuffer *RewriteBuf = + Rewrite.getRewriteBufferFor(MainFileID)) { + //printf("Changed:\n"); + *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end()); + } else { + llvm::errs() << "No changes\n"; + } + + if (ClassImplementation.size() || CategoryImplementation.size() || + ProtocolExprDecls.size()) { + // Rewrite Objective-c meta data* + std::string ResultStr; + RewriteMetaDataIntoBuffer(ResultStr); + // Emit metadata. + *OutFile << ResultStr; + } + OutFile->flush(); +} + +void RewriteObjCFragileABI::Initialize(ASTContext &context) { + InitializeCommon(context); + + // declaring objc_selector outside the parameter list removes a silly + // scope related warning... + if (IsHeader) + Preamble = "#pragma once\n"; + Preamble += "struct objc_selector; struct objc_class;\n"; + Preamble += "struct __rw_objc_super { struct objc_object *object; "; + Preamble += "struct objc_object *superClass; "; + if (LangOpts.MicrosoftExt) { + // Add a constructor for creating temporary objects. 
+ Preamble += "__rw_objc_super(struct objc_object *o, struct objc_object *s) " + ": "; + Preamble += "object(o), superClass(s) {} "; + } + Preamble += "};\n"; + Preamble += "#ifndef _REWRITER_typedef_Protocol\n"; + Preamble += "typedef struct objc_object Protocol;\n"; + Preamble += "#define _REWRITER_typedef_Protocol\n"; + Preamble += "#endif\n"; + if (LangOpts.MicrosoftExt) { + Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n"; + Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n"; + } else + Preamble += "#define __OBJC_RW_DLLIMPORT extern\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend"; + Preamble += "(struct objc_object *, struct objc_selector *, ...);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper"; + Preamble += "(struct objc_super *, struct objc_selector *, ...);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSend_stret"; + Preamble += "(struct objc_object *, struct objc_selector *, ...);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSendSuper_stret"; + Preamble += "(struct objc_super *, struct objc_selector *, ...);\n"; + Preamble += "__OBJC_RW_DLLIMPORT double objc_msgSend_fpret"; + Preamble += "(struct objc_object *, struct objc_selector *, ...);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass"; + Preamble += "(const char *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass"; + Preamble += "(struct objc_class *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass"; + Preamble += "(const char *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw(struct objc_object *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_enter(void *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_exit(void *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_exception_extract(void *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match"; + Preamble += "(struct objc_class *, struct objc_object *);\n"; + // @synchronized hooks. + Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_enter(struct objc_object *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT int objc_sync_exit(struct objc_object *);\n"; + Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n"; + Preamble += "#ifndef __FASTENUMERATIONSTATE\n"; + Preamble += "struct __objcFastEnumerationState {\n\t"; + Preamble += "unsigned long state;\n\t"; + Preamble += "void **itemsPtr;\n\t"; + Preamble += "unsigned long *mutationsPtr;\n\t"; + Preamble += "unsigned long extra[5];\n};\n"; + Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n"; + Preamble += "#define __FASTENUMERATIONSTATE\n"; + Preamble += "#endif\n"; + Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n"; + Preamble += "struct __NSConstantStringImpl {\n"; + Preamble += " int *isa;\n"; + Preamble += " int flags;\n"; + Preamble += " char *str;\n"; + Preamble += " long length;\n"; + Preamble += "};\n"; + Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n"; + Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n"; + Preamble += "#else\n"; + Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n"; + Preamble += "#endif\n"; + Preamble += "#define __NSCONSTANTSTRINGIMPL\n"; + Preamble += "#endif\n"; + // Blocks preamble. 
+ Preamble += "#ifndef BLOCK_IMPL\n"; + Preamble += "#define BLOCK_IMPL\n"; + Preamble += "struct __block_impl {\n"; + Preamble += " void *isa;\n"; + Preamble += " int Flags;\n"; + Preamble += " int Reserved;\n"; + Preamble += " void *FuncPtr;\n"; + Preamble += "};\n"; + Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n"; + Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n"; + Preamble += "extern \"C\" __declspec(dllexport) " + "void _Block_object_assign(void *, const void *, const int);\n"; + Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n"; + Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n"; + Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n"; + Preamble += "#else\n"; + Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n"; + Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n"; + Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n"; + Preamble += "#endif\n"; + Preamble += "#endif\n"; + if (LangOpts.MicrosoftExt) { + Preamble += "#undef __OBJC_RW_DLLIMPORT\n"; + Preamble += "#undef __OBJC_RW_STATICIMPORT\n"; + Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests. + Preamble += "#define __attribute__(X)\n"; + Preamble += "#endif\n"; + Preamble += "#define __weak\n"; + } + else { + Preamble += "#define __block\n"; + Preamble += "#define __weak\n"; + } + // NOTE! Windows uses LLP64 for 64bit mode. So, cast pointer to long long + // as this avoids warning in any 64bit/32bit compilation model. + Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n"; +} + +/// RewriteIvarOffsetComputation - This rutine synthesizes computation of +/// ivar offset. +void RewriteObjCFragileABI::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, + std::string &Result) { + if (ivar->isBitField()) { + // FIXME: The hack below doesn't work for bitfields. For now, we simply + // place all bitfields at offset 0. + Result += "0"; + } else { + Result += "__OFFSETOFIVAR__(struct "; + Result += ivar->getContainingInterface()->getNameAsString(); + if (LangOpts.MicrosoftExt) + Result += "_IMPL"; + Result += ", "; + Result += ivar->getNameAsString(); + Result += ")"; + } +} + +/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data. +void RewriteObjCFragileABI::RewriteObjCProtocolMetaData( + ObjCProtocolDecl *PDecl, StringRef prefix, + StringRef ClassName, std::string &Result) { + static bool objc_protocol_methods = false; + + // Output struct protocol_methods holder of method selector and type. + if (!objc_protocol_methods && PDecl->hasDefinition()) { + /* struct protocol_methods { + SEL _cmd; + char *method_types; + } + */ + Result += "\nstruct _protocol_methods {\n"; + Result += "\tstruct objc_selector *_cmd;\n"; + Result += "\tchar *method_types;\n"; + Result += "};\n"; + + objc_protocol_methods = true; + } + // Do not synthesize the protocol more than once. 
+ if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl())) + return; + + if (ObjCProtocolDecl *Def = PDecl->getDefinition()) + PDecl = Def; + + if (PDecl->instmeth_begin() != PDecl->instmeth_end()) { + unsigned NumMethods = std::distance(PDecl->instmeth_begin(), + PDecl->instmeth_end()); + /* struct _objc_protocol_method_list { + int protocol_method_count; + struct protocol_methods protocols[]; + } + */ + Result += "\nstatic struct {\n"; + Result += "\tint protocol_method_count;\n"; + Result += "\tstruct _protocol_methods protocol_methods["; + Result += utostr(NumMethods); + Result += "];\n} _OBJC_PROTOCOL_INSTANCE_METHODS_"; + Result += PDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__OBJC, __cat_inst_meth\")))= " + "{\n\t" + utostr(NumMethods) + "\n"; + + // Output instance methods declared in this protocol. + for (ObjCProtocolDecl::instmeth_iterator + I = PDecl->instmeth_begin(), E = PDecl->instmeth_end(); + I != E; ++I) { + if (I == PDecl->instmeth_begin()) + Result += "\t ,{{(struct objc_selector *)\""; + else + Result += "\t ,{(struct objc_selector *)\""; + Result += (*I)->getSelector().getAsString(); + std::string MethodTypeString; + Context->getObjCEncodingForMethodDecl((*I), MethodTypeString); + Result += "\", \""; + Result += MethodTypeString; + Result += "\"}\n"; + } + Result += "\t }\n};\n"; + } + + // Output class methods declared in this protocol. + unsigned NumMethods = std::distance(PDecl->classmeth_begin(), + PDecl->classmeth_end()); + if (NumMethods > 0) { + /* struct _objc_protocol_method_list { + int protocol_method_count; + struct protocol_methods protocols[]; + } + */ + Result += "\nstatic struct {\n"; + Result += "\tint protocol_method_count;\n"; + Result += "\tstruct _protocol_methods protocol_methods["; + Result += utostr(NumMethods); + Result += "];\n} _OBJC_PROTOCOL_CLASS_METHODS_"; + Result += PDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= " + "{\n\t"; + Result += utostr(NumMethods); + Result += "\n"; + + // Output instance methods declared in this protocol. 
+ for (ObjCProtocolDecl::classmeth_iterator + I = PDecl->classmeth_begin(), E = PDecl->classmeth_end(); + I != E; ++I) { + if (I == PDecl->classmeth_begin()) + Result += "\t ,{{(struct objc_selector *)\""; + else + Result += "\t ,{(struct objc_selector *)\""; + Result += (*I)->getSelector().getAsString(); + std::string MethodTypeString; + Context->getObjCEncodingForMethodDecl((*I), MethodTypeString); + Result += "\", \""; + Result += MethodTypeString; + Result += "\"}\n"; + } + Result += "\t }\n};\n"; + } + + // Output: + /* struct _objc_protocol { + // Objective-C 1.0 extensions + struct _objc_protocol_extension *isa; + char *protocol_name; + struct _objc_protocol **protocol_list; + struct _objc_protocol_method_list *instance_methods; + struct _objc_protocol_method_list *class_methods; + }; + */ + static bool objc_protocol = false; + if (!objc_protocol) { + Result += "\nstruct _objc_protocol {\n"; + Result += "\tstruct _objc_protocol_extension *isa;\n"; + Result += "\tchar *protocol_name;\n"; + Result += "\tstruct _objc_protocol **protocol_list;\n"; + Result += "\tstruct _objc_protocol_method_list *instance_methods;\n"; + Result += "\tstruct _objc_protocol_method_list *class_methods;\n"; + Result += "};\n"; + + objc_protocol = true; + } + + Result += "\nstatic struct _objc_protocol _OBJC_PROTOCOL_"; + Result += PDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__OBJC, __protocol\")))= " + "{\n\t0, \""; + Result += PDecl->getNameAsString(); + Result += "\", 0, "; + if (PDecl->instmeth_begin() != PDecl->instmeth_end()) { + Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_"; + Result += PDecl->getNameAsString(); + Result += ", "; + } + else + Result += "0, "; + if (PDecl->classmeth_begin() != PDecl->classmeth_end()) { + Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_CLASS_METHODS_"; + Result += PDecl->getNameAsString(); + Result += "\n"; + } + else + Result += "0\n"; + Result += "};\n"; + + // Mark this protocol as having been generated. + if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl())) + llvm_unreachable("protocol already synthesized"); + +} + +void RewriteObjCFragileABI::RewriteObjCProtocolListMetaData( + const ObjCList &Protocols, + StringRef prefix, StringRef ClassName, + std::string &Result) { + if (Protocols.empty()) return; + + for (unsigned i = 0; i != Protocols.size(); i++) + RewriteObjCProtocolMetaData(Protocols[i], prefix, ClassName, Result); + + // Output the top lovel protocol meta-data for the class. 
+ /* struct _objc_protocol_list { + struct _objc_protocol_list *next; + int protocol_count; + struct _objc_protocol *class_protocols[]; + } + */ + Result += "\nstatic struct {\n"; + Result += "\tstruct _objc_protocol_list *next;\n"; + Result += "\tint protocol_count;\n"; + Result += "\tstruct _objc_protocol *class_protocols["; + Result += utostr(Protocols.size()); + Result += "];\n} _OBJC_"; + Result += prefix; + Result += "_PROTOCOLS_"; + Result += ClassName; + Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= " + "{\n\t0, "; + Result += utostr(Protocols.size()); + Result += "\n"; + + Result += "\t,{&_OBJC_PROTOCOL_"; + Result += Protocols[0]->getNameAsString(); + Result += " \n"; + + for (unsigned i = 1; i != Protocols.size(); i++) { + Result += "\t ,&_OBJC_PROTOCOL_"; + Result += Protocols[i]->getNameAsString(); + Result += "\n"; + } + Result += "\t }\n};\n"; +} + +void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, + std::string &Result) { + ObjCInterfaceDecl *CDecl = IDecl->getClassInterface(); + + // Explicitly declared @interface's are already synthesized. + if (CDecl->isImplicitInterfaceDecl()) { + // FIXME: Implementation of a class with no @interface (legacy) does not + // produce correct synthesis as yet. + RewriteObjCInternalStruct(CDecl, Result); + } + + // Build _objc_ivar_list metadata for classes ivars if needed + unsigned NumIvars = !IDecl->ivar_empty() + ? IDecl->ivar_size() + : (CDecl ? CDecl->ivar_size() : 0); + if (NumIvars > 0) { + static bool objc_ivar = false; + if (!objc_ivar) { + /* struct _objc_ivar { + char *ivar_name; + char *ivar_type; + int ivar_offset; + }; + */ + Result += "\nstruct _objc_ivar {\n"; + Result += "\tchar *ivar_name;\n"; + Result += "\tchar *ivar_type;\n"; + Result += "\tint ivar_offset;\n"; + Result += "};\n"; + + objc_ivar = true; + } + + /* struct { + int ivar_count; + struct _objc_ivar ivar_list[nIvars]; + }; + */ + Result += "\nstatic struct {\n"; + Result += "\tint ivar_count;\n"; + Result += "\tstruct _objc_ivar ivar_list["; + Result += utostr(NumIvars); + Result += "];\n} _OBJC_INSTANCE_VARIABLES_"; + Result += IDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__OBJC, __instance_vars\")))= " + "{\n\t"; + Result += utostr(NumIvars); + Result += "\n"; + + ObjCInterfaceDecl::ivar_iterator IVI, IVE; + SmallVector IVars; + if (!IDecl->ivar_empty()) { + for (ObjCInterfaceDecl::ivar_iterator + IV = IDecl->ivar_begin(), IVEnd = IDecl->ivar_end(); + IV != IVEnd; ++IV) + IVars.push_back(*IV); + IVI = IDecl->ivar_begin(); + IVE = IDecl->ivar_end(); + } else { + IVI = CDecl->ivar_begin(); + IVE = CDecl->ivar_end(); + } + Result += "\t,{{\""; + Result += IVI->getNameAsString(); + Result += "\", \""; + std::string TmpString, StrEncoding; + Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI); + QuoteDoublequotes(TmpString, StrEncoding); + Result += StrEncoding; + Result += "\", "; + RewriteIvarOffsetComputation(*IVI, Result); + Result += "}\n"; + for (++IVI; IVI != IVE; ++IVI) { + Result += "\t ,{\""; + Result += IVI->getNameAsString(); + Result += "\", \""; + std::string TmpString, StrEncoding; + Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI); + QuoteDoublequotes(TmpString, StrEncoding); + Result += StrEncoding; + Result += "\", "; + RewriteIvarOffsetComputation(*IVI, Result); + Result += "}\n"; + } + + Result += "\t }\n};\n"; + } + + // Build _objc_method_list for class's instance methods if needed + SmallVector + 
InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end()); + + // If any of our property implementations have associated getters or + // setters, produce metadata for them as well. + for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(), + PropEnd = IDecl->propimpl_end(); + Prop != PropEnd; ++Prop) { + if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) + continue; + if (!Prop->getPropertyIvarDecl()) + continue; + ObjCPropertyDecl *PD = Prop->getPropertyDecl(); + if (!PD) + continue; + if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl()) + if (!Getter->isDefined()) + InstanceMethods.push_back(Getter); + if (PD->isReadOnly()) + continue; + if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl()) + if (!Setter->isDefined()) + InstanceMethods.push_back(Setter); + } + RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(), + true, "", IDecl->getName(), Result); + + // Build _objc_method_list for class's class methods if needed + RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(), + false, "", IDecl->getName(), Result); + + // Protocols referenced in class declaration? + RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), + "CLASS", CDecl->getName(), Result); + + // Declaration of class/meta-class metadata + /* struct _objc_class { + struct _objc_class *isa; // or const char *root_class_name when metadata + const char *super_class_name; + char *name; + long version; + long info; + long instance_size; + struct _objc_ivar_list *ivars; + struct _objc_method_list *methods; + struct objc_cache *cache; + struct objc_protocol_list *protocols; + const char *ivar_layout; + struct _objc_class_ext *ext; + }; + */ + static bool objc_class = false; + if (!objc_class) { + Result += "\nstruct _objc_class {\n"; + Result += "\tstruct _objc_class *isa;\n"; + Result += "\tconst char *super_class_name;\n"; + Result += "\tchar *name;\n"; + Result += "\tlong version;\n"; + Result += "\tlong info;\n"; + Result += "\tlong instance_size;\n"; + Result += "\tstruct _objc_ivar_list *ivars;\n"; + Result += "\tstruct _objc_method_list *methods;\n"; + Result += "\tstruct objc_cache *cache;\n"; + Result += "\tstruct _objc_protocol_list *protocols;\n"; + Result += "\tconst char *ivar_layout;\n"; + Result += "\tstruct _objc_class_ext *ext;\n"; + Result += "};\n"; + objc_class = true; + } + + // Meta-class metadata generation. + ObjCInterfaceDecl *RootClass = 0; + ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass(); + while (SuperClass) { + RootClass = SuperClass; + SuperClass = SuperClass->getSuperClass(); + } + SuperClass = CDecl->getSuperClass(); + + Result += "\nstatic struct _objc_class _OBJC_METACLASS_"; + Result += CDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__OBJC, __meta_class\")))= " + "{\n\t(struct _objc_class *)\""; + Result += (RootClass ? RootClass->getNameAsString() : CDecl->getNameAsString()); + Result += "\""; + + if (SuperClass) { + Result += ", \""; + Result += SuperClass->getNameAsString(); + Result += "\", \""; + Result += CDecl->getNameAsString(); + Result += "\""; + } + else { + Result += ", 0, \""; + Result += CDecl->getNameAsString(); + Result += "\""; + } + // Set 'ivars' field for root class to 0. ObjC1 runtime does not use it. 
+ // 'info' field is initialized to CLS_META(2) for metaclass + Result += ", 0,2, sizeof(struct _objc_class), 0"; + if (IDecl->classmeth_begin() != IDecl->classmeth_end()) { + Result += "\n\t, (struct _objc_method_list *)&_OBJC_CLASS_METHODS_"; + Result += IDecl->getNameAsString(); + Result += "\n"; + } + else + Result += ", 0\n"; + if (CDecl->protocol_begin() != CDecl->protocol_end()) { + Result += "\t,0, (struct _objc_protocol_list *)&_OBJC_CLASS_PROTOCOLS_"; + Result += CDecl->getNameAsString(); + Result += ",0,0\n"; + } + else + Result += "\t,0,0,0,0\n"; + Result += "};\n"; + + // class metadata generation. + Result += "\nstatic struct _objc_class _OBJC_CLASS_"; + Result += CDecl->getNameAsString(); + Result += " __attribute__ ((used, section (\"__OBJC, __class\")))= " + "{\n\t&_OBJC_METACLASS_"; + Result += CDecl->getNameAsString(); + if (SuperClass) { + Result += ", \""; + Result += SuperClass->getNameAsString(); + Result += "\", \""; + Result += CDecl->getNameAsString(); + Result += "\""; + } + else { + Result += ", 0, \""; + Result += CDecl->getNameAsString(); + Result += "\""; + } + // 'info' field is initialized to CLS_CLASS(1) for class + Result += ", 0,1"; + if (!ObjCSynthesizedStructs.count(CDecl)) + Result += ",0"; + else { + // class has size. Must synthesize its size. + Result += ",sizeof(struct "; + Result += CDecl->getNameAsString(); + if (LangOpts.MicrosoftExt) + Result += "_IMPL"; + Result += ")"; + } + if (NumIvars > 0) { + Result += ", (struct _objc_ivar_list *)&_OBJC_INSTANCE_VARIABLES_"; + Result += CDecl->getNameAsString(); + Result += "\n\t"; + } + else + Result += ",0"; + if (IDecl->instmeth_begin() != IDecl->instmeth_end()) { + Result += ", (struct _objc_method_list *)&_OBJC_INSTANCE_METHODS_"; + Result += CDecl->getNameAsString(); + Result += ", 0\n\t"; + } + else + Result += ",0,0"; + if (CDecl->protocol_begin() != CDecl->protocol_end()) { + Result += ", (struct _objc_protocol_list*)&_OBJC_CLASS_PROTOCOLS_"; + Result += CDecl->getNameAsString(); + Result += ", 0,0\n"; + } + else + Result += ",0,0,0\n"; + Result += "};\n"; +} + +void RewriteObjCFragileABI::RewriteMetaDataIntoBuffer(std::string &Result) { + int ClsDefCount = ClassImplementation.size(); + int CatDefCount = CategoryImplementation.size(); + + // For each implemented class, write out all its meta data. + for (int i = 0; i < ClsDefCount; i++) + RewriteObjCClassMetaData(ClassImplementation[i], Result); + + // For each implemented category, write out all its meta data. 
+ for (int i = 0; i < CatDefCount; i++) + RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result); + + // Write objc_symtab metadata + /* + struct _objc_symtab + { + long sel_ref_cnt; + SEL *refs; + short cls_def_cnt; + short cat_def_cnt; + void *defs[cls_def_cnt + cat_def_cnt]; + }; + */ + + Result += "\nstruct _objc_symtab {\n"; + Result += "\tlong sel_ref_cnt;\n"; + Result += "\tSEL *refs;\n"; + Result += "\tshort cls_def_cnt;\n"; + Result += "\tshort cat_def_cnt;\n"; + Result += "\tvoid *defs[" + utostr(ClsDefCount + CatDefCount)+ "];\n"; + Result += "};\n\n"; + + Result += "static struct _objc_symtab " + "_OBJC_SYMBOLS __attribute__((used, section (\"__OBJC, __symbols\")))= {\n"; + Result += "\t0, 0, " + utostr(ClsDefCount) + + ", " + utostr(CatDefCount) + "\n"; + for (int i = 0; i < ClsDefCount; i++) { + Result += "\t,&_OBJC_CLASS_"; + Result += ClassImplementation[i]->getNameAsString(); + Result += "\n"; + } + + for (int i = 0; i < CatDefCount; i++) { + Result += "\t,&_OBJC_CATEGORY_"; + Result += CategoryImplementation[i]->getClassInterface()->getNameAsString(); + Result += "_"; + Result += CategoryImplementation[i]->getNameAsString(); + Result += "\n"; + } + + Result += "};\n\n"; + + // Write objc_module metadata + + /* + struct _objc_module { + long version; + long size; + const char *name; + struct _objc_symtab *symtab; + } + */ + + Result += "\nstruct _objc_module {\n"; + Result += "\tlong version;\n"; + Result += "\tlong size;\n"; + Result += "\tconst char *name;\n"; + Result += "\tstruct _objc_symtab *symtab;\n"; + Result += "};\n\n"; + Result += "static struct _objc_module " + "_OBJC_MODULES __attribute__ ((used, section (\"__OBJC, __module_info\")))= {\n"; + Result += "\t" + utostr(OBJC_ABI_VERSION) + + ", sizeof(struct _objc_module), \"\", &_OBJC_SYMBOLS\n"; + Result += "};\n\n"; + + if (LangOpts.MicrosoftExt) { + if (ProtocolExprDecls.size()) { + Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n"; + Result += "#pragma data_seg(push, \".objc_protocol$B\")\n"; + for (llvm::SmallPtrSet::iterator I = ProtocolExprDecls.begin(), + E = ProtocolExprDecls.end(); I != E; ++I) { + Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_"; + Result += (*I)->getNameAsString(); + Result += " = &_OBJC_PROTOCOL_"; + Result += (*I)->getNameAsString(); + Result += ";\n"; + } + Result += "#pragma data_seg(pop)\n\n"; + } + Result += "#pragma section(\".objc_module_info$B\",long,read,write)\n"; + Result += "#pragma data_seg(push, \".objc_module_info$B\")\n"; + Result += "static struct _objc_module *_POINTER_OBJC_MODULES = "; + Result += "&_OBJC_MODULES;\n"; + Result += "#pragma data_seg(pop)\n\n"; + } +} + +/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category +/// implementation. +void RewriteObjCFragileABI::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl, + std::string &Result) { + ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface(); + // Find category declaration for this implementation. 
+ ObjCCategoryDecl *CDecl; + for (CDecl = ClassDecl->getCategoryList(); CDecl; + CDecl = CDecl->getNextClassCategory()) + if (CDecl->getIdentifier() == IDecl->getIdentifier()) + break; + + std::string FullCategoryName = ClassDecl->getNameAsString(); + FullCategoryName += '_'; + FullCategoryName += IDecl->getNameAsString(); + + // Build _objc_method_list for class's instance methods if needed + SmallVector + InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end()); + + // If any of our property implementations have associated getters or + // setters, produce metadata for them as well. + for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(), + PropEnd = IDecl->propimpl_end(); + Prop != PropEnd; ++Prop) { + if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) + continue; + if (!Prop->getPropertyIvarDecl()) + continue; + ObjCPropertyDecl *PD = Prop->getPropertyDecl(); + if (!PD) + continue; + if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl()) + InstanceMethods.push_back(Getter); + if (PD->isReadOnly()) + continue; + if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl()) + InstanceMethods.push_back(Setter); + } + RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(), + true, "CATEGORY_", FullCategoryName.c_str(), + Result); + + // Build _objc_method_list for class's class methods if needed + RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(), + false, "CATEGORY_", FullCategoryName.c_str(), + Result); + + // Protocols referenced in class declaration? + // Null CDecl is case of a category implementation with no category interface + if (CDecl) + RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), "CATEGORY", + FullCategoryName, Result); + /* struct _objc_category { + char *category_name; + char *class_name; + struct _objc_method_list *instance_methods; + struct _objc_method_list *class_methods; + struct _objc_protocol_list *protocols; + // Objective-C 1.0 extensions + uint32_t size; // sizeof (struct _objc_category) + struct _objc_property_list *instance_properties; // category's own + // @property decl. 
+ }; + */ + + static bool objc_category = false; + if (!objc_category) { + Result += "\nstruct _objc_category {\n"; + Result += "\tchar *category_name;\n"; + Result += "\tchar *class_name;\n"; + Result += "\tstruct _objc_method_list *instance_methods;\n"; + Result += "\tstruct _objc_method_list *class_methods;\n"; + Result += "\tstruct _objc_protocol_list *protocols;\n"; + Result += "\tunsigned int size;\n"; + Result += "\tstruct _objc_property_list *instance_properties;\n"; + Result += "};\n"; + objc_category = true; + } + Result += "\nstatic struct _objc_category _OBJC_CATEGORY_"; + Result += FullCategoryName; + Result += " __attribute__ ((used, section (\"__OBJC, __category\")))= {\n\t\""; + Result += IDecl->getNameAsString(); + Result += "\"\n\t, \""; + Result += ClassDecl->getNameAsString(); + Result += "\"\n"; + + if (IDecl->instmeth_begin() != IDecl->instmeth_end()) { + Result += "\t, (struct _objc_method_list *)" + "&_OBJC_CATEGORY_INSTANCE_METHODS_"; + Result += FullCategoryName; + Result += "\n"; + } + else + Result += "\t, 0\n"; + if (IDecl->classmeth_begin() != IDecl->classmeth_end()) { + Result += "\t, (struct _objc_method_list *)" + "&_OBJC_CATEGORY_CLASS_METHODS_"; + Result += FullCategoryName; + Result += "\n"; + } + else + Result += "\t, 0\n"; + + if (CDecl && CDecl->protocol_begin() != CDecl->protocol_end()) { + Result += "\t, (struct _objc_protocol_list *)&_OBJC_CATEGORY_PROTOCOLS_"; + Result += FullCategoryName; + Result += "\n"; + } + else + Result += "\t, 0\n"; + Result += "\t, sizeof(struct _objc_category), 0\n};\n"; +} + +// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or +/// class methods. +template +void RewriteObjCFragileABI::RewriteObjCMethodsMetaData(MethodIterator MethodBegin, + MethodIterator MethodEnd, + bool IsInstanceMethod, + StringRef prefix, + StringRef ClassName, + std::string &Result) { + if (MethodBegin == MethodEnd) return; + + if (!objc_impl_method) { + /* struct _objc_method { + SEL _cmd; + char *method_types; + void *_imp; + } + */ + Result += "\nstruct _objc_method {\n"; + Result += "\tSEL _cmd;\n"; + Result += "\tchar *method_types;\n"; + Result += "\tvoid *_imp;\n"; + Result += "};\n"; + + objc_impl_method = true; + } + + // Build _objc_method_list for class's methods if needed + + /* struct { + struct _objc_method_list *next_method; + int method_count; + struct _objc_method method_list[]; + } + */ + unsigned NumMethods = std::distance(MethodBegin, MethodEnd); + Result += "\nstatic struct {\n"; + Result += "\tstruct _objc_method_list *next_method;\n"; + Result += "\tint method_count;\n"; + Result += "\tstruct _objc_method method_list["; + Result += utostr(NumMethods); + Result += "];\n} _OBJC_"; + Result += prefix; + Result += IsInstanceMethod ? "INSTANCE" : "CLASS"; + Result += "_METHODS_"; + Result += ClassName; + Result += " __attribute__ ((used, section (\"__OBJC, __"; + Result += IsInstanceMethod ? 
"inst" : "cls"; + Result += "_meth\")))= "; + Result += "{\n\t0, " + utostr(NumMethods) + "\n"; + + Result += "\t,{{(SEL)\""; + Result += (*MethodBegin)->getSelector().getAsString().c_str(); + std::string MethodTypeString; + Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString); + Result += "\", \""; + Result += MethodTypeString; + Result += "\", (void *)"; + Result += MethodInternalNames[*MethodBegin]; + Result += "}\n"; + for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) { + Result += "\t ,{(SEL)\""; + Result += (*MethodBegin)->getSelector().getAsString().c_str(); + std::string MethodTypeString; + Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString); + Result += "\", \""; + Result += MethodTypeString; + Result += "\", (void *)"; + Result += MethodInternalNames[*MethodBegin]; + Result += "}\n"; + } + Result += "\t }\n};\n"; +} + +Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) { + SourceRange OldRange = IV->getSourceRange(); + Expr *BaseExpr = IV->getBase(); + + // Rewrite the base, but without actually doing replaces. + { + DisableReplaceStmtScope S(*this); + BaseExpr = cast(RewriteFunctionBodyOrGlobalInitializer(BaseExpr)); + IV->setBase(BaseExpr); + } + + ObjCIvarDecl *D = IV->getDecl(); + + Expr *Replacement = IV; + if (CurMethodDef) { + if (BaseExpr->getType()->isObjCObjectPointerType()) { + const ObjCInterfaceType *iFaceDecl = + dyn_cast(BaseExpr->getType()->getPointeeType()); + assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null"); + // lookup which class implements the instance variable. + ObjCInterfaceDecl *clsDeclared = 0; + iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(), + clsDeclared); + assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class"); + + // Synthesize an explicit cast to gain access to the ivar. + std::string RecName = clsDeclared->getIdentifier()->getName(); + RecName += "_IMPL"; + IdentifierInfo *II = &Context->Idents.get(RecName); + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + II); + assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl"); + QualType castT = Context->getPointerType(Context->getTagDeclType(RD)); + CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT, + CK_BitCast, + IV->getBase()); + // Don't forget the parens to enforce the proper binding. + ParenExpr *PE = new (Context) ParenExpr(OldRange.getBegin(), + OldRange.getEnd(), + castExpr); + if (IV->isFreeIvar() && + declaresSameEntity(CurMethodDef->getClassInterface(), iFaceDecl->getDecl())) { + MemberExpr *ME = new (Context) MemberExpr(PE, true, D, + IV->getLocation(), + D->getType(), + VK_LValue, OK_Ordinary); + Replacement = ME; + } else { + IV->setBase(PE); + } + } + } else { // we are outside a method. + assert(!IV->isFreeIvar() && "Cannot have a free standing ivar outside a method"); + + // Explicit ivar refs need to have a cast inserted. + // FIXME: consider sharing some of this code with the code above. + if (BaseExpr->getType()->isObjCObjectPointerType()) { + const ObjCInterfaceType *iFaceDecl = + dyn_cast(BaseExpr->getType()->getPointeeType()); + // lookup which class implements the instance variable. + ObjCInterfaceDecl *clsDeclared = 0; + iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(), + clsDeclared); + assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class"); + + // Synthesize an explicit cast to gain access to the ivar. 
+ std::string RecName = clsDeclared->getIdentifier()->getName(); + RecName += "_IMPL"; + IdentifierInfo *II = &Context->Idents.get(RecName); + RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, + SourceLocation(), SourceLocation(), + II); + assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl"); + QualType castT = Context->getPointerType(Context->getTagDeclType(RD)); + CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT, + CK_BitCast, + IV->getBase()); + // Don't forget the parens to enforce the proper binding. + ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(), + IV->getBase()->getLocEnd(), castExpr); + // Cannot delete IV->getBase(), since PE points to it. + // Replace the old base with the cast. This is important when doing + // embedded rewrites. For example, [newInv->_container addObject:0]. + IV->setBase(PE); + } + } + + ReplaceStmtWithRange(IV, Replacement, OldRange); + return Replacement; +} diff --git a/lib/Rewrite/Frontend/RewriteTest.cpp b/lib/Rewrite/Frontend/RewriteTest.cpp new file mode 100644 index 0000000..722c5e8 --- /dev/null +++ b/lib/Rewrite/Frontend/RewriteTest.cpp @@ -0,0 +1,39 @@ +//===--- RewriteTest.cpp - Rewriter playground ----------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is a testbed. +// +//===----------------------------------------------------------------------===// + +#include "clang/Rewrite/Frontend/Rewriters.h" +#include "clang/Lex/Preprocessor.h" +#include "clang/Rewrite/Core/TokenRewriter.h" +#include "llvm/Support/raw_ostream.h" + +void clang::DoRewriteTest(Preprocessor &PP, raw_ostream* OS) { + SourceManager &SM = PP.getSourceManager(); + const LangOptions &LangOpts = PP.getLangOpts(); + + TokenRewriter Rewriter(SM.getMainFileID(), SM, LangOpts); + + // Throw tags around comments. + for (TokenRewriter::token_iterator I = Rewriter.token_begin(), + E = Rewriter.token_end(); I != E; ++I) { + if (I->isNot(tok::comment)) continue; + + Rewriter.AddTokenBefore(I, ""); + Rewriter.AddTokenAfter(I, ""); + } + + + // Print out the output. + for (TokenRewriter::token_iterator I = Rewriter.token_begin(), + E = Rewriter.token_end(); I != E; ++I) + *OS << PP.getSpelling(*I); +} diff --git a/lib/Rewrite/FrontendActions.cpp b/lib/Rewrite/FrontendActions.cpp deleted file mode 100644 index 9bc218e..0000000 --- a/lib/Rewrite/FrontendActions.cpp +++ /dev/null @@ -1,192 +0,0 @@ -//===--- FrontendActions.cpp ----------------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/FrontendActions.h" -#include "clang/AST/ASTConsumer.h" -#include "clang/Lex/Preprocessor.h" -#include "clang/Parse/Parser.h" -#include "clang/Basic/FileManager.h" -#include "clang/Frontend/FrontendActions.h" -#include "clang/Frontend/CompilerInstance.h" -#include "clang/Frontend/FrontendDiagnostic.h" -#include "clang/Frontend/Utils.h" -#include "clang/Rewrite/ASTConsumers.h" -#include "clang/Rewrite/FixItRewriter.h" -#include "clang/Rewrite/Rewriters.h" -#include "llvm/ADT/OwningPtr.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Support/Path.h" -#include "llvm/Support/FileSystem.h" - -using namespace clang; - -//===----------------------------------------------------------------------===// -// AST Consumer Actions -//===----------------------------------------------------------------------===// - -ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI, - StringRef InFile) { - if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile)) - return CreateHTMLPrinter(OS, CI.getPreprocessor()); - return 0; -} - -FixItAction::FixItAction() {} -FixItAction::~FixItAction() {} - -ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI, - StringRef InFile) { - return new ASTConsumer(); -} - -namespace { -class FixItRewriteInPlace : public FixItOptions { -public: - std::string RewriteFilename(const std::string &Filename, int &fd) { - fd = -1; - return Filename; - } -}; - -class FixItActionSuffixInserter : public FixItOptions { - std::string NewSuffix; - -public: - FixItActionSuffixInserter(std::string NewSuffix, bool FixWhatYouCan) - : NewSuffix(NewSuffix) { - this->FixWhatYouCan = FixWhatYouCan; - } - - std::string RewriteFilename(const std::string &Filename, int &fd) { - fd = -1; - SmallString<128> Path(Filename); - llvm::sys::path::replace_extension(Path, - NewSuffix + llvm::sys::path::extension(Path)); - return Path.str(); - } -}; - -class FixItRewriteToTemp : public FixItOptions { -public: - std::string RewriteFilename(const std::string &Filename, int &fd) { - SmallString<128> Path; - Path = llvm::sys::path::filename(Filename); - Path += "-%%%%%%%%"; - Path += llvm::sys::path::extension(Filename); - SmallString<128> NewPath; - llvm::sys::fs::unique_file(Path.str(), fd, NewPath); - return NewPath.str(); - } -}; -} // end anonymous namespace - -bool FixItAction::BeginSourceFileAction(CompilerInstance &CI, - StringRef Filename) { - const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts(); - if (!FEOpts.FixItSuffix.empty()) { - FixItOpts.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix, - FEOpts.FixWhatYouCan)); - } else { - FixItOpts.reset(new FixItRewriteInPlace); - FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan; - } - Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(), - CI.getLangOpts(), FixItOpts.get())); - return true; -} - -void FixItAction::EndSourceFileAction() { - // Otherwise rewrite all files. 
- Rewriter->WriteFixedFiles(); -} - -bool FixItRecompile::BeginInvocation(CompilerInstance &CI) { - - std::vector > RewrittenFiles; - bool err = false; - { - const FrontendOptions &FEOpts = CI.getFrontendOpts(); - OwningPtr FixAction(new SyntaxOnlyAction()); - if (FixAction->BeginSourceFile(CI, FEOpts.Inputs[0])) { - OwningPtr FixItOpts; - if (FEOpts.FixToTemporaries) - FixItOpts.reset(new FixItRewriteToTemp()); - else - FixItOpts.reset(new FixItRewriteInPlace()); - FixItOpts->Silent = true; - FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan; - FixItOpts->FixOnlyWarnings = FEOpts.FixOnlyWarnings; - FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(), - CI.getLangOpts(), FixItOpts.get()); - FixAction->Execute(); - - err = Rewriter.WriteFixedFiles(&RewrittenFiles); - - FixAction->EndSourceFile(); - CI.setSourceManager(0); - CI.setFileManager(0); - } else { - err = true; - } - } - if (err) - return false; - CI.getDiagnosticClient().clear(); - CI.getDiagnostics().Reset(); - - PreprocessorOptions &PPOpts = CI.getPreprocessorOpts(); - PPOpts.RemappedFiles.insert(PPOpts.RemappedFiles.end(), - RewrittenFiles.begin(), RewrittenFiles.end()); - PPOpts.RemappedFilesKeepOriginalName = false; - - return true; -} - -//===----------------------------------------------------------------------===// -// Preprocessor Actions -//===----------------------------------------------------------------------===// - -ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, - StringRef InFile) { - if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) { - if (CI.getLangOpts().ObjCRuntime.isNonFragile()) - return CreateModernObjCRewriter(InFile, OS, - CI.getDiagnostics(), CI.getLangOpts(), - CI.getDiagnosticOpts().NoRewriteMacros); - return CreateObjCRewriter(InFile, OS, - CI.getDiagnostics(), CI.getLangOpts(), - CI.getDiagnosticOpts().NoRewriteMacros); - } - return 0; -} - -void RewriteMacrosAction::ExecuteAction() { - CompilerInstance &CI = getCompilerInstance(); - raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile()); - if (!OS) return; - - RewriteMacrosInInput(CI.getPreprocessor(), OS); -} - -void RewriteTestAction::ExecuteAction() { - CompilerInstance &CI = getCompilerInstance(); - raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile()); - if (!OS) return; - - DoRewriteTest(CI.getPreprocessor(), OS); -} - -void RewriteIncludesAction::ExecuteAction() { - CompilerInstance &CI = getCompilerInstance(); - raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile()); - if (!OS) return; - - RewriteIncludesInInput(CI.getPreprocessor(), OS, - CI.getPreprocessorOutputOpts()); -} diff --git a/lib/Rewrite/HTMLPrint.cpp b/lib/Rewrite/HTMLPrint.cpp deleted file mode 100644 index 3d190ab..0000000 --- a/lib/Rewrite/HTMLPrint.cpp +++ /dev/null @@ -1,94 +0,0 @@ -//===--- HTMLPrint.cpp - Source code -> HTML pretty-printing --------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// Pretty-printing of source code to HTML. 
-// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/ASTConsumers.h" -#include "clang/AST/ASTConsumer.h" -#include "clang/AST/ASTContext.h" -#include "clang/AST/Decl.h" -#include "clang/Basic/Diagnostic.h" -#include "clang/Basic/FileManager.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Lex/Preprocessor.h" -#include "clang/Rewrite/HTMLRewrite.h" -#include "clang/Rewrite/Rewriter.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/raw_ostream.h" -using namespace clang; - -//===----------------------------------------------------------------------===// -// Functional HTML pretty-printing. -//===----------------------------------------------------------------------===// - -namespace { - class HTMLPrinter : public ASTConsumer { - Rewriter R; - raw_ostream *Out; - Preprocessor &PP; - bool SyntaxHighlight, HighlightMacros; - - public: - HTMLPrinter(raw_ostream *OS, Preprocessor &pp, - bool _SyntaxHighlight, bool _HighlightMacros) - : Out(OS), PP(pp), SyntaxHighlight(_SyntaxHighlight), - HighlightMacros(_HighlightMacros) {} - - void Initialize(ASTContext &context); - void HandleTranslationUnit(ASTContext &Ctx); - }; -} - -ASTConsumer* clang::CreateHTMLPrinter(raw_ostream *OS, - Preprocessor &PP, - bool SyntaxHighlight, - bool HighlightMacros) { - return new HTMLPrinter(OS, PP, SyntaxHighlight, HighlightMacros); -} - -void HTMLPrinter::Initialize(ASTContext &context) { - R.setSourceMgr(context.getSourceManager(), context.getLangOpts()); -} - -void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) { - if (PP.getDiagnostics().hasErrorOccurred()) - return; - - // Format the file. - FileID FID = R.getSourceMgr().getMainFileID(); - const FileEntry* Entry = R.getSourceMgr().getFileEntryForID(FID); - const char* Name; - // In some cases, in particular the case where the input is from stdin, - // there is no entry. Fall back to the memory buffer for a name in those - // cases. - if (Entry) - Name = Entry->getName(); - else - Name = R.getSourceMgr().getBuffer(FID)->getBufferIdentifier(); - - html::AddLineNumbers(R, FID); - html::AddHeaderFooterInternalBuiltinCSS(R, FID, Name); - - // If we have a preprocessor, relex the file and syntax highlight. - // We might not have a preprocessor if we come from a deserialized AST file, - // for example. - - if (SyntaxHighlight) html::SyntaxHighlight(R, FID, PP); - if (HighlightMacros) html::HighlightMacros(R, FID, PP); - html::EscapeText(R, FID, false, true); - - // Emit the HTML. - const RewriteBuffer &RewriteBuf = R.getEditBuffer(FID); - char *Buffer = (char*)malloc(RewriteBuf.size()); - std::copy(RewriteBuf.begin(), RewriteBuf.end(), Buffer); - Out->write(Buffer, RewriteBuf.size()); - free(Buffer); -} diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp deleted file mode 100644 index 236b98f..0000000 --- a/lib/Rewrite/HTMLRewrite.cpp +++ /dev/null @@ -1,583 +0,0 @@ -//== HTMLRewrite.cpp - Translate source code into prettified HTML --*- C++ -*-// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the HTMLRewriter clas, which is used to translate the -// text of a source file into prettified HTML. 
-// -//===----------------------------------------------------------------------===// - -#include "clang/Lex/Preprocessor.h" -#include "clang/Rewrite/Rewriter.h" -#include "clang/Rewrite/HTMLRewrite.h" -#include "clang/Lex/TokenConcatenation.h" -#include "clang/Lex/Preprocessor.h" -#include "clang/Basic/SourceManager.h" -#include "llvm/ADT/SmallString.h" -#include "llvm/ADT/OwningPtr.h" -#include "llvm/Support/ErrorHandling.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/raw_ostream.h" -using namespace clang; - - -/// HighlightRange - Highlight a range in the source code with the specified -/// start/end tags. B/E must be in the same file. This ensures that -/// start/end tags are placed at the start/end of each line if the range is -/// multiline. -void html::HighlightRange(Rewriter &R, SourceLocation B, SourceLocation E, - const char *StartTag, const char *EndTag) { - SourceManager &SM = R.getSourceMgr(); - B = SM.getExpansionLoc(B); - E = SM.getExpansionLoc(E); - FileID FID = SM.getFileID(B); - assert(SM.getFileID(E) == FID && "B/E not in the same file!"); - - unsigned BOffset = SM.getFileOffset(B); - unsigned EOffset = SM.getFileOffset(E); - - // Include the whole end token in the range. - EOffset += Lexer::MeasureTokenLength(E, R.getSourceMgr(), R.getLangOpts()); - - bool Invalid = false; - const char *BufferStart = SM.getBufferData(FID, &Invalid).data(); - if (Invalid) - return; - - HighlightRange(R.getEditBuffer(FID), BOffset, EOffset, - BufferStart, StartTag, EndTag); -} - -/// HighlightRange - This is the same as the above method, but takes -/// decomposed file locations. -void html::HighlightRange(RewriteBuffer &RB, unsigned B, unsigned E, - const char *BufferStart, - const char *StartTag, const char *EndTag) { - // Insert the tag at the absolute start/end of the range. - RB.InsertTextAfter(B, StartTag); - RB.InsertTextBefore(E, EndTag); - - // Scan the range to see if there is a \r or \n. If so, and if the line is - // not blank, insert tags on that line as well. - bool HadOpenTag = true; - - unsigned LastNonWhiteSpace = B; - for (unsigned i = B; i != E; ++i) { - switch (BufferStart[i]) { - case '\r': - case '\n': - // Okay, we found a newline in the range. If we have an open tag, we need - // to insert a close tag at the first non-whitespace before the newline. - if (HadOpenTag) - RB.InsertTextBefore(LastNonWhiteSpace+1, EndTag); - - // Instead of inserting an open tag immediately after the newline, we - // wait until we see a non-whitespace character. This prevents us from - // inserting tags around blank lines, and also allows the open tag to - // be put *after* whitespace on a non-blank line. - HadOpenTag = false; - break; - case '\0': - case ' ': - case '\t': - case '\f': - case '\v': - // Ignore whitespace. - break; - - default: - // If there is no tag open, do it now. - if (!HadOpenTag) { - RB.InsertTextAfter(i, StartTag); - HadOpenTag = true; - } - - // Remember this character. 
- LastNonWhiteSpace = i; - break; - } - } -} - -void html::EscapeText(Rewriter &R, FileID FID, - bool EscapeSpaces, bool ReplaceTabs) { - - const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID); - const char* C = Buf->getBufferStart(); - const char* FileEnd = Buf->getBufferEnd(); - - assert (C <= FileEnd); - - RewriteBuffer &RB = R.getEditBuffer(FID); - - unsigned ColNo = 0; - for (unsigned FilePos = 0; C != FileEnd ; ++C, ++FilePos) { - switch (*C) { - default: ++ColNo; break; - case '\n': - case '\r': - ColNo = 0; - break; - - case ' ': - if (EscapeSpaces) - RB.ReplaceText(FilePos, 1, " "); - ++ColNo; - break; - case '\f': - RB.ReplaceText(FilePos, 1, "
<hr>
    "); - ColNo = 0; - break; - - case '\t': { - if (!ReplaceTabs) - break; - unsigned NumSpaces = 8-(ColNo&7); - if (EscapeSpaces) - RB.ReplaceText(FilePos, 1, - StringRef("     " - "   ", 6*NumSpaces)); - else - RB.ReplaceText(FilePos, 1, StringRef(" ", NumSpaces)); - ColNo += NumSpaces; - break; - } - case '<': - RB.ReplaceText(FilePos, 1, "<"); - ++ColNo; - break; - - case '>': - RB.ReplaceText(FilePos, 1, ">"); - ++ColNo; - break; - - case '&': - RB.ReplaceText(FilePos, 1, "&"); - ++ColNo; - break; - } - } -} - -std::string html::EscapeText(const std::string& s, bool EscapeSpaces, - bool ReplaceTabs) { - - unsigned len = s.size(); - std::string Str; - llvm::raw_string_ostream os(Str); - - for (unsigned i = 0 ; i < len; ++i) { - - char c = s[i]; - switch (c) { - default: - os << c; break; - - case ' ': - if (EscapeSpaces) os << " "; - else os << ' '; - break; - - case '\t': - if (ReplaceTabs) { - if (EscapeSpaces) - for (unsigned i = 0; i < 4; ++i) - os << " "; - else - for (unsigned i = 0; i < 4; ++i) - os << " "; - } - else - os << c; - - break; - - case '<': os << "<"; break; - case '>': os << ">"; break; - case '&': os << "&"; break; - } - } - - return os.str(); -} - -static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo, - unsigned B, unsigned E) { - SmallString<256> Str; - llvm::raw_svector_ostream OS(Str); - - OS << "" - << LineNo << ""; - - if (B == E) { // Handle empty lines. - OS << " "; - RB.InsertTextBefore(B, OS.str()); - } else { - RB.InsertTextBefore(B, OS.str()); - RB.InsertTextBefore(E, ""); - } -} - -void html::AddLineNumbers(Rewriter& R, FileID FID) { - - const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID); - const char* FileBeg = Buf->getBufferStart(); - const char* FileEnd = Buf->getBufferEnd(); - const char* C = FileBeg; - RewriteBuffer &RB = R.getEditBuffer(FID); - - assert (C <= FileEnd); - - unsigned LineNo = 0; - unsigned FilePos = 0; - - while (C != FileEnd) { - - ++LineNo; - unsigned LineStartPos = FilePos; - unsigned LineEndPos = FileEnd - FileBeg; - - assert (FilePos <= LineEndPos); - assert (C < FileEnd); - - // Scan until the newline (or end-of-file). - - while (C != FileEnd) { - char c = *C; - ++C; - - if (c == '\n') { - LineEndPos = FilePos++; - break; - } - - ++FilePos; - } - - AddLineNumber(RB, LineNo, LineStartPos, LineEndPos); - } - - // Add one big table tag that surrounds all of the code. - RB.InsertTextBefore(0, "\n"); - RB.InsertTextAfter(FileEnd - FileBeg, "
    "); -} - -void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID, - const char *title) { - - const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID); - const char* FileStart = Buf->getBufferStart(); - const char* FileEnd = Buf->getBufferEnd(); - - SourceLocation StartLoc = R.getSourceMgr().getLocForStartOfFile(FID); - SourceLocation EndLoc = StartLoc.getLocWithOffset(FileEnd-FileStart); - - std::string s; - llvm::raw_string_ostream os(s); - os << "\n" // Use HTML 5 doctype - "\n\n"; - - if (title) - os << "" << html::EscapeText(title) << "\n"; - - os << "\n\n"; - - // Generate header - R.InsertTextBefore(StartLoc, os.str()); - // Generate footer - - R.InsertTextAfter(EndLoc, "\n"); -} - -/// SyntaxHighlight - Relex the specified FileID and annotate the HTML with -/// information about keywords, macro expansions etc. This uses the macro -/// table state from the end of the file, so it won't be perfectly perfect, -/// but it will be reasonably close. -void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) { - RewriteBuffer &RB = R.getEditBuffer(FID); - - const SourceManager &SM = PP.getSourceManager(); - const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID); - Lexer L(FID, FromFile, SM, PP.getLangOpts()); - const char *BufferStart = L.getBufferStart(); - - // Inform the preprocessor that we want to retain comments as tokens, so we - // can highlight them. - L.SetCommentRetentionState(true); - - // Lex all the tokens in raw mode, to avoid entering #includes or expanding - // macros. - Token Tok; - L.LexFromRawLexer(Tok); - - while (Tok.isNot(tok::eof)) { - // Since we are lexing unexpanded tokens, all tokens are from the main - // FileID. - unsigned TokOffs = SM.getFileOffset(Tok.getLocation()); - unsigned TokLen = Tok.getLength(); - switch (Tok.getKind()) { - default: break; - case tok::identifier: - llvm_unreachable("tok::identifier in raw lexing mode!"); - case tok::raw_identifier: { - // Fill in Result.IdentifierInfo and update the token kind, - // looking up the identifier in the identifier table. - PP.LookUpIdentifierInfo(Tok); - - // If this is a pp-identifier, for a keyword, highlight it as such. - if (Tok.isNot(tok::identifier)) - HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart, - "", ""); - break; - } - case tok::comment: - HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart, - "", ""); - break; - case tok::utf8_string_literal: - // Chop off the u part of u8 prefix - ++TokOffs; - --TokLen; - // FALL THROUGH to chop the 8 - case tok::wide_string_literal: - case tok::utf16_string_literal: - case tok::utf32_string_literal: - // Chop off the L, u, U or 8 prefix - ++TokOffs; - --TokLen; - // FALL THROUGH. - case tok::string_literal: - // FIXME: Exclude the optional ud-suffix from the highlighted range. - HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart, - "", ""); - break; - case tok::hash: { - // If this is a preprocessor directive, all tokens to end of line are too. - if (!Tok.isAtStartOfLine()) - break; - - // Eat all of the tokens until we get to the next one at the start of - // line. - unsigned TokEnd = TokOffs+TokLen; - L.LexFromRawLexer(Tok); - while (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof)) { - TokEnd = SM.getFileOffset(Tok.getLocation())+Tok.getLength(); - L.LexFromRawLexer(Tok); - } - - // Find end of line. This is a hack. - HighlightRange(RB, TokOffs, TokEnd, BufferStart, - "", ""); - - // Don't skip the next token. 
- continue; - } - } - - L.LexFromRawLexer(Tok); - } -} - -/// HighlightMacros - This uses the macro table state from the end of the -/// file, to re-expand macros and insert (into the HTML) information about the -/// macro expansions. This won't be perfectly perfect, but it will be -/// reasonably close. -void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) { - // Re-lex the raw token stream into a token buffer. - const SourceManager &SM = PP.getSourceManager(); - std::vector TokenStream; - - const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID); - Lexer L(FID, FromFile, SM, PP.getLangOpts()); - - // Lex all the tokens in raw mode, to avoid entering #includes or expanding - // macros. - while (1) { - Token Tok; - L.LexFromRawLexer(Tok); - - // If this is a # at the start of a line, discard it from the token stream. - // We don't want the re-preprocess step to see #defines, #includes or other - // preprocessor directives. - if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) - continue; - - // If this is a ## token, change its kind to unknown so that repreprocessing - // it will not produce an error. - if (Tok.is(tok::hashhash)) - Tok.setKind(tok::unknown); - - // If this raw token is an identifier, the raw lexer won't have looked up - // the corresponding identifier info for it. Do this now so that it will be - // macro expanded when we re-preprocess it. - if (Tok.is(tok::raw_identifier)) - PP.LookUpIdentifierInfo(Tok); - - TokenStream.push_back(Tok); - - if (Tok.is(tok::eof)) break; - } - - // Temporarily change the diagnostics object so that we ignore any generated - // diagnostics from this pass. - DiagnosticsEngine TmpDiags(PP.getDiagnostics().getDiagnosticIDs(), - new IgnoringDiagConsumer); - - // FIXME: This is a huge hack; we reuse the input preprocessor because we want - // its state, but we aren't actually changing it (we hope). This should really - // construct a copy of the preprocessor. - Preprocessor &TmpPP = const_cast(PP); - DiagnosticsEngine *OldDiags = &TmpPP.getDiagnostics(); - TmpPP.setDiagnostics(TmpDiags); - - // Inform the preprocessor that we don't want comments. - TmpPP.SetCommentRetentionState(false, false); - - // We don't want pragmas either. Although we filtered out #pragma, removing - // _Pragma and __pragma is much harder. - bool PragmasPreviouslyEnabled = TmpPP.getPragmasEnabled(); - TmpPP.setPragmasEnabled(false); - - // Enter the tokens we just lexed. This will cause them to be macro expanded - // but won't enter sub-files (because we removed #'s). - TmpPP.EnterTokenStream(&TokenStream[0], TokenStream.size(), false, false); - - TokenConcatenation ConcatInfo(TmpPP); - - // Lex all the tokens. - Token Tok; - TmpPP.Lex(Tok); - while (Tok.isNot(tok::eof)) { - // Ignore non-macro tokens. - if (!Tok.getLocation().isMacroID()) { - TmpPP.Lex(Tok); - continue; - } - - // Okay, we have the first token of a macro expansion: highlight the - // expansion by inserting a start tag before the macro expansion and - // end tag after it. - std::pair LLoc = - SM.getExpansionRange(Tok.getLocation()); - - // Ignore tokens whose instantiation location was not the main file. - if (SM.getFileID(LLoc.first) != FID) { - TmpPP.Lex(Tok); - continue; - } - - assert(SM.getFileID(LLoc.second) == FID && - "Start and end of expansion must be in the same ultimate file!"); - - std::string Expansion = EscapeText(TmpPP.getSpelling(Tok)); - unsigned LineLen = Expansion.size(); - - Token PrevPrevTok; - Token PrevTok = Tok; - // Okay, eat this token, getting the next one. 
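// Sketch of the resulting markup (the span class names here are assumptions;
// the surrounding code only shows that the escaped expansion text is attached
// to the closing tag): a use of
//   #define MIN(a, b) ((a) < (b) ? (a) : (b))
// such as MIN(x, y) ends up wrapped roughly as
//   <span class='macro'>MIN(x, y)<span class='expansion'>((x) < (y) ? (x) : (y))</span></span>
// so the whole use, even of a multi-line macro, is highlighted as one unit and
// a stylesheet can reveal the expansion.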
- TmpPP.Lex(Tok); - - // Skip all the rest of the tokens that are part of this macro - // instantiation. It would be really nice to pop up a window with all the - // spelling of the tokens or something. - while (!Tok.is(tok::eof) && - SM.getExpansionLoc(Tok.getLocation()) == LLoc.first) { - // Insert a newline if the macro expansion is getting large. - if (LineLen > 60) { - Expansion += "
    "; - LineLen = 0; - } - - LineLen -= Expansion.size(); - - // If the tokens were already space separated, or if they must be to avoid - // them being implicitly pasted, add a space between them. - if (Tok.hasLeadingSpace() || - ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok)) - Expansion += ' '; - - // Escape any special characters in the token text. - Expansion += EscapeText(TmpPP.getSpelling(Tok)); - LineLen += Expansion.size(); - - PrevPrevTok = PrevTok; - PrevTok = Tok; - TmpPP.Lex(Tok); - } - - - // Insert the expansion as the end tag, so that multi-line macros all get - // highlighted. - Expansion = "" + Expansion + "
    "; - - HighlightRange(R, LLoc.first, LLoc.second, - "", Expansion.c_str()); - } - - // Restore the preprocessor's old state. - TmpPP.setDiagnostics(*OldDiags); - TmpPP.setPragmasEnabled(PragmasPreviouslyEnabled); -} diff --git a/lib/Rewrite/InclusionRewriter.cpp b/lib/Rewrite/InclusionRewriter.cpp deleted file mode 100644 index 3dfc3b0..0000000 --- a/lib/Rewrite/InclusionRewriter.cpp +++ /dev/null @@ -1,361 +0,0 @@ -//===--- InclusionRewriter.cpp - Rewrite includes into their expansions ---===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This code rewrites include invocations into their expansions. This gives you -// a file with all included files merged into it. -// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/Rewriters.h" -#include "clang/Lex/Preprocessor.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Frontend/PreprocessorOutputOptions.h" -#include "llvm/Support/raw_ostream.h" - -using namespace clang; -using namespace llvm; - -namespace { - -class InclusionRewriter : public PPCallbacks { - /// Information about which #includes were actually performed, - /// created by preprocessor callbacks. - struct FileChange { - SourceLocation From; - FileID Id; - SrcMgr::CharacteristicKind FileType; - FileChange(SourceLocation From) : From(From) { - } - }; - Preprocessor &PP; ///< Used to find inclusion directives. - SourceManager &SM; ///< Used to read and manage source files. - raw_ostream &OS; ///< The destination stream for rewritten contents. - bool ShowLineMarkers; ///< Show #line markers. - bool UseLineDirective; ///< Use of line directives or line markers. - typedef std::map FileChangeMap; - FileChangeMap FileChanges; /// Tracks which files were included where. - /// Used transitively for building up the FileChanges mapping over the - /// various \c PPCallbacks callbacks. - FileChangeMap::iterator LastInsertedFileChange; -public: - InclusionRewriter(Preprocessor &PP, raw_ostream &OS, bool ShowLineMarkers); - bool Process(FileID FileId, SrcMgr::CharacteristicKind FileType); -private: - virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason, - SrcMgr::CharacteristicKind FileType, - FileID PrevFID); - virtual void FileSkipped(const FileEntry &ParentFile, - const Token &FilenameTok, - SrcMgr::CharacteristicKind FileType); - virtual void InclusionDirective(SourceLocation HashLoc, - const Token &IncludeTok, - StringRef FileName, - bool IsAngled, - const FileEntry *File, - SourceLocation EndLoc, - StringRef SearchPath, - StringRef RelativePath); - void WriteLineInfo(const char *Filename, int Line, - SrcMgr::CharacteristicKind FileType, - StringRef EOL, StringRef Extra = StringRef()); - void OutputContentUpTo(const MemoryBuffer &FromFile, - unsigned &WriteFrom, unsigned WriteTo, - StringRef EOL, int &lines, - bool EnsureNewline = false); - void CommentOutDirective(Lexer &DirectivesLex, const Token &StartToken, - const MemoryBuffer &FromFile, StringRef EOL, - unsigned &NextToWrite, int &Lines); - const FileChange *FindFileChangeLocation(SourceLocation Loc) const; - StringRef NextIdentifierName(Lexer &RawLex, Token &RawToken); -}; - -} // end anonymous namespace - -/// Initializes an InclusionRewriter with a \p PP source and \p OS destination. 
-InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS, - bool ShowLineMarkers) - : PP(PP), SM(PP.getSourceManager()), OS(OS), - ShowLineMarkers(ShowLineMarkers), - LastInsertedFileChange(FileChanges.end()) { - // If we're in microsoft mode, use normal #line instead of line markers. - UseLineDirective = PP.getLangOpts().MicrosoftExt; -} - -/// Write appropriate line information as either #line directives or GNU line -/// markers depending on what mode we're in, including the \p Filename and -/// \p Line we are located at, using the specified \p EOL line separator, and -/// any \p Extra context specifiers in GNU line directives. -void InclusionRewriter::WriteLineInfo(const char *Filename, int Line, - SrcMgr::CharacteristicKind FileType, - StringRef EOL, StringRef Extra) { - if (!ShowLineMarkers) - return; - if (UseLineDirective) { - OS << "#line" << ' ' << Line << ' ' << '"' << Filename << '"'; - } else { - // Use GNU linemarkers as described here: - // http://gcc.gnu.org/onlinedocs/cpp/Preprocessor-Output.html - OS << '#' << ' ' << Line << ' ' << '"' << Filename << '"'; - if (!Extra.empty()) - OS << Extra; - if (FileType == SrcMgr::C_System) - // "`3' This indicates that the following text comes from a system header - // file, so certain warnings should be suppressed." - OS << " 3"; - else if (FileType == SrcMgr::C_ExternCSystem) - // as above for `3', plus "`4' This indicates that the following text - // should be treated as being wrapped in an implicit extern "C" block." - OS << " 3 4"; - } - OS << EOL; -} - -/// FileChanged - Whenever the preprocessor enters or exits a #include file -/// it invokes this handler. -void InclusionRewriter::FileChanged(SourceLocation Loc, - FileChangeReason Reason, - SrcMgr::CharacteristicKind NewFileType, - FileID) { - if (Reason != EnterFile) - return; - if (LastInsertedFileChange == FileChanges.end()) - // we didn't reach this file (eg: the main file) via an inclusion directive - return; - LastInsertedFileChange->second.Id = FullSourceLoc(Loc, SM).getFileID(); - LastInsertedFileChange->second.FileType = NewFileType; - LastInsertedFileChange = FileChanges.end(); -} - -/// Called whenever an inclusion is skipped due to canonical header protection -/// macros. -void InclusionRewriter::FileSkipped(const FileEntry &/*ParentFile*/, - const Token &/*FilenameTok*/, - SrcMgr::CharacteristicKind /*FileType*/) { - assert(LastInsertedFileChange != FileChanges.end() && "A file, that wasn't " - "found via an inclusion directive, was skipped"); - FileChanges.erase(LastInsertedFileChange); - LastInsertedFileChange = FileChanges.end(); -} - -/// This should be called whenever the preprocessor encounters include -/// directives. It does not say whether the file has been included, but it -/// provides more information about the directive (hash location instead -/// of location inside the included file). It is assumed that the matching -/// FileChanged() or FileSkipped() is called after this. 
-void InclusionRewriter::InclusionDirective(SourceLocation HashLoc, - const Token &/*IncludeTok*/, - StringRef /*FileName*/, - bool /*IsAngled*/, - const FileEntry * /*File*/, - SourceLocation /*EndLoc*/, - StringRef /*SearchPath*/, - StringRef /*RelativePath*/) { - assert(LastInsertedFileChange == FileChanges.end() && "Another inclusion " - "directive was found before the previous one was processed"); - std::pair p = FileChanges.insert( - std::make_pair(HashLoc.getRawEncoding(), FileChange(HashLoc))); - assert(p.second && "Unexpected revisitation of the same include directive"); - LastInsertedFileChange = p.first; -} - -/// Simple lookup for a SourceLocation (specifically one denoting the hash in -/// an inclusion directive) in the map of inclusion information, FileChanges. -const InclusionRewriter::FileChange * -InclusionRewriter::FindFileChangeLocation(SourceLocation Loc) const { - FileChangeMap::const_iterator I = FileChanges.find(Loc.getRawEncoding()); - if (I != FileChanges.end()) - return &I->second; - return NULL; -} - -/// Detect the likely line ending style of \p FromFile by examining the first -/// newline found within it. -static StringRef DetectEOL(const MemoryBuffer &FromFile) { - // detect what line endings the file uses, so that added content does not mix - // the style - const char *Pos = strchr(FromFile.getBufferStart(), '\n'); - if (Pos == NULL) - return "\n"; - if (Pos + 1 < FromFile.getBufferEnd() && Pos[1] == '\r') - return "\n\r"; - if (Pos - 1 >= FromFile.getBufferStart() && Pos[-1] == '\r') - return "\r\n"; - return "\n"; -} - -/// Writes out bytes from \p FromFile, starting at \p NextToWrite and ending at -/// \p WriteTo - 1. -void InclusionRewriter::OutputContentUpTo(const MemoryBuffer &FromFile, - unsigned &WriteFrom, unsigned WriteTo, - StringRef EOL, int &Line, - bool EnsureNewline) { - if (WriteTo <= WriteFrom) - return; - OS.write(FromFile.getBufferStart() + WriteFrom, WriteTo - WriteFrom); - // count lines manually, it's faster than getPresumedLoc() - Line += std::count(FromFile.getBufferStart() + WriteFrom, - FromFile.getBufferStart() + WriteTo, '\n'); - if (EnsureNewline) { - char LastChar = FromFile.getBufferStart()[WriteTo - 1]; - if (LastChar != '\n' && LastChar != '\r') - OS << EOL; - } - WriteFrom = WriteTo; -} - -/// Print characters from \p FromFile starting at \p NextToWrite up until the -/// inclusion directive at \p StartToken, then print out the inclusion -/// inclusion directive disabled by a #if directive, updating \p NextToWrite -/// and \p Line to track the number of source lines visited and the progress -/// through the \p FromFile buffer. -void InclusionRewriter::CommentOutDirective(Lexer &DirectiveLex, - const Token &StartToken, - const MemoryBuffer &FromFile, - StringRef EOL, - unsigned &NextToWrite, int &Line) { - OutputContentUpTo(FromFile, NextToWrite, - SM.getFileOffset(StartToken.getLocation()), EOL, Line); - Token DirectiveToken; - do { - DirectiveLex.LexFromRawLexer(DirectiveToken); - } while (!DirectiveToken.is(tok::eod) && DirectiveToken.isNot(tok::eof)); - OS << "#if 0 /* expanded by -frewrite-includes */" << EOL; - OutputContentUpTo(FromFile, NextToWrite, - SM.getFileOffset(DirectiveToken.getLocation()) + DirectiveToken.getLength(), - EOL, Line); - OS << "#endif /* expanded by -frewrite-includes */" << EOL; -} - -/// Find the next identifier in the pragma directive specified by \p RawToken. 
-StringRef InclusionRewriter::NextIdentifierName(Lexer &RawLex, - Token &RawToken) { - RawLex.LexFromRawLexer(RawToken); - if (RawToken.is(tok::raw_identifier)) - PP.LookUpIdentifierInfo(RawToken); - if (RawToken.is(tok::identifier)) - return RawToken.getIdentifierInfo()->getName(); - return StringRef(); -} - -/// Use a raw lexer to analyze \p FileId, inccrementally copying parts of it -/// and including content of included files recursively. -bool InclusionRewriter::Process(FileID FileId, - SrcMgr::CharacteristicKind FileType) -{ - bool Invalid; - const MemoryBuffer &FromFile = *SM.getBuffer(FileId, &Invalid); - if (Invalid) // invalid inclusion - return true; - const char *FileName = FromFile.getBufferIdentifier(); - Lexer RawLex(FileId, &FromFile, PP.getSourceManager(), PP.getLangOpts()); - RawLex.SetCommentRetentionState(false); - - StringRef EOL = DetectEOL(FromFile); - - // Per the GNU docs: "1" indicates the start of a new file. - WriteLineInfo(FileName, 1, FileType, EOL, " 1"); - - if (SM.getFileIDSize(FileId) == 0) - return true; - - // The next byte to be copied from the source file - unsigned NextToWrite = 0; - int Line = 1; // The current input file line number. - - Token RawToken; - RawLex.LexFromRawLexer(RawToken); - - // TODO: Consider adding a switch that strips possibly unimportant content, - // such as comments, to reduce the size of repro files. - while (RawToken.isNot(tok::eof)) { - if (RawToken.is(tok::hash) && RawToken.isAtStartOfLine()) { - RawLex.setParsingPreprocessorDirective(true); - Token HashToken = RawToken; - RawLex.LexFromRawLexer(RawToken); - if (RawToken.is(tok::raw_identifier)) - PP.LookUpIdentifierInfo(RawToken); - if (RawToken.is(tok::identifier)) { - switch (RawToken.getIdentifierInfo()->getPPKeywordID()) { - case tok::pp_include: - case tok::pp_include_next: - case tok::pp_import: { - CommentOutDirective(RawLex, HashToken, FromFile, EOL, NextToWrite, - Line); - if (const FileChange *Change = FindFileChangeLocation( - HashToken.getLocation())) { - // now include and recursively process the file - if (Process(Change->Id, Change->FileType)) - // and set lineinfo back to this file, if the nested one was - // actually included - // `2' indicates returning to a file (after having included - // another file. 
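// For illustration (a rough sketch; file names invented), rewriting
//   #include "foo.h"
//   int main() { return 0; }
// with -frewrite-includes produces output shaped roughly like
//   # 1 "main.c" 1
//   #if 0 /* expanded by -frewrite-includes */
//   #include "foo.h"
//   #endif /* expanded by -frewrite-includes */
//   # 1 "foo.h" 1
//   ... contents of foo.h ...
//   # 2 "main.c" 2
//   int main() { return 0; }
// using the GNU line-marker flags handled by WriteLineInfo() above:
// 1 = entering a file, 2 = returning to a file, 3 (and 4) = system header
// (plain #line directives are used instead in Microsoft mode).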
- WriteLineInfo(FileName, Line, FileType, EOL, " 2"); - } else - // fix up lineinfo (since commented out directive changed line - // numbers) for inclusions that were skipped due to header guards - WriteLineInfo(FileName, Line, FileType, EOL); - break; - } - case tok::pp_pragma: { - StringRef Identifier = NextIdentifierName(RawLex, RawToken); - if (Identifier == "clang" || Identifier == "GCC") { - if (NextIdentifierName(RawLex, RawToken) == "system_header") { - // keep the directive in, commented out - CommentOutDirective(RawLex, HashToken, FromFile, EOL, - NextToWrite, Line); - // update our own type - FileType = SM.getFileCharacteristic(RawToken.getLocation()); - WriteLineInfo(FileName, Line, FileType, EOL); - } - } else if (Identifier == "once") { - // keep the directive in, commented out - CommentOutDirective(RawLex, HashToken, FromFile, EOL, - NextToWrite, Line); - WriteLineInfo(FileName, Line, FileType, EOL); - } - break; - } - default: - break; - } - } - RawLex.setParsingPreprocessorDirective(false); - } - RawLex.LexFromRawLexer(RawToken); - } - OutputContentUpTo(FromFile, NextToWrite, - SM.getFileOffset(SM.getLocForEndOfFile(FileId)) + 1, EOL, Line, - /*EnsureNewline*/true); - return true; -} - -/// InclusionRewriterInInput - Implement -frewrite-includes mode. -void clang::RewriteIncludesInInput(Preprocessor &PP, raw_ostream *OS, - const PreprocessorOutputOptions &Opts) { - SourceManager &SM = PP.getSourceManager(); - InclusionRewriter *Rewrite = new InclusionRewriter(PP, *OS, - Opts.ShowLineMarkers); - PP.addPPCallbacks(Rewrite); - - // First let the preprocessor process the entire file and call callbacks. - // Callbacks will record which #include's were actually performed. - PP.EnterMainSourceFile(); - Token Tok; - // Only preprocessor directives matter here, so disable macro expansion - // everywhere else as an optimization. - // TODO: It would be even faster if the preprocessor could be switched - // to a mode where it would parse only preprocessor directives and comments, - // nothing else matters for parsing or processing. - PP.SetMacroExpansionOnlyInDirectives(); - do { - PP.Lex(Tok); - } while (Tok.isNot(tok::eof)); - Rewrite->Process(SM.getMainFileID(), SrcMgr::C_User); - OS->flush(); -} diff --git a/lib/Rewrite/Makefile b/lib/Rewrite/Makefile index 5fef9b2..0be84d4 100644 --- a/lib/Rewrite/Makefile +++ b/lib/Rewrite/Makefile @@ -1,4 +1,4 @@ -##===- clang/lib/Rewrite/Makefile --------------------------*- Makefile -*-===## +##===- clang/lib/StaticAnalyzer/Makefile -------------------*- Makefile -*-===## # # The LLVM Compiler Infrastructure # @@ -6,13 +6,9 @@ # License. See LICENSE.TXT for details. # ##===----------------------------------------------------------------------===## -# -# This implements code transformation / rewriting facilities. -# -##===----------------------------------------------------------------------===## CLANG_LEVEL := ../.. -LIBRARYNAME := clangRewrite +DIRS := Frontend +PARALLEL_DIRS := Core include $(CLANG_LEVEL)/Makefile - diff --git a/lib/Rewrite/RewriteMacros.cpp b/lib/Rewrite/RewriteMacros.cpp deleted file mode 100644 index 3fa0bdb..0000000 --- a/lib/Rewrite/RewriteMacros.cpp +++ /dev/null @@ -1,217 +0,0 @@ -//===--- RewriteMacros.cpp - Rewrite macros into their expansions ---------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// -// -// This code rewrites macro invocations into their expansions. This gives you -// a macro expanded file that retains comments and #includes. -// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/Rewriters.h" -#include "clang/Rewrite/Rewriter.h" -#include "clang/Lex/Preprocessor.h" -#include "clang/Basic/SourceManager.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Support/Path.h" -#include "llvm/ADT/OwningPtr.h" -#include - -using namespace clang; - -/// isSameToken - Return true if the two specified tokens start have the same -/// content. -static bool isSameToken(Token &RawTok, Token &PPTok) { - // If two tokens have the same kind and the same identifier info, they are - // obviously the same. - if (PPTok.getKind() == RawTok.getKind() && - PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo()) - return true; - - // Otherwise, if they are different but have the same identifier info, they - // are also considered to be the same. This allows keywords and raw lexed - // identifiers with the same name to be treated the same. - if (PPTok.getIdentifierInfo() && - PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo()) - return true; - - return false; -} - - -/// GetNextRawTok - Return the next raw token in the stream, skipping over -/// comments if ReturnComment is false. -static const Token &GetNextRawTok(const std::vector &RawTokens, - unsigned &CurTok, bool ReturnComment) { - assert(CurTok < RawTokens.size() && "Overran eof!"); - - // If the client doesn't want comments and we have one, skip it. - if (!ReturnComment && RawTokens[CurTok].is(tok::comment)) - ++CurTok; - - return RawTokens[CurTok++]; -} - - -/// LexRawTokensFromMainFile - Lets all the raw tokens from the main file into -/// the specified vector. -static void LexRawTokensFromMainFile(Preprocessor &PP, - std::vector &RawTokens) { - SourceManager &SM = PP.getSourceManager(); - - // Create a lexer to lex all the tokens of the main file in raw mode. Even - // though it is in raw mode, it will not return comments. - const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID()); - Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts()); - - // Switch on comment lexing because we really do want them. - RawLex.SetCommentRetentionState(true); - - Token RawTok; - do { - RawLex.LexFromRawLexer(RawTok); - - // If we have an identifier with no identifier info for our raw token, look - // up the indentifier info. This is important for equality comparison of - // identifier tokens. - if (RawTok.is(tok::raw_identifier)) - PP.LookUpIdentifierInfo(RawTok); - - RawTokens.push_back(RawTok); - } while (RawTok.isNot(tok::eof)); -} - - -/// RewriteMacrosInInput - Implement -rewrite-macros mode. -void clang::RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS) { - SourceManager &SM = PP.getSourceManager(); - - Rewriter Rewrite; - Rewrite.setSourceMgr(SM, PP.getLangOpts()); - RewriteBuffer &RB = Rewrite.getEditBuffer(SM.getMainFileID()); - - std::vector RawTokens; - LexRawTokensFromMainFile(PP, RawTokens); - unsigned CurRawTok = 0; - Token RawTok = GetNextRawTok(RawTokens, CurRawTok, false); - - - // Get the first preprocessing token. - PP.EnterMainSourceFile(); - Token PPTok; - PP.Lex(PPTok); - - // Preprocess the input file in parallel with raw lexing the main file. Ignore - // all tokens that are preprocessed from a file other than the main file (e.g. - // a header). 
If we see tokens that are in the preprocessed file but not the - // lexed file, we have a macro expansion. If we see tokens in the lexed file - // that aren't in the preprocessed view, we have macros that expand to no - // tokens, or macro arguments etc. - while (RawTok.isNot(tok::eof) || PPTok.isNot(tok::eof)) { - SourceLocation PPLoc = SM.getExpansionLoc(PPTok.getLocation()); - - // If PPTok is from a different source file, ignore it. - if (!SM.isFromMainFile(PPLoc)) { - PP.Lex(PPTok); - continue; - } - - // If the raw file hits a preprocessor directive, they will be extra tokens - // in the raw file that don't exist in the preprocsesed file. However, we - // choose to preserve them in the output file and otherwise handle them - // specially. - if (RawTok.is(tok::hash) && RawTok.isAtStartOfLine()) { - // If this is a #warning directive or #pragma mark (GNU extensions), - // comment the line out. - if (RawTokens[CurRawTok].is(tok::identifier)) { - const IdentifierInfo *II = RawTokens[CurRawTok].getIdentifierInfo(); - if (II->getName() == "warning") { - // Comment out #warning. - RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//"); - } else if (II->getName() == "pragma" && - RawTokens[CurRawTok+1].is(tok::identifier) && - (RawTokens[CurRawTok+1].getIdentifierInfo()->getName() == - "mark")) { - // Comment out #pragma mark. - RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//"); - } - } - - // Otherwise, if this is a #include or some other directive, just leave it - // in the file by skipping over the line. - RawTok = GetNextRawTok(RawTokens, CurRawTok, false); - while (!RawTok.isAtStartOfLine() && RawTok.isNot(tok::eof)) - RawTok = GetNextRawTok(RawTokens, CurRawTok, false); - continue; - } - - // Okay, both tokens are from the same file. Get their offsets from the - // start of the file. - unsigned PPOffs = SM.getFileOffset(PPLoc); - unsigned RawOffs = SM.getFileOffset(RawTok.getLocation()); - - // If the offsets are the same and the token kind is the same, ignore them. - if (PPOffs == RawOffs && isSameToken(RawTok, PPTok)) { - RawTok = GetNextRawTok(RawTokens, CurRawTok, false); - PP.Lex(PPTok); - continue; - } - - // If the PP token is farther along than the raw token, something was - // deleted. Comment out the raw token. - if (RawOffs <= PPOffs) { - // Comment out a whole run of tokens instead of bracketing each one with - // comments. Add a leading space if RawTok didn't have one. - bool HasSpace = RawTok.hasLeadingSpace(); - RB.InsertTextAfter(RawOffs, &" /*"[HasSpace]); - unsigned EndPos; - - do { - EndPos = RawOffs+RawTok.getLength(); - - RawTok = GetNextRawTok(RawTokens, CurRawTok, true); - RawOffs = SM.getFileOffset(RawTok.getLocation()); - - if (RawTok.is(tok::comment)) { - // Skip past the comment. - RawTok = GetNextRawTok(RawTokens, CurRawTok, false); - break; - } - - } while (RawOffs <= PPOffs && !RawTok.isAtStartOfLine() && - (PPOffs != RawOffs || !isSameToken(RawTok, PPTok))); - - RB.InsertTextBefore(EndPos, "*/"); - continue; - } - - // Otherwise, there was a replacement an expansion. Insert the new token - // in the output buffer. Insert the whole run of new tokens at once to get - // them in the right order. 
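// For illustration (a rough sketch; spacing and grouping in real output vary
// with the token runs): given
//   #define TWICE(x) ((x) + (x))
//   int n = TWICE(21);
//   #warning fix me
// -rewrite-macros keeps the original text readable while splicing in the
// preprocessed tokens, producing roughly
//   int n = ((21) + (21)) /*TWICE(21)*/;
//   //#warning fix me
// i.e. the raw tokens of the macro use end up inside /* ... */, the expansion
// is inserted next to them, and #warning / #pragma mark lines are commented
// out with "//" as handled earlier in this loop.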
- unsigned InsertPos = PPOffs; - std::string Expansion; - while (PPOffs < RawOffs) { - Expansion += ' ' + PP.getSpelling(PPTok); - PP.Lex(PPTok); - PPLoc = SM.getExpansionLoc(PPTok.getLocation()); - PPOffs = SM.getFileOffset(PPLoc); - } - Expansion += ' '; - RB.InsertTextBefore(InsertPos, Expansion); - } - - // Get the buffer corresponding to MainFileID. If we haven't changed it, then - // we are done. - if (const RewriteBuffer *RewriteBuf = - Rewrite.getRewriteBufferFor(SM.getMainFileID())) { - //printf("Changed:\n"); - *OS << std::string(RewriteBuf->begin(), RewriteBuf->end()); - } else { - fprintf(stderr, "No changes\n"); - } - OS->flush(); -} diff --git a/lib/Rewrite/RewriteModernObjC.cpp b/lib/Rewrite/RewriteModernObjC.cpp deleted file mode 100644 index dcd003f..0000000 --- a/lib/Rewrite/RewriteModernObjC.cpp +++ /dev/null @@ -1,7543 +0,0 @@ -//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// Hacks and fun related to the code rewriter. -// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/ASTConsumers.h" -#include "clang/Rewrite/Rewriter.h" -#include "clang/AST/AST.h" -#include "clang/AST/ASTConsumer.h" -#include "clang/AST/ParentMap.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Basic/IdentifierTable.h" -#include "clang/Basic/Diagnostic.h" -#include "clang/Lex/Lexer.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/ADT/StringExtras.h" -#include "llvm/ADT/SmallPtrSet.h" -#include "llvm/ADT/OwningPtr.h" -#include "llvm/ADT/DenseSet.h" - -using namespace clang; -using llvm::utostr; - -namespace { - class RewriteModernObjC : public ASTConsumer { - protected: - - enum { - BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)), - block, ... */ - BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */ - BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the - __block variable */ - BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy - helpers */ - BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose - support routines */ - BLOCK_BYREF_CURRENT_MAX = 256 - }; - - enum { - BLOCK_NEEDS_FREE = (1 << 24), - BLOCK_HAS_COPY_DISPOSE = (1 << 25), - BLOCK_HAS_CXX_OBJ = (1 << 26), - BLOCK_IS_GC = (1 << 27), - BLOCK_IS_GLOBAL = (1 << 28), - BLOCK_HAS_DESCRIPTOR = (1 << 29) - }; - static const int OBJC_ABI_VERSION = 7; - - Rewriter Rewrite; - DiagnosticsEngine &Diags; - const LangOptions &LangOpts; - ASTContext *Context; - SourceManager *SM; - TranslationUnitDecl *TUDecl; - FileID MainFileID; - const char *MainFileStart, *MainFileEnd; - Stmt *CurrentBody; - ParentMap *PropParentMap; // created lazily. - std::string InFileName; - raw_ostream* OutFile; - std::string Preamble; - - TypeDecl *ProtocolTypeDecl; - VarDecl *GlobalVarDecl; - Expr *GlobalConstructionExp; - unsigned RewriteFailedDiag; - unsigned GlobalBlockRewriteFailedDiag; - // ObjC string constant support. - unsigned NumObjCStringLiterals; - VarDecl *ConstantStringClassReference; - RecordDecl *NSStringRecord; - - // ObjC foreach break/continue generation support. - int BcLabelCount; - - unsigned TryFinallyContainsReturnDiag; - // Needed for super. 
- ObjCMethodDecl *CurMethodDef; - RecordDecl *SuperStructDecl; - RecordDecl *ConstantStringDecl; - - FunctionDecl *MsgSendFunctionDecl; - FunctionDecl *MsgSendSuperFunctionDecl; - FunctionDecl *MsgSendStretFunctionDecl; - FunctionDecl *MsgSendSuperStretFunctionDecl; - FunctionDecl *MsgSendFpretFunctionDecl; - FunctionDecl *GetClassFunctionDecl; - FunctionDecl *GetMetaClassFunctionDecl; - FunctionDecl *GetSuperClassFunctionDecl; - FunctionDecl *SelGetUidFunctionDecl; - FunctionDecl *CFStringFunctionDecl; - FunctionDecl *SuperContructorFunctionDecl; - FunctionDecl *CurFunctionDef; - - /* Misc. containers needed for meta-data rewrite. */ - SmallVector ClassImplementation; - SmallVector CategoryImplementation; - llvm::SmallPtrSet ObjCSynthesizedStructs; - llvm::SmallPtrSet ObjCSynthesizedProtocols; - llvm::SmallPtrSet ObjCWrittenInterfaces; - llvm::SmallPtrSet GlobalDefinedTags; - SmallVector ObjCInterfacesSeen; - /// DefinedNonLazyClasses - List of defined "non-lazy" classes. - SmallVector DefinedNonLazyClasses; - - /// DefinedNonLazyCategories - List of defined "non-lazy" categories. - llvm::SmallVector DefinedNonLazyCategories; - - SmallVector Stmts; - SmallVector ObjCBcLabelNo; - // Remember all the @protocol() expressions. - llvm::SmallPtrSet ProtocolExprDecls; - - llvm::DenseSet CopyDestroyCache; - - // Block expressions. - SmallVector Blocks; - SmallVector InnerDeclRefsCount; - SmallVector InnerDeclRefs; - - SmallVector BlockDeclRefs; - - // Block related declarations. - SmallVector BlockByCopyDecls; - llvm::SmallPtrSet BlockByCopyDeclsPtrSet; - SmallVector BlockByRefDecls; - llvm::SmallPtrSet BlockByRefDeclsPtrSet; - llvm::DenseMap BlockByRefDeclNo; - llvm::SmallPtrSet ImportedBlockDecls; - llvm::SmallPtrSet ImportedLocalExternalDecls; - - llvm::DenseMap RewrittenBlockExprs; - llvm::DenseMap > ReferencedIvars; - - // This maps an original source AST to it's rewritten form. This allows - // us to avoid rewriting the same node twice (which is very uncommon). - // This is needed to support some of the exotic property rewriting. - llvm::DenseMap ReplacedNodes; - - // Needed for header files being rewritten - bool IsHeader; - bool SilenceRewriteMacroWarning; - bool objc_impl_method; - - bool DisableReplaceStmt; - class DisableReplaceStmtScope { - RewriteModernObjC &R; - bool SavedValue; - - public: - DisableReplaceStmtScope(RewriteModernObjC &R) - : R(R), SavedValue(R.DisableReplaceStmt) { - R.DisableReplaceStmt = true; - } - ~DisableReplaceStmtScope() { - R.DisableReplaceStmt = SavedValue; - } - }; - void InitializeCommon(ASTContext &context); - - public: - llvm::DenseMap MethodInternalNames; - // Top Level Driver code. - virtual bool HandleTopLevelDecl(DeclGroupRef D) { - for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - if (ObjCInterfaceDecl *Class = dyn_cast(*I)) { - if (!Class->isThisDeclarationADefinition()) { - RewriteForwardClassDecl(D); - break; - } else { - // Keep track of all interface declarations seen. 
- ObjCInterfacesSeen.push_back(Class); - break; - } - } - - if (ObjCProtocolDecl *Proto = dyn_cast(*I)) { - if (!Proto->isThisDeclarationADefinition()) { - RewriteForwardProtocolDecl(D); - break; - } - } - - HandleTopLevelSingleDecl(*I); - } - return true; - } - void HandleTopLevelSingleDecl(Decl *D); - void HandleDeclInMainFile(Decl *D); - RewriteModernObjC(std::string inFile, raw_ostream *OS, - DiagnosticsEngine &D, const LangOptions &LOpts, - bool silenceMacroWarn); - - ~RewriteModernObjC() {} - - virtual void HandleTranslationUnit(ASTContext &C); - - void ReplaceStmt(Stmt *Old, Stmt *New) { - Stmt *ReplacingStmt = ReplacedNodes[Old]; - - if (ReplacingStmt) - return; // We can't rewrite the same node twice. - - if (DisableReplaceStmt) - return; - - // If replacement succeeded or warning disabled return with no warning. - if (!Rewrite.ReplaceStmt(Old, New)) { - ReplacedNodes[Old] = New; - return; - } - if (SilenceRewriteMacroWarning) - return; - Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) - << Old->getSourceRange(); - } - - void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) { - if (DisableReplaceStmt) - return; - - // Measure the old text. - int Size = Rewrite.getRangeSize(SrcRange); - if (Size == -1) { - Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) - << Old->getSourceRange(); - return; - } - // Get the new text. - std::string SStr; - llvm::raw_string_ostream S(SStr); - New->printPretty(S, 0, PrintingPolicy(LangOpts)); - const std::string &Str = S.str(); - - // If replacement succeeded or warning disabled return with no warning. - if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) { - ReplacedNodes[Old] = New; - return; - } - if (SilenceRewriteMacroWarning) - return; - Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) - << Old->getSourceRange(); - } - - void InsertText(SourceLocation Loc, StringRef Str, - bool InsertAfter = true) { - // If insertion succeeded or warning disabled return with no warning. - if (!Rewrite.InsertText(Loc, Str, InsertAfter) || - SilenceRewriteMacroWarning) - return; - - Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag); - } - - void ReplaceText(SourceLocation Start, unsigned OrigLength, - StringRef Str) { - // If removal succeeded or warning disabled return with no warning. - if (!Rewrite.ReplaceText(Start, OrigLength, Str) || - SilenceRewriteMacroWarning) - return; - - Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag); - } - - // Syntactic Rewriting. 
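// As a concrete example of these syntactic rewrites (class name invented),
// RewriteForwardClassDecl / RewriteForwardClassEpilogue, declared below,
// replace
//   @class Foo;
// with the typedef guard built by RewriteOneForwardClassDecl:
//   // @class Foo;
//   #ifndef _REWRITER_typedef_Foo
//   #define _REWRITER_typedef_Foo
//   typedef struct objc_object Foo;
//   typedef struct {} _objc_exc_Foo;
//   #endif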
- void RewriteRecordBody(RecordDecl *RD); - void RewriteInclude(); - void RewriteForwardClassDecl(DeclGroupRef D); - void RewriteForwardClassDecl(const llvm::SmallVector &DG); - void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl, - const std::string &typedefString); - void RewriteImplementations(); - void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, - ObjCImplementationDecl *IMD, - ObjCCategoryImplDecl *CID); - void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl); - void RewriteImplementationDecl(Decl *Dcl); - void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl, - ObjCMethodDecl *MDecl, std::string &ResultStr); - void RewriteTypeIntoString(QualType T, std::string &ResultStr, - const FunctionType *&FPRetType); - void RewriteByRefString(std::string &ResultStr, const std::string &Name, - ValueDecl *VD, bool def=false); - void RewriteCategoryDecl(ObjCCategoryDecl *Dcl); - void RewriteProtocolDecl(ObjCProtocolDecl *Dcl); - void RewriteForwardProtocolDecl(DeclGroupRef D); - void RewriteForwardProtocolDecl(const llvm::SmallVector &DG); - void RewriteMethodDeclaration(ObjCMethodDecl *Method); - void RewriteProperty(ObjCPropertyDecl *prop); - void RewriteFunctionDecl(FunctionDecl *FD); - void RewriteBlockPointerType(std::string& Str, QualType Type); - void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD); - void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD); - void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl); - void RewriteTypeOfDecl(VarDecl *VD); - void RewriteObjCQualifiedInterfaceTypes(Expr *E); - - std::string getIvarAccessString(ObjCIvarDecl *D); - - // Expression Rewriting. - Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S); - Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp); - Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo); - Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo); - Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp); - Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp); - Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp); - Stmt *RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp); - Stmt *RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp); - Stmt *RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp); - Stmt *RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp); - Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp); - Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S); - Stmt *RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S); - Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S); - Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S); - Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, - SourceLocation OrigEnd); - Stmt *RewriteBreakStmt(BreakStmt *S); - Stmt *RewriteContinueStmt(ContinueStmt *S); - void RewriteCastExpr(CStyleCastExpr *CE); - void RewriteImplicitCastObjCExpr(CastExpr *IE); - void RewriteLinkageSpec(LinkageSpecDecl *LSD); - - // Block rewriting. - void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D); - - // Block specific rewrite rules. 
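// Rough shape of what the block rewrites below synthesize (a sketch inferred
// from the helper names; the struct and function names are illustrative):
//   struct __main_block_impl_0 {
//     struct __block_impl impl;            // isa, flags, function pointer
//     struct __main_block_desc_0 *Desc;
//     /* captured variables, by copy or by reference */
//   };
//   static void __main_block_func_0(struct __main_block_impl_0 *__cself) {
//     /* rewritten block body */
//   }
// plus a block descriptor and, for __block variables, a per-variable byref
// struct (see SynthesizeBlockImpl, SynthesizeBlockFunc,
// SynthesizeBlockDescriptor and RewriteByRefVar).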
- void RewriteBlockPointerDecl(NamedDecl *VD); - void RewriteByRefVar(VarDecl *VD, bool firstDecl, bool lastDecl); - Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD); - Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE); - void RewriteBlockPointerFunctionArgs(FunctionDecl *FD); - - void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl, - std::string &Result); - - void RewriteObjCFieldDecl(FieldDecl *fieldDecl, std::string &Result); - bool IsTagDefinedInsideClass(ObjCContainerDecl *IDecl, TagDecl *Tag, - bool &IsNamedDefinition); - void RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl, - std::string &Result); - - bool RewriteObjCFieldDeclType(QualType &Type, std::string &Result); - - void RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl, - std::string &Result); - - virtual void Initialize(ASTContext &context); - - // Misc. AST transformation routines. Sometimes they end up calling - // rewriting routines on the new ASTs. - CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD, - Expr **args, unsigned nargs, - SourceLocation StartLoc=SourceLocation(), - SourceLocation EndLoc=SourceLocation()); - - Expr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor, - QualType msgSendType, - QualType returnType, - SmallVectorImpl &ArgTypes, - SmallVectorImpl &MsgExprs, - ObjCMethodDecl *Method); - - Stmt *SynthMessageExpr(ObjCMessageExpr *Exp, - SourceLocation StartLoc=SourceLocation(), - SourceLocation EndLoc=SourceLocation()); - - void SynthCountByEnumWithState(std::string &buf); - void SynthMsgSendFunctionDecl(); - void SynthMsgSendSuperFunctionDecl(); - void SynthMsgSendStretFunctionDecl(); - void SynthMsgSendFpretFunctionDecl(); - void SynthMsgSendSuperStretFunctionDecl(); - void SynthGetClassFunctionDecl(); - void SynthGetMetaClassFunctionDecl(); - void SynthGetSuperClassFunctionDecl(); - void SynthSelGetUidFunctionDecl(); - void SynthSuperContructorFunctionDecl(); - - // Rewriting metadata - template - void RewriteObjCMethodsMetaData(MethodIterator MethodBegin, - MethodIterator MethodEnd, - bool IsInstanceMethod, - StringRef prefix, - StringRef ClassName, - std::string &Result); - void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol, - std::string &Result); - void RewriteObjCProtocolListMetaData( - const ObjCList &Prots, - StringRef prefix, StringRef ClassName, std::string &Result); - void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, - std::string &Result); - void RewriteClassSetupInitHook(std::string &Result); - - void RewriteMetaDataIntoBuffer(std::string &Result); - void WriteImageInfo(std::string &Result); - void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl, - std::string &Result); - void RewriteCategorySetupInitHook(std::string &Result); - - // Rewriting ivar - void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, - std::string &Result); - Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV); - - - std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag); - std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i, - StringRef funcName, std::string Tag); - std::string SynthesizeBlockFunc(BlockExpr *CE, int i, - StringRef funcName, std::string Tag); - std::string SynthesizeBlockImpl(BlockExpr *CE, - std::string Tag, std::string Desc); - std::string SynthesizeBlockDescriptor(std::string DescTag, - std::string ImplTag, - int i, StringRef funcName, - unsigned hasCopy); - Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp); - void SynthesizeBlockLiterals(SourceLocation FunLocStart, - StringRef FunName); - FunctionDecl 
*SynthBlockInitFunctionDecl(StringRef name); - Stmt *SynthBlockInitExpr(BlockExpr *Exp, - const SmallVector &InnerBlockDeclRefs); - - // Misc. helper routines. - QualType getProtocolType(); - void WarnAboutReturnGotoStmts(Stmt *S); - void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND); - void InsertBlockLiteralsWithinFunction(FunctionDecl *FD); - void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD); - - bool IsDeclStmtInForeachHeader(DeclStmt *DS); - void CollectBlockDeclRefInfo(BlockExpr *Exp); - void GetBlockDeclRefExprs(Stmt *S); - void GetInnerBlockDeclRefExprs(Stmt *S, - SmallVector &InnerBlockDeclRefs, - llvm::SmallPtrSet &InnerContexts); - - // We avoid calling Type::isBlockPointerType(), since it operates on the - // canonical type. We only care if the top-level type is a closure pointer. - bool isTopLevelBlockPointerType(QualType T) { - return isa(T); - } - - /// convertBlockPointerToFunctionPointer - Converts a block-pointer type - /// to a function pointer type and upon success, returns true; false - /// otherwise. - bool convertBlockPointerToFunctionPointer(QualType &T) { - if (isTopLevelBlockPointerType(T)) { - const BlockPointerType *BPT = T->getAs(); - T = Context->getPointerType(BPT->getPointeeType()); - return true; - } - return false; - } - - bool convertObjCTypeToCStyleType(QualType &T); - - bool needToScanForQualifiers(QualType T); - QualType getSuperStructType(); - QualType getConstantStringStructType(); - QualType convertFunctionTypeOfBlocks(const FunctionType *FT); - bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf); - - void convertToUnqualifiedObjCType(QualType &T) { - if (T->isObjCQualifiedIdType()) { - bool isConst = T.isConstQualified(); - T = isConst ? Context->getObjCIdType().withConst() - : Context->getObjCIdType(); - } - else if (T->isObjCQualifiedClassType()) - T = Context->getObjCClassType(); - else if (T->isObjCObjectPointerType() && - T->getPointeeType()->isObjCQualifiedInterfaceType()) { - if (const ObjCObjectPointerType * OBJPT = - T->getAsObjCInterfacePointerType()) { - const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType(); - T = QualType(IFaceT, 0); - T = Context->getPointerType(T); - } - } - } - - // FIXME: This predicate seems like it would be useful to add to ASTContext. 
- bool isObjCType(QualType T) { - if (!LangOpts.ObjC1 && !LangOpts.ObjC2) - return false; - - QualType OCT = Context->getCanonicalType(T).getUnqualifiedType(); - - if (OCT == Context->getCanonicalType(Context->getObjCIdType()) || - OCT == Context->getCanonicalType(Context->getObjCClassType())) - return true; - - if (const PointerType *PT = OCT->getAs()) { - if (isa(PT->getPointeeType()) || - PT->getPointeeType()->isObjCQualifiedIdType()) - return true; - } - return false; - } - bool PointerTypeTakesAnyBlockArguments(QualType QT); - bool PointerTypeTakesAnyObjCQualifiedType(QualType QT); - void GetExtentOfArgList(const char *Name, const char *&LParen, - const char *&RParen); - - void QuoteDoublequotes(std::string &From, std::string &To) { - for (unsigned i = 0; i < From.length(); i++) { - if (From[i] == '"') - To += "\\\""; - else - To += From[i]; - } - } - - QualType getSimpleFunctionType(QualType result, - const QualType *args, - unsigned numArgs, - bool variadic = false) { - if (result == Context->getObjCInstanceType()) - result = Context->getObjCIdType(); - FunctionProtoType::ExtProtoInfo fpi; - fpi.Variadic = variadic; - return Context->getFunctionType(result, args, numArgs, fpi); - } - - // Helper function: create a CStyleCastExpr with trivial type source info. - CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty, - CastKind Kind, Expr *E) { - TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation()); - return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo, - SourceLocation(), SourceLocation()); - } - - bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const { - IdentifierInfo* II = &Context->Idents.get("load"); - Selector LoadSel = Context->Selectors.getSelector(0, &II); - return OD->getClassMethod(LoadSel) != 0; - } - }; - -} - -void RewriteModernObjC::RewriteBlocksInFunctionProtoType(QualType funcType, - NamedDecl *D) { - if (const FunctionProtoType *fproto - = dyn_cast(funcType.IgnoreParens())) { - for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(), - E = fproto->arg_type_end(); I && (I != E); ++I) - if (isTopLevelBlockPointerType(*I)) { - // All the args are checked/rewritten. Don't call twice! - RewriteBlockPointerDecl(D); - break; - } - } -} - -void RewriteModernObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) { - const PointerType *PT = funcType->getAs(); - if (PT && PointerTypeTakesAnyBlockArguments(funcType)) - RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND); -} - -static bool IsHeaderFile(const std::string &Filename) { - std::string::size_type DotPos = Filename.rfind('.'); - - if (DotPos == std::string::npos) { - // no file extension - return false; - } - - std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end()); - // C header: .h - // C++ header: .hh or .H; - return Ext == "h" || Ext == "hh" || Ext == "H"; -} - -RewriteModernObjC::RewriteModernObjC(std::string inFile, raw_ostream* OS, - DiagnosticsEngine &D, const LangOptions &LOpts, - bool silenceMacroWarn) - : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS), - SilenceRewriteMacroWarning(silenceMacroWarn) { - IsHeader = IsHeaderFile(inFile); - RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning, - "rewriting sub-expression within a macro (may not be correct)"); - // FIXME. This should be an error. But if block is not called, it is OK. And it - // may break including some headers. 
- GlobalBlockRewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning, - "rewriting block literal declared in global scope is not implemented"); - - TryFinallyContainsReturnDiag = Diags.getCustomDiagID( - DiagnosticsEngine::Warning, - "rewriter doesn't support user-specified control flow semantics " - "for @try/@finally (code may not execute properly)"); -} - -ASTConsumer *clang::CreateModernObjCRewriter(const std::string& InFile, - raw_ostream* OS, - DiagnosticsEngine &Diags, - const LangOptions &LOpts, - bool SilenceRewriteMacroWarning) { - return new RewriteModernObjC(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning); -} - -void RewriteModernObjC::InitializeCommon(ASTContext &context) { - Context = &context; - SM = &Context->getSourceManager(); - TUDecl = Context->getTranslationUnitDecl(); - MsgSendFunctionDecl = 0; - MsgSendSuperFunctionDecl = 0; - MsgSendStretFunctionDecl = 0; - MsgSendSuperStretFunctionDecl = 0; - MsgSendFpretFunctionDecl = 0; - GetClassFunctionDecl = 0; - GetMetaClassFunctionDecl = 0; - GetSuperClassFunctionDecl = 0; - SelGetUidFunctionDecl = 0; - CFStringFunctionDecl = 0; - ConstantStringClassReference = 0; - NSStringRecord = 0; - CurMethodDef = 0; - CurFunctionDef = 0; - GlobalVarDecl = 0; - GlobalConstructionExp = 0; - SuperStructDecl = 0; - ProtocolTypeDecl = 0; - ConstantStringDecl = 0; - BcLabelCount = 0; - SuperContructorFunctionDecl = 0; - NumObjCStringLiterals = 0; - PropParentMap = 0; - CurrentBody = 0; - DisableReplaceStmt = false; - objc_impl_method = false; - - // Get the ID and start/end of the main file. - MainFileID = SM->getMainFileID(); - const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID); - MainFileStart = MainBuf->getBufferStart(); - MainFileEnd = MainBuf->getBufferEnd(); - - Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts()); -} - -//===----------------------------------------------------------------------===// -// Top Level Driver Code -//===----------------------------------------------------------------------===// - -void RewriteModernObjC::HandleTopLevelSingleDecl(Decl *D) { - if (Diags.hasErrorOccurred()) - return; - - // Two cases: either the decl could be in the main file, or it could be in a - // #included file. If the former, rewrite it now. If the later, check to see - // if we rewrote the #include/#import. - SourceLocation Loc = D->getLocation(); - Loc = SM->getExpansionLoc(Loc); - - // If this is for a builtin, ignore it. - if (Loc.isInvalid()) return; - - // Look for built-in declarations that we need to refer during the rewrite. - if (FunctionDecl *FD = dyn_cast(D)) { - RewriteFunctionDecl(FD); - } else if (VarDecl *FVD = dyn_cast(D)) { - // declared in - if (FVD->getName() == "_NSConstantStringClassReference") { - ConstantStringClassReference = FVD; - return; - } - } else if (ObjCCategoryDecl *CD = dyn_cast(D)) { - RewriteCategoryDecl(CD); - } else if (ObjCProtocolDecl *PD = dyn_cast(D)) { - if (PD->isThisDeclarationADefinition()) - RewriteProtocolDecl(PD); - } else if (LinkageSpecDecl *LSD = dyn_cast(D)) { - // FIXME. This will not work in all situations and leaving it out - // is harmless. 
- // RewriteLinkageSpec(LSD); - - // Recurse into linkage specifications - for (DeclContext::decl_iterator DI = LSD->decls_begin(), - DIEnd = LSD->decls_end(); - DI != DIEnd; ) { - if (ObjCInterfaceDecl *IFace = dyn_cast((*DI))) { - if (!IFace->isThisDeclarationADefinition()) { - SmallVector DG; - SourceLocation StartLoc = IFace->getLocStart(); - do { - if (isa(*DI) && - !cast(*DI)->isThisDeclarationADefinition() && - StartLoc == (*DI)->getLocStart()) - DG.push_back(*DI); - else - break; - - ++DI; - } while (DI != DIEnd); - RewriteForwardClassDecl(DG); - continue; - } - else { - // Keep track of all interface declarations seen. - ObjCInterfacesSeen.push_back(IFace); - ++DI; - continue; - } - } - - if (ObjCProtocolDecl *Proto = dyn_cast((*DI))) { - if (!Proto->isThisDeclarationADefinition()) { - SmallVector DG; - SourceLocation StartLoc = Proto->getLocStart(); - do { - if (isa(*DI) && - !cast(*DI)->isThisDeclarationADefinition() && - StartLoc == (*DI)->getLocStart()) - DG.push_back(*DI); - else - break; - - ++DI; - } while (DI != DIEnd); - RewriteForwardProtocolDecl(DG); - continue; - } - } - - HandleTopLevelSingleDecl(*DI); - ++DI; - } - } - // If we have a decl in the main file, see if we should rewrite it. - if (SM->isFromMainFile(Loc)) - return HandleDeclInMainFile(D); -} - -//===----------------------------------------------------------------------===// -// Syntactic (non-AST) Rewriting Code -//===----------------------------------------------------------------------===// - -void RewriteModernObjC::RewriteInclude() { - SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID); - StringRef MainBuf = SM->getBufferData(MainFileID); - const char *MainBufStart = MainBuf.begin(); - const char *MainBufEnd = MainBuf.end(); - size_t ImportLen = strlen("import"); - - // Loop over the whole file, looking for includes. - for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) { - if (*BufPtr == '#') { - if (++BufPtr == MainBufEnd) - return; - while (*BufPtr == ' ' || *BufPtr == '\t') - if (++BufPtr == MainBufEnd) - return; - if (!strncmp(BufPtr, "import", ImportLen)) { - // replace import with include - SourceLocation ImportLoc = - LocStart.getLocWithOffset(BufPtr-MainBufStart); - ReplaceText(ImportLoc, ImportLen, "include"); - BufPtr += ImportLen; - } - } - } -} - -static void WriteInternalIvarName(const ObjCInterfaceDecl *IDecl, - ObjCIvarDecl *IvarDecl, std::string &Result) { - Result += "OBJC_IVAR_$_"; - Result += IDecl->getName(); - Result += "$"; - Result += IvarDecl->getName(); -} - -std::string -RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) { - const ObjCInterfaceDecl *ClassDecl = D->getContainingInterface(); - - // Build name of symbol holding ivar offset. - std::string IvarOffsetName; - WriteInternalIvarName(ClassDecl, D, IvarOffsetName); - - - std::string S = "(*("; - QualType IvarT = D->getType(); - - if (!isa(IvarT) && IvarT->isRecordType()) { - RecordDecl *RD = IvarT->getAs()->getDecl(); - RD = RD->getDefinition(); - if (RD && !RD->getDeclName().getAsIdentifierInfo()) { - // decltype(((Foo_IMPL*)0)->bar) * - ObjCContainerDecl *CDecl = - dyn_cast(D->getDeclContext()); - // ivar in class extensions requires special treatment. 
- if (ObjCCategoryDecl *CatDecl = dyn_cast(CDecl)) - CDecl = CatDecl->getClassInterface(); - std::string RecName = CDecl->getName(); - RecName += "_IMPL"; - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get(RecName.c_str())); - QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD)); - unsigned UnsignedIntSize = - static_cast(Context->getTypeSize(Context->UnsignedIntTy)); - Expr *Zero = IntegerLiteral::Create(*Context, - llvm::APInt(UnsignedIntSize, 0), - Context->UnsignedIntTy, SourceLocation()); - Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero); - ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), - Zero); - FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get(D->getNameAsString()), - IvarT, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); - IvarT = Context->getDecltypeType(ME, ME->getType()); - } - } - convertObjCTypeToCStyleType(IvarT); - QualType castT = Context->getPointerType(IvarT); - std::string TypeString(castT.getAsString(Context->getPrintingPolicy())); - S += TypeString; - S += ")"; - - // ((char *)self + IVAR_OFFSET_SYMBOL_NAME) - S += "((char *)self + "; - S += IvarOffsetName; - S += "))"; - ReferencedIvars[const_cast(ClassDecl)].insert(D); - return S; -} - -/// mustSynthesizeSetterGetterMethod - returns true if setter or getter has not -/// been found in the class implementation. In this case, it must be synthesized. -static bool mustSynthesizeSetterGetterMethod(ObjCImplementationDecl *IMP, - ObjCPropertyDecl *PD, - bool getter) { - return getter ? !IMP->getInstanceMethod(PD->getGetterName()) - : !IMP->getInstanceMethod(PD->getSetterName()); - -} - -void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, - ObjCImplementationDecl *IMD, - ObjCCategoryImplDecl *CID) { - static bool objcGetPropertyDefined = false; - static bool objcSetPropertyDefined = false; - SourceLocation startGetterSetterLoc; - - if (PID->getLocStart().isValid()) { - SourceLocation startLoc = PID->getLocStart(); - InsertText(startLoc, "// "); - const char *startBuf = SM->getCharacterData(startLoc); - assert((*startBuf == '@') && "bogus @synthesize location"); - const char *semiBuf = strchr(startBuf, ';'); - assert((*semiBuf == ';') && "@synthesize: can't find ';'"); - startGetterSetterLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1); - } - else - startGetterSetterLoc = IMD ? IMD->getLocEnd() : CID->getLocEnd(); - - if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) - return; // FIXME: is this correct? - - // Generate the 'getter' function. - ObjCPropertyDecl *PD = PID->getPropertyDecl(); - ObjCIvarDecl *OID = PID->getPropertyIvarDecl(); - - if (!OID) - return; - unsigned Attributes = PD->getPropertyAttributes(); - if (mustSynthesizeSetterGetterMethod(IMD, PD, true /*getter*/)) { - bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) && - (Attributes & (ObjCPropertyDecl::OBJC_PR_retain | - ObjCPropertyDecl::OBJC_PR_copy)); - std::string Getr; - if (GenGetProperty && !objcGetPropertyDefined) { - objcGetPropertyDefined = true; - // FIXME. Is this attribute correct in all cases? 
- Getr = "\nextern \"C\" __declspec(dllimport) " - "id objc_getProperty(id, SEL, long, bool);\n"; - } - RewriteObjCMethodDecl(OID->getContainingInterface(), - PD->getGetterMethodDecl(), Getr); - Getr += "{ "; - // Synthesize an explicit cast to gain access to the ivar. - // See objc-act.c:objc_synthesize_new_getter() for details. - if (GenGetProperty) { - // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1) - Getr += "typedef "; - const FunctionType *FPRetType = 0; - RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr, - FPRetType); - Getr += " _TYPE"; - if (FPRetType) { - Getr += ")"; // close the precedence "scope" for "*". - - // Now, emit the argument types (if any). - if (const FunctionProtoType *FT = dyn_cast(FPRetType)){ - Getr += "("; - for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { - if (i) Getr += ", "; - std::string ParamStr = FT->getArgType(i).getAsString( - Context->getPrintingPolicy()); - Getr += ParamStr; - } - if (FT->isVariadic()) { - if (FT->getNumArgs()) Getr += ", "; - Getr += "..."; - } - Getr += ")"; - } else - Getr += "()"; - } - Getr += ";\n"; - Getr += "return (_TYPE)"; - Getr += "objc_getProperty(self, _cmd, "; - RewriteIvarOffsetComputation(OID, Getr); - Getr += ", 1)"; - } - else - Getr += "return " + getIvarAccessString(OID); - Getr += "; }"; - InsertText(startGetterSetterLoc, Getr); - } - - if (PD->isReadOnly() || - !mustSynthesizeSetterGetterMethod(IMD, PD, false /*setter*/)) - return; - - // Generate the 'setter' function. - std::string Setr; - bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain | - ObjCPropertyDecl::OBJC_PR_copy); - if (GenSetProperty && !objcSetPropertyDefined) { - objcSetPropertyDefined = true; - // FIXME. Is this attribute correct in all cases? - Setr = "\nextern \"C\" __declspec(dllimport) " - "void objc_setProperty (id, SEL, long, id, bool, bool);\n"; - } - - RewriteObjCMethodDecl(OID->getContainingInterface(), - PD->getSetterMethodDecl(), Setr); - Setr += "{ "; - // Synthesize an explicit cast to initialize the ivar. - // See objc-act.c:objc_synthesize_new_setter() for details. 
- if (GenSetProperty) { - Setr += "objc_setProperty (self, _cmd, "; - RewriteIvarOffsetComputation(OID, Setr); - Setr += ", (id)"; - Setr += PD->getName(); - Setr += ", "; - if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) - Setr += "0, "; - else - Setr += "1, "; - if (Attributes & ObjCPropertyDecl::OBJC_PR_copy) - Setr += "1)"; - else - Setr += "0)"; - } - else { - Setr += getIvarAccessString(OID) + " = "; - Setr += PD->getName(); - } - Setr += "; }\n"; - InsertText(startGetterSetterLoc, Setr); -} - -static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl, - std::string &typedefString) { - typedefString += "#ifndef _REWRITER_typedef_"; - typedefString += ForwardDecl->getNameAsString(); - typedefString += "\n"; - typedefString += "#define _REWRITER_typedef_"; - typedefString += ForwardDecl->getNameAsString(); - typedefString += "\n"; - typedefString += "typedef struct objc_object "; - typedefString += ForwardDecl->getNameAsString(); - // typedef struct { } _objc_exc_Classname; - typedefString += ";\ntypedef struct {} _objc_exc_"; - typedefString += ForwardDecl->getNameAsString(); - typedefString += ";\n#endif\n"; -} - -void RewriteModernObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl, - const std::string &typedefString) { - SourceLocation startLoc = ClassDecl->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - const char *semiPtr = strchr(startBuf, ';'); - // Replace the @class with typedefs corresponding to the classes. - ReplaceText(startLoc, semiPtr-startBuf+1, typedefString); -} - -void RewriteModernObjC::RewriteForwardClassDecl(DeclGroupRef D) { - std::string typedefString; - for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - ObjCInterfaceDecl *ForwardDecl = cast(*I); - if (I == D.begin()) { - // Translate to typedef's that forward reference structs with the same name - // as the class. As a convenience, we include the original declaration - // as a comment. - typedefString += "// @class "; - typedefString += ForwardDecl->getNameAsString(); - typedefString += ";\n"; - } - RewriteOneForwardClassDecl(ForwardDecl, typedefString); - } - DeclGroupRef::iterator I = D.begin(); - RewriteForwardClassEpilogue(cast(*I), typedefString); -} - -void RewriteModernObjC::RewriteForwardClassDecl( - const llvm::SmallVector &D) { - std::string typedefString; - for (unsigned i = 0; i < D.size(); i++) { - ObjCInterfaceDecl *ForwardDecl = cast(D[i]); - if (i == 0) { - typedefString += "// @class "; - typedefString += ForwardDecl->getNameAsString(); - typedefString += ";\n"; - } - RewriteOneForwardClassDecl(ForwardDecl, typedefString); - } - RewriteForwardClassEpilogue(cast(D[0]), typedefString); -} - -void RewriteModernObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) { - // When method is a synthesized one, such as a getter/setter there is - // nothing to rewrite. - if (Method->isImplicit()) - return; - SourceLocation LocStart = Method->getLocStart(); - SourceLocation LocEnd = Method->getLocEnd(); - - if (SM->getExpansionLineNumber(LocEnd) > - SM->getExpansionLineNumber(LocStart)) { - InsertText(LocStart, "#if 0\n"); - ReplaceText(LocEnd, 1, ";\n#endif\n"); - } else { - InsertText(LocStart, "// "); - } -} - -void RewriteModernObjC::RewriteProperty(ObjCPropertyDecl *prop) { - SourceLocation Loc = prop->getAtLoc(); - - ReplaceText(Loc, 0, "// "); - // FIXME: handle properties that are declared across multiple lines. 
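[Editorial note, not part of the patch] Illustration only (class name "Foo" invented): the block RewriteOneForwardClassDecl() above emits for "@class Foo;", reproduced so its shape is easy to see. The result is compilable C++ in which "Foo *" stays usable as an opaque pointer type.

#ifndef _REWRITER_typedef_Foo
#define _REWRITER_typedef_Foo
typedef struct objc_object Foo;   // the forward-declared class becomes an opaque struct
typedef struct {} _objc_exc_Foo;  // helper type later used when rewriting @catch (Foo *)
#endif

int main() {
  Foo *p = 0;                      // any "Foo *" in the original source still type-checks
  return p ? 1 : 0;
}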
-} - -void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) { - SourceLocation LocStart = CatDecl->getLocStart(); - - // FIXME: handle category headers that are declared across multiple lines. - if (CatDecl->getIvarRBraceLoc().isValid()) { - ReplaceText(LocStart, 1, "/** "); - ReplaceText(CatDecl->getIvarRBraceLoc(), 1, "**/ "); - } - else { - ReplaceText(LocStart, 0, "// "); - } - - for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(), - E = CatDecl->prop_end(); I != E; ++I) - RewriteProperty(*I); - - for (ObjCCategoryDecl::instmeth_iterator - I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - for (ObjCCategoryDecl::classmeth_iterator - I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - - // Lastly, comment out the @end. - ReplaceText(CatDecl->getAtEndRange().getBegin(), - strlen("@end"), "/* @end */"); -} - -void RewriteModernObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) { - SourceLocation LocStart = PDecl->getLocStart(); - assert(PDecl->isThisDeclarationADefinition()); - - // FIXME: handle protocol headers that are declared across multiple lines. - ReplaceText(LocStart, 0, "// "); - - for (ObjCProtocolDecl::instmeth_iterator - I = PDecl->instmeth_begin(), E = PDecl->instmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - for (ObjCProtocolDecl::classmeth_iterator - I = PDecl->classmeth_begin(), E = PDecl->classmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - - for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(), - E = PDecl->prop_end(); I != E; ++I) - RewriteProperty(*I); - - // Lastly, comment out the @end. - SourceLocation LocEnd = PDecl->getAtEndRange().getBegin(); - ReplaceText(LocEnd, strlen("@end"), "/* @end */"); - - // Must comment out @optional/@required - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - for (const char *p = startBuf; p < endBuf; p++) { - if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) { - SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf); - ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */"); - - } - else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) { - SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf); - ReplaceText(OptionalLoc, strlen("@required"), "/* @required */"); - - } - } -} - -void RewriteModernObjC::RewriteForwardProtocolDecl(DeclGroupRef D) { - SourceLocation LocStart = (*D.begin())->getLocStart(); - if (LocStart.isInvalid()) - llvm_unreachable("Invalid SourceLocation"); - // FIXME: handle forward protocol that are declared across multiple lines. - ReplaceText(LocStart, 0, "// "); -} - -void -RewriteModernObjC::RewriteForwardProtocolDecl(const llvm::SmallVector &DG) { - SourceLocation LocStart = DG[0]->getLocStart(); - if (LocStart.isInvalid()) - llvm_unreachable("Invalid SourceLocation"); - // FIXME: handle forward protocol that are declared across multiple lines. - ReplaceText(LocStart, 0, "// "); -} - -void -RewriteModernObjC::RewriteLinkageSpec(LinkageSpecDecl *LSD) { - SourceLocation LocStart = LSD->getExternLoc(); - if (LocStart.isInvalid()) - llvm_unreachable("Invalid extern SourceLocation"); - - ReplaceText(LocStart, 0, "// "); - if (!LSD->hasBraces()) - return; - // FIXME. We don't rewrite well if '{' is not on same line as 'extern'. 
- SourceLocation LocRBrace = LSD->getRBraceLoc(); - if (LocRBrace.isInvalid()) - llvm_unreachable("Invalid rbrace SourceLocation"); - ReplaceText(LocRBrace, 0, "// "); -} - -void RewriteModernObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr, - const FunctionType *&FPRetType) { - if (T->isObjCQualifiedIdType()) - ResultStr += "id"; - else if (T->isFunctionPointerType() || - T->isBlockPointerType()) { - // needs special handling, since pointer-to-functions have special - // syntax (where a decaration models use). - QualType retType = T; - QualType PointeeTy; - if (const PointerType* PT = retType->getAs()) - PointeeTy = PT->getPointeeType(); - else if (const BlockPointerType *BPT = retType->getAs()) - PointeeTy = BPT->getPointeeType(); - if ((FPRetType = PointeeTy->getAs())) { - ResultStr += FPRetType->getResultType().getAsString( - Context->getPrintingPolicy()); - ResultStr += "(*"; - } - } else - ResultStr += T.getAsString(Context->getPrintingPolicy()); -} - -void RewriteModernObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl, - ObjCMethodDecl *OMD, - std::string &ResultStr) { - //fprintf(stderr,"In RewriteObjCMethodDecl\n"); - const FunctionType *FPRetType = 0; - ResultStr += "\nstatic "; - RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType); - ResultStr += " "; - - // Unique method name - std::string NameStr; - - if (OMD->isInstanceMethod()) - NameStr += "_I_"; - else - NameStr += "_C_"; - - NameStr += IDecl->getNameAsString(); - NameStr += "_"; - - if (ObjCCategoryImplDecl *CID = - dyn_cast(OMD->getDeclContext())) { - NameStr += CID->getNameAsString(); - NameStr += "_"; - } - // Append selector names, replacing ':' with '_' - { - std::string selString = OMD->getSelector().getAsString(); - int len = selString.size(); - for (int i = 0; i < len; i++) - if (selString[i] == ':') - selString[i] = '_'; - NameStr += selString; - } - // Remember this name for metadata emission - MethodInternalNames[OMD] = NameStr; - ResultStr += NameStr; - - // Rewrite arguments - ResultStr += "("; - - // invisible arguments - if (OMD->isInstanceMethod()) { - QualType selfTy = Context->getObjCInterfaceType(IDecl); - selfTy = Context->getPointerType(selfTy); - if (!LangOpts.MicrosoftExt) { - if (ObjCSynthesizedStructs.count(const_cast(IDecl))) - ResultStr += "struct "; - } - // When rewriting for Microsoft, explicitly omit the structure name. - ResultStr += IDecl->getNameAsString(); - ResultStr += " *"; - } - else - ResultStr += Context->getObjCClassType().getAsString( - Context->getPrintingPolicy()); - - ResultStr += " self, "; - ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy()); - ResultStr += " _cmd"; - - // Method arguments. - for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), - E = OMD->param_end(); PI != E; ++PI) { - ParmVarDecl *PDecl = *PI; - ResultStr += ", "; - if (PDecl->getType()->isObjCQualifiedIdType()) { - ResultStr += "id "; - ResultStr += PDecl->getNameAsString(); - } else { - std::string Name = PDecl->getNameAsString(); - QualType QT = PDecl->getType(); - // Make sure we convert "t (^)(...)" to "t (*)(...)". - (void)convertBlockPointerToFunctionPointer(QT); - QT.getAsStringInternal(Name, Context->getPrintingPolicy()); - ResultStr += Name; - } - } - if (OMD->isVariadic()) - ResultStr += ", ..."; - ResultStr += ") "; - - if (FPRetType) { - ResultStr += ")"; // close the precedence "scope" for "*". - - // Now, emit the argument types (if any). 
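[Editorial note, not part of the patch] A stand-alone sketch (example class and selector invented, category name omitted) of the name mangling RewriteObjCMethodDecl() above performs: an "_I_"/"_C_" prefix for instance vs. class methods, the class name, then the selector with each ':' replaced by '_'.

#include <algorithm>
#include <cassert>
#include <string>

static std::string mangleMethod(bool isInstance, const std::string &cls,
                                std::string selector) {
  std::replace(selector.begin(), selector.end(), ':', '_');
  return (isInstance ? std::string("_I_") : std::string("_C_")) + cls + "_" + selector;
}

int main() {
  // -[Widget setObject:forKey:] ends up as the C-level function name below.
  assert(mangleMethod(true, "Widget", "setObject:forKey:") ==
         "_I_Widget_setObject_forKey_");
  return 0;
}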
- if (const FunctionProtoType *FT = dyn_cast(FPRetType)) { - ResultStr += "("; - for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { - if (i) ResultStr += ", "; - std::string ParamStr = FT->getArgType(i).getAsString( - Context->getPrintingPolicy()); - ResultStr += ParamStr; - } - if (FT->isVariadic()) { - if (FT->getNumArgs()) ResultStr += ", "; - ResultStr += "..."; - } - ResultStr += ")"; - } else { - ResultStr += "()"; - } - } -} -void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) { - ObjCImplementationDecl *IMD = dyn_cast(OID); - ObjCCategoryImplDecl *CID = dyn_cast(OID); - - if (IMD) { - if (IMD->getIvarRBraceLoc().isValid()) { - ReplaceText(IMD->getLocStart(), 1, "/** "); - ReplaceText(IMD->getIvarRBraceLoc(), 1, "**/ "); - } - else { - InsertText(IMD->getLocStart(), "// "); - } - } - else - InsertText(CID->getLocStart(), "// "); - - for (ObjCCategoryImplDecl::instmeth_iterator - I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(), - E = IMD ? IMD->instmeth_end() : CID->instmeth_end(); - I != E; ++I) { - std::string ResultStr; - ObjCMethodDecl *OMD = *I; - RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr); - SourceLocation LocStart = OMD->getLocStart(); - SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart(); - - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - ReplaceText(LocStart, endBuf-startBuf, ResultStr); - } - - for (ObjCCategoryImplDecl::classmeth_iterator - I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(), - E = IMD ? IMD->classmeth_end() : CID->classmeth_end(); - I != E; ++I) { - std::string ResultStr; - ObjCMethodDecl *OMD = *I; - RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr); - SourceLocation LocStart = OMD->getLocStart(); - SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart(); - - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - ReplaceText(LocStart, endBuf-startBuf, ResultStr); - } - for (ObjCCategoryImplDecl::propimpl_iterator - I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(), - E = IMD ? IMD->propimpl_end() : CID->propimpl_end(); - I != E; ++I) { - RewritePropertyImplDecl(*I, IMD, CID); - } - - InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// "); -} - -void RewriteModernObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) { - // Do not synthesize more than once. - if (ObjCSynthesizedStructs.count(ClassDecl)) - return; - // Make sure super class's are written before current class is written. - ObjCInterfaceDecl *SuperClass = ClassDecl->getSuperClass(); - while (SuperClass) { - RewriteInterfaceDecl(SuperClass); - SuperClass = SuperClass->getSuperClass(); - } - std::string ResultStr; - if (!ObjCWrittenInterfaces.count(ClassDecl->getCanonicalDecl())) { - // we haven't seen a forward decl - generate a typedef. - RewriteOneForwardClassDecl(ClassDecl, ResultStr); - RewriteIvarOffsetSymbols(ClassDecl, ResultStr); - - RewriteObjCInternalStruct(ClassDecl, ResultStr); - // Mark this typedef as having been written into its c++ equivalent. 
- ObjCWrittenInterfaces.insert(ClassDecl->getCanonicalDecl()); - - for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(), - E = ClassDecl->prop_end(); I != E; ++I) - RewriteProperty(*I); - for (ObjCInterfaceDecl::instmeth_iterator - I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - for (ObjCInterfaceDecl::classmeth_iterator - I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - - // Lastly, comment out the @end. - ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"), - "/* @end */"); - } -} - -Stmt *RewriteModernObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) { - SourceRange OldRange = PseudoOp->getSourceRange(); - - // We just magically know some things about the structure of this - // expression. - ObjCMessageExpr *OldMsg = - cast(PseudoOp->getSemanticExpr( - PseudoOp->getNumSemanticExprs() - 1)); - - // Because the rewriter doesn't allow us to rewrite rewritten code, - // we need to suppress rewriting the sub-statements. - Expr *Base; - SmallVector Args; - { - DisableReplaceStmtScope S(*this); - - // Rebuild the base expression if we have one. - Base = 0; - if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) { - Base = OldMsg->getInstanceReceiver(); - Base = cast(Base)->getSourceExpr(); - Base = cast(RewriteFunctionBodyOrGlobalInitializer(Base)); - } - - unsigned numArgs = OldMsg->getNumArgs(); - for (unsigned i = 0; i < numArgs; i++) { - Expr *Arg = OldMsg->getArg(i); - if (isa(Arg)) - Arg = cast(Arg)->getSourceExpr(); - Arg = cast(RewriteFunctionBodyOrGlobalInitializer(Arg)); - Args.push_back(Arg); - } - } - - // TODO: avoid this copy. - SmallVector SelLocs; - OldMsg->getSelectorLocs(SelLocs); - - ObjCMessageExpr *NewMsg = 0; - switch (OldMsg->getReceiverKind()) { - case ObjCMessageExpr::Class: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - OldMsg->getClassReceiverTypeInfo(), - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - - case ObjCMessageExpr::Instance: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - Base, - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - - case ObjCMessageExpr::SuperClass: - case ObjCMessageExpr::SuperInstance: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - OldMsg->getSuperLoc(), - OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance, - OldMsg->getSuperType(), - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - } - - Stmt *Replacement = SynthMessageExpr(NewMsg); - ReplaceStmtWithRange(PseudoOp, Replacement, OldRange); - return Replacement; -} - -Stmt *RewriteModernObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) { - SourceRange OldRange = PseudoOp->getSourceRange(); - - // We just magically know some things about the structure of this - // expression. - ObjCMessageExpr *OldMsg = - cast(PseudoOp->getResultExpr()->IgnoreImplicit()); - - // Because the rewriter doesn't allow us to rewrite rewritten code, - // we need to suppress rewriting the sub-statements. 
- Expr *Base = 0; - SmallVector Args; - { - DisableReplaceStmtScope S(*this); - // Rebuild the base expression if we have one. - if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) { - Base = OldMsg->getInstanceReceiver(); - Base = cast(Base)->getSourceExpr(); - Base = cast(RewriteFunctionBodyOrGlobalInitializer(Base)); - } - unsigned numArgs = OldMsg->getNumArgs(); - for (unsigned i = 0; i < numArgs; i++) { - Expr *Arg = OldMsg->getArg(i); - if (isa(Arg)) - Arg = cast(Arg)->getSourceExpr(); - Arg = cast(RewriteFunctionBodyOrGlobalInitializer(Arg)); - Args.push_back(Arg); - } - } - - // Intentionally empty. - SmallVector SelLocs; - - ObjCMessageExpr *NewMsg = 0; - switch (OldMsg->getReceiverKind()) { - case ObjCMessageExpr::Class: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - OldMsg->getClassReceiverTypeInfo(), - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - - case ObjCMessageExpr::Instance: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - Base, - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - - case ObjCMessageExpr::SuperClass: - case ObjCMessageExpr::SuperInstance: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - OldMsg->getSuperLoc(), - OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance, - OldMsg->getSuperType(), - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - } - - Stmt *Replacement = SynthMessageExpr(NewMsg); - ReplaceStmtWithRange(PseudoOp, Replacement, OldRange); - return Replacement; -} - -/// SynthCountByEnumWithState - To print: -/// ((unsigned int (*) -/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int)) -/// (void *)objc_msgSend)((id)l_collection, -/// sel_registerName( -/// "countByEnumeratingWithState:objects:count:"), -/// &enumState, -/// (id *)__rw_items, (unsigned int)16) -/// -void RewriteModernObjC::SynthCountByEnumWithState(std::string &buf) { - buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, " - "id *, unsigned int))(void *)objc_msgSend)"; - buf += "\n\t\t"; - buf += "((id)l_collection,\n\t\t"; - buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),"; - buf += "\n\t\t"; - buf += "&enumState, " - "(id *)__rw_items, (unsigned int)16)"; -} - -/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach -/// statement to exit to its outer synthesized loop. -/// -Stmt *RewriteModernObjC::RewriteBreakStmt(BreakStmt *S) { - if (Stmts.empty() || !isa(Stmts.back())) - return S; - // replace break with goto __break_label - std::string buf; - - SourceLocation startLoc = S->getLocStart(); - buf = "goto __break_label_"; - buf += utostr(ObjCBcLabelNo.back()); - ReplaceText(startLoc, strlen("break"), buf); - - return 0; -} - -/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach -/// statement to continue with its inner synthesized loop. 
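[Editorial note, not part of the patch] A minimal stand-alone model of the control flow that RewriteBreakStmt above and RewriteContinueStmt just below target: the foreach body ends up inside two nested synthesized do/while loops, so a plain break or continue would only affect the innermost one, and the rewrites instead jump to numbered labels placed by RewriteObjCForCollectionStmt (numbers come from ObjCBcLabelNo; everything below is invented for illustration).

#include <cassert>

int main() {
  int visited = 0;
  unsigned long limit = 3, counter;
  do {                                          // outer "refill the buffer" loop
    counter = 0;
    do {                                        // inner "walk the buffer" loop
      ++visited;
      if (visited == 2) goto __break_label_1;   // what a user-written "break;" becomes
      goto __continue_label_1;                  // what a user-written "continue;" becomes
      __continue_label_1: ;
    } while (++counter < limit);
  } while (false);                              // refill message elided in this sketch
  __break_label_1: ;
  assert(visited == 2);
  return 0;
}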
-/// -Stmt *RewriteModernObjC::RewriteContinueStmt(ContinueStmt *S) { - if (Stmts.empty() || !isa(Stmts.back())) - return S; - // replace continue with goto __continue_label - std::string buf; - - SourceLocation startLoc = S->getLocStart(); - buf = "goto __continue_label_"; - buf += utostr(ObjCBcLabelNo.back()); - ReplaceText(startLoc, strlen("continue"), buf); - - return 0; -} - -/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement. -/// It rewrites: -/// for ( type elem in collection) { stmts; } - -/// Into: -/// { -/// type elem; -/// struct __objcFastEnumerationState enumState = { 0 }; -/// id __rw_items[16]; -/// id l_collection = (id)collection; -/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState -/// objects:__rw_items count:16]; -/// if (limit) { -/// unsigned long startMutations = *enumState.mutationsPtr; -/// do { -/// unsigned long counter = 0; -/// do { -/// if (startMutations != *enumState.mutationsPtr) -/// objc_enumerationMutation(l_collection); -/// elem = (type)enumState.itemsPtr[counter++]; -/// stmts; -/// __continue_label: ; -/// } while (counter < limit); -/// } while (limit = [l_collection countByEnumeratingWithState:&enumState -/// objects:__rw_items count:16]); -/// elem = nil; -/// __break_label: ; -/// } -/// else -/// elem = nil; -/// } -/// -Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, - SourceLocation OrigEnd) { - assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty"); - assert(isa(Stmts.back()) && - "ObjCForCollectionStmt Statement stack mismatch"); - assert(!ObjCBcLabelNo.empty() && - "ObjCForCollectionStmt - Label No stack empty"); - - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - StringRef elementName; - std::string elementTypeAsString; - std::string buf; - buf = "\n{\n\t"; - if (DeclStmt *DS = dyn_cast(S->getElement())) { - // type elem; - NamedDecl* D = cast(DS->getSingleDecl()); - QualType ElementType = cast(D)->getType(); - if (ElementType->isObjCQualifiedIdType() || - ElementType->isObjCQualifiedInterfaceType()) - // Simply use 'id' for all qualified types. - elementTypeAsString = "id"; - else - elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy()); - buf += elementTypeAsString; - buf += " "; - elementName = D->getName(); - buf += elementName; - buf += ";\n\t"; - } - else { - DeclRefExpr *DR = cast(S->getElement()); - elementName = DR->getDecl()->getName(); - ValueDecl *VD = cast(DR->getDecl()); - if (VD->getType()->isObjCQualifiedIdType() || - VD->getType()->isObjCQualifiedInterfaceType()) - // Simply use 'id' for all qualified types. - elementTypeAsString = "id"; - else - elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy()); - } - - // struct __objcFastEnumerationState enumState = { 0 }; - buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t"; - // id __rw_items[16]; - buf += "id __rw_items[16];\n\t"; - // id l_collection = (id) - buf += "id l_collection = (id)"; - // Find start location of 'collection' the hard way! - const char *startCollectionBuf = startBuf; - startCollectionBuf += 3; // skip 'for' - startCollectionBuf = strchr(startCollectionBuf, '('); - startCollectionBuf++; // skip '(' - // find 'in' and skip it. 
- while (*startCollectionBuf != ' ' || - *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' || - (*(startCollectionBuf+3) != ' ' && - *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '(')) - startCollectionBuf++; - startCollectionBuf += 3; - - // Replace: "for (type element in" with string constructed thus far. - ReplaceText(startLoc, startCollectionBuf - startBuf, buf); - // Replace ')' in for '(' type elem in collection ')' with ';' - SourceLocation rightParenLoc = S->getRParenLoc(); - const char *rparenBuf = SM->getCharacterData(rightParenLoc); - SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf); - buf = ";\n\t"; - - // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState - // objects:__rw_items count:16]; - // which is synthesized into: - // unsigned int limit = - // ((unsigned int (*) - // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int)) - // (void *)objc_msgSend)((id)l_collection, - // sel_registerName( - // "countByEnumeratingWithState:objects:count:"), - // (struct __objcFastEnumerationState *)&state, - // (id *)__rw_items, (unsigned int)16); - buf += "unsigned long limit =\n\t\t"; - SynthCountByEnumWithState(buf); - buf += ";\n\t"; - /// if (limit) { - /// unsigned long startMutations = *enumState.mutationsPtr; - /// do { - /// unsigned long counter = 0; - /// do { - /// if (startMutations != *enumState.mutationsPtr) - /// objc_enumerationMutation(l_collection); - /// elem = (type)enumState.itemsPtr[counter++]; - buf += "if (limit) {\n\t"; - buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t"; - buf += "do {\n\t\t"; - buf += "unsigned long counter = 0;\n\t\t"; - buf += "do {\n\t\t\t"; - buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t"; - buf += "objc_enumerationMutation(l_collection);\n\t\t\t"; - buf += elementName; - buf += " = ("; - buf += elementTypeAsString; - buf += ")enumState.itemsPtr[counter++];"; - // Replace ')' in for '(' type elem in collection ')' with all of these. - ReplaceText(lparenLoc, 1, buf); - - /// __continue_label: ; - /// } while (counter < limit); - /// } while (limit = [l_collection countByEnumeratingWithState:&enumState - /// objects:__rw_items count:16]); - /// elem = nil; - /// __break_label: ; - /// } - /// else - /// elem = nil; - /// } - /// - buf = ";\n\t"; - buf += "__continue_label_"; - buf += utostr(ObjCBcLabelNo.back()); - buf += ": ;"; - buf += "\n\t\t"; - buf += "} while (counter < limit);\n\t"; - buf += "} while (limit = "; - SynthCountByEnumWithState(buf); - buf += ");\n\t"; - buf += elementName; - buf += " = (("; - buf += elementTypeAsString; - buf += ")0);\n\t"; - buf += "__break_label_"; - buf += utostr(ObjCBcLabelNo.back()); - buf += ": ;\n\t"; - buf += "}\n\t"; - buf += "else\n\t\t"; - buf += elementName; - buf += " = (("; - buf += elementTypeAsString; - buf += ")0);\n\t"; - buf += "}\n"; - - // Insert all these *after* the statement body. - // FIXME: If this should support Obj-C++, support CXXTryStmt - if (isa(S->getBody())) { - SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1); - InsertText(endBodyLoc, buf); - } else { - /* Need to treat single statements specially. For example: - * - * for (A *a in b) if (stuff()) break; - * for (A *a in b) xxxyy; - * - * The following code simply scans ahead to the semi to find the actual end. 
- */ - const char *stmtBuf = SM->getCharacterData(OrigEnd); - const char *semiBuf = strchr(stmtBuf, ';'); - assert(semiBuf && "Can't find ';'"); - SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1); - InsertText(endBodyLoc, buf); - } - Stmts.pop_back(); - ObjCBcLabelNo.pop_back(); - return 0; -} - -static void Write_RethrowObject(std::string &buf) { - buf += "{ struct _FIN { _FIN(id reth) : rethrow(reth) {}\n"; - buf += "\t~_FIN() { if (rethrow) objc_exception_throw(rethrow); }\n"; - buf += "\tid rethrow;\n"; - buf += "\t} _fin_force_rethow(_rethrow);"; -} - -/// RewriteObjCSynchronizedStmt - -/// This routine rewrites @synchronized(expr) stmt; -/// into: -/// objc_sync_enter(expr); -/// @try stmt @finally { objc_sync_exit(expr); } -/// -Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) { - // Get the start location and compute the semi location. - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '@') && "bogus @synchronized location"); - - std::string buf; - buf = "{ id _rethrow = 0; id _sync_obj = "; - - const char *lparenBuf = startBuf; - while (*lparenBuf != '(') lparenBuf++; - ReplaceText(startLoc, lparenBuf-startBuf+1, buf); - - buf = "; objc_sync_enter(_sync_obj);\n"; - buf += "try {\n\tstruct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}"; - buf += "\n\t~_SYNC_EXIT() {objc_sync_exit(sync_exit);}"; - buf += "\n\tid sync_exit;"; - buf += "\n\t} _sync_exit(_sync_obj);\n"; - - // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since - // the sync expression is typically a message expression that's already - // been rewritten! (which implies the SourceLocation's are invalid). - SourceLocation RParenExprLoc = S->getSynchBody()->getLocStart(); - const char *RParenExprLocBuf = SM->getCharacterData(RParenExprLoc); - while (*RParenExprLocBuf != ')') RParenExprLocBuf--; - RParenExprLoc = startLoc.getLocWithOffset(RParenExprLocBuf-startBuf); - - SourceLocation LBranceLoc = S->getSynchBody()->getLocStart(); - const char *LBraceLocBuf = SM->getCharacterData(LBranceLoc); - assert (*LBraceLocBuf == '{'); - ReplaceText(RParenExprLoc, (LBraceLocBuf - SM->getCharacterData(RParenExprLoc) + 1), buf); - - SourceLocation startRBraceLoc = S->getSynchBody()->getLocEnd(); - assert((*SM->getCharacterData(startRBraceLoc) == '}') && - "bogus @synchronized block"); - - buf = "} catch (id e) {_rethrow = e;}\n"; - Write_RethrowObject(buf); - buf += "}\n"; - buf += "}\n"; - - ReplaceText(startRBraceLoc, 1, buf); - - return 0; -} - -void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S) -{ - // Perform a bottom up traversal of all children. 
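[Editorial note, not part of the patch] A compilable model (stub runtime, all names invented) of the shape RewriteObjCSynchronizedStmt() above produces: objc_sync_enter() runs first, the body executes inside try with a local RAII object whose destructor issues objc_sync_exit(), and any thrown id is stashed for rethrow by the _FIN helper emitted by Write_RethrowObject().

#include <cassert>
typedef void *id;

static int depth = 0;
static void objc_sync_enter(id) { ++depth; }   // stand-ins for the real runtime calls
static void objc_sync_exit(id)  { --depth; }

int main() {
  id obj = 0;
  { id _rethrow = 0; id _sync_obj = obj; objc_sync_enter(_sync_obj);
    try {
      struct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}
                          ~_SYNC_EXIT() { objc_sync_exit(sync_exit); }
                          id sync_exit; } _sync_exit(_sync_obj);
      assert(depth == 1);        // the original @synchronized body runs here
    } catch (id e) { _rethrow = e; }
    (void)_rethrow;              // the real rewrite rethrows via the _FIN helper
  }
  assert(depth == 0);            // objc_sync_exit ran when _sync_exit was destroyed
  return 0;
}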
- for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) - WarnAboutReturnGotoStmts(*CI); - - if (isa(S) || isa(S)) { - Diags.Report(Context->getFullLoc(S->getLocStart()), - TryFinallyContainsReturnDiag); - } - return; -} - -Stmt *RewriteModernObjC::RewriteObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) { - SourceLocation startLoc = S->getAtLoc(); - ReplaceText(startLoc, strlen("@autoreleasepool"), "/* @autoreleasepool */"); - ReplaceText(S->getSubStmt()->getLocStart(), 1, - "{ __AtAutoreleasePool __autoreleasepool; "); - - return 0; -} - -Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) { - ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt(); - bool noCatch = S->getNumCatchStmts() == 0; - std::string buf; - - if (finalStmt) { - if (noCatch) - buf = "{ id volatile _rethrow = 0;\n"; - else { - buf = "{ id volatile _rethrow = 0;\ntry {\n"; - } - } - // Get the start location and compute the semi location. - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '@') && "bogus @try location"); - if (finalStmt) - ReplaceText(startLoc, 1, buf); - else - // @try -> try - ReplaceText(startLoc, 1, ""); - - for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) { - ObjCAtCatchStmt *Catch = S->getCatchStmt(I); - VarDecl *catchDecl = Catch->getCatchParamDecl(); - - startLoc = Catch->getLocStart(); - bool AtRemoved = false; - if (catchDecl) { - QualType t = catchDecl->getType(); - if (const ObjCObjectPointerType *Ptr = t->getAs()) { - // Should be a pointer to a class. - ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface(); - if (IDecl) { - std::string Result; - startBuf = SM->getCharacterData(startLoc); - assert((*startBuf == '@') && "bogus @catch location"); - SourceLocation rParenLoc = Catch->getRParenLoc(); - const char *rParenBuf = SM->getCharacterData(rParenLoc); - - // _objc_exc_Foo *_e as argument to catch. - Result = "catch (_objc_exc_"; Result += IDecl->getNameAsString(); - Result += " *_"; Result += catchDecl->getNameAsString(); - Result += ")"; - ReplaceText(startLoc, rParenBuf-startBuf+1, Result); - // Foo *e = (Foo *)_e; - Result.clear(); - Result = "{ "; - Result += IDecl->getNameAsString(); - Result += " *"; Result += catchDecl->getNameAsString(); - Result += " = ("; Result += IDecl->getNameAsString(); Result += "*)"; - Result += "_"; Result += catchDecl->getNameAsString(); - - Result += "; "; - SourceLocation lBraceLoc = Catch->getCatchBody()->getLocStart(); - ReplaceText(lBraceLoc, 1, Result); - AtRemoved = true; - } - } - } - if (!AtRemoved) - // @catch -> catch - ReplaceText(startLoc, 1, ""); - - } - if (finalStmt) { - buf.clear(); - if (noCatch) - buf = "catch (id e) {_rethrow = e;}\n"; - else - buf = "}\ncatch (id e) {_rethrow = e;}\n"; - - SourceLocation startFinalLoc = finalStmt->getLocStart(); - ReplaceText(startFinalLoc, 8, buf); - Stmt *body = finalStmt->getFinallyBody(); - SourceLocation startFinalBodyLoc = body->getLocStart(); - buf.clear(); - Write_RethrowObject(buf); - ReplaceText(startFinalBodyLoc, 1, buf); - - SourceLocation endFinalBodyLoc = body->getLocEnd(); - ReplaceText(endFinalBodyLoc, 1, "}\n}"); - // Now check for any return/continue/go statements within the @try. - WarnAboutReturnGotoStmts(S->getTryBody()); - } - - return 0; -} - -// This can't be done with ReplaceStmt(S, ThrowExpr), since -// the throw expression is typically a message expression that's already -// been rewritten! (which implies the SourceLocation's are invalid). 
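[Editorial note, not part of the patch] A stand-alone sketch (class name "Foo" and the thrown object invented) of the handler translation performed in RewriteObjCTryStmt() above: "@catch (Foo *e)" becomes a C++ handler for the synthesized _objc_exc_Foo type, and the typed pointer is re-derived on entry, exactly as the Result string is assembled.

#include <cassert>

typedef struct objc_object Foo;    // from the forward-class rewrite
typedef struct {} _objc_exc_Foo;   // the matching exception helper type

static _objc_exc_Foo exc;

int main() {
  bool caught = false;
  try {
    throw &exc;                    // stands in for the rewritten @throw
  } catch (_objc_exc_Foo *_e) {    // "catch (_objc_exc_Foo *_e)"
    Foo *e = (Foo *)_e;            // "{ Foo *e = (Foo *)_e; ..."
    caught = (e != 0);
  }
  assert(caught);
  return 0;
}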
-Stmt *RewriteModernObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) { - // Get the start location and compute the semi location. - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '@') && "bogus @throw location"); - - std::string buf; - /* void objc_exception_throw(id) __attribute__((noreturn)); */ - if (S->getThrowExpr()) - buf = "objc_exception_throw("; - else - buf = "throw"; - - // handle "@ throw" correctly. - const char *wBuf = strchr(startBuf, 'w'); - assert((*wBuf == 'w') && "@throw: can't find 'w'"); - ReplaceText(startLoc, wBuf-startBuf+1, buf); - - const char *semiBuf = strchr(startBuf, ';'); - assert((*semiBuf == ';') && "@throw: can't find ';'"); - SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf); - if (S->getThrowExpr()) - ReplaceText(semiLoc, 1, ");"); - return 0; -} - -Stmt *RewriteModernObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) { - // Create a new string expression. - QualType StrType = Context->getPointerType(Context->CharTy); - std::string StrEncoding; - Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding); - Expr *Replacement = StringLiteral::Create(*Context, StrEncoding, - StringLiteral::Ascii, false, - StrType, SourceLocation()); - ReplaceStmt(Exp, Replacement); - - // Replace this subexpr in the parent. - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return Replacement; -} - -Stmt *RewriteModernObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) { - if (!SelGetUidFunctionDecl) - SynthSelGetUidFunctionDecl(); - assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl"); - // Create a call to sel_registerName("selName"). - SmallVector SelExprs; - QualType argType = Context->getPointerType(Context->CharTy); - SelExprs.push_back(StringLiteral::Create(*Context, - Exp->getSelector().getAsString(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, - &SelExprs[0], SelExprs.size()); - ReplaceStmt(Exp, SelExp); - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return SelExp; -} - -CallExpr *RewriteModernObjC::SynthesizeCallToFunctionDecl( - FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc, - SourceLocation EndLoc) { - // Get the type, we will need to reference it in a couple spots. - QualType msgSendType = FD->getType(); - - // Create a reference to the objc_msgSend() declaration. - DeclRefExpr *DRE = - new (Context) DeclRefExpr(FD, false, msgSendType, VK_LValue, SourceLocation()); - - // Now, we cast the reference to a pointer to the objc_msgSend type. - QualType pToFunc = Context->getPointerType(msgSendType); - ImplicitCastExpr *ICE = - ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay, - DRE, 0, VK_RValue); - - const FunctionType *FT = msgSendType->getAs(); - - CallExpr *Exp = - new (Context) CallExpr(*Context, ICE, args, nargs, - FT->getCallResultType(*Context), - VK_RValue, EndLoc); - return Exp; -} - -static bool scanForProtocolRefs(const char *startBuf, const char *endBuf, - const char *&startRef, const char *&endRef) { - while (startBuf < endBuf) { - if (*startBuf == '<') - startRef = startBuf; // mark the start. - if (*startBuf == '>') { - if (startRef && *startRef == '<') { - endRef = startBuf; // mark the end. 
- return true; - } - return false; - } - startBuf++; - } - return false; -} - -static void scanToNextArgument(const char *&argRef) { - int angle = 0; - while (*argRef != ')' && (*argRef != ',' || angle > 0)) { - if (*argRef == '<') - angle++; - else if (*argRef == '>') - angle--; - argRef++; - } - assert(angle == 0 && "scanToNextArgument - bad protocol type syntax"); -} - -bool RewriteModernObjC::needToScanForQualifiers(QualType T) { - if (T->isObjCQualifiedIdType()) - return true; - if (const PointerType *PT = T->getAs()) { - if (PT->getPointeeType()->isObjCQualifiedIdType()) - return true; - } - if (T->isObjCObjectPointerType()) { - T = T->getPointeeType(); - return T->isObjCQualifiedInterfaceType(); - } - if (T->isArrayType()) { - QualType ElemTy = Context->getBaseElementType(T); - return needToScanForQualifiers(ElemTy); - } - return false; -} - -void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) { - QualType Type = E->getType(); - if (needToScanForQualifiers(Type)) { - SourceLocation Loc, EndLoc; - - if (const CStyleCastExpr *ECE = dyn_cast(E)) { - Loc = ECE->getLParenLoc(); - EndLoc = ECE->getRParenLoc(); - } else { - Loc = E->getLocStart(); - EndLoc = E->getLocEnd(); - } - // This will defend against trying to rewrite synthesized expressions. - if (Loc.isInvalid() || EndLoc.isInvalid()) - return; - - const char *startBuf = SM->getCharacterData(Loc); - const char *endBuf = SM->getCharacterData(EndLoc); - const char *startRef = 0, *endRef = 0; - if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { - // Get the locations of the startRef, endRef. - SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf); - SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1); - // Comment out the protocol references. - InsertText(LessLoc, "/*"); - InsertText(GreaterLoc, "*/"); - } - } -} - -void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) { - SourceLocation Loc; - QualType Type; - const FunctionProtoType *proto = 0; - if (VarDecl *VD = dyn_cast(Dcl)) { - Loc = VD->getLocation(); - Type = VD->getType(); - } - else if (FunctionDecl *FD = dyn_cast(Dcl)) { - Loc = FD->getLocation(); - // Check for ObjC 'id' and class types that have been adorned with protocol - // information (id
<p>, C<p>
    *). The protocol references need to be rewritten! - const FunctionType *funcType = FD->getType()->getAs(); - assert(funcType && "missing function type"); - proto = dyn_cast(funcType); - if (!proto) - return; - Type = proto->getResultType(); - } - else if (FieldDecl *FD = dyn_cast(Dcl)) { - Loc = FD->getLocation(); - Type = FD->getType(); - } - else - return; - - if (needToScanForQualifiers(Type)) { - // Since types are unique, we need to scan the buffer. - - const char *endBuf = SM->getCharacterData(Loc); - const char *startBuf = endBuf; - while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart) - startBuf--; // scan backward (from the decl location) for return type. - const char *startRef = 0, *endRef = 0; - if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { - // Get the locations of the startRef, endRef. - SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf); - SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1); - // Comment out the protocol references. - InsertText(LessLoc, "/*"); - InsertText(GreaterLoc, "*/"); - } - } - if (!proto) - return; // most likely, was a variable - // Now check arguments. - const char *startBuf = SM->getCharacterData(Loc); - const char *startFuncBuf = startBuf; - for (unsigned i = 0; i < proto->getNumArgs(); i++) { - if (needToScanForQualifiers(proto->getArgType(i))) { - // Since types are unique, we need to scan the buffer. - - const char *endBuf = startBuf; - // scan forward (from the decl location) for argument types. - scanToNextArgument(endBuf); - const char *startRef = 0, *endRef = 0; - if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { - // Get the locations of the startRef, endRef. - SourceLocation LessLoc = - Loc.getLocWithOffset(startRef-startFuncBuf); - SourceLocation GreaterLoc = - Loc.getLocWithOffset(endRef-startFuncBuf+1); - // Comment out the protocol references. - InsertText(LessLoc, "/*"); - InsertText(GreaterLoc, "*/"); - } - startBuf = ++endBuf; - } - else { - // If the function name is derived from a macro expansion, then the - // argument buffer will not follow the name. Need to speak with Chris. - while (*startBuf && *startBuf != ')' && *startBuf != ',') - startBuf++; // scan forward (from the decl location) for argument types. - startBuf++; - } - } -} - -void RewriteModernObjC::RewriteTypeOfDecl(VarDecl *ND) { - QualType QT = ND->getType(); - const Type* TypePtr = QT->getAs(); - if (!isa(TypePtr)) - return; - while (isa(TypePtr)) { - const TypeOfExprType *TypeOfExprTypePtr = cast(TypePtr); - QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType(); - TypePtr = QT->getAs(); - } - // FIXME. 
This will not work for multiple declarators; as in: - // __typeof__(a) b,c,d; - std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy())); - SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); - const char *startBuf = SM->getCharacterData(DeclLoc); - if (ND->getInit()) { - std::string Name(ND->getNameAsString()); - TypeAsString += " " + Name + " = "; - Expr *E = ND->getInit(); - SourceLocation startLoc; - if (const CStyleCastExpr *ECE = dyn_cast(E)) - startLoc = ECE->getLParenLoc(); - else - startLoc = E->getLocStart(); - startLoc = SM->getExpansionLoc(startLoc); - const char *endBuf = SM->getCharacterData(startLoc); - ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString); - } - else { - SourceLocation X = ND->getLocEnd(); - X = SM->getExpansionLoc(X); - const char *endBuf = SM->getCharacterData(X); - ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString); - } -} - -// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str); -void RewriteModernObjC::SynthSelGetUidFunctionDecl() { - IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName"); - SmallVector ArgTys; - ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); - QualType getFuncType = - getSimpleFunctionType(Context->getObjCSelType(), &ArgTys[0], ArgTys.size()); - SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - SelGetUidIdent, getFuncType, 0, - SC_Extern, - SC_None, false); -} - -void RewriteModernObjC::RewriteFunctionDecl(FunctionDecl *FD) { - // declared in - if (FD->getIdentifier() && - FD->getName() == "sel_registerName") { - SelGetUidFunctionDecl = FD; - return; - } - RewriteObjCQualifiedInterfaceTypes(FD); -} - -void RewriteModernObjC::RewriteBlockPointerType(std::string& Str, QualType Type) { - std::string TypeString(Type.getAsString(Context->getPrintingPolicy())); - const char *argPtr = TypeString.c_str(); - if (!strchr(argPtr, '^')) { - Str += TypeString; - return; - } - while (*argPtr) { - Str += (*argPtr == '^' ? '*' : *argPtr); - argPtr++; - } -} - -// FIXME. Consolidate this routine with RewriteBlockPointerType. -void RewriteModernObjC::RewriteBlockPointerTypeVariable(std::string& Str, - ValueDecl *VD) { - QualType Type = VD->getType(); - std::string TypeString(Type.getAsString(Context->getPrintingPolicy())); - const char *argPtr = TypeString.c_str(); - int paren = 0; - while (*argPtr) { - switch (*argPtr) { - case '(': - Str += *argPtr; - paren++; - break; - case ')': - Str += *argPtr; - paren--; - break; - case '^': - Str += '*'; - if (paren == 1) - Str += VD->getNameAsString(); - break; - default: - Str += *argPtr; - break; - } - argPtr++; - } -} - -void RewriteModernObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) { - SourceLocation FunLocStart = FD->getTypeSpecStartLoc(); - const FunctionType *funcType = FD->getType()->getAs(); - const FunctionProtoType *proto = dyn_cast(funcType); - if (!proto) - return; - QualType Type = proto->getResultType(); - std::string FdStr = Type.getAsString(Context->getPrintingPolicy()); - FdStr += " "; - FdStr += FD->getName(); - FdStr += "("; - unsigned numArgs = proto->getNumArgs(); - for (unsigned i = 0; i < numArgs; i++) { - QualType ArgType = proto->getArgType(i); - RewriteBlockPointerType(FdStr, ArgType); - if (i+1 < numArgs) - FdStr += ", "; - } - if (FD->isVariadic()) { - FdStr += (numArgs > 0) ? 
", ...);\n" : "...);\n"; - } - else - FdStr += ");\n"; - InsertText(FunLocStart, FdStr); -} - -// SynthSuperContructorFunctionDecl - id __rw_objc_super(id obj, id super); -void RewriteModernObjC::SynthSuperContructorFunctionDecl() { - if (SuperContructorFunctionDecl) - return; - IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super"); - SmallVector ArgTys; - QualType argT = Context->getObjCIdType(); - assert(!argT.isNull() && "Can't find 'id' type"); - ArgTys.push_back(argT); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size()); - SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...); -void RewriteModernObjC::SynthMsgSendFunctionDecl() { - IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend"); - SmallVector ArgTys; - QualType argT = Context->getObjCIdType(); - assert(!argT.isNull() && "Can't find 'id' type"); - ArgTys.push_back(argT); - argT = Context->getObjCSelType(); - assert(!argT.isNull() && "Can't find 'SEL' type"); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size(), - true /*isVariadic*/); - MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(void); -void RewriteModernObjC::SynthMsgSendSuperFunctionDecl() { - IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper"); - SmallVector ArgTys; - ArgTys.push_back(Context->VoidTy); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], 1, - true /*isVariadic*/); - MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...); -void RewriteModernObjC::SynthMsgSendStretFunctionDecl() { - IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret"); - SmallVector ArgTys; - QualType argT = Context->getObjCIdType(); - assert(!argT.isNull() && "Can't find 'id' type"); - ArgTys.push_back(argT); - argT = Context->getObjCSelType(); - assert(!argT.isNull() && "Can't find 'SEL' type"); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size(), - true /*isVariadic*/); - MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendSuperStretFunctionDecl - -// id objc_msgSendSuper_stret(void); -void RewriteModernObjC::SynthMsgSendSuperStretFunctionDecl() { - IdentifierInfo *msgSendIdent = - &Context->Idents.get("objc_msgSendSuper_stret"); - SmallVector ArgTys; - ArgTys.push_back(Context->VoidTy); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], 1, - true /*isVariadic*/); - MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...); 
-void RewriteModernObjC::SynthMsgSendFpretFunctionDecl() { - IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret"); - SmallVector ArgTys; - QualType argT = Context->getObjCIdType(); - assert(!argT.isNull() && "Can't find 'id' type"); - ArgTys.push_back(argT); - argT = Context->getObjCSelType(); - assert(!argT.isNull() && "Can't find 'SEL' type"); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->DoubleTy, - &ArgTys[0], ArgTys.size(), - true /*isVariadic*/); - MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthGetClassFunctionDecl - Class objc_getClass(const char *name); -void RewriteModernObjC::SynthGetClassFunctionDecl() { - IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass"); - SmallVector ArgTys; - ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); - QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(), - &ArgTys[0], ArgTys.size()); - GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - getClassIdent, getClassType, 0, - SC_Extern, - SC_None, false); -} - -// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls); -void RewriteModernObjC::SynthGetSuperClassFunctionDecl() { - IdentifierInfo *getSuperClassIdent = - &Context->Idents.get("class_getSuperclass"); - SmallVector ArgTys; - ArgTys.push_back(Context->getObjCClassType()); - QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(), - &ArgTys[0], ArgTys.size()); - GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - getSuperClassIdent, - getClassType, 0, - SC_Extern, - SC_None, - false); -} - -// SynthGetMetaClassFunctionDecl - Class objc_getMetaClass(const char *name); -void RewriteModernObjC::SynthGetMetaClassFunctionDecl() { - IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass"); - SmallVector ArgTys; - ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); - QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(), - &ArgTys[0], ArgTys.size()); - GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - getClassIdent, getClassType, 0, - SC_Extern, - SC_None, false); -} - -Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) { - QualType strType = getConstantStringStructType(); - - std::string S = "__NSConstantStringImpl_"; - - std::string tmpName = InFileName; - unsigned i; - for (i=0; i < tmpName.length(); i++) { - char c = tmpName.at(i); - // replace any non alphanumeric characters with '_'. - if (!isalpha(c) && (c < '0' || c > '9')) - tmpName[i] = '_'; - } - S += tmpName; - S += "_"; - S += utostr(NumObjCStringLiterals++); - - Preamble += "static __NSConstantStringImpl " + S; - Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,"; - Preamble += "0x000007c8,"; // utf8_str - // The pretty printer for StringLiteral handles escape characters properly. 
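[Editorial note, not part of the patch] A stand-alone sketch (file name invented) of the symbol naming a few lines above in RewriteObjCStringLiteral(): the input file name is sanitized so every non-alphanumeric character becomes '_', and a running counter is appended, giving each literal its own __NSConstantStringImpl_* variable in the preamble.

#include <cassert>
#include <cctype>
#include <string>

static std::string literalSymbol(std::string file, unsigned n) {
  for (std::string::size_type i = 0; i < file.size(); ++i) {
    char c = file[i];
    if (!isalpha((unsigned char)c) && (c < '0' || c > '9'))
      file[i] = '_';                        // same test the loop above applies
  }
  return "__NSConstantStringImpl_" + file + "_" + std::to_string(n);
}

int main() {
  assert(literalSymbol("test/foo.m", 0) == "__NSConstantStringImpl_test_foo_m_0");
  return 0;
}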
- std::string prettyBufS; - llvm::raw_string_ostream prettyBuf(prettyBufS); - Exp->getString()->printPretty(prettyBuf, 0, PrintingPolicy(LangOpts)); - Preamble += prettyBuf.str(); - Preamble += ","; - Preamble += utostr(Exp->getString()->getByteLength()) + "};\n"; - - VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(), - SourceLocation(), &Context->Idents.get(S), - strType, 0, SC_Static, SC_None); - DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue, - SourceLocation()); - Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf, - Context->getPointerType(DRE->getType()), - VK_RValue, OK_Ordinary, - SourceLocation()); - // cast to NSConstantString * - CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(), - CK_CPointerToObjCPointerCast, Unop); - ReplaceStmt(Exp, cast); - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return cast; -} - -Stmt *RewriteModernObjC::RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp) { - unsigned IntSize = - static_cast(Context->getTypeSize(Context->IntTy)); - - Expr *FlagExp = IntegerLiteral::Create(*Context, - llvm::APInt(IntSize, Exp->getValue()), - Context->IntTy, Exp->getLocation()); - CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Context->ObjCBuiltinBoolTy, - CK_BitCast, FlagExp); - ParenExpr *PE = new (Context) ParenExpr(Exp->getLocation(), Exp->getExprLoc(), - cast); - ReplaceStmt(Exp, PE); - return PE; -} - -Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) { - // synthesize declaration of helper functions needed in this routine. - if (!SelGetUidFunctionDecl) - SynthSelGetUidFunctionDecl(); - // use objc_msgSend() for all. - if (!MsgSendFunctionDecl) - SynthMsgSendFunctionDecl(); - if (!GetClassFunctionDecl) - SynthGetClassFunctionDecl(); - - FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; - SourceLocation StartLoc = Exp->getLocStart(); - SourceLocation EndLoc = Exp->getLocEnd(); - - // Synthesize a call to objc_msgSend(). - SmallVector MsgExprs; - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - - // Create a call to objc_getClass(""). It will be the 1st argument. - ObjCMethodDecl *BoxingMethod = Exp->getBoxingMethod(); - ObjCInterfaceDecl *BoxingClass = BoxingMethod->getClassInterface(); - - IdentifierInfo *clsName = BoxingClass->getIdentifier(); - ClsExprs.push_back(StringLiteral::Create(*Context, - clsName->getName(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, EndLoc); - MsgExprs.push_back(Cls); - - // Create a call to sel_registerName(":"), etc. - // it will be the 2nd argument. - SmallVector SelExprs; - SelExprs.push_back(StringLiteral::Create(*Context, - BoxingMethod->getSelector().getAsString(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, - &SelExprs[0], SelExprs.size(), - StartLoc, EndLoc); - MsgExprs.push_back(SelExp); - - // User provided sub-expression is the 3rd, and last, argument. 
- Expr *subExpr = Exp->getSubExpr(); - if (ImplicitCastExpr *ICE = dyn_cast(subExpr)) { - QualType type = ICE->getType(); - const Expr *SubExpr = ICE->IgnoreParenImpCasts(); - CastKind CK = CK_BitCast; - if (SubExpr->getType()->isIntegralType(*Context) && type->isBooleanType()) - CK = CK_IntegralToBoolean; - subExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, subExpr); - } - MsgExprs.push_back(subExpr); - - SmallVector ArgTypes; - ArgTypes.push_back(Context->getObjCIdType()); - ArgTypes.push_back(Context->getObjCSelType()); - for (ObjCMethodDecl::param_iterator PI = BoxingMethod->param_begin(), - E = BoxingMethod->param_end(); PI != E; ++PI) - ArgTypes.push_back((*PI)->getType()); - - QualType returnType = Exp->getType(); - // Get the type, we will need to reference it in a couple spots. - QualType msgSendType = MsgSendFlavor->getType(); - - // Create a reference to the objc_msgSend() declaration. - DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, - VK_LValue, SourceLocation()); - - CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(Context->VoidTy), - CK_BitCast, DRE); - - // Now do the "normal" pointer to function cast. - QualType castType = - getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), - BoxingMethod->isVariadic()); - castType = Context->getPointerType(castType); - cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, - cast); - - // Don't forget the parens to enforce the proper binding. - ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); - - const FunctionType *FT = msgSendType->getAs(); - CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0], - MsgExprs.size(), - FT->getResultType(), VK_RValue, - EndLoc); - ReplaceStmt(Exp, CE); - return CE; -} - -Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) { - // synthesize declaration of helper functions needed in this routine. - if (!SelGetUidFunctionDecl) - SynthSelGetUidFunctionDecl(); - // use objc_msgSend() for all. 
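The boxed-expression rewrite completed above ends with the cast sequence every synthesized send in this file uses: objc_msgSend is first cast to void * (to sidestep the incompatible-call warning discussed later in SynthMessageExpr) and then to a function-pointer type built from the method's signature, with parentheses forcing the intended binding. Roughly, @(x) for an int x becomes the following; the NSNumber class and numberWithInt: selector are the usual boxing method and are assumed here, and the runtime declarations mirror the ones the rewriter synthesizes (it actually declares objc_getClass as returning Class).

    // Sketch only: rewritten form of the boxed literal  @(x).
    typedef struct objc_object *id;
    typedef struct objc_selector *SEL;
    extern "C" id  objc_msgSend(id, SEL, ...);
    extern "C" id  objc_getClass(const char *);
    extern "C" SEL sel_registerName(const char *);

    id box_int(int x) {
      return ((id (*)(id, SEL, int))(void *)objc_msgSend)(
          objc_getClass("NSNumber"), sel_registerName("numberWithInt:"), x);
    }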
- if (!MsgSendFunctionDecl) - SynthMsgSendFunctionDecl(); - if (!GetClassFunctionDecl) - SynthGetClassFunctionDecl(); - - FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; - SourceLocation StartLoc = Exp->getLocStart(); - SourceLocation EndLoc = Exp->getLocEnd(); - - // Build the expression: __NSContainer_literal(int, ...).arr - QualType IntQT = Context->IntTy; - QualType NSArrayFType = - getSimpleFunctionType(Context->VoidTy, &IntQT, 1, true); - std::string NSArrayFName("__NSContainer_literal"); - FunctionDecl *NSArrayFD = SynthBlockInitFunctionDecl(NSArrayFName); - DeclRefExpr *NSArrayDRE = - new (Context) DeclRefExpr(NSArrayFD, false, NSArrayFType, VK_RValue, - SourceLocation()); - - SmallVector InitExprs; - unsigned NumElements = Exp->getNumElements(); - unsigned UnsignedIntSize = - static_cast(Context->getTypeSize(Context->UnsignedIntTy)); - Expr *count = IntegerLiteral::Create(*Context, - llvm::APInt(UnsignedIntSize, NumElements), - Context->UnsignedIntTy, SourceLocation()); - InitExprs.push_back(count); - for (unsigned i = 0; i < NumElements; i++) - InitExprs.push_back(Exp->getElement(i)); - Expr *NSArrayCallExpr = - new (Context) CallExpr(*Context, NSArrayDRE, &InitExprs[0], InitExprs.size(), - NSArrayFType, VK_LValue, SourceLocation()); - - FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get("arr"), - Context->getPointerType(Context->VoidPtrTy), 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *ArrayLiteralME = - new (Context) MemberExpr(NSArrayCallExpr, false, ARRFD, - SourceLocation(), - ARRFD->getType(), VK_LValue, - OK_Ordinary); - QualType ConstIdT = Context->getObjCIdType().withConst(); - CStyleCastExpr * ArrayLiteralObjects = - NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(ConstIdT), - CK_BitCast, - ArrayLiteralME); - - // Synthesize a call to objc_msgSend(). - SmallVector MsgExprs; - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - QualType expType = Exp->getType(); - - // Create a call to objc_getClass("NSArray"). It will be th 1st argument. - ObjCInterfaceDecl *Class = - expType->getPointeeType()->getAs()->getInterface(); - - IdentifierInfo *clsName = Class->getIdentifier(); - ClsExprs.push_back(StringLiteral::Create(*Context, - clsName->getName(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, EndLoc); - MsgExprs.push_back(Cls); - - // Create a call to sel_registerName("arrayWithObjects:count:"). - // it will be the 2nd argument. 
- SmallVector SelExprs; - ObjCMethodDecl *ArrayMethod = Exp->getArrayWithObjectsMethod(); - SelExprs.push_back(StringLiteral::Create(*Context, - ArrayMethod->getSelector().getAsString(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, - &SelExprs[0], SelExprs.size(), - StartLoc, EndLoc); - MsgExprs.push_back(SelExp); - - // (const id [])objects - MsgExprs.push_back(ArrayLiteralObjects); - - // (NSUInteger)cnt - Expr *cnt = IntegerLiteral::Create(*Context, - llvm::APInt(UnsignedIntSize, NumElements), - Context->UnsignedIntTy, SourceLocation()); - MsgExprs.push_back(cnt); - - - SmallVector ArgTypes; - ArgTypes.push_back(Context->getObjCIdType()); - ArgTypes.push_back(Context->getObjCSelType()); - for (ObjCMethodDecl::param_iterator PI = ArrayMethod->param_begin(), - E = ArrayMethod->param_end(); PI != E; ++PI) - ArgTypes.push_back((*PI)->getType()); - - QualType returnType = Exp->getType(); - // Get the type, we will need to reference it in a couple spots. - QualType msgSendType = MsgSendFlavor->getType(); - - // Create a reference to the objc_msgSend() declaration. - DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, - VK_LValue, SourceLocation()); - - CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(Context->VoidTy), - CK_BitCast, DRE); - - // Now do the "normal" pointer to function cast. - QualType castType = - getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), - ArrayMethod->isVariadic()); - castType = Context->getPointerType(castType); - cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, - cast); - - // Don't forget the parens to enforce the proper binding. - ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); - - const FunctionType *FT = msgSendType->getAs(); - CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0], - MsgExprs.size(), - FT->getResultType(), VK_RValue, - EndLoc); - ReplaceStmt(Exp, CE); - return CE; -} - -Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp) { - // synthesize declaration of helper functions needed in this routine. - if (!SelGetUidFunctionDecl) - SynthSelGetUidFunctionDecl(); - // use objc_msgSend() for all. 
- if (!MsgSendFunctionDecl) - SynthMsgSendFunctionDecl(); - if (!GetClassFunctionDecl) - SynthGetClassFunctionDecl(); - - FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; - SourceLocation StartLoc = Exp->getLocStart(); - SourceLocation EndLoc = Exp->getLocEnd(); - - // Build the expression: __NSContainer_literal(int, ...).arr - QualType IntQT = Context->IntTy; - QualType NSDictFType = - getSimpleFunctionType(Context->VoidTy, &IntQT, 1, true); - std::string NSDictFName("__NSContainer_literal"); - FunctionDecl *NSDictFD = SynthBlockInitFunctionDecl(NSDictFName); - DeclRefExpr *NSDictDRE = - new (Context) DeclRefExpr(NSDictFD, false, NSDictFType, VK_RValue, - SourceLocation()); - - SmallVector KeyExprs; - SmallVector ValueExprs; - - unsigned NumElements = Exp->getNumElements(); - unsigned UnsignedIntSize = - static_cast(Context->getTypeSize(Context->UnsignedIntTy)); - Expr *count = IntegerLiteral::Create(*Context, - llvm::APInt(UnsignedIntSize, NumElements), - Context->UnsignedIntTy, SourceLocation()); - KeyExprs.push_back(count); - ValueExprs.push_back(count); - for (unsigned i = 0; i < NumElements; i++) { - ObjCDictionaryElement Element = Exp->getKeyValueElement(i); - KeyExprs.push_back(Element.Key); - ValueExprs.push_back(Element.Value); - } - - // (const id [])objects - Expr *NSValueCallExpr = - new (Context) CallExpr(*Context, NSDictDRE, &ValueExprs[0], ValueExprs.size(), - NSDictFType, VK_LValue, SourceLocation()); - - FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get("arr"), - Context->getPointerType(Context->VoidPtrTy), 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *DictLiteralValueME = - new (Context) MemberExpr(NSValueCallExpr, false, ARRFD, - SourceLocation(), - ARRFD->getType(), VK_LValue, - OK_Ordinary); - QualType ConstIdT = Context->getObjCIdType().withConst(); - CStyleCastExpr * DictValueObjects = - NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(ConstIdT), - CK_BitCast, - DictLiteralValueME); - // (const id [])keys - Expr *NSKeyCallExpr = - new (Context) CallExpr(*Context, NSDictDRE, &KeyExprs[0], KeyExprs.size(), - NSDictFType, VK_LValue, SourceLocation()); - - MemberExpr *DictLiteralKeyME = - new (Context) MemberExpr(NSKeyCallExpr, false, ARRFD, - SourceLocation(), - ARRFD->getType(), VK_LValue, - OK_Ordinary); - - CStyleCastExpr * DictKeyObjects = - NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(ConstIdT), - CK_BitCast, - DictLiteralKeyME); - - - - // Synthesize a call to objc_msgSend(). - SmallVector MsgExprs; - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - QualType expType = Exp->getType(); - - // Create a call to objc_getClass("NSArray"). It will be th 1st argument. - ObjCInterfaceDecl *Class = - expType->getPointeeType()->getAs()->getInterface(); - - IdentifierInfo *clsName = Class->getIdentifier(); - ClsExprs.push_back(StringLiteral::Create(*Context, - clsName->getName(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, EndLoc); - MsgExprs.push_back(Cls); - - // Create a call to sel_registerName("arrayWithObjects:count:"). - // it will be the 2nd argument. 
- SmallVector SelExprs; - ObjCMethodDecl *DictMethod = Exp->getDictWithObjectsMethod(); - SelExprs.push_back(StringLiteral::Create(*Context, - DictMethod->getSelector().getAsString(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, - &SelExprs[0], SelExprs.size(), - StartLoc, EndLoc); - MsgExprs.push_back(SelExp); - - // (const id [])objects - MsgExprs.push_back(DictValueObjects); - - // (const id [])keys - MsgExprs.push_back(DictKeyObjects); - - // (NSUInteger)cnt - Expr *cnt = IntegerLiteral::Create(*Context, - llvm::APInt(UnsignedIntSize, NumElements), - Context->UnsignedIntTy, SourceLocation()); - MsgExprs.push_back(cnt); - - - SmallVector ArgTypes; - ArgTypes.push_back(Context->getObjCIdType()); - ArgTypes.push_back(Context->getObjCSelType()); - for (ObjCMethodDecl::param_iterator PI = DictMethod->param_begin(), - E = DictMethod->param_end(); PI != E; ++PI) { - QualType T = (*PI)->getType(); - if (const PointerType* PT = T->getAs()) { - QualType PointeeTy = PT->getPointeeType(); - convertToUnqualifiedObjCType(PointeeTy); - T = Context->getPointerType(PointeeTy); - } - ArgTypes.push_back(T); - } - - QualType returnType = Exp->getType(); - // Get the type, we will need to reference it in a couple spots. - QualType msgSendType = MsgSendFlavor->getType(); - - // Create a reference to the objc_msgSend() declaration. - DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, - VK_LValue, SourceLocation()); - - CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(Context->VoidTy), - CK_BitCast, DRE); - - // Now do the "normal" pointer to function cast. - QualType castType = - getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), - DictMethod->isVariadic()); - castType = Context->getPointerType(castType); - cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, - cast); - - // Don't forget the parens to enforce the proper binding. 
- ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); - - const FunctionType *FT = msgSendType->getAs(); - CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0], - MsgExprs.size(), - FT->getResultType(), VK_RValue, - EndLoc); - ReplaceStmt(Exp, CE); - return CE; -} - -// struct __rw_objc_super { -// struct objc_object *object; struct objc_object *superClass; -// }; -QualType RewriteModernObjC::getSuperStructType() { - if (!SuperStructDecl) { - SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("__rw_objc_super")); - QualType FieldTypes[2]; - - // struct objc_object *object; - FieldTypes[0] = Context->getObjCIdType(); - // struct objc_object *superClass; - FieldTypes[1] = Context->getObjCIdType(); - - // Create fields - for (unsigned i = 0; i < 2; ++i) { - SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl, - SourceLocation(), - SourceLocation(), 0, - FieldTypes[i], 0, - /*BitWidth=*/0, - /*Mutable=*/false, - ICIS_NoInit)); - } - - SuperStructDecl->completeDefinition(); - } - return Context->getTagDeclType(SuperStructDecl); -} - -QualType RewriteModernObjC::getConstantStringStructType() { - if (!ConstantStringDecl) { - ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("__NSConstantStringImpl")); - QualType FieldTypes[4]; - - // struct objc_object *receiver; - FieldTypes[0] = Context->getObjCIdType(); - // int flags; - FieldTypes[1] = Context->IntTy; - // char *str; - FieldTypes[2] = Context->getPointerType(Context->CharTy); - // long length; - FieldTypes[3] = Context->LongTy; - - // Create fields - for (unsigned i = 0; i < 4; ++i) { - ConstantStringDecl->addDecl(FieldDecl::Create(*Context, - ConstantStringDecl, - SourceLocation(), - SourceLocation(), 0, - FieldTypes[i], 0, - /*BitWidth=*/0, - /*Mutable=*/true, - ICIS_NoInit)); - } - - ConstantStringDecl->completeDefinition(); - } - return Context->getTagDeclType(ConstantStringDecl); -} - -/// getFunctionSourceLocation - returns start location of a function -/// definition. Complication arises when function has declared as -/// extern "C" or extern "C" {...} -static SourceLocation getFunctionSourceLocation (RewriteModernObjC &R, - FunctionDecl *FD) { - if (FD->isExternC() && !FD->isMain()) { - const DeclContext *DC = FD->getDeclContext(); - if (const LinkageSpecDecl *LSD = dyn_cast(DC)) - // if it is extern "C" {...}, return function decl's own location. - if (!LSD->getRBraceLoc().isValid()) - return LSD->getExternLoc(); - } - if (FD->getStorageClassAsWritten() != SC_None) - R.RewriteBlockLiteralFunctionDecl(FD); - return FD->getTypeSpecStartLoc(); -} - -/// SynthMsgSendStretCallExpr - This routine translates message expression -/// into a call to objc_msgSend_stret() entry point. Tricky part is that -/// nil check on receiver must be performed before calling objc_msgSend_stret. -/// MsgSendStretFlavor - function declaration objc_msgSend_stret(...) -/// msgSendType - function type of objc_msgSend_stret(...) -/// returnType - Result type of the method being synthesized. -/// ArgTypes - type of the arguments passed to objc_msgSend_stret, starting with receiver type. -/// MsgExprs - list of argument expressions being passed to objc_msgSend_stret, -/// starting with receiver. -/// Method - Method being rewritten. 
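A compact sketch of what the routine documented above ends up emitting: a one-off wrapper struct (__Stret0, __Stret1, ... in the string-building code below) whose constructor zero-fills the result when the receiver is nil and otherwise forwards to objc_msgSend_stret, plus a sizeof-based conditional in SynthMessageExpr that chooses between the two entry points. Types and the objc_msgSend_stret declaration are simplified assumptions.

    // Sketch only: wrapper generated for a method returning  struct Point { int x, y; }.
    #include <string.h>
    typedef struct objc_object *id;
    typedef struct objc_selector *SEL;
    extern "C" id   objc_msgSend(id, SEL, ...);
    extern "C" void objc_msgSend_stret(void);   // assumed declaration
    struct Point { int x, y; };

    struct __Stret0 {
      __Stret0(id receiver, SEL sel) {
        if (receiver == 0)
          memset((void *)&s, 0, sizeof(s));     // nil receiver: zero-filled result
        else
          s = ((Point (*)(id, SEL))(void *)objc_msgSend_stret)(receiver, sel);
      }
      Point s;
    };

    // SynthMessageExpr then selects the entry point at the call site:
    //   (sizeof(Point) <= 8 ? ((Point (*)(id, SEL))(void *)objc_msgSend)(obj, sel)
    //                       : __Stret0(obj, sel).s)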
-Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor, - QualType msgSendType, - QualType returnType, - SmallVectorImpl &ArgTypes, - SmallVectorImpl &MsgExprs, - ObjCMethodDecl *Method) { - // Now do the "normal" pointer to function cast. - QualType castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), - Method ? Method->isVariadic() : false); - castType = Context->getPointerType(castType); - - // build type for containing the objc_msgSend_stret object. - static unsigned stretCount=0; - std::string name = "__Stret"; name += utostr(stretCount); - std::string str = - "extern \"C\" void * __cdecl memset(void *_Dst, int _Val, size_t _Size);\n"; - str += "struct "; str += name; - str += " {\n\t"; - str += name; - str += "(id receiver, SEL sel"; - for (unsigned i = 2; i < ArgTypes.size(); i++) { - std::string ArgName = "arg"; ArgName += utostr(i); - ArgTypes[i].getAsStringInternal(ArgName, Context->getPrintingPolicy()); - str += ", "; str += ArgName; - } - // could be vararg. - for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) { - std::string ArgName = "arg"; ArgName += utostr(i); - MsgExprs[i]->getType().getAsStringInternal(ArgName, - Context->getPrintingPolicy()); - str += ", "; str += ArgName; - } - - str += ") {\n"; - str += "\t if (receiver == 0)\n"; - str += "\t memset((void*)&s, 0, sizeof(s));\n"; - str += "\t else\n"; - str += "\t s = (("; str += castType.getAsString(Context->getPrintingPolicy()); - str += ")(void *)objc_msgSend_stret)(receiver, sel"; - for (unsigned i = 2; i < ArgTypes.size(); i++) { - str += ", arg"; str += utostr(i); - } - // could be vararg. - for (unsigned i = ArgTypes.size(); i < MsgExprs.size(); i++) { - str += ", arg"; str += utostr(i); - } - - str += ");\n"; - str += "\t}\n"; - str += "\t"; str += returnType.getAsString(Context->getPrintingPolicy()); - str += " s;\n"; - str += "};\n\n"; - SourceLocation FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef); - InsertText(FunLocStart, str); - ++stretCount; - - // AST for __Stretn(receiver, args).s; - IdentifierInfo *ID = &Context->Idents.get(name); - FunctionDecl *FD = FunctionDecl::Create(*Context, TUDecl, SourceLocation(), - SourceLocation(), ID, castType, 0, SC_Extern, - SC_None, false, false); - DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, castType, VK_RValue, - SourceLocation()); - CallExpr *STCE = new (Context) CallExpr(*Context, DRE, &MsgExprs[0], MsgExprs.size(), - castType, VK_LValue, SourceLocation()); - - FieldDecl *FieldD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get("s"), - returnType, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(STCE, false, FieldD, SourceLocation(), - FieldD->getType(), VK_LValue, - OK_Ordinary); - - return ME; -} - -Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp, - SourceLocation StartLoc, - SourceLocation EndLoc) { - if (!SelGetUidFunctionDecl) - SynthSelGetUidFunctionDecl(); - if (!MsgSendFunctionDecl) - SynthMsgSendFunctionDecl(); - if (!MsgSendSuperFunctionDecl) - SynthMsgSendSuperFunctionDecl(); - if (!MsgSendStretFunctionDecl) - SynthMsgSendStretFunctionDecl(); - if (!MsgSendSuperStretFunctionDecl) - SynthMsgSendSuperStretFunctionDecl(); - if (!MsgSendFpretFunctionDecl) - SynthMsgSendFpretFunctionDecl(); - if (!GetClassFunctionDecl) - SynthGetClassFunctionDecl(); - if (!GetSuperClassFunctionDecl) - SynthGetSuperClassFunctionDecl(); - if (!GetMetaClassFunctionDecl) - 
SynthGetMetaClassFunctionDecl(); - - // default to objc_msgSend(). - FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; - // May need to use objc_msgSend_stret() as well. - FunctionDecl *MsgSendStretFlavor = 0; - if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) { - QualType resultType = mDecl->getResultType(); - if (resultType->isRecordType()) - MsgSendStretFlavor = MsgSendStretFunctionDecl; - else if (resultType->isRealFloatingType()) - MsgSendFlavor = MsgSendFpretFunctionDecl; - } - - // Synthesize a call to objc_msgSend(). - SmallVector MsgExprs; - switch (Exp->getReceiverKind()) { - case ObjCMessageExpr::SuperClass: { - MsgSendFlavor = MsgSendSuperFunctionDecl; - if (MsgSendStretFlavor) - MsgSendStretFlavor = MsgSendSuperStretFunctionDecl; - assert(MsgSendFlavor && "MsgSendFlavor is NULL!"); - - ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface(); - - SmallVector InitExprs; - - // set the receiver to self, the first argument to all methods. - InitExprs.push_back( - NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK_BitCast, - new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(), - false, - Context->getObjCIdType(), - VK_RValue, - SourceLocation())) - ); // set the 'receiver'. - - // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - ClsExprs.push_back(StringLiteral::Create(*Context, - ClassDecl->getIdentifier()->getName(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - // (Class)objc_getClass("CurrentClass") - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, - EndLoc); - ClsExprs.clear(); - ClsExprs.push_back(Cls); - Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, - &ClsExprs[0], ClsExprs.size(), - StartLoc, EndLoc); - - // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) - // To turn off a warning, type-cast to 'id' - InitExprs.push_back( // set 'super class', using class_getSuperclass(). - NoTypeInfoCStyleCastExpr(Context, - Context->getObjCIdType(), - CK_BitCast, Cls)); - // struct __rw_objc_super - QualType superType = getSuperStructType(); - Expr *SuperRep; - - if (LangOpts.MicrosoftExt) { - SynthSuperContructorFunctionDecl(); - // Simulate a contructor call... - DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl, - false, superType, VK_LValue, - SourceLocation()); - SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], - InitExprs.size(), - superType, VK_LValue, - SourceLocation()); - // The code for super is a little tricky to prevent collision with - // the structure definition in the header. The rewriter has it's own - // internal definition (__rw_objc_super) that is uses. This is why - // we need the cast below. 
For example: - // (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER")) - // - SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, - Context->getPointerType(SuperRep->getType()), - VK_RValue, OK_Ordinary, - SourceLocation()); - SuperRep = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(superType), - CK_BitCast, SuperRep); - } else { - // (struct __rw_objc_super) { } - InitListExpr *ILE = - new (Context) InitListExpr(*Context, SourceLocation(), - &InitExprs[0], InitExprs.size(), - SourceLocation()); - TypeSourceInfo *superTInfo - = Context->getTrivialTypeSourceInfo(superType); - SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo, - superType, VK_LValue, - ILE, false); - // struct __rw_objc_super * - SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, - Context->getPointerType(SuperRep->getType()), - VK_RValue, OK_Ordinary, - SourceLocation()); - } - MsgExprs.push_back(SuperRep); - break; - } - - case ObjCMessageExpr::Class: { - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - ObjCInterfaceDecl *Class - = Exp->getClassReceiver()->getAs()->getInterface(); - IdentifierInfo *clsName = Class->getIdentifier(); - ClsExprs.push_back(StringLiteral::Create(*Context, - clsName->getName(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, EndLoc); - CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context, - Context->getObjCIdType(), - CK_BitCast, Cls); - MsgExprs.push_back(ArgExpr); - break; - } - - case ObjCMessageExpr::SuperInstance:{ - MsgSendFlavor = MsgSendSuperFunctionDecl; - if (MsgSendStretFlavor) - MsgSendStretFlavor = MsgSendSuperStretFunctionDecl; - assert(MsgSendFlavor && "MsgSendFlavor is NULL!"); - ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface(); - SmallVector InitExprs; - - InitExprs.push_back( - NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK_BitCast, - new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(), - false, - Context->getObjCIdType(), - VK_RValue, SourceLocation())) - ); // set the 'receiver'. - - // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - ClsExprs.push_back(StringLiteral::Create(*Context, - ClassDecl->getIdentifier()->getName(), - StringLiteral::Ascii, false, argType, - SourceLocation())); - // (Class)objc_getClass("CurrentClass") - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, EndLoc); - ClsExprs.clear(); - ClsExprs.push_back(Cls); - Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, - &ClsExprs[0], ClsExprs.size(), - StartLoc, EndLoc); - - // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) - // To turn off a warning, type-cast to 'id' - InitExprs.push_back( - // set 'super class', using class_getSuperclass(). - NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK_BitCast, Cls)); - // struct __rw_objc_super - QualType superType = getSuperStructType(); - Expr *SuperRep; - - if (LangOpts.MicrosoftExt) { - SynthSuperContructorFunctionDecl(); - // Simulate a contructor call... 
- DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl, - false, superType, VK_LValue, - SourceLocation()); - SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], - InitExprs.size(), - superType, VK_LValue, SourceLocation()); - // The code for super is a little tricky to prevent collision with - // the structure definition in the header. The rewriter has it's own - // internal definition (__rw_objc_super) that is uses. This is why - // we need the cast below. For example: - // (struct __rw_objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER")) - // - SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, - Context->getPointerType(SuperRep->getType()), - VK_RValue, OK_Ordinary, - SourceLocation()); - SuperRep = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(superType), - CK_BitCast, SuperRep); - } else { - // (struct __rw_objc_super) { } - InitListExpr *ILE = - new (Context) InitListExpr(*Context, SourceLocation(), - &InitExprs[0], InitExprs.size(), - SourceLocation()); - TypeSourceInfo *superTInfo - = Context->getTrivialTypeSourceInfo(superType); - SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo, - superType, VK_RValue, ILE, - false); - } - MsgExprs.push_back(SuperRep); - break; - } - - case ObjCMessageExpr::Instance: { - // Remove all type-casts because it may contain objc-style types; e.g. - // Foo *. - Expr *recExpr = Exp->getInstanceReceiver(); - while (CStyleCastExpr *CE = dyn_cast(recExpr)) - recExpr = CE->getSubExpr(); - CastKind CK = recExpr->getType()->isObjCObjectPointerType() - ? CK_BitCast : recExpr->getType()->isBlockPointerType() - ? CK_BlockPointerToObjCPointerCast - : CK_CPointerToObjCPointerCast; - - recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK, recExpr); - MsgExprs.push_back(recExpr); - break; - } - } - - // Create a call to sel_registerName("selName"), it will be the 2nd argument. - SmallVector SelExprs; - QualType argType = Context->getPointerType(Context->CharTy); - SelExprs.push_back(StringLiteral::Create(*Context, - Exp->getSelector().getAsString(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, - &SelExprs[0], SelExprs.size(), - StartLoc, - EndLoc); - MsgExprs.push_back(SelExp); - - // Now push any user supplied arguments. - for (unsigned i = 0; i < Exp->getNumArgs(); i++) { - Expr *userExpr = Exp->getArg(i); - // Make all implicit casts explicit...ICE comes in handy:-) - if (ImplicitCastExpr *ICE = dyn_cast(userExpr)) { - // Reuse the ICE type, it is exactly what the doctor ordered. - QualType type = ICE->getType(); - if (needToScanForQualifiers(type)) - type = Context->getObjCIdType(); - // Make sure we convert "type (^)(...)" to "type (*)(...)". - (void)convertBlockPointerToFunctionPointer(type); - const Expr *SubExpr = ICE->IgnoreParenImpCasts(); - CastKind CK; - if (SubExpr->getType()->isIntegralType(*Context) && - type->isBooleanType()) { - CK = CK_IntegralToBoolean; - } else if (type->isObjCObjectPointerType()) { - if (SubExpr->getType()->isBlockPointerType()) { - CK = CK_BlockPointerToObjCPointerCast; - } else if (SubExpr->getType()->isPointerType()) { - CK = CK_CPointerToObjCPointerCast; - } else { - CK = CK_BitCast; - } - } else { - CK = CK_BitCast; - } - - userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr); - } - // Make id cast into an 'id' cast. 
- else if (CStyleCastExpr *CE = dyn_cast(userExpr)) { - if (CE->getType()->isObjCQualifiedIdType()) { - while ((CE = dyn_cast(userExpr))) - userExpr = CE->getSubExpr(); - CastKind CK; - if (userExpr->getType()->isIntegralType(*Context)) { - CK = CK_IntegralToPointer; - } else if (userExpr->getType()->isBlockPointerType()) { - CK = CK_BlockPointerToObjCPointerCast; - } else if (userExpr->getType()->isPointerType()) { - CK = CK_CPointerToObjCPointerCast; - } else { - CK = CK_BitCast; - } - userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK, userExpr); - } - } - MsgExprs.push_back(userExpr); - // We've transferred the ownership to MsgExprs. For now, we *don't* null - // out the argument in the original expression (since we aren't deleting - // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info. - //Exp->setArg(i, 0); - } - // Generate the funky cast. - CastExpr *cast; - SmallVector ArgTypes; - QualType returnType; - - // Push 'id' and 'SEL', the 2 implicit arguments. - if (MsgSendFlavor == MsgSendSuperFunctionDecl) - ArgTypes.push_back(Context->getPointerType(getSuperStructType())); - else - ArgTypes.push_back(Context->getObjCIdType()); - ArgTypes.push_back(Context->getObjCSelType()); - if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) { - // Push any user argument types. - for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), - E = OMD->param_end(); PI != E; ++PI) { - QualType t = (*PI)->getType()->isObjCQualifiedIdType() - ? Context->getObjCIdType() - : (*PI)->getType(); - // Make sure we convert "t (^)(...)" to "t (*)(...)". - (void)convertBlockPointerToFunctionPointer(t); - ArgTypes.push_back(t); - } - returnType = Exp->getType(); - convertToUnqualifiedObjCType(returnType); - (void)convertBlockPointerToFunctionPointer(returnType); - } else { - returnType = Context->getObjCIdType(); - } - // Get the type, we will need to reference it in a couple spots. - QualType msgSendType = MsgSendFlavor->getType(); - - // Create a reference to the objc_msgSend() declaration. - DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, - VK_LValue, SourceLocation()); - - // Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid). - // If we don't do this cast, we get the following bizarre warning/note: - // xx.m:13: warning: function called through a non-compatible type - // xx.m:13: note: if this code is reached, the program will abort - cast = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(Context->VoidTy), - CK_BitCast, DRE); - - // Now do the "normal" pointer to function cast. - QualType castType = - getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), - // If we don't have a method decl, force a variadic cast. - Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true); - castType = Context->getPointerType(castType); - cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, - cast); - - // Don't forget the parens to enforce the proper binding. - ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); - - const FunctionType *FT = msgSendType->getAs(); - CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0], - MsgExprs.size(), - FT->getResultType(), VK_RValue, - EndLoc); - Stmt *ReplacingStmt = CE; - if (MsgSendStretFlavor) { - // We have the method which returns a struct/union. 
Must also generate - // call to objc_msgSend_stret and hang both varieties on a conditional - // expression which dictates which one to invoke depending on the size of - // the method's return type. - - Expr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor, - msgSendType, returnType, - ArgTypes, MsgExprs, - Exp->getMethodDecl()); - - // Build sizeof(returnType) - UnaryExprOrTypeTraitExpr *sizeofExpr = - new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf, - Context->getTrivialTypeSourceInfo(returnType), - Context->getSizeType(), SourceLocation(), - SourceLocation()); - // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...)) - // FIXME: Value of 8 is based on ppc32/x86 ABI for the most common cases. - // For X86 it is more complicated and some kind of target specific routine - // is needed to decide what to do. - unsigned IntSize = - static_cast(Context->getTypeSize(Context->IntTy)); - IntegerLiteral *limit = IntegerLiteral::Create(*Context, - llvm::APInt(IntSize, 8), - Context->IntTy, - SourceLocation()); - BinaryOperator *lessThanExpr = - new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy, - VK_RValue, OK_Ordinary, SourceLocation()); - // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...)) - ConditionalOperator *CondExpr = - new (Context) ConditionalOperator(lessThanExpr, - SourceLocation(), CE, - SourceLocation(), STCE, - returnType, VK_RValue, OK_Ordinary); - ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(), - CondExpr); - } - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return ReplacingStmt; -} - -Stmt *RewriteModernObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) { - Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(), - Exp->getLocEnd()); - - // Now do the actual rewrite. - ReplaceStmt(Exp, ReplacingStmt); - - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return ReplacingStmt; -} - -// typedef struct objc_object Protocol; -QualType RewriteModernObjC::getProtocolType() { - if (!ProtocolTypeDecl) { - TypeSourceInfo *TInfo - = Context->getTrivialTypeSourceInfo(Context->getObjCIdType()); - ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("Protocol"), - TInfo); - } - return Context->getTypeDeclType(ProtocolTypeDecl); -} - -/// RewriteObjCProtocolExpr - Rewrite a protocol expression into -/// a synthesized/forward data reference (to the protocol's metadata). -/// The forward references (and metadata) are generated in -/// RewriteModernObjC::HandleTranslationUnit().
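Concretely, the rewrite documented above turns a protocol expression into the address of an external reference variable whose definition is emitted later alongside the protocol metadata. A sketch for @protocol(MyProto); the reference name format comes from the code below, Protocol is the typedef synthesized by getProtocolType, and the '$' in the identifier relies on the usual Clang/GCC identifier extension.

    // Sketch only: rewritten form of  @protocol(MyProto).
    typedef struct objc_object *id;
    typedef id Protocol;                                  // getProtocolType(): typedef of 'id'
    extern Protocol _OBJC_PROTOCOL_REFERENCE_$_MyProto;   // defined with the emitted metadata

    // The expression  @protocol(MyProto)  becomes:
    Protocol *proto_ref() { return (Protocol *)&_OBJC_PROTOCOL_REFERENCE_$_MyProto; }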
-Stmt *RewriteModernObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) { - std::string Name = "_OBJC_PROTOCOL_REFERENCE_$_" + - Exp->getProtocol()->getNameAsString(); - IdentifierInfo *ID = &Context->Idents.get(Name); - VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(), - SourceLocation(), ID, getProtocolType(), 0, - SC_Extern, SC_None); - DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(), - VK_LValue, SourceLocation()); - Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf, - Context->getPointerType(DRE->getType()), - VK_RValue, OK_Ordinary, SourceLocation()); - CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(), - CK_BitCast, - DerefExpr); - ReplaceStmt(Exp, castExpr); - ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl()); - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return castExpr; - -} - -bool RewriteModernObjC::BufferContainsPPDirectives(const char *startBuf, - const char *endBuf) { - while (startBuf < endBuf) { - if (*startBuf == '#') { - // Skip whitespace. - for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf) - ; - if (!strncmp(startBuf, "if", strlen("if")) || - !strncmp(startBuf, "ifdef", strlen("ifdef")) || - !strncmp(startBuf, "ifndef", strlen("ifndef")) || - !strncmp(startBuf, "define", strlen("define")) || - !strncmp(startBuf, "undef", strlen("undef")) || - !strncmp(startBuf, "else", strlen("else")) || - !strncmp(startBuf, "elif", strlen("elif")) || - !strncmp(startBuf, "endif", strlen("endif")) || - !strncmp(startBuf, "pragma", strlen("pragma")) || - !strncmp(startBuf, "include", strlen("include")) || - !strncmp(startBuf, "import", strlen("import")) || - !strncmp(startBuf, "include_next", strlen("include_next"))) - return true; - } - startBuf++; - } - return false; -} - -/// IsTagDefinedInsideClass - This routine checks that a named tagged type -/// is defined inside an objective-c class. If so, it returns true. -bool RewriteModernObjC::IsTagDefinedInsideClass(ObjCContainerDecl *IDecl, - TagDecl *Tag, - bool &IsNamedDefinition) { - if (!IDecl) - return false; - SourceLocation TagLocation; - if (RecordDecl *RD = dyn_cast(Tag)) { - RD = RD->getDefinition(); - if (!RD || !RD->getDeclName().getAsIdentifierInfo()) - return false; - IsNamedDefinition = true; - TagLocation = RD->getLocation(); - return Context->getSourceManager().isBeforeInTranslationUnit( - IDecl->getLocation(), TagLocation); - } - if (EnumDecl *ED = dyn_cast(Tag)) { - if (!ED || !ED->getDeclName().getAsIdentifierInfo()) - return false; - IsNamedDefinition = true; - TagLocation = ED->getLocation(); - return Context->getSourceManager().isBeforeInTranslationUnit( - IDecl->getLocation(), TagLocation); - - } - return false; -} - -/// RewriteObjCFieldDeclType - This routine rewrites a type into the buffer. -/// It handles elaborated types, as well as enum types in the process. 
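As an illustration of the field rewriting implemented below: a named struct or enum that is defined inside the @interface is first hoisted to file scope by RewriteLocallyDefinedNamedAggregates, and the ivar is then printed as an ordinary elaborated-type field inside the generated ClassName_IMPL struct; arrays keep their dimensions and bit-fields keep their widths. Ivar and tag names in the sketch are illustrative.

    // Input (Objective-C):
    //   @interface Foo : NSObject {
    //     struct Bar { int a, b; } bar;           // tag defined inside the class
    //     enum Color { Red = 0, Blue = 1 } color;
    //     int counts[4];
    //   }
    //   @end
    //
    // Rewritten output (sketch):
    struct Bar {
      int a;
      int b;
    };
    enum Color {
      Red = 0,
      Blue = 1
    };
    struct Foo_IMPL {
      struct Bar bar;
      enum Color color;
      int counts[4];
    };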
-bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type, - std::string &Result) { - if (isa(Type)) { - Result += "\t"; - return false; - } - - if (Type->isArrayType()) { - QualType ElemTy = Context->getBaseElementType(Type); - return RewriteObjCFieldDeclType(ElemTy, Result); - } - else if (Type->isRecordType()) { - RecordDecl *RD = Type->getAs()->getDecl(); - if (RD->isCompleteDefinition()) { - if (RD->isStruct()) - Result += "\n\tstruct "; - else if (RD->isUnion()) - Result += "\n\tunion "; - else - assert(false && "class not allowed as an ivar type"); - - Result += RD->getName(); - if (GlobalDefinedTags.count(RD)) { - // struct/union is defined globally, use it. - Result += " "; - return true; - } - Result += " {\n"; - for (RecordDecl::field_iterator i = RD->field_begin(), - e = RD->field_end(); i != e; ++i) { - FieldDecl *FD = *i; - RewriteObjCFieldDecl(FD, Result); - } - Result += "\t} "; - return true; - } - } - else if (Type->isEnumeralType()) { - EnumDecl *ED = Type->getAs()->getDecl(); - if (ED->isCompleteDefinition()) { - Result += "\n\tenum "; - Result += ED->getName(); - if (GlobalDefinedTags.count(ED)) { - // Enum is globall defined, use it. - Result += " "; - return true; - } - - Result += " {\n"; - for (EnumDecl::enumerator_iterator EC = ED->enumerator_begin(), - ECEnd = ED->enumerator_end(); EC != ECEnd; ++EC) { - Result += "\t"; Result += EC->getName(); Result += " = "; - llvm::APSInt Val = EC->getInitVal(); - Result += Val.toString(10); - Result += ",\n"; - } - Result += "\t} "; - return true; - } - } - - Result += "\t"; - convertObjCTypeToCStyleType(Type); - return false; -} - - -/// RewriteObjCFieldDecl - This routine rewrites a field into the buffer. -/// It handles elaborated types, as well as enum types in the process. -void RewriteModernObjC::RewriteObjCFieldDecl(FieldDecl *fieldDecl, - std::string &Result) { - QualType Type = fieldDecl->getType(); - std::string Name = fieldDecl->getNameAsString(); - - bool EleboratedType = RewriteObjCFieldDeclType(Type, Result); - if (!EleboratedType) - Type.getAsStringInternal(Name, Context->getPrintingPolicy()); - Result += Name; - if (fieldDecl->isBitField()) { - Result += " : "; Result += utostr(fieldDecl->getBitWidthValue(*Context)); - } - else if (EleboratedType && Type->isArrayType()) { - CanQualType CType = Context->getCanonicalType(Type); - while (isa(CType)) { - if (const ConstantArrayType *CAT = Context->getAsConstantArrayType(CType)) { - Result += "["; - llvm::APInt Dim = CAT->getSize(); - Result += utostr(Dim.getZExtValue()); - Result += "]"; - } - CType = CType->getAs()->getElementType(); - } - } - - Result += ";\n"; -} - -/// RewriteLocallyDefinedNamedAggregates - This routine rewrites locally defined -/// named aggregate types into the input buffer. 
-void RewriteModernObjC::RewriteLocallyDefinedNamedAggregates(FieldDecl *fieldDecl, - std::string &Result) { - QualType Type = fieldDecl->getType(); - if (isa(Type)) - return; - if (Type->isArrayType()) - Type = Context->getBaseElementType(Type); - ObjCContainerDecl *IDecl = - dyn_cast(fieldDecl->getDeclContext()); - - TagDecl *TD = 0; - if (Type->isRecordType()) { - TD = Type->getAs()->getDecl(); - } - else if (Type->isEnumeralType()) { - TD = Type->getAs()->getDecl(); - } - - if (TD) { - if (GlobalDefinedTags.count(TD)) - return; - - bool IsNamedDefinition = false; - if (IsTagDefinedInsideClass(IDecl, TD, IsNamedDefinition)) { - RewriteObjCFieldDeclType(Type, Result); - Result += ";"; - } - if (IsNamedDefinition) - GlobalDefinedTags.insert(TD); - } - -} - -/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to -/// an objective-c class with ivars. -void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl, - std::string &Result) { - assert(CDecl && "Class missing in SynthesizeObjCInternalStruct"); - assert(CDecl->getName() != "" && - "Name missing in SynthesizeObjCInternalStruct"); - ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass(); - SmallVector IVars; - for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin(); - IVD; IVD = IVD->getNextIvar()) - IVars.push_back(IVD); - - SourceLocation LocStart = CDecl->getLocStart(); - SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc(); - - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - - // If no ivars and no root or if its root, directly or indirectly, - // have no ivars (thus not synthesized) then no need to synthesize this class. - if ((!CDecl->isThisDeclarationADefinition() || IVars.size() == 0) && - (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) { - endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts); - ReplaceText(LocStart, endBuf-startBuf, Result); - return; - } - - // Insert named struct/union definitions inside class to - // outer scope. This follows semantics of locally defined - // struct/unions in objective-c classes. - for (unsigned i = 0, e = IVars.size(); i < e; i++) - RewriteLocallyDefinedNamedAggregates(IVars[i], Result); - - Result += "\nstruct "; - Result += CDecl->getNameAsString(); - Result += "_IMPL {\n"; - - if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) { - Result += "\tstruct "; Result += RCDecl->getNameAsString(); - Result += "_IMPL "; Result += RCDecl->getNameAsString(); - Result += "_IVARS;\n"; - } - - for (unsigned i = 0, e = IVars.size(); i < e; i++) - RewriteObjCFieldDecl(IVars[i], Result); - - Result += "};\n"; - endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts); - ReplaceText(LocStart, endBuf-startBuf, Result); - // Mark this struct as having been generated. - if (!ObjCSynthesizedStructs.insert(CDecl)) - llvm_unreachable("struct already synthesize- RewriteObjCInternalStruct"); -} - -/// RewriteIvarOffsetSymbols - Rewrite ivar offset symbols of those ivars which -/// have been referenced in an ivar access expression. -void RewriteModernObjC::RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl, - std::string &Result) { - // write out ivar offset symbols which have been referenced in an ivar - // access expression. 
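Putting RewriteObjCInternalStruct above and RewriteIvarOffsetSymbols below together: a class with a superclass gets a leading nested SuperName_IMPL member, and every ivar that was referenced through an ivar access expression additionally gets an extern offset variable. The exact symbol spelling comes from WriteInternalIvarName, which is outside this hunk, so the name in the sketch is illustrative.

    // Sketch only: generated layout for  @interface Derived : Base { int _x; } ... @end
    struct Base_IMPL {
      int _base_ivar;                    // whatever Base declared
    };
    struct Derived_IMPL {
      struct Base_IMPL Base_IVARS;       // superclass ivars come first
      int _x;
    };

    // Emitted only for ivars actually referenced in an ivar access expression
    // (the __declspec attributes are added under the Microsoft extensions path):
    extern "C" unsigned long OBJC_IVAR_$_Derived$_x;   // illustrative symbol name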
- llvm::SmallPtrSet Ivars = ReferencedIvars[CDecl]; - if (Ivars.empty()) - return; - for (llvm::SmallPtrSet::iterator i = Ivars.begin(), - e = Ivars.end(); i != e; i++) { - ObjCIvarDecl *IvarDecl = (*i); - Result += "\n"; - if (LangOpts.MicrosoftExt) - Result += "__declspec(allocate(\".objc_ivar$B\")) "; - Result += "extern \"C\" "; - if (LangOpts.MicrosoftExt && - IvarDecl->getAccessControl() != ObjCIvarDecl::Private && - IvarDecl->getAccessControl() != ObjCIvarDecl::Package) - Result += "__declspec(dllimport) "; - - Result += "unsigned long "; - WriteInternalIvarName(CDecl, IvarDecl, Result); - Result += ";"; - } -} - -//===----------------------------------------------------------------------===// -// Meta Data Emission -//===----------------------------------------------------------------------===// - - -/// RewriteImplementations - This routine rewrites all method implementations -/// and emits meta-data. - -void RewriteModernObjC::RewriteImplementations() { - int ClsDefCount = ClassImplementation.size(); - int CatDefCount = CategoryImplementation.size(); - - // Rewrite implemented methods - for (int i = 0; i < ClsDefCount; i++) { - ObjCImplementationDecl *OIMP = ClassImplementation[i]; - ObjCInterfaceDecl *CDecl = OIMP->getClassInterface(); - if (CDecl->isImplicitInterfaceDecl()) - assert(false && - "Legacy implicit interface rewriting not supported in moder abi"); - RewriteImplementationDecl(OIMP); - } - - for (int i = 0; i < CatDefCount; i++) { - ObjCCategoryImplDecl *CIMP = CategoryImplementation[i]; - ObjCInterfaceDecl *CDecl = CIMP->getClassInterface(); - if (CDecl->isImplicitInterfaceDecl()) - assert(false && - "Legacy implicit interface rewriting not supported in moder abi"); - RewriteImplementationDecl(CIMP); - } -} - -void RewriteModernObjC::RewriteByRefString(std::string &ResultStr, - const std::string &Name, - ValueDecl *VD, bool def) { - assert(BlockByRefDeclNo.count(VD) && - "RewriteByRefString: ByRef decl missing"); - if (def) - ResultStr += "struct "; - ResultStr += "__Block_byref_" + Name + - "_" + utostr(BlockByRefDeclNo[VD]) ; -} - -static bool HasLocalVariableExternalStorage(ValueDecl *VD) { - if (VarDecl *Var = dyn_cast(VD)) - return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage()); - return false; -} - -std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i, - StringRef funcName, - std::string Tag) { - const FunctionType *AFT = CE->getFunctionType(); - QualType RT = AFT->getResultType(); - std::string StructRef = "struct " + Tag; - std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" + - funcName.str() + "_block_func_" + utostr(i); - - BlockDecl *BD = CE->getBlockDecl(); - - if (isa(AFT)) { - // No user-supplied arguments. Still need to pass in a pointer to the - // block (to reference imported block decl refs). - S += "(" + StructRef + " *__cself)"; - } else if (BD->param_empty()) { - S += "(" + StructRef + " *__cself)"; - } else { - const FunctionProtoType *FT = cast(AFT); - assert(FT && "SynthesizeBlockFunc: No function proto"); - S += '('; - // first add the implicit argument. 
- S += StructRef + " *__cself, "; - std::string ParamStr; - for (BlockDecl::param_iterator AI = BD->param_begin(), - E = BD->param_end(); AI != E; ++AI) { - if (AI != BD->param_begin()) S += ", "; - ParamStr = (*AI)->getNameAsString(); - QualType QT = (*AI)->getType(); - (void)convertBlockPointerToFunctionPointer(QT); - QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy()); - S += ParamStr; - } - if (FT->isVariadic()) { - if (!BD->param_empty()) S += ", "; - S += "..."; - } - S += ')'; - } - S += " {\n"; - - // Create local declarations to avoid rewriting all closure decl ref exprs. - // First, emit a declaration for all "by ref" decls. - for (SmallVector::iterator I = BlockByRefDecls.begin(), - E = BlockByRefDecls.end(); I != E; ++I) { - S += " "; - std::string Name = (*I)->getNameAsString(); - std::string TypeString; - RewriteByRefString(TypeString, Name, (*I)); - TypeString += " *"; - Name = TypeString + Name; - S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n"; - } - // Next, emit a declaration for all "by copy" declarations. - for (SmallVector::iterator I = BlockByCopyDecls.begin(), - E = BlockByCopyDecls.end(); I != E; ++I) { - S += " "; - // Handle nested closure invocation. For example: - // - // void (^myImportedClosure)(void); - // myImportedClosure = ^(void) { setGlobalInt(x + y); }; - // - // void (^anotherClosure)(void); - // anotherClosure = ^(void) { - // myImportedClosure(); // import and invoke the closure - // }; - // - if (isTopLevelBlockPointerType((*I)->getType())) { - RewriteBlockPointerTypeVariable(S, (*I)); - S += " = ("; - RewriteBlockPointerType(S, (*I)->getType()); - S += ")"; - S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n"; - } - else { - std::string Name = (*I)->getNameAsString(); - QualType QT = (*I)->getType(); - if (HasLocalVariableExternalStorage(*I)) - QT = Context->getPointerType(QT); - QT.getAsStringInternal(Name, Context->getPrintingPolicy()); - S += Name + " = __cself->" + - (*I)->getNameAsString() + "; // bound by copy\n"; - } - } - std::string RewrittenStr = RewrittenBlockExprs[CE]; - const char *cstr = RewrittenStr.c_str(); - while (*cstr++ != '{') ; - S += cstr; - S += "\n"; - return S; -} - -std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i, - StringRef funcName, - std::string Tag) { - std::string StructRef = "struct " + Tag; - std::string S = "static void __"; - - S += funcName; - S += "_block_copy_" + utostr(i); - S += "(" + StructRef; - S += "*dst, " + StructRef; - S += "*src) {"; - for (llvm::SmallPtrSet::iterator I = ImportedBlockDecls.begin(), - E = ImportedBlockDecls.end(); I != E; ++I) { - ValueDecl *VD = (*I); - S += "_Block_object_assign((void*)&dst->"; - S += (*I)->getNameAsString(); - S += ", (void*)src->"; - S += (*I)->getNameAsString(); - if (BlockByRefDeclsPtrSet.count((*I))) - S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);"; - else if (VD->getType()->isBlockPointerType()) - S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);"; - else - S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);"; - } - S += "}\n"; - - S += "\nstatic void __"; - S += funcName; - S += "_block_dispose_" + utostr(i); - S += "(" + StructRef; - S += "*src) {"; - for (llvm::SmallPtrSet::iterator I = ImportedBlockDecls.begin(), - E = ImportedBlockDecls.end(); I != E; ++I) { - ValueDecl *VD = (*I); - S += "_Block_object_dispose((void*)src->"; - S += (*I)->getNameAsString(); - if (BlockByRefDeclsPtrSet.count((*I))) - 
S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);"; - else if (VD->getType()->isBlockPointerType()) - S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);"; - else - S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);"; - } - S += "}\n"; - return S; -} - -std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag, - std::string Desc) { - std::string S = "\nstruct " + Tag; - std::string Constructor = " " + Tag; - - S += " {\n struct __block_impl impl;\n"; - S += " struct " + Desc; - S += "* Desc;\n"; - - Constructor += "(void *fp, "; // Invoke function pointer. - Constructor += "struct " + Desc; // Descriptor pointer. - Constructor += " *desc"; - - if (BlockDeclRefs.size()) { - // Output all "by copy" declarations. - for (SmallVector::iterator I = BlockByCopyDecls.begin(), - E = BlockByCopyDecls.end(); I != E; ++I) { - S += " "; - std::string FieldName = (*I)->getNameAsString(); - std::string ArgName = "_" + FieldName; - // Handle nested closure invocation. For example: - // - // void (^myImportedBlock)(void); - // myImportedBlock = ^(void) { setGlobalInt(x + y); }; - // - // void (^anotherBlock)(void); - // anotherBlock = ^(void) { - // myImportedBlock(); // import and invoke the closure - // }; - // - if (isTopLevelBlockPointerType((*I)->getType())) { - S += "struct __block_impl *"; - Constructor += ", void *" + ArgName; - } else { - QualType QT = (*I)->getType(); - if (HasLocalVariableExternalStorage(*I)) - QT = Context->getPointerType(QT); - QT.getAsStringInternal(FieldName, Context->getPrintingPolicy()); - QT.getAsStringInternal(ArgName, Context->getPrintingPolicy()); - Constructor += ", " + ArgName; - } - S += FieldName + ";\n"; - } - // Output all "by ref" declarations. - for (SmallVector::iterator I = BlockByRefDecls.begin(), - E = BlockByRefDecls.end(); I != E; ++I) { - S += " "; - std::string FieldName = (*I)->getNameAsString(); - std::string ArgName = "_" + FieldName; - { - std::string TypeString; - RewriteByRefString(TypeString, FieldName, (*I)); - TypeString += " *"; - FieldName = TypeString + FieldName; - ArgName = TypeString + ArgName; - Constructor += ", " + ArgName; - } - S += FieldName + "; // by ref\n"; - } - // Finish writing the constructor. - Constructor += ", int flags=0)"; - // Initialize all "by copy" arguments. - bool firsTime = true; - for (SmallVector::iterator I = BlockByCopyDecls.begin(), - E = BlockByCopyDecls.end(); I != E; ++I) { - std::string Name = (*I)->getNameAsString(); - if (firsTime) { - Constructor += " : "; - firsTime = false; - } - else - Constructor += ", "; - if (isTopLevelBlockPointerType((*I)->getType())) - Constructor += Name + "((struct __block_impl *)_" + Name + ")"; - else - Constructor += Name + "(_" + Name + ")"; - } - // Initialize all "by ref" arguments. - for (SmallVector::iterator I = BlockByRefDecls.begin(), - E = BlockByRefDecls.end(); I != E; ++I) { - std::string Name = (*I)->getNameAsString(); - if (firsTime) { - Constructor += " : "; - firsTime = false; - } - else - Constructor += ", "; - Constructor += Name + "(_" + Name + "->__forwarding)"; - } - - Constructor += " {\n"; - if (GlobalVarDecl) - Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n"; - else - Constructor += " impl.isa = &_NSConcreteStackBlock;\n"; - Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n"; - - Constructor += " Desc = desc;\n"; - } else { - // Finish writing the constructor. 
- Constructor += ", int flags=0) {\n"; - if (GlobalVarDecl) - Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n"; - else - Constructor += " impl.isa = &_NSConcreteStackBlock;\n"; - Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n"; - Constructor += " Desc = desc;\n"; - } - Constructor += " "; - Constructor += "}\n"; - S += Constructor; - S += "};\n"; - return S; -} - -std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag, - std::string ImplTag, int i, - StringRef FunName, - unsigned hasCopy) { - std::string S = "\nstatic struct " + DescTag; - - S += " {\n size_t reserved;\n"; - S += " size_t Block_size;\n"; - if (hasCopy) { - S += " void (*copy)(struct "; - S += ImplTag; S += "*, struct "; - S += ImplTag; S += "*);\n"; - - S += " void (*dispose)(struct "; - S += ImplTag; S += "*);\n"; - } - S += "} "; - - S += DescTag + "_DATA = { 0, sizeof(struct "; - S += ImplTag + ")"; - if (hasCopy) { - S += ", __" + FunName.str() + "_block_copy_" + utostr(i); - S += ", __" + FunName.str() + "_block_dispose_" + utostr(i); - } - S += "};\n"; - return S; -} - -void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart, - StringRef FunName) { - bool RewriteSC = (GlobalVarDecl && - !Blocks.empty() && - GlobalVarDecl->getStorageClass() == SC_Static && - GlobalVarDecl->getType().getCVRQualifiers()); - if (RewriteSC) { - std::string SC(" void __"); - SC += GlobalVarDecl->getNameAsString(); - SC += "() {}"; - InsertText(FunLocStart, SC); - } - - // Insert closures that were part of the function. - for (unsigned i = 0, count=0; i < Blocks.size(); i++) { - CollectBlockDeclRefInfo(Blocks[i]); - // Need to copy-in the inner copied-in variables not actually used in this - // block. - for (int j = 0; j < InnerDeclRefsCount[i]; j++) { - DeclRefExpr *Exp = InnerDeclRefs[count++]; - ValueDecl *VD = Exp->getDecl(); - BlockDeclRefs.push_back(Exp); - if (!VD->hasAttr()) { - if (!BlockByCopyDeclsPtrSet.count(VD)) { - BlockByCopyDeclsPtrSet.insert(VD); - BlockByCopyDecls.push_back(VD); - } - continue; - } - - if (!BlockByRefDeclsPtrSet.count(VD)) { - BlockByRefDeclsPtrSet.insert(VD); - BlockByRefDecls.push_back(VD); - } - - // imported objects in the inner blocks not used in the outer - // blocks must be copied/disposed in the outer block as well. - if (VD->getType()->isObjCObjectPointerType() || - VD->getType()->isBlockPointerType()) - ImportedBlockDecls.insert(VD); - } - - std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i); - std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i); - - std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag); - - InsertText(FunLocStart, CI); - - std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag); - - InsertText(FunLocStart, CF); - - if (ImportedBlockDecls.size()) { - std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag); - InsertText(FunLocStart, HF); - } - std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName, - ImportedBlockDecls.size() > 0); - InsertText(FunLocStart, BD); - - BlockDeclRefs.clear(); - BlockByRefDecls.clear(); - BlockByRefDeclsPtrSet.clear(); - BlockByCopyDecls.clear(); - BlockByCopyDeclsPtrSet.clear(); - ImportedBlockDecls.clear(); - } - if (RewriteSC) { - // Must insert any 'const/volatile/static here. Since it has been - // removed as result of rewriting of block literals. 
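Taken together, SynthesizeBlockImpl, SynthesizeBlockFunc and SynthesizeBlockDescriptor above expand each block literal into a capture struct, an invoke function and a descriptor, which SynthesizeBlockLiterals then inserts ahead of the enclosing function. A trimmed sketch for a block written in main() that captures int x by copy; the __block_impl layout and the _NSConcreteStackBlock declaration live in the rewriter's preamble, outside this hunk, so their shape here is assumed.

    #include <stddef.h>
    // Preamble declarations (assumed shape):
    struct __block_impl { void *isa; int Flags; int Reserved; void *FuncPtr; };
    extern "C" void *_NSConcreteStackBlock[32];

    // SynthesizeBlockImpl: capture struct plus constructor.
    struct __main_block_desc_0;
    struct __main_block_impl_0 {
      struct __block_impl impl;
      struct __main_block_desc_0 *Desc;
      int x;                                           // captured by copy
      __main_block_impl_0(void *fp, struct __main_block_desc_0 *desc, int _x,
                          int flags = 0) : x(_x) {
        impl.isa = &_NSConcreteStackBlock;
        impl.Flags = flags;
        impl.FuncPtr = fp;
        Desc = desc;
      }
    };

    // SynthesizeBlockFunc: body of  ^{ return x + 1; }.
    static int __main_block_func_0(struct __main_block_impl_0 *__cself) {
      int x = __cself->x;                              // bound by copy
      return x + 1;
    }

    // SynthesizeBlockDescriptor.
    static struct __main_block_desc_0 {
      size_t reserved;
      size_t Block_size;
    } __main_block_desc_0_DATA = { 0, sizeof(struct __main_block_impl_0) };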
- std::string SC; - if (GlobalVarDecl->getStorageClass() == SC_Static) - SC = "static "; - if (GlobalVarDecl->getType().isConstQualified()) - SC += "const "; - if (GlobalVarDecl->getType().isVolatileQualified()) - SC += "volatile "; - if (GlobalVarDecl->getType().isRestrictQualified()) - SC += "restrict "; - InsertText(FunLocStart, SC); - } - if (GlobalConstructionExp) { - // extra fancy dance for global literal expression. - - // Always the latest block expression on the block stack. - std::string Tag = "__"; - Tag += FunName; - Tag += "_block_impl_"; - Tag += utostr(Blocks.size()-1); - std::string globalBuf = "static "; - globalBuf += Tag; globalBuf += " "; - std::string SStr; - - llvm::raw_string_ostream constructorExprBuf(SStr); - GlobalConstructionExp->printPretty(constructorExprBuf, 0, - PrintingPolicy(LangOpts)); - globalBuf += constructorExprBuf.str(); - globalBuf += ";\n"; - InsertText(FunLocStart, globalBuf); - GlobalConstructionExp = 0; - } - - Blocks.clear(); - InnerDeclRefsCount.clear(); - InnerDeclRefs.clear(); - RewrittenBlockExprs.clear(); -} - -void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) { - SourceLocation FunLocStart = - (!Blocks.empty()) ? getFunctionSourceLocation(*this, FD) - : FD->getTypeSpecStartLoc(); - StringRef FuncName = FD->getName(); - - SynthesizeBlockLiterals(FunLocStart, FuncName); -} - -static void BuildUniqueMethodName(std::string &Name, - ObjCMethodDecl *MD) { - ObjCInterfaceDecl *IFace = MD->getClassInterface(); - Name = IFace->getName(); - Name += "__" + MD->getSelector().getAsString(); - // Convert colons to underscores. - std::string::size_type loc = 0; - while ((loc = Name.find(":", loc)) != std::string::npos) - Name.replace(loc, 1, "_"); -} - -void RewriteModernObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) { - //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n"); - //SourceLocation FunLocStart = MD->getLocStart(); - SourceLocation FunLocStart = MD->getLocStart(); - std::string FuncName; - BuildUniqueMethodName(FuncName, MD); - SynthesizeBlockLiterals(FunLocStart, FuncName); -} - -void RewriteModernObjC::GetBlockDeclRefExprs(Stmt *S) { - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) { - if (BlockExpr *CBE = dyn_cast(*CI)) - GetBlockDeclRefExprs(CBE->getBody()); - else - GetBlockDeclRefExprs(*CI); - } - // Handle specific things. - if (DeclRefExpr *DRE = dyn_cast(S)) { - if (DRE->refersToEnclosingLocal()) { - // FIXME: Handle enums. - if (!isa(DRE->getDecl())) - BlockDeclRefs.push_back(DRE); - if (HasLocalVariableExternalStorage(DRE->getDecl())) - BlockDeclRefs.push_back(DRE); - } - } - - return; -} - -void RewriteModernObjC::GetInnerBlockDeclRefExprs(Stmt *S, - SmallVector &InnerBlockDeclRefs, - llvm::SmallPtrSet &InnerContexts) { - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) { - if (BlockExpr *CBE = dyn_cast(*CI)) { - InnerContexts.insert(cast(CBE->getBlockDecl())); - GetInnerBlockDeclRefExprs(CBE->getBody(), - InnerBlockDeclRefs, - InnerContexts); - } - else - GetInnerBlockDeclRefExprs(*CI, - InnerBlockDeclRefs, - InnerContexts); - - } - // Handle specific things. 
- if (DeclRefExpr *DRE = dyn_cast(S)) { - if (DRE->refersToEnclosingLocal()) { - if (!isa(DRE->getDecl()) && - !InnerContexts.count(DRE->getDecl()->getDeclContext())) - InnerBlockDeclRefs.push_back(DRE); - if (VarDecl *Var = dyn_cast(DRE->getDecl())) - if (Var->isFunctionOrMethodVarDecl()) - ImportedLocalExternalDecls.insert(Var); - } - } - - return; -} - -/// convertObjCTypeToCStyleType - This routine converts such objc types -/// as qualified objects, and blocks to their closest c/c++ types that -/// it can. It returns true if input type was modified. -bool RewriteModernObjC::convertObjCTypeToCStyleType(QualType &T) { - QualType oldT = T; - convertBlockPointerToFunctionPointer(T); - if (T->isFunctionPointerType()) { - QualType PointeeTy; - if (const PointerType* PT = T->getAs()) { - PointeeTy = PT->getPointeeType(); - if (const FunctionType *FT = PointeeTy->getAs()) { - T = convertFunctionTypeOfBlocks(FT); - T = Context->getPointerType(T); - } - } - } - - convertToUnqualifiedObjCType(T); - return T != oldT; -} - -/// convertFunctionTypeOfBlocks - This routine converts a function type -/// whose result type may be a block pointer or whose argument type(s) -/// might be block pointers to an equivalent function type replacing -/// all block pointers to function pointers. -QualType RewriteModernObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) { - const FunctionProtoType *FTP = dyn_cast(FT); - // FTP will be null for closures that don't take arguments. - // Generate a funky cast. - SmallVector ArgTypes; - QualType Res = FT->getResultType(); - bool modified = convertObjCTypeToCStyleType(Res); - - if (FTP) { - for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), - E = FTP->arg_type_end(); I && (I != E); ++I) { - QualType t = *I; - // Make sure we convert "t (^)(...)" to "t (*)(...)". - if (convertObjCTypeToCStyleType(t)) - modified = true; - ArgTypes.push_back(t); - } - } - QualType FuncType; - if (modified) - FuncType = getSimpleFunctionType(Res, &ArgTypes[0], ArgTypes.size()); - else FuncType = QualType(FT, 0); - return FuncType; -} - -Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) { - // Navigate to relevant type information. 
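As a reading aid (editorial, not part of the patch): SynthesizeBlockCall below rewrites a call through a block pointer into a call through the FuncPtr slot of the __block_impl header, passing the block itself as the hidden first argument. For a hypothetical block variable int (^blk)(int) and the call blk(5), the resulting AST prints roughly as the following fragment:

    // blk(5) becomes (illustrative; the rewriter builds the equivalent AST, not text):
    ((int (*)(struct __block_impl *, int))
        ((struct __block_impl *)blk)->FuncPtr)((struct __block_impl *)blk, 5);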
- const BlockPointerType *CPT = 0; - - if (const DeclRefExpr *DRE = dyn_cast(BlockExp)) { - CPT = DRE->getType()->getAs(); - } else if (const MemberExpr *MExpr = dyn_cast(BlockExp)) { - CPT = MExpr->getType()->getAs(); - } - else if (const ParenExpr *PRE = dyn_cast(BlockExp)) { - return SynthesizeBlockCall(Exp, PRE->getSubExpr()); - } - else if (const ImplicitCastExpr *IEXPR = dyn_cast(BlockExp)) - CPT = IEXPR->getType()->getAs(); - else if (const ConditionalOperator *CEXPR = - dyn_cast(BlockExp)) { - Expr *LHSExp = CEXPR->getLHS(); - Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp); - Expr *RHSExp = CEXPR->getRHS(); - Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp); - Expr *CONDExp = CEXPR->getCond(); - ConditionalOperator *CondExpr = - new (Context) ConditionalOperator(CONDExp, - SourceLocation(), cast(LHSStmt), - SourceLocation(), cast(RHSStmt), - Exp->getType(), VK_RValue, OK_Ordinary); - return CondExpr; - } else if (const ObjCIvarRefExpr *IRE = dyn_cast(BlockExp)) { - CPT = IRE->getType()->getAs(); - } else if (const PseudoObjectExpr *POE - = dyn_cast(BlockExp)) { - CPT = POE->getType()->castAs(); - } else { - assert(1 && "RewriteBlockClass: Bad type"); - } - assert(CPT && "RewriteBlockClass: Bad type"); - const FunctionType *FT = CPT->getPointeeType()->getAs(); - assert(FT && "RewriteBlockClass: Bad type"); - const FunctionProtoType *FTP = dyn_cast(FT); - // FTP will be null for closures that don't take arguments. - - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("__block_impl")); - QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD)); - - // Generate a funky cast. - SmallVector ArgTypes; - - // Push the block argument type. - ArgTypes.push_back(PtrBlock); - if (FTP) { - for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), - E = FTP->arg_type_end(); I && (I != E); ++I) { - QualType t = *I; - // Make sure we convert "t (^)(...)" to "t (*)(...)". - if (!convertBlockPointerToFunctionPointer(t)) - convertToUnqualifiedObjCType(t); - ArgTypes.push_back(t); - } - } - // Now do the pointer to function cast. - QualType PtrToFuncCastType - = getSimpleFunctionType(Exp->getType(), &ArgTypes[0], ArgTypes.size()); - - PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType); - - CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock, - CK_BitCast, - const_cast(BlockExp)); - // Don't forget the parens to enforce the proper binding. - ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), - BlkCast); - //PE->dump(); - - FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get("FuncPtr"), - Context->VoidPtrTy, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); - - - CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType, - CK_BitCast, ME); - PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast); - - SmallVector BlkExprs; - // Add the implicit argument. - BlkExprs.push_back(BlkCast); - // Add the user arguments. 
- for (CallExpr::arg_iterator I = Exp->arg_begin(), - E = Exp->arg_end(); I != E; ++I) { - BlkExprs.push_back(*I); - } - CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0], - BlkExprs.size(), - Exp->getType(), VK_RValue, - SourceLocation()); - return CE; -} - -// We need to return the rewritten expression to handle cases where the -// DeclRefExpr is embedded in another expression being rewritten. -// For example: -// -// int main() { -// __block Foo *f; -// __block int i; -// -// void (^myblock)() = ^() { -// [f test]; // f is a DeclRefExpr embedded in a message (which is being rewritten). -// i = 77; -// }; -//} -Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) { - // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR - // for each DeclRefExp where BYREFVAR is name of the variable. - ValueDecl *VD = DeclRefExp->getDecl(); - bool isArrow = DeclRefExp->refersToEnclosingLocal(); - - FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get("__forwarding"), - Context->VoidPtrTy, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow, - FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); - - StringRef Name = VD->getName(); - FD = FieldDecl::Create(*Context, 0, SourceLocation(), SourceLocation(), - &Context->Idents.get(Name), - Context->VoidPtrTy, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(), - DeclRefExp->getType(), VK_LValue, OK_Ordinary); - - - - // Need parens to enforce precedence. - ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(), - DeclRefExp->getExprLoc(), - ME); - ReplaceStmt(DeclRefExp, PE); - return PE; -} - -// Rewrites the imported local variable V with external storage -// (static, extern, etc.) as *V -// -Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) { - ValueDecl *VD = DRE->getDecl(); - if (VarDecl *Var = dyn_cast(VD)) - if (!ImportedLocalExternalDecls.count(Var)) - return DRE; - Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(), - VK_LValue, OK_Ordinary, - DRE->getLocation()); - // Need parens to enforce precedence. - ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), - Exp); - ReplaceStmt(DRE, PE); - return PE; -} - -void RewriteModernObjC::RewriteCastExpr(CStyleCastExpr *CE) { - SourceLocation LocStart = CE->getLParenLoc(); - SourceLocation LocEnd = CE->getRParenLoc(); - - // Need to avoid trying to rewrite synthesized casts. - if (LocStart.isInvalid()) - return; - // Need to avoid trying to rewrite casts contained in macros. - if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd)) - return; - - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - QualType QT = CE->getType(); - const Type* TypePtr = QT->getAs(); - if (isa(TypePtr)) { - const TypeOfExprType *TypeOfExprTypePtr = cast(TypePtr); - QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType(); - std::string TypeAsString = "("; - RewriteBlockPointerType(TypeAsString, QT); - TypeAsString += ")"; - ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString); - return; - } - // advance the location to startArgList. - const char *argPtr = startBuf; - - while (*argPtr++ && (argPtr < endBuf)) { - switch (*argPtr) { - case '^': - // Replace the '^' with '*'. 
- LocStart = LocStart.getLocWithOffset(argPtr-startBuf); - ReplaceText(LocStart, 1, "*"); - break; - } - } - return; -} - -void RewriteModernObjC::RewriteImplicitCastObjCExpr(CastExpr *IC) { - CastKind CastKind = IC->getCastKind(); - if (CastKind != CK_BlockPointerToObjCPointerCast && - CastKind != CK_AnyPointerToBlockPointerCast) - return; - - QualType QT = IC->getType(); - (void)convertBlockPointerToFunctionPointer(QT); - std::string TypeString(QT.getAsString(Context->getPrintingPolicy())); - std::string Str = "("; - Str += TypeString; - Str += ")"; - InsertText(IC->getSubExpr()->getLocStart(), &Str[0], Str.size()); - - return; -} - -void RewriteModernObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) { - SourceLocation DeclLoc = FD->getLocation(); - unsigned parenCount = 0; - - // We have 1 or more arguments that have closure pointers. - const char *startBuf = SM->getCharacterData(DeclLoc); - const char *startArgList = strchr(startBuf, '('); - - assert((*startArgList == '(') && "Rewriter fuzzy parser confused"); - - parenCount++; - // advance the location to startArgList. - DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf); - assert((DeclLoc.isValid()) && "Invalid DeclLoc"); - - const char *argPtr = startArgList; - - while (*argPtr++ && parenCount) { - switch (*argPtr) { - case '^': - // Replace the '^' with '*'. - DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList); - ReplaceText(DeclLoc, 1, "*"); - break; - case '(': - parenCount++; - break; - case ')': - parenCount--; - break; - } - } - return; -} - -bool RewriteModernObjC::PointerTypeTakesAnyBlockArguments(QualType QT) { - const FunctionProtoType *FTP; - const PointerType *PT = QT->getAs(); - if (PT) { - FTP = PT->getPointeeType()->getAs(); - } else { - const BlockPointerType *BPT = QT->getAs(); - assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type"); - FTP = BPT->getPointeeType()->getAs(); - } - if (FTP) { - for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), - E = FTP->arg_type_end(); I != E; ++I) - if (isTopLevelBlockPointerType(*I)) - return true; - } - return false; -} - -bool RewriteModernObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) { - const FunctionProtoType *FTP; - const PointerType *PT = QT->getAs(); - if (PT) { - FTP = PT->getPointeeType()->getAs(); - } else { - const BlockPointerType *BPT = QT->getAs(); - assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type"); - FTP = BPT->getPointeeType()->getAs(); - } - if (FTP) { - for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), - E = FTP->arg_type_end(); I != E; ++I) { - if ((*I)->isObjCQualifiedIdType()) - return true; - if ((*I)->isObjCObjectPointerType() && - (*I)->getPointeeType()->isObjCQualifiedInterfaceType()) - return true; - } - - } - return false; -} - -void RewriteModernObjC::GetExtentOfArgList(const char *Name, const char *&LParen, - const char *&RParen) { - const char *argPtr = strchr(Name, '('); - assert((*argPtr == '(') && "Rewriter fuzzy parser confused"); - - LParen = argPtr; // output the start. - argPtr++; // skip past the left paren. 
- unsigned parenCount = 1; - - while (*argPtr && parenCount) { - switch (*argPtr) { - case '(': parenCount++; break; - case ')': parenCount--; break; - default: break; - } - if (parenCount) argPtr++; - } - assert((*argPtr == ')') && "Rewriter fuzzy parser confused"); - RParen = argPtr; // output the end -} - -void RewriteModernObjC::RewriteBlockPointerDecl(NamedDecl *ND) { - if (FunctionDecl *FD = dyn_cast(ND)) { - RewriteBlockPointerFunctionArgs(FD); - return; - } - // Handle Variables and Typedefs. - SourceLocation DeclLoc = ND->getLocation(); - QualType DeclT; - if (VarDecl *VD = dyn_cast(ND)) - DeclT = VD->getType(); - else if (TypedefNameDecl *TDD = dyn_cast(ND)) - DeclT = TDD->getUnderlyingType(); - else if (FieldDecl *FD = dyn_cast(ND)) - DeclT = FD->getType(); - else - llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled"); - - const char *startBuf = SM->getCharacterData(DeclLoc); - const char *endBuf = startBuf; - // scan backward (from the decl location) for the end of the previous decl. - while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart) - startBuf--; - SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf); - std::string buf; - unsigned OrigLength=0; - // *startBuf != '^' if we are dealing with a pointer to function that - // may take block argument types (which will be handled below). - if (*startBuf == '^') { - // Replace the '^' with '*', computing a negative offset. - buf = '*'; - startBuf++; - OrigLength++; - } - while (*startBuf != ')') { - buf += *startBuf; - startBuf++; - OrigLength++; - } - buf += ')'; - OrigLength++; - - if (PointerTypeTakesAnyBlockArguments(DeclT) || - PointerTypeTakesAnyObjCQualifiedType(DeclT)) { - // Replace the '^' with '*' for arguments. - // Replace id
<P>
    with id/*<>*/ - DeclLoc = ND->getLocation(); - startBuf = SM->getCharacterData(DeclLoc); - const char *argListBegin, *argListEnd; - GetExtentOfArgList(startBuf, argListBegin, argListEnd); - while (argListBegin < argListEnd) { - if (*argListBegin == '^') - buf += '*'; - else if (*argListBegin == '<') { - buf += "/*"; - buf += *argListBegin++; - OrigLength++;; - while (*argListBegin != '>') { - buf += *argListBegin++; - OrigLength++; - } - buf += *argListBegin; - buf += "*/"; - } - else - buf += *argListBegin; - argListBegin++; - OrigLength++; - } - buf += ')'; - OrigLength++; - } - ReplaceText(Start, OrigLength, buf); - - return; -} - - -/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes: -/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst, -/// struct Block_byref_id_object *src) { -/// _Block_object_assign (&_dest->object, _src->object, -/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT -/// [|BLOCK_FIELD_IS_WEAK]) // object -/// _Block_object_assign(&_dest->object, _src->object, -/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK -/// [|BLOCK_FIELD_IS_WEAK]) // block -/// } -/// And: -/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) { -/// _Block_object_dispose(_src->object, -/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT -/// [|BLOCK_FIELD_IS_WEAK]) // object -/// _Block_object_dispose(_src->object, -/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK -/// [|BLOCK_FIELD_IS_WEAK]) // block -/// } - -std::string RewriteModernObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD, - int flag) { - std::string S; - if (CopyDestroyCache.count(flag)) - return S; - CopyDestroyCache.insert(flag); - S = "static void __Block_byref_id_object_copy_"; - S += utostr(flag); - S += "(void *dst, void *src) {\n"; - - // offset into the object pointer is computed as: - // void * + void* + int + int + void* + void * - unsigned IntSize = - static_cast(Context->getTypeSize(Context->IntTy)); - unsigned VoidPtrSize = - static_cast(Context->getTypeSize(Context->VoidPtrTy)); - - unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth(); - S += " _Block_object_assign((char*)dst + "; - S += utostr(offset); - S += ", *(void * *) ((char*)src + "; - S += utostr(offset); - S += "), "; - S += utostr(flag); - S += ");\n}\n"; - - S += "static void __Block_byref_id_object_dispose_"; - S += utostr(flag); - S += "(void *src) {\n"; - S += " _Block_object_dispose(*(void * *) ((char*)src + "; - S += utostr(offset); - S += "), "; - S += utostr(flag); - S += ");\n}\n"; - return S; -} - -/// RewriteByRefVar - For each __block typex ND variable this routine transforms -/// the declaration into: -/// struct __Block_byref_ND { -/// void *__isa; // NULL for everything except __weak pointers -/// struct __Block_byref_ND *__forwarding; -/// int32_t __flags; -/// int32_t __size; -/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object -/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object -/// typex ND; -/// }; -/// -/// It then replaces declaration of ND variable with: -/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag, -/// __size=sizeof(struct __Block_byref_ND), -/// ND=initializer-if-any}; -/// -/// -void RewriteModernObjC::RewriteByRefVar(VarDecl *ND, bool firstDecl, - bool lastDecl) { - int flag = 0; - int isa = 0; - SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); - if (DeclLoc.isInvalid()) - // If type location is missing, it is because of missing type (a warning). 
- // Use variable's location which is good for this case. - DeclLoc = ND->getLocation(); - const char *startBuf = SM->getCharacterData(DeclLoc); - SourceLocation X = ND->getLocEnd(); - X = SM->getExpansionLoc(X); - const char *endBuf = SM->getCharacterData(X); - std::string Name(ND->getNameAsString()); - std::string ByrefType; - RewriteByRefString(ByrefType, Name, ND, true); - ByrefType += " {\n"; - ByrefType += " void *__isa;\n"; - RewriteByRefString(ByrefType, Name, ND); - ByrefType += " *__forwarding;\n"; - ByrefType += " int __flags;\n"; - ByrefType += " int __size;\n"; - // Add void *__Block_byref_id_object_copy; - // void *__Block_byref_id_object_dispose; if needed. - QualType Ty = ND->getType(); - bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty); - if (HasCopyAndDispose) { - ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n"; - ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n"; - } - - QualType T = Ty; - (void)convertBlockPointerToFunctionPointer(T); - T.getAsStringInternal(Name, Context->getPrintingPolicy()); - - ByrefType += " " + Name + ";\n"; - ByrefType += "};\n"; - // Insert this type in global scope. It is needed by helper function. - SourceLocation FunLocStart; - if (CurFunctionDef) - FunLocStart = getFunctionSourceLocation(*this, CurFunctionDef); - else { - assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null"); - FunLocStart = CurMethodDef->getLocStart(); - } - InsertText(FunLocStart, ByrefType); - - if (Ty.isObjCGCWeak()) { - flag |= BLOCK_FIELD_IS_WEAK; - isa = 1; - } - if (HasCopyAndDispose) { - flag = BLOCK_BYREF_CALLER; - QualType Ty = ND->getType(); - // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well. - if (Ty->isBlockPointerType()) - flag |= BLOCK_FIELD_IS_BLOCK; - else - flag |= BLOCK_FIELD_IS_OBJECT; - std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag); - if (!HF.empty()) - InsertText(FunLocStart, HF); - } - - // struct __Block_byref_ND ND = - // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND), - // initializer-if-any}; - bool hasInit = (ND->getInit() != 0); - // FIXME. rewriter does not support __block c++ objects which - // require construction. - if (hasInit) - if (CXXConstructExpr *CExp = dyn_cast(ND->getInit())) { - CXXConstructorDecl *CXXDecl = CExp->getConstructor(); - if (CXXDecl && CXXDecl->isDefaultConstructor()) - hasInit = false; - } - - unsigned flags = 0; - if (HasCopyAndDispose) - flags |= BLOCK_HAS_COPY_DISPOSE; - Name = ND->getNameAsString(); - ByrefType.clear(); - RewriteByRefString(ByrefType, Name, ND); - std::string ForwardingCastType("("); - ForwardingCastType += ByrefType + " *)"; - ByrefType += " " + Name + " = {(void*)"; - ByrefType += utostr(isa); - ByrefType += "," + ForwardingCastType + "&" + Name + ", "; - ByrefType += utostr(flags); - ByrefType += ", "; - ByrefType += "sizeof("; - RewriteByRefString(ByrefType, Name, ND); - ByrefType += ")"; - if (HasCopyAndDispose) { - ByrefType += ", __Block_byref_id_object_copy_"; - ByrefType += utostr(flag); - ByrefType += ", __Block_byref_id_object_dispose_"; - ByrefType += utostr(flag); - } - - if (!firstDecl) { - // In multiple __block declarations, and for all but 1st declaration, - // find location of the separating comma. This would be start location - // where new text is to be inserted. 
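To make the preceding construction concrete, an editorial sketch (not taken from the patch) of what RewriteByRefVar produces for a plain __block int i = 10; inside main(), assuming the type needs no copy/dispose helpers; the _0 suffix corresponds to the BlockByRefDeclNo counter.

    struct __Block_byref_i_0 {
      void *__isa;
      struct __Block_byref_i_0 *__forwarding;
      int __flags;
      int __size;
      int i;
    };
    // The original declaration is replaced with:
    struct __Block_byref_i_0 i = {
      (void *)0, (struct __Block_byref_i_0 *)&i, 0,
      sizeof(struct __Block_byref_i_0), 10
    };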
- DeclLoc = ND->getLocation(); - const char *startDeclBuf = SM->getCharacterData(DeclLoc); - const char *commaBuf = startDeclBuf; - while (*commaBuf != ',') - commaBuf--; - assert((*commaBuf == ',') && "RewriteByRefVar: can't find ','"); - DeclLoc = DeclLoc.getLocWithOffset(commaBuf - startDeclBuf); - startBuf = commaBuf; - } - - if (!hasInit) { - ByrefType += "};\n"; - unsigned nameSize = Name.size(); - // for block or function pointer declaration. Name is aleady - // part of the declaration. - if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) - nameSize = 1; - ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType); - } - else { - ByrefType += ", "; - SourceLocation startLoc; - Expr *E = ND->getInit(); - if (const CStyleCastExpr *ECE = dyn_cast(E)) - startLoc = ECE->getLParenLoc(); - else - startLoc = E->getLocStart(); - startLoc = SM->getExpansionLoc(startLoc); - endBuf = SM->getCharacterData(startLoc); - ReplaceText(DeclLoc, endBuf-startBuf, ByrefType); - - const char separator = lastDecl ? ';' : ','; - const char *startInitializerBuf = SM->getCharacterData(startLoc); - const char *separatorBuf = strchr(startInitializerBuf, separator); - assert((*separatorBuf == separator) && - "RewriteByRefVar: can't find ';' or ','"); - SourceLocation separatorLoc = - startLoc.getLocWithOffset(separatorBuf-startInitializerBuf); - - InsertText(separatorLoc, lastDecl ? "}" : "};\n"); - } - return; -} - -void RewriteModernObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) { - // Add initializers for any closure decl refs. - GetBlockDeclRefExprs(Exp->getBody()); - if (BlockDeclRefs.size()) { - // Unique all "by copy" declarations. - for (unsigned i = 0; i < BlockDeclRefs.size(); i++) - if (!BlockDeclRefs[i]->getDecl()->hasAttr()) { - if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) { - BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl()); - BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl()); - } - } - // Unique all "by ref" declarations. - for (unsigned i = 0; i < BlockDeclRefs.size(); i++) - if (BlockDeclRefs[i]->getDecl()->hasAttr()) { - if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) { - BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl()); - BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl()); - } - } - // Find any imported blocks...they will need special attention. - for (unsigned i = 0; i < BlockDeclRefs.size(); i++) - if (BlockDeclRefs[i]->getDecl()->hasAttr() || - BlockDeclRefs[i]->getType()->isObjCObjectPointerType() || - BlockDeclRefs[i]->getType()->isBlockPointerType()) - ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl()); - } -} - -FunctionDecl *RewriteModernObjC::SynthBlockInitFunctionDecl(StringRef name) { - IdentifierInfo *ID = &Context->Idents.get(name); - QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy); - return FunctionDecl::Create(*Context, TUDecl, SourceLocation(), - SourceLocation(), ID, FType, 0, SC_Extern, - SC_None, false, false); -} - -Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp, - const SmallVector &InnerBlockDeclRefs) { - - const BlockDecl *block = Exp->getBlockDecl(); - - Blocks.push_back(Exp); - - CollectBlockDeclRefInfo(Exp); - - // Add inner imported variables now used in current block. 
- int countOfInnerDecls = 0; - if (!InnerBlockDeclRefs.empty()) { - for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) { - DeclRefExpr *Exp = InnerBlockDeclRefs[i]; - ValueDecl *VD = Exp->getDecl(); - if (!VD->hasAttr() && !BlockByCopyDeclsPtrSet.count(VD)) { - // We need to save the copied-in variables in nested - // blocks because it is needed at the end for some of the API generations. - // See SynthesizeBlockLiterals routine. - InnerDeclRefs.push_back(Exp); countOfInnerDecls++; - BlockDeclRefs.push_back(Exp); - BlockByCopyDeclsPtrSet.insert(VD); - BlockByCopyDecls.push_back(VD); - } - if (VD->hasAttr() && !BlockByRefDeclsPtrSet.count(VD)) { - InnerDeclRefs.push_back(Exp); countOfInnerDecls++; - BlockDeclRefs.push_back(Exp); - BlockByRefDeclsPtrSet.insert(VD); - BlockByRefDecls.push_back(VD); - } - } - // Find any imported blocks...they will need special attention. - for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) - if (InnerBlockDeclRefs[i]->getDecl()->hasAttr() || - InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() || - InnerBlockDeclRefs[i]->getType()->isBlockPointerType()) - ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl()); - } - InnerDeclRefsCount.push_back(countOfInnerDecls); - - std::string FuncName; - - if (CurFunctionDef) - FuncName = CurFunctionDef->getNameAsString(); - else if (CurMethodDef) - BuildUniqueMethodName(FuncName, CurMethodDef); - else if (GlobalVarDecl) - FuncName = std::string(GlobalVarDecl->getNameAsString()); - - bool GlobalBlockExpr = - block->getDeclContext()->getRedeclContext()->isFileContext(); - - if (GlobalBlockExpr && !GlobalVarDecl) { - Diags.Report(block->getLocation(), GlobalBlockRewriteFailedDiag); - GlobalBlockExpr = false; - } - - std::string BlockNumber = utostr(Blocks.size()-1); - - std::string Func = "__" + FuncName + "_block_func_" + BlockNumber; - - // Get a pointer to the function type so we can cast appropriately. - QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType()); - QualType FType = Context->getPointerType(BFT); - - FunctionDecl *FD; - Expr *NewRep; - - // Simulate a contructor call... - std::string Tag; - - if (GlobalBlockExpr) - Tag = "__global_"; - else - Tag = "__"; - Tag += FuncName + "_block_impl_" + BlockNumber; - - FD = SynthBlockInitFunctionDecl(Tag); - DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue, - SourceLocation()); - - SmallVector InitExprs; - - // Initialize the block function. - FD = SynthBlockInitFunctionDecl(Func); - DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), - VK_LValue, SourceLocation()); - CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, - CK_BitCast, Arg); - InitExprs.push_back(castExpr); - - // Initialize the block descriptor. - std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA"; - - VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get(DescData.c_str()), - Context->VoidPtrTy, 0, - SC_Static, SC_None); - UnaryOperator *DescRefExpr = - new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false, - Context->VoidPtrTy, - VK_LValue, - SourceLocation()), - UO_AddrOf, - Context->getPointerType(Context->VoidPtrTy), - VK_RValue, OK_Ordinary, - SourceLocation()); - InitExprs.push_back(DescRefExpr); - - // Add initializers for any closure decl refs. - if (BlockDeclRefs.size()) { - Expr *Exp; - // Output all "by copy" declarations. 
- for (SmallVector::iterator I = BlockByCopyDecls.begin(), - E = BlockByCopyDecls.end(); I != E; ++I) { - if (isObjCType((*I)->getType())) { - // FIXME: Conform to ABI ([[obj retain] autorelease]). - FD = SynthBlockInitFunctionDecl((*I)->getName()); - Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), - VK_LValue, SourceLocation()); - if (HasLocalVariableExternalStorage(*I)) { - QualType QT = (*I)->getType(); - QT = Context->getPointerType(QT); - Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue, - OK_Ordinary, SourceLocation()); - } - } else if (isTopLevelBlockPointerType((*I)->getType())) { - FD = SynthBlockInitFunctionDecl((*I)->getName()); - Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), - VK_LValue, SourceLocation()); - Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, - CK_BitCast, Arg); - } else { - FD = SynthBlockInitFunctionDecl((*I)->getName()); - Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), - VK_LValue, SourceLocation()); - if (HasLocalVariableExternalStorage(*I)) { - QualType QT = (*I)->getType(); - QT = Context->getPointerType(QT); - Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue, - OK_Ordinary, SourceLocation()); - } - - } - InitExprs.push_back(Exp); - } - // Output all "by ref" declarations. - for (SmallVector::iterator I = BlockByRefDecls.begin(), - E = BlockByRefDecls.end(); I != E; ++I) { - ValueDecl *ND = (*I); - std::string Name(ND->getNameAsString()); - std::string RecName; - RewriteByRefString(RecName, Name, ND, true); - IdentifierInfo *II = &Context->Idents.get(RecName.c_str() - + sizeof("struct")); - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - II); - assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl"); - QualType castT = Context->getPointerType(Context->getTagDeclType(RD)); - - FD = SynthBlockInitFunctionDecl((*I)->getName()); - Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, - SourceLocation()); - bool isNestedCapturedVar = false; - if (block) - for (BlockDecl::capture_const_iterator ci = block->capture_begin(), - ce = block->capture_end(); ci != ce; ++ci) { - const VarDecl *variable = ci->getVariable(); - if (variable == ND && ci->isNested()) { - assert (ci->isByRef() && - "SynthBlockInitExpr - captured block variable is not byref"); - isNestedCapturedVar = true; - break; - } - } - // captured nested byref variable has its address passed. Do not take - // its address again. 
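For orientation (editorial, not in the patch), the initializer expression SynthBlockInitExpr assembles here prints roughly as follows for a block literal in main() that copies in an int x; the names are illustrative, the declaration on the left has already had its '^' rewritten to '*' elsewhere, and taking the address of the constructor temporary relies on a compiler extension that the rewritten output depends on.

    void (*myBlock)(void) =
        (void (*)())&__main_block_impl_0((void *)__main_block_func_0,
                                         &__main_block_desc_0_DATA, x);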
- if (!isNestedCapturedVar) - Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, - Context->getPointerType(Exp->getType()), - VK_RValue, OK_Ordinary, SourceLocation()); - Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp); - InitExprs.push_back(Exp); - } - } - if (ImportedBlockDecls.size()) { - // generate BLOCK_HAS_COPY_DISPOSE(have helper funcs) | BLOCK_HAS_DESCRIPTOR - int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR); - unsigned IntSize = - static_cast(Context->getTypeSize(Context->IntTy)); - Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag), - Context->IntTy, SourceLocation()); - InitExprs.push_back(FlagExp); - } - NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(), - FType, VK_LValue, SourceLocation()); - - if (GlobalBlockExpr) { - assert (GlobalConstructionExp == 0 && - "SynthBlockInitExpr - GlobalConstructionExp must be null"); - GlobalConstructionExp = NewRep; - NewRep = DRE; - } - - NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf, - Context->getPointerType(NewRep->getType()), - VK_RValue, OK_Ordinary, SourceLocation()); - NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast, - NewRep); - BlockDeclRefs.clear(); - BlockByRefDecls.clear(); - BlockByRefDeclsPtrSet.clear(); - BlockByCopyDecls.clear(); - BlockByCopyDeclsPtrSet.clear(); - ImportedBlockDecls.clear(); - return NewRep; -} - -bool RewriteModernObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) { - if (const ObjCForCollectionStmt * CS = - dyn_cast(Stmts.back())) - return CS->getElement() == DS; - return false; -} - -//===----------------------------------------------------------------------===// -// Function Body / Expression rewriting -//===----------------------------------------------------------------------===// - -Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) { - if (isa(S) || isa(S) || - isa(S) || isa(S)) - Stmts.push_back(S); - else if (isa(S)) { - Stmts.push_back(S); - ObjCBcLabelNo.push_back(++BcLabelCount); - } - - // Pseudo-object operations and ivar references need special - // treatment because we're going to recursively rewrite them. - if (PseudoObjectExpr *PseudoOp = dyn_cast(S)) { - if (isa(PseudoOp->getSyntacticForm())) { - return RewritePropertyOrImplicitSetter(PseudoOp); - } else { - return RewritePropertyOrImplicitGetter(PseudoOp); - } - } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast(S)) { - return RewriteObjCIvarRefExpr(IvarRefExpr); - } - - SourceRange OrigStmtRange = S->getSourceRange(); - - // Perform a bottom up rewrite of all children. - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) { - Stmt *childStmt = (*CI); - Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt); - if (newStmt) { - *CI = newStmt; - } - } - - if (BlockExpr *BE = dyn_cast(S)) { - SmallVector InnerBlockDeclRefs; - llvm::SmallPtrSet InnerContexts; - InnerContexts.insert(BE->getBlockDecl()); - ImportedLocalExternalDecls.clear(); - GetInnerBlockDeclRefExprs(BE->getBody(), - InnerBlockDeclRefs, InnerContexts); - // Rewrite the block body in place. - Stmt *SaveCurrentBody = CurrentBody; - CurrentBody = BE->getBody(); - PropParentMap = 0; - // block literal on rhs of a property-dot-sytax assignment - // must be replaced by its synthesize ast so getRewrittenText - // works as expected. 
In this case, what actually ends up on RHS - // is the blockTranscribed which is the helper function for the - // block literal; as in: self.c = ^() {[ace ARR];}; - bool saveDisableReplaceStmt = DisableReplaceStmt; - DisableReplaceStmt = false; - RewriteFunctionBodyOrGlobalInitializer(BE->getBody()); - DisableReplaceStmt = saveDisableReplaceStmt; - CurrentBody = SaveCurrentBody; - PropParentMap = 0; - ImportedLocalExternalDecls.clear(); - // Now we snarf the rewritten text and stash it away for later use. - std::string Str = Rewrite.getRewrittenText(BE->getSourceRange()); - RewrittenBlockExprs[BE] = Str; - - Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs); - - //blockTranscribed->dump(); - ReplaceStmt(S, blockTranscribed); - return blockTranscribed; - } - // Handle specific things. - if (ObjCEncodeExpr *AtEncode = dyn_cast(S)) - return RewriteAtEncode(AtEncode); - - if (ObjCSelectorExpr *AtSelector = dyn_cast(S)) - return RewriteAtSelector(AtSelector); - - if (ObjCStringLiteral *AtString = dyn_cast(S)) - return RewriteObjCStringLiteral(AtString); - - if (ObjCBoolLiteralExpr *BoolLitExpr = dyn_cast(S)) - return RewriteObjCBoolLiteralExpr(BoolLitExpr); - - if (ObjCBoxedExpr *BoxedExpr = dyn_cast(S)) - return RewriteObjCBoxedExpr(BoxedExpr); - - if (ObjCArrayLiteral *ArrayLitExpr = dyn_cast(S)) - return RewriteObjCArrayLiteralExpr(ArrayLitExpr); - - if (ObjCDictionaryLiteral *DictionaryLitExpr = - dyn_cast(S)) - return RewriteObjCDictionaryLiteralExpr(DictionaryLitExpr); - - if (ObjCMessageExpr *MessExpr = dyn_cast(S)) { -#if 0 - // Before we rewrite it, put the original message expression in a comment. - SourceLocation startLoc = MessExpr->getLocStart(); - SourceLocation endLoc = MessExpr->getLocEnd(); - - const char *startBuf = SM->getCharacterData(startLoc); - const char *endBuf = SM->getCharacterData(endLoc); - - std::string messString; - messString += "// "; - messString.append(startBuf, endBuf-startBuf+1); - messString += "\n"; - - // FIXME: Missing definition of - // InsertText(clang::SourceLocation, char const*, unsigned int). - // InsertText(startLoc, messString.c_str(), messString.size()); - // Tried this, but it didn't work either... - // ReplaceText(startLoc, 0, messString.c_str(), messString.size()); -#endif - return RewriteMessageExpr(MessExpr); - } - - if (ObjCAutoreleasePoolStmt *StmtAutoRelease = - dyn_cast(S)) { - return RewriteObjCAutoreleasePoolStmt(StmtAutoRelease); - } - - if (ObjCAtTryStmt *StmtTry = dyn_cast(S)) - return RewriteObjCTryStmt(StmtTry); - - if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast(S)) - return RewriteObjCSynchronizedStmt(StmtTry); - - if (ObjCAtThrowStmt *StmtThrow = dyn_cast(S)) - return RewriteObjCThrowStmt(StmtThrow); - - if (ObjCProtocolExpr *ProtocolExp = dyn_cast(S)) - return RewriteObjCProtocolExpr(ProtocolExp); - - if (ObjCForCollectionStmt *StmtForCollection = - dyn_cast(S)) - return RewriteObjCForCollectionStmt(StmtForCollection, - OrigStmtRange.getEnd()); - if (BreakStmt *StmtBreakStmt = - dyn_cast(S)) - return RewriteBreakStmt(StmtBreakStmt); - if (ContinueStmt *StmtContinueStmt = - dyn_cast(S)) - return RewriteContinueStmt(StmtContinueStmt); - - // Need to check for protocol refs (id
<P>, Foo <P>
    *) in variable decls - // and cast exprs. - if (DeclStmt *DS = dyn_cast(S)) { - // FIXME: What we're doing here is modifying the type-specifier that - // precedes the first Decl. In the future the DeclGroup should have - // a separate type-specifier that we can rewrite. - // NOTE: We need to avoid rewriting the DeclStmt if it is within - // the context of an ObjCForCollectionStmt. For example: - // NSArray *someArray; - // for (id index in someArray) ; - // This is because RewriteObjCForCollectionStmt() does textual rewriting - // and it depends on the original text locations/positions. - if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS)) - RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin()); - - // Blocks rewrite rules. - for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end(); - DI != DE; ++DI) { - Decl *SD = *DI; - if (ValueDecl *ND = dyn_cast(SD)) { - if (isTopLevelBlockPointerType(ND->getType())) - RewriteBlockPointerDecl(ND); - else if (ND->getType()->isFunctionPointerType()) - CheckFunctionPointerDecl(ND->getType(), ND); - if (VarDecl *VD = dyn_cast(SD)) { - if (VD->hasAttr()) { - static unsigned uniqueByrefDeclCount = 0; - assert(!BlockByRefDeclNo.count(ND) && - "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl"); - BlockByRefDeclNo[ND] = uniqueByrefDeclCount++; - RewriteByRefVar(VD, (DI == DS->decl_begin()), ((DI+1) == DE)); - } - else - RewriteTypeOfDecl(VD); - } - } - if (TypedefNameDecl *TD = dyn_cast(SD)) { - if (isTopLevelBlockPointerType(TD->getUnderlyingType())) - RewriteBlockPointerDecl(TD); - else if (TD->getUnderlyingType()->isFunctionPointerType()) - CheckFunctionPointerDecl(TD->getUnderlyingType(), TD); - } - } - } - - if (CStyleCastExpr *CE = dyn_cast(S)) - RewriteObjCQualifiedInterfaceTypes(CE); - - if (isa(S) || isa(S) || - isa(S) || isa(S)) { - assert(!Stmts.empty() && "Statement stack is empty"); - assert ((isa(Stmts.back()) || isa(Stmts.back()) || - isa(Stmts.back()) || isa(Stmts.back())) - && "Statement stack mismatch"); - Stmts.pop_back(); - } - // Handle blocks rewriting. - if (DeclRefExpr *DRE = dyn_cast(S)) { - ValueDecl *VD = DRE->getDecl(); - if (VD->hasAttr()) - return RewriteBlockDeclRefExpr(DRE); - if (HasLocalVariableExternalStorage(VD)) - return RewriteLocalVariableExternalStorage(DRE); - } - - if (CallExpr *CE = dyn_cast(S)) { - if (CE->getCallee()->getType()->isBlockPointerType()) { - Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee()); - ReplaceStmt(S, BlockCall); - return BlockCall; - } - } - if (CStyleCastExpr *CE = dyn_cast(S)) { - RewriteCastExpr(CE); - } - if (ImplicitCastExpr *ICE = dyn_cast(S)) { - RewriteImplicitCastObjCExpr(ICE); - } -#if 0 - - if (ImplicitCastExpr *ICE = dyn_cast(S)) { - CastExpr *Replacement = new (Context) CastExpr(ICE->getType(), - ICE->getSubExpr(), - SourceLocation()); - // Get the new text. - std::string SStr; - llvm::raw_string_ostream Buf(SStr); - Replacement->printPretty(Buf); - const std::string &Str = Buf.str(); - - printf("CAST = %s\n", &Str[0]); - InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size()); - delete S; - return Replacement; - } -#endif - // Return this stmt unmodified. 
- return S; -} - -void RewriteModernObjC::RewriteRecordBody(RecordDecl *RD) { - for (RecordDecl::field_iterator i = RD->field_begin(), - e = RD->field_end(); i != e; ++i) { - FieldDecl *FD = *i; - if (isTopLevelBlockPointerType(FD->getType())) - RewriteBlockPointerDecl(FD); - if (FD->getType()->isObjCQualifiedIdType() || - FD->getType()->isObjCQualifiedInterfaceType()) - RewriteObjCQualifiedInterfaceTypes(FD); - } -} - -/// HandleDeclInMainFile - This is called for each top-level decl defined in the -/// main file of the input. -void RewriteModernObjC::HandleDeclInMainFile(Decl *D) { - switch (D->getKind()) { - case Decl::Function: { - FunctionDecl *FD = cast(D); - if (FD->isOverloadedOperator()) - return; - - // Since function prototypes don't have ParmDecl's, we check the function - // prototype. This enables us to rewrite function declarations and - // definitions using the same code. - RewriteBlocksInFunctionProtoType(FD->getType(), FD); - - if (!FD->isThisDeclarationADefinition()) - break; - - // FIXME: If this should support Obj-C++, support CXXTryStmt - if (CompoundStmt *Body = dyn_cast_or_null(FD->getBody())) { - CurFunctionDef = FD; - CurrentBody = Body; - Body = - cast_or_null(RewriteFunctionBodyOrGlobalInitializer(Body)); - FD->setBody(Body); - CurrentBody = 0; - if (PropParentMap) { - delete PropParentMap; - PropParentMap = 0; - } - // This synthesizes and inserts the block "impl" struct, invoke function, - // and any copy/dispose helper functions. - InsertBlockLiteralsWithinFunction(FD); - CurFunctionDef = 0; - } - break; - } - case Decl::ObjCMethod: { - ObjCMethodDecl *MD = cast(D); - if (CompoundStmt *Body = MD->getCompoundBody()) { - CurMethodDef = MD; - CurrentBody = Body; - Body = - cast_or_null(RewriteFunctionBodyOrGlobalInitializer(Body)); - MD->setBody(Body); - CurrentBody = 0; - if (PropParentMap) { - delete PropParentMap; - PropParentMap = 0; - } - InsertBlockLiteralsWithinMethod(MD); - CurMethodDef = 0; - } - break; - } - case Decl::ObjCImplementation: { - ObjCImplementationDecl *CI = cast(D); - ClassImplementation.push_back(CI); - break; - } - case Decl::ObjCCategoryImpl: { - ObjCCategoryImplDecl *CI = cast(D); - CategoryImplementation.push_back(CI); - break; - } - case Decl::Var: { - VarDecl *VD = cast(D); - RewriteObjCQualifiedInterfaceTypes(VD); - if (isTopLevelBlockPointerType(VD->getType())) - RewriteBlockPointerDecl(VD); - else if (VD->getType()->isFunctionPointerType()) { - CheckFunctionPointerDecl(VD->getType(), VD); - if (VD->getInit()) { - if (CStyleCastExpr *CE = dyn_cast(VD->getInit())) { - RewriteCastExpr(CE); - } - } - } else if (VD->getType()->isRecordType()) { - RecordDecl *RD = VD->getType()->getAs()->getDecl(); - if (RD->isCompleteDefinition()) - RewriteRecordBody(RD); - } - if (VD->getInit()) { - GlobalVarDecl = VD; - CurrentBody = VD->getInit(); - RewriteFunctionBodyOrGlobalInitializer(VD->getInit()); - CurrentBody = 0; - if (PropParentMap) { - delete PropParentMap; - PropParentMap = 0; - } - SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName()); - GlobalVarDecl = 0; - - // This is needed for blocks. 
- if (CStyleCastExpr *CE = dyn_cast(VD->getInit())) { - RewriteCastExpr(CE); - } - } - break; - } - case Decl::TypeAlias: - case Decl::Typedef: { - if (TypedefNameDecl *TD = dyn_cast(D)) { - if (isTopLevelBlockPointerType(TD->getUnderlyingType())) - RewriteBlockPointerDecl(TD); - else if (TD->getUnderlyingType()->isFunctionPointerType()) - CheckFunctionPointerDecl(TD->getUnderlyingType(), TD); - } - break; - } - case Decl::CXXRecord: - case Decl::Record: { - RecordDecl *RD = cast(D); - if (RD->isCompleteDefinition()) - RewriteRecordBody(RD); - break; - } - default: - break; - } - // Nothing yet. -} - -/// Write_ProtocolExprReferencedMetadata - This routine writer out the -/// protocol reference symbols in the for of: -/// struct _protocol_t *PROTOCOL_REF = &PROTOCOL_METADATA. -static void Write_ProtocolExprReferencedMetadata(ASTContext *Context, - ObjCProtocolDecl *PDecl, - std::string &Result) { - // Also output .objc_protorefs$B section and its meta-data. - if (Context->getLangOpts().MicrosoftExt) - Result += "static "; - Result += "struct _protocol_t *"; - Result += "_OBJC_PROTOCOL_REFERENCE_$_"; - Result += PDecl->getNameAsString(); - Result += " = &"; - Result += "_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString(); - Result += ";\n"; -} - -void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) { - if (Diags.hasErrorOccurred()) - return; - - RewriteInclude(); - - // Here's a great place to add any extra declarations that may be needed. - // Write out meta data for each @protocol(). - for (llvm::SmallPtrSet::iterator I = ProtocolExprDecls.begin(), - E = ProtocolExprDecls.end(); I != E; ++I) { - RewriteObjCProtocolMetaData(*I, Preamble); - Write_ProtocolExprReferencedMetadata(Context, (*I), Preamble); - } - - InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false); - - if (ClassImplementation.size() || CategoryImplementation.size()) - RewriteImplementations(); - - for (unsigned i = 0, e = ObjCInterfacesSeen.size(); i < e; i++) { - ObjCInterfaceDecl *CDecl = ObjCInterfacesSeen[i]; - // Write struct declaration for the class matching its ivar declarations. - // Note that for modern abi, this is postponed until the end of TU - // because class extensions and the implementation might declare their own - // private ivars. - RewriteInterfaceDecl(CDecl); - } - - // Get the buffer corresponding to MainFileID. If we haven't changed it, then - // we are done. - if (const RewriteBuffer *RewriteBuf = - Rewrite.getRewriteBufferFor(MainFileID)) { - //printf("Changed:\n"); - *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end()); - } else { - llvm::errs() << "No changes\n"; - } - - if (ClassImplementation.size() || CategoryImplementation.size() || - ProtocolExprDecls.size()) { - // Rewrite Objective-c meta data* - std::string ResultStr; - RewriteMetaDataIntoBuffer(ResultStr); - // Emit metadata. - *OutFile << ResultStr; - } - // Emit ImageInfo; - { - std::string ResultStr; - WriteImageInfo(ResultStr); - *OutFile << ResultStr; - } - OutFile->flush(); -} - -void RewriteModernObjC::Initialize(ASTContext &context) { - InitializeCommon(context); - - Preamble += "#ifndef __OBJC2__\n"; - Preamble += "#define __OBJC2__\n"; - Preamble += "#endif\n"; - - // declaring objc_selector outside the parameter list removes a silly - // scope related warning... 
- if (IsHeader) - Preamble = "#pragma once\n"; - Preamble += "struct objc_selector; struct objc_class;\n"; - Preamble += "struct __rw_objc_super { \n\tstruct objc_object *object; "; - Preamble += "\n\tstruct objc_object *superClass; "; - // Add a constructor for creating temporary objects. - Preamble += "\n\t__rw_objc_super(struct objc_object *o, struct objc_object *s) "; - Preamble += ": object(o), superClass(s) {} "; - Preamble += "\n};\n"; - - if (LangOpts.MicrosoftExt) { - // Define all sections using syntax that makes sense. - // These are currently generated. - Preamble += "\n#pragma section(\".objc_classlist$B\", long, read, write)\n"; - Preamble += "#pragma section(\".objc_catlist$B\", long, read, write)\n"; - Preamble += "#pragma section(\".objc_imageinfo$B\", long, read, write)\n"; - Preamble += "#pragma section(\".objc_nlclslist$B\", long, read, write)\n"; - Preamble += "#pragma section(\".objc_nlcatlist$B\", long, read, write)\n"; - // These are generated but not necessary for functionality. - Preamble += "#pragma section(\".cat_cls_meth$B\", long, read, write)\n"; - Preamble += "#pragma section(\".inst_meth$B\", long, read, write)\n"; - Preamble += "#pragma section(\".cls_meth$B\", long, read, write)\n"; - Preamble += "#pragma section(\".objc_ivar$B\", long, read, write)\n"; - - // These need be generated for performance. Currently they are not, - // using API calls instead. - Preamble += "#pragma section(\".objc_selrefs$B\", long, read, write)\n"; - Preamble += "#pragma section(\".objc_classrefs$B\", long, read, write)\n"; - Preamble += "#pragma section(\".objc_superrefs$B\", long, read, write)\n"; - - } - Preamble += "#ifndef _REWRITER_typedef_Protocol\n"; - Preamble += "typedef struct objc_object Protocol;\n"; - Preamble += "#define _REWRITER_typedef_Protocol\n"; - Preamble += "#endif\n"; - if (LangOpts.MicrosoftExt) { - Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n"; - Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n"; - } - else - Preamble += "#define __OBJC_RW_DLLIMPORT extern\n"; - - Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend(void);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper(void);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_stret(void);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper_stret(void);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_fpret(void);\n"; - - Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getClass"; - Preamble += "(const char *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass"; - Preamble += "(struct objc_class *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *objc_getMetaClass"; - Preamble += "(const char *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw( struct objc_object *);\n"; - // @synchronized hooks. 
- Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter( struct objc_object *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit( struct objc_object *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n"; - Preamble += "#ifndef __FASTENUMERATIONSTATE\n"; - Preamble += "struct __objcFastEnumerationState {\n\t"; - Preamble += "unsigned long state;\n\t"; - Preamble += "void **itemsPtr;\n\t"; - Preamble += "unsigned long *mutationsPtr;\n\t"; - Preamble += "unsigned long extra[5];\n};\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n"; - Preamble += "#define __FASTENUMERATIONSTATE\n"; - Preamble += "#endif\n"; - Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n"; - Preamble += "struct __NSConstantStringImpl {\n"; - Preamble += " int *isa;\n"; - Preamble += " int flags;\n"; - Preamble += " char *str;\n"; - Preamble += " long length;\n"; - Preamble += "};\n"; - Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n"; - Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n"; - Preamble += "#else\n"; - Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n"; - Preamble += "#endif\n"; - Preamble += "#define __NSCONSTANTSTRINGIMPL\n"; - Preamble += "#endif\n"; - // Blocks preamble. - Preamble += "#ifndef BLOCK_IMPL\n"; - Preamble += "#define BLOCK_IMPL\n"; - Preamble += "struct __block_impl {\n"; - Preamble += " void *isa;\n"; - Preamble += " int Flags;\n"; - Preamble += " int Reserved;\n"; - Preamble += " void *FuncPtr;\n"; - Preamble += "};\n"; - Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n"; - Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n"; - Preamble += "extern \"C\" __declspec(dllexport) " - "void _Block_object_assign(void *, const void *, const int);\n"; - Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n"; - Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n"; - Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n"; - Preamble += "#else\n"; - Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n"; - Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n"; - Preamble += "#endif\n"; - Preamble += "#endif\n"; - if (LangOpts.MicrosoftExt) { - Preamble += "#undef __OBJC_RW_DLLIMPORT\n"; - Preamble += "#undef __OBJC_RW_STATICIMPORT\n"; - Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests. - Preamble += "#define __attribute__(X)\n"; - Preamble += "#endif\n"; - Preamble += "#ifndef __weak\n"; - Preamble += "#define __weak\n"; - Preamble += "#endif\n"; - Preamble += "#ifndef __block\n"; - Preamble += "#define __block\n"; - Preamble += "#endif\n"; - } - else { - Preamble += "#define __block\n"; - Preamble += "#define __weak\n"; - } - - // Declarations required for modern objective-c array and dictionary literals. - Preamble += "\n#include \n"; - Preamble += "struct __NSContainer_literal {\n"; - Preamble += " void * *arr;\n"; - Preamble += " __NSContainer_literal (unsigned int count, ...) 
{\n"; - Preamble += "\tva_list marker;\n"; - Preamble += "\tva_start(marker, count);\n"; - Preamble += "\tarr = new void *[count];\n"; - Preamble += "\tfor (unsigned i = 0; i < count; i++)\n"; - Preamble += "\t arr[i] = va_arg(marker, void *);\n"; - Preamble += "\tva_end( marker );\n"; - Preamble += " };\n"; - Preamble += " ~__NSContainer_literal() {\n"; - Preamble += "\tdelete[] arr;\n"; - Preamble += " }\n"; - Preamble += "};\n"; - - // Declaration required for implementation of @autoreleasepool statement. - Preamble += "extern \"C\" __declspec(dllimport) void * objc_autoreleasePoolPush(void);\n"; - Preamble += "extern \"C\" __declspec(dllimport) void objc_autoreleasePoolPop(void *);\n\n"; - Preamble += "struct __AtAutoreleasePool {\n"; - Preamble += " __AtAutoreleasePool() {atautoreleasepoolobj = objc_autoreleasePoolPush();}\n"; - Preamble += " ~__AtAutoreleasePool() {objc_autoreleasePoolPop(atautoreleasepoolobj);}\n"; - Preamble += " void * atautoreleasepoolobj;\n"; - Preamble += "};\n"; - - // NOTE! Windows uses LLP64 for 64bit mode. So, cast pointer to long long - // as this avoids warning in any 64bit/32bit compilation model. - Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n"; -} - -/// RewriteIvarOffsetComputation - This rutine synthesizes computation of -/// ivar offset. -void RewriteModernObjC::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, - std::string &Result) { - if (ivar->isBitField()) { - // FIXME: The hack below doesn't work for bitfields. For now, we simply - // place all bitfields at offset 0. - Result += "0"; - } else { - Result += "__OFFSETOFIVAR__(struct "; - Result += ivar->getContainingInterface()->getNameAsString(); - if (LangOpts.MicrosoftExt) - Result += "_IMPL"; - Result += ", "; - Result += ivar->getNameAsString(); - Result += ")"; - } -} - -/// WriteModernMetadataDeclarations - Writes out metadata declarations for modern ABI. 
-/// struct _prop_t { -/// const char *name; -/// char *attributes; -/// } - -/// struct _prop_list_t { -/// uint32_t entsize; // sizeof(struct _prop_t) -/// uint32_t count_of_properties; -/// struct _prop_t prop_list[count_of_properties]; -/// } - -/// struct _protocol_t; - -/// struct _protocol_list_t { -/// long protocol_count; // Note, this is 32/64 bit -/// struct _protocol_t * protocol_list[protocol_count]; -/// } - -/// struct _objc_method { -/// SEL _cmd; -/// const char *method_type; -/// char *_imp; -/// } - -/// struct _method_list_t { -/// uint32_t entsize; // sizeof(struct _objc_method) -/// uint32_t method_count; -/// struct _objc_method method_list[method_count]; -/// } - -/// struct _protocol_t { -/// id isa; // NULL -/// const char *protocol_name; -/// const struct _protocol_list_t * protocol_list; // super protocols -/// const struct method_list_t *instance_methods; -/// const struct method_list_t *class_methods; -/// const struct method_list_t *optionalInstanceMethods; -/// const struct method_list_t *optionalClassMethods; -/// const struct _prop_list_t * properties; -/// const uint32_t size; // sizeof(struct _protocol_t) -/// const uint32_t flags; // = 0 -/// const char ** extendedMethodTypes; -/// } - -/// struct _ivar_t { -/// unsigned long int *offset; // pointer to ivar offset location -/// const char *name; -/// const char *type; -/// uint32_t alignment; -/// uint32_t size; -/// } - -/// struct _ivar_list_t { -/// uint32 entsize; // sizeof(struct _ivar_t) -/// uint32 count; -/// struct _ivar_t list[count]; -/// } - -/// struct _class_ro_t { -/// uint32_t flags; -/// uint32_t instanceStart; -/// uint32_t instanceSize; -/// uint32_t reserved; // only when building for 64bit targets -/// const uint8_t *ivarLayout; -/// const char *name; -/// const struct _method_list_t *baseMethods; -/// const struct _protocol_list_t *baseProtocols; -/// const struct _ivar_list_t *ivars; -/// const uint8_t *weakIvarLayout; -/// const struct _prop_list_t *properties; -/// } - -/// struct _class_t { -/// struct _class_t *isa; -/// struct _class_t *superclass; -/// void *cache; -/// IMP *vtable; -/// struct _class_ro_t *ro; -/// } - -/// struct _category_t { -/// const char *name; -/// struct _class_t *cls; -/// const struct _method_list_t *instance_methods; -/// const struct _method_list_t *class_methods; -/// const struct _protocol_list_t *protocols; -/// const struct _prop_list_t *properties; -/// } - -/// MessageRefTy - LLVM for: -/// struct _message_ref_t { -/// IMP messenger; -/// SEL name; -/// }; - -/// SuperMessageRefTy - LLVM for: -/// struct _super_message_ref_t { -/// SUPER_IMP messenger; -/// SEL name; -/// }; - -static void WriteModernMetadataDeclarations(ASTContext *Context, std::string &Result) { - static bool meta_data_declared = false; - if (meta_data_declared) - return; - - Result += "\nstruct _prop_t {\n"; - Result += "\tconst char *name;\n"; - Result += "\tconst char *attributes;\n"; - Result += "};\n"; - - Result += "\nstruct _protocol_t;\n"; - - Result += "\nstruct _objc_method {\n"; - Result += "\tstruct objc_selector * _cmd;\n"; - Result += "\tconst char *method_type;\n"; - Result += "\tvoid *_imp;\n"; - Result += "};\n"; - - Result += "\nstruct _protocol_t {\n"; - Result += "\tvoid * isa; // NULL\n"; - Result += "\tconst char *protocol_name;\n"; - Result += "\tconst struct _protocol_list_t * protocol_list; // super protocols\n"; - Result += "\tconst struct method_list_t *instance_methods;\n"; - Result += "\tconst struct method_list_t *class_methods;\n"; - Result 
+= "\tconst struct method_list_t *optionalInstanceMethods;\n"; - Result += "\tconst struct method_list_t *optionalClassMethods;\n"; - Result += "\tconst struct _prop_list_t * properties;\n"; - Result += "\tconst unsigned int size; // sizeof(struct _protocol_t)\n"; - Result += "\tconst unsigned int flags; // = 0\n"; - Result += "\tconst char ** extendedMethodTypes;\n"; - Result += "};\n"; - - Result += "\nstruct _ivar_t {\n"; - Result += "\tunsigned long int *offset; // pointer to ivar offset location\n"; - Result += "\tconst char *name;\n"; - Result += "\tconst char *type;\n"; - Result += "\tunsigned int alignment;\n"; - Result += "\tunsigned int size;\n"; - Result += "};\n"; - - Result += "\nstruct _class_ro_t {\n"; - Result += "\tunsigned int flags;\n"; - Result += "\tunsigned int instanceStart;\n"; - Result += "\tunsigned int instanceSize;\n"; - const llvm::Triple &Triple(Context->getTargetInfo().getTriple()); - if (Triple.getArch() == llvm::Triple::x86_64) - Result += "\tunsigned int reserved;\n"; - Result += "\tconst unsigned char *ivarLayout;\n"; - Result += "\tconst char *name;\n"; - Result += "\tconst struct _method_list_t *baseMethods;\n"; - Result += "\tconst struct _objc_protocol_list *baseProtocols;\n"; - Result += "\tconst struct _ivar_list_t *ivars;\n"; - Result += "\tconst unsigned char *weakIvarLayout;\n"; - Result += "\tconst struct _prop_list_t *properties;\n"; - Result += "};\n"; - - Result += "\nstruct _class_t {\n"; - Result += "\tstruct _class_t *isa;\n"; - Result += "\tstruct _class_t *superclass;\n"; - Result += "\tvoid *cache;\n"; - Result += "\tvoid *vtable;\n"; - Result += "\tstruct _class_ro_t *ro;\n"; - Result += "};\n"; - - Result += "\nstruct _category_t {\n"; - Result += "\tconst char *name;\n"; - Result += "\tstruct _class_t *cls;\n"; - Result += "\tconst struct _method_list_t *instance_methods;\n"; - Result += "\tconst struct _method_list_t *class_methods;\n"; - Result += "\tconst struct _protocol_list_t *protocols;\n"; - Result += "\tconst struct _prop_list_t *properties;\n"; - Result += "};\n"; - - Result += "extern \"C\" __declspec(dllimport) struct objc_cache _objc_empty_cache;\n"; - Result += "#pragma warning(disable:4273)\n"; - meta_data_declared = true; -} - -static void Write_protocol_list_t_TypeDecl(std::string &Result, - long super_protocol_count) { - Result += "struct /*_protocol_list_t*/"; Result += " {\n"; - Result += "\tlong protocol_count; // Note, this is 32/64 bit\n"; - Result += "\tstruct _protocol_t *super_protocols["; - Result += utostr(super_protocol_count); Result += "];\n"; - Result += "}"; -} - -static void Write_method_list_t_TypeDecl(std::string &Result, - unsigned int method_count) { - Result += "struct /*_method_list_t*/"; Result += " {\n"; - Result += "\tunsigned int entsize; // sizeof(struct _objc_method)\n"; - Result += "\tunsigned int method_count;\n"; - Result += "\tstruct _objc_method method_list["; - Result += utostr(method_count); Result += "];\n"; - Result += "}"; -} - -static void Write__prop_list_t_TypeDecl(std::string &Result, - unsigned int property_count) { - Result += "struct /*_prop_list_t*/"; Result += " {\n"; - Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n"; - Result += "\tunsigned int count_of_properties;\n"; - Result += "\tstruct _prop_t prop_list["; - Result += utostr(property_count); Result += "];\n"; - Result += "}"; -} - -static void Write__ivar_list_t_TypeDecl(std::string &Result, - unsigned int ivar_count) { - Result += "struct /*_ivar_list_t*/"; Result += " {\n"; - Result += 
"\tunsigned int entsize; // sizeof(struct _prop_t)\n"; - Result += "\tunsigned int count;\n"; - Result += "\tstruct _ivar_t ivar_list["; - Result += utostr(ivar_count); Result += "];\n"; - Result += "}"; -} - -static void Write_protocol_list_initializer(ASTContext *Context, std::string &Result, - ArrayRef SuperProtocols, - StringRef VarName, - StringRef ProtocolName) { - if (SuperProtocols.size() > 0) { - Result += "\nstatic "; - Write_protocol_list_t_TypeDecl(Result, SuperProtocols.size()); - Result += " "; Result += VarName; - Result += ProtocolName; - Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; - Result += "\t"; Result += utostr(SuperProtocols.size()); Result += ",\n"; - for (unsigned i = 0, e = SuperProtocols.size(); i < e; i++) { - ObjCProtocolDecl *SuperPD = SuperProtocols[i]; - Result += "\t&"; Result += "_OBJC_PROTOCOL_"; - Result += SuperPD->getNameAsString(); - if (i == e-1) - Result += "\n};\n"; - else - Result += ",\n"; - } - } -} - -static void Write_method_list_t_initializer(RewriteModernObjC &RewriteObj, - ASTContext *Context, std::string &Result, - ArrayRef Methods, - StringRef VarName, - StringRef TopLevelDeclName, - bool MethodImpl) { - if (Methods.size() > 0) { - Result += "\nstatic "; - Write_method_list_t_TypeDecl(Result, Methods.size()); - Result += " "; Result += VarName; - Result += TopLevelDeclName; - Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; - Result += "\t"; Result += "sizeof(_objc_method)"; Result += ",\n"; - Result += "\t"; Result += utostr(Methods.size()); Result += ",\n"; - for (unsigned i = 0, e = Methods.size(); i < e; i++) { - ObjCMethodDecl *MD = Methods[i]; - if (i == 0) - Result += "\t{{(struct objc_selector *)\""; - else - Result += "\t{(struct objc_selector *)\""; - Result += (MD)->getSelector().getAsString(); Result += "\""; - Result += ", "; - std::string MethodTypeString; - Context->getObjCEncodingForMethodDecl(MD, MethodTypeString); - Result += "\""; Result += MethodTypeString; Result += "\""; - Result += ", "; - if (!MethodImpl) - Result += "0"; - else { - Result += "(void *)"; - Result += RewriteObj.MethodInternalNames[MD]; - } - if (i == e-1) - Result += "}}\n"; - else - Result += "},\n"; - } - Result += "};\n"; - } -} - -static void Write_prop_list_t_initializer(RewriteModernObjC &RewriteObj, - ASTContext *Context, std::string &Result, - ArrayRef Properties, - const Decl *Container, - StringRef VarName, - StringRef ProtocolName) { - if (Properties.size() > 0) { - Result += "\nstatic "; - Write__prop_list_t_TypeDecl(Result, Properties.size()); - Result += " "; Result += VarName; - Result += ProtocolName; - Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; - Result += "\t"; Result += "sizeof(_prop_t)"; Result += ",\n"; - Result += "\t"; Result += utostr(Properties.size()); Result += ",\n"; - for (unsigned i = 0, e = Properties.size(); i < e; i++) { - ObjCPropertyDecl *PropDecl = Properties[i]; - if (i == 0) - Result += "\t{{\""; - else - Result += "\t{\""; - Result += PropDecl->getName(); Result += "\","; - std::string PropertyTypeString, QuotePropertyTypeString; - Context->getObjCEncodingForPropertyDecl(PropDecl, Container, PropertyTypeString); - RewriteObj.QuoteDoublequotes(PropertyTypeString, QuotePropertyTypeString); - Result += "\""; Result += QuotePropertyTypeString; Result += "\""; - if (i == e-1) - Result += "}}\n"; - else - Result += "},\n"; - } - Result += "};\n"; - } -} - -// Metadata flags -enum MetaDataDlags { - CLS = 0x0, - CLS_META = 
0x1, - CLS_ROOT = 0x2, - OBJC2_CLS_HIDDEN = 0x10, - CLS_EXCEPTION = 0x20, - - /// (Obsolete) ARC-specific: this class has a .release_ivars method - CLS_HAS_IVAR_RELEASER = 0x40, - /// class was compiled with -fobjc-arr - CLS_COMPILED_BY_ARC = 0x80 // (1<<7) -}; - -static void Write__class_ro_t_initializer(ASTContext *Context, std::string &Result, - unsigned int flags, - const std::string &InstanceStart, - const std::string &InstanceSize, - ArrayRefbaseMethods, - ArrayRefbaseProtocols, - ArrayRefivars, - ArrayRefProperties, - StringRef VarName, - StringRef ClassName) { - Result += "\nstatic struct _class_ro_t "; - Result += VarName; Result += ClassName; - Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; - Result += "\t"; - Result += llvm::utostr(flags); Result += ", "; - Result += InstanceStart; Result += ", "; - Result += InstanceSize; Result += ", \n"; - Result += "\t"; - const llvm::Triple &Triple(Context->getTargetInfo().getTriple()); - if (Triple.getArch() == llvm::Triple::x86_64) - // uint32_t const reserved; // only when building for 64bit targets - Result += "(unsigned int)0, \n\t"; - // const uint8_t * const ivarLayout; - Result += "0, \n\t"; - Result += "\""; Result += ClassName; Result += "\",\n\t"; - bool metaclass = ((flags & CLS_META) != 0); - if (baseMethods.size() > 0) { - Result += "(const struct _method_list_t *)&"; - if (metaclass) - Result += "_OBJC_$_CLASS_METHODS_"; - else - Result += "_OBJC_$_INSTANCE_METHODS_"; - Result += ClassName; - Result += ",\n\t"; - } - else - Result += "0, \n\t"; - - if (!metaclass && baseProtocols.size() > 0) { - Result += "(const struct _objc_protocol_list *)&"; - Result += "_OBJC_CLASS_PROTOCOLS_$_"; Result += ClassName; - Result += ",\n\t"; - } - else - Result += "0, \n\t"; - - if (!metaclass && ivars.size() > 0) { - Result += "(const struct _ivar_list_t *)&"; - Result += "_OBJC_$_INSTANCE_VARIABLES_"; Result += ClassName; - Result += ",\n\t"; - } - else - Result += "0, \n\t"; - - // weakIvarLayout - Result += "0, \n\t"; - if (!metaclass && Properties.size() > 0) { - Result += "(const struct _prop_list_t *)&"; - Result += "_OBJC_$_PROP_LIST_"; Result += ClassName; - Result += ",\n"; - } - else - Result += "0, \n"; - - Result += "};\n"; -} - -static void Write_class_t(ASTContext *Context, std::string &Result, - StringRef VarName, - const ObjCInterfaceDecl *CDecl, bool metaclass) { - bool rootClass = (!CDecl->getSuperClass()); - const ObjCInterfaceDecl *RootClass = CDecl; - - if (!rootClass) { - // Find the Root class - RootClass = CDecl->getSuperClass(); - while (RootClass->getSuperClass()) { - RootClass = RootClass->getSuperClass(); - } - } - - if (metaclass && rootClass) { - // Need to handle a case of use of forward declaration. - Result += "\n"; - Result += "extern \"C\" "; - if (CDecl->getImplementation()) - Result += "__declspec(dllexport) "; - else - Result += "__declspec(dllimport) "; - - Result += "struct _class_t OBJC_CLASS_$_"; - Result += CDecl->getNameAsString(); - Result += ";\n"; - } - // Also, for possibility of 'super' metadata class not having been defined yet. 
- if (!rootClass) { - ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass(); - Result += "\n"; - Result += "extern \"C\" "; - if (SuperClass->getImplementation()) - Result += "__declspec(dllexport) "; - else - Result += "__declspec(dllimport) "; - - Result += "struct _class_t "; - Result += VarName; - Result += SuperClass->getNameAsString(); - Result += ";\n"; - - if (metaclass && RootClass != SuperClass) { - Result += "extern \"C\" "; - if (RootClass->getImplementation()) - Result += "__declspec(dllexport) "; - else - Result += "__declspec(dllimport) "; - - Result += "struct _class_t "; - Result += VarName; - Result += RootClass->getNameAsString(); - Result += ";\n"; - } - } - - Result += "\nextern \"C\" __declspec(dllexport) struct _class_t "; - Result += VarName; Result += CDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__DATA,__objc_data\"))) = {\n"; - Result += "\t"; - if (metaclass) { - if (!rootClass) { - Result += "0, // &"; Result += VarName; - Result += RootClass->getNameAsString(); - Result += ",\n\t"; - Result += "0, // &"; Result += VarName; - Result += CDecl->getSuperClass()->getNameAsString(); - Result += ",\n\t"; - } - else { - Result += "0, // &"; Result += VarName; - Result += CDecl->getNameAsString(); - Result += ",\n\t"; - Result += "0, // &OBJC_CLASS_$_"; Result += CDecl->getNameAsString(); - Result += ",\n\t"; - } - } - else { - Result += "0, // &OBJC_METACLASS_$_"; - Result += CDecl->getNameAsString(); - Result += ",\n\t"; - if (!rootClass) { - Result += "0, // &"; Result += VarName; - Result += CDecl->getSuperClass()->getNameAsString(); - Result += ",\n\t"; - } - else - Result += "0,\n\t"; - } - Result += "0, // (void *)&_objc_empty_cache,\n\t"; - Result += "0, // unused, was (void *)&_objc_empty_vtable,\n\t"; - if (metaclass) - Result += "&_OBJC_METACLASS_RO_$_"; - else - Result += "&_OBJC_CLASS_RO_$_"; - Result += CDecl->getNameAsString(); - Result += ",\n};\n"; - - // Add static function to initialize some of the meta-data fields. - // avoid doing it twice. - if (metaclass) - return; - - const ObjCInterfaceDecl *SuperClass = - rootClass ? 
CDecl : CDecl->getSuperClass(); - - Result += "static void OBJC_CLASS_SETUP_$_"; - Result += CDecl->getNameAsString(); - Result += "(void ) {\n"; - Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString(); - Result += ".isa = "; Result += "&OBJC_METACLASS_$_"; - Result += RootClass->getNameAsString(); Result += ";\n"; - - Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString(); - Result += ".superclass = "; - if (rootClass) - Result += "&OBJC_CLASS_$_"; - else - Result += "&OBJC_METACLASS_$_"; - - Result += SuperClass->getNameAsString(); Result += ";\n"; - - Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString(); - Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n"; - - Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString(); - Result += ".isa = "; Result += "&OBJC_METACLASS_$_"; - Result += CDecl->getNameAsString(); Result += ";\n"; - - if (!rootClass) { - Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString(); - Result += ".superclass = "; Result += "&OBJC_CLASS_$_"; - Result += SuperClass->getNameAsString(); Result += ";\n"; - } - - Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString(); - Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n"; - Result += "}\n"; -} - -static void Write_category_t(RewriteModernObjC &RewriteObj, ASTContext *Context, - std::string &Result, - ObjCCategoryDecl *CatDecl, - ObjCInterfaceDecl *ClassDecl, - ArrayRef InstanceMethods, - ArrayRef ClassMethods, - ArrayRef RefedProtocols, - ArrayRef ClassProperties) { - StringRef CatName = CatDecl->getName(); - StringRef ClassName = ClassDecl->getName(); - // must declare an extern class object in case this class is not implemented - // in this TU. - Result += "\n"; - Result += "extern \"C\" "; - if (ClassDecl->getImplementation()) - Result += "__declspec(dllexport) "; - else - Result += "__declspec(dllimport) "; - - Result += "struct _class_t "; - Result += "OBJC_CLASS_$_"; Result += ClassName; - Result += ";\n"; - - Result += "\nstatic struct _category_t "; - Result += "_OBJC_$_CATEGORY_"; - Result += ClassName; Result += "_$_"; Result += CatName; - Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n"; - Result += "{\n"; - Result += "\t\""; Result += ClassName; Result += "\",\n"; - Result += "\t0, // &"; Result += "OBJC_CLASS_$_"; Result += ClassName; - Result += ",\n"; - if (InstanceMethods.size() > 0) { - Result += "\t(const struct _method_list_t *)&"; - Result += "_OBJC_$_CATEGORY_INSTANCE_METHODS_"; - Result += ClassName; Result += "_$_"; Result += CatName; - Result += ",\n"; - } - else - Result += "\t0,\n"; - - if (ClassMethods.size() > 0) { - Result += "\t(const struct _method_list_t *)&"; - Result += "_OBJC_$_CATEGORY_CLASS_METHODS_"; - Result += ClassName; Result += "_$_"; Result += CatName; - Result += ",\n"; - } - else - Result += "\t0,\n"; - - if (RefedProtocols.size() > 0) { - Result += "\t(const struct _protocol_list_t *)&"; - Result += "_OBJC_CATEGORY_PROTOCOLS_$_"; - Result += ClassName; Result += "_$_"; Result += CatName; - Result += ",\n"; - } - else - Result += "\t0,\n"; - - if (ClassProperties.size() > 0) { - Result += "\t(const struct _prop_list_t *)&"; Result += "_OBJC_$_PROP_LIST_"; - Result += ClassName; Result += "_$_"; Result += CatName; - Result += ",\n"; - } - else - Result += "\t0,\n"; - - Result += "};\n"; - - // Add static function to initialize the class pointer in the category structure. 
- Result += "static void OBJC_CATEGORY_SETUP_$_"; - Result += ClassDecl->getNameAsString(); - Result += "_$_"; - Result += CatName; - Result += "(void ) {\n"; - Result += "\t_OBJC_$_CATEGORY_"; - Result += ClassDecl->getNameAsString(); - Result += "_$_"; - Result += CatName; - Result += ".cls = "; Result += "&OBJC_CLASS_$_"; Result += ClassName; - Result += ";\n}\n"; -} - -static void Write__extendedMethodTypes_initializer(RewriteModernObjC &RewriteObj, - ASTContext *Context, std::string &Result, - ArrayRef Methods, - StringRef VarName, - StringRef ProtocolName) { - if (Methods.size() == 0) - return; - - Result += "\nstatic const char *"; - Result += VarName; Result += ProtocolName; - Result += " [] __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n"; - Result += "{\n"; - for (unsigned i = 0, e = Methods.size(); i < e; i++) { - ObjCMethodDecl *MD = Methods[i]; - std::string MethodTypeString, QuoteMethodTypeString; - Context->getObjCEncodingForMethodDecl(MD, MethodTypeString, true); - RewriteObj.QuoteDoublequotes(MethodTypeString, QuoteMethodTypeString); - Result += "\t\""; Result += QuoteMethodTypeString; Result += "\""; - if (i == e-1) - Result += "\n};\n"; - else { - Result += ",\n"; - } - } -} - -static void Write_IvarOffsetVar(RewriteModernObjC &RewriteObj, - ASTContext *Context, - std::string &Result, - ArrayRef Ivars, - ObjCInterfaceDecl *CDecl) { - // FIXME. visibilty of offset symbols may have to be set; for Darwin - // this is what happens: - /** - if (Ivar->getAccessControl() == ObjCIvarDecl::Private || - Ivar->getAccessControl() == ObjCIvarDecl::Package || - Class->getVisibility() == HiddenVisibility) - Visibility shoud be: HiddenVisibility; - else - Visibility shoud be: DefaultVisibility; - */ - - Result += "\n"; - for (unsigned i =0, e = Ivars.size(); i < e; i++) { - ObjCIvarDecl *IvarDecl = Ivars[i]; - if (Context->getLangOpts().MicrosoftExt) - Result += "__declspec(allocate(\".objc_ivar$B\")) "; - - if (!Context->getLangOpts().MicrosoftExt || - IvarDecl->getAccessControl() == ObjCIvarDecl::Private || - IvarDecl->getAccessControl() == ObjCIvarDecl::Package) - Result += "extern \"C\" unsigned long int "; - else - Result += "extern \"C\" __declspec(dllexport) unsigned long int "; - WriteInternalIvarName(CDecl, IvarDecl, Result); - Result += " __attribute__ ((used, section (\"__DATA,__objc_ivar\")))"; - Result += " = "; - RewriteObj.RewriteIvarOffsetComputation(IvarDecl, Result); - Result += ";\n"; - } -} - -static void Write__ivar_list_t_initializer(RewriteModernObjC &RewriteObj, - ASTContext *Context, std::string &Result, - ArrayRef Ivars, - StringRef VarName, - ObjCInterfaceDecl *CDecl) { - if (Ivars.size() > 0) { - Write_IvarOffsetVar(RewriteObj, Context, Result, Ivars, CDecl); - - Result += "\nstatic "; - Write__ivar_list_t_TypeDecl(Result, Ivars.size()); - Result += " "; Result += VarName; - Result += CDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n"; - Result += "\t"; Result += "sizeof(_ivar_t)"; Result += ",\n"; - Result += "\t"; Result += utostr(Ivars.size()); Result += ",\n"; - for (unsigned i =0, e = Ivars.size(); i < e; i++) { - ObjCIvarDecl *IvarDecl = Ivars[i]; - if (i == 0) - Result += "\t{{"; - else - Result += "\t {"; - Result += "(unsigned long int *)&"; - WriteInternalIvarName(CDecl, IvarDecl, Result); - Result += ", "; - - Result += "\""; Result += IvarDecl->getName(); Result += "\", "; - std::string IvarTypeString, QuoteIvarTypeString; - Context->getObjCEncodingForType(IvarDecl->getType(), 
IvarTypeString, - IvarDecl); - RewriteObj.QuoteDoublequotes(IvarTypeString, QuoteIvarTypeString); - Result += "\""; Result += QuoteIvarTypeString; Result += "\", "; - - // FIXME. this alignment represents the host alignment and need be changed to - // represent the target alignment. - unsigned Align = Context->getTypeAlign(IvarDecl->getType())/8; - Align = llvm::Log2_32(Align); - Result += llvm::utostr(Align); Result += ", "; - CharUnits Size = Context->getTypeSizeInChars(IvarDecl->getType()); - Result += llvm::utostr(Size.getQuantity()); - if (i == e-1) - Result += "}}\n"; - else - Result += "},\n"; - } - Result += "};\n"; - } -} - -/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data. -void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl, - std::string &Result) { - - // Do not synthesize the protocol more than once. - if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl())) - return; - WriteModernMetadataDeclarations(Context, Result); - - if (ObjCProtocolDecl *Def = PDecl->getDefinition()) - PDecl = Def; - // Must write out all protocol definitions in current qualifier list, - // and in their nested qualifiers before writing out current definition. - for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(), - E = PDecl->protocol_end(); I != E; ++I) - RewriteObjCProtocolMetaData(*I, Result); - - // Construct method lists. - std::vector InstanceMethods, ClassMethods; - std::vector OptInstanceMethods, OptClassMethods; - for (ObjCProtocolDecl::instmeth_iterator - I = PDecl->instmeth_begin(), E = PDecl->instmeth_end(); - I != E; ++I) { - ObjCMethodDecl *MD = *I; - if (MD->getImplementationControl() == ObjCMethodDecl::Optional) { - OptInstanceMethods.push_back(MD); - } else { - InstanceMethods.push_back(MD); - } - } - - for (ObjCProtocolDecl::classmeth_iterator - I = PDecl->classmeth_begin(), E = PDecl->classmeth_end(); - I != E; ++I) { - ObjCMethodDecl *MD = *I; - if (MD->getImplementationControl() == ObjCMethodDecl::Optional) { - OptClassMethods.push_back(MD); - } else { - ClassMethods.push_back(MD); - } - } - std::vector AllMethods; - for (unsigned i = 0, e = InstanceMethods.size(); i < e; i++) - AllMethods.push_back(InstanceMethods[i]); - for (unsigned i = 0, e = ClassMethods.size(); i < e; i++) - AllMethods.push_back(ClassMethods[i]); - for (unsigned i = 0, e = OptInstanceMethods.size(); i < e; i++) - AllMethods.push_back(OptInstanceMethods[i]); - for (unsigned i = 0, e = OptClassMethods.size(); i < e; i++) - AllMethods.push_back(OptClassMethods[i]); - - Write__extendedMethodTypes_initializer(*this, Context, Result, - AllMethods, - "_OBJC_PROTOCOL_METHOD_TYPES_", - PDecl->getNameAsString()); - // Protocol's super protocol list - std::vector SuperProtocols; - for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(), - E = PDecl->protocol_end(); I != E; ++I) - SuperProtocols.push_back(*I); - - Write_protocol_list_initializer(Context, Result, SuperProtocols, - "_OBJC_PROTOCOL_REFS_", - PDecl->getNameAsString()); - - Write_method_list_t_initializer(*this, Context, Result, InstanceMethods, - "_OBJC_PROTOCOL_INSTANCE_METHODS_", - PDecl->getNameAsString(), false); - - Write_method_list_t_initializer(*this, Context, Result, ClassMethods, - "_OBJC_PROTOCOL_CLASS_METHODS_", - PDecl->getNameAsString(), false); - - Write_method_list_t_initializer(*this, Context, Result, OptInstanceMethods, - "_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_", - PDecl->getNameAsString(), false); - - Write_method_list_t_initializer(*this, Context, Result, 
OptClassMethods, - "_OBJC_PROTOCOL_OPT_CLASS_METHODS_", - PDecl->getNameAsString(), false); - - // Protocol's property metadata. - std::vector ProtocolProperties; - for (ObjCContainerDecl::prop_iterator I = PDecl->prop_begin(), - E = PDecl->prop_end(); I != E; ++I) - ProtocolProperties.push_back(*I); - - Write_prop_list_t_initializer(*this, Context, Result, ProtocolProperties, - /* Container */0, - "_OBJC_PROTOCOL_PROPERTIES_", - PDecl->getNameAsString()); - - // Writer out root metadata for current protocol: struct _protocol_t - Result += "\n"; - if (LangOpts.MicrosoftExt) - Result += "static "; - Result += "struct _protocol_t _OBJC_PROTOCOL_"; - Result += PDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__DATA,__datacoal_nt,coalesced\"))) = {\n"; - Result += "\t0,\n"; // id is; is null - Result += "\t\""; Result += PDecl->getNameAsString(); Result += "\",\n"; - if (SuperProtocols.size() > 0) { - Result += "\t(const struct _protocol_list_t *)&"; Result += "_OBJC_PROTOCOL_REFS_"; - Result += PDecl->getNameAsString(); Result += ",\n"; - } - else - Result += "\t0,\n"; - if (InstanceMethods.size() > 0) { - Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_INSTANCE_METHODS_"; - Result += PDecl->getNameAsString(); Result += ",\n"; - } - else - Result += "\t0,\n"; - - if (ClassMethods.size() > 0) { - Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_CLASS_METHODS_"; - Result += PDecl->getNameAsString(); Result += ",\n"; - } - else - Result += "\t0,\n"; - - if (OptInstanceMethods.size() > 0) { - Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_"; - Result += PDecl->getNameAsString(); Result += ",\n"; - } - else - Result += "\t0,\n"; - - if (OptClassMethods.size() > 0) { - Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_CLASS_METHODS_"; - Result += PDecl->getNameAsString(); Result += ",\n"; - } - else - Result += "\t0,\n"; - - if (ProtocolProperties.size() > 0) { - Result += "\t(const struct _prop_list_t *)&_OBJC_PROTOCOL_PROPERTIES_"; - Result += PDecl->getNameAsString(); Result += ",\n"; - } - else - Result += "\t0,\n"; - - Result += "\t"; Result += "sizeof(_protocol_t)"; Result += ",\n"; - Result += "\t0,\n"; - - if (AllMethods.size() > 0) { - Result += "\t(const char **)&"; Result += "_OBJC_PROTOCOL_METHOD_TYPES_"; - Result += PDecl->getNameAsString(); - Result += "\n};\n"; - } - else - Result += "\t0\n};\n"; - - if (LangOpts.MicrosoftExt) - Result += "static "; - Result += "struct _protocol_t *"; - Result += "_OBJC_LABEL_PROTOCOL_$_"; Result += PDecl->getNameAsString(); - Result += " = &_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString(); - Result += ";\n"; - - // Mark this protocol as having been generated. - if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl())) - llvm_unreachable("protocol already synthesized"); - -} - -void RewriteModernObjC::RewriteObjCProtocolListMetaData( - const ObjCList &Protocols, - StringRef prefix, StringRef ClassName, - std::string &Result) { - if (Protocols.empty()) return; - - for (unsigned i = 0; i != Protocols.size(); i++) - RewriteObjCProtocolMetaData(Protocols[i], Result); - - // Output the top lovel protocol meta-data for the class. 
- /* struct _objc_protocol_list { - struct _objc_protocol_list *next; - int protocol_count; - struct _objc_protocol *class_protocols[]; - } - */ - Result += "\n"; - if (LangOpts.MicrosoftExt) - Result += "__declspec(allocate(\".cat_cls_meth$B\")) "; - Result += "static struct {\n"; - Result += "\tstruct _objc_protocol_list *next;\n"; - Result += "\tint protocol_count;\n"; - Result += "\tstruct _objc_protocol *class_protocols["; - Result += utostr(Protocols.size()); - Result += "];\n} _OBJC_"; - Result += prefix; - Result += "_PROTOCOLS_"; - Result += ClassName; - Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= " - "{\n\t0, "; - Result += utostr(Protocols.size()); - Result += "\n"; - - Result += "\t,{&_OBJC_PROTOCOL_"; - Result += Protocols[0]->getNameAsString(); - Result += " \n"; - - for (unsigned i = 1; i != Protocols.size(); i++) { - Result += "\t ,&_OBJC_PROTOCOL_"; - Result += Protocols[i]->getNameAsString(); - Result += "\n"; - } - Result += "\t }\n};\n"; -} - -/// hasObjCExceptionAttribute - Return true if this class or any super -/// class has the __objc_exception__ attribute. -/// FIXME. Move this to ASTContext.cpp as it is also used for IRGen. -static bool hasObjCExceptionAttribute(ASTContext &Context, - const ObjCInterfaceDecl *OID) { - if (OID->hasAttr()) - return true; - if (const ObjCInterfaceDecl *Super = OID->getSuperClass()) - return hasObjCExceptionAttribute(Context, Super); - return false; -} - -void RewriteModernObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, - std::string &Result) { - ObjCInterfaceDecl *CDecl = IDecl->getClassInterface(); - - // Explicitly declared @interface's are already synthesized. - if (CDecl->isImplicitInterfaceDecl()) - assert(false && - "Legacy implicit interface rewriting not supported in moder abi"); - - WriteModernMetadataDeclarations(Context, Result); - SmallVector IVars; - - for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin(); - IVD; IVD = IVD->getNextIvar()) { - // Ignore unnamed bit-fields. - if (!IVD->getDeclName()) - continue; - IVars.push_back(IVD); - } - - Write__ivar_list_t_initializer(*this, Context, Result, IVars, - "_OBJC_$_INSTANCE_VARIABLES_", - CDecl); - - // Build _objc_method_list for class's instance methods if needed - SmallVector - InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end()); - - // If any of our property implementations have associated getters or - // setters, produce metadata for them as well. 
- for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(), - PropEnd = IDecl->propimpl_end(); - Prop != PropEnd; ++Prop) { - if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) - continue; - if (!Prop->getPropertyIvarDecl()) - continue; - ObjCPropertyDecl *PD = Prop->getPropertyDecl(); - if (!PD) - continue; - if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl()) - if (mustSynthesizeSetterGetterMethod(IDecl, PD, true /*getter*/)) - InstanceMethods.push_back(Getter); - if (PD->isReadOnly()) - continue; - if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl()) - if (mustSynthesizeSetterGetterMethod(IDecl, PD, false /*setter*/)) - InstanceMethods.push_back(Setter); - } - - Write_method_list_t_initializer(*this, Context, Result, InstanceMethods, - "_OBJC_$_INSTANCE_METHODS_", - IDecl->getNameAsString(), true); - - SmallVector - ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end()); - - Write_method_list_t_initializer(*this, Context, Result, ClassMethods, - "_OBJC_$_CLASS_METHODS_", - IDecl->getNameAsString(), true); - - // Protocols referenced in class declaration? - // Protocol's super protocol list - std::vector RefedProtocols; - const ObjCList &Protocols = CDecl->getReferencedProtocols(); - for (ObjCList::iterator I = Protocols.begin(), - E = Protocols.end(); - I != E; ++I) { - RefedProtocols.push_back(*I); - // Must write out all protocol definitions in current qualifier list, - // and in their nested qualifiers before writing out current definition. - RewriteObjCProtocolMetaData(*I, Result); - } - - Write_protocol_list_initializer(Context, Result, - RefedProtocols, - "_OBJC_CLASS_PROTOCOLS_$_", - IDecl->getNameAsString()); - - // Protocol's property metadata. - std::vector ClassProperties; - for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(), - E = CDecl->prop_end(); I != E; ++I) - ClassProperties.push_back(*I); - - Write_prop_list_t_initializer(*this, Context, Result, ClassProperties, - /* Container */IDecl, - "_OBJC_$_PROP_LIST_", - CDecl->getNameAsString()); - - - // Data for initializing _class_ro_t metaclass meta-data - uint32_t flags = CLS_META; - std::string InstanceSize; - std::string InstanceStart; - - - bool classIsHidden = CDecl->getVisibility() == HiddenVisibility; - if (classIsHidden) - flags |= OBJC2_CLS_HIDDEN; - - if (!CDecl->getSuperClass()) - // class is root - flags |= CLS_ROOT; - InstanceSize = "sizeof(struct _class_t)"; - InstanceStart = InstanceSize; - Write__class_ro_t_initializer(Context, Result, flags, - InstanceStart, InstanceSize, - ClassMethods, - 0, - 0, - 0, - "_OBJC_METACLASS_RO_$_", - CDecl->getNameAsString()); - - - // Data for initializing _class_ro_t meta-data - flags = CLS; - if (classIsHidden) - flags |= OBJC2_CLS_HIDDEN; - - if (hasObjCExceptionAttribute(*Context, CDecl)) - flags |= CLS_EXCEPTION; - - if (!CDecl->getSuperClass()) - // class is root - flags |= CLS_ROOT; - - InstanceSize.clear(); - InstanceStart.clear(); - if (!ObjCSynthesizedStructs.count(CDecl)) { - InstanceSize = "0"; - InstanceStart = "0"; - } - else { - InstanceSize = "sizeof(struct "; - InstanceSize += CDecl->getNameAsString(); - InstanceSize += "_IMPL)"; - - ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin(); - if (IVD) { - RewriteIvarOffsetComputation(IVD, InstanceStart); - } - else - InstanceStart = InstanceSize; - } - Write__class_ro_t_initializer(Context, Result, flags, - InstanceStart, InstanceSize, - InstanceMethods, - RefedProtocols, - IVars, - ClassProperties, - "_OBJC_CLASS_RO_$_", - CDecl->getNameAsString()); - - 
Write_class_t(Context, Result, - "OBJC_METACLASS_$_", - CDecl, /*metaclass*/true); - - Write_class_t(Context, Result, - "OBJC_CLASS_$_", - CDecl, /*metaclass*/false); - - if (ImplementationIsNonLazy(IDecl)) - DefinedNonLazyClasses.push_back(CDecl); - -} - -void RewriteModernObjC::RewriteClassSetupInitHook(std::string &Result) { - int ClsDefCount = ClassImplementation.size(); - if (!ClsDefCount) - return; - Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n"; - Result += "__declspec(allocate(\".objc_inithooks$B\")) "; - Result += "static void *OBJC_CLASS_SETUP[] = {\n"; - for (int i = 0; i < ClsDefCount; i++) { - ObjCImplementationDecl *IDecl = ClassImplementation[i]; - ObjCInterfaceDecl *CDecl = IDecl->getClassInterface(); - Result += "\t(void *)&OBJC_CLASS_SETUP_$_"; - Result += CDecl->getName(); Result += ",\n"; - } - Result += "};\n"; -} - -void RewriteModernObjC::RewriteMetaDataIntoBuffer(std::string &Result) { - int ClsDefCount = ClassImplementation.size(); - int CatDefCount = CategoryImplementation.size(); - - // For each implemented class, write out all its meta data. - for (int i = 0; i < ClsDefCount; i++) - RewriteObjCClassMetaData(ClassImplementation[i], Result); - - RewriteClassSetupInitHook(Result); - - // For each implemented category, write out all its meta data. - for (int i = 0; i < CatDefCount; i++) - RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result); - - RewriteCategorySetupInitHook(Result); - - if (ClsDefCount > 0) { - if (LangOpts.MicrosoftExt) - Result += "__declspec(allocate(\".objc_classlist$B\")) "; - Result += "static struct _class_t *L_OBJC_LABEL_CLASS_$ ["; - Result += llvm::utostr(ClsDefCount); Result += "]"; - Result += - " __attribute__((used, section (\"__DATA, __objc_classlist," - "regular,no_dead_strip\")))= {\n"; - for (int i = 0; i < ClsDefCount; i++) { - Result += "\t&OBJC_CLASS_$_"; - Result += ClassImplementation[i]->getNameAsString(); - Result += ",\n"; - } - Result += "};\n"; - - if (!DefinedNonLazyClasses.empty()) { - if (LangOpts.MicrosoftExt) - Result += "__declspec(allocate(\".objc_nlclslist$B\")) \n"; - Result += "static struct _class_t *_OBJC_LABEL_NONLAZY_CLASS_$[] = {\n\t"; - for (unsigned i = 0, e = DefinedNonLazyClasses.size(); i < e; i++) { - Result += "\t&OBJC_CLASS_$_"; Result += DefinedNonLazyClasses[i]->getNameAsString(); - Result += ",\n"; - } - Result += "};\n"; - } - } - - if (CatDefCount > 0) { - if (LangOpts.MicrosoftExt) - Result += "__declspec(allocate(\".objc_catlist$B\")) "; - Result += "static struct _category_t *L_OBJC_LABEL_CATEGORY_$ ["; - Result += llvm::utostr(CatDefCount); Result += "]"; - Result += - " __attribute__((used, section (\"__DATA, __objc_catlist," - "regular,no_dead_strip\")))= {\n"; - for (int i = 0; i < CatDefCount; i++) { - Result += "\t&_OBJC_$_CATEGORY_"; - Result += - CategoryImplementation[i]->getClassInterface()->getNameAsString(); - Result += "_$_"; - Result += CategoryImplementation[i]->getNameAsString(); - Result += ",\n"; - } - Result += "};\n"; - } - - if (!DefinedNonLazyCategories.empty()) { - if (LangOpts.MicrosoftExt) - Result += "__declspec(allocate(\".objc_nlcatlist$B\")) \n"; - Result += "static struct _category_t *_OBJC_LABEL_NONLAZY_CATEGORY_$[] = {\n\t"; - for (unsigned i = 0, e = DefinedNonLazyCategories.size(); i < e; i++) { - Result += "\t&_OBJC_$_CATEGORY_"; - Result += - DefinedNonLazyCategories[i]->getClassInterface()->getNameAsString(); - Result += "_$_"; - Result += DefinedNonLazyCategories[i]->getNameAsString(); - Result += ",\n"; - } - Result 
+= "};\n"; - } -} - -void RewriteModernObjC::WriteImageInfo(std::string &Result) { - if (LangOpts.MicrosoftExt) - Result += "__declspec(allocate(\".objc_imageinfo$B\")) \n"; - - Result += "static struct IMAGE_INFO { unsigned version; unsigned flag; } "; - // version 0, ObjCABI is 2 - Result += "_OBJC_IMAGE_INFO = { 0, 2 };\n"; -} - -/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category -/// implementation. -void RewriteModernObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl, - std::string &Result) { - WriteModernMetadataDeclarations(Context, Result); - ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface(); - // Find category declaration for this implementation. - ObjCCategoryDecl *CDecl=0; - for (CDecl = ClassDecl->getCategoryList(); CDecl; - CDecl = CDecl->getNextClassCategory()) - if (CDecl->getIdentifier() == IDecl->getIdentifier()) - break; - - std::string FullCategoryName = ClassDecl->getNameAsString(); - FullCategoryName += "_$_"; - FullCategoryName += CDecl->getNameAsString(); - - // Build _objc_method_list for class's instance methods if needed - SmallVector - InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end()); - - // If any of our property implementations have associated getters or - // setters, produce metadata for them as well. - for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(), - PropEnd = IDecl->propimpl_end(); - Prop != PropEnd; ++Prop) { - if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) - continue; - if (!Prop->getPropertyIvarDecl()) - continue; - ObjCPropertyDecl *PD = Prop->getPropertyDecl(); - if (!PD) - continue; - if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl()) - InstanceMethods.push_back(Getter); - if (PD->isReadOnly()) - continue; - if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl()) - InstanceMethods.push_back(Setter); - } - - Write_method_list_t_initializer(*this, Context, Result, InstanceMethods, - "_OBJC_$_CATEGORY_INSTANCE_METHODS_", - FullCategoryName, true); - - SmallVector - ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end()); - - Write_method_list_t_initializer(*this, Context, Result, ClassMethods, - "_OBJC_$_CATEGORY_CLASS_METHODS_", - FullCategoryName, true); - - // Protocols referenced in class declaration? - // Protocol's super protocol list - std::vector RefedProtocols; - for (ObjCInterfaceDecl::protocol_iterator I = CDecl->protocol_begin(), - E = CDecl->protocol_end(); - - I != E; ++I) { - RefedProtocols.push_back(*I); - // Must write out all protocol definitions in current qualifier list, - // and in their nested qualifiers before writing out current definition. - RewriteObjCProtocolMetaData(*I, Result); - } - - Write_protocol_list_initializer(Context, Result, - RefedProtocols, - "_OBJC_CATEGORY_PROTOCOLS_$_", - FullCategoryName); - - // Protocol's property metadata. - std::vector ClassProperties; - for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(), - E = CDecl->prop_end(); I != E; ++I) - ClassProperties.push_back(*I); - - Write_prop_list_t_initializer(*this, Context, Result, ClassProperties, - /* Container */IDecl, - "_OBJC_$_PROP_LIST_", - FullCategoryName); - - Write_category_t(*this, Context, Result, - CDecl, - ClassDecl, - InstanceMethods, - ClassMethods, - RefedProtocols, - ClassProperties); - - // Determine if this category is also "non-lazy". 
- if (ImplementationIsNonLazy(IDecl)) - DefinedNonLazyCategories.push_back(CDecl); - -} - -void RewriteModernObjC::RewriteCategorySetupInitHook(std::string &Result) { - int CatDefCount = CategoryImplementation.size(); - if (!CatDefCount) - return; - Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n"; - Result += "__declspec(allocate(\".objc_inithooks$B\")) "; - Result += "static void *OBJC_CATEGORY_SETUP[] = {\n"; - for (int i = 0; i < CatDefCount; i++) { - ObjCCategoryImplDecl *IDecl = CategoryImplementation[i]; - ObjCCategoryDecl *CatDecl= IDecl->getCategoryDecl(); - ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface(); - Result += "\t(void *)&OBJC_CATEGORY_SETUP_$_"; - Result += ClassDecl->getName(); - Result += "_$_"; - Result += CatDecl->getName(); - Result += ",\n"; - } - Result += "};\n"; -} - -// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or -/// class methods. -template -void RewriteModernObjC::RewriteObjCMethodsMetaData(MethodIterator MethodBegin, - MethodIterator MethodEnd, - bool IsInstanceMethod, - StringRef prefix, - StringRef ClassName, - std::string &Result) { - if (MethodBegin == MethodEnd) return; - - if (!objc_impl_method) { - /* struct _objc_method { - SEL _cmd; - char *method_types; - void *_imp; - } - */ - Result += "\nstruct _objc_method {\n"; - Result += "\tSEL _cmd;\n"; - Result += "\tchar *method_types;\n"; - Result += "\tvoid *_imp;\n"; - Result += "};\n"; - - objc_impl_method = true; - } - - // Build _objc_method_list for class's methods if needed - - /* struct { - struct _objc_method_list *next_method; - int method_count; - struct _objc_method method_list[]; - } - */ - unsigned NumMethods = std::distance(MethodBegin, MethodEnd); - Result += "\n"; - if (LangOpts.MicrosoftExt) { - if (IsInstanceMethod) - Result += "__declspec(allocate(\".inst_meth$B\")) "; - else - Result += "__declspec(allocate(\".cls_meth$B\")) "; - } - Result += "static struct {\n"; - Result += "\tstruct _objc_method_list *next_method;\n"; - Result += "\tint method_count;\n"; - Result += "\tstruct _objc_method method_list["; - Result += utostr(NumMethods); - Result += "];\n} _OBJC_"; - Result += prefix; - Result += IsInstanceMethod ? "INSTANCE" : "CLASS"; - Result += "_METHODS_"; - Result += ClassName; - Result += " __attribute__ ((used, section (\"__OBJC, __"; - Result += IsInstanceMethod ? "inst" : "cls"; - Result += "_meth\")))= "; - Result += "{\n\t0, " + utostr(NumMethods) + "\n"; - - Result += "\t,{{(SEL)\""; - Result += (*MethodBegin)->getSelector().getAsString().c_str(); - std::string MethodTypeString; - Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString); - Result += "\", \""; - Result += MethodTypeString; - Result += "\", (void *)"; - Result += MethodInternalNames[*MethodBegin]; - Result += "}\n"; - for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) { - Result += "\t ,{(SEL)\""; - Result += (*MethodBegin)->getSelector().getAsString().c_str(); - std::string MethodTypeString; - Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString); - Result += "\", \""; - Result += MethodTypeString; - Result += "\", (void *)"; - Result += MethodInternalNames[*MethodBegin]; - Result += "}\n"; - } - Result += "\t }\n};\n"; -} - -Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) { - SourceRange OldRange = IV->getSourceRange(); - Expr *BaseExpr = IV->getBase(); - - // Rewrite the base, but without actually doing replaces. 
- { - DisableReplaceStmtScope S(*this); - BaseExpr = cast(RewriteFunctionBodyOrGlobalInitializer(BaseExpr)); - IV->setBase(BaseExpr); - } - - ObjCIvarDecl *D = IV->getDecl(); - - Expr *Replacement = IV; - - if (BaseExpr->getType()->isObjCObjectPointerType()) { - const ObjCInterfaceType *iFaceDecl = - dyn_cast(BaseExpr->getType()->getPointeeType()); - assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null"); - // lookup which class implements the instance variable. - ObjCInterfaceDecl *clsDeclared = 0; - iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(), - clsDeclared); - assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class"); - - // Build name of symbol holding ivar offset. - std::string IvarOffsetName; - WriteInternalIvarName(clsDeclared, D, IvarOffsetName); - - ReferencedIvars[clsDeclared].insert(D); - - // cast offset to "char *". - CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(Context->CharTy), - CK_BitCast, - BaseExpr); - VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(), - SourceLocation(), &Context->Idents.get(IvarOffsetName), - Context->UnsignedLongTy, 0, SC_Extern, SC_None); - DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, - Context->UnsignedLongTy, VK_LValue, - SourceLocation()); - BinaryOperator *addExpr = - new (Context) BinaryOperator(castExpr, DRE, BO_Add, - Context->getPointerType(Context->CharTy), - VK_RValue, OK_Ordinary, SourceLocation()); - // Don't forget the parens to enforce the proper binding. - ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), - SourceLocation(), - addExpr); - QualType IvarT = D->getType(); - - if (!isa(IvarT) && IvarT->isRecordType()) { - RecordDecl *RD = IvarT->getAs()->getDecl(); - RD = RD->getDefinition(); - if (RD && !RD->getDeclName().getAsIdentifierInfo()) { - // decltype(((Foo_IMPL*)0)->bar) * - ObjCContainerDecl *CDecl = - dyn_cast(D->getDeclContext()); - // ivar in class extensions requires special treatment. 
- if (ObjCCategoryDecl *CatDecl = dyn_cast(CDecl)) - CDecl = CatDecl->getClassInterface(); - std::string RecName = CDecl->getName(); - RecName += "_IMPL"; - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get(RecName.c_str())); - QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD)); - unsigned UnsignedIntSize = - static_cast(Context->getTypeSize(Context->UnsignedIntTy)); - Expr *Zero = IntegerLiteral::Create(*Context, - llvm::APInt(UnsignedIntSize, 0), - Context->UnsignedIntTy, SourceLocation()); - Zero = NoTypeInfoCStyleCastExpr(Context, PtrStructIMPL, CK_BitCast, Zero); - ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), - Zero); - FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get(D->getNameAsString()), - IvarT, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); - IvarT = Context->getDecltypeType(ME, ME->getType()); - } - } - convertObjCTypeToCStyleType(IvarT); - QualType castT = Context->getPointerType(IvarT); - - castExpr = NoTypeInfoCStyleCastExpr(Context, - castT, - CK_BitCast, - PE); - - - Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT, - VK_LValue, OK_Ordinary, - SourceLocation()); - PE = new (Context) ParenExpr(OldRange.getBegin(), - OldRange.getEnd(), - Exp); - - Replacement = PE; - } - - ReplaceStmtWithRange(IV, Replacement, OldRange); - return Replacement; -} diff --git a/lib/Rewrite/RewriteObjC.cpp b/lib/Rewrite/RewriteObjC.cpp deleted file mode 100644 index 37c17e6..0000000 --- a/lib/Rewrite/RewriteObjC.cpp +++ /dev/null @@ -1,6035 +0,0 @@ -//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// Hacks and fun related to the code rewriter. -// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/ASTConsumers.h" -#include "clang/Rewrite/Rewriter.h" -#include "clang/AST/AST.h" -#include "clang/AST/ASTConsumer.h" -#include "clang/AST/ParentMap.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Basic/IdentifierTable.h" -#include "clang/Basic/Diagnostic.h" -#include "clang/Lex/Lexer.h" -#include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/ADT/StringExtras.h" -#include "llvm/ADT/SmallPtrSet.h" -#include "llvm/ADT/OwningPtr.h" -#include "llvm/ADT/DenseSet.h" - -using namespace clang; -using llvm::utostr; - -namespace { - class RewriteObjC : public ASTConsumer { - protected: - - enum { - BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)), - block, ... 
*/ - BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */ - BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the - __block variable */ - BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy - helpers */ - BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose - support routines */ - BLOCK_BYREF_CURRENT_MAX = 256 - }; - - enum { - BLOCK_NEEDS_FREE = (1 << 24), - BLOCK_HAS_COPY_DISPOSE = (1 << 25), - BLOCK_HAS_CXX_OBJ = (1 << 26), - BLOCK_IS_GC = (1 << 27), - BLOCK_IS_GLOBAL = (1 << 28), - BLOCK_HAS_DESCRIPTOR = (1 << 29) - }; - static const int OBJC_ABI_VERSION = 7; - - Rewriter Rewrite; - DiagnosticsEngine &Diags; - const LangOptions &LangOpts; - ASTContext *Context; - SourceManager *SM; - TranslationUnitDecl *TUDecl; - FileID MainFileID; - const char *MainFileStart, *MainFileEnd; - Stmt *CurrentBody; - ParentMap *PropParentMap; // created lazily. - std::string InFileName; - raw_ostream* OutFile; - std::string Preamble; - - TypeDecl *ProtocolTypeDecl; - VarDecl *GlobalVarDecl; - unsigned RewriteFailedDiag; - // ObjC string constant support. - unsigned NumObjCStringLiterals; - VarDecl *ConstantStringClassReference; - RecordDecl *NSStringRecord; - - // ObjC foreach break/continue generation support. - int BcLabelCount; - - unsigned TryFinallyContainsReturnDiag; - // Needed for super. - ObjCMethodDecl *CurMethodDef; - RecordDecl *SuperStructDecl; - RecordDecl *ConstantStringDecl; - - FunctionDecl *MsgSendFunctionDecl; - FunctionDecl *MsgSendSuperFunctionDecl; - FunctionDecl *MsgSendStretFunctionDecl; - FunctionDecl *MsgSendSuperStretFunctionDecl; - FunctionDecl *MsgSendFpretFunctionDecl; - FunctionDecl *GetClassFunctionDecl; - FunctionDecl *GetMetaClassFunctionDecl; - FunctionDecl *GetSuperClassFunctionDecl; - FunctionDecl *SelGetUidFunctionDecl; - FunctionDecl *CFStringFunctionDecl; - FunctionDecl *SuperContructorFunctionDecl; - FunctionDecl *CurFunctionDef; - FunctionDecl *CurFunctionDeclToDeclareForBlock; - - /* Misc. containers needed for meta-data rewrite. */ - SmallVector ClassImplementation; - SmallVector CategoryImplementation; - llvm::SmallPtrSet ObjCSynthesizedStructs; - llvm::SmallPtrSet ObjCSynthesizedProtocols; - llvm::SmallPtrSet ObjCForwardDecls; - llvm::DenseMap MethodInternalNames; - SmallVector Stmts; - SmallVector ObjCBcLabelNo; - // Remember all the @protocol() expressions. - llvm::SmallPtrSet ProtocolExprDecls; - - llvm::DenseSet CopyDestroyCache; - - // Block expressions. - SmallVector Blocks; - SmallVector InnerDeclRefsCount; - SmallVector InnerDeclRefs; - - SmallVector BlockDeclRefs; - - // Block related declarations. - SmallVector BlockByCopyDecls; - llvm::SmallPtrSet BlockByCopyDeclsPtrSet; - SmallVector BlockByRefDecls; - llvm::SmallPtrSet BlockByRefDeclsPtrSet; - llvm::DenseMap BlockByRefDeclNo; - llvm::SmallPtrSet ImportedBlockDecls; - llvm::SmallPtrSet ImportedLocalExternalDecls; - - llvm::DenseMap RewrittenBlockExprs; - - // This maps an original source AST to it's rewritten form. This allows - // us to avoid rewriting the same node twice (which is very uncommon). - // This is needed to support some of the exotic property rewriting. 
- llvm::DenseMap ReplacedNodes; - - // Needed for header files being rewritten - bool IsHeader; - bool SilenceRewriteMacroWarning; - bool objc_impl_method; - - bool DisableReplaceStmt; - class DisableReplaceStmtScope { - RewriteObjC &R; - bool SavedValue; - - public: - DisableReplaceStmtScope(RewriteObjC &R) - : R(R), SavedValue(R.DisableReplaceStmt) { - R.DisableReplaceStmt = true; - } - ~DisableReplaceStmtScope() { - R.DisableReplaceStmt = SavedValue; - } - }; - void InitializeCommon(ASTContext &context); - - public: - - // Top Level Driver code. - virtual bool HandleTopLevelDecl(DeclGroupRef D) { - for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - if (ObjCInterfaceDecl *Class = dyn_cast(*I)) { - if (!Class->isThisDeclarationADefinition()) { - RewriteForwardClassDecl(D); - break; - } - } - - if (ObjCProtocolDecl *Proto = dyn_cast(*I)) { - if (!Proto->isThisDeclarationADefinition()) { - RewriteForwardProtocolDecl(D); - break; - } - } - - HandleTopLevelSingleDecl(*I); - } - return true; - } - void HandleTopLevelSingleDecl(Decl *D); - void HandleDeclInMainFile(Decl *D); - RewriteObjC(std::string inFile, raw_ostream *OS, - DiagnosticsEngine &D, const LangOptions &LOpts, - bool silenceMacroWarn); - - ~RewriteObjC() {} - - virtual void HandleTranslationUnit(ASTContext &C); - - void ReplaceStmt(Stmt *Old, Stmt *New) { - Stmt *ReplacingStmt = ReplacedNodes[Old]; - - if (ReplacingStmt) - return; // We can't rewrite the same node twice. - - if (DisableReplaceStmt) - return; - - // If replacement succeeded or warning disabled return with no warning. - if (!Rewrite.ReplaceStmt(Old, New)) { - ReplacedNodes[Old] = New; - return; - } - if (SilenceRewriteMacroWarning) - return; - Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) - << Old->getSourceRange(); - } - - void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) { - if (DisableReplaceStmt) - return; - - // Measure the old text. - int Size = Rewrite.getRangeSize(SrcRange); - if (Size == -1) { - Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) - << Old->getSourceRange(); - return; - } - // Get the new text. - std::string SStr; - llvm::raw_string_ostream S(SStr); - New->printPretty(S, 0, PrintingPolicy(LangOpts)); - const std::string &Str = S.str(); - - // If replacement succeeded or warning disabled return with no warning. - if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) { - ReplacedNodes[Old] = New; - return; - } - if (SilenceRewriteMacroWarning) - return; - Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag) - << Old->getSourceRange(); - } - - void InsertText(SourceLocation Loc, StringRef Str, - bool InsertAfter = true) { - // If insertion succeeded or warning disabled return with no warning. - if (!Rewrite.InsertText(Loc, Str, InsertAfter) || - SilenceRewriteMacroWarning) - return; - - Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag); - } - - void ReplaceText(SourceLocation Start, unsigned OrigLength, - StringRef Str) { - // If removal succeeded or warning disabled return with no warning. - if (!Rewrite.ReplaceText(Start, OrigLength, Str) || - SilenceRewriteMacroWarning) - return; - - Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag); - } - - // Syntactic Rewriting. 
- void RewriteRecordBody(RecordDecl *RD); - void RewriteInclude(); - void RewriteForwardClassDecl(DeclGroupRef D); - void RewriteForwardClassDecl(const llvm::SmallVector &DG); - void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl, - const std::string &typedefString); - void RewriteImplementations(); - void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, - ObjCImplementationDecl *IMD, - ObjCCategoryImplDecl *CID); - void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl); - void RewriteImplementationDecl(Decl *Dcl); - void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl, - ObjCMethodDecl *MDecl, std::string &ResultStr); - void RewriteTypeIntoString(QualType T, std::string &ResultStr, - const FunctionType *&FPRetType); - void RewriteByRefString(std::string &ResultStr, const std::string &Name, - ValueDecl *VD, bool def=false); - void RewriteCategoryDecl(ObjCCategoryDecl *Dcl); - void RewriteProtocolDecl(ObjCProtocolDecl *Dcl); - void RewriteForwardProtocolDecl(DeclGroupRef D); - void RewriteForwardProtocolDecl(const llvm::SmallVector &DG); - void RewriteMethodDeclaration(ObjCMethodDecl *Method); - void RewriteProperty(ObjCPropertyDecl *prop); - void RewriteFunctionDecl(FunctionDecl *FD); - void RewriteBlockPointerType(std::string& Str, QualType Type); - void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD); - void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD); - void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl); - void RewriteTypeOfDecl(VarDecl *VD); - void RewriteObjCQualifiedInterfaceTypes(Expr *E); - - // Expression Rewriting. - Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S); - Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp); - Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo); - Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo); - Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp); - Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp); - Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp); - Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp); - void RewriteTryReturnStmts(Stmt *S); - void RewriteSyncReturnStmts(Stmt *S, std::string buf); - Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S); - Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S); - Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S); - Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, - SourceLocation OrigEnd); - Stmt *RewriteBreakStmt(BreakStmt *S); - Stmt *RewriteContinueStmt(ContinueStmt *S); - void RewriteCastExpr(CStyleCastExpr *CE); - - // Block rewriting. - void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D); - - // Block specific rewrite rules. - void RewriteBlockPointerDecl(NamedDecl *VD); - void RewriteByRefVar(VarDecl *VD); - Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD); - Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE); - void RewriteBlockPointerFunctionArgs(FunctionDecl *FD); - - void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl, - std::string &Result); - - virtual void Initialize(ASTContext &context) = 0; - - // Metadata Rewriting. 
- virtual void RewriteMetaDataIntoBuffer(std::string &Result) = 0; - virtual void RewriteObjCProtocolListMetaData(const ObjCList &Prots, - StringRef prefix, - StringRef ClassName, - std::string &Result) = 0; - virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl, - std::string &Result) = 0; - virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol, - StringRef prefix, - StringRef ClassName, - std::string &Result) = 0; - virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, - std::string &Result) = 0; - - // Rewriting ivar access - virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) = 0; - virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, - std::string &Result) = 0; - - // Misc. AST transformation routines. Sometimes they end up calling - // rewriting routines on the new ASTs. - CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD, - Expr **args, unsigned nargs, - SourceLocation StartLoc=SourceLocation(), - SourceLocation EndLoc=SourceLocation()); - CallExpr *SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor, - QualType msgSendType, - QualType returnType, - SmallVectorImpl &ArgTypes, - SmallVectorImpl &MsgExprs, - ObjCMethodDecl *Method); - Stmt *SynthMessageExpr(ObjCMessageExpr *Exp, - SourceLocation StartLoc=SourceLocation(), - SourceLocation EndLoc=SourceLocation()); - - void SynthCountByEnumWithState(std::string &buf); - void SynthMsgSendFunctionDecl(); - void SynthMsgSendSuperFunctionDecl(); - void SynthMsgSendStretFunctionDecl(); - void SynthMsgSendFpretFunctionDecl(); - void SynthMsgSendSuperStretFunctionDecl(); - void SynthGetClassFunctionDecl(); - void SynthGetMetaClassFunctionDecl(); - void SynthGetSuperClassFunctionDecl(); - void SynthSelGetUidFunctionDecl(); - void SynthSuperContructorFunctionDecl(); - - std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag); - std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i, - StringRef funcName, std::string Tag); - std::string SynthesizeBlockFunc(BlockExpr *CE, int i, - StringRef funcName, std::string Tag); - std::string SynthesizeBlockImpl(BlockExpr *CE, - std::string Tag, std::string Desc); - std::string SynthesizeBlockDescriptor(std::string DescTag, - std::string ImplTag, - int i, StringRef funcName, - unsigned hasCopy); - Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp); - void SynthesizeBlockLiterals(SourceLocation FunLocStart, - StringRef FunName); - FunctionDecl *SynthBlockInitFunctionDecl(StringRef name); - Stmt *SynthBlockInitExpr(BlockExpr *Exp, - const SmallVector &InnerBlockDeclRefs); - - // Misc. helper routines. - QualType getProtocolType(); - void WarnAboutReturnGotoStmts(Stmt *S); - void HasReturnStmts(Stmt *S, bool &hasReturns); - void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND); - void InsertBlockLiteralsWithinFunction(FunctionDecl *FD); - void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD); - - bool IsDeclStmtInForeachHeader(DeclStmt *DS); - void CollectBlockDeclRefInfo(BlockExpr *Exp); - void GetBlockDeclRefExprs(Stmt *S); - void GetInnerBlockDeclRefExprs(Stmt *S, - SmallVector &InnerBlockDeclRefs, - llvm::SmallPtrSet &InnerContexts); - - // We avoid calling Type::isBlockPointerType(), since it operates on the - // canonical type. We only care if the top-level type is a closure pointer. 
- bool isTopLevelBlockPointerType(QualType T) { - return isa(T); - } - - /// convertBlockPointerToFunctionPointer - Converts a block-pointer type - /// to a function pointer type and upon success, returns true; false - /// otherwise. - bool convertBlockPointerToFunctionPointer(QualType &T) { - if (isTopLevelBlockPointerType(T)) { - const BlockPointerType *BPT = T->getAs(); - T = Context->getPointerType(BPT->getPointeeType()); - return true; - } - return false; - } - - bool needToScanForQualifiers(QualType T); - QualType getSuperStructType(); - QualType getConstantStringStructType(); - QualType convertFunctionTypeOfBlocks(const FunctionType *FT); - bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf); - - void convertToUnqualifiedObjCType(QualType &T) { - if (T->isObjCQualifiedIdType()) - T = Context->getObjCIdType(); - else if (T->isObjCQualifiedClassType()) - T = Context->getObjCClassType(); - else if (T->isObjCObjectPointerType() && - T->getPointeeType()->isObjCQualifiedInterfaceType()) { - if (const ObjCObjectPointerType * OBJPT = - T->getAsObjCInterfacePointerType()) { - const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType(); - T = QualType(IFaceT, 0); - T = Context->getPointerType(T); - } - } - } - - // FIXME: This predicate seems like it would be useful to add to ASTContext. - bool isObjCType(QualType T) { - if (!LangOpts.ObjC1 && !LangOpts.ObjC2) - return false; - - QualType OCT = Context->getCanonicalType(T).getUnqualifiedType(); - - if (OCT == Context->getCanonicalType(Context->getObjCIdType()) || - OCT == Context->getCanonicalType(Context->getObjCClassType())) - return true; - - if (const PointerType *PT = OCT->getAs()) { - if (isa(PT->getPointeeType()) || - PT->getPointeeType()->isObjCQualifiedIdType()) - return true; - } - return false; - } - bool PointerTypeTakesAnyBlockArguments(QualType QT); - bool PointerTypeTakesAnyObjCQualifiedType(QualType QT); - void GetExtentOfArgList(const char *Name, const char *&LParen, - const char *&RParen); - - void QuoteDoublequotes(std::string &From, std::string &To) { - for (unsigned i = 0; i < From.length(); i++) { - if (From[i] == '"') - To += "\\\""; - else - To += From[i]; - } - } - - QualType getSimpleFunctionType(QualType result, - const QualType *args, - unsigned numArgs, - bool variadic = false) { - if (result == Context->getObjCInstanceType()) - result = Context->getObjCIdType(); - FunctionProtoType::ExtProtoInfo fpi; - fpi.Variadic = variadic; - return Context->getFunctionType(result, args, numArgs, fpi); - } - - // Helper function: create a CStyleCastExpr with trivial type source info. 
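// It is used below (e.g. in RewriteObjCSynchronizedStmt) to adapt expressions
// to 'id'; a hedged sketch of that call pattern, where SomeExpr stands in for
// an arbitrary expression:
//
//   Expr *AsId = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
//                                         CK_BitCast, SomeExpr);
//
// which pretty-prints as "(id)SomeExpr" when the synthesized AST is emitted.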
- CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty, - CastKind Kind, Expr *E) { - TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation()); - return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo, - SourceLocation(), SourceLocation()); - } - }; - - class RewriteObjCFragileABI : public RewriteObjC { - public: - - RewriteObjCFragileABI(std::string inFile, raw_ostream *OS, - DiagnosticsEngine &D, const LangOptions &LOpts, - bool silenceMacroWarn) : RewriteObjC(inFile, OS, - D, LOpts, - silenceMacroWarn) {} - - ~RewriteObjCFragileABI() {} - virtual void Initialize(ASTContext &context); - - // Rewriting metadata - template - void RewriteObjCMethodsMetaData(MethodIterator MethodBegin, - MethodIterator MethodEnd, - bool IsInstanceMethod, - StringRef prefix, - StringRef ClassName, - std::string &Result); - virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol, - StringRef prefix, - StringRef ClassName, - std::string &Result); - virtual void RewriteObjCProtocolListMetaData( - const ObjCList &Prots, - StringRef prefix, StringRef ClassName, std::string &Result); - virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, - std::string &Result); - virtual void RewriteMetaDataIntoBuffer(std::string &Result); - virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl, - std::string &Result); - - // Rewriting ivar - virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, - std::string &Result); - virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV); - }; -} - -void RewriteObjC::RewriteBlocksInFunctionProtoType(QualType funcType, - NamedDecl *D) { - if (const FunctionProtoType *fproto - = dyn_cast(funcType.IgnoreParens())) { - for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(), - E = fproto->arg_type_end(); I && (I != E); ++I) - if (isTopLevelBlockPointerType(*I)) { - // All the args are checked/rewritten. Don't call twice! 
- RewriteBlockPointerDecl(D); - break; - } - } -} - -void RewriteObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) { - const PointerType *PT = funcType->getAs(); - if (PT && PointerTypeTakesAnyBlockArguments(funcType)) - RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND); -} - -static bool IsHeaderFile(const std::string &Filename) { - std::string::size_type DotPos = Filename.rfind('.'); - - if (DotPos == std::string::npos) { - // no file extension - return false; - } - - std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end()); - // C header: .h - // C++ header: .hh or .H; - return Ext == "h" || Ext == "hh" || Ext == "H"; -} - -RewriteObjC::RewriteObjC(std::string inFile, raw_ostream* OS, - DiagnosticsEngine &D, const LangOptions &LOpts, - bool silenceMacroWarn) - : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS), - SilenceRewriteMacroWarning(silenceMacroWarn) { - IsHeader = IsHeaderFile(inFile); - RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning, - "rewriting sub-expression within a macro (may not be correct)"); - TryFinallyContainsReturnDiag = Diags.getCustomDiagID( - DiagnosticsEngine::Warning, - "rewriter doesn't support user-specified control flow semantics " - "for @try/@finally (code may not execute properly)"); -} - -ASTConsumer *clang::CreateObjCRewriter(const std::string& InFile, - raw_ostream* OS, - DiagnosticsEngine &Diags, - const LangOptions &LOpts, - bool SilenceRewriteMacroWarning) { - return new RewriteObjCFragileABI(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning); -} - -void RewriteObjC::InitializeCommon(ASTContext &context) { - Context = &context; - SM = &Context->getSourceManager(); - TUDecl = Context->getTranslationUnitDecl(); - MsgSendFunctionDecl = 0; - MsgSendSuperFunctionDecl = 0; - MsgSendStretFunctionDecl = 0; - MsgSendSuperStretFunctionDecl = 0; - MsgSendFpretFunctionDecl = 0; - GetClassFunctionDecl = 0; - GetMetaClassFunctionDecl = 0; - GetSuperClassFunctionDecl = 0; - SelGetUidFunctionDecl = 0; - CFStringFunctionDecl = 0; - ConstantStringClassReference = 0; - NSStringRecord = 0; - CurMethodDef = 0; - CurFunctionDef = 0; - CurFunctionDeclToDeclareForBlock = 0; - GlobalVarDecl = 0; - SuperStructDecl = 0; - ProtocolTypeDecl = 0; - ConstantStringDecl = 0; - BcLabelCount = 0; - SuperContructorFunctionDecl = 0; - NumObjCStringLiterals = 0; - PropParentMap = 0; - CurrentBody = 0; - DisableReplaceStmt = false; - objc_impl_method = false; - - // Get the ID and start/end of the main file. - MainFileID = SM->getMainFileID(); - const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID); - MainFileStart = MainBuf->getBufferStart(); - MainFileEnd = MainBuf->getBufferEnd(); - - Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts()); -} - -//===----------------------------------------------------------------------===// -// Top Level Driver Code -//===----------------------------------------------------------------------===// - -void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) { - if (Diags.hasErrorOccurred()) - return; - - // Two cases: either the decl could be in the main file, or it could be in a - // #included file. If the former, rewrite it now. If the later, check to see - // if we rewrote the #include/#import. - SourceLocation Loc = D->getLocation(); - Loc = SM->getExpansionLoc(Loc); - - // If this is for a builtin, ignore it. - if (Loc.isInvalid()) return; - - // Look for built-in declarations that we need to refer during the rewrite. 
- if (FunctionDecl *FD = dyn_cast(D)) { - RewriteFunctionDecl(FD); - } else if (VarDecl *FVD = dyn_cast(D)) { - // declared in - if (FVD->getName() == "_NSConstantStringClassReference") { - ConstantStringClassReference = FVD; - return; - } - } else if (ObjCInterfaceDecl *ID = dyn_cast(D)) { - if (ID->isThisDeclarationADefinition()) - RewriteInterfaceDecl(ID); - } else if (ObjCCategoryDecl *CD = dyn_cast(D)) { - RewriteCategoryDecl(CD); - } else if (ObjCProtocolDecl *PD = dyn_cast(D)) { - if (PD->isThisDeclarationADefinition()) - RewriteProtocolDecl(PD); - } else if (LinkageSpecDecl *LSD = dyn_cast(D)) { - // Recurse into linkage specifications - for (DeclContext::decl_iterator DI = LSD->decls_begin(), - DIEnd = LSD->decls_end(); - DI != DIEnd; ) { - if (ObjCInterfaceDecl *IFace = dyn_cast((*DI))) { - if (!IFace->isThisDeclarationADefinition()) { - SmallVector DG; - SourceLocation StartLoc = IFace->getLocStart(); - do { - if (isa(*DI) && - !cast(*DI)->isThisDeclarationADefinition() && - StartLoc == (*DI)->getLocStart()) - DG.push_back(*DI); - else - break; - - ++DI; - } while (DI != DIEnd); - RewriteForwardClassDecl(DG); - continue; - } - } - - if (ObjCProtocolDecl *Proto = dyn_cast((*DI))) { - if (!Proto->isThisDeclarationADefinition()) { - SmallVector DG; - SourceLocation StartLoc = Proto->getLocStart(); - do { - if (isa(*DI) && - !cast(*DI)->isThisDeclarationADefinition() && - StartLoc == (*DI)->getLocStart()) - DG.push_back(*DI); - else - break; - - ++DI; - } while (DI != DIEnd); - RewriteForwardProtocolDecl(DG); - continue; - } - } - - HandleTopLevelSingleDecl(*DI); - ++DI; - } - } - // If we have a decl in the main file, see if we should rewrite it. - if (SM->isFromMainFile(Loc)) - return HandleDeclInMainFile(D); -} - -//===----------------------------------------------------------------------===// -// Syntactic (non-AST) Rewriting Code -//===----------------------------------------------------------------------===// - -void RewriteObjC::RewriteInclude() { - SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID); - StringRef MainBuf = SM->getBufferData(MainFileID); - const char *MainBufStart = MainBuf.begin(); - const char *MainBufEnd = MainBuf.end(); - size_t ImportLen = strlen("import"); - - // Loop over the whole file, looking for includes. 
- for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) { - if (*BufPtr == '#') { - if (++BufPtr == MainBufEnd) - return; - while (*BufPtr == ' ' || *BufPtr == '\t') - if (++BufPtr == MainBufEnd) - return; - if (!strncmp(BufPtr, "import", ImportLen)) { - // replace import with include - SourceLocation ImportLoc = - LocStart.getLocWithOffset(BufPtr-MainBufStart); - ReplaceText(ImportLoc, ImportLen, "include"); - BufPtr += ImportLen; - } - } - } -} - -static std::string getIvarAccessString(ObjCIvarDecl *OID) { - const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface(); - std::string S; - S = "((struct "; - S += ClassDecl->getIdentifier()->getName(); - S += "_IMPL *)self)->"; - S += OID->getName(); - return S; -} - -void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID, - ObjCImplementationDecl *IMD, - ObjCCategoryImplDecl *CID) { - static bool objcGetPropertyDefined = false; - static bool objcSetPropertyDefined = false; - SourceLocation startLoc = PID->getLocStart(); - InsertText(startLoc, "// "); - const char *startBuf = SM->getCharacterData(startLoc); - assert((*startBuf == '@') && "bogus @synthesize location"); - const char *semiBuf = strchr(startBuf, ';'); - assert((*semiBuf == ';') && "@synthesize: can't find ';'"); - SourceLocation onePastSemiLoc = - startLoc.getLocWithOffset(semiBuf-startBuf+1); - - if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) - return; // FIXME: is this correct? - - // Generate the 'getter' function. - ObjCPropertyDecl *PD = PID->getPropertyDecl(); - ObjCIvarDecl *OID = PID->getPropertyIvarDecl(); - - if (!OID) - return; - unsigned Attributes = PD->getPropertyAttributes(); - if (!PD->getGetterMethodDecl()->isDefined()) { - bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) && - (Attributes & (ObjCPropertyDecl::OBJC_PR_retain | - ObjCPropertyDecl::OBJC_PR_copy)); - std::string Getr; - if (GenGetProperty && !objcGetPropertyDefined) { - objcGetPropertyDefined = true; - // FIXME. Is this attribute correct in all cases? - Getr = "\nextern \"C\" __declspec(dllimport) " - "id objc_getProperty(id, SEL, long, bool);\n"; - } - RewriteObjCMethodDecl(OID->getContainingInterface(), - PD->getGetterMethodDecl(), Getr); - Getr += "{ "; - // Synthesize an explicit cast to gain access to the ivar. - // See objc-act.c:objc_synthesize_new_getter() for details. - if (GenGetProperty) { - // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1) - Getr += "typedef "; - const FunctionType *FPRetType = 0; - RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr, - FPRetType); - Getr += " _TYPE"; - if (FPRetType) { - Getr += ")"; // close the precedence "scope" for "*". - - // Now, emit the argument types (if any). 
- if (const FunctionProtoType *FT = dyn_cast(FPRetType)){ - Getr += "("; - for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { - if (i) Getr += ", "; - std::string ParamStr = FT->getArgType(i).getAsString( - Context->getPrintingPolicy()); - Getr += ParamStr; - } - if (FT->isVariadic()) { - if (FT->getNumArgs()) Getr += ", "; - Getr += "..."; - } - Getr += ")"; - } else - Getr += "()"; - } - Getr += ";\n"; - Getr += "return (_TYPE)"; - Getr += "objc_getProperty(self, _cmd, "; - RewriteIvarOffsetComputation(OID, Getr); - Getr += ", 1)"; - } - else - Getr += "return " + getIvarAccessString(OID); - Getr += "; }"; - InsertText(onePastSemiLoc, Getr); - } - - if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined()) - return; - - // Generate the 'setter' function. - std::string Setr; - bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain | - ObjCPropertyDecl::OBJC_PR_copy); - if (GenSetProperty && !objcSetPropertyDefined) { - objcSetPropertyDefined = true; - // FIXME. Is this attribute correct in all cases? - Setr = "\nextern \"C\" __declspec(dllimport) " - "void objc_setProperty (id, SEL, long, id, bool, bool);\n"; - } - - RewriteObjCMethodDecl(OID->getContainingInterface(), - PD->getSetterMethodDecl(), Setr); - Setr += "{ "; - // Synthesize an explicit cast to initialize the ivar. - // See objc-act.c:objc_synthesize_new_setter() for details. - if (GenSetProperty) { - Setr += "objc_setProperty (self, _cmd, "; - RewriteIvarOffsetComputation(OID, Setr); - Setr += ", (id)"; - Setr += PD->getName(); - Setr += ", "; - if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) - Setr += "0, "; - else - Setr += "1, "; - if (Attributes & ObjCPropertyDecl::OBJC_PR_copy) - Setr += "1)"; - else - Setr += "0)"; - } - else { - Setr += getIvarAccessString(OID) + " = "; - Setr += PD->getName(); - } - Setr += "; }"; - InsertText(onePastSemiLoc, Setr); -} - -static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl, - std::string &typedefString) { - typedefString += "#ifndef _REWRITER_typedef_"; - typedefString += ForwardDecl->getNameAsString(); - typedefString += "\n"; - typedefString += "#define _REWRITER_typedef_"; - typedefString += ForwardDecl->getNameAsString(); - typedefString += "\n"; - typedefString += "typedef struct objc_object "; - typedefString += ForwardDecl->getNameAsString(); - typedefString += ";\n#endif\n"; -} - -void RewriteObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl, - const std::string &typedefString) { - SourceLocation startLoc = ClassDecl->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - const char *semiPtr = strchr(startBuf, ';'); - // Replace the @class with typedefs corresponding to the classes. - ReplaceText(startLoc, semiPtr-startBuf+1, typedefString); -} - -void RewriteObjC::RewriteForwardClassDecl(DeclGroupRef D) { - std::string typedefString; - for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) { - ObjCInterfaceDecl *ForwardDecl = cast(*I); - if (I == D.begin()) { - // Translate to typedef's that forward reference structs with the same name - // as the class. As a convenience, we include the original declaration - // as a comment. 
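// As a concrete illustration of the accessor synthesis above (hypothetical
// "@synthesize count;" in class MyClass, for a plain int property backed by
// an ivar of the same name), the text inserted after the @synthesize is
// roughly:
//
//   static int _I_MyClass_count(MyClass * self, SEL _cmd)
//   { return ((struct MyClass_IMPL *)self)->count; }
//   static void _I_MyClass_setCount_(MyClass * self, SEL _cmd, int count)
//   { ((struct MyClass_IMPL *)self)->count = count; }
//
// Retain/copy properties are routed through objc_getProperty/objc_setProperty
// instead, as synthesized above.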
- typedefString += "// @class "; - typedefString += ForwardDecl->getNameAsString(); - typedefString += ";\n"; - } - RewriteOneForwardClassDecl(ForwardDecl, typedefString); - } - DeclGroupRef::iterator I = D.begin(); - RewriteForwardClassEpilogue(cast(*I), typedefString); -} - -void RewriteObjC::RewriteForwardClassDecl( - const llvm::SmallVector &D) { - std::string typedefString; - for (unsigned i = 0; i < D.size(); i++) { - ObjCInterfaceDecl *ForwardDecl = cast(D[i]); - if (i == 0) { - typedefString += "// @class "; - typedefString += ForwardDecl->getNameAsString(); - typedefString += ";\n"; - } - RewriteOneForwardClassDecl(ForwardDecl, typedefString); - } - RewriteForwardClassEpilogue(cast(D[0]), typedefString); -} - -void RewriteObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) { - // When method is a synthesized one, such as a getter/setter there is - // nothing to rewrite. - if (Method->isImplicit()) - return; - SourceLocation LocStart = Method->getLocStart(); - SourceLocation LocEnd = Method->getLocEnd(); - - if (SM->getExpansionLineNumber(LocEnd) > - SM->getExpansionLineNumber(LocStart)) { - InsertText(LocStart, "#if 0\n"); - ReplaceText(LocEnd, 1, ";\n#endif\n"); - } else { - InsertText(LocStart, "// "); - } -} - -void RewriteObjC::RewriteProperty(ObjCPropertyDecl *prop) { - SourceLocation Loc = prop->getAtLoc(); - - ReplaceText(Loc, 0, "// "); - // FIXME: handle properties that are declared across multiple lines. -} - -void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) { - SourceLocation LocStart = CatDecl->getLocStart(); - - // FIXME: handle category headers that are declared across multiple lines. - ReplaceText(LocStart, 0, "// "); - - for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(), - E = CatDecl->prop_end(); I != E; ++I) - RewriteProperty(*I); - - for (ObjCCategoryDecl::instmeth_iterator - I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - for (ObjCCategoryDecl::classmeth_iterator - I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - - // Lastly, comment out the @end. - ReplaceText(CatDecl->getAtEndRange().getBegin(), - strlen("@end"), "/* @end */"); -} - -void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) { - SourceLocation LocStart = PDecl->getLocStart(); - assert(PDecl->isThisDeclarationADefinition()); - - // FIXME: handle protocol headers that are declared across multiple lines. - ReplaceText(LocStart, 0, "// "); - - for (ObjCProtocolDecl::instmeth_iterator - I = PDecl->instmeth_begin(), E = PDecl->instmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - for (ObjCProtocolDecl::classmeth_iterator - I = PDecl->classmeth_begin(), E = PDecl->classmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - - for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(), - E = PDecl->prop_end(); I != E; ++I) - RewriteProperty(*I); - - // Lastly, comment out the @end. 
- SourceLocation LocEnd = PDecl->getAtEndRange().getBegin(); - ReplaceText(LocEnd, strlen("@end"), "/* @end */"); - - // Must comment out @optional/@required - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - for (const char *p = startBuf; p < endBuf; p++) { - if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) { - SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf); - ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */"); - - } - else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) { - SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf); - ReplaceText(OptionalLoc, strlen("@required"), "/* @required */"); - - } - } -} - -void RewriteObjC::RewriteForwardProtocolDecl(DeclGroupRef D) { - SourceLocation LocStart = (*D.begin())->getLocStart(); - if (LocStart.isInvalid()) - llvm_unreachable("Invalid SourceLocation"); - // FIXME: handle forward protocol that are declared across multiple lines. - ReplaceText(LocStart, 0, "// "); -} - -void -RewriteObjC::RewriteForwardProtocolDecl(const llvm::SmallVector &DG) { - SourceLocation LocStart = DG[0]->getLocStart(); - if (LocStart.isInvalid()) - llvm_unreachable("Invalid SourceLocation"); - // FIXME: handle forward protocol that are declared across multiple lines. - ReplaceText(LocStart, 0, "// "); -} - -void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr, - const FunctionType *&FPRetType) { - if (T->isObjCQualifiedIdType()) - ResultStr += "id"; - else if (T->isFunctionPointerType() || - T->isBlockPointerType()) { - // needs special handling, since pointer-to-functions have special - // syntax (where a decaration models use). - QualType retType = T; - QualType PointeeTy; - if (const PointerType* PT = retType->getAs()) - PointeeTy = PT->getPointeeType(); - else if (const BlockPointerType *BPT = retType->getAs()) - PointeeTy = BPT->getPointeeType(); - if ((FPRetType = PointeeTy->getAs())) { - ResultStr += FPRetType->getResultType().getAsString( - Context->getPrintingPolicy()); - ResultStr += "(*"; - } - } else - ResultStr += T.getAsString(Context->getPrintingPolicy()); -} - -void RewriteObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl, - ObjCMethodDecl *OMD, - std::string &ResultStr) { - //fprintf(stderr,"In RewriteObjCMethodDecl\n"); - const FunctionType *FPRetType = 0; - ResultStr += "\nstatic "; - RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType); - ResultStr += " "; - - // Unique method name - std::string NameStr; - - if (OMD->isInstanceMethod()) - NameStr += "_I_"; - else - NameStr += "_C_"; - - NameStr += IDecl->getNameAsString(); - NameStr += "_"; - - if (ObjCCategoryImplDecl *CID = - dyn_cast(OMD->getDeclContext())) { - NameStr += CID->getNameAsString(); - NameStr += "_"; - } - // Append selector names, replacing ':' with '_' - { - std::string selString = OMD->getSelector().getAsString(); - int len = selString.size(); - for (int i = 0; i < len; i++) - if (selString[i] == ':') - selString[i] = '_'; - NameStr += selString; - } - // Remember this name for metadata emission - MethodInternalNames[OMD] = NameStr; - ResultStr += NameStr; - - // Rewrite arguments - ResultStr += "("; - - // invisible arguments - if (OMD->isInstanceMethod()) { - QualType selfTy = Context->getObjCInterfaceType(IDecl); - selfTy = Context->getPointerType(selfTy); - if (!LangOpts.MicrosoftExt) { - if (ObjCSynthesizedStructs.count(const_cast(IDecl))) - ResultStr += "struct "; - } - // When rewriting for 
Microsoft, explicitly omit the structure name. - ResultStr += IDecl->getNameAsString(); - ResultStr += " *"; - } - else - ResultStr += Context->getObjCClassType().getAsString( - Context->getPrintingPolicy()); - - ResultStr += " self, "; - ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy()); - ResultStr += " _cmd"; - - // Method arguments. - for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), - E = OMD->param_end(); PI != E; ++PI) { - ParmVarDecl *PDecl = *PI; - ResultStr += ", "; - if (PDecl->getType()->isObjCQualifiedIdType()) { - ResultStr += "id "; - ResultStr += PDecl->getNameAsString(); - } else { - std::string Name = PDecl->getNameAsString(); - QualType QT = PDecl->getType(); - // Make sure we convert "t (^)(...)" to "t (*)(...)". - (void)convertBlockPointerToFunctionPointer(QT); - QT.getAsStringInternal(Name, Context->getPrintingPolicy()); - ResultStr += Name; - } - } - if (OMD->isVariadic()) - ResultStr += ", ..."; - ResultStr += ") "; - - if (FPRetType) { - ResultStr += ")"; // close the precedence "scope" for "*". - - // Now, emit the argument types (if any). - if (const FunctionProtoType *FT = dyn_cast(FPRetType)) { - ResultStr += "("; - for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) { - if (i) ResultStr += ", "; - std::string ParamStr = FT->getArgType(i).getAsString( - Context->getPrintingPolicy()); - ResultStr += ParamStr; - } - if (FT->isVariadic()) { - if (FT->getNumArgs()) ResultStr += ", "; - ResultStr += "..."; - } - ResultStr += ")"; - } else { - ResultStr += "()"; - } - } -} -void RewriteObjC::RewriteImplementationDecl(Decl *OID) { - ObjCImplementationDecl *IMD = dyn_cast(OID); - ObjCCategoryImplDecl *CID = dyn_cast(OID); - - InsertText(IMD ? IMD->getLocStart() : CID->getLocStart(), "// "); - - for (ObjCCategoryImplDecl::instmeth_iterator - I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(), - E = IMD ? IMD->instmeth_end() : CID->instmeth_end(); - I != E; ++I) { - std::string ResultStr; - ObjCMethodDecl *OMD = *I; - RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr); - SourceLocation LocStart = OMD->getLocStart(); - SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart(); - - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - ReplaceText(LocStart, endBuf-startBuf, ResultStr); - } - - for (ObjCCategoryImplDecl::classmeth_iterator - I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(), - E = IMD ? IMD->classmeth_end() : CID->classmeth_end(); - I != E; ++I) { - std::string ResultStr; - ObjCMethodDecl *OMD = *I; - RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr); - SourceLocation LocStart = OMD->getLocStart(); - SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart(); - - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - ReplaceText(LocStart, endBuf-startBuf, ResultStr); - } - for (ObjCCategoryImplDecl::propimpl_iterator - I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(), - E = IMD ? IMD->propimpl_end() : CID->propimpl_end(); - I != E; ++I) { - RewritePropertyImplDecl(*I, IMD, CID); - } - - InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// "); -} - -void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) { - std::string ResultStr; - if (!ObjCForwardDecls.count(ClassDecl->getCanonicalDecl())) { - // we haven't seen a forward decl - generate a typedef. 
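// A hedged example of the block-parameter conversion noted above: for a
// hypothetical method "- (void)run:(void (^)(int))handler" on class MyClass,
// the block pointer is printed as a plain function pointer, so the generated
// signature is roughly
//
//   static void _I_MyClass_run_(MyClass * self, SEL _cmd, void (*handler)(int))
//
// i.e. "void (^)(int)" becomes "void (*)(int)" via
// convertBlockPointerToFunctionPointer().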
- ResultStr = "#ifndef _REWRITER_typedef_"; - ResultStr += ClassDecl->getNameAsString(); - ResultStr += "\n"; - ResultStr += "#define _REWRITER_typedef_"; - ResultStr += ClassDecl->getNameAsString(); - ResultStr += "\n"; - ResultStr += "typedef struct objc_object "; - ResultStr += ClassDecl->getNameAsString(); - ResultStr += ";\n#endif\n"; - // Mark this typedef as having been generated. - ObjCForwardDecls.insert(ClassDecl->getCanonicalDecl()); - } - RewriteObjCInternalStruct(ClassDecl, ResultStr); - - for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(), - E = ClassDecl->prop_end(); I != E; ++I) - RewriteProperty(*I); - for (ObjCInterfaceDecl::instmeth_iterator - I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - for (ObjCInterfaceDecl::classmeth_iterator - I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end(); - I != E; ++I) - RewriteMethodDeclaration(*I); - - // Lastly, comment out the @end. - ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"), - "/* @end */"); -} - -Stmt *RewriteObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) { - SourceRange OldRange = PseudoOp->getSourceRange(); - - // We just magically know some things about the structure of this - // expression. - ObjCMessageExpr *OldMsg = - cast(PseudoOp->getSemanticExpr( - PseudoOp->getNumSemanticExprs() - 1)); - - // Because the rewriter doesn't allow us to rewrite rewritten code, - // we need to suppress rewriting the sub-statements. - Expr *Base, *RHS; - { - DisableReplaceStmtScope S(*this); - - // Rebuild the base expression if we have one. - Base = 0; - if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) { - Base = OldMsg->getInstanceReceiver(); - Base = cast(Base)->getSourceExpr(); - Base = cast(RewriteFunctionBodyOrGlobalInitializer(Base)); - } - - // Rebuild the RHS. - RHS = cast(PseudoOp->getSyntacticForm())->getRHS(); - RHS = cast(RHS)->getSourceExpr(); - RHS = cast(RewriteFunctionBodyOrGlobalInitializer(RHS)); - } - - // TODO: avoid this copy. 
- SmallVector SelLocs; - OldMsg->getSelectorLocs(SelLocs); - - ObjCMessageExpr *NewMsg = 0; - switch (OldMsg->getReceiverKind()) { - case ObjCMessageExpr::Class: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - OldMsg->getClassReceiverTypeInfo(), - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - RHS, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - - case ObjCMessageExpr::Instance: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - Base, - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - RHS, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - - case ObjCMessageExpr::SuperClass: - case ObjCMessageExpr::SuperInstance: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - OldMsg->getSuperLoc(), - OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance, - OldMsg->getSuperType(), - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - RHS, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - } - - Stmt *Replacement = SynthMessageExpr(NewMsg); - ReplaceStmtWithRange(PseudoOp, Replacement, OldRange); - return Replacement; -} - -Stmt *RewriteObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) { - SourceRange OldRange = PseudoOp->getSourceRange(); - - // We just magically know some things about the structure of this - // expression. - ObjCMessageExpr *OldMsg = - cast(PseudoOp->getResultExpr()->IgnoreImplicit()); - - // Because the rewriter doesn't allow us to rewrite rewritten code, - // we need to suppress rewriting the sub-statements. - Expr *Base = 0; - { - DisableReplaceStmtScope S(*this); - - // Rebuild the base expression if we have one. - if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) { - Base = OldMsg->getInstanceReceiver(); - Base = cast(Base)->getSourceExpr(); - Base = cast(RewriteFunctionBodyOrGlobalInitializer(Base)); - } - } - - // Intentionally empty. 
- SmallVector SelLocs; - SmallVector Args; - - ObjCMessageExpr *NewMsg = 0; - switch (OldMsg->getReceiverKind()) { - case ObjCMessageExpr::Class: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - OldMsg->getClassReceiverTypeInfo(), - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - - case ObjCMessageExpr::Instance: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - Base, - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - - case ObjCMessageExpr::SuperClass: - case ObjCMessageExpr::SuperInstance: - NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(), - OldMsg->getValueKind(), - OldMsg->getLeftLoc(), - OldMsg->getSuperLoc(), - OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance, - OldMsg->getSuperType(), - OldMsg->getSelector(), - SelLocs, - OldMsg->getMethodDecl(), - Args, - OldMsg->getRightLoc(), - OldMsg->isImplicit()); - break; - } - - Stmt *Replacement = SynthMessageExpr(NewMsg); - ReplaceStmtWithRange(PseudoOp, Replacement, OldRange); - return Replacement; -} - -/// SynthCountByEnumWithState - To print: -/// ((unsigned int (*) -/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int)) -/// (void *)objc_msgSend)((id)l_collection, -/// sel_registerName( -/// "countByEnumeratingWithState:objects:count:"), -/// &enumState, -/// (id *)__rw_items, (unsigned int)16) -/// -void RewriteObjC::SynthCountByEnumWithState(std::string &buf) { - buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, " - "id *, unsigned int))(void *)objc_msgSend)"; - buf += "\n\t\t"; - buf += "((id)l_collection,\n\t\t"; - buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),"; - buf += "\n\t\t"; - buf += "&enumState, " - "(id *)__rw_items, (unsigned int)16)"; -} - -/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach -/// statement to exit to its outer synthesized loop. -/// -Stmt *RewriteObjC::RewriteBreakStmt(BreakStmt *S) { - if (Stmts.empty() || !isa(Stmts.back())) - return S; - // replace break with goto __break_label - std::string buf; - - SourceLocation startLoc = S->getLocStart(); - buf = "goto __break_label_"; - buf += utostr(ObjCBcLabelNo.back()); - ReplaceText(startLoc, strlen("break"), buf); - - return 0; -} - -/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach -/// statement to continue with its inner synthesized loop. -/// -Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) { - if (Stmts.empty() || !isa(Stmts.back())) - return S; - // replace continue with goto __continue_label - std::string buf; - - SourceLocation startLoc = S->getLocStart(); - buf = "goto __continue_label_"; - buf += utostr(ObjCBcLabelNo.back()); - ReplaceText(startLoc, strlen("continue"), buf); - - return 0; -} - -/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement. 
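// A short illustration of the break/continue rewrites above, assuming the
// enclosing fast-enumeration loop was assigned label number 1: inside the
// rewritten loop body,
//
//   break;      becomes   goto __break_label_1;
//   continue;   becomes   goto __continue_label_1;
//
// matching the __break_label_N / __continue_label_N targets emitted by the
// foreach rewrite described next.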
-/// It rewrites: -/// for ( type elem in collection) { stmts; } - -/// Into: -/// { -/// type elem; -/// struct __objcFastEnumerationState enumState = { 0 }; -/// id __rw_items[16]; -/// id l_collection = (id)collection; -/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState -/// objects:__rw_items count:16]; -/// if (limit) { -/// unsigned long startMutations = *enumState.mutationsPtr; -/// do { -/// unsigned long counter = 0; -/// do { -/// if (startMutations != *enumState.mutationsPtr) -/// objc_enumerationMutation(l_collection); -/// elem = (type)enumState.itemsPtr[counter++]; -/// stmts; -/// __continue_label: ; -/// } while (counter < limit); -/// } while (limit = [l_collection countByEnumeratingWithState:&enumState -/// objects:__rw_items count:16]); -/// elem = nil; -/// __break_label: ; -/// } -/// else -/// elem = nil; -/// } -/// -Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S, - SourceLocation OrigEnd) { - assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty"); - assert(isa(Stmts.back()) && - "ObjCForCollectionStmt Statement stack mismatch"); - assert(!ObjCBcLabelNo.empty() && - "ObjCForCollectionStmt - Label No stack empty"); - - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - StringRef elementName; - std::string elementTypeAsString; - std::string buf; - buf = "\n{\n\t"; - if (DeclStmt *DS = dyn_cast(S->getElement())) { - // type elem; - NamedDecl* D = cast(DS->getSingleDecl()); - QualType ElementType = cast(D)->getType(); - if (ElementType->isObjCQualifiedIdType() || - ElementType->isObjCQualifiedInterfaceType()) - // Simply use 'id' for all qualified types. - elementTypeAsString = "id"; - else - elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy()); - buf += elementTypeAsString; - buf += " "; - elementName = D->getName(); - buf += elementName; - buf += ";\n\t"; - } - else { - DeclRefExpr *DR = cast(S->getElement()); - elementName = DR->getDecl()->getName(); - ValueDecl *VD = cast(DR->getDecl()); - if (VD->getType()->isObjCQualifiedIdType() || - VD->getType()->isObjCQualifiedInterfaceType()) - // Simply use 'id' for all qualified types. - elementTypeAsString = "id"; - else - elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy()); - } - - // struct __objcFastEnumerationState enumState = { 0 }; - buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t"; - // id __rw_items[16]; - buf += "id __rw_items[16];\n\t"; - // id l_collection = (id) - buf += "id l_collection = (id)"; - // Find start location of 'collection' the hard way! - const char *startCollectionBuf = startBuf; - startCollectionBuf += 3; // skip 'for' - startCollectionBuf = strchr(startCollectionBuf, '('); - startCollectionBuf++; // skip '(' - // find 'in' and skip it. - while (*startCollectionBuf != ' ' || - *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' || - (*(startCollectionBuf+3) != ' ' && - *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '(')) - startCollectionBuf++; - startCollectionBuf += 3; - - // Replace: "for (type element in" with string constructed thus far. 
- ReplaceText(startLoc, startCollectionBuf - startBuf, buf); - // Replace ')' in for '(' type elem in collection ')' with ';' - SourceLocation rightParenLoc = S->getRParenLoc(); - const char *rparenBuf = SM->getCharacterData(rightParenLoc); - SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf); - buf = ";\n\t"; - - // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState - // objects:__rw_items count:16]; - // which is synthesized into: - // unsigned int limit = - // ((unsigned int (*) - // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int)) - // (void *)objc_msgSend)((id)l_collection, - // sel_registerName( - // "countByEnumeratingWithState:objects:count:"), - // (struct __objcFastEnumerationState *)&state, - // (id *)__rw_items, (unsigned int)16); - buf += "unsigned long limit =\n\t\t"; - SynthCountByEnumWithState(buf); - buf += ";\n\t"; - /// if (limit) { - /// unsigned long startMutations = *enumState.mutationsPtr; - /// do { - /// unsigned long counter = 0; - /// do { - /// if (startMutations != *enumState.mutationsPtr) - /// objc_enumerationMutation(l_collection); - /// elem = (type)enumState.itemsPtr[counter++]; - buf += "if (limit) {\n\t"; - buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t"; - buf += "do {\n\t\t"; - buf += "unsigned long counter = 0;\n\t\t"; - buf += "do {\n\t\t\t"; - buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t"; - buf += "objc_enumerationMutation(l_collection);\n\t\t\t"; - buf += elementName; - buf += " = ("; - buf += elementTypeAsString; - buf += ")enumState.itemsPtr[counter++];"; - // Replace ')' in for '(' type elem in collection ')' with all of these. - ReplaceText(lparenLoc, 1, buf); - - /// __continue_label: ; - /// } while (counter < limit); - /// } while (limit = [l_collection countByEnumeratingWithState:&enumState - /// objects:__rw_items count:16]); - /// elem = nil; - /// __break_label: ; - /// } - /// else - /// elem = nil; - /// } - /// - buf = ";\n\t"; - buf += "__continue_label_"; - buf += utostr(ObjCBcLabelNo.back()); - buf += ": ;"; - buf += "\n\t\t"; - buf += "} while (counter < limit);\n\t"; - buf += "} while (limit = "; - SynthCountByEnumWithState(buf); - buf += ");\n\t"; - buf += elementName; - buf += " = (("; - buf += elementTypeAsString; - buf += ")0);\n\t"; - buf += "__break_label_"; - buf += utostr(ObjCBcLabelNo.back()); - buf += ": ;\n\t"; - buf += "}\n\t"; - buf += "else\n\t\t"; - buf += elementName; - buf += " = (("; - buf += elementTypeAsString; - buf += ")0);\n\t"; - buf += "}\n"; - - // Insert all these *after* the statement body. - // FIXME: If this should support Obj-C++, support CXXTryStmt - if (isa(S->getBody())) { - SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1); - InsertText(endBodyLoc, buf); - } else { - /* Need to treat single statements specially. For example: - * - * for (A *a in b) if (stuff()) break; - * for (A *a in b) xxxyy; - * - * The following code simply scans ahead to the semi to find the actual end. 
- */ - const char *stmtBuf = SM->getCharacterData(OrigEnd); - const char *semiBuf = strchr(stmtBuf, ';'); - assert(semiBuf && "Can't find ';'"); - SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1); - InsertText(endBodyLoc, buf); - } - Stmts.pop_back(); - ObjCBcLabelNo.pop_back(); - return 0; -} - -/// RewriteObjCSynchronizedStmt - -/// This routine rewrites @synchronized(expr) stmt; -/// into: -/// objc_sync_enter(expr); -/// @try stmt @finally { objc_sync_exit(expr); } -/// -Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) { - // Get the start location and compute the semi location. - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '@') && "bogus @synchronized location"); - - std::string buf; - buf = "objc_sync_enter((id)"; - const char *lparenBuf = startBuf; - while (*lparenBuf != '(') lparenBuf++; - ReplaceText(startLoc, lparenBuf-startBuf+1, buf); - // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since - // the sync expression is typically a message expression that's already - // been rewritten! (which implies the SourceLocation's are invalid). - SourceLocation endLoc = S->getSynchBody()->getLocStart(); - const char *endBuf = SM->getCharacterData(endLoc); - while (*endBuf != ')') endBuf--; - SourceLocation rparenLoc = startLoc.getLocWithOffset(endBuf-startBuf); - buf = ");\n"; - // declare a new scope with two variables, _stack and _rethrow. - buf += "/* @try scope begin */ \n{ struct _objc_exception_data {\n"; - buf += "int buf[18/*32-bit i386*/];\n"; - buf += "char *pointers[4];} _stack;\n"; - buf += "id volatile _rethrow = 0;\n"; - buf += "objc_exception_try_enter(&_stack);\n"; - buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n"; - ReplaceText(rparenLoc, 1, buf); - startLoc = S->getSynchBody()->getLocEnd(); - startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '}') && "bogus @synchronized block"); - SourceLocation lastCurlyLoc = startLoc; - buf = "}\nelse {\n"; - buf += " _rethrow = objc_exception_extract(&_stack);\n"; - buf += "}\n"; - buf += "{ /* implicit finally clause */\n"; - buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n"; - - std::string syncBuf; - syncBuf += " objc_sync_exit("; - - Expr *syncExpr = S->getSynchExpr(); - CastKind CK = syncExpr->getType()->isObjCObjectPointerType() - ? CK_BitCast : - syncExpr->getType()->isBlockPointerType() - ? CK_BlockPointerToObjCPointerCast - : CK_CPointerToObjCPointerCast; - syncExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK, syncExpr); - std::string syncExprBufS; - llvm::raw_string_ostream syncExprBuf(syncExprBufS); - syncExpr->printPretty(syncExprBuf, 0, PrintingPolicy(LangOpts)); - syncBuf += syncExprBuf.str(); - syncBuf += ");"; - - buf += syncBuf; - buf += "\n if (_rethrow) objc_exception_throw(_rethrow);\n"; - buf += "}\n"; - buf += "}"; - - ReplaceText(lastCurlyLoc, 1, buf); - - bool hasReturns = false; - HasReturnStmts(S->getSynchBody(), hasReturns); - if (hasReturns) - RewriteSyncReturnStmts(S->getSynchBody(), syncBuf); - - return 0; -} - -void RewriteObjC::WarnAboutReturnGotoStmts(Stmt *S) -{ - // Perform a bottom up traversal of all children. 
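// For illustration, "@synchronized (lock) { work(); }" (with a hypothetical
// lock expression) is rewritten along the lines of:
//
//   objc_sync_enter((id)lock);
//   /* @try scope begin */ { struct _objc_exception_data {
//     int buf[18/*32-bit i386*/];
//     char *pointers[4];} _stack;
//   id volatile _rethrow = 0;
//   objc_exception_try_enter(&_stack);
//   if (!_setjmp(_stack.buf)) /* @try block continue */ { work(); }
//   else { _rethrow = objc_exception_extract(&_stack); }
//   { /* implicit finally clause */
//     if (!_rethrow) objc_exception_try_exit(&_stack);
//     objc_sync_exit((id)lock);
//     if (_rethrow) objc_exception_throw(_rethrow);
//   } }
//
// mirroring the buffer text assembled in RewriteObjCSynchronizedStmt above.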
- for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) - WarnAboutReturnGotoStmts(*CI); - - if (isa(S) || isa(S)) { - Diags.Report(Context->getFullLoc(S->getLocStart()), - TryFinallyContainsReturnDiag); - } - return; -} - -void RewriteObjC::HasReturnStmts(Stmt *S, bool &hasReturns) -{ - // Perform a bottom up traversal of all children. - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) - HasReturnStmts(*CI, hasReturns); - - if (isa(S)) - hasReturns = true; - return; -} - -void RewriteObjC::RewriteTryReturnStmts(Stmt *S) { - // Perform a bottom up traversal of all children. - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) { - RewriteTryReturnStmts(*CI); - } - if (isa(S)) { - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - - const char *semiBuf = strchr(startBuf, ';'); - assert((*semiBuf == ';') && "RewriteTryReturnStmts: can't find ';'"); - SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1); - - std::string buf; - buf = "{ objc_exception_try_exit(&_stack); return"; - - ReplaceText(startLoc, 6, buf); - InsertText(onePastSemiLoc, "}"); - } - return; -} - -void RewriteObjC::RewriteSyncReturnStmts(Stmt *S, std::string syncExitBuf) { - // Perform a bottom up traversal of all children. - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) { - RewriteSyncReturnStmts(*CI, syncExitBuf); - } - if (isa(S)) { - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - - const char *semiBuf = strchr(startBuf, ';'); - assert((*semiBuf == ';') && "RewriteSyncReturnStmts: can't find ';'"); - SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1); - - std::string buf; - buf = "{ objc_exception_try_exit(&_stack);"; - buf += syncExitBuf; - buf += " return"; - - ReplaceText(startLoc, 6, buf); - InsertText(onePastSemiLoc, "}"); - } - return; -} - -Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) { - // Get the start location and compute the semi location. - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '@') && "bogus @try location"); - - std::string buf; - // declare a new scope with two variables, _stack and _rethrow. 
- buf = "/* @try scope begin */ { struct _objc_exception_data {\n"; - buf += "int buf[18/*32-bit i386*/];\n"; - buf += "char *pointers[4];} _stack;\n"; - buf += "id volatile _rethrow = 0;\n"; - buf += "objc_exception_try_enter(&_stack);\n"; - buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n"; - - ReplaceText(startLoc, 4, buf); - - startLoc = S->getTryBody()->getLocEnd(); - startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '}') && "bogus @try block"); - - SourceLocation lastCurlyLoc = startLoc; - if (S->getNumCatchStmts()) { - startLoc = startLoc.getLocWithOffset(1); - buf = " /* @catch begin */ else {\n"; - buf += " id _caught = objc_exception_extract(&_stack);\n"; - buf += " objc_exception_try_enter (&_stack);\n"; - buf += " if (_setjmp(_stack.buf))\n"; - buf += " _rethrow = objc_exception_extract(&_stack);\n"; - buf += " else { /* @catch continue */"; - - InsertText(startLoc, buf); - } else { /* no catch list */ - buf = "}\nelse {\n"; - buf += " _rethrow = objc_exception_extract(&_stack);\n"; - buf += "}"; - ReplaceText(lastCurlyLoc, 1, buf); - } - Stmt *lastCatchBody = 0; - for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) { - ObjCAtCatchStmt *Catch = S->getCatchStmt(I); - VarDecl *catchDecl = Catch->getCatchParamDecl(); - - if (I == 0) - buf = "if ("; // we are generating code for the first catch clause - else - buf = "else if ("; - startLoc = Catch->getLocStart(); - startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '@') && "bogus @catch location"); - - const char *lParenLoc = strchr(startBuf, '('); - - if (Catch->hasEllipsis()) { - // Now rewrite the body... - lastCatchBody = Catch->getCatchBody(); - SourceLocation bodyLoc = lastCatchBody->getLocStart(); - const char *bodyBuf = SM->getCharacterData(bodyLoc); - assert(*SM->getCharacterData(Catch->getRParenLoc()) == ')' && - "bogus @catch paren location"); - assert((*bodyBuf == '{') && "bogus @catch body location"); - - buf += "1) { id _tmp = _caught;"; - Rewrite.ReplaceText(startLoc, bodyBuf-startBuf+1, buf); - } else if (catchDecl) { - QualType t = catchDecl->getType(); - if (t == Context->getObjCIdType()) { - buf += "1) { "; - ReplaceText(startLoc, lParenLoc-startBuf+1, buf); - } else if (const ObjCObjectPointerType *Ptr = - t->getAs()) { - // Should be a pointer to a class. - ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface(); - if (IDecl) { - buf += "objc_exception_match((struct objc_class *)objc_getClass(\""; - buf += IDecl->getNameAsString(); - buf += "\"), (struct objc_object *)_caught)) { "; - ReplaceText(startLoc, lParenLoc-startBuf+1, buf); - } - } - // Now rewrite the body... - lastCatchBody = Catch->getCatchBody(); - SourceLocation rParenLoc = Catch->getRParenLoc(); - SourceLocation bodyLoc = lastCatchBody->getLocStart(); - const char *bodyBuf = SM->getCharacterData(bodyLoc); - const char *rParenBuf = SM->getCharacterData(rParenLoc); - assert((*rParenBuf == ')') && "bogus @catch paren location"); - assert((*bodyBuf == '{') && "bogus @catch body location"); - - // Here we replace ") {" with "= _caught;" (which initializes and - // declares the @catch parameter). - ReplaceText(rParenLoc, bodyBuf-rParenBuf+1, " = _caught;"); - } else { - llvm_unreachable("@catch rewrite bug"); - } - } - // Complete the catch list... - if (lastCatchBody) { - SourceLocation bodyLoc = lastCatchBody->getLocEnd(); - assert(*SM->getCharacterData(bodyLoc) == '}' && - "bogus @catch body location"); - - // Insert the last (implicit) else clause *before* the right curly brace. 
- bodyLoc = bodyLoc.getLocWithOffset(-1); - buf = "} /* last catch end */\n"; - buf += "else {\n"; - buf += " _rethrow = _caught;\n"; - buf += " objc_exception_try_exit(&_stack);\n"; - buf += "} } /* @catch end */\n"; - if (!S->getFinallyStmt()) - buf += "}\n"; - InsertText(bodyLoc, buf); - - // Set lastCurlyLoc - lastCurlyLoc = lastCatchBody->getLocEnd(); - } - if (ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt()) { - startLoc = finalStmt->getLocStart(); - startBuf = SM->getCharacterData(startLoc); - assert((*startBuf == '@') && "bogus @finally start"); - - ReplaceText(startLoc, 8, "/* @finally */"); - - Stmt *body = finalStmt->getFinallyBody(); - SourceLocation startLoc = body->getLocStart(); - SourceLocation endLoc = body->getLocEnd(); - assert(*SM->getCharacterData(startLoc) == '{' && - "bogus @finally body location"); - assert(*SM->getCharacterData(endLoc) == '}' && - "bogus @finally body location"); - - startLoc = startLoc.getLocWithOffset(1); - InsertText(startLoc, " if (!_rethrow) objc_exception_try_exit(&_stack);\n"); - endLoc = endLoc.getLocWithOffset(-1); - InsertText(endLoc, " if (_rethrow) objc_exception_throw(_rethrow);\n"); - - // Set lastCurlyLoc - lastCurlyLoc = body->getLocEnd(); - - // Now check for any return/continue/go statements within the @try. - WarnAboutReturnGotoStmts(S->getTryBody()); - } else { /* no finally clause - make sure we synthesize an implicit one */ - buf = "{ /* implicit finally clause */\n"; - buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n"; - buf += " if (_rethrow) objc_exception_throw(_rethrow);\n"; - buf += "}"; - ReplaceText(lastCurlyLoc, 1, buf); - - // Now check for any return/continue/go statements within the @try. - // The implicit finally clause won't called if the @try contains any - // jump statements. - bool hasReturns = false; - HasReturnStmts(S->getTryBody(), hasReturns); - if (hasReturns) - RewriteTryReturnStmts(S->getTryBody()); - } - // Now emit the final closing curly brace... - lastCurlyLoc = lastCurlyLoc.getLocWithOffset(1); - InsertText(lastCurlyLoc, " } /* @try scope end */\n"); - return 0; -} - -// This can't be done with ReplaceStmt(S, ThrowExpr), since -// the throw expression is typically a message expression that's already -// been rewritten! (which implies the SourceLocation's are invalid). -Stmt *RewriteObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) { - // Get the start location and compute the semi location. - SourceLocation startLoc = S->getLocStart(); - const char *startBuf = SM->getCharacterData(startLoc); - - assert((*startBuf == '@') && "bogus @throw location"); - - std::string buf; - /* void objc_exception_throw(id) __attribute__((noreturn)); */ - if (S->getThrowExpr()) - buf = "objc_exception_throw("; - else // add an implicit argument - buf = "objc_exception_throw(_caught"; - - // handle "@ throw" correctly. - const char *wBuf = strchr(startBuf, 'w'); - assert((*wBuf == 'w') && "@throw: can't find 'w'"); - ReplaceText(startLoc, wBuf-startBuf+1, buf); - - const char *semiBuf = strchr(startBuf, ';'); - assert((*semiBuf == ';') && "@throw: can't find ';'"); - SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf); - ReplaceText(semiLoc, 1, ");"); - return 0; -} - -Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) { - // Create a new string expression. 
- QualType StrType = Context->getPointerType(Context->CharTy); - std::string StrEncoding; - Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding); - Expr *Replacement = StringLiteral::Create(*Context, StrEncoding, - StringLiteral::Ascii, false, - StrType, SourceLocation()); - ReplaceStmt(Exp, Replacement); - - // Replace this subexpr in the parent. - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return Replacement; -} - -Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) { - if (!SelGetUidFunctionDecl) - SynthSelGetUidFunctionDecl(); - assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl"); - // Create a call to sel_registerName("selName"). - SmallVector SelExprs; - QualType argType = Context->getPointerType(Context->CharTy); - SelExprs.push_back(StringLiteral::Create(*Context, - Exp->getSelector().getAsString(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, - &SelExprs[0], SelExprs.size()); - ReplaceStmt(Exp, SelExp); - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return SelExp; -} - -CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl( - FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc, - SourceLocation EndLoc) { - // Get the type, we will need to reference it in a couple spots. - QualType msgSendType = FD->getType(); - - // Create a reference to the objc_msgSend() declaration. - DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, msgSendType, - VK_LValue, SourceLocation()); - - // Now, we cast the reference to a pointer to the objc_msgSend type. - QualType pToFunc = Context->getPointerType(msgSendType); - ImplicitCastExpr *ICE = - ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay, - DRE, 0, VK_RValue); - - const FunctionType *FT = msgSendType->getAs(); - - CallExpr *Exp = - new (Context) CallExpr(*Context, ICE, args, nargs, - FT->getCallResultType(*Context), - VK_RValue, EndLoc); - return Exp; -} - -static bool scanForProtocolRefs(const char *startBuf, const char *endBuf, - const char *&startRef, const char *&endRef) { - while (startBuf < endBuf) { - if (*startBuf == '<') - startRef = startBuf; // mark the start. - if (*startBuf == '>') { - if (startRef && *startRef == '<') { - endRef = startBuf; // mark the end. 
- return true; - } - return false; - } - startBuf++; - } - return false; -} - -static void scanToNextArgument(const char *&argRef) { - int angle = 0; - while (*argRef != ')' && (*argRef != ',' || angle > 0)) { - if (*argRef == '<') - angle++; - else if (*argRef == '>') - angle--; - argRef++; - } - assert(angle == 0 && "scanToNextArgument - bad protocol type syntax"); -} - -bool RewriteObjC::needToScanForQualifiers(QualType T) { - if (T->isObjCQualifiedIdType()) - return true; - if (const PointerType *PT = T->getAs()) { - if (PT->getPointeeType()->isObjCQualifiedIdType()) - return true; - } - if (T->isObjCObjectPointerType()) { - T = T->getPointeeType(); - return T->isObjCQualifiedInterfaceType(); - } - if (T->isArrayType()) { - QualType ElemTy = Context->getBaseElementType(T); - return needToScanForQualifiers(ElemTy); - } - return false; -} - -void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) { - QualType Type = E->getType(); - if (needToScanForQualifiers(Type)) { - SourceLocation Loc, EndLoc; - - if (const CStyleCastExpr *ECE = dyn_cast(E)) { - Loc = ECE->getLParenLoc(); - EndLoc = ECE->getRParenLoc(); - } else { - Loc = E->getLocStart(); - EndLoc = E->getLocEnd(); - } - // This will defend against trying to rewrite synthesized expressions. - if (Loc.isInvalid() || EndLoc.isInvalid()) - return; - - const char *startBuf = SM->getCharacterData(Loc); - const char *endBuf = SM->getCharacterData(EndLoc); - const char *startRef = 0, *endRef = 0; - if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { - // Get the locations of the startRef, endRef. - SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf); - SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1); - // Comment out the protocol references. - InsertText(LessLoc, "/*"); - InsertText(GreaterLoc, "*/"); - } - } -} - -void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) { - SourceLocation Loc; - QualType Type; - const FunctionProtoType *proto = 0; - if (VarDecl *VD = dyn_cast(Dcl)) { - Loc = VD->getLocation(); - Type = VD->getType(); - } - else if (FunctionDecl *FD = dyn_cast(Dcl)) { - Loc = FD->getLocation(); - // Check for ObjC 'id' and class types that have been adorned with protocol - // information (id

<p>, C<p>
    *). The protocol references need to be rewritten! - const FunctionType *funcType = FD->getType()->getAs(); - assert(funcType && "missing function type"); - proto = dyn_cast(funcType); - if (!proto) - return; - Type = proto->getResultType(); - } - else if (FieldDecl *FD = dyn_cast(Dcl)) { - Loc = FD->getLocation(); - Type = FD->getType(); - } - else - return; - - if (needToScanForQualifiers(Type)) { - // Since types are unique, we need to scan the buffer. - - const char *endBuf = SM->getCharacterData(Loc); - const char *startBuf = endBuf; - while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart) - startBuf--; // scan backward (from the decl location) for return type. - const char *startRef = 0, *endRef = 0; - if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { - // Get the locations of the startRef, endRef. - SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf); - SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1); - // Comment out the protocol references. - InsertText(LessLoc, "/*"); - InsertText(GreaterLoc, "*/"); - } - } - if (!proto) - return; // most likely, was a variable - // Now check arguments. - const char *startBuf = SM->getCharacterData(Loc); - const char *startFuncBuf = startBuf; - for (unsigned i = 0; i < proto->getNumArgs(); i++) { - if (needToScanForQualifiers(proto->getArgType(i))) { - // Since types are unique, we need to scan the buffer. - - const char *endBuf = startBuf; - // scan forward (from the decl location) for argument types. - scanToNextArgument(endBuf); - const char *startRef = 0, *endRef = 0; - if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) { - // Get the locations of the startRef, endRef. - SourceLocation LessLoc = - Loc.getLocWithOffset(startRef-startFuncBuf); - SourceLocation GreaterLoc = - Loc.getLocWithOffset(endRef-startFuncBuf+1); - // Comment out the protocol references. - InsertText(LessLoc, "/*"); - InsertText(GreaterLoc, "*/"); - } - startBuf = ++endBuf; - } - else { - // If the function name is derived from a macro expansion, then the - // argument buffer will not follow the name. Need to speak with Chris. - while (*startBuf && *startBuf != ')' && *startBuf != ',') - startBuf++; // scan forward (from the decl location) for argument types. - startBuf++; - } - } -} - -void RewriteObjC::RewriteTypeOfDecl(VarDecl *ND) { - QualType QT = ND->getType(); - const Type* TypePtr = QT->getAs(); - if (!isa(TypePtr)) - return; - while (isa(TypePtr)) { - const TypeOfExprType *TypeOfExprTypePtr = cast(TypePtr); - QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType(); - TypePtr = QT->getAs(); - } - // FIXME. 
This will not work for multiple declarators; as in: - // __typeof__(a) b,c,d; - std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy())); - SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); - const char *startBuf = SM->getCharacterData(DeclLoc); - if (ND->getInit()) { - std::string Name(ND->getNameAsString()); - TypeAsString += " " + Name + " = "; - Expr *E = ND->getInit(); - SourceLocation startLoc; - if (const CStyleCastExpr *ECE = dyn_cast(E)) - startLoc = ECE->getLParenLoc(); - else - startLoc = E->getLocStart(); - startLoc = SM->getExpansionLoc(startLoc); - const char *endBuf = SM->getCharacterData(startLoc); - ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString); - } - else { - SourceLocation X = ND->getLocEnd(); - X = SM->getExpansionLoc(X); - const char *endBuf = SM->getCharacterData(X); - ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString); - } -} - -// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str); -void RewriteObjC::SynthSelGetUidFunctionDecl() { - IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName"); - SmallVector ArgTys; - ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); - QualType getFuncType = - getSimpleFunctionType(Context->getObjCSelType(), &ArgTys[0], ArgTys.size()); - SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - SelGetUidIdent, getFuncType, 0, - SC_Extern, - SC_None, false); -} - -void RewriteObjC::RewriteFunctionDecl(FunctionDecl *FD) { - // declared in - if (FD->getIdentifier() && - FD->getName() == "sel_registerName") { - SelGetUidFunctionDecl = FD; - return; - } - RewriteObjCQualifiedInterfaceTypes(FD); -} - -void RewriteObjC::RewriteBlockPointerType(std::string& Str, QualType Type) { - std::string TypeString(Type.getAsString(Context->getPrintingPolicy())); - const char *argPtr = TypeString.c_str(); - if (!strchr(argPtr, '^')) { - Str += TypeString; - return; - } - while (*argPtr) { - Str += (*argPtr == '^' ? '*' : *argPtr); - argPtr++; - } -} - -// FIXME. Consolidate this routine with RewriteBlockPointerType. 
-void RewriteObjC::RewriteBlockPointerTypeVariable(std::string& Str, - ValueDecl *VD) { - QualType Type = VD->getType(); - std::string TypeString(Type.getAsString(Context->getPrintingPolicy())); - const char *argPtr = TypeString.c_str(); - int paren = 0; - while (*argPtr) { - switch (*argPtr) { - case '(': - Str += *argPtr; - paren++; - break; - case ')': - Str += *argPtr; - paren--; - break; - case '^': - Str += '*'; - if (paren == 1) - Str += VD->getNameAsString(); - break; - default: - Str += *argPtr; - break; - } - argPtr++; - } -} - - -void RewriteObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) { - SourceLocation FunLocStart = FD->getTypeSpecStartLoc(); - const FunctionType *funcType = FD->getType()->getAs(); - const FunctionProtoType *proto = dyn_cast(funcType); - if (!proto) - return; - QualType Type = proto->getResultType(); - std::string FdStr = Type.getAsString(Context->getPrintingPolicy()); - FdStr += " "; - FdStr += FD->getName(); - FdStr += "("; - unsigned numArgs = proto->getNumArgs(); - for (unsigned i = 0; i < numArgs; i++) { - QualType ArgType = proto->getArgType(i); - RewriteBlockPointerType(FdStr, ArgType); - if (i+1 < numArgs) - FdStr += ", "; - } - FdStr += ");\n"; - InsertText(FunLocStart, FdStr); - CurFunctionDeclToDeclareForBlock = 0; -} - -// SynthSuperContructorFunctionDecl - id objc_super(id obj, id super); -void RewriteObjC::SynthSuperContructorFunctionDecl() { - if (SuperContructorFunctionDecl) - return; - IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super"); - SmallVector ArgTys; - QualType argT = Context->getObjCIdType(); - assert(!argT.isNull() && "Can't find 'id' type"); - ArgTys.push_back(argT); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size()); - SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...); -void RewriteObjC::SynthMsgSendFunctionDecl() { - IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend"); - SmallVector ArgTys; - QualType argT = Context->getObjCIdType(); - assert(!argT.isNull() && "Can't find 'id' type"); - ArgTys.push_back(argT); - argT = Context->getObjCSelType(); - assert(!argT.isNull() && "Can't find 'SEL' type"); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size(), - true /*isVariadic*/); - MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(struct objc_super *, SEL op, ...); -void RewriteObjC::SynthMsgSendSuperFunctionDecl() { - IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper"); - SmallVector ArgTys; - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("objc_super")); - QualType argT = Context->getPointerType(Context->getTagDeclType(RD)); - assert(!argT.isNull() && "Can't build 'struct objc_super *' type"); - ArgTys.push_back(argT); - argT = Context->getObjCSelType(); - assert(!argT.isNull() && "Can't find 'SEL' type"); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size(), - true /*isVariadic*/); - 
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...); -void RewriteObjC::SynthMsgSendStretFunctionDecl() { - IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret"); - SmallVector ArgTys; - QualType argT = Context->getObjCIdType(); - assert(!argT.isNull() && "Can't find 'id' type"); - ArgTys.push_back(argT); - argT = Context->getObjCSelType(); - assert(!argT.isNull() && "Can't find 'SEL' type"); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size(), - true /*isVariadic*/); - MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendSuperStretFunctionDecl - -// id objc_msgSendSuper_stret(struct objc_super *, SEL op, ...); -void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() { - IdentifierInfo *msgSendIdent = - &Context->Idents.get("objc_msgSendSuper_stret"); - SmallVector ArgTys; - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("objc_super")); - QualType argT = Context->getPointerType(Context->getTagDeclType(RD)); - assert(!argT.isNull() && "Can't build 'struct objc_super *' type"); - ArgTys.push_back(argT); - argT = Context->getObjCSelType(); - assert(!argT.isNull() && "Can't find 'SEL' type"); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size(), - true /*isVariadic*/); - MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...); -void RewriteObjC::SynthMsgSendFpretFunctionDecl() { - IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret"); - SmallVector ArgTys; - QualType argT = Context->getObjCIdType(); - assert(!argT.isNull() && "Can't find 'id' type"); - ArgTys.push_back(argT); - argT = Context->getObjCSelType(); - assert(!argT.isNull() && "Can't find 'SEL' type"); - ArgTys.push_back(argT); - QualType msgSendType = getSimpleFunctionType(Context->DoubleTy, - &ArgTys[0], ArgTys.size(), - true /*isVariadic*/); - MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - msgSendIdent, msgSendType, 0, - SC_Extern, - SC_None, false); -} - -// SynthGetClassFunctionDecl - id objc_getClass(const char *name); -void RewriteObjC::SynthGetClassFunctionDecl() { - IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass"); - SmallVector ArgTys; - ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); - QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size()); - GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - getClassIdent, getClassType, 0, - SC_Extern, - SC_None, false); -} - -// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls); -void RewriteObjC::SynthGetSuperClassFunctionDecl() { - IdentifierInfo *getSuperClassIdent = - &Context->Idents.get("class_getSuperclass"); - SmallVector ArgTys; - 
ArgTys.push_back(Context->getObjCClassType()); - QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(), - &ArgTys[0], ArgTys.size()); - GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - getSuperClassIdent, - getClassType, 0, - SC_Extern, - SC_None, - false); -} - -// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name); -void RewriteObjC::SynthGetMetaClassFunctionDecl() { - IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass"); - SmallVector ArgTys; - ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst())); - QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(), - &ArgTys[0], ArgTys.size()); - GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl, - SourceLocation(), - SourceLocation(), - getClassIdent, getClassType, 0, - SC_Extern, - SC_None, false); -} - -Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) { - QualType strType = getConstantStringStructType(); - - std::string S = "__NSConstantStringImpl_"; - - std::string tmpName = InFileName; - unsigned i; - for (i=0; i < tmpName.length(); i++) { - char c = tmpName.at(i); - // replace any non alphanumeric characters with '_'. - if (!isalpha(c) && (c < '0' || c > '9')) - tmpName[i] = '_'; - } - S += tmpName; - S += "_"; - S += utostr(NumObjCStringLiterals++); - - Preamble += "static __NSConstantStringImpl " + S; - Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,"; - Preamble += "0x000007c8,"; // utf8_str - // The pretty printer for StringLiteral handles escape characters properly. - std::string prettyBufS; - llvm::raw_string_ostream prettyBuf(prettyBufS); - Exp->getString()->printPretty(prettyBuf, 0, PrintingPolicy(LangOpts)); - Preamble += prettyBuf.str(); - Preamble += ","; - Preamble += utostr(Exp->getString()->getByteLength()) + "};\n"; - - VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(), - SourceLocation(), &Context->Idents.get(S), - strType, 0, SC_Static, SC_None); - DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue, - SourceLocation()); - Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf, - Context->getPointerType(DRE->getType()), - VK_RValue, OK_Ordinary, - SourceLocation()); - // cast to NSConstantString * - CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(), - CK_CPointerToObjCPointerCast, Unop); - ReplaceStmt(Exp, cast); - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. 
- return cast; -} - -// struct objc_super { struct objc_object *receiver; struct objc_class *super; }; -QualType RewriteObjC::getSuperStructType() { - if (!SuperStructDecl) { - SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("objc_super")); - QualType FieldTypes[2]; - - // struct objc_object *receiver; - FieldTypes[0] = Context->getObjCIdType(); - // struct objc_class *super; - FieldTypes[1] = Context->getObjCClassType(); - - // Create fields - for (unsigned i = 0; i < 2; ++i) { - SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl, - SourceLocation(), - SourceLocation(), 0, - FieldTypes[i], 0, - /*BitWidth=*/0, - /*Mutable=*/false, - ICIS_NoInit)); - } - - SuperStructDecl->completeDefinition(); - } - return Context->getTagDeclType(SuperStructDecl); -} - -QualType RewriteObjC::getConstantStringStructType() { - if (!ConstantStringDecl) { - ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("__NSConstantStringImpl")); - QualType FieldTypes[4]; - - // struct objc_object *receiver; - FieldTypes[0] = Context->getObjCIdType(); - // int flags; - FieldTypes[1] = Context->IntTy; - // char *str; - FieldTypes[2] = Context->getPointerType(Context->CharTy); - // long length; - FieldTypes[3] = Context->LongTy; - - // Create fields - for (unsigned i = 0; i < 4; ++i) { - ConstantStringDecl->addDecl(FieldDecl::Create(*Context, - ConstantStringDecl, - SourceLocation(), - SourceLocation(), 0, - FieldTypes[i], 0, - /*BitWidth=*/0, - /*Mutable=*/true, - ICIS_NoInit)); - } - - ConstantStringDecl->completeDefinition(); - } - return Context->getTagDeclType(ConstantStringDecl); -} - -CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavor, - QualType msgSendType, - QualType returnType, - SmallVectorImpl &ArgTypes, - SmallVectorImpl &MsgExprs, - ObjCMethodDecl *Method) { - // Create a reference to the objc_msgSend_stret() declaration. - DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor, - false, msgSendType, - VK_LValue, SourceLocation()); - // Need to cast objc_msgSend_stret to "void *" (see above comment). - CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(Context->VoidTy), - CK_BitCast, STDRE); - // Now do the "normal" pointer to function cast. - QualType castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), - Method ? Method->isVariadic() : false); - castType = Context->getPointerType(castType); - cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, - cast); - - // Don't forget the parens to enforce the proper binding. 
- ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast); - - const FunctionType *FT = msgSendType->getAs(); - CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0], - MsgExprs.size(), - FT->getResultType(), VK_RValue, - SourceLocation()); - return STCE; - -} - - -Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp, - SourceLocation StartLoc, - SourceLocation EndLoc) { - if (!SelGetUidFunctionDecl) - SynthSelGetUidFunctionDecl(); - if (!MsgSendFunctionDecl) - SynthMsgSendFunctionDecl(); - if (!MsgSendSuperFunctionDecl) - SynthMsgSendSuperFunctionDecl(); - if (!MsgSendStretFunctionDecl) - SynthMsgSendStretFunctionDecl(); - if (!MsgSendSuperStretFunctionDecl) - SynthMsgSendSuperStretFunctionDecl(); - if (!MsgSendFpretFunctionDecl) - SynthMsgSendFpretFunctionDecl(); - if (!GetClassFunctionDecl) - SynthGetClassFunctionDecl(); - if (!GetSuperClassFunctionDecl) - SynthGetSuperClassFunctionDecl(); - if (!GetMetaClassFunctionDecl) - SynthGetMetaClassFunctionDecl(); - - // default to objc_msgSend(). - FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl; - // May need to use objc_msgSend_stret() as well. - FunctionDecl *MsgSendStretFlavor = 0; - if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) { - QualType resultType = mDecl->getResultType(); - if (resultType->isRecordType()) - MsgSendStretFlavor = MsgSendStretFunctionDecl; - else if (resultType->isRealFloatingType()) - MsgSendFlavor = MsgSendFpretFunctionDecl; - } - - // Synthesize a call to objc_msgSend(). - SmallVector MsgExprs; - switch (Exp->getReceiverKind()) { - case ObjCMessageExpr::SuperClass: { - MsgSendFlavor = MsgSendSuperFunctionDecl; - if (MsgSendStretFlavor) - MsgSendStretFlavor = MsgSendSuperStretFunctionDecl; - assert(MsgSendFlavor && "MsgSendFlavor is NULL!"); - - ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface(); - - SmallVector InitExprs; - - // set the receiver to self, the first argument to all methods. - InitExprs.push_back( - NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK_BitCast, - new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(), - false, - Context->getObjCIdType(), - VK_RValue, - SourceLocation())) - ); // set the 'receiver'. - - // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - ClsExprs.push_back(StringLiteral::Create(*Context, - ClassDecl->getIdentifier()->getName(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, - EndLoc); - // (Class)objc_getClass("CurrentClass") - CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context, - Context->getObjCClassType(), - CK_BitCast, Cls); - ClsExprs.clear(); - ClsExprs.push_back(ArgExpr); - Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, - &ClsExprs[0], ClsExprs.size(), - StartLoc, EndLoc); - - // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) - // To turn off a warning, type-cast to 'id' - InitExprs.push_back( // set 'super class', using class_getSuperclass(). - NoTypeInfoCStyleCastExpr(Context, - Context->getObjCIdType(), - CK_BitCast, Cls)); - // struct objc_super - QualType superType = getSuperStructType(); - Expr *SuperRep; - - if (LangOpts.MicrosoftExt) { - SynthSuperContructorFunctionDecl(); - // Simulate a contructor call... 
- DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl, - false, superType, VK_LValue, - SourceLocation()); - SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], - InitExprs.size(), - superType, VK_LValue, - SourceLocation()); - // The code for super is a little tricky to prevent collision with - // the structure definition in the header. The rewriter has it's own - // internal definition (__rw_objc_super) that is uses. This is why - // we need the cast below. For example: - // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER")) - // - SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, - Context->getPointerType(SuperRep->getType()), - VK_RValue, OK_Ordinary, - SourceLocation()); - SuperRep = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(superType), - CK_BitCast, SuperRep); - } else { - // (struct objc_super) { } - InitListExpr *ILE = - new (Context) InitListExpr(*Context, SourceLocation(), - &InitExprs[0], InitExprs.size(), - SourceLocation()); - TypeSourceInfo *superTInfo - = Context->getTrivialTypeSourceInfo(superType); - SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo, - superType, VK_LValue, - ILE, false); - // struct objc_super * - SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, - Context->getPointerType(SuperRep->getType()), - VK_RValue, OK_Ordinary, - SourceLocation()); - } - MsgExprs.push_back(SuperRep); - break; - } - - case ObjCMessageExpr::Class: { - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - ObjCInterfaceDecl *Class - = Exp->getClassReceiver()->getAs()->getInterface(); - IdentifierInfo *clsName = Class->getIdentifier(); - ClsExprs.push_back(StringLiteral::Create(*Context, - clsName->getName(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, EndLoc); - MsgExprs.push_back(Cls); - break; - } - - case ObjCMessageExpr::SuperInstance:{ - MsgSendFlavor = MsgSendSuperFunctionDecl; - if (MsgSendStretFlavor) - MsgSendStretFlavor = MsgSendSuperStretFunctionDecl; - assert(MsgSendFlavor && "MsgSendFlavor is NULL!"); - ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface(); - SmallVector InitExprs; - - InitExprs.push_back( - NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK_BitCast, - new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(), - false, - Context->getObjCIdType(), - VK_RValue, SourceLocation())) - ); // set the 'receiver'. - - // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) - SmallVector ClsExprs; - QualType argType = Context->getPointerType(Context->CharTy); - ClsExprs.push_back(StringLiteral::Create(*Context, - ClassDecl->getIdentifier()->getName(), - StringLiteral::Ascii, false, argType, - SourceLocation())); - CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl, - &ClsExprs[0], - ClsExprs.size(), - StartLoc, EndLoc); - // (Class)objc_getClass("CurrentClass") - CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context, - Context->getObjCClassType(), - CK_BitCast, Cls); - ClsExprs.clear(); - ClsExprs.push_back(ArgExpr); - Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl, - &ClsExprs[0], ClsExprs.size(), - StartLoc, EndLoc); - - // (id)class_getSuperclass((Class)objc_getClass("CurrentClass")) - // To turn off a warning, type-cast to 'id' - InitExprs.push_back( - // set 'super class', using class_getSuperclass(). 
- NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK_BitCast, Cls)); - // struct objc_super - QualType superType = getSuperStructType(); - Expr *SuperRep; - - if (LangOpts.MicrosoftExt) { - SynthSuperContructorFunctionDecl(); - // Simulate a contructor call... - DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl, - false, superType, VK_LValue, - SourceLocation()); - SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], - InitExprs.size(), - superType, VK_LValue, SourceLocation()); - // The code for super is a little tricky to prevent collision with - // the structure definition in the header. The rewriter has it's own - // internal definition (__rw_objc_super) that is uses. This is why - // we need the cast below. For example: - // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER")) - // - SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf, - Context->getPointerType(SuperRep->getType()), - VK_RValue, OK_Ordinary, - SourceLocation()); - SuperRep = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(superType), - CK_BitCast, SuperRep); - } else { - // (struct objc_super) { } - InitListExpr *ILE = - new (Context) InitListExpr(*Context, SourceLocation(), - &InitExprs[0], InitExprs.size(), - SourceLocation()); - TypeSourceInfo *superTInfo - = Context->getTrivialTypeSourceInfo(superType); - SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo, - superType, VK_RValue, ILE, - false); - } - MsgExprs.push_back(SuperRep); - break; - } - - case ObjCMessageExpr::Instance: { - // Remove all type-casts because it may contain objc-style types; e.g. - // Foo *. - Expr *recExpr = Exp->getInstanceReceiver(); - while (CStyleCastExpr *CE = dyn_cast(recExpr)) - recExpr = CE->getSubExpr(); - CastKind CK = recExpr->getType()->isObjCObjectPointerType() - ? CK_BitCast : recExpr->getType()->isBlockPointerType() - ? CK_BlockPointerToObjCPointerCast - : CK_CPointerToObjCPointerCast; - - recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK, recExpr); - MsgExprs.push_back(recExpr); - break; - } - } - - // Create a call to sel_registerName("selName"), it will be the 2nd argument. - SmallVector SelExprs; - QualType argType = Context->getPointerType(Context->CharTy); - SelExprs.push_back(StringLiteral::Create(*Context, - Exp->getSelector().getAsString(), - StringLiteral::Ascii, false, - argType, SourceLocation())); - CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl, - &SelExprs[0], SelExprs.size(), - StartLoc, - EndLoc); - MsgExprs.push_back(SelExp); - - // Now push any user supplied arguments. - for (unsigned i = 0; i < Exp->getNumArgs(); i++) { - Expr *userExpr = Exp->getArg(i); - // Make all implicit casts explicit...ICE comes in handy:-) - if (ImplicitCastExpr *ICE = dyn_cast(userExpr)) { - // Reuse the ICE type, it is exactly what the doctor ordered. - QualType type = ICE->getType(); - if (needToScanForQualifiers(type)) - type = Context->getObjCIdType(); - // Make sure we convert "type (^)(...)" to "type (*)(...)". 
- (void)convertBlockPointerToFunctionPointer(type); - const Expr *SubExpr = ICE->IgnoreParenImpCasts(); - CastKind CK; - if (SubExpr->getType()->isIntegralType(*Context) && - type->isBooleanType()) { - CK = CK_IntegralToBoolean; - } else if (type->isObjCObjectPointerType()) { - if (SubExpr->getType()->isBlockPointerType()) { - CK = CK_BlockPointerToObjCPointerCast; - } else if (SubExpr->getType()->isPointerType()) { - CK = CK_CPointerToObjCPointerCast; - } else { - CK = CK_BitCast; - } - } else { - CK = CK_BitCast; - } - - userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr); - } - // Make id cast into an 'id' cast. - else if (CStyleCastExpr *CE = dyn_cast(userExpr)) { - if (CE->getType()->isObjCQualifiedIdType()) { - while ((CE = dyn_cast(userExpr))) - userExpr = CE->getSubExpr(); - CastKind CK; - if (userExpr->getType()->isIntegralType(*Context)) { - CK = CK_IntegralToPointer; - } else if (userExpr->getType()->isBlockPointerType()) { - CK = CK_BlockPointerToObjCPointerCast; - } else if (userExpr->getType()->isPointerType()) { - CK = CK_CPointerToObjCPointerCast; - } else { - CK = CK_BitCast; - } - userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(), - CK, userExpr); - } - } - MsgExprs.push_back(userExpr); - // We've transferred the ownership to MsgExprs. For now, we *don't* null - // out the argument in the original expression (since we aren't deleting - // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info. - //Exp->setArg(i, 0); - } - // Generate the funky cast. - CastExpr *cast; - SmallVector ArgTypes; - QualType returnType; - - // Push 'id' and 'SEL', the 2 implicit arguments. - if (MsgSendFlavor == MsgSendSuperFunctionDecl) - ArgTypes.push_back(Context->getPointerType(getSuperStructType())); - else - ArgTypes.push_back(Context->getObjCIdType()); - ArgTypes.push_back(Context->getObjCSelType()); - if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) { - // Push any user argument types. - for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(), - E = OMD->param_end(); PI != E; ++PI) { - QualType t = (*PI)->getType()->isObjCQualifiedIdType() - ? Context->getObjCIdType() - : (*PI)->getType(); - // Make sure we convert "t (^)(...)" to "t (*)(...)". - (void)convertBlockPointerToFunctionPointer(t); - ArgTypes.push_back(t); - } - returnType = Exp->getType(); - convertToUnqualifiedObjCType(returnType); - (void)convertBlockPointerToFunctionPointer(returnType); - } else { - returnType = Context->getObjCIdType(); - } - // Get the type, we will need to reference it in a couple spots. - QualType msgSendType = MsgSendFlavor->getType(); - - // Create a reference to the objc_msgSend() declaration. - DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType, - VK_LValue, SourceLocation()); - - // Need to cast objc_msgSend to "void *" (to workaround a GCC bandaid). - // If we don't do this cast, we get the following bizarre warning/note: - // xx.m:13: warning: function called through a non-compatible type - // xx.m:13: note: if this code is reached, the program will abort - cast = NoTypeInfoCStyleCastExpr(Context, - Context->getPointerType(Context->VoidTy), - CK_BitCast, DRE); - - // Now do the "normal" pointer to function cast. - QualType castType = - getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(), - // If we don't have a method decl, force a variadic cast. - Exp->getMethodDecl() ? 
Exp->getMethodDecl()->isVariadic() : true); - castType = Context->getPointerType(castType); - cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast, - cast); - - // Don't forget the parens to enforce the proper binding. - ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast); - - const FunctionType *FT = msgSendType->getAs(); - CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0], - MsgExprs.size(), - FT->getResultType(), VK_RValue, - EndLoc); - Stmt *ReplacingStmt = CE; - if (MsgSendStretFlavor) { - // We have the method which returns a struct/union. Must also generate - // call to objc_msgSend_stret and hang both varieties on a conditional - // expression which dictate which one to envoke depending on size of - // method's return type. - - CallExpr *STCE = SynthMsgSendStretCallExpr(MsgSendStretFlavor, - msgSendType, returnType, - ArgTypes, MsgExprs, - Exp->getMethodDecl()); - - // Build sizeof(returnType) - UnaryExprOrTypeTraitExpr *sizeofExpr = - new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf, - Context->getTrivialTypeSourceInfo(returnType), - Context->getSizeType(), SourceLocation(), - SourceLocation()); - // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...)) - // FIXME: Value of 8 is base on ppc32/x86 ABI for the most common cases. - // For X86 it is more complicated and some kind of target specific routine - // is needed to decide what to do. - unsigned IntSize = - static_cast(Context->getTypeSize(Context->IntTy)); - IntegerLiteral *limit = IntegerLiteral::Create(*Context, - llvm::APInt(IntSize, 8), - Context->IntTy, - SourceLocation()); - BinaryOperator *lessThanExpr = - new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy, - VK_RValue, OK_Ordinary, SourceLocation()); - // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...)) - ConditionalOperator *CondExpr = - new (Context) ConditionalOperator(lessThanExpr, - SourceLocation(), CE, - SourceLocation(), STCE, - returnType, VK_RValue, OK_Ordinary); - ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(), - CondExpr); - } - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return ReplacingStmt; -} - -Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) { - Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(), - Exp->getLocEnd()); - - // Now do the actual rewrite. - ReplaceStmt(Exp, ReplacingStmt); - - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return ReplacingStmt; -} - -// typedef struct objc_object Protocol; -QualType RewriteObjC::getProtocolType() { - if (!ProtocolTypeDecl) { - TypeSourceInfo *TInfo - = Context->getTrivialTypeSourceInfo(Context->getObjCIdType()); - ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("Protocol"), - TInfo); - } - return Context->getTypeDeclType(ProtocolTypeDecl); -} - -/// RewriteObjCProtocolExpr - Rewrite a protocol expression into -/// a synthesized/forward data reference (to the protocol's metadata). -/// The forward references (and metadata) are generated in -/// RewriteObjC::HandleTranslationUnit(). 
-Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) { - std::string Name = "_OBJC_PROTOCOL_" + Exp->getProtocol()->getNameAsString(); - IdentifierInfo *ID = &Context->Idents.get(Name); - VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(), - SourceLocation(), ID, getProtocolType(), 0, - SC_Extern, SC_None); - DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(), - VK_LValue, SourceLocation()); - Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf, - Context->getPointerType(DRE->getType()), - VK_RValue, OK_Ordinary, SourceLocation()); - CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(), - CK_BitCast, - DerefExpr); - ReplaceStmt(Exp, castExpr); - ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl()); - // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info. - return castExpr; - -} - -bool RewriteObjC::BufferContainsPPDirectives(const char *startBuf, - const char *endBuf) { - while (startBuf < endBuf) { - if (*startBuf == '#') { - // Skip whitespace. - for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf) - ; - if (!strncmp(startBuf, "if", strlen("if")) || - !strncmp(startBuf, "ifdef", strlen("ifdef")) || - !strncmp(startBuf, "ifndef", strlen("ifndef")) || - !strncmp(startBuf, "define", strlen("define")) || - !strncmp(startBuf, "undef", strlen("undef")) || - !strncmp(startBuf, "else", strlen("else")) || - !strncmp(startBuf, "elif", strlen("elif")) || - !strncmp(startBuf, "endif", strlen("endif")) || - !strncmp(startBuf, "pragma", strlen("pragma")) || - !strncmp(startBuf, "include", strlen("include")) || - !strncmp(startBuf, "import", strlen("import")) || - !strncmp(startBuf, "include_next", strlen("include_next"))) - return true; - } - startBuf++; - } - return false; -} - -/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to -/// an objective-c class with ivars. -void RewriteObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl, - std::string &Result) { - assert(CDecl && "Class missing in SynthesizeObjCInternalStruct"); - assert(CDecl->getName() != "" && - "Name missing in SynthesizeObjCInternalStruct"); - // Do not synthesize more than once. - if (ObjCSynthesizedStructs.count(CDecl)) - return; - ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass(); - int NumIvars = CDecl->ivar_size(); - SourceLocation LocStart = CDecl->getLocStart(); - SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc(); - - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - - // If no ivars and no root or if its root, directly or indirectly, - // have no ivars (thus not synthesized) then no need to synthesize this class. - if ((!CDecl->isThisDeclarationADefinition() || NumIvars == 0) && - (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) { - endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts); - ReplaceText(LocStart, endBuf-startBuf, Result); - return; - } - - // FIXME: This has potential of causing problem. If - // SynthesizeObjCInternalStruct is ever called recursively. - Result += "\nstruct "; - Result += CDecl->getNameAsString(); - if (LangOpts.MicrosoftExt) - Result += "_IMPL"; - - if (NumIvars > 0) { - const char *cursor = strchr(startBuf, '{'); - assert((cursor && endBuf) - && "SynthesizeObjCInternalStruct - malformed @interface"); - // If the buffer contains preprocessor directives, we do more fine-grained - // rewrites. 
This is intended to fix code that looks like (which occurs in - // NSURL.h, for example): - // - // #ifdef XYZ - // @interface Foo : NSObject - // #else - // @interface FooBar : NSObject - // #endif - // { - // int i; - // } - // @end - // - // This clause is segregated to avoid breaking the common case. - if (BufferContainsPPDirectives(startBuf, cursor)) { - SourceLocation L = RCDecl ? CDecl->getSuperClassLoc() : - CDecl->getAtStartLoc(); - const char *endHeader = SM->getCharacterData(L); - endHeader += Lexer::MeasureTokenLength(L, *SM, LangOpts); - - if (CDecl->protocol_begin() != CDecl->protocol_end()) { - // advance to the end of the referenced protocols. - while (endHeader < cursor && *endHeader != '>') endHeader++; - endHeader++; - } - // rewrite the original header - ReplaceText(LocStart, endHeader-startBuf, Result); - } else { - // rewrite the original header *without* disturbing the '{' - ReplaceText(LocStart, cursor-startBuf, Result); - } - if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) { - Result = "\n struct "; - Result += RCDecl->getNameAsString(); - Result += "_IMPL "; - Result += RCDecl->getNameAsString(); - Result += "_IVARS;\n"; - - // insert the super class structure definition. - SourceLocation OnePastCurly = - LocStart.getLocWithOffset(cursor-startBuf+1); - InsertText(OnePastCurly, Result); - } - cursor++; // past '{' - - // Now comment out any visibility specifiers. - while (cursor < endBuf) { - if (*cursor == '@') { - SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf); - // Skip whitespace. - for (++cursor; cursor[0] == ' ' || cursor[0] == '\t'; ++cursor) - /*scan*/; - - // FIXME: presence of @public, etc. inside comment results in - // this transformation as well, which is still correct c-code. - if (!strncmp(cursor, "public", strlen("public")) || - !strncmp(cursor, "private", strlen("private")) || - !strncmp(cursor, "package", strlen("package")) || - !strncmp(cursor, "protected", strlen("protected"))) - InsertText(atLoc, "// "); - } - // FIXME: If there are cases where '<' is used in ivar declaration part - // of user code, then scan the ivar list and use needToScanForQualifiers - // for type checking. - else if (*cursor == '<') { - SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf); - InsertText(atLoc, "/* "); - cursor = strchr(cursor, '>'); - cursor++; - atLoc = LocStart.getLocWithOffset(cursor-startBuf); - InsertText(atLoc, " */"); - } else if (*cursor == '^') { // rewrite block specifier. - SourceLocation caretLoc = LocStart.getLocWithOffset(cursor-startBuf); - ReplaceText(caretLoc, 1, "*"); - } - cursor++; - } - // Don't forget to add a ';'!! - InsertText(LocEnd.getLocWithOffset(1), ";"); - } else { // we don't have any instance variables - insert super struct. - endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts); - Result += " {\n struct "; - Result += RCDecl->getNameAsString(); - Result += "_IMPL "; - Result += RCDecl->getNameAsString(); - Result += "_IVARS;\n};\n"; - ReplaceText(LocStart, endBuf-startBuf, Result); - } - // Mark this struct as having been generated. - if (!ObjCSynthesizedStructs.insert(CDecl)) - llvm_unreachable("struct already synthesize- SynthesizeObjCInternalStruct"); -} - -//===----------------------------------------------------------------------===// -// Meta Data Emission -//===----------------------------------------------------------------------===// - - -/// RewriteImplementations - This routine rewrites all method implementations -/// and emits meta-data. 
- -void RewriteObjC::RewriteImplementations() { - int ClsDefCount = ClassImplementation.size(); - int CatDefCount = CategoryImplementation.size(); - - // Rewrite implemented methods - for (int i = 0; i < ClsDefCount; i++) - RewriteImplementationDecl(ClassImplementation[i]); - - for (int i = 0; i < CatDefCount; i++) - RewriteImplementationDecl(CategoryImplementation[i]); -} - -void RewriteObjC::RewriteByRefString(std::string &ResultStr, - const std::string &Name, - ValueDecl *VD, bool def) { - assert(BlockByRefDeclNo.count(VD) && - "RewriteByRefString: ByRef decl missing"); - if (def) - ResultStr += "struct "; - ResultStr += "__Block_byref_" + Name + - "_" + utostr(BlockByRefDeclNo[VD]) ; -} - -static bool HasLocalVariableExternalStorage(ValueDecl *VD) { - if (VarDecl *Var = dyn_cast(VD)) - return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage()); - return false; -} - -std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i, - StringRef funcName, - std::string Tag) { - const FunctionType *AFT = CE->getFunctionType(); - QualType RT = AFT->getResultType(); - std::string StructRef = "struct " + Tag; - std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" + - funcName.str() + "_" + "block_func_" + utostr(i); - - BlockDecl *BD = CE->getBlockDecl(); - - if (isa(AFT)) { - // No user-supplied arguments. Still need to pass in a pointer to the - // block (to reference imported block decl refs). - S += "(" + StructRef + " *__cself)"; - } else if (BD->param_empty()) { - S += "(" + StructRef + " *__cself)"; - } else { - const FunctionProtoType *FT = cast(AFT); - assert(FT && "SynthesizeBlockFunc: No function proto"); - S += '('; - // first add the implicit argument. - S += StructRef + " *__cself, "; - std::string ParamStr; - for (BlockDecl::param_iterator AI = BD->param_begin(), - E = BD->param_end(); AI != E; ++AI) { - if (AI != BD->param_begin()) S += ", "; - ParamStr = (*AI)->getNameAsString(); - QualType QT = (*AI)->getType(); - (void)convertBlockPointerToFunctionPointer(QT); - QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy()); - S += ParamStr; - } - if (FT->isVariadic()) { - if (!BD->param_empty()) S += ", "; - S += "..."; - } - S += ')'; - } - S += " {\n"; - - // Create local declarations to avoid rewriting all closure decl ref exprs. - // First, emit a declaration for all "by ref" decls. - for (SmallVector::iterator I = BlockByRefDecls.begin(), - E = BlockByRefDecls.end(); I != E; ++I) { - S += " "; - std::string Name = (*I)->getNameAsString(); - std::string TypeString; - RewriteByRefString(TypeString, Name, (*I)); - TypeString += " *"; - Name = TypeString + Name; - S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n"; - } - // Next, emit a declaration for all "by copy" declarations. - for (SmallVector::iterator I = BlockByCopyDecls.begin(), - E = BlockByCopyDecls.end(); I != E; ++I) { - S += " "; - // Handle nested closure invocation. 
For example: - // - // void (^myImportedClosure)(void); - // myImportedClosure = ^(void) { setGlobalInt(x + y); }; - // - // void (^anotherClosure)(void); - // anotherClosure = ^(void) { - // myImportedClosure(); // import and invoke the closure - // }; - // - if (isTopLevelBlockPointerType((*I)->getType())) { - RewriteBlockPointerTypeVariable(S, (*I)); - S += " = ("; - RewriteBlockPointerType(S, (*I)->getType()); - S += ")"; - S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n"; - } - else { - std::string Name = (*I)->getNameAsString(); - QualType QT = (*I)->getType(); - if (HasLocalVariableExternalStorage(*I)) - QT = Context->getPointerType(QT); - QT.getAsStringInternal(Name, Context->getPrintingPolicy()); - S += Name + " = __cself->" + - (*I)->getNameAsString() + "; // bound by copy\n"; - } - } - std::string RewrittenStr = RewrittenBlockExprs[CE]; - const char *cstr = RewrittenStr.c_str(); - while (*cstr++ != '{') ; - S += cstr; - S += "\n"; - return S; -} - -std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i, - StringRef funcName, - std::string Tag) { - std::string StructRef = "struct " + Tag; - std::string S = "static void __"; - - S += funcName; - S += "_block_copy_" + utostr(i); - S += "(" + StructRef; - S += "*dst, " + StructRef; - S += "*src) {"; - for (llvm::SmallPtrSet::iterator I = ImportedBlockDecls.begin(), - E = ImportedBlockDecls.end(); I != E; ++I) { - ValueDecl *VD = (*I); - S += "_Block_object_assign((void*)&dst->"; - S += (*I)->getNameAsString(); - S += ", (void*)src->"; - S += (*I)->getNameAsString(); - if (BlockByRefDeclsPtrSet.count((*I))) - S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);"; - else if (VD->getType()->isBlockPointerType()) - S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);"; - else - S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);"; - } - S += "}\n"; - - S += "\nstatic void __"; - S += funcName; - S += "_block_dispose_" + utostr(i); - S += "(" + StructRef; - S += "*src) {"; - for (llvm::SmallPtrSet::iterator I = ImportedBlockDecls.begin(), - E = ImportedBlockDecls.end(); I != E; ++I) { - ValueDecl *VD = (*I); - S += "_Block_object_dispose((void*)src->"; - S += (*I)->getNameAsString(); - if (BlockByRefDeclsPtrSet.count((*I))) - S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);"; - else if (VD->getType()->isBlockPointerType()) - S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);"; - else - S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);"; - } - S += "}\n"; - return S; -} - -std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag, - std::string Desc) { - std::string S = "\nstruct " + Tag; - std::string Constructor = " " + Tag; - - S += " {\n struct __block_impl impl;\n"; - S += " struct " + Desc; - S += "* Desc;\n"; - - Constructor += "(void *fp, "; // Invoke function pointer. - Constructor += "struct " + Desc; // Descriptor pointer. - Constructor += " *desc"; - - if (BlockDeclRefs.size()) { - // Output all "by copy" declarations. - for (SmallVector::iterator I = BlockByCopyDecls.begin(), - E = BlockByCopyDecls.end(); I != E; ++I) { - S += " "; - std::string FieldName = (*I)->getNameAsString(); - std::string ArgName = "_" + FieldName; - // Handle nested closure invocation. 
For example: - // - // void (^myImportedBlock)(void); - // myImportedBlock = ^(void) { setGlobalInt(x + y); }; - // - // void (^anotherBlock)(void); - // anotherBlock = ^(void) { - // myImportedBlock(); // import and invoke the closure - // }; - // - if (isTopLevelBlockPointerType((*I)->getType())) { - S += "struct __block_impl *"; - Constructor += ", void *" + ArgName; - } else { - QualType QT = (*I)->getType(); - if (HasLocalVariableExternalStorage(*I)) - QT = Context->getPointerType(QT); - QT.getAsStringInternal(FieldName, Context->getPrintingPolicy()); - QT.getAsStringInternal(ArgName, Context->getPrintingPolicy()); - Constructor += ", " + ArgName; - } - S += FieldName + ";\n"; - } - // Output all "by ref" declarations. - for (SmallVector::iterator I = BlockByRefDecls.begin(), - E = BlockByRefDecls.end(); I != E; ++I) { - S += " "; - std::string FieldName = (*I)->getNameAsString(); - std::string ArgName = "_" + FieldName; - { - std::string TypeString; - RewriteByRefString(TypeString, FieldName, (*I)); - TypeString += " *"; - FieldName = TypeString + FieldName; - ArgName = TypeString + ArgName; - Constructor += ", " + ArgName; - } - S += FieldName + "; // by ref\n"; - } - // Finish writing the constructor. - Constructor += ", int flags=0)"; - // Initialize all "by copy" arguments. - bool firsTime = true; - for (SmallVector::iterator I = BlockByCopyDecls.begin(), - E = BlockByCopyDecls.end(); I != E; ++I) { - std::string Name = (*I)->getNameAsString(); - if (firsTime) { - Constructor += " : "; - firsTime = false; - } - else - Constructor += ", "; - if (isTopLevelBlockPointerType((*I)->getType())) - Constructor += Name + "((struct __block_impl *)_" + Name + ")"; - else - Constructor += Name + "(_" + Name + ")"; - } - // Initialize all "by ref" arguments. - for (SmallVector::iterator I = BlockByRefDecls.begin(), - E = BlockByRefDecls.end(); I != E; ++I) { - std::string Name = (*I)->getNameAsString(); - if (firsTime) { - Constructor += " : "; - firsTime = false; - } - else - Constructor += ", "; - Constructor += Name + "(_" + Name + "->__forwarding)"; - } - - Constructor += " {\n"; - if (GlobalVarDecl) - Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n"; - else - Constructor += " impl.isa = &_NSConcreteStackBlock;\n"; - Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n"; - - Constructor += " Desc = desc;\n"; - } else { - // Finish writing the constructor. 
- Constructor += ", int flags=0) {\n"; - if (GlobalVarDecl) - Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n"; - else - Constructor += " impl.isa = &_NSConcreteStackBlock;\n"; - Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n"; - Constructor += " Desc = desc;\n"; - } - Constructor += " "; - Constructor += "}\n"; - S += Constructor; - S += "};\n"; - return S; -} - -std::string RewriteObjC::SynthesizeBlockDescriptor(std::string DescTag, - std::string ImplTag, int i, - StringRef FunName, - unsigned hasCopy) { - std::string S = "\nstatic struct " + DescTag; - - S += " {\n unsigned long reserved;\n"; - S += " unsigned long Block_size;\n"; - if (hasCopy) { - S += " void (*copy)(struct "; - S += ImplTag; S += "*, struct "; - S += ImplTag; S += "*);\n"; - - S += " void (*dispose)(struct "; - S += ImplTag; S += "*);\n"; - } - S += "} "; - - S += DescTag + "_DATA = { 0, sizeof(struct "; - S += ImplTag + ")"; - if (hasCopy) { - S += ", __" + FunName.str() + "_block_copy_" + utostr(i); - S += ", __" + FunName.str() + "_block_dispose_" + utostr(i); - } - S += "};\n"; - return S; -} - -void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart, - StringRef FunName) { - // Insert declaration for the function in which block literal is used. - if (CurFunctionDeclToDeclareForBlock && !Blocks.empty()) - RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock); - bool RewriteSC = (GlobalVarDecl && - !Blocks.empty() && - GlobalVarDecl->getStorageClass() == SC_Static && - GlobalVarDecl->getType().getCVRQualifiers()); - if (RewriteSC) { - std::string SC(" void __"); - SC += GlobalVarDecl->getNameAsString(); - SC += "() {}"; - InsertText(FunLocStart, SC); - } - - // Insert closures that were part of the function. - for (unsigned i = 0, count=0; i < Blocks.size(); i++) { - CollectBlockDeclRefInfo(Blocks[i]); - // Need to copy-in the inner copied-in variables not actually used in this - // block. - for (int j = 0; j < InnerDeclRefsCount[i]; j++) { - DeclRefExpr *Exp = InnerDeclRefs[count++]; - ValueDecl *VD = Exp->getDecl(); - BlockDeclRefs.push_back(Exp); - if (!VD->hasAttr() && !BlockByCopyDeclsPtrSet.count(VD)) { - BlockByCopyDeclsPtrSet.insert(VD); - BlockByCopyDecls.push_back(VD); - } - if (VD->hasAttr() && !BlockByRefDeclsPtrSet.count(VD)) { - BlockByRefDeclsPtrSet.insert(VD); - BlockByRefDecls.push_back(VD); - } - // imported objects in the inner blocks not used in the outer - // blocks must be copied/disposed in the outer block as well. - if (VD->hasAttr() || - VD->getType()->isObjCObjectPointerType() || - VD->getType()->isBlockPointerType()) - ImportedBlockDecls.insert(VD); - } - - std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i); - std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i); - - std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag); - - InsertText(FunLocStart, CI); - - std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag); - - InsertText(FunLocStart, CF); - - if (ImportedBlockDecls.size()) { - std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag); - InsertText(FunLocStart, HF); - } - std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName, - ImportedBlockDecls.size() > 0); - InsertText(FunLocStart, BD); - - BlockDeclRefs.clear(); - BlockByRefDecls.clear(); - BlockByRefDeclsPtrSet.clear(); - BlockByCopyDecls.clear(); - BlockByCopyDeclsPtrSet.clear(); - ImportedBlockDecls.clear(); - } - if (RewriteSC) { - // Must insert any 'const/volatile/static here. 
Since it has been - // removed as result of rewriting of block literals. - std::string SC; - if (GlobalVarDecl->getStorageClass() == SC_Static) - SC = "static "; - if (GlobalVarDecl->getType().isConstQualified()) - SC += "const "; - if (GlobalVarDecl->getType().isVolatileQualified()) - SC += "volatile "; - if (GlobalVarDecl->getType().isRestrictQualified()) - SC += "restrict "; - InsertText(FunLocStart, SC); - } - - Blocks.clear(); - InnerDeclRefsCount.clear(); - InnerDeclRefs.clear(); - RewrittenBlockExprs.clear(); -} - -void RewriteObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) { - SourceLocation FunLocStart = FD->getTypeSpecStartLoc(); - StringRef FuncName = FD->getName(); - - SynthesizeBlockLiterals(FunLocStart, FuncName); -} - -static void BuildUniqueMethodName(std::string &Name, - ObjCMethodDecl *MD) { - ObjCInterfaceDecl *IFace = MD->getClassInterface(); - Name = IFace->getName(); - Name += "__" + MD->getSelector().getAsString(); - // Convert colons to underscores. - std::string::size_type loc = 0; - while ((loc = Name.find(":", loc)) != std::string::npos) - Name.replace(loc, 1, "_"); -} - -void RewriteObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) { - //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n"); - //SourceLocation FunLocStart = MD->getLocStart(); - SourceLocation FunLocStart = MD->getLocStart(); - std::string FuncName; - BuildUniqueMethodName(FuncName, MD); - SynthesizeBlockLiterals(FunLocStart, FuncName); -} - -void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) { - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) { - if (BlockExpr *CBE = dyn_cast(*CI)) - GetBlockDeclRefExprs(CBE->getBody()); - else - GetBlockDeclRefExprs(*CI); - } - // Handle specific things. - if (DeclRefExpr *DRE = dyn_cast(S)) { - if (DRE->refersToEnclosingLocal()) { - // FIXME: Handle enums. - if (!isa(DRE->getDecl())) - BlockDeclRefs.push_back(DRE); - if (HasLocalVariableExternalStorage(DRE->getDecl())) - BlockDeclRefs.push_back(DRE); - } - } - - return; -} - -void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S, - SmallVector &InnerBlockDeclRefs, - llvm::SmallPtrSet &InnerContexts) { - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) { - if (BlockExpr *CBE = dyn_cast(*CI)) { - InnerContexts.insert(cast(CBE->getBlockDecl())); - GetInnerBlockDeclRefExprs(CBE->getBody(), - InnerBlockDeclRefs, - InnerContexts); - } - else - GetInnerBlockDeclRefExprs(*CI, - InnerBlockDeclRefs, - InnerContexts); - - } - // Handle specific things. - if (DeclRefExpr *DRE = dyn_cast(S)) { - if (DRE->refersToEnclosingLocal()) { - if (!isa(DRE->getDecl()) && - !InnerContexts.count(DRE->getDecl()->getDeclContext())) - InnerBlockDeclRefs.push_back(DRE); - if (VarDecl *Var = dyn_cast(DRE->getDecl())) - if (Var->isFunctionOrMethodVarDecl()) - ImportedLocalExternalDecls.insert(Var); - } - } - - return; -} - -/// convertFunctionTypeOfBlocks - This routine converts a function type -/// whose result type may be a block pointer or whose argument type(s) -/// might be block pointers to an equivalent function type replacing -/// all block pointers to function pointers. -QualType RewriteObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) { - const FunctionProtoType *FTP = dyn_cast(FT); - // FTP will be null for closures that don't take arguments. - // Generate a funky cast. 
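BuildUniqueMethodName above simply glues the class name and selector together and turns every ':' into '_'. A self-contained sketch of that mangling (the class and selector names are made up):

    #include <iostream>
    #include <string>

    static std::string buildUniqueMethodName(const std::string &ClassName,
                                             const std::string &Selector) {
      std::string Name = ClassName + "__" + Selector;
      for (std::string::size_type loc = 0;
           (loc = Name.find(':', loc)) != std::string::npos; )
        Name.replace(loc, 1, "_");
      return Name;
    }

    int main() {
      std::cout << buildUniqueMethodName("MyClass", "doSomething:withObject:")
                << "\n";                      // MyClass__doSomething_withObject_
    }

So a method such as -[MyClass doSomething:withObject:] ends up driving synthesized names of roughly the form __MyClass__doSomething_withObject__block_impl_0 via SynthesizeBlockLiterals above.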
- SmallVector ArgTypes; - QualType Res = FT->getResultType(); - bool HasBlockType = convertBlockPointerToFunctionPointer(Res); - - if (FTP) { - for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), - E = FTP->arg_type_end(); I && (I != E); ++I) { - QualType t = *I; - // Make sure we convert "t (^)(...)" to "t (*)(...)". - if (convertBlockPointerToFunctionPointer(t)) - HasBlockType = true; - ArgTypes.push_back(t); - } - } - QualType FuncType; - // FIXME. Does this work if block takes no argument but has a return type - // which is of block type? - if (HasBlockType) - FuncType = getSimpleFunctionType(Res, &ArgTypes[0], ArgTypes.size()); - else FuncType = QualType(FT, 0); - return FuncType; -} - -Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) { - // Navigate to relevant type information. - const BlockPointerType *CPT = 0; - - if (const DeclRefExpr *DRE = dyn_cast(BlockExp)) { - CPT = DRE->getType()->getAs(); - } else if (const MemberExpr *MExpr = dyn_cast(BlockExp)) { - CPT = MExpr->getType()->getAs(); - } - else if (const ParenExpr *PRE = dyn_cast(BlockExp)) { - return SynthesizeBlockCall(Exp, PRE->getSubExpr()); - } - else if (const ImplicitCastExpr *IEXPR = dyn_cast(BlockExp)) - CPT = IEXPR->getType()->getAs(); - else if (const ConditionalOperator *CEXPR = - dyn_cast(BlockExp)) { - Expr *LHSExp = CEXPR->getLHS(); - Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp); - Expr *RHSExp = CEXPR->getRHS(); - Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp); - Expr *CONDExp = CEXPR->getCond(); - ConditionalOperator *CondExpr = - new (Context) ConditionalOperator(CONDExp, - SourceLocation(), cast(LHSStmt), - SourceLocation(), cast(RHSStmt), - Exp->getType(), VK_RValue, OK_Ordinary); - return CondExpr; - } else if (const ObjCIvarRefExpr *IRE = dyn_cast(BlockExp)) { - CPT = IRE->getType()->getAs(); - } else if (const PseudoObjectExpr *POE - = dyn_cast(BlockExp)) { - CPT = POE->getType()->castAs(); - } else { - assert(1 && "RewriteBlockClass: Bad type"); - } - assert(CPT && "RewriteBlockClass: Bad type"); - const FunctionType *FT = CPT->getPointeeType()->getAs(); - assert(FT && "RewriteBlockClass: Bad type"); - const FunctionProtoType *FTP = dyn_cast(FT); - // FTP will be null for closures that don't take arguments. - - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get("__block_impl")); - QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD)); - - // Generate a funky cast. - SmallVector ArgTypes; - - // Push the block argument type. - ArgTypes.push_back(PtrBlock); - if (FTP) { - for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), - E = FTP->arg_type_end(); I && (I != E); ++I) { - QualType t = *I; - // Make sure we convert "t (^)(...)" to "t (*)(...)". - if (!convertBlockPointerToFunctionPointer(t)) - convertToUnqualifiedObjCType(t); - ArgTypes.push_back(t); - } - } - // Now do the pointer to function cast. - QualType PtrToFuncCastType - = getSimpleFunctionType(Exp->getType(), &ArgTypes[0], ArgTypes.size()); - - PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType); - - CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock, - CK_BitCast, - const_cast(BlockExp)); - // Don't forget the parens to enforce the proper binding. 
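The call that SynthesizeBlockCall builds has a fixed shape: cast the block to struct __block_impl *, pull out FuncPtr, cast that to the right function-pointer type, and pass the block itself as a hidden first argument ahead of the user arguments. A minimal standalone sketch of that shape, with a hand-written stand-in for the invoke function:

    #include <iostream>

    struct __block_impl {
      void *isa;
      int Flags;
      int Reserved;
      void *FuncPtr;
    };

    // Stand-in for a synthesized invoke function taking the block as first arg.
    static int addOne(struct __block_impl *self, int x) { return x + 1; }

    int main() {
      struct __block_impl blk = { 0, 0, 0, (void *)addOne };
      // Cast the block, load FuncPtr, cast it to the proper function-pointer
      // type, then call it with the block followed by the user arguments.
      int r = ((int (*)(struct __block_impl *, int))
                   ((struct __block_impl *)&blk)->FuncPtr)(&blk, 41);
      std::cout << r << "\n";                 // 42
    }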
- ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), - BlkCast); - //PE->dump(); - - FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get("FuncPtr"), - Context->VoidPtrTy, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); - - - CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType, - CK_BitCast, ME); - PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast); - - SmallVector BlkExprs; - // Add the implicit argument. - BlkExprs.push_back(BlkCast); - // Add the user arguments. - for (CallExpr::arg_iterator I = Exp->arg_begin(), - E = Exp->arg_end(); I != E; ++I) { - BlkExprs.push_back(*I); - } - CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0], - BlkExprs.size(), - Exp->getType(), VK_RValue, - SourceLocation()); - return CE; -} - -// We need to return the rewritten expression to handle cases where the -// BlockDeclRefExpr is embedded in another expression being rewritten. -// For example: -// -// int main() { -// __block Foo *f; -// __block int i; -// -// void (^myblock)() = ^() { -// [f test]; // f is a BlockDeclRefExpr embedded in a message (which is being rewritten). -// i = 77; -// }; -//} -Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) { - // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR - // for each DeclRefExp where BYREFVAR is name of the variable. - ValueDecl *VD = DeclRefExp->getDecl(); - bool isArrow = DeclRefExp->refersToEnclosingLocal(); - - FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(), - SourceLocation(), - &Context->Idents.get("__forwarding"), - Context->VoidPtrTy, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow, - FD, SourceLocation(), - FD->getType(), VK_LValue, - OK_Ordinary); - - StringRef Name = VD->getName(); - FD = FieldDecl::Create(*Context, 0, SourceLocation(), SourceLocation(), - &Context->Idents.get(Name), - Context->VoidPtrTy, 0, - /*BitWidth=*/0, /*Mutable=*/true, - ICIS_NoInit); - ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(), - DeclRefExp->getType(), VK_LValue, OK_Ordinary); - - - - // Need parens to enforce precedence. - ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(), - DeclRefExp->getExprLoc(), - ME); - ReplaceStmt(DeclRefExp, PE); - return PE; -} - -// Rewrites the imported local variable V with external storage -// (static, extern, etc.) as *V -// -Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) { - ValueDecl *VD = DRE->getDecl(); - if (VarDecl *Var = dyn_cast(VD)) - if (!ImportedLocalExternalDecls.count(Var)) - return DRE; - Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(), - VK_LValue, OK_Ordinary, - DRE->getLocation()); - // Need parens to enforce precedence. - ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), - Exp); - ReplaceStmt(DRE, PE); - return PE; -} - -void RewriteObjC::RewriteCastExpr(CStyleCastExpr *CE) { - SourceLocation LocStart = CE->getLParenLoc(); - SourceLocation LocEnd = CE->getRParenLoc(); - - // Need to avoid trying to rewrite synthesized casts. - if (LocStart.isInvalid()) - return; - // Need to avoid trying to rewrite casts contained in macros. 
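RewriteLocalVariableExternalStorage covers the other capture case: a local with external storage (for example a function-scope static) is imported into the block by address, so each use of it is rewritten as the parenthesized dereference (*V). A small sketch of the effect, with the block body modeled as a plain function that receives the variable's address (names are hypothetical):

    #include <iostream>

    // Block body after rewriting: the variable arrives as a pointer.
    static void blockBodyRewritten(int *i) {
      (*i) = 77;                              // what "i = 77;" becomes inside the block
    }

    int main() {
      static int i = 0;                       // local with external storage
      blockBodyRewritten(&i);
      std::cout << i << "\n";                 // 77
    }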
- if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd)) - return; - - const char *startBuf = SM->getCharacterData(LocStart); - const char *endBuf = SM->getCharacterData(LocEnd); - QualType QT = CE->getType(); - const Type* TypePtr = QT->getAs(); - if (isa(TypePtr)) { - const TypeOfExprType *TypeOfExprTypePtr = cast(TypePtr); - QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType(); - std::string TypeAsString = "("; - RewriteBlockPointerType(TypeAsString, QT); - TypeAsString += ")"; - ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString); - return; - } - // advance the location to startArgList. - const char *argPtr = startBuf; - - while (*argPtr++ && (argPtr < endBuf)) { - switch (*argPtr) { - case '^': - // Replace the '^' with '*'. - LocStart = LocStart.getLocWithOffset(argPtr-startBuf); - ReplaceText(LocStart, 1, "*"); - break; - } - } - return; -} - -void RewriteObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) { - SourceLocation DeclLoc = FD->getLocation(); - unsigned parenCount = 0; - - // We have 1 or more arguments that have closure pointers. - const char *startBuf = SM->getCharacterData(DeclLoc); - const char *startArgList = strchr(startBuf, '('); - - assert((*startArgList == '(') && "Rewriter fuzzy parser confused"); - - parenCount++; - // advance the location to startArgList. - DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf); - assert((DeclLoc.isValid()) && "Invalid DeclLoc"); - - const char *argPtr = startArgList; - - while (*argPtr++ && parenCount) { - switch (*argPtr) { - case '^': - // Replace the '^' with '*'. - DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList); - ReplaceText(DeclLoc, 1, "*"); - break; - case '(': - parenCount++; - break; - case ')': - parenCount--; - break; - } - } - return; -} - -bool RewriteObjC::PointerTypeTakesAnyBlockArguments(QualType QT) { - const FunctionProtoType *FTP; - const PointerType *PT = QT->getAs(); - if (PT) { - FTP = PT->getPointeeType()->getAs(); - } else { - const BlockPointerType *BPT = QT->getAs(); - assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type"); - FTP = BPT->getPointeeType()->getAs(); - } - if (FTP) { - for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), - E = FTP->arg_type_end(); I != E; ++I) - if (isTopLevelBlockPointerType(*I)) - return true; - } - return false; -} - -bool RewriteObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) { - const FunctionProtoType *FTP; - const PointerType *PT = QT->getAs(); - if (PT) { - FTP = PT->getPointeeType()->getAs(); - } else { - const BlockPointerType *BPT = QT->getAs(); - assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type"); - FTP = BPT->getPointeeType()->getAs(); - } - if (FTP) { - for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(), - E = FTP->arg_type_end(); I != E; ++I) { - if ((*I)->isObjCQualifiedIdType()) - return true; - if ((*I)->isObjCObjectPointerType() && - (*I)->getPointeeType()->isObjCQualifiedInterfaceType()) - return true; - } - - } - return false; -} - -void RewriteObjC::GetExtentOfArgList(const char *Name, const char *&LParen, - const char *&RParen) { - const char *argPtr = strchr(Name, '('); - assert((*argPtr == '(') && "Rewriter fuzzy parser confused"); - - LParen = argPtr; // output the start. - argPtr++; // skip past the left paren. 
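RewriteBlockPointerFunctionArgs above and GetExtentOfArgList below rely on the same fuzzy textual scan: find the opening parenthesis, keep a parenthesis count, and patch characters in place. A standalone sketch of that idea, turning each block marker '^' in an argument list into '*':

    #include <iostream>
    #include <string>

    static std::string rewriteCaretsInArgList(std::string Decl) {
      std::string::size_type LParen = Decl.find('(');
      if (LParen == std::string::npos)
        return Decl;
      unsigned parenCount = 1;
      for (std::string::size_type i = LParen + 1;
           i < Decl.size() && parenCount; ++i) {
        if (Decl[i] == '(') ++parenCount;
        else if (Decl[i] == ')') --parenCount;
        else if (Decl[i] == '^') Decl[i] = '*';   // block pointer -> function pointer
      }
      return Decl;
    }

    int main() {
      std::cout << rewriteCaretsInArgList("void f(void (^cb)(int))") << "\n";
      // prints: void f(void (*cb)(int))
    }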
- unsigned parenCount = 1; - - while (*argPtr && parenCount) { - switch (*argPtr) { - case '(': parenCount++; break; - case ')': parenCount--; break; - default: break; - } - if (parenCount) argPtr++; - } - assert((*argPtr == ')') && "Rewriter fuzzy parser confused"); - RParen = argPtr; // output the end -} - -void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) { - if (FunctionDecl *FD = dyn_cast(ND)) { - RewriteBlockPointerFunctionArgs(FD); - return; - } - // Handle Variables and Typedefs. - SourceLocation DeclLoc = ND->getLocation(); - QualType DeclT; - if (VarDecl *VD = dyn_cast(ND)) - DeclT = VD->getType(); - else if (TypedefNameDecl *TDD = dyn_cast(ND)) - DeclT = TDD->getUnderlyingType(); - else if (FieldDecl *FD = dyn_cast(ND)) - DeclT = FD->getType(); - else - llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled"); - - const char *startBuf = SM->getCharacterData(DeclLoc); - const char *endBuf = startBuf; - // scan backward (from the decl location) for the end of the previous decl. - while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart) - startBuf--; - SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf); - std::string buf; - unsigned OrigLength=0; - // *startBuf != '^' if we are dealing with a pointer to function that - // may take block argument types (which will be handled below). - if (*startBuf == '^') { - // Replace the '^' with '*', computing a negative offset. - buf = '*'; - startBuf++; - OrigLength++; - } - while (*startBuf != ')') { - buf += *startBuf; - startBuf++; - OrigLength++; - } - buf += ')'; - OrigLength++; - - if (PointerTypeTakesAnyBlockArguments(DeclT) || - PointerTypeTakesAnyObjCQualifiedType(DeclT)) { - // Replace the '^' with '*' for arguments. - // Replace id

<P>
    with id/*<>*/ - DeclLoc = ND->getLocation(); - startBuf = SM->getCharacterData(DeclLoc); - const char *argListBegin, *argListEnd; - GetExtentOfArgList(startBuf, argListBegin, argListEnd); - while (argListBegin < argListEnd) { - if (*argListBegin == '^') - buf += '*'; - else if (*argListBegin == '<') { - buf += "/*"; - buf += *argListBegin++; - OrigLength++;; - while (*argListBegin != '>') { - buf += *argListBegin++; - OrigLength++; - } - buf += *argListBegin; - buf += "*/"; - } - else - buf += *argListBegin; - argListBegin++; - OrigLength++; - } - buf += ')'; - OrigLength++; - } - ReplaceText(Start, OrigLength, buf); - - return; -} - - -/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes: -/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst, -/// struct Block_byref_id_object *src) { -/// _Block_object_assign (&_dest->object, _src->object, -/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT -/// [|BLOCK_FIELD_IS_WEAK]) // object -/// _Block_object_assign(&_dest->object, _src->object, -/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK -/// [|BLOCK_FIELD_IS_WEAK]) // block -/// } -/// And: -/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) { -/// _Block_object_dispose(_src->object, -/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT -/// [|BLOCK_FIELD_IS_WEAK]) // object -/// _Block_object_dispose(_src->object, -/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK -/// [|BLOCK_FIELD_IS_WEAK]) // block -/// } - -std::string RewriteObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD, - int flag) { - std::string S; - if (CopyDestroyCache.count(flag)) - return S; - CopyDestroyCache.insert(flag); - S = "static void __Block_byref_id_object_copy_"; - S += utostr(flag); - S += "(void *dst, void *src) {\n"; - - // offset into the object pointer is computed as: - // void * + void* + int + int + void* + void * - unsigned IntSize = - static_cast(Context->getTypeSize(Context->IntTy)); - unsigned VoidPtrSize = - static_cast(Context->getTypeSize(Context->VoidPtrTy)); - - unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth(); - S += " _Block_object_assign((char*)dst + "; - S += utostr(offset); - S += ", *(void * *) ((char*)src + "; - S += utostr(offset); - S += "), "; - S += utostr(flag); - S += ");\n}\n"; - - S += "static void __Block_byref_id_object_dispose_"; - S += utostr(flag); - S += "(void *src) {\n"; - S += " _Block_object_dispose(*(void * *) ((char*)src + "; - S += utostr(offset); - S += "), "; - S += utostr(flag); - S += ");\n}\n"; - return S; -} - -/// RewriteByRefVar - For each __block typex ND variable this routine transforms -/// the declaration into: -/// struct __Block_byref_ND { -/// void *__isa; // NULL for everything except __weak pointers -/// struct __Block_byref_ND *__forwarding; -/// int32_t __flags; -/// int32_t __size; -/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object -/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object -/// typex ND; -/// }; -/// -/// It then replaces declaration of ND variable with: -/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag, -/// __size=sizeof(struct __Block_byref_ND), -/// ND=initializer-if-any}; -/// -/// -void RewriteObjC::RewriteByRefVar(VarDecl *ND) { - // Insert declaration for the function in which block literal is - // used. 
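The offset arithmetic above places the captured object after __isa, __forwarding, __flags, __size and the two helper pointers, i.e. at 4 * sizeof(void *) + 2 * sizeof(int) bytes (40 on a typical LP64 target, 24 on ILP32). A quick standalone check against a hypothetical byref record with that layout:

    #include <cstddef>
    #include <iostream>

    // Hypothetical byref record for a __block ObjC object pointer.
    struct __Block_byref_obj_sketch {
      void *__isa;
      struct __Block_byref_obj_sketch *__forwarding;
      int __flags;
      int __size;
      void (*__Block_byref_id_object_copy)(void *, void *);
      void (*__Block_byref_id_object_dispose)(void *);
      void *obj;                              // the captured object pointer
    };

    int main() {
      std::size_t computed = 4 * sizeof(void *) + 2 * sizeof(int);
      std::cout << computed << " == "
                << offsetof(struct __Block_byref_obj_sketch, obj) << "\n";
    }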
- if (CurFunctionDeclToDeclareForBlock) - RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock); - int flag = 0; - int isa = 0; - SourceLocation DeclLoc = ND->getTypeSpecStartLoc(); - if (DeclLoc.isInvalid()) - // If type location is missing, it is because of missing type (a warning). - // Use variable's location which is good for this case. - DeclLoc = ND->getLocation(); - const char *startBuf = SM->getCharacterData(DeclLoc); - SourceLocation X = ND->getLocEnd(); - X = SM->getExpansionLoc(X); - const char *endBuf = SM->getCharacterData(X); - std::string Name(ND->getNameAsString()); - std::string ByrefType; - RewriteByRefString(ByrefType, Name, ND, true); - ByrefType += " {\n"; - ByrefType += " void *__isa;\n"; - RewriteByRefString(ByrefType, Name, ND); - ByrefType += " *__forwarding;\n"; - ByrefType += " int __flags;\n"; - ByrefType += " int __size;\n"; - // Add void *__Block_byref_id_object_copy; - // void *__Block_byref_id_object_dispose; if needed. - QualType Ty = ND->getType(); - bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty); - if (HasCopyAndDispose) { - ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n"; - ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n"; - } - - QualType T = Ty; - (void)convertBlockPointerToFunctionPointer(T); - T.getAsStringInternal(Name, Context->getPrintingPolicy()); - - ByrefType += " " + Name + ";\n"; - ByrefType += "};\n"; - // Insert this type in global scope. It is needed by helper function. - SourceLocation FunLocStart; - if (CurFunctionDef) - FunLocStart = CurFunctionDef->getTypeSpecStartLoc(); - else { - assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null"); - FunLocStart = CurMethodDef->getLocStart(); - } - InsertText(FunLocStart, ByrefType); - if (Ty.isObjCGCWeak()) { - flag |= BLOCK_FIELD_IS_WEAK; - isa = 1; - } - - if (HasCopyAndDispose) { - flag = BLOCK_BYREF_CALLER; - QualType Ty = ND->getType(); - // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well. - if (Ty->isBlockPointerType()) - flag |= BLOCK_FIELD_IS_BLOCK; - else - flag |= BLOCK_FIELD_IS_OBJECT; - std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag); - if (!HF.empty()) - InsertText(FunLocStart, HF); - } - - // struct __Block_byref_ND ND = - // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND), - // initializer-if-any}; - bool hasInit = (ND->getInit() != 0); - unsigned flags = 0; - if (HasCopyAndDispose) - flags |= BLOCK_HAS_COPY_DISPOSE; - Name = ND->getNameAsString(); - ByrefType.clear(); - RewriteByRefString(ByrefType, Name, ND); - std::string ForwardingCastType("("); - ForwardingCastType += ByrefType + " *)"; - if (!hasInit) { - ByrefType += " " + Name + " = {(void*)"; - ByrefType += utostr(isa); - ByrefType += "," + ForwardingCastType + "&" + Name + ", "; - ByrefType += utostr(flags); - ByrefType += ", "; - ByrefType += "sizeof("; - RewriteByRefString(ByrefType, Name, ND); - ByrefType += ")"; - if (HasCopyAndDispose) { - ByrefType += ", __Block_byref_id_object_copy_"; - ByrefType += utostr(flag); - ByrefType += ", __Block_byref_id_object_dispose_"; - ByrefType += utostr(flag); - } - ByrefType += "};\n"; - unsigned nameSize = Name.size(); - // for block or function pointer declaration. Name is aleady - // part of the declaration. 
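Putting the pieces together, a declaration such as __block int x = 10; in main() is rewritten into a struct definition plus an aggregate initializer whose second field points back at the variable itself; uses of x then go through __forwarding as shown earlier. A minimal standalone sketch (the record name __Block_byref_x_0 is a hypothetical stand-in, and the copy/dispose helper fields are omitted because an int needs neither):

    #include <iostream>

    struct __Block_byref_x_0 {
      void *__isa;
      struct __Block_byref_x_0 *__forwarding;
      int __flags;
      int __size;
      int x;                                  // the original variable
    };

    int main() {
      // Roughly what "__block int x = 10;" becomes: isa, forwarding pointer
      // back to itself, flags, size, then the original initializer.
      struct __Block_byref_x_0 x = {
        (void *)0, (struct __Block_byref_x_0 *)&x, 0,
        sizeof(struct __Block_byref_x_0), 10
      };
      // Uses of 'x' are rewritten to go through __forwarding, e.g. "x = 77;":
      (x.__forwarding->x) = 77;
      std::cout << x.__forwarding->x << "\n"; // 77
    }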
- if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) - nameSize = 1; - ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType); - } - else { - SourceLocation startLoc; - Expr *E = ND->getInit(); - if (const CStyleCastExpr *ECE = dyn_cast(E)) - startLoc = ECE->getLParenLoc(); - else - startLoc = E->getLocStart(); - startLoc = SM->getExpansionLoc(startLoc); - endBuf = SM->getCharacterData(startLoc); - ByrefType += " " + Name; - ByrefType += " = {(void*)"; - ByrefType += utostr(isa); - ByrefType += "," + ForwardingCastType + "&" + Name + ", "; - ByrefType += utostr(flags); - ByrefType += ", "; - ByrefType += "sizeof("; - RewriteByRefString(ByrefType, Name, ND); - ByrefType += "), "; - if (HasCopyAndDispose) { - ByrefType += "__Block_byref_id_object_copy_"; - ByrefType += utostr(flag); - ByrefType += ", __Block_byref_id_object_dispose_"; - ByrefType += utostr(flag); - ByrefType += ", "; - } - ReplaceText(DeclLoc, endBuf-startBuf, ByrefType); - - // Complete the newly synthesized compound expression by inserting a right - // curly brace before the end of the declaration. - // FIXME: This approach avoids rewriting the initializer expression. It - // also assumes there is only one declarator. For example, the following - // isn't currently supported by this routine (in general): - // - // double __block BYREFVAR = 1.34, BYREFVAR2 = 1.37; - // - const char *startInitializerBuf = SM->getCharacterData(startLoc); - const char *semiBuf = strchr(startInitializerBuf, ';'); - assert((*semiBuf == ';') && "RewriteByRefVar: can't find ';'"); - SourceLocation semiLoc = - startLoc.getLocWithOffset(semiBuf-startInitializerBuf); - - InsertText(semiLoc, "}"); - } - return; -} - -void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) { - // Add initializers for any closure decl refs. - GetBlockDeclRefExprs(Exp->getBody()); - if (BlockDeclRefs.size()) { - // Unique all "by copy" declarations. - for (unsigned i = 0; i < BlockDeclRefs.size(); i++) - if (!BlockDeclRefs[i]->getDecl()->hasAttr()) { - if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) { - BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl()); - BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl()); - } - } - // Unique all "by ref" declarations. - for (unsigned i = 0; i < BlockDeclRefs.size(); i++) - if (BlockDeclRefs[i]->getDecl()->hasAttr()) { - if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) { - BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl()); - BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl()); - } - } - // Find any imported blocks...they will need special attention. - for (unsigned i = 0; i < BlockDeclRefs.size(); i++) - if (BlockDeclRefs[i]->getDecl()->hasAttr() || - BlockDeclRefs[i]->getType()->isObjCObjectPointerType() || - BlockDeclRefs[i]->getType()->isBlockPointerType()) - ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl()); - } -} - -FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(StringRef name) { - IdentifierInfo *ID = &Context->Idents.get(name); - QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy); - return FunctionDecl::Create(*Context, TUDecl, SourceLocation(), - SourceLocation(), ID, FType, 0, SC_Extern, - SC_None, false, false); -} - -Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp, - const SmallVector &InnerBlockDeclRefs) { - const BlockDecl *block = Exp->getBlockDecl(); - Blocks.push_back(Exp); - - CollectBlockDeclRefInfo(Exp); - - // Add inner imported variables now used in current block. 
- int countOfInnerDecls = 0; - if (!InnerBlockDeclRefs.empty()) { - for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) { - DeclRefExpr *Exp = InnerBlockDeclRefs[i]; - ValueDecl *VD = Exp->getDecl(); - if (!VD->hasAttr() && !BlockByCopyDeclsPtrSet.count(VD)) { - // We need to save the copied-in variables in nested - // blocks because it is needed at the end for some of the API generations. - // See SynthesizeBlockLiterals routine. - InnerDeclRefs.push_back(Exp); countOfInnerDecls++; - BlockDeclRefs.push_back(Exp); - BlockByCopyDeclsPtrSet.insert(VD); - BlockByCopyDecls.push_back(VD); - } - if (VD->hasAttr() && !BlockByRefDeclsPtrSet.count(VD)) { - InnerDeclRefs.push_back(Exp); countOfInnerDecls++; - BlockDeclRefs.push_back(Exp); - BlockByRefDeclsPtrSet.insert(VD); - BlockByRefDecls.push_back(VD); - } - } - // Find any imported blocks...they will need special attention. - for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) - if (InnerBlockDeclRefs[i]->getDecl()->hasAttr() || - InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() || - InnerBlockDeclRefs[i]->getType()->isBlockPointerType()) - ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl()); - } - InnerDeclRefsCount.push_back(countOfInnerDecls); - - std::string FuncName; - - if (CurFunctionDef) - FuncName = CurFunctionDef->getNameAsString(); - else if (CurMethodDef) - BuildUniqueMethodName(FuncName, CurMethodDef); - else if (GlobalVarDecl) - FuncName = std::string(GlobalVarDecl->getNameAsString()); - - std::string BlockNumber = utostr(Blocks.size()-1); - - std::string Tag = "__" + FuncName + "_block_impl_" + BlockNumber; - std::string Func = "__" + FuncName + "_block_func_" + BlockNumber; - - // Get a pointer to the function type so we can cast appropriately. - QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType()); - QualType FType = Context->getPointerType(BFT); - - FunctionDecl *FD; - Expr *NewRep; - - // Simulate a contructor call... - FD = SynthBlockInitFunctionDecl(Tag); - DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue, - SourceLocation()); - - SmallVector InitExprs; - - // Initialize the block function. - FD = SynthBlockInitFunctionDecl(Func); - DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), - VK_LValue, SourceLocation()); - CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, - CK_BitCast, Arg); - InitExprs.push_back(castExpr); - - // Initialize the block descriptor. - std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA"; - - VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, - SourceLocation(), SourceLocation(), - &Context->Idents.get(DescData.c_str()), - Context->VoidPtrTy, 0, - SC_Static, SC_None); - UnaryOperator *DescRefExpr = - new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false, - Context->VoidPtrTy, - VK_LValue, - SourceLocation()), - UO_AddrOf, - Context->getPointerType(Context->VoidPtrTy), - VK_RValue, OK_Ordinary, - SourceLocation()); - InitExprs.push_back(DescRefExpr); - - // Add initializers for any closure decl refs. - if (BlockDeclRefs.size()) { - Expr *Exp; - // Output all "by copy" declarations. - for (SmallVector::iterator I = BlockByCopyDecls.begin(), - E = BlockByCopyDecls.end(); I != E; ++I) { - if (isObjCType((*I)->getType())) { - // FIXME: Conform to ABI ([[obj retain] autorelease]). 
- FD = SynthBlockInitFunctionDecl((*I)->getName()); - Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, - SourceLocation()); - if (HasLocalVariableExternalStorage(*I)) { - QualType QT = (*I)->getType(); - QT = Context->getPointerType(QT); - Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue, - OK_Ordinary, SourceLocation()); - } - } else if (isTopLevelBlockPointerType((*I)->getType())) { - FD = SynthBlockInitFunctionDecl((*I)->getName()); - Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, - SourceLocation()); - Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy, - CK_BitCast, Arg); - } else { - FD = SynthBlockInitFunctionDecl((*I)->getName()); - Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, - SourceLocation()); - if (HasLocalVariableExternalStorage(*I)) { - QualType QT = (*I)->getType(); - QT = Context->getPointerType(QT); - Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue, - OK_Ordinary, SourceLocation()); - } - - } - InitExprs.push_back(Exp); - } - // Output all "by ref" declarations. - for (SmallVector::iterator I = BlockByRefDecls.begin(), - E = BlockByRefDecls.end(); I != E; ++I) { - ValueDecl *ND = (*I); - std::string Name(ND->getNameAsString()); - std::string RecName; - RewriteByRefString(RecName, Name, ND, true); - IdentifierInfo *II = &Context->Idents.get(RecName.c_str() - + sizeof("struct")); - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - II); - assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl"); - QualType castT = Context->getPointerType(Context->getTagDeclType(RD)); - - FD = SynthBlockInitFunctionDecl((*I)->getName()); - Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue, - SourceLocation()); - bool isNestedCapturedVar = false; - if (block) - for (BlockDecl::capture_const_iterator ci = block->capture_begin(), - ce = block->capture_end(); ci != ce; ++ci) { - const VarDecl *variable = ci->getVariable(); - if (variable == ND && ci->isNested()) { - assert (ci->isByRef() && - "SynthBlockInitExpr - captured block variable is not byref"); - isNestedCapturedVar = true; - break; - } - } - // captured nested byref variable has its address passed. Do not take - // its address again. 
- if (!isNestedCapturedVar) - Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, - Context->getPointerType(Exp->getType()), - VK_RValue, OK_Ordinary, SourceLocation()); - Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp); - InitExprs.push_back(Exp); - } - } - if (ImportedBlockDecls.size()) { - // generate BLOCK_HAS_COPY_DISPOSE(have helper funcs) | BLOCK_HAS_DESCRIPTOR - int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR); - unsigned IntSize = - static_cast(Context->getTypeSize(Context->IntTy)); - Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag), - Context->IntTy, SourceLocation()); - InitExprs.push_back(FlagExp); - } - NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(), - FType, VK_LValue, SourceLocation()); - NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf, - Context->getPointerType(NewRep->getType()), - VK_RValue, OK_Ordinary, SourceLocation()); - NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast, - NewRep); - BlockDeclRefs.clear(); - BlockByRefDecls.clear(); - BlockByRefDeclsPtrSet.clear(); - BlockByCopyDecls.clear(); - BlockByCopyDeclsPtrSet.clear(); - ImportedBlockDecls.clear(); - return NewRep; -} - -bool RewriteObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) { - if (const ObjCForCollectionStmt * CS = - dyn_cast(Stmts.back())) - return CS->getElement() == DS; - return false; -} - -//===----------------------------------------------------------------------===// -// Function Body / Expression rewriting -//===----------------------------------------------------------------------===// - -Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) { - if (isa(S) || isa(S) || - isa(S) || isa(S)) - Stmts.push_back(S); - else if (isa(S)) { - Stmts.push_back(S); - ObjCBcLabelNo.push_back(++BcLabelCount); - } - - // Pseudo-object operations and ivar references need special - // treatment because we're going to recursively rewrite them. - if (PseudoObjectExpr *PseudoOp = dyn_cast(S)) { - if (isa(PseudoOp->getSyntacticForm())) { - return RewritePropertyOrImplicitSetter(PseudoOp); - } else { - return RewritePropertyOrImplicitGetter(PseudoOp); - } - } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast(S)) { - return RewriteObjCIvarRefExpr(IvarRefExpr); - } - - SourceRange OrigStmtRange = S->getSourceRange(); - - // Perform a bottom up rewrite of all children. - for (Stmt::child_range CI = S->children(); CI; ++CI) - if (*CI) { - Stmt *childStmt = (*CI); - Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt); - if (newStmt) { - *CI = newStmt; - } - } - - if (BlockExpr *BE = dyn_cast(S)) { - SmallVector InnerBlockDeclRefs; - llvm::SmallPtrSet InnerContexts; - InnerContexts.insert(BE->getBlockDecl()); - ImportedLocalExternalDecls.clear(); - GetInnerBlockDeclRefExprs(BE->getBody(), - InnerBlockDeclRefs, InnerContexts); - // Rewrite the block body in place. - Stmt *SaveCurrentBody = CurrentBody; - CurrentBody = BE->getBody(); - PropParentMap = 0; - // block literal on rhs of a property-dot-sytax assignment - // must be replaced by its synthesize ast so getRewrittenText - // works as expected. 
In this case, what actually ends up on RHS - // is the blockTranscribed which is the helper function for the - // block literal; as in: self.c = ^() {[ace ARR];}; - bool saveDisableReplaceStmt = DisableReplaceStmt; - DisableReplaceStmt = false; - RewriteFunctionBodyOrGlobalInitializer(BE->getBody()); - DisableReplaceStmt = saveDisableReplaceStmt; - CurrentBody = SaveCurrentBody; - PropParentMap = 0; - ImportedLocalExternalDecls.clear(); - // Now we snarf the rewritten text and stash it away for later use. - std::string Str = Rewrite.getRewrittenText(BE->getSourceRange()); - RewrittenBlockExprs[BE] = Str; - - Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs); - - //blockTranscribed->dump(); - ReplaceStmt(S, blockTranscribed); - return blockTranscribed; - } - // Handle specific things. - if (ObjCEncodeExpr *AtEncode = dyn_cast(S)) - return RewriteAtEncode(AtEncode); - - if (ObjCSelectorExpr *AtSelector = dyn_cast(S)) - return RewriteAtSelector(AtSelector); - - if (ObjCStringLiteral *AtString = dyn_cast(S)) - return RewriteObjCStringLiteral(AtString); - - if (ObjCMessageExpr *MessExpr = dyn_cast(S)) { -#if 0 - // Before we rewrite it, put the original message expression in a comment. - SourceLocation startLoc = MessExpr->getLocStart(); - SourceLocation endLoc = MessExpr->getLocEnd(); - - const char *startBuf = SM->getCharacterData(startLoc); - const char *endBuf = SM->getCharacterData(endLoc); - - std::string messString; - messString += "// "; - messString.append(startBuf, endBuf-startBuf+1); - messString += "\n"; - - // FIXME: Missing definition of - // InsertText(clang::SourceLocation, char const*, unsigned int). - // InsertText(startLoc, messString.c_str(), messString.size()); - // Tried this, but it didn't work either... - // ReplaceText(startLoc, 0, messString.c_str(), messString.size()); -#endif - return RewriteMessageExpr(MessExpr); - } - - if (ObjCAtTryStmt *StmtTry = dyn_cast(S)) - return RewriteObjCTryStmt(StmtTry); - - if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast(S)) - return RewriteObjCSynchronizedStmt(StmtTry); - - if (ObjCAtThrowStmt *StmtThrow = dyn_cast(S)) - return RewriteObjCThrowStmt(StmtThrow); - - if (ObjCProtocolExpr *ProtocolExp = dyn_cast(S)) - return RewriteObjCProtocolExpr(ProtocolExp); - - if (ObjCForCollectionStmt *StmtForCollection = - dyn_cast(S)) - return RewriteObjCForCollectionStmt(StmtForCollection, - OrigStmtRange.getEnd()); - if (BreakStmt *StmtBreakStmt = - dyn_cast(S)) - return RewriteBreakStmt(StmtBreakStmt); - if (ContinueStmt *StmtContinueStmt = - dyn_cast(S)) - return RewriteContinueStmt(StmtContinueStmt); - - // Need to check for protocol refs (id
<P>, Foo <P>
    *) in variable decls - // and cast exprs. - if (DeclStmt *DS = dyn_cast(S)) { - // FIXME: What we're doing here is modifying the type-specifier that - // precedes the first Decl. In the future the DeclGroup should have - // a separate type-specifier that we can rewrite. - // NOTE: We need to avoid rewriting the DeclStmt if it is within - // the context of an ObjCForCollectionStmt. For example: - // NSArray *someArray; - // for (id index in someArray) ; - // This is because RewriteObjCForCollectionStmt() does textual rewriting - // and it depends on the original text locations/positions. - if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS)) - RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin()); - - // Blocks rewrite rules. - for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end(); - DI != DE; ++DI) { - Decl *SD = *DI; - if (ValueDecl *ND = dyn_cast(SD)) { - if (isTopLevelBlockPointerType(ND->getType())) - RewriteBlockPointerDecl(ND); - else if (ND->getType()->isFunctionPointerType()) - CheckFunctionPointerDecl(ND->getType(), ND); - if (VarDecl *VD = dyn_cast(SD)) { - if (VD->hasAttr()) { - static unsigned uniqueByrefDeclCount = 0; - assert(!BlockByRefDeclNo.count(ND) && - "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl"); - BlockByRefDeclNo[ND] = uniqueByrefDeclCount++; - RewriteByRefVar(VD); - } - else - RewriteTypeOfDecl(VD); - } - } - if (TypedefNameDecl *TD = dyn_cast(SD)) { - if (isTopLevelBlockPointerType(TD->getUnderlyingType())) - RewriteBlockPointerDecl(TD); - else if (TD->getUnderlyingType()->isFunctionPointerType()) - CheckFunctionPointerDecl(TD->getUnderlyingType(), TD); - } - } - } - - if (CStyleCastExpr *CE = dyn_cast(S)) - RewriteObjCQualifiedInterfaceTypes(CE); - - if (isa(S) || isa(S) || - isa(S) || isa(S)) { - assert(!Stmts.empty() && "Statement stack is empty"); - assert ((isa(Stmts.back()) || isa(Stmts.back()) || - isa(Stmts.back()) || isa(Stmts.back())) - && "Statement stack mismatch"); - Stmts.pop_back(); - } - // Handle blocks rewriting. - if (DeclRefExpr *DRE = dyn_cast(S)) { - ValueDecl *VD = DRE->getDecl(); - if (VD->hasAttr()) - return RewriteBlockDeclRefExpr(DRE); - if (HasLocalVariableExternalStorage(VD)) - return RewriteLocalVariableExternalStorage(DRE); - } - - if (CallExpr *CE = dyn_cast(S)) { - if (CE->getCallee()->getType()->isBlockPointerType()) { - Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee()); - ReplaceStmt(S, BlockCall); - return BlockCall; - } - } - if (CStyleCastExpr *CE = dyn_cast(S)) { - RewriteCastExpr(CE); - } -#if 0 - if (ImplicitCastExpr *ICE = dyn_cast(S)) { - CastExpr *Replacement = new (Context) CastExpr(ICE->getType(), - ICE->getSubExpr(), - SourceLocation()); - // Get the new text. - std::string SStr; - llvm::raw_string_ostream Buf(SStr); - Replacement->printPretty(Buf); - const std::string &Str = Buf.str(); - - printf("CAST = %s\n", &Str[0]); - InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size()); - delete S; - return Replacement; - } -#endif - // Return this stmt unmodified. 
- return S; -} - -void RewriteObjC::RewriteRecordBody(RecordDecl *RD) { - for (RecordDecl::field_iterator i = RD->field_begin(), - e = RD->field_end(); i != e; ++i) { - FieldDecl *FD = *i; - if (isTopLevelBlockPointerType(FD->getType())) - RewriteBlockPointerDecl(FD); - if (FD->getType()->isObjCQualifiedIdType() || - FD->getType()->isObjCQualifiedInterfaceType()) - RewriteObjCQualifiedInterfaceTypes(FD); - } -} - -/// HandleDeclInMainFile - This is called for each top-level decl defined in the -/// main file of the input. -void RewriteObjC::HandleDeclInMainFile(Decl *D) { - switch (D->getKind()) { - case Decl::Function: { - FunctionDecl *FD = cast(D); - if (FD->isOverloadedOperator()) - return; - - // Since function prototypes don't have ParmDecl's, we check the function - // prototype. This enables us to rewrite function declarations and - // definitions using the same code. - RewriteBlocksInFunctionProtoType(FD->getType(), FD); - - if (!FD->isThisDeclarationADefinition()) - break; - - // FIXME: If this should support Obj-C++, support CXXTryStmt - if (CompoundStmt *Body = dyn_cast_or_null(FD->getBody())) { - CurFunctionDef = FD; - CurFunctionDeclToDeclareForBlock = FD; - CurrentBody = Body; - Body = - cast_or_null(RewriteFunctionBodyOrGlobalInitializer(Body)); - FD->setBody(Body); - CurrentBody = 0; - if (PropParentMap) { - delete PropParentMap; - PropParentMap = 0; - } - // This synthesizes and inserts the block "impl" struct, invoke function, - // and any copy/dispose helper functions. - InsertBlockLiteralsWithinFunction(FD); - CurFunctionDef = 0; - CurFunctionDeclToDeclareForBlock = 0; - } - break; - } - case Decl::ObjCMethod: { - ObjCMethodDecl *MD = cast(D); - if (CompoundStmt *Body = MD->getCompoundBody()) { - CurMethodDef = MD; - CurrentBody = Body; - Body = - cast_or_null(RewriteFunctionBodyOrGlobalInitializer(Body)); - MD->setBody(Body); - CurrentBody = 0; - if (PropParentMap) { - delete PropParentMap; - PropParentMap = 0; - } - InsertBlockLiteralsWithinMethod(MD); - CurMethodDef = 0; - } - break; - } - case Decl::ObjCImplementation: { - ObjCImplementationDecl *CI = cast(D); - ClassImplementation.push_back(CI); - break; - } - case Decl::ObjCCategoryImpl: { - ObjCCategoryImplDecl *CI = cast(D); - CategoryImplementation.push_back(CI); - break; - } - case Decl::Var: { - VarDecl *VD = cast(D); - RewriteObjCQualifiedInterfaceTypes(VD); - if (isTopLevelBlockPointerType(VD->getType())) - RewriteBlockPointerDecl(VD); - else if (VD->getType()->isFunctionPointerType()) { - CheckFunctionPointerDecl(VD->getType(), VD); - if (VD->getInit()) { - if (CStyleCastExpr *CE = dyn_cast(VD->getInit())) { - RewriteCastExpr(CE); - } - } - } else if (VD->getType()->isRecordType()) { - RecordDecl *RD = VD->getType()->getAs()->getDecl(); - if (RD->isCompleteDefinition()) - RewriteRecordBody(RD); - } - if (VD->getInit()) { - GlobalVarDecl = VD; - CurrentBody = VD->getInit(); - RewriteFunctionBodyOrGlobalInitializer(VD->getInit()); - CurrentBody = 0; - if (PropParentMap) { - delete PropParentMap; - PropParentMap = 0; - } - SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName()); - GlobalVarDecl = 0; - - // This is needed for blocks. 
- if (CStyleCastExpr *CE = dyn_cast(VD->getInit())) { - RewriteCastExpr(CE); - } - } - break; - } - case Decl::TypeAlias: - case Decl::Typedef: { - if (TypedefNameDecl *TD = dyn_cast(D)) { - if (isTopLevelBlockPointerType(TD->getUnderlyingType())) - RewriteBlockPointerDecl(TD); - else if (TD->getUnderlyingType()->isFunctionPointerType()) - CheckFunctionPointerDecl(TD->getUnderlyingType(), TD); - } - break; - } - case Decl::CXXRecord: - case Decl::Record: { - RecordDecl *RD = cast(D); - if (RD->isCompleteDefinition()) - RewriteRecordBody(RD); - break; - } - default: - break; - } - // Nothing yet. -} - -void RewriteObjC::HandleTranslationUnit(ASTContext &C) { - if (Diags.hasErrorOccurred()) - return; - - RewriteInclude(); - - // Here's a great place to add any extra declarations that may be needed. - // Write out meta data for each @protocol(). - for (llvm::SmallPtrSet::iterator I = ProtocolExprDecls.begin(), - E = ProtocolExprDecls.end(); I != E; ++I) - RewriteObjCProtocolMetaData(*I, "", "", Preamble); - - InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false); - if (ClassImplementation.size() || CategoryImplementation.size()) - RewriteImplementations(); - - // Get the buffer corresponding to MainFileID. If we haven't changed it, then - // we are done. - if (const RewriteBuffer *RewriteBuf = - Rewrite.getRewriteBufferFor(MainFileID)) { - //printf("Changed:\n"); - *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end()); - } else { - llvm::errs() << "No changes\n"; - } - - if (ClassImplementation.size() || CategoryImplementation.size() || - ProtocolExprDecls.size()) { - // Rewrite Objective-c meta data* - std::string ResultStr; - RewriteMetaDataIntoBuffer(ResultStr); - // Emit metadata. - *OutFile << ResultStr; - } - OutFile->flush(); -} - -void RewriteObjCFragileABI::Initialize(ASTContext &context) { - InitializeCommon(context); - - // declaring objc_selector outside the parameter list removes a silly - // scope related warning... - if (IsHeader) - Preamble = "#pragma once\n"; - Preamble += "struct objc_selector; struct objc_class;\n"; - Preamble += "struct __rw_objc_super { struct objc_object *object; "; - Preamble += "struct objc_object *superClass; "; - if (LangOpts.MicrosoftExt) { - // Add a constructor for creating temporary objects. 
- Preamble += "__rw_objc_super(struct objc_object *o, struct objc_object *s) " - ": "; - Preamble += "object(o), superClass(s) {} "; - } - Preamble += "};\n"; - Preamble += "#ifndef _REWRITER_typedef_Protocol\n"; - Preamble += "typedef struct objc_object Protocol;\n"; - Preamble += "#define _REWRITER_typedef_Protocol\n"; - Preamble += "#endif\n"; - if (LangOpts.MicrosoftExt) { - Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n"; - Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n"; - } else - Preamble += "#define __OBJC_RW_DLLIMPORT extern\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend"; - Preamble += "(struct objc_object *, struct objc_selector *, ...);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper"; - Preamble += "(struct objc_super *, struct objc_selector *, ...);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSend_stret"; - Preamble += "(struct objc_object *, struct objc_selector *, ...);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSendSuper_stret"; - Preamble += "(struct objc_super *, struct objc_selector *, ...);\n"; - Preamble += "__OBJC_RW_DLLIMPORT double objc_msgSend_fpret"; - Preamble += "(struct objc_object *, struct objc_selector *, ...);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass"; - Preamble += "(const char *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass"; - Preamble += "(struct objc_class *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass"; - Preamble += "(const char *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw(struct objc_object *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_enter(void *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_exit(void *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_exception_extract(void *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match"; - Preamble += "(struct objc_class *, struct objc_object *);\n"; - // @synchronized hooks. - Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter(struct objc_object *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit(struct objc_object *);\n"; - Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n"; - Preamble += "#ifndef __FASTENUMERATIONSTATE\n"; - Preamble += "struct __objcFastEnumerationState {\n\t"; - Preamble += "unsigned long state;\n\t"; - Preamble += "void **itemsPtr;\n\t"; - Preamble += "unsigned long *mutationsPtr;\n\t"; - Preamble += "unsigned long extra[5];\n};\n"; - Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n"; - Preamble += "#define __FASTENUMERATIONSTATE\n"; - Preamble += "#endif\n"; - Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n"; - Preamble += "struct __NSConstantStringImpl {\n"; - Preamble += " int *isa;\n"; - Preamble += " int flags;\n"; - Preamble += " char *str;\n"; - Preamble += " long length;\n"; - Preamble += "};\n"; - Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n"; - Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n"; - Preamble += "#else\n"; - Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n"; - Preamble += "#endif\n"; - Preamble += "#define __NSCONSTANTSTRINGIMPL\n"; - Preamble += "#endif\n"; - // Blocks preamble. 
- Preamble += "#ifndef BLOCK_IMPL\n"; - Preamble += "#define BLOCK_IMPL\n"; - Preamble += "struct __block_impl {\n"; - Preamble += " void *isa;\n"; - Preamble += " int Flags;\n"; - Preamble += " int Reserved;\n"; - Preamble += " void *FuncPtr;\n"; - Preamble += "};\n"; - Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n"; - Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n"; - Preamble += "extern \"C\" __declspec(dllexport) " - "void _Block_object_assign(void *, const void *, const int);\n"; - Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n"; - Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n"; - Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n"; - Preamble += "#else\n"; - Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n"; - Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n"; - Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n"; - Preamble += "#endif\n"; - Preamble += "#endif\n"; - if (LangOpts.MicrosoftExt) { - Preamble += "#undef __OBJC_RW_DLLIMPORT\n"; - Preamble += "#undef __OBJC_RW_STATICIMPORT\n"; - Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests. - Preamble += "#define __attribute__(X)\n"; - Preamble += "#endif\n"; - Preamble += "#define __weak\n"; - } - else { - Preamble += "#define __block\n"; - Preamble += "#define __weak\n"; - } - // NOTE! Windows uses LLP64 for 64bit mode. So, cast pointer to long long - // as this avoids warning in any 64bit/32bit compilation model. - Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n"; -} - -/// RewriteIvarOffsetComputation - This rutine synthesizes computation of -/// ivar offset. -void RewriteObjCFragileABI::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar, - std::string &Result) { - if (ivar->isBitField()) { - // FIXME: The hack below doesn't work for bitfields. For now, we simply - // place all bitfields at offset 0. - Result += "0"; - } else { - Result += "__OFFSETOFIVAR__(struct "; - Result += ivar->getContainingInterface()->getNameAsString(); - if (LangOpts.MicrosoftExt) - Result += "_IMPL"; - Result += ", "; - Result += ivar->getNameAsString(); - Result += ")"; - } -} - -/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data. -void RewriteObjCFragileABI::RewriteObjCProtocolMetaData( - ObjCProtocolDecl *PDecl, StringRef prefix, - StringRef ClassName, std::string &Result) { - static bool objc_protocol_methods = false; - - // Output struct protocol_methods holder of method selector and type. - if (!objc_protocol_methods && PDecl->hasDefinition()) { - /* struct protocol_methods { - SEL _cmd; - char *method_types; - } - */ - Result += "\nstruct _protocol_methods {\n"; - Result += "\tstruct objc_selector *_cmd;\n"; - Result += "\tchar *method_types;\n"; - Result += "};\n"; - - objc_protocol_methods = true; - } - // Do not synthesize the protocol more than once. 
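__OFFSETOFIVAR__, emitted into the preamble above and consumed by RewriteIvarOffsetComputation, is the classic null-pointer offsetof trick. A standalone sketch with a hypothetical MyClass_IMPL layout (the real rewriter synthesizes the *_IMPL struct from the class's ivars):

    #include <iostream>

    // The macro as emitted into the preamble above.
    #define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)

    struct objc_object;                       // opaque, as in the preamble

    // Hypothetical <Class>_IMPL layout for a class with two ivars.
    struct MyClass_IMPL {
      struct objc_object *isa;
      int _count;
      void *_storage;
    };

    int main() {
      // RewriteIvarOffsetComputation emits one such expression per non-bitfield
      // ivar; bitfields are simply given offset 0, as noted above.
      std::cout << __OFFSETOFIVAR__(struct MyClass_IMPL, _count) << "\n";   // 8 on LP64
      std::cout << __OFFSETOFIVAR__(struct MyClass_IMPL, _storage) << "\n"; // 16 on LP64
    }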
- if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl())) - return; - - if (ObjCProtocolDecl *Def = PDecl->getDefinition()) - PDecl = Def; - - if (PDecl->instmeth_begin() != PDecl->instmeth_end()) { - unsigned NumMethods = std::distance(PDecl->instmeth_begin(), - PDecl->instmeth_end()); - /* struct _objc_protocol_method_list { - int protocol_method_count; - struct protocol_methods protocols[]; - } - */ - Result += "\nstatic struct {\n"; - Result += "\tint protocol_method_count;\n"; - Result += "\tstruct _protocol_methods protocol_methods["; - Result += utostr(NumMethods); - Result += "];\n} _OBJC_PROTOCOL_INSTANCE_METHODS_"; - Result += PDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__OBJC, __cat_inst_meth\")))= " - "{\n\t" + utostr(NumMethods) + "\n"; - - // Output instance methods declared in this protocol. - for (ObjCProtocolDecl::instmeth_iterator - I = PDecl->instmeth_begin(), E = PDecl->instmeth_end(); - I != E; ++I) { - if (I == PDecl->instmeth_begin()) - Result += "\t ,{{(struct objc_selector *)\""; - else - Result += "\t ,{(struct objc_selector *)\""; - Result += (*I)->getSelector().getAsString(); - std::string MethodTypeString; - Context->getObjCEncodingForMethodDecl((*I), MethodTypeString); - Result += "\", \""; - Result += MethodTypeString; - Result += "\"}\n"; - } - Result += "\t }\n};\n"; - } - - // Output class methods declared in this protocol. - unsigned NumMethods = std::distance(PDecl->classmeth_begin(), - PDecl->classmeth_end()); - if (NumMethods > 0) { - /* struct _objc_protocol_method_list { - int protocol_method_count; - struct protocol_methods protocols[]; - } - */ - Result += "\nstatic struct {\n"; - Result += "\tint protocol_method_count;\n"; - Result += "\tstruct _protocol_methods protocol_methods["; - Result += utostr(NumMethods); - Result += "];\n} _OBJC_PROTOCOL_CLASS_METHODS_"; - Result += PDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= " - "{\n\t"; - Result += utostr(NumMethods); - Result += "\n"; - - // Output instance methods declared in this protocol. 
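The instance-method list above is emitted as an anonymous static struct: a count followed by an inline array of selector/encoding pairs. A compilable sketch of that shape for a hypothetical protocol with two methods; the selector strings and type encodings are illustrative placeholders, and the __attribute__((used, section("__OBJC, __cat_inst_meth"))) decoration is dropped so the snippet builds anywhere:

    #include <iostream>

    struct _protocol_methods {
      void *_cmd;                             // struct objc_selector * in the real output
      const char *method_types;
    };

    static struct {
      int protocol_method_count;
      _protocol_methods protocol_methods[2];
    } _OBJC_PROTOCOL_INSTANCE_METHODS_MyProto = {
      2,
      { { (void *)"first",   "v16@0:8" },     // placeholder selector / encoding
        { (void *)"second:", "v24@0:8@16" } } // placeholder selector / encoding
    };

    int main() {
      std::cout << _OBJC_PROTOCOL_INSTANCE_METHODS_MyProto.protocol_method_count
                << "\n";                      // 2
    }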
- for (ObjCProtocolDecl::classmeth_iterator - I = PDecl->classmeth_begin(), E = PDecl->classmeth_end(); - I != E; ++I) { - if (I == PDecl->classmeth_begin()) - Result += "\t ,{{(struct objc_selector *)\""; - else - Result += "\t ,{(struct objc_selector *)\""; - Result += (*I)->getSelector().getAsString(); - std::string MethodTypeString; - Context->getObjCEncodingForMethodDecl((*I), MethodTypeString); - Result += "\", \""; - Result += MethodTypeString; - Result += "\"}\n"; - } - Result += "\t }\n};\n"; - } - - // Output: - /* struct _objc_protocol { - // Objective-C 1.0 extensions - struct _objc_protocol_extension *isa; - char *protocol_name; - struct _objc_protocol **protocol_list; - struct _objc_protocol_method_list *instance_methods; - struct _objc_protocol_method_list *class_methods; - }; - */ - static bool objc_protocol = false; - if (!objc_protocol) { - Result += "\nstruct _objc_protocol {\n"; - Result += "\tstruct _objc_protocol_extension *isa;\n"; - Result += "\tchar *protocol_name;\n"; - Result += "\tstruct _objc_protocol **protocol_list;\n"; - Result += "\tstruct _objc_protocol_method_list *instance_methods;\n"; - Result += "\tstruct _objc_protocol_method_list *class_methods;\n"; - Result += "};\n"; - - objc_protocol = true; - } - - Result += "\nstatic struct _objc_protocol _OBJC_PROTOCOL_"; - Result += PDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__OBJC, __protocol\")))= " - "{\n\t0, \""; - Result += PDecl->getNameAsString(); - Result += "\", 0, "; - if (PDecl->instmeth_begin() != PDecl->instmeth_end()) { - Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_"; - Result += PDecl->getNameAsString(); - Result += ", "; - } - else - Result += "0, "; - if (PDecl->classmeth_begin() != PDecl->classmeth_end()) { - Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_CLASS_METHODS_"; - Result += PDecl->getNameAsString(); - Result += "\n"; - } - else - Result += "0\n"; - Result += "};\n"; - - // Mark this protocol as having been generated. - if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl())) - llvm_unreachable("protocol already synthesized"); - -} - -void RewriteObjCFragileABI::RewriteObjCProtocolListMetaData( - const ObjCList &Protocols, - StringRef prefix, StringRef ClassName, - std::string &Result) { - if (Protocols.empty()) return; - - for (unsigned i = 0; i != Protocols.size(); i++) - RewriteObjCProtocolMetaData(Protocols[i], prefix, ClassName, Result); - - // Output the top lovel protocol meta-data for the class. 
- /* struct _objc_protocol_list { - struct _objc_protocol_list *next; - int protocol_count; - struct _objc_protocol *class_protocols[]; - } - */ - Result += "\nstatic struct {\n"; - Result += "\tstruct _objc_protocol_list *next;\n"; - Result += "\tint protocol_count;\n"; - Result += "\tstruct _objc_protocol *class_protocols["; - Result += utostr(Protocols.size()); - Result += "];\n} _OBJC_"; - Result += prefix; - Result += "_PROTOCOLS_"; - Result += ClassName; - Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= " - "{\n\t0, "; - Result += utostr(Protocols.size()); - Result += "\n"; - - Result += "\t,{&_OBJC_PROTOCOL_"; - Result += Protocols[0]->getNameAsString(); - Result += " \n"; - - for (unsigned i = 1; i != Protocols.size(); i++) { - Result += "\t ,&_OBJC_PROTOCOL_"; - Result += Protocols[i]->getNameAsString(); - Result += "\n"; - } - Result += "\t }\n};\n"; -} - -void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl, - std::string &Result) { - ObjCInterfaceDecl *CDecl = IDecl->getClassInterface(); - - // Explicitly declared @interface's are already synthesized. - if (CDecl->isImplicitInterfaceDecl()) { - // FIXME: Implementation of a class with no @interface (legacy) does not - // produce correct synthesis as yet. - RewriteObjCInternalStruct(CDecl, Result); - } - - // Build _objc_ivar_list metadata for classes ivars if needed - unsigned NumIvars = !IDecl->ivar_empty() - ? IDecl->ivar_size() - : (CDecl ? CDecl->ivar_size() : 0); - if (NumIvars > 0) { - static bool objc_ivar = false; - if (!objc_ivar) { - /* struct _objc_ivar { - char *ivar_name; - char *ivar_type; - int ivar_offset; - }; - */ - Result += "\nstruct _objc_ivar {\n"; - Result += "\tchar *ivar_name;\n"; - Result += "\tchar *ivar_type;\n"; - Result += "\tint ivar_offset;\n"; - Result += "};\n"; - - objc_ivar = true; - } - - /* struct { - int ivar_count; - struct _objc_ivar ivar_list[nIvars]; - }; - */ - Result += "\nstatic struct {\n"; - Result += "\tint ivar_count;\n"; - Result += "\tstruct _objc_ivar ivar_list["; - Result += utostr(NumIvars); - Result += "];\n} _OBJC_INSTANCE_VARIABLES_"; - Result += IDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__OBJC, __instance_vars\")))= " - "{\n\t"; - Result += utostr(NumIvars); - Result += "\n"; - - ObjCInterfaceDecl::ivar_iterator IVI, IVE; - SmallVector IVars; - if (!IDecl->ivar_empty()) { - for (ObjCInterfaceDecl::ivar_iterator - IV = IDecl->ivar_begin(), IVEnd = IDecl->ivar_end(); - IV != IVEnd; ++IV) - IVars.push_back(*IV); - IVI = IDecl->ivar_begin(); - IVE = IDecl->ivar_end(); - } else { - IVI = CDecl->ivar_begin(); - IVE = CDecl->ivar_end(); - } - Result += "\t,{{\""; - Result += IVI->getNameAsString(); - Result += "\", \""; - std::string TmpString, StrEncoding; - Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI); - QuoteDoublequotes(TmpString, StrEncoding); - Result += StrEncoding; - Result += "\", "; - RewriteIvarOffsetComputation(*IVI, Result); - Result += "}\n"; - for (++IVI; IVI != IVE; ++IVI) { - Result += "\t ,{\""; - Result += IVI->getNameAsString(); - Result += "\", \""; - std::string TmpString, StrEncoding; - Context->getObjCEncodingForType(IVI->getType(), TmpString, *IVI); - QuoteDoublequotes(TmpString, StrEncoding); - Result += StrEncoding; - Result += "\", "; - RewriteIvarOffsetComputation(*IVI, Result); - Result += "}\n"; - } - - Result += "\t }\n};\n"; - } - - // Build _objc_method_list for class's instance methods if needed - SmallVector - 
InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end()); - - // If any of our property implementations have associated getters or - // setters, produce metadata for them as well. - for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(), - PropEnd = IDecl->propimpl_end(); - Prop != PropEnd; ++Prop) { - if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) - continue; - if (!Prop->getPropertyIvarDecl()) - continue; - ObjCPropertyDecl *PD = Prop->getPropertyDecl(); - if (!PD) - continue; - if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl()) - if (!Getter->isDefined()) - InstanceMethods.push_back(Getter); - if (PD->isReadOnly()) - continue; - if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl()) - if (!Setter->isDefined()) - InstanceMethods.push_back(Setter); - } - RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(), - true, "", IDecl->getName(), Result); - - // Build _objc_method_list for class's class methods if needed - RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(), - false, "", IDecl->getName(), Result); - - // Protocols referenced in class declaration? - RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), - "CLASS", CDecl->getName(), Result); - - // Declaration of class/meta-class metadata - /* struct _objc_class { - struct _objc_class *isa; // or const char *root_class_name when metadata - const char *super_class_name; - char *name; - long version; - long info; - long instance_size; - struct _objc_ivar_list *ivars; - struct _objc_method_list *methods; - struct objc_cache *cache; - struct objc_protocol_list *protocols; - const char *ivar_layout; - struct _objc_class_ext *ext; - }; - */ - static bool objc_class = false; - if (!objc_class) { - Result += "\nstruct _objc_class {\n"; - Result += "\tstruct _objc_class *isa;\n"; - Result += "\tconst char *super_class_name;\n"; - Result += "\tchar *name;\n"; - Result += "\tlong version;\n"; - Result += "\tlong info;\n"; - Result += "\tlong instance_size;\n"; - Result += "\tstruct _objc_ivar_list *ivars;\n"; - Result += "\tstruct _objc_method_list *methods;\n"; - Result += "\tstruct objc_cache *cache;\n"; - Result += "\tstruct _objc_protocol_list *protocols;\n"; - Result += "\tconst char *ivar_layout;\n"; - Result += "\tstruct _objc_class_ext *ext;\n"; - Result += "};\n"; - objc_class = true; - } - - // Meta-class metadata generation. - ObjCInterfaceDecl *RootClass = 0; - ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass(); - while (SuperClass) { - RootClass = SuperClass; - SuperClass = SuperClass->getSuperClass(); - } - SuperClass = CDecl->getSuperClass(); - - Result += "\nstatic struct _objc_class _OBJC_METACLASS_"; - Result += CDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__OBJC, __meta_class\")))= " - "{\n\t(struct _objc_class *)\""; - Result += (RootClass ? RootClass->getNameAsString() : CDecl->getNameAsString()); - Result += "\""; - - if (SuperClass) { - Result += ", \""; - Result += SuperClass->getNameAsString(); - Result += "\", \""; - Result += CDecl->getNameAsString(); - Result += "\""; - } - else { - Result += ", 0, \""; - Result += CDecl->getNameAsString(); - Result += "\""; - } - // Set 'ivars' field for root class to 0. ObjC1 runtime does not use it. 
- // 'info' field is initialized to CLS_META(2) for metaclass - Result += ", 0,2, sizeof(struct _objc_class), 0"; - if (IDecl->classmeth_begin() != IDecl->classmeth_end()) { - Result += "\n\t, (struct _objc_method_list *)&_OBJC_CLASS_METHODS_"; - Result += IDecl->getNameAsString(); - Result += "\n"; - } - else - Result += ", 0\n"; - if (CDecl->protocol_begin() != CDecl->protocol_end()) { - Result += "\t,0, (struct _objc_protocol_list *)&_OBJC_CLASS_PROTOCOLS_"; - Result += CDecl->getNameAsString(); - Result += ",0,0\n"; - } - else - Result += "\t,0,0,0,0\n"; - Result += "};\n"; - - // class metadata generation. - Result += "\nstatic struct _objc_class _OBJC_CLASS_"; - Result += CDecl->getNameAsString(); - Result += " __attribute__ ((used, section (\"__OBJC, __class\")))= " - "{\n\t&_OBJC_METACLASS_"; - Result += CDecl->getNameAsString(); - if (SuperClass) { - Result += ", \""; - Result += SuperClass->getNameAsString(); - Result += "\", \""; - Result += CDecl->getNameAsString(); - Result += "\""; - } - else { - Result += ", 0, \""; - Result += CDecl->getNameAsString(); - Result += "\""; - } - // 'info' field is initialized to CLS_CLASS(1) for class - Result += ", 0,1"; - if (!ObjCSynthesizedStructs.count(CDecl)) - Result += ",0"; - else { - // class has size. Must synthesize its size. - Result += ",sizeof(struct "; - Result += CDecl->getNameAsString(); - if (LangOpts.MicrosoftExt) - Result += "_IMPL"; - Result += ")"; - } - if (NumIvars > 0) { - Result += ", (struct _objc_ivar_list *)&_OBJC_INSTANCE_VARIABLES_"; - Result += CDecl->getNameAsString(); - Result += "\n\t"; - } - else - Result += ",0"; - if (IDecl->instmeth_begin() != IDecl->instmeth_end()) { - Result += ", (struct _objc_method_list *)&_OBJC_INSTANCE_METHODS_"; - Result += CDecl->getNameAsString(); - Result += ", 0\n\t"; - } - else - Result += ",0,0"; - if (CDecl->protocol_begin() != CDecl->protocol_end()) { - Result += ", (struct _objc_protocol_list*)&_OBJC_CLASS_PROTOCOLS_"; - Result += CDecl->getNameAsString(); - Result += ", 0,0\n"; - } - else - Result += ",0,0,0\n"; - Result += "};\n"; -} - -void RewriteObjCFragileABI::RewriteMetaDataIntoBuffer(std::string &Result) { - int ClsDefCount = ClassImplementation.size(); - int CatDefCount = CategoryImplementation.size(); - - // For each implemented class, write out all its meta data. - for (int i = 0; i < ClsDefCount; i++) - RewriteObjCClassMetaData(ClassImplementation[i], Result); - - // For each implemented category, write out all its meta data. 
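For orientation, the two initializers assembled above boil down to fairly small C once emitted. A sketch of the output for a hypothetical root class Foo that implements no methods, declares no ivars and adopts no protocols (field values follow the emission logic above; when methods, ivars or protocols exist, the corresponding fields instead point at the _OBJC_*_METHODS_, _OBJC_INSTANCE_VARIABLES_ and _OBJC_CLASS_PROTOCOLS_ variables emitted earlier):

    struct _objc_class {
        struct _objc_class *isa;
        const char *super_class_name;
        char *name;
        long version;
        long info;
        long instance_size;
        struct _objc_ivar_list *ivars;
        struct _objc_method_list *methods;
        struct objc_cache *cache;
        struct _objc_protocol_list *protocols;
        const char *ivar_layout;
        struct _objc_class_ext *ext;
    };

    /* info is CLS_META(2) for the metaclass and CLS_CLASS(1) for the class. */
    static struct _objc_class _OBJC_METACLASS_Foo
        __attribute__ ((used, section ("__OBJC, __meta_class"))) = {
        (struct _objc_class *)"Foo", 0, "Foo", 0, 2, sizeof(struct _objc_class),
        0, 0, 0, 0, 0, 0
    };

    static struct _objc_class _OBJC_CLASS_Foo
        __attribute__ ((used, section ("__OBJC, __class"))) = {
        &_OBJC_METACLASS_Foo, 0, "Foo", 0, 1, 0,
        0, 0, 0, 0, 0, 0
    };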
- for (int i = 0; i < CatDefCount; i++) - RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result); - - // Write objc_symtab metadata - /* - struct _objc_symtab - { - long sel_ref_cnt; - SEL *refs; - short cls_def_cnt; - short cat_def_cnt; - void *defs[cls_def_cnt + cat_def_cnt]; - }; - */ - - Result += "\nstruct _objc_symtab {\n"; - Result += "\tlong sel_ref_cnt;\n"; - Result += "\tSEL *refs;\n"; - Result += "\tshort cls_def_cnt;\n"; - Result += "\tshort cat_def_cnt;\n"; - Result += "\tvoid *defs[" + utostr(ClsDefCount + CatDefCount)+ "];\n"; - Result += "};\n\n"; - - Result += "static struct _objc_symtab " - "_OBJC_SYMBOLS __attribute__((used, section (\"__OBJC, __symbols\")))= {\n"; - Result += "\t0, 0, " + utostr(ClsDefCount) - + ", " + utostr(CatDefCount) + "\n"; - for (int i = 0; i < ClsDefCount; i++) { - Result += "\t,&_OBJC_CLASS_"; - Result += ClassImplementation[i]->getNameAsString(); - Result += "\n"; - } - - for (int i = 0; i < CatDefCount; i++) { - Result += "\t,&_OBJC_CATEGORY_"; - Result += CategoryImplementation[i]->getClassInterface()->getNameAsString(); - Result += "_"; - Result += CategoryImplementation[i]->getNameAsString(); - Result += "\n"; - } - - Result += "};\n\n"; - - // Write objc_module metadata - - /* - struct _objc_module { - long version; - long size; - const char *name; - struct _objc_symtab *symtab; - } - */ - - Result += "\nstruct _objc_module {\n"; - Result += "\tlong version;\n"; - Result += "\tlong size;\n"; - Result += "\tconst char *name;\n"; - Result += "\tstruct _objc_symtab *symtab;\n"; - Result += "};\n\n"; - Result += "static struct _objc_module " - "_OBJC_MODULES __attribute__ ((used, section (\"__OBJC, __module_info\")))= {\n"; - Result += "\t" + utostr(OBJC_ABI_VERSION) + - ", sizeof(struct _objc_module), \"\", &_OBJC_SYMBOLS\n"; - Result += "};\n\n"; - - if (LangOpts.MicrosoftExt) { - if (ProtocolExprDecls.size()) { - Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n"; - Result += "#pragma data_seg(push, \".objc_protocol$B\")\n"; - for (llvm::SmallPtrSet::iterator I = ProtocolExprDecls.begin(), - E = ProtocolExprDecls.end(); I != E; ++I) { - Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_"; - Result += (*I)->getNameAsString(); - Result += " = &_OBJC_PROTOCOL_"; - Result += (*I)->getNameAsString(); - Result += ";\n"; - } - Result += "#pragma data_seg(pop)\n\n"; - } - Result += "#pragma section(\".objc_module_info$B\",long,read,write)\n"; - Result += "#pragma data_seg(push, \".objc_module_info$B\")\n"; - Result += "static struct _objc_module *_POINTER_OBJC_MODULES = "; - Result += "&_OBJC_MODULES;\n"; - Result += "#pragma data_seg(pop)\n\n"; - } -} - -/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category -/// implementation. -void RewriteObjCFragileABI::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl, - std::string &Result) { - ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface(); - // Find category declaration for this implementation. 
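Similarly, the symbol-table and module records written here reduce to two small statics per translation unit. Continuing the hypothetical Foo sketch above, for a file that defines that one class and no categories (the value 7 is only a placeholder for OBJC_ABI_VERSION, and SEL is assumed to be the usual struct objc_selector * typedef from the rewriter's preamble):

    typedef struct objc_selector *SEL;    /* assumption: provided by the rewriter's preamble */

    struct _objc_symtab {
        long sel_ref_cnt;
        SEL *refs;
        short cls_def_cnt;
        short cat_def_cnt;
        void *defs[1];                     /* cls_def_cnt + cat_def_cnt slots */
    };

    static struct _objc_symtab _OBJC_SYMBOLS
        __attribute__((used, section ("__OBJC, __symbols"))) = {
        0, 0, 1, 0
        , &_OBJC_CLASS_Foo                 /* the class metadata variable from the sketch above */
    };

    struct _objc_module {
        long version;
        long size;
        const char *name;
        struct _objc_symtab *symtab;
    };

    static struct _objc_module _OBJC_MODULES
        __attribute__ ((used, section ("__OBJC, __module_info"))) = {
        7 /* placeholder for OBJC_ABI_VERSION */, sizeof(struct _objc_module),
        "", &_OBJC_SYMBOLS
    };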
- ObjCCategoryDecl *CDecl; - for (CDecl = ClassDecl->getCategoryList(); CDecl; - CDecl = CDecl->getNextClassCategory()) - if (CDecl->getIdentifier() == IDecl->getIdentifier()) - break; - - std::string FullCategoryName = ClassDecl->getNameAsString(); - FullCategoryName += '_'; - FullCategoryName += IDecl->getNameAsString(); - - // Build _objc_method_list for class's instance methods if needed - SmallVector - InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end()); - - // If any of our property implementations have associated getters or - // setters, produce metadata for them as well. - for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(), - PropEnd = IDecl->propimpl_end(); - Prop != PropEnd; ++Prop) { - if (Prop->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) - continue; - if (!Prop->getPropertyIvarDecl()) - continue; - ObjCPropertyDecl *PD = Prop->getPropertyDecl(); - if (!PD) - continue; - if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl()) - InstanceMethods.push_back(Getter); - if (PD->isReadOnly()) - continue; - if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl()) - InstanceMethods.push_back(Setter); - } - RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(), - true, "CATEGORY_", FullCategoryName.c_str(), - Result); - - // Build _objc_method_list for class's class methods if needed - RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(), - false, "CATEGORY_", FullCategoryName.c_str(), - Result); - - // Protocols referenced in class declaration? - // Null CDecl is case of a category implementation with no category interface - if (CDecl) - RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), "CATEGORY", - FullCategoryName, Result); - /* struct _objc_category { - char *category_name; - char *class_name; - struct _objc_method_list *instance_methods; - struct _objc_method_list *class_methods; - struct _objc_protocol_list *protocols; - // Objective-C 1.0 extensions - uint32_t size; // sizeof (struct _objc_category) - struct _objc_property_list *instance_properties; // category's own - // @property decl. 
- }; - */ - - static bool objc_category = false; - if (!objc_category) { - Result += "\nstruct _objc_category {\n"; - Result += "\tchar *category_name;\n"; - Result += "\tchar *class_name;\n"; - Result += "\tstruct _objc_method_list *instance_methods;\n"; - Result += "\tstruct _objc_method_list *class_methods;\n"; - Result += "\tstruct _objc_protocol_list *protocols;\n"; - Result += "\tunsigned int size;\n"; - Result += "\tstruct _objc_property_list *instance_properties;\n"; - Result += "};\n"; - objc_category = true; - } - Result += "\nstatic struct _objc_category _OBJC_CATEGORY_"; - Result += FullCategoryName; - Result += " __attribute__ ((used, section (\"__OBJC, __category\")))= {\n\t\""; - Result += IDecl->getNameAsString(); - Result += "\"\n\t, \""; - Result += ClassDecl->getNameAsString(); - Result += "\"\n"; - - if (IDecl->instmeth_begin() != IDecl->instmeth_end()) { - Result += "\t, (struct _objc_method_list *)" - "&_OBJC_CATEGORY_INSTANCE_METHODS_"; - Result += FullCategoryName; - Result += "\n"; - } - else - Result += "\t, 0\n"; - if (IDecl->classmeth_begin() != IDecl->classmeth_end()) { - Result += "\t, (struct _objc_method_list *)" - "&_OBJC_CATEGORY_CLASS_METHODS_"; - Result += FullCategoryName; - Result += "\n"; - } - else - Result += "\t, 0\n"; - - if (CDecl && CDecl->protocol_begin() != CDecl->protocol_end()) { - Result += "\t, (struct _objc_protocol_list *)&_OBJC_CATEGORY_PROTOCOLS_"; - Result += FullCategoryName; - Result += "\n"; - } - else - Result += "\t, 0\n"; - Result += "\t, sizeof(struct _objc_category), 0\n};\n"; -} - -// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or -/// class methods. -template -void RewriteObjCFragileABI::RewriteObjCMethodsMetaData(MethodIterator MethodBegin, - MethodIterator MethodEnd, - bool IsInstanceMethod, - StringRef prefix, - StringRef ClassName, - std::string &Result) { - if (MethodBegin == MethodEnd) return; - - if (!objc_impl_method) { - /* struct _objc_method { - SEL _cmd; - char *method_types; - void *_imp; - } - */ - Result += "\nstruct _objc_method {\n"; - Result += "\tSEL _cmd;\n"; - Result += "\tchar *method_types;\n"; - Result += "\tvoid *_imp;\n"; - Result += "};\n"; - - objc_impl_method = true; - } - - // Build _objc_method_list for class's methods if needed - - /* struct { - struct _objc_method_list *next_method; - int method_count; - struct _objc_method method_list[]; - } - */ - unsigned NumMethods = std::distance(MethodBegin, MethodEnd); - Result += "\nstatic struct {\n"; - Result += "\tstruct _objc_method_list *next_method;\n"; - Result += "\tint method_count;\n"; - Result += "\tstruct _objc_method method_list["; - Result += utostr(NumMethods); - Result += "];\n} _OBJC_"; - Result += prefix; - Result += IsInstanceMethod ? "INSTANCE" : "CLASS"; - Result += "_METHODS_"; - Result += ClassName; - Result += " __attribute__ ((used, section (\"__OBJC, __"; - Result += IsInstanceMethod ? 
"inst" : "cls"; - Result += "_meth\")))= "; - Result += "{\n\t0, " + utostr(NumMethods) + "\n"; - - Result += "\t,{{(SEL)\""; - Result += (*MethodBegin)->getSelector().getAsString().c_str(); - std::string MethodTypeString; - Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString); - Result += "\", \""; - Result += MethodTypeString; - Result += "\", (void *)"; - Result += MethodInternalNames[*MethodBegin]; - Result += "}\n"; - for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) { - Result += "\t ,{(SEL)\""; - Result += (*MethodBegin)->getSelector().getAsString().c_str(); - std::string MethodTypeString; - Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString); - Result += "\", \""; - Result += MethodTypeString; - Result += "\", (void *)"; - Result += MethodInternalNames[*MethodBegin]; - Result += "}\n"; - } - Result += "\t }\n};\n"; -} - -Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) { - SourceRange OldRange = IV->getSourceRange(); - Expr *BaseExpr = IV->getBase(); - - // Rewrite the base, but without actually doing replaces. - { - DisableReplaceStmtScope S(*this); - BaseExpr = cast(RewriteFunctionBodyOrGlobalInitializer(BaseExpr)); - IV->setBase(BaseExpr); - } - - ObjCIvarDecl *D = IV->getDecl(); - - Expr *Replacement = IV; - if (CurMethodDef) { - if (BaseExpr->getType()->isObjCObjectPointerType()) { - const ObjCInterfaceType *iFaceDecl = - dyn_cast(BaseExpr->getType()->getPointeeType()); - assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null"); - // lookup which class implements the instance variable. - ObjCInterfaceDecl *clsDeclared = 0; - iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(), - clsDeclared); - assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class"); - - // Synthesize an explicit cast to gain access to the ivar. - std::string RecName = clsDeclared->getIdentifier()->getName(); - RecName += "_IMPL"; - IdentifierInfo *II = &Context->Idents.get(RecName); - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - II); - assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl"); - QualType castT = Context->getPointerType(Context->getTagDeclType(RD)); - CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT, - CK_BitCast, - IV->getBase()); - // Don't forget the parens to enforce the proper binding. - ParenExpr *PE = new (Context) ParenExpr(OldRange.getBegin(), - OldRange.getEnd(), - castExpr); - if (IV->isFreeIvar() && - declaresSameEntity(CurMethodDef->getClassInterface(), iFaceDecl->getDecl())) { - MemberExpr *ME = new (Context) MemberExpr(PE, true, D, - IV->getLocation(), - D->getType(), - VK_LValue, OK_Ordinary); - Replacement = ME; - } else { - IV->setBase(PE); - } - } - } else { // we are outside a method. - assert(!IV->isFreeIvar() && "Cannot have a free standing ivar outside a method"); - - // Explicit ivar refs need to have a cast inserted. - // FIXME: consider sharing some of this code with the code above. - if (BaseExpr->getType()->isObjCObjectPointerType()) { - const ObjCInterfaceType *iFaceDecl = - dyn_cast(BaseExpr->getType()->getPointeeType()); - // lookup which class implements the instance variable. - ObjCInterfaceDecl *clsDeclared = 0; - iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(), - clsDeclared); - assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class"); - - // Synthesize an explicit cast to gain access to the ivar. 
- std::string RecName = clsDeclared->getIdentifier()->getName(); - RecName += "_IMPL"; - IdentifierInfo *II = &Context->Idents.get(RecName); - RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl, - SourceLocation(), SourceLocation(), - II); - assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl"); - QualType castT = Context->getPointerType(Context->getTagDeclType(RD)); - CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT, - CK_BitCast, - IV->getBase()); - // Don't forget the parens to enforce the proper binding. - ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(), - IV->getBase()->getLocEnd(), castExpr); - // Cannot delete IV->getBase(), since PE points to it. - // Replace the old base with the cast. This is important when doing - // embedded rewrites. For example, [newInv->_container addObject:0]. - IV->setBase(PE); - } - } - - ReplaceStmtWithRange(IV, Replacement, OldRange); - return Replacement; -} diff --git a/lib/Rewrite/RewriteRope.cpp b/lib/Rewrite/RewriteRope.cpp deleted file mode 100644 index cc8de1b..0000000 --- a/lib/Rewrite/RewriteRope.cpp +++ /dev/null @@ -1,811 +0,0 @@ -//===--- RewriteRope.cpp - Rope specialized for rewriter --------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements the RewriteRope class, which is a powerful string. -// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/RewriteRope.h" -#include "clang/Basic/LLVM.h" -#include -using namespace clang; - -/// RewriteRope is a "strong" string class, designed to make insertions and -/// deletions in the middle of the string nearly constant time (really, they are -/// O(log N), but with a very low constant factor). -/// -/// The implementation of this datastructure is a conceptual linear sequence of -/// RopePiece elements. Each RopePiece represents a view on a separately -/// allocated and reference counted string. This means that splitting a very -/// long string can be done in constant time by splitting a RopePiece that -/// references the whole string into two rope pieces that reference each half. -/// Once split, another string can be inserted in between the two halves by -/// inserting a RopePiece in between the two others. All of this is very -/// inexpensive: it takes time proportional to the number of RopePieces, not the -/// length of the strings they represent. -/// -/// While a linear sequences of RopePieces is the conceptual model, the actual -/// implementation captures them in an adapted B+ Tree. Using a B+ tree (which -/// is a tree that keeps the values in the leaves and has where each node -/// contains a reasonable number of pointers to children/values) allows us to -/// maintain efficient operation when the RewriteRope contains a *huge* number -/// of RopePieces. The basic idea of the B+ Tree is that it allows us to find -/// the RopePiece corresponding to some offset very efficiently, and it -/// automatically balances itself on insertions of RopePieces (which can happen -/// for both insertions and erases of string ranges). -/// -/// The one wrinkle on the theory is that we don't attempt to keep the tree -/// properly balanced when erases happen. Erases of string data can both insert -/// new RopePieces (e.g. 
when the middle of some other rope piece is deleted, -/// which results in two rope pieces, which is just like an insert) or it can -/// reduce the number of RopePieces maintained by the B+Tree. In the case when -/// the number of RopePieces is reduced, we don't attempt to maintain the -/// standard 'invariant' that each node in the tree contains at least -/// 'WidthFactor' children/values. For our use cases, this doesn't seem to -/// matter. -/// -/// The implementation below is primarily implemented in terms of three classes: -/// RopePieceBTreeNode - Common base class for: -/// -/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece -/// nodes. This directly represents a chunk of the string with those -/// RopePieces contatenated. -/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages -/// up to '2*WidthFactor' other nodes in the tree. - - -//===----------------------------------------------------------------------===// -// RopePieceBTreeNode Class -//===----------------------------------------------------------------------===// - -namespace { - /// RopePieceBTreeNode - Common base class of RopePieceBTreeLeaf and - /// RopePieceBTreeInterior. This provides some 'virtual' dispatching methods - /// and a flag that determines which subclass the instance is. Also - /// important, this node knows the full extend of the node, including any - /// children that it has. This allows efficient skipping over entire subtrees - /// when looking for an offset in the BTree. - class RopePieceBTreeNode { - protected: - /// WidthFactor - This controls the number of K/V slots held in the BTree: - /// how wide it is. Each level of the BTree is guaranteed to have at least - /// 'WidthFactor' elements in it (either ropepieces or children), (except - /// the root, which may have less) and may have at most 2*WidthFactor - /// elements. - enum { WidthFactor = 8 }; - - /// Size - This is the number of bytes of file this node (including any - /// potential children) covers. - unsigned Size; - - /// IsLeaf - True if this is an instance of RopePieceBTreeLeaf, false if it - /// is an instance of RopePieceBTreeInterior. - bool IsLeaf; - - RopePieceBTreeNode(bool isLeaf) : Size(0), IsLeaf(isLeaf) {} - ~RopePieceBTreeNode() {} - public: - - bool isLeaf() const { return IsLeaf; } - unsigned size() const { return Size; } - - void Destroy(); - - /// split - Split the range containing the specified offset so that we are - /// guaranteed that there is a place to do an insertion at the specified - /// offset. The offset is relative, so "0" is the start of the node. - /// - /// If there is no space in this subtree for the extra piece, the extra tree - /// node is returned and must be inserted into a parent. - RopePieceBTreeNode *split(unsigned Offset); - - /// insert - Insert the specified ropepiece into this tree node at the - /// specified offset. The offset is relative, so "0" is the start of the - /// node. - /// - /// If there is no space in this subtree for the extra piece, the extra tree - /// node is returned and must be inserted into a parent. - RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R); - - /// erase - Remove NumBytes from this node at the specified offset. We are - /// guaranteed that there is a split at Offset. 
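The core idea described in the comments above can be sketched without any of the B+-tree machinery: a rope piece is just a (buffer, start, end) view, so splitting a long string manipulates only these small records and never copies characters. A minimal stand-in, using shared_ptr in place of the real reference-counted RopeRefCountString:

    #include <memory>
    #include <string>
    #include <utility>

    struct Piece {
        std::shared_ptr<const std::string> Str; // shared, immutable backing buffer
        unsigned StartOffs, EndOffs;            // half-open view [StartOffs, EndOffs)
        unsigned size() const { return EndOffs - StartOffs; }
    };

    // Split one piece at a piece-relative offset into two adjacent views.
    // No characters are copied; both halves keep a reference to the buffer.
    static std::pair<Piece, Piece> splitPiece(const Piece &P, unsigned Offset) {
        Piece Head = {P.Str, P.StartOffs, P.StartOffs + Offset};
        Piece Tail = {P.Str, P.StartOffs + Offset, P.EndOffs};
        return {Head, Tail};
    }

    // splitPiece(P, 3) yields two views over the same buffer: the first 3 bytes
    // of P and the remainder; a new piece can then be inserted between them.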
- void erase(unsigned Offset, unsigned NumBytes); - - //static inline bool classof(const RopePieceBTreeNode *) { return true; } - - }; -} // end anonymous namespace - -//===----------------------------------------------------------------------===// -// RopePieceBTreeLeaf Class -//===----------------------------------------------------------------------===// - -namespace { - /// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece - /// nodes. This directly represents a chunk of the string with those - /// RopePieces contatenated. Since this is a B+Tree, all values (in this case - /// instances of RopePiece) are stored in leaves like this. To make iteration - /// over the leaves efficient, they maintain a singly linked list through the - /// NextLeaf field. This allows the B+Tree forward iterator to be constant - /// time for all increments. - class RopePieceBTreeLeaf : public RopePieceBTreeNode { - /// NumPieces - This holds the number of rope pieces currently active in the - /// Pieces array. - unsigned char NumPieces; - - /// Pieces - This tracks the file chunks currently in this leaf. - /// - RopePiece Pieces[2*WidthFactor]; - - /// NextLeaf - This is a pointer to the next leaf in the tree, allowing - /// efficient in-order forward iteration of the tree without traversal. - RopePieceBTreeLeaf **PrevLeaf, *NextLeaf; - public: - RopePieceBTreeLeaf() : RopePieceBTreeNode(true), NumPieces(0), - PrevLeaf(0), NextLeaf(0) {} - ~RopePieceBTreeLeaf() { - if (PrevLeaf || NextLeaf) - removeFromLeafInOrder(); - clear(); - } - - bool isFull() const { return NumPieces == 2*WidthFactor; } - - /// clear - Remove all rope pieces from this leaf. - void clear() { - while (NumPieces) - Pieces[--NumPieces] = RopePiece(); - Size = 0; - } - - unsigned getNumPieces() const { return NumPieces; } - - const RopePiece &getPiece(unsigned i) const { - assert(i < getNumPieces() && "Invalid piece ID"); - return Pieces[i]; - } - - const RopePieceBTreeLeaf *getNextLeafInOrder() const { return NextLeaf; } - void insertAfterLeafInOrder(RopePieceBTreeLeaf *Node) { - assert(PrevLeaf == 0 && NextLeaf == 0 && "Already in ordering"); - - NextLeaf = Node->NextLeaf; - if (NextLeaf) - NextLeaf->PrevLeaf = &NextLeaf; - PrevLeaf = &Node->NextLeaf; - Node->NextLeaf = this; - } - - void removeFromLeafInOrder() { - if (PrevLeaf) { - *PrevLeaf = NextLeaf; - if (NextLeaf) - NextLeaf->PrevLeaf = PrevLeaf; - } else if (NextLeaf) { - NextLeaf->PrevLeaf = 0; - } - } - - /// FullRecomputeSizeLocally - This method recomputes the 'Size' field by - /// summing the size of all RopePieces. - void FullRecomputeSizeLocally() { - Size = 0; - for (unsigned i = 0, e = getNumPieces(); i != e; ++i) - Size += getPiece(i).size(); - } - - /// split - Split the range containing the specified offset so that we are - /// guaranteed that there is a place to do an insertion at the specified - /// offset. The offset is relative, so "0" is the start of the node. - /// - /// If there is no space in this subtree for the extra piece, the extra tree - /// node is returned and must be inserted into a parent. - RopePieceBTreeNode *split(unsigned Offset); - - /// insert - Insert the specified ropepiece into this tree node at the - /// specified offset. The offset is relative, so "0" is the start of the - /// node. - /// - /// If there is no space in this subtree for the extra piece, the extra tree - /// node is returned and must be inserted into a parent. 
- RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R); - - - /// erase - Remove NumBytes from this node at the specified offset. We are - /// guaranteed that there is a split at Offset. - void erase(unsigned Offset, unsigned NumBytes); - - //static inline bool classof(const RopePieceBTreeLeaf *) { return true; } - static inline bool classof(const RopePieceBTreeNode *N) { - return N->isLeaf(); - } - }; -} // end anonymous namespace - -/// split - Split the range containing the specified offset so that we are -/// guaranteed that there is a place to do an insertion at the specified -/// offset. The offset is relative, so "0" is the start of the node. -/// -/// If there is no space in this subtree for the extra piece, the extra tree -/// node is returned and must be inserted into a parent. -RopePieceBTreeNode *RopePieceBTreeLeaf::split(unsigned Offset) { - // Find the insertion point. We are guaranteed that there is a split at the - // specified offset so find it. - if (Offset == 0 || Offset == size()) { - // Fastpath for a common case. There is already a splitpoint at the end. - return 0; - } - - // Find the piece that this offset lands in. - unsigned PieceOffs = 0; - unsigned i = 0; - while (Offset >= PieceOffs+Pieces[i].size()) { - PieceOffs += Pieces[i].size(); - ++i; - } - - // If there is already a split point at the specified offset, just return - // success. - if (PieceOffs == Offset) - return 0; - - // Otherwise, we need to split piece 'i' at Offset-PieceOffs. Convert Offset - // to being Piece relative. - unsigned IntraPieceOffset = Offset-PieceOffs; - - // We do this by shrinking the RopePiece and then doing an insert of the tail. - RopePiece Tail(Pieces[i].StrData, Pieces[i].StartOffs+IntraPieceOffset, - Pieces[i].EndOffs); - Size -= Pieces[i].size(); - Pieces[i].EndOffs = Pieces[i].StartOffs+IntraPieceOffset; - Size += Pieces[i].size(); - - return insert(Offset, Tail); -} - - -/// insert - Insert the specified RopePiece into this tree node at the -/// specified offset. The offset is relative, so "0" is the start of the node. -/// -/// If there is no space in this subtree for the extra piece, the extra tree -/// node is returned and must be inserted into a parent. -RopePieceBTreeNode *RopePieceBTreeLeaf::insert(unsigned Offset, - const RopePiece &R) { - // If this node is not full, insert the piece. - if (!isFull()) { - // Find the insertion point. We are guaranteed that there is a split at the - // specified offset so find it. - unsigned i = 0, e = getNumPieces(); - if (Offset == size()) { - // Fastpath for a common case. - i = e; - } else { - unsigned SlotOffs = 0; - for (; Offset > SlotOffs; ++i) - SlotOffs += getPiece(i).size(); - assert(SlotOffs == Offset && "Split didn't occur before insertion!"); - } - - // For an insertion into a non-full leaf node, just insert the value in - // its sorted position. This requires moving later values over. - for (; i != e; --e) - Pieces[e] = Pieces[e-1]; - Pieces[i] = R; - ++NumPieces; - Size += R.size(); - return 0; - } - - // Otherwise, if this is leaf is full, split it in two halves. Since this - // node is full, it contains 2*WidthFactor values. We move the first - // 'WidthFactor' values to the LHS child (which we leave in this node) and - // move the last 'WidthFactor' values into the RHS child. - - // Create the new node. - RopePieceBTreeLeaf *NewNode = new RopePieceBTreeLeaf(); - - // Move over the last 'WidthFactor' values from here to NewNode. 
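When a leaf is already full, the insertion path that follows falls back to the classic B-tree move: keep the first WidthFactor entries, move the last WidthFactor into a freshly allocated sibling, and hand that sibling up to the parent. Stripped of the RopePiece specifics, the step looks roughly like this (a toy node type, not the real class):

    #include <algorithm>
    #include <array>

    enum { WidthFactor = 8 };                 // same capacity rule as above

    struct ToyLeaf {
        std::array<int, 2 * WidthFactor> Slots{};
        unsigned NumSlots = 0;
    };

    // Split a full leaf in half; the caller inserts the returned sibling into
    // the parent, or grows a new root if this node was the root.
    static ToyLeaf *splitFullLeaf(ToyLeaf &Full) {
        ToyLeaf *RHS = new ToyLeaf();
        std::copy(Full.Slots.begin() + WidthFactor, Full.Slots.end(),
                  RHS->Slots.begin());
        Full.NumSlots = RHS->NumSlots = WidthFactor;
        return RHS;
    }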
- std::copy(&Pieces[WidthFactor], &Pieces[2*WidthFactor], - &NewNode->Pieces[0]); - // Replace old pieces with null RopePieces to drop refcounts. - std::fill(&Pieces[WidthFactor], &Pieces[2*WidthFactor], RopePiece()); - - // Decrease the number of values in the two nodes. - NewNode->NumPieces = NumPieces = WidthFactor; - - // Recompute the two nodes' size. - NewNode->FullRecomputeSizeLocally(); - FullRecomputeSizeLocally(); - - // Update the list of leaves. - NewNode->insertAfterLeafInOrder(this); - - // These insertions can't fail. - if (this->size() >= Offset) - this->insert(Offset, R); - else - NewNode->insert(Offset - this->size(), R); - return NewNode; -} - -/// erase - Remove NumBytes from this node at the specified offset. We are -/// guaranteed that there is a split at Offset. -void RopePieceBTreeLeaf::erase(unsigned Offset, unsigned NumBytes) { - // Since we are guaranteed that there is a split at Offset, we start by - // finding the Piece that starts there. - unsigned PieceOffs = 0; - unsigned i = 0; - for (; Offset > PieceOffs; ++i) - PieceOffs += getPiece(i).size(); - assert(PieceOffs == Offset && "Split didn't occur before erase!"); - - unsigned StartPiece = i; - - // Figure out how many pieces completely cover 'NumBytes'. We want to remove - // all of them. - for (; Offset+NumBytes > PieceOffs+getPiece(i).size(); ++i) - PieceOffs += getPiece(i).size(); - - // If we exactly include the last one, include it in the region to delete. - if (Offset+NumBytes == PieceOffs+getPiece(i).size()) - PieceOffs += getPiece(i).size(), ++i; - - // If we completely cover some RopePieces, erase them now. - if (i != StartPiece) { - unsigned NumDeleted = i-StartPiece; - for (; i != getNumPieces(); ++i) - Pieces[i-NumDeleted] = Pieces[i]; - - // Drop references to dead rope pieces. - std::fill(&Pieces[getNumPieces()-NumDeleted], &Pieces[getNumPieces()], - RopePiece()); - NumPieces -= NumDeleted; - - unsigned CoverBytes = PieceOffs-Offset; - NumBytes -= CoverBytes; - Size -= CoverBytes; - } - - // If we completely removed some stuff, we could be done. - if (NumBytes == 0) return; - - // Okay, now might be erasing part of some Piece. If this is the case, then - // move the start point of the piece. - assert(getPiece(StartPiece).size() > NumBytes); - Pieces[StartPiece].StartOffs += NumBytes; - - // The size of this node just shrunk by NumBytes. - Size -= NumBytes; -} - -//===----------------------------------------------------------------------===// -// RopePieceBTreeInterior Class -//===----------------------------------------------------------------------===// - -namespace { - /// RopePieceBTreeInterior - This represents an interior node in the B+Tree, - /// which holds up to 2*WidthFactor pointers to child nodes. - class RopePieceBTreeInterior : public RopePieceBTreeNode { - /// NumChildren - This holds the number of children currently active in the - /// Children array. 
- unsigned char NumChildren; - RopePieceBTreeNode *Children[2*WidthFactor]; - public: - RopePieceBTreeInterior() : RopePieceBTreeNode(false), NumChildren(0) {} - - RopePieceBTreeInterior(RopePieceBTreeNode *LHS, RopePieceBTreeNode *RHS) - : RopePieceBTreeNode(false) { - Children[0] = LHS; - Children[1] = RHS; - NumChildren = 2; - Size = LHS->size() + RHS->size(); - } - - ~RopePieceBTreeInterior() { - for (unsigned i = 0, e = getNumChildren(); i != e; ++i) - Children[i]->Destroy(); - } - - bool isFull() const { return NumChildren == 2*WidthFactor; } - - unsigned getNumChildren() const { return NumChildren; } - const RopePieceBTreeNode *getChild(unsigned i) const { - assert(i < NumChildren && "invalid child #"); - return Children[i]; - } - RopePieceBTreeNode *getChild(unsigned i) { - assert(i < NumChildren && "invalid child #"); - return Children[i]; - } - - /// FullRecomputeSizeLocally - Recompute the Size field of this node by - /// summing up the sizes of the child nodes. - void FullRecomputeSizeLocally() { - Size = 0; - for (unsigned i = 0, e = getNumChildren(); i != e; ++i) - Size += getChild(i)->size(); - } - - - /// split - Split the range containing the specified offset so that we are - /// guaranteed that there is a place to do an insertion at the specified - /// offset. The offset is relative, so "0" is the start of the node. - /// - /// If there is no space in this subtree for the extra piece, the extra tree - /// node is returned and must be inserted into a parent. - RopePieceBTreeNode *split(unsigned Offset); - - - /// insert - Insert the specified ropepiece into this tree node at the - /// specified offset. The offset is relative, so "0" is the start of the - /// node. - /// - /// If there is no space in this subtree for the extra piece, the extra tree - /// node is returned and must be inserted into a parent. - RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R); - - /// HandleChildPiece - A child propagated an insertion result up to us. - /// Insert the new child, and/or propagate the result further up the tree. - RopePieceBTreeNode *HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS); - - /// erase - Remove NumBytes from this node at the specified offset. We are - /// guaranteed that there is a split at Offset. - void erase(unsigned Offset, unsigned NumBytes); - - //static inline bool classof(const RopePieceBTreeInterior *) { return true; } - static inline bool classof(const RopePieceBTreeNode *N) { - return !N->isLeaf(); - } - }; -} // end anonymous namespace - -/// split - Split the range containing the specified offset so that we are -/// guaranteed that there is a place to do an insertion at the specified -/// offset. The offset is relative, so "0" is the start of the node. -/// -/// If there is no space in this subtree for the extra piece, the extra tree -/// node is returned and must be inserted into a parent. -RopePieceBTreeNode *RopePieceBTreeInterior::split(unsigned Offset) { - // Figure out which child to split. - if (Offset == 0 || Offset == size()) - return 0; // If we have an exact offset, we're already split. - - unsigned ChildOffset = 0; - unsigned i = 0; - for (; Offset >= ChildOffset+getChild(i)->size(); ++i) - ChildOffset += getChild(i)->size(); - - // If already split there, we're done. - if (ChildOffset == Offset) - return 0; - - // Otherwise, recursively split the child. - if (RopePieceBTreeNode *RHS = getChild(i)->split(Offset-ChildOffset)) - return HandleChildPiece(i, RHS); - return 0; // Done! 
-} - -/// insert - Insert the specified ropepiece into this tree node at the -/// specified offset. The offset is relative, so "0" is the start of the -/// node. -/// -/// If there is no space in this subtree for the extra piece, the extra tree -/// node is returned and must be inserted into a parent. -RopePieceBTreeNode *RopePieceBTreeInterior::insert(unsigned Offset, - const RopePiece &R) { - // Find the insertion point. We are guaranteed that there is a split at the - // specified offset so find it. - unsigned i = 0, e = getNumChildren(); - - unsigned ChildOffs = 0; - if (Offset == size()) { - // Fastpath for a common case. Insert at end of last child. - i = e-1; - ChildOffs = size()-getChild(i)->size(); - } else { - for (; Offset > ChildOffs+getChild(i)->size(); ++i) - ChildOffs += getChild(i)->size(); - } - - Size += R.size(); - - // Insert at the end of this child. - if (RopePieceBTreeNode *RHS = getChild(i)->insert(Offset-ChildOffs, R)) - return HandleChildPiece(i, RHS); - - return 0; -} - -/// HandleChildPiece - A child propagated an insertion result up to us. -/// Insert the new child, and/or propagate the result further up the tree. -RopePieceBTreeNode * -RopePieceBTreeInterior::HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS) { - // Otherwise the child propagated a subtree up to us as a new child. See if - // we have space for it here. - if (!isFull()) { - // Insert RHS after child 'i'. - if (i + 1 != getNumChildren()) - memmove(&Children[i+2], &Children[i+1], - (getNumChildren()-i-1)*sizeof(Children[0])); - Children[i+1] = RHS; - ++NumChildren; - return 0; - } - - // Okay, this node is full. Split it in half, moving WidthFactor children to - // a newly allocated interior node. - - // Create the new node. - RopePieceBTreeInterior *NewNode = new RopePieceBTreeInterior(); - - // Move over the last 'WidthFactor' values from here to NewNode. - memcpy(&NewNode->Children[0], &Children[WidthFactor], - WidthFactor*sizeof(Children[0])); - - // Decrease the number of values in the two nodes. - NewNode->NumChildren = NumChildren = WidthFactor; - - // Finally, insert the two new children in the side the can (now) hold them. - // These insertions can't fail. - if (i < WidthFactor) - this->HandleChildPiece(i, RHS); - else - NewNode->HandleChildPiece(i-WidthFactor, RHS); - - // Recompute the two nodes' size. - NewNode->FullRecomputeSizeLocally(); - FullRecomputeSizeLocally(); - return NewNode; -} - -/// erase - Remove NumBytes from this node at the specified offset. We are -/// guaranteed that there is a split at Offset. -void RopePieceBTreeInterior::erase(unsigned Offset, unsigned NumBytes) { - // This will shrink this node by NumBytes. - Size -= NumBytes; - - // Find the first child that overlaps with Offset. - unsigned i = 0; - for (; Offset >= getChild(i)->size(); ++i) - Offset -= getChild(i)->size(); - - // Propagate the delete request into overlapping children, or completely - // delete the children as appropriate. - while (NumBytes) { - RopePieceBTreeNode *CurChild = getChild(i); - - // If we are deleting something contained entirely in the child, pass on the - // request. - if (Offset+NumBytes < CurChild->size()) { - CurChild->erase(Offset, NumBytes); - return; - } - - // If this deletion request starts somewhere in the middle of the child, it - // must be deleting to the end of the child. - if (Offset) { - unsigned BytesFromChild = CurChild->size()-Offset; - CurChild->erase(Offset, BytesFromChild); - NumBytes -= BytesFromChild; - // Start at the beginning of the next child. 
- Offset = 0; - ++i; - continue; - } - - // If the deletion request completely covers the child, delete it and move - // the rest down. - NumBytes -= CurChild->size(); - CurChild->Destroy(); - --NumChildren; - if (i != getNumChildren()) - memmove(&Children[i], &Children[i+1], - (getNumChildren()-i)*sizeof(Children[0])); - } -} - -//===----------------------------------------------------------------------===// -// RopePieceBTreeNode Implementation -//===----------------------------------------------------------------------===// - -void RopePieceBTreeNode::Destroy() { - if (RopePieceBTreeLeaf *Leaf = dyn_cast(this)) - delete Leaf; - else - delete cast(this); -} - -/// split - Split the range containing the specified offset so that we are -/// guaranteed that there is a place to do an insertion at the specified -/// offset. The offset is relative, so "0" is the start of the node. -/// -/// If there is no space in this subtree for the extra piece, the extra tree -/// node is returned and must be inserted into a parent. -RopePieceBTreeNode *RopePieceBTreeNode::split(unsigned Offset) { - assert(Offset <= size() && "Invalid offset to split!"); - if (RopePieceBTreeLeaf *Leaf = dyn_cast(this)) - return Leaf->split(Offset); - return cast(this)->split(Offset); -} - -/// insert - Insert the specified ropepiece into this tree node at the -/// specified offset. The offset is relative, so "0" is the start of the -/// node. -/// -/// If there is no space in this subtree for the extra piece, the extra tree -/// node is returned and must be inserted into a parent. -RopePieceBTreeNode *RopePieceBTreeNode::insert(unsigned Offset, - const RopePiece &R) { - assert(Offset <= size() && "Invalid offset to insert!"); - if (RopePieceBTreeLeaf *Leaf = dyn_cast(this)) - return Leaf->insert(Offset, R); - return cast(this)->insert(Offset, R); -} - -/// erase - Remove NumBytes from this node at the specified offset. We are -/// guaranteed that there is a split at Offset. -void RopePieceBTreeNode::erase(unsigned Offset, unsigned NumBytes) { - assert(Offset+NumBytes <= size() && "Invalid offset to erase!"); - if (RopePieceBTreeLeaf *Leaf = dyn_cast(this)) - return Leaf->erase(Offset, NumBytes); - return cast(this)->erase(Offset, NumBytes); -} - - -//===----------------------------------------------------------------------===// -// RopePieceBTreeIterator Implementation -//===----------------------------------------------------------------------===// - -static const RopePieceBTreeLeaf *getCN(const void *P) { - return static_cast(P); -} - -// begin iterator. -RopePieceBTreeIterator::RopePieceBTreeIterator(const void *n) { - const RopePieceBTreeNode *N = static_cast(n); - - // Walk down the left side of the tree until we get to a leaf. - while (const RopePieceBTreeInterior *IN = dyn_cast(N)) - N = IN->getChild(0); - - // We must have at least one leaf. - CurNode = cast(N); - - // If we found a leaf that happens to be empty, skip over it until we get - // to something full. - while (CurNode && getCN(CurNode)->getNumPieces() == 0) - CurNode = getCN(CurNode)->getNextLeafInOrder(); - - if (CurNode != 0) - CurPiece = &getCN(CurNode)->getPiece(0); - else // Empty tree, this is an end() iterator. - CurPiece = 0; - CurChar = 0; -} - -void RopePieceBTreeIterator::MoveToNextPiece() { - if (CurPiece != &getCN(CurNode)->getPiece(getCN(CurNode)->getNumPieces()-1)) { - CurChar = 0; - ++CurPiece; - return; - } - - // Find the next non-empty leaf node. 
- do - CurNode = getCN(CurNode)->getNextLeafInOrder(); - while (CurNode && getCN(CurNode)->getNumPieces() == 0); - - if (CurNode != 0) - CurPiece = &getCN(CurNode)->getPiece(0); - else // Hit end(). - CurPiece = 0; - CurChar = 0; -} - -//===----------------------------------------------------------------------===// -// RopePieceBTree Implementation -//===----------------------------------------------------------------------===// - -static RopePieceBTreeNode *getRoot(void *P) { - return static_cast(P); -} - -RopePieceBTree::RopePieceBTree() { - Root = new RopePieceBTreeLeaf(); -} -RopePieceBTree::RopePieceBTree(const RopePieceBTree &RHS) { - assert(RHS.empty() && "Can't copy non-empty tree yet"); - Root = new RopePieceBTreeLeaf(); -} -RopePieceBTree::~RopePieceBTree() { - getRoot(Root)->Destroy(); -} - -unsigned RopePieceBTree::size() const { - return getRoot(Root)->size(); -} - -void RopePieceBTree::clear() { - if (RopePieceBTreeLeaf *Leaf = dyn_cast(getRoot(Root))) - Leaf->clear(); - else { - getRoot(Root)->Destroy(); - Root = new RopePieceBTreeLeaf(); - } -} - -void RopePieceBTree::insert(unsigned Offset, const RopePiece &R) { - // #1. Split at Offset. - if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset)) - Root = new RopePieceBTreeInterior(getRoot(Root), RHS); - - // #2. Do the insertion. - if (RopePieceBTreeNode *RHS = getRoot(Root)->insert(Offset, R)) - Root = new RopePieceBTreeInterior(getRoot(Root), RHS); -} - -void RopePieceBTree::erase(unsigned Offset, unsigned NumBytes) { - // #1. Split at Offset. - if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset)) - Root = new RopePieceBTreeInterior(getRoot(Root), RHS); - - // #2. Do the erasing. - getRoot(Root)->erase(Offset, NumBytes); -} - -//===----------------------------------------------------------------------===// -// RewriteRope Implementation -//===----------------------------------------------------------------------===// - -/// MakeRopeString - This copies the specified byte range into some instance of -/// RopeRefCountString, and return a RopePiece that represents it. This uses -/// the AllocBuffer object to aggregate requests for small strings into one -/// allocation instead of doing tons of tiny allocations. -RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) { - unsigned Len = End-Start; - assert(Len && "Zero length RopePiece is invalid!"); - - // If we have space for this string in the current alloc buffer, use it. - if (AllocOffs+Len <= AllocChunkSize) { - memcpy(AllocBuffer->Data+AllocOffs, Start, Len); - AllocOffs += Len; - return RopePiece(AllocBuffer, AllocOffs-Len, AllocOffs); - } - - // If we don't have enough room because this specific allocation is huge, - // just allocate a new rope piece for it alone. - if (Len > AllocChunkSize) { - unsigned Size = End-Start+sizeof(RopeRefCountString)-1; - RopeRefCountString *Res = - reinterpret_cast(new char[Size]); - Res->RefCount = 0; - memcpy(Res->Data, Start, End-Start); - return RopePiece(Res, 0, End-Start); - } - - // Otherwise, this was a small request but we just don't have space for it - // Make a new chunk and share it with later allocations. - - // If we had an old allocation, drop our reference to it. 
- if (AllocBuffer && --AllocBuffer->RefCount == 0) - delete [] (char*)AllocBuffer; - - unsigned AllocSize = offsetof(RopeRefCountString, Data) + AllocChunkSize; - AllocBuffer = reinterpret_cast(new char[AllocSize]); - AllocBuffer->RefCount = 0; - memcpy(AllocBuffer->Data, Start, Len); - AllocOffs = Len; - - // Start out the new allocation with a refcount of 1, since we have an - // internal reference to it. - AllocBuffer->addRef(); - return RopePiece(AllocBuffer, 0, Len); -} - - diff --git a/lib/Rewrite/RewriteTest.cpp b/lib/Rewrite/RewriteTest.cpp deleted file mode 100644 index 019e5e7..0000000 --- a/lib/Rewrite/RewriteTest.cpp +++ /dev/null @@ -1,39 +0,0 @@ -//===--- RewriteTest.cpp - Rewriter playground ----------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This is a testbed. -// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/Rewriters.h" -#include "clang/Lex/Preprocessor.h" -#include "clang/Rewrite/TokenRewriter.h" -#include "llvm/Support/raw_ostream.h" - -void clang::DoRewriteTest(Preprocessor &PP, raw_ostream* OS) { - SourceManager &SM = PP.getSourceManager(); - const LangOptions &LangOpts = PP.getLangOpts(); - - TokenRewriter Rewriter(SM.getMainFileID(), SM, LangOpts); - - // Throw tags around comments. - for (TokenRewriter::token_iterator I = Rewriter.token_begin(), - E = Rewriter.token_end(); I != E; ++I) { - if (I->isNot(tok::comment)) continue; - - Rewriter.AddTokenBefore(I, ""); - Rewriter.AddTokenAfter(I, ""); - } - - - // Print out the output. - for (TokenRewriter::token_iterator I = Rewriter.token_begin(), - E = Rewriter.token_end(); I != E; ++I) - *OS << PP.getSpelling(*I); -} diff --git a/lib/Rewrite/Rewriter.cpp b/lib/Rewrite/Rewriter.cpp deleted file mode 100644 index 7c27114..0000000 --- a/lib/Rewrite/Rewriter.cpp +++ /dev/null @@ -1,486 +0,0 @@ -//===--- Rewriter.cpp - Code rewriting interface --------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the Rewriter class, which is used for code -// transformations. -// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/Rewriter.h" -#include "clang/AST/Stmt.h" -#include "clang/AST/Decl.h" -#include "clang/Basic/DiagnosticIDs.h" -#include "clang/Basic/FileManager.h" -#include "clang/Basic/SourceManager.h" -#include "clang/Lex/Lexer.h" -#include "llvm/ADT/SmallString.h" -#include "llvm/Support/FileSystem.h" -using namespace clang; - -raw_ostream &RewriteBuffer::write(raw_ostream &os) const { - // FIXME: eliminate the copy by writing out each chunk at a time - os << std::string(begin(), end()); - return os; -} - -/// \brief Return true if this character is non-new-line whitespace: -/// ' ', '\\t', '\\f', '\\v', '\\r'. -static inline bool isWhitespace(unsigned char c) { - switch (c) { - case ' ': - case '\t': - case '\f': - case '\v': - case '\r': - return true; - default: - return false; - } -} - -void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size, - bool removeLineIfEmpty) { - // Nothing to remove, exit early. 
- if (Size == 0) return; - - unsigned RealOffset = getMappedOffset(OrigOffset, true); - assert(RealOffset+Size < Buffer.size() && "Invalid location"); - - // Remove the dead characters. - Buffer.erase(RealOffset, Size); - - // Add a delta so that future changes are offset correctly. - AddReplaceDelta(OrigOffset, -Size); - - if (removeLineIfEmpty) { - // Find the line that the remove occurred and if it is completely empty - // remove the line as well. - - iterator curLineStart = begin(); - unsigned curLineStartOffs = 0; - iterator posI = begin(); - for (unsigned i = 0; i != RealOffset; ++i) { - if (*posI == '\n') { - curLineStart = posI; - ++curLineStart; - curLineStartOffs = i + 1; - } - ++posI; - } - - unsigned lineSize = 0; - posI = curLineStart; - while (posI != end() && isWhitespace(*posI)) { - ++posI; - ++lineSize; - } - if (posI != end() && *posI == '\n') { - Buffer.erase(curLineStartOffs, lineSize + 1/* + '\n'*/); - AddReplaceDelta(curLineStartOffs, -(lineSize + 1/* + '\n'*/)); - } - } -} - -void RewriteBuffer::InsertText(unsigned OrigOffset, StringRef Str, - bool InsertAfter) { - - // Nothing to insert, exit early. - if (Str.empty()) return; - - unsigned RealOffset = getMappedOffset(OrigOffset, InsertAfter); - Buffer.insert(RealOffset, Str.begin(), Str.end()); - - // Add a delta so that future changes are offset correctly. - AddInsertDelta(OrigOffset, Str.size()); -} - -/// ReplaceText - This method replaces a range of characters in the input -/// buffer with a new string. This is effectively a combined "remove+insert" -/// operation. -void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength, - StringRef NewStr) { - unsigned RealOffset = getMappedOffset(OrigOffset, true); - Buffer.erase(RealOffset, OrigLength); - Buffer.insert(RealOffset, NewStr.begin(), NewStr.end()); - if (OrigLength != NewStr.size()) - AddReplaceDelta(OrigOffset, NewStr.size() - OrigLength); -} - - -//===----------------------------------------------------------------------===// -// Rewriter class -//===----------------------------------------------------------------------===// - -/// getRangeSize - Return the size in bytes of the specified range if they -/// are in the same file. If not, this returns -1. -int Rewriter::getRangeSize(const CharSourceRange &Range, - RewriteOptions opts) const { - if (!isRewritable(Range.getBegin()) || - !isRewritable(Range.getEnd())) return -1; - - FileID StartFileID, EndFileID; - unsigned StartOff, EndOff; - - StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID); - EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID); - - if (StartFileID != EndFileID) - return -1; - - // If edits have been made to this buffer, the delta between the range may - // have changed. - std::map::const_iterator I = - RewriteBuffers.find(StartFileID); - if (I != RewriteBuffers.end()) { - const RewriteBuffer &RB = I->second; - EndOff = RB.getMappedOffset(EndOff, opts.IncludeInsertsAtEndOfRange); - StartOff = RB.getMappedOffset(StartOff, !opts.IncludeInsertsAtBeginOfRange); - } - - - // Adjust the end offset to the end of the last token, instead of being the - // start of the last token if this is a token range. 
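The getMappedOffset/AddInsertDelta/AddReplaceDelta calls threaded through the methods above are what let callers keep addressing the buffer with offsets from the original, unedited file: every edit records where it happened and how much it grew or shrank the text, and later original offsets are shifted by the accumulated deltas before the underlying string is touched. A self-contained toy version of that bookkeeping (the real RewriteBuffer additionally distinguishes inserts before vs. after a given offset):

    #include <string>
    #include <utility>
    #include <vector>

    struct ToyRewriteBuffer {
        std::string Text;                               // current rewritten text
        std::vector<std::pair<unsigned, int> > Deltas;  // (original offset, size change)

        unsigned getMappedOffset(unsigned OrigOffset) const {
            int Shift = 0;
            for (size_t i = 0, e = Deltas.size(); i != e; ++i)
                if (Deltas[i].first <= OrigOffset)
                    Shift += Deltas[i].second;
            return OrigOffset + Shift;
        }

        void insertText(unsigned OrigOffset, const std::string &Str) {
            Text.insert(getMappedOffset(OrigOffset), Str);
            Deltas.push_back(std::make_pair(OrigOffset, (int)Str.size()));
        }

        void removeText(unsigned OrigOffset, unsigned Size) {
            Text.erase(getMappedOffset(OrigOffset), Size);
            Deltas.push_back(std::make_pair(OrigOffset, -(int)Size));
        }
    };

    // Starting from "hello world", insertText(5, ",") gives "hello, world";
    // a later removeText(5, 6) still removes " world" even though 5 is an
    // offset into the original text, because the recorded +1 delta maps it to 6.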
- if (Range.isTokenRange()) - EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts); - - return EndOff-StartOff; -} - -int Rewriter::getRangeSize(SourceRange Range, RewriteOptions opts) const { - return getRangeSize(CharSourceRange::getTokenRange(Range), opts); -} - - -/// getRewrittenText - Return the rewritten form of the text in the specified -/// range. If the start or end of the range was unrewritable or if they are -/// in different buffers, this returns an empty string. -/// -/// Note that this method is not particularly efficient. -/// -std::string Rewriter::getRewrittenText(SourceRange Range) const { - if (!isRewritable(Range.getBegin()) || - !isRewritable(Range.getEnd())) - return ""; - - FileID StartFileID, EndFileID; - unsigned StartOff, EndOff; - StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID); - EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID); - - if (StartFileID != EndFileID) - return ""; // Start and end in different buffers. - - // If edits have been made to this buffer, the delta between the range may - // have changed. - std::map::const_iterator I = - RewriteBuffers.find(StartFileID); - if (I == RewriteBuffers.end()) { - // If the buffer hasn't been rewritten, just return the text from the input. - const char *Ptr = SourceMgr->getCharacterData(Range.getBegin()); - - // Adjust the end offset to the end of the last token, instead of being the - // start of the last token. - EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts); - return std::string(Ptr, Ptr+EndOff-StartOff); - } - - const RewriteBuffer &RB = I->second; - EndOff = RB.getMappedOffset(EndOff, true); - StartOff = RB.getMappedOffset(StartOff); - - // Adjust the end offset to the end of the last token, instead of being the - // start of the last token. - EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts); - - // Advance the iterators to the right spot, yay for linear time algorithms. - RewriteBuffer::iterator Start = RB.begin(); - std::advance(Start, StartOff); - RewriteBuffer::iterator End = Start; - std::advance(End, EndOff-StartOff); - - return std::string(Start, End); -} - -unsigned Rewriter::getLocationOffsetAndFileID(SourceLocation Loc, - FileID &FID) const { - assert(Loc.isValid() && "Invalid location"); - std::pair V = SourceMgr->getDecomposedLoc(Loc); - FID = V.first; - return V.second; -} - - -/// getEditBuffer - Get or create a RewriteBuffer for the specified FileID. -/// -RewriteBuffer &Rewriter::getEditBuffer(FileID FID) { - std::map::iterator I = - RewriteBuffers.lower_bound(FID); - if (I != RewriteBuffers.end() && I->first == FID) - return I->second; - I = RewriteBuffers.insert(I, std::make_pair(FID, RewriteBuffer())); - - StringRef MB = SourceMgr->getBufferData(FID); - I->second.Initialize(MB.begin(), MB.end()); - - return I->second; -} - -/// InsertText - Insert the specified string at the specified location in the -/// original buffer. 
-bool Rewriter::InsertText(SourceLocation Loc, StringRef Str, - bool InsertAfter, bool indentNewLines) { - if (!isRewritable(Loc)) return true; - FileID FID; - unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID); - - SmallString<128> indentedStr; - if (indentNewLines && Str.find('\n') != StringRef::npos) { - StringRef MB = SourceMgr->getBufferData(FID); - - unsigned lineNo = SourceMgr->getLineNumber(FID, StartOffs) - 1; - const SrcMgr::ContentCache * - Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache(); - unsigned lineOffs = Content->SourceLineCache[lineNo]; - - // Find the whitespace at the start of the line. - StringRef indentSpace; - { - unsigned i = lineOffs; - while (isWhitespace(MB[i])) - ++i; - indentSpace = MB.substr(lineOffs, i-lineOffs); - } - - SmallVector lines; - Str.split(lines, "\n"); - - for (unsigned i = 0, e = lines.size(); i != e; ++i) { - indentedStr += lines[i]; - if (i < e-1) { - indentedStr += '\n'; - indentedStr += indentSpace; - } - } - Str = indentedStr.str(); - } - - getEditBuffer(FID).InsertText(StartOffs, Str, InsertAfter); - return false; -} - -bool Rewriter::InsertTextAfterToken(SourceLocation Loc, StringRef Str) { - if (!isRewritable(Loc)) return true; - FileID FID; - unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID); - RewriteOptions rangeOpts; - rangeOpts.IncludeInsertsAtBeginOfRange = false; - StartOffs += getRangeSize(SourceRange(Loc, Loc), rangeOpts); - getEditBuffer(FID).InsertText(StartOffs, Str, /*InsertAfter*/true); - return false; -} - -/// RemoveText - Remove the specified text region. -bool Rewriter::RemoveText(SourceLocation Start, unsigned Length, - RewriteOptions opts) { - if (!isRewritable(Start)) return true; - FileID FID; - unsigned StartOffs = getLocationOffsetAndFileID(Start, FID); - getEditBuffer(FID).RemoveText(StartOffs, Length, opts.RemoveLineIfEmpty); - return false; -} - -/// ReplaceText - This method replaces a range of characters in the input -/// buffer with a new string. This is effectively a combined "remove/insert" -/// operation. -bool Rewriter::ReplaceText(SourceLocation Start, unsigned OrigLength, - StringRef NewStr) { - if (!isRewritable(Start)) return true; - FileID StartFileID; - unsigned StartOffs = getLocationOffsetAndFileID(Start, StartFileID); - - getEditBuffer(StartFileID).ReplaceText(StartOffs, OrigLength, NewStr); - return false; -} - -bool Rewriter::ReplaceText(SourceRange range, SourceRange replacementRange) { - if (!isRewritable(range.getBegin())) return true; - if (!isRewritable(range.getEnd())) return true; - if (replacementRange.isInvalid()) return true; - SourceLocation start = range.getBegin(); - unsigned origLength = getRangeSize(range); - unsigned newLength = getRangeSize(replacementRange); - FileID FID; - unsigned newOffs = getLocationOffsetAndFileID(replacementRange.getBegin(), - FID); - StringRef MB = SourceMgr->getBufferData(FID); - return ReplaceText(start, origLength, MB.substr(newOffs, newLength)); -} - -/// ReplaceStmt - This replaces a Stmt/Expr with another, using the pretty -/// printer to generate the replacement code. This returns true if the input -/// could not be rewritten, or false if successful. -bool Rewriter::ReplaceStmt(Stmt *From, Stmt *To) { - // Measaure the old text. - int Size = getRangeSize(From->getSourceRange()); - if (Size == -1) - return true; - - // Get the new text. 
- std::string SStr; - llvm::raw_string_ostream S(SStr); - To->printPretty(S, 0, PrintingPolicy(*LangOpts)); - const std::string &Str = S.str(); - - ReplaceText(From->getLocStart(), Size, Str); - return false; -} - -std::string Rewriter::ConvertToString(Stmt *From) { - std::string SStr; - llvm::raw_string_ostream S(SStr); - From->printPretty(S, 0, PrintingPolicy(*LangOpts)); - return S.str(); -} - -bool Rewriter::IncreaseIndentation(CharSourceRange range, - SourceLocation parentIndent) { - if (range.isInvalid()) return true; - if (!isRewritable(range.getBegin())) return true; - if (!isRewritable(range.getEnd())) return true; - if (!isRewritable(parentIndent)) return true; - - FileID StartFileID, EndFileID, parentFileID; - unsigned StartOff, EndOff, parentOff; - - StartOff = getLocationOffsetAndFileID(range.getBegin(), StartFileID); - EndOff = getLocationOffsetAndFileID(range.getEnd(), EndFileID); - parentOff = getLocationOffsetAndFileID(parentIndent, parentFileID); - - if (StartFileID != EndFileID || StartFileID != parentFileID) - return true; - if (StartOff > EndOff) - return true; - - FileID FID = StartFileID; - StringRef MB = SourceMgr->getBufferData(FID); - - unsigned parentLineNo = SourceMgr->getLineNumber(FID, parentOff) - 1; - unsigned startLineNo = SourceMgr->getLineNumber(FID, StartOff) - 1; - unsigned endLineNo = SourceMgr->getLineNumber(FID, EndOff) - 1; - - const SrcMgr::ContentCache * - Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache(); - - // Find where the lines start. - unsigned parentLineOffs = Content->SourceLineCache[parentLineNo]; - unsigned startLineOffs = Content->SourceLineCache[startLineNo]; - - // Find the whitespace at the start of each line. - StringRef parentSpace, startSpace; - { - unsigned i = parentLineOffs; - while (isWhitespace(MB[i])) - ++i; - parentSpace = MB.substr(parentLineOffs, i-parentLineOffs); - - i = startLineOffs; - while (isWhitespace(MB[i])) - ++i; - startSpace = MB.substr(startLineOffs, i-startLineOffs); - } - if (parentSpace.size() >= startSpace.size()) - return true; - if (!startSpace.startswith(parentSpace)) - return true; - - StringRef indent = startSpace.substr(parentSpace.size()); - - // Indent the lines between start/end offsets. - RewriteBuffer &RB = getEditBuffer(FID); - for (unsigned lineNo = startLineNo; lineNo <= endLineNo; ++lineNo) { - unsigned offs = Content->SourceLineCache[lineNo]; - unsigned i = offs; - while (isWhitespace(MB[i])) - ++i; - StringRef origIndent = MB.substr(offs, i-offs); - if (origIndent.startswith(startSpace)) - RB.InsertText(offs, indent, /*InsertAfter=*/false); - } - - return false; -} - -// A wrapper for a file stream that atomically overwrites the target. -// -// Creates a file output stream for a temporary file in the constructor, -// which is later accessible via getStream() if ok() return true. -// Flushes the stream and moves the temporary file to the target location -// in the destructor. 
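The comment above describes a write-to-temporary-then-rename scheme. As a rough standalone sketch of that pattern (not part of this patch; it uses only the C++17 standard library, whereas the real class goes through llvm::sys::fs and a diagnostics engine):

  #include <filesystem>
  #include <fstream>
  #include <string>

  bool overwriteAtomically(const std::filesystem::path &Target,
                           const std::string &Contents) {
    std::filesystem::path Temp = Target;
    Temp += ".tmp";                        // the real code picks a unique name

    {
      std::ofstream Out(Temp, std::ios::binary | std::ios::trunc);
      if (!Out)
        return false;
      Out << Contents;
      Out.flush();
      if (!Out)
        return false;
    }                                      // close before renaming (required on Windows)

    std::error_code EC;
    std::filesystem::rename(Temp, Target, EC);
    if (EC) {
      std::filesystem::remove(Temp, EC);   // best-effort cleanup; already an error
      return false;
    }
    return true;
  }

The point of the indirection is that readers of Target either see the old contents or the complete new contents, never a partially written file.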
-class AtomicallyMovedFile { -public: - AtomicallyMovedFile(DiagnosticsEngine &Diagnostics, StringRef Filename, - bool &AllWritten) - : Diagnostics(Diagnostics), Filename(Filename), AllWritten(AllWritten) { - TempFilename = Filename; - TempFilename += "-%%%%%%%%"; - int FD; - if (llvm::sys::fs::unique_file(TempFilename.str(), FD, TempFilename, - /*makeAbsolute=*/true, 0664)) { - AllWritten = false; - Diagnostics.Report(clang::diag::err_unable_to_make_temp) - << TempFilename; - } else { - FileStream.reset(new llvm::raw_fd_ostream(FD, /*shouldClose=*/true)); - } - } - - ~AtomicallyMovedFile() { - if (!ok()) return; - - FileStream->flush(); -#ifdef _WIN32 - // Win32 does not allow rename/removing opened files. - FileStream.reset(); -#endif - if (llvm::error_code ec = - llvm::sys::fs::rename(TempFilename.str(), Filename)) { - AllWritten = false; - Diagnostics.Report(clang::diag::err_unable_to_rename_temp) - << TempFilename << Filename << ec.message(); - bool existed; - // If the remove fails, there's not a lot we can do - this is already an - // error. - llvm::sys::fs::remove(TempFilename.str(), existed); - } - } - - bool ok() { return FileStream; } - llvm::raw_ostream &getStream() { return *FileStream; } - -private: - DiagnosticsEngine &Diagnostics; - StringRef Filename; - SmallString<128> TempFilename; - OwningPtr FileStream; - bool &AllWritten; -}; - -bool Rewriter::overwriteChangedFiles() { - bool AllWritten = true; - for (buffer_iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) { - const FileEntry *Entry = - getSourceMgr().getFileEntryForID(I->first); - AtomicallyMovedFile File(getSourceMgr().getDiagnostics(), Entry->getName(), - AllWritten); - if (File.ok()) { - I->second.write(File.getStream()); - } - } - return !AllWritten; -} diff --git a/lib/Rewrite/TokenRewriter.cpp b/lib/Rewrite/TokenRewriter.cpp deleted file mode 100644 index 03ce63e..0000000 --- a/lib/Rewrite/TokenRewriter.cpp +++ /dev/null @@ -1,99 +0,0 @@ -//===--- TokenRewriter.cpp - Token-based code rewriting interface ---------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file implements the TokenRewriter class, which is used for code -// transformations. -// -//===----------------------------------------------------------------------===// - -#include "clang/Rewrite/TokenRewriter.h" -#include "clang/Lex/Lexer.h" -#include "clang/Lex/ScratchBuffer.h" -#include "clang/Basic/SourceManager.h" -using namespace clang; - -TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM, - const LangOptions &LangOpts) { - ScratchBuf.reset(new ScratchBuffer(SM)); - - // Create a lexer to lex all the tokens of the main file in raw mode. - const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID); - Lexer RawLex(FID, FromFile, SM, LangOpts); - - // Return all comments and whitespace as tokens. - RawLex.SetKeepWhitespaceMode(true); - - // Lex the file, populating our datastructures. - Token RawTok; - RawLex.LexFromRawLexer(RawTok); - while (RawTok.isNot(tok::eof)) { -#if 0 - if (Tok.is(tok::raw_identifier)) { - // Look up the identifier info for the token. This should use - // IdentifierTable directly instead of PP. 
- PP.LookUpIdentifierInfo(Tok); - } -#endif - - AddToken(RawTok, TokenList.end()); - RawLex.LexFromRawLexer(RawTok); - } -} - -TokenRewriter::~TokenRewriter() { -} - - -/// RemapIterator - Convert from token_iterator (a const iterator) to -/// TokenRefTy (a non-const iterator). -TokenRewriter::TokenRefTy TokenRewriter::RemapIterator(token_iterator I) { - if (I == token_end()) return TokenList.end(); - - // FIXME: This is horrible, we should use our own list or something to avoid - // this. - std::map::iterator MapIt = - TokenAtLoc.find(I->getLocation()); - assert(MapIt != TokenAtLoc.end() && "iterator not in rewriter?"); - return MapIt->second; -} - - -/// AddToken - Add the specified token into the Rewriter before the other -/// position. -TokenRewriter::TokenRefTy -TokenRewriter::AddToken(const Token &T, TokenRefTy Where) { - Where = TokenList.insert(Where, T); - - bool InsertSuccess = TokenAtLoc.insert(std::make_pair(T.getLocation(), - Where)).second; - assert(InsertSuccess && "Token location already in rewriter!"); - (void)InsertSuccess; - return Where; -} - - -TokenRewriter::token_iterator -TokenRewriter::AddTokenBefore(token_iterator I, const char *Val) { - unsigned Len = strlen(Val); - - // Plop the string into the scratch buffer, then create a token for this - // string. - Token Tok; - Tok.startToken(); - const char *Spelling; - Tok.setLocation(ScratchBuf->getToken(Val, Len, Spelling)); - Tok.setLength(Len); - - // TODO: Form a whole lexer around this and relex the token! For now, just - // set kind to tok::unknown. - Tok.setKind(tok::unknown); - - return AddToken(Tok, RemapIterator(I)); -} - diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp index 19a7d6f..801a1b1 100644 --- a/lib/Sema/AnalysisBasedWarnings.cpp +++ b/lib/Sema/AnalysisBasedWarnings.cpp @@ -27,6 +27,7 @@ #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/EvaluatedExprVisitor.h" +#include "clang/AST/ParentMap.h" #include "clang/AST/StmtVisitor.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/Analysis/AnalysisContext.h" @@ -36,10 +37,12 @@ #include "clang/Analysis/Analyses/ThreadSafety.h" #include "clang/Analysis/CFGStmtMap.h" #include "clang/Analysis/Analyses/UninitializedValues.h" +#include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/ImmutableMap.h" #include "llvm/ADT/PostOrderIterator.h" +#include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" @@ -182,13 +185,6 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) { HasFakeEdge = true; continue; } - if (const AsmStmt *AS = dyn_cast(S)) { - if (AS->isMSAsm()) { - HasFakeEdge = true; - HasLiveReturn = true; - continue; - } - } if (isa(S)) { // TODO: Verify this is correct. HasFakeEdge = true; @@ -506,7 +502,7 @@ static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use, // Information used when building the diagnostic. unsigned DiagKind; - const char *Str; + StringRef Str; SourceRange Range; // FixIts to suppress the diagnosic by removing the dead condition. @@ -822,6 +818,18 @@ namespace { static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC, bool PerFunction) { + // Only perform this analysis when using C++11. There is no good workflow + // for this warning when not using C++11. 
There is no good way to silence + // the warning (no attribute is available) unless we are using C++11's support + // for generalized attributes. Once could use pragmas to silence the warning, + // but as a general solution that is gross and not in the spirit of this + // warning. + // + // NOTE: This an intermediate solution. There are on-going discussions on + // how to properly support this warning outside of C++11 with an annotation. + if (!AC.getASTContext().getLangOpts().CPlusPlus0x) + return; + FallthroughMapper FM(S); FM.TraverseStmt(AC.getBody()); @@ -859,8 +867,21 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC, if (S.getLangOpts().CPlusPlus0x) { const Stmt *Term = B.getTerminator(); if (!(B.empty() && Term && isa(Term))) { + Preprocessor &PP = S.getPreprocessor(); + TokenValue Tokens[] = { + tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"), + tok::coloncolon, PP.getIdentifierInfo("fallthrough"), + tok::r_square, tok::r_square + }; + StringRef AnnotationSpelling = "[[clang::fallthrough]]"; + StringRef MacroName = PP.getLastMacroWithSpelling(L, Tokens); + if (!MacroName.empty()) + AnnotationSpelling = MacroName; + SmallString<64> TextToInsert(AnnotationSpelling); + TextToInsert += "; "; S.Diag(L, diag::note_insert_fallthrough_fixit) << - FixItHint::CreateInsertion(L, "[[clang::fallthrough]]; "); + AnnotationSpelling << + FixItHint::CreateInsertion(L, TextToInsert); } } S.Diag(L, diag::note_insert_break_fixit) << @@ -878,6 +899,199 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC, } namespace { +typedef std::pair + StmtUsesPair; + +class StmtUseSorter { + const SourceManager &SM; + +public: + explicit StmtUseSorter(const SourceManager &SM) : SM(SM) { } + + bool operator()(const StmtUsesPair &LHS, const StmtUsesPair &RHS) { + return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(), + RHS.first->getLocStart()); + } +}; +} + +static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM, + const Stmt *S) { + assert(S); + + do { + switch (S->getStmtClass()) { + case Stmt::ForStmtClass: + case Stmt::WhileStmtClass: + case Stmt::CXXForRangeStmtClass: + case Stmt::ObjCForCollectionStmtClass: + return true; + case Stmt::DoStmtClass: { + const Expr *Cond = cast(S)->getCond(); + llvm::APSInt Val; + if (!Cond->EvaluateAsInt(Val, Ctx)) + return true; + return Val.getBoolValue(); + } + default: + break; + } + } while ((S = PM.getParent(S))); + + return false; +} + + +static void diagnoseRepeatedUseOfWeak(Sema &S, + const sema::FunctionScopeInfo *CurFn, + const Decl *D, + const ParentMap &PM) { + typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy; + typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap; + typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector; + + ASTContext &Ctx = S.getASTContext(); + + const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses(); + + // Extract all weak objects that are referenced more than once. + SmallVector UsesByStmt; + for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end(); + I != E; ++I) { + const WeakUseVector &Uses = I->second; + + // Find the first read of the weak object. + WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end(); + for ( ; UI != UE; ++UI) { + if (UI->isUnsafe()) + break; + } + + // If there were only writes to this object, don't warn. + if (UI == UE) + continue; + + // If there was only one read, followed by any number of writes, and the + // read is not within a loop, don't warn. 
Additionally, don't warn in a + // loop if the base object is a local variable -- local variables are often + // changed in loops. + if (UI == Uses.begin()) { + WeakUseVector::const_iterator UI2 = UI; + for (++UI2; UI2 != UE; ++UI2) + if (UI2->isUnsafe()) + break; + + if (UI2 == UE) { + if (!isInLoop(Ctx, PM, UI->getUseExpr())) + continue; + + const WeakObjectProfileTy &Profile = I->first; + if (!Profile.isExactProfile()) + continue; + + const NamedDecl *Base = Profile.getBase(); + if (!Base) + Base = Profile.getProperty(); + assert(Base && "A profile always has a base or property."); + + if (const VarDecl *BaseVar = dyn_cast(Base)) + if (BaseVar->hasLocalStorage() && !isa(Base)) + continue; + } + } + + UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I)); + } + + if (UsesByStmt.empty()) + return; + + // Sort by first use so that we emit the warnings in a deterministic order. + std::sort(UsesByStmt.begin(), UsesByStmt.end(), + StmtUseSorter(S.getSourceManager())); + + // Classify the current code body for better warning text. + // This enum should stay in sync with the cases in + // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak. + // FIXME: Should we use a common classification enum and the same set of + // possibilities all throughout Sema? + enum { + Function, + Method, + Block, + Lambda + } FunctionKind; + + if (isa(CurFn)) + FunctionKind = Block; + else if (isa(CurFn)) + FunctionKind = Lambda; + else if (isa(D)) + FunctionKind = Method; + else + FunctionKind = Function; + + // Iterate through the sorted problems and emit warnings for each. + for (SmallVectorImpl::const_iterator I = UsesByStmt.begin(), + E = UsesByStmt.end(); + I != E; ++I) { + const Stmt *FirstRead = I->first; + const WeakObjectProfileTy &Key = I->second->first; + const WeakUseVector &Uses = I->second->second; + + // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy + // may not contain enough information to determine that these are different + // properties. We can only be 100% sure of a repeated use in certain cases, + // and we adjust the diagnostic kind accordingly so that the less certain + // case can be turned off if it is too noisy. + unsigned DiagKind; + if (Key.isExactProfile()) + DiagKind = diag::warn_arc_repeated_use_of_weak; + else + DiagKind = diag::warn_arc_possible_repeated_use_of_weak; + + // Classify the weak object being accessed for better warning text. + // This enum should stay in sync with the cases in + // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak. + enum { + Variable, + Property, + ImplicitProperty, + Ivar + } ObjectKind; + + const NamedDecl *D = Key.getProperty(); + if (isa(D)) + ObjectKind = Variable; + else if (isa(D)) + ObjectKind = Property; + else if (isa(D)) + ObjectKind = ImplicitProperty; + else if (isa(D)) + ObjectKind = Ivar; + else + llvm_unreachable("Unexpected weak object kind!"); + + // Show the first time the object was read. + S.Diag(FirstRead->getLocStart(), DiagKind) + << ObjectKind << D << FunctionKind + << FirstRead->getSourceRange(); + + // Print all the other accesses as notes. 
+ for (WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end(); + UI != UE; ++UI) { + if (UI->getUseExpr() == FirstRead) + continue; + S.Diag(UI->getUseExpr()->getLocStart(), + diag::note_arc_weak_also_accessed_here) + << UI->getUseExpr()->getSourceRange(); + } + } +} + + +namespace { struct SLocSort { bool operator()(const UninitUse &a, const UninitUse &b) { // Prefer a more confident report over a less confident one. @@ -1091,27 +1305,47 @@ class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler { diag::warn_variable_requires_any_lock: diag::warn_var_deref_requires_any_lock; PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) - << D->getName() << getLockKindFromAccessKind(AK)); + << D->getNameAsString() << getLockKindFromAccessKind(AK)); Warnings.push_back(DelayedDiag(Warning, OptionalNotes())); } void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK, - Name LockName, LockKind LK, SourceLocation Loc) { + Name LockName, LockKind LK, SourceLocation Loc, + Name *PossibleMatch) { unsigned DiagID = 0; - switch (POK) { - case POK_VarAccess: - DiagID = diag::warn_variable_requires_lock; - break; - case POK_VarDereference: - DiagID = diag::warn_var_deref_requires_lock; - break; - case POK_FunctionCall: - DiagID = diag::warn_fun_requires_lock; - break; + if (PossibleMatch) { + switch (POK) { + case POK_VarAccess: + DiagID = diag::warn_variable_requires_lock_precise; + break; + case POK_VarDereference: + DiagID = diag::warn_var_deref_requires_lock_precise; + break; + case POK_FunctionCall: + DiagID = diag::warn_fun_requires_lock_precise; + break; + } + PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) + << D->getNameAsString() << LockName << LK); + PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match) + << *PossibleMatch); + Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note))); + } else { + switch (POK) { + case POK_VarAccess: + DiagID = diag::warn_variable_requires_lock; + break; + case POK_VarDereference: + DiagID = diag::warn_var_deref_requires_lock; + break; + case POK_FunctionCall: + DiagID = diag::warn_fun_requires_lock; + break; + } + PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) + << D->getNameAsString() << LockName << LK); + Warnings.push_back(DelayedDiag(Warning, OptionalNotes())); } - PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) - << D->getName() << LockName << LK); - Warnings.push_back(DelayedDiag(Warning, OptionalNotes())); } void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) { @@ -1206,7 +1440,8 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P, AC.getCFGBuildOptions().AddEHEdges = false; AC.getCFGBuildOptions().AddInitializers = true; AC.getCFGBuildOptions().AddImplicitDtors = true; - + AC.getCFGBuildOptions().AddTemporaryDtors = true; + // Force that certain expressions appear as CFGElements in the CFG. This // is used to speed up various analyses. // FIXME: This isn't the right factoring. This is here for initial @@ -1350,6 +1585,11 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P, DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull); } + if (S.getLangOpts().ObjCARCWeak && + Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, + D->getLocStart()) != DiagnosticsEngine::Ignored) + diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap()); + // Collect statistics about the CFG if it was built. 
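To make the fallthrough fix-it hunk earlier in this file concrete: the suggestion is to insert either the raw C++11 attribute or, when the translation unit defines one, a macro whose tokens spell it (which is what getLastMacroWithSpelling now detects). A small illustrative example, with the macro name invented here:

  #define FALLTHROUGH [[clang::fallthrough]]

  int classify(int c) {
    switch (c) {
    case 0:
      ++c;
      FALLTHROUGH;   // silences -Wimplicit-fallthrough; reported using the macro spelling
    case 1:
      return c + 1;
    default:
      return 0;
    }
  }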
if (S.CollectStats && AC.isCFGBuilt()) { ++NumFunctionsAnalyzed; diff --git a/lib/Sema/CMakeLists.txt b/lib/Sema/CMakeLists.txt index 46dfa05..7cfe3ae 100644 --- a/lib/Sema/CMakeLists.txt +++ b/lib/Sema/CMakeLists.txt @@ -13,7 +13,9 @@ add_clang_library(clangSema DelayedDiagnostic.cpp IdentifierResolver.cpp JumpDiagnostics.cpp + MultiplexExternalSemaSource.cpp Scope.cpp + ScopeInfo.cpp Sema.cpp SemaAccess.cpp SemaAttr.cpp @@ -39,6 +41,7 @@ add_clang_library(clangSema SemaOverload.cpp SemaPseudoObject.cpp SemaStmt.cpp + SemaStmtAsm.cpp SemaStmtAttr.cpp SemaTemplate.cpp SemaTemplateDeduction.cpp diff --git a/lib/Sema/CodeCompleteConsumer.cpp b/lib/Sema/CodeCompleteConsumer.cpp index a835725..0a23601 100644 --- a/lib/Sema/CodeCompleteConsumer.cpp +++ b/lib/Sema/CodeCompleteConsumer.cpp @@ -193,11 +193,10 @@ CodeCompletionString::CodeCompletionString(const Chunk *Chunks, CXAvailabilityKind Availability, const char **Annotations, unsigned NumAnnotations, - CXCursorKind ParentKind, StringRef ParentName, const char *BriefComment) : NumChunks(NumChunks), NumAnnotations(NumAnnotations), - Priority(Priority), Availability(Availability), ParentKind(ParentKind), + Priority(Priority), Availability(Availability), ParentName(ParentName), BriefComment(BriefComment) { assert(NumChunks <= 0xffff); @@ -339,7 +338,7 @@ CodeCompletionString *CodeCompletionBuilder::TakeString() { = new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(), Priority, Availability, Annotations.data(), Annotations.size(), - ParentKind, ParentName, BriefComment); + ParentName, BriefComment); Chunks.clear(); return Result; } @@ -380,7 +379,6 @@ void CodeCompletionBuilder::AddChunk(CodeCompletionString::ChunkKind CK, void CodeCompletionBuilder::addParentContext(DeclContext *DC) { if (DC->isTranslationUnit()) { - ParentKind = CXCursor_TranslationUnit; return; } @@ -391,7 +389,6 @@ void CodeCompletionBuilder::addParentContext(DeclContext *DC) { if (!ND) return; - ParentKind = getCursorKindForDecl(ND); ParentName = getCodeCompletionTUInfo().getParentName(DC); } diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp index d12ca78..b3066eb 100644 --- a/lib/Sema/DeclSpec.cpp +++ b/lib/Sema/DeclSpec.cpp @@ -144,11 +144,13 @@ CXXScopeSpec::getWithLocInContext(ASTContext &Context) const { /// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function. /// "TheDeclarator" is the declarator that this will be added to. 
-DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic, +DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isAmbiguous, - SourceLocation EllipsisLoc, + SourceLocation LParenLoc, ParamInfo *ArgInfo, unsigned NumArgs, + SourceLocation EllipsisLoc, + SourceLocation RParenLoc, unsigned TypeQuals, bool RefQualifierIsLvalueRef, SourceLocation RefQualifierLoc, @@ -173,9 +175,11 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic, I.EndLoc = LocalRangeEnd; I.Fun.AttrList = 0; I.Fun.hasPrototype = hasProto; - I.Fun.isVariadic = isVariadic; + I.Fun.isVariadic = EllipsisLoc.isValid(); I.Fun.isAmbiguous = isAmbiguous; + I.Fun.LParenLoc = LParenLoc.getRawEncoding(); I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding(); + I.Fun.RParenLoc = RParenLoc.getRawEncoding(); I.Fun.DeleteArgInfo = false; I.Fun.TypeQuals = TypeQuals; I.Fun.NumArgs = NumArgs; @@ -270,6 +274,7 @@ bool Declarator::isDeclarationOfFunction() const { case TST_int: case TST_int128: case TST_struct: + case TST_interface: case TST_union: case TST_unknown_anytype: case TST_unspecified: @@ -325,10 +330,14 @@ unsigned DeclSpec::getParsedSpecifiers() const { template static bool BadSpecifier(T TNew, T TPrev, const char *&PrevSpec, - unsigned &DiagID) { + unsigned &DiagID, + bool IsExtension = true) { PrevSpec = DeclSpec::getSpecifierName(TPrev); - DiagID = (TNew == TPrev ? diag::ext_duplicate_declspec - : diag::err_invalid_decl_spec_combination); + if (TNew != TPrev) + DiagID = diag::err_invalid_decl_spec_combination; + else + DiagID = IsExtension ? diag::ext_duplicate_declspec : + diag::warn_duplicate_declspec; return true; } @@ -396,6 +405,7 @@ const char *DeclSpec::getSpecifierName(DeclSpec::TST T) { case DeclSpec::TST_class: return "class"; case DeclSpec::TST_union: return "union"; case DeclSpec::TST_struct: return "struct"; + case DeclSpec::TST_interface: return "__interface"; case DeclSpec::TST_typename: return "type-name"; case DeclSpec::TST_typeofType: case DeclSpec::TST_typeofExpr: return "typeof"; @@ -670,12 +680,16 @@ bool DeclSpec::SetTypeSpecError() { } bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec, - unsigned &DiagID, const LangOptions &Lang, - bool IsTypeSpec) { - // Duplicates are permitted in C99, and are permitted in C++11 unless the - // cv-qualifier appears as a type-specifier. - if ((TypeQualifiers & T) && !Lang.C99 && (!Lang.CPlusPlus0x || IsTypeSpec)) - return BadSpecifier(T, T, PrevSpec, DiagID); + unsigned &DiagID, const LangOptions &Lang) { + // Duplicates are permitted in C99, but are not permitted in C++. However, + // since this is likely not what the user intended, we will always warn. We + // do not need to set the qualifier's location since we already have it. 
+ if (TypeQualifiers & T) { + bool IsExtension = true; + if (Lang.C99) + IsExtension = false; + return BadSpecifier(T, T, PrevSpec, DiagID, IsExtension); + } TypeQualifiers |= T; switch (T) { diff --git a/lib/Sema/DelayedDiagnostic.cpp b/lib/Sema/DelayedDiagnostic.cpp index 876f9d7..3100432 100644 --- a/lib/Sema/DelayedDiagnostic.cpp +++ b/lib/Sema/DelayedDiagnostic.cpp @@ -22,6 +22,7 @@ using namespace sema; DelayedDiagnostic DelayedDiagnostic::makeDeprecation(SourceLocation Loc, const NamedDecl *D, const ObjCInterfaceDecl *UnknownObjCClass, + const ObjCPropertyDecl *ObjCProperty, StringRef Msg) { DelayedDiagnostic DD; DD.Kind = Deprecation; @@ -29,6 +30,7 @@ DelayedDiagnostic DelayedDiagnostic::makeDeprecation(SourceLocation Loc, DD.Loc = Loc; DD.DeprecationData.Decl = D; DD.DeprecationData.UnknownObjCClass = UnknownObjCClass; + DD.DeprecationData.ObjCProperty = ObjCProperty; char *MessageData = 0; if (Msg.size()) { MessageData = new char [Msg.size()]; diff --git a/lib/Sema/IdentifierResolver.cpp b/lib/Sema/IdentifierResolver.cpp index 4d62cab..0093915 100644 --- a/lib/Sema/IdentifierResolver.cpp +++ b/lib/Sema/IdentifierResolver.cpp @@ -135,8 +135,16 @@ bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, // of the controlled statement. // assert(S->getParent() && "No TUScope?"); - if (S->getParent()->getFlags() & Scope::ControlScope) + if (S->getFlags() & Scope::FnTryScope) return S->getParent()->isDeclScope(D); + if (S->getParent()->getFlags() & Scope::ControlScope) { + if (S->getParent()->getFlags() & Scope::FnCatchScope) { + S = S->getParent(); + if (S->isDeclScope(D)) + return true; + } + return S->getParent()->isDeclScope(D); + } } return false; } diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp index ab786c6..e2ec1cc 100644 --- a/lib/Sema/JumpDiagnostics.cpp +++ b/lib/Sema/JumpDiagnostics.cpp @@ -123,7 +123,7 @@ typedef std::pair ScopePair; /// diagnostic that should be emitted if control goes over it. If not, return 0. static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) { if (const VarDecl *VD = dyn_cast(D)) { - unsigned InDiag = 0, OutDiag = 0; + unsigned InDiag = 0; if (VD->getType()->isVariablyModifiedType()) InDiag = diag::note_protected_by_vla; @@ -164,43 +164,53 @@ static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) { // where it is in scope is ill-formed unless the variable has // POD type and is declared without an initializer. - if (const Expr *init = VD->getInit()) { - // We actually give variables of record type (or array thereof) - // an initializer even if that initializer only calls a trivial - // ctor. Detect that case. - // FIXME: With generalized initializer lists, this may - // classify "X x{};" as having no initializer. 
- unsigned inDiagToUse = diag::note_protected_by_variable_init; - - const CXXRecordDecl *record = 0; - - if (const CXXConstructExpr *cce = dyn_cast(init)) { - const CXXConstructorDecl *ctor = cce->getConstructor(); - record = ctor->getParent(); - - if (ctor->isTrivial() && ctor->isDefaultConstructor()) { - if (!record->hasTrivialDestructor()) - inDiagToUse = diag::note_protected_by_variable_nontriv_destructor; - else if (!record->isPOD()) - inDiagToUse = diag::note_protected_by_variable_non_pod; - else - inDiagToUse = 0; - } - } else if (VD->getType()->isArrayType()) { - record = VD->getType()->getBaseElementTypeUnsafe() - ->getAsCXXRecordDecl(); + const Expr *Init = VD->getInit(); + if (!Init) + return ScopePair(InDiag, 0); + + const ExprWithCleanups *EWC = dyn_cast(Init); + if (EWC) + Init = EWC->getSubExpr(); + + const MaterializeTemporaryExpr *M = NULL; + Init = Init->findMaterializedTemporary(M); + + SmallVector Adjustments; + Init = Init->skipRValueSubobjectAdjustments(Adjustments); + + QualType QT = Init->getType(); + if (QT.isNull()) + return ScopePair(diag::note_protected_by_variable_init, 0); + + const Type *T = QT.getTypePtr(); + if (T->isArrayType()) + T = T->getBaseElementTypeUnsafe(); + + const CXXRecordDecl *Record = T->getAsCXXRecordDecl(); + if (!Record) + return ScopePair(diag::note_protected_by_variable_init, 0); + + // If we need to call a non trivial destructor for this variable, + // record an out diagnostic. + unsigned OutDiag = 0; + if (!Init->isGLValue() && !Record->hasTrivialDestructor()) + OutDiag = diag::note_exits_dtor; + + if (const CXXConstructExpr *cce = dyn_cast(Init)) { + const CXXConstructorDecl *ctor = cce->getConstructor(); + if (ctor->isTrivial() && ctor->isDefaultConstructor()) { + if (OutDiag) + InDiag = diag::note_protected_by_variable_nontriv_destructor; + else if (!Record->isPOD()) + InDiag = diag::note_protected_by_variable_non_pod; + return ScopePair(InDiag, OutDiag); } - - if (inDiagToUse) - InDiag = inDiagToUse; - - // Also object to indirect jumps which leave scopes with dtors. - if (record && !record->hasTrivialDestructor()) - OutDiag = diag::note_exits_dtor; } + + return ScopePair(diag::note_protected_by_variable_init, OutDiag); } - - return ScopePair(InDiag, OutDiag); + + return ScopePair(InDiag, 0); } if (const TypedefDecl *TD = dyn_cast(D)) { @@ -322,6 +332,29 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) Jumps.push_back(S); break; + case Stmt::CXXTryStmtClass: { + CXXTryStmt *TS = cast(S); + unsigned newParentScope; + Scopes.push_back(GotoScope(ParentScope, + diag::note_protected_by_cxx_try, + diag::note_exits_cxx_try, + TS->getSourceRange().getBegin())); + if (Stmt *TryBlock = TS->getTryBlock()) + BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1)); + + // Jump from the catch into the try is not allowed either. + for (unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) { + CXXCatchStmt *CS = TS->getHandler(I); + Scopes.push_back(GotoScope(ParentScope, + diag::note_protected_by_cxx_catch, + diag::note_exits_cxx_catch, + CS->getSourceRange().getBegin())); + BuildScopeInformation(CS->getHandlerBlock(), + (newParentScope = Scopes.size()-1)); + } + return; + } + default: break; } @@ -418,30 +451,6 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) continue; } - // Disallow jumps into any part of a C++ try statement. This is pretty - // much the same as for Obj-C. 
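The reworked GetDiagForGotoScopeDecl logic above decides which variable declarations protect a scope from jumps, and the CXXTryStmt handling moved into BuildScopeInformation keeps try/catch blocks protected as well. Two short examples (not from the patch) of the code these notes fire on:

  #include <string>

  void f(bool b) {
    if (b)
      goto skip;        // error: the jump bypasses the non-trivial initialization below
    std::string s = "needs a real constructor and destructor";
  skip:
    (void)b;
  }

  void g(bool b) {
    if (b)
      goto into_try;    // error: jumping into the protected scope of a try block
    try {
  into_try:
      ;
    } catch (...) {
    }
  }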
- if (CXXTryStmt *TS = dyn_cast(SubStmt)) { - Scopes.push_back(GotoScope(ParentScope, - diag::note_protected_by_cxx_try, - diag::note_exits_cxx_try, - TS->getSourceRange().getBegin())); - if (Stmt *TryBlock = TS->getTryBlock()) - BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1)); - - // Jump from the catch into the try is not allowed either. - for (unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) { - CXXCatchStmt *CS = TS->getHandler(I); - Scopes.push_back(GotoScope(ParentScope, - diag::note_protected_by_cxx_catch, - diag::note_exits_cxx_catch, - CS->getSourceRange().getBegin())); - BuildScopeInformation(CS->getHandlerBlock(), - (newParentScope = Scopes.size()-1)); - } - - continue; - } - // Disallow jumps into the protected statement of an @autoreleasepool. if (ObjCAutoreleasePoolStmt *AS = dyn_cast(SubStmt)){ // Recursively walk the AST for the @autoreleasepool part, protected by a new @@ -453,14 +462,19 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) BuildScopeInformation(AS->getSubStmt(), (newParentScope = Scopes.size()-1)); continue; } - - if (const BlockExpr *BE = dyn_cast(SubStmt)) { - const BlockDecl *BDecl = BE->getBlockDecl(); + + // Disallow jumps past full-expressions that use blocks with + // non-trivial cleanups of their captures. This is theoretically + // implementable but a lot of work which we haven't felt up to doing. + if (ExprWithCleanups *EWC = dyn_cast(SubStmt)) { + for (unsigned i = 0, e = EWC->getNumObjects(); i != e; ++i) { + const BlockDecl *BDecl = EWC->getObject(i); for (BlockDecl::capture_const_iterator ci = BDecl->capture_begin(), ce = BDecl->capture_end(); ci != ce; ++ci) { VarDecl *variable = ci->getVariable(); BuildScopeInformation(variable, BDecl, ParentScope); } + } } // Recursively walk the AST. diff --git a/lib/Sema/MultiplexExternalSemaSource.cpp b/lib/Sema/MultiplexExternalSemaSource.cpp new file mode 100644 index 0000000..f930fb3 --- /dev/null +++ b/lib/Sema/MultiplexExternalSemaSource.cpp @@ -0,0 +1,271 @@ +//===--- MultiplexExternalSemaSource.cpp ---------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the event dispatching to the subscribed clients. +// +//===----------------------------------------------------------------------===// +#include "clang/Sema/MultiplexExternalSemaSource.h" + +#include "clang/AST/DeclContextInternals.h" +#include "clang/Sema/Lookup.h" + +using namespace clang; + +///\brief Constructs a new multiplexing external sema source and appends the +/// given element to it. +/// +///\param[in] source - An ExternalSemaSource. +/// +MultiplexExternalSemaSource::MultiplexExternalSemaSource(ExternalSemaSource &s1, + ExternalSemaSource &s2){ + Sources.push_back(&s1); + Sources.push_back(&s2); +} + +// pin the vtable here. +MultiplexExternalSemaSource::~MultiplexExternalSemaSource() {} + +///\brief Appends new source to the source list. +/// +///\param[in] source - An ExternalSemaSource. +/// +void MultiplexExternalSemaSource::addSource(ExternalSemaSource &source) { + Sources.push_back(&source); +} + +//===----------------------------------------------------------------------===// +// ExternalASTSource. 
+//===----------------------------------------------------------------------===// + +Decl *MultiplexExternalSemaSource::GetExternalDecl(uint32_t ID) { + for(size_t i = 0; i < Sources.size(); ++i) + if (Decl *Result = Sources[i]->GetExternalDecl(ID)) + return Result; + return 0; +} + +Selector MultiplexExternalSemaSource::GetExternalSelector(uint32_t ID) { + Selector Sel; + for(size_t i = 0; i < Sources.size(); ++i) { + Sel = Sources[i]->GetExternalSelector(ID); + if (!Sel.isNull()) + return Sel; + } + return Sel; +} + +uint32_t MultiplexExternalSemaSource::GetNumExternalSelectors() { + uint32_t total = 0; + for(size_t i = 0; i < Sources.size(); ++i) + total += Sources[i]->GetNumExternalSelectors(); + return total; +} + +Stmt *MultiplexExternalSemaSource::GetExternalDeclStmt(uint64_t Offset) { + for(size_t i = 0; i < Sources.size(); ++i) + if (Stmt *Result = Sources[i]->GetExternalDeclStmt(Offset)) + return Result; + return 0; +} + +CXXBaseSpecifier *MultiplexExternalSemaSource::GetExternalCXXBaseSpecifiers( + uint64_t Offset){ + for(size_t i = 0; i < Sources.size(); ++i) + if (CXXBaseSpecifier *R = Sources[i]->GetExternalCXXBaseSpecifiers(Offset)) + return R; + return 0; +} + +DeclContextLookupResult MultiplexExternalSemaSource:: +FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name) { + StoredDeclsList DeclsFound; + DeclContextLookupResult lookup; + for(size_t i = 0; i < Sources.size(); ++i) { + lookup = Sources[i]->FindExternalVisibleDeclsByName(DC, Name); + while(lookup.first != lookup.second) { + if (!DeclsFound.HandleRedeclaration(*lookup.first)) + DeclsFound.AddSubsequentDecl(*lookup.first); + lookup.first++; + } + } + return DeclsFound.getLookupResult(); +} + +void MultiplexExternalSemaSource::completeVisibleDeclsMap(const DeclContext *DC){ + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->completeVisibleDeclsMap(DC); +} + +ExternalLoadResult MultiplexExternalSemaSource:: +FindExternalLexicalDecls(const DeclContext *DC, + bool (*isKindWeWant)(Decl::Kind), + SmallVectorImpl &Result) { + for(size_t i = 0; i < Sources.size(); ++i) + // FIXME: The semantics of the return result is unclear to me... 
+ Sources[i]->FindExternalLexicalDecls(DC, isKindWeWant, Result); + + return ELR_Success; +} + +void MultiplexExternalSemaSource::FindFileRegionDecls(FileID File, + unsigned Offset, + unsigned Length, + SmallVectorImpl &Decls){ + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->FindFileRegionDecls(File, Offset, Length, Decls); +} + +void MultiplexExternalSemaSource::CompleteType(TagDecl *Tag) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->CompleteType(Tag); +} + +void MultiplexExternalSemaSource::CompleteType(ObjCInterfaceDecl *Class) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->CompleteType(Class); +} + +void MultiplexExternalSemaSource::ReadComments() { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadComments(); +} + +void MultiplexExternalSemaSource::StartedDeserializing() { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->StartedDeserializing(); +} + +void MultiplexExternalSemaSource::FinishedDeserializing() { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->FinishedDeserializing(); +} + +void MultiplexExternalSemaSource::StartTranslationUnit(ASTConsumer *Consumer) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->StartTranslationUnit(Consumer); +} + +void MultiplexExternalSemaSource::PrintStats() { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->PrintStats(); +} + +bool MultiplexExternalSemaSource::layoutRecordType(const RecordDecl *Record, + uint64_t &Size, + uint64_t &Alignment, + llvm::DenseMap &FieldOffsets, + llvm::DenseMap &BaseOffsets, + llvm::DenseMap &VirtualBaseOffsets){ + for(size_t i = 0; i < Sources.size(); ++i) + if (Sources[i]->layoutRecordType(Record, Size, Alignment, FieldOffsets, + BaseOffsets, VirtualBaseOffsets)) + return true; + return false; +} + +void MultiplexExternalSemaSource:: +getMemoryBufferSizes(MemoryBufferSizes &sizes) const { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->getMemoryBufferSizes(sizes); + +} + +//===----------------------------------------------------------------------===// +// ExternalSemaSource. 
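A standalone sketch (not part of this patch) of the dispatching shape MultiplexExternalSemaSource implements: "find one" style queries return the first non-null answer from any registered source, while accumulating queries visit every source. The types below are invented stand-ins, not clang classes.

  #include <vector>

  struct Source {
    virtual ~Source() {}
    virtual void *findDecl(unsigned ID) = 0;              // stand-in for GetExternalDecl
    virtual void readAll(std::vector<void *> &Out) = 0;    // stand-in for the Read* hooks
  };

  class Multiplexer : public Source {
    std::vector<Source *> Sources;   // not owned, mirroring the real class

  public:
    Multiplexer(Source &A, Source &B) {
      Sources.push_back(&A);
      Sources.push_back(&B);
    }

    void *findDecl(unsigned ID) override {
      for (Source *S : Sources)
        if (void *D = S->findDecl(ID))
          return D;                  // first hit wins
      return nullptr;
    }

    void readAll(std::vector<void *> &Out) override {
      for (Source *S : Sources)
        S->readAll(Out);             // every source contributes to the result
    }
  };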
+//===----------------------------------------------------------------------===// + + +void MultiplexExternalSemaSource::InitializeSema(Sema &S) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->InitializeSema(S); +} + +void MultiplexExternalSemaSource::ForgetSema() { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ForgetSema(); +} + +void MultiplexExternalSemaSource::ReadMethodPool(Selector Sel) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadMethodPool(Sel); +} + +void MultiplexExternalSemaSource::ReadKnownNamespaces( + SmallVectorImpl &Namespaces){ + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadKnownNamespaces(Namespaces); +} + +bool MultiplexExternalSemaSource::LookupUnqualified(LookupResult &R, Scope *S){ + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->LookupUnqualified(R, S); + + return !R.empty(); +} + +void MultiplexExternalSemaSource::ReadTentativeDefinitions( + SmallVectorImpl &TentativeDefs) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadTentativeDefinitions(TentativeDefs); +} + +void MultiplexExternalSemaSource::ReadUnusedFileScopedDecls( + SmallVectorImpl &Decls) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadUnusedFileScopedDecls(Decls); +} + +void MultiplexExternalSemaSource::ReadDelegatingConstructors( + SmallVectorImpl &Decls) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadDelegatingConstructors(Decls); +} + +void MultiplexExternalSemaSource::ReadExtVectorDecls( + SmallVectorImpl &Decls) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadExtVectorDecls(Decls); +} + +void MultiplexExternalSemaSource::ReadDynamicClasses( + SmallVectorImpl &Decls) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadDynamicClasses(Decls); +} + +void MultiplexExternalSemaSource::ReadLocallyScopedExternalDecls( + SmallVectorImpl &Decls) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadLocallyScopedExternalDecls(Decls); +} + +void MultiplexExternalSemaSource::ReadReferencedSelectors( + SmallVectorImpl > &Sels) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadReferencedSelectors(Sels); +} + +void MultiplexExternalSemaSource::ReadWeakUndeclaredIdentifiers( + SmallVectorImpl > &WI) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadWeakUndeclaredIdentifiers(WI); +} + +void MultiplexExternalSemaSource::ReadUsedVTables( + SmallVectorImpl &VTables) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadUsedVTables(VTables); +} + +void MultiplexExternalSemaSource::ReadPendingInstantiations( + SmallVectorImpl > &Pending) { + for(size_t i = 0; i < Sources.size(); ++i) + Sources[i]->ReadPendingInstantiations(Pending); +} diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp new file mode 100644 index 0000000..4d29a34 --- /dev/null +++ b/lib/Sema/ScopeInfo.cpp @@ -0,0 +1,189 @@ +//===--- ScopeInfo.cpp - Information about a semantic context -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements FunctionScopeInfo and its subclasses, which contain +// information about a single function, block, lambda, or method body. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/Sema/ScopeInfo.h" +#include "clang/AST/Decl.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/Expr.h" +#include "clang/AST/ExprCXX.h" +#include "clang/AST/ExprObjC.h" + +using namespace clang; +using namespace sema; + +void FunctionScopeInfo::Clear() { + HasBranchProtectedScope = false; + HasBranchIntoScope = false; + HasIndirectGoto = false; + + SwitchStack.clear(); + Returns.clear(); + ErrorTrap.reset(); + PossiblyUnreachableDiags.clear(); + WeakObjectUses.clear(); +} + +static const NamedDecl *getBestPropertyDecl(const ObjCPropertyRefExpr *PropE) { + if (PropE->isExplicitProperty()) + return PropE->getExplicitProperty(); + + return PropE->getImplicitPropertyGetter(); +} + +FunctionScopeInfo::WeakObjectProfileTy::BaseInfoTy +FunctionScopeInfo::WeakObjectProfileTy::getBaseInfo(const Expr *E) { + E = E->IgnoreParenCasts(); + + const NamedDecl *D = 0; + bool IsExact = false; + + switch (E->getStmtClass()) { + case Stmt::DeclRefExprClass: + D = cast(E)->getDecl(); + IsExact = isa(D); + break; + case Stmt::MemberExprClass: { + const MemberExpr *ME = cast(E); + D = ME->getMemberDecl(); + IsExact = isa(ME->getBase()->IgnoreParenImpCasts()); + break; + } + case Stmt::ObjCIvarRefExprClass: { + const ObjCIvarRefExpr *IE = cast(E); + D = IE->getDecl(); + IsExact = IE->getBase()->isObjCSelfExpr(); + break; + } + case Stmt::PseudoObjectExprClass: { + const PseudoObjectExpr *POE = cast(E); + const ObjCPropertyRefExpr *BaseProp = + dyn_cast(POE->getSyntacticForm()); + if (BaseProp) { + D = getBestPropertyDecl(BaseProp); + + const Expr *DoubleBase = BaseProp->getBase(); + if (const OpaqueValueExpr *OVE = dyn_cast(DoubleBase)) + DoubleBase = OVE->getSourceExpr(); + + IsExact = DoubleBase->isObjCSelfExpr(); + } + break; + } + default: + break; + } + + return BaseInfoTy(D, IsExact); +} + + +FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy( + const ObjCPropertyRefExpr *PropE) + : Base(0, true), Property(getBestPropertyDecl(PropE)) { + + if (PropE->isObjectReceiver()) { + const OpaqueValueExpr *OVE = cast(PropE->getBase()); + const Expr *E = OVE->getSourceExpr(); + Base = getBaseInfo(E); + } else if (PropE->isClassReceiver()) { + Base.setPointer(PropE->getClassReceiver()); + } else { + assert(PropE->isSuperReceiver()); + } +} + +FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(const Expr *BaseE, + const ObjCPropertyDecl *Prop) + : Base(0, true), Property(Prop) { + if (BaseE) + Base = getBaseInfo(BaseE); + // else, this is a message accessing a property on super. 
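The WeakObjectProfileTy constructors above boil every weak access down to a (base, property) key plus an "exact" bit. A loose standalone sketch (not from the patch) of the bookkeeping shape: uses are grouped under that key, each use records whether it is an unsafe read, and a use can later be downgraded to safe, which is what markSafeWeakUse does for reads that are immediately guarded.

  #include <map>
  #include <utility>
  #include <vector>

  struct WeakUse {
    const void *UseExpr;   // stand-in for the Expr* of the access
    bool Unsafe;           // true for a read that may observe a deallocated object
  };

  using ProfileKey = std::pair<const void *, const void *>;  // (base, property)
  using UseMap = std::map<ProfileKey, std::vector<WeakUse>>;

  void recordUse(UseMap &M, ProfileKey K, const void *E, bool IsRead) {
    M[K].push_back(WeakUse{E, IsRead});
  }

  // Find the most recent unsafe read of this expression and mark it safe so it
  // no longer counts toward the "repeated use of weak" warning.
  void markSafe(UseMap &M, ProfileKey K, const void *E) {
    auto It = M.find(K);
    if (It == M.end())
      return;
    for (auto RI = It->second.rbegin(); RI != It->second.rend(); ++RI)
      if (RI->UseExpr == E && RI->Unsafe) {
        RI->Unsafe = false;
        return;
      }
  }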
+} + +FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy( + const DeclRefExpr *DRE) + : Base(0, true), Property(DRE->getDecl()) { + assert(isa(Property)); +} + +FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy( + const ObjCIvarRefExpr *IvarE) + : Base(getBaseInfo(IvarE->getBase())), Property(IvarE->getDecl()) { +} + +void FunctionScopeInfo::recordUseOfWeak(const ObjCMessageExpr *Msg, + const ObjCPropertyDecl *Prop) { + assert(Msg && Prop); + WeakUseVector &Uses = + WeakObjectUses[WeakObjectProfileTy(Msg->getInstanceReceiver(), Prop)]; + Uses.push_back(WeakUseTy(Msg, Msg->getNumArgs() == 0)); +} + +void FunctionScopeInfo::markSafeWeakUse(const Expr *E) { + E = E->IgnoreParenCasts(); + + if (const PseudoObjectExpr *POE = dyn_cast(E)) { + markSafeWeakUse(POE->getSyntacticForm()); + return; + } + + if (const ConditionalOperator *Cond = dyn_cast(E)) { + markSafeWeakUse(Cond->getTrueExpr()); + markSafeWeakUse(Cond->getFalseExpr()); + return; + } + + if (const BinaryConditionalOperator *Cond = + dyn_cast(E)) { + markSafeWeakUse(Cond->getCommon()); + markSafeWeakUse(Cond->getFalseExpr()); + return; + } + + // Has this weak object been seen before? + FunctionScopeInfo::WeakObjectUseMap::iterator Uses; + if (const ObjCPropertyRefExpr *RefExpr = dyn_cast(E)) + Uses = WeakObjectUses.find(WeakObjectProfileTy(RefExpr)); + else if (const ObjCIvarRefExpr *IvarE = dyn_cast(E)) + Uses = WeakObjectUses.find(WeakObjectProfileTy(IvarE)); + else if (const DeclRefExpr *DRE = dyn_cast(E)) + Uses = WeakObjectUses.find(WeakObjectProfileTy(DRE)); + else if (const ObjCMessageExpr *MsgE = dyn_cast(E)) { + Uses = WeakObjectUses.end(); + if (const ObjCMethodDecl *MD = MsgE->getMethodDecl()) { + if (const ObjCPropertyDecl *Prop = MD->findPropertyDecl()) { + Uses = + WeakObjectUses.find(WeakObjectProfileTy(MsgE->getInstanceReceiver(), + Prop)); + } + } + } + else + return; + + if (Uses == WeakObjectUses.end()) + return; + + // Has there been a read from the object using this Expr? 
+ FunctionScopeInfo::WeakUseVector::reverse_iterator ThisUse = + std::find(Uses->second.rbegin(), Uses->second.rend(), WeakUseTy(E, true)); + if (ThisUse == Uses->second.rend()) + return; + + ThisUse->markSafe(); +} + +FunctionScopeInfo::~FunctionScopeInfo() { } +BlockScopeInfo::~BlockScopeInfo() { } +LambdaScopeInfo::~LambdaScopeInfo() { } diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp index 7f79f0c..13a33b7 100644 --- a/lib/Sema/Sema.cpp +++ b/lib/Sema/Sema.cpp @@ -22,6 +22,7 @@ #include "clang/Sema/CXXFieldCollector.h" #include "clang/Sema/TemplateDeduction.h" #include "clang/Sema/ExternalSemaSource.h" +#include "clang/Sema/MultiplexExternalSemaSource.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/PrettyDeclStackTrace.h" #include "clang/Sema/Scope.h" @@ -43,22 +44,6 @@ using namespace clang; using namespace sema; -FunctionScopeInfo::~FunctionScopeInfo() { } - -void FunctionScopeInfo::Clear() { - HasBranchProtectedScope = false; - HasBranchIntoScope = false; - HasIndirectGoto = false; - - SwitchStack.clear(); - Returns.clear(); - ErrorTrap.reset(); - PossiblyUnreachableDiags.clear(); -} - -BlockScopeInfo::~BlockScopeInfo() { } -LambdaScopeInfo::~LambdaScopeInfo() { } - PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context, const Preprocessor &PP) { PrintingPolicy Policy = Context.getPrintingPolicy(); @@ -84,12 +69,14 @@ void Sema::ActOnTranslationUnitScope(Scope *S) { Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter) - : TheTargetAttributesSema(0), FPFeatures(pp.getLangOpts()), + : TheTargetAttributesSema(0), ExternalSource(0), + isMultiplexExternalSource(false), FPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()), - CollectStats(false), ExternalSource(0), CodeCompleter(CodeCompleter), + CollectStats(false), CodeCompleter(CodeCompleter), CurContext(0), OriginalLexicalContext(0), PackContext(0), MSStructPragmaOn(false), VisContext(0), + IsBuildingRecoveryCallExpr(false), ExprNeedsCleanups(false), LateTemplateParser(0), OpaqueParser(0), IdResolver(pp), StdInitializerList(0), CXXTypeInfoDecl(0), MSVCGuidDecl(0), NSNumberDecl(0), @@ -203,6 +190,10 @@ Sema::~Sema() { if (ExternalSemaSource *ExternalSema = dyn_cast_or_null(Context.getExternalSource())) ExternalSema->ForgetSema(); + + // If Sema's ExternalSource is the multiplexer - we own it. + if (isMultiplexExternalSource) + delete ExternalSource; } /// makeUnavailableInSystemHeader - There is an error in the current @@ -234,6 +225,27 @@ ASTMutationListener *Sema::getASTMutationListener() const { return getASTConsumer().GetASTMutationListener(); } +///\brief Registers an external source. If an external source already exists, +/// creates a multiplex external source and appends to it. +/// +///\param[in] E - A non-null external sema source. +/// +void Sema::addExternalSource(ExternalSemaSource *E) { + assert(E && "Cannot use with NULL ptr"); + + if (!ExternalSource) { + ExternalSource = E; + return; + } + + if (isMultiplexExternalSource) + static_cast(ExternalSource)->addSource(*E); + else { + ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E); + isMultiplexExternalSource = true; + } +} + /// \brief Print out statistics about the semantic analysis. 
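A compact sketch (not from the patch) of the registration policy Sema::addExternalSource adopts above: a single source is stored directly, and the second registration promotes it to a multiplexer that Sema then owns and must delete. The types here are invented stand-ins for ExternalSemaSource and MultiplexExternalSemaSource.

  #include <vector>

  struct ExternalSource { virtual ~ExternalSource() {} };

  struct MultiplexSource : ExternalSource {
    std::vector<ExternalSource *> Sources;
    MultiplexSource(ExternalSource &A, ExternalSource &B) { Sources = {&A, &B}; }
    void addSource(ExternalSource &S) { Sources.push_back(&S); }
  };

  struct SemaLike {
    ExternalSource *External = nullptr;
    bool OwnsMultiplex = false;

    void addExternalSource(ExternalSource &E) {
      if (!External) {
        External = &E;                 // first source: store it directly
      } else if (OwnsMultiplex) {
        static_cast<MultiplexSource *>(External)->addSource(E);
      } else {
        External = new MultiplexSource(*External, E);
        OwnsMultiplex = true;          // the destructor now has to delete External
      }
    }

    ~SemaLike() {
      if (OwnsMultiplex)
        delete External;
    }
  };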
void Sema::PrintStats() const { llvm::errs() << "\n*** Semantic Analysis Stats:\n"; @@ -507,6 +519,11 @@ void Sema::ActOnEndOfTranslationUnit() { assert(DelayedDiagnostics.getCurrentPool() == NULL && "reached end of translation unit with a pool attached?"); + // If code completion is enabled, don't perform any end-of-translation-unit + // work. + if (PP.isCodeCompletionEnabled()) + return; + // Only complete translation units define vtables and perform implicit // instantiations. if (TUKind == TU_Complete) { @@ -648,6 +665,8 @@ void Sema::ActOnEndOfTranslationUnit() { diag::err_tentative_def_incomplete_type)) VD->setInvalidDecl(); + CheckCompleteVariableDeclaration(VD); + // Notify the consumer that we've completed a tentative definition. if (!VD->isInvalidDecl()) Consumer.CompleteTentativeDefinition(VD); @@ -1021,6 +1040,9 @@ LambdaScopeInfo *Sema::getCurLambda() { } void Sema::ActOnComment(SourceRange Comment) { + if (!LangOpts.RetainCommentsFromSystemHeaders && + SourceMgr.isInSystemHeader(Comment.getBegin())) + return; RawComment RC(SourceMgr, Comment); if (RC.isAlmostTrailingComment()) { SourceRange MagicMarkerRange(Comment.getBegin(), @@ -1165,8 +1187,7 @@ static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads, DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { // FIXME: Magic number for max shown overloads stolen from // OverloadCandidateSet::NoteCandidates. - if (ShownOverloads >= 4 && - S.Diags.getShowOverloads() == DiagnosticsEngine::Ovl_Best) { + if (ShownOverloads >= 4 && S.Diags.getShowOverloads() == Ovl_Best) { ++SuppressedOverloads; continue; } @@ -1237,8 +1258,7 @@ bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, // FIXME: Try this before emitting the fixit, and suppress diagnostics // while doing so. E = ActOnCallExpr(0, E.take(), ParenInsertionLoc, - MultiExprArg(*this, 0, 0), - ParenInsertionLoc.getLocWithOffset(1)); + MultiExprArg(), ParenInsertionLoc.getLocWithOffset(1)); return true; } diff --git a/lib/Sema/SemaAccess.cpp b/lib/Sema/SemaAccess.cpp index 3481171..58b1a51 100644 --- a/lib/Sema/SemaAccess.cpp +++ b/lib/Sema/SemaAccess.cpp @@ -97,14 +97,19 @@ struct EffectiveContext { // functions (which can gain privileges through friendship), but we // take that as an oversight. while (true) { + // We want to add canonical declarations to the EC lists for + // simplicity of checking, but we need to walk up through the + // actual current DC chain. Otherwise, something like a local + // extern or friend which happens to be the canonical + // declaration will really mess us up. + if (isa(DC)) { - CXXRecordDecl *Record = cast(DC)->getCanonicalDecl(); - Records.push_back(Record); + CXXRecordDecl *Record = cast(DC); + Records.push_back(Record->getCanonicalDecl()); DC = Record->getDeclContext(); } else if (isa(DC)) { - FunctionDecl *Function = cast(DC)->getCanonicalDecl(); - Functions.push_back(Function); - + FunctionDecl *Function = cast(DC); + Functions.push_back(Function->getCanonicalDecl()); if (Function->getFriendObjectKind()) DC = Function->getLexicalDeclContext(); else @@ -1791,7 +1796,7 @@ void Sema::CheckLookupAccess(const LookupResult &R) { /// specifiers into account, but no member access expressions and such. /// /// \param Decl the declaration to check if it can be accessed -/// \param Class the class/context from which to start the search +/// \param Ctx the class/context from which to start the search /// \return true if the Decl is accessible from the Class, false otherwise. 
bool Sema::IsSimplyAccessible(NamedDecl *Decl, DeclContext *Ctx) { if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) { diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp index e935fc7..f1154c1 100644 --- a/lib/Sema/SemaAttr.cpp +++ b/lib/Sema/SemaAttr.cpp @@ -136,23 +136,12 @@ void Sema::AddMsStructLayoutForRecord(RecordDecl *RD) { } void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, - SourceLocation PragmaLoc, - SourceLocation KindLoc) { + SourceLocation PragmaLoc) { if (PackContext == 0) PackContext = new PragmaPackStack(); PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext); - // Reset just pops the top of the stack, or resets the current alignment to - // default. - if (Kind == Sema::POAK_Reset) { - if (!Context->pop(0, /*IsReset=*/true)) { - Diag(PragmaLoc, diag::warn_pragma_options_align_reset_failed) - << "stack empty"; - } - return; - } - switch (Kind) { // For all targets we support native and natural are the same. // @@ -181,9 +170,13 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, Context->setAlignment(PackStackEntry::kMac68kAlignmentSentinel); break; - default: - Diag(PragmaLoc, diag::warn_pragma_options_align_unsupported_option) - << KindLoc; + case POAK_Reset: + // Reset just pops the top of the stack, or resets the current alignment to + // default. + if (!Context->pop(0, /*IsReset=*/true)) { + Diag(PragmaLoc, diag::warn_pragma_options_align_reset_failed) + << "stack empty"; + } break; } } diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp index 0de9dd5..15bfd1c 100644 --- a/lib/Sema/SemaCXXScopeSpec.cpp +++ b/lib/Sema/SemaCXXScopeSpec.cpp @@ -520,7 +520,8 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, if (LookupCtx) Diag(Found.getNameLoc(), diag::err_no_member_suggest) << Name << LookupCtx << CorrectedQuotedStr << SS.getRange() - << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr); + << FixItHint::CreateReplacement(Corrected.getCorrectionRange(), + CorrectedStr); else Diag(Found.getNameLoc(), diag::err_undeclared_var_use_suggest) << Name << CorrectedQuotedStr diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp index d8d51e7..bf25c61 100644 --- a/lib/Sema/SemaCast.cpp +++ b/lib/Sema/SemaCast.cpp @@ -227,7 +227,7 @@ Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, CheckExtraCXXDefaultArguments(D); } - return BuildCXXNamedCast(OpLoc, Kind, TInfo, move(E), + return BuildCXXNamedCast(OpLoc, Kind, TInfo, E, SourceRange(LAngleBracketLoc, RAngleBracketLoc), SourceRange(LParenLoc, RParenLoc)); } @@ -1331,8 +1331,7 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, if (InitSeq.Failed() && (CStyle || !DestType->isReferenceType())) return TC_NotApplicable; - ExprResult Result - = InitSeq.Perform(Self, Entity, InitKind, MultiExprArg(Self, &SrcExprRaw, 1)); + ExprResult Result = InitSeq.Perform(Self, Entity, InitKind, SrcExprRaw); if (Result.isInvalid()) { msg = 0; return TC_Failed; @@ -1343,7 +1342,7 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, else Kind = CK_NoOp; - SrcExpr = move(Result); + SrcExpr = Result; return TC_Success; } @@ -1492,6 +1491,22 @@ static void DiagnoseCastOfObjCSEL(Sema &Self, const ExprResult &SrcExpr, } } +static void checkIntToPointerCast(bool CStyle, SourceLocation Loc, + const Expr *SrcExpr, QualType DestType, + Sema &Self) { + QualType SrcType = SrcExpr->getType(); + + // Not warning on reinterpret_cast, boolean, constant expressions, etc + // are not explicit design choices, but consistent with GCC's
behavior. + // Feel free to modify them if you've reason/evidence for an alternative. + if (CStyle && SrcType->isIntegralType(Self.Context) + && !SrcType->isBooleanType() + && !SrcType->isEnumeralType() + && !SrcExpr->isIntegerConstantExpr(Self.Context) + && Self.Context.getTypeSize(DestType) > Self.Context.getTypeSize(SrcType)) + Self.Diag(Loc, diag::warn_int_to_pointer_cast) << SrcType << DestType; +} + static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr, QualType DestType, bool CStyle, const SourceRange &OpRange, @@ -1513,7 +1528,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr, SingleFunctionExpr, Expr::getValueKindForType(DestType) == VK_RValue // Convert Fun to Ptr ) && SingleFunctionExpr.isUsable()) { - SrcExpr = move(SingleFunctionExpr); + SrcExpr = SingleFunctionExpr; SrcType = SrcExpr.get()->getType(); } else { return TC_NotApplicable; @@ -1690,6 +1705,8 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr, if (SrcType->isIntegralOrEnumerationType()) { assert(destIsPtr && "One type must be a pointer"); + checkIntToPointerCast(CStyle, OpRange.getBegin(), SrcExpr.get(), DestType, + Self); // C++ 5.2.10p5: A value of integral or enumeration type can be explicitly // converted to a pointer. // C++ 5.2.10p9: [Note: ...a null pointer constant of integral type is not @@ -1903,6 +1920,43 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle, SrcExpr = ExprError(); } +/// DiagnoseBadFunctionCast - Warn whenever a function call is cast to a +/// non-matching type. Such as enum function call to int, int call to +/// pointer; etc. Cast to 'void' is an exception. +static void DiagnoseBadFunctionCast(Sema &Self, const ExprResult &SrcExpr, + QualType DestType) { + if (Self.Diags.getDiagnosticLevel(diag::warn_bad_function_cast, + SrcExpr.get()->getExprLoc()) + == DiagnosticsEngine::Ignored) + return; + + if (!isa(SrcExpr.get())) + return; + + QualType SrcType = SrcExpr.get()->getType(); + if (DestType.getUnqualifiedType()->isVoidType()) + return; + if ((SrcType->isAnyPointerType() || SrcType->isBlockPointerType()) + && (DestType->isAnyPointerType() || DestType->isBlockPointerType())) + return; + if (SrcType->isIntegerType() && DestType->isIntegerType() && + (SrcType->isBooleanType() == DestType->isBooleanType()) && + (SrcType->isEnumeralType() == DestType->isEnumeralType())) + return; + if (SrcType->isRealFloatingType() && DestType->isRealFloatingType()) + return; + if (SrcType->isEnumeralType() && DestType->isEnumeralType()) + return; + if (SrcType->isComplexType() && DestType->isComplexType()) + return; + if (SrcType->isComplexIntegerType() && DestType->isComplexIntegerType()) + return; + + Self.Diag(SrcExpr.get()->getExprLoc(), + diag::warn_bad_function_cast) + << SrcType << DestType << SrcExpr.get()->getSourceRange(); +} + /// Check the semantics of a C-style cast operation, in C. 
void CastOperation::CheckCStyleCast() { assert(!Self.getLangOpts().CPlusPlus); @@ -2035,6 +2089,8 @@ void CastOperation::CheckCStyleCast() { SrcExpr = ExprError(); return; } + checkIntToPointerCast(/* CStyle */ true, OpRange.getBegin(), SrcExpr.get(), + DestType, Self); } else if (!SrcType->isArithmeticType()) { if (!DestType->isIntegralType(Self.Context) && DestType->isArithmeticType()) { @@ -2076,7 +2132,7 @@ void CastOperation::CheckCStyleCast() { } } DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType); - + DiagnoseBadFunctionCast(Self, SrcExpr, DestType); Kind = Self.PrepareScalarCast(SrcExpr, DestType); if (SrcExpr.isInvalid()) return; diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp index 2559f00..692a210 100644 --- a/lib/Sema/SemaChecking.cpp +++ b/lib/Sema/SemaChecking.cpp @@ -266,11 +266,11 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { case Builtin::BI__sync_swap_4: case Builtin::BI__sync_swap_8: case Builtin::BI__sync_swap_16: - return SemaBuiltinAtomicOverloaded(move(TheCallResult)); + return SemaBuiltinAtomicOverloaded(TheCallResult); #define BUILTIN(ID, TYPE, ATTRS) #define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \ case Builtin::BI##ID: \ - return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::AO##ID); + return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID); #include "clang/Basic/Builtins.def" case Builtin::BI__builtin_annotation: if (SemaBuiltinAnnotation(*this, TheCall)) @@ -299,7 +299,7 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { } } - return move(TheCallResult); + return TheCallResult; } // Get the valid immediate range for the specified NEON type code. @@ -437,6 +437,11 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { default: return false; case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; + case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; + case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; + case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; + case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; + case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; }; // We can't check the value of a dependent argument. @@ -490,9 +495,8 @@ void Sema::checkCall(NamedDecl *FDecl, Expr **Args, SourceLocation Loc, SourceRange Range, VariadicCallType CallType) { - // FIXME: This mechanism should be abstracted to be less fragile and - // more efficient. For example, just map function ids to custom - // handlers. + if (CurContext->isDependentContext()) + return; // Printf and scanf checking. bool HandledFormatString = false; @@ -506,8 +510,11 @@ void Sema::checkCall(NamedDecl *FDecl, Expr **Args, // Refuse POD arguments that weren't caught by the format string // checks above. if (!HandledFormatString && CallType != VariadicDoesNotApply) - for (unsigned ArgIdx = NumProtoArgs; ArgIdx < NumArgs; ++ArgIdx) - variadicArgumentPODCheck(Args[ArgIdx], CallType); + for (unsigned ArgIdx = NumProtoArgs; ArgIdx < NumArgs; ++ArgIdx) { + // Args[ArgIdx] can be null in malformed code. + if (Expr *Arg = Args[ArgIdx]) + variadicArgumentPODCheck(Arg, CallType); + } for (specific_attr_iterator I = FDecl->specific_attr_begin(), @@ -538,11 +545,23 @@ void Sema::CheckConstructorCall(FunctionDecl *FDecl, Expr **Args, /// and safety properties not strictly enforced by the C type system. 
bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto) { - bool IsMemberFunction = isa(TheCall); + bool IsMemberOperatorCall = isa(TheCall) && + isa(FDecl); + bool IsMemberFunction = isa(TheCall) || + IsMemberOperatorCall; VariadicCallType CallType = getVariadicCallType(FDecl, Proto, TheCall->getCallee()); unsigned NumProtoArgs = Proto ? Proto->getNumArgs() : 0; - checkCall(FDecl, TheCall->getArgs(), TheCall->getNumArgs(), NumProtoArgs, + Expr** Args = TheCall->getArgs(); + unsigned NumArgs = TheCall->getNumArgs(); + if (IsMemberOperatorCall) { + // If this is a call to a member operator, hide the first argument + // from checkCall. + // FIXME: Our choice of AST representation here is less than ideal. + ++Args; + --NumArgs; + } + checkCall(FDecl, Args, NumArgs, NumProtoArgs, IsMemberFunction, TheCall->getRParenLoc(), TheCall->getCallee()->getSourceRange(), CallType); @@ -737,6 +756,11 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } + if (AtomTy.isConstQualified()) { + Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic) + << Ptr->getType() << Ptr->getSourceRange(); + return ExprError(); + } ValType = AtomTy->getAs()->getValueType(); } @@ -885,8 +909,7 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, } return Owned(new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(), - SubExprs.data(), SubExprs.size(), - ResultType, Op, + SubExprs, ResultType, Op, TheCall->getRParenLoc())); } @@ -1189,10 +1212,19 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { // concrete integer type we should convert to is. unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID); - IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName); - FunctionDecl *NewBuiltinDecl = - cast(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID, - TUScope, false, DRE->getLocStart())); + FunctionDecl *NewBuiltinDecl; + if (NewBuiltinID == BuiltinID) + NewBuiltinDecl = FDecl; + else { + // Perform builtin lookup to avoid redeclaring it. + DeclarationName DN(&Context.Idents.get(NewBuiltinName)); + LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName); + LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); + assert(Res.getFoundDecl()); + NewBuiltinDecl = dyn_cast(Res.getFoundDecl()); + if (NewBuiltinDecl == 0) + return ExprError(); + } // The first argument --- the pointer --- has a fixed type; we // deduce the types of the rest of the arguments accordingly. Walk @@ -1228,14 +1260,14 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { NewBuiltinDecl, /*enclosing*/ false, DRE->getLocation(), - NewBuiltinDecl->getType(), + Context.BuiltinFnTy, DRE->getValueKind()); // Set the callee in the CallExpr. - // FIXME: This leaks the original parens and implicit casts. - ExprResult PromotedCall = UsualUnaryConversions(NewDRE); - if (PromotedCall.isInvalid()) - return ExprError(); + // FIXME: This loses syntactic information. + QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType()); + ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy, + CK_BuiltinFnToFnPtr); TheCall->setCallee(PromotedCall.take()); // Change the result type of the call to match the original value type. This @@ -1243,7 +1275,7 @@ Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) { // gracefully. 
TheCall->setType(ResultType); - return move(TheCallResult); + return TheCallResult; } /// CheckObjCString - Checks that the argument to the builtin @@ -1264,7 +1296,7 @@ bool Sema::CheckObjCString(Expr *Arg) { StringRef String = Literal->getString(); unsigned NumBytes = String.size(); SmallVector ToBuf(NumBytes); - const UTF8 *FromPtr = (UTF8 *)String.data(); + const UTF8 *FromPtr = (const UTF8 *)String.data(); UTF16 *ToPtr = &ToBuf[0]; ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, @@ -1503,8 +1535,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { TheCall->setArg(i, 0); } - return Owned(new (Context) ShuffleVectorExpr(Context, exprs.begin(), - exprs.size(), resType, + return Owned(new (Context) ShuffleVectorExpr(Context, exprs, resType, TheCall->getCallee()->getLocStart(), TheCall->getRParenLoc())); } @@ -1935,19 +1966,19 @@ public: void HandleIncompleteSpecifier(const char *startSpecifier, unsigned specifierLen); + void HandleInvalidLengthModifier( + const analyze_format_string::FormatSpecifier &FS, + const analyze_format_string::ConversionSpecifier &CS, + const char *startSpecifier, unsigned specifierLen, unsigned DiagID); + void HandleNonStandardLengthModifier( - const analyze_format_string::LengthModifier &LM, + const analyze_format_string::FormatSpecifier &FS, const char *startSpecifier, unsigned specifierLen); void HandleNonStandardConversionSpecifier( const analyze_format_string::ConversionSpecifier &CS, const char *startSpecifier, unsigned specifierLen); - void HandleNonStandardConversionSpecification( - const analyze_format_string::LengthModifier &LM, - const analyze_format_string::ConversionSpecifier &CS, - const char *startSpecifier, unsigned specifierLen); - virtual void HandlePosition(const char *startPos, unsigned posLen); virtual void HandleInvalidPosition(const char *startSpecifier, @@ -1964,7 +1995,7 @@ public: PartialDiagnostic PDiag, SourceLocation StringLoc, bool IsStringLocation, Range StringRange, - FixItHint Fixit = FixItHint()); + ArrayRef Fixit = ArrayRef()); protected: bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, @@ -1991,7 +2022,7 @@ protected: template void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, bool IsStringLocation, Range StringRange, - FixItHint Fixit = FixItHint()); + ArrayRef Fixit = ArrayRef()); void CheckPositionalAndNonpositionalArgs( const analyze_format_string::FormatSpecifier *FS); @@ -2025,35 +2056,95 @@ void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, getSpecifierRange(startSpecifier, specifierLen)); } +void CheckFormatHandler::HandleInvalidLengthModifier( + const analyze_format_string::FormatSpecifier &FS, + const analyze_format_string::ConversionSpecifier &CS, + const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { + using namespace analyze_format_string; + + const LengthModifier &LM = FS.getLengthModifier(); + CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); + + // See if we know how to fix this length modifier. 
+ llvm::Optional FixedLM = FS.getCorrectedLengthModifier(); + if (FixedLM) { + EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), + getLocationOfByte(LM.getStart()), + /*IsStringLocation*/true, + getSpecifierRange(startSpecifier, specifierLen)); + + S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) + << FixedLM->toString() + << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); + + } else { + FixItHint Hint; + if (DiagID == diag::warn_format_nonsensical_length) + Hint = FixItHint::CreateRemoval(LMRange); + + EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), + getLocationOfByte(LM.getStart()), + /*IsStringLocation*/true, + getSpecifierRange(startSpecifier, specifierLen), + Hint); + } +} + void CheckFormatHandler::HandleNonStandardLengthModifier( - const analyze_format_string::LengthModifier &LM, + const analyze_format_string::FormatSpecifier &FS, const char *startSpecifier, unsigned specifierLen) { - EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << LM.toString() - << 0, - getLocationOfByte(LM.getStart()), - /*IsStringLocation*/true, - getSpecifierRange(startSpecifier, specifierLen)); + using namespace analyze_format_string; + + const LengthModifier &LM = FS.getLengthModifier(); + CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); + + // See if we know how to fix this length modifier. + llvm::Optional FixedLM = FS.getCorrectedLengthModifier(); + if (FixedLM) { + EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) + << LM.toString() << 0, + getLocationOfByte(LM.getStart()), + /*IsStringLocation*/true, + getSpecifierRange(startSpecifier, specifierLen)); + + S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) + << FixedLM->toString() + << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); + + } else { + EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) + << LM.toString() << 0, + getLocationOfByte(LM.getStart()), + /*IsStringLocation*/true, + getSpecifierRange(startSpecifier, specifierLen)); + } } void CheckFormatHandler::HandleNonStandardConversionSpecifier( const analyze_format_string::ConversionSpecifier &CS, const char *startSpecifier, unsigned specifierLen) { - EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << CS.toString() - << 1, - getLocationOfByte(CS.getStart()), - /*IsStringLocation*/true, - getSpecifierRange(startSpecifier, specifierLen)); -} + using namespace analyze_format_string; -void CheckFormatHandler::HandleNonStandardConversionSpecification( - const analyze_format_string::LengthModifier &LM, - const analyze_format_string::ConversionSpecifier &CS, - const char *startSpecifier, unsigned specifierLen) { - EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_conversion_spec) - << LM.toString() << CS.toString(), - getLocationOfByte(LM.getStart()), - /*IsStringLocation*/true, - getSpecifierRange(startSpecifier, specifierLen)); + // See if we know how to fix this conversion specifier. 
+ llvm::Optional FixedCS = CS.getStandardSpecifier(); + if (FixedCS) { + EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) + << CS.toString() << /*conversion specifier*/1, + getLocationOfByte(CS.getStart()), + /*IsStringLocation*/true, + getSpecifierRange(startSpecifier, specifierLen)); + + CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength()); + S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier) + << FixedCS->toString() + << FixItHint::CreateReplacement(CSRange, FixedCS->toString()); + } else { + EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) + << CS.toString() << /*conversion specifier*/1, + getLocationOfByte(CS.getStart()), + /*IsStringLocation*/true, + getSpecifierRange(startSpecifier, specifierLen)); + } } void CheckFormatHandler::HandlePosition(const char *startPos, @@ -2182,7 +2273,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation Loc, bool IsStringLocation, Range StringRange, - FixItHint FixIt) { + ArrayRef FixIt) { EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag, Loc, IsStringLocation, StringRange, FixIt); } @@ -2190,7 +2281,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, /// \brief If the format string is not within the funcion call, emit a note /// so that the function call and string are in diagnostic messages. /// -/// \param inFunctionCall if true, the format string is within the function +/// \param InFunctionCall if true, the format string is within the function /// call and only one diagnostic message will be produced. Otherwise, an /// extra note will be emitted pointing to location of the format string. /// @@ -2213,7 +2304,7 @@ void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag, /// \param StringRange some or all of the string to highlight. This is /// templated so it can accept either a CharSourceRange or a SourceRange. /// -/// \param Fixit optional fix it hint for the format string. +/// \param FixIt optional fix it hint for the format string. template void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall, const Expr *ArgumentExpr, @@ -2221,15 +2312,27 @@ void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall, SourceLocation Loc, bool IsStringLocation, Range StringRange, - FixItHint FixIt) { - if (InFunctionCall) - S.Diag(Loc, PDiag) << StringRange << FixIt; - else { + ArrayRef FixIt) { + if (InFunctionCall) { + const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag); + D << StringRange; + for (ArrayRef::iterator I = FixIt.begin(), E = FixIt.end(); + I != E; ++I) { + D << *I; + } + } else { S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag) << ArgumentExpr->getSourceRange(); - S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), - diag::note_format_string_defined) - << StringRange << FixIt; + + const Sema::SemaDiagnosticBuilder &Note = + S.Diag(IsStringLocation ? Loc : StringRange.getBegin(), + diag::note_format_string_defined); + + Note << StringRange; + for (ArrayRef::iterator I = FixIt.begin(), E = FixIt.end(); + I != E; ++I) { + Note << *I; + } } } @@ -2550,23 +2653,17 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier startSpecifier, specifierLen); // Check the length modifier is valid with the given conversion specifier. 
- const LengthModifier &LM = FS.getLengthModifier(); - if (!FS.hasValidLengthModifier()) - EmitFormatDiagnostic(S.PDiag(diag::warn_format_nonsensical_length) - << LM.toString() << CS.toString(), - getLocationOfByte(LM.getStart()), - /*IsStringLocation*/true, - getSpecifierRange(startSpecifier, specifierLen), - FixItHint::CreateRemoval( - getSpecifierRange(LM.getStart(), - LM.getLength()))); - if (!FS.hasStandardLengthModifier()) - HandleNonStandardLengthModifier(LM, startSpecifier, specifierLen); + if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo())) + HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, + diag::warn_format_nonsensical_length); + else if (!FS.hasStandardLengthModifier()) + HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); + else if (!FS.hasStandardLengthConversionCombination()) + HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, + diag::warn_format_non_standard_conversion_spec); + if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); - if (!FS.hasStandardLengthConversionCombination()) - HandleNonStandardConversionSpecification(LM, CS, startSpecifier, - specifierLen); // The remaining checks depend on the data arguments. if (HasVAListArg) @@ -2582,6 +2679,30 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); } +static bool requiresParensToAddCast(const Expr *E) { + // FIXME: We should have a general way to reason about operator + // precedence and whether parens are actually needed here. + // Take care of a few common cases where they aren't. + const Expr *Inside = E->IgnoreImpCasts(); + if (const PseudoObjectExpr *POE = dyn_cast(Inside)) + Inside = POE->getSyntacticForm()->IgnoreImpCasts(); + + switch (Inside->getStmtClass()) { + case Stmt::ArraySubscriptExprClass: + case Stmt::CallExprClass: + case Stmt::DeclRefExprClass: + case Stmt::MemberExprClass: + case Stmt::ObjCIvarRefExprClass: + case Stmt::ObjCMessageExprClass: + case Stmt::ObjCPropertyRefExprClass: + case Stmt::ParenExprClass: + case Stmt::UnaryOperatorClass: + return false; + default: + return true; + } +} + bool CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, const char *StartSpecifier, @@ -2593,81 +2714,151 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, // format specifier. const analyze_printf::ArgType &AT = FS.getArgType(S.Context, ObjCContext); - if (AT.isValid() && !AT.matchesType(S.Context, E->getType())) { - // Look through argument promotions for our error message's reported type. - // This includes the integral and floating promotions, but excludes array - // and function pointer decay; seeing that an argument intended to be a - // string has type 'char [6]' is probably more confusing than 'char *'. - if (const ImplicitCastExpr *ICE = dyn_cast(E)) { - if (ICE->getCastKind() == CK_IntegralCast || - ICE->getCastKind() == CK_FloatingCast) { - E = ICE->getSubExpr(); - - // Check if we didn't match because of an implicit cast from a 'char' - // or 'short' to an 'int'. This is done because printf is a varargs - // function. - if (ICE->getType() == S.Context.IntTy || - ICE->getType() == S.Context.UnsignedIntTy) { - // All further checking is done on the subexpression. 
- if (AT.matchesType(S.Context, E->getType())) - return true; - } + if (!AT.isValid()) + return true; + + QualType IntendedTy = E->getType(); + if (AT.matchesType(S.Context, IntendedTy)) + return true; + + // Look through argument promotions for our error message's reported type. + // This includes the integral and floating promotions, but excludes array + // and function pointer decay; seeing that an argument intended to be a + // string has type 'char [6]' is probably more confusing than 'char *'. + if (const ImplicitCastExpr *ICE = dyn_cast(E)) { + if (ICE->getCastKind() == CK_IntegralCast || + ICE->getCastKind() == CK_FloatingCast) { + E = ICE->getSubExpr(); + IntendedTy = E->getType(); + + // Check if we didn't match because of an implicit cast from a 'char' + // or 'short' to an 'int'. This is done because printf is a varargs + // function. + if (ICE->getType() == S.Context.IntTy || + ICE->getType() == S.Context.UnsignedIntTy) { + // All further checking is done on the subexpression. + if (AT.matchesType(S.Context, IntendedTy)) + return true; } } + } - // We may be able to offer a FixItHint if it is a supported type. - PrintfSpecifier fixedFS = FS; - bool success = fixedFS.fixType(E->getType(), S.getLangOpts(), - S.Context, ObjCContext); + if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { + // Special-case some of Darwin's platform-independence types. + if (const TypedefType *UserTy = IntendedTy->getAs()) { + StringRef Name = UserTy->getDecl()->getName(); + IntendedTy = llvm::StringSwitch(Name) + .Case("NSInteger", S.Context.LongTy) + .Case("NSUInteger", S.Context.UnsignedLongTy) + .Case("SInt32", S.Context.IntTy) + .Case("UInt32", S.Context.UnsignedIntTy) + .Default(IntendedTy); + } + } - if (success) { - // Get the fix string from the fixed format specifier - SmallString<16> buf; - llvm::raw_svector_ostream os(buf); - fixedFS.toString(os); + // We may be able to offer a FixItHint if it is a supported type. + PrintfSpecifier fixedFS = FS; + bool success = fixedFS.fixType(IntendedTy, S.getLangOpts(), + S.Context, ObjCContext); + + if (success) { + // Get the fix string from the fixed format specifier + SmallString<16> buf; + llvm::raw_svector_ostream os(buf); + fixedFS.toString(os); + + CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); + + if (IntendedTy != E->getType()) { + // The canonical type for formatting this value is different from the + // actual type of the expression. (This occurs, for example, with Darwin's + // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but + // should be printed as 'long' for 64-bit compatibility.) + // Rather than emitting a normal format/argument mismatch, we want to + // add a cast to the recommended type (and correct the format string + // if necessary). + SmallString<16> CastBuf; + llvm::raw_svector_ostream CastFix(CastBuf); + CastFix << "("; + IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); + CastFix << ")"; + + SmallVector Hints; + if (!AT.matchesType(S.Context, IntendedTy)) + Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); + + if (const CStyleCastExpr *CCast = dyn_cast(E)) { + // If there's already a cast present, just replace it. + SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); + Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); + + } else if (!requiresParensToAddCast(E)) { + // If the expression has high enough precedence, + // just write the C-style cast. 
+ Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(), + CastFix.str())); + } else { + // Otherwise, add parens around the expression as well as the cast. + CastFix << "("; + Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(), + CastFix.str())); + + SourceLocation After = S.PP.getLocForEndOfToken(E->getLocEnd()); + Hints.push_back(FixItHint::CreateInsertion(After, ")")); + } + // We extract the name from the typedef because we don't want to show + // the underlying type in the diagnostic. + const TypedefType *UserTy = cast(E->getType()); + StringRef Name = UserTy->getDecl()->getName(); + + // Finally, emit the diagnostic. + EmitFormatDiagnostic(S.PDiag(diag::warn_format_argument_needs_cast) + << Name << IntendedTy + << E->getSourceRange(), + E->getLocStart(), /*IsStringLocation=*/false, + SpecRange, Hints); + } else { EmitFormatDiagnostic( S.PDiag(diag::warn_printf_conversion_argument_type_mismatch) - << AT.getRepresentativeTypeName(S.Context) << E->getType() + << AT.getRepresentativeTypeName(S.Context) << IntendedTy << E->getSourceRange(), E->getLocStart(), /*IsStringLocation*/false, - getSpecifierRange(StartSpecifier, SpecifierLen), - FixItHint::CreateReplacement( - getSpecifierRange(StartSpecifier, SpecifierLen), - os.str())); - } else { - const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, - SpecifierLen); - // Since the warning for passing non-POD types to variadic functions - // was deferred until now, we emit a warning for non-POD - // arguments here. - if (S.isValidVarArgType(E->getType()) == Sema::VAK_Invalid) { - unsigned DiagKind; - if (E->getType()->isObjCObjectType()) - DiagKind = diag::err_cannot_pass_objc_interface_to_vararg_format; - else - DiagKind = diag::warn_non_pod_vararg_with_format_string; - - EmitFormatDiagnostic( - S.PDiag(DiagKind) - << S.getLangOpts().CPlusPlus0x - << E->getType() - << CallType - << AT.getRepresentativeTypeName(S.Context) - << CSR - << E->getSourceRange(), - E->getLocStart(), /*IsStringLocation*/false, CSR); - - checkForCStrMembers(AT, E, CSR); - } else - EmitFormatDiagnostic( - S.PDiag(diag::warn_printf_conversion_argument_type_mismatch) - << AT.getRepresentativeTypeName(S.Context) << E->getType() - << CSR - << E->getSourceRange(), - E->getLocStart(), /*IsStringLocation*/false, CSR); + SpecRange, + FixItHint::CreateReplacement(SpecRange, os.str())); } + } else { + const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, + SpecifierLen); + // Since the warning for passing non-POD types to variadic functions + // was deferred until now, we emit a warning for non-POD + // arguments here. 
+ if (S.isValidVarArgType(E->getType()) == Sema::VAK_Invalid) { + unsigned DiagKind; + if (E->getType()->isObjCObjectType()) + DiagKind = diag::err_cannot_pass_objc_interface_to_vararg_format; + else + DiagKind = diag::warn_non_pod_vararg_with_format_string; + + EmitFormatDiagnostic( + S.PDiag(DiagKind) + << S.getLangOpts().CPlusPlus0x + << E->getType() + << CallType + << AT.getRepresentativeTypeName(S.Context) + << CSR + << E->getSourceRange(), + E->getLocStart(), /*IsStringLocation*/false, CSR); + + checkForCStrMembers(AT, E, CSR); + } else + EmitFormatDiagnostic( + S.PDiag(diag::warn_printf_conversion_argument_type_mismatch) + << AT.getRepresentativeTypeName(S.Context) << E->getType() + << CSR + << E->getSourceRange(), + E->getLocStart(), /*IsStringLocation*/false, CSR); } return true; @@ -2776,24 +2967,17 @@ bool CheckScanfHandler::HandleScanfSpecifier( } // Check the length modifier is valid with the given conversion specifier. - const LengthModifier &LM = FS.getLengthModifier(); - if (!FS.hasValidLengthModifier()) { - const CharSourceRange &R = getSpecifierRange(LM.getStart(), LM.getLength()); - EmitFormatDiagnostic(S.PDiag(diag::warn_format_nonsensical_length) - << LM.toString() << CS.toString() - << getSpecifierRange(startSpecifier, specifierLen), - getLocationOfByte(LM.getStart()), - /*IsStringLocation*/true, R, - FixItHint::CreateRemoval(R)); - } + if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo())) + HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, + diag::warn_format_nonsensical_length); + else if (!FS.hasStandardLengthModifier()) + HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); + else if (!FS.hasStandardLengthConversionCombination()) + HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, + diag::warn_format_non_standard_conversion_spec); - if (!FS.hasStandardLengthModifier()) - HandleNonStandardLengthModifier(LM, startSpecifier, specifierLen); if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); - if (!FS.hasStandardLengthConversionCombination()) - HandleNonStandardConversionSpecification(LM, CS, startSpecifier, - specifierLen); // The remaining checks depend on the data arguments. if (HasVAListArg) @@ -2881,7 +3065,8 @@ void Sema::CheckFormatString(const StringLiteral *FExpr, inFunctionCall, CallType); if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen, - getLangOpts())) + getLangOpts(), + Context.getTargetInfo())) H.DoneProcessing(); } else if (Type == FST_Scanf) { CheckScanfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg, numDataArgs, @@ -2889,7 +3074,8 @@ void Sema::CheckFormatString(const StringLiteral *FExpr, inFunctionCall, CallType); if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen, - getLangOpts())) + getLangOpts(), + Context.getTargetInfo())) H.DoneProcessing(); } // TODO: handle other formats } @@ -4138,6 +4324,44 @@ static void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) { } } +static void DiagnoseOutOfRangeComparison(Sema &S, BinaryOperator *E, + Expr *Constant, Expr *Other, + llvm::APSInt Value, + bool RhsConstant) { + BinaryOperatorKind op = E->getOpcode(); + QualType OtherT = Other->getType(); + QualType ConstantT = Constant->getType(); + if (S.Context.hasSameUnqualifiedType(OtherT, ConstantT)) + return; + assert((OtherT->isIntegerType() && ConstantT->isIntegerType()) + && "comparison with non-integer type"); + // FIXME. 
handle cases for signedness to catch (signed char)N == 200 + IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT); + IntRange LitRange = GetValueRange(S.Context, Value, Value.getBitWidth()); + if (OtherRange.Width >= LitRange.Width) + return; + bool IsTrue = true; + if (op == BO_EQ) + IsTrue = false; + else if (op == BO_NE) + IsTrue = true; + else if (RhsConstant) { + if (op == BO_GT || op == BO_GE) + IsTrue = !LitRange.NonNegative; + else // op == BO_LT || op == BO_LE + IsTrue = LitRange.NonNegative; + } else { + if (op == BO_LT || op == BO_LE) + IsTrue = !LitRange.NonNegative; + else // op == BO_GT || op == BO_GE + IsTrue = LitRange.NonNegative; + } + SmallString<16> PrettySourceValue(Value.toString(10)); + S.Diag(E->getOperatorLoc(), diag::warn_out_of_range_compare) + << PrettySourceValue << OtherT << IsTrue + << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); +} + /// Analyze the operands of the given comparison. Implements the /// fallback case from AnalyzeComparison. static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { @@ -4153,20 +4377,42 @@ static void AnalyzeComparison(Sema &S, BinaryOperator *E) { QualType T = E->getLHS()->getType(); assert(S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType()) && "comparison with mismatched types"); + if (E->isValueDependent()) + return AnalyzeImpConvsInComparison(S, E); + Expr *LHS = E->getLHS()->IgnoreParenImpCasts(); + Expr *RHS = E->getRHS()->IgnoreParenImpCasts(); + + bool IsComparisonConstant = false; + + // Check whether an integer constant comparison results in a value + // of 'true' or 'false'. + if (T->isIntegralType(S.Context)) { + llvm::APSInt RHSValue; + bool IsRHSIntegralLiteral = + RHS->isIntegerConstantExpr(RHSValue, S.Context); + llvm::APSInt LHSValue; + bool IsLHSIntegralLiteral = + LHS->isIntegerConstantExpr(LHSValue, S.Context); + if (IsRHSIntegralLiteral && !IsLHSIntegralLiteral) + DiagnoseOutOfRangeComparison(S, E, RHS, LHS, RHSValue, true); + else if (!IsRHSIntegralLiteral && IsLHSIntegralLiteral) + DiagnoseOutOfRangeComparison(S, E, LHS, RHS, LHSValue, false); + else + IsComparisonConstant = + (IsRHSIntegralLiteral && IsLHSIntegralLiteral); + } else if (!T->hasUnsignedIntegerRepresentation()) + IsComparisonConstant = E->isIntegerConstantExpr(S.Context); + // We don't do anything special if this isn't an unsigned integral // comparison: we're only interested in integral comparisons, and // signed comparisons only happen in cases we don't care to warn about. // // We also don't care about value-dependent expressions or expressions // whose result is a constant. - if (!T->hasUnsignedIntegerRepresentation() - || E->isValueDependent() || E->isIntegerConstantExpr(S.Context)) + if (!T->hasUnsignedIntegerRepresentation() || IsComparisonConstant) return AnalyzeImpConvsInComparison(S, E); - - Expr *LHS = E->getLHS()->IgnoreParenImpCasts(); - Expr *RHS = E->getRHS()->IgnoreParenImpCasts(); - + // Check to see if one of the (unmodified) operands is of different // signedness. 
Expr *signedOperand, *unsignedOperand; @@ -4353,6 +4599,46 @@ std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) { return ValueInRange.toString(10); } +static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) { + if (!isa(Ex)) + return false; + + Expr *InnerE = Ex->IgnoreParenImpCasts(); + const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr(); + const Type *Source = + S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); + if (Target->isDependentType()) + return false; + + const BuiltinType *FloatCandidateBT = + dyn_cast(ToBool ? Source : Target); + const Type *BoolCandidateType = ToBool ? Target : Source; + + return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) && + FloatCandidateBT && (FloatCandidateBT->isFloatingPoint())); +} + +void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall, + SourceLocation CC) { + unsigned NumArgs = TheCall->getNumArgs(); + for (unsigned i = 0; i < NumArgs; ++i) { + Expr *CurrA = TheCall->getArg(i); + if (!IsImplicitBoolFloatConversion(S, CurrA, true)) + continue; + + bool IsSwapped = ((i > 0) && + IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false)); + IsSwapped |= ((i < (NumArgs - 1)) && + IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false)); + if (IsSwapped) { + // Warn on this floating-point to bool conversion. + DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), + CurrA->getType(), CC, + diag::warn_impcast_floating_point_to_bool); + } + } +} + void CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC, bool *ICContext = 0) { if (E->isTypeDependent() || E->isValueDependent()) return; @@ -4488,12 +4774,33 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T, } } + // If the target is bool, warn if expr is a function or method call. + if (Target->isSpecificBuiltinType(BuiltinType::Bool) && + isa(E)) { + // Check last argument of function call to see if it is an + // implicit cast from a type matching the type the result + // is being cast to. + CallExpr *CEx = cast(E); + unsigned NumArgs = CEx->getNumArgs(); + if (NumArgs > 0) { + Expr *LastA = CEx->getArg(NumArgs - 1); + Expr *InnerE = LastA->IgnoreParenImpCasts(); + const Type *InnerType = + S.Context.getCanonicalType(InnerE->getType()).getTypePtr(); + if (isa(LastA) && (InnerType == Target)) { + // Warn on this floating-point to bool conversion + DiagnoseImpCast(S, E, T, CC, + diag::warn_impcast_floating_point_to_bool); + } + } + } return; } if ((E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull) == Expr::NPCK_GNUNull) && !Target->isAnyPointerType() - && !Target->isBlockPointerType() && !Target->isMemberPointerType()) { + && !Target->isBlockPointerType() && !Target->isMemberPointerType() + && Target->isScalarType()) { SourceLocation Loc = E->getSourceRange().getBegin(); if (Loc.isMacroID()) Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first; @@ -4658,6 +4965,10 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) { return; } + // Check implicit argument conversions for function calls. + if (CallExpr *Call = dyn_cast(E)) + CheckImplicitArgumentConversions(S, Call, CC); + // Go ahead and check any implicit conversions we might have skipped. // The non-canonical typecheck is just an optimization; // CheckImplicitConversion will filter out dead implicit conversions. 
@@ -5077,7 +5388,8 @@ static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { return false; owner.Variable = var; - owner.setLocsFrom(ref); + if (ref) + owner.setLocsFrom(ref); return true; } @@ -5186,6 +5498,12 @@ namespace { if (block->getBlockDecl()->capturesVariable(Variable)) Visit(block->getBlockDecl()->getBody()); } + + void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { + if (Capturer) return; + if (OVE->getSourceExpr()) + Visit(OVE->getSourceExpr()); + } }; } @@ -5195,6 +5513,28 @@ static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { assert(owner.Variable && owner.Loc.isValid()); e = e->IgnoreParenCasts(); + + // Look through [^{...} copy] and Block_copy(^{...}). + if (ObjCMessageExpr *ME = dyn_cast(e)) { + Selector Cmd = ME->getSelector(); + if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { + e = ME->getInstanceReceiver(); + if (!e) + return 0; + e = e->IgnoreParenCasts(); + } + } else if (CallExpr *CE = dyn_cast(e)) { + if (CE->getNumArgs() == 1) { + FunctionDecl *Fn = dyn_cast_or_null(CE->getCalleeDecl()); + if (Fn) { + const IdentifierInfo *FnI = Fn->getIdentifier(); + if (FnI && FnI->isStr("_Block_copy")) { + e = CE->getArg(0)->IgnoreParenCasts(); + } + } + } + } + BlockExpr *block = dyn_cast(e); if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) return 0; @@ -5271,6 +5611,20 @@ void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { diagnoseRetainCycle(*this, capturer, owner); } +void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { + RetainCycleOwner Owner; + if (!considerVariable(Var, /*DeclRefExpr=*/0, Owner)) + return; + + // Because we don't have an expression for the variable, we have to set the + // location explicitly here. + Owner.Loc = Var->getLocation(); + Owner.Range = Var->getSourceRange(); + + if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) + diagnoseRetainCycle(*this, Capturer, Owner); +} + bool Sema::checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS) { Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); @@ -5304,9 +5658,19 @@ void Sema::checkUnsafeExprAssigns(SourceLocation Loc, if (LHSType.isNull()) LHSType = LHS->getType(); + + Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); + + if (LT == Qualifiers::OCL_Weak) { + DiagnosticsEngine::Level Level = + Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, Loc); + if (Level != DiagnosticsEngine::Ignored) + getCurFunction()->markSafeWeakUse(LHS); + } + if (checkUnsafeAssigns(Loc, LHSType, RHS)) return; - Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); + // FIXME. Check for other life times. if (LT != Qualifiers::OCL_None) return; @@ -5826,7 +6190,8 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, if (IsPointerAttr) { // Skip implicit cast of pointer to `void *' (as a function argument). 
if (const ImplicitCastExpr *ICE = dyn_cast(ArgumentExpr)) - if (ICE->getType()->isVoidPointerType()) + if (ICE->getType()->isVoidPointerType() && + ICE->getCastKind() == CK_BitCast) ArgumentExpr = ICE->getSubExpr(); } QualType ArgumentType = ArgumentExpr->getType(); @@ -5881,4 +6246,3 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, << ArgumentExpr->getSourceRange() << TypeTagExpr->getSourceRange(); } - diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp index adf1327..b1aead8 100644 --- a/lib/Sema/SemaCodeComplete.cpp +++ b/lib/Sema/SemaCodeComplete.cpp @@ -1059,10 +1059,12 @@ bool ResultBuilder::IsClassOrStruct(NamedDecl *ND) const { // Allow us to find class templates, too. if (ClassTemplateDecl *ClassTemplate = dyn_cast(ND)) ND = ClassTemplate->getTemplatedDecl(); - + + // For purposes of this check, interfaces match too. if (RecordDecl *RD = dyn_cast(ND)) return RD->getTagKind() == TTK_Class || - RD->getTagKind() == TTK_Struct; + RD->getTagKind() == TTK_Struct || + RD->getTagKind() == TTK_Interface; return false; } @@ -1422,7 +1424,8 @@ static const char *GetCompletionTypeString(QualType T, if (!Tag->getIdentifier() && !Tag->getTypedefNameForAnonDecl()) { switch (Tag->getTagKind()) { case TTK_Struct: return "struct "; - case TTK_Class: return "class "; + case TTK_Interface: return "__interface "; + case TTK_Class: return "class "; case TTK_Union: return "union "; case TTK_Enum: return "enum "; } @@ -1449,7 +1452,7 @@ static void addThisCompletion(Sema &S, ResultBuilder &Results) { Policy, Allocator)); Builder.AddTypedTextChunk("this"); - Results.AddResult(CodeCompletionResult(Builder.TakeString())); + Results.AddResult(CodeCompletionResult(Builder.TakeString())); } /// \brief Add language constructs that show up for "ordinary" names. 
@@ -2480,7 +2483,6 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx, if (Declaration) { Result.addParentContext(Declaration->getDeclContext()); - Pattern->ParentKind = Result.getParentKind(); Pattern->ParentName = Result.getParentName(); } @@ -2493,7 +2495,7 @@ CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx, } if (Kind == RK_Macro) { - MacroInfo *MI = PP.getMacroInfo(Macro); + MacroInfo *MI = PP.getMacroInfoHistory(Macro); assert(MI && "Not a macro?"); Result.AddTypedTextChunk( @@ -2880,10 +2882,14 @@ CXCursorKind clang::getCursorKindForDecl(Decl *D) { case ObjCPropertyImplDecl::Synthesize: return CXCursor_ObjCSynthesizeDecl; } + + case Decl::Import: + return CXCursor_ModuleImportDecl; default: if (TagDecl *TD = dyn_cast(D)) { switch (TD->getTagKind()) { + case TTK_Interface: // fall through case TTK_Struct: return CXCursor_StructDecl; case TTK_Class: return CXCursor_ClassDecl; case TTK_Union: return CXCursor_UnionDecl; @@ -2896,6 +2902,7 @@ CXCursorKind clang::getCursorKindForDecl(Decl *D) { } static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results, + bool IncludeUndefined, bool TargetTypeIsPointer = false) { typedef CodeCompletionResult Result; @@ -2904,7 +2911,8 @@ static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results, for (Preprocessor::macro_iterator M = PP.macro_begin(), MEnd = PP.macro_end(); M != MEnd; ++M) { - Results.AddResult(Result(M->first, + if (IncludeUndefined || M->first->hasMacroDefinition()) + Results.AddResult(Result(M->first, getMacroUsagePriority(M->first->getName(), PP.getLangOpts(), TargetTypeIsPointer))); @@ -3125,7 +3133,6 @@ void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc, void Sema::CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext) { - typedef CodeCompletionResult Result; ResultBuilder Results(*this, CodeCompleter->getAllocator(), CodeCompleter->getCodeCompletionTUInfo(), mapCodeCompletionContext(*this, CompletionContext)); @@ -3204,7 +3211,7 @@ void Sema::CodeCompleteOrdinaryName(Scope *S, } if (CodeCompleter->includeMacros()) - AddMacroResults(PP, Results); + AddMacroResults(PP, Results, false); HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(), Results.data(),Results.size()); @@ -3296,7 +3303,6 @@ struct Sema::CodeCompleteExpressionData { /// type we're looking for. 
void Sema::CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data) { - typedef CodeCompletionResult Result; ResultBuilder Results(*this, CodeCompleter->getAllocator(), CodeCompleter->getCodeCompletionTUInfo(), CodeCompletionContext::CCC_Expression); @@ -3336,7 +3342,7 @@ void Sema::CodeCompleteExpression(Scope *S, AddPrettyFunctionResults(PP.getLangOpts(), Results); if (CodeCompleter->includeMacros()) - AddMacroResults(PP, Results, PreferredTypeIsPointer); + AddMacroResults(PP, Results, false, PreferredTypeIsPointer); HandleCodeCompleteResults(this, CodeCompleter, CodeCompletionContext(CodeCompletionContext::CCC_Expression, Data.PreferredType), @@ -3580,7 +3586,6 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) { if (!CodeCompleter) return; - typedef CodeCompletionResult Result; ResultBuilder::LookupFilter Filter = 0; enum CodeCompletionContext::Kind ContextKind = CodeCompletionContext::CCC_Other; @@ -3597,6 +3602,7 @@ void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) { case DeclSpec::TST_struct: case DeclSpec::TST_class: + case DeclSpec::TST_interface: Filter = &ResultBuilder::IsClassOrStruct; ContextKind = CodeCompletionContext::CCC_ClassOrStructTag; break; @@ -3728,7 +3734,7 @@ void Sema::CodeCompleteCase(Scope *S) { //so only say we include macros if the code completer says we do enum CodeCompletionContext::Kind kind = CodeCompletionContext::CCC_Other; if (CodeCompleter->includeMacros()) { - AddMacroResults(PP, Results); + AddMacroResults(PP, Results, false); kind = CodeCompletionContext::CCC_OtherWithMacros; } @@ -3898,7 +3904,6 @@ void Sema::CodeCompleteReturn(Scope *S) { } void Sema::CodeCompleteAfterIf(Scope *S) { - typedef CodeCompletionResult Result; ResultBuilder Results(*this, CodeCompleter->getAllocator(), CodeCompleter->getCodeCompletionTUInfo(), mapCodeCompletionContext(*this, PCC_Statement)); @@ -3952,7 +3957,7 @@ void Sema::CodeCompleteAfterIf(Scope *S) { AddPrettyFunctionResults(PP.getLangOpts(), Results); if (CodeCompleter->includeMacros()) - AddMacroResults(PP, Results); + AddMacroResults(PP, Results, false); HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(), Results.data(),Results.size()); @@ -4408,7 +4413,6 @@ static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) { } void Sema::CodeCompleteObjCAtDirective(Scope *S) { - typedef CodeCompletionResult Result; ResultBuilder Results(*this, CodeCompleter->getAllocator(), CodeCompleter->getCodeCompletionTUInfo(), CodeCompletionContext::CCC_Other); @@ -4595,26 +4599,23 @@ static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) { // Check for collisions with "readonly". if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) && - (Attributes & (ObjCDeclSpec::DQ_PR_readwrite | - ObjCDeclSpec::DQ_PR_assign | - ObjCDeclSpec::DQ_PR_unsafe_unretained | - ObjCDeclSpec::DQ_PR_copy | - ObjCDeclSpec::DQ_PR_retain | - ObjCDeclSpec::DQ_PR_strong))) + (Attributes & ObjCDeclSpec::DQ_PR_readwrite)) return true; - // Check for more than one of { assign, copy, retain, strong }. + // Check for more than one of { assign, copy, retain, strong, weak }. 
unsigned AssignCopyRetMask = Attributes & (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_unsafe_unretained | ObjCDeclSpec::DQ_PR_copy | - ObjCDeclSpec::DQ_PR_retain| - ObjCDeclSpec::DQ_PR_strong); + ObjCDeclSpec::DQ_PR_retain | + ObjCDeclSpec::DQ_PR_strong | + ObjCDeclSpec::DQ_PR_weak); if (AssignCopyRetMask && AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign && AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained && AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy && AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain && - AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong) + AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong && + AssignCopyRetMask != ObjCDeclSpec::DQ_PR_weak) return true; return false; @@ -4626,7 +4627,6 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) { unsigned Attributes = ODS.getPropertyAttributes(); - typedef CodeCompletionResult Result; ResultBuilder Results(*this, CodeCompleter->getAllocator(), CodeCompleter->getCodeCompletionTUInfo(), CodeCompletionContext::CCC_Other); @@ -4650,6 +4650,12 @@ void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) { Results.AddResult(CodeCompletionResult("nonatomic")); if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_atomic)) Results.AddResult(CodeCompletionResult("atomic")); + + // Only suggest "weak" if we're compiling for ARC-with-weak-references or GC. + if (getLangOpts().ObjCARCWeak || getLangOpts().getGC() != LangOptions::NonGC) + if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_weak)) + Results.AddResult(CodeCompletionResult("weak")); + if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) { CodeCompletionBuilder Setter(Results.getAllocator(), Results.getCodeCompletionTUInfo()); @@ -4837,8 +4843,6 @@ static void AddObjCMethods(ObjCContainerDecl *Container, void Sema::CodeCompleteObjCPropertyGetter(Scope *S) { - typedef CodeCompletionResult Result; - // Try to find the interface where getters might live. ObjCInterfaceDecl *Class = dyn_cast_or_null(CurContext); if (!Class) { @@ -4866,8 +4870,6 @@ void Sema::CodeCompleteObjCPropertyGetter(Scope *S) { } void Sema::CodeCompleteObjCPropertySetter(Scope *S) { - typedef CodeCompletionResult Result; - // Try to find the interface where setters might live. ObjCInterfaceDecl *Class = dyn_cast_or_null(CurContext); @@ -4898,7 +4900,6 @@ void Sema::CodeCompleteObjCPropertySetter(Scope *S) { void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter) { - typedef CodeCompletionResult Result; ResultBuilder Results(*this, CodeCompleter->getAllocator(), CodeCompleter->getCodeCompletionTUInfo(), CodeCompletionContext::CCC_Type); @@ -4957,7 +4958,7 @@ void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, CodeCompleter->includeGlobals()); if (CodeCompleter->includeMacros()) - AddMacroResults(PP, Results); + AddMacroResults(PP, Results, false); HandleCodeCompleteResults(this, CodeCompleter, CodeCompletionContext::CCC_Type, @@ -5041,7 +5042,7 @@ static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) { /// /// \param S The semantic analysis object. /// -/// \param S NeedSuperKeyword Whether we need to prefix this completion with +/// \param NeedSuperKeyword Whether we need to prefix this completion with /// the "super" keyword. Otherwise, we just need to provide the arguments. 
/// /// \param SelIdents The identifiers in the selector that have already been @@ -5187,7 +5188,7 @@ void Sema::CodeCompleteObjCMessageReceiver(Scope *S) { Results.ExitScope(); if (CodeCompleter->includeMacros()) - AddMacroResults(PP, Results); + AddMacroResults(PP, Results, false); HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(), Results.data(), Results.size()); @@ -5339,11 +5340,11 @@ static void AddClassMessageCompletions(Sema &SemaRef, Scope *S, // If we have an external source, load the entire class method // pool from the AST file. - if (SemaRef.ExternalSource) { + if (SemaRef.getExternalSource()) { for (uint32_t I = 0, - N = SemaRef.ExternalSource->GetNumExternalSelectors(); + N = SemaRef.getExternalSource()->GetNumExternalSelectors(); I != N; ++I) { - Selector Sel = SemaRef.ExternalSource->GetExternalSelector(I); + Selector Sel = SemaRef.getExternalSource()->GetExternalSelector(I); if (Sel.isNull() || SemaRef.MethodPool.count(Sel)) continue; @@ -5868,7 +5869,6 @@ void Sema::CodeCompleteObjCImplementationCategory(Scope *S, } void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) { - typedef CodeCompletionResult Result; ResultBuilder Results(*this, CodeCompleter->getAllocator(), CodeCompleter->getCodeCompletionTUInfo(), CodeCompletionContext::CCC_Other); @@ -7169,7 +7169,9 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) { M != MEnd; ++M) { Builder.AddTypedTextChunk(Builder.getAllocator().CopyString( M->first->getName())); - Results.AddResult(Builder.TakeString()); + Results.AddResult(CodeCompletionResult(Builder.TakeString(), + CCP_CodePattern, + CXCursor_MacroDefinition)); } Results.ExitScope(); } else if (IsDefinition) { @@ -7186,7 +7188,7 @@ void Sema::CodeCompletePreprocessorExpression() { CodeCompletionContext::CCC_PreprocessorExpression); if (!CodeCompleter || CodeCompleter->includeMacros()) - AddMacroResults(PP, Results); + AddMacroResults(PP, Results, true); // defined () Results.EnterNewScope(); @@ -7235,7 +7237,7 @@ void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, } if (!CodeCompleter || CodeCompleter->includeMacros()) - AddMacroResults(PP, Builder); + AddMacroResults(PP, Builder, true); Results.clear(); Results.insert(Results.end(), diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp index ea181de..0092d5d 100644 --- a/lib/Sema/SemaDecl.cpp +++ b/lib/Sema/SemaDecl.cpp @@ -350,8 +350,8 @@ ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc, /// isTagName() - This method is called *for error recovery purposes only* /// to determine if the specified name is a valid tag name ("struct foo"). If /// so, this returns the TST for the tag corresponding to it (TST_enum, -/// TST_union, TST_struct, TST_class). This is used to diagnose cases in C -/// where the user forgot to specify the tag. +/// TST_union, TST_struct, TST_interface, TST_class). This is used to diagnose +/// cases in C where the user forgot to specify the tag. DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) { // Do a tag name lookup in this scope. 
LookupResult R(*this, &II, SourceLocation(), LookupTagName); @@ -361,6 +361,7 @@ DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) { if (const TagDecl *TD = R.getAsSingle()) { switch (TD->getTagKind()) { case TTK_Struct: return DeclSpec::TST_struct; + case TTK_Interface: return DeclSpec::TST_interface; case TTK_Union: return DeclSpec::TST_union; case TTK_Class: return DeclSpec::TST_class; case TTK_Enum: return DeclSpec::TST_enum; @@ -434,7 +435,8 @@ bool Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II, else if (DeclContext *DC = computeDeclContext(*SS, false)) Diag(IILoc, diag::err_unknown_nested_typename_suggest) << II << DC << CorrectedQuotedStr << SS->getRange() - << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr); + << FixItHint::CreateReplacement(Corrected.getCorrectionRange(), + CorrectedStr); else llvm_unreachable("could not have corrected a typo here"); @@ -517,9 +519,9 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result, Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc) { - Result.clear(Sema::LookupTagName); - SemaRef.LookupParsedName(Result, S, &SS); - if (TagDecl *Tag = Result.getAsSingle()) { + LookupResult R(SemaRef, Name, NameLoc, Sema::LookupTagName); + SemaRef.LookupParsedName(R, S, &SS); + if (TagDecl *Tag = R.getAsSingle()) { const char *TagName = 0; const char *FixItTagName = 0; switch (Tag->getTagKind()) { @@ -538,6 +540,11 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result, FixItTagName = "struct "; break; + case TTK_Interface: + TagName = "__interface"; + FixItTagName = "__interface "; + break; + case TTK_Union: TagName = "union"; FixItTagName = "union "; @@ -548,25 +555,42 @@ static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result, << Name << TagName << SemaRef.getLangOpts().CPlusPlus << FixItHint::CreateInsertion(NameLoc, FixItTagName); - LookupResult R(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName); - if (SemaRef.LookupParsedName(R, S, &SS)) { - for (LookupResult::iterator I = R.begin(), IEnd = R.end(); - I != IEnd; ++I) - SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type) - << Name << TagName; - } + for (LookupResult::iterator I = Result.begin(), IEnd = Result.end(); + I != IEnd; ++I) + SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type) + << Name << TagName; + + // Replace lookup results with just the tag decl. + Result.clear(Sema::LookupTagName); + SemaRef.LookupParsedName(Result, S, &SS); return true; } - Result.clear(Sema::LookupOrdinaryName); return false; } +/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier. 
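
// A rough sketch (not taken from this commit's tests) of the "missing tag"
// recovery handled above, which now recognizes the Microsoft '__interface'
// class-key as well: when a tag name is hidden by an ordinary declaration,
// using the type without its tag is diagnosed and the fix-it inserts the
// keyword. Names below are illustrative only.
struct stat { int mode; };
int stat(const char *path);     // hides the tag name 'stat'

void probe(const char *p) {
  // stat s;                    // error: must use 'struct' tag to refer to
  //                            // the type 'stat'; fix-it inserts "struct "
  struct stat s;                // OK with the elaborated form
  (void)s;
  stat(p);                      // the function is still reachable by name
}
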
+static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS, + QualType T, SourceLocation NameLoc) { + ASTContext &Context = S.Context; + + TypeLocBuilder Builder; + Builder.pushTypeSpec(T).setNameLoc(NameLoc); + + T = S.getElaboratedType(ETK_None, SS, T); + ElaboratedTypeLoc ElabTL = Builder.push(T); + ElabTL.setElaboratedKeywordLoc(SourceLocation()); + ElabTL.setQualifierLoc(SS.getWithLocInContext(Context)); + return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T)); +} + Sema::NameClassification Sema::ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, - const Token &NextToken) { + const Token &NextToken, + bool IsAddressOfOperand, + CorrectionCandidateCallback *CCC) { DeclarationNameInfo NameInfo(Name, NameLoc); ObjCMethodDecl *CurMethod = getCurMethodDecl(); @@ -632,25 +656,11 @@ Corrected: // Perform typo correction to determine if there is another name that is // close to this name. - if (!SecondTry) { + if (!SecondTry && CCC) { SecondTry = true; - CorrectionCandidateCallback DefaultValidator; - // Try to limit which sets of keywords should be included in typo - // correction based on what the next token is. - DefaultValidator.WantTypeSpecifiers = - NextToken.is(tok::l_paren) || NextToken.is(tok::less) || - NextToken.is(tok::identifier) || NextToken.is(tok::star) || - NextToken.is(tok::amp) || NextToken.is(tok::l_square); - DefaultValidator.WantExpressionKeywords = - NextToken.is(tok::l_paren) || NextToken.is(tok::identifier) || - NextToken.is(tok::arrow) || NextToken.is(tok::period); - DefaultValidator.WantRemainingKeywords = - NextToken.is(tok::l_paren) || NextToken.is(tok::semi) || - NextToken.is(tok::identifier) || NextToken.is(tok::l_brace); - DefaultValidator.WantCXXNamedCasts = false; if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(), Result.getLookupKind(), S, - &SS, DefaultValidator)) { + &SS, *CCC)) { unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest; unsigned QualifiedDiag = diag::err_no_member_suggest; std::string CorrectedStr(Corrected.getAsString(getLangOpts())); @@ -675,11 +685,12 @@ Corrected: Diag(NameLoc, UnqualifiedDiag) << Name << CorrectedQuotedStr << FixItHint::CreateReplacement(NameLoc, CorrectedStr); - else + else // FIXME: is this even reachable? Test it. Diag(NameLoc, QualifiedDiag) << Name << computeDeclContext(SS, false) << CorrectedQuotedStr << SS.getRange() - << FixItHint::CreateReplacement(NameLoc, CorrectedStr); + << FixItHint::CreateReplacement(Corrected.getCorrectionRange(), + CorrectedStr); // Update the name, so that the caller has the new name. Name = Corrected.getCorrectionAsIdentifierInfo(); @@ -705,7 +716,7 @@ Corrected: if (ObjCIvarDecl *Ivar = Result.getAsSingle()) { Result.clear(); ExprResult E(LookupInObjCMethod(Result, S, Ivar->getIdentifier())); - return move(E); + return E; } goto Corrected; @@ -731,8 +742,9 @@ Corrected: // perform some heroics to see if we actually have a // template-argument-list, which would indicate a missing 'template' // keyword here. 
- return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(), - NameInfo, /*TemplateArgs=*/0); + return ActOnDependentIdExpression(SS, /*TemplateKWLoc=*/SourceLocation(), + NameInfo, IsAddressOfOperand, + /*TemplateArgs=*/0); } case LookupResult::Found: @@ -808,14 +820,16 @@ Corrected: return NameClassification::TypeTemplate(Template); } } - + NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl(); if (TypeDecl *Type = dyn_cast(FirstDecl)) { DiagnoseUseOfDecl(Type, NameLoc); QualType T = Context.getTypeDeclType(Type); + if (SS.isNotEmpty()) + return buildNestedType(*this, SS, T, NameLoc); return ParsedType::make(T); } - + ObjCInterfaceDecl *Class = dyn_cast(FirstDecl); if (!Class) { // FIXME: It's unfortunate that we don't have a Type node for handling this. @@ -838,24 +852,28 @@ Corrected: return ParsedType::make(T); } + // We can have a type template here if we're classifying a template argument. + if (isa(FirstDecl) && !isa(FirstDecl)) + return NameClassification::TypeTemplate( + TemplateName(cast(FirstDecl))); + // Check for a tag type hidden by a non-type decl in a few cases where it // seems likely a type is wanted instead of the non-type that was found. - if (!getLangOpts().ObjC1 && FirstDecl && !isa(FirstDecl) && - !isa(FirstDecl)) { + if (!getLangOpts().ObjC1) { bool NextIsOp = NextToken.is(tok::amp) || NextToken.is(tok::star); if ((NextToken.is(tok::identifier) || (NextIsOp && FirstDecl->isFunctionOrFunctionTemplate())) && isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) { - FirstDecl = (*Result.begin())->getUnderlyingDecl(); - if (TypeDecl *Type = dyn_cast(FirstDecl)) { - DiagnoseUseOfDecl(Type, NameLoc); - QualType T = Context.getTypeDeclType(Type); - return ParsedType::make(T); - } + TypeDecl *Type = Result.getAsSingle(); + DiagnoseUseOfDecl(Type, NameLoc); + QualType T = Context.getTypeDeclType(Type); + if (SS.isNotEmpty()) + return buildNestedType(*this, SS, T, NameLoc); + return ParsedType::make(T); } } - if (!Result.empty() && (*Result.begin())->isCXXClassMember()) + if (FirstDecl->isCXXClassMember()) return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result, 0); bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren)); @@ -1186,8 +1204,14 @@ bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const { Context.DeclMustBeEmitted(FD)) return false; } else if (const VarDecl *VD = dyn_cast(D)) { + // Don't warn on variables of const-qualified or reference type, since their + // values can be used even if though they're not odr-used, and because const + // qualified variables can appear in headers in contexts where they're not + // intended to be used. + // FIXME: Use more principled rules for these exemptions. if (!VD->isFileVarDecl() || - VD->getType().isConstant(Context) || + VD->getType().isConstQualified() || + VD->getType()->isReferenceType() || Context.DeclMustBeEmitted(VD)) return false; @@ -1248,7 +1272,7 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) { QualType Ty = VD->getType(); // Only look at the outermost level of typedef. 
- if (const TypedefType *TT = dyn_cast(Ty)) { + if (const TypedefType *TT = Ty->getAs()) { if (TT->getDecl()->hasAttr()) return false; } @@ -1268,6 +1292,8 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) { return false; if (const Expr *Init = VD->getInit()) { + if (const ExprWithCleanups *Cleanups = dyn_cast(Init)) + Init = Cleanups->getSubExpr(); const CXXConstructExpr *Construct = dyn_cast(Init); if (Construct && !Construct->isElidable()) { @@ -1706,6 +1732,25 @@ DeclHasAttr(const Decl *D, const Attr *A) { if (AA) return false; + // The following thread safety attributes can also be duplicated. + switch (A->getKind()) { + case attr::ExclusiveLocksRequired: + case attr::SharedLocksRequired: + case attr::LocksExcluded: + case attr::ExclusiveLockFunction: + case attr::SharedLockFunction: + case attr::UnlockFunction: + case attr::ExclusiveTrylockFunction: + case attr::SharedTrylockFunction: + case attr::GuardedBy: + case attr::PtGuardedBy: + case attr::AcquiredBefore: + case attr::AcquiredAfter: + return false; + default: + ; + } + const OwnershipAttr *OA = dyn_cast(A); const AnnotateAttr *Ann = dyn_cast(A); for (Decl::attr_iterator i = D->attr_begin(), e = D->attr_end(); i != e; ++i) @@ -1908,6 +1953,19 @@ static bool canRedefineFunction(const FunctionDecl *FD, FD->getStorageClass() == SC_Extern); } +/// Is the given calling convention the ABI default for the given +/// declaration? +static bool isABIDefaultCC(Sema &S, CallingConv CC, FunctionDecl *D) { + CallingConv ABIDefaultCC; + if (isa(D) && cast(D)->isInstance()) { + ABIDefaultCC = S.Context.getDefaultCXXMethodCallConv(D->isVariadic()); + } else { + // Free C function or a static method. + ABIDefaultCC = (S.Context.getLangOpts().MRTD ? CC_X86StdCall : CC_C); + } + return ABIDefaultCC == CC; +} + /// MergeFunctionDecl - We just parsed a function 'New' from /// declarator D which has the same name and scope as a previous /// declaration 'Old'. Figure out how to resolve this situation, @@ -1976,6 +2034,9 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S) { // later declared or defined without one, the second decl assumes the // calling convention of the first. // + // It's OK if a function is first declared without a calling convention, + // but is later declared or defined with the default calling convention. + // // For the new decl, we have to look at the NON-canonical type to tell the // difference between a function that really doesn't have a calling // convention and one that is declared cdecl. That's because in @@ -1989,10 +2050,22 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S) { FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo(); FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo(); bool RequiresAdjustment = false; - if (OldTypeInfo.getCC() != CC_Default && - NewTypeInfo.getCC() == CC_Default) { + if (OldTypeInfo.getCC() == NewTypeInfo.getCC()) { + // Fast path: nothing to do. + + // Inherit the CC from the previous declaration if it was specified + // there but not here. + } else if (NewTypeInfo.getCC() == CC_Default) { NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC()); RequiresAdjustment = true; + + // Don't complain about mismatches when the default CC is + // effectively the same as the explict one. 
+ } else if (OldTypeInfo.getCC() == CC_Default && + isABIDefaultCC(*this, NewTypeInfo.getCC(), New)) { + NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC()); + RequiresAdjustment = true; + } else if (!Context.isSameCallConv(OldTypeInfo.getCC(), NewTypeInfo.getCC())) { // Calling conventions really aren't compatible, so complain. @@ -2398,7 +2471,7 @@ void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old) { } if (MergedT.isNull()) { Diag(New->getLocation(), diag::err_redefinition_different_type) - << New->getDeclName(); + << New->getDeclName() << New->getType() << Old->getType(); Diag(Old->getLocation(), diag::note_previous_definition); return New->setInvalidDecl(); } @@ -2551,8 +2624,7 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) { /// no declarator (e.g. "struct foo;") is parsed. Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS) { - return ParsedFreeStandingDeclSpec(S, AS, DS, - MultiTemplateParamsArg(*this, 0, 0)); + return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg()); } /// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with @@ -2565,6 +2637,7 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, TagDecl *Tag = 0; if (DS.getTypeSpecType() == DeclSpec::TST_class || DS.getTypeSpecType() == DeclSpec::TST_struct || + DS.getTypeSpecType() == DeclSpec::TST_interface || DS.getTypeSpecType() == DeclSpec::TST_union || DS.getTypeSpecType() == DeclSpec::TST_enum) { TagD = DS.getRepAsDecl(); @@ -2603,7 +2676,8 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag) << (DS.getTypeSpecType() == DeclSpec::TST_class ? 0 : DS.getTypeSpecType() == DeclSpec::TST_struct ? 1 : - DS.getTypeSpecType() == DeclSpec::TST_union ? 2 : 3); + DS.getTypeSpecType() == DeclSpec::TST_interface ? 2 : + DS.getTypeSpecType() == DeclSpec::TST_union ? 3 : 4); else Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_no_declarators); // Don't emit warnings after this error. @@ -2724,16 +2798,17 @@ Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec::TST TypeSpecType = DS.getTypeSpecType(); if (TypeSpecType == DeclSpec::TST_class || TypeSpecType == DeclSpec::TST_struct || + TypeSpecType == DeclSpec::TST_interface || TypeSpecType == DeclSpec::TST_union || TypeSpecType == DeclSpec::TST_enum) { AttributeList* attrs = DS.getAttributes().getList(); while (attrs) { - Diag(attrs->getScopeLoc(), - diag::warn_declspec_attribute_ignored) + Diag(attrs->getLoc(), diag::warn_declspec_attribute_ignored) << attrs->getName() << (TypeSpecType == DeclSpec::TST_class ? 0 : TypeSpecType == DeclSpec::TST_struct ? 1 : - TypeSpecType == DeclSpec::TST_union ? 2 : 3); + TypeSpecType == DeclSpec::TST_union ? 2 : + TypeSpecType == DeclSpec::TST_interface ? 3 : 4); attrs = attrs->getNext(); } } @@ -3353,7 +3428,6 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D, switch (DS.getTypeSpecType()) { case DeclSpec::TST_typename: case DeclSpec::TST_typeofType: - case DeclSpec::TST_decltype: case DeclSpec::TST_underlyingType: case DeclSpec::TST_atomic: { // Grab the type from the parser. 
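
// A minimal sketch of what the calling-convention merging above now accepts:
// spelling out the ABI-default convention on a later declaration is no longer
// treated as a mismatch (CC_C for a free function here; under -mrtd or for
// C++ instance methods the relevant default is used instead). Inferred from
// the hunk, with illustrative names.
void notify(int value);                          // no explicit convention
void __attribute__((cdecl)) notify(int value);   // explicit, but matches the
                                                 // ABI default, so no complaint
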
@@ -3377,6 +3451,7 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D, break; } + case DeclSpec::TST_decltype: case DeclSpec::TST_typeofExpr: { Expr *E = DS.getRepAsExpr(); ExprResult Result = S.RebuildExprInCurrentInstantiation(E); @@ -3411,7 +3486,7 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D, Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) { D.setFunctionDefinitionKind(FDK_Declaration); - Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg(*this)); + Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg()); if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() && Dcl && Dcl->getDeclContext()->isFileContext()) @@ -3476,7 +3551,8 @@ bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, // void X::f(); // }; if (Cur->Equals(DC)) { - Diag(Loc, diag::warn_member_extra_qualification) + Diag(Loc, LangOpts.MicrosoftExt? diag::warn_member_extra_qualification + : diag::err_member_extra_qualification) << Name << FixItHint::CreateRemoval(SS.getRange()); SS.clear(); return false; @@ -3710,11 +3786,11 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D, New = ActOnTypedefDeclarator(S, D, DC, TInfo, Previous); } else if (R->isFunctionType()) { New = ActOnFunctionDeclarator(S, D, DC, TInfo, Previous, - move(TemplateParamLists), + TemplateParamLists, AddToScope); } else { New = ActOnVariableDeclarator(S, D, DC, TInfo, Previous, - move(TemplateParamLists)); + TemplateParamLists); } if (New == 0) @@ -3729,9 +3805,9 @@ Decl *Sema::HandleDeclarator(Scope *S, Declarator &D, return New; } -/// TryToFixInvalidVariablyModifiedType - Helper method to turn variable array -/// types into constant array types in certain situations which would otherwise -/// be errors (for GCC compatibility). +/// Helper method to turn variable array types into constant array +/// types in certain situations which would otherwise be errors (for +/// GCC compatibility). static QualType TryToFixInvalidVariablyModifiedType(QualType T, ASTContext &Context, bool &SizeIsNegative, @@ -3799,6 +3875,52 @@ static QualType TryToFixInvalidVariablyModifiedType(QualType T, Res, ArrayType::Normal, 0); } +static void +FixInvalidVariablyModifiedTypeLoc(TypeLoc SrcTL, TypeLoc DstTL) { + if (PointerTypeLoc* SrcPTL = dyn_cast(&SrcTL)) { + PointerTypeLoc* DstPTL = cast(&DstTL); + FixInvalidVariablyModifiedTypeLoc(SrcPTL->getPointeeLoc(), + DstPTL->getPointeeLoc()); + DstPTL->setStarLoc(SrcPTL->getStarLoc()); + return; + } + if (ParenTypeLoc* SrcPTL = dyn_cast(&SrcTL)) { + ParenTypeLoc* DstPTL = cast(&DstTL); + FixInvalidVariablyModifiedTypeLoc(SrcPTL->getInnerLoc(), + DstPTL->getInnerLoc()); + DstPTL->setLParenLoc(SrcPTL->getLParenLoc()); + DstPTL->setRParenLoc(SrcPTL->getRParenLoc()); + return; + } + ArrayTypeLoc* SrcATL = cast(&SrcTL); + ArrayTypeLoc* DstATL = cast(&DstTL); + TypeLoc SrcElemTL = SrcATL->getElementLoc(); + TypeLoc DstElemTL = DstATL->getElementLoc(); + DstElemTL.initializeFullCopy(SrcElemTL); + DstATL->setLBracketLoc(SrcATL->getLBracketLoc()); + DstATL->setSizeExpr(SrcATL->getSizeExpr()); + DstATL->setRBracketLoc(SrcATL->getRBracketLoc()); +} + +/// Helper method to turn variable array types into constant array +/// types in certain situations which would otherwise be errors (for +/// GCC compatibility). 
+static TypeSourceInfo* +TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo, + ASTContext &Context, + bool &SizeIsNegative, + llvm::APSInt &Oversized) { + QualType FixedTy + = TryToFixInvalidVariablyModifiedType(TInfo->getType(), Context, + SizeIsNegative, Oversized); + if (FixedTy.isNull()) + return 0; + TypeSourceInfo *FixedTInfo = Context.getTrivialTypeSourceInfo(FixedTy); + FixInvalidVariablyModifiedTypeLoc(TInfo->getTypeLoc(), + FixedTInfo->getTypeLoc()); + return FixedTInfo; +} + /// \brief Register the given locally-scoped external C declaration so /// that it can be found later for redeclarations void @@ -3926,19 +4048,21 @@ Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) { // then it shall have block scope. // Note that variably modified types must be fixed before merging the decl so // that redeclarations will match. - QualType T = NewTD->getUnderlyingType(); + TypeSourceInfo *TInfo = NewTD->getTypeSourceInfo(); + QualType T = TInfo->getType(); if (T->isVariablyModifiedType()) { getCurFunction()->setHasBranchProtectedScope(); if (S->getFnParent() == 0) { bool SizeIsNegative; llvm::APSInt Oversized; - QualType FixedTy = - TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative, - Oversized); - if (!FixedTy.isNull()) { + TypeSourceInfo *FixedTInfo = + TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context, + SizeIsNegative, + Oversized); + if (FixedTInfo) { Diag(NewTD->getLocation(), diag::warn_illegal_constant_array_size); - NewTD->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(FixedTy)); + NewTD->setTypeSourceInfo(FixedTInfo); } else { if (SizeIsNegative) Diag(NewTD->getLocation(), diag::err_typecheck_negative_array_size); @@ -4203,7 +4327,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, D.getDeclSpec().getLocStart(), D.getIdentifierLoc(), D.getCXXScopeSpec(), - TemplateParamLists.get(), + TemplateParamLists.data(), TemplateParamLists.size(), /*never a friend*/ false, isExplicitSpecialization, @@ -4244,7 +4368,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, if (TemplateParamLists.size() > 0 && D.getCXXScopeSpec().isSet()) { NewVD->setTemplateParameterListsInfo(Context, TemplateParamLists.size(), - TemplateParamLists.release()); + TemplateParamLists.data()); } if (D.getDeclSpec().isConstexprSpecified()) @@ -4281,6 +4405,14 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, // Handle attributes prior to checking for duplicates in MergeVarDecl ProcessDeclAttributes(S, NewVD, D); + if (getLangOpts().CUDA) { + // CUDA B.2.5: "__shared__ and __constant__ variables have implied static + // storage [duration]." + if (SC == SC_None && S->getFnParent() != 0 && + (NewVD->hasAttr() || NewVD->hasAttr())) + NewVD->setStorageClass(SC_Static); + } + // In auto-retain/release, infer strong retension for variables of // retainable type. 
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD)) @@ -4490,7 +4622,8 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, if (NewVD->isInvalidDecl()) return false; - QualType T = NewVD->getType(); + TypeSourceInfo *TInfo = NewVD->getTypeSourceInfo(); + QualType T = TInfo->getType(); if (T->isObjCObjectType()) { Diag(NewVD->getLocation(), diag::err_statically_allocated_object) @@ -4522,8 +4655,10 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, && !NewVD->hasAttr()) { if (getLangOpts().getGC() != LangOptions::NonGC) Diag(NewVD->getLocation(), diag::warn_gc_attribute_weak_on_local); - else + else { + assert(!getLangOpts().ObjCAutoRefCount); Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local); + } } bool isVM = T->isVariablyModifiedType(); @@ -4535,11 +4670,10 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, (T->isVariableArrayType() && NewVD->hasGlobalStorage())) { bool SizeIsNegative; llvm::APSInt Oversized; - QualType FixedTy = - TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative, - Oversized); - - if (FixedTy.isNull() && T->isVariableArrayType()) { + TypeSourceInfo *FixedTInfo = + TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context, + SizeIsNegative, Oversized); + if (FixedTInfo == 0 && T->isVariableArrayType()) { const VariableArrayType *VAT = Context.getAsVariableArrayType(T); // FIXME: This won't give the correct result for // int a[10][n]; @@ -4558,7 +4692,7 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, return false; } - if (FixedTy.isNull()) { + if (FixedTInfo == 0) { if (NewVD->isFileVarDecl()) Diag(NewVD->getLocation(), diag::err_vm_decl_in_file_scope); else @@ -4568,7 +4702,8 @@ bool Sema::CheckVariableDeclaration(VarDecl *NewVD, } Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size); - NewVD->setType(FixedTy); + NewVD->setType(FixedTInfo->getType()); + NewVD->setTypeSourceInfo(FixedTInfo); } if (Previous.empty() && NewVD->isExternC()) { @@ -4655,6 +4790,31 @@ static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier, return false; } +namespace { + enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted }; +} +/// \brief Report an error regarding overriding, along with any relevant +/// overriden methods. +/// +/// \param DiagID the primary error to report. +/// \param MD the overriding method. +/// \param OEK which overrides to include as notes. +static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD, + OverrideErrorKind OEK = OEK_All) { + S.Diag(MD->getLocation(), DiagID) << MD->getDeclName(); + for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(), + E = MD->end_overridden_methods(); + I != E; ++I) { + // This check (& the OEK parameter) could be replaced by a predicate, but + // without lambdas that would be overkill. This is still nicer than writing + // out the diag loop 3 times. + if ((OEK == OEK_All) || + (OEK == OEK_NonDeleted && !(*I)->isDeleted()) || + (OEK == OEK_Deleted && (*I)->isDeleted())) + S.Diag((*I)->getLocation(), diag::note_overridden_virtual_function); + } +} + /// AddOverriddenMethods - See if a method overrides any in the base classes, /// and if so, check that it's a valid override and remember it. 
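
// Sketch of the override rule the new OEK_Deleted / OEK_NonDeleted reporting
// enforces (C++11 [class.virtual]): a deleted virtual function and its
// overriders must agree about being deleted. Illustrative types only.
struct Base {
  virtual void removed() = delete;
  virtual void kept();
};
struct Derived : Base {
  void removed() = delete;     // OK: deleted overriding deleted
  void kept();                 // OK: non-deleted overriding non-deleted
  // void kept() = delete;     // would be rejected via err_deleted_override
};
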
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) { @@ -4663,6 +4823,8 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) { FindOverriddenMethodData Data; Data.Method = MD; Data.S = this; + bool hasDeletedOverridenMethods = false; + bool hasNonDeletedOverridenMethods = false; bool AddedAny = false; if (DC->lookupInBases(&FindOverriddenMethod, &Data, Paths)) { for (CXXBasePaths::decl_iterator I = Paths.found_decls_begin(), @@ -4672,12 +4834,21 @@ bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) { if (!CheckOverridingFunctionReturnType(MD, OldMD) && !CheckOverridingFunctionExceptionSpec(MD, OldMD) && !CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) { + hasDeletedOverridenMethods |= OldMD->isDeleted(); + hasNonDeletedOverridenMethods |= !OldMD->isDeleted(); AddedAny = true; } } } } - + + if (hasDeletedOverridenMethods && !MD->isDeleted()) { + ReportOverrides(*this, diag::err_non_deleted_override, MD, OEK_Deleted); + } + if (hasNonDeletedOverridenMethods && MD->isDeleted()) { + ReportOverrides(*this, diag::err_deleted_override, MD, OEK_NonDeleted); + } + return AddedAny; } @@ -4837,6 +5008,10 @@ static NamedDecl* DiagnoseInvalidRedeclaration( } if (Correction) { + // FIXME: use Correction.getCorrectionRange() instead of computing the range + // here. This requires passing in the CXXScopeSpec to CorrectTypo which in + // turn causes the correction to fully qualify the name. If we fix + // CorrectTypo to minimally qualify then this change should be good. SourceRange FixItLoc(NewFD->getLocation()); CXXScopeSpec &SS = ExtraArgs.D.getCXXScopeSpec(); if (Correction.getCorrectionSpecifier() && SS.isValid()) @@ -5072,6 +5247,22 @@ static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D, } } +void Sema::checkVoidParamDecl(ParmVarDecl *Param) { + // In C++, the empty parameter-type-list must be spelled "void"; a + // typedef of void is not permitted. + if (getLangOpts().CPlusPlus && + Param->getType().getUnqualifiedType() != Context.VoidTy) { + bool IsTypeAlias = false; + if (const TypedefType *TT = Param->getType()->getAs()) + IsTypeAlias = isa(TT->getDecl()); + else if (const TemplateSpecializationType *TST = + Param->getType()->getAs()) + IsTypeAlias = TST->isTypeAlias(); + Diag(Param->getLocation(), diag::err_param_typedef_of_void) + << IsTypeAlias; + } +} + NamedDecl* Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, @@ -5138,6 +5329,15 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, NewFD->setImplicitlyInline(); } + // If this is a method defined in an __interface, and is not a constructor + // or an overloaded operator, then set the pure flag (isVirtual will already + // return true). 
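
// Rough illustration of the comment above (assuming -fms-extensions): each
// user-provided __interface method behaves as a pure virtual function, so a
// class implementing the interface has to override it before it can be
// instantiated. Names are made up for the sketch.
__interface ICounter {
  int next();                        // effectively 'virtual int next() = 0;'
};
struct Counter : ICounter {
  int current;
  Counter() : current(0) {}
  int next() { return ++current; }   // must be provided, or Counter stays abstract
};
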
+ if (const CXXRecordDecl *Parent = + dyn_cast(NewFD->getDeclContext())) { + if (Parent->isInterface() && cast(NewFD)->isUserProvided()) + NewFD->setPure(true); + } + SetNestedNameSpecifier(NewFD, D); isExplicitSpecialization = false; isFunctionTemplateSpecialization = false; @@ -5157,7 +5357,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, D.getDeclSpec().getLocStart(), D.getIdentifierLoc(), D.getCXXScopeSpec(), - TemplateParamLists.get(), + TemplateParamLists.data(), TemplateParamLists.size(), isFriend, isExplicitSpecialization, @@ -5196,7 +5396,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, if (TemplateParamLists.size() > 1) { NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists.size() - 1, - TemplateParamLists.release()); + TemplateParamLists.data()); } } else { // This is a function template specialization. @@ -5204,7 +5404,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, // For source fidelity, store all the template param lists. NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists.size(), - TemplateParamLists.release()); + TemplateParamLists.data()); // C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);". if (isFriend) { @@ -5236,7 +5436,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, // For source fidelity, store all the template param lists. NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists.size(), - TemplateParamLists.release()); + TemplateParamLists.data()); } if (Invalid) { @@ -5376,6 +5576,20 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, diag::err_static_out_of_line) << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc()); } + + // C++11 [except.spec]p15: + // A deallocation function with no exception-specification is treated + // as if it were specified with noexcept(true). + const FunctionProtoType *FPT = R->getAs(); + if ((Name.getCXXOverloadedOperator() == OO_Delete || + Name.getCXXOverloadedOperator() == OO_Array_Delete) && + getLangOpts().CPlusPlus0x && FPT && !FPT->hasExceptionSpec()) { + FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); + EPI.ExceptionSpecType = EST_BasicNoexcept; + NewFD->setType(Context.getFunctionType(FPT->getResultType(), + FPT->arg_type_begin(), + FPT->getNumArgs(), EPI)); + } } // Filter out previous declarations that don't match the scope. @@ -5413,21 +5627,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, FTI.ArgInfo[0].Param && cast(FTI.ArgInfo[0].Param)->getType()->isVoidType()) { // Empty arg list, don't push any params. - ParmVarDecl *Param = cast(FTI.ArgInfo[0].Param); - - // In C++, the empty parameter-type-list must be spelled "void"; a - // typedef of void is not permitted. 
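
// Minimal sketch of the C++11 [except.spec]p15 handling added above: a
// deallocation function declared with no exception specification is treated
// as if it were declared noexcept(true) (class-specific forms shown; the
// global operators get the same treatment).
struct Pool {
  void operator delete(void *ptr);     // implicitly noexcept in C++11 mode
  void operator delete[](void *ptr);   // likewise
};
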
- if (getLangOpts().CPlusPlus && - Param->getType().getUnqualifiedType() != Context.VoidTy) { - bool IsTypeAlias = false; - if (const TypedefType *TT = Param->getType()->getAs()) - IsTypeAlias = isa(TT->getDecl()); - else if (const TemplateSpecializationType *TST = - Param->getType()->getAs()) - IsTypeAlias = TST->isTypeAlias(); - Diag(Param->getLocation(), diag::err_param_typedef_of_void) - << IsTypeAlias; - } + checkVoidParamDecl(cast(FTI.ArgInfo[0].Param)); } else if (FTI.NumArgs > 0 && FTI.ArgInfo[0].Param != 0) { for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) { ParmVarDecl *Param = cast(FTI.ArgInfo[i].Param); @@ -5500,6 +5700,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous, isExplicitSpecialization)); } + // Make graceful recovery from an invalid redeclaration. + else if (!Previous.empty()) + D.setRedeclaration(true); assert((NewFD->isInvalidDecl() || !D.isRedeclaration() || Previous.getResultKind() != LookupResult::FoundOverloaded) && "previous declaration set still overloaded"); @@ -5510,12 +5713,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, TemplateIdAnnotation *TemplateId = D.getName().TemplateId; TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc); TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc); - ASTTemplateArgsPtr TemplateArgsPtr(*this, - TemplateId->getTemplateArgs(), + ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); translateTemplateArguments(TemplateArgsPtr, TemplateArgs); - TemplateArgsPtr.release(); HasExplicitTemplateArgs = true; @@ -5969,20 +6170,12 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, // Find any virtual functions that this function overrides. if (CXXMethodDecl *Method = dyn_cast(NewFD)) { if (!Method->isFunctionTemplateSpecialization() && - !Method->getDescribedFunctionTemplate()) { + !Method->getDescribedFunctionTemplate() && + Method->isCanonicalDecl()) { if (AddOverriddenMethods(Method->getParent(), Method)) { // If the function was marked as "static", we have a problem. if (NewFD->getStorageClass() == SC_Static) { - Diag(NewFD->getLocation(), diag::err_static_overrides_virtual) - << NewFD->getDeclName(); - for (CXXMethodDecl::method_iterator - Overridden = Method->begin_overridden_methods(), - OverriddenEnd = Method->end_overridden_methods(); - Overridden != OverriddenEnd; - ++Overridden) { - Diag((*Overridden)->getLocation(), - diag::note_overridden_virtual_function); - } + ReportOverrides(*this, diag::err_static_overrides_virtual, Method); } } } @@ -6191,28 +6384,12 @@ namespace { } } - // Sometimes, the expression passed in lacks the casts that are used - // to determine which DeclRefExpr's to check. Assume that the casts - // are present and continue visiting the expression. - void HandleExpr(Expr *E) { - // Skip checking T a = a where T is not a record or reference type. - // Doing so is a way to silence uninitialized warnings. - if (isRecordType || isReferenceType) - if (DeclRefExpr *DRE = dyn_cast(E)) - HandleDeclRefExpr(DRE); - - if (ConditionalOperator *CO = dyn_cast(E)) { - HandleValue(CO->getTrueExpr()); - HandleValue(CO->getFalseExpr()); - } - - Visit(E); - } - // For most expressions, the cast is directly above the DeclRefExpr. // For conditional operators, the cast can be outside the conditional // operator if both expressions are DeclRefExpr's. 
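
// The check moved into checkVoidParamDecl above, in example form: in C++ an
// empty parameter list has to be spelled with the keyword 'void' itself, not
// with a typedef (or alias) of void.
typedef void VOID;
void fine(void);      // OK: genuinely empty parameter list
// void broken(VOID); // error: a typedef of 'void' cannot form an empty
//                    // parameter list
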
void HandleValue(Expr *E) { + if (isReferenceType) + return; E = E->IgnoreParenImpCasts(); if (DeclRefExpr* DRE = dyn_cast(E)) { HandleDeclRefExpr(DRE); @@ -6222,11 +6399,32 @@ namespace { if (ConditionalOperator *CO = dyn_cast(E)) { HandleValue(CO->getTrueExpr()); HandleValue(CO->getFalseExpr()); + return; + } + + if (isa(E)) { + Expr *Base = E->IgnoreParenImpCasts(); + while (MemberExpr *ME = dyn_cast(Base)) { + // Check for static member variables and don't warn on them. + if (!isa(ME->getMemberDecl())) + return; + Base = ME->getBase()->IgnoreParenImpCasts(); + } + if (DeclRefExpr *DRE = dyn_cast(Base)) + HandleDeclRefExpr(DRE); + return; } } + // Reference types are handled here since all uses of references are + // bad, not just r-value uses. + void VisitDeclRefExpr(DeclRefExpr *E) { + if (isReferenceType) + HandleDeclRefExpr(E); + } + void VisitImplicitCastExpr(ImplicitCastExpr *E) { - if ((!isRecordType && E->getCastKind() == CK_LValueToRValue) || + if (E->getCastKind() == CK_LValueToRValue || (isRecordType && E->getCastKind() == CK_NoOp)) HandleValue(E->getSubExpr()); @@ -6237,22 +6435,36 @@ namespace { // Don't warn on arrays since they can be treated as pointers. if (E->getType()->canDecayToPointerType()) return; - ValueDecl *VD = E->getMemberDecl(); - CXXMethodDecl *MD = dyn_cast(VD); - if (isa(VD) || (MD && !MD->isStatic())) - if (DeclRefExpr *DRE - = dyn_cast(E->getBase()->IgnoreParenImpCasts())) { + // Warn when a non-static method call is followed by non-static member + // field accesses, which is followed by a DeclRefExpr. + CXXMethodDecl *MD = dyn_cast(E->getMemberDecl()); + bool Warn = (MD && !MD->isStatic()); + Expr *Base = E->getBase()->IgnoreParenImpCasts(); + while (MemberExpr *ME = dyn_cast(Base)) { + if (!isa(ME->getMemberDecl())) + Warn = false; + Base = ME->getBase()->IgnoreParenImpCasts(); + } + + if (DeclRefExpr *DRE = dyn_cast(Base)) { + if (Warn) HandleDeclRefExpr(DRE); - return; - } + return; + } - Inherited::VisitMemberExpr(E); + // The base of a MemberExpr is not a MemberExpr or a DeclRefExpr. + // Visit that expression. + Visit(Base); } void VisitUnaryOperator(UnaryOperator *E) { // For POD record types, addresses of its own members are well-defined. - if (E->getOpcode() == UO_AddrOf && isRecordType && isPODType && - isa(E->getSubExpr()->IgnoreParens())) return; + if (E->getOpcode() == UO_AddrOf && isRecordType && + isa(E->getSubExpr()->IgnoreParens())) { + if (!isPODType) + HandleValue(E->getSubExpr()); + return; + } Inherited::VisitUnaryOperator(E); } @@ -6261,20 +6473,38 @@ namespace { void HandleDeclRefExpr(DeclRefExpr *DRE) { Decl* ReferenceDecl = DRE->getDecl(); if (OrigDecl != ReferenceDecl) return; - LookupResult Result(S, DRE->getNameInfo(), Sema::LookupOrdinaryName, - Sema::NotForRedeclaration); + unsigned diag = isReferenceType + ? diag::warn_uninit_self_reference_in_reference_init + : diag::warn_uninit_self_reference_in_init; S.DiagRuntimeBehavior(DRE->getLocStart(), DRE, - S.PDiag(diag::warn_uninit_self_reference_in_init) - << Result.getLookupName() + S.PDiag(diag) + << DRE->getNameInfo().getName() << OrigDecl->getLocation() << DRE->getSourceRange()); } }; -} -/// CheckSelfReference - Warns if OrigDecl is used in expression E. -void Sema::CheckSelfReference(Decl* OrigDecl, Expr *E) { - SelfReferenceChecker(*this, OrigDecl).HandleExpr(E); + /// CheckSelfReference - Warns if OrigDecl is used in expression E. 
+ static void CheckSelfReference(Sema &S, Decl* OrigDecl, Expr *E, + bool DirectInit) { + // Parameters arguments are occassionially constructed with itself, + // for instance, in recursive functions. Skip them. + if (isa(OrigDecl)) + return; + + E = E->IgnoreParens(); + + // Skip checking T a = a where T is not a record or reference type. + // Doing so is a way to silence uninitialized warnings. + if (!DirectInit && !cast(OrigDecl)->getType()->isRecordType()) + if (ImplicitCastExpr *ICE = dyn_cast(E)) + if (ICE->getCastKind() == CK_LValueToRValue) + if (DeclRefExpr *DRE = dyn_cast(ICE->getSubExpr())) + if (DRE->getDecl() == OrigDecl) + return; + + SelfReferenceChecker(S, OrigDecl).Visit(E); + } } /// AddInitializerToDecl - Adds the initializer Init to the @@ -6311,15 +6541,6 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, return; } - // Check for self-references within variable initializers. - // Variables declared within a function/method body (except for references) - // are handled by a dataflow analysis. - // Record types initialized by initializer list are handled here. - // Initialization by constructors are handled in TryConstructorInitialization. - if ((!VDecl->hasLocalStorage() || VDecl->getType()->isReferenceType()) && - (isa(Init) || !VDecl->getType()->isRecordType())) - CheckSelfReference(RealDecl, Init); - ParenListExpr *CXXDirectInit = dyn_cast(Init); // C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for. @@ -6495,8 +6716,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, } InitializationSequence InitSeq(*this, Entity, Kind, Args, NumArgs); ExprResult Result = InitSeq.Perform(*this, Entity, Kind, - MultiExprArg(*this, Args,NumArgs), - &DclT); + MultiExprArg(Args, NumArgs), &DclT); if (Result.isInvalid()) { VDecl->setInvalidDecl(); return; @@ -6505,6 +6725,14 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, Init = Result.takeAs(); } + // Check for self-references within variable initializers. + // Variables declared within a function/method body (except for references) + // are handled by a dataflow analysis. + if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() || + VDecl->getType()->isReferenceType()) { + CheckSelfReference(*this, RealDecl, Init, DirectInit); + } + // If the type changed, it means we had an incomplete type that was // completed by the initializer. For example: // int ary[] = { 1, 3, 5 }; @@ -6515,9 +6743,28 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, // Check any implicit conversions within the expression. CheckImplicitConversions(Init, VDecl->getLocation()); - if (!VDecl->isInvalidDecl()) + if (!VDecl->isInvalidDecl()) { checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init); + if (VDecl->hasAttr()) + checkRetainCycles(VDecl, Init); + + // It is safe to assign a weak reference into a strong variable. + // Although this code can still have problems: + // id x = self.weakProp; + // id y = self.weakProp; + // we do not warn to warn spuriously when 'x' and 'y' are on separate + // paths through the function. This should be revisited if + // -Wrepeated-use-of-weak is made flow-sensitive. + if (VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong) { + DiagnosticsEngine::Level Level = + Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, + Init->getLocStart()); + if (Level != DiagnosticsEngine::Ignored) + getCurFunction()->markSafeWeakUse(Init); + } + } + Init = MaybeCreateExprWithCleanups(Init); // Attach the initializer to the decl. 
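
// Roughly the kind of initializer the relocated CheckSelfReference call now
// flags once the initializer has been fully built; record and reference types
// are handled here, while plain locals are left to the dataflow analysis.
// Variable names are illustrative.
struct Pair { int a, b; };
Pair whole = whole;        // warning: 'whole' is uninitialized when used
                           // within its own initialization
int &alias = alias;        // the new reference-specific flavour of the warning
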
VDecl->setInit(Init); @@ -6758,8 +7005,10 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl, AbstractVariableType)) Var->setInvalidDecl(); if (!Type->isDependentType() && !Var->isInvalidDecl() && - Var->getStorageClass() == SC_PrivateExtern) + Var->getStorageClass() == SC_PrivateExtern) { Diag(Var->getLocation(), diag::warn_private_extern); + Diag(Var->getLocation(), diag::note_private_extern); + } return; @@ -6881,8 +7130,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl, = InitializationKind::CreateDefault(Var->getLocation()); InitializationSequence InitSeq(*this, Entity, Kind, 0, 0); - ExprResult Init = InitSeq.Perform(*this, Entity, Kind, - MultiExprArg(*this, 0, 0)); + ExprResult Init = InitSeq.Perform(*this, Entity, Kind, MultiExprArg()); if (Init.isInvalid()) Var->setInvalidDecl(); else if (Init.get()) { @@ -6957,11 +7205,22 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { } } + if (var->isThisDeclarationADefinition() && + var->getLinkage() == ExternalLinkage) { + // Find a previous declaration that's not a definition. + VarDecl *prev = var->getPreviousDecl(); + while (prev && prev->isThisDeclarationADefinition()) + prev = prev->getPreviousDecl(); + + if (!prev) + Diag(var->getLocation(), diag::warn_missing_variable_declarations) << var; + } + // All the following checks are C++ only. if (!getLangOpts().CPlusPlus) return; - QualType baseType = Context.getBaseElementType(var->getType()); - if (baseType->isDependentType()) return; + QualType type = var->getType(); + if (type->isDependentType()) return; // __block variables might require us to capture a copy-initializer. if (var->hasAttr()) { @@ -6970,8 +7229,6 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { // Regardless, we don't want to ignore array nesting when // constructing this copy. - QualType type = var->getType(); - if (type->isStructureOrClassType()) { SourceLocation poi = var->getLocation(); Expr *varRef =new (Context) DeclRefExpr(var, false, type, VK_LValue, poi); @@ -6989,8 +7246,10 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { Expr *Init = var->getInit(); bool IsGlobal = var->hasGlobalStorage() && !var->isStaticLocal(); + QualType baseType = Context.getBaseElementType(type); - if (!var->getDeclContext()->isDependentContext() && Init) { + if (!var->getDeclContext()->isDependentContext() && + Init && !Init->isValueDependent()) { if (IsGlobal && !var->isConstexpr() && getDiagnostics().getDiagnosticLevel(diag::warn_global_constructor, var->getLocation()) @@ -7178,7 +7437,7 @@ void Sema::ActOnDocumentableDecls(Decl **Group, unsigned NumDecls) { // the lookahead in the lexer: we've consumed the semicolon and looked // ahead through comments. for (unsigned i = 0; i != NumDecls; ++i) - Context.getCommentForDecl(Group[i]); + Context.getCommentForDecl(Group[i], &PP); } } @@ -7450,6 +7709,9 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, unsigned DiagID; // unused DS.SetTypeSpecType(DeclSpec::TST_int, FTI.ArgInfo[i].IdentLoc, PrevSpec, DiagID); + // Use the identifier location for the type source range. 
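
// A sketch of the new external-linkage check added above: defining a variable
// with external linkage that was never declared first is now flagged, which
// helps catch globals that should have been static or declared in a header.
// Illustrative names.
int undeclared_total = 0;       // definition with no prior declaration: warned
extern int declared_total;      // a prior declaration...
int declared_total = 0;         // ...so this definition is not reported
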
+ DS.SetRangeStart(FTI.ArgInfo[i].IdentLoc); + DS.SetRangeEnd(FTI.ArgInfo[i].IdentLoc); Declarator ParamD(DS, Declarator::KNRTypeListContext); ParamD.SetIdentifier(FTI.ArgInfo[i].Ident, FTI.ArgInfo[i].IdentLoc); FTI.ArgInfo[i].Param = ActOnParamDeclarator(S, ParamD); @@ -7464,8 +7726,7 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D) { Scope *ParentScope = FnBodyScope->getParent(); D.setFunctionDefinitionKind(FDK_Definition); - Decl *DP = HandleDeclarator(ParentScope, D, - MultiTemplateParamsArg(*this)); + Decl *DP = HandleDeclarator(ParentScope, D, MultiTemplateParamsArg()); return ActOnStartOfFunctionDef(FnBodyScope, DP); } @@ -7707,7 +7968,7 @@ void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) { } Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) { - return ActOnFinishFunctionBody(D, move(BodyArg), false); + return ActOnFinishFunctionBody(D, BodyArg, false); } Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, @@ -7765,22 +8026,16 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body, if (Body) computeNRVO(Body, getCurFunction()); } - if (getCurFunction()->ObjCShouldCallSuperDealloc) { - Diag(MD->getLocEnd(), diag::warn_objc_missing_super_dealloc); - getCurFunction()->ObjCShouldCallSuperDealloc = false; - } - if (getCurFunction()->ObjCShouldCallSuperFinalize) { - Diag(MD->getLocEnd(), diag::warn_objc_missing_super_finalize); - getCurFunction()->ObjCShouldCallSuperFinalize = false; + if (getCurFunction()->ObjCShouldCallSuper) { + Diag(MD->getLocEnd(), diag::warn_objc_missing_super_call) + << MD->getSelector().getAsString(); + getCurFunction()->ObjCShouldCallSuper = false; } } else { return 0; } - assert(!getCurFunction()->ObjCShouldCallSuperDealloc && - "This should only be set for ObjC methods, which should have been " - "handled in the block above."); - assert(!getCurFunction()->ObjCShouldCallSuperFinalize && + assert(!getCurFunction()->ObjCShouldCallSuper && "This should only be set for ObjC methods, which should have been " "handled in the block above."); @@ -7916,13 +8171,28 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc, bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID); (void)Error; // Silence warning. 
assert(!Error && "Error setting up implicit decl!"); + SourceLocation NoLoc; Declarator D(DS, Declarator::BlockContext); - D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, false, - SourceLocation(), 0, 0, 0, true, - SourceLocation(), SourceLocation(), - SourceLocation(), SourceLocation(), - EST_None, SourceLocation(), - 0, 0, 0, 0, Loc, Loc, D), + D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false, + /*IsAmbiguous=*/false, + /*RParenLoc=*/NoLoc, + /*ArgInfo=*/0, + /*NumArgs=*/0, + /*EllipsisLoc=*/NoLoc, + /*RParenLoc=*/NoLoc, + /*TypeQuals=*/0, + /*RefQualifierIsLvalueRef=*/true, + /*RefQualifierLoc=*/NoLoc, + /*ConstQualifierLoc=*/NoLoc, + /*VolatileQualifierLoc=*/NoLoc, + /*MutableLoc=*/NoLoc, + EST_None, + /*ESpecLoc=*/NoLoc, + /*Exceptions=*/0, + /*ExceptionRanges=*/0, + /*NumExceptions=*/0, + /*NoexceptExpr=*/0, + Loc, Loc, D), DS.getAttributes(), SourceLocation()); D.SetIdentifier(&II, Loc); @@ -8071,6 +8341,7 @@ TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T, switch (D.getDeclSpec().getTypeSpecType()) { case TST_enum: case TST_struct: + case TST_interface: case TST_union: case TST_class: { TagDecl *tagFromDeclSpec = cast(D.getDeclSpec().getRepAsDecl()); @@ -8146,6 +8417,29 @@ bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, return false; } +/// \brief Get diagnostic %select index for tag kind for +/// redeclaration diagnostic message. +/// WARNING: Indexes apply to particular diagnostics only! +/// +/// \returns diagnostic %select index. +static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) { + switch (Tag) { + case TTK_Struct: return 0; + case TTK_Interface: return 1; + case TTK_Class: return 2; + default: llvm_unreachable("Invalid tag kind for redecl diagnostic!"); + } +} + +/// \brief Determine if tag kind is a class-key compatible with +/// class for redeclaration (class, struct, or __interface). +/// +/// \returns true iff the tag kind is compatible. +static bool isClassCompatTagKind(TagTypeKind Tag) +{ + return Tag == TTK_Struct || Tag == TTK_Class || Tag == TTK_Interface; +} + /// \brief Determine whether a tag with a given kind is acceptable /// as a redeclaration of the given tag declaration. /// @@ -8168,12 +8462,11 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous, // struct class-key shall be used to refer to a class (clause 9) // declared using the class or struct class-key. TagTypeKind OldTag = Previous->getTagKind(); - if (!isDefinition || (NewTag != TTK_Class && NewTag != TTK_Struct)) + if (!isDefinition || !isClassCompatTagKind(NewTag)) if (OldTag == NewTag) return true; - if ((OldTag == TTK_Struct || OldTag == TTK_Class) && - (NewTag == TTK_Struct || NewTag == TTK_Class)) { + if (isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)) { // Warn about the struct/class tag mismatch. bool isTemplate = false; if (const CXXRecordDecl *Record = dyn_cast(Previous)) @@ -8183,7 +8476,8 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous, // In a template instantiation, do not offer fix-its for tag mismatches // since they usually mess up the template instead of fixing the problem. 
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch) - << (NewTag == TTK_Class) << isTemplate << &Name; + << getRedeclDiagFromTagKind(NewTag) << isTemplate << &Name + << getRedeclDiagFromTagKind(OldTag); return true; } @@ -8202,13 +8496,13 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous, if (!previousMismatch) { previousMismatch = true; Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch) - << (NewTag == TTK_Class) << isTemplate << &Name; + << getRedeclDiagFromTagKind(NewTag) << isTemplate << &Name + << getRedeclDiagFromTagKind(I->getTagKind()); } Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion) - << (NewTag == TTK_Class) + << getRedeclDiagFromTagKind(NewTag) << FixItHint::CreateReplacement(I->getInnerLocStart(), - NewTag == TTK_Class? - "class" : "struct"); + TypeWithKeyword::getTagTypeKindName(NewTag)); } } return true; @@ -8224,16 +8518,16 @@ bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous, } Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch) - << (NewTag == TTK_Class) - << isTemplate << &Name; + << getRedeclDiagFromTagKind(NewTag) << isTemplate << &Name + << getRedeclDiagFromTagKind(OldTag); Diag(Redecl->getLocation(), diag::note_previous_use); // If there is a previous defintion, suggest a fix-it. if (Previous->getDefinition()) { Diag(NewTagLoc, diag::note_struct_class_suggestion) - << (Redecl->getTagKind() == TTK_Class) + << getRedeclDiagFromTagKind(Redecl->getTagKind()) << FixItHint::CreateReplacement(SourceRange(NewTagLoc), - Redecl->getTagKind() == TTK_Class? "class" : "struct"); + TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind())); } return true; @@ -8276,7 +8570,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, (SS.isNotEmpty() && TUK != TUK_Reference)) { if (TemplateParameterList *TemplateParams = MatchTemplateParametersToScopeSpecifier(KWLoc, NameLoc, SS, - TemplateParameterLists.get(), + TemplateParameterLists.data(), TemplateParameterLists.size(), TUK == TUK_Friend, isExplicitSpecialization, @@ -8293,8 +8587,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SS, Name, NameLoc, Attr, TemplateParams, AS, ModulePrivateLoc, - TemplateParameterLists.size() - 1, - (TemplateParameterList**) TemplateParameterLists.release()); + TemplateParameterLists.size()-1, + TemplateParameterLists.data()); return Result.get(); } else { // The "template<>" header is extraneous. 
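
// The reworked tag-mismatch diagnostics above now select among 'struct',
// '__interface', and 'class' rather than a struct/class boolean. The classic
// case they cover, with made-up names:
struct Widget;            // first declared with 'struct'
class Widget {            // -Wmismatched-tags style warning: 'Widget' was
  int id;                 // previously declared as a struct; the fix-it
public:                   // offers the matching class-key
  int get() const { return id; }
};
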
@@ -8843,7 +9137,7 @@ CreateNewDecl: if (TemplateParameterLists.size() > 0) { New->setTemplateParameterListsInfo(Context, TemplateParameterLists.size(), - (TemplateParameterList**) TemplateParameterLists.release()); + TemplateParameterLists.data()); } } else @@ -9298,12 +9592,15 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T, if (!InvalidDecl && T->isVariablyModifiedType()) { bool SizeIsNegative; llvm::APSInt Oversized; - QualType FixedTy = TryToFixInvalidVariablyModifiedType(T, Context, - SizeIsNegative, - Oversized); - if (!FixedTy.isNull()) { + + TypeSourceInfo *FixedTInfo = + TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context, + SizeIsNegative, + Oversized); + if (FixedTInfo) { Diag(Loc, diag::warn_illegal_constant_array_size); - T = FixedTy; + TInfo = FixedTInfo; + T = FixedTInfo->getType(); } else { if (SizeIsNegative) Diag(Loc, diag::err_typecheck_negative_array_size); @@ -9460,12 +9757,12 @@ bool Sema::CheckNontrivialField(FieldDecl *FD) { return false; } -/// If the given constructor is user-provided, produce a diagnostic explaining +/// If the given constructor is user-declared, produce a diagnostic explaining /// that it makes the class non-trivial. -static bool DiagnoseNontrivialUserProvidedCtor(Sema &S, QualType QT, +static bool diagnoseNonTrivialUserDeclaredCtor(Sema &S, QualType QT, CXXConstructorDecl *CD, Sema::CXXSpecialMember CSM) { - if (!CD->isUserProvided()) + if (CD->isImplicit()) return false; SourceLocation CtorLoc = CD->getLocation(); @@ -9488,17 +9785,17 @@ void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) { if (RD->hasUserDeclaredConstructor()) { typedef CXXRecordDecl::ctor_iterator ctor_iter; for (ctor_iter CI = RD->ctor_begin(), CE = RD->ctor_end(); CI != CE; ++CI) - if (DiagnoseNontrivialUserProvidedCtor(*this, QT, *CI, member)) + if (diagnoseNonTrivialUserDeclaredCtor(*this, QT, *CI, member)) return; - // No user-provided constructors; look for constructor templates. + // No user-delcared constructors; look for constructor templates. typedef CXXRecordDecl::specific_decl_iterator tmpl_iter; for (tmpl_iter TI(RD->decls_begin()), TE(RD->decls_end()); TI != TE; ++TI) { CXXConstructorDecl *CD = dyn_cast(TI->getTemplatedDecl()); - if (CD && DiagnoseNontrivialUserProvidedCtor(*this, QT, CD, member)) + if (CD && diagnoseNonTrivialUserDeclaredCtor(*this, QT, CD, member)) return; } } @@ -10025,42 +10322,6 @@ void Sema::ActOnFields(Scope* S, Convs->setAccess(I, (*I)->getAccess()); if (!CXXRecord->isDependentType()) { - // Objective-C Automatic Reference Counting: - // If a class has a non-static data member of Objective-C pointer - // type (or array thereof), it is a non-POD type and its - // default constructor (if any), copy constructor, copy assignment - // operator, and destructor are non-trivial. - // - // This rule is also handled by CXXRecordDecl::completeDefinition(). - // However, here we check whether this particular class is only - // non-POD because of the presence of an Objective-C pointer member. - // If so, objects of this type cannot be shared between code compiled - // with ARC and code compiled with manual retain/release. - if (getLangOpts().ObjCAutoRefCount && - CXXRecord->hasObjectMember() && - CXXRecord->getLinkage() == ExternalLinkage) { - if (CXXRecord->isPOD()) { - Diag(CXXRecord->getLocation(), - diag::warn_arc_non_pod_class_with_object_member) - << CXXRecord; - } else { - // FIXME: Fix-Its would be nice here, but finding a good location - // for them is going to be tricky. 
- if (CXXRecord->hasTrivialCopyConstructor()) - Diag(CXXRecord->getLocation(), - diag::warn_arc_trivial_member_function_with_object_member) - << CXXRecord << 0; - if (CXXRecord->hasTrivialCopyAssignment()) - Diag(CXXRecord->getLocation(), - diag::warn_arc_trivial_member_function_with_object_member) - << CXXRecord << 1; - if (CXXRecord->hasTrivialDestructor()) - Diag(CXXRecord->getLocation(), - diag::warn_arc_trivial_member_function_with_object_member) - << CXXRecord << 2; - } - } - // Adjust user-defined destructor exception spec. if (getLangOpts().CPlusPlus0x && CXXRecord->hasUserDeclaredDestructor()) @@ -10092,7 +10353,7 @@ void Sema::ActOnFields(Scope* S, // class subobject has more than one final overrider the // program is ill-formed. Diag(Record->getLocation(), diag::err_multiple_final_overriders) - << (NamedDecl *)M->first << Record; + << (const NamedDecl *)M->first << Record; Diag(M->first->getLocation(), diag::note_overridden_virtual_function); for (OverridingMethods::overriding_iterator @@ -10100,7 +10361,7 @@ void Sema::ActOnFields(Scope* S, OMEnd = SO->second.end(); OM != OMEnd; ++OM) Diag(OM->Method->getLocation(), diag::note_final_overrider) - << (NamedDecl *)M->first << OM->Method->getParent(); + << (const NamedDecl *)M->first << OM->Method->getParent(); Record->setInvalidDecl(); } @@ -10461,57 +10722,6 @@ Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst, return New; } -// Emits a warning if every element in the enum is the same value and if -// every element is initialized with a integer or boolean literal. -static void CheckForUniqueEnumValues(Sema &S, Decl **Elements, - unsigned NumElements, EnumDecl *Enum, - QualType EnumType) { - if (S.Diags.getDiagnosticLevel(diag::warn_identical_enum_values, - Enum->getLocation()) == - DiagnosticsEngine::Ignored) - return; - - if (NumElements < 2) - return; - - if (!Enum->getIdentifier()) - return; - - llvm::APSInt FirstVal; - - for (unsigned i = 0; i != NumElements; ++i) { - EnumConstantDecl *ECD = cast_or_null(Elements[i]); - if (!ECD) - return; - - Expr *InitExpr = ECD->getInitExpr(); - if (!InitExpr) - return; - InitExpr = InitExpr->IgnoreImpCasts(); - if (!isa(InitExpr) && !isa(InitExpr)) - return; - - if (i == 0) { - FirstVal = ECD->getInitVal(); - continue; - } - - if (!llvm::APSInt::isSameValue(FirstVal, ECD->getInitVal())) - return; - } - - S.Diag(Enum->getLocation(), diag::warn_identical_enum_values) - << EnumType << FirstVal.toString(10) - << Enum->getSourceRange(); - - EnumConstantDecl *Last = cast(Elements[NumElements - 1]), - *Next = cast(Elements[NumElements - 2]); - - S.Diag(Last->getLocation(), diag::note_identical_enum_values) - << FixItHint::CreateReplacement(Last->getInitExpr()->getSourceRange(), - Next->getName()); -} - void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDeclX, Decl **Elements, unsigned NumElements, @@ -10734,8 +10944,6 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, // it needs to go into the function scope. if (InFunctionDeclarator) DeclsInPrototypeScope.push_back(Enum); - - CheckForUniqueEnumValues(*this, Elements, NumElements, Enum, EnumType); } Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr, @@ -10834,10 +11042,6 @@ Decl *Sema::getObjCDeclContext() const { } AvailabilityResult Sema::getCurContextAvailability() const { - const Decl *D = cast(getCurLexicalContext()); - // A category implicitly has the availability of the interface. 
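
// For context, the situation err_multiple_final_overriders (emitted just
// above) describes: with virtual inheritance, every virtual function must
// still end up with exactly one final overrider. Illustrative hierarchy:
struct Animal                 { virtual void speak(); };
struct Loud  : virtual Animal { void speak(); };
struct Quiet : virtual Animal { void speak(); };
struct Both  : Loud, Quiet    { void speak(); };  // OK: Both::speak is the
                                                  // unique final overrider
// struct Broken : Loud, Quiet {};                // error: more than one final
//                                                // overrider for Animal::speak
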
- if (const ObjCCategoryDecl *CatD = dyn_cast(D)) - D = CatD->getClassInterface(); - + const Decl *D = cast(getCurObjCLexicalContext()); return D->getAvailability(); } diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp index caa7b2f..e326a20 100644 --- a/lib/Sema/SemaDeclAttr.cpp +++ b/lib/Sema/SemaDeclAttr.cpp @@ -415,14 +415,19 @@ static void checkAttrArgsAreLockableObjs(Sema &S, Decl *D, } if (StringLiteral *StrLit = dyn_cast(ArgExp)) { - // Ignore empty strings without warnings - if (StrLit->getLength() == 0) + if (StrLit->getLength() == 0 || + StrLit->getString() == StringRef("*")) { + // Pass empty strings to the analyzer without warnings. + // Treat "*" as the universal lock. + Args.push_back(ArgExp); continue; + } // We allow constant strings to be used as a placeholder for expressions // that are not valid C++ syntax, but warn that they are ignored. S.Diag(Attr.getLoc(), diag::warn_thread_attribute_ignored) << Attr.getName(); + Args.push_back(ArgExp); continue; } @@ -859,7 +864,6 @@ static void handleLockReturnedAttr(Sema &S, Decl *D, if (!checkAttributeNumArgs(S, Attr, 1)) return; - Expr *Arg = Attr.getArg(0); if (!isa(D) && !isa(D)) { S.Diag(Attr.getLoc(), diag::warn_thread_attribute_wrong_decl_type) @@ -867,9 +871,6 @@ static void handleLockReturnedAttr(Sema &S, Decl *D, return; } - if (Arg->isTypeDependent()) - return; - // check that the argument is lockable object SmallVector Args; checkAttrArgsAreLockableObjs(S, D, Attr, Args); @@ -962,7 +963,8 @@ static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) { else if (FieldDecl *FD = dyn_cast(D)) { // If the alignment is less than or equal to 8 bits, the packed attribute // has no effect. - if (!FD->getType()->isIncompleteType() && + if (!FD->getType()->isDependentType() && + !FD->getType()->isIncompleteType() && S.Context.getTypeAlign(FD->getType()) <= 8) S.Diag(Attr.getLoc(), diag::warn_attribute_ignored_for_field_of_type) << Attr.getName() << FD->getType(); @@ -973,8 +975,8 @@ static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) { } static void handleMsStructAttr(Sema &S, Decl *D, const AttributeList &Attr) { - if (TagDecl *TD = dyn_cast(D)) - TD->addAttr(::new (S.Context) MsStructAttr(Attr.getRange(), S.Context)); + if (RecordDecl *RD = dyn_cast(D)) + RD->addAttr(::new (S.Context) MsStructAttr(Attr.getRange(), S.Context)); else S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); } @@ -1522,6 +1524,20 @@ static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) { Str->getString())); } +static void handleMinSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) { + // Check the attribute arguments. + if (!checkAttributeNumArgs(S, Attr, 0)) + return; + + if (!isa(D) && !isa(D)) { + S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type) + << Attr.getName() << ExpectedFunctionOrMethod; + return; + } + + D->addAttr(::new (S.Context) MinSizeAttr(Attr.getRange(), S.Context)); +} + static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Check the attribute arguments. 
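// Minimal usage sketch for the new 'minsize' attribute handled by
// handleMinSizeAttr above: it takes no arguments and applies to functions
// (and Objective-C methods); other declarations are rejected.
__attribute__((minsize))
void cold_error_path(void) {
  // body is optimized for size rather than speed
}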
if (!checkAttributeNumArgs(S, Attr, 0)) @@ -2268,16 +2284,14 @@ static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) { } if (TypedefNameDecl *TD = dyn_cast(D)) { QualType T = TD->getUnderlyingType(); - if (!T->isPointerType() || - !T->getAs()->getPointeeType()->isRecordType()) { + if (!T->isCARCBridgableType()) { S.Diag(TD->getLocation(), diag::err_nsobject_attribute); return; } } else if (ObjCPropertyDecl *PD = dyn_cast(D)) { QualType T = PD->getType(); - if (!T->isPointerType() || - !T->getAs()->getPointeeType()->isRecordType()) { + if (!T->isCARCBridgableType()) { S.Diag(PD->getLocation(), diag::err_nsobject_attribute); return; } @@ -3583,7 +3597,12 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) { } D->addAttr(::new (S.Context) PcsAttr(Attr.getRange(), S.Context, PCS)); + return; } + case AttributeList::AT_PnaclCall: + D->addAttr(::new (S.Context) PnaclCallAttr(Attr.getRange(), S.Context)); + return; + default: llvm_unreachable("unexpected attribute kind"); } @@ -3636,9 +3655,17 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) { Diag(attr.getLoc(), diag::err_invalid_pcs); return true; } + case AttributeList::AT_PnaclCall: CC = CC_PnaclCall; break; default: llvm_unreachable("unexpected attribute kind"); } + const TargetInfo &TI = Context.getTargetInfo(); + TargetInfo::CallingConvCheckResult A = TI.checkCallingConvention(CC); + if (A == TargetInfo::CCCR_Warning) { + Diag(attr.getLoc(), diag::warn_cconv_ignored) << attr.getName(); + CC = TI.getDefaultCallingConv(); + } + return false; } @@ -3878,11 +3905,11 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D, if (ObjCMethodDecl *MD = dyn_cast(D)) returnType = MD->getResultType(); - else if (ObjCPropertyDecl *PD = dyn_cast(D)) - returnType = PD->getType(); else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) && (Attr.getKind() == AttributeList::AT_NSReturnsRetained)) return; // ignore: was handled as a type attribute + else if (ObjCPropertyDecl *PD = dyn_cast(D)) + returnType = PD->getType(); else if (FunctionDecl *FD = dyn_cast(D)) returnType = FD->getResultType(); else { @@ -3971,6 +3998,33 @@ static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D, ::new (S.Context) ObjCReturnsInnerPointerAttr(attr.getRange(), S.Context)); } +static void handleObjCRequiresSuperAttr(Sema &S, Decl *D, + const AttributeList &attr) { + SourceLocation loc = attr.getLoc(); + ObjCMethodDecl *method = dyn_cast(D); + + if (!method) { + S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type) + << SourceRange(loc, loc) << attr.getName() << ExpectedMethod; + return; + } + DeclContext *DC = method->getDeclContext(); + if (const ObjCProtocolDecl *PDecl = dyn_cast_or_null(DC)) { + S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol) + << attr.getName() << 0; + S.Diag(PDecl->getLocation(), diag::note_protocol_decl); + return; + } + if (method->getMethodFamily() == OMF_dealloc) { + S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol) + << attr.getName() << 1; + return; + } + + method->addAttr( + ::new (S.Context) ObjCRequiresSuperAttr(attr.getRange(), S.Context)); +} + /// Handle cf_audited_transfer and cf_unknown_transfer. 
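// Sketch of the new target check in CheckCallingConvAttr above: when the
// target reports the requested convention as unsupported (CCCR_Warning), the
// attribute is diagnosed and the target's default convention is used instead.
// For example, on a target without stdcall support:
void handler(int) __attribute__((stdcall));  // warning (roughly): calling
                                             // convention 'stdcall' ignored
                                             // for this target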
static void handleCFTransferAttr(Sema &S, Decl *D, const AttributeList &A) { if (!isa(D)) { @@ -4149,19 +4203,21 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) { } static void handleInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) { - if (S.LangOpts.MicrosoftExt) { - AttributeList::Kind Kind = Attr.getKind(); - if (Kind == AttributeList::AT_SingleInheritance) - D->addAttr( - ::new (S.Context) SingleInheritanceAttr(Attr.getRange(), S.Context)); - else if (Kind == AttributeList::AT_MultipleInheritance) - D->addAttr( - ::new (S.Context) MultipleInheritanceAttr(Attr.getRange(), S.Context)); - else if (Kind == AttributeList::AT_VirtualInheritance) - D->addAttr( - ::new (S.Context) VirtualInheritanceAttr(Attr.getRange(), S.Context)); - } else + if (!S.LangOpts.MicrosoftExt) { S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); + return; + } + + AttributeList::Kind Kind = Attr.getKind(); + if (Kind == AttributeList::AT_SingleInheritance) + D->addAttr( + ::new (S.Context) SingleInheritanceAttr(Attr.getRange(), S.Context)); + else if (Kind == AttributeList::AT_MultipleInheritance) + D->addAttr( + ::new (S.Context) MultipleInheritanceAttr(Attr.getRange(), S.Context)); + else if (Kind == AttributeList::AT_VirtualInheritance) + D->addAttr( + ::new (S.Context) VirtualInheritanceAttr(Attr.getRange(), S.Context)); } static void handlePortabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) { @@ -4246,6 +4302,9 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D, case AttributeList::AT_ExtVectorType: handleExtVectorTypeAttr(S, scope, D, Attr); break; + case AttributeList::AT_MinSize: + handleMinSizeAttr(S, D, Attr); + break; case AttributeList::AT_Format: handleFormatAttr (S, D, Attr); break; case AttributeList::AT_FormatArg: handleFormatArgAttr (S, D, Attr); break; case AttributeList::AT_CUDAGlobal: handleGlobalAttr (S, D, Attr); break; @@ -4278,6 +4337,9 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D, case AttributeList::AT_ObjCReturnsInnerPointer: handleObjCReturnsInnerPointerAttr(S, D, Attr); break; + case AttributeList::AT_ObjCRequiresSuper: + handleObjCRequiresSuperAttr(S, D, Attr); break; + case AttributeList::AT_NSBridged: handleNSBridgedAttr(S, scope, D, Attr); break; @@ -4360,6 +4422,7 @@ static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D, case AttributeList::AT_ThisCall: case AttributeList::AT_Pascal: case AttributeList::AT_Pcs: + case AttributeList::AT_PnaclCall: handleCallConvAttr(S, D, Attr); break; case AttributeList::AT_OpenCLKernel: @@ -4774,18 +4837,25 @@ static bool isDeclDeprecated(Decl *D) { static void DoEmitDeprecationWarning(Sema &S, const NamedDecl *D, StringRef Message, SourceLocation Loc, - const ObjCInterfaceDecl *UnknownObjCClass) { + const ObjCInterfaceDecl *UnknownObjCClass, + const ObjCPropertyDecl *ObjCPropery) { DeclarationName Name = D->getDeclName(); if (!Message.empty()) { S.Diag(Loc, diag::warn_deprecated_message) << Name << Message; S.Diag(D->getLocation(), isa(D) ? diag::note_method_declared_at : diag::note_previous_decl) << Name; + if (ObjCPropery) + S.Diag(ObjCPropery->getLocation(), diag::note_property_attribute) + << ObjCPropery->getDeclName() << 0; } else if (!UnknownObjCClass) { S.Diag(Loc, diag::warn_deprecated) << D->getDeclName(); S.Diag(D->getLocation(), isa(D) ? 
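// Illustrative use of the Microsoft inheritance-model keywords routed through
// handleInheritanceAttr above; they are honored only under -fms-extensions
// and otherwise warned about and ignored.
class __single_inheritance Single;
class __multiple_inheritance Multiple;
class __virtual_inheritance Virtual;
int Single::*pm;  // pointer-to-member laid out for the single-inheritance model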
diag::note_method_declared_at : diag::note_previous_decl) << Name; + if (ObjCPropery) + S.Diag(ObjCPropery->getLocation(), diag::note_property_attribute) + << ObjCPropery->getDeclName() << 0; } else { S.Diag(Loc, diag::warn_deprecated_fwdclass_message) << Name; S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class); @@ -4800,16 +4870,19 @@ void Sema::HandleDelayedDeprecationCheck(DelayedDiagnostic &DD, DD.Triggered = true; DoEmitDeprecationWarning(*this, DD.getDeprecationDecl(), DD.getDeprecationMessage(), DD.Loc, - DD.getUnknownObjCClass()); + DD.getUnknownObjCClass(), + DD.getObjCProperty()); } void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message, SourceLocation Loc, - const ObjCInterfaceDecl *UnknownObjCClass) { + const ObjCInterfaceDecl *UnknownObjCClass, + const ObjCPropertyDecl *ObjCProperty) { // Delay if we're currently parsing a declaration. if (DelayedDiagnostics.shouldDelayDiagnostics()) { DelayedDiagnostics.add(DelayedDiagnostic::makeDeprecation(Loc, D, UnknownObjCClass, + ObjCProperty, Message)); return; } @@ -4817,5 +4890,5 @@ void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message, // Otherwise, don't warn if our current context is deprecated. if (isDeclDeprecated(cast(getCurLexicalContext()))) return; - DoEmitDeprecationWarning(*this, D, Message, Loc, UnknownObjCClass); + DoEmitDeprecationWarning(*this, D, Message, Loc, UnknownObjCClass, ObjCProperty); } diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp index eeac9b8..16eddf8 100644 --- a/lib/Sema/SemaDeclCXX.cpp +++ b/lib/Sema/SemaDeclCXX.cpp @@ -246,8 +246,7 @@ Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg, InitializationKind Kind = InitializationKind::CreateCopy(Param->getLocation(), EqualLoc); InitializationSequence InitSeq(*this, Entity, Kind, &Arg, 1); - ExprResult Result = InitSeq.Perform(*this, Entity, Kind, - MultiExprArg(*this, &Arg, 1)); + ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Arg); if (Result.isInvalid()) return true; Arg = Result.takeAs(); @@ -374,10 +373,10 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) { } } -// MergeCXXFunctionDecl - Merge two declarations of the same C++ -// function, once we already know that they have the same -// type. Subroutine of MergeFunctionDecl. Returns true if there was an -// error, false otherwise. +/// MergeCXXFunctionDecl - Merge two declarations of the same C++ +/// function, once we already know that they have the same +/// type. Subroutine of MergeFunctionDecl. Returns true if there was an +/// error, false otherwise. bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S) { bool Invalid = false; @@ -676,6 +675,20 @@ static bool CheckConstexprParameterTypes(Sema &SemaRef, return true; } +/// \brief Get diagnostic %select index for tag kind for +/// record diagnostic message. +/// WARNING: Indexes apply to particular diagnostics only! +/// +/// \returns diagnostic %select index. +static unsigned getRecordDiagFromTagKind(TagTypeKind Tag) { + switch (Tag) { + case TTK_Struct: return 0; + case TTK_Interface: return 1; + case TTK_Class: return 2; + default: llvm_unreachable("Invalid tag kind for record diagnostic!"); + } +} + // CheckConstexprFunctionDecl - Check whether a function declaration satisfies // the requirements of a constexpr function definition or a constexpr // constructor definition. If so, return true. 
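// Sketch of the deprecation path above when the attribute carries a message
// (names invented): the message is echoed in the warning and a note points at
// the deprecated declaration (note_previous_decl).
__attribute__((deprecated("use new_api() instead")))
void old_api(void);

void caller(void) {
  old_api();  // warning: 'old_api' is deprecated: use new_api() instead
              // note: 'old_api' declared here
}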
If not, produce appropriate @@ -692,8 +705,8 @@ bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) { const CXXRecordDecl *RD = MD->getParent(); if (RD->getNumVBases()) { Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base) - << isa(NewFD) << RD->isStruct() - << RD->getNumVBases(); + << isa(NewFD) + << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases(); for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(), E = RD->vbases_end(); I != E; ++I) Diag(I->getLocStart(), @@ -1005,6 +1018,41 @@ bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *, return false; } +/// \brief Determine whether the given class is a base class of the given +/// class, including looking at dependent bases. +static bool findCircularInheritance(const CXXRecordDecl *Class, + const CXXRecordDecl *Current) { + SmallVector Queue; + + Class = Class->getCanonicalDecl(); + while (true) { + for (CXXRecordDecl::base_class_const_iterator I = Current->bases_begin(), + E = Current->bases_end(); + I != E; ++I) { + CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl(); + if (!Base) + continue; + + Base = Base->getDefinition(); + if (!Base) + continue; + + if (Base->getCanonicalDecl() == Class) + return true; + + Queue.push_back(Base); + } + + if (Queue.empty()) + return false; + + Current = Queue.back(); + Queue.pop_back(); + } + + return false; +} + /// \brief Check the validity of a C++ base class specifier. /// /// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics @@ -1031,13 +1079,32 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class, << TInfo->getTypeLoc().getSourceRange(); EllipsisLoc = SourceLocation(); } - - if (BaseType->isDependentType()) + + SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc(); + + if (BaseType->isDependentType()) { + // Make sure that we don't have circular inheritance among our dependent + // bases. For non-dependent bases, the check for completeness below handles + // this. + if (CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl()) { + if (BaseDecl->getCanonicalDecl() == Class->getCanonicalDecl() || + ((BaseDecl = BaseDecl->getDefinition()) && + findCircularInheritance(Class, BaseDecl))) { + Diag(BaseLoc, diag::err_circular_inheritance) + << BaseType << Context.getTypeDeclType(Class); + + if (BaseDecl->getCanonicalDecl() != Class->getCanonicalDecl()) + Diag(BaseDecl->getLocation(), diag::note_previous_decl) + << BaseType; + + return 0; + } + } + return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual, Class->getTagKind() == TTK_Class, Access, TInfo, EllipsisLoc); - - SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc(); + } // Base specifiers must be record types. if (!BaseType->isRecordType()) { @@ -1165,10 +1232,21 @@ bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, // Okay, add this new base class. KnownBase = Bases[idx]; Bases[NumGoodBases++] = Bases[idx]; - if (const RecordType *Record = NewBaseType->getAs()) - if (const CXXRecordDecl *RD = cast(Record->getDecl())) - if (RD->hasAttr()) - Class->addAttr(::new (Context) WeakAttr(SourceRange(), Context)); + if (const RecordType *Record = NewBaseType->getAs()) { + const CXXRecordDecl *RD = cast(Record->getDecl()); + if (Class->isInterface() && + (!RD->isInterface() || + KnownBase->getAccessSpecifier() != AS_public)) { + // The Microsoft extension __interface does not permit bases that + // are not themselves public interfaces. 
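// A case the new findCircularInheritance / err_circular_inheritance check
// above is meant to catch (illustrative): a dependent base naming the class
// template currently being defined, which the completeness check used for
// non-dependent bases cannot see.
template <typename T>
struct Node : Node<T> { };  // error (roughly): circular inheritance between
                            // 'Node<T>' and 'Node<T>'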
+ Diag(KnownBase->getLocStart(), diag::err_invalid_base_in_interface) + << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getName() + << RD->getSourceRange(); + Invalid = true; + } + if (RD->hasAttr()) + Class->addAttr(::new (Context) WeakAttr(SourceRange(), Context)); + } } } @@ -1407,6 +1485,9 @@ bool Sema::ActOnAccessSpecifier(AccessSpecifier Access, /// CheckOverrideControl - Check C++11 override control semantics. void Sema::CheckOverrideControl(Decl *D) { + if (D->isInvalidDecl()) + return; + const CXXMethodDecl *MD = dyn_cast(D); // Do we know which functions this declaration might be overriding? @@ -1496,6 +1577,50 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, bool isFunc = D.isDeclarationOfFunction(); + if (cast(CurContext)->isInterface()) { + // The Microsoft extension __interface only permits public member functions + // and prohibits constructors, destructors, operators, non-public member + // functions, static methods and data members. + unsigned InvalidDecl; + bool ShowDeclName = true; + if (!isFunc) + InvalidDecl = (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) ? 0 : 1; + else if (AS != AS_public) + InvalidDecl = 2; + else if (DS.getStorageClassSpec() == DeclSpec::SCS_static) + InvalidDecl = 3; + else switch (Name.getNameKind()) { + case DeclarationName::CXXConstructorName: + InvalidDecl = 4; + ShowDeclName = false; + break; + + case DeclarationName::CXXDestructorName: + InvalidDecl = 5; + ShowDeclName = false; + break; + + case DeclarationName::CXXOperatorName: + case DeclarationName::CXXConversionFunctionName: + InvalidDecl = 6; + break; + + default: + InvalidDecl = 0; + break; + } + + if (InvalidDecl) { + if (ShowDeclName) + Diag(Loc, diag::err_invalid_member_in_interface) + << (InvalidDecl-1) << Name; + else + Diag(Loc, diag::err_invalid_member_in_interface) + << (InvalidDecl-1) << ""; + return 0; + } + } + // C++ 9.2p6: A member shall not be declared to have automatic storage // duration (auto, register) or with the extern storage-class-specifier. // C++ 7.1.1p8: The mutable specifier can be applied only to names of class @@ -1548,7 +1673,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, // Member field could not be with "template" keyword. // So TemplateParameterLists should be empty in this case. if (TemplateParameterLists.size()) { - TemplateParameterList* TemplateParams = TemplateParameterLists.get()[0]; + TemplateParameterList* TemplateParams = TemplateParameterLists[0]; if (TemplateParams->size()) { // There is no such thing as a member field template. Diag(D.getIdentifierLoc(), diag::err_template_member) @@ -1588,7 +1713,7 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, } else { assert(InitStyle == ICIS_NoInit); - Member = HandleDeclarator(S, D, move(TemplateParameterLists)); + Member = HandleDeclarator(S, D, TemplateParameterLists); if (!Member) { return 0; } @@ -1662,6 +1787,99 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, return Member; } +namespace { + class UninitializedFieldVisitor + : public EvaluatedExprVisitor { + Sema &S; + ValueDecl *VD; + public: + typedef EvaluatedExprVisitor Inherited; + UninitializedFieldVisitor(Sema &S, ValueDecl *VD) : Inherited(S.Context), + S(S), VD(VD) { + } + + void HandleExpr(Expr *E) { + if (!E) return; + + // Expressions like x(x) sometimes lack the surrounding expressions + // but need to be checked anyways. 
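// Rough sketch of the __interface restrictions enforced above (Microsoft
// extension, -fms-extensions): members must be public, non-static member
// functions, and bases must themselves be public interfaces.
__interface IShape {
  int area();                   // ok: public member function
  // int cached_area;           // error: data member in an interface
  // static IShape *create();   // error: static member function
};
__interface ICircle : public IShape {  // ok: public interface base
  int radius();
};
// struct Impl { };
// __interface IBad : Impl { };  // error: base type is not a public interface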
+ HandleValue(E); + Visit(E); + } + + void HandleValue(Expr *E) { + E = E->IgnoreParens(); + + if (MemberExpr *ME = dyn_cast(E)) { + if (isa(ME->getMemberDecl())) + return; + Expr *Base = E; + while (isa(Base)) { + ME = dyn_cast(Base); + if (VarDecl *VarD = dyn_cast(ME->getMemberDecl())) + if (VarD->hasGlobalStorage()) + return; + Base = ME->getBase(); + } + + if (VD == ME->getMemberDecl() && isa(Base)) { + unsigned diag = VD->getType()->isReferenceType() + ? diag::warn_reference_field_is_uninit + : diag::warn_field_is_uninit; + S.Diag(ME->getExprLoc(), diag) << ME->getMemberNameInfo().getName(); + return; + } + } + + if (ConditionalOperator *CO = dyn_cast(E)) { + HandleValue(CO->getTrueExpr()); + HandleValue(CO->getFalseExpr()); + return; + } + + if (BinaryConditionalOperator *BCO = + dyn_cast(E)) { + HandleValue(BCO->getCommon()); + HandleValue(BCO->getFalseExpr()); + return; + } + + if (BinaryOperator *BO = dyn_cast(E)) { + switch (BO->getOpcode()) { + default: + return; + case(BO_PtrMemD): + case(BO_PtrMemI): + HandleValue(BO->getLHS()); + return; + case(BO_Comma): + HandleValue(BO->getRHS()); + return; + } + } + } + + void VisitImplicitCastExpr(ImplicitCastExpr *E) { + if (E->getCastKind() == CK_LValueToRValue) + HandleValue(E->getSubExpr()); + + Inherited::VisitImplicitCastExpr(E); + } + + void VisitCXXMemberCallExpr(CXXMemberCallExpr *E) { + Expr *Callee = E->getCallee(); + if (isa(Callee)) + HandleValue(Callee); + + Inherited::VisitCXXMemberCallExpr(E); + } + }; + static void CheckInitExprContainsUninitializedFields(Sema &S, Expr *E, + ValueDecl *VD) { + UninitializedFieldVisitor(S, VD).HandleExpr(E); + } +} // namespace + /// ActOnCXXInClassMemberInitializer - This is invoked after parsing an /// in-class initializer for a non-static C++ class member, and after /// instantiating an in-class initializer in a class template. Such actions @@ -1685,8 +1903,17 @@ Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation InitLoc, return; } + if (getDiagnostics().getDiagnosticLevel(diag::warn_field_is_uninit, InitLoc) + != DiagnosticsEngine::Ignored) { + CheckInitExprContainsUninitializedFields(*this, InitExpr, FD); + } + ExprResult Init = InitExpr; - if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent()) { + if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent() && + !FD->getDeclContext()->isDependentContext()) { + // Note: We don't type-check when we're in a dependent context, because + // the initialization-substitution code does not properly handle direct + // list initialization. We have the same hackaround for ctor-initializers. 
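// What the UninitializedFieldVisitor above now also flags for in-class
// initializers (illustrative):
struct Widget {
  int a = a;    // warning: field 'a' is uninitialized when used here
  int &r = r;   // warning (roughly): reference 'r' is not yet bound to a
                // value when used here
};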
if (isa(InitExpr) && isStdInitializerList(FD->getType(), 0)) { Diag(FD->getLocation(), diag::warn_dangling_std_initializer_list) << /*at end of ctor*/1 << InitExpr->getSourceRange(); @@ -1795,7 +2022,8 @@ Sema::ActOnMemInitializer(Decl *ConstructorD, Expr **Args, unsigned NumArgs, SourceLocation RParenLoc, SourceLocation EllipsisLoc) { - Expr *List = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs, + Expr *List = new (Context) ParenListExpr(Context, LParenLoc, + llvm::makeArrayRef(Args, NumArgs), RParenLoc); return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy, DS, IdLoc, List, EllipsisLoc); @@ -2044,96 +2272,6 @@ static void CheckForDanglingReferenceOrPointer(Sema &S, ValueDecl *Member, << (unsigned)IsPointer; } -namespace { - class UninitializedFieldVisitor - : public EvaluatedExprVisitor { - Sema &S; - ValueDecl *VD; - public: - typedef EvaluatedExprVisitor Inherited; - UninitializedFieldVisitor(Sema &S, ValueDecl *VD) : Inherited(S.Context), - S(S), VD(VD) { - } - - void HandleExpr(Expr *E) { - if (!E) return; - - // Expressions like x(x) sometimes lack the surrounding expressions - // but need to be checked anyways. - HandleValue(E); - Visit(E); - } - - void HandleValue(Expr *E) { - E = E->IgnoreParens(); - - if (MemberExpr *ME = dyn_cast(E)) { - if (isa(ME->getMemberDecl())) - return; - Expr *Base = E; - while (isa(Base)) { - ME = dyn_cast(Base); - if (VarDecl *VarD = dyn_cast(ME->getMemberDecl())) - if (VarD->hasGlobalStorage()) - return; - Base = ME->getBase(); - } - - if (VD == ME->getMemberDecl() && isa(Base)) { - S.Diag(ME->getExprLoc(), diag::warn_field_is_uninit); - return; - } - } - - if (ConditionalOperator *CO = dyn_cast(E)) { - HandleValue(CO->getTrueExpr()); - HandleValue(CO->getFalseExpr()); - return; - } - - if (BinaryConditionalOperator *BCO = - dyn_cast(E)) { - HandleValue(BCO->getCommon()); - HandleValue(BCO->getFalseExpr()); - return; - } - - if (BinaryOperator *BO = dyn_cast(E)) { - switch (BO->getOpcode()) { - default: - return; - case(BO_PtrMemD): - case(BO_PtrMemI): - HandleValue(BO->getLHS()); - return; - case(BO_Comma): - HandleValue(BO->getRHS()); - return; - } - } - } - - void VisitImplicitCastExpr(ImplicitCastExpr *E) { - if (E->getCastKind() == CK_LValueToRValue) - HandleValue(E->getSubExpr()); - - Inherited::VisitImplicitCastExpr(E); - } - - void VisitCXXMemberCallExpr(CXXMemberCallExpr *E) { - Expr *Callee = E->getCallee(); - if (isa(Callee)) - HandleValue(Callee); - - Inherited::VisitCXXMemberCallExpr(E); - } - }; - static void CheckInitExprContainsUninitializedFields(Sema &S, Expr *E, - ValueDecl *VD) { - UninitializedFieldVisitor(S, VD).HandleExpr(E); - } -} // namespace - MemInitResult Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc) { @@ -2167,11 +2305,13 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init, != DiagnosticsEngine::Ignored) for (unsigned i = 0; i < NumArgs; ++i) // FIXME: Warn about the case when other fields are used before being - // uninitialized. For example, let this field be the i'th field. When + // initialized. For example, let this field be the i'th field. When // initializing the i'th field, throw a warning if any of the >= i'th // fields are used, as they are not yet initialized. // Right now we are only handling the case where the i'th field uses // itself in its initializer. + // Also need to take into account that some fields may be initialized by + // in-class initializers, see C++11 [class.base.init]p9. 
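// The dangling std::initializer_list case checked just above (illustrative):
// the array backing the braced list lives only until the constructor returns,
// so the member is left dangling.
#include <initializer_list>
struct Config {
  std::initializer_list<int> defaults = {1, 2, 3};
  // warning: array backing the initializer list will be destroyed at the end
  // of the constructor
};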
CheckInitExprContainsUninitializedFields(*this, Args[i], Member); SourceRange InitRange = Init->getSourceRange(); @@ -2204,7 +2344,7 @@ Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init, InitializationSequence InitSeq(*this, MemberEntity, Kind, Args, NumArgs); ExprResult MemberInit = InitSeq.Perform(*this, MemberEntity, Kind, - MultiExprArg(*this, Args, NumArgs), + MultiExprArg(Args, NumArgs), 0); if (MemberInit.isInvalid()) return true; @@ -2273,7 +2413,7 @@ Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, InitRange.getEnd()); InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args, NumArgs); ExprResult DelegationInit = InitSeq.Perform(*this, DelegationEntity, Kind, - MultiExprArg(*this, Args,NumArgs), + MultiExprArg(Args, NumArgs), 0); if (DelegationInit.isInvalid()) return true; @@ -2411,8 +2551,7 @@ Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, InitRange.getEnd()); InitializationSequence InitSeq(*this, BaseEntity, Kind, Args, NumArgs); ExprResult BaseInit = InitSeq.Perform(*this, BaseEntity, Kind, - MultiExprArg(*this, Args, NumArgs), - 0); + MultiExprArg(Args, NumArgs), 0); if (BaseInit.isInvalid()) return true; @@ -2480,8 +2619,7 @@ BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor, InitializationKind InitKind = InitializationKind::CreateDefault(Constructor->getLocation()); InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, 0, 0); - BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, - MultiExprArg(SemaRef, 0, 0)); + BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, MultiExprArg()); break; } @@ -2936,7 +3074,11 @@ bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, NumInitializers * sizeof(CXXCtorInitializer*)); Constructor->setCtorInitializers(baseOrMemberInitializers); } - + + // Let template instantiation know whether we had errors. + if (AnyErrors) + Constructor->setInvalidDecl(); + return false; } @@ -3324,11 +3466,10 @@ void Sema::ActOnMemInitializers(Decl *ConstructorDecl, } else { assert(Init->isDelegatingInitializer()); // This must be the only initializer - if (i != 0 || NumMemInits > 1) { - Diag(MemInits[0]->getSourceLocation(), + if (NumMemInits != 1) { + Diag(Init->getSourceLocation(), diag::err_delegating_initializer_alone) - << MemInits[0]->getSourceRange(); - HadError = true; + << Init->getSourceRange() << MemInits[i ? 0 : 1]->getSourceRange(); // We will treat this as being the only initializer. } SetDelegatingInitializer(Constructor, MemInits[i]); @@ -3812,6 +3953,11 @@ void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) { diag::warn_non_virtual_dtor) << Context.getRecordType(Record); } + if (Record->isAbstract() && Record->hasAttr()) { + Diag(Record->getLocation(), diag::warn_abstract_final_class); + DiagnoseAbstractType(Record); + } + // See if a method overloads virtual methods in a base /// class without overriding any. if (!Record->isDependentType()) { @@ -4065,7 +4211,7 @@ void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) { // Compute argument constness, constexpr, and triviality. bool CanHaveConstParam = false; - bool Trivial; + bool Trivial = false; switch (CSM) { case CXXDefaultConstructor: Trivial = RD->hasTrivialDefaultConstructor(); @@ -4304,7 +4450,7 @@ bool SpecialMemberDeletionInfo::isAccessible(Subobject Subobj, /// If we're operating on a base class, the object type is the /// type of this special member. 
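// Pattern behind the new warn_abstract_final_class check above: a class that
// is both abstract and final can never be instantiated or derived from.
class Unusable final {       // warning: abstract class is marked 'final'
  virtual void run() = 0;    // note: unimplemented pure virtual method 'run'
};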
QualType objectTy; - AccessSpecifier access = target->getAccess();; + AccessSpecifier access = target->getAccess(); if (CXXBaseSpecifier *base = Subobj.dyn_cast()) { objectTy = S.Context.getTypeDeclType(MD->getParent()); access = CXXRecordDecl::MergeAccess(base->getAccessSpecifier(), access); @@ -4647,6 +4793,19 @@ namespace { }; } +/// \brief Check whether any most overriden method from MD in Methods +static bool CheckMostOverridenMethods(const CXXMethodDecl *MD, + const llvm::SmallPtrSet& Methods) { + if (MD->size_overridden_methods() == 0) + return Methods.count(MD->getCanonicalDecl()); + for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(), + E = MD->end_overridden_methods(); + I != E; ++I) + if (CheckMostOverridenMethods(*I, Methods)) + return true; + return false; +} + /// \brief Member lookup function that determines whether a given C++ /// method overloads virtual methods in a base class without overriding any, /// to be used with CXXRecordDecl::lookupInBases(). @@ -4678,7 +4837,7 @@ static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier, if (!Data.S->IsOverload(Data.Method, MD, false)) return true; // Collect the overload only if its hidden. - if (!Data.OverridenAndUsingBaseMethods.count(MD)) + if (!CheckMostOverridenMethods(MD, Data.OverridenAndUsingBaseMethods)) overloadedMethods.push_back(MD); } } @@ -4689,6 +4848,17 @@ static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier, return foundSameNameMethod; } +/// \brief Add the most overriden methods from MD to Methods +static void AddMostOverridenMethods(const CXXMethodDecl *MD, + llvm::SmallPtrSet& Methods) { + if (MD->size_overridden_methods() == 0) + Methods.insert(MD->getCanonicalDecl()); + for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(), + E = MD->end_overridden_methods(); + I != E; ++I) + AddMostOverridenMethods(*I, Methods); +} + /// \brief See if a method overloads virtual methods in a base class without /// overriding any. void Sema::DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) { @@ -4709,14 +4879,11 @@ void Sema::DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) { // by 'using' in a set. A base method not in this set is hidden. for (DeclContext::lookup_result res = DC->lookup(MD->getDeclName()); res.first != res.second; ++res.first) { - if (CXXMethodDecl *MD = dyn_cast(*res.first)) - for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(), - E = MD->end_overridden_methods(); - I != E; ++I) - Data.OverridenAndUsingBaseMethods.insert((*I)->getCanonicalDecl()); + NamedDecl *ND = *res.first; if (UsingShadowDecl *shad = dyn_cast(*res.first)) - if (CXXMethodDecl *MD = dyn_cast(shad->getTargetDecl())) - Data.OverridenAndUsingBaseMethods.insert(MD->getCanonicalDecl()); + ND = shad->getTargetDecl(); + if (CXXMethodDecl *MD = dyn_cast(ND)) + AddMostOverridenMethods(MD, Data.OverridenAndUsingBaseMethods); } if (DC->lookupInBases(&FindHiddenVirtualMethod, &Data, Paths) && @@ -5307,7 +5474,47 @@ Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) { // Namespace Handling //===----------------------------------------------------------------------===// +/// \brief Diagnose a mismatch in 'inline' qualifiers when a namespace is +/// reopened. 
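// Sketch of the -Woverloaded-virtual situation handled by the hidden-method
// bookkeeping above: a derived class overloads a virtual name without
// overriding it, hiding the base class version.
struct Shape {
  virtual void draw(int scale);
};
struct Square : Shape {
  void draw(double scale);  // warning: 'Square::draw' hides overloaded
                            // virtual function 'Shape::draw'
};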
+static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc, + SourceLocation Loc, + IdentifierInfo *II, bool *IsInline, + NamespaceDecl *PrevNS) { + assert(*IsInline != PrevNS->isInline()); + + // HACK: Work around a bug in libstdc++4.6's , where + // std::__atomic[0,1,2] are defined as non-inline namespaces, then reopened as + // inline namespaces, with the intention of bringing names into namespace std. + // + // We support this just well enough to get that case working; this is not + // sufficient to support reopening namespaces as inline in general. + if (*IsInline && II && II->getName().startswith("__atomic") && + S.getSourceManager().isInSystemHeader(Loc)) { + // Mark all prior declarations of the namespace as inline. + for (NamespaceDecl *NS = PrevNS->getMostRecentDecl(); NS; + NS = NS->getPreviousDecl()) + NS->setInline(*IsInline); + // Patch up the lookup table for the containing namespace. This isn't really + // correct, but it's good enough for this particular case. + for (DeclContext::decl_iterator I = PrevNS->decls_begin(), + E = PrevNS->decls_end(); I != E; ++I) + if (NamedDecl *ND = dyn_cast(*I)) + PrevNS->getParent()->makeDeclVisibleInContext(ND); + return; + } + + if (PrevNS->isInline()) + // The user probably just forgot the 'inline', so suggest that it + // be added back. + S.Diag(Loc, diag::warn_inline_namespace_reopened_noninline) + << FixItHint::CreateInsertion(KeywordLoc, "inline "); + else + S.Diag(Loc, diag::err_inline_namespace_mismatch) + << IsInline; + S.Diag(PrevNS->getLocation(), diag::note_previous_definition); + *IsInline = PrevNS->isInline(); +} /// ActOnStartNamespaceDef - This is called at the start of a namespace /// definition. @@ -5357,21 +5564,9 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope, if (PrevNS) { // This is an extended namespace definition. - if (IsInline != PrevNS->isInline()) { - // inline-ness must match - if (PrevNS->isInline()) { - // The user probably just forgot the 'inline', so suggest that it - // be added back. - Diag(Loc, diag::warn_inline_namespace_reopened_noninline) - << FixItHint::CreateInsertion(NamespaceLoc, "inline "); - } else { - Diag(Loc, diag::err_inline_namespace_mismatch) - << IsInline; - } - Diag(PrevNS->getLocation(), diag::note_previous_definition); - - IsInline = PrevNS->isInline(); - } + if (IsInline != PrevNS->isInline()) + DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, Loc, II, + &IsInline, PrevNS); } else if (PrevDecl) { // This is an invalid name redefinition. Diag(Loc, diag::err_redefinition_different_kind) @@ -5402,15 +5597,9 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope, PrevNS = ND->getAnonymousNamespace(); } - if (PrevNS && IsInline != PrevNS->isInline()) { - // inline-ness must match - Diag(Loc, diag::err_inline_namespace_mismatch) - << IsInline; - Diag(PrevNS->getLocation(), diag::note_previous_definition); - - // Recover by ignoring the new namespace's inline status. 
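// Example of the mismatch now funneled through DiagnoseNamespaceInlineMismatch
// above (illustrative): reopening an inline namespace without 'inline' gets a
// warning plus a fix-it, while the reverse direction remains a hard error.
namespace lib {
  inline namespace v1 { int f(); }
  namespace v1 { int g(); }  // warning: inline namespace reopened as a
                             // non-inline namespace (fix-it inserts 'inline')
}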
- IsInline = PrevNS->isInline(); - } + if (PrevNS && IsInline != PrevNS->isInline()) + DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, NamespaceLoc, II, + &IsInline, PrevNS); } NamespaceDecl *Namespc = NamespaceDecl::Create(Context, CurContext, IsInline, @@ -5460,15 +5649,15 @@ Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope, if (!PrevNS) { UsingDirectiveDecl* UD - = UsingDirectiveDecl::Create(Context, CurContext, + = UsingDirectiveDecl::Create(Context, Parent, /* 'using' */ LBrace, /* 'namespace' */ SourceLocation(), /* qualifier */ NestedNameSpecifierLoc(), /* identifier */ SourceLocation(), Namespc, - /* Ancestor */ CurContext); + /* Ancestor */ Parent); UD->setImplicit(); - CurContext->addDecl(UD); + Parent->addDecl(UD); } } @@ -5697,7 +5886,8 @@ static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc, if (DeclContext *DC = S.computeDeclContext(SS, false)) S.Diag(IdentLoc, diag::err_using_directive_member_suggest) << Ident << DC << CorrectedQuotedStr << SS.getRange() - << FixItHint::CreateReplacement(IdentLoc, CorrectedStr); + << FixItHint::CreateReplacement(Corrected.getCorrectionRange(), + CorrectedStr); else S.Diag(IdentLoc, diag::err_using_directive_suggest) << Ident << CorrectedQuotedStr @@ -6562,10 +6752,10 @@ Decl *Sema::ActOnAliasDeclaration(Scope *S, if (TemplateParamLists.size() != 1) { Diag(UsingLoc, diag::err_alias_template_extra_headers) - << SourceRange(TemplateParamLists.get()[1]->getTemplateLoc(), - TemplateParamLists.get()[TemplateParamLists.size()-1]->getRAngleLoc()); + << SourceRange(TemplateParamLists[1]->getTemplateLoc(), + TemplateParamLists[TemplateParamLists.size()-1]->getRAngleLoc()); } - TemplateParameterList *TemplateParams = TemplateParamLists.get()[0]; + TemplateParameterList *TemplateParams = TemplateParamLists[0]; // Only consider previous declarations in the same scope. 
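// Typo-correction scenario for the using-directive fix-it adjusted above
// (namespaces invented for the example): the replacement now covers the full
// corrected range rather than just the identifier location.
namespace gfx { namespace widgets { } }
using namespace gfx::widgetz;  // error (roughly): no namespace named
                               // 'widgetz' in namespace 'gfx'; did you
                               // mean 'widgets'?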
FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage*/false, @@ -6696,28 +6886,6 @@ Decl *Sema::ActOnNamespaceAliasDef(Scope *S, return AliasDecl; } -namespace { - /// \brief Scoped object used to handle the state changes required in Sema - /// to implicitly define the body of a C++ member function; - class ImplicitlyDefinedFunctionScope { - Sema &S; - Sema::ContextRAII SavedContext; - - public: - ImplicitlyDefinedFunctionScope(Sema &S, CXXMethodDecl *Method) - : S(S), SavedContext(S, Method) - { - S.PushFunctionScope(); - S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); - } - - ~ImplicitlyDefinedFunctionScope() { - S.PopExpressionEvaluationContext(); - S.PopFunctionScopeInfo(); - } - }; -} - Sema::ImplicitExceptionSpecification Sema::ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD) { @@ -6861,7 +7029,7 @@ void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXRecordDecl *ClassDecl = Constructor->getParent(); assert(ClassDecl && "DefineImplicitDefaultConstructor - invalid constructor"); - ImplicitlyDefinedFunctionScope Scope(*this, Constructor); + SynthesizedFunctionScope Scope(*this, Constructor); DiagnosticErrorTrap Trap(Diags); if (SetCtorInitializers(Constructor, 0, 0, /*AnyErrors=*/false) || Trap.hasErrorOccurred()) { @@ -7173,7 +7341,7 @@ void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation, if (Destructor->isInvalidDecl()) return; - ImplicitlyDefinedFunctionScope Scope(*this, Destructor); + SynthesizedFunctionScope Scope(*this, Destructor); DiagnosticErrorTrap Trap(Diags); MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(), @@ -7412,7 +7580,7 @@ BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T, = new (S.Context) BinaryOperator(IterationVarRefRVal, IntegerLiteral::Create(S.Context, Upper, SizeType, Loc), BO_NE, S.Context.BoolTy, - VK_RValue, OK_Ordinary, Loc); + VK_RValue, OK_Ordinary, Loc, false); // Create the pre-increment of the iteration variable. Expr *Increment @@ -7654,7 +7822,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CopyAssignOperator->setUsed(); - ImplicitlyDefinedFunctionScope Scope(*this, CopyAssignOperator); + SynthesizedFunctionScope Scope(*this, CopyAssignOperator); DiagnosticErrorTrap Trap(Diags); // C++0x [class.copy]p30: @@ -7666,7 +7834,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, // which they were declared in the class definition. // The statements that form the synthesized function body. - ASTOwningVector Statements(*this); + SmallVector Statements; // The parameter for the "other" object, which we are copying from. 
ParmVarDecl *Other = CopyAssignOperator->getParamDecl(0); @@ -7846,8 +8014,8 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, } CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy, - CollectableMemCpy->getType(), - VK_LValue, Loc, 0).take(); + Context.BuiltinFnTy, + VK_RValue, Loc, 0).take(); assert(CollectableMemCpyRef && "Builtin reference cannot fail"); } } @@ -7866,12 +8034,12 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, } BuiltinMemCpyRef = BuildDeclRefExpr(BuiltinMemCpy, - BuiltinMemCpy->getType(), - VK_LValue, Loc, 0).take(); + Context.BuiltinFnTy, + VK_RValue, Loc, 0).take(); assert(BuiltinMemCpyRef && "Builtin reference cannot fail"); } - ASTOwningVector CallArgs(*this); + SmallVector CallArgs; CallArgs.push_back(To.takeAs()); CallArgs.push_back(From.takeAs()); CallArgs.push_back(IntegerLiteral::Create(Context, Size, SizeType, Loc)); @@ -7879,12 +8047,12 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, if (NeedsCollectableMemCpy) Call = ActOnCallExpr(/*Scope=*/0, CollectableMemCpyRef, - Loc, move_arg(CallArgs), + Loc, CallArgs, Loc); else Call = ActOnCallExpr(/*Scope=*/0, BuiltinMemCpyRef, - Loc, move_arg(CallArgs), + Loc, CallArgs, Loc); assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!"); @@ -7934,7 +8102,7 @@ void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, StmtResult Body; { CompoundScopeRAII CompoundScope(*this); - Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements), + Body = ActOnCompoundStmt(Loc, Loc, Statements, /*isStmtExpr=*/false); assert(!Body.isInvalid() && "Compound statement creation cannot fail"); } @@ -8040,7 +8208,7 @@ hasMoveOrIsTriviallyCopyable(Sema &S, QualType Type, bool IsConstructor) { // reference types, are supposed to return false here, but that appears // to be a standard defect. CXXRecordDecl *ClassDecl = Type->getAsCXXRecordDecl(); - if (!ClassDecl || !ClassDecl->getDefinition()) + if (!ClassDecl || !ClassDecl->getDefinition() || ClassDecl->isInvalidDecl()) return true; if (Type.isTriviallyCopyableType(S.Context)) @@ -8195,7 +8363,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, MoveAssignOperator->setUsed(); - ImplicitlyDefinedFunctionScope Scope(*this, MoveAssignOperator); + SynthesizedFunctionScope Scope(*this, MoveAssignOperator); DiagnosticErrorTrap Trap(Diags); // C++0x [class.copy]p28: @@ -8207,7 +8375,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, // definition. // The statements that form the synthesized function body. - ASTOwningVector Statements(*this); + SmallVector Statements; // The parameter for the "other" object, which we are move from. 
ParmVarDecl *Other = MoveAssignOperator->getParamDecl(0); @@ -8395,8 +8563,8 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, } CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy, - CollectableMemCpy->getType(), - VK_LValue, Loc, 0).take(); + Context.BuiltinFnTy, + VK_RValue, Loc, 0).take(); assert(CollectableMemCpyRef && "Builtin reference cannot fail"); } } @@ -8415,12 +8583,12 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, } BuiltinMemCpyRef = BuildDeclRefExpr(BuiltinMemCpy, - BuiltinMemCpy->getType(), - VK_LValue, Loc, 0).take(); + Context.BuiltinFnTy, + VK_RValue, Loc, 0).take(); assert(BuiltinMemCpyRef && "Builtin reference cannot fail"); } - ASTOwningVector CallArgs(*this); + SmallVector CallArgs; CallArgs.push_back(To.takeAs()); CallArgs.push_back(From.takeAs()); CallArgs.push_back(IntegerLiteral::Create(Context, Size, SizeType, Loc)); @@ -8428,12 +8596,12 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, if (NeedsCollectableMemCpy) Call = ActOnCallExpr(/*Scope=*/0, CollectableMemCpyRef, - Loc, move_arg(CallArgs), + Loc, CallArgs, Loc); else Call = ActOnCallExpr(/*Scope=*/0, BuiltinMemCpyRef, - Loc, move_arg(CallArgs), + Loc, CallArgs, Loc); assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!"); @@ -8483,7 +8651,7 @@ void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation, StmtResult Body; { CompoundScopeRAII CompoundScope(*this); - Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements), + Body = ActOnCompoundStmt(Loc, Loc, Statements, /*isStmtExpr=*/false); assert(!Body.isInvalid() && "Compound statement creation cannot fail"); } @@ -8691,7 +8859,7 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXRecordDecl *ClassDecl = CopyConstructor->getParent(); assert(ClassDecl && "DefineImplicitCopyConstructor - invalid constructor"); - ImplicitlyDefinedFunctionScope Scope(*this, CopyConstructor); + SynthesizedFunctionScope Scope(*this, CopyConstructor); DiagnosticErrorTrap Trap(Diags); if (SetCtorInitializers(CopyConstructor, 0, 0, /*AnyErrors=*/false) || @@ -8703,7 +8871,7 @@ void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation, Sema::CompoundScopeRAII CompoundScope(*this); CopyConstructor->setBody(ActOnCompoundStmt(CopyConstructor->getLocation(), CopyConstructor->getLocation(), - MultiStmtArg(*this, 0, 0), + MultiStmtArg(), /*isStmtExpr=*/false) .takeAs()); CopyConstructor->setImplicitlyDefined(true); @@ -8874,7 +9042,7 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXRecordDecl *ClassDecl = MoveConstructor->getParent(); assert(ClassDecl && "DefineImplicitMoveConstructor - invalid constructor"); - ImplicitlyDefinedFunctionScope Scope(*this, MoveConstructor); + SynthesizedFunctionScope Scope(*this, MoveConstructor); DiagnosticErrorTrap Trap(Diags); if (SetCtorInitializers(MoveConstructor, 0, 0, /*AnyErrors=*/false) || @@ -8886,7 +9054,7 @@ void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation, Sema::CompoundScopeRAII CompoundScope(*this); MoveConstructor->setBody(ActOnCompoundStmt(MoveConstructor->getLocation(), MoveConstructor->getLocation(), - MultiStmtArg(*this, 0, 0), + MultiStmtArg(), /*isStmtExpr=*/false) .takeAs()); MoveConstructor->setImplicitlyDefined(true); @@ -8926,7 +9094,7 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion( Conv->setUsed(); - ImplicitlyDefinedFunctionScope Scope(*this, Conv); + SynthesizedFunctionScope Scope(*this, Conv); 
DiagnosticErrorTrap Trap(Diags); // Return the address of the __invoke function. @@ -8959,7 +9127,7 @@ void Sema::DefineImplicitLambdaToBlockPointerConversion( { Conv->setUsed(); - ImplicitlyDefinedFunctionScope Scope(*this, Conv); + SynthesizedFunctionScope Scope(*this, Conv); DiagnosticErrorTrap Trap(Diags); // Copy-initialize the lambda object as needed to capture it. @@ -9014,12 +9182,12 @@ static bool hasOneRealArgument(MultiExprArg Args) { return false; default: - if (!Args.get()[1]->isDefaultArgument()) + if (!Args[1]->isDefaultArgument()) return false; // fall through case 1: - return !Args.get()[0]->isDefaultArgument(); + return !Args[0]->isDefaultArgument(); } return false; @@ -9047,12 +9215,12 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, // directly into the target of the omitted copy/move if (ConstructKind == CXXConstructExpr::CK_Complete && Constructor->isCopyOrMoveConstructor() && hasOneRealArgument(ExprArgs)) { - Expr *SubExpr = ((Expr **)ExprArgs.get())[0]; + Expr *SubExpr = ExprArgs[0]; Elidable = SubExpr->isTemporaryObject(Context, Constructor->getParent()); } return BuildCXXConstructExpr(ConstructLoc, DeclInitType, Constructor, - Elidable, move(ExprArgs), HadMultipleCandidates, + Elidable, ExprArgs, HadMultipleCandidates, RequiresZeroInit, ConstructKind, ParenRange); } @@ -9066,12 +9234,9 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange) { - unsigned NumExprs = ExprArgs.size(); - Expr **Exprs = (Expr **)ExprArgs.release(); - MarkFunctionReferenced(ConstructLoc, Constructor); return Owned(CXXConstructExpr::Create(Context, DeclInitType, ConstructLoc, - Constructor, Elidable, Exprs, NumExprs, + Constructor, Elidable, ExprArgs, HadMultipleCandidates, /*FIXME*/false, RequiresZeroInit, static_cast(ConstructKind), @@ -9085,7 +9250,7 @@ bool Sema::InitializeVarWithConstructor(VarDecl *VD, // FIXME: Provide the correct paren SourceRange when available. ExprResult TempResult = BuildCXXConstructExpr(VD->getLocation(), VD->getType(), Constructor, - move(Exprs), HadMultipleCandidates, false, + Exprs, HadMultipleCandidates, false, CXXConstructExpr::CK_Complete, SourceRange()); if (TempResult.isInvalid()) return true; @@ -9135,11 +9300,11 @@ bool Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, - ASTOwningVector &ConvertedArgs, + SmallVectorImpl &ConvertedArgs, bool AllowExplicit) { // FIXME: This duplicates a lot of code from Sema::ConvertArgumentsForCall. unsigned NumArgs = ArgsPtr.size(); - Expr **Args = (Expr **)ArgsPtr.get(); + Expr **Args = ArgsPtr.data(); const FunctionProtoType *Proto = Constructor->getType()->getAs(); @@ -9268,7 +9433,7 @@ CheckOperatorNewDeclaration(Sema &SemaRef, const FunctionDecl *FnDecl) { } static bool -CheckOperatorDeleteDeclaration(Sema &SemaRef, const FunctionDecl *FnDecl) { +CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) { // C++ [basic.stc.dynamic.deallocation]p1: // A program is ill-formed if deallocation functions are declared in a // namespace scope other than global scope or declared static in global @@ -9825,7 +9990,7 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, /// \brief Perform semantic analysis of the given friend type declaration. /// /// \returns A friend declaration that. 
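// The constraint checked by CheckOperatorDeleteDeclaration above
// (illustrative): deallocation functions may only be declared at global
// scope, and not as static there.
namespace impl {
  void operator delete(void *p);       // error: 'operator delete' cannot be
                                       // declared inside a namespace
}
static void operator delete(void *p);  // error: 'operator delete' cannot be
                                       // declared static in global scope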
-FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation Loc, +FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo) { assert(TSInfo && "NULL TypeSourceInfo for friend type declaration"); @@ -9864,7 +10029,7 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation Loc, diag::warn_cxx98_compat_nonclass_type_friend : diag::ext_nonclass_type_friend) << T - << SourceRange(FriendLoc, TypeRange.getEnd()); + << TypeRange; } } else if (T->getAs()) { Diag(FriendLoc, @@ -9872,18 +10037,22 @@ FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation Loc, diag::warn_cxx98_compat_enum_friend : diag::ext_enum_friend) << T - << SourceRange(FriendLoc, TypeRange.getEnd()); + << TypeRange; } - // C++0x [class.friend]p3: + // C++11 [class.friend]p3: + // A friend declaration that does not declare a function shall have one + // of the following forms: + // friend elaborated-type-specifier ; + // friend simple-type-specifier ; + // friend typename-specifier ; + if (getLangOpts().CPlusPlus0x && LocStart != FriendLoc) + Diag(FriendLoc, diag::err_friend_not_first_in_declaration) << T; + // If the type specifier in a friend declaration designates a (possibly - // cv-qualified) class type, that class is declared as a friend; otherwise, + // cv-qualified) class type, that class is declared as a friend; otherwise, // the friend declaration is ignored. - - // FIXME: C++0x has some syntactic restrictions on friend type declarations - // in [class.friend]p3 that we do not implement. - - return FriendDecl::Create(Context, CurContext, Loc, TSInfo, FriendLoc); + return FriendDecl::Create(Context, CurContext, LocStart, TSInfo, FriendLoc); } /// Handle a friend tag declaration where the scope specifier was @@ -9901,7 +10070,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, if (TemplateParameterList *TemplateParams = MatchTemplateParametersToScopeSpecifier(TagLoc, NameLoc, SS, - TempParamLists.get(), + TempParamLists.data(), TempParamLists.size(), /*friend*/ true, isExplicitSpecialization, @@ -9916,7 +10085,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, TemplateParams, AS_public, /*ModulePrivateLoc=*/SourceLocation(), TempParamLists.size() - 1, - (TemplateParameterList**) TempParamLists.release()).take(); + TempParamLists.data()).take(); } else { // The "template<>" header is extraneous. 
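// C++11 friend-type forms referenced in the comment above (illustrative);
// befriending a non-class type is accepted but the declaration is ignored,
// and the new check requires 'friend' to come first in such a declaration.
class Helper;
enum Color : int;
struct Widget {
  friend Helper;        // friend simple-type-specifier ;
  friend class Helper;  // friend elaborated-type-specifier ;
  friend Color;         // ok in C++11 (extension in C++98): not a class,
                        // so the friend declaration is ignored
};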
Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams) @@ -9929,7 +10098,7 @@ Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, bool isAllExplicitSpecializations = true; for (unsigned I = TempParamLists.size(); I-- > 0; ) { - if (TempParamLists.get()[I]->size()) { + if (TempParamLists[I]->size()) { isAllExplicitSpecializations = false; break; } @@ -10076,7 +10245,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, if (unsigned NumTempParamLists = TempParams.size()) D = FriendTemplateDecl::Create(Context, CurContext, Loc, NumTempParamLists, - TempParams.release(), + TempParams.data(), TSI, DS.getFriendSpecLoc()); else @@ -10318,7 +10487,7 @@ Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D, bool AddToScope = true; NamedDecl *ND = ActOnFunctionDeclarator(DCScope, D, DC, TInfo, Previous, - move(TemplateParams), AddToScope); + TemplateParams, AddToScope); if (!ND) return 0; assert(ND->getDeclContext() == DC); @@ -10435,10 +10604,10 @@ void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) { // If this definition appears within the record, do the checking when // the record is complete. const FunctionDecl *Primary = MD; - if (MD->getTemplatedKind() != FunctionDecl::TK_NonTemplate) + if (const FunctionDecl *Pattern = MD->getTemplateInstantiationPattern()) // Find the uninstantiated declaration that actually had the '= default' // on it. - MD->getTemplateInstantiationPattern()->isDefined(Primary); + Pattern->isDefined(Primary); if (Primary == Primary->getCanonicalDecl()) return; @@ -10963,14 +11132,16 @@ void DelegatingCycleHelper(CXXConstructorDecl* Ctor, if (Ctor->isInvalidDecl()) return; - const FunctionDecl *FNTarget = 0; - CXXConstructorDecl *Target; - - // We ignore the result here since if we don't have a body, Target will be - // null below. - (void)Ctor->getTargetConstructor()->hasBody(FNTarget); - Target -= const_cast(cast_or_null(FNTarget)); + CXXConstructorDecl *Target = Ctor->getTargetConstructor(); + + // Target may not be determinable yet, for instance if this is a dependent + // call in an uninstantiated template. + if (Target) { + const FunctionDecl *FNTarget = 0; + (void)Target->hasBody(FNTarget); + Target = const_cast( + cast_or_null(FNTarget)); + } CXXConstructorDecl *Canonical = Ctor->getCanonicalDecl(), // Avoid dereferencing a null pointer here. @@ -10994,17 +11165,18 @@ void DelegatingCycleHelper(CXXConstructorDecl* Ctor, diag::warn_delegating_ctor_cycle) << Ctor; - // Don't add a note for a function delegating directo to itself. + // Don't add a note for a function delegating directly to itself. 
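// The delegation cycle that DelegatingCycleHelper above reports (names
// invented): each constructor ultimately delegates back to the other.
struct Conn {
  Conn()      : Conn(0) { }  // warning: constructor for 'Conn' creates a
                             // delegation cycle
  Conn(int p) : Conn()  { }  // note: it delegates to ...
};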
if (TCanonical != Canonical) S.Diag(Target->getLocation(), diag::note_it_delegates_to); CXXConstructorDecl *C = Target; while (C->getCanonicalDecl() != Canonical) { + const FunctionDecl *FNTarget = 0; (void)C->getTargetConstructor()->hasBody(FNTarget); assert(FNTarget && "Ctor cycle through bodiless function"); - C - = const_cast(cast(FNTarget)); + C = const_cast( + cast(FNTarget)); S.Diag(C->getLocation(), diag::note_which_delegates_to); } } @@ -11027,9 +11199,8 @@ void Sema::CheckDelegatingCtorCycles() { for (DelegatingCtorDeclsType::iterator I = DelegatingCtorDecls.begin(ExternalSource), E = DelegatingCtorDecls.end(); - I != E; ++I) { - DelegatingCycleHelper(*I, Valid, Invalid, Current, *this); - } + I != E; ++I) + DelegatingCycleHelper(*I, Valid, Invalid, Current, *this); for (CI = Invalid.begin(), CE = Invalid.end(); CI != CE; ++CI) (*CI)->setInvalidDecl(); diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp index 9da4d69..c4e91e8 100644 --- a/lib/Sema/SemaDeclObjC.cpp +++ b/lib/Sema/SemaDeclObjC.cpp @@ -282,6 +282,25 @@ void Sema::AddAnyMethodToGlobalPool(Decl *D) { AddFactoryMethodToGlobalPool(MDecl, true); } +/// HasExplicitOwnershipAttr - returns true when pointer to ObjC pointer +/// has explicit ownership attribute; false otherwise. +static bool +HasExplicitOwnershipAttr(Sema &S, ParmVarDecl *Param) { + QualType T = Param->getType(); + + if (const PointerType *PT = T->getAs()) { + T = PT->getPointeeType(); + } else if (const ReferenceType *RT = T->getAs()) { + T = RT->getPointeeType(); + } else { + return true; + } + + // If we have a lifetime qualifier, but it's local, we must have + // inferred it. So, it is implicit. + return !T.getLocalQualifiers().hasObjCLifetime(); +} + /// ActOnStartOfObjCMethodDef - This routine sets up parameters; invisible /// and user declared, in the method definition's AST. void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) { @@ -313,6 +332,12 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) { RequireCompleteType(Param->getLocation(), Param->getType(), diag::err_typecheck_decl_incomplete_type)) Param->setInvalidDecl(); + if (!Param->isInvalidDecl() && + getLangOpts().ObjCAutoRefCount && + !HasExplicitOwnershipAttr(*this, Param)) + Diag(Param->getLocation(), diag::warn_arc_strong_pointer_objc_pointer) << + Param->getType(); + if ((*PI)->getIdentifier()) PushOnScopeChains(*PI, FnBodyScope); } @@ -345,8 +370,10 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) { // Warn on deprecated methods under -Wdeprecated-implementations, // and prepare for warning on missing super calls. if (ObjCInterfaceDecl *IC = MDecl->getClassInterface()) { - if (ObjCMethodDecl *IMD = - IC->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod())) + ObjCMethodDecl *IMD = + IC->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod()); + + if (IMD) DiagnoseObjCImplementedDeprecations(*this, dyn_cast(IMD), MDecl->getLocation(), 0); @@ -356,13 +383,23 @@ void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) { // Finally, in ActOnFinishFunctionBody() (SemaDecl), warn if flag is set. // Only do this if the current class actually has a superclass. 
if (IC->getSuperClass()) { - getCurFunction()->ObjCShouldCallSuperDealloc = - !(Context.getLangOpts().ObjCAutoRefCount || - Context.getLangOpts().getGC() == LangOptions::GCOnly) && - MDecl->getMethodFamily() == OMF_dealloc; - getCurFunction()->ObjCShouldCallSuperFinalize = - Context.getLangOpts().getGC() != LangOptions::NonGC && - MDecl->getMethodFamily() == OMF_finalize; + ObjCMethodFamily Family = MDecl->getMethodFamily(); + if (Family == OMF_dealloc) { + if (!(getLangOpts().ObjCAutoRefCount || + getLangOpts().getGC() == LangOptions::GCOnly)) + getCurFunction()->ObjCShouldCallSuper = true; + + } else if (Family == OMF_finalize) { + if (Context.getLangOpts().getGC() != LangOptions::NonGC) + getCurFunction()->ObjCShouldCallSuper = true; + + } else { + const ObjCMethodDecl *SuperMethod = + IC->getSuperClass()->lookupMethod(MDecl->getSelector(), + MDecl->isInstanceMethod()); + getCurFunction()->ObjCShouldCallSuper = + (SuperMethod && SuperMethod->hasAttr()); + } } } } @@ -510,7 +547,7 @@ ActOnStartClassInterface(SourceLocation AtInterfaceLoc, // Check then save referenced protocols. if (NumProtoRefs) { - IDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs, + IDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs, ProtoLocs, Context); IDecl->setEndOfDefinitionLoc(EndProtoLoc); } @@ -652,7 +689,7 @@ Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc, if (!err && NumProtoRefs ) { /// Check then save referenced protocols. - PDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs, + PDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs, ProtoLocs, Context); } @@ -819,11 +856,11 @@ ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, CurContext->addDecl(CDecl); if (NumProtoRefs) { - CDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs, + CDecl->setProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs, ProtoLocs, Context); // Protocols in the class extension belong to the class. if (CDecl->IsClassExtension()) - IDecl->mergeClassExtensionProtocolList((ObjCProtocolDecl**)ProtoRefs, + IDecl->mergeClassExtensionProtocolList((ObjCProtocolDecl*const*)ProtoRefs, NumProtoRefs, Context); } @@ -1545,9 +1582,9 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc, E = PDecl->instmeth_end(); I != E; ++I) { ObjCMethodDecl *method = *I; if (method->getImplementationControl() != ObjCMethodDecl::Optional && - !method->isSynthesized() && !InsMap.count(method->getSelector()) && - (!Super || - !Super->lookupInstanceMethod(method->getSelector()))) { + !method->isPropertyAccessor() && + !InsMap.count(method->getSelector()) && + (!Super || !Super->lookupInstanceMethod(method->getSelector()))) { // If a method is not implemented in the category implementation but // has been declared in its primary class, superclass, // or in one of their protocols, no need to issue the warning. 
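[Editorial note, not part of the patch] The casts above now target ObjCProtocolDecl *const * instead of ObjCProtocolDecl **, matching a more const-correct parameter on setProtocolList. As a general C++ illustration of why that direction of conversion is the safe one (the function names below are hypothetical):

    void takesConstInner(int *const *p);   // hypothetical: inner pointer is const
    void takesConstBoth(const int **q);    // hypothetical: pointee is const

    void f(int **pp) {
      takesConstInner(pp);    // OK: int** -> int* const* is a valid qualification conversion
      // takesConstBoth(pp);  // ill-formed without a cast: it would let a
      //                      // 'const int*' be stored through a plain 'int**'
    }
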
@@ -1560,7 +1597,7 @@ void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc, if (ObjCMethodDecl *MethodInClass = IDecl->lookupInstanceMethod(method->getSelector(), true /*shallowCategoryLookup*/)) - if (C || MethodInClass->isSynthesized()) + if (C || MethodInClass->isPropertyAccessor()) continue; unsigned DIAG = diag::warn_unimplemented_protocol_method; if (Diags.getDiagnosticLevel(DIAG, ImpLoc) @@ -1621,7 +1658,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap, if (InsMapSeen.count((*I)->getSelector())) continue; InsMapSeen.insert((*I)->getSelector()); - if (!(*I)->isSynthesized() && + if (!(*I)->isPropertyAccessor() && !InsMap.count((*I)->getSelector())) { if (ImmediateClass) WarnUndefinedMethod(IMPDecl->getLocation(), *I, IncompleteImpl, @@ -1638,7 +1675,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap, if (!WarnCategoryMethodImpl) WarnConflictingTypedMethods(ImpMethodDecl, MethodDecl, isa(CDecl)); - else if (!MethodDecl->isSynthesized()) + else if (!MethodDecl->isPropertyAccessor()) WarnExactTypedMethods(ImpMethodDecl, MethodDecl, isa(CDecl)); } @@ -1672,14 +1709,26 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap, } if (ObjCInterfaceDecl *I = dyn_cast (CDecl)) { - // Also methods in class extensions need be looked at next. - for (const ObjCCategoryDecl *ClsExtDecl = I->getFirstClassExtension(); - ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) - MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, - IMPDecl, - const_cast(ClsExtDecl), - IncompleteImpl, false, - WarnCategoryMethodImpl); + // when checking that methods in implementation match their declaration, + // i.e. when WarnCategoryMethodImpl is false, check declarations in class + // extension; as well as those in categories. + if (!WarnCategoryMethodImpl) + for (const ObjCCategoryDecl *CDeclChain = I->getCategoryList(); + CDeclChain; CDeclChain = CDeclChain->getNextClassCategory()) + MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, + IMPDecl, + const_cast(CDeclChain), + IncompleteImpl, false, + WarnCategoryMethodImpl); + else + // Also methods in class extensions need be looked at next. + for (const ObjCCategoryDecl *ClsExtDecl = I->getFirstClassExtension(); + ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) + MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, + IMPDecl, + const_cast(ClsExtDecl), + IncompleteImpl, false, + WarnCategoryMethodImpl); // Check for any implementation of a methods declared in protocol. 
for (ObjCInterfaceDecl::all_protocol_iterator @@ -2339,11 +2388,11 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, CExtDecl; CExtDecl = CExtDecl->getNextClassExtension()) { if (ObjCMethodDecl *GetterMethod = CExtDecl->getInstanceMethod(Property->getGetterName())) - GetterMethod->setSynthesized(true); + GetterMethod->setPropertyAccessor(true); if (!Property->isReadOnly()) if (ObjCMethodDecl *SetterMethod = CExtDecl->getInstanceMethod(Property->getSetterName())) - SetterMethod->setSynthesized(true); + SetterMethod->setPropertyAccessor(true); } } } @@ -2435,26 +2484,49 @@ CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) { } static inline +unsigned countAlignAttr(const AttrVec &A) { + unsigned count=0; + for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i) + if ((*i)->getKind() == attr::Aligned) + ++count; + return count; +} + +static inline bool containsInvalidMethodImplAttribute(ObjCMethodDecl *IMD, const AttrVec &A) { // If method is only declared in implementation (private method), // No need to issue any diagnostics on method definition with attributes. if (!IMD) return false; - + // method declared in interface has no attribute. - // But implementation has attributes. This is invalid + // But implementation has attributes. This is invalid. + // Except when implementation has 'Align' attribute which is + // immaterial to method declared in interface. if (!IMD->hasAttrs()) - return true; + return (A.size() > countAlignAttr(A)); const AttrVec &D = IMD->getAttrs(); - if (D.size() != A.size()) - return true; + unsigned countAlignOnImpl = countAlignAttr(A); + if (!countAlignOnImpl && (A.size() != D.size())) + return true; + else if (countAlignOnImpl) { + unsigned countAlignOnDecl = countAlignAttr(D); + if (countAlignOnDecl && (A.size() != D.size())) + return true; + else if (!countAlignOnDecl && + ((A.size()-countAlignOnImpl) != D.size())) + return true; + } + // attributes on method declaration and definition must match exactly. // Note that we have at most a couple of attributes on methods, so this // n*n search is good enough. for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i) { + if ((*i)->getKind() == attr::Aligned) + continue; bool match = false; for (AttrVec::const_iterator i1 = D.begin(), e1 = D.end(); i1 != e1; ++i1) { if ((*i)->getKind() == (*i1)->getKind()) { @@ -2465,6 +2537,7 @@ bool containsInvalidMethodImplAttribute(ObjCMethodDecl *IMD, if (!match) return true; } + return false; } @@ -2525,7 +2598,7 @@ public: // with this selector before. Sema::GlobalMethodPool::iterator it = S.MethodPool.find(selector); if (it == S.MethodPool.end()) { - if (!S.ExternalSource) return; + if (!S.getExternalSource()) return; S.ReadMethodPool(selector); it = S.MethodPool.find(selector); @@ -2767,7 +2840,7 @@ Decl *Sema::ActOnMethodDeclaration( ResultTInfo, CurContext, MethodType == tok::minus, isVariadic, - /*isSynthesized=*/false, + /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/false, /*isDefined=*/false, MethodDeclKind == tok::objc_optional ? ObjCMethodDecl::Optional diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp index e6266fb..e1f4888 100644 --- a/lib/Sema/SemaExceptionSpec.cpp +++ b/lib/Sema/SemaExceptionSpec.cpp @@ -120,6 +120,24 @@ Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) { return SourceDecl->getType()->castAs(); } +/// Determine whether a function has an implicitly-generated exception +/// specification. 
+static bool hasImplicitExceptionSpec(FunctionDecl *Decl) { + if (!isa(Decl) && + Decl->getDeclName().getCXXOverloadedOperator() != OO_Delete && + Decl->getDeclName().getCXXOverloadedOperator() != OO_Array_Delete) + return false; + + // If the user didn't declare the function, its exception specification must + // be implicit. + if (!Decl->getTypeSourceInfo()) + return true; + + const FunctionProtoType *Ty = + Decl->getTypeSourceInfo()->getType()->getAs(); + return !Ty->hasExceptionSpec(); +} + bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) { OverloadedOperatorKind OO = New->getDeclName().getCXXOverloadedOperator(); bool IsOperatorNew = OO == OO_New || OO == OO_Array_New; @@ -129,25 +147,35 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) { if (getLangOpts().MicrosoftExt) DiagID = diag::warn_mismatched_exception_spec; - if (!CheckEquivalentExceptionSpec(PDiag(DiagID), - PDiag(diag::note_previous_declaration), - Old->getType()->getAs(), - Old->getLocation(), - New->getType()->getAs(), - New->getLocation(), - &MissingExceptionSpecification, - &MissingEmptyExceptionSpecification, - /*AllowNoexceptAllMatchWithNoSpec=*/true, - IsOperatorNew)) + // Check the types as written: they must match before any exception + // specification adjustment is applied. + if (!CheckEquivalentExceptionSpec( + PDiag(DiagID), PDiag(diag::note_previous_declaration), + Old->getType()->getAs(), Old->getLocation(), + New->getType()->getAs(), New->getLocation(), + &MissingExceptionSpecification, &MissingEmptyExceptionSpecification, + /*AllowNoexceptAllMatchWithNoSpec=*/true, IsOperatorNew)) { + // C++11 [except.spec]p4 [DR1492]: + // If a declaration of a function has an implicit + // exception-specification, other declarations of the function shall + // not specify an exception-specification. + if (getLangOpts().CPlusPlus0x && + hasImplicitExceptionSpec(Old) != hasImplicitExceptionSpec(New)) { + Diag(New->getLocation(), diag::ext_implicit_exception_spec_mismatch) + << hasImplicitExceptionSpec(Old); + if (!Old->getLocation().isInvalid()) + Diag(Old->getLocation(), diag::note_previous_declaration); + } return false; + } // The failure was something other than an empty exception // specification; return an error. if (!MissingExceptionSpecification && !MissingEmptyExceptionSpecification) return true; - const FunctionProtoType *NewProto - = New->getType()->getAs(); + const FunctionProtoType *NewProto = + New->getType()->getAs(); // The new function declaration is only missing an empty exception // specification "throw()". If the throw() specification came from a @@ -172,8 +200,8 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) { } if (MissingExceptionSpecification && NewProto) { - const FunctionProtoType *OldProto - = Old->getType()->getAs(); + const FunctionProtoType *OldProto = + Old->getType()->getAs(); FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo(); EPI.ExceptionSpecType = OldProto->getExceptionSpecType(); @@ -290,14 +318,17 @@ bool Sema::CheckEquivalentExceptionSpec( unsigned DiagID = diag::err_mismatched_exception_spec; if (getLangOpts().MicrosoftExt) DiagID = diag::warn_mismatched_exception_spec; - return CheckEquivalentExceptionSpec( - PDiag(DiagID), + return CheckEquivalentExceptionSpec(PDiag(DiagID), PDiag(diag::note_previous_declaration), Old, OldLoc, New, NewLoc); } /// CheckEquivalentExceptionSpec - Check if the two types have compatible /// exception specifications. See C++ [except.spec]p3. 
+/// +/// \return \c false if the exception specifications match, \c true if there is +/// a problem. If \c true is returned, either a diagnostic has already been +/// produced or \c *MissingExceptionSpecification is set to \c true. bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, @@ -1029,6 +1060,7 @@ CanThrowResult Sema::canThrow(const Expr *E) { case Expr::PseudoObjectExprClass: case Expr::SubstNonTypeTemplateParmExprClass: case Expr::SubstNonTypeTemplateParmPackExprClass: + case Expr::FunctionParmPackExprClass: case Expr::UnaryExprOrTypeTraitExprClass: case Expr::UnresolvedLookupExprClass: case Expr::UnresolvedMemberExprClass: diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp index 3875ba1..bf4abfc 100644 --- a/lib/Sema/SemaExpr.cpp +++ b/lib/Sema/SemaExpr.cpp @@ -66,6 +66,15 @@ bool Sema::CanUseDecl(NamedDecl *D) { return true; } +static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) { + // Warn if this is used but marked unused. + if (D->hasAttr()) { + const Decl *DC = cast(S.getCurObjCLexicalContext()); + if (!DC->hasAttr()) + S.Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName(); + } +} + static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass) { @@ -78,6 +87,17 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S, if (const EnumDecl *TheEnumDecl = dyn_cast(DC)) Result = TheEnumDecl->getAvailability(&Message); } + + const ObjCPropertyDecl *ObjCPDecl = 0; + if (Result == AR_Deprecated || Result == AR_Unavailable) { + if (const ObjCMethodDecl *MD = dyn_cast(D)) { + if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) { + AvailabilityResult PDeclResult = PD->getAvailability(0); + if (PDeclResult == Result) + ObjCPDecl = PD; + } + } + } switch (Result) { case AR_Available: @@ -85,23 +105,30 @@ static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S, break; case AR_Deprecated: - S.EmitDeprecationWarning(D, Message, Loc, UnknownObjCClass); + S.EmitDeprecationWarning(D, Message, Loc, UnknownObjCClass, ObjCPDecl); break; case AR_Unavailable: if (S.getCurContextAvailability() != AR_Unavailable) { if (Message.empty()) { - if (!UnknownObjCClass) + if (!UnknownObjCClass) { S.Diag(Loc, diag::err_unavailable) << D->getDeclName(); + if (ObjCPDecl) + S.Diag(ObjCPDecl->getLocation(), diag::note_property_attribute) + << ObjCPDecl->getDeclName() << 1; + } else S.Diag(Loc, diag::warn_unavailable_fwdclass_message) << D->getDeclName(); } - else + else S.Diag(Loc, diag::err_unavailable_message) << D->getDeclName() << Message; - S.Diag(D->getLocation(), diag::note_unavailable_here) - << isa(D) << false; + S.Diag(D->getLocation(), diag::note_unavailable_here) + << isa(D) << false; + if (ObjCPDecl) + S.Diag(ObjCPDecl->getLocation(), diag::note_property_attribute) + << ObjCPDecl->getDeclName() << 1; } break; } @@ -250,9 +277,7 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, } DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass); - // Warn if this is used but marked unused. 
- if (D->hasAttr()) - Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName(); + DiagnoseUnusedOfDecl(*this, D, Loc); diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc); @@ -502,7 +527,7 @@ ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E) { Res = DefaultLvalueConversion(Res.take()); if (Res.isInvalid()) return ExprError(); - return move(Res); + return Res; } @@ -1098,8 +1123,8 @@ Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc, unsigned NumAssocs = ArgTypes.size(); assert(NumAssocs == ArgExprs.size()); - ParsedType *ParsedTypes = ArgTypes.release(); - Expr **Exprs = ArgExprs.release(); + ParsedType *ParsedTypes = ArgTypes.data(); + Expr **Exprs = ArgExprs.data(); TypeSourceInfo **Types = new TypeSourceInfo*[NumAssocs]; for (unsigned i = 0; i < NumAssocs; ++i) { @@ -1185,8 +1210,9 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc, if (IsResultDependent) return Owned(new (Context) GenericSelectionExpr( Context, KeyLoc, ControllingExpr, - Types, Exprs, NumAssocs, DefaultLoc, - RParenLoc, ContainsUnexpandedParameterPack)); + llvm::makeArrayRef(Types, NumAssocs), + llvm::makeArrayRef(Exprs, NumAssocs), + DefaultLoc, RParenLoc, ContainsUnexpandedParameterPack)); SmallVector CompatIndices; unsigned DefaultIndex = -1U; @@ -1240,8 +1266,9 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc, return Owned(new (Context) GenericSelectionExpr( Context, KeyLoc, ControllingExpr, - Types, Exprs, NumAssocs, DefaultLoc, - RParenLoc, ContainsUnexpandedParameterPack, + llvm::makeArrayRef(Types, NumAssocs), + llvm::makeArrayRef(Exprs, NumAssocs), + DefaultLoc, RParenLoc, ContainsUnexpandedParameterPack, ResultIndex)); } @@ -1402,6 +1429,15 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, MarkDeclRefReferenced(E); + if (getLangOpts().ObjCARCWeak && isa(D) && + Ty.getObjCLifetime() == Qualifiers::OCL_Weak) { + DiagnosticsEngine::Level Level = + Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, + E->getLocStart()); + if (Level != DiagnosticsEngine::Ignored) + getCurFunction()->recordUseOfWeak(E); + } + // Just in case we're building an illegal pointer-to-member. FieldDecl *FD = dyn_cast(D); if (FD && FD->isBitField()) @@ -1428,11 +1464,9 @@ Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id, Buffer.setLAngleLoc(Id.TemplateId->LAngleLoc); Buffer.setRAngleLoc(Id.TemplateId->RAngleLoc); - ASTTemplateArgsPtr TemplateArgsPtr(*this, - Id.TemplateId->getTemplateArgs(), + ASTTemplateArgsPtr TemplateArgsPtr(Id.TemplateId->getTemplateArgs(), Id.TemplateId->NumArgs); translateTemplateArguments(TemplateArgsPtr, Buffer); - TemplateArgsPtr.release(); TemplateName TName = Id.TemplateId->Template.get(); SourceLocation TNameLoc = Id.TemplateId->TemplateNameLoc; @@ -1606,7 +1640,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, Diag(R.getNameLoc(), diag::err_no_member_suggest) << Name << computeDeclContext(SS, false) << CorrectedQuotedStr << SS.getRange() - << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr); + << FixItHint::CreateReplacement(Corrected.getCorrectionRange(), + CorrectedStr); if (ND) Diag(ND->getLocation(), diag::note_previous_decl) << CorrectedQuotedStr; @@ -1797,7 +1832,7 @@ ExprResult Sema::ActOnIdExpression(Scope *S, // lookup fails and no expression will be built to reference it. if (!E.isInvalid() && !E.get()) return ExprError(); - return move(E); + return E; } } } @@ -1860,9 +1895,10 @@ ExprResult Sema::ActOnIdExpression(Scope *S, /// this path. 
ExprResult Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, - const DeclarationNameInfo &NameInfo) { - DeclContext *DC; - if (!(DC = computeDeclContext(SS, false)) || DC->isDependentContext()) + const DeclarationNameInfo &NameInfo, + bool IsAddressOfOperand) { + DeclContext *DC = computeDeclContext(SS, false); + if (!DC) return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo, /*TemplateArgs=*/0); @@ -1875,13 +1911,26 @@ Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, if (R.isAmbiguous()) return ExprError(); + if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation) + return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(), + NameInfo, /*TemplateArgs=*/0); + if (R.empty()) { Diag(NameInfo.getLoc(), diag::err_no_member) << NameInfo.getName() << DC << SS.getRange(); return ExprError(); } - return BuildDeclarationNameExpr(SS, R, /*ADL*/ false); + // Defend against this resolving to an implicit member access. We usually + // won't get here if this might be a legitimate a class member (we end up in + // BuildMemberReferenceExpr instead), but this can be valid if we're forming + // a pointer-to-member or in an unevaluated context in C++11. + if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand) + return BuildPossibleImplicitMemberExpr(SS, + /*TemplateKWLoc=*/SourceLocation(), + R, /*TemplateArgs=*/0); + + return BuildDeclarationNameExpr(SS, R, /* ADL */ false); } /// LookupInObjCMethod - The parser has read a name in, and Sema has @@ -1965,9 +2014,25 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S, ObjCMethodFamily MF = CurMethod->getMethodFamily(); if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize) Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName(); - return Owned(new (Context) - ObjCIvarRefExpr(IV, IV->getType(), Loc, - SelfExpr.take(), true, true)); + + ObjCIvarRefExpr *Result = new (Context) ObjCIvarRefExpr(IV, IV->getType(), + Loc, + SelfExpr.take(), + true, true); + + if (getLangOpts().ObjCAutoRefCount) { + if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) { + DiagnosticsEngine::Level Level = + Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, Loc); + if (Level != DiagnosticsEngine::Ignored) + getCurFunction()->recordUseOfWeak(Result); + } + if (CurContext->isClosure()) + Diag(Loc, diag::warn_implicitly_retains_self) + << FixItHint::CreateInsertion(Loc, "self->"); + } + + return Owned(Result); } } else if (CurMethod->isInstanceMethod()) { // We should warn if a local variable hides an ivar. @@ -2416,6 +2481,14 @@ Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS, } case Decl::Function: { + if (unsigned BID = cast(VD)->getBuiltinID()) { + if (!Context.BuiltinInfo.isPredefinedLibFunction(BID)) { + type = Context.BuiltinFnTy; + valueKind = VK_RValue; + break; + } + } + const FunctionType *fty = type->castAs(); // If we're referring to a function with an __unknown_anytype @@ -2615,19 +2688,20 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) { return ActOnIntegerConstant(Tok.getLocation(), Val-'0'); } - SmallString<512> IntegerBuffer; - // Add padding so that NumericLiteralParser can overread by one character. - IntegerBuffer.resize(Tok.getLength()+1); - const char *ThisTokBegin = &IntegerBuffer[0]; + SmallString<128> SpellingBuffer; + // NumericLiteralParser wants to overread by one character. Add padding to + // the buffer in case the token is copied to the buffer. 
If getSpelling() + // returns a StringRef to the memory buffer, it should have a null char at + // the EOF, so it is also safe. + SpellingBuffer.resize(Tok.getLength() + 1); // Get the spelling of the token, which eliminates trigraphs, etc. bool Invalid = false; - unsigned ActualLength = PP.getSpelling(Tok, ThisTokBegin, &Invalid); + StringRef TokSpelling = PP.getSpelling(Tok, SpellingBuffer, &Invalid); if (Invalid) return ExprError(); - NumericLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength, - Tok.getLocation(), PP); + NumericLiteralParser Literal(TokSpelling, Tok.getLocation(), PP); if (Literal.hadError) return ExprError(); @@ -2693,7 +2767,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) { Context.CharTy, llvm::APInt(32, Length + 1), ArrayType::Normal, 0); Expr *Lit = StringLiteral::Create( - Context, StringRef(ThisTokBegin, Length), StringLiteral::Ascii, + Context, StringRef(TokSpelling.data(), Length), StringLiteral::Ascii, /*Pascal*/false, StrTy, &TokLoc, 1); return BuildLiteralOperatorCall(R, OpNameInfo, llvm::makeArrayRef(&Lit, 1), TokLoc); @@ -2709,7 +2783,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) { bool CharIsUnsigned = Context.CharTy->isUnsignedIntegerType(); llvm::APSInt Value(CharBits, CharIsUnsigned); for (unsigned I = 0, N = Literal.getUDSuffixOffset(); I != N; ++I) { - Value = ThisTokBegin[I]; + Value = TokSpelling[I]; TemplateArgument Arg(Context, Value, Context.CharTy); TemplateArgumentLocInfo ArgInfo; ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo)); @@ -2747,11 +2821,15 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) { } else { QualType Ty; - // long long is a C99 feature. - if (!getLangOpts().C99 && Literal.isLongLong) - Diag(Tok.getLocation(), - getLangOpts().CPlusPlus0x ? - diag::warn_cxx98_compat_longlong : diag::ext_longlong); + // 'long long' is a C99 or C++11 feature. + if (!getLangOpts().C99 && Literal.isLongLong) { + if (getLangOpts().CPlusPlus) + Diag(Tok.getLocation(), + getLangOpts().CPlusPlus0x ? + diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong); + else + Diag(Tok.getLocation(), diag::ext_c99_longlong); + } // Get the value in the widest-possible width. 
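[Editorial note, not part of the patch] The reworked check above splits the old "long long is a C99 feature" warning by language mode; the diagnostic names below are taken from the hunk, the exact wording is approximate:

    long long big = 1LL;
    // -std=c89 / gnu89 : warns via ext_c99_longlong ('long long' is a C99 extension)
    // -std=c++98/03    : warns via ext_cxx11_longlong ('long long' is a C++11 extension)
    // -std=c++11       : silent, unless -Wc++98-compat enables warn_cxx98_compat_longlong
    // -std=c99 / c11   : silent
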
unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth(); @@ -3140,7 +3218,7 @@ Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, Expr *ArgEx = (Expr *)TyOrEx; ExprResult Result = CreateUnaryExprOrTypeTraitExpr(ArgEx, OpLoc, ExprKind); - return move(Result); + return Result; } static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc, @@ -3167,7 +3245,7 @@ static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc, ExprResult PR = S.CheckPlaceholderExpr(V.get()); if (PR.isInvalid()) return QualType(); if (PR.get() != V.get()) { - V = move(PR); + V = PR; return CheckRealImagOperand(S, V, Loc, IsReal); } @@ -3442,8 +3520,7 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc, Expr *ResultE = Result.takeAs(); InitializationSequence InitSeq(*this, Entity, Kind, &ResultE, 1); - Result = InitSeq.Perform(*this, Entity, Kind, - MultiExprArg(*this, &ResultE, 1)); + Result = InitSeq.Perform(*this, Entity, Kind, ResultE); if (Result.isInvalid()) return ExprError(); @@ -3776,28 +3853,25 @@ ExprResult Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig, bool IsExecConfig) { - unsigned NumArgs = ArgExprs.size(); - // Since this might be a postfix expression, get rid of ParenListExprs. ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Fn); if (Result.isInvalid()) return ExprError(); Fn = Result.take(); - Expr **Args = ArgExprs.release(); - if (getLangOpts().CPlusPlus) { // If this is a pseudo-destructor expression, build the call immediately. if (isa(Fn)) { - if (NumArgs > 0) { + if (!ArgExprs.empty()) { // Pseudo-destructor calls should not have any arguments. Diag(Fn->getLocStart(), diag::err_pseudo_dtor_call_with_args) << FixItHint::CreateRemoval( - SourceRange(Args[0]->getLocStart(), - Args[NumArgs-1]->getLocEnd())); + SourceRange(ArgExprs[0]->getLocStart(), + ArgExprs.back()->getLocEnd())); } - return Owned(new (Context) CallExpr(Context, Fn, 0, 0, Context.VoidTy, - VK_RValue, RParenLoc)); + return Owned(new (Context) CallExpr(Context, Fn, MultiExprArg(), + Context.VoidTy, VK_RValue, + RParenLoc)); } // Determine whether this is a dependent call inside a C++ template, @@ -3807,17 +3881,16 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, bool Dependent = false; if (Fn->isTypeDependent()) Dependent = true; - else if (Expr::hasAnyTypeDependentArguments( - llvm::makeArrayRef(Args, NumArgs))) + else if (Expr::hasAnyTypeDependentArguments(ArgExprs)) Dependent = true; if (Dependent) { if (ExecConfig) { return Owned(new (Context) CUDAKernelCallExpr( - Context, Fn, cast(ExecConfig), Args, NumArgs, + Context, Fn, cast(ExecConfig), ArgExprs, Context.DependentTy, VK_RValue, RParenLoc)); } else { - return Owned(new (Context) CallExpr(Context, Fn, Args, NumArgs, + return Owned(new (Context) CallExpr(Context, Fn, ArgExprs, Context.DependentTy, VK_RValue, RParenLoc)); } @@ -3825,8 +3898,9 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, // Determine whether this is a call to an object (C++ [over.call.object]). 
if (Fn->getType()->isRecordType()) - return Owned(BuildCallToObjectOfClassType(S, Fn, LParenLoc, Args, NumArgs, - RParenLoc)); + return Owned(BuildCallToObjectOfClassType(S, Fn, LParenLoc, + ArgExprs.data(), + ArgExprs.size(), RParenLoc)); if (Fn->getType() == Context.UnknownAnyTy) { ExprResult result = rebuildUnknownAnyFunction(*this, Fn); @@ -3835,8 +3909,8 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, } if (Fn->getType() == Context.BoundMemberTy) { - return BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs, - RParenLoc); + return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs.data(), + ArgExprs.size(), RParenLoc); } } @@ -3849,11 +3923,11 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, OverloadExpr *ovl = find.Expression; if (isa(ovl)) { UnresolvedLookupExpr *ULE = cast(ovl); - return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, Args, NumArgs, - RParenLoc, ExecConfig); + return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, ArgExprs.data(), + ArgExprs.size(), RParenLoc, ExecConfig); } else { - return BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs, - RParenLoc); + return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs.data(), + ArgExprs.size(), RParenLoc); } } } @@ -3877,8 +3951,9 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, else if (isa(NakedFn)) NDecl = cast(NakedFn)->getMemberDecl(); - return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, Args, NumArgs, RParenLoc, - ExecConfig, IsExecConfig); + return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs.data(), + ArgExprs.size(), RParenLoc, ExecConfig, + IsExecConfig); } ExprResult @@ -3932,9 +4007,19 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation RParenLoc, Expr *Config, bool IsExecConfig) { FunctionDecl *FDecl = dyn_cast_or_null(NDecl); + unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0); // Promote the function operand. - ExprResult Result = UsualUnaryConversions(Fn); + // We special-case function promotion here because we only allow promoting + // builtin functions to function pointers in the callee of a call. + ExprResult Result; + if (BuiltinID && + Fn->getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn)) { + Result = ImpCastExprToType(Fn, Context.getPointerType(FDecl->getType()), + CK_BuiltinFnToFnPtr).take(); + } else { + Result = UsualUnaryConversions(Fn); + } if (Result.isInvalid()) return ExprError(); Fn = Result.take(); @@ -3945,19 +4030,17 @@ Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, if (Config) TheCall = new (Context) CUDAKernelCallExpr(Context, Fn, cast(Config), - Args, NumArgs, + llvm::makeArrayRef(Args,NumArgs), Context.BoolTy, VK_RValue, RParenLoc); else TheCall = new (Context) CallExpr(Context, Fn, - Args, NumArgs, + llvm::makeArrayRef(Args, NumArgs), Context.BoolTy, VK_RValue, RParenLoc); - unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0); - // Bail out early if calling a builtin with custom typechecking. 
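[Editorial note, not part of the patch] The special case above only performs the CK_BuiltinFnToFnPtr promotion when the builtin appears as the callee of a call; combined with the BuiltinFnTy placeholder handling added elsewhere in this patch (err_builtin_fn_use), non-library builtins can be called but not otherwise used as values. A hedged sketch of the user-visible effect:

    void f() {
      __builtin_trap();                    // OK: direct call, promoted to a function pointer internally
      // void (*p)(void) = __builtin_trap; // rejected: builtin functions must be directly called
    }
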
if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) return CheckBuiltinFunctionCall(BuiltinID, TheCall); @@ -4143,9 +4226,8 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceRange(LParenLoc, RParenLoc), /*InitList=*/true); InitializationSequence InitSeq(*this, Entity, Kind, &LiteralExpr, 1); - ExprResult Result = InitSeq.Perform(*this, Entity, Kind, - MultiExprArg(*this, &LiteralExpr, 1), - &literalType); + ExprResult Result = InitSeq.Perform(*this, Entity, Kind, LiteralExpr, + &literalType); if (Result.isInvalid()) return ExprError(); LiteralExpr = Result.get(); @@ -4167,28 +4249,25 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, ExprResult Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc) { - unsigned NumInit = InitArgList.size(); - Expr **InitList = InitArgList.release(); - // Immediately handle non-overload placeholders. Overloads can be // resolved contextually, but everything else here can't. - for (unsigned I = 0; I != NumInit; ++I) { - if (InitList[I]->getType()->isNonOverloadPlaceholderType()) { - ExprResult result = CheckPlaceholderExpr(InitList[I]); + for (unsigned I = 0, E = InitArgList.size(); I != E; ++I) { + if (InitArgList[I]->getType()->isNonOverloadPlaceholderType()) { + ExprResult result = CheckPlaceholderExpr(InitArgList[I]); // Ignore failures; dropping the entire initializer list because // of one failure would be terrible for indexing/etc. if (result.isInvalid()) continue; - InitList[I] = result.take(); + InitArgList[I] = result.take(); } } // Semantic analysis for initializers is done by ActOnDeclarator() and // CheckInitializer() - it requires knowledge of the object being intialized. - InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitList, - NumInit, RBraceLoc); + InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitArgList, + RBraceLoc); E->setType(Context.VoidTy); // FIXME: just a place holder for now. return Owned(E); } @@ -4575,8 +4654,7 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc, // FIXME: This means that pretty-printing the final AST will produce curly // braces instead of the original commas. InitListExpr *initE = new (Context) InitListExpr(Context, LParenLoc, - &initExprs[0], - initExprs.size(), RParenLoc); + initExprs, RParenLoc); initE->setType(Ty); return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE); } @@ -4603,10 +4681,8 @@ Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) { ExprResult Sema::ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val) { - unsigned nexprs = Val.size(); - Expr **exprs = reinterpret_cast(Val.release()); - assert((exprs != 0) && "ActOnParenOrParenListExpr() missing expr list"); - Expr *expr = new (Context) ParenListExpr(Context, L, exprs, nexprs, R); + assert(Val.data() != 0 && "ActOnParenOrParenListExpr() missing expr list"); + Expr *expr = new (Context) ParenListExpr(Context, L, Val, R); return Owned(expr); } @@ -4884,11 +4960,11 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult LHSResult = CheckPlaceholderExpr(LHS.get()); if (!LHSResult.isUsable()) return QualType(); - LHS = move(LHSResult); + LHS = LHSResult; ExprResult RHSResult = CheckPlaceholderExpr(RHS.get()); if (!RHSResult.isUsable()) return QualType(); - RHS = move(RHSResult); + RHS = RHSResult; // C++ is sufficiently different to merit its own checker. 
if (getLangOpts().CPlusPlus) @@ -5247,7 +5323,7 @@ static void DiagnoseConditionalPrecedence(Sema &Self, << BinaryOperator::getOpcodeStr(CondOpcode); SuggestParentheses(Self, OpLoc, - Self.PDiag(diag::note_precedence_conditional_silence) + Self.PDiag(diag::note_precedence_silence) << BinaryOperator::getOpcodeStr(CondOpcode), SourceRange(Condition->getLocStart(), Condition->getLocEnd())); @@ -5811,8 +5887,7 @@ static void ConstructTransparentUnion(Sema &S, ASTContext &C, // of the transparent union. Expr *E = EResult.take(); InitListExpr *Initializer = new (C) InitListExpr(C, SourceLocation(), - &E, 1, - SourceLocation()); + E, SourceLocation()); Initializer->setType(UnionType); Initializer->setInitializedFieldInUnion(Field); @@ -5910,7 +5985,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, !CheckObjCARCUnavailableWeakConversion(LHSType, RHS.get()->getType())) result = IncompatibleObjCWeakRef; - RHS = move(Res); + RHS = Res; return result; } @@ -6708,7 +6783,7 @@ static void diagnoseFunctionPointerToVoidComparison(Sema &S, SourceLocation Loc, } static bool isObjCObjectLiteral(ExprResult &E) { - switch (E.get()->getStmtClass()) { + switch (E.get()->IgnoreParenImpCasts()->getStmtClass()) { case Stmt::ObjCArrayLiteralClass: case Stmt::ObjCDictionaryLiteralClass: case Stmt::ObjCStringLiteralClass: @@ -6800,6 +6875,7 @@ static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc, LK_String } LiteralKind; + Literal = Literal->IgnoreParenImpCasts(); switch (Literal->getStmtClass()) { case Stmt::ObjCStringLiteralClass: // "string literal" @@ -7202,7 +7278,10 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS, (LHSType->isIntegerType() && RHSType->isAnyPointerType())) { unsigned DiagID = 0; bool isError = false; - if ((LHSIsNull && LHSType->isIntegerType()) || + if (LangOpts.DebuggerSupport) { + // Under a debugger, allow the comparison of pointers to integers, + // since users tend to want to compare addresses. + } else if ((LHSIsNull && LHSType->isIntegerType()) || (RHSIsNull && RHSType->isIntegerType())) { if (IsRelational && !getLangOpts().CPlusPlus) DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero; @@ -7419,12 +7498,12 @@ inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14] ExprResult LHSRes = PerformContextuallyConvertToBool(LHS.get()); if (LHSRes.isInvalid()) return InvalidOperands(Loc, LHS, RHS); - LHS = move(LHSRes); + LHS = LHSRes; ExprResult RHSRes = PerformContextuallyConvertToBool(RHS.get()); if (RHSRes.isInvalid()) return InvalidOperands(Loc, LHS, RHS); - RHS = move(RHSRes); + RHS = RHSRes; // C++ [expr.log.and]p2 // C++ [expr.log.or]p2 @@ -7683,10 +7762,31 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS, } if (ConvTy == Compatible) { - if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong) - checkRetainCycles(LHSExpr, RHS.get()); - else if (getLangOpts().ObjCAutoRefCount) + if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong) { + // Warn about retain cycles where a block captures the LHS, but + // not if the LHS is a simple variable into which the block is + // being stored...unless that variable can be captured by reference! + const Expr *InnerLHS = LHSExpr->IgnoreParenCasts(); + const DeclRefExpr *DRE = dyn_cast(InnerLHS); + if (!DRE || DRE->getDecl()->hasAttr()) + checkRetainCycles(LHSExpr, RHS.get()); + + // It is safe to assign a weak reference into a strong variable. 
+ // Although this code can still have problems: + // id x = self.weakProp; + // id y = self.weakProp; + // we do not warn to warn spuriously when 'x' and 'y' are on separate + // paths through the function. This should be revisited if + // -Wrepeated-use-of-weak is made flow-sensitive. + DiagnosticsEngine::Level Level = + Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, + RHS.get()->getLocStart()); + if (Level != DiagnosticsEngine::Ignored) + getCurFunction()->markSafeWeakUse(RHS.get()); + + } else if (getLangOpts().ObjCAutoRefCount) { checkUnsafeExprAssigns(Loc, LHSExpr, RHS.get()); + } } } else { // Compound assignment "x += y" @@ -7972,8 +8072,16 @@ static QualType CheckAddressOfOperand(Sema &S, ExprResult &OrigOp, // The method was named without a qualifier. } else if (!DRE->getQualifier()) { - S.Diag(OpLoc, diag::err_unqualified_pointer_member_function) - << op->getSourceRange(); + if (MD->getParent()->getName().empty()) + S.Diag(OpLoc, diag::err_unqualified_pointer_member_function) + << op->getSourceRange(); + else { + SmallString<32> Str; + StringRef Qual = (MD->getParent()->getName() + "::").toStringRef(Str); + S.Diag(OpLoc, diag::err_unqualified_pointer_member_function) + << op->getSourceRange() + << FixItHint::CreateInsertion(op->getSourceRange().getBegin(), Qual); + } } return S.Context.getMemberPointerType(op->getType(), @@ -8216,8 +8324,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc, InitializedEntity Entity = InitializedEntity::InitializeTemporary(LHSExpr->getType()); InitializationSequence InitSeq(*this, Entity, Kind, &RHSExpr, 1); - ExprResult Init = InitSeq.Perform(*this, Entity, Kind, - MultiExprArg(&RHSExpr, 1)); + ExprResult Init = InitSeq.Perform(*this, Entity, Kind, RHSExpr); if (Init.isInvalid()) return Init; RHSExpr = Init.take(); @@ -8340,7 +8447,8 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc, if (CompResultTy.isNull()) return Owned(new (Context) BinaryOperator(LHS.take(), RHS.take(), Opc, - ResultTy, VK, OK, OpLoc)); + ResultTy, VK, OK, OpLoc, + FPFeatures.fp_contract)); if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() != OK_ObjCProperty) { VK = VK_LValue; @@ -8348,7 +8456,8 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc, } return Owned(new (Context) CompoundAssignOperator(LHS.take(), RHS.take(), Opc, ResultTy, VK, OK, CompLHSTy, - CompResultTy, OpLoc)); + CompResultTy, OpLoc, + FPFeatures.fp_contract)); } /// DiagnoseBitwisePrecedence - Emit a warning when bitwise and comparison @@ -8383,8 +8492,8 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc, SourceRange DiagRange = isLeftComp ? SourceRange(LHSExpr->getLocStart(), OpLoc) : SourceRange(OpLoc, RHSExpr->getLocEnd()); - std::string OpStr = isLeftComp ? BinOp::getOpcodeStr(LHSopc) - : BinOp::getOpcodeStr(RHSopc); + StringRef OpStr = isLeftComp ? BinOp::getOpcodeStr(LHSopc) + : BinOp::getOpcodeStr(RHSopc); SourceRange ParensRange = isLeftComp ? SourceRange(cast(LHSExpr)->getRHS()->getLocStart(), RHSExpr->getLocEnd()) @@ -8394,7 +8503,7 @@ static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc, Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel) << DiagRange << BinOp::getOpcodeStr(Opc) << OpStr; SuggestParentheses(Self, OpLoc, - Self.PDiag(diag::note_precedence_bitwise_silence) << OpStr, + Self.PDiag(diag::note_precedence_silence) << OpStr, (isLeftComp ? 
LHSExpr : RHSExpr)->getSourceRange()); SuggestParentheses(Self, OpLoc, Self.PDiag(diag::note_precedence_bitwise_first) << BinOp::getOpcodeStr(Opc), @@ -8411,7 +8520,8 @@ EmitDiagnosticForBitwiseAndInBitwiseOr(Sema &Self, SourceLocation OpLoc, Self.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_and_in_bitwise_or) << Bop->getSourceRange() << OpLoc; SuggestParentheses(Self, Bop->getOperatorLoc(), - Self.PDiag(diag::note_bitwise_and_in_bitwise_or_silence), + Self.PDiag(diag::note_precedence_silence) + << Bop->getOpcodeStr(), Bop->getSourceRange()); } @@ -8425,7 +8535,8 @@ EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc, Self.Diag(Bop->getOperatorLoc(), diag::warn_logical_and_in_logical_or) << Bop->getSourceRange() << OpLoc; SuggestParentheses(Self, Bop->getOperatorLoc(), - Self.PDiag(diag::note_logical_and_in_logical_or_silence), + Self.PDiag(diag::note_precedence_silence) + << Bop->getOpcodeStr(), Bop->getSourceRange()); } @@ -8489,6 +8600,20 @@ static void DiagnoseBitwiseAndInBitwiseOr(Sema &S, SourceLocation OpLoc, } } +static void DiagnoseAdditionInShift(Sema &S, SourceLocation OpLoc, + Expr *SubExpr, StringRef Shift) { + if (BinaryOperator *Bop = dyn_cast(SubExpr)) { + if (Bop->getOpcode() == BO_Add || Bop->getOpcode() == BO_Sub) { + StringRef Op = Bop->getOpcodeStr(); + S.Diag(Bop->getOperatorLoc(), diag::warn_addition_in_bitshift) + << Bop->getSourceRange() << OpLoc << Shift << Op; + SuggestParentheses(S, Bop->getOperatorLoc(), + S.PDiag(diag::note_precedence_silence) << Op, + Bop->getSourceRange()); + } + } +} + /// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky /// precedence. static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc, @@ -8510,6 +8635,13 @@ static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc, DiagnoseLogicalAndInLogicalOrLHS(Self, OpLoc, LHSExpr, RHSExpr); DiagnoseLogicalAndInLogicalOrRHS(Self, OpLoc, LHSExpr, RHSExpr); } + + if ((Opc == BO_Shl && LHSExpr->getType()->isIntegralType(Self.getASTContext())) + || Opc == BO_Shr) { + StringRef Shift = BinaryOperator::getOpcodeStr(Opc); + DiagnoseAdditionInShift(Self, OpLoc, LHSExpr, Shift); + DiagnoseAdditionInShift(Self, OpLoc, RHSExpr, Shift); + } } // Binary Operators. 'Tok' is the token for the operator. @@ -8647,6 +8779,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc, break; case UO_Deref: { Input = DefaultFunctionArrayLvalueConversion(Input.take()); + if (Input.isInvalid()) return ExprError(); resultType = CheckIndirectionOperand(*this, Input.get(), VK, OpLoc); break; } @@ -9147,8 +9280,7 @@ ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, } return Owned(OffsetOfExpr::Create(Context, Context.getSizeType(), BuiltinLoc, - TInfo, Comps.data(), Comps.size(), - Exprs.data(), Exprs.size(), RParenLoc)); + TInfo, Comps, Exprs, RParenLoc)); } ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S, @@ -9526,6 +9658,16 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc, if (Result.isInvalid()) return ExprError(); E = Result.take(); + } else if (VaListType->isRecordType() && getLangOpts().CPlusPlus) { + // If va_list is a record type and we are compiling in C++ mode, + // check the argument using reference binding. 
+ InitializedEntity Entity + = InitializedEntity::InitializeParameter(Context, + Context.getLValueReferenceType(VaListType), false); + ExprResult Init = PerformCopyInitialization(Entity, SourceLocation(), E); + if (Init.isInvalid()) + return ExprError(); + E = Init.takeAs(); } else { // Otherwise, the va_list argument must be an l-value because // it is modified by va_arg. @@ -10094,6 +10236,14 @@ Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs); } +void +Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, + ReuseLambdaContextDecl_t, + bool IsDecltype) { + Decl *LambdaContextDecl = ExprEvalContexts.back().LambdaContextDecl; + PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); +} + void Sema::PopExpressionEvaluationContext() { ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back(); @@ -10191,15 +10341,44 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func) { Func->setReferenced(); - // Don't mark this function as used multiple times, unless it's a constexpr - // function which we need to instantiate. - if (Func->isUsed(false) && - !(Func->isConstexpr() && !Func->getBody() && - Func->isImplicitlyInstantiable())) - return; - - if (!IsPotentiallyEvaluatedContext(*this)) - return; + // C++11 [basic.def.odr]p3: + // A function whose name appears as a potentially-evaluated expression is + // odr-used if it is the unique lookup result or the selected member of a + // set of overloaded functions [...]. + // + // We (incorrectly) mark overload resolution as an unevaluated context, so we + // can just check that here. Skip the rest of this function if we've already + // marked the function as used. + if (Func->isUsed(false) || !IsPotentiallyEvaluatedContext(*this)) { + // C++11 [temp.inst]p3: + // Unless a function template specialization has been explicitly + // instantiated or explicitly specialized, the function template + // specialization is implicitly instantiated when the specialization is + // referenced in a context that requires a function definition to exist. + // + // We consider constexpr function templates to be referenced in a context + // that requires a definition to exist whenever they are referenced. + // + // FIXME: This instantiates constexpr functions too frequently. If this is + // really an unevaluated context (and we're not just in the definition of a + // function template or overload resolution or other cases which we + // incorrectly consider to be unevaluated contexts), and we're not in a + // subexpression which we actually need to evaluate (for instance, a + // template argument, array bound or an expression in a braced-init-list), + // we are not permitted to instantiate this constexpr function definition. + // + // FIXME: This also implicitly defines special members too frequently. They + // are only supposed to be implicitly defined if they are odr-used, but they + // are not odr-used from constant expressions in unevaluated contexts. + // However, they cannot be referenced if they are deleted, and they are + // deleted whenever the implicit definition of the special member would + // fail. + if (!Func->isConstexpr() || Func->getBody()) + return; + CXXMethodDecl *MD = dyn_cast(Func); + if (!Func->isImplicitlyInstantiable() && (!MD || MD->isUserProvided())) + return; + } // Note that this declaration has been used. 
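[Editorial note, not part of the patch] The long comment block above explains why constexpr function templates are instantiated whenever they are referenced, even from contexts the compiler currently models as unevaluated. A small illustration, using the array-bound case the FIXME mentions:

    template <typename T> constexpr T square(T t) { return t * t; }

    // An array bound must be a constant expression, so square<int> needs a
    // definition at this point; MarkFunctionReferenced instantiates the
    // constexpr template so the evaluation can succeed.
    int buffer[square(4)];
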
if (CXXConstructorDecl *Constructor = dyn_cast(Func)) { @@ -10469,8 +10648,7 @@ static ExprResult captureInLambda(Sema &S, LambdaScopeInfo *LSI, InitializationSequence Init(S, Entities.back(), InitKind, &Ref, 1); ExprResult Result(true); if (!Init.Diagnose(S, Entities.back(), InitKind, &Ref, 1)) - Result = Init.Perform(S, Entities.back(), InitKind, - MultiExprArg(S, &Ref, 1)); + Result = Init.Perform(S, Entities.back(), InitKind, Ref); // If this initialization requires any cleanups (e.g., due to a // default argument to a copy constructor), note that for the @@ -10886,20 +11064,21 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc, } } - // Per C++11 [basic.def.odr], a variable is odr-used "unless it is - // an object that satisfies the requirements for appearing in a - // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1) + // Per C++11 [basic.def.odr], a variable is odr-used "unless it satisfies + // the requirements for appearing in a constant expression (5.19) and, if + // it is an object, the lvalue-to-rvalue conversion (4.1) // is immediately applied." We check the first part here, and // Sema::UpdateMarkingForLValueToRValue deals with the second part. // Note that we use the C++11 definition everywhere because nothing in - // C++03 depends on whether we get the C++03 version correct. This does not - // apply to references, since they are not objects. + // C++03 depends on whether we get the C++03 version correct. The second + // part does not apply to references, since they are not objects. const VarDecl *DefVD; - if (E && !isa(Var) && !Var->getType()->isReferenceType() && + if (E && !isa(Var) && Var->isUsableInConstantExpressions(SemaRef.Context) && - Var->getAnyInitializer(DefVD) && DefVD->checkInitIsICE()) - SemaRef.MaybeODRUseExprs.insert(E); - else + Var->getAnyInitializer(DefVD) && DefVD->checkInitIsICE()) { + if (!Var->getType()->isReferenceType()) + SemaRef.MaybeODRUseExprs.insert(E); + } else MarkVarDeclODRUsed(SemaRef, Var, Loc); } @@ -11205,7 +11384,9 @@ void Sema::DiagnoseAssignmentAsCondition(Expr *E) { IsOrAssign = Op->getOperator() == OO_PipeEqual; Loc = Op->getOperatorLoc(); - } else { + } else if (PseudoObjectExpr *POE = dyn_cast(E)) + return DiagnoseAssignmentAsCondition(POE->getSyntacticForm()); + else { // Not an assignment. return; } @@ -11759,6 +11940,10 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) { case BuiltinType::PseudoObject: return checkPseudoObjectRValue(E); + case BuiltinType::BuiltinFn: + Diag(E->getLocStart(), diag::err_builtin_fn_use); + return ExprError(); + // Everything else should be impossible. 
#define BUILTIN_TYPE(Id, SingletonId) \ case BuiltinType::Id: @@ -11783,6 +11968,18 @@ ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) { assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) && "Unknown Objective-C Boolean value!"); + QualType BoolT = Context.ObjCBuiltinBoolTy; + if (!Context.getBOOLDecl()) { + LookupResult Result(*this, &Context.Idents.get("BOOL"), OpLoc, + Sema::LookupOrdinaryName); + if (LookupName(Result, getCurScope()) && Result.isSingleResult()) { + NamedDecl *ND = Result.getFoundDecl(); + if (TypedefDecl *TD = dyn_cast(ND)) + Context.setBOOLDecl(TD); + } + } + if (Context.getBOOLDecl()) + BoolT = Context.getBOOLType(); return Owned(new (Context) ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes, - Context.ObjCBuiltinBoolTy, OpLoc)); + BoolT, OpLoc)); } diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp index 2740259..0919bc5 100644 --- a/lib/Sema/SemaExprCXX.cpp +++ b/lib/Sema/SemaExprCXX.cpp @@ -410,33 +410,13 @@ Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, return BuildCXXTypeId(TypeInfoType, OpLoc, (Expr*)TyOrExpr, RParenLoc); } -/// Retrieve the UuidAttr associated with QT. -static UuidAttr *GetUuidAttrOfType(QualType QT) { - // Optionally remove one level of pointer, reference or array indirection. - const Type *Ty = QT.getTypePtr();; - if (QT->isPointerType() || QT->isReferenceType()) - Ty = QT->getPointeeType().getTypePtr(); - else if (QT->isArrayType()) - Ty = cast(QT)->getElementType().getTypePtr(); - - // Loop all record redeclaration looking for an uuid attribute. - CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); - for (CXXRecordDecl::redecl_iterator I = RD->redecls_begin(), - E = RD->redecls_end(); I != E; ++I) { - if (UuidAttr *Uuid = I->getAttr()) - return Uuid; - } - - return 0; -} - /// \brief Build a Microsoft __uuidof expression with a type operand. 
ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc) { if (!Operand->getType()->isDependentType()) { - if (!GetUuidAttrOfType(Operand->getType())) + if (!CXXUuidofExpr::GetUuidAttrOfType(Operand->getType())) return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid)); } @@ -452,7 +432,7 @@ ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType, Expr *E, SourceLocation RParenLoc) { if (!E->getType()->isDependentType()) { - if (!GetUuidAttrOfType(E->getType()) && + if (!CXXUuidofExpr::GetUuidAttrOfType(E->getType()) && !E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid)); } @@ -808,21 +788,18 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo, MultiExprArg exprs, SourceLocation RParenLoc) { QualType Ty = TInfo->getType(); - unsigned NumExprs = exprs.size(); - Expr **Exprs = (Expr**)exprs.get(); SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc(); - if (Ty->isDependentType() || - CallExpr::hasAnyTypeDependentArguments( - llvm::makeArrayRef(Exprs, NumExprs))) { - exprs.release(); - + if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(exprs)) { return Owned(CXXUnresolvedConstructExpr::Create(Context, TInfo, LParenLoc, - Exprs, NumExprs, + exprs, RParenLoc)); } + unsigned NumExprs = exprs.size(); + Expr **Exprs = exprs.data(); + bool ListInitialization = LParenLoc.isInvalid(); assert((!ListInitialization || (NumExprs == 1 && isa(Exprs[0]))) && "List initialization must have initializer list as expression."); @@ -835,7 +812,6 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo, // corresponding cast expression. if (NumExprs == 1 && !ListInitialization) { Expr *Arg = Exprs[0]; - exprs.release(); return BuildCXXFunctionalCastExpr(TInfo, LParenLoc, Arg, RParenLoc); } @@ -865,7 +841,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo, : InitializationKind::CreateValue(TyBeginLoc, LParenLoc, RParenLoc); InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs); - ExprResult Result = InitSeq.Perform(*this, Entity, Kind, move(exprs)); + ExprResult Result = InitSeq.Perform(*this, Entity, Kind, exprs); if (!Result.isInvalid() && ListInitialization && isa(Result.get())) { @@ -881,7 +857,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo, } // FIXME: Improve AST representation? 
- return move(Result); + return Result; } /// doesUsualArrayDeleteWantSize - Answers whether the usual @@ -1011,9 +987,9 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, if (ParenListExpr *List = dyn_cast_or_null(Initializer)) DirectInitRange = List->getSourceRange(); - return BuildCXXNew(StartLoc, UseGlobal, + return BuildCXXNew(SourceRange(StartLoc, D.getLocEnd()), UseGlobal, PlacementLParen, - move(PlacementArgs), + PlacementArgs, PlacementRParen, TypeIdParens, AllocType, @@ -1044,7 +1020,7 @@ static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style, } ExprResult -Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal, +Sema::BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, @@ -1056,6 +1032,7 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal, Expr *Initializer, bool TypeMayContainAuto) { SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange(); + SourceLocation StartLoc = Range.getBegin(); CXXNewExpr::InitializationStyle initStyle; if (DirectInitRange.isValid()) { @@ -1279,21 +1256,13 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal, } } - // ARC: warn about ABI issues. - if (getLangOpts().ObjCAutoRefCount) { - QualType BaseAllocType = Context.getBaseElementType(AllocType); - if (BaseAllocType.hasStrongOrWeakObjCLifetime()) - Diag(StartLoc, diag::warn_err_new_delete_object_array) - << 0 << BaseAllocType; - } - // Note that we do *not* convert the argument in any way. It can // be signed, larger than size_t, whatever. } FunctionDecl *OperatorNew = 0; FunctionDecl *OperatorDelete = 0; - Expr **PlaceArgs = (Expr**)PlacementArgs.get(); + Expr **PlaceArgs = PlacementArgs.data(); unsigned NumPlaceArgs = PlacementArgs.size(); if (!AllocType->isDependentType() && @@ -1432,15 +1401,14 @@ Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal, } } - PlacementArgs.release(); - return Owned(new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew, OperatorDelete, UsualArrayDeleteWantsSize, - PlaceArgs, NumPlaceArgs, TypeIdParens, + llvm::makeArrayRef(PlaceArgs, NumPlaceArgs), + TypeIdParens, ArraySize, initStyle, Initializer, ResultType, AllocTypeInfo, - StartLoc, DirectInitRange)); + Range, DirectInitRange)); } /// \brief Checks that a type is suitable as the allocated type @@ -1638,7 +1606,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, = dyn_cast((*D)->getUnderlyingDecl())) { // Perform template argument deduction to try to match the // expected function type. 
-      TemplateDeductionInfo Info(Context, StartLoc);
+      TemplateDeductionInfo Info(StartLoc);
       if (DeduceTemplateArguments(FnTmpl, 0, ExpectedFunctionType, Fn, Info))
         continue;
     } else
@@ -2100,7 +2068,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                               ObjectPtrConversions.front()->getConversionType(),
                               AA_Converting);
       if (Res.isUsable()) {
-        Ex = move(Res);
+        Ex = Res;
         Type = Ex.get()->getType();
       }
     }
@@ -2211,13 +2179,6 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
          }
        }
 
-      } else if (getLangOpts().ObjCAutoRefCount &&
-                 PointeeElem->isObjCLifetimeType() &&
-                 (PointeeElem.getObjCLifetime() == Qualifiers::OCL_Strong ||
-                  PointeeElem.getObjCLifetime() == Qualifiers::OCL_Weak) &&
-                 ArrayForm) {
-        Diag(StartLoc, diag::warn_err_new_delete_object_array)
-          << 1 << PointeeElem;
       }
 
       if (!OperatorDelete) {
@@ -2287,7 +2248,7 @@ ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
     return ExprError();
   }
 
-  return move(Condition);
+  return Condition;
 }
 
 /// CheckCXXBooleanCondition - Returns true if a conversion to bool is invalid.
@@ -2354,11 +2315,9 @@ static ExprResult BuildCXXCastArgument(Sema &S,
   default: llvm_unreachable("Unhandled cast kind!");
   case CK_ConstructorConversion: {
     CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Method);
-    ASTOwningVector<Expr*> ConstructorArgs(S);
+    SmallVector<Expr*, 8> ConstructorArgs;
 
-    if (S.CompleteConstructorCall(Constructor,
-                                  MultiExprArg(&From, 1),
-                                  CastLoc, ConstructorArgs))
+    if (S.CompleteConstructorCall(Constructor, From, CastLoc, ConstructorArgs))
       return ExprError();
 
     S.CheckConstructorAccess(CastLoc, Constructor,
@@ -2367,7 +2326,7 @@ static ExprResult BuildCXXCastArgument(Sema &S,
 
     ExprResult Result
       = S.BuildCXXConstructExpr(CastLoc, Ty, cast<CXXConstructorDecl>(Method),
-                                move_arg(ConstructorArgs),
+                                ConstructorArgs,
                                 HadMultipleCandidates, /*ZeroInit*/ false,
                                 CXXConstructExpr::CK_Complete, SourceRange());
     if (Result.isInvalid())
@@ -2511,15 +2470,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
   // FIXME: When can ToType be a reference type?
assert(!ToType->isReferenceType()); if (SCS.Second == ICK_Derived_To_Base) { - ASTOwningVector ConstructorArgs(*this); + SmallVector ConstructorArgs; if (CompleteConstructorCall(cast(SCS.CopyConstructor), - MultiExprArg(*this, &From, 1), - /*FIXME:ConstructLoc*/SourceLocation(), + From, /*FIXME:ConstructLoc*/SourceLocation(), ConstructorArgs)) return ExprError(); return BuildCXXConstructExpr(/*FIXME:ConstructLoc*/SourceLocation(), ToType, SCS.CopyConstructor, - move_arg(ConstructorArgs), + ConstructorArgs, /*HadMultipleCandidates*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, @@ -2527,8 +2485,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType, } return BuildCXXConstructExpr(/*FIXME:ConstructLoc*/SourceLocation(), ToType, SCS.CopyConstructor, - MultiExprArg(*this, &From, 1), - /*HadMultipleCandidates*/ false, + From, /*HadMultipleCandidates*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, SourceRange()); @@ -2602,8 +2559,16 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType, case ICK_Integral_Promotion: case ICK_Integral_Conversion: - From = ImpCastExprToType(From, ToType, CK_IntegralCast, - VK_RValue, /*BasePath=*/0, CCK).take(); + if (ToType->isBooleanType()) { + assert(FromType->castAs()->getDecl()->isFixed() && + SCS.Second == ICK_Integral_Promotion && + "only enums with fixed underlying type can promote to bool"); + From = ImpCastExprToType(From, ToType, CK_IntegralToBoolean, + VK_RValue, /*BasePath=*/0, CCK).take(); + } else { + From = ImpCastExprToType(From, ToType, CK_IntegralCast, + VK_RValue, /*BasePath=*/0, CCK).take(); + } break; case ICK_Floating_Promotion: @@ -2943,6 +2908,7 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, case UTT_IsEmpty: case UTT_IsPolymorphic: case UTT_IsAbstract: + case UTT_IsInterfaceClass: // Fall-through // These traits require a complete type. 
@@ -3007,7 +2973,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT, case UTT_IsUnion: return T->isUnionType(); case UTT_IsClass: - return T->isClassType() || T->isStructureType(); + return T->isClassType() || T->isStructureType() || T->isInterfaceType(); case UTT_IsFunction: return T->isFunctionType(); @@ -3073,6 +3039,10 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT, if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) return RD->isAbstract(); return false; + case UTT_IsInterfaceClass: + if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) + return RD->isInterface(); + return false; case UTT_IsFinal: if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) return RD->hasAttr(); @@ -3417,9 +3387,7 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc, if (Init.Failed()) return false; - ExprResult Result = Init.Perform(S, To, InitKind, - MultiExprArg(ArgExprs.data(), - ArgExprs.size())); + ExprResult Result = Init.Perform(S, To, InitKind, ArgExprs); if (Result.isInvalid() || SFINAE.hasErrorOccurred()) return false; @@ -3577,7 +3545,7 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT, if (Init.Failed()) return false; - ExprResult Result = Init.Perform(Self, To, Kind, MultiExprArg(&FromPtr, 1)); + ExprResult Result = Init.Perform(Self, To, Kind, FromPtr); return !Result.isInvalid() && !SFINAE.hasErrorOccurred(); } @@ -3774,7 +3742,7 @@ ExprResult Sema::ActOnExpressionTrait(ExpressionTrait ET, ExprResult Result = BuildExpressionTrait(ET, KWLoc, Queried, RParen); - return move(Result); + return Result; } static bool EvaluateExpressionTrait(ExpressionTrait ET, Expr *E) { @@ -4056,14 +4024,14 @@ static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS Best->Conversions[0], Sema::AA_Converting); if (LHSRes.isInvalid()) break; - LHS = move(LHSRes); + LHS = LHSRes; ExprResult RHSRes = Self.PerformImplicitConversion(RHS.get(), Best->BuiltinTypes.ParamTypes[1], Best->Conversions[1], Sema::AA_Converting); if (RHSRes.isInvalid()) break; - RHS = move(RHSRes); + RHS = RHSRes; if (Best->Function) Self.MarkFunctionReferenced(QuestionLoc, Best->Function); return false; @@ -4104,7 +4072,7 @@ static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) { SourceLocation()); Expr *Arg = E.take(); InitializationSequence InitSeq(Self, Entity, Kind, &Arg, 1); - ExprResult Result = InitSeq.Perform(Self, Entity, Kind, MultiExprArg(&Arg, 1)); + ExprResult Result = InitSeq.Perform(Self, Entity, Kind, Arg); if (Result.isInvalid()) return true; @@ -4129,7 +4097,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult CondRes = CheckCXXBooleanCondition(Cond.take()); if (CondRes.isInvalid()) return QualType(); - Cond = move(CondRes); + Cond = CondRes; } // Assume r-value. @@ -4160,6 +4128,9 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &NonVoid = LVoid ? 
RHS : LHS; if (NonVoid.get()->getType()->isRecordType() && NonVoid.get()->isGLValue()) { + if (RequireNonAbstractType(QuestionLoc, NonVoid.get()->getType(), + diag::err_allocation_of_abstract_type)) + return QualType(); InitializedEntity Entity = InitializedEntity::InitializeTemporary(NonVoid.get()->getType()); NonVoid = PerformCopyInitialization(Entity, SourceLocation(), NonVoid); @@ -4302,7 +4273,11 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy)) { if (LTy->isRecordType()) { // The operands have class type. Make a temporary copy. + if (RequireNonAbstractType(QuestionLoc, LTy, + diag::err_allocation_of_abstract_type)) + return QualType(); InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy); + ExprResult LHSCopy = PerformCopyInitialization(Entity, SourceLocation(), LHS); @@ -4566,14 +4541,14 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc, // Convert E1 to Composite1 ExprResult E1Result - = E1ToC1.Perform(*this, Entity1, Kind, MultiExprArg(*this,&E1,1)); + = E1ToC1.Perform(*this, Entity1, Kind, E1); if (E1Result.isInvalid()) return QualType(); E1 = E1Result.takeAs(); // Convert E2 to Composite1 ExprResult E2Result - = E2ToC1.Perform(*this, Entity1, Kind, MultiExprArg(*this,&E2,1)); + = E2ToC1.Perform(*this, Entity1, Kind, E2); if (E2Result.isInvalid()) return QualType(); E2 = E2Result.takeAs(); @@ -4591,14 +4566,14 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc, // Convert E1 to Composite2 ExprResult E1Result - = E1ToC2.Perform(*this, Entity2, Kind, MultiExprArg(*this, &E1, 1)); + = E1ToC2.Perform(*this, Entity2, Kind, E1); if (E1Result.isInvalid()) return QualType(); E1 = E1Result.takeAs(); // Convert E2 to Composite2 ExprResult E2Result - = E2ToC2.Perform(*this, Entity2, Kind, MultiExprArg(*this, &E2, 1)); + = E2ToC2.Perform(*this, Entity2, Kind, E2); if (E2Result.isInvalid()) return QualType(); E2 = E2Result.takeAs(); @@ -4839,7 +4814,8 @@ ExprResult Sema::ActOnDecltypeExpression(Expr *E) { BO_Comma, BO->getType(), BO->getValueKind(), BO->getObjectKind(), - BO->getOperatorLoc())); + BO->getOperatorLoc(), + BO->isFPContractable())); } } @@ -4991,7 +4967,7 @@ Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, // type C (or of pointer to a class type C), the unqualified-id is looked // up in the scope of class C. [...] ObjectType = ParsedType::make(BaseType); - return move(Base); + return Base; } ExprResult Sema::DiagnoseDtorReference(SourceLocation NameLoc, @@ -5056,7 +5032,8 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base, if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc)) return ExprError(); - if (!ObjectType->isDependentType() && !ObjectType->isScalarType()) { + if (!ObjectType->isDependentType() && !ObjectType->isScalarType() && + !ObjectType->isVectorType()) { if (getLangOpts().MicrosoftMode && ObjectType->isVoidType()) Diag(OpLoc, diag::ext_pseudo_dtor_on_void) << Base->getSourceRange(); else @@ -5203,8 +5180,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base, } else { // Resolve the template-id to a type. 
TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId; - ASTTemplateArgsPtr TemplateArgsPtr(*this, - TemplateId->getTemplateArgs(), + ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); TypeResult T = ActOnTemplateIdType(TemplateId->SS, TemplateId->TemplateKWLoc, @@ -5253,8 +5229,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base, } else { // Resolve the template-id to a type. TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId; - ASTTemplateArgsPtr TemplateArgsPtr(*this, - TemplateId->getTemplateArgs(), + ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); TypeResult T = ActOnTemplateIdType(TemplateId->SS, TemplateId->TemplateKWLoc, @@ -5353,7 +5328,7 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl, MarkFunctionReferenced(Exp.get()->getLocStart(), Method); CXXMemberCallExpr *CE = - new (Context) CXXMemberCallExpr(Context, ME, 0, 0, ResultType, VK, + new (Context) CXXMemberCallExpr(Context, ME, MultiExprArg(), ResultType, VK, Exp.get()->getLocEnd()); return CE; } diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp index 8f445e2..a7fd471 100644 --- a/lib/Sema/SemaExprMember.cpp +++ b/lib/Sema/SemaExprMember.cpp @@ -13,6 +13,7 @@ #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Scope.h" +#include "clang/Sema/ScopeInfo.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" @@ -353,7 +354,7 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK, // Now look up the TypeDefDecl from the vector type. Without this, // diagostics look bad. We want extended vector types to appear built-in. for (Sema::ExtVectorDeclsType::iterator - I = S.ExtVectorDecls.begin(S.ExternalSource), + I = S.ExtVectorDecls.begin(S.getExternalSource()), E = S.ExtVectorDecls.end(); I != E; ++I) { if ((*I)->getUnderlyingType() == VT) @@ -605,7 +606,8 @@ LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R, R.addDecl(ND); SemaRef.Diag(R.getNameLoc(), diag::err_no_member_suggest) << Name << DC << CorrectedQuotedStr << SS.getRange() - << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr); + << FixItHint::CreateReplacement(Corrected.getCorrectionRange(), + CorrectedStr); SemaRef.Diag(ND->getLocation(), diag::note_previous_decl) << ND->getDeclName(); } @@ -656,7 +658,7 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType, } if (Result.get()) - return move(Result); + return Result; // LookupMemberExpr can modify Base, and thus change BaseType BaseType = Base->getType(); @@ -1021,7 +1023,7 @@ static bool ShouldTryAgainWithRedefinitionType(Sema &S, ExprResult &base) { // Do the substitution as long as the redefinition type isn't just a // possibly-qualified pointer to builtin-id or builtin-Class again. 
opty = redef->getAs(); - if (opty && !opty->getObjectType()->getInterface() != 0) + if (opty && !opty->getObjectType()->getInterface()) return false; base = S.ImpCastExprToType(base.take(), redef, CK_BitCast); @@ -1272,9 +1274,23 @@ Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr, if (warn) Diag(MemberLoc, diag::warn_direct_ivar_access) << IV->getDeclName(); } - return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(), - MemberLoc, BaseExpr.take(), - IsArrow)); + + ObjCIvarRefExpr *Result = new (Context) ObjCIvarRefExpr(IV, IV->getType(), + MemberLoc, + BaseExpr.take(), + IsArrow); + + if (getLangOpts().ObjCAutoRefCount) { + if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) { + DiagnosticsEngine::Level Level = + Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, + MemberLoc); + if (Level != DiagnosticsEngine::Ignored) + getCurFunction()->recordUseOfWeak(Result); + } + } + + return Owned(Result); } // Objective-C property access. @@ -1550,7 +1566,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base, Id.getKind() == UnqualifiedId::IK_DestructorName) return DiagnoseDtorReference(NameInfo.getLoc(), Result.get()); - return move(Result); + return Result; } ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl, HasTrailingLParen}; @@ -1560,7 +1576,7 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base, false, &ExtraArgs); } - return move(Result); + return Result; } static ExprResult diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp index 0aabf8b..e43b6bf 100644 --- a/lib/Sema/SemaExprObjC.cpp +++ b/lib/Sema/SemaExprObjC.cpp @@ -229,7 +229,7 @@ static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc, S.NSNumberPointer, ResultTInfo, S.NSNumberDecl, /*isInstance=*/false, /*isVariadic=*/false, - /*isSynthesized=*/false, + /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, @@ -345,7 +345,7 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element, SourceLocation()); InitializationSequence Seq(S, Entity, Kind, &Element, 1); if (!Seq.Failed()) - return Seq.Perform(S, Entity, Kind, MultiExprArg(S, &Element, 1)); + return Seq.Perform(S, Entity, Kind, Element); } Expr *OrigElement = Element; @@ -477,7 +477,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) { stringWithUTF8String, NSStringPointer, ResultTInfo, NSStringDecl, /*isInstance=*/false, /*isVariadic=*/false, - /*isSynthesized=*/false, + /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, @@ -646,7 +646,7 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) { ResultTInfo, Context.getTranslationUnitDecl(), false /*Instance*/, false/*isVariadic*/, - /*isSynthesized=*/false, + /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, false); @@ -708,7 +708,7 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) { // Check that each of the elements provided is valid in a collection literal, // performing conversions as necessary. 
- Expr **ElementsBuffer = Elements.get(); + Expr **ElementsBuffer = Elements.data(); for (unsigned I = 0, N = Elements.size(); I != N; ++I) { ExprResult Converted = CheckObjCCollectionLiteralElement(*this, ElementsBuffer[I], @@ -724,10 +724,8 @@ ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) { Context.getObjCInterfaceType(NSArrayDecl)); return MaybeBindToTemporary( - ObjCArrayLiteral::Create(Context, - llvm::makeArrayRef(Elements.get(), - Elements.size()), - Ty, ArrayWithObjectsMethod, SR)); + ObjCArrayLiteral::Create(Context, Elements, Ty, + ArrayWithObjectsMethod, SR)); } ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR, @@ -766,7 +764,7 @@ ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR, 0 /*TypeSourceInfo */, Context.getTranslationUnitDecl(), false /*Instance*/, false/*isVariadic*/, - /*isSynthesized=*/false, + /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, false); @@ -1125,7 +1123,9 @@ void Sema::EmitRelatedResultTypeNote(const Expr *E) { bool Sema::CheckMessageArgumentTypes(QualType ReceiverType, Expr **Args, unsigned NumArgs, - Selector Sel, ObjCMethodDecl *Method, + Selector Sel, + ArrayRef SelectorLocs, + ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, QualType &ReturnType, ExprValueKind &VK) { @@ -1149,7 +1149,8 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType, : diag::warn_inst_method_not_found; if (!getLangOpts().DebuggerSupport) Diag(lbrac, DiagID) - << Sel << isClassMessage << SourceRange(lbrac, rbrac); + << Sel << isClassMessage << SourceRange(SelectorLocs.front(), + SelectorLocs.back()); // In debuggers, we want to use __unknown_anytype for these // results so that clients can cast them. @@ -1304,8 +1305,8 @@ static void DiagnoseARCUseOfWeakReceiver(Sema &S, Expr *Receiver) { Expr *RExpr = Receiver->IgnoreParenImpCasts(); SourceLocation Loc = RExpr->getLocStart(); QualType T = RExpr->getType(); - ObjCPropertyDecl *PDecl = 0; - ObjCMethodDecl *GDecl = 0; + const ObjCPropertyDecl *PDecl = 0; + const ObjCMethodDecl *GDecl = 0; if (PseudoObjectExpr *POE = dyn_cast(RExpr)) { RExpr = POE->getSyntacticForm(); if (ObjCPropertyRefExpr *PRE = dyn_cast(RExpr)) { @@ -1327,34 +1328,29 @@ static void DiagnoseARCUseOfWeakReceiver(Sema &S, Expr *Receiver) { // See if receiver is a method which envokes a synthesized getter // backing a 'weak' property. ObjCMethodDecl *Method = ME->getMethodDecl(); - if (Method && Method->isSynthesized()) { - Selector Sel = Method->getSelector(); - if (Sel.getNumArgs() == 0) { - const DeclContext *Container = Method->getDeclContext(); - PDecl = - S.LookupPropertyDecl(cast(Container), - Sel.getIdentifierInfoForSlot(0)); - } + if (Method && Method->getSelector().getNumArgs() == 0) { + PDecl = Method->findPropertyDecl(); if (PDecl) T = PDecl->getType(); } } - if (T.getObjCLifetime() == Qualifiers::OCL_Weak) { - S.Diag(Loc, diag::warn_receiver_is_weak) - << ((!PDecl && !GDecl) ? 0 : (PDecl ? 
1 : 2)); - if (PDecl) - S.Diag(PDecl->getLocation(), diag::note_property_declare); - else if (GDecl) - S.Diag(GDecl->getLocation(), diag::note_method_declared_at) << GDecl; - return; + if (T.getObjCLifetime() != Qualifiers::OCL_Weak) { + if (!PDecl) + return; + if (!(PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)) + return; } - - if (PDecl && - (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak)) { - S.Diag(Loc, diag::warn_receiver_is_weak) << 1; + + S.Diag(Loc, diag::warn_receiver_is_weak) + << ((!PDecl && !GDecl) ? 0 : (PDecl ? 1 : 2)); + + if (PDecl) S.Diag(PDecl->getLocation(), diag::note_property_declare); - } + else if (GDecl) + S.Diag(GDecl->getLocation(), diag::note_method_declared_at) << GDecl; + + S.Diag(Loc, diag::note_arc_assign_to_strong); } /// HandleExprPropertyRefExpr - Handle foo.bar where foo is a pointer to an @@ -1776,19 +1772,17 @@ ExprResult Sema::ActOnSuperMessage(Scope *S, // We are in a method whose class has a superclass, so 'super' // is acting as a keyword. - if (Method->isInstanceMethod()) { - if (Sel.getMethodFamily() == OMF_dealloc) - getCurFunction()->ObjCShouldCallSuperDealloc = false; - if (Sel.getMethodFamily() == OMF_finalize) - getCurFunction()->ObjCShouldCallSuperFinalize = false; + if (Method->getSelector() == Sel) + getCurFunction()->ObjCShouldCallSuper = false; + if (Method->isInstanceMethod()) { // Since we are in an instance method, this is an instance // message to the superclass instance. QualType SuperTy = Context.getObjCInterfaceType(Super); SuperTy = Context.getObjCObjectPointerType(SuperTy); return BuildInstanceMessage(0, SuperTy, SuperLoc, Sel, /*Method=*/0, - LBracLoc, SelectorLocs, RBracLoc, move(Args)); + LBracLoc, SelectorLocs, RBracLoc, Args); } // Since we are in a class method, this is a class message to @@ -1796,7 +1790,7 @@ ExprResult Sema::ActOnSuperMessage(Scope *S, return BuildClassMessage(/*ReceiverTypeInfo=*/0, Context.getObjCInterfaceType(Super), SuperLoc, Sel, /*Method=*/0, - LBracLoc, SelectorLocs, RBracLoc, move(Args)); + LBracLoc, SelectorLocs, RBracLoc, Args); } @@ -1911,7 +1905,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, // If the receiver type is dependent, we can't type-check anything // at this point. Build a dependent expression. 
unsigned NumArgs = ArgsIn.size(); - Expr **Args = reinterpret_cast(ArgsIn.release()); + Expr **Args = ArgsIn.data(); assert(SuperLoc.isInvalid() && "Message to super with dependent type"); return Owned(ObjCMessageExpr::Create(Context, ReceiverType, VK_RValue, LBracLoc, ReceiverTypeInfo, @@ -1965,8 +1959,9 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, ExprValueKind VK = VK_RValue; unsigned NumArgs = ArgsIn.size(); - Expr **Args = reinterpret_cast(ArgsIn.release()); - if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, Method, true, + Expr **Args = ArgsIn.data(); + if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, SelectorLocs, + Method, true, SuperLoc.isValid(), LBracLoc, RBracLoc, ReturnType, VK)) return ExprError(); @@ -2016,7 +2011,7 @@ ExprResult Sema::ActOnClassMessage(Scope *S, return BuildClassMessage(ReceiverTypeInfo, ReceiverType, /*SuperLoc=*/SourceLocation(), Sel, /*Method=*/0, - LBracLoc, SelectorLocs, RBracLoc, move(Args)); + LBracLoc, SelectorLocs, RBracLoc, Args); } ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver, @@ -2095,7 +2090,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, // If the receiver is type-dependent, we can't type-check anything // at this point. Build a dependent expression. unsigned NumArgs = ArgsIn.size(); - Expr **Args = reinterpret_cast(ArgsIn.release()); + Expr **Args = ArgsIn.data(); assert(SuperLoc.isInvalid() && "Message to super with dependent type"); return Owned(ObjCMessageExpr::Create(Context, Context.DependentTy, VK_RValue, LBracLoc, Receiver, Sel, @@ -2282,7 +2277,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, LBracLoc, SelectorLocs, RBracLoc, - move(ArgsIn)); + ArgsIn); } else { // Reject other random receiver types (e.g. structs). Diag(Loc, diag::err_bad_receiver_type) @@ -2295,12 +2290,13 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, // Check the message arguments. unsigned NumArgs = ArgsIn.size(); - Expr **Args = reinterpret_cast(ArgsIn.release()); + Expr **Args = ArgsIn.data(); QualType ReturnType; ExprValueKind VK = VK_RValue; bool ClassMessage = (ReceiverType->isObjCClassType() || ReceiverType->isObjCQualifiedClassType()); - if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, Method, + if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, + SelectorLocs, Method, ClassMessage, SuperLoc.isValid(), LBracLoc, RBracLoc, ReturnType, VK)) return ExprError(); @@ -2428,6 +2424,24 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver, // In ARC, check for message sends which are likely to introduce // retain cycles. 
checkRetainCycles(Result); + + if (!isImplicit && Method) { + if (const ObjCPropertyDecl *Prop = Method->findPropertyDecl()) { + bool IsWeak = + Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak; + if (!IsWeak && Sel.isUnarySelector()) + IsWeak = ReturnType.getObjCLifetime() & Qualifiers::OCL_Weak; + + if (IsWeak) { + DiagnosticsEngine::Level Level = + Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, + LBracLoc); + if (Level != DiagnosticsEngine::Ignored) + getCurFunction()->recordUseOfWeak(Result, Prop); + + } + } + } } return MaybeBindToTemporary(Result); @@ -2448,7 +2462,7 @@ ExprResult Sema::ActOnInstanceMessage(Scope *S, return BuildInstanceMessage(Receiver, Receiver->getType(), /*SuperLoc=*/SourceLocation(), Sel, /*Method=*/0, - LBracLoc, SelectorLocs, RBracLoc, move(Args)); + LBracLoc, SelectorLocs, RBracLoc, Args); } enum ARCConversionTypeClass { @@ -3079,8 +3093,8 @@ Expr *Sema::stripARCUnbridgedCast(Expr *e) { return new (Context) GenericSelectionExpr(Context, gse->getGenericLoc(), gse->getControllingExpr(), - subTypes.data(), subExprs.data(), - n, gse->getDefaultLoc(), + subTypes, subExprs, + gse->getDefaultLoc(), gse->getRParenLoc(), gse->containsUnexpandedParameterPack(), gse->getResultIndex()); @@ -3101,8 +3115,8 @@ bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType, canExprType->isObjCObjectPointerType()) { if (const ObjCObjectPointerType *ObjT = canExprType->getAs()) - if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable()) - return false; + if (const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl()) + return !ObjI->isArcWeakrefUnavailable(); } return true; } diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp index 62ab1e6..3596bbf 100644 --- a/lib/Sema/SemaInit.cpp +++ b/lib/Sema/SemaInit.cpp @@ -1316,6 +1316,8 @@ void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity, // If the record is invalid, some of it's members are invalid. To avoid // confusion, we forgo checking the intializer for the entire record. if (structDecl->isInvalidDecl()) { + // Assume it was supposed to consume a single initializer. + ++Index; hadError = true; return; } @@ -1503,11 +1505,14 @@ static void ExpandAnonymousFieldDesignator(Sema &SemaRef, /// corresponds to FieldName. static IndirectFieldDecl *FindIndirectFieldDesignator(FieldDecl *AnonField, IdentifierInfo *FieldName) { + if (!FieldName) + return 0; + assert(AnonField->isAnonymousStructOrUnion()); Decl *NextDecl = AnonField->getNextDeclInContext(); while (IndirectFieldDecl *IF = dyn_cast_or_null(NextDecl)) { - if (FieldName && FieldName == IF->getAnonField()->getIdentifier()) + if (FieldName == IF->getAnonField()->getIdentifier()) return IF; NextDecl = NextDecl->getNextDeclInContext(); } @@ -1521,8 +1526,8 @@ static DesignatedInitExpr *CloneDesignatedInitExpr(Sema &SemaRef, for (unsigned I = 0; I < NumIndexExprs; ++I) IndexExprs[I] = DIE->getSubExpr(I + 1); return DesignatedInitExpr::Create(SemaRef.Context, DIE->designators_begin(), - DIE->size(), IndexExprs.data(), - NumIndexExprs, DIE->getEqualOrColonLoc(), + DIE->size(), IndexExprs, + DIE->getEqualOrColonLoc(), DIE->usesGNUSyntax(), DIE->getInit()); } @@ -1562,7 +1567,7 @@ class FieldInitializerValidatorCCC : public CorrectionCandidateCallback { /// /// @param DesigIdx The index of the current designator. /// -/// @param DeclType The type of the "current object" (C99 6.7.8p17), +/// @param CurrentObjectType The type of the "current object" (C99 6.7.8p17), /// into which the designation in @p DIE should refer. 
/// /// @param NextField If non-NULL and the first designator in @p DIE is @@ -2068,7 +2073,7 @@ InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index, InitListExpr *Result = new (SemaRef.Context) InitListExpr(SemaRef.Context, - InitRange.getBegin(), 0, 0, + InitRange.getBegin(), MultiExprArg(), InitRange.getEnd()); QualType ResultType = CurrentObjectType; @@ -2261,8 +2266,8 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig, DesignatedInitExpr *DIE = DesignatedInitExpr::Create(Context, Designators.data(), Designators.size(), - InitExpressions.data(), InitExpressions.size(), - Loc, GNUSyntax, Init.takeAs()); + InitExpressions, Loc, GNUSyntax, + Init.takeAs()); if (!getLangOpts().C99) Diag(DIE->getLocStart(), diag::ext_designated_init) @@ -2814,14 +2819,6 @@ static void TryConstructorInitialization(Sema &S, assert((!InitListSyntax || (NumArgs == 1 && isa(Args[0]))) && "InitListSyntax must come with a single initializer list argument."); - // Check constructor arguments for self reference. - if (DeclaratorDecl *DD = Entity.getDecl()) - // Parameters arguments are occassionially constructed with itself, - // for instance, in recursive functions. Skip them. - if (!isa(DD)) - for (unsigned i = 0; i < NumArgs; ++i) - S.CheckSelfReference(DD, Args[i]); - // The type we're constructing needs to be complete. if (S.RequireCompleteType(Kind.getLocation(), DestType, 0)) { Sequence.setIncompleteTypeFailure(DestType); @@ -3614,8 +3611,8 @@ static void TryValueInitialization(Sema &S, // user-provided or deleted default constructor, then the object is // zero-initialized and, if T has a non-trivial default constructor, // default-initialized; - // FIXME: The 'non-union' here is a defect (not yet assigned an issue - // number). Update the quotation when the defect is resolved. + // The 'non-union' here was removed by DR1502. The 'non-trivial default + // constructor' part was removed by DR1507. if (NeedZeroInitialization) Sequence.AddZeroInitializationStep(Entity.getType()); @@ -3703,8 +3700,14 @@ static void TryUserDefinedConversion(Sema &S, // Try to complete the type we're converting to. if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) { - DeclContext::lookup_iterator Con, ConEnd; - for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl); + DeclContext::lookup_iterator ConOrig, ConEndOrig; + llvm::tie(ConOrig, ConEndOrig) = S.LookupConstructors(DestRecordDecl); + // The container holding the constructors can under certain conditions + // be changed while iterating. To be safe we copy the lookup results + // to a new container. + SmallVector CopyOfCon(ConOrig, ConEndOrig); + for (SmallVector::iterator + Con = CopyOfCon.begin(), ConEnd = CopyOfCon.end(); Con != ConEnd; ++Con) { NamedDecl *D = *Con; DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess()); @@ -4413,7 +4416,7 @@ static ExprResult CopyObject(Sema &S, if (const RecordType *Record = T->getAs()) Class = cast(Record->getDecl()); if (!Class) - return move(CurInit); + return CurInit; // C++0x [class.copy]p32: // When certain criteria are met, an implementation is allowed to @@ -4435,7 +4438,7 @@ static ExprResult CopyObject(Sema &S, // Make sure that the type we are copying is complete. if (S.RequireCompleteType(Loc, T, diag::err_temp_copy_incomplete)) - return move(CurInit); + return CurInit; // Perform overload resolution using the class's copy/move constructors. // Only consider constructors and constructor templates. 
Per @@ -4460,7 +4463,7 @@ static ExprResult CopyObject(Sema &S, CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr); if (!IsExtraneousCopy || S.isSFINAEContext()) return ExprError(); - return move(CurInit); + return CurInit; case OR_Ambiguous: S.Diag(Loc, diag::err_temp_copy_ambiguous) @@ -4478,7 +4481,7 @@ static ExprResult CopyObject(Sema &S, } CXXConstructorDecl *Constructor = cast(Best->Function); - ASTOwningVector ConstructorArgs(S); + SmallVector ConstructorArgs; CurInit.release(); // Ownership transferred into MultiExprArg, below. S.CheckConstructorAccess(Loc, Constructor, Entity, @@ -4521,7 +4524,7 @@ static ExprResult CopyObject(Sema &S, // Actually perform the constructor call. CurInit = S.BuildCXXConstructExpr(Loc, T, Constructor, Elidable, - move_arg(ConstructorArgs), + ConstructorArgs, HadMultipleCandidates, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, @@ -4530,7 +4533,7 @@ static ExprResult CopyObject(Sema &S, // If we're supposed to bind temporaries, do so. if (!CurInit.isInvalid() && shouldBindAsTemporary(Entity)) CurInit = S.MaybeBindToTemporary(CurInit.takeAs()); - return move(CurInit); + return CurInit; } /// \brief Check whether elidable copy construction for binding a reference to @@ -4619,7 +4622,7 @@ PerformConstructorInitialization(Sema &S, bool HadMultipleCandidates = Step.Function.HadMultipleCandidates; // Build a call to the selected constructor. - ASTOwningVector ConstructorArgs(S); + SmallVector ConstructorArgs; SourceLocation Loc = (Kind.isCopyInit() && Kind.getEqualLoc().isValid()) ? Kind.getEqualLoc() : Kind.getLocation(); @@ -4648,7 +4651,7 @@ PerformConstructorInitialization(Sema &S, // Determine the arguments required to actually perform the constructor // call. - if (S.CompleteConstructorCall(Constructor, move(Args), + if (S.CompleteConstructorCall(Constructor, Args, Loc, ConstructorArgs, AllowExplicitConv)) return ExprError(); @@ -4660,8 +4663,6 @@ PerformConstructorInitialization(Sema &S, (Kind.getKind() == InitializationKind::IK_Direct || Kind.getKind() == InitializationKind::IK_Value)))) { // An explicitly-constructed temporary, e.g., X(1, 2). 
- unsigned NumExprs = ConstructorArgs.size(); - Expr **Exprs = (Expr **)ConstructorArgs.take(); S.MarkFunctionReferenced(Loc, Constructor); S.DiagnoseUseOfDecl(Constructor, Loc); @@ -4675,8 +4676,7 @@ PerformConstructorInitialization(Sema &S, CurInit = S.Owned(new (S.Context) CXXTemporaryObjectExpr(S.Context, Constructor, TSInfo, - Exprs, - NumExprs, + ConstructorArgs, ParenRange, HadMultipleCandidates, ConstructorInitRequiresZeroInit)); @@ -4702,7 +4702,7 @@ PerformConstructorInitialization(Sema &S, if (Entity.allowsNRVO()) CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(), Constructor, /*Elidable=*/true, - move_arg(ConstructorArgs), + ConstructorArgs, HadMultipleCandidates, ConstructorInitRequiresZeroInit, ConstructKind, @@ -4710,7 +4710,7 @@ PerformConstructorInitialization(Sema &S, else CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(), Constructor, - move_arg(ConstructorArgs), + ConstructorArgs, HadMultipleCandidates, ConstructorInitRequiresZeroInit, ConstructKind, @@ -4727,7 +4727,7 @@ PerformConstructorInitialization(Sema &S, if (shouldBindAsTemporary(Entity)) CurInit = S.MaybeBindToTemporary(CurInit.takeAs()); - return move(CurInit); + return CurInit; } /// Determine whether the specified InitializedEntity definitely has a lifetime @@ -4775,7 +4775,7 @@ InitializationSequence::Perform(Sema &S, QualType *ResultType) { if (Failed()) { unsigned NumArgs = Args.size(); - Diagnose(S, Entity, Kind, (Expr **)Args.release(), NumArgs); + Diagnose(S, Entity, Kind, Args.data(), NumArgs); return ExprError(); } @@ -4795,7 +4795,7 @@ InitializationSequence::Perform(Sema &S, // introduced and such). So, we fall back to making the array // type a dependently-sized array type with no specified // bound. - if (isa((Expr *)Args.get()[0])) { + if (isa((Expr *)Args[0])) { SourceRange Brackets; // Scavange the location of the brackets from the entity, if we can. @@ -4823,12 +4823,12 @@ InitializationSequence::Perform(Sema &S, // Rebuild the ParenListExpr. SourceRange ParenRange = Kind.getParenRange(); return S.ActOnParenListExpr(ParenRange.getBegin(), ParenRange.getEnd(), - move(Args)); + Args); } assert(Kind.getKind() == InitializationKind::IK_Copy || Kind.isExplicitCast() || Kind.getKind() == InitializationKind::IK_DirectList); - return ExprResult(Args.release()[0]); + return ExprResult(Args[0]); } // No steps means no initialization. @@ -4836,22 +4836,22 @@ InitializationSequence::Perform(Sema &S, return S.Owned((Expr *)0); if (S.getLangOpts().CPlusPlus0x && Entity.getType()->isReferenceType() && - Args.size() == 1 && isa(Args.get()[0]) && + Args.size() == 1 && isa(Args[0]) && Entity.getKind() != InitializedEntity::EK_Parameter) { // Produce a C++98 compatibility warning if we are initializing a reference // from an initializer list. For parameters, we produce a better warning // elsewhere. - Expr *Init = Args.get()[0]; + Expr *Init = Args[0]; S.Diag(Init->getLocStart(), diag::warn_cxx98_compat_reference_list_init) << Init->getSourceRange(); } // Diagnose cases where we initialize a pointer to an array temporary, and the // pointer obviously outlives the temporary. 
- if (Args.size() == 1 && Args.get()[0]->getType()->isArrayType() && + if (Args.size() == 1 && Args[0]->getType()->isArrayType() && Entity.getType()->isPointerType() && InitializedEntityOutlivesFullExpression(Entity)) { - Expr *Init = Args.get()[0]; + Expr *Init = Args[0]; Expr::LValueClassification Kind = Init->ClassifyLValue(S.Context); if (Kind == Expr::LV_ClassTemporary || Kind == Expr::LV_ArrayTemporary) S.Diag(Init->getLocStart(), diag::warn_temporary_array_to_pointer_decay) @@ -4897,7 +4897,7 @@ InitializationSequence::Perform(Sema &S, case SK_ProduceObjCObject: case SK_StdInitializerList: { assert(Args.size() == 1); - CurInit = Args.get()[0]; + CurInit = Args[0]; if (!CurInit.get()) return ExprError(); break; } @@ -4924,7 +4924,7 @@ InitializationSequence::Perform(Sema &S, // initializer to reflect that choice. S.CheckAddressOfMemberAccess(CurInit.get(), Step->Function.FoundDecl); S.DiagnoseUseOfDecl(Step->Function.FoundDecl, Kind.getLocation()); - CurInit = S.FixOverloadedFunctionReference(move(CurInit), + CurInit = S.FixOverloadedFunctionReference(CurInit, Step->Function.FoundDecl, Step->Function.Function); break; @@ -5016,7 +5016,7 @@ InitializationSequence::Perform(Sema &S, break; case SK_ExtraneousCopyToTemporary: - CurInit = CopyObject(S, Step->Type, Entity, move(CurInit), + CurInit = CopyObject(S, Step->Type, Entity, CurInit, /*IsExtraneousCopy=*/true); break; @@ -5031,7 +5031,7 @@ InitializationSequence::Perform(Sema &S, bool CreatedObject = false; if (CXXConstructorDecl *Constructor = dyn_cast(Fn)) { // Build a call to the selected constructor. - ASTOwningVector ConstructorArgs(S); + SmallVector ConstructorArgs; SourceLocation Loc = CurInit.get()->getLocStart(); CurInit.release(); // Ownership transferred into MultiExprArg, below. @@ -5045,7 +5045,7 @@ InitializationSequence::Perform(Sema &S, // Build an expression that constructs a temporary. CurInit = S.BuildCXXConstructExpr(Loc, Step->Type, Constructor, - move_arg(ConstructorArgs), + ConstructorArgs, HadMultipleCandidates, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, @@ -5079,7 +5079,7 @@ InitializationSequence::Perform(Sema &S, FoundFn, Conversion); if(CurInitExprRes.isInvalid()) return ExprError(); - CurInit = move(CurInitExprRes); + CurInit = CurInitExprRes; // Build the actual call to the conversion function. CurInit = S.BuildCXXMemberCallExpr(CurInit.get(), FoundFn, Conversion, @@ -5115,7 +5115,7 @@ InitializationSequence::Perform(Sema &S, CurInit = S.MaybeBindToTemporary(CurInit.takeAs()); if (RequiresCopy) CurInit = CopyObject(S, Entity.getType().getNonReferenceType(), Entity, - move(CurInit), /*IsExtraneousCopy=*/false); + CurInit, /*IsExtraneousCopy=*/false); break; } @@ -5144,7 +5144,7 @@ InitializationSequence::Perform(Sema &S, getAssignmentAction(Entity), CCK); if (CurInitExprRes.isInvalid()) return ExprError(); - CurInit = move(CurInitExprRes); + CurInit = CurInitExprRes; break; } @@ -5195,13 +5195,13 @@ InitializationSequence::Perform(Sema &S, Entity.getType().getNonReferenceType()); bool UseTemporary = Entity.getType()->isReferenceType(); assert(Args.size() == 1 && "expected a single argument for list init"); - InitListExpr *InitList = cast(Args.get()[0]); + InitListExpr *InitList = cast(Args[0]); S.Diag(InitList->getExprLoc(), diag::warn_cxx98_compat_ctor_list_init) << InitList->getSourceRange(); MultiExprArg Arg(InitList->getInits(), InitList->getNumInits()); CurInit = PerformConstructorInitialization(S, UseTemporary ? 
TempEntity : Entity, - Kind, move(Arg), *Step, + Kind, Arg, *Step, ConstructorInitRequiresZeroInit); break; } @@ -5214,7 +5214,7 @@ InitializationSequence::Perform(Sema &S, Expr *E = CurInit.take(); InitListExpr *Syntactic = Step->WrappingSyntacticList; InitListExpr *ILE = new (S.Context) InitListExpr(S.Context, - Syntactic->getLBraceLoc(), &E, 1, Syntactic->getRBraceLoc()); + Syntactic->getLBraceLoc(), E, Syntactic->getRBraceLoc()); ILE->setSyntacticForm(Syntactic); ILE->setType(E->getType()); ILE->setValueKind(E->getValueKind()); @@ -5234,7 +5234,7 @@ InitializationSequence::Perform(Sema &S, bool UseTemporary = Entity.getType()->isReferenceType(); CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity : Entity, - Kind, move(Args), *Step, + Kind, Args, *Step, ConstructorInitRequiresZeroInit); break; } @@ -5268,15 +5268,15 @@ InitializationSequence::Perform(Sema &S, case SK_CAssignment: { QualType SourceType = CurInit.get()->getType(); - ExprResult Result = move(CurInit); + ExprResult Result = CurInit; Sema::AssignConvertType ConvTy = S.CheckSingleAssignmentConstraints(Step->Type, Result); if (Result.isInvalid()) return ExprError(); - CurInit = move(Result); + CurInit = Result; // If this is a call, allow conversion to a transparent union. - ExprResult CurInitExprRes = move(CurInit); + ExprResult CurInitExprRes = CurInit; if (ConvTy != Sema::Compatible && Entity.getKind() == InitializedEntity::EK_Parameter && S.CheckTransparentUnionArgumentConstraints(Step->Type, CurInitExprRes) @@ -5284,7 +5284,7 @@ InitializationSequence::Perform(Sema &S, ConvTy = Sema::Compatible; if (CurInitExprRes.isInvalid()) return ExprError(); - CurInit = move(CurInitExprRes); + CurInit = CurInitExprRes; bool Complained; if (S.DiagnoseAssignmentResult(ConvTy, Kind.getLocation(), @@ -5398,7 +5398,7 @@ InitializationSequence::Perform(Sema &S, } InitListExpr *Semantic = new (S.Context) InitListExpr(S.Context, ILE->getLBraceLoc(), - Converted.data(), NumInits, ILE->getRBraceLoc()); + Converted, ILE->getRBraceLoc()); Semantic->setSyntacticForm(ILE); Semantic->setType(Dest); Semantic->setInitializesStdInitializerList(); @@ -5415,7 +5415,7 @@ InitializationSequence::Perform(Sema &S, cast(Entity.getDecl()), CurInit.get()); - return move(CurInit); + return CurInit; } //===----------------------------------------------------------------------===// diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp index 6414c6f..15cd2a7 100644 --- a/lib/Sema/SemaLambda.cpp +++ b/lib/Sema/SemaLambda.cpp @@ -22,13 +22,14 @@ using namespace clang; using namespace sema; CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange, + TypeSourceInfo *Info, bool KnownDependent) { DeclContext *DC = CurContext; while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext())) DC = DC->getParent(); // Start constructing the lambda class. - CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC, + CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC, Info, IntroducerRange.getBegin(), KnownDependent); DC->addDecl(Class); @@ -369,15 +370,13 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, if (!TmplScope->decl_empty()) KnownDependent = true; - CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, KnownDependent); - // Determine the signature of the call operator. 
TypeSourceInfo *MethodTyInfo; bool ExplicitParams = true; bool ExplicitResultType = true; bool ContainsUnexpandedParameterPack = false; SourceLocation EndLoc; - llvm::ArrayRef Params; + llvm::SmallVector Params; if (ParamInfo.getNumTypeObjects() == 0) { // C++11 [expr.prim.lambda]p4: // If a lambda-expression does not include a lambda-declarator, it is as @@ -410,17 +409,25 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, ExplicitResultType = MethodTyInfo->getType()->getAs()->getResultType() != Context.DependentTy; - - TypeLoc TL = MethodTyInfo->getTypeLoc(); - FunctionProtoTypeLoc Proto = cast(TL); - Params = llvm::ArrayRef(Proto.getParmArray(), - Proto.getNumArgs()); + + if (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 && + cast(FTI.ArgInfo[0].Param)->getType()->isVoidType()) { + // Empty arg list, don't push any params. + checkVoidParamDecl(cast(FTI.ArgInfo[0].Param)); + } else { + Params.reserve(FTI.NumArgs); + for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) + Params.push_back(cast(FTI.ArgInfo[i].Param)); + } // Check for unexpanded parameter packs in the method type. if (MethodTyInfo->getType()->containsUnexpandedParameterPack()) ContainsUnexpandedParameterPack = true; } - + + CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo, + KnownDependent); + CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range, MethodTyInfo, EndLoc, Params); @@ -528,6 +535,10 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, continue; } + // Ignore invalid decls; they'll just confuse the code later. + if (Var->isInvalidDecl()) + continue; + if (!Var->hasLocalStorage()) { Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id; Diag(Var->getLocation(), diag::note_previous_decl) << C->Id; diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp index dad196b..f6987e7 100644 --- a/lib/Sema/SemaLookup.cpp +++ b/lib/Sema/SemaLookup.cpp @@ -707,7 +707,7 @@ static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) { // result), perform template argument deduction and place the // specialization into the result set. We do this to avoid forcing all // callers to perform special deduction for conversion functions. - TemplateDeductionInfo Info(R.getSema().Context, R.getNameLoc()); + TemplateDeductionInfo Info(R.getNameLoc()); FunctionDecl *Specialization = 0; const FunctionProtoType *ConvProto @@ -1725,15 +1725,17 @@ bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) { namespace { struct AssociatedLookup { - AssociatedLookup(Sema &S, + AssociatedLookup(Sema &S, SourceLocation InstantiationLoc, Sema::AssociatedNamespaceSet &Namespaces, Sema::AssociatedClassSet &Classes) - : S(S), Namespaces(Namespaces), Classes(Classes) { + : S(S), Namespaces(Namespaces), Classes(Classes), + InstantiationLoc(InstantiationLoc) { } Sema &S; Sema::AssociatedNamespaceSet &Namespaces; Sema::AssociatedClassSet &Classes; + SourceLocation InstantiationLoc; }; } @@ -1796,6 +1798,7 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, case TemplateArgument::Declaration: case TemplateArgument::Integral: case TemplateArgument::Expression: + case TemplateArgument::NullPtr: // [Note: non-type template arguments do not contribute to the set of // associated namespaces. ] break; @@ -1864,8 +1867,10 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, // Only recurse into base classes for complete types. 
if (!Class->hasDefinition()) { - // FIXME: we might need to instantiate templates here - return; + QualType type = Result.S.Context.getTypeDeclType(Class); + if (Result.S.RequireCompleteType(Result.InstantiationLoc, type, + /*no diagnostic*/ 0)) + return; } // Add direct and indirect base classes along with their associated @@ -2069,13 +2074,15 @@ addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) { /// namespaces searched by argument-dependent lookup /// (C++ [basic.lookup.argdep]) for a given set of arguments. void -Sema::FindAssociatedClassesAndNamespaces(llvm::ArrayRef Args, +Sema::FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, + llvm::ArrayRef Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses) { AssociatedNamespaces.clear(); AssociatedClasses.clear(); - AssociatedLookup Result(*this, AssociatedNamespaces, AssociatedClasses); + AssociatedLookup Result(*this, InstantiationLoc, + AssociatedNamespaces, AssociatedClasses); // C++ [basic.lookup.koenig]p2: // For each argument type T in the function call, there is a set @@ -2642,17 +2649,14 @@ void ADLResult::insert(NamedDecl *New) { void Sema::ArgumentDependentLookup(DeclarationName Name, bool Operator, SourceLocation Loc, llvm::ArrayRef Args, - ADLResult &Result, - bool StdNamespaceIsAssociated) { + ADLResult &Result) { // Find all of the associated namespaces and classes based on the // arguments we have. AssociatedNamespaceSet AssociatedNamespaces; AssociatedClassSet AssociatedClasses; - FindAssociatedClassesAndNamespaces(Args, + FindAssociatedClassesAndNamespaces(Loc, Args, AssociatedNamespaces, AssociatedClasses); - if (StdNamespaceIsAssociated && StdNamespace) - AssociatedNamespaces.insert(getStdNamespace()); QualType T1, T2; if (Operator) { @@ -2661,13 +2665,6 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, bool Operator, T2 = Args[1]->getType(); } - // Try to complete all associated classes, in case they contain a - // declaration of a friend function. 
- for (AssociatedClassSet::iterator C = AssociatedClasses.begin(), - CEnd = AssociatedClasses.end(); - C != CEnd; ++C) - RequireCompleteType(Loc, Context.getRecordType(*C), 0); - // C++ [basic.lookup.argdep]p3: // Let X be the lookup set produced by unqualified lookup (3.4.1) // and let Y be the lookup set produced by argument dependent @@ -4056,7 +4053,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName, if (IsUnqualifiedLookup) UnqualifiedTyposCorrected[Typo] = Result; - return Result; + TypoCorrection TC = Result; + TC.setCorrectionRange(SS, TypoName); + return TC; } else if (BestResults.size() > 1 // Ugly hack equivalent to CTC == CTC_ObjCMessageReceiver; @@ -4076,7 +4075,9 @@ TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName, if (IsUnqualifiedLookup) UnqualifiedTyposCorrected[Typo] = BestResults["super"].front(); - return BestResults["super"].front(); + TypoCorrection TC = BestResults["super"].front(); + TC.setCorrectionRange(SS, TypoName); + return TC; } // If this was an unqualified lookup and we believe the callback object did diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp index 27deab2..8d70860 100644 --- a/lib/Sema/SemaObjCProperty.cpp +++ b/lib/Sema/SemaObjCProperty.cpp @@ -22,6 +22,7 @@ #include "clang/Basic/SourceManager.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SmallString.h" +#include "clang/Lex/Preprocessor.h" using namespace clang; @@ -43,7 +44,7 @@ static Qualifiers::ObjCLifetime getImpliedARCOwnership( if (attrs & (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong | ObjCPropertyDecl::OBJC_PR_copy)) { - return type->getObjCARCImplicitLifetime(); + return Qualifiers::OCL_Strong; } else if (attrs & ObjCPropertyDecl::OBJC_PR_weak) { return Qualifiers::OCL_Weak; } else if (attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) { @@ -102,6 +103,15 @@ static void checkARCPropertyDecl(Sema &S, ObjCPropertyDecl *property) { << propertyLifetime; } +static unsigned deduceWeakPropertyFromType(Sema &S, QualType T) { + if ((S.getLangOpts().getGC() != LangOptions::NonGC && + T.isObjCGCWeak()) || + (S.getLangOpts().ObjCAutoRefCount && + T.getObjCLifetime() == Qualifiers::OCL_Weak)) + return ObjCDeclSpec::DQ_PR_weak; + return 0; +} + Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, @@ -114,12 +124,8 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc, unsigned Attributes = ODS.getPropertyAttributes(); TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S); QualType T = TSI->getType(); - if ((getLangOpts().getGC() != LangOptions::NonGC && - T.isObjCGCWeak()) || - (getLangOpts().ObjCAutoRefCount && - T.getObjCLifetime() == Qualifiers::OCL_Weak)) - Attributes |= ObjCDeclSpec::DQ_PR_weak; - + Attributes |= deduceWeakPropertyFromType(*this, T); + bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) || // default is readwrite! 
!(Attributes & ObjCDeclSpec::DQ_PR_readonly)); @@ -236,6 +242,15 @@ static bool LocPropertyAttribute( ASTContext &Context, const char *attrName, } +static unsigned getOwnershipRule(unsigned attr) { + return attr & (ObjCPropertyDecl::OBJC_PR_assign | + ObjCPropertyDecl::OBJC_PR_retain | + ObjCPropertyDecl::OBJC_PR_copy | + ObjCPropertyDecl::OBJC_PR_weak | + ObjCPropertyDecl::OBJC_PR_strong | + ObjCPropertyDecl::OBJC_PR_unsafe_unretained); +} + Decl * Sema::HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, @@ -335,6 +350,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, Diag(AtLoc, diag::err_type_mismatch_continuation_class) << PDecl->getType(); Diag(PIDecl->getLocation(), diag::note_property_declare); + return 0; } } @@ -342,13 +358,11 @@ Sema::HandlePropertyInClassExtension(Scope *S, // with continuation class's readwrite property attribute! unsigned PIkind = PIDecl->getPropertyAttributesAsWritten(); if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) { - unsigned retainCopyNonatomic = - (ObjCPropertyDecl::OBJC_PR_retain | - ObjCPropertyDecl::OBJC_PR_strong | - ObjCPropertyDecl::OBJC_PR_copy | - ObjCPropertyDecl::OBJC_PR_nonatomic); - if ((Attributes & retainCopyNonatomic) != - (PIkind & retainCopyNonatomic)) { + PIkind |= deduceWeakPropertyFromType(*this, PIDecl->getType()); + unsigned ClassExtensionMemoryModel = getOwnershipRule(Attributes); + unsigned PrimaryClassMemoryModel = getOwnershipRule(PIkind); + if (PrimaryClassMemoryModel && ClassExtensionMemoryModel && + (PrimaryClassMemoryModel != ClassExtensionMemoryModel)) { Diag(AtLoc, diag::warn_property_attr_mismatch); Diag(PIDecl->getLocation(), diag::note_property_declare); } @@ -397,6 +411,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, Diag(AtLoc, diag) << CCPrimary->getDeclName(); Diag(PIDecl->getLocation(), diag::note_property_declare); + return 0; } *isOverridingProperty = true; // Make sure setter decl is synthesized, and added to primary class's list. @@ -405,7 +420,7 @@ Sema::HandlePropertyInClassExtension(Scope *S, PDecl->setSetterMethodDecl(PIDecl->getSetterMethodDecl()); if (ASTMutationListener *L = Context.getASTMutationListener()) L->AddedObjCPropertyInClassExtension(PDecl, PIDecl, CDecl); - return 0; + return PDecl; } ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S, @@ -543,6 +558,23 @@ static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc, ivarLifetime == Qualifiers::OCL_Autoreleasing) return; + // If the ivar is private, and it's implicitly __unsafe_unretained + // becaues of its type, then pretend it was actually implicitly + // __strong. This is only sound because we're processing the + // property implementation before parsing any method bodies. + if (ivarLifetime == Qualifiers::OCL_ExplicitNone && + propertyLifetime == Qualifiers::OCL_Strong && + ivar->getAccessControl() == ObjCIvarDecl::Private) { + SplitQualType split = ivarType.split(); + if (split.Quals.hasObjCLifetime()) { + assert(ivarType->isObjCARCImplicitlyUnretainedType()); + split.Quals.setObjCLifetime(Qualifiers::OCL_Strong); + ivarType = S.Context.getQualifiedType(split); + ivar->setType(ivarType); + return; + } + } + switch (propertyLifetime) { case Qualifiers::OCL_Strong: S.Diag(propertyImplLoc, diag::err_arc_strong_property_ownership) @@ -632,7 +664,13 @@ DiagnoseClassAndClassExtPropertyMismatch(Sema &S, ObjCInterfaceDecl *ClassDecl, // property. 
if (Attributes & ObjCDeclSpec::DQ_PR_readonly) { if (!classExtPropertyAttr || - (classExtPropertyAttr & ObjCDeclSpec::DQ_PR_readwrite)) + (classExtPropertyAttr & + (ObjCDeclSpec::DQ_PR_readwrite| + ObjCDeclSpec::DQ_PR_assign | + ObjCDeclSpec::DQ_PR_unsafe_unretained | + ObjCDeclSpec::DQ_PR_copy | + ObjCDeclSpec::DQ_PR_retain | + ObjCDeclSpec::DQ_PR_strong))) continue; warn = true; break; @@ -857,13 +895,15 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S, if (lifetime == Qualifiers::OCL_Weak) { bool err = false; if (const ObjCObjectPointerType *ObjT = - PropertyIvarType->getAs()) - if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable()) { + PropertyIvarType->getAs()) { + const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl(); + if (ObjI && ObjI->isArcWeakrefUnavailable()) { Diag(PropertyDiagLoc, diag::err_arc_weak_unavailable_property); Diag(property->getLocation(), diag::note_property_declare); err = true; } - if (!err && !getLangOpts().ObjCRuntimeHasWeak) { + } + if (!err && !getLangOpts().ObjCARCWeak) { Diag(PropertyDiagLoc, diag::err_arc_weak_no_runtime); Diag(property->getLocation(), diag::note_property_declare); } @@ -891,7 +931,6 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S, Ivar->setInvalidDecl(); ClassImpDecl->addDecl(Ivar); IDecl->makeDeclVisibleInContext(Ivar); - property->setPropertyIvarDecl(Ivar); if (getLangOpts().ObjCRuntime.isFragile()) Diag(PropertyDiagLoc, diag::error_missing_property_ivar_decl) @@ -907,14 +946,15 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S, << Ivar << Ivar->getName(); // Note! I deliberately want it to fall thru so more errors are caught. } + property->setPropertyIvarDecl(Ivar); + QualType IvarType = Context.getCanonicalType(Ivar->getType()); // Check that type of property and its ivar are type compatible. if (!Context.hasSameType(PropertyIvarType, IvarType)) { - compat = false; if (isa(PropertyIvarType) && isa(IvarType)) - compat = + compat = Context.canAssignObjCInterfaces( PropertyIvarType->getAs(), IvarType->getAs()); @@ -988,19 +1028,21 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S, // For Objective-C++, need to synthesize the AST for the IVAR object to be // returned by the getter as it must conform to C++'s copy-return rules. // FIXME. Eventually we want to do this for Objective-C as well. + SynthesizedFunctionScope Scope(*this, getterMethod); ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl(); DeclRefExpr *SelfExpr = new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(), - VK_RValue, SourceLocation()); + VK_RValue, PropertyDiagLoc); + MarkDeclRefReferenced(SelfExpr); Expr *IvarRefExpr = - new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc, + new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), PropertyDiagLoc, SelfExpr, true, true); ExprResult Res = PerformCopyInitialization(InitializedEntity::InitializeResult( - SourceLocation(), + PropertyDiagLoc, getterMethod->getResultType(), /*NRVO=*/false), - SourceLocation(), + PropertyDiagLoc, Owned(IvarRefExpr)); if (!Res.isInvalid()) { Expr *ResExpr = Res.takeAs(); @@ -1021,19 +1063,22 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S, if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr && Ivar->getType()->isRecordType()) { // FIXME. Eventually we want to do this for Objective-C as well. 
+ SynthesizedFunctionScope Scope(*this, setterMethod); ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl(); DeclRefExpr *SelfExpr = new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(), - VK_RValue, SourceLocation()); + VK_RValue, PropertyDiagLoc); + MarkDeclRefReferenced(SelfExpr); Expr *lhs = - new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc, + new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), PropertyDiagLoc, SelfExpr, true, true); ObjCMethodDecl::param_iterator P = setterMethod->param_begin(); ParmVarDecl *Param = (*P); QualType T = Param->getType().getNonReferenceType(); - Expr *rhs = new (Context) DeclRefExpr(Param, false, T, - VK_LValue, SourceLocation()); - ExprResult Res = BuildBinOp(S, lhs->getLocEnd(), + DeclRefExpr *rhs = new (Context) DeclRefExpr(Param, false, T, + VK_LValue, PropertyDiagLoc); + MarkDeclRefReferenced(rhs); + ExprResult Res = BuildBinOp(S, PropertyDiagLoc, BO_Assign, lhs, rhs); if (property->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic) { @@ -1043,7 +1088,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S, if (const FunctionDecl *FuncDecl = CXXCE->getDirectCallee()) if (!FuncDecl->isTrivial()) if (property->getType()->isReferenceType()) { - Diag(PropertyLoc, + Diag(PropertyDiagLoc, diag::err_atomic_property_nontrivial_assign_op) << property->getType(); Diag(FuncDecl->getLocStart(), @@ -1395,8 +1440,8 @@ bool Sema::isPropertyReadonly(ObjCPropertyDecl *PDecl, /// CollectImmediateProperties - This routine collects all properties in /// the class and its conforming protocols; but not those it its super class. void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl, - llvm::DenseMap& PropMap, - llvm::DenseMap& SuperPropMap) { + ObjCContainerDecl::PropertyMap &PropMap, + ObjCContainerDecl::PropertyMap &SuperPropMap) { if (ObjCInterfaceDecl *IDecl = dyn_cast(CDecl)) { for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(), E = IDecl->prop_end(); P != E; ++P) { @@ -1442,118 +1487,38 @@ void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl, } } -/// CollectClassPropertyImplementations - This routine collects list of -/// properties to be implemented in the class. This includes, class's -/// and its conforming protocols' properties. -static void CollectClassPropertyImplementations(ObjCContainerDecl *CDecl, - llvm::DenseMap& PropMap) { - if (ObjCInterfaceDecl *IDecl = dyn_cast(CDecl)) { - for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(), - E = IDecl->prop_end(); P != E; ++P) { - ObjCPropertyDecl *Prop = *P; - PropMap[Prop->getIdentifier()] = Prop; - } - for (ObjCInterfaceDecl::all_protocol_iterator - PI = IDecl->all_referenced_protocol_begin(), - E = IDecl->all_referenced_protocol_end(); PI != E; ++PI) - CollectClassPropertyImplementations((*PI), PropMap); - } - else if (ObjCProtocolDecl *PDecl = dyn_cast(CDecl)) { - for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(), - E = PDecl->prop_end(); P != E; ++P) { - ObjCPropertyDecl *Prop = *P; - if (!PropMap.count(Prop->getIdentifier())) - PropMap[Prop->getIdentifier()] = Prop; - } - // scan through protocol's protocols. - for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(), - E = PDecl->protocol_end(); PI != E; ++PI) - CollectClassPropertyImplementations((*PI), PropMap); - } -} - /// CollectSuperClassPropertyImplementations - This routine collects list of /// properties to be implemented in super class(s) and also coming from their /// conforming protocols. 
static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl, - llvm::DenseMap& PropMap) { + ObjCInterfaceDecl::PropertyMap &PropMap) { if (ObjCInterfaceDecl *SDecl = CDecl->getSuperClass()) { while (SDecl) { - CollectClassPropertyImplementations(SDecl, PropMap); + SDecl->collectPropertiesToImplement(PropMap); SDecl = SDecl->getSuperClass(); } } } -/// LookupPropertyDecl - Looks up a property in the current class and all -/// its protocols. -ObjCPropertyDecl *Sema::LookupPropertyDecl(const ObjCContainerDecl *CDecl, - IdentifierInfo *II) { - if (const ObjCInterfaceDecl *IDecl = - dyn_cast(CDecl)) { - for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(), - E = IDecl->prop_end(); P != E; ++P) { - ObjCPropertyDecl *Prop = *P; - if (Prop->getIdentifier() == II) - return Prop; - } - // scan through class's protocols. - for (ObjCInterfaceDecl::all_protocol_iterator - PI = IDecl->all_referenced_protocol_begin(), - E = IDecl->all_referenced_protocol_end(); PI != E; ++PI) { - ObjCPropertyDecl *Prop = LookupPropertyDecl((*PI), II); - if (Prop) - return Prop; - } - } - else if (const ObjCProtocolDecl *PDecl = - dyn_cast(CDecl)) { - for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(), - E = PDecl->prop_end(); P != E; ++P) { - ObjCPropertyDecl *Prop = *P; - if (Prop->getIdentifier() == II) - return Prop; - } - // scan through protocol's protocols. - for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(), - E = PDecl->protocol_end(); PI != E; ++PI) { - ObjCPropertyDecl *Prop = LookupPropertyDecl((*PI), II); - if (Prop) - return Prop; - } - } - return 0; -} - -static IdentifierInfo * getDefaultSynthIvarName(ObjCPropertyDecl *Prop, - ASTContext &Ctx) { - SmallString<128> ivarName; - { - llvm::raw_svector_ostream os(ivarName); - os << '_' << Prop->getIdentifier()->getName(); - } - return &Ctx.Idents.get(ivarName.str()); -} - /// \brief Default synthesizes all properties which must be synthesized /// in class's \@implementation. void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl) { - llvm::DenseMap PropMap; - CollectClassPropertyImplementations(IDecl, PropMap); + ObjCInterfaceDecl::PropertyMap PropMap; + IDecl->collectPropertiesToImplement(PropMap); if (PropMap.empty()) return; - llvm::DenseMap SuperPropMap; + ObjCInterfaceDecl::PropertyMap SuperPropMap; CollectSuperClassPropertyImplementations(IDecl, SuperPropMap); - for (llvm::DenseMap::iterator + for (ObjCInterfaceDecl::PropertyMap::iterator P = PropMap.begin(), E = PropMap.end(); P != E; ++P) { ObjCPropertyDecl *Prop = P->second; // If property to be implemented in the super class, ignore. if (SuperPropMap[Prop->getIdentifier()]) continue; - // Is there a matching propery synthesize/dynamic? + // Is there a matching property synthesize/dynamic? 
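
DefaultSynthesizeProperties now asks the property itself for its default ivar name (Prop->getDefaultSynthIvarName prefixes the property name with an underscore) and gathers the properties to implement via collectPropertiesToImplement. A hedged Objective-C sketch, assuming default property synthesis is enabled for the target runtime and using invented names:

  @interface Gadget : NSObject
  @property (nonatomic, copy) NSString *serial;
  @end

  @implementation Gadget
  // No explicit @synthesize: default synthesis backs the property with an ivar
  // named _serial, which the implementation can use directly.
  - (BOOL)hasSerial {
    return _serial.length > 0;
  }
  @end
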
if (Prop->isInvalidDecl() || Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional || IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier())) @@ -1583,7 +1548,7 @@ void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl, ActOnPropertyImplDecl(S, SourceLocation(), SourceLocation(), true, /* property = */ Prop->getIdentifier(), - /* ivar = */ getDefaultSynthIvarName(Prop, Context), + /* ivar = */ Prop->getDefaultSynthIvarName(Context), Prop->getLocation())); if (PIDecl) { Diag(Prop->getLocation(), diag::warn_missing_explicit_synthesis); @@ -1606,11 +1571,11 @@ void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) { void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, const SelectorSet &InsMap) { - llvm::DenseMap SuperPropMap; + ObjCContainerDecl::PropertyMap SuperPropMap; if (ObjCInterfaceDecl *IDecl = dyn_cast(CDecl)) CollectSuperClassPropertyImplementations(IDecl, SuperPropMap); - llvm::DenseMap PropMap; + ObjCContainerDecl::PropertyMap PropMap; CollectImmediateProperties(CDecl, PropMap, SuperPropMap); if (PropMap.empty()) return; @@ -1621,7 +1586,7 @@ void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, EI = IMPDecl->propimpl_end(); I != EI; ++I) PropImplMap.insert(I->getPropertyDecl()); - for (llvm::DenseMap::iterator + for (ObjCContainerDecl::PropertyMap::iterator P = PropMap.begin(), E = PropMap.end(); P != E; ++P) { ObjCPropertyDecl *Prop = P->second; // Is there a matching propery synthesize/dynamic? @@ -1847,7 +1812,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property, GetterMethod = ObjCMethodDecl::Create(Context, Loc, Loc, property->getGetterName(), property->getType(), 0, CD, /*isInstance=*/true, - /*isVariadic=*/false, /*isSynthesized=*/true, + /*isVariadic=*/false, /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) ? @@ -1867,7 +1832,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property, } else // A user declared getter will be synthesize when @synthesize of // the property with the same name is seen in the @implementation - GetterMethod->setSynthesized(true); + GetterMethod->setPropertyAccessor(true); property->setGetterMethodDecl(GetterMethod); // Skip setter if property is read-only. @@ -1885,7 +1850,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCMethodDecl::Create(Context, Loc, Loc, property->getSetterName(), Context.VoidTy, 0, CD, /*isInstance=*/true, /*isVariadic=*/false, - /*isSynthesized=*/true, + /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, (property->getPropertyImplementation() == @@ -1916,7 +1881,7 @@ void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property, } else // A user declared setter will be synthesize when @synthesize of // the property with the same name is seen in the @implementation - SetterMethod->setSynthesized(true); + SetterMethod->setPropertyAccessor(true); property->setSetterMethodDecl(SetterMethod); } // Add any synthesized methods to the global pool. This allows us to @@ -2121,7 +2086,9 @@ void Sema::CheckObjCPropertyAttributes(Decl *PDecl, // issue any warning. if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC) ; - else { + else if (propertyInPrimaryClass) { + // Don't issue warning on property with no life time in class + // extension as it is inherited from property in primary class. // Skip this warning in gc-only mode. 
if (getLangOpts().getGC() != LangOptions::GCOnly) Diag(Loc, diag::warn_objc_property_no_assignment_attribute); diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp index 9382f7d..9111878 100644 --- a/lib/Sema/SemaOverload.cpp +++ b/lib/Sema/SemaOverload.cpp @@ -49,7 +49,7 @@ CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, bool HadMultipleCandidates, E = S.DefaultFunctionArrayConversion(E.take()); if (E.isInvalid()) return ExprError(); - return move(E); + return E; } static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType, @@ -555,6 +555,7 @@ static MakeDeductionFailureInfo(ASTContext &Context, Result.Data = 0; switch (TDK) { case Sema::TDK_Success: + case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: @@ -597,6 +598,7 @@ static MakeDeductionFailureInfo(ASTContext &Context, void OverloadCandidate::DeductionFailureInfo::Destroy() { switch (static_cast(Result)) { case Sema::TDK_Success: + case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_Incomplete: case Sema::TDK_TooManyArguments: @@ -637,6 +639,7 @@ TemplateParameter OverloadCandidate::DeductionFailureInfo::getTemplateParameter() { switch (static_cast(Result)) { case Sema::TDK_Success: + case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: @@ -664,6 +667,7 @@ TemplateArgumentList * OverloadCandidate::DeductionFailureInfo::getTemplateArgumentList() { switch (static_cast(Result)) { case Sema::TDK_Success: + case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: @@ -688,6 +692,7 @@ OverloadCandidate::DeductionFailureInfo::getTemplateArgumentList() { const TemplateArgument *OverloadCandidate::DeductionFailureInfo::getFirstArg() { switch (static_cast(Result)) { case Sema::TDK_Success: + case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_Incomplete: case Sema::TDK_TooManyArguments: @@ -713,6 +718,7 @@ const TemplateArgument * OverloadCandidate::DeductionFailureInfo::getSecondArg() { switch (static_cast(Result)) { case Sema::TDK_Success: + case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_Incomplete: case Sema::TDK_TooManyArguments: @@ -734,13 +740,17 @@ OverloadCandidate::DeductionFailureInfo::getSecondArg() { return 0; } -void OverloadCandidateSet::clear() { +void OverloadCandidateSet::destroyCandidates() { for (iterator i = begin(), e = end(); i != e; ++i) { for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii) i->Conversions[ii].~ImplicitConversionSequence(); if (!i->Viable && i->FailureKind == ovl_fail_bad_deduction) i->DeductionFailure.Destroy(); } +} + +void OverloadCandidateSet::clear() { + destroyCandidates(); NumInlineSequences = 0; Candidates.clear(); Functions.clear(); @@ -1668,7 +1678,7 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) { return To->getKind() == BuiltinType::UInt; } - // C++0x [conv.prom]p3: + // C++11 [conv.prom]p3: // A prvalue of an unscoped enumeration type whose underlying type is not // fixed (7.2) can be converted to an rvalue a prvalue of the first of the // following types that can represent all the values of the enumeration @@ -1680,12 +1690,26 @@ bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) { // with lowest integer conversion rank (4.13) greater than the rank of long // long in which all the values of the enumeration can be 
represented. If // there are two such extended types, the signed one is chosen. + // C++11 [conv.prom]p4: + // A prvalue of an unscoped enumeration type whose underlying type is fixed + // can be converted to a prvalue of its underlying type. Moreover, if + // integral promotion can be applied to its underlying type, a prvalue of an + // unscoped enumeration type whose underlying type is fixed can also be + // converted to a prvalue of the promoted underlying type. if (const EnumType *FromEnumType = FromType->getAs()) { // C++0x 7.2p9: Note that this implicit enum to int conversion is not // provided for a scoped enumeration. if (FromEnumType->getDecl()->isScoped()) return false; + // We can perform an integral promotion to the underlying type of the enum, + // even if that's not the promoted type. + if (FromEnumType->getDecl()->isFixed()) { + QualType Underlying = FromEnumType->getDecl()->getIntegerType(); + return Context.hasSameUnqualifiedType(Underlying, ToType) || + IsIntegralPromotion(From, Underlying, ToType); + } + // We have already pre-calculated the promotion type, so this is trivial. if (ToType->isIntegerType() && !RequireCompleteType(From->getLocStart(), FromType, 0)) @@ -2899,8 +2923,6 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType, case OR_Success: { // Record the standard conversion we used and the conversion function. CXXConstructorDecl *Constructor = cast(Best->Function); - S.MarkFunctionReferenced(From->getLocStart(), Constructor); - QualType ThisType = Constructor->getThisType(S.Context); // Initializer lists don't have conversions as such. User.Before.setAsIdentityConversion(); @@ -3081,8 +3103,6 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType, // Record the standard conversion we used and the conversion function. if (CXXConstructorDecl *Constructor = dyn_cast(Best->Function)) { - S.MarkFunctionReferenced(From->getLocStart(), Constructor); - // C++ [over.ics.user]p1: // If the user-defined conversion is specified by a // constructor (12.3.1), the initial standard conversion @@ -3111,8 +3131,6 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType, } if (CXXConversionDecl *Conversion = dyn_cast(Best->Function)) { - S.MarkFunctionReferenced(From->getLocStart(), Conversion); - // C++ [over.ics.user]p1: // // [...] If the user-defined conversion is specified by a @@ -4025,8 +4043,6 @@ FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS, if (!Best->FinalConversion.DirectBinding) return false; - if (Best->Function) - S.MarkFunctionReferenced(DeclLoc, Best->Function); ICS.setUserDefined(); ICS.UserDefined.Before = Best->Conversions[0].Standard; ICS.UserDefined.After = Best->FinalConversion; @@ -5531,7 +5547,7 @@ Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, // functions. In such a case, the candidate functions generated from each // function template are combined with the set of non-template candidate // functions. - TemplateDeductionInfo Info(Context, CandidateSet.getLocation()); + TemplateDeductionInfo Info(CandidateSet.getLocation()); FunctionDecl *Specialization = 0; if (TemplateDeductionResult Result = DeduceTemplateArguments(MethodTmpl, ExplicitTemplateArgs, Args, @@ -5581,7 +5597,7 @@ Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, // functions. In such a case, the candidate functions generated from each // function template are combined with the set of non-template candidate // functions. 
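
The added C++11 [conv.prom]p4 handling means that converting an unscoped enumeration with a fixed underlying type to that underlying type (or to its promoted type) counts as an integral promotion rather than an integral conversion, which changes overload ranking. A small C++11 sketch with hypothetical names:

  enum Small : short { Zero };

  void h(short) {}
  void h(long)  {}

  void call() {
    // Small's fixed underlying type is short, so the conversion to short is an
    // integral promotion and outranks the integral conversion to long; h(short)
    // is selected. Before this change both candidates needed an integral
    // conversion.
    h(Zero);
  }
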
- TemplateDeductionInfo Info(Context, CandidateSet.getLocation()); + TemplateDeductionInfo Info(CandidateSet.getLocation()); FunctionDecl *Specialization = 0; if (TemplateDeductionResult Result = DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, Args, @@ -5703,7 +5719,7 @@ Sema::AddConversionCandidate(CXXConversionDecl *Conversion, // there are 0 arguments (i.e., nothing is allocated using ASTContext's // allocator). QualType CallResultType = ConversionType.getNonLValueExprType(Context); - CallExpr Call(Context, &ConversionFn, 0, 0, CallResultType, VK, + CallExpr Call(Context, &ConversionFn, MultiExprArg(), CallResultType, VK, From->getLocStart()); ImplicitConversionSequence ICS = TryCopyInitialization(*this, &Call, ToType, @@ -5765,7 +5781,7 @@ Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, if (!CandidateSet.isNewCandidate(FunctionTemplate)) return; - TemplateDeductionInfo Info(Context, CandidateSet.getLocation()); + TemplateDeductionInfo Info(CandidateSet.getLocation()); CXXConversionDecl *Specialization = 0; if (TemplateDeductionResult Result = DeduceTemplateArguments(FunctionTemplate, ToType, @@ -6770,17 +6786,16 @@ public: // bool operator==(T, T); // bool operator!=(T, T); void addRelationalPointerOrEnumeralOverloads() { - // C++ [over.built]p1: - // If there is a user-written candidate with the same name and parameter - // types as a built-in candidate operator function, the built-in operator - // function is hidden and is not included in the set of candidate - // functions. + // C++ [over.match.oper]p3: + // [...]the built-in candidates include all of the candidate operator + // functions defined in 13.6 that, compared to the given operator, [...] + // do not have the same parameter-type-list as any non-template non-member + // candidate. // - // The text is actually in a note, but if we don't implement it then we end - // up with ambiguities when the user provides an overloaded operator for - // an enumeration type. Note that only enumeration types have this problem, - // so we track which enumeration types we've seen operators for. Also, the - // only other overloaded operator with enumeration argumenst, operator=, + // Note that in practice, this only affects enumeration types because there + // aren't any built-in candidates of record type, and a user-defined operator + // must have an operand of record or enumeration type. Also, the only other + // overloaded operator with enumeration arguments, operator=, // cannot be overloaded for enumeration types, so this is the only place // where we must suppress candidates like this. llvm::DenseSet > @@ -6795,6 +6810,9 @@ public: if (!C->Viable || !C->Function || C->Function->getNumParams() != 2) continue; + if (C->Function->isFunctionTemplateSpecialization()) + continue; + QualType FirstParamType = C->Function->getParamDecl(0)->getType().getUnqualifiedType(); QualType SecondParamType = @@ -7652,8 +7670,7 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name, llvm::ArrayRef Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, - bool PartialOverloading, - bool StdNamespaceIsAssociated) { + bool PartialOverloading) { ADLResult Fns; // FIXME: This approach for uniquing ADL results (and removing @@ -7664,8 +7681,7 @@ Sema::AddArgumentDependentLookupCandidates(DeclarationName Name, // we supposed to consider on ADL candidates, anyway? // FIXME: Pass in the explicit template arguments? 
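
Per the [over.match.oper]p3 wording quoted above, a built-in candidate is suppressed only when a non-template non-member candidate has the same parameter-type-list; the new isFunctionTemplateSpecialization check keeps template specializations from hiding built-ins. A sketch of the ordinary, non-template case this machinery exists for, with invented names:

  enum Color { Red, Green };

  // Same parameter types as the built-in candidate bool operator<(Color, Color),
  // so the built-in is not added and the comparison below is unambiguous.
  bool operator<(Color, Color) { return false; }

  bool less(Color a, Color b) {
    return a < b;   // calls the user-declared operator<
  }
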
- ArgumentDependentLookup(Name, Operator, Loc, Args, Fns, - StdNamespaceIsAssociated); + ArgumentDependentLookup(Name, Operator, Loc, Args, Fns); // Erase all of the candidates we already knew about. for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(), @@ -7976,10 +7992,20 @@ void ImplicitConversionSequence::DiagnoseAmbiguousConversion( const PartialDiagnostic &PDiag) const { S.Diag(CaretLoc, PDiag) << Ambiguous.getFromType() << Ambiguous.getToType(); - for (AmbiguousConversionSequence::const_iterator - I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) { + // FIXME: The note limiting machinery is borrowed from + // OverloadCandidateSet::NoteCandidates; there's an opportunity for + // refactoring here. + const OverloadsShown ShowOverloads = S.Diags.getShowOverloads(); + unsigned CandsShown = 0; + AmbiguousConversionSequence::const_iterator I, E; + for (I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) { + if (CandsShown >= 4 && ShowOverloads == Ovl_Best) + break; + ++CandsShown; S.NoteOverloadCandidate(*I); } + if (I != E) + S.Diag(SourceLocation(), diag::note_ovl_too_many_candidates) << int(E - I); } namespace { @@ -8515,7 +8541,7 @@ void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) { } void NoteBuiltinOperatorCandidate(Sema &S, - const char *Opc, + StringRef Opc, SourceLocation OpLoc, OverloadCandidate *Cand) { assert(Cand->NumConversions <= 2 && "builtin operator is not binary"); @@ -8561,6 +8587,7 @@ RankDeductionFailure(const OverloadCandidate::DeductionFailureInfo &DFI) { case Sema::TDK_Success: llvm_unreachable("TDK_success while diagnosing bad deduction"); + case Sema::TDK_Invalid: case Sema::TDK_Incomplete: return 1; @@ -8783,7 +8810,7 @@ void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand, void OverloadCandidateSet::NoteCandidates(Sema &S, OverloadCandidateDisplayKind OCD, llvm::ArrayRef Args, - const char *Opc, + StringRef Opc, SourceLocation OpLoc) { // Sort the candidates by viability and position. Sorting directly would // be prohibitive, so we make a set of pointers and sort those. @@ -8807,8 +8834,7 @@ void OverloadCandidateSet::NoteCandidates(Sema &S, bool ReportedAmbiguousConversions = false; SmallVectorImpl::iterator I, E; - const DiagnosticsEngine::OverloadsShown ShowOverloads = - S.Diags.getShowOverloads(); + const OverloadsShown ShowOverloads = S.Diags.getShowOverloads(); unsigned CandsShown = 0; for (I = Cands.begin(), E = Cands.end(); I != E; ++I) { OverloadCandidate *Cand = *I; @@ -8816,7 +8842,7 @@ void OverloadCandidateSet::NoteCandidates(Sema &S, // Set an arbitrary limit on the number of candidate functions we'll spam // the user with. FIXME: This limit should depend on details of the // candidate list. - if (CandsShown >= 4 && ShowOverloads == DiagnosticsEngine::Ovl_Best) { + if (CandsShown >= 4 && ShowOverloads == Ovl_Best) { break; } ++CandsShown; @@ -8979,7 +9005,7 @@ private: // function template specialization, which is added to the set of // overloaded functions considered. 
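
The ambiguous-conversion path now reuses the candidate cap from OverloadCandidateSet::NoteCandidates: when overload display is limited (Ovl_Best), only a few conversion candidates are listed and note_ovl_too_many_candidates reports how many were omitted. If every candidate is wanted, the existing -fshow-overloads option can be used; the file name below is hypothetical:

  clang++ -fshow-overloads=all ambiguous-conversion.cpp
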
FunctionDecl *Specialization = 0; - TemplateDeductionInfo Info(Context, OvlExpr->getNameLoc()); + TemplateDeductionInfo Info(OvlExpr->getNameLoc()); if (Sema::TemplateDeductionResult Result = S.DeduceTemplateArguments(FunctionTemplate, &OvlExplicitTemplateArgs, @@ -9201,7 +9227,6 @@ Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, Fn = Resolver.getMatchingFunctionDecl(); assert(Fn); FoundResult = *Resolver.getMatchingFunctionAccessPair(); - MarkFunctionReferenced(AddressOfExpr->getLocStart(), Fn); if (Complain) CheckAddressOfMemberAccess(AddressOfExpr, FoundResult); } @@ -9257,7 +9282,7 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, // function template specialization, which is added to the set of // overloaded functions considered. FunctionDecl *Specialization = 0; - TemplateDeductionInfo Info(Context, ovl->getNameLoc()); + TemplateDeductionInfo Info(ovl->getNameLoc()); if (TemplateDeductionResult Result = DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs, Specialization, Info)) { @@ -9457,8 +9482,7 @@ void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, AddArgumentDependentLookupCandidates(ULE->getName(), /*Operator*/ false, ULE->getExprLoc(), Args, ExplicitTemplateArgs, - CandidateSet, PartialOverloading, - ULE->isStdAssociatedNamespace()); + CandidateSet, PartialOverloading); } /// Attempt to recover from an ill-formed use of a non-dependent name in a @@ -9509,7 +9533,7 @@ DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc, // declaring the function there instead. Sema::AssociatedNamespaceSet AssociatedNamespaces; Sema::AssociatedClassSet AssociatedClasses; - SemaRef.FindAssociatedClassesAndNamespaces(Args, + SemaRef.FindAssociatedClassesAndNamespaces(FnLoc, Args, AssociatedNamespaces, AssociatedClasses); // Never suggest declaring a function within namespace 'std'. @@ -9632,6 +9656,20 @@ class NoTypoCorrectionCCC : public CorrectionCandidateCallback { return false; } }; + +class BuildRecoveryCallExprRAII { + Sema &SemaRef; +public: + BuildRecoveryCallExprRAII(Sema &S) : SemaRef(S) { + assert(SemaRef.IsBuildingRecoveryCallExpr == false); + SemaRef.IsBuildingRecoveryCallExpr = true; + } + + ~BuildRecoveryCallExprRAII() { + SemaRef.IsBuildingRecoveryCallExpr = false; + } +}; + } /// Attempts to recover from a call where no functions were found. @@ -9644,6 +9682,15 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn, llvm::MutableArrayRef Args, SourceLocation RParenLoc, bool EmptyLookup, bool AllowTypoCorrection) { + // Do not try to recover if it is already building a recovery call. + // This stops infinite loops for template instantiations like + // + // template auto foo(T t) -> decltype(foo(t)) {} + // template auto foo(T t) -> decltype(foo(&t)) {} + // + if (SemaRef.IsBuildingRecoveryCallExpr) + return ExprError(); + BuildRecoveryCallExprRAII RCE(SemaRef); CXXScopeSpec SS; SS.Adopt(ULE->getQualifierLoc()); @@ -9695,20 +9742,15 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn, RParenLoc); } -/// ResolveOverloadedCallFn - Given the call expression that calls Fn -/// (which eventually refers to the declaration Func) and the call -/// arguments Args/NumArgs, attempt to resolve the function call down -/// to a specific function. If overload resolution succeeds, returns -/// the function declaration produced by overload -/// resolution. Otherwise, emits diagnostics, deletes all of the -/// arguments and Fn, and returns NULL. 
-ExprResult -Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, - SourceLocation LParenLoc, - Expr **Args, unsigned NumArgs, - SourceLocation RParenLoc, - Expr *ExecConfig, - bool AllowTypoCorrection) { +/// \brief Constructs and populates an OverloadedCandidateSet from +/// the given function. +/// \returns true when an the ExprResult output parameter has been set. +bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn, + UnresolvedLookupExpr *ULE, + Expr **Args, unsigned NumArgs, + SourceLocation RParenLoc, + OverloadCandidateSet *CandidateSet, + ExprResult *Result) { #ifndef NDEBUG if (ULE->requiresADL()) { // To do ADL, we must have found an unqualified name. @@ -9724,62 +9766,79 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, // We don't perform ADL in C. assert(getLangOpts().CPlusPlus && "ADL enabled in C"); - } else - assert(!ULE->isStdAssociatedNamespace() && - "std is associated namespace but not doing ADL"); + } #endif UnbridgedCastsSet UnbridgedCasts; - if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts)) - return ExprError(); - - OverloadCandidateSet CandidateSet(Fn->getExprLoc()); + if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts)) { + *Result = ExprError(); + return true; + } // Add the functions denoted by the callee to the set of candidate // functions, including those from argument-dependent lookup. AddOverloadedCallCandidates(ULE, llvm::makeArrayRef(Args, NumArgs), - CandidateSet); + *CandidateSet); // If we found nothing, try to recover. // BuildRecoveryCallExpr diagnoses the error itself, so we just bail // out if it fails. - if (CandidateSet.empty()) { + if (CandidateSet->empty()) { // In Microsoft mode, if we are inside a template class member function then // create a type dependent CallExpr. The goal is to postpone name lookup // to instantiation time to be able to search into type dependent base // classes. if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() && (isa(CurContext) || isa(CurContext))) { - CallExpr *CE = new (Context) CallExpr(Context, Fn, Args, NumArgs, - Context.DependentTy, VK_RValue, - RParenLoc); + CallExpr *CE = new (Context) CallExpr(Context, Fn, + llvm::makeArrayRef(Args, NumArgs), + Context.DependentTy, VK_RValue, + RParenLoc); CE->setTypeDependent(true); - return Owned(CE); + *Result = Owned(CE); + return true; } - return BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc, - llvm::MutableArrayRef(Args, NumArgs), - RParenLoc, /*EmptyLookup=*/true, - AllowTypoCorrection); + return false; } UnbridgedCasts.restore(); + return false; +} - OverloadCandidateSet::iterator Best; - switch (CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best)) { +/// FinishOverloadedCallExpr - given an OverloadCandidateSet, builds and returns +/// the completed call expression. 
If overload resolution fails, emits +/// diagnostics and returns ExprError() +static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn, + UnresolvedLookupExpr *ULE, + SourceLocation LParenLoc, + Expr **Args, unsigned NumArgs, + SourceLocation RParenLoc, + Expr *ExecConfig, + OverloadCandidateSet *CandidateSet, + OverloadCandidateSet::iterator *Best, + OverloadingResult OverloadResult, + bool AllowTypoCorrection) { + if (CandidateSet->empty()) + return BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc, + llvm::MutableArrayRef(Args, NumArgs), + RParenLoc, /*EmptyLookup=*/true, + AllowTypoCorrection); + + switch (OverloadResult) { case OR_Success: { - FunctionDecl *FDecl = Best->Function; - MarkFunctionReferenced(Fn->getExprLoc(), FDecl); - CheckUnresolvedLookupAccess(ULE, Best->FoundDecl); - DiagnoseUseOfDecl(FDecl, ULE->getNameLoc()); - Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl); - return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, RParenLoc, - ExecConfig); + FunctionDecl *FDecl = (*Best)->Function; + SemaRef.MarkFunctionReferenced(Fn->getExprLoc(), FDecl); + SemaRef.CheckUnresolvedLookupAccess(ULE, (*Best)->FoundDecl); + SemaRef.DiagnoseUseOfDecl(FDecl, ULE->getNameLoc()); + Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl); + return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, + RParenLoc, ExecConfig); } case OR_No_Viable_Function: { // Try to recover by looking for viable functions which the user might // have meant to call. - ExprResult Recovery = BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc, + ExprResult Recovery = BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc, llvm::MutableArrayRef(Args, NumArgs), RParenLoc, /*EmptyLookup=*/false, @@ -9787,44 +9846,73 @@ Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, if (!Recovery.isInvalid()) return Recovery; - Diag(Fn->getLocStart(), + SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_no_viable_function_in_call) << ULE->getName() << Fn->getSourceRange(); - CandidateSet.NoteCandidates(*this, OCD_AllCandidates, - llvm::makeArrayRef(Args, NumArgs)); + CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, + llvm::makeArrayRef(Args, NumArgs)); break; } case OR_Ambiguous: - Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call) + SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call) << ULE->getName() << Fn->getSourceRange(); - CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, - llvm::makeArrayRef(Args, NumArgs)); + CandidateSet->NoteCandidates(SemaRef, OCD_ViableCandidates, + llvm::makeArrayRef(Args, NumArgs)); break; - case OR_Deleted: - { - Diag(Fn->getLocStart(), diag::err_ovl_deleted_call) - << Best->Function->isDeleted() - << ULE->getName() - << getDeletedOrUnavailableSuffix(Best->Function) - << Fn->getSourceRange(); - CandidateSet.NoteCandidates(*this, OCD_AllCandidates, - llvm::makeArrayRef(Args, NumArgs)); + case OR_Deleted: { + SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_deleted_call) + << (*Best)->Function->isDeleted() + << ULE->getName() + << SemaRef.getDeletedOrUnavailableSuffix((*Best)->Function) + << Fn->getSourceRange(); + CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, + llvm::makeArrayRef(Args, NumArgs)); - // We emitted an error for the unvailable/deleted function call but keep - // the call in the AST. 
- FunctionDecl *FDecl = Best->Function; - Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl); - return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, - RParenLoc, ExecConfig); - } + // We emitted an error for the unvailable/deleted function call but keep + // the call in the AST. + FunctionDecl *FDecl = (*Best)->Function; + Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl); + return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, + RParenLoc, ExecConfig); + } } // Overload resolution failed. return ExprError(); } +/// BuildOverloadedCallExpr - Given the call expression that calls Fn +/// (which eventually refers to the declaration Func) and the call +/// arguments Args/NumArgs, attempt to resolve the function call down +/// to a specific function. If overload resolution succeeds, returns +/// the call expression produced by overload resolution. +/// Otherwise, emits diagnostics and returns ExprError. +ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, + UnresolvedLookupExpr *ULE, + SourceLocation LParenLoc, + Expr **Args, unsigned NumArgs, + SourceLocation RParenLoc, + Expr *ExecConfig, + bool AllowTypoCorrection) { + OverloadCandidateSet CandidateSet(Fn->getExprLoc()); + ExprResult result; + + if (buildOverloadedCallSet(S, Fn, ULE, Args, NumArgs, LParenLoc, + &CandidateSet, &result)) + return result; + + OverloadCandidateSet::iterator Best; + OverloadingResult OverloadResult = + CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best); + + return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args, NumArgs, + RParenLoc, ExecConfig, &CandidateSet, + &Best, OverloadResult, + AllowTypoCorrection); +} + static bool IsOverloaded(const UnresolvedSetImpl &Functions) { return Functions.size() > 1 || (Functions.size() == 1 && isa(*Functions.begin())); @@ -9889,10 +9977,10 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn, /*ADL*/ true, IsOverloaded(Fns), Fns.begin(), Fns.end()); return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn, - &Args[0], NumArgs, + llvm::makeArrayRef(Args, NumArgs), Context.DependentTy, VK_RValue, - OpLoc)); + OpLoc, false)); } // Build an empty overload set. @@ -9968,7 +10056,8 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn, Args[0] = Input; CallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.take(), - Args, NumArgs, ResultTy, VK, OpLoc); + llvm::makeArrayRef(Args, NumArgs), + ResultTy, VK, OpLoc, false); if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall, FnDecl)) @@ -10069,7 +10158,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc, return Owned(new (Context) BinaryOperator(Args[0], Args[1], Opc, Context.DependentTy, VK_RValue, OK_Ordinary, - OpLoc)); + OpLoc, + FPFeatures.fp_contract)); return Owned(new (Context) CompoundAssignOperator(Args[0], Args[1], Opc, Context.DependentTy, @@ -10077,7 +10167,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc, OK_Ordinary, Context.DependentTy, Context.DependentTy, - OpLoc)); + OpLoc, + FPFeatures.fp_contract)); } // FIXME: save results of ADL from here? 
@@ -10089,11 +10180,9 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc, NestedNameSpecifierLoc(), OpNameInfo, /*ADL*/ true, IsOverloaded(Fns), Fns.begin(), Fns.end()); - return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn, - Args, 2, - Context.DependentTy, - VK_RValue, - OpLoc)); + return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn, Args, + Context.DependentTy, VK_RValue, + OpLoc, FPFeatures.fp_contract)); } // Always do placeholder-like conversions on the RHS. @@ -10208,7 +10297,8 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc, CXXOperatorCallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.take(), - Args, 2, ResultTy, VK, OpLoc); + Args, ResultTy, VK, OpLoc, + FPFeatures.fp_contract); if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall, FnDecl)) @@ -10270,7 +10360,7 @@ Sema::CreateOverloadedBinOp(SourceLocation OpLoc, if (Result.isInvalid()) CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, BinaryOperator::getOpcodeStr(Opc), OpLoc); - return move(Result); + return Result; } case OR_Ambiguous: @@ -10337,10 +10427,10 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, // Can't add any actual overloads yet return Owned(new (Context) CXXOperatorCallExpr(Context, OO_Subscript, Fn, - Args, 2, + Args, Context.DependentTy, VK_RValue, - RLoc)); + RLoc, false)); } // Handle placeholders on both operands. @@ -10416,8 +10506,9 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, CXXOperatorCallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, OO_Subscript, - FnExpr.take(), Args, 2, - ResultTy, VK, RLoc); + FnExpr.take(), Args, + ResultTy, VK, RLoc, + false); if (CheckCallReturnType(FnDecl->getResultType(), LLoc, TheCall, FnDecl)) @@ -10534,7 +10625,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE, } CXXMemberCallExpr *call - = new (Context) CXXMemberCallExpr(Context, MemExprE, Args, NumArgs, + = new (Context) CXXMemberCallExpr(Context, MemExprE, + llvm::makeArrayRef(Args, NumArgs), resultType, valueKind, RParenLoc); if (CheckCallReturnType(proto->getResultType(), @@ -10676,7 +10768,8 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE, assert(Method && "Member call to something that isn't a method?"); CXXMemberCallExpr *TheCall = - new (Context) CXXMemberCallExpr(Context, MemExprE, Args, NumArgs, + new (Context) CXXMemberCallExpr(Context, MemExprE, + llvm::makeArrayRef(Args, NumArgs), ResultType, VK, RParenLoc); // Check for a valid return type. @@ -10904,6 +10997,11 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj, // that calls this method, using Object for the implicit object // parameter and passing along the remaining arguments. CXXMethodDecl *Method = cast(Best->Function); + + // An error diagnostic has already been printed when parsing the declaration. 
+ if (Method->isInvalidDecl()) + return ExprError(); + const FunctionProtoType *Proto = Method->getType()->getAs(); @@ -10942,8 +11040,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj, CXXOperatorCallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, OO_Call, NewFn.take(), - MethodArgs, NumArgs + 1, - ResultTy, VK, RParenLoc); + llvm::makeArrayRef(MethodArgs, NumArgs+1), + ResultTy, VK, RParenLoc, false); delete [] MethodArgs; if (CheckCallReturnType(Method->getResultType(), LParenLoc, TheCall, @@ -10966,7 +11064,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj, if (ObjRes.isInvalid()) IsError = true; else - Object = move(ObjRes); + Object = ObjRes; TheCall->setArg(0, Object.take()); // Check the argument types. @@ -11116,7 +11214,7 @@ Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) { ResultTy = ResultTy.getNonLValueExprType(Context); CXXOperatorCallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr.take(), - &Base, 1, ResultTy, VK, OpLoc); + Base, ResultTy, VK, OpLoc, false); if (CheckCallReturnType(Method->getResultType(), OpLoc, TheCall, Method)) @@ -11187,7 +11285,8 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R, ResultTy = ResultTy.getNonLValueExprType(Context); UserDefinedLiteral *UDL = - new (Context) UserDefinedLiteral(Context, Fn.take(), ConvArgs, Args.size(), + new (Context) UserDefinedLiteral(Context, Fn.take(), + llvm::makeArrayRef(ConvArgs, Args.size()), ResultTy, VK, LitEndLoc, UDSuffixLoc); if (CheckCallReturnType(FD->getResultType(), UDSuffixLoc, UDL, FD)) @@ -11199,6 +11298,80 @@ ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R, return MaybeBindToTemporary(UDL); } +/// Build a call to 'begin' or 'end' for a C++11 for-range statement. If the +/// given LookupResult is non-empty, it is assumed to describe a member which +/// will be invoked. Otherwise, the function will be found via argument +/// dependent lookup. +/// CallExpr is set to a valid expression and FRS_Success returned on success, +/// otherwise CallExpr is set to ExprError() and some non-success value +/// is returned. 
+Sema::ForRangeStatus +Sema::BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, + SourceLocation RangeLoc, VarDecl *Decl, + BeginEndFunction BEF, + const DeclarationNameInfo &NameInfo, + LookupResult &MemberLookup, + OverloadCandidateSet *CandidateSet, + Expr *Range, ExprResult *CallExpr) { + CandidateSet->clear(); + if (!MemberLookup.empty()) { + ExprResult MemberRef = + BuildMemberReferenceExpr(Range, Range->getType(), Loc, + /*IsPtr=*/false, CXXScopeSpec(), + /*TemplateKWLoc=*/SourceLocation(), + /*FirstQualifierInScope=*/0, + MemberLookup, + /*TemplateArgs=*/0); + if (MemberRef.isInvalid()) { + *CallExpr = ExprError(); + Diag(Range->getLocStart(), diag::note_in_for_range) + << RangeLoc << BEF << Range->getType(); + return FRS_DiagnosticIssued; + } + *CallExpr = ActOnCallExpr(S, MemberRef.get(), Loc, MultiExprArg(), Loc, 0); + if (CallExpr->isInvalid()) { + *CallExpr = ExprError(); + Diag(Range->getLocStart(), diag::note_in_for_range) + << RangeLoc << BEF << Range->getType(); + return FRS_DiagnosticIssued; + } + } else { + UnresolvedSet<0> FoundNames; + UnresolvedLookupExpr *Fn = + UnresolvedLookupExpr::Create(Context, /*NamingClass=*/0, + NestedNameSpecifierLoc(), NameInfo, + /*NeedsADL=*/true, /*Overloaded=*/false, + FoundNames.begin(), FoundNames.end()); + + bool CandidateSetError = buildOverloadedCallSet(S, Fn, Fn, &Range, 1, Loc, + CandidateSet, CallExpr); + if (CandidateSet->empty() || CandidateSetError) { + *CallExpr = ExprError(); + return FRS_NoViableFunction; + } + OverloadCandidateSet::iterator Best; + OverloadingResult OverloadResult = + CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best); + + if (OverloadResult == OR_No_Viable_Function) { + *CallExpr = ExprError(); + return FRS_NoViableFunction; + } + *CallExpr = FinishOverloadedCallExpr(*this, S, Fn, Fn, Loc, &Range, 1, + Loc, 0, CandidateSet, &Best, + OverloadResult, + /*AllowTypoCorrection=*/false); + if (CallExpr->isInvalid() || OverloadResult != OR_Success) { + *CallExpr = ExprError(); + Diag(Range->getLocStart(), diag::note_in_for_range) + << RangeLoc << BEF << Range->getType(); + return FRS_DiagnosticIssued; + } + } + return FRS_Success; +} + + /// FixOverloadedFunctionReference - E is an expression that refers to /// a C++ overloaded function (possibly with some parentheses and /// perhaps a '&' around it). 
We have resolved the overloaded function @@ -11358,6 +11531,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found, TemplateArgs, type, valueKind, OK_Ordinary); ME->setHadMultipleCandidates(true); + MarkMemberReferenced(ME); return ME; } diff --git a/lib/Sema/SemaPseudoObject.cpp b/lib/Sema/SemaPseudoObject.cpp index 722ac19..a8d75b2 100644 --- a/lib/Sema/SemaPseudoObject.cpp +++ b/lib/Sema/SemaPseudoObject.cpp @@ -31,6 +31,7 @@ //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" +#include "clang/Sema/ScopeInfo.h" #include "clang/Sema/Initialization.h" #include "clang/AST/ExprObjC.h" #include "clang/Lex/Preprocessor.h" @@ -91,9 +92,8 @@ namespace { return new (S.Context) GenericSelectionExpr(S.Context, gse->getGenericLoc(), gse->getControllingExpr(), - assocTypes.data(), - assocs.data(), - numAssocs, + assocTypes, + assocs, gse->getDefaultLoc(), gse->getRParenLoc(), gse->containsUnexpandedParameterPack(), @@ -187,7 +187,7 @@ namespace { UnaryOperatorKind opcode, Expr *op); - ExprResult complete(Expr *syntacticForm); + virtual ExprResult complete(Expr *syntacticForm); OpaqueValueExpr *capture(Expr *op); OpaqueValueExpr *captureValueAsResult(Expr *op); @@ -198,7 +198,14 @@ namespace { } /// Return true if assignments have a non-void result. - virtual bool assignmentsHaveResult() { return true; } + bool CanCaptureValueOfType(QualType ty) { + assert(!ty->isIncompleteType()); + assert(!ty->isDependentType()); + + if (const CXXRecordDecl *ClassDecl = ty->getAsCXXRecordDecl()) + return ClassDecl->isTriviallyCopyable(); + return true; + } virtual Expr *rebuildAndCaptureObject(Expr *) = 0; virtual ExprResult buildGet() = 0; @@ -206,7 +213,7 @@ namespace { bool captureSetValueAsResult) = 0; }; - /// A PseudoOpBuilder for Objective-C @properties. + /// A PseudoOpBuilder for Objective-C \@properties. class ObjCPropertyOpBuilder : public PseudoOpBuilder { ObjCPropertyRefExpr *RefExpr; ObjCPropertyRefExpr *SyntacticRefExpr; @@ -239,6 +246,9 @@ namespace { Expr *rebuildAndCaptureObject(Expr *syntacticBase); ExprResult buildGet(); ExprResult buildSet(Expr *op, SourceLocation, bool); + ExprResult complete(Expr *SyntacticForm); + + bool isWeakProperty() const; }; /// A PseudoOpBuilder for Objective-C array/dictionary indexing. @@ -292,7 +302,7 @@ OpaqueValueExpr *PseudoOpBuilder::capture(Expr *e) { /// operation. This routine is safe against expressions which may /// already be captured. /// -/// \param Returns the captured expression, which will be the +/// \returns the captured expression, which will be the /// same as the input if the input was already captured OpaqueValueExpr *PseudoOpBuilder::captureValueAsResult(Expr *e) { assert(ResultIndex == PseudoObjectExpr::NoResult); @@ -353,7 +363,7 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc, syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS, opcode, capturedRHS->getType(), capturedRHS->getValueKind(), - OK_Ordinary, opcLoc); + OK_Ordinary, opcLoc, false); } else { ExprResult opLHS = buildGet(); if (opLHS.isInvalid()) return ExprError(); @@ -372,12 +382,12 @@ PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc, OK_Ordinary, opLHS.get()->getType(), result.get()->getType(), - opcLoc); + opcLoc, false); } // The result of the assignment, if not void, is the value set into // the l-value. 
- result = buildSet(result.take(), opcLoc, assignmentsHaveResult()); + result = buildSet(result.take(), opcLoc, /*captureSetValueAsResult*/ true); if (result.isInvalid()) return ExprError(); addSemanticExpr(result.take()); @@ -401,7 +411,7 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc, QualType resultType = result.get()->getType(); // That's the postfix result. - if (UnaryOperator::isPostfix(opcode) && assignmentsHaveResult()) { + if (UnaryOperator::isPostfix(opcode) && CanCaptureValueOfType(resultType)) { result = capture(result.take()); setResultToLastSemantic(); } @@ -420,8 +430,7 @@ PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc, // Store that back into the result. The value stored is the result // of a prefix operation. - result = buildSet(result.take(), opcLoc, - UnaryOperator::isPrefix(opcode) && assignmentsHaveResult()); + result = buildSet(result.take(), opcLoc, UnaryOperator::isPrefix(opcode)); if (result.isInvalid()) return ExprError(); addSemanticExpr(result.take()); @@ -472,6 +481,23 @@ static ObjCMethodDecl *LookupMethodInReceiverType(Sema &S, Selector sel, return S.LookupMethodInObjectType(sel, IT, false); } +bool ObjCPropertyOpBuilder::isWeakProperty() const { + QualType T; + if (RefExpr->isExplicitProperty()) { + const ObjCPropertyDecl *Prop = RefExpr->getExplicitProperty(); + if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak) + return true; + + T = Prop->getType(); + } else if (Getter) { + T = Getter->getResultType(); + } else { + return false; + } + + return T.getObjCLifetime() == Qualifiers::OCL_Weak; +} + bool ObjCPropertyOpBuilder::findGetter() { if (Getter) return true; @@ -532,7 +558,7 @@ bool ObjCPropertyOpBuilder::findSetter(bool warn) { // Do a normal method lookup first. if (ObjCMethodDecl *setter = LookupMethodInReceiverType(S, SetterSelector, RefExpr)) { - if (setter->isSynthesized() && warn) + if (setter->isPropertyAccessor() && warn) if (const ObjCInterfaceDecl *IFace = dyn_cast(setter->getDeclContext())) { const StringRef thisPropertyName(prop->getName()); @@ -617,7 +643,7 @@ ExprResult ObjCPropertyOpBuilder::buildGet() { /// Store to an Objective-C property reference. /// -/// \param bindSetValueAsResult - If true, capture the actual +/// \param captureSetValueAsResult If true, capture the actual /// value being set as the value of the property operation. ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc, bool captureSetValueAsResult) { @@ -676,7 +702,8 @@ ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc, ObjCMessageExpr *msgExpr = cast(msg.get()->IgnoreImplicit()); Expr *arg = msgExpr->getArg(0); - msgExpr->setArg(0, captureValueAsResult(arg)); + if (CanCaptureValueOfType(arg->getType())) + msgExpr->setArg(0, captureValueAsResult(arg)); } return msg; @@ -819,6 +846,19 @@ ObjCPropertyOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc, return PseudoOpBuilder::buildIncDecOperation(Sc, opcLoc, opcode, op); } +ExprResult ObjCPropertyOpBuilder::complete(Expr *SyntacticForm) { + if (S.getLangOpts().ObjCAutoRefCount && isWeakProperty()) { + DiagnosticsEngine::Level Level = + S.Diags.getDiagnosticLevel(diag::warn_arc_repeated_use_of_weak, + SyntacticForm->getLocStart()); + if (Level != DiagnosticsEngine::Ignored) + S.getCurFunction()->recordUseOfWeak(SyntacticRefExpr, + SyntacticRefExpr->isMessagingGetter()); + } + + return PseudoOpBuilder::complete(SyntacticForm); +} + // ObjCSubscript build stuff. 
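
ObjCPropertyOpBuilder::complete now records uses of __weak properties so the function-scope analysis behind warn_arc_repeated_use_of_weak can see them. A hedged Objective-C sketch of the pattern that warning targets, compiled under ARC with -Warc-repeated-use-of-weak enabled (class and property names are invented):

  #import <Foundation/Foundation.h>

  @interface Node : NSObject
  @property (weak) id delegate;
  @end

  NSString *describeDelegate(Node *n) {
    // Each read of n.delegate loads the weak reference; reading it twice in one
    // function is flagged because the object may be deallocated between loads.
    if (n.delegate != nil)
      return [n.delegate description];
    return @"<no delegate>";
  }
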
// @@ -1035,7 +1075,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexGetter() { 0 /*TypeSourceInfo */, S.Context.getTranslationUnitDecl(), true /*Instance*/, false/*isVariadic*/, - /*isSynthesized=*/false, + /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, false); @@ -1151,7 +1191,7 @@ bool ObjCSubscriptOpBuilder::findAtIndexSetter() { ResultTInfo, S.Context.getTranslationUnitDecl(), true /*Instance*/, false/*isVariadic*/, - /*isSynthesized=*/false, + /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, false); @@ -1255,7 +1295,7 @@ ExprResult ObjCSubscriptOpBuilder::buildGet() { /// Store into the container the "op" object at "Index"'ed location /// by building this messaging expression: /// - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index; -/// \param bindSetValueAsResult - If true, capture the actual +/// \param captureSetValueAsResult If true, capture the actual /// value being set as the value of the property operation. ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc, bool captureSetValueAsResult) { @@ -1279,7 +1319,8 @@ ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc, ObjCMessageExpr *msgExpr = cast(msg.get()->IgnoreImplicit()); Expr *arg = msgExpr->getArg(0); - msgExpr->setArg(0, captureValueAsResult(arg)); + if (CanCaptureValueOfType(arg->getType())) + msgExpr->setArg(0, captureValueAsResult(arg)); } return msg; @@ -1333,7 +1374,7 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc, // Do nothing if either argument is dependent. if (LHS->isTypeDependent() || RHS->isTypeDependent()) return new (Context) BinaryOperator(LHS, RHS, opcode, Context.DependentTy, - VK_RValue, OK_Ordinary, opcLoc); + VK_RValue, OK_Ordinary, opcLoc, false); // Filter out non-overload placeholder types in the RHS. 
if (RHS->getType()->isNonOverloadPlaceholderType()) { @@ -1404,14 +1445,14 @@ Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) { cop->getObjectKind(), cop->getComputationLHSType(), cop->getComputationResultType(), - cop->getOperatorLoc()); + cop->getOperatorLoc(), false); } else if (BinaryOperator *bop = dyn_cast(syntax)) { Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS()); Expr *rhs = cast(bop->getRHS())->getSourceExpr(); return new (Context) BinaryOperator(lhs, rhs, bop->getOpcode(), bop->getType(), bop->getValueKind(), bop->getObjectKind(), - bop->getOperatorLoc()); + bop->getOperatorLoc(), false); } else { assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject)); return stripOpaqueValuesFromPseudoObjectRef(*this, syntax); diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp index 86884b7..f55174e 100644 --- a/lib/Sema/SemaStmt.cpp +++ b/lib/Sema/SemaStmt.cpp @@ -28,27 +28,10 @@ #include "clang/Lex/Preprocessor.h" #include "clang/Basic/TargetInfo.h" #include "llvm/ADT/ArrayRef.h" -#include "llvm/ADT/BitVector.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" -#include "llvm/ADT/Triple.h" -#include "llvm/MC/MCAsmInfo.h" -#include "llvm/MC/MCContext.h" -#include "llvm/MC/MCInst.h" -#include "llvm/MC/MCInstPrinter.h" -#include "llvm/MC/MCInstrInfo.h" -#include "llvm/MC/MCObjectFileInfo.h" -#include "llvm/MC/MCRegisterInfo.h" -#include "llvm/MC/MCStreamer.h" -#include "llvm/MC/MCSubtargetInfo.h" -#include "llvm/MC/MCTargetAsmParser.h" -#include "llvm/MC/MCParser/MCAsmLexer.h" -#include "llvm/MC/MCParser/MCAsmParser.h" -#include "llvm/Support/SourceMgr.h" -#include "llvm/Support/TargetRegistry.h" -#include "llvm/Support/TargetSelect.h" using namespace clang; using namespace sema; @@ -177,6 +160,13 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S) { !E->isUnusedResultAWarning(WarnExpr, Loc, R1, R2, Context)) return; + // If this is a GNU statement expression expanded from a macro, it is probably + // unused because it is a function-like macro that can be used as either an + // expression or statement. Don't warn, because it is almost certainly a + // false positive. + if (isa(E) && Loc.isMacroID()) + return; + // Okay, we have an unused result. Depending on what the base expression is, // we might want to make a more specific diagnostic. Check for one of these // cases now. @@ -271,7 +261,7 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R, MultiStmtArg elts, bool isStmtExpr) { unsigned NumElts = elts.size(); - Stmt **Elts = reinterpret_cast(elts.release()); + Stmt **Elts = elts.data(); // If we're in C89 mode, check that we don't have any decls after stmts. If // so, emit an extension diagnostic. if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) { @@ -381,8 +371,10 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, // Otherwise, things are good. Fill in the declaration and return it. LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt); TheDecl->setStmt(LS); - if (!TheDecl->isGnuLocal()) + if (!TheDecl->isGnuLocal()) { + TheDecl->setLocStart(IdentLoc); TheDecl->setLocation(IdentLoc); + } return Owned(LS); } @@ -1566,25 +1558,6 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc, ForLoc, RParenLoc)); } -namespace { - -enum BeginEndFunction { - BEF_begin, - BEF_end -}; - -/// Build a variable declaration for a for-range statement. 
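
DiagnoseUnusedExprResult now skips GNU statement expressions that come from a macro expansion: such macros are commonly written to work both as an expression and as a plain statement, so the unused-value warning was almost always a false positive. A sketch with an invented macro name:

  // Usable either way; discarding the statement-expression result in the first
  // call no longer triggers the unused-result warning.
  #define READ_ONCE(x) ({ __typeof__(x) tmp_ = (x); tmp_; })

  void touch(int counter) {
    READ_ONCE(counter);                 // used as a statement: no warning
    int snapshot = READ_ONCE(counter);  // used as an expression
    (void)snapshot;
  }
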
-static VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc, - QualType Type, const char *Name) { - DeclContext *DC = SemaRef.CurContext; - IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name); - TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc); - VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, - TInfo, SC_Auto, SC_None); - Decl->setImplicit(); - return Decl; -} - /// Finish building a variable declaration for a for-range statement. /// \return true if an error occurs. static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init, @@ -1617,12 +1590,14 @@ static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init, return false; } +namespace { + /// Produce a note indicating which begin/end function was implicitly called -/// by a C++0x for-range statement. This is often not obvious from the code, +/// by a C++11 for-range statement. This is often not obvious from the code, /// nor from the diagnostics produced when analysing the implicit expressions /// required in a for-range statement. void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E, - BeginEndFunction BEF) { + Sema::BeginEndFunction BEF) { CallExpr *CE = dyn_cast(E); if (!CE) return; @@ -1643,56 +1618,16 @@ void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E, << BEF << IsTemplate << Description << E->getType(); } -/// Build a call to 'begin' or 'end' for a C++0x for-range statement. If the -/// given LookupResult is non-empty, it is assumed to describe a member which -/// will be invoked. Otherwise, the function will be found via argument -/// dependent lookup. -static ExprResult BuildForRangeBeginEndCall(Sema &SemaRef, Scope *S, - SourceLocation Loc, - VarDecl *Decl, - BeginEndFunction BEF, - const DeclarationNameInfo &NameInfo, - LookupResult &MemberLookup, - Expr *Range) { - ExprResult CallExpr; - if (!MemberLookup.empty()) { - ExprResult MemberRef = - SemaRef.BuildMemberReferenceExpr(Range, Range->getType(), Loc, - /*IsPtr=*/false, CXXScopeSpec(), - /*TemplateKWLoc=*/SourceLocation(), - /*FirstQualifierInScope=*/0, - MemberLookup, - /*TemplateArgs=*/0); - if (MemberRef.isInvalid()) - return ExprError(); - CallExpr = SemaRef.ActOnCallExpr(S, MemberRef.get(), Loc, MultiExprArg(), - Loc, 0); - if (CallExpr.isInvalid()) - return ExprError(); - } else { - UnresolvedSet<0> FoundNames; - // C++0x [stmt.ranged]p1: For the purposes of this name lookup, namespace - // std is an associated namespace. - UnresolvedLookupExpr *Fn = - UnresolvedLookupExpr::Create(SemaRef.Context, /*NamingClass=*/0, - NestedNameSpecifierLoc(), NameInfo, - /*NeedsADL=*/true, /*Overloaded=*/false, - FoundNames.begin(), FoundNames.end(), - /*LookInStdNamespace=*/true); - CallExpr = SemaRef.BuildOverloadedCallExpr(S, Fn, Fn, Loc, &Range, 1, Loc, - 0, /*AllowTypoCorrection=*/false); - if (CallExpr.isInvalid()) { - SemaRef.Diag(Range->getLocStart(), diag::note_for_range_type) - << Range->getType(); - return ExprError(); - } - } - if (FinishForRangeVarDecl(SemaRef, Decl, CallExpr.get(), Loc, - diag::err_for_range_iter_deduction_failure)) { - NoteForRangeBeginEndFunction(SemaRef, CallExpr.get(), BEF); - return ExprError(); - } - return CallExpr; +/// Build a variable declaration for a for-range statement. 
+VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc, + QualType Type, const char *Name) { + DeclContext *DC = SemaRef.CurContext; + IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name); + TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc); + VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, + TInfo, SC_Auto, SC_None); + Decl->setImplicit(); + return Decl; } } @@ -1723,7 +1658,7 @@ static bool ObjCEnumerationCollection(Expr *Collection) { StmtResult Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *First, SourceLocation ColonLoc, Expr *Range, - SourceLocation RParenLoc) { + SourceLocation RParenLoc, BuildForRangeKind Kind) { if (!First || !Range) return StmtError(); @@ -1761,15 +1696,137 @@ Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc, return BuildCXXForRangeStmt(ForLoc, ColonLoc, RangeDecl.get(), /*BeginEndDecl=*/0, /*Cond=*/0, /*Inc=*/0, DS, - RParenLoc); + RParenLoc, Kind); } -/// BuildCXXForRangeStmt - Build or instantiate a C++0x for-range statement. +/// \brief Create the initialization, compare, and increment steps for +/// the range-based for loop expression. +/// This function does not handle array-based for loops, +/// which are created in Sema::BuildCXXForRangeStmt. +/// +/// \returns a ForRangeStatus indicating success or what kind of error occurred. +/// BeginExpr and EndExpr are set and FRS_Success is returned on success; +/// CandidateSet and BEF are set and some non-success value is returned on +/// failure. +static Sema::ForRangeStatus BuildNonArrayForRange(Sema &SemaRef, Scope *S, + Expr *BeginRange, Expr *EndRange, + QualType RangeType, + VarDecl *BeginVar, + VarDecl *EndVar, + SourceLocation ColonLoc, + OverloadCandidateSet *CandidateSet, + ExprResult *BeginExpr, + ExprResult *EndExpr, + Sema::BeginEndFunction *BEF) { + DeclarationNameInfo BeginNameInfo( + &SemaRef.PP.getIdentifierTable().get("begin"), ColonLoc); + DeclarationNameInfo EndNameInfo(&SemaRef.PP.getIdentifierTable().get("end"), + ColonLoc); + + LookupResult BeginMemberLookup(SemaRef, BeginNameInfo, + Sema::LookupMemberName); + LookupResult EndMemberLookup(SemaRef, EndNameInfo, Sema::LookupMemberName); + + if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) { + // - if _RangeT is a class type, the unqualified-ids begin and end are + // looked up in the scope of class _RangeT as if by class member access + // lookup (3.4.5), and if either (or both) finds at least one + // declaration, begin-expr and end-expr are __range.begin() and + // __range.end(), respectively; + SemaRef.LookupQualifiedName(BeginMemberLookup, D); + SemaRef.LookupQualifiedName(EndMemberLookup, D); + + if (BeginMemberLookup.empty() != EndMemberLookup.empty()) { + SourceLocation RangeLoc = BeginVar->getLocation(); + *BEF = BeginMemberLookup.empty() ? Sema::BEF_end : Sema::BEF_begin; + + SemaRef.Diag(RangeLoc, diag::err_for_range_member_begin_end_mismatch) + << RangeLoc << BeginRange->getType() << *BEF; + return Sema::FRS_DiagnosticIssued; + } + } else { + // - otherwise, begin-expr and end-expr are begin(__range) and + // end(__range), respectively, where begin and end are looked up with + // argument-dependent lookup (3.4.2). For the purposes of this name + // lookup, namespace std is an associated namespace. 
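// Illustrative sketch, not part of the patch: the two lookup paths that
// BuildNonArrayForRange implements. For a class-type range, 'begin'/'end' are
// looked up as members; for anything else they are found by argument-dependent
// lookup. The types below are invented for the example.
struct Buffer {                           // class type: member lookup wins
  int data[4];
  int *begin() { return data; }
  int *end()   { return data + 4; }
};

namespace lib {
  struct Span { int *first, *last; };
  int *begin(Span s) { return s.first; }  // found via ADL
  int *end(Span s)   { return s.last; }
}

int sum(Buffer &b, lib::Span s) {
  int total = 0;
  for (int v : b) total += v;             // calls b.begin()/b.end()
  for (int v : s) total += v;             // calls lib::begin(s)/lib::end(s)
  return total;
}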
+ + } + + *BEF = Sema::BEF_begin; + Sema::ForRangeStatus RangeStatus = + SemaRef.BuildForRangeBeginEndCall(S, ColonLoc, ColonLoc, BeginVar, + Sema::BEF_begin, BeginNameInfo, + BeginMemberLookup, CandidateSet, + BeginRange, BeginExpr); + + if (RangeStatus != Sema::FRS_Success) + return RangeStatus; + if (FinishForRangeVarDecl(SemaRef, BeginVar, BeginExpr->get(), ColonLoc, + diag::err_for_range_iter_deduction_failure)) { + NoteForRangeBeginEndFunction(SemaRef, BeginExpr->get(), *BEF); + return Sema::FRS_DiagnosticIssued; + } + + *BEF = Sema::BEF_end; + RangeStatus = + SemaRef.BuildForRangeBeginEndCall(S, ColonLoc, ColonLoc, EndVar, + Sema::BEF_end, EndNameInfo, + EndMemberLookup, CandidateSet, + EndRange, EndExpr); + if (RangeStatus != Sema::FRS_Success) + return RangeStatus; + if (FinishForRangeVarDecl(SemaRef, EndVar, EndExpr->get(), ColonLoc, + diag::err_for_range_iter_deduction_failure)) { + NoteForRangeBeginEndFunction(SemaRef, EndExpr->get(), *BEF); + return Sema::FRS_DiagnosticIssued; + } + return Sema::FRS_Success; +} + +/// Speculatively attempt to dereference an invalid range expression. +/// If the attempt fails, this function will return a valid, null StmtResult +/// and emit no diagnostics. +static StmtResult RebuildForRangeWithDereference(Sema &SemaRef, Scope *S, + SourceLocation ForLoc, + Stmt *LoopVarDecl, + SourceLocation ColonLoc, + Expr *Range, + SourceLocation RangeLoc, + SourceLocation RParenLoc) { + // Determine whether we can rebuild the for-range statement with a + // dereferenced range expression. + ExprResult AdjustedRange; + { + Sema::SFINAETrap Trap(SemaRef); + + AdjustedRange = SemaRef.BuildUnaryOp(S, RangeLoc, UO_Deref, Range); + if (AdjustedRange.isInvalid()) + return StmtResult(); + + StmtResult SR = + SemaRef.ActOnCXXForRangeStmt(ForLoc, LoopVarDecl, ColonLoc, + AdjustedRange.get(), RParenLoc, + Sema::BFRK_Check); + if (SR.isInvalid()) + return StmtResult(); + } + + // The attempt to dereference worked well enough that it could produce a valid + // loop. Produce a fixit, and rebuild the loop with diagnostics enabled, in + // case there are any other (non-fatal) problems with it. + SemaRef.Diag(RangeLoc, diag::err_for_range_dereference) + << Range->getType() << FixItHint::CreateInsertion(RangeLoc, "*"); + return SemaRef.ActOnCXXForRangeStmt(ForLoc, LoopVarDecl, ColonLoc, + AdjustedRange.get(), RParenLoc, + Sema::BFRK_Rebuild); +} + +/// BuildCXXForRangeStmt - Build or instantiate a C++11 for-range statement. 
StmtResult Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEnd, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, - SourceLocation RParenLoc) { + SourceLocation RParenLoc, BuildForRangeKind Kind) { Scope *S = getCurScope(); DeclStmt *RangeDS = cast(RangeDecl); @@ -1855,50 +1912,43 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, return StmtError(); } } else { - DeclarationNameInfo BeginNameInfo(&PP.getIdentifierTable().get("begin"), - ColonLoc); - DeclarationNameInfo EndNameInfo(&PP.getIdentifierTable().get("end"), - ColonLoc); - - LookupResult BeginMemberLookup(*this, BeginNameInfo, LookupMemberName); - LookupResult EndMemberLookup(*this, EndNameInfo, LookupMemberName); - - if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) { - // - if _RangeT is a class type, the unqualified-ids begin and end are - // looked up in the scope of class _RangeT as if by class member access - // lookup (3.4.5), and if either (or both) finds at least one - // declaration, begin-expr and end-expr are __range.begin() and - // __range.end(), respectively; - LookupQualifiedName(BeginMemberLookup, D); - LookupQualifiedName(EndMemberLookup, D); - - if (BeginMemberLookup.empty() != EndMemberLookup.empty()) { - Diag(ColonLoc, diag::err_for_range_member_begin_end_mismatch) - << RangeType << BeginMemberLookup.empty(); - return StmtError(); - } - } else { - // - otherwise, begin-expr and end-expr are begin(__range) and - // end(__range), respectively, where begin and end are looked up with - // argument-dependent lookup (3.4.2). For the purposes of this name - // lookup, namespace std is an associated namespace. + OverloadCandidateSet CandidateSet(RangeLoc); + Sema::BeginEndFunction BEFFailure; + ForRangeStatus RangeStatus = + BuildNonArrayForRange(*this, S, BeginRangeRef.get(), + EndRangeRef.get(), RangeType, + BeginVar, EndVar, ColonLoc, &CandidateSet, + &BeginExpr, &EndExpr, &BEFFailure); + + // If building the range failed, try dereferencing the range expression + // unless a diagnostic was issued or the end function is problematic. + if (Kind == BFRK_Build && RangeStatus == FRS_NoViableFunction && + BEFFailure == BEF_begin) { + StmtResult SR = RebuildForRangeWithDereference(*this, S, ForLoc, + LoopVarDecl, ColonLoc, + Range, RangeLoc, + RParenLoc); + if (SR.isInvalid() || SR.isUsable()) + return SR; } - BeginExpr = BuildForRangeBeginEndCall(*this, S, ColonLoc, BeginVar, - BEF_begin, BeginNameInfo, - BeginMemberLookup, - BeginRangeRef.get()); - if (BeginExpr.isInvalid()) - return StmtError(); - - EndExpr = BuildForRangeBeginEndCall(*this, S, ColonLoc, EndVar, - BEF_end, EndNameInfo, - EndMemberLookup, EndRangeRef.get()); - if (EndExpr.isInvalid()) + // Otherwise, emit diagnostics if we haven't already. + if (RangeStatus == FRS_NoViableFunction) { + Expr *Range = BEFFailure ? EndRangeRef.get() : BeginRangeRef.get(); + Diag(Range->getLocStart(), diag::err_for_range_invalid) + << RangeLoc << Range->getType() << BEFFailure; + CandidateSet.NoteCandidates(*this, OCD_AllCandidates, + llvm::makeArrayRef(&Range, /*NumArgs=*/1)); + } + // Return an error if no fix was discovered. + if (RangeStatus != FRS_Success) return StmtError(); } - // C++0x [decl.spec.auto]p6: BeginType and EndType must be the same. + assert(!BeginExpr.isInvalid() && !EndExpr.isInvalid() && + "invalid range expression in for loop"); + + // C++11 [dcl.spec.auto]p7: BeginType and EndType must be the same. 
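// Illustrative sketch, not part of the patch: the situation
// RebuildForRangeWithDereference handles. When no usable 'begin' is found for
// a pointer-typed range, the loop is speculatively rebuilt over '*range' under
// a SFINAE trap; if that version would work, a fix-it suggesting '*' is
// emitted and the loop is rebuilt with diagnostics enabled.
#include <vector>

int sumAll(std::vector<int> *v) {
  int total = 0;
  // for (int x : v) total += x;    // error: invalid range expression of type
  //                                // 'std::vector<int> *'; fix-it: insert '*'
  for (int x : *v) total += x;      // the form the fix-it rewrites it to
  return total;
}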
QualType BeginType = BeginVar->getType(), EndType = EndVar->getType(); if (!Context.hasSameType(BeginType, EndType)) { Diag(RangeLoc, diag::err_for_range_begin_end_types_differ) @@ -1930,6 +1980,8 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, NotEqExpr = ActOnBooleanCondition(S, ColonLoc, NotEqExpr.get()); NotEqExpr = ActOnFinishFullExpr(NotEqExpr.get()); if (NotEqExpr.isInvalid()) { + Diag(RangeLoc, diag::note_for_range_invalid_iterator) + << RangeLoc << 0 << BeginRangeRef.get()->getType(); NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin); if (!Context.hasSameType(BeginType, EndType)) NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end); @@ -1945,6 +1997,8 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, IncrExpr = ActOnUnaryOp(S, ColonLoc, tok::plusplus, BeginRef.get()); IncrExpr = ActOnFinishFullExpr(IncrExpr.get()); if (IncrExpr.isInvalid()) { + Diag(RangeLoc, diag::note_for_range_invalid_iterator) + << RangeLoc << 2 << BeginRangeRef.get()->getType() ; NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin); return StmtError(); } @@ -1957,12 +2011,15 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, ExprResult DerefExpr = ActOnUnaryOp(S, ColonLoc, tok::star, BeginRef.get()); if (DerefExpr.isInvalid()) { + Diag(RangeLoc, diag::note_for_range_invalid_iterator) + << RangeLoc << 1 << BeginRangeRef.get()->getType(); NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin); return StmtError(); } - // Attach *__begin as initializer for VD. - if (!LoopVar->isInvalidDecl()) { + // Attach *__begin as initializer for VD. Don't touch it if we're just + // trying to determine whether this would be a valid range. + if (!LoopVar->isInvalidDecl() && Kind != BFRK_Check) { AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false, /*TypeMayContainAuto=*/true); if (LoopVar->isInvalidDecl()) @@ -1973,6 +2030,11 @@ Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, RangeVar->setUsed(); } + // Don't bother to actually allocate the result if we're just trying to + // determine whether it would be valid. + if (Kind == BFRK_Check) + return StmtResult(); + return Owned(new (Context) CXXForRangeStmt(RangeDS, cast_or_null(BeginEndDecl.get()), NotEqExpr.take(), IncrExpr.take(), @@ -2485,600 +2547,6 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) { return Owned(Result); } -/// CheckAsmLValue - GNU C has an extremely ugly extension whereby they silently -/// ignore "noop" casts in places where an lvalue is required by an inline asm. -/// We emulate this behavior when -fheinous-gnu-extensions is specified, but -/// provide a strong guidance to not use it. -/// -/// This method checks to see if the argument is an acceptable l-value and -/// returns false if it is a case we can handle. -static bool CheckAsmLValue(const Expr *E, Sema &S) { - // Type dependent expressions will be checked during instantiation. - if (E->isTypeDependent()) - return false; - - if (E->isLValue()) - return false; // Cool, this is an lvalue. - - // Okay, this is not an lvalue, but perhaps it is the result of a cast that we - // are supposed to allow. 
- const Expr *E2 = E->IgnoreParenNoopCasts(S.Context); - if (E != E2 && E2->isLValue()) { - if (!S.getLangOpts().HeinousExtensions) - S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue) - << E->getSourceRange(); - else - S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue) - << E->getSourceRange(); - // Accept, even if we emitted an error diagnostic. - return false; - } - - // None of the above, just randomly invalid non-lvalue. - return true; -} - -/// isOperandMentioned - Return true if the specified operand # is mentioned -/// anywhere in the decomposed asm string. -static bool isOperandMentioned(unsigned OpNo, - ArrayRef AsmStrPieces) { - for (unsigned p = 0, e = AsmStrPieces.size(); p != e; ++p) { - const AsmStmt::AsmStringPiece &Piece = AsmStrPieces[p]; - if (!Piece.isOperand()) continue; - - // If this is a reference to the input and if the input was the smaller - // one, then we have to reject this asm. - if (Piece.getOperandNo() == OpNo) - return true; - } - return false; -} - -StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple, - bool IsVolatile, unsigned NumOutputs, - unsigned NumInputs, IdentifierInfo **Names, - MultiExprArg constraints, MultiExprArg exprs, - Expr *asmString, MultiExprArg clobbers, - SourceLocation RParenLoc, bool MSAsm) { - unsigned NumClobbers = clobbers.size(); - StringLiteral **Constraints = - reinterpret_cast(constraints.get()); - Expr **Exprs = exprs.get(); - StringLiteral *AsmString = cast(asmString); - StringLiteral **Clobbers = reinterpret_cast(clobbers.get()); - - SmallVector OutputConstraintInfos; - - // The parser verifies that there is a string literal here. - if (!AsmString->isAscii()) - return StmtError(Diag(AsmString->getLocStart(),diag::err_asm_wide_character) - << AsmString->getSourceRange()); - - for (unsigned i = 0; i != NumOutputs; i++) { - StringLiteral *Literal = Constraints[i]; - if (!Literal->isAscii()) - return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character) - << Literal->getSourceRange()); - - StringRef OutputName; - if (Names[i]) - OutputName = Names[i]->getName(); - - TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName); - if (!Context.getTargetInfo().validateOutputConstraint(Info)) - return StmtError(Diag(Literal->getLocStart(), - diag::err_asm_invalid_output_constraint) - << Info.getConstraintStr()); - - // Check that the output exprs are valid lvalues. - Expr *OutputExpr = Exprs[i]; - if (CheckAsmLValue(OutputExpr, *this)) { - return StmtError(Diag(OutputExpr->getLocStart(), - diag::err_asm_invalid_lvalue_in_output) - << OutputExpr->getSourceRange()); - } - - OutputConstraintInfos.push_back(Info); - } - - SmallVector InputConstraintInfos; - - for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) { - StringLiteral *Literal = Constraints[i]; - if (!Literal->isAscii()) - return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character) - << Literal->getSourceRange()); - - StringRef InputName; - if (Names[i]) - InputName = Names[i]->getName(); - - TargetInfo::ConstraintInfo Info(Literal->getString(), InputName); - if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos.data(), - NumOutputs, Info)) { - return StmtError(Diag(Literal->getLocStart(), - diag::err_asm_invalid_input_constraint) - << Info.getConstraintStr()); - } - - Expr *InputExpr = Exprs[i]; - - // Only allow void types for memory constraints. 
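// Illustrative sketch, not part of the patch: the case CheckAsmLValue is
// written to tolerate. GNU C permits a no-op cast where an asm output operand
// needs an lvalue; clang only accepts that spelling under
// -fheinous-gnu-extensions, and still diagnoses it.
void touch(unsigned long v) {
  // Rejected by default (err_invalid_asm_cast_lvalue); accepted with a warning
  // under -fheinous-gnu-extensions, since the cast is a no-op on an lvalue:
  //   asm volatile("" : "=r"((unsigned long)v));

  asm volatile("" : "=r"(v));   // the portable spelling: a plain lvalue
}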
- if (Info.allowsMemory() && !Info.allowsRegister()) { - if (CheckAsmLValue(InputExpr, *this)) - return StmtError(Diag(InputExpr->getLocStart(), - diag::err_asm_invalid_lvalue_in_input) - << Info.getConstraintStr() - << InputExpr->getSourceRange()); - } - - if (Info.allowsRegister()) { - if (InputExpr->getType()->isVoidType()) { - return StmtError(Diag(InputExpr->getLocStart(), - diag::err_asm_invalid_type_in_input) - << InputExpr->getType() << Info.getConstraintStr() - << InputExpr->getSourceRange()); - } - } - - ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]); - if (Result.isInvalid()) - return StmtError(); - - Exprs[i] = Result.take(); - InputConstraintInfos.push_back(Info); - } - - // Check that the clobbers are valid. - for (unsigned i = 0; i != NumClobbers; i++) { - StringLiteral *Literal = Clobbers[i]; - if (!Literal->isAscii()) - return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character) - << Literal->getSourceRange()); - - StringRef Clobber = Literal->getString(); - - if (!Context.getTargetInfo().isValidClobber(Clobber)) - return StmtError(Diag(Literal->getLocStart(), - diag::err_asm_unknown_register_name) << Clobber); - } - - AsmStmt *NS = - new (Context) AsmStmt(Context, AsmLoc, IsSimple, IsVolatile, MSAsm, - NumOutputs, NumInputs, Names, Constraints, Exprs, - AsmString, NumClobbers, Clobbers, RParenLoc); - // Validate the asm string, ensuring it makes sense given the operands we - // have. - SmallVector Pieces; - unsigned DiagOffs; - if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) { - Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID) - << AsmString->getSourceRange(); - return StmtError(); - } - - // Validate tied input operands for type mismatches. - for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) { - TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; - - // If this is a tied constraint, verify that the output and input have - // either exactly the same type, or that they are int/ptr operands with the - // same size (int/long, int*/long, are ok etc). - if (!Info.hasTiedOperand()) continue; - - unsigned TiedTo = Info.getTiedOperand(); - unsigned InputOpNo = i+NumOutputs; - Expr *OutputExpr = Exprs[TiedTo]; - Expr *InputExpr = Exprs[InputOpNo]; - - if (OutputExpr->isTypeDependent() || InputExpr->isTypeDependent()) - continue; - - QualType InTy = InputExpr->getType(); - QualType OutTy = OutputExpr->getType(); - if (Context.hasSameType(InTy, OutTy)) - continue; // All types can be tied to themselves. - - // Decide if the input and output are in the same domain (integer/ptr or - // floating point. - enum AsmDomain { - AD_Int, AD_FP, AD_Other - } InputDomain, OutputDomain; - - if (InTy->isIntegerType() || InTy->isPointerType()) - InputDomain = AD_Int; - else if (InTy->isRealFloatingType()) - InputDomain = AD_FP; - else - InputDomain = AD_Other; - - if (OutTy->isIntegerType() || OutTy->isPointerType()) - OutputDomain = AD_Int; - else if (OutTy->isRealFloatingType()) - OutputDomain = AD_FP; - else - OutputDomain = AD_Other; - - // They are ok if they are the same size and in the same domain. This - // allows tying things like: - // void* to int* - // void* to int if they are the same size. - // double to long double if they are the same size. 
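// Illustrative sketch, not part of the patch: the tied-operand rule being
// checked above. A "0" constraint ties an input to output operand 0; the two
// types must match exactly or be same-sized values in the same
// (integer/pointer vs. floating-point) domain. Sizes assume an LP64 target.
long roundTrip(long out, void *p) {
  // Accepted: 'void *' and 'long' have the same size and both live in the
  // integer/pointer domain, so tying them is fine.
  asm("" : "=r"(out) : "0"(p));

  // A float tied to a double output would be rejected (same domain, different
  // sizes): err_asm_tying_incompatible_types.
  return out;
}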
- // - uint64_t OutSize = Context.getTypeSize(OutTy); - uint64_t InSize = Context.getTypeSize(InTy); - if (OutSize == InSize && InputDomain == OutputDomain && - InputDomain != AD_Other) - continue; - - // If the smaller input/output operand is not mentioned in the asm string, - // then we can promote the smaller one to a larger input and the asm string - // won't notice. - bool SmallerValueMentioned = false; - - // If this is a reference to the input and if the input was the smaller - // one, then we have to reject this asm. - if (isOperandMentioned(InputOpNo, Pieces)) { - // This is a use in the asm string of the smaller operand. Since we - // codegen this by promoting to a wider value, the asm will get printed - // "wrong". - SmallerValueMentioned |= InSize < OutSize; - } - if (isOperandMentioned(TiedTo, Pieces)) { - // If this is a reference to the output, and if the output is the larger - // value, then it's ok because we'll promote the input to the larger type. - SmallerValueMentioned |= OutSize < InSize; - } - - // If the smaller value wasn't mentioned in the asm string, and if the - // output was a register, just extend the shorter one to the size of the - // larger one. - if (!SmallerValueMentioned && InputDomain != AD_Other && - OutputConstraintInfos[TiedTo].allowsRegister()) - continue; - - // Either both of the operands were mentioned or the smaller one was - // mentioned. One more special case that we'll allow: if the tied input is - // integer, unmentioned, and is a constant, then we'll allow truncating it - // down to the size of the destination. - if (InputDomain == AD_Int && OutputDomain == AD_Int && - !isOperandMentioned(InputOpNo, Pieces) && - InputExpr->isEvaluatable(Context)) { - CastKind castKind = - (OutTy->isBooleanType() ? CK_IntegralToBoolean : CK_IntegralCast); - InputExpr = ImpCastExprToType(InputExpr, OutTy, castKind).take(); - Exprs[InputOpNo] = InputExpr; - NS->setInputExpr(i, InputExpr); - continue; - } - - Diag(InputExpr->getLocStart(), - diag::err_asm_tying_incompatible_types) - << InTy << OutTy << OutputExpr->getSourceRange() - << InputExpr->getSourceRange(); - return StmtError(); - } - - return Owned(NS); -} - -// isMSAsmKeyword - Return true if this is an MS-style inline asm keyword. These -// require special handling. -static bool isMSAsmKeyword(StringRef Name) { - bool Ret = llvm::StringSwitch(Name) - .Cases("EVEN", "ALIGN", true) // Alignment directives. - .Cases("LENGTH", "SIZE", "TYPE", true) // Type and variable sizes. - .Case("_emit", true) // _emit Pseudoinstruction. - .Default(false); - return Ret; -} - -static StringRef getSpelling(Sema &SemaRef, Token AsmTok) { - StringRef Asm; - SmallString<512> TokenBuf; - TokenBuf.resize(512); - bool StringInvalid = false; - Asm = SemaRef.PP.getSpelling(AsmTok, TokenBuf, &StringInvalid); - assert (!StringInvalid && "Expected valid string!"); - return Asm; -} - -static void patchMSAsmStrings(Sema &SemaRef, bool &IsSimple, - SourceLocation AsmLoc, - ArrayRef AsmToks, - const TargetInfo &TI, - std::vector &AsmRegs, - std::vector &AsmNames, - std::vector &AsmStrings) { - assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!"); - - // Assume simple asm stmt until we parse a non-register identifer (or we just - // need to bail gracefully). - IsSimple = true; - - SmallString<512> Asm; - unsigned NumAsmStrings = 0; - for (unsigned i = 0, e = AsmToks.size(); i != e; ++i) { - - // Determine if this should be considered a new asm. 
- bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() || - AsmToks[i].is(tok::kw_asm); - - // Emit the previous asm string. - if (i && isNewAsm) { - AsmStrings[NumAsmStrings++] = Asm.c_str(); - if (AsmToks[i].is(tok::kw_asm)) { - ++i; // Skip __asm - assert (i != e && "Expected another token."); - } - } - - // Start a new asm string with the opcode. - if (isNewAsm) { - AsmRegs[NumAsmStrings].resize(AsmToks.size()); - AsmNames[NumAsmStrings].resize(AsmToks.size()); - - StringRef Piece = AsmToks[i].getIdentifierInfo()->getName(); - // MS-style inline asm keywords require special handling. - if (isMSAsmKeyword(Piece)) - IsSimple = false; - - // TODO: Verify this is a valid opcode. - Asm = Piece; - continue; - } - - if (i && AsmToks[i].hasLeadingSpace()) - Asm += ' '; - - // Check the operand(s). - switch (AsmToks[i].getKind()) { - default: - IsSimple = false; - Asm += getSpelling(SemaRef, AsmToks[i]); - break; - case tok::comma: Asm += ","; break; - case tok::colon: Asm += ":"; break; - case tok::l_square: Asm += "["; break; - case tok::r_square: Asm += "]"; break; - case tok::l_brace: Asm += "{"; break; - case tok::r_brace: Asm += "}"; break; - case tok::numeric_constant: - Asm += getSpelling(SemaRef, AsmToks[i]); - break; - case tok::identifier: { - IdentifierInfo *II = AsmToks[i].getIdentifierInfo(); - StringRef Name = II->getName(); - - // Valid register? - if (TI.isValidGCCRegisterName(Name)) { - AsmRegs[NumAsmStrings].set(i); - Asm += Name; - break; - } - - IsSimple = false; - - // MS-style inline asm keywords require special handling. - if (isMSAsmKeyword(Name)) { - IsSimple = false; - Asm += Name; - break; - } - - // FIXME: Why are we missing this segment register? - if (Name == "fs") { - Asm += Name; - break; - } - - // Lookup the identifier. - // TODO: Someone with more experience with clang should verify this the - // proper way of doing a symbol lookup. - DeclarationName DeclName(II); - Scope *CurScope = SemaRef.getCurScope(); - LookupResult R(SemaRef, DeclName, AsmLoc, Sema::LookupOrdinaryName); - if (!SemaRef.LookupName(R, CurScope, false/*AllowBuiltinCreation*/)) - break; - - assert (R.isSingleResult() && "Expected a single result?!"); - NamedDecl *Decl = R.getFoundDecl(); - switch (Decl->getKind()) { - default: - assert(0 && "Unknown decl kind."); - break; - case Decl::Var: { - case Decl::ParmVar: - AsmNames[NumAsmStrings].set(i); - - VarDecl *Var = cast(Decl); - QualType Ty = Var->getType(); - (void)Ty; // Avoid warning. - // TODO: Patch identifier with valid operand. One potential idea is to - // probe the backend with type information to guess the possible - // operand. - break; - } - } - break; - } - } - } - - // Emit the final (and possibly only) asm string. - AsmStrings[NumAsmStrings] = Asm.c_str(); -} - -// Build the unmodified MSAsmString. 
-static std::string buildMSAsmString(Sema &SemaRef, - ArrayRef AsmToks, - unsigned &NumAsmStrings) { - assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!"); - NumAsmStrings = 0; - - SmallString<512> Asm; - for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) { - bool isNewAsm = i == 0 || AsmToks[i].isAtStartOfLine() || - AsmToks[i].is(tok::kw_asm); - - if (isNewAsm) { - ++NumAsmStrings; - if (i) - Asm += '\n'; - if (AsmToks[i].is(tok::kw_asm)) { - i++; // Skip __asm - assert (i != e && "Expected another token"); - } - } - - if (i && AsmToks[i].hasLeadingSpace() && !isNewAsm) - Asm += ' '; - - Asm += getSpelling(SemaRef, AsmToks[i]); - } - return Asm.c_str(); -} - -StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, - SourceLocation LBraceLoc, - ArrayRef AsmToks, - SourceLocation EndLoc) { - // MS-style inline assembly is not fully supported, so emit a warning. - Diag(AsmLoc, diag::warn_unsupported_msasm); - SmallVector Clobbers; - std::set ClobberRegs; - SmallVector Inputs; - SmallVector Outputs; - - // Empty asm statements don't need to instantiate the AsmParser, etc. - if (AsmToks.empty()) { - StringRef AsmString; - MSAsmStmt *NS = - new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true, - /*IsVolatile*/ true, AsmToks, Inputs, Outputs, - AsmString, Clobbers, EndLoc); - return Owned(NS); - } - - unsigned NumAsmStrings; - std::string AsmString = buildMSAsmString(*this, AsmToks, NumAsmStrings); - - bool IsSimple; - std::vector Regs; - std::vector Names; - std::vector PatchedAsmStrings; - - Regs.resize(NumAsmStrings); - Names.resize(NumAsmStrings); - PatchedAsmStrings.resize(NumAsmStrings); - - // Rewrite operands to appease the AsmParser. - patchMSAsmStrings(*this, IsSimple, AsmLoc, AsmToks, - Context.getTargetInfo(), Regs, Names, PatchedAsmStrings); - - // patchMSAsmStrings doesn't correctly patch non-simple asm statements. - if (!IsSimple) { - MSAsmStmt *NS = - new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true, - /*IsVolatile*/ true, AsmToks, Inputs, Outputs, - AsmString, Clobbers, EndLoc); - return Owned(NS); - } - - // Initialize targets and assembly printers/parsers. - llvm::InitializeAllTargetInfos(); - llvm::InitializeAllTargetMCs(); - llvm::InitializeAllAsmParsers(); - - // Get the target specific parser. - std::string Error; - const std::string &TT = Context.getTargetInfo().getTriple().getTriple(); - const llvm::Target *TheTarget(llvm::TargetRegistry::lookupTarget(TT, Error)); - - OwningPtr MAI(TheTarget->createMCAsmInfo(TT)); - OwningPtr MRI(TheTarget->createMCRegInfo(TT)); - OwningPtr MOFI(new llvm::MCObjectFileInfo()); - OwningPtr - STI(TheTarget->createMCSubtargetInfo(TT, "", "")); - - for (unsigned i = 0, e = PatchedAsmStrings.size(); i != e; ++i) { - llvm::SourceMgr SrcMgr; - llvm::MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr); - llvm::MemoryBuffer *Buffer = - llvm::MemoryBuffer::getMemBuffer(PatchedAsmStrings[i], ""); - - // Tell SrcMgr about this buffer, which is what the parser will pick up. - SrcMgr.AddNewSourceBuffer(Buffer, llvm::SMLoc()); - - OwningPtr Str(createNullStreamer(Ctx)); - OwningPtr - Parser(createMCAsmParser(SrcMgr, Ctx, *Str.get(), *MAI)); - OwningPtr - TargetParser(TheTarget->createMCAsmParser(*STI, *Parser)); - // Change to the Intel dialect. - Parser->setAssemblerDialect(1); - Parser->setTargetParser(*TargetParser.get()); - - // Prime the lexer. - Parser->Lex(); - - // Parse the opcode. - StringRef IDVal; - Parser->ParseIdentifier(IDVal); - - // Canonicalize the opcode to lower case. 
- SmallString<128> Opcode; - for (unsigned i = 0, e = IDVal.size(); i != e; ++i) - Opcode.push_back(tolower(IDVal[i])); - - // Parse the operands. - llvm::SMLoc IDLoc; - SmallVector Operands; - bool HadError = TargetParser->ParseInstruction(Opcode.str(), IDLoc, - Operands); - assert (!HadError && "Unexpected error parsing instruction"); - - // Match the MCInstr. - SmallVector Instrs; - HadError = TargetParser->MatchInstruction(IDLoc, Operands, Instrs); - assert (!HadError && "Unexpected error matching instruction"); - assert ((Instrs.size() == 1) && "Expected only a single instruction."); - - // Get the instruction descriptor. - llvm::MCInst Inst = Instrs[0]; - const llvm::MCInstrInfo *MII = TheTarget->createMCInstrInfo(); - const llvm::MCInstrDesc &Desc = MII->get(Inst.getOpcode()); - llvm::MCInstPrinter *IP = - TheTarget->createMCInstPrinter(1, *MAI, *MII, *MRI, *STI); - - // Build the list of clobbers. - for (unsigned i = 0, e = Desc.getNumDefs(); i != e; ++i) { - const llvm::MCOperand &Op = Inst.getOperand(i); - if (!Op.isReg()) - continue; - - std::string Reg; - llvm::raw_string_ostream OS(Reg); - IP->printRegName(OS, Op.getReg()); - - StringRef Clobber(OS.str()); - if (!Context.getTargetInfo().isValidClobber(Clobber)) - return StmtError(Diag(AsmLoc, diag::err_asm_unknown_register_name) << - Clobber); - ClobberRegs.insert(Reg); - } - } - for (std::set::iterator I = ClobberRegs.begin(), - E = ClobberRegs.end(); I != E; ++I) - Clobbers.push_back(*I); - - MSAsmStmt *NS = - new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple, - /*IsVolatile*/ true, AsmToks, Inputs, Outputs, - AsmString, Clobbers, EndLoc); - return Owned(NS); -} - StmtResult Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, @@ -3104,7 +2572,7 @@ Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, getCurFunction()->setHasBranchProtectedScope(); unsigned NumCatchStmts = CatchStmts.size(); return Owned(ObjCAtTryStmt::Create(Context, AtLoc, Try, - CatchStmts.release(), + CatchStmts.data(), NumCatchStmts, Finally)); } @@ -3239,7 +2707,7 @@ Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, unsigned NumHandlers = RawHandlers.size(); assert(NumHandlers > 0 && "The parser shouldn't call this if there are no handlers."); - Stmt **Handlers = RawHandlers.get(); + Stmt **Handlers = RawHandlers.data(); SmallVector TypesWithHandlers; diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp new file mode 100644 index 0000000..7c2c766 --- /dev/null +++ b/lib/Sema/SemaStmtAsm.cpp @@ -0,0 +1,661 @@ +//===--- SemaStmtAsm.cpp - Semantic Analysis for Asm Statements -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements semantic analysis for inline asm statements. 
+// +//===----------------------------------------------------------------------===// + +#include "clang/Sema/SemaInternal.h" +#include "clang/Sema/Scope.h" +#include "clang/Sema/ScopeInfo.h" +#include "clang/Sema/Initialization.h" +#include "clang/Sema/Lookup.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/TypeLoc.h" +#include "clang/Lex/Preprocessor.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/ADT/ArrayRef.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCObjectFileInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/MCTargetAsmParser.h" +#include "llvm/MC/MCParser/MCAsmParser.h" +#include "llvm/Support/SourceMgr.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/TargetSelect.h" +using namespace clang; +using namespace sema; + +/// CheckAsmLValue - GNU C has an extremely ugly extension whereby they silently +/// ignore "noop" casts in places where an lvalue is required by an inline asm. +/// We emulate this behavior when -fheinous-gnu-extensions is specified, but +/// provide a strong guidance to not use it. +/// +/// This method checks to see if the argument is an acceptable l-value and +/// returns false if it is a case we can handle. +static bool CheckAsmLValue(const Expr *E, Sema &S) { + // Type dependent expressions will be checked during instantiation. + if (E->isTypeDependent()) + return false; + + if (E->isLValue()) + return false; // Cool, this is an lvalue. + + // Okay, this is not an lvalue, but perhaps it is the result of a cast that we + // are supposed to allow. + const Expr *E2 = E->IgnoreParenNoopCasts(S.Context); + if (E != E2 && E2->isLValue()) { + if (!S.getLangOpts().HeinousExtensions) + S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue) + << E->getSourceRange(); + else + S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue) + << E->getSourceRange(); + // Accept, even if we emitted an error diagnostic. + return false; + } + + // None of the above, just randomly invalid non-lvalue. + return true; +} + +/// isOperandMentioned - Return true if the specified operand # is mentioned +/// anywhere in the decomposed asm string. +static bool isOperandMentioned(unsigned OpNo, + ArrayRef AsmStrPieces) { + for (unsigned p = 0, e = AsmStrPieces.size(); p != e; ++p) { + const GCCAsmStmt::AsmStringPiece &Piece = AsmStrPieces[p]; + if (!Piece.isOperand()) continue; + + // If this is a reference to the input and if the input was the smaller + // one, then we have to reject this asm. + if (Piece.getOperandNo() == OpNo) + return true; + } + return false; +} + +StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, + bool IsVolatile, unsigned NumOutputs, + unsigned NumInputs, IdentifierInfo **Names, + MultiExprArg constraints, MultiExprArg exprs, + Expr *asmString, MultiExprArg clobbers, + SourceLocation RParenLoc) { + unsigned NumClobbers = clobbers.size(); + StringLiteral **Constraints = + reinterpret_cast(constraints.data()); + Expr **Exprs = exprs.data(); + StringLiteral *AsmString = cast(asmString); + StringLiteral **Clobbers = reinterpret_cast(clobbers.data()); + + SmallVector OutputConstraintInfos; + + // The parser verifies that there is a string literal here. 
+ if (!AsmString->isAscii()) + return StmtError(Diag(AsmString->getLocStart(),diag::err_asm_wide_character) + << AsmString->getSourceRange()); + + for (unsigned i = 0; i != NumOutputs; i++) { + StringLiteral *Literal = Constraints[i]; + if (!Literal->isAscii()) + return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character) + << Literal->getSourceRange()); + + StringRef OutputName; + if (Names[i]) + OutputName = Names[i]->getName(); + + TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName); + if (!Context.getTargetInfo().validateOutputConstraint(Info)) + return StmtError(Diag(Literal->getLocStart(), + diag::err_asm_invalid_output_constraint) + << Info.getConstraintStr()); + + // Check that the output exprs are valid lvalues. + Expr *OutputExpr = Exprs[i]; + if (CheckAsmLValue(OutputExpr, *this)) { + return StmtError(Diag(OutputExpr->getLocStart(), + diag::err_asm_invalid_lvalue_in_output) + << OutputExpr->getSourceRange()); + } + + OutputConstraintInfos.push_back(Info); + } + + SmallVector InputConstraintInfos; + + for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) { + StringLiteral *Literal = Constraints[i]; + if (!Literal->isAscii()) + return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character) + << Literal->getSourceRange()); + + StringRef InputName; + if (Names[i]) + InputName = Names[i]->getName(); + + TargetInfo::ConstraintInfo Info(Literal->getString(), InputName); + if (!Context.getTargetInfo().validateInputConstraint(OutputConstraintInfos.data(), + NumOutputs, Info)) { + return StmtError(Diag(Literal->getLocStart(), + diag::err_asm_invalid_input_constraint) + << Info.getConstraintStr()); + } + + Expr *InputExpr = Exprs[i]; + + // Only allow void types for memory constraints. + if (Info.allowsMemory() && !Info.allowsRegister()) { + if (CheckAsmLValue(InputExpr, *this)) + return StmtError(Diag(InputExpr->getLocStart(), + diag::err_asm_invalid_lvalue_in_input) + << Info.getConstraintStr() + << InputExpr->getSourceRange()); + } + + if (Info.allowsRegister()) { + if (InputExpr->getType()->isVoidType()) { + return StmtError(Diag(InputExpr->getLocStart(), + diag::err_asm_invalid_type_in_input) + << InputExpr->getType() << Info.getConstraintStr() + << InputExpr->getSourceRange()); + } + } + + ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]); + if (Result.isInvalid()) + return StmtError(); + + Exprs[i] = Result.take(); + InputConstraintInfos.push_back(Info); + } + + // Check that the clobbers are valid. + for (unsigned i = 0; i != NumClobbers; i++) { + StringLiteral *Literal = Clobbers[i]; + if (!Literal->isAscii()) + return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character) + << Literal->getSourceRange()); + + StringRef Clobber = Literal->getString(); + + if (!Context.getTargetInfo().isValidClobber(Clobber)) + return StmtError(Diag(Literal->getLocStart(), + diag::err_asm_unknown_register_name) << Clobber); + } + + GCCAsmStmt *NS = + new (Context) GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs, + NumInputs, Names, Constraints, Exprs, AsmString, + NumClobbers, Clobbers, RParenLoc); + // Validate the asm string, ensuring it makes sense given the operands we + // have. + SmallVector Pieces; + unsigned DiagOffs; + if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) { + Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID) + << AsmString->getSourceRange(); + return StmtError(); + } + + // Validate constraints and modifiers. 
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) { + GCCAsmStmt::AsmStringPiece &Piece = Pieces[i]; + if (!Piece.isOperand()) continue; + + // Look for the correct constraint index. + unsigned Idx = 0; + unsigned ConstraintIdx = 0; + for (unsigned i = 0, e = NS->getNumOutputs(); i != e; ++i, ++ConstraintIdx) { + TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; + if (Idx == Piece.getOperandNo()) + break; + ++Idx; + + if (Info.isReadWrite()) { + if (Idx == Piece.getOperandNo()) + break; + ++Idx; + } + } + + for (unsigned i = 0, e = NS->getNumInputs(); i != e; ++i, ++ConstraintIdx) { + TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; + if (Idx == Piece.getOperandNo()) + break; + ++Idx; + + if (Info.isReadWrite()) { + if (Idx == Piece.getOperandNo()) + break; + ++Idx; + } + } + + // Now that we have the right indexes go ahead and check. + StringLiteral *Literal = Constraints[ConstraintIdx]; + const Type *Ty = Exprs[ConstraintIdx]->getType().getTypePtr(); + if (Ty->isDependentType() || Ty->isIncompleteType()) + continue; + + unsigned Size = Context.getTypeSize(Ty); + if (!Context.getTargetInfo() + .validateConstraintModifier(Literal->getString(), Piece.getModifier(), + Size)) + Diag(Exprs[ConstraintIdx]->getLocStart(), + diag::warn_asm_mismatched_size_modifier); + } + + // Validate tied input operands for type mismatches. + for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) { + TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; + + // If this is a tied constraint, verify that the output and input have + // either exactly the same type, or that they are int/ptr operands with the + // same size (int/long, int*/long, are ok etc). + if (!Info.hasTiedOperand()) continue; + + unsigned TiedTo = Info.getTiedOperand(); + unsigned InputOpNo = i+NumOutputs; + Expr *OutputExpr = Exprs[TiedTo]; + Expr *InputExpr = Exprs[InputOpNo]; + + if (OutputExpr->isTypeDependent() || InputExpr->isTypeDependent()) + continue; + + QualType InTy = InputExpr->getType(); + QualType OutTy = OutputExpr->getType(); + if (Context.hasSameType(InTy, OutTy)) + continue; // All types can be tied to themselves. + + // Decide if the input and output are in the same domain (integer/ptr or + // floating point. + enum AsmDomain { + AD_Int, AD_FP, AD_Other + } InputDomain, OutputDomain; + + if (InTy->isIntegerType() || InTy->isPointerType()) + InputDomain = AD_Int; + else if (InTy->isRealFloatingType()) + InputDomain = AD_FP; + else + InputDomain = AD_Other; + + if (OutTy->isIntegerType() || OutTy->isPointerType()) + OutputDomain = AD_Int; + else if (OutTy->isRealFloatingType()) + OutputDomain = AD_FP; + else + OutputDomain = AD_Other; + + // They are ok if they are the same size and in the same domain. This + // allows tying things like: + // void* to int* + // void* to int if they are the same size. + // double to long double if they are the same size. + // + uint64_t OutSize = Context.getTypeSize(OutTy); + uint64_t InSize = Context.getTypeSize(InTy); + if (OutSize == InSize && InputDomain == OutputDomain && + InputDomain != AD_Other) + continue; + + // If the smaller input/output operand is not mentioned in the asm string, + // then we can promote the smaller one to a larger input and the asm string + // won't notice. + bool SmallerValueMentioned = false; + + // If this is a reference to the input and if the input was the smaller + // one, then we have to reject this asm. + if (isOperandMentioned(InputOpNo, Pieces)) { + // This is a use in the asm string of the smaller operand. 
Since we + // codegen this by promoting to a wider value, the asm will get printed + // "wrong". + SmallerValueMentioned |= InSize < OutSize; + } + if (isOperandMentioned(TiedTo, Pieces)) { + // If this is a reference to the output, and if the output is the larger + // value, then it's ok because we'll promote the input to the larger type. + SmallerValueMentioned |= OutSize < InSize; + } + + // If the smaller value wasn't mentioned in the asm string, and if the + // output was a register, just extend the shorter one to the size of the + // larger one. + if (!SmallerValueMentioned && InputDomain != AD_Other && + OutputConstraintInfos[TiedTo].allowsRegister()) + continue; + + // Either both of the operands were mentioned or the smaller one was + // mentioned. One more special case that we'll allow: if the tied input is + // integer, unmentioned, and is a constant, then we'll allow truncating it + // down to the size of the destination. + if (InputDomain == AD_Int && OutputDomain == AD_Int && + !isOperandMentioned(InputOpNo, Pieces) && + InputExpr->isEvaluatable(Context)) { + CastKind castKind = + (OutTy->isBooleanType() ? CK_IntegralToBoolean : CK_IntegralCast); + InputExpr = ImpCastExprToType(InputExpr, OutTy, castKind).take(); + Exprs[InputOpNo] = InputExpr; + NS->setInputExpr(i, InputExpr); + continue; + } + + Diag(InputExpr->getLocStart(), + diag::err_asm_tying_incompatible_types) + << InTy << OutTy << OutputExpr->getSourceRange() + << InputExpr->getSourceRange(); + return StmtError(); + } + + return Owned(NS); +} + +// getSpelling - Get the spelling of the AsmTok token. +static StringRef getSpelling(Sema &SemaRef, Token AsmTok) { + StringRef Asm; + SmallString<512> TokenBuf; + TokenBuf.resize(512); + bool StringInvalid = false; + Asm = SemaRef.PP.getSpelling(AsmTok, TokenBuf, &StringInvalid); + assert (!StringInvalid && "Expected valid string!"); + return Asm; +} + +// Build the inline assembly string. Returns true on error. 
+static bool buildMSAsmString(Sema &SemaRef, + SourceLocation AsmLoc, + ArrayRef AsmToks, + llvm::SmallVectorImpl &TokOffsets, + std::string &AsmString) { + assert (!AsmToks.empty() && "Didn't expect an empty AsmToks!"); + + SmallString<512> Asm; + for (unsigned i = 0, e = AsmToks.size(); i < e; ++i) { + bool isNewAsm = ((i == 0) || + AsmToks[i].isAtStartOfLine() || + AsmToks[i].is(tok::kw_asm)); + if (isNewAsm) { + if (i != 0) + Asm += "\n\t"; + + if (AsmToks[i].is(tok::kw_asm)) { + i++; // Skip __asm + if (i == e) { + SemaRef.Diag(AsmLoc, diag::err_asm_empty); + return true; + } + + } + } + + if (i && AsmToks[i].hasLeadingSpace() && !isNewAsm) + Asm += ' '; + + StringRef Spelling = getSpelling(SemaRef, AsmToks[i]); + Asm += Spelling; + TokOffsets.push_back(Asm.size()); + } + AsmString = Asm.str(); + return false; +} + +namespace { + +class MCAsmParserSemaCallbackImpl : public llvm::MCAsmParserSemaCallback { + Sema &SemaRef; + SourceLocation AsmLoc; + ArrayRef AsmToks; + ArrayRef TokOffsets; + +public: + MCAsmParserSemaCallbackImpl(Sema &Ref, SourceLocation Loc, + ArrayRef Toks, + ArrayRef Offsets) + : SemaRef(Ref), AsmLoc(Loc), AsmToks(Toks), TokOffsets(Offsets) { } + ~MCAsmParserSemaCallbackImpl() {} + + void *LookupInlineAsmIdentifier(StringRef Name, void *SrcLoc, unsigned &Size){ + SourceLocation Loc = SourceLocation::getFromPtrEncoding(SrcLoc); + NamedDecl *OpDecl = SemaRef.LookupInlineAsmIdentifier(Name, Loc, Size); + return static_cast(OpDecl); + } + + bool LookupInlineAsmField(StringRef Base, StringRef Member, + unsigned &Offset) { + return SemaRef.LookupInlineAsmField(Base, Member, Offset, AsmLoc); + } + + static void MSAsmDiagHandlerCallback(const llvm::SMDiagnostic &D, + void *Context) { + ((MCAsmParserSemaCallbackImpl*)Context)->MSAsmDiagHandler(D); + } + void MSAsmDiagHandler(const llvm::SMDiagnostic &D) { + // Compute an offset into the inline asm buffer. + // FIXME: This isn't right if .macro is involved (but hopefully, no + // real-world code does that). + const llvm::SourceMgr &LSM = *D.getSourceMgr(); + const llvm::MemoryBuffer *LBuf = + LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc())); + unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart(); + + // Figure out which token that offset points into. + const unsigned *OffsetPtr = + std::lower_bound(TokOffsets.begin(), TokOffsets.end(), Offset); + unsigned TokIndex = OffsetPtr - TokOffsets.begin(); + + // If we come up with an answer which seems sane, use it; otherwise, + // just point at the __asm keyword. + // FIXME: Assert the answer is sane once we handle .macro correctly. + SourceLocation Loc = AsmLoc; + if (TokIndex < AsmToks.size()) { + const Token *Tok = &AsmToks[TokIndex]; + Loc = Tok->getLocation(); + Loc = Loc.getLocWithOffset(Offset - (*OffsetPtr - Tok->getLength())); + } + SemaRef.Diag(Loc, diag::err_inline_ms_asm_parsing) << D.getMessage(); + } +}; + +} + +NamedDecl *Sema::LookupInlineAsmIdentifier(StringRef Name, SourceLocation Loc, + unsigned &Size) { + Size = 0; + LookupResult Result(*this, &Context.Idents.get(Name), Loc, + Sema::LookupOrdinaryName); + + if (!LookupName(Result, getCurScope())) { + // If we don't find anything, return null; the AsmParser will assume + // it is a label of some sort. + return 0; + } + + if (!Result.isSingleResult()) { + // FIXME: Diagnose result. 
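// Illustrative sketch, not part of the patch: what buildMSAsmString produces.
// MS-style __asm tokens are re-assembled into one string, with "\n\t" between
// logical asm statements and single spaces between tokens on the same line.
// 'FakeTok' and 'joinMSAsm' are invented stand-ins for clang::Token and the
// real routine (which also skips __asm keywords and records token offsets).
#include <cstddef>
#include <string>
#include <vector>

struct FakeTok {
  std::string spelling;
  bool startsLine;
  bool hasLeadingSpace;
};

std::string joinMSAsm(const std::vector<FakeTok> &toks) {
  std::string result;
  for (std::size_t i = 0; i < toks.size(); ++i) {
    bool isNewAsm = (i == 0) || toks[i].startsLine;
    if (isNewAsm && i != 0)
      result += "\n\t";                        // next asm statement
    else if (i != 0 && toks[i].hasLeadingSpace)
      result += ' ';                           // spacing within a statement
    result += toks[i].spelling;
  }
  return result;  // e.g. "mov eax, 2\n\tmov ebx, eax"
}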
+ return 0; + } + + NamedDecl *ND = Result.getFoundDecl(); + if (isa(ND) || isa(ND)) { + if (VarDecl *Var = dyn_cast(ND)) + Size = Context.getTypeInfo(Var->getType()).first; + + return ND; + } + + // FIXME: Handle other kinds of results? (FieldDecl, etc.) + // FIXME: Diagnose if we find something we can't handle, like a typedef. + return 0; +} + +bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member, + unsigned &Offset, SourceLocation AsmLoc) { + Offset = 0; + LookupResult BaseResult(*this, &Context.Idents.get(Base), SourceLocation(), + LookupOrdinaryName); + + if (!LookupName(BaseResult, getCurScope())) + return true; + + if (!BaseResult.isSingleResult()) + return true; + + NamedDecl *FoundDecl = BaseResult.getFoundDecl(); + const RecordType *RT = 0; + if (VarDecl *VD = dyn_cast(FoundDecl)) { + RT = VD->getType()->getAs(); + } else if (TypedefDecl *TD = dyn_cast(FoundDecl)) { + RT = TD->getUnderlyingType()->getAs(); + } + if (!RT) + return true; + + if (RequireCompleteType(AsmLoc, QualType(RT, 0), 0)) + return true; + + LookupResult FieldResult(*this, &Context.Idents.get(Member), SourceLocation(), + LookupMemberName); + + if (!LookupQualifiedName(FieldResult, RT->getDecl())) + return true; + + // FIXME: Handle IndirectFieldDecl? + FieldDecl *FD = dyn_cast(FieldResult.getFoundDecl()); + if (!FD) + return true; + + const ASTRecordLayout &RL = Context.getASTRecordLayout(RT->getDecl()); + unsigned i = FD->getFieldIndex(); + CharUnits Result = Context.toCharUnitsFromBits(RL.getFieldOffset(i)); + Offset = (unsigned)Result.getQuantity(); + + return false; +} + +StmtResult Sema::ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, + ArrayRef AsmToks,SourceLocation EndLoc) { + SmallVector Names; + SmallVector ConstraintRefs; + SmallVector Exprs; + SmallVector ClobberRefs; + + // Empty asm statements don't need to instantiate the AsmParser, etc. + if (AsmToks.empty()) { + StringRef EmptyAsmStr; + MSAsmStmt *NS = + new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, /*IsSimple*/ true, + /*IsVolatile*/ true, AsmToks, /*NumOutputs*/ 0, + /*NumInputs*/ 0, Names, ConstraintRefs, Exprs, + EmptyAsmStr, ClobberRefs, EndLoc); + return Owned(NS); + } + + std::string AsmString; + llvm::SmallVector TokOffsets; + if (buildMSAsmString(*this, AsmLoc, AsmToks, TokOffsets, AsmString)) + return StmtError(); + + // Get the target specific parser. + std::string Error; + const std::string &TT = Context.getTargetInfo().getTriple().getTriple(); + const llvm::Target *TheTarget(llvm::TargetRegistry::lookupTarget(TT, Error)); + + OwningPtr MAI(TheTarget->createMCAsmInfo(TT)); + OwningPtr MRI(TheTarget->createMCRegInfo(TT)); + OwningPtr MOFI(new llvm::MCObjectFileInfo()); + OwningPtr + STI(TheTarget->createMCSubtargetInfo(TT, "", "")); + + llvm::SourceMgr SrcMgr; + llvm::MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr); + llvm::MemoryBuffer *Buffer = + llvm::MemoryBuffer::getMemBuffer(AsmString, ""); + + // Tell SrcMgr about this buffer, which is what the parser will pick up. + SrcMgr.AddNewSourceBuffer(Buffer, llvm::SMLoc()); + + OwningPtr Str(createNullStreamer(Ctx)); + OwningPtr + Parser(createMCAsmParser(SrcMgr, Ctx, *Str.get(), *MAI)); + OwningPtr + TargetParser(TheTarget->createMCAsmParser(*STI, *Parser)); + + // Get the instruction descriptor. + const llvm::MCInstrInfo *MII = TheTarget->createMCInstrInfo(); + llvm::MCInstPrinter *IP = + TheTarget->createMCInstPrinter(1, *MAI, *MII, *MRI, *STI); + + // Change to the Intel dialect. 
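// Illustrative sketch, not part of the patch: the value LookupInlineAsmField
// reports. For an MS-style operand such as "[edx].Point.y" the assembler asks
// Sema for the member's byte offset within the record, which is the same
// number offsetof yields; 'Point' here is an invented example type.
#include <cstddef>

struct Point { int x; int y; };

// On a typical target with 4-byte int, the callback would report 4 for 'y',
// letting the parser form the effective address [edx + 4].
static_assert(offsetof(Point, y) == sizeof(int), "y follows x in Point");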
+ Parser->setAssemblerDialect(1); + Parser->setTargetParser(*TargetParser.get()); + Parser->setParsingInlineAsm(true); + TargetParser->setParsingInlineAsm(true); + + MCAsmParserSemaCallbackImpl MCAPSI(*this, AsmLoc, AsmToks, TokOffsets); + TargetParser->setSemaCallback(&MCAPSI); + SrcMgr.setDiagHandler(MCAsmParserSemaCallbackImpl::MSAsmDiagHandlerCallback, + &MCAPSI); + + unsigned NumOutputs; + unsigned NumInputs; + std::string AsmStringIR; + SmallVector, 4> OpDecls; + SmallVector Constraints; + SmallVector Clobbers; + if (Parser->ParseMSInlineAsm(AsmLoc.getPtrEncoding(), AsmStringIR, + NumOutputs, NumInputs, OpDecls, Constraints, + Clobbers, MII, IP, MCAPSI)) + return StmtError(); + + // Build the vector of clobber StringRefs. + unsigned NumClobbers = Clobbers.size(); + ClobberRefs.resize(NumClobbers); + for (unsigned i = 0; i != NumClobbers; ++i) + ClobberRefs[i] = StringRef(Clobbers[i]); + + // Recast the void pointers and build the vector of constraint StringRefs. + unsigned NumExprs = NumOutputs + NumInputs; + Names.resize(NumExprs); + ConstraintRefs.resize(NumExprs); + Exprs.resize(NumExprs); + for (unsigned i = 0, e = NumExprs; i != e; ++i) { + NamedDecl *OpDecl = static_cast(OpDecls[i].first); + if (!OpDecl) + return StmtError(); + + DeclarationNameInfo NameInfo(OpDecl->getDeclName(), AsmLoc); + ExprResult OpExpr = BuildDeclarationNameExpr(CXXScopeSpec(), NameInfo, + OpDecl); + if (OpExpr.isInvalid()) + return StmtError(); + + // Need offset of variable. + if (OpDecls[i].second) + OpExpr = BuildUnaryOp(getCurScope(), AsmLoc, clang::UO_AddrOf, + OpExpr.take()); + + Names[i] = OpDecl->getIdentifier(); + ConstraintRefs[i] = StringRef(Constraints[i]); + Exprs[i] = OpExpr.take(); + } + + bool IsSimple = NumExprs > 0; + MSAsmStmt *NS = + new (Context) MSAsmStmt(Context, AsmLoc, LBraceLoc, IsSimple, + /*IsVolatile*/ true, AsmToks, NumOutputs, NumInputs, + Names, ConstraintRefs, Exprs, AsmStringIR, + ClobberRefs, EndLoc); + return Owned(NS); +} diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp index 3c15b7a..b268b45 100644 --- a/lib/Sema/SemaStmtAttr.cpp +++ b/lib/Sema/SemaStmtAttr.cpp @@ -48,11 +48,16 @@ static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const AttributeList &A, static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const AttributeList &A, SourceRange Range) { switch (A.getKind()) { + case AttributeList::UnknownAttribute: + S.Diag(A.getLoc(), A.isDeclspecAttribute() ? 
+ diag::warn_unhandled_ms_attribute_ignored : + diag::warn_unknown_attribute_ignored) << A.getName(); + return 0; case AttributeList::AT_FallThrough: return handleFallThroughAttr(S, St, A, Range); default: - // if we're here, then we parsed an attribute, but didn't recognize it as a - // statement attribute => it is declaration attribute + // if we're here, then we parsed a known attribute, but didn't recognize + // it as a statement attribute => it is declaration attribute S.Diag(A.getRange().getBegin(), diag::warn_attribute_invalid_on_stmt) << A.getName()->getName() << St->getLocStart(); return 0; diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp index 4dbf3e4..f56b054 100644 --- a/lib/Sema/SemaTemplate.cpp +++ b/lib/Sema/SemaTemplate.cpp @@ -333,7 +333,8 @@ void Sema::LookupTemplateName(LookupResult &Found, if (LookupCtx) Diag(Found.getNameLoc(), diag::err_no_member_template_suggest) << Name << LookupCtx << CorrectedQuotedStr << SS.getRange() - << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr); + << FixItHint::CreateReplacement(Corrected.getCorrectionRange(), + CorrectedStr); else Diag(Found.getNameLoc(), diag::err_no_template_suggest) << Name << CorrectedQuotedStr @@ -1104,6 +1105,9 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, if (Attr) ProcessDeclAttributeList(S, NewClass, Attr); + if (PrevClassTemplate) + mergeDeclAttributes(NewClass, PrevClassTemplate->getTemplatedDecl()); + AddPushedVisibilityAttribute(NewClass); if (TUK != TUK_Friend) @@ -1138,8 +1142,6 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, NewTemplate->setInvalidDecl(); NewClass->setInvalidDecl(); } - if (PrevClassTemplate) - mergeDeclAttributes(NewClass, PrevClassTemplate->getTemplatedDecl()); ActOnDocumentableDecl(NewTemplate); @@ -1204,11 +1206,17 @@ static bool DiagnoseDefaultTemplateArgument(Sema &S, /// of a template template parameter, recursively. static bool DiagnoseUnexpandedParameterPacks(Sema &S, TemplateTemplateParmDecl *TTP) { + // A template template parameter which is a parameter pack is also a pack + // expansion. + if (TTP->isParameterPack()) + return false; + TemplateParameterList *Params = TTP->getTemplateParameters(); for (unsigned I = 0, N = Params->size(); I != N; ++I) { NamedDecl *P = Params->getParam(I); if (NonTypeTemplateParmDecl *NTTP = dyn_cast(P)) { - if (S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(), + if (!NTTP->isParameterPack() && + S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(), NTTP->getTypeSourceInfo(), Sema::UPPC_NonTypeTemplateParameterType)) return true; @@ -1321,7 +1329,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, } else if (NonTypeTemplateParmDecl *NewNonTypeParm = dyn_cast(*NewParam)) { // Check for unexpanded parameter packs. 
- if (DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(), + if (!NewNonTypeParm->isParameterPack() && + DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(), NewNonTypeParm->getTypeSourceInfo(), UPPC_NonTypeTemplateParameterType)) { Invalid = true; @@ -1342,7 +1351,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, if (NewNonTypeParm->isParameterPack()) { assert(!NewNonTypeParm->hasDefaultArgument() && "Parameter packs can't have a default argument!"); - SawParameterPack = true; + if (!NewNonTypeParm->isPackExpansion()) + SawParameterPack = true; } else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument() && NewNonTypeParm->hasDefaultArgument()) { OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc(); @@ -1389,7 +1399,8 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, if (NewTemplateParm->isParameterPack()) { assert(!NewTemplateParm->hasDefaultArgument() && "Parameter packs can't have a default argument!"); - SawParameterPack = true; + if (!NewTemplateParm->isPackExpansion()) + SawParameterPack = true; } else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument() && NewTemplateParm->hasDefaultArgument()) { OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation(); @@ -1416,10 +1427,10 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, MissingDefaultArg = true; } - // C++0x [temp.param]p11: + // C++11 [temp.param]p11: // If a template parameter of a primary class template or alias template // is a template parameter pack, it shall be the last template parameter. - if (SawParameterPack && (NewParam + 1) != NewParamEnd && + if (SawParameterPack && (NewParam + 1) != NewParamEnd && (TPC == TPC_ClassTemplate || TPC == TPC_TypeAliasTemplate)) { Diag((*NewParam)->getLocation(), diag::err_template_param_pack_must_be_last_template_parameter); @@ -1998,9 +2009,11 @@ QualType Sema::CheckTemplateIdType(TemplateName Name, for (unsigned I = 0; I < Depth; ++I) TemplateArgLists.addOuterTemplateArguments(0, 0); + LocalInstantiationScope Scope(*this); InstantiatingTemplate Inst(*this, TemplateLoc, Template); if (Inst) return QualType(); + CanonType = SubstType(Pattern->getUnderlyingType(), TemplateArgLists, AliasTemplate->getLocation(), AliasTemplate->getDeclName()); @@ -2085,7 +2098,8 @@ QualType Sema::CheckTemplateIdType(TemplateName Name, Converted.data(), Converted.size(), 0); ClassTemplate->AddSpecialization(Decl, InsertPos); - Decl->setLexicalDeclContext(CurContext); + if (ClassTemplate->isOutOfLine()) + Decl->setLexicalDeclContext(ClassTemplate->getLexicalDeclContext()); } CanonType = Context.getTypeDeclType(Decl); @@ -2137,7 +2151,6 @@ Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, } QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs); - TemplateArgsIn.release(); if (Result.isNull()) return true; @@ -2831,6 +2844,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param, case TemplateArgument::Declaration: case TemplateArgument::Integral: + case TemplateArgument::NullPtr: // We've already checked this template argument, so just copy // it to the list of converted arguments. 
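// Illustrative sketch, not part of the patch: why SawParameterPack is no
// longer set for packs that are themselves pack expansions. Such a pack gets a
// fixed length when the enclosing template is instantiated, so further
// parameters may follow it; an ordinary pack must still be the last parameter
// of a primary class template.
template <typename... Ts>
struct Outer {
  // 'Vs' expands over 'Ts', so it is a pack expansion and 'Extra' may follow.
  template <Ts... Vs, typename Extra>
  struct Inner {};
};

// template <typename... Us, typename V> struct Bad {};   // error: an ordinary
//                                                        // pack must be last

using OK = Outer<int, char>::Inner<1, 'a', void>;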
Converted.push_back(Arg.getArgument()); @@ -2947,7 +2961,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param, case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: - if (CheckTemplateArgument(TempParm, Arg)) + if (CheckTemplateArgument(TempParm, Arg, ArgumentPackIndex)) return true; Converted.push_back(Arg.getArgument()); @@ -2965,6 +2979,8 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param, llvm_unreachable("Declaration argument with template template parameter"); case TemplateArgument::Integral: llvm_unreachable("Integral argument with template template parameter"); + case TemplateArgument::NullPtr: + llvm_unreachable("Null pointer argument with template template parameter"); case TemplateArgument::Pack: llvm_unreachable("Caller must expand template argument packs"); @@ -2996,6 +3012,33 @@ static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template, return true; } +/// \brief Check whether the template parameter is a pack expansion, and if so, +/// determine the number of parameters produced by that expansion. For instance: +/// +/// \code +/// template struct A { +/// template class ...TTs, typename ...Us> struct B; +/// }; +/// \endcode +/// +/// In \c A::B, \c NTs and \c TTs have expanded pack size 2, and \c Us +/// is not a pack expansion, so returns an empty Optional. +static llvm::Optional getExpandedPackSize(NamedDecl *Param) { + if (NonTypeTemplateParmDecl *NTTP + = dyn_cast(Param)) { + if (NTTP->isExpandedParameterPack()) + return NTTP->getNumExpansionTypes(); + } + + if (TemplateTemplateParmDecl *TTP + = dyn_cast(Param)) { + if (TTP->isExpandedParameterPack()) + return TTP->getNumExpansionTemplateParameters(); + } + + return llvm::Optional(); +} + /// \brief Check that the given template argument list is well-formed /// for specializing the given template. bool Sema::CheckTemplateArgumentList(TemplateDecl *Template, @@ -3008,15 +3051,9 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template, *ExpansionIntoFixedList = false; TemplateParameterList *Params = Template->getTemplateParameters(); - unsigned NumParams = Params->size(); - unsigned NumArgs = TemplateArgs.size(); - bool Invalid = false; SourceLocation RAngleLoc = TemplateArgs.getRAngleLoc(); - bool HasParameterPack = - NumParams > 0 && Params->getParam(NumParams - 1)->isTemplateParameterPack(); - // C++ [temp.arg]p1: // [...] The type and form of each template-argument specified in // a template-id shall match the type and form specified for the @@ -3024,38 +3061,50 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template, // template-parameter-list. bool isTemplateTemplateParameter = isa(Template); SmallVector ArgumentPack; - TemplateParameterList::iterator Param = Params->begin(), - ParamEnd = Params->end(); - unsigned ArgIdx = 0; + unsigned ArgIdx = 0, NumArgs = TemplateArgs.size(); LocalInstantiationScope InstScope(*this, true); - bool SawPackExpansion = false; - while (Param != ParamEnd) { - if (ArgIdx < NumArgs) { - // If we have an expanded parameter pack, make sure we don't have too - // many arguments. - // FIXME: This really should fall out from the normal arity checking. - if (NonTypeTemplateParmDecl *NTTP - = dyn_cast(*Param)) { - if (NTTP->isExpandedParameterPack() && - ArgumentPack.size() >= NTTP->getNumExpansionTypes()) { - Diag(TemplateLoc, diag::err_template_arg_list_different_arity) - << true - << (isa(Template)? 0 : - isa(Template)? 1 : - isa(Template)? 
2 : 3) - << Template; - Diag(Template->getLocation(), diag::note_template_decl_here) - << Params->getSourceRange(); - return true; - } + for (TemplateParameterList::iterator Param = Params->begin(), + ParamEnd = Params->end(); + Param != ParamEnd; /* increment in loop */) { + // If we have an expanded parameter pack, make sure we don't have too + // many arguments. + if (llvm::Optional Expansions = getExpandedPackSize(*Param)) { + if (*Expansions == ArgumentPack.size()) { + // We're done with this parameter pack. Pack up its arguments and add + // them to the list. + Converted.push_back( + TemplateArgument::CreatePackCopy(Context, + ArgumentPack.data(), + ArgumentPack.size())); + ArgumentPack.clear(); + + // This argument is assigned to the next parameter. + ++Param; + continue; + } else if (ArgIdx == NumArgs && !PartialTemplateArgs) { + // Not enough arguments for this parameter pack. + Diag(TemplateLoc, diag::err_template_arg_list_different_arity) + << false + << (isa(Template)? 0 : + isa(Template)? 1 : + isa(Template)? 2 : 3) + << Template; + Diag(Template->getLocation(), diag::note_template_decl_here) + << Params->getSourceRange(); + return true; } + } + if (ArgIdx < NumArgs) { // Check the template argument we were given. if (CheckTemplateArgument(*Param, TemplateArgs[ArgIdx], Template, TemplateLoc, RAngleLoc, ArgumentPack.size(), Converted)) return true; + // We're now done with this argument. + ++ArgIdx; + if ((*Param)->isTemplateParameterPack()) { // The template parameter was a template parameter pack, so take the // deduced argument and place it on the argument pack. Note that we @@ -3067,16 +3116,47 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template, // Move to the next template parameter. ++Param; } - - // If this template argument is a pack expansion, record that fact - // and break out; we can't actually check any more. - if (TemplateArgs[ArgIdx].getArgument().isPackExpansion()) { - SawPackExpansion = true; - ++ArgIdx; - break; + + // If we just saw a pack expansion, then directly convert the remaining + // arguments, because we don't know what parameters they'll match up + // with. + if (TemplateArgs[ArgIdx-1].getArgument().isPackExpansion()) { + bool InFinalParameterPack = Param != ParamEnd && + Param + 1 == ParamEnd && + (*Param)->isTemplateParameterPack() && + !getExpandedPackSize(*Param); + + if (!InFinalParameterPack && !ArgumentPack.empty()) { + // If we were part way through filling in an expanded parameter pack, + // fall back to just producing individual arguments. + Converted.insert(Converted.end(), + ArgumentPack.begin(), ArgumentPack.end()); + ArgumentPack.clear(); + } + + while (ArgIdx < NumArgs) { + if (InFinalParameterPack) + ArgumentPack.push_back(TemplateArgs[ArgIdx].getArgument()); + else + Converted.push_back(TemplateArgs[ArgIdx].getArgument()); + ++ArgIdx; + } + + // Push the argument pack onto the list of converted arguments. + if (InFinalParameterPack) { + Converted.push_back( + TemplateArgument::CreatePackCopy(Context, + ArgumentPack.data(), + ArgumentPack.size())); + ArgumentPack.clear(); + } else if (ExpansionIntoFixedList) { + // We have expanded a pack into a fixed list. 
+ *ExpansionIntoFixedList = true; + } + + return false; } - - ++ArgIdx; + continue; } @@ -3087,14 +3167,30 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template, ArgumentPack.data(), ArgumentPack.size())); - return Invalid; + return false; } // If we have a template parameter pack with no more corresponding // arguments, just break out now and we'll fill in the argument pack below. - if ((*Param)->isTemplateParameterPack()) - break; - + if ((*Param)->isTemplateParameterPack()) { + assert(!getExpandedPackSize(*Param) && + "Should have dealt with this already"); + + // A non-expanded parameter pack before the end of the parameter list + // only occurs for an ill-formed template parameter list, unless we've + // got a partial argument list for a function template, so just bail out. + if (Param + 1 != ParamEnd) + return true; + + Converted.push_back(TemplateArgument::CreatePackCopy(Context, + ArgumentPack.data(), + ArgumentPack.size())); + ArgumentPack.clear(); + + ++Param; + continue; + } + // Check whether we have a default argument. TemplateArgumentLoc Arg; @@ -3181,86 +3277,12 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template, ++ArgIdx; } - // If we saw a pack expansion, then directly convert the remaining arguments, - // because we don't know what parameters they'll match up with. - if (SawPackExpansion) { - bool AddToArgumentPack - = Param != ParamEnd && (*Param)->isTemplateParameterPack(); - while (ArgIdx < NumArgs) { - if (AddToArgumentPack) - ArgumentPack.push_back(TemplateArgs[ArgIdx].getArgument()); - else - Converted.push_back(TemplateArgs[ArgIdx].getArgument()); - ++ArgIdx; - } - - // Push the argument pack onto the list of converted arguments. - if (AddToArgumentPack) { - if (ArgumentPack.empty()) - Converted.push_back(TemplateArgument(0, 0)); - else { - Converted.push_back( - TemplateArgument::CreatePackCopy(Context, - ArgumentPack.data(), - ArgumentPack.size())); - ArgumentPack.clear(); - } - } else if (ExpansionIntoFixedList) { - // We have expanded a pack into a fixed list. - *ExpansionIntoFixedList = true; - } - - return Invalid; - } - // If we have any leftover arguments, then there were too many arguments. // Complain and fail. if (ArgIdx < NumArgs) return diagnoseArityMismatch(*this, Template, TemplateLoc, TemplateArgs); - - // If we have an expanded parameter pack, make sure we don't have too - // many arguments. - // FIXME: This really should fall out from the normal arity checking. - if (Param != ParamEnd) { - if (NonTypeTemplateParmDecl *NTTP - = dyn_cast(*Param)) { - if (NTTP->isExpandedParameterPack() && - ArgumentPack.size() < NTTP->getNumExpansionTypes()) { - Diag(TemplateLoc, diag::err_template_arg_list_different_arity) - << false - << (isa(Template)? 0 : - isa(Template)? 1 : - isa(Template)? 2 : 3) - << Template; - Diag(Template->getLocation(), diag::note_template_decl_here) - << Params->getSourceRange(); - return true; - } - } - } - - // Form argument packs for each of the parameter packs remaining. - while (Param != ParamEnd) { - // If we're checking a partial list of template arguments, don't fill - // in arguments for non-template parameter packs. 
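// An illustrative sketch (not part of the patch) of the language rule that
// CheckTemplateArgumentList enforces here: in a primary class template a
// template parameter pack must be the last parameter, and any template
// arguments left over after the non-pack parameters are matched are collected
// into that trailing pack (C++11 [temp.param]p11). The names below are
// hypothetical, not from clang.
#include <tuple>
#include <type_traits>

template <typename T, typename... Rest>        // 'Rest' must come last
struct Collect {
  using type = std::tuple<T, Rest...>;         // leftover arguments form the pack
};

// 'int' binds to T; 'char' and 'double' are packed into Rest.
static_assert(std::is_same<Collect<int, char, double>::type,
                           std::tuple<int, char, double>>::value,
              "remaining arguments are packed into the trailing parameter pack");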
- if ((*Param)->isTemplateParameterPack()) { - if (!HasParameterPack) - return true; - if (ArgumentPack.empty()) - Converted.push_back(TemplateArgument(0, 0)); - else { - Converted.push_back(TemplateArgument::CreatePackCopy(Context, - ArgumentPack.data(), - ArgumentPack.size())); - ArgumentPack.clear(); - } - } else if (!PartialTemplateArgs) - return diagnoseArityMismatch(*this, Template, TemplateLoc, TemplateArgs); - - ++Param; - } - return Invalid; + return false; } namespace { @@ -3650,7 +3672,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S, switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) { case NPV_NullPointer: S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null); - Converted = TemplateArgument((Decl *)0); + Converted = TemplateArgument(ParamType, /*isNullPtr*/true); return false; case NPV_Error: @@ -3738,7 +3760,7 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S, return true; } - NamedDecl *Entity = DRE->getDecl(); + ValueDecl *Entity = DRE->getDecl(); // Cannot refer to non-static data members if (FieldDecl *Field = dyn_cast(Entity)) { @@ -3926,7 +3948,8 @@ CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S, } // Create the template argument. - Converted = TemplateArgument(Entity->getCanonicalDecl()); + Converted = TemplateArgument(cast(Entity->getCanonicalDecl()), + ParamType->isReferenceType()); S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity); return false; } @@ -3947,7 +3970,7 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S, return true; case NPV_NullPointer: S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null); - Converted = TemplateArgument((Decl *)0); + Converted = TemplateArgument(ParamType, /*isNullPtr*/true); return false; case NPV_NotNullPointer: break; @@ -4016,10 +4039,12 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S, if (isa(VD) || (isa(VD) && S.Context.getCanonicalType(VD->getType()).isConstQualified())) { - if (Arg->isTypeDependent() || Arg->isValueDependent()) + if (Arg->isTypeDependent() || Arg->isValueDependent()) { Converted = TemplateArgument(Arg); - else - Converted = TemplateArgument(VD->getCanonicalDecl()); + } else { + VD = cast(VD->getCanonicalDecl()); + Converted = TemplateArgument(VD, /*isReferenceParam*/false); + } return Invalid; } } @@ -4040,10 +4065,12 @@ static bool CheckTemplateArgumentPointerToMember(Sema &S, // Okay: this is the address of a non-static member, and therefore // a member pointer constant. - if (Arg->isTypeDependent() || Arg->isValueDependent()) + if (Arg->isTypeDependent() || Arg->isValueDependent()) { Converted = TemplateArgument(Arg); - else - Converted = TemplateArgument(DRE->getDecl()->getCanonicalDecl()); + } else { + ValueDecl *D = cast(DRE->getDecl()->getCanonicalDecl()); + Converted = TemplateArgument(D, /*isReferenceParam*/false); + } return Invalid; } @@ -4396,8 +4423,8 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param, case NPV_NullPointer: Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null); - Converted = TemplateArgument((Decl *)0); - return Owned(Arg);; + Converted = TemplateArgument(ParamType, /*isNullPtr*/true); + return Owned(Arg); } } @@ -4417,8 +4444,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param, /// This routine implements the semantics of C++ [temp.arg.template]. /// It returns true if an error occurred, and false otherwise. 
bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param, - const TemplateArgumentLoc &Arg) { - TemplateName Name = Arg.getArgument().getAsTemplate(); + const TemplateArgumentLoc &Arg, + unsigned ArgumentPackIndex) { + TemplateName Name = Arg.getArgument().getAsTemplateOrTemplatePattern(); TemplateDecl *Template = Name.getAsTemplateDecl(); if (!Template) { // Any dependent template name is fine. @@ -4448,8 +4476,12 @@ bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param, << Template; } + TemplateParameterList *Params = Param->getTemplateParameters(); + if (Param->isExpandedParameterPack()) + Params = Param->getExpansionTemplateParameters(ArgumentPackIndex); + return !TemplateParameterListsAreEqual(Template->getTemplateParameters(), - Param->getTemplateParameters(), + Params, true, TPL_TemplateTemplateArgumentMatch, Arg.getLocation()); @@ -4463,12 +4495,9 @@ ExprResult Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc) { - assert(Arg.getKind() == TemplateArgument::Declaration && - "Only declaration template arguments permitted here"); - // For a NULL non-type template argument, return nullptr casted to the // parameter's type. - if (!Arg.getAsDecl()) { + if (Arg.getKind() == TemplateArgument::NullPtr) { return ImpCastExprToType( new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc), ParamType, @@ -4476,7 +4505,9 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, ? CK_NullToMemberPointer : CK_NullToPointer); } - + assert(Arg.getKind() == TemplateArgument::Declaration && + "Only declaration template arguments permitted here"); + ValueDecl *VD = cast(Arg.getAsDecl()); if (VD->getDeclContext()->isRecord() && @@ -4524,7 +4555,7 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, assert(!RefExpr.isInvalid() && Context.hasSameType(((Expr*) RefExpr.get())->getType(), ParamType.getUnqualifiedType())); - return move(RefExpr); + return RefExpr; } } @@ -4542,7 +4573,7 @@ Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, if (RefExpr.isInvalid()) return ExprError(); - return move(RefExpr); + return RefExpr; } // Take the address of everything else @@ -5071,10 +5102,10 @@ static bool CheckNonTypeClassTemplatePartialSpecializationArgs(Sema &S, continue; } - Expr *ArgExpr = Args[I].getAsExpr(); - if (!ArgExpr) { + if (Args[I].getKind() != TemplateArgument::Expression) continue; - } + + Expr *ArgExpr = Args[I].getAsExpr(); // We can have a pack expansion of any of the bullets below. if (PackExpansionExpr *Expansion = dyn_cast(ArgExpr)) @@ -5173,7 +5204,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, // NOTE: KWLoc is the location of the tag keyword. This will instead // store the location of the outermost template keyword in the declaration. SourceLocation TemplateKWLoc = TemplateParameterLists.size() > 0 - ? TemplateParameterLists.get()[0]->getTemplateLoc() : SourceLocation(); + ? 
TemplateParameterLists[0]->getTemplateLoc() : SourceLocation(); // Find the class template we're specializing TemplateName Name = TemplateD.getAsVal(); @@ -5199,7 +5230,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, = MatchTemplateParametersToScopeSpecifier(TemplateNameLoc, TemplateNameLoc, SS, - (TemplateParameterList**)TemplateParameterLists.get(), + TemplateParameterLists.data(), TemplateParameterLists.size(), TUK == TUK_Friend, isExplicitSpecialization, @@ -5354,7 +5385,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, if (TemplateParameterLists.size() > 0) { Specialization->setTemplateParameterListsInfo(Context, TemplateParameterLists.size(), - (TemplateParameterList**) TemplateParameterLists.release()); + TemplateParameterLists.data()); } PrevDecl = 0; CanonType = Context.getTypeDeclType(Specialization); @@ -5382,7 +5413,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TemplateParams, AS_none, /*ModulePrivateLoc=*/SourceLocation(), TemplateParameterLists.size() - 1, - (TemplateParameterList**) TemplateParameterLists.release()); + TemplateParameterLists.data()); } // Create a new class template partial specialization declaration node. @@ -5406,7 +5437,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, if (TemplateParameterLists.size() > 1 && SS.isSet()) { Partial->setTemplateParameterListsInfo(Context, TemplateParameterLists.size() - 1, - (TemplateParameterList**) TemplateParameterLists.release()); + TemplateParameterLists.data()); } if (!PrevPartial) @@ -5461,7 +5492,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, if (TemplateParameterLists.size() > 0) { Specialization->setTemplateParameterListsInfo(Context, TemplateParameterLists.size(), - (TemplateParameterList**) TemplateParameterLists.release()); + TemplateParameterLists.data()); } if (!PrevDecl) @@ -5544,7 +5575,6 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, Specialization->setTypeAsWritten(WrittenTy); Specialization->setTemplateKeywordLoc(TemplateKWLoc); } - TemplateArgsIn.release(); // C++ [temp.expl.spec]p9: // A template explicit specialization is in the scope of the @@ -5579,7 +5609,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, Decl *Sema::ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D) { - Decl *NewDecl = HandleDeclarator(S, D, move(TemplateParameterLists)); + Decl *NewDecl = HandleDeclarator(S, D, TemplateParameterLists); ActOnDocumentableDecl(NewDecl); return NewDecl; } @@ -5598,7 +5628,7 @@ Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, D.setFunctionDefinitionKind(FDK_Definition); Decl *DP = HandleDeclarator(ParentScope, D, - move(TemplateParameterLists)); + TemplateParameterLists); if (FunctionTemplateDecl *FunctionTemplate = dyn_cast_or_null(DP)) return ActOnStartOfFunctionDef(FnBodyScope, @@ -5902,7 +5932,7 @@ Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD, // Perform template argument deduction to determine whether we may be // specializing this template. 
// FIXME: It is somewhat wasteful to build - TemplateDeductionInfo Info(Context, FD->getLocation()); + TemplateDeductionInfo Info(FD->getLocation()); FunctionDecl *Specialization = 0; if (TemplateDeductionResult TDK = DeduceTemplateArguments(FunTmpl, ExplicitTemplateArgs, @@ -6399,7 +6429,6 @@ Sema::ActOnExplicitInstantiation(Scope *S, TemplateArgs, Context.getTypeDeclType(Specialization)); Specialization->setTypeAsWritten(WrittenTy); - TemplateArgsIn.release(); // Set source locations for keywords. Specialization->setExternLoc(ExternLoc); @@ -6475,9 +6504,8 @@ Sema::ActOnExplicitInstantiation(Scope *S, Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference, KWLoc, SS, Name, NameLoc, Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(), - MultiTemplateParamsArg(*this, 0, 0), - Owned, IsDependent, SourceLocation(), false, - TypeResult()); + MultiTemplateParamsArg(), Owned, IsDependent, + SourceLocation(), false, TypeResult()); assert(!IsDependent && "explicit instantiation of dependent name not yet handled"); if (!TagD) @@ -6729,12 +6757,10 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S, TemplateIdAnnotation *TemplateId = D.getName().TemplateId; TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc); TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc); - ASTTemplateArgsPtr TemplateArgsPtr(*this, - TemplateId->getTemplateArgs(), + ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); translateTemplateArguments(TemplateArgsPtr, TemplateArgs); HasExplicitTemplateArgs = true; - TemplateArgsPtr.release(); } // C++ [temp.explicit]p1: @@ -6762,7 +6788,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S, if (!FunTmpl) continue; - TemplateDeductionInfo Info(Context, D.getIdentifierLoc()); + TemplateDeductionInfo Info(D.getIdentifierLoc()); FunctionDecl *Specialization = 0; if (TemplateDeductionResult TDK = DeduceTemplateArguments(FunTmpl, diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp index 9500ec3..bf4533d 100644 --- a/lib/Sema/SemaTemplateDeduction.cpp +++ b/lib/Sema/SemaTemplateDeduction.cpp @@ -158,9 +158,6 @@ static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) { /// \brief Determine whether two declaration pointers refer to the same /// declaration. static bool isSameDeclaration(Decl *X, Decl *Y) { - if (!X || !Y) - return !X && !Y; - if (NamedDecl *NX = dyn_cast(X)) X = NX->getUnderlyingDecl(); if (NamedDecl *NY = dyn_cast(Y)) @@ -262,7 +259,27 @@ checkDeducedTemplateArguments(ASTContext &Context, // If we deduced two declarations, make sure they they refer to the // same declaration. if (Y.getKind() == TemplateArgument::Declaration && - isSameDeclaration(X.getAsDecl(), Y.getAsDecl())) + isSameDeclaration(X.getAsDecl(), Y.getAsDecl()) && + X.isDeclForReferenceParam() == Y.isDeclForReferenceParam()) + return X; + + // All other combinations are incompatible. + return DeducedTemplateArgument(); + + case TemplateArgument::NullPtr: + // If we deduced a null pointer and a dependent expression, keep the + // null pointer. + if (Y.getKind() == TemplateArgument::Expression) + return X; + + // If we deduced a null pointer and an integral constant, keep the + // integral constant. + if (Y.getKind() == TemplateArgument::Integral) + return Y; + + // If we deduced two null pointers, make sure they have the same type. + if (Y.getKind() == TemplateArgument::NullPtr && + Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType())) return X; // All other combinations are incompatible. 
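// An illustrative sketch of the C++11 usage that the new
// TemplateArgument::NullPtr kind models: a null pointer constant used as a
// non-type template argument of pointer or pointer-to-member type. The types
// below are hypothetical examples, not clang code.
struct S { int member; };

template <int *P>      struct PtrHolder {};
template <int S::*MP>  struct MemPtrHolder {};

// Both specializations carry a "null pointer" template argument rather than
// the address of some declaration, which is why deduction needs a separate
// argument kind (and type comparison) for them.
PtrHolder<nullptr>    null_pointer_argument;
MemPtrHolder<nullptr> null_member_pointer_argument;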
@@ -356,13 +373,15 @@ DeduceNonTypeTemplateArgument(Sema &S, static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(Sema &S, NonTypeTemplateParmDecl *NTTP, - Decl *D, + ValueDecl *D, TemplateDeductionInfo &Info, SmallVectorImpl &Deduced) { assert(NTTP->getDepth() == 0 && "Cannot deduce non-type template argument with depth > 0"); - DeducedTemplateArgument NewDeduced(D? D->getCanonicalDecl() : 0); + D = D ? cast(D->getCanonicalDecl()) : 0; + TemplateArgument New(D, NTTP->getType()->isReferenceType()); + DeducedTemplateArgument NewDeduced(New); DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, Deduced[NTTP->getIndex()], NewDeduced); @@ -570,7 +589,10 @@ static void PrepareArgumentPackDeduction(Sema &S, SavedPacks[I] = Deduced[PackIndices[I]]; Deduced[PackIndices[I]] = TemplateArgument(); - // If the template arugment pack was explicitly specified, add that to + if (!S.CurrentInstantiationScope) + continue; + + // If the template argument pack was explicitly specified, add that to // the set of deduced arguments. const TemplateArgument *ExplicitArgs; unsigned NumExplicitArgs; @@ -612,7 +634,7 @@ FinishArgumentPackDeduction(Sema &S, if (NewlyDeducedPacks[I].empty()) { // If we deduced an empty argument pack, create it now. - NewPack = DeducedTemplateArgument(TemplateArgument(0, 0)); + NewPack = DeducedTemplateArgument(TemplateArgument::getEmptyPack()); } else { TemplateArgument *ArgumentPack = new (S.Context) TemplateArgument [NewlyDeducedPacks[I].size()]; @@ -1371,9 +1393,11 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S, // If this is a base class, try to perform template argument // deduction from it. if (NextT != RecordT) { + TemplateDeductionInfo BaseInfo(Info.getLocation()); Sema::TemplateDeductionResult BaseResult = DeduceTemplateArguments(S, TemplateParams, SpecParam, - QualType(NextT, 0), Info, Deduced); + QualType(NextT, 0), BaseInfo, + Deduced); // If template argument deduction for this base was successful, // note that we had some success. 
Otherwise, ignore any deductions @@ -1382,6 +1406,9 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S, Successful = true; DeducedOrig.clear(); DeducedOrig.append(Deduced.begin(), Deduced.end()); + Info.Param = BaseInfo.Param; + Info.FirstArg = BaseInfo.FirstArg; + Info.SecondArg = BaseInfo.SecondArg; } else Deduced = DeducedOrig; @@ -1596,7 +1623,17 @@ DeduceTemplateArguments(Sema &S, case TemplateArgument::Declaration: if (Arg.getKind() == TemplateArgument::Declaration && - isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl())) + isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()) && + Param.isDeclForReferenceParam() == Arg.isDeclForReferenceParam()) + return Sema::TDK_Success; + + Info.FirstArg = Param; + Info.SecondArg = Arg; + return Sema::TDK_NonDeducedMismatch; + + case TemplateArgument::NullPtr: + if (Arg.getKind() == TemplateArgument::NullPtr && + S.Context.hasSameType(Param.getNullPtrType(), Arg.getNullPtrType())) return Sema::TDK_Success; Info.FirstArg = Param; @@ -1867,7 +1904,11 @@ static bool isSameTemplateArg(ASTContext &Context, Context.getCanonicalType(Y.getAsType()); case TemplateArgument::Declaration: - return isSameDeclaration(X.getAsDecl(), Y.getAsDecl()); + return isSameDeclaration(X.getAsDecl(), Y.getAsDecl()) && + X.isDeclForReferenceParam() == Y.isDeclForReferenceParam(); + + case TemplateArgument::NullPtr: + return Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType()); case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: @@ -1937,6 +1978,14 @@ getTrivialTemplateArgumentLoc(Sema &S, return TemplateArgumentLoc(TemplateArgument(E), E); } + case TemplateArgument::NullPtr: { + Expr *E + = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc) + .takeAs(); + return TemplateArgumentLoc(TemplateArgument(NTTPType, /*isNullPtr*/true), + E); + } + case TemplateArgument::Integral: { Expr *E = S.BuildExpressionFromIntegralTemplateArgument(Arg, Loc).takeAs(); @@ -2162,6 +2211,9 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, TemplateDeductionInfo &Info) { + if (Partial->isInvalidDecl()) + return TDK_Invalid; + // C++ [temp.class.spec.match]p2: // A partial specialization matches a given actual template // argument list if the template arguments of the partial @@ -2601,12 +2653,13 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, // explicitly-specified set (C++0x [temp.arg.explicit]p9). const TemplateArgument *ExplicitArgs; unsigned NumExplicitArgs; - if (CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs, + if (CurrentInstantiationScope && + CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs, &NumExplicitArgs) == Param) Builder.push_back(TemplateArgument(ExplicitArgs, NumExplicitArgs)); else - Builder.push_back(TemplateArgument(0, 0)); + Builder.push_back(TemplateArgument::getEmptyPack()); continue; } @@ -2784,7 +2837,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams, // Otherwise, see if we can resolve a function type FunctionDecl *Specialization = 0; - TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc()); + TemplateDeductionInfo Info(Ovl->getNameLoc()); if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs, Specialization, Info)) continue; @@ -2815,7 +2868,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams, // So we do not reject deductions which were made elsewhere. 
SmallVector Deduced(TemplateParams->size()); - TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc()); + TemplateDeductionInfo Info(Ovl->getNameLoc()); Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType, ArgType, Info, Deduced, TDF); @@ -2992,8 +3045,6 @@ DeduceTemplateArgumentByListElement(Sema &S, /// /// \param Args the function call arguments /// -/// \param NumArgs the number of arguments in Args -/// /// \param Name the name of the function being called. This is only significant /// when the function template is a conversion function template, in which /// case this routine will also perform template argument deduction based on @@ -3013,6 +3064,9 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, llvm::ArrayRef Args, FunctionDecl *&Specialization, TemplateDeductionInfo &Info) { + if (FunctionTemplate->isInvalidDecl()) + return TDK_Invalid; + FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); // C++ [temp.deduct.call]p1: @@ -3269,6 +3323,9 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ArgFunctionType, FunctionDecl *&Specialization, TemplateDeductionInfo &Info) { + if (FunctionTemplate->isInvalidDecl()) + return TDK_Invalid; + FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); @@ -3328,6 +3385,9 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, TemplateDeductionInfo &Info) { + if (FunctionTemplate->isInvalidDecl()) + return TDK_Invalid; + CXXConversionDecl *Conv = cast(FunctionTemplate->getTemplatedDecl()); QualType FromType = Conv->getConversionType(); @@ -3572,7 +3632,7 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType InitType = Init->getType(); unsigned TDF = 0; - TemplateDeductionInfo Info(Context, Loc); + TemplateDeductionInfo Info(Loc); InitListExpr *InitList = dyn_cast(Init); if (InitList) { @@ -3594,10 +3654,11 @@ Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, return DAR_Failed; } - QualType DeducedType = Deduced[0].getAsType(); - if (DeducedType.isNull()) + if (Deduced[0].getKind() != TemplateArgument::Type) return DAR_Failed; + QualType DeducedType = Deduced[0].getAsType(); + if (InitList) { DeducedType = BuildStdInitializerList(DeducedType, Loc); if (DeducedType.isNull()) @@ -3637,26 +3698,23 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T, llvm::SmallBitVector &Deduced); /// \brief If this is a non-static member function, -static void MaybeAddImplicitObjectParameterType(ASTContext &Context, +static void AddImplicitObjectParameterType(ASTContext &Context, CXXMethodDecl *Method, SmallVectorImpl &ArgTypes) { - if (Method->isStatic()) - return; - - // C++ [over.match.funcs]p4: - // - // For non-static member functions, the type of the implicit - // object parameter is - // - "lvalue reference to cv X" for functions declared without a - // ref-qualifier or with the & ref-qualifier - // - "rvalue reference to cv X" for functions declared with the - // && ref-qualifier + // C++11 [temp.func.order]p3: + // [...] The new parameter is of type "reference to cv A," where cv are + // the cv-qualifiers of the function template (if any) and A is + // the class of which the function template is a member. // - // FIXME: We don't have ref-qualifiers yet, so we don't do that part. 
+ // The standard doesn't say explicitly, but we pick the appropriate kind of + // reference type based on [over.match.funcs]p4. QualType ArgTy = Context.getTypeDeclType(Method->getParent()); ArgTy = Context.getQualifiedType(ArgTy, Qualifiers::fromCVRMask(Method->getTypeQualifiers())); - ArgTy = Context.getLValueReferenceType(ArgTy); + if (Method->getRefQualifier() == RQ_RValue) + ArgTy = Context.getRValueReferenceType(ArgTy); + else + ArgTy = Context.getLValueReferenceType(ArgTy); ArgTypes.push_back(ArgTy); } @@ -3682,7 +3740,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, // C++0x [temp.deduct.partial]p3: // The types used to determine the ordering depend on the context in which // the partial ordering is done: - TemplateDeductionInfo Info(S.Context, Loc); + TemplateDeductionInfo Info(Loc); CXXMethodDecl *Method1 = 0; CXXMethodDecl *Method2 = 0; bool IsNonStatic2 = false; @@ -3697,7 +3755,7 @@ static bool isAtLeastAsSpecializedAs(Sema &S, IsNonStatic1 = Method1 && !Method1->isStatic(); IsNonStatic2 = Method2 && !Method2->isStatic(); - // C++0x [temp.func.order]p3: + // C++11 [temp.func.order]p3: // [...] If only one of the function templates is a non-static // member, that function template is considered to have a new // first parameter inserted in its function parameter list. The @@ -3705,22 +3763,25 @@ static bool isAtLeastAsSpecializedAs(Sema &S, // the cv-qualifiers of the function template (if any) and A is // the class of which the function template is a member. // + // Note that we interpret this to mean "if one of the function + // templates is a non-static member and the other is a non-member"; + // otherwise, the ordering rules for static functions against non-static + // functions don't make any sense. + // // C++98/03 doesn't have this provision, so instead we drop the - // first argument of the free function or static member, which - // seems to match existing practice. + // first argument of the free function, which seems to match + // existing practice. SmallVector Args1; - unsigned Skip1 = !S.getLangOpts().CPlusPlus0x && - IsNonStatic2 && !IsNonStatic1; - if (S.getLangOpts().CPlusPlus0x && IsNonStatic1 && !IsNonStatic2) - MaybeAddImplicitObjectParameterType(S.Context, Method1, Args1); + unsigned Skip1 = !S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !Method1; + if (S.getLangOpts().CPlusPlus0x && IsNonStatic1 && !Method2) + AddImplicitObjectParameterType(S.Context, Method1, Args1); Args1.insert(Args1.end(), Proto1->arg_type_begin() + Skip1, Proto1->arg_type_end()); SmallVector Args2; - Skip2 = !S.getLangOpts().CPlusPlus0x && - IsNonStatic1 && !IsNonStatic2; - if (S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1) - MaybeAddImplicitObjectParameterType(S.Context, Method2, Args2); + Skip2 = !S.getLangOpts().CPlusPlus0x && IsNonStatic1 && !Method2; + if (S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !Method1) + AddImplicitObjectParameterType(S.Context, Method2, Args2); Args2.insert(Args2.end(), Proto2->arg_type_begin() + Skip2, Proto2->arg_type_end()); @@ -4118,7 +4179,7 @@ Sema::getMoreSpecializedPartialSpecialization( // template partial specialization's template arguments, for // example. 
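// An illustrative sketch of the partial-ordering rule referenced above
// (C++11 [temp.func.order]p3): when exactly one of the two function templates
// being ordered is a non-static member, it is compared as if it had an extra
// first parameter of type "reference to cv A" (an rvalue reference for a
// &&-qualified member). The declarations below are hypothetical.
struct X {
  template <typename T> bool operator==(const T &) const;
  // For partial ordering against the non-member below, this member behaves
  // as if declared: bool operator==(const X &, const T &);
  // a &&-qualified member would instead get an X && first parameter.
};

template <typename T> bool operator==(const T &, const X &);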
SmallVector Deduced; - TemplateDeductionInfo Info(Context, Loc); + TemplateDeductionInfo Info(Loc); QualType PT1 = PS1->getInjectedSpecializationType(); QualType PT2 = PS2->getInjectedSpecializationType(); @@ -4496,6 +4557,11 @@ MarkUsedTemplateParameters(ASTContext &Ctx, case TemplateArgument::Declaration: break; + case TemplateArgument::NullPtr: + MarkUsedTemplateParameters(Ctx, TemplateArg.getNullPtrType(), OnlyDeduced, + Depth, Used); + break; + case TemplateArgument::Type: MarkUsedTemplateParameters(Ctx, TemplateArg.getAsType(), OnlyDeduced, Depth, Used); diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp index 20e755f..665dd07 100644 --- a/lib/Sema/SemaTemplateInstantiate.cpp +++ b/lib/Sema/SemaTemplateInstantiate.cpp @@ -769,7 +769,7 @@ namespace { /// instantiating it. Decl *TransformDefinition(SourceLocation Loc, Decl *D); - /// \bried Transform the first qualifier within a scope by instantiating the + /// \brief Transform the first qualifier within a scope by instantiating the /// declaration. NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc); @@ -802,11 +802,24 @@ namespace { ExprResult TransformPredefinedExpr(PredefinedExpr *E); ExprResult TransformDeclRefExpr(DeclRefExpr *E); ExprResult TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E); + ExprResult TransformTemplateParmRefExpr(DeclRefExpr *E, NonTypeTemplateParmDecl *D); ExprResult TransformSubstNonTypeTemplateParmPackExpr( SubstNonTypeTemplateParmPackExpr *E); - + + /// \brief Rebuild a DeclRefExpr for a ParmVarDecl reference. + ExprResult RebuildParmVarDeclRefExpr(ParmVarDecl *PD, SourceLocation Loc); + + /// \brief Transform a reference to a function parameter pack. + ExprResult TransformFunctionParmPackRefExpr(DeclRefExpr *E, + ParmVarDecl *PD); + + /// \brief Transform a FunctionParmPackExpr which was built when we couldn't + /// expand a function parameter pack reference which refers to an expanded + /// pack. + ExprResult TransformFunctionParmPackExpr(FunctionParmPackExpr *E); + QualType TransformFunctionProtoType(TypeLocBuilder &TLB, FunctionProtoTypeLoc TL); QualType TransformFunctionProtoType(TypeLocBuilder &TLB, @@ -835,7 +848,7 @@ namespace { ExprResult Result = TreeTransform::TransformCallExpr(CE); getSema().CallsUndergoingInstantiation.pop_back(); - return move(Result); + return Result; } ExprResult TransformLambdaExpr(LambdaExpr *E) { @@ -1161,10 +1174,11 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef( result = SemaRef.Owned(argExpr); type = argExpr->getType(); - } else if (arg.getKind() == TemplateArgument::Declaration) { + } else if (arg.getKind() == TemplateArgument::Declaration || + arg.getKind() == TemplateArgument::NullPtr) { ValueDecl *VD; - if (Decl *D = arg.getAsDecl()) { - VD = cast(D); + if (arg.getKind() == TemplateArgument::Declaration) { + VD = cast(arg.getAsDecl()); // Find the instantiation of the template argument. This is // required for nested templates. @@ -1230,8 +1244,81 @@ TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr( } ExprResult +TemplateInstantiator::RebuildParmVarDeclRefExpr(ParmVarDecl *PD, + SourceLocation Loc) { + DeclarationNameInfo NameInfo(PD->getDeclName(), Loc); + return getSema().BuildDeclarationNameExpr(CXXScopeSpec(), NameInfo, PD); +} + +ExprResult +TemplateInstantiator::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) { + if (getSema().ArgumentPackSubstitutionIndex != -1) { + // We can expand this parameter pack now. 
+ ParmVarDecl *D = E->getExpansion(getSema().ArgumentPackSubstitutionIndex); + ValueDecl *VD = cast_or_null(TransformDecl(E->getExprLoc(), D)); + if (!VD) + return ExprError(); + return RebuildParmVarDeclRefExpr(cast(VD), E->getExprLoc()); + } + + QualType T = TransformType(E->getType()); + if (T.isNull()) + return ExprError(); + + // Transform each of the parameter expansions into the corresponding + // parameters in the instantiation of the function decl. + llvm::SmallVector Parms; + Parms.reserve(E->getNumExpansions()); + for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end(); + I != End; ++I) { + ParmVarDecl *D = + cast_or_null(TransformDecl(E->getExprLoc(), *I)); + if (!D) + return ExprError(); + Parms.push_back(D); + } + + return FunctionParmPackExpr::Create(getSema().Context, T, + E->getParameterPack(), + E->getParameterPackLocation(), Parms); +} + +ExprResult +TemplateInstantiator::TransformFunctionParmPackRefExpr(DeclRefExpr *E, + ParmVarDecl *PD) { + typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack; + llvm::PointerUnion *Found + = getSema().CurrentInstantiationScope->findInstantiationOf(PD); + assert(Found && "no instantiation for parameter pack"); + + Decl *TransformedDecl; + if (DeclArgumentPack *Pack = Found->dyn_cast()) { + // If this is a reference to a function parameter pack which we can substitute + // but can't yet expand, build a FunctionParmPackExpr for it. + if (getSema().ArgumentPackSubstitutionIndex == -1) { + QualType T = TransformType(E->getType()); + if (T.isNull()) + return ExprError(); + return FunctionParmPackExpr::Create(getSema().Context, T, PD, + E->getExprLoc(), *Pack); + } + + TransformedDecl = (*Pack)[getSema().ArgumentPackSubstitutionIndex]; + } else { + TransformedDecl = Found->get(); + } + + // We have either an unexpanded pack or a specific expansion. + return RebuildParmVarDeclRefExpr(cast(TransformedDecl), + E->getExprLoc()); +} + +ExprResult TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) { NamedDecl *D = E->getDecl(); + + // Handle references to non-type template parameters and non-type template + // parameter packs. if (NonTypeTemplateParmDecl *NTTP = dyn_cast(D)) { if (NTTP->getDepth() < TemplateArgs.getNumLevels()) return TransformTemplateParmRefExpr(E, NTTP); @@ -1240,6 +1327,11 @@ TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) { // FindInstantiatedDecl will find it in the local instantiation scope. } + // Handle references to function parameter packs. 
+ if (ParmVarDecl *PD = dyn_cast(D)) + if (PD->isParameterPack()) + return TransformFunctionParmPackRefExpr(E, PD); + return TreeTransform::TransformDeclRefExpr(E); } @@ -2159,7 +2251,7 @@ Sema::InstantiateClassTemplateSpecialization( Template->getPartialSpecializations(PartialSpecs); for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) { ClassTemplatePartialSpecializationDecl *Partial = PartialSpecs[I]; - TemplateDeductionInfo Info(Context, PointOfInstantiation); + TemplateDeductionInfo Info(PointOfInstantiation); if (TemplateDeductionResult Result = DeduceTemplateArguments(Partial, ClassTemplateSpec->getTemplateArgs(), @@ -2543,8 +2635,25 @@ bool Sema::Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, return Instantiator.TransformTemplateArguments(Args, NumArgs, Result); } + +static const Decl* getCanonicalParmVarDecl(const Decl *D) { + // When storing ParmVarDecls in the local instantiation scope, we always + // want to use the ParmVarDecl from the canonical function declaration, + // since the map is then valid for any redeclaration or definition of that + // function. + if (const ParmVarDecl *PV = dyn_cast(D)) { + if (const FunctionDecl *FD = dyn_cast(PV->getDeclContext())) { + unsigned i = PV->getFunctionScopeIndex(); + return FD->getCanonicalDecl()->getParamDecl(i); + } + } + return D; +} + + llvm::PointerUnion * LocalInstantiationScope::findInstantiationOf(const Decl *D) { + D = getCanonicalParmVarDecl(D); for (LocalInstantiationScope *Current = this; Current; Current = Current->Outer) { @@ -2576,6 +2685,7 @@ LocalInstantiationScope::findInstantiationOf(const Decl *D) { } void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) { + D = getCanonicalParmVarDecl(D); llvm::PointerUnion &Stored = LocalDecls[D]; if (Stored.isNull()) Stored = Inst; @@ -2588,11 +2698,13 @@ void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) { void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D, Decl *Inst) { + D = getCanonicalParmVarDecl(D); DeclArgumentPack *Pack = LocalDecls[D].get(); Pack->push_back(Inst); } void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) { + D = getCanonicalParmVarDecl(D); llvm::PointerUnion &Stored = LocalDecls[D]; assert(Stored.isNull() && "Already instantiated this local"); DeclArgumentPack *Pack = new DeclArgumentPack; diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp index bdbe71d..19c46ab 100644 --- a/lib/Sema/SemaTemplateInstantiateDecl.cpp +++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp @@ -158,6 +158,22 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D, SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType()); } + // HACK: g++ has a bug where it gets the value kind of ?: wrong. + // libstdc++ relies upon this bug in its implementation of common_type. + // If we happen to be processing that implementation, fake up the g++ ?: + // semantics. See LWG issue 2141 for more information on the bug. 
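// An illustrative sketch of the g++/libstdc++ issue the workaround above
// targets (LWG issue 2141): common_type was specified with decltype over a
// conditional operator, and g++ gave that operator the wrong value category.
// A strict reading of the xvalue operands below yields T&& for 'type'; the
// historical g++ answer, which libstdc++ relied on, was plain T, and the code
// above folds the instantiated typedef to that non-reference type. This
// template is a simplified stand-in, not the real libstdc++ definition.
#include <utility>

template <typename T, typename U>
struct naive_common_type {
  typedef decltype(true ? std::declval<T>() : std::declval<U>()) type;
};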
+ const DecltypeType *DT = DI->getType()->getAs(); + CXXRecordDecl *RD = dyn_cast(D->getDeclContext()); + if (DT && RD && isa(DT->getUnderlyingExpr()) && + DT->isReferenceType() && + RD->getEnclosingNamespaceContext() == SemaRef.getStdNamespace() && + RD->getIdentifier() && RD->getIdentifier()->isStr("common_type") && + D->getIdentifier() && D->getIdentifier()->isStr("type") && + SemaRef.getSourceManager().isInSystemHeader(D->getLocStart())) + // Fold it to the (non-reference) type which g++ would have produced. + DI = SemaRef.Context.getTrivialTypeSourceInfo( + DI->getType().getNonReferenceType()); + // Create the new typedef TypedefNameDecl *Typedef; if (IsTypeAlias) @@ -510,7 +526,7 @@ Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) { if (!InstTy) return 0; - FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocation(), + FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocStart(), D->getFriendLoc(), InstTy); if (!FD) return 0; @@ -1008,6 +1024,30 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) { return Record; } +/// \brief Adjust the given function type for an instantiation of the +/// given declaration, to cope with modifications to the function's type that +/// aren't reflected in the type-source information. +/// +/// \param D The declaration we're instantiating. +/// \param TInfo The already-instantiated type. +static QualType adjustFunctionTypeForInstantiation(ASTContext &Context, + FunctionDecl *D, + TypeSourceInfo *TInfo) { + const FunctionProtoType *OrigFunc + = D->getType()->castAs(); + const FunctionProtoType *NewFunc + = TInfo->getType()->castAs(); + if (OrigFunc->getExtInfo() == NewFunc->getExtInfo()) + return TInfo->getType(); + + FunctionProtoType::ExtProtoInfo NewEPI = NewFunc->getExtProtoInfo(); + NewEPI.ExtInfo = OrigFunc->getExtInfo(); + return Context.getFunctionType(NewFunc->getResultType(), + NewFunc->arg_type_begin(), + NewFunc->getNumArgs(), + NewEPI); +} + /// Normal class members are of more specific types and therefore /// don't make it here. 
This function serves two purposes: /// 1) instantiating function templates @@ -1048,7 +1088,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D, TypeSourceInfo *TInfo = SubstFunctionType(D, Params); if (!TInfo) return 0; - QualType T = TInfo->getType(); + QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo); NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc(); if (QualifierLoc) { @@ -1075,7 +1115,7 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D, FunctionDecl *Function = FunctionDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(), - D->getLocation(), D->getDeclName(), T, TInfo, + D->getNameInfo(), T, TInfo, D->getStorageClass(), D->getStorageClassAsWritten(), D->isInlineSpecified(), D->hasWrittenPrototype(), D->isConstexpr()); @@ -1366,7 +1406,7 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D, TypeSourceInfo *TInfo = SubstFunctionType(D, Params); if (!TInfo) return 0; - QualType T = TInfo->getType(); + QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo); // \brief If the type of this function, after ignoring parentheses, // is not *directly* a function type, then we're instantiating a function @@ -1657,7 +1697,7 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl( IsExpandedParameterPack = true; DI = D->getTypeSourceInfo(); T = DI->getType(); - } else if (isa(TL)) { + } else if (D->isPackExpansion()) { // The non-type template parameter pack's type is a pack expansion of types. // Determine whether we need to expand this parameter pack into separate // types. @@ -1771,27 +1811,121 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl( return Param; } +static void collectUnexpandedParameterPacks( + Sema &S, + TemplateParameterList *Params, + SmallVectorImpl &Unexpanded) { + for (TemplateParameterList::const_iterator I = Params->begin(), + E = Params->end(); I != E; ++I) { + if ((*I)->isTemplateParameterPack()) + continue; + if (NonTypeTemplateParmDecl *NTTP = dyn_cast(*I)) + S.collectUnexpandedParameterPacks(NTTP->getTypeSourceInfo()->getTypeLoc(), + Unexpanded); + if (TemplateTemplateParmDecl *TTP = dyn_cast(*I)) + collectUnexpandedParameterPacks(S, TTP->getTemplateParameters(), + Unexpanded); + } +} + Decl * TemplateDeclInstantiator::VisitTemplateTemplateParmDecl( TemplateTemplateParmDecl *D) { // Instantiate the template parameter list of the template template parameter. TemplateParameterList *TempParams = D->getTemplateParameters(); TemplateParameterList *InstParams; - { + SmallVector ExpandedParams; + + bool IsExpandedParameterPack = false; + + if (D->isExpandedParameterPack()) { + // The template template parameter pack is an already-expanded pack + // expansion of template parameters. Substitute into each of the expanded + // parameters. + ExpandedParams.reserve(D->getNumExpansionTemplateParameters()); + for (unsigned I = 0, N = D->getNumExpansionTemplateParameters(); + I != N; ++I) { + LocalInstantiationScope Scope(SemaRef); + TemplateParameterList *Expansion = + SubstTemplateParams(D->getExpansionTemplateParameters(I)); + if (!Expansion) + return 0; + ExpandedParams.push_back(Expansion); + } + + IsExpandedParameterPack = true; + InstParams = TempParams; + } else if (D->isPackExpansion()) { + // The template template parameter pack expands to a pack of template + // template parameters. Determine whether we need to expand this parameter + // pack into separate parameters. 
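// An illustrative sketch of the constructs the instantiation code above
// handles: a template template parameter pack, and a pack expansion whose
// pattern is a template template parameter depending on an outer pack (the
// "expanded parameter pack" case). All names below are hypothetical.
template <typename T> struct Wrap {};

// TTs is an ordinary template template parameter pack.
template <template <typename> class... TTs>
struct ApplyAll {};

// Each element of Ts produces one expanded template template parameter: for
// Outer<int, char>, TTs expands to {template <int> class, template <char> class}.
template <typename... Ts>
struct Outer {
  template <template <Ts> class... TTs>
  struct Inner {};
};

ApplyAll<Wrap, Wrap> two_wraps;   // TTs = {Wrap, Wrap}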
+ SmallVector Unexpanded; + collectUnexpandedParameterPacks(SemaRef, D->getTemplateParameters(), + Unexpanded); + + // Determine whether the set of unexpanded parameter packs can and should + // be expanded. + bool Expand = true; + bool RetainExpansion = false; + llvm::Optional NumExpansions; + if (SemaRef.CheckParameterPacksForExpansion(D->getLocation(), + TempParams->getSourceRange(), + Unexpanded, + TemplateArgs, + Expand, RetainExpansion, + NumExpansions)) + return 0; + + if (Expand) { + for (unsigned I = 0; I != *NumExpansions; ++I) { + Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I); + LocalInstantiationScope Scope(SemaRef); + TemplateParameterList *Expansion = SubstTemplateParams(TempParams); + if (!Expansion) + return 0; + ExpandedParams.push_back(Expansion); + } + + // Note that we have an expanded parameter pack. The "type" of this + // expanded parameter pack is the original expansion type, but callers + // will end up using the expanded parameter pack types for type-checking. + IsExpandedParameterPack = true; + InstParams = TempParams; + } else { + // We cannot fully expand the pack expansion now, so just substitute + // into the pattern. + Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1); + + LocalInstantiationScope Scope(SemaRef); + InstParams = SubstTemplateParams(TempParams); + if (!InstParams) + return 0; + } + } else { // Perform the actual substitution of template parameters within a new, // local instantiation scope. LocalInstantiationScope Scope(SemaRef); InstParams = SubstTemplateParams(TempParams); if (!InstParams) - return NULL; + return 0; } // Build the template template parameter. - TemplateTemplateParmDecl *Param - = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner, D->getLocation(), + TemplateTemplateParmDecl *Param; + if (IsExpandedParameterPack) + Param = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner, + D->getLocation(), + D->getDepth() - TemplateArgs.getNumLevels(), + D->getPosition(), + D->getIdentifier(), InstParams, + ExpandedParams); + else + Param = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner, + D->getLocation(), D->getDepth() - TemplateArgs.getNumLevels(), - D->getPosition(), D->isParameterPack(), - D->getIdentifier(), InstParams); + D->getPosition(), + D->isParameterPack(), + D->getIdentifier(), InstParams); Param->setDefaultArgument(D->getDefaultArgument(), false); Param->setAccess(AS_public); @@ -1813,7 +1947,12 @@ Decl *TemplateDeclInstantiator::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) { D->getIdentLocation(), D->getNominatedNamespace(), D->getCommonAncestor()); - Owner->addDecl(Inst); + + // Add the using directive to its declaration context + // only if this is not a function or method. + if (!Owner->isFunctionOrMethod()) + Owner->addDecl(Inst); + return Inst; } @@ -2863,7 +3002,7 @@ Sema::InstantiateMemInitializers(CXXConstructorDecl *New, const MultiLevelTemplateArgumentList &TemplateArgs) { SmallVector NewInits; - bool AnyErrors = false; + bool AnyErrors = Tmpl->isInvalidDecl(); // Instantiate all the initializers. 
for (CXXConstructorDecl::init_const_iterator Inits = Tmpl->init_begin(), @@ -3031,7 +3170,7 @@ ExprResult Sema::SubstInitializer(Expr *Init, isa(Construct)) return SubstExpr(Init, TemplateArgs); - ASTOwningVector NewArgs(*this); + SmallVector NewArgs; if (SubstExprs(Construct->getArgs(), Construct->getNumArgs(), true, TemplateArgs, NewArgs)) return ExprError(); @@ -3043,7 +3182,7 @@ ExprResult Sema::SubstInitializer(Expr *Init, // Build a ParenListExpr to represent anything else. // FIXME: Fake locations! SourceLocation Loc = PP.getLocForEndOfToken(Init->getLocStart()); - return ActOnParenListExpr(Loc, Loc, move_arg(NewArgs)); + return ActOnParenListExpr(Loc, Loc, NewArgs); } // TODO: this could be templated if the various decl types used the @@ -3298,7 +3437,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, if (Decl *FD = Found->dyn_cast()) return cast(FD); - unsigned PackIdx = ArgumentPackSubstitutionIndex; + int PackIdx = ArgumentPackSubstitutionIndex; + assert(PackIdx != -1 && "found declaration pack but not pack expanding"); return cast((*Found->get())[PackIdx]); } diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp index aece90b..6147d63 100644 --- a/lib/Sema/SemaTemplateVariadic.cpp +++ b/lib/Sema/SemaTemplateVariadic.cpp @@ -727,6 +727,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) { case TST_enum: case TST_union: case TST_struct: + case TST_interface: case TST_class: case TST_auto: case TST_unknown_anytype: diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp index 54f8dba..4b23167 100644 --- a/lib/Sema/SemaType.cpp +++ b/lib/Sema/SemaType.cpp @@ -105,7 +105,8 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr, case AttributeList::AT_ThisCall: \ case AttributeList::AT_Pascal: \ case AttributeList::AT_Regparm: \ - case AttributeList::AT_Pcs \ + case AttributeList::AT_Pcs: \ + case AttributeList::AT_PnaclCall \ namespace { /// An object which stores processing state for the entire @@ -552,19 +553,28 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state, SourceLocation loc = declarator.getLocStart(); // ...and *prepend* it to the declarator. + SourceLocation NoLoc; declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction( - /*proto*/ true, - /*variadic*/ false, - /*ambiguous*/ false, SourceLocation(), - /*args*/ 0, 0, - /*type quals*/ 0, - /*ref-qualifier*/true, SourceLocation(), - /*const qualifier*/SourceLocation(), - /*volatile qualifier*/SourceLocation(), - /*mutable qualifier*/SourceLocation(), - /*EH*/ EST_None, SourceLocation(), 0, 0, 0, 0, - /*parens*/ loc, loc, - declarator)); + /*HasProto=*/true, + /*IsAmbiguous=*/false, + /*LParenLoc=*/NoLoc, + /*ArgInfo=*/0, + /*NumArgs=*/0, + /*EllipsisLoc=*/NoLoc, + /*RParenLoc=*/NoLoc, + /*TypeQuals=*/0, + /*RefQualifierIsLvalueRef=*/true, + /*RefQualifierLoc=*/NoLoc, + /*ConstQualifierLoc=*/NoLoc, + /*VolatileQualifierLoc=*/NoLoc, + /*MutableLoc=*/NoLoc, + EST_None, + /*ESpecLoc=*/NoLoc, + /*Exceptions=*/0, + /*ExceptionRanges=*/0, + /*NumExceptions=*/0, + /*NoexceptExpr=*/0, + loc, loc, declarator)); // For consistency, make sure the state still has us as processing // the decl spec. @@ -636,7 +646,7 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { // "" is an objc qualified ID with a missing id. 
if (DeclSpec::ProtocolQualifierListTy PQ = DS.getProtocolQualifiers()) { Result = Context.getObjCObjectType(Context.ObjCBuiltinIdTy, - (ObjCProtocolDecl**)PQ, + (ObjCProtocolDecl*const*)PQ, DS.getNumProtocolQualifiers()); Result = Context.getObjCObjectPointerType(Result); break; @@ -698,11 +708,15 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { case DeclSpec::TSW_longlong: Result = Context.LongLongTy; - // long long is a C99 feature. - if (!S.getLangOpts().C99) - S.Diag(DS.getTypeSpecWidthLoc(), - S.getLangOpts().CPlusPlus0x ? - diag::warn_cxx98_compat_longlong : diag::ext_longlong); + // 'long long' is a C99 or C++11 feature. + if (!S.getLangOpts().C99) { + if (S.getLangOpts().CPlusPlus) + S.Diag(DS.getTypeSpecWidthLoc(), + S.getLangOpts().CPlusPlus0x ? + diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong); + else + S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong); + } break; } } else { @@ -713,11 +727,15 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { case DeclSpec::TSW_longlong: Result = Context.UnsignedLongLongTy; - // long long is a C99 feature. - if (!S.getLangOpts().C99) - S.Diag(DS.getTypeSpecWidthLoc(), - S.getLangOpts().CPlusPlus0x ? - diag::warn_cxx98_compat_longlong : diag::ext_longlong); + // 'long long' is a C99 or C++11 feature. + if (!S.getLangOpts().C99) { + if (S.getLangOpts().CPlusPlus) + S.Diag(DS.getTypeSpecWidthLoc(), + S.getLangOpts().CPlusPlus0x ? + diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong); + else + S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong); + } break; } } @@ -753,7 +771,8 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { case DeclSpec::TST_class: case DeclSpec::TST_enum: case DeclSpec::TST_union: - case DeclSpec::TST_struct: { + case DeclSpec::TST_struct: + case DeclSpec::TST_interface: { TypeDecl *D = dyn_cast_or_null(DS.getRepAsDecl()); if (!D) { // This can happen in C++ with ambiguous lookups. 
@@ -794,18 +813,18 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) { if (DS.getNumProtocolQualifiers()) Result = Context.getObjCObjectType(Result, - (ObjCProtocolDecl**) PQ, + (ObjCProtocolDecl*const*) PQ, DS.getNumProtocolQualifiers()); } else if (Result->isObjCIdType()) { // id Result = Context.getObjCObjectType(Context.ObjCBuiltinIdTy, - (ObjCProtocolDecl**) PQ, + (ObjCProtocolDecl*const*) PQ, DS.getNumProtocolQualifiers()); Result = Context.getObjCObjectPointerType(Result); } else if (Result->isObjCClassType()) { // Class Result = Context.getObjCObjectType(Context.ObjCBuiltinClassTy, - (ObjCProtocolDecl**) PQ, + (ObjCProtocolDecl*const*) PQ, DS.getNumProtocolQualifiers()); Result = Context.getObjCObjectPointerType(Result); } else { @@ -1853,30 +1872,31 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state, case TTK_Struct: Error = 1; /* Struct member */ break; case TTK_Union: Error = 2; /* Union member */ break; case TTK_Class: Error = 3; /* Class member */ break; + case TTK_Interface: Error = 4; /* Interface member */ break; } break; case Declarator::CXXCatchContext: case Declarator::ObjCCatchContext: - Error = 4; // Exception declaration + Error = 5; // Exception declaration break; case Declarator::TemplateParamContext: - Error = 5; // Template parameter + Error = 6; // Template parameter break; case Declarator::BlockLiteralContext: - Error = 6; // Block literal + Error = 7; // Block literal break; case Declarator::TemplateTypeArgContext: - Error = 7; // Template type argument + Error = 8; // Template type argument break; case Declarator::AliasDeclContext: case Declarator::AliasTemplateContext: - Error = 9; // Type alias + Error = 10; // Type alias break; case Declarator::TrailingReturnContext: - Error = 10; // Function return type + Error = 11; // Function return type break; case Declarator::TypeNameContext: - Error = 11; // Generic + Error = 12; // Generic break; case Declarator::FileContext: case Declarator::BlockContext: @@ -1887,11 +1907,11 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state, } if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) - Error = 8; + Error = 9; // In Objective-C it is an error to use 'auto' on a function declarator. if (D.isFunctionDeclarator()) - Error = 10; + Error = 11; // C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator // contains a trailing return type. That is only legal at the outermost @@ -2149,16 +2169,6 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, ASTContext &Context = S.Context; const LangOptions &LangOpts = S.getLangOpts(); - bool ImplicitlyNoexcept = false; - if (D.getName().getKind() == UnqualifiedId::IK_OperatorFunctionId && - LangOpts.CPlusPlus0x) { - OverloadedOperatorKind OO = D.getName().OperatorFunctionId.Operator; - /// In C++0x, deallocation functions (normal and array operator delete) - /// are implicitly noexcept. - if (OO == OO_Delete || OO == OO_Array_Delete) - ImplicitlyNoexcept = true; - } - // The name we're declaring, if any. DeclarationName Name; if (D.getIdentifier()) @@ -2558,12 +2568,6 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, Exceptions, EPI); - if (FTI.getExceptionSpecType() == EST_None && - ImplicitlyNoexcept && chunkIndex == 0) { - // Only the outermost chunk is marked noexcept, of course. 
- EPI.ExceptionSpecType = EST_BasicNoexcept; - } - T = Context.getFunctionType(T, ArgTys.data(), ArgTys.size(), EPI); } @@ -2657,6 +2661,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, // C++0x [dcl.constexpr]p8: A constexpr specifier for a non-static member // function that is not a constructor declares that function to be const. + // FIXME: This should be deferred until we know whether this is a static + // member function (for an out-of-class definition, we don't know + // this until we perform redeclaration lookup). if (D.getDeclSpec().isConstexprSpecified() && !FreeFunction && D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static && D.getName().getKind() != UnqualifiedId::IK_ConstructorName && @@ -2668,6 +2675,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, T = Context.getFunctionType(FnTy->getResultType(), FnTy->arg_type_begin(), FnTy->getNumArgs(), EPI); + // Rebuild any parens around the identifier in the function type. + for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { + if (D.getTypeObject(i).Kind != DeclaratorChunk::Paren) + break; + T = S.BuildParenType(T); + } } // C++11 [dcl.fct]p6 (w/DR1417): @@ -2721,6 +2734,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, T = Context.getFunctionType(FnTy->getResultType(), FnTy->arg_type_begin(), FnTy->getNumArgs(), EPI); + // Rebuild any parens around the identifier in the function type. + for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { + if (D.getTypeObject(i).Kind != DeclaratorChunk::Paren) + break; + T = S.BuildParenType(T); + } } } @@ -2985,6 +3004,8 @@ static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) { return AttributeList::AT_Pascal; case AttributedType::attr_pcs: return AttributeList::AT_Pcs; + case AttributedType::attr_pnaclcall: + return AttributeList::AT_PnaclCall; } llvm_unreachable("unexpected attribute kind!"); } @@ -3271,6 +3292,8 @@ namespace { TL.setLocalRangeEnd(Chunk.EndLoc); const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun; + TL.setLParenLoc(FTI.getLParenLoc()); + TL.setRParenLoc(FTI.getRParenLoc()); for (unsigned i = 0, e = TL.getNumArgs(), tpi = 0; i != e; ++i) { ParmVarDecl *Param = cast(FTI.ArgInfo[i].Param); TL.setArg(tpi++, Param); @@ -3588,7 +3611,7 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state, // Forbid __weak if the runtime doesn't support it. if (lifetime == Qualifiers::OCL_Weak && - !S.getLangOpts().ObjCRuntimeHasWeak && !NonObjCPointer) { + !S.getLangOpts().ObjCARCWeak && !NonObjCPointer) { // Actually, delay this until we know what we're parsing. 
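For reference, the C++0x [dcl.constexpr]p8 rule cited just above (the reason the function type is rebuilt with an added const qualifier) behaves as in this small sketch; the struct is purely illustrative and not taken from the patch:

struct Counter {
  int n;
  constexpr int value() { return n; }          // implicitly const: 'this' is 'const Counter *'
  // constexpr int value() const { return n; } // equivalent explicit spelling
};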
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) { @@ -3611,11 +3634,12 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state, while (const PointerType *ptr = T->getAs()) T = ptr->getPointeeType(); if (const ObjCObjectPointerType *ObjT = T->getAs()) { - ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl(); - if (Class->isArcWeakrefUnavailable()) { - S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class); - S.Diag(ObjT->getInterfaceDecl()->getLocation(), - diag::note_class_declared); + if (ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl()) { + if (Class->isArcWeakrefUnavailable()) { + S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class); + S.Diag(ObjT->getInterfaceDecl()->getLocation(), + diag::note_class_declared); + } } } } @@ -3875,14 +3899,14 @@ static bool handleFunctionTypeAttr(TypeProcessingState &state, return true; } + // Delay if the type didn't work out to a function. + if (!unwrapped.isFunctionType()) return false; + // Otherwise, a calling convention. CallingConv CC; if (S.CheckCallingConvAttr(attr, CC)) return true; - // Delay if the type didn't work out to a function. - if (!unwrapped.isFunctionType()) return false; - const FunctionType *fn = unwrapped.get(); CallingConv CCOld = fn->getCallConv(); if (S.Context.getCanonicalCallConv(CC) == @@ -4429,6 +4453,20 @@ bool Sema::RequireCompleteType(SourceLocation Loc, QualType T, return RequireCompleteType(Loc, T, Diagnoser); } +/// \brief Get diagnostic %select index for tag kind for +/// literal type diagnostic message. +/// WARNING: Indexes apply to particular diagnostics only! +/// +/// \returns diagnostic %select index. +static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) { + switch (Tag) { + case TTK_Struct: return 0; + case TTK_Interface: return 1; + case TTK_Class: return 2; + default: llvm_unreachable("Invalid tag kind for literal type diagnostic!"); + } +} + /// @brief Ensure that the type T is a literal type. /// /// This routine checks whether the type @p T is a literal type. If @p T is an @@ -4485,7 +4523,7 @@ bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, // of constexpr constructors. if (RD->getNumVBases()) { Diag(RD->getLocation(), diag::note_non_literal_virtual_base) - << RD->isStruct() << RD->getNumVBases(); + << getLiteralDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases(); for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(), E = RD->vbases_end(); I != E; ++I) Diag(I->getLocStart(), @@ -4578,15 +4616,21 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) { // member access (5.2.5), decltype(e) is the type of the entity named // by e. If there is no such entity, or if e names a set of overloaded // functions, the program is ill-formed; + // + // We apply the same rules for Objective-C ivar and property references. 
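The getDecltypeForExpr changes here apply the quoted C++11 rule to unparenthesized id-expressions and member accesses, and now also to Objective-C ivar and property references. A C++-only sketch of the distinction being preserved (Point is a made-up type):

struct Point { double x; };
Point p = { 1.0 };
decltype(p.x)   a = 0.0;   // double:   the declared type of the named member
decltype((p.x)) b = p.x;   // double &: a parenthesized expression is an lvalue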
if (const DeclRefExpr *DRE = dyn_cast(E)) { if (const ValueDecl *VD = dyn_cast(DRE->getDecl())) return VD->getType(); - } - if (const MemberExpr *ME = dyn_cast(E)) { + } else if (const MemberExpr *ME = dyn_cast(E)) { if (const FieldDecl *FD = dyn_cast(ME->getMemberDecl())) return FD->getType(); + } else if (const ObjCIvarRefExpr *IR = dyn_cast(E)) { + return IR->getDecl()->getType(); + } else if (const ObjCPropertyRefExpr *PR = dyn_cast(E)) { + if (PR->isExplicitProperty()) + return PR->getExplicitProperty()->getType(); } - + // C++11 [expr.lambda.prim]p18: // Every occurrence of decltype((x)) where x is a possibly // parenthesized id-expression that names an entity of automatic diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h index 619ad33..294d742 100644 --- a/lib/Sema/TreeTransform.h +++ b/lib/Sema/TreeTransform.h @@ -574,6 +574,10 @@ public: /// \brief Transform the captures and body of a lambda expression. ExprResult TransformLambdaScope(LambdaExpr *E, CXXMethodDecl *CallOperator); + ExprResult TransformAddressOfOperand(Expr *E); + ExprResult TransformDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E, + bool IsAddressOfOperand); + #define STMT(Node, Parent) \ StmtResult Transform##Node(Node *S); #define EXPR(Node, Parent) \ @@ -1162,32 +1166,23 @@ public: /// /// By default, performs semantic analysis to build the new statement. /// Subclasses may override this routine to provide different behavior. - StmtResult RebuildAsmStmt(SourceLocation AsmLoc, - bool IsSimple, - bool IsVolatile, - unsigned NumOutputs, - unsigned NumInputs, - IdentifierInfo **Names, - MultiExprArg Constraints, - MultiExprArg Exprs, - Expr *AsmString, - MultiExprArg Clobbers, - SourceLocation RParenLoc, - bool MSAsm) { - return getSema().ActOnAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs, - NumInputs, Names, move(Constraints), - Exprs, AsmString, Clobbers, - RParenLoc, MSAsm); + StmtResult RebuildGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, + bool IsVolatile, unsigned NumOutputs, + unsigned NumInputs, IdentifierInfo **Names, + MultiExprArg Constraints, MultiExprArg Exprs, + Expr *AsmString, MultiExprArg Clobbers, + SourceLocation RParenLoc) { + return getSema().ActOnGCCAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs, + NumInputs, Names, Constraints, Exprs, + AsmString, Clobbers, RParenLoc); } /// \brief Build a new MS style inline asm statement. /// /// By default, performs semantic analysis to build the new statement. /// Subclasses may override this routine to provide different behavior. - StmtResult RebuildMSAsmStmt(SourceLocation AsmLoc, - SourceLocation LBraceLoc, - ArrayRef AsmToks, - SourceLocation EndLoc) { + StmtResult RebuildMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, + ArrayRef AsmToks, SourceLocation EndLoc) { return getSema().ActOnMSAsmStmt(AsmLoc, LBraceLoc, AsmToks, EndLoc); } @@ -1199,7 +1194,7 @@ public: Stmt *TryBody, MultiStmtArg CatchStmts, Stmt *Finally) { - return getSema().ActOnObjCAtTryStmt(AtLoc, TryBody, move(CatchStmts), + return getSema().ActOnObjCAtTryStmt(AtLoc, TryBody, CatchStmts, Finally); } @@ -1325,7 +1320,7 @@ public: StmtResult RebuildCXXTryStmt(SourceLocation TryLoc, Stmt *TryBlock, MultiStmtArg Handlers) { - return getSema().ActOnCXXTryBlock(TryLoc, TryBlock, move(Handlers)); + return getSema().ActOnCXXTryBlock(TryLoc, TryBlock, Handlers); } /// \brief Build a new C++0x range-based for statement. 
@@ -1339,7 +1334,8 @@ public: Stmt *LoopVar, SourceLocation RParenLoc) { return getSema().BuildCXXForRangeStmt(ForLoc, ColonLoc, Range, BeginEnd, - Cond, Inc, LoopVar, RParenLoc); + Cond, Inc, LoopVar, RParenLoc, + Sema::BFRK_Rebuild); } /// \brief Build a new C++0x range-based for statement. @@ -1478,7 +1474,7 @@ public: if (Result.isInvalid()) return ExprError(); - return move(Result); + return Result; } /// \brief Build a new array subscript expression. @@ -1503,7 +1499,7 @@ public: SourceLocation RParenLoc, Expr *ExecConfig = 0) { return getSema().ActOnCallExpr(/*Scope=*/0, Callee, LParenLoc, - move(Args), RParenLoc, ExecConfig); + Args, RParenLoc, ExecConfig); } /// \brief Build a new member access expression. @@ -1638,15 +1634,15 @@ public: SourceLocation RBraceLoc, QualType ResultTy) { ExprResult Result - = SemaRef.ActOnInitList(LBraceLoc, move(Inits), RBraceLoc); + = SemaRef.ActOnInitList(LBraceLoc, Inits, RBraceLoc); if (Result.isInvalid() || ResultTy->isDependentType()) - return move(Result); + return Result; // Patch in the result type we were given, which may have been computed // when the initial InitListExpr was built. InitListExpr *ILE = cast((Expr *)Result.get()); ILE->setType(ResultTy); - return move(Result); + return Result; } /// \brief Build a new designated initializer expression. @@ -1664,8 +1660,7 @@ public: if (Result.isInvalid()) return ExprError(); - ArrayExprs.release(); - return move(Result); + return Result; } /// \brief Build a new value-initialized expression. @@ -1696,7 +1691,7 @@ public: ExprResult RebuildParenListExpr(SourceLocation LParenLoc, MultiExprArg SubExprs, SourceLocation RParenLoc) { - return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, move(SubExprs)); + return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, SubExprs); } /// \brief Build a new address-of-label expression. @@ -1974,8 +1969,7 @@ public: SourceLocation LParenLoc, SourceLocation RParenLoc) { return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc, - MultiExprArg(getSema(), 0, 0), - RParenLoc); + MultiExprArg(), RParenLoc); } /// \brief Build a new C++ "new" expression. @@ -1995,7 +1989,7 @@ public: Expr *Initializer) { return getSema().BuildCXXNew(StartLoc, UseGlobal, PlacementLParen, - move(PlacementArgs), + PlacementArgs, PlacementRParen, TypeIdParens, AllocatedType, @@ -2083,7 +2077,8 @@ public: NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, - const TemplateArgumentListInfo *TemplateArgs) { + const TemplateArgumentListInfo *TemplateArgs, + bool IsAddressOfOperand) { CXXScopeSpec SS; SS.Adopt(QualifierLoc); @@ -2091,7 +2086,8 @@ public: return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs); - return getSema().BuildQualifiedDeclarationNameExpr(SS, NameInfo); + return getSema().BuildQualifiedDeclarationNameExpr(SS, NameInfo, + IsAddressOfOperand); } /// \brief Build a new template-id expression. 
@@ -2120,13 +2116,13 @@ public: bool RequiresZeroInit, CXXConstructExpr::ConstructionKind ConstructKind, SourceRange ParenRange) { - ASTOwningVector ConvertedArgs(SemaRef); - if (getSema().CompleteConstructorCall(Constructor, move(Args), Loc, + SmallVector ConvertedArgs; + if (getSema().CompleteConstructorCall(Constructor, Args, Loc, ConvertedArgs)) return ExprError(); return getSema().BuildCXXConstructExpr(Loc, T, Constructor, IsElidable, - move_arg(ConvertedArgs), + ConvertedArgs, HadMultipleCandidates, RequiresZeroInit, ConstructKind, ParenRange); @@ -2142,7 +2138,7 @@ public: SourceLocation RParenLoc) { return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc, - move(Args), + Args, RParenLoc); } @@ -2156,7 +2152,7 @@ public: SourceLocation RParenLoc) { return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc, - move(Args), + Args, RParenLoc); } @@ -2288,7 +2284,7 @@ public: ReceiverTypeInfo->getType(), /*SuperLoc=*/SourceLocation(), Sel, Method, LBracLoc, SelectorLocs, - RBracLoc, move(Args)); + RBracLoc, Args); } /// \brief Build a new Objective-C instance message. @@ -2303,7 +2299,7 @@ public: Receiver->getType(), /*SuperLoc=*/SourceLocation(), Sel, Method, LBracLoc, SelectorLocs, - RBracLoc, move(Args)); + RBracLoc, Args); } /// \brief Build a new Objective-C ivar reference expression. @@ -2326,7 +2322,7 @@ public: return ExprError(); if (Result.get()) - return move(Result); + return Result; return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(), /*FIXME:*/IvarLoc, IsArrow, @@ -2355,7 +2351,7 @@ public: return ExprError(); if (Result.get()) - return move(Result); + return Result; return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(), /*FIXME:*/PropertyLoc, IsArrow, @@ -2398,7 +2394,7 @@ public: return ExprError(); if (Result.get()) - return move(Result); + return Result; return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(), /*FIXME:*/IsaLoc, IsArrow, @@ -2424,21 +2420,17 @@ public: // Build a reference to the __builtin_shufflevector builtin FunctionDecl *Builtin = cast(*Lookup.first); - ExprResult Callee - = SemaRef.Owned(new (SemaRef.Context) DeclRefExpr(Builtin, false, - Builtin->getType(), - VK_LValue, BuiltinLoc)); - Callee = SemaRef.UsualUnaryConversions(Callee.take()); - if (Callee.isInvalid()) - return ExprError(); + Expr *Callee = new (SemaRef.Context) DeclRefExpr(Builtin, false, + SemaRef.Context.BuiltinFnTy, + VK_RValue, BuiltinLoc); + QualType CalleePtrTy = SemaRef.Context.getPointerType(Builtin->getType()); + Callee = SemaRef.ImpCastExprToType(Callee, CalleePtrTy, + CK_BuiltinFnToFnPtr).take(); // Build the CallExpr - unsigned NumSubExprs = SubExprs.size(); - Expr **Subs = (Expr **)SubExprs.release(); ExprResult TheCall = SemaRef.Owned( - new (SemaRef.Context) CallExpr(SemaRef.Context, Callee.take(), - Subs, NumSubExprs, - Builtin->getCallResultType(), + new (SemaRef.Context) CallExpr(SemaRef.Context, Callee, SubExprs, + Builtin->getCallResultType(), Expr::getValueKindForType(Builtin->getResultType()), RParenLoc)); @@ -2478,6 +2470,7 @@ public: case TemplateArgument::Declaration: case TemplateArgument::Pack: case TemplateArgument::TemplateExpansion: + case TemplateArgument::NullPtr: llvm_unreachable("Pack expansion pattern has no parameter packs"); case TemplateArgument::Type: @@ -2515,10 +2508,7 @@ public: // Just create the expression; there is not any interesting semantic // analysis here because we can't actually build an AtomicExpr until // we are sure it is semantically sound. 
- unsigned NumSubExprs = SubExprs.size(); - Expr **Subs = (Expr **)SubExprs.release(); - return new (SemaRef.Context) AtomicExpr(BuiltinLoc, Subs, - NumSubExprs, RetTy, Op, + return new (SemaRef.Context) AtomicExpr(BuiltinLoc, SubExprs, RetTy, Op, RParenLoc); } @@ -2963,6 +2953,7 @@ void TreeTransform::InventTemplateArgumentLoc( case TemplateArgument::Declaration: case TemplateArgument::Integral: case TemplateArgument::Pack: + case TemplateArgument::NullPtr: Output = TemplateArgumentLoc(Arg, TemplateArgumentLocInfo()); break; } @@ -2976,8 +2967,10 @@ bool TreeTransform::TransformTemplateArgument( switch (Arg.getKind()) { case TemplateArgument::Null: case TemplateArgument::Integral: - Output = Input; - return false; + case TemplateArgument::Pack: + case TemplateArgument::Declaration: + case TemplateArgument::NullPtr: + llvm_unreachable("Unexpected TemplateArgument"); case TemplateArgument::Type: { TypeSourceInfo *DI = Input.getTypeSourceInfo(); @@ -2991,28 +2984,6 @@ bool TreeTransform::TransformTemplateArgument( return false; } - case TemplateArgument::Declaration: { - // FIXME: we should never have to transform one of these. - DeclarationName Name; - if (NamedDecl *ND = dyn_cast(Arg.getAsDecl())) - Name = ND->getDeclName(); - TemporaryBase Rebase(*this, Input.getLocation(), Name); - Decl *D = getDerived().TransformDecl(Input.getLocation(), Arg.getAsDecl()); - if (!D) return true; - - Expr *SourceExpr = Input.getSourceDeclExpression(); - if (SourceExpr) { - EnterExpressionEvaluationContext Unevaluated(getSema(), - Sema::ConstantEvaluated); - ExprResult E = getDerived().TransformExpr(SourceExpr); - E = SemaRef.ActOnConstantExpression(E); - SourceExpr = (E.isInvalid() ? 0 : E.take()); - } - - Output = TemplateArgumentLoc(TemplateArgument(D), SourceExpr); - return false; - } - case TemplateArgument::Template: { NestedNameSpecifierLoc QualifierLoc = Input.getTemplateQualifierLoc(); if (QualifierLoc) { @@ -3051,35 +3022,6 @@ bool TreeTransform::TransformTemplateArgument( Output = TemplateArgumentLoc(TemplateArgument(E.take()), E.take()); return false; } - - case TemplateArgument::Pack: { - SmallVector TransformedArgs; - TransformedArgs.reserve(Arg.pack_size()); - for (TemplateArgument::pack_iterator A = Arg.pack_begin(), - AEnd = Arg.pack_end(); - A != AEnd; ++A) { - - // FIXME: preserve source information here when we start - // caring about parameter packs. 
- - TemplateArgumentLoc InputArg; - TemplateArgumentLoc OutputArg; - getDerived().InventTemplateArgumentLoc(*A, InputArg); - if (getDerived().TransformTemplateArgument(InputArg, OutputArg)) - return true; - - TransformedArgs.push_back(OutputArg.getArgument()); - } - - TemplateArgument *TransformedArgsPtr - = new (getSema().Context) TemplateArgument[TransformedArgs.size()]; - std::copy(TransformedArgs.begin(), TransformedArgs.end(), - TransformedArgsPtr); - Output = TemplateArgumentLoc(TemplateArgument(TransformedArgsPtr, - TransformedArgs.size()), - Input.getLocInfo()); - return false; - } } // Work around bogus GCC warning @@ -4260,6 +4202,8 @@ TreeTransform::TransformFunctionProtoType(TypeLocBuilder &TLB, FunctionProtoTypeLoc NewTL = TLB.push(Result); NewTL.setLocalRangeBegin(TL.getLocalRangeBegin()); + NewTL.setLParenLoc(TL.getLParenLoc()); + NewTL.setRParenLoc(TL.getRParenLoc()); NewTL.setLocalRangeEnd(TL.getLocalRangeEnd()); for (unsigned i = 0, e = NewTL.getNumArgs(); i != e; ++i) NewTL.setArg(i, ParamDecls[i]); @@ -4283,6 +4227,8 @@ QualType TreeTransform::TransformFunctionNoProtoType( FunctionNoProtoTypeLoc NewTL = TLB.push(Result); NewTL.setLocalRangeBegin(TL.getLocalRangeBegin()); + NewTL.setLParenLoc(TL.getLParenLoc()); + NewTL.setRParenLoc(TL.getRParenLoc()); NewTL.setLocalRangeEnd(TL.getLocalRangeEnd()); return Result; @@ -4339,7 +4285,8 @@ template QualType TreeTransform::TransformTypeOfExprType(TypeLocBuilder &TLB, TypeOfExprTypeLoc TL) { // typeof expressions are not potentially evaluated contexts - EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated); + EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, + Sema::ReuseLambdaContextDecl); ExprResult E = getDerived().TransformExpr(TL.getUnderlyingExpr()); if (E.isInvalid()) @@ -5099,7 +5046,7 @@ TreeTransform::TransformCompoundStmt(CompoundStmt *S, bool SubStmtInvalid = false; bool SubStmtChanged = false; - ASTOwningVector Statements(getSema()); + SmallVector Statements; for (CompoundStmt::body_iterator B = S->body_begin(), BEnd = S->body_end(); B != BEnd; ++B) { StmtResult Result = getDerived().TransformStmt(*B); @@ -5126,7 +5073,7 @@ TreeTransform::TransformCompoundStmt(CompoundStmt *S, return SemaRef.Owned(S); return getDerived().RebuildCompoundStmt(S->getLBracLoc(), - move_arg(Statements), + Statements, S->getRBracLoc(), IsStmtExpr); } @@ -5533,14 +5480,14 @@ TreeTransform::TransformDeclStmt(DeclStmt *S) { template StmtResult -TreeTransform::TransformAsmStmt(AsmStmt *S) { +TreeTransform::TransformGCCAsmStmt(GCCAsmStmt *S) { - ASTOwningVector Constraints(getSema()); - ASTOwningVector Exprs(getSema()); + SmallVector Constraints; + SmallVector Exprs; SmallVector Names; ExprResult AsmString; - ASTOwningVector Clobbers(getSema()); + SmallVector Clobbers; bool ExprsChanged = false; @@ -5585,23 +5532,15 @@ TreeTransform::TransformAsmStmt(AsmStmt *S) { // Go through the clobbers. for (unsigned I = 0, E = S->getNumClobbers(); I != E; ++I) - Clobbers.push_back(S->getClobber(I)); + Clobbers.push_back(S->getClobberStringLiteral(I)); // No need to transform the asm string literal. 
AsmString = SemaRef.Owned(S->getAsmString()); - - return getDerived().RebuildAsmStmt(S->getAsmLoc(), - S->isSimple(), - S->isVolatile(), - S->getNumOutputs(), - S->getNumInputs(), - Names.data(), - move_arg(Constraints), - move_arg(Exprs), - AsmString.get(), - move_arg(Clobbers), - S->getRParenLoc(), - S->isMSAsm()); + return getDerived().RebuildGCCAsmStmt(S->getAsmLoc(), S->isSimple(), + S->isVolatile(), S->getNumOutputs(), + S->getNumInputs(), Names.data(), + Constraints, Exprs, AsmString.get(), + Clobbers, S->getRParenLoc()); } template @@ -5624,7 +5563,7 @@ TreeTransform::TransformObjCAtTryStmt(ObjCAtTryStmt *S) { // Transform the @catch statements (if present). bool AnyCatchChanged = false; - ASTOwningVector CatchStmts(SemaRef); + SmallVector CatchStmts; for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) { StmtResult Catch = getDerived().TransformStmt(S->getCatchStmt(I)); if (Catch.isInvalid()) @@ -5651,7 +5590,7 @@ TreeTransform::TransformObjCAtTryStmt(ObjCAtTryStmt *S) { // Build a new statement. return getDerived().RebuildObjCAtTryStmt(S->getAtTryLoc(), TryBody.get(), - move_arg(CatchStmts), Finally.get()); + CatchStmts, Finally.get()); } template @@ -5855,7 +5794,7 @@ TreeTransform::TransformCXXTryStmt(CXXTryStmt *S) { // Transform the handlers. bool HandlerChanged = false; - ASTOwningVector Handlers(SemaRef); + SmallVector Handlers; for (unsigned I = 0, N = S->getNumHandlers(); I != N; ++I) { StmtResult Handler = getDerived().TransformCXXCatchStmt(S->getHandler(I)); @@ -5872,7 +5811,7 @@ TreeTransform::TransformCXXTryStmt(CXXTryStmt *S) { return SemaRef.Owned(S); return getDerived().RebuildCXXTryStmt(S->getTryLoc(), TryBlock.get(), - move_arg(Handlers)); + Handlers); } template @@ -6205,10 +6144,22 @@ TreeTransform::TransformParenExpr(ParenExpr *E) { E->getRParen()); } +/// \brief The operand of a unary address-of operator has special rules: it's +/// allowed to refer to a non-static member of a class even if there's no 'this' +/// object available. +template +ExprResult +TreeTransform::TransformAddressOfOperand(Expr *E) { + if (DependentScopeDeclRefExpr *DRE = dyn_cast(E)) + return getDerived().TransformDependentScopeDeclRefExpr(DRE, true); + else + return getDerived().TransformExpr(E); +} + template ExprResult TreeTransform::TransformUnaryOperator(UnaryOperator *E) { - ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr()); + ExprResult SubExpr = TransformAddressOfOperand(E->getSubExpr()); if (SubExpr.isInvalid()) return ExprError(); @@ -6338,7 +6289,8 @@ TreeTransform::TransformUnaryExprOrTypeTraitExpr( // C++0x [expr.sizeof]p1: // The operand is either an expression, which is an unevaluated operand // [...] - EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated); + EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, + Sema::ReuseLambdaContextDecl); ExprResult SubExpr = getDerived().TransformExpr(E->getArgumentExpr()); if (SubExpr.isInvalid()) @@ -6386,7 +6338,7 @@ TreeTransform::TransformCallExpr(CallExpr *E) { // Transform arguments. bool ArgChanged = false; - ASTOwningVector Args(SemaRef); + SmallVector Args; if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args, &ArgChanged)) return ExprError(); @@ -6394,13 +6346,13 @@ TreeTransform::TransformCallExpr(CallExpr *E) { if (!getDerived().AlwaysRebuild() && Callee.get() == E->getCallee() && !ArgChanged) - return SemaRef.MaybeBindToTemporary(E);; + return SemaRef.MaybeBindToTemporary(E); // FIXME: Wrong source location information for the '('. 
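The FPContractStateRAII additions above (and in the operator-call transform below) make the rebuilt expression inherit the original operator's FP-contraction permission, so template instantiation does not change whether a*b+c may be fused into a single fma. A hedged source-level sketch, assuming the C99 FP_CONTRACT pragma is what toggles that per-expression bit:

#pragma STDC FP_CONTRACT ON
double may_fuse(double a, double b, double c) { return a * b + c; }  // contraction allowed
#pragma STDC FP_CONTRACT OFF
double no_fusing(double a, double b, double c) { return a * b + c; } // must round a*b first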
SourceLocation FakeLParenLoc = ((Expr *)Callee.get())->getSourceRange().getBegin(); return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc, - move_arg(Args), + Args, E->getRParenLoc()); } @@ -6499,6 +6451,9 @@ TreeTransform::TransformBinaryOperator(BinaryOperator *E) { RHS.get() == E->getRHS()) return SemaRef.Owned(E); + Sema::FPContractStateRAII FPContractState(getSema()); + getSema().FPFeatures.fp_contract = E->isFPContractable(); + return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(), LHS.get(), RHS.get()); } @@ -6645,7 +6600,7 @@ ExprResult TreeTransform::TransformInitListExpr(InitListExpr *E) { bool InitChanged = false; - ASTOwningVector Inits(SemaRef); + SmallVector Inits; if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false, Inits, &InitChanged)) return ExprError(); @@ -6653,7 +6608,7 @@ TreeTransform::TransformInitListExpr(InitListExpr *E) { if (!getDerived().AlwaysRebuild() && !InitChanged) return SemaRef.Owned(E); - return getDerived().RebuildInitList(E->getLBraceLoc(), move_arg(Inits), + return getDerived().RebuildInitList(E->getLBraceLoc(), Inits, E->getRBraceLoc(), E->getType()); } @@ -6668,7 +6623,7 @@ TreeTransform::TransformDesignatedInitExpr(DesignatedInitExpr *E) { return ExprError(); // transform the designators. - ASTOwningVector ArrayExprs(SemaRef); + SmallVector ArrayExprs; bool ExprChanged = false; for (DesignatedInitExpr::designators_iterator D = E->designators_begin(), DEnd = E->designators_end(); @@ -6720,7 +6675,7 @@ TreeTransform::TransformDesignatedInitExpr(DesignatedInitExpr *E) { !ExprChanged) return SemaRef.Owned(E); - return getDerived().RebuildDesignatedInitExpr(Desig, move_arg(ArrayExprs), + return getDerived().RebuildDesignatedInitExpr(Desig, ArrayExprs, E->getEqualOrColonLoc(), E->usesGNUSyntax(), Init.get()); } @@ -6768,13 +6723,13 @@ template ExprResult TreeTransform::TransformParenListExpr(ParenListExpr *E) { bool ArgumentChanged = false; - ASTOwningVector Inits(SemaRef); + SmallVector Inits; if (TransformExprs(E->getExprs(), E->getNumExprs(), true, Inits, &ArgumentChanged)) return ExprError(); return getDerived().RebuildParenListExpr(E->getLParenLoc(), - move_arg(Inits), + Inits, E->getRParenLoc()); } @@ -6875,13 +6830,13 @@ TreeTransform::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) { static_cast(Object.get())->getLocEnd()); // Transform the call arguments. 
- ASTOwningVector Args(SemaRef); + SmallVector Args; if (getDerived().TransformExprs(E->getArgs() + 1, E->getNumArgs() - 1, true, Args)) return ExprError(); return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc, - move_arg(Args), + Args, E->getLocEnd()); } @@ -6905,7 +6860,11 @@ TreeTransform::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) { if (Callee.isInvalid()) return ExprError(); - ExprResult First = getDerived().TransformExpr(E->getArg(0)); + ExprResult First; + if (E->getOperator() == OO_Amp) + First = getDerived().TransformAddressOfOperand(E->getArg(0)); + else + First = getDerived().TransformExpr(E->getArg(0)); if (First.isInvalid()) return ExprError(); @@ -6922,6 +6881,9 @@ TreeTransform::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) { (E->getNumArgs() != 2 || Second.get() == E->getArg(1))) return SemaRef.MaybeBindToTemporary(E); + Sema::FPContractStateRAII FPContractState(getSema()); + getSema().FPFeatures.fp_contract = E->isFPContractable(); + return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(), E->getOperatorLoc(), Callee.get(), @@ -6950,7 +6912,7 @@ TreeTransform::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) { // Transform arguments. bool ArgChanged = false; - ASTOwningVector Args(SemaRef); + SmallVector Args; if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args, &ArgChanged)) return ExprError(); @@ -6964,7 +6926,7 @@ TreeTransform::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) { SourceLocation FakeLParenLoc = ((Expr *)Callee.get())->getSourceRange().getBegin(); return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc, - move_arg(Args), + Args, E->getRParenLoc(), EC.get()); } @@ -6989,9 +6951,6 @@ TreeTransform::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) { SourceLocation FakeLAngleLoc = SemaRef.PP.getLocForEndOfToken(E->getOperatorLoc()); SourceLocation FakeRAngleLoc = E->getSubExpr()->getSourceRange().getBegin(); - SourceLocation FakeRParenLoc - = SemaRef.PP.getLocForEndOfToken( - E->getSubExpr()->getSourceRange().getEnd()); return getDerived().RebuildCXXNamedCastExpr(E->getOperatorLoc(), E->getStmtClass(), FakeLAngleLoc, @@ -6999,7 +6958,7 @@ TreeTransform::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) { FakeRAngleLoc, FakeRAngleLoc, SubExpr.get(), - FakeRParenLoc); + E->getRParenLoc()); } template @@ -7074,7 +7033,8 @@ TreeTransform::TransformCXXTypeidExpr(CXXTypeidExpr *E) { // after we perform semantic analysis. We speculatively assume it is // unevaluated; it will get fixed later if the subexpression is in fact // potentially evaluated. - EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated); + EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, + Sema::ReuseLambdaContextDecl); ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand()); if (SubExpr.isInvalid()) @@ -7222,7 +7182,7 @@ TreeTransform::TransformCXXNewExpr(CXXNewExpr *E) { // Transform the placement arguments (if any). 
bool ArgumentChanged = false; - ASTOwningVector PlacementArgs(SemaRef); + SmallVector PlacementArgs; if (getDerived().TransformExprs(E->getPlacementArgs(), E->getNumPlacementArgs(), true, PlacementArgs, &ArgumentChanged)) @@ -7313,7 +7273,7 @@ TreeTransform::TransformCXXNewExpr(CXXNewExpr *E) { return getDerived().RebuildCXXNewExpr(E->getLocStart(), E->isGlobalNew(), /*FIXME:*/E->getLocStart(), - move_arg(PlacementArgs), + PlacementArgs, /*FIXME:*/E->getLocStart(), E->getTypeIdParens(), AllocType, @@ -7512,7 +7472,8 @@ TreeTransform::TransformUnresolvedLookupExpr( // If we have template arguments, rebuild them, then rebuild the // templateid expression. TemplateArgumentListInfo TransArgs(Old->getLAngleLoc(), Old->getRAngleLoc()); - if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(), + if (Old->hasExplicitTemplateArgs() && + getDerived().TransformTemplateArguments(Old->getTemplateArgs(), Old->getNumTemplateArgs(), TransArgs)) return ExprError(); @@ -7732,6 +7693,14 @@ template ExprResult TreeTransform::TransformDependentScopeDeclRefExpr( DependentScopeDeclRefExpr *E) { + return TransformDependentScopeDeclRefExpr(E, /*IsAddressOfOperand*/false); +} + +template +ExprResult +TreeTransform::TransformDependentScopeDeclRefExpr( + DependentScopeDeclRefExpr *E, + bool IsAddressOfOperand) { NestedNameSpecifierLoc QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc()); if (!QualifierLoc) @@ -7758,7 +7727,8 @@ TreeTransform::TransformDependentScopeDeclRefExpr( return getDerived().RebuildDependentScopeDeclRefExpr(QualifierLoc, TemplateKWLoc, NameInfo, - /*TemplateArgs*/ 0); + /*TemplateArgs*/ 0, + IsAddressOfOperand); } TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc()); @@ -7770,7 +7740,8 @@ TreeTransform::TransformDependentScopeDeclRefExpr( return getDerived().RebuildDependentScopeDeclRefExpr(QualifierLoc, TemplateKWLoc, NameInfo, - &TransArgs); + &TransArgs, + IsAddressOfOperand); } template @@ -7796,7 +7767,7 @@ TreeTransform::TransformCXXConstructExpr(CXXConstructExpr *E) { return ExprError(); bool ArgumentChanged = false; - ASTOwningVector Args(SemaRef); + SmallVector Args; if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args, &ArgumentChanged)) return ExprError(); @@ -7813,7 +7784,7 @@ TreeTransform::TransformCXXConstructExpr(CXXConstructExpr *E) { return getDerived().RebuildCXXConstructExpr(T, /*FIXME:*/E->getLocStart(), Constructor, E->isElidable(), - move_arg(Args), + Args, E->hadMultipleCandidates(), E->requiresZeroInitialization(), E->getConstructionKind(), @@ -7857,7 +7828,7 @@ TreeTransform::TransformCXXTemporaryObjectExpr( return ExprError(); bool ArgumentChanged = false; - ASTOwningVector Args(SemaRef); + SmallVector Args; Args.reserve(E->getNumArgs()); if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args, &ArgumentChanged)) @@ -7874,19 +7845,13 @@ TreeTransform::TransformCXXTemporaryObjectExpr( return getDerived().RebuildCXXTemporaryObjectExpr(T, /*FIXME:*/T->getTypeLoc().getEndLoc(), - move_arg(Args), + Args, E->getLocEnd()); } template ExprResult TreeTransform::TransformLambdaExpr(LambdaExpr *E) { - // Create the local class that will describe the lambda. - CXXRecordDecl *Class - = getSema().createLambdaClosureType(E->getIntroducerRange(), - /*KnownDependent=*/false); - getDerived().transformedLocalDecl(E->getLambdaClass(), Class); - // Transform the type of the lambda parameters and start the definition of // the lambda itself. 
TypeSourceInfo *MethodTy @@ -7894,6 +7859,13 @@ TreeTransform::TransformLambdaExpr(LambdaExpr *E) { if (!MethodTy) return ExprError(); + // Create the local class that will describe the lambda. + CXXRecordDecl *Class + = getSema().createLambdaClosureType(E->getIntroducerRange(), + MethodTy, + /*KnownDependent=*/false); + getDerived().transformedLocalDecl(E->getLambdaClass(), Class); + // Transform lambda parameters. llvm::SmallVector ParamTypes; llvm::SmallVector Params; @@ -8038,7 +8010,7 @@ TreeTransform::TransformCXXUnresolvedConstructExpr( return ExprError(); bool ArgumentChanged = false; - ASTOwningVector Args(SemaRef); + SmallVector Args; Args.reserve(E->arg_size()); if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args, &ArgumentChanged)) @@ -8052,7 +8024,7 @@ TreeTransform::TransformCXXUnresolvedConstructExpr( // FIXME: we're faking the locations of the commas return getDerived().RebuildCXXUnresolvedConstructExpr(T, E->getLParenLoc(), - move_arg(Args), + Args, E->getRParenLoc()); } @@ -8346,6 +8318,13 @@ TreeTransform::TransformSubstNonTypeTemplateParmExpr( template ExprResult +TreeTransform::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) { + // Default behavior is to do nothing with this transformation. + return SemaRef.Owned(E); +} + +template +ExprResult TreeTransform::TransformMaterializeTemporaryExpr( MaterializeTemporaryExpr *E) { return getDerived().TransformExpr(E->GetTemporaryExpr()); @@ -8576,7 +8555,7 @@ ExprResult TreeTransform::TransformObjCMessageExpr(ObjCMessageExpr *E) { // Transform arguments. bool ArgChanged = false; - ASTOwningVector Args(SemaRef); + SmallVector Args; Args.reserve(E->getNumArgs()); if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), false, Args, &ArgChanged)) @@ -8602,7 +8581,7 @@ TreeTransform::TransformObjCMessageExpr(ObjCMessageExpr *E) { SelLocs, E->getMethodDecl(), E->getLeftLoc(), - move_arg(Args), + Args, E->getRightLoc()); } @@ -8627,7 +8606,7 @@ TreeTransform::TransformObjCMessageExpr(ObjCMessageExpr *E) { SelLocs, E->getMethodDecl(), E->getLeftLoc(), - move_arg(Args), + Args, E->getRightLoc()); } @@ -8740,7 +8719,7 @@ template ExprResult TreeTransform::TransformShuffleVectorExpr(ShuffleVectorExpr *E) { bool ArgumentChanged = false; - ASTOwningVector SubExprs(SemaRef); + SmallVector SubExprs; SubExprs.reserve(E->getNumSubExprs()); if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false, SubExprs, &ArgumentChanged)) @@ -8751,7 +8730,7 @@ TreeTransform::TransformShuffleVectorExpr(ShuffleVectorExpr *E) { return SemaRef.Owned(E); return getDerived().RebuildShuffleVectorExpr(E->getBuiltinLoc(), - move_arg(SubExprs), + SubExprs, E->getRParenLoc()); } @@ -8854,7 +8833,7 @@ ExprResult TreeTransform::TransformAtomicExpr(AtomicExpr *E) { QualType RetTy = getDerived().TransformType(E->getType()); bool ArgumentChanged = false; - ASTOwningVector SubExprs(SemaRef); + SmallVector SubExprs; SubExprs.reserve(E->getNumSubExprs()); if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false, SubExprs, &ArgumentChanged)) @@ -8864,7 +8843,7 @@ TreeTransform::TransformAtomicExpr(AtomicExpr *E) { !ArgumentChanged) return SemaRef.Owned(E); - return getDerived().RebuildAtomicExpr(E->getBuiltinLoc(), move_arg(SubExprs), + return getDerived().RebuildAtomicExpr(E->getBuiltinLoc(), SubExprs, RetTy, E->getOp(), E->getRParenLoc()); } @@ -9185,7 +9164,7 @@ TreeTransform::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op, if (Result.isInvalid()) return ExprError(); - return move(Result); + 
return Result; } } @@ -9200,7 +9179,12 @@ TreeTransform::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op, // IsAcceptableNonMemberOperatorCandidate for each of these? Functions.append(ULE->decls_begin(), ULE->decls_end()); } else { - Functions.addDecl(cast(Callee)->getDecl()); + // If we've resolved this to a particular non-member function, just call + // that function. If we resolved it to a member function, + // CreateOverloaded* will find that function for us. + NamedDecl *ND = cast(Callee)->getDecl(); + if (!isa(ND)) + Functions.addDecl(ND); } // Add any functions found via argument-dependent lookup. @@ -9240,7 +9224,7 @@ TreeTransform::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op, if (Result.isInvalid()) return ExprError(); - return move(Result); + return Result; } template diff --git a/lib/Serialization/ASTCommon.cpp b/lib/Serialization/ASTCommon.cpp index 67f74f7..0ec03cf 100644 --- a/lib/Serialization/ASTCommon.cpp +++ b/lib/Serialization/ASTCommon.cpp @@ -60,6 +60,9 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) { case BuiltinType::ObjCId: ID = PREDEF_TYPE_OBJC_ID; break; case BuiltinType::ObjCClass: ID = PREDEF_TYPE_OBJC_CLASS; break; case BuiltinType::ObjCSel: ID = PREDEF_TYPE_OBJC_SEL; break; + case BuiltinType::BuiltinFn: + ID = PREDEF_TYPE_BUILTIN_FN; break; + } return TypeIdx(ID); diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp index 3adbc57..deba302 100644 --- a/lib/Serialization/ASTReader.cpp +++ b/lib/Serialization/ASTReader.cpp @@ -30,13 +30,16 @@ #include "clang/Lex/MacroInfo.h" #include "clang/Lex/PreprocessingRecord.h" #include "clang/Lex/Preprocessor.h" +#include "clang/Lex/PreprocessorOptions.h" #include "clang/Lex/HeaderSearch.h" +#include "clang/Lex/HeaderSearchOptions.h" #include "clang/Basic/OnDiskHashTable.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/SourceManagerInternals.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/FileSystemStatCache.h" #include "clang/Basic/TargetInfo.h" +#include "clang/Basic/TargetOptions.h" #include "clang/Basic/Version.h" #include "clang/Basic/VersionTuple.h" #include "llvm/ADT/StringExtras.h" @@ -62,353 +65,306 @@ using namespace clang::serialization::reader; ASTReaderListener::~ASTReaderListener() {} -bool -PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) { - const LangOptions &PPLangOpts = PP.getLangOpts(); - -#define LANGOPT(Name, Bits, Default, Description) \ - if (PPLangOpts.Name != LangOpts.Name) { \ - Reader.Diag(diag::err_pch_langopt_mismatch) \ - << Description << LangOpts.Name << PPLangOpts.Name; \ +/// \brief Compare the given set of language options against an existing set of +/// language options. +/// +/// \param Diags If non-NULL, diagnostics will be emitted via this engine. +/// +/// \returns true if the languagae options mis-match, false otherwise. 
+static bool checkLanguageOptions(const LangOptions &LangOpts, + const LangOptions &ExistingLangOpts, + DiagnosticsEngine *Diags) { +#define LANGOPT(Name, Bits, Default, Description) \ + if (ExistingLangOpts.Name != LangOpts.Name) { \ + if (Diags) \ + Diags->Report(diag::err_pch_langopt_mismatch) \ + << Description << LangOpts.Name << ExistingLangOpts.Name; \ + return true; \ + } + +#define VALUE_LANGOPT(Name, Bits, Default, Description) \ + if (ExistingLangOpts.Name != LangOpts.Name) { \ + if (Diags) \ + Diags->Report(diag::err_pch_langopt_value_mismatch) \ + << Description; \ return true; \ } -#define VALUE_LANGOPT(Name, Bits, Default, Description) \ - if (PPLangOpts.Name != LangOpts.Name) { \ - Reader.Diag(diag::err_pch_langopt_value_mismatch) \ - << Description; \ - return true; \ -} - -#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \ - if (PPLangOpts.get##Name() != LangOpts.get##Name()) { \ - Reader.Diag(diag::err_pch_langopt_value_mismatch) \ - << Description; \ - return true; \ +#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \ + if (ExistingLangOpts.get##Name() != LangOpts.get##Name()) { \ + if (Diags) \ + Diags->Report(diag::err_pch_langopt_value_mismatch) \ + << Description; \ + return true; \ } #define BENIGN_LANGOPT(Name, Bits, Default, Description) #define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description) #include "clang/Basic/LangOptions.def" - if (PPLangOpts.ObjCRuntime != LangOpts.ObjCRuntime) { - Reader.Diag(diag::err_pch_langopt_value_mismatch) + if (ExistingLangOpts.ObjCRuntime != LangOpts.ObjCRuntime) { + if (Diags) + Diags->Report(diag::err_pch_langopt_value_mismatch) << "target Objective-C runtime"; return true; } - + return false; } -bool PCHValidator::ReadTargetTriple(StringRef Triple) { - if (Triple == PP.getTargetInfo().getTriple().str()) - return false; - - Reader.Diag(diag::warn_pch_target_triple) - << Triple << PP.getTargetInfo().getTriple().str(); - return true; -} +/// \brief Compare the given set of target options against an existing set of +/// target options. +/// +/// \param Diags If non-NULL, diagnostics will be emitted via this engine. +/// +/// \returns true if the target options mis-match, false otherwise. +static bool checkTargetOptions(const TargetOptions &TargetOpts, + const TargetOptions &ExistingTargetOpts, + DiagnosticsEngine *Diags) { +#define CHECK_TARGET_OPT(Field, Name) \ + if (TargetOpts.Field != ExistingTargetOpts.Field) { \ + if (Diags) \ + Diags->Report(diag::err_pch_targetopt_mismatch) \ + << Name << TargetOpts.Field << ExistingTargetOpts.Field; \ + return true; \ + } + + CHECK_TARGET_OPT(Triple, "target"); + CHECK_TARGET_OPT(CPU, "target CPU"); + CHECK_TARGET_OPT(ABI, "target ABI"); + CHECK_TARGET_OPT(CXXABI, "target C++ ABI"); + CHECK_TARGET_OPT(LinkerVersion, "target linker version"); +#undef CHECK_TARGET_OPT + + // Compare feature sets. 
+ SmallVector ExistingFeatures( + ExistingTargetOpts.FeaturesAsWritten.begin(), + ExistingTargetOpts.FeaturesAsWritten.end()); + SmallVector ReadFeatures(TargetOpts.FeaturesAsWritten.begin(), + TargetOpts.FeaturesAsWritten.end()); + std::sort(ExistingFeatures.begin(), ExistingFeatures.end()); + std::sort(ReadFeatures.begin(), ReadFeatures.end()); + + unsigned ExistingIdx = 0, ExistingN = ExistingFeatures.size(); + unsigned ReadIdx = 0, ReadN = ReadFeatures.size(); + while (ExistingIdx < ExistingN && ReadIdx < ReadN) { + if (ExistingFeatures[ExistingIdx] == ReadFeatures[ReadIdx]) { + ++ExistingIdx; + ++ReadIdx; + continue; + } -namespace { - struct EmptyStringRef { - bool operator ()(StringRef r) const { return r.empty(); } - }; - struct EmptyBlock { - bool operator ()(const PCHPredefinesBlock &r) const {return r.Data.empty();} - }; -} + if (ReadFeatures[ReadIdx] < ExistingFeatures[ExistingIdx]) { + if (Diags) + Diags->Report(diag::err_pch_targetopt_feature_mismatch) + << false << ReadFeatures[ReadIdx]; + return true; + } -static bool EqualConcatenations(SmallVector L, - PCHPredefinesBlocks R) { - // First, sum up the lengths. - unsigned LL = 0, RL = 0; - for (unsigned I = 0, N = L.size(); I != N; ++I) { - LL += L[I].size(); - } - for (unsigned I = 0, N = R.size(); I != N; ++I) { - RL += R[I].Data.size(); - } - if (LL != RL) - return false; - if (LL == 0 && RL == 0) + if (Diags) + Diags->Report(diag::err_pch_targetopt_feature_mismatch) + << true << ExistingFeatures[ExistingIdx]; return true; - - // Kick out empty parts, they confuse the algorithm below. - L.erase(std::remove_if(L.begin(), L.end(), EmptyStringRef()), L.end()); - R.erase(std::remove_if(R.begin(), R.end(), EmptyBlock()), R.end()); - - // Do it the hard way. At this point, both vectors must be non-empty. - StringRef LR = L[0], RR = R[0].Data; - unsigned LI = 0, RI = 0, LN = L.size(), RN = R.size(); - (void) RN; - for (;;) { - // Compare the current pieces. - if (LR.size() == RR.size()) { - // If they're the same length, it's pretty easy. - if (LR != RR) - return false; - // Both pieces are done, advance. - ++LI; - ++RI; - // If either string is done, they're both done, since they're the same - // length. - if (LI == LN) { - assert(RI == RN && "Strings not the same length after all?"); - return true; - } - LR = L[LI]; - RR = R[RI].Data; - } else if (LR.size() < RR.size()) { - // Right piece is longer. - if (!RR.startswith(LR)) - return false; - ++LI; - assert(LI != LN && "Strings not the same length after all?"); - RR = RR.substr(LR.size()); - LR = L[LI]; - } else { - // Left piece is longer. 
- if (!LR.startswith(RR)) - return false; - ++RI; - assert(RI != RN && "Strings not the same length after all?"); - LR = LR.substr(RR.size()); - RR = R[RI].Data; - } } -} -static std::pair -FindMacro(const PCHPredefinesBlocks &Buffers, StringRef MacroDef) { - std::pair Res; - for (unsigned I = 0, N = Buffers.size(); I != N; ++I) { - Res.second = Buffers[I].Data.find(MacroDef); - if (Res.second != StringRef::npos) { - Res.first = Buffers[I].BufferID; - break; - } + if (ExistingIdx < ExistingN) { + if (Diags) + Diags->Report(diag::err_pch_targetopt_feature_mismatch) + << true << ExistingFeatures[ExistingIdx]; + return true; } - return Res; -} - -bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers, - StringRef OriginalFileName, - std::string &SuggestedPredefines, - FileManager &FileMgr) { - // We are in the context of an implicit include, so the predefines buffer will - // have a #include entry for the PCH file itself (as normalized by the - // preprocessor initialization). Find it and skip over it in the checking - // below. - SmallString<256> PCHInclude; - PCHInclude += "#include \""; - PCHInclude += HeaderSearch::NormalizeDashIncludePath(OriginalFileName, - FileMgr); - PCHInclude += "\"\n"; - std::pair Split = - StringRef(PP.getPredefines()).split(PCHInclude.str()); - StringRef Left = Split.first, Right = Split.second; - if (Left == PP.getPredefines()) { - Error("Missing PCH include entry!"); + + if (ReadIdx < ReadN) { + if (Diags) + Diags->Report(diag::err_pch_targetopt_feature_mismatch) + << false << ReadFeatures[ReadIdx]; return true; } - // If the concatenation of all the PCH buffers is equal to the adjusted - // command line, we're done. - SmallVector CommandLine; - CommandLine.push_back(Left); - CommandLine.push_back(Right); - if (EqualConcatenations(CommandLine, Buffers)) - return false; + return false; +} - SourceManager &SourceMgr = PP.getSourceManager(); - - // The predefines buffers are different. Determine what the differences are, - // and whether they require us to reject the PCH file. - SmallVector PCHLines; - for (unsigned I = 0, N = Buffers.size(); I != N; ++I) - Buffers[I].Data.split(PCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false); - - SmallVector CmdLineLines; - Left.split(CmdLineLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false); - - // Pick out implicit #includes after the PCH and don't consider them for - // validation; we will insert them into SuggestedPredefines so that the - // preprocessor includes them. - std::string IncludesAfterPCH; - SmallVector AfterPCHLines; - Right.split(AfterPCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false); - for (unsigned i = 0, e = AfterPCHLines.size(); i != e; ++i) { - if (AfterPCHLines[i].startswith("#include ")) { - IncludesAfterPCH += AfterPCHLines[i]; - IncludesAfterPCH += '\n'; - } else { - CmdLineLines.push_back(AfterPCHLines[i]); - } - } - - // Make sure we add the includes last into SuggestedPredefines before we - // exit this function. - struct AddIncludesRAII { - std::string &SuggestedPredefines; - std::string &IncludesAfterPCH; - - AddIncludesRAII(std::string &SuggestedPredefines, - std::string &IncludesAfterPCH) - : SuggestedPredefines(SuggestedPredefines), - IncludesAfterPCH(IncludesAfterPCH) { } - ~AddIncludesRAII() { - SuggestedPredefines += IncludesAfterPCH; - } - } AddIncludes(SuggestedPredefines, IncludesAfterPCH); - - // Sort both sets of predefined buffer lines, since we allow some extra - // definitions and they may appear at any point in the output. 
- std::sort(CmdLineLines.begin(), CmdLineLines.end()); - std::sort(PCHLines.begin(), PCHLines.end()); - - // Determine which predefines that were used to build the PCH file are missing - // from the command line. - std::vector MissingPredefines; - std::set_difference(PCHLines.begin(), PCHLines.end(), - CmdLineLines.begin(), CmdLineLines.end(), - std::back_inserter(MissingPredefines)); - - bool MissingDefines = false; - bool ConflictingDefines = false; - for (unsigned I = 0, N = MissingPredefines.size(); I != N; ++I) { - StringRef Missing = MissingPredefines[I]; - if (Missing.startswith("#include ")) { - // An -include was specified when generating the PCH; it is included in - // the PCH, just ignore it. - continue; - } - if (!Missing.startswith("#define ")) { - Reader.Diag(diag::warn_pch_compiler_options_mismatch); - return true; - } +bool +PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts, + bool Complain) { + const LangOptions &ExistingLangOpts = PP.getLangOpts(); + return checkLanguageOptions(LangOpts, ExistingLangOpts, + Complain? &Reader.Diags : 0); +} - // This is a macro definition. Determine the name of the macro we're - // defining. - std::string::size_type StartOfMacroName = strlen("#define "); - std::string::size_type EndOfMacroName - = Missing.find_first_of("( \n\r", StartOfMacroName); - assert(EndOfMacroName != std::string::npos && - "Couldn't find the end of the macro name"); - StringRef MacroName = Missing.slice(StartOfMacroName, EndOfMacroName); - - // Determine whether this macro was given a different definition on the - // command line. - std::string MacroDefStart = "#define " + MacroName.str(); - std::string::size_type MacroDefLen = MacroDefStart.size(); - SmallVector::iterator ConflictPos - = std::lower_bound(CmdLineLines.begin(), CmdLineLines.end(), - MacroDefStart); - for (; ConflictPos != CmdLineLines.end(); ++ConflictPos) { - if (!ConflictPos->startswith(MacroDefStart)) { - // Different macro; we're done. - ConflictPos = CmdLineLines.end(); - break; - } +bool PCHValidator::ReadTargetOptions(const TargetOptions &TargetOpts, + bool Complain) { + const TargetOptions &ExistingTargetOpts = PP.getTargetInfo().getTargetOpts(); + return checkTargetOptions(TargetOpts, ExistingTargetOpts, + Complain? &Reader.Diags : 0); +} - assert(ConflictPos->size() > MacroDefLen && - "Invalid #define in predefines buffer?"); - if ((*ConflictPos)[MacroDefLen] != ' ' && - (*ConflictPos)[MacroDefLen] != '(') - continue; // Longer macro name; keep trying. +namespace { + typedef llvm::StringMap > + MacroDefinitionsMap; +} - // We found a conflicting macro definition. - break; - } +/// \brief Collect the macro definitions provided by the given preprocessor +/// options. +static void collectMacroDefinitions(const PreprocessorOptions &PPOpts, + MacroDefinitionsMap &Macros, + SmallVectorImpl *MacroNames = 0){ + for (unsigned I = 0, N = PPOpts.Macros.size(); I != N; ++I) { + StringRef Macro = PPOpts.Macros[I].first; + bool IsUndef = PPOpts.Macros[I].second; - if (ConflictPos != CmdLineLines.end()) { - Reader.Diag(diag::warn_cmdline_conflicting_macro_def) - << MacroName; + std::pair MacroPair = Macro.split('='); + StringRef MacroName = MacroPair.first; + StringRef MacroBody = MacroPair.second; - // Show the definition of this macro within the PCH file. 
- std::pair MacroLoc = - FindMacro(Buffers, Missing); - assert(MacroLoc.second!=StringRef::npos && "Unable to find macro!"); - SourceLocation PCHMissingLoc = - SourceMgr.getLocForStartOfFile(MacroLoc.first) - .getLocWithOffset(MacroLoc.second); - Reader.Diag(PCHMissingLoc, diag::note_pch_macro_defined_as) << MacroName; + // For an #undef'd macro, we only care about the name. + if (IsUndef) { + if (MacroNames && !Macros.count(MacroName)) + MacroNames->push_back(MacroName); - ConflictingDefines = true; + Macros[MacroName] = std::make_pair("", true); continue; } - // If the macro doesn't conflict, then we'll just pick up the macro - // definition from the PCH file. Warn the user that they made a mistake. - if (ConflictingDefines) - continue; // Don't complain if there are already conflicting defs - - if (!MissingDefines) { - Reader.Diag(diag::warn_cmdline_missing_macro_defs); - MissingDefines = true; + // For a #define'd macro, figure out the actual definition. + if (MacroName.size() == Macro.size()) + MacroBody = "1"; + else { + // Note: GCC drops anything following an end-of-line character. + StringRef::size_type End = MacroBody.find_first_of("\n\r"); + MacroBody = MacroBody.substr(0, End); } - // Show the definition of this macro within the PCH file. - std::pair MacroLoc = - FindMacro(Buffers, Missing); - assert(MacroLoc.second!=StringRef::npos && "Unable to find macro!"); - SourceLocation PCHMissingLoc = - SourceMgr.getLocForStartOfFile(MacroLoc.first) - .getLocWithOffset(MacroLoc.second); - Reader.Diag(PCHMissingLoc, diag::note_using_macro_def_from_pch); + if (MacroNames && !Macros.count(MacroName)) + MacroNames->push_back(MacroName); + Macros[MacroName] = std::make_pair(MacroBody, false); } +} + +/// \brief Check the preprocessor options deserialized from the control block +/// against the preprocessor options in an existing preprocessor. +/// +/// \param Diags If non-null, produce diagnostics for any mismatches incurred. +static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts, + const PreprocessorOptions &ExistingPPOpts, + DiagnosticsEngine *Diags, + FileManager &FileMgr, + std::string &SuggestedPredefines) { + // Check macro definitions. + MacroDefinitionsMap ASTFileMacros; + collectMacroDefinitions(PPOpts, ASTFileMacros); + MacroDefinitionsMap ExistingMacros; + SmallVector ExistingMacroNames; + collectMacroDefinitions(ExistingPPOpts, ExistingMacros, &ExistingMacroNames); + + for (unsigned I = 0, N = ExistingMacroNames.size(); I != N; ++I) { + // Dig out the macro definition in the existing preprocessor options. + StringRef MacroName = ExistingMacroNames[I]; + std::pair Existing = ExistingMacros[MacroName]; + + // Check whether we know anything about this macro name or not. + llvm::StringMap >::iterator Known + = ASTFileMacros.find(MacroName); + if (Known == ASTFileMacros.end()) { + // FIXME: Check whether this identifier was referenced anywhere in the + // AST file. If so, we should reject the AST file. Unfortunately, this + // information isn't in the control block. What shall we do about it? 
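The macro handling in collectMacroDefinitions above mirrors the usual -D semantics: a bare name defines the macro as 1, an '=' separates name from body, and anything past the first end-of-line in the body is dropped. A standalone sketch of that parsing, using a hypothetical helper name that is not part of the patch:

#include <string>
#include <utility>

// parseMacroSpec("FOO")      -> {"FOO", "1"}
// parseMacroSpec("FOO=bar")  -> {"FOO", "bar"}
// parseMacroSpec("FOO=a\nb") -> {"FOO", "a"}   (GCC-style: drop text after end-of-line)
static std::pair<std::string, std::string> parseMacroSpec(const std::string &Spec) {
  std::string::size_type Eq = Spec.find('=');
  if (Eq == std::string::npos)
    return std::make_pair(Spec, "1");          // bare -DNAME defines NAME as 1
  std::string Body = Spec.substr(Eq + 1);
  Body = Body.substr(0, Body.find_first_of("\n\r"));
  return std::make_pair(Spec.substr(0, Eq), Body);
}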
+ + if (Existing.second) { + SuggestedPredefines += "#undef "; + SuggestedPredefines += MacroName.str(); + SuggestedPredefines += '\n'; + } else { + SuggestedPredefines += "#define "; + SuggestedPredefines += MacroName.str(); + SuggestedPredefines += ' '; + SuggestedPredefines += Existing.first.str(); + SuggestedPredefines += '\n'; + } + continue; + } - if (ConflictingDefines) - return true; - - // Determine what predefines were introduced based on command-line - // parameters that were not present when building the PCH - // file. Extra #defines are okay, so long as the identifiers being - // defined were not used within the precompiled header. - std::vector ExtraPredefines; - std::set_difference(CmdLineLines.begin(), CmdLineLines.end(), - PCHLines.begin(), PCHLines.end(), - std::back_inserter(ExtraPredefines)); - for (unsigned I = 0, N = ExtraPredefines.size(); I != N; ++I) { - StringRef &Extra = ExtraPredefines[I]; - if (!Extra.startswith("#define ")) { - Reader.Diag(diag::warn_pch_compiler_options_mismatch); + // If the macro was defined in one but undef'd in the other, we have a + // conflict. + if (Existing.second != Known->second.second) { + if (Diags) { + Diags->Report(diag::err_pch_macro_def_undef) + << MacroName << Known->second.second; + } return true; } - // This is an extra macro definition. Determine the name of the - // macro we're defining. - std::string::size_type StartOfMacroName = strlen("#define "); - std::string::size_type EndOfMacroName - = Extra.find_first_of("( \n\r", StartOfMacroName); - assert(EndOfMacroName != std::string::npos && - "Couldn't find the end of the macro name"); - StringRef MacroName = Extra.slice(StartOfMacroName, EndOfMacroName); - - // Check whether this name was used somewhere in the PCH file. If - // so, defining it as a macro could change behavior, so we reject - // the PCH file. - if (IdentifierInfo *II = Reader.get(MacroName)) { - Reader.Diag(diag::warn_macro_name_used_in_pch) << II; - return true; + // If the macro was #undef'd in both, or if the macro bodies are identical, + // it's fine. + if (Existing.second || Existing.first == Known->second.first) + continue; + + // The macro bodies differ; complain. + if (Diags) { + Diags->Report(diag::err_pch_macro_def_conflict) + << MacroName << Known->second.first << Existing.first; + } + return true; + } + + // Check whether we're using predefines. + if (PPOpts.UsePredefines != ExistingPPOpts.UsePredefines) { + if (Diags) { + Diags->Report(diag::err_pch_undef) << ExistingPPOpts.UsePredefines; } + return true; + } + + // Compute the #include and #include_macros lines we need. + for (unsigned I = 0, N = ExistingPPOpts.Includes.size(); I != N; ++I) { + StringRef File = ExistingPPOpts.Includes[I]; + if (File == ExistingPPOpts.ImplicitPCHInclude) + continue; + + if (std::find(PPOpts.Includes.begin(), PPOpts.Includes.end(), File) + != PPOpts.Includes.end()) + continue; - // Add this definition to the suggested predefines buffer. 
- SuggestedPredefines += Extra; - SuggestedPredefines += '\n'; + SuggestedPredefines += "#include \""; + SuggestedPredefines += + HeaderSearch::NormalizeDashIncludePath(File, FileMgr); + SuggestedPredefines += "\"\n"; + } + + for (unsigned I = 0, N = ExistingPPOpts.MacroIncludes.size(); I != N; ++I) { + StringRef File = ExistingPPOpts.MacroIncludes[I]; + if (std::find(PPOpts.MacroIncludes.begin(), PPOpts.MacroIncludes.end(), + File) + != PPOpts.MacroIncludes.end()) + continue; + + SuggestedPredefines += "#__include_macros \""; + SuggestedPredefines += + HeaderSearch::NormalizeDashIncludePath(File, FileMgr); + SuggestedPredefines += "\"\n##\n"; } - // If we get here, it's because the predefines buffer had compatible - // contents. Accept the PCH file. return false; } +bool PCHValidator::ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, + bool Complain, + std::string &SuggestedPredefines) { + const PreprocessorOptions &ExistingPPOpts = PP.getPreprocessorOpts(); + + return checkPreprocessorOptions(PPOpts, ExistingPPOpts, + Complain? &Reader.Diags : 0, + PP.getFileManager(), + SuggestedPredefines); +} + void PCHValidator::ReadHeaderFileInfo(const HeaderFileInfo &HFI, unsigned ID) { PP.getHeaderSearchInfo().setHeaderFileInfoForUID(HFI, ID); ++NumHeaderInfos; } -void PCHValidator::ReadCounter(unsigned Value) { +void PCHValidator::ReadCounter(const ModuleFile &M, unsigned Value) { PP.setCounterValue(Value); } @@ -527,6 +483,7 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k, return II; } + unsigned ObjCOrBuiltinID = ReadUnalignedLE16(d); unsigned Bits = ReadUnalignedLE16(d); bool CPlusPlusOperatorKeyword = Bits & 0x01; Bits >>= 1; @@ -536,13 +493,11 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k, Bits >>= 1; bool ExtensionToken = Bits & 0x01; Bits >>= 1; - bool hasMacroDefinition = Bits & 0x01; + bool hadMacroDefinition = Bits & 0x01; Bits >>= 1; - unsigned ObjCOrBuiltinID = Bits & 0x7FF; - Bits >>= 11; assert(Bits == 0 && "Extra bits in the identifier?"); - DataLen -= 6; + DataLen -= 8; // Build the IdentifierInfo itself and link the identifier ID with // the new IdentifierInfo. @@ -570,31 +525,14 @@ IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k, // If this identifier is a macro, deserialize the macro // definition. - if (hasMacroDefinition) { - // FIXME: Check for conflicts? - uint32_t Offset = ReadUnalignedLE32(d); - unsigned LocalSubmoduleID = ReadUnalignedLE32(d); - - // Determine whether this macro definition should be visible now, or - // whether it is in a hidden submodule. - bool Visible = true; - if (SubmoduleID GlobalSubmoduleID - = Reader.getGlobalSubmoduleID(F, LocalSubmoduleID)) { - if (Module *Owner = Reader.getSubmodule(GlobalSubmoduleID)) { - if (Owner->NameVisibility == Module::Hidden) { - // The owning module is not visible, and this macro definition should - // not be, either. - Visible = false; - - // Note that this macro definition was hidden because its owning - // module is not yet visible. 
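The block above replaces the old textual predefines-buffer comparison with a structural check of the preprocessor options: -D/-U specs are folded into a name -> (body, isUndef) map on both sides, mismatches reject the AST file, and anything only present on the current command line is re-emitted into SuggestedPredefines. A minimal standalone sketch of that idea, using plain standard-library containers and illustrative names rather than the Clang types:

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    // (body, isUndef) keyed by macro name; mirrors MacroDefinitionsMap.
    typedef std::map<std::string, std::pair<std::string, bool> > MacroMap;

    // Fold "NAME[=BODY]" / undef specs into the map.
    static void collect(const std::vector<std::pair<std::string, bool> > &Specs,
                        MacroMap &Out) {
      for (size_t I = 0; I != Specs.size(); ++I) {
        const std::string &Spec = Specs[I].first;
        bool IsUndef = Specs[I].second;
        std::string::size_type Eq = Spec.find('=');
        std::string Name = Spec.substr(0, Eq);
        if (IsUndef) {
          Out[Name] = std::make_pair(std::string(), true);
          continue;
        }
        // No '=' means the body defaults to "1"; otherwise drop anything
        // past the first end-of-line character, as GCC does.
        std::string Body = (Eq == std::string::npos) ? "1" : Spec.substr(Eq + 1);
        Body = Body.substr(0, Body.find_first_of("\n\r"));
        Out[Name] = std::make_pair(Body, false);
      }
    }

    int main() {
      std::vector<std::pair<std::string, bool> > FromAST, FromCmdLine;
      FromAST.push_back(std::make_pair("NDEBUG", false));      // -DNDEBUG
      FromCmdLine.push_back(std::make_pair("NDEBUG", false));  // -DNDEBUG
      FromCmdLine.push_back(std::make_pair("EXTRA=2", false)); // -DEXTRA=2

      MacroMap ASTFile, Existing;
      collect(FromAST, ASTFile);
      collect(FromCmdLine, Existing);

      std::string SuggestedPredefines;
      for (MacroMap::iterator I = Existing.begin(), E = Existing.end();
           I != E; ++I) {
        MacroMap::iterator Known = ASTFile.find(I->first);
        if (Known == ASTFile.end()) {
          // Not mentioned by the AST file: re-issue it as a predefine.
          SuggestedPredefines += (I->second.second ? "#undef " : "#define ") +
                                 I->first +
                                 (I->second.second ? "" : " " + I->second.first) +
                                 "\n";
          continue;
        }
        if (I->second.second != Known->second.second ||
            (!I->second.second && I->second.first != Known->second.first)) {
          std::cerr << "conflicting definition of " << I->first << "\n";
          return 1;  // reject the AST file
        }
      }
      std::cout << SuggestedPredefines;
      return 0;
    }

Run on the inputs above this prints "#define EXTRA 2", mirroring how extra command-line macros are carried over as suggested predefines instead of invalidating the PCH.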
- Reader.HiddenNamesMap[Owner].push_back(II); - } - } + if (hadMacroDefinition) { + SmallVector MacroIDs; + while (uint32_t LocalID = ReadUnalignedLE32(d)) { + MacroIDs.push_back(Reader.getGlobalMacroID(F, LocalID)); + DataLen -= 4; } - - Reader.setIdentifierIsMacro(II, F, Offset, Visible); - DataLen -= 8; + DataLen -= 4; + Reader.setIdentifierIsMacro(II, MacroIDs); } Reader.SetIdentifierInfo(ID, II); @@ -780,16 +718,6 @@ void ASTReader::Error(unsigned DiagID, Diag(DiagID) << Arg1 << Arg2; } -/// \brief Tell the AST listener about the predefines buffers in the chain. -bool ASTReader::CheckPredefinesBuffers() { - if (Listener) - return Listener->ReadPredefinesBuffer(PCHPredefinesBuffers, - ActualOriginalFileName, - SuggestedPredefines, - FileMgr); - return false; -} - //===----------------------------------------------------------------------===// // Source Manager Deserialization //===----------------------------------------------------------------------===// @@ -808,7 +736,7 @@ bool ASTReader::ParseLineTable(ModuleFile &F, unsigned FilenameLen = Record[Idx++]; std::string Filename(&Record[Idx], &Record[Idx] + FilenameLen); Idx += FilenameLen; - MaybeAddSystemRootToFilename(Filename); + MaybeAddSystemRootToFilename(F, Filename); FileIDs[I] = LineTable.getLineTableFilenameID(Filename); } @@ -841,106 +769,8 @@ bool ASTReader::ParseLineTable(ModuleFile &F, return false; } -namespace { - -class ASTStatData { -public: - const ino_t ino; - const dev_t dev; - const mode_t mode; - const time_t mtime; - const off_t size; - - ASTStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s) - : ino(i), dev(d), mode(mo), mtime(m), size(s) {} -}; - -class ASTStatLookupTrait { - public: - typedef const char *external_key_type; - typedef const char *internal_key_type; - - typedef ASTStatData data_type; - - static unsigned ComputeHash(const char *path) { - return llvm::HashString(path); - } - - static internal_key_type GetInternalKey(const char *path) { return path; } - - static bool EqualKey(internal_key_type a, internal_key_type b) { - return strcmp(a, b) == 0; - } - - static std::pair - ReadKeyDataLength(const unsigned char*& d) { - unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d); - unsigned DataLen = (unsigned) *d++; - return std::make_pair(KeyLen + 1, DataLen); - } - - static internal_key_type ReadKey(const unsigned char *d, unsigned) { - return (const char *)d; - } - - static data_type ReadData(const internal_key_type, const unsigned char *d, - unsigned /*DataLen*/) { - using namespace clang::io; - - ino_t ino = (ino_t) ReadUnalignedLE32(d); - dev_t dev = (dev_t) ReadUnalignedLE32(d); - mode_t mode = (mode_t) ReadUnalignedLE16(d); - time_t mtime = (time_t) ReadUnalignedLE64(d); - off_t size = (off_t) ReadUnalignedLE64(d); - return data_type(ino, dev, mode, mtime, size); - } -}; - -/// \brief stat() cache for precompiled headers. -/// -/// This cache is very similar to the stat cache used by pretokenized -/// headers. -class ASTStatCache : public FileSystemStatCache { - typedef OnDiskChainedHashTable CacheTy; - CacheTy *Cache; - - unsigned &NumStatHits, &NumStatMisses; -public: - ASTStatCache(const unsigned char *Buckets, const unsigned char *Base, - unsigned &NumStatHits, unsigned &NumStatMisses) - : Cache(0), NumStatHits(NumStatHits), NumStatMisses(NumStatMisses) { - Cache = CacheTy::Create(Buckets, Base); - } - - ~ASTStatCache() { delete Cache; } - - LookupResult getStat(const char *Path, struct stat &StatBuf, - int *FileDescriptor) { - // Do the lookup for the file's data in the AST file. 
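Near the top of this hunk, the identifier-table reader switches from a single macro offset to a zero-terminated list of macro IDs (hadMacroDefinition), shrinking DataLen by four bytes per entry plus the terminator. A self-contained sketch of that decoding loop, assuming the same unaligned little-endian layout (readLE32 is an illustrative stand-in for ReadUnalignedLE32, not the serialization API):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Read an unaligned little-endian 32-bit value and advance the cursor.
    static uint32_t readLE32(const unsigned char *&D) {
      uint32_t V = (uint32_t)D[0] | ((uint32_t)D[1] << 8) |
                   ((uint32_t)D[2] << 16) | ((uint32_t)D[3] << 24);
      D += 4;
      return V;
    }

    int main() {
      // Encoded payload: local macro IDs 7 and 12, then a 0 terminator.
      const unsigned char Data[] = {7,0,0,0, 12,0,0,0, 0,0,0,0};
      const unsigned char *D = Data;
      unsigned DataLen = sizeof(Data);

      std::vector<uint32_t> MacroIDs;
      while (uint32_t LocalID = readLE32(D)) { // stop at the 0 terminator
        MacroIDs.push_back(LocalID);           // a real reader would remap the
        DataLen -= 4;                          // local ID to a global MacroID
      }
      DataLen -= 4;                            // account for the terminator

      std::printf("decoded %zu macro IDs, %u bytes left\n",
                  MacroIDs.size(), DataLen);
      return 0;
    }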
- CacheTy::iterator I = Cache->find(Path); - - // If we don't get a hit in the AST file just forward to 'stat'. - if (I == Cache->end()) { - ++NumStatMisses; - return statChained(Path, StatBuf, FileDescriptor); - } - - ++NumStatHits; - ASTStatData Data = *I; - - StatBuf.st_ino = Data.ino; - StatBuf.st_dev = Data.dev; - StatBuf.st_mtime = Data.mtime; - StatBuf.st_mode = Data.mode; - StatBuf.st_size = Data.size; - return CacheExists; - } -}; -} // end anonymous namespace - - /// \brief Read a source manager block -ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) { +bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) { using namespace SrcMgr; llvm::BitstreamCursor &SLocEntryCursor = F.SLocEntryCursor; @@ -954,13 +784,13 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) { // The stream itself is going to skip over the source manager block. if (F.Stream.SkipBlock()) { Error("malformed block record in AST file"); - return Failure; + return true; } // Enter the source manager block. if (SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID)) { Error("malformed source manager block record in AST file"); - return Failure; + return true; } RecordData Record; @@ -969,9 +799,9 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) { if (Code == llvm::bitc::END_BLOCK) { if (SLocEntryCursor.ReadBlockEnd()) { Error("error at end of Source Manager block in AST file"); - return Failure; + return true; } - return Success; + return false; } if (Code == llvm::bitc::ENTER_SUBBLOCK) { @@ -979,7 +809,7 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) { SLocEntryCursor.ReadSubBlockID(); if (SLocEntryCursor.SkipBlock()) { Error("malformed block record in AST file"); - return Failure; + return true; } continue; } @@ -1001,7 +831,7 @@ ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) { case SM_SLOC_BUFFER_ENTRY: case SM_SLOC_EXPANSION_ENTRY: // Once we hit one of the source location entries, we're done. - return Success; + return false; } } } @@ -1039,14 +869,13 @@ resolveFileRelativeToOriginalDir(const std::string &Filename, return currPCHPath.str(); } -/// \brief Read in the source location entry with the given ID. -ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) { +bool ASTReader::ReadSLocEntry(int ID) { if (ID == 0) - return Success; + return false; if (unsigned(-ID) - 2 >= getTotalNumSLocs() || ID > 0) { Error("source location entry ID out-of-range for AST file"); - return Failure; + return true; } ModuleFile *F = GlobalSLocEntryMap.find(-ID)->second; @@ -1060,7 +889,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) { Code == llvm::bitc::ENTER_SUBBLOCK || Code == llvm::bitc::DEFINE_ABBREV) { Error("incorrectly-formatted source location entry in AST file"); - return Failure; + return true; } RecordData Record; @@ -1069,58 +898,18 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) { switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) { default: Error("incorrectly-formatted source location entry in AST file"); - return Failure; + return true; case SM_SLOC_FILE_ENTRY: { - if (Record.size() < 7) { - Error("source location entry is incorrect"); - return Failure; - } - // We will detect whether a file changed and return 'Failure' for it, but // we will also try to fail gracefully by setting up the SLocEntry. 
- ASTReader::ASTReadResult Result = Success; - - bool OverriddenBuffer = Record[6]; - - std::string OrigFilename(BlobStart, BlobStart + BlobLen); - std::string Filename = OrigFilename; - MaybeAddSystemRootToFilename(Filename); - const FileEntry *File = - OverriddenBuffer? FileMgr.getVirtualFile(Filename, (off_t)Record[4], - (time_t)Record[5]) - : FileMgr.getFile(Filename, /*OpenFile=*/false); - if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() && - OriginalDir != CurrentDir) { - std::string resolved = resolveFileRelativeToOriginalDir(Filename, - OriginalDir, - CurrentDir); - if (!resolved.empty()) - File = FileMgr.getFile(resolved); - } - if (File == 0) - File = FileMgr.getVirtualFile(Filename, (off_t)Record[4], - (time_t)Record[5]); - if (File == 0) { - std::string ErrorStr = "could not find file '"; - ErrorStr += Filename; - ErrorStr += "' referenced by AST file"; - Error(ErrorStr.c_str()); - return Failure; - } + unsigned InputID = Record[4]; + InputFile IF = getInputFile(*F, InputID); + const FileEntry *File = IF.getPointer(); + bool OverriddenBuffer = IF.getInt(); - if (!DisableValidation && - ((off_t)Record[4] != File->getSize() -#if !defined(LLVM_ON_WIN32) - // In our regression testing, the Windows file system seems to - // have inconsistent modification times that sometimes - // erroneously trigger this error-handling path. - || (time_t)Record[5] != File->getModificationTime() -#endif - )) { - Error(diag::err_fe_pch_file_modified, Filename); - Result = Failure; - } + if (!IF.getPointer()) + return true; SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]); if (IncludeLoc.isInvalid() && F->Kind != MK_MainFile) { @@ -1133,12 +922,12 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) { ID, BaseOffset + Record[0]); SrcMgr::FileInfo &FileInfo = const_cast(SourceMgr.getSLocEntry(FID).getFile()); - FileInfo.NumCreatedFIDs = Record[7]; + FileInfo.NumCreatedFIDs = Record[5]; if (Record[3]) FileInfo.setHasLineDirectives(); - const DeclID *FirstDecl = F->FileSortedDecls + Record[8]; - unsigned NumFileDecls = Record[9]; + const DeclID *FirstDecl = F->FileSortedDecls + Record[6]; + unsigned NumFileDecls = Record[7]; if (NumFileDecls) { assert(F->FileSortedDecls && "FILE_SORTED_DECLS not encountered yet ?"); FileDeclIDs[FID] = FileDeclsInfo(F, llvm::makeArrayRef(FirstDecl, @@ -1157,23 +946,24 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) { if (RecCode != SM_SLOC_BUFFER_BLOB) { Error("AST record has invalid code"); - return Failure; + return true; } llvm::MemoryBuffer *Buffer = llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1), - Filename); + File->getName()); SourceMgr.overrideFileContents(File, Buffer); } - if (Result == Failure) - return Failure; break; } case SM_SLOC_BUFFER_ENTRY: { const char *Name = BlobStart; unsigned Offset = Record[0]; + SrcMgr::CharacteristicKind + FileCharacter = (SrcMgr::CharacteristicKind)Record[2]; + SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]); unsigned Code = SLocEntryCursor.ReadCode(); Record.clear(); unsigned RecCode @@ -1181,23 +971,14 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) { if (RecCode != SM_SLOC_BUFFER_BLOB) { Error("AST record has invalid code"); - return Failure; + return true; } llvm::MemoryBuffer *Buffer = llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1), Name); - FileID BufferID = SourceMgr.createFileIDForMemBuffer(Buffer, ID, - BaseOffset + Offset); - - if (strcmp(Name, "") == 0 && F->Kind == MK_PCH) { - 
PCHPredefinesBlock Block = { - BufferID, - StringRef(BlobStart, BlobLen - 1) - }; - PCHPredefinesBuffers.push_back(Block); - } - + SourceMgr.createFileIDForMemBuffer(Buffer, FileCharacter, ID, + BaseOffset + Offset, IncludeLoc); break; } @@ -1213,7 +994,7 @@ ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) { } } - return Success; + return false; } /// \brief Find the location where the module F is imported. @@ -1258,7 +1039,8 @@ bool ASTReader::ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor, } } -void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) { +void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset, + MacroInfo *Hint) { llvm::BitstreamCursor &Stream = F.MacroCursor; // Keep track of where we are in the stream, then jump back there @@ -1270,6 +1052,24 @@ void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) { SmallVector MacroArgs; MacroInfo *Macro = 0; + // RAII object to add the loaded macro information once we're done + // adding tokens. + struct AddLoadedMacroInfoRAII { + Preprocessor &PP; + MacroInfo *Hint; + MacroInfo *MI; + IdentifierInfo *II; + + AddLoadedMacroInfoRAII(Preprocessor &PP, MacroInfo *Hint) + : PP(PP), Hint(Hint), MI(), II() { } + ~AddLoadedMacroInfoRAII( ) { + if (MI) { + // Finally, install the macro. + PP.addLoadedMacroInfo(II, MI, Hint); + } + } + } AddLoadedMacroInfo(PP, Hint); + while (true) { unsigned Code = Stream.ReadCode(); switch (Code) { @@ -1312,18 +1112,31 @@ void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) { Error("macro must have a name in AST file"); return; } - - SourceLocation Loc = ReadSourceLocation(F, Record[1]); - bool isUsed = Record[2]; + unsigned GlobalID = getGlobalMacroID(F, Record[1]); + + // If this macro has already been loaded, don't do so again. + if (MacrosLoaded[GlobalID - NUM_PREDEF_MACRO_IDS]) + return; + + SubmoduleID GlobalSubmoduleID = getGlobalSubmoduleID(F, Record[2]); + unsigned NextIndex = 3; + SourceLocation Loc = ReadSourceLocation(F, Record, NextIndex); MacroInfo *MI = PP.AllocateMacroInfo(Loc); - MI->setIsUsed(isUsed); + + // Record this macro. + MacrosLoaded[GlobalID - NUM_PREDEF_MACRO_IDS] = MI; + + SourceLocation UndefLoc = ReadSourceLocation(F, Record, NextIndex); + if (UndefLoc.isValid()) + MI->setUndefLoc(UndefLoc); + + MI->setIsUsed(Record[NextIndex++]); MI->setIsFromAST(); - bool IsPublic = Record[3]; - unsigned NextIndex = 4; + bool IsPublic = Record[NextIndex++]; MI->setVisibility(IsPublic, ReadSourceLocation(F, Record, NextIndex)); - + if (RecType == PP_MACRO_FUNCTION_LIKE) { // Decode function-like macro info. bool isC99VarArgs = Record[NextIndex++]; @@ -1341,8 +1154,60 @@ void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) { PP.getPreprocessorAllocator()); } - // Finally, install the macro. - PP.setMacroInfo(II, MI, /*LoadedFromAST=*/true); + if (DeserializationListener) + DeserializationListener->MacroRead(GlobalID, MI); + + // If an update record marked this as undefined, do so now. + // FIXME: Only if the submodule this update came from is visible? + MacroUpdatesMap::iterator Update = MacroUpdates.find(GlobalID); + if (Update != MacroUpdates.end()) { + if (MI->getUndefLoc().isInvalid()) { + for (unsigned I = 0, N = Update->second.size(); I != N; ++I) { + bool Hidden = false; + if (unsigned SubmoduleID = Update->second[I].first) { + if (Module *Owner = getSubmodule(SubmoduleID)) { + if (Owner->NameVisibility == Module::Hidden) { + // Note that this #undef is hidden. + Hidden = true; + + // Record this hiding for later. 
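ReadMacroRecord now hands the final PP.addLoadedMacroInfo() call to a small RAII object, so the macro is installed only after its body tokens have been attached, even on the early-return paths. The same shape in isolation, with toy types standing in for Preprocessor and MacroInfo:

    #include <iostream>
    #include <string>

    struct MacroInfo { std::string Name; int NumTokens; };

    // Install the macro when the enclosing scope ends, mirroring the
    // intent of AddLoadedMacroInfoRAII in ReadMacroRecord.
    struct InstallMacroOnExit {
      MacroInfo *MI;
      InstallMacroOnExit() : MI(0) {}
      ~InstallMacroOnExit() {
        if (MI)
          std::cout << "installed " << MI->Name << " with "
                    << MI->NumTokens << " tokens\n";
      }
    };

    static void readMacroRecord(bool BodyIsPresent) {
      MacroInfo MI = { "FOO", 0 };
      InstallMacroOnExit Install;
      Install.MI = &MI;            // remember what to install
      if (!BodyIsPresent)
        return;                    // early return: destructor still installs
      MI.NumTokens = 3;            // ...after the tokens were attached
    }

    int main() {
      readMacroRecord(true);
      readMacroRecord(false);
      return 0;
    }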
+ HiddenNamesMap[Owner].push_back( + HiddenName(II, MI, Update->second[I].second.UndefLoc)); + } + } + } + + if (!Hidden) { + MI->setUndefLoc(Update->second[I].second.UndefLoc); + if (PPMutationListener *Listener = PP.getPPMutationListener()) + Listener->UndefinedMacro(MI); + break; + } + } + } + MacroUpdates.erase(Update); + } + + // Determine whether this macro definition is visible. + bool Hidden = !MI->isPublic(); + if (!Hidden && GlobalSubmoduleID) { + if (Module *Owner = getSubmodule(GlobalSubmoduleID)) { + if (Owner->NameVisibility == Module::Hidden) { + // The owning module is not visible, and this macro definition + // should not be, either. + Hidden = true; + + // Note that this macro definition was hidden because its owning + // module is not yet visible. + HiddenNamesMap[Owner].push_back(HiddenName(II, MI)); + } + } + } + MI->setHidden(Hidden); + + // Make sure we install the macro once we're done. + AddLoadedMacroInfo.MI = MI; + AddLoadedMacroInfo.II = II; // Remember that we saw this macro last so that we add the tokens that // form its body to it. @@ -1451,18 +1316,16 @@ HeaderFileInfoTrait::ReadData(const internal_key_type, const unsigned char *d, return HFI; } -void ASTReader::setIdentifierIsMacro(IdentifierInfo *II, ModuleFile &F, - uint64_t LocalOffset, bool Visible) { - if (Visible) { - // Note that this identifier has a macro definition. - II->setHasMacroDefinition(true); - } - - // Adjust the offset to a global offset. - UnreadMacroRecordOffsets[II] = F.GlobalBitOffset + LocalOffset; +void ASTReader::setIdentifierIsMacro(IdentifierInfo *II, ArrayRef IDs){ + II->setHadMacroDefinition(true); + assert(NumCurrentElementsDeserializing > 0 &&"Missing deserialization guard"); + PendingMacroIDs[II].append(IDs.begin(), IDs.end()); } void ASTReader::ReadDefinedMacros() { + // Note that we are loading defined macros. + Deserializing Macros(this); + for (ModuleReverseIterator I = ModuleMgr.rbegin(), E = ModuleMgr.rend(); I != E; ++I) { llvm::BitstreamCursor &MacroCursor = (*I)->MacroCursor; @@ -1514,26 +1377,6 @@ void ASTReader::ReadDefinedMacros() { } } } - - // Drain the unread macro-record offsets map. - while (!UnreadMacroRecordOffsets.empty()) - LoadMacroDefinition(UnreadMacroRecordOffsets.begin()); -} - -void ASTReader::LoadMacroDefinition( - llvm::DenseMap::iterator Pos) { - assert(Pos != UnreadMacroRecordOffsets.end() && "Unknown macro definition"); - uint64_t Offset = Pos->second; - UnreadMacroRecordOffsets.erase(Pos); - - RecordLocation Loc = getLocalBitOffset(Offset); - ReadMacroRecord(*Loc.F, Loc.Offset); -} - -void ASTReader::LoadMacroDefinition(IdentifierInfo *II) { - llvm::DenseMap::iterator Pos - = UnreadMacroRecordOffsets.find(II); - LoadMacroDefinition(Pos); } namespace { @@ -1582,6 +1425,9 @@ namespace { } void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) { + // Note that we are loading an identifier. + Deserializing AnIdentifier(this); + unsigned PriorGeneration = 0; if (getContext().getLangOpts().Modules) PriorGeneration = IdentifierGeneration[&II]; @@ -1602,14 +1448,132 @@ void ASTReader::markIdentifierUpToDate(IdentifierInfo *II) { IdentifierGeneration[II] = CurrentGeneration; } +llvm::PointerIntPair +ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) { + // If this ID is bogus, just return an empty input file. + if (ID == 0 || ID > F.InputFilesLoaded.size()) + return InputFile(); + + // If we've already loaded this input file, return it. 
+ if (F.InputFilesLoaded[ID-1].getPointer()) + return F.InputFilesLoaded[ID-1]; + + // Go find this input file. + llvm::BitstreamCursor &Cursor = F.InputFilesCursor; + SavedStreamPosition SavedPosition(Cursor); + Cursor.JumpToBit(F.InputFileOffsets[ID-1]); + + unsigned Code = Cursor.ReadCode(); + RecordData Record; + const char *BlobStart = 0; + unsigned BlobLen = 0; + switch ((InputFileRecordTypes)Cursor.ReadRecord(Code, Record, + &BlobStart, &BlobLen)) { + case INPUT_FILE: { + unsigned StoredID = Record[0]; + assert(ID == StoredID && "Bogus stored ID or offset"); + (void)StoredID; + off_t StoredSize = (off_t)Record[1]; + time_t StoredTime = (time_t)Record[2]; + bool Overridden = (bool)Record[3]; + + // Get the file entry for this input file. + StringRef OrigFilename(BlobStart, BlobLen); + std::string Filename = OrigFilename; + MaybeAddSystemRootToFilename(F, Filename); + const FileEntry *File + = Overridden? FileMgr.getVirtualFile(Filename, StoredSize, StoredTime) + : FileMgr.getFile(Filename, /*OpenFile=*/false); + + // If we didn't find the file, resolve it relative to the + // original directory from which this AST file was created. + if (File == 0 && !F.OriginalDir.empty() && !CurrentDir.empty() && + F.OriginalDir != CurrentDir) { + std::string Resolved = resolveFileRelativeToOriginalDir(Filename, + F.OriginalDir, + CurrentDir); + if (!Resolved.empty()) + File = FileMgr.getFile(Resolved); + } + + // For an overridden file, create a virtual file with the stored + // size/timestamp. + if (Overridden && File == 0) { + File = FileMgr.getVirtualFile(Filename, StoredSize, StoredTime); + } + + if (File == 0) { + if (Complain) { + std::string ErrorStr = "could not find file '"; + ErrorStr += Filename; + ErrorStr += "' referenced by AST file"; + Error(ErrorStr.c_str()); + } + return InputFile(); + } + + // Note that we've loaded this input file. + F.InputFilesLoaded[ID-1] = InputFile(File, Overridden); + + // Check if there was a request to override the contents of the file + // that was part of the precompiled header. Overridding such a file + // can lead to problems when lexing using the source locations from the + // PCH. + SourceManager &SM = getSourceManager(); + if (!Overridden && SM.isFileOverridden(File)) { + Error(diag::err_fe_pch_file_overridden, Filename); + // After emitting the diagnostic, recover by disabling the override so + // that the original file will be used. + SM.disableFileContentsOverride(File); + // The FileEntry is a virtual file entry with the size of the contents + // that would override the original contents. Set it to the original's + // size/time. + FileMgr.modifyFileEntry(const_cast(File), + StoredSize, StoredTime); + } + + // For an overridden file, there is nothing to validate. + if (Overridden) + return InputFile(File, Overridden); + + // The stat info from the FileEntry came from the cached stat + // info of the PCH, so we cannot trust it. + struct stat StatBuf; + if (::stat(File->getName(), &StatBuf) != 0) { + StatBuf.st_size = File->getSize(); + StatBuf.st_mtime = File->getModificationTime(); + } + + if ((StoredSize != StatBuf.st_size +#if !defined(LLVM_ON_WIN32) + // In our regression testing, the Windows file system seems to + // have inconsistent modification times that sometimes + // erroneously trigger this error-handling path. 
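getInputFile re-validates every input file by comparing the size and modification time stored in the INPUT_FILE record against a fresh stat(), skipping the mtime comparison on Windows where it proved unreliable. A reduced sketch of that check (POSIX stat(); _WIN32 stands in here for the LLVM_ON_WIN32 guard, and the stored values in main are placeholders, not data from a real AST file):

    #include <cstdio>
    #include <ctime>
    #include <sys/stat.h>

    // Return true if the on-disk file no longer matches what the AST file
    // recorded, i.e. the AST file is out of date for this input.
    static bool inputFileChanged(const char *Path, off_t StoredSize,
                                 time_t StoredTime) {
      struct stat StatBuf;
      if (::stat(Path, &StatBuf) != 0)
        return true;                    // a missing file counts as a mismatch
      if (StoredSize != StatBuf.st_size)
        return true;
    #if !defined(_WIN32)
      // Modification times proved too noisy to trust on Windows file systems.
      if (StoredTime != StatBuf.st_mtime)
        return true;
    #endif
      return false;
    }

    int main(int argc, char **argv) {
      if (argc < 2) {
        std::fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 2;
      }
      // Stored size/time of 0 will almost always report a change; a real
      // reader uses the values serialized in the INPUT_FILE record.
      std::printf("%s: %s\n", argv[1],
                  inputFileChanged(argv[1], 0, 0) ? "changed" : "unchanged");
      return 0;
    }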
+ || StoredTime != StatBuf.st_mtime +#endif + )) { + if (Complain) + Error(diag::err_fe_pch_file_modified, Filename); + + return InputFile(); + } + + return InputFile(File, Overridden); + } + } + + return InputFile(); +} + const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) { + ModuleFile &M = ModuleMgr.getPrimaryModule(); std::string Filename = filenameStrRef; - MaybeAddSystemRootToFilename(Filename); + MaybeAddSystemRootToFilename(M, Filename); const FileEntry *File = FileMgr.getFile(Filename); - if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() && - OriginalDir != CurrentDir) { + if (File == 0 && !M.OriginalDir.empty() && !CurrentDir.empty() && + M.OriginalDir != CurrentDir) { std::string resolved = resolveFileRelativeToOriginalDir(Filename, - OriginalDir, + M.OriginalDir, CurrentDir); if (!resolved.empty()) File = FileMgr.getFile(resolved); @@ -1621,9 +1585,10 @@ const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) { /// \brief If we are loading a relocatable PCH file, and the filename is /// not an absolute path, add the system root to the beginning of the file /// name. -void ASTReader::MaybeAddSystemRootToFilename(std::string &Filename) { +void ASTReader::MaybeAddSystemRootToFilename(ModuleFile &M, + std::string &Filename) { // If this is not a relocatable PCH file, there's nothing to do. - if (!RelocatablePCH) + if (!M.RelocatablePCH) return; if (Filename.empty() || llvm::sys::path::is_absolute(Filename)) @@ -1643,47 +1608,244 @@ void ASTReader::MaybeAddSystemRootToFilename(std::string &Filename) { } ASTReader::ASTReadResult -ASTReader::ReadASTBlock(ModuleFile &F) { +ASTReader::ReadControlBlock(ModuleFile &F, + llvm::SmallVectorImpl &Loaded, + unsigned ClientLoadCapabilities) { llvm::BitstreamCursor &Stream = F.Stream; - if (Stream.EnterSubBlock(AST_BLOCK_ID)) { + if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) { Error("malformed block record in AST file"); return Failure; } - // Read all of the records and blocks for the ASt file. + // Read all of the records and blocks in the control block. RecordData Record; while (!Stream.AtEndOfStream()) { unsigned Code = Stream.ReadCode(); if (Code == llvm::bitc::END_BLOCK) { if (Stream.ReadBlockEnd()) { - Error("error at end of module block in AST file"); + Error("error at end of control block in AST file"); return Failure; } + // Validate all of the input files. + if (!DisableValidation) { + bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0; + for (unsigned I = 0, N = Record[0]; I < N; ++I) + if (!getInputFile(F, I+1, Complain).getPointer()) + return OutOfDate; + } + return Success; } if (Code == llvm::bitc::ENTER_SUBBLOCK) { switch (Stream.ReadSubBlockID()) { - case DECLTYPES_BLOCK_ID: - // We lazily load the decls block, but we want to set up the - // DeclsCursor cursor to point into it. Clone our current bitcode - // cursor to it, enter the block and read the abbrevs in that block. - // With the main cursor, we just skip over it. - F.DeclsCursor = Stream; - if (Stream.SkipBlock() || // Skip with the main cursor. 
+ case INPUT_FILES_BLOCK_ID: + F.InputFilesCursor = Stream; + if (Stream.SkipBlock() || // Skip with the main cursor + // Read the abbreviations + ReadBlockAbbrevs(F.InputFilesCursor, INPUT_FILES_BLOCK_ID)) { + Error("malformed block record in AST file"); + return Failure; + } + continue; + + default: + if (!Stream.SkipBlock()) + continue; + break; + } + + Error("malformed block record in AST file"); + return Failure; + } + + if (Code == llvm::bitc::DEFINE_ABBREV) { + Stream.ReadAbbrevRecord(); + continue; + } + + // Read and process a record. + Record.clear(); + const char *BlobStart = 0; + unsigned BlobLen = 0; + switch ((ControlRecordTypes)Stream.ReadRecord(Code, Record, + &BlobStart, &BlobLen)) { + case METADATA: { + if (Record[0] != VERSION_MAJOR && !DisableValidation) { + if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0) + Diag(Record[0] < VERSION_MAJOR? diag::warn_pch_version_too_old + : diag::warn_pch_version_too_new); + return VersionMismatch; + } + + bool hasErrors = Record[5]; + if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) { + Diag(diag::err_pch_with_compiler_errors); + return HadErrors; + } + + F.RelocatablePCH = Record[4]; + + const std::string &CurBranch = getClangFullRepositoryVersion(); + StringRef ASTBranch(BlobStart, BlobLen); + if (StringRef(CurBranch) != ASTBranch && !DisableValidation) { + if ((ClientLoadCapabilities & ARR_VersionMismatch) == 0) + Diag(diag::warn_pch_different_branch) << ASTBranch << CurBranch; + return VersionMismatch; + } + break; + } + + case IMPORTS: { + // Load each of the imported PCH files. + unsigned Idx = 0, N = Record.size(); + while (Idx < N) { + // Read information about the AST file. + ModuleKind ImportedKind = (ModuleKind)Record[Idx++]; + unsigned Length = Record[Idx++]; + SmallString<128> ImportedFile(Record.begin() + Idx, + Record.begin() + Idx + Length); + Idx += Length; + + // Load the AST file. + switch(ReadASTCore(ImportedFile, ImportedKind, &F, Loaded, + ClientLoadCapabilities)) { + case Failure: return Failure; + // If we have to ignore the dependency, we'll have to ignore this too. 
+ case OutOfDate: return OutOfDate; + case VersionMismatch: return VersionMismatch; + case ConfigurationMismatch: return ConfigurationMismatch; + case HadErrors: return HadErrors; + case Success: break; + } + } + break; + } + + case LANGUAGE_OPTIONS: { + bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch) == 0; + if (Listener && &F == *ModuleMgr.begin() && + ParseLanguageOptions(Record, Complain, *Listener) && + !DisableValidation) + return ConfigurationMismatch; + break; + } + + case TARGET_OPTIONS: { + bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0; + if (Listener && &F == *ModuleMgr.begin() && + ParseTargetOptions(Record, Complain, *Listener) && + !DisableValidation) + return ConfigurationMismatch; + break; + } + + case DIAGNOSTIC_OPTIONS: { + bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0; + if (Listener && &F == *ModuleMgr.begin() && + ParseDiagnosticOptions(Record, Complain, *Listener) && + !DisableValidation) + return ConfigurationMismatch; + break; + } + + case FILE_SYSTEM_OPTIONS: { + bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0; + if (Listener && &F == *ModuleMgr.begin() && + ParseFileSystemOptions(Record, Complain, *Listener) && + !DisableValidation) + return ConfigurationMismatch; + break; + } + + case HEADER_SEARCH_OPTIONS: { + bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0; + if (Listener && &F == *ModuleMgr.begin() && + ParseHeaderSearchOptions(Record, Complain, *Listener) && + !DisableValidation) + return ConfigurationMismatch; + break; + } + + case PREPROCESSOR_OPTIONS: { + bool Complain = (ClientLoadCapabilities & ARR_ConfigurationMismatch)==0; + if (Listener && &F == *ModuleMgr.begin() && + ParsePreprocessorOptions(Record, Complain, *Listener, + SuggestedPredefines) && + !DisableValidation) + return ConfigurationMismatch; + break; + } + + case ORIGINAL_FILE: + F.OriginalSourceFileID = FileID::get(Record[0]); + F.ActualOriginalSourceFileName.assign(BlobStart, BlobLen); + F.OriginalSourceFileName = F.ActualOriginalSourceFileName; + MaybeAddSystemRootToFilename(F, F.OriginalSourceFileName); + break; + + case ORIGINAL_PCH_DIR: + F.OriginalDir.assign(BlobStart, BlobLen); + break; + + case INPUT_FILE_OFFSETS: + F.InputFileOffsets = (const uint32_t *)BlobStart; + F.InputFilesLoaded.resize(Record[0]); + break; + } + } + + Error("premature end of bitstream in AST file"); + return Failure; +} + +bool ASTReader::ReadASTBlock(ModuleFile &F) { + llvm::BitstreamCursor &Stream = F.Stream; + + if (Stream.EnterSubBlock(AST_BLOCK_ID)) { + Error("malformed block record in AST file"); + return true; + } + + // Read all of the records and blocks for the AST file. + RecordData Record; + while (!Stream.AtEndOfStream()) { + unsigned Code = Stream.ReadCode(); + if (Code == llvm::bitc::END_BLOCK) { + if (Stream.ReadBlockEnd()) { + Error("error at end of module block in AST file"); + return true; + } + + DeclContext *DC = Context.getTranslationUnitDecl(); + if (!DC->hasExternalVisibleStorage() && DC->hasExternalLexicalStorage()) + DC->setMustBuildLookupTable(); + + return false; + } + + if (Code == llvm::bitc::ENTER_SUBBLOCK) { + switch (Stream.ReadSubBlockID()) { + case DECLTYPES_BLOCK_ID: + // We lazily load the decls block, but we want to set up the + // DeclsCursor cursor to point into it. Clone our current bitcode + // cursor to it, enter the block and read the abbrevs in that block. + // With the main cursor, we just skip over it. 
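Throughout ReadControlBlock above, recoverable outcomes (OutOfDate, VersionMismatch, ConfigurationMismatch, HadErrors) are reported back to the caller, and the diagnostic is emitted only when the caller's ClientLoadCapabilities say it cannot handle that particular failure. The pattern, reduced to stand-in enums (the real ARR_* bits and ASTReadResult live in ASTReader.h; the values below are illustrative only):

    #include <cstdio>

    // Stand-ins for ASTReader::ASTReadResult and the ARR_* capability bits.
    enum LoadResult { Success, Failure, OutOfDate, VersionMismatch,
                      ConfigurationMismatch, HadErrors };
    enum LoadCapabilities { ARR_None = 0, ARR_OutOfDate = 1,
                            ARR_VersionMismatch = 2,
                            ARR_ConfigurationMismatch = 4 };

    static LoadResult readControlBlock(bool Stale, unsigned Caps) {
      if (Stale) {
        // Complain only if the client cannot recover from an out-of-date file.
        bool Complain = (Caps & ARR_OutOfDate) == 0;
        if (Complain)
          std::fprintf(stderr, "error: AST file is out of date\n");
        return OutOfDate;
      }
      return Success;
    }

    int main() {
      // A client that can rebuild stale modules suppresses the diagnostic.
      LoadResult R1 = readControlBlock(true, ARR_OutOfDate);
      // A plain PCH consumer gets the error reported.
      LoadResult R2 = readControlBlock(true, ARR_None);
      std::printf("results: %d %d\n", (int)R1, (int)R2);
      return 0;
    }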
+ F.DeclsCursor = Stream; + if (Stream.SkipBlock() || // Skip with the main cursor. // Read the abbrevs. ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) { Error("malformed block record in AST file"); - return Failure; + return true; } break; case DECL_UPDATES_BLOCK_ID: if (Stream.SkipBlock()) { Error("malformed block record in AST file"); - return Failure; + return true; } break; @@ -1695,7 +1857,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { if (Stream.SkipBlock() || ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) { Error("malformed block record in AST file"); - return Failure; + return true; } F.MacroStartOffset = F.MacroCursor.GetCurrentBitNo(); break; @@ -1706,7 +1868,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { ReadBlockAbbrevs(F.PreprocessorDetailCursor, PREPROCESSOR_DETAIL_BLOCK_ID)) { Error("malformed preprocessor detail record in AST file"); - return Failure; + return true; } F.PreprocessorDetailStartOffset = F.PreprocessorDetailCursor.GetCurrentBitNo(); @@ -1718,31 +1880,13 @@ ASTReader::ReadASTBlock(ModuleFile &F) { break; case SOURCE_MANAGER_BLOCK_ID: - switch (ReadSourceManagerBlock(F)) { - case Success: - break; - - case Failure: - Error("malformed source manager block in AST file"); - return Failure; - - case IgnorePCH: - return IgnorePCH; - } + if (ReadSourceManagerBlock(F)) + return true; break; case SUBMODULE_BLOCK_ID: - switch (ReadSubmoduleBlock(F)) { - case Success: - break; - - case Failure: - Error("malformed submodule block in AST file"); - return Failure; - - case IgnorePCH: - return IgnorePCH; - } + if (ReadSubmoduleBlock(F)) + return true; break; case COMMENTS_BLOCK_ID: { @@ -1750,7 +1894,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { if (Stream.SkipBlock() || ReadBlockAbbrevs(C, COMMENTS_BLOCK_ID)) { Error("malformed comments block in AST file"); - return Failure; + return true; } CommentsCursors.push_back(std::make_pair(C, &F)); break; @@ -1760,7 +1904,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { if (!Stream.SkipBlock()) break; Error("malformed block record in AST file"); - return Failure; + return true; } continue; } @@ -1779,54 +1923,10 @@ ASTReader::ReadASTBlock(ModuleFile &F) { default: // Default behavior: ignore. break; - case METADATA: { - if (Record[0] != VERSION_MAJOR && !DisableValidation) { - Diag(Record[0] < VERSION_MAJOR? diag::warn_pch_version_too_old - : diag::warn_pch_version_too_new); - return IgnorePCH; - } - - bool hasErrors = Record[5]; - if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) { - Diag(diag::err_pch_with_compiler_errors); - return IgnorePCH; - } - - RelocatablePCH = Record[4]; - if (Listener) { - std::string TargetTriple(BlobStart, BlobLen); - if (Listener->ReadTargetTriple(TargetTriple)) - return IgnorePCH; - } - break; - } - - case IMPORTS: { - // Load each of the imported PCH files. - unsigned Idx = 0, N = Record.size(); - while (Idx < N) { - // Read information about the AST file. - ModuleKind ImportedKind = (ModuleKind)Record[Idx++]; - unsigned Length = Record[Idx++]; - SmallString<128> ImportedFile(Record.begin() + Idx, - Record.begin() + Idx + Length); - Idx += Length; - - // Load the AST file. - switch(ReadASTCore(ImportedFile, ImportedKind, &F)) { - case Failure: return Failure; - // If we have to ignore the dependency, we'll have to ignore this too. 
- case IgnorePCH: return IgnorePCH; - case Success: break; - } - } - break; - } - case TYPE_OFFSET: { if (F.LocalNumTypes != 0) { Error("duplicate TYPE_OFFSET record in AST file"); - return Failure; + return true; } F.TypeOffsets = (const uint32_t *)BlobStart; F.LocalNumTypes = Record[0]; @@ -1850,7 +1950,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case DECL_OFFSET: { if (F.LocalNumDecls != 0) { Error("duplicate DECL_OFFSET record in AST file"); - return Failure; + return true; } F.DeclOffsets = (const DeclOffset *)BlobStart; F.LocalNumDecls = Record[0]; @@ -1904,11 +2004,6 @@ ASTReader::ReadASTBlock(ModuleFile &F) { break; } - case LANGUAGE_OPTIONS: - if (ParseLanguageOptions(Record) && !DisableValidation) - return IgnorePCH; - break; - case IDENTIFIER_TABLE: F.IdentifierTableData = BlobStart; if (Record[0]) { @@ -1925,7 +2020,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case IDENTIFIER_OFFSET: { if (F.LocalNumIdentifiers != 0) { Error("duplicate IDENTIFIER_OFFSET record in AST file"); - return Failure; + return true; } F.IdentifierOffsets = (const uint32_t *)BlobStart; F.LocalNumIdentifiers = Record[0]; @@ -1949,7 +2044,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { } break; } - + case EXTERNAL_DEFINITIONS: for (unsigned I = 0, N = Record.size(); I != N; ++I) ExternalDefinitions.push_back(getGlobalDeclID(F, Record[I])); @@ -1980,7 +2075,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case WEAK_UNDECLARED_IDENTIFIERS: if (Record.size() % 4 != 0) { Error("invalid weak identifiers record"); - return Failure; + return true; } // FIXME: Ignore weak undeclared identifiers from non-original PCH @@ -2050,11 +2145,12 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case PP_COUNTER_VALUE: if (!Record.empty() && Listener) - Listener->ReadCounter(Record[0]); + Listener->ReadCounter(F, Record[0]); break; case FILE_SORTED_DECLS: F.FileSortedDecls = (const DeclID *)BlobStart; + F.NumFileSortedDecls = Record[0]; break; case SOURCE_LOCATION_OFFSETS: { @@ -2098,7 +2194,9 @@ ASTReader::ReadASTBlock(ModuleFile &F) { ContinuousRangeMap::Builder SLocRemap(F.SLocRemap); ContinuousRangeMap::Builder IdentifierRemap(F.IdentifierRemap); - ContinuousRangeMap::Builder + ContinuousRangeMap::Builder + MacroRemap(F.MacroRemap); + ContinuousRangeMap::Builder PreprocessedEntityRemap(F.PreprocessedEntityRemap); ContinuousRangeMap::Builder SubmoduleRemap(F.SubmoduleRemap); @@ -2114,11 +2212,12 @@ ASTReader::ReadASTBlock(ModuleFile &F) { ModuleFile *OM = ModuleMgr.lookup(Name); if (!OM) { Error("SourceLocation remap refers to unknown module"); - return Failure; + return true; } uint32_t SLocOffset = io::ReadUnalignedLE32(Data); uint32_t IdentifierIDOffset = io::ReadUnalignedLE32(Data); + uint32_t MacroIDOffset = io::ReadUnalignedLE32(Data); uint32_t PreprocessedEntityIDOffset = io::ReadUnalignedLE32(Data); uint32_t SubmoduleIDOffset = io::ReadUnalignedLE32(Data); uint32_t SelectorIDOffset = io::ReadUnalignedLE32(Data); @@ -2131,6 +2230,8 @@ ASTReader::ReadASTBlock(ModuleFile &F) { IdentifierRemap.insert( std::make_pair(IdentifierIDOffset, OM->BaseIdentifierID - IdentifierIDOffset)); + MacroRemap.insert(std::make_pair(MacroIDOffset, + OM->BaseMacroID - MacroIDOffset)); PreprocessedEntityRemap.insert( std::make_pair(PreprocessedEntityIDOffset, OM->BasePreprocessedEntityID - PreprocessedEntityIDOffset)); @@ -2152,12 +2253,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case SOURCE_MANAGER_LINE_TABLE: if (ParseLineTable(F, Record)) - return Failure; - break; - - case FILE_SOURCE_LOCATION_OFFSETS: - F.SLocFileOffsets = (const uint32_t 
*)BlobStart; - F.LocalNumSLocFileEntries = Record[0]; + return true; break; case SOURCE_LOCATION_PRELOADS: { @@ -2165,25 +2261,13 @@ ASTReader::ReadASTBlock(ModuleFile &F) { // which is based off F.SLocEntryBaseID. if (!F.PreloadSLocEntries.empty()) { Error("Multiple SOURCE_LOCATION_PRELOADS records in AST file"); - return Failure; + return true; } F.PreloadSLocEntries.swap(Record); break; } - case STAT_CACHE: { - if (!DisableStatCache) { - ASTStatCache *MyStatCache = - new ASTStatCache((const unsigned char *)BlobStart + Record[0], - (const unsigned char *)BlobStart, - NumStatHits, NumStatMisses); - FileMgr.addStatCache(MyStatCache); - F.StatCache = MyStatCache; - } - break; - } - case EXT_VECTOR_DECLS: for (unsigned I = 0, N = Record.size(); I != N; ++I) ExtVectorDecls.push_back(getGlobalDeclID(F, Record[I])); @@ -2192,7 +2276,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case VTABLE_USES: if (Record.size() % 3 != 0) { Error("Invalid VTABLE_USES record"); - return Failure; + return true; } // Later tables overwrite earlier ones. @@ -2215,13 +2299,15 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case PENDING_IMPLICIT_INSTANTIATIONS: if (PendingInstantiations.size() % 2 != 0) { + Error("Invalid existing PendingInstantiations"); + return true; + } + + if (Record.size() % 2 != 0) { Error("Invalid PENDING_IMPLICIT_INSTANTIATIONS block"); - return Failure; + return true; } - - // Later lists of pending instantiations overwrite earlier ones. - // FIXME: This is most certainly wrong for modules. - PendingInstantiations.clear(); + for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) { PendingInstantiations.push_back(getGlobalDeclID(F, Record[I++])); PendingInstantiations.push_back( @@ -2237,34 +2323,6 @@ ASTReader::ReadASTBlock(ModuleFile &F) { SemaDeclRefs.push_back(getGlobalDeclID(F, Record[I])); break; - case ORIGINAL_FILE_NAME: - // The primary AST will be the last to get here, so it will be the one - // that's used. - ActualOriginalFileName.assign(BlobStart, BlobLen); - OriginalFileName = ActualOriginalFileName; - MaybeAddSystemRootToFilename(OriginalFileName); - break; - - case ORIGINAL_FILE_ID: - OriginalFileID = FileID::get(Record[0]); - break; - - case ORIGINAL_PCH_DIR: - // The primary AST will be the last to get here, so it will be the one - // that's used. 
- OriginalDir.assign(BlobStart, BlobLen); - break; - - case VERSION_CONTROL_BRANCH_REVISION: { - const std::string &CurBranch = getClangFullRepositoryVersion(); - StringRef ASTBranch(BlobStart, BlobLen); - if (StringRef(CurBranch) != ASTBranch && !DisableValidation) { - Diag(diag::warn_pch_different_branch) << ASTBranch << CurBranch; - return IgnorePCH; - } - break; - } - case PPD_ENTITIES_OFFSETS: { F.PreprocessedEntityOffsets = (const PPEntityOffset *)BlobStart; assert(BlobLen % sizeof(PPEntityOffset) == 0); @@ -2300,7 +2358,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case DECL_UPDATE_OFFSETS: { if (Record.size() % 2 != 0) { Error("invalid DECL_UPDATE_OFFSETS block in AST file"); - return Failure; + return true; } for (unsigned I = 0, N = Record.size(); I != N; I += 2) DeclUpdateOffsets[getGlobalDeclID(F, Record[I])] @@ -2311,7 +2369,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case DECL_REPLACEMENTS: { if (Record.size() % 3 != 0) { Error("invalid DECL_REPLACEMENTS block in AST file"); - return Failure; + return true; } for (unsigned I = 0, N = Record.size(); I != N; I += 3) ReplacedDecls[getGlobalDeclID(F, Record[I])] @@ -2322,7 +2380,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case OBJC_CATEGORIES_MAP: { if (F.LocalNumObjCCategoriesInMap != 0) { Error("duplicate OBJC_CATEGORIES_MAP record in AST file"); - return Failure; + return true; } F.LocalNumObjCCategoriesInMap = Record[0]; @@ -2337,7 +2395,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case CXX_BASE_SPECIFIER_OFFSETS: { if (F.LocalNumCXXBaseSpecifiers != 0) { Error("duplicate CXX_BASE_SPECIFIER_OFFSETS record in AST file"); - return Failure; + return true; } F.LocalNumCXXBaseSpecifiers = Record[0]; @@ -2347,11 +2405,6 @@ ASTReader::ReadASTBlock(ModuleFile &F) { } case DIAG_PRAGMA_MAPPINGS: - if (Record.size() % 2 != 0) { - Error("invalid DIAG_USER_MAPPINGS block in AST file"); - return Failure; - } - if (F.PragmaDiagMappings.empty()) F.PragmaDiagMappings.swap(Record); else @@ -2428,7 +2481,7 @@ ASTReader::ReadASTBlock(ModuleFile &F) { case LOCAL_REDECLARATIONS_MAP: { if (F.LocalNumRedeclarationsInMap != 0) { Error("duplicate LOCAL_REDECLARATIONS_MAP record in AST file"); - return Failure; + return true; } F.LocalNumRedeclarationsInMap = Record[0]; @@ -2445,113 +2498,77 @@ ASTReader::ReadASTBlock(ModuleFile &F) { } break; } - } - } - Error("premature end of bitstream in AST file"); - return Failure; -} - -ASTReader::ASTReadResult ASTReader::validateFileEntries(ModuleFile &M) { - llvm::BitstreamCursor &SLocEntryCursor = M.SLocEntryCursor; - for (unsigned i = 0, e = M.LocalNumSLocFileEntries; i != e; ++i) { - SLocEntryCursor.JumpToBit(M.SLocFileOffsets[i]); - unsigned Code = SLocEntryCursor.ReadCode(); - if (Code == llvm::bitc::END_BLOCK || - Code == llvm::bitc::ENTER_SUBBLOCK || - Code == llvm::bitc::DEFINE_ABBREV) { - Error("incorrectly-formatted source location entry in AST file"); - return Failure; - } + case MACRO_OFFSET: { + if (F.LocalNumMacros != 0) { + Error("duplicate MACRO_OFFSET record in AST file"); + return true; + } + F.MacroOffsets = (const uint32_t *)BlobStart; + F.LocalNumMacros = Record[0]; + unsigned LocalBaseMacroID = Record[1]; + F.BaseMacroID = getTotalNumMacros(); - RecordData Record; - const char *BlobStart; - unsigned BlobLen; - switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) { - default: - Error("incorrectly-formatted source location entry in AST file"); - return Failure; + if (F.LocalNumMacros > 0) { + // Introduce the global -> local mapping for macros within this module. 
+ GlobalMacroMap.insert(std::make_pair(getTotalNumMacros() + 1, &F)); - case SM_SLOC_FILE_ENTRY: { - // If the buffer was overridden, the file need not exist. - if (Record[6]) - break; - - StringRef Filename(BlobStart, BlobLen); - const FileEntry *File = getFileEntry(Filename); + // Introduce the local -> global mapping for macros within this module. + F.MacroRemap.insertOrReplace( + std::make_pair(LocalBaseMacroID, + F.BaseMacroID - LocalBaseMacroID)); - if (File == 0) { - std::string ErrorStr = "could not find file '"; - ErrorStr += Filename; - ErrorStr += "' referenced by AST file"; - Error(ErrorStr.c_str()); - return IgnorePCH; - } - - if (Record.size() < 7) { - Error("source location entry is incorrect"); - return Failure; - } - - off_t StoredSize = (off_t)Record[4]; - time_t StoredTime = (time_t)Record[5]; - - // Check if there was a request to override the contents of the file - // that was part of the precompiled header. Overridding such a file - // can lead to problems when lexing using the source locations from the - // PCH. - SourceManager &SM = getSourceManager(); - if (SM.isFileOverridden(File)) { - Error(diag::err_fe_pch_file_overridden, Filename); - // After emitting the diagnostic, recover by disabling the override so - // that the original file will be used. - SM.disableFileContentsOverride(File); - // The FileEntry is a virtual file entry with the size of the contents - // that would override the original contents. Set it to the original's - // size/time. - FileMgr.modifyFileEntry(const_cast(File), - StoredSize, StoredTime); + MacrosLoaded.resize(MacrosLoaded.size() + F.LocalNumMacros); } + break; + } - // The stat info from the FileEntry came from the cached stat - // info of the PCH, so we cannot trust it. - struct stat StatBuf; - if (::stat(File->getName(), &StatBuf) != 0) { - StatBuf.st_size = File->getSize(); - StatBuf.st_mtime = File->getModificationTime(); - } + case MACRO_UPDATES: { + for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) { + MacroID ID = getGlobalMacroID(F, Record[I++]); + if (I == N) + break; - if ((StoredSize != StatBuf.st_size -#if !defined(LLVM_ON_WIN32) - // In our regression testing, the Windows file system seems to - // have inconsistent modification times that sometimes - // erroneously trigger this error-handling path. 
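The MACRO_OFFSET handling above gives macros the same local/global ID remapping used elsewhere in the reader: each module records its local base ID, the reader assigns a global base, and a (LocalBase, GlobalBase - LocalBase) entry translates local IDs on demand. A minimal sketch of that arithmetic with a plain std::map standing in for ContinuousRangeMap (names illustrative):

    #include <cassert>
    #include <cstdio>
    #include <map>

    // (first local ID covered) -> (offset to add to reach the global ID),
    // a simplified stand-in for the ContinuousRangeMap remap tables.
    typedef std::map<unsigned, int> RemapMap;

    static unsigned toGlobal(const RemapMap &Remap, unsigned LocalID) {
      // Find the remap entry with the largest key that is <= LocalID.
      RemapMap::const_iterator I = Remap.upper_bound(LocalID);
      assert(I != Remap.begin() && "no remap entry for this ID");
      --I;
      return (unsigned)((int)LocalID + I->second);
    }

    int main() {
      // Module B's local macro IDs start at 1, but 25 global IDs have
      // already been handed out, so its remap entry is (1, +25),
      // i.e. insertOrReplace(LocalBase, GlobalBase - LocalBase).
      RemapMap ModuleB;
      ModuleB[1] = 25;

      std::printf("local 3 in module B -> global %u\n", toGlobal(ModuleB, 3));
      return 0;
    }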
- || StoredTime != StatBuf.st_mtime -#endif - )) { - Error(diag::err_fe_pch_file_modified, Filename); - return IgnorePCH; + SourceLocation UndefLoc = ReadSourceLocation(F, Record, I); + SubmoduleID SubmoduleID = getGlobalSubmoduleID(F, Record[I++]);; + MacroUpdate Update; + Update.UndefLoc = UndefLoc; + MacroUpdates[ID].push_back(std::make_pair(SubmoduleID, Update)); } - break; } } } - - return Success; + Error("premature end of bitstream in AST file"); + return true; } void ASTReader::makeNamesVisible(const HiddenNames &Names) { for (unsigned I = 0, N = Names.size(); I != N; ++I) { - if (Decl *D = Names[I].dyn_cast()) - D->Hidden = false; - else { - IdentifierInfo *II = Names[I].get(); - if (!II->hasMacroDefinition()) { - II->setHasMacroDefinition(true); - if (DeserializationListener) - DeserializationListener->MacroVisible(II); + switch (Names[I].getKind()) { + case HiddenName::Declaration: + Names[I].getDecl()->Hidden = false; + break; + + case HiddenName::MacroVisibility: { + std::pair Macro = Names[I].getMacro(); + Macro.second->setHidden(!Macro.second->isPublic()); + if (Macro.second->isDefined()) { + PP.makeLoadedMacroInfoVisible(Macro.first, Macro.second); } + break; + } + + case HiddenName::MacroUndef: { + std::pair Macro = Names[I].getMacro(); + if (Macro.second->isDefined()) { + Macro.second->setUndefLoc(Names[I].getMacroUndefLoc()); + if (PPMutationListener *Listener = PP.getPPMutationListener()) + Listener->UndefinedMacro(Macro.second); + PP.makeLoadedMacroInfoVisible(Macro.first, Macro.second); + } + break; + } } } } @@ -2631,7 +2648,7 @@ void ASTReader::makeModuleVisible(Module *Mod, for (unsigned I = 0, N = Mod->Imports.size(); I != N; ++I) { Module *Imported = Mod->Imports[I]; - if (Visited.count(Imported)) + if (!Visited.insert(Imported)) continue; bool Acceptable = UnrestrictedWildcard; @@ -2649,32 +2666,62 @@ void ASTReader::makeModuleVisible(Module *Mod, if (!Acceptable) continue; - Visited.insert(Imported); Stack.push_back(Imported); } } } ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName, - ModuleKind Type) { + ModuleKind Type, + unsigned ClientLoadCapabilities) { // Bump the generation number. unsigned PreviousGeneration = CurrentGeneration++; - - switch(ReadASTCore(FileName, Type, /*ImportedBy=*/0)) { - case Failure: return Failure; - case IgnorePCH: return IgnorePCH; - case Success: break; + + unsigned NumModules = ModuleMgr.size(); + llvm::SmallVector Loaded; + switch(ASTReadResult ReadResult = ReadASTCore(FileName, Type, + /*ImportedBy=*/0, Loaded, + ClientLoadCapabilities)) { + case Failure: + case OutOfDate: + case VersionMismatch: + case ConfigurationMismatch: + case HadErrors: + ModuleMgr.removeModules(ModuleMgr.begin() + NumModules, ModuleMgr.end()); + return ReadResult; + + case Success: + break; } // Here comes stuff that we only do once the entire chain is loaded. - // Check the predefines buffers. - if (!DisableValidation && Type == MK_PCH && - // FIXME: CheckPredefinesBuffers also sets the SuggestedPredefines; - // if DisableValidation is true, defines that were set on command-line - // but not in the PCH file will not be added to SuggestedPredefines. - CheckPredefinesBuffers()) - return IgnorePCH; + // Load the AST blocks of all of the modules that we loaded. + for (llvm::SmallVectorImpl::iterator M = Loaded.begin(), + MEnd = Loaded.end(); + M != MEnd; ++M) { + ModuleFile &F = **M; + + // Read the AST block. 
+ if (ReadASTBlock(F)) + return Failure; + + // Once read, set the ModuleFile bit base offset and update the size in + // bits of all files we've seen. + F.GlobalBitOffset = TotalModulesSizeInBits; + TotalModulesSizeInBits += F.SizeInBits; + GlobalBitOffsetsMap.insert(std::make_pair(F.GlobalBitOffset, &F)); + + // Preload SLocEntries. + for (unsigned I = 0, N = F.PreloadSLocEntries.size(); I != N; ++I) { + int Index = int(F.PreloadSLocEntries[I] - 1) + F.SLocEntryBaseID; + // Load it through the SourceManager and don't call ReadSLocEntry() + // directly because the entry may have already been loaded in which case + // calling ReadSLocEntry() directly would trigger an assertion in + // SourceManager. + SourceMgr.getLoadedSLocEntryByID(Index); + } + } // Mark all of the identifiers in the identifier table as being out of date, // so that various accessors know to check the loaded modules when the @@ -2707,17 +2754,19 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName, if (DeserializationListener) DeserializationListener->ReaderInitialized(this); - if (!OriginalFileID.isInvalid()) { - OriginalFileID = FileID::get(ModuleMgr.getPrimaryModule().SLocEntryBaseID - + OriginalFileID.getOpaqueValue() - 1); + ModuleFile &PrimaryModule = ModuleMgr.getPrimaryModule(); + if (!PrimaryModule.OriginalSourceFileID.isInvalid()) { + PrimaryModule.OriginalSourceFileID + = FileID::get(PrimaryModule.SLocEntryBaseID + + PrimaryModule.OriginalSourceFileID.getOpaqueValue() - 1); - // If this AST file is a precompiled preamble, then set the preamble file ID - // of the source manager to the file source file from which the preamble was - // built. + // If this AST file is a precompiled preamble, then set the + // preamble file ID of the source manager to the file source file + // from which the preamble was built. if (Type == MK_Preamble) { - SourceMgr.setPreambleFileID(OriginalFileID); + SourceMgr.setPreambleFileID(PrimaryModule.OriginalSourceFileID); } else if (Type == MK_MainFile) { - SourceMgr.setMainFileID(OriginalFileID); + SourceMgr.setMainFileID(PrimaryModule.OriginalSourceFileID); } } @@ -2732,9 +2781,12 @@ ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName, return Success; } -ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName, - ModuleKind Type, - ModuleFile *ImportedBy) { +ASTReader::ASTReadResult +ASTReader::ReadASTCore(StringRef FileName, + ModuleKind Type, + ModuleFile *ImportedBy, + llvm::SmallVectorImpl &Loaded, + unsigned ClientLoadCapabilities) { ModuleFile *M; bool NewModule; std::string ErrorStr; @@ -2785,7 +2837,7 @@ ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName, unsigned BlockID = Stream.ReadSubBlockID(); - // We only know the AST subblock ID. + // We only know the control subblock ID. switch (BlockID) { case llvm::bitc::BLOCKINFO_BLOCK_ID: if (Stream.ReadBlockInfoBlock()) { @@ -2793,29 +2845,23 @@ ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName, return Failure; } break; - case AST_BLOCK_ID: - switch (ReadASTBlock(F)) { + case CONTROL_BLOCK_ID: + switch (ReadControlBlock(F, Loaded, ClientLoadCapabilities)) { case Success: break; - case Failure: - return Failure; - - case IgnorePCH: - // FIXME: We could consider reading through to the end of this - // AST block, skipping subblocks, to see if there are other - // AST blocks elsewhere. - - // FIXME: We can't clear loaded slocentries anymore. - //SourceMgr.ClearPreallocatedSLocEntries(); - - // Remove the stat cache. 
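ReadAST is now two-phase: ReadASTCore walks the import chain reading only control blocks and collects the new ModuleFiles in Loaded; only when every control block validates are the AST blocks read, and on any failure the freshly added modules are removed again. The validate-then-commit-or-roll-back shape, reduced to standard containers (the types and names here are illustrative, not ModuleManager's API):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct ModuleFile { std::string Name; bool ControlBlockOK; };

    static bool loadChain(std::vector<ModuleFile> &ModuleMgr,
                          const std::vector<ModuleFile> &Chain) {
      size_t NumModules = ModuleMgr.size();  // remember the rollback point
      std::vector<size_t> Loaded;

      // Phase 1: read and validate only the control blocks.
      for (size_t I = 0; I != Chain.size(); ++I) {
        ModuleMgr.push_back(Chain[I]);
        if (!ModuleMgr.back().ControlBlockOK) {
          // Roll back everything added by this (failed) load.
          ModuleMgr.erase(ModuleMgr.begin() + NumModules, ModuleMgr.end());
          return false;
        }
        Loaded.push_back(ModuleMgr.size() - 1);
      }

      // Phase 2: the chain is known-good; now read the AST blocks.
      for (size_t I = 0; I != Loaded.size(); ++I)
        std::printf("reading AST block of %s\n",
                    ModuleMgr[Loaded[I]].Name.c_str());
      return true;
    }

    int main() {
      std::vector<ModuleFile> Mgr;
      ModuleFile Good = { "a.pcm", true }, Bad = { "b.pcm", false };
      std::vector<ModuleFile> Chain;
      Chain.push_back(Good);
      Chain.push_back(Bad);
      std::printf("load %s, %zu modules kept\n",
                  loadChain(Mgr, Chain) ? "succeeded" : "failed", Mgr.size());
      return 0;
    }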
- if (F.StatCache) - FileMgr.removeStatCache((ASTStatCache*)F.StatCache); - - return IgnorePCH; + case Failure: return Failure; + case OutOfDate: return OutOfDate; + case VersionMismatch: return VersionMismatch; + case ConfigurationMismatch: return ConfigurationMismatch; + case HadErrors: return HadErrors; } break; + case AST_BLOCK_ID: + // Record that we've loaded this module. + Loaded.push_back(M); + return Success; + default: if (Stream.SkipBlock()) { Error("malformed block record in AST file"); @@ -2825,32 +2871,6 @@ ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName, } } - // Once read, set the ModuleFile bit base offset and update the size in - // bits of all files we've seen. - F.GlobalBitOffset = TotalModulesSizeInBits; - TotalModulesSizeInBits += F.SizeInBits; - GlobalBitOffsetsMap.insert(std::make_pair(F.GlobalBitOffset, &F)); - - // Make sure that the files this module was built against are still available. - if (!DisableValidation) { - switch(validateFileEntries(*M)) { - case Failure: return Failure; - case IgnorePCH: return IgnorePCH; - case Success: break; - } - } - - // Preload SLocEntries. - for (unsigned I = 0, N = M->PreloadSLocEntries.size(); I != N; ++I) { - int Index = int(M->PreloadSLocEntries[I] - 1) + F.SLocEntryBaseID; - // Load it through the SourceManager and don't call ReadSLocEntryRecord() - // directly because the entry may have already been loaded in which case - // calling ReadSLocEntryRecord() directly would trigger an assertion in - // SourceManager. - SourceMgr.getLoadedSLocEntryByID(Index); - } - - return Success; } @@ -3038,8 +3058,8 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName, // We only know the AST subblock ID. switch (BlockID) { - case AST_BLOCK_ID: - if (Stream.EnterSubBlock(AST_BLOCK_ID)) { + case CONTROL_BLOCK_ID: + if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) { Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName; return std::string(); } @@ -3071,19 +3091,191 @@ std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName, Record.clear(); const char *BlobStart = 0; unsigned BlobLen = 0; - if (Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen) - == ORIGINAL_FILE_NAME) + if (Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen) == ORIGINAL_FILE) return std::string(BlobStart, BlobLen); } return std::string(); } -ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { +namespace { + class SimplePCHValidator : public ASTReaderListener { + const LangOptions &ExistingLangOpts; + const TargetOptions &ExistingTargetOpts; + const PreprocessorOptions &ExistingPPOpts; + FileManager &FileMgr; + + public: + SimplePCHValidator(const LangOptions &ExistingLangOpts, + const TargetOptions &ExistingTargetOpts, + const PreprocessorOptions &ExistingPPOpts, + FileManager &FileMgr) + : ExistingLangOpts(ExistingLangOpts), + ExistingTargetOpts(ExistingTargetOpts), + ExistingPPOpts(ExistingPPOpts), + FileMgr(FileMgr) + { + } + + virtual bool ReadLanguageOptions(const LangOptions &LangOpts, + bool Complain) { + return checkLanguageOptions(ExistingLangOpts, LangOpts, 0); + } + virtual bool ReadTargetOptions(const TargetOptions &TargetOpts, + bool Complain) { + return checkTargetOptions(ExistingTargetOpts, TargetOpts, 0); + } + virtual bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts, + bool Complain, + std::string &SuggestedPredefines) { + return checkPreprocessorOptions(ExistingPPOpts, PPOpts, 0, FileMgr, + SuggestedPredefines); + } + }; +} + +bool 
ASTReader::readASTFileControlBlock(StringRef Filename, + FileManager &FileMgr, + ASTReaderListener &Listener) { + // Open the AST file. + std::string ErrStr; + OwningPtr Buffer; + Buffer.reset(FileMgr.getBufferForFile(Filename, &ErrStr)); + if (!Buffer) { + return true; + } + + // Initialize the stream + llvm::BitstreamReader StreamFile; + llvm::BitstreamCursor Stream; + StreamFile.init((const unsigned char *)Buffer->getBufferStart(), + (const unsigned char *)Buffer->getBufferEnd()); + Stream.init(StreamFile); + + // Sniff for the signature. + if (Stream.Read(8) != 'C' || + Stream.Read(8) != 'P' || + Stream.Read(8) != 'C' || + Stream.Read(8) != 'H') { + return true; + } + + RecordData Record; + bool InControlBlock = false; + while (!Stream.AtEndOfStream()) { + unsigned Code = Stream.ReadCode(); + + if (Code == llvm::bitc::ENTER_SUBBLOCK) { + unsigned BlockID = Stream.ReadSubBlockID(); + + // We only know the control subblock ID. + switch (BlockID) { + case CONTROL_BLOCK_ID: + if (Stream.EnterSubBlock(CONTROL_BLOCK_ID)) { + return true; + } else { + InControlBlock = true; + } + break; + + default: + if (Stream.SkipBlock()) + return true; + break; + } + continue; + } + + if (Code == llvm::bitc::END_BLOCK) { + if (Stream.ReadBlockEnd()) { + return true; + } + + InControlBlock = false; + continue; + } + + if (Code == llvm::bitc::DEFINE_ABBREV) { + Stream.ReadAbbrevRecord(); + continue; + } + + Record.clear(); + const char *BlobStart = 0; + unsigned BlobLen = 0; + unsigned RecCode = Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen); + if (InControlBlock) { + switch ((ControlRecordTypes)RecCode) { + case METADATA: { + if (Record[0] != VERSION_MAJOR) { + return true; + } + + const std::string &CurBranch = getClangFullRepositoryVersion(); + StringRef ASTBranch(BlobStart, BlobLen); + if (StringRef(CurBranch) != ASTBranch) + return true; + + break; + } + case LANGUAGE_OPTIONS: + if (ParseLanguageOptions(Record, false, Listener)) + return true; + break; + + case TARGET_OPTIONS: + if (ParseTargetOptions(Record, false, Listener)) + return true; + break; + + case DIAGNOSTIC_OPTIONS: + if (ParseDiagnosticOptions(Record, false, Listener)) + return true; + break; + + case FILE_SYSTEM_OPTIONS: + if (ParseFileSystemOptions(Record, false, Listener)) + return true; + break; + + case HEADER_SEARCH_OPTIONS: + if (ParseHeaderSearchOptions(Record, false, Listener)) + return true; + break; + + case PREPROCESSOR_OPTIONS: { + std::string IgnoredSuggestedPredefines; + if (ParsePreprocessorOptions(Record, false, Listener, + IgnoredSuggestedPredefines)) + return true; + break; + } + + default: + // No other validation to perform. + break; + } + } + } + + return false; +} + + +bool ASTReader::isAcceptableASTFile(StringRef Filename, + FileManager &FileMgr, + const LangOptions &LangOpts, + const TargetOptions &TargetOpts, + const PreprocessorOptions &PPOpts) { + SimplePCHValidator validator(LangOpts, TargetOpts, PPOpts, FileMgr); + return !readASTFileControlBlock(Filename, FileMgr, validator); +} + +bool ASTReader::ReadSubmoduleBlock(ModuleFile &F) { // Enter the submodule block. 
if (F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID)) { Error("malformed submodule block record in AST file"); - return Failure; + return true; } ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); @@ -3095,9 +3287,9 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { if (Code == llvm::bitc::END_BLOCK) { if (F.Stream.ReadBlockEnd()) { Error("error at end of submodule block in AST file"); - return Failure; + return true; } - return Success; + return false; } if (Code == llvm::bitc::ENTER_SUBBLOCK) { @@ -3105,7 +3297,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { F.Stream.ReadSubBlockID(); if (F.Stream.SkipBlock()) { Error("malformed block record in AST file"); - return Failure; + return true; } continue; } @@ -3126,12 +3318,12 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { case SUBMODULE_DEFINITION: { if (First) { Error("missing submodule metadata record at beginning of block"); - return Failure; + return true; } if (Record.size() < 7) { Error("malformed module definition"); - return Failure; + return true; } StringRef Name(BlobStart, BlobLen); @@ -3157,9 +3349,10 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { if (GlobalIndex >= SubmodulesLoaded.size() || SubmodulesLoaded[GlobalIndex]) { Error("too many submodules"); - return Failure; + return true; } + CurrentModule->setASTFile(F.File); CurrentModule->IsFromModuleFile = true; CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem; CurrentModule->InferSubmodules = InferSubmodules; @@ -3175,7 +3368,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { case SUBMODULE_UMBRELLA_HEADER: { if (First) { Error("missing submodule metadata record at beginning of block"); - return Failure; + return true; } if (!CurrentModule) @@ -3187,7 +3380,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { ModMap.setUmbrellaHeader(CurrentModule, Umbrella); else if (CurrentModule->getUmbrellaHeader() != Umbrella) { Error("mismatched umbrella headers in submodule"); - return Failure; + return true; } } break; @@ -3196,7 +3389,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { case SUBMODULE_HEADER: { if (First) { Error("missing submodule metadata record at beginning of block"); - return Failure; + return true; } if (!CurrentModule) @@ -3208,15 +3401,51 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { if (std::find(CurrentModule->Headers.begin(), CurrentModule->Headers.end(), File) == CurrentModule->Headers.end()) - ModMap.addHeader(CurrentModule, File); + ModMap.addHeader(CurrentModule, File, false); } break; } - + + case SUBMODULE_EXCLUDED_HEADER: { + if (First) { + Error("missing submodule metadata record at beginning of block"); + return true; + } + + if (!CurrentModule) + break; + + // FIXME: Be more lazy about this! + StringRef FileName(BlobStart, BlobLen); + if (const FileEntry *File = PP.getFileManager().getFile(FileName)) { + if (std::find(CurrentModule->Headers.begin(), + CurrentModule->Headers.end(), + File) == CurrentModule->Headers.end()) + ModMap.addHeader(CurrentModule, File, true); + } + break; + } + + case SUBMODULE_TOPHEADER: { + if (First) { + Error("missing submodule metadata record at beginning of block"); + return true; + } + + if (!CurrentModule) + break; + + // FIXME: Be more lazy about this! 
+ StringRef FileName(BlobStart, BlobLen); + if (const FileEntry *File = PP.getFileManager().getFile(FileName)) + CurrentModule->TopHeaders.insert(File); + break; + } + case SUBMODULE_UMBRELLA_DIR: { if (First) { Error("missing submodule metadata record at beginning of block"); - return Failure; + return true; } if (!CurrentModule) @@ -3229,7 +3458,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { ModMap.setUmbrellaDir(CurrentModule, Umbrella); else if (CurrentModule->getUmbrellaDir() != Umbrella) { Error("mismatched umbrella directories in submodule"); - return Failure; + return true; } } break; @@ -3238,7 +3467,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { case SUBMODULE_METADATA: { if (!First) { Error("submodule metadata record not at beginning of block"); - return Failure; + return true; } First = false; @@ -3264,7 +3493,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { case SUBMODULE_IMPORTS: { if (First) { Error("missing submodule metadata record at beginning of block"); - return Failure; + return true; } if (!CurrentModule) @@ -3285,7 +3514,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { case SUBMODULE_EXPORTS: { if (First) { Error("missing submodule metadata record at beginning of block"); - return Failure; + return true; } if (!CurrentModule) @@ -3309,7 +3538,7 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { case SUBMODULE_REQUIRES: { if (First) { Error("missing submodule metadata record at beginning of block"); - return Failure; + return true; } if (!CurrentModule) @@ -3331,27 +3560,144 @@ ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) { /// them to the AST listener if one is set. /// /// \returns true if the listener deems the file unacceptable, false otherwise. 
-bool ASTReader::ParseLanguageOptions(const RecordData &Record) { - if (Listener) { - LangOptions LangOpts; - unsigned Idx = 0; +bool ASTReader::ParseLanguageOptions(const RecordData &Record, + bool Complain, + ASTReaderListener &Listener) { + LangOptions LangOpts; + unsigned Idx = 0; #define LANGOPT(Name, Bits, Default, Description) \ LangOpts.Name = Record[Idx++]; #define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \ LangOpts.set##Name(static_cast(Record[Idx++])); #include "clang/Basic/LangOptions.def" - ObjCRuntime::Kind runtimeKind = (ObjCRuntime::Kind) Record[Idx++]; - VersionTuple runtimeVersion = ReadVersionTuple(Record, Idx); - LangOpts.ObjCRuntime = ObjCRuntime(runtimeKind, runtimeVersion); - - unsigned Length = Record[Idx++]; - LangOpts.CurrentModule.assign(Record.begin() + Idx, - Record.begin() + Idx + Length); - return Listener->ReadLanguageOptions(LangOpts); + ObjCRuntime::Kind runtimeKind = (ObjCRuntime::Kind) Record[Idx++]; + VersionTuple runtimeVersion = ReadVersionTuple(Record, Idx); + LangOpts.ObjCRuntime = ObjCRuntime(runtimeKind, runtimeVersion); + + unsigned Length = Record[Idx++]; + LangOpts.CurrentModule.assign(Record.begin() + Idx, + Record.begin() + Idx + Length); + return Listener.ReadLanguageOptions(LangOpts, Complain); +} + +bool ASTReader::ParseTargetOptions(const RecordData &Record, + bool Complain, + ASTReaderListener &Listener) { + unsigned Idx = 0; + TargetOptions TargetOpts; + TargetOpts.Triple = ReadString(Record, Idx); + TargetOpts.CPU = ReadString(Record, Idx); + TargetOpts.ABI = ReadString(Record, Idx); + TargetOpts.CXXABI = ReadString(Record, Idx); + TargetOpts.LinkerVersion = ReadString(Record, Idx); + for (unsigned N = Record[Idx++]; N; --N) { + TargetOpts.FeaturesAsWritten.push_back(ReadString(Record, Idx)); + } + for (unsigned N = Record[Idx++]; N; --N) { + TargetOpts.Features.push_back(ReadString(Record, Idx)); } - return false; + return Listener.ReadTargetOptions(TargetOpts, Complain); +} + +bool ASTReader::ParseDiagnosticOptions(const RecordData &Record, bool Complain, + ASTReaderListener &Listener) { + DiagnosticOptions DiagOpts; + unsigned Idx = 0; +#define DIAGOPT(Name, Bits, Default) DiagOpts.Name = Record[Idx++]; +#define ENUM_DIAGOPT(Name, Type, Bits, Default) \ + DiagOpts.set##Name(static_cast(Record[Idx++])); +#include "clang/Basic/DiagnosticOptions.def" + + for (unsigned N = Record[Idx++]; N; --N) { + DiagOpts.Warnings.push_back(ReadString(Record, Idx)); + } + + return Listener.ReadDiagnosticOptions(DiagOpts, Complain); +} + +bool ASTReader::ParseFileSystemOptions(const RecordData &Record, bool Complain, + ASTReaderListener &Listener) { + FileSystemOptions FSOpts; + unsigned Idx = 0; + FSOpts.WorkingDir = ReadString(Record, Idx); + return Listener.ReadFileSystemOptions(FSOpts, Complain); +} + +bool ASTReader::ParseHeaderSearchOptions(const RecordData &Record, + bool Complain, + ASTReaderListener &Listener) { + HeaderSearchOptions HSOpts; + unsigned Idx = 0; + HSOpts.Sysroot = ReadString(Record, Idx); + + // Include entries. 
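// [Editorial sketch, not part of the vendor import.] The option readers in this
// hunk (ParseLanguageOptions, ParseTargetOptions, ParseHeaderSearchOptions,
// ParsePreprocessorOptions) all decode one flat integer record with the same two
// conventions: a list is stored as a count followed by its elements, and a string
// as a length followed by its character values (visible in how
// LangOpts.CurrentModule is read above). A minimal standalone equivalent using
// plain std::vector<uint64_t>; the helper names are illustrative, not clang's:

#include <cstdint>
#include <string>
#include <vector>

static std::string readString(const std::vector<uint64_t> &Record,
                              unsigned &Idx) {
  unsigned Len = Record[Idx++];               // length prefix
  std::string Result;
  for (unsigned I = 0; I != Len; ++I)         // then Len character values
    Result += static_cast<char>(Record[Idx++]);
  return Result;
}

static std::vector<std::string>
readStringList(const std::vector<uint64_t> &Record, unsigned &Idx) {
  std::vector<std::string> Result;
  for (uint64_t N = Record[Idx++]; N; --N)    // count prefix, as in the loops here
    Result.push_back(readString(Record, Idx));
  return Result;
}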
+ for (unsigned N = Record[Idx++]; N; --N) { + std::string Path = ReadString(Record, Idx); + frontend::IncludeDirGroup Group + = static_cast(Record[Idx++]); + bool IsUserSupplied = Record[Idx++]; + bool IsFramework = Record[Idx++]; + bool IgnoreSysRoot = Record[Idx++]; + bool IsInternal = Record[Idx++]; + bool ImplicitExternC = Record[Idx++]; + HSOpts.UserEntries.push_back( + HeaderSearchOptions::Entry(Path, Group, IsUserSupplied, IsFramework, + IgnoreSysRoot, IsInternal, ImplicitExternC)); + } + + // System header prefixes. + for (unsigned N = Record[Idx++]; N; --N) { + std::string Prefix = ReadString(Record, Idx); + bool IsSystemHeader = Record[Idx++]; + HSOpts.SystemHeaderPrefixes.push_back( + HeaderSearchOptions::SystemHeaderPrefix(Prefix, IsSystemHeader)); + } + + HSOpts.ResourceDir = ReadString(Record, Idx); + HSOpts.ModuleCachePath = ReadString(Record, Idx); + HSOpts.DisableModuleHash = Record[Idx++]; + HSOpts.UseBuiltinIncludes = Record[Idx++]; + HSOpts.UseStandardSystemIncludes = Record[Idx++]; + HSOpts.UseStandardCXXIncludes = Record[Idx++]; + HSOpts.UseLibcxx = Record[Idx++]; + + return Listener.ReadHeaderSearchOptions(HSOpts, Complain); +} + +bool ASTReader::ParsePreprocessorOptions(const RecordData &Record, + bool Complain, + ASTReaderListener &Listener, + std::string &SuggestedPredefines) { + PreprocessorOptions PPOpts; + unsigned Idx = 0; + + // Macro definitions/undefs + for (unsigned N = Record[Idx++]; N; --N) { + std::string Macro = ReadString(Record, Idx); + bool IsUndef = Record[Idx++]; + PPOpts.Macros.push_back(std::make_pair(Macro, IsUndef)); + } + + // Includes + for (unsigned N = Record[Idx++]; N; --N) { + PPOpts.Includes.push_back(ReadString(Record, Idx)); + } + + // Macro Includes + for (unsigned N = Record[Idx++]; N; --N) { + PPOpts.MacroIncludes.push_back(ReadString(Record, Idx)); + } + + PPOpts.UsePredefines = Record[Idx++]; + PPOpts.ImplicitPCHInclude = ReadString(Record, Idx); + PPOpts.ImplicitPTHInclude = ReadString(Record, Idx); + PPOpts.ObjCXXARCStandardLibrary = + static_cast(Record[Idx++]); + SuggestedPredefines.clear(); + return Listener.ReadPreprocessorOptions(PPOpts, Complain, + SuggestedPredefines); } std::pair @@ -3365,6 +3711,23 @@ ASTReader::getModulePreprocessedEntity(unsigned GlobalIndex) { return std::make_pair(M, LocalIndex); } +std::pair +ASTReader::getModulePreprocessedEntities(ModuleFile &Mod) const { + if (PreprocessingRecord *PPRec = PP.getPreprocessingRecord()) + return PPRec->getIteratorsForLoadedRange(Mod.BasePreprocessedEntityID, + Mod.NumPreprocessedEntities); + + return std::make_pair(PreprocessingRecord::iterator(), + PreprocessingRecord::iterator()); +} + +std::pair +ASTReader::getModuleFileLevelDecls(ModuleFile &Mod) { + return std::make_pair(ModuleDeclIterator(this, &Mod, Mod.FileSortedDecls), + ModuleDeclIterator(this, &Mod, + Mod.FileSortedDecls + Mod.NumFileSortedDecls)); +} + PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) { PreprocessedEntityID PPID = Index+1; std::pair PPInfo = getModulePreprocessedEntity(Index); @@ -3455,7 +3818,7 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) { InclusionDirective *ID = new (PPRec) InclusionDirective(PPRec, Kind, StringRef(BlobStart, Record[0]), - Record[1], + Record[1], Record[3], File, Range); return ID; @@ -3476,7 +3839,7 @@ PreprocessedEntityID ASTReader::findNextPreprocessedEntity( EndI = GlobalSLocOffsetMap.end(); SLocMapI != EndI; ++SLocMapI) { ModuleFile &M = *SLocMapI->second; if (M.NumPreprocessedEntities) - return 
getGlobalPreprocessedEntityID(M, M.BasePreprocessedEntityID); + return M.BasePreprocessedEntityID; } return getTotalNumPreprocessedEntities(); @@ -3559,8 +3922,7 @@ ASTReader::findBeginPreprocessedEntity(SourceLocation BLoc) const { if (PPI == pp_end) return findNextPreprocessedEntity(SLocMapI); - return getGlobalPreprocessedEntityID(M, - M.BasePreprocessedEntityID + (PPI - pp_begin)); + return M.BasePreprocessedEntityID + (PPI - pp_begin); } /// \brief Returns the first preprocessed entity ID that begins after \arg ELoc. @@ -3589,8 +3951,7 @@ ASTReader::findEndPreprocessedEntity(SourceLocation ELoc) const { if (PPI == pp_end) return findNextPreprocessedEntity(SLocMapI); - return getGlobalPreprocessedEntityID(M, - M.BasePreprocessedEntityID + (PPI - pp_begin)); + return M.BasePreprocessedEntityID + (PPI - pp_begin); } /// \brief Returns a pair of [Begin, End) indices of preallocated @@ -3681,14 +4042,31 @@ HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) { } void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) { + // FIXME: Make it work properly with modules. + llvm::SmallVector DiagStates; for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) { ModuleFile &F = *(*I); unsigned Idx = 0; + DiagStates.clear(); + assert(!Diag.DiagStates.empty()); + DiagStates.push_back(&Diag.DiagStates.front()); // the command-line one. while (Idx < F.PragmaDiagMappings.size()) { SourceLocation Loc = ReadSourceLocation(F, F.PragmaDiagMappings[Idx++]); + unsigned DiagStateID = F.PragmaDiagMappings[Idx++]; + if (DiagStateID != 0) { + Diag.DiagStatePoints.push_back( + DiagnosticsEngine::DiagStatePoint(DiagStates[DiagStateID-1], + FullSourceLoc(Loc, SourceMgr))); + continue; + } + + assert(DiagStateID == 0); + // A new DiagState was created here. Diag.DiagStates.push_back(*Diag.GetCurDiagState()); + DiagnosticsEngine::DiagState *NewState = &Diag.DiagStates.back(); + DiagStates.push_back(NewState); Diag.DiagStatePoints.push_back( - DiagnosticsEngine::DiagStatePoint(&Diag.DiagStates.back(), + DiagnosticsEngine::DiagStatePoint(NewState, FullSourceLoc(Loc, SourceMgr))); while (1) { assert(Idx < F.PragmaDiagMappings.size() && @@ -4258,6 +4636,8 @@ void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) { } void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) { TL.setLocalRangeBegin(ReadSourceLocation(Record, Idx)); + TL.setLParenLoc(ReadSourceLocation(Record, Idx)); + TL.setRParenLoc(ReadSourceLocation(Record, Idx)); TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx)); for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) { TL.setArg(i, ReadDeclAs(Record, Idx)); @@ -4467,6 +4847,10 @@ QualType ASTReader::GetType(TypeID ID) { case PREDEF_TYPE_VA_LIST_TAG: T = Context.getVaListTagType(); break; + + case PREDEF_TYPE_BUILTIN_FN: + T = Context.BuiltinFnTy; + break; } assert(!T.isNull() && "Unknown predefined type"); @@ -4537,7 +4921,9 @@ ASTReader::GetTemplateArgumentLocInfo(ModuleFile &F, case TemplateArgument::Null: case TemplateArgument::Integral: case TemplateArgument::Declaration: + case TemplateArgument::NullPtr: case TemplateArgument::Pack: + // FIXME: Is this right? 
return TemplateArgumentLocInfo(); } llvm_unreachable("unexpected template argument loc"); @@ -4593,7 +4979,7 @@ CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) { } serialization::DeclID -ASTReader::getGlobalDeclID(ModuleFile &F, unsigned LocalID) const { +ASTReader::getGlobalDeclID(ModuleFile &F, LocalDeclID LocalID) const { if (LocalID < NUM_PREDEF_DECL_IDS) return LocalID; @@ -4930,8 +5316,10 @@ namespace { continue; if (ND->getDeclName() != This->Name) { - assert(!This->Name.getCXXNameType().isNull() && - "Name mismatch without a type"); + // A name might be null because the decl's redeclarable part is + // currently read before reading its name. The lookup is triggered by + // building that decl (likely indirectly), and so it is later in the + // sense of "already existing" and can be ignored here. continue; } @@ -5132,13 +5520,15 @@ void ASTReader::PrintStats() { = IdentifiersLoaded.size() - std::count(IdentifiersLoaded.begin(), IdentifiersLoaded.end(), (IdentifierInfo *)0); + unsigned NumMacrosLoaded + = MacrosLoaded.size() - std::count(MacrosLoaded.begin(), + MacrosLoaded.end(), + (MacroInfo *)0); unsigned NumSelectorsLoaded = SelectorsLoaded.size() - std::count(SelectorsLoaded.begin(), SelectorsLoaded.end(), Selector()); - std::fprintf(stderr, " %u stat cache hits\n", NumStatHits); - std::fprintf(stderr, " %u stat cache misses\n", NumStatMisses); if (unsigned TotalNumSLocEntries = getTotalNumSLocs()) std::fprintf(stderr, " %u/%u source location entries read (%f%%)\n", NumSLocEntriesRead, TotalNumSLocEntries, @@ -5155,6 +5545,10 @@ void ASTReader::PrintStats() { std::fprintf(stderr, " %u/%u identifiers read (%f%%)\n", NumIdentifiersLoaded, (unsigned)IdentifiersLoaded.size(), ((float)NumIdentifiersLoaded/IdentifiersLoaded.size() * 100)); + if (!MacrosLoaded.empty()) + std::fprintf(stderr, " %u/%u macros read (%f%%)\n", + NumMacrosLoaded, (unsigned)MacrosLoaded.size(), + ((float)NumMacrosLoaded/MacrosLoaded.size() * 100)); if (!SelectorsLoaded.empty()) std::fprintf(stderr, " %u/%u selectors read (%f%%)\n", NumSelectorsLoaded, (unsigned)SelectorsLoaded.size(), @@ -5213,6 +5607,7 @@ void ASTReader::dump() { dumpModuleIDMap("Global type map", GlobalTypeMap); dumpModuleIDMap("Global declaration map", GlobalDeclMap); dumpModuleIDMap("Global identifier map", GlobalIdentifierMap); + dumpModuleIDMap("Global macro map", GlobalMacroMap); dumpModuleIDMap("Global submodule map", GlobalSubmoduleMap); dumpModuleIDMap("Global selector map", GlobalSelectorMap); dumpModuleIDMap("Global preprocessed entity map", @@ -5246,7 +5641,7 @@ void ASTReader::getMemoryBufferSizes(MemoryBufferSizes &sizes) const { void ASTReader::InitializeSema(Sema &S) { SemaObj = &S; - S.ExternalSource = this; + S.addExternalSource(this); // Makes sure any declarations that were deserialized "too early" // still get added to the identifier's declaration chains. @@ -5281,6 +5676,9 @@ void ASTReader::InitializeSema(Sema &S) { } IdentifierInfo* ASTReader::get(const char *NameStart, const char *NameEnd) { + // Note that we are loading an identifier. 
+ Deserializing AnIdentifier(this); + IdentifierLookupVisitor Visitor(StringRef(NameStart, NameEnd - NameStart), /*PriorGeneration=*/0); ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor); @@ -5570,6 +5968,7 @@ void ASTReader::ReadPendingInstantiations( ValueDecl *D = cast(GetDecl(PendingInstantiations[Idx++])); SourceLocation Loc = SourceLocation::getFromRawEncoding(PendingInstantiations[Idx++]); + Pending.push_back(std::make_pair(D, Loc)); } PendingInstantiations.clear(); @@ -5682,8 +6081,37 @@ IdentifierID ASTReader::getGlobalIdentifierID(ModuleFile &M, unsigned LocalID) { return LocalID + I->second; } -bool ASTReader::ReadSLocEntry(int ID) { - return ReadSLocEntryRecord(ID) != Success; +MacroInfo *ASTReader::getMacro(MacroID ID, MacroInfo *Hint) { + if (ID == 0) + return 0; + + if (MacrosLoaded.empty()) { + Error("no macro table in AST file"); + return 0; + } + + ID -= NUM_PREDEF_MACRO_IDS; + if (!MacrosLoaded[ID]) { + GlobalMacroMapType::iterator I + = GlobalMacroMap.find(ID + NUM_PREDEF_MACRO_IDS); + assert(I != GlobalMacroMap.end() && "Corrupted global macro map"); + ModuleFile *M = I->second; + unsigned Index = ID - M->BaseMacroID; + ReadMacroRecord(*M, M->MacroOffsets[Index], Hint); + } + + return MacrosLoaded[ID]; +} + +MacroID ASTReader::getGlobalMacroID(ModuleFile &M, unsigned LocalID) { + if (LocalID < NUM_PREDEF_MACRO_IDS) + return LocalID; + + ContinuousRangeMap::iterator I + = M.MacroRemap.find(LocalID - NUM_PREDEF_MACRO_IDS); + assert(I != M.MacroRemap.end() && "Invalid index into macro index remap"); + + return LocalID + I->second; } serialization::SubmoduleID @@ -5694,7 +6122,7 @@ ASTReader::getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID) { ContinuousRangeMap::iterator I = M.SubmoduleRemap.find(LocalID - NUM_PREDEF_SUBMODULE_IDS); assert(I != M.SubmoduleRemap.end() - && "Invalid index into identifier index remap"); + && "Invalid index into submodule index remap"); return LocalID + I->second; } @@ -5759,7 +6187,7 @@ ASTReader::getGlobalSelectorID(ModuleFile &M, unsigned LocalID) const { ContinuousRangeMap::iterator I = M.SelectorRemap.find(LocalID - NUM_PREDEF_SELECTOR_IDS); assert(I != M.SelectorRemap.end() - && "Invalid index into identifier index remap"); + && "Invalid index into selector index remap"); return LocalID + I->second; } @@ -5926,8 +6354,13 @@ ASTReader::ReadTemplateArgument(ModuleFile &F, return TemplateArgument(); case TemplateArgument::Type: return TemplateArgument(readType(F, Record, Idx)); - case TemplateArgument::Declaration: - return TemplateArgument(ReadDecl(F, Record, Idx)); + case TemplateArgument::Declaration: { + ValueDecl *D = ReadDeclAs(F, Record, Idx); + bool ForReferenceParam = Record[Idx++]; + return TemplateArgument(D, ForReferenceParam); + } + case TemplateArgument::NullPtr: + return TemplateArgument(readType(F, Record, Idx), /*isNullPtr*/true); case TemplateArgument::Integral: { llvm::APSInt Value = ReadAPSInt(Record, Idx); QualType T = readType(F, Record, Idx); @@ -6340,7 +6773,8 @@ void ASTReader::ReadComments() { } void ASTReader::finishPendingActions() { - while (!PendingIdentifierInfos.empty() || !PendingDeclChains.empty()) { + while (!PendingIdentifierInfos.empty() || !PendingDeclChains.empty() || + !PendingMacroIDs.empty()) { // If any identifiers with corresponding top-level declarations have // been loaded, load those declarations now. 
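// [Editorial sketch, not part of the vendor import.] getGlobalMacroID,
// getGlobalSubmoduleID and getGlobalSelectorID above all turn a module-local ID
// into a global one the same way: find the remap entry whose range covers the
// local ID and add its stored offset. Assuming ContinuousRangeMap::find returns
// the entry with the largest key not greater than the query, the idea reduces to
// the following plain std::map version (names are illustrative only):

#include <cassert>
#include <map>

// Key: first local ID of a loaded range; value: offset that converts a local ID
// from that range into a global ID.
typedef std::map<unsigned, int> IDRemap;

static unsigned resolveGlobalID(const IDRemap &Remap, unsigned LocalID,
                                unsigned NumPredefinedIDs) {
  if (LocalID < NumPredefinedIDs)
    return LocalID;                           // predefined IDs are never remapped
  IDRemap::const_iterator It = Remap.upper_bound(LocalID - NumPredefinedIDs);
  assert(It != Remap.begin() && "invalid index into ID remap");
  --It;                                       // entry covering this local ID
  return LocalID + It->second;                // apply the per-module offset
}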
while (!PendingIdentifierInfos.empty()) { @@ -6355,6 +6789,18 @@ void ASTReader::finishPendingActions() { PendingDeclChainsKnown.erase(PendingDeclChains[I]); } PendingDeclChains.clear(); + + // Load any pending macro definitions. + for (unsigned I = 0; I != PendingMacroIDs.size(); ++I) { + // FIXME: std::move here + SmallVector GlobalIDs = PendingMacroIDs.begin()[I].second; + MacroInfo *Hint = 0; + for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs; + ++IDIdx) { + Hint = getMacro(GlobalIDs[IDIdx], Hint); + } + } + PendingMacroIDs.clear(); } // If we deserialized any C++ or Objective-C class definitions, any @@ -6408,9 +6854,29 @@ void ASTReader::finishPendingActions() { for (RedeclarableTemplateDecl::redecl_iterator R = RTD->redecls_begin(), REnd = RTD->redecls_end(); R != REnd; ++R) - R->Common = RTD->Common; + R->Common = RTD->Common; } PendingDefinitions.clear(); + + // Load the bodies of any functions or methods we've encountered. We do + // this now (delayed) so that we can be sure that the declaration chains + // have been fully wired up. + for (PendingBodiesMap::iterator PB = PendingBodies.begin(), + PBEnd = PendingBodies.end(); + PB != PBEnd; ++PB) { + if (FunctionDecl *FD = dyn_cast(PB->first)) { + // FIXME: Check for =delete/=default? + // FIXME: Complain about ODR violations here? + if (!getContext().getLangOpts().Modules || !FD->hasBody()) + FD->setLazyBody(PB->second); + continue; + } + + ObjCMethodDecl *MD = cast(PB->first); + if (!getContext().getLangOpts().Modules || !MD->hasBody()) + MD->setLazyBody(PB->second); + } + PendingBodies.clear(); } void ASTReader::FinishedDeserializing() { @@ -6442,17 +6908,14 @@ void ASTReader::FinishedDeserializing() { ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context, StringRef isysroot, bool DisableValidation, - bool DisableStatCache, bool AllowASTWithCompilerErrors) + bool AllowASTWithCompilerErrors) : Listener(new PCHValidator(PP, *this)), DeserializationListener(0), SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()), Diags(PP.getDiagnostics()), SemaObj(0), PP(PP), Context(Context), - Consumer(0), ModuleMgr(FileMgr.getFileSystemOptions()), - RelocatablePCH(false), isysroot(isysroot), - DisableValidation(DisableValidation), - DisableStatCache(DisableStatCache), + Consumer(0), ModuleMgr(PP.getFileManager()), + isysroot(isysroot), DisableValidation(DisableValidation), AllowASTWithCompilerErrors(AllowASTWithCompilerErrors), CurrentGeneration(0), CurrSwitchCaseStmts(&SwitchCaseStmts), - NumStatHits(0), NumStatMisses(0), NumSLocEntriesRead(0), TotalNumSLocEntries(0), NumStatementsRead(0), TotalNumStatements(0), NumMacrosRead(0), TotalNumMacros(0), NumSelectorsRead(0), NumMethodPoolEntriesRead(0), diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp index cb21f82..c42944d 100644 --- a/lib/Serialization/ASTReaderDecl.cpp +++ b/lib/Serialization/ASTReaderDecl.cpp @@ -37,7 +37,6 @@ namespace clang { class ASTDeclReader : public DeclVisitor { ASTReader &Reader; ModuleFile &F; - llvm::BitstreamCursor &Cursor; const DeclID ThisDeclID; const unsigned RawLocation; typedef ASTReader::RecordData RecordData; @@ -48,6 +47,8 @@ namespace clang { DeclID DeclContextIDForTemplateParmDecl; DeclID LexicalDeclContextIDForTemplateParmDecl; + bool HasPendingBody; + uint64_t GetCurrentCursorOffset(); SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) { @@ -116,7 +117,7 @@ namespace clang { GlobalDeclID FirstID; mutable bool Owning; - RedeclarableResult 
&operator=(RedeclarableResult&); // DO NOT IMPLEMENT + void operator=(RedeclarableResult &) LLVM_DELETED_FUNCTION; public: RedeclarableResult(ASTReader &Reader, GlobalDeclID FirstID) @@ -162,7 +163,7 @@ namespace clang { NamedDecl *Existing; mutable bool AddResult; - FindExistingResult &operator=(FindExistingResult&); // DO NOT IMPLEMENT + void operator=(FindExistingResult&) LLVM_DELETED_FUNCTION; public: FindExistingResult(ASTReader &Reader) @@ -194,16 +195,19 @@ namespace clang { public: ASTDeclReader(ASTReader &Reader, ModuleFile &F, - llvm::BitstreamCursor &Cursor, DeclID thisDeclID, + DeclID thisDeclID, unsigned RawLocation, const RecordData &Record, unsigned &Idx) - : Reader(Reader), F(F), Cursor(Cursor), ThisDeclID(thisDeclID), + : Reader(Reader), F(F), ThisDeclID(thisDeclID), RawLocation(RawLocation), Record(Record), Idx(Idx), - TypeIDForTypeDecl(0) { } + TypeIDForTypeDecl(0), HasPendingBody(false) { } static void attachPreviousDecl(Decl *D, Decl *previous); static void attachLatestDecl(Decl *D, Decl *latest); + /// \brief Determine whether this declaration has a pending body. + bool hasPendingBody() const { return HasPendingBody; } + void Visit(Decl *D); void UpdateDecl(Decl *D, ModuleFile &ModuleFile, @@ -321,8 +325,14 @@ void ASTDeclReader::Visit(Decl *D) { ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull(); } else if (FunctionDecl *FD = dyn_cast(D)) { // FunctionDecl's body was written last after all other Stmts/Exprs. - if (Record[Idx++]) - FD->setLazyBody(GetCurrentCursorOffset()); + // We only read it if FD doesn't already have a body (e.g., from another + // module). + // FIXME: Also consider = default and = delete. + // FIXME: Can we diagnose ODR violations somehow? + if (Record[Idx++]) { + Reader.PendingBodies[FD] = GetCurrentCursorOffset(); + HasPendingBody = true; + } } else if (D->isTemplateParameter()) { // If we have a fully initialized template parameter, we can now // set its DeclContext. @@ -590,8 +600,10 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) { TemplArgs.size(), C); void *InsertPos = 0; CanonTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos); - assert(InsertPos && "Another specialization already inserted!"); - CanonTemplate->getSpecializations().InsertNode(FTInfo, InsertPos); + if (InsertPos) + CanonTemplate->getSpecializations().InsertNode(FTInfo, InsertPos); + else + assert(0 && "Another specialization already inserted!"); } break; } @@ -628,19 +640,16 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) { void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) { VisitNamedDecl(MD); if (Record[Idx++]) { - // In practice, this won't be executed (since method definitions - // don't occur in header files). - // Switch case IDs for this method body. - ASTReader::SwitchCaseMapTy SwitchCaseStmtsForObjCMethod; - SaveAndRestore - SCFOM(Reader.CurrSwitchCaseStmts, &SwitchCaseStmtsForObjCMethod); - MD->setBody(Reader.ReadStmt(F)); + // Load the body on-demand. Most clients won't care, because method + // definitions rarely show up in headers. 
+ Reader.PendingBodies[MD] = GetCurrentCursorOffset(); + HasPendingBody = true; MD->setSelfDecl(ReadDeclAs(Record, Idx)); MD->setCmdDecl(ReadDeclAs(Record, Idx)); } MD->setInstanceMethod(Record[Idx++]); MD->setVariadic(Record[Idx++]); - MD->setSynthesized(Record[Idx++]); + MD->setPropertyAccessor(Record[Idx++]); MD->setDefined(Record[Idx++]); MD->IsOverriding = Record[Idx++]; @@ -846,6 +855,8 @@ void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) { D->setSuperClass(ReadDeclAs(Record, Idx)); D->setIvarLBraceLoc(ReadSourceLocation(Record, Idx)); D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx)); + D->setHasNonZeroConstructors(Record[Idx++]); + D->setHasDestructors(Record[Idx++]); llvm::tie(D->IvarInitializers, D->NumIvarInitializers) = Reader.ReadCXXCtorInitializers(F, Record, Idx); } @@ -897,7 +908,8 @@ void ASTDeclReader::VisitVarDecl(VarDecl *VD) { VD->VarDeclBits.NRVOVariable = Record[Idx++]; VD->VarDeclBits.CXXForRangeDecl = Record[Idx++]; VD->VarDeclBits.ARCPseudoStrong = Record[Idx++]; - + VD->VarDeclBits.IsConstexpr = Record[Idx++]; + // Only true variables (not parameters or implicit parameters) can be merged. if (VD->getKind() == Decl::Var) mergeRedeclarable(VD, Redecl); @@ -1135,6 +1147,7 @@ void ASTDeclReader::ReadCXXDefinitionData( Lambda.Captures = (Capture*)Reader.Context.Allocate(sizeof(Capture)*Lambda.NumCaptures); Capture *ToCapture = Lambda.Captures; + Lambda.MethodTyInfo = GetTypeSourceInfo(Record, Idx); for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) { SourceLocation Loc = ReadSourceLocation(Record, Idx); bool IsImplicit = Record[Idx++]; @@ -1155,7 +1168,8 @@ void ASTDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) { // allocate the appropriate DefinitionData structure. bool IsLambda = Record[Idx++]; if (IsLambda) - D->DefinitionData = new (C) CXXRecordDecl::LambdaDefinitionData(D, false); + D->DefinitionData = new (C) CXXRecordDecl::LambdaDefinitionData(D, 0, + false); else D->DefinitionData = new (C) struct CXXRecordDecl::DefinitionData(D); @@ -1242,6 +1256,7 @@ void ASTDeclReader::VisitImportDecl(ImportDecl *D) { SourceLocation *StoredLocs = reinterpret_cast(D + 1); for (unsigned I = 0, N = Record.back(); I != N; ++I) StoredLocs[I] = ReadSourceLocation(Record, Idx); + ++Idx; // The number of stored source locations. } void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) { @@ -1308,10 +1323,12 @@ ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) { D->setMemberSpecialization(); } } - + VisitTemplateDecl(D); D->IdentifierNamespace = Record[Idx++]; - + + mergeRedeclarable(D, Redecl); + return Redecl; } @@ -1336,10 +1353,10 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) { for (unsigned I = 0; I != Size; ++I) SpecIDs.push_back(ReadDeclID(Record, Idx)); + ClassTemplateDecl::Common *CommonPtr = D->getCommonPtr(); if (SpecIDs[0]) { typedef serialization::DeclID DeclID; - ClassTemplateDecl::Common *CommonPtr = D->getCommonPtr(); // FIXME: Append specializations! CommonPtr->LazySpecializations = new (Reader.getContext()) DeclID [SpecIDs.size()]; @@ -1347,7 +1364,7 @@ void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) { SpecIDs.size() * sizeof(DeclID)); } - // InjectedClassNameType is computed. 
+ CommonPtr->InjectedClassNameType = Reader.readType(F, Record, Idx); } } @@ -1391,14 +1408,17 @@ void ASTDeclReader::VisitClassTemplateSpecializationDecl( TemplArgs.size()); D->PointOfInstantiation = ReadSourceLocation(Record, Idx); D->SpecializationKind = (TemplateSpecializationKind)Record[Idx++]; - - if (D->isCanonicalDecl()) { // It's kept in the folding set. + + bool writtenAsCanonicalDecl = Record[Idx++]; + if (writtenAsCanonicalDecl) { ClassTemplateDecl *CanonPattern = ReadDeclAs(Record,Idx); - if (ClassTemplatePartialSpecializationDecl *Partial - = dyn_cast(D)) { - CanonPattern->getCommonPtr()->PartialSpecializations.InsertNode(Partial); - } else { - CanonPattern->getCommonPtr()->Specializations.InsertNode(D); + if (D->isCanonicalDecl()) { // It's kept in the folding set. + if (ClassTemplatePartialSpecializationDecl *Partial + = dyn_cast(D)) { + CanonPattern->getCommonPtr()->PartialSpecializations.GetOrInsertNode(Partial); + } else { + CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D); + } } } } @@ -1486,11 +1506,18 @@ void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) { // TemplateParmPosition. D->setDepth(Record[Idx++]); D->setPosition(Record[Idx++]); - // Rest of TemplateTemplateParmDecl. - TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(F, Record, Idx); - bool IsInherited = Record[Idx++]; - D->setDefaultArgument(Arg, IsInherited); - D->ParameterPack = Record[Idx++]; + if (D->isExpandedParameterPack()) { + void **Data = reinterpret_cast(D + 1); + for (unsigned I = 0, N = D->getNumExpansionTemplateParameters(); + I != N; ++I) + Data[I] = Reader.ReadTemplateParameterList(F, Record, Idx); + } else { + // Rest of TemplateTemplateParmDecl. + TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(F, Record, Idx); + bool IsInherited = Record[Idx++]; + D->setDefaultArgument(Arg, IsInherited); + D->ParameterPack = Record[Idx++]; + } } void ASTDeclReader::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) { @@ -1640,7 +1667,7 @@ inline void ASTReader::LoadedDecl(unsigned Index, Decl *D) { /// This routine should return true for anything that might affect /// code generation, e.g., inline function definitions, Objective-C /// declarations with metadata, etc. -static bool isConsumerInterestedIn(Decl *D) { +static bool isConsumerInterestedIn(Decl *D, bool HasBody) { // An ObjCMethodDecl is never considered as "interesting" because its // implementation container always is. @@ -1652,7 +1679,7 @@ static bool isConsumerInterestedIn(Decl *D) { return Var->isFileVarDecl() && Var->isThisDeclarationADefinition() == VarDecl::Definition; if (FunctionDecl *Func = dyn_cast(D)) - return Func->doesThisDeclarationHaveABody(); + return Func->doesThisDeclarationHaveABody() || HasBody; return false; } @@ -1719,8 +1746,10 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) { if (TagDecl *TagX = dyn_cast(X)) { TagDecl *TagY = cast(Y); return (TagX->getTagKind() == TagY->getTagKind()) || - ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class) && - (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class)); + ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class || + TagX->getTagKind() == TTK_Interface) && + (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class || + TagY->getTagKind() == TTK_Interface)); } // Functions with the same type and linkage match. 
@@ -1731,7 +1760,7 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) { return (FuncX->getLinkage() == FuncY->getLinkage()) && FuncX->getASTContext().hasSameType(FuncX->getType(), FuncY->getType()); } - + // Variables with the same type and linkage match. if (VarDecl *VarX = dyn_cast(X)) { VarDecl *VarY = cast(Y); @@ -1744,7 +1773,11 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) { NamespaceDecl *NamespaceY = cast(Y); return NamespaceX->isInline() == NamespaceY->isInline(); } - + + // Identical template names and kinds match. + if (isa(X)) + return true; + // FIXME: Many other cases to implement. return false; } @@ -1753,11 +1786,13 @@ ASTDeclReader::FindExistingResult::~FindExistingResult() { if (!AddResult || Existing) return; - DeclContext *DC = New->getDeclContext()->getRedeclContext(); - if (DC->isTranslationUnit() && Reader.SemaObj) { + if (New->getDeclContext()->getRedeclContext()->isTranslationUnit() + && Reader.SemaObj) { Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, New->getDeclName()); - } else if (DC->isNamespace()) { - DC->addDecl(New); + } else { + DeclContext *DC = New->getLexicalDeclContext(); + if (DC->isNamespace()) + DC->addDecl(New); } } @@ -1899,7 +1934,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) { RecordData Record; unsigned Code = DeclsCursor.ReadCode(); unsigned Idx = 0; - ASTDeclReader Reader(*this, *Loc.F, DeclsCursor, ID, RawLocation, Record,Idx); + ASTDeclReader Reader(*this, *Loc.F, ID, RawLocation, Record,Idx); Decl *D = 0; switch ((DeclCode)DeclsCursor.ReadRecord(Code, Record)) { @@ -2002,6 +2037,10 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) { case DECL_TEMPLATE_TEMPLATE_PARM: D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID); break; + case DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK: + D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID, + Record[Idx++]); + break; case DECL_TYPE_ALIAS_TEMPLATE: D = TypeAliasTemplateDecl::CreateDeserialized(Context, ID); break; @@ -2110,6 +2149,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) { } PendingVisibleUpdates.erase(I); } + + if (!DC->hasExternalVisibleStorage() && DC->hasExternalLexicalStorage()) + DC->setMustBuildLookupTable(); } assert(Idx == Record.size()); @@ -2125,9 +2167,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) { // AST consumer might need to know about, queue it. // We don't pass it to the consumer immediately because we may be in recursive // loading, and some declarations may still be initializing. 
- if (isConsumerInterestedIn(D)) + if (isConsumerInterestedIn(D, Reader.hasPendingBody())) InterestingDecls.push_back(D); - + return D; } @@ -2152,7 +2194,7 @@ void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) { assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!"); unsigned Idx = 0; - ASTDeclReader Reader(*this, *F, Cursor, ID, 0, Record, Idx); + ASTDeclReader Reader(*this, *F, ID, 0, Record, Idx); Reader.UpdateDecl(D, *F, Record); } } @@ -2207,10 +2249,8 @@ namespace { if (!D) return; - if (Deserialized.count(D)) { - Deserialized.erase(D); + if (Deserialized.erase(D)) Chain.push_back(D); - } } void searchForID(ModuleFile &M, GlobalDeclID GlobalID) { @@ -2275,7 +2315,7 @@ void ASTReader::loadPendingDeclChain(serialization::GlobalDeclID ID) { } MergedDeclsMap::iterator MergedPos = combineStoredMergedDecls(CanonDecl, ID); if (MergedPos != MergedDecls.end()) - SearchDecls.append(MergedPos->second.begin(), MergedPos->second.end()); + SearchDecls.append(MergedPos->second.begin(), MergedPos->second.end()); // Build up the list of redeclarations. RedeclChainVisitor Visitor(*this, SearchDecls, RedeclsDeserialized, CanonID); @@ -2331,9 +2371,8 @@ namespace { void add(ObjCCategoryDecl *Cat) { // Only process each category once. - if (!Deserialized.count(Cat)) + if (!Deserialized.erase(Cat)) return; - Deserialized.erase(Cat); // Check for duplicate categories. if (Cat->getDeclName()) { diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp index c5325b5..367f75f 100644 --- a/lib/Serialization/ASTReaderStmt.cpp +++ b/lib/Serialization/ASTReaderStmt.cpp @@ -288,7 +288,7 @@ void ASTStmtReader::VisitDeclStmt(DeclStmt *S) { } } -void ASTStmtReader::VisitAsmStmt(AsmStmt *S) { +void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) { VisitStmt(S); unsigned NumOutputs = Record[Idx++]; unsigned NumInputs = Record[Idx++]; @@ -297,7 +297,6 @@ void ASTStmtReader::VisitAsmStmt(AsmStmt *S) { S->setRParenLoc(ReadSourceLocation(Record, Idx)); S->setVolatile(Record[Idx++]); S->setSimple(Record[Idx++]); - S->setMSAsm(Record[Idx++]); S->setAsmString(cast_or_null(Reader.ReadSubStmt())); @@ -566,6 +565,7 @@ void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) { E->setRHS(Reader.ReadSubExpr()); E->setOpcode((BinaryOperator::Opcode)Record[Idx++]); E->setOperatorLoc(ReadSourceLocation(Record, Idx)); + E->setFPContractable((bool)Record[Idx++]); } void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) { @@ -627,7 +627,8 @@ void ASTStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) { void ASTStmtReader::VisitInitListExpr(InitListExpr *E) { VisitExpr(E); - E->setSyntacticForm(cast_or_null(Reader.ReadSubStmt())); + if (InitListExpr *SyntForm = cast_or_null(Reader.ReadSubStmt())) + E->setSyntacticForm(SyntForm); E->setLBraceLoc(ReadSourceLocation(Record, Idx)); E->setRBraceLoc(ReadSourceLocation(Record, Idx)); bool isArrayFiller = Record[Idx++]; @@ -1087,6 +1088,7 @@ void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) { VisitCallExpr(E); E->Operator = (OverloadedOperatorKind)Record[Idx++]; E->Range = Reader.ReadSourceRange(F, Record, Idx); + E->setFPContractable((bool)Record[Idx++]); } void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) { @@ -1242,7 +1244,7 @@ void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) { E->setOperatorDelete(ReadDeclAs(Record, Idx)); E->AllocatedTypeInfo = GetTypeSourceInfo(Record, Idx); E->TypeIdParens = ReadSourceRange(Record, Idx); - E->StartLoc = ReadSourceLocation(Record, 
Idx); + E->Range = ReadSourceRange(Record, Idx); E->DirectInitRange = ReadSourceRange(Record, Idx); E->AllocateArgsArray(Reader.getContext(), isArray, NumPlacementArgs, @@ -1367,8 +1369,6 @@ void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) { void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) { VisitOverloadExpr(E); E->RequiresADL = Record[Idx++]; - if (E->RequiresADL) - E->StdIsAssociatedNamespace = Record[Idx++]; E->Overloaded = Record[Idx++]; E->NamingClass = ReadDeclAs(Record, Idx); } @@ -1469,6 +1469,16 @@ void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr( E->NameLoc = ReadSourceLocation(Record, Idx); } +void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) { + VisitExpr(E); + E->NumParameters = Record[Idx++]; + E->ParamPack = ReadDeclAs(Record, Idx); + E->NameLoc = ReadSourceLocation(Record, Idx); + ParmVarDecl **Parms = reinterpret_cast(E+1); + for (unsigned i = 0, n = E->NumParameters; i != n; ++i) + Parms[i] = ReadDeclAs(Record, Idx); +} + void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) { VisitExpr(E); E->Temporary = Reader.ReadSubExpr(); @@ -1701,8 +1711,12 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) { S = new (Context) DeclStmt(Empty); break; - case STMT_ASM: - S = new (Context) AsmStmt(Empty); + case STMT_GCCASM: + S = new (Context) GCCAsmStmt(Empty); + break; + + case STMT_MSASM: + S = new (Context) MSAsmStmt(Empty); break; case EXPR_PREDEFINED: @@ -2180,6 +2194,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) { case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK: S = new (Context) SubstNonTypeTemplateParmPackExpr(Empty); break; + + case EXPR_FUNCTION_PARM_PACK: + S = FunctionParmPackExpr::CreateEmpty(Context, + Record[ASTStmtReader::NumExprFields]); + break; case EXPR_MATERIALIZE_TEMPORARY: S = new (Context) MaterializeTemporaryExpr(Empty); diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp index 425d2e3..a2e8b71 100644 --- a/lib/Serialization/ASTWriter.cpp +++ b/lib/Serialization/ASTWriter.cpp @@ -25,9 +25,11 @@ #include "clang/AST/Type.h" #include "clang/AST/TypeLocVisitor.h" #include "clang/Serialization/ASTReader.h" +#include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/MacroInfo.h" #include "clang/Lex/PreprocessingRecord.h" #include "clang/Lex/Preprocessor.h" +#include "clang/Lex/PreprocessorOptions.h" #include "clang/Lex/HeaderSearch.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/FileSystemStatCache.h" @@ -35,6 +37,7 @@ #include "clang/Basic/SourceManager.h" #include "clang/Basic/SourceManagerInternals.h" #include "clang/Basic/TargetInfo.h" +#include "clang/Basic/TargetOptions.h" #include "clang/Basic/Version.h" #include "clang/Basic/VersionTuple.h" #include "llvm/ADT/APFloat.h" @@ -485,6 +488,8 @@ void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) { } void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) { Writer.AddSourceLocation(TL.getLocalRangeBegin(), Record); + Writer.AddSourceLocation(TL.getLParenLoc(), Record); + Writer.AddSourceLocation(TL.getRParenLoc(), Record); Writer.AddSourceLocation(TL.getLocalRangeEnd(), Record); for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) Writer.AddDeclRef(TL.getArg(i), Record); @@ -667,7 +672,8 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream, RECORD(STMT_BREAK); RECORD(STMT_RETURN); RECORD(STMT_DECL); - RECORD(STMT_ASM); + RECORD(STMT_GCCASM); + RECORD(STMT_MSASM); RECORD(EXPR_PREDEFINED); RECORD(EXPR_DECL_REF); 
RECORD(EXPR_INTEGER_LITERAL); @@ -763,14 +769,27 @@ void ASTWriter::WriteBlockInfoBlock() { #define BLOCK(X) EmitBlockID(X ## _ID, #X, Stream, Record) #define RECORD(X) EmitRecordID(X, #X, Stream, Record) + // Control Block. + BLOCK(CONTROL_BLOCK); + RECORD(METADATA); + RECORD(IMPORTS); + RECORD(LANGUAGE_OPTIONS); + RECORD(TARGET_OPTIONS); + RECORD(ORIGINAL_FILE); + RECORD(ORIGINAL_PCH_DIR); + RECORD(INPUT_FILE_OFFSETS); + RECORD(DIAGNOSTIC_OPTIONS); + RECORD(FILE_SYSTEM_OPTIONS); + RECORD(HEADER_SEARCH_OPTIONS); + RECORD(PREPROCESSOR_OPTIONS); + + BLOCK(INPUT_FILES_BLOCK); + RECORD(INPUT_FILE); + // AST Top-Level Block. BLOCK(AST_BLOCK); - RECORD(ORIGINAL_FILE_NAME); - RECORD(ORIGINAL_FILE_ID); RECORD(TYPE_OFFSET); RECORD(DECL_OFFSET); - RECORD(LANGUAGE_OPTIONS); - RECORD(METADATA); RECORD(IDENTIFIER_OFFSET); RECORD(IDENTIFIER_TABLE); RECORD(EXTERNAL_DEFINITIONS); @@ -784,11 +803,8 @@ void ASTWriter::WriteBlockInfoBlock() { RECORD(PP_COUNTER_VALUE); RECORD(SOURCE_LOCATION_OFFSETS); RECORD(SOURCE_LOCATION_PRELOADS); - RECORD(STAT_CACHE); RECORD(EXT_VECTOR_DECLS); - RECORD(VERSION_CONTROL_BRANCH_REVISION); RECORD(PPD_ENTITIES_OFFSETS); - RECORD(IMPORTS); RECORD(REFERENCED_SELECTOR_POOL); RECORD(TU_UPDATE_LEXICAL); RECORD(LOCAL_REDECLARATIONS_MAP); @@ -803,11 +819,9 @@ void ASTWriter::WriteBlockInfoBlock() { RECORD(DIAG_PRAGMA_MAPPINGS); RECORD(CUDA_SPECIAL_DECL_REFS); RECORD(HEADER_SEARCH_TABLE); - RECORD(ORIGINAL_PCH_DIR); RECORD(FP_PRAGMA_OPTIONS); RECORD(OPENCL_EXTENSIONS); RECORD(DELEGATING_CTORS); - RECORD(FILE_SOURCE_LOCATION_OFFSETS); RECORD(KNOWN_NAMESPACES); RECORD(MODULE_OFFSET_MAP); RECORD(SOURCE_MANAGER_LINE_TABLE); @@ -817,6 +831,8 @@ void ASTWriter::WriteBlockInfoBlock() { RECORD(MERGED_DECLARATIONS); RECORD(LOCAL_REDECLARATIONS); RECORD(OBJC_CATEGORIES); + RECORD(MACRO_OFFSET); + RECORD(MACRO_UPDATES); // SourceManager Block. BLOCK(SOURCE_MANAGER_BLOCK); @@ -972,25 +988,25 @@ adjustFilenameForRelocatablePCH(const char *Filename, StringRef isysroot) { return Filename + Pos; } -/// \brief Write the AST metadata (e.g., i686-apple-darwin9). -void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot, - const std::string &OutputFile) { +/// \brief Write the control block. 
+void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context, + StringRef isysroot, + const std::string &OutputFile) { using namespace llvm; - - // Metadata - const TargetInfo &Target = Context.getTargetInfo(); - BitCodeAbbrev *MetaAbbrev = new BitCodeAbbrev(); - MetaAbbrev->Add(BitCodeAbbrevOp(METADATA)); - MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // AST major - MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // AST minor - MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang major - MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang minor - MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable - MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Has errors - MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Target triple - unsigned MetaAbbrevCode = Stream.EmitAbbrev(MetaAbbrev); - + Stream.EnterSubblock(CONTROL_BLOCK_ID, 5); RecordData Record; + + // Metadata + BitCodeAbbrev *MetadataAbbrev = new BitCodeAbbrev(); + MetadataAbbrev->Add(BitCodeAbbrevOp(METADATA)); + MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Major + MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Minor + MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang maj. + MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang min. + MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable + MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Errors + MetadataAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag + unsigned MetadataAbbrevCode = Stream.EmitAbbrev(MetadataAbbrev); Record.push_back(METADATA); Record.push_back(VERSION_MAJOR); Record.push_back(VERSION_MINOR); @@ -998,9 +1014,10 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot, Record.push_back(CLANG_VERSION_MINOR); Record.push_back(!isysroot.empty()); Record.push_back(ASTHasCompilerErrors); - const std::string &Triple = Target.getTriple().getTriple(); - Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, Triple); + Stream.EmitRecordWithBlob(MetadataAbbrevCode, Record, + getClangFullRepositoryVersion()); + // Imports if (Chain) { serialization::ModuleManager &Mgr = Chain->getModuleManager(); llvm::SmallVector ModulePaths; @@ -1022,11 +1039,131 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot, Stream.EmitRecord(IMPORTS, Record); } + // Language options. + Record.clear(); + const LangOptions &LangOpts = Context.getLangOpts(); +#define LANGOPT(Name, Bits, Default, Description) \ + Record.push_back(LangOpts.Name); +#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \ + Record.push_back(static_cast(LangOpts.get##Name())); +#include "clang/Basic/LangOptions.def" + + Record.push_back((unsigned) LangOpts.ObjCRuntime.getKind()); + AddVersionTuple(LangOpts.ObjCRuntime.getVersion(), Record); + + Record.push_back(LangOpts.CurrentModule.size()); + Record.append(LangOpts.CurrentModule.begin(), LangOpts.CurrentModule.end()); + Stream.EmitRecord(LANGUAGE_OPTIONS, Record); + + // Target options. 
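// [Editorial sketch, not part of the vendor import.] The writer side of the new
// control block mirrors the readers shown earlier: a string is emitted as its
// length followed by its characters (exactly how CurrentModule is pushed just
// above), and a list as its size followed by its elements, which is what the
// target-option and header-search loops below do. A standalone sketch of that
// flattening step, with illustrative names only:

#include <cstdint>
#include <string>
#include <vector>

static void writeString(std::vector<uint64_t> &Record, const std::string &S) {
  Record.push_back(S.size());                      // length prefix
  Record.insert(Record.end(), S.begin(), S.end()); // then the character values
}

static void writeStringList(std::vector<uint64_t> &Record,
                            const std::vector<std::string> &Strings) {
  Record.push_back(Strings.size());                // count prefix
  for (unsigned I = 0, N = Strings.size(); I != N; ++I)
    writeString(Record, Strings[I]);               // then each element
}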
+ Record.clear(); + const TargetInfo &Target = Context.getTargetInfo(); + const TargetOptions &TargetOpts = Target.getTargetOpts(); + AddString(TargetOpts.Triple, Record); + AddString(TargetOpts.CPU, Record); + AddString(TargetOpts.ABI, Record); + AddString(TargetOpts.CXXABI, Record); + AddString(TargetOpts.LinkerVersion, Record); + Record.push_back(TargetOpts.FeaturesAsWritten.size()); + for (unsigned I = 0, N = TargetOpts.FeaturesAsWritten.size(); I != N; ++I) { + AddString(TargetOpts.FeaturesAsWritten[I], Record); + } + Record.push_back(TargetOpts.Features.size()); + for (unsigned I = 0, N = TargetOpts.Features.size(); I != N; ++I) { + AddString(TargetOpts.Features[I], Record); + } + Stream.EmitRecord(TARGET_OPTIONS, Record); + + // Diagnostic options. + Record.clear(); + const DiagnosticOptions &DiagOpts + = Context.getDiagnostics().getDiagnosticOptions(); +#define DIAGOPT(Name, Bits, Default) Record.push_back(DiagOpts.Name); +#define ENUM_DIAGOPT(Name, Type, Bits, Default) \ + Record.push_back(static_cast(DiagOpts.get##Name())); +#include "clang/Basic/DiagnosticOptions.def" + Record.push_back(DiagOpts.Warnings.size()); + for (unsigned I = 0, N = DiagOpts.Warnings.size(); I != N; ++I) + AddString(DiagOpts.Warnings[I], Record); + // Note: we don't serialize the log or serialization file names, because they + // are generally transient files and will almost always be overridden. + Stream.EmitRecord(DIAGNOSTIC_OPTIONS, Record); + + // File system options. + Record.clear(); + const FileSystemOptions &FSOpts + = Context.getSourceManager().getFileManager().getFileSystemOptions(); + AddString(FSOpts.WorkingDir, Record); + Stream.EmitRecord(FILE_SYSTEM_OPTIONS, Record); + + // Header search options. + Record.clear(); + const HeaderSearchOptions &HSOpts + = PP.getHeaderSearchInfo().getHeaderSearchOpts(); + AddString(HSOpts.Sysroot, Record); + + // Include entries. + Record.push_back(HSOpts.UserEntries.size()); + for (unsigned I = 0, N = HSOpts.UserEntries.size(); I != N; ++I) { + const HeaderSearchOptions::Entry &Entry = HSOpts.UserEntries[I]; + AddString(Entry.Path, Record); + Record.push_back(static_cast(Entry.Group)); + Record.push_back(Entry.IsUserSupplied); + Record.push_back(Entry.IsFramework); + Record.push_back(Entry.IgnoreSysRoot); + Record.push_back(Entry.IsInternal); + Record.push_back(Entry.ImplicitExternC); + } + + // System header prefixes. + Record.push_back(HSOpts.SystemHeaderPrefixes.size()); + for (unsigned I = 0, N = HSOpts.SystemHeaderPrefixes.size(); I != N; ++I) { + AddString(HSOpts.SystemHeaderPrefixes[I].Prefix, Record); + Record.push_back(HSOpts.SystemHeaderPrefixes[I].IsSystemHeader); + } + + AddString(HSOpts.ResourceDir, Record); + AddString(HSOpts.ModuleCachePath, Record); + Record.push_back(HSOpts.DisableModuleHash); + Record.push_back(HSOpts.UseBuiltinIncludes); + Record.push_back(HSOpts.UseStandardSystemIncludes); + Record.push_back(HSOpts.UseStandardCXXIncludes); + Record.push_back(HSOpts.UseLibcxx); + Stream.EmitRecord(HEADER_SEARCH_OPTIONS, Record); + + // Preprocessor options. + Record.clear(); + const PreprocessorOptions &PPOpts = PP.getPreprocessorOpts(); + + // Macro definitions. 
+ Record.push_back(PPOpts.Macros.size()); + for (unsigned I = 0, N = PPOpts.Macros.size(); I != N; ++I) { + AddString(PPOpts.Macros[I].first, Record); + Record.push_back(PPOpts.Macros[I].second); + } + + // Includes + Record.push_back(PPOpts.Includes.size()); + for (unsigned I = 0, N = PPOpts.Includes.size(); I != N; ++I) + AddString(PPOpts.Includes[I], Record); + + // Macro includes + Record.push_back(PPOpts.MacroIncludes.size()); + for (unsigned I = 0, N = PPOpts.MacroIncludes.size(); I != N; ++I) + AddString(PPOpts.MacroIncludes[I], Record); + + Record.push_back(PPOpts.UsePredefines); + AddString(PPOpts.ImplicitPCHInclude, Record); + AddString(PPOpts.ImplicitPTHInclude, Record); + Record.push_back(static_cast(PPOpts.ObjCXXARCStandardLibrary)); + Stream.EmitRecord(PREPROCESSOR_OPTIONS, Record); + // Original file name and file ID SourceManager &SM = Context.getSourceManager(); if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) { BitCodeAbbrev *FileAbbrev = new BitCodeAbbrev(); - FileAbbrev->Add(BitCodeAbbrevOp(ORIGINAL_FILE_NAME)); + FileAbbrev->Add(BitCodeAbbrevOp(ORIGINAL_FILE)); + FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // File ID FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name unsigned FileAbbrevCode = Stream.EmitAbbrev(FileAbbrev); @@ -1037,13 +1174,10 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot, const char *MainFileNameStr = MainFilePath.c_str(); MainFileNameStr = adjustFilenameForRelocatablePCH(MainFileNameStr, isysroot); - RecordData Record; - Record.push_back(ORIGINAL_FILE_NAME); - Stream.EmitRecordWithBlob(FileAbbrevCode, Record, MainFileNameStr); - Record.clear(); + Record.push_back(ORIGINAL_FILE); Record.push_back(SM.getMainFileID().getOpaqueValue()); - Stream.EmitRecord(ORIGINAL_FILE_ID, Record); + Stream.EmitRecordWithBlob(FileAbbrevCode, Record, MainFileNameStr); } // Original PCH directory @@ -1063,32 +1197,86 @@ void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot, Stream.EmitRecordWithBlob(AbbrevCode, Record, origDir); } - // Repository branch/version information. - BitCodeAbbrev *RepoAbbrev = new BitCodeAbbrev(); - RepoAbbrev->Add(BitCodeAbbrevOp(VERSION_CONTROL_BRANCH_REVISION)); - RepoAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag - unsigned RepoAbbrevCode = Stream.EmitAbbrev(RepoAbbrev); - Record.clear(); - Record.push_back(VERSION_CONTROL_BRANCH_REVISION); - Stream.EmitRecordWithBlob(RepoAbbrevCode, Record, - getClangFullRepositoryVersion()); + WriteInputFiles(Context.SourceMgr, isysroot); + Stream.ExitBlock(); } -/// \brief Write the LangOptions structure. -void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) { +void ASTWriter::WriteInputFiles(SourceManager &SourceMgr, StringRef isysroot) { + using namespace llvm; + Stream.EnterSubblock(INPUT_FILES_BLOCK_ID, 4); RecordData Record; -#define LANGOPT(Name, Bits, Default, Description) \ - Record.push_back(LangOpts.Name); -#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \ - Record.push_back(static_cast(LangOpts.get##Name())); -#include "clang/Basic/LangOptions.def" + + // Create input-file abbreviation. 
+ BitCodeAbbrev *IFAbbrev = new BitCodeAbbrev(); + IFAbbrev->Add(BitCodeAbbrevOp(INPUT_FILE)); + IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID + IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size + IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time + IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Overridden + IFAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name + unsigned IFAbbrevCode = Stream.EmitAbbrev(IFAbbrev); + + // Write out all of the input files. + std::vector InputFileOffsets; + for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size(); I != N; ++I) { + // Get this source location entry. + const SrcMgr::SLocEntry *SLoc = &SourceMgr.getLocalSLocEntry(I); + assert(&SourceMgr.getSLocEntry(FileID::get(I)) == SLoc); - Record.push_back((unsigned) LangOpts.ObjCRuntime.getKind()); - AddVersionTuple(LangOpts.ObjCRuntime.getVersion(), Record); + // We only care about file entries that were not overridden. + if (!SLoc->isFile()) + continue; + const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache(); + if (!Cache->OrigEntry) + continue; + + // Record this entry's offset. + InputFileOffsets.push_back(Stream.GetCurrentBitNo()); + InputFileIDs[Cache->OrigEntry] = InputFileOffsets.size(); + + Record.clear(); + Record.push_back(INPUT_FILE); + Record.push_back(InputFileOffsets.size()); + + // Emit size/modification time for this file. + Record.push_back(Cache->OrigEntry->getSize()); + Record.push_back(Cache->OrigEntry->getModificationTime()); + + // Whether this file was overridden. + Record.push_back(Cache->BufferOverridden); + + // Turn the file name into an absolute path, if it isn't already. + const char *Filename = Cache->OrigEntry->getName(); + SmallString<128> FilePath(Filename); + + // Ask the file manager to fixup the relative path for us. This will + // honor the working directory. + SourceMgr.getFileManager().FixupRelativePath(FilePath); + + // FIXME: This call to make_absolute shouldn't be necessary, the + // call to FixupRelativePath should always return an absolute path. + llvm::sys::fs::make_absolute(FilePath); + Filename = FilePath.c_str(); + + Filename = adjustFilenameForRelocatablePCH(Filename, isysroot); + + Stream.EmitRecordWithBlob(IFAbbrevCode, Record, Filename); + } - Record.push_back(LangOpts.CurrentModule.size()); - Record.append(LangOpts.CurrentModule.begin(), LangOpts.CurrentModule.end()); - Stream.EmitRecord(LANGUAGE_OPTIONS, Record); + Stream.ExitBlock(); + + // Create input file offsets abbreviation. + BitCodeAbbrev *OffsetsAbbrev = new BitCodeAbbrev(); + OffsetsAbbrev->Add(BitCodeAbbrevOp(INPUT_FILE_OFFSETS)); + OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # input files + OffsetsAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Array + unsigned OffsetsAbbrevCode = Stream.EmitAbbrev(OffsetsAbbrev); + + // Write input file offsets. + Record.clear(); + Record.push_back(INPUT_FILE_OFFSETS); + Record.push_back(InputFileOffsets.size()); + Stream.EmitRecordWithBlob(OffsetsAbbrevCode, Record, data(InputFileOffsets)); } //===----------------------------------------------------------------------===// @@ -1139,46 +1327,6 @@ public: }; } // end anonymous namespace -/// \brief Write the stat() system call cache to the AST file. -void ASTWriter::WriteStatCache(MemorizeStatCalls &StatCalls) { - // Build the on-disk hash table containing information about every - // stat() call. 
- OnDiskChainedHashTableGenerator Generator; - unsigned NumStatEntries = 0; - for (MemorizeStatCalls::iterator Stat = StatCalls.begin(), - StatEnd = StatCalls.end(); - Stat != StatEnd; ++Stat, ++NumStatEntries) { - StringRef Filename = Stat->first(); - Generator.insert(Filename.data(), Stat->second); - } - - // Create the on-disk hash table in a buffer. - SmallString<4096> StatCacheData; - uint32_t BucketOffset; - { - llvm::raw_svector_ostream Out(StatCacheData); - // Make sure that no bucket is at offset 0 - clang::io::Emit32(Out, 0); - BucketOffset = Generator.Emit(Out); - } - - // Create a blob abbreviation - using namespace llvm; - BitCodeAbbrev *Abbrev = new BitCodeAbbrev(); - Abbrev->Add(BitCodeAbbrevOp(STAT_CACHE)); - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); - unsigned StatCacheAbbrev = Stream.EmitAbbrev(Abbrev); - - // Write the stat cache - RecordData Record; - Record.push_back(STAT_CACHE); - Record.push_back(BucketOffset); - Record.push_back(NumStatEntries); - Stream.EmitRecordWithBlob(StatCacheAbbrev, Record, StatCacheData.str()); -} - //===----------------------------------------------------------------------===// // Source Manager Serialization //===----------------------------------------------------------------------===// @@ -1194,13 +1342,10 @@ static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) { Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives // FileEntry fields. - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // BufferOverridden + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Input File ID Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumCreatedFIDs Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 24)); // FirstDeclIndex Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumDecls - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name return Stream.EmitAbbrev(Abbrev); } @@ -1329,8 +1474,6 @@ namespace { /// \brief Write the header search block for the list of files that /// /// \param HS The header search structure to save. -/// -/// \param Chain Whether we're creating a chained AST file. void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot) { SmallVector FilesByUID; HS.getFileMgr().GetUniqueIDMapping(FilesByUID); @@ -1427,15 +1570,14 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, // Write out the source location entry table. We skip the first // entry, which is always the same dummy entry. std::vector SLocEntryOffsets; - // Write out the offsets of only source location file entries. - // We will go through them in ASTReader::validateFileEntries(). - std::vector SLocFileEntryOffsets; RecordData PreloadSLocs; SLocEntryOffsets.reserve(SourceMgr.local_sloc_entry_size() - 1); for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size(); I != N; ++I) { // Get this source location entry. const SrcMgr::SLocEntry *SLoc = &SourceMgr.getLocalSLocEntry(I); + FileID FID = FileID::get(I); + assert(&SourceMgr.getSLocEntry(FID) == SLoc); // Record the offset of this source-location entry. 
SLocEntryOffsets.push_back(Stream.GetCurrentBitNo()); @@ -1446,7 +1588,6 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache(); if (Cache->OrigEntry) { Code = SM_SLOC_FILE_ENTRY; - SLocFileEntryOffsets.push_back(Stream.GetCurrentBitNo()); } else Code = SM_SLOC_BUFFER_ENTRY; } else @@ -1467,16 +1608,13 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, assert(Content->OrigEntry == Content->ContentsEntry && "Writing to AST an overridden file is not supported"); - // The source location entry is a file. The blob associated - // with this entry is the file name. + // The source location entry is a file. Emit input file ID. + assert(InputFileIDs[Content->OrigEntry] != 0 && "Missed file entry"); + Record.push_back(InputFileIDs[Content->OrigEntry]); - // Emit size/modification time for this file. - Record.push_back(Content->OrigEntry->getSize()); - Record.push_back(Content->OrigEntry->getModificationTime()); - Record.push_back(Content->BufferOverridden); Record.push_back(File.NumCreatedFIDs); - FileDeclIDsTy::iterator FDI = FileDeclIDs.find(SLoc); + FileDeclIDsTy::iterator FDI = FileDeclIDs.find(FID); if (FDI != FileDeclIDs.end()) { Record.push_back(FDI->second->FirstDeclIndex); Record.push_back(FDI->second->DeclIDs.size()); @@ -1485,21 +1623,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, Record.push_back(0); } - // Turn the file name into an absolute path, if it isn't already. - const char *Filename = Content->OrigEntry->getName(); - SmallString<128> FilePath(Filename); - - // Ask the file manager to fixup the relative path for us. This will - // honor the working directory. - SourceMgr.getFileManager().FixupRelativePath(FilePath); - - // FIXME: This call to make_absolute shouldn't be necessary, the - // call to FixupRelativePath should always return an absolute path. - llvm::sys::fs::make_absolute(FilePath); - Filename = FilePath.c_str(); - - Filename = adjustFilenameForRelocatablePCH(Filename, isysroot); - Stream.EmitRecordWithBlob(SLocFileAbbrv, Record, Filename); + Stream.EmitRecordWithAbbrev(SLocFileAbbrv, Record); if (Content->BufferOverridden) { Record.clear(); @@ -1570,18 +1694,6 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr, Record.push_back(SourceMgr.getNextLocalOffset() - 1); // skip dummy Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record, data(SLocEntryOffsets)); - Abbrev = new BitCodeAbbrev(); - Abbrev->Add(BitCodeAbbrevOp(FILE_SOURCE_LOCATION_OFFSETS)); - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs - Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets - unsigned SLocFileOffsetsAbbrev = Stream.EmitAbbrev(Abbrev); - - Record.clear(); - Record.push_back(FILE_SOURCE_LOCATION_OFFSETS); - Record.push_back(SLocFileEntryOffsets.size()); - Stream.EmitRecordWithBlob(SLocFileOffsetsAbbrev, Record, - data(SLocFileEntryOffsets)); - // Write the source location entry preloads array, telling the AST // reader which source locations entries it should load eagerly. 
Stream.EmitRecord(SOURCE_LOCATION_PRELOADS, PreloadSLocs); @@ -1675,102 +1787,129 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) { SmallVector, 2> MacrosToEmit; llvm::SmallPtrSet MacroDefinitionsSeen; - for (Preprocessor::macro_iterator I = PP.macro_begin(Chain == 0), + for (Preprocessor::macro_iterator I = PP.macro_begin(Chain == 0), E = PP.macro_end(Chain == 0); I != E; ++I) { - const IdentifierInfo *Name = I->first; if (!IsModule || I->second->isPublic()) { - MacroDefinitionsSeen.insert(Name); + MacroDefinitionsSeen.insert(I->first); MacrosToEmit.push_back(std::make_pair(I->first, I->second)); } } - + // Sort the set of macro definitions that need to be serialized by the // name of the macro, to provide a stable ordering. - llvm::array_pod_sort(MacrosToEmit.begin(), MacrosToEmit.end(), + llvm::array_pod_sort(MacrosToEmit.begin(), MacrosToEmit.end(), &compareMacroDefinitions); - - // Resolve any identifiers that defined macros at the time they were - // deserialized, adding them to the list of macros to emit (if appropriate). - for (unsigned I = 0, N = DeserializedMacroNames.size(); I != N; ++I) { - IdentifierInfo *Name - = const_cast(DeserializedMacroNames[I]); - if (Name->hasMacroDefinition() && MacroDefinitionsSeen.insert(Name)) - MacrosToEmit.push_back(std::make_pair(Name, PP.getMacroInfo(Name))); - } - + + /// \brief Offsets of each of the macros into the bitstream, indexed by + /// the local macro ID + /// + /// For each identifier that is associated with a macro, this map + /// provides the offset into the bitstream where that macro is + /// defined. + std::vector MacroOffsets; + for (unsigned I = 0, N = MacrosToEmit.size(); I != N; ++I) { const IdentifierInfo *Name = MacrosToEmit[I].first; - MacroInfo *MI = MacrosToEmit[I].second; - if (!MI) - continue; - - // Don't emit builtin macros like __LINE__ to the AST file unless they have - // been redefined by the header (in which case they are not isBuiltinMacro). - // Also skip macros from a AST file if we're chaining. - - // FIXME: There is a (probably minor) optimization we could do here, if - // the macro comes from the original PCH but the identifier comes from a - // chained PCH, by storing the offset into the original PCH rather than - // writing the macro definition a second time. - if (MI->isBuiltinMacro() || - (Chain && - Name->isFromAST() && !Name->hasChangedSinceDeserialization() && - MI->isFromAST() && !MI->hasChangedAfterLoad())) - continue; - AddIdentifierRef(Name, Record); - MacroOffsets[Name] = Stream.GetCurrentBitNo(); - Record.push_back(MI->getDefinitionLoc().getRawEncoding()); - Record.push_back(MI->isUsed()); - Record.push_back(MI->isPublic()); - AddSourceLocation(MI->getVisibilityLocation(), Record); - unsigned Code; - if (MI->isObjectLike()) { - Code = PP_MACRO_OBJECT_LIKE; - } else { - Code = PP_MACRO_FUNCTION_LIKE; + for (MacroInfo *MI = MacrosToEmit[I].second; MI; + MI = MI->getPreviousDefinition()) { + MacroID ID = getMacroRef(MI); + if (!ID) + continue; - Record.push_back(MI->isC99Varargs()); - Record.push_back(MI->isGNUVarargs()); - Record.push_back(MI->getNumArgs()); - for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end(); - I != E; ++I) - AddIdentifierRef(*I, Record); - } + // Skip macros from a AST file if we're chaining. + if (Chain && MI->isFromAST() && !MI->hasChangedAfterLoad()) + continue; - // If we have a detailed preprocessing record, record the macro definition - // ID that corresponds to this macro. 
- if (PPRec) - Record.push_back(MacroDefinitions[PPRec->findMacroDefinition(MI)]); + if (ID < FirstMacroID) { + // This will have been dealt with via an update record. + assert(MacroUpdates.count(MI) > 0 && "Missing macro update"); + continue; + } - Stream.EmitRecord(Code, Record); - Record.clear(); + // Record the local offset of this macro. + unsigned Index = ID - FirstMacroID; + if (Index == MacroOffsets.size()) + MacroOffsets.push_back(Stream.GetCurrentBitNo()); + else { + if (Index > MacroOffsets.size()) + MacroOffsets.resize(Index + 1); - // Emit the tokens array. - for (unsigned TokNo = 0, e = MI->getNumTokens(); TokNo != e; ++TokNo) { - // Note that we know that the preprocessor does not have any annotation - // tokens in it because they are created by the parser, and thus can't be - // in a macro definition. - const Token &Tok = MI->getReplacementToken(TokNo); - - Record.push_back(Tok.getLocation().getRawEncoding()); - Record.push_back(Tok.getLength()); - - // FIXME: When reading literal tokens, reconstruct the literal pointer if - // it is needed. - AddIdentifierRef(Tok.getIdentifierInfo(), Record); - // FIXME: Should translate token kind to a stable encoding. - Record.push_back(Tok.getKind()); - // FIXME: Should translate token flags to a stable encoding. - Record.push_back(Tok.getFlags()); - - Stream.EmitRecord(PP_TOKEN, Record); + MacroOffsets[Index] = Stream.GetCurrentBitNo(); + } + + AddIdentifierRef(Name, Record); + addMacroRef(MI, Record); + Record.push_back(inferSubmoduleIDFromLocation(MI->getDefinitionLoc())); + AddSourceLocation(MI->getDefinitionLoc(), Record); + AddSourceLocation(MI->getUndefLoc(), Record); + Record.push_back(MI->isUsed()); + Record.push_back(MI->isPublic()); + AddSourceLocation(MI->getVisibilityLocation(), Record); + unsigned Code; + if (MI->isObjectLike()) { + Code = PP_MACRO_OBJECT_LIKE; + } else { + Code = PP_MACRO_FUNCTION_LIKE; + + Record.push_back(MI->isC99Varargs()); + Record.push_back(MI->isGNUVarargs()); + Record.push_back(MI->getNumArgs()); + for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end(); + I != E; ++I) + AddIdentifierRef(*I, Record); + } + + // If we have a detailed preprocessing record, record the macro definition + // ID that corresponds to this macro. + if (PPRec) + Record.push_back(MacroDefinitions[PPRec->findMacroDefinition(MI)]); + + Stream.EmitRecord(Code, Record); Record.clear(); + + // Emit the tokens array. + for (unsigned TokNo = 0, e = MI->getNumTokens(); TokNo != e; ++TokNo) { + // Note that we know that the preprocessor does not have any annotation + // tokens in it because they are created by the parser, and thus can't + // be in a macro definition. + const Token &Tok = MI->getReplacementToken(TokNo); + + Record.push_back(Tok.getLocation().getRawEncoding()); + Record.push_back(Tok.getLength()); + + // FIXME: When reading literal tokens, reconstruct the literal pointer + // if it is needed. + AddIdentifierRef(Tok.getIdentifierInfo(), Record); + // FIXME: Should translate token kind to a stable encoding. + Record.push_back(Tok.getKind()); + // FIXME: Should translate token flags to a stable encoding. + Record.push_back(Tok.getFlags()); + + Stream.EmitRecord(PP_TOKEN, Record); + Record.clear(); + } + ++NumMacros; } - ++NumMacros; } Stream.ExitBlock(); + + // Write the offsets table for macro IDs. 
+ using namespace llvm; + BitCodeAbbrev *Abbrev = new BitCodeAbbrev(); + Abbrev->Add(BitCodeAbbrevOp(MACRO_OFFSET)); + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of macros + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); + + unsigned MacroOffsetAbbrev = Stream.EmitAbbrev(Abbrev); + Record.clear(); + Record.push_back(MACRO_OFFSET); + Record.push_back(MacroOffsets.size()); + Record.push_back(FirstMacroID - NUM_PREDEF_MACRO_IDS); + Stream.EmitRecordWithBlob(MacroOffsetAbbrev, Record, + data(MacroOffsets)); } void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) { @@ -1794,6 +1933,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) { Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // filename length Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // in quotes Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // kind + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // imported module Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); InclusionAbbrev = Stream.EmitAbbrev(Abbrev); } @@ -1836,6 +1976,7 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) { Record.push_back(ID->getFileName().size()); Record.push_back(ID->wasInQuotes()); Record.push_back(static_cast(ID->getKind())); + Record.push_back(ID->importedModule()); SmallString<64> Buffer; Buffer += ID->getFileName(); // Check that the FileEntry is not null because it was not resolved and @@ -1935,6 +2076,11 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) { unsigned HeaderAbbrev = Stream.EmitAbbrev(Abbrev); Abbrev = new BitCodeAbbrev(); + Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_TOPHEADER)); + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name + unsigned TopHeaderAbbrev = Stream.EmitAbbrev(Abbrev); + + Abbrev = new BitCodeAbbrev(); Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_UMBRELLA_DIR)); Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name unsigned UmbrellaDirAbbrev = Stream.EmitAbbrev(Abbrev); @@ -1944,6 +2090,11 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) { Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Feature unsigned RequiresAbbrev = Stream.EmitAbbrev(Abbrev); + Abbrev = new BitCodeAbbrev(); + Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_EXCLUDED_HEADER)); + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name + unsigned ExcludedHeaderAbbrev = Stream.EmitAbbrev(Abbrev); + // Write the submodule metadata block. RecordData Record; Record.push_back(getNumberOfModules(WritingModule)); @@ -2005,6 +2156,19 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) { Stream.EmitRecordWithBlob(HeaderAbbrev, Record, Mod->Headers[I]->getName()); } + // Emit the excluded headers. + for (unsigned I = 0, N = Mod->ExcludedHeaders.size(); I != N; ++I) { + Record.clear(); + Record.push_back(SUBMODULE_EXCLUDED_HEADER); + Stream.EmitRecordWithBlob(ExcludedHeaderAbbrev, Record, + Mod->ExcludedHeaders[I]->getName()); + } + for (unsigned I = 0, N = Mod->TopHeaders.size(); I != N; ++I) { + Record.clear(); + Record.push_back(SUBMODULE_TOPHEADER); + Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, + Mod->TopHeaders[I]->getName()); + } // Emit the imports. if (!Mod->Imports.empty()) { @@ -2067,24 +2231,35 @@ ASTWriter::inferSubmoduleIDFromLocation(SourceLocation Loc) { } void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag) { + // FIXME: Make it work properly with modules. 
+ llvm::SmallDenseMap + DiagStateIDMap; + unsigned CurrID = 0; + DiagStateIDMap[&Diag.DiagStates.front()] = ++CurrID; // the command-line one. RecordData Record; for (DiagnosticsEngine::DiagStatePointsTy::const_iterator I = Diag.DiagStatePoints.begin(), E = Diag.DiagStatePoints.end(); I != E; ++I) { - const DiagnosticsEngine::DiagStatePoint &point = *I; + const DiagnosticsEngine::DiagStatePoint &point = *I; if (point.Loc.isInvalid()) continue; Record.push_back(point.Loc.getRawEncoding()); - for (DiagnosticsEngine::DiagState::const_iterator - I = point.State->begin(), E = point.State->end(); I != E; ++I) { - if (I->second.isPragma()) { - Record.push_back(I->first); - Record.push_back(I->second.getMapping()); + unsigned &DiagStateID = DiagStateIDMap[point.State]; + Record.push_back(DiagStateID); + + if (DiagStateID == 0) { + DiagStateID = ++CurrID; + for (DiagnosticsEngine::DiagState::const_iterator + I = point.State->begin(), E = point.State->end(); I != E; ++I) { + if (I->second.isPragma()) { + Record.push_back(I->first); + Record.push_back(I->second.getMapping()); + } } + Record.push_back(-1); // mark the end of the diag/map pairs for this + // location. } - Record.push_back(-1); // mark the end of the diag/map pairs for this - // location. } if (!Record.empty()) @@ -2238,9 +2413,11 @@ void ASTWriter::WriteFileDeclIDsMap() { BitCodeAbbrev *Abbrev = new BitCodeAbbrev(); Abbrev->Add(BitCodeAbbrevOp(FILE_SORTED_DECLS)); + Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev); Record.push_back(FILE_SORTED_DECLS); + Record.push_back(FileSortedIDs.size()); Stream.EmitRecordWithBlob(AbbrevCode, Record, data(FileSortedIDs)); } @@ -2495,17 +2672,17 @@ class ASTIdentifierTableTrait { II->getFETokenInfo()) return true; - return hasMacroDefinition(II, Macro); + return hadMacroDefinition(II, Macro); } - - bool hasMacroDefinition(IdentifierInfo *II, MacroInfo *&Macro) { - if (!II->hasMacroDefinition()) + + bool hadMacroDefinition(IdentifierInfo *II, MacroInfo *&Macro) { + if (!II->hadMacroDefinition()) return false; - - if (Macro || (Macro = PP.getMacroInfo(II))) + + if (Macro || (Macro = PP.getMacroInfoHistory(II))) return !Macro->isBuiltinMacro() && (!IsModule || Macro->isPublic()); - - return false; + + return false; } public: @@ -2529,10 +2706,17 @@ public: unsigned DataLen = 4; // 4 bytes for the persistent ID << 1 MacroInfo *Macro = 0; if (isInterestingIdentifier(II, Macro)) { - DataLen += 2; // 2 bytes for builtin ID, flags - if (hasMacroDefinition(II, Macro)) - DataLen += 8; - + DataLen += 2; // 2 bytes for builtin ID + DataLen += 2; // 2 bytes for flags + if (hadMacroDefinition(II, Macro)) { + for (MacroInfo *M = Macro; M; M = M->getPreviousDefinition()) { + if (Writer.getMacroRef(M) != 0) + DataLen += 4; + } + + DataLen += 4; + } + for (IdentifierResolver::iterator D = IdResolver.begin(II), DEnd = IdResolver.end(); D != DEnd; ++D) @@ -2563,23 +2747,28 @@ public: } clang::io::Emit32(Out, (ID << 1) | 0x01); - uint32_t Bits = 0; - bool HasMacroDefinition = hasMacroDefinition(II, Macro); - Bits = (uint32_t)II->getObjCOrBuiltinID(); - assert((Bits & 0x7ff) == Bits && "ObjCOrBuiltinID too big for ASTReader."); - Bits = (Bits << 1) | unsigned(HasMacroDefinition); + uint32_t Bits = (uint32_t)II->getObjCOrBuiltinID(); + assert((Bits & 0xffff) == Bits && "ObjCOrBuiltinID too big for ASTReader."); + clang::io::Emit16(Out, Bits); + Bits = 0; + bool HadMacroDefinition = hadMacroDefinition(II, Macro); + 
Bits = (Bits << 1) | unsigned(HadMacroDefinition); Bits = (Bits << 1) | unsigned(II->isExtensionToken()); Bits = (Bits << 1) | unsigned(II->isPoisoned()); Bits = (Bits << 1) | unsigned(II->hasRevertedTokenIDToIdentifier()); Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword()); clang::io::Emit16(Out, Bits); - if (HasMacroDefinition) { - clang::io::Emit32(Out, Writer.getMacroOffset(II)); - clang::io::Emit32(Out, - Writer.inferSubmoduleIDFromLocation(Macro->getDefinitionLoc())); + if (HadMacroDefinition) { + // Write all of the macro IDs associated with this identifier. + for (MacroInfo *M = Macro; M; M = M->getPreviousDefinition()) { + if (MacroID ID = Writer.getMacroRef(M)) + clang::io::Emit32(Out, ID); + } + + clang::io::Emit32(Out, 0); } - + // Emit the declaration IDs in reverse order, because the // IdentifierResolver provides the declarations as they would be // visible (e.g., the function "stat" would come before the struct @@ -2756,30 +2945,32 @@ public: void EmitKey(raw_ostream& Out, DeclarationName Name, unsigned) { using namespace clang::io; - assert(Name.getNameKind() < 0x100 && "Invalid name kind ?"); Emit8(Out, Name.getNameKind()); switch (Name.getNameKind()) { case DeclarationName::Identifier: Emit32(Out, Writer.getIdentifierRef(Name.getAsIdentifierInfo())); - break; + return; case DeclarationName::ObjCZeroArgSelector: case DeclarationName::ObjCOneArgSelector: case DeclarationName::ObjCMultiArgSelector: Emit32(Out, Writer.getSelectorRef(Name.getObjCSelector())); - break; + return; case DeclarationName::CXXOperatorName: - assert(Name.getCXXOverloadedOperator() < 0x100 && "Invalid operator ?"); + assert(Name.getCXXOverloadedOperator() < NUM_OVERLOADED_OPERATORS && + "Invalid operator?"); Emit8(Out, Name.getCXXOverloadedOperator()); - break; + return; case DeclarationName::CXXLiteralOperatorName: Emit32(Out, Writer.getIdentifierRef(Name.getCXXLiteralIdentifier())); - break; + return; case DeclarationName::CXXConstructorName: case DeclarationName::CXXDestructorName: case DeclarationName::CXXConversionFunctionName: case DeclarationName::CXXUsingDirective: - break; + return; } + + llvm_unreachable("Invalid name kind?"); } void EmitData(raw_ostream& Out, key_type_ref, @@ -3147,7 +3338,8 @@ ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream) ASTHasCompilerErrors(false), FirstDeclID(NUM_PREDEF_DECL_IDS), NextDeclID(FirstDeclID), FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID), - FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID), + FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID), + FirstMacroID(NUM_PREDEF_MACRO_IDS), NextMacroID(FirstMacroID), FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS), NextSubmoduleID(FirstSubmoduleID), FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID), @@ -3171,7 +3363,7 @@ ASTWriter::~ASTWriter() { delete I->second; } -void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls, +void ASTWriter::WriteAST(Sema &SemaRef, const std::string &OutputFile, Module *WritingModule, StringRef isysroot, bool hasErrors) { @@ -3190,7 +3382,7 @@ void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls, Context = &SemaRef.Context; PP = &SemaRef.PP; this->WritingModule = WritingModule; - WriteASTCore(SemaRef, StatCalls, isysroot, OutputFile, WritingModule); + WriteASTCore(SemaRef, isysroot, OutputFile, WritingModule); Context = 0; PP = 0; this->WritingModule = 0; @@ -3207,7 +3399,7 @@ static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec, } } -void ASTWriter::WriteASTCore(Sema 
&SemaRef, MemorizeStatCalls *StatCalls, +void ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot, const std::string &OutputFile, Module *WritingModule) { @@ -3357,14 +3549,13 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls, if (!I->second) AddDeclRef(I->first, KnownNamespaces); } - + + // Write the control block + WriteControlBlock(PP, Context, isysroot, OutputFile); + // Write the remaining AST contents. RecordData Record; Stream.EnterSubblock(AST_BLOCK_ID, 5); - WriteMetadata(Context, isysroot, OutputFile); - WriteLanguageOptions(Context.getLangOpts()); - if (StatCalls && isysroot.empty()) - WriteStatCache(*StatCalls); // Create a lexical update block containing all of the declarations in the // translation unit that do not come from other AST files. @@ -3482,6 +3673,7 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls, Out.write(FileName.data(), FileName.size()); io::Emit32(Out, (*M)->SLocEntryBaseOffset); io::Emit32(Out, (*M)->BaseIdentifierID); + io::Emit32(Out, (*M)->BaseMacroID); io::Emit32(Out, (*M)->BasePreprocessedEntityID); io::Emit32(Out, (*M)->BaseSubmoduleID); io::Emit32(Out, (*M)->BaseSelectorID); @@ -3595,7 +3787,8 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls, Stream.EmitRecord(IMPORTED_MODULES, ImportedModules); } } - + + WriteMacroUpdates(); WriteDeclUpdatesBlocks(); WriteDeclReplacementsBlock(); WriteMergedDecls(); @@ -3612,6 +3805,21 @@ void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls, Stream.ExitBlock(); } +void ASTWriter::WriteMacroUpdates() { + if (MacroUpdates.empty()) + return; + + RecordData Record; + for (MacroUpdatesMap::iterator I = MacroUpdates.begin(), + E = MacroUpdates.end(); + I != E; ++I) { + addMacroRef(I->first, Record); + AddSourceLocation(I->second.UndefLoc, Record); + Record.push_back(inferSubmoduleIDFromLocation(I->second.UndefLoc)); + } + Stream.EmitRecord(MACRO_UPDATES, Record); +} + /// \brief Go through the declaration update blocks and resolve declaration /// pointers into declaration IDs. void ASTWriter::ResolveDeclUpdatesBlocks() { @@ -3707,6 +3915,10 @@ void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Recor Record.push_back(getIdentifierRef(II)); } +void ASTWriter::addMacroRef(MacroInfo *MI, RecordDataImpl &Record) { + Record.push_back(getMacroRef(MI)); +} + IdentID ASTWriter::getIdentifierRef(const IdentifierInfo *II) { if (II == 0) return 0; @@ -3717,6 +3929,19 @@ IdentID ASTWriter::getIdentifierRef(const IdentifierInfo *II) { return ID; } +MacroID ASTWriter::getMacroRef(MacroInfo *MI) { + // Don't emit builtin macros like __LINE__ to the AST file unless they + // have been redefined by the header (in which case they are not + // isBuiltinMacro). + if (MI == 0 || MI->isBuiltinMacro()) + return 0; + + MacroID &ID = MacroIDs[MI]; + if (ID == 0) + ID = NextMacroID++; + return ID; +} + void ASTWriter::AddSelectorRef(const Selector SelRef, RecordDataImpl &Record) { Record.push_back(getSelectorRef(SelRef)); } @@ -3774,7 +3999,9 @@ void ASTWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind, case TemplateArgument::Null: case TemplateArgument::Integral: case TemplateArgument::Declaration: + case TemplateArgument::NullPtr: case TemplateArgument::Pack: + // FIXME: Is this right? 
break; } } @@ -3931,10 +4158,9 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) { llvm::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc); if (FID.isInvalid()) return; - const SrcMgr::SLocEntry *Entry = &SM.getSLocEntry(FID); - assert(Entry->isFile()); + assert(SM.getSLocEntry(FID).isFile()); - DeclIDInFileInfo *&Info = FileDeclIDs[Entry]; + DeclIDInFileInfo *&Info = FileDeclIDs[FID]; if (!Info) Info = new DeclIDInFileInfo(); @@ -4191,6 +4417,10 @@ void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg, break; case TemplateArgument::Declaration: AddDeclRef(Arg.getAsDecl(), Record); + Record.push_back(Arg.isDeclForReferenceParam()); + break; + case TemplateArgument::NullPtr: + AddTypeRef(Arg.getNullPtrType(), Record); break; case TemplateArgument::Integral: AddAPSInt(Arg.getAsIntegral(), Record); @@ -4403,6 +4633,7 @@ void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Rec Record.push_back(Lambda.NumExplicitCaptures); Record.push_back(Lambda.ManglingNumber); AddDeclRef(Lambda.ContextDecl, Record); + AddTypeSourceInfo(Lambda.MethodTyInfo, Record); for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) { LambdaExpr::Capture &Capture = Lambda.Captures[I]; AddSourceLocation(Capture.getLocation(), Record); @@ -4423,6 +4654,7 @@ void ASTWriter::ReaderInitialized(ASTReader *Reader) { assert(FirstDeclID == NextDeclID && FirstTypeID == NextTypeID && FirstIdentID == NextIdentID && + FirstMacroID == NextMacroID && FirstSubmoduleID == NextSubmoduleID && FirstSelectorID == NextSelectorID && "Setting chain after writing has started."); @@ -4432,19 +4664,23 @@ void ASTWriter::ReaderInitialized(ASTReader *Reader) { FirstDeclID = NUM_PREDEF_DECL_IDS + Chain->getTotalNumDecls(); FirstTypeID = NUM_PREDEF_TYPE_IDS + Chain->getTotalNumTypes(); FirstIdentID = NUM_PREDEF_IDENT_IDS + Chain->getTotalNumIdentifiers(); + FirstMacroID = NUM_PREDEF_MACRO_IDS + Chain->getTotalNumMacros(); FirstSubmoduleID = NUM_PREDEF_SUBMODULE_IDS + Chain->getTotalNumSubmodules(); FirstSelectorID = NUM_PREDEF_SELECTOR_IDS + Chain->getTotalNumSelectors(); NextDeclID = FirstDeclID; NextTypeID = FirstTypeID; NextIdentID = FirstIdentID; + NextMacroID = FirstMacroID; NextSelectorID = FirstSelectorID; NextSubmoduleID = FirstSubmoduleID; } void ASTWriter::IdentifierRead(IdentID ID, IdentifierInfo *II) { IdentifierIDs[II] = ID; - if (II->hasMacroDefinition()) - DeserializedMacroNames.push_back(II); +} + +void ASTWriter::MacroRead(serialization::MacroID ID, MacroInfo *MI) { + MacroIDs[MI] = ID; } void ASTWriter::TypeRead(TypeIdx Idx, QualType T) { @@ -4468,15 +4704,15 @@ void ASTWriter::MacroDefinitionRead(serialization::PreprocessedEntityID ID, MacroDefinitions[MD] = ID; } -void ASTWriter::MacroVisible(IdentifierInfo *II) { - DeserializedMacroNames.push_back(II); -} - void ASTWriter::ModuleRead(serialization::SubmoduleID ID, Module *Mod) { assert(SubmoduleIDs.find(Mod) == SubmoduleIDs.end()); SubmoduleIDs[Mod] = ID; } +void ASTWriter::UndefinedMacro(MacroInfo *MI) { + MacroUpdates[MI].UndefLoc = MI->getUndefLoc(); +} + void ASTWriter::CompletedTagDefinition(const TagDecl *D) { assert(D->isCompleteDefinition()); assert(!WritingAST && "Already writing the AST!"); @@ -4490,6 +4726,7 @@ void ASTWriter::CompletedTagDefinition(const TagDecl *D) { } } } + void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) { assert(!WritingAST && "Already writing the AST!"); diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp index 602943b..7486565 100644 --- 
a/lib/Serialization/ASTWriterDecl.cpp +++ b/lib/Serialization/ASTWriterDecl.cpp @@ -416,7 +416,7 @@ void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) { } Record.push_back(D->isInstanceMethod()); Record.push_back(D->isVariadic()); - Record.push_back(D->isSynthesized()); + Record.push_back(D->isPropertyAccessor()); Record.push_back(D->isDefined()); Record.push_back(D->IsOverriding); @@ -606,6 +606,8 @@ void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) { Writer.AddDeclRef(D->getSuperClass(), Record); Writer.AddSourceLocation(D->getIvarLBraceLoc(), Record); Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record); + Record.push_back(D->hasNonZeroConstructors()); + Record.push_back(D->hasDestructors()); Writer.AddCXXCtorInitializers(D->IvarInitializers, D->NumIvarInitializers, Record); Code = serialization::DECL_OBJC_IMPLEMENTATION; @@ -675,6 +677,8 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) { Record.push_back(D->isNRVOVariable()); Record.push_back(D->isCXXForRangeDecl()); Record.push_back(D->isARCPseudoStrong()); + Record.push_back(D->isConstexpr()); + if (D->getInit()) { Record.push_back(!D->isInitKnownICE() ? 1 : (D->isInitICE() ? 3 : 2)); Writer.AddStmt(D->getInit()); @@ -705,6 +709,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) { D->getInitStyle() == VarDecl::CInit && D->getInit() == 0 && !isa(D) && + !D->isConstexpr() && !SpecInfo) AbbrevToUse = Writer.getDeclVarAbbrev(); @@ -945,11 +950,16 @@ void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) { void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) { VisitFunctionDecl(D); - Record.push_back(D->size_overridden_methods()); - for (CXXMethodDecl::method_iterator - I = D->begin_overridden_methods(), E = D->end_overridden_methods(); - I != E; ++I) - Writer.AddDeclRef(*I, Record); + if (D->isCanonicalDecl()) { + Record.push_back(D->size_overridden_methods()); + for (CXXMethodDecl::method_iterator + I = D->begin_overridden_methods(), E = D->end_overridden_methods(); + I != E; ++I) + Writer.AddDeclRef(*I, Record); + } else { + // We only need to record overridden methods once for the canonical decl. + Record.push_back(0); + } Code = serialization::DECL_CXX_METHOD; } @@ -981,6 +991,7 @@ void ASTDeclWriter::VisitCXXConversionDecl(CXXConversionDecl *D) { void ASTDeclWriter::VisitImportDecl(ImportDecl *D) { VisitDecl(D); + Record.push_back(Writer.getSubmoduleID(D->getImportedModule())); ArrayRef IdentifierLocs = D->getIdentifierLocs(); Record.push_back(!IdentifierLocs.empty()); if (IdentifierLocs.empty()) { @@ -1073,7 +1084,7 @@ void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) { Writer.AddDeclRef(&*I, Record); } - // InjectedClassNameType is computed, no need to write it. + Writer.AddTypeRef(D->getCommonPtr()->InjectedClassNameType, Record); } Code = serialization::DECL_CLASS_TEMPLATE; } @@ -1103,6 +1114,7 @@ void ASTDeclWriter::VisitClassTemplateSpecializationDecl( Writer.AddTemplateArgumentList(&D->getTemplateArgs(), Record); Writer.AddSourceLocation(D->getPointOfInstantiation(), Record); Record.push_back(D->getSpecializationKind()); + Record.push_back(D->isCanonicalDecl()); if (D->isCanonicalDecl()) { // When reading, we'll add it to the folding set of the following template. 
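// The ASTDeclWriter::Visit* methods in the hunks above all share one
// convention: each field is pushed onto the flat Record in a fixed order,
// and the matching ASTReader code must pull the fields back out in exactly
// that order, which is why a newly serialized bit (isConstexpr,
// isCanonicalDecl, ...) has to be threaded through both sides and through
// the abbreviation layouts. A minimal sketch of that round-trip in plain
// C++ follows; FakeVarDecl and the helper names are illustrative stand-ins
// only, not part of the actual ASTWriter/ASTReader API.

#include <cassert>
#include <cstdint>
#include <vector>

struct FakeVarDecl {
  bool IsNRVOVariable;
  bool IsCXXForRangeDecl;
  bool IsConstexpr; // the kind of newly serialized field this patch adds
};

static void writeVarDecl(const FakeVarDecl &D, std::vector<uint64_t> &Record) {
  // Writer side: fields are appended in a fixed order.
  Record.push_back(D.IsNRVOVariable);
  Record.push_back(D.IsCXXForRangeDecl);
  Record.push_back(D.IsConstexpr);
}

static FakeVarDecl readVarDecl(const std::vector<uint64_t> &Record,
                               unsigned &Idx) {
  // Reader side: fields must be consumed in exactly the same order.
  FakeVarDecl D;
  D.IsNRVOVariable = Record[Idx++];
  D.IsCXXForRangeDecl = Record[Idx++];
  D.IsConstexpr = Record[Idx++];
  return D;
}

int main() {
  FakeVarDecl D = {true, false, true};
  std::vector<uint64_t> Record;
  writeVarDecl(D, Record);
  unsigned Idx = 0;
  FakeVarDecl RoundTripped = readVarDecl(Record, Idx);
  assert(RoundTripped.IsConstexpr == D.IsConstexpr);
  return 0;
}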
@@ -1172,7 +1184,8 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) { void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) { // For an expanded parameter pack, record the number of expansion types here - // so that it's easier for + // so that it's easier for deserialization to allocate the right amount of + // memory. if (D->isExpandedParameterPack()) Record.push_back(D->getNumExpansionTypes()); @@ -1201,15 +1214,30 @@ void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) { } void ASTDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) { + // For an expanded parameter pack, record the number of expansion types here + // so that it's easier for deserialization to allocate the right amount of + // memory. + if (D->isExpandedParameterPack()) + Record.push_back(D->getNumExpansionTemplateParameters()); + VisitTemplateDecl(D); // TemplateParmPosition. Record.push_back(D->getDepth()); Record.push_back(D->getPosition()); - // Rest of TemplateTemplateParmDecl. - Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record); - Record.push_back(D->defaultArgumentWasInherited()); - Record.push_back(D->isParameterPack()); - Code = serialization::DECL_TEMPLATE_TEMPLATE_PARM; + + if (D->isExpandedParameterPack()) { + for (unsigned I = 0, N = D->getNumExpansionTemplateParameters(); + I != N; ++I) + Writer.AddTemplateParameterList(D->getExpansionTemplateParameters(I), + Record); + Code = serialization::DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK; + } else { + // Rest of TemplateTemplateParmDecl. + Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record); + Record.push_back(D->defaultArgumentWasInherited()); + Record.push_back(D->isParameterPack()); + Code = serialization::DECL_TEMPLATE_TEMPLATE_PARM; + } } void ASTDeclWriter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) { @@ -1462,6 +1490,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() { Abv->Add(BitCodeAbbrevOp(0)); // isNRVOVariable Abv->Add(BitCodeAbbrevOp(0)); // isCXXForRangeDecl Abv->Add(BitCodeAbbrevOp(0)); // isARCPseudoStrong + Abv->Add(BitCodeAbbrevOp(0)); // isConstexpr Abv->Add(BitCodeAbbrevOp(0)); // HasInit Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo // ParmVarDecl @@ -1540,6 +1569,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() { Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong + Abv->Add(BitCodeAbbrevOp(0)); // isConstexpr Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasInit Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasMemberSpecInfo // Type Source Info @@ -1603,7 +1633,7 @@ void ASTWriter::WriteDeclsBlockAbbrevs() { //Character Literal Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getValue Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location - Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsWide + Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // getKind CharacterLiteralAbbrev = Stream.EmitAbbrev(Abv); Abv = new BitCodeAbbrev(); diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp index f63388f..7e8ce42 100644 --- a/lib/Serialization/ASTWriterStmt.cpp +++ b/lib/Serialization/ASTWriterStmt.cpp @@ -218,7 +218,7 @@ void ASTStmtWriter::VisitDeclStmt(DeclStmt *S) { Code = serialization::STMT_DECL; } -void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) { +void 
ASTStmtWriter::VisitGCCAsmStmt(GCCAsmStmt *S) { VisitStmt(S); Record.push_back(S->getNumOutputs()); Record.push_back(S->getNumInputs()); @@ -227,7 +227,6 @@ void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) { Writer.AddSourceLocation(S->getRParenLoc(), Record); Record.push_back(S->isVolatile()); Record.push_back(S->isSimple()); - Record.push_back(S->isMSAsm()); Writer.AddStmt(S->getAsmString()); // Outputs @@ -246,14 +245,16 @@ void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) { // Clobbers for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I) - Writer.AddStmt(S->getClobber(I)); + Writer.AddStmt(S->getClobberStringLiteral(I)); - Code = serialization::STMT_ASM; + Code = serialization::STMT_GCCASM; } void ASTStmtWriter::VisitMSAsmStmt(MSAsmStmt *S) { // FIXME: Statement writer not yet implemented for MS style inline asm. VisitStmt(S); + + Code = serialization::STMT_MSASM; } void ASTStmtWriter::VisitExpr(Expr *E) { @@ -535,6 +536,7 @@ void ASTStmtWriter::VisitBinaryOperator(BinaryOperator *E) { Writer.AddStmt(E->getRHS()); Record.push_back(E->getOpcode()); // FIXME: stable encoding Writer.AddSourceLocation(E->getOperatorLoc(), Record); + Record.push_back(E->isFPContractable()); Code = serialization::EXPR_BINARY_OPERATOR; } @@ -604,6 +606,8 @@ void ASTStmtWriter::VisitExtVectorElementExpr(ExtVectorElementExpr *E) { void ASTStmtWriter::VisitInitListExpr(InitListExpr *E) { VisitExpr(E); + // NOTE: only add the (possibly null) syntactic form. + // No need to serialize the isSemanticForm flag and the semantic form. Writer.AddStmt(E->getSyntacticForm()); Writer.AddSourceLocation(E->getLBraceLoc(), Record); Writer.AddSourceLocation(E->getRBraceLoc(), Record); @@ -1054,6 +1058,7 @@ void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) { VisitCallExpr(E); Record.push_back(E->getOperator()); Writer.AddSourceRange(E->Range, Record); + Record.push_back(E->isFPContractable()); Code = serialization::EXPR_CXX_OPERATOR_CALL; } @@ -1233,7 +1238,7 @@ void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) { Writer.AddDeclRef(E->getOperatorDelete(), Record); Writer.AddTypeSourceInfo(E->getAllocatedTypeSourceInfo(), Record); Writer.AddSourceRange(E->getTypeIdParens(), Record); - Writer.AddSourceLocation(E->getStartLoc(), Record); + Writer.AddSourceRange(E->getSourceRange(), Record); Writer.AddSourceRange(E->getDirectInitRange(), Record); for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), e = E->raw_arg_end(); I != e; ++I) @@ -1382,8 +1387,6 @@ void ASTStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) { void ASTStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) { VisitOverloadExpr(E); Record.push_back(E->requiresADL()); - if (E->requiresADL()) - Record.push_back(E->isStdAssociatedNamespace()); Record.push_back(E->isOverloaded()); Writer.AddDeclRef(E->getNamingClass(), Record); Code = serialization::EXPR_CXX_UNRESOLVED_LOOKUP; @@ -1480,6 +1483,17 @@ void ASTStmtWriter::VisitSubstNonTypeTemplateParmPackExpr( Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK; } +void ASTStmtWriter::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) { + VisitExpr(E); + Record.push_back(E->getNumExpansions()); + Writer.AddDeclRef(E->getParameterPack(), Record); + Writer.AddSourceLocation(E->getParameterPackLocation(), Record); + for (FunctionParmPackExpr::iterator I = E->begin(), End = E->end(); + I != End; ++I) + Writer.AddDeclRef(*I, Record); + Code = serialization::EXPR_FUNCTION_PARM_PACK; +} + void ASTStmtWriter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) { VisitExpr(E); 
Writer.AddStmt(E->Temporary); diff --git a/lib/Serialization/GeneratePCH.cpp b/lib/Serialization/GeneratePCH.cpp index 02aed10..870d654 100644 --- a/lib/Serialization/GeneratePCH.cpp +++ b/lib/Serialization/GeneratePCH.cpp @@ -18,7 +18,6 @@ #include "clang/AST/ASTConsumer.h" #include "clang/Lex/Preprocessor.h" #include "clang/Basic/FileManager.h" -#include "clang/Basic/FileSystemStatCache.h" #include "llvm/Bitcode/BitstreamWriter.h" #include "llvm/Support/raw_ostream.h" #include @@ -32,11 +31,7 @@ PCHGenerator::PCHGenerator(const Preprocessor &PP, raw_ostream *OS) : PP(PP), OutputFile(OutputFile), Module(Module), isysroot(isysroot.str()), Out(OS), - SemaPtr(0), StatCalls(0), Stream(Buffer), Writer(Stream) { - // Install a stat() listener to keep track of all of the stat() - // calls. - StatCalls = new MemorizeStatCalls(); - PP.getFileManager().addStatCache(StatCalls, /*AtBeginning=*/false); + SemaPtr(0), Stream(Buffer), Writer(Stream) { } PCHGenerator::~PCHGenerator() { @@ -48,7 +43,7 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) { // Emit the PCH file assert(SemaPtr && "No Sema?"); - Writer.WriteAST(*SemaPtr, StatCalls, OutputFile, Module, isysroot); + Writer.WriteAST(*SemaPtr, OutputFile, Module, isysroot); // Write the generated bitstream to "Out". Out->write((char *)&Buffer.front(), Buffer.size()); @@ -60,6 +55,10 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) { Buffer.clear(); } +PPMutationListener *PCHGenerator::GetPPMutationListener() { + return &Writer; +} + ASTMutationListener *PCHGenerator::GetASTMutationListener() { return &Writer; } diff --git a/lib/Serialization/Module.cpp b/lib/Serialization/Module.cpp index ff241d3..5e42ab4 100644 --- a/lib/Serialization/Module.cpp +++ b/lib/Serialization/Module.cpp @@ -21,12 +21,15 @@ using namespace serialization; using namespace reader; ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation) - : Kind(Kind), DirectlyImported(false), Generation(Generation), SizeInBits(0), + : Kind(Kind), File(0), DirectlyImported(false), + Generation(Generation), SizeInBits(0), LocalNumSLocEntries(0), SLocEntryBaseID(0), SLocEntryBaseOffset(0), SLocEntryOffsets(0), - SLocFileOffsets(0), LocalNumIdentifiers(0), + LocalNumIdentifiers(0), IdentifierOffsets(0), BaseIdentifierID(0), IdentifierTableData(0), - IdentifierLookupTable(0), BasePreprocessedEntityID(0), + IdentifierLookupTable(0), + LocalNumMacros(0), MacroOffsets(0), + BasePreprocessedEntityID(0), PreprocessedEntityOffsets(0), NumPreprocessedEntities(0), LocalNumHeaderFileInfos(0), HeaderFileInfoTableData(0), HeaderFileInfoTable(0), @@ -35,9 +38,10 @@ ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation) SelectorLookupTableData(0), SelectorLookupTable(0), LocalNumDecls(0), DeclOffsets(0), BaseDeclID(0), LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(0), - FileSortedDecls(0), RedeclarationsMap(0), LocalNumRedeclarationsInMap(0), + FileSortedDecls(0), NumFileSortedDecls(0), + RedeclarationsMap(0), LocalNumRedeclarationsInMap(0), ObjCCategoriesMap(0), LocalNumObjCCategoriesInMap(0), - LocalNumTypes(0), TypeOffsets(0), BaseTypeIndex(0), StatCache(0) + LocalNumTypes(0), TypeOffsets(0), BaseTypeIndex(0) {} ModuleFile::~ModuleFile() { @@ -89,6 +93,10 @@ void ModuleFile::dump() { << " Number of identifiers: " << LocalNumIdentifiers << '\n'; dumpLocalRemap("Identifier ID local -> global map", IdentifierRemap); + llvm::errs() << " Base macro ID: " << BaseMacroID << '\n' + << " Number of macros: " << LocalNumMacros << '\n'; + dumpLocalRemap("Macro ID local -> 
global map", MacroRemap); + llvm::errs() << " Base submodule ID: " << BaseSubmoduleID << '\n' << " Number of submodules: " << LocalNumSubmodules << '\n'; dumpLocalRemap("Submodule ID local -> global map", SubmoduleRemap); diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp index ab364b7..efe4421 100644 --- a/lib/Serialization/ModuleManager.cpp +++ b/lib/Serialization/ModuleManager.cpp @@ -50,6 +50,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type, // Allocate a new module. ModuleFile *New = new ModuleFile(Type, Generation); New->FileName = FileName.str(); + New->File = Entry; Chain.push_back(New); NewModule = true; ModuleEntry = New; @@ -87,6 +88,45 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type, return std::make_pair(ModuleEntry, NewModule); } +namespace { + /// \brief Predicate that checks whether a module file occurs within + /// the given set. + class IsInModuleFileSet : public std::unary_function { + llvm::SmallPtrSet &Removed; + + public: + IsInModuleFileSet(llvm::SmallPtrSet &Removed) + : Removed(Removed) { } + + bool operator()(ModuleFile *MF) const { + return Removed.count(MF); + } + }; +} + +void ModuleManager::removeModules(ModuleIterator first, ModuleIterator last) { + if (first == last) + return; + + // Collect the set of module file pointers that we'll be removing. + llvm::SmallPtrSet victimSet(first, last); + + // Remove any references to the now-destroyed modules. + IsInModuleFileSet checkInSet(victimSet); + for (unsigned i = 0, n = Chain.size(); i != n; ++i) { + Chain[i]->ImportedBy.remove_if(checkInSet); + } + + // Delete the modules and erase them from the various structures. + for (ModuleIterator victim = first; victim != last; ++victim) { + Modules.erase((*victim)->File); + delete *victim; + } + + // Remove the modules from the chain. + Chain.erase(first, last); +} + void ModuleManager::addInMemoryBuffer(StringRef FileName, llvm::MemoryBuffer *Buffer) { @@ -95,7 +135,7 @@ void ModuleManager::addInMemoryBuffer(StringRef FileName, InMemoryBuffers[Entry] = Buffer; } -ModuleManager::ModuleManager(const FileSystemOptions &FSO) : FileMgr(FSO) { } +ModuleManager::ModuleManager(FileManager &FileMgr) : FileMgr(FileMgr) { } ModuleManager::~ModuleManager() { for (unsigned i = 0, e = Chain.size(); i != e; ++i) diff --git a/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp b/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp deleted file mode 100644 index 84ea8c7..0000000 --- a/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp +++ /dev/null @@ -1,92 +0,0 @@ -//== AdjustedReturnValueChecker.cpp -----------------------------*- C++ -*--==// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines AdjustedReturnValueChecker, a simple check to see if the -// return value of a function call is different than the one the caller thinks -// it is. 
-// -//===----------------------------------------------------------------------===// - -#include "ClangSACheckers.h" -#include "clang/StaticAnalyzer/Core/Checker.h" -#include "clang/StaticAnalyzer/Core/CheckerManager.h" -#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" -#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" - -using namespace clang; -using namespace ento; - -namespace { -class AdjustedReturnValueChecker : - public Checker< check::PostStmt > { -public: - void checkPostStmt(const CallExpr *CE, CheckerContext &C) const; -}; -} - -void AdjustedReturnValueChecker::checkPostStmt(const CallExpr *CE, - CheckerContext &C) const { - - // Get the result type of the call. - QualType expectedResultTy = CE->getType(); - - // Fetch the signature of the called function. - ProgramStateRef state = C.getState(); - const LocationContext *LCtx = C.getLocationContext(); - - SVal V = state->getSVal(CE, LCtx); - - if (V.isUnknown()) - return; - - // Casting to void? Discard the value. - if (expectedResultTy->isVoidType()) { - C.addTransition(state->BindExpr(CE, LCtx, UnknownVal())); - return; - } - - const MemRegion *callee = state->getSVal(CE->getCallee(), LCtx).getAsRegion(); - if (!callee) - return; - - QualType actualResultTy; - - if (const FunctionTextRegion *FT = dyn_cast(callee)) { - const FunctionDecl *FD = FT->getDecl(); - actualResultTy = FD->getResultType(); - } - else if (const BlockDataRegion *BD = dyn_cast(callee)) { - const BlockTextRegion *BR = BD->getCodeRegion(); - const BlockPointerType *BT=BR->getLocationType()->getAs(); - const FunctionType *FT = BT->getPointeeType()->getAs(); - actualResultTy = FT->getResultType(); - } - - // Can this happen? - if (actualResultTy.isNull()) - return; - - // For now, ignore references. - if (actualResultTy->getAs()) - return; - - - // Are they the same? - if (expectedResultTy != actualResultTy) { - // FIXME: Do more checking and actual emit an error. At least performing - // the cast avoids some assertion failures elsewhere. 
- SValBuilder &svalBuilder = C.getSValBuilder(); - V = svalBuilder.evalCast(V, expectedResultTy, actualResultTy); - C.addTransition(state->BindExpr(CE, LCtx, V)); - } -} - -void ento::registerAdjustedReturnValueChecker(CheckerManager &mgr) { - mgr.registerChecker(); -} diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp index b2ad184..535d8ee 100644 --- a/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp @@ -78,7 +78,7 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS, new BugReport(*BT, BT->getDescription(), N); report->addRange(LoadS->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); return; } diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp index c6efe94..457c870 100644 --- a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp +++ b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp @@ -208,7 +208,7 @@ void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext, break; } - checkerContext.EmitReport(new BugReport(*BT, os.str(), errorNode)); + checkerContext.emitReport(new BugReport(*BT, os.str(), errorNode)); } void RegionRawOffsetV2::dump() const { diff --git a/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp b/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp index c582cfc..81e8dd8 100644 --- a/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp @@ -105,9 +105,9 @@ void AttrNonNullChecker::checkPreCall(const CallEvent &Call, // Highlight the range of the argument that was null. R->addRange(Call.getArgSourceRange(idx)); if (const Expr *ArgE = Call.getArgExpr(idx)) - bugreporter::addTrackNullOrUndefValueVisitor(errorNode, ArgE, R); + bugreporter::trackNullOrUndefValue(errorNode, ArgE, *R); // Emit the bug report. - C.EmitReport(R); + C.emitReport(R); } // Always return. Either we cached out or we just emitted an error. diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp index 955e79a..eba534e 100644 --- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp +++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp @@ -117,7 +117,7 @@ void NilArgChecker::WarnNilArg(CheckerContext &C, BugReport *R = new BugReport(*BT, os.str(), N); R->addRange(msg.getArgSourceRange(Arg)); - C.EmitReport(R); + C.emitReport(R); } } @@ -358,20 +358,20 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE, BugReport *report = new BugReport(*BT, os.str(), N); report->addRange(CE->getArg(2)->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } } //===----------------------------------------------------------------------===// -// CFRetain/CFRelease checking for null arguments. +// CFRetain/CFRelease/CFMakeCollectable checking for null arguments. 
//===----------------------------------------------------------------------===// namespace { class CFRetainReleaseChecker : public Checker< check::PreStmt > { mutable OwningPtr BT; - mutable IdentifierInfo *Retain, *Release; + mutable IdentifierInfo *Retain, *Release, *MakeCollectable; public: - CFRetainReleaseChecker(): Retain(0), Release(0) {} + CFRetainReleaseChecker(): Retain(0), Release(0), MakeCollectable(0) {} void checkPreStmt(const CallExpr *CE, CheckerContext &C) const; }; } // end anonymous namespace @@ -392,12 +392,14 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE, ASTContext &Ctx = C.getASTContext(); Retain = &Ctx.Idents.get("CFRetain"); Release = &Ctx.Idents.get("CFRelease"); - BT.reset(new APIMisuse("null passed to CFRetain/CFRelease")); + MakeCollectable = &Ctx.Idents.get("CFMakeCollectable"); + BT.reset( + new APIMisuse("null passed to CFRetain/CFRelease/CFMakeCollectable")); } - // Check if we called CFRetain/CFRelease. + // Check if we called CFRetain/CFRelease/CFMakeCollectable. const IdentifierInfo *FuncII = FD->getIdentifier(); - if (!(FuncII == Retain || FuncII == Release)) + if (!(FuncII == Retain || FuncII == Release || FuncII == MakeCollectable)) return; // FIXME: The rest of this just checks that the argument is non-null. @@ -426,14 +428,20 @@ void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE, if (!N) return; - const char *description = (FuncII == Retain) - ? "Null pointer argument in call to CFRetain" - : "Null pointer argument in call to CFRelease"; + const char *description; + if (FuncII == Retain) + description = "Null pointer argument in call to CFRetain"; + else if (FuncII == Release) + description = "Null pointer argument in call to CFRelease"; + else if (FuncII == MakeCollectable) + description = "Null pointer argument in call to CFMakeCollectable"; + else + llvm_unreachable("impossible case"); BugReport *report = new BugReport(*BT, description, N); report->addRange(Arg->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, Arg, report); - C.EmitReport(report); + bugreporter::trackNullOrUndefValue(N, Arg, *report); + C.emitReport(report); return; } @@ -491,7 +499,7 @@ void ClassReleaseChecker::checkPreObjCMessage(const ObjCMethodCall &msg, BugReport *report = new BugReport(*BT, os.str(), N); report->addRange(msg.getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } } @@ -644,7 +652,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg, BugReport *R = new BugReport(*BT, os.str(), errorNode.getValue()); R->addRange(msg.getArgSourceRange(I)); - C.EmitReport(R); + C.emitReport(R); } } @@ -716,6 +724,73 @@ void ObjCLoopChecker::checkPostStmt(const ObjCForCollectionStmt *FCS, C.addTransition(State); } +namespace { +/// \class ObjCNonNilReturnValueChecker +/// \brief The checker restricts the return values of APIs known to +/// never (or almost never) return 'nil'. 
+class ObjCNonNilReturnValueChecker
+  : public Checker<check::PostObjCMessage> {
+  mutable bool Initialized;
+  mutable Selector ObjectAtIndex;
+  mutable Selector ObjectAtIndexedSubscript;
+
+public:
+  ObjCNonNilReturnValueChecker() : Initialized(false) {}
+  void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
+};
+}
+
+static ProgramStateRef assumeExprIsNonNull(const Expr *NonNullExpr,
+                                           ProgramStateRef State,
+                                           CheckerContext &C) {
+  SVal Val = State->getSVal(NonNullExpr, C.getLocationContext());
+  if (DefinedOrUnknownSVal *DV = dyn_cast<DefinedOrUnknownSVal>(&Val))
+    return State->assume(*DV, true);
+  return State;
+}
+
+void ObjCNonNilReturnValueChecker::checkPostObjCMessage(const ObjCMethodCall &M,
+                                                        CheckerContext &C)
+                                                        const {
+  ProgramStateRef State = C.getState();
+
+  if (!Initialized) {
+    ASTContext &Ctx = C.getASTContext();
+    ObjectAtIndex = GetUnarySelector("objectAtIndex", Ctx);
+    ObjectAtIndexedSubscript = GetUnarySelector("objectAtIndexedSubscript", Ctx);
+  }
+
+  // Check the receiver type.
+  if (const ObjCInterfaceDecl *Interface = M.getReceiverInterface()) {
+
+    // Assume that object returned from '[self init]' or '[super init]' is not
+    // 'nil' if we are processing an inlined function/method.
+    //
+    // A defensive callee will (and should) check if the object returned by
+    // '[super init]' is 'nil' before doing it's own initialization. However,
+    // since 'nil' is rarely returned in practice, we should not warn when the
+    // caller to the defensive constructor uses the object in contexts where
+    // 'nil' is not accepted.
+    if (!C.inTopFrame() && M.getDecl() &&
+        M.getDecl()->getMethodFamily() == OMF_init &&
+        M.isReceiverSelfOrSuper()) {
+      State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+    }
+
+    // Objects returned from
+    // [NSArray|NSOrderedSet]::[ObjectAtIndex|ObjectAtIndexedSubscript]
+    // are never 'nil'.
+    FoundationClass Cl = findKnownClass(Interface);
+    if (Cl == FC_NSArray || Cl == FC_NSOrderedSet) {
+      Selector Sel = M.getSelector();
+      if (Sel == ObjectAtIndex || Sel == ObjectAtIndexedSubscript) {
+        // Go ahead and assume the value is non-nil.
+        State = assumeExprIsNonNull(M.getOriginExpr(), State, C);
+      }
+    }
+  }
+  C.addTransition(State);
+}

 //===----------------------------------------------------------------------===//
 // Check registration.
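The assumeExprIsNonNull helper above is an instance of a common analyzer idiom: right after an expression is evaluated, constrain the symbolic value bound to it and continue along the constrained state. Below is a minimal sketch of that idiom in isolation; it is illustrative only and not part of this patch, and the API name "get_widget" is made up for the example.

#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

using namespace clang;
using namespace ento;

namespace {
// Illustrative only: constrain a specific call's return value to be non-null.
class NonNullReturnSketch : public Checker< check::PostStmt<CallExpr> > {
public:
  void checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
    const FunctionDecl *FD = CE->getDirectCallee();
    if (!FD || !FD->getIdentifier() || FD->getName() != "get_widget")
      return; // "get_widget" is a hypothetical API used only for this sketch

    ProgramStateRef State = C.getState();
    SVal RetVal = State->getSVal(CE, C.getLocationContext());
    if (DefinedOrUnknownSVal *DV = dyn_cast<DefinedOrUnknownSVal>(&RetVal))
      State = State->assume(*DV, true); // keep only the non-null possibility
    if (State)
      C.addTransition(State);
  }
};
}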
@@ -744,3 +819,7 @@ void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) { void ento::registerObjCLoopChecker(CheckerManager &mgr) { mgr.registerChecker(); } + +void ento::registerObjCNonNilReturnValueChecker(CheckerManager &mgr) { + mgr.registerChecker(); +} diff --git a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp index a4fc396..92edefe 100644 --- a/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp @@ -35,7 +35,7 @@ void BoolAssignmentChecker::emitReport(ProgramStateRef state, if (ExplodedNode *N = C.addTransition(state)) { if (!BT) BT.reset(new BuiltinBug("Assignment of a non-Boolean value")); - C.EmitReport(new BugReport(*BT, BT->getDescription(), N)); + C.emitReport(new BugReport(*BT, BT->getDescription(), N)); } } diff --git a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp index 509bc79..6ef022b 100644 --- a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp @@ -55,7 +55,7 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE, // FIXME: Refactor into StoreManager itself? MemRegionManager& RM = C.getStoreManager().getRegionManager(); const AllocaRegion* R = - RM.getAllocaRegion(CE, C.getCurrentBlockCount(), C.getLocationContext()); + RM.getAllocaRegion(CE, C.blockCount(), C.getLocationContext()); // Set the extent of the region in bytes. This enables us to use the // SVal of the argument directly. If we save the extent in bits, we diff --git a/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/lib/StaticAnalyzer/Checkers/CMakeLists.txt index 7fe51d3..8e455de 100644 --- a/lib/StaticAnalyzer/Checkers/CMakeLists.txt +++ b/lib/StaticAnalyzer/Checkers/CMakeLists.txt @@ -4,7 +4,6 @@ clang_tablegen(Checkers.inc -gen-clang-sa-checkers TARGET ClangSACheckers) add_clang_library(clangStaticAnalyzerCheckers - AdjustedReturnValueChecker.cpp AnalyzerStatsChecker.cpp ArrayBoundChecker.cpp ArrayBoundCheckerV2.cpp @@ -28,12 +27,15 @@ add_clang_library(clangStaticAnalyzerCheckers DeadStoresChecker.cpp DebugCheckers.cpp DereferenceChecker.cpp + DirectIvarAssignment.cpp DivZeroChecker.cpp DynamicTypePropagation.cpp ExprInspectionChecker.cpp + SimpleStreamChecker.cpp FixedAddressChecker.cpp GenericTaintChecker.cpp IdempotentOperationChecker.cpp + IvarInvalidationChecker.cpp LLVMConventionsChecker.cpp MacOSKeychainAPIChecker.cpp MacOSXAPIChecker.cpp @@ -43,10 +45,10 @@ add_clang_library(clangStaticAnalyzerCheckers NSAutoreleasePoolChecker.cpp NSErrorChecker.cpp NoReturnFunctionChecker.cpp - OSAtomicChecker.cpp ObjCAtSyncChecker.cpp ObjCContainersASTChecker.cpp ObjCContainersChecker.cpp + ObjCMissingSuperCallChecker.cpp ObjCSelfInitChecker.cpp ObjCUnusedIVarsChecker.cpp PointerArithChecker.cpp diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp index 483082a..eae9ddf 100644 --- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp @@ -188,21 +188,9 @@ public: NonLoc right) const; }; -class CStringLength { -public: - typedef llvm::ImmutableMap EntryMap; -}; } //end anonymous namespace -namespace clang { -namespace ento { - template <> - struct ProgramStateTrait - : public ProgramStatePartialTrait { - static void *GDMIndex() { return CStringChecker::getTag(); } - }; -} -} +REGISTER_MAP_WITH_PROGRAMSTATE(CStringLength, const MemRegion *, 
SVal) //===----------------------------------------------------------------------===// // Individual checks and utility methods. @@ -252,8 +240,8 @@ ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C, BugReport *report = new BugReport(*BT, os.str(), N); report->addRange(S->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, S, report); - C.EmitReport(report); + bugreporter::trackNullOrUndefValue(N, S, *report); + C.emitReport(report); return NULL; } @@ -327,7 +315,7 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C, // reference is outside the range. report->addRange(S->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); return NULL; } @@ -544,7 +532,7 @@ void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state, report->addRange(First->getSourceRange()); report->addRange(Second->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C, @@ -607,7 +595,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C, // Generate a report for this bug. BugReport *report = new BugReport(*BT_AdditionOverflow, warning, N); - C.EmitReport(report); + C.emitReport(report); return NULL; } @@ -673,11 +661,11 @@ SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C, } // Otherwise, get a new symbol and update the state. - unsigned Count = C.getCurrentBlockCount(); SValBuilder &svalBuilder = C.getSValBuilder(); QualType sizeTy = svalBuilder.getContext().getSizeType(); SVal strLength = svalBuilder.getMetadataSymbolVal(CStringChecker::getTag(), - MR, Ex, sizeTy, Count); + MR, Ex, sizeTy, + C.blockCount()); if (!hypothetical) state = state->set(MR, strLength); @@ -714,7 +702,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state, os.str(), N); report->addRange(Ex->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } return UndefinedVal(); @@ -778,7 +766,7 @@ SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state, os.str(), N); report->addRange(Ex->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } return UndefinedVal(); @@ -826,15 +814,14 @@ ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C, } // Invalidate this region. - unsigned Count = C.getCurrentBlockCount(); const LocationContext *LCtx = C.getPredecessor()->getLocationContext(); - return state->invalidateRegions(R, E, Count, LCtx); + return state->invalidateRegions(R, E, C.blockCount(), LCtx); } // If we have a non-region value by chance, just remove the binding. // FIXME: is this necessary or correct? This handles the non-Region // cases. Is it ever valid to store to these? - return state->unbindLoc(*L); + return state->killBinding(*L); } bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx, @@ -843,7 +830,7 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx, switch (MR->getKind()) { case MemRegion::FunctionTextRegionKind: { - const FunctionDecl *FD = cast(MR)->getDecl(); + const NamedDecl *FD = cast(MR)->getDecl(); if (FD) os << "the address of the function '" << *FD << '\''; else @@ -957,9 +944,8 @@ void CStringChecker::evalCopyCommon(CheckerContext &C, } else { // If we don't know how much we copied, we can at least // conjure a return value for later. 
- unsigned Count = C.getCurrentBlockCount(); - SVal result = - C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count); + SVal result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx, + C.blockCount()); state = state->BindExpr(CE, LCtx, result); } @@ -1093,8 +1079,7 @@ void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const { state = CheckBufferAccess(C, state, Size, Left, Right); if (state) { // The return value is the comparison result, which we don't know. - unsigned Count = C.getCurrentBlockCount(); - SVal CmpV = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count); + SVal CmpV = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount()); state = state->BindExpr(CE, LCtx, CmpV); C.addTransition(state); } @@ -1206,8 +1191,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE, // no guarantee the full string length will actually be returned. // All we know is the return value is the min of the string length // and the limit. This is better than nothing. - unsigned Count = C.getCurrentBlockCount(); - result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count); + result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx, C.blockCount()); NonLoc *resultNL = cast(&result); if (strLengthNL) { @@ -1234,8 +1218,7 @@ void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE, // If we don't know the length of the string, conjure a return // value, so it can be used in constraints, at least. if (result.isUnknown()) { - unsigned Count = C.getCurrentBlockCount(); - result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count); + result = C.getSValBuilder().conjureSymbolVal(0, CE, LCtx, C.blockCount()); } } @@ -1612,8 +1595,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE, // If this is a stpcpy-style copy, but we were unable to check for a buffer // overflow, we still need a result. Conjure a return value. if (returnEnd && Result.isUnknown()) { - unsigned Count = C.getCurrentBlockCount(); - Result = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count); + Result = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount()); } // Set the return value. @@ -1770,8 +1752,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE, if (!canComputeResult) { // Conjure a symbolic value. It's the best we can do. - unsigned Count = C.getCurrentBlockCount(); - SVal resultVal = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count); + SVal resultVal = svalBuilder.conjureSymbolVal(0, CE, LCtx, C.blockCount()); state = state->BindExpr(CE, LCtx, resultVal); } @@ -1885,7 +1866,7 @@ void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const { } bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const { - CStringLength::EntryMap Entries = state->get(); + CStringLengthTy Entries = state->get(); return !Entries.isEmpty(); } @@ -1895,7 +1876,7 @@ CStringChecker::checkRegionChanges(ProgramStateRef state, ArrayRef ExplicitRegions, ArrayRef Regions, const CallEvent *Call) const { - CStringLength::EntryMap Entries = state->get(); + CStringLengthTy Entries = state->get(); if (Entries.isEmpty()) return state; @@ -1915,10 +1896,10 @@ CStringChecker::checkRegionChanges(ProgramStateRef state, } } - CStringLength::EntryMap::Factory &F = state->get_context(); + CStringLengthTy::Factory &F = state->get_context(); // Then loop over the entries in the current state. 
- for (CStringLength::EntryMap::iterator I = Entries.begin(), + for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end(); I != E; ++I) { const MemRegion *MR = I.getKey(); @@ -1945,9 +1926,9 @@ CStringChecker::checkRegionChanges(ProgramStateRef state, void CStringChecker::checkLiveSymbols(ProgramStateRef state, SymbolReaper &SR) const { // Mark all symbols in our string length map as valid. - CStringLength::EntryMap Entries = state->get(); + CStringLengthTy Entries = state->get(); - for (CStringLength::EntryMap::iterator I = Entries.begin(), E = Entries.end(); + for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end(); I != E; ++I) { SVal Len = I.getData(); @@ -1963,12 +1944,12 @@ void CStringChecker::checkDeadSymbols(SymbolReaper &SR, return; ProgramStateRef state = C.getState(); - CStringLength::EntryMap Entries = state->get(); + CStringLengthTy Entries = state->get(); if (Entries.isEmpty()) return; - CStringLength::EntryMap::Factory &F = state->get_context(); - for (CStringLength::EntryMap::iterator I = Entries.begin(), E = Entries.end(); + CStringLengthTy::Factory &F = state->get_context(); + for (CStringLengthTy::iterator I = Entries.begin(), E = Entries.end(); I != E; ++I) { SVal Len = I.getData(); if (SymbolRef Sym = Len.getAsSymbol()) { diff --git a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp index befc935..f1a3aac 100644 --- a/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp @@ -33,7 +33,6 @@ namespace { class WalkAST: public StmtVisitor { BugReporter &BR; AnalysisDeclContext* AC; - ASTContext &ASTC; /// Check if two expressions refer to the same declaration. inline bool sameDecl(const Expr *A1, const Expr *A2) { @@ -58,8 +57,8 @@ class WalkAST: public StmtVisitor { const FunctionDecl *FD = CE->getDirectCallee(); if (!FD) return false; - return (CheckerContext::isCLibraryFunction(FD, "strlen", ASTC) - && sameDecl(CE->getArg(0), WithArg)); + return (CheckerContext::isCLibraryFunction(FD, "strlen") && + sameDecl(CE->getArg(0), WithArg)); } return false; } @@ -83,7 +82,7 @@ class WalkAST: public StmtVisitor { public: WalkAST(BugReporter &br, AnalysisDeclContext* ac) : - BR(br), AC(ac), ASTC(AC->getASTContext()) { + BR(br), AC(ac) { } // Statement visitor methods. 
@@ -136,7 +135,7 @@ void WalkAST::VisitCallExpr(CallExpr *CE) { if (!FD) return; - if (CheckerContext::isCLibraryFunction(FD, "strncat", ASTC)) { + if (CheckerContext::isCLibraryFunction(FD, "strncat")) { if (containsBadStrncatPattern(CE)) { const Expr *DstArg = CE->getArg(0); const Expr *LenArg = CE->getArg(2); diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp index 5edcf09..82bc136 100644 --- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp @@ -75,13 +75,13 @@ void CallAndMessageChecker::emitBadCall(BugType *BT, CheckerContext &C, BugReport *R = new BugReport(*BT, BT->getName(), N); if (BadE) { R->addRange(BadE->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, BadE, R); + bugreporter::trackNullOrUndefValue(N, BadE, *R); } - C.EmitReport(R); + C.emitReport(R); } -StringRef describeUninitializedArgumentInCall(const CallEvent &Call, - bool IsFirstArgument) { +static StringRef describeUninitializedArgumentInCall(const CallEvent &Call, + bool IsFirstArgument) { switch (Call.getKind()) { case CE_ObjCMessage: { const ObjCMethodCall &Msg = cast(Call); @@ -122,8 +122,8 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C, BugReport *R = new BugReport(*BT, Desc, N); R->addRange(argRange); if (argEx) - bugreporter::addTrackNullOrUndefValueVisitor(N, argEx, R); - C.EmitReport(R); + bugreporter::trackNullOrUndefValue(N, argEx, *R); + C.emitReport(R); } return true; } @@ -207,7 +207,7 @@ bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C, // FIXME: enhance track back for uninitialized value for arbitrary // memregions - C.EmitReport(R); + C.emitReport(R); } return true; } @@ -335,8 +335,8 @@ void CallAndMessageChecker::checkPreObjCMessage(const ObjCMethodCall &msg, // FIXME: getTrackNullOrUndefValueVisitor can't handle "super" yet. if (const Expr *ReceiverE = ME->getInstanceReceiver()) - bugreporter::addTrackNullOrUndefValueVisitor(N, ReceiverE, R); - C.EmitReport(R); + bugreporter::trackNullOrUndefValue(N, ReceiverE, *R); + C.emitReport(R); } return; } else { @@ -377,9 +377,9 @@ void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C, report->addRange(ME->getReceiverRange()); // FIXME: This won't track "self" in messages to super. 
if (const Expr *receiver = ME->getInstanceReceiver()) { - bugreporter::addTrackNullOrUndefValueVisitor(N, receiver, report); + bugreporter::trackNullOrUndefValue(N, receiver, *report); } - C.EmitReport(report); + C.emitReport(report); } static bool supportsNilWithFloatRet(const llvm::Triple &triple) { diff --git a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp index 2e184fb..1cb8a8d 100644 --- a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp @@ -75,7 +75,7 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const { BugReport *R = new BugReport(*BT, BT->getDescription(), errorNode); R->addRange(CE->getSourceRange()); - C.EmitReport(R); + C.emitReport(R); } } } diff --git a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp index 1407638..d6d0e3c 100644 --- a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp @@ -64,7 +64,7 @@ void CastToStructChecker::checkPreStmt(const CastExpr *CE, "errors or data corruption.")); BugReport *R = new BugReport(*BT,BT->getDescription(), N); R->addRange(CE->getSourceRange()); - C.EmitReport(R); + C.emitReport(R); } } } diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp index 7a25865..9087205 100644 --- a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp +++ b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp @@ -85,7 +85,7 @@ static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID, Expr::NPC_ValueDependentIsNull)) { // This is only a 'release' if the property kind is not // 'assign'. - return PD->getSetterKind() != ObjCPropertyDecl::Assign;; + return PD->getSetterKind() != ObjCPropertyDecl::Assign; } // Recurse to children. diff --git a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp index b8b7c36..5cd6194 100644 --- a/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp +++ b/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp @@ -270,6 +270,7 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) { // Emit the error. First figure out which DeclRefExpr in the condition // referenced the compared variable. + assert(drInc->getDecl()); const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS; SmallVector ranges; diff --git a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp index 0e9efaa..efaec2b 100644 --- a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp +++ b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp @@ -25,6 +25,7 @@ using namespace ento; // All checkers should be placed into anonymous namespace. // We place the CheckerDocumentation inside ento namespace to make the // it visible in doxygen. +namespace clang { namespace ento { /// This checker documents the callback functions checkers can use to implement @@ -33,8 +34,8 @@ namespace ento { /// checking. /// /// \sa CheckerContext -class CheckerDocumentation : public Checker< check::PreStmt, - check::PostStmt, +class CheckerDocumentation : public Checker< check::PreStmt, + check::PostStmt, check::PreObjCMessage, check::PostObjCMessage, check::PreCall, @@ -64,8 +65,8 @@ public: /// See checkBranchCondition() callback for performing custom processing of /// the branching statements. 
/// - /// check::PreStmt - void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {} + /// check::PreStmt + void checkPreStmt(const ReturnStmt *DS, CheckerContext &C) const {} /// \brief Post-visit the Statement. /// @@ -74,8 +75,8 @@ public: /// which does not include the control flow statements such as IfStmt. The /// callback can be specialized to be called with any subclass of Stmt. /// - /// check::PostStmt - void checkPostStmt(const CallExpr *DS, CheckerContext &C) const; + /// check::PostStmt + void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const; /// \brief Pre-visit the Objective C message. /// @@ -98,8 +99,8 @@ public: /// behavior for functions and methods no matter how they are being invoked. /// /// Note that this includes ALL cross-body invocations, so if you want to - /// limit your checks to, say, function calls, you can either test for that - /// or fall back to the explicit callback (i.e. check::PreStmt). + /// limit your checks to, say, function calls, you should test for that at the + /// beginning of your callback function. /// /// check::PreCall void checkPreCall(const CallEvent &Call, CheckerContext &C) const {} @@ -151,9 +152,8 @@ public: /// check::DeadSymbols void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {} - /// \brief Called when an end of path is reached in the ExplodedGraph. - /// - /// This callback should be used to check if the allocated resources are freed. + /// \brief Called when the analyzer core reaches the end of the top-level + /// function being analyzed. /// /// check::EndPath void checkEndPath(CheckerContext &Ctx) const {} @@ -213,21 +213,35 @@ public: /// check::LiveSymbols void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const {} - + /// \brief Called to determine if the checker currently needs to know if when + /// contents of any regions change. + /// + /// Since it is not necessarily cheap to compute which regions are being + /// changed, this allows the analyzer core to skip the more expensive + /// #checkRegionChanges when no checkers are tracking any state. bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; } - /// \brief Allows tracking regions which get invalidated. + /// \brief Called when the contents of one or more regions change. + /// + /// This can occur in many different ways: an explicit bind, a blanket + /// invalidation of the region contents, or by passing a region to a function + /// call whose behavior the analyzer cannot model perfectly. /// /// \param State The current program state. /// \param Invalidated A set of all symbols potentially touched by the change. /// \param ExplicitRegions The regions explicitly requested for invalidation. - /// For example, in the case of a function call, these would be arguments. - /// \param Regions The transitive closure of accessible regions, - /// i.e. all regions that may have been touched by this change. - /// \param Call The call expression wrapper if the regions are invalidated - /// by a call, 0 otherwise. - /// Note, in order to be notified, the checker should also implement the - /// wantsRegionChangeUpdate callback. + /// For a function call, this would be the arguments. For a bind, this + /// would be the region being bound to. + /// \param Regions The transitive closure of regions accessible from, + /// \p ExplicitRegions, i.e. all regions that may have been touched + /// by this change. 
For a simple bind, this list will be the same as + /// \p ExplicitRegions, since a bind does not affect the contents of + /// anything accessible through the base region. + /// \param Call The opaque call triggering this invalidation. Will be 0 if the + /// change was not triggered by a call. + /// + /// Note that this callback will not be invoked unless + /// #wantsRegionChangeUpdate returns \c true. /// /// check::RegionChanges ProgramStateRef @@ -256,9 +270,10 @@ public: }; -void CheckerDocumentation::checkPostStmt(const CallExpr *DS, +void CheckerDocumentation::checkPostStmt(const DeclStmt *DS, CheckerContext &C) const { return; } -} // end namespace +} // end namespace ento +} // end namespace clang diff --git a/lib/StaticAnalyzer/Checkers/Checkers.td b/lib/StaticAnalyzer/Checkers/Checkers.td index 8110bd0..235e633 100644 --- a/lib/StaticAnalyzer/Checkers/Checkers.td +++ b/lib/StaticAnalyzer/Checkers/Checkers.td @@ -13,33 +13,33 @@ include "clang/StaticAnalyzer/Checkers/CheckerBase.td" // Packages. //===----------------------------------------------------------------------===// -def Experimental : Package<"experimental">; +def Alpha : Package<"alpha">; def Core : Package<"core">; def CoreBuiltin : Package<"builtin">, InPackage; def CoreUninitialized : Package<"uninitialized">, InPackage; -def CoreExperimental : Package<"core">, InPackage, Hidden; +def CoreAlpha : Package<"core">, InPackage, Hidden; def Cplusplus : Package<"cplusplus">; -def CplusplusExperimental : Package<"cplusplus">, InPackage, Hidden; +def CplusplusAlpha : Package<"cplusplus">, InPackage, Hidden; def DeadCode : Package<"deadcode">; -def DeadCodeExperimental : Package<"deadcode">, InPackage, Hidden; +def DeadCodeAlpha : Package<"deadcode">, InPackage, Hidden; def Security : Package <"security">; def InsecureAPI : Package<"insecureAPI">, InPackage; -def SecurityExperimental : Package<"security">, InPackage, Hidden; -def Taint : Package<"taint">, InPackage, Hidden; +def SecurityAlpha : Package<"security">, InPackage, Hidden; +def Taint : Package<"taint">, InPackage, Hidden; def Unix : Package<"unix">; -def UnixExperimental : Package<"unix">, InPackage, Hidden; +def UnixAlpha : Package<"unix">, InPackage, Hidden; def CString : Package<"cstring">, InPackage, Hidden; -def CStringExperimental : Package<"cstring">, InPackage, Hidden; +def CStringAlpha : Package<"cstring">, InPackage, Hidden; def OSX : Package<"osx">; -def OSXExperimental : Package<"osx">, InPackage, Hidden; +def OSXAlpha : Package<"osx">, InPackage, Hidden; def Cocoa : Package<"cocoa">, InPackage; -def CocoaExperimental : Package<"cocoa">, InPackage, Hidden; +def CocoaAlpha : Package<"cocoa">, InPackage, Hidden; def CoreFoundation : Package<"coreFoundation">, InPackage; def Containers : Package<"containers">, InPackage; @@ -60,10 +60,6 @@ def CallAndMessageChecker : Checker<"CallAndMessage">, HelpText<"Check for logical errors for function calls and Objective-C message expressions (e.g., uninitialized arguments, null function pointers)">, DescFile<"CallAndMessageChecker.cpp">; -def AdjustedReturnValueChecker : Checker<"AdjustedReturnValue">, - HelpText<"Check to see if the return value of a function call is different than the caller expects (e.g., from calls through function pointers)">, - DescFile<"AdjustedReturnValueChecker.cpp">; - def AttrNonNullChecker : Checker<"AttributeNonNull">, HelpText<"Check for null pointers passed as arguments to a function whose arguments are marked with the 'nonnull' attribute">, DescFile<"AttrNonNullChecker.cpp">; @@ -90,7 
+86,7 @@ def DynamicTypePropagation : Checker<"DynamicTypePropagation">, } // end "core" -let ParentPackage = CoreExperimental in { +let ParentPackage = CoreAlpha in { def BoolAssignmentChecker : Checker<"BoolAssignment">, HelpText<"Warn about assigning non-{0,1} values to Boolean variables">, @@ -120,7 +116,7 @@ def SizeofPointerChecker : Checker<"SizeofPtr">, HelpText<"Warn about unintended use of sizeof() on pointer expressions">, DescFile<"CheckSizeofPointer.cpp">; -} // end "core.experimental" +} // end "alpha.core" //===----------------------------------------------------------------------===// // Evaluate "builtin" functions. @@ -170,13 +166,13 @@ def ReturnUndefChecker : Checker<"UndefReturn">, // C++ checkers. //===----------------------------------------------------------------------===// -let ParentPackage = CplusplusExperimental in { +let ParentPackage = CplusplusAlpha in { def VirtualCallChecker : Checker<"VirtualCall">, HelpText<"Check virtual function calls during construction or destruction">, DescFile<"VirtualCallChecker.cpp">; -} // end: "cplusplus.experimental" +} // end: "alpha.cplusplus" //===----------------------------------------------------------------------===// // Deadcode checkers. @@ -189,7 +185,7 @@ def DeadStoresChecker : Checker<"DeadStores">, DescFile<"DeadStoresChecker.cpp">; } // end DeadCode -let ParentPackage = DeadCodeExperimental in { +let ParentPackage = DeadCodeAlpha in { def IdempotentOperationChecker : Checker<"IdempotentOperations">, HelpText<"Warn about idempotent operations">, @@ -199,7 +195,7 @@ def UnreachableCodeChecker : Checker<"UnreachableCode">, HelpText<"Check unreachable code">, DescFile<"UnreachableCodeChecker.cpp">; -} // end "deadcode.experimental" +} // end "alpha.deadcode" //===----------------------------------------------------------------------===// // Security checkers. @@ -237,7 +233,7 @@ let ParentPackage = Security in { DescFile<"CheckSecuritySyntaxOnly.cpp">; } -let ParentPackage = SecurityExperimental in { +let ParentPackage = SecurityAlpha in { def ArrayBoundChecker : Checker<"ArrayBound">, HelpText<"Warn about buffer overflows (older checker)">, @@ -255,7 +251,7 @@ def MallocOverflowSecurityChecker : Checker<"MallocOverflow">, HelpText<"Check for overflows in the arguments to malloc()">, DescFile<"MallocOverflowSecurityChecker.cpp">; -} // end "security.experimental" +} // end "alpha.security" //===----------------------------------------------------------------------===// // Taint checkers. @@ -267,7 +263,7 @@ def GenericTaintChecker : Checker<"TaintPropagation">, HelpText<"Generate taint information used by other checkers">, DescFile<"GenericTaintChecker.cpp">; -} // end "experimental.security.taint" +} // end "alpha.security.taint" //===----------------------------------------------------------------------===// // Unix API checkers. 
@@ -289,7 +285,7 @@ def MallocSizeofChecker : Checker<"MallocSizeof">, } // end "unix" -let ParentPackage = UnixExperimental in { +let ParentPackage = UnixAlpha in { def ChrootChecker : Checker<"Chroot">, HelpText<"Check improper use of chroot">, @@ -307,7 +303,11 @@ def StreamChecker : Checker<"Stream">, HelpText<"Check stream handling functions">, DescFile<"StreamChecker.cpp">; -} // end "unix.experimental" +def SimpleStreamChecker : Checker<"SimpleStream">, + HelpText<"Check for misuses of stream APIs">, + DescFile<"SimpleStreamChecker.cpp">; + +} // end "alpha.unix" let ParentPackage = CString in { @@ -320,7 +320,7 @@ def CStringSyntaxChecker : Checker<"BadSizeArg">, DescFile<"CStringSyntaxChecker.cpp">; } -let ParentPackage = CStringExperimental in { +let ParentPackage = CStringAlpha in { def CStringOutOfBounds : Checker<"OutOfBounds">, HelpText<"Check for out-of-bounds access in string functions">, @@ -346,11 +346,6 @@ def MacOSXAPIChecker : Checker<"API">, HelpText<"Check for proper uses of various Mac OS X APIs">, DescFile<"MacOSXAPIChecker.cpp">; -def OSAtomicChecker : Checker<"AtomicCAS">, - InPackage, - HelpText<"Evaluate calls to OSAtomic functions">, - DescFile<"OSAtomicChecker.cpp">; - def MacOSKeychainAPIChecker : Checker<"SecKeychainAPI">, InPackage, HelpText<"Check for proper uses of Secure Keychain APIs">, @@ -397,6 +392,10 @@ def ObjCLoopChecker : Checker<"Loops">, HelpText<"Improved modeling of loops using Cocoa collection types">, DescFile<"BasicObjCFoundationChecks.cpp">; +def ObjCNonNilReturnValueChecker : Checker<"NonNilReturnValue">, + HelpText<"Model the APIs that are guaranteed to return a non-nil value">, + DescFile<"BasicObjCFoundationChecks.cpp">; + def NSErrorChecker : Checker<"NSError">, HelpText<"Check usage of NSError** parameters">, DescFile<"NSErrorChecker.cpp">; @@ -407,13 +406,25 @@ def RetainCountChecker : Checker<"RetainCount">, } // end "osx.cocoa" -let ParentPackage = CocoaExperimental in { +let ParentPackage = CocoaAlpha in { def ObjCDeallocChecker : Checker<"Dealloc">, HelpText<"Warn about Objective-C classes that lack a correct implementation of -dealloc">, DescFile<"CheckObjCDealloc.cpp">; -} // end "cocoa.experimental" +def IvarInvalidationChecker : Checker<"InstanceVariableInvalidation">, + HelpText<"Check that the invalidatable instance variables are invalidated in the methods annotated with objc_instance_variable_invalidator">, + DescFile<"IvarInvalidationChecker.cpp">; + +def DirectIvarAssignment : Checker<"DirectIvarAssignment">, + HelpText<"Check that the invalidatable instance variables are invalidated in the methods annotated with objc_instance_variable_invalidator">, + DescFile<"DirectIvarAssignment.cpp">; + +def ObjCSuperCallChecker : Checker<"MissingSuperCall">, + HelpText<"Warn about Objective-C methods that lack a necessary call to super">, + DescFile<"ObjCMissingSuperCallChecker.cpp">; + +} // end "alpha.osx.cocoa" let ParentPackage = CoreFoundation in { @@ -422,7 +433,7 @@ def CFNumberCreateChecker : Checker<"CFNumber">, DescFile<"BasicObjCFoundationChecks.cpp">; def CFRetainReleaseChecker : Checker<"CFRetainRelease">, - HelpText<"Check for null arguments to CFRetain/CFRelease">, + HelpText<"Check for null arguments to CFRetain/CFRelease/CFMakeCollectable">, DescFile<"BasicObjCFoundationChecks.cpp">; def CFErrorChecker : Checker<"CFError">, @@ -479,6 +490,10 @@ def CallGraphDumper : Checker<"DumpCallGraph">, HelpText<"Display Call Graph">, DescFile<"DebugCheckers.cpp">; +def ConfigDumper : Checker<"ConfigDumper">, + 
HelpText<"Dump config table">, + DescFile<"DebugCheckers.cpp">; + def TraversalDumper : Checker<"DumpTraversal">, HelpText<"Print branch conditions as they are traversed by the engine">, DescFile<"TraversalChecker.cpp">; diff --git a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp index 30d0609..c885616 100644 --- a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp @@ -147,7 +147,7 @@ void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const { "after chroot")); BugReport *R = new BugReport(*BT_BreakJail, BT_BreakJail->getDescription(), N); - C.EmitReport(R); + C.emitReport(R); } return; diff --git a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp index 510e8cd..59e03ec 100644 --- a/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp @@ -13,22 +13,54 @@ //===----------------------------------------------------------------------===// #include "ClangSACheckers.h" -#include "clang/StaticAnalyzer/Core/Checker.h" -#include "clang/Analysis/Analyses/LiveVariables.h" -#include "clang/Analysis/Visitors/CFGRecStmtVisitor.h" -#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" -#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" -#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h" -#include "clang/Basic/Diagnostic.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ParentMap.h" -#include "llvm/ADT/SmallPtrSet.h" +#include "clang/AST/RecursiveASTVisitor.h" +#include "clang/Analysis/Analyses/LiveVariables.h" +#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h" +#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" +#include "clang/StaticAnalyzer/Core/Checker.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h" +#include "llvm/ADT/BitVector.h" #include "llvm/ADT/SmallString.h" +#include "llvm/Support/SaveAndRestore.h" using namespace clang; using namespace ento; -namespace { +namespace { + +/// A simple visitor to record what VarDecls occur in EH-handling code. +class EHCodeVisitor : public RecursiveASTVisitor { +public: + bool inEH; + llvm::DenseSet &S; + + bool TraverseObjCAtFinallyStmt(ObjCAtFinallyStmt *S) { + SaveAndRestore inFinally(inEH, true); + return ::RecursiveASTVisitor::TraverseObjCAtFinallyStmt(S); + } + + bool TraverseObjCAtCatchStmt(ObjCAtCatchStmt *S) { + SaveAndRestore inCatch(inEH, true); + return ::RecursiveASTVisitor::TraverseObjCAtCatchStmt(S); + } + + bool TraverseCXXCatchStmt(CXXCatchStmt *S) { + SaveAndRestore inCatch(inEH, true); + return TraverseStmt(S->getHandlerBlock()); + } + + bool VisitDeclRefExpr(DeclRefExpr *DR) { + if (inEH) + if (const VarDecl *D = dyn_cast(DR->getDecl())) + S.insert(D); + return true; + } + + EHCodeVisitor(llvm::DenseSet &S) : + inEH(false), S(S) {} +}; // FIXME: Eventually migrate into its own file, and have it managed by // AnalysisManager. @@ -93,6 +125,7 @@ class DeadStoreObs : public LiveVariables::Observer { llvm::SmallPtrSet Escaped; OwningPtr reachableCode; const CFGBlock *currentBlock; + llvm::OwningPtr > InEH; enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit }; @@ -105,6 +138,23 @@ public: virtual ~DeadStoreObs() {} + bool isLive(const LiveVariables::LivenessValues &Live, const VarDecl *D) { + if (Live.isLive(D)) + return true; + // Lazily construct the set that records which VarDecls are in + // EH code. 
+ if (!InEH.get()) { + InEH.reset(new llvm::DenseSet()); + EHCodeVisitor V(*InEH.get()); + V.TraverseStmt(AC->getBody()); + } + // Treat all VarDecls that occur in EH code as being "always live" + // when considering to suppress dead stores. Frequently stores + // are followed by reads in EH code, but we don't have the ability + // to analyze that yet. + return InEH->count(D); + } + void Report(const VarDecl *V, DeadStoreKind dsk, PathDiagnosticLocation L, SourceRange R) { if (Escaped.count(V)) @@ -159,7 +209,7 @@ public: if (VD->getType()->getAs()) return; - if (!Live.isLive(VD) && + if (!isLive(Live, VD) && !(VD->getAttr() || VD->getAttr())) { PathDiagnosticLocation ExLoc = @@ -285,7 +335,7 @@ public: // A dead initialization is a variable that is dead after it // is initialized. We don't flag warnings for those variables // marked 'unused'. - if (!Live.isLive(V) && V->getAttr() == 0) { + if (!isLive(Live, V) && V->getAttr() == 0) { // Special case: check for initializations with constants. // // e.g. : int x = 0; diff --git a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp index 34053cd..7ad9c59 100644 --- a/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp +++ b/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp @@ -144,3 +144,38 @@ public: void ento::registerCallGraphDumper(CheckerManager &mgr) { mgr.registerChecker(); } + + +//===----------------------------------------------------------------------===// +// ConfigDumper +//===----------------------------------------------------------------------===// + +namespace { +class ConfigDumper : public Checker< check::EndOfTranslationUnit > { +public: + void checkEndOfTranslationUnit(const TranslationUnitDecl *TU, + AnalysisManager& mgr, + BugReporter &BR) const { + + const AnalyzerOptions::ConfigTable &Config = mgr.options.Config; + AnalyzerOptions::ConfigTable::const_iterator I = + Config.begin(), E = Config.end(); + + std::vector Keys; + for (; I != E ; ++I) { Keys.push_back(I->getKey()); } + sort(Keys.begin(), Keys.end()); + + llvm::errs() << "[config]\n"; + for (unsigned i = 0, n = Keys.size(); i < n ; ++i) { + StringRef Key = Keys[i]; + I = Config.find(Key); + llvm::errs() << Key << " = " << I->second << '\n'; + } + llvm::errs() << "[stats]\n" << "num-entries = " << Keys.size() << '\n'; + } +}; +} + +void ento::registerConfigDumper(CheckerManager &mgr) { + mgr.registerChecker(); +} diff --git a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp index e98c131..3ace4be 100644 --- a/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp @@ -39,7 +39,7 @@ public: CheckerContext &C) const; void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const; - static const MemRegion *AddDerefSource(raw_ostream &os, + static void AddDerefSource(raw_ostream &os, SmallVectorImpl &Ranges, const Expr *Ex, const ProgramState *state, const LocationContext *LCtx, @@ -47,7 +47,7 @@ public: }; } // end anonymous namespace -const MemRegion * +void DereferenceChecker::AddDerefSource(raw_ostream &os, SmallVectorImpl &Ranges, const Expr *Ex, @@ -55,7 +55,6 @@ DereferenceChecker::AddDerefSource(raw_ostream &os, const LocationContext *LCtx, bool loadedFrom) { Ex = Ex->IgnoreParenLValueCasts(); - const MemRegion *sourceR = 0; switch (Ex->getStmtClass()) { default: break; @@ -65,7 +64,6 @@ DereferenceChecker::AddDerefSource(raw_ostream &os, os << " (" << (loadedFrom ? 
"loaded from" : "from") << " variable '" << VD->getName() << "')"; Ranges.push_back(DR->getSourceRange()); - sourceR = state->getLValue(VD, LCtx).getAsRegion(); } break; } @@ -78,7 +76,6 @@ DereferenceChecker::AddDerefSource(raw_ostream &os, break; } } - return sourceR; } void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S, @@ -94,6 +91,8 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S, BT_null.reset(new BuiltinBug("Dereference of null pointer")); SmallString<100> buf; + llvm::raw_svector_ostream os(buf); + SmallVector Ranges; // Walk through lvalue casts to get the original expression @@ -101,8 +100,6 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S, if (const Expr *expr = dyn_cast(S)) S = expr->IgnoreParenLValueCasts(); - const MemRegion *sourceR = 0; - if (IsBind) { if (const BinaryOperator *BO = dyn_cast(S)) { if (BO->isAssignmentOp()) @@ -117,68 +114,55 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S, switch (S->getStmtClass()) { case Stmt::ArraySubscriptExprClass: { - llvm::raw_svector_ostream os(buf); os << "Array access"; const ArraySubscriptExpr *AE = cast(S); - sourceR = AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(), - State.getPtr(), N->getLocationContext()); + AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(), + State.getPtr(), N->getLocationContext()); os << " results in a null pointer dereference"; break; } case Stmt::UnaryOperatorClass: { - llvm::raw_svector_ostream os(buf); os << "Dereference of null pointer"; const UnaryOperator *U = cast(S); - sourceR = AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(), - State.getPtr(), N->getLocationContext(), true); + AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(), + State.getPtr(), N->getLocationContext(), true); break; } case Stmt::MemberExprClass: { const MemberExpr *M = cast(S); - if (M->isArrow()) { - llvm::raw_svector_ostream os(buf); + if (M->isArrow() || bugreporter::isDeclRefExprToReference(M->getBase())) { os << "Access to field '" << M->getMemberNameInfo() << "' results in a dereference of a null pointer"; - sourceR = AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(), - State.getPtr(), N->getLocationContext(), true); + AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(), + State.getPtr(), N->getLocationContext(), true); } break; } case Stmt::ObjCIvarRefExprClass: { const ObjCIvarRefExpr *IV = cast(S); - if (const DeclRefExpr *DR = - dyn_cast(IV->getBase()->IgnoreParenCasts())) { - if (const VarDecl *VD = dyn_cast(DR->getDecl())) { - llvm::raw_svector_ostream os(buf); - os << "Instance variable access (via '" << VD->getName() - << "') results in a null pointer dereference"; - } - } - Ranges.push_back(IV->getSourceRange()); + os << "Access to instance variable '" << *IV->getDecl() + << "' results in a dereference of a null pointer"; + AddDerefSource(os, Ranges, IV->getBase()->IgnoreParenCasts(), + State.getPtr(), N->getLocationContext(), true); break; } default: break; } + os.flush(); BugReport *report = new BugReport(*BT_null, buf.empty() ? 
BT_null->getDescription() : buf.str(), N); - bugreporter::addTrackNullOrUndefValueVisitor(N, bugreporter::GetDerefExpr(N), - report); + bugreporter::trackNullOrUndefValue(N, bugreporter::GetDerefExpr(N), *report); for (SmallVectorImpl::iterator I = Ranges.begin(), E = Ranges.end(); I!=E; ++I) report->addRange(*I); - if (sourceR) { - report->markInteresting(sourceR); - report->markInteresting(State->getRawSVal(loc::MemRegionVal(sourceR))); - } - - C.EmitReport(report); + C.emitReport(report); } void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S, @@ -191,11 +175,9 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S, BugReport *report = new BugReport(*BT_undef, BT_undef->getDescription(), N); - bugreporter::addTrackNullOrUndefValueVisitor(N, - bugreporter::GetDerefExpr(N), - report); - report->disablePathPruning(); - C.EmitReport(report); + bugreporter::trackNullOrUndefValue(N, bugreporter::GetDerefExpr(N), + *report); + C.emitReport(report); } return; } diff --git a/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp new file mode 100644 index 0000000..dc90b67 --- /dev/null +++ b/lib/StaticAnalyzer/Checkers/DirectIvarAssignment.cpp @@ -0,0 +1,180 @@ +//=- DirectIvarAssignment.cpp - Check rules on ObjC properties -*- C++ ----*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Check that Objective C properties follow the following rules: +// - The property should be set with the setter, not though a direct +// assignment. +// +//===----------------------------------------------------------------------===// + +#include "ClangSACheckers.h" +#include "clang/StaticAnalyzer/Core/Checker.h" +#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/StmtVisitor.h" +#include "llvm/ADT/DenseMap.h" + +using namespace clang; +using namespace ento; + +namespace { + +class DirectIvarAssignment : + public Checker > { + + typedef llvm::DenseMap IvarToPropertyMapTy; + + /// A helper class, which walks the AST and locates all assignments to ivars + /// in the given function. + class MethodCrawler : public ConstStmtVisitor { + const IvarToPropertyMapTy &IvarToPropMap; + const ObjCMethodDecl *MD; + const ObjCInterfaceDecl *InterfD; + BugReporter &BR; + LocationOrAnalysisDeclContext DCtx; + + public: + MethodCrawler(const IvarToPropertyMapTy &InMap, const ObjCMethodDecl *InMD, + const ObjCInterfaceDecl *InID, + BugReporter &InBR, AnalysisDeclContext *InDCtx) + : IvarToPropMap(InMap), MD(InMD), InterfD(InID), BR(InBR), DCtx(InDCtx) {} + + void VisitStmt(const Stmt *S) { VisitChildren(S); } + + void VisitBinaryOperator(const BinaryOperator *BO); + + void VisitChildren(const Stmt *S) { + for (Stmt::const_child_range I = S->children(); I; ++I) + if (*I) + this->Visit(*I); + } + }; + +public: + void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& Mgr, + BugReporter &BR) const; +}; + +static const ObjCIvarDecl *findPropertyBackingIvar(const ObjCPropertyDecl *PD, + const ObjCInterfaceDecl *InterD, + ASTContext &Ctx) { + // Check for synthesized ivars. 
+ ObjCIvarDecl *ID = PD->getPropertyIvarDecl(); + if (ID) + return ID; + + ObjCInterfaceDecl *NonConstInterD = const_cast(InterD); + + // Check for existing "_PropName". + ID = NonConstInterD->lookupInstanceVariable(PD->getDefaultSynthIvarName(Ctx)); + if (ID) + return ID; + + // Check for existing "PropName". + IdentifierInfo *PropIdent = PD->getIdentifier(); + ID = NonConstInterD->lookupInstanceVariable(PropIdent); + + return ID; +} + +void DirectIvarAssignment::checkASTDecl(const ObjCImplementationDecl *D, + AnalysisManager& Mgr, + BugReporter &BR) const { + const ObjCInterfaceDecl *InterD = D->getClassInterface(); + + + IvarToPropertyMapTy IvarToPropMap; + + // Find all properties for this class. + for (ObjCInterfaceDecl::prop_iterator I = InterD->prop_begin(), + E = InterD->prop_end(); I != E; ++I) { + ObjCPropertyDecl *PD = *I; + + // Find the corresponding IVar. + const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterD, + Mgr.getASTContext()); + + if (!ID) + continue; + + // Store the IVar to property mapping. + IvarToPropMap[ID] = PD; + } + + if (IvarToPropMap.empty()) + return; + + for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(), + E = D->instmeth_end(); I != E; ++I) { + + ObjCMethodDecl *M = *I; + AnalysisDeclContext *DCtx = Mgr.getAnalysisDeclContext(M); + + // Skip the init, dealloc functions and any functions that might be doing + // initialization based on their name. + if (M->getMethodFamily() == OMF_init || + M->getMethodFamily() == OMF_dealloc || + M->getMethodFamily() == OMF_copy || + M->getMethodFamily() == OMF_mutableCopy || + M->getSelector().getNameForSlot(0).find("init") != StringRef::npos || + M->getSelector().getNameForSlot(0).find("Init") != StringRef::npos) + continue; + + const Stmt *Body = M->getBody(); + assert(Body); + + MethodCrawler MC(IvarToPropMap, M->getCanonicalDecl(), InterD, BR, DCtx); + MC.VisitStmt(Body); + } +} + +void DirectIvarAssignment::MethodCrawler::VisitBinaryOperator( + const BinaryOperator *BO) { + if (!BO->isAssignmentOp()) + return; + + const ObjCIvarRefExpr *IvarRef = + dyn_cast(BO->getLHS()->IgnoreParenCasts()); + + if (!IvarRef) + return; + + if (const ObjCIvarDecl *D = IvarRef->getDecl()) { + IvarToPropertyMapTy::const_iterator I = IvarToPropMap.find(D); + if (I != IvarToPropMap.end()) { + const ObjCPropertyDecl *PD = I->second; + + ObjCMethodDecl *GetterMethod = + InterfD->getInstanceMethod(PD->getGetterName()); + ObjCMethodDecl *SetterMethod = + InterfD->getInstanceMethod(PD->getSetterName()); + + if (SetterMethod && SetterMethod->getCanonicalDecl() == MD) + return; + + if (GetterMethod && GetterMethod->getCanonicalDecl() == MD) + return; + + BR.EmitBasicReport(MD, + "Property access", + categories::CoreFoundationObjectiveC, + "Direct assignment to an instance variable backing a property; " + "use the setter instead", PathDiagnosticLocation(IvarRef, + BR.getSourceManager(), + DCtx)); + } + } +} +} + +void ento::registerDirectIvarAssignment(CheckerManager &mgr) { + mgr.registerChecker(); +} diff --git a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp index dcf6a86..76fb3f2 100644 --- a/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp @@ -39,13 +39,9 @@ void DivZeroChecker::reportBug(const char *Msg, if (!BT) BT.reset(new BuiltinBug("Division by zero")); - BugReport *R = - new BugReport(*BT, Msg, N); - - bugreporter::addTrackNullOrUndefValueVisitor(N, - bugreporter::GetDenomExpr(N), - R); - C.EmitReport(R); + 
BugReport *R = new BugReport(*BT, Msg, N); + bugreporter::trackNullOrUndefValue(N, bugreporter::GetDenomExpr(N), *R); + C.emitReport(R); } } diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp index b636efb..b0a4bc6 100644 --- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp +++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp @@ -83,14 +83,14 @@ void DynamicTypePropagation::checkPreCall(const CallEvent &Call, if (const CXXDestructorCall *Dtor = dyn_cast(&Call)) { // C++11 [class.cdtor]p4 (see above) + if (!Dtor->isBaseDestructor()) + return; const MemRegion *Target = Dtor->getCXXThisVal().getAsRegion(); if (!Target) return; - // FIXME: getRuntimeDefinition() can be expensive. It would be better to do - // this when we are entering the stack frame for the destructor. - const Decl *D = Dtor->getRuntimeDefinition().getDecl(); + const Decl *D = Dtor->getDecl(); if (!D) return; @@ -105,8 +105,7 @@ void DynamicTypePropagation::checkPostCall(const CallEvent &Call, if (const ObjCMethodCall *Msg = dyn_cast(&Call)) { // Get the returned value if it's a region. - SVal Result = C.getSVal(Call.getOriginExpr()); - const MemRegion *RetReg = Result.getAsRegion(); + const MemRegion *RetReg = Call.getReturnValue().getAsRegion(); if (!RetReg) return; diff --git a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp index 7acf223..e7e3162 100644 --- a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp @@ -93,7 +93,7 @@ void ExprInspectionChecker::analyzerEval(const CallExpr *CE, BT.reset(new BugType("Checking analyzer assumptions", "debug")); BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N); - C.EmitReport(R); + C.emitReport(R); } void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE, @@ -113,7 +113,7 @@ void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE, BT.reset(new BugType("Checking analyzer assumptions", "debug")); BugReport *R = new BugReport(*BT, getArgumentValueString(CE, C), N); - C.EmitReport(R); + C.emitReport(R); } void ento::registerExprInspectionChecker(CheckerManager &Mgr) { diff --git a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp index a1f2f3b..7fde689 100644 --- a/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp @@ -58,7 +58,7 @@ void FixedAddressChecker::checkPreStmt(const BinaryOperator *B, "environments or platforms.")); BugReport *R = new BugReport(*BT, BT->getDescription(), N); R->addRange(B->getRHS()->getSourceRange()); - C.EmitReport(R); + C.emitReport(R); } } diff --git a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp index afb862c..a9e0217 100644 --- a/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp @@ -192,13 +192,7 @@ const char GenericTaintChecker::MsgTaintedBufferSize[] = /// to the call post-visit. The values are unsigned integers, which are either /// ReturnValueIndex, or indexes of the pointer/reference argument, which /// points to data, which should be tainted on return. 
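The GenericTaintChecker hunk that follows replaces a hand-written ProgramStateTrait specialization with the new REGISTER_SET_WITH_PROGRAMSTATE macro, just as the CStringChecker hunk earlier switches to REGISTER_MAP_WITH_PROGRAMSTATE. A minimal sketch of how a checker declares and uses such a trait is shown below; the set name MyTrackedArgs and the helper functions are hypothetical, introduced only for illustration.

#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"

using namespace clang;
using namespace ento;

// One macro line replaces the old ProgramStateTrait<> specialization
// boilerplate; 'MyTrackedArgs' is a hypothetical name for this sketch.
REGISTER_SET_WITH_PROGRAMSTATE(MyTrackedArgs, unsigned)

static ProgramStateRef rememberArg(ProgramStateRef State, unsigned Idx) {
  return State->add<MyTrackedArgs>(Idx);      // returns a new immutable state
}

static bool isRemembered(ProgramStateRef State, unsigned Idx) {
  return State->contains<MyTrackedArgs>(Idx); // query without modifying state
}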
-namespace { struct TaintArgsOnPostVisit{}; } -namespace clang { namespace ento { -template<> struct ProgramStateTrait - : public ProgramStatePartialTrait > { - static void *GDMIndex() { return GenericTaintChecker::getTag(); } -}; -}} +REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, unsigned) GenericTaintChecker::TaintPropagationRule GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule( @@ -337,7 +331,7 @@ bool GenericTaintChecker::propagateFromPre(const CallExpr *CE, // Depending on what was tainted at pre-visit, we determined a set of // arguments which should be tainted after the function returns. These are // stored in the state as TaintArgsOnPostVisit set. - llvm::ImmutableSet TaintArgs = State->get(); + TaintArgsOnPostVisitTy TaintArgs = State->get(); if (TaintArgs.isEmpty()) return false; @@ -653,7 +647,7 @@ bool GenericTaintChecker::generateReportIfTainted(const Expr *E, initBugType(); BugReport *report = new BugReport(*BT, Msg, N); report->addRange(E->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); return true; } return false; diff --git a/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp b/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp index 9d0b83f..ffbbb8b 100644 --- a/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp @@ -430,7 +430,7 @@ void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G, FindLastStoreBRVisitor::registerStatementVarDecls(*report, RHS); } - BR.EmitReport(report); + BR.emitReport(report); } } diff --git a/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp new file mode 100644 index 0000000..bf256cd --- /dev/null +++ b/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp @@ -0,0 +1,550 @@ +//=- IvarInvalidationChecker.cpp - -*- C++ -------------------------------*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This checker implements annotation driven invalidation checking. If a class +// contains a method annotated with 'objc_instance_variable_invalidator', +// - (void) foo +// __attribute__((annotate("objc_instance_variable_invalidator"))); +// all the "ivalidatable" instance variables of this class should be +// invalidated. We call an instance variable ivalidatable if it is an object of +// a class which contains an invalidation method. There could be multiple +// methods annotated with such annotations per class, either one can be used +// to invalidate the ivar. An ivar or property are considered to be +// invalidated if they are being assigned 'nil' or an invalidation method has +// been called on them. An invalidation method should either invalidate all +// the ivars or call another invalidation method (on self). 
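For illustration only (nothing below is part of the patch except the annotation string quoted in the comment above): a project adopting this checker could hide the attribute behind a small helper macro so invalidation methods are easy to mark.

// Hypothetical convenience macro; only the annotation string comes from the
// checker's contract described above.
#define OBJC_IVAR_INVALIDATOR \
  __attribute__((annotate("objc_instance_variable_invalidator")))

// Usage on an Objective-C method declaration:
//   - (void)invalidate OBJC_IVAR_INVALIDATOR;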
+// +//===----------------------------------------------------------------------===// + +#include "ClangSACheckers.h" +#include "clang/StaticAnalyzer/Core/Checker.h" +#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/StmtVisitor.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallString.h" + +using namespace clang; +using namespace ento; + +namespace { +class IvarInvalidationChecker : + public Checker > { + + typedef llvm::DenseSet MethodSet; + typedef llvm::DenseMap MethToIvarMapTy; + typedef llvm::DenseMap PropToIvarMapTy; + typedef llvm::DenseMap IvarToPropMapTy; + + + struct IvarInfo { + /// Has the ivar been invalidated? + bool IsInvalidated; + + /// The methods which can be used to invalidate the ivar. + MethodSet InvalidationMethods; + + IvarInfo() : IsInvalidated(false) {} + void addInvalidationMethod(const ObjCMethodDecl *MD) { + InvalidationMethods.insert(MD); + } + + bool needsInvalidation() const { + return !InvalidationMethods.empty(); + } + + void markInvalidated() { + IsInvalidated = true; + } + + bool markInvalidated(const ObjCMethodDecl *MD) { + if (IsInvalidated) + return true; + for (MethodSet::iterator I = InvalidationMethods.begin(), + E = InvalidationMethods.end(); I != E; ++I) { + if (*I == MD) { + IsInvalidated = true; + return true; + } + } + return false; + } + + bool isInvalidated() const { + return IsInvalidated; + } + }; + + typedef llvm::DenseMap IvarSet; + + /// Statement visitor, which walks the method body and flags the ivars + /// referenced in it (either directly or via property). + class MethodCrawler : public ConstStmtVisitor { + /// The set of Ivars which need to be invalidated. + IvarSet &IVars; + + /// Flag is set as the result of a message send to another + /// invalidation method. + bool &CalledAnotherInvalidationMethod; + + /// Property setter to ivar mapping. + const MethToIvarMapTy &PropertySetterToIvarMap; + + /// Property getter to ivar mapping. + const MethToIvarMapTy &PropertyGetterToIvarMap; + + /// Property to ivar mapping. + const PropToIvarMapTy &PropertyToIvarMap; + + /// The invalidation method being currently processed. + const ObjCMethodDecl *InvalidationMethod; + + ASTContext &Ctx; + + /// Peel off parens, casts, OpaqueValueExpr, and PseudoObjectExpr. + const Expr *peel(const Expr *E) const; + + /// Does this expression represent zero: '0'? + bool isZero(const Expr *E) const; + + /// Mark the given ivar as invalidated. + void markInvalidated(const ObjCIvarDecl *Iv); + + /// Checks if IvarRef refers to the tracked IVar, if yes, marks it as + /// invalidated. + void checkObjCIvarRefExpr(const ObjCIvarRefExpr *IvarRef); + + /// Checks if ObjCPropertyRefExpr refers to the tracked IVar, if yes, marks + /// it as invalidated. + void checkObjCPropertyRefExpr(const ObjCPropertyRefExpr *PA); + + /// Checks if ObjCMessageExpr refers to (is a getter for) the tracked IVar, + /// if yes, marks it as invalidated. + void checkObjCMessageExpr(const ObjCMessageExpr *ME); + + /// Checks if the Expr refers to an ivar, if yes, marks it as invalidated. 
+ void check(const Expr *E); + + public: + MethodCrawler(IvarSet &InIVars, + bool &InCalledAnotherInvalidationMethod, + const MethToIvarMapTy &InPropertySetterToIvarMap, + const MethToIvarMapTy &InPropertyGetterToIvarMap, + const PropToIvarMapTy &InPropertyToIvarMap, + ASTContext &InCtx) + : IVars(InIVars), + CalledAnotherInvalidationMethod(InCalledAnotherInvalidationMethod), + PropertySetterToIvarMap(InPropertySetterToIvarMap), + PropertyGetterToIvarMap(InPropertyGetterToIvarMap), + PropertyToIvarMap(InPropertyToIvarMap), + InvalidationMethod(0), + Ctx(InCtx) {} + + void VisitStmt(const Stmt *S) { VisitChildren(S); } + + void VisitBinaryOperator(const BinaryOperator *BO); + + void VisitObjCMessageExpr(const ObjCMessageExpr *ME); + + void VisitChildren(const Stmt *S) { + for (Stmt::const_child_range I = S->children(); I; ++I) { + if (*I) + this->Visit(*I); + if (CalledAnotherInvalidationMethod) + return; + } + } + }; + + /// Check if the any of the methods inside the interface are annotated with + /// the invalidation annotation, update the IvarInfo accordingly. + static void containsInvalidationMethod(const ObjCContainerDecl *D, + IvarInfo &Out); + + /// Check if ivar should be tracked and add to TrackedIvars if positive. + /// Returns true if ivar should be tracked. + static bool trackIvar(const ObjCIvarDecl *Iv, IvarSet &TrackedIvars); + + /// Given the property declaration, and the list of tracked ivars, finds + /// the ivar backing the property when possible. Returns '0' when no such + /// ivar could be found. + static const ObjCIvarDecl *findPropertyBackingIvar( + const ObjCPropertyDecl *Prop, + const ObjCInterfaceDecl *InterfaceD, + IvarSet &TrackedIvars); + +public: + void checkASTDecl(const ObjCMethodDecl *D, AnalysisManager& Mgr, + BugReporter &BR) const; + + // TODO: We are currently ignoring the ivars coming from class extensions. +}; + +static bool isInvalidationMethod(const ObjCMethodDecl *M) { + for (specific_attr_iterator + AI = M->specific_attr_begin(), + AE = M->specific_attr_end(); AI != AE; ++AI) { + const AnnotateAttr *Ann = *AI; + if (Ann->getAnnotation() == "objc_instance_variable_invalidator") + return true; + } + return false; +} + +void IvarInvalidationChecker::containsInvalidationMethod( + const ObjCContainerDecl *D, IvarInfo &OutInfo) { + + // TODO: Cache the results. + + if (!D) + return; + + // Check all methods. + for (ObjCContainerDecl::method_iterator + I = D->meth_begin(), + E = D->meth_end(); I != E; ++I) { + const ObjCMethodDecl *MDI = *I; + if (isInvalidationMethod(MDI)) + OutInfo.addInvalidationMethod( + cast(MDI->getCanonicalDecl())); + } + + // If interface, check all parent protocols and super. + // TODO: Visit all categories in case the invalidation method is declared in + // a category. + if (const ObjCInterfaceDecl *InterfaceD = dyn_cast(D)) { + for (ObjCInterfaceDecl::protocol_iterator + I = InterfaceD->protocol_begin(), + E = InterfaceD->protocol_end(); I != E; ++I) { + containsInvalidationMethod(*I, OutInfo); + } + containsInvalidationMethod(InterfaceD->getSuperClass(), OutInfo); + return; + } + + // If protocol, check all parent protocols. 
+ if (const ObjCProtocolDecl *ProtD = dyn_cast(D)) { + for (ObjCInterfaceDecl::protocol_iterator + I = ProtD->protocol_begin(), + E = ProtD->protocol_end(); I != E; ++I) { + containsInvalidationMethod(*I, OutInfo); + } + return; + } + + llvm_unreachable("One of the casts above should have succeeded."); +} + +bool IvarInvalidationChecker::trackIvar(const ObjCIvarDecl *Iv, + IvarSet &TrackedIvars) { + QualType IvQTy = Iv->getType(); + const ObjCObjectPointerType *IvTy = IvQTy->getAs(); + if (!IvTy) + return false; + const ObjCInterfaceDecl *IvInterf = IvTy->getInterfaceDecl(); + + IvarInfo Info; + containsInvalidationMethod(IvInterf, Info); + if (Info.needsInvalidation()) { + TrackedIvars[cast(Iv->getCanonicalDecl())] = Info; + return true; + } + return false; +} + +const ObjCIvarDecl *IvarInvalidationChecker::findPropertyBackingIvar( + const ObjCPropertyDecl *Prop, + const ObjCInterfaceDecl *InterfaceD, + IvarSet &TrackedIvars) { + const ObjCIvarDecl *IvarD = 0; + + // Lookup for the synthesized case. + IvarD = Prop->getPropertyIvarDecl(); + if (IvarD) { + if (TrackedIvars.count(IvarD)) { + return IvarD; + } + // If the ivar is synthesized we still want to track it. + if (trackIvar(IvarD, TrackedIvars)) + return IvarD; + } + + // Lookup IVars named "_PropName"or "PropName" among the tracked Ivars. + StringRef PropName = Prop->getIdentifier()->getName(); + for (IvarSet::const_iterator I = TrackedIvars.begin(), + E = TrackedIvars.end(); I != E; ++I) { + const ObjCIvarDecl *Iv = I->first; + StringRef IvarName = Iv->getName(); + + if (IvarName == PropName) + return Iv; + + SmallString<128> PropNameWithUnderscore; + { + llvm::raw_svector_ostream os(PropNameWithUnderscore); + os << '_' << PropName; + } + if (IvarName == PropNameWithUnderscore.str()) + return Iv; + } + + // Note, this is a possible source of false positives. We could look at the + // getter implementation to find the ivar when its name is not derived from + // the property name. + return 0; +} + +void IvarInvalidationChecker::checkASTDecl(const ObjCMethodDecl *D, + AnalysisManager& Mgr, + BugReporter &BR) const { + // We are only interested in checking the cleanup methods. + if (!D->hasBody() || !isInvalidationMethod(D)) + return; + + // Collect all ivars that need cleanup. + IvarSet Ivars; + const ObjCInterfaceDecl *InterfaceD = D->getClassInterface(); + + // Collect ivars declared in this class, its extensions and its implementation + ObjCInterfaceDecl *IDecl = const_cast(InterfaceD); + for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv; + Iv= Iv->getNextIvar()) + trackIvar(Iv, Ivars); + + // Construct Property/Property Accessor to Ivar maps to assist checking if an + // ivar which is backing a property has been reset. + MethToIvarMapTy PropSetterToIvarMap; + MethToIvarMapTy PropGetterToIvarMap; + PropToIvarMapTy PropertyToIvarMap; + IvarToPropMapTy IvarToPopertyMap; + + ObjCInterfaceDecl::PropertyMap PropMap; + InterfaceD->collectPropertiesToImplement(PropMap); + + for (ObjCInterfaceDecl::PropertyMap::iterator + I = PropMap.begin(), E = PropMap.end(); I != E; ++I) { + const ObjCPropertyDecl *PD = I->second; + + const ObjCIvarDecl *ID = findPropertyBackingIvar(PD, InterfaceD, Ivars); + if (!ID) { + continue; + } + + // Store the mappings. + PD = cast(PD->getCanonicalDecl()); + PropertyToIvarMap[PD] = ID; + IvarToPopertyMap[ID] = PD; + + // Find the setter and the getter. 
+ const ObjCMethodDecl *SetterD = PD->getSetterMethodDecl(); + if (SetterD) { + SetterD = cast(SetterD->getCanonicalDecl()); + PropSetterToIvarMap[SetterD] = ID; + } + + const ObjCMethodDecl *GetterD = PD->getGetterMethodDecl(); + if (GetterD) { + GetterD = cast(GetterD->getCanonicalDecl()); + PropGetterToIvarMap[GetterD] = ID; + } + } + + + // Check which ivars have been invalidated in the method body. + bool CalledAnotherInvalidationMethod = false; + MethodCrawler(Ivars, + CalledAnotherInvalidationMethod, + PropSetterToIvarMap, + PropGetterToIvarMap, + PropertyToIvarMap, + BR.getContext()).VisitStmt(D->getBody()); + + if (CalledAnotherInvalidationMethod) + return; + + // Warn on the ivars that were not accessed by the method. + for (IvarSet::const_iterator I = Ivars.begin(), E = Ivars.end(); I != E; ++I){ + if (!I->second.isInvalidated()) { + const ObjCIvarDecl *IvarDecl = I->first; + + PathDiagnosticLocation IvarDecLocation = + PathDiagnosticLocation::createEnd(D->getBody(), BR.getSourceManager(), + Mgr.getAnalysisDeclContext(D)); + + SmallString<128> sbuf; + llvm::raw_svector_ostream os(sbuf); + + // Construct the warning message. + if (IvarDecl->getSynthesize()) { + const ObjCPropertyDecl *PD = IvarToPopertyMap[IvarDecl]; + assert(PD && + "Do we synthesize ivars for something other than properties?"); + os << "Property "<< PD->getName() << + " needs to be invalidated or set to nil"; + } else { + os << "Instance variable "<< IvarDecl->getName() + << " needs to be invalidated or set to nil"; + } + + BR.EmitBasicReport(D, + "Incomplete invalidation", + categories::CoreFoundationObjectiveC, os.str(), + IvarDecLocation); + } + } +} + +void IvarInvalidationChecker::MethodCrawler::markInvalidated( + const ObjCIvarDecl *Iv) { + IvarSet::iterator I = IVars.find(Iv); + if (I != IVars.end()) { + // If InvalidationMethod is present, we are processing the message send and + // should ensure we are invalidating with the appropriate method, + // otherwise, we are processing setting to 'nil'. 
+ if (InvalidationMethod) + I->second.markInvalidated(InvalidationMethod); + else + I->second.markInvalidated(); + } +} + +const Expr *IvarInvalidationChecker::MethodCrawler::peel(const Expr *E) const { + E = E->IgnoreParenCasts(); + if (const PseudoObjectExpr *POE = dyn_cast(E)) + E = POE->getSyntacticForm()->IgnoreParenCasts(); + if (const OpaqueValueExpr *OVE = dyn_cast(E)) + E = OVE->getSourceExpr()->IgnoreParenCasts(); + return E; +} + +void IvarInvalidationChecker::MethodCrawler::checkObjCIvarRefExpr( + const ObjCIvarRefExpr *IvarRef) { + if (const Decl *D = IvarRef->getDecl()) + markInvalidated(cast(D->getCanonicalDecl())); +} + +void IvarInvalidationChecker::MethodCrawler::checkObjCMessageExpr( + const ObjCMessageExpr *ME) { + const ObjCMethodDecl *MD = ME->getMethodDecl(); + if (MD) { + MD = cast(MD->getCanonicalDecl()); + MethToIvarMapTy::const_iterator IvI = PropertyGetterToIvarMap.find(MD); + if (IvI != PropertyGetterToIvarMap.end()) + markInvalidated(IvI->second); + } +} + +void IvarInvalidationChecker::MethodCrawler::checkObjCPropertyRefExpr( + const ObjCPropertyRefExpr *PA) { + + if (PA->isExplicitProperty()) { + const ObjCPropertyDecl *PD = PA->getExplicitProperty(); + if (PD) { + PD = cast(PD->getCanonicalDecl()); + PropToIvarMapTy::const_iterator IvI = PropertyToIvarMap.find(PD); + if (IvI != PropertyToIvarMap.end()) + markInvalidated(IvI->second); + return; + } + } + + if (PA->isImplicitProperty()) { + const ObjCMethodDecl *MD = PA->getImplicitPropertySetter(); + if (MD) { + MD = cast(MD->getCanonicalDecl()); + MethToIvarMapTy::const_iterator IvI =PropertyGetterToIvarMap.find(MD); + if (IvI != PropertyGetterToIvarMap.end()) + markInvalidated(IvI->second); + return; + } + } +} + +bool IvarInvalidationChecker::MethodCrawler::isZero(const Expr *E) const { + E = peel(E); + + return (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull) + != Expr::NPCK_NotNull); +} + +void IvarInvalidationChecker::MethodCrawler::check(const Expr *E) { + E = peel(E); + + if (const ObjCIvarRefExpr *IvarRef = dyn_cast(E)) { + checkObjCIvarRefExpr(IvarRef); + return; + } + + if (const ObjCPropertyRefExpr *PropRef = dyn_cast(E)) { + checkObjCPropertyRefExpr(PropRef); + return; + } + + if (const ObjCMessageExpr *MsgExpr = dyn_cast(E)) { + checkObjCMessageExpr(MsgExpr); + return; + } +} + +void IvarInvalidationChecker::MethodCrawler::VisitBinaryOperator( + const BinaryOperator *BO) { + VisitStmt(BO); + + if (BO->getOpcode() != BO_Assign) + return; + + // Do we assign zero? + if (!isZero(BO->getRHS())) + return; + + // Check the variable we are assigning to. + check(BO->getLHS()); +} + +void IvarInvalidationChecker::MethodCrawler::VisitObjCMessageExpr( + const ObjCMessageExpr *ME) { + const ObjCMethodDecl *MD = ME->getMethodDecl(); + const Expr *Receiver = ME->getInstanceReceiver(); + + // Stop if we are calling '[self invalidate]'. + if (Receiver && isInvalidationMethod(MD)) + if (Receiver->isObjCSelfExpr()) { + CalledAnotherInvalidationMethod = true; + return; + } + + // Check if we call a setter and set the property to 'nil'. + if (MD && (ME->getNumArgs() == 1) && isZero(ME->getArg(0))) { + MD = cast(MD->getCanonicalDecl()); + MethToIvarMapTy::const_iterator IvI = PropertySetterToIvarMap.find(MD); + if (IvI != PropertySetterToIvarMap.end()) { + markInvalidated(IvI->second); + return; + } + } + + // Check if we call the 'invalidation' routine on the ivar. 
+ if (Receiver) { + InvalidationMethod = MD; + check(Receiver->IgnoreParenCasts()); + InvalidationMethod = 0; + } + + VisitStmt(ME); +} +} + +// Register the checker. +void ento::registerIvarInvalidationChecker(CheckerManager &mgr) { + mgr.registerChecker(); +} diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp index 969f2dd..76f20b6 100644 --- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp @@ -158,16 +158,9 @@ private: /// ProgramState traits to store the currently allocated (and not yet freed) /// symbols. This is a map from the allocated content symbol to the /// corresponding AllocationState. -typedef llvm::ImmutableMap AllocatedSetTy; - -namespace { struct AllocatedData {}; } -namespace clang { namespace ento { -template<> struct ProgramStateTrait - : public ProgramStatePartialTrait { - static void *GDMIndex() { static int index = 0; return &index; } -}; -}} +REGISTER_MAP_WITH_PROGRAMSTATE(AllocatedData, + SymbolRef, + MacOSKeychainAPIChecker::AllocationState) static bool isEnclosingFunctionParam(const Expr *E) { E = E->IgnoreParenCasts(); @@ -282,7 +275,7 @@ void MacOSKeychainAPIChecker:: Report->addVisitor(new SecKeychainBugVisitor(AP.first)); Report->addRange(ArgExpr->getSourceRange()); markInteresting(Report, AP); - C.EmitReport(Report); + C.emitReport(Report); } void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE, @@ -323,7 +316,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE, Report->addVisitor(new SecKeychainBugVisitor(V)); Report->addRange(ArgExpr->getSourceRange()); Report->markInteresting(AS->Region); - C.EmitReport(Report); + C.emitReport(Report); } } return; @@ -376,7 +369,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE, Report->addRange(ArgExpr->getSourceRange()); if (AS) Report->markInteresting(AS->Region); - C.EmitReport(Report); + C.emitReport(Report); return; } @@ -440,7 +433,7 @@ void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE, Report->addVisitor(new SecKeychainBugVisitor(ArgSM)); Report->addRange(ArgExpr->getSourceRange()); Report->markInteresting(AS->Region); - C.EmitReport(Report); + C.emitReport(Report); return; } @@ -571,13 +564,13 @@ BugReport *MacOSKeychainAPIChecker:: void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const { ProgramStateRef State = C.getState(); - AllocatedSetTy ASet = State->get(); + AllocatedDataTy ASet = State->get(); if (ASet.isEmpty()) return; bool Changed = false; AllocationPairVec Errors; - for (AllocatedSetTy::iterator I = ASet.begin(), E = ASet.end(); I != E; ++I) { + for (AllocatedDataTy::iterator I = ASet.begin(), E = ASet.end(); I != E; ++I) { if (SR.isLive(I->first)) continue; @@ -585,7 +578,9 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR, State = State->remove(I->first); // If the allocated symbol is null or if the allocation call might have // returned an error, do not report. - if (State->getSymVal(I->first) || + ConstraintManager &CMgr = State->getConstraintManager(); + ConditionTruthVal AllocFailed = CMgr.isNull(State, I.getKey()); + if (AllocFailed.isConstrainedTrue() || definitelyReturnedError(I->second.Region, State, C.getSValBuilder())) continue; Errors.push_back(std::make_pair(I->first, &I->second)); @@ -602,7 +597,7 @@ void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR, // Generate the error reports. 
for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end(); I != E; ++I) { - C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N, C)); + C.emitReport(generateAllocatedDataNotReleasedReport(*I, N, C)); } // Generate the new, cleaned up state. @@ -617,7 +612,7 @@ void MacOSKeychainAPIChecker::checkEndPath(CheckerContext &C) const { if (C.getLocationContext()->getParent() != 0) return; - AllocatedSetTy AS = state->get(); + AllocatedDataTy AS = state->get(); if (AS.isEmpty()) return; @@ -625,12 +620,14 @@ void MacOSKeychainAPIChecker::checkEndPath(CheckerContext &C) const { // found here, so report it. bool Changed = false; AllocationPairVec Errors; - for (AllocatedSetTy::iterator I = AS.begin(), E = AS.end(); I != E; ++I ) { + for (AllocatedDataTy::iterator I = AS.begin(), E = AS.end(); I != E; ++I ) { Changed = true; state = state->remove(I->first); // If the allocated symbol is null or if error code was returned at // allocation, do not report. - if (state->getSymVal(I.getKey()) || + ConstraintManager &CMgr = state->getConstraintManager(); + ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey()); + if (AllocFailed.isConstrainedTrue() || definitelyReturnedError(I->second.Region, state, C.getSValBuilder())) { continue; @@ -650,7 +647,7 @@ void MacOSKeychainAPIChecker::checkEndPath(CheckerContext &C) const { // Generate the error reports. for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end(); I != E; ++I) { - C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N, C)); + C.emitReport(generateAllocatedDataNotReleasedReport(*I, N, C)); } C.addTransition(state, N); diff --git a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp index cfdb55d..467b8b1 100644 --- a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp @@ -70,6 +70,16 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE, BT_dispatchOnce.reset(new BugType("Improper use of 'dispatch_once'", "Mac OS X API")); + // Handle _dispatch_once. In some versions of the OS X SDK we have the case + // that dispatch_once is a macro that wraps a call to _dispatch_once. + // _dispatch_once is then a function which then calls the real dispatch_once. + // Users do not care; they just want the warning at the top-level call. 
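A short sketch of the situation described above (the SDK header shape is an assumption, not shown in this patch): the public name is a macro over an underscore-prefixed helper, so the analyzer sees '_dispatch_once' at the expanded call site, and the trimming in the hunk below restores the user-facing spelling for the diagnostic.

// Assumed SDK shape (not from this patch):
//   #define dispatch_once(predicate, block) _dispatch_once(predicate, block)
//
// Name normalization matching the hunk below: strip leading underscores when
// the call site comes from a macro expansion, so the diagnostic names the
// function the user actually wrote.
static StringRef userFacingName(StringRef FName, const CallExpr *CE) {
  if (CE->getLocStart().isMacroID())
    return FName.ltrim("_");   // "_dispatch_once" -> "dispatch_once"
  return FName;
}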
+ if (CE->getLocStart().isMacroID()) { + StringRef TrimmedFName = FName.ltrim("_"); + if (TrimmedFName != FName) + FName = TrimmedFName; + } + SmallString<256> S; llvm::raw_svector_ostream os(S); os << "Call to '" << FName << "' uses"; @@ -84,7 +94,7 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE, BugReport *report = new BugReport(*BT_dispatchOnce, os.str(), N); report->addRange(CE->getArg(0)->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } //===----------------------------------------------------------------------===// @@ -99,7 +109,9 @@ void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE, SubChecker SC = llvm::StringSwitch(Name) - .Cases("dispatch_once", "dispatch_once_f", + .Cases("dispatch_once", + "_dispatch_once", + "dispatch_once_f", &MacOSXAPIChecker::CheckDispatchOnce) .Default(NULL); diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp index dfcedf6..caf70ca 100644 --- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp @@ -26,6 +26,7 @@ #include "llvm/ADT/ImmutableMap.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/StringExtras.h" #include using namespace clang; @@ -70,17 +71,31 @@ public: } }; +enum ReallocPairKind { + RPToBeFreedAfterFailure, + // The symbol has been freed when reallocation failed. + RPIsFreeOnFailure, + // The symbol does not need to be freed after reallocation fails. + RPDoNotTrackAfterFailure +}; + +/// \class ReallocPair +/// \brief Stores information about the symbol being reallocated by a call to +/// 'realloc' to allow modeling failed reallocation later in the path. struct ReallocPair { + // \brief The symbol which realloc reallocated. 
SymbolRef ReallocatedSym; - bool IsFreeOnFailure; - ReallocPair(SymbolRef S, bool F) : ReallocatedSym(S), IsFreeOnFailure(F) {} + ReallocPairKind Kind; + + ReallocPair(SymbolRef S, ReallocPairKind K) : + ReallocatedSym(S), Kind(K) {} void Profile(llvm::FoldingSetNodeID &ID) const { - ID.AddInteger(IsFreeOnFailure); + ID.AddInteger(Kind); ID.AddPointer(ReallocatedSym); } bool operator==(const ReallocPair &X) const { return ReallocatedSym == X.ReallocatedSym && - IsFreeOnFailure == X.IsFreeOnFailure; + Kind == X.Kind; } }; @@ -92,7 +107,7 @@ class MallocChecker : public Checker, check::PostStmt, check::PostStmt, - check::PreObjCMessage, + check::PostObjCMessage, check::Location, check::Bind, eval::Assume, @@ -120,7 +135,7 @@ public: void checkPreStmt(const CallExpr *S, CheckerContext &C) const; void checkPostStmt(const CallExpr *CE, CheckerContext &C) const; - void checkPreObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const; + void checkPostObjCMessage(const ObjCMethodCall &Call, CheckerContext &C) const; void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const; void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const; void checkEndPath(CheckerContext &C) const; @@ -177,11 +192,15 @@ private: const OwnershipAttr* Att) const; ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE, ProgramStateRef state, unsigned Num, - bool Hold) const; + bool Hold, + bool &ReleasedAllocated, + bool ReturnsNullOnFailure = false) const; ProgramStateRef FreeMemAux(CheckerContext &C, const Expr *Arg, const Expr *ParentExpr, - ProgramStateRef state, - bool Hold) const; + ProgramStateRef State, + bool Hold, + bool &ReleasedAllocated, + bool ReturnsNullOnFailure = false) const; ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE, bool FreesMemOnFailure) const; @@ -301,13 +320,14 @@ private: : StackHintGeneratorForSymbol(S, M) {} virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex) { + // Printed parameters start at 1, not 0. + ++ArgIndex; + SmallString<200> buf; llvm::raw_svector_ostream os(buf); - os << "Reallocation of "; - // Printed parameters start at 1, not 0. - printOrdinal(++ArgIndex, os); - os << " parameter failed"; + os << "Reallocation of " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex) + << " parameter failed"; return os.str(); } @@ -320,25 +340,12 @@ private: }; } // end anonymous namespace -typedef llvm::ImmutableMap RegionStateTy; -typedef llvm::ImmutableMap ReallocMap; -class RegionState {}; -class ReallocPairs {}; -namespace clang { -namespace ento { - template <> - struct ProgramStateTrait - : public ProgramStatePartialTrait { - static void *GDMIndex() { static int x; return &x; } - }; +REGISTER_MAP_WITH_PROGRAMSTATE(RegionState, SymbolRef, RefState) +REGISTER_MAP_WITH_PROGRAMSTATE(ReallocPairs, SymbolRef, ReallocPair) - template <> - struct ProgramStateTrait - : public ProgramStatePartialTrait { - static void *GDMIndex() { static int x; return &x; } - }; -} -} +// A map from the freed symbol to the symbol representing the return value of +// the free function. 
+REGISTER_MAP_WITH_PROGRAMSTATE(FreeReturnValue, SymbolRef, SymbolRef) namespace { class StopTrackingCallback : public SymbolVisitor { @@ -426,11 +433,15 @@ bool MallocChecker::isFreeFunction(const FunctionDecl *FD, ASTContext &C) const } void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { + if (C.wasInlined) + return; + const FunctionDecl *FD = C.getCalleeDecl(CE); if (!FD) return; ProgramStateRef State = C.getState(); + bool ReleasedAllocatedMemory = false; if (FD->getKind() == Decl::Function) { initIdentifierInfo(C.getASTContext()); @@ -447,7 +458,7 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const { } else if (FunI == II_calloc) { State = CallocMem(C, CE); } else if (FunI == II_free) { - State = FreeMemAux(C, CE, State, 0, false); + State = FreeMemAux(C, CE, State, 0, false, ReleasedAllocatedMemory); } else if (FunI == II_strdup) { State = MallocUpdateRefState(C, CE, State); } else if (FunI == II_strndup) { @@ -487,21 +498,26 @@ static bool isFreeWhenDoneSetToZero(const ObjCMethodCall &Call) { return false; } -void MallocChecker::checkPreObjCMessage(const ObjCMethodCall &Call, - CheckerContext &C) const { +void MallocChecker::checkPostObjCMessage(const ObjCMethodCall &Call, + CheckerContext &C) const { // If the first selector is dataWithBytesNoCopy, assume that the memory will // be released with 'free' by the new object. // Ex: [NSData dataWithBytesNoCopy:bytes length:10]; // Unless 'freeWhenDone' param set to 0. // TODO: Check that the memory was allocated with malloc. + bool ReleasedAllocatedMemory = false; Selector S = Call.getSelector(); if ((S.getNameForSlot(0) == "dataWithBytesNoCopy" || S.getNameForSlot(0) == "initWithBytesNoCopy" || S.getNameForSlot(0) == "initWithCharactersNoCopy") && !isFreeWhenDoneSetToZero(Call)){ unsigned int argIdx = 0; - C.addTransition(FreeMemAux(C, Call.getArgExpr(argIdx), - Call.getOriginExpr(), C.getState(), true)); + ProgramStateRef State = FreeMemAux(C, Call.getArgExpr(argIdx), + Call.getOriginExpr(), C.getState(), true, + ReleasedAllocatedMemory, + /* RetNullOnFailure*/ true); + + C.addTransition(State); } } @@ -526,7 +542,7 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C, // Bind the return value to the symbolic value from the heap region. // TODO: We could rewrite post visit to eval call; 'malloc' does not have // side effects other than what we model here. 
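Aside on the REGISTER_MAP_WITH_PROGRAMSTATE registrations above (RegionState, ReallocPairs, FreeReturnValue): the generated trait is named as an explicit template argument on the ProgramState accessors. A minimal sketch using the RegionState map, with the RefState type this checker already defines:

// Sketch only; RegionState maps SymbolRef -> RefState, as registered above.
static ProgramStateRef markReleased(ProgramStateRef State, SymbolRef Sym,
                                    const Expr *ParentExpr) {
  if (const RefState *RS = State->get<RegionState>(Sym)) {
    // e.g. inspect RS->isAllocated() before changing the state
    (void)RS;
  }
  // Insert or overwrite the entry, producing a new immutable state.
  return State->set<RegionState>(Sym, RefState::getReleased(ParentExpr));
}

// Dropping an entry once the symbol dies:
//   State = State->remove<RegionState>(Sym);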
- unsigned Count = C.getCurrentBlockCount(); + unsigned Count = C.blockCount(); SValBuilder &svalBuilder = C.getSValBuilder(); const LocationContext *LCtx = C.getPredecessor()->getLocationContext(); DefinedSVal RetVal = @@ -584,11 +600,13 @@ ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C, return 0; ProgramStateRef State = C.getState(); + bool ReleasedAllocated = false; for (OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end(); I != E; ++I) { ProgramStateRef StateI = FreeMemAux(C, CE, State, *I, - Att->getOwnKind() == OwnershipAttr::Holds); + Att->getOwnKind() == OwnershipAttr::Holds, + ReleasedAllocated); if (StateI) State = StateI; } @@ -599,20 +617,40 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE, ProgramStateRef state, unsigned Num, - bool Hold) const { + bool Hold, + bool &ReleasedAllocated, + bool ReturnsNullOnFailure) const { if (CE->getNumArgs() < (Num + 1)) return 0; - return FreeMemAux(C, CE->getArg(Num), CE, state, Hold); + return FreeMemAux(C, CE->getArg(Num), CE, state, Hold, + ReleasedAllocated, ReturnsNullOnFailure); +} + +/// Checks if the previous call to free on the given symbol failed - if free +/// failed, returns true. Also, returns the corresponding return value symbol. +bool didPreviousFreeFail(ProgramStateRef State, + SymbolRef Sym, SymbolRef &RetStatusSymbol) { + const SymbolRef *Ret = State->get(Sym); + if (Ret) { + assert(*Ret && "We should not store the null return symbol"); + ConstraintManager &CMgr = State->getConstraintManager(); + ConditionTruthVal FreeFailed = CMgr.isNull(State, *Ret); + RetStatusSymbol = *Ret; + return FreeFailed.isConstrainedTrue(); + } + return false; } ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, const Expr *ArgExpr, const Expr *ParentExpr, - ProgramStateRef state, - bool Hold) const { + ProgramStateRef State, + bool Hold, + bool &ReleasedAllocated, + bool ReturnsNullOnFailure) const { - SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext()); + SVal ArgVal = State->getSVal(ArgExpr, C.getLocationContext()); if (!isa(ArgVal)) return 0; DefinedOrUnknownSVal location = cast(ArgVal); @@ -623,7 +661,7 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, // The explicit NULL case, no operation is performed. ProgramStateRef notNullState, nullState; - llvm::tie(notNullState, nullState) = state->assume(location); + llvm::tie(notNullState, nullState) = State->assume(location); if (nullState && !notNullState) return 0; @@ -672,10 +710,14 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, return 0; SymbolRef Sym = SR->getSymbol(); - const RefState *RS = state->get(Sym); + const RefState *RS = State->get(Sym); + SymbolRef PreviousRetStatusSymbol = 0; // Check double free. - if (RS && (RS->isReleased() || RS->isRelinquished())) { + if (RS && + (RS->isReleased() || RS->isRelinquished()) && + !didPreviousFreeFail(State, Sym, PreviousRetStatusSymbol)) { + if (ExplodedNode *N = C.generateSink()) { if (!BT_DoubleFree) BT_DoubleFree.reset( @@ -685,16 +727,34 @@ ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C, "Attempt to free non-owned memory"), N); R->addRange(ArgExpr->getSourceRange()); R->markInteresting(Sym); + if (PreviousRetStatusSymbol) + R->markInteresting(PreviousRetStatusSymbol); R->addVisitor(new MallocBugVisitor(Sym)); - C.EmitReport(R); + C.emitReport(R); } return 0; } + ReleasedAllocated = (RS != 0); + + // Clean out the info on previous call to free return info. 
+ State = State->remove(Sym); + + // Keep track of the return value. If it is NULL, we will know that free + // failed. + if (ReturnsNullOnFailure) { + SVal RetVal = C.getSVal(ParentExpr); + SymbolRef RetStatusSymbol = RetVal.getAsSymbol(); + if (RetStatusSymbol) { + C.getSymbolManager().addSymbolDependency(Sym, RetStatusSymbol); + State = State->set(Sym, RetStatusSymbol); + } + } + // Normal free. if (Hold) - return state->set(Sym, RefState::getRelinquished(ParentExpr)); - return state->set(Sym, RefState::getReleased(ParentExpr)); + return State->set(Sym, RefState::getRelinquished(ParentExpr)); + return State->set(Sym, RefState::getReleased(ParentExpr)); } bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) { @@ -714,7 +774,7 @@ bool MallocChecker::SummarizeRegion(raw_ostream &os, const MemRegion *MR) { switch (MR->getKind()) { case MemRegion::FunctionTextRegionKind: { - const FunctionDecl *FD = cast(MR)->getDecl(); + const NamedDecl *FD = cast(MR)->getDecl(); if (FD) os << "the address of the function '" << *FD << '\''; else @@ -819,7 +879,7 @@ void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal, BugReport *R = new BugReport(*BT_BadFree, os.str(), N); R->markInteresting(MR); R->addRange(range); - C.EmitReport(R); + C.emitReport(R); } } @@ -886,9 +946,12 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, if (!FromPtr || !ToPtr) return 0; + bool ReleasedAllocated = false; + // If the size is 0, free the memory. if (SizeIsZero) - if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero,0,false)){ + if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero, 0, + false, ReleasedAllocated)){ // The semantics of the return value are: // If size was equal to 0, either NULL or a pointer suitable to be passed // to free() is returned. We just free the input pointer and do not add @@ -897,14 +960,25 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C, } // Default behavior. - if (ProgramStateRef stateFree = FreeMemAux(C, CE, state, 0, false)) { - // FIXME: We should copy the content of the original buffer. + if (ProgramStateRef stateFree = + FreeMemAux(C, CE, state, 0, false, ReleasedAllocated)) { + ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1), UnknownVal(), stateFree); if (!stateRealloc) return 0; + + ReallocPairKind Kind = RPToBeFreedAfterFailure; + if (FreesOnFail) + Kind = RPIsFreeOnFailure; + else if (!ReleasedAllocated) + Kind = RPDoNotTrackAfterFailure; + + // Record the info about the reallocated symbol so that we could properly + // process failed reallocation. stateRealloc = stateRealloc->set(ToPtr, - ReallocPair(FromPtr, FreesOnFail)); + ReallocPair(FromPtr, Kind)); + // The reallocated symbol should stay alive for as long as the new symbol. 
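For context (plain C usage, not from the patch): the ReallocPair bookkeeping above models the fact that a failed realloc returns null while leaving the original allocation live, so on the failure branch the old pointer must still be freed (RPToBeFreedAfterFailure) unless the reallocation routine frees it itself (RPIsFreeOnFailure) or the symbol was never tracked as allocated (RPDoNotTrackAfterFailure).

#include <stdlib.h>

// If realloc fails, 'buf' is untouched and still owned; omitting the free()
// on the failure path is the leak the checker keeps tracking.
char *grow(char *buf, size_t newSize) {
  char *bigger = (char *)realloc(buf, newSize);
  if (!bigger) {
    free(buf);        // still required when reallocation fails
    return 0;
  }
  return bigger;      // success: the old symbol's fate now follows the new one
}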
C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr); return stateRealloc; } @@ -1004,7 +1078,7 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N, BugReport *R = new BugReport(*BT_Leak, os.str(), N, LocUsedForUniqueing); R->markInteresting(Sym); R->addVisitor(new MallocBugVisitor(Sym, true)); - C.EmitReport(R); + C.emitReport(R); } void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper, @@ -1017,14 +1091,11 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper, RegionStateTy RS = state->get(); RegionStateTy::Factory &F = state->get_context(); - bool generateReport = false; llvm::SmallVector Errors; for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) { if (SymReaper.isDead(I->first)) { - if (I->second.isAllocated()) { - generateReport = true; + if (I->second.isAllocated()) Errors.push_back(I->first); - } // Remove the dead symbol from the map. RS = F.remove(RS, I->first); @@ -1032,24 +1103,34 @@ void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper, } // Cleanup the Realloc Pairs Map. - ReallocMap RP = state->get(); - for (ReallocMap::iterator I = RP.begin(), E = RP.end(); I != E; ++I) { + ReallocPairsTy RP = state->get(); + for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) { if (SymReaper.isDead(I->first) || SymReaper.isDead(I->second.ReallocatedSym)) { state = state->remove(I->first); } } - // Generate leak node. - static SimpleProgramPointTag Tag("MallocChecker : DeadSymbolsLeak"); - ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag); + // Cleanup the FreeReturnValue Map. + FreeReturnValueTy FR = state->get(); + for (FreeReturnValueTy::iterator I = FR.begin(), E = FR.end(); I != E; ++I) { + if (SymReaper.isDead(I->first) || + SymReaper.isDead(I->second)) { + state = state->remove(I->first); + } + } - if (generateReport) { + // Generate leak node. + ExplodedNode *N = C.getPredecessor(); + if (!Errors.empty()) { + static SimpleProgramPointTag Tag("MallocChecker : DeadSymbolsLeak"); + N = C.addTransition(C.getState(), C.getPredecessor(), &Tag); for (llvm::SmallVector::iterator - I = Errors.begin(), E = Errors.end(); I != E; ++I) { + I = Errors.begin(), E = Errors.end(); I != E; ++I) { reportLeak(*I, N, C); } } + C.addTransition(state->set(RS), N); } @@ -1182,7 +1263,7 @@ bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C, R->addRange(S->getSourceRange()); R->markInteresting(Sym); R->addVisitor(new MallocBugVisitor(Sym)); - C.EmitReport(R); + C.emitReport(R); return true; } } @@ -1249,28 +1330,36 @@ ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state, bool Assumption) const { RegionStateTy RS = state->get(); for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) { - // If the symbol is assumed to NULL or another constant, this will - // return an APSInt*. - if (state->getSymVal(I.getKey())) + // If the symbol is assumed to be NULL, remove it from consideration. + ConstraintManager &CMgr = state->getConstraintManager(); + ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey()); + if (AllocFailed.isConstrainedTrue()) state = state->remove(I.getKey()); } // Realloc returns 0 when reallocation fails, which means that we should // restore the state of the pointer being reallocated. - ReallocMap RP = state->get(); - for (ReallocMap::iterator I = RP.begin(), E = RP.end(); I != E; ++I) { - // If the symbol is assumed to NULL or another constant, this will - // return an APSInt*. 
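Side note on the pattern used in the hunk below (and in the MacOSKeychainAPIChecker changes earlier): instead of asking whether the symbol has a known concrete value, the new code asks the constraint manager whether the path constraints force it to be null. A minimal sketch with the names this patch uses:

// Sketch only: true when the current path pins Sym to null, e.g. because the
// allocation it came from was assumed to have failed.
static bool isKnownNull(ProgramStateRef State, SymbolRef Sym) {
  ConstraintManager &CMgr = State->getConstraintManager();
  ConditionTruthVal Nullness = CMgr.isNull(State, Sym);
  return Nullness.isConstrainedTrue();
}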
- if (state->getSymVal(I.getKey())) { - SymbolRef ReallocSym = I.getData().ReallocatedSym; - const RefState *RS = state->get(ReallocSym); - if (RS) { - if (RS->isReleased() && ! I.getData().IsFreeOnFailure) + ReallocPairsTy RP = state->get(); + for (ReallocPairsTy::iterator I = RP.begin(), E = RP.end(); I != E; ++I) { + // If the symbol is assumed to be NULL, remove it from consideration. + ConstraintManager &CMgr = state->getConstraintManager(); + ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey()); + if (!AllocFailed.isConstrainedTrue()) + continue; + + SymbolRef ReallocSym = I.getData().ReallocatedSym; + if (const RefState *RS = state->get(ReallocSym)) { + if (RS->isReleased()) { + if (I.getData().Kind == RPToBeFreedAfterFailure) state = state->set(ReallocSym, - RefState::getAllocated(RS->getStmt())); + RefState::getAllocated(RS->getStmt())); + else if (I.getData().Kind == RPDoNotTrackAfterFailure) + state = state->remove(ReallocSym); + else + assert(I.getData().Kind == RPIsFreeOnFailure); } - state = state->remove(I.getKey()); } + state = state->remove(I.getKey()); } return state; @@ -1463,10 +1552,10 @@ MallocChecker::checkRegionChanges(ProgramStateRef State, static SymbolRef findFailedReallocSymbol(ProgramStateRef currState, ProgramStateRef prevState) { - ReallocMap currMap = currState->get(); - ReallocMap prevMap = prevState->get(); + ReallocPairsTy currMap = currState->get(); + ReallocPairsTy prevMap = prevState->get(); - for (ReallocMap::iterator I = prevMap.begin(), E = prevMap.end(); + for (ReallocPairsTy::iterator I = prevMap.begin(), E = prevMap.end(); I != E; ++I) { SymbolRef sym = I.getKey(); if (!currMap.lookup(sym)) diff --git a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp index 6292a47..fb40f22 100644 --- a/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp @@ -146,9 +146,9 @@ static bool typesCompatible(ASTContext &C, QualType A, QualType B) { if (const PointerType *ptrA = A->getAs()) if (const PointerType *ptrB = B->getAs()) { - A = ptrA->getPointeeType(); - B = ptrB->getPointeeType(); - continue; + A = ptrA->getPointeeType(); + B = ptrB->getPointeeType(); + continue; } break; @@ -157,6 +157,18 @@ static bool typesCompatible(ASTContext &C, QualType A, QualType B) { return false; } +static bool compatibleWithArrayType(ASTContext &C, QualType PT, QualType T) { + // Ex: 'int a[10][2]' is compatible with 'int', 'int[2]', 'int[10][2]'. + while (const ArrayType *AT = T->getAsArrayTypeUnsafe()) { + QualType ElemType = AT->getElementType(); + if (typesCompatible(C, PT, AT->getElementType())) + return true; + T = ElemType; + } + + return false; +} + class MallocSizeofChecker : public Checker { public: void checkASTCodeBody(const Decl *D, AnalysisManager& mgr, @@ -184,38 +196,49 @@ public: continue; QualType SizeofType = SFinder.Sizeofs[0]->getTypeOfArgument(); - if (!typesCompatible(BR.getContext(), PointeeType, SizeofType)) { - const TypeSourceInfo *TSI = 0; - if (i->CastedExprParent.is()) { - TSI = + + if (typesCompatible(BR.getContext(), PointeeType, SizeofType)) + continue; + + // If the argument to sizeof is an array, the result could be a + // pointer to any array element. 
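For illustration (plain C-style usage, not from the patch): compatibleWithArrayType above accepts a sizeof whose operand is an array type when the destination pointee matches the array itself, a row, or an element, while unrelated element types still produce the mismatch report.

#include <stdlib.h>

void sizeof_examples(void) {
  // No warning after this change: 'int', 'int[2]' and 'int[10][2]' are all
  // reachable element types of the sizeof operand 'int[10][2]'.
  int *flat = (int *)malloc(sizeof(int[10][2]));
  int (*rows)[2] = (int (*)[2])malloc(sizeof(int[10][2]));
  int (*whole)[10][2] = (int (*)[10][2])malloc(sizeof(int[10][2]));

  // Still reported: the pointee type has nothing to do with the sizeof
  // operand ("Result of 'malloc' is converted to a pointer of type 'long',
  // which is incompatible with sizeof operand type 'short'").
  long *mismatch = (long *)malloc(sizeof(short));

  free(flat); free(rows); free(whole); free(mismatch);
}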
+ if (compatibleWithArrayType(BR.getContext(), PointeeType, SizeofType)) + continue; + + const TypeSourceInfo *TSI = 0; + if (i->CastedExprParent.is()) { + TSI = i->CastedExprParent.get()->getTypeSourceInfo(); - } else { - TSI = i->ExplicitCastType; - } - - SmallString<64> buf; - llvm::raw_svector_ostream OS(buf); - - OS << "Result of '" - << i->AllocCall->getDirectCallee()->getIdentifier()->getName() - << "' is converted to a pointer of type '" - << PointeeType.getAsString() << "', which is incompatible with " - << "sizeof operand type '" << SizeofType.getAsString() << "'"; - llvm::SmallVector Ranges; - Ranges.push_back(i->AllocCall->getCallee()->getSourceRange()); - Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange()); - if (TSI) - Ranges.push_back(TSI->getTypeLoc().getSourceRange()); - - PathDiagnosticLocation L = + } else { + TSI = i->ExplicitCastType; + } + + SmallString<64> buf; + llvm::raw_svector_ostream OS(buf); + + OS << "Result of "; + const FunctionDecl *Callee = i->AllocCall->getDirectCallee(); + if (Callee && Callee->getIdentifier()) + OS << '\'' << Callee->getIdentifier()->getName() << '\''; + else + OS << "call"; + OS << " is converted to a pointer of type '" + << PointeeType.getAsString() << "', which is incompatible with " + << "sizeof operand type '" << SizeofType.getAsString() << "'"; + llvm::SmallVector Ranges; + Ranges.push_back(i->AllocCall->getCallee()->getSourceRange()); + Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange()); + if (TSI) + Ranges.push_back(TSI->getTypeLoc().getSourceRange()); + + PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(), - BR.getSourceManager(), ADC); + BR.getSourceManager(), ADC); - BR.EmitBasicReport(D, "Allocator sizeof operand mismatch", - categories::UnixAPI, - OS.str(), - L, Ranges.data(), Ranges.size()); - } + BR.EmitBasicReport(D, "Allocator sizeof operand mismatch", + categories::UnixAPI, + OS.str(), + L, Ranges.data(), Ranges.size()); } } } diff --git a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp index aad3b0f..3331bc8 100644 --- a/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp @@ -71,7 +71,7 @@ void NSAutoreleasePoolChecker::checkPreObjCMessage(const ObjCMethodCall &msg, BugReport *Report = new BugReport(*BT, "Use -drain instead of -release when " "using NSAutoreleasePool and garbage collection", N); Report->addRange(msg.getSourceRange()); - C.EmitReport(Report); + C.emitReport(Report); } void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) { diff --git a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp index f826573..7a66ec3 100644 --- a/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp @@ -163,23 +163,9 @@ public: }; } -namespace { struct NSErrorOut {}; } -namespace { struct CFErrorOut {}; } - typedef llvm::ImmutableMap ErrorOutFlag; - -namespace clang { -namespace ento { - template <> - struct ProgramStateTrait : public ProgramStatePartialTrait { - static void *GDMIndex() { static int index = 0; return &index; } - }; - template <> - struct ProgramStateTrait : public ProgramStatePartialTrait { - static void *GDMIndex() { static int index = 0; return &index; } - }; -} -} +REGISTER_TRAIT_WITH_PROGRAMSTATE(NSErrorOut, ErrorOutFlag) +REGISTER_TRAIT_WITH_PROGRAMSTATE(CFErrorOut, ErrorOutFlag) template static bool hasFlag(SVal val, 
ProgramStateRef state) { @@ -285,7 +271,7 @@ void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const { bug = new CFErrorDerefBug(); BugReport *report = new BugReport(*bug, os.str(), event.SinkNode); - BR.EmitReport(report); + BR.emitReport(report); } static bool IsNSError(QualType T, IdentifierInfo *II) { diff --git a/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp b/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp deleted file mode 100644 index 7b724d2..0000000 --- a/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp +++ /dev/null @@ -1,218 +0,0 @@ -//=== OSAtomicChecker.cpp - OSAtomic functions evaluator --------*- C++ -*-===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This checker evaluates OSAtomic functions. -// -//===----------------------------------------------------------------------===// - -#include "ClangSACheckers.h" -#include "clang/StaticAnalyzer/Core/Checker.h" -#include "clang/StaticAnalyzer/Core/CheckerManager.h" -#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" -#include "clang/Basic/Builtins.h" - -using namespace clang; -using namespace ento; - -namespace { - -class OSAtomicChecker : public Checker { -public: - bool inlineCall(const CallExpr *CE, ExprEngine &Eng, - ExplodedNode *Pred, ExplodedNodeSet &Dst) const; - -private: - bool evalOSAtomicCompareAndSwap(const CallExpr *CE, - ExprEngine &Eng, - ExplodedNode *Pred, - ExplodedNodeSet &Dst) const; -}; -} - -static StringRef getCalleeName(ProgramStateRef State, - const CallExpr *CE, - const LocationContext *LCtx) { - const Expr *Callee = CE->getCallee(); - SVal L = State->getSVal(Callee, LCtx); - const FunctionDecl *funDecl = L.getAsFunctionDecl(); - if (!funDecl) - return StringRef(); - IdentifierInfo *funI = funDecl->getIdentifier(); - if (!funI) - return StringRef(); - return funI->getName(); -} - -bool OSAtomicChecker::inlineCall(const CallExpr *CE, - ExprEngine &Eng, - ExplodedNode *Pred, - ExplodedNodeSet &Dst) const { - StringRef FName = getCalleeName(Pred->getState(), - CE, Pred->getLocationContext()); - if (FName.empty()) - return false; - - // Check for compare and swap. - if (FName.startswith("OSAtomicCompareAndSwap") || - FName.startswith("objc_atomicCompareAndSwap")) - return evalOSAtomicCompareAndSwap(CE, Eng, Pred, Dst); - - // FIXME: Other atomics. - return false; -} - -bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE, - ExprEngine &Eng, - ExplodedNode *Pred, - ExplodedNodeSet &Dst) const { - // Not enough arguments to match OSAtomicCompareAndSwap? - if (CE->getNumArgs() != 3) - return false; - - ASTContext &Ctx = Eng.getContext(); - const Expr *oldValueExpr = CE->getArg(0); - QualType oldValueType = Ctx.getCanonicalType(oldValueExpr->getType()); - - const Expr *newValueExpr = CE->getArg(1); - QualType newValueType = Ctx.getCanonicalType(newValueExpr->getType()); - - // Do the types of 'oldValue' and 'newValue' match? - if (oldValueType != newValueType) - return false; - - const Expr *theValueExpr = CE->getArg(2); - const PointerType *theValueType=theValueExpr->getType()->getAs(); - - // theValueType not a pointer? - if (!theValueType) - return false; - - QualType theValueTypePointee = - Ctx.getCanonicalType(theValueType->getPointeeType()).getUnqualifiedType(); - - // The pointee must match newValueType and oldValueType. 
- if (theValueTypePointee != newValueType) - return false; - - static SimpleProgramPointTag OSAtomicLoadTag("OSAtomicChecker : Load"); - static SimpleProgramPointTag OSAtomicStoreTag("OSAtomicChecker : Store"); - - // Load 'theValue'. - ProgramStateRef state = Pred->getState(); - const LocationContext *LCtx = Pred->getLocationContext(); - ExplodedNodeSet Tmp; - SVal location = state->getSVal(theValueExpr, LCtx); - // Here we should use the value type of the region as the load type, because - // we are simulating the semantics of the function, not the semantics of - // passing argument. So the type of theValue expr is not we are loading. - // But usually the type of the varregion is not the type we want either, - // we still need to do a CastRetrievedVal in store manager. So actually this - // LoadTy specifying can be omitted. But we put it here to emphasize the - // semantics. - QualType LoadTy; - if (const TypedValueRegion *TR = - dyn_cast_or_null(location.getAsRegion())) { - LoadTy = TR->getValueType(); - } - Eng.evalLoad(Tmp, CE, theValueExpr, Pred, - state, location, &OSAtomicLoadTag, LoadTy); - - if (Tmp.empty()) { - // If no nodes were generated, other checkers must have generated sinks. - // We return an empty Dst. - return true; - } - - for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); - I != E; ++I) { - - ExplodedNode *N = *I; - ProgramStateRef stateLoad = N->getState(); - - // Use direct bindings from the environment since we are forcing a load - // from a location that the Environment would typically not be used - // to bind a value. - SVal theValueVal_untested = stateLoad->getSVal(theValueExpr, LCtx, true); - - SVal oldValueVal_untested = stateLoad->getSVal(oldValueExpr, LCtx); - - // FIXME: Issue an error. - if (theValueVal_untested.isUndef() || oldValueVal_untested.isUndef()) { - return false; - } - - DefinedOrUnknownSVal theValueVal = - cast(theValueVal_untested); - DefinedOrUnknownSVal oldValueVal = - cast(oldValueVal_untested); - - SValBuilder &svalBuilder = Eng.getSValBuilder(); - - // Perform the comparison. - DefinedOrUnknownSVal Cmp = - svalBuilder.evalEQ(stateLoad,theValueVal,oldValueVal); - - ProgramStateRef stateEqual = stateLoad->assume(Cmp, true); - - // Were they equal? - if (stateEqual) { - // Perform the store. - ExplodedNodeSet TmpStore; - SVal val = stateEqual->getSVal(newValueExpr, LCtx); - - // Handle implicit value casts. - if (const TypedValueRegion *R = - dyn_cast_or_null(location.getAsRegion())) { - val = svalBuilder.evalCast(val,R->getValueType(), newValueExpr->getType()); - } - - Eng.evalStore(TmpStore, CE, theValueExpr, N, - stateEqual, location, val, &OSAtomicStoreTag); - - if (TmpStore.empty()) { - // If no nodes were generated, other checkers must have generated sinks. - // We return an empty Dst. - return true; - } - - StmtNodeBuilder B(TmpStore, Dst, Eng.getBuilderContext()); - // Now bind the result of the comparison. - for (ExplodedNodeSet::iterator I2 = TmpStore.begin(), - E2 = TmpStore.end(); I2 != E2; ++I2) { - ExplodedNode *predNew = *I2; - ProgramStateRef stateNew = predNew->getState(); - // Check for 'void' return type if we have a bogus function prototype. - SVal Res = UnknownVal(); - QualType T = CE->getType(); - if (!T->isVoidType()) - Res = Eng.getSValBuilder().makeTruthVal(true, T); - B.generateNode(CE, predNew, stateNew->BindExpr(CE, LCtx, Res), - false, this); - } - } - - // Were they not equal? 
- if (ProgramStateRef stateNotEqual = stateLoad->assume(Cmp, false)) { - // Check for 'void' return type if we have a bogus function prototype. - SVal Res = UnknownVal(); - QualType T = CE->getType(); - if (!T->isVoidType()) - Res = Eng.getSValBuilder().makeTruthVal(false, CE->getType()); - StmtNodeBuilder B(N, Dst, Eng.getBuilderContext()); - B.generateNode(CE, N, stateNotEqual->BindExpr(CE, LCtx, Res), - false, this); - } - } - - return true; -} - -void ento::registerOSAtomicChecker(CheckerManager &mgr) { - mgr.registerChecker(); -} diff --git a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp index 4cc92ce..9d84f52 100644 --- a/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp @@ -18,7 +18,6 @@ #include "clang/StaticAnalyzer/Core/CheckerManager.h" #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h" -#include "clang/StaticAnalyzer/Checkers/DereferenceChecker.h" #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" using namespace clang; @@ -50,8 +49,8 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S, "for @synchronized")); BugReport *report = new BugReport(*BT_undef, BT_undef->getDescription(), N); - bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report); - C.EmitReport(report); + bugreporter::trackNullOrUndefValue(N, Ex, *report); + C.emitReport(report); } return; } @@ -73,9 +72,9 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S, "(no synchronization will occur)")); BugReport *report = new BugReport(*BT_null, BT_null->getDescription(), N); - bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report); + bugreporter::trackNullOrUndefValue(N, Ex, *report); - C.EmitReport(report); + C.emitReport(report); return; } } diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp index f2929c0..63a8480 100644 --- a/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp @@ -31,8 +31,6 @@ class WalkAST : public StmtVisitor { ASTContext &ASTC; uint64_t PtrWidth; - static const unsigned InvalidArgIndex = UINT_MAX; - /// Check if the type has pointer size (very conservative). inline bool isPointerSize(const Type *T) { if (!T) @@ -102,16 +100,18 @@ void WalkAST::VisitCallExpr(CallExpr *CE) { return; const Expr *Arg = 0; - unsigned ArgNum = InvalidArgIndex; + unsigned ArgNum; if (Name.equals("CFArrayCreate") || Name.equals("CFSetCreate")) { + if (CE->getNumArgs() != 4) + return; ArgNum = 1; Arg = CE->getArg(ArgNum)->IgnoreParenCasts(); if (hasPointerToPointerSizedType(Arg)) return; - } - - if (Arg == 0 && Name.equals("CFDictionaryCreate")) { + } else if (Name.equals("CFDictionaryCreate")) { + if (CE->getNumArgs() != 6) + return; // Check first argument. ArgNum = 1; Arg = CE->getArg(ArgNum)->IgnoreParenCasts(); @@ -125,17 +125,18 @@ void WalkAST::VisitCallExpr(CallExpr *CE) { } } - if (ArgNum != InvalidArgIndex) { + if (Arg) { assert(ArgNum == 1 || ArgNum == 2); - SmallString<256> BufName; + SmallString<64> BufName; llvm::raw_svector_ostream OsName(BufName); - assert(ArgNum == 1 || ArgNum == 2); OsName << " Invalid use of '" << Name << "'" ; SmallString<256> Buf; llvm::raw_svector_ostream Os(Buf); - Os << " The "<< ((ArgNum == 1) ? 
"first" : "second") << " argument to '" + // Use "second" and "third" since users will expect 1-based indexing + // for parameter names when mentioned in prose. + Os << " The "<< ((ArgNum == 1) ? "second" : "third") << " argument to '" << Name << "' must be a C array of pointer-sized values, not '" << Arg->getType().getAsString() << "'"; diff --git a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp index 2ab49ed..999c994 100644 --- a/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp @@ -55,16 +55,8 @@ public: }; } // end anonymous namespace -// ProgramState trait - a map from array symbol to it's state. -typedef llvm::ImmutableMap ArraySizeM; - -namespace { struct ArraySizeMap {}; } -namespace clang { namespace ento { -template<> struct ProgramStateTrait - : public ProgramStatePartialTrait { - static void *GDMIndex() { return ObjCContainersChecker::getTag(); } -}; -}} +// ProgramState trait - a map from array symbol to its state. +REGISTER_MAP_WITH_PROGRAMSTATE(ArraySizeMap, SymbolRef, DefinedSVal) void ObjCContainersChecker::addSizeInfo(const Expr *Array, const Expr *Size, CheckerContext &C) const { @@ -146,7 +138,7 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE, initBugType(); BugReport *R = new BugReport(*BT, "Index is out of bounds", N); R->addRange(IdxExpr->getSourceRange()); - C.EmitReport(R); + C.emitReport(R); return; } } diff --git a/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp new file mode 100644 index 0000000..e906e8a --- /dev/null +++ b/lib/StaticAnalyzer/Checkers/ObjCMissingSuperCallChecker.cpp @@ -0,0 +1,203 @@ +//==- ObjCMissingSuperCallChecker.cpp - Check missing super-calls in ObjC --==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines a ObjCMissingSuperCallChecker, a checker that +// analyzes a UIViewController implementation to determine if it +// correctly calls super in the methods where this is mandatory. +// +//===----------------------------------------------------------------------===// + +#include "ClangSACheckers.h" +#include "clang/StaticAnalyzer/Core/Checker.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h" +#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" +#include "clang/AST/ExprObjC.h" +#include "clang/AST/Expr.h" +#include "clang/AST/DeclObjC.h" +#include "clang/AST/RecursiveASTVisitor.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/Support/raw_ostream.h" + +using namespace clang; +using namespace ento; + +static bool isUIViewControllerSubclass(ASTContext &Ctx, + const ObjCImplementationDecl *D) { + IdentifierInfo *ViewControllerII = &Ctx.Idents.get("UIViewController"); + const ObjCInterfaceDecl *ID = D->getClassInterface(); + + for ( ; ID; ID = ID->getSuperClass()) + if (ID->getIdentifier() == ViewControllerII) + return true; + return false; +} + +//===----------------------------------------------------------------------===// +// FindSuperCallVisitor - Identify specific calls to the superclass. 
+//===----------------------------------------------------------------------===// + +class FindSuperCallVisitor : public RecursiveASTVisitor { +public: + explicit FindSuperCallVisitor(Selector S) : DoesCallSuper(false), Sel(S) {} + + bool VisitObjCMessageExpr(ObjCMessageExpr *E) { + if (E->getSelector() == Sel) + if (E->getReceiverKind() == ObjCMessageExpr::SuperInstance) + DoesCallSuper = true; + + // Recurse if we didn't find the super call yet. + return !DoesCallSuper; + } + + bool DoesCallSuper; + +private: + Selector Sel; +}; + +//===----------------------------------------------------------------------===// +// ObjCSuperCallChecker +//===----------------------------------------------------------------------===// + +namespace { +class ObjCSuperCallChecker : public Checker< + check::ASTDecl > { +public: + void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager &Mgr, + BugReporter &BR) const; +}; +} + +void ObjCSuperCallChecker::checkASTDecl(const ObjCImplementationDecl *D, + AnalysisManager &Mgr, + BugReporter &BR) const { + ASTContext &Ctx = BR.getContext(); + + if (!isUIViewControllerSubclass(Ctx, D)) + return; + + const char *SelectorNames[] = + {"addChildViewController", "viewDidAppear", "viewDidDisappear", + "viewWillAppear", "viewWillDisappear", "removeFromParentViewController", + "didReceiveMemoryWarning", "viewDidUnload", "viewWillUnload", + "viewDidLoad"}; + const unsigned SelectorArgumentCounts[] = + {1, 1, 1, 1, 1, 0, 0, 0, 0, 0}; + const size_t SelectorCount = llvm::array_lengthof(SelectorNames); + assert(llvm::array_lengthof(SelectorArgumentCounts) == SelectorCount); + + // Fill the Selectors SmallSet with all selectors we want to check. + llvm::SmallSet Selectors; + for (size_t i = 0; i < SelectorCount; i++) { + unsigned ArgumentCount = SelectorArgumentCounts[i]; + const char *SelectorCString = SelectorNames[i]; + + // Get the selector. + IdentifierInfo *II = &Ctx.Idents.get(SelectorCString); + Selectors.insert(Ctx.Selectors.getSelector(ArgumentCount, &II)); + } + + // Iterate over all instance methods. + for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(), + E = D->instmeth_end(); + I != E; ++I) { + Selector S = (*I)->getSelector(); + // Find out whether this is a selector that we want to check. + if (!Selectors.count(S)) + continue; + + ObjCMethodDecl *MD = *I; + + // Check if the method calls its superclass implementation. + if (MD->getBody()) + { + FindSuperCallVisitor Visitor(S); + Visitor.TraverseDecl(MD); + + // It doesn't call super, emit a diagnostic. + if (!Visitor.DoesCallSuper) { + PathDiagnosticLocation DLoc = + PathDiagnosticLocation::createEnd(MD->getBody(), + BR.getSourceManager(), + Mgr.getAnalysisDeclContext(D)); + + const char *Name = "Missing call to superclass"; + SmallString<256> Buf; + llvm::raw_svector_ostream os(Buf); + + os << "The '" << S.getAsString() + << "' instance method in UIViewController subclass '" << *D + << "' is missing a [super " << S.getAsString() << "] call"; + + BR.EmitBasicReport(MD, Name, categories::CoreFoundationObjectiveC, + os.str(), DLoc); + } + } + } +} + + +//===----------------------------------------------------------------------===// +// Check registration. +//===----------------------------------------------------------------------===// + +void ento::registerObjCSuperCallChecker(CheckerManager &Mgr) { + Mgr.registerChecker(); +} + + +/* + ToDo list for expanding this check in the future, the list is not exhaustive. 
+ There are also cases where calling super is suggested but not "mandatory". + In addition to be able to check the classes and methods below, architectural + improvements like being able to allow for the super-call to be done in a called + method would be good too. + +*** trivial cases: +UIResponder subclasses +- resignFirstResponder + +NSResponder subclasses +- cursorUpdate + +*** more difficult cases: + +UIDocument subclasses +- finishedHandlingError:recovered: (is multi-arg) +- finishedHandlingError:recovered: (is multi-arg) + +UIViewController subclasses +- loadView (should *never* call super) +- transitionFromViewController:toViewController: + duration:options:animations:completion: (is multi-arg) + +UICollectionViewController subclasses +- loadView (take care because UIViewController subclasses should NOT call super + in loadView, but UICollectionViewController subclasses should) + +NSObject subclasses +- doesNotRecognizeSelector (it only has to call super if it doesn't throw) + +UIPopoverBackgroundView subclasses (some of those are class methods) +- arrowDirection (should *never* call super) +- arrowOffset (should *never* call super) +- arrowBase (should *never* call super) +- arrowHeight (should *never* call super) +- contentViewInsets (should *never* call super) + +UITextSelectionRect subclasses (some of those are properties) +- rect (should *never* call super) +- range (should *never* call super) +- writingDirection (should *never* call super) +- isVertical (should *never* call super) +- containsStart (should *never* call super) +- containsEnd (should *never* call super) +*/ diff --git a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp index be45da1..98d2a85a 100644 --- a/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp @@ -72,6 +72,8 @@ public: void checkPreCall(const CallEvent &CE, CheckerContext &C) const; void checkPostCall(const CallEvent &CE, CheckerContext &C) const; + void printState(raw_ostream &Out, ProgramStateRef State, + const char *NL, const char *Sep) const; }; } // end anonymous namespace @@ -97,31 +99,14 @@ enum SelfFlagEnum { }; } -typedef llvm::ImmutableMap SelfFlag; -namespace { struct CalledInit {}; } -namespace { struct PreCallSelfFlags {}; } - -namespace clang { -namespace ento { - template<> - struct ProgramStateTrait : public ProgramStatePartialTrait { - static void *GDMIndex() { static int index = 0; return &index; } - }; - template <> - struct ProgramStateTrait : public ProgramStatePartialTrait { - static void *GDMIndex() { static int index = 0; return &index; } - }; - - /// \brief A call receiving a reference to 'self' invalidates the object that - /// 'self' contains. This keeps the "self flags" assigned to the 'self' - /// object before the call so we can assign them to the new object that 'self' - /// points to after the call. - template <> - struct ProgramStateTrait : public ProgramStatePartialTrait { - static void *GDMIndex() { static int index = 0; return &index; } - }; -} -} +REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, unsigned) +REGISTER_TRAIT_WITH_PROGRAMSTATE(CalledInit, bool) + +/// \brief A call receiving a reference to 'self' invalidates the object that +/// 'self' contains. This keeps the "self flags" assigned to the 'self' +/// object before the call so we can assign them to the new object that 'self' +/// points to after the call. 
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PreCallSelfFlags, unsigned) static SelfFlagEnum getSelfFlags(SVal val, ProgramStateRef state) { if (SymbolRef sym = val.getAsSymbol()) @@ -138,7 +123,8 @@ static void addSelfFlag(ProgramStateRef state, SVal val, SelfFlagEnum flag, CheckerContext &C) { // We tag the symbol that the SVal wraps. if (SymbolRef sym = val.getAsSymbol()) - C.addTransition(state->set(sym, getSelfFlags(val, C) | flag)); + state = state->set(sym, getSelfFlags(val, state) | flag); + C.addTransition(state); } static bool hasSelfFlag(SVal val, SelfFlagEnum flag, CheckerContext &C) { @@ -176,7 +162,7 @@ static void checkForInvalidSelf(const Expr *E, CheckerContext &C, BugReport *report = new BugReport(*new InitSelfBug(), errorStr, N); - C.EmitReport(report); + C.emitReport(report); } void ObjCSelfInitChecker::checkPostObjCMessage(const ObjCMethodCall &Msg, @@ -305,13 +291,12 @@ void ObjCSelfInitChecker::checkPostCall(const CallEvent &CE, // returns 'self'. So assign the flags, which were set on 'self' to the // return value. // EX: self = performMoreInitialization(self) - const Expr *CallExpr = CE.getOriginExpr(); - if (CallExpr) - addSelfFlag(state, state->getSVal(CallExpr, C.getLocationContext()), - prevFlags, C); + addSelfFlag(state, CE.getReturnValue(), prevFlags, C); return; } } + + C.addTransition(state); } void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad, @@ -346,6 +331,53 @@ void ObjCSelfInitChecker::checkBind(SVal loc, SVal val, const Stmt *S, } } +void ObjCSelfInitChecker::printState(raw_ostream &Out, ProgramStateRef State, + const char *NL, const char *Sep) const { + SelfFlagTy FlagMap = State->get(); + bool DidCallInit = State->get(); + SelfFlagEnum PreCallFlags = (SelfFlagEnum)State->get(); + + if (FlagMap.isEmpty() && !DidCallInit && !PreCallFlags) + return; + + Out << Sep << NL << "ObjCSelfInitChecker:" << NL; + + if (DidCallInit) + Out << " An init method has been called." << NL; + + if (PreCallFlags != SelfFlag_None) { + if (PreCallFlags & SelfFlag_Self) { + Out << " An argument of the current call came from the 'self' variable." + << NL; + } + if (PreCallFlags & SelfFlag_InitRes) { + Out << " An argument of the current call came from an init method." + << NL; + } + } + + Out << NL; + for (SelfFlagTy::iterator I = FlagMap.begin(), E = FlagMap.end(); + I != E; ++I) { + Out << I->first << " : "; + + if (I->second == SelfFlag_None) + Out << "none"; + + if (I->second & SelfFlag_Self) + Out << "self variable"; + + if (I->second & SelfFlag_InitRes) { + if (I->second != SelfFlag_InitRes) + Out << " | "; + Out << "result of init method"; + } + + Out << NL; + } +} + + // FIXME: A callback should disable checkers at the start of functions. 
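The checkers in this area are being migrated from hand-written ProgramStateTrait specializations to the REGISTER_MAP_WITH_PROGRAMSTATE and REGISTER_TRAIT_WITH_PROGRAMSTATE macros. As a rough orientation only (this sketch is not part of the patch; 'ExampleHandleChecker' and 'OpenHandles' are hypothetical names), a checker written against this API might declare and use such a map along these lines:

#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

using namespace clang;
using namespace ento;

namespace {
class ExampleHandleChecker
    : public Checker<check::PostCall, check::DeadSymbols> {
public:
  void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
  void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
};
} // end anonymous namespace

// Registers an ImmutableMap<SymbolRef, unsigned> in the GDM; following the
// convention used elsewhere in this patch, it is read through
// State->get<OpenHandles>() and an OpenHandlesTy typedef.
REGISTER_MAP_WITH_PROGRAMSTATE(OpenHandles, SymbolRef, unsigned)

void ExampleHandleChecker::checkPostCall(const CallEvent &Call,
                                         CheckerContext &C) const {
  SymbolRef Sym = Call.getReturnValue().getAsSymbol();
  if (!Sym)
    return;
  // ProgramState is immutable: set<> returns a fresh state, which must be
  // published with an explicit transition.
  ProgramStateRef State = C.getState();
  State = State->set<OpenHandles>(Sym, 1);
  C.addTransition(State);
}

void ExampleHandleChecker::checkDeadSymbols(SymbolReaper &SymReaper,
                                            CheckerContext &C) const {
  ProgramStateRef State = C.getState();
  OpenHandlesTy Map = State->get<OpenHandles>();
  for (OpenHandlesTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I)
    if (SymReaper.isDead(I->first))
      State = State->remove<OpenHandles>(I->first);
  C.addTransition(State);
}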
 static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
   if (!ND)
diff --git a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index fe4845b..b5d9959 100644
--- a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -59,7 +59,7 @@ void PointerArithChecker::checkPreStmt(const BinaryOperator *B,
                                        "dangerous."));
     BugReport *R = new BugReport(*BT, BT->getDescription(), N);
     R->addRange(B->getSourceRange());
-    C.EmitReport(R);
+    C.emitReport(R);
   }
 }
 }
diff --git a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
index fa5c6a3..47da87f 100644
--- a/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -67,7 +67,7 @@ void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
                      "the same memory chunk may cause incorrect result."));
     BugReport *R = new BugReport(*BT, BT->getDescription(), N);
     R->addRange(B->getSourceRange());
-    C.EmitReport(R);
+    C.emitReport(R);
   }
 }
diff --git a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 2d018ef..d9b6384 100644
--- a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -43,15 +43,7 @@ public:
 } // end anonymous namespace
 
 // GDM Entry for tracking lock state.
-namespace { class LockSet {}; }
-namespace clang {
-namespace ento {
-template <> struct ProgramStateTrait<LockSet> :
-  public ProgramStatePartialTrait<llvm::ImmutableList<const MemRegion*> > {
-  static void *GDMIndex() { static int x = 0; return &x; }
-};
-} // end of ento (ProgramState) namespace
-} // end clang namespace
+REGISTER_LIST_WITH_PROGRAMSTATE(LockSet, const MemRegion *)
 
 void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
@@ -118,7 +110,7 @@ void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
                          "This lock has already "
                          "been acquired", N);
     report->addRange(CE->getArg(0)->getSourceRange());
-    C.EmitReport(report);
+    C.emitReport(report);
     return;
   }
 
@@ -163,7 +155,7 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
     return;
 
   ProgramStateRef state = C.getState();
-  llvm::ImmutableList<const MemRegion*> LS = state->get<LockSet>();
+  LockSetTy LS = state->get<LockSet>();
 
   // FIXME: Better analysis requires IPA for wrappers.
   // FIXME: check for double unlocks
@@ -183,7 +175,7 @@ void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
                          "Possible lock order "
                          "reversal", N);
     report->addRange(CE->getArg(0)->getSourceRange());
-    C.EmitReport(report);
+    C.emitReport(report);
     return;
   }
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index 3c00d99..304051c 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -40,24 +40,6 @@ using namespace clang;
 using namespace ento;
 using llvm::StrInStrNoCase;
 
-namespace {
-/// Wrapper around different kinds of node builder, so that helper functions
-/// can have a common interface.
-class GenericNodeBuilderRefCount { - CheckerContext *C; - const ProgramPointTag *tag; -public: - GenericNodeBuilderRefCount(CheckerContext &c, - const ProgramPointTag *t = 0) - : C(&c), tag(t){} - - ExplodedNode *MakeNode(ProgramStateRef state, ExplodedNode *Pred, - bool MarkAsSink = false) { - return C->addTransition(state, Pred, tag, MarkAsSink); - } -}; -} // end anonymous namespace - //===----------------------------------------------------------------------===// // Primitives used for constructing summaries for function/method calls. //===----------------------------------------------------------------------===// @@ -66,9 +48,23 @@ public: /// particular argument. enum ArgEffect { DoNothing, Autorelease, Dealloc, DecRef, DecRefMsg, DecRefBridgedTransfered, - DecRefAndStopTracking, DecRefMsgAndStopTracking, IncRefMsg, IncRef, MakeCollectable, MayEscape, - NewAutoreleasePool, StopTracking }; + NewAutoreleasePool, + + // Stop tracking the argument - the effect of the call is + // unknown. + StopTracking, + + // In some cases, we obtain a better summary for this checker + // by looking at the call site than by inlining the function. + // Signifies that we should stop tracking the symbol even if + // the function is inlined. + StopTrackingHard, + + // The function decrements the reference count and the checker + // should stop tracking the argument. + DecRefAndStopTrackingHard, DecRefMsgAndStopTrackingHard + }; namespace llvm { template <> struct FoldingSetTrait { @@ -90,7 +86,13 @@ class RetEffect { public: enum Kind { NoRet, OwnedSymbol, OwnedAllocatedSymbol, NotOwnedSymbol, GCNotOwnedSymbol, ARCNotOwnedSymbol, - OwnedWhenTrackedReceiver }; + OwnedWhenTrackedReceiver, + // Treat this function as returning a non-tracked symbol even if + // the function has been inlined. This is used where the call + // site summary is more presise than the summary indirectly produced + // by inlining the function + NoRetHard + }; enum ObjKind { CF, ObjC, AnyObj }; @@ -133,6 +135,9 @@ public: static RetEffect MakeNoRet() { return RetEffect(NoRet); } + static RetEffect MakeNoRetHard() { + return RetEffect(NoRetHard); + } void Profile(llvm::FoldingSetNodeID& ID) const { ID.AddInteger((unsigned) K); @@ -337,20 +342,7 @@ void RefVal::print(raw_ostream &Out) const { // RefBindings - State used to track object reference counts. 
//===----------------------------------------------------------------------===// -typedef llvm::ImmutableMap RefBindings; - -namespace clang { -namespace ento { -template<> -struct ProgramStateTrait - : public ProgramStatePartialTrait { - static void *GDMIndex() { - static int RefBIndex = 0; - return &RefBIndex; - } -}; -} -} +REGISTER_MAP_WITH_PROGRAMSTATE(RefBindings, SymbolRef, RefVal) static inline const RefVal *getRefBinding(ProgramStateRef State, SymbolRef Sym) { @@ -893,7 +885,7 @@ static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) { return FName.find("MakeCollectable") != StringRef::npos; } -static ArgEffect getStopTrackingEquivalent(ArgEffect E) { +static ArgEffect getStopTrackingHardEquivalent(ArgEffect E) { switch (E) { case DoNothing: case Autorelease: @@ -904,13 +896,14 @@ static ArgEffect getStopTrackingEquivalent(ArgEffect E) { case MayEscape: case NewAutoreleasePool: case StopTracking: - return StopTracking; + case StopTrackingHard: + return StopTrackingHard; case DecRef: - case DecRefAndStopTracking: - return DecRefAndStopTracking; + case DecRefAndStopTrackingHard: + return DecRefAndStopTrackingHard; case DecRefMsg: - case DecRefMsgAndStopTracking: - return DecRefMsgAndStopTracking; + case DecRefMsgAndStopTrackingHard: + return DecRefMsgAndStopTrackingHard; case Dealloc: return Dealloc; } @@ -921,33 +914,65 @@ static ArgEffect getStopTrackingEquivalent(ArgEffect E) { void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S, const CallEvent &Call) { if (Call.hasNonZeroCallbackArg()) { - ArgEffect RecEffect = getStopTrackingEquivalent(S->getReceiverEffect()); - ArgEffect DefEffect = getStopTrackingEquivalent(S->getDefaultArgEffect()); + ArgEffect RecEffect = + getStopTrackingHardEquivalent(S->getReceiverEffect()); + ArgEffect DefEffect = + getStopTrackingHardEquivalent(S->getDefaultArgEffect()); ArgEffects CustomArgEffects = S->getArgEffects(); for (ArgEffects::iterator I = CustomArgEffects.begin(), E = CustomArgEffects.end(); I != E; ++I) { - ArgEffect Translated = getStopTrackingEquivalent(I->second); + ArgEffect Translated = getStopTrackingHardEquivalent(I->second); if (Translated != DefEffect) ScratchArgs = AF.add(ScratchArgs, I->first, Translated); } - RetEffect RE = RetEffect::MakeNoRet(); + RetEffect RE = RetEffect::MakeNoRetHard(); // Special cases where the callback argument CANNOT free the return value. // This can generally only happen if we know that the callback will only be // called when the return value is already being deallocated. if (const FunctionCall *FC = dyn_cast(&Call)) { - IdentifierInfo *Name = FC->getDecl()->getIdentifier(); - - // This callback frees the associated buffer. - if (Name->isStr("CGBitmapContextCreateWithData")) - RE = S->getRetEffect(); + if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) { + // When the CGBitmapContext is deallocated, the callback here will free + // the associated data buffer. + if (Name->isStr("CGBitmapContextCreateWithData")) + RE = S->getRetEffect(); + } } S = getPersistentSummary(RE, RecEffect, DefEffect); } + + // Special case '[super init];' and '[self init];' + // + // Even though calling '[super init]' without assigning the result to self + // and checking if the parent returns 'nil' is a bad pattern, it is common. + // Additionally, our Self Init checker already warns about it. 
To avoid + // overwhelming the user with messages from both checkers, we model the case + // of '[super init]' in cases when it is not consumed by another expression + // as if the call preserves the value of 'self'; essentially, assuming it can + // never fail and return 'nil'. + // Note, we don't want to just stop tracking the value since we want the + // RetainCount checker to report leaks and use-after-free if SelfInit checker + // is turned off. + if (const ObjCMethodCall *MC = dyn_cast(&Call)) { + if (MC->getMethodFamily() == OMF_init && MC->isReceiverSelfOrSuper()) { + + // Check if the message is not consumed, we know it will not be used in + // an assignment, ex: "self = [super init]". + const Expr *ME = MC->getOriginExpr(); + const LocationContext *LCtx = MC->getLocationContext(); + ParentMap &PM = LCtx->getAnalysisDeclContext()->getParentMap(); + if (!PM.isConsumedExpr(ME)) { + RetainSummaryTemplate ModifiableSummaryTemplate(S, *this); + ModifiableSummaryTemplate->setReceiverEffect(DoNothing); + ModifiableSummaryTemplate->setRetEffect(RetEffect::MakeNoRet()); + } + } + + } } const RetainSummary * @@ -1036,6 +1061,8 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) { // The headers on OS X 10.8 use cf_consumed/ns_returns_retained, // but we can fully model NSMakeCollectable ourselves. AllowAnnotations = false; + } else if (FName == "CFPlugInInstanceCreate") { + S = getPersistentSummary(RetEffect::MakeNoRet()); } else if (FName == "IOBSDNameMatching" || FName == "IOServiceMatching" || FName == "IOServiceNameMatching" || @@ -1108,6 +1135,11 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) { break; if (RetTy->isPointerType()) { + if (FD->getAttr()) { + S = getCFCreateGetRuleSummary(FD); + break; + } + // For CoreFoundation ('CF') types. if (cocoa::isRefType(RetTy, "CF", FName)) { if (isRetain(FD, FName)) @@ -1347,22 +1379,6 @@ RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ, const RetainSummary * RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD, Selector S, QualType RetTy) { - - if (MD) { - // Scan the method decl for 'void*' arguments. These should be treated - // as 'StopTracking' because they are often used with delegates. - // Delegates are a frequent form of false positives with the retain - // count checker. - unsigned i = 0; - for (ObjCMethodDecl::param_const_iterator I = MD->param_begin(), - E = MD->param_end(); I != E; ++I, ++i) - if (const ParmVarDecl *PD = *I) { - QualType Ty = Ctx.getCanonicalType(PD->getType()); - if (Ty.getLocalUnqualifiedType() == Ctx.VoidPtrTy) - ScratchArgs = AF.add(ScratchArgs, i, StopTracking); - } - } - // Any special effects? ArgEffect ReceiverEff = DoNothing; RetEffect ResultEff = RetEffect::MakeNoRet(); @@ -1441,9 +1457,9 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD, StringRef Slot = S.getNameForSlot(i); if (Slot.substr(Slot.size() - 8).equals_lower("delegate")) { if (ResultEff == ObjCInitRetE) - ResultEff = RetEffect::MakeNoRet(); + ResultEff = RetEffect::MakeNoRetHard(); else - ReceiverEff = StopTracking; + ReceiverEff = StopTrackingHard; } } } @@ -2174,6 +2190,7 @@ GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N, // If allocation happened in a function different from the leak node context, // do not report the binding. 
+ assert(N && "Could not find allocation node"); if (N->getLocationContext() != LeakContext) { FirstBinding = 0; } @@ -2229,27 +2246,36 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC, // Get the retain count. const RefVal* RV = getRefBinding(EndN->getState(), Sym); + assert(RV); if (RV->getKind() == RefVal::ErrorLeakReturned) { // FIXME: Per comments in rdar://6320065, "create" only applies to CF // objects. Only "copy", "alloc", "retain" and "new" transfer ownership // to the caller for NS objects. const Decl *D = &EndN->getCodeDecl(); - if (const ObjCMethodDecl *MD = dyn_cast(D)) { - os << " is returned from a method whose name ('" - << MD->getSelector().getAsString() - << "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'." - " This violates the naming convention rules" - " given in the Memory Management Guide for Cocoa"; - } + + os << (isa(D) ? " is returned from a method " + : " is returned from a function "); + + if (D->getAttr()) + os << "that is annotated as CF_RETURNS_NOT_RETAINED"; + else if (D->getAttr()) + os << "that is annotated as NS_RETURNS_NOT_RETAINED"; else { - const FunctionDecl *FD = cast(D); - os << " is returned from a function whose name ('" - << *FD - << "') does not contain 'Copy' or 'Create'. This violates the naming" - " convention rules given in the Memory Management Guide for Core" - " Foundation"; - } + if (const ObjCMethodDecl *MD = dyn_cast(D)) { + os << "whose name ('" << MD->getSelector().getAsString() + << "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'." + " This violates the naming convention rules" + " given in the Memory Management Guide for Cocoa"; + } + else { + const FunctionDecl *FD = cast(D); + os << "whose name ('" << *FD + << "') does not contain 'Copy' or 'Create'. 
This violates the naming" + " convention rules given in the Memory Management Guide for Core" + " Foundation"; + } + } } else if (RV->getKind() == RefVal::ErrorGCLeakReturned) { ObjCMethodDecl &MD = cast(EndN->getCodeDecl()); @@ -2474,6 +2500,10 @@ public: void checkSummary(const RetainSummary &Summ, const CallEvent &Call, CheckerContext &C) const; + void processSummaryOfInlined(const RetainSummary &Summ, + const CallEvent &Call, + CheckerContext &C) const; + bool evalCall(const CallExpr *CE, CheckerContext &C) const; ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond, @@ -2499,8 +2529,8 @@ public: void checkEndPath(CheckerContext &C) const; ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym, - RefVal V, ArgEffect E, RefVal::Kind &hasErr, - CheckerContext &C) const; + RefVal V, ArgEffect E, RefVal::Kind &hasErr, + CheckerContext &C) const; void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange, RefVal::Kind ErrorKind, SymbolRef Sym, @@ -2515,13 +2545,12 @@ public: SmallVectorImpl &Leaked) const; std::pair - handleAutoreleaseCounts(ProgramStateRef state, - GenericNodeBuilderRefCount Bd, ExplodedNode *Pred, - CheckerContext &Ctx, SymbolRef Sym, RefVal V) const; + handleAutoreleaseCounts(ProgramStateRef state, ExplodedNode *Pred, + const ProgramPointTag *Tag, CheckerContext &Ctx, + SymbolRef Sym, RefVal V) const; ExplodedNode *processLeaks(ProgramStateRef state, SmallVectorImpl &Leaked, - GenericNodeBuilderRefCount &Builder, CheckerContext &Ctx, ExplodedNode *Pred = 0) const; }; @@ -2685,11 +2714,13 @@ void RetainCountChecker::checkPostStmt(const ObjCBoxedExpr *Ex, void RetainCountChecker::checkPostCall(const CallEvent &Call, CheckerContext &C) const { - if (C.wasInlined) - return; - RetainSummaryManager &Summaries = getSummaryManager(C); const RetainSummary *Summ = Summaries.getSummary(Call, C.getState()); + + if (C.wasInlined) { + processSummaryOfInlined(*Summ, Call, C); + return; + } checkSummary(*Summ, Call, C); } @@ -2721,6 +2752,45 @@ static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) { return RetTy; } +// We don't always get the exact modeling of the function with regards to the +// retain count checker even when the function is inlined. For example, we need +// to stop tracking the symbols which were marked with StopTrackingHard. +void RetainCountChecker::processSummaryOfInlined(const RetainSummary &Summ, + const CallEvent &CallOrMsg, + CheckerContext &C) const { + ProgramStateRef state = C.getState(); + + // Evaluate the effect of the arguments. + for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) { + if (Summ.getArg(idx) == StopTrackingHard) { + SVal V = CallOrMsg.getArgSVal(idx); + if (SymbolRef Sym = V.getAsLocSymbol()) { + state = removeRefBinding(state, Sym); + } + } + } + + // Evaluate the effect on the message receiver. + const ObjCMethodCall *MsgInvocation = dyn_cast(&CallOrMsg); + if (MsgInvocation) { + if (SymbolRef Sym = MsgInvocation->getReceiverSVal().getAsLocSymbol()) { + if (Summ.getReceiverEffect() == StopTrackingHard) { + state = removeRefBinding(state, Sym); + } + } + } + + // Consult the summary for the return value. 
+ RetEffect RE = Summ.getRetEffect(); + if (RE.getKind() == RetEffect::NoRetHard) { + SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol(); + if (Sym) + state = removeRefBinding(state, Sym); + } + + C.addTransition(state); +} + void RetainCountChecker::checkSummary(const RetainSummary &Summ, const CallEvent &CallOrMsg, CheckerContext &C) const { @@ -2755,7 +2825,7 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ, if (const RefVal *T = getRefBinding(state, Sym)) { ReceiverIsTracked = true; state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(), - hasErr, C); + hasErr, C); if (hasErr) { ErrorRange = MsgInvocation->getOriginExpr()->getReceiverRange(); ErrorSym = Sym; @@ -2786,13 +2856,13 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ, llvm_unreachable("Unhandled RetEffect."); case RetEffect::NoRet: + case RetEffect::NoRetHard: // No work necessary. break; case RetEffect::OwnedAllocatedSymbol: case RetEffect::OwnedSymbol: { - SymbolRef Sym = state->getSVal(CallOrMsg.getOriginExpr(), - C.getLocationContext()).getAsSymbol(); + SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol(); if (!Sym) break; @@ -2811,10 +2881,10 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ, case RetEffect::ARCNotOwnedSymbol: case RetEffect::NotOwnedSymbol: { const Expr *Ex = CallOrMsg.getOriginExpr(); - SymbolRef Sym = state->getSVal(Ex, C.getLocationContext()).getAsSymbol(); + SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol(); if (!Sym) break; - + assert(Ex); // Use GetReturnType in order to give [NSFoo alloc] the type NSFoo *. QualType ResultTy = GetReturnType(Ex, C.getASTContext()); state = setRefBinding(state, Sym, RefVal::makeNotOwned(RE.getObjKind(), @@ -2864,8 +2934,8 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, case DecRefMsg: E = IgnoreRetainMsg ? DoNothing : DecRef; break; - case DecRefMsgAndStopTracking: - E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTracking; + case DecRefMsgAndStopTrackingHard: + E = IgnoreRetainMsg ? StopTracking : DecRefAndStopTrackingHard; break; case MakeCollectable: E = C.isObjCGCEnabled() ? DecRef : DoNothing; @@ -2886,7 +2956,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, case DecRefMsg: case IncRefMsg: case MakeCollectable: - case DecRefMsgAndStopTracking: + case DecRefMsgAndStopTrackingHard: llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted"); case Dealloc: @@ -2935,6 +3005,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, break; case StopTracking: + case StopTrackingHard: return removeRefBinding(state, sym); case IncRef: @@ -2955,7 +3026,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, case DecRef: case DecRefBridgedTransfered: - case DecRefAndStopTracking: + case DecRefAndStopTrackingHard: switch (V.getKind()) { default: // case 'RefVal::Released' handled above. @@ -2966,7 +3037,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, if (V.getCount() == 1) V = V ^ (E == DecRefBridgedTransfered ? 
RefVal::NotOwned : RefVal::Released); - else if (E == DecRefAndStopTracking) + else if (E == DecRefAndStopTrackingHard) return removeRefBinding(state, sym); V = V - 1; @@ -2974,7 +3045,7 @@ RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym, case RefVal::NotOwned: if (V.getCount() > 0) { - if (E == DecRefAndStopTracking) + if (E == DecRefAndStopTrackingHard) return removeRefBinding(state, sym); V = V - 1; } else { @@ -3035,7 +3106,7 @@ void RetainCountChecker::processNonLeakError(ProgramStateRef St, C.isObjCGCEnabled(), SummaryLog, N, Sym); report->addRange(ErrorRange); - C.EmitReport(report); + C.emitReport(report); } //===----------------------------------------------------------------------===// @@ -3090,8 +3161,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const { if (RetVal.isUnknown()) { // If the receiver is unknown, conjure a return value. SValBuilder &SVB = C.getSValBuilder(); - unsigned Count = C.getCurrentBlockCount(); - RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count); + RetVal = SVB.conjureSymbolVal(0, CE, LCtx, ResultTy, C.blockCount()); } state = state->BindExpr(CE, LCtx, RetVal, false); @@ -3105,8 +3175,7 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const { Binding = getRefBinding(state, Sym); // Invalidate the argument region. - unsigned Count = C.getCurrentBlockCount(); - state = state->invalidateRegions(ArgRegion, CE, Count, LCtx); + state = state->invalidateRegions(ArgRegion, CE, C.blockCount(), LCtx); // Restore the refcount status of the argument. if (Binding) @@ -3121,12 +3190,6 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const { // Handle return statements. //===----------------------------------------------------------------------===// -// Return true if the current LocationContext has no caller context. -static bool inTopFrame(CheckerContext &C) { - const LocationContext *LC = C.getLocationContext(); - return LC->getParent() == 0; -} - void RetainCountChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const { @@ -3135,7 +3198,7 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S, // better checking even for inlined calls, and see if they match // with their expected semantics (e.g., the method should return a retained // object, etc.). - if (!inTopFrame(C)) + if (!C.inTopFrame()) return; const Expr *RetE = S->getRetValue(); @@ -3196,8 +3259,8 @@ void RetainCountChecker::checkPreStmt(const ReturnStmt *S, // Update the autorelease counts. static SimpleProgramPointTag AutoreleaseTag("RetainCountChecker : Autorelease"); - GenericNodeBuilderRefCount Bd(C, &AutoreleaseTag); - llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, C, Sym, X); + llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Pred, &AutoreleaseTag, + C, Sym, X); // Did we cache out? 
if (!Pred) @@ -3267,7 +3330,7 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S, new CFRefLeakReport(*getLeakAtReturnBug(LOpts, GCEnabled), LOpts, GCEnabled, SummaryLog, N, Sym, C); - C.EmitReport(report); + C.emitReport(report); } } } @@ -3288,7 +3351,7 @@ void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S, new CFRefReport(*returnNotOwnedForOwned, C.getASTContext().getLangOpts(), C.isObjCGCEnabled(), SummaryLog, N, Sym); - C.EmitReport(report); + C.emitReport(report); } } } @@ -3354,18 +3417,19 @@ ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state, // too bad since the number of symbols we will track in practice are // probably small and evalAssume is only called at branches and a few // other places. - RefBindings B = state->get(); + RefBindingsTy B = state->get(); if (B.isEmpty()) return state; bool changed = false; - RefBindings::Factory &RefBFactory = state->get_context(); + RefBindingsTy::Factory &RefBFactory = state->get_context(); - for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) { - // Check if the symbol is null (or equal to any constant). - // If this is the case, stop tracking the symbol. - if (state->getSymVal(I.getKey())) { + for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) { + // Check if the symbol is null stop tracking the symbol. + ConstraintManager &CMgr = state->getConstraintManager(); + ConditionTruthVal AllocFailed = CMgr.isNull(state, I.getKey()); + if (AllocFailed.isConstrainedTrue()) { changed = true; B = RefBFactory.remove(B, I.getKey()); } @@ -3410,8 +3474,8 @@ RetainCountChecker::checkRegionChanges(ProgramStateRef state, std::pair RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state, - GenericNodeBuilderRefCount Bd, ExplodedNode *Pred, + const ProgramPointTag *Tag, CheckerContext &Ctx, SymbolRef Sym, RefVal V) const { unsigned ACnt = V.getAutoreleaseCount(); @@ -3440,7 +3504,7 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state, V.setAutoreleaseCount(0); } state = setRefBinding(state, Sym, V); - ExplodedNode *N = Bd.MakeNode(state, Pred); + ExplodedNode *N = Ctx.addTransition(state, Pred, Tag); if (N == 0) state = 0; return std::make_pair(N, state); @@ -3451,7 +3515,8 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state, V = V ^ RefVal::ErrorOverAutorelease; state = setRefBinding(state, Sym, V); - if (ExplodedNode *N = Bd.MakeNode(state, Pred, true)) { + ExplodedNode *N = Ctx.generateSink(state, Pred, Tag); + if (N) { SmallString<128> sbuf; llvm::raw_svector_ostream os(sbuf); os << "Object over-autoreleased: object was sent -autorelease "; @@ -3466,7 +3531,7 @@ RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state, CFRefReport *report = new CFRefReport(*overAutorelease, LOpts, /* GCEnabled = */ false, SummaryLog, N, Sym, os.str()); - Ctx.EmitReport(report); + Ctx.emitReport(report); } return std::make_pair((ExplodedNode *)0, (ProgramStateRef )0); @@ -3492,14 +3557,13 @@ RetainCountChecker::handleSymbolDeath(ProgramStateRef state, ExplodedNode * RetainCountChecker::processLeaks(ProgramStateRef state, SmallVectorImpl &Leaked, - GenericNodeBuilderRefCount &Builder, CheckerContext &Ctx, ExplodedNode *Pred) const { if (Leaked.empty()) return Pred; // Generate an intermediate node representing the leak point. 
- ExplodedNode *N = Builder.MakeNode(state, Pred); + ExplodedNode *N = Ctx.addTransition(state, Pred); if (N) { for (SmallVectorImpl::iterator @@ -3513,7 +3577,7 @@ RetainCountChecker::processLeaks(ProgramStateRef state, CFRefLeakReport *report = new CFRefLeakReport(*BT, LOpts, GCEnabled, SummaryLog, N, *I, Ctx); - Ctx.EmitReport(report); + Ctx.emitReport(report); } } @@ -3522,13 +3586,12 @@ RetainCountChecker::processLeaks(ProgramStateRef state, void RetainCountChecker::checkEndPath(CheckerContext &Ctx) const { ProgramStateRef state = Ctx.getState(); - GenericNodeBuilderRefCount Bd(Ctx); - RefBindings B = state->get(); + RefBindingsTy B = state->get(); ExplodedNode *Pred = Ctx.getPredecessor(); - for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) { - llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, Ctx, - I->first, I->second); + for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) { + llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Pred, /*Tag=*/0, + Ctx, I->first, I->second); if (!state) return; } @@ -3543,10 +3606,10 @@ void RetainCountChecker::checkEndPath(CheckerContext &Ctx) const { B = state->get(); SmallVector Leaked; - for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) + for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) state = handleSymbolDeath(state, I->first, I->second, Leaked); - processLeaks(state, Leaked, Bd, Ctx, Pred); + processLeaks(state, Leaked, Ctx, Pred); } const ProgramPointTag * @@ -3567,7 +3630,7 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper, ExplodedNode *Pred = C.getPredecessor(); ProgramStateRef state = C.getState(); - RefBindings B = state->get(); + RefBindingsTy B = state->get(); // Update counts from autorelease pools for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(), @@ -3576,8 +3639,8 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper, if (const RefVal *T = B.lookup(Sym)){ // Use the symbol as the tag. // FIXME: This might not be as unique as we would like. - GenericNodeBuilderRefCount Bd(C, getDeadSymbolTag(Sym)); - llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, C, + const ProgramPointTag *Tag = getDeadSymbolTag(Sym); + llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Pred, Tag, C, Sym, *T); if (!state) return; @@ -3593,17 +3656,14 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper, state = handleSymbolDeath(state, *I, *T, Leaked); } - { - GenericNodeBuilderRefCount Bd(C, this); - Pred = processLeaks(state, Leaked, Bd, C, Pred); - } + Pred = processLeaks(state, Leaked, C, Pred); // Did we cache out? if (!Pred) return; // Now generate a new node that nukes the old bindings. 
- RefBindings::Factory &F = state->get_context(); + RefBindingsTy::Factory &F = state->get_context(); for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(), E = SymReaper.dead_end(); I != E; ++I) @@ -3616,12 +3676,12 @@ void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper, void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State, const char *NL, const char *Sep) const { - RefBindings B = State->get(); + RefBindingsTy B = State->get(); if (!B.isEmpty()) Out << Sep << NL; - for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) { + for (RefBindingsTy::iterator I = B.begin(), E = B.end(); I != E; ++I) { Out << I->first << " : "; I->second.print(Out); Out << NL; diff --git a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp index 6e56593..f3560aa 100644 --- a/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp @@ -82,7 +82,7 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS, new BugReport(*BT, BT->getDescription(), N); report->addRange(RetE->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } } diff --git a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp index ca2a55d..37ec1aa 100644 --- a/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp @@ -16,6 +16,7 @@ #include "ClangSACheckers.h" #include "clang/StaticAnalyzer/Core/Checker.h" #include "clang/StaticAnalyzer/Core/CheckerManager.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h" @@ -41,6 +42,19 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS, if (!C.getState()->getSVal(RetE, C.getLocationContext()).isUndef()) return; + // "return;" is modeled to evaluate to an UndefinedValue. Allow UndefinedValue + // to be returned in functions returning void to support the following pattern: + // void foo() { + // return; + // } + // void test() { + // return foo(); + // } + const StackFrameContext *SFC = C.getStackFrame(); + QualType RT = CallEvent::getDeclaredResultType(SFC->getDecl()); + if (!RT.isNull() && RT->isSpecificBuiltinType(BuiltinType::Void)) + return; + ExplodedNode *N = C.generateSink(); if (!N) @@ -53,11 +67,10 @@ void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS, BugReport *report = new BugReport(*BT, BT->getDescription(), N); - report->disablePathPruning(); report->addRange(RetE->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, RetE, report); + bugreporter::trackNullOrUndefValue(N, RetE, *report); - C.EmitReport(report); + C.emitReport(report); } void ento::registerReturnUndefChecker(CheckerManager &mgr) { diff --git a/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp new file mode 100644 index 0000000..ee055ad --- /dev/null +++ b/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp @@ -0,0 +1,348 @@ +//===-- SimpleStreamChecker.cpp -----------------------------------------*- C++ -*--// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// Defines a checker for proper use of fopen/fclose APIs. +// - If a file has been closed with fclose, it should not be accessed again. +// Accessing a closed file results in undefined behavior. +// - If a file was opened with fopen, it must be closed with fclose before +// the execution ends. Failing to do so results in a resource leak. +// +//===----------------------------------------------------------------------===// + +#include "ClangSACheckers.h" +#include "clang/StaticAnalyzer/Core/Checker.h" +#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" + +using namespace clang; +using namespace ento; + +namespace { +typedef llvm::SmallVector SymbolVector; + +struct StreamState { +private: + enum Kind { Opened, Closed } K; + StreamState(Kind InK) : K(InK) { } + +public: + bool isOpened() const { return K == Opened; } + bool isClosed() const { return K == Closed; } + + static StreamState getOpened() { return StreamState(Opened); } + static StreamState getClosed() { return StreamState(Closed); } + + bool operator==(const StreamState &X) const { + return K == X.K; + } + void Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddInteger(K); + } +}; + +class SimpleStreamChecker : public Checker { + + mutable IdentifierInfo *IIfopen, *IIfclose; + + OwningPtr DoubleCloseBugType; + OwningPtr LeakBugType; + + void initIdentifierInfo(ASTContext &Ctx) const; + + void reportDoubleClose(SymbolRef FileDescSym, + const CallEvent &Call, + CheckerContext &C) const; + + void reportLeaks(SymbolVector LeakedStreams, + CheckerContext &C, + ExplodedNode *ErrNode) const; + + bool guaranteedNotToCloseFile(const CallEvent &Call) const; + +public: + SimpleStreamChecker(); + + /// Process fopen. + void checkPostCall(const CallEvent &Call, CheckerContext &C) const; + /// Process fclose. + void checkPreCall(const CallEvent &Call, CheckerContext &C) const; + + void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const; + + /// Deal with symbol escape as a byproduct of a bind. + void checkBind(SVal location, SVal val, const Stmt*S, + CheckerContext &C) const; + + /// Deal with symbol escape as a byproduct of a region change. + ProgramStateRef + checkRegionChanges(ProgramStateRef state, + const StoreManager::InvalidatedSymbols *invalidated, + ArrayRef ExplicitRegions, + ArrayRef Regions, + const CallEvent *Call) const; + bool wantsRegionChangeUpdate(ProgramStateRef state) const { + return true; + } +}; + +} // end anonymous namespace + +/// The state of the checker is a map from tracked stream symbols to their +/// state. Let's store it in the ProgramState. +REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState) + +namespace { +class StopTrackingCallback : public SymbolVisitor { + ProgramStateRef state; +public: + StopTrackingCallback(ProgramStateRef st) : state(st) {} + ProgramStateRef getState() const { return state; } + + bool VisitSymbol(SymbolRef sym) { + state = state->remove(sym); + return true; + } +}; +} // end anonymous namespace + + +SimpleStreamChecker::SimpleStreamChecker() : IIfopen(0), IIfclose(0) { + // Initialize the bug types. 
+ DoubleCloseBugType.reset(new BugType("Double fclose", + "Unix Stream API Error")); + + LeakBugType.reset(new BugType("Resource Leak", + "Unix Stream API Error")); + // Sinks are higher importance bugs as well as calls to assert() or exit(0). + LeakBugType->setSuppressOnSink(true); +} + +void SimpleStreamChecker::checkPostCall(const CallEvent &Call, + CheckerContext &C) const { + initIdentifierInfo(C.getASTContext()); + + if (!Call.isGlobalCFunction()) + return; + + if (Call.getCalleeIdentifier() != IIfopen) + return; + + // Get the symbolic value corresponding to the file handle. + SymbolRef FileDesc = Call.getReturnValue().getAsSymbol(); + if (!FileDesc) + return; + + // Generate the next transition (an edge in the exploded graph). + ProgramStateRef State = C.getState(); + State = State->set(FileDesc, StreamState::getOpened()); + C.addTransition(State); +} + +void SimpleStreamChecker::checkPreCall(const CallEvent &Call, + CheckerContext &C) const { + initIdentifierInfo(C.getASTContext()); + + if (!Call.isGlobalCFunction()) + return; + + if (Call.getCalleeIdentifier() != IIfclose) + return; + + if (Call.getNumArgs() != 1) + return; + + // Get the symbolic value corresponding to the file handle. + SymbolRef FileDesc = Call.getArgSVal(0).getAsSymbol(); + if (!FileDesc) + return; + + // Check if the stream has already been closed. + ProgramStateRef State = C.getState(); + const StreamState *SS = State->get(FileDesc); + if (SS && SS->isClosed()) { + reportDoubleClose(FileDesc, Call, C); + return; + } + + // Generate the next transition, in which the stream is closed. + State = State->set(FileDesc, StreamState::getClosed()); + C.addTransition(State); +} + +static bool isLeaked(SymbolRef Sym, const StreamState &SS, + bool IsSymDead, ProgramStateRef State) { + if (IsSymDead && SS.isOpened()) { + // If a symbol is NULL, assume that fopen failed on this path. + // A symbol should only be considered leaked if it is non-null. + ConstraintManager &CMgr = State->getConstraintManager(); + ConditionTruthVal OpenFailed = CMgr.isNull(State, Sym); + return !OpenFailed.isConstrainedTrue(); + } + return false; +} + +void SimpleStreamChecker::checkDeadSymbols(SymbolReaper &SymReaper, + CheckerContext &C) const { + ProgramStateRef State = C.getState(); + SymbolVector LeakedStreams; + StreamMapTy TrackedStreams = State->get(); + for (StreamMapTy::iterator I = TrackedStreams.begin(), + E = TrackedStreams.end(); I != E; ++I) { + SymbolRef Sym = I->first; + bool IsSymDead = SymReaper.isDead(Sym); + + // Collect leaked symbols. + if (isLeaked(Sym, I->second, IsSymDead, State)) + LeakedStreams.push_back(Sym); + + // Remove the dead symbol from the streams map. + if (IsSymDead) + State = State->remove(Sym); + } + + ExplodedNode *N = C.addTransition(State); + reportLeaks(LeakedStreams, C, N); +} + +void SimpleStreamChecker::reportDoubleClose(SymbolRef FileDescSym, + const CallEvent &Call, + CheckerContext &C) const { + // We reached a bug, stop exploring the path here by generating a sink. + ExplodedNode *ErrNode = C.generateSink(); + // If we've already reached this node on another path, return. + if (!ErrNode) + return; + + // Generate the report. + BugReport *R = new BugReport(*DoubleCloseBugType, + "Closing a previously closed file stream", ErrNode); + R->addRange(Call.getSourceRange()); + R->markInteresting(FileDescSym); + C.emitReport(R); +} + +void SimpleStreamChecker::reportLeaks(SymbolVector LeakedStreams, + CheckerContext &C, + ExplodedNode *ErrNode) const { + // Attach bug reports to the leak node. 
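For orientation, the new SimpleStreamChecker above moves a stream symbol between Opened and Closed states across fopen/fclose calls. The two client-side patterns it is written to flag look roughly like the sketch below; this is illustrative only, not part of the patch, and the function names are hypothetical.

#include <cstdio>

// Hypothetical client code, shown only to illustrate the diagnostics.
void double_close(const char *Path) {
  FILE *F = fopen(Path, "r");
  if (!F)
    return;
  fclose(F);
  fclose(F);   // stream symbol is already Closed -> "Double fclose" report
}

void leak(const char *Path) {
  FILE *F = fopen(Path, "r");
  if (!F)
    return;
  // F goes dead here while still Opened and constrained non-null -> leak report
}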
+ // TODO: Identify the leaked file descriptor. + for (llvm::SmallVector::iterator + I = LeakedStreams.begin(), E = LeakedStreams.end(); I != E; ++I) { + BugReport *R = new BugReport(*LeakBugType, + "Opened file is never closed; potential resource leak", ErrNode); + R->markInteresting(*I); + C.emitReport(R); + } +} + +// Check various ways a symbol can be invalidated. +// Stop tracking symbols when a value escapes as a result of checkBind. +// A value escapes in three possible cases: +// (1) We are binding to something that is not a memory region. +// (2) We are binding to a MemRegion that does not have stack storage +// (3) We are binding to a MemRegion with stack storage that the store +// does not understand. +void SimpleStreamChecker::checkBind(SVal loc, SVal val, const Stmt *S, + CheckerContext &C) const { + // Are we storing to something that causes the value to "escape"? + bool escapes = true; + ProgramStateRef state = C.getState(); + + if (loc::MemRegionVal *regionLoc = dyn_cast(&loc)) { + escapes = !regionLoc->getRegion()->hasStackStorage(); + + if (!escapes) { + // To test (3), generate a new state with the binding added. If it is + // the same state, then it escapes (since the store cannot represent + // the binding). Do this only if we know that the store is not supposed + // to generate the same state. + SVal StoredVal = state->getSVal(regionLoc->getRegion()); + if (StoredVal != val) + escapes = (state == (state->bindLoc(*regionLoc, val))); + } + } + + // If our store can represent the binding and we aren't storing to something + // that doesn't have local storage then just return the state and + // continue as is. + if (!escapes) + return; + + // Otherwise, find all symbols referenced by 'val' that we are tracking + // and stop tracking them. + state = state->scanReachableSymbols(val).getState(); + C.addTransition(state); +} + +bool SimpleStreamChecker::guaranteedNotToCloseFile(const CallEvent &Call) const{ + // If it's not in a system header, assume it might close a file. + if (!Call.isInSystemHeader()) + return false; + + // Handle cases where we know a buffer's /address/ can escape. + if (Call.argumentsMayEscape()) + return false; + + // Note, even though fclose closes the file, we do not list it here + // since the checker is modeling the call. + + return true; +} + +// If the symbol we are tracking is invalidated, do not track the symbol as +// we cannot reason about it anymore. +ProgramStateRef +SimpleStreamChecker::checkRegionChanges(ProgramStateRef State, + const StoreManager::InvalidatedSymbols *invalidated, + ArrayRef ExplicitRegions, + ArrayRef Regions, + const CallEvent *Call) const { + + if (!invalidated || invalidated->empty()) + return State; + + // If it's a call which might close the file, we assume that all regions + // (explicit and implicit) escaped. Otherwise, whitelist explicit pointers + // (the parameters to the call); we still can track them. + llvm::SmallPtrSet WhitelistedSymbols; + if (!Call || guaranteedNotToCloseFile(*Call)) { + for (ArrayRef::iterator I = ExplicitRegions.begin(), + E = ExplicitRegions.end(); I != E; ++I) { + if (const SymbolicRegion *R = (*I)->StripCasts()->getAs()) + WhitelistedSymbols.insert(R->getSymbol()); + } + } + + for (StoreManager::InvalidatedSymbols::const_iterator I=invalidated->begin(), + E = invalidated->end(); I!=E; ++I) { + SymbolRef sym = *I; + if (WhitelistedSymbols.count(sym)) + continue; + // The symbol escaped. Optimistically, assume that the corresponding file + // handle will be closed somewhere else. 
+ State = State->remove(sym); + } + return State; +} + +void SimpleStreamChecker::initIdentifierInfo(ASTContext &Ctx) const { + if (IIfopen) + return; + IIfopen = &Ctx.Idents.get("fopen"); + IIfclose = &Ctx.Idents.get("fclose"); +} + +void ento::registerSimpleStreamChecker(CheckerManager &mgr) { + mgr.registerChecker(); +} diff --git a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp index 54cf569..0c2f266 100644 --- a/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp @@ -109,7 +109,7 @@ void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion * if (range.isValid()) report->addRange(range); - C.EmitReport(report); + C.emitReport(report); } void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS, @@ -118,8 +118,10 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS, const Expr *RetE = RS->getRetValue(); if (!RetE) return; - - SVal V = C.getState()->getSVal(RetE, C.getLocationContext()); + RetE = RetE->IgnoreParens(); + + const LocationContext *LCtx = C.getLocationContext(); + SVal V = C.getState()->getSVal(RetE, LCtx); const MemRegion *R = V.getAsRegion(); if (!R) @@ -132,8 +134,9 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS, return; // Return stack memory in an ancestor stack frame is fine. - const StackFrameContext *SFC = SS->getStackFrame(); - if (SFC != C.getLocationContext()->getCurrentStackFrame()) + const StackFrameContext *CurFrame = LCtx->getCurrentStackFrame(); + const StackFrameContext *MemFrame = SS->getStackFrame(); + if (MemFrame != CurFrame) return; // Automatic reference counting automatically copies blocks. @@ -141,6 +144,14 @@ void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS, isa(R)) return; + // Returning a record by value is fine. (In this case, the returned + // expression will be a copy-constructor, possibly wrapped in an + // ExprWithCleanups node.) 
+ if (const ExprWithCleanups *Cleanup = dyn_cast(RetE)) + RetE = Cleanup->getSubExpr(); + if (isa(RetE) && RetE->getType()->isRecordType()) + return; + EmitStackError(C, R, RetE); } @@ -221,7 +232,7 @@ void StackAddrEscapeChecker::checkEndPath(CheckerContext &Ctx) const { if (range.isValid()) report->addRange(range); - Ctx.EmitReport(report); + Ctx.emitReport(report); } } diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp index 731dd66..c06ba7c 100644 --- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp @@ -104,15 +104,8 @@ private: } // end anonymous namespace -namespace clang { -namespace ento { - template <> - struct ProgramStateTrait - : public ProgramStatePartialTrait > { - static void *GDMIndex() { static int x; return &x; } - }; -} -} +REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState) + bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const { const FunctionDecl *FD = C.getCalleeDecl(CE); @@ -219,11 +212,11 @@ void StreamChecker::Tmpfile(CheckerContext &C, const CallExpr *CE) const { void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const { ProgramStateRef state = C.getState(); - unsigned Count = C.getCurrentBlockCount(); SValBuilder &svalBuilder = C.getSValBuilder(); const LocationContext *LCtx = C.getPredecessor()->getLocationContext(); DefinedSVal RetVal = - cast(svalBuilder.getConjuredSymbolVal(0, CE, LCtx, Count)); + cast(svalBuilder.conjureSymbolVal(0, CE, LCtx, + C.blockCount())); state = state->BindExpr(CE, C.getLocationContext(), RetVal); ConstraintManager &CM = C.getConstraintManager(); @@ -235,9 +228,9 @@ void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const { if (SymbolRef Sym = RetVal.getAsSymbol()) { // if RetVal is not NULL, set the symbol's state to Opened. stateNotNull = - stateNotNull->set(Sym,StreamState::getOpened(CE)); + stateNotNull->set(Sym,StreamState::getOpened(CE)); stateNull = - stateNull->set(Sym, StreamState::getOpenFailed(CE)); + stateNull->set(Sym, StreamState::getOpenFailed(CE)); C.addTransition(stateNotNull); C.addTransition(stateNull); @@ -287,7 +280,7 @@ void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const { "SEEK_SET, SEEK_END, or SEEK_CUR.")); BugReport *R = new BugReport(*BT_illegalwhence, BT_illegalwhence->getDescription(), N); - C.EmitReport(R); + C.emitReport(R); } } @@ -363,7 +356,7 @@ ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state, BT_nullfp.reset(new BuiltinBug("NULL stream pointer", "Stream pointer might be NULL.")); BugReport *R =new BugReport(*BT_nullfp, BT_nullfp->getDescription(), N); - C.EmitReport(R); + C.emitReport(R); } return 0; } @@ -378,7 +371,7 @@ ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE, if (!Sym) return state; - const StreamState *SS = state->get(Sym); + const StreamState *SS = state->get(Sym); // If the file stream is not tracked, return. if (!SS) @@ -395,22 +388,24 @@ ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE, " closed. Cause undefined behaviour.")); BugReport *R = new BugReport(*BT_doubleclose, BT_doubleclose->getDescription(), N); - C.EmitReport(R); + C.emitReport(R); } return NULL; } // Close the File Descriptor. - return state->set(Sym, StreamState::getClosed(CE)); + return state->set(Sym, StreamState::getClosed(CE)); } void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const { + // TODO: Clean up the state. 
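Both checkDeadSymbols and checkEndPath below walk the tracked map and report any stream still in the Opened state. A sketch of that end-of-path sweep, with plain containers standing in for the REGISTER_MAP_WITH_PROGRAMSTATE map; emitLeak is a hypothetical callback rather than a checker API:

#include <functional>
#include <iostream>
#include <map>

enum class StreamState { Opened, Closed, Escaped };
using StreamMap = std::map<int, StreamState>;   // symbol id -> state

// Mirrors checkEndPath: report every stream that was opened but never closed.
void reportLeaksAtEndOfPath(const StreamMap &M,
                            const std::function<void(int)> &emitLeak) {
  for (const auto &Entry : M)
    if (Entry.second == StreamState::Opened)
      emitLeak(Entry.first);
}

int main() {
  StreamMap M = {{7, StreamState::Opened}, {9, StreamState::Closed}};
  reportLeaksAtEndOfPath(M, [](int Sym) {
    std::cout << "Opened file never closed; potential resource leak: $"
              << Sym << "\n";
  });
}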
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(), E = SymReaper.dead_end(); I != E; ++I) { SymbolRef Sym = *I; ProgramStateRef state = C.getState(); - const StreamState *SS = state->get(Sym); + const StreamState *SS = state->get(Sym); + // TODO: Shouldn't we have a continue here? if (!SS) return; @@ -422,7 +417,7 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper, "Opened File never closed. Potential Resource leak.")); BugReport *R = new BugReport(*BT_ResourceLeak, BT_ResourceLeak->getDescription(), N); - C.EmitReport(R); + C.emitReport(R); } } } @@ -430,10 +425,9 @@ void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper, void StreamChecker::checkEndPath(CheckerContext &Ctx) const { ProgramStateRef state = Ctx.getState(); - typedef llvm::ImmutableMap SymMap; - SymMap M = state->get(); + StreamMapTy M = state->get(); - for (SymMap::iterator I = M.begin(), E = M.end(); I != E; ++I) { + for (StreamMapTy::iterator I = M.begin(), E = M.end(); I != E; ++I) { StreamState SS = I->second; if (SS.isOpened()) { ExplodedNode *N = Ctx.addTransition(state); @@ -443,7 +437,7 @@ void StreamChecker::checkEndPath(CheckerContext &Ctx) const { "Opened File never closed. Potential Resource leak.")); BugReport *R = new BugReport(*BT_ResourceLeak, BT_ResourceLeak->getDescription(), N); - Ctx.EmitReport(R); + Ctx.emitReport(R); } } } @@ -460,12 +454,12 @@ void StreamChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const { if (!Sym) return; - const StreamState *SS = state->get(Sym); + const StreamState *SS = state->get(Sym); if(!SS) return; if (SS->isOpened()) - state = state->set(Sym, StreamState::getEscaped(S)); + state = state->set(Sym, StreamState::getEscaped(S)); C.addTransition(state); } diff --git a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp index 1133682..382be84 100644 --- a/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp @@ -52,7 +52,7 @@ void TaintTesterChecker::checkPostStmt(const Expr *E, initBugType(); BugReport *report = new BugReport(*BT, "tainted",N); report->addRange(E->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } } } diff --git a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp index 70a33c7..70e141e 100644 --- a/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp @@ -99,11 +99,10 @@ void UndefBranchChecker::checkBranchCondition(const Stmt *Condition, // Emit the bug report. 
BugReport *R = new BugReport(*BT, BT->getDescription(), N); - bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, R); + bugreporter::trackNullOrUndefValue(N, Ex, *R); R->addRange(Ex->getSourceRange()); - R->disablePathPruning(); - Ctx.EmitReport(R); + Ctx.emitReport(R); } } } diff --git a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp index 675b38a..30ccffa 100644 --- a/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp @@ -96,7 +96,7 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE, R->addVisitor(new FindLastStoreBRVisitor(VRVal, VR)); R->disablePathPruning(); // need location of block - C.EmitReport(R); + C.emitReport(R); } } } diff --git a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp index e220499..415bab5 100644 --- a/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp @@ -76,13 +76,12 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B, BugReport *report = new BugReport(*BT, OS.str(), N); if (Ex) { report->addRange(Ex->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, Ex, report); + bugreporter::trackNullOrUndefValue(N, Ex, *report); } else - bugreporter::addTrackNullOrUndefValueVisitor(N, B, report); + bugreporter::trackNullOrUndefValue(N, B, *report); - report->disablePathPruning(); - C.EmitReport(report); + C.emitReport(report); } } diff --git a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp index 6ae3c18..b3a83e8 100644 --- a/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp @@ -42,8 +42,8 @@ UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A, // Generate a report for this bug. 
BugReport *R = new BugReport(*BT, BT->getName(), N); R->addRange(A->getIdx()->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, A->getIdx(), R); - C.EmitReport(R); + bugreporter::trackNullOrUndefValue(N, A->getIdx(), *R); + C.emitReport(R); } } } diff --git a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp index 14a884e..410010a 100644 --- a/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp @@ -78,10 +78,9 @@ void UndefinedAssignmentChecker::checkBind(SVal location, SVal val, BugReport *R = new BugReport(*BT, str, N); if (ex) { R->addRange(ex->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, ex, R); + bugreporter::trackNullOrUndefValue(N, ex, *R); } - R->disablePathPruning(); - C.EmitReport(R); + C.emitReport(R); } void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) { diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp index d35455c..171e15b 100644 --- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp @@ -41,6 +41,7 @@ public: void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const; void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const; void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const; + void CheckReallocfZero(CheckerContext &C, const CallExpr *CE) const; void CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const; void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const; @@ -138,7 +139,7 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const { "Call to 'open' requires a third argument when " "the 'O_CREAT' flag is set", N); report->addRange(oflagsEx->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } } @@ -183,11 +184,12 @@ void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C, BugReport *report = new BugReport(*BT_pthreadOnce, os.str(), N); report->addRange(CE->getArg(0)->getSourceRange()); - C.EmitReport(report); + C.emitReport(report); } //===----------------------------------------------------------------------===// -// "calloc", "malloc", "realloc", "alloca" and "valloc" with allocation size 0 +// "calloc", "malloc", "realloc", "reallocf", "alloca" and "valloc" +// with allocation size 0 //===----------------------------------------------------------------------===// // FIXME: Eventually these should be rolled into the MallocChecker, but right now // they're more basic and valuable for widespread use. 
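Every function named in the banner above is funnelled through the same test (see the BasicAllocationCheck calls below): look at one argument position of the call and complain if it is a literal zero. A simplified, analyzer-free version; CallInfo and its argument layout are invented for illustration, and std::optional requires C++17:

#include <cassert>
#include <optional>
#include <string>
#include <vector>

struct CallInfo {                            // hypothetical flattened call record
  std::string Callee;
  std::vector<std::optional<long>> Args;     // constant value when known
};

// Mirrors BasicAllocationCheck(C, CE, numArgs, sizeArg, fn): verify the arity
// and flag a size argument that is known to be zero.
bool isZeroByteAllocation(const CallInfo &Call, unsigned NumArgs,
                          unsigned SizeArg) {
  if (Call.Args.size() != NumArgs || SizeArg >= NumArgs)
    return false;
  const std::optional<long> &Size = Call.Args[SizeArg];
  return Size && *Size == 0;
}

int main() {
  CallInfo Realloc{"realloc", {std::nullopt, 0L}};   // realloc(p, 0)
  CallInfo Malloc{"malloc", {16L}};                  // malloc(16)
  assert(isZeroByteAllocation(Realloc, 2, 1));
  assert(!isZeroByteAllocation(Malloc, 1, 0));
}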
@@ -224,8 +226,8 @@ bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C, BugReport *report = new BugReport(*BT_mallocZero, os.str(), N); report->addRange(arg->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, arg, report); - C.EmitReport(report); + bugreporter::trackNullOrUndefValue(N, arg, *report); + C.emitReport(report); return true; } @@ -307,6 +309,11 @@ void UnixAPIChecker::CheckReallocZero(CheckerContext &C, BasicAllocationCheck(C, CE, 2, 1, "realloc"); } +void UnixAPIChecker::CheckReallocfZero(CheckerContext &C, + const CallExpr *CE) const { + BasicAllocationCheck(C, CE, 2, 1, "reallocf"); +} + void UnixAPIChecker::CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const { BasicAllocationCheck(C, CE, 1, 0, "alloca"); @@ -339,6 +346,7 @@ void UnixAPIChecker::checkPreStmt(const CallExpr *CE, .Case("calloc", &UnixAPIChecker::CheckCallocZero) .Case("malloc", &UnixAPIChecker::CheckMallocZero) .Case("realloc", &UnixAPIChecker::CheckReallocZero) + .Case("reallocf", &UnixAPIChecker::CheckReallocfZero) .Cases("alloca", "__builtin_alloca", &UnixAPIChecker::CheckAllocaZero) .Case("valloc", &UnixAPIChecker::CheckVallocZero) .Default(NULL); diff --git a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp index fab4adf..58f9ec0 100644 --- a/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp +++ b/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp @@ -69,8 +69,8 @@ void VLASizeChecker::reportBug(VLASize_Kind Kind, BugReport *report = new BugReport(*BT, os.str(), N); report->addRange(SizeE->getSourceRange()); - bugreporter::addTrackNullOrUndefValueVisitor(N, SizeE, report); - C.EmitReport(report); + bugreporter::trackNullOrUndefValue(N, SizeE, *report); + C.emitReport(report); return; } diff --git a/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/lib/StaticAnalyzer/Core/AnalysisManager.cpp index efeba17..011d4c09 100644 --- a/lib/StaticAnalyzer/Core/AnalysisManager.cpp +++ b/lib/StaticAnalyzer/Core/AnalysisManager.cpp @@ -20,33 +20,19 @@ AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags, StoreManagerCreator storemgr, ConstraintManagerCreator constraintmgr, CheckerManager *checkerMgr, - unsigned maxnodes, unsigned maxvisit, - bool vizdot, bool vizubi, - AnalysisPurgeMode purge, - bool eager, bool trim, - bool useUnoptimizedCFG, - bool addImplicitDtors, - bool eagerlyTrimEGraph, - AnalysisIPAMode ipa, - unsigned inlineMaxStack, - unsigned inlineMaxFunctionSize, - AnalysisInliningMode IMode, - bool NoRetry) - : AnaCtxMgr(useUnoptimizedCFG, addImplicitDtors, /*addInitializers=*/true), - Ctx(ctx), Diags(diags), LangOpts(lang), + AnalyzerOptions &Options) + : AnaCtxMgr(Options.UnoptimizedCFG, + /*AddImplicitDtors=*/true, + /*AddInitializers=*/true, + Options.includeTemporaryDtorsInCFG(), + Options.shouldSynthesizeBodies()), + Ctx(ctx), + Diags(diags), + LangOpts(lang), PathConsumers(PDC), CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr), - CheckerMgr(checkerMgr), - MaxNodes(maxnodes), MaxVisit(maxvisit), - VisualizeEGDot(vizdot), VisualizeEGUbi(vizubi), PurgeDead(purge), - EagerlyAssume(eager), TrimGraph(trim), - EagerlyTrimEGraph(eagerlyTrimEGraph), - IPAMode(ipa), - InlineMaxStackDepth(inlineMaxStack), - InlineMaxFunctionSize(inlineMaxFunctionSize), - InliningMode(IMode), - NoRetryExhausted(NoRetry) -{ + CheckerMgr(checkerMgr), + options(Options) { AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd(); } diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp 
b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp new file mode 100644 index 0000000..da88589 --- /dev/null +++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp @@ -0,0 +1,138 @@ +//===-- AnalyzerOptions.cpp - Analysis Engine Options -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains special accessors for analyzer configuration options +// with string representations. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/raw_ostream.h" + +using namespace clang; +using namespace llvm; + +bool +AnalyzerOptions::mayInlineCXXMemberFunction(CXXInlineableMemberKind K) { + if (IPAMode < Inlining) + return false; + + if (!CXXMemberInliningMode) { + static const char *ModeKey = "c++-inlining"; + + StringRef ModeStr(Config.GetOrCreateValue(ModeKey, + "methods").getValue()); + + CXXInlineableMemberKind &MutableMode = + const_cast(CXXMemberInliningMode); + + MutableMode = llvm::StringSwitch(ModeStr) + .Case("constructors", CIMK_Constructors) + .Case("destructors", CIMK_Destructors) + .Case("none", CIMK_None) + .Case("methods", CIMK_MemberFunctions) + .Default(CXXInlineableMemberKind()); + + if (!MutableMode) { + // FIXME: We should emit a warning here about an unknown inlining kind, + // but the AnalyzerOptions doesn't have access to a diagnostic engine. + MutableMode = CIMK_None; + } + } + + return CXXMemberInliningMode >= K; +} + +static StringRef toString(bool b) { return b ? "true" : "false"; } + +bool AnalyzerOptions::getBooleanOption(StringRef Name, bool DefaultVal) { + // FIXME: We should emit a warning here if the value is something other than + // "true", "false", or the empty string (meaning the default value), + // but the AnalyzerOptions doesn't have access to a diagnostic engine. 
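The lookup getBooleanOption performs just below follows a small pattern: fetch the key from the string table, seeding it with the default's spelling if it was never set, parse the value, and cache the result in an Optional so later queries skip the table. A standalone sketch of that pattern; the Config member and option name are stand-ins for the real AnalyzerOptions table, and std::optional plays the role of llvm::Optional:

#include <cassert>
#include <map>
#include <optional>
#include <string>

class OptionsSketch {
  std::map<std::string, std::string> Config;   // stands in for the string table
  std::optional<bool> CachedTemporaryDtors;

  bool getBooleanOption(const std::string &Name, bool DefaultVal) {
    // Seed the table with the default if the option was never set
    // (mirroring Config.GetOrCreateValue above).
    auto It = Config.emplace(Name, DefaultVal ? "true" : "false").first;
    if (It->second == "true")  return true;
    if (It->second == "false") return false;
    return DefaultVal;                         // unknown spelling: fall back
  }

public:
  explicit OptionsSketch(std::map<std::string, std::string> C)
      : Config(std::move(C)) {}

  bool includeTemporaryDtorsInCFG() {
    if (!CachedTemporaryDtors)
      CachedTemporaryDtors = getBooleanOption("cfg-temporary-dtors", false);
    return *CachedTemporaryDtors;
  }
};

int main() {
  OptionsSketch Opts({{"cfg-temporary-dtors", "true"}});
  assert(Opts.includeTemporaryDtorsInCFG());
  OptionsSketch Defaults({});
  assert(!Defaults.includeTemporaryDtorsInCFG());
}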
+ StringRef V(Config.GetOrCreateValue(Name, toString(DefaultVal)).getValue()); + return llvm::StringSwitch(V) + .Case("true", true) + .Case("false", false) + .Default(DefaultVal); +} + +bool AnalyzerOptions::getBooleanOption(llvm::Optional &V, + StringRef Name, + bool DefaultVal) { + if (!V.hasValue()) + V = getBooleanOption(Name, DefaultVal); + return V.getValue(); +} + +bool AnalyzerOptions::includeTemporaryDtorsInCFG() { + return getBooleanOption(IncludeTemporaryDtorsInCFG, + "cfg-temporary-dtors", + /* Default = */ false); +} + +bool AnalyzerOptions::mayInlineCXXStandardLibrary() { + return getBooleanOption(InlineCXXStandardLibrary, + "c++-stdlib-inlining", + /*Default=*/true); +} + +bool AnalyzerOptions::mayInlineTemplateFunctions() { + return getBooleanOption(InlineTemplateFunctions, + "c++-template-inlining", + /*Default=*/true); +} + +bool AnalyzerOptions::mayInlineObjCMethod() { + return getBooleanOption(ObjCInliningMode, + "objc-inlining", + /* Default = */ true); +} + +bool AnalyzerOptions::shouldPruneNullReturnPaths() { + return getBooleanOption(PruneNullReturnPaths, + "suppress-null-return-paths", + /* Default = */ true); +} + +bool AnalyzerOptions::shouldAvoidSuppressingNullArgumentPaths() { + return getBooleanOption(AvoidSuppressingNullArgumentPaths, + "avoid-suppressing-null-argument-paths", + /* Default = */ false); +} + +int AnalyzerOptions::getOptionAsInteger(StringRef Name, int DefaultVal) { + llvm::SmallString<10> StrBuf; + llvm::raw_svector_ostream OS(StrBuf); + OS << DefaultVal; + + StringRef V(Config.GetOrCreateValue(Name, OS.str()).getValue()); + int Res = DefaultVal; + bool b = V.getAsInteger(10, Res); + assert(!b && "analyzer-config option should be numeric"); + (void) b; + return Res; +} + +unsigned AnalyzerOptions::getAlwaysInlineSize() { + if (!AlwaysInlineSize.hasValue()) + AlwaysInlineSize = getOptionAsInteger("ipa-always-inline-size", 3); + return AlwaysInlineSize.getValue(); +} + +unsigned AnalyzerOptions::getGraphTrimInterval() { + if (!GraphTrimInterval.hasValue()) + GraphTrimInterval = getOptionAsInteger("graph-trim-interval", 1000); + return GraphTrimInterval.getValue(); +} + +bool AnalyzerOptions::shouldSynthesizeBodies() { + return getBooleanOption("faux-bodies", true); +} diff --git a/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp b/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp deleted file mode 100644 index 8897756..0000000 --- a/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp +++ /dev/null @@ -1,446 +0,0 @@ -//== BasicConstraintManager.cpp - Manage basic constraints.------*- C++ -*--==// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines BasicConstraintManager, a class that tracks simple -// equality and inequality constraints on symbolic values of ProgramState. 
-// -//===----------------------------------------------------------------------===// - -#include "SimpleConstraintManager.h" -#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h" -#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h" -#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h" -#include "llvm/Support/raw_ostream.h" - -using namespace clang; -using namespace ento; - - -namespace { class ConstNotEq {}; } -namespace { class ConstEq {}; } - -typedef llvm::ImmutableMap ConstNotEqTy; -typedef llvm::ImmutableMap ConstEqTy; - -static int ConstEqIndex = 0; -static int ConstNotEqIndex = 0; - -namespace clang { -namespace ento { -template<> -struct ProgramStateTrait : - public ProgramStatePartialTrait { - static inline void *GDMIndex() { return &ConstNotEqIndex; } -}; - -template<> -struct ProgramStateTrait : public ProgramStatePartialTrait { - static inline void *GDMIndex() { return &ConstEqIndex; } -}; -} -} - -namespace { -// BasicConstraintManager only tracks equality and inequality constraints of -// constants and integer variables. -class BasicConstraintManager - : public SimpleConstraintManager { - ProgramState::IntSetTy::Factory ISetFactory; -public: - BasicConstraintManager(ProgramStateManager &statemgr, SubEngine &subengine) - : SimpleConstraintManager(subengine, statemgr.getBasicVals()), - ISetFactory(statemgr.getAllocator()) {} - - ProgramStateRef assumeSymEquality(ProgramStateRef State, SymbolRef Sym, - const llvm::APSInt &V, - const llvm::APSInt &Adjustment, - bool Assumption); - - ProgramStateRef assumeSymNE(ProgramStateRef State, SymbolRef Sym, - const llvm::APSInt &V, - const llvm::APSInt &Adjustment) { - return assumeSymEquality(State, Sym, V, Adjustment, false); - } - - ProgramStateRef assumeSymEQ(ProgramStateRef State, SymbolRef Sym, - const llvm::APSInt &V, - const llvm::APSInt &Adjustment) { - return assumeSymEquality(State, Sym, V, Adjustment, true); - } - - ProgramStateRef assumeSymLT(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V, - const llvm::APSInt& Adjustment); - - ProgramStateRef assumeSymGT(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V, - const llvm::APSInt& Adjustment); - - ProgramStateRef assumeSymGE(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V, - const llvm::APSInt& Adjustment); - - ProgramStateRef assumeSymLE(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V, - const llvm::APSInt& Adjustment); - - ProgramStateRef AddEQ(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V); - - ProgramStateRef AddNE(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V); - - const llvm::APSInt* getSymVal(ProgramStateRef state, - SymbolRef sym) const; - - bool isNotEqual(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V) const; - - bool isEqual(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V) const; - - ProgramStateRef removeDeadBindings(ProgramStateRef state, - SymbolReaper& SymReaper); - - bool performTest(llvm::APSInt SymVal, llvm::APSInt Adjustment, - BinaryOperator::Opcode Op, llvm::APSInt ComparisonVal); - - void print(ProgramStateRef state, - raw_ostream &Out, - const char* nl, - const char *sep); -}; - -} // end anonymous namespace - -ConstraintManager* -ento::CreateBasicConstraintManager(ProgramStateManager& statemgr, - SubEngine &subengine) { - return new BasicConstraintManager(statemgr, subengine); -} - -// FIXME: This is a more general utility and should live somewhere else. 
-bool BasicConstraintManager::performTest(llvm::APSInt SymVal, - llvm::APSInt Adjustment, - BinaryOperator::Opcode Op, - llvm::APSInt ComparisonVal) { - APSIntType Type(Adjustment); - Type.apply(SymVal); - Type.apply(ComparisonVal); - SymVal += Adjustment; - - assert(BinaryOperator::isComparisonOp(Op)); - BasicValueFactory &BVF = getBasicVals(); - const llvm::APSInt *Result = BVF.evalAPSInt(Op, SymVal, ComparisonVal); - assert(Result && "Comparisons should always have valid results."); - - return Result->getBoolValue(); -} - -ProgramStateRef -BasicConstraintManager::assumeSymEquality(ProgramStateRef State, SymbolRef Sym, - const llvm::APSInt &V, - const llvm::APSInt &Adjustment, - bool Assumption) { - // Before we do any real work, see if the value can even show up. - APSIntType AdjustmentType(Adjustment); - if (AdjustmentType.testInRange(V) != APSIntType::RTR_Within) - return Assumption ? NULL : State; - - // Get the symbol type. - BasicValueFactory &BVF = getBasicVals(); - ASTContext &Ctx = BVF.getContext(); - APSIntType SymbolType = BVF.getAPSIntType(Sym->getType(Ctx)); - - // First, see if the adjusted value is within range for the symbol. - llvm::APSInt Adjusted = AdjustmentType.convert(V) - Adjustment; - if (SymbolType.testInRange(Adjusted) != APSIntType::RTR_Within) - return Assumption ? NULL : State; - - // Now we can do things properly in the symbol space. - SymbolType.apply(Adjusted); - - // Second, determine if sym == X, where X+Adjustment != V. - if (const llvm::APSInt *X = getSymVal(State, Sym)) { - bool IsFeasible = (*X == Adjusted); - return (IsFeasible == Assumption) ? State : NULL; - } - - // Third, determine if we already know sym+Adjustment != V. - if (isNotEqual(State, Sym, Adjusted)) - return Assumption ? NULL : State; - - // If we reach here, sym is not a constant and we don't know if it is != V. - // Make the correct assumption. - if (Assumption) - return AddEQ(State, Sym, Adjusted); - else - return AddNE(State, Sym, Adjusted); -} - -// The logic for these will be handled in another ConstraintManager. -// Approximate it here anyway by handling some edge cases. -ProgramStateRef -BasicConstraintManager::assumeSymLT(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt &V, - const llvm::APSInt &Adjustment) { - APSIntType ComparisonType(V), AdjustmentType(Adjustment); - - // Is 'V' out of range above the type? - llvm::APSInt Max = AdjustmentType.getMaxValue(); - if (V > ComparisonType.convert(Max)) { - // This path is trivially feasible. - return state; - } - - // Is 'V' the smallest possible value, or out of range below the type? - llvm::APSInt Min = AdjustmentType.getMinValue(); - if (V <= ComparisonType.convert(Min)) { - // sym cannot be any value less than 'V'. This path is infeasible. - return NULL; - } - - // Reject a path if the value of sym is a constant X and !(X+Adj < V). - if (const llvm::APSInt *X = getSymVal(state, sym)) { - bool isFeasible = performTest(*X, Adjustment, BO_LT, V); - return isFeasible ? state : NULL; - } - - // FIXME: For now have assuming x < y be the same as assuming sym != V; - return assumeSymNE(state, sym, V, Adjustment); -} - -ProgramStateRef -BasicConstraintManager::assumeSymGT(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt &V, - const llvm::APSInt &Adjustment) { - APSIntType ComparisonType(V), AdjustmentType(Adjustment); - - // Is 'V' the largest possible value, or out of range above the type? 
- llvm::APSInt Max = AdjustmentType.getMaxValue(); - if (V >= ComparisonType.convert(Max)) { - // sym cannot be any value greater than 'V'. This path is infeasible. - return NULL; - } - - // Is 'V' out of range below the type? - llvm::APSInt Min = AdjustmentType.getMinValue(); - if (V < ComparisonType.convert(Min)) { - // This path is trivially feasible. - return state; - } - - // Reject a path if the value of sym is a constant X and !(X+Adj > V). - if (const llvm::APSInt *X = getSymVal(state, sym)) { - bool isFeasible = performTest(*X, Adjustment, BO_GT, V); - return isFeasible ? state : NULL; - } - - // FIXME: For now have assuming x > y be the same as assuming sym != V; - return assumeSymNE(state, sym, V, Adjustment); -} - -ProgramStateRef -BasicConstraintManager::assumeSymGE(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt &V, - const llvm::APSInt &Adjustment) { - APSIntType ComparisonType(V), AdjustmentType(Adjustment); - - // Is 'V' the largest possible value, or out of range above the type? - llvm::APSInt Max = AdjustmentType.getMaxValue(); - ComparisonType.apply(Max); - - if (V > Max) { - // sym cannot be any value greater than 'V'. This path is infeasible. - return NULL; - } else if (V == Max) { - // If the path is feasible then as a consequence we know that - // 'sym+Adjustment == V' because there are no larger values. - // Add this constraint. - return assumeSymEQ(state, sym, V, Adjustment); - } - - // Is 'V' out of range below the type? - llvm::APSInt Min = AdjustmentType.getMinValue(); - if (V < ComparisonType.convert(Min)) { - // This path is trivially feasible. - return state; - } - - // Reject a path if the value of sym is a constant X and !(X+Adj >= V). - if (const llvm::APSInt *X = getSymVal(state, sym)) { - bool isFeasible = performTest(*X, Adjustment, BO_GE, V); - return isFeasible ? state : NULL; - } - - return state; -} - -ProgramStateRef -BasicConstraintManager::assumeSymLE(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt &V, - const llvm::APSInt &Adjustment) { - APSIntType ComparisonType(V), AdjustmentType(Adjustment); - - // Is 'V' out of range above the type? - llvm::APSInt Max = AdjustmentType.getMaxValue(); - if (V > ComparisonType.convert(Max)) { - // This path is trivially feasible. - return state; - } - - // Is 'V' the smallest possible value, or out of range below the type? - llvm::APSInt Min = AdjustmentType.getMinValue(); - ComparisonType.apply(Min); - - if (V < Min) { - // sym cannot be any value less than 'V'. This path is infeasible. - return NULL; - } else if (V == Min) { - // If the path is feasible then as a consequence we know that - // 'sym+Adjustment == V' because there are no smaller values. - // Add this constraint. - return assumeSymEQ(state, sym, V, Adjustment); - } - - // Reject a path if the value of sym is a constant X and !(X+Adj >= V). - if (const llvm::APSInt *X = getSymVal(state, sym)) { - bool isFeasible = performTest(*X, Adjustment, BO_LE, V); - return isFeasible ? state : NULL; - } - - return state; -} - -ProgramStateRef BasicConstraintManager::AddEQ(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V) { - // Now that we have an actual value, we can throw out the NE-set. - // Create a new state with the old bindings replaced. - state = state->remove(sym); - return state->set(sym, &getBasicVals().getValue(V)); -} - -ProgramStateRef BasicConstraintManager::AddNE(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V) { - - // First, retrieve the NE-set associated with the given symbol. 
- ConstNotEqTy::data_type* T = state->get(sym); - ProgramState::IntSetTy S = T ? *T : ISetFactory.getEmptySet(); - - // Now add V to the NE set. - S = ISetFactory.add(S, &getBasicVals().getValue(V)); - - // Create a new state with the old binding replaced. - return state->set(sym, S); -} - -const llvm::APSInt* BasicConstraintManager::getSymVal(ProgramStateRef state, - SymbolRef sym) const { - const ConstEqTy::data_type* T = state->get(sym); - return T ? *T : NULL; -} - -bool BasicConstraintManager::isNotEqual(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V) const { - - // Retrieve the NE-set associated with the given symbol. - const ConstNotEqTy::data_type* T = state->get(sym); - - // See if V is present in the NE-set. - return T ? T->contains(&getBasicVals().getValue(V)) : false; -} - -bool BasicConstraintManager::isEqual(ProgramStateRef state, - SymbolRef sym, - const llvm::APSInt& V) const { - // Retrieve the EQ-set associated with the given symbol. - const ConstEqTy::data_type* T = state->get(sym); - // See if V is present in the EQ-set. - return T ? **T == V : false; -} - -/// Scan all symbols referenced by the constraints. If the symbol is not alive -/// as marked in LSymbols, mark it as dead in DSymbols. -ProgramStateRef -BasicConstraintManager::removeDeadBindings(ProgramStateRef state, - SymbolReaper& SymReaper) { - - ConstEqTy CE = state->get(); - ConstEqTy::Factory& CEFactory = state->get_context(); - - for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) { - SymbolRef sym = I.getKey(); - if (SymReaper.maybeDead(sym)) - CE = CEFactory.remove(CE, sym); - } - state = state->set(CE); - - ConstNotEqTy CNE = state->get(); - ConstNotEqTy::Factory& CNEFactory = state->get_context(); - - for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) { - SymbolRef sym = I.getKey(); - if (SymReaper.maybeDead(sym)) - CNE = CNEFactory.remove(CNE, sym); - } - - return state->set(CNE); -} - -void BasicConstraintManager::print(ProgramStateRef state, - raw_ostream &Out, - const char* nl, const char *sep) { - // Print equality constraints. - - ConstEqTy CE = state->get(); - - if (!CE.isEmpty()) { - Out << nl << sep << "'==' constraints:"; - for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) - Out << nl << " $" << I.getKey() << " : " << *I.getData(); - } - - // Print != constraints. - - ConstNotEqTy CNE = state->get(); - - if (!CNE.isEmpty()) { - Out << nl << sep << "'!=' constraints:"; - - for (ConstNotEqTy::iterator I = CNE.begin(), EI = CNE.end(); I!=EI; ++I) { - Out << nl << " $" << I.getKey() << " : "; - bool isFirst = true; - - ProgramState::IntSetTy::iterator J = I.getData().begin(), - EJ = I.getData().end(); - - for ( ; J != EJ; ++J) { - if (isFirst) isFirst = false; - else Out << ", "; - - Out << (*J)->getSExtValue(); // Hack: should print to raw_ostream. 
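The bookkeeping being deleted above amounts to two pieces of per-symbol data: at most one known equality, plus a set of known disequalities that is discarded as soon as a concrete value is learned (AddEQ). A compact model of just that state, with int standing in for SymbolRef and long for the APSInt values:

#include <cassert>
#include <map>
#include <set>

struct ConstraintsSketch {
  std::map<int, long> ConstEq;                 // sym == value
  std::map<int, std::set<long>> ConstNotEq;    // sym != { values }

  void addEQ(int Sym, long V) {
    ConstNotEq.erase(Sym);     // a concrete value supersedes the NE-set
    ConstEq[Sym] = V;
  }
  void addNE(int Sym, long V) { ConstNotEq[Sym].insert(V); }

  bool isEqual(int Sym, long V) const {
    auto It = ConstEq.find(Sym);
    return It != ConstEq.end() && It->second == V;
  }
  bool isNotEqual(int Sym, long V) const {
    auto It = ConstNotEq.find(Sym);
    return It != ConstNotEq.end() && It->second.count(V) != 0;
  }
};

int main() {
  ConstraintsSketch C;
  C.addNE(/*Sym=*/1, 0);         // assume $1 != 0
  assert(C.isNotEqual(1, 0));
  C.addEQ(1, 5);                 // later learn $1 == 5
  assert(C.isEqual(1, 5) && !C.isNotEqual(1, 0));
}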
- } - } - } -} diff --git a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp index 20c7361..a6c400f 100644 --- a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp +++ b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp @@ -101,11 +101,7 @@ const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth, const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) { - unsigned bits = Ctx.getTypeSize(T); - llvm::APSInt V(bits, - T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T)); - V = X; - return getValue(V); + return getValue(getAPSIntType(T).getValue(X)); } const CompoundValData* diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp index 571baec..c898d65 100644 --- a/lib/StaticAnalyzer/Core/BugReporter.cpp +++ b/lib/StaticAnalyzer/Core/BugReporter.cpp @@ -118,10 +118,82 @@ GetCurrentOrNextStmt(const ExplodedNode *N) { // Diagnostic cleanup. //===----------------------------------------------------------------------===// +static PathDiagnosticEventPiece * +eventsDescribeSameCondition(PathDiagnosticEventPiece *X, + PathDiagnosticEventPiece *Y) { + // Prefer diagnostics that come from ConditionBRVisitor over + // those that came from TrackConstraintBRVisitor. + const void *tagPreferred = ConditionBRVisitor::getTag(); + const void *tagLesser = TrackConstraintBRVisitor::getTag(); + + if (X->getLocation() != Y->getLocation()) + return 0; + + if (X->getTag() == tagPreferred && Y->getTag() == tagLesser) + return X; + + if (Y->getTag() == tagPreferred && X->getTag() == tagLesser) + return Y; + + return 0; +} + +/// An optimization pass over PathPieces that removes redundant diagnostics +/// generated by both ConditionBRVisitor and TrackConstraintBRVisitor. Both +/// BugReporterVisitors use different methods to generate diagnostics, with +/// one capable of emitting diagnostics in some cases but not in others. This +/// can lead to redundant diagnostic pieces at the same point in a path. +static void removeRedundantMsgs(PathPieces &path) { + unsigned N = path.size(); + if (N < 2) + return; + // NOTE: this loop intentionally is not using an iterator. Instead, we + // are streaming the path and modifying it in place. This is done by + // grabbing the front, processing it, and if we decide to keep it append + // it to the end of the path. The entire path is processed in this way. + for (unsigned i = 0; i < N; ++i) { + IntrusiveRefCntPtr piece(path.front()); + path.pop_front(); + + switch (piece->getKind()) { + case clang::ento::PathDiagnosticPiece::Call: + removeRedundantMsgs(cast(piece)->path); + break; + case clang::ento::PathDiagnosticPiece::Macro: + removeRedundantMsgs(cast(piece)->subPieces); + break; + case clang::ento::PathDiagnosticPiece::ControlFlow: + break; + case clang::ento::PathDiagnosticPiece::Event: { + if (i == N-1) + break; + + if (PathDiagnosticEventPiece *nextEvent = + dyn_cast(path.front().getPtr())) { + PathDiagnosticEventPiece *event = + cast(piece); + // Check to see if we should keep one of the two pieces. If we + // come up with a preference, record which piece to keep, and consume + // another piece from the path. + if (PathDiagnosticEventPiece *pieceToKeep = + eventsDescribeSameCondition(event, nextEvent)) { + piece = pieceToKeep; + path.pop_front(); + ++i; + } + } + break; + } + } + path.push_back(piece); + } +} + /// Recursively scan through a path and prune out calls and macros pieces /// that aren't needed. 
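removeRedundantMsgs above streams the path through its own deque: pop the front, optionally merge it with the piece that now sits at the front, and push the survivor to the back, so after exactly N iterations the deque holds the filtered path in the original order. The same idiom on plain data; Piece and pickOne are simplified stand-ins for PathDiagnosticEventPiece and eventsDescribeSameCondition:

#include <cassert>
#include <cstddef>
#include <deque>
#include <string>

struct Piece {
  unsigned Loc;         // stands in for the diagnostic location
  bool Preferred;       // e.g. produced by ConditionBRVisitor
  std::string Msg;
};

// Keep the preferred piece when two adjacent pieces describe the same location.
static const Piece *pickOne(const Piece &A, const Piece &B) {
  if (A.Loc != B.Loc) return nullptr;
  if (A.Preferred && !B.Preferred) return &A;
  if (B.Preferred && !A.Preferred) return &B;
  return nullptr;
}

void removeRedundant(std::deque<Piece> &Path) {
  const std::size_t N = Path.size();
  if (N < 2)
    return;
  for (std::size_t i = 0; i < N; ++i) {
    Piece Cur = Path.front();
    Path.pop_front();
    if (i != N - 1) {
      if (const Piece *Keep = pickOne(Cur, Path.front())) {
        Cur = *Keep;       // keep the preferred piece...
        Path.pop_front();  // ...and consume its redundant neighbour
        ++i;
      }
    }
    Path.push_back(Cur);   // survivors rotate to the back, order preserved
  }
}

int main() {
  std::deque<Piece> P = {{10, false, "constraint"}, {10, true, "condition"},
                         {20, false, "other"}};
  removeRedundant(P);
  assert(P.size() == 2 && P.front().Msg == "condition");
}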
Return true if afterwards the path contains /// "interesting stuff" which means it should be pruned from the parent path. -static bool RemoveUneededCalls(PathPieces &pieces) { +bool BugReporter::RemoveUneededCalls(PathPieces &pieces, BugReport *R, + PathDiagnosticCallPiece *CallWithLoc) { bool containsSomethingInteresting = false; const unsigned N = pieces.size(); @@ -131,30 +203,49 @@ static bool RemoveUneededCalls(PathPieces &pieces) { IntrusiveRefCntPtr piece(pieces.front()); pieces.pop_front(); + // Throw away pieces with invalid locations. + if (piece->getKind() != PathDiagnosticPiece::Call && + piece->getLocation().asLocation().isInvalid()) + continue; + switch (piece->getKind()) { case PathDiagnosticPiece::Call: { PathDiagnosticCallPiece *call = cast(piece); + // Check if the location context is interesting. + assert(LocationContextMap.count(call)); + if (R->isInteresting(LocationContextMap[call])) { + containsSomethingInteresting = true; + break; + } // Recursively clean out the subclass. Keep this call around if // it contains any informative diagnostics. - if (!RemoveUneededCalls(call->path)) + PathDiagnosticCallPiece *NewCallWithLoc = + call->getLocation().asLocation().isValid() + ? call : CallWithLoc; + + if (!RemoveUneededCalls(call->path, R, NewCallWithLoc)) continue; + + if (NewCallWithLoc == CallWithLoc && CallWithLoc) { + call->callEnter = CallWithLoc->callEnter; + } + containsSomethingInteresting = true; break; } case PathDiagnosticPiece::Macro: { PathDiagnosticMacroPiece *macro = cast(piece); - if (!RemoveUneededCalls(macro->subPieces)) + if (!RemoveUneededCalls(macro->subPieces, R)) continue; containsSomethingInteresting = true; break; } case PathDiagnosticPiece::Event: { PathDiagnosticEventPiece *event = cast(piece); + // We never throw away an event, but we do throw it away wholesale // as part of a path if we throw the entire path away. - if (event->isPrunable()) - continue; - containsSomethingInteresting = true; + containsSomethingInteresting |= !event->isPrunable(); break; } case PathDiagnosticPiece::ControlFlow: @@ -382,6 +473,35 @@ PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) { } //===----------------------------------------------------------------------===// +// "Visitors only" path diagnostic generation algorithm. +//===----------------------------------------------------------------------===// +static bool GenerateVisitorsOnlyPathDiagnostic(PathDiagnostic &PD, + PathDiagnosticBuilder &PDB, + const ExplodedNode *N, + ArrayRef visitors) { + // All path generation skips the very first node (the error node). + // This is because there is special handling for the end-of-path note. + N = N->getFirstPred(); + if (!N) + return true; + + BugReport *R = PDB.getBugReport(); + while (const ExplodedNode *Pred = N->getFirstPred()) { + for (ArrayRef::iterator I = visitors.begin(), + E = visitors.end(); + I != E; ++I) { + // Visit all the node pairs, but throw the path pieces away. + PathDiagnosticPiece *Piece = (*I)->VisitNode(N, Pred, PDB, *R); + delete Piece; + } + + N = Pred; + } + + return R->isValid(); +} + +//===----------------------------------------------------------------------===// // "Minimal" path diagnostic generation algorithm. 
//===----------------------------------------------------------------------===// typedef std::pair StackDiagPair; @@ -412,7 +532,7 @@ static void updateStackPiecesWithMessage(PathDiagnosticPiece *P, static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM); -static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, +static bool GenerateMinimalPathDiagnostic(PathDiagnostic& PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N, ArrayRef visitors) { @@ -430,55 +550,60 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, NextNode = GetPredecessorNode(N); ProgramPoint P = N->getLocation(); - - if (const CallExitEnd *CE = dyn_cast(&P)) { - PathDiagnosticCallPiece *C = - PathDiagnosticCallPiece::construct(N, *CE, SMgr); - PD.getActivePath().push_front(C); - PD.pushActivePath(&C->path); - CallStack.push_back(StackDiagPair(C, N)); - continue; - } - - if (const CallEnter *CE = dyn_cast(&P)) { - // Flush all locations, and pop the active path. - bool VisitedEntireCall = PD.isWithinCall(); - PD.popActivePath(); - - // Either we just added a bunch of stuff to the top-level path, or - // we have a previous CallExitEnd. If the former, it means that the - // path terminated within a function call. We must then take the - // current contents of the active path and place it within - // a new PathDiagnosticCallPiece. - PathDiagnosticCallPiece *C; - if (VisitedEntireCall) { - C = cast(PD.getActivePath().front()); - } else { - const Decl *Caller = CE->getLocationContext()->getDecl(); - C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller); + + do { + if (const CallExitEnd *CE = dyn_cast(&P)) { + PathDiagnosticCallPiece *C = + PathDiagnosticCallPiece::construct(N, *CE, SMgr); + GRBugReporter& BR = PDB.getBugReporter(); + BR.addCallPieceLocationContextPair(C, CE->getCalleeContext()); + PD.getActivePath().push_front(C); + PD.pushActivePath(&C->path); + CallStack.push_back(StackDiagPair(C, N)); + break; } - C->setCallee(*CE, SMgr); - if (!CallStack.empty()) { - assert(CallStack.back().first == C); - CallStack.pop_back(); + if (const CallEnter *CE = dyn_cast(&P)) { + // Flush all locations, and pop the active path. + bool VisitedEntireCall = PD.isWithinCall(); + PD.popActivePath(); + + // Either we just added a bunch of stuff to the top-level path, or + // we have a previous CallExitEnd. If the former, it means that the + // path terminated within a function call. We must then take the + // current contents of the active path and place it within + // a new PathDiagnosticCallPiece. 
+ PathDiagnosticCallPiece *C; + if (VisitedEntireCall) { + C = cast(PD.getActivePath().front()); + } else { + const Decl *Caller = CE->getLocationContext()->getDecl(); + C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller); + GRBugReporter& BR = PDB.getBugReporter(); + BR.addCallPieceLocationContextPair(C, CE->getCalleeContext()); + } + + C->setCallee(*CE, SMgr); + if (!CallStack.empty()) { + assert(CallStack.back().first == C); + CallStack.pop_back(); + } + break; } - continue; - } - if (const BlockEdge *BE = dyn_cast(&P)) { - const CFGBlock *Src = BE->getSrc(); - const CFGBlock *Dst = BE->getDst(); - const Stmt *T = Src->getTerminator(); + if (const BlockEdge *BE = dyn_cast(&P)) { + const CFGBlock *Src = BE->getSrc(); + const CFGBlock *Dst = BE->getDst(); + const Stmt *T = Src->getTerminator(); - if (!T) - continue; + if (!T) + break; - PathDiagnosticLocation Start = - PathDiagnosticLocation::createBegin(T, SMgr, - N->getLocationContext()); + PathDiagnosticLocation Start = + PathDiagnosticLocation::createBegin(T, SMgr, + N->getLocationContext()); - switch (T->getStmtClass()) { + switch (T->getStmtClass()) { default: break; @@ -487,16 +612,16 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, const Stmt *S = GetNextStmt(N); if (!S) - continue; + break; std::string sbuf; llvm::raw_string_ostream os(sbuf); const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S); os << "Control jumps to line " - << End.asLocation().getExpansionLineNumber(); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + << End.asLocation().getExpansionLineNumber(); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); break; } @@ -509,52 +634,52 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, PathDiagnosticLocation End(S, SMgr, LC); switch (S->getStmtClass()) { - default: - os << "No cases match in the switch statement. " - "Control jumps to line " - << End.asLocation().getExpansionLineNumber(); - break; - case Stmt::DefaultStmtClass: - os << "Control jumps to the 'default' case at line " - << End.asLocation().getExpansionLineNumber(); - break; - - case Stmt::CaseStmtClass: { - os << "Control jumps to 'case "; - const CaseStmt *Case = cast(S); - const Expr *LHS = Case->getLHS()->IgnoreParenCasts(); - - // Determine if it is an enum. - bool GetRawInt = true; - - if (const DeclRefExpr *DR = dyn_cast(LHS)) { - // FIXME: Maybe this should be an assertion. Are there cases - // were it is not an EnumConstantDecl? - const EnumConstantDecl *D = + default: + os << "No cases match in the switch statement. " + "Control jumps to line " + << End.asLocation().getExpansionLineNumber(); + break; + case Stmt::DefaultStmtClass: + os << "Control jumps to the 'default' case at line " + << End.asLocation().getExpansionLineNumber(); + break; + + case Stmt::CaseStmtClass: { + os << "Control jumps to 'case "; + const CaseStmt *Case = cast(S); + const Expr *LHS = Case->getLHS()->IgnoreParenCasts(); + + // Determine if it is an enum. + bool GetRawInt = true; + + if (const DeclRefExpr *DR = dyn_cast(LHS)) { + // FIXME: Maybe this should be an assertion. Are there cases + // were it is not an EnumConstantDecl? 
+ const EnumConstantDecl *D = dyn_cast(DR->getDecl()); - if (D) { - GetRawInt = false; - os << *D; - } + if (D) { + GetRawInt = false; + os << *D; } + } - if (GetRawInt) - os << LHS->EvaluateKnownConstInt(PDB.getASTContext()); + if (GetRawInt) + os << LHS->EvaluateKnownConstInt(PDB.getASTContext()); - os << ":' at line " - << End.asLocation().getExpansionLineNumber(); - break; - } + os << ":' at line " + << End.asLocation().getExpansionLineNumber(); + break; } - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + } + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); } else { os << "'Default' branch taken. "; const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); } break; @@ -565,12 +690,12 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, std::string sbuf; llvm::raw_string_ostream os(sbuf); PathDiagnosticLocation End = PDB.ExecutionContinues(os, N); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); break; } - // Determine control-flow for ternary '?'. + // Determine control-flow for ternary '?'. case Stmt::BinaryConditionalOperatorClass: case Stmt::ConditionalOperatorClass: { std::string sbuf; @@ -587,12 +712,12 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, if (const Stmt *S = End.asStmt()) End = PDB.getEnclosingStmtLocation(S); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); break; } - // Determine control-flow for short-circuited '&&' and '||'. + // Determine control-flow for short-circuited '&&' and '||'. 
case Stmt::BinaryOperatorClass: { if (!PDB.supportsLogicalOpControlFlow()) break; @@ -609,16 +734,16 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, os << "false"; PathDiagnosticLocation End(B->getLHS(), SMgr, LC); PathDiagnosticLocation Start = - PathDiagnosticLocation::createOperatorLoc(B, SMgr); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PathDiagnosticLocation::createOperatorLoc(B, SMgr); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); } else { os << "true"; PathDiagnosticLocation Start(B->getLHS(), SMgr, LC); PathDiagnosticLocation End = PDB.ExecutionContinues(N); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); } } else { @@ -629,16 +754,16 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, os << "false"; PathDiagnosticLocation Start(B->getLHS(), SMgr, LC); PathDiagnosticLocation End = PDB.ExecutionContinues(N); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); } else { os << "true"; PathDiagnosticLocation End(B->getLHS(), SMgr, LC); PathDiagnosticLocation Start = - PathDiagnosticLocation::createOperatorLoc(B, SMgr); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PathDiagnosticLocation::createOperatorLoc(B, SMgr); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); } } @@ -656,8 +781,8 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, if (const Stmt *S = End.asStmt()) End = PDB.getEnclosingStmtLocation(S); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); } else { PathDiagnosticLocation End = PDB.ExecutionContinues(N); @@ -665,8 +790,8 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, if (const Stmt *S = End.asStmt()) End = PDB.getEnclosingStmtLocation(S); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - "Loop condition is false. Exiting loop")); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, "Loop condition is false. Exiting loop")); } break; @@ -683,16 +808,16 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, if (const Stmt *S = End.asStmt()) End = PDB.getEnclosingStmtLocation(S); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - os.str())); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, os.str())); } else { PathDiagnosticLocation End = PDB.ExecutionContinues(N); if (const Stmt *S = End.asStmt()) End = PDB.getEnclosingStmtLocation(S); - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - "Loop condition is true. Entering loop body")); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, "Loop condition is true. 
Entering loop body")); } break; @@ -705,16 +830,17 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, End = PDB.getEnclosingStmtLocation(S); if (*(Src->succ_begin()+1) == Dst) - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - "Taking false branch")); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, "Taking false branch")); else - PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End, - "Taking true branch")); + PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece( + Start, End, "Taking true branch")); break; } + } } - } + } while(0); if (NextNode) { // Add diagnostic pieces from custom visitors. @@ -730,9 +856,13 @@ static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD, } } + if (!PDB.getBugReport()->isValid()) + return false; + // After constructing the full PathDiagnostic, do a pass over it to compact // PathDiagnosticPieces that occur within a macro. CompactPathDiagnostic(PD.getMutablePieces(), PDB.getSourceManager()); + return true; } //===----------------------------------------------------------------------===// @@ -944,6 +1074,11 @@ void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) { const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc); const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc); + if (PrevLocClean.asLocation().isInvalid()) { + PrevLoc = NewLoc; + return; + } + if (NewLocClean.asLocation() == PrevLocClean.asLocation()) return; @@ -1133,7 +1268,7 @@ static void reversePropagateInterestingSymbols(BugReport &R, } } -static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD, +static bool GenerateExtensivePathDiagnostic(PathDiagnostic& PD, PathDiagnosticBuilder &PDB, const ExplodedNode *N, ArrayRef visitors) { @@ -1166,6 +1301,8 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD, PathDiagnosticCallPiece *C = PathDiagnosticCallPiece::construct(N, *CE, SM); + GRBugReporter& BR = PDB.getBugReporter(); + BR.addCallPieceLocationContextPair(C, CE->getCalleeContext()); EB.addEdge(C->callReturn, true); EB.flushLocations(); @@ -1202,6 +1339,8 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD, } else { const Decl *Caller = CE->getLocationContext()->getDecl(); C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller); + GRBugReporter& BR = PDB.getBugReporter(); + BR.addCallPieceLocationContextPair(C, CE->getCalleeContext()); } C->setCallee(*CE, SM); @@ -1234,20 +1373,15 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD, } } - const CFGBlock &Blk = *BE->getSrc(); - const Stmt *Term = Blk.getTerminator(); - // Are we jumping to the head of a loop? Add a special diagnostic. 
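The do { ... } while (0) wrapper introduced above in GenerateMinimalPathDiagnostic exists so that every early `continue` in the old code can become a `break`, which lets the per-node work after the block (running the custom visitors, checking report validity) execute on every iteration. The shape, reduced to a toy loop:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> Nodes = {1, -2, 3, -4};
  int Classified = 0, Visited = 0;

  for (int N : Nodes) {
    do {
      if (N < 0)
        break;        // was a `continue` before the rewrite: skip classification
      ++Classified;   // point-specific handling
    } while (0);

    ++Visited;        // per-node work that must run even when we bail out early
  }

  assert(Classified == 2 && Visited == 4);
}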
- if (const Stmt *Loop = BE->getDst()->getLoopTarget()) { + if (const Stmt *Loop = BE->getSrc()->getLoopTarget()) { PathDiagnosticLocation L(Loop, SM, PDB.LC); const CompoundStmt *CS = NULL; - if (!Term) { - if (const ForStmt *FS = dyn_cast(Loop)) - CS = dyn_cast(FS->getBody()); - else if (const WhileStmt *WS = dyn_cast(Loop)) - CS = dyn_cast(WS->getBody()); - } + if (const ForStmt *FS = dyn_cast(Loop)) + CS = dyn_cast(FS->getBody()); + else if (const WhileStmt *WS = dyn_cast(Loop)) + CS = dyn_cast(WS->getBody()); PathDiagnosticEventPiece *p = new PathDiagnosticEventPiece(L, @@ -1263,15 +1397,16 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD, EB.addEdge(BL); } } - - if (Term) + + if (const Stmt *Term = BE->getSrc()->getTerminator()) EB.addContext(Term); break; } if (const BlockEntrance *BE = dyn_cast(&P)) { - if (const CFGStmt *S = BE->getFirstElement().getAs()) { + CFGElement First = BE->getFirstElement(); + if (const CFGStmt *S = First.getAs()) { const Stmt *stmt = S->getStmt(); if (IsControlFlowExpr(stmt)) { // Add the proper context for '&&', '||', and '?'. @@ -1306,6 +1441,8 @@ static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD, } } } + + return PDB.getBugReport()->isValid(); } //===----------------------------------------------------------------------===// @@ -1414,6 +1551,12 @@ void BugReport::markInteresting(SVal V) { markInteresting(V.getAsSymbol()); } +void BugReport::markInteresting(const LocationContext *LC) { + if (!LC) + return; + InterestingLocationContexts.insert(LC); +} + bool BugReport::isInteresting(SVal V) { return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol()); } @@ -1438,6 +1581,12 @@ bool BugReport::isInteresting(const MemRegion *R) { return false; } +bool BugReport::isInteresting(const LocationContext *LC) { + if (!LC) + return false; + return InterestingLocationContexts.count(LC); +} + void BugReport::lazyInitializeInterestingSets() { if (interestingSymbols.empty()) { interestingSymbols.push_back(new Symbols()); @@ -1823,17 +1972,27 @@ static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) { path.push_back(*I); } -void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD, +bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD, PathDiagnosticConsumer &PC, ArrayRef &bugReports) { - assert(!bugReports.empty()); + + bool HasValid = false; SmallVector errorNodes; for (ArrayRef::iterator I = bugReports.begin(), E = bugReports.end(); I != E; ++I) { + if ((*I)->isValid()) { + HasValid = true; errorNodes.push_back((*I)->getErrorNode()); + } else { + errorNodes.push_back(0); + } } + // If all the reports have been marked invalid, we're done. + if (!HasValid) + return false; + // Construct a new graph that contains only a single path from the error // node to a root. const std::pair, @@ -1844,6 +2003,7 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD, assert(GPair.second.second < bugReports.size()); BugReport *R = bugReports[GPair.second.second]; assert(R && "No original report found for sliced graph."); + assert(R->isValid() && "Report selected from trimmed graph marked invalid."); OwningPtr ReportGraph(GPair.first.first); OwningPtr BackMap(GPair.first.second); @@ -1870,36 +2030,53 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD, visitors.push_back((*I)->clone()); // Clear out the active path from any previous work. 
- PD.getActivePath().clear(); + PD.resetPath(); originalReportConfigToken = R->getConfigurationChangeToken(); // Generate the very last diagnostic piece - the piece is visible before // the trace is expanded. - PathDiagnosticPiece *LastPiece = 0; - for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end(); - I != E; ++I) { - if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) { - assert (!LastPiece && - "There can only be one final piece in a diagnostic."); - LastPiece = Piece; + if (PDB.getGenerationScheme() != PathDiagnosticConsumer::None) { + PathDiagnosticPiece *LastPiece = 0; + for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end(); + I != E; ++I) { + if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) { + assert (!LastPiece && + "There can only be one final piece in a diagnostic."); + LastPiece = Piece; + } } + if (!LastPiece) + LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R); + if (LastPiece) + PD.setEndOfPath(LastPiece); + else + return false; } - if (!LastPiece) - LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R); - if (LastPiece) - PD.getActivePath().push_back(LastPiece); - else - return; switch (PDB.getGenerationScheme()) { case PathDiagnosticConsumer::Extensive: - GenerateExtensivePathDiagnostic(PD, PDB, N, visitors); + if (!GenerateExtensivePathDiagnostic(PD, PDB, N, visitors)) { + assert(!R->isValid() && "Failed on valid report"); + // Try again. We'll filter out the bad report when we trim the graph. + // FIXME: It would be more efficient to use the same intermediate + // trimmed graph, and just repeat the shortest-path search. + return generatePathDiagnostic(PD, PC, bugReports); + } break; case PathDiagnosticConsumer::Minimal: - GenerateMinimalPathDiagnostic(PD, PDB, N, visitors); + if (!GenerateMinimalPathDiagnostic(PD, PDB, N, visitors)) { + assert(!R->isValid() && "Failed on valid report"); + // Try again. We'll filter out the bad report when we trim the graph. + return generatePathDiagnostic(PD, PC, bugReports); + } break; case PathDiagnosticConsumer::None: - llvm_unreachable("PathDiagnosticConsumer::None should never appear here"); + if (!GenerateVisitorsOnlyPathDiagnostic(PD, PDB, N, visitors)) { + assert(!R->isValid() && "Failed on valid report"); + // Try again. We'll filter out the bad report when we trim the graph. + return generatePathDiagnostic(PD, PC, bugReports); + } + break; } // Clean up the visitors we used. @@ -1910,18 +2087,26 @@ void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD, } while(finalReportConfigToken != originalReportConfigToken); // Finally, prune the diagnostic path of uninteresting stuff. - if (R->shouldPrunePath()) { - bool hasSomethingInteresting = RemoveUneededCalls(PD.getMutablePieces()); - assert(hasSomethingInteresting); - (void) hasSomethingInteresting; + if (!PD.path.empty()) { + // Remove messages that are basically the same. + removeRedundantMsgs(PD.getMutablePieces()); + + if (R->shouldPrunePath()) { + bool hasSomethingInteresting = RemoveUneededCalls(PD.getMutablePieces(), + R); + assert(hasSomethingInteresting); + (void) hasSomethingInteresting; + } } + + return true; } void BugReporter::Register(BugType *BT) { BugTypes = F.add(BugTypes, BT); } -void BugReporter::EmitReport(BugReport* R) { +void BugReporter::emitReport(BugReport* R) { // Compute the bug report's hash to determine its equivalence class. 
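The hashing step that emitReport begins here files each report into an equivalence class, so duplicates of the same bug are collapsed before any path is generated. A small model of that grouping; Report and profileKey are invented, and std::unordered_map plays the role of the llvm::FoldingSet used by the real BugReporter:

#include <cassert>
#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

struct Report {
  std::string BugType;
  std::string Description;
  unsigned Line;           // stands in for the location part of the profile
};

// Plays the role of BugReport::Profile(): reports with equal keys are
// considered the same bug.
static std::string profileKey(const Report &R) {
  return R.BugType + "|" + R.Description + "|" + std::to_string(R.Line);
}

class ReporterSketch {
  std::unordered_map<std::string, std::vector<Report>> Classes;
public:
  void emitReport(const Report &R) { Classes[profileKey(R)].push_back(R); }
  std::size_t numEquivalenceClasses() const { return Classes.size(); }
};

int main() {
  ReporterSketch BR;
  BR.emitReport({"Leak", "Opened file is never closed", 42});
  BR.emitReport({"Leak", "Opened file is never closed", 42});  // same class
  BR.emitReport({"Leak", "Opened file is never closed", 57});  // new class
  assert(BR.numEquivalenceClasses() == 2);
}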
llvm::FoldingSetNodeID ID; R->Profile(ID); @@ -2078,17 +2263,17 @@ void BugReporter::FlushReport(BugReport *exampleReport, OwningPtr D(new PathDiagnostic(exampleReport->getDeclWithIssue(), exampleReport->getBugType().getName(), - PD.useVerboseDescription() - ? exampleReport->getDescription() - : exampleReport->getShortDescription(), + exampleReport->getDescription(), + exampleReport->getShortDescription(/*Fallback=*/false), BT.getCategory())); // Generate the full path diagnostic, using the generation scheme - // specified by the PathDiagnosticConsumer. - if (PD.getGenerationScheme() != PathDiagnosticConsumer::None) { - if (!bugReports.empty()) - GeneratePathDiagnostic(*D.get(), PD, bugReports); - } + // specified by the PathDiagnosticConsumer. Note that we have to generate + // path diagnostics even for consumers which do not support paths, because + // the BugReporterVisitors may mark this bug as a false positive. + if (!bugReports.empty()) + if (!generatePathDiagnostic(*D.get(), PD, bugReports)) + return; // If the path is empty, generate a single step path with the location // of the issue. @@ -2100,7 +2285,7 @@ void BugReporter::FlushReport(BugReport *exampleReport, llvm::tie(Beg, End) = exampleReport->getRanges(); for ( ; Beg != End; ++Beg) piece->addRange(*Beg); - D->getActivePath().push_back(piece); + D->setEndOfPath(piece); } // Get the meta data. @@ -2124,7 +2309,7 @@ void BugReporter::EmitBasicReport(const Decl *DeclWithIssue, BugReport *R = new BugReport(*BT, str, Loc); R->setDeclWithIssue(DeclWithIssue); for ( ; NumRanges > 0 ; --NumRanges, ++RBeg) R->addRange(*RBeg); - EmitReport(R); + emitReport(R); } BugType *BugReporter::getBugTypeForName(StringRef name, diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp index e729587..328e8a6 100644 --- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp +++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp @@ -17,10 +17,13 @@ #include "clang/AST/ExprObjC.h" #include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h" #include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" #include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h" #include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h" #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" #include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringExtras.h" using namespace clang; using namespace ento; @@ -29,10 +32,24 @@ using namespace ento; // Utility functions. 
//===----------------------------------------------------------------------===// +bool bugreporter::isDeclRefExprToReference(const Expr *E) { + if (const DeclRefExpr *DRE = dyn_cast(E)) { + return DRE->getDecl()->getType()->isReferenceType(); + } + return false; +} + const Stmt *bugreporter::GetDerefExpr(const ExplodedNode *N) { // Pattern match for a few useful cases (do something smarter later): // a[0], p->f, *p - const Stmt *S = N->getLocationAs()->getStmt(); + const PostStmt *Loc = N->getLocationAs(); + if (!Loc) + return 0; + + const Expr *S = dyn_cast(Loc->getStmt()); + if (!S) + return 0; + S = S->IgnoreParenCasts(); while (true) { if (const BinaryOperator *B = dyn_cast(S)) { @@ -45,7 +62,12 @@ const Stmt *bugreporter::GetDerefExpr(const ExplodedNode *N) { return U->getSubExpr()->IgnoreParenCasts(); } else if (const MemberExpr *ME = dyn_cast(S)) { - return ME->getBase()->IgnoreParenCasts(); + if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) { + return ME->getBase()->IgnoreParenCasts(); + } + } + else if (const ObjCIvarRefExpr *IvarRef = dyn_cast(S)) { + return IvarRef->getBase()->IgnoreParenCasts(); } else if (const ArraySubscriptExpr *AE = dyn_cast(S)) { return AE->getBase(); @@ -103,6 +125,228 @@ BugReporterVisitor::getDefaultEndPath(BugReporterContext &BRC, } +namespace { +/// Emits an extra note at the return statement of an interesting stack frame. +/// +/// The returned value is marked as an interesting value, and if it's null, +/// adds a visitor to track where it became null. +/// +/// This visitor is intended to be used when another visitor discovers that an +/// interesting value comes from an inlined function call. +class ReturnVisitor : public BugReporterVisitorImpl { + const StackFrameContext *StackFrame; + enum { + Initial, + MaybeSuppress, + Satisfied + } Mode; + +public: + ReturnVisitor(const StackFrameContext *Frame) + : StackFrame(Frame), Mode(Initial) {} + + static void *getTag() { + static int Tag = 0; + return static_cast(&Tag); + } + + virtual void Profile(llvm::FoldingSetNodeID &ID) const { + ID.AddPointer(ReturnVisitor::getTag()); + ID.AddPointer(StackFrame); + } + + /// Adds a ReturnVisitor if the given statement represents a call that was + /// inlined. + /// + /// This will search back through the ExplodedGraph, starting from the given + /// node, looking for when the given statement was processed. If it turns out + /// the statement is a call that was inlined, we add the visitor to the + /// bug report, so it can print a note later. + static void addVisitorIfNecessary(const ExplodedNode *Node, const Stmt *S, + BugReport &BR) { + if (!CallEvent::isCallStmt(S)) + return; + + // First, find when we processed the statement. + do { + if (const CallExitEnd *CEE = Node->getLocationAs()) + if (CEE->getCalleeContext()->getCallSite() == S) + break; + if (const StmtPoint *SP = Node->getLocationAs()) + if (SP->getStmt() == S) + break; + + Node = Node->getFirstPred(); + } while (Node); + + // Next, step over any post-statement checks. + while (Node && isa(Node->getLocation())) + Node = Node->getFirstPred(); + + // Finally, see if we inlined the call. + if (Node) { + if (const CallExitEnd *CEE = Node->getLocationAs()) { + const StackFrameContext *CalleeContext = CEE->getCalleeContext(); + if (CalleeContext->getCallSite() == S) { + BR.markInteresting(CalleeContext); + BR.addVisitor(new ReturnVisitor(CalleeContext)); + } + } + } + } + + /// Returns true if any counter-suppression heuristics are enabled for + /// ReturnVisitor. 
+ static bool hasCounterSuppression(AnalyzerOptions &Options) { + return Options.shouldAvoidSuppressingNullArgumentPaths(); + } + + PathDiagnosticPiece *visitNodeInitial(const ExplodedNode *N, + const ExplodedNode *PrevN, + BugReporterContext &BRC, + BugReport &BR) { + // Only print a message at the interesting return statement. + if (N->getLocationContext() != StackFrame) + return 0; + + const StmtPoint *SP = N->getLocationAs(); + if (!SP) + return 0; + + const ReturnStmt *Ret = dyn_cast(SP->getStmt()); + if (!Ret) + return 0; + + // Okay, we're at the right return statement, but do we have the return + // value available? + ProgramStateRef State = N->getState(); + SVal V = State->getSVal(Ret, StackFrame); + if (V.isUnknownOrUndef()) + return 0; + + // Don't print any more notes after this one. + Mode = Satisfied; + + const Expr *RetE = Ret->getRetValue(); + assert(RetE && "Tracking a return value for a void function"); + RetE = RetE->IgnoreParenCasts(); + + // If we can't prove the return value is 0, just mark it interesting, and + // make sure to track it into any further inner functions. + if (State->assume(cast(V), true)) { + BR.markInteresting(V); + ReturnVisitor::addVisitorIfNecessary(N, RetE, BR); + return 0; + } + + // If we're returning 0, we should track where that 0 came from. + bugreporter::trackNullOrUndefValue(N, RetE, BR); + + // Build an appropriate message based on the return value. + SmallString<64> Msg; + llvm::raw_svector_ostream Out(Msg); + + if (isa(V)) { + // If we are pruning null-return paths as unlikely error paths, mark the + // report invalid. We still want to emit a path note, however, in case + // the report is resurrected as valid later on. + ExprEngine &Eng = BRC.getBugReporter().getEngine(); + AnalyzerOptions &Options = Eng.getAnalysisManager().options; + if (Options.shouldPruneNullReturnPaths()) { + if (hasCounterSuppression(Options)) + Mode = MaybeSuppress; + else + BR.markInvalid(ReturnVisitor::getTag(), StackFrame); + } + + if (RetE->getType()->isObjCObjectPointerType()) + Out << "Returning nil"; + else + Out << "Returning null pointer"; + } else { + Out << "Returning zero"; + } + + // FIXME: We should have a more generalized location printing mechanism. + if (const DeclRefExpr *DR = dyn_cast(RetE)) + if (const DeclaratorDecl *DD = dyn_cast(DR->getDecl())) + Out << " (loaded from '" << *DD << "')"; + + PathDiagnosticLocation L(Ret, BRC.getSourceManager(), StackFrame); + return new PathDiagnosticEventPiece(L, Out.str()); + } + + PathDiagnosticPiece *visitNodeMaybeSuppress(const ExplodedNode *N, + const ExplodedNode *PrevN, + BugReporterContext &BRC, + BugReport &BR) { + // Are we at the entry node for this call? + const CallEnter *CE = N->getLocationAs(); + if (!CE) + return 0; + + if (CE->getCalleeContext() != StackFrame) + return 0; + + Mode = Satisfied; + + ExprEngine &Eng = BRC.getBugReporter().getEngine(); + AnalyzerOptions &Options = Eng.getAnalysisManager().options; + if (Options.shouldAvoidSuppressingNullArgumentPaths()) { + // Don't automatically suppress a report if one of the arguments is + // known to be a null pointer. Instead, start tracking /that/ null + // value back to its origin. 
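      // For illustration (a hypothetical caller, not taken from this patch):
      //
      //   int *identity(int *p) { return p; }
      //   ...
      //   int *q = identity(NULL);   // the null value originates at the call
      //   *q = 1;                    // so this path should stay visible
      //
      // When the null return can be traced back to a null argument at the
      // call site, the loop below keeps the report alive instead of
      // suppressing it.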
+ ProgramStateManager &StateMgr = BRC.getStateManager(); + CallEventManager &CallMgr = StateMgr.getCallEventManager(); + + ProgramStateRef State = N->getState(); + CallEventRef<> Call = CallMgr.getCaller(StackFrame, State); + for (unsigned I = 0, E = Call->getNumArgs(); I != E; ++I) { + SVal ArgV = Call->getArgSVal(I); + if (!isa(ArgV)) + continue; + + const Expr *ArgE = Call->getArgExpr(I); + if (!ArgE) + continue; + + // Is it possible for this argument to be non-null? + if (State->assume(cast(ArgV), true)) + continue; + + if (bugreporter::trackNullOrUndefValue(N, ArgE, BR, /*IsArg=*/true)) + return 0; + + // If we /can't/ track the null pointer, we should err on the side of + // false negatives, and continue towards marking this report invalid. + // (We will still look at the other arguments, though.) + } + } + + // There is no reason not to suppress this report; go ahead and do it. + BR.markInvalid(ReturnVisitor::getTag(), StackFrame); + return 0; + } + + PathDiagnosticPiece *VisitNode(const ExplodedNode *N, + const ExplodedNode *PrevN, + BugReporterContext &BRC, + BugReport &BR) { + switch (Mode) { + case Initial: + return visitNodeInitial(N, PrevN, BRC, BR); + case MaybeSuppress: + return visitNodeMaybeSuppress(N, PrevN, BRC, BR); + case Satisfied: + return 0; + } + + llvm_unreachable("Invalid visit mode!"); + } +}; +} // end anonymous namespace + + void FindLastStoreBRVisitor ::Profile(llvm::FoldingSetNodeID &ID) const { static int tag = 0; ID.AddPointer(&tag); @@ -110,59 +354,90 @@ void FindLastStoreBRVisitor ::Profile(llvm::FoldingSetNodeID &ID) const { ID.Add(V); } -PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N, - const ExplodedNode *PrevN, - BugReporterContext &BRC, - BugReport &BR) { +PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ, + const ExplodedNode *Pred, + BugReporterContext &BRC, + BugReport &BR) { if (satisfied) return NULL; - if (!StoreSite) { - // Make sure the region is actually bound to value V here. - // This is necessary because the region may not actually be live at the - // report's error node. - if (N->getState()->getSVal(R) != V) - return NULL; - - const ExplodedNode *Node = N, *Last = N; - - // Now look for the store of V. - for ( ; Node ; Node = Node->getFirstPred()) { - if (const VarRegion *VR = dyn_cast(R)) { - if (const PostStmt *P = Node->getLocationAs()) - if (const DeclStmt *DS = P->getStmtAs()) - if (DS->getSingleDecl() == VR->getDecl()) { - // Record the last seen initialization point. - Last = Node; - break; - } + const ExplodedNode *StoreSite = 0; + const Expr *InitE = 0; + bool IsParam = false; + + // First see if we reached the declaration of the region. + if (const VarRegion *VR = dyn_cast(R)) { + if (const PostStmt *P = Pred->getLocationAs()) { + if (const DeclStmt *DS = P->getStmtAs()) { + if (DS->getSingleDecl() == VR->getDecl()) { + StoreSite = Pred; + InitE = VR->getDecl()->getInit(); + } } - - // Does the region still bind to value V? If not, we are done - // looking for store sites. - if (Node->getState()->getSVal(R) != V) - break; - - Last = Node; } + } - if (!Node) { - satisfied = true; + // Otherwise, check that Succ has this binding and Pred does not, i.e. this is + // where the binding first occurred. + if (!StoreSite) { + if (Succ->getState()->getSVal(R) != V) + return NULL; + if (Pred->getState()->getSVal(R) == V) return NULL; - } - StoreSite = Last; + StoreSite = Succ; + + // If this is an assignment expression, we can track the value + // being assigned. 
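    // (For an assignment such as 'p = foo();', the RHS 'foo()' becomes InitE;
    //  further below it is either tracked as the source of a null/undef value
    //  or handed to ReturnVisitor if it turns out to be an inlined call.)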
+ if (const PostStmt *P = Succ->getLocationAs()) + if (const BinaryOperator *BO = P->getStmtAs()) + if (BO->isAssignmentOp()) + InitE = BO->getRHS(); + + // If this is a call entry, the variable should be a parameter. + // FIXME: Handle CXXThisRegion as well. (This is not a priority because + // 'this' should never be NULL, but this visitor isn't just for NULL and + // UndefinedVal.) + if (const CallEnter *CE = Succ->getLocationAs()) { + const VarRegion *VR = cast(R); + const ParmVarDecl *Param = cast(VR->getDecl()); + + ProgramStateManager &StateMgr = BRC.getStateManager(); + CallEventManager &CallMgr = StateMgr.getCallEventManager(); + + CallEventRef<> Call = CallMgr.getCaller(CE->getCalleeContext(), + Succ->getState()); + InitE = Call->getArgExpr(Param->getFunctionScopeIndex()); + IsParam = true; + } } - if (StoreSite != N) + if (!StoreSite) return NULL; - satisfied = true; + + // If we have an expression that provided the value, try to track where it + // came from. + if (InitE) { + if (V.isUndef() || isa(V)) { + if (!IsParam) + InitE = InitE->IgnoreParenCasts(); + bugreporter::trackNullOrUndefValue(StoreSite, InitE, BR, IsParam); + } else { + ReturnVisitor::addVisitorIfNecessary(StoreSite, InitE->IgnoreParenCasts(), + BR); + } + } + + if (!R->canPrintPretty()) + return 0; + + // Okay, we've found the binding. Emit an appropriate message. SmallString<256> sbuf; llvm::raw_svector_ostream os(sbuf); - if (const PostStmt *PS = N->getLocationAs()) { + if (const PostStmt *PS = StoreSite->getLocationAs()) { if (const DeclStmt *DS = PS->getStmtAs()) { if (const VarRegion *VR = dyn_cast(R)) { @@ -201,6 +476,30 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N, os << "initialized here"; } } + } else if (isa(StoreSite->getLocation())) { + const ParmVarDecl *Param = cast(cast(R)->getDecl()); + + os << "Passing "; + + if (isa(V)) { + if (Param->getType()->isObjCObjectPointerType()) + os << "nil object reference"; + else + os << "null pointer value"; + } else if (V.isUndef()) { + os << "uninitialized value"; + } else if (isa(V)) { + os << "the value " << cast(V).getValue(); + } else { + os << "value"; + } + + // Printed parameter indexes are 1-based, not 0-based. + unsigned Idx = Param->getFunctionScopeIndex() + 1; + os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter '"; + + R->printPretty(os); + os << '\''; } if (os.str().empty()) { @@ -228,17 +527,19 @@ PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N, else os << "Value assigned to "; - if (const VarRegion *VR = dyn_cast(R)) { - os << '\'' << *VR->getDecl() << '\''; - } - else - return NULL; + os << '\''; + R->printPretty(os); + os << '\''; } // Construct a new PathDiagnosticPiece. - ProgramPoint P = N->getLocation(); - PathDiagnosticLocation L = - PathDiagnosticLocation::create(P, BRC.getSourceManager()); + ProgramPoint P = StoreSite->getLocation(); + PathDiagnosticLocation L; + if (isa(P)) + L = PathDiagnosticLocation(InitE, BRC.getSourceManager(), + P.getLocationContext()); + else + L = PathDiagnosticLocation::create(P, BRC.getSourceManager()); if (!L.isValid()) return NULL; return new PathDiagnosticEventPiece(L, os.str()); @@ -251,6 +552,12 @@ void TrackConstraintBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const { ID.Add(Constraint); } +/// Return the tag associated with this visitor. This tag will be used +/// to make all PathDiagnosticPieces created by this visitor. 
+const char *TrackConstraintBRVisitor::getTag() { + return "TrackConstraintBRVisitor"; +} + PathDiagnosticPiece * TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *PrevN, @@ -290,62 +597,97 @@ TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N, PathDiagnosticLocation::create(P, BRC.getSourceManager()); if (!L.isValid()) return NULL; - return new PathDiagnosticEventPiece(L, os.str()); + + PathDiagnosticEventPiece *X = new PathDiagnosticEventPiece(L, os.str()); + X->setTag(getTag()); + return X; } return NULL; } -void bugreporter::addTrackNullOrUndefValueVisitor(const ExplodedNode *N, - const Stmt *S, - BugReport *report) { +bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N, const Stmt *S, + BugReport &report, bool IsArg) { if (!S || !N) - return; + return false; - ProgramStateManager &StateMgr = N->getState()->getStateManager(); + if (const OpaqueValueExpr *OVE = dyn_cast(S)) + S = OVE->getSourceExpr(); + + if (IsArg) { + assert(isa(N->getLocation()) && "Tracking arg but not at call"); + } else { + // Walk through nodes until we get one that matches the statement exactly. + do { + const ProgramPoint &pp = N->getLocation(); + if (const PostStmt *ps = dyn_cast(&pp)) { + if (ps->getStmt() == S) + break; + } else if (const CallExitEnd *CEE = dyn_cast(&pp)) { + if (CEE->getCalleeContext()->getCallSite() == S) + break; + } + N = N->getFirstPred(); + } while (N); - // Walk through nodes until we get one that matches the statement - // exactly. - while (N) { - const ProgramPoint &pp = N->getLocation(); - if (const PostStmt *ps = dyn_cast(&pp)) { - if (ps->getStmt() == S) - break; - } - N = N->getFirstPred(); + if (!N) + return false; } - - if (!N) - return; ProgramStateRef state = N->getState(); - // Walk through lvalue-to-rvalue conversions. - const Expr *Ex = dyn_cast(S); - if (Ex) { + // See if the expression we're interested refers to a variable. + // If so, we can track both its contents and constraints on its value. + if (const Expr *Ex = dyn_cast(S)) { + // Strip off parens and casts. Note that this will never have issues with + // C++ user-defined implicit conversions, because those have a constructor + // or function call inside. Ex = Ex->IgnoreParenCasts(); if (const DeclRefExpr *DR = dyn_cast(Ex)) { + // FIXME: Right now we only track VarDecls because it's non-trivial to + // get a MemRegion for any other DeclRefExprs. if (const VarDecl *VD = dyn_cast(DR->getDecl())) { - const VarRegion *R = - StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext()); + ProgramStateManager &StateMgr = state->getStateManager(); + MemRegionManager &MRMgr = StateMgr.getRegionManager(); + const VarRegion *R = MRMgr.getVarRegion(VD, N->getLocationContext()); - // What did we load? + // Mark both the variable region and its contents as interesting. SVal V = state->getRawSVal(loc::MemRegionVal(R)); - report->markInteresting(R); - report->markInteresting(V); + // If the value matches the default for the variable region, that + // might mean that it's been cleared out of the state. Fall back to + // the full argument expression (with casts and such intact). 
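        // (A SymbolRegionValue tied to the parameter's own region is the
        //  placeholder the analyzer uses when it knows nothing else about the
        //  binding, so in that case the caller-side argument expression is
        //  the more informative thing to evaluate.)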
+ if (IsArg) { + bool UseArgValue = V.isUnknownOrUndef() || V.isZeroConstant(); + if (!UseArgValue) { + const SymbolRegionValue *SRV = + dyn_cast_or_null(V.getAsLocSymbol()); + if (SRV) + UseArgValue = (SRV->getRegion() == R); + } + if (UseArgValue) + V = state->getSValAsScalarOrLoc(S, N->getLocationContext()); + } + + report.markInteresting(R); + report.markInteresting(V); + report.addVisitor(new UndefOrNullArgVisitor(R)); + + // If the contents are symbolic, find out when they became null. if (V.getAsLocSymbol()) { BugReporterVisitor *ConstraintTracker - = new TrackConstraintBRVisitor(cast(V), false); - report->addVisitor(ConstraintTracker); + = new TrackConstraintBRVisitor(cast(V), false); + report.addVisitor(ConstraintTracker); } - report->addVisitor(new FindLastStoreBRVisitor(V, R)); - return; + report.addVisitor(new FindLastStoreBRVisitor(V, R)); + return true; } } } + // If the expression does NOT refer to a variable, we can still track + // constraints on its contents. SVal V = state->getSValAsScalarOrLoc(S, N->getLocationContext()); // Uncomment this to find cases where we aren't properly getting the @@ -354,17 +696,27 @@ void bugreporter::addTrackNullOrUndefValueVisitor(const ExplodedNode *N, // Is it a symbolic value? if (loc::MemRegionVal *L = dyn_cast(&V)) { - const SubRegion *R = cast(L->getRegion()); - while (R && !isa(R)) { - R = dyn_cast(R->getSuperRegion()); - } + // At this point we are dealing with the region's LValue. + // However, if the rvalue is a symbolic region, we should track it as well. + SVal RVal = state->getSVal(L->getRegion()); + const MemRegion *RegionRVal = RVal.getAsRegion(); + report.addVisitor(new UndefOrNullArgVisitor(L->getRegion())); + - if (R) { - report->markInteresting(R); - report->addVisitor(new TrackConstraintBRVisitor(loc::MemRegionVal(R), - false)); + if (RegionRVal && isa(RegionRVal)) { + report.markInteresting(RegionRVal); + report.addVisitor(new TrackConstraintBRVisitor( + loc::MemRegionVal(RegionRVal), false)); } + } else { + // Otherwise, if the value came from an inlined function call, + // we should at least make sure that function isn't pruned in our output. + if (const Expr *E = dyn_cast(S)) + S = E->IgnoreParenCasts(); + ReturnVisitor::addVisitorIfNecessary(N, S, report); } + + return true; } BugReporterVisitor * @@ -406,7 +758,7 @@ PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N, // The receiver was nil, and hence the method was skipped. // Register a BugReporterVisitor to issue a message telling us how // the receiver was null. - bugreporter::addTrackNullOrUndefValueVisitor(N, Receiver, &BR); + bugreporter::trackNullOrUndefValue(N, Receiver, BR); // Issue a message saying that the method was skipped. PathDiagnosticLocation L(Receiver, BRC.getSourceManager(), N->getLocationContext()); @@ -452,14 +804,23 @@ void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR, //===----------------------------------------------------------------------===// // Visitor that tries to report interesting diagnostics from conditions. //===----------------------------------------------------------------------===// + +/// Return the tag associated with this visitor. This tag will be used +/// to make all PathDiagnosticPieces created by this visitor. 
+const char *ConditionBRVisitor::getTag() { + return "ConditionBRVisitor"; +} + PathDiagnosticPiece *ConditionBRVisitor::VisitNode(const ExplodedNode *N, const ExplodedNode *Prev, BugReporterContext &BRC, BugReport &BR) { PathDiagnosticPiece *piece = VisitNodeImpl(N, Prev, BRC, BR); - if (PathDiagnosticEventPiece *ev = - dyn_cast_or_null(piece)) - ev->setPrunable(true, /* override */ false); + if (piece) { + piece->setTag(getTag()); + if (PathDiagnosticEventPiece *ev=dyn_cast(piece)) + ev->setPrunable(true, /* override */ false); + } return piece; } @@ -468,8 +829,7 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N, BugReporterContext &BRC, BugReport &BR) { - const ProgramPoint &progPoint = N->getLocation(); - + ProgramPoint progPoint = N->getLocation(); ProgramStateRef CurrentState = N->getState(); ProgramStateRef PrevState = Prev->getState(); @@ -494,7 +854,7 @@ PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N, // violation. const std::pair &tags = cast(BRC.getBugReporter()). - getEngine().getEagerlyAssumeTags(); + getEngine().geteagerlyAssumeBinOpBifurcationTags(); const ProgramPointTag *tag = PS->getTag(); if (tag == tags.first) @@ -533,8 +893,7 @@ ConditionBRVisitor::VisitTerminator(const Stmt *Term, assert(Cond); assert(srcBlk->succ_size() == 2); const bool tookTrue = *(srcBlk->succ_begin()) == dstBlk; - return VisitTrueTest(Cond->IgnoreParenNoopCasts(BRC.getASTContext()), - tookTrue, BRC, R, N); + return VisitTrueTest(Cond, tookTrue, BRC, R, N); } PathDiagnosticPiece * @@ -547,7 +906,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const Expr *Ex = Cond; while (true) { - Ex = Ex->IgnoreParens(); + Ex = Ex->IgnoreParenCasts(); switch (Ex->getStmtClass()) { default: return 0; @@ -561,7 +920,7 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, const UnaryOperator *UO = cast(Ex); if (UO->getOpcode() == UO_LNot) { tookTrue = !tookTrue; - Ex = UO->getSubExpr()->IgnoreParenNoopCasts(BRC.getASTContext()); + Ex = UO->getSubExpr(); continue; } return 0; @@ -802,3 +1161,54 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond, return event; } +PathDiagnosticPiece * +UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N, + const ExplodedNode *PrevN, + BugReporterContext &BRC, + BugReport &BR) { + + ProgramStateRef State = N->getState(); + ProgramPoint ProgLoc = N->getLocation(); + + // We are only interested in visiting CallEnter nodes. + CallEnter *CEnter = dyn_cast(&ProgLoc); + if (!CEnter) + return 0; + + // Check if one of the arguments is the region the visitor is tracking. + CallEventManager &CEMgr = BRC.getStateManager().getCallEventManager(); + CallEventRef<> Call = CEMgr.getCaller(CEnter->getCalleeContext(), State); + unsigned Idx = 0; + for (CallEvent::param_iterator I = Call->param_begin(), + E = Call->param_end(); I != E; ++I, ++Idx) { + const MemRegion *ArgReg = Call->getArgSVal(Idx).getAsRegion(); + + // Are we tracking the argument or its subregion? + if ( !ArgReg || (ArgReg != R && !R->isSubRegionOf(ArgReg->StripCasts()))) + continue; + + // Check the function parameter type. + const ParmVarDecl *ParamDecl = *I; + assert(ParamDecl && "Formal parameter has no decl?"); + QualType T = ParamDecl->getType(); + + if (!(T->isAnyPointerType() || T->isReferenceType())) { + // Function can only change the value passed in by address. + continue; + } + + // If it is a const pointer value, the function does not intend to + // change the value. 
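    // (For example, a 'const int *' parameter is skipped here, while 'int *'
    //  and 'int &' parameters are still considered below.)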
+ if (T->getPointeeType().isConstQualified()) + continue; + + // Mark the call site (LocationContext) as interesting if the value of the + // argument is undefined or '0'/'NULL'. + SVal BoundVal = State->getSVal(R); + if (BoundVal.isUndef() || BoundVal.isZeroConstant()) { + BR.markInteresting(CEnter->getCalleeContext()); + return 0; + } + } + return 0; +} diff --git a/lib/StaticAnalyzer/Core/CMakeLists.txt b/lib/StaticAnalyzer/Core/CMakeLists.txt index b16b233d..91f15b3 100644 --- a/lib/StaticAnalyzer/Core/CMakeLists.txt +++ b/lib/StaticAnalyzer/Core/CMakeLists.txt @@ -1,9 +1,9 @@ set(LLVM_LINK_COMPONENTS support) add_clang_library(clangStaticAnalyzerCore - AnalysisManager.cpp APSIntType.cpp - BasicConstraintManager.cpp + AnalysisManager.cpp + AnalyzerOptions.cpp BasicValueFactory.cpp BlockCounter.cpp BugReporter.cpp @@ -14,6 +14,7 @@ add_clang_library(clangStaticAnalyzerCore CheckerHelpers.cpp CheckerManager.cpp CheckerRegistry.cpp + ConstraintManager.cpp CoreEngine.cpp Environment.cpp ExplodedGraph.cpp @@ -54,5 +55,5 @@ target_link_libraries(clangStaticAnalyzerCore clangLex clangAST clangFrontend - clangRewrite + clangRewriteCore ) diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp index 5345bd5..c5cb317 100644 --- a/lib/StaticAnalyzer/Core/CallEvent.cpp +++ b/lib/StaticAnalyzer/Core/CallEvent.cpp @@ -14,6 +14,7 @@ //===----------------------------------------------------------------------===// #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" +#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h" #include "clang/Analysis/ProgramPoint.h" #include "clang/AST/ParentMap.h" #include "llvm/ADT/SmallSet.h" @@ -23,10 +24,26 @@ using namespace clang; using namespace ento; QualType CallEvent::getResultType() const { - QualType ResultTy = getDeclaredResultType(); + const Expr *E = getOriginExpr(); + assert(E && "Calls without origin expressions do not have results"); + QualType ResultTy = E->getType(); - if (ResultTy.isNull()) - ResultTy = getOriginExpr()->getType(); + ASTContext &Ctx = getState()->getStateManager().getContext(); + + // A function that returns a reference to 'int' will have a result type + // of simply 'int'. Check the origin expr's value kind to recover the + // proper type. + switch (E->getValueKind()) { + case VK_LValue: + ResultTy = Ctx.getLValueReferenceType(ResultTy); + break; + case VK_XValue: + ResultTy = Ctx.getRValueReferenceType(ResultTy); + break; + case VK_RValue: + // No adjustment is necessary. + break; + } return ResultTy; } @@ -45,7 +62,7 @@ static bool isCallbackArg(SVal V, QualType T) { // Check if a callback is passed inside a struct (for both, struct passed by // reference and by value). Dig just one level into the struct for now. - if (isa(T) || isa(T)) + if (T->isAnyPointerType() || T->isReferenceType()) T = T->getPointeeType(); if (const RecordType *RT = T->getAsStructureType()) { @@ -83,6 +100,14 @@ bool CallEvent::hasNonZeroCallbackArg() const { return false; } +bool CallEvent::isGlobalCFunction(StringRef FunctionName) const { + const FunctionDecl *FD = dyn_cast_or_null(getDecl()); + if (!FD) + return false; + + return CheckerContext::isCLibraryFunction(FD, FunctionName); +} + /// \brief Returns true if a type is a pointer-to-const or reference-to-const /// with no further indirection. 
static bool isPointerToConst(QualType Ty) { @@ -207,6 +232,13 @@ SourceRange CallEvent::getArgSourceRange(unsigned Index) const { return ArgE->getSourceRange(); } +SVal CallEvent::getReturnValue() const { + const Expr *E = getOriginExpr(); + if (!E) + return UndefinedVal(); + return getSVal(E); +} + void CallEvent::dump() const { dump(llvm::errs()); } @@ -230,10 +262,20 @@ void CallEvent::dump(raw_ostream &Out) const { } -bool CallEvent::mayBeInlined(const Stmt *S) { - // FIXME: Kill this. +bool CallEvent::isCallStmt(const Stmt *S) { return isa(S) || isa(S) - || isa(S); + || isa(S) + || isa(S); +} + +/// \brief Returns the result type, adjusted for references. +QualType CallEvent::getDeclaredResultType(const Decl *D) { + assert(D); + if (const FunctionDecl* FD = dyn_cast(D)) + return FD->getResultType(); + else if (const ObjCMethodDecl* MD = dyn_cast(D)) + return MD->getResultType(); + return QualType(); } static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx, @@ -285,14 +327,6 @@ void AnyFunctionCall::getInitialStackFrameContents( D->param_begin(), D->param_end()); } -QualType AnyFunctionCall::getDeclaredResultType() const { - const FunctionDecl *D = getDecl(); - if (!D) - return QualType(); - - return D->getResultType(); -} - bool AnyFunctionCall::argumentsMayEscape() const { if (hasNonZeroCallbackArg()) return true; @@ -303,7 +337,7 @@ bool AnyFunctionCall::argumentsMayEscape() const { const IdentifierInfo *II = D->getIdentifier(); if (!II) - return true; + return false; // This set of "escaping" APIs is @@ -376,6 +410,17 @@ void CXXInstanceCall::getExtraInvalidatedRegions(RegionList &Regions) const { Regions.push_back(R); } +SVal CXXInstanceCall::getCXXThisVal() const { + const Expr *Base = getCXXThisExpr(); + // FIXME: This doesn't handle an overloaded ->* operator. + if (!Base) + return UnknownVal(); + + SVal ThisVal = getSVal(Base); + assert(ThisVal.isUnknownOrUndef() || isa(ThisVal)); + return ThisVal; +} + RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const { // Do we have a decl at all? @@ -400,13 +445,30 @@ RuntimeDefinition CXXInstanceCall::getRuntimeDefinition() const { // Is the type a C++ class? (This is mostly a defensive check.) QualType RegionType = DynType.getType()->getPointeeType(); + assert(!RegionType.isNull() && "DynamicTypeInfo should always be a pointer."); + const CXXRecordDecl *RD = RegionType->getAsCXXRecordDecl(); if (!RD || !RD->hasDefinition()) return RuntimeDefinition(); // Find the decl for this method in that class. const CXXMethodDecl *Result = MD->getCorrespondingMethodInClass(RD, true); - assert(Result && "At the very least the static decl should show up."); + if (!Result) { + // We might not even get the original statically-resolved method due to + // some particularly nasty casting (e.g. casts to sister classes). + // However, we should at least be able to search up and down our own class + // hierarchy, and some real bugs have been caught by checking this. + assert(!RD->isDerivedFrom(MD->getParent()) && "Couldn't find known method"); + + // FIXME: This is checking that our DynamicTypeInfo is at least as good as + // the static type. However, because we currently don't update + // DynamicTypeInfo when an object is cast, we can't actually be sure the + // DynamicTypeInfo is up to date. This assert should be re-enabled once + // this is fixed. + //assert(!MD->getParent()->isDerivedFrom(RD) && "Bad DynamicTypeInfo"); + + return RuntimeDefinition(); + } // Does the decl that we found have an implementation? 
const FunctionDecl *Definition; @@ -459,6 +521,18 @@ const Expr *CXXMemberCall::getCXXThisExpr() const { return getOriginExpr()->getImplicitObjectArgument(); } +RuntimeDefinition CXXMemberCall::getRuntimeDefinition() const { + // C++11 [expr.call]p1: ...If the selected function is non-virtual, or if the + // id-expression in the class member access expression is a qualified-id, + // that function is called. Otherwise, its final overrider in the dynamic type + // of the object expression is called. + if (const MemberExpr *ME = dyn_cast(getOriginExpr()->getCallee())) + if (ME->hasQualifier()) + return AnyFunctionCall::getRuntimeDefinition(); + + return CXXInstanceCall::getRuntimeDefinition(); +} + const Expr *CXXMemberOperatorCall::getCXXThisExpr() const { return getOriginExpr()->getArg(0); @@ -501,15 +575,6 @@ void BlockCall::getInitialStackFrameContents(const StackFrameContext *CalleeCtx, } -QualType BlockCall::getDeclaredResultType() const { - const BlockDataRegion *BR = getBlockRegion(); - if (!BR) - return QualType(); - QualType BlockTy = BR->getCodeRegion()->getLocationType(); - return cast(BlockTy->getPointeeType())->getResultType(); -} - - SVal CXXConstructorCall::getCXXThisVal() const { if (Data) return loc::MemRegionVal(static_cast(Data)); @@ -539,10 +604,19 @@ void CXXConstructorCall::getInitialStackFrameContents( SVal CXXDestructorCall::getCXXThisVal() const { if (Data) - return loc::MemRegionVal(static_cast(Data)); + return loc::MemRegionVal(DtorDataTy::getFromOpaqueValue(Data).getPointer()); return UnknownVal(); } +RuntimeDefinition CXXDestructorCall::getRuntimeDefinition() const { + // Base destructors are always called non-virtually. + // Skip CXXInstanceCall's devirtualization logic in this case. + if (isBaseDestructor()) + return AnyFunctionCall::getRuntimeDefinition(); + + return CXXInstanceCall::getRuntimeDefinition(); +} + CallEvent::param_iterator ObjCMethodCall::param_begin() const { const ObjCMethodDecl *D = getDecl(); @@ -566,12 +640,12 @@ ObjCMethodCall::getExtraInvalidatedRegions(RegionList &Regions) const { Regions.push_back(R); } -QualType ObjCMethodCall::getDeclaredResultType() const { - const ObjCMethodDecl *D = getDecl(); - if (!D) - return QualType(); - - return D->getResultType(); +SVal ObjCMethodCall::getSelfSVal() const { + const LocationContext *LCtx = getLocationContext(); + const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl(); + if (!SelfDecl) + return SVal(); + return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx)); } SVal ObjCMethodCall::getReceiverSVal() const { @@ -584,10 +658,23 @@ SVal ObjCMethodCall::getReceiverSVal() const { // An instance message with no expression means we are sending to super. // In this case the object reference is the same as 'self'. 
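  // (For example, for '[super init]' inside an instance method, the receiver
  //  value returned here is whatever is currently bound to 'self'.)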
- const LocationContext *LCtx = getLocationContext(); - const ImplicitParamDecl *SelfDecl = LCtx->getSelfDecl(); - assert(SelfDecl && "No message receiver Expr, but not in an ObjC method"); - return getState()->getSVal(getState()->getRegion(SelfDecl, LCtx)); + assert(getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance); + SVal SelfVal = getSelfSVal(); + assert(SelfVal.isValid() && "Calling super but not in ObjC method"); + return SelfVal; +} + +bool ObjCMethodCall::isReceiverSelfOrSuper() const { + if (getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperInstance || + getOriginExpr()->getReceiverKind() == ObjCMessageExpr::SuperClass) + return true; + + if (!isInstanceMessage()) + return false; + + SVal RecVal = getSVal(getOriginExpr()->getInstanceReceiver()); + + return (RecVal == getSelfSVal()); } SourceRange ObjCMethodCall::getSourceRange() const { @@ -820,7 +907,8 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx, return getSimpleCall(CE, State, CallerCtx); switch (CallSite->getStmtClass()) { - case Stmt::CXXConstructExprClass: { + case Stmt::CXXConstructExprClass: + case Stmt::CXXTemporaryObjectExprClass: { SValBuilder &SVB = State->getStateManager().getSValBuilder(); const CXXMethodDecl *Ctor = cast(CalleeCtx->getDecl()); Loc ThisPtr = SVB.getCXXThis(Ctor, CalleeCtx); @@ -858,5 +946,5 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx, Trigger = Dtor->getBody(); return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(), - State, CallerCtx); + isa(E), State, CallerCtx); } diff --git a/lib/StaticAnalyzer/Core/CheckerContext.cpp b/lib/StaticAnalyzer/Core/CheckerContext.cpp index 0a047d9..74eeef1 100644 --- a/lib/StaticAnalyzer/Core/CheckerContext.cpp +++ b/lib/StaticAnalyzer/Core/CheckerContext.cpp @@ -38,17 +38,14 @@ StringRef CheckerContext::getCalleeName(const FunctionDecl *FunDecl) const { bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD, StringRef Name) { - return isCLibraryFunction(FD, Name, getASTContext()); -} - -bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD, - StringRef Name, ASTContext &Context) { // To avoid false positives (Ex: finding user defined functions with // similar names), only perform fuzzy name matching when it's a builtin. // Using a string compare is slow, we might want to switch on BuiltinID here. unsigned BId = FD->getBuiltinID(); if (BId != 0) { - StringRef BName = Context.BuiltinInfo.GetName(BId); + if (Name.empty()) + return true; + StringRef BName = FD->getASTContext().BuiltinInfo.GetName(BId); if (BName.find(Name) != StringRef::npos) return true; } @@ -59,6 +56,24 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD, if (!II) return false; + // Look through 'extern "C"' and anything similar invented in the future. + const DeclContext *DC = FD->getDeclContext(); + while (DC->isTransparentContext()) + DC = DC->getParent(); + + // If this function is in a namespace, it is not a C library function. + if (!DC->isTranslationUnit()) + return false; + + // If this function is not externally visible, it is not a C library function. + // Note that we make an exception for inline functions, which may be + // declared in header files without external linkage. 
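  // (For illustration: some C libraries provide functions such as fortified
  //  'strcpy' wrappers as inline definitions in their headers; those should
  //  still be recognized here even though the inline definition itself has no
  //  external linkage.)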
+ if (!FD->isInlined() && FD->getLinkage() != ExternalLinkage) + return false; + + if (Name.empty()) + return true; + StringRef FName = II->getName(); if (FName.equals(Name)) return true; diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp index c786655..3672952 100644 --- a/lib/StaticAnalyzer/Core/CheckerManager.cpp +++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp @@ -36,8 +36,7 @@ bool CheckerManager::hasPathSensitiveCheckers() const { !DeadSymbolsCheckers.empty() || !RegionChangesCheckers.empty() || !EvalAssumeCheckers.empty() || - !EvalCallCheckers.empty() || - !InlineCallCheckers.empty(); + !EvalCallCheckers.empty(); } void CheckerManager::finishedCheckerRegistration() { @@ -314,20 +313,19 @@ namespace { SVal Val; const Stmt *S; ExprEngine &Eng; - ProgramPoint::Kind PointKind; + const ProgramPoint &PP; CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); } CheckersTy::const_iterator checkers_end() { return Checkers.end(); } CheckBindContext(const CheckersTy &checkers, SVal loc, SVal val, const Stmt *s, ExprEngine &eng, - ProgramPoint::Kind PK) - : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PointKind(PK) {} + const ProgramPoint &pp) + : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PP(pp) {} void runChecker(CheckerManager::CheckBindFunc checkFn, NodeBuilder &Bldr, ExplodedNode *Pred) { - const ProgramPoint &L = ProgramPoint::getProgramPoint(S, PointKind, - Pred->getLocationContext(), checkFn.Checker); + const ProgramPoint &L = PP.withTag(checkFn.Checker); CheckerContext C(Bldr, Eng, Pred, L); checkFn(Loc, Val, S, C); @@ -340,8 +338,8 @@ void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst, const ExplodedNodeSet &Src, SVal location, SVal val, const Stmt *S, ExprEngine &Eng, - ProgramPoint::Kind PointKind) { - CheckBindContext C(BindCheckers, location, val, S, Eng, PointKind); + const ProgramPoint &PP) { + CheckBindContext C(BindCheckers, location, val, S, Eng, PP); expandGraphWithCheckers(C, Dst, Src); } @@ -357,8 +355,8 @@ void CheckerManager::runCheckersForEndAnalysis(ExplodedGraph &G, // for this callback since end of path nodes are expected to be final. void CheckerManager::runCheckersForEndPath(NodeBuilderContext &BC, ExplodedNodeSet &Dst, + ExplodedNode *Pred, ExprEngine &Eng) { - ExplodedNode *Pred = BC.Pred; // We define the builder outside of the loop bacause if at least one checkers // creates a sucsessor for Pred, we do not need to generate an @@ -509,41 +507,13 @@ void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst, const CallExpr *CE = cast(Call.getOriginExpr()); for (ExplodedNodeSet::iterator NI = Src.begin(), NE = Src.end(); NI != NE; ++NI) { - ExplodedNode *Pred = *NI; bool anyEvaluated = false; - // First, check if any of the InlineCall callbacks can evaluate the call. - assert(InlineCallCheckers.size() <= 1 && - "InlineCall is a special hacky callback to allow intrusive" - "evaluation of the call (which simulates inlining). It is " - "currently only used by OSAtomicChecker and should go away " - "at some point."); - for (std::vector::iterator - EI = InlineCallCheckers.begin(), EE = InlineCallCheckers.end(); - EI != EE; ++EI) { - ExplodedNodeSet checkDst; - bool evaluated = (*EI)(CE, Eng, Pred, checkDst); - assert(!(evaluated && anyEvaluated) - && "There are more than one checkers evaluating the call"); - if (evaluated) { - anyEvaluated = true; - Dst.insert(checkDst); -#ifdef NDEBUG - break; // on release don't check that no other checker also evals. 
-#endif - } - } - -#ifdef NDEBUG // on release don't check that no other checker also evals. - if (anyEvaluated) { - break; - } -#endif - ExplodedNodeSet checkDst; NodeBuilder B(Pred, checkDst, Eng.getBuilderContext()); - // Next, check if any of the EvalCall callbacks can evaluate the call. + + // Check if any of the EvalCall callbacks can evaluate the call. for (std::vector::iterator EI = EvalCallCheckers.begin(), EE = EvalCallCheckers.end(); EI != EE; ++EI) { @@ -679,10 +649,6 @@ void CheckerManager::_registerForEvalCall(EvalCallFunc checkfn) { EvalCallCheckers.push_back(checkfn); } -void CheckerManager::_registerForInlineCall(InlineCallFunc checkfn) { - InlineCallCheckers.push_back(checkfn); -} - void CheckerManager::_registerForEndOfTranslationUnit( CheckEndOfTranslationUnit checkfn) { EndOfTranslationUnitCheckers.push_back(checkfn); diff --git a/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/lib/StaticAnalyzer/Core/ConstraintManager.cpp new file mode 100644 index 0000000..4dec526 --- /dev/null +++ b/lib/StaticAnalyzer/Core/ConstraintManager.cpp @@ -0,0 +1,39 @@ +//== ConstraintManager.cpp - Constraints on symbolic values -----*- C++ -*--==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defined the interface to manage constraints on symbolic values. +// +//===----------------------------------------------------------------------===// + +#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h" + +using namespace clang; +using namespace ento; + +ConstraintManager::~ConstraintManager() {} + +static DefinedSVal getLocFromSymbol(const ProgramStateRef &State, + SymbolRef Sym) { + const MemRegion *R = State->getStateManager().getRegionManager() + .getSymbolicRegion(Sym); + return loc::MemRegionVal(R); +} + +ConditionTruthVal ConstraintManager::checkNull(ProgramStateRef State, + SymbolRef Sym) { + QualType Ty = Sym->getType(); + DefinedSVal V = Loc::isLocType(Ty) ? getLocFromSymbol(State, Sym) + : nonloc::SymbolVal(Sym); + const ProgramStatePair &P = assumeDual(State, V); + if (P.first && !P.second) + return ConditionTruthVal(false); + if (!P.first && P.second) + return ConditionTruthVal(true); + return ConditionTruthVal(); +} diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp index 1f13742..ec23792 100644 --- a/lib/StaticAnalyzer/Core/CoreEngine.cpp +++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp @@ -243,11 +243,6 @@ void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc, case ProgramPoint::CallEnterKind: { CallEnter CEnter = cast(Loc); - if (AnalyzedCallees) - if (const CallExpr* CE = - dyn_cast_or_null(CEnter.getCallExpr())) - if (const Decl *CD = CE->getCalleeDecl()) - AnalyzedCallees->insert(CD); SubEng.processCallEnter(CEnter, Pred); break; } @@ -303,7 +298,7 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) { && "EXIT block cannot contain Stmts."); // Process the final state transition. - SubEng.processEndOfFunction(BuilderCtx); + SubEng.processEndOfFunction(BuilderCtx, Pred); // This path is done. Don't enqueue any more nodes. 
return; @@ -313,7 +308,7 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) { ExplodedNodeSet dstNodes; BlockEntrance BE(Blk, Pred->getLocationContext()); NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE); - SubEng.processCFGBlockEntrance(L, nodeBuilder); + SubEng.processCFGBlockEntrance(L, nodeBuilder, Pred); // Auto-generate a node. if (!nodeBuilder.hasGeneratedNodes()) { @@ -519,9 +514,9 @@ void CoreEngine::enqueueStmtNode(ExplodedNode *N, return; } - const CFGStmt *CS = (*Block)[Idx].getAs(); - const Stmt *St = CS ? CS->getStmt() : 0; - PostStmt Loc(St, N->getLocationContext()); + // At this point, we know we're processing a normal statement. + CFGStmt CS = cast((*Block)[Idx]); + PostStmt Loc(CS.getStmt(), N->getLocationContext()); if (Loc == N->getLocation()) { // Note: 'N' should be a fresh node because otherwise it shouldn't be diff --git a/lib/StaticAnalyzer/Core/Environment.cpp b/lib/StaticAnalyzer/Core/Environment.cpp index 52644f7..bab89c5 100644 --- a/lib/StaticAnalyzer/Core/Environment.cpp +++ b/lib/StaticAnalyzer/Core/Environment.cpp @@ -20,6 +20,44 @@ using namespace clang; using namespace ento; +static const Expr *ignoreTransparentExprs(const Expr *E) { + E = E->IgnoreParens(); + + switch (E->getStmtClass()) { + case Stmt::OpaqueValueExprClass: + E = cast(E)->getSourceExpr(); + break; + case Stmt::ExprWithCleanupsClass: + E = cast(E)->getSubExpr(); + break; + case Stmt::CXXBindTemporaryExprClass: + E = cast(E)->getSubExpr(); + break; + case Stmt::SubstNonTypeTemplateParmExprClass: + E = cast(E)->getReplacement(); + break; + case Stmt::CXXDefaultArgExprClass: + E = cast(E)->getExpr(); + break; + default: + // This is the base case: we can't look through more than we already have. + return E; + } + + return ignoreTransparentExprs(E); +} + +static const Stmt *ignoreTransparentExprs(const Stmt *S) { + if (const Expr *E = dyn_cast(S)) + return ignoreTransparentExprs(E); + return S; +} + +EnvironmentEntry::EnvironmentEntry(const Stmt *S, const LocationContext *L) + : std::pair(ignoreTransparentExprs(S), + L ? L->getCurrentStackFrame() : 0) {} + SVal Environment::lookupExpr(const EnvironmentEntry &E) const { const SVal* X = ExprBindings.lookup(E); if (X) { @@ -30,101 +68,72 @@ SVal Environment::lookupExpr(const EnvironmentEntry &E) const { } SVal Environment::getSVal(const EnvironmentEntry &Entry, - SValBuilder& svalBuilder, - bool useOnlyDirectBindings) const { - - if (useOnlyDirectBindings) { - // This branch is rarely taken, but can be exercised by - // checkers that explicitly bind values to arbitrary - // expressions. It is crucial that we do not ignore any - // expression here, and do a direct lookup. 
- return lookupExpr(Entry); + SValBuilder& svalBuilder) const { + const Stmt *S = Entry.getStmt(); + const LocationContext *LCtx = Entry.getLocationContext(); + + switch (S->getStmtClass()) { + case Stmt::CXXBindTemporaryExprClass: + case Stmt::CXXDefaultArgExprClass: + case Stmt::ExprWithCleanupsClass: + case Stmt::GenericSelectionExprClass: + case Stmt::OpaqueValueExprClass: + case Stmt::ParenExprClass: + case Stmt::SubstNonTypeTemplateParmExprClass: + llvm_unreachable("Should have been handled by ignoreTransparentExprs"); + + case Stmt::AddrLabelExprClass: + return svalBuilder.makeLoc(cast(S)); + + case Stmt::CharacterLiteralClass: { + const CharacterLiteral *C = cast(S); + return svalBuilder.makeIntVal(C->getValue(), C->getType()); } - const Stmt *E = Entry.getStmt(); - const LocationContext *LCtx = Entry.getLocationContext(); - - for (;;) { - if (const Expr *Ex = dyn_cast(E)) - E = Ex->IgnoreParens(); - - switch (E->getStmtClass()) { - case Stmt::AddrLabelExprClass: - return svalBuilder.makeLoc(cast(E)); - case Stmt::OpaqueValueExprClass: { - const OpaqueValueExpr *ope = cast(E); - E = ope->getSourceExpr(); - continue; - } - case Stmt::ParenExprClass: - case Stmt::GenericSelectionExprClass: - llvm_unreachable("ParenExprs and GenericSelectionExprs should " - "have been handled by IgnoreParens()"); - case Stmt::CharacterLiteralClass: { - const CharacterLiteral* C = cast(E); - return svalBuilder.makeIntVal(C->getValue(), C->getType()); - } - case Stmt::CXXBoolLiteralExprClass: { - const SVal *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx)); - if (X) - return *X; - else - return svalBuilder.makeBoolVal(cast(E)); - } - case Stmt::CXXScalarValueInitExprClass: - case Stmt::ImplicitValueInitExprClass: { - QualType Ty = cast(E)->getType(); - return svalBuilder.makeZeroVal(Ty); - } - case Stmt::IntegerLiteralClass: { - // In C++, this expression may have been bound to a temporary object. - SVal const *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx)); - if (X) - return *X; - else - return svalBuilder.makeIntVal(cast(E)); - } - case Stmt::ObjCBoolLiteralExprClass: - return svalBuilder.makeBoolVal(cast(E)); - - // For special C0xx nullptr case, make a null pointer SVal. - case Stmt::CXXNullPtrLiteralExprClass: - return svalBuilder.makeNull(); - case Stmt::ExprWithCleanupsClass: - E = cast(E)->getSubExpr(); - continue; - case Stmt::CXXBindTemporaryExprClass: - E = cast(E)->getSubExpr(); - continue; - case Stmt::SubstNonTypeTemplateParmExprClass: - E = cast(E)->getReplacement(); - continue; - case Stmt::ObjCStringLiteralClass: { - MemRegionManager &MRMgr = svalBuilder.getRegionManager(); - const ObjCStringLiteral *SL = cast(E); - return svalBuilder.makeLoc(MRMgr.getObjCStringRegion(SL)); - } - case Stmt::StringLiteralClass: { - MemRegionManager &MRMgr = svalBuilder.getRegionManager(); - const StringLiteral *SL = cast(E); - return svalBuilder.makeLoc(MRMgr.getStringRegion(SL)); - } - case Stmt::ReturnStmtClass: { - const ReturnStmt *RS = cast(E); - if (const Expr *RE = RS->getRetValue()) { - E = RE; - continue; - } - return UndefinedVal(); - } - - // Handle all other Stmt* using a lookup. 
- default: - break; - }; + case Stmt::CXXBoolLiteralExprClass: + return svalBuilder.makeBoolVal(cast(S)); + + case Stmt::CXXScalarValueInitExprClass: + case Stmt::ImplicitValueInitExprClass: { + QualType Ty = cast(S)->getType(); + return svalBuilder.makeZeroVal(Ty); + } + + case Stmt::IntegerLiteralClass: + return svalBuilder.makeIntVal(cast(S)); + + case Stmt::ObjCBoolLiteralExprClass: + return svalBuilder.makeBoolVal(cast(S)); + + // For special C0xx nullptr case, make a null pointer SVal. + case Stmt::CXXNullPtrLiteralExprClass: + return svalBuilder.makeNull(); + + case Stmt::ObjCStringLiteralClass: { + MemRegionManager &MRMgr = svalBuilder.getRegionManager(); + const ObjCStringLiteral *SL = cast(S); + return svalBuilder.makeLoc(MRMgr.getObjCStringRegion(SL)); + } + + case Stmt::StringLiteralClass: { + MemRegionManager &MRMgr = svalBuilder.getRegionManager(); + const StringLiteral *SL = cast(S); + return svalBuilder.makeLoc(MRMgr.getStringRegion(SL)); + } + + case Stmt::ReturnStmtClass: { + const ReturnStmt *RS = cast(S); + if (const Expr *RE = RS->getRetValue()) + return getSVal(EnvironmentEntry(RE, LCtx), svalBuilder); + return UndefinedVal(); + } + + // Handle all other Stmt* using a lookup. + default: break; } - return lookupExpr(EnvironmentEntry(E, LCtx)); + + return lookupExpr(EnvironmentEntry(S, LCtx)); } Environment EnvironmentManager::bindExpr(Environment Env, @@ -140,16 +149,16 @@ Environment EnvironmentManager::bindExpr(Environment Env, return Environment(F.add(Env.ExprBindings, E, V)); } -static inline EnvironmentEntry MakeLocation(const EnvironmentEntry &E) { - const Stmt *S = E.getStmt(); - S = (const Stmt*) (((uintptr_t) S) | 0x1); - return EnvironmentEntry(S, E.getLocationContext()); +EnvironmentEntry EnvironmentEntry::makeLocation() const { + EnvironmentEntry Result = *this; + reinterpret_cast(Result.first) |= 0x1; + return Result; } Environment EnvironmentManager::bindExprAndLocation(Environment Env, const EnvironmentEntry &E, SVal location, SVal V) { - return Environment(F.add(F.add(Env.ExprBindings, MakeLocation(E), location), + return Environment(F.add(F.add(Env.ExprBindings, E.makeLocation(), location), E, V)); } @@ -286,7 +295,8 @@ void Environment::printAux(raw_ostream &Out, bool printLocations, S = (Stmt*) (((uintptr_t) S) & ((uintptr_t) ~0x1)); } - Out << " (" << (void*) En.getLocationContext() << ',' << (void*) S << ") "; + Out << " (" << (const void*) En.getLocationContext() << ',' + << (const void*) S << ") "; LangOptions LO; // FIXME. S->printPretty(Out, 0, PrintingPolicy(LO)); Out << " : " << I.getData(); diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp index b79f3f5..c284bd7 100644 --- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp +++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp @@ -47,10 +47,8 @@ void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) { // Cleanup. //===----------------------------------------------------------------------===// -static const unsigned CounterTop = 1000; - ExplodedGraph::ExplodedGraph() - : NumNodes(0), reclaimNodes(false), reclaimCounter(CounterTop) {} + : NumNodes(0), ReclaimNodeInterval(0) {} ExplodedGraph::~ExplodedGraph() {} @@ -63,12 +61,12 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) { // // (1) 1 predecessor (that has one successor) // (2) 1 successor (that has one predecessor) - // (3) The ProgramPoint is for a PostStmt. + // (3) The ProgramPoint is for a PostStmt, but not a PostStore. // (4) There is no 'tag' for the ProgramPoint. 
// (5) The 'store' is the same as the predecessor. // (6) The 'GDM' is the same as the predecessor. // (7) The LocationContext is the same as the predecessor. - // (8) The PostStmt is for a non-consumed Stmt or Expr. + // (8) The PostStmt isn't for a non-consumed Stmt or Expr. // (9) The successor is not a CallExpr StmtPoint (so that we would be able to // find it when retrying a call with no inlining). // FIXME: It may be safe to reclaim PreCall and PostCall nodes as well. @@ -87,16 +85,13 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) { // Condition 3. ProgramPoint progPoint = node->getLocation(); - if (!isa(progPoint)) + if (!isa(progPoint) || isa(progPoint)) return false; // Condition 4. PostStmt ps = cast(progPoint); if (ps.getTag()) return false; - - if (isa(ps.getStmt())) - return false; // Conditions 5, 6, and 7. ProgramStateRef state = node->getState(); @@ -106,6 +101,12 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) { return false; // Condition 8. + // Do not collect nodes for non-consumed Stmt or Expr to ensure precise + // diagnostic generation; specifically, so that we could anchor arrows + // pointing to the beginning of statements (as written in code). + if (!isa(ps.getStmt())) + return false; + if (const Expr *Ex = dyn_cast(ps.getStmt())) { ParentMap &PM = progPoint.getLocationContext()->getParentMap(); if (!PM.isConsumedExpr(Ex)) @@ -115,7 +116,7 @@ bool ExplodedGraph::shouldCollect(const ExplodedNode *node) { // Condition 9. const ProgramPoint SuccLoc = succ->getLocation(); if (const StmtPoint *SP = dyn_cast(&SuccLoc)) - if (CallEvent::mayBeInlined(SP->getStmt())) + if (CallEvent::isCallStmt(SP->getStmt())) return false; return true; @@ -141,13 +142,13 @@ void ExplodedGraph::reclaimRecentlyAllocatedNodes() { if (ChangedNodes.empty()) return; - // Only periodically relcaim nodes so that we can build up a set of + // Only periodically reclaim nodes so that we can build up a set of // nodes that meet the reclamation criteria. Freshly created nodes // by definition have no successor, and thus cannot be reclaimed (see below). - assert(reclaimCounter > 0); - if (--reclaimCounter != 0) + assert(ReclaimCounter > 0); + if (--ReclaimCounter != 0) return; - reclaimCounter = CounterTop; + ReclaimCounter = ReclaimNodeInterval; for (NodeVector::iterator it = ChangedNodes.begin(), et = ChangedNodes.end(); it != et; ++it) { @@ -162,9 +163,18 @@ void ExplodedGraph::reclaimRecentlyAllocatedNodes() { // ExplodedNode. //===----------------------------------------------------------------------===// -static inline BumpVector& getVector(void *P) { - return *reinterpret_cast*>(P); -} +// An NodeGroup's storage type is actually very much like a TinyPtrVector: +// it can be either a pointer to a single ExplodedNode, or a pointer to a +// BumpVector allocated with the ExplodedGraph's allocator. This allows the +// common case of single-node NodeGroups to be implemented with no extra memory. +// +// Consequently, each of the NodeGroup methods have up to four cases to handle: +// 1. The flag is set and this group does not actually contain any nodes. +// 2. The group is empty, in which case the storage value is null. +// 3. The group contains a single node. +// 4. The group contains more than one node. 
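// A minimal free-standing sketch of the same "single pointer or vector"
// storage idea, using hypothetical names and a plain std::vector in place of
// the allocator-backed BumpVector used by the real NodeGroup:
//
//   #include "llvm/ADT/PointerUnion.h"
//   #include <vector>
//
//   class TinyNodeGroup {
//     typedef std::vector<ExplodedNode *> Vec;
//     llvm::PointerUnion<ExplodedNode *, Vec *> Storage;  // cases 2-4 above
//   public:
//     void add(ExplodedNode *N) {
//       if (Storage.isNull()) {                        // empty -> one node
//         Storage = N;
//       } else if (Vec *V = Storage.dyn_cast<Vec *>()) {
//         V->push_back(N);                             // already a vector
//       } else {                                       // one node -> vector
//         Vec *V = new Vec(1, Storage.get<ExplodedNode *>());
//         V->push_back(N);
//         Storage = V;
//       }
//     }
//     unsigned size() const {
//       if (Storage.isNull()) return 0;
//       if (Vec *V = Storage.dyn_cast<Vec *>()) return V->size();
//       return 1;
//     }
//   };
//
// The real implementation below also packs a flag bit into the stored pointer
// (case 1) and allocates its vector from the ExplodedGraph's allocator.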
+typedef BumpVector ExplodedNodeVector; +typedef llvm::PointerUnion GroupStorage; void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) { assert (!V->isSink()); @@ -176,71 +186,77 @@ void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) { } void ExplodedNode::NodeGroup::replaceNode(ExplodedNode *node) { - assert(getKind() == Size1); - P = reinterpret_cast(node); - assert(getKind() == Size1); + assert(!getFlag()); + + GroupStorage &Storage = reinterpret_cast(P); + assert(Storage.is()); + Storage = node; + assert(Storage.is()); } void ExplodedNode::NodeGroup::addNode(ExplodedNode *N, ExplodedGraph &G) { - assert((reinterpret_cast(N) & Mask) == 0x0); assert(!getFlag()); - if (getKind() == Size1) { - if (ExplodedNode *NOld = getNode()) { - BumpVectorContext &Ctx = G.getNodeAllocator(); - BumpVector *V = - G.getAllocator().Allocate >(); - new (V) BumpVector(Ctx, 4); - - assert((reinterpret_cast(V) & Mask) == 0x0); - V->push_back(NOld, Ctx); - V->push_back(N, Ctx); - P = reinterpret_cast(V) | SizeOther; - assert(getPtr() == (void*) V); - assert(getKind() == SizeOther); - } - else { - P = reinterpret_cast(N); - assert(getKind() == Size1); - } + GroupStorage &Storage = reinterpret_cast(P); + if (Storage.isNull()) { + Storage = N; + assert(Storage.is()); + return; } - else { - assert(getKind() == SizeOther); - getVector(getPtr()).push_back(N, G.getNodeAllocator()); + + ExplodedNodeVector *V = Storage.dyn_cast(); + + if (!V) { + // Switch from single-node to multi-node representation. + ExplodedNode *Old = Storage.get(); + + BumpVectorContext &Ctx = G.getNodeAllocator(); + V = G.getAllocator().Allocate(); + new (V) ExplodedNodeVector(Ctx, 4); + V->push_back(Old, Ctx); + + Storage = V; + assert(!getFlag()); + assert(Storage.is()); } + + V->push_back(N, G.getNodeAllocator()); } unsigned ExplodedNode::NodeGroup::size() const { if (getFlag()) return 0; - if (getKind() == Size1) - return getNode() ? 1 : 0; - else - return getVector(getPtr()).size(); + const GroupStorage &Storage = reinterpret_cast(P); + if (Storage.isNull()) + return 0; + if (ExplodedNodeVector *V = Storage.dyn_cast()) + return V->size(); + return 1; } -ExplodedNode **ExplodedNode::NodeGroup::begin() const { +ExplodedNode * const *ExplodedNode::NodeGroup::begin() const { if (getFlag()) - return NULL; + return 0; - if (getKind() == Size1) - return (ExplodedNode**) (getPtr() ? &P : NULL); - else - return const_cast(&*(getVector(getPtr()).begin())); + const GroupStorage &Storage = reinterpret_cast(P); + if (Storage.isNull()) + return 0; + if (ExplodedNodeVector *V = Storage.dyn_cast()) + return V->begin(); + return Storage.getAddrOfPtr1(); } -ExplodedNode** ExplodedNode::NodeGroup::end() const { +ExplodedNode * const *ExplodedNode::NodeGroup::end() const { if (getFlag()) - return NULL; - - if (getKind() == Size1) - return (ExplodedNode**) (getPtr() ? &P+1 : NULL); - else { - // Dereferencing end() is undefined behaviour. The vector is not empty, so - // we can dereference the last elem and then add 1 to the result. 
- return const_cast(getVector(getPtr()).end()); - } + return 0; + + const GroupStorage &Storage = reinterpret_cast(P); + if (Storage.isNull()) + return 0; + if (ExplodedNodeVector *V = Storage.dyn_cast()) + return V->end(); + return Storage.getAddrOfPtr1() + 1; } ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L, @@ -266,7 +282,7 @@ ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L, new (V) NodeTy(L, State, IsSink); - if (reclaimNodes) + if (ReclaimNodeInterval) ChangedNodes.push_back(V); // Insert the node into the node set and return it. @@ -314,8 +330,8 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources, // ===- Pass 1 (reverse DFS) -=== for (const ExplodedNode* const* I = BeginSources; I != EndSources; ++I) { - assert(*I); - WL1.push_back(*I); + if (*I) + WL1.push_back(*I); } // Process the first worklist until it is empty. Because it is a std::list @@ -338,7 +354,8 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources, } // Visit our predecessors and enqueue them. - for (ExplodedNode** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) + for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end(); + I != E; ++I) WL1.push_back(*I); } @@ -375,7 +392,8 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources, // Walk through the predecessors of 'N' and hook up their corresponding // nodes in the new graph (if any) to the freshly created node. - for (ExplodedNode **I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) { + for (ExplodedNode::pred_iterator I = N->Preds.begin(), E = N->Preds.end(); + I != E; ++I) { Pass2Ty::iterator PI = Pass2.find(*I); if (PI == Pass2.end()) continue; @@ -387,7 +405,8 @@ ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources, // been created, we should hook them up as successors. Otherwise, enqueue // the new nodes from the original graph that should have nodes created // in the new graph. - for (ExplodedNode **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) { + for (ExplodedNode::succ_iterator I = N->Succs.begin(), E = N->Succs.end(); + I != E; ++I) { Pass2Ty::iterator PI = Pass2.find(*I); if (PI != Pass2.end()) { PI->second->addPredecessor(NewN, *G); diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp index b0435fb..045591c 100644 --- a/lib/StaticAnalyzer/Core/ExprEngine.cpp +++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp @@ -55,32 +55,32 @@ STATISTIC(NumTimesRetriedWithoutInlining, //===----------------------------------------------------------------------===// ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled, - SetOfConstDecls *VisitedCallees, + SetOfConstDecls *VisitedCalleesIn, FunctionSummariesTy *FS) : AMgr(mgr), AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()), - Engine(*this, VisitedCallees, FS), + Engine(*this, FS), G(Engine.getGraph()), StateMgr(getContext(), mgr.getStoreManagerCreator(), mgr.getConstraintManagerCreator(), G.getAllocator(), - *this), + this), SymMgr(StateMgr.getSymbolManager()), svalBuilder(StateMgr.getSValBuilder()), EntryNode(NULL), - currentStmt(NULL), currentStmtIdx(0), currentBuilderContext(0), - NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL), - RaiseSel(GetNullarySelector("raise", getContext())), - ObjCGCEnabled(gcEnabled), BR(mgr, *this) { - - if (mgr.shouldEagerlyTrimExplodedGraph()) { - // Enable eager node reclaimation when constructing the ExplodedGraph. 
- G.enableNodeReclamation(); + currStmt(NULL), currStmtIdx(0), currBldrCtx(0), + ObjCNoRet(mgr.getASTContext()), + ObjCGCEnabled(gcEnabled), BR(mgr, *this), + VisitedCallees(VisitedCalleesIn) +{ + unsigned TrimInterval = mgr.options.getGraphTrimInterval(); + if (TrimInterval != 0) { + // Enable eager node reclaimation when constructing the ExplodedGraph. + G.enableNodeReclamation(TrimInterval); } } ExprEngine::~ExprEngine() { BR.FlushReports(); - delete [] NSExceptionInstanceRaiseSelectors; } //===----------------------------------------------------------------------===// @@ -164,6 +164,23 @@ ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) { return state; } +/// If the value of the given expression is a NonLoc, copy it into a new +/// temporary region, and replace the value of the expression with that. +static ProgramStateRef createTemporaryRegionIfNeeded(ProgramStateRef State, + const LocationContext *LC, + const Expr *E) { + SVal V = State->getSVal(E, LC); + + if (isa(V)) { + MemRegionManager &MRMgr = State->getStateManager().getRegionManager(); + const MemRegion *R = MRMgr.getCXXTempObjectRegion(E, LC); + State = State->bindLoc(loc::MemRegionVal(R), V); + State = State->BindExpr(E, LC, loc::MemRegionVal(R)); + } + + return State; +} + //===----------------------------------------------------------------------===// // Top-level transfer function logic (Dispatcher). //===----------------------------------------------------------------------===// @@ -200,8 +217,8 @@ void ExprEngine::processEndWorklist(bool hasWorkRemaining) { void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred, unsigned StmtIdx, NodeBuilderContext *Ctx) { - currentStmtIdx = StmtIdx; - currentBuilderContext = Ctx; + currStmtIdx = StmtIdx; + currBldrCtx = Ctx; switch (E.getKind()) { case CFGElement::Invalid: @@ -219,7 +236,7 @@ void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred, ProcessImplicitDtor(*E.getAs(), Pred); return; } - currentBuilderContext = 0; + currBldrCtx = 0; } static bool shouldRemoveDeadBindings(AnalysisManager &AMgr, @@ -228,7 +245,7 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr, const LocationContext *LC) { // Are we never purging state values? - if (AMgr.getPurgeMode() == PurgeNone) + if (AMgr.options.AnalysisPurgeOpt == PurgeNone) return false; // Is this the beginning of a basic block? @@ -240,7 +257,7 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr, return true; // Run before processing a call. - if (CallEvent::mayBeInlined(S.getStmt())) + if (CallEvent::isCallStmt(S.getStmt())) return true; // Is this an expression that is consumed by another expression? If so, @@ -251,12 +268,12 @@ static bool shouldRemoveDeadBindings(AnalysisManager &AMgr, void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out, const Stmt *ReferenceStmt, - const LocationContext *LC, + const StackFrameContext *LC, const Stmt *DiagnosticStmt, ProgramPoint::Kind K) { assert((K == ProgramPoint::PreStmtPurgeDeadSymbolsKind || - ReferenceStmt == 0) && "PreStmt is not generally supported by " - "the SymbolReaper yet"); + ReferenceStmt == 0) + && "PostStmt is not generally supported by the SymbolReaper yet"); NumRemoveDeadBindings++; CleanedState = Pred->getState(); SymbolReaper SymReaper(LC, ReferenceStmt, SymMgr, getStoreManager()); @@ -276,8 +293,8 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out, // Generate a CleanedNode that has the environment and store cleaned // up. 
Since no symbols are dead, we can optimize and not clean out // the constraint manager. - StmtNodeBuilder Bldr(Pred, Out, *currentBuilderContext); - Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, false, &cleanupTag,K); + StmtNodeBuilder Bldr(Pred, Out, *currBldrCtx); + Bldr.generateNode(DiagnosticStmt, Pred, CleanedState, &cleanupTag, K); } else { // Call checkers with the non-cleaned state so that they could query the @@ -289,7 +306,7 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out, // For each node in CheckedSet, generate CleanedNodes that have the // environment, the store, and the constraints cleaned up but have the // user-supplied states as the predecessors. - StmtNodeBuilder Bldr(CheckedSet, Out, *currentBuilderContext); + StmtNodeBuilder Bldr(CheckedSet, Out, *currBldrCtx); for (ExplodedNodeSet::const_iterator I = CheckedSet.begin(), E = CheckedSet.end(); I != E; ++I) { ProgramStateRef CheckerState = (*I)->getState(); @@ -309,8 +326,7 @@ void ExprEngine::removeDead(ExplodedNode *Pred, ExplodedNodeSet &Out, // generate a transition to that state. ProgramStateRef CleanedCheckerSt = StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState); - Bldr.generateNode(DiagnosticStmt, *I, CleanedCheckerSt, false, - &cleanupTag, K); + Bldr.generateNode(DiagnosticStmt, *I, CleanedCheckerSt, &cleanupTag, K); } } } @@ -320,17 +336,17 @@ void ExprEngine::ProcessStmt(const CFGStmt S, // Reclaim any unnecessary nodes in the ExplodedGraph. G.reclaimRecentlyAllocatedNodes(); - currentStmt = S.getStmt(); + currStmt = S.getStmt(); PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(), - currentStmt->getLocStart(), + currStmt->getLocStart(), "Error evaluating statement"); // Remove dead bindings and symbols. EntryNode = Pred; ExplodedNodeSet CleanedStates; if (shouldRemoveDeadBindings(AMgr, S, Pred, EntryNode->getLocationContext())){ - removeDead(EntryNode, CleanedStates, currentStmt, - Pred->getLocationContext(), currentStmt); + removeDead(EntryNode, CleanedStates, currStmt, + Pred->getStackFrame(), currStmt); } else CleanedStates.Add(EntryNode); @@ -340,44 +356,45 @@ void ExprEngine::ProcessStmt(const CFGStmt S, E = CleanedStates.end(); I != E; ++I) { ExplodedNodeSet DstI; // Visit the statement. - Visit(currentStmt, *I, DstI); + Visit(currStmt, *I, DstI); Dst.insert(DstI); } // Enqueue the new nodes onto the work list. - Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx); + Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx); // NULL out these variables to cleanup. CleanedState = NULL; EntryNode = NULL; - currentStmt = 0; + currStmt = 0; } void ExprEngine::ProcessInitializer(const CFGInitializer Init, ExplodedNode *Pred) { - ExplodedNodeSet Dst; - NodeBuilder Bldr(Pred, Dst, *currentBuilderContext); - - ProgramStateRef State = Pred->getState(); - const CXXCtorInitializer *BMI = Init.getInitializer(); PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(), BMI->getSourceLocation(), "Error evaluating initializer"); - // We don't set EntryNode and currentStmt. And we don't clean up state. + // We don't set EntryNode and currStmt. And we don't clean up state. 
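// The kind of construct ProcessInitializer evaluates here (illustrative
// source only, not from the patch): each scalar member initializer below is
// bound through evalBind with a PostInitializer program point, while the
// base-class subobject was already handled when its CXXConstructExpr was
// visited.
struct BasePart { BasePart() {} };
struct Point : BasePart {
  int x, y;
  Point(int v) : BasePart(), x(v), y(v + 1) {}  // two member initializers
};
void makePoint() { Point p(3); }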
const StackFrameContext *stackFrame = cast(Pred->getLocationContext()); const CXXConstructorDecl *decl = cast(stackFrame->getDecl()); + + ProgramStateRef State = Pred->getState(); SVal thisVal = State->getSVal(svalBuilder.getCXXThis(decl, stackFrame)); + PostInitializer PP(BMI, stackFrame); + ExplodedNodeSet Tmp(Pred); + // Evaluate the initializer, if necessary if (BMI->isAnyMemberInitializer()) { // Constructors build the object directly in the field, // but non-objects must be copied in from the initializer. - if (!isa(BMI->getInit())) { + const Expr *Init = BMI->getInit(); + if (!isa(Init)) { SVal FieldLoc; if (BMI->isIndirectMemberInitializer()) FieldLoc = State->getLValue(BMI->getIndirectMember(), thisVal); @@ -385,22 +402,26 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init, FieldLoc = State->getLValue(BMI->getMember(), thisVal); SVal InitVal = State->getSVal(BMI->getInit(), stackFrame); - State = State->bindLoc(FieldLoc, InitVal); + + Tmp.clear(); + evalBind(Tmp, Init, Pred, FieldLoc, InitVal, /*isInit=*/true, &PP); } } else { assert(BMI->isBaseInitializer() || BMI->isDelegatingInitializer()); // We already did all the work when visiting the CXXConstructExpr. } - // Construct a PostInitializer node whether the state changed or not, + // Construct PostInitializer nodes whether the state changed or not, // so that the diagnostics don't get confused. - PostInitializer PP(BMI, stackFrame); - // Builder automatically add the generated node to the deferred set, - // which are processed in the builder's dtor. - Bldr.generateNode(PP, State, Pred); + ExplodedNodeSet Dst; + NodeBuilder Bldr(Tmp, Dst, *currBldrCtx); + for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E; ++I) { + ExplodedNode *N = *I; + Bldr.generateNode(PP, N->getState(), N); + } // Enqueue the new nodes onto the work list. - Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx); + Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx); } void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D, @@ -424,7 +445,7 @@ void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D, } // Enqueue the new nodes onto the work list. 
- Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx); + Engine.enqueue(Dst, currBldrCtx->getBlock(), currStmtIdx); } void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor, @@ -441,7 +462,7 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor, Loc dest = state->getLValue(varDecl, Pred->getLocationContext()); VisitCXXDestructor(varType, cast(dest).getRegion(), - Dtor.getTriggerStmt(), Pred, Dst); + Dtor.getTriggerStmt(), /*IsBase=*/false, Pred, Dst); } void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D, @@ -459,7 +480,7 @@ void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D, SVal BaseVal = getStoreManager().evalDerivedToBase(ThisVal, BaseTy); VisitCXXDestructor(BaseTy, cast(BaseVal).getRegion(), - CurDtor->getBody(), Pred, Dst); + CurDtor->getBody(), /*IsBase=*/true, Pred, Dst); } void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D, @@ -475,7 +496,7 @@ void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D, VisitCXXDestructor(Member->getType(), cast(FieldVal).getRegion(), - CurDtor->getBody(), Pred, Dst); + CurDtor->getBody(), /*IsBase=*/false, Pred, Dst); } void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D, @@ -488,7 +509,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, S->getLocStart(), "Error evaluating statement"); ExplodedNodeSet Dst; - StmtNodeBuilder Bldr(Pred, DstTop, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, DstTop, *currBldrCtx); // Expressions to ignore. if (const Expr *Ex = dyn_cast(S)) @@ -498,7 +519,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, // this check when we KNOW that there is no block-level subexpression. // The motivation is that this check requires a hashtable lookup. - if (S != currentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S)) + if (S != currStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S)) return; switch (S->getStmtClass()) { @@ -521,21 +542,16 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::CXXNoexceptExprClass: case Stmt::PackExpansionExprClass: case Stmt::SubstNonTypeTemplateParmPackExprClass: + case Stmt::FunctionParmPackExprClass: case Stmt::SEHTryStmtClass: case Stmt::SEHExceptStmtClass: case Stmt::LambdaExprClass: case Stmt::SEHFinallyStmtClass: { - const ExplodedNode *node = Bldr.generateNode(S, Pred, Pred->getState(), - /* sink */ true); - Engine.addAbortedBlock(node, currentBuilderContext->getBlock()); + const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState()); + Engine.addAbortedBlock(node, currBldrCtx->getBlock()); break; } - // We don't handle default arguments either yet, but we can fake it - // for now by just skipping them. - case Stmt::CXXDefaultArgExprClass: - break; - case Stmt::ParenExprClass: llvm_unreachable("ParenExprs already handled."); case Stmt::GenericSelectionExprClass: @@ -607,11 +623,6 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::AtomicExprClass: // Fall through. - // Currently all handling of 'throw' just falls to the CFG. We - // can consider doing more if necessary. - case Stmt::CXXThrowExprClass: - // Fall through. - // Cases we intentionally don't evaluate, since they don't need // to be explicitly evaluated. 
case Stmt::AddrLabelExprClass: @@ -626,6 +637,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::StringLiteralClass: case Stmt::ObjCStringLiteralClass: case Stmt::CXXBindTemporaryExprClass: + case Stmt::CXXDefaultArgExprClass: case Stmt::SubstNonTypeTemplateParmExprClass: case Stmt::CXXNullPtrLiteralExprClass: { Bldr.takeNodes(Pred); @@ -647,7 +659,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this); ExplodedNodeSet Tmp; - StmtNodeBuilder Bldr2(preVisit, Tmp, *currentBuilderContext); + StmtNodeBuilder Bldr2(preVisit, Tmp, *currBldrCtx); const Expr *Ex = cast(S); QualType resultType = Ex->getType(); @@ -656,9 +668,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, it != et; ++it) { ExplodedNode *N = *it; const LocationContext *LCtx = N->getLocationContext(); - SVal result = - svalBuilder.getConjuredSymbolVal(0, Ex, LCtx, resultType, - currentBuilderContext->getCurrentBlockCount()); + SVal result = svalBuilder.conjureSymbolVal(0, Ex, LCtx, resultType, + currBldrCtx->blockCount()); ProgramStateRef state = N->getState()->BindExpr(Ex, LCtx, result); Bldr2.generateNode(S, N, state); } @@ -674,9 +685,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, Bldr.addNodes(Dst); break; - case Stmt::AsmStmtClass: + case Stmt::GCCAsmStmtClass: Bldr.takeNodes(Pred); - VisitAsmStmt(cast(S), Pred, Dst); + VisitGCCAsmStmt(cast(S), Pred, Dst); Bldr.addNodes(Dst); break; @@ -711,11 +722,11 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, Bldr.takeNodes(Pred); - if (AMgr.shouldEagerlyAssume() && + if (AMgr.options.eagerlyAssumeBinOpBifurcation && (B->isRelationalOp() || B->isEqualityOp())) { ExplodedNodeSet Tmp; VisitBinaryOperator(cast(S), Pred, Tmp); - evalEagerlyAssume(Dst, Tmp, cast(S)); + evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, cast(S)); } else VisitBinaryOperator(cast(S), Pred, Dst); @@ -724,8 +735,26 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, break; } + case Stmt::CXXOperatorCallExprClass: { + const CXXOperatorCallExpr *OCE = cast(S); + + // For instance method operators, make sure the 'this' argument has a + // valid region. + const Decl *Callee = OCE->getCalleeDecl(); + if (const CXXMethodDecl *MD = dyn_cast_or_null(Callee)) { + if (MD->isInstance()) { + ProgramStateRef State = Pred->getState(); + const LocationContext *LCtx = Pred->getLocationContext(); + ProgramStateRef NewState = + createTemporaryRegionIfNeeded(State, LCtx, OCE->getArg(0)); + if (NewState != State) + Pred = Bldr.generateNode(OCE, Pred, NewState, /*Tag=*/0, + ProgramPoint::PreStmtKind); + } + } + // FALLTHROUGH + } case Stmt::CallExprClass: - case Stmt::CXXOperatorCallExprClass: case Stmt::CXXMemberCallExprClass: case Stmt::UserDefinedLiteralClass: { Bldr.takeNodes(Pred); @@ -846,12 +875,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Expr::MaterializeTemporaryExprClass: { Bldr.takeNodes(Pred); - const MaterializeTemporaryExpr *Materialize - = cast(S); - if (Materialize->getType()->isRecordType()) - Dst.Add(Pred); - else - CreateCXXTemporaryObject(Materialize, Pred, Dst); + const MaterializeTemporaryExpr *MTE = cast(S); + CreateCXXTemporaryObject(MTE, Pred, Dst); Bldr.addNodes(Dst); break; } @@ -886,12 +911,12 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, Bldr.addNodes(Dst); break; - case Stmt::ObjCAtThrowStmtClass: { + case Stmt::ObjCAtThrowStmtClass: + case Stmt::CXXThrowExprClass: // FIXME: This is not complete. 
We basically treat @throw as // an abort. - Bldr.generateNode(S, Pred, Pred->getState()); + Bldr.generateSink(S, Pred, Pred->getState()); break; - } case Stmt::ReturnStmtClass: Bldr.takeNodes(Pred); @@ -935,10 +960,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred, case Stmt::UnaryOperatorClass: { Bldr.takeNodes(Pred); const UnaryOperator *U = cast(S); - if (AMgr.shouldEagerlyAssume() && (U->getOpcode() == UO_LNot)) { + if (AMgr.options.eagerlyAssumeBinOpBifurcation && (U->getOpcode() == UO_LNot)) { ExplodedNodeSet Tmp; VisitUnaryOperator(U, Pred, Tmp); - evalEagerlyAssume(Dst, Tmp, U); + evalEagerlyAssumeBinOpBifurcation(Dst, Tmp, U); } else VisitUnaryOperator(U, Pred, Dst); @@ -1030,19 +1055,18 @@ bool ExprEngine::replayWithoutInlining(ExplodedNode *N, /// Block entrance. (Update counters). void ExprEngine::processCFGBlockEntrance(const BlockEdge &L, - NodeBuilderWithSinks &nodeBuilder) { + NodeBuilderWithSinks &nodeBuilder, + ExplodedNode *Pred) { // FIXME: Refactor this into a checker. - ExplodedNode *pred = nodeBuilder.getContext().getPred(); - - if (nodeBuilder.getContext().getCurrentBlockCount() >= AMgr.getMaxVisit()) { + if (nodeBuilder.getContext().blockCount() >= AMgr.options.maxBlockVisitOnPath) { static SimpleProgramPointTag tag("ExprEngine : Block count exceeded"); const ExplodedNode *Sink = - nodeBuilder.generateNode(pred->getState(), pred, &tag, true); + nodeBuilder.generateSink(Pred->getState(), Pred, &tag); // Check if we stopped at the top level function or not. // Root node should have the location context of the top most function. - const LocationContext *CalleeLC = pred->getLocation().getLocationContext(); + const LocationContext *CalleeLC = Pred->getLocation().getLocationContext(); const LocationContext *CalleeSF = CalleeLC->getCurrentStackFrame(); const LocationContext *RootLC = (*G.roots_begin())->getLocation().getLocationContext(); @@ -1053,7 +1077,8 @@ void ExprEngine::processCFGBlockEntrance(const BlockEdge &L, // no-inlining policy in the state and enqueuing the new work item on // the list. Replay should almost never fail. Use the stats to catch it // if it does. - if ((!AMgr.NoRetryExhausted && replayWithoutInlining(pred, CalleeLC))) + if ((!AMgr.options.NoRetryExhausted && + replayWithoutInlining(Pred, CalleeLC))) return; NumMaxBlockCountReachedInInlined++; } else @@ -1155,7 +1180,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term, ExplodedNodeSet &Dst, const CFGBlock *DstT, const CFGBlock *DstF) { - currentBuilderContext = &BldCtx; + currBldrCtx = &BldCtx; // Check for NULL conditions; e.g. "for(;;)" if (!Condition) { @@ -1238,7 +1263,7 @@ void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term, builder.markInfeasible(false); } } - currentBuilderContext = 0; + currBldrCtx = 0; } /// processIndirectGoto - Called by CoreEngine. Used to generate successor @@ -1287,10 +1312,25 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) { /// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path /// nodes when the control reaches the end of a function. -void ExprEngine::processEndOfFunction(NodeBuilderContext& BC) { - StateMgr.EndPath(BC.Pred->getState()); +void ExprEngine::processEndOfFunction(NodeBuilderContext& BC, + ExplodedNode *Pred) { + StateMgr.EndPath(Pred->getState()); + ExplodedNodeSet Dst; - getCheckerManager().runCheckersForEndPath(BC, Dst, *this); + if (Pred->getLocationContext()->inTopFrame()) { + // Remove dead symbols. 
+ ExplodedNodeSet AfterRemovedDead; + removeDeadOnEndOfFunction(BC, Pred, AfterRemovedDead); + + // Notify checkers. + for (ExplodedNodeSet::iterator I = AfterRemovedDead.begin(), + E = AfterRemovedDead.end(); I != E; ++I) { + getCheckerManager().runCheckersForEndPath(BC, Dst, *I, *this); + } + } else { + getCheckerManager().runCheckersForEndPath(BC, Dst, Pred, *this); + } + Engine.enqueueEndOfFunction(Dst); } @@ -1404,7 +1444,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) { void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); ProgramStateRef state = Pred->getState(); const LocationContext *LCtx = Pred->getLocationContext(); @@ -1422,7 +1462,7 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D, V = UnknownVal(); } - Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), false, 0, + Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0, ProgramPoint::PostLValueKind); return; } @@ -1434,19 +1474,23 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D, } if (const FunctionDecl *FD = dyn_cast(D)) { SVal V = svalBuilder.getFunctionPointer(FD); - Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), false, 0, + Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0, ProgramPoint::PostLValueKind); return; } if (isa(D)) { - // FIXME: Compute lvalue of fields. - Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, UnknownVal()), - false, 0, ProgramPoint::PostLValueKind); + // FIXME: Compute lvalue of field pointers-to-member. + // Right now we just use a non-null void pointer, so that it gives proper + // results in boolean contexts. 
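// Illustrative source for the pointer-to-member FIXME above: with the
// conjured non-null value, a pointer to a data member stays usable in
// boolean contexts even though its lvalue is not modeled yet (example
// code only, not part of the patch).
struct Rec { int field; };
bool haveField() {
  int Rec::*mp = &Rec::field;   // DeclRefExpr to a FieldDecl
  return mp != 0;               // should now evaluate as true, not unknown
}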
+ SVal V = svalBuilder.conjureSymbolVal(Ex, LCtx, getContext().VoidPtrTy, + currBldrCtx->blockCount()); + state = state->assume(cast(V), true); + Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), 0, + ProgramPoint::PostLValueKind); return; } - assert (false && - "ValueDecl support for this ValueDecl not implemented."); + llvm_unreachable("Support for this Decl not implemented."); } /// VisitArraySubscriptExpr - Transfer function for array accesses @@ -1461,7 +1505,7 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A, ExplodedNodeSet checkerPreStmt; getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this); - StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currBldrCtx); for (ExplodedNodeSet::iterator it = checkerPreStmt.begin(), ei = checkerPreStmt.end(); it != ei; ++it) { @@ -1471,8 +1515,8 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A, state->getSVal(Idx, LCtx), state->getSVal(Base, LCtx)); assert(A->isGLValue()); - Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V), - false, 0, ProgramPoint::PostLValueKind); + Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V), 0, + ProgramPoint::PostLValueKind); } } @@ -1480,52 +1524,40 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A, void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred, ExplodedNodeSet &TopDst) { - StmtNodeBuilder Bldr(Pred, TopDst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, TopDst, *currBldrCtx); ExplodedNodeSet Dst; - Decl *member = M->getMemberDecl(); + ValueDecl *Member = M->getMemberDecl(); - if (VarDecl *VD = dyn_cast(member)) { - assert(M->isGLValue()); + // Handle static member variables and enum constants accessed via + // member syntax. + if (isa(Member) || isa(Member)) { Bldr.takeNodes(Pred); - VisitCommonDeclRefExpr(M, VD, Pred, Dst); + VisitCommonDeclRefExpr(M, Member, Pred, Dst); Bldr.addNodes(Dst); return; } - // Handle C++ method calls. - if (const CXXMethodDecl *MD = dyn_cast(member)) { - Bldr.takeNodes(Pred); - SVal MDVal = svalBuilder.getFunctionPointer(MD); - ProgramStateRef state = - Pred->getState()->BindExpr(M, Pred->getLocationContext(), MDVal); - Bldr.generateNode(M, Pred, state); - return; - } + ProgramStateRef state = Pred->getState(); + const LocationContext *LCtx = Pred->getLocationContext(); + Expr *BaseExpr = M->getBase(); + // Handle C++ method calls. + if (const CXXMethodDecl *MD = dyn_cast(Member)) { + if (MD->isInstance()) + state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr); - FieldDecl *field = dyn_cast(member); - if (!field) // FIXME: skipping member expressions for non-fields - return; + SVal MDVal = svalBuilder.getFunctionPointer(MD); + state = state->BindExpr(M, LCtx, MDVal); - Expr *baseExpr = M->getBase()->IgnoreParens(); - ProgramStateRef state = Pred->getState(); - const LocationContext *LCtx = Pred->getLocationContext(); - SVal baseExprVal = state->getSVal(baseExpr, Pred->getLocationContext()); - if (isa(baseExprVal) || - isa(baseExprVal) || - // FIXME: This can originate by conjuring a symbol for an unknown - // temporary struct object, see test/Analysis/fields.c: - // (p = getit()).x - isa(baseExprVal)) { - Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, UnknownVal())); + Bldr.generateNode(M, Pred, state); return; } - // FIXME: Should we insert some assumption logic in here to determine - // if "Base" is a valid piece of memory? 
Before we put this assumption - // later when using FieldOffset lvals (which we no longer have). + // Handle regular struct fields / member variables. + state = createTemporaryRegionIfNeeded(state, LCtx, BaseExpr); + SVal baseExprVal = state->getSVal(BaseExpr, LCtx); - // For all other cases, compute an lvalue. + FieldDecl *field = cast(Member); SVal L = state->getLValue(field, baseExprVal); if (M->isGLValue()) { if (field->getType()->isReferenceType()) { @@ -1535,7 +1567,7 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred, L = UnknownVal(); } - Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), false, 0, + Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), 0, ProgramPoint::PostLValueKind); } else { Bldr.takeNodes(Pred); @@ -1548,40 +1580,48 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred, /// This method is used by evalStore and (soon) VisitDeclStmt, and others. void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE, ExplodedNode *Pred, - SVal location, SVal Val, bool atDeclInit) { + SVal location, SVal Val, + bool atDeclInit, const ProgramPoint *PP) { + + const LocationContext *LC = Pred->getLocationContext(); + PostStmt PS(StoreE, LC); + if (!PP) + PP = &PS; // Do a previsit of the bind. ExplodedNodeSet CheckedSet; getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val, - StoreE, *this, - ProgramPoint::PostStmtKind); + StoreE, *this, *PP); + // If the location is not a 'Loc', it will already be handled by + // the checkers. There is nothing left to do. + if (!isa(location)) { + Dst = CheckedSet; + return; + } + ExplodedNodeSet TmpDst; - StmtNodeBuilder Bldr(CheckedSet, TmpDst, *currentBuilderContext); + StmtNodeBuilder Bldr(CheckedSet, TmpDst, *currBldrCtx); - const LocationContext *LC = Pred->getLocationContext(); for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end(); I!=E; ++I) { ExplodedNode *PredI = *I; ProgramStateRef state = PredI->getState(); - - if (atDeclInit) { - const VarRegion *VR = - cast(cast(location).getRegion()); - - state = state->bindDecl(VR, Val); - } else { - state = state->bindLoc(location, Val); - } - + + // When binding the value, pass on the hint that this is a initialization. + // For initializations, we do not need to inform clients of region + // changes. + state = state->bindLoc(cast(location), + Val, /* notifyChanges = */ !atDeclInit); + const MemRegion *LocReg = 0; - if (loc::MemRegionVal *LocRegVal = dyn_cast(&location)) + if (loc::MemRegionVal *LocRegVal = dyn_cast(&location)) { LocReg = LocRegVal->getRegion(); - + } + const ProgramPoint L = PostStore(StoreE, LC, LocReg, 0); - Bldr.generateNode(L, PredI, state, false); + Bldr.generateNode(L, state, PredI); } - Dst.insert(TmpDst); } @@ -1671,7 +1711,7 @@ void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst, if (Tmp.empty()) return; - StmtNodeBuilder Bldr(Tmp, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Tmp, Dst, *currBldrCtx); if (location.isUndef()) return; @@ -1684,8 +1724,7 @@ void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst, // This is important. We must nuke the old binding. 
Bldr.generateNode(NodeEx, *NI, state->BindExpr(BoundEx, LCtx, UnknownVal()), - false, tag, - ProgramPoint::PostLoadKind); + tag, ProgramPoint::PostLoadKind); } else { if (LoadTy.isNull()) @@ -1693,7 +1732,7 @@ void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst, SVal V = state->getSVal(cast(location), LoadTy); Bldr.generateNode(NodeEx, *NI, state->bindExprAndLocation(BoundEx, LCtx, location, V), - false, tag, ProgramPoint::PostLoadKind); + tag, ProgramPoint::PostLoadKind); } } } @@ -1706,7 +1745,7 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst, SVal location, const ProgramPointTag *tag, bool isLoad) { - StmtNodeBuilder BldrTop(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder BldrTop(Pred, Dst, *currBldrCtx); // Early checks for performance reason. if (location.isUnknown()) { return; @@ -1714,7 +1753,7 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst, ExplodedNodeSet Src; BldrTop.takeNodes(Pred); - StmtNodeBuilder Bldr(Pred, Src, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Src, *currBldrCtx); if (Pred->getState() != state) { // Associate this new state with an ExplodedNode. // FIXME: If I pass null tag, the graph is incorrect, e.g for @@ -1725,9 +1764,8 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst, // instead "int *p" is noted as // "Variable 'p' initialized to a null pointer value" - // FIXME: why is 'tag' not used instead of etag? - static SimpleProgramPointTag etag("ExprEngine: Location"); - Bldr.generateNode(NodeEx, Pred, state, false, &etag); + static SimpleProgramPointTag tag("ExprEngine: Location"); + Bldr.generateNode(NodeEx, Pred, state, &tag); } ExplodedNodeSet Tmp; getCheckerManager().runCheckersForLocation(Tmp, Src, location, isLoad, @@ -1736,16 +1774,18 @@ void ExprEngine::evalLocation(ExplodedNodeSet &Dst, } std::pair -ExprEngine::getEagerlyAssumeTags() { +ExprEngine::geteagerlyAssumeBinOpBifurcationTags() { static SimpleProgramPointTag - EagerlyAssumeTrue("ExprEngine : Eagerly Assume True"), - EagerlyAssumeFalse("ExprEngine : Eagerly Assume False"); - return std::make_pair(&EagerlyAssumeTrue, &EagerlyAssumeFalse); + eagerlyAssumeBinOpBifurcationTrue("ExprEngine : Eagerly Assume True"), + eagerlyAssumeBinOpBifurcationFalse("ExprEngine : Eagerly Assume False"); + return std::make_pair(&eagerlyAssumeBinOpBifurcationTrue, + &eagerlyAssumeBinOpBifurcationFalse); } -void ExprEngine::evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src, - const Expr *Ex) { - StmtNodeBuilder Bldr(Src, Dst, *currentBuilderContext); +void ExprEngine::evalEagerlyAssumeBinOpBifurcation(ExplodedNodeSet &Dst, + ExplodedNodeSet &Src, + const Expr *Ex) { + StmtNodeBuilder Bldr(Src, Dst, *currBldrCtx); for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) { ExplodedNode *Pred = *I; @@ -1762,28 +1802,28 @@ void ExprEngine::evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src, nonloc::SymbolVal *SEV = dyn_cast(&V); if (SEV && SEV->isExpression()) { const std::pair &tags = - getEagerlyAssumeTags(); + geteagerlyAssumeBinOpBifurcationTags(); // First assume that the condition is true. if (ProgramStateRef StateTrue = state->assume(*SEV, true)) { SVal Val = svalBuilder.makeIntVal(1U, Ex->getType()); StateTrue = StateTrue->BindExpr(Ex, Pred->getLocationContext(), Val); - Bldr.generateNode(Ex, Pred, StateTrue, false, tags.first); + Bldr.generateNode(Ex, Pred, StateTrue, tags.first); } // Next, assume that the condition is false. 
if (ProgramStateRef StateFalse = state->assume(*SEV, false)) { SVal Val = svalBuilder.makeIntVal(0U, Ex->getType()); StateFalse = StateFalse->BindExpr(Ex, Pred->getLocationContext(), Val); - Bldr.generateNode(Ex, Pred, StateFalse, false, tags.second); + Bldr.generateNode(Ex, Pred, StateFalse, tags.second); } } } } -void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred, - ExplodedNodeSet &Dst) { - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); +void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred, + ExplodedNodeSet &Dst) { + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); // We have processed both the inputs and the outputs. All of the outputs // should evaluate to Locs. Nuke all of their values. @@ -1793,7 +1833,7 @@ void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred, ProgramStateRef state = Pred->getState(); - for (AsmStmt::const_outputs_iterator OI = A->begin_outputs(), + for (GCCAsmStmt::const_outputs_iterator OI = A->begin_outputs(), OE = A->end_outputs(); OI != OE; ++OI) { SVal X = state->getSVal(*OI, Pred->getLocationContext()); assert (!isa(X)); // Should be an Lval, or unknown, undef. @@ -1807,7 +1847,7 @@ void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred, void ExprEngine::VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); Bldr.generateNode(A, Pred, Pred->getState()); } @@ -1932,7 +1972,7 @@ struct DOTGraphTraits : if (StmtPoint *L = dyn_cast(&Loc)) { const Stmt *S = L->getStmt(); - Out << S->getStmtClassName() << ' ' << (void*) S << ' '; + Out << S->getStmtClassName() << ' ' << (const void*) S << ' '; LangOptions LO; // FIXME. S->printPretty(Out, 0, PrintingPolicy(LO)); printLocation(Out, S->getLocStart()); @@ -2038,8 +2078,8 @@ struct DOTGraphTraits : } ProgramStateRef state = N->getState(); - Out << "\\|StateID: " << (void*) state.getPtr() - << " NodeID: " << (void*) N << "\\|"; + Out << "\\|StateID: " << (const void*) state.getPtr() + << " NodeID: " << (const void*) N << "\\|"; state->printDOT(Out); Out << "\\l"; diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp index 46cba81..00b2f4a 100644 --- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp +++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp @@ -45,8 +45,8 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B, // EXPERIMENTAL: "Conjured" symbols. // FIXME: Handle structs. if (RightV.isUnknown()) { - unsigned Count = currentBuilderContext->getCurrentBlockCount(); - RightV = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LCtx, Count); + unsigned Count = currBldrCtx->blockCount(); + RightV = svalBuilder.conjureSymbolVal(0, B->getRHS(), LCtx, Count); } // Simulate the effects of a "store": bind the value of the RHS // to the L-Value represented by the LHS. @@ -57,7 +57,7 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B, } if (!B->isAssignmentOp()) { - StmtNodeBuilder Bldr(*it, Tmp2, *currentBuilderContext); + StmtNodeBuilder Bldr(*it, Tmp2, *currBldrCtx); if (B->isAdditiveOp()) { // If one of the operands is a location, conjure a symbol for the other @@ -65,16 +65,16 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B, // results in an ElementRegion. // TODO: This can be removed after we enable history tracking with // SymSymExpr. 
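// Why the eager bifurcation in evalEagerlyAssumeBinOpBifurcation above pays
// off (illustrative source, not from the patch): splitting the state at the
// comparison itself lets the later branch on 'eq' recover the fact that
// x == 0 on the true path, exposing the division by zero without flagging
// the safe one.
int divide(int x) {
  int eq = (x == 0);   // state is split here into {x == 0} and {x != 0}
  if (eq)
    return 10 / x;     // only reachable with x == 0: division by zero
  return 10 / x;       // safe: x != 0 on this path
}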
- unsigned Count = currentBuilderContext->getCurrentBlockCount(); + unsigned Count = currBldrCtx->blockCount(); if (isa(LeftV) && RHS->getType()->isIntegerType() && RightV.isUnknown()) { - RightV = svalBuilder.getConjuredSymbolVal(RHS, LCtx, - RHS->getType(), Count); + RightV = svalBuilder.conjureSymbolVal(RHS, LCtx, RHS->getType(), + Count); } if (isa(RightV) && LHS->getType()->isIntegerType() && LeftV.isUnknown()) { - LeftV = svalBuilder.getConjuredSymbolVal(LHS, LCtx, - LHS->getType(), Count); + LeftV = svalBuilder.conjureSymbolVal(LHS, LCtx, LHS->getType(), + Count); } } @@ -145,15 +145,11 @@ void ExprEngine::VisitBinaryOperator(const BinaryOperator* B, SVal LHSVal; if (Result.isUnknown()) { - - unsigned Count = currentBuilderContext->getCurrentBlockCount(); - // The symbolic value is actually for the type of the left-hand side // expression, not the computation type, as this is the value the // LValue on the LHS will bind to. - LHSVal = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LCtx, - LTy, Count); - + LHSVal = svalBuilder.conjureSymbolVal(0, B->getRHS(), LCtx, LTy, + currBldrCtx->blockCount()); // However, we need to convert the symbol to the computation type. Result = svalBuilder.evalCast(LHSVal, CTy, LTy); } @@ -208,11 +204,10 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred, } ExplodedNodeSet Tmp; - StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx); Bldr.generateNode(BE, Pred, State->BindExpr(BE, Pred->getLocationContext(), V), - false, 0, - ProgramPoint::PostLValueKind); + 0, ProgramPoint::PostLValueKind); // FIXME: Move all post/pre visits to ::Visit(). getCheckerManager().runCheckersForPostStmt(Dst, Tmp, BE, *this); @@ -242,12 +237,14 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, if (const ExplicitCastExpr *ExCast=dyn_cast_or_null(CastE)) T = ExCast->getTypeAsWritten(); - StmtNodeBuilder Bldr(dstPreStmt, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(dstPreStmt, Dst, *currBldrCtx); for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end(); I != E; ++I) { Pred = *I; - + ProgramStateRef state = Pred->getState(); + const LocationContext *LCtx = Pred->getLocationContext(); + switch (CastE->getCastKind()) { case CK_LValueToRValue: llvm_unreachable("LValueToRValue casts handled earlier."); @@ -267,7 +264,10 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, case CK_NonAtomicToAtomic: // True no-ops. case CK_NoOp: - case CK_FunctionToPointerDecay: { + case CK_ConstructorConversion: + case CK_UserDefinedConversion: + case CK_FunctionToPointerDecay: + case CK_BuiltinFnToFnPtr: { // Copy the SVal of Ex to CastE. ProgramStateRef state = Pred->getState(); const LocationContext *LCtx = Pred->getLocationContext(); @@ -276,6 +276,9 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, Bldr.generateNode(CastE, Pred, state); continue; } + case CK_MemberPointerToBoolean: + // FIXME: For now, member pointers are represented by void *. + // FALLTHROUGH case CK_Dependent: case CK_ArrayToPointerDecay: case CK_BitCast: @@ -304,8 +307,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, case CK_AnyPointerToBlockPointerCast: case CK_ObjCObjectLValueCast: { // Delegate to SValBuilder to process. 
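// Conversions newly grouped with the no-op casts above (illustrative source
// only): both casts below simply propagate the operand's SVal to the cast
// expression.
struct Meters { double v; Meters(double d) : v(d) {} };
struct Ratio  { operator double() const { return 0.5; } };
double convert(Ratio r) {
  Meters m = 3.0;           // CK_ConstructorConversion
  return m.v * double(r);   // CK_UserDefinedConversion
}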
- ProgramStateRef state = Pred->getState(); - const LocationContext *LCtx = Pred->getLocationContext(); SVal V = state->getSVal(Ex, LCtx); V = svalBuilder.evalCast(V, T, ExTy); state = state->BindExpr(CastE, LCtx, V); @@ -315,8 +316,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, case CK_DerivedToBase: case CK_UncheckedDerivedToBase: { // For DerivedToBase cast, delegate to the store manager. - ProgramStateRef state = Pred->getState(); - const LocationContext *LCtx = Pred->getLocationContext(); SVal val = state->getSVal(Ex, LCtx); val = getStoreManager().evalDerivedToBase(val, CastE); state = state->BindExpr(CastE, LCtx, val); @@ -325,8 +324,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, } // Handle C++ dyn_cast. case CK_Dynamic: { - ProgramStateRef state = Pred->getState(); - const LocationContext *LCtx = Pred->getLocationContext(); SVal val = state->getSVal(Ex, LCtx); // Compute the type of the result. @@ -347,7 +344,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, if (T->isReferenceType()) { // A bad_cast exception is thrown if input value is a reference. // Currently, we model this, by generating a sink. - Bldr.generateNode(CastE, Pred, state, true); + Bldr.generateSink(CastE, Pred, state); continue; } else { // If the cast fails on a pointer, bind to 0. @@ -356,9 +353,9 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, } else { // If we don't know if the cast succeeded, conjure a new symbol. if (val.isUnknown()) { - DefinedOrUnknownSVal NewSym = svalBuilder.getConjuredSymbolVal(NULL, - CastE, LCtx, resultType, - currentBuilderContext->getCurrentBlockCount()); + DefinedOrUnknownSVal NewSym = + svalBuilder.conjureSymbolVal(0, CastE, LCtx, resultType, + currBldrCtx->blockCount()); state = state->BindExpr(CastE, LCtx, NewSym); } else // Else, bind to the derived region value. @@ -367,27 +364,29 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, Bldr.generateNode(CastE, Pred, state); continue; } + case CK_NullToMemberPointer: { + // FIXME: For now, member pointers are represented by void *. + SVal V = svalBuilder.makeIntValWithPtrWidth(0, true); + state = state->BindExpr(CastE, LCtx, V); + Bldr.generateNode(CastE, Pred, state); + continue; + } // Various C++ casts that are not handled yet. case CK_ToUnion: case CK_BaseToDerived: - case CK_NullToMemberPointer: case CK_BaseToDerivedMemberPointer: case CK_DerivedToBaseMemberPointer: case CK_ReinterpretMemberPointer: - case CK_UserDefinedConversion: - case CK_ConstructorConversion: case CK_VectorSplat: - case CK_MemberPointerToBoolean: case CK_LValueBitCast: { // Recover some path-sensitivty by conjuring a new value. 
QualType resultType = CastE->getType(); if (CastE->isGLValue()) resultType = getContext().getPointerType(resultType); - const LocationContext *LCtx = Pred->getLocationContext(); - SVal result = svalBuilder.getConjuredSymbolVal(NULL, CastE, LCtx, - resultType, currentBuilderContext->getCurrentBlockCount()); - ProgramStateRef state = Pred->getState()->BindExpr(CastE, LCtx, - result); + SVal result = svalBuilder.conjureSymbolVal(0, CastE, LCtx, + resultType, + currBldrCtx->blockCount()); + state = state->BindExpr(CastE, LCtx, result); Bldr.generateNode(CastE, Pred, state); continue; } @@ -398,7 +397,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex, void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder B(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder B(Pred, Dst, *currBldrCtx); const InitListExpr *ILE = cast(CL->getInitializer()->IgnoreParens()); @@ -442,7 +441,7 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred, ExplodedNodeSet dstPreVisit; getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, DS, *this); - StmtNodeBuilder B(dstPreVisit, Dst, *currentBuilderContext); + StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx); const VarDecl *VD = dyn_cast(D); for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end(); I!=E; ++I) { @@ -478,8 +477,8 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred, Ty = getContext().getPointerType(Ty); } - InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx, LC, Ty, - currentBuilderContext->getCurrentBlockCount()); + InitVal = svalBuilder.conjureSymbolVal(0, InitEx, LC, Ty, + currBldrCtx->blockCount()); } B.takeNodes(N); ExplodedNodeSet Dst2; @@ -488,7 +487,7 @@ void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred, } } else { - B.generateNode(DS, N,state->bindDeclWithNoInit(state->getRegion(VD, LC))); + B.generateNode(DS, N, state); } } } @@ -498,7 +497,7 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred, assert(B->getOpcode() == BO_LAnd || B->getOpcode() == BO_LOr); - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); ProgramStateRef state = Pred->getState(); ExplodedNode *N = Pred; @@ -531,10 +530,28 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred, else { // If there is no terminator, by construction the last statement // in SrcBlock is the value of the enclosing expression. + // However, we still need to constrain that value to be 0 or 1. assert(!SrcBlock->empty()); CFGStmt Elem = cast(*SrcBlock->rbegin()); - const Stmt *S = Elem.getStmt(); - X = N->getState()->getSVal(S, Pred->getLocationContext()); + const Expr *RHS = cast(Elem.getStmt()); + SVal RHSVal = N->getState()->getSVal(RHS, Pred->getLocationContext()); + + DefinedOrUnknownSVal DefinedRHS = cast(RHSVal); + ProgramStateRef StTrue, StFalse; + llvm::tie(StTrue, StFalse) = N->getState()->assume(DefinedRHS); + if (StTrue) { + if (StFalse) { + // We can't constrain the value to 0 or 1; the best we can do is a cast. + X = getSValBuilder().evalCast(RHSVal, B->getType(), RHS->getType()); + } else { + // The value is known to be true. + X = getSValBuilder().makeIntVal(1, B->getType()); + } + } else { + // The value is known to be false. 
+ assert(StFalse && "Infeasible path!"); + X = getSValBuilder().makeIntVal(0, B->getType()); + } } Bldr.generateNode(B, Pred, state->BindExpr(B, Pred->getLocationContext(), X)); @@ -543,14 +560,15 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred, void ExprEngine::VisitInitListExpr(const InitListExpr *IE, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder B(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder B(Pred, Dst, *currBldrCtx); ProgramStateRef state = Pred->getState(); const LocationContext *LCtx = Pred->getLocationContext(); QualType T = getContext().getCanonicalType(IE->getType()); unsigned NumInitElements = IE->getNumInits(); - if (T->isArrayType() || T->isRecordType() || T->isVectorType()) { + if (T->isArrayType() || T->isRecordType() || T->isVectorType() || + T->isAnyComplexType()) { llvm::ImmutableList vals = getBasicVals().getEmptySValList(); // Handle base case where the initializer has no elements. @@ -590,7 +608,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex, const Expr *R, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder B(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder B(Pred, Dst, *currBldrCtx); ProgramStateRef state = Pred->getState(); const LocationContext *LCtx = Pred->getLocationContext(); const CFGBlock *SrcBlock = 0; @@ -631,7 +649,7 @@ void ExprEngine::VisitGuardedExpr(const Expr *Ex, void ExprEngine:: VisitOffsetOfExpr(const OffsetOfExpr *OOE, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder B(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder B(Pred, Dst, *currBldrCtx); APSInt IV; if (OOE->EvaluateAsInt(IV, getContext())) { assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType())); @@ -650,7 +668,7 @@ void ExprEngine:: VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); QualType T = Ex->getTypeOfArgument(); @@ -683,7 +701,7 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex, void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); switch (U->getOpcode()) { default: { Bldr.takeNodes(Pred); @@ -816,7 +834,7 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U, evalLoad(Tmp, U, Ex, Pred, state, loc); ExplodedNodeSet Dst2; - StmtNodeBuilder Bldr(Tmp, Dst2, *currentBuilderContext); + StmtNodeBuilder Bldr(Tmp, Dst2, *currBldrCtx); for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end();I!=E;++I) { state = (*I)->getState(); @@ -840,16 +858,17 @@ void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U, if (U->getType()->isAnyPointerType()) RHS = svalBuilder.makeArrayIndex(1); - else + else if (U->getType()->isIntegralOrEnumerationType()) RHS = svalBuilder.makeIntVal(1, U->getType()); + else + RHS = UnknownVal(); SVal Result = evalBinOp(state, Op, V2, RHS, U->getType()); // Conjure a new symbol if necessary to recover precision. 
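// The increment handling above now covers non-integral scalars as well
// (illustrative source, not from the patch): the float case gets an unknown
// increment value instead of an integer constant of the wrong type.
void bump(float f, int *p, int i) {
  ++f;   // neither pointer nor integral: the added value is UnknownVal
  ++p;   // pointer type: the added value is an array index of 1
  ++i;   // integral type: the added value is the integer constant 1
}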
if (Result.isUnknown()){ DefinedOrUnknownSVal SymVal = - svalBuilder.getConjuredSymbolVal(NULL, Ex, LCtx, - currentBuilderContext->getCurrentBlockCount()); + svalBuilder.conjureSymbolVal(0, Ex, LCtx, currBldrCtx->blockCount()); Result = SymVal; // If the value is a location, ++/-- should always preserve diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp index 44a860f..b3baa79 100644 --- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp +++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp @@ -25,20 +25,28 @@ using namespace ento; void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); const Expr *tempExpr = ME->GetTemporaryExpr()->IgnoreParens(); ProgramStateRef state = Pred->getState(); const LocationContext *LCtx = Pred->getLocationContext(); // Bind the temporary object to the value of the expression. Then bind // the expression to the location of the object. - SVal V = state->getSVal(tempExpr, Pred->getLocationContext()); - - const MemRegion *R = - svalBuilder.getRegionManager().getCXXTempObjectRegion(ME, LCtx); + SVal V = state->getSVal(tempExpr, LCtx); + + // If the value is already a CXXTempObjectRegion, it is fine as it is. + // Otherwise, create a new CXXTempObjectRegion, and copy the value into it. + const MemRegion *MR = V.getAsRegion(); + if (!MR || !isa(MR)) { + const MemRegion *R = + svalBuilder.getRegionManager().getCXXTempObjectRegion(ME, LCtx); + + SVal L = loc::MemRegionVal(R); + state = state->bindLoc(L, V); + V = L; + } - state = state->bindLoc(loc::MemRegionVal(R), V); - Bldr.generateNode(ME, Pred, state->BindExpr(ME, LCtx, loc::MemRegionVal(R))); + Bldr.generateNode(ME, Pred, state->BindExpr(ME, LCtx, V)); } void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE, @@ -53,9 +61,9 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE, case CXXConstructExpr::CK_Complete: { // See if we're constructing an existing region by looking at the next // element in the CFG. - const CFGBlock *B = currentBuilderContext->getBlock(); - if (currentStmtIdx + 1 < B->size()) { - CFGElement Next = (*B)[currentStmtIdx+1]; + const CFGBlock *B = currBldrCtx->getBlock(); + if (currStmtIdx + 1 < B->size()) { + CFGElement Next = (*B)[currStmtIdx+1]; // Is this a constructor for a local variable? if (const CFGStmt *StmtElem = dyn_cast(&Next)) { @@ -101,8 +109,12 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE, // FIXME: This will eventually need to handle new-expressions as well. } - // If we couldn't find an existing region to construct into, we'll just - // generate a symbolic region, which is fine. + // If we couldn't find an existing region to construct into, assume we're + // constructing a temporary. 
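// Illustrative source for the temporary handling above (example code only):
// the materialized temporary needs a real CXXTempObjectRegion so that the
// member access through the reference reads a defined value.
struct Box { int val; Box(int v) : val(v) {} int get() const { return val; } };
int useBox() {
  const Box &ref = Box(7);   // MaterializeTemporaryExpr -> temp object region
  return ref.get();          // the load goes through the temporary's region
}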
+ if (!Target) { + MemRegionManager &MRMgr = getSValBuilder().getRegionManager(); + Target = MRMgr.getCXXTempObjectRegion(CE, LCtx); + } break; } @@ -137,7 +149,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE, *Call, *this); ExplodedNodeSet DstInvalidated; - StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currentBuilderContext); + StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx); for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end(); I != E; ++I) defaultEvalCall(Bldr, *I, *Call); @@ -151,6 +163,7 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE, void ExprEngine::VisitCXXDestructor(QualType ObjectType, const MemRegion *Dest, const Stmt *S, + bool IsBaseDtor, ExplodedNode *Pred, ExplodedNodeSet &Dst) { const LocationContext *LCtx = Pred->getLocationContext(); @@ -171,7 +184,7 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType, CallEventManager &CEMgr = getStateManager().getCallEventManager(); CallEventRef Call = - CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, State, LCtx); + CEMgr.getCXXDestructorCall(DtorDecl, S, Dest, IsBaseDtor, State, LCtx); PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(), Call->getSourceRange().getBegin(), @@ -182,7 +195,7 @@ void ExprEngine::VisitCXXDestructor(QualType ObjectType, *Call, *this); ExplodedNodeSet DstInvalidated; - StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currentBuilderContext); + StmtNodeBuilder Bldr(DstPreCall, DstInvalidated, *currBldrCtx); for (ExplodedNodeSet::iterator I = DstPreCall.begin(), E = DstPreCall.end(); I != E; ++I) defaultEvalCall(Bldr, *I, *Call); @@ -198,12 +211,13 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred, // Also, we need to decide how allocators actually work -- they're not // really part of the CXXNewExpr because they happen BEFORE the // CXXConstructExpr subexpression. See PR12014 for some discussion. - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); - unsigned blockCount = currentBuilderContext->getCurrentBlockCount(); + unsigned blockCount = currBldrCtx->blockCount(); const LocationContext *LCtx = Pred->getLocationContext(); - DefinedOrUnknownSVal symVal = - svalBuilder.getConjuredSymbolVal(0, CNE, LCtx, CNE->getType(), blockCount); + DefinedOrUnknownSVal symVal = svalBuilder.conjureSymbolVal(0, CNE, LCtx, + CNE->getType(), + blockCount); ProgramStateRef State = Pred->getState(); CallEventManager &CEMgr = getStateManager().getCallEventManager(); @@ -215,6 +229,18 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred, // we should be using the usual pre-/(default-)eval-/post-call checks here. State = Call->invalidateRegions(blockCount); + // If we're compiling with exceptions enabled, and this allocation function + // is not declared as non-throwing, failures /must/ be signalled by + // exceptions, and thus the return value will never be NULL. + // C++11 [basic.stc.dynamic.allocation]p3. + FunctionDecl *FD = CNE->getOperatorNew(); + if (FD && getContext().getLangOpts().CXXExceptions) { + QualType Ty = FD->getType(); + if (const FunctionProtoType *ProtoType = Ty->getAs()) + if (!ProtoType->isNothrow(getContext())) + State = State->assume(symVal, true); + } + if (CNE->isArray()) { // FIXME: allocating an array requires simulating the constructors. // For now, just return a symbolicated region. 
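// What the non-null assumption above means for analyzed code (illustrative
// source only): with exceptions enabled, a throwing allocator cannot return
// null, so only the nothrow form below still needs a null check.
#include <new>
int *allocate() {
  int *p = new int(1);                 // assumed non-null under -fexceptions
  int *q = new (std::nothrow) int(2);  // may be null: non-throwing allocator
  if (!q)
    return p;                          // p needs no null check here
  delete p;
  return q;
}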
@@ -232,11 +258,12 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred, // CXXNewExpr, we need to make sure that the constructed object is not // immediately invalidated here. (The placement call should happen before // the constructor call anyway.) - FunctionDecl *FD = CNE->getOperatorNew(); if (FD && FD->isReservedGlobalPlacementOperator()) { // Non-array placement new should always return the placement location. SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx); - State = State->BindExpr(CNE, LCtx, PlacementLoc); + SVal Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(), + CNE->getPlacementArg(0)->getType()); + State = State->BindExpr(CNE, LCtx, Result); } else { State = State->BindExpr(CNE, LCtx, symVal); } @@ -259,7 +286,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred, void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); ProgramStateRef state = Pred->getState(); Bldr.generateNode(CDE, Pred, state); } @@ -274,18 +301,18 @@ void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS, } const LocationContext *LCtx = Pred->getLocationContext(); - SVal V = svalBuilder.getConjuredSymbolVal(CS, LCtx, VD->getType(), - currentBuilderContext->getCurrentBlockCount()); + SVal V = svalBuilder.conjureSymbolVal(CS, LCtx, VD->getType(), + currBldrCtx->blockCount()); ProgramStateRef state = Pred->getState(); state = state->bindLoc(state->getLValue(VD, LCtx), V); - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); Bldr.generateNode(CS, Pred, state); } void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred, ExplodedNodeSet &Dst) { - StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx); // Get the this object region from StoreManager. const LocationContext *LCtx = Pred->getLocationContext(); diff --git a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp index 3b2e4ec..3ead081 100644 --- a/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp +++ b/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp @@ -17,19 +17,22 @@ #include "clang/StaticAnalyzer/Core/CheckerManager.h" #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h" #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" +#include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclCXX.h" +#include "clang/AST/ParentMap.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" #include "llvm/Support/SaveAndRestore.h" -#define CXX_INLINING_ENABLED 1 - using namespace clang; using namespace ento; STATISTIC(NumOfDynamicDispatchPathSplits, "The # of times we split the path due to imprecise dynamic dispatch info"); +STATISTIC(NumInlinedCalls, + "The # of times we inlined a call"); + void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) { // Get the entry block in the CFG of the callee. const StackFrameContext *calleeCtx = CE.getCalleeContext(); @@ -64,35 +67,47 @@ static std::pairgetLocation().getLocationContext()->getCurrentStackFrame(); - // Back up through the ExplodedGraph until we reach a statement node. + // Back up through the ExplodedGraph until we reach a statement node in this + // stack frame. 
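The placement-new change binds the new-expression to the placement location, cast from the placement argument's type (typically void*) to the expression's own pointer type. The source pattern being modelled, in miniature (plain C++):

    #include <new>
    #include <cassert>

    struct Point { int x, y; };

    int main() {
      alignas(Point) unsigned char buffer[sizeof(Point)];

      // The placement argument has type void*, but the new-expression itself
      // has type Point*: its value is the placement location, reinterpreted
      // at the constructed object's type.
      void *raw = buffer;
      Point *p = new (raw) Point{1, 2};
      assert(static_cast<void *>(p) == raw);

      p->~Point();   // placement new requires an explicit destructor call
      return 0;
    }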
while (Node) { const ProgramPoint &PP = Node->getLocation(); - if (const StmtPoint *SP = dyn_cast(&PP)) { - S = SP->getStmt(); - break; - } else if (const CallExitEnd *CEE = dyn_cast(&PP)) { - S = CEE->getCalleeContext()->getCallSite(); - if (S) + if (PP.getLocationContext()->getCurrentStackFrame() == SF) { + if (const StmtPoint *SP = dyn_cast(&PP)) { + S = SP->getStmt(); break; - // If we have an implicit call, we'll probably end up with a - // StmtPoint inside the callee, which is acceptable. - // (It's possible a function ONLY contains implicit calls -- such as an - // implicitly-generated destructor -- so we shouldn't just skip back to - // the CallEnter node and keep going.) + } else if (const CallExitEnd *CEE = dyn_cast(&PP)) { + S = CEE->getCalleeContext()->getCallSite(); + if (S) + break; + + // If there is no statement, this is an implicitly-generated call. + // We'll walk backwards over it and then continue the loop to find + // an actual statement. + const CallEnter *CE; + do { + Node = Node->getFirstPred(); + CE = Node->getLocationAs(); + } while (!CE || CE->getCalleeContext() != CEE->getCalleeContext()); + + // Continue searching the graph. + } } else if (const CallEnter *CE = dyn_cast(&PP)) { // If we reached the CallEnter for this function, it has no statements. if (CE->getCalleeContext() == SF) break; } + if (Node->pred_empty()) + return std::pair((Stmt*)0, (CFGBlock*)0); + Node = *Node->pred_begin(); } const CFGBlock *Blk = 0; if (S) { // Now, get the enclosing basic block. - while (Node && Node->pred_size() >=1 ) { + while (Node) { const ProgramPoint &PP = Node->getLocation(); if (isa(PP) && (PP.getLocationContext()->getCurrentStackFrame() == SF)) { @@ -100,6 +115,9 @@ static std::pairpred_empty()) + return std::pair(S, (CFGBlock*)0); + Node = *Node->pred_begin(); } } @@ -107,6 +125,82 @@ static std::pair(S, Blk); } +/// Adjusts a return value when the called function's return type does not +/// match the caller's expression type. This can happen when a dynamic call +/// is devirtualized, and the overridding method has a covariant (more specific) +/// return type than the parent's method. For C++ objects, this means we need +/// to add base casts. +static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy, + StoreManager &StoreMgr) { + // For now, the only adjustments we handle apply only to locations. + if (!isa(V)) + return V; + + // If the types already match, don't do any unnecessary work. + ExpectedTy = ExpectedTy.getCanonicalType(); + ActualTy = ActualTy.getCanonicalType(); + if (ExpectedTy == ActualTy) + return V; + + // No adjustment is needed between Objective-C pointer types. + if (ExpectedTy->isObjCObjectPointerType() && + ActualTy->isObjCObjectPointerType()) + return V; + + // C++ object pointers may need "derived-to-base" casts. + const CXXRecordDecl *ExpectedClass = ExpectedTy->getPointeeCXXRecordDecl(); + const CXXRecordDecl *ActualClass = ActualTy->getPointeeCXXRecordDecl(); + if (ExpectedClass && ActualClass) { + CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, + /*DetectVirtual=*/false); + if (ActualClass->isDerivedFrom(ExpectedClass, Paths) && + !Paths.isAmbiguous(ActualTy->getCanonicalTypeUnqualified())) { + return StoreMgr.evalDerivedToBase(V, Paths.front()); + } + } + + // Unfortunately, Objective-C does not enforce that overridden methods have + // covariant return types, so we can't assert that that never happens. + // Be safe and return UnknownVal(). 
+ return UnknownVal(); +} + +void ExprEngine::removeDeadOnEndOfFunction(NodeBuilderContext& BC, + ExplodedNode *Pred, + ExplodedNodeSet &Dst) { + NodeBuilder Bldr(Pred, Dst, BC); + + // Find the last statement in the function and the corresponding basic block. + const Stmt *LastSt = 0; + const CFGBlock *Blk = 0; + llvm::tie(LastSt, Blk) = getLastStmt(Pred); + if (!Blk || !LastSt) { + return; + } + + // If the last statement is return, everything it references should stay live. + if (isa(LastSt)) + return; + + // Here, we call the Symbol Reaper with 0 stack context telling it to clean up + // everything on the stack. We use LastStmt as a diagnostic statement, with + // which the PreStmtPurgeDead point will be associated. + currBldrCtx = &BC; + removeDead(Pred, Dst, 0, 0, LastSt, + ProgramPoint::PostStmtPurgeDeadSymbolsKind); + currBldrCtx = 0; +} + +static bool wasDifferentDeclUsedForInlining(CallEventRef<> Call, + const StackFrameContext *calleeCtx) { + const Decl *RuntimeCallee = calleeCtx->getDecl(); + const Decl *StaticDecl = Call->getDecl(); + assert(RuntimeCallee); + if (!StaticDecl) + return true; + return RuntimeCallee->getCanonicalDecl() != StaticDecl->getCanonicalDecl(); +} + /// The call exit is simulated with a sequence of nodes, which occur between /// CallExitBegin and CallExitEnd. The following operations occur between the /// two program points: @@ -133,6 +227,11 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) { const CFGBlock *Blk = 0; llvm::tie(LastSt, Blk) = getLastStmt(CEBNode); + // Generate a CallEvent /before/ cleaning the state, so that we can get the + // correct value for 'this' (if necessary). + CallEventManager &CEMgr = getStateManager().getCallEventManager(); + CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state); + // Step 2: generate node with bound return value: CEBNode -> BindedRetNode. // If the callee returns an expression, bind its value to CallExpr. @@ -140,6 +239,19 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) { if (const ReturnStmt *RS = dyn_cast_or_null(LastSt)) { const LocationContext *LCtx = CEBNode->getLocationContext(); SVal V = state->getSVal(RS, LCtx); + + // Ensure that the return type matches the type of the returned Expr. + if (wasDifferentDeclUsedForInlining(Call, calleeCtx)) { + QualType ReturnedTy = + CallEvent::getDeclaredResultType(calleeCtx->getDecl()); + if (!ReturnedTy.isNull()) { + if (const Expr *Ex = dyn_cast(CE)) { + V = adjustReturnValue(V, Ex->getType(), ReturnedTy, + getStoreManager()); + } + } + } + state = state->BindExpr(CE, callerCtx, V); } @@ -149,23 +261,25 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) { svalBuilder.getCXXThis(CCE->getConstructor()->getParent(), calleeCtx); SVal ThisV = state->getSVal(This); - // Always bind the region to the CXXConstructExpr. + // If the constructed object is a prvalue, get its bindings. + // Note that we have to be careful here because constructors embedded + // in DeclStmts are not marked as lvalues. + if (!CCE->isGLValue()) + if (const MemRegion *MR = ThisV.getAsRegion()) + if (isa(MR)) + ThisV = state->getSVal(cast(ThisV)); + state = state->BindExpr(CCE, callerCtx, ThisV); } } - // Generate a CallEvent /before/ cleaning the state, so that we can get the - // correct value for 'this' (if necessary). 
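adjustReturnValue, and its use in processCallExit above, handle devirtualized calls whose overrider has a covariant return type: the caller's expression has the base pointer type, while the inlined callee returns a pointer to a more derived class, so the value needs a derived-to-base adjustment. The C++ situation, reduced to a sketch:

    struct Shape {
      virtual ~Shape() {}
      virtual Shape *clone() const { return new Shape(*this); }
    };

    struct Circle : Shape {
      int radius;
      // Covariant return type: an overrider may return a pointer to a more
      // derived class than the function it overrides.
      Circle *clone() const override { return new Circle(*this); }
    };

    Shape *duplicate(const Shape &s) {
      // Statically this call has type Shape*, but if the dynamic type is
      // Circle, the devirtualized callee returns Circle*; the returned value
      // must be adjusted (derived-to-base) to match the caller's type.
      return s.clone();
    }

    int main() {
      Circle c;
      Shape *copy = duplicate(c);
      delete copy;
      return 0;
    }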
- CallEventManager &CEMgr = getStateManager().getCallEventManager(); - CallEventRef<> Call = CEMgr.getCaller(calleeCtx, state); - // Step 3: BindedRetNode -> CleanedNodes // If we can find a statement and a block in the inlined function, run remove // dead bindings before returning from the call. This is important to ensure // that we report the issues such as leaks in the stack contexts in which // they occurred. ExplodedNodeSet CleanedNodes; - if (LastSt && Blk) { + if (LastSt && Blk && AMgr.options.AnalysisPurgeOpt != PurgeNone) { static SimpleProgramPointTag retValBind("ExprEngine : Bind Return Value"); PostStmt Loc(LastSt, calleeCtx, &retValBind); bool isNew; @@ -175,14 +289,14 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) { return; NodeBuilderContext Ctx(getCoreEngine(), Blk, BindedRetNode); - currentBuilderContext = &Ctx; + currBldrCtx = &Ctx; // Here, we call the Symbol Reaper with 0 statement and caller location // context, telling it to clean up everything in the callee's context // (and it's children). We use LastStmt as a diagnostic statement, which // which the PreStmtPurge Dead point will be associated. removeDead(BindedRetNode, CleanedNodes, 0, callerCtx, LastSt, ProgramPoint::PostStmtPurgeDeadSymbolsKind); - currentBuilderContext = 0; + currBldrCtx = 0; } else { CleanedNodes.Add(CEBNode); } @@ -204,9 +318,9 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) { // result onto the work list. // CEENode -> Dst -> WorkList NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), CEENode); - SaveAndRestore NBCSave(currentBuilderContext, + SaveAndRestore NBCSave(currBldrCtx, &Ctx); - SaveAndRestore CBISave(currentStmtIdx, calleeCtx->getIndex()); + SaveAndRestore CBISave(currStmtIdx, calleeCtx->getIndex()); CallEventRef<> UpdatedCall = Call.cloneWithState(CEEState); @@ -236,14 +350,48 @@ void ExprEngine::processCallExit(ExplodedNode *CEBNode) { } } -static unsigned getNumberStackFrames(const LocationContext *LCtx) { - unsigned count = 0; +void ExprEngine::examineStackFrames(const Decl *D, const LocationContext *LCtx, + bool &IsRecursive, unsigned &StackDepth) { + IsRecursive = false; + StackDepth = 0; + while (LCtx) { - if (isa(LCtx)) - ++count; + if (const StackFrameContext *SFC = dyn_cast(LCtx)) { + const Decl *DI = SFC->getDecl(); + + // Mark recursive (and mutually recursive) functions and always count + // them when measuring the stack depth. + if (DI == D) { + IsRecursive = true; + ++StackDepth; + LCtx = LCtx->getParent(); + continue; + } + + // Do not count the small functions when determining the stack depth. + AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(DI); + const CFG *CalleeCFG = CalleeADC->getCFG(); + if (CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize()) + ++StackDepth; + } LCtx = LCtx->getParent(); } - return count; + +} + +static bool IsInStdNamespace(const FunctionDecl *FD) { + const DeclContext *DC = FD->getEnclosingNamespaceContext(); + const NamespaceDecl *ND = dyn_cast(DC); + if (!ND) + return false; + + while (const DeclContext *Parent = ND->getParent()) { + if (!isa(Parent)) + break; + ND = cast(Parent); + } + + return ND->getName() == "std"; } // Determine if we should inline the call. 
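IsInStdNamespace walks out to the outermost enclosing namespace because standard library implementations commonly nest their entities inside an inline versioning namespace within std. A hypothetical layout of that shape (the namespace names here are illustrative; real implementations use reserved names such as __1):

    namespace std_like {        // stands in for 'std' in this sketch
    inline namespace v1 {       // versioning namespace, transparent to users
    template <class T> struct fancy_vector { T *data; unsigned size; };
    }  // namespace v1
    }  // namespace std_like

    // The immediate declaration context of fancy_vector is the inner
    // namespace; only by walking up to the outermost enclosing namespace
    // does a check like IsInStdNamespace see the 'std'-like parent.
    std_like::fancy_vector<int> v{nullptr, 0};

    int main() { return v.size; }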
@@ -256,14 +404,18 @@ bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) { if (!CalleeCFG) return false; - if (getNumberStackFrames(Pred->getLocationContext()) - == AMgr.InlineMaxStackDepth) + bool IsRecursive = false; + unsigned StackDepth = 0; + examineStackFrames(D, Pred->getLocationContext(), IsRecursive, StackDepth); + if ((StackDepth >= AMgr.options.InlineMaxStackDepth) && + ((CalleeCFG->getNumBlockIDs() > AMgr.options.getAlwaysInlineSize()) + || IsRecursive)) return false; if (Engine.FunctionSummaries->hasReachedMaxBlockCount(D)) return false; - if (CalleeCFG->getNumBlockIDs() > AMgr.InlineMaxFunctionSize) + if (CalleeCFG->getNumBlockIDs() > AMgr.options.InlineMaxFunctionSize) return false; // Do not inline variadic calls (for now). @@ -276,6 +428,21 @@ bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) { return false; } + if (getContext().getLangOpts().CPlusPlus) { + if (const FunctionDecl *FD = dyn_cast(D)) { + // Conditionally allow the inlining of template functions. + if (!getAnalysisManager().options.mayInlineTemplateFunctions()) + if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate) + return false; + + // Conditionally allow the inlining of C++ standard library functions. + if (!getAnalysisManager().options.mayInlineCXXStandardLibrary()) + if (getContext().getSourceManager().isInSystemHeader(FD->getLocation())) + if (IsInStdNamespace(FD)) + return false; + } + } + // It is possible that the live variables analysis cannot be // run. If so, bail out. if (!CalleeADC->getAnalysis()) @@ -284,26 +451,21 @@ bool ExprEngine::shouldInlineDecl(const Decl *D, ExplodedNode *Pred) { return true; } -/// The GDM component containing the dynamic dispatch bifurcation info. When -/// the exact type of the receiver is not known, we want to explore both paths - -/// one on which we do inline it and the other one on which we don't. This is -/// done to ensure we do not drop coverage. -/// This is the map from the receiver region to a bool, specifying either we -/// consider this region's information precise or not along the given path. -namespace clang { -namespace ento { -enum DynamicDispatchMode { DynamicDispatchModeInlined = 1, - DynamicDispatchModeConservative }; - -struct DynamicDispatchBifurcationMap {}; -typedef llvm::ImmutableMap DynamicDispatchBifur; -template<> struct ProgramStateTrait - : public ProgramStatePartialTrait { - static void *GDMIndex() { static int index; return &index; } -}; - -}} +// The GDM component containing the dynamic dispatch bifurcation info. When +// the exact type of the receiver is not known, we want to explore both paths - +// one on which we do inline it and the other one on which we don't. This is +// done to ensure we do not drop coverage. +// This is the map from the receiver region to a bool, specifying either we +// consider this region's information precise or not along the given path. 
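The reworked depth check in shouldInlineDecl treats recursion specially: recursive frames always count toward the inlining stack depth, while callees below the "always inline" size threshold do not, so chains of tiny wrappers stay cheap but genuine recursion is cut off. The shape of code that exercises the check (plain C++ sketch):

    // Mutual recursion: each even()/odd() frame counts toward the analyzer's
    // inlining stack depth, so exploration stops at the configured maximum
    // depth instead of unrolling indefinitely.
    bool odd(unsigned n);
    bool even(unsigned n) { return n == 0 ? true  : odd(n - 1); }
    bool odd(unsigned n)  { return n == 0 ? false : even(n - 1); }

    // A tiny forwarding wrapper, by contrast, falls under the small-function
    // threshold and is not counted against the depth budget.
    bool isEven(unsigned n) { return even(n); }

    int main() { return isEven(10) ? 0 : 1; }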
+namespace { + enum DynamicDispatchMode { + DynamicDispatchModeInlined = 1, + DynamicDispatchModeConservative + }; +} +REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap, + CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, + unsigned)) bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, NodeBuilder &Bldr, ExplodedNode *Pred, @@ -314,24 +476,19 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, const StackFrameContext *CallerSFC = CurLC->getCurrentStackFrame(); const LocationContext *ParentOfCallee = 0; + AnalyzerOptions &Opts = getAnalysisManager().options; + // FIXME: Refactor this check into a hypothetical CallEvent::canInline. switch (Call.getKind()) { case CE_Function: break; case CE_CXXMember: case CE_CXXMemberOperator: - if (!CXX_INLINING_ENABLED) + if (!Opts.mayInlineCXXMemberFunction(CIMK_MemberFunctions)) return false; break; case CE_CXXConstructor: { - if (!CXX_INLINING_ENABLED) - return false; - - // Only inline constructors and destructors if we built the CFGs for them - // properly. - const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext(); - if (!ADC->getCFGBuildOptions().AddImplicitDtors || - !ADC->getCFGBuildOptions().AddInitializers) + if (!Opts.mayInlineCXXMemberFunction(CIMK_Constructors)) return false; const CXXConstructorCall &Ctor = cast(Call); @@ -341,9 +498,31 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, if (Target && isa(Target)) return false; + // FIXME: This is a hack. We don't use the correct region for a new + // expression, so if we inline the constructor its result will just be + // thrown away. This short-term hack is tracked in + // and the longer-term possible fix is discussed in PR12014. + const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr(); + if (const Stmt *Parent = CurLC->getParentMap().getParent(CtorExpr)) + if (isa(Parent)) + return false; + + // Inlining constructors requires including initializers in the CFG. + const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext(); + assert(ADC->getCFGBuildOptions().AddInitializers && "No CFG initializers"); + (void)ADC; + + // If the destructor is trivial, it's always safe to inline the constructor. + if (Ctor.getDecl()->getParent()->hasTrivialDestructor()) + break; + + // For other types, only inline constructors if destructor inlining is + // also enabled. + if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors)) + return false; + // FIXME: This is a hack. We don't handle temporary destructors // right now, so we shouldn't inline their constructors. - const CXXConstructExpr *CtorExpr = Ctor.getOriginExpr(); if (CtorExpr->getConstructionKind() == CXXConstructExpr::CK_Complete) if (!Target || !isa(Target)) return false; @@ -351,15 +530,13 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, break; } case CE_CXXDestructor: { - if (!CXX_INLINING_ENABLED) + if (!Opts.mayInlineCXXMemberFunction(CIMK_Destructors)) return false; - // Only inline constructors and destructors if we built the CFGs for them - // properly. + // Inlining destructors requires building the CFG correctly. 
const AnalysisDeclContext *ADC = CallerSFC->getAnalysisDeclContext(); - if (!ADC->getCFGBuildOptions().AddImplicitDtors || - !ADC->getCFGBuildOptions().AddInitializers) - return false; + assert(ADC->getCFGBuildOptions().AddImplicitDtors && "No CFG destructors"); + (void)ADC; const CXXDestructorCall &Dtor = cast(Call); @@ -371,9 +548,6 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, break; } case CE_CXXAllocator: - if (!CXX_INLINING_ENABLED) - return false; - // Do not inline allocators until we model deallocators. // This is unfortunate, but basically necessary for smart pointers and such. return false; @@ -387,8 +561,10 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, break; } case CE_ObjCMessage: - if (!(getAnalysisManager().IPAMode == DynamicDispatch || - getAnalysisManager().IPAMode == DynamicDispatchBifurcate)) + if (!Opts.mayInlineObjCMethod()) + return false; + if (!(getAnalysisManager().options.IPAMode == DynamicDispatch || + getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate)) return false; break; } @@ -406,8 +582,8 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(D); const StackFrameContext *CalleeSFC = CalleeADC->getStackFrame(ParentOfCallee, CallE, - currentBuilderContext->getBlock(), - currentStmtIdx); + currBldrCtx->getBlock(), + currStmtIdx); CallEnter Loc(CallE, CalleeSFC, CurLC); @@ -426,6 +602,12 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D, // added onto the work list so remove it from the node builder. Bldr.takeNodes(Pred); + NumInlinedCalls++; + + // Mark the decl as visited. + if (VisitedCallees) + VisitedCallees->insert(D); + return true; } @@ -520,8 +702,8 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call, // Conjure a symbol if the return value is unknown. QualType ResultTy = Call.getResultType(); SValBuilder &SVB = getSValBuilder(); - unsigned Count = currentBuilderContext->getCurrentBlockCount(); - SVal R = SVB.getConjuredSymbolVal(0, E, LCtx, ResultTy, Count); + unsigned Count = currBldrCtx->blockCount(); + SVal R = SVB.conjureSymbolVal(0, E, LCtx, ResultTy, Count); return State->BindExpr(E, LCtx, R); } @@ -529,8 +711,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call, // a conjured return value. void ExprEngine::conservativeEvalCall(const CallEvent &Call, NodeBuilder &Bldr, ExplodedNode *Pred, ProgramStateRef State) { - unsigned Count = currentBuilderContext->getCurrentBlockCount(); - State = Call.invalidateRegions(Count, State); + State = Call.invalidateRegions(currBldrCtx->blockCount(), State); State = bindReturnValue(Call, Pred->getLocationContext(), State); // And make the result node. @@ -562,13 +743,13 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred, if (D) { if (RD.mayHaveOtherDefinitions()) { // Explore with and without inlining the call. - if (getAnalysisManager().IPAMode == DynamicDispatchBifurcate) { + if (getAnalysisManager().options.IPAMode == DynamicDispatchBifurcate) { BifurcateCall(RD.getDispatchRegion(), *Call, D, Bldr, Pred); return; } // Don't inline if we're not in any dynamic dispatch mode. 
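The dynamic-dispatch handling in defaultEvalCall splits the path when the receiver's dynamic type is only a guess: one successor assumes the devirtualized definition and inlines it, the other evaluates the call conservatively, so coverage is not lost if the guess is wrong. The uncertainty arises in code like the following sketch (C++ analogue; the same applies to Objective-C message sends):

    struct Codec {
      virtual ~Codec() {}
      virtual int decode(int v) const { return v; }
    };

    struct LoudCodec : Codec {
      int decode(int v) const override { return v * 2; }
    };

    // 'c' arrives from an unknown caller, so its dynamic type is only
    // partially known: it might be a plain Codec or any derived class.
    // One explored path can inline Codec::decode; the other treats the
    // virtual call as opaque and invalidates conservatively.
    int run(const Codec *c, int v) {
      return c->decode(v);
    }

    int main() {
      LoudCodec loud;
      return run(&loud, 1) == 2 ? 0 : 1;
    }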
- if (getAnalysisManager().IPAMode != DynamicDispatch) { + if (getAnalysisManager().options.IPAMode != DynamicDispatch) { conservativeEvalCall(*Call, Bldr, Pred, State); return; } @@ -593,7 +774,7 @@ void ExprEngine::BifurcateCall(const MemRegion *BifurReg, // Check if we've performed the split already - note, we only want // to split the path once per memory region. ProgramStateRef State = Pred->getState(); - const unsigned int *BState = + const unsigned *BState = State->get(BifurReg); if (BState) { // If we are on "inline path", keep inlining if possible. @@ -630,7 +811,7 @@ void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred, ExplodedNodeSet dstPreVisit; getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this); - StmtNodeBuilder B(dstPreVisit, Dst, *currentBuilderContext); + StmtNodeBuilder B(dstPreVisit, Dst, *currBldrCtx); if (RS->getRetValue()) { for (ExplodedNodeSet::iterator it = dstPreVisit.begin(), diff --git a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp index e3bc498..51dda19b 100644 --- a/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp +++ b/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp @@ -28,7 +28,7 @@ void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *Ex, SVal location = state->getLValue(Ex->getDecl(), baseVal); ExplodedNodeSet dstIvar; - StmtNodeBuilder Bldr(Pred, dstIvar, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, dstIvar, *currBldrCtx); Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, location)); // Perform the post-condition check of the ObjCIvarRefExpr and store @@ -88,7 +88,7 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S, evalLocation(dstLocation, S, elem, Pred, state, elementV, NULL, false); ExplodedNodeSet Tmp; - StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext); + StmtNodeBuilder Bldr(Pred, Tmp, *currBldrCtx); for (ExplodedNodeSet::iterator NI = dstLocation.begin(), NE = dstLocation.end(); NI!=NE; ++NI) { @@ -112,8 +112,8 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S, // For now, just 'conjure' up a symbolic value. QualType T = R->getValueType(); assert(Loc::isLocType(T)); - unsigned Count = currentBuilderContext->getCurrentBlockCount(); - SymbolRef Sym = SymMgr.getConjuredSymbol(elem, LCtx, T, Count); + SymbolRef Sym = SymMgr.conjureSymbol(elem, LCtx, T, + currBldrCtx->blockCount()); SVal V = svalBuilder.makeLoc(Sym); hasElems = hasElems->bindLoc(elementV, V); @@ -132,14 +132,6 @@ void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S, getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this); } -static bool isSubclass(const ObjCInterfaceDecl *Class, IdentifierInfo *II) { - if (!Class) - return false; - if (Class->getIdentifier() == II) - return true; - return isSubclass(Class->getSuperClass(), II); -} - void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME, ExplodedNode *Pred, ExplodedNodeSet &Dst) { @@ -157,7 +149,7 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME, // Proceed with evaluate the message expression. ExplodedNodeSet dstEval; - StmtNodeBuilder Bldr(dstGenericPrevisit, dstEval, *currentBuilderContext); + StmtNodeBuilder Bldr(dstGenericPrevisit, dstEval, *currBldrCtx); for (ExplodedNodeSet::iterator DI = dstGenericPrevisit.begin(), DE = dstGenericPrevisit.end(); DI != DE; ++DI) { @@ -184,68 +176,30 @@ void ExprEngine::VisitObjCMessage(const ObjCMessageExpr *ME, // Check if the "raise" message was sent. 
assert(notNilState); - if (Msg->getSelector() == RaiseSel) { + if (ObjCNoRet.isImplicitNoReturn(ME)) { // If we raise an exception, for now treat it as a sink. // Eventually we will want to handle exceptions properly. - Bldr.generateNode(currentStmt, Pred, State, true); + Bldr.generateSink(currStmt, Pred, State); continue; } // Generate a transition to non-Nil state. - if (notNilState != State) - Pred = Bldr.generateNode(currentStmt, Pred, notNilState); + if (notNilState != State) { + Pred = Bldr.generateNode(currStmt, Pred, notNilState); + assert(Pred && "Should have cached out already!"); + } } } else { - // Check for special class methods. - if (const ObjCInterfaceDecl *Iface = Msg->getReceiverInterface()) { - if (!NSExceptionII) { - ASTContext &Ctx = getContext(); - NSExceptionII = &Ctx.Idents.get("NSException"); - } - - if (isSubclass(Iface, NSExceptionII)) { - enum { NUM_RAISE_SELECTORS = 2 }; - - // Lazily create a cache of the selectors. - if (!NSExceptionInstanceRaiseSelectors) { - ASTContext &Ctx = getContext(); - NSExceptionInstanceRaiseSelectors = - new Selector[NUM_RAISE_SELECTORS]; - SmallVector II; - unsigned idx = 0; - - // raise:format: - II.push_back(&Ctx.Idents.get("raise")); - II.push_back(&Ctx.Idents.get("format")); - NSExceptionInstanceRaiseSelectors[idx++] = - Ctx.Selectors.getSelector(II.size(), &II[0]); - - // raise:format:arguments: - II.push_back(&Ctx.Idents.get("arguments")); - NSExceptionInstanceRaiseSelectors[idx++] = - Ctx.Selectors.getSelector(II.size(), &II[0]); - } - - Selector S = Msg->getSelector(); - bool RaisesException = false; - for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i) { - if (S == NSExceptionInstanceRaiseSelectors[i]) { - RaisesException = true; - break; - } - } - if (RaisesException) { - // If we raise an exception, for now treat it as a sink. - // Eventually we will want to handle exceptions properly. - Bldr.generateNode(currentStmt, Pred, Pred->getState(), true); - continue; - } - - } + // Check for special class methods that are known to not return + // and that we should treat as a sink. + if (ObjCNoRet.isImplicitNoReturn(ME)) { + // If we raise an exception, for now treat it as a sink. + // Eventually we will want to handle exceptions properly. + Bldr.generateSink(currStmt, Pred, Pred->getState()); + continue; } } - // Evaluate the call. defaultEvalCall(Bldr, Pred, *UpdatedMsg); } diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp index 982bcbf..fd875f6 100644 --- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp +++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp @@ -17,8 +17,8 @@ #include "clang/AST/Decl.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/FileManager.h" -#include "clang/Rewrite/Rewriter.h" -#include "clang/Rewrite/HTMLRewrite.h" +#include "clang/Rewrite/Core/Rewriter.h" +#include "clang/Rewrite/Core/HTMLRewrite.h" #include "clang/Lex/Lexer.h" #include "clang/Lex/Preprocessor.h" #include "llvm/Support/FileSystem.h" @@ -189,7 +189,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, << (*path.rbegin())->getLocation().asLocation().getExpansionColumnNumber() << "\n" "Description:" - << D.getDescription() << "\n"; + << D.getVerboseDescription() << "\n"; // Output any other meta data. 
@@ -209,15 +209,15 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, std::string s; llvm::raw_string_ostream os(s); - const std::string& BugDesc = D.getDescription(); + StringRef BugDesc = D.getVerboseDescription(); if (!BugDesc.empty()) os << "\n\n"; - const std::string& BugType = D.getBugType(); + StringRef BugType = D.getBugType(); if (!BugType.empty()) os << "\n\n"; - const std::string& BugCategory = D.getCategory(); + StringRef BugCategory = D.getCategory(); if (!BugCategory.empty()) os << "\n\n"; @@ -267,8 +267,7 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D, } if (filesMade) { - filesMade->push_back(std::make_pair(StringRef(getName()), - llvm::sys::path::filename(H.str()))); + filesMade->addDiagnostic(D, getName(), llvm::sys::path::filename(H.str())); } // Emit the HTML to disk. diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp index 62e602a..fab10cf 100644 --- a/lib/StaticAnalyzer/Core/MemRegion.cpp +++ b/lib/StaticAnalyzer/Core/MemRegion.cpp @@ -352,7 +352,7 @@ void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const { } void FunctionTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, - const FunctionDecl *FD, + const NamedDecl *FD, const MemRegion*) { ID.AddInteger(MemRegion::FunctionTextRegionKind); ID.AddPointer(FD); @@ -444,7 +444,7 @@ void MemRegion::dumpToStream(raw_ostream &os) const { } void AllocaRegion::dumpToStream(raw_ostream &os) const { - os << "alloca{" << (void*) Ex << ',' << Cnt << '}'; + os << "alloca{" << (const void*) Ex << ',' << Cnt << '}'; } void FunctionTextRegion::dumpToStream(raw_ostream &os) const { @@ -452,7 +452,7 @@ void FunctionTextRegion::dumpToStream(raw_ostream &os) const { } void BlockTextRegion::dumpToStream(raw_ostream &os) const { - os << "block_code{" << (void*) this << '}'; + os << "block_code{" << (const void*) this << '}'; } void BlockDataRegion::dumpToStream(raw_ostream &os) const { @@ -461,12 +461,12 @@ void BlockDataRegion::dumpToStream(raw_ostream &os) const { void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const { // FIXME: More elaborate pretty-printing. - os << "{ " << (void*) CL << " }"; + os << "{ " << (const void*) CL << " }"; } void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const { os << "temp_object{" << getValueType().getAsString() << ',' - << (void*) Ex << '}'; + << (const void*) Ex << '}'; } void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const { @@ -748,11 +748,11 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D, } else { assert(D->isStaticLocal()); - const Decl *D = STC->getDecl(); - if (const FunctionDecl *FD = dyn_cast(D)) + const Decl *STCD = STC->getDecl(); + if (isa(STCD) || isa(STCD)) sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind, - getFunctionTextRegion(FD)); - else if (const BlockDecl *BD = dyn_cast(D)) { + getFunctionTextRegion(cast(STCD))); + else if (const BlockDecl *BD = dyn_cast(STCD)) { const BlockTextRegion *BTR = getBlockTextRegion(BD, C.getCanonicalType(BD->getSignatureAsWritten()->getType()), @@ -761,8 +761,6 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D, BTR); } else { - // FIXME: For ObjC-methods, we need a new CodeTextRegion. For now - // just use the main global memspace. 
sReg = getGlobalsRegion(); } } @@ -845,7 +843,7 @@ MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx, } const FunctionTextRegion * -MemRegionManager::getFunctionTextRegion(const FunctionDecl *FD) { +MemRegionManager::getFunctionTextRegion(const NamedDecl *FD) { return getSubRegion(FD, getCodeRegion()); } @@ -990,6 +988,10 @@ const MemRegion *MemRegion::getBaseRegion() const { return R; } +bool MemRegion::isSubRegionOf(const MemRegion *R) const { + return false; +} + //===----------------------------------------------------------------------===// // View handling. //===----------------------------------------------------------------------===// @@ -1107,7 +1109,7 @@ RegionOffset MemRegion::getAsOffset() const { // If our base region is symbolic, we don't know what type it really is. // Pretend the type of the symbol is the true dynamic type. // (This will at least be self-consistent for the life of the symbol.) - Ty = SR->getSymbol()->getType(getContext())->getPointeeType(); + Ty = SR->getSymbol()->getType()->getPointeeType(); } const CXXRecordDecl *Child = Ty->getAsCXXRecordDecl(); @@ -1166,8 +1168,12 @@ RegionOffset MemRegion::getAsOffset() const { R = FR->getSuperRegion(); const RecordDecl *RD = FR->getDecl()->getParent(); - if (!RD->isCompleteDefinition()) { + if (RD->isUnion() || !RD->isCompleteDefinition()) { // We cannot compute offset for incomplete type. + // For unions, we could treat everything as offset 0, but we'd rather + // treat each field as a symbolic offset so they aren't stored on top + // of each other, since we depend on things in typed regions actually + // matching their types. SymbolicOffsetBase = R; } diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp index c849778..0f48d1e 100644 --- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp +++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp @@ -21,6 +21,7 @@ #include "clang/AST/ParentMap.h" #include "clang/AST/StmtCXX.h" #include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringExtras.h" using namespace clang; using namespace ento; @@ -104,11 +105,12 @@ void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current, PathDiagnostic::~PathDiagnostic() {} PathDiagnostic::PathDiagnostic(const Decl *declWithIssue, - StringRef bugtype, StringRef desc, - StringRef category) + StringRef bugtype, StringRef verboseDesc, + StringRef shortDesc, StringRef category) : DeclWithIssue(declWithIssue), BugType(StripTrailingDots(bugtype)), - Desc(StripTrailingDots(desc)), + VerboseDesc(StripTrailingDots(verboseDesc)), + ShortDesc(StripTrailingDots(shortDesc)), Category(StripTrailingDots(category)), path(pathImpl) {} @@ -198,6 +200,7 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) { if (orig_size <= new_size) return; + assert(orig != D); Diags.RemoveNode(orig); delete orig; } @@ -205,39 +208,151 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) { Diags.InsertNode(OwningD.take()); } +static llvm::Optional comparePath(const PathPieces &X, + const PathPieces &Y); +static llvm::Optional +compareControlFlow(const PathDiagnosticControlFlowPiece &X, + const PathDiagnosticControlFlowPiece &Y) { + FullSourceLoc XSL = X.getStartLocation().asLocation(); + FullSourceLoc YSL = Y.getStartLocation().asLocation(); + if (XSL != YSL) + return XSL.isBeforeInTranslationUnitThan(YSL); + FullSourceLoc XEL = X.getEndLocation().asLocation(); + FullSourceLoc YEL = Y.getEndLocation().asLocation(); + if (XEL != YEL) + return 
XEL.isBeforeInTranslationUnitThan(YEL); + return llvm::Optional(); +} + +static llvm::Optional +compareMacro(const PathDiagnosticMacroPiece &X, + const PathDiagnosticMacroPiece &Y) { + return comparePath(X.subPieces, Y.subPieces); +} + +static llvm::Optional +compareCall(const PathDiagnosticCallPiece &X, + const PathDiagnosticCallPiece &Y) { + FullSourceLoc X_CEL = X.callEnter.asLocation(); + FullSourceLoc Y_CEL = Y.callEnter.asLocation(); + if (X_CEL != Y_CEL) + return X_CEL.isBeforeInTranslationUnitThan(Y_CEL); + FullSourceLoc X_CEWL = X.callEnterWithin.asLocation(); + FullSourceLoc Y_CEWL = Y.callEnterWithin.asLocation(); + if (X_CEWL != Y_CEWL) + return X_CEWL.isBeforeInTranslationUnitThan(Y_CEWL); + FullSourceLoc X_CRL = X.callReturn.asLocation(); + FullSourceLoc Y_CRL = Y.callReturn.asLocation(); + if (X_CRL != Y_CRL) + return X_CRL.isBeforeInTranslationUnitThan(Y_CRL); + return comparePath(X.path, Y.path); +} + +static llvm::Optional comparePiece(const PathDiagnosticPiece &X, + const PathDiagnosticPiece &Y) { + if (X.getKind() != Y.getKind()) + return X.getKind() < Y.getKind(); + + FullSourceLoc XL = X.getLocation().asLocation(); + FullSourceLoc YL = Y.getLocation().asLocation(); + if (XL != YL) + return XL.isBeforeInTranslationUnitThan(YL); + + if (X.getString() != Y.getString()) + return X.getString() < Y.getString(); + + if (X.getRanges().size() != Y.getRanges().size()) + return X.getRanges().size() < Y.getRanges().size(); + + const SourceManager &SM = XL.getManager(); + + for (unsigned i = 0, n = X.getRanges().size(); i < n; ++i) { + SourceRange XR = X.getRanges()[i]; + SourceRange YR = Y.getRanges()[i]; + if (XR != YR) { + if (XR.getBegin() != YR.getBegin()) + return SM.isBeforeInTranslationUnit(XR.getBegin(), YR.getBegin()); + return SM.isBeforeInTranslationUnit(XR.getEnd(), YR.getEnd()); + } + } + + switch (X.getKind()) { + case clang::ento::PathDiagnosticPiece::ControlFlow: + return compareControlFlow(cast(X), + cast(Y)); + case clang::ento::PathDiagnosticPiece::Event: + return llvm::Optional(); + case clang::ento::PathDiagnosticPiece::Macro: + return compareMacro(cast(X), + cast(Y)); + case clang::ento::PathDiagnosticPiece::Call: + return compareCall(cast(X), + cast(Y)); + } + llvm_unreachable("all cases handled"); +} + +static llvm::Optional comparePath(const PathPieces &X, + const PathPieces &Y) { + if (X.size() != Y.size()) + return X.size() < Y.size(); + for (unsigned i = 0, n = X.size(); i != n; ++i) { + llvm::Optional b = comparePiece(*X[i], *Y[i]); + if (b.hasValue()) + return b.getValue(); + } + return llvm::Optional(); +} + +static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) { + FullSourceLoc XL = X.getLocation().asLocation(); + FullSourceLoc YL = Y.getLocation().asLocation(); + if (XL != YL) + return XL.isBeforeInTranslationUnitThan(YL); + if (X.getBugType() != Y.getBugType()) + return X.getBugType() < Y.getBugType(); + if (X.getCategory() != Y.getCategory()) + return X.getCategory() < Y.getCategory(); + if (X.getVerboseDescription() != Y.getVerboseDescription()) + return X.getVerboseDescription() < Y.getVerboseDescription(); + if (X.getShortDescription() != Y.getShortDescription()) + return X.getShortDescription() < Y.getShortDescription(); + if (X.getDeclWithIssue() != Y.getDeclWithIssue()) { + const Decl *XD = X.getDeclWithIssue(); + if (!XD) + return true; + const Decl *YD = Y.getDeclWithIssue(); + if (!YD) + return false; + SourceLocation XDL = XD->getLocation(); + SourceLocation YDL = YD->getLocation(); + if (XDL != YDL) { + const 
SourceManager &SM = XL.getManager(); + return SM.isBeforeInTranslationUnit(XDL, YDL); + } + } + PathDiagnostic::meta_iterator XI = X.meta_begin(), XE = X.meta_end(); + PathDiagnostic::meta_iterator YI = Y.meta_begin(), YE = Y.meta_end(); + if (XE - XI != YE - YI) + return (XE - XI) < (YE - YI); + for ( ; XI != XE ; ++XI, ++YI) { + if (*XI != *YI) + return (*XI) < (*YI); + } + llvm::Optional b = comparePath(X.path, Y.path); + assert(b.hasValue()); + return b.getValue(); +} namespace { struct CompareDiagnostics { // Compare if 'X' is "<" than 'Y'. bool operator()(const PathDiagnostic *X, const PathDiagnostic *Y) const { - // First compare by location - const FullSourceLoc &XLoc = X->getLocation().asLocation(); - const FullSourceLoc &YLoc = Y->getLocation().asLocation(); - if (XLoc < YLoc) - return true; - if (XLoc != YLoc) + if (X == Y) return false; - - // Next, compare by bug type. - StringRef XBugType = X->getBugType(); - StringRef YBugType = Y->getBugType(); - if (XBugType < YBugType) - return true; - if (XBugType != YBugType) - return false; - - // Next, compare by bug description. - StringRef XDesc = X->getDescription(); - StringRef YDesc = Y->getDescription(); - if (XDesc < YDesc) - return true; - if (XDesc != YDesc) - return false; - - // FIXME: Further refine by comparing PathDiagnosticPieces? - return false; - } -}; + return compare(*X, *Y); + } +}; } void PathDiagnosticConsumer::FlushDiagnostics( @@ -250,11 +365,9 @@ void PathDiagnosticConsumer::FlushDiagnostics( std::vector BatchDiags; for (llvm::FoldingSet::iterator it = Diags.begin(), et = Diags.end(); it != et; ++it) { - BatchDiags.push_back(&*it); + const PathDiagnostic *D = &*it; + BatchDiags.push_back(D); } - - // Clear out the FoldingSet. - Diags.clear(); // Sort the diagnostics so that they are always emitted in a deterministic // order. @@ -269,6 +382,42 @@ void PathDiagnosticConsumer::FlushDiagnostics( const PathDiagnostic *D = *it; delete D; } + + // Clear out the FoldingSet. + Diags.clear(); +} + +void PathDiagnosticConsumer::FilesMade::addDiagnostic(const PathDiagnostic &PD, + StringRef ConsumerName, + StringRef FileName) { + llvm::FoldingSetNodeID NodeID; + NodeID.Add(PD); + void *InsertPos; + PDFileEntry *Entry = FindNodeOrInsertPos(NodeID, InsertPos); + if (!Entry) { + Entry = Alloc.Allocate(); + Entry = new (Entry) PDFileEntry(NodeID); + InsertNode(Entry, InsertPos); + } + + // Allocate persistent storage for the file name. 
+ char *FileName_cstr = (char*) Alloc.Allocate(FileName.size(), 1); + memcpy(FileName_cstr, FileName.data(), FileName.size()); + + Entry->files.push_back(std::make_pair(ConsumerName, + StringRef(FileName_cstr, + FileName.size()))); +} + +PathDiagnosticConsumer::PDFileEntry::ConsumerFiles * +PathDiagnosticConsumer::FilesMade::getFiles(const PathDiagnostic &PD) { + llvm::FoldingSetNodeID NodeID; + NodeID.Add(PD); + void *InsertPos; + PDFileEntry *Entry = FindNodeOrInsertPos(NodeID, InsertPos); + if (!Entry) + return 0; + return &Entry->files; } //===----------------------------------------------------------------------===// @@ -437,8 +586,8 @@ PathDiagnosticLocation const CFGBlock *BSrc = BE->getSrc(); S = BSrc->getTerminatorCondition(); } - else if (const PostStmt *PS = dyn_cast(&P)) { - S = PS->getStmt(); + else if (const StmtPoint *SP = dyn_cast(&P)) { + S = SP->getStmt(); } else if (const PostImplicitCall *PIE = dyn_cast(&P)) { return PathDiagnosticLocation(PIE->getLocation(), SMng); @@ -453,6 +602,9 @@ PathDiagnosticLocation CEE->getLocationContext(), SMng); } + else { + llvm_unreachable("Unexpected ProgramPoint"); + } return PathDiagnosticLocation(S, SMng, P.getLocationContext()); } @@ -463,21 +615,26 @@ PathDiagnosticLocation assert(N && "Cannot create a location with a null node."); const ExplodedNode *NI = N; + const Stmt *S = 0; while (NI) { ProgramPoint P = NI->getLocation(); - const LocationContext *LC = P.getLocationContext(); if (const StmtPoint *PS = dyn_cast(&P)) - return PathDiagnosticLocation(PS->getStmt(), SM, LC); - else if (const BlockEdge *BE = dyn_cast(&P)) { - const Stmt *Term = BE->getSrc()->getTerminator(); - if (Term) { - return PathDiagnosticLocation(Term, SM, LC); - } - } + S = PS->getStmt(); + else if (const BlockEdge *BE = dyn_cast(&P)) + S = BE->getSrc()->getTerminator(); + if (S) + break; NI = NI->succ_empty() ? 0 : *(NI->succ_begin()); } + if (S) { + const LocationContext *LC = NI->getLocationContext(); + if (S->getLocStart().isValid()) + return PathDiagnosticLocation(S, SM, LC); + return PathDiagnosticLocation(getValidSourceLocation(S, LC), SM); + } + return createDeclEnd(N->getLocationContext(), SM); } @@ -587,24 +744,6 @@ void PathDiagnosticLocation::flatten() { } } -PathDiagnosticLocation PathDiagnostic::getLocation() const { - assert(path.size() > 0 && - "getLocation() requires a non-empty PathDiagnostic."); - - PathDiagnosticPiece *p = path.rbegin()->getPtr(); - - while (true) { - if (PathDiagnosticCallPiece *cp = dyn_cast(p)) { - assert(!cp->path.empty()); - p = cp->path.rbegin()->getPtr(); - continue; - } - break; - } - - return p->getLocation(); -} - //===----------------------------------------------------------------------===// // Manipulation of PathDiagnosticCallPieces. //===----------------------------------------------------------------------===// @@ -753,10 +892,9 @@ void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const { } void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const { - if (!path.empty()) - getLocation().Profile(ID); + ID.Add(getLocation()); ID.AddString(BugType); - ID.AddString(Desc); + ID.AddString(VerboseDesc); ID.AddString(Category); } @@ -818,42 +956,16 @@ std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){ return getMessageForSymbolNotFound(); } -/// TODO: This is copied from clang diagnostics. Maybe we could just move it to -/// some common place. (Same as HandleOrdinalModifier.) 
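FilesMade::addDiagnostic copies the file name's bytes into the consumer's allocator before storing a StringRef to them, because a StringRef does not own its characters and the caller's buffer may not outlive the table. A minimal illustration of the same ownership rule using std::string_view and a trivial arena (plain C++17; this is not LLVM's BumpPtrAllocator, just the same idea):

    #include <cstdio>
    #include <deque>
    #include <string>
    #include <string_view>

    // Each stored name gets stable storage that lives as long as the arena.
    class NameArena {
      std::deque<std::string> storage;   // deque never relocates elements

    public:
      // Copy the bytes and hand back a view into the arena-owned copy.
      std::string_view persist(std::string_view name) {
        storage.emplace_back(std::string(name));
        return storage.back();
      }
    };

    std::string_view remember(NameArena &arena) {
      std::string temporary = "report-0001.html";   // dies at end of scope
      // Returning a view of 'temporary' would dangle; the persisted copy
      // stays valid for the arena's lifetime.
      return arena.persist(temporary);
    }

    int main() {
      NameArena arena;
      std::string_view name = remember(arena);
      std::printf("%.*s\n", (int)name.size(), name.data());   // still valid
      return 0;
    }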
-void StackHintGeneratorForSymbol::printOrdinal(unsigned ValNo, - llvm::raw_svector_ostream &Out) { - assert(ValNo != 0 && "ValNo must be strictly positive!"); - - // We could use text forms for the first N ordinals, but the numeric - // forms are actually nicer in diagnostics because they stand out. - Out << ValNo; - - // It is critically important that we do this perfectly for - // user-written sequences with over 100 elements. - switch (ValNo % 100) { - case 11: - case 12: - case 13: - Out << "th"; return; - default: - switch (ValNo % 10) { - case 1: Out << "st"; return; - case 2: Out << "nd"; return; - case 3: Out << "rd"; return; - default: Out << "th"; return; - } - } -} - std::string StackHintGeneratorForSymbol::getMessageForArg(const Expr *ArgE, - unsigned ArgIndex) { + unsigned ArgIndex) { + // Printed parameters start at 1, not 0. + ++ArgIndex; + SmallString<200> buf; llvm::raw_svector_ostream os(buf); - os << Msg << " via "; - // Printed parameters start at 1, not 0. - printOrdinal(++ArgIndex, os); - os << " parameter"; + os << Msg << " via " << ArgIndex << llvm::getOrdinalSuffix(ArgIndex) + << " parameter"; return os.str(); } diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp index d5fdd9d..17ef4cf 100644 --- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp +++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp @@ -13,8 +13,9 @@ #include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h" #include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h" -#include "clang/Basic/SourceManager.h" #include "clang/Basic/FileManager.h" +#include "clang/Basic/SourceManager.h" +#include "clang/Basic/Version.h" #include "clang/Lex/Preprocessor.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/Casting.h" @@ -47,7 +48,6 @@ namespace { PathGenerationScheme getGenerationScheme() const { return Extensive; } bool supportsLogicalOpControlFlow() const { return true; } bool supportsAllBlockEdges() const { return true; } - virtual bool useVerboseDescription() const { return false; } virtual bool supportsCrossFileDiagnostics() const { return SupportsCrossFileDiagnostics; } @@ -247,6 +247,7 @@ static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P, // Output the short text. // FIXME: Really use a short string. Indent(o, indent) << "message\n"; + Indent(o, indent); EmitString(o, P.getString()) << '\n'; // Finish up. @@ -409,10 +410,13 @@ void PlistDiagnostics::FlushDiagnosticsImpl( "\n"; // Write the root object: a containing... + // - "clang_version", the string representation of clang version // - "files", an mapping from FIDs to file names // - "diagnostics", an containing the path diagnostics - o << "\n" - " files\n" + o << "\n" << + " clang_version\n"; + EmitString(o, getClangFullVersion()) << '\n'; + o << " files\n" " \n"; for (SmallVectorImpl::iterator I=Fids.begin(), E=Fids.end(); @@ -443,7 +447,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl( // Output the bug type and bug category. o << " description"; - EmitString(o, D->getDescription()) << '\n'; + EmitString(o, D->getShortDescription()) << '\n'; o << " category"; EmitString(o, D->getCategory()) << '\n'; o << " type"; @@ -499,19 +503,23 @@ void PlistDiagnostics::FlushDiagnosticsImpl( // Output the diagnostic to the sub-diagnostic client, if any. 
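getMessageForArg now delegates ordinal formatting to llvm::getOrdinalSuffix instead of the local printOrdinal helper removed above. The rule both implement, as a tiny standalone function (my own sketch of the logic, not LLVM's code):

    #include <cstdio>

    // English ordinal suffix for a positive number: 1st, 2nd, 3rd, 4th, ...
    // with the 11th/12th/13th exception handled before the last digit.
    const char *ordinalSuffix(unsigned n) {
      switch (n % 100) {
      case 11: case 12: case 13:
        return "th";
      default:
        switch (n % 10) {
        case 1: return "st";
        case 2: return "nd";
        case 3: return "rd";
        default: return "th";
        }
      }
    }

    int main() {
      unsigned samples[] = {1, 2, 3, 4, 11, 12, 13, 21, 22, 23, 111};
      for (unsigned n : samples)
        std::printf("%u%s ", n, ordinalSuffix(n));
      std::printf("\n");   // 1st 2nd 3rd 4th 11th 12th 13th 21st 22nd 23rd 111th
      return 0;
    }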
if (!filesMade->empty()) { StringRef lastName; - for (FilesMade::iterator I = filesMade->begin(), E = filesMade->end(); - I != E; ++I) { - StringRef newName = I->first; - if (newName != lastName) { - if (!lastName.empty()) - o << " \n"; - lastName = newName; - o << " " << lastName << "_files\n"; - o << " \n"; + PDFileEntry::ConsumerFiles *files = filesMade->getFiles(*D); + if (files) { + for (PDFileEntry::ConsumerFiles::const_iterator CI = files->begin(), + CE = files->end(); CI != CE; ++CI) { + StringRef newName = CI->first; + if (newName != lastName) { + if (!lastName.empty()) { + o << " \n"; + } + lastName = newName; + o << " " << lastName << "_files\n"; + o << " \n"; + } + o << " " << CI->second << "\n"; } - o << " " << I->second << "\n"; + o << " \n"; } - o << " \n"; } // Close up the entry. @@ -521,10 +529,5 @@ void PlistDiagnostics::FlushDiagnosticsImpl( o << " \n"; // Finish. - o << "\n"; - - if (filesMade) { - StringRef Name(getName()); - filesMade->push_back(std::make_pair(Name, OutputFile)); - } + o << "\n"; } diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp index 2000338..b49a11e 100644 --- a/lib/StaticAnalyzer/Core/ProgramState.cpp +++ b/lib/StaticAnalyzer/Core/ProgramState.cpp @@ -22,10 +22,6 @@ using namespace clang; using namespace ento; -// Give the vtable for ConstraintManager somewhere to live. -// FIXME: Move this elsewhere. -ConstraintManager::~ConstraintManager() {} - namespace clang { namespace ento { /// Increments the number of times this state is referenced. @@ -75,8 +71,8 @@ ProgramStateManager::ProgramStateManager(ASTContext &Ctx, StoreManagerCreator CreateSMgr, ConstraintManagerCreator CreateCMgr, llvm::BumpPtrAllocator &alloc, - SubEngine &SubEng) - : Eng(&SubEng), EnvMgr(alloc), GDMFactory(alloc), + SubEngine *SubEng) + : Eng(SubEng), EnvMgr(alloc), GDMFactory(alloc), svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)), CallEventMgr(new CallEventManager(alloc)), Alloc(alloc) { StoreMgr.reset((*CreateSMgr)(*this)); @@ -110,47 +106,25 @@ ProgramStateManager::removeDeadBindings(ProgramStateRef state, SymReaper); NewState.setStore(newStore); SymReaper.setReapedStore(newStore); - - return getPersistentState(NewState); -} - -ProgramStateRef ProgramStateManager::MarshalState(ProgramStateRef state, - const StackFrameContext *InitLoc) { - // make up an empty state for now. 
- ProgramState State(this, - EnvMgr.getInitialEnvironment(), - StoreMgr->getInitialStore(InitLoc), - GDMFactory.getEmptyMap()); - return getPersistentState(State); + ProgramStateRef Result = getPersistentState(NewState); + return ConstraintMgr->removeDeadBindings(Result, SymReaper); } ProgramStateRef ProgramState::bindCompoundLiteral(const CompoundLiteralExpr *CL, const LocationContext *LC, SVal V) const { const StoreRef &newStore = - getStateManager().StoreMgr->BindCompoundLiteral(getStore(), CL, LC, V); + getStateManager().StoreMgr->bindCompoundLiteral(getStore(), CL, LC, V); return makeWithStore(newStore); } -ProgramStateRef ProgramState::bindDecl(const VarRegion* VR, SVal IVal) const { - const StoreRef &newStore = - getStateManager().StoreMgr->BindDecl(getStore(), VR, IVal); - return makeWithStore(newStore); -} - -ProgramStateRef ProgramState::bindDeclWithNoInit(const VarRegion* VR) const { - const StoreRef &newStore = - getStateManager().StoreMgr->BindDeclWithNoInit(getStore(), VR); - return makeWithStore(newStore); -} - -ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V) const { +ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V, bool notifyChanges) const { ProgramStateManager &Mgr = getStateManager(); ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(), LV, V)); const MemRegion *MR = LV.getAsRegion(); - if (MR && Mgr.getOwningEngine()) + if (MR && Mgr.getOwningEngine() && notifyChanges) return Mgr.getOwningEngine()->processRegionChange(newState, MR); return newState; @@ -204,11 +178,12 @@ ProgramState::invalidateRegionsImpl(ArrayRef Regions, return makeWithStore(newStore); } -ProgramStateRef ProgramState::unbindLoc(Loc LV) const { +ProgramStateRef ProgramState::killBinding(Loc LV) const { assert(!isa(LV) && "Use invalidateRegion instead."); Store OldStore = getStore(); - const StoreRef &newStore = getStateManager().StoreMgr->Remove(OldStore, LV); + const StoreRef &newStore = + getStateManager().StoreMgr->killBinding(OldStore, LV); if (newStore.getStore() == OldStore) return this; @@ -249,7 +224,9 @@ SVal ProgramState::getSVal(Loc location, QualType T) const { // about). if (!T.isNull()) { if (SymbolRef sym = V.getAsSymbol()) { - if (const llvm::APSInt *Int = getSymVal(sym)) { + if (const llvm::APSInt *Int = getStateManager() + .getConstraintManager() + .getSymVal(this, sym)) { // FIXME: Because we don't correctly model (yet) sign-extension // and truncation of symbolic values, we need to convert // the integer value to the correct signedness and bitwidth. @@ -710,7 +687,9 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const { bool Tainted = false; for (SymExpr::symbol_iterator SI = Sym->symbol_begin(), SE =Sym->symbol_end(); SI != SE; ++SI) { - assert(isa(*SI)); + if (!isa(*SI)) + continue; + const TaintTagType *Tag = get(*SI); Tainted = (Tag && *Tag == Kind); @@ -734,15 +713,10 @@ bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const { } /// The GDM component containing the dynamic type info. This is a map from a -/// symbol to it's most likely type. -namespace clang { -namespace ento { -typedef llvm::ImmutableMap DynamicTypeMap; -template<> struct ProgramStateTrait - : public ProgramStatePartialTrait { - static void *GDMIndex() { static int index; return &index; } -}; -}} +/// symbol to its most likely type. 
+REGISTER_TRAIT_WITH_PROGRAMSTATE(DynamicTypeMap, + CLANG_ENTO_PROGRAMSTATE_MAP(const MemRegion *, + DynamicTypeInfo)) DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const { Reg = Reg->StripCasts(); @@ -758,7 +732,7 @@ DynamicTypeInfo ProgramState::getDynamicTypeInfo(const MemRegion *Reg) const { if (const SymbolicRegion *SR = dyn_cast(Reg)) { SymbolRef Sym = SR->getSymbol(); - return DynamicTypeInfo(Sym->getType(getStateManager().getContext())); + return DynamicTypeInfo(Sym->getType()); } return DynamicTypeInfo(); diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp index 550404a..411094b 100644 --- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp +++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp @@ -24,9 +24,6 @@ using namespace clang; using namespace ento; -namespace { class ConstraintRange {}; } -static int ConstraintRangeIndex = 0; - /// A Range represents the closed range [from, to]. The caller must /// guarantee that from <= to. Note that Range is immutable, so as not /// to subvert RangeSet's immutability. @@ -280,23 +277,15 @@ public: }; } // end anonymous namespace -typedef llvm::ImmutableMap ConstraintRangeTy; - -namespace clang { -namespace ento { -template<> -struct ProgramStateTrait - : public ProgramStatePartialTrait { - static inline void *GDMIndex() { return &ConstraintRangeIndex; } -}; -} -} +REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintRange, + CLANG_ENTO_PROGRAMSTATE_MAP(SymbolRef, + RangeSet)) namespace { class RangeConstraintManager : public SimpleConstraintManager{ RangeSet GetRange(ProgramStateRef state, SymbolRef sym); public: - RangeConstraintManager(SubEngine &subengine, BasicValueFactory &BVF) + RangeConstraintManager(SubEngine *subengine, BasicValueFactory &BVF) : SimpleConstraintManager(subengine, BVF) {} ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym, @@ -324,12 +313,7 @@ public: const llvm::APSInt& Adjustment); const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) const; - - // FIXME: Refactor into SimpleConstraintManager? - bool isEqual(ProgramStateRef St, SymbolRef sym, const llvm::APSInt& V) const { - const llvm::APSInt *i = getSymVal(St, sym); - return i ? *i == V : false; - } + ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym); ProgramStateRef removeDeadBindings(ProgramStateRef St, SymbolReaper& SymReaper); @@ -343,7 +327,7 @@ private: } // end anonymous namespace ConstraintManager * -ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine &Eng) { +ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) { return new RangeConstraintManager(Eng, StMgr.getBasicVals()); } @@ -353,6 +337,30 @@ const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St, return T ? T->getConcreteValue() : NULL; } +ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State, + SymbolRef Sym) { + const RangeSet *Ranges = State->get(Sym); + + // If we don't have any information about this symbol, it's underconstrained. + if (!Ranges) + return ConditionTruthVal(); + + // If we have a concrete value, see if it's zero. + if (const llvm::APSInt *Value = Ranges->getConcreteValue()) + return *Value == 0; + + BasicValueFactory &BV = getBasicVals(); + APSIntType IntType = BV.getAPSIntType(Sym->getType()); + llvm::APSInt Zero = IntType.getZeroValue(); + + // Check if zero is in the set of possible values. 
+ if (Ranges->Intersect(BV, F, Zero, Zero).isEmpty()) + return false; + + // Zero is a possible value, but it is not the /only/ possible value. + return ConditionTruthVal(); +} + /// Scan all symbols referenced by the constraints. If the symbol is not alive /// as marked in LSymbols, mark it as dead in DSymbols. ProgramStateRef @@ -379,8 +387,18 @@ RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) { // Lazily generate a new RangeSet representing all possible values for the // given symbol type. BasicValueFactory &BV = getBasicVals(); - QualType T = sym->getType(BV.getContext()); - return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T)); + QualType T = sym->getType(); + + RangeSet Result(F, BV.getMinValue(T), BV.getMaxValue(T)); + + // Special case: references are known to be non-zero. + if (T->isReferenceType()) { + APSIntType IntType = BV.getAPSIntType(T); + Result = Result.Intersect(BV, F, ++IntType.getZeroValue(), + --IntType.getZeroValue()); + } + + return Result; } //===------------------------------------------------------------------------=== diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp index bc4e4bb..aed994d 100644 --- a/lib/StaticAnalyzer/Core/RegionStore.cpp +++ b/lib/StaticAnalyzer/Core/RegionStore.cpp @@ -15,9 +15,6 @@ // //===----------------------------------------------------------------------===// #include "clang/AST/CharUnits.h" -#include "clang/AST/DeclCXX.h" -#include "clang/AST/ExprCXX.h" -#include "clang/AST/CXXInheritance.h" #include "clang/Analysis/Analyses/LiveVariables.h" #include "clang/Analysis/AnalysisContext.h" #include "clang/Basic/TargetInfo.h" @@ -193,19 +190,6 @@ public: /// casts from arrays to pointers. SVal ArrayToPointer(Loc Array); - /// For DerivedToBase casts, create a CXXBaseObjectRegion and return it. - virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType); - - /// \brief Evaluates C++ dynamic_cast cast. - /// The callback may result in the following 3 scenarios: - /// - Successful cast (ex: derived is subclass of base). - /// - Failed cast (ex: derived is definitely not a subclass of base). - /// - We don't know (base is a symbolic region and we don't have - /// enough info to determine if the cast will succeed at run time). - /// The function returns an SVal representing the derived class; it's - /// valid only if Failed flag is set to false. - virtual SVal evalDynamicCast(SVal base, QualType derivedPtrType,bool &Failed); - StoreRef getInitialStore(const LocationContext *InitLoc) { return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this); } @@ -244,7 +228,7 @@ public: // Made public for helper classes. RegionBindings removeBinding(RegionBindings B, BindingKey K); RegionBindings removeBinding(RegionBindings B, const MemRegion *R, - BindingKey::Kind k); + BindingKey::Kind k); RegionBindings removeBinding(RegionBindings B, const MemRegion *R) { return removeBinding(removeBinding(B, R, BindingKey::Direct), R, @@ -266,15 +250,20 @@ public: // Part of public interface to class. .getRootWithoutRetain(), *this); } - StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr *CL, + /// \brief Create a new store that binds a value to a compound literal. + /// + /// \param ST The original store whose bindings are the basis for the new + /// store. + /// + /// \param CL The compound literal to bind (the binding key). + /// + /// \param LC The LocationContext for the binding. + /// + /// \param V The value to bind to the compound literal. 
+ StoreRef bindCompoundLiteral(Store ST, + const CompoundLiteralExpr *CL, const LocationContext *LC, SVal V); - StoreRef BindDecl(Store store, const VarRegion *VR, SVal InitVal); - - StoreRef BindDeclWithNoInit(Store store, const VarRegion *) { - return StoreRef(store, *this); - } - /// BindStruct - Bind a compound value to a structure. StoreRef BindStruct(Store store, const TypedValueRegion* R, SVal V); @@ -287,7 +276,10 @@ public: // Part of public interface to class. /// as a Default binding. StoreRef BindAggregate(Store store, const TypedRegion *R, SVal DefaultVal); - StoreRef Remove(Store store, Loc LV); + /// \brief Create a new store with the specified binding removed. + /// \param ST the original store, that is the basis for the new store. + /// \param L the location whose binding should be removed. + StoreRef killBinding(Store ST, Loc L); void incrementReferenceCount(Store store) { GetRegionBindings(store).manualRetain(); @@ -477,12 +469,8 @@ public: } bool AddToWorkList(const MemRegion *R, const ClusterBindings *C) { - if (C) { - if (Visited.count(C)) - return false; - Visited.insert(C); - } - + if (C && !Visited.insert(C)) + return false; WL.push_back(R); return true; } @@ -534,6 +522,46 @@ bool RegionStoreManager::scanReachableSymbols(Store S, const MemRegion *R, return true; } +static inline bool isUnionField(const FieldRegion *FR) { + return FR->getDecl()->getParent()->isUnion(); +} + +typedef SmallVector FieldVector; + +void getSymbolicOffsetFields(BindingKey K, FieldVector &Fields) { + assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys"); + + const MemRegion *Base = K.getConcreteOffsetRegion(); + const MemRegion *R = K.getRegion(); + + while (R != Base) { + if (const FieldRegion *FR = dyn_cast(R)) + if (!isUnionField(FR)) + Fields.push_back(FR->getDecl()); + + R = cast(R)->getSuperRegion(); + } +} + +static bool isCompatibleWithFields(BindingKey K, const FieldVector &Fields) { + assert(K.hasSymbolicOffset() && "Not implemented for concrete offset keys"); + + if (Fields.empty()) + return true; + + FieldVector FieldsInBindingKey; + getSymbolicOffsetFields(K, FieldsInBindingKey); + + ptrdiff_t Delta = FieldsInBindingKey.size() - Fields.size(); + if (Delta >= 0) + return std::equal(FieldsInBindingKey.begin() + Delta, + FieldsInBindingKey.end(), + Fields.begin()); + else + return std::equal(FieldsInBindingKey.begin(), FieldsInBindingKey.end(), + Fields.begin() - Delta); +} + RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B, const SubRegion *R) { BindingKey SRKey = BindingKey::Make(R, BindingKey::Default); @@ -543,10 +571,12 @@ RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B, return RBFactory.remove(B, R); } - if (SRKey.hasSymbolicOffset()) { - const SubRegion *Base = cast(SRKey.getConcreteOffsetRegion()); - B = removeSubRegionBindings(B, Base); - return addBinding(B, Base, BindingKey::Default, UnknownVal()); + FieldVector FieldsInSymbolicSubregions; + bool HasSymbolicOffset = SRKey.hasSymbolicOffset(); + if (HasSymbolicOffset) { + getSymbolicOffsetFields(SRKey, FieldsInSymbolicSubregions); + R = cast(SRKey.getConcreteOffsetRegion()); + SRKey = BindingKey::Make(R, BindingKey::Default); } // This assumes the region being invalidated is char-aligned. 
This isn't @@ -574,11 +604,17 @@ RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B, I != E; ++I) { BindingKey NextKey = I.getKey(); if (NextKey.getRegion() == SRKey.getRegion()) { + // FIXME: This doesn't catch the case where we're really invalidating a + // region with a symbolic offset. Example: + // R: points[i].y + // Next: points[0].x + if (NextKey.getOffset() > SRKey.getOffset() && NextKey.getOffset() - SRKey.getOffset() < Length) { // Case 1: The next binding is inside the region we're invalidating. // Remove it. Result = CBFactory.remove(Result, NextKey); + } else if (NextKey.getOffset() == SRKey.getOffset()) { // Case 2: The next binding is at the same offset as the region we're // invalidating. In this case, we need to leave default bindings alone, @@ -589,6 +625,7 @@ RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B, if (NextKey.isDirect()) Result = CBFactory.remove(Result, NextKey); } + } else if (NextKey.hasSymbolicOffset()) { const MemRegion *Base = NextKey.getConcreteOffsetRegion(); if (R->isSubRegionOf(Base)) { @@ -596,16 +633,24 @@ RegionBindings RegionStoreManager::removeSubRegionBindings(RegionBindings B, // its concrete region. We don't know if the binding is still valid, so // we'll be conservative and remove it. if (NextKey.isDirect()) - Result = CBFactory.remove(Result, NextKey); + if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions)) + Result = CBFactory.remove(Result, NextKey); } else if (const SubRegion *BaseSR = dyn_cast(Base)) { // Case 4: The next key is symbolic, but we changed a known // super-region. In this case the binding is certainly no longer valid. if (R == Base || BaseSR->isSubRegionOf(R)) - Result = CBFactory.remove(Result, NextKey); + if (isCompatibleWithFields(NextKey, FieldsInSymbolicSubregions)) + Result = CBFactory.remove(Result, NextKey); } } } + // If we're invalidating a region with a symbolic offset, we need to make sure + // we don't treat the base region as uninitialized anymore. + // FIXME: This isn't very precise; see the example in the loop. + if (HasSymbolicOffset) + Result = CBFactory.add(Result, SRKey, UnknownVal()); + if (Result.isEmpty()) return RBFactory.remove(B, ClusterHead); return RBFactory.add(B, ClusterHead, Result); @@ -724,7 +769,7 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) { // Invalidate the region by setting its default value to // conjured symbol. The type of the symbol is irrelavant. DefinedOrUnknownSVal V = - svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count); + svalBuilder.conjureSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count); B = RM.addBinding(B, baseR, BindingKey::Default, V); return; } @@ -739,8 +784,8 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) { if (T->isStructureOrClassType()) { // Invalidate the region by setting its default value to // conjured symbol. The type of the symbol is irrelavant. - DefinedOrUnknownSVal V = - svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count); + DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx, + Ctx.IntTy, Count); B = RM.addBinding(B, baseR, BindingKey::Default, V); return; } @@ -748,7 +793,7 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) { if (const ArrayType *AT = Ctx.getAsArrayType(T)) { // Set the default value of the array to conjured symbol. 
DefinedOrUnknownSVal V = - svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, + svalBuilder.conjureSymbolVal(baseR, Ex, LCtx, AT->getElementType(), Count); B = RM.addBinding(B, baseR, BindingKey::Default, V); return; @@ -764,8 +809,8 @@ void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) { } - DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, - T,Count); + DefinedOrUnknownSVal V = svalBuilder.conjureSymbolVal(baseR, Ex, LCtx, + T,Count); assert(SymbolManager::canSymbolicate(T) || V.isUnknown()); B = RM.addBinding(B, baseR, BindingKey::Direct, V); } @@ -779,10 +824,9 @@ RegionBindings RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K, // Bind the globals memory space to a new symbol that we will use to derive // the bindings for all globals. const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K); - SVal V = - svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex, LCtx, - /* symbol type, doesn't matter */ Ctx.IntTy, - Count); + SVal V = svalBuilder.conjureSymbolVal(/* SymbolTag = */ (const void*) GS, Ex, LCtx, + /* type does not matter */ Ctx.IntTy, + Count); B = removeBinding(B, GS); B = addBinding(B, BindingKey::Make(GS, BindingKey::Default), V); @@ -897,103 +941,6 @@ SVal RegionStoreManager::ArrayToPointer(Loc Array) { return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, ArrayR, Ctx)); } -// This mirrors Type::getCXXRecordDeclForPointerType(), but there doesn't -// appear to be another need for this in the rest of the codebase. -static const CXXRecordDecl *GetCXXRecordDeclForReferenceType(QualType Ty) { - if (const ReferenceType *RT = Ty->getAs()) - if (const RecordType *RCT = RT->getPointeeType()->getAs()) - return dyn_cast(RCT->getDecl()); - return 0; -} - -SVal RegionStoreManager::evalDerivedToBase(SVal derived, QualType baseType) { - const CXXRecordDecl *baseDecl; - - if (baseType->isPointerType()) - baseDecl = baseType->getCXXRecordDeclForPointerType(); - else if (baseType->isReferenceType()) - baseDecl = GetCXXRecordDeclForReferenceType(baseType); - else - baseDecl = baseType->getAsCXXRecordDecl(); - - assert(baseDecl && "not a CXXRecordDecl?"); - - loc::MemRegionVal *derivedRegVal = dyn_cast(&derived); - if (!derivedRegVal) - return derived; - - const MemRegion *baseReg = - MRMgr.getCXXBaseObjectRegion(baseDecl, derivedRegVal->getRegion()); - - return loc::MemRegionVal(baseReg); -} - -SVal RegionStoreManager::evalDynamicCast(SVal base, QualType derivedType, - bool &Failed) { - Failed = false; - - loc::MemRegionVal *baseRegVal = dyn_cast(&base); - if (!baseRegVal) - return UnknownVal(); - const MemRegion *BaseRegion = baseRegVal->stripCasts(/*StripBases=*/false); - - // Assume the derived class is a pointer or a reference to a CXX record. - derivedType = derivedType->getPointeeType(); - assert(!derivedType.isNull()); - const CXXRecordDecl *DerivedDecl = derivedType->getAsCXXRecordDecl(); - if (!DerivedDecl && !derivedType->isVoidType()) - return UnknownVal(); - - // Drill down the CXXBaseObject chains, which represent upcasts (casts from - // derived to base). - const MemRegion *SR = BaseRegion; - while (const TypedRegion *TSR = dyn_cast_or_null(SR)) { - QualType BaseType = TSR->getLocationType()->getPointeeType(); - assert(!BaseType.isNull()); - const CXXRecordDecl *SRDecl = BaseType->getAsCXXRecordDecl(); - if (!SRDecl) - return UnknownVal(); - - // If found the derived class, the cast succeeds. 
- if (SRDecl == DerivedDecl) - return loc::MemRegionVal(TSR); - - if (!derivedType->isVoidType()) { - // Static upcasts are marked as DerivedToBase casts by Sema, so this will - // only happen when multiple or virtual inheritance is involved. - CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true, - /*DetectVirtual=*/false); - if (SRDecl->isDerivedFrom(DerivedDecl, Paths)) { - SVal Result = loc::MemRegionVal(TSR); - const CXXBasePath &Path = *Paths.begin(); - for (CXXBasePath::const_iterator I = Path.begin(), E = Path.end(); - I != E; ++I) { - Result = evalDerivedToBase(Result, I->Base->getType()); - } - return Result; - } - } - - if (const CXXBaseObjectRegion *R = dyn_cast(TSR)) - // Drill down the chain to get the derived classes. - SR = R->getSuperRegion(); - else { - // We reached the bottom of the hierarchy. - - // If this is a cast to void*, return the region. - if (derivedType->isVoidType()) - return loc::MemRegionVal(TSR); - - // We did not find the derived class. We we must be casting the base to - // derived, so the cast should fail. - Failed = true; - return UnknownVal(); - } - } - - return UnknownVal(); -} - //===----------------------------------------------------------------------===// // Loading values from regions. //===----------------------------------------------------------------------===// @@ -1047,7 +994,7 @@ SVal RegionStoreManager::getBinding(Store store, Loc L, QualType T) { T = TR->getLocationType(); else { const SymbolicRegion *SR = cast(MR); - T = SR->getSymbol()->getType(Ctx); + T = SR->getSymbol()->getType(); } } MR = GetElementZeroRegion(MR, T); @@ -1540,14 +1487,14 @@ bool RegionStoreManager::includedInBindings(Store store, // Binding values to regions. //===----------------------------------------------------------------------===// -StoreRef RegionStoreManager::Remove(Store store, Loc L) { +StoreRef RegionStoreManager::killBinding(Store ST, Loc L) { if (isa(L)) if (const MemRegion* R = cast(L).getRegion()) - return StoreRef(removeBinding(GetRegionBindings(store), + return StoreRef(removeBinding(GetRegionBindings(ST), R).getRootWithoutRetain(), *this); - return StoreRef(store, *this); + return StoreRef(ST, *this); } StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) { @@ -1560,6 +1507,8 @@ StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) { // Check if the region is a struct region. if (const TypedValueRegion* TR = dyn_cast(R)) { QualType Ty = TR->getValueType(); + if (Ty->isArrayType()) + return BindArray(store, TR, V); if (Ty->isStructureOrClassType()) return BindStruct(store, TR, V); if (Ty->isVectorType()) @@ -1569,13 +1518,9 @@ StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) { if (const SymbolicRegion *SR = dyn_cast(R)) { // Binding directly to a symbolic region should be treated as binding // to element 0. - QualType T = SR->getSymbol()->getType(Ctx); - - // FIXME: Is this the right way to handle symbols that are references? 
- if (const PointerType *PT = T->getAs()) - T = PT->getPointeeType(); - else - T = T->getAs()->getPointeeType(); + QualType T = SR->getSymbol()->getType(); + if (T->isAnyPointerType() || T->isReferenceType()) + T = T->getPointeeType(); R = GetElementZeroRegion(SR, T); } @@ -1589,26 +1534,12 @@ StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) { return StoreRef(addBinding(B, Key, V).getRootWithoutRetain(), *this); } -StoreRef RegionStoreManager::BindDecl(Store store, const VarRegion *VR, - SVal InitVal) { - - QualType T = VR->getDecl()->getType(); - - if (T->isArrayType()) - return BindArray(store, VR, InitVal); - if (T->isStructureOrClassType()) - return BindStruct(store, VR, InitVal); - - return Bind(store, svalBuilder.makeLoc(VR), InitVal); -} - // FIXME: this method should be merged into Bind(). -StoreRef RegionStoreManager::BindCompoundLiteral(Store store, +StoreRef RegionStoreManager::bindCompoundLiteral(Store ST, const CompoundLiteralExpr *CL, const LocationContext *LC, SVal V) { - return Bind(store, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)), - V); + return Bind(ST, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)), V); } StoreRef RegionStoreManager::setImplicitDefaultValue(Store store, @@ -1864,7 +1795,7 @@ RegionBindings RegionStoreManager::removeBinding(RegionBindings B, RegionBindings RegionStoreManager::removeBinding(RegionBindings B, const MemRegion *R, - BindingKey::Kind k){ + BindingKey::Kind k){ return removeBinding(B, BindingKey::Make(R, k)); } @@ -1897,7 +1828,6 @@ public: void VisitAddedToCluster(const MemRegion *baseR, const ClusterBindings &C); void VisitCluster(const MemRegion *baseR, const ClusterBindings &C); - void VisitBindingKey(BindingKey K); bool UpdatePostponed(); void VisitBinding(SVal V); }; @@ -1932,17 +1862,21 @@ void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR, const StackArgumentsSpaceRegion *StackReg = cast(TR->getSuperRegion()); const StackFrameContext *RegCtx = StackReg->getStackFrame(); - if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx)) + if (CurrentLCtx && + (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx))) AddToWorkList(TR, &C); } } void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR, const ClusterBindings &C) { - for (ClusterBindings::iterator I = C.begin(), E = C.end(); I != E; ++I) { - VisitBindingKey(I.getKey()); + // Mark the symbol for any SymbolicRegion with live bindings as live itself. + // This means we should continue to track that symbol. + if (const SymbolicRegion *SymR = dyn_cast(baseR)) + SymReaper.markLive(SymR->getSymbol()); + + for (ClusterBindings::iterator I = C.begin(), E = C.end(); I != E; ++I) VisitBinding(I.getData()); - } } void removeDeadBindingsWorker::VisitBinding(SVal V) { @@ -1979,8 +1913,8 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) { if (const BlockDataRegion *BR = dyn_cast(R)) { BlockDataRegion::referenced_vars_iterator I = BR->referenced_vars_begin(), E = BR->referenced_vars_end(); - for ( ; I != E; ++I) - AddToWorkList(I.getCapturedRegion()); + for ( ; I != E; ++I) + AddToWorkList(I.getCapturedRegion()); } } @@ -1991,20 +1925,6 @@ void removeDeadBindingsWorker::VisitBinding(SVal V) { SymReaper.markLive(*SI); } -void removeDeadBindingsWorker::VisitBindingKey(BindingKey K) { - const MemRegion *R = K.getRegion(); - - // Mark this region "live" by adding it to the worklist. This will cause - // use to visit all regions in the cluster (if we haven't visited them - // already). 
- if (AddToWorkList(R)) { - // Mark the symbol for any live SymbolicRegion as "live". This means we - // should continue to track that symbol. - if (const SymbolicRegion *SymR = dyn_cast(R)) - SymReaper.markLive(SymR->getSymbol()); - } -} - bool removeDeadBindingsWorker::UpdatePostponed() { // See if any postponed SymbolicRegions are actually live now, after // having done a scan. @@ -2012,7 +1932,7 @@ bool removeDeadBindingsWorker::UpdatePostponed() { for (SmallVectorImpl::iterator I = Postponed.begin(), E = Postponed.end() ; I != E ; ++I) { - if (const SymbolicRegion *SR = cast_or_null(*I)) { + if (const SymbolicRegion *SR = *I) { if (SymReaper.isLive(SR->getSymbol())) { changed |= AddToWorkList(SR); *I = NULL; diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp index d1936cd..b87169a 100644 --- a/lib/StaticAnalyzer/Core/SValBuilder.cpp +++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp @@ -106,25 +106,23 @@ SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) { return nonloc::SymbolVal(sym); } -DefinedOrUnknownSVal -SValBuilder::getConjuredSymbolVal(const void *symbolTag, - const Expr *expr, - const LocationContext *LCtx, - unsigned count) { +DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag, + const Expr *expr, + const LocationContext *LCtx, + unsigned count) { QualType T = expr->getType(); - return getConjuredSymbolVal(symbolTag, expr, LCtx, T, count); + return conjureSymbolVal(symbolTag, expr, LCtx, T, count); } -DefinedOrUnknownSVal -SValBuilder::getConjuredSymbolVal(const void *symbolTag, - const Expr *expr, - const LocationContext *LCtx, - QualType type, - unsigned count) { +DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const void *symbolTag, + const Expr *expr, + const LocationContext *LCtx, + QualType type, + unsigned count) { if (!SymbolManager::canSymbolicate(type)) return UnknownVal(); - SymbolRef sym = SymMgr.getConjuredSymbol(expr, LCtx, type, count, symbolTag); + SymbolRef sym = SymMgr.conjureSymbol(expr, LCtx, type, count, symbolTag); if (Loc::isLocType(type)) return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym)); @@ -133,15 +131,14 @@ SValBuilder::getConjuredSymbolVal(const void *symbolTag, } -DefinedOrUnknownSVal -SValBuilder::getConjuredSymbolVal(const Stmt *stmt, - const LocationContext *LCtx, - QualType type, - unsigned visitCount) { +DefinedOrUnknownSVal SValBuilder::conjureSymbolVal(const Stmt *stmt, + const LocationContext *LCtx, + QualType type, + unsigned visitCount) { if (!SymbolManager::canSymbolicate(type)) return UnknownVal(); - SymbolRef sym = SymMgr.getConjuredSymbol(stmt, LCtx, type, visitCount); + SymbolRef sym = SymMgr.conjureSymbol(stmt, LCtx, type, visitCount); if (Loc::isLocType(type)) return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym)); @@ -157,7 +154,7 @@ SValBuilder::getConjuredHeapSymbolVal(const Expr *E, assert(Loc::isLocType(T)); assert(SymbolManager::canSymbolicate(T)); - SymbolRef sym = SymMgr.getConjuredSymbol(E, LCtx, T, VisitCount); + SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, T, VisitCount); return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym)); } diff --git a/lib/StaticAnalyzer/Core/SVals.cpp b/lib/StaticAnalyzer/Core/SVals.cpp index 8437f50..e34ab6a 100644 --- a/lib/StaticAnalyzer/Core/SVals.cpp +++ b/lib/StaticAnalyzer/Core/SVals.cpp @@ -51,7 +51,8 @@ const FunctionDecl *SVal::getAsFunctionDecl() const { if (const loc::MemRegionVal* X = dyn_cast(this)) { const MemRegion* R = X->getRegion(); if (const FunctionTextRegion *CTR = R->getAs()) - 
return CTR->getDecl(); + if (const FunctionDecl *FD = dyn_cast(CTR->getDecl())) + return FD; } return 0; diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp index 5568f1c..4236ee4 100644 --- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp +++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp @@ -67,7 +67,9 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state, ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state, Loc cond, bool assumption) { state = assumeAux(state, cond, assumption); - return SU.processAssume(state, cond, assumption); + if (NotifyAssumeClients && SU) + return SU->processAssume(state, cond, assumption); + return state; } ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state, @@ -113,7 +115,9 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state, NonLoc cond, bool assumption) { state = assumeAux(state, cond, assumption); - return SU.processAssume(state, cond, assumption); + if (NotifyAssumeClients && SU) + return SU->processAssume(state, cond, assumption); + return state; } static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) { @@ -136,7 +140,7 @@ ProgramStateRef SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State, SymbolRef Sym, bool Assumption) { BasicValueFactory &BVF = getBasicVals(); - QualType T = Sym->getType(BVF.getContext()); + QualType T = Sym->getType(); // None of the constraint solvers currently support non-integer types. if (!T->isIntegerType()) @@ -186,7 +190,7 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state, BinaryOperator::Opcode op = SE->getOpcode(); // Implicitly compare non-comparison expressions to 0. if (!BinaryOperator::isComparisonOp(op)) { - QualType T = SE->getType(BasicVals.getContext()); + QualType T = SE->getType(); const llvm::APSInt &zero = BasicVals.getValue(0, T); op = (Assumption ? BO_NE : BO_EQ); return assumeSymRel(state, SE, op, zero); @@ -235,11 +239,9 @@ ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state, assert(BinaryOperator::isComparisonOp(op) && "Non-comparison ops should be rewritten as comparisons to zero."); - BasicValueFactory &BVF = getBasicVals(); - ASTContext &Ctx = BVF.getContext(); - // Get the type used for calculating wraparound. - APSIntType WraparoundType = BVF.getAPSIntType(LHS->getType(Ctx)); + BasicValueFactory &BVF = getBasicVals(); + APSIntType WraparoundType = BVF.getAPSIntType(LHS->getType()); // We only handle simple comparisons of the form "$sym == constant" // or "($sym+constant1) == constant2". 
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h index 088d70c..01f0b4e 100644 --- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h +++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h @@ -22,10 +22,10 @@ namespace clang { namespace ento { class SimpleConstraintManager : public ConstraintManager { - SubEngine &SU; + SubEngine *SU; BasicValueFactory &BVF; public: - SimpleConstraintManager(SubEngine &subengine, BasicValueFactory &BV) + SimpleConstraintManager(SubEngine *subengine, BasicValueFactory &BV) : SU(subengine), BVF(BV) {} virtual ~SimpleConstraintManager(); diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp index ad58a07..fbc6ba0 100644 --- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp +++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp @@ -81,7 +81,7 @@ SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) { } if (const SymExpr *se = val.getAsSymbolicExpression()) { - QualType T = Context.getCanonicalType(se->getType(Context)); + QualType T = Context.getCanonicalType(se->getType()); // If types are the same or both are integers, ignore the cast. // FIXME: Remove this hack when we support symbolic truncation/extension. // HACK: If both castTy and T are integers, ignore the cast. This is @@ -276,7 +276,7 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS, // with the given constant. // FIXME: This is an approximation of Sema::UsualArithmeticConversions. ASTContext &Ctx = getContext(); - QualType SymbolType = LHS->getType(Ctx); + QualType SymbolType = LHS->getType(); uint64_t ValWidth = RHS.getBitWidth(); uint64_t TypeWidth = Ctx.getTypeSize(SymbolType); @@ -318,7 +318,9 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state, return makeTruthVal(false, resultTy); case BO_Xor: case BO_Sub: - return makeIntVal(0, resultTy); + if (resultTy->isIntegralOrEnumerationType()) + return makeIntVal(0, resultTy); + return evalCastFromNonLoc(makeIntVal(0, /*Unsigned=*/false), resultTy); case BO_Or: case BO_And: return evalCastFromNonLoc(lhs, resultTy); @@ -459,7 +461,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state, case BO_NE: // Negate the comparison and make a value. opc = NegateComparison(opc); - assert(symIntExpr->getType(Context) == resultTy); + assert(symIntExpr->getType() == resultTy); return makeNonLoc(symIntExpr->getLHS(), opc, symIntExpr->getRHS(), resultTy); } @@ -505,7 +507,8 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state, } else if (isa(Sym)) { // Does the symbol simplify to a constant? If so, "fold" the constant // by setting 'lhs' to a ConcreteInt and try again. 
- if (const llvm::APSInt *Constant = state->getSymVal(Sym)) { + if (const llvm::APSInt *Constant = state->getConstraintManager() + .getSymVal(state, Sym)) { lhs = nonloc::ConcreteInt(*Constant); continue; } @@ -916,14 +919,8 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state, else if (isa(region)) { superR = region; index = rhs; - if (const PointerType *PT = resultTy->getAs()) { - elementType = PT->getPointeeType(); - } - else { - const ObjCObjectPointerType *OT = - resultTy->getAs(); - elementType = OT->getPointeeType(); - } + if (resultTy->isAnyPointerType()) + elementType = resultTy->getPointeeType(); } if (NonLoc *indexV = dyn_cast(&index)) { @@ -946,7 +943,7 @@ const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state, return &X->getValue(); if (SymbolRef Sym = V.getAsSymbol()) - return state->getSymVal(Sym); + return state->getConstraintManager().getSymVal(state, Sym); // FIXME: Add support for SymExprs. return NULL; diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp index 3af60a1..939ae54 100644 --- a/lib/StaticAnalyzer/Core/Store.cpp +++ b/lib/StaticAnalyzer/Core/Store.cpp @@ -15,6 +15,7 @@ #include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h" #include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h" #include "clang/AST/CharUnits.h" +#include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclObjC.h" using namespace clang; @@ -233,6 +234,91 @@ SVal StoreManager::evalDerivedToBase(SVal Derived, const CastExpr *Cast) { return Result; } +SVal StoreManager::evalDerivedToBase(SVal Derived, const CXXBasePath &Path) { + // Walk through the path to create nested CXXBaseRegions. + SVal Result = Derived; + for (CXXBasePath::const_iterator I = Path.begin(), E = Path.end(); + I != E; ++I) { + Result = evalDerivedToBase(Result, I->Base->getType()); + } + return Result; +} + +SVal StoreManager::evalDerivedToBase(SVal Derived, QualType BaseType) { + loc::MemRegionVal *DerivedRegVal = dyn_cast(&Derived); + if (!DerivedRegVal) + return Derived; + + const CXXRecordDecl *BaseDecl = BaseType->getPointeeCXXRecordDecl(); + if (!BaseDecl) + BaseDecl = BaseType->getAsCXXRecordDecl(); + assert(BaseDecl && "not a C++ object?"); + + const MemRegion *BaseReg = + MRMgr.getCXXBaseObjectRegion(BaseDecl, DerivedRegVal->getRegion()); + + return loc::MemRegionVal(BaseReg); +} + +SVal StoreManager::evalDynamicCast(SVal Base, QualType DerivedType, + bool &Failed) { + Failed = false; + + loc::MemRegionVal *BaseRegVal = dyn_cast(&Base); + if (!BaseRegVal) + return UnknownVal(); + const MemRegion *BaseRegion = BaseRegVal->stripCasts(/*StripBases=*/false); + + // Assume the derived class is a pointer or a reference to a CXX record. + DerivedType = DerivedType->getPointeeType(); + assert(!DerivedType.isNull()); + const CXXRecordDecl *DerivedDecl = DerivedType->getAsCXXRecordDecl(); + if (!DerivedDecl && !DerivedType->isVoidType()) + return UnknownVal(); + + // Drill down the CXXBaseObject chains, which represent upcasts (casts from + // derived to base). + const MemRegion *SR = BaseRegion; + while (const TypedRegion *TSR = dyn_cast_or_null(SR)) { + QualType BaseType = TSR->getLocationType()->getPointeeType(); + assert(!BaseType.isNull()); + const CXXRecordDecl *SRDecl = BaseType->getAsCXXRecordDecl(); + if (!SRDecl) + return UnknownVal(); + + // If found the derived class, the cast succeeds. 
+ if (SRDecl == DerivedDecl) + return loc::MemRegionVal(TSR); + + if (!DerivedType->isVoidType()) { + // Static upcasts are marked as DerivedToBase casts by Sema, so this will + // only happen when multiple or virtual inheritance is involved. + CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true, + /*DetectVirtual=*/false); + if (SRDecl->isDerivedFrom(DerivedDecl, Paths)) + return evalDerivedToBase(loc::MemRegionVal(TSR), Paths.front()); + } + + if (const CXXBaseObjectRegion *R = dyn_cast(TSR)) + // Drill down the chain to get the derived classes. + SR = R->getSuperRegion(); + else { + // We reached the bottom of the hierarchy. + + // If this is a cast to void*, return the region. + if (DerivedType->isVoidType()) + return loc::MemRegionVal(TSR); + + // We did not find the derived class. We we must be casting the base to + // derived, so the cast should fail. + Failed = true; + return UnknownVal(); + } + } + + return UnknownVal(); +} + /// CastRetrievedVal - Used by subclasses of StoreManager to implement /// implicit casts that arise from loads from regions that are reinterpreted diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp index 0bc192d..0c5098b 100644 --- a/lib/StaticAnalyzer/Core/SymbolManager.cpp +++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp @@ -117,21 +117,17 @@ bool SymExpr::symbol_iterator::operator!=(const symbol_iterator &X) const { SymExpr::symbol_iterator::symbol_iterator(const SymExpr *SE) { itr.push_back(SE); - while (!isa(itr.back())) expand(); } SymExpr::symbol_iterator &SymExpr::symbol_iterator::operator++() { assert(!itr.empty() && "attempting to iterate on an 'end' iterator"); - assert(isa(itr.back())); - itr.pop_back(); - if (!itr.empty()) - while (!isa(itr.back())) expand(); + expand(); return *this; } SymbolRef SymExpr::symbol_iterator::operator*() { assert(!itr.empty() && "attempting to dereference an 'end' iterator"); - return cast(itr.back()); + return itr.back(); } void SymExpr::symbol_iterator::expand() { @@ -187,11 +183,11 @@ SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) { return cast(SD); } -const SymbolConjured* -SymbolManager::getConjuredSymbol(const Stmt *E, const LocationContext *LCtx, - QualType T, unsigned Count, - const void *SymbolTag) { - +const SymbolConjured* SymbolManager::conjureSymbol(const Stmt *E, + const LocationContext *LCtx, + QualType T, + unsigned Count, + const void *SymbolTag) { llvm::FoldingSetNodeID profile; SymbolConjured::Profile(profile, E, T, Count, LCtx, SymbolTag); void *InsertPos; @@ -328,23 +324,24 @@ const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs, return cast(data); } -QualType SymbolConjured::getType(ASTContext&) const { +QualType SymbolConjured::getType() const { return T; } -QualType SymbolDerived::getType(ASTContext &Ctx) const { +QualType SymbolDerived::getType() const { return R->getValueType(); } -QualType SymbolExtent::getType(ASTContext &Ctx) const { +QualType SymbolExtent::getType() const { + ASTContext &Ctx = R->getMemRegionManager()->getContext(); return Ctx.getSizeType(); } -QualType SymbolMetadata::getType(ASTContext&) const { +QualType SymbolMetadata::getType() const { return T; } -QualType SymbolRegionValue::getType(ASTContext &C) const { +QualType SymbolRegionValue::getType() const { return R->getValueType(); } @@ -466,41 +463,56 @@ bool SymbolReaper::isLive(SymbolRef sym) { markDependentsLive(sym); return true; } - - if (const SymbolDerived *derived = dyn_cast(sym)) { - if 
(isLive(derived->getParentSymbol())) { - markLive(sym); - return true; - } - return false; - } - - if (const SymbolExtent *extent = dyn_cast(sym)) { - if (isLiveRegion(extent->getRegion())) { - markLive(sym); - return true; - } - return false; + + bool KnownLive; + + switch (sym->getKind()) { + case SymExpr::RegionValueKind: + // FIXME: We should be able to use isLiveRegion here (this behavior + // predates isLiveRegion), but doing so causes test failures. Investigate. + KnownLive = true; + break; + case SymExpr::ConjuredKind: + KnownLive = false; + break; + case SymExpr::DerivedKind: + KnownLive = isLive(cast(sym)->getParentSymbol()); + break; + case SymExpr::ExtentKind: + KnownLive = isLiveRegion(cast(sym)->getRegion()); + break; + case SymExpr::MetadataKind: + KnownLive = MetadataInUse.count(sym) && + isLiveRegion(cast(sym)->getRegion()); + if (KnownLive) + MetadataInUse.erase(sym); + break; + case SymExpr::SymIntKind: + KnownLive = isLive(cast(sym)->getLHS()); + break; + case SymExpr::IntSymKind: + KnownLive = isLive(cast(sym)->getRHS()); + break; + case SymExpr::SymSymKind: + KnownLive = isLive(cast(sym)->getLHS()) && + isLive(cast(sym)->getRHS()); + break; + case SymExpr::CastSymbolKind: + KnownLive = isLive(cast(sym)->getOperand()); + break; } - if (const SymbolMetadata *metadata = dyn_cast(sym)) { - if (MetadataInUse.count(sym)) { - if (isLiveRegion(metadata->getRegion())) { - markLive(sym); - MetadataInUse.erase(sym); - return true; - } - } - return false; - } + if (KnownLive) + markLive(sym); - // Interogate the symbol. It may derive from an input value to - // the analyzed function/method. - return isa(sym); + return KnownLive; } bool SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const { + if (LCtx == 0) + return false; + if (LCtx != ELCtx) { // If the reaper's location context is a parent of the expression's // location context, then the expression value is now "out of scope". @@ -508,6 +520,7 @@ SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const { return false; return true; } + // If no statement is provided, everything is this and parent contexts is live. if (!Loc) return true; @@ -517,10 +530,16 @@ SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const { bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{ const StackFrameContext *VarContext = VR->getStackFrame(); + + if (!VarContext) + return true; + + if (!LCtx) + return false; const StackFrameContext *CurrentContext = LCtx->getCurrentStackFrame(); if (VarContext == CurrentContext) { - // If no statemetnt is provided, everything is live. + // If no statement is provided, everything is live. 
if (!Loc) return true; diff --git a/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp index 66bf4bb..e09f4e3 100644 --- a/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp +++ b/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp @@ -41,7 +41,6 @@ public: PathGenerationScheme getGenerationScheme() const { return Minimal; } bool supportsLogicalOpControlFlow() const { return true; } bool supportsAllBlockEdges() const { return true; } - virtual bool useVerboseDescription() const { return true; } virtual bool supportsCrossFileDiagnostics() const { return true; } }; diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp index 34b5266..7dbac3c 100644 --- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp +++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp @@ -34,7 +34,7 @@ #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" -#include "clang/Frontend/AnalyzerOptions.h" +#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h" #include "clang/Lex/Preprocessor.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/Path.h" @@ -78,7 +78,6 @@ public: ClangDiagPathDiagConsumer(DiagnosticsEngine &Diag) : Diag(Diag) {} virtual ~ClangDiagPathDiagConsumer() {} virtual StringRef getName() const { return "ClangDiags"; } - virtual bool useVerboseDescription() const { return false; } virtual PathGenerationScheme getGenerationScheme() const { return None; } void FlushDiagnosticsImpl(std::vector &Diags, @@ -86,7 +85,7 @@ public: for (std::vector::iterator I = Diags.begin(), E = Diags.end(); I != E; ++I) { const PathDiagnostic *PD = *I; - StringRef desc = PD->getDescription(); + StringRef desc = PD->getShortDescription(); SmallString<512> TmpStr; llvm::raw_svector_ostream Out(TmpStr); for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) { @@ -121,11 +120,12 @@ namespace { class AnalysisConsumer : public ASTConsumer, public RecursiveASTVisitor { - enum AnalysisMode { - ANALYSIS_SYNTAX, - ANALYSIS_PATH, - ANALYSIS_ALL + enum { + AM_None = 0, + AM_Syntax = 0x1, + AM_Path = 0x2 }; + typedef unsigned AnalysisMode; /// Mode of the analyzes while recursively visiting Decls. AnalysisMode RecVisitorMode; @@ -136,7 +136,7 @@ public: ASTContext *Ctx; const Preprocessor &PP; const std::string OutDir; - AnalyzerOptions Opts; + AnalyzerOptionsRef Opts; ArrayRef Plugins; /// \brief Stores the declarations from the local translation unit. 
@@ -164,19 +164,19 @@ public: AnalysisConsumer(const Preprocessor& pp, const std::string& outdir, - const AnalyzerOptions& opts, + AnalyzerOptionsRef opts, ArrayRef plugins) - : RecVisitorMode(ANALYSIS_ALL), RecVisitorBR(0), + : RecVisitorMode(0), RecVisitorBR(0), Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins) { DigestAnalyzerOptions(); - if (Opts.PrintStats) { + if (Opts->PrintStats) { llvm::EnableStatistics(); TUTotalTimer = new llvm::Timer("Analyzer Total Time"); } } ~AnalysisConsumer() { - if (Opts.PrintStats) + if (Opts->PrintStats) delete TUTotalTimer; } @@ -185,49 +185,52 @@ public: PathConsumers.push_back(new ClangDiagPathDiagConsumer(PP.getDiagnostics())); if (!OutDir.empty()) { - switch (Opts.AnalysisDiagOpt) { + switch (Opts->AnalysisDiagOpt) { default: #define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE) \ case PD_##NAME: CREATEFN(PathConsumers, OutDir, PP); break; -#include "clang/Frontend/Analyses.def" +#include "clang/StaticAnalyzer/Core/Analyses.def" } - } else if (Opts.AnalysisDiagOpt == PD_TEXT) { + } else if (Opts->AnalysisDiagOpt == PD_TEXT) { // Create the text client even without a specified output file since // it just uses diagnostic notes. createTextPathDiagnosticConsumer(PathConsumers, "", PP); } // Create the analyzer component creators. - switch (Opts.AnalysisStoreOpt) { + switch (Opts->AnalysisStoreOpt) { default: llvm_unreachable("Unknown store manager."); #define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \ case NAME##Model: CreateStoreMgr = CREATEFN; break; -#include "clang/Frontend/Analyses.def" +#include "clang/StaticAnalyzer/Core/Analyses.def" } - switch (Opts.AnalysisConstraintsOpt) { + switch (Opts->AnalysisConstraintsOpt) { default: llvm_unreachable("Unknown store manager."); #define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN) \ case NAME##Model: CreateConstraintMgr = CREATEFN; break; -#include "clang/Frontend/Analyses.def" +#include "clang/StaticAnalyzer/Core/Analyses.def" } } void DisplayFunction(const Decl *D, AnalysisMode Mode) { - if (!Opts.AnalyzerDisplayProgress) + if (!Opts->AnalyzerDisplayProgress) return; SourceManager &SM = Mgr->getASTContext().getSourceManager(); PresumedLoc Loc = SM.getPresumedLoc(D->getLocation()); if (Loc.isValid()) { llvm::errs() << "ANALYZE"; - switch (Mode) { - case ANALYSIS_SYNTAX: llvm::errs() << "(Syntax)"; break; - case ANALYSIS_PATH: llvm::errs() << "(Path Sensitive)"; break; - case ANALYSIS_ALL: break; - }; + + if (Mode == AM_Syntax) + llvm::errs() << " (Syntax)"; + else if (Mode == AM_Path) + llvm::errs() << " (Path)"; + else + assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!"); + llvm::errs() << ": " << Loc.getFilename(); if (isa(D) || isa(D)) { const NamedDecl *ND = cast(D); @@ -246,7 +249,7 @@ public: virtual void Initialize(ASTContext &Context) { Ctx = &Context; - checkerMgr.reset(createCheckerManager(Opts, PP.getLangOpts(), Plugins, + checkerMgr.reset(createCheckerManager(*Opts, PP.getLangOpts(), Plugins, PP.getDiagnostics())); Mgr.reset(new AnalysisManager(*Ctx, PP.getDiagnostics(), @@ -255,17 +258,7 @@ public: CreateStoreMgr, CreateConstraintMgr, checkerMgr.get(), - Opts.MaxNodes, Opts.MaxLoop, - Opts.VisualizeEGDot, Opts.VisualizeEGUbi, - Opts.AnalysisPurgeOpt, Opts.EagerlyAssume, - Opts.TrimGraph, - Opts.UnoptimizedCFG, Opts.CFGAddImplicitDtors, - Opts.EagerlyTrimEGraph, - Opts.IPAMode, - Opts.InlineMaxStackDepth, - Opts.InlineMaxFunctionSize, - Opts.InliningMode, - Opts.NoRetryExhausted)); + *Opts)); } /// \brief Store the top level decls in the set 
to be processed later on. @@ -277,7 +270,7 @@ public: /// \brief Build the call graph for all the top level decls of this TU and /// use it to define the order in which the functions should be visited. - void HandleDeclsGallGraph(const unsigned LocalTUDeclsSize); + void HandleDeclsCallGraph(const unsigned LocalTUDeclsSize); /// \brief Run analyzes(syntax or path sensitive) on the given function. /// \param Mode - determines if we are requesting syntax only or path @@ -297,7 +290,9 @@ public: /// Handle callbacks for arbitrary Decls. bool VisitDecl(Decl *D) { - checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR); + AnalysisMode Mode = getModeForDecl(D, RecVisitorMode); + if (Mode & AM_Syntax) + checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR); return true; } @@ -316,7 +311,6 @@ public: } bool VisitObjCMethodDecl(ObjCMethodDecl *MD) { - checkerMgr->runCheckersOnASTDecl(MD, *Mgr, *RecVisitorBR); if (MD->isThisDeclarationADefinition()) HandleCode(MD, RecVisitorMode); return true; @@ -326,7 +320,7 @@ private: void storeTopLevelDecls(DeclGroupRef DG); /// \brief Check if we should skip (not analyze) the given function. - bool skipFunction(Decl *D); + AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode); }; } // end anonymous namespace @@ -358,7 +352,23 @@ void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) { } } -void AnalysisConsumer::HandleDeclsGallGraph(const unsigned LocalTUDeclsSize) { +static bool shouldSkipFunction(CallGraphNode *N, + SmallPtrSet Visited) { + // We want to re-analyse the functions as top level in several cases: + // - The 'init' methods should be reanalyzed because + // ObjCNonNilReturnValueChecker assumes that '[super init]' never returns + // 'nil' and unless we analyze the 'init' functions as top level, we will not + // catch errors within defensive code. + // - We want to reanalyze all ObjC methods as top level to report Retain + // Count naming convention errors more aggressively. + if (isa(N->getDecl())) + return false; + + // Otherwise, if we visited the function before, do not reanalyze it. + return Visited.count(N); +} + +void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) { // Otherwise, use the Callgraph to derive the order. // Build the Call Graph. CallGraph CG; @@ -407,21 +417,21 @@ void AnalysisConsumer::HandleDeclsGallGraph(const unsigned LocalTUDeclsSize) { // Push the children into the queue. for (CallGraphNode::const_iterator CI = N->begin(), CE = N->end(); CI != CE; ++CI) { - if (!Visited.count(*CI)) + if (!shouldSkipFunction(*CI, Visited)) BFSQueue.push_back(*CI); } // Skip the functions which have been processed already or previously // inlined. - if (Visited.count(N)) + if (shouldSkipFunction(N, Visited)) continue; // Analyze the function. SetOfConstDecls VisitedCallees; Decl *D = N->getDecl(); assert(D); - HandleCode(D, ANALYSIS_PATH, - (Mgr->InliningMode == All ? 0 : &VisitedCallees)); + HandleCode(D, AM_Path, + (Mgr->options.InliningMode == All ? 0 : &VisitedCallees)); // Add the visited callees to the global visited set. for (SetOfConstDecls::iterator I = VisitedCallees.begin(), @@ -451,7 +461,9 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) { // Run the AST-only checks using the order in which functions are defined. // If inlining is not turned on, use the simplest function order for path // sensitive analyzes as well. - RecVisitorMode = (Mgr->shouldInlineCall() ? 
ANALYSIS_SYNTAX : ANALYSIS_ALL); + RecVisitorMode = AM_Syntax; + if (!Mgr->shouldInlineCall()) + RecVisitorMode |= AM_Path; RecVisitorBR = &BR; // Process all the top level declarations. @@ -466,7 +478,7 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) { } if (Mgr->shouldInlineCall()) - HandleDeclsGallGraph(LocalTUDeclsSize); + HandleDeclsCallGraph(LocalTUDeclsSize); // After all decls handled, run checkers on the entire TranslationUnit. checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR); @@ -513,24 +525,32 @@ static std::string getFunctionName(const Decl *D) { return ""; } -bool AnalysisConsumer::skipFunction(Decl *D) { - if (!Opts.AnalyzeSpecificFunction.empty() && - getFunctionName(D) != Opts.AnalyzeSpecificFunction) - return true; - - // Don't run the actions on declarations in header files unless - // otherwise specified. +AnalysisConsumer::AnalysisMode +AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) { + if (!Opts->AnalyzeSpecificFunction.empty() && + getFunctionName(D) != Opts->AnalyzeSpecificFunction) + return AM_None; + + // Unless -analyze-all is specified, treat decls differently depending on + // where they came from: + // - Main source file: run both path-sensitive and non-path-sensitive checks. + // - Header files: run non-path-sensitive checks only. + // - System headers: don't run any checks. SourceManager &SM = Ctx->getSourceManager(); SourceLocation SL = SM.getExpansionLoc(D->getLocation()); - if (!Opts.AnalyzeAll && !SM.isFromMainFile(SL)) - return true; + if (!Opts->AnalyzeAll && !SM.isFromMainFile(SL)) { + if (SL.isInvalid() || SM.isInSystemHeader(SL)) + return AM_None; + return Mode & ~AM_Path; + } - return false; + return Mode; } void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode, SetOfConstDecls *VisitedCallees) { - if (skipFunction(D)) + Mode = getModeForDecl(D, Mode); + if (Mode == AM_None) return; DisplayFunction(D, Mode); @@ -548,16 +568,16 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode, SmallVector WL; WL.push_back(D); - if (D->hasBody() && Opts.AnalyzeNestedBlocks) + if (D->hasBody() && Opts->AnalyzeNestedBlocks) FindBlocks(cast(D), WL); BugReporter BR(*Mgr); for (SmallVectorImpl::iterator WI=WL.begin(), WE=WL.end(); WI != WE; ++WI) if ((*WI)->hasBody()) { - if (Mode != ANALYSIS_PATH) + if (Mode & AM_Syntax) checkerMgr->runCheckersOnASTBody(*WI, *Mgr, BR); - if (Mode != ANALYSIS_SYNTAX && checkerMgr->hasPathSensitiveCheckers()) { + if ((Mode & AM_Path) && checkerMgr->hasPathSensitiveCheckers()) { RunPathSensitiveChecks(*WI, VisitedCallees); NumFunctionsAnalyzed++; } @@ -583,22 +603,22 @@ void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled, // Set the graph auditor. OwningPtr Auditor; - if (Mgr->shouldVisualizeUbigraph()) { + if (Mgr->options.visualizeExplodedGraphWithUbiGraph) { Auditor.reset(CreateUbiViz()); ExplodedNode::SetAuditor(Auditor.get()); } // Execute the worklist algorithm. Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D), - Mgr->getMaxNodes()); + Mgr->options.MaxNodes); // Release the auditor (if any) so that it doesn't monitor the graph // created BugReporter. ExplodedNode::SetAuditor(0); // Visualize the exploded graph. - if (Mgr->shouldVisualizeGraphviz()) - Eng.ViewGraph(Mgr->shouldTrimGraph()); + if (Mgr->options.visualizeExplodedGraphWithGraphViz) + Eng.ViewGraph(Mgr->options.TrimGraph); // Display warnings. 
Eng.getBugReporter().FlushReports(); @@ -629,7 +649,7 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D, ASTConsumer* ento::CreateAnalysisConsumer(const Preprocessor& pp, const std::string& outDir, - const AnalyzerOptions& opts, + AnalyzerOptionsRef opts, ArrayRef plugins) { // Disable the effects of '-Werror' when using the AnalysisConsumer. pp.getDiagnostics().setWarningsAsErrors(false); diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h index 5a16bff..b75220b 100644 --- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h +++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h @@ -16,11 +16,11 @@ #define LLVM_CLANG_GR_ANALYSISCONSUMER_H #include "clang/Basic/LLVM.h" +#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h" #include namespace clang { -class AnalyzerOptions; class ASTConsumer; class Preprocessor; class DiagnosticsEngine; @@ -33,7 +33,7 @@ class CheckerManager; /// options.) ASTConsumer* CreateAnalysisConsumer(const Preprocessor &pp, const std::string &output, - const AnalyzerOptions& opts, + AnalyzerOptionsRef opts, ArrayRef plugins); } // end GR namespace diff --git a/lib/StaticAnalyzer/Frontend/CMakeLists.txt b/lib/StaticAnalyzer/Frontend/CMakeLists.txt index 06d1485..aafb249 100644 --- a/lib/StaticAnalyzer/Frontend/CMakeLists.txt +++ b/lib/StaticAnalyzer/Frontend/CMakeLists.txt @@ -25,6 +25,7 @@ target_link_libraries(clangStaticAnalyzerFrontend clangLex clangAST clangFrontend - clangRewrite + clangRewriteCore + clangRewriteFrontend clangStaticAnalyzerCheckers ) diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp index 0229aed..e8daa65 100644 --- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp +++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp @@ -17,7 +17,7 @@ #include "clang/StaticAnalyzer/Core/CheckerManager.h" #include "clang/StaticAnalyzer/Core/CheckerOptInfo.h" #include "clang/StaticAnalyzer/Core/CheckerRegistry.h" -#include "clang/Frontend/AnalyzerOptions.h" +#include "clang/StaticAnalyzer/Core/AnalyzerOptions.h" #include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Basic/Diagnostic.h" #include "llvm/Support/DynamicLibrary.h" diff --git a/lib/Tooling/CMakeLists.txt b/lib/Tooling/CMakeLists.txt index 49d3101..d29e564 100644 --- a/lib/Tooling/CMakeLists.txt +++ b/lib/Tooling/CMakeLists.txt @@ -2,8 +2,10 @@ set(LLVM_LINK_COMPONENTS support) add_clang_library(clangTooling ArgumentsAdjusters.cpp - CommandLineClangTool.cpp + CommonOptionsParser.cpp CompilationDatabase.cpp + FileMatchTrie.cpp + JSONCompilationDatabase.cpp Refactoring.cpp RefactoringCallbacks.cpp Tooling.cpp @@ -23,5 +25,6 @@ target_link_libraries(clangTooling clangFrontend clangAST clangASTMatchers - clangRewrite + clangRewriteCore + clangRewriteFrontend ) diff --git a/lib/Tooling/CommandLineClangTool.cpp b/lib/Tooling/CommandLineClangTool.cpp deleted file mode 100644 index 8da2a33..0000000 --- a/lib/Tooling/CommandLineClangTool.cpp +++ /dev/null @@ -1,80 +0,0 @@ -//===--- CommandLineClangTool.cpp - command-line clang tools driver -------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. 
-// -//===----------------------------------------------------------------------===// -// -// This file implements the CommandLineClangTool class used to run clang -// tools as separate command-line applications with a consistent common -// interface for handling compilation database and input files. -// -// It provides a common subset of command-line options, common algorithm -// for locating a compilation database and source files, and help messages -// for the basic command-line interface. -// -// It creates a CompilationDatabase, initializes a ClangTool and runs a -// user-specified FrontendAction over all TUs in which the given files are -// compiled. -// -//===----------------------------------------------------------------------===// - -#include "clang/Frontend/FrontendActions.h" -#include "clang/Tooling/CommandLineClangTool.h" -#include "clang/Tooling/Tooling.h" - -using namespace clang::tooling; -using namespace llvm; - -static const char *MoreHelpText = - "\n" - "-p is used to read a compile command database.\n" - "\n" - "\tFor example, it can be a CMake build directory in which a file named\n" - "\tcompile_commands.json exists (use -DCMAKE_EXPORT_COMPILE_COMMANDS=ON\n" - "\tCMake option to get this output). When no build path is specified,\n" - "\tclang-check will attempt to locate it automatically using all parent\n" - "\tpaths of the first input file. See:\n" - "\thttp://clang.llvm.org/docs/HowToSetupToolingForLLVM.html for an\n" - "\texample of setting up Clang Tooling on a source tree.\n" - "\n" - " ... specify the paths of source files. These paths are looked\n" - "\tup in the compile command database. If the path of a file is absolute,\n" - "\tit needs to point into CMake's source tree. If the path is relative,\n" - "\tthe current working directory needs to be in the CMake source tree and\n" - "\tthe file must be in a subdirectory of the current working directory.\n" - "\t\"./\" prefixes in the relative files will be automatically removed,\n" - "\tbut the rest of a relative path must be a suffix of a path in the\n" - "\tcompile command database.\n" - "\n"; - -CommandLineClangTool::CommandLineClangTool() : - BuildPath("p", cl::desc("Build path"), cl::Optional), - SourcePaths(cl::Positional, cl::desc(" [... ]"), - cl::OneOrMore), - MoreHelp(MoreHelpText) { -} - -void CommandLineClangTool::initialize(int argc, const char **argv) { - Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc, argv)); - cl::ParseCommandLineOptions(argc, argv); - if (!Compilations) { - std::string ErrorMessage; - if (!BuildPath.empty()) { - Compilations.reset(CompilationDatabase::autoDetectFromDirectory( - BuildPath, ErrorMessage)); - } else { - Compilations.reset(CompilationDatabase::autoDetectFromSource( - SourcePaths[0], ErrorMessage)); - } - if (!Compilations) - llvm::report_fatal_error(ErrorMessage); - } -} - -int CommandLineClangTool::run(FrontendActionFactory *ActionFactory) { - ClangTool Tool(*Compilations, SourcePaths); - return Tool.run(ActionFactory); -} diff --git a/lib/Tooling/CommonOptionsParser.cpp b/lib/Tooling/CommonOptionsParser.cpp new file mode 100644 index 0000000..15091c7 --- /dev/null +++ b/lib/Tooling/CommonOptionsParser.cpp @@ -0,0 +1,79 @@ +//===--- CommonOptionsParser.cpp - common options for clang tools ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file implements the CommonOptionsParser class used to parse common +// command-line options for clang tools, so that they can be run as separate +// command-line applications with a consistent common interface for handling +// compilation database and input files. +// +// It provides a common subset of command-line options, common algorithm +// for locating a compilation database and source files, and help messages +// for the basic command-line interface. +// +// It creates a CompilationDatabase and reads common command-line options. +// +// This class uses the Clang Tooling infrastructure, see +// http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html +// for details on setting it up with LLVM source tree. +// +//===----------------------------------------------------------------------===// + +#include "llvm/Support/CommandLine.h" +#include "clang/Tooling/CommonOptionsParser.h" +#include "clang/Tooling/Tooling.h" + +using namespace clang::tooling; +using namespace llvm; + +const char *const CommonOptionsParser::HelpMessage = + "\n" + "-p is used to read a compile command database.\n" + "\n" + "\tFor example, it can be a CMake build directory in which a file named\n" + "\tcompile_commands.json exists (use -DCMAKE_EXPORT_COMPILE_COMMANDS=ON\n" + "\tCMake option to get this output). When no build path is specified,\n" + "\tclang-check will attempt to locate it automatically using all parent\n" + "\tpaths of the first input file. See:\n" + "\thttp://clang.llvm.org/docs/HowToSetupToolingForLLVM.html for an\n" + "\texample of setting up Clang Tooling on a source tree.\n" + "\n" + " ... specify the paths of source files. These paths are\n" + "\tlooked up in the compile command database. If the path of a file is\n" + "\tabsolute, it needs to point into CMake's source tree. If the path is\n" + "\trelative, the current working directory needs to be in the CMake\n" + "\tsource tree and the file must be in a subdirectory of the current\n" + "\tworking directory. \"./\" prefixes in the relative files will be\n" + "\tautomatically removed, but the rest of a relative path must be a\n" + "\tsuffix of a path in the compile command database.\n" + "\n"; + +CommonOptionsParser::CommonOptionsParser(int &argc, const char **argv) { + static cl::opt BuildPath( + "p", cl::desc("Build path"), cl::Optional); + + static cl::list SourcePaths( + cl::Positional, cl::desc(" [... ]"), cl::OneOrMore); + + Compilations.reset(FixedCompilationDatabase::loadFromCommandLine(argc, + argv)); + cl::ParseCommandLineOptions(argc, argv); + SourcePathList = SourcePaths; + if (!Compilations) { + std::string ErrorMessage; + if (!BuildPath.empty()) { + Compilations.reset(CompilationDatabase::autoDetectFromDirectory( + BuildPath, ErrorMessage)); + } else { + Compilations.reset(CompilationDatabase::autoDetectFromSource( + SourcePaths[0], ErrorMessage)); + } + if (!Compilations) + llvm::report_fatal_error(ErrorMessage); + } +} diff --git a/lib/Tooling/CompilationDatabase.cpp b/lib/Tooling/CompilationDatabase.cpp index 3139cc2..4149cda 100644 --- a/lib/Tooling/CompilationDatabase.cpp +++ b/lib/Tooling/CompilationDatabase.cpp @@ -7,132 +7,49 @@ // //===----------------------------------------------------------------------===// // -// This file contains multiple implementations for CompilationDatabases. +// This file contains implementations of the CompilationDatabase base class +// and the FixedCompilationDatabase. 
// //===----------------------------------------------------------------------===// +#include #include "clang/Tooling/CompilationDatabase.h" +#include "clang/Tooling/CompilationDatabasePluginRegistry.h" #include "clang/Tooling/Tooling.h" #include "llvm/ADT/SmallString.h" -#include "llvm/Support/YAMLParser.h" #include "llvm/Support/Path.h" #include "llvm/Support/system_error.h" -#ifdef USE_CUSTOM_COMPILATION_DATABASE -#include "CustomCompilationDatabase.h" -#endif - namespace clang { namespace tooling { -namespace { - -/// \brief A parser for escaped strings of command line arguments. -/// -/// Assumes \-escaping for quoted arguments (see the documentation of -/// unescapeCommandLine(...)). -class CommandLineArgumentParser { - public: - CommandLineArgumentParser(StringRef CommandLine) - : Input(CommandLine), Position(Input.begin()-1) {} - - std::vector parse() { - bool HasMoreInput = true; - while (HasMoreInput && nextNonWhitespace()) { - std::string Argument; - HasMoreInput = parseStringInto(Argument); - CommandLine.push_back(Argument); - } - return CommandLine; - } - - private: - // All private methods return true if there is more input available. - - bool parseStringInto(std::string &String) { - do { - if (*Position == '"') { - if (!parseQuotedStringInto(String)) return false; - } else { - if (!parseFreeStringInto(String)) return false; - } - } while (*Position != ' '); - return true; - } - - bool parseQuotedStringInto(std::string &String) { - if (!next()) return false; - while (*Position != '"') { - if (!skipEscapeCharacter()) return false; - String.push_back(*Position); - if (!next()) return false; - } - return next(); - } - - bool parseFreeStringInto(std::string &String) { - do { - if (!skipEscapeCharacter()) return false; - String.push_back(*Position); - if (!next()) return false; - } while (*Position != ' ' && *Position != '"'); - return true; - } - - bool skipEscapeCharacter() { - if (*Position == '\\') { - return next(); - } - return true; - } - - bool nextNonWhitespace() { - do { - if (!next()) return false; - } while (*Position == ' '); - return true; - } - - bool next() { - ++Position; - return Position != Input.end(); - } - - const StringRef Input; - StringRef::iterator Position; - std::vector CommandLine; -}; - -std::vector unescapeCommandLine( - StringRef EscapedCommandLine) { - CommandLineArgumentParser parser(EscapedCommandLine); - return parser.parse(); -} - -} // end namespace - CompilationDatabase::~CompilationDatabase() {} CompilationDatabase * CompilationDatabase::loadFromDirectory(StringRef BuildDirectory, std::string &ErrorMessage) { - llvm::SmallString<1024> JSONDatabasePath(BuildDirectory); - llvm::sys::path::append(JSONDatabasePath, "compile_commands.json"); - llvm::OwningPtr Database( - JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage)); - if (!Database) { - return NULL; + std::stringstream ErrorStream; + for (CompilationDatabasePluginRegistry::iterator + It = CompilationDatabasePluginRegistry::begin(), + Ie = CompilationDatabasePluginRegistry::end(); + It != Ie; ++It) { + std::string DatabaseErrorMessage; + OwningPtr Plugin(It->instantiate()); + if (CompilationDatabase *DB = + Plugin->loadFromDirectory(BuildDirectory, DatabaseErrorMessage)) + return DB; + else + ErrorStream << It->getName() << ": " << DatabaseErrorMessage << "\n"; } - return Database.take(); + ErrorMessage = ErrorStream.str(); + return NULL; } static CompilationDatabase * -findCompilationDatabaseFromDirectory(StringRef Directory) { -#ifdef USE_CUSTOM_COMPILATION_DATABASE - 
if (CompilationDatabase *DB = - ::clang::tooling::findCompilationDatabaseForDirectory(Directory)) - return DB; -#endif +findCompilationDatabaseFromDirectory(StringRef Directory, + std::string &ErrorMessage) { + std::stringstream ErrorStream; + bool HasErrorMessage = false; while (!Directory.empty()) { std::string LoadErrorMessage; @@ -140,8 +57,15 @@ findCompilationDatabaseFromDirectory(StringRef Directory) { CompilationDatabase::loadFromDirectory(Directory, LoadErrorMessage)) return DB; + if (!HasErrorMessage) { + ErrorStream << "No compilation database found in " << Directory.str() + << " or any parent directory\n" << LoadErrorMessage; + HasErrorMessage = true; + } + Directory = llvm::sys::path::parent_path(Directory); } + ErrorMessage = ErrorStream.str(); return NULL; } @@ -151,11 +75,12 @@ CompilationDatabase::autoDetectFromSource(StringRef SourceFile, llvm::SmallString<1024> AbsolutePath(getAbsolutePath(SourceFile)); StringRef Directory = llvm::sys::path::parent_path(AbsolutePath); - CompilationDatabase *DB = findCompilationDatabaseFromDirectory(Directory); + CompilationDatabase *DB = findCompilationDatabaseFromDirectory(Directory, + ErrorMessage); if (!DB) ErrorMessage = ("Could not auto-detect compilation database for file \"" + - SourceFile + "\"").str(); + SourceFile + "\"\n" + ErrorMessage).str(); return DB; } @@ -164,14 +89,17 @@ CompilationDatabase::autoDetectFromDirectory(StringRef SourceDir, std::string &ErrorMessage) { llvm::SmallString<1024> AbsolutePath(getAbsolutePath(SourceDir)); - CompilationDatabase *DB = findCompilationDatabaseFromDirectory(AbsolutePath); + CompilationDatabase *DB = findCompilationDatabaseFromDirectory(AbsolutePath, + ErrorMessage); if (!DB) ErrorMessage = ("Could not auto-detect compilation database from directory \"" + - SourceDir + "\"").str(); + SourceDir + "\"\n" + ErrorMessage).str(); return DB; } +CompilationDatabasePlugin::~CompilationDatabasePlugin() {} + FixedCompilationDatabase * FixedCompilationDatabase::loadFromCommandLine(int &Argc, const char **Argv, @@ -204,153 +132,10 @@ FixedCompilationDatabase::getAllFiles() const { return std::vector(); } -JSONCompilationDatabase * -JSONCompilationDatabase::loadFromFile(StringRef FilePath, - std::string &ErrorMessage) { - llvm::OwningPtr DatabaseBuffer; - llvm::error_code Result = - llvm::MemoryBuffer::getFile(FilePath, DatabaseBuffer); - if (Result != 0) { - ErrorMessage = "Error while opening JSON database: " + Result.message(); - return NULL; - } - llvm::OwningPtr Database( - new JSONCompilationDatabase(DatabaseBuffer.take())); - if (!Database->parse(ErrorMessage)) - return NULL; - return Database.take(); -} - -JSONCompilationDatabase * -JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString, - std::string &ErrorMessage) { - llvm::OwningPtr DatabaseBuffer( - llvm::MemoryBuffer::getMemBuffer(DatabaseString)); - llvm::OwningPtr Database( - new JSONCompilationDatabase(DatabaseBuffer.take())); - if (!Database->parse(ErrorMessage)) - return NULL; - return Database.take(); -} - -std::vector -JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const { - llvm::SmallString<128> NativeFilePath; - llvm::sys::path::native(FilePath, NativeFilePath); - llvm::StringMap< std::vector >::const_iterator - CommandsRefI = IndexByFile.find(NativeFilePath); - if (CommandsRefI == IndexByFile.end()) - return std::vector(); - const std::vector &CommandsRef = CommandsRefI->getValue(); - std::vector Commands; - for (int I = 0, E = CommandsRef.size(); I != E; ++I) { - llvm::SmallString<8> 
DirectoryStorage; - llvm::SmallString<1024> CommandStorage; - Commands.push_back(CompileCommand( - // FIXME: Escape correctly: - CommandsRef[I].first->getValue(DirectoryStorage), - unescapeCommandLine(CommandsRef[I].second->getValue(CommandStorage)))); - } - return Commands; -} - -std::vector -JSONCompilationDatabase::getAllFiles() const { - std::vector Result; - - llvm::StringMap< std::vector >::const_iterator - CommandsRefI = IndexByFile.begin(); - const llvm::StringMap< std::vector >::const_iterator - CommandsRefEnd = IndexByFile.end(); - for (; CommandsRefI != CommandsRefEnd; ++CommandsRefI) { - Result.push_back(CommandsRefI->first().str()); - } - - return Result; -} - -bool JSONCompilationDatabase::parse(std::string &ErrorMessage) { - llvm::yaml::document_iterator I = YAMLStream.begin(); - if (I == YAMLStream.end()) { - ErrorMessage = "Error while parsing YAML."; - return false; - } - llvm::yaml::Node *Root = I->getRoot(); - if (Root == NULL) { - ErrorMessage = "Error while parsing YAML."; - return false; - } - llvm::yaml::SequenceNode *Array = - llvm::dyn_cast(Root); - if (Array == NULL) { - ErrorMessage = "Expected array."; - return false; - } - for (llvm::yaml::SequenceNode::iterator AI = Array->begin(), - AE = Array->end(); - AI != AE; ++AI) { - llvm::yaml::MappingNode *Object = - llvm::dyn_cast(&*AI); - if (Object == NULL) { - ErrorMessage = "Expected object."; - return false; - } - llvm::yaml::ScalarNode *Directory = NULL; - llvm::yaml::ScalarNode *Command = NULL; - llvm::yaml::ScalarNode *File = NULL; - for (llvm::yaml::MappingNode::iterator KVI = Object->begin(), - KVE = Object->end(); - KVI != KVE; ++KVI) { - llvm::yaml::Node *Value = (*KVI).getValue(); - if (Value == NULL) { - ErrorMessage = "Expected value."; - return false; - } - llvm::yaml::ScalarNode *ValueString = - llvm::dyn_cast(Value); - if (ValueString == NULL) { - ErrorMessage = "Expected string as value."; - return false; - } - llvm::yaml::ScalarNode *KeyString = - llvm::dyn_cast((*KVI).getKey()); - if (KeyString == NULL) { - ErrorMessage = "Expected strings as key."; - return false; - } - llvm::SmallString<8> KeyStorage; - if (KeyString->getValue(KeyStorage) == "directory") { - Directory = ValueString; - } else if (KeyString->getValue(KeyStorage) == "command") { - Command = ValueString; - } else if (KeyString->getValue(KeyStorage) == "file") { - File = ValueString; - } else { - ErrorMessage = ("Unknown key: \"" + - KeyString->getRawValue() + "\"").str(); - return false; - } - } - if (!File) { - ErrorMessage = "Missing key: \"file\"."; - return false; - } - if (!Command) { - ErrorMessage = "Missing key: \"command\"."; - return false; - } - if (!Directory) { - ErrorMessage = "Missing key: \"directory\"."; - return false; - } - llvm::SmallString<8> FileStorage; - llvm::SmallString<128> NativeFilePath; - llvm::sys::path::native(File->getValue(FileStorage), NativeFilePath); - IndexByFile[NativeFilePath].push_back( - CompileCommandRef(Directory, Command)); - } - return true; -} +// This anchor is used to force the linker to link in the generated object file +// and thus register the JSONCompilationDatabasePlugin. 
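The "anchor" comment just above refers to a common LLVM idiom: a plugin registers itself through a static initializer in its own object file, and since nothing else references that file, the linker may drop it. Referencing a volatile int defined next to the registration forces the object file into the link. In outline (names invented for the sketch):

// Plugin.cpp - holds the static registration that must not be dead-stripped.
volatile int MyPluginAnchorSource = 0;

// AlwaysLinked.cpp - reading the anchor's value pulls Plugin.o into the link.
extern volatile int MyPluginAnchorSource;
static int MyPluginAnchorDest = MyPluginAnchorSource;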
+extern volatile int JSONAnchorSource; +static int JSONAnchorDest = JSONAnchorSource; } // end namespace tooling } // end namespace clang diff --git a/lib/Tooling/CustomCompilationDatabase.h b/lib/Tooling/CustomCompilationDatabase.h deleted file mode 100644 index b375f8d..0000000 --- a/lib/Tooling/CustomCompilationDatabase.h +++ /dev/null @@ -1,42 +0,0 @@ -//===--- CustomCompilationDatabase.h --------------------------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file contains a hook to supply a custom \c CompilationDatabase -// implementation. -// -// The mechanism can be used by IDEs or non-public code bases to integrate with -// their build system. Currently we support statically linking in an -// implementation of \c findCompilationDatabaseForDirectory and enabling it -// with -DUSE_CUSTOM_COMPILATION_DATABASE when compiling the Tooling library. -// -// FIXME: The strategy forward is to provide a plugin system that can load -// custom compilation databases and make enabling that a build option. -// -//===----------------------------------------------------------------------===// -#ifndef LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H -#define LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H - -#include "llvm/ADT/StringRef.h" - -namespace clang { -namespace tooling { -class CompilationDatabase; - -/// \brief Returns a CompilationDatabase for the given \c Directory. -/// -/// \c Directory can be any directory within a project. This methods will -/// then try to find compilation database files in \c Directory or any of its -/// parents. If a compilation database cannot be found or loaded, returns NULL. -clang::tooling::CompilationDatabase *findCompilationDatabaseForDirectory( - llvm::StringRef Directory); - -} // namespace tooling -} // namespace clang - -#endif // LLVM_CLANG_TOOLING_CUSTOM_COMPILATION_DATABASE_H diff --git a/lib/Tooling/FileMatchTrie.cpp b/lib/Tooling/FileMatchTrie.cpp new file mode 100644 index 0000000..8f25a8c --- /dev/null +++ b/lib/Tooling/FileMatchTrie.cpp @@ -0,0 +1,188 @@ +//===--- FileMatchTrie.cpp - ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation of a FileMatchTrie. +// +//===----------------------------------------------------------------------===// + +#include +#include "clang/Tooling/FileMatchTrie.h" +#include "llvm/ADT/StringMap.h" +#include "llvm/Support/FileSystem.h" +#include "llvm/Support/PathV2.h" +#include "llvm/Support/raw_ostream.h" + +namespace clang { +namespace tooling { + +/// \brief Default \c PathComparator using \c llvm::sys::fs::equivalent(). +struct DefaultPathComparator : public PathComparator { + virtual ~DefaultPathComparator() {} + virtual bool equivalent(StringRef FileA, StringRef FileB) const { + return FileA == FileB || llvm::sys::fs::equivalent(FileA, FileB); + } +}; + +/// \brief A node of the \c FileMatchTrie. +/// +/// Each node has storage for up to one path and a map mapping a path segment to +/// child nodes. The trie starts with an empty root node. 
+class FileMatchTrieNode { +public: + /// \brief Inserts 'NewPath' into this trie. \c ConsumedLength denotes + /// the number of \c NewPath's trailing characters already consumed during + /// recursion. + /// + /// An insert of a path + /// 'p'starts at the root node and does the following: + /// - If the node is empty, insert 'p' into its storage and abort. + /// - If the node has a path 'p2' but no children, take the last path segment + /// 's' of 'p2', put a new child into the map at 's' an insert the rest of + /// 'p2' there. + /// - Insert a new child for the last segment of 'p' and insert the rest of + /// 'p' there. + /// + /// An insert operation is linear in the number of a path's segments. + void insert(StringRef NewPath, unsigned ConsumedLength = 0) { + // We cannot put relative paths into the FileMatchTrie as then a path can be + // a postfix of another path, violating a core assumption of the trie. + if (llvm::sys::path::is_relative(NewPath)) + return; + if (Path.empty()) { + // This is an empty leaf. Store NewPath and return. + Path = NewPath; + return; + } + if (Children.empty()) { + // This is a leaf, ignore duplicate entry if 'Path' equals 'NewPath'. + if (NewPath == Path) + return; + // Make this a node and create a child-leaf with 'Path'. + StringRef Element(llvm::sys::path::filename( + StringRef(Path).drop_back(ConsumedLength))); + Children[Element].Path = Path; + } + StringRef Element(llvm::sys::path::filename( + StringRef(NewPath).drop_back(ConsumedLength))); + Children[Element].insert(NewPath, ConsumedLength + Element.size() + 1); + } + + /// \brief Tries to find the node under this \c FileMatchTrieNode that best + /// matches 'FileName'. + /// + /// If multiple paths fit 'FileName' equally well, \c IsAmbiguous is set to + /// \c true and an empty string is returned. If no path fits 'FileName', an + /// empty string is returned. \c ConsumedLength denotes the number of + /// \c Filename's trailing characters already consumed during recursion. + /// + /// To find the best matching node for a given path 'p', the + /// \c findEquivalent() function is called recursively for each path segment + /// (back to fron) of 'p' until a node 'n' is reached that does not .. + /// - .. have children. In this case it is checked + /// whether the stored path is equivalent to 'p'. If yes, the best match is + /// found. Otherwise continue with the parent node as if this node did not + /// exist. + /// - .. a child matching the next path segment. In this case, all children of + /// 'n' are an equally good match for 'p'. All children are of 'n' are found + /// recursively and their equivalence to 'p' is determined. If none are + /// equivalent, continue with the parent node as if 'n' didn't exist. If one + /// is equivalent, the best match is found. Otherwise, report and ambigiuity + /// error. 
+ StringRef findEquivalent(const PathComparator& Comparator, + StringRef FileName, + bool &IsAmbiguous, + unsigned ConsumedLength = 0) const { + if (Children.empty()) { + if (Comparator.equivalent(StringRef(Path), FileName)) + return StringRef(Path); + return StringRef(); + } + StringRef Element(llvm::sys::path::filename(FileName.drop_back( + ConsumedLength))); + llvm::StringMap::const_iterator MatchingChild = + Children.find(Element); + if (MatchingChild != Children.end()) { + StringRef Result = MatchingChild->getValue().findEquivalent( + Comparator, FileName, IsAmbiguous, + ConsumedLength + Element.size() + 1); + if (!Result.empty() || IsAmbiguous) + return Result; + } + std::vector AllChildren; + getAll(AllChildren, MatchingChild); + StringRef Result; + for (unsigned i = 0; i < AllChildren.size(); i++) { + if (Comparator.equivalent(AllChildren[i], FileName)) { + if (Result.empty()) { + Result = AllChildren[i]; + } else { + IsAmbiguous = true; + return StringRef(); + } + } + } + return Result; + } + +private: + /// \brief Gets all paths under this FileMatchTrieNode. + void getAll(std::vector &Results, + llvm::StringMap::const_iterator Except) const { + if (Path.empty()) + return; + if (Children.empty()) { + Results.push_back(StringRef(Path)); + return; + } + for (llvm::StringMap::const_iterator + It = Children.begin(), E = Children.end(); + It != E; ++It) { + if (It == Except) + continue; + It->getValue().getAll(Results, Children.end()); + } + } + + // The stored absolute path in this node. Only valid for leaf nodes, i.e. + // nodes where Children.empty(). + std::string Path; + + // The children of this node stored in a map based on the next path segment. + llvm::StringMap Children; +}; + +FileMatchTrie::FileMatchTrie() + : Root(new FileMatchTrieNode), Comparator(new DefaultPathComparator()) {} + +FileMatchTrie::FileMatchTrie(PathComparator *Comparator) + : Root(new FileMatchTrieNode), Comparator(Comparator) {} + +FileMatchTrie::~FileMatchTrie() { + delete Root; +} + +void FileMatchTrie::insert(StringRef NewPath) { + Root->insert(NewPath); +} + +StringRef FileMatchTrie::findEquivalent(StringRef FileName, + llvm::raw_ostream &Error) const { + if (llvm::sys::path::is_relative(FileName)) { + Error << "Cannot resolve relative paths"; + return StringRef(); + } + bool IsAmbiguous = false; + StringRef Result = Root->findEquivalent(*Comparator, FileName, IsAmbiguous); + if (IsAmbiguous) + Error << "Path is ambiguous"; + return Result; +} + +} // end namespace tooling +} // end namespace clang diff --git a/lib/Tooling/JSONCompilationDatabase.cpp b/lib/Tooling/JSONCompilationDatabase.cpp new file mode 100644 index 0000000..cf35a25 --- /dev/null +++ b/lib/Tooling/JSONCompilationDatabase.cpp @@ -0,0 +1,303 @@ +//===--- JSONCompilationDatabase.cpp - ------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation of the JSONCompilationDatabase. 
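Before the JSON database below, the FileMatchTrie added above can be exercised on its own; a small sketch with invented paths:

#include "clang/Tooling/FileMatchTrie.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <string>

void matchTrieDemo() {
  clang::tooling::FileMatchTrie Trie;
  Trie.insert("/project/lib/Foo.cpp");   // only absolute paths are stored;
  Trie.insert("/project/gen/Foo.cpp");   // relative ones are silently ignored
  std::string Error;
  llvm::raw_string_ostream ES(Error);
  // Returns the stored spelling equivalent to the query, matching path
  // segments from the back and falling back to fs::equivalent() for segments
  // that differ (e.g. via symlinks). Ambiguous or missing matches yield an
  // empty StringRef and a message on ES.
  llvm::StringRef Match = Trie.findEquivalent("/project/lib/Foo.cpp", ES);
  (void)Match;
}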
+// +//===----------------------------------------------------------------------===// + +#include "clang/Tooling/JSONCompilationDatabase.h" + +#include "clang/Tooling/CompilationDatabase.h" +#include "clang/Tooling/CompilationDatabasePluginRegistry.h" +#include "clang/Tooling/Tooling.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/Path.h" +#include "llvm/Support/system_error.h" + +namespace clang { +namespace tooling { + +namespace { + +/// \brief A parser for escaped strings of command line arguments. +/// +/// Assumes \-escaping for quoted arguments (see the documentation of +/// unescapeCommandLine(...)). +class CommandLineArgumentParser { + public: + CommandLineArgumentParser(StringRef CommandLine) + : Input(CommandLine), Position(Input.begin()-1) {} + + std::vector parse() { + bool HasMoreInput = true; + while (HasMoreInput && nextNonWhitespace()) { + std::string Argument; + HasMoreInput = parseStringInto(Argument); + CommandLine.push_back(Argument); + } + return CommandLine; + } + + private: + // All private methods return true if there is more input available. + + bool parseStringInto(std::string &String) { + do { + if (*Position == '"') { + if (!parseQuotedStringInto(String)) return false; + } else { + if (!parseFreeStringInto(String)) return false; + } + } while (*Position != ' '); + return true; + } + + bool parseQuotedStringInto(std::string &String) { + if (!next()) return false; + while (*Position != '"') { + if (!skipEscapeCharacter()) return false; + String.push_back(*Position); + if (!next()) return false; + } + return next(); + } + + bool parseFreeStringInto(std::string &String) { + do { + if (!skipEscapeCharacter()) return false; + String.push_back(*Position); + if (!next()) return false; + } while (*Position != ' ' && *Position != '"'); + return true; + } + + bool skipEscapeCharacter() { + if (*Position == '\\') { + return next(); + } + return true; + } + + bool nextNonWhitespace() { + do { + if (!next()) return false; + } while (*Position == ' '); + return true; + } + + bool next() { + ++Position; + return Position != Input.end(); + } + + const StringRef Input; + StringRef::iterator Position; + std::vector CommandLine; +}; + +std::vector unescapeCommandLine( + StringRef EscapedCommandLine) { + CommandLineArgumentParser parser(EscapedCommandLine); + return parser.parse(); +} + +} // end namespace + +class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin { + virtual CompilationDatabase *loadFromDirectory( + StringRef Directory, std::string &ErrorMessage) { + llvm::SmallString<1024> JSONDatabasePath(Directory); + llvm::sys::path::append(JSONDatabasePath, "compile_commands.json"); + llvm::OwningPtr Database( + JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage)); + if (!Database) + return NULL; + return Database.take(); + } +}; + +// Register the JSONCompilationDatabasePlugin with the +// CompilationDatabasePluginRegistry using this statically initialized variable. +static CompilationDatabasePluginRegistry::Add +X("json-compilation-database", "Reads JSON formatted compilation databases"); + +// This anchor is used to force the linker to link in the generated object file +// and thus register the JSONCompilationDatabasePlugin. 
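JSONCompilationDatabasePlugin above is the in-tree model for the plugin mechanism that CompilationDatabase::loadFromDirectory() now iterates over; an out-of-tree database could hook in the same way. A sketch with invented names ("my-db", MyDatabasePlugin):

#include "clang/Tooling/CompilationDatabase.h"
#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
#include <string>

namespace {
class MyDatabasePlugin : public clang::tooling::CompilationDatabasePlugin {
  virtual clang::tooling::CompilationDatabase *
  loadFromDirectory(llvm::StringRef Directory, std::string &ErrorMessage) {
    // Probe Directory for a custom database; returning 0 with an error
    // message lets the registry move on to the next plugin.
    ErrorMessage = "my-db: no database found in " + Directory.str();
    return 0;
  }
};
} // end anonymous namespace

static clang::tooling::CompilationDatabasePluginRegistry::Add<MyDatabasePlugin>
MyDB("my-db", "Reads MyDatabase build descriptions");

A plugin library built this way also needs the anchor trick shown earlier so its registration survives static linking.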
+volatile int JSONAnchorSource = 0; + +JSONCompilationDatabase * +JSONCompilationDatabase::loadFromFile(StringRef FilePath, + std::string &ErrorMessage) { + llvm::OwningPtr DatabaseBuffer; + llvm::error_code Result = + llvm::MemoryBuffer::getFile(FilePath, DatabaseBuffer); + if (Result != 0) { + ErrorMessage = "Error while opening JSON database: " + Result.message(); + return NULL; + } + llvm::OwningPtr Database( + new JSONCompilationDatabase(DatabaseBuffer.take())); + if (!Database->parse(ErrorMessage)) + return NULL; + return Database.take(); +} + +JSONCompilationDatabase * +JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString, + std::string &ErrorMessage) { + llvm::OwningPtr DatabaseBuffer( + llvm::MemoryBuffer::getMemBuffer(DatabaseString)); + llvm::OwningPtr Database( + new JSONCompilationDatabase(DatabaseBuffer.take())); + if (!Database->parse(ErrorMessage)) + return NULL; + return Database.take(); +} + +std::vector +JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const { + llvm::SmallString<128> NativeFilePath; + llvm::sys::path::native(FilePath, NativeFilePath); + std::vector PossibleMatches; + std::string Error; + llvm::raw_string_ostream ES(Error); + StringRef Match = MatchTrie.findEquivalent(NativeFilePath.str(), ES); + if (Match.empty()) { + if (Error.empty()) + Error = "No match found."; + llvm::outs() << Error << "\n"; + return std::vector(); + } + llvm::StringMap< std::vector >::const_iterator + CommandsRefI = IndexByFile.find(Match); + if (CommandsRefI == IndexByFile.end()) + return std::vector(); + const std::vector &CommandsRef = CommandsRefI->getValue(); + std::vector Commands; + for (int I = 0, E = CommandsRef.size(); I != E; ++I) { + llvm::SmallString<8> DirectoryStorage; + llvm::SmallString<1024> CommandStorage; + Commands.push_back(CompileCommand( + // FIXME: Escape correctly: + CommandsRef[I].first->getValue(DirectoryStorage), + unescapeCommandLine(CommandsRef[I].second->getValue(CommandStorage)))); + } + return Commands; +} + +std::vector +JSONCompilationDatabase::getAllFiles() const { + std::vector Result; + + llvm::StringMap< std::vector >::const_iterator + CommandsRefI = IndexByFile.begin(); + const llvm::StringMap< std::vector >::const_iterator + CommandsRefEnd = IndexByFile.end(); + for (; CommandsRefI != CommandsRefEnd; ++CommandsRefI) { + Result.push_back(CommandsRefI->first().str()); + } + + return Result; +} + +bool JSONCompilationDatabase::parse(std::string &ErrorMessage) { + llvm::yaml::document_iterator I = YAMLStream.begin(); + if (I == YAMLStream.end()) { + ErrorMessage = "Error while parsing YAML."; + return false; + } + llvm::yaml::Node *Root = I->getRoot(); + if (Root == NULL) { + ErrorMessage = "Error while parsing YAML."; + return false; + } + llvm::yaml::SequenceNode *Array = + llvm::dyn_cast(Root); + if (Array == NULL) { + ErrorMessage = "Expected array."; + return false; + } + for (llvm::yaml::SequenceNode::iterator AI = Array->begin(), + AE = Array->end(); + AI != AE; ++AI) { + llvm::yaml::MappingNode *Object = + llvm::dyn_cast(&*AI); + if (Object == NULL) { + ErrorMessage = "Expected object."; + return false; + } + llvm::yaml::ScalarNode *Directory = NULL; + llvm::yaml::ScalarNode *Command = NULL; + llvm::yaml::ScalarNode *File = NULL; + for (llvm::yaml::MappingNode::iterator KVI = Object->begin(), + KVE = Object->end(); + KVI != KVE; ++KVI) { + llvm::yaml::Node *Value = (*KVI).getValue(); + if (Value == NULL) { + ErrorMessage = "Expected value."; + return false; + } + llvm::yaml::ScalarNode *ValueString = + 
llvm::dyn_cast(Value); + if (ValueString == NULL) { + ErrorMessage = "Expected string as value."; + return false; + } + llvm::yaml::ScalarNode *KeyString = + llvm::dyn_cast((*KVI).getKey()); + if (KeyString == NULL) { + ErrorMessage = "Expected strings as key."; + return false; + } + llvm::SmallString<8> KeyStorage; + if (KeyString->getValue(KeyStorage) == "directory") { + Directory = ValueString; + } else if (KeyString->getValue(KeyStorage) == "command") { + Command = ValueString; + } else if (KeyString->getValue(KeyStorage) == "file") { + File = ValueString; + } else { + ErrorMessage = ("Unknown key: \"" + + KeyString->getRawValue() + "\"").str(); + return false; + } + } + if (!File) { + ErrorMessage = "Missing key: \"file\"."; + return false; + } + if (!Command) { + ErrorMessage = "Missing key: \"command\"."; + return false; + } + if (!Directory) { + ErrorMessage = "Missing key: \"directory\"."; + return false; + } + llvm::SmallString<8> FileStorage; + StringRef FileName = File->getValue(FileStorage); + llvm::SmallString<128> NativeFilePath; + if (llvm::sys::path::is_relative(FileName)) { + llvm::SmallString<8> DirectoryStorage; + llvm::SmallString<128> AbsolutePath( + Directory->getValue(DirectoryStorage)); + llvm::sys::path::append(AbsolutePath, FileName); + llvm::sys::path::native(AbsolutePath.str(), NativeFilePath); + } else { + llvm::sys::path::native(FileName, NativeFilePath); + } + IndexByFile[NativeFilePath].push_back( + CompileCommandRef(Directory, Command)); + MatchTrie.insert(NativeFilePath.str()); + } + return true; +} + +} // end namespace tooling +} // end namespace clang diff --git a/lib/Tooling/Refactoring.cpp b/lib/Tooling/Refactoring.cpp index 6284353..c5002ef 100644 --- a/lib/Tooling/Refactoring.cpp +++ b/lib/Tooling/Refactoring.cpp @@ -11,12 +11,12 @@ // //===----------------------------------------------------------------------===// +#include "clang/Basic/DiagnosticOptions.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" -#include "clang/Frontend/DiagnosticOptions.h" #include "clang/Frontend/TextDiagnosticPrinter.h" #include "clang/Lex/Lexer.h" -#include "clang/Rewrite/Rewriter.h" +#include "clang/Rewrite/Core/Rewriter.h" #include "clang/Tooling/Refactoring.h" #include "llvm/Support/raw_os_ostream.h" @@ -164,12 +164,11 @@ Replacements &RefactoringTool::getReplacements() { return Replace; } int RefactoringTool::run(FrontendActionFactory *ActionFactory) { int Result = Tool.run(ActionFactory); LangOptions DefaultLangOptions; - DiagnosticOptions DefaultDiagnosticOptions; - TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(), - DefaultDiagnosticOptions); + IntrusiveRefCntPtr DiagOpts = new DiagnosticOptions(); + TextDiagnosticPrinter DiagnosticPrinter(llvm::errs(), &*DiagOpts); DiagnosticsEngine Diagnostics( llvm::IntrusiveRefCntPtr(new DiagnosticIDs()), - &DiagnosticPrinter, false); + &*DiagOpts, &DiagnosticPrinter, false); SourceManager Sources(Diagnostics, Tool.getFiles()); Rewriter Rewrite(Sources, DefaultLangOptions); if (!applyAllReplacements(Replace, Rewrite)) { diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp index e93e0c9..af20254 100644 --- a/lib/Tooling/Tooling.cpp +++ b/lib/Tooling/Tooling.cpp @@ -97,17 +97,22 @@ static clang::CompilerInvocation *newInvocation( bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code, const Twine &FileName) { + return runToolOnCodeWithArgs( + ToolAction, Code, std::vector(), FileName); +} + +bool runToolOnCodeWithArgs(clang::FrontendAction *ToolAction, const Twine 
&Code, + const std::vector &Args, + const Twine &FileName) { SmallString<16> FileNameStorage; StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage); - const char *const CommandLine[] = { - "clang-tool", "-fsyntax-only", FileNameRef.data() - }; + std::vector Commands; + Commands.push_back("clang-tool"); + Commands.push_back("-fsyntax-only"); + Commands.insert(Commands.end(), Args.begin(), Args.end()); + Commands.push_back(FileNameRef.data()); FileManager Files((FileSystemOptions())); - ToolInvocation Invocation( - std::vector( - CommandLine, - CommandLine + llvm::array_lengthof(CommandLine)), - ToolAction, &Files); + ToolInvocation Invocation(Commands, ToolAction, &Files); SmallString<1024> CodeStorage; Invocation.mapVirtualFile(FileNameRef, @@ -154,11 +159,12 @@ bool ToolInvocation::run() { for (int I = 0, E = CommandLine.size(); I != E; ++I) Argv.push_back(CommandLine[I].c_str()); const char *const BinaryName = Argv[0]; - DiagnosticOptions DefaultDiagnosticOptions; + IntrusiveRefCntPtr DiagOpts = new DiagnosticOptions(); TextDiagnosticPrinter DiagnosticPrinter( - llvm::errs(), DefaultDiagnosticOptions); - DiagnosticsEngine Diagnostics(llvm::IntrusiveRefCntPtr( - new DiagnosticIDs()), &DiagnosticPrinter, false); + llvm::errs(), &*DiagOpts); + DiagnosticsEngine Diagnostics( + llvm::IntrusiveRefCntPtr(new DiagnosticIDs()), + &*DiagOpts, &DiagnosticPrinter, false); const llvm::OwningPtr Driver( newDriver(&Diagnostics, BinaryName)); diff --git a/runtime/compiler-rt/Makefile b/runtime/compiler-rt/Makefile index f133591..68b2941 100644 --- a/runtime/compiler-rt/Makefile +++ b/runtime/compiler-rt/Makefile @@ -74,8 +74,9 @@ RuntimeDirs := ifeq ($(OS),Darwin) RuntimeDirs += darwin RuntimeLibrary.darwin.Configs := \ - eprintf 10.4 osx cc_kext \ - asan_osx profile_osx + eprintf.a 10.4.a osx.a ios.a cc_kext.a cc_kext_ios5.a \ + asan_osx.a asan_osx_dynamic.dylib \ + profile_osx.a profile_ios.a endif # On Linux, include a library which has all the runtime functions. @@ -83,14 +84,37 @@ ifeq ($(OS),Linux) RuntimeDirs += linux RuntimeLibrary.linux.Configs := +# TryCompile compiler source flags +# Returns exit code of running a compiler invocation. +TryCompile = \ + $(shell \ + cflags=""; \ + for flag in $(3); do \ + cflags="$$cflags $$flag"; \ + done; \ + $(1) $$cflags $(2) -o /dev/null > /dev/null 2> /dev/null ; \ + echo $$?) + # We currently only try to generate runtime libraries on x86. ifeq ($(ARCH),x86) RuntimeLibrary.linux.Configs += \ - full-i386 profile-i386 asan-i386 + full-i386.a profile-i386.a asan-i386.a endif + ifeq ($(ARCH),x86_64) RuntimeLibrary.linux.Configs += \ - full-x86_64 profile-x86_64 asan-x86_64 tsan-x86_64 + full-x86_64.a profile-x86_64.a asan-x86_64.a tsan-x86_64.a +# We need to build 32-bit ASan library on 64-bit platform, and add it to the +# list of runtime libraries to make "clang -faddress-sanitizer -m32" work. +# We check that Clang can produce working 32-bit binaries by compiling a simple +# executable. 
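The runToolOnCodeWithArgs() entry point added to Tooling.cpp above (just before this compiler-rt hunk) lets unit tests run an action over an in-memory snippet with extra compiler flags; roughly:

#include "clang/Frontend/FrontendActions.h"
#include "clang/Tooling/Tooling.h"
#include <string>
#include <vector>

bool syntaxCheckCxx11Snippet() {
  std::vector<std::string> Args;
  Args.push_back("-std=c++11");
  // Equivalent to running "clang-tool -fsyntax-only -std=c++11 input.cc"
  // over a virtual file that contains the given code.
  return clang::tooling::runToolOnCodeWithArgs(
      new clang::SyntaxOnlyAction, "auto x = 0;", Args, "input.cc");
}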
+test_source = $(LLVM_SRC_ROOT)/tools/clang/runtime/compiler-rt/clang_linux_test_input.c +ifeq ($(call TryCompile,$(ToolDir)/clang,$(test_source),-m32),0) +RuntimeLibrary.linux.Configs += asan-i386.a +endif +ifneq ($(LLVM_ANDROID_TOOLCHAIN_DIR),) +RuntimeLibrary.linux.Configs += asan-arm-android.so +endif endif endif @@ -109,6 +133,7 @@ BuildRuntimeLibraries: ProjSrcRoot=$(COMPILERRT_SRC_ROOT) \ ProjObjRoot=$(PROJ_OBJ_DIR) \ CC="$(ToolDir)/clang" \ + LLVM_ANDROID_TOOLCHAIN_DIR="$(LLVM_ANDROID_TOOLCHAIN_DIR)" \ $(RuntimeDirs:%=clang_%) .PHONY: BuildRuntimeLibraries CleanRuntimeLibraries: @@ -126,6 +151,10 @@ $(PROJ_resources_lib): define RuntimeLibraryTemplate $(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.a: BuildRuntimeLibraries @true +$(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.so: BuildRuntimeLibraries + @true +$(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.dylib: BuildRuntimeLibraries + @true .PRECIOUS: $(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.a # Rule to copy the libraries to their resource directory location. @@ -134,8 +163,20 @@ $(ResourceLibDir)/$1/libclang_rt.%.a: \ $(ResourceLibDir)/$1/.dir $(Echo) Copying runtime library $1/$$* to build dir $(Verb) cp $(PROJ_OBJ_DIR)/clang_$1/$$*/libcompiler_rt.a $$@ +$(ResourceLibDir)/$1/libclang_rt.%.so: \ + $(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.so \ + $(ResourceLibDir)/$1/.dir + $(Echo) Copying runtime library $1/$$* to build dir + $(Verb) cp $(PROJ_OBJ_DIR)/clang_$1/$$*/libcompiler_rt.so $$@ +$(ResourceLibDir)/$1/libclang_rt.%.dylib: \ + $(PROJ_OBJ_DIR)/clang_$1/%/libcompiler_rt.dylib \ + $(ResourceLibDir)/$1/.dir + $(Echo) Copying runtime library $1/$$* to build dir + $(Verb) cp $(PROJ_OBJ_DIR)/clang_$1/$$*/libcompiler_rt.dylib $$@ + $(Echo) Fixing LC_ID_DYLIB of $$@ + $(Verb) install_name_tool $$@ -id $$@ RuntimeLibrary.$1: \ - $(RuntimeLibrary.$1.Configs:%=$(ResourceLibDir)/$1/libclang_rt.%.a) + $(RuntimeLibrary.$1.Configs:%=$(ResourceLibDir)/$1/libclang_rt.%) .PHONY: RuntimeLibrary.$1 $(PROJ_resources_lib)/$1: $(PROJ_resources_lib) @@ -145,10 +186,18 @@ $(PROJ_resources_lib)/$1/libclang_rt.%.a: \ $(ResourceLibDir)/$1/libclang_rt.%.a | $(PROJ_resources_lib)/$1 $(Echo) Installing compiler runtime library: $1/$$* $(Verb) $(DataInstall) $$< $(PROJ_resources_lib)/$1 +$(PROJ_resources_lib)/$1/libclang_rt.%.so: \ + $(ResourceLibDir)/$1/libclang_rt.%.so | $(PROJ_resources_lib)/$1 + $(Echo) Installing compiler runtime library: $1/$$* + $(Verb) $(DataInstall) $$< $(PROJ_resources_lib)/$1 +$(PROJ_resources_lib)/$1/libclang_rt.%.dylib: \ + $(ResourceLibDir)/$1/libclang_rt.%.dylib | $(PROJ_resources_lib)/$1 + $(Echo) Installing compiler runtime library: $1/$$* + $(Verb) $(DataInstall) $$< $(PROJ_resources_lib)/$1 # Rule to install runtime libraries. RuntimeLibraryInstall.$1: \ - $(RuntimeLibrary.$1.Configs:%=$(PROJ_resources_lib)/$1/libclang_rt.%.a) + $(RuntimeLibrary.$1.Configs:%=$(PROJ_resources_lib)/$1/libclang_rt.%) .PHONY: RuntimeLibraryInstall.$1 endef $(foreach lib,$(RuntimeDirs), $(eval $(call RuntimeLibraryTemplate,$(lib)))) diff --git a/runtime/compiler-rt/clang_linux_test_input.c b/runtime/compiler-rt/clang_linux_test_input.c new file mode 100644 index 0000000..e65ce98 --- /dev/null +++ b/runtime/compiler-rt/clang_linux_test_input.c @@ -0,0 +1,4 @@ +// This file is used to check if we can produce working executables +// for i386 and x86_64 archs on Linux. 
+#include +int main(){} diff --git a/test/ARCMT/cxx-checking.mm b/test/ARCMT/cxx-checking.mm index 9f9e3d8..2f5d5d5 100644 --- a/test/ARCMT/cxx-checking.mm +++ b/test/ARCMT/cxx-checking.mm @@ -1,16 +1,16 @@ -// RUN: %clang_cc1 -arcmt-check -verify -triple x86_64-apple-darwin10 -fsyntax-only -fblocks -Warc-abi %s +// RUN: %clang_cc1 -arcmt-check -verify -triple x86_64-apple-darwin10 -fsyntax-only -fblocks %s // DISABLE: mingw32 // Classes that have an Objective-C object pointer. -struct HasObjectMember0 { // expected-warning{{'HasObjectMember0' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} +struct HasObjectMember0 { id x; }; -struct HasObjectMember1 { // expected-warning{{'HasObjectMember1' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} +struct HasObjectMember1 { id x[3]; }; -struct HasObjectMember2 { // expected-warning{{'HasObjectMember2' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} +struct HasObjectMember2 { id x[3][2]; }; @@ -27,11 +27,11 @@ struct HasObjectMember3 { __unsafe_unretained id x[3][2]; }; -struct HasBlockPointerMember0 { // expected-warning{{'HasBlockPointerMember0' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} +struct HasBlockPointerMember0 { int (^bp)(int); }; -struct HasBlockPointerMember1 { // expected-warning{{'HasBlockPointerMember1' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} +struct HasBlockPointerMember1 { int (^bp[2][3])(int); }; @@ -39,20 +39,17 @@ struct NonPOD { NonPOD(const NonPOD&); }; -struct HasObjectMemberAndNonPOD0 { // expected-warning{{'HasObjectMemberAndNonPOD0' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ - // expected-warning{{'HasObjectMemberAndNonPOD0' cannot be shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} +struct HasObjectMemberAndNonPOD0 { id x; NonPOD np; }; -struct HasObjectMemberAndNonPOD1 { // expected-warning{{'HasObjectMemberAndNonPOD1' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ - // expected-warning{{'HasObjectMemberAndNonPOD1' cannot be shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} +struct HasObjectMemberAndNonPOD1 { NonPOD np; id x[3]; }; -struct HasObjectMemberAndNonPOD2 { // expected-warning{{'HasObjectMemberAndNonPOD2' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ - // expected-warning{{'HasObjectMemberAndNonPOD2' cannot be shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} +struct HasObjectMemberAndNonPOD2 { NonPOD np; id x[3][2]; }; @@ -64,14 +61,12 @@ struct HasObjectMemberAndNonPOD3 { id x[3][2]; }; -struct HasBlockPointerMemberAndNonPOD0 { // expected-warning{{'HasBlockPointerMemberAndNonPOD0' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ -// expected-warning{{'HasBlockPointerMemberAndNonPOD0' cannot be shared 
between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} +struct HasBlockPointerMemberAndNonPOD0 { NonPOD np; int (^bp)(int); }; -struct HasBlockPointerMemberAndNonPOD1 { // expected-warning{{'HasBlockPointerMemberAndNonPOD1' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ -// expected-warning{{'HasBlockPointerMemberAndNonPOD1' cannot be shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} +struct HasBlockPointerMemberAndNonPOD1 { NonPOD np; int (^bp[2][3])(int); }; diff --git a/test/ARCMT/verify.m b/test/ARCMT/verify.m index 9110fe6..bfb3b4b 100644 --- a/test/ARCMT/verify.m +++ b/test/ARCMT/verify.m @@ -8,6 +8,7 @@ #error should not be ignored // expected-error@-1 {{should not be ignored}} -// CHECK: error: 'error' diagnostics seen but not expected: +// CHECK: error: no expected directives found: consider use of 'expected-no-diagnostics' +// CHECK-NEXT: error: 'error' diagnostics seen but not expected: // CHECK-NEXT: (frontend): error reading '{{.*}}verify.m.tmp.invalid' -// CHECK-NEXT: 1 error generated. +// CHECK-NEXT: 2 errors generated. diff --git a/test/ASTMerge/exprs.c b/test/ASTMerge/exprs.c index 0a4e1e5..c82e683 100644 --- a/test/ASTMerge/exprs.c +++ b/test/ASTMerge/exprs.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-pch -o %t.1.ast %S/Inputs/exprs1.c // RUN: %clang_cc1 -emit-pch -o %t.2.ast %S/Inputs/exprs2.c // RUN: %clang_cc1 -ast-merge %t.1.ast -ast-merge %t.2.ast -fsyntax-only -verify %s +// expected-no-diagnostics diff --git a/test/Analysis/CFContainers-invalid.c b/test/Analysis/CFContainers-invalid.c new file mode 100644 index 0000000..3268e1e --- /dev/null +++ b/test/Analysis/CFContainers-invalid.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=osx.coreFoundation.containers.PointerSizedValues -triple x86_64-apple-darwin -verify %s +// expected-no-diagnostics + +typedef const struct __CFAllocator * CFAllocatorRef; +typedef const struct __CFArray * CFArrayRef; +typedef const struct __CFDictionary * CFDictionaryRef; +typedef const struct __CFSet * CFSetRef; + +extern const CFAllocatorRef kCFAllocatorDefault; + +// Unexpected declarations for these: +CFArrayRef CFArrayCreate(CFAllocatorRef); +CFDictionaryRef CFDictionaryCreate(CFAllocatorRef); +CFSetRef CFSetCreate(CFAllocatorRef); + +void testNoCrash() { + (void)CFArrayCreate(kCFAllocatorDefault); + (void)CFDictionaryCreate(kCFAllocatorDefault); + (void)CFSetCreate(kCFAllocatorDefault); +} diff --git a/test/Analysis/CFContainers.mm b/test/Analysis/CFContainers.mm index 7d6c175..b019423 100644 --- a/test/Analysis/CFContainers.mm +++ b/test/Analysis/CFContainers.mm @@ -94,16 +94,16 @@ CFSetRef CFSetCreate(CFAllocatorRef allocator, const void **values, CFIndex numV #define NULL __null // Done with the headers. -// Test experimental.osx.cocoa.ContainerAPI checker. +// Test alpha.osx.cocoa.ContainerAPI checker. 
void testContainers(int **xNoWarn, CFIndex count) { int x[] = { 1, 2, 3 }; - CFArrayRef foo = CFArrayCreate(kCFAllocatorDefault, (const void **) x, sizeof(x) / sizeof(x[0]), 0);// expected-warning {{The first argument to 'CFArrayCreate' must be a C array of pointer-sized}} + CFArrayRef foo = CFArrayCreate(kCFAllocatorDefault, (const void **) x, sizeof(x) / sizeof(x[0]), 0);// expected-warning {{The second argument to 'CFArrayCreate' must be a C array of pointer-sized}} CFArrayRef fooNoWarn = CFArrayCreate(kCFAllocatorDefault, (const void **) xNoWarn, sizeof(xNoWarn) / sizeof(xNoWarn[0]), 0); // no warning CFArrayRef fooNoWarn2 = CFArrayCreate(kCFAllocatorDefault, 0, sizeof(xNoWarn) / sizeof(xNoWarn[0]), 0);// no warning, passing in 0 CFArrayRef fooNoWarn3 = CFArrayCreate(kCFAllocatorDefault, NULL, sizeof(xNoWarn) / sizeof(xNoWarn[0]), 0);// no warning, passing in NULL - CFSetRef set = CFSetCreate(NULL, (const void **)x, 3, &kCFTypeSetCallBacks); // expected-warning {{The first argument to 'CFSetCreate' must be a C array of pointer-sized values}} + CFSetRef set = CFSetCreate(NULL, (const void **)x, 3, &kCFTypeSetCallBacks); // expected-warning {{The second argument to 'CFSetCreate' must be a C array of pointer-sized values}} CFArrayRef* pairs = new CFArrayRef[count]; CFSetRef fSet = CFSetCreate(kCFAllocatorDefault, (const void**) pairs, count - 1, &kCFTypeSetCallBacks);// no warning } @@ -125,8 +125,8 @@ void CreateDict(int *elems) { const CFDictionaryKeyCallBacks keyCB = kCFCopyStringDictionaryKeyCallBacks; const CFDictionaryValueCallBacks valCB = kCFTypeDictionaryValueCallBacks; CFDictionaryRef dict1 = CFDictionaryCreate(kCFAllocatorDefault, (const void**)keys, (const void**)values, numValues, &keyCB, &valCB); // no warning - CFDictionaryRef dict2 = CFDictionaryCreate(kCFAllocatorDefault, (const void**)elems[0], (const void**)values, numValues, &keyCB, &valCB); //expected-warning {{The first argument to 'CFDictionaryCreate' must be a C array of}} - CFDictionaryRef dict3 = CFDictionaryCreate(kCFAllocatorDefault, (const void**)keys, (const void**)elems, numValues, &keyCB, &valCB); // expected-warning {{The second argument to 'CFDictionaryCreate' must be a C array of pointer-sized values}} + CFDictionaryRef dict2 = CFDictionaryCreate(kCFAllocatorDefault, (const void**)elems[0], (const void**)values, numValues, &keyCB, &valCB); //expected-warning {{The second argument to 'CFDictionaryCreate' must be a C array of}} expected-warning {{cast to 'const void **' from smaller integer type 'int'}} + CFDictionaryRef dict3 = CFDictionaryCreate(kCFAllocatorDefault, (const void**)keys, (const void**)elems, numValues, &keyCB, &valCB); // expected-warning {{The third argument to 'CFDictionaryCreate' must be a C array of pointer-sized values}} } void OutOfBoundsSymbolicOffByOne(const void ** input, CFIndex S) { @@ -177,11 +177,11 @@ void TestPointerToArray(int *elems, void *p1, void *p2, void *p3, unsigned count CFArrayCreate(0, (const void **) &fn, count, 0); // false negative CFArrayCreate(0, (const void **) fn, count, 0); // no warning - CFArrayCreate(0, (const void **) cp, count, 0); // expected-warning {{The first argument to 'CFArrayCreate' must be a C array of pointer-sized}} + CFArrayCreate(0, (const void **) cp, count, 0); // expected-warning {{The second argument to 'CFArrayCreate' must be a C array of pointer-sized}} char cc[] = { 0, 2, 3 }; - CFArrayCreate(0, (const void **) &cc, count, 0); // expected-warning {{The first argument to 'CFArrayCreate' must be a C array of pointer-sized}} - 
CFArrayCreate(0, (const void **) cc, count, 0); // expected-warning {{The first argument to 'CFArrayCreate' must be a C array of pointer-sized}} + CFArrayCreate(0, (const void **) &cc, count, 0); // expected-warning {{The second argument to 'CFArrayCreate' must be a C array of pointer-sized}} + CFArrayCreate(0, (const void **) cc, count, 0); // expected-warning {{The second argument to 'CFArrayCreate' must be a C array of pointer-sized}} } void TestUndef(CFArrayRef A, CFIndex sIndex, void* x[]) { diff --git a/test/Analysis/CFDateGC.m b/test/Analysis/CFDateGC.m index 4884bb9..e123062 100644 --- a/test/Analysis/CFDateGC.m +++ b/test/Analysis/CFDateGC.m @@ -1,4 +1,3 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount -analyzer-store=region -analyzer-constraints=basic -verify -fobjc-gc %s -Wno-implicit-function-declaration // RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount -analyzer-store=region -analyzer-constraints=range -verify -fobjc-gc %s -Wno-implicit-function-declaration //===----------------------------------------------------------------------===// diff --git a/test/Analysis/CFNumber.c b/test/Analysis/CFNumber.c index 537e497..ebb3b1a 100644 --- a/test/Analysis/CFNumber.c +++ b/test/Analysis/CFNumber.c @@ -1,4 +1,3 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.coreFoundation.CFNumber,osx.cocoa.RetainCount -analyzer-store=region -analyzer-constraints=basic -verify -triple x86_64-apple-darwin9 %s // RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.coreFoundation.CFNumber,osx.cocoa.RetainCount -analyzer-store=region -analyzer-constraints=range -verify -triple x86_64-apple-darwin9 %s typedef signed long CFIndex; diff --git a/test/Analysis/CFRetainRelease_NSAssertionHandler.m b/test/Analysis/CFRetainRelease_NSAssertionHandler.m index e0c9be1..853bfb2 100644 --- a/test/Analysis/CFRetainRelease_NSAssertionHandler.m +++ b/test/Analysis/CFRetainRelease_NSAssertionHandler.m @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -verify %s -analyzer-constraints=basic -analyzer-store=region -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -verify %s -analyzer-constraints=range -analyzer-store=region +// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core -verify %s -analyzer-constraints=range -analyzer-store=region +// expected-no-diagnostics typedef struct objc_selector *SEL; typedef signed char BOOL; diff --git a/test/Analysis/CGColorSpace.c b/test/Analysis/CGColorSpace.c index 1bd20fa..e2da364 100644 --- a/test/Analysis/CGColorSpace.c +++ b/test/Analysis/CGColorSpace.c @@ -1,4 +1,3 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount -analyzer-store=region -analyzer-constraints=basic -verify %s // RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount -analyzer-store=region -analyzer-constraints=range -verify %s typedef struct CGColorSpace *CGColorSpaceRef; diff --git a/test/Analysis/CheckNSError.m b/test/Analysis/CheckNSError.m index cdec1d5..af645da 100644 --- a/test/Analysis/CheckNSError.m +++ b/test/Analysis/CheckNSError.m @@ -1,4 +1,3 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.NSError,osx.coreFoundation.CFError -analyzer-store=region -analyzer-constraints=basic -verify -Wno-objc-root-class %s // RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.NSError,osx.coreFoundation.CFError -analyzer-store=region -analyzer-constraints=range -verify 
-Wno-objc-root-class %s diff --git a/test/Analysis/Inputs/system-header-simulator-cxx.h b/test/Analysis/Inputs/system-header-simulator-cxx.h new file mode 100644 index 0000000..e762d0a --- /dev/null +++ b/test/Analysis/Inputs/system-header-simulator-cxx.h @@ -0,0 +1,57 @@ +#pragma clang system_header + +namespace std { + template + struct pair { + T1 first; + T2 second; + + pair() : first(), second() {} + pair(const T1 &a, const T2 &b) : first(a), second(b) {} + + template + pair(const pair &other) : first(other.first), second(other.second) {} + }; + + typedef __typeof__(sizeof(int)) size_t; + + template + class vector { + T *_start; + T *_finish; + T *_end_of_storage; + public: + vector() : _start(0), _finish(0), _end_of_storage(0) {} + ~vector(); + + size_t size() const { + return size_t(_finish - _start); + } + + void push_back(); + T pop_back(); + + T &operator[](size_t n) { + return _start[n]; + } + + const T &operator[](size_t n) const { + return _start[n]; + } + + T *begin() { return _start; } + const T *begin() const { return _start; } + + T *end() { return _finish; } + const T *end() const { return _finish; } + }; + + class exception { + public: + exception() throw(); + virtual ~exception() throw(); + virtual const char *what() const throw() { + return 0; + } + }; +} diff --git a/test/Analysis/Inputs/system-header-simulator-for-simple-stream.h b/test/Analysis/Inputs/system-header-simulator-for-simple-stream.h new file mode 100644 index 0000000..99986f4 --- /dev/null +++ b/test/Analysis/Inputs/system-header-simulator-for-simple-stream.h @@ -0,0 +1,11 @@ + +#pragma clang system_header + +typedef struct __sFILE { + unsigned char *_p; +} FILE; +FILE *fopen(const char * restrict, const char * restrict) __asm("_" "fopen" ); +int fputc(int, FILE *); +int fputs(const char * restrict, FILE * restrict) __asm("_" "fputs" ); +int fclose(FILE *); +void exit(int); diff --git a/test/Analysis/Inputs/system-header-simulator-objc.h b/test/Analysis/Inputs/system-header-simulator-objc.h new file mode 100644 index 0000000..a647b37 --- /dev/null +++ b/test/Analysis/Inputs/system-header-simulator-objc.h @@ -0,0 +1,130 @@ +#pragma clang system_header + +typedef unsigned int UInt32; +typedef unsigned short UInt16; + +typedef signed long CFIndex; +typedef signed char BOOL; +typedef unsigned long NSUInteger; +typedef unsigned short unichar; +typedef UInt16 UniChar; + +enum { + NSASCIIStringEncoding = 1, + NSNEXTSTEPStringEncoding = 2, + NSJapaneseEUCStringEncoding = 3, + NSUTF8StringEncoding = 4, + NSISOLatin1StringEncoding = 5, + NSSymbolStringEncoding = 6, + NSNonLossyASCIIStringEncoding = 7, +}; +typedef const struct __CFString * CFStringRef; +typedef struct __CFString * CFMutableStringRef; +typedef NSUInteger NSStringEncoding; +typedef UInt32 CFStringEncoding; + +typedef const void * CFTypeRef; + +typedef const struct __CFAllocator * CFAllocatorRef; +extern const CFAllocatorRef kCFAllocatorDefault; +extern const CFAllocatorRef kCFAllocatorSystemDefault; +extern const CFAllocatorRef kCFAllocatorMalloc; +extern const CFAllocatorRef kCFAllocatorMallocZone; +extern const CFAllocatorRef kCFAllocatorNull; + +@class NSString, Protocol; +extern void NSLog(NSString *format, ...) 
__attribute__((format(__NSString__, 1, 2))); +typedef struct _NSZone NSZone; +@class NSInvocation, NSMethodSignature, NSCoder, NSString, NSEnumerator; +@protocol NSObject +- (BOOL)isEqual:(id)object; +- (id)retain; +- (id)copy; +- (oneway void)release; +- (id)autorelease; +- (id)init; +@end @protocol NSCopying - (id)copyWithZone:(NSZone *)zone; +@end @protocol NSMutableCopying - (id)mutableCopyWithZone:(NSZone *)zone; +@end @protocol NSCoding - (void)encodeWithCoder:(NSCoder *)aCoder; +@end +@interface NSObject {} ++ (id)allocWithZone:(NSZone *)zone; ++ (id)alloc; +- (void)dealloc; +@end +@interface NSObject (NSCoderMethods) +- (id)awakeAfterUsingCoder:(NSCoder *)aDecoder; +@end +extern id NSAllocateObject(Class aClass, NSUInteger extraBytes, NSZone *zone); +typedef struct { +} +NSFastEnumerationState; +@protocol NSFastEnumeration - (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state objects:(id *)stackbuf count:(NSUInteger)len; +@end @class NSString, NSDictionary; +@interface NSValue : NSObject - (void)getValue:(void *)value; +@end @interface NSNumber : NSValue - (char)charValue; +- (id)initWithInt:(int)value; +@end @class NSString; +@interface NSArray : NSObject - (NSUInteger)count; +@end @interface NSArray (NSArrayCreation) + (id)array; +@end @interface NSAutoreleasePool : NSObject { +} +- (void)drain; +@end extern NSString * const NSBundleDidLoadNotification; +typedef double NSTimeInterval; +@interface NSDate : NSObject - (NSTimeInterval)timeIntervalSinceReferenceDate; +@end + +@interface NSString : NSObject +- (NSUInteger)length; +- (NSString *)stringByAppendingString:(NSString *)aString; +- ( const char *)UTF8String; +- (id)initWithUTF8String:(const char *)nullTerminatedCString; +- (id)initWithCharactersNoCopy:(unichar *)characters length:(NSUInteger)length freeWhenDone:(BOOL)freeBuffer; +- (id)initWithCharacters:(const unichar *)characters length:(NSUInteger)length; +- (id)initWithBytes:(const void *)bytes length:(NSUInteger)len encoding:(NSStringEncoding)encoding; +- (id)initWithBytesNoCopy:(void *)bytes length:(NSUInteger)len encoding:(NSStringEncoding)encoding freeWhenDone:(BOOL)freeBuffer; ++ (id)stringWithUTF8String:(const char *)nullTerminatedCString; ++ (id)stringWithString:(NSString *)string; +@end @class NSString, NSURL, NSError; + +@interface NSMutableString : NSString +- (void)appendFormat:(NSString *)format, ... 
__attribute__((format(__NSString__, 1, 2))); +@end + +@interface NSData : NSObject - (NSUInteger)length; ++ (id)dataWithBytesNoCopy:(void *)bytes length:(NSUInteger)length; ++ (id)dataWithBytesNoCopy:(void *)bytes length:(NSUInteger)length freeWhenDone:(BOOL)b; +- (id)initWithBytesNoCopy:(void *)bytes length:(NSUInteger)length; +- (id)initWithBytesNoCopy:(void *)bytes length:(NSUInteger)length freeWhenDone:(BOOL)b; +@end + +typedef struct { +} +CFDictionaryKeyCallBacks; +extern const CFDictionaryKeyCallBacks kCFTypeDictionaryKeyCallBacks; +typedef struct { +} +CFDictionaryValueCallBacks; +extern const CFDictionaryValueCallBacks kCFTypeDictionaryValueCallBacks; +typedef const struct __CFDictionary * CFDictionaryRef; +typedef struct __CFDictionary * CFMutableDictionaryRef; +extern CFMutableDictionaryRef CFDictionaryCreateMutable(CFAllocatorRef allocator, CFIndex capacity, const CFDictionaryKeyCallBacks *keyCallBacks, const CFDictionaryValueCallBacks *valueCallBacks); +void CFDictionarySetValue(CFMutableDictionaryRef, const void *, const void *); + + +extern void CFRelease(CFTypeRef cf); + +extern CFMutableStringRef CFStringCreateMutableWithExternalCharactersNoCopy(CFAllocatorRef alloc, UniChar *chars, CFIndex numChars, CFIndex capacity, CFAllocatorRef externalCharactersAllocator); +extern CFStringRef CFStringCreateWithCStringNoCopy(CFAllocatorRef alloc, const char *cStr, CFStringEncoding encoding, CFAllocatorRef contentsDeallocator); +extern void CFStringAppend(CFMutableStringRef theString, CFStringRef appendedString); + +void SystemHeaderFunctionWithBlockParam(void *, void (^block)(void *), unsigned); + +@interface NSPointerArray : NSObject +- (void)addPointer:(void *)pointer; +- (void)insertPointer:(void *)item atIndex:(NSUInteger)index; +- (void)replacePointerAtIndex:(NSUInteger)index withPointer:(void *)item; +- (void *)pointerAtIndex:(NSUInteger)index; +@end + diff --git a/test/Analysis/Inputs/system-header-simulator.h b/test/Analysis/Inputs/system-header-simulator.h new file mode 100644 index 0000000..e28b890 --- /dev/null +++ b/test/Analysis/Inputs/system-header-simulator.h @@ -0,0 +1,64 @@ +#pragma clang system_header + +typedef struct _FILE FILE; +extern FILE *stdin; +extern FILE *stdout; +extern FILE *stderr; +// Include a variant of standard streams that occur in the pre-processed file. +extern FILE *__stdinp; +extern FILE *__stdoutp; +extern FILE *__stderrp; + + +int fscanf(FILE *restrict, const char *restrict, ...); + +// Note, on some platforms errno macro gets replaced with a function call. 
+extern int errno; + +typedef __typeof(sizeof(int)) size_t; + +size_t strlen(const char *); + +char *strcpy(char *restrict, const char *restrict); + +typedef unsigned long __darwin_pthread_key_t; +typedef __darwin_pthread_key_t pthread_key_t; +int pthread_setspecific(pthread_key_t, const void *); + +typedef long long __int64_t; +typedef __int64_t __darwin_off_t; +typedef __darwin_off_t fpos_t; + +void setbuf(FILE * restrict, char * restrict); +int setvbuf(FILE * restrict, char * restrict, int, size_t); + +FILE *funopen(const void *, + int (*)(void *, char *, int), + int (*)(void *, const char *, int), + fpos_t (*)(void *, fpos_t, int), + int (*)(void *)); + +int sqlite3_bind_text_my(int, const char*, int n, void(*)(void*)); + +typedef void (*freeCallback) (void*); +typedef struct { + int i; + freeCallback fc; +} StWithCallback; + +int dealocateMemWhenDoneByVal(void*, StWithCallback); +int dealocateMemWhenDoneByRef(StWithCallback*, const void*); + +typedef struct CGContext *CGContextRef; +CGContextRef CGBitmapContextCreate(void *data/*, size_t width, size_t height, + size_t bitsPerComponent, size_t bytesPerRow, + CGColorSpaceRef space, + CGBitmapInfo bitmapInfo*/); +void *CGBitmapContextGetData(CGContextRef context); + +// Include xpc. +typedef struct _xpc_connection_s * xpc_connection_t; +typedef void (*xpc_finalizer_t)(void *value); +void xpc_connection_set_context(xpc_connection_t connection, void *context); +void xpc_connection_set_finalizer_f(xpc_connection_t connection, xpc_finalizer_t finalizer); +void xpc_connection_resume(xpc_connection_t connection); diff --git a/test/Analysis/MissingDealloc.m b/test/Analysis/MissingDealloc.m index 589fcf5..b465959 100644 --- a/test/Analysis/MissingDealloc.m +++ b/test/Analysis/MissingDealloc.m @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.osx.cocoa.Dealloc %s -verify +// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.osx.cocoa.Dealloc %s -verify +// expected-no-diagnostics typedef signed char BOOL; @protocol NSObject - (BOOL)isEqual:(id)object; diff --git a/test/Analysis/NSPanel.m b/test/Analysis/NSPanel.m index 578658e..1d77d1e 100644 --- a/test/Analysis/NSPanel.m +++ b/test/Analysis/NSPanel.m @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -analyzer-store=region -analyzer-constraints=basic -verify -Wno-objc-root-class %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -analyzer-store=region -analyzer-constraints=range -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -Wno-objc-root-class %s +// expected-no-diagnostics // BEGIN delta-debugging reduced header stuff diff --git a/test/Analysis/NSString.m b/test/Analysis/NSString.m index 4035cc9..9339069 100644 --- a/test/Analysis/NSString.m +++ b/test/Analysis/NSString.m @@ -1,7 +1,5 @@ -// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,osx.AtomicCAS,experimental.core -analyzer-store=region -analyzer-constraints=basic -verify -Wno-objc-root-class %s -// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,osx.AtomicCAS,experimental.core -analyzer-store=region -analyzer-constraints=range -verify -Wno-objc-root-class %s -// RUN: %clang_cc1 -DTEST_64 -triple x86_64-apple-darwin10 -analyze 
-analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,osx.AtomicCAS,experimental.core -analyzer-store=region -analyzer-constraints=basic -verify -Wno-objc-root-class %s -// RUN: %clang_cc1 -DTEST_64 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,osx.AtomicCAS,experimental.core -analyzer-store=region -analyzer-constraints=range -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -DTEST_64 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.cocoa.NilArg,osx.cocoa.RetainCount,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -Wno-objc-root-class %s //===----------------------------------------------------------------------===// diff --git a/test/Analysis/NSWindow.m b/test/Analysis/NSWindow.m index 495f8e1..a2e7297 100644 --- a/test/Analysis/NSWindow.m +++ b/test/Analysis/NSWindow.m @@ -1,5 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core,deadcode.DeadStores -analyzer-store=region -analyzer-constraints=basic -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core,deadcode.DeadStores -analyzer-store=region -analyzer-constraints=range -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core,deadcode.DeadStores -analyzer-store=region -analyzer-constraints=range -verify %s // These declarations were reduced using Delta-Debugging from Foundation.h // on Mac OS X. The test cases are below. diff --git a/test/Analysis/NoReturn.m b/test/Analysis/NoReturn.m index 1d948fa..6d547f4 100644 --- a/test/Analysis/NoReturn.m +++ b/test/Analysis/NoReturn.m @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=basic -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=range -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -analyzer-constraints=range -verify %s +// expected-no-diagnostics #include diff --git a/test/Analysis/OSAtomic_mac.cpp b/test/Analysis/OSAtomic_mac.cpp index 8ad7b3c..f938958 100644 --- a/test/Analysis/OSAtomic_mac.cpp +++ b/test/Analysis/OSAtomic_mac.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,osx -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks %s +// expected-no-diagnostics // Test handling of OSAtomicCompareAndSwap when C++ inserts "no-op" casts and we // do a forced load and binding to the environment on an expression that would regularly diff --git a/test/Analysis/ObjCProperties.m b/test/Analysis/ObjCProperties.m index b9ed9b9..1712fef 100644 --- a/test/Analysis/ObjCProperties.m +++ b/test/Analysis/ObjCProperties.m @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=basic -Wno-objc-root-class %s -verify -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=range -Wno-objc-root-class %s -verify +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -analyzer-constraints=range -Wno-objc-root-class %s -verify 
+// expected-no-diagnostics // The point of this test cases is to exercise properties in the static // analyzer diff --git a/test/Analysis/ObjCRetSigs.m b/test/Analysis/ObjCRetSigs.m index 13078a3..b5a3e7c 100644 --- a/test/Analysis/ObjCRetSigs.m +++ b/test/Analysis/ObjCRetSigs.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.core -analyzer-checker=osx.cocoa.IncompatibleMethodTypes -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.core -analyzer-checker=osx.cocoa.IncompatibleMethodTypes -verify -Wno-objc-root-class %s int printf(const char *, ...); diff --git a/test/Analysis/PR2599.m b/test/Analysis/PR2599.m index 5436063..fb368e3 100644 --- a/test/Analysis/PR2599.m +++ b/test/Analysis/PR2599.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -analyzer-constraints=range -analyzer-store=region -fobjc-gc -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core -analyzer-constraints=range -analyzer-store=region -fobjc-gc -verify %s typedef const void * CFTypeRef; typedef const struct __CFString * CFStringRef; diff --git a/test/Analysis/PR2978.m b/test/Analysis/PR2978.m index ea139d2..4684ae9 100644 --- a/test/Analysis/PR2978.m +++ b/test/Analysis/PR2978.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.core -analyzer-checker=experimental.osx.cocoa.Dealloc %s -verify +// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.core -analyzer-checker=alpha.osx.cocoa.Dealloc %s -verify // Tests for the checker which checks missing/extra ivar 'release' calls // in dealloc. diff --git a/test/Analysis/PR3991.m b/test/Analysis/PR3991.m index 38d0bc0..4d76fd3 100644 --- a/test/Analysis/PR3991.m +++ b/test/Analysis/PR3991.m @@ -1,5 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=basic -verify -triple x86_64-apple-darwin9 %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=range -verify -triple x86_64-apple-darwin9 %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -triple x86_64-apple-darwin9 %s //===----------------------------------------------------------------------===// // Delta-debugging produced forward declarations. 
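A minimal sketch of the analyzer regression-test idiom used throughout these hunks, with a hypothetical file and function name that are not part of this import: the RUN line enables the renamed alpha.core package together with debug.ExprInspection, and -verify checks the // expected-warning comments emitted for clang_analyzer_eval() calls.

// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -analyzer-constraints=range -verify %s

void clang_analyzer_eval(int); // provided by debug.ExprInspection

void testSketch(int x) {
  if (x != 42) return;          // constrain 'x' on this path
  clang_analyzer_eval(x == 42); // expected-warning{{TRUE}}
}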
diff --git a/test/Analysis/PR9741.cpp b/test/Analysis/PR9741.cpp index 7497d56..2807c44 100644 --- a/test/Analysis/PR9741.cpp +++ b/test/Analysis/PR9741.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -cc1 -std=c++11 -Wuninitialized -verify %s +// expected-no-diagnostics void f() { int a[] = { 1, 2, 3 }; diff --git a/test/Analysis/additive-folding.cpp b/test/Analysis/additive-folding.cpp index 0d749ec..4d58f1c 100644 --- a/test/Analysis/additive-folding.cpp +++ b/test/Analysis/additive-folding.cpp @@ -1,5 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -verify -analyzer-constraints=basic -Wno-tautological-compare %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -verify -analyzer-constraints=range -Wno-tautological-compare %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -verify -analyzer-constraints=range -Wno-tautological-compare -Wtautological-constant-out-of-range-compare %s void clang_analyzer_eval(bool); @@ -129,10 +128,10 @@ void tautologies(unsigned a) { // Tautologies from outside the range of the symbol void tautologiesOutside(unsigned char a) { - clang_analyzer_eval(a <= 0x100); // expected-warning{{TRUE}} - clang_analyzer_eval(a < 0x100); // expected-warning{{TRUE}} + clang_analyzer_eval(a <= 0x100); // expected-warning{{comparison of constant 256 with expression of type 'unsigned char' is always true}} expected-warning{{TRUE}} + clang_analyzer_eval(a < 0x100); // expected-warning{{comparison of constant 256 with expression of type 'unsigned char' is always true}} expected-warning{{TRUE}} - clang_analyzer_eval(a != 0x100); // expected-warning{{TRUE}} + clang_analyzer_eval(a != 0x100); // expected-warning{{comparison of constant 256 with expression of type 'unsigned char' is always true}} expected-warning{{TRUE}} clang_analyzer_eval(a != -1); // expected-warning{{TRUE}} clang_analyzer_eval(a > -1); // expected-warning{{TRUE}} diff --git a/test/Analysis/analyzer-config.c b/test/Analysis/analyzer-config.c new file mode 100644 index 0000000..990f578 --- /dev/null +++ b/test/Analysis/analyzer-config.c @@ -0,0 +1,13 @@ +// RUN: %clang --analyze %s -o /dev/null -Xclang -analyzer-checker=debug.ConfigDumper > %t 2>&1 +// RUN: FileCheck --input-file=%t %s + +void bar() {} +void foo() { bar(); } + +// CHECK: [config] +// CHECK-NEXT: cfg-temporary-dtors = false +// CHECK-NEXT: faux-bodies = true +// CHECK-NEXT: graph-trim-interval = 1000 +// CHECK-NEXT: ipa-always-inline-size = 3 +// CHECK-NEXT: [stats] +// CHECK-NEXT: num-entries = 4 diff --git a/test/Analysis/analyzer-config.cpp b/test/Analysis/analyzer-config.cpp new file mode 100644 index 0000000..fb14266 --- /dev/null +++ b/test/Analysis/analyzer-config.cpp @@ -0,0 +1,22 @@ +// RUN: %clang --analyze %s -o /dev/null -Xclang -analyzer-checker=debug.ConfigDumper > %t 2>&1 +// RUN: FileCheck --input-file=%t %s + +void bar() {} +void foo() { bar(); } + +class Foo { +public: + void bar() {} + void foo() { bar(); } +}; + +// CHECK: [config] +// CHECK-NEXT: c++-inlining = methods +// CHECK-NEXT: c++-stdlib-inlining = true +// CHECK-NEXT: c++-template-inlining = true +// CHECK-NEXT: cfg-temporary-dtors = false +// CHECK-NEXT: faux-bodies = true +// CHECK-NEXT: graph-trim-interval = 1000 +// CHECK-NEXT: ipa-always-inline-size = 3 +// CHECK-NEXT: [stats] +// CHECK-NEXT: num-entries = 7 diff --git a/test/Analysis/array-struct-region.c b/test/Analysis/array-struct-region.c index 244bc97..d628c47 100644 --- a/test/Analysis/array-struct-region.c +++ 
b/test/Analysis/array-struct-region.c @@ -1,5 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core,debug.ExprInspection -analyzer-store=region -analyzer-constraints=basic -analyzer-ipa=inlining -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core,debug.ExprInspection -analyzer-store=region -analyzer-constraints=range -analyzer-ipa=inlining -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -analyzer-constraints=range -verify %s void clang_analyzer_eval(int); @@ -184,3 +183,110 @@ int testConcreteInvalidationDoubleStruct(int index) { } +int testNonOverlappingStructFieldsSimple() { + S val; + + val.x = 1; + val.y = 2; + clang_analyzer_eval(val.x == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(val.y == 2); // expected-warning{{TRUE}} + + return val.z; // expected-warning{{garbage}} +} + +int testNonOverlappingStructFieldsSymbolicBase(int index, int anotherIndex) { + SS vals; + + vals.a[index].x = 42; + vals.a[index].y = 42; + clang_analyzer_eval(vals.a[index].x == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(vals.a[index].y == 42); // expected-warning{{TRUE}} + + vals.a[anotherIndex].x = 42; + clang_analyzer_eval(vals.a[index].x == 42); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(vals.a[index].y == 42); // expected-warning{{TRUE}} + + // FIXME: False negative. No bind ever set a field 'z'. + return vals.a[index].z; // no-warning +} + +int testStructFieldChains(int index, int anotherIndex) { + SS vals[4]; + + vals[index].a[0].x = 42; + vals[anotherIndex].a[1].y = 42; + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(vals[anotherIndex].a[1].y == 42); // expected-warning{{TRUE}} + + // This doesn't affect anything in the 'a' array field. + vals[anotherIndex].b[1].x = 42; + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(vals[anotherIndex].a[1].y == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(vals[anotherIndex].b[1].x == 42); // expected-warning{{TRUE}} + + // This doesn't affect anything in the 'b' array field. + vals[index].a[anotherIndex].x = 42; + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(vals[anotherIndex].a[0].x == 42); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(vals[anotherIndex].a[1].y == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(vals[anotherIndex].b[1].x == 42); // expected-warning{{TRUE}} + + // FIXME: False negative. No bind ever set a field 'z'. + return vals[index].a[0].z; // no-warning +} + +int testStructFieldChainsNested(int index, int anotherIndex) { + SS vals[4]; + + vals[index].a[0].x = 42; + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{TRUE}} + + vals[index].b[0] = makeS(); + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{TRUE}} + + vals[index].a[0] = makeS(); + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{UNKNOWN}} + + vals[index].a[0].x = 42; + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{TRUE}} + + return 0; +} + + +// -------------------- +// False positives +// -------------------- + +int testMixSymbolicAndConcrete(int index, int anotherIndex) { + SS vals; + + vals.a[index].x = 42; + vals.a[0].y = 42; + + // FIXME: Should be TRUE. + clang_analyzer_eval(vals.a[index].x == 42); // expected-warning{{UNKNOWN}} + // Should be TRUE; we set this explicitly. 
+ clang_analyzer_eval(vals.a[0].y == 42); // expected-warning{{TRUE}} + + vals.a[anotherIndex].y = 42; + + // Should be UNKNOWN; we set an 'x'. + clang_analyzer_eval(vals.a[index].x == 42); // expected-warning{{UNKNOWN}} + // FIXME: Should be TRUE. + clang_analyzer_eval(vals.a[0].y == 42); // expected-warning{{UNKNOWN}} + + return vals.a[0].x; // no-warning +} + +void testFieldChainIsNotEnough(int index) { + SS vals[4]; + + vals[index].a[0].x = 42; + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{TRUE}} + + vals[index].a[1] = makeS(); + // FIXME: Should be TRUE. + clang_analyzer_eval(vals[index].a[0].x == 42); // expected-warning{{UNKNOWN}} +} + diff --git a/test/Analysis/array-struct-region.cpp b/test/Analysis/array-struct-region.cpp new file mode 100644 index 0000000..12ae5d3 --- /dev/null +++ b/test/Analysis/array-struct-region.cpp @@ -0,0 +1,176 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -verify -x c %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -verify -x c++ -analyzer-config c++-inlining=constructors %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -DINLINE -verify -x c %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -DINLINE -verify -x c++ -analyzer-config c++-inlining=constructors %s + +void clang_analyzer_eval(int); + +struct S { + int field; + +#if __cplusplus + const struct S *getThis() const { return this; } + const struct S *operator +() const { return this; } + + bool check() const { return this == this; } + bool operator !() const { return this != this; } + + int operator *() const { return field; } +#endif +}; + +#if __cplusplus +const struct S *operator -(const struct S &s) { return &s; } +bool operator ~(const struct S &s) { return &s != &s; } +#endif + + +#ifdef INLINE +struct S getS() { + struct S s = { 42 }; + return s; +} +#else +struct S getS(); +#endif + + +void testAssignment() { + struct S s = getS(); + + if (s.field != 42) return; + clang_analyzer_eval(s.field == 42); // expected-warning{{TRUE}} + + s.field = 0; + clang_analyzer_eval(s.field == 0); // expected-warning{{TRUE}} + +#if __cplusplus + clang_analyzer_eval(s.getThis() == &s); // expected-warning{{TRUE}} + clang_analyzer_eval(+s == &s); // expected-warning{{TRUE}} + clang_analyzer_eval(-s == &s); // expected-warning{{TRUE}} + + clang_analyzer_eval(s.check()); // expected-warning{{TRUE}} + clang_analyzer_eval(!s); // expected-warning{{FALSE}} + clang_analyzer_eval(~s); // expected-warning{{FALSE}} + + clang_analyzer_eval(*s == 0); // expected-warning{{TRUE}} +#endif +} + + +void testImmediateUse() { + int x = getS().field; + + if (x != 42) return; + clang_analyzer_eval(x == 42); // expected-warning{{TRUE}} + +#if __cplusplus + clang_analyzer_eval((void *)getS().getThis() == (void *)&x); // expected-warning{{FALSE}} + clang_analyzer_eval((void *)+getS() == (void *)&x); // expected-warning{{FALSE}} + clang_analyzer_eval((void *)-getS() == (void *)&x); // expected-warning{{FALSE}} + + clang_analyzer_eval(getS().check()); // expected-warning{{TRUE}} + clang_analyzer_eval(!getS()); // expected-warning{{FALSE}} + clang_analyzer_eval(~getS()); // expected-warning{{FALSE}} +#endif +} + +int getConstrainedField(struct S s) { + if (s.field != 42) return 42; + return s.field; +} + +int getAssignedField(struct S s) { + s.field = 42; + return s.field; +} + +void testArgument() { + clang_analyzer_eval(getConstrainedField(getS()) == 42); // 
expected-warning{{TRUE}} + clang_analyzer_eval(getAssignedField(getS()) == 42); // expected-warning{{TRUE}} +} + +void testImmediateUseParens() { + int x = ((getS())).field; + + if (x != 42) return; + clang_analyzer_eval(x == 42); // expected-warning{{TRUE}} + + clang_analyzer_eval(getConstrainedField(((getS()))) == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(getAssignedField(((getS()))) == 42); // expected-warning{{TRUE}} + +#if __cplusplus + clang_analyzer_eval(((getS())).check()); // expected-warning{{TRUE}} + clang_analyzer_eval(!((getS()))); // expected-warning{{FALSE}} + clang_analyzer_eval(~((getS()))); // expected-warning{{FALSE}} +#endif +} + + +//-------------------- +// C++-only tests +//-------------------- + +#if __cplusplus +void testReferenceAssignment() { + const S &s = getS(); + + if (s.field != 42) return; + clang_analyzer_eval(s.field == 42); // expected-warning{{TRUE}} + + clang_analyzer_eval(s.getThis() == &s); // expected-warning{{TRUE}} + clang_analyzer_eval(+s == &s); // expected-warning{{TRUE}} + + clang_analyzer_eval(s.check()); // expected-warning{{TRUE}} + clang_analyzer_eval(!s); // expected-warning{{FALSE}} + clang_analyzer_eval(~s); // expected-warning{{FALSE}} + + clang_analyzer_eval(*s == 42); // expected-warning{{TRUE}} +} + + +int getConstrainedFieldRef(const S &s) { + if (s.field != 42) return 42; + return s.field; +} + +bool checkThis(const S &s) { + return s.getThis() == &s; +} + +bool checkThisOp(const S &s) { + return +s == &s; +} + +bool checkThisStaticOp(const S &s) { + return -s == &s; +} + +void testReferenceArgument() { + clang_analyzer_eval(getConstrainedFieldRef(getS()) == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(checkThis(getS())); // expected-warning{{TRUE}} + clang_analyzer_eval(checkThisOp(getS())); // expected-warning{{TRUE}} + clang_analyzer_eval(checkThisStaticOp(getS())); // expected-warning{{TRUE}} +} + + +int getConstrainedFieldOp(S s) { + if (*s != 42) return 42; + return *s; +} + +int getConstrainedFieldRefOp(const S &s) { + if (*s != 42) return 42; + return *s; +} + +void testImmediateUseOp() { + int x = *getS(); + if (x != 42) return; + clang_analyzer_eval(x == 42); // expected-warning{{TRUE}} + + clang_analyzer_eval(getConstrainedFieldOp(getS()) == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(getConstrainedFieldRefOp(getS()) == 42); // expected-warning{{TRUE}} +} + +#endif diff --git a/test/Analysis/array-struct.c b/test/Analysis/array-struct.c index 1b36190..c22f979 100644 --- a/test/Analysis/array-struct.c +++ b/test/Analysis/array-struct.c @@ -1,5 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core.CastToStruct -analyzer-store=region -analyzer-constraints=basic -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core.CastToStruct -analyzer-store=region -analyzer-constraints=range -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core.CastToStruct -analyzer-store=region -analyzer-constraints=range -verify %s struct s { int data; @@ -176,3 +175,11 @@ void f18() { if (*q) { // no-warning } } + + +// [PR13927] offsetof replacement macro flagged as "dereference of a null pointer" +int offset_of_data_array(void) +{ + return ((char *)&(((struct s*)0)->data_array)) - ((char *)0); // no-warning +} + diff --git a/test/Analysis/auto-obj-dtors-cfg-output.cpp b/test/Analysis/auto-obj-dtors-cfg-output.cpp index 566e6ca..e4b49dc 100644 --- a/test/Analysis/auto-obj-dtors-cfg-output.cpp +++ b/test/Analysis/auto-obj-dtors-cfg-output.cpp @@ 
-1,4 +1,4 @@ -// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -analyze -analyzer-checker=debug.DumpCFG -cfg-add-implicit-dtors %s > %t 2>&1 +// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -analyze -analyzer-checker=debug.DumpCFG %s > %t 2>&1 // RUN: FileCheck --input-file=%t %s // XPASS: * diff --git a/test/Analysis/base-init.cpp b/test/Analysis/base-init.cpp index e63d508..34e01aa 100644 --- a/test/Analysis/base-init.cpp +++ b/test/Analysis/base-init.cpp @@ -1,5 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-store region -analyzer-ipa=inlining -verify %s -// XFAIL: * +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -analyzer-config c++-inlining=constructors -verify %s void clang_analyzer_eval(bool); diff --git a/test/Analysis/bitwise-ops.c b/test/Analysis/bitwise-ops.c new file mode 100644 index 0000000..bf282ec --- /dev/null +++ b/test/Analysis/bitwise-ops.c @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -verify %s + +void clang_analyzer_eval(int); +#define CHECK(expr) if (!(expr)) return; clang_analyzer_eval(expr) + +void testPersistentConstraints(int x, int y) { + // Sanity check + CHECK(x); // expected-warning{{TRUE}} + CHECK(x & 1); // expected-warning{{TRUE}} + + // False positives due to SValBuilder giving up on certain kinds of exprs. + CHECK(1 - x); // expected-warning{{UNKNOWN}} + CHECK(x & y); // expected-warning{{UNKNOWN}} +} \ No newline at end of file diff --git a/test/Analysis/blocks.m b/test/Analysis/blocks.m index ff376d1..54ff58c 100644 --- a/test/Analysis/blocks.m +++ b/test/Analysis/blocks.m @@ -26,10 +26,12 @@ typedef struct _NSZone NSZone; @protocol NSCoding - (void)encodeWithCoder:(NSCoder *)aCoder; @end @interface NSObject {} + (id)alloc; +- (id)copy; @end extern id NSAllocateObject(Class aClass, NSUInteger extraBytes, NSZone *zone); -@interface NSString : NSObject - (NSUInteger)length; -- ( const char *)UTF8String; +@interface NSString : NSObject +- (NSUInteger)length; +- (const char *)UTF8String; - (id)initWithFormat:(NSString *)format arguments:(va_list)argList __attribute__((format(__NSString__, 1, 0))); @end @class NSString, NSData; @@ -85,4 +87,10 @@ void test2_b() { void test2_c() { typedef void (^myblock)(void); myblock f = ^() { f(); }; // expected-warning{{Variable 'f' is uninitialized when captured by block}} -} \ No newline at end of file +} + + +void testMessaging() { + // + [[^(){} copy] release]; +} diff --git a/test/Analysis/bool-assignment.cpp b/test/Analysis/bool-assignment.cpp index e573129..9361d93 100644 --- a/test/Analysis/bool-assignment.cpp +++ b/test/Analysis/bool-assignment.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core.BoolAssignment -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core.BoolAssignment -analyzer-store=region -verify %s // Test C++'s bool diff --git a/test/Analysis/bool-assignment2.c b/test/Analysis/bool-assignment2.c index 9de26cf..22f4237 100644 --- a/test/Analysis/bool-assignment2.c +++ b/test/Analysis/bool-assignment2.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c99 -analyze -analyzer-checker=core,experimental.core.BoolAssignment -analyzer-store=region -verify %s +// RUN: %clang_cc1 -std=c99 -analyze -analyzer-checker=core,alpha.core.BoolAssignment -analyzer-store=region -verify %s // Test stdbool.h's _Bool diff --git a/test/Analysis/bstring.c b/test/Analysis/bstring.c index d383d42..69d281a 100644 --- 
a/test/Analysis/bstring.c +++ b/test/Analysis/bstring.c @@ -1,7 +1,7 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.cstring,experimental.unix.cstring,debug.ExprInspection -analyzer-store=region -verify %s -// RUN: %clang_cc1 -analyze -DUSE_BUILTINS -analyzer-checker=core,unix.cstring,experimental.unix.cstring,debug.ExprInspection -analyzer-store=region -verify %s -// RUN: %clang_cc1 -analyze -DVARIANT -analyzer-checker=core,unix.cstring,experimental.unix.cstring,debug.ExprInspection -analyzer-store=region -verify %s -// RUN: %clang_cc1 -analyze -DUSE_BUILTINS -DVARIANT -analyzer-checker=core,unix.cstring,experimental.unix.cstring,debug.ExprInspection -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.cstring,alpha.unix.cstring,debug.ExprInspection -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -DUSE_BUILTINS -analyzer-checker=core,unix.cstring,alpha.unix.cstring,debug.ExprInspection -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -DVARIANT -analyzer-checker=core,unix.cstring,alpha.unix.cstring,debug.ExprInspection -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -DUSE_BUILTINS -DVARIANT -analyzer-checker=core,unix.cstring,alpha.unix.cstring,debug.ExprInspection -analyzer-store=region -verify %s //===----------------------------------------------------------------------=== // Declarations diff --git a/test/Analysis/casts.c b/test/Analysis/casts.c index f862ddf..1c0f357 100644 --- a/test/Analysis/casts.c +++ b/test/Analysis/casts.c @@ -1,5 +1,6 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s -// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s +// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s +// expected-no-diagnostics // Test if the 'storage' region gets properly initialized after it is cast to // 'struct sockaddr *'. diff --git a/test/Analysis/casts.m b/test/Analysis/casts.m index 6f19211..1a78940 100644 --- a/test/Analysis/casts.m +++ b/test/Analysis/casts.m @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s +// expected-no-diagnostics // Test function pointer casts. Currently we track function addresses using // loc::FunctionVal. 
Because casts can be arbitrary, do we need to model diff --git a/test/Analysis/cfref_PR2519.c b/test/Analysis/cfref_PR2519.c index 5292109..7fce226 100644 --- a/test/Analysis/cfref_PR2519.c +++ b/test/Analysis/cfref_PR2519.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -analyzer-store=region -analyzer-constraints=basic -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -analyzer-store=region -analyzer-constraints=range -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core -analyzer-store=region -analyzer-constraints=range -verify %s +// expected-no-diagnostics typedef unsigned char Boolean; typedef signed long CFIndex; diff --git a/test/Analysis/cfref_rdar6080742.c b/test/Analysis/cfref_rdar6080742.c index ea4c3ee..0023d13 100644 --- a/test/Analysis/cfref_rdar6080742.c +++ b/test/Analysis/cfref_rdar6080742.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=basic -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=range -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -analyzer-constraints=range -verify %s +// expected-no-diagnostics // This test case was reported in . // It tests path-sensitivity with respect to '!(cfstring != 0)' (negation of inequality). diff --git a/test/Analysis/chroot.c b/test/Analysis/chroot.c index 1948f48..1b818a8 100644 --- a/test/Analysis/chroot.c +++ b/test/Analysis/chroot.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.unix.Chroot -analyzer-store region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.unix.Chroot -analyzer-store region -verify %s extern int chroot(const char* path); extern int chdir(const char* path); diff --git a/test/Analysis/comparison-implicit-casts.cpp b/test/Analysis/comparison-implicit-casts.cpp index df9d870..96aa0ff 100644 --- a/test/Analysis/comparison-implicit-casts.cpp +++ b/test/Analysis/comparison-implicit-casts.cpp @@ -1,5 +1,3 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.cstring,debug.ExprInspection -analyzer-constraints=basic -triple i386-apple-darwin9 -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.cstring,debug.ExprInspection -analyzer-constraints=basic -triple x86_64-apple-darwin9 -verify %s // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.cstring,debug.ExprInspection -analyzer-constraints=range -triple i386-apple-darwin9 -verify %s // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.cstring,debug.ExprInspection -analyzer-constraints=range -triple x86_64-apple-darwin9 -verify %s diff --git a/test/Analysis/complex-init-list.cpp b/test/Analysis/complex-init-list.cpp new file mode 100644 index 0000000..bbff64b --- /dev/null +++ b/test/Analysis/complex-init-list.cpp @@ -0,0 +1,7 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=text -verify %s +// expected-no-diagnostics + +// Do not crash on initialization to complex numbers. 
+void init_complex() { + _Complex float valid1 = { 0.0f, 0.0f }; +} diff --git a/test/Analysis/complex.c b/test/Analysis/complex.c index c118a61..3900bcb 100644 --- a/test/Analysis/complex.c +++ b/test/Analysis/complex.c @@ -1,4 +1,3 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-store=region -analyzer-constraints=basic -verify -Wno-unreachable-code -ffreestanding %s // RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-store=region -analyzer-constraints=range -verify -Wno-unreachable-code -ffreestanding %s #include diff --git a/test/Analysis/concrete-address.c b/test/Analysis/concrete-address.c index f6c445e..819afca 100644 --- a/test/Analysis/concrete-address.c +++ b/test/Analysis/concrete-address.c @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s +// expected-no-diagnostics void foo() { int *p = (int*) 0x10000; // Should not crash here. diff --git a/test/Analysis/conditional-operator-path-notes.c b/test/Analysis/conditional-operator-path-notes.c new file mode 100644 index 0000000..de313a7 --- /dev/null +++ b/test/Analysis/conditional-operator-path-notes.c @@ -0,0 +1,1083 @@ +// RUN: %clang --analyze %s -Xanalyzer -analyzer-output=text -Xclang -verify +// RUN: %clang --analyze %s -o %t +// RUN: FileCheck --input-file=%t %s + +void testCondOp(int *p) { + int *x = p ? p : p; + // expected-note@-1 {{Assuming 'p' is null}} + // expected-note@-2 {{'?' condition is false}} + // expected-note@-3 {{Variable 'x' initialized to a null pointer value}} + *x = 1; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}} + // expected-note@-1 {{Dereference of null pointer (loaded from variable 'x')}} +} + +void testCondProblem(int *p) { + if (p) return; + // expected-note@-1 {{Assuming 'p' is null}} + // expected-note@-2 {{Taking false branch}} + + int x = *p ? 0 : 1; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} + // expected-note@-1 {{Dereference of null pointer (loaded from variable 'p')}} + (void)x; +} + +void testLHSProblem(int *p) { + int x = !p ? *p : 1; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} + // expected-note@-1 {{Assuming 'p' is null}} + // expected-note@-2 {{'?' condition is true}} + // expected-note@-3 {{Dereference of null pointer (loaded from variable 'p')}} + (void)x; +} + +void testRHSProblem(int *p) { + int x = p ? 1 : *p; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} + // expected-note@-1 {{Assuming 'p' is null}} + // expected-note@-2 {{'?' condition is false}} + // expected-note@-3 {{Dereference of null pointer (loaded from variable 'p')}} + (void)x; +} + +void testBinaryCondOp(int *p) { + int *x = p ?: p; + // expected-note@-1 {{'?' 
condition is false}} + // expected-note@-2 {{Variable 'x' initialized to a null pointer value}} + *x = 1; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}} + // expected-note@-1 {{Dereference of null pointer (loaded from variable 'x')}} +} + +void testBinaryLHSProblem(int *p) { + if (p) return; + // expected-note@-1 {{Assuming 'p' is null}} + // expected-note@-2 {{Taking false branch}} + + int x = *p ?: 1; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} + // expected-note@-1 {{Dereference of null pointer (loaded from variable 'p')}} + (void)x; +} + +// CHECK: diagnostics +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'x' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'x' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'x') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'x') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'x') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestCondOp +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line15 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestCondProblem +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col16 +// CHECK-NEXT: 
file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestLHSProblem +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: 
col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestRHSProblem +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'x' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'x' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: 
+// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'x') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'x') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'x') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestBinaryCondOp +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestBinaryLHSProblem +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: diff --git a/test/Analysis/coverage.c b/test/Analysis/coverage.c index 8116913..66f0a5e 100644 --- a/test/Analysis/coverage.c +++ b/test/Analysis/coverage.c @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc -analyzer-store=region -analyzer-max-loop 4 -verify %s -#include "system-header-simulator.h" +#include "Inputs/system-header-simulator.h" typedef __typeof(sizeof(int)) size_t; void *malloc(size_t); diff --git a/test/Analysis/cstring-syntax-cxx.cpp b/test/Analysis/cstring-syntax-cxx.cpp index f8975ab..bae3d0a 100644 --- a/test/Analysis/cstring-syntax-cxx.cpp +++ b/test/Analysis/cstring-syntax-cxx.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=unix.cstring.BadSizeArg -analyzer-store=region -verify %s +// expected-no-diagnostics // Ensure we don't crash on C++ declarations with special names. struct X { diff --git a/test/Analysis/ctor-inlining.mm b/test/Analysis/ctor-inlining.mm index fe4781c..ac963e5 100644 --- a/test/Analysis/ctor-inlining.mm +++ b/test/Analysis/ctor-inlining.mm @@ -1,6 +1,7 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -fobjc-arc -cfg-add-implicit-dtors -Wno-null-dereference -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -fobjc-arc -analyzer-ipa=inlining -analyzer-config c++-inlining=constructors -Wno-null-dereference -verify %s void clang_analyzer_eval(bool); +void clang_analyzer_checkInlined(bool); struct Wrapper { __strong id obj; @@ -86,4 +87,33 @@ namespace ConstructorVirtualCalls { } } +namespace TemporaryConstructor { + class BoolWrapper { + public: + BoolWrapper() { + clang_analyzer_checkInlined(true); // expected-warning{{TRUE}} + value = true; + } + bool value; + }; + + void test() { + // PR13717 - Don't crash when a CXXTemporaryObjectExpr is inlined. 
+    if (BoolWrapper().value)
+      return;
+  }
+}
+
+namespace ConstructorUsedAsRValue {
+  using TemporaryConstructor::BoolWrapper;
+
+  bool extractValue(BoolWrapper b) {
+    return b.value;
+  }
+
+  void test() {
+    bool result = extractValue(BoolWrapper());
+    clang_analyzer_eval(result); // expected-warning{{TRUE}}
+  }
+}
diff --git a/test/Analysis/cxx-crashes.cpp b/test/Analysis/cxx-crashes.cpp
index 8912bf6..e3f8125 100644
--- a/test/Analysis/cxx-crashes.cpp
+++ b/test/Analysis/cxx-crashes.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc,debug.ExprInspection -verify %s
+// REQUIRES: LP64
 
 void clang_analyzer_eval(bool);
 
diff --git a/test/Analysis/cxx-method-names.cpp b/test/Analysis/cxx-method-names.cpp
index 8afbb85..21be5e4 100644
--- a/test/Analysis/cxx-method-names.cpp
+++ b/test/Analysis/cxx-method-names.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix,osx,experimental.unix,experimental.security.taint -analyzer-store region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix,osx,alpha.unix,alpha.security.taint -analyzer-store region -verify %s
+// expected-no-diagnostics
 
 class Evil {
 public:
diff --git a/test/Analysis/cxx11-crashes.cpp b/test/Analysis/cxx11-crashes.cpp
index 16bfc89..d0b9222 100644
--- a/test/Analysis/cxx11-crashes.cpp
+++ b/test/Analysis/cxx11-crashes.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -analyze -analyzer-checker=core -std=c++11 -verify %s
+// expected-no-diagnostics
 
 // radar://11485149, PR12871
 class PlotPoint {
diff --git a/test/Analysis/dead-stores.c b/test/Analysis/dead-stores.c
index b8d195d..165a12b 100644
--- a/test/Analysis/dead-stores.c
+++ b/test/Analysis/dead-stores.c
@@ -1,6 +1,5 @@
-// RUN: %clang_cc1 -Wunused-variable -analyze -analyzer-checker=core,deadcode.DeadStores,experimental.deadcode.IdempotentOperations -fblocks -verify -Wno-unreachable-code -analyzer-opt-analyze-nested-blocks %s
-// RUN: %clang_cc1 -Wunused-variable -analyze -analyzer-checker=core,deadcode.DeadStores,experimental.deadcode.IdempotentOperations -analyzer-store=region -analyzer-constraints=basic -fblocks -verify -Wno-unreachable-code -analyzer-opt-analyze-nested-blocks %s
-// RUN: %clang_cc1 -Wunused-variable -analyze -analyzer-checker=core,deadcode.DeadStores,experimental.deadcode.IdempotentOperations -analyzer-store=region -analyzer-constraints=range -fblocks -verify -Wno-unreachable-code -analyzer-opt-analyze-nested-blocks %s
+// RUN: %clang_cc1 -Wunused-variable -analyze -analyzer-checker=core,deadcode.DeadStores,alpha.deadcode.IdempotentOperations -fblocks -verify -Wno-unreachable-code -analyzer-opt-analyze-nested-blocks %s
+// RUN: %clang_cc1 -Wunused-variable -analyze -analyzer-checker=core,deadcode.DeadStores,alpha.deadcode.IdempotentOperations -analyzer-store=region -analyzer-constraints=range -fblocks -verify -Wno-unreachable-code -analyzer-opt-analyze-nested-blocks %s
 
 void f1() {
   int k, y; // expected-warning{{unused variable 'k'}} expected-warning{{unused variable 'y'}}
diff --git a/test/Analysis/dead-stores.cpp b/test/Analysis/dead-stores.cpp
index 43d8796..86d84f0 100644
--- a/test/Analysis/dead-stores.cpp
+++ b/test/Analysis/dead-stores.cpp
@@ -1,5 +1,4 @@
 // RUN: %clang_cc1 -fcxx-exceptions -fexceptions -analyze -analyzer-checker=deadcode.DeadStores -verify -Wno-unreachable-code %s
-// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -analyze -analyzer-store=region -analyzer-constraints=basic -analyzer-checker=deadcode.DeadStores -verify -Wno-unreachable-code %s
 // RUN: %clang_cc1 -fcxx-exceptions -fexceptions -analyze -analyzer-store=region -analyzer-constraints=range -analyzer-checker=deadcode.DeadStores -verify -Wno-unreachable-code %s
 
 //===----------------------------------------------------------------------===//
@@ -110,3 +109,43 @@ namespace foo {
   }
 }
+//===----------------------------------------------------------------------===//
+// Dead stores in with EH code.
+//===----------------------------------------------------------------------===//
+
+void test_5_Aux();
+int test_5() {
+  int x = 0;
+  try {
+    x = 2; // no-warning
+    test_5_Aux();
+  }
+  catch (int z) {
+    return x + z;
+  }
+  return 1;
+}
+
+
+int test_6_aux(unsigned x);
+
+void test_6() {
+  unsigned currDestLen = 0; // no-warning
+  try {
+    while (test_6_aux(currDestLen)) {
+      currDestLen += 2; // no-warning
+    }
+  }
+  catch (void *) {}
+}
+
+void test_6b() {
+  unsigned currDestLen = 0; // no-warning
+  try {
+    while (test_6_aux(currDestLen)) {
+      currDestLen += 2; // expected-warning {{Value stored to 'currDestLen' is never read}}
+      break;
+    }
+  }
+  catch (void *) {}
+}
diff --git a/test/Analysis/dead-stores.m b/test/Analysis/dead-stores.m
index fe56554..5a807ed 100644
--- a/test/Analysis/dead-stores.m
+++ b/test/Analysis/dead-stores.m
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.core -analyzer-checker=deadcode.DeadStores,osx.cocoa.RetainCount -fblocks -verify -Wno-objc-root-class %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.core -analyzer-checker=deadcode.DeadStores,osx.cocoa.RetainCount -fblocks -verify -Wno-objc-root-class %s
+// expected-no-diagnostics
 
 typedef signed char BOOL;
 typedef unsigned int NSUInteger;
diff --git a/test/Analysis/delegates.m b/test/Analysis/delegates.m
index 7fc4f2b..28e9006 100644
--- a/test/Analysis/delegates.m
+++ b/test/Analysis/delegates.m
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount -analyzer-store=region -Wno-objc-root-class -verify %s
+// expected-no-diagnostics
 
 
 //===----------------------------------------------------------------------===//
diff --git a/test/Analysis/derived-to-base.cpp b/test/Analysis/derived-to-base.cpp
index f833678..30e7a31 100644
--- a/test/Analysis/derived-to-base.cpp
+++ b/test/Analysis/derived-to-base.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -verify %s
 
 void clang_analyzer_eval(bool);
 
diff --git a/test/Analysis/diagnostics/deref-track-symbolic-region.c b/test/Analysis/diagnostics/deref-track-symbolic-region.c
new file mode 100644
index 0000000..3ba2707
--- /dev/null
+++ b/test/Analysis/diagnostics/deref-track-symbolic-region.c
@@ -0,0 +1,354 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=text -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=plist-multi-file %s -o - | FileCheck %s
+
+struct S {
+  int *x;
+  int y;
+};
+
+int *foo();
+
+void inlined(struct S *s, int m) {
+  if (s->x)
+    //expected-note@-1{{Taking false branch}}
+    //expected-note@-2{{Assuming pointer value is null}}
+
+    m++;
+
+}
+void test(struct S syz, int *pp) {
+  int m = 0;
+  syz.x = foo();
+  inlined(&syz, m);
+  // expected-note@-1{{Calling 'inlined'}}
+  // expected-note@-2{{Returning from 'inlined'}}
+  m += *syz.x; // expected-warning{{Dereference of null pointer (loaded from field 'x')}}
+  // expected-note@-1{{Dereference of null pointer (loaded from field
'x')}} +} + +//CHECK: +//CHECK: files +//CHECK: +//CHECK: +//CHECK: diagnostics +//CHECK: +//CHECK: +//CHECK: path +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line20 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line20 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line22 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line22 +//CHECK: col9 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line22 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line22 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line22 +//CHECK: col18 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth0 +//CHECK: extended_message +//CHECK: Calling 'inlined' +//CHECK: message +//CHECK: Calling 'inlined' +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line11 +//CHECK: col1 +//CHECK: file0 +//CHECK: +//CHECK: depth1 +//CHECK: extended_message +//CHECK: Entered call from 'test' +//CHECK: message +//CHECK: Entered call from 'test' +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line11 +//CHECK: col1 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line11 +//CHECK: col4 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line12 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line12 +//CHECK: col4 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line12 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line12 +//CHECK: col4 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line12 +//CHECK: col7 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line12 +//CHECK: col7 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line12 +//CHECK: col7 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line12 +//CHECK: col7 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line12 +//CHECK: col10 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth1 +//CHECK: extended_message +//CHECK: Assuming pointer value is null +//CHECK: message +//CHECK: Assuming pointer value is null +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line22 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line22 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line22 +//CHECK: col18 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth1 +//CHECK: extended_message +//CHECK: Returning from 'inlined' +//CHECK: message +//CHECK: Returning from 'inlined' +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line22 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line22 +//CHECK: col9 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line25 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line25 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges 
+//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line25 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line25 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line25 +//CHECK: col8 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line25 +//CHECK: col8 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line25 +//CHECK: col8 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line25 +//CHECK: col13 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line25 +//CHECK: col13 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth0 +//CHECK: extended_message +//CHECK: Dereference of null pointer (loaded from field 'x') +//CHECK: message +//CHECK: Dereference of null pointer (loaded from field 'x') +//CHECK: +//CHECK: +//CHECK: descriptionDereference of null pointer (loaded from field 'x') +//CHECK: categoryLogic error +//CHECK: typeDereference of null pointer +//CHECK: issue_context_kindfunction +//CHECK: issue_contexttest +//CHECK: issue_hash6 +//CHECK: location +//CHECK: +//CHECK: line25 +//CHECK: col8 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: diff --git a/test/Analysis/diagnostics/deref-track-symbolic-region.cpp b/test/Analysis/diagnostics/deref-track-symbolic-region.cpp new file mode 100644 index 0000000..fb493d7 --- /dev/null +++ b/test/Analysis/diagnostics/deref-track-symbolic-region.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=text -verify %s + +struct S { + int *x; + int y; +}; + +S &getSomeReference(); +void test(S *p) { + S &r = *p; //expected-note {{Variable 'r' initialized here}} + if (p) return; + //expected-note@-1{{Taking false branch}} + //expected-note@-2{{Assuming 'p' is null}} + r.y = 5; // expected-warning {{Access to field 'y' results in a dereference of a null pointer (loaded from variable 'r')}} + // expected-note@-1{{Access to field 'y' results in a dereference of a null pointer (loaded from variable 'r')}} +} diff --git a/test/Analysis/diagnostics/undef-value-caller.c b/test/Analysis/diagnostics/undef-value-caller.c index 928839d..627b334 100644 --- a/test/Analysis/diagnostics/undef-value-caller.c +++ b/test/Analysis/diagnostics/undef-value-caller.c @@ -4,235 +4,162 @@ #include "undef-value-callee.h" // This code used to cause a crash since we were not adding fileID of the header to the plist diagnostic. -// Note, in the future, we do not even need to step into this callee since it does not influence the result. 
+ int test_calling_unimportant_callee(int argc, char *argv[]) { int x; callee(); return x; // expected-warning {{Undefined or garbage value returned to caller}} } -// CHECK: -// CHECK: -// CHECK: -// CHECK: files -// CHECK: -// CHECK: -// CHECK: diagnostics -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line9 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line9 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line9 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'x' declared without an initial value -// CHECK: message -// CHECK: Variable 'x' declared without an initial value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line9 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line9 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'callee' -// CHECK: message -// CHECK: Calling 'callee' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line2 -// CHECK: col1 -// CHECK: file1 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'test_calling_unimportant_callee' -// CHECK: message -// CHECK: Entered call from 'test_calling_unimportant_callee' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Returning from 'callee' -// CHECK: message -// CHECK: Returning from 'callee' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line11 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line11 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line11 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line11 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line11 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Undefined or garbage value returned to caller -// CHECK: message -// CHECK: Undefined or garbage value returned to 
caller -// CHECK: -// CHECK: -// CHECK: descriptionUndefined or garbage value returned to caller -// CHECK: categoryLogic error -// CHECK: typeGarbage return value -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_calling_unimportant_callee -// CHECK: location -// CHECK: -// CHECK: line11 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: +//CHECK: +//CHECK: files +//CHECK: +//CHECK: +//CHECK: diagnostics +//CHECK: +//CHECK: +//CHECK: path +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line9 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line9 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line9 +//CHECK: col7 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth0 +//CHECK: extended_message +//CHECK: Variable 'x' declared without an initial value +//CHECK: message +//CHECK: Variable 'x' declared without an initial value +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line9 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line9 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line10 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line10 +//CHECK: col8 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line10 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line10 +//CHECK: col8 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line11 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line11 +//CHECK: col8 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line11 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line11 +//CHECK: col10 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line11 +//CHECK: col10 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth0 +//CHECK: extended_message +//CHECK: Undefined or garbage value returned to caller +//CHECK: message +//CHECK: Undefined or garbage value returned to caller +//CHECK: +//CHECK: +//CHECK: descriptionUndefined or garbage value returned to caller +//CHECK: categoryLogic error +//CHECK: typeGarbage return value +//CHECK: issue_context_kindfunction +//CHECK: issue_contexttest_calling_unimportant_callee +//CHECK: issue_hash3 +//CHECK: location +//CHECK: +//CHECK: line11 +//CHECK: col3 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: diff --git a/test/Analysis/diagnostics/undef-value-param.c b/test/Analysis/diagnostics/undef-value-param.c new file mode 100644 index 0000000..88d87cf --- /dev/null +++ b/test/Analysis/diagnostics/undef-value-param.c @@ -0,0 +1,1183 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=text -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=plist-multi-file %s -o - | FileCheck %s + +void foo_irrelevant(int c) { + if (c) + return; + c++; + return; +} +void foo(int c, int *x) { + if (c) + //expected-note@-1{{Assuming 'c' is not equal to 0}} + //expected-note@-2{{Taking true branch}} + return; + *x = 5; +} + +int use(int c) { + int xx; //expected-note{{Variable 'xx' declared without an initial value}} + int *y = &xx; + foo (c, y); + 
//expected-note@-1{{Calling 'foo'}} + //expected-note@-2{{Returning from 'foo'}} + foo_irrelevant(c); + return xx+3; //expected-warning{{The left operand of '+' is a garbage value}} + //expected-note@-1{{The left operand of '+' is a garbage value}} +} + +void initArray(int x, double XYZ[3]) { + if (x <= 0) //expected-note {{Taking true branch}} + //expected-note@-1 {{Assuming 'x' is <= 0}} + return; + XYZ[0] = 1; + XYZ[1] = 1; + XYZ[2] = 1; +} +int testPassingParentRegionArray(int x) { + double XYZ[3]; + initArray(x, XYZ); //expected-note {{Calling 'initArray'}} + //expected-note@-1 {{Returning from 'initArray'}} + return 1 * XYZ[1]; //expected-warning {{The right operand of '*' is a garbage value}} + //expected-note@-1 {{The right operand of '*' is a garbage value}} +} + +double *getValidPtr(); +struct WithFields { + double *f1; +}; +void initStruct(int x, struct WithFields *X) { + if (x <= 0) //expected-note {{Taking true branch}} + //expected-note@-1 {{Assuming 'x' is <= 0}} + + return; + X->f1 = getValidPtr(); +} +double testPassingParentRegionStruct(int x) { + struct WithFields st; + st.f1 = 0; + initStruct(x, &st); //expected-note {{Calling 'initStruct'}} + //expected-note@-1 {{Returning from 'initStruct'}} + return (*st.f1); //expected-warning {{Dereference of null pointer}} + //expected-note@-1{{Dereference of null pointer (loaded from field 'f1')}} +} + +// CHECK: diagnostics +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'xx' declared without an initial value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'xx' declared without an initial value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'foo' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'foo' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// 
CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'use' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'use' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'c' is not equal to 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'c' is not equal to 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line14 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line14 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning from 'foo' +// CHECK-NEXT: message +// CHECK-NEXT: Returning from 'foo' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 
+// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: The left operand of '+' is a garbage value +// CHECK-NEXT: message +// CHECK-NEXT: The left operand of '+' is a garbage value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionThe left operand of '+' is a garbage value +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeResult of operation is garbage or undefined +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextuse +// CHECK-NEXT: issue_hash7 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line25 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'initArray' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'initArray' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'testPassingParentRegionArray' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'testPassingParentRegionArray' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'x' is <= 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'x' is <= 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line32 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line32 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning from 'initArray' +// CHECK-NEXT: message +// CHECK-NEXT: Returning from 'initArray' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: The right operand of '*' is a garbage value +// CHECK-NEXT: message +// CHECK-NEXT: The right operand of '*' is a garbage value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionThe right operand of '*' is a garbage value +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeResult of operation is garbage or undefined +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestPassingParentRegionArray +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// 
CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'initStruct' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'initStruct' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'testPassingParentRegionStruct' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'testPassingParentRegionStruct' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'x' is <= 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'x' is <= 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: 
line53 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning from 'initStruct' +// CHECK-NEXT: message +// CHECK-NEXT: Returning from 'initStruct' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from field 'f1') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from field 'f1') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from field 'f1') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestPassingParentRegionStruct +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: diff --git a/test/Analysis/diagnostics/undef-value-param.m b/test/Analysis/diagnostics/undef-value-param.m new file mode 100644 index 0000000..d2a7a08 --- /dev/null +++ b/test/Analysis/diagnostics/undef-value-param.m @@ -0,0 +1,476 @@ +// RUN: 
%clang_cc1 -analyze -analyzer-checker=core,osx -analyzer-output=text -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx -analyzer-output=plist-multi-file %s -o - | FileCheck %s + +typedef signed char BOOL; +@protocol NSObject - (BOOL)isEqual:(id)object; @end +@interface NSObject {} ++(id)alloc; ++(id)new; +-(id)init; +-(id)autorelease; +-(id)copy; +- (Class)class; +-(id)retain; +@end +typedef const void * CFTypeRef; +extern void CFRelease(CFTypeRef cf); + +@interface Cell : NSObject +- (void)test; +@end + +@interface SpecialString ++ (id)alloc; +- (oneway void)release; +@end + +typedef SpecialString* SCDynamicStoreRef; +static void CreateRef(SCDynamicStoreRef *storeRef, unsigned x); +SCDynamicStoreRef anotherCreateRef(unsigned *err, unsigned x); + +@implementation Cell +- (void) test { + SCDynamicStoreRef storeRef = 0; //expected-note{{Variable 'storeRef' initialized to nil}} + CreateRef(&storeRef, 4); + //expected-note@-1{{Calling 'CreateRef'}} + //expected-note@-2{{Returning from 'CreateRef'}} + CFRelease(storeRef); //expected-warning {{Null pointer argument in call to CFRelease}} + //expected-note@-1{{Null pointer argument in call to CFRelease}} +} +@end + +static void CreateRef(SCDynamicStoreRef *storeRef, unsigned x) { + unsigned err = 0; + SCDynamicStoreRef ref = anotherCreateRef(&err, x); // why this is being inlined? + if (err) { + //expected-note@-1{{Assuming 'err' is not equal to 0}} + //expected-note@-2{{Taking true branch}} + CFRelease(ref); + ref = 0; + } + *storeRef = ref; +} + +//CHECK: +//CHECK: files +//CHECK: +//CHECK: +//CHECK: diagnostics +//CHECK: +//CHECK: +//CHECK: path +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line33 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line33 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line33 +//CHECK: col30 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth0 +//CHECK: extended_message +//CHECK: Variable 'storeRef' initialized to nil +//CHECK: message +//CHECK: Variable 'storeRef' initialized to nil +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line33 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line33 +//CHECK: col21 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line34 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line34 +//CHECK: col13 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line34 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line34 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line34 +//CHECK: col27 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth0 +//CHECK: extended_message +//CHECK: Calling 'CreateRef' +//CHECK: message +//CHECK: Calling 'CreateRef' +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line42 +//CHECK: col1 +//CHECK: file0 +//CHECK: +//CHECK: depth1 +//CHECK: extended_message +//CHECK: Entered call from 'test' +//CHECK: message +//CHECK: Entered call from 'test' +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line42 +//CHECK: col1 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line42 +//CHECK: col6 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: 
line43 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line43 +//CHECK: col12 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line43 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line43 +//CHECK: col12 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col6 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col6 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col9 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col11 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line45 +//CHECK: col9 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col9 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col11 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth1 +//CHECK: extended_message +//CHECK: Assuming 'err' is not equal to 0 +//CHECK: message +//CHECK: Assuming 'err' is not equal to 0 +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col9 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line45 +//CHECK: col11 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line48 +//CHECK: col9 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line48 +//CHECK: col17 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line48 +//CHECK: col9 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line48 +//CHECK: col17 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line51 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line51 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line34 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line34 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line34 +//CHECK: col27 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth1 +//CHECK: extended_message +//CHECK: Returning from 'CreateRef' +//CHECK: message +//CHECK: Returning from 'CreateRef' +//CHECK: +//CHECK: +//CHECK: kindcontrol +//CHECK: edges +//CHECK: +//CHECK: +//CHECK: start +//CHECK: +//CHECK: +//CHECK: line34 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line34 +//CHECK: col13 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: end +//CHECK: +//CHECK: +//CHECK: line37 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line37 +//CHECK: col13 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: kindevent +//CHECK: location +//CHECK: +//CHECK: line37 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: ranges +//CHECK: +//CHECK: +//CHECK: +//CHECK: line37 +//CHECK: col15 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: line37 +//CHECK: 
col22 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: depth0 +//CHECK: extended_message +//CHECK: Null pointer argument in call to CFRelease +//CHECK: message +//CHECK: Null pointer argument in call to CFRelease +//CHECK: +//CHECK: +//CHECK: descriptionNull pointer argument in call to CFRelease +//CHECK: categoryAPI Misuse (Apple) +//CHECK: typenull passed to CFRetain/CFRelease/CFMakeCollectable +//CHECK: issue_context_kindObjective-C method +//CHECK: issue_contexttest +//CHECK: issue_hash5 +//CHECK: location +//CHECK: +//CHECK: line37 +//CHECK: col5 +//CHECK: file0 +//CHECK: +//CHECK: +//CHECK: +//CHECK: +//CHECK: diff --git a/test/Analysis/domtest.c b/test/Analysis/domtest.c index 0f370db..dd72117 100644 --- a/test/Analysis/domtest.c +++ b/test/Analysis/domtest.c @@ -1,5 +1,5 @@ // RUN: rm -f %t -// RUN: %clang -cc1 -analyze -analyzer-checker=debug.DumpDominators %s > %t 2>&1 +// RUN: %clang_cc1 -analyze -analyzer-checker=debug.DumpDominators %s > %t 2>&1 // RUN: FileCheck --input-file=%t %s // Test the DominatorsTree implementation with various control flows diff --git a/test/Analysis/dtor.cpp b/test/Analysis/dtor.cpp index 1f45925..f461945 100644 --- a/test/Analysis/dtor.cpp +++ b/test/Analysis/dtor.cpp @@ -1,6 +1,7 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc,debug.ExprInspection -analyzer-store region -analyzer-ipa=inlining -cfg-add-implicit-dtors -Wno-null-dereference -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc,debug.ExprInspection -analyzer-ipa=inlining -analyzer-config c++-inlining=destructors -Wno-null-dereference -verify %s void clang_analyzer_eval(bool); +void clang_analyzer_checkInlined(bool); class A { public: @@ -102,7 +103,7 @@ void testMultipleInheritance3() { // Remove dead bindings... doSomething(); // destructor called here - // expected-warning@27 {{Attempt to free released memory}} + // expected-warning@28 {{Attempt to free released memory}} } } @@ -227,3 +228,76 @@ namespace DestructorVirtualCalls { clang_analyzer_eval(c == 3); // expected-warning{{TRUE}} } } + + +namespace DestructorsShouldNotAffectReturnValues { + class Dtor { + public: + ~Dtor() { + clang_analyzer_checkInlined(true); // expected-warning{{TRUE}} + } + }; + + void *allocate() { + Dtor d; + return malloc(4); // no-warning + } + + void test() { + // At one point we had an issue where the statements inside an + // inlined destructor kept us from finding the return statement, + // leading the analyzer to believe that the malloc'd memory had leaked. 
+ void *p = allocate(); + free(p); // no-warning + } +} + +namespace MultipleInheritanceVirtualDtors { + class VirtualDtor { + protected: + virtual ~VirtualDtor() { + clang_analyzer_checkInlined(true); // expected-warning{{TRUE}} + } + }; + + class NonVirtualDtor { + protected: + ~NonVirtualDtor() { + clang_analyzer_checkInlined(true); // expected-warning{{TRUE}} + } + }; + + class SubclassA : public VirtualDtor, public NonVirtualDtor { + public: + virtual ~SubclassA() {} + }; + class SubclassB : public NonVirtualDtor, public VirtualDtor { + public: + virtual ~SubclassB() {} + }; + + void test() { + SubclassA a; + SubclassB b; + } +} + +namespace ExplicitDestructorCall { + class VirtualDtor { + public: + virtual ~VirtualDtor() { + clang_analyzer_checkInlined(true); // expected-warning{{TRUE}} + } + }; + + class Subclass : public VirtualDtor { + public: + virtual ~Subclass() { + clang_analyzer_checkInlined(false); // no-warning + } + }; + + void destroy(Subclass *obj) { + obj->VirtualDtor::~VirtualDtor(); + } +} diff --git a/test/Analysis/dtors-in-dtor-cfg-output.cpp b/test/Analysis/dtors-in-dtor-cfg-output.cpp index 68ba37e..f0546fc 100644 --- a/test/Analysis/dtors-in-dtor-cfg-output.cpp +++ b/test/Analysis/dtors-in-dtor-cfg-output.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=debug.DumpCFG -cfg-add-implicit-dtors %s 2>&1 | FileCheck %s +// RUN: %clang_cc1 -analyze -analyzer-checker=debug.DumpCFG %s 2>&1 | FileCheck %s // XPASS: * class A { diff --git a/test/Analysis/elementtype.c b/test/Analysis/elementtype.c index 7452a0a..1b26811 100644 --- a/test/Analysis/elementtype.c +++ b/test/Analysis/elementtype.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region %s typedef struct added_obj_st { int type; diff --git a/test/Analysis/engine/replay-without-inlining.c b/test/Analysis/engine/replay-without-inlining.c index 9ec2d08..0602973 100644 --- a/test/Analysis/engine/replay-without-inlining.c +++ b/test/Analysis/engine/replay-without-inlining.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc -verify %s +// expected-no-diagnostics typedef struct { char I[4]; diff --git a/test/Analysis/exceptions.mm b/test/Analysis/exceptions.mm new file mode 100644 index 0000000..dab1b5e --- /dev/null +++ b/test/Analysis/exceptions.mm @@ -0,0 +1,38 @@ +// RUN: %clang_cc1 -analyze -fexceptions -fobjc-exceptions -fcxx-exceptions -analyzer-checker=core,unix.Malloc,debug.ExprInspection -verify %s + +void clang_analyzer_checkInlined(bool); + +typedef typeof(sizeof(int)) size_t; +void *malloc(size_t); +void free(void *); + + +id getException(); +void inlinedObjC() { + clang_analyzer_checkInlined(true); // expected-warning{{TRUE}} + @throw getException(); +} + +int testObjC() { + int a; // uninitialized + void *mem = malloc(4); // no-warning (ObjC exceptions are usually fatal) + inlinedObjC(); + free(mem); + return a; // no-warning +} + + +void inlinedCXX() { + clang_analyzer_checkInlined(true); // expected-warning{{TRUE}} + throw -1; +} + +int testCXX() { + int a; // uninitialized + // FIXME: this should be reported as a leak, because C++ exceptions are + // often not fatal. 
+ void *mem = malloc(4); + inlinedCXX(); + free(mem); + return a; // no-warning +} diff --git a/test/Analysis/exercise-ps.c b/test/Analysis/exercise-ps.c index b592c33..675dd4e 100644 --- a/test/Analysis/exercise-ps.c +++ b/test/Analysis/exercise-ps.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s // // Just exercise the analyzer on code that has at one point caused issues // (i.e., no assertions or crashes). diff --git a/test/Analysis/fields.c b/test/Analysis/fields.c index 2e72c77..12e8bbf 100644 --- a/test/Analysis/fields.c +++ b/test/Analysis/fields.c @@ -1,4 +1,6 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core %s -analyzer-store=region -verify +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection %s -analyzer-store=region -verify + +void clang_analyzer_eval(int); unsigned foo(); typedef struct bf { unsigned x:2; } bf; @@ -26,3 +28,11 @@ void test() { Point p; (void)(p = getit()).x; } + + +void testLazyCompoundVal() { + Point p = {42, 0}; + Point q; + clang_analyzer_eval((q = p).x == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(q.x == 42); // expected-warning{{TRUE}} +} diff --git a/test/Analysis/free.c b/test/Analysis/free.c index f688db7..0b283ee 100644 --- a/test/Analysis/free.c +++ b/test/Analysis/free.c @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-checker=core,unix.Malloc -fblocks -verify %s -// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-checker=core,experimental.unix.MallocWithAnnotations -fblocks -verify %s +// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-checker=core,alpha.unix.MallocWithAnnotations -fblocks -verify %s void free(void *); void t1 () { diff --git a/test/Analysis/func.c b/test/Analysis/func.c index 709ebf7..9abb560 100644 --- a/test/Analysis/func.c +++ b/test/Analysis/func.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core,debug.ExprInspection -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -analyzer-store=region -verify %s void clang_analyzer_eval(int); diff --git a/test/Analysis/global-region-invalidation.c b/test/Analysis/global-region-invalidation.c index 71a7285..2d64b49 100644 --- a/test/Analysis/global-region-invalidation.c +++ b/test/Analysis/global-region-invalidation.c @@ -1,9 +1,9 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -disable-free -analyzer-eagerly-assume -analyzer-checker=core,deadcode,experimental.security.taint,debug.TaintTest,debug.ExprInspection -verify %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -disable-free -analyzer-eagerly-assume -analyzer-checker=core,deadcode,alpha.security.taint,debug.TaintTest,debug.ExprInspection -verify %s void clang_analyzer_eval(int); // Note, we do need to include headers here, since the analyzer checks if the function declaration is located in a system header. -#include "system-header-simulator.h" +#include "Inputs/system-header-simulator.h" // Test that system header does not invalidate the internal global. 
int size_rdar9373039 = 1; diff --git a/test/Analysis/idempotent-operations-limited-loops.c b/test/Analysis/idempotent-operations-limited-loops.c index 71e7c27..c8c51cf 100644 --- a/test/Analysis/idempotent-operations-limited-loops.c +++ b/test/Analysis/idempotent-operations-limited-loops.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=core,experimental.deadcode.IdempotentOperations -analyzer-max-loop 3 -verify %s -// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=core,experimental.deadcode.IdempotentOperations -analyzer-max-loop 4 -verify %s -// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=core,experimental.deadcode.IdempotentOperations %s -verify +// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=core,alpha.deadcode.IdempotentOperations -analyzer-max-loop 3 -verify %s +// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=core,alpha.deadcode.IdempotentOperations -analyzer-max-loop 4 -verify %s +// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=core,alpha.deadcode.IdempotentOperations %s -verify void always_warning() { int *p = 0; *p = 0xDEADBEEF; } // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} diff --git a/test/Analysis/idempotent-operations.c b/test/Analysis/idempotent-operations.c index 6cc9a01..04c9bc1 100644 --- a/test/Analysis/idempotent-operations.c +++ b/test/Analysis/idempotent-operations.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=experimental.deadcode.IdempotentOperations -verify %s +// RUN: %clang_cc1 -Wno-int-to-pointer-cast -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=alpha.deadcode.IdempotentOperations -verify %s // Basic tests @@ -234,3 +234,11 @@ void rdar8601243() { (void) start; } + +float testFloatCast(int i) { + float f = i; + + // Don't crash when trying to create a "zero" float. 
+ return f - f; +} + diff --git a/test/Analysis/idempotent-operations.cpp b/test/Analysis/idempotent-operations.cpp index 51b5905..9663665 100644 --- a/test/Analysis/idempotent-operations.cpp +++ b/test/Analysis/idempotent-operations.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=experimental.deadcode.IdempotentOperations -verify %s +// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=alpha.deadcode.IdempotentOperations -verify %s // C++ specific false positives diff --git a/test/Analysis/idempotent-operations.m b/test/Analysis/idempotent-operations.m index 9a9820c..306376d 100644 --- a/test/Analysis/idempotent-operations.m +++ b/test/Analysis/idempotent-operations.m @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=experimental.deadcode.IdempotentOperations,osx.cocoa.RetainCount -verify %s +// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-opt-analyze-nested-blocks -analyzer-checker=alpha.deadcode.IdempotentOperations,osx.cocoa.RetainCount -verify %s +// expected-no-diagnostics typedef signed char BOOL; typedef unsigned long NSUInteger; diff --git a/test/Analysis/initializer.cpp b/test/Analysis/initializer.cpp index d43c8cf..92d581b 100644 --- a/test/Analysis/initializer.cpp +++ b/test/Analysis/initializer.cpp @@ -1,6 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-store region -cfg-add-implicit-dtors -std=c++11 -verify %s - -// We don't inline constructors unless we have destructors turned on. +// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc,debug.ExprInspection -analyzer-ipa=inlining -analyzer-config c++-inlining=constructors -std=c++11 -verify %s void clang_analyzer_eval(bool); @@ -57,10 +55,6 @@ void testDelegatingConstructor() { } -// ------------------------------------ -// False negatives -// ------------------------------------ - struct RefWrapper { RefWrapper(int *p) : x(*p) {} RefWrapper(int &r) : x(r) {} @@ -69,10 +63,20 @@ struct RefWrapper { void testReferenceMember() { int *p = 0; - RefWrapper X(p); // should warn in the constructor + RefWrapper X(p); // expected-warning@-7 {{Dereference of null pointer}} } void testReferenceMember2() { int *p = 0; - RefWrapper X(*p); // should warn here + // FIXME: We should warn here, since we're creating the reference here. + RefWrapper X(*p); // expected-warning@-12 {{Dereference of null pointer}} } + + +extern "C" char *strdup(const char *); + +class StringWrapper { + char *str; +public: + StringWrapper(const char *input) : str(strdup(input)) {} // no-warning +}; diff --git a/test/Analysis/inline-not-supported.c b/test/Analysis/inline-not-supported.c index bff0e4d..756d5d8 100644 --- a/test/Analysis/inline-not-supported.c +++ b/test/Analysis/inline-not-supported.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fblocks -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-store region -verify %s +// RUN: %clang_cc1 -fblocks -analyze -analyzer-checker=core -verify %s // For now, don't inline varargs. void foo(int *x, ...) { @@ -16,7 +16,7 @@ void taz() { baz(0, 2); // no-warning } -// For now, don't inline blocks. +// For now, don't inline global blocks. 
void (^qux)(int *p) = ^(int *p) { *p = 1; }; void test_qux() { qux(0); // no-warning diff --git a/test/Analysis/inline-plist.c b/test/Analysis/inline-plist.c index 1523e82..999ebdb 100644 --- a/test/Analysis/inline-plist.c +++ b/test/Analysis/inline-plist.c @@ -1,40 +1,53 @@ -// RUN: %clang --analyze %s -Xclang -analyzer-ipa=inlining -fblocks -o %t +// RUN: %clang --analyze %s -fblocks -Xanalyzer -analyzer-output=text -Xanalyzer -analyzer-config -Xanalyzer suppress-null-return-paths=false -Xclang -verify %s +// RUN: %clang --analyze %s -fblocks -Xanalyzer -analyzer-config -Xanalyzer suppress-null-return-paths=false -o %t // RUN: FileCheck -input-file %t %s // void mmm(int y) { if (y != 0) - y++; + y++; } int foo(int x, int y) { - mmm(y); - if (x != 0) - x++; - return 5/x; + mmm(y); + if (x != 0) { + // expected-note@-1 {{Assuming 'x' is equal to 0}} + // expected-note@-2 {{Taking false branch}} + x++; + } + return 5/x; // expected-warning{{Division by zero}} expected-note{{Division by zero}} } // Test a bug triggering only when inlined. void has_bug(int *p) { - *p = 0xDEADBEEF; + *p = 0xDEADBEEF; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} expected-note{{Dereference of null pointer (loaded from variable 'p')}} } void test_has_bug() { has_bug(0); + // expected-note@-1 {{Passing null pointer value via 1st parameter 'p'}} + // expected-note@-2 {{Calling 'has_bug'}} } void triggers_bug(int *p) { - *p = 0xDEADBEEF; + *p = 0xDEADBEEF; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} expected-note{{Dereference of null pointer (loaded from variable 'p')}} } // This function triggers a bug by calling triggers_bug(). The diagnostics // should show when p is assumed to be null. void bar(int *p) { - if (!!p) + if (!!p) { + // expected-note@-1 {{Assuming 'p' is null}} + // expected-note@-2 {{Taking false branch}} return; + } - if (p == 0) + if (p == 0) { + // expected-note@-1 {{Taking true branch}} triggers_bug(p); + // expected-note@-1 {{Passing null pointer value via 1st parameter 'p'}} + // expected-note@-2 {{Calling 'triggers_bug'}} + } } // ========================================================================== // @@ -42,1148 +55,1841 @@ void bar(int *p) { // ========================================================================== // void test_block__capture_null() { - int *p = 0; - ^(){ *p = 1; }(); + int *p = 0; // expected-note{{Variable 'p' initialized to a null pointer value}} + ^(){ // expected-note {{Calling anonymous block}} + *p = 1; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} expected-note{{Dereference of null pointer (loaded from variable 'p')}} + }(); + } void test_block_ret() { - int *p = ^(){ int *q = 0; return q; }(); - *p = 1; + int *p = ^(){ // expected-note {{Calling anonymous block}} expected-note{{Returning to caller}} expected-note {{Variable 'p' initialized to a null pointer value}} + int *q = 0; // expected-note {{Variable 'q' initialized to a null pointer value}} + return q; // expected-note {{Returning null pointer (loaded from 'q')}} + }(); + *p = 1; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} expected-note{{Dereference of null pointer (loaded from variable 'p')}} } void test_block_blockvar() { __block int *p; - ^(){ p = 0; }(); - *p = 1; + ^(){ // expected-note{{Calling anonymous block}} expected-note{{Returning to caller}} + p = 0; // expected-note{{Null pointer value stored to 'p'}} + }(); + *p = 1; // expected-warning{{Dereference of 
null pointer (loaded from variable 'p')}} expected-note{{Dereference of null pointer (loaded from variable 'p')}} } void test_block_arg() { int *p; - ^(int **q){ *q = 0; }(&p); - *p = 1; + ^(int **q){ // expected-note{{Calling anonymous block}} expected-note{{Returning to caller}} + *q = 0; // expected-note{{Null pointer value stored to 'p'}} + }(&p); + *p = 1; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} expected-note{{Dereference of null pointer (loaded from variable 'p')}} }
[removed // CHECK lines: the old plist expectations for the "Division by zero" report in 'foo' (line 14, issue_hash 4) and the null-pointer dereference reports in 'has_bug' (line 19), 'triggers_bug' (line 27), 'test_block__capture_null' (line 46), 'test_block_ret' (line 51, issue_hash 2), 'test_block_blockvar' (line 57, issue_hash 3), and 'test_block_arg' (line 63, issue_hash 3)]
[added // CHECK-NEXT lines: regenerated plist expectations at the shifted source locations: "Assuming 'x' is equal to 0" and "Division by zero" in 'foo' (lines 12-18, issue_hash 7); "Passing null pointer value via 1st parameter 'p'", "Calling 'has_bug'", "Entered call from 'test_has_bug'", and the dereference in 'has_bug' (lines 22-27); "Assuming 'p' is null", "Passing null pointer value via 1st parameter 'p'", "Calling 'triggers_bug'", "Entered call from 'bar'", and the dereference in 'triggers_bug' (lines 32-47); "Variable 'p' initialized to a null pointer value", "Calling anonymous block", "Entered call from 'test_block__capture_null'", and the dereference in the block (lines 58-60); "Calling anonymous block", "Entered call from 'test_block_ret'", "Variable 'q' initialized to a null pointer value", "Returning null pointer (loaded from 'q')", "Returning to caller", "Variable 'p' initialized to a null pointer value", and the dereference in 'test_block_ret' (lines 66-70)]
+// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_block_ret +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line70 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line74 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line74 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line77 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling anonymous block +// CHECK-NEXT: message +// CHECK-NEXT: Calling anonymous block +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'test_block_blockvar' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'test_block_blockvar' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// 
CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line77 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning to caller +// CHECK-NEXT: message +// CHECK-NEXT: Returning to caller +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line78 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line78 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line78 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line78 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line78 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_block_blockvar +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line78 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line85 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling anonymous block +// CHECK-NEXT: message +// CHECK-NEXT: Calling anonymous block +// CHECK-NEXT: 
+// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'test_block_arg' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'test_block_arg' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line84 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line84 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line84 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line84 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line84 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line85 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning to caller +// CHECK-NEXT: message +// CHECK-NEXT: Returning to caller +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line83 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer 
(loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_block_arg +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: diff --git a/test/Analysis/inline-unique-reports.c b/test/Analysis/inline-unique-reports.c index 9248ad2..356ab72 100644 --- a/test/Analysis/inline-unique-reports.c +++ b/test/Analysis/inline-unique-reports.c @@ -1,4 +1,4 @@ -// RUN: %clang --analyze %s -Xclang -analyzer-ipa=inlining -o %t > /dev/null 2>&1 +// RUN: %clang --analyze %s -o %t > /dev/null 2>&1 // RUN: FileCheck -input-file %t %s static inline bug(int *p) { diff --git a/test/Analysis/inline.c b/test/Analysis/inline.c index 944e1e2..8cba63f 100644 --- a/test/Analysis/inline.c +++ b/test/Analysis/inline.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -analyzer-store region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -verify %s void clang_analyzer_eval(int); void clang_analyzer_checkInlined(int); diff --git a/test/Analysis/inline.cpp b/test/Analysis/inline.cpp index 6b9a885..ddcf5d0 100644 --- a/test/Analysis/inline.cpp +++ b/test/Analysis/inline.cpp @@ -1,8 +1,18 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc,debug.ExprInspection -analyzer-ipa=inlining -verify %s void clang_analyzer_eval(bool); void clang_analyzer_checkInlined(bool); +typedef __typeof__(sizeof(int)) size_t; +extern "C" void *malloc(size_t); + +// This is the standard placement new. +inline void* operator new(size_t, void* __p) throw() +{ + return __p; +} + + class A { public: int getZero() { return 0; } @@ -193,3 +203,167 @@ namespace Invalidation { } }; } + +namespace DefaultArgs { + int takesDefaultArgs(int i = 42) { + return -i; + } + + void testFunction() { + clang_analyzer_eval(takesDefaultArgs(1) == -1); // expected-warning{{TRUE}} + clang_analyzer_eval(takesDefaultArgs() == -42); // expected-warning{{TRUE}} + } + + class Secret { + public: + static const int value = 42; + int get(int i = value) { + return i; + } + }; + + void testMethod() { + Secret obj; + clang_analyzer_eval(obj.get(1) == 1); // expected-warning{{TRUE}} + + // FIXME: Should be 'TRUE'. See PR13673 or . + clang_analyzer_eval(obj.get() == 42); // expected-warning{{UNKNOWN}} + + // FIXME: Even if we constrain the variable, we still have a problem. + // See PR13385 or . + if (Secret::value != 42) + return; + clang_analyzer_eval(Secret::value == 42); // expected-warning{{TRUE}} + clang_analyzer_eval(obj.get() == 42); // expected-warning{{UNKNOWN}} + } +} + +namespace OperatorNew { + class IntWrapper { + public: + int value; + + IntWrapper(int input) : value(input) { + // We don't want this constructor to be inlined unless we can actually + // use the proper region for operator new. + // See PR12014 and . 
+ clang_analyzer_checkInlined(false); // no-warning + } + }; + + void test() { + IntWrapper *obj = new IntWrapper(42); + // should be TRUE + clang_analyzer_eval(obj->value == 42); // expected-warning{{UNKNOWN}} + } + + void testPlacement() { + IntWrapper *obj = static_cast<IntWrapper *>(malloc(sizeof(IntWrapper))); + IntWrapper *alias = new (obj) IntWrapper(42); + + clang_analyzer_eval(alias == obj); // expected-warning{{TRUE}} + + // should be TRUE + clang_analyzer_eval(obj->value == 42); // expected-warning{{UNKNOWN}} + } +} + + +namespace VirtualWithSisterCasts { + // This entire set of tests exercises casts from sister classes and + // from classes outside the hierarchy, which can very much confuse + // code that uses DynamicTypeInfo or needs to construct CXXBaseObjectRegions. + // These examples used to cause crashes in +Asserts builds. + struct Parent { + virtual int foo(); + int x; + }; + + struct A : Parent { + virtual int foo() { return 42; } + }; + + struct B : Parent { + virtual int foo(); + }; + + struct Grandchild : public A {}; + + struct Unrelated {}; + + void testDowncast(Parent *b) { + A *a = (A *)(void *)b; + clang_analyzer_eval(a->foo() == 42); // expected-warning{{UNKNOWN}} + + a->x = 42; + clang_analyzer_eval(a->x == 42); // expected-warning{{TRUE}} + } + + void testRelated(B *b) { + A *a = (A *)(void *)b; + clang_analyzer_eval(a->foo() == 42); // expected-warning{{UNKNOWN}} + + a->x = 42; + clang_analyzer_eval(a->x == 42); // expected-warning{{TRUE}} + } + + void testUnrelated(Unrelated *b) { + A *a = (A *)(void *)b; + clang_analyzer_eval(a->foo() == 42); // expected-warning{{UNKNOWN}} + + a->x = 42; + clang_analyzer_eval(a->x == 42); // expected-warning{{TRUE}} + } + + void testCastViaNew(B *b) { + Grandchild *g = new (b) Grandchild(); + // FIXME: We actually now have perfect type info because of 'new'. + // This should be TRUE. + clang_analyzer_eval(g->foo() == 42); // expected-warning{{UNKNOWN}} + + g->x = 42; + clang_analyzer_eval(g->x == 42); // expected-warning{{TRUE}} + } +} + + +namespace QualifiedCalls { + void test(One *object) { + // This uses the One class from the top of the file. + clang_analyzer_eval(object->getNum() == 1); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(object->One::getNum() == 1); // expected-warning{{TRUE}} + clang_analyzer_eval(object->A::getNum() == 0); // expected-warning{{TRUE}} + + // getZero is non-virtual. + clang_analyzer_eval(object->getZero() == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(object->One::getZero() == 0); // expected-warning{{TRUE}} + clang_analyzer_eval(object->A::getZero() == 0); // expected-warning{{TRUE}} +} +} + + +namespace rdar12409977 { + struct Base { + int x; + }; + + struct Parent : public Base { + virtual Parent *vGetThis(); + Parent *getThis() { return vGetThis(); } + }; + + struct Child : public Parent { + virtual Child *vGetThis() { return this; } + }; + + void test() { + Child obj; + obj.x = 42; + + // Originally, calling a devirtualized method with a covariant return type + // caused a crash because the return value had the wrong type. When we then + // go to layer a CXXBaseObjectRegion on it, the base isn't a direct base of + // the object region and we get an assertion failure.
+ clang_analyzer_eval(obj.getThis()->x == 42); // expected-warning{{TRUE}} + } +} diff --git a/test/Analysis/inline2.c b/test/Analysis/inline2.c index 473146c..bae7434 100644 --- a/test/Analysis/inline2.c +++ b/test/Analysis/inline2.c @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-store region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -verify %s +// expected-no-diagnostics // Test parameter 'a' is registered to LiveVariables analysis data although it // is not referenced in the function body. diff --git a/test/Analysis/inline3.c b/test/Analysis/inline3.c index 968d82a..e7f4775 100644 --- a/test/Analysis/inline3.c +++ b/test/Analysis/inline3.c @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-store region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -verify %s +// expected-no-diagnostics // Test when entering f1(), we set the right AnalysisDeclContext to Environment. // Otherwise, block-level expr '1 && a' would not be block-level. diff --git a/test/Analysis/inline4.c b/test/Analysis/inline4.c index e7715e0..71a379a 100644 --- a/test/Analysis/inline4.c +++ b/test/Analysis/inline4.c @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-store region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -verify %s +// expected-no-diagnostics int g(int a) { return a; diff --git a/test/Analysis/inlining/DynDispatchBifurcate.m b/test/Analysis/inlining/DynDispatchBifurcate.m index 6637dfd..1fffb65 100644 --- a/test/Analysis/inlining/DynDispatchBifurcate.m +++ b/test/Analysis/inlining/DynDispatchBifurcate.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=dynamic-bifurcate -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx -analyzer-ipa=dynamic-bifurcate -verify %s #include "InlineObjCInstanceMethod.h" @@ -179,3 +179,13 @@ int testPropertySynthesized(PublicClass *p) { [p setValue1:0]; return 5/[p value1]; } + +// Test definition not available edge case. +@interface DefNotAvailClass : NSObject +@end +id testDefNotAvailableInlined(DefNotAvailClass *C) { + return [C mem]; // expected-warning {{instance method '-mem' not found}} +} +id testDefNotAvailable(DefNotAvailClass *C) { + return testDefNotAvailableInlined(C); +} \ No newline at end of file diff --git a/test/Analysis/inlining/InlineObjCInstanceMethod.m b/test/Analysis/inlining/InlineObjCInstanceMethod.m index 31b6d5b..f6aa50a 100644 --- a/test/Analysis/inlining/InlineObjCInstanceMethod.m +++ b/test/Analysis/inlining/InlineObjCInstanceMethod.m @@ -1,15 +1,32 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=dynamic-bifurcate -verify %s +// RUN: %clang --analyze -Xanalyzer -analyzer-checker=osx.cocoa.IncompatibleMethodTypes,osx.coreFoundation.CFRetainRelease -Xclang -verify %s #include "InlineObjCInstanceMethod.h" +typedef const struct __CFString * CFStringRef; +typedef const void * CFTypeRef; +extern CFTypeRef CFRetain(CFTypeRef cf); +extern void CFRelease(CFTypeRef cf); +extern CFStringRef getString(void); + // Method is defined in the parent; called through self. 
@interface MyParent : NSObject - (int)getInt; +- (const struct __CFString *) testCovariantReturnType __attribute__((cf_returns_retained)); @end @implementation MyParent - (int)getInt { return 0; } + +- (CFStringRef) testCovariantReturnType __attribute__((cf_returns_retained)) { + CFStringRef Str = ((void*)0); + Str = getString(); + if (Str) { + CFRetain(Str); + } + return Str; +} + @end @interface MyClass : MyParent @@ -83,4 +100,49 @@ // Don't crash if we don't know the receiver's region. void randomlyMessageAnObject(MyClass *arr[], int i) { (void)[arr[i] getInt]; -} \ No newline at end of file +} + + +@interface EvilChild : MyParent +- (id)getInt; +- (const struct __CFString *) testCovariantReturnType __attribute__((cf_returns_retained)); +@end + +@implementation EvilChild +- (id)getInt { // expected-warning {{types are incompatible}} + return self; +} +- (CFStringRef) testCovariantReturnType __attribute__((cf_returns_retained)) { + CFStringRef Str = ((void*)0); + Str = getString(); + if (Str) { + CFRetain(Str); + } + return Str; +} + +@end + +int testNonCovariantReturnType() { + MyParent *obj = [[EvilChild alloc] init]; + + // Devirtualization allows us to directly call -[EvilChild getInt], but + // that returns an id, not an int. There is an off-by-default warning for + // this, -Woverriding-method-mismatch, and an on-by-default analyzer warning, + // osx.cocoa.IncompatibleMethodTypes. This code would probably crash at + // runtime, but at least the analyzer shouldn't crash. + int x = 1 + [obj getInt]; + + [obj release]; + return 5/(x-1); // no-warning +} + +int testCovariantReturnTypeNoErrorSinceTypesMatch() { + MyParent *obj = [[EvilChild alloc] init]; + + CFStringRef S = ((void*)0); + S = [obj testCovariantReturnType]; + if (S) + CFRelease(S); + CFRelease(obj); +} diff --git a/test/Analysis/inlining/RetainCountExamples.m b/test/Analysis/inlining/RetainCountExamples.m index 2b682c2..276ab52 100644 --- a/test/Analysis/inlining/RetainCountExamples.m +++ b/test/Analysis/inlining/RetainCountExamples.m @@ -15,6 +15,7 @@ typedef struct objc_object { -(id)copy; - (Class)class; -(id)retain; +- (oneway void)release; @end @interface SelfStaysLive : NSObject @@ -30,4 +31,97 @@ typedef struct objc_object { void selfStaysLive() { SelfStaysLive *foo = [[SelfStaysLive alloc] init]; [foo release]; -} \ No newline at end of file +} + +// Test that retain release checker warns on leaks and use-after-frees when +// self init is not enabled. +// radar://12115830 +@interface ParentOfCell : NSObject +- (id)initWithInt: (int)inInt; +@end +@interface Cell : ParentOfCell{ + int x; +} +- (id)initWithInt: (int)inInt; ++ (void)testOverRelease; ++ (void)testLeak; +@property int x; +@end +@implementation Cell +@synthesize x; +- (id) initWithInt: (int)inInt { + [super initWithInt: inInt]; + self.x = inInt; // no-warning + return self; // Self Init checker would produce a warning here. +} ++ (void) testOverRelease { + Cell *sharedCell3 = [[Cell alloc] initWithInt: 3]; + [sharedCell3 release]; + [sharedCell3 release]; // expected-warning {{Reference-counted object is used after it is released}} +} ++ (void) testLeak { + Cell *sharedCell4 = [[Cell alloc] initWithInt: 3]; // expected-warning {{leak}} +} +@end + +// We should stop tracking some objects even when we inline the call. +// Specifically, the objects passed into calls with delegate and callback +// parameters.
+@class DelegateTest; +typedef void (*ReleaseCallbackTy) (DelegateTest *c); + +@interface Delegate : NSObject +@end + +@interface DelegateTest : NSObject { + Delegate *myDel; +} +// Object initialized with a delegate which could potentially release it. +- (id)initWithDelegate: (id) d; + +- (void) setDelegate: (id) d; + +// Releases object through callback. ++ (void)updateObject:(DelegateTest*)obj WithCallback:(ReleaseCallbackTy)rc; + ++ (void)test: (Delegate *)d; + +@property (assign) Delegate* myDel; +@end + +void releaseObj(DelegateTest *c); + +// Releases object through callback. +void updateObject(DelegateTest *c, ReleaseCallbackTy rel) { + rel(c); +} + +@implementation DelegateTest +@synthesize myDel; + +- (id) initWithDelegate: (id) d { + if ((self = [super init])) + myDel = d; + return self; +} + +- (void) setDelegate: (id) d { + myDel = d; +} + ++ (void)updateObject:(DelegateTest*)obj WithCallback:(ReleaseCallbackTy)rc { + rc(obj); +} + ++ (void) test: (Delegate *)d { + DelegateTest *obj1 = [[DelegateTest alloc] initWithDelegate: d]; // no-warning + DelegateTest *obj2 = [[DelegateTest alloc] init]; // no-warning + DelegateTest *obj3 = [[DelegateTest alloc] init]; // no-warning + updateObject(obj2, releaseObj); + [DelegateTest updateObject: obj3 + WithCallback: releaseObj]; + DelegateTest *obj4 = [[DelegateTest alloc] init]; // no-warning + [obj4 setDelegate: d]; +} +@end + diff --git a/test/Analysis/inlining/assume-super-init-does-not-return-nil.m b/test/Analysis/inlining/assume-super-init-does-not-return-nil.m new file mode 100644 index 0000000..cda1e87 --- /dev/null +++ b/test/Analysis/inlining/assume-super-init-does-not-return-nil.m @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -analyze -analyzer-ipa=dynamic-bifurcate -analyzer-checker=core,osx -verify %s + +typedef signed char BOOL; + +@protocol NSObject - (BOOL)isEqual:(id)object; @end +@interface NSObject {} ++(id)alloc; ++(id)new; +-(id)init; +-(id)autorelease; +-(id)copy; +- (Class)class; +-(id)retain; +- (oneway void)release; +@end + +@interface Cell : NSObject { + int x; +} +- (id) init; +- (void)test; +@end + +@implementation Cell +- (id) init { + if ((self = [super init])) { + return self; + } + // Test that this is being analyzed. + int m; + m = m + 1; //expected-warning {{The left operand of '+' is a garbage value}} + return self; +} + +// Make sure that we do not propagate the 'nil' check from inlined 'init' to 'test'.
+- (void) test { + Cell *newCell = [[Cell alloc] init]; + newCell->x = 5; // no-warning + [newCell release]; +} +@end diff --git a/test/Analysis/inlining/dyn-dispatch-bifurcate.cpp b/test/Analysis/inlining/dyn-dispatch-bifurcate.cpp index fa473ae..3771348 100644 --- a/test/Analysis/inlining/dyn-dispatch-bifurcate.cpp +++ b/test/Analysis/inlining/dyn-dispatch-bifurcate.cpp @@ -15,3 +15,19 @@ void testKnown() { A a; clang_analyzer_eval(a.get() == 0); // expected-warning{{TRUE}} } + + +namespace ReinterpretDisruptsDynamicTypeInfo { + class Parent {}; + + class Child : public Parent { + public: + virtual int foo() { return 42; } + }; + + void test(Parent *a) { + Child *b = reinterpret_cast<Child *>(a); + if (!b) return; + clang_analyzer_eval(b->foo() == 42); // expected-warning{{UNKNOWN}} + } +} diff --git a/test/Analysis/inlining/eager-reclamation-path-notes.c b/test/Analysis/inlining/eager-reclamation-path-notes.c new file mode 100644 index 0000000..6c7c05a --- /dev/null +++ b/test/Analysis/inlining/eager-reclamation-path-notes.c @@ -0,0 +1,788 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=text -analyzer-config graph-trim-interval=5 -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=plist-multi-file -analyzer-config graph-trim-interval=5 %s -o %t.plist +// RUN: FileCheck --input-file=%t.plist %s + +void use(int *ptr, int val) { + *ptr = val; // expected-warning {{Dereference of null pointer (loaded from variable 'ptr')}} + // expected-note@-1 {{Dereference of null pointer (loaded from variable 'ptr')}} +} + +int compute() { + // Do something that will take enough processing to trigger trimming. + // FIXME: This is actually really sensitive. If the interval timing is just + // wrong, the node for the actual dereference may also be collected, and all + // the path notes will disappear.
+ return 2 + 3 + 4 + 5 + 6; +} + +void testSimple() { + int *p = 0; + // expected-note@-1 {{Variable 'p' initialized to a null pointer value}} + use(p, compute()); + // expected-note@-1 {{Passing null pointer value via 1st parameter 'ptr'}} + // expected-note@-2 {{Calling 'use'}} +} + + +void use2(int *ptr, int val) { + *ptr = val; // expected-warning {{Dereference of null pointer (loaded from variable 'ptr')}} + // expected-note@-1 {{Dereference of null pointer (loaded from variable 'ptr')}} +} + +void passThrough(int *p) { + use2(p, compute()); + // expected-note@-1 {{Passing null pointer value via 1st parameter 'ptr'}} + // expected-note@-2 {{Calling 'use2'}} +} + +void testChainedCalls() { + int *ptr = 0; + // expected-note@-1 {{Variable 'ptr' initialized to a null pointer value}} + passThrough(ptr); + // expected-note@-1 {{Passing null pointer value via 1st parameter 'p'}} + // expected-note@-2 {{Calling 'passThrough'}} +} + +// CHECK: diagnostics +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col7 +// 
CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Passing null pointer value via 1st parameter 'ptr' +// CHECK-NEXT: message +// CHECK-NEXT: Passing null pointer value via 1st parameter 'ptr' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line21 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'use' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'use' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'testSimple' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'testSimple' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'ptr') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'ptr') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'ptr') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextuse +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'ptr' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'ptr' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Passing null pointer value via 1st parameter 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Passing null pointer value via 1st parameter 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line41 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'passThrough' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'passThrough' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line32 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'testChainedCalls' +// CHECK-NEXT: 
message +// CHECK-NEXT: Entered call from 'testChainedCalls' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line32 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line32 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Passing null pointer value via 1st parameter 'ptr' +// CHECK-NEXT: message +// CHECK-NEXT: Passing null pointer value via 1st parameter 'ptr' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line33 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'use2' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'use2' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line27 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth2 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'passThrough' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'passThrough' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line27 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line27 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line28 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line28 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line28 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line28 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line28 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth2 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'ptr') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'ptr') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'ptr') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextuse2 +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line28 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: diff --git a/test/Analysis/inlining/false-positive-suppression.c b/test/Analysis/inlining/false-positive-suppression.c new file mode 100644 index 0000000..20cc311 --- /dev/null +++ b/test/Analysis/inlining/false-positive-suppression.c @@ -0,0 +1,184 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-config suppress-null-return-paths=false -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -verify -DSUPPRESSED=1 %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-config avoid-suppressing-null-argument-paths=true -DSUPPRESSED=1 -DNULL_ARGS=1 -verify %s + +int opaquePropertyCheck(void *object); +int coin(); + +int *getNull() { + return 0; +} + +int *dynCastToInt(void *ptr) { + if (opaquePropertyCheck(ptr)) + return (int *)ptr; + return 0; +} + +int *dynCastOrNull(void *ptr) { + if (!ptr) + return 0; + if (opaquePropertyCheck(ptr)) + return (int *)ptr; + return 0; +} + + +void testDynCast(void *p) { + int *casted = dynCastToInt(p); + *casted = 1; +#ifndef SUPPRESSED + // 
expected-warning@-2 {{Dereference of null pointer}} +#endif +} + +void testDynCastOrNull(void *p) { + int *casted = dynCastOrNull(p); + *casted = 1; +#ifndef SUPPRESSED + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + + +void testBranch(void *p) { + int *casted; + + // Although the report will be suppressed on one branch, it should still be + // valid on the other. + if (coin()) { + casted = dynCastToInt(p); + } else { + if (p) + return; + casted = (int *)p; + } + + *casted = 1; // expected-warning {{Dereference of null pointer}} +} + +void testBranchReversed(void *p) { + int *casted; + + // Although the report will be suppressed on one branch, it should still be + // valid on the other. + if (coin()) { + if (p) + return; + casted = (int *)p; + } else { + casted = dynCastToInt(p); + } + + *casted = 1; // expected-warning {{Dereference of null pointer}} +} + + +// -------------------------- +// "Suppression suppression" +// -------------------------- + +void testDynCastOrNullOfNull() { + // Don't suppress when one of the arguments is NULL. + int *casted = dynCastOrNull(0); + *casted = 1; +#if !SUPPRESSED || NULL_ARGS + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + +void testDynCastOfNull() { + // Don't suppress when one of the arguments is NULL. + int *casted = dynCastToInt(0); + *casted = 1; +#if !SUPPRESSED || NULL_ARGS + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + +int *lookUpInt(int unused) { + if (coin()) + return 0; + static int x; + return &x; +} + +void testZeroIsNotNull() { + // /Do/ suppress when the argument is 0 (an integer). + int *casted = lookUpInt(0); + *casted = 1; +#ifndef SUPPRESSED + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + +void testTrackNull() { + // /Do/ suppress if the null argument came from another call returning null. + int *casted = dynCastOrNull(getNull()); + *casted = 1; +#ifndef SUPPRESSED + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + +void testTrackNullVariable() { + // /Do/ suppress if the null argument came from another call returning null. + int *ptr; + ptr = getNull(); + int *casted = dynCastOrNull(ptr); + *casted = 1; +#ifndef SUPPRESSED + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + + +// --------------------------------------- +// FALSE NEGATIVES (over-suppression) +// --------------------------------------- + +void testNoArguments() { + // In this case the function has no branches, and MUST return null. + int *casted = getNull(); + *casted = 1; +#ifndef SUPPRESSED + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + +int *getNullIfNonNull(void *input) { + if (input) + return 0; + static int x; + return &x; +} + +void testKnownPath(void *input) { + if (!input) + return; + + // In this case we have a known value for the argument, and thus the path + // through the function doesn't ever split. + int *casted = getNullIfNonNull(input); + *casted = 1; +#ifndef SUPPRESSED + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + +int *alwaysReturnNull(void *input) { + if (opaquePropertyCheck(input)) + return 0; + return 0; +} + +void testAlwaysReturnNull(void *input) { + // In this case all paths out of the function return 0, but they are all + // dominated by a branch whose condition we don't know! 
+ int *casted = alwaysReturnNull(input); + *casted = 1; +#ifndef SUPPRESSED + // expected-warning@-2 {{Dereference of null pointer}} +#endif +} + diff --git a/test/Analysis/inlining/path-notes.c b/test/Analysis/inlining/path-notes.c index 0885eafa..9e70802 100644 --- a/test/Analysis/inlining/path-notes.c +++ b/test/Analysis/inlining/path-notes.c @@ -1,5 +1,6 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-output=text -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-output=plist-multi-file %s -o - | FileCheck %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=text -analyzer-config suppress-null-return-paths=false -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=plist-multi-file -analyzer-config suppress-null-return-paths=false %s -o %t.plist +// RUN: FileCheck --input-file=%t.plist %s void zero(int **p) { *p = 0; @@ -18,8 +19,7 @@ void testZero(int *a) { void check(int *p) { if (p) { // expected-note@-1 + {{Assuming 'p' is null}} - // expected-note@-2 + {{Assuming pointer value is null}} - // expected-note@-3 + {{Taking false branch}} + // expected-note@-2 + {{Taking false branch}} return; } return; @@ -57,1235 +57,3178 @@ void testStoreCheck(int *a) { } -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: files -// CHECK: -// CHECK: {{.*}}path-notes.c -// CHECK: +int *getZero() { + int *p = 0; + // expected-note@-1 + {{Variable 'p' initialized to a null pointer value}} + // ^ This note checks that we add a second visitor for the return value. + return p; + // expected-note@-1 + {{Returning null pointer (loaded from 'p')}} +} + +void testReturnZero() { + *getZero() = 1; // expected-warning{{Dereference of null pointer}} + // expected-note@-1 {{Calling 'getZero'}} + // expected-note@-2 {{Returning from 'getZero'}} + // expected-note@-3 {{Dereference of null pointer}} +} + +int testReturnZero2() { + return *getZero(); // expected-warning{{Dereference of null pointer}} + // expected-note@-1 {{Calling 'getZero'}} + // expected-note@-2 {{Returning from 'getZero'}} + // expected-note@-3 {{Dereference of null pointer}} +} + +void testInitZero() { + int *a = getZero(); + // expected-note@-1 {{Calling 'getZero'}} + // expected-note@-2 {{Returning from 'getZero'}} + // expected-note@-3 {{Variable 'a' initialized to a null pointer value}} + *a = 1; // expected-warning{{Dereference of null pointer}} + // expected-note@-1 {{Dereference of null pointer (loaded from variable 'a')}} +} + +void testStoreZero(int *a) { + a = getZero(); + // expected-note@-1 {{Calling 'getZero'}} + // expected-note@-2 {{Returning from 'getZero'}} + // expected-note@-3 {{Null pointer value stored to 'a'}} + *a = 1; // expected-warning{{Dereference of null pointer}} + // expected-note@-1 {{Dereference of null pointer (loaded from variable 'a')}} +} + +void usePointer(int *p) { + *p = 1; // expected-warning{{Dereference of null pointer}} + // expected-note@-1 {{Dereference of null pointer}} +} + +void testUseOfNullPointer() { + // Test the case where an argument expression is itself a call. 
+  usePointer(getZero());
+  // expected-note@-1 {{Calling 'getZero'}}
+  // expected-note@-2 {{Returning from 'getZero'}}
+  // expected-note@-3 {{Passing null pointer value via 1st parameter 'p'}}
+  // expected-note@-4 {{Calling 'usePointer'}}
+}
+
[Remainder of the hunk: the old "// CHECK:" plist expectations are deleted and replaced with "// CHECK-NEXT:" lines spelling out the diagnostic paths (control-flow edges and "Calling", "Entered call from", "Returning from", "Assuming", and "Dereference of null pointer" events with their line/column locations) for testZero, testCheck, testInitCheck, testStoreCheck, testReturnZero, testReturnZero2, testInitZero, and testStoreZero; the plist XML markup inside these CHECK lines was lost in extraction and is not reproduced here.]
CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning null pointer (loaded from 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Returning null pointer (loaded from 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning from 'getZero' +// CHECK-NEXT: message +// CHECK-NEXT: Returning from 'getZero' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// 
CHECK-NEXT: line92 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Null pointer value stored to 'a' +// CHECK-NEXT: message +// CHECK-NEXT: Null pointer value stored to 'a' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line92 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'a') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'a') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'a') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttestStoreZero +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: 
Calling 'getZero' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'getZero' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'testUseOfNullPointer' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'testUseOfNullPointer' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line64 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning null pointer (loaded from 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Returning null pointer (loaded from 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returning from 'getZero' +// CHECK-NEXT: message +// CHECK-NEXT: Returning from 'getZero' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Passing null pointer value via 1st parameter 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Passing null pointer value via 1st parameter 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line107 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'usePointer' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'usePointer' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'testUseOfNullPointer' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'testUseOfNullPointer' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 
diff --git a/test/Analysis/inlining/path-notes.m b/test/Analysis/inlining/path-notes.m
new file mode 100644
index 0000000..b15b869
--- /dev/null
+++ b/test/Analysis/inlining/path-notes.m
@@ -0,0 +1,469 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=text -analyzer-config suppress-null-return-paths=false -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=plist-multi-file -analyzer-config suppress-null-return-paths=false %s -o %t.plist
+// RUN: FileCheck --input-file=%t.plist %s
+
+@interface Test
+@property int *p;
+@end
+
+int *getZeroIfNil(Test *x) {
+  return x.p;
+  // expected-note@-1 {{No method is called because the receiver is nil}}
+  // expected-note@-2 {{Returning null pointer}}
+}
+
+void testReturnZeroIfNil() {
+  *getZeroIfNil(0) = 1; // expected-warning{{Dereference of null pointer}}
+  // expected-note@-1 {{Calling 'getZeroIfNil'}}
+  // expected-note@-2 {{Passing nil object reference via 1st parameter 'x'}}
+  // expected-note@-3 {{Returning from 'getZeroIfNil'}}
+  // expected-note@-4 {{Dereference of null pointer}}
+}
+
+
[elided: the remaining "+// CHECK:" and "+// CHECK-NEXT:" plist expectations for this
 file, covering the path for 'testReturnZeroIfNil': a nil 'x' passed into 'getZeroIfNil',
 the "No method is called because the receiver is nil" and "Returning null pointer"
 events, "Returning from 'getZeroIfNil'", and the final "Dereference of null pointer"
 report]
diff --git a/test/Analysis/inlining/retain-count-self-init.m b/test/Analysis/inlining/retain-count-self-init.m
new file mode 100644
index 0000000..ee8dbe3
--- /dev/null
+++ b/test/Analysis/inlining/retain-count-self-init.m
@@ -0,0 +1,68 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,osx.cocoa.SelfInit -analyzer-ipa=dynamic-bifurcate -verify %s
+
+typedef signed char BOOL;
+typedef struct objc_class *Class;
+typedef struct objc_object {
+  Class isa;
+} *id;
+@protocol NSObject - (BOOL)isEqual:(id)object; @end
+@interface NSObject {}
++(id)alloc;
++(id)new;
+- (oneway void)release;
+-(id)init;
+-(id)autorelease;
+-(id)copy;
+- (Class)class;
+-(id)retain;
+@end
+
+// We do not want to overwhelm the user with error messages in case they forgot to
+// assign to self and check that the result of [super init] is non-nil. So
+// stop tracking the receiver of init with respect to the Retain/Release checker.
+// radar://12115830
+@interface ParentOfCell : NSObject
+- (id)initWithInt: (int)inInt;
+@end
+@interface Cell : ParentOfCell{
+  int x;
+}
+- (id)init;
++ (void)test;
+@property int x;
+@end
+@implementation Cell
+@synthesize x;
+- (id) init {
+  [super init];
+  self.x = 3; // no-warning
+  return self; // expected-warning {{Returning 'self' while it is not set to the result of '[(super or self)}}
+}
+- (id) initWithInt: (int)inInt {
+  [super initWithInt: inInt];
+  self.x = inInt; // no-warning
+  return self; // expected-warning {{Returning 'self' while it is not set to the result of '[(super or self)}}
+}
+- (id) init2 {
+  [self init]; // The call [self init] is inlined. We will warn inside the inlined body.
+  self.x = 2; // no-warning
+  return self;
+}
+
+- (id) initWithIntGood: (int)inInt {
+  if (self = [super initWithInt: inInt]) {
+    self.x = inInt;
+  }
+  return self;
+}
++ (void) test {
+  Cell *sharedCell1 = [[Cell alloc] init];
+  [sharedCell1 release];
+  Cell *sharedCell2 = [[Cell alloc] initWithInt: 3];
+  [sharedCell2 release];
+  Cell *sharedCell3 = [[Cell alloc] initWithIntGood: 3];
+  [sharedCell3 release];
+}
+
+@end
+
diff --git a/test/Analysis/inlining/stl.cpp b/test/Analysis/inlining/stl.cpp
new file mode 100644
index 0000000..cec7821
--- /dev/null
+++ b/test/Analysis/inlining/stl.cpp
@@ -0,0 +1,29 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc,debug.ExprInspection -analyzer-ipa=dynamic -analyzer-config c++-stdlib-inlining=false -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc,debug.ExprInspection -analyzer-ipa=dynamic -analyzer-config c++-stdlib-inlining=true -DINLINE=1 -verify %s
+
+#include "../Inputs/system-header-simulator-cxx.h"
+
+void clang_analyzer_eval(bool);
+
+void testVector(std::vector<int> &nums) {
+  if (nums.begin()) return;
+  if (nums.end()) return;
+
+  clang_analyzer_eval(nums.size() == 0);
+#if INLINE
+  // expected-warning@-2 {{TRUE}}
+#else
+  // expected-warning@-4 {{UNKNOWN}}
+#endif
+}
+
+void testException(std::exception e) {
+  // Notice that the argument is NOT passed by reference, so we can devirtualize.
+  const char *x = e.what();
+  clang_analyzer_eval(x == 0);
+#if INLINE
+  // expected-warning@-2 {{TRUE}}
+#else
+  // expected-warning@-4 {{UNKNOWN}}
+#endif
+}
diff --git a/test/Analysis/inlining/test-always-inline-size-option.c b/test/Analysis/inlining/test-always-inline-size-option.c
new file mode 100644
index 0000000..6b3c13d
--- /dev/null
+++ b/test/Analysis/inlining/test-always-inline-size-option.c
@@ -0,0 +1,48 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-inline-max-stack-depth=3 -analyzer-config ipa-always-inline-size=3 -verify %s
+
+void clang_analyzer_eval(int);
+int nested5() {
+  if (5 < 3)
+    return 0;
+  else
+    if (3 == 3)
+      return 0;
+  return 0;
+}
+int nested4() {
+  return nested5();
+}
+int nested3() {
+  return nested4();
+}
+int nested2() {
+  return nested3();
+}
+int nested1() {
+  return nested2();
+}
+
+void testNested() {
+  clang_analyzer_eval(nested1() == 0); // expected-warning{{TRUE}}
+}
+
+// Make sure we terminate a recursive path.
+int recursive() {
+  return recursive();
+}
+int callRecursive() {
+  return recursive();
+}
+
+int mutuallyRecursive1();
+
+int mutuallyRecursive2() {
+  return mutuallyRecursive1();
+}
+
+int mutuallyRecursive1() {
+  return mutuallyRecursive2();
+}
+int callMutuallyRecursive() {
+  return mutuallyRecursive1();
+}
diff --git a/test/Analysis/inlining/test_objc_inlining_option.m b/test/Analysis/inlining/test_objc_inlining_option.m
new file mode 100644
index 0000000..34502c4
--- /dev/null
+++ b/test/Analysis/inlining/test_objc_inlining_option.m
@@ -0,0 +1,34 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=dynamic-bifurcate -analyzer-config objc-inlining=false -verify %s
+// expected-no-diagnostics
+
+typedef signed char BOOL;
+typedef struct objc_class *Class;
+typedef struct objc_object {
+  Class isa;
+} *id;
+@protocol NSObject - (BOOL)isEqual:(id)object; @end
+@interface NSObject {}
++(id)alloc;
+-(id)init;
+-(id)autorelease;
+-(id)copy;
+- (Class)class;
+-(id)retain;
+@end
+
+// Vanilla: ObjC class method is called by name.
+@interface MyParent : NSObject
++ (int)getInt;
+@end
+@interface MyClass : MyParent
++ (int)getInt;
+@end
+@implementation MyClass
++ (int)testClassMethodByName {
+  int y = [MyClass getInt];
+  return 5/y; // no-warning
+}
++ (int)getInt {
+  return 0;
+}
+@end
\ No newline at end of file
diff --git a/test/Analysis/ivars.m b/test/Analysis/ivars.m
index 42e92d2..c717da6 100644
--- a/test/Analysis/ivars.m
+++ b/test/Analysis/ivars.m
@@ -130,3 +130,11 @@ struct S makeS();
 
 }
 @end
+
+
+int testNull(Root *obj) {
+  if (obj) return 0;
+
+  int *x = &obj->uniqueID;
+  return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
+}
diff --git a/test/Analysis/keychainAPI.m b/test/Analysis/keychainAPI.m
index 585a32d..fe6c61d 100644
--- a/test/Analysis/keychainAPI.m
+++ b/test/Analysis/keychainAPI.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=osx.SecKeychainAPI %s -analyzer-ipa=inlining -verify
+// RUN: %clang_cc1 -analyze -analyzer-checker=osx.SecKeychainAPI %s -verify
 
 // Fake typedefs.
 typedef unsigned int OSStatus;
diff --git a/test/Analysis/logical-ops.c b/test/Analysis/logical-ops.c
new file mode 100644
index 0000000..a1223b3
--- /dev/null
+++ b/test/Analysis/logical-ops.c
@@ -0,0 +1,27 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -verify %s
+
+void clang_analyzer_eval(int);
+
+void testAnd(int i, int *p) {
+  int *nullP = 0;
+  int *knownP = &i;
+  clang_analyzer_eval((knownP && knownP) == 1); // expected-warning{{TRUE}}
+  clang_analyzer_eval((knownP && nullP) == 0); // expected-warning{{TRUE}}
+  clang_analyzer_eval((knownP && p) == 1); // expected-warning{{UNKNOWN}}
+}
+
+void testOr(int i, int *p) {
+  int *nullP = 0;
+  int *knownP = &i;
+  clang_analyzer_eval((nullP || knownP) == 1); // expected-warning{{TRUE}}
+  clang_analyzer_eval((nullP || nullP) == 0); // expected-warning{{TRUE}}
+  clang_analyzer_eval((nullP || p) == 1); // expected-warning{{UNKNOWN}}
+}
+
+
+// PR13461
+int testTypeIsInt(int i, void *p) {
+  if (i | (p && p))
+    return 1;
+  return 0;
+}
diff --git a/test/Analysis/lvalue.cpp b/test/Analysis/lvalue.cpp
index 0cc42f5..9a6bd59 100644
--- a/test/Analysis/lvalue.cpp
+++ b/test/Analysis/lvalue.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-store=region -verify %s
+// expected-no-diagnostics
 
 int f1() {
   int x = 0, y = 1;
diff --git a/test/Analysis/malloc-annotations.c b/test/Analysis/malloc-annotations.c
index 9c040b6..2a078b6 100644
--- a/test/Analysis/malloc-annotations.c
+++ b/test/Analysis/malloc-annotations.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.deadcode.UnreachableCode,experimental.core.CastSize,experimental.unix.MallocWithAnnotations -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.deadcode.UnreachableCode,alpha.core.CastSize,alpha.unix.MallocWithAnnotations -analyzer-store=region -verify %s
 typedef __typeof(sizeof(int)) size_t;
 void *malloc(size_t);
 void free(void *);
diff --git a/test/Analysis/malloc-interprocedural.c b/test/Analysis/malloc-interprocedural.c
index 589bc4f..79cbf24 100644
--- a/test/Analysis/malloc-interprocedural.c
+++ b/test/Analysis/malloc-interprocedural.c
@@ -1,15 +1,17 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=unix.Malloc -analyzer-ipa=inlining -analyzer-inline-max-stack-depth=5 -analyzer-inline-max-function-size=6 -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=unix.Malloc -analyzer-inline-max-stack-depth=5 -verify %s
 
-#include "system-header-simulator.h"
+#include "Inputs/system-header-simulator.h"
 
-typedef __typeof(sizeof(int)) size_t;
 void *malloc(size_t);
 void *valloc(size_t);
 void free(void *);
 void *realloc(void *ptr, size_t size);
 void *reallocf(void *ptr, size_t size);
 void *calloc(size_t nmemb, size_t size);
-extern void exit(int) __attribute__ ((__noreturn__));
+
+void exit(int) __attribute__ ((__noreturn__));
+void *memcpy(void * restrict s1, const void * restrict s2, size_t n);
+size_t strlen(const char *);
 
 static void my_malloc1(void **d, size_t size) {
   *d = malloc(size);
@@ -96,3 +98,34 @@ int uafAndCallsFooWithEmptyReturn() {
   fooWithEmptyReturn(12);
   return *x; // expected-warning {{Use of memory after it is freed}}
 }
+
+
+// If we inline any of the malloc-family functions, the checker shouldn't also
+// try to do additional modeling.
+char *strndup(const char *str, size_t n) {
+  if (!str)
+    return 0;
+
+  // DO NOT FIX. This is to test that we are actually using the inlined
+  // behavior!
+  if (n < 5)
+    return 0;
+
+  size_t length = strlen(str);
+  if (length < n)
+    n = length;
+
+  char *result = malloc(n + 1);
+  memcpy(result, str, n);
+  result[n] = '\0';
+  return result;
+}
+
+void useStrndup(size_t n) {
+  if (n == 0)
+    (void)strndup(0, 20); // no-warning
+  else if (n < 5)
+    (void)strndup("hi there", n); // no-warning
+  else
+    (void)strndup("hi there", n); // expected-warning{{leak}}
+}
diff --git a/test/Analysis/malloc-overflow.c b/test/Analysis/malloc-overflow.c
index 714fd3d3..2f443ca 100644
--- a/test/Analysis/malloc-overflow.c
+++ b/test/Analysis/malloc-overflow.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.security.MallocOverflow -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.security.MallocOverflow -verify %s
 
 #define NULL ((void *) 0)
 typedef __typeof__(sizeof(int)) size_t;
diff --git a/test/Analysis/malloc-overflow.cpp b/test/Analysis/malloc-overflow.cpp
index c1ac6be..e3a0408 100644
--- a/test/Analysis/malloc-overflow.cpp
+++ b/test/Analysis/malloc-overflow.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.security.MallocOverflow -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.security.MallocOverflow -verify %s
+// expected-no-diagnostics
 
 class A {
 public:
diff --git a/test/Analysis/malloc-plist.c b/test/Analysis/malloc-plist.c
index 11eef3e1..12430a6 100644
--- a/test/Analysis/malloc-plist.c
+++ b/test/Analysis/malloc-plist.c
@@ -1,5 +1,6 @@
+// RUN: rm -f %t
 // RUN: %clang_cc1 -analyze -analyzer-checker=unix.Malloc -analyzer-output=plist -o %t %s
-// RUN: FileCheck --input-file %t %s
+// RUN: FileCheck -input-file %t %s
 
 typedef __typeof(sizeof(int)) size_t;
 void *malloc(size_t);
@@ -168,4265 +169,4344 @@ void use_function_with_leak7() {
     function_with_leak7();
 }
[elided: garbled "-// CHECK:" plist expectations being removed from this file, covering
 the "Memory is allocated" and "Memory is never released; potential leak" paths for
 'diagnosticTest', 'myArrayAllocation', 'reallocDiagnostics' (with "Attempt to
 reallocate memory", "Assuming 'tmp' is null", "Reallocation failed"), 'test_wrapper'
 (via 'wrapper'), and 'test_double_action_call' (via 'my_malloc_and_free' and 'my_free')]
CHECK: -// CHECK: line50 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line50 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth2 -// CHECK: extended_message -// CHECK: Memory is released -// CHECK: message -// CHECK: Memory is released -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col17 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth2 -// CHECK: extended_message -// CHECK: Returned released memory via 1st parameter -// CHECK: message -// CHECK: Returned released memory via 1st parameter -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col13 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line60 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line60 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line60 -// CHECK: col28 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Returned released memory via 1st parameter -// CHECK: message -// CHECK: Returned released memory via 1st parameter -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line60 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line60 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line61 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line61 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line61 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line61 -// CHECK: col12 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line61 -// CHECK: col14 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Use of memory after it is freed -// CHECK: message -// CHECK: Use of memory after it is freed -// CHECK: -// CHECK: -// CHECK: descriptionUse of memory after it is freed -// CHECK: categoryMemory Error -// CHECK: typeUse-after-free -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_double_action_call -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line61 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// 
CHECK: -// CHECK: line74 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col30 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line74 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col35 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col30 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line75 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'my_realloc' -// CHECK: message -// CHECK: Calling 'my_realloc' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line65 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'reallocIntra' -// CHECK: message -// CHECK: Entered call from 'reallocIntra' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col24 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: 
location -// CHECK: -// CHECK: line67 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col40 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Attempt to reallocate memory -// CHECK: message -// CHECK: Attempt to reallocate memory -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line67 -// CHECK: col24 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line68 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col12 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Assuming 'tmp' is null -// CHECK: message -// CHECK: Assuming 'tmp' is null -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line68 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Reallocation failed -// CHECK: message -// CHECK: Reallocation failed -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line68 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col14 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line75 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// 
CHECK: line75 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Reallocation of 1st parameter failed -// CHECK: message -// CHECK: Reallocation of 1st parameter failed -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line76 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'buf' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'buf' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'buf' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextreallocIntra -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line76 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line84 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line84 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line85 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line85 -// CHECK: col26 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line85 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line85 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line85 -// CHECK: col28 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'malloc_wrapper_ret' -// CHECK: message -// CHECK: Calling 'malloc_wrapper_ret' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line80 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'use_ret' -// CHECK: message -// CHECK: Entered call from 'use_ret' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line80 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line80 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col19 -// 
CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col24 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line81 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col28 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line85 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line85 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line85 -// CHECK: col28 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Returned allocated memory -// CHECK: message -// CHECK: Returned allocated memory -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line85 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line85 -// CHECK: col26 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line86 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line86 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line86 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'v' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'v' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'v' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextuse_ret -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line86 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line90 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line90 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line92 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line92 -// 
CHECK: col24 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line92 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line97 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line97 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line97 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'm' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'm' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'm' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextLeakedSymbol -// CHECK: issue_hash8 -// CHECK: location -// CHECK: -// CHECK: line97 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line105 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line105 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line105 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'function_with_leak1' -// CHECK: message -// CHECK: Calling 'function_with_leak1' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line101 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'use_function_with_leak1' -// CHECK: message -// CHECK: Entered call from 'use_function_with_leak1' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line102 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: 
depth1 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line102 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'x' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextfunction_with_leak1 -// CHECK: issue_hash1 -// CHECK: location -// CHECK: -// CHECK: line102 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line114 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line114 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line114 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'function_with_leak2' -// CHECK: message -// CHECK: Calling 'function_with_leak2' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line109 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'use_function_with_leak2' -// CHECK: message -// CHECK: Entered call from 'use_function_with_leak2' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line109 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line109 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line110 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is 
allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line110 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line111 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line111 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line111 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'x' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextfunction_with_leak2 -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line111 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line123 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line123 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line123 -// CHECK: col26 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'function_with_leak3' -// CHECK: message -// CHECK: Calling 'function_with_leak3' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line117 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'use_function_with_leak3' -// CHECK: message -// CHECK: Entered call from 'use_function_with_leak3' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line117 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line117 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line118 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated 
-// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line118 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line119 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line119 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line119 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line119 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line119 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line119 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line119 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line119 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line120 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line120 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line120 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'x' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextfunction_with_leak3 -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line120 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line134 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line134 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line134 -// CHECK: col26 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'function_with_leak4' -// CHECK: message -// CHECK: Calling 'function_with_leak4' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line126 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'use_function_with_leak4' -// CHECK: message -// CHECK: Entered call from 'use_function_with_leak4' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line126 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line126 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges 
-// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line127 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line127 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line128 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line128 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line128 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line128 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line128 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line128 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line128 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line128 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line131 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line131 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line131 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'x' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextfunction_with_leak4 -// CHECK: issue_hash5 -// CHECK: location -// CHECK: -// CHECK: line131 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line145 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line145 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line145 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'function_with_leak5' -// CHECK: 
message -// CHECK: Calling 'function_with_leak5' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line140 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'use_function_with_leak5' -// CHECK: message -// CHECK: Entered call from 'use_function_with_leak5' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line140 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line140 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line141 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line141 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line142 -// CHECK: col12 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line142 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line142 -// CHECK: col12 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'x' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextfunction_with_leak5 -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line142 -// CHECK: col12 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line156 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line156 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line156 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'function_with_leak6' -// CHECK: message -// CHECK: Calling 'function_with_leak6' -// 
CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line151 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'use_function_with_leak6' -// CHECK: message -// CHECK: Entered call from 'use_function_with_leak6' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line151 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line151 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line152 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line152 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line153 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line153 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line153 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: message -// CHECK: Memory is never released; potential leak of memory pointed to by 'x' -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak of memory pointed to by 'x' -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextfunction_with_leak6 -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line153 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line168 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line168 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line168 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Calling 'function_with_leak7' -// CHECK: message -// CHECK: Calling 'function_with_leak7' -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location 
-// CHECK: -// CHECK: line164 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Entered call from 'use_function_with_leak7' -// CHECK: message -// CHECK: Entered call from 'use_function_with_leak7' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line164 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line164 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line165 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line165 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line165 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line165 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line165 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line165 -// CHECK: col24 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line165 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line165 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line165 -// CHECK: col28 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Memory is allocated -// CHECK: message -// CHECK: Memory is allocated -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line168 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line168 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line168 -// CHECK: col25 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth1 -// CHECK: extended_message -// CHECK: Returned allocated memory -// CHECK: message -// CHECK: Returned allocated memory -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line168 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line168 -// CHECK: col23 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line169 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line169 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line169 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Memory is never released; potential leak -// CHECK: message -// CHECK: Memory is never released; potential leak -// CHECK: -// CHECK: -// CHECK: descriptionMemory is never released; potential leak -// CHECK: categoryMemory Error -// CHECK: typeMemory leak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextuse_function_with_leak7 -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line169 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: 
[Elided plist-output CHECK-NEXT expectations: the plist markup (<key>/<string>/<integer>/<dict>/<array> tags) was lost in extraction, leaving only fused text content. The recoverable content of this hunk verifies the malloc checker's path diagnostics in plist form: "event" and "control" path pieces with line/col/file locations, source ranges, edge start/end points, and call depths; path notes such as "Assuming 'in' is > 5", "Assuming 'tmp' is null", "Assuming 'x' is non-null", "Assuming 'y' is not equal to 0", "Memory is allocated", "Memory is released", "Attempt to reallocate memory", "Reallocation failed", "Reallocation of 1st parameter failed", "Returned allocated memory", and "Returned released memory via 1st parameter"; interprocedural notes of the form "Calling 'wrapper'" / "Entered call from 'test_wrapper'" for the callees 'wrapper', 'my_malloc_and_free', 'my_free', 'my_realloc', 'malloc_wrapper_ret', and 'function_with_leak1' through 'function_with_leak4'; and final diagnostics "Memory is never released; potential leak of memory pointed to by 'p' / 'A' / 'buf' / 'v' / 'm' / 'x'" and "Use of memory after it is freed" (category "Memory Error", types "Memory leak" and "Use-after-free"), reported with issue contexts diagnosticTest, myArrayAllocation, reallocDiagnostics, test_wrapper, test_double_action_call, reallocIntra, use_ret, LeakedSymbol, and function_with_leak1 through function_with_leak4 with their use_* callers.]
CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'y' is 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'y' is 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line132 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line132 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line132 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Memory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: message +// CHECK-NEXT: Memory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionMemory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: categoryMemory Error +// CHECK-NEXT: typeMemory leak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextfunction_with_leak4 +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line132 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line146 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line146 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line146 +// 
CHECK-NEXT: col25 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'function_with_leak5' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'function_with_leak5' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'use_function_with_leak5' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'use_function_with_leak5' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Memory is allocated +// CHECK-NEXT: message +// CHECK-NEXT: Memory is allocated +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line142 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line143 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line143 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line143 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Memory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: 
message +// CHECK-NEXT: Memory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionMemory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: categoryMemory Error +// CHECK-NEXT: typeMemory leak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextfunction_with_leak5 +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line143 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line157 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line157 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line157 +// CHECK-NEXT: col25 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'function_with_leak6' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'function_with_leak6' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line152 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'use_function_with_leak6' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'use_function_with_leak6' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line152 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line152 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Memory is allocated +// CHECK-NEXT: message +// CHECK-NEXT: Memory is allocated +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line153 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line154 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line154 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line154 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Memory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: message +// CHECK-NEXT: Memory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionMemory is never released; potential leak of memory pointed to by 'x' +// CHECK-NEXT: categoryMemory Error +// CHECK-NEXT: typeMemory leak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextfunction_with_leak6 +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line154 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col25 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'function_with_leak7' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'function_with_leak7' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line165 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'use_function_with_leak7' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'use_function_with_leak7' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line165 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line165 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col10 +// 
CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line166 +// CHECK-NEXT: col28 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Memory is allocated +// CHECK-NEXT: message +// CHECK-NEXT: Memory is allocated +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col25 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Returned allocated memory +// CHECK-NEXT: message +// CHECK-NEXT: Returned allocated memory +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line170 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line170 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line170 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Memory is never released; potential leak +// CHECK-NEXT: message +// CHECK-NEXT: Memory is never released; potential leak +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionMemory is never released; potential leak +// CHECK-NEXT: categoryMemory Error +// CHECK-NEXT: typeMemory leak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextuse_function_with_leak7 +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line170 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: diff --git a/test/Analysis/malloc-sizeof.c b/test/Analysis/malloc-sizeof.c index 6eb466a..7a8585f 100644 --- a/test/Analysis/malloc-sizeof.c +++ b/test/Analysis/malloc-sizeof.c @@ -34,3 +34,19 @@ void ignore_const() { const char ***y = (const char ***)malloc(1 * sizeof(char *)); // expected-warning {{Result of 'malloc' is converted to a pointer of type 'const char **', which is incompatible with sizeof operand type 'char *'}} free(x); } + +int *mallocArraySize() { + static const int 
sTable[10]; + static const int nestedTable[10][2]; + int *table = malloc(sizeof sTable); + int *table1 = malloc(sizeof nestedTable); + int (*table2)[2] = malloc(sizeof nestedTable); + int (*table3)[10][2] = malloc(sizeof nestedTable); + return table; +} + +int *mallocWrongArraySize() { + static const double sTable[10]; + int *table = malloc(sizeof sTable); // expected-warning {{Result of 'malloc' is converted to a pointer of type 'int', which is incompatible with sizeof operand type 'const double [10]'}} + return table; +} diff --git a/test/Analysis/malloc.c b/test/Analysis/malloc.c index e3d92d9..68308fd 100644 --- a/test/Analysis/malloc.c +++ b/test/Analysis/malloc.c @@ -1,5 +1,7 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.deadcode.UnreachableCode,experimental.core.CastSize,unix.Malloc,debug.ExprInspection -analyzer-store=region -verify %s -#include "system-header-simulator.h" +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.deadcode.UnreachableCode,alpha.core.CastSize,unix.Malloc,debug.ExprInspection -analyzer-store=region -verify %s +// REQUIRES: LP64 + +#include "Inputs/system-header-simulator.h" void clang_analyzer_eval(int); @@ -1000,17 +1002,36 @@ void freeButNoMalloc(int *p, int x){ } struct HasPtr { - int *p; + char *p; }; -int* reallocButNoMalloc(struct HasPtr *a, int c, int size) { +char* reallocButNoMalloc(struct HasPtr *a, int c, int size) { int *s; - a->p = (int *)realloc(a->p, size); - if (a->p == 0) - return 0; // expected-warning{{Memory is never released; potential leak}} + char *b = realloc(a->p, size); + char *m = realloc(a->p, size); // expected-warning {{Attempt to free released memory}} return a->p; } +// We should not warn in this case since the caller will presumably free a->p in all cases. +int reallocButNoMallocPR13674(struct HasPtr *a, int c, int size) { + int *s; + char *b = realloc(a->p, size); + if (b == 0) + return -1; + a->p = b; + return 0; +} + +// Test realloc with no visible malloc. +void *test(void *ptr) { + void *newPtr = realloc(ptr, 4); + if (newPtr == 0) { + if (ptr) + free(ptr); // no-warning + } + return newPtr; +} + // ---------------------------------------------------------------------------- // False negatives. diff --git a/test/Analysis/malloc.cpp b/test/Analysis/malloc.cpp index 72b9272..220d746 100644 --- a/test/Analysis/malloc.cpp +++ b/test/Analysis/malloc.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.deadcode.UnreachableCode,experimental.core.CastSize,unix.Malloc -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.deadcode.UnreachableCode,alpha.core.CastSize,unix.Malloc -analyzer-store=region -verify %s typedef __typeof(sizeof(int)) size_t; void *malloc(size_t); @@ -6,6 +6,11 @@ void free(void *); void *realloc(void *ptr, size_t size); void *calloc(size_t nmemb, size_t size); + +void checkThatMallocCheckerIsRunning() { + malloc(4); // expected-warning{{leak}} +} + // Test for radar://11110132. 
struct Foo { mutable void* m_data; @@ -35,3 +40,23 @@ void r11160612_3(CanFreeMemory* p) { const_ptr_and_callback_def_param(0, x, 12, p->myFree); } + +namespace PR13751 { + class OwningVector { + void **storage; + size_t length; + public: + OwningVector(); + ~OwningVector(); + void push_back(void *Item) { + storage[length++] = Item; + } + }; + + void testDestructors() { + OwningVector v; + v.push_back(malloc(4)); + // no leak warning; freed in destructor + } +} + diff --git a/test/Analysis/malloc.m b/test/Analysis/malloc.m index 08206f3..21d2daf 100644 --- a/test/Analysis/malloc.m +++ b/test/Analysis/malloc.m @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc -analyzer-store=region -verify -Wno-objc-root-class -fblocks %s -#include "system-header-simulator-objc.h" +#include "Inputs/system-header-simulator-objc.h" @class NSString; typedef __typeof(sizeof(int)) size_t; diff --git a/test/Analysis/malloc.mm b/test/Analysis/malloc.mm index 7a9d881..c92c966 100644 --- a/test/Analysis/malloc.mm +++ b/test/Analysis/malloc.mm @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.Malloc -analyzer-store=region -verify -fblocks %s -#include "system-header-simulator-objc.h" +#include "Inputs/system-header-simulator-objc.h" typedef __typeof(sizeof(int)) size_t; void *malloc(size_t); @@ -221,4 +221,60 @@ void foo(NSPointerArray* pointerArray) { void noCrashOnVariableArgumentSelector() { NSMutableString *myString = [NSMutableString stringWithString:@"some text"]; [myString appendFormat:@"some text = %d", 3]; -} \ No newline at end of file +} + +void test12365078_check() { + unichar *characters = (unichar*)malloc(12); + NSString *string = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (!string) free(characters); // no-warning +} + +void test12365078_nocheck() { + unichar *characters = (unichar*)malloc(12); + NSString *string = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; +} + +void test12365078_false_negative() { + unichar *characters = (unichar*)malloc(12); + NSString *string = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (!string) {;} +} + +void test12365078_no_malloc(unichar *characters) { + NSString *string = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (!string) {free(characters);} +} + +NSString *test12365078_no_malloc_returnValue(unichar *characters) { + NSString *string = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (!string) { + return 0; // no-warning + } + return string; +} + +void test12365078_nocheck_nomalloc(unichar *characters) { + NSString *string = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + free(characters); // expected-warning {{Attempt to free non-owned memory}} +} + +void test12365078_nested(unichar *characters) { + NSString *string = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (!string) { + NSString *string2 = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (!string2) { + NSString *string3 = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (!string3) { + NSString *string4 = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (!string4) + free(characters); + } + } + } +} + +void test12365078_check_positive() { + unichar *characters = (unichar*)malloc(12); + 
NSString *string = [[NSString alloc] initWithCharactersNoCopy:characters length:12 freeWhenDone:1]; + if (string) free(characters); // expected-warning{{Attempt to free non-owned memory}} +} diff --git a/test/Analysis/member-expr.cpp b/test/Analysis/member-expr.cpp new file mode 100644 index 0000000..cf43738 --- /dev/null +++ b/test/Analysis/member-expr.cpp @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection %s -verify + +void clang_analyzer_eval(int); + +namespace EnumsViaMemberExpr { + struct Foo { + enum E { + Bar = 1 + }; + }; + + void testEnumVal(Foo Baz) { + clang_analyzer_eval(Baz.Bar == Foo::Bar); // expected-warning{{TRUE}} + } + + void testEnumRef(Foo &Baz) { + clang_analyzer_eval(Baz.Bar == Foo::Bar); // expected-warning{{TRUE}} + } + + void testEnumPtr(Foo *Baz) { + clang_analyzer_eval(Baz->Bar == Foo::Bar); // expected-warning{{TRUE}} + } +} \ No newline at end of file diff --git a/test/Analysis/method-call-intra-p.cpp b/test/Analysis/method-call-intra-p.cpp index 701479f..a170942 100644 --- a/test/Analysis/method-call-intra-p.cpp +++ b/test/Analysis/method-call-intra-p.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-store region -verify %s +// expected-no-diagnostics // Intra-procedural C++ tests. diff --git a/test/Analysis/method-call-path-notes.cpp b/test/Analysis/method-call-path-notes.cpp index 17034b9..a41a786 100644 --- a/test/Analysis/method-call-path-notes.cpp +++ b/test/Analysis/method-call-path-notes.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-output=text -verify %s -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-output=plist-multi-file %s -o - | FileCheck %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-output=plist-multi-file %s -o %t.plist +// RUN: FileCheck --input-file=%t.plist %s // Test warning about null or uninitialized pointer values used as instance member // calls. 
@@ -25,7 +26,7 @@ void test_ic_set_to_null() { } void test_ic_null(TestInstanceCall *p) { - if (!p) // expected-note {{Assuming pointer value is null}} expected-note {{Taking true branch}} + if (!p) // expected-note {{Assuming 'p' is null}} expected-note {{Taking true branch}} p->foo(); // expected-warning {{Called C++ object pointer is null}} expected-note{{Called C++ object pointer is null}} } @@ -37,713 +38,766 @@ void test_ic_member_ptr() { } void test_cast(const TestInstanceCall *p) { - if (!p) // expected-note {{Assuming pointer value is null}} expected-note {{Taking true branch}} + if (!p) // expected-note {{Assuming 'p' is null}} expected-note {{Taking true branch}} const_cast(p)->foo(); // expected-warning {{Called C++ object pointer is null}} expected-note {{Called C++ object pointer is null}} } -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: files -// CHECK: -// CHECK: {{.*}}method-call-path-notes.cpp -// CHECK: // CHECK: diagnostics -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line12 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line12 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line12 -// CHECK: col21 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'p' declared without an initial value -// CHECK: message -// CHECK: Variable 'p' declared without an initial value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line12 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line12 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line13 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line13 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line13 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line13 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line13 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Called C++ object pointer is uninitialized -// CHECK: message -// CHECK: Called C++ object pointer is uninitialized -// CHECK: -// CHECK: -// CHECK: descriptionCalled C++ object pointer is uninitialized -// CHECK: categoryLogic error -// CHECK: typeCalled C++ object pointer is uninitialized -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_ic -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line13 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line17 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col21 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start 
-// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Called C++ object pointer is null -// CHECK: message -// CHECK: Called C++ object pointer is null -// CHECK: -// CHECK: -// CHECK: descriptionCalled C++ object pointer is null -// CHECK: categoryLogic error -// CHECK: typeCalled C++ object pointer is null -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_ic_null -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line23 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Null pointer value stored to 'p' -// CHECK: message -// CHECK: Null pointer value stored to 'p' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line24 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line24 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line24 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line24 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line24 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Called C++ object pointer is null -// CHECK: message -// CHECK: Called C++ object pointer is null -// CHECK: -// CHECK: -// CHECK: descriptionCalled C++ object pointer is null -// CHECK: categoryLogic error -// CHECK: typeCalled C++ object pointer is null -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_ic_set_to_null -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line24 -// CHECK: col3 -// CHECK: file0 -// 
CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Assuming pointer value is null -// CHECK: message -// CHECK: Assuming pointer value is null -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Called C++ object pointer is null -// CHECK: message -// CHECK: Called C++ object pointer is null -// CHECK: -// CHECK: -// CHECK: descriptionCalled C++ object pointer is null -// CHECK: categoryLogic error -// CHECK: typeCalled C++ object pointer is null -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_ic_null -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line33 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line33 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line33 -// CHECK: col21 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line33 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line33 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line36 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line36 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line36 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: 
-// CHECK: -// CHECK: line36 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line36 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Called C++ object pointer is null -// CHECK: message -// CHECK: Called C++ object pointer is null -// CHECK: -// CHECK: -// CHECK: descriptionCalled C++ object pointer is null -// CHECK: categoryLogic error -// CHECK: typeCalled C++ object pointer is null -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_ic_member_ptr -// CHECK: issue_hash4 -// CHECK: location -// CHECK: -// CHECK: line36 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line40 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line40 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line41 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line41 -// CHECK: col14 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line41 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line41 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line41 -// CHECK: col37 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Called C++ object pointer is null -// CHECK: message -// CHECK: Called C++ object pointer is null -// CHECK: -// CHECK: -// CHECK: descriptionCalled C++ object pointer is null -// CHECK: categoryLogic error -// CHECK: typeCalled C++ object pointer is null -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_cast -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line41 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line13 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line13 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line13 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' declared without an initial value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' declared without an initial value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line13 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line13 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line14 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line14 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// 
CHECK-NEXT: line14 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line14 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line14 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Called C++ object pointer is uninitialized +// CHECK-NEXT: message +// CHECK-NEXT: Called C++ object pointer is uninitialized +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCalled C++ object pointer is uninitialized +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeCalled C++ object pointer is uninitialized +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_ic +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line14 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Called C++ object pointer is null +// CHECK-NEXT: message +// CHECK-NEXT: Called C++ object pointer is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCalled C++ object pointer is null +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeCalled C++ object pointer is null +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_ic_null +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// 
[The CHECK-NEXT lines collapsed here lost their plist <key>/<string> markup in extraction and are not reproduced.
They verify the remaining 'path' entries of the new test's plist output: control edges plus the events
"Null pointer value stored to 'p'", "Assuming 'p' is null" and "Called C++ object pointer is null"
(category "Logic error") for the issue contexts test_ic_set_to_null, test_ic_null, test_ic_member_ptr
and test_cast, each with its line/column location and issue_hash.]
diff --git a/test/Analysis/method-call.cpp b/test/Analysis/method-call.cpp
index 9120627..1a2fedd 100644
--- a/test/Analysis/method-call.cpp
+++ b/test/Analysis/method-call.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -analyzer-store region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -analyzer-config c++-inlining=constructors -verify %s
 
 void clang_analyzer_eval(bool);
 
@@ -9,29 +9,39 @@ struct A {
   int getx() const { return x; }
 };
 
+struct B{
+  int x;
+};
+
 void testNullObject(A *a) {
   clang_analyzer_eval(a); // expected-warning{{UNKNOWN}}
   (void)a->getx(); // assume we know what we're doing
   clang_analyzer_eval(a); // expected-warning{{TRUE}}
 }
 
-
-// FIXME: These require constructor inlining to be enabled.
-
 void f1() {
   A x(3);
-  // should be TRUE
-  clang_analyzer_eval(x.getx() == 3); // expected-warning{{UNKNOWN}}
+  clang_analyzer_eval(x.getx() == 3); // expected-warning{{TRUE}}
 }
 
 void f2() {
   const A &x = A(3);
-  // should be TRUE
-  clang_analyzer_eval(x.getx() == 3); // expected-warning{{UNKNOWN}}
+  clang_analyzer_eval(x.getx() == 3); // expected-warning{{TRUE}}
 }
 
 void f3() {
   const A &x = (A)3;
-  // should be TRUE
-  clang_analyzer_eval(x.getx() == 3); // expected-warning{{UNKNOWN}}
+  clang_analyzer_eval(x.getx() == 3); // expected-warning{{TRUE}}
+}
+
+void f4() {
+  A x = 3;
+  clang_analyzer_eval(x.getx() == 3); // expected-warning{{TRUE}}
+}
+
+void checkThatCopyConstructorDoesNotInvalidateObjectBeingCopied() {
+  B t;
+  t.x = 0;
+  B t2(t);
+  clang_analyzer_eval(t.x == 0); // expected-warning{{TRUE}}
 }
diff --git a/test/Analysis/misc-ps-64.m b/test/Analysis/misc-ps-64.m
index e20a27f..1fbbeef 100644
--- a/test/Analysis/misc-ps-64.m
+++ b/test/Analysis/misc-ps-64.m
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=basic -verify -fblocks %s
-// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=range -verify -fblocks %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -fblocks %s
+// expected-no-diagnostics
 
 // - A bunch of misc. failures involving evaluating
 // these expressions and building CFGs.
These tests are here to prevent diff --git a/test/Analysis/misc-ps-arm.m b/test/Analysis/misc-ps-arm.m index a909ef1..1ff0f17 100644 --- a/test/Analysis/misc-ps-arm.m +++ b/test/Analysis/misc-ps-arm.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple thumbv7-apple-ios0.0.0 -analyze -analyzer-checker=core -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks -Wno-objc-root-class %s +// expected-no-diagnostics // - Handle casts of vectors to structs, and loading // a value. diff --git a/test/Analysis/misc-ps-eager-assume.m b/test/Analysis/misc-ps-eager-assume.m index 0aff8e4..9fbe3d0 100644 --- a/test/Analysis/misc-ps-eager-assume.m +++ b/test/Analysis/misc-ps-eager-assume.m @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=range -verify -fblocks %s -analyzer-eagerly-assume +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -fblocks %s -analyzer-eagerly-assume +// expected-no-diagnostics // Delta-reduced header stuff (needed for test cases). typedef signed char BOOL; diff --git a/test/Analysis/misc-ps-ranges.m b/test/Analysis/misc-ps-ranges.m index 00337f4..90add22 100644 --- a/test/Analysis/misc-ps-ranges.m +++ b/test/Analysis/misc-ps-ranges.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -analyzer-constraints=range -verify -fblocks %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -analyzer-constraints=range -verify -fblocks %s // // main's 'argc' argument is always > 0 diff --git a/test/Analysis/misc-ps-region-store-i386.m b/test/Analysis/misc-ps-region-store-i386.m index 3106a24..bed6fc3 100644 --- a/test/Analysis/misc-ps-region-store-i386.m +++ b/test/Analysis/misc-ps-region-store-i386.m @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify -fblocks %s +// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify -fblocks %s +// expected-no-diagnostics // Here is a case where a pointer is treated as integer, invalidated as an // integer, and then used again as a pointer. This test just makes sure diff --git a/test/Analysis/misc-ps-region-store-x86_64.m b/test/Analysis/misc-ps-region-store-x86_64.m index 2c604cf..4575ad4 100644 --- a/test/Analysis/misc-ps-region-store-x86_64.m +++ b/test/Analysis/misc-ps-region-store-x86_64.m @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify -fblocks %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify -fblocks %s +// expected-no-diagnostics // Here is a case where a pointer is treated as integer, invalidated as an // integer, and then used again as a pointer. 
This test just makes sure diff --git a/test/Analysis/misc-ps-region-store.cpp b/test/Analysis/misc-ps-region-store.cpp index e30cedb..a106cf0 100644 --- a/test/Analysis/misc-ps-region-store.cpp +++ b/test/Analysis/misc-ps-region-store.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks %s -fexceptions -fcxx-exceptions -// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks %s -fexceptions -fcxx-exceptions +// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify -fblocks -analyzer-ipa=inlining -analyzer-opt-analyze-nested-blocks %s -fexceptions -fcxx-exceptions +// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify -fblocks -analyzer-ipa=inlining -analyzer-opt-analyze-nested-blocks %s -fexceptions -fcxx-exceptions // Test basic handling of references. char &test1_aux(); @@ -537,7 +537,8 @@ MyEnum rdar10892489_positive() { throw MyEnumValue; } catch (MyEnum e) { int *p = 0; - *p = 0xDEADBEEF; // expected-warning {{null}} + // FALSE NEGATIVE + *p = 0xDEADBEEF; // {{null}} return e; } return MyEnumValue; @@ -562,7 +563,8 @@ void PR11545_positive() { catch (...) { int *p = 0; - *p = 0xDEADBEEF; // expected-warning {{null}} + // FALSE NEGATIVE + *p = 0xDEADBEEF; // {{null}} } } diff --git a/test/Analysis/misc-ps-region-store.m b/test/Analysis/misc-ps-region-store.m index 2b481c4..f772894 100644 --- a/test/Analysis/misc-ps-region-store.m +++ b/test/Analysis/misc-ps-region-store.m @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,experimental.deadcode.IdempotentOperations,experimental.core.CastToStruct,experimental.security.ReturnPtrRange,experimental.security.ArrayBound -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks -Wno-objc-root-class %s -// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -DTEST_64 -analyze -analyzer-checker=core,experimental.deadcode.IdempotentOperations,experimental.core.CastToStruct,experimental.security.ReturnPtrRange,experimental.security.ArrayBound -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks -Wno-objc-root-class %s +// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,alpha.deadcode.IdempotentOperations,alpha.core.CastToStruct,alpha.security.ReturnPtrRange,alpha.security.ArrayBound -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks -Wno-objc-root-class %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -DTEST_64 -analyze -analyzer-checker=core,alpha.deadcode.IdempotentOperations,alpha.core.CastToStruct,alpha.security.ReturnPtrRange,alpha.security.ArrayBound -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks -Wno-objc-root-class %s typedef long unsigned int size_t; void *memcpy(void *, const void *, size_t); @@ -1086,6 +1086,9 @@ pr8052(u_int boot_addr) u_char *src = (u_char *) ((u_long) bootMP); u_char *dst = (u_char *) boot_addr + ((vm_offset_t) ((((((((1 << 12) / (sizeof(pd_entry_t))) - 1) - 1) - (260 - 2))) << 22) | ((0) << 12))); +#ifdef TEST_64 +// expected-warning@-3 {{cast to 'u_char *' (aka 'unsigned char *') from smaller integer type 'u_int' (aka 'unsigned int')}} +#endif for (x = 0; x < size; ++x) diff 
--git a/test/Analysis/misc-ps-region-store.mm b/test/Analysis/misc-ps-region-store.mm index 8615c6a..c19a90f 100644 --- a/test/Analysis/misc-ps-region-store.mm +++ b/test/Analysis/misc-ps-region-store.mm @@ -1,5 +1,6 @@ -// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks %s -// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks %s +// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify -fblocks -analyzer-opt-analyze-nested-blocks %s +// expected-no-diagnostics //===------------------------------------------------------------------------------------------===// // This files tests our path-sensitive handling of Objective-c++ files. diff --git a/test/Analysis/misc-ps.c b/test/Analysis/misc-ps.c index 8ff710b..ef89321 100644 --- a/test/Analysis/misc-ps.c +++ b/test/Analysis/misc-ps.c @@ -133,3 +133,21 @@ int isctype(char c, unsigned long f) return (c < 1 || c > 10) ? 0 : !!(c & f); } +// Test that symbolic array offsets are modeled conservatively. +// This was triggering a false "use of uninitialized value" warning. +void rdar_12075238__aux(unsigned long y); +int rdar_12075238_(unsigned long count) { + if ((count < 3) || (count > 6)) + return 0; + + unsigned long array[6]; + unsigned long i = 0; + for (; i <= count - 2; i++) + { + array[i] = i; + } + array[count - 1] = i; + rdar_12075238__aux(array[2]); // no-warning + return 0; +} + diff --git a/test/Analysis/misc-ps.m b/test/Analysis/misc-ps.m index bcc0472..b261b10 100644 --- a/test/Analysis/misc-ps.m +++ b/test/Analysis/misc-ps.m @@ -1,8 +1,6 @@ // NOTE: Use '-fobjc-gc' to test the analysis being run twice, and multiple reports are not issued. 
-// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,experimental.deadcode.IdempotentOperations,experimental.core,osx.cocoa.AtSync,osx.AtomicCAS -analyzer-store=region -analyzer-constraints=basic -verify -fblocks -Wno-unreachable-code -Wno-null-dereference -Wno-objc-root-class %s -// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,experimental.deadcode.IdempotentOperations,experimental.core,osx.cocoa.AtSync,osx.AtomicCAS -analyzer-store=region -analyzer-constraints=range -verify -fblocks -Wno-unreachable-code -Wno-null-dereference -Wno-objc-root-class %s -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,experimental.deadcode.IdempotentOperations,experimental.core,osx.cocoa.AtSync,osx.AtomicCAS -analyzer-store=region -analyzer-constraints=basic -verify -fblocks -Wno-unreachable-code -Wno-null-dereference -Wno-objc-root-class %s -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,experimental.deadcode.IdempotentOperations,experimental.core,osx.cocoa.AtSync,osx.AtomicCAS -analyzer-store=region -analyzer-constraints=range -verify -fblocks -Wno-unreachable-code -Wno-null-dereference -Wno-objc-root-class %s +// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,alpha.deadcode.IdempotentOperations,alpha.core,osx.cocoa.AtSync -analyzer-store=region -analyzer-constraints=range -verify -fblocks -Wno-unreachable-code -Wno-null-dereference -Wno-objc-root-class %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,alpha.deadcode.IdempotentOperations,alpha.core,osx.cocoa.AtSync -analyzer-store=region -analyzer-constraints=range -verify -fblocks -Wno-unreachable-code -Wno-null-dereference -Wno-objc-root-class %s #ifndef __clang_analyzer__ #error __clang_analyzer__ not defined @@ -1152,11 +1150,11 @@ void rdar8578650(id x) { @implementation RDar6352035 - (void)foo { RDar6352035 *friend = 0; - friend->c = 7; // expected-warning{{Instance variable access (via 'friend') results in a null pointer dereference}} + friend->c = 7; // expected-warning{{Access to instance variable 'c' results in a dereference of a null pointer (loaded from variable 'friend')}} } - (void)bar { self = 0; - c = 7; // expected-warning{{Instance variable access (via 'self') results in a null pointer dereference}} + c = 7; // expected-warning{{Access to instance variable 'c' results in a dereference of a null pointer (loaded from variable 'self')}} } @end diff --git a/test/Analysis/new-with-exceptions.cpp b/test/Analysis/new-with-exceptions.cpp new file mode 100644 index 0000000..d909f1f --- /dev/null +++ b/test/Analysis/new-with-exceptions.cpp @@ -0,0 +1,71 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-store region -std=c++11 -fexceptions -fcxx-exceptions -verify -DEXCEPTIONS %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-store region -std=c++11 -verify %s + +void clang_analyzer_eval(bool); + +typedef __typeof__(sizeof(int)) size_t; +extern "C" void *malloc(size_t); + +// This is the standard placement new. 
+inline void* operator new(size_t, void* __p) throw() +{ + return __p; +} + +struct NoThrow { + void *operator new(size_t) throw(); +}; + +struct NoExcept { + void *operator new(size_t) noexcept; +}; + +struct DefaultThrow { + void *operator new(size_t); +}; + +struct ExplicitThrow { + void *operator new(size_t) throw(int); +}; + +void testNew() { + clang_analyzer_eval(new NoThrow); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(new NoExcept); // expected-warning{{UNKNOWN}} + + clang_analyzer_eval(new DefaultThrow); + clang_analyzer_eval(new ExplicitThrow); +#ifdef EXCEPTIONS + // expected-warning@-3 {{TRUE}} + // expected-warning@-3 {{TRUE}} +#else + // expected-warning@-6 {{UNKNOWN}} + // expected-warning@-6 {{UNKNOWN}} +#endif +} + +void testNewArray() { + clang_analyzer_eval(new NoThrow[2]); + clang_analyzer_eval(new NoExcept[2]); + clang_analyzer_eval(new DefaultThrow[2]); + clang_analyzer_eval(new ExplicitThrow[2]); +#ifdef EXCEPTIONS + // expected-warning@-5 {{TRUE}} + // expected-warning@-5 {{TRUE}} + // expected-warning@-5 {{TRUE}} + // expected-warning@-5 {{TRUE}} +#else + // expected-warning@-10 {{UNKNOWN}} + // expected-warning@-10 {{UNKNOWN}} + // expected-warning@-10 {{UNKNOWN}} + // expected-warning@-10 {{UNKNOWN}} +#endif +} + +extern void *operator new[](size_t, int) noexcept; + +void testNewArrayNoThrow() { + clang_analyzer_eval(new (1) NoThrow[2]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(new (1) NoExcept[2]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(new (1) DefaultThrow[2]); // expected-warning{{UNKNOWN}} + clang_analyzer_eval(new (1) ExplicitThrow[2]); // expected-warning{{UNKNOWN}} +} diff --git a/test/Analysis/new.cpp b/test/Analysis/new.cpp index fb77de2..fdd16da3 100644 --- a/test/Analysis/new.cpp +++ b/test/Analysis/new.cpp @@ -74,6 +74,18 @@ void testScalarInitialization() { } +struct PtrWrapper { + int *x; + + PtrWrapper(int *input) : x(input) {} +}; + +PtrWrapper *testNewInvalidation() { + // Ensure that we don't consider this a leak. + return new PtrWrapper(static_cast(malloc(4))); +} + + //-------------------------------- // Incorrectly-modelled behavior //-------------------------------- diff --git a/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret-region.m b/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret-region.m index c1cc076..384501a 100644 --- a/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret-region.m +++ b/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret-region.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple i386-apple-darwin8 -analyze -analyzer-checker=core,experimental.core -analyzer-constraints=range -analyzer-store=region -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -triple i386-apple-darwin8 -analyze -analyzer-checker=core,alpha.core -analyzer-constraints=range -analyzer-store=region -verify -Wno-objc-root-class %s // - This test case shows that a nil instance // variable can possibly be initialized by a method. 
diff --git a/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret.m b/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret.m
index 7cf2aee..86e8911 100644
--- a/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret.m
+++ b/test/Analysis/nil-receiver-undefined-larger-than-voidptr-ret.m
@@ -1,6 +1,9 @@
-// RUN: %clang_cc1 -triple i386-apple-darwin8 -analyze -analyzer-checker=core,experimental.core -analyzer-constraints=basic -analyzer-store=region -Wno-objc-root-class %s 2>&1 | FileCheck -check-prefix=darwin8 %s
-// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-constraints=basic -analyzer-store=region -Wno-objc-root-class %s 2>&1 | FileCheck -check-prefix=darwin9 %s
-// RUN: %clang_cc1 -triple thumbv6-apple-ios4.0 -analyze -analyzer-checker=core,experimental.core -analyzer-constraints=basic -analyzer-store=region -Wno-objc-root-class %s 2>&1 | FileCheck -check-prefix=darwin9 %s
+// RUN: %clang_cc1 -triple i386-apple-darwin8 -analyze -analyzer-checker=core,alpha.core -analyzer-constraints=range -analyzer-store=region -Wno-objc-root-class %s > %t.1 2>&1
+// RUN: FileCheck -input-file=%t.1 -check-prefix=darwin8 %s
+// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-constraints=range -analyzer-store=region -Wno-objc-root-class %s > %t.2 2>&1
+// RUN: FileCheck -input-file=%t.2 -check-prefix=darwin9 %s
+// RUN: %clang_cc1 -triple thumbv6-apple-ios4.0 -analyze -analyzer-checker=core,alpha.core -analyzer-constraints=range -analyzer-store=region -Wno-objc-root-class %s > %t.3 2>&1
+// RUN: FileCheck -input-file=%t.3 -check-prefix=darwin9 %s
 
 @interface MyClass {}
 - (void *)voidPtrM;
diff --git a/test/Analysis/no-exit-cfg.c b/test/Analysis/no-exit-cfg.c
index 1a80a25..b3c3fbc 100644
--- a/test/Analysis/no-exit-cfg.c
+++ b/test/Analysis/no-exit-cfg.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s
+// expected-no-diagnostics
 
 // This is a test case for the issue reported in PR 2819:
 // http://llvm.org/bugs/show_bug.cgi?id=2819
diff --git a/test/Analysis/no-outofbounds.c b/test/Analysis/no-outofbounds.c
index 821f486..84f86d7 100644
--- a/test/Analysis/no-outofbounds.c
+++ b/test/Analysis/no-outofbounds.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core,experimental.unix,experimental.security.ArrayBound -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,alpha.unix,alpha.security.ArrayBound -analyzer-store=region -verify %s
 //===----------------------------------------------------------------------===//
 // This file tests cases where we should not flag out-of-bounds warnings.
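Two mechanical updates recur throughout the test hunks in this import: the experimental.* analyzer checker packages are renamed to alpha.*, and tests that are expected to produce no analyzer reports gain an explicit '// expected-no-diagnostics' marker for -verify. A minimal sketch of the resulting test shape (illustrative only, not a file added by this patch; the function name is made up):

// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s
// expected-no-diagnostics

void guarded_deref(int *p) {
  if (p)
    *p = 0; // the dereference only happens on the non-null branch, so no report is expected
}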
diff --git a/test/Analysis/null-deref-path-notes.m b/test/Analysis/null-deref-path-notes.m
new file mode 100644
index 0000000..993f633
--- /dev/null
+++ b/test/Analysis/null-deref-path-notes.m
@@ -0,0 +1,488 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-store=region -analyzer-output=text -fblocks -verify -Wno-objc-root-class %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-store=region -analyzer-output=plist-multi-file -fblocks -Wno-objc-root-class %s -o %t
+// RUN: FileCheck --input-file=%t %s
+
+@interface Root {
+@public
+  int uniqueID;
+}
+- (id)initWithID:(int)newID;
+- (void)refreshID;
+@end
+
+int testNull(Root *obj) {
+  if (obj) return 0;
+  // expected-note@-1 {{Assuming 'obj' is nil}}
+  // expected-note@-2 {{Taking false branch}}
+
+  int *x = &obj->uniqueID; // expected-note{{Variable 'x' initialized to a null pointer value}}
+  return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}} expected-note{{Dereference of null pointer (loaded from variable 'x')}}
+}
+
+@interface Subclass : Root
+@end
+
+@implementation Subclass
+- (id)initWithID:(int)newID {
+  self = [super initWithID:newID]; // expected-note{{Value assigned to 'self'}}
+  if (self) return self;
+  // expected-note@-1 {{Assuming 'self' is nil}}
+  // expected-note@-2 {{Taking false branch}}
+
+  uniqueID = newID; // expected-warning{{Access to instance variable 'uniqueID' results in a dereference of a null pointer (loaded from variable 'self')}} expected-note{{Access to instance variable 'uniqueID' results in a dereference of a null pointer (loaded from variable 'self')}}
+  return self;
+}
+
+@end
+
[The remaining added lines of this new test are FileCheck expectations against the plist
output; their <key>/<string> markup was lost in extraction and is not reproduced here.
They verify two 'path' entries: one for 'testNull' (control edges plus the events
"Assuming 'obj' is nil", "Variable 'x' initialized to a null pointer value" and
"Dereference of null pointer (loaded from variable 'x')", category "Logic error",
issue_hash 6, location line 19 col 10), and one for the Objective-C method 'initWithID:'
(events "Value assigned to 'self'", "Assuming 'self' is nil" and "Access to instance
variable 'uniqueID' results in a dereference of a null pointer (loaded from variable
'self')", issue_hash 6, location line 33 col 3).]
diff --git a/test/Analysis/null-deref-ps-region.c b/test/Analysis/null-deref-ps-region.c
index 08bfac7..5bb9454 100644
--- a/test/Analysis/null-deref-ps-region.c
+++ b/test/Analysis/null-deref-ps-region.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -std=gnu99 -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -std=gnu99 -analyzer-store=region -verify %s
+// expected-no-diagnostics
 
 // The store for 'a[1]' should not be removed mistakenly. SymbolicRegions may
diff --git a/test/Analysis/null-deref-ps.c b/test/Analysis/null-deref-ps.c
index a707970..4dc8fc4 100644
--- a/test/Analysis/null-deref-ps.c
+++ b/test/Analysis/null-deref-ps.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,deadcode,experimental.deadcode.IdempotentOperations,experimental.core -std=gnu99 -analyzer-store=region -analyzer-constraints=range -analyzer-purge=none -verify %s -Wno-error=return-type
-// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,deadcode,experimental.deadcode.IdempotentOperations,experimental.core -std=gnu99 -analyzer-store=region -analyzer-constraints=range -verify %s -Wno-error=return-type
+// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,deadcode,alpha.deadcode.IdempotentOperations,alpha.core -std=gnu99 -analyzer-store=region -analyzer-constraints=range -analyzer-purge=none -verify %s -Wno-error=return-type
+// RUN: %clang_cc1 -triple i386-apple-darwin10 -analyze -analyzer-checker=core,deadcode,alpha.deadcode.IdempotentOperations,alpha.core -std=gnu99 -analyzer-store=region -analyzer-constraints=range -verify %s -Wno-error=return-type
 
 typedef unsigned uintptr_t;
diff --git a/test/Analysis/objc-bool.m b/test/Analysis/objc-bool.m
index f95736a..a2d10cc 100644
--- a/test/Analysis/objc-bool.m
+++ b/test/Analysis/objc-bool.m
@@ -1,4 +1,5 @@
 // RUN: %clang --analyze %s -o %t -Xclang -verify
+// expected-no-diagnostics
 
 // Test handling of ObjC bool literals.
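The null-deref-path-notes.m test added above relies on two conventions worth noting: -analyzer-output=text makes the analyzer emit its path notes as ordinary diagnostics so -verify can match them with expected-note, and the @-N suffix anchors an expectation to the line N lines above the comment. A minimal sketch of the pattern (illustrative only, not part of this patch; the function name is made up):

// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=text -verify %s

int deref_example(int *p) {
  if (p) return 0;
  // expected-note@-1 {{Assuming 'p' is null}}
  // expected-note@-2 {{Taking false branch}}
  return *p; // expected-warning{{Dereference of null pointer (loaded from variable 'p')}} expected-note{{Dereference of null pointer (loaded from variable 'p')}}
}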
diff --git a/test/Analysis/objc-for.m b/test/Analysis/objc-for.m index 52a55b0..1561ef8 100644 --- a/test/Analysis/objc-for.m +++ b/test/Analysis/objc-for.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=osx.cocoa.Loops,debug.ExprInspection -verify +// RUN: %clang_cc1 -analyze -analyzer-checker=osx.cocoa.Loops,debug.ExprInspection -verify %s void clang_analyzer_eval(int); diff --git a/test/Analysis/objc-method-coverage.m b/test/Analysis/objc-method-coverage.m index 056aafe..3088a29 100644 --- a/test/Analysis/objc-method-coverage.m +++ b/test/Analysis/objc-method-coverage.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=inlining -analyzer-stats -fblocks %s 2>&1 | FileCheck %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-stats -fblocks %s 2>&1 | FileCheck %s // RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-ipa=none -analyzer-stats -fblocks %s 2>&1 | FileCheck %s @interface I diff --git a/test/Analysis/objc-properties.m b/test/Analysis/objc-properties.m new file mode 100644 index 0000000..87ab7f7 --- /dev/null +++ b/test/Analysis/objc-properties.m @@ -0,0 +1,72 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.osx.cocoa.DirectIvarAssignment -fobjc-default-synthesize-properties -verify -fblocks %s + +typedef signed char BOOL; +@protocol NSObject - (BOOL)isEqual:(id)object; @end +@interface NSObject {} ++(id)alloc; +-(id)init; +-(id)autorelease; +-(id)copy; +-(id)retain; +@end + +@interface MyClass; +@end +@interface TestProperty :NSObject { + MyClass *_Z; + id _nonSynth; +} + + @property (assign, nonatomic) MyClass* A; // explicitely synthesized, not implemented, non-default ivar name + + @property (assign) MyClass* X; // automatically synthesized, not implemented + + @property (assign, nonatomic) MyClass* Y; // automatically synthesized, implemented + + @property (assign, nonatomic) MyClass* Z; // non synthesized ivar, implemented setter + @property (readonly) id nonSynth; // non synthesized, explicitly implemented to return ivar with expected name + + - (id) initWithPtr:(MyClass*) value; + - (id) myInitWithPtr:(MyClass*) value; + - (void) someMethod: (MyClass*)In; +@end + +@implementation TestProperty + @synthesize A = __A; + + - (id) initWithPtr: (MyClass*) value { + _Y = value; // no-warning + return self; + } + + - (id) copyWithPtrY: (TestProperty*) value { + TestProperty *another = [[TestProperty alloc] init]; + another->_Y = value->_Y; // no-warning + return another; + } + + - (id) myInitWithPtr: (MyClass*) value { + _Y = value; // no-warning + return self; + } + + - (void) setY:(MyClass*) NewValue { + _Y = NewValue; // no-warning + } + + - (void) setZ:(MyClass*) NewValue { + _Z = NewValue; // no-warning + } + + - (id)nonSynth { + return _nonSynth; + } + + - (void) someMethod: (MyClass*)In { + (__A) = In; // expected-warning {{Direct assignment to an instance variable backing a property; use the setter instead}} + _X = In; // expected-warning {{Direct assignment to an instance variable backing a property; use the setter instead}} + _Y = In; // expected-warning {{Direct assignment to an instance variable backing a property; use the setter instead}} + _Z = In; // expected-warning {{Direct assignment to an instance variable backing a property; use the setter instead}} + _nonSynth = 0; // expected-warning {{Direct assignment to an instance variable backing a property; use the setter instead}} + } +@end \ No newline at end of file diff --git a/test/Analysis/objc_invalidation.m 
b/test/Analysis/objc_invalidation.m new file mode 100644 index 0000000..357c5e8 --- /dev/null +++ b/test/Analysis/objc_invalidation.m @@ -0,0 +1,153 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.osx.cocoa.InstanceVariableInvalidation -fobjc-default-synthesize-properties -verify %s + +@protocol NSObject +@end +@interface NSObject {} ++(id)alloc; ++(id)new; +-(id)init; +-(id)autorelease; +-(id)copy; +- (Class)class; +-(id)retain; +-(id)description; +@end +@class NSString; + +extern void NSLog(NSString *format, ...) __attribute__((format(__NSString__, 1, 2))); + +@protocol Invalidation1 +- (void) invalidate __attribute__((annotate("objc_instance_variable_invalidator"))); +@end + +@protocol Invalidation2 +- (void) invalidate __attribute__((annotate("objc_instance_variable_invalidator"))); +@end + +@protocol Invalidation3 +- (void) invalidate __attribute__((annotate("objc_instance_variable_invalidator"))); +- (void) invalidate2 __attribute__((annotate("objc_instance_variable_invalidator"))); +@end + +@interface Invalidation2Class +@end + +@interface Invalidation1Class +@end + +@interface SomeInvalidationImplementingObject: NSObject { + SomeInvalidationImplementingObject *ObjA; // invalidation in the parent +} +@end + +@implementation SomeInvalidationImplementingObject +- (void)invalidate{ + ObjA = 0; +} +- (void)invalidate2 { + [self invalidate]; +} +@end + +@interface SomeSubclassInvalidatableObject : SomeInvalidationImplementingObject { + SomeInvalidationImplementingObject *Ivar1; // regular ivar + SomeInvalidationImplementingObject *Ivar2; // regular ivar, sending invalidate message + SomeInvalidationImplementingObject *_Ivar3; // no property, call -description + SomeInvalidationImplementingObject *_Ivar4; // no property, provide as argument to NSLog() + + SomeInvalidationImplementingObject *_Prop1; // partially implemented property, set to 0 with dot syntax + SomeInvalidationImplementingObject *_Prop2; // fully implemented prop, set to 0 with dot syntax + SomeInvalidationImplementingObject *_propIvar; // property with custom named ivar, set to 0 via setter + Invalidation1Class *MultipleProtocols; // regular ivar belonging to a different class + Invalidation2Class *MultInheritance; // regular ivar belonging to a different class + SomeInvalidationImplementingObject *_Prop3; // property, invalidate via sending a message to a getter method + SomeInvalidationImplementingObject *_Prop4; // property with @synthesize, invalidate via property + SomeInvalidationImplementingObject *_Prop5; // property with @synthesize, invalidate via getter method + SomeInvalidationImplementingObject *_Prop8; + + // No warnings on these as they are not invalidatable. 
+ NSObject *NIvar1; + NSObject *NObj2; + NSObject *_NProp1; + NSObject *_NpropIvar; +} + +@property (assign) SomeInvalidationImplementingObject* Prop0; +@property (nonatomic, assign) SomeInvalidationImplementingObject* Prop1; +@property (assign) SomeInvalidationImplementingObject* Prop2; +@property (assign) SomeInvalidationImplementingObject* Prop3; +@property (assign) SomeInvalidationImplementingObject *Prop5; +@property (assign) SomeInvalidationImplementingObject *Prop4; + +@property (assign) SomeInvalidationImplementingObject* Prop6; // automatically synthesized prop +@property (assign) SomeInvalidationImplementingObject* Prop7; // automatically synthesized prop +@property (assign) SomeInvalidationImplementingObject *SynthIvarProp; + +@property (assign) NSObject* NProp0; +@property (nonatomic, assign) NSObject* NProp1; +@property (assign) NSObject* NProp2; + +-(void)setProp1: (SomeInvalidationImplementingObject*) InO; +-(void)setNProp1: (NSObject*) InO; + +-(void)invalidate; + +@end + +@interface SomeSubclassInvalidatableObject() +@property (assign) SomeInvalidationImplementingObject* Prop8; +@end + +@implementation SomeSubclassInvalidatableObject{ + @private + SomeInvalidationImplementingObject *Ivar5; +} + +@synthesize Prop7 = _propIvar; +@synthesize Prop3 = _Prop3; +@synthesize Prop5 = _Prop5; +@synthesize Prop4 = _Prop4; +@synthesize Prop8 = _Prop8; + + +- (void) setProp1: (SomeInvalidationImplementingObject*) InObj { + _Prop1 = InObj; +} + +- (void) setProp2: (SomeInvalidationImplementingObject*) InObj { + _Prop2 = InObj; +} +- (SomeInvalidationImplementingObject*) Prop2 { + return _Prop2; +} + +@synthesize NProp2 = _NpropIvar; + +- (void) setNProp1: (NSObject*) InObj { + _NProp1 = InObj; +} + +- (void) invalidate { + [Ivar2 invalidate]; + self.Prop0 = 0; + self.Prop1 = 0; + [self setProp2:0]; + [self setProp3:0]; + [[self Prop5] invalidate2]; + [self.Prop4 invalidate]; + [self.Prop8 invalidate]; + self.Prop6 = 0; + [[self Prop7] invalidate]; + + [_Ivar3 description]; + NSLog(@"%@", _Ivar4); + [super invalidate]; +} +// expected-warning@-1 {{Instance variable Ivar1 needs to be invalidated}} + // expected-warning@-2 {{Instance variable MultipleProtocols needs to be invalidated}} + // expected-warning@-3 {{Instance variable MultInheritance needs to be invalidated}} + // expected-warning@-4 {{Property SynthIvarProp needs to be invalidated or set to nil}} + // expected-warning@-5 {{Instance variable _Ivar3 needs to be invalidated}} + // expected-warning@-6 {{Instance variable _Ivar4 needs to be invalidated}} + // expected-warning@-7 {{Instance variable Ivar5 needs to be invalidated or set to nil}} +@end diff --git a/test/Analysis/operator-calls.cpp b/test/Analysis/operator-calls.cpp index dbc63bc..066f6a3 100644 --- a/test/Analysis/operator-calls.cpp +++ b/test/Analysis/operator-calls.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core,debug.ExprInspection -analyzer-ipa=inlining -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -analyzer-ipa=inlining -verify %s void clang_analyzer_eval(bool); struct X0 { }; @@ -30,3 +30,22 @@ struct IntComparable { void testMemberOperator(IntComparable B) { clang_analyzer_eval(B == 0); // expected-warning{{TRUE}} } + + + +namespace UserDefinedConversions { + class Convertible { + public: + operator int() const { + return 42; + } + operator bool() const { + return true; + } + }; + + void test(const Convertible &obj) { + clang_analyzer_eval((int)obj == 42); // 
expected-warning{{TRUE}} + clang_analyzer_eval(obj); // expected-warning{{TRUE}} + } +} diff --git a/test/Analysis/out-of-bounds.c b/test/Analysis/out-of-bounds.c index c172170..dd593c5 100644 --- a/test/Analysis/out-of-bounds.c +++ b/test/Analysis/out-of-bounds.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -Wno-array-bounds -analyze -analyzer-checker=core,experimental.security.ArrayBoundV2 -verify %s +// RUN: %clang_cc1 -Wno-array-bounds -analyze -analyzer-checker=core,alpha.security.ArrayBoundV2 -verify %s // Tests doing an out-of-bounds access after the end of an array using: // - constant integer index diff --git a/test/Analysis/outofbound-notwork.c b/test/Analysis/outofbound-notwork.c index c1fa2d7..68b01e0 100644 --- a/test/Analysis/outofbound-notwork.c +++ b/test/Analysis/outofbound-notwork.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -Wno-array-bounds -analyze -analyzer-checker=core,experimental.security.ArrayBound -analyzer-store=region -verify %s +// RUN: %clang_cc1 -Wno-array-bounds -analyze -analyzer-checker=core,alpha.security.ArrayBound -analyzer-store=region -verify %s // XFAIL: * // Once we better handle modeling of sizes of VLAs, we can pull this back diff --git a/test/Analysis/outofbound.c b/test/Analysis/outofbound.c index 2783ac2..45786ec 100644 --- a/test/Analysis/outofbound.c +++ b/test/Analysis/outofbound.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -Wno-array-bounds -analyze -analyzer-checker=core,experimental.unix,experimental.security.ArrayBound -analyzer-store=region -verify %s +// RUN: %clang_cc1 -Wno-array-bounds -analyze -analyzer-checker=core,alpha.unix,alpha.security.ArrayBound -analyzer-store=region -verify %s typedef __typeof(sizeof(int)) size_t; void *malloc(size_t); diff --git a/test/Analysis/override-werror.c b/test/Analysis/override-werror.c index d3b75f6..a68ee1e 100644 --- a/test/Analysis/override-werror.c +++ b/test/Analysis/override-werror.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -Werror %s -analyzer-store=region -verify +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -Werror %s -analyzer-store=region -verify // This test case illustrates that using '-analyze' overrides the effect of // -Werror. This allows basic warnings not to interfere with producing diff --git a/test/Analysis/plist-html-macros.c b/test/Analysis/plist-html-macros.c new file mode 100644 index 0000000..954aab0 --- /dev/null +++ b/test/Analysis/plist-html-macros.c @@ -0,0 +1,31 @@ +// REQUIRES: shell +// RUN: %clang_cc1 -analyze -analyzer-checker=core -verify %s +// (sanity check) + +// RUN: rm -rf %t.dir +// RUN: mkdir -p %t.dir +// RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-output=plist-html -o %t.dir/index.plist %s +// RUN: ls %t.dir | grep \\.html | count 1 +// RUN: grep \\.html %t.dir/index.plist | count 1 + +// This tests two things: that the two calls to null_deref below are coalesced +// into a single bug by both the plist and HTML diagnostics, and that the plist +// diagnostics have a reference to the HTML diagnostics. (It would be nice to +// check more carefully that the two actually match, but that's hard to write +// in a lit RUN line.) 
+
+#define CALL_FN(a) null_deref(a)
+
+void null_deref(int *a) {
+  if (a)
+    return;
+  *a = 1; // expected-warning{{null}}
+}
+
+void test1() {
+  CALL_FN(0);
+}
+
+void test2(int *p) {
+  CALL_FN(p);
+}
diff --git a/test/Analysis/plist-output-alternate.m b/test/Analysis/plist-output-alternate.m
index a338a79..423574d 100644
--- a/test/Analysis/plist-output-alternate.m
+++ b/test/Analysis/plist-output-alternate.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-output=plist -o %t %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core -analyzer-store=region -analyzer-constraints=range -fblocks -analyzer-output=plist -o %t %s
 // RUN: FileCheck --input-file %t %s
 
 void test_null_init(void) {
@@ -57,923 +57,1220 @@ void rdar8331641(int x) {
   (void) value;
 }
 
[The removed lines that follow are the old loosely anchored '// CHECK:' plist
expectations; their <key>/<string> markup was lost in extraction and is not
reproduced here. They cover the 'path' entries for test_null_init,
test_null_assign, test_null_assign_transitive, test_null_cond,
test_null_cond_transitive and test_null_field ("Dereference of null pointer
(loaded from variable 'p'/'q'/field 'p')", category "Logic error"), followed by
a path whose first event is "Call to function 'CFNumberCreate' returns a Core
Foundation object with a +1 retain count".]
-// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col14 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col14 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line58 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'value' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'value' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak of an object stored into 'value' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextrdar8331641 -// CHECK: issue_hash6 -// CHECK: location -// CHECK: -// CHECK: line58 -// CHECK: col1 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// 
CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_init +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: 
Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_assign +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line16 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line16 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'q' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'q' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col5 
+// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'q') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'q') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'q') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_assign_transitive +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line24 +// 
CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_cond +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'q' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'q' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// 
CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_cond_transitive +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from field 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from field 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from field 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_field +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col36 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col82 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFNumberCreate' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFNumberCreate' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line54 +// CHECK-NEXT: col36 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'x' is 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'x' is 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'value' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'value' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'value' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar8331641 +// CHECK-NEXT: issue_hash6 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: diff 
--git a/test/Analysis/plist-output.m b/test/Analysis/plist-output.m index 769c1bc..cefa762 100644 --- a/test/Analysis/plist-output.m +++ b/test/Analysis/plist-output.m @@ -1,4 +1,5 @@ -// RUN: %clang --analyze %s -o - 2>/dev/null | FileCheck %s +// RUN: %clang --analyze %s -Xanalyzer -analyzer-checker=osx.cocoa.RetainCount -o %t.plist +// RUN: FileCheck --input-file=%t.plist %s void test_null_init(void) { int *p = 0; @@ -76,1475 +77,2193 @@ int test_cond_assign() { *p = 0xDEADBEEF; // expected-warning {{deference}} } } + +// The original source for the above Radar contains another problem: +// if the end-of-path node is an implicit statement, it may not have a valid +// source location. +- (void)test2 { + if (bar_cond_assign()) { + id foo = [[RDar10797980 alloc] init]; // leak + } + (void)y; // first statement after the 'if' is an implicit 'self' DeclRefExpr +} + @end -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: files -// CHECK: -// CHECK: {{.*}}plist-output.m -// CHECK: +// Test that loops are documented in the path. +void rdar12280665() { + for (unsigned i = 0; i < 2; ++i) { + if (i == 1) { + int *p = 0; + *p = 0xDEADBEEF; // expected-warning {{dereference}} + } + } +} + // CHECK: diagnostics -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line4 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line4 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line4 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line4 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line4 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line5 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line5 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line5 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line5 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line5 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from variable 'p') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_null_init -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line5 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line9 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line9 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 
-// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Null pointer value stored to 'p' -// CHECK: message -// CHECK: Null pointer value stored to 'p' -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line10 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line11 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line11 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line11 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line11 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line11 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from variable 'p') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_null_assign -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line11 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line15 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line15 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line17 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'q' initialized to a null pointer value -// CHECK: message -// CHECK: Variable 'q' initialized to a null pointer value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line17 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent 
-// CHECK: location -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line18 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line18 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from variable 'q') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from variable 'q') -// CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from variable 'q') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_null_assign_transitive -// CHECK: issue_hash4 -// CHECK: location -// CHECK: -// CHECK: line18 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line22 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Assuming 'p' is null -// CHECK: message -// CHECK: Assuming 'p' is null -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line22 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Assuming pointer value is null -// CHECK: message -// CHECK: Assuming pointer value is null -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line22 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line23 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line23 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from variable 'p') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: 
issue_context_kindfunction -// CHECK: issue_contexttest_null_cond -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line23 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line28 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line29 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line30 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line30 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line30 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line30 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line30 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from variable 'p') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_null_cond_transitive -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line30 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line35 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line35 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line35 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line35 -// CHECK: col10 -// CHECK: 
file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line35 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line35 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line37 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line37 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line37 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line37 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line37 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from field 'p') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from field 'p') -// CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from field 'p') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_null_field -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line37 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line45 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line45 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line45 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line45 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line45 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line45 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line45 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line45 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line48 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// 
CHECK: line48 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line49 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from variable 'p') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_assumptions -// CHECK: issue_hash8 -// CHECK: location -// CHECK: -// CHECK: line49 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line54 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line54 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Assuming 'p' is null -// CHECK: message -// CHECK: Assuming 'p' is null -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// 
CHECK: -// CHECK: line57 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line57 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from variable 'p') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: issue_context_kindfunction -// CHECK: issue_contexttest_cond_assign -// CHECK: issue_hash4 -// CHECK: location -// CHECK: -// CHECK: line57 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line74 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line75 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: message -// CHECK: Variable 'p' initialized to a null pointer value -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col7 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line76 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col6 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// CHECK: message -// CHECK: Dereference of null pointer (loaded from variable 'p') -// 
CHECK: -// CHECK: -// CHECK: descriptionDereference of null pointer (loaded from variable 'p') -// CHECK: categoryLogic error -// CHECK: typeDereference of null pointer -// CHECK: issue_context_kindObjective-C method -// CHECK: issue_contexttest -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line76 -// CHECK: col5 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line5 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_init +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line6 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line10 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: 
+// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line11 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_assign +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line12 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line16 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line16 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 
+// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Null pointer value stored to 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line17 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'q' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'q' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line18 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'q') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'q') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'q') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_assign_transitive +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line19 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: line23 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line23 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_cond +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line24 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: 
+// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'q' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'q' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line29 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line30 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: 
issue_contexttest_null_cond_transitive +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line31 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line36 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from field 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from field 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from field 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_null_field +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line38 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'a' is not equal to 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'a' is not equal to 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'b' is equal to 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'b' is equal to 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line46 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized 
to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_assumptions +// CHECK-NEXT: issue_hash8 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Value assigned to 'p' +// CHECK-NEXT: message +// CHECK-NEXT: Value assigned to 'p' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming pointer value is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming pointer value is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line56 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_cond_assign +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line75 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line76 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line77 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line77 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line77 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line77 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line77 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contexttest +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line77 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Value stored to 'foo' during its initialization is never read +// CHECK-NEXT: message +// CHECK-NEXT: Value stored to 'foo' during its initialization is never read +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionValue stored to 'foo' during its initialization is never read +// CHECK-NEXT: categoryDead store +// CHECK-NEXT: typeDead initialization +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contexttest2 +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line85 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line85 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges 
+// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line86 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line88 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line88 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line88 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'foo' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'foo' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'foo' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contexttest2 +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line88 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: 
+// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Looping back to the head of the loop +// CHECK-NEXT: message +// CHECK-NEXT: Looping back to the head of the loop +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line98 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line98 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line98 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line98 +// CHECK-NEXT: col6 +// CHECK-NEXT: 
file0
+// CHECK-NEXT:
+// CHECK-NEXT:
+// CHECK-NEXT: line98
+// CHECK-NEXT: col6
+// CHECK-NEXT: file0
+// CHECK-NEXT:
+// CHECK-NEXT:
+// CHECK-NEXT:
+// CHECK-NEXT: depth0
+// CHECK-NEXT: extended_message
+// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p')
+// CHECK-NEXT: message
+// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p')
+// CHECK-NEXT:
+// CHECK-NEXT:
+// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p')
+// CHECK-NEXT: categoryLogic error
+// CHECK-NEXT: typeDereference of null pointer
+// CHECK-NEXT: issue_context_kindfunction
+// CHECK-NEXT: issue_contextrdar12280665
+// CHECK-NEXT: issue_hash4
+// CHECK-NEXT: location
+// CHECK-NEXT:
+// CHECK-NEXT: line98
+// CHECK-NEXT: col5
+// CHECK-NEXT: file0
+// CHECK-NEXT:
+// CHECK-NEXT:
+// CHECK-NEXT:
diff --git a/test/Analysis/pointer-to-member.cpp b/test/Analysis/pointer-to-member.cpp
new file mode 100644
index 0000000..cef5dc5
--- /dev/null
+++ b/test/Analysis/pointer-to-member.cpp
@@ -0,0 +1,44 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -verify %s
+
+void clang_analyzer_eval(bool);
+
+struct A {
+  // This conversion operator allows implicit conversion to bool but not to other integer types.
+  typedef A * (A::*MemberPointer);
+  operator MemberPointer() const { return m_ptr ? &A::m_ptr : 0; }
+
+  A *m_ptr;
+};
+
+void testConditionalUse() {
+  A obj;
+
+  obj.m_ptr = &obj;
+  clang_analyzer_eval(obj.m_ptr); // expected-warning{{TRUE}}
+  clang_analyzer_eval(&A::m_ptr); // expected-warning{{TRUE}}
+  clang_analyzer_eval(obj); // expected-warning{{TRUE}}
+
+  obj.m_ptr = 0;
+  clang_analyzer_eval(obj.m_ptr); // expected-warning{{FALSE}}
+  clang_analyzer_eval(A::MemberPointer(0)); // expected-warning{{FALSE}}
+  clang_analyzer_eval(obj); // expected-warning{{FALSE}}
+}
+
+// ---------------
+// FALSE NEGATIVES
+// ---------------
+
+bool testDereferencing() {
+  A obj;
+  obj.m_ptr = 0;
+
+  A::MemberPointer member = &A::m_ptr;
+
+  // FIXME: Should be TRUE.
+  clang_analyzer_eval(obj.*member == 0); // expected-warning{{UNKNOWN}}
+
+  member = 0;
+
+  // FIXME: Should emit a null dereference.
+  return obj.*member; // no-warning
+}
diff --git a/test/Analysis/pr4209.m b/test/Analysis/pr4209.m
index e16d1aa..a5e7db5 100644
--- a/test/Analysis/pr4209.m
+++ b/test/Analysis/pr4209.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -triple i386-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s
 // This test case was crashing due to how CFRefCount.cpp resolved the
 // ObjCInterfaceDecl* and ClassName in EvalObjCMessageExpr.
diff --git a/test/Analysis/pr_2542_rdar_6793404.m b/test/Analysis/pr_2542_rdar_6793404.m
index 19c140d..6f1ebf9 100644
--- a/test/Analysis/pr_2542_rdar_6793404.m
+++ b/test/Analysis/pr_2542_rdar_6793404.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -pedantic -analyzer-store=region -verify -Wno-objc-root-class %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core -pedantic -analyzer-store=region -verify -Wno-objc-root-class %s
 // BEGIN delta-debugging reduced header stuff
diff --git a/test/Analysis/pr_4164.c b/test/Analysis/pr_4164.c
index 187f4c6..0d2ca41 100644
--- a/test/Analysis/pr_4164.c
+++ b/test/Analysis/pr_4164.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s
+// expected-no-diagnostics
 // PR 4164: http://llvm.org/bugs/show_bug.cgi?id=4164
 //
diff --git a/test/Analysis/pthreadlock.c b/test/Analysis/pthreadlock.c
index 4735d20..b904774 100644
--- a/test/Analysis/pthreadlock.c
+++ b/test/Analysis/pthreadlock.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.unix.PthreadLock -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.unix.PthreadLock -verify %s
 // Tests performing normal locking patterns and wrong locking orders
diff --git a/test/Analysis/ptr-arith.c b/test/Analysis/ptr-arith.c
index 884ae5b..9294c18 100644
--- a/test/Analysis/ptr-arith.c
+++ b/test/Analysis/ptr-arith.c
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.core.FixedAddr,experimental.core.PointerArithm,experimental.core.PointerSub,debug.ExprInspection -analyzer-store=region -verify -triple x86_64-apple-darwin9 %s
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.core.FixedAddr,experimental.core.PointerArithm,experimental.core.PointerSub,debug.ExprInspection -analyzer-store=region -verify -triple i686-apple-darwin9 %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.core.FixedAddr,alpha.core.PointerArithm,alpha.core.PointerSub,debug.ExprInspection -analyzer-store=region -verify -triple x86_64-apple-darwin9 %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.core.FixedAddr,alpha.core.PointerArithm,alpha.core.PointerSub,debug.ExprInspection -analyzer-store=region -verify -triple i686-apple-darwin9 %s
 void clang_analyzer_eval(int);
diff --git a/test/Analysis/rdar-6442306-1.m b/test/Analysis/rdar-6442306-1.m
index 62992d0..0fb49c2 100644
--- a/test/Analysis/rdar-6442306-1.m
+++ b/test/Analysis/rdar-6442306-1.m
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core %s -analyzer-store=region -verify
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core %s -analyzer-store=region -verify
+// expected-no-diagnostics
 typedef int bar_return_t;
 typedef struct {
diff --git a/test/Analysis/rdar-6540084.m b/test/Analysis/rdar-6540084.m
index d710c47..7070709 100644
--- a/test/Analysis/rdar-6540084.m
+++ b/test/Analysis/rdar-6540084.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.core -analyzer-checker=deadcode.DeadStores -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.core -analyzer-checker=deadcode.DeadStores -verify %s
 //
 // This test exercises the live variables analysis (LiveVariables.cpp).
 // The case originally identified a non-termination bug.
diff --git a/test/Analysis/rdar-6541136-region.c b/test/Analysis/rdar-6541136-region.c
index b90d4f4..83b7605 100644
--- a/test/Analysis/rdar-6541136-region.c
+++ b/test/Analysis/rdar-6541136-region.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -verify -analyze -analyzer-checker=core,experimental.security.ArrayBound -analyzer-store=region %s
+// RUN: %clang_cc1 -verify -analyze -analyzer-checker=core,alpha.security.ArrayBound -analyzer-store=region %s
 struct tea_cheese { unsigned magic; };
 typedef struct tea_cheese kernel_tea_cheese_t;
diff --git a/test/Analysis/rdar-6562655.m b/test/Analysis/rdar-6562655.m
index 3a59273..1c866bb 100644
--- a/test/Analysis/rdar-6562655.m
+++ b/test/Analysis/rdar-6562655.m
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -analyzer-constraints=basic -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core -analyzer-constraints=range -analyzer-store=region -verify %s
+// expected-no-diagnostics
 //
 // This test case mainly checks that the retain/release checker doesn't crash
 // on this file.
diff --git a/test/Analysis/rdar-6600344-nil-receiver-undefined-struct-ret.m b/test/Analysis/rdar-6600344-nil-receiver-undefined-struct-ret.m
index 5af4776..74d5484 100644
--- a/test/Analysis/rdar-6600344-nil-receiver-undefined-struct-ret.m
+++ b/test/Analysis/rdar-6600344-nil-receiver-undefined-struct-ret.m
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-constraints=basic -analyzer-store=region -verify -Wno-objc-root-class %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-constraints=range -analyzer-store=region -verify -Wno-objc-root-class %s
+// expected-no-diagnostics
 typedef struct Foo { int x; } Bar;
diff --git a/test/Analysis/rdar-7168531.m b/test/Analysis/rdar-7168531.m
index 4ccc7d7..b128d32 100644
--- a/test/Analysis/rdar-7168531.m
+++ b/test/Analysis/rdar-7168531.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -triple i386-apple-darwin10 -fobjc-runtime=macosx-fragile-10.5 -analyzer-store=region %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -triple i386-apple-darwin10 -fobjc-runtime=macosx-fragile-10.5 -analyzer-store=region %s
 // Note that the target triple is important for this test case. It specifies that we use the
 // fragile Objective-C ABI.
diff --git a/test/Analysis/redefined_system.c b/test/Analysis/redefined_system.c
index 3c08b5e..ae5bf26 100644
--- a/test/Analysis/redefined_system.c
+++ b/test/Analysis/redefined_system.c
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=unix,core,experimental.security.taint -w -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=unix,core,alpha.security.taint -w -verify %s
+// expected-no-diagnostics
 // Make sure we don't crash when someone redefines a system function we reason about.
diff --git a/test/Analysis/refcnt_naming.m b/test/Analysis/refcnt_naming.m
index 1d3a9b7..7a83fc1 100644
--- a/test/Analysis/refcnt_naming.m
+++ b/test/Analysis/refcnt_naming.m
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,experimental.core -analyzer-ipa=none -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,osx.cocoa.RetainCount,alpha.core -analyzer-ipa=none -analyzer-store=region -verify %s
 typedef const struct __CFString * CFStringRef;
 typedef const struct __CFAllocator * CFAllocatorRef;
diff --git a/test/Analysis/reference.cpp b/test/Analysis/reference.cpp
index 06e4a50..ce0ee8e 100644
--- a/test/Analysis/reference.cpp
+++ b/test/Analysis/reference.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core,debug.ExprInspection -analyzer-store=region -analyzer-constraints=range -verify -Wno-null-dereference %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core,debug.ExprInspection -analyzer-store=region -analyzer-constraints=range -verify -Wno-null-dereference %s
 void clang_analyzer_eval(bool);
@@ -110,6 +110,31 @@ void testRetroactiveNullReference(int *x) {
   y = 5; // expected-warning{{Dereference of null pointer}}
 }
+void testReferenceAddress(int &x) {
+  clang_analyzer_eval(&x != 0); // expected-warning{{TRUE}}
+  clang_analyzer_eval(&ref() != 0); // expected-warning{{TRUE}}
+
+  struct S { int &x; };
+
+  extern S getS();
+  clang_analyzer_eval(&getS().x != 0); // expected-warning{{TRUE}}
+
+  extern S *getSP();
+  clang_analyzer_eval(&getSP()->x != 0); // expected-warning{{TRUE}}
+}
+
+
+void testFunctionPointerReturn(void *opaque) {
+  typedef int &(*RefFn)();
+
+  RefFn getRef = (RefFn)opaque;
+
+  // Don't crash writing to or reading from this reference.
+  int &x = getRef();
+  x = 42;
+  clang_analyzer_eval(x == 42); // expected-warning{{TRUE}}
+}
+
 // ------------------------------------
 // False negatives
@@ -127,5 +152,4 @@ namespace rdar11212286 {
     B *x = 0;
     return *x; // should warn here!
   }
-
 }
diff --git a/test/Analysis/region-1.m b/test/Analysis/region-1.m
index be92766..9edb35b 100644
--- a/test/Analysis/region-1.m
+++ b/test/Analysis/region-1.m
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.core -analyzer-store=region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s
+// expected-no-diagnostics
 //
 // This test case simply should not crash. It evaluates the logic of not
 // using MemRegion::getRValueType in incorrect places.
diff --git a/test/Analysis/region-store.c b/test/Analysis/region-store.c index 09c3f10..d620150 100644 --- a/test/Analysis/region-store.c +++ b/test/Analysis/region-store.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix -verify %s +// expected-no-diagnostics int printf(const char *restrict,...); diff --git a/test/Analysis/retain-release-gc-only.m b/test/Analysis/retain-release-gc-only.m index 0340a3c..1a5ed34 100644 --- a/test/Analysis/retain-release-gc-only.m +++ b/test/Analysis/retain-release-gc-only.m @@ -107,9 +107,14 @@ NSFastEnumerationState; @end @interface NSNumber : NSValue - (char)charValue; - (id)initWithInt:(int)value; @end @class NSString; -@interface NSArray : NSObject - (NSUInteger)count; -@end @interface NSArray (NSArrayCreation) + (id)array; -@end @interface NSAutoreleasePool : NSObject { +@interface NSArray : NSObject +- (NSUInteger)count; +@end +@interface NSArray (NSArrayCreation) ++ (id)array; ++ (id)arrayWithObjects:(const id [])objects count:(NSUInteger)cnt; +@end + @interface NSAutoreleasePool : NSObject { } - (void)drain; - (id)init; @@ -385,3 +390,45 @@ CFDateRef returnsRetainedCFDate() { return (NSDate*) returnsRetainedCFDate(); // expected-warning{{leak}} } @end + + +#if __has_feature(attribute_ns_consumed) +#define NS_CONSUMED __attribute__((ns_consumed)) +#endif +#if __has_feature(attribute_cf_consumed) +#define CF_CONSUMED __attribute__((cf_consumed)) +#endif + +void consumeAndStopTracking(id NS_CONSUMED obj, void (^callback)(void)); +void CFConsumeAndStopTracking(CFTypeRef CF_CONSUMED obj, void (^callback)(void)); + +void testConsumeAndStopTracking() { + id retained = [@[] retain]; // +0, GC + consumeAndStopTracking(retained, ^{}); // no-warning + + id doubleRetained = [[@[] retain] retain]; // +0, GC + consumeAndStopTracking(doubleRetained, ^{ + [doubleRetained release]; + }); // no-warning + + id unretained = @[]; // +0 + consumeAndStopTracking(unretained, ^{}); // no-warning, GC +} + +void testCFConsumeAndStopTrackingMsg() { + id retained = [@[] retain]; // +0, GC + CFConsumeAndStopTracking((CFTypeRef)retained, ^{}); // expected-warning {{Incorrect decrement of the reference count of an object that is not owned at this point by the caller}} +} + +void testCFConsumeAndStopTracking() { + CFTypeRef retained = returnsRetainedCFDate(); // +1 + CFConsumeAndStopTracking(retained, ^{}); // no-warning + + CFTypeRef doubleRetained = CFRetain(returnsRetainedCFDate()); // +2 + CFConsumeAndStopTracking(doubleRetained, ^{ + CFRelease(doubleRetained); + }); // no-warning + + id unretained = @[]; // +0 + CFConsumeAndStopTracking((CFTypeRef)unretained, ^{}); // expected-warning {{Incorrect decrement of the reference count of an object that is not owned at this point by the caller}} +} diff --git a/test/Analysis/retain-release-inline.m b/test/Analysis/retain-release-inline.m index 610df7f..6ff9e9a 100644 --- a/test/Analysis/retain-release-inline.m +++ b/test/Analysis/retain-release-inline.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -fblocks -analyzer-ipa=inlining -verify %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -fblocks -verify %s //===----------------------------------------------------------------------===// // The following code is reduced using delta-debugging from 
Mac OS X headers: @@ -343,5 +343,21 @@ void test_test_return_inline_2(char *bytes) { CFRelease(str); } +extern CFStringRef getString(void); +CFStringRef testCovariantReturnType(void) __attribute__((cf_returns_retained)); +void usetestCovariantReturnType() { + CFStringRef S = ((void*)0); + S = testCovariantReturnType(); + if (S) + CFRelease(S); +} +CFStringRef testCovariantReturnType() { + CFStringRef Str = ((void*)0); + Str = getString(); + if (Str) { + CFRetain(Str); + } + return Str; +} diff --git a/test/Analysis/retain-release-path-notes-gc.m b/test/Analysis/retain-release-path-notes-gc.m index feee525..c24bf70 100644 --- a/test/Analysis/retain-release-path-notes-gc.m +++ b/test/Analysis/retain-release-path-notes-gc.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -fobjc-gc-only -analyzer-output=text -verify %s -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -fobjc-gc-only -analyzer-output=plist-multi-file %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -fobjc-gc-only -analyzer-output=plist-multi-file %s -o %t.plist +// RUN: FileCheck --input-file=%t.plist %s /*** This file is for testing the path-sensitive notes for retain/release errors. @@ -72,1339 +73,1328 @@ void retainReleaseIgnored () { } @end - -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: files -// CHECK: -// CHECK: {{.*}}retain-release-path-notes-gc.m -// CHECK: // CHECK: diagnostics -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line42 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col40 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. Core Foundation objects are not automatically garbage collected -// CHECK: message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. 
Core Foundation objects are not automatically garbage collected -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line42 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line43 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line43 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line43 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line43 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line43 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak (when using garbage collection) of an object stored into 'leaked' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak of object when using garbage collection -// CHECK: issue_context_kindfunction -// CHECK: issue_contextcreationViaCFCreate -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line43 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line47 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line47 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line47 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line47 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line47 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line47 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line47 -// CHECK: col40 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. Core Foundation objects are not automatically garbage collected -// CHECK: message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. 
Core Foundation objects are not automatically garbage collected -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line47 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line47 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line48 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col12 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col17 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference count incremented. The object now has a +2 retain count -// CHECK: message -// CHECK: Reference count incremented. The object now has a +2 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line48 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line49 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col21 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col26 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: In GC mode a call to 'CFMakeCollectable' decrements an object's retain count and registers the object with the garbage collector. An object must have a 0 retain count to be garbage collected. After this call its retain count is +1 -// CHECK: message -// CHECK: In GC mode a call to 'CFMakeCollectable' decrements an object's retain count and registers the object with the garbage collector. An object must have a 0 retain count to be garbage collected. 
After this call its retain count is +1 -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line49 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line50 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col21 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col26 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: In GC mode a call to 'NSMakeCollectable' decrements an object's retain count and registers the object with the garbage collector. Since it now has a 0 retain count the object can be automatically collected by the garbage collector -// CHECK: message -// CHECK: In GC mode a call to 'NSMakeCollectable' decrements an object's retain count and registers the object with the garbage collector. Since it now has a 0 retain count the object can be automatically collected by the garbage collector -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line51 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col12 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col17 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference count incremented. The object now has a +1 retain count. The object is not eligible for garbage collection until the retain count reaches 0 again -// CHECK: message -// CHECK: Reference count incremented. The object now has a +1 retain count. 
The object is not eligible for garbage collection until the retain count reaches 0 again -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line52 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line52 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line52 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line52 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line52 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak (when using garbage collection) of an object stored into 'leaked' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak of object when using garbage collection -// CHECK: issue_context_kindfunction -// CHECK: issue_contextmakeCollectable -// CHECK: issue_hash6 -// CHECK: location -// CHECK: -// CHECK: line52 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line56 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col37 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Method returns an Objective-C object with a +0 retain count -// CHECK: message -// CHECK: Method returns an Objective-C object with a +0 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col17 -// CHECK: file0 
-// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: In GC mode the 'retain' message has no effect -// CHECK: message -// CHECK: In GC mode the 'retain' message has no effect -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: In GC mode the 'release' message has no effect -// CHECK: message -// CHECK: In GC mode the 'release' message has no effect -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: In GC mode an 'autorelease' has no effect -// CHECK: message -// CHECK: In GC mode an 'autorelease' has no effect -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line60 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line60 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line60 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line60 -// CHECK: col13 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line60 -// CHECK: col29 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// 
CHECK: extended_message -// CHECK: Incorrect decrement of the reference count of an object that is not owned at this point by the caller -// CHECK: message -// CHECK: Incorrect decrement of the reference count of an object that is not owned at this point by the caller -// CHECK: -// CHECK: -// CHECK: descriptionIncorrect decrement of the reference count of an object that is not owned at this point by the caller -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeBad release -// CHECK: issue_context_kindfunction -// CHECK: issue_contextretainReleaseIgnored -// CHECK: issue_hash5 -// CHECK: location -// CHECK: -// CHECK: line60 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col36 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line65 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. Core Foundation objects are not automatically garbage collected -// CHECK: message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. 
Core Foundation objects are not automatically garbage collected -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col36 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line66 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object returned to caller as an owning reference (single retain count transferred to caller) -// CHECK: message -// CHECK: Object returned to caller as an owning reference (single retain count transferred to caller) -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line66 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line66 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'object' and returned from method 'getViolation' is potentially leaked when using garbage collection. Callers of this method do not expect a returned object with a +1 retain count since they expect the object to be managed by the garbage collector -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'object' and returned from method 'getViolation' is potentially leaked when using garbage collection. 
Callers of this method do not expect a returned object with a +1 retain count since they expect the object to be managed by the garbage collector -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak (when using garbage collection) of an object stored into 'object' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak of returned object when using garbage collection -// CHECK: issue_context_kindObjective-C method -// CHECK: issue_contextgetViolation -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line66 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col36 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line70 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. Core Foundation objects are not automatically garbage collected -// CHECK: message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. 
Core Foundation objects are not automatically garbage collected -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col20 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col36 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object returned to caller as an owning reference (single retain count transferred to caller) -// CHECK: message -// CHECK: Object returned to caller as an owning reference (single retain count transferred to caller) -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'object' and returned from method 'copyViolation' is potentially leaked when using garbage collection. Callers of this method do not expect a returned object with a +1 retain count since they expect the object to be managed by the garbage collector -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'object' and returned from method 'copyViolation' is potentially leaked when using garbage collection. 
Callers of this method do not expect a returned object with a +1 retain count since they expect the object to be managed by the garbage collector -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak (when using garbage collection) of an object stored into 'object' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak of returned object when using garbage collection -// CHECK: issue_context_kindObjective-C method -// CHECK: issue_contextcopyViolation -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. Core Foundation objects are not automatically garbage collected +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. 
Core Foundation objects are not automatically garbage collected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line43 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak (when using garbage collection) of an object stored into 'leaked' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak of object when using garbage collection +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextcreationViaCFCreate +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line44 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. 
Core Foundation objects are not automatically garbage collected +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. Core Foundation objects are not automatically garbage collected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line48 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference count incremented. The object now has a +2 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Reference count incremented. The object now has a +2 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line49 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: In GC mode a call to 'CFMakeCollectable' decrements an object's retain count and registers the object with the garbage collector. An object must have a 0 retain count to be garbage collected. 
After this call its retain count is +1 +// CHECK-NEXT: message +// CHECK-NEXT: In GC mode a call to 'CFMakeCollectable' decrements an object's retain count and registers the object with the garbage collector. An object must have a 0 retain count to be garbage collected. After this call its retain count is +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: In GC mode a call to 'NSMakeCollectable' decrements an object's retain count and registers the object with the garbage collector. Since it now has a 0 retain count the object can be automatically collected by the garbage collector +// CHECK-NEXT: message +// CHECK-NEXT: In GC mode a call to 'NSMakeCollectable' decrements an object's retain count and registers the object with the garbage collector. 
Since it now has a 0 retain count the object can be automatically collected by the garbage collector +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line51 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference count incremented. The object now has a +1 retain count. The object is not eligible for garbage collection until the retain count reaches 0 again +// CHECK-NEXT: message +// CHECK-NEXT: Reference count incremented. The object now has a +1 retain count. The object is not eligible for garbage collection until the retain count reaches 0 again +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak (when using garbage collection) of an object stored into 'leaked' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) 
+// CHECK-NEXT: typeLeak of object when using garbage collection +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextmakeCollectable +// CHECK-NEXT: issue_hash6 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line53 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col37 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line57 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: In GC mode the 'retain' message has no effect +// CHECK-NEXT: message +// CHECK-NEXT: In GC mode the 'retain' message has no effect +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line58 +// CHECK-NEXT: col3 
+// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: In GC mode the 'release' message has no effect +// CHECK-NEXT: message +// CHECK-NEXT: In GC mode the 'release' message has no effect +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line59 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: In GC mode an 'autorelease' has no effect +// CHECK-NEXT: message +// CHECK-NEXT: In GC mode an 'autorelease' has no effect +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line60 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 
+// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Incorrect decrement of the reference count of an object that is not owned at this point by the caller +// CHECK-NEXT: message +// CHECK-NEXT: Incorrect decrement of the reference count of an object that is not owned at this point by the caller +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionIncorrect decrement of the reference count of an object that is not owned at this point by the caller +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeBad release +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextretainReleaseIgnored +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col36 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. Core Foundation objects are not automatically garbage collected +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. 
Core Foundation objects are not automatically garbage collected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line66 +// CHECK-NEXT: col36 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'object' and returned from method 'getViolation' is potentially leaked when using garbage collection. Callers of this method do not expect a returned object with a +1 retain count since they expect the object to be managed by the garbage collector +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'object' and returned from method 'getViolation' is potentially leaked when using garbage collection. 
Callers of this method do not expect a returned object with a +1 retain count since they expect the object to be managed by the garbage collector +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak (when using garbage collection) of an object stored into 'object' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak of returned object when using garbage collection +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contextgetViolation +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line67 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col36 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. Core Foundation objects are not automatically garbage collected +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count. 
Core Foundation objects are not automatically garbage collected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line71 +// CHECK-NEXT: col36 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'object' and returned from method 'copyViolation' is potentially leaked when using garbage collection. Callers of this method do not expect a returned object with a +1 retain count since they expect the object to be managed by the garbage collector +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'object' and returned from method 'copyViolation' is potentially leaked when using garbage collection. 
Callers of this method do not expect a returned object with a +1 retain count since they expect the object to be managed by the garbage collector
+// CHECK-NEXT:
+// CHECK-NEXT:
+// CHECK-NEXT: descriptionPotential leak (when using garbage collection) of an object stored into 'object'
+// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C)
+// CHECK-NEXT: typeLeak of returned object when using garbage collection
+// CHECK-NEXT: issue_context_kindObjective-C method
+// CHECK-NEXT: issue_contextcopyViolation
+// CHECK-NEXT: issue_hash2
+// CHECK-NEXT: location
+// CHECK-NEXT:
+// CHECK-NEXT: line72
+// CHECK-NEXT: col3
+// CHECK-NEXT: file0
+// CHECK-NEXT:
+// CHECK-NEXT:
+// CHECK-NEXT:
diff --git a/test/Analysis/retain-release-path-notes.m b/test/Analysis/retain-release-path-notes.m
index ebcfd6a..0daeecb 100644
--- a/test/Analysis/retain-release-path-notes.m
+++ b/test/Analysis/retain-release-path-notes.m
@@ -1,5 +1,6 @@
 // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -analyzer-output=text -verify %s
-// RN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -analyzer-output=plist-multi-file %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -analyzer-output=plist-multi-file %s -o %t
+// RUN: FileCheck --input-file=%t %s
 /***
 This file is for testing the path-sensitive notes for retain/release errors.
@@ -190,4424 +191,4414 @@ void testDictionary(id key, id value) {
 }
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK: files
-// CHECK:
-// CHECK: {{.*}}retain-release-path-notes.m
-// CHECK: // CHECK: diagnostics
-// CHECK:
-// CHECK:
-// CHECK: path
-// CHECK:
-// CHECK:
-// CHECK: kindcontrol
-// CHECK: edges
-// CHECK:
-// CHECK:
-// CHECK: start
-// CHECK:
-// CHECK:
-// CHECK: line45
-// CHECK: col3
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK: line45
-// CHECK: col4
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK: end
-// CHECK:
-// CHECK:
-// CHECK: line45
-// CHECK: col15
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK: line45
-// CHECK: col15
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK: kindevent
-// CHECK: location
-// CHECK:
-// CHECK: line45
-// CHECK: col15
-// CHECK: file0
-// CHECK:
-// CHECK: ranges
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK: line45
-// CHECK: col15
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK: line45
-// CHECK: col37
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK: depth0
-// CHECK: extended_message
-// CHECK: Method returns an Objective-C object with a +1 retain count
-// CHECK: message
-// CHECK: Method returns an Objective-C object with a +1 retain count
-// CHECK:
-// CHECK:
-// CHECK: kindcontrol
-// CHECK: edges
-// CHECK:
-// CHECK:
-// CHECK: start
-// CHECK:
-// CHECK:
-// CHECK: line45
-// CHECK: col15
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK: line45
-// CHECK: col15
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK: end
-// CHECK:
-// CHECK:
-// CHECK: line46
-// CHECK: col3
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK: line46
-// CHECK: col8
-// CHECK: file0
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK:
-// CHECK: kindevent
-// CHECK: location -// CHECK: -// CHECK: line46 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line46 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line46 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak of an object stored into 'leaked' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextcreationViaAlloc -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line46 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line50 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col40 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count -// CHECK: message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line50 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line51 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line51 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak of an object stored into 'leaked' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak -// CHECK: issue_context_kindfunction -// CHECK: 
issue_contextcreationViaCFCreate -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line51 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line55 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col35 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Method returns an Objective-C object with a +0 retain count -// CHECK: message -// CHECK: Method returns an Objective-C object with a +0 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line55 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line56 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col17 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference count incremented. The object now has a +1 retain count -// CHECK: message -// CHECK: Reference count incremented. The object now has a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line56 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col17 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference count incremented. 
The object now has a +2 retain count -// CHECK: message -// CHECK: Reference count incremented. The object now has a +2 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line57 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference count decremented. The object now has a +1 retain count -// CHECK: message -// CHECK: Reference count decremented. The object now has a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line58 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line59 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak of an object stored into 'leaked' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextacquisitionViaMethod -// CHECK: issue_hash5 -// CHECK: location -// CHECK: -// CHECK: line59 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line63 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line63 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line63 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line63 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line63 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// 
CHECK: -// CHECK: -// CHECK: -// CHECK: line63 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line63 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Property returns an Objective-C object with a +0 retain count -// CHECK: message -// CHECK: Property returns an Objective-C object with a +0 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line63 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line63 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line64 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line64 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line64 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line64 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line64 -// CHECK: col17 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line64 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line64 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference count incremented. The object now has a +1 retain count -// CHECK: message -// CHECK: Reference count incremented. The object now has a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line64 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line64 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line65 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line65 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak of an object stored into 'leaked' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextacquisitionViaProperty -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line65 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col22 -// CHECK: file0 -// 
CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col35 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line69 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col37 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFGetSomething' returns a Core Foundation object with a +0 retain count -// CHECK: message -// CHECK: Call to function 'CFGetSomething' returns a Core Foundation object with a +0 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line69 -// CHECK: col35 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line70 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col12 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col17 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference count incremented. The object now has a +1 retain count -// CHECK: message -// CHECK: Reference count incremented. 
The object now has a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line70 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line71 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak of an object stored into 'leaked' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextacquisitionViaCFFunction -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line71 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line75 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col37 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Method returns an Objective-C object with a +1 retain count -// CHECK: message -// CHECK: Method returns an Objective-C object with a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line75 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line76 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: 
-// CHECK: line76 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object released by directly sending the '-dealloc' message -// CHECK: message -// CHECK: Object released by directly sending the '-dealloc' message -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line76 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line77 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line77 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line77 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line77 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line77 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference-counted object is used after it is released -// CHECK: message -// CHECK: Reference-counted object is used after it is released -// CHECK: -// CHECK: -// CHECK: descriptionReference-counted object is used after it is released -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeUse-after-release -// CHECK: issue_context_kindfunction -// CHECK: issue_contextexplicitDealloc -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line77 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line81 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col37 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Method returns an Objective-C object with a +1 retain count -// CHECK: message -// CHECK: Method returns an Objective-C object with a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line81 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line82 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line82 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line82 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line82 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line82 -// CHECK: col18 -// CHECK: file0 -// CHECK: -// 
CHECK: -// CHECK: -// CHECK: -// CHECK: line82 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line82 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object released -// CHECK: message -// CHECK: Object released -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line82 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line82 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line83 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line83 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line83 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line83 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line83 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Reference-counted object is used after it is released -// CHECK: message -// CHECK: Reference-counted object is used after it is released -// CHECK: -// CHECK: -// CHECK: descriptionReference-counted object is used after it is released -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeUse-after-release -// CHECK: issue_context_kindfunction -// CHECK: issue_contextimplicitDealloc -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line83 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line87 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line87 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line87 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line87 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line87 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line87 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line87 -// CHECK: col37 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Method returns an Objective-C object with a +1 retain count -// CHECK: message -// CHECK: Method returns an Objective-C object with a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line87 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line87 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line88 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line88 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line88 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line88 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line88 -// CHECK: col22 -// CHECK: file0 
-// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line88 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line88 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object sent -autorelease message -// CHECK: message -// CHECK: Object sent -autorelease message -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line88 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line88 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line89 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line89 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line89 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line89 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line89 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line89 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line89 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object sent -autorelease message -// CHECK: message -// CHECK: Object sent -autorelease message -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line89 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line89 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line90 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line90 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line90 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line90 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line90 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object over-autoreleased: object was sent -autorelease 2 times but the object has a +1 retain count -// CHECK: message -// CHECK: Object over-autoreleased: object was sent -autorelease 2 times but the object has a +1 retain count -// CHECK: -// CHECK: -// CHECK: descriptionObject sent -autorelease too many times -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeObject sent -autorelease too many times -// CHECK: issue_context_kindfunction -// CHECK: issue_contextoverAutorelease -// CHECK: issue_hash4 -// CHECK: location -// CHECK: -// CHECK: line90 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line94 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line94 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line94 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line94 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// 
CHECK: location -// CHECK: -// CHECK: line94 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line94 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line94 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Property returns an Objective-C object with a +0 retain count -// CHECK: message -// CHECK: Property returns an Objective-C object with a +0 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line94 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line94 -// CHECK: col31 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line95 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line95 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line95 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line95 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line95 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line95 -// CHECK: col4 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line95 -// CHECK: col9 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object sent -autorelease message -// CHECK: message -// CHECK: Object sent -autorelease message -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line95 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line95 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line96 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line96 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line96 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line96 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line96 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object over-autoreleased: object was sent -autorelease but the object has a +0 retain count -// CHECK: message -// CHECK: Object over-autoreleased: object was sent -autorelease but the object has a +0 retain count -// CHECK: -// CHECK: -// CHECK: descriptionObject sent -autorelease too many times -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeObject sent -autorelease too many times -// CHECK: issue_context_kindfunction -// CHECK: issue_contextautoreleaseUnowned -// CHECK: issue_hash3 -// CHECK: location -// CHECK: -// CHECK: line96 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line100 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line100 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line100 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// 
CHECK: -// CHECK: line100 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line100 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line100 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line100 -// CHECK: col40 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count -// CHECK: message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line100 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line100 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line101 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col21 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col26 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: When GC is not enabled a call to 'CFMakeCollectable' has no effect on its argument -// CHECK: message -// CHECK: When GC is not enabled a call to 'CFMakeCollectable' has no effect on its argument -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line101 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line102 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col27 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col21 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col26 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: When GC is not enabled a call to 'NSMakeCollectable' has no effect on its argument -// CHECK: message -// CHECK: When GC is not enabled a call to 'NSMakeCollectable' has no effect on its argument -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line102 -// CHECK: col19 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: 
line103 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line103 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line103 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line103 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line103 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: message -// CHECK: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 -// CHECK: -// CHECK: -// CHECK: descriptionPotential leak of an object stored into 'leaked' -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeLeak -// CHECK: issue_context_kindfunction -// CHECK: issue_contextmakeCollectableIgnored -// CHECK: issue_hash4 -// CHECK: location -// CHECK: -// CHECK: line103 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line107 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line107 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line107 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line107 -// CHECK: col35 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line107 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line107 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line107 -// CHECK: col37 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFGetSomething' returns a Core Foundation object with a +0 retain count -// CHECK: message -// CHECK: Call to function 'CFGetSomething' returns a Core Foundation object with a +0 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line107 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line107 -// CHECK: col35 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line108 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line108 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line108 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line108 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line108 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line108 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line108 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object returned to caller with a +0 retain count -// CHECK: message -// CHECK: Object returned to caller with a +0 retain count -// 
CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line108 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line108 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line108 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected -// CHECK: message -// CHECK: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected -// CHECK: -// CHECK: -// CHECK: descriptionObject with a +0 retain count returned to caller where a +1 (owning) retain count is expected -// CHECK: categoryMemory (Core Foundation/Objective-C) -// CHECK: typeMethod should return an owned object -// CHECK: issue_context_kindfunction -// CHECK: issue_contextCFCopyRuleViolation -// CHECK: issue_hash2 -// CHECK: location -// CHECK: -// CHECK: line108 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: path -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line112 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line112 -// CHECK: col11 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line112 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line112 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line112 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line112 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line112 -// CHECK: col40 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count -// CHECK: message -// CHECK: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count -// CHECK: -// CHECK: -// CHECK: kindcontrol -// CHECK: edges -// CHECK: -// CHECK: -// CHECK: start -// CHECK: -// CHECK: -// CHECK: line112 -// CHECK: col22 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line112 -// CHECK: col38 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: end -// CHECK: -// CHECK: -// CHECK: line113 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line113 -// CHECK: col8 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: -// CHECK: line113 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: ranges -// CHECK: -// CHECK: -// CHECK: -// CHECK: line113 -// CHECK: col3 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line113 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: -// CHECK: line113 -// CHECK: col10 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: line113 -// CHECK: col15 -// CHECK: file0 -// CHECK: -// CHECK: -// CHECK: -// CHECK: depth0 -// CHECK: extended_message -// CHECK: Object returned to caller as an owning reference (single retain count transferred to caller) -// CHECK: message -// CHECK: Object returned to caller as an owning reference (single retain count transferred to caller) -// CHECK: -// CHECK: -// CHECK: kindevent -// CHECK: location -// CHECK: 
[Elided: flattened plist CHECK expectations; the recoverable content is summarized below.]

The removed "// CHECK:" expectations cover the loosely matched plist output (locations, ranges,
control edges, event messages, and issue metadata, all in category "Memory (Core
Foundation/Objective-C)") for the diagnostics at lines 113-189 of the test source:
 * "Leak of returned object": an object stored into 'object' is returned from CFGetRuleViolation,
   whose name contains neither 'Copy' nor 'Create' (issue at line 113), and an object stored into
   'result' is returned from -getViolation, whose name does not start with 'copy', 'mutableCopy',
   'alloc' or 'new' (issue at line 134).
 * "Method should return an owned object": a +0 object is returned where a +1 (owning) retain count
   is expected, in -copyViolation (line 119), -copyViolationIndexedSubscript (line 124),
   -copyViolationKeyedSubscript (line 129), and -copyAutorelease (line 140).
 * "Bad release": incorrect decrement of a +0 object not owned by the caller, obtained from an
   NSNumber literal in testNumericLiteral (line 169), a boxed integer in testBoxedInt (line 174),
   a boxed string in testBoxedString (line 179), an NSArray literal in testArray (line 184), and
   an NSDictionary literal in testDictionary (line 189).

The added "// CHECK-NEXT:" expectations verify the same plist structure with strict line-by-line
matching, for the paths at lines 46-91 of the test source:
 * "Leak" ("Potential leak of an object stored into 'leaked'") in creationViaAlloc (line 47),
   creationViaCFCreate (line 52), acquisitionViaMethod (line 60, with two retains and one release
   along the path), acquisitionViaProperty (line 66), and acquisitionViaCFFunction (line 72).
 * "Use-after-release" after the object is released by directly sending the '-dealloc' message in
   explicitDealloc (line 78), and after the object is released in implicitDealloc (line 84).
 * "Object sent -autorelease too many times": the object is sent -autorelease twice while holding
   a +1 retain count, in overAutorelease (line 91).
The added expectations continue below with the next reported path.
CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Property returns an Objective-C object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Property returns an Objective-C object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line95 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object sent -autorelease message +// CHECK-NEXT: message +// CHECK-NEXT: Object sent -autorelease message +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line96 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// 
CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object over-autoreleased: object was sent -autorelease but the object has a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Object over-autoreleased: object was sent -autorelease but the object has a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionObject sent -autorelease too many times +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeObject sent -autorelease too many times +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextautoreleaseUnowned +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line97 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line101 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line102 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line102 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line102 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: 
line102 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line102 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line102 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line102 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: When GC is not enabled a call to 'CFMakeCollectable' has no effect on its argument +// CHECK-NEXT: message +// CHECK-NEXT: When GC is not enabled a call to 'CFMakeCollectable' has no effect on its argument +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line102 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line102 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: When GC is not enabled a call to 'NSMakeCollectable' has no effect on its argument +// CHECK-NEXT: message +// CHECK-NEXT: When GC is not enabled a call to 'NSMakeCollectable' has no effect on its argument +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line103 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line104 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line104 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line104 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line104 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line104 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object 
allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'leaked' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'leaked' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextmakeCollectableIgnored +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line104 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col35 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col37 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFGetSomething' returns a Core Foundation object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFGetSomething' returns a Core Foundation object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line108 +// CHECK-NEXT: col35 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionObject with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeMethod should return an owned object +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextCFCopyRuleViolation +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line109 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFCreateSomething' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line113 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'object' is returned from a function whose name ('CFGetRuleViolation') does not contain 'Copy' or 'Create'. This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'object' is returned from a function whose name ('CFGetRuleViolation') does not contain 'Copy' or 'Create'. 
This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'object' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak of returned object +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextCFGetRuleViolation +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line114 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col32 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col32 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Property returns an Objective-C object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Property returns an Objective-C object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line119 +// CHECK-NEXT: col32 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionObject with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeMethod should return an owned object +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contextcopyViolation +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line120 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Subscript returns an Objective-C object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Subscript returns an Objective-C object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line124 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionObject with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeMethod should return an owned object +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contextcopyViolationIndexedSubscript +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line125 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Subscript returns an Objective-C object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Subscript returns an Objective-C object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line129 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionObject with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeMethod should return an owned object +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contextcopyViolationKeyedSubscript +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line130 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col15 
+// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col32 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line134 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'result' is returned from a method whose name ('getViolation') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'. This violates the naming convention rules given in the Memory Management Guide for Cocoa +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'result' is returned from a method whose name ('getViolation') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'. 
This violates the naming convention rules given in the Memory Management Guide for Cocoa +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'result' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak of returned object +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contextgetViolation +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line135 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col32 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line139 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object sent -autorelease message +// CHECK-NEXT: message +// CHECK-NEXT: Object sent -autorelease message +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// 
CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line140 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionObject with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeMethod should return an owned object +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contextcopyAutorelease +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line141 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line169 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// 
[... expected plist output (// CHECK-NEXT lines), continued: control edges, event
 locations/ranges and per-issue records (category "Memory (Core Foundation/Objective-C)",
 type "Bad release", description "Incorrect decrement of the reference count of an object
 that is not owned at this point by the caller") for:
   - testNumericLiteral: "NSNumber literal is an object with a +0 retain count"
     (line 169); incorrect decrement at line 170
   - testBoxedInt: "NSNumber boxed expression produces an object with a +0 retain count"
     (line 174); incorrect decrement at line 175
   - testBoxedString: "NSString boxed expression produces an object with a +0 retain
     count" (line 179); incorrect decrement at line 180
   - testArray: "NSArray literal is an object with a +0 retain count" (line 184);
     incorrect decrement at line 185
   - testDictionary: "NSDictionary literal is an object with a +0 retain count"
     (line 189) ...]
[... expected plist output, continued: incorrect decrement ("Bad release") in
 testDictionary at line 190 ...]

diff --git a/test/Analysis/retain-release.m b/test/Analysis/retain-release.m
index ba492b7..eb2554f 100644
--- a/test/Analysis/retain-release.m
+++ b/test/Analysis/retain-release.m
@@ -1,5 +1,8 @@
-// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -fblocks -verify -Wno-objc-root-class %s
-// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -fblocks -verify -x objective-c++ -Wno-objc-root-class %s
+// RUN: rm -f %t.objc.plist %t.objcpp.plist
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -fblocks -verify -Wno-objc-root-class %s -analyzer-output=plist -o %t.objc.plist
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,osx.coreFoundation.CFRetainRelease,osx.cocoa.ClassRelease,osx.cocoa.RetainCount -analyzer-store=region -fblocks -verify -x objective-c++ -std=gnu++98 -Wno-objc-root-class %s -analyzer-output=plist -o %t.objcpp.plist
+// FIXLATER: cat %t.objc.plist ; FileCheck --input-file=%t.objc.plist %s
+// FIXLATER: cat %t.objcpp.plist ; FileCheck --input-file=%t.objcpp.plist %s
 
 #if __has_feature(attribute_ns_returns_retained)
 #define NS_RETURNS_RETAINED __attribute__((ns_returns_retained))
@@ -59,6 +62,7 @@ typedef const struct __CFAllocator * CFAllocatorRef;
 extern const CFAllocatorRef kCFAllocatorDefault;
 extern CFTypeRef CFRetain(CFTypeRef cf);
 extern void CFRelease(CFTypeRef cf);
+extern CFTypeRef CFMakeCollectable(CFTypeRef cf);
 
 typedef struct {
 } CFArrayCallBacks;
@@ -303,6 +307,10 @@ extern CGColorSpaceRef CGColorSpaceCreateDeviceRGB(void);
 // This is how NSMakeCollectable is declared in the OS X 10.8 headers.
 id NSMakeCollectable(CFTypeRef __attribute__((cf_consumed))) __attribute__((ns_returns_retained));
 
+typedef const struct __CFUUID * CFUUIDRef;
+
+extern
+void *CFPlugInInstanceCreate(CFAllocatorRef allocator, CFUUIDRef factoryUUID, CFUUIDRef typeUUID);
 
 //===----------------------------------------------------------------------===//
 // Test cases.
@@ -501,31 +509,39 @@ void f15() {
   CFRelease(*B);  // no-warning
 }
 
-// Test when we pass NULL to CFRetain/CFRelease.
+// Test when we pass NULL to CFRetain/CFRelease/CFMakeCollectable.
 void f16(int x, CFTypeRef p) {
   if (p)
     return;
-  if (x) {
+  if (x > 0) {
     CFRelease(p); // expected-warning{{Null pointer argument in call to CFRelease}}
   }
-  else {
+  else if (x < 0) {
     CFRetain(p); // expected-warning{{Null pointer argument in call to CFRetain}}
   }
+  else {
+    CFMakeCollectable(p); // expected-warning{{Null pointer argument in call to CFMakeCollectable}}
+  }
 }
 
 // Test that an object is non-null after being CFRetained/CFReleased.
 void f17(int x, CFTypeRef p) {
-  if (x) {
+  if (x > 0) {
     CFRelease(p);
     if (!p)
       CFRelease(0); // no-warning
   }
-  else {
+  else if (x < 0) {
     CFRetain(p);
     if (!p)
       CFRetain(0); // no-warning
   }
+  else {
+    CFMakeCollectable(p);
+    if (!p)
+      CFMakeCollectable(0); // no-warning
+  }
 }
 
 // Test basic tracking of ivars associated with 'self'.  For the retain/release
@@ -828,8 +844,8 @@ int RDar6320065_test() {
 @end
 
 void test_RDar6859457(RDar6859457 *x, void *bytes, NSUInteger dataLength) {
-  [x NoCopyString]; // no-warning
-  [x noCopyString]; // no-warning
+  [x NoCopyString]; // expected-warning{{leak}}
+  [x noCopyString]; // expected-warning{{leak}}
   [NSData dataWithBytesNoCopy:bytes length:dataLength];  // no-warning
   [NSData dataWithBytesNoCopy:bytes length:dataLength freeWhenDone:1]; // no-warning
 }
@@ -1341,6 +1357,15 @@ void testattr4() {
   consume_cf(y);
 }
 
+@interface TestOwnershipAttr2 : NSObject
+- (NSString*) newString NS_RETURNS_NOT_RETAINED; // no-warning
+@end
+
+@implementation TestOwnershipAttr2
+- (NSString*) newString {
+  return [NSString alloc]; // expected-warning {{Potential leak of an object}}
+}
+@end
 
 @interface MyClassTestCFAttr : NSObject {}
 - (NSDate*) returnsCFRetained CF_RETURNS_RETAINED;
@@ -1748,7 +1773,7 @@ extern id NSApp;
 @end
 
 //===----------------------------------------------------------------------===//
 // Test returning allocated memory in a struct.
-// 
+//
 // We currently don't have a general way to track pointers that "escape".
 // Here we test that RetainCountChecker doesn't get excited about returning
 // allocated CF objects in struct fields.
@@ -1855,3 +1880,22797 @@ id makeCollectableNonLeak() {
   [objCObject release]; // +1
   return [objCObject autorelease]; // +0
 }
+
+
+void consumeAndStopTracking(id NS_CONSUMED obj, void (^callback)(void));
+void CFConsumeAndStopTracking(CFTypeRef CF_CONSUMED obj, void (^callback)(void));
+
+void testConsumeAndStopTracking() {
+  id retained = [@[] retain]; // +1
+  consumeAndStopTracking(retained, ^{}); // no-warning
+
+  id doubleRetained = [[@[] retain] retain]; // +2
+  consumeAndStopTracking(doubleRetained, ^{
+    [doubleRetained release];
+  }); // no-warning
+
+  id unretained = @[]; // +0
+  consumeAndStopTracking(unretained, ^{}); // expected-warning {{Incorrect decrement of the reference count of an object that is not owned at this point by the caller}}
+}
+
+void testCFConsumeAndStopTracking() {
+  id retained = [@[] retain]; // +1
+  CFConsumeAndStopTracking((CFTypeRef)retained, ^{}); // no-warning
+
+  id doubleRetained = [[@[] retain] retain]; // +2
+  CFConsumeAndStopTracking((CFTypeRef)doubleRetained, ^{
+    [doubleRetained release];
+  }); // no-warning
+
+  id unretained = @[]; // +0
+  CFConsumeAndStopTracking((CFTypeRef)unretained, ^{}); // expected-warning {{Incorrect decrement of the reference count of an object that is not owned at this point by the caller}}
+}
+//===----------------------------------------------------------------------===//
+// Test 'pragma clang arc_cf_code_audited' support.
+//===----------------------------------------------------------------------===//
+
+typedef void *MyCFType;
+#pragma clang arc_cf_code_audited begin
+MyCFType CreateMyCFType();
+#pragma clang arc_cf_code_audited end
+
+void test_custom_cf() {
+  MyCFType x = CreateMyCFType(); // expected-warning {{leak of an object stored into 'x'}}
+}
+
+//===----------------------------------------------------------------------===//
+// Test calling CFPlugInInstanceCreate, which appears in CF but doesn't
+// return a CF object.
+//===----------------------------------------------------------------------===//
+
+void test_CFPlugInInstanceCreate(CFUUIDRef factoryUUID, CFUUIDRef typeUUID) {
+  CFPlugInInstanceCreate(kCFAllocatorDefault, factoryUUID, typeUUID); // no-warning
+}
+
[... expected plist output for this file (the bulk of the 22,797 lines added by this
 hunk, as // CHECK / // CHECK-NEXT lines). The portion covered here records, with full
 control edges, locations and ranges, all under category "Memory (Core
 Foundation/Objective-C)":
   - f1 (issue at line 325) and f2 (line 336): "Call to function 'CFDateCreate' returns
     a Core Foundation object with a +1 retain count", "Reference count incremented. The
     object now has a +2 retain count", "Reference count decremented. The object now has
     a +1 retain count", "Object released", then "Reference-counted object is used after
     it is released" (type "Use-after-release")
   - f5 (line 372): CFDateCreate at line 367, "Assuming 'x' is 0", then "Object leaked:
     object allocated and stored into 'date' is not referenced later in this execution
     path and has a retain count of +1" (type "Leak", "Potential leak of an object stored
     into 'date'")
   - f6 (line 381): CFDateCreate at line 378, CFRetain at line 379, "Object returned to
     caller as an owning reference (single retain count transferred to caller)" at line
     380, then the same +1 leak at the end of the function
   - f7 (line 389): one path with a +2 leak, and a second path through CFDateCreate at
     line 388 ending in "Object leaked: object allocated and stored into 'date' is
     returned from a function whose name ('f7') does not contain 'Copy' or 'Create'. This
     violates the naming convention rules given in the Memory Management Guide for Core
     Foundation" (type "Leak of returned object")
   - f8 (line 400): "Call to function 'MyDateCreate' returns a Core Foundation object
     with a +1 retain count" at line 397, CFRetain at line 398, an owning reference
     returned at line 399, then the same +1 leak
   - the start of the next path: "Variable 'p' initialized to a null pointer value"
     (line 404) and "Assuming 'date' is null" (line 406) ...]
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line406 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line406 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line406 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line406 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line406 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextf9 +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line406 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col42 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col75 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'DADiskCreateFromBSDName' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'DADiskCreateFromBSDName' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col42 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is non-null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is non-null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col48 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col48 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col48 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col48 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col48 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'disk' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'disk' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'disk' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextf10 +// CHECK-NEXT: issue_hash7 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col48 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 
+// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col46 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col49 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'DADiskCopyDescription' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'DADiskCopyDescription' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col46 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: 
line422 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'dict' is non-null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'dict' is non-null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'dict' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'dict' is not referenced later in this execution path and has a retain count of +1 +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'dict' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextf10 +// CHECK-NEXT: issue_hash8 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// 
CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'dict' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'dict' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col28 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'DADiskCopyWholeDisk' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'DADiskCopyWholeDisk' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col28 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is non-null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is non-null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'disk' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'disk' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'disk' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextf10 +// CHECK-NEXT: issue_hash11 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line415 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line416 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col32 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: line418 +// CHECK-NEXT: col63 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'DADiskCreateFromIOMedia' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'DADiskCreateFromIOMedia' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line418 +// CHECK-NEXT: col32 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'disk' is non-null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'disk' is non-null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line419 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// 
CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line421 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'dict' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'dict' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line422 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line424 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line425 +// CHECK-NEXT: col3 +// 
[Plist CHECK-NEXT expectations garbled by extraction: the plist XML markup (the <key>, <integer>, <string>, <array>, and <dict> tags) was stripped and the "+// CHECK-NEXT:" diff lines were run together. The recoverable content is the expected retain-count checker path diagnostics, all in category "Memory (Core Foundation/Objective-C)" unless noted: in 'f10', leaks of the objects stored into 'disk' (issue_hash 14), 'dissenter' (issue_hash 15), and 'session' (issue_hash 18), built from "Assuming 'disk'/'dict'/'dissenter'/'session' is null/non-null" events and from "returns a Core Foundation object with a +1 retain count" events for 'DADissenterCreate' and 'DASessionCreate'; in 'f11', a "Bad release" report (issue_hash 21) where 'CFArrayGetValueAtIndex' returns a +0 object that is then incorrectly released ("Incorrect decrement of the reference count of an object that is not owned at this point by the caller"); in 'f12', a leak of 'o' returned at +1 by 'MyCreateFun' (issue_hash 2); in 'f13_autorelease_b', '_c', and '_d', pairs of "Object sent -autorelease message" events followed by "Object over-autoreleased" reports (issue_hash 4 each); in 'f14_leakimmediately', a leak of the unreferenced 'CFArrayCreateMutable' result (issue_hash 2); and in 'f16', "Null pointer argument in call to CFRelease" (category "API Misuse (Apple)", type "null passed to CFRetain/CFRelease", issue_hash 5). Every event carries the usual kind/location/ranges/depth/extended_message keys with line and column numbers in file0; the remaining 'f16' paths continue below.]
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line513 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line513 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line513 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'p' is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line513 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line513 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line513 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming pointer value is null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming pointer value is null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line513 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line513 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'x' is 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'x' is 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line516 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line520 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line520 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line520 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line520 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line520 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Null pointer argument in call to CFRetain +// CHECK-NEXT: message +// CHECK-NEXT: Null pointer argument in call to CFRetain +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionNull pointer argument in call to CFRetain +// CHECK-NEXT: categoryAPI Misuse (Apple) +// CHECK-NEXT: typenull passed to CFRetain/CFRelease +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextf16 +// CHECK-NEXT: issue_hash8 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line520 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col55 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line561 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// 
CHECK-NEXT: line562 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: message +// CHECK-NEXT: Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionObject with a +0 retain count returned to caller where a +1 (owning) retain count is expected +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeMethod should return an owned object +// CHECK-NEXT: issue_context_kindObjective-C method +// CHECK-NEXT: issue_contextnewString +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line562 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col63 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: 
kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'name' is nil +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'name' is nil +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line583 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line583 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line583 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line583 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line583 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'kind' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'kind' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'kind' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: 
typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_6659160 +// CHECK-NEXT: issue_hash13 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line583 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'name' is non-nil +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'name' is non-nil +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'kindC' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'kindC' initialized to a null 
pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'kind' is nil +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'kind' is nil +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line595 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line595 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line595 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line595 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line596 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line596 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line596 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line596 +// CHECK-NEXT: col9 +// 
CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Array access (from variable 'kindC') results in a null pointer dereference +// CHECK-NEXT: message +// CHECK-NEXT: Array access (from variable 'kindC') results in a null pointer dereference +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionArray access (from variable 'kindC') results in a null pointer dereference +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_6659160 +// CHECK-NEXT: issue_hash27 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line575 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col57 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line581 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'name' is non-nil +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'name' is non-nil +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line582 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line585 
+// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'kind' is non-nil +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'kind' is non-nil +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line593 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line594 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line594 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line594 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line594 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line595 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line595 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line595 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line595 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line596 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line596 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line596 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line596 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line597 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line599 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line599 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line599 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line599 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line602 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line602 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line602 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line602 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line603 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line603 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line603 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line603 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line603 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Incorrect decrement of the reference count of an object that is not owned at this point by the caller +// CHECK-NEXT: message +// CHECK-NEXT: Incorrect decrement of the reference count of an object that is not owned at this point by the caller +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionIncorrect decrement of the reference count of an object that is not owned at this point by 
the caller +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeBad release +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_6659160 +// CHECK-NEXT: issue_hash33 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line603 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col34 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line625 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object released by directly sending the '-dealloc' message +// CHECK-NEXT: message +// CHECK-NEXT: Object released by directly sending the '-dealloc' message +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col3 +// CHECK-NEXT: 
file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line626 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line627 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line627 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line627 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line627 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line627 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionReference-counted object is used after it is released +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeUse-after-release +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextpr3820_ReleaseAfterDealloc +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line627 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line633 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line633 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col34 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// 
CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line634 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object released +// CHECK-NEXT: message +// CHECK-NEXT: Object released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line635 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line636 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line636 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line636 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line636 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line636 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionReference-counted object is used after it is released +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeUse-after-release +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextpr3820_DeallocAfterRelease +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line636 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col2 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col76 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col84 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line688 +// CHECK-NEXT: col76 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference count incremented. The object now has a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Reference count incremented. 
[FileCheck CHECK-NEXT expectations for the retain-count checker's plist path diagnostics: control-flow edges and event entries for the leak, bad-release, use-after-release, and "method should return an owned object" reports at test-file lines 688-955, covering applicationDidFinishLaunching:, radar10102244, rdar_6257780_Case1, initReturningNewClassBad, initReturningNewClassBad2, NoCopyString, noCopyString, test_RDar6859457, rdar6902710, rdar6945561, and the IOKit wrapper functions (IOBSDNameMatching_wrapper, IOServiceMatching_wrapper, IOServiceNameMatching_wrapper, IOServiceAddNotification_wrapper, IOServiceGetMatchingService_wrapper, IORegistryEntryIDMatching_wrapper, IOOpenFirmwarePathMatching_wrapper); the plist key/value markup is elided.]
line955 +// CHECK-NEXT: col39 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line955 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line955 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line955 +// CHECK-NEXT: col41 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CreateDict' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CreateDict' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line955 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line955 +// CHECK-NEXT: col39 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col62 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col44 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col51 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object released +// CHECK-NEXT: message +// CHECK-NEXT: Object released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line956 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line957 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line957 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line957 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line957 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line957 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// 
CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionReference-counted object is used after it is released +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeUse-after-release +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextIOServiceGetMatchingServices_wrapper +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line957 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col39 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col41 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CreateDict' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CreateDict' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line963 +// CHECK-NEXT: col39 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col34 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col106 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col66 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col73 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object released +// CHECK-NEXT: message 
+// CHECK-NEXT: Object released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line964 +// CHECK-NEXT: col34 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line965 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line965 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line965 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line965 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line965 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionReference-counted object is used after it is released +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeUse-after-release +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextIOServiceAddMatchingNotification_wrapper +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line965 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1003 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1003 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col53 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1006 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1007 +// CHECK-NEXT: col46 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1007 +// CHECK-NEXT: col56 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1007 +// CHECK-NEXT: col46 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1007 +// CHECK-NEXT: col56 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference count decremented +// CHECK-NEXT: message +// CHECK-NEXT: Reference count decremented +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1008 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1009 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1009 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1009 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1009 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: line1009 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1009 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1009 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference count incremented. The object now has a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Reference count incremented. The object now has a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1009 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1009 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1010 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1010 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1010 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1010 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1010 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'number' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'number' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'number' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_7152619 +// CHECK-NEXT: issue_hash8 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1010 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1019 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1019 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col15 +// 
CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1031 +// CHECK-NEXT: col41 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1031 +// CHECK-NEXT: col67 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1031 +// CHECK-NEXT: col41 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1031 +// CHECK-NEXT: col41 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1031 +// CHECK-NEXT: col69 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CGColorSpaceCreateDeviceRGB' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CGColorSpaceCreateDeviceRGB' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1031 +// CHECK-NEXT: col41 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1031 +// CHECK-NEXT: col67 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: allocated object is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: allocated object is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_7184450 +// CHECK-NEXT: issue_hash12 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1030 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1041 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1041 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col66 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col68 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CGColorSpaceCreateDeviceRGB' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CGColorSpaceCreateDeviceRGB' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col40 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col66 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: allocated object is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: allocated object is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_7184450_pos +// CHECK-NEXT: issue_hash12 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// 
CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1041 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1041 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1052 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col107 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CGGradientCreateWithColorComponents' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CGGradientCreateWithColorComponents' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1053 +// CHECK-NEXT: col38 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1057 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1057 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1057 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'myGradient' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'myGradient' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'myGradient' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// 
CHECK-NEXT: issue_contextrdar_7184450_pos +// CHECK-NEXT: issue_hash17 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1057 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col53 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1091 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1092 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1092 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1092 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'number' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'number' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'number' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_7299394_positive +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1092 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1224 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 
+// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1224 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1226 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1226 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1226 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1226 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1227 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CGBitmapContextCreateWithData' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CGBitmapContextCreateWithData' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1226 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1226 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1228 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1228 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1228 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: allocated object is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: allocated object is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_7358899 +// CHECK-NEXT: issue_hash9 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1228 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// 
CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1244 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1245 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1245 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1245 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'y' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'y' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'y' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar7265711_a +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1245 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1264 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1264 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col22 
+// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col53 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1265 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1266 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1266 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1266 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'number' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'number' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'number' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar7306898 +// CHECK-NEXT: issue_hash5 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1266 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: The 'release' message should be sent to instances of class 'RDar7252064' and not the class directly +// CHECK-NEXT: message +// CHECK-NEXT: The 'release' message should be sent to instances of class 'RDar7252064' and not the class directly +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionThe 'release' message should be sent to instances of class 'RDar7252064' and not the class directly +// CHECK-NEXT: categoryAPI Misuse (Apple) +// CHECK-NEXT: typemessage incorrectly sent to class instead of class instance 
+// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar7252064 +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1276 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1276 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1276 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1276 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1276 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: The 'retain' message should be sent to instances of class 'RDar7252064' and not the class directly +// CHECK-NEXT: message +// CHECK-NEXT: The 'retain' message should be sent to instances of class 'RDar7252064' and not the class directly +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionThe 'retain' message should be sent to instances of class 'RDar7252064' and not the class directly +// CHECK-NEXT: categoryAPI Misuse (Apple) +// CHECK-NEXT: typemessage incorrectly sent to class instead of class instance +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar7252064 +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1276 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1277 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1277 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1277 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1277 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1277 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: The 'autorelease' message should be sent to instances of class 
'RDar7252064' and not the class directly +// CHECK-NEXT: message +// CHECK-NEXT: The 'autorelease' message should be sent to instances of class 'RDar7252064' and not the class directly +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionThe 'autorelease' message should be sent to instances of class 'RDar7252064' and not the class directly +// CHECK-NEXT: categoryAPI Misuse (Apple) +// CHECK-NEXT: typemessage incorrectly sent to class instead of class instance +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar7252064 +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1277 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1275 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1278 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1278 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1278 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1278 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1278 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: The 'drain' message should be sent to instances of class 'NSAutoreleasePool' and not the class directly +// CHECK-NEXT: message +// CHECK-NEXT: The 'drain' message should be sent to instances of class 'NSAutoreleasePool' and not the class directly +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionThe 'drain' message should be sent to instances of class 'NSAutoreleasePool' and not the class directly +// CHECK-NEXT: categoryAPI Misuse (Apple) +// CHECK-NEXT: typemessage incorrectly sent to class instead of class instance +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar7252064 +// CHECK-NEXT: issue_hash4 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1278 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1304 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1304 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1304 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1304 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: 
[Expected plist output, expressed as `+// CHECK-NEXT:` lines in the patch: for each diagnostic below, the path's control-flow edges, the "+1 retain count" event, the leak or return event, and a summary record (description, category "Memory (Core Foundation/Objective-C)", type, issue_context_kind, issue_context, issue_hash, location). Interprocedural paths also record the inlined calls and returns for 'returnsCFRetainedAsCF', 'returnsRetainedCFDate', 'CFDateCreate', 'CFNumberCreate', the "Assuming 'x' is 0" branch, the anonymous block call, the "-autorelease" message, and the "+2 retain count" increment.

 - "Potential leak of an object stored into 'str'" (Leak; function test_attr_1; issue_hash 2; line 1305, col 1)
 - "Potential leak of an object stored into 'str'" (Leak; function test_attr_1b; issue_hash 2; line 1309, col 1)
 - "Potential leak of an object stored into 'str2'" (Leak; function test_attr1c; issue_hash 3; line 1314, col 1)
 - "Potential leak of an object stored into 'x'" (Leak; function testattr2_a; issue_hash 2; line 1318, col 1)
 - "Potential leak of an object stored into 'x'" (Leak; function testattr2_b; issue_hash 2; line 1322, col 1)
 - "Potential leak of an object stored into 'x'" (Leak; function testattr2_b_11358224_self_assign_looses_the_leak; issue_hash 3; line 1327, col 1)
 - "Potential leak of an object" returned from a method annotated NS_RETURNS_NOT_RETAINED (Leak of returned object; Objective-C method newString; issue_hash 1; line 1357, col 3)
 - "Object with a +0 retain count returned to caller where a +1 (owning) retain count is expected" (Method should return an owned object; Objective-C method newCFRetainedAsCFNoAttr; issue_hash 1; line 1390, col 3)
 - "Potential leak of an object" whose method name does not start with 'copy', 'mutableCopy', 'alloc' or 'new' (Leak of returned object; Objective-C method alsoReturnsRetained; issue_hash 1; line 1394, col 3)
 - "Potential leak of an object" whose method name does not start with 'copy', 'mutableCopy', 'alloc' or 'new' (Leak of returned object; Objective-C method alsoReturnsRetainedAsCF; issue_hash 1; line 1398, col 3)
 - "Potential leak of an object stored into 'value'" (Leak; function test_panic_negative; issue_hash 3; line 1420, col 1)
 - "Potential leak of an object stored into 'value'" (Leak; function test_panic_neg_2; issue_hash 5; line 1433, col 1)
 - "Potential leak of an object stored into 'number'" (Leak; function test_blocks_1_pos; issue_hash 2; line 1454, col 3)
 - "Potential leak of an object stored into 'number'", retain count +2 (Leak; function test_blocks_1_indirect_retain_via_call; issue_hash 3; line 1476, col 1)
 - a further path entry whose first control edge starts at line 1526, col 5]
file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1529 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1529 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1529 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1529 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1530 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1530 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1530 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1530 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col12 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col34 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col49 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFErrorCopyUserInfo' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFErrorCopyUserInfo' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1532 +// CHECK-NEXT: col34 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col30 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'info' is not equal to null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'info' is not equal to null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col13 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1534 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1537 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1537 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1537 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1537 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1537 +// CHECK-NEXT: col91 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'info' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'info' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'info' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar_8724287 +// CHECK-NEXT: issue_hash12 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1537 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// 
CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFArrayCreateMutable' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFArrayCreateMutable' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object 
leaked: allocated object is returned from a function whose name ('camelcase_createno') does not contain 'Copy' or 'Create'. This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: allocated object is returned from a function whose name ('camelcase_createno') does not contain 'Copy' or 'Create'. This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak of returned object +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextcamelcase_createno +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1582 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFArrayCreateMutable' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFArrayCreateMutable' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: 
+// CHECK-NEXT: line1590 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: allocated object is returned from a function whose name ('camelcase_copying') does not contain 'Copy' or 'Create'. This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: allocated object is returned from a function whose name ('camelcase_copying') does not contain 'Copy' or 'Create'. This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak of returned object +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextcamelcase_copying +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1590 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFArrayCreateMutable' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFArrayCreateMutable' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: 
edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: allocated object is returned from a function whose name ('camel_creat') does not contain 'Copy' or 'Create'. This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: allocated object is returned from a function whose name ('camel_creat') does not contain 'Copy' or 'Create'. 
This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak of returned object +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextcamel_creat +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1611 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFArrayCreateMutable' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFArrayCreateMutable' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: 
message +// CHECK-NEXT: Object returned to caller as an owning reference (single retain count transferred to caller) +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col60 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: allocated object is returned from a function whose name ('camel_copymachine') does not contain 'Copy' or 'Create'. This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: allocated object is returned from a function whose name ('camel_copymachine') does not contain 'Copy' or 'Create'. This violates the naming convention rules given in the Memory Management Guide for Core Foundation +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak of returned object +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextcamel_copymachine +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1623 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1643 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1643 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col35 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col41 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to function 'CFDateCreate' returns a Core 
Foundation object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Call to function 'CFDateCreate' returns a Core Foundation object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1644 +// CHECK-NEXT: col35 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1645 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1645 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1645 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'vals' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'vals' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'vals' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar6582778 +// CHECK-NEXT: issue_hash3 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1645 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1669 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1669 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 
+// CHECK-NEXT: col64 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1671 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col18 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object released +// CHECK-NEXT: message +// CHECK-NEXT: Object released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1672 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col10 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col28 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col33 +// CHECK-NEXT: 
file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: message +// CHECK-NEXT: Reference-counted object is used after it is released +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionReference-counted object is used after it is released +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeUse-after-release +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextrdar10232019_positive +// CHECK-NEXT: issue_hash6 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1674 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1793 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1793 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col66 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: 
+// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1795 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1798 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1798 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1798 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1798 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1798 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'a' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'a' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'a' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_objc_arrays +// CHECK-NEXT: issue_hash6 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1798 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1793 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1793 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: 
+// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col56 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: message +// CHECK-NEXT: Method returns an Objective-C object with a +1 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1804 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1807 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1807 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1807 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1807 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1807 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Object leaked: object allocated and stored into 'a2' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: message +// CHECK-NEXT: Object leaked: object allocated and stored into 'a2' is not referenced later in this execution path and has a retain count of +1 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionPotential leak of an object stored into 'a2' +// CHECK-NEXT: categoryMemory (Core Foundation/Objective-C) +// CHECK-NEXT: typeLeak +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_objc_arrays +// CHECK-NEXT: issue_hash15 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1807 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1793 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1793 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col16 +// CHECK-NEXT: 
file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1794 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: NSArray literal is an object with a +0 retain count +// CHECK-NEXT: message +// CHECK-NEXT: NSArray literal is an object with a +0 retain count +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col23 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col35 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line1812 +// CHECK-NEXT: col27 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Reference count incremented. 
[Elided: the remainder of the expected plist output (CHECK-NEXT lines) for test/Analysis/retain-release.m. The plist XML markup was lost in extraction, so this block cannot be reproduced verbatim; it verifies the path diagnostics for "Potential leak of an object stored into 'a3'", 'a', 'value', and 'x' (in test_objc_arrays, test_objc_integer_literals, test_objc_boxed_expressions, and test_custom_cf), a use-after-release in rdar11400885, and bad-release reports in testConsumeAndStopTracking and testCFConsumeAndStopTracking.]
diff --git a/test/Analysis/retain-release.mm b/test/Analysis/retain-release.mm
index 01727ea..d92237b 100644
--- a/test/Analysis/retain-release.mm
+++ b/test/Analysis/retain-release.mm
@@ -366,3 +366,22 @@ NSString * radar11152419(NSString *string1, NSString *key1, NSMapTable *map) {
   return string;
 }
 
+//===----------------------------------------------------------------------===//
+// Don't crash on non-member functions with "callbacks" but without names.
+//===----------------------------------------------------------------------===//
+
+struct IntWrapper {
+  int arg;
+};
+
+int operator>> (const IntWrapper &W, int (*f)(int)) {
+  return f(W.arg);
+}
+
+void testCallback() {
+  IntWrapper val = { 42 };
+
+  extern int process(int);
+  val >> process;
+}
+
diff --git a/test/Analysis/security-syntax-checks-no-emit.c b/test/Analysis/security-syntax-checks-no-emit.c
index c2869ca..7759aa7 100644
--- a/test/Analysis/security-syntax-checks-no-emit.c
+++ b/test/Analysis/security-syntax-checks-no-emit.c
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -triple i686-pc-linux-gnu -analyze -analyzer-checker=security.insecureAPI,security.FloatLoopCounter %s -verify
+// expected-no-diagnostics
 
 // This file complements 'security-syntax-checks.m', but tests that we omit
 // specific checks on platforms where they don't make sense.
diff --git a/test/Analysis/simple-stream-checks.c b/test/Analysis/simple-stream-checks.c
new file mode 100644
index 0000000..2f09e5d
--- /dev/null
+++ b/test/Analysis/simple-stream-checks.c
@@ -0,0 +1,78 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.unix.SimpleStream -verify %s
+
+#include "Inputs/system-header-simulator-for-simple-stream.h"
+
+void checkDoubleFClose(int *Data) {
+  FILE *F = fopen("myfile.txt", "w");
+  if (F != 0) {
+    fputs ("fopen example", F);
+    if (!Data)
+      fclose(F);
+    else
+      fputc(*Data, F);
+    fclose(F); // expected-warning {{Closing a previously closed file stream}}
+  }
+}
+
+int checkLeak(int *Data) {
+  FILE *F = fopen("myfile.txt", "w");
+  if (F != 0) {
+    fputs ("fopen example", F);
+  }
+
+  if (Data) // expected-warning {{Opened file is never closed; potential resource leak}}
+    return *Data;
+  else
+    return 0;
+}
+
+void checkLeakFollowedByAssert(int *Data) {
+  FILE *F = fopen("myfile.txt", "w");
+  if (F != 0) {
+    fputs ("fopen example", F);
+    if (!Data)
+      exit(0);
+    fclose(F);
+  }
+}
+
+void CloseOnlyOnValidFileHandle() {
+  FILE *F = fopen("myfile.txt", "w");
+  if (F)
+    fclose(F);
+  int x = 0; // no warning
+}
+
+void leakOnEnfOfPath1(int *Data) {
+  FILE *F = fopen("myfile.txt", "w");// expected-warning {{Opened file is never closed; potential resource leak}}
+}
+
+void leakOnEnfOfPath2(int *Data) {
+  FILE *F = fopen("myfile.txt", "w");
+  return; // expected-warning {{Opened file is never closed; potential resource leak}}
+}
+
+FILE *leakOnEnfOfPath3(int *Data) {
+  FILE *F = fopen("myfile.txt", "w");
+  return F;
+}
+
+void myfclose(FILE *F);
+void SymbolEscapedThroughFunctionCall() {
+  FILE *F = fopen("myfile.txt", "w");
+  myfclose(F);
+  return; // no warning
+}
+
+FILE *GlobalF;
+void SymbolEscapedThroughAssignmentToGloabl() {
+  FILE *F = fopen("myfile.txt", "w");
+  GlobalF = F;
+  return; // no warning
+}
+
+void SymbolDoesNotEscapeThoughStringAPIs(char *Data) {
+  FILE *F = fopen("myfile.txt", "w");
+  fputc(*Data, F);
+  return; // expected-warning {{Opened file is never closed; potential resource leak}}
+}
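The SimpleStream test above exercises the error paths: closing a stream twice and dropping an open FILE handle on some path. For contrast, here is a minimal sketch (not part of the patch; the file name and function name are invented) of the pattern the alpha.unix.SimpleStream checker is expected to accept without diagnostics, where every successfully opened stream is closed exactly once on every path.

#include <stdio.h>

/* Open, use, and close a stream exactly once on every path. */
int writeGreeting(void) {
  FILE *F = fopen("example.txt", "w");   /* "example.txt" is an arbitrary name */
  if (F == 0)
    return -1;        /* nothing was opened, so there is nothing to close */
  fputs("hello", F);
  fclose(F);          /* single close on the success path */
  return 0;
}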
diff --git a/test/Analysis/sizeofpointer.c b/test/Analysis/sizeofpointer.c
index aa85fc0..a9e045b 100644
--- a/test/Analysis/sizeofpointer.c
+++ b/test/Analysis/sizeofpointer.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.core.SizeofPtr -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.core.SizeofPtr -verify %s
 
 struct s {
 };
diff --git a/test/Analysis/stack-addr-ps.cpp b/test/Analysis/stack-addr-ps.cpp
index cbdb143..a27bef7 100644
--- a/test/Analysis/stack-addr-ps.cpp
+++ b/test/Analysis/stack-addr-ps.cpp
@@ -87,6 +87,6 @@ struct TS {
 
 // rdar://11345441
 int* f5() {
-  int& i = i; // expected-warning {{Assigned value is garbage or undefined}} expected-note {{binding reference variable 'i' here}} expected-warning{{variable 'i' is uninitialized when used within its own initialization}}
+  int& i = i; // expected-warning {{Assigned value is garbage or undefined}} expected-note {{binding reference variable 'i' here}} expected-warning{{reference 'i' is not yet bound to a value when used within its own initialization}}
   return &i; // expected-warning {{address of stack memory associated with local variable 'i' returned}}
 }
diff --git a/test/Analysis/static_local.m b/test/Analysis/static_local.m
new file mode 100644
index 0000000..dcd49e1
--- /dev/null
+++ b/test/Analysis/static_local.m
@@ -0,0 +1,19 @@
+// RUN: %clang_cc1 -analyze -analyzer-checker=core -verify -Wno-objc-root-class %s
+// expected-no-diagnostics
+
+// Test reasoning about static locals in ObjCMethods.
+int *getValidPtr();
+@interface Radar11275803
+- (int) useStaticInMethod;
+@end
+@implementation Radar11275803
+
+- (int) useStaticInMethod
+{
+  static int *explInit = 0;
+  static int implInit;
+  if (!implInit)
+    explInit = getValidPtr();
+  return *explInit; //no-warning
+}
+@end
\ No newline at end of file
diff --git a/test/Analysis/stream.c b/test/Analysis/stream.c
index 4a095cf..329a782 100644
--- a/test/Analysis/stream.c
+++ b/test/Analysis/stream.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.unix.Stream -analyzer-store region -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.unix.Stream -analyzer-store region -verify %s
 
 typedef __typeof__(sizeof(int)) size_t;
 typedef struct _IO_FILE FILE;
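The static_local.m test above leans on the analyzer's knowledge that function-scope statics are zero-initialized: on the path where the guard variable is still zero, the pointer is assigned before it is dereferenced, so no uninitialized-value warning should fire. A self-contained C sketch of the same idiom follows; it is illustrative only, and the names in it are invented rather than taken from the patch.

static int storage = 42;
static int *getValidPointer(void) { return &storage; }

int readThroughLazyStatic(void) {
  static int *cache;        /* statics start out zero-initialized */
  static int initialized;   /* likewise starts at 0 */
  if (!initialized) {
    cache = getValidPointer();
    initialized = 1;
  }
  return *cache;            /* only reached after cache has been set */
}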
diff --git a/test/Analysis/string.c b/test/Analysis/string.c
index 32f5db3..fd836c4 100644
--- a/test/Analysis/string.c
+++ b/test/Analysis/string.c
@@ -1,7 +1,7 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.cstring,experimental.unix.cstring,debug.ExprInspection -analyzer-store=region -Wno-null-dereference -verify %s
-// RUN: %clang_cc1 -analyze -DUSE_BUILTINS -analyzer-checker=core,unix.cstring,experimental.unix.cstring,debug.ExprInspection -analyzer-store=region -Wno-null-dereference -verify %s
-// RUN: %clang_cc1 -analyze -DVARIANT -analyzer-checker=core,unix.cstring,experimental.unix.cstring,debug.ExprInspection -analyzer-store=region -Wno-null-dereference -verify %s
-// RUN: %clang_cc1 -analyze -DUSE_BUILTINS -DVARIANT -analyzer-checker=experimental.security.taint,core,unix.cstring,experimental.unix.cstring,debug.ExprInspection -analyzer-store=region -Wno-null-dereference -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=core,unix.cstring,alpha.unix.cstring,debug.ExprInspection -analyzer-store=region -Wno-null-dereference -verify %s
+// RUN: %clang_cc1 -analyze -DUSE_BUILTINS -analyzer-checker=core,unix.cstring,alpha.unix.cstring,debug.ExprInspection -analyzer-store=region -Wno-null-dereference -verify %s
+// RUN: %clang_cc1 -analyze -DVARIANT -analyzer-checker=core,unix.cstring,alpha.unix.cstring,debug.ExprInspection -analyzer-store=region -Wno-null-dereference -verify %s
+// RUN: %clang_cc1 -analyze -DUSE_BUILTINS -DVARIANT -analyzer-checker=alpha.security.taint,core,unix.cstring,alpha.unix.cstring,debug.ExprInspection -analyzer-store=region -Wno-null-dereference -verify %s
 
 //===----------------------------------------------------------------------===
 // Declarations
diff --git a/test/Analysis/svalbuilder-logic.c b/test/Analysis/svalbuilder-logic.c
index bc79f85..41d4fe2 100644
--- a/test/Analysis/svalbuilder-logic.c
+++ b/test/Analysis/svalbuilder-logic.c
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -analyze -analyzer-checker=core,unix -verify %s
+// expected-no-diagnostics
 
 // Testing core functionality of the SValBuilder.
 
diff --git a/test/Analysis/system-header-simulator-objc.h b/test/Analysis/system-header-simulator-objc.h
deleted file mode 100644
index a647b37..0000000
--- a/test/Analysis/system-header-simulator-objc.h
+++ /dev/null
@@ -1,130 +0,0 @@
-#pragma clang system_header
-
-typedef unsigned int UInt32;
-typedef unsigned short UInt16;
-
-typedef signed long CFIndex;
-typedef signed char BOOL;
-typedef unsigned long NSUInteger;
-typedef unsigned short unichar;
-typedef UInt16 UniChar;
-
-enum {
-  NSASCIIStringEncoding = 1,
-  NSNEXTSTEPStringEncoding = 2,
-  NSJapaneseEUCStringEncoding = 3,
-  NSUTF8StringEncoding = 4,
-  NSISOLatin1StringEncoding = 5,
-  NSSymbolStringEncoding = 6,
-  NSNonLossyASCIIStringEncoding = 7,
-};
-typedef const struct __CFString * CFStringRef;
-typedef struct __CFString * CFMutableStringRef;
-typedef NSUInteger NSStringEncoding;
-typedef UInt32 CFStringEncoding;
-
-typedef const void * CFTypeRef;
-
-typedef const struct __CFAllocator * CFAllocatorRef;
-extern const CFAllocatorRef kCFAllocatorDefault;
-extern const CFAllocatorRef kCFAllocatorSystemDefault;
-extern const CFAllocatorRef kCFAllocatorMalloc;
-extern const CFAllocatorRef kCFAllocatorMallocZone;
-extern const CFAllocatorRef kCFAllocatorNull;
-
-@class NSString, Protocol;
-extern void NSLog(NSString *format, ...) __attribute__((format(__NSString__, 1, 2)));
-typedef struct _NSZone NSZone;
-@class NSInvocation, NSMethodSignature, NSCoder, NSString, NSEnumerator;
-@protocol NSObject
-- (BOOL)isEqual:(id)object;
-- (id)retain;
-- (id)copy;
-- (oneway void)release;
-- (id)autorelease;
-- (id)init;
-@end @protocol NSCopying - (id)copyWithZone:(NSZone *)zone;
-@end @protocol NSMutableCopying - (id)mutableCopyWithZone:(NSZone *)zone;
-@end @protocol NSCoding - (void)encodeWithCoder:(NSCoder *)aCoder;
-@end
-@interface NSObject {}
-+ (id)allocWithZone:(NSZone *)zone;
-+ (id)alloc;
-- (void)dealloc;
-@end
-@interface NSObject (NSCoderMethods)
-- (id)awakeAfterUsingCoder:(NSCoder *)aDecoder;
-@end
-extern id NSAllocateObject(Class aClass, NSUInteger extraBytes, NSZone *zone);
-typedef struct {
-}
-NSFastEnumerationState;
-@protocol NSFastEnumeration - (NSUInteger)countByEnumeratingWithState:(NSFastEnumerationState *)state objects:(id *)stackbuf count:(NSUInteger)len;
-@end @class NSString, NSDictionary;
-@interface NSValue : NSObject - (void)getValue:(void *)value;
-@end @interface NSNumber : NSValue - (char)charValue;
-- (id)initWithInt:(int)value;
-@end @class NSString;
-@interface NSArray : NSObject - (NSUInteger)count;
-@end @interface NSArray (NSArrayCreation) + (id)array;
-@end @interface NSAutoreleasePool : NSObject {
-}
-- (void)drain;
-@end extern NSString * const NSBundleDidLoadNotification;
-typedef double NSTimeInterval;
-@interface NSDate : NSObject - (NSTimeInterval)timeIntervalSinceReferenceDate;
-@end
-
-@interface NSString : NSObject
-- (NSUInteger)length;
-- (NSString *)stringByAppendingString:(NSString *)aString;
-- ( const char *)UTF8String;
-- (id)initWithUTF8String:(const char *)nullTerminatedCString;
-- (id)initWithCharactersNoCopy:(unichar *)characters length:(NSUInteger)length freeWhenDone:(BOOL)freeBuffer;
-- (id)initWithCharacters:(const unichar *)characters length:(NSUInteger)length;
-- (id)initWithBytes:(const void *)bytes length:(NSUInteger)len encoding:(NSStringEncoding)encoding;
-- (id)initWithBytesNoCopy:(void *)bytes length:(NSUInteger)len encoding:(NSStringEncoding)encoding freeWhenDone:(BOOL)freeBuffer;
-+ (id)stringWithUTF8String:(const char *)nullTerminatedCString;
-+ (id)stringWithString:(NSString *)string;
-@end @class NSString, NSURL, NSError;
-
-@interface NSMutableString : NSString
-- (void)appendFormat:(NSString *)format, ... __attribute__((format(__NSString__, 1, 2)));
-@end
-
-@interface NSData : NSObject - (NSUInteger)length;
-+ (id)dataWithBytesNoCopy:(void *)bytes length:(NSUInteger)length;
-+ (id)dataWithBytesNoCopy:(void *)bytes length:(NSUInteger)length freeWhenDone:(BOOL)b;
-- (id)initWithBytesNoCopy:(void *)bytes length:(NSUInteger)length;
-- (id)initWithBytesNoCopy:(void *)bytes length:(NSUInteger)length freeWhenDone:(BOOL)b;
-@end
-
-typedef struct {
-}
-CFDictionaryKeyCallBacks;
-extern const CFDictionaryKeyCallBacks kCFTypeDictionaryKeyCallBacks;
-typedef struct {
-}
-CFDictionaryValueCallBacks;
-extern const CFDictionaryValueCallBacks kCFTypeDictionaryValueCallBacks;
-typedef const struct __CFDictionary * CFDictionaryRef;
-typedef struct __CFDictionary * CFMutableDictionaryRef;
-extern CFMutableDictionaryRef CFDictionaryCreateMutable(CFAllocatorRef allocator, CFIndex capacity, const CFDictionaryKeyCallBacks *keyCallBacks, const CFDictionaryValueCallBacks *valueCallBacks);
-void CFDictionarySetValue(CFMutableDictionaryRef, const void *, const void *);
-
-
-extern void CFRelease(CFTypeRef cf);
-
-extern CFMutableStringRef CFStringCreateMutableWithExternalCharactersNoCopy(CFAllocatorRef alloc, UniChar *chars, CFIndex numChars, CFIndex capacity, CFAllocatorRef externalCharactersAllocator);
-extern CFStringRef CFStringCreateWithCStringNoCopy(CFAllocatorRef alloc, const char *cStr, CFStringEncoding encoding, CFAllocatorRef contentsDeallocator);
-extern void CFStringAppend(CFMutableStringRef theString, CFStringRef appendedString);
-
-void SystemHeaderFunctionWithBlockParam(void *, void (^block)(void *), unsigned);
-
-@interface NSPointerArray : NSObject
-- (void)addPointer:(void *)pointer;
-- (void)insertPointer:(void *)item atIndex:(NSUInteger)index;
-- (void)replacePointerAtIndex:(NSUInteger)index withPointer:(void *)item;
-- (void *)pointerAtIndex:(NSUInteger)index;
-@end
-
diff --git a/test/Analysis/system-header-simulator.h b/test/Analysis/system-header-simulator.h
deleted file mode 100644
index 5790fb9..0000000
--- a/test/Analysis/system-header-simulator.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#pragma clang system_header
-
-typedef struct _FILE FILE;
-extern FILE *stdin;
-extern FILE *stdout;
-extern FILE *stderr;
-// Include a variant of standard streams that occur in the pre-processed file.
-extern FILE *__stdinp;
-extern FILE *__stdoutp;
-extern FILE *__stderrp;
-
-
-int fscanf(FILE *restrict, const char *restrict, ...);
-
-// Note, on some platforms errno macro gets replaced with a function call.
-extern int errno;
-
-unsigned long strlen(const char *);
-
-char *strcpy(char *restrict, const char *restrict);
-
-typedef unsigned long __darwin_pthread_key_t;
-typedef __darwin_pthread_key_t pthread_key_t;
-int pthread_setspecific(pthread_key_t, const void *);
-
-typedef long long __int64_t;
-typedef __int64_t __darwin_off_t;
-typedef __darwin_off_t fpos_t;
-
-void setbuf(FILE * restrict, char * restrict);
-int setvbuf(FILE * restrict, char * restrict, int, size_t);
-
-FILE *funopen(const void *,
-              int (*)(void *, char *, int),
-              int (*)(void *, const char *, int),
-              fpos_t (*)(void *, fpos_t, int),
-              int (*)(void *));
-
-int sqlite3_bind_text_my(int, const char*, int n, void(*)(void*));
-
-typedef void (*freeCallback) (void*);
-typedef struct {
-  int i;
-  freeCallback fc;
-} StWithCallback;
-
-int dealocateMemWhenDoneByVal(void*, StWithCallback);
-int dealocateMemWhenDoneByRef(StWithCallback*, const void*);
-
-typedef struct CGContext *CGContextRef;
-CGContextRef CGBitmapContextCreate(void *data/*, size_t width, size_t height,
-                                   size_t bitsPerComponent, size_t bytesPerRow,
-                                   CGColorSpaceRef space,
-                                   CGBitmapInfo bitmapInfo*/);
-void *CGBitmapContextGetData(CGContextRef context);
-
-// Include xpc.
-typedef struct _xpc_connection_s * xpc_connection_t;
-typedef void (*xpc_finalizer_t)(void *value);
-void xpc_connection_set_context(xpc_connection_t connection, void *context);
-void xpc_connection_set_finalizer_f(xpc_connection_t connection, xpc_finalizer_t finalizer);
-void xpc_connection_resume(xpc_connection_t connection);
diff --git a/test/Analysis/taint-generic.c b/test/Analysis/taint-generic.c
index 8ee1896..696db67 100644
--- a/test/Analysis/taint-generic.c
+++ b/test/Analysis/taint-generic.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.security.taint,core,experimental.security.ArrayBoundV2 -Wno-format-security -verify %s
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.security.taint,core,alpha.security.ArrayBoundV2 -Wno-format-security -verify %s
 
 int scanf(const char *restrict format, ...);
 int getchar(void);
diff --git a/test/Analysis/taint-tester.c b/test/Analysis/taint-tester.c
index a83ee32..7b0ab2a 100644
--- a/test/Analysis/taint-tester.c
+++ b/test/Analysis/taint-tester.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.security.taint,debug.TaintTest %s -verify
+// RUN: %clang_cc1 -Wno-int-to-pointer-cast -analyze -analyzer-checker=alpha.security.taint,debug.TaintTest %s -verify
 
 #include 
diff --git a/test/Analysis/taint-tester.cpp b/test/Analysis/taint-tester.cpp
index 679fbc2..f97eefb 100644
--- a/test/Analysis/taint-tester.cpp
+++ b/test/Analysis/taint-tester.cpp
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.security.taint,debug.TaintTest %s -verify
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.security.taint,debug.TaintTest %s -verify
+// expected-no-diagnostics
 
 typedef struct _FILE FILE;
 typedef __typeof(sizeof(int)) size_t;
diff --git a/test/Analysis/taint-tester.m b/test/Analysis/taint-tester.m
index ae55c66..b5663ca 100644
--- a/test/Analysis/taint-tester.m
+++ b/test/Analysis/taint-tester.m
@@ -1,4 +1,5 @@
-// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.security.taint,debug.TaintTest %s -verify
+// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.security.taint,debug.TaintTest %s -verify
+// expected-no-diagnostics
 
 #import 
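The taint tests above now run under alpha.security.taint. As a rough illustration of the data flow those checkers model, the sketch below (not part of the patch; names invented) reads an integer with scanf, which the taint checker treats as untrusted, and then uses it as an array index, the kind of sink that alpha.security.ArrayBoundV2 is meant to flag when taint tracking is enabled.

#include <stdio.h>

int buffer[16];

int readElement(void) {
  int index = 0;
  /* scanf-derived values are marked tainted by the taint checker */
  if (scanf("%d", &index) != 1)
    return -1;
  /* a tainted value used as an index without a bounds check is the
     pattern the tainted array-bounds check is designed to catch */
  return buffer[index];
}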
a/test/Analysis/temp-obj-dtors-cfg-output.cpp +++ b/test/Analysis/temp-obj-dtors-cfg-output.cpp @@ -1,5 +1,5 @@ // RUN: rm -f %t -// RUN: %clang_cc1 -analyze -analyzer-checker=debug.DumpCFG -cfg-add-implicit-dtors %s > %t 2>&1 +// RUN: %clang_cc1 -analyze -analyzer-checker=debug.DumpCFG -analyzer-config cfg-temporary-dtors=true %s > %t 2>&1 // RUN: FileCheck --input-file=%t %s // XPASS: * diff --git a/test/Analysis/templates.cpp b/test/Analysis/templates.cpp index 671aa78..faa5c1a 100644 --- a/test/Analysis/templates.cpp +++ b/test/Analysis/templates.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -fblocks -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -fblocks -analyzer-config c++-template-inlining=false -DNO_INLINE -verify %s void clang_analyzer_eval(bool); @@ -39,6 +40,11 @@ inline unsigned array_lengthof(T (&)[N]) { void testNonTypeTemplateInstantiation() { const char *S[] = { "a", "b" }; - clang_analyzer_eval(array_lengthof(S) == 2); // expected-warning{{TRUE}} + clang_analyzer_eval(array_lengthof(S) == 2); +#ifndef NO_INLINE + // expected-warning@-2 {{TRUE}} +#else + // expected-warning@-4 {{UNKNOWN}} +#endif } diff --git a/test/Analysis/temporaries.cpp b/test/Analysis/temporaries.cpp new file mode 100644 index 0000000..df1ab5a --- /dev/null +++ b/test/Analysis/temporaries.cpp @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.ExprInspection -analyzer-ipa=inlining -verify -w %s + +struct Trivial { + Trivial(int x) : value(x) {} + int value; +}; + +struct NonTrivial : public Trivial { + NonTrivial(int x) : Trivial(x) {} + ~NonTrivial(); +}; + + +Trivial getTrivial() { + return Trivial(42); // no-warning +} + +const Trivial &getTrivialRef() { + return Trivial(42); // expected-warning {{Address of stack memory associated with temporary object of type 'struct Trivial' returned to caller}} +} + + +NonTrivial getNonTrivial() { + return NonTrivial(42); // no-warning +} + +const NonTrivial &getNonTrivialRef() { + return NonTrivial(42); // expected-warning {{Address of stack memory associated with temporary object of type 'struct NonTrivial' returned to caller}} +} + diff --git a/test/Analysis/test-objc-non-nil-return-value-checker.m b/test/Analysis/test-objc-non-nil-return-value-checker.m new file mode 100644 index 0000000..8b1e6a5 --- /dev/null +++ b/test/Analysis/test-objc-non-nil-return-value-checker.m @@ -0,0 +1,50 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=osx.cocoa.NonNilReturnValue,debug.ExprInspection -verify %s + +typedef unsigned int NSUInteger; +typedef signed char BOOL; + +@protocol NSObject - (BOOL)isEqual:(id)object; @end + +@interface NSObject {} ++(id)alloc; ++(id)new; +-(id)init; +-(id)autorelease; +-(id)copy; +- (Class)class; +-(id)retain; +@end + +@interface NSArray : NSObject +- (id)objectAtIndex:(unsigned long)index; +@end + +@interface NSArray (NSExtendedArray) +- (id)objectAtIndexedSubscript:(NSUInteger)idx; +@end + +@interface NSMutableArray : NSArray +- (void)replaceObjectAtIndex:(NSUInteger)index withObject:(id)anObject; +@end + +@interface NSOrderedSet : NSObject +@end +@interface NSOrderedSet (NSOrderedSetCreation) +- (id)objectAtIndexedSubscript:(NSUInteger)idx; +@end + +void clang_analyzer_eval(id); + +void assumeThatNSArrayObjectAtIndexIsNeverNull(NSArray *A, NSUInteger i) { + clang_analyzer_eval([A objectAtIndex: i]); // expected-warning {{TRUE}} + id subscriptObj = A[1]; + clang_analyzer_eval(subscriptObj); // expected-warning {{TRUE}} +} + +void 
assumeThatNSMutableArrayObjectAtIndexIsNeverNull(NSMutableArray *A, NSUInteger i) { + clang_analyzer_eval([A objectAtIndex: i]); // expected-warning {{TRUE}} +} + +void assumeThatNSArrayObjectAtIndexedSubscriptIsNeverNull(NSOrderedSet *A, NSUInteger i) { + clang_analyzer_eval(A[i]); // expected-warning {{TRUE}} +} \ No newline at end of file diff --git a/test/Analysis/traversal-path-unification.c b/test/Analysis/traversal-path-unification.c new file mode 100644 index 0000000..f53d2ff --- /dev/null +++ b/test/Analysis/traversal-path-unification.c @@ -0,0 +1,28 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.DumpTraversal %s | FileCheck %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,debug.DumpTraversal -DUSE_EXPR %s | FileCheck %s + +int a(); +int b(); +int c(); + +#ifdef USE_EXPR +#define CHECK(x) ((x) & 1) +#else +#define CHECK(x) (x) +#endif + +void testRemoveDeadBindings() { + int i = a(); + if (CHECK(i)) + a(); + else + b(); + + // At this point the symbol bound to 'i' is dead. + // The effects of a() and b() are identical (they both invalidate globals). + // We should unify the two paths here and only get one end-of-path node. + c(); +} + +// CHECK: --END PATH-- +// CHECK-NOT: --END PATH-- \ No newline at end of file diff --git a/test/Analysis/undef-buffers.c b/test/Analysis/undef-buffers.c index cfdd7f4..f18d6e5 100644 --- a/test/Analysis/undef-buffers.c +++ b/test/Analysis/undef-buffers.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,experimental.unix,core.uninitialized -analyzer-store=region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.unix,core.uninitialized -analyzer-store=region -verify %s typedef __typeof(sizeof(int)) size_t; void *malloc(size_t); void free(void *); diff --git a/test/Analysis/uninit-vals-ps-region.m b/test/Analysis/uninit-vals-ps-region.m index d613c71..614ce2f 100644 --- a/test/Analysis/uninit-vals-ps-region.m +++ b/test/Analysis/uninit-vals-ps-region.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-checker=core,experimental.deadcode.IdempotentOperations -verify %s +// RUN: %clang_cc1 -analyze -analyzer-store=region -analyzer-checker=core,alpha.deadcode.IdempotentOperations -verify %s struct s { int data; diff --git a/test/Analysis/uninit-vals-ps.c b/test/Analysis/uninit-vals-ps.c index f301157..09736ef 100644 --- a/test/Analysis/uninit-vals-ps.c +++ b/test/Analysis/uninit-vals-ps.c @@ -122,3 +122,15 @@ int pr4631_f1_b(void) return x; // no-warning } +void foo_radar12278788() { return; } +void test_radar12278788() { + return foo_radar12278788(); // no-warning +} + +void foo_radar12278788_fp() { return; } +typedef int (*RetIntFuncType)(); +typedef void (*RetVoidFuncType)(); +int test_radar12278788_FP() { + RetVoidFuncType f = foo_radar12278788_fp; + return ((RetIntFuncType)f)(); //expected-warning {{Undefined or garbage value returned to caller}} +} diff --git a/test/Analysis/uninit-vals.m b/test/Analysis/uninit-vals.m index 4ba26f5..1cd5759 100644 --- a/test/Analysis/uninit-vals.m +++ b/test/Analysis/uninit-vals.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-store=region -verify %s +// expected-no-diagnostics typedef unsigned int NSUInteger; diff --git a/test/Analysis/unions-region.m b/test/Analysis/unions-region.m index 1a71684..3a9fb93 100644 --- a/test/Analysis/unions-region.m +++ b/test/Analysis/unions-region.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -analyze -analyzer-checker=core -analyzer-store=region -analyzer-constraints=range %s 
-verify +// expected-no-diagnostics //===-- unions-region.m ---------------------------------------------------===// // diff --git a/test/Analysis/unions.cpp b/test/Analysis/unions.cpp new file mode 100644 index 0000000..2bffe78 --- /dev/null +++ b/test/Analysis/unions.cpp @@ -0,0 +1,51 @@ +// RUN: %clang_cc1 -analyze -analyzer-checker=core %s -verify +// expected-no-diagnostics + +namespace PR14054_reduced { + struct Definition; + struct ParseNode { + union { + Definition *lexdef; + ParseNode *data; + } pn_u; + }; + struct Definition : public ParseNode { }; + + void CloneParseTree(ParseNode *opn, ParseNode *pn, ParseNode *x) { + // This used to cause an assertion failure because: + // 1. The implicit operator= for unions assigns all members of the union, + // not just the active one (b/c there's no way to know which is active). + // 2. RegionStore dutifully stored all the variants at the same offset; + // the last one won. + // 3. We asked for the value of the first variant but got back a conjured + // symbol for the second variant. + // 4. We ended up trying to add a base cast to a region of the wrong type. + // + // Now (at the time this test was added), we instead treat all variants of + // a union as different offsets, but only allow one to be active at a time. + *pn = *opn; + x = pn->pn_u.lexdef->pn_u.lexdef; + } +} + +namespace PR14054_original { + struct Definition; + struct ParseNode { + union { + struct { + union {}; + Definition *lexdef; + } name; + class { + int *target; + ParseNode *data; + } xmlpi; + } pn_u; + }; + struct Definition : public ParseNode { }; + + void CloneParseTree(ParseNode *opn, ParseNode *pn, ParseNode *x) { + pn->pn_u = opn->pn_u; + x = pn->pn_u.name.lexdef->pn_u.name.lexdef; + } +} diff --git a/test/Analysis/unix-fns.c b/test/Analysis/unix-fns.c index ec62098..edab5e1 100644 --- a/test/Analysis/unix-fns.c +++ b/test/Analysis/unix-fns.c @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=unix.API,osx.API %s -analyzer-store=region -fblocks -verify +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -analyze -analyzer-checker=core,unix.API,osx.API %s -analyzer-store=region -analyzer-output=plist -analyzer-ipa=inlining -analyzer-eagerly-assume -analyzer-config faux-bodies=true -fblocks -verify -o %t.plist +// RUN: FileCheck --input-file=%t.plist %s struct _opaque_pthread_once_t { long __sig; @@ -12,12 +13,31 @@ typedef __darwin_size_t size_t; void *calloc(size_t, size_t); void *malloc(size_t); void *realloc(void *, size_t); +void *reallocf(void *, size_t); void *alloca(size_t); void *valloc(size_t); +typedef union { + struct _os_object_s *_os_obj; + struct dispatch_object_s *_do; + struct dispatch_continuation_s *_dc; + struct dispatch_queue_s *_dq; + struct dispatch_queue_attr_s *_dqa; + struct dispatch_group_s *_dg; + struct dispatch_source_s *_ds; + struct dispatch_source_attr_s *_dsa; + struct dispatch_semaphore_s *_dsema; + struct dispatch_data_s *_ddata; + struct dispatch_io_s *_dchannel; + struct dispatch_operation_s *_doperation; + struct dispatch_disk_s *_ddisk; +} dispatch_object_t __attribute__((__transparent_union__)); + typedef void (^dispatch_block_t)(void); typedef long dispatch_once_t; +typedef struct dispatch_queue_s *dispatch_queue_t; void dispatch_once(dispatch_once_t *predicate, dispatch_block_t block); +void dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); #ifndef O_CREAT #define O_CREAT 0x0200 @@ -94,12 +114,24 @@ void test_realloc(char *ptr) { foo[i] = 0; } } +void test_reallocf(char 
*ptr) { + char *foo = reallocf(ptr, 0); // expected-warning{{Call to 'reallocf' has an allocation size of 0 bytes}} + for (unsigned i = 0; i < 100; i++) { + foo[i] = 0; + } +} void test_realloc_nowarn(char *ptr, size_t size) { char *foo = realloc(ptr, size); // no-warning for (unsigned i = 0; i < 100; i++) { foo[i] = 0; } } +void test_reallocf_nowarn(char *ptr, size_t size) { + char *foo = reallocf(ptr, size); // no-warning + for (unsigned i = 0; i < 100; i++) { + foo[i] = 0; + } +} void test_alloca() { char *foo = alloca(0); // expected-warning{{Call to 'alloca' has an allocation size of 0 bytes}} for(unsigned i = 0; i < 100; i++) { @@ -136,3 +168,1826 @@ void test_valloc_nowarn(size_t sz) { foo[i] = 0; } } + +// Test dispatch_once being a macro that wraps a call to _dispatch_once, which in turn +// calls the real dispatch_once. + +static inline void _dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) +{ + dispatch_once(predicate, block); +} + +#define dispatch_once _dispatch_once + +void test_dispatch_once_in_macro() { + dispatch_once_t pred = 0; + dispatch_once(&pred, ^(){}); // expected-warning {{Call to 'dispatch_once' uses the local variable 'pred' for the predicate value}} +} + +// Test inlining of dispatch_sync. +void test_dispatch_sync(dispatch_queue_t queue, int *q) { + int *p = 0; + dispatch_sync(queue, ^(void){ + if (q) { + *p = 1; // expected-warning {{null pointer}} + } + }); +} + +// Test inlining if dispatch_once. +void test_inline_dispatch_once() { + static dispatch_once_t pred; + int *p = 0; + dispatch_once(&pred, ^(void) { + *p = 1; // expected-warning {{null}} + }); +} + +// CHECK: diagnostics +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line50 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'fd' is not 
equal to 0 +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'fd' is not equal to 0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line52 +// CHECK-NEXT: col7 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col11 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col19 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col25 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'open' requires a third argument when the 'O_CREAT' flag is set +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'open' requires a third argument when the 'O_CREAT' flag is set +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'open' requires a third argument when the 'O_CREAT' flag is set +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeImproper use of 'open' +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_open +// CHECK-NEXT: issue_hash6 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line55 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line61 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col4 +// 
CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col9 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col52 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col64 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col52 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col66 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col72 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'dispatch_once' uses the local variable 'pred' for the predicate value. Using such transient memory for the predicate is potentially dangerous. Perhaps you intended to declare the variable as 'static'? +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'dispatch_once' uses the local variable 'pred' for the predicate value. Using such transient memory for the predicate is potentially dangerous. Perhaps you intended to declare the variable as 'static'? +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'dispatch_once' uses the local variable 'pred' for the predicate value. Using such transient memory for the predicate is potentially dangerous. Perhaps you intended to declare the variable as 'static'? 
+// CHECK-NEXT: categoryMac OS X API +// CHECK-NEXT: typeImproper use of 'dispatch_once' +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_dispatch_once +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line62 +// CHECK-NEXT: col52 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line72 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line73 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line73 +// CHECK-NEXT: col14 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line73 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line73 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line73 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'pthread_once' uses the local variable 'pred' for the "control" value. Using such transient memory for the control value is potentially dangerous. Perhaps you intended to declare the variable as 'static'? +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'pthread_once' uses the local variable 'pred' for the "control" value. Using such transient memory for the control value is potentially dangerous. Perhaps you intended to declare the variable as 'static'? +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'pthread_once' uses the local variable 'pred' for the "control" value. Using such transient memory for the control value is potentially dangerous. Perhaps you intended to declare the variable as 'static'? 
+// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeImproper use of 'pthread_once' +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_pthread_once +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line73 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'malloc' has an allocation size of 0 bytes +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'malloc' has an allocation size of 0 bytes +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'malloc' has an allocation size of 0 bytes +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeUndefined allocation of 0 bytes (CERT MEM04-C; CWE-131) +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contextpr2899 +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line82 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line94 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line94 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line94 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line94 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line94 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line94 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line94 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'calloc' has an allocation size of 0 bytes +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'calloc' has an allocation size of 0 bytes +// CHECK-NEXT: +// 
CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'calloc' has an allocation size of 0 bytes +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeUndefined allocation of 0 bytes (CERT MEM04-C; CWE-131) +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_calloc +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line94 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col26 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'calloc' has an allocation size of 0 bytes +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'calloc' has an allocation size of 0 bytes +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'calloc' has an allocation size of 0 bytes +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeUndefined allocation of 0 bytes (CERT MEM04-C; CWE-131) +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_calloc2 +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line100 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line112 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line112 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line112 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line112 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line112 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line112 +// CHECK-NEXT: col28 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line112 +// CHECK-NEXT: col28 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'realloc' has an 
allocation size of 0 bytes +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'realloc' has an allocation size of 0 bytes +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'realloc' has an allocation size of 0 bytes +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeUndefined allocation of 0 bytes (CERT MEM04-C; CWE-131) +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_realloc +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line112 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line118 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line118 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line118 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line118 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line118 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line118 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line118 +// CHECK-NEXT: col29 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'reallocf' has an allocation size of 0 bytes +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'reallocf' has an allocation size of 0 bytes +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'reallocf' has an allocation size of 0 bytes +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeUndefined allocation of 0 bytes (CERT MEM04-C; CWE-131) +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_reallocf +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line118 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line136 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line136 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line136 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line136 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line136 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line136 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line136 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 
+// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'alloca' has an allocation size of 0 bytes +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'alloca' has an allocation size of 0 bytes +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'alloca' has an allocation size of 0 bytes +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeUndefined allocation of 0 bytes (CERT MEM04-C; CWE-131) +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_alloca +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line136 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line148 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line148 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line148 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line148 +// CHECK-NEXT: col31 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line148 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line148 +// CHECK-NEXT: col33 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line148 +// CHECK-NEXT: col33 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'alloca' has an allocation size of 0 bytes +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'alloca' has an allocation size of 0 bytes +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'alloca' has an allocation size of 0 bytes +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeUndefined allocation of 0 bytes (CERT MEM04-C; CWE-131) +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_builtin_alloca +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line148 +// CHECK-NEXT: col16 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line160 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line160 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line160 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line160 +// CHECK-NEXT: col20 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line160 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line160 +// 
CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line160 +// CHECK-NEXT: col22 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'valloc' has an allocation size of 0 bytes +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'valloc' has an allocation size of 0 bytes +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'valloc' has an allocation size of 0 bytes +// CHECK-NEXT: categoryUnix API +// CHECK-NEXT: typeUndefined allocation of 0 bytes (CERT MEM04-C; CWE-131) +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_valloc +// CHECK-NEXT: issue_hash1 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line160 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line183 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line183 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line184 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line184 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line184 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line184 +// CHECK-NEXT: col17 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line184 +// CHECK-NEXT: col21 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Call to 'dispatch_once' uses the local variable 'pred' for the predicate value. Using such transient memory for the predicate is potentially dangerous. Perhaps you intended to declare the variable as 'static'? +// CHECK-NEXT: message +// CHECK-NEXT: Call to 'dispatch_once' uses the local variable 'pred' for the predicate value. Using such transient memory for the predicate is potentially dangerous. Perhaps you intended to declare the variable as 'static'? +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionCall to 'dispatch_once' uses the local variable 'pred' for the predicate value. Using such transient memory for the predicate is potentially dangerous. Perhaps you intended to declare the variable as 'static'? 
+// CHECK-NEXT: categoryMac OS X API +// CHECK-NEXT: typeImproper use of 'dispatch_once' +// CHECK-NEXT: issue_context_kindfunction +// CHECK-NEXT: issue_contexttest_dispatch_once_in_macro +// CHECK-NEXT: issue_hash2 +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line184 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line189 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line189 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line189 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line189 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line189 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line194 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'dispatch_sync' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'dispatch_sync' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line40 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'test_dispatch_sync' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'test_dispatch_sync' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line194 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling anonymous block +// CHECK-NEXT: message +// CHECK-NEXT: Calling anonymous block +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth2 +// 
CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'dispatch_sync' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'dispatch_sync' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line190 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth2 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Assuming 'q' is non-null +// CHECK-NEXT: message +// CHECK-NEXT: Assuming 'q' is non-null +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line191 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line192 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line192 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line192 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line192 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line192 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth2 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: 
categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line192 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: path +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line199 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line199 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line200 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line200 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line200 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line200 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line200 +// CHECK-NEXT: col8 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: message +// CHECK-NEXT: Variable 'p' initialized to a null pointer value +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line201 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line201 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line203 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth0 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling '_dispatch_once' +// CHECK-NEXT: message +// CHECK-NEXT: Calling '_dispatch_once' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line175 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'test_inline_dispatch_once' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'test_inline_dispatch_once' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line175 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line175 +// CHECK-NEXT: col6 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line177 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line177 +// CHECK-NEXT: col15 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line177 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line177 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 
+// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line177 +// CHECK-NEXT: col33 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth1 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling 'dispatch_once' +// CHECK-NEXT: message +// CHECK-NEXT: Calling 'dispatch_once' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line39 +// CHECK-NEXT: col1 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth2 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from '_dispatch_once' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from '_dispatch_once' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line177 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line177 +// CHECK-NEXT: col3 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line177 +// CHECK-NEXT: col33 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth2 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Calling anonymous block +// CHECK-NEXT: message +// CHECK-NEXT: Calling anonymous block +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line201 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: depth3 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Entered call from 'dispatch_once' +// CHECK-NEXT: message +// CHECK-NEXT: Entered call from 'dispatch_once' +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindcontrol +// CHECK-NEXT: edges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: start +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line201 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line201 +// CHECK-NEXT: col24 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: end +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line202 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line202 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: kindevent +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line202 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: ranges +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line202 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: line202 +// CHECK-NEXT: col5 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: depth3 +// CHECK-NEXT: extended_message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: message +// CHECK-NEXT: Dereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: descriptionDereference of null pointer (loaded from variable 'p') +// CHECK-NEXT: categoryLogic error +// CHECK-NEXT: typeDereference of null pointer +// CHECK-NEXT: location +// CHECK-NEXT: +// CHECK-NEXT: line202 +// CHECK-NEXT: col4 +// CHECK-NEXT: file0 +// CHECK-NEXT: +// CHECK-NEXT: +// CHECK-NEXT: diff --git a/test/Analysis/unreachable-code-path.c b/test/Analysis/unreachable-code-path.c index 48a3462..61a4fd4 100644 --- a/test/Analysis/unreachable-code-path.c +++ 
b/test/Analysis/unreachable-code-path.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=core,deadcode.DeadStores,experimental.deadcode.UnreachableCode -verify -analyzer-opt-analyze-nested-blocks -Wno-unused-value %s +// RUN: %clang_cc1 -analyze -analyzer-checker=core,deadcode.DeadStores,alpha.deadcode.UnreachableCode -verify -analyzer-opt-analyze-nested-blocks -Wno-unused-value %s extern void foo(int a); diff --git a/test/Analysis/viewcontroller.m b/test/Analysis/viewcontroller.m new file mode 100644 index 0000000..a8c4580 --- /dev/null +++ b/test/Analysis/viewcontroller.m @@ -0,0 +1,135 @@ +// RUN: %clang_cc1 -fblocks -analyze -analyzer-checker=alpha.osx.cocoa.MissingSuperCall -verify -Wno-objc-root-class %s + +@protocol NSObject +- (id)retain; +- (oneway void)release; +@end +@interface NSObject {} +- (id)init; ++ (id)alloc; +@end + +typedef char BOOL; +typedef double NSTimeInterval; +typedef enum UIViewAnimationOptions { + UIViewAnimationOptionLayoutSubviews = 1 << 0 +} UIViewAnimationOptions; + +@interface UIViewController : NSObject {} +- (void)addChildViewController:(UIViewController *)childController; +- (void)viewDidAppear:(BOOL)animated; +- (void)viewDidDisappear:(BOOL)animated; +- (void)viewDidUnload; +- (void)viewDidLoad; +- (void)viewWillUnload; +- (void)viewWillAppear:(BOOL)animated; +- (void)viewWillDisappear:(BOOL)animated; +- (void)didReceiveMemoryWarning; +- (void)removeFromParentViewController; +- (void)transitionFromViewController:(UIViewController *)fromViewController + toViewController:(UIViewController *)toViewController + duration:(NSTimeInterval)duration options:(UIViewAnimationOptions)options + animations:(void (^)(void))animations + completion:(void (^)(BOOL finished))completion; +@end + +// Do not warn if UIViewController isn't our superclass +@interface TestA +@end +@implementation TestA + +- (void)addChildViewController:(UIViewController *)childController {} +- (void)viewDidAppear:(BOOL)animated {} +- (void)viewDidDisappear:(BOOL)animated {} +- (void)viewDidUnload {} +- (void)viewDidLoad {} +- (void)viewWillUnload {} +- (void)viewWillAppear:(BOOL)animated {} +- (void)viewWillDisappear:(BOOL)animated {} +- (void)didReceiveMemoryWarning {} +- (void)removeFromParentViewController {} + +@end + +// Warn if UIViewController is our superclass and we do not call super +@interface TestB : UIViewController {} +@end +@implementation TestB + +- (void)addChildViewController:(UIViewController *)childController { + int addChildViewController = 5; + for (int i = 0; i < addChildViewController; i++) + [self viewDidAppear:i]; +} // expected-warning {{The 'addChildViewController:' instance method in UIViewController subclass 'TestB' is missing a [super addChildViewController:] call}} +- (void)viewDidAppear:(BOOL)animated {} // expected-warning {{The 'viewDidAppear:' instance method in UIViewController subclass 'TestB' is missing a [super viewDidAppear:] call}} +- (void)viewDidDisappear:(BOOL)animated {} // expected-warning {{The 'viewDidDisappear:' instance method in UIViewController subclass 'TestB' is missing a [super viewDidDisappear:] call}} +- (void)viewDidUnload {} // expected-warning {{The 'viewDidUnload' instance method in UIViewController subclass 'TestB' is missing a [super viewDidUnload] call}} +- (void)viewDidLoad {} // expected-warning {{The 'viewDidLoad' instance method in UIViewController subclass 'TestB' is missing a [super viewDidLoad] call}} +- (void)viewWillUnload {} // expected-warning {{The 'viewWillUnload' instance method in UIViewController 
subclass 'TestB' is missing a [super viewWillUnload] call}} +- (void)viewWillAppear:(BOOL)animated {} // expected-warning {{The 'viewWillAppear:' instance method in UIViewController subclass 'TestB' is missing a [super viewWillAppear:] call}} +- (void)viewWillDisappear:(BOOL)animated {} // expected-warning {{The 'viewWillDisappear:' instance method in UIViewController subclass 'TestB' is missing a [super viewWillDisappear:] call}} +- (void)didReceiveMemoryWarning {} // expected-warning {{The 'didReceiveMemoryWarning' instance method in UIViewController subclass 'TestB' is missing a [super didReceiveMemoryWarning] call}} +- (void)removeFromParentViewController {} // expected-warning {{The 'removeFromParentViewController' instance method in UIViewController subclass 'TestB' is missing a [super removeFromParentViewController] call}} + +// Do not warn for methods where it shouldn't +- (void)shouldAutorotate {}; +@end + +// Do not warn if UIViewController is our superclass but we did call super +@interface TestC : UIViewController {} +@end +@implementation TestC + +- (BOOL)methodReturningStuff { + return 1; +} + +- (void)methodDoingStuff { + [super removeFromParentViewController]; +} + +- (void)addChildViewController:(UIViewController *)childController { + [super addChildViewController:childController]; +} + +- (void)viewDidAppear:(BOOL)animated { + [super viewDidAppear:animated]; +} + +- (void)viewDidDisappear:(BOOL)animated { + [super viewDidDisappear:animated]; +} + +- (void)viewDidUnload { + [super viewDidUnload]; +} + +- (void)viewDidLoad { + [super viewDidLoad]; +} + +- (void)viewWillUnload { + [super viewWillUnload]; +} + +- (void)viewWillAppear:(BOOL)animated { + int i = 0; // Also don't start warning just because we do additional stuff + i++; + [self viewDidDisappear:i]; + [super viewWillAppear:animated]; +} + +- (void)viewWillDisappear:(BOOL)animated { + [super viewWillDisappear:[self methodReturningStuff]]; +} + +- (void)didReceiveMemoryWarning { + [super didReceiveMemoryWarning]; +} + +// We expect a warning here because at the moment the super-call can't be +// done from another method.
+- (void)removeFromParentViewController { + [self methodDoingStuff]; +} // expected-warning {{The 'removeFromParentViewController' instance method in UIViewController subclass 'TestC' is missing a [super removeFromParentViewController] call}} +@end diff --git a/test/Analysis/virtualcall.cpp b/test/Analysis/virtualcall.cpp index 127d04f..c3319b0 100644 --- a/test/Analysis/virtualcall.cpp +++ b/test/Analysis/virtualcall.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -analyze -analyzer-checker=experimental.cplusplus.VirtualCall -analyzer-store region -verify %s +// RUN: %clang_cc1 -analyze -analyzer-checker=alpha.cplusplus.VirtualCall -analyzer-store region -verify %s class A { public: @@ -51,3 +51,9 @@ int main() { B *b; C *c; } + +#include "virtualcall.h" + +#define AS_SYSTEM +#include "virtualcall.h" +#undef AS_SYSTEM diff --git a/test/Analysis/virtualcall.h b/test/Analysis/virtualcall.h new file mode 100644 index 0000000..9f7094d --- /dev/null +++ b/test/Analysis/virtualcall.h @@ -0,0 +1,28 @@ +#ifdef AS_SYSTEM +#pragma clang system_header + +namespace system { + class A { + public: + A() { + foo(); // no-warning + } + + virtual int foo(); + }; +} + +#else + +namespace header { + class A { + public: + A() { + foo(); // expected-warning{{Call virtual functions during construction or destruction will never go to a more derived class}} + } + + virtual int foo(); + }; +} + +#endif diff --git a/test/CXX/basic/basic.lookup/basic.lookup.argdep/p2-template-id.cpp b/test/CXX/basic/basic.lookup/basic.lookup.argdep/p2-template-id.cpp index f650ad5..eda869b 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.argdep/p2-template-id.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.argdep/p2-template-id.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace N1 { struct X { }; diff --git a/test/CXX/basic/basic.lookup/basic.lookup.argdep/p2.cpp b/test/CXX/basic/basic.lookup/basic.lookup.argdep/p2.cpp index f5ad68b..df9a2cd 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.argdep/p2.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.argdep/p2.cpp @@ -108,3 +108,27 @@ namespace test6 { test6_function(args); } } + +// PR13682: we might need to instantiate class temploids. +namespace test7 { + namespace inner { + class A {}; + void test7_function(A &); + } + template class B : public inner::A {}; + + void test(B &ref) { + test7_function(ref); + } +} + +// Like test7, but ensure we don't complain if the type is properly +// incomplete. 
+namespace test8 { + template class B; + void test8_function(B &); + + void test(B &ref) { + test8_function(ref); + } +} diff --git a/test/CXX/basic/basic.lookup/basic.lookup.classref/p3.cpp b/test/CXX/basic/basic.lookup/basic.lookup.classref/p3.cpp index cd7e669..ef4243e 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.classref/p3.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.classref/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // C++0x [basic.lookup.classref]p3: // If the unqualified-id is ~type-name, the type-name is looked up in the diff --git a/test/CXX/basic/basic.lookup/basic.lookup.classref/p4-cxx11.cpp b/test/CXX/basic/basic.lookup/basic.lookup.classref/p4-cxx11.cpp index 7925454..a4721d6 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.classref/p4-cxx11.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.classref/p4-cxx11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 %s -verify +// expected-no-diagnostics struct A { void f(); }; struct C { void f(); }; diff --git a/test/CXX/basic/basic.lookup/basic.lookup.qual/namespace.qual/p3.cpp b/test/CXX/basic/basic.lookup/basic.lookup.qual/namespace.qual/p3.cpp index dc0f8b4..1060f61 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.qual/namespace.qual/p3.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.qual/namespace.qual/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // This is basically paraphrased from the standard. diff --git a/test/CXX/basic/basic.lookup/basic.lookup.qual/namespace.qual/p4.cpp b/test/CXX/basic/basic.lookup/basic.lookup.qual/namespace.qual/p4.cpp index 38eccfa..7c292d5 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.qual/namespace.qual/p4.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.qual/namespace.qual/p4.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace A { int a; diff --git a/test/CXX/basic/basic.lookup/basic.lookup.udir/p1.cpp b/test/CXX/basic/basic.lookup/basic.lookup.udir/p1.cpp index ab0dc24..91f5a54 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.udir/p1.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.udir/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // When looking up a namespace-name in a using-directive or // namespace-alias-definition, only namespace names are considered. 
diff --git a/test/CXX/basic/basic.lookup/basic.lookup.unqual/p12.cpp b/test/CXX/basic/basic.lookup/basic.lookup.unqual/p12.cpp index 878ff07..6bf74c1 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.unqual/p12.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.unqual/p12.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct S {}; S E0; diff --git a/test/CXX/basic/basic.lookup/basic.lookup.unqual/p13.cpp b/test/CXX/basic/basic.lookup/basic.lookup.unqual/p13.cpp index 58d7ff4..ba34571 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.unqual/p13.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.unqual/p13.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct S { static const int f0 = 0; diff --git a/test/CXX/basic/basic.lookup/basic.lookup.unqual/p14.cpp b/test/CXX/basic/basic.lookup/basic.lookup.unqual/p14.cpp index 0fa4f65..4ffe538 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.unqual/p14.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.unqual/p14.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // C++0x [basic.lookup.unqual]p14: // If a variable member of a namespace is defined outside of the diff --git a/test/CXX/basic/basic.lookup/basic.lookup.unqual/p3.cpp b/test/CXX/basic/basic.lookup/basic.lookup.unqual/p3.cpp index 20a7ae0..abcc6ee 100644 --- a/test/CXX/basic/basic.lookup/basic.lookup.unqual/p3.cpp +++ b/test/CXX/basic/basic.lookup/basic.lookup.unqual/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef int f; diff --git a/test/CXX/basic/basic.scope/basic.scope.local/p2.cpp b/test/CXX/basic/basic.scope/basic.scope.local/p2.cpp new file mode 100644 index 0000000..91e96b6 --- /dev/null +++ b/test/CXX/basic/basic.scope/basic.scope.local/p2.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -std=c++11 -fsyntax-only -fcxx-exceptions -fexceptions -verify %s + +void func1(int i) { // expected-note{{previous definition is here}} + int i; // expected-error{{redefinition of 'i'}} +} + +void func2(int i) try { // expected-note{{previous definition is here}} + int i; // expected-error{{redefinition of 'i'}} +} catch (...) { +} + +void func3(int i) try { // FIXME: note {{previous definition is here}} +} catch (int i) { // FIXME: error {{redefinition of 'i'}} +} + +void func4(int i) try { // expected-note{{previous definition is here}} +} catch (...) { + int i; // expected-error{{redefinition of 'i'}} +} + +void func5() try { + int i; +} catch (...) { + int j = i; // expected-error{{use of undeclared identifier 'i'}} +} + +void func6() try { +} catch (int i) { // expected-note{{previous definition is here}} + int i; // expected-error{{redefinition of 'i'}} +} + +void func7() { + try { + } catch (int i) { // expected-note{{previous definition is here}} + int i; // expected-error{{redefinition of 'i'}} + } +} diff --git a/test/CXX/basic/basic.scope/basic.scope.pdecl/p9.cpp b/test/CXX/basic/basic.scope/basic.scope.pdecl/p9.cpp index e64b675..c627535 100644 --- a/test/CXX/basic/basic.scope/basic.scope.pdecl/p9.cpp +++ b/test/CXX/basic/basic.scope/basic.scope.pdecl/p9.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // Template type parameters. 
typedef unsigned char T; diff --git a/test/CXX/basic/basic.start/basic.start.main/p2a.cpp b/test/CXX/basic/basic.start/basic.start.main/p2a.cpp index b8dfbe7..b27d492 100644 --- a/test/CXX/basic/basic.start/basic.start.main/p2a.cpp +++ b/test/CXX/basic/basic.start/basic.start.main/p2a.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef int Int; typedef char Char; diff --git a/test/CXX/basic/basic.start/basic.start.main/p2b.cpp b/test/CXX/basic/basic.start/basic.start.main/p2b.cpp index 785382c..65cd202 100644 --- a/test/CXX/basic/basic.start/basic.start.main/p2b.cpp +++ b/test/CXX/basic/basic.start/basic.start.main/p2b.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef int Int; typedef char Char; diff --git a/test/CXX/basic/basic.start/basic.start.main/p2c.cpp b/test/CXX/basic/basic.start/basic.start.main/p2c.cpp index 81b08b9..2b082ec 100644 --- a/test/CXX/basic/basic.start/basic.start.main/p2c.cpp +++ b/test/CXX/basic/basic.start/basic.start.main/p2c.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics int main() { } diff --git a/test/CXX/basic/basic.start/basic.start.main/p2g.cpp b/test/CXX/basic/basic.start/basic.start.main/p2g.cpp index e3209fd..45f643f 100644 --- a/test/CXX/basic/basic.start/basic.start.main/p2g.cpp +++ b/test/CXX/basic/basic.start/basic.start.main/p2g.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics int main(int argc, const char* const* argv) { } diff --git a/test/CXX/basic/basic.stc/basic.stc.dynamic/p2-nodef.cpp b/test/CXX/basic/basic.stc/basic.stc.dynamic/p2-nodef.cpp index 6cd587c..9a740df 100644 --- a/test/CXX/basic/basic.stc/basic.stc.dynamic/p2-nodef.cpp +++ b/test/CXX/basic/basic.stc/basic.stc.dynamic/p2-nodef.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics int *use_new(int N) { return new int [N]; diff --git a/test/CXX/basic/basic.stc/basic.stc.dynamic/p2-noexceptions.cpp b/test/CXX/basic/basic.stc/basic.stc.dynamic/p2-noexceptions.cpp index 4567c46..9819ea0 100644 --- a/test/CXX/basic/basic.stc/basic.stc.dynamic/p2-noexceptions.cpp +++ b/test/CXX/basic/basic.stc/basic.stc.dynamic/p2-noexceptions.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace std { class bad_alloc { }; diff --git a/test/CXX/class.access/class.friend/p1.cpp b/test/CXX/class.access/class.friend/p1.cpp index 7cb192b..19d94cf 100644 --- a/test/CXX/class.access/class.friend/p1.cpp +++ b/test/CXX/class.access/class.friend/p1.cpp @@ -20,7 +20,7 @@ void test1() { g()->f(); S::f(); X::g(); // expected-error{{no member named 'g' in 'X'}} - X::S x_s; // expected-error{{no member named 'S' in 'X'}} + X::S x_s; // expected-error{{no type named 'S' in 'X'}} X x; x.g(); // expected-error{{no member named 'g' in 'X'}} } @@ -44,16 +44,16 @@ namespace N { S s; S::f(); X::g(); // expected-error{{no member named 'g' in 'N::X'}} - X::S x_s; // expected-error{{no member named 'S' in 'N::X'}} + X::S x_s; // expected-error{{no type named 'S' in 'N::X'}} X x; x.g(); // expected-error{{no member named 'g' in 'N::X'}} g2(); S2 s2; ::g2(); // expected-error{{no member named 'g2' in the global namespace}} - ::S2 g_s2; // expected-error{{no member named 'S2' in the global namespace}} + ::S2 g_s2; // expected-error{{no type named 'S2' in the global namespace}} X::g2(); // expected-error{{no member named 'g2' in 'N::X'}} - X::S2 x_s2; // 
expected-error{{no member named 'S2' in 'N::X'}} + X::S2 x_s2; // expected-error{{no type named 'S2' in 'N::X'}} x.g2(); // expected-error{{no member named 'g2' in 'N::X'}} } } @@ -356,3 +356,19 @@ namespace PR9103 { } }; } + +// PR13642. When computing the effective context, we were walking up +// the DC chain for the canonical decl, which is unfortunate if that's +// (e.g.) a friend declaration. +namespace test14 { + class A { + class B { // expected-note {{implicitly declared private here}} + static int i; + friend void c(); + }; + }; + + void c() { + A::B::i = 5; // expected-error {{'B' is a private member of 'test14::A'}} + } +} diff --git a/test/CXX/class.access/class.friend/p3-cxx0x.cpp b/test/CXX/class.access/class.friend/p3-cxx0x.cpp index 00fc0a3..e4d5fd5 100644 --- a/test/CXX/class.access/class.friend/p3-cxx0x.cpp +++ b/test/CXX/class.access/class.friend/p3-cxx0x.cpp @@ -27,3 +27,29 @@ struct Y3 { X1 x1a; X1 x1b; X1 x1c; // expected-note{{in instantiation of template class 'X1' requested here}} + +template +class A { + T x; +public: + class foo {}; + static int y; +}; + +struct { + // Ill-formed + int friend; // expected-error {{'friend' must appear first in a non-function declaration}} + unsigned friend int; // expected-error {{'friend' must appear first in a non-function declaration}} + const volatile friend int; // expected-error {{'friend' must appear first in a non-function declaration}} + int + friend; // expected-error {{'friend' must appear first in a non-function declaration}} + + // OK + int friend foo(void); + friend int; + friend const volatile int; + friend + + float; + template friend class A::foo; +} a; diff --git a/test/CXX/class.access/class.protected/p1-cxx11.cpp b/test/CXX/class.access/class.protected/p1-cxx11.cpp index dc9b20d..c1cf047 100644 --- a/test/CXX/class.access/class.protected/p1-cxx11.cpp +++ b/test/CXX/class.access/class.protected/p1-cxx11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // PR12497 namespace test0 { diff --git a/test/CXX/class.access/class.protected/p1.cpp b/test/CXX/class.access/class.protected/p1.cpp index c9491e1..132ff61 100644 --- a/test/CXX/class.access/class.protected/p1.cpp +++ b/test/CXX/class.access/class.protected/p1.cpp @@ -423,7 +423,7 @@ namespace test12 { // This friendship is not considered because a public member of A is // inaccessible in C. 
namespace test13 { - class A { protected: int foo(); }; // expected-note {{can only access this member on an object of type}} + class A { protected: int foo(); }; // expected-note {{declared protected here}} class B : private virtual A {}; class C : private B { friend void test(); }; class D : public virtual A {}; diff --git a/test/CXX/class.derived/class.abstract/p16.cpp b/test/CXX/class.derived/class.abstract/p16.cpp new file mode 100644 index 0000000..93f905c --- /dev/null +++ b/test/CXX/class.derived/class.abstract/p16.cpp @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify %s + +struct A { + virtual void a(); // expected-note{{overridden virtual function is here}} + virtual void b() = delete; // expected-note{{overridden virtual function is here}} +}; + +struct B: A { + virtual void a() = delete; // expected-error{{deleted function 'a' cannot override a non-deleted function}} + virtual void b(); // expected-error{{non-deleted function 'b' cannot override a deleted function}} +}; + +struct C: A { + virtual void a(); + virtual void b() = delete; +}; diff --git a/test/CXX/class.derived/p2.cpp b/test/CXX/class.derived/p2.cpp index 7ef53d3..87e0f74 100644 --- a/test/CXX/class.derived/p2.cpp +++ b/test/CXX/class.derived/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // "During the lookup for a base class name, non-type names are ignored" namespace PR5840 { diff --git a/test/CXX/class/class.friend/p1-ambiguous.cpp b/test/CXX/class/class.friend/p1-ambiguous.cpp index a9dca4f..3bb3271 100644 --- a/test/CXX/class/class.friend/p1-ambiguous.cpp +++ b/test/CXX/class/class.friend/p1-ambiguous.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // Make sure that friend declarations don't introduce ambiguous // declarations. 
diff --git a/test/CXX/class/class.friend/p1-cxx11.cpp b/test/CXX/class/class.friend/p1-cxx11.cpp index 235f295..6e3d850 100644 --- a/test/CXX/class/class.friend/p1-cxx11.cpp +++ b/test/CXX/class/class.friend/p1-cxx11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics class A { class AInner { diff --git a/test/CXX/class/class.nest/p3.cpp b/test/CXX/class/class.nest/p3.cpp index c4c4ca7..677411f 100644 --- a/test/CXX/class/class.nest/p3.cpp +++ b/test/CXX/class/class.nest/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // C++0x [class.nest] p3: // If class X is defined in a namespace scope, a nested class Y may be diff --git a/test/CXX/class/p1-0x.cpp b/test/CXX/class/p1-0x.cpp index be5fdff..5c32788 100644 --- a/test/CXX/class/p1-0x.cpp +++ b/test/CXX/class/p1-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics namespace Test1 { class A final { }; diff --git a/test/CXX/class/p2-0x.cpp b/test/CXX/class/p2-0x.cpp index dbb01e5..5b39e0a 100644 --- a/test/CXX/class/p2-0x.cpp +++ b/test/CXX/class/p2-0x.cpp @@ -26,3 +26,11 @@ struct C : A { }; // expected-error {{base 'A' is marked 'final'}} } +namespace Test4 { + +struct A final { virtual void func() = 0; }; // expected-warning {{abstract class is marked 'final'}} expected-note {{unimplemented pure virtual method 'func' in 'A'}} +struct B { virtual void func() = 0; }; // expected-note {{unimplemented pure virtual method 'func' in 'C'}} + +struct C final : B { }; // expected-warning {{abstract class is marked 'final'}} + +} diff --git a/test/CXX/class/p6-0x.cpp b/test/CXX/class/p6-0x.cpp index e153b4d..cf628a6 100644 --- a/test/CXX/class/p6-0x.cpp +++ b/test/CXX/class/p6-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics class Trivial { int n; void f(); }; class NonTrivial1 { NonTrivial1(const NonTrivial1 &); }; diff --git a/test/CXX/conv/conv.prom/p2.cpp b/test/CXX/conv/conv.prom/p2.cpp index 8d75419..ca64cfa 100644 --- a/test/CXX/conv/conv.prom/p2.cpp +++ b/test/CXX/conv/conv.prom/p2.cpp @@ -1,5 +1,6 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -std=c++0x -ffreestanding %s -// RUN: %clang_cc1 -fsyntax-only -verify -std=c++0x -fshort-wchar -ffreestanding %s +// RUN: %clang_cc1 -fsyntax-only -verify -std=c++0x -triple x86_64-pc-linux-gnu -ffreestanding %s +// RUN: %clang_cc1 -fsyntax-only -verify -std=c++0x -triple x86_64-pc-linux-gnu -ffreestanding -fshort-wchar %s +// expected-no-diagnostics #include diff --git a/test/CXX/conv/conv.prom/p4.cpp b/test/CXX/conv/conv.prom/p4.cpp index 02a91cd..8c86d2a 100644 --- a/test/CXX/conv/conv.prom/p4.cpp +++ b/test/CXX/conv/conv.prom/p4.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++0x %s +// expected-no-diagnostics enum X : short { A, B }; extern decltype(+A) x; @@ -7,3 +8,21 @@ extern int x; enum Y : long { C, D }; extern decltype(+C) y; extern long y; + +// An enum with a fixed underlying type has an integral promotion to that type, +// and to its promoted type. 
+enum B : bool { false_, true_ }; +template struct T {}; +T f; +T t; +// FIXME: DR1407 will make this ill-formed +T<+true_> q; // desired-error {{conversion from 'int' to 'bool'}} + +enum B2 : bool { + a = false, + b = true, + c = false_, + d = true_, + // FIXME: DR1407 will make this ill-formed + e = +false_ // desired-error {{conversion from 'int' to 'bool'}} +}; diff --git a/test/CXX/conv/conv.ptr/p2.cpp b/test/CXX/conv/conv.ptr/p2.cpp index 8808d20..b761769 100644 --- a/test/CXX/conv/conv.ptr/p2.cpp +++ b/test/CXX/conv/conv.ptr/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace pr7801 { extern void* x[]; diff --git a/test/CXX/conv/conv.qual/pr6089.cpp b/test/CXX/conv/conv.qual/pr6089.cpp index ae75ec4..bfadc6c 100644 --- a/test/CXX/conv/conv.qual/pr6089.cpp +++ b/test/CXX/conv/conv.qual/pr6089.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics bool is_char_ptr( const char* ); diff --git a/test/CXX/dcl.dcl/basic.namespace/namespace.def/p2.cpp b/test/CXX/dcl.dcl/basic.namespace/namespace.def/p2.cpp index 411c16c..943e053 100644 --- a/test/CXX/dcl.dcl/basic.namespace/namespace.def/p2.cpp +++ b/test/CXX/dcl.dcl/basic.namespace/namespace.def/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // PR8430 namespace N { diff --git a/test/CXX/dcl.dcl/basic.namespace/namespace.def/p7.cpp b/test/CXX/dcl.dcl/basic.namespace/namespace.def/p7.cpp index 98d12f9..f923623 100644 --- a/test/CXX/dcl.dcl/basic.namespace/namespace.def/p7.cpp +++ b/test/CXX/dcl.dcl/basic.namespace/namespace.def/p7.cpp @@ -3,11 +3,11 @@ namespace NIL {} // expected-note {{previous definition}} inline namespace NIL {} // expected-error {{cannot be reopened as inline}} inline namespace IL {} // expected-note {{previous definition}} -namespace IL {} // expected-warning{{inline namespace cannot be re-opened as a non-inline namespace}} +namespace IL {} // expected-warning{{inline namespace cannot be reopened as a non-inline namespace}} namespace {} // expected-note {{previous definition}} inline namespace {} // expected-error {{cannot be reopened as inline}} namespace X { inline namespace {} // expected-note {{previous definition}} - namespace {} // expected-error {{cannot be reopened as non-inline}} + namespace {} // expected-warning {{cannot be reopened as a non-inline namespace}} } diff --git a/test/CXX/dcl.dcl/basic.namespace/namespace.udecl/p10.cpp b/test/CXX/dcl.dcl/basic.namespace/namespace.udecl/p10.cpp index 546c4a4..ae40062 100644 --- a/test/CXX/dcl.dcl/basic.namespace/namespace.udecl/p10.cpp +++ b/test/CXX/dcl.dcl/basic.namespace/namespace.udecl/p10.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace test0 { namespace ns0 { diff --git a/test/CXX/dcl.dcl/basic.namespace/namespace.udecl/p13.cpp b/test/CXX/dcl.dcl/basic.namespace/namespace.udecl/p13.cpp index dd44bfc..699d80a 100644 --- a/test/CXX/dcl.dcl/basic.namespace/namespace.udecl/p13.cpp +++ b/test/CXX/dcl.dcl/basic.namespace/namespace.udecl/p13.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // C++03 [namespace.udecl]p3: // For the purpose of overload resolution, the functions which are diff --git a/test/CXX/dcl.dcl/basic.namespace/namespace.udir/p6.cpp b/test/CXX/dcl.dcl/basic.namespace/namespace.udir/p6.cpp index 4cb91cd..2bcbe26 100644 --- a/test/CXX/dcl.dcl/basic.namespace/namespace.udir/p6.cpp +++ 
b/test/CXX/dcl.dcl/basic.namespace/namespace.udir/p6.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // typedef int pid_t; diff --git a/test/CXX/dcl.dcl/dcl.attr/dcl.attr.grammar/p6.cpp b/test/CXX/dcl.dcl/dcl.attr/dcl.attr.grammar/p6.cpp index f9702ba..416aeb4 100644 --- a/test/CXX/dcl.dcl/dcl.attr/dcl.attr.grammar/p6.cpp +++ b/test/CXX/dcl.dcl/dcl.attr/dcl.attr.grammar/p6.cpp @@ -6,7 +6,8 @@ int p[10]; void f() { int x = 42, y[5]; // FIXME: Produce a better diagnostic for this case. - int(p[[x] { return x; }()]); // expected-error {{expected ']'}} + int(p[[x] { return x; }()]); // expected-error {{expected ']'}} \ + // expected-warning {{unknown attribute 'x' ignored}} y[[] { return 2; }()] = 2; // expected-error {{consecutive left square brackets}} } diff --git a/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p5.cpp b/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p5.cpp index fd17d35..3c1152c 100644 --- a/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p5.cpp +++ b/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p5.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 -fcxx-exceptions %s -// RUN: %clang_cc1 -fsyntax-only -std=c++11 -fcxx-exceptions -Wno-invalid-constexpr %s +// RUN: %clang_cc1 -fsyntax-only -triple x86_64-unknown-unknown -verify -std=c++11 -fcxx-exceptions %s +// RUN: %clang_cc1 -fsyntax-only -triple x86_64-unknown-unknown -std=c++11 -fcxx-exceptions -Wno-invalid-constexpr %s namespace StdExample { @@ -80,7 +80,7 @@ constexpr int Conditional2(bool b, int n) { return b ? n * ng : n + ng; } // exp // __builtin_constant_p ? : is magical, and is always a potential constant. constexpr bool BcpCall(int n) { - return __builtin_constant_p((int*)n != &n) ? (int*)n != &n : (int*)n != &n; + return __builtin_constant_p((int*)n != &n) ? 
(int*)n != &n : (int*)n != &n; // expected-warning 3 {{cast to 'int *' from smaller integer type 'int'}} } static_assert(BcpCall(0), ""); diff --git a/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p3.cpp b/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p3.cpp index f732255..e91cacf 100644 --- a/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p3.cpp +++ b/test/CXX/dcl.dcl/dcl.spec/dcl.type/dcl.spec.auto/p3.cpp @@ -86,3 +86,6 @@ namespace PR13293 { template void h(); } #endif + +auto fail((unknown)); // expected-error{{use of undeclared identifier 'unknown'}} +int& crash = fail; diff --git a/test/CXX/dcl.dcl/dcl.spec/dcl.type/p3-0x.cpp b/test/CXX/dcl.dcl/dcl.spec/dcl.type/p3-0x.cpp index 0b518bb..02cc973 100644 --- a/test/CXX/dcl.dcl/dcl.spec/dcl.type/p3-0x.cpp +++ b/test/CXX/dcl.dcl/dcl.spec/dcl.type/p3-0x.cpp @@ -34,7 +34,7 @@ void f() { void g() throw (struct Ex {}) { // expected-error {{'Ex' can not be defined in a type specifier}} } -int alignas(struct Aa {}) x; // expected-error {{'Aa' can not be defined in a type specifier}} +alignas(struct Aa {}) int x; // expected-error {{'Aa' can not be defined in a type specifier}} int a = sizeof(struct So {}); // expected-error {{'So' can not be defined in a type specifier}} int b = alignof(struct Ao {}); // expected-error {{'Ao' can not be defined in a type specifier}} diff --git a/test/CXX/dcl.decl/dcl.init/dcl.init.aggr/p4.cpp b/test/CXX/dcl.decl/dcl.init/dcl.init.aggr/p4.cpp index 1041571..2342807 100644 --- a/test/CXX/dcl.decl/dcl.init/dcl.init.aggr/p4.cpp +++ b/test/CXX/dcl.decl/dcl.init/dcl.init.aggr/p4.cpp @@ -16,3 +16,9 @@ void tf() { // Allowed by GNU extension int a4[] = {}; // expected-error {{zero size arrays}} + +struct Incomplete; // expected-note {{forward declaration of 'Incomplete'}} +struct A { + Incomplete i; // expected-error {{field has incomplete type 'Incomplete'}} +}; +A a[] = { 0 }; // PR13971: don't hang. 
diff --git a/test/CXX/dcl.decl/dcl.init/dcl.init.ref/basic.cpp b/test/CXX/dcl.decl/dcl.init/dcl.init.ref/basic.cpp index 885d11b..2c9cd88 100644 --- a/test/CXX/dcl.decl/dcl.init/dcl.init.ref/basic.cpp +++ b/test/CXX/dcl.decl/dcl.init/dcl.init.ref/basic.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // PR5787 class C { diff --git a/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p1.cpp b/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p1.cpp index 20c059e..76053f0 100644 --- a/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p1.cpp +++ b/test/CXX/dcl.decl/dcl.init/dcl.init.ref/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics int g(int); void f() { int i; diff --git a/test/CXX/dcl.decl/dcl.init/dcl.init.string/p1.cpp b/test/CXX/dcl.decl/dcl.init/dcl.init.string/p1.cpp index 3631af1..878d2c6 100644 --- a/test/CXX/dcl.decl/dcl.init/dcl.init.string/p1.cpp +++ b/test/CXX/dcl.decl/dcl.init/dcl.init.string/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics char x1[]("hello"); extern char x1[6]; diff --git a/test/CXX/dcl.decl/dcl.init/p7.cpp b/test/CXX/dcl.decl/dcl.init/p7.cpp new file mode 100644 index 0000000..03216f4 --- /dev/null +++ b/test/CXX/dcl.decl/dcl.init/p7.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -std=c++11 -verify %s + +struct NotAggregateBase {}; + +struct A : NotAggregateBase { +private: + A() = default; // expected-note {{here}} +}; +A a = {}; // expected-error {{calling a private constructor}} + +struct B : NotAggregateBase { + explicit B() = default; // expected-note {{here}} +}; +B b = {}; // expected-error {{chosen constructor is explicit}} diff --git a/test/CXX/dcl.decl/dcl.meaning/dcl.fct.default/p2.cpp b/test/CXX/dcl.decl/dcl.meaning/dcl.fct.default/p2.cpp index 0a107eb..68aabca 100644 --- a/test/CXX/dcl.decl/dcl.meaning/dcl.fct.default/p2.cpp +++ b/test/CXX/dcl.decl/dcl.meaning/dcl.fct.default/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics void point(int = 3, int = 4); diff --git a/test/CXX/dcl.decl/dcl.meaning/dcl.fct.default/p3.cpp b/test/CXX/dcl.decl/dcl.meaning/dcl.fct.default/p3.cpp index e9c5e0c..5467a92 100644 --- a/test/CXX/dcl.decl/dcl.meaning/dcl.fct.default/p3.cpp +++ b/test/CXX/dcl.decl/dcl.meaning/dcl.fct.default/p3.cpp @@ -1,9 +1,9 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -void nondecl(int (*f)(int x = 5)) // {expected-error {{default arguments can only be specified}}} +void nondecl(int (*f)(int x = 5)) // expected-error {{default arguments can only be specified}} { - void (*f2)(int = 17) // {expected-error {{default arguments can only be specified}}} - = (void (*)(int = 42))f; // {expected-error {{default arguments can only be specified}}} + void (*f2)(int = 17) // expected-error {{default arguments can only be specified}} + = (void (*)(int = 42))f; // expected-error {{default arguments can only be specified}} } struct X0 { diff --git a/test/CXX/dcl.decl/dcl.meaning/dcl.fct/p14.cpp b/test/CXX/dcl.decl/dcl.meaning/dcl.fct/p14.cpp index 0e69521..bc249b5 100644 --- a/test/CXX/dcl.decl/dcl.meaning/dcl.fct/p14.cpp +++ b/test/CXX/dcl.decl/dcl.meaning/dcl.fct/p14.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics template struct identity; template struct tuple; diff --git a/test/CXX/dcl.decl/dcl.meaning/dcl.ref/p6-0x.cpp b/test/CXX/dcl.decl/dcl.meaning/dcl.ref/p6-0x.cpp index 4ce80bc..cd623df 100644 --- 
a/test/CXX/dcl.decl/dcl.meaning/dcl.ref/p6-0x.cpp +++ b/test/CXX/dcl.decl/dcl.meaning/dcl.ref/p6-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics template struct is_same { diff --git a/test/CXX/dcl.decl/dcl.meaning/p1.cpp b/test/CXX/dcl.decl/dcl.meaning/p1.cpp index 3672ea0..ec9a261 100644 --- a/test/CXX/dcl.decl/dcl.meaning/p1.cpp +++ b/test/CXX/dcl.decl/dcl.meaning/p1.cpp @@ -7,7 +7,7 @@ namespace PR8019 { struct PR8019::x { int x; }; // expected-error{{non-friend class member 'x' cannot have a qualified name}} struct inner; - struct y::inner { }; // expected-warning{{extra qualification on member 'inner'}} + struct y::inner { }; // expected-error{{extra qualification on member 'inner'}} template struct PR8019::x2 { }; // expected-error{{non-friend class member 'x2' cannot have a qualified name}} @@ -16,7 +16,7 @@ namespace PR8019 { struct inner_template; template - struct y::inner_template { }; // expected-warning{{extra qualification on member 'inner_template'}} + struct y::inner_template { }; // expected-error{{extra qualification on member 'inner_template'}} }; } @@ -29,9 +29,9 @@ namespace NS { template void wibble(T); } namespace NS { - void NS::foo() {} // expected-warning{{extra qualification on member 'foo'}} - int NS::bar; // expected-warning{{extra qualification on member 'bar'}} - struct NS::X { }; // expected-warning{{extra qualification on member 'X'}} - template struct NS::Y; // expected-warning{{extra qualification on member 'Y'}} - template void NS::wibble(T) { } // expected-warning{{extra qualification on member 'wibble'}} + void NS::foo() {} // expected-error{{extra qualification on member 'foo'}} + int NS::bar; // expected-error{{extra qualification on member 'bar'}} + struct NS::X { }; // expected-error{{extra qualification on member 'X'}} + template struct NS::Y; // expected-error{{extra qualification on member 'Y'}} + template void NS::wibble(T) { } // expected-error{{extra qualification on member 'wibble'}} } diff --git a/test/CXX/dcl.decl/dcl.name/p1.cpp b/test/CXX/dcl.decl/dcl.name/p1.cpp index 9838b4f..e032a7f 100644 --- a/test/CXX/dcl.decl/dcl.name/p1.cpp +++ b/test/CXX/dcl.decl/dcl.name/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace pr6200 { struct v {}; diff --git a/test/CXX/dcl.decl/p4-0x.cpp b/test/CXX/dcl.decl/p4-0x.cpp index 98c33b2..35177a0 100644 --- a/test/CXX/dcl.decl/p4-0x.cpp +++ b/test/CXX/dcl.decl/p4-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics struct X { void f() &; diff --git a/test/CXX/except/except.spec/canonical.cpp b/test/CXX/except/except.spec/canonical.cpp index 81ca2ae..b6d3e9c 100644 --- a/test/CXX/except/except.spec/canonical.cpp +++ b/test/CXX/except/except.spec/canonical.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics // PR10087: Make sure that we don't conflate exception specifications // from different functions in the canonical type system. diff --git a/test/CXX/except/except.spec/p11.cpp b/test/CXX/except/except.spec/p11.cpp index 0e4fad5..1f6bf21 100644 --- a/test/CXX/except/except.spec/p11.cpp +++ b/test/CXX/except/except.spec/p11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fexceptions -fcxx-exceptions -fsyntax-only -verify %s +// expected-no-diagnostics // This is the "let the user shoot himself in the foot" clause. 
void f() noexcept { diff --git a/test/CXX/except/except.spec/p14.cpp b/test/CXX/except/except.spec/p14.cpp index 4f50afb..ff21ab8 100644 --- a/test/CXX/except/except.spec/p14.cpp +++ b/test/CXX/except/except.spec/p14.cpp @@ -63,3 +63,41 @@ namespace PR13381 { static_assert(!noexcept(X(X::val())), ""); static_assert(!noexcept(X::ref() = X::val()), ""); } + +namespace PR14141 { + // Part of DR1351: the implicit exception-specification is noexcept(false) if + // the set of potential exceptions of the special member function contains + // "any". Hence it is compatible with noexcept(false). + struct ThrowingBase { + ThrowingBase() noexcept(false); + ThrowingBase(const ThrowingBase&) noexcept(false); + ThrowingBase(ThrowingBase&&) noexcept(false); + ThrowingBase &operator=(const ThrowingBase&) noexcept(false); + ThrowingBase &operator=(ThrowingBase&&) noexcept(false); + ~ThrowingBase() noexcept(false); + }; + struct Derived : ThrowingBase { + Derived() noexcept(false) = default; + Derived(const Derived&) noexcept(false) = default; + Derived(Derived&&) noexcept(false) = default; + Derived &operator=(const Derived&) noexcept(false) = default; + Derived &operator=(Derived&&) noexcept(false) = default; + ~Derived() noexcept(false) = default; + }; + struct Derived2 : ThrowingBase { + Derived2() = default; + Derived2(const Derived2&) = default; + Derived2(Derived2&&) = default; + Derived2 &operator=(const Derived2&) = default; + Derived2 &operator=(Derived2&&) = default; + ~Derived2() = default; + }; + struct Derived3 : ThrowingBase { + Derived3() noexcept(true) = default; // expected-error {{does not match the calculated}} + Derived3(const Derived3&) noexcept(true) = default; // expected-error {{does not match the calculated}} + Derived3(Derived3&&) noexcept(true) = default; // expected-error {{does not match the calculated}} + Derived3 &operator=(const Derived3&) noexcept(true) = default; // expected-error {{does not match the calculated}} + Derived3 &operator=(Derived3&&) noexcept(true) = default; // expected-error {{does not match the calculated}} + ~Derived3() noexcept(true) = default; // expected-error {{does not match the calculated}} + }; +} diff --git a/test/CXX/except/except.spec/p15.cpp b/test/CXX/except/except.spec/p15.cpp index 110ec3fa..fcf1235 100644 --- a/test/CXX/except/except.spec/p15.cpp +++ b/test/CXX/except/except.spec/p15.cpp @@ -9,16 +9,20 @@ void f() { delete[] new int[1]; } -void operator delete(void*) noexcept; -void operator delete[](void*) noexcept; +void operator delete(void*); +void operator delete[](void*); + +static_assert(noexcept(operator delete(0)), ""); +static_assert(noexcept(operator delete[](0)), ""); // Same goes for explicit declarations. void operator delete(void*, float); -void operator delete(void*, float) noexcept; - void operator delete[](void*, float); -void operator delete[](void*, float) noexcept; + +static_assert(noexcept(operator delete(0, 0.f)), ""); +static_assert(noexcept(operator delete[](0, 0.f)), ""); // But explicit specs stay. 
void operator delete(void*, double) throw(int); // expected-note {{previous}} +static_assert(!noexcept(operator delete(0, 0.)), ""); void operator delete(void*, double) noexcept; // expected-error {{does not match}} diff --git a/test/CXX/except/except.spec/p4.cpp b/test/CXX/except/except.spec/p4.cpp new file mode 100644 index 0000000..1bf7018 --- /dev/null +++ b/test/CXX/except/except.spec/p4.cpp @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 -std=c++11 %s -verify -fcxx-exceptions + +// We permit overriding an implicit exception specification with an explicit one +// as an extension, for compatibility with existing code. + +struct S { + void a(); // expected-note {{here}} + ~S(); // expected-note {{here}} + void operator delete(void*); // expected-note {{here}} +}; + +void S::a() noexcept {} // expected-error {{does not match previous}} +S::~S() noexcept {} // expected-warning {{function previously declared with an implicit exception specification redeclared with an explicit exception specification}} +void S::operator delete(void*) noexcept {} // expected-warning {{function previously declared with an implicit exception specification redeclared with an explicit exception specification}} + +struct T { + void a() noexcept; // expected-note {{here}} + ~T() noexcept; // expected-note {{here}} + void operator delete(void*) noexcept; // expected-note {{here}} +}; + +void T::a() {} // expected-warning {{missing exception specification 'noexcept'}} +T::~T() {} // expected-warning {{function previously declared with an explicit exception specification redeclared with an implicit exception specification}} +void T::operator delete(void*) {} // expected-warning {{function previously declared with an explicit exception specification redeclared with an implicit exception specification}} + + +// The extension does not extend to function templates. 
+ +template struct U { + T t; + ~U(); // expected-note {{here}} + void operator delete(void*); // expected-note {{here}} +}; + +template U::~U() noexcept(true) {} // expected-error {{exception specification in declaration does not match previous declaration}} +template void U::operator delete(void*) noexcept(false) {} // expected-error {{exception specification in declaration does not match previous declaration}} diff --git a/test/CXX/expr/expr.cast/p4-0x.cpp b/test/CXX/expr/expr.cast/p4-0x.cpp index 96bf5f9..76ac318 100644 --- a/test/CXX/expr/expr.cast/p4-0x.cpp +++ b/test/CXX/expr/expr.cast/p4-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics struct X { }; struct Y : X { }; diff --git a/test/CXX/expr/expr.const/p3-0x-nowarn.cpp b/test/CXX/expr/expr.const/p3-0x-nowarn.cpp index c891374..7d12ced 100644 --- a/test/CXX/expr/expr.const/p3-0x-nowarn.cpp +++ b/test/CXX/expr/expr.const/p3-0x-nowarn.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -std=c++11 -Wno-c++11-narrowing -verify %s +// expected-no-diagnostics // void f(int x) { diff --git a/test/CXX/expr/expr.const/p5-0x.cpp b/test/CXX/expr/expr.const/p5-0x.cpp index 60fabe3..bdb2b23 100644 --- a/test/CXX/expr/expr.const/p5-0x.cpp +++ b/test/CXX/expr/expr.const/p5-0x.cpp @@ -61,10 +61,10 @@ enum NotFixed { // [dcl.align]p2: When the alignment-specifier is of the form // alignas(assignment-expression), the assignment-expression shall be an // integral constant expression -int alignas(ok) alignas1; -int alignas(incomplete) alignas2; // expected-error {{incomplete}} -int alignas(expl) alignas3; // expected-error {{explicit conversion}} -int alignas(ambig) alignas4; // expected-error {{ambiguous conversion}} +alignas(ok) int alignas1; +alignas(incomplete) int alignas2; // expected-error {{incomplete}} +alignas(expl) int alignas3; // expected-error {{explicit conversion}} +alignas(ambig) int alignas4; // expected-error {{ambiguous conversion}} // [dcl.array]p1: If the constant-expression is present, it shall be an integral // constant expression diff --git a/test/CXX/expr/expr.post/expr.const.cast/p1-0x.cpp b/test/CXX/expr/expr.post/expr.const.cast/p1-0x.cpp index 6ba8d51..be89876 100644 --- a/test/CXX/expr/expr.post/expr.const.cast/p1-0x.cpp +++ b/test/CXX/expr/expr.post/expr.const.cast/p1-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // The result of the expression const_cast(v) is of type T. If T is // an lvalue reference to object type, the result is an lvalue; if T diff --git a/test/CXX/expr/expr.post/expr.ref/p3.cpp b/test/CXX/expr/expr.post/expr.ref/p3.cpp index 98771d3..db33c01 100644 --- a/test/CXX/expr/expr.post/expr.ref/p3.cpp +++ b/test/CXX/expr/expr.post/expr.ref/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify -fsyntax-only %s +// expected-no-diagnostics template struct Node { int lhs; diff --git a/test/CXX/expr/expr.post/expr.static.cast/p3-0x.cpp b/test/CXX/expr/expr.post/expr.static.cast/p3-0x.cpp index 9ef15e6..830ccda 100644 --- a/test/CXX/expr/expr.post/expr.static.cast/p3-0x.cpp +++ b/test/CXX/expr/expr.post/expr.static.cast/p3-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to // cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1" (8.5.3). 
diff --git a/test/CXX/expr/expr.post/expr.static.cast/p9-0x.cpp b/test/CXX/expr/expr.post/expr.static.cast/p9-0x.cpp index 731c508..c624c7e 100644 --- a/test/CXX/expr/expr.post/expr.static.cast/p9-0x.cpp +++ b/test/CXX/expr/expr.post/expr.static.cast/p9-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics enum class EC { ec1 }; diff --git a/test/CXX/expr/expr.post/expr.type.conv/p1-0x.cpp b/test/CXX/expr/expr.post/expr.type.conv/p1-0x.cpp index 253744e..568c61b 100644 --- a/test/CXX/expr/expr.post/expr.type.conv/p1-0x.cpp +++ b/test/CXX/expr/expr.post/expr.type.conv/p1-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics struct foo { foo(); diff --git a/test/CXX/expr/expr.prim/expr.prim.general/p3-0x.cpp b/test/CXX/expr/expr.prim/expr.prim.general/p3-0x.cpp index 030c90c..b84cec6 100644 --- a/test/CXX/expr/expr.prim/expr.prim.general/p3-0x.cpp +++ b/test/CXX/expr/expr.prim/expr.prim.general/p3-0x.cpp @@ -91,11 +91,11 @@ namespace Static { namespace PR12564 { struct Base { - void bar(Base&) {} // unexpected-note {{here}} + void bar(Base&) {} // FIXME: expected-note {{here}} }; struct Derived : Base { // FIXME: This should be accepted. - void foo(Derived& d) noexcept(noexcept(d.bar(d))) {} // unexpected-error {{cannot bind to a value of unrelated type}} + void foo(Derived& d) noexcept(noexcept(d.bar(d))) {} // expected-error {{cannot bind to a value of unrelated type}} }; } diff --git a/test/CXX/expr/expr.prim/expr.prim.lambda/p14.cpp b/test/CXX/expr/expr.prim/expr.prim.lambda/p14.cpp index 678fa4b..6358215 100644 --- a/test/CXX/expr/expr.prim/expr.prim.lambda/p14.cpp +++ b/test/CXX/expr/expr.prim/expr.prim.lambda/p14.cpp @@ -73,3 +73,18 @@ struct ExpectedThisLayout { static_assert(sizeof(x) == sizeof(ExpectedThisLayout), "Layout mismatch!"); } }; + +struct CaptureArrayAndThis { + int value; + + void f() { + int array[3]; + [=]() -> int { + int result = value; + for (unsigned i = 0; i < 3; ++i) + result += array[i]; + return result; + }(); + } +}; + diff --git a/test/CXX/expr/expr.prim/expr.prim.lambda/p15.cpp b/test/CXX/expr/expr.prim/expr.prim.lambda/p15.cpp index c4deba9..b4b1605 100644 --- a/test/CXX/expr/expr.prim/expr.prim.lambda/p15.cpp +++ b/test/CXX/expr/expr.prim/expr.prim.lambda/p15.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -std=c++11 %s -verify +// expected-no-diagnostics class NonCopyable { NonCopyable(const NonCopyable&); diff --git a/test/CXX/expr/expr.prim/expr.prim.lambda/p18.cpp b/test/CXX/expr/expr.prim/expr.prim.lambda/p18.cpp index 930a4b3..93c2805 100644 --- a/test/CXX/expr/expr.prim/expr.prim.lambda/p18.cpp +++ b/test/CXX/expr/expr.prim/expr.prim.lambda/p18.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 %s -Wunused -verify +// expected-no-diagnostics template struct is_same { diff --git a/test/CXX/expr/expr.prim/expr.prim.lambda/p20.cpp b/test/CXX/expr/expr.prim/expr.prim.lambda/p20.cpp index 4487cfc..17eb841 100644 --- a/test/CXX/expr/expr.prim/expr.prim.lambda/p20.cpp +++ b/test/CXX/expr/expr.prim/expr.prim.lambda/p20.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 %s -Wunused -verify +// expected-no-diagnostics template void destroy(T* ptr) { diff --git a/test/CXX/expr/expr.prim/expr.prim.lambda/p21.cpp b/test/CXX/expr/expr.prim/expr.prim.lambda/p21.cpp index 7139058..bc2c999 100644 --- a/test/CXX/expr/expr.prim/expr.prim.lambda/p21.cpp +++ b/test/CXX/expr/expr.prim/expr.prim.lambda/p21.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only 
-std=c++11 %s -verify +// expected-no-diagnostics struct DirectInitOnly { explicit DirectInitOnly(DirectInitOnly&); diff --git a/test/CXX/expr/expr.unary/expr.unary.noexcept/sema.cpp b/test/CXX/expr/expr.unary/expr.unary.noexcept/sema.cpp index b5de1a7..1f5969d 100644 --- a/test/CXX/expr/expr.unary/expr.unary.noexcept/sema.cpp +++ b/test/CXX/expr/expr.unary/expr.unary.noexcept/sema.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fcxx-exceptions -fexceptions -fsyntax-only -verify -std=c++11 -fms-extensions %s +// expected-no-diagnostics #define P(e) static_assert(noexcept(e), "expected nothrow") #define N(e) static_assert(!noexcept(e), "expected throw") diff --git a/test/CXX/expr/expr.unary/expr.unary.op/p3.cpp b/test/CXX/expr/expr.unary/expr.unary.op/p3.cpp index 2dd6b23..08ab0ca 100644 --- a/test/CXX/expr/expr.unary/expr.unary.op/p3.cpp +++ b/test/CXX/expr/expr.unary/expr.unary.op/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only %s -verify +// expected-no-diagnostics namespace rdar10544564 { // Check that we don't attempt to use an overloaded operator& when diff --git a/test/CXX/expr/p8.cpp b/test/CXX/expr/p8.cpp index 2f6c094..471d1c5 100644 --- a/test/CXX/expr/p8.cpp +++ b/test/CXX/expr/p8.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics int a0; const volatile int a1 = 2; diff --git a/test/CXX/expr/p9.cpp b/test/CXX/expr/p9.cpp index 803b0cc..4c60b8b 100644 --- a/test/CXX/expr/p9.cpp +++ b/test/CXX/expr/p9.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // floating-point overloads diff --git a/test/CXX/lex/lex.literal/lex.ccon/p1.cpp b/test/CXX/lex/lex.literal/lex.ccon/p1.cpp index 5342153..f84f5fb 100644 --- a/test/CXX/lex/lex.literal/lex.ccon/p1.cpp +++ b/test/CXX/lex/lex.literal/lex.ccon/p1.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Check types of char literals extern char a; diff --git a/test/CXX/lex/lex.trigraph/p3.cpp b/test/CXX/lex/lex.trigraph/p3.cpp index 2be0328..c74d8f3 100644 --- a/test/CXX/lex/lex.trigraph/p3.cpp +++ b/test/CXX/lex/lex.trigraph/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -trigraphs -Wtrigraphs -verify %s +// expected-no-diagnostics char a[] = "?? 
??\"??#??$??%??&??*??+??,??.??0??1??2??3??4??5??6" diff --git a/test/CXX/over/over.built/p1.cpp b/test/CXX/over/over.built/p1.cpp deleted file mode 100644 index 6000f5b..0000000 --- a/test/CXX/over/over.built/p1.cpp +++ /dev/null @@ -1,16 +0,0 @@ -// RUN: %clang_cc1 -fsyntax-only -verify %s - -enum E1 { one }; -enum E2 { two }; - -bool operator >= (E1, E1) { - return false; -} - -bool operator >= (E1, const E2) { - return false; -} - -bool test(E1 a, E1 b, E2 c) { - return a >= b || a >= c; -} diff --git a/test/CXX/over/over.built/p23.cpp b/test/CXX/over/over.built/p23.cpp index 4125521..a1c0d4f 100644 --- a/test/CXX/over/over.built/p23.cpp +++ b/test/CXX/over/over.built/p23.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify %s +// expected-no-diagnostics struct Variant { template operator T(); diff --git a/test/CXX/over/over.built/p25.cpp b/test/CXX/over/over.built/p25.cpp index aea3854..09e550d 100644 --- a/test/CXX/over/over.built/p25.cpp +++ b/test/CXX/over/over.built/p25.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics enum class Color { Red, Green, Blue }; diff --git a/test/CXX/over/over.match/over.match.best/over.ics.rank/p3-0x.cpp b/test/CXX/over/over.match/over.match.best/over.ics.rank/p3-0x.cpp index 3971acc..f813305 100644 --- a/test/CXX/over/over.match/over.match.best/over.ics.rank/p3-0x.cpp +++ b/test/CXX/over/over.match/over.match.best/over.ics.rank/p3-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics namespace std_example { int i; int f1(); diff --git a/test/CXX/over/over.match/over.match.best/p1.cpp b/test/CXX/over/over.match/over.match.best/p1.cpp index 5c315a7..59e3dac 100644 --- a/test/CXX/over/over.match/over.match.best/p1.cpp +++ b/test/CXX/over/over.match/over.match.best/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template int &f0(T*, int); float &f0(void*, int); diff --git a/test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp b/test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp new file mode 100644 index 0000000..35f8808 --- /dev/null +++ b/test/CXX/over/over.match/over.match.funcs/over.match.oper/p3.cpp @@ -0,0 +1,29 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics + +// This is specifically testing the bullet: +// "do not have the same parameter-type-list as any non-template +// non-member candidate." +// The rest is sort of hard to test separately. 
+ +enum E1 { one }; +enum E2 { two }; + +struct A; + +A operator >= (E1, E1); +A operator >= (E1, const E2); + +E1 a; +E2 b; + +extern A test1; +extern decltype(a >= a) test1; +extern decltype(a >= b) test1; + +template A operator <= (E1, T); +extern bool test2; +extern decltype(a <= a) test2; + +extern A test3; +extern decltype(a <= b) test3; \ No newline at end of file diff --git a/test/CXX/over/over.match/over.match.funcs/p4-0x.cpp b/test/CXX/over/over.match/over.match.funcs/p4-0x.cpp index 3845af0..68c7990 100644 --- a/test/CXX/over/over.match/over.match.funcs/p4-0x.cpp +++ b/test/CXX/over/over.match/over.match.funcs/p4-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics template T &lvalue(); template T &&xvalue(); diff --git a/test/CXX/over/over.oper/over.literal/p7.cpp b/test/CXX/over/over.oper/over.literal/p7.cpp index 72411b9..74e9457 100644 --- a/test/CXX/over/over.oper/over.literal/p7.cpp +++ b/test/CXX/over/over.oper/over.literal/p7.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 %s -verify +// expected-no-diagnostics constexpr int operator "" _a(const char *c) { return c[0]; diff --git a/test/CXX/over/over.oper/over.literal/p8.cpp b/test/CXX/over/over.oper/over.literal/p8.cpp index 3f76082..6f63610 100644 --- a/test/CXX/over/over.oper/over.literal/p8.cpp +++ b/test/CXX/over/over.oper/over.literal/p8.cpp @@ -9,11 +9,10 @@ void operator "" _km(long double); // ok string operator "" _i18n(const char*, std::size_t); // ok // FIXME: This should be accepted once we support UCNs template int operator "" \u03C0(); // ok, UCN for lowercase pi // expected-error {{expected identifier}} -float operator ""E(const char *); // expected-error {{C++11 requires a space between literal and identifier}} expected-warning {{reserved}} +float operator ""E(const char *); // expected-error {{invalid suffix on literal}} expected-warning {{reserved}} float operator " " B(const char *); // expected-error {{must be '""'}} expected-warning {{reserved}} string operator "" 5X(const char *, std::size_t); // expected-error {{expected identifier}} double operator "" _miles(double); // expected-error {{parameter}} template int operator "" j(const char*); // expected-error {{parameter}} -// FIXME: Accept this as an extension, with a fix-it to add the space -float operator ""_E(const char *); // expected-error {{C++11 requires a space between the "" and the user-defined suffix in a literal operator}} +float operator ""_E(const char *); diff --git a/test/CXX/special/class.conv/class.conv.ctor/p1.cpp b/test/CXX/special/class.conv/class.conv.ctor/p1.cpp index d2add82..5a45f7c 100644 --- a/test/CXX/special/class.conv/class.conv.ctor/p1.cpp +++ b/test/CXX/special/class.conv/class.conv.ctor/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 %s -verify +// expected-no-diagnostics namespace PR13003 { struct void_type diff --git a/test/CXX/special/class.copy/p15-0x.cpp b/test/CXX/special/class.copy/p15-0x.cpp index fff8844..9d03a55 100644 --- a/test/CXX/special/class.copy/p15-0x.cpp +++ b/test/CXX/special/class.copy/p15-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify %s +// expected-no-diagnostics namespace PR10622 { struct foo { diff --git a/test/CXX/special/class.copy/p8-cxx11.cpp b/test/CXX/special/class.copy/p8-cxx11.cpp index a2613f4..4a9f3f2 100644 --- a/test/CXX/special/class.copy/p8-cxx11.cpp +++ b/test/CXX/special/class.copy/p8-cxx11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 %s -verify +// expected-no-diagnostics // C++98 
[class.copy]p5 / C++11 [class.copy]p8. diff --git a/test/CXX/special/class.ctor/p1.cpp b/test/CXX/special/class.ctor/p1.cpp index 9500a7d..4d82184 100644 --- a/test/CXX/special/class.ctor/p1.cpp +++ b/test/CXX/special/class.ctor/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct X0 { struct type { }; diff --git a/test/CXX/special/class.dtor/p2.cpp b/test/CXX/special/class.dtor/p2.cpp index b05c992..4a10eb9 100644 --- a/test/CXX/special/class.dtor/p2.cpp +++ b/test/CXX/special/class.dtor/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // PR5548 struct A {~A();}; diff --git a/test/CXX/special/class.dtor/p3-0x.cpp b/test/CXX/special/class.dtor/p3-0x.cpp index 44bf5aa..291353a 100644 --- a/test/CXX/special/class.dtor/p3-0x.cpp +++ b/test/CXX/special/class.dtor/p3-0x.cpp @@ -45,7 +45,7 @@ G::~G() {} struct H { B b; - ~H(); + ~H() throw(int); }; H::~H() throw(int) {} diff --git a/test/CXX/special/class.dtor/p3.cpp b/test/CXX/special/class.dtor/p3.cpp new file mode 100644 index 0000000..6f4d5c7 --- /dev/null +++ b/test/CXX/special/class.dtor/p3.cpp @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -std=c++11 -fcxx-exceptions -verify %s + +// The exception specification of a destructor declaration is matched *before* +// the exception specification adjustment occurs. +namespace DR1492 { + struct A { ~A(); }; // expected-note {{here}} + A::~A() noexcept {} // expected-warning {{previously declared with an implicit exception specification}} + + struct B { ~B() noexcept; }; // expected-note {{here}} + B::~B() {} // expected-warning {{previously declared with an explicit exception specification}} + + template struct C { + T t; + ~C(); // expected-note {{here}} + }; + template C::~C() noexcept {} // expected-error {{does not match previous}} +} diff --git a/test/CXX/stmt.stmt/stmt.iter/stmt.ranged/p1.cpp b/test/CXX/stmt.stmt/stmt.iter/stmt.ranged/p1.cpp index 96bb472..3952afd 100644 --- a/test/CXX/stmt.stmt/stmt.iter/stmt.ranged/p1.cpp +++ b/test/CXX/stmt.stmt/stmt.iter/stmt.ranged/p1.cpp @@ -3,20 +3,24 @@ struct pr12960 { int begin; void foo(int x) { - for (int& it : x) { // expected-error {{use of undeclared identifier 'begin'}} expected-note {{range has type 'int'}} + for (int& it : x) { // expected-error {{invalid range expression of type 'int'; no viable 'begin' function available}} } } }; -namespace std { +struct null_t { + operator int*(); +}; + +namespace X { template - auto begin(T &&t) -> decltype(t.begin()) { return t.begin(); } // expected-note 4{{ignored: substitution failure}} + auto begin(T &&t) -> decltype(t.begin()) { return t.begin(); } // expected-note 2{{ignored: substitution failure}} template auto end(T &&t) -> decltype(t.end()) { return t.end(); } // expected-note {{candidate template ignored: substitution failure [with T = }} template auto begin(T &&t) -> decltype(t.alt_begin()) { return t.alt_begin(); } // expected-note {{selected 'begin' template [with T = }} \ - expected-note 4{{candidate template ignored: substitution failure [with T = }} + expected-note 2{{candidate template ignored: substitution failure [with T = }} template auto end(T &&t) -> decltype(t.alt_end()) { return t.alt_end(); } // expected-note {{candidate template ignored: substitution failure [with T = }} @@ -27,19 +31,28 @@ namespace std { } using namespace inner; -} -struct A { // expected-note 2 {{candidate constructor}} - A(); - int *begin(); // expected-note 3{{selected 'begin' function with iterator type 'int *'}} 
expected-note {{'begin' declared here}} - int *end(); -}; + struct A { // expected-note 2 {{candidate constructor}} + A(); + int *begin(); // expected-note 3{{selected 'begin' function with iterator type 'int *'}} expected-note {{'begin' declared here}} + int *end(); + }; -struct B { - B(); - int *alt_begin(); - int *alt_end(); -}; + struct B { + B(); + int *alt_begin(); + int *alt_end(); + }; + + struct NoBeginADL { + null_t alt_end(); + }; + struct NoEndADL { + null_t alt_begin(); + }; +} + +using X::A; void f(); void f(int); @@ -49,19 +62,19 @@ void g() { A __begin; for (char *a : A()) { // expected-error {{cannot initialize a variable of type 'char *' with an lvalue of type 'int'}} } - for (char *a : B()) { // expected-error {{cannot initialize a variable of type 'char *' with an lvalue of type 'int'}} + for (char *a : X::B()) { // expected-error {{cannot initialize a variable of type 'char *' with an lvalue of type 'int'}} } // FIXME: Terrible diagnostic here. auto deduction should fail, but does not! for (double a : f) { // expected-error {{cannot use type '' as a range}} } for (auto a : A()) { } - for (auto a : B()) { + for (auto a : X::B()) { } for (auto *a : A()) { // expected-error {{variable 'a' with type 'auto *' has incompatible initializer of type 'int'}} } // : is not a typo for :: here. - for (A NS:A()) { // expected-error {{no viable conversion from 'int' to 'A'}} + for (A NS:A()) { // expected-error {{no viable conversion from 'int' to 'X::A'}} } for (auto not_in_scope : not_in_scope) { // expected-error {{use of undeclared identifier 'not_in_scope'}} } @@ -92,9 +105,6 @@ void g() { for (auto a : VoidBegin()) // expected-error {{cannot use type 'void' as an iterator}} ; - struct null_t { - operator int*(); - }; struct Differ { int *begin(); // expected-note {{selected 'begin' function with iterator type 'int *'}} null_t end(); // expected-note {{selected 'end' function with iterator type 'null_t'}} @@ -110,15 +120,9 @@ void g() { for (register int a : A()) {} // expected-error {{loop variable 'a' may not be declared 'register'}} for (constexpr int a : A()) {} // expected-error {{loop variable 'a' may not be declared 'constexpr'}} - struct NoBeginADL { - null_t alt_end(); - }; - struct NoEndADL { - null_t alt_begin(); - }; - for (auto u : NoBeginADL()) { // expected-error {{no matching function for call to 'begin'}} expected-note {{range has type 'NoBeginADL'}} + for (auto u : X::NoBeginADL()) { // expected-error {{invalid range expression of type 'X::NoBeginADL'; no viable 'begin' function available}} } - for (auto u : NoEndADL()) { // expected-error {{no matching function for call to 'end'}} expected-note {{range has type 'NoEndADL'}} + for (auto u : X::NoEndADL()) { // expected-error {{invalid range expression of type 'X::NoEndADL'; no viable 'end' function available}} } struct NoBegin { @@ -129,14 +133,15 @@ void g() { }; for (auto u : NoBegin()) { // expected-error {{range type 'NoBegin' has 'end' member but no 'begin' member}} } - for (auto u : NoEnd()) { // expected-error {{range type 'NoEnd' has 'begin' member but no 'end' member}} + for (auto u : NoEnd()) { // expected-error {{range type 'NoEnd' has 'begin' member but no 'end' member}} } struct NoIncr { void *begin(); // expected-note {{selected 'begin' function with iterator type 'void *'}} void *end(); }; - for (auto u : NoIncr()) { // expected-error {{arithmetic on a pointer to void}} + for (auto u : NoIncr()) { // expected-error {{arithmetic on a pointer to void}}\ + expected-note {{in implicit call to 
'operator++' for iterator of type 'NoIncr'}} } struct NoNotEq { @@ -144,7 +149,19 @@ void g() { NoNotEq end(); void operator++(); }; - for (auto u : NoNotEq()) { // expected-error {{invalid operands to binary expression}} + for (auto u : NoNotEq()) { // expected-error {{invalid operands to binary expression}}\ + expected-note {{in implicit call to 'operator!=' for iterator of type 'NoNotEq'}} + } + + struct NoDeref { + NoDeref begin(); // expected-note {{selected 'begin' function}} + NoDeref end(); + void operator++(); + bool operator!=(NoDeref &); + }; + + for (auto u : NoDeref()) { // expected-error {{indirection requires pointer operand}} \ + expected-note {{in implicit call to 'operator*' for iterator of type 'NoDeref'}} } struct NoCopy { @@ -156,8 +173,7 @@ void g() { for (int n : NoCopy()) { // ok } - for (int n : 42) { // expected-error {{no matching function for call to 'begin'}} \ - expected-note {{range has type 'int'}} + for (int n : 42) { // expected-error {{invalid range expression of type 'int'; no viable 'begin' function available}} } for (auto a : *also_incomplete) { // expected-error {{cannot use incomplete type 'struct Incomplete' as a range}} @@ -166,7 +182,7 @@ void g() { template void h(T t) { - for (U u : t) { // expected-error {{no viable conversion from 'A' to 'int'}} + for (U u : t) { // expected-error {{no viable conversion from 'X::A' to 'int'}} } for (auto u : t) { } @@ -179,14 +195,24 @@ template void h(A(&)[13]); // expected-note {{requested here}} template void i(T t) { - for (auto u : t) { // expected-error {{no matching function for call to 'begin'}} \ + for (auto u : t) { // expected-error {{invalid range expression of type 'X::A *'; no viable 'begin' function available}} \ expected-error {{member function 'begin' not viable}} \ - expected-note {{range has type}} + expected-note {{when looking up 'begin' function}} + } } template void i(A*); // expected-note {{requested here}} template void i(const A); // expected-note {{requested here}} +struct StdBeginEnd {}; +namespace std { + int *begin(StdBeginEnd); + int *end(StdBeginEnd); +} +void DR1442() { + for (auto a : StdBeginEnd()) {} // expected-error {{invalid range expression of type 'StdBeginEnd'; no viable 'begin'}} +} + namespace NS { class ADL {}; int *begin(ADL); // expected-note {{no known conversion from 'NS::NoADL' to 'NS::ADL'}} @@ -204,9 +230,10 @@ void end(VoidBeginADL); void j() { for (auto u : NS::ADL()) { } - for (auto u : NS::NoADL()) { // expected-error {{no matching function for call to 'begin'}} expected-note {{range has type}} + for (auto u : NS::NoADL()) { // expected-error {{invalid range expression of type 'NS::NoADL'; no viable 'begin' function available}} } for (auto a : VoidBeginADL()) { // expected-error {{cannot use type 'void' as an iterator}} + } } @@ -215,4 +242,3 @@ void example() { for (int &x : array) x *= 2; } - diff --git a/test/CXX/stmt.stmt/stmt.select/stmt.switch/p2-0x.cpp b/test/CXX/stmt.stmt/stmt.select/stmt.switch/p2-0x.cpp index 000c870..d0f15d4 100644 --- a/test/CXX/stmt.stmt/stmt.select/stmt.switch/p2-0x.cpp +++ b/test/CXX/stmt.stmt/stmt.select/stmt.switch/p2-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 %s -verify +// expected-no-diagnostics struct Value { constexpr Value(int n) : n(n) {} diff --git a/test/CXX/temp/p3.cpp b/test/CXX/temp/p3.cpp index c146bc4..11f72de 100644 --- a/test/CXX/temp/p3.cpp +++ b/test/CXX/temp/p3.cpp @@ -8,7 +8,7 @@ template int S::a, S::b; // expected-error {{can only declare template struct A { static A a; } A::a; // 
expected-error {{expected ';' after struct}} \ expected-error {{use of undeclared identifier 'T'}} \ - expected-warning{{extra qualification}} + expected-error{{extra qualification}} template struct B { } f(); // expected-error {{expected ';' after struct}} \ expected-error {{requires a type specifier}} diff --git a/test/CXX/temp/temp.arg/temp.arg.type/p2-cxx0x.cpp b/test/CXX/temp/temp.arg/temp.arg.type/p2-cxx0x.cpp index b03ed46..67f317b 100644 --- a/test/CXX/temp/temp.arg/temp.arg.type/p2-cxx0x.cpp +++ b/test/CXX/temp/temp.arg/temp.arg.type/p2-cxx0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify %s +// expected-no-diagnostics // C++03 imposed restrictions in this paragraph that were lifted with 0x, so we // just test that the example given now parses cleanly. diff --git a/test/CXX/temp/temp.decls/temp.alias/p1.cpp b/test/CXX/temp/temp.decls/temp.alias/p1.cpp index 966e3c1..aafe480 100644 --- a/test/CXX/temp/temp.decls/temp.alias/p1.cpp +++ b/test/CXX/temp/temp.decls/temp.alias/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics template using U = T; diff --git a/test/CXX/temp/temp.decls/temp.class.spec/p9.cpp b/test/CXX/temp/temp.decls/temp.class.spec/p9.cpp index 2a3e914..df0e68d 100644 --- a/test/CXX/temp/temp.decls/temp.class.spec/p9.cpp +++ b/test/CXX/temp/temp.decls/temp.class.spec/p9.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // PR8905 template diff --git a/test/CXX/temp/temp.decls/temp.class.spec/temp.class.order/p2.cpp b/test/CXX/temp/temp.decls/temp.class.spec/temp.class.order/p2.cpp index 97457ea..64cc592 100644 --- a/test/CXX/temp/temp.decls/temp.class.spec/temp.class.order/p2.cpp +++ b/test/CXX/temp/temp.decls/temp.class.spec/temp.class.order/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template struct X { static const int value = 0; }; diff --git a/test/CXX/temp/temp.decls/temp.class.spec/temp.class.spec.mfunc/p1.cpp b/test/CXX/temp/temp.decls/temp.class.spec/temp.class.spec.mfunc/p1.cpp index 87e21e4..184160a 100644 --- a/test/CXX/temp/temp.decls/temp.class.spec/temp.class.spec.mfunc/p1.cpp +++ b/test/CXX/temp/temp.decls/temp.class.spec/temp.class.spec.mfunc/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template struct A; diff --git a/test/CXX/temp/temp.decls/temp.class/temp.mem.func/p1-retmem.cpp b/test/CXX/temp/temp.decls/temp.class/temp.mem.func/p1-retmem.cpp index 4c05c62..213f0c6 100644 --- a/test/CXX/temp/temp.decls/temp.class/temp.mem.func/p1-retmem.cpp +++ b/test/CXX/temp/temp.decls/temp.class/temp.mem.func/p1-retmem.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template struct X1 { }; diff --git a/test/CXX/temp/temp.decls/temp.class/temp.mem.func/pr5056.cpp b/test/CXX/temp/temp.decls/temp.class/temp.mem.func/pr5056.cpp index 70c9c70..fcbb724 100644 --- a/test/CXX/temp/temp.decls/temp.class/temp.mem.func/pr5056.cpp +++ b/test/CXX/temp/temp.decls/temp.class/temp.mem.func/pr5056.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics extern "C" void * malloc(int); diff --git a/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p3-0x.cpp b/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p3-0x.cpp index 63909fb..6d22f88 100644 --- a/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p3-0x.cpp +++ b/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p3-0x.cpp @@ -1,4 
+1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics // Core DR 532. namespace PR8130 { @@ -15,3 +16,30 @@ namespace PR8130 { int &ir = b * a; } } + +namespace OperatorWithRefQualifier { + struct A { }; + template struct B { + template int &operator*(R&) &&; + }; + + template float &operator*(T&&, R&); + void test() { + A a; + B b; + float &ir = b * a; + int &ir2 = B() * a; + } +} + +namespace OrderWithStaticMember { + struct A { + template int g(T**, int=0) { return 0; } + template static int g(T*) { return 1; } + }; + void f() { + A a; + int **p; + a.g(p); + } +} diff --git a/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p3.cpp b/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p3.cpp index 2ffdd95..8212a12 100644 --- a/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p3.cpp +++ b/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace DeduceVsMember { template @@ -14,3 +15,15 @@ namespace DeduceVsMember { float& ir = (xi == xf); } } + +namespace OrderWithStaticMember { + struct A { + template int g(T**, int=0) { return 0; } + template static int g(T*) { return 1; } + }; + void f() { + A a; + int **p; + a.g(p); + } +} diff --git a/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p5.cpp b/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p5.cpp index 4d34968..5f2dbb6 100644 --- a/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p5.cpp +++ b/test/CXX/temp/temp.decls/temp.fct/temp.func.order/p5.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template int &f(T); template float &f(T*, int=1); diff --git a/test/CXX/temp/temp.decls/temp.fct/temp.over.link/p4.cpp b/test/CXX/temp/temp.decls/temp.fct/temp.over.link/p4.cpp index f42b94a..d24a3fb 100644 --- a/test/CXX/temp/temp.decls/temp.fct/temp.over.link/p4.cpp +++ b/test/CXX/temp/temp.decls/temp.fct/temp.over.link/p4.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // All of these function templates are distinct. 
template void f0(T) { } diff --git a/test/CXX/temp/temp.decls/temp.friend/p5.cpp b/test/CXX/temp/temp.decls/temp.friend/p5.cpp index 63fd3df..4b899e4 100644 --- a/test/CXX/temp/temp.decls/temp.friend/p5.cpp +++ b/test/CXX/temp/temp.decls/temp.friend/p5.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace test0 { template class A { diff --git a/test/CXX/temp/temp.decls/temp.mem/p1.cpp b/test/CXX/temp/temp.decls/temp.mem/p1.cpp index f5f1205..01eab24 100644 --- a/test/CXX/temp/temp.decls/temp.mem/p1.cpp +++ b/test/CXX/temp/temp.decls/temp.mem/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template struct A { static T cond; diff --git a/test/CXX/temp/temp.decls/temp.variadic/deduction.cpp b/test/CXX/temp/temp.decls/temp.variadic/deduction.cpp index fec8060..2e24fc0 100644 --- a/test/CXX/temp/temp.decls/temp.variadic/deduction.cpp +++ b/test/CXX/temp/temp.decls/temp.variadic/deduction.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics namespace DeductionForInstantiation { template diff --git a/test/CXX/temp/temp.decls/temp.variadic/example-bind.cpp b/test/CXX/temp/temp.decls/temp.variadic/example-bind.cpp index db28eea..83e03bc 100644 --- a/test/CXX/temp/temp.decls/temp.variadic/example-bind.cpp +++ b/test/CXX/temp/temp.decls/temp.variadic/example-bind.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Example bind implementation from the variadic templates proposal, // ISO C++ committee document number N2080. diff --git a/test/CXX/temp/temp.decls/temp.variadic/example-function.cpp b/test/CXX/temp/temp.decls/temp.variadic/example-function.cpp index e15203a..4cbacf8 100644 --- a/test/CXX/temp/temp.decls/temp.variadic/example-function.cpp +++ b/test/CXX/temp/temp.decls/temp.variadic/example-function.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Example function implementation from the variadic templates proposal, // ISO C++ committee document number N2080. diff --git a/test/CXX/temp/temp.decls/temp.variadic/example-tuple.cpp b/test/CXX/temp/temp.decls/temp.variadic/example-tuple.cpp index 9de5fa8..f580047 100644 --- a/test/CXX/temp/temp.decls/temp.variadic/example-tuple.cpp +++ b/test/CXX/temp/temp.decls/temp.variadic/example-tuple.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Example tuple implementation from the variadic templates proposal, // ISO C++ committee document number N2080. 
diff --git a/test/CXX/temp/temp.decls/temp.variadic/injected-class-name.cpp b/test/CXX/temp/temp.decls/temp.variadic/injected-class-name.cpp index b5786ac..c09c0b2 100644 --- a/test/CXX/temp/temp.decls/temp.variadic/injected-class-name.cpp +++ b/test/CXX/temp/temp.decls/temp.variadic/injected-class-name.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Check for declaration matching with out-of-line declarations and // variadic templates, which involves proper computation of the diff --git a/test/CXX/temp/temp.decls/temp.variadic/multi-level-substitution.cpp b/test/CXX/temp/temp.decls/temp.variadic/multi-level-substitution.cpp index 485068e..e0ffef5 100644 --- a/test/CXX/temp/temp.decls/temp.variadic/multi-level-substitution.cpp +++ b/test/CXX/temp/temp.decls/temp.variadic/multi-level-substitution.cpp @@ -187,6 +187,30 @@ namespace PacksAtDifferentLevels { add_pointer, add_const>>::value == 0? 1 : -1]; + namespace PR13811 { + constexpr int g(int n, int m) { return n * 10 + m; } + + template + struct X6 { + template + constexpr auto f1(A ...a) -> decltype(g(A(a + B())...)) { return g(A(a + B())...); } + + template + constexpr auto f2(A ...a, B ...b) -> decltype(g((&a)[b] ...)) { return g((&a)[b] ...); } // expected-note {{past-the-end}} + + template struct Inner { + template + constexpr auto f(A ...a, B ...b, C ...c) -> decltype(g(a+b+c...)) { return g(a+b+c...); } + }; + }; + struct A { constexpr operator int() { return 2; } }; + struct B { constexpr operator int() { return 1; } }; + + static_assert(X6().f1(255, 1) == 12, ""); + static_assert(X6().f2(3, 4, 0, 0) == 34, ""); + static_assert(X6().f2(3, 4, 0, 1) == 34, ""); // expected-error {{constant expression}} expected-note {{in call}} + static_assert(X6::Inner().f(1, 2, 3, 4, 5, 6) == 102, ""); + } } namespace ExpandingNonTypeTemplateParameters { diff --git a/test/CXX/temp/temp.decls/temp.variadic/partial-ordering.cpp b/test/CXX/temp/temp.decls/temp.variadic/partial-ordering.cpp index 71bd6aa..36535e3 100644 --- a/test/CXX/temp/temp.decls/temp.variadic/partial-ordering.cpp +++ b/test/CXX/temp/temp.decls/temp.variadic/partial-ordering.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Various tests related to partial ordering of variadic templates. template struct tuple; diff --git a/test/CXX/temp/temp.fct.spec/temp.arg.explicit/p3-0x.cpp b/test/CXX/temp/temp.fct.spec/temp.arg.explicit/p3-0x.cpp index 4d29b74..36b0700 100644 --- a/test/CXX/temp/temp.fct.spec/temp.arg.explicit/p3-0x.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.arg.explicit/p3-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics namespace ParameterPacksWithFunctions { template struct count; diff --git a/test/CXX/temp/temp.fct.spec/temp.arg.explicit/p9-0x.cpp b/test/CXX/temp/temp.fct.spec/temp.arg.explicit/p9-0x.cpp index 81addfe..a9bda62 100644 --- a/test/CXX/temp/temp.fct.spec/temp.arg.explicit/p9-0x.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.arg.explicit/p9-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Metafunction to extract the Nth type from a set of types. 
template struct get_nth_type; diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/cwg1170.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/cwg1170.cpp index c14b063..47184ec 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/cwg1170.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/cwg1170.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics #if !__has_feature(cxx_access_control_sfinae) # error No support for access control as part of SFINAE? diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/sfinae-1.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/sfinae-1.cpp index 6481485..1907bd7 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/sfinae-1.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/sfinae-1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify %s +// expected-no-diagnostics typedef char one_byte; struct two_bytes { char data[2]; }; diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p2.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p2.cpp index c165c45..4be81d8 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p2.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template struct A { }; // bullet 1 diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p4.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p4.cpp index 83b5f23..132d618 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p4.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.call/p4.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace PR8598 { template struct identity { typedef T type; }; diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.conv/p2.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.conv/p2.cpp index 5a9ea08..badd5a8 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.conv/p2.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.conv/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // FIXME: [temp.deduct.conv]p2 bullets 1 and 2 can't actually happen without // references? 
diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.conv/p3.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.conv/p3.cpp index e23e98a..a5916ba 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.conv/p3.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.conv/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct AnyPtr { template operator T*() const; diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.partial/p12.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.partial/p12.cpp index b965300..ec7e897 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.partial/p12.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.partial/p12.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Note: Partial ordering of function templates containing template // parameter packs is independent of the number of deduced arguments diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.partial/p9-0x.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.partial/p9-0x.cpp index f204caf..cc129c0 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.partial/p9-0x.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.partial/p9-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics template int &f0(T&); template float &f0(T&&); diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p10-0x.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p10-0x.cpp index 8183061..b38ade3 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p10-0x.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p10-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics template void f(T&&); template<> void f(int&) { } void (*fp)(int&) = &f; diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p2-0x.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p2-0x.cpp index 5b031c2..e3a9f57 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p2-0x.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p2-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // If type deduction cannot be done for any P/A pair, or if for any // pair the deduction leads to more than one possible set of deduced diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p21.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p21.cpp index 4e98a6d..20e6ea2 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p21.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p21.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Note: Template argument deduction involving parameter packs // (14.5.3) can deduce zero or more arguments for each parameter pack. 
diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p22.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p22.cpp index fcc6cf7..09b1648 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p22.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p22.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // If the original function parameter associated with A is a function // parameter pack and the function parameter associated with P is not diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p5-0x.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p5-0x.cpp index c819d97..d239a5e 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p5-0x.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p5-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // FIXME: More bullets to go! diff --git a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p8-0x.cpp b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p8-0x.cpp index a6b1172..6ef8e2f 100644 --- a/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p8-0x.cpp +++ b/test/CXX/temp/temp.fct.spec/temp.deduct/temp.deduct.type/p8-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics // Deductions specific to C++0x. diff --git a/test/CXX/temp/temp.names/p2.cpp b/test/CXX/temp/temp.names/p2.cpp index 93e45dd..532dd84 100644 --- a/test/CXX/temp/temp.names/p2.cpp +++ b/test/CXX/temp/temp.names/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // Ensure that when enforcing access control an unqualified template name with // explicit template arguments, we don't lose the context of the name lookup diff --git a/test/CXX/temp/temp.names/p4.cpp b/test/CXX/temp/temp.names/p4.cpp index 103a1bd..64ca805 100644 --- a/test/CXX/temp/temp.names/p4.cpp +++ b/test/CXX/temp/temp.names/p4.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct meta { template diff --git a/test/CXX/temp/temp.param/p10-0x.cpp b/test/CXX/temp/temp.param/p10-0x.cpp index 37bb284..21a96bf 100644 --- a/test/CXX/temp/temp.param/p10-0x.cpp +++ b/test/CXX/temp/temp.param/p10-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics template struct Y1; template struct Y2; diff --git a/test/CXX/temp/temp.param/p10.cpp b/test/CXX/temp/temp.param/p10.cpp index b9dac75..4feea82 100644 --- a/test/CXX/temp/temp.param/p10.cpp +++ b/test/CXX/temp/temp.param/p10.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template struct Y1; template struct Y2; diff --git a/test/CXX/temp/temp.param/p13.cpp b/test/CXX/temp/temp.param/p13.cpp index 7e7dbe5..257b36f 100644 --- a/test/CXX/temp/temp.param/p13.cpp +++ b/test/CXX/temp/temp.param/p13.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // The scope of atemplate-parameterextends from its point of // declaration until the end of its template. 
In particular, a diff --git a/test/CXX/temp/temp.param/p15-cxx0x.cpp b/test/CXX/temp/temp.param/p15-cxx0x.cpp index 5fc57a4..59618d2 100644 --- a/test/CXX/temp/temp.param/p15-cxx0x.cpp +++ b/test/CXX/temp/temp.param/p15-cxx0x.cpp @@ -22,3 +22,157 @@ void f(const X x) { template struct X1 { }; X1> x1a; + + +namespace ParameterPackExpansions { + +// A template parameter pack that [contains an unexpanded parameter pack] is a +// pack expansion. + +template struct Outer { + // From [temp.variadic]p4: + // In a template parameter pack that is a pack expansion, the pattern is + // [...the template-parameter...] without the ellipsis. + // Therefore the resulting sequence of parameters is not a parameter pack, + // so is not required to be the last template parameter. + template class ...Bs, typename ...Cs> struct Inner { + struct Check : Bs... { + Check(Cs...); + }; + }; +}; + +template struct TemplateInt {}; +template struct TemplateChar {}; +template struct TemplateIntPtr {}; +int x; + +Outer:: +Inner<12345, 'x', &x, + TemplateInt, TemplateChar, TemplateIntPtr, + int*>:: +Check check(&x); + + +template struct types; + +enum place { _ }; +template struct places {}; + +template struct append_places; +template +struct append_places, places> { + typedef places type; +}; + +template +struct make_places : append_places::type, + typename make_places::type> {}; +template<> struct make_places<0> { typedef places<> type; }; +template<> struct make_places<1> { typedef places<_> type; }; + +template struct wrap { + template struct inner { typedef T type; }; +}; + +template struct takedrop_impl; +template struct takedrop_impl> { + template class ...Take, + template class ...Drop> + struct inner { // expected-note 2{{declared}} + typedef types::type...> take; + typedef types::type...> drop; + }; +}; + +template struct take { + using type = typename takedrop_impl::type>:: + template inner::template inner...>::take; // expected-error {{too few template arguments}} +}; +template struct drop { + using type = typename takedrop_impl::type>:: + template inner::template inner...>::drop; // expected-error {{too few template arguments}} +}; + +using T1 = take<3, int, char, double, long>::type; // expected-note {{previous}} +// FIXME: Desguar the types on the RHS in this diagnostic. +// desired-error {{'types' vs 'types'}} +using T1 = types; // expected-error {{'types' vs 'types::type, typename inner<_>::type, typename inner<_>::type, (no argument)>'}} +using D1 = drop<3, int, char, double, long>::type; +using D1 = types; + +using T2 = take<4, int, char, double, long>::type; // expected-note {{previous}} +using T2 = types; +// FIXME: Desguar the types on the RHS in this diagnostic. +// desired-error {{'types' vs 'types'}} +using T2 = types; // expected-error {{'types' vs 'types::type, typename inner<_>::type, typename inner<_>::type, typename inner<_>::type>'}} +using D2 = drop<4, int, char, double, long>::type; +using D2 = types<>; + +using T3 = take<5, int, char, double, long>::type; // expected-note {{in instantiation of}} +using D3 = drop<5, int, char, double, long>::type; // expected-note {{in instantiation of}} + + +// FIXME: We should accept this code. A parameter pack within a default argument +// in a template template parameter pack is expanded, because the pack is +// implicitly a pack expansion. 
+template struct DefArg { + template class ...Classes> struct Inner { // expected-error {{default argument contains unexpanded parameter pack}} expected-note {{here}} + Inner(Classes<>...); // expected-error {{too few}} + }; +}; +template struct vector {}; +template struct list {}; +vector vi; +list lc; +DefArg::Inner defarg(vi, lc); + + +// FIXME: +// A template parameter pack that is a pack expansion shall not expand a +// parameter pack declared in the same template-parameter-list. +template void error(); // desired-error + +// This case should not produce an error, because in A's instantiation, Cs is +// not a parameter pack. +template void consume(Ts...); +template struct A { + template class ...Cs, Cs ...Vs> struct B { // ok + B() { + consume([]{ + int arr[Vs]; // expected-error {{negative size}} + }...); + } + }; +}; +template using Int = int; +template using Char = char; +A::B b; // expected-note {{here}} + +} + +namespace PR9023 { + template struct A { + template class ...> struct B { + }; + }; + + template struct C { }; + template struct D { }; + + int main() { + A::B e; + } +} + +namespace std_examples { + template class Tuple; + template struct multi_array; + template struct value_holder { + template struct apply { }; + }; + template struct static_array; // expected-error {{must be the last}} + + int n; + value_holder::apply<12345, 'x', &n> test; +} diff --git a/test/CXX/temp/temp.param/p2.cpp b/test/CXX/temp/temp.param/p2.cpp index fed6e9c..4eca057 100644 --- a/test/CXX/temp/temp.param/p2.cpp +++ b/test/CXX/temp/temp.param/p2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // There is no semantic difference between class and typename in a // template-parameter. typename followed by an unqualified-id names a diff --git a/test/CXX/temp/temp.param/p5.cpp b/test/CXX/temp/temp.param/p5.cpp index 3cbb3b7..67efc4e 100644 --- a/test/CXX/temp/temp.param/p5.cpp +++ b/test/CXX/temp/temp.param/p5.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify %s -std=c++11 +// expected-no-diagnostics template struct S { decltype(I) n; diff --git a/test/CXX/temp/temp.param/p8.cpp b/test/CXX/temp/temp.param/p8.cpp index fed048c..592e41e 100644 --- a/test/CXX/temp/temp.param/p8.cpp +++ b/test/CXX/temp/temp.param/p8.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template struct A; template struct A; template struct B; diff --git a/test/CXX/temp/temp.res/temp.dep/p3.cpp b/test/CXX/temp/temp.res/temp.dep/p3.cpp index c41a4c6..88b4752 100644 --- a/test/CXX/temp/temp.res/temp.dep/p3.cpp +++ b/test/CXX/temp/temp.res/temp.dep/p3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct A0 { struct K { }; }; diff --git a/test/CXX/temp/temp.res/temp.dep/temp.dep.constexpr/p2-0x.cpp b/test/CXX/temp/temp.res/temp.dep/temp.dep.constexpr/p2-0x.cpp index 0aba402..8f2a599 100644 --- a/test/CXX/temp/temp.res/temp.dep/temp.dep.constexpr/p2-0x.cpp +++ b/test/CXX/temp/temp.res/temp.dep/temp.dep.constexpr/p2-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -verify %s +// expected-no-diagnostics template struct S; diff --git a/test/CXX/temp/temp.res/temp.local/p1.cpp b/test/CXX/temp/temp.res/temp.local/p1.cpp index 1ad4464..f6ef636 100644 --- a/test/CXX/temp/temp.res/temp.local/p1.cpp +++ b/test/CXX/temp/temp.res/temp.local/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // C++0x [temp.local]p1: // Like normal (non-template) classes, class templates 
have an diff --git a/test/CXX/temp/temp.res/temp.local/p7.cpp b/test/CXX/temp/temp.res/temp.local/p7.cpp index bd05e75..3fa9c99 100644 --- a/test/CXX/temp/temp.res/temp.local/p7.cpp +++ b/test/CXX/temp/temp.res/temp.local/p7.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template struct A { int B; diff --git a/test/CXX/temp/temp.res/temp.local/p8.cpp b/test/CXX/temp/temp.res/temp.local/p8.cpp index 5d9d509..fecfed0 100644 --- a/test/CXX/temp/temp.res/temp.local/p8.cpp +++ b/test/CXX/temp/temp.res/temp.local/p8.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace N { enum { C }; diff --git a/test/CXX/temp/temp.spec/temp.expl.spec/p1.cpp b/test/CXX/temp/temp.spec/temp.expl.spec/p1.cpp index 3843c0d..263356e 100644 --- a/test/CXX/temp/temp.spec/temp.expl.spec/p1.cpp +++ b/test/CXX/temp/temp.spec/temp.expl.spec/p1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // This test creates cases where implicit instantiations of various entities // would cause a diagnostic, but provides expliict specializations for those diff --git a/test/CXX/temp/temp.spec/temp.expl.spec/p11.cpp b/test/CXX/temp/temp.spec/temp.expl.spec/p11.cpp index 5fa2f62..f03811f 100644 --- a/test/CXX/temp/temp.spec/temp.expl.spec/p11.cpp +++ b/test/CXX/temp/temp.spec/temp.expl.spec/p11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template class Array { /* ... */ }; template void sort(Array& v); diff --git a/test/CXX/temp/temp.spec/temp.expl.spec/p9.cpp b/test/CXX/temp/temp.spec/temp.expl.spec/p9.cpp index d4ce01f..10ec66d 100644 --- a/test/CXX/temp/temp.spec/temp.expl.spec/p9.cpp +++ b/test/CXX/temp/temp.spec/temp.expl.spec/p9.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace N { template class X { /* ... */ }; diff --git a/test/CXX/temp/temp.spec/temp.explicit/p11.cpp b/test/CXX/temp/temp.spec/temp.explicit/p11.cpp index 4ca5428..5363cbe 100644 --- a/test/CXX/temp/temp.spec/temp.explicit/p11.cpp +++ b/test/CXX/temp/temp.spec/temp.explicit/p11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics class X { template class Y {}; diff --git a/test/CXX/temp/temp.spec/temp.explicit/p3-0x.cpp b/test/CXX/temp/temp.spec/temp.explicit/p3-0x.cpp index 1028830..146b6b5 100644 --- a/test/CXX/temp/temp.spec/temp.explicit/p3-0x.cpp +++ b/test/CXX/temp/temp.spec/temp.explicit/p3-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -verify %s +// expected-no-diagnostics // If the name declared in the explicit instantiation is an // unqualified name, the explicit instantiation shall appear in the diff --git a/test/CXX/temp/temp.spec/temp.explicit/p6.cpp b/test/CXX/temp/temp.spec/temp.explicit/p6.cpp index 1382272..0f5db21 100644 --- a/test/CXX/temp/temp.spec/temp.explicit/p6.cpp +++ b/test/CXX/temp/temp.spec/temp.explicit/p6.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics template class Array { /* ... */ }; template void sort(Array& v) { } diff --git a/test/CodeCompletion/Inputs/macros.h b/test/CodeCompletion/Inputs/macros.h index 98b5ac6..5f15dfc 100644 --- a/test/CodeCompletion/Inputs/macros.h +++ b/test/CodeCompletion/Inputs/macros.h @@ -2,3 +2,10 @@ #define BAR(X, Y) X, Y #define IDENTITY(X) X #define WIBBLE(...) 
+#define DEAD_MACRO +#undef DEAD_MACRO +#define MACRO_WITH_HISTORY a +#undef MACRO_WITH_HISTORY +#define MACRO_WITH_HISTORY b, c +#undef MACRO_WITH_HISTORY +#define MACRO_WITH_HISTORY(X, Y) X->Y diff --git a/test/CodeCompletion/macros.c b/test/CodeCompletion/macros.c index 0758bbf..28f69b2 100644 --- a/test/CodeCompletion/macros.c +++ b/test/CodeCompletion/macros.c @@ -13,11 +13,15 @@ void test(struct Point *p) { // RUN: %clang_cc1 -include %S/Inputs/macros.h -fsyntax-only -code-completion-macros -code-completion-at=%s:14:9 %s -o - | FileCheck -check-prefix=CC2 %s case } + // RUN: %clang_cc1 -include %S/Inputs/macros.h -fsyntax-only -code-completion-macros -code-completion-at=%s:17:7 %s -o - | FileCheck -check-prefix=CC3 %s +#ifdef Q +#endif // Run the same tests, this time with macros loaded from the PCH file. // RUN: %clang_cc1 -emit-pch -o %t %S/Inputs/macros.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -code-completion-macros -code-completion-at=%s:12:14 %s -o - | FileCheck -check-prefix=CC1 %s // RUN: %clang_cc1 -include-pch %t -fsyntax-only -code-completion-macros -code-completion-at=%s:14:9 %s -o - | FileCheck -check-prefix=CC2 %s + // RUN: %clang_cc1 -include-pch %t -fsyntax-only -code-completion-macros -code-completion-at=%s:17:7 %s -o - | FileCheck -check-prefix=CC3 %s // CC1: color // CC1: x @@ -29,6 +33,14 @@ void test(struct Point *p) { // CC2: FOO // CC2: Green // CC2: IDENTITY(<#X#>) + // CC2: MACRO_WITH_HISTORY(<#X#>, <#Y#>) // CC2: Red // CC2: WIBBLE + + // CC3: BAR + // CC3: DEAD_MACRO + // CC3: FOO + // CC3: IDENTITY + // CC3: MACRO_WITH_HISTORY + // CC3: WIBBLE } diff --git a/test/CodeCompletion/preamble.c b/test/CodeCompletion/preamble.c index f322e09..90ed565 100644 --- a/test/CodeCompletion/preamble.c +++ b/test/CodeCompletion/preamble.c @@ -4,4 +4,4 @@ void foo() { x. // RUN: env CINDEXTEST_EDITING=1 c-index-test -code-completion-at=%s:4:5 -Xclang -code-completion-patterns %s | FileCheck -check-prefix=CHECK-CC1 %s -// CHECK-CC1: FieldDecl:{ResultType int}{TypedText m} (35) (parent: StructDecl 'X') +// CHECK-CC1: FieldDecl:{ResultType int}{TypedText m} (35) diff --git a/test/CodeGen/2004-03-16-AsmRegisterCrash.c b/test/CodeGen/2004-03-16-AsmRegisterCrash.c index 515d243..492e248 100644 --- a/test/CodeGen/2004-03-16-AsmRegisterCrash.c +++ b/test/CodeGen/2004-03-16-AsmRegisterCrash.c @@ -1,6 +1,5 @@ -// RUN: %clang_cc1 -emit-llvm %s -o /dev/null -// XFAIL: * -// XTARGET: arm, i386, i686, x86_64 +// RUN: %clang_cc1 -triple armv7-unknown-unknown %s -o /dev/null +// RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -o /dev/null int foo() { #ifdef __arm__ diff --git a/test/CodeGen/2008-01-25-ByValReadNone.c b/test/CodeGen/2008-01-25-ByValReadNone.c index d977139..ca21f6c 100644 --- a/test/CodeGen/2008-01-25-ByValReadNone.c +++ b/test/CodeGen/2008-01-25-ByValReadNone.c @@ -1,7 +1,9 @@ -// RUN: %clang_cc1 -emit-llvm -o - %s | not grep readonly -// RUN: %clang_cc1 -emit-llvm -o - %s | not grep readnone +// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s -// XFAIL: arm,mips +// XFAIL: mips + +// CHECK-NOT: readonly +// CHECK-NOT: readnone // The struct being passed byval means that we cannot mark the // function readnone. 
Readnone would allow stores to the arg to diff --git a/test/CodeGen/2008-01-25-ZeroSizedAggregate.c b/test/CodeGen/2008-01-25-ZeroSizedAggregate.c index d905985..3ffcc7b 100644 --- a/test/CodeGen/2008-01-25-ZeroSizedAggregate.c +++ b/test/CodeGen/2008-01-25-ZeroSizedAggregate.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm -o - +// REQUIRES: LP64 // Aggregates of size zero should be dropped from argument list. typedef long int Tlong; diff --git a/test/CodeGen/2008-12-23-AsmIntPointerTie.c b/test/CodeGen/2008-12-23-AsmIntPointerTie.c index df646b7..04b285e 100644 --- a/test/CodeGen/2008-12-23-AsmIntPointerTie.c +++ b/test/CodeGen/2008-12-23-AsmIntPointerTie.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm -O1 -o - +// REQUIRES: LP64 typedef long intptr_t; int test(void *b) { diff --git a/test/CodeGen/2009-06-01-addrofknr.c b/test/CodeGen/2009-06-01-addrofknr.c index 17d6fdf..f987e32 100644 --- a/test/CodeGen/2009-06-01-addrofknr.c +++ b/test/CodeGen/2009-06-01-addrofknr.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -o %t -emit-llvm -verify +// expected-no-diagnostics // PR4289 struct funcptr { diff --git a/test/CodeGen/2010-06-17-asmcrash.c b/test/CodeGen/2010-06-17-asmcrash.c index 8e9485b..1b5efd3 100644 --- a/test/CodeGen/2010-06-17-asmcrash.c +++ b/test/CodeGen/2010-06-17-asmcrash.c @@ -1,6 +1,5 @@ -// RUN: %clang_cc1 -emit-llvm -o - %s | llc -mtriple=x86_64-apple-darwin | FileCheck %s -// XFAIL: * -// XTARGET: x86,i386,i686 +// REQUIRES: x86-64-registered-target +// RUN: %clang_cc1 -triple x86_64-unknown-unknown -O1 -S -o - %s | FileCheck %s typedef long long int64_t; typedef unsigned char uint8_t; diff --git a/test/CodeGen/PR3589-freestanding-libcalls.c b/test/CodeGen/PR3589-freestanding-libcalls.c index 8b8282f..40e5fb1 100644 --- a/test/CodeGen/PR3589-freestanding-libcalls.c +++ b/test/CodeGen/PR3589-freestanding-libcalls.c @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -emit-llvm %s -o - | grep 'declare i32 @printf' | count 1 -// RUN: %clang_cc1 -O2 -emit-llvm %s -o - | grep 'declare i32 @puts' | count 1 -// RUN: %clang_cc1 -ffreestanding -O2 -emit-llvm %s -o - | grep 'declare i32 @puts' | count 0 +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm %s -o - | grep 'declare i32 @printf' | count 1 +// RUN: %clang_cc1 -triple i386-unknown-unknown -O2 -emit-llvm %s -o - | grep 'declare i32 @puts' | count 1 +// RUN: %clang_cc1 -triple i386-unknown-unknown -ffreestanding -O2 -emit-llvm %s -o - | grep 'declare i32 @puts' | count 0 int printf(const char *, ...); diff --git a/test/CodeGen/a15.c b/test/CodeGen/a15.c new file mode 100644 index 0000000..e4986d8 --- /dev/null +++ b/test/CodeGen/a15.c @@ -0,0 +1,5 @@ +// RUN: %clang -target armv7-none-linux-gnueabi -mcpu=cortex-a15 -emit-llvm -S %s -o /dev/null + +int main() { + return 0; +} diff --git a/test/CodeGen/address-safety-attr.cpp b/test/CodeGen/address-safety-attr.cpp index da68b1d..5c9862d 100644 --- a/test/CodeGen/address-safety-attr.cpp +++ b/test/CodeGen/address-safety-attr.cpp @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -emit-llvm -o - %s -faddress-sanitizer | FileCheck -check-prefix ASAN %s +// RUN: %clang_cc1 -emit-llvm -o - %s -fsanitize=address | FileCheck -check-prefix ASAN %s // The address_safety attribute should be attached to functions // when AddressSanitizer is enabled, unless no_address_safety_analysis attribute diff --git a/test/CodeGen/alias.c b/test/CodeGen/alias.c index f2e87a5..0ccbca6 100644 --- a/test/CodeGen/alias.c +++ b/test/CodeGen/alias.c @@ -1,32 +1,46 @@ -// RUN: 
%clang_cc1 -triple i386-pc-linux-gnu -emit-llvm -o %t %s -// RUN: grep '@g0 = common global i32 0' %t -// RUN: grep '@f1 = alias void ()\* @f0' %t -// RUN: grep '@g1 = alias i32\* @g0' %t -// RUN: grep 'define void @f0() nounwind {' %t - -void f0(void) { } -extern void f1(void); -extern void f1(void) __attribute((alias("f0"))); +// RUN: %clang_cc1 -triple i386-pc-linux-gnu -emit-llvm -o - %s | FileCheck -check-prefix=CHECKBASIC %s +// RUN: %clang_cc1 -triple armv7a-eabi -mfloat-abi hard -emit-llvm -o - %s | FileCheck -check-prefix=CHECKCC %s int g0; +// CHECKBASIC: @g0 = common global i32 0 +static int bar1 = 42; +// CHECKBASIC: @bar1 = internal global i32 42 + extern int g1; extern int g1 __attribute((alias("g0"))); +// CHECKBASIC: @g1 = alias i32* @g0 + +void f0(void) { } +extern void f1(void); +extern void f1(void) __attribute((alias("f0"))); +// CHECKBASIC: @f1 = alias void ()* @f0 +// CHECKBASIC: define void @f0() nounwind { // Make sure that aliases cause referenced values to be emitted. // PR3200 -// RUN: grep 'define internal i32 @foo1()' %t static inline int foo1() { return 0; } +// CHECKBASIC: define internal i32 @foo1() int foo() __attribute__((alias("foo1"))); - - -// RUN: grep '@bar1 = internal global i32 42' %t -static int bar1 = 42; int bar() __attribute__((alias("bar1"))); - extern int test6(); void test7() { test6(); } // test6 is emitted as extern. // test6 changes to alias. int test6() __attribute__((alias("test7"))); +static int inner(int a) { return 0; } +static int inner_weak(int a) { return 0; } +extern __typeof(inner) inner_a __attribute__((alias("inner"))); +static __typeof(inner_weak) inner_weak_a __attribute__((weakref, alias("inner_weak"))); +// CHECKCC: @inner_a = alias i32 (i32)* @inner +// CHECKCC: define internal arm_aapcs_vfpcc i32 @inner(i32 %a) nounwind { + +int outer(int a) { return inner(a); } +// CHECKCC: define arm_aapcs_vfpcc i32 @outer(i32 %a) nounwind { +// CHECKCC: call arm_aapcs_vfpcc i32 @inner(i32 %{{.*}}) + +int outer_weak(int a) { return inner_weak_a(a); } +// CHECKCC: define arm_aapcs_vfpcc i32 @outer_weak(i32 %a) nounwind { +// CHECKCC: call arm_aapcs_vfpcc i32 @inner_weak(i32 %{{.*}}) +// CHECKCC: define internal arm_aapcs_vfpcc i32 @inner_weak(i32 %a) nounwind { diff --git a/test/CodeGen/arm-aapcs-vfp.c b/test/CodeGen/arm-aapcs-vfp.c index 614b52d..7210229 100644 --- a/test/CodeGen/arm-aapcs-vfp.c +++ b/test/CodeGen/arm-aapcs-vfp.c @@ -6,6 +6,12 @@ // RUN: -ffreestanding \ // RUN: -emit-llvm -w -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple armv7-unknown-nacl-gnueabi \ +// RUN: -target-cpu cortex-a8 \ +// RUN: -mfloat-abi hard \ +// RUN: -ffreestanding \ +// RUN: -emit-llvm -w -o - %s | FileCheck %s + #include struct homogeneous_struct { diff --git a/test/CodeGen/arm-aapcs-zerolength-bitfield.c b/test/CodeGen/arm-aapcs-zerolength-bitfield.c index 140ff6c..2855045 100644 --- a/test/CodeGen/arm-aapcs-zerolength-bitfield.c +++ b/test/CodeGen/arm-aapcs-zerolength-bitfield.c @@ -1,5 +1,6 @@ // REQUIRES: arm-registered-target // RUN: %clang_cc1 -target-abi aapcs -triple armv7-apple-darwin10 %s -verify +// expected-no-diagnostics #include diff --git a/test/CodeGen/arm-abi-vector.c b/test/CodeGen/arm-abi-vector.c new file mode 100644 index 0000000..12e38ba --- /dev/null +++ b/test/CodeGen/arm-abi-vector.c @@ -0,0 +1,263 @@ +// RUN: %clang_cc1 -triple armv7-apple-darwin -target-abi aapcs -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple armv7-apple-darwin -target-abi apcs-gnu -emit-llvm -o - %s | FileCheck -check-prefix=APCS-GNU %s 
+ +#include + +typedef __attribute__(( ext_vector_type(2) )) int __int2; +typedef __attribute__(( ext_vector_type(3) )) char __char3; +typedef __attribute__(( ext_vector_type(5) )) char __char5; +typedef __attribute__(( ext_vector_type(9) )) char __char9; +typedef __attribute__(( ext_vector_type(19) )) char __char19; +typedef __attribute__(( ext_vector_type(3) )) short __short3; +typedef __attribute__(( ext_vector_type(5) )) short __short5; + +// Passing legal vector types as varargs. +double varargs_vec_2i(int fixed, ...) { +// CHECK: varargs_vec_2i +// CHECK: alloca <2 x i32>, align 8 +// CHECK: [[ALIGN:%.*]] = and i32 [[VAR:%.*]], -8 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* +// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 8 +// CHECK: bitcast i8* [[AP_ALIGN]] to <2 x i32>* +// APCS-GNU: varargs_vec_2i +// APCS-GNU: alloca <2 x i32>, align 8 +// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <2 x i32> +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8* {{%.*}}, i32 8 +// APCS-GNU: bitcast <2 x i32>* [[VAR_ALIGN]] to i8* +// APCS-GNU: call void @llvm.memcpy +// APCS-GNU: load <2 x i32>* [[VAR_ALIGN]] + va_list ap; + double sum = fixed; + va_start(ap, fixed); + __int2 c3 = va_arg(ap, __int2); + sum = sum + c3.x + c3.y; + va_end(ap); + return sum; +} + +double test_2i(__int2 *in) { +// CHECK: test_2i +// CHECK: call arm_aapcscc double (i32, ...)* @varargs_vec_2i(i32 3, <2 x i32> {{%.*}}) +// APCS-GNU: test_2i +// APCS-GNU: call double (i32, ...)* @varargs_vec_2i(i32 3, <2 x i32> {{%.*}}) + return varargs_vec_2i(3, *in); +} + +double varargs_vec_3c(int fixed, ...) { +// CHECK: varargs_vec_3c +// CHECK: alloca <3 x i8>, align 4 +// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP:%.*]], i32 4 +// CHECK: bitcast i8* [[AP]] to <3 x i8>* +// APCS-GNU: varargs_vec_3c +// APCS-GNU: alloca <3 x i8>, align 4 +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8* [[AP:%.*]], i32 4 +// APCS-GNU: bitcast i8* [[AP]] to <3 x i8>* + va_list ap; + double sum = fixed; + va_start(ap, fixed); + __char3 c3 = va_arg(ap, __char3); + sum = sum + c3.x + c3.y; + va_end(ap); + return sum; +} + +double test_3c(__char3 *in) { +// CHECK: test_3c +// CHECK: call arm_aapcscc double (i32, ...)* @varargs_vec_3c(i32 3, i32 {{%.*}}) +// APCS-GNU: test_3c +// APCS-GNU: call double (i32, ...)* @varargs_vec_3c(i32 3, i32 {{%.*}}) + return varargs_vec_3c(3, *in); +} + +double varargs_vec_5c(int fixed, ...) { +// CHECK: varargs_vec_5c +// CHECK: alloca <5 x i8>, align 8 +// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* +// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 8 +// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i8>* +// APCS-GNU: varargs_vec_5c +// APCS-GNU: alloca <5 x i8>, align 8 +// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <5 x i8> +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8* {{%.*}}, i32 8 +// APCS-GNU: bitcast <5 x i8>* [[VAR_ALIGN]] to i8* +// APCS-GNU: call void @llvm.memcpy +// APCS-GNU: load <5 x i8>* [[VAR_ALIGN]] + va_list ap; + double sum = fixed; + va_start(ap, fixed); + __char5 c5 = va_arg(ap, __char5); + sum = sum + c5.x + c5.y; + va_end(ap); + return sum; +} + +double test_5c(__char5 *in) { +// CHECK: test_5c +// CHECK: call arm_aapcscc double (i32, ...)* @varargs_vec_5c(i32 5, <2 x i32> {{%.*}}) +// APCS-GNU: test_5c +// APCS-GNU: call double (i32, ...)* @varargs_vec_5c(i32 5, <2 x i32> {{%.*}}) + return varargs_vec_5c(5, *in); +} + +double varargs_vec_9c(int fixed, ...) 
{ +// CHECK: varargs_vec_9c +// CHECK: alloca <9 x i8>, align 16 +// CHECK: [[VAR_ALIGN:%.*]] = alloca <9 x i8> +// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* +// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16 +// CHECK: bitcast <9 x i8>* [[VAR_ALIGN]] to i8* +// CHECK: call void @llvm.memcpy +// CHECK: load <9 x i8>* [[VAR_ALIGN]] +// APCS-GNU: varargs_vec_9c +// APCS-GNU: alloca <9 x i8>, align 16 +// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <9 x i8> +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8* {{%.*}}, i32 16 +// APCS-GNU: bitcast <9 x i8>* [[VAR_ALIGN]] to i8* +// APCS-GNU: call void @llvm.memcpy +// APCS-GNU: load <9 x i8>* [[VAR_ALIGN]] + va_list ap; + double sum = fixed; + va_start(ap, fixed); + __char9 c9 = va_arg(ap, __char9); + sum = sum + c9.x + c9.y; + va_end(ap); + return sum; +} + +double test_9c(__char9 *in) { +// CHECK: test_9c +// CHECK: call arm_aapcscc double (i32, ...)* @varargs_vec_9c(i32 9, <4 x i32> {{%.*}}) +// APCS-GNU: test_9c +// APCS-GNU: call double (i32, ...)* @varargs_vec_9c(i32 9, <4 x i32> {{%.*}}) + return varargs_vec_9c(9, *in); +} + +double varargs_vec_19c(int fixed, ...) { +// CHECK: varargs_vec_19c +// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP:%.*]], i32 4 +// CHECK: [[VAR:%.*]] = bitcast i8* [[AP]] to i8** +// CHECK: [[VAR2:%.*]] = load i8** [[VAR]] +// CHECK: bitcast i8* [[VAR2]] to <19 x i8>* +// APCS-GNU: varargs_vec_19c +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8* [[AP:%.*]], i32 4 +// APCS-GNU: [[VAR:%.*]] = bitcast i8* [[AP]] to i8** +// APCS-GNU: [[VAR2:%.*]] = load i8** [[VAR]] +// APCS-GNU: bitcast i8* [[VAR2]] to <19 x i8>* + va_list ap; + double sum = fixed; + va_start(ap, fixed); + __char19 c19 = va_arg(ap, __char19); + sum = sum + c19.x + c19.y; + va_end(ap); + return sum; +} + +double test_19c(__char19 *in) { +// CHECK: test_19c +// CHECK: call arm_aapcscc double (i32, ...)* @varargs_vec_19c(i32 19, <19 x i8>* {{%.*}}) +// APCS-GNU: test_19c +// APCS-GNU: call double (i32, ...)* @varargs_vec_19c(i32 19, <19 x i8>* {{%.*}}) + return varargs_vec_19c(19, *in); +} + +double varargs_vec_3s(int fixed, ...) { +// CHECK: varargs_vec_3s +// CHECK: alloca <3 x i16>, align 8 +// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* +// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 8 +// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i16>* +// APCS-GNU: varargs_vec_3s +// APCS-GNU: alloca <3 x i16>, align 8 +// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <3 x i16> +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8* {{%.*}}, i32 8 +// APCS-GNU: bitcast <3 x i16>* [[VAR_ALIGN]] to i8* +// APCS-GNU: call void @llvm.memcpy +// APCS-GNU: load <3 x i16>* [[VAR_ALIGN]] + va_list ap; + double sum = fixed; + va_start(ap, fixed); + __short3 c3 = va_arg(ap, __short3); + sum = sum + c3.x + c3.y; + va_end(ap); + return sum; +} + +double test_3s(__short3 *in) { +// CHECK: test_3s +// CHECK: call arm_aapcscc double (i32, ...)* @varargs_vec_3s(i32 3, <2 x i32> {{%.*}}) +// APCS-GNU: test_3s +// APCS-GNU: call double (i32, ...)* @varargs_vec_3s(i32 3, <2 x i32> {{%.*}}) + return varargs_vec_3s(3, *in); +} + +double varargs_vec_5s(int fixed, ...) 
{ +// CHECK: varargs_vec_5s +// CHECK: alloca <5 x i16>, align 16 +// CHECK: [[VAR_ALIGN:%.*]] = alloca <5 x i16> +// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* +// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16 +// CHECK: bitcast <5 x i16>* [[VAR_ALIGN]] to i8* +// CHECK: call void @llvm.memcpy +// CHECK: load <5 x i16>* [[VAR_ALIGN]] +// APCS-GNU: varargs_vec_5s +// APCS-GNU: alloca <5 x i16>, align 16 +// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca <5 x i16> +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8* {{%.*}}, i32 16 +// APCS-GNU: bitcast <5 x i16>* [[VAR_ALIGN]] to i8* +// APCS-GNU: call void @llvm.memcpy +// APCS-GNU: load <5 x i16>* [[VAR_ALIGN]] + va_list ap; + double sum = fixed; + va_start(ap, fixed); + __short5 c5 = va_arg(ap, __short5); + sum = sum + c5.x + c5.y; + va_end(ap); + return sum; +} + +double test_5s(__short5 *in) { +// CHECK: test_5s +// CHECK: call arm_aapcscc double (i32, ...)* @varargs_vec_5s(i32 5, <4 x i32> {{%.*}}) +// APCS-GNU: test_5s +// APCS-GNU: call double (i32, ...)* @varargs_vec_5s(i32 5, <4 x i32> {{%.*}}) + return varargs_vec_5s(5, *in); +} + +// Pass struct as varargs. +typedef struct +{ + __int2 i2; + float f; +} StructWithVec; + +double varargs_struct(int fixed, ...) { +// CHECK: varargs_struct +// CHECK: [[ALIGN:%.*]] = and i32 {{%.*}}, -8 +// CHECK: [[AP_ALIGN:%.*]] = inttoptr i32 [[ALIGN]] to i8* +// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16 +// CHECK: bitcast i8* [[AP_ALIGN]] to %struct.StructWithVec* +// APCS-GNU: varargs_struct +// APCS-GNU: [[VAR_ALIGN:%.*]] = alloca %struct.StructWithVec +// APCS-GNU: [[AP_NEXT:%.*]] = getelementptr i8* {{%.*}}, i32 16 +// APCS-GNU: bitcast %struct.StructWithVec* [[VAR_ALIGN]] to i8* +// APCS-GNU: call void @llvm.memcpy + va_list ap; + double sum = fixed; + va_start(ap, fixed); + StructWithVec c3 = va_arg(ap, StructWithVec); + sum = sum + c3.i2.x + c3.i2.y + c3.f; + va_end(ap); + return sum; +} + +double test_struct(StructWithVec* d) { +// CHECK: test_struct +// CHECK: call arm_aapcscc double (i32, ...)* @varargs_struct(i32 3, [2 x i64] {{%.*}}) +// APCS-GNU: test_struct +// APCS-GNU: call double (i32, ...)* @varargs_struct(i32 3, [2 x i64] {{%.*}}) + return varargs_struct(3, *d); +} diff --git a/test/CodeGen/arm-apcs-zerolength-bitfield.c b/test/CodeGen/arm-apcs-zerolength-bitfield.c index 049ffae..763db65 100644 --- a/test/CodeGen/arm-apcs-zerolength-bitfield.c +++ b/test/CodeGen/arm-apcs-zerolength-bitfield.c @@ -1,5 +1,6 @@ // REQUIRES: arm-registered-target // RUN: %clang_cc1 -target-abi apcs-gnu -triple armv7-apple-darwin10 %s -verify +// expected-no-diagnostics // // Note: gcc forces the alignment to 4 bytes, regardless of the type of the // zero length bitfield. 
diff --git a/test/CodeGen/arm-arguments.c b/test/CodeGen/arm-arguments.c index 2ec729e..63ecd4c 100644 --- a/test/CodeGen/arm-arguments.c +++ b/test/CodeGen/arm-arguments.c @@ -178,3 +178,48 @@ struct s33 { char buf[32*32]; }; void f33(struct s33 s) { } // APCS-GNU: define void @f33(%struct.s33* byval %s) // AAPCS: define arm_aapcscc void @f33(%struct.s33* byval %s) + +// PR14048 +struct s34 { char c; }; +void f34(struct s34 s); +void g34(struct s34 *s) { f34(*s); } +// APCS-GNU: @g34(%struct.s34* %s) +// APCS-GNU: %[[a:.*]] = alloca { [1 x i32] } +// APCS-GNU: %[[gep:.*]] = getelementptr { [1 x i32] }* %[[a]], i32 0, i32 0 +// APCS-GNU: load [1 x i32]* %[[gep]] +// AAPCS: @g34(%struct.s34* %s) +// AAPCS: %[[a:.*]] = alloca { [1 x i32] } +// AAPCS: %[[gep:.*]] = getelementptr { [1 x i32] }* %[[a]], i32 0, i32 0 +// AAPCS: load [1 x i32]* %[[gep]] + +// rdar://12596507 +struct s35 +{ + float v[18]; //make sure byval is on. +} __attribute__((aligned(16))); +typedef struct s35 s35_with_align; + +typedef __attribute__((neon_vector_type(4))) float float32x4_t; +static __attribute__((__always_inline__, __nodebug__)) float32x4_t vaddq_f32( + float32x4_t __a, float32x4_t __b) { + return __a + __b; +} +float32x4_t f35(int i, s35_with_align s1, s35_with_align s2) { + float32x4_t v = vaddq_f32(*(float32x4_t *)&s1, + *(float32x4_t *)&s2); + return v; +} +// APCS-GNU: define <4 x float> @f35(i32 %i, %struct.s35* byval, %struct.s35* byval) +// APCS-GNU: %[[a:.*]] = alloca %struct.s35, align 16 +// APCS-GNU: %[[b:.*]] = bitcast %struct.s35* %[[a]] to i8* +// APCS-GNU: %[[c:.*]] = bitcast %struct.s35* %0 to i8* +// APCS-GNU: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[b]], i8* %[[c]] +// APCS-GNU: %[[d:.*]] = bitcast %struct.s35* %[[a]] to <4 x float>* +// APCS-GNU: load <4 x float>* %[[d]], align 16 +// AAPCS: define arm_aapcscc <4 x float> @f35(i32 %i, %struct.s35* byval, %struct.s35* byval) +// AAPCS: %[[a:.*]] = alloca %struct.s35, align 16 +// AAPCS: %[[b:.*]] = bitcast %struct.s35* %[[a]] to i8* +// AAPCS: %[[c:.*]] = bitcast %struct.s35* %0 to i8* +// AAPCS: call void @llvm.memcpy.p0i8.p0i8.i32(i8* %[[b]], i8* %[[c]] +// AAPCS: %[[d:.*]] = bitcast %struct.s35* %[[a]] to <4 x float>* +// AAPCS: load <4 x float>* %[[d]], align 16 diff --git a/test/CodeGen/arm-asm-warn.c b/test/CodeGen/arm-asm-warn.c new file mode 100644 index 0000000..0c4e97a --- /dev/null +++ b/test/CodeGen/arm-asm-warn.c @@ -0,0 +1,18 @@ +// REQUIRES: arm-registered-target +// RUN: %clang_cc1 -triple armv7 %s -emit-llvm -o /dev/null +// + +typedef __attribute__((neon_vector_type(2))) long long int64x2_t; +typedef struct int64x2x4_t { + int64x2_t val[4]; +} int64x2x4_t; +int64x2x4_t t2(const long long a[]) { + int64x2x4_t r; + __asm__("vldm %[a], { %q[r0], %q[r1], %q[r2], %q[r3] }" + : [r0] "=r"(r.val[0]), // expected-warning {{the size being stored is truncated, use a modifier to specify the size}} + [r1] "=r"(r.val[1]), // expected-warning {{the size being stored is truncated, use a modifier to specify the size}} + [r2] "=r"(r.val[2]), // expected-warning {{the size being stored is truncated, use a modifier to specify the size}} + [r3] "=r"(r.val[3]) // expected-warning {{the size being stored is truncated, use a modifier to specify the size}} + : [a] "r"(a)); + return r; +} diff --git a/test/CodeGen/arm-homogenous.c b/test/CodeGen/arm-homogenous.c index b8d046a..5d21088 100644 --- a/test/CodeGen/arm-homogenous.c +++ b/test/CodeGen/arm-homogenous.c @@ -156,6 +156,47 @@ void 
test_return_union_with_struct_with_fundamental_elems(void) { } // CHECK: declare arm_aapcs_vfpcc %union.union_with_struct_with_fundamental_elems @returns_union_with_struct_with_fundamental_elems() +// Make sure HAs that can be partially fit into VFP registers will be allocated +// on stack and that later VFP candidates will go on stack as well. +typedef struct { + double x; + double a2; + double a3; + double a4; +} struct_of_four_doubles; +extern void takes_struct_of_four_doubles(double a, struct_of_four_doubles b, struct_of_four_doubles c, double d); +struct_of_four_doubles g_s4d; + +void test_struct_of_four_doubles(void) { +// CHECK: test_struct_of_four_doubles +// CHECK: call arm_aapcs_vfpcc void @takes_struct_of_four_doubles(double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, [6 x float] undef, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}) + takes_struct_of_four_doubles(3.0, g_s4d, g_s4d, 4.0); +} + +extern void takes_struct_with_backfill(float f1, double a, float f2, struct_of_four_doubles b, struct_of_four_doubles c, double d); +void test_struct_with_backfill(void) { +// CHECK: test_struct_with_backfill +// CHECK: call arm_aapcs_vfpcc void @takes_struct_with_backfill(float {{.*}}, double {{.*}}, float {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, [4 x float] undef, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}, double {{.*}}) + takes_struct_with_backfill(3.0, 3.1, 3.2, g_s4d, g_s4d, 4.0); +} + +typedef __attribute__(( ext_vector_type(8) )) char __char8; +typedef __attribute__(( ext_vector_type(4) )) short __short4; +typedef struct { + __char8 a1; + __short4 a2; + __char8 a3; + __short4 a4; +} struct_of_vecs; +extern void takes_struct_of_vecs(double a, struct_of_vecs b, struct_of_vecs c, double d); +struct_of_vecs g_vec; + +void test_struct_of_vecs(void) { +// CHECK: test_struct_of_vecs +// CHECK: call arm_aapcs_vfpcc void @takes_struct_of_vecs(double {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, [6 x float] undef, <8 x i8> {{.*}}, <4 x i16> {{.*}}, <8 x i8> {{.*}}, <4 x i16> {{.*}}, double {{.*}}) + takes_struct_of_vecs(3.0, g_vec, g_vec, 4.0); +} + // FIXME: Tests necessary: // - Vectors // - C++ stuff diff --git a/test/CodeGen/arm-pnaclcall.c b/test/CodeGen/arm-pnaclcall.c new file mode 100644 index 0000000..5025995 --- /dev/null +++ b/test/CodeGen/arm-pnaclcall.c @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -triple armv7-unknown-nacl-gnueabi \ +// RUN: -ffreestanding -mfloat-abi hard -target-cpu cortex-a8 \ +// RUN: -emit-llvm -w -o - %s | FileCheck %s + +// Test that functions with pnaclcall attribute generate portable bitcode +// like the le32 arch target + +typedef struct { + int a; + int b; +} s1; +// CHECK: define i32 @f48(%struct.s1* byval %s) +int __attribute__((pnaclcall)) f48(s1 s) { return s.a; } + +// CHECK: define void @f49(%struct.s1* noalias sret %agg.result) +s1 __attribute__((pnaclcall)) f49() { s1 s; s.a = s.b = 1; return s; } + +union simple_union { + int a; + char b; +}; +// Unions should be passed as byval structs +// CHECK: define void @f50(%union.simple_union* byval %s) +void __attribute__((pnaclcall)) f50(union simple_union s) {} + +typedef struct { + int b4 : 4; + int b3 : 3; + int b8 : 8; +} bitfield1; +// Bitfields should be passed as byval structs +// CHECK: define void @f51(%struct.bitfield1* byval %bf1) +void __attribute__((pnaclcall)) f51(bitfield1 bf1) {} diff --git a/test/CodeGen/asm.c b/test/CodeGen/asm.c index b009736..670c244 100644 --- 
a/test/CodeGen/asm.c +++ b/test/CodeGen/asm.c @@ -230,3 +230,12 @@ void t27(void) { // CHECK-NOT: ia_nsdialect // CHECK: ret void } + +// Check handling of '*' and '#' constraint modifiers. +void t28(void) +{ + asm volatile ("/* %0 */" : : "i#*X,*r" (1)); +// CHECK: @t28 +// CHECK: call void asm sideeffect "/* $0 */", "i|r,~{dirflag},~{fpsr},~{flags}"(i32 1) +} + diff --git a/test/CodeGen/atomic-ops.c b/test/CodeGen/atomic-ops.c index 1a9ed36..d79f405 100644 --- a/test/CodeGen/atomic-ops.c +++ b/test/CodeGen/atomic-ops.c @@ -311,4 +311,13 @@ void atomic_init_foo() // CHECK: } } +// CHECK: @invalid_atomic +void invalid_atomic(_Atomic(int) *i) { + __c11_atomic_store(i, 1, memory_order_consume); + __c11_atomic_store(i, 1, memory_order_acquire); + __c11_atomic_store(i, 1, memory_order_acq_rel); + __c11_atomic_load(i, memory_order_release); + __c11_atomic_load(i, memory_order_acq_rel); +} + #endif diff --git a/test/CodeGen/attr-minsize.cpp b/test/CodeGen/attr-minsize.cpp new file mode 100644 index 0000000..a422a62 --- /dev/null +++ b/test/CodeGen/attr-minsize.cpp @@ -0,0 +1,75 @@ +// RUN: %clang_cc1 -Oz -emit-llvm %s -o - | FileCheck %s -check-prefix=Oz +// RUN: %clang_cc1 -O0 -emit-llvm %s -o - | FileCheck %s -check-prefix=OTHER +// RUN: %clang_cc1 -O1 -emit-llvm %s -o - | FileCheck %s -check-prefix=OTHER +// RUN: %clang_cc1 -O2 -emit-llvm %s -o - | FileCheck %s -check-prefix=OTHER +// RUN: %clang_cc1 -O3 -emit-llvm %s -o - | FileCheck %s -check-prefix=OTHER +// RUN: %clang_cc1 -Os -emit-llvm %s -o - | FileCheck %s -check-prefix=OTHER +// Check that we set the minsize attribute on each function +// when Oz optimization level is set. + +int test1() { + return 42; +// Oz: @{{.*}}test1{{.*}}minsize +// Oz: ret +// OTHER: @{{.*}}test1 +// OTHER-NOT: minsize +// OTHER: ret +} + +int test2() { + return 42; +// Oz: @{{.*}}test2{{.*}}minsize +// Oz: ret +// OTHER: @{{.*}}test2 +// OTHER-NOT: minsize +// OTHER: ret +} + +__attribute__((minsize)) +int test3() { + return 42; +// Oz: @{{.*}}test3{{.*}}minsize +// OTHER: @{{.*}}test3{{.*}}minsize +} + +// Check that the minsize attribute is well propagated through +// template instantiation + +template<typename T> +__attribute__((minsize)) +void test4(T arg) { + return; +} + +template +void test4(int arg); +// Oz: define{{.*}}void @{{.*}}test4 +// Oz: minsize +// OTHER: define{{.*}}void @{{.*}}test4 +// OTHER: minsize + +template +void test4(float arg); +// Oz: define{{.*}}void @{{.*}}test4 +// Oz: minsize +// OTHER: define{{.*}}void @{{.*}}test4 +// OTHER: minsize + +template<typename T> +void test5(T arg) { + return; +} + +template +void test5(int arg); +// Oz: define{{.*}}void @{{.*}}test5 +// Oz: minsize +// OTHER: define{{.*}}void @{{.*}}test5 +// OTHER-NOT: minsize + +template +void test5(float arg); +// Oz: define{{.*}}void @{{.*}}test5 +// Oz: minsize +// OTHER: define{{.*}}void @{{.*}}test5 +// OTHER-NOT: minsize diff --git a/test/CodeGen/attr-weakref.c b/test/CodeGen/attr-weakref.c index c1cc03b..560d391 100644 --- a/test/CodeGen/attr-weakref.c +++ b/test/CodeGen/attr-weakref.c @@ -53,6 +53,12 @@ void test6_foo(void) { test6_f(); } +// CHECK: declare extern_weak void @test8_f() +static void test8_g(void) __attribute__((weakref("test8_f"))); +void test8_h(void) { + if (test8_g) + test8_g(); +} // CHECK: declare extern_weak void @test7_f() void test7_f(void); static void test7_g(void) __attribute__((weakref("test7_f"))); diff --git a/test/CodeGen/attributes.c b/test/CodeGen/attributes.c index e971a79..00688dc 100644 --- a/test/CodeGen/attributes.c +++
b/test/CodeGen/attributes.c @@ -80,7 +80,7 @@ void t21(void) { fptr(10); } // CHECK: [[FPTRVAR:%[a-z0-9]+]] = load void (i32)** @fptr -// CHECK-NEXT: call x86_fastcallcc void [[FPTRVAR]](i32 10) +// CHECK-NEXT: call x86_fastcallcc void [[FPTRVAR]](i32 inreg 10) // PR9356: We might want to err on this, but for now at least make sure we diff --git a/test/CodeGen/bitfield-promote.c b/test/CodeGen/bitfield-promote.c index 4c3292c..93aaa9d 100644 --- a/test/CodeGen/bitfield-promote.c +++ b/test/CodeGen/bitfield-promote.c @@ -1,18 +1,22 @@ -// RUN: %clang -O3 -emit-llvm -S -o %t %s -// RUN: grep 'ret i64 4294967292' %t | count 2 -// RUN: grep 'ret i64 -4' %t | count 1 +// RUN: %clang -O3 -emit-llvm -S -o - %s | FileCheck %s long long f0(void) { struct { unsigned f0 : 32; } x = { 18 }; return (long long) (x.f0 - (int) 22); } +// CHECK: @f0() +// CHECK: ret i64 4294967292 long long f1(void) { struct { unsigned f0 : 31; } x = { 18 }; return (long long) (x.f0 - (int) 22); } +// CHECK: @f1() +// CHECK: ret i64 -4 long long f2(void) { struct { unsigned f0 ; } x = { 18 }; return (long long) (x.f0 - (int) 22); } +// CHECK: @f2() +// CHECK: ret i64 4294967292 diff --git a/test/CodeGen/bmi2-builtins.c b/test/CodeGen/bmi2-builtins.c index 18b2319..201cac6 100644 --- a/test/CodeGen/bmi2-builtins.c +++ b/test/CodeGen/bmi2-builtins.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -O3 -triple=x86_64-apple-darwin -target-feature +bmi2 -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 %s -O3 -triple=i386-apple-darwin -target-feature +bmi2 -emit-llvm -o - | FileCheck %s --check-prefix=B32 // Don't include mm_malloc.h, it's system specific. #define __MM_MALLOC_H @@ -20,6 +21,15 @@ unsigned int test_pext_u32(unsigned int __X, unsigned int __Y) { return _pext_u32(__X, __Y); } +unsigned int test_mulx_u32(unsigned int __X, unsigned int __Y, + unsigned int *__P) { + // CHECK: @test_mulx_u32 + // CHECK-NOT: mul i64 + // B32: @test_mulx_u32 + // B32: mul i64 + return _mulx_u32(__X, __Y, __P); +} + unsigned long long test_bzhi_u64(unsigned long long __X, unsigned long long __Y) { // CHECK: @llvm.x86.bmi.bzhi.64 return _bzhi_u64(__X, __Y); @@ -34,3 +44,10 @@ unsigned long long test_pext_u64(unsigned long long __X, unsigned long long __Y) // CHECK: @llvm.x86.bmi.pext.64 return _pext_u64(__X, __Y); } + +unsigned long long test_mulx_u64(unsigned long long __X, unsigned long long __Y, + unsigned long long *__P) { + // CHECK: @test_mulx_u64 + // CHECK: mul i128 + return _mulx_u64(__X, __Y, __P); +} diff --git a/test/CodeGen/builtin-memfns.c b/test/CodeGen/builtin-memfns.c index 72d3406..4a06160 100644 --- a/test/CodeGen/builtin-memfns.c +++ b/test/CodeGen/builtin-memfns.c @@ -63,3 +63,23 @@ int test7(int *p) { __builtin_memset(hwparams, 0, 256); // No crash alignment = 1 // CHECK: call void @llvm.memset{{.*}}256, i32 1, i1 false) } + +// +// Make sure we don't over-estimate the alignment of fields of +// packed structs. 
+struct PS { + int modes[4]; +} __attribute__((packed)); +struct PS ps; +void test8(int *arg) { + // CHECK: @test8 + // CHECK: call void @llvm.memcpy{{.*}} 16, i32 1, i1 false) + __builtin_memcpy(arg, ps.modes, sizeof(struct PS)); +} + +__attribute((aligned(16))) int x[4], y[4]; +void test9() { + // CHECK: @test9 + // CHECK: call void @llvm.memcpy{{.*}} 16, i32 16, i1 false) + __builtin_memcpy(x, y, sizeof(y)); +} diff --git a/test/CodeGen/builtin-ms-noop.cpp b/test/CodeGen/builtin-ms-noop.cpp new file mode 100644 index 0000000..42c2501 --- /dev/null +++ b/test/CodeGen/builtin-ms-noop.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 -triple i686-pc-win32 -emit-llvm %s -o - | FileCheck %s + +class A { + public: + ~A() {} +}; + +void f() { +// CHECK: @_Z1fv +// CHECK-NOT: call void @_ZN1AD1Ev +// CHECK: ret void + __noop(A()); +}; + diff --git a/test/CodeGen/builtins-mips-args.c b/test/CodeGen/builtins-mips-args.c index a961b36..fd3e314 100644 --- a/test/CodeGen/builtins-mips-args.c +++ b/test/CodeGen/builtins-mips-args.c @@ -11,4 +11,27 @@ void foo() { __builtin_mips_rddsp(-1); // expected-error{{argument should be a value from 0 to 63}} __builtin_mips_wrdsp(2052, 64); // expected-error{{argument should be a value from 0 to 63}} __builtin_mips_rddsp(64); // expected-error{{argument should be a value from 0 to 63}} + + // MIPS DSP Rev 2 + + __builtin_mips_append(1, 2, a); // expected-error{{argument to '__builtin_mips_append' must be a constant integer}} + __builtin_mips_balign(1, 2, a); // expected-error{{argument to '__builtin_mips_balign' must be a constant integer}} + __builtin_mips_precr_sra_ph_w(1, 2, a); // expected-error{{argument to '__builtin_mips_precr_sra_ph_w' must be a constant integer}} + __builtin_mips_precr_sra_r_ph_w(1, 2, a); // expected-error{{argument to '__builtin_mips_precr_sra_r_ph_w' must be a constant integer}} + __builtin_mips_prepend(1, 2, a); // expected-error{{argument to '__builtin_mips_prepend' must be a constant integer}} + + __builtin_mips_append(1, 2, -1); // expected-error{{argument should be a value from 0 to 31}} + __builtin_mips_append(1, 2, 32); // expected-error{{argument should be a value from 0 to 31}} + + __builtin_mips_balign(1, 2, -1); // expected-error{{argument should be a value from 0 to 3}} + __builtin_mips_balign(1, 2, 4); // expected-error{{argument should be a value from 0 to 3}} + + __builtin_mips_precr_sra_ph_w(1, 2, -1); // expected-error{{argument should be a value from 0 to 31}} + __builtin_mips_precr_sra_ph_w(1, 2, 32); // expected-error{{argument should be a value from 0 to 31}} + + __builtin_mips_precr_sra_r_ph_w(1, 2, -1); // expected-error{{argument should be a value from 0 to 31}} + __builtin_mips_precr_sra_r_ph_w(1, 2, 32); // expected-error{{argument should be a value from 0 to 31}} + + __builtin_mips_prepend(1, 2, -1); // expected-error{{argument should be a value from 0 to 31}} + __builtin_mips_prepend(1, 2, -1); // expected-error{{argument should be a value from 0 to 31}} } diff --git a/test/CodeGen/builtins-mips.c b/test/CodeGen/builtins-mips.c index 8155a43..ef4662c 100644 --- a/test/CodeGen/builtins-mips.c +++ b/test/CodeGen/builtins-mips.c @@ -8,10 +8,14 @@ typedef unsigned int ui32; typedef long long a64; typedef signed char v4i8 __attribute__ ((vector_size(4))); +typedef signed char v4q7 __attribute__ ((vector_size(4))); +typedef short v2i16 __attribute__ ((vector_size(4))); typedef short v2q15 __attribute__ ((vector_size(4))); void foo() { v2q15 v2q15_r, v2q15_a, v2q15_b, v2q15_c; + v2i16 v2i16_r, v2i16_a, v2i16_b, v2i16_c; 
+ v4q7 v4q7_r, v4q7_a, v4q7_b; v4i8 v4i8_r, v4i8_a, v4i8_b, v4i8_c; q31 q31_r, q31_a, q31_b, q31_c; i32 i32_r, i32_a, i32_b, i32_c; @@ -321,4 +325,210 @@ void foo() { int array_c[100]; i32_r = __builtin_mips_lwx(array_c, 20); // CHECK: call i32 @llvm.mips.lwx + + // MIPS DSP Rev 2 + + v4q7_a = (v4q7) {0x81, 0xff, 0x80, 0x23}; + v4q7_r = __builtin_mips_absq_s_qb (v4q7_a); +// CHECK: call <4 x i8> @llvm.mips.absq.s.qb + + v2q15_a = (v2q15) {0x3334, 0x4444}; + v2q15_b = (v2q15) {0x1111, 0x2222}; + v2q15_r = __builtin_mips_addqh_ph(v2q15_a, v2q15_b); +// CHECK: call <2 x i16> @llvm.mips.addqh.ph + v2q15_a = (v2q15) {0x3334, 0x4444}; + v2q15_b = (v2q15) {0x1111, 0x2222}; + v2q15_r = __builtin_mips_addqh_r_ph(v2q15_a, v2q15_b); +// CHECK: call <2 x i16> @llvm.mips.addqh.r.ph + q31_a = 0x11111112; + q31_b = 0x99999999; + q31_r = __builtin_mips_addqh_w(q31_a, q31_b); +// CHECK: call i32 @llvm.mips.addqh.w + q31_a = 0x11111112; + q31_b = 0x99999999; + q31_r = __builtin_mips_addqh_r_w(q31_a, q31_b); +// CHECK: call i32 @llvm.mips.addqh.r.w + + v2i16_a = (v2i16) {0xffff, 0x2468}; + v2i16_b = (v2i16) {0x1234, 0x1111}; + v2i16_r = __builtin_mips_addu_ph(v2i16_a, v2i16_b); +// CHECK: call <2 x i16> @llvm.mips.addu.ph + v2i16_a = (v2i16) {0xffff, 0x2468}; + v2i16_b = (v2i16) {0x1234, 0x1111}; + v2i16_r = __builtin_mips_addu_s_ph(v2i16_a, v2i16_b); +// CHECK: call <2 x i16> @llvm.mips.addu.s.ph + v4i8_a = (v4i8) {0x11, 0x22, 0x33, 0xff}; + v4i8_b = (v4i8) {0x11, 0x33, 0x99, 0xff}; + v4i8_r = __builtin_mips_adduh_qb(v4i8_a, v4i8_b); +// CHECK: call <4 x i8> @llvm.mips.adduh.qb + v4i8_a = (v4i8) {0x11, 0x22, 0x33, 0xff}; + v4i8_b = (v4i8) {0x11, 0x33, 0x99, 0xff}; + v4i8_r = __builtin_mips_adduh_r_qb(v4i8_a, v4i8_b); +// CHECK: call <4 x i8> @llvm.mips.adduh.r.qb + + i32_a = 0x12345678; + i32_b = 0x87654321; + i32_r = __builtin_mips_append(i32_a, i32_b, 16); +// CHECK: call i32 @llvm.mips.append + i32_a = 0x12345678; + i32_b = 0x87654321; + i32_r = __builtin_mips_balign(i32_a, i32_b, 3); +// CHECK: call i32 @llvm.mips.balign + + v4i8_a = (v4i8) {0x11, 0x22, 0x33, 0x44}; + v4i8_b = (v4i8) {0x11, 0x33, 0x33, 0x44}; + i32_r = __builtin_mips_cmpgdu_eq_qb(v4i8_a, v4i8_b); +// CHECK: call i32 @llvm.mips.cmpgdu.eq.qb + v4i8_a = (v4i8) {0x11, 0x22, 0x33, 0x44}; + v4i8_b = (v4i8) {0x11, 0x33, 0x33, 0x44}; + i32_r = __builtin_mips_cmpgdu_lt_qb(v4i8_a, v4i8_b); +// CHECK: call i32 @llvm.mips.cmpgdu.lt.qb + v4i8_a = (v4i8) {0x11, 0x22, 0x33, 0x54}; + v4i8_b = (v4i8) {0x11, 0x33, 0x33, 0x44}; + i32_r = __builtin_mips_cmpgdu_le_qb(v4i8_a, v4i8_b); +// CHECK: call i32 @llvm.mips.cmpgdu.le.qb + + a64_a = 0x12345678; + v2i16_b = (v2i16) {0xffff, 0x1555}; + v2i16_c = (v2i16) {0x1234, 0x3322}; + a64_r = __builtin_mips_dpa_w_ph(a64_a, v2i16_b, v2i16_c); +// CHECK: call i64 @llvm.mips.dpa.w.ph + a64_a = 0x12345678; + v2i16_b = (v2i16) {0xffff, 0x1555}; + v2i16_c = (v2i16) {0x1234, 0x3322}; + a64_r = __builtin_mips_dps_w_ph(a64_a, v2i16_b, v2i16_c); +// CHECK: call i64 @llvm.mips.dps.w.ph + + a64_a = 0x70000000; + v2q15_b = (v2q15) {0x4000, 0x2000}; + v2q15_c = (v2q15) {0x2000, 0x4000}; + a64_r = __builtin_mips_dpaqx_s_w_ph(a64_a, v2q15_b, v2q15_c); +// CHECK: call i64 @llvm.mips.dpaqx.s.w.ph + a64_a = 0x70000000; + v2q15_b = (v2q15) {0x4000, 0x2000}; + v2q15_c = (v2q15) {0x2000, 0x4000}; + a64_r = __builtin_mips_dpaqx_sa_w_ph(a64_a, v2q15_b, v2q15_c); +// CHECK: call i64 @llvm.mips.dpaqx.sa.w.ph + a64_a = 0x1111222212345678LL; + v2i16_b = (v2i16) {0x1, 0x2}; + v2i16_c = (v2i16) {0x3, 0x4}; + a64_r = 
__builtin_mips_dpax_w_ph(a64_a, v2i16_b, v2i16_c); +// CHECK: call i64 @llvm.mips.dpax.w.ph + a64_a = 0x9999111112345678LL; + v2i16_b = (v2i16) {0x1, 0x2}; + v2i16_c = (v2i16) {0x3, 0x4}; + a64_r = __builtin_mips_dpsx_w_ph(a64_a, v2i16_b, v2i16_c); +// CHECK: call i64 @llvm.mips.dpsx.w.ph + a64_a = 0x70000000; + v2q15_b = (v2q15) {0x4000, 0x2000}; + v2q15_c = (v2q15) {0x2000, 0x4000}; + a64_r = __builtin_mips_dpsqx_s_w_ph(a64_a, v2q15_b, v2q15_c); +// CHECK: call i64 @llvm.mips.dpsqx.s.w.ph + a64_a = 0xFFFFFFFF80000000LL; + v2q15_b = (v2q15) {0x4000, 0x2000}; + v2q15_c = (v2q15) {0x2000, 0x4000}; + a64_r = __builtin_mips_dpsqx_sa_w_ph(a64_a, v2q15_b, v2q15_c); +// CHECK: call i64 @llvm.mips.dpsqx.sa.w.ph + + v2i16_a = (v2i16) {0xffff, 0x2468}; + v2i16_b = (v2i16) {0x1234, 0x1111}; + v2i16_r = __builtin_mips_mul_ph(v2i16_a, v2i16_b); +// CHECK: call <2 x i16> @llvm.mips.mul.ph + v2i16_a = (v2i16) {0x8000, 0x7fff}; + v2i16_b = (v2i16) {0x1234, 0x1111}; + v2i16_r = __builtin_mips_mul_s_ph(v2i16_a, v2i16_b); +// CHECK: call <2 x i16> @llvm.mips.mul.s.ph + + q31_a = 0x80000000; + q31_b = 0x80000000; + q31_r = __builtin_mips_mulq_rs_w(q31_a, q31_b); +// CHECK: call i32 @llvm.mips.mulq.rs.w + v2q15_a = (v2q15) {0xffff, 0x8000}; + v2q15_b = (v2q15) {0x1111, 0x8000}; + v2q15_r = __builtin_mips_mulq_s_ph(v2q15_a, v2q15_b); +// CHECK: call <2 x i16> @llvm.mips.mulq.s.ph + q31_a = 0x00000002; + q31_b = 0x80000000; + q31_r = __builtin_mips_mulq_s_w(q31_a, q31_b); +// CHECK: call i32 @llvm.mips.mulq.s.w + a64_a = 0x19848419; + v2i16_b = (v2i16) {0xffff, 0x8000}; + v2i16_c = (v2i16) {0x1111, 0x8000}; + a64_r = __builtin_mips_mulsa_w_ph(a64_a, v2i16_b, v2i16_c); +// CHECK: call i64 @llvm.mips.mulsa.w.ph + + v2i16_a = (v2i16) {0x1234, 0x5678}; + v2i16_b = (v2i16) {0x2233, 0x5566}; + v4i8_r = __builtin_mips_precr_qb_ph(v2i16_a, v2i16_b); +// CHECK: call <4 x i8> @llvm.mips.precr.qb.ph + i32_a = 0x12345678; + i32_b = 0x33334444; + v2i16_r = __builtin_mips_precr_sra_ph_w(i32_a, i32_b, 4); +// CHECK: call <2 x i16> @llvm.mips.precr.sra.ph.w + i32_a = 0x12345678; + i32_b = 0x33334444; + v2i16_r = __builtin_mips_precr_sra_r_ph_w(i32_a, i32_b, 4); +// CHECK: call <2 x i16> @llvm.mips.precr.sra.r.ph.w + + i32_a = 0x12345678; + i32_b = 0x87654321; + i32_r = __builtin_mips_prepend(i32_a, i32_b, 16); +// CHECK: call i32 @llvm.mips.prepend + + v4i8_a = (v4i8) {0x12, 0x45, 0x77, 0x99}; + v4i8_r = __builtin_mips_shra_qb(v4i8_a, 1); +// CHECK: call <4 x i8> @llvm.mips.shra.qb + v4i8_a = (v4i8) {0x12, 0x45, 0x77, 0x99}; + i32_b = 1; + v4i8_r = __builtin_mips_shra_qb(v4i8_a, i32_b); +// CHECK: call <4 x i8> @llvm.mips.shra.qb + v4i8_a = (v4i8) {0x12, 0x45, 0x77, 0x99}; + v4i8_r = __builtin_mips_shra_r_qb(v4i8_a, 1); +// CHECK: call <4 x i8> @llvm.mips.shra.r.qb + v4i8_a = (v4i8) {0x12, 0x45, 0x77, 0x99}; + i32_b = 1; + v4i8_r = __builtin_mips_shra_r_qb(v4i8_a, i32_b); +// CHECK: call <4 x i8> @llvm.mips.shra.r.qb + v2i16_a = (v2i16) {0x1357, 0x2468}; + v2i16_r = __builtin_mips_shrl_ph(v2i16_a, 4); +// CHECK: call <2 x i16> @llvm.mips.shrl.ph + v2i16_a = (v2i16) {0x1357, 0x2468}; + i32_b = 8; + v2i16_r = __builtin_mips_shrl_ph (v2i16_a, i32_b); +// CHECK: call <2 x i16> @llvm.mips.shrl.ph + + v2q15_a = (v2q15) {0x3334, 0x4444}; + v2q15_b = (v2q15) {0x1111, 0x2222}; + v2q15_r = __builtin_mips_subqh_ph(v2q15_a, v2q15_b); +// CHECK: call <2 x i16> @llvm.mips.subqh.ph + v2q15_a = (v2q15) {0x3334, 0x4444}; + v2q15_b = (v2q15) {0x1111, 0x2222}; + v2q15_r = __builtin_mips_subqh_r_ph(v2q15_a, v2q15_b); +// CHECK: call <2 x i16> 
@llvm.mips.subqh.r.ph + q31_a = 0x11111112; + q31_b = 0x99999999; + q31_r = __builtin_mips_subqh_w(q31_a, q31_b); +// CHECK: call i32 @llvm.mips.subqh.w + q31_a = 0x11111112; + q31_b = 0x99999999; + q31_r = __builtin_mips_subqh_r_w(q31_a, q31_b); +// CHECK: call i32 @llvm.mips.subqh.r.w + + v2i16_a = (v2i16) {0x1357, 0x4455}; + v2i16_b = (v2i16) {0x3333, 0x4444}; + v2i16_r = __builtin_mips_subu_ph(v2i16_a, v2i16_b); +// CHECK: call <2 x i16> @llvm.mips.subu.ph + v2i16_a = (v2i16) {0x1357, 0x4455}; + v2i16_b = (v2i16) {0x3333, 0x4444}; + v2i16_r = __builtin_mips_subu_s_ph(v2i16_a, v2i16_b); +// CHECK: call <2 x i16> @llvm.mips.subu.s.ph + + v4i8_a = (v4i8) {0x33 ,0x44, 0x55, 0x66}; + v4i8_b = (v4i8) {0x99 ,0x15, 0x85, 0xff}; + v4i8_r = __builtin_mips_subuh_qb(v4i8_a, v4i8_b); +// CHECK: call <4 x i8> @llvm.mips.subuh.qb + v4i8_a = (v4i8) {0x33 ,0x44, 0x55, 0x66}; + v4i8_b = (v4i8) {0x99 ,0x15, 0x85, 0xff}; + v4i8_r = __builtin_mips_subuh_r_qb(v4i8_a, v4i8_b); +// CHECK: call <4 x i8> @llvm.mips.subuh.r.qb } diff --git a/test/CodeGen/builtins-nvptx.c b/test/CodeGen/builtins-nvptx.c index fa6b14c..2c7e0c1 100644 --- a/test/CodeGen/builtins-nvptx.c +++ b/test/CodeGen/builtins-nvptx.c @@ -1,8 +1,15 @@ -// RUN: %clang_cc1 -triple nvptx-unknown-unknown -emit-llvm -o %t %s -// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -emit-llvm -o %t %s +// REQUIRES: nvptx-registered-target +// REQUIRES: nvptx64-registered-target +// RUN: %clang_cc1 -triple nvptx-unknown-unknown -S -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -S -emit-llvm -o - %s | FileCheck %s int read_tid() { +// CHECK: call i32 @llvm.ptx.read.tid.x() +// CHECK: call i32 @llvm.ptx.read.tid.y() +// CHECK: call i32 @llvm.ptx.read.tid.z() +// CHECK: call i32 @llvm.ptx.read.tid.w() + int x = __builtin_ptx_read_tid_x(); int y = __builtin_ptx_read_tid_y(); int z = __builtin_ptx_read_tid_z(); @@ -14,6 +21,11 @@ int read_tid() { int read_ntid() { +// CHECK: call i32 @llvm.ptx.read.ntid.x() +// CHECK: call i32 @llvm.ptx.read.ntid.y() +// CHECK: call i32 @llvm.ptx.read.ntid.z() +// CHECK: call i32 @llvm.ptx.read.ntid.w() + int x = __builtin_ptx_read_ntid_x(); int y = __builtin_ptx_read_ntid_y(); int z = __builtin_ptx_read_ntid_z(); @@ -25,6 +37,11 @@ int read_ntid() { int read_ctaid() { +// CHECK: call i32 @llvm.ptx.read.ctaid.x() +// CHECK: call i32 @llvm.ptx.read.ctaid.y() +// CHECK: call i32 @llvm.ptx.read.ctaid.z() +// CHECK: call i32 @llvm.ptx.read.ctaid.w() + int x = __builtin_ptx_read_ctaid_x(); int y = __builtin_ptx_read_ctaid_y(); int z = __builtin_ptx_read_ctaid_z(); @@ -36,6 +53,11 @@ int read_ctaid() { int read_nctaid() { +// CHECK: call i32 @llvm.ptx.read.nctaid.x() +// CHECK: call i32 @llvm.ptx.read.nctaid.y() +// CHECK: call i32 @llvm.ptx.read.nctaid.z() +// CHECK: call i32 @llvm.ptx.read.nctaid.w() + int x = __builtin_ptx_read_nctaid_x(); int y = __builtin_ptx_read_nctaid_y(); int z = __builtin_ptx_read_nctaid_z(); @@ -47,6 +69,13 @@ int read_nctaid() { int read_ids() { +// CHECK: call i32 @llvm.ptx.read.laneid() +// CHECK: call i32 @llvm.ptx.read.warpid() +// CHECK: call i32 @llvm.ptx.read.nwarpid() +// CHECK: call i32 @llvm.ptx.read.smid() +// CHECK: call i32 @llvm.ptx.read.nsmid() +// CHECK: call i32 @llvm.ptx.read.gridid() + int a = __builtin_ptx_read_laneid(); int b = __builtin_ptx_read_warpid(); int c = __builtin_ptx_read_nwarpid(); @@ -60,6 +89,12 @@ int read_ids() { int read_lanemasks() { +// CHECK: call i32 @llvm.ptx.read.lanemask.eq() +// CHECK: call i32 
@llvm.ptx.read.lanemask.le() +// CHECK: call i32 @llvm.ptx.read.lanemask.lt() +// CHECK: call i32 @llvm.ptx.read.lanemask.ge() +// CHECK: call i32 @llvm.ptx.read.lanemask.gt() + int a = __builtin_ptx_read_lanemask_eq(); int b = __builtin_ptx_read_lanemask_le(); int c = __builtin_ptx_read_lanemask_lt(); @@ -73,6 +108,9 @@ int read_lanemasks() { long read_clocks() { +// CHECK: call i32 @llvm.ptx.read.clock() +// CHECK: call i64 @llvm.ptx.read.clock64() + int a = __builtin_ptx_read_clock(); long b = __builtin_ptx_read_clock64(); @@ -82,6 +120,11 @@ long read_clocks() { int read_pms() { +// CHECK: call i32 @llvm.ptx.read.pm0() +// CHECK: call i32 @llvm.ptx.read.pm1() +// CHECK: call i32 @llvm.ptx.read.pm2() +// CHECK: call i32 @llvm.ptx.read.pm3() + int a = __builtin_ptx_read_pm0(); int b = __builtin_ptx_read_pm1(); int c = __builtin_ptx_read_pm2(); @@ -93,6 +136,33 @@ int read_pms() { void sync() { +// CHECK: call void @llvm.ptx.bar.sync(i32 0) + __builtin_ptx_bar_sync(0); } + + +// NVVM intrinsics + +// The idea is not to test all intrinsics, just that Clang is recognizing the +// builtins defined in BuiltinsNVPTX.def +void nvvm_math(float f1, float f2, double d1, double d2) { +// CHECK: call float @llvm.nvvm.fmax.f + float t1 = __nvvm_fmax_f(f1, f2); +// CHECK: call float @llvm.nvvm.fmin.f + float t2 = __nvvm_fmin_f(f1, f2); +// CHECK: call float @llvm.nvvm.sqrt.rn.f + float t3 = __nvvm_sqrt_rn_f(f1); +// CHECK: call float @llvm.nvvm.rcp.rn.f + float t4 = __nvvm_rcp_rn_f(f2); + +// CHECK: call double @llvm.nvvm.fmax.d + double td1 = __nvvm_fmax_d(d1, d2); +// CHECK: call double @llvm.nvvm.fmin.d + double td2 = __nvvm_fmin_d(d1, d2); +// CHECK: call double @llvm.nvvm.sqrt.rn.d + double td3 = __nvvm_sqrt_rn_d(d1); +// CHECK: call double @llvm.nvvm.rcp.rn.d + double td4 = __nvvm_rcp_rn_d(d2); +} diff --git a/test/CodeGen/builtins.c b/test/CodeGen/builtins.c index 65b9ad1..9ba12bb 100644 --- a/test/CodeGen/builtins.c +++ b/test/CodeGen/builtins.c @@ -113,6 +113,7 @@ int main() { // Whatever + P(bswap16, (N)); P(bswap32, (N)); P(bswap64, (N)); // FIXME diff --git a/test/CodeGen/catch-undef-behavior.c b/test/CodeGen/catch-undef-behavior.c index ee0b658..4198b62 100644 --- a/test/CodeGen/catch-undef-behavior.c +++ b/test/CodeGen/catch-undef-behavior.c @@ -1,17 +1,248 @@ -// RUN: %clang_cc1 -fcatch-undefined-behavior -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -fsanitize=alignment,null,object-size,shift,return,signed-integer-overflow,vla-bound,float-cast-overflow,divide-by-zero -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s +// RUN: %clang_cc1 -fsanitize=null -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-NULL +// RUN: %clang_cc1 -fsanitize=signed-integer-overflow -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s --check-prefix=CHECK-OVERFLOW + +// CHECK: @[[INT:.*]] = private unnamed_addr constant { i16, i16, [6 x i8] } { i16 0, i16 11, [6 x i8] c"'int'\00" } + +// FIXME: When we only emit each type once, use [[INT]] more below. 
+// CHECK: @[[LINE_100:.*]] = private unnamed_addr constant {{.*}}, i32 100, i32 5 {{.*}} @[[INT]], i64 4, i8 1 +// CHECK: @[[LINE_200:.*]] = {{.*}}, i32 200, i32 10 {{.*}}, i64 4, i8 0 +// CHECK: @[[LINE_300_A:.*]] = {{.*}}, i32 300, i32 12 {{.*}} @{{.*}}, {{.*}} @{{.*}} +// CHECK: @[[LINE_300_B:.*]] = {{.*}}, i32 300, i32 12 {{.*}} @{{.*}}, {{.*}} @{{.*}} +// CHECK: @[[LINE_400:.*]] = {{.*}}, i32 400, i32 12 {{.*}} @{{.*}}, {{.*}} @{{.*}} +// CHECK: @[[LINE_500:.*]] = {{.*}}, i32 500, i32 10 {{.*}} @{{.*}}, i64 4, i8 0 } +// CHECK: @[[LINE_600:.*]] = {{.*}}, i32 600, i32 3 {{.*}} @{{.*}}, i64 4, i8 1 } + +// CHECK: @[[STRUCT_S:.*]] = private unnamed_addr constant { i16, i16, [11 x i8] } { i16 -1, i16 0, [11 x i8] c"'struct S'\00" } + +// CHECK: @[[LINE_700:.*]] = {{.*}}, i32 700, i32 14 {{.*}} @[[STRUCT_S]], i64 4, i8 3 } +// CHECK: @[[LINE_800:.*]] = {{.*}}, i32 800, i32 12 {{.*}} @{{.*}} } +// CHECK: @[[LINE_900:.*]] = {{.*}}, i32 900, i32 11 {{.*}} @{{.*}} } + +// CHECK-NULL: @[[LINE_100:.*]] = private unnamed_addr constant {{.*}}, i32 100, i32 5 {{.*}} // PR6805 // CHECK: @foo +// CHECK-NULL: @foo void foo() { union { int i; } u; - // CHECK: objectsize - // CHECK: icmp uge + // CHECK: %[[CHECK0:.*]] = icmp ne {{.*}}* %[[PTR:.*]], null + + // CHECK: %[[I8PTR:.*]] = bitcast i32* %[[PTR]] to i8* + // CHECK-NEXT: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64(i8* %[[I8PTR]], i1 false) + // CHECK-NEXT: %[[CHECK1:.*]] = icmp uge i64 %[[SIZE]], 4 + // CHECK-NEXT: %[[CHECK01:.*]] = and i1 %[[CHECK0]], %[[CHECK1]] + + // CHECK: %[[PTRTOINT:.*]] = ptrtoint {{.*}}* %[[PTR]] to i64 + // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRTOINT]], 3 + // CHECK-NEXT: %[[CHECK2:.*]] = icmp eq i64 %[[MISALIGN]], 0 + + // CHECK: %[[OK:.*]] = and i1 %[[CHECK01]], %[[CHECK2]] + // CHECK-NEXT: br i1 %[[OK]] + + // CHECK: %[[ARG:.*]] = ptrtoint {{.*}} %[[PTR]] to i64 + // CHECK-NEXT: call void @__ubsan_handle_type_mismatch(i8* bitcast ({{.*}} @[[LINE_100]] to i8*), i64 %[[ARG]]) noreturn nounwind + + // With -fsanitize=null, only perform the null check. + // CHECK-NULL: %[[NULL:.*]] = icmp ne {{.*}}, null + // CHECK-NULL: br i1 %[[NULL]] + // CHECK-NULL: call void @__ubsan_handle_type_mismatch(i8* bitcast ({{.*}} @[[LINE_100]] to i8*), i64 %{{.*}}) noreturn nounwind +#line 100 u.i=1; } // CHECK: @bar int bar(int *a) { - // CHECK: objectsize - // CHECK: icmp uge + // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64 + // CHECK-NEXT: icmp uge i64 %[[SIZE]], 4 + + // CHECK: %[[PTRINT:.*]] = ptrtoint + // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 3 + // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0 + + // CHECK: %[[ARG:.*]] = ptrtoint + // CHECK-NEXT: call void @__ubsan_handle_type_mismatch(i8* bitcast ({{.*}} @[[LINE_200]] to i8*), i64 %[[ARG]]) noreturn nounwind +#line 200 + return *a; +} + +// CHECK: @addr_space +int addr_space(int __attribute__((address_space(256))) *a) { + // CHECK-NOT: __ubsan return *a; } + +// CHECK: @lsh_overflow +int lsh_overflow(int a, int b) { + // CHECK: %[[INBOUNDS:.*]] = icmp ule i32 %[[RHS:.*]], 31 + // CHECK-NEXT: br i1 %[[INBOUNDS]] + + // FIXME: Only emit one trap block here. 
+ // CHECK: %[[ARG1:.*]] = zext + // CHECK-NEXT: %[[ARG2:.*]] = zext + // CHECK-NEXT: call void @__ubsan_handle_shift_out_of_bounds(i8* bitcast ({{.*}} @[[LINE_300_A]] to i8*), i64 %[[ARG1]], i64 %[[ARG2]]) noreturn nounwind + + // CHECK: %[[SHIFTED_OUT_WIDTH:.*]] = sub nuw nsw i32 31, %[[RHS]] + // CHECK-NEXT: %[[SHIFTED_OUT:.*]] = lshr i32 %[[LHS:.*]], %[[SHIFTED_OUT_WIDTH]] + // CHECK-NEXT: %[[NO_OVERFLOW:.*]] = icmp eq i32 %[[SHIFTED_OUT]], 0 + // CHECK-NEXT: br i1 %[[NO_OVERFLOW]] + + // CHECK: %[[ARG1:.*]] = zext + // CHECK-NEXT: %[[ARG2:.*]] = zext + // CHECK-NEXT: call void @__ubsan_handle_shift_out_of_bounds(i8* bitcast ({{.*}} @[[LINE_300_B]] to i8*), i64 %[[ARG1]], i64 %[[ARG2]]) noreturn nounwind + + // CHECK: %[[RET:.*]] = shl i32 %[[LHS]], %[[RHS]] + // CHECK-NEXT: ret i32 %[[RET]] +#line 300 + return a << b; +} + +// CHECK: @rsh_inbounds +int rsh_inbounds(int a, int b) { + // CHECK: %[[INBOUNDS:.*]] = icmp ult i32 %[[RHS:.*]], 32 + // CHECK: br i1 %[[INBOUNDS]] + + // CHECK: %[[ARG1:.*]] = zext + // CHECK-NEXT: %[[ARG2:.*]] = zext + // CHECK-NEXT: call void @__ubsan_handle_shift_out_of_bounds(i8* bitcast ({{.*}} @[[LINE_400]] to i8*), i64 %[[ARG1]], i64 %[[ARG2]]) noreturn nounwind + + // CHECK: %[[RET:.*]] = ashr i32 %[[LHS]], %[[RHS]] + // CHECK-NEXT: ret i32 %[[RET]] +#line 400 + return a >> b; +} + +// CHECK: @load +int load(int *p) { + // CHECK: call void @__ubsan_handle_type_mismatch(i8* bitcast ({{.*}} @[[LINE_500]] to i8*), i64 %{{.*}}) noreturn nounwind +#line 500 + return *p; +} + +// CHECK: @store +void store(int *p, int q) { + // CHECK: call void @__ubsan_handle_type_mismatch(i8* bitcast ({{.*}} @[[LINE_600]] to i8*), i64 %{{.*}}) noreturn nounwind +#line 600 + *p = q; +} + +struct S { int k; }; + +// CHECK: @member_access +int *member_access(struct S *p) { + // CHECK: call void @__ubsan_handle_type_mismatch(i8* bitcast ({{.*}} @[[LINE_700]] to i8*), i64 %{{.*}}) noreturn nounwind +#line 700 + return &p->k; +} + +// CHECK: @signed_overflow +int signed_overflow(int a, int b) { + // CHECK: %[[ARG1:.*]] = zext + // CHECK-NEXT: %[[ARG2:.*]] = zext + // CHECK-NEXT: call void @__ubsan_handle_add_overflow(i8* bitcast ({{.*}} @[[LINE_800]] to i8*), i64 %[[ARG1]], i64 %[[ARG2]]) noreturn nounwind +#line 800 + return a + b; +} + +// CHECK: @no_return +int no_return() { + // Reaching the end of a noreturn function is fine in C. + // FIXME: If the user explicitly requests -fsanitize=return, we should catch + // that here even though it's not undefined behavior. + // CHECK-NOT: call + // CHECK-NOT: unreachable + // CHECK: ret i32 +} + +// CHECK: @vla_bound +void vla_bound(int n) { + // CHECK: icmp sgt i32 %[[PARAM:.*]], 0 + // + // CHECK: %[[ARG:.*]] = zext i32 %[[PARAM]] to i64 + // CHECK-NEXT: call void @__ubsan_handle_vla_bound_not_positive(i8* bitcast ({{.*}} @[[LINE_900]] to i8*), i64 %[[ARG]]) noreturn nounwind +#line 900 + int arr[n * 3]; +} + +// CHECK: @int_float_no_overflow +float int_float_no_overflow(__int128 n) { + // CHECK-NOT: call void @__ubsan_handle + return n; +} + +// CHECK: @int_float_overflow +float int_float_overflow(unsigned __int128 n) { + // This is 2**104. FLT_MAX is 2**128 - 2**104. 
+ // CHECK: icmp ule i128 %{{.*}}, -20282409603651670423947251286016 + // CHECK: call void @__ubsan_handle_float_cast_overflow( + return n; +} + +// CHECK: @int_fp16_overflow +void int_fp16_overflow(int n, __fp16 *p) { + // CHECK: %[[GE:.*]] = icmp sge i32 %{{.*}}, -65504 + // CHECK: %[[LE:.*]] = icmp sle i32 %{{.*}}, 65504 + // CHECK: and i1 %[[GE]], %[[LE]] + // CHECK: call void @__ubsan_handle_float_cast_overflow( + *p = n; +} + +// CHECK: @float_int_overflow +int float_int_overflow(float f) { + // CHECK: %[[GE:.*]] = fcmp oge float %[[F:.*]], 0xC1E0000000000000 + // CHECK: %[[LE:.*]] = fcmp ole float %[[F]], 0x41DFFFFFE0000000 + // CHECK: and i1 %[[GE]], %[[LE]] + // CHECK: call void @__ubsan_handle_float_cast_overflow( + return f; +} + +// CHECK: @float_uint_overflow +unsigned float_uint_overflow(float f) { + // CHECK: %[[GE:.*]] = fcmp oge float %[[F:.*]], 0.{{0*}}e+00 + // CHECK: %[[LE:.*]] = fcmp ole float %[[F]], 0x41EFFFFFE0000000 + // CHECK: and i1 %[[GE]], %[[LE]] + // CHECK: call void @__ubsan_handle_float_cast_overflow( + return f; +} + +// CHECK: @fp16_char_overflow +signed char fp16_char_overflow(__fp16 *p) { + // CHECK: %[[GE:.*]] = fcmp oge float %[[F:.*]], -1.28{{0*}}e+02 + // CHECK: %[[LE:.*]] = fcmp ole float %[[F]], 1.27{{0*}}e+02 + // CHECK: and i1 %[[GE]], %[[LE]] + // CHECK: call void @__ubsan_handle_float_cast_overflow( + return *p; +} + +// CHECK: @float_float_overflow +float float_float_overflow(double f) { + // CHECK: %[[GE:.*]] = fcmp oge double %[[F:.*]], 0xC7EFFFFFE0000000 + // CHECK: %[[LE:.*]] = fcmp ole double %[[F]], 0x47EFFFFFE0000000 + // CHECK: and i1 %[[GE]], %[[LE]] + // CHECK: call void @__ubsan_handle_float_cast_overflow( + return f; +} + +// CHECK: @int_divide_overflow +// CHECK-OVERFLOW: @int_divide_overflow +int int_divide_overflow(int a, int b) { + // CHECK: %[[ZERO:.*]] = icmp ne i32 %[[B:.*]], 0 + // CHECK-OVERFLOW-NOT: icmp ne i32 %{{.*}}, 0 + + // CHECK: %[[AOK:.*]] = icmp ne i32 %[[A:.*]], -2147483648 + // CHECK-NEXT: %[[BOK:.*]] = icmp ne i32 %[[B]], -1 + // CHECK-NEXT: %[[OVER:.*]] = or i1 %[[AOK]], %[[BOK]] + + // CHECK-OVERFLOW: %[[AOK:.*]] = icmp ne i32 %[[A:.*]], -2147483648 + // CHECK-OVERFLOW-NEXT: %[[BOK:.*]] = icmp ne i32 %[[B:.*]], -1 + // CHECK-OVERFLOW-NEXT: %[[OK:.*]] = or i1 %[[AOK]], %[[BOK]] + + // CHECK: %[[OK:.*]] = and i1 %[[ZERO]], %[[OVER]] + + // CHECK: br i1 %[[OK]] + // CHECK-OVERFLOW: br i1 %[[OK]] + return a / b; + + // CHECK: } + // CHECK-OVERFLOW: } +} diff --git a/test/CodeGen/const-init.c b/test/CodeGen/const-init.c index 4f3f7ab..5f729b8 100644 --- a/test/CodeGen/const-init.c +++ b/test/CodeGen/const-init.c @@ -144,3 +144,18 @@ void g28() { static v12i16 b = (v2f80){1,2}; static v2f80 c = (v12i16){0,0,0,-32768,16383,0,0,0,0,-32768,16384,0}; } + +// PR13643 +void g29() { + typedef char DCC_PASSWD[2]; + typedef struct + { + DCC_PASSWD passwd; + } DCC_SRVR_NM; + // CHECK: @g29.a = internal global %struct.DCC_SRVR_NM { [2 x i8] c"@\00" }, align 1 + // CHECK: @g29.b = internal global [1 x i32] [i32 ptrtoint ([5 x i8]* @.str to i32)], align 4 + // CHECK: @g29.c = internal global [1 x i32] [i32 97], align 4 + static DCC_SRVR_NM a = { {"@"} }; + static int b[1] = { "asdf" }; + static int c[1] = { L"a" }; +} diff --git a/test/CodeGen/const-label-addr.c b/test/CodeGen/const-label-addr.c index 9d99f88..e606c3b 100644 --- a/test/CodeGen/const-label-addr.c +++ b/test/CodeGen/const-label-addr.c @@ -1,4 +1,17 @@ -// RUN: %clang_cc1 %s -emit-llvm -o %t +// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s +// REQUIRES: 
asserts + +// CHECK: @a.a = internal global i8* blockaddress(@a, %A) int a() { A:;static void* a = &&A; } + +// PR14005 +// CHECK: @b.ar = internal global {{.*}} sub (i{{..}} ptrtoint (i8* blockaddress(@b, %l2) to i{{..}}), i{{..}} ptrtoint (i8* blockaddress(@b, %l1) to i{{..}})) +int b() { + static int ar = &&l2 - &&l1; +l1: + return 10; +l2: + return 11; +} diff --git a/test/CodeGen/debug-info-iv.c b/test/CodeGen/debug-info-iv.c index 6684fe3..aafd71d 100644 --- a/test/CodeGen/debug-info-iv.c +++ b/test/CodeGen/debug-info-iv.c @@ -27,7 +27,7 @@ int main() { Array[i][j] = 0; test_indvars(Array[0], Array); -//CHECK: .loc 2 31 8 +//CHECK: .loc 2 31 for (i=0; i < 100; i+=2) for (j=0; j < 200; j++) sum += Array[i][j]; diff --git a/test/CodeGen/debug-info-line3.c b/test/CodeGen/debug-info-line3.c index a4e35e7..d01b023 100644 --- a/test/CodeGen/debug-info-line3.c +++ b/test/CodeGen/debug-info-line3.c @@ -12,5 +12,5 @@ void func(char c, char* d) } -// CHECK: ret void, !dbg !17 -// CHECK: !17 = metadata !{i32 6, +// CHECK: ret void, !dbg [[LINE:.*]] +// CHECK: [[LINE]] = metadata !{i32 6, diff --git a/test/CodeGen/debug-info-line4.c b/test/CodeGen/debug-info-line4.c new file mode 100644 index 0000000..004176c --- /dev/null +++ b/test/CodeGen/debug-info-line4.c @@ -0,0 +1,11 @@ +// RUN: %clang %s -g -gcolumn-info -S -emit-llvm -o - | FileCheck %s +// Checks that clang emits column information when -gcolumn-info is passed. + +int foo(int a, int b) { int c = a + b; + + + return c; +} + +// Without column information we wouldn't change locations for b. +// CHECK: metadata !{i32 4, i32 20, diff --git a/test/CodeGen/debug-info.c b/test/CodeGen/debug-info.c index af2ce96..12ba605 100644 --- a/test/CodeGen/debug-info.c +++ b/test/CodeGen/debug-info.c @@ -1,5 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-unk-unk -o %t -emit-llvm -g %s -// RUN: FileCheck --input-file=%t %s +// RUN: %clang_cc1 -triple x86_64-unk-unk -o - -emit-llvm -g %s | FileCheck %s // PR3023 void convert(void) { @@ -8,7 +7,7 @@ void convert(void) { // PR2784 -struct OPAQUE; +struct OPAQUE; // CHECK: DW_TAG_structure_type typedef struct OPAQUE *PTR; PTR p; diff --git a/test/CodeGen/debug-line-1.c b/test/CodeGen/debug-line-1.c index 0c2d185..be1da08 100644 --- a/test/CodeGen/debug-line-1.c +++ b/test/CodeGen/debug-line-1.c @@ -4,7 +4,7 @@ // Check to make sure that we emit the block for the break so that we can count the line. // CHECK: sw.bb: ; preds = %entry -// CHECK: br label %sw.epilog, !dbg !19 +// CHECK: br label %sw.epilog, !dbg ! 
extern int atoi(const char *); diff --git a/test/CodeGen/decl-in-prototype.c b/test/CodeGen/decl-in-prototype.c index 949793d..2c0fc4f 100644 --- a/test/CodeGen/decl-in-prototype.c +++ b/test/CodeGen/decl-in-prototype.c @@ -1,4 +1,4 @@ -// RUN: %clang -emit-llvm -S -o - %s | FileCheck %s +// RUN: %clang -target i386-unknown-unknown -emit-llvm -S -o - %s | FileCheck %s const int AA = 5; diff --git a/test/CodeGen/dostmt.c b/test/CodeGen/dostmt.c index 1a2e02a..54973dc 100644 --- a/test/CodeGen/dostmt.c +++ b/test/CodeGen/dostmt.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -emit-llvm -o - +// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s int bar(); int test0() { @@ -66,5 +66,11 @@ void test5() { do { break; } while(0); } - +// PR14191 +void test6f(void); +void test6() { + do { + } while (test6f(), 0); + // CHECK: call void @test6f() +} diff --git a/test/CodeGen/exprs.c b/test/CodeGen/exprs.c index cc03be6..f8f2833 100644 --- a/test/CodeGen/exprs.c +++ b/test/CodeGen/exprs.c @@ -174,3 +174,13 @@ void f16() { lbl: ; } + +// PR13704: negative increment in i128 is not preserved. +// CHECK: define void @f17() +void f17() { + extern void extfunc(__int128); + __int128 x = 2; + x--; + extfunc(x); +// CHECK: add nsw i128 %{{.}}, -1 +} diff --git a/test/CodeGen/extern-inline.c b/test/CodeGen/extern-inline.c index e3df996..77cb270 100644 --- a/test/CodeGen/extern-inline.c +++ b/test/CodeGen/extern-inline.c @@ -1,5 +1,5 @@ -// RUN: %clang -S -emit-llvm -std=gnu89 -o - %s | FileCheck %s -// RUN: %clang -S -emit-llvm -fgnu89-inline -o - %s | FileCheck %s +// RUN: %clang -target i386-unknown-unknown -S -emit-llvm -std=gnu89 -o - %s | FileCheck %s +// RUN: %clang -target i386-unknown-unknown -S -emit-llvm -fgnu89-inline -o - %s | FileCheck %s // PR5253 // If an extern inline function is redefined, functions should call the diff --git a/test/CodeGen/f16c-builtins.c b/test/CodeGen/f16c-builtins.c new file mode 100644 index 0000000..28430d5 --- /dev/null +++ b/test/CodeGen/f16c-builtins.c @@ -0,0 +1,26 @@ +// RUN: %clang_cc1 %s -O3 -triple=x86_64-apple-darwin -target-feature +f16c -emit-llvm -o - | FileCheck %s + +// Don't include mm_malloc.h, it's system specific. +#define __MM_MALLOC_H + +#include + +__m128 test_mm_cvtph_ps(__m128i a) { + // CHECK: @llvm.x86.vcvtph2ps.128 + return _mm_cvtph_ps(a); +} + +__m256 test_mm256_cvtph_ps(__m128i a) { + // CHECK: @llvm.x86.vcvtph2ps.256 + return _mm256_cvtph_ps(a); +} + +__m128i test_mm_cvtps_ph(__m128 a) { + // CHECK: @llvm.x86.vcvtps2ph.128 + return _mm_cvtps_ph(a, 0); +} + +__m128i test_mm256_cvtps_ph(__m256 a) { + // CHECK: @llvm.x86.vcvtps2ph.256 + return _mm256_cvtps_ph(a, 0); +} diff --git a/test/CodeGen/ffp-contract-option.c b/test/CodeGen/ffp-contract-option.c new file mode 100644 index 0000000..eb95f1e --- /dev/null +++ b/test/CodeGen/ffp-contract-option.c @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -O3 -ffp-contract=fast -triple=powerpc-apple-darwin10 -S -o - %s | FileCheck %s +// REQUIRES: ppc32-registered-target + +float fma_test1(float a, float b, float c) { +// CHECK: fmadds + float x = a * b; + float y = x + c; + return y; +} diff --git a/test/CodeGen/fold-const-declref.c b/test/CodeGen/fold-const-declref.c index 5a7ba8e..f49611c 100644 --- a/test/CodeGen/fold-const-declref.c +++ b/test/CodeGen/fold-const-declref.c @@ -1,9 +1,9 @@ -// RUN: %clang_cc1 -verify -emit-llvm-only +// RUN: %clang_cc1 -verify -emit-llvm-only %s // PR7242: Check that this doesn't crash. 
int main(void) { int __negative = 1; const int __max = __negative && 0 ; - __max / 0; + __max / 0; // expected-warning{{expression result unused}} expected-warning{{division by zero is undefined}} } diff --git a/test/CodeGen/fp-contract-pragma.cpp b/test/CodeGen/fp-contract-pragma.cpp new file mode 100644 index 0000000..afd8c43 --- /dev/null +++ b/test/CodeGen/fp-contract-pragma.cpp @@ -0,0 +1,64 @@ +// RUN: %clang_cc1 -O3 -emit-llvm -o - %s | FileCheck %s + +// Is FP_CONTRACT honored in a simple case? +float fp_contract_1(float a, float b, float c) { +// CHECK: _Z13fp_contract_1fff +// CHECK: tail call float @llvm.fmuladd + #pragma STDC FP_CONTRACT ON + return a * b + c; +} + +// Is FP_CONTRACT state cleared on exiting compound statements? +float fp_contract_2(float a, float b, float c) { +// CHECK: _Z13fp_contract_2fff +// CHECK: %[[M:.+]] = fmul float %a, %b +// CHECK-NEXT: fadd float %[[M]], %c + { + #pragma STDC FP_CONTRACT ON + } + return a * b + c; +} + +// Does FP_CONTRACT survive template instantiation? +class Foo {}; +Foo operator+(Foo, Foo); + +template<typename T> +T template_muladd(T a, T b, T c) { + #pragma STDC FP_CONTRACT ON + return a * b + c; +} + +float fp_contract_3(float a, float b, float c) { +// CHECK: _Z13fp_contract_3fff +// CHECK: tail call float @llvm.fmuladd + return template_muladd(a, b, c); +} + +template<typename T> class fp_contract_4 { + float method(float a, float b, float c) { + #pragma STDC FP_CONTRACT ON + return a * b + c; + } +}; + +template class fp_contract_4<int>; +// CHECK: _ZN13fp_contract_4IiE6methodEfff +// CHECK: tail call float @llvm.fmuladd + +// Check file-scoped FP_CONTRACT +#pragma STDC FP_CONTRACT ON +float fp_contract_5(float a, float b, float c) { +// CHECK: _Z13fp_contract_5fff +// CHECK: tail call float @llvm.fmuladd + return a * b + c; +} + +#pragma STDC FP_CONTRACT OFF +float fp_contract_6(float a, float b, float c) { +// CHECK: _Z13fp_contract_6fff +// CHECK: %[[M:.+]] = fmul float %a, %b +// CHECK-NEXT: fadd float %[[M]], %c + return a * b + c; +} + diff --git a/test/CodeGen/fp-contract.c b/test/CodeGen/fp-contract.c deleted file mode 100644 index eb95f1e..0000000 --- a/test/CodeGen/fp-contract.c +++ /dev/null @@ -1,9 +0,0 @@ -// RUN: %clang_cc1 -O3 -ffp-contract=fast -triple=powerpc-apple-darwin10 -S -o - %s | FileCheck %s -// REQUIRES: ppc32-registered-target - -float fma_test1(float a, float b, float c) { -// CHECK: fmadds - float x = a * b; - float y = x + c; - return y; -} diff --git a/test/CodeGen/func-ptr-cast-decl.c b/test/CodeGen/func-ptr-cast-decl.c index e630796..28364de 100644 --- a/test/CodeGen/func-ptr-cast-decl.c +++ b/test/CodeGen/func-ptr-cast-decl.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only %s -verify +// expected-no-diagnostics // PR5882 int q_sk_num(void *a); diff --git a/test/CodeGen/init.c b/test/CodeGen/init.c index 426233d..259d34d 100644 --- a/test/CodeGen/init.c +++ b/test/CodeGen/init.c @@ -69,6 +69,8 @@ char test8(int X) { // CHECK: store i8 97 // CHECK: store i8 98 // CHECK: store i8 99 +// CHECK-NOT: getelementptr +// CHECK: load } void bar(void*); diff --git a/test/CodeGen/inline.c b/test/CodeGen/inline.c index 2a01f25..addb30b 100644 --- a/test/CodeGen/inline.c +++ b/test/CodeGen/inline.c @@ -1,5 +1,5 @@ // RUN: echo "GNU89 tests:" -// RUN: %clang %s -O1 -emit-llvm -S -o %t -std=gnu89 +// RUN: %clang %s -target i386-unknown-unknown -O1 -emit-llvm -S -o %t -std=gnu89 // RUN: grep "define available_externally i32 @ei()" %t // RUN: grep "define i32 @foo()" %t // RUN: grep "define i32 @bar()" %t @@ -21,7 +21,7 @@ //
RUN: grep "define void @testC" %t // RUN: echo "C99 tests:" -// RUN: %clang %s -O1 -emit-llvm -S -o %t -std=gnu99 +// RUN: %clang %s -target i386-unknown-unknown -O1 -emit-llvm -S -o %t -std=gnu99 // RUN: grep "define i32 @ei()" %t // RUN: grep "define available_externally i32 @foo()" %t // RUN: grep "define i32 @bar()" %t @@ -43,7 +43,7 @@ // RUN: grep "define void @testC" %t // RUN: echo "C++ tests:" -// RUN: %clang -x c++ %s -O1 -emit-llvm -S -o %t -std=c++98 +// RUN: %clang -x c++ %s -target i386-unknown-unknown -O1 -emit-llvm -S -o %t -std=c++98 // RUN: grep "define linkonce_odr i32 @_Z2eiv()" %t // RUN: grep "define linkonce_odr i32 @_Z3foov()" %t // RUN: grep "define i32 @_Z3barv()" %t diff --git a/test/CodeGen/integer-overflow.c b/test/CodeGen/integer-overflow.c index d7fff4e..ed2dede 100644 --- a/test/CodeGen/integer-overflow.c +++ b/test/CodeGen/integer-overflow.c @@ -1,6 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin %s -emit-llvm -o - | FileCheck %s --check-prefix=DEFAULT // RUN: %clang_cc1 -triple x86_64-apple-darwin %s -emit-llvm -o - -fwrapv | FileCheck %s --check-prefix=WRAPV // RUN: %clang_cc1 -triple x86_64-apple-darwin %s -emit-llvm -o - -ftrapv | FileCheck %s --check-prefix=TRAPV +// RUN: %clang_cc1 -triple x86_64-apple-darwin %s -emit-llvm -o - -fsanitize=signed-integer-overflow | FileCheck %s --check-prefix=CATCH_UB // RUN: %clang_cc1 -triple x86_64-apple-darwin %s -emit-llvm -o - -ftrapv -ftrapv-handler foo | FileCheck %s --check-prefix=TRAPV_HANDLER @@ -15,24 +16,28 @@ void test1() { // DEFAULT: add nsw i32 // WRAPV: add i32 // TRAPV: llvm.sadd.with.overflow.i32 + // CATCH_UB: llvm.sadd.with.overflow.i32 // TRAPV_HANDLER: foo( f11G = a + b; // DEFAULT: sub nsw i32 // WRAPV: sub i32 // TRAPV: llvm.ssub.with.overflow.i32 + // CATCH_UB: llvm.ssub.with.overflow.i32 // TRAPV_HANDLER: foo( f11G = a - b; // DEFAULT: mul nsw i32 // WRAPV: mul i32 // TRAPV: llvm.smul.with.overflow.i32 + // CATCH_UB: llvm.smul.with.overflow.i32 // TRAPV_HANDLER: foo( f11G = a * b; // DEFAULT: sub nsw i32 0, // WRAPV: sub i32 0, // TRAPV: llvm.ssub.with.overflow.i32(i32 0 + // CATCH_UB: llvm.ssub.with.overflow.i32(i32 0 // TRAPV_HANDLER: foo( f11G = -a; @@ -41,12 +46,14 @@ void test1() { // DEFAULT: add nsw i32 {{.*}}, 1 // WRAPV: add i32 {{.*}}, 1 // TRAPV: llvm.sadd.with.overflow.i32({{.*}}, i32 1) + // CATCH_UB: llvm.sadd.with.overflow.i32({{.*}}, i32 1) // TRAPV_HANDLER: foo( ++a; // DEFAULT: add nsw i32 {{.*}}, -1 // WRAPV: add i32 {{.*}}, -1 // TRAPV: llvm.sadd.with.overflow.i32({{.*}}, i32 -1) + // CATCH_UB: llvm.sadd.with.overflow.i32({{.*}}, i32 -1) // TRAPV_HANDLER: foo( --a; @@ -56,11 +63,13 @@ void test1() { // DEFAULT: getelementptr inbounds i32* // WRAPV: getelementptr i32* // TRAPV: getelementptr inbounds i32* + // CATCH_UB: getelementptr inbounds i32* // PR9350: char increment never overflows. 
extern volatile signed char PR9350; // DEFAULT: add i8 {{.*}}, 1 // WRAPV: add i8 {{.*}}, 1 // TRAPV: add i8 {{.*}}, 1 + // CATCH_UB: add i8 {{.*}}, 1 ++PR9350; } diff --git a/test/CodeGen/le32-arguments.c b/test/CodeGen/le32-arguments.c new file mode 100644 index 0000000..2cbbc0f --- /dev/null +++ b/test/CodeGen/le32-arguments.c @@ -0,0 +1,61 @@ +// RUN: %clang_cc1 -triple le32-unknown-nacl %s -emit-llvm -o - | FileCheck %s + +// Basic argument/attribute tests for le32/PNaCl + +// CHECK: define void @f0(i32 %i, i32 %j, double %k) +void f0(int i, long j, double k) {} + +typedef struct { + int aa; + int bb; +} s1; +// Structs should be passed byval and not split up +// CHECK: define void @f1(%struct.s1* byval %i) +void f1(s1 i) {} + +typedef struct { + int cc; +} s2; +// Structs should be returned sret and not simplified by the frontend +// CHECK: define void @f2(%struct.s2* noalias sret %agg.result) +s2 f2() { + s2 foo; + return foo; +} + +// CHECK: define void @f3(i64 %i) +void f3(long long i) {} + +// i8/i16 should be signext, i32 and higher should not +// CHECK: define void @f4(i8 signext %a, i16 signext %b) +void f4(char a, short b) {} + +// CHECK: define void @f5(i8 zeroext %a, i16 zeroext %b) +void f5(unsigned char a, unsigned short b) {} + + +enum my_enum { + ENUM1, + ENUM2, + ENUM3, +}; +// Enums should be treated as the underlying i32 +// CHECK: define void @f6(i32 %a) +void f6(enum my_enum a) {} + +union simple_union { + int a; + char b; +}; +// Unions should be passed as byval structs +// CHECK: define void @f7(%union.simple_union* byval %s) +void f7(union simple_union s) {} + +typedef struct { + int b4 : 4; + int b3 : 3; + int b8 : 8; +} bitfield1; +// Bitfields should be passed as byval structs +// CHECK: define void @f8(%struct.bitfield1* byval %bf1) +void f8(bitfield1 bf1) {} diff --git a/test/CodeGen/le32-regparm.c b/test/CodeGen/le32-regparm.c new file mode 100644 index 0000000..6ab5a11 --- /dev/null +++ b/test/CodeGen/le32-regparm.c @@ -0,0 +1,41 @@ +// RUN: %clang_cc1 -triple le32-unknown-nacl %s -emit-llvm -o - | FileCheck %s + +#define FASTCALL __attribute__((regparm(2))) + +typedef struct { + int aaa; + double bbbb; + int ccc[200]; +} foo; + +// 2 inreg arguments are supported. +void FASTCALL f1(int i, int j, int k); +// CHECK: define void @f1(i32 inreg %i, i32 inreg %j, i32 %k) +void f1(int i, int j, int k) { } + +// inreg structs are not supported. +// CHECK: define void @f2(%struct.foo* inreg %a) +void __attribute__((regparm(1))) f2(foo* a) {} + +// Only the first 2 arguments can be passed inreg, and the first +// non-integral type consumes remaining available registers. +// CHECK: define void @f3(%struct.foo* byval %a, i32 %b) +void __attribute__((regparm(2))) f3(foo a, int b) {} + +// Only 64 total bits are supported +// CHECK: define void @f4(i64 inreg %g, i32 %h) +void __attribute__((regparm(2))) f4(long long g, int h) {} + +typedef void (*FType)(int, int) __attribute ((regparm (2))); +FType bar; +extern void FASTCALL reduced(char b, double c, foo* d, double e, int f); + +int +main(void) { + // The presence of double c means that foo* d is not passed inreg. 
This + // behavior is different from current x86-32 behavior + // CHECK: call void @reduced(i8 signext inreg 0, {{.*}} %struct.foo* null + reduced(0, 0.0, 0, 0.0, 0); + // CHECK: call void {{.*}}(i32 inreg 1, i32 inreg 2) + bar(1,2); +} diff --git a/test/CodeGen/libcall-declarations.c b/test/CodeGen/libcall-declarations.c new file mode 100644 index 0000000..4517643 --- /dev/null +++ b/test/CodeGen/libcall-declarations.c @@ -0,0 +1,191 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin12 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=CHECK-NOERRNO +// RUN: %clang_cc1 -triple x86_64-linux-gnu -S -o - -emit-llvm -fmath-errno %s | FileCheck %s -check-prefix=CHECK-ERRNO + +// Prototypes. +double acos(double); +long double acosl(long double); +float acosf(float); +double asin(double); +long double asinl(long double); +float asinf(float); +double atan(double); +long double atanl(long double); +float atanf(float); +double atan2(double, double); +long double atan2l(long double, long double); +float atan2f(float, float); +double ceil(double); +long double ceill(long double); +float ceilf(float); +double copysign(double, double); +long double copysignl(long double, long double); +float copysignf(float, float); +double cos(double); +long double cosl(long double); +float cosf(float); +double exp(double); +long double expl(long double); +float expf(float); +double exp2(double); +long double exp2l(long double); +float exp2f(float); +double fabs(double); +long double fabsl(long double); +float fabsf(float); +double floor(double); +long double floorl(long double); +float floorf(float); +double fma(double, double, double); +long double fmal(long double, long double, long double); +float fmaf(float, float, float); +double fmax(double, double); +long double fmaxl(long double, long double); +float fmaxf(float, float); +double fmin(double, double); +long double fminl(long double, long double); +float fminf(float, float); +double log(double); +long double logl(long double); +float logf(float); +double log2(double); +long double log2l(long double); +float log2f(float); +double nearbyint(double); +long double nearbyintl(long double); +float nearbyintf(float); +double pow(double, double); +long double powl(long double, long double); +float powf(float, float); +double rint(double); +long double rintl(long double); +float rintf(float); +double round(double); +long double roundl(long double); +float roundf(float); +double sin(double); +long double sinl(long double); +float sinf(float); +double sqrt(double); +long double sqrtl(long double); +float sqrtf(float); +double tan(double); +long double tanl(long double); +float tanf(float); +double trunc(double); +long double truncl(long double); +float truncf(float); + +// Force emission of the declare statements. 
+void *use[] = { + acos, acosl, acosf, asin, asinl, asinf, atan, atanl, atanf, atan2, atan2l, + atan2f, ceil, ceill, ceilf, copysign, copysignl, copysignf, cos, cosl, cosf, + exp, expl, expf, exp2, exp2l, exp2f, fabs, fabsl, fabsf, floor, floorl, + floorf, fma, fmal, fmaf, fmax, fmaxl, fmaxf, fmin, fminl, fminf, log, logl, + logf, log2, log2l, log2f, nearbyint, nearbyintl, nearbyintf, pow, powl, powf, + rint, rintl, rintf, round, roundl, roundf, sin, sinl, sinf, sqrt, sqrtl, + sqrtf, tan, tanl, tanf, trunc, truncl, truncf +}; + +// CHECK-NOERRNO: declare double @acos(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @acosl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @acosf(float) nounwind readnone +// CHECK-NOERRNO: declare double @asin(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @asinl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @asinf(float) nounwind readnone +// CHECK-NOERRNO: declare double @atan(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @atanl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @atanf(float) nounwind readnone +// CHECK-NOERRNO: declare double @atan2(double, double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @atan2l(x86_fp80, x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @atan2f(float, float) nounwind readnone +// CHECK-NOERRNO: declare double @ceil(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @ceill(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @ceilf(float) nounwind readnone +// CHECK-NOERRNO: declare double @copysign(double, double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @copysignl(x86_fp80, x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @copysignf(float, float) nounwind readnone +// CHECK-NOERRNO: declare double @cos(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @cosl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @cosf(float) nounwind readnone +// CHECK-NOERRNO: declare double @exp(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @expl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @expf(float) nounwind readnone +// CHECK-NOERRNO: declare double @exp2(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @exp2l(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @exp2f(float) nounwind readnone +// CHECK-NOERRNO: declare double @fabs(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @fabsl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @fabsf(float) nounwind readnone +// CHECK-NOERRNO: declare double @floor(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @floorl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @floorf(float) nounwind readnone +// CHECK-NOERRNO: declare double @fma(double, double, double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @fmal(x86_fp80, x86_fp80, x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @fmaf(float, float, float) nounwind readnone +// CHECK-NOERRNO: declare double @fmax(double, double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @fmaxl(x86_fp80, x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @fmaxf(float, float) nounwind readnone +// CHECK-NOERRNO: declare double @fmin(double, double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @fminf(float, float) nounwind readnone +// CHECK-NOERRNO: declare double 
@log(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @logl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @logf(float) nounwind readnone +// CHECK-NOERRNO: declare double @log2(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @log2l(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @log2f(float) nounwind readnone +// CHECK-NOERRNO: declare double @nearbyint(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @nearbyintl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @nearbyintf(float) nounwind readnone +// CHECK-NOERRNO: declare double @pow(double, double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @powl(x86_fp80, x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @powf(float, float) nounwind readnone +// CHECK-NOERRNO: declare double @rint(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @rintl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @rintf(float) nounwind readnone +// CHECK-NOERRNO: declare double @round(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @roundl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @roundf(float) nounwind readnone +// CHECK-NOERRNO: declare double @sin(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @sinl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @sinf(float) nounwind readnone +// CHECK-NOERRNO: declare double @sqrt(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @sqrtl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @sqrtf(float) nounwind readnone +// CHECK-NOERRNO: declare double @tan(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @tanl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @tanf(float) nounwind readnone +// CHECK-NOERRNO: declare double @trunc(double) nounwind readnone +// CHECK-NOERRNO: declare x86_fp80 @truncl(x86_fp80) nounwind readnone +// CHECK-NOERRNO: declare float @truncf(float) nounwind readnone + +// CHECK-ERRNO: declare double @ceil(double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @ceill(x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @ceilf(float) nounwind readnone +// CHECK-ERRNO: declare double @copysign(double, double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @copysignl(x86_fp80, x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @copysignf(float, float) nounwind readnone +// CHECK-ERRNO: declare double @fabs(double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @fabsl(x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @fabsf(float) nounwind readnone +// CHECK-ERRNO: declare double @floor(double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @floorl(x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @floorf(float) nounwind readnone +// CHECK-ERRNO: declare double @fmax(double, double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @fmaxl(x86_fp80, x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @fmaxf(float, float) nounwind readnone +// CHECK-ERRNO: declare double @fmin(double, double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @fminl(x86_fp80, x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @fminf(float, float) nounwind readnone +// CHECK-ERRNO: declare double @nearbyint(double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @nearbyintl(x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @nearbyintf(float) nounwind readnone +// CHECK-ERRNO: declare double @rint(double) nounwind 
readnone +// CHECK-ERRNO: declare x86_fp80 @rintl(x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @rintf(float) nounwind readnone +// CHECK-ERRNO: declare double @round(double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @roundl(x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @roundf(float) nounwind readnone +// CHECK-ERRNO: declare double @trunc(double) nounwind readnone +// CHECK-ERRNO: declare x86_fp80 @truncl(x86_fp80) nounwind readnone +// CHECK-ERRNO: declare float @truncf(float) nounwind readnone diff --git a/test/CodeGen/libcalls-fno-builtin.c b/test/CodeGen/libcalls-fno-builtin.c index ce10759..e7f3ef7 100644 --- a/test/CodeGen/libcalls-fno-builtin.c +++ b/test/CodeGen/libcalls-fno-builtin.c @@ -1,11 +1,32 @@ // RUN: %clang_cc1 -S -O3 -fno-builtin -o - %s | FileCheck %s // rdar://10551066 +typedef __SIZE_TYPE__ size_t; + double ceil(double x); double copysign(double,double); double cos(double x); double fabs(double x); double floor(double x); +char *strcat(char *s1, const char *s2); +char *strncat(char *s1, const char *s2, size_t n); +char *strchr(const char *s, int c); +char *strrchr(const char *s, int c); +int strcmp(const char *s1, const char *s2); +int strncmp(const char *s1, const char *s2, size_t n); +char *strcpy(char *s1, const char *s2); +char *stpcpy(char *s1, const char *s2); +char *strncpy(char *s1, const char *s2, size_t n); +size_t strlen(const char *s); +char *strpbrk(const char *s1, const char *s2); +size_t strspn(const char *s1, const char *s2); +double strtod(const char *nptr, char **endptr); +float strtof(const char *nptr, char **endptr); +long double strtold(const char *nptr, char **endptr); +long int strtol(const char *nptr, char **endptr, int base); +long long int strtoll(const char *nptr, char **endptr, int base); +unsigned long int strtoul(const char *nptr, char **endptr, int base); +unsigned long long int strtoull(const char *nptr, char **endptr, int base); double t1(double x) { return ceil(x); } // CHECK: t1 @@ -26,3 +47,79 @@ double t4(double x) { return fabs(x); } double t5(double x) { return floor(x); } // CHECK: t5 // CHECK: floor + +char *t6(char *x) { return strcat(x, ""); } +// CHECK: t6 +// CHECK: strcat + +char *t7(char *x) { return strncat(x, "", 1); } +// CHECK: t7 +// CHECK: strncat + +char *t8(void) { return strchr("hello, world", 'w'); } +// CHECK: t8 +// CHECK: strchr + +char *t9(void) { return strrchr("hello, world", 'w'); } +// CHECK: t9 +// CHECK: strrchr + +int t10(void) { return strcmp("foo", "bar"); } +// CHECK: t10 +// CHECK: strcmp + +int t11(void) { return strncmp("foo", "bar", 3); } +// CHECK: t11 +// CHECK: strncmp + +char *t12(char *x) { return strcpy(x, "foo"); } +// CHECK: t12 +// CHECK: strcpy + +char *t13(char *x) { return stpcpy(x, "foo"); } +// CHECK: t13 +// CHECK: stpcpy + +char *t14(char *x) { return strncpy(x, "foo", 3); } +// CHECK: t14 +// CHECK: strncpy + +size_t t15(void) { return strlen("foo"); } +// CHECK: t15 +// CHECK: strlen + +char *t16(char *x) { return strpbrk(x, ""); } +// CHECK: t16 +// CHECK: strpbrk + +size_t t17(char *x) { return strspn(x, ""); } +// CHECK: t17 +// CHECK: strspn + +double t18(char **x) { return strtod("123.4", x); } +// CHECK: t18 +// CHECK: strtod + +float t19(char **x) { return strtof("123.4", x); } +// CHECK: t19 +// CHECK: strtof + +long double t20(char **x) { return strtold("123.4", x); } +// CHECK: t20 +// CHECK: strtold + +long int t21(char **x) { return strtol("1234", x, 10); } +// CHECK: t21 +// CHECK: strtol + +long int t22(char **x) { return 
strtoll("1234", x, 10); } +// CHECK: t22 +// CHECK: strtoll + +long int t23(char **x) { return strtoul("1234", x, 10); } +// CHECK: t23 +// CHECK: strtoul + +long int t24(char **x) { return strtoull("1234", x, 10); } +// CHECK: t24 +// CHECK: strtoull diff --git a/test/CodeGen/long-double-x86-nacl.c b/test/CodeGen/long-double-x86-nacl.c new file mode 100644 index 0000000..175129c --- /dev/null +++ b/test/CodeGen/long-double-x86-nacl.c @@ -0,0 +1,7 @@ +// RUN: %clang_cc1 %s -emit-llvm -o - -triple=i686-unknown-nacl | FileCheck %s + +long double x = 0; +int checksize[sizeof(x) == 8 ? 1 : -1]; + +// CHECK: define void @s1(double %a) +void s1(long double a) {} diff --git a/test/CodeGen/microsoft-call-conv-x64.c b/test/CodeGen/microsoft-call-conv-x64.c new file mode 100644 index 0000000..97a1d99 --- /dev/null +++ b/test/CodeGen/microsoft-call-conv-x64.c @@ -0,0 +1,39 @@ +// RUN: %clang_cc1 -triple x86_64-pc-win32 -emit-llvm < %s | FileCheck %s + +void __fastcall f1(void); +void __stdcall f2(void); +void __fastcall f4(void) { +// CHECK: define void @f4() + f1(); +// CHECK: call void @f1() +} +void __stdcall f5(void) { +// CHECK: define void @f5() + f2(); +// CHECK: call void @f2() +} + +// PR5280 +void (__fastcall *pf1)(void) = f1; +void (__stdcall *pf2)(void) = f2; +void (__fastcall *pf4)(void) = f4; +void (__stdcall *pf5)(void) = f5; + +int main(void) { + f4(); f5(); + // CHECK: call void @f4() + // CHECK: call void @f5() + pf1(); pf2(); pf4(); pf5(); + // CHECK: call void %{{.*}}() + // CHECK: call void %{{.*}}() + // CHECK: call void %{{.*}}() + // CHECK: call void %{{.*}}() + return 0; +} + +// PR7117 +void __stdcall f7(foo) int foo; {} +void f8(void) { + f7(0); + // CHECK: call void @f7(i32 0) +} diff --git a/test/CodeGen/microsoft-call-conv.c b/test/CodeGen/microsoft-call-conv.c index 390c3be..64d10fb 100644 --- a/test/CodeGen/microsoft-call-conv.c +++ b/test/CodeGen/microsoft-call-conv.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm < %s | FileCheck %s +// RUN: %clang_cc1 -triple i386-pc-linux -emit-llvm < %s | FileCheck %s void __fastcall f1(void); void __stdcall f2(void); diff --git a/test/CodeGen/mips-byval-arg.c b/test/CodeGen/mips-byval-arg.c index 4e5f41a..41ccd60 100644 --- a/test/CodeGen/mips-byval-arg.c +++ b/test/CodeGen/mips-byval-arg.c @@ -1,5 +1,5 @@ -// RUN: %clang -target mipsel-unknown-linux -ccc-clang-archs mipsel -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 -// RUN: %clang -target mips64el-unknown-linux -ccc-clang-archs mips64el -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 +// RUN: %clang -target mipsel-unknown-linux -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 +// RUN: %clang -target mips64el-unknown-linux -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 typedef struct { float f[3]; diff --git a/test/CodeGen/mips-clobber-reg.c b/test/CodeGen/mips-clobber-reg.c index 2a06e53..be18353 100644 --- a/test/CodeGen/mips-clobber-reg.c +++ b/test/CodeGen/mips-clobber-reg.c @@ -1,4 +1,4 @@ -// RUN: %clang -target mipsel-unknown-linux -ccc-clang-archs mipsel -S -o - -emit-llvm %s +// RUN: %clang -target mipsel-unknown-linux -S -o - -emit-llvm %s /* This checks that the frontend will accept both diff --git a/test/CodeGen/mips-constraint-regs.c b/test/CodeGen/mips-constraint-regs.c index 075be05..ea063b5 100644 --- a/test/CodeGen/mips-constraint-regs.c +++ b/test/CodeGen/mips-constraint-regs.c @@ -1,4 +1,5 @@ -// RUN: %clang -target mipsel-unknown-linux -ccc-clang-archs mipsel -S -o - -emit-llvm %s +// RUN: 
%clang -target mipsel-unknown-linux -S -o - -emit-llvm %s \ +// RUN: | FileCheck %s // This checks that the frontend will accept inline asm constraints // c', 'l' and 'x'. Semantic checking will happen in the @@ -10,6 +11,7 @@ int main() // 'c': 16 bit address register for Mips16, GPR for all others // I am using 'c' to constrain both the target and one of the source // registers. We are looking for syntactical correctness. + // CHECK: %{{[0-9]+}} = call i32 asm sideeffect "addi $0,$1,$2 \0A\09\09", "=c,c,I"(i32 %{{[0-9]+}}, i32 %{{[0-9]+}}) nounwind, !srcloc !{{[0-9]+}} int __s, __v = 17; int __t; __asm__ __volatile__( @@ -20,6 +22,7 @@ int main() // 'l': lo register // We are making it clear that destination register is lo with the // use of the 'l' constraint ("=l"). + // CHECK: %{{[0-9]+}} = call i32 asm sideeffect "mtlo $1 \0A\09\09", "=l,r,~{lo}"(i32 %{{[0-9]+}}) nounwind, !srcloc !{{[0-9]+}} int i_temp = 44; int i_result; __asm__ __volatile__( @@ -31,6 +34,7 @@ int main() // 'x': Combined lo/hi registers // We are specifying that destination registers are the hi/lo pair with the // use of the 'x' constraint ("=x"). + // CHECK: %{{[0-9]+}} = call i64 asm sideeffect "mthi $1 \0A\09\09mtlo $2 \0A\09\09", "=x,r,r"(i32 %{{[0-9]+}}, i32 %{{[0-9]+}}) nounwind, !srcloc !{{[0-9]+}} int i_hi = 3; int i_lo = 2; long long ll_result = 0; @@ -40,5 +44,6 @@ int main() : "=x" (ll_result) : "r" (i_hi), "r" (i_lo) : ); + return 0; } diff --git a/test/CodeGen/mips-vector-arg.c b/test/CodeGen/mips-vector-arg.c index 39998d9..584192f 100644 --- a/test/CodeGen/mips-vector-arg.c +++ b/test/CodeGen/mips-vector-arg.c @@ -1,5 +1,5 @@ -// RUN: %clang -target mipsel-unknown-linux -ccc-clang-archs mipsel -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 -// RUN: %clang -target mips64el-unknown-linux -ccc-clang-archs mips64el -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 +// RUN: %clang -target mipsel-unknown-linux -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 +// RUN: %clang -target mips64el-unknown-linux -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 // check that // 1. vector arguments are passed in integer registers diff --git a/test/CodeGen/mips-vector-return.c b/test/CodeGen/mips-vector-return.c index 12e71fa..0bff969 100644 --- a/test/CodeGen/mips-vector-return.c +++ b/test/CodeGen/mips-vector-return.c @@ -1,5 +1,5 @@ -// RUN: %clang -target mipsel-unknown-linux -ccc-clang-archs mipsel -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 -// RUN: %clang -target mips64el-unknown-linux -ccc-clang-archs mips64el -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 +// RUN: %clang -target mipsel-unknown-linux -O3 -S -o - -emit-llvm %s | FileCheck %s -check-prefix=O32 +// RUN: %clang -target mips64el-unknown-linux -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s -check-prefix=N64 // vectors larger than 16-bytes are returned via the hidden pointer argument. 
// N64/N32 returns vectors whose size is equal to or smaller than 16-bytes in diff --git a/test/CodeGen/mips64-class-return.cpp b/test/CodeGen/mips64-class-return.cpp index 8e32d5c..2a786df 100644 --- a/test/CodeGen/mips64-class-return.cpp +++ b/test/CodeGen/mips64-class-return.cpp @@ -1,4 +1,4 @@ -// RUN: %clang -target mips64el-unknown-linux -ccc-clang-archs mips64el -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s +// RUN: %clang -target mips64el-unknown-linux -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s class B0 { double d; diff --git a/test/CodeGen/mips64-f128-literal.c b/test/CodeGen/mips64-f128-literal.c index 2f01520..9121169 100644 --- a/test/CodeGen/mips64-f128-literal.c +++ b/test/CodeGen/mips64-f128-literal.c @@ -1,4 +1,4 @@ -// RUN: %clang -target mips64el-unknown-linux -ccc-clang-archs mips64el -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s +// RUN: %clang -target mips64el-unknown-linux -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s typedef long double LD; diff --git a/test/CodeGen/mips64-nontrivial-return.cpp b/test/CodeGen/mips64-nontrivial-return.cpp index 8aff9ab..2164b20 100644 --- a/test/CodeGen/mips64-nontrivial-return.cpp +++ b/test/CodeGen/mips64-nontrivial-return.cpp @@ -1,4 +1,4 @@ -// RUN: %clang -target mips64el-unknown-linux -ccc-clang-archs mips64el -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s +// RUN: %clang -target mips64el-unknown-linux -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s class B { public: diff --git a/test/CodeGen/mips64-padding-arg.c b/test/CodeGen/mips64-padding-arg.c index b4dcfba..9d7f877 100644 --- a/test/CodeGen/mips64-padding-arg.c +++ b/test/CodeGen/mips64-padding-arg.c @@ -1,4 +1,4 @@ -// RUN: %clang -target mips64el-unknown-linux -ccc-clang-archs mips64el -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s +// RUN: %clang -target mips64el-unknown-linux -O3 -S -mabi=n64 -o - -emit-llvm %s | FileCheck %s typedef struct { double d; diff --git a/test/CodeGen/ms-inline-asm-64.c b/test/CodeGen/ms-inline-asm-64.c new file mode 100644 index 0000000..a74ede0 --- /dev/null +++ b/test/CodeGen/ms-inline-asm-64.c @@ -0,0 +1,16 @@ +// REQUIRES: x86-64-registered-target +// RUN: %clang_cc1 %s -triple x86_64-apple-darwin10 -O0 -fms-extensions -fenable-experimental-ms-inline-asm -w -emit-llvm -o - | FileCheck %s + +void t1() { + int var = 10; + __asm mov rax, offset var ; rax = address of myvar +// CHECK: t1 +// CHECK: call void asm sideeffect inteldialect "mov rax, $0", "r,~{rax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}) nounwind +} + +void t2() { + int var = 10; + __asm mov [eax], offset var +// CHECK: t2 +// CHECK: call void asm sideeffect inteldialect "mov [eax], $0", "r,~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}) nounwind +} diff --git a/test/CodeGen/ms-inline-asm.c b/test/CodeGen/ms-inline-asm.c index c140d60..7f43da8 100644 --- a/test/CodeGen/ms-inline-asm.c +++ b/test/CodeGen/ms-inline-asm.c @@ -1,17 +1,18 @@ -// RUN: %clang_cc1 %s -triple x86_64-apple-darwin10 -O0 -fms-extensions -fenable-experimental-ms-inline-asm -w -emit-llvm -o - | FileCheck %s +// REQUIRES: x86-64-registered-target +// RUN: %clang_cc1 %s -triple i386-apple-darwin10 -O0 -fms-extensions -fenable-experimental-ms-inline-asm -w -emit-llvm -o - | FileCheck %s void t1() { // CHECK: @t1 -// CHECK: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect inteldialect "", "~{dirflag},~{fpsr},~{flags}"() nounwind // CHECK: ret void __asm {} } void t2() { // CHECK: @t2 -// CHECK: call void asm 
sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect -// CHECK: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect -// CHECK: call void asm sideeffect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect inteldialect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind +// CHECK: call void asm sideeffect inteldialect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind +// CHECK: call void asm sideeffect inteldialect "nop", "~{dirflag},~{fpsr},~{flags}"() nounwind // CHECK: ret void __asm nop __asm nop @@ -20,15 +21,15 @@ void t2() { void t3() { // CHECK: @t3 -// CHECK: call void asm sideeffect "nop\0Anop\0Anop", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect inteldialect "nop\0A\09nop\0A\09nop", "~{dirflag},~{fpsr},~{flags}"() nounwind // CHECK: ret void __asm nop __asm nop __asm nop } void t4(void) { // CHECK: @t4 -// CHECK: call void asm sideeffect "mov ebx, eax", "~{ebx},~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect -// CHECK: call void asm sideeffect "mov ecx, ebx", "~{ecx},~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect inteldialect "mov ebx, eax", "~{ebx},~{dirflag},~{fpsr},~{flags}"() nounwind +// CHECK: call void asm sideeffect inteldialect "mov ecx, ebx", "~{ecx},~{dirflag},~{fpsr},~{flags}"() nounwind // CHECK: ret void __asm mov ebx, eax __asm mov ecx, ebx @@ -36,7 +37,7 @@ void t4(void) { void t5(void) { // CHECK: @t5 -// CHECK: call void asm sideeffect "mov ebx, eax\0Amov ecx, ebx", "~{ebx},~{ecx},~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect inteldialect "mov ebx, eax\0A\09mov ecx, ebx", "~{ebx},~{ecx},~{dirflag},~{fpsr},~{flags}"() nounwind // CHECK: ret void __asm mov ebx, eax __asm mov ecx, ebx } @@ -44,77 +45,158 @@ void t5(void) { void t6(void) { __asm int 0x2c // CHECK: t6 -// CHECK: call void asm sideeffect "int 0x2c", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect inteldialect "int $$0x2c", "~{dirflag},~{fpsr},~{flags}"() nounwind } -void* t7(void) { - __asm mov eax, fs:[0x10] -// CHECK: t7 -// CHECK: call void asm sideeffect "mov eax, fs:[0x10]", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect -} - -void t8() { +void t7() { __asm { int 0x2c ; } asm comments are fun! 
}{ } __asm {} -// CHECK: t8 -// CHECK: call void asm sideeffect "int 0x2c", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect -// CHECK: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: t7 +// CHECK: call void asm sideeffect inteldialect "int $$0x2c", "~{dirflag},~{fpsr},~{flags}"() nounwind +// CHECK: call void asm sideeffect inteldialect "", "~{dirflag},~{fpsr},~{flags}"() nounwind } -int t9() { - __asm int 3 ; } comments for single-line asm + +int t8() { + __asm int 4 ; } comments for single-line asm __asm {} __asm int 4 return 10; -// CHECK: t9 -// CHECK: call void asm sideeffect "int 3", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect -// CHECK: call void asm sideeffect "", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect -// CHECK: call void asm sideeffect "int 4", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: t8 +// CHECK: call void asm sideeffect inteldialect "int $$4", "~{dirflag},~{fpsr},~{flags}"() nounwind +// CHECK: call void asm sideeffect inteldialect "", "~{dirflag},~{fpsr},~{flags}"() nounwind +// CHECK: call void asm sideeffect inteldialect "int $$4", "~{dirflag},~{fpsr},~{flags}"() nounwind // CHECK: ret i32 10 } -void t10() { + +void t9() { __asm { push ebx mov ebx, 0x07 pop ebx } -// CHECK: t10 -// CHECK: call void asm sideeffect "push ebx\0Amov ebx, 0x07\0Apop ebx", "~{ebx},~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: t9 +// CHECK: call void asm sideeffect inteldialect "push ebx\0A\09mov ebx, $$0x07\0A\09pop ebx", "~{ebx},~{dirflag},~{fpsr},~{flags}"() nounwind } -unsigned t11(void) { +unsigned t10(void) { unsigned i = 1, j; __asm { mov eax, i mov j, eax } return j; -// CHECK: t11 +// CHECK: t10 // CHECK: [[I:%[a-zA-Z0-9]+]] = alloca i32, align 4 // CHECK: [[J:%[a-zA-Z0-9]+]] = alloca i32, align 4 // CHECK: store i32 1, i32* [[I]], align 4 -// CHECK: call void asm sideeffect "mov eax, i\0Amov j, eax", "~{dirflag},~{fpsr},~{flags}"() nounwind ia_nsdialect +// CHECK: call void asm sideeffect inteldialect "mov eax, dword ptr $1\0A\09mov dword ptr $0, eax", "=*m,*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}, i32* %{{.*}}) nounwind // CHECK: [[RET:%[a-zA-Z0-9]+]] = load i32* [[J]], align 4 // CHECK: ret i32 [[RET]] } -void t12(void) { - __asm EVEN - __asm ALIGN +void t11(void) { + __asm mov eax, 1 +// CHECK: t11 +// CHECK: call void asm sideeffect inteldialect "mov eax, $$1", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind +} + +unsigned t12(void) { + unsigned i = 1, j, l = 1, m; + __asm { + mov eax, i + mov j, eax + mov eax, l + mov m, eax + } + return j + m; +// CHECK: t12 +// CHECK: call void asm sideeffect inteldialect "mov eax, dword ptr $2\0A\09mov dword ptr $0, eax\0A\09mov eax, dword ptr $3\0A\09mov dword ptr $1, eax", "=*m,=*m,*m,*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}, i32* %{{.*}}) nounwind +} + +void t13() { + char i = 1; + short j = 2; + __asm movzx eax, i + __asm movzx eax, j +// CHECK: t13 +// CHECK: call void asm sideeffect inteldialect "movzx eax, byte ptr $0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i8* %{{.*}}) nounwind +// CHECK: call void asm sideeffect inteldialect "movzx eax, word ptr $0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i16* %{{.*}}) nounwind +} + +void t14() { + unsigned i = 1, j = 2; + __asm { + .if 1 + mov eax, i + .else + mov ebx, j + .endif + } +// CHECK: t14 +// CHECK: call void asm sideeffect inteldialect ".if 1\0A\09mov eax, dword ptr $0\0A\09.else\0A\09mov ebx, j\0A\09.endif", 
"*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}) nounwind +} + +void t15() { + int var = 10; + __asm mov eax, var ; eax = 10 + __asm mov eax, offset var ; eax = address of myvar +// CHECK: t15 +// CHECK: call void asm sideeffect inteldialect "mov eax, dword ptr $0", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}) nounwind +// CHECK: call void asm sideeffect inteldialect "mov eax, $0", "r,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}) nounwind +} + +void t16() { + int var = 10; + __asm mov [eax], offset var +// CHECK: t16 +// CHECK: call void asm sideeffect inteldialect "mov [eax], $0", "r,~{dirflag},~{fpsr},~{flags}"(i32* %{{.*}}) nounwind +} + +void t17() { + __asm _emit 0x4A + __asm _emit 0x43 + __asm _emit 0x4B +// CHECK: t17 +// CHECK: call void asm sideeffect inteldialect ".byte 0x4A", "~{dirflag},~{fpsr},~{flags}"() nounwind +// CHECK: call void asm sideeffect inteldialect ".byte 0x43", "~{dirflag},~{fpsr},~{flags}"() nounwind +// CHECK: call void asm sideeffect inteldialect ".byte 0x4B", "~{dirflag},~{fpsr},~{flags}"() nounwind +} + +struct t18_type { int a, b; }; + +int t18() { + struct t18_type foo; + foo.a = 1; + foo.b = 2; + __asm { + lea ebx, foo + mov eax, [ebx].0 + mov [ebx].4, ecx + } + return foo.b; +// CHECK: t18 +// CHECK: call void asm sideeffect inteldialect "lea ebx, foo\0A\09mov eax, [ebx].0\0A\09mov [ebx].4, ecx", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind } -void t13(void) { +int t19() { + struct t18_type foo; + foo.a = 1; + foo.b = 2; __asm { - _emit 0x4A - _emit 0x43 - _emit 0x4B + lea ebx, foo + mov eax, [ebx].foo.a + mov [ebx].foo.b, ecx } + return foo.b; +// CHECK: t19 +// CHECK: call void asm sideeffect inteldialect "lea ebx, foo\0A\09mov eax, [ebx].0\0A\09mov [ebx].4, ecx", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind } -void t14(void) { - unsigned arr[10]; - __asm LENGTH arr ; sizeof(arr)/sizeof(arr[0]) - __asm SIZE arr ; sizeof(arr) - __asm TYPE arr ; sizeof(arr[0]) +void t20() { + int foo; + __asm mov eax, TYPE foo +// CHECK: t20 +// CHECK: call void asm sideeffect inteldialect "mov eax, $$4", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind } diff --git a/test/CodeGen/ppc-atomics.c b/test/CodeGen/ppc-atomics.c new file mode 100644 index 0000000..3fcb0fb --- /dev/null +++ b/test/CodeGen/ppc-atomics.c @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -triple powerpc-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=32 +// RUN: %clang_cc1 -triple powerpc64-linux-gnu -emit-llvm %s -o - | FileCheck %s -check-prefix=64 + +unsigned char c1, c2; +unsigned short s1, s2; +unsigned int i1, i2; +unsigned long long ll1, ll2; + +enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst +}; + +void test1(void) { + (void)__atomic_load(&c1, &c2, memory_order_seq_cst); + (void)__atomic_load(&s1, &s2, memory_order_seq_cst); + (void)__atomic_load(&i1, &i2, memory_order_seq_cst); + (void)__atomic_load(&ll1, &ll2, memory_order_seq_cst); + +// 32: define void @test1 +// 32: load atomic i8* @c1 seq_cst +// 32: load atomic i16* @s1 seq_cst +// 32: load atomic i32* @i1 seq_cst +// 32: call void @__atomic_load(i32 8, i8* bitcast (i64* @ll1 to i8*) + +// 64: define void @test1 +// 64: load atomic i8* @c1 seq_cst +// 64: load atomic i16* @s1 seq_cst +// 64: load atomic i32* @i1 seq_cst +// 64: load atomic i64* @ll1 seq_cst +} diff --git a/test/CodeGen/ppc64-align-long-double.c b/test/CodeGen/ppc64-align-long-double.c new file mode 100644 index 0000000..c4dcfa0 --- 
/dev/null +++ b/test/CodeGen/ppc64-align-long-double.c @@ -0,0 +1,18 @@ +// REQUIRES: ppc64-registered-target +// RUN: %clang_cc1 -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s + +// CHECK: -f128:128:128- + +struct S { + double a; + long double b; +}; + +// CHECK: %struct.{{[a-zA-Z0-9]+}} = type { double, ppc_fp128 } + +long double test (struct S x) +{ + return x.b; +} + +// CHECK: %{{[0-9]}} = load ppc_fp128* %{{[a-zA-Z0-9]+}}, align 16 diff --git a/test/CodeGen/ppc64-extend.c b/test/CodeGen/ppc64-extend.c new file mode 100644 index 0000000..f4d6bf9 --- /dev/null +++ b/test/CodeGen/ppc64-extend.c @@ -0,0 +1,15 @@ +// REQUIRES: ppc64-registered-target +// RUN: %clang_cc1 -O0 -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s + +void f1(int x) { return; } +// CHECK: define void @f1(i32 signext %x) nounwind + +void f2(unsigned int x) { return; } +// CHECK: define void @f2(i32 zeroext %x) nounwind + +int f3(void) { return 0; } +// CHECK: define signext i32 @f3() nounwind + +unsigned int f4(void) { return 0; } +// CHECK: define zeroext i32 @f4() nounwind + diff --git a/test/CodeGen/ppc64-struct-onefloat.c b/test/CodeGen/ppc64-struct-onefloat.c new file mode 100644 index 0000000..4f9e194 --- /dev/null +++ b/test/CodeGen/ppc64-struct-onefloat.c @@ -0,0 +1,49 @@ +// REQUIRES: ppc64-registered-target +// RUN: %clang_cc1 -O0 -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s + +typedef struct s1 { float f; } Sf; +typedef struct s2 { double d; } Sd; +typedef struct s4 { Sf fs; } SSf; +typedef struct s5 { Sd ds; } SSd; + +void bar(Sf a, Sd b, SSf d, SSd e) {} + +// CHECK: define void @bar +// CHECK: %a = alloca %struct.s1, align 4 +// CHECK: %b = alloca %struct.s2, align 8 +// CHECK: %d = alloca %struct.s4, align 4 +// CHECK: %e = alloca %struct.s5, align 8 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1* %a, i32 0, i32 0 +// CHECK: store float %a.coerce, float* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2* %b, i32 0, i32 0 +// CHECK: store double %b.coerce, double* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s4* %d, i32 0, i32 0 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0 +// CHECK: store float %d.coerce, float* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s5* %e, i32 0, i32 0 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0 +// CHECK: store double %e.coerce, double* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: ret void + +void foo(void) +{ + Sf p1 = { 22.63f }; + Sd p2 = { 19.47 }; + SSf p4 = { { 22.63f } }; + SSd p5 = { { 19.47 } }; + bar(p1, p2, p4, p5); +} + +// CHECK: define void @foo +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1* %p1, i32 0, i32 0 +// CHECK: %{{[0-9]+}} = load float* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2* %p2, i32 0, i32 0 +// CHECK: %{{[0-9]+}} = load double* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s4* %p4, i32 0, i32 0 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s1* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0 +// CHECK: %{{[0-9]+}} = load float* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s5* %p5, i32 0, i32 0 +// CHECK: %{{[a-zA-Z0-9.]+}} = getelementptr %struct.s2* %{{[a-zA-Z0-9.]+}}, i32 0, i32 0 +// CHECK: %{{[0-9]+}} = load double* %{{[a-zA-Z0-9.]+}}, align 1 +// CHECK: 
call void @bar(float inreg %{{[0-9]+}}, double inreg %{{[0-9]+}}, float inreg %{{[0-9]+}}, double inreg %{{[0-9]+}}) +// CHECK: ret void diff --git a/test/CodeGen/ppc64-varargs-struct.c b/test/CodeGen/ppc64-varargs-struct.c new file mode 100644 index 0000000..61c33b0 --- /dev/null +++ b/test/CodeGen/ppc64-varargs-struct.c @@ -0,0 +1,30 @@ +// REQUIRES: ppc64-registered-target +// RUN: %clang_cc1 -triple powerpc64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s + +#include <stdarg.h> + +struct x { + long a; + double b; +}; + +void testva (int n, ...) +{ + va_list ap; + + struct x t = va_arg (ap, struct x); +// CHECK: bitcast i8* %{{[a-z.0-9]*}} to %struct.x* +// CHECK: bitcast %struct.x* %t to i8* +// CHECK: bitcast %struct.x* %{{[0-9]+}} to i8* +// CHECK: call void @llvm.memcpy + + int v = va_arg (ap, int); +// CHECK: ptrtoint i8* %{{[a-z.0-9]*}} to i64 +// CHECK: add i64 %{{[0-9]+}}, 4 +// CHECK: inttoptr i64 %{{[0-9]+}} to i8* +// CHECK: bitcast i8* %{{[0-9]+}} to i32* + + __int128_t u = va_arg (ap, __int128_t); +// CHECK: bitcast i8* %{{[a-z.0-9]+}} to i128* +// CHECK-NEXT: load i128* %{{[0-9]+}} +} diff --git a/test/CodeGen/pragma-weak.c b/test/CodeGen/pragma-weak.c index 7ad2b77..2efc2eb 100644 --- a/test/CodeGen/pragma-weak.c +++ b/test/CodeGen/pragma-weak.c @@ -157,6 +157,15 @@ void PR10878() { SHA384Pad(0); } // CHECK: call void @SHA384Pad(i8* null) +// PR14046: Parse #pragma weak in function-local context +extern int PR14046e(void); +void PR14046f() { +#pragma weak PR14046e + PR14046e(); +} +// CHECK: declare extern_weak i32 @PR14046e() + + ///////////// TODO: stuff that still doesn't work // due to the fact that disparate TopLevelDecls cannot affect each other diff --git a/test/CodeGen/rtm-builtins.c b/test/CodeGen/rtm-builtins.c new file mode 100644 index 0000000..c4939a9 --- /dev/null +++ b/test/CodeGen/rtm-builtins.c @@ -0,0 +1,23 @@ +// RUN: %clang_cc1 %s -O3 -triple=x86_64-apple-darwin -target-feature +rtm -emit-llvm -o - | FileCheck %s + +// Don't include mm_malloc.h, it's system specific.
+#define __MM_MALLOC_H + +#include <immintrin.h> + +unsigned int test_xbegin(void) { + // CHECK: i32 @llvm.x86.xbegin() + return _xbegin(); +} + +void +test_xend(void) { + // CHECK: void @llvm.x86.xend() + _xend(); +} + +void +test_xabort(void) { + // CHECK: void @llvm.x86.xabort(i8 2) + _xabort(2); +} diff --git a/test/CodeGen/sse-builtins.c b/test/CodeGen/sse-builtins.c index 0e48560..400209f 100644 --- a/test/CodeGen/sse-builtins.c +++ b/test/CodeGen/sse-builtins.c @@ -1,8 +1,39 @@ // RUN: %clang_cc1 -ffreestanding -triple i386-apple-darwin9 -target-cpu pentium4 -target-feature +sse4.1 -g -emit-llvm %s -o - | FileCheck %s +#include #include #include +__m128 test_rsqrt_ss(__m128 x) { + // CHECK: define {{.*}} @test_rsqrt_ss + // CHECK: call <4 x float> @llvm.x86.sse.rsqrt.ss + // CHECK: extractelement <4 x float> {{.*}}, i32 0 + // CHECK: extractelement <4 x float> {{.*}}, i32 1 + // CHECK: extractelement <4 x float> {{.*}}, i32 2 + // CHECK: extractelement <4 x float> {{.*}}, i32 3 + return _mm_rsqrt_ss(x); +} + +__m128 test_rcp_ss(__m128 x) { + // CHECK: define {{.*}} @test_rcp_ss + // CHECK: call <4 x float> @llvm.x86.sse.rcp.ss + // CHECK: extractelement <4 x float> {{.*}}, i32 0 + // CHECK: extractelement <4 x float> {{.*}}, i32 1 + // CHECK: extractelement <4 x float> {{.*}}, i32 2 + // CHECK: extractelement <4 x float> {{.*}}, i32 3 + return _mm_rcp_ss(x); +} + +__m128 test_sqrt_ss(__m128 x) { + // CHECK: define {{.*}} @test_sqrt_ss + // CHECK: call <4 x float> @llvm.x86.sse.sqrt.ss + // CHECK: extractelement <4 x float> {{.*}}, i32 0 + // CHECK: extractelement <4 x float> {{.*}}, i32 1 + // CHECK: extractelement <4 x float> {{.*}}, i32 2 + // CHECK: extractelement <4 x float> {{.*}}, i32 3 + return _mm_sqrt_ss(x); +} + __m128 test_loadl_pi(__m128 x, void* y) { // CHECK: define {{.*}} @test_loadl_pi // CHECK: load <2 x float>* {{.*}}, align 1{{$}} diff --git a/test/CodeGen/statements.c b/test/CodeGen/statements.c index e2bbb5a..5affb9a 100644 --- a/test/CodeGen/statements.c +++ b/test/CodeGen/statements.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -Wno-error=return-type %s -emit-llvm-only +// REQUIRES: LP64 void test1(int x) { switch (x) { diff --git a/test/CodeGen/stdcall-fastcall.c b/test/CodeGen/stdcall-fastcall.c index 3de7b67..d518178 100644 --- a/test/CodeGen/stdcall-fastcall.c +++ b/test/CodeGen/stdcall-fastcall.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm < %s | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm < %s | FileCheck %s void __attribute__((fastcall)) f1(void); void __attribute__((stdcall)) f2(void); @@ -48,3 +48,99 @@ void f8(void) { f7(0); // CHECK: call x86_stdcallcc void @f7(i32 0) } + +void __attribute__((fastcall)) foo1(int y); +void bar1(int y) { + // CHECK: define void @bar1 + // CHECK: call x86_fastcallcc void @foo1(i32 inreg % + foo1(y); +} + +struct S1 { + int x; +}; +void __attribute__((fastcall)) foo2(struct S1 y); +void bar2(struct S1 y) { + // CHECK: define void @bar2 + // CHECK: call x86_fastcallcc void @foo2(i32 inreg undef, i32 % + foo2(y); +} + +void __attribute__((fastcall)) foo3(int *y); +void bar3(int *y) { + // CHECK: define void @bar3 + // CHECK: call x86_fastcallcc void @foo3(i32* inreg % + foo3(y); +} + +enum Enum {Eval}; +void __attribute__((fastcall)) foo4(enum Enum y); +void bar4(enum Enum y) { + // CHECK: define void @bar4 + // CHECK: call x86_fastcallcc void @foo4(i32 inreg % + foo4(y); +} + +struct S2 { + int x1; + double x2; + double x3; +}; +void __attribute__((fastcall)) foo5(struct S2 y); +void bar5(struct S2 y) { + //
CHECK: define void @bar5 + // CHECK: call x86_fastcallcc void @foo5(%struct.S2* byval align 4 % + foo5(y); +} + +void __attribute__((fastcall)) foo6(long long y); +void bar6(long long y) { + // CHECK: define void @bar6 + // CHECK: call x86_fastcallcc void @foo6(i64 % + foo6(y); +} + +void __attribute__((fastcall)) foo7(int a, struct S1 b, int c); +void bar7(int a, struct S1 b, int c) { + // CHECK: define void @bar7 + // CHECK: call x86_fastcallcc void @foo7(i32 inreg %{{.*}}, i32 %{{.*}}, i32 %{{.*}} + foo7(a, b, c); +} + +void __attribute__((fastcall)) foo8(struct S1 a, int b); +void bar8(struct S1 a, int b) { + // CHECK: define void @bar8 + // CHECK: call x86_fastcallcc void @foo8(i32 inreg undef, i32 %{{.*}}, i32 inreg % + foo8(a, b); +} + +void __attribute__((fastcall)) foo9(struct S2 a, int b); +void bar9(struct S2 a, int b) { + // CHECK: define void @bar9 + // CHECK: call x86_fastcallcc void @foo9(%struct.S2* byval align 4 %{{.*}}, i32 % + foo9(a, b); +} + +void __attribute__((fastcall)) foo10(float y, int x); +void bar10(float y, int x) { + // CHECK: define void @bar10 + // CHECK: call x86_fastcallcc void @foo10(float %{{.*}}, i32 inreg % + foo10(y, x); +} + +void __attribute__((fastcall)) foo11(double y, int x); +void bar11(double y, int x) { + // CHECK: define void @bar11 + // CHECK: call x86_fastcallcc void @foo11(double %{{.*}}, i32 inreg % + foo11(y, x); +} + +struct S3 { + float x; +}; +void __attribute__((fastcall)) foo12(struct S3 y, int x); +void bar12(struct S3 y, int x) { + // CHECK: define void @bar12 + // CHECK: call x86_fastcallcc void @foo12(float %{{.*}}, i32 inreg % + foo12(y, x); +} diff --git a/test/CodeGen/tbaa-for-vptr.cpp b/test/CodeGen/tbaa-for-vptr.cpp index b9a68fe..9369036 100644 --- a/test/CodeGen/tbaa-for-vptr.cpp +++ b/test/CodeGen/tbaa-for-vptr.cpp @@ -1,6 +1,6 @@ -// RUN: %clang_cc1 -emit-llvm -o - -O0 -fthread-sanitizer %s | FileCheck %s +// RUN: %clang_cc1 -emit-llvm -o - -O0 -fsanitize=thread %s | FileCheck %s // RUN: %clang_cc1 -emit-llvm -o - -O1 %s | FileCheck %s -// RUN: %clang_cc1 -emit-llvm -o - -O1 -relaxed-aliasing -fthread-sanitizer %s | FileCheck %s +// RUN: %clang_cc1 -emit-llvm -o - -O1 -relaxed-aliasing -fsanitize=thread %s | FileCheck %s // // RUN: %clang_cc1 -emit-llvm -o - -O0 %s | FileCheck %s --check-prefix=NOTBAA // RUN: %clang_cc1 -emit-llvm -o - -O2 -relaxed-aliasing %s | FileCheck %s --check-prefix=NOTBAA @@ -21,7 +21,7 @@ void CallFoo(A *a) { a->foo(); } -// CHECK: %{{.*}} = load {{.*}} !tbaa !0 -// CHECK: store {{.*}} !tbaa !0 -// CHECK: = metadata !{metadata !"vtable pointer", metadata !{{.*}}} +// CHECK: %{{.*}} = load {{.*}} !tbaa ![[NUM:[0-9]+]] +// CHECK: store {{.*}} !tbaa ![[NUM]] +// CHECK: [[NUM]] = metadata !{metadata !"vtable pointer", metadata !{{.*}}} // NOTBAA-NOT: = metadata !{metadata !"Simple C/C++ TBAA"} diff --git a/test/CodeGen/tbaa-struct.cpp b/test/CodeGen/tbaa-struct.cpp new file mode 100644 index 0000000..8b30aa0 --- /dev/null +++ b/test/CodeGen/tbaa-struct.cpp @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -emit-llvm -o - -O1 %s | FileCheck %s +// +// Check that we generate !tbaa.struct metadata for struct copies. 
+struct A { + short s; + int i; + char c; + int j; +}; + +void copy(struct A *a, struct A *b) { + *a = *b; +} + +// CHECK: target datalayout = "{{.*}}p:[[P:64|32]] +// CHECK: call void @llvm.memcpy.p0i8.p0i8.i[[P]](i8* %{{.*}}, i8* %{{.*}}, i[[P]] 16, i32 4, i1 false), !tbaa.struct [[TS:!.*]] +// CHECK: [[TS]] = metadata !{i64 0, i64 2, metadata !{{.*}}, i64 4, i64 4, metadata !{{.*}}, i64 8, i64 1, metadata !{{.*}}, i64 12, i64 4, metadata !{{.*}}} diff --git a/test/CodeGen/trapv.c b/test/CodeGen/trapv.c index f52dad5..bc8bc70 100644 --- a/test/CodeGen/trapv.c +++ b/test/CodeGen/trapv.c @@ -17,7 +17,8 @@ void test0() { // CHECK-NEXT: [[T3:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 [[T2]]) // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T3]], 0 // CHECK-NEXT: [[T5:%.*]] = extractvalue { i32, i1 } [[T3]], 1 - // CHECK-NEXT: br i1 [[T5]] + // CHECK-NEXT: [[T6:%.*]] = xor i1 [[T5]], true + // CHECK-NEXT: br i1 [[T6]] // CHECK: call void @llvm.trap() i = j + k; } @@ -31,7 +32,8 @@ void test1() { // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1) // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0 // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1 - // CHECK-NEXT: br i1 [[T4]] + // CHECK-NEXT: [[T5:%.*]] = xor i1 [[T4]], true + // CHECK-NEXT: br i1 [[T5]] // CHECK: call void @llvm.trap() } @@ -44,6 +46,16 @@ void test2() { // CHECK-NEXT: [[T2:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[T1]], i32 1) // CHECK-NEXT: [[T3:%.*]] = extractvalue { i32, i1 } [[T2]], 0 // CHECK-NEXT: [[T4:%.*]] = extractvalue { i32, i1 } [[T2]], 1 - // CHECK-NEXT: br i1 [[T4]] + // CHECK-NEXT: [[T5:%.*]] = xor i1 [[T4]], true + // CHECK-NEXT: br i1 [[T5]] // CHECK: call void @llvm.trap() } + +// CHECK: define void @test3( +void test3(int a, int b, float c, float d) { + // CHECK-NOT: @llvm.trap + (void)(a / b); + (void)(a % b); + (void)(c / d); + // CHECK: } +} diff --git a/test/CodeGen/unwind-attr.c b/test/CodeGen/unwind-attr.c index c588ca8..7a79cb6 100644 --- a/test/CodeGen/unwind-attr.c +++ b/test/CodeGen/unwind-attr.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -fexceptions -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck -check-prefix NOEXC %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -fexceptions -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck -check-prefix NOEXC %s int opaque(); diff --git a/test/CodeGen/x86_64-arguments-nacl.c b/test/CodeGen/x86_64-arguments-nacl.c new file mode 100644 index 0000000..8f756ca --- /dev/null +++ b/test/CodeGen/x86_64-arguments-nacl.c @@ -0,0 +1,120 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-nacl -emit-llvm -o - %s| FileCheck %s +#include +// Test for x86-64 structure representation (instead of pnacl representation), +// in particular for unions. Also crib a few tests from x86 Linux. 
+ +union PP_VarValue { + int as_int; + double as_double; + long long as_i64; +}; + +struct PP_Var { + int type; + int padding; + union PP_VarValue value; +}; + +// CHECK: define { i64, i64 } @f0() +struct PP_Var f0() { + struct PP_Var result = { 0, 0, 0 }; + return result; +} + +// CHECK: define void @f1(i64 %p1.coerce0, i64 %p1.coerce1) +void f1(struct PP_Var p1) { while(1) {} } + +// long doubles are 64 bits on NaCl +// CHECK: define double @f5() +long double f5(void) { + return 0; +} + +// CHECK: define void @f6(i8 signext %a0, i16 signext %a1, i32 %a2, i64 %a3, i8* %a4) +void f6(char a0, short a1, int a2, long long a3, void *a4) { +} + +// CHECK: define i64 @f8_1() +// CHECK: define void @f8_2(i64 %a0.coerce) +union u8 { + long double a; + int b; +}; +union u8 f8_1() { while (1) {} } +void f8_2(union u8 a0) {} + +// CHECK: define i64 @f9() +struct s9 { int a; int b; int : 0; } f9(void) { while (1) {} } + +// CHECK: define void @f10(i64 %a0.coerce) +struct s10 { int a; int b; int : 0; }; +void f10(struct s10 a0) {} + +// CHECK: define double @f11() +union { long double a; float b; } f11() { while (1) {} } + +// CHECK: define i32 @f12_0() +// CHECK: define void @f12_1(i32 %a0.coerce) +struct s12 { int a __attribute__((aligned(16))); }; +struct s12 f12_0(void) { while (1) {} } +void f12_1(struct s12 a0) {} + +// Check that sret parameter is accounted for when checking available integer +// registers. +// CHECK: define void @f13(%struct.s13_0* noalias sret %agg.result, i32 %a, i32 %b, i32 %c, i32 %d, {{.*}}* byval align 8 %e, i32 %f) + +struct s13_0 { long long f0[3]; }; +struct s13_1 { long long f0[2]; }; +struct s13_0 f13(int a, int b, int c, int d, + struct s13_1 e, int f) { while (1) {} } + +// CHECK: define void @f20(%struct.s20* byval align 32 %x) +struct __attribute__((aligned(32))) s20 { + int x; + int y; +}; +void f20(struct s20 x) {} + + +// CHECK: declare void @func(i64) +typedef struct _str { + union { + long double a; + long c; + }; +} str; + +void func(str s); +str ss; +void f9122143() +{ + func(ss); +} + + +typedef struct { + int a; + int b; +} s1; +// CHECK: define i32 @f48(%struct.s1* byval %s) +int __attribute__((pnaclcall)) f48(s1 s) { return s.a; } + +// CHECK: define void @f49(%struct.s1* noalias sret %agg.result) +s1 __attribute__((pnaclcall)) f49() { s1 s; s.a = s.b = 1; return s; } + +union simple_union { + int a; + char b; +}; +// Unions should be passed as byval structs +// CHECK: define void @f50(%union.simple_union* byval %s) +void __attribute__((pnaclcall)) f50(union simple_union s) {} + +typedef struct { + int b4 : 4; + int b3 : 3; + int b8 : 8; +} bitfield1; +// Bitfields should be passed as byval structs +// CHECK: define void @f51(%struct.bitfield1* byval %bf1) +void __attribute__((pnaclcall)) f51(bitfield1 bf1) {} diff --git a/test/CodeGenCUDA/address-spaces.cu b/test/CodeGenCUDA/address-spaces.cu index 61d4d6b..9df7e3f 100644 --- a/test/CodeGenCUDA/address-spaces.cu +++ b/test/CodeGenCUDA/address-spaces.cu @@ -20,5 +20,17 @@ __device__ void foo() { // CHECK: load i32* bitcast (i32 addrspace(3)* @k to i32*) k++; + + static int li; + // CHECK: load i32 addrspace(1)* @_ZZ3foovE2li + li++; + + __constant__ int lj; + // CHECK: load i32 addrspace(4)* @_ZZ3foovE2lj + lj++; + + __shared__ int lk; + // CHECK: load i32 addrspace(3)* @_ZZ3foovE2lk + lk++; } diff --git a/test/CodeGenCXX/2005-01-03-StaticInitializers.cpp b/test/CodeGenCXX/2005-01-03-StaticInitializers.cpp index 875c412..1c9d120 100644 --- a/test/CodeGenCXX/2005-01-03-StaticInitializers.cpp +++ 
b/test/CodeGenCXX/2005-01-03-StaticInitializers.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s +// REQUIRES: LP64 struct S { int A[2]; diff --git a/test/CodeGenCXX/2009-05-04-PureConstNounwind.cpp b/test/CodeGenCXX/2009-05-04-PureConstNounwind.cpp index 8361680..7acc07d 100644 --- a/test/CodeGenCXX/2009-05-04-PureConstNounwind.cpp +++ b/test/CodeGenCXX/2009-05-04-PureConstNounwind.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fexceptions -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -fexceptions -emit-llvm %s -o - | FileCheck %s int c(void) __attribute__((const)); int p(void) __attribute__((pure)); int t(void); diff --git a/test/CodeGenCXX/assign-construct-memcpy.cpp b/test/CodeGenCXX/assign-construct-memcpy.cpp new file mode 100644 index 0000000..3d42051 --- /dev/null +++ b/test/CodeGenCXX/assign-construct-memcpy.cpp @@ -0,0 +1,89 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin12 -emit-llvm -o - -std=c++11 %s -DPOD | FileCheck %s -check-prefix=CHECK-POD +// RUN: %clang_cc1 -triple x86_64-apple-darwin12 -emit-llvm -o - -std=c++11 %s | FileCheck %s -check-prefix=CHECK-NONPOD + +// Declare the reserved placement operators. +typedef __typeof__(sizeof(0)) size_t; +void *operator new(size_t, void*) throw(); +void operator delete(void*, void*) throw(); +void *operator new[](size_t, void*) throw(); +void operator delete[](void*, void*) throw(); +template T &&move(T&); + +struct foo { +#ifndef POD + foo() {} // non-POD +#endif + void *a, *b; + bool c; +}; + +// It is not legal to copy the tail padding in all cases, but if it is it can +// yield better codegen. + +foo *test1(void *f, const foo &x) { + return new (f) foo(x); +// CHECK-POD: test1 +// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 + +// CHECK-NONPOD: test1 +// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 +} + +foo *test2(const foo &x) { + return new foo(x); +// CHECK-POD: test2 +// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 + +// CHECK-NONPOD: test2 +// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 +} + +foo test3(const foo &x) { + foo f = x; + return f; +// CHECK-POD: test3 +// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 + +// CHECK-NONPOD: test3 +// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 +} + +foo *test4(foo &&x) { + return new foo(x); +// CHECK-POD: test4 +// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 + +// CHECK-NONPOD: test4 +// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 +} + +void test5(foo &f, const foo &x) { + f = x; +// CHECK-POD: test5 +// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 + +// CHECK-NONPOD: test5 +// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 17, i32 8 +} + +extern foo globtest; + +void test6(foo &&x) { + globtest = move(x); +// CHECK-POD: test6 +// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 + +// CHECK-NONPOD: test6 +// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 17, i32 8 +} + +void byval(foo f); + +void test7(const foo &x) { + byval(x); +// CHECK-POD: test7 +// CHECK-POD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 + +// CHECK-NONPOD: test7 +// CHECK-NONPOD: call void @llvm.memcpy.p0i8.p0i8.i64({{.*}}i64 24, i32 8 +} diff --git a/test/CodeGenCXX/attr.cpp b/test/CodeGenCXX/attr.cpp index 9e8740e5..a0dd748 100644 --- a/test/CodeGenCXX/attr.cpp +++ 
b/test/CodeGenCXX/attr.cpp @@ -10,17 +10,21 @@ class C { virtual void bar1() __attribute__((aligned(1))); virtual void bar2() __attribute__((aligned(2))); virtual void bar3() __attribute__((aligned(1024))); + void bar4() __attribute__((aligned(1024))); } c; -// CHECK: define void @_ZN1C4bar1Ev(%class.C* %this) nounwind align 2 +// CHECK: define void @_ZN1C4bar1Ev(%class.C* %this) unnamed_addr nounwind align 2 void C::bar1() { } -// CHECK: define void @_ZN1C4bar2Ev(%class.C* %this) nounwind align 2 +// CHECK: define void @_ZN1C4bar2Ev(%class.C* %this) unnamed_addr nounwind align 2 void C::bar2() { } -// CHECK: define void @_ZN1C4bar3Ev(%class.C* %this) nounwind align 1024 +// CHECK: define void @_ZN1C4bar3Ev(%class.C* %this) unnamed_addr nounwind align 1024 void C::bar3() { } +// CHECK: define void @_ZN1C4bar4Ev(%class.C* %this) nounwind align 1024 +void C::bar4() { } + // PR6635 // CHECK: define i32 @_Z5test1v() int test1() { return 10; } diff --git a/test/CodeGenCXX/builtins.cpp b/test/CodeGenCXX/builtins.cpp index 4542563..0629c31 100644 --- a/test/CodeGenCXX/builtins.cpp +++ b/test/CodeGenCXX/builtins.cpp @@ -7,15 +7,3 @@ int main() { // CHECK: call signext i8 @memmove() return memmove(); } - -// - -template -int equal(const char *s1, const char *s2) { - return Compare(s1, s2) == 0; -} - -// CHECK: define weak_odr i32 @_Z5equalIXadL_Z16__builtin_strcmpPKcS1_EEEiS1_S1_ -// CHECK: call i32 @strcmp -template int equal<&__builtin_strcmp>(const char*, const char*); - diff --git a/test/CodeGenCXX/catch-undef-behavior.cpp b/test/CodeGenCXX/catch-undef-behavior.cpp new file mode 100644 index 0000000..fd9e3d7 --- /dev/null +++ b/test/CodeGenCXX/catch-undef-behavior.cpp @@ -0,0 +1,134 @@ +// RUN: %clang_cc1 -fsanitize=signed-integer-overflow,divide-by-zero,shift,unreachable,return,vla-bound,alignment,null,vptr,object-size,float-cast-overflow -emit-llvm %s -o - -triple x86_64-linux-gnu | FileCheck %s + +// CHECK: @_Z17reference_binding +void reference_binding(int *p) { + // C++ core issue 453: If an lvalue to which a reference is directly bound + // designates neither an existing object or function of an appropriate type, + // nor a region of storage of suitable size and alignment to contain an object + // of the reference's type, the behavior is undefined. + + // CHECK: icmp ne {{.*}}, null + + // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64 + // CHECK-NEXT: icmp uge i64 %[[SIZE]], 4 + + // CHECK: %[[PTRINT:.*]] = ptrtoint + // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 3 + // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0 + int &r = *p; +} + +struct S { + double d; + int a, b; + virtual int f(); +}; + +// CHECK: @_Z13member_access +void member_access(S *p) { + // (1a) Check 'p' is appropriately sized and aligned for member access. + + // CHECK: icmp ne {{.*}}, null + + // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64 + // CHECK-NEXT: icmp uge i64 %[[SIZE]], 24 + + // CHECK: %[[PTRINT:.*]] = ptrtoint + // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 7 + // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0 + + // (1b) Check that 'p' actually points to an 'S'. + + // CHECK: %[[VPTRADDR:.*]] = bitcast {{.*}} to i64* + // CHECK-NEXT: %[[VPTR:.*]] = load i64* %[[VPTRADDR]] + // + // hash_16_bytes: + // + // If this number changes, it indicates that either the mangled name of ::S + // has changed, or that LLVM's hashing function has changed. The latter case + // is OK if the hashing function is still stable. 
+ // + // The two hash values are for 64- and 32-bit Clang binaries, respectively. + // FIXME: We should produce a 64-bit value either way. + // + // CHECK-NEXT: xor i64 {{-4030275160588942838|2562089159}}, %[[VPTR]] + // CHECK-NEXT: mul i64 {{.*}}, -7070675565921424023 + // CHECK-NEXT: lshr i64 {{.*}}, 47 + // CHECK-NEXT: xor i64 + // CHECK-NEXT: xor i64 %[[VPTR]] + // CHECK-NEXT: mul i64 {{.*}}, -7070675565921424023 + // CHECK-NEXT: lshr i64 {{.*}}, 47 + // CHECK-NEXT: xor i64 + // CHECK-NEXT: %[[HASH:.*]] = mul i64 {{.*}}, -7070675565921424023 + // + // Check the hash against the table: + // + // CHECK-NEXT: %[[IDX:.*]] = and i64 %{{.*}}, 127 + // CHECK-NEXT: getelementptr inbounds [128 x i64]* @__ubsan_vptr_type_cache, i32 0, i64 %[[IDX]] + // CHECK-NEXT: %[[CACHEVAL:.*]] = load i64* + // CHECK-NEXT: icmp eq i64 %[[CACHEVAL]], %[[HASH]] + // CHECK-NEXT: br i1 + + // CHECK: call void @__ubsan_handle_dynamic_type_cache_miss({{.*}}, i64 %{{.*}}, i64 %[[HASH]]) + + // (2) Check 'p->b' is appropriately sized and aligned for a load. + + // FIXME: Suppress this in the trivial case of a member access, because we + // know we've just checked the member access expression itself. + + // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64 + // CHECK-NEXT: icmp uge i64 %[[SIZE]], 4 + + // CHECK: %[[PTRINT:.*]] = ptrtoint + // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 3 + // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0 + int k = p->b; + + // (3a) Check 'p' is appropriately sized and aligned for member function call. + + // CHECK: icmp ne {{.*}}, null + + // CHECK: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64 + // CHECK-NEXT: icmp uge i64 %[[SIZE]], 24 + + // CHECK: %[[PTRINT:.*]] = ptrtoint + // CHECK-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRINT]], 7 + // CHECK-NEXT: icmp eq i64 %[[MISALIGN]], 0 + + // (3b) Check that 'p' actually points to an 'S' + + // CHECK: load i64* + // CHECK-NEXT: xor i64 {{-4030275160588942838|2562089159}}, + // [...] + // CHECK: getelementptr inbounds [128 x i64]* @__ubsan_vptr_type_cache, i32 0, i64 % + // CHECK: br i1 + // CHECK: call void @__ubsan_handle_dynamic_type_cache_miss({{.*}}, i64 %{{.*}}, i64 %{{.*}}) + + k = p->f(); +} + +// CHECK: @_Z12lsh_overflow +int lsh_overflow(int a, int b) { + // CHECK: %[[INBOUNDS:.*]] = icmp ule i32 %[[RHS:.*]], 31 + // CHECK-NEXT: br i1 %[[INBOUNDS]] + + // CHECK: %[[SHIFTED_OUT_WIDTH:.*]] = sub nuw nsw i32 31, %[[RHS]] + // CHECK-NEXT: %[[SHIFTED_OUT:.*]] = lshr i32 %[[LHS:.*]], %[[SHIFTED_OUT_WIDTH]] + + // This is present for C++11 but not for C: C++ core issue 1457 allows a '1' + // to be shifted into the sign bit, but not out of it. 
+ // CHECK-NEXT: %[[SHIFTED_OUT_NOT_SIGN:.*]] = lshr i32 %[[SHIFTED_OUT]], 1 + + // CHECK-NEXT: %[[NO_OVERFLOW:.*]] = icmp eq i32 %[[SHIFTED_OUT_NOT_SIGN]], 0 + // CHECK-NEXT: br i1 %[[NO_OVERFLOW]] + + // CHECK: %[[RET:.*]] = shl i32 %[[LHS]], %[[RHS]] + // CHECK-NEXT: ret i32 %[[RET]] + return a << b; +} + +// CHECK: @_Z9no_return +int no_return() { + // CHECK: call void @__ubsan_handle_missing_return(i8* bitcast ({{.*}}* @{{.*}} to i8*)) noreturn nounwind + // CHECK-NEXT: unreachable +} diff --git a/test/CodeGenCXX/compound-literals.cpp b/test/CodeGenCXX/compound-literals.cpp index 17a3114..5df0ea5 100644 --- a/test/CodeGenCXX/compound-literals.cpp +++ b/test/CodeGenCXX/compound-literals.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple armv7-none-eabi -emit-llvm -o - %s | FileCheck %s struct X { X(); @@ -18,10 +18,10 @@ int f() { // CHECK-NEXT: [[I:%[a-z0-9]+]] = getelementptr inbounds {{.*}}* [[LVALUE]], i32 0, i32 0 // CHECK-NEXT: store i32 17, i32* [[I]] // CHECK-NEXT: [[X:%[a-z0-9]+]] = getelementptr inbounds {{.*}} [[LVALUE]], i32 0, i32 1 - // CHECK-NEXT: call void @_ZN1XC1EPKc({{.*}}[[X]] + // CHECK-NEXT: call %struct.X* @_ZN1XC1EPKc({{.*}}[[X]] // CHECK-NEXT: [[I:%[a-z0-9]+]] = getelementptr inbounds {{.*}} [[LVALUE]], i32 0, i32 0 // CHECK-NEXT: [[RESULT:%[a-z0-9]+]] = load i32* - // CHECK-NEXT: call void @_ZN1YD1Ev + // CHECK-NEXT: call %struct.Y* @_ZN1YD1Ev // CHECK-NEXT: ret i32 [[RESULT]] return ((Y){17, "seventeen"}).i; } diff --git a/test/CodeGenCXX/const-global-linkage.cpp b/test/CodeGenCXX/const-global-linkage.cpp index d0a055b..df78fdd 100644 --- a/test/CodeGenCXX/const-global-linkage.cpp +++ b/test/CodeGenCXX/const-global-linkage.cpp @@ -2,12 +2,16 @@ const int x = 10; const int y = 20; +const volatile int z = 30; // CHECK-NOT: @x +// CHECK: @z = constant i32 30 // CHECK: @_ZL1y = internal constant i32 20 const int& b() { return y; } const char z1[] = "asdf"; const char z2[] = "zxcv"; +const volatile char z3[] = "zxcv"; // CHECK-NOT: @z1 +// CHECK: @z3 = constant // CHECK: @_ZL2z2 = internal constant const char* b2() { return z2; } diff --git a/test/CodeGenCXX/const-init-cxx11.cpp b/test/CodeGenCXX/const-init-cxx11.cpp index db1bb41..833adba 100644 --- a/test/CodeGenCXX/const-init-cxx11.cpp +++ b/test/CodeGenCXX/const-init-cxx11.cpp @@ -432,11 +432,7 @@ namespace InitFromConst { // CHECK: call void @_ZN13InitFromConst7consumeIRKNS_1SEEEvT_(%"struct.InitFromConst::S"* @_ZN13InitFromConstL1sE) consume(s); - // FIXME CHECK-NOT: call void @_ZN13InitFromConst7consumeIRKNS_1SEEEvT_(%"struct.InitFromConst::S"* @_ZN13InitFromConstL1sE) - // There's no lvalue-to-rvalue conversion here, so 'r' is odr-used, and - // we're permitted to emit a load of it. This seems likely to be a defect - // in the standard. If we start emitting a direct reference to 's', update - // this test. 
+ // CHECK: call void @_ZN13InitFromConst7consumeIRKNS_1SEEEvT_(%"struct.InitFromConst::S"* @_ZN13InitFromConstL1sE) consume(r); // CHECK: call void @_ZN13InitFromConst7consumeIPKNS_1SEEEvT_(%"struct.InitFromConst::S"* @_ZN13InitFromConstL1sE) diff --git a/test/CodeGenCXX/conversion-operator-base.cpp b/test/CodeGenCXX/conversion-operator-base.cpp index 8fbeadf..e62e225 100644 --- a/test/CodeGenCXX/conversion-operator-base.cpp +++ b/test/CodeGenCXX/conversion-operator-base.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only %s -verify +// expected-no-diagnostics // PR5730 struct A { operator int(); float y; }; diff --git a/test/CodeGenCXX/copy-assign-synthesis-3.cpp b/test/CodeGenCXX/copy-assign-synthesis-3.cpp index ce4640a..5469d11 100644 --- a/test/CodeGenCXX/copy-assign-synthesis-3.cpp +++ b/test/CodeGenCXX/copy-assign-synthesis-3.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only -verify %s +// expected-no-diagnostics struct A { A& operator=(A&); diff --git a/test/CodeGenCXX/copy-constructor-elim-2.cpp b/test/CodeGenCXX/copy-constructor-elim-2.cpp index 9480cbf..4ff8775 100644 --- a/test/CodeGenCXX/copy-constructor-elim-2.cpp +++ b/test/CodeGenCXX/copy-constructor-elim-2.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple armv7-none-eabi -emit-llvm -o - %s | FileCheck %s struct A { int x; A(int); ~A(); }; A f() { return A(0); } @@ -66,7 +66,7 @@ namespace PR12139 { // CHECK: define i32 @_ZN7PR121394testEv int test() { // CHECK: call void @_ZN7PR121391A5makeAEv - // CHECK-NEXT: call void @_ZN7PR121391AC1ERKS0_i + // CHECK-NEXT: call %"struct.PR12139::A"* @_ZN7PR121391AC1ERKS0_i A a(A::makeA(), 3); // CHECK-NEXT: getelementptr inbounds // CHECK-NEXT: load diff --git a/test/CodeGenCXX/cxx0x-delegating-ctors.cpp b/test/CodeGenCXX/cxx0x-delegating-ctors.cpp index ad6492f..338159c 100644 --- a/test/CodeGenCXX/cxx0x-delegating-ctors.cpp +++ b/test/CodeGenCXX/cxx0x-delegating-ctors.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm -fexceptions -fcxx-exceptions -std=c++11 -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -fexceptions -fcxx-exceptions -std=c++11 -o - %s | FileCheck %s struct non_trivial { non_trivial(); diff --git a/test/CodeGenCXX/cxx0x-initializer-array.cpp b/test/CodeGenCXX/cxx0x-initializer-array.cpp index b773178..df68997 100644 --- a/test/CodeGenCXX/cxx0x-initializer-array.cpp +++ b/test/CodeGenCXX/cxx0x-initializer-array.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++11 -S -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -std=c++11 -S -emit-llvm -o - %s | FileCheck %s struct A { int a[1]; }; typedef A x[]; diff --git a/test/CodeGenCXX/cxx0x-initializer-constructors.cpp b/test/CodeGenCXX/cxx0x-initializer-constructors.cpp index 48fbe85..be5b44d 100644 --- a/test/CodeGenCXX/cxx0x-initializer-constructors.cpp +++ b/test/CodeGenCXX/cxx0x-initializer-constructors.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++11 -S -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -std=c++11 -S -triple x86_64-none-linux-gnu -emit-llvm -o - %s | FileCheck %s struct S { S(int x) { } diff --git a/test/CodeGenCXX/cxx0x-initializer-references.cpp b/test/CodeGenCXX/cxx0x-initializer-references.cpp index 660b018..10586c1 100644 --- a/test/CodeGenCXX/cxx0x-initializer-references.cpp +++ b/test/CodeGenCXX/cxx0x-initializer-references.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++11 -S -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -std=c++11 -S -triple 
armv7-none-eabi -emit-llvm -o - %s | FileCheck %s namespace reference { struct A { @@ -64,10 +64,10 @@ namespace reference { { // Ensure lifetime extension. - // CHECK: call void @_ZN9reference1BC1Ev + // CHECK: call %"struct.reference::B"* @_ZN9reference1BC1Ev // CHECK-NEXT: store %{{.*}}* %{{.*}}, %{{.*}}** % const B &rb{ B() }; - // CHECK: call void @_ZN9reference1BD1Ev + // CHECK: call %"struct.reference::B"* @_ZN9reference1BD1Ev } } diff --git a/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist-startend.cpp b/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist-startend.cpp index c533e45..209ee65 100644 --- a/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist-startend.cpp +++ b/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist-startend.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++11 -S -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -std=c++11 -S -triple x86_64-none-linux-gnu -emit-llvm -o - %s | FileCheck %s namespace std { typedef decltype(sizeof(int)) size_t; diff --git a/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp b/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp index 81ce559..8d9fce0 100644 --- a/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp +++ b/test/CodeGenCXX/cxx0x-initializer-stdinitializerlist.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++11 -S -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -std=c++11 -S -triple x86_64-none-linux-gnu -emit-llvm -o - %s | FileCheck %s namespace std { typedef decltype(sizeof(int)) size_t; diff --git a/test/CodeGenCXX/cxx11-special-members.cpp b/test/CodeGenCXX/cxx11-special-members.cpp new file mode 100644 index 0000000..59461f9 --- /dev/null +++ b/test/CodeGenCXX/cxx11-special-members.cpp @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 %s -std=c++11 -emit-llvm -o - -triple=i686-linux-gnu | FileCheck %s + +struct A { + A(const A&); + A &operator=(const A&); +}; + +struct B { + A a; + B(B&&) = default; + B &operator=(B&&) = default; +}; + +// CHECK: define {{.*}} @_Z2f1 +void f1(B &x) { + // CHECK-NOT: memcpy + // CHECK: call {{.*}} @_ZN1BC1EOS_( + B b(static_cast(x)); +} + +// CHECK: define {{.*}} @_Z2f2 +void f2(B &x, B &y) { + // CHECK-NOT: memcpy + // CHECK: call {{.*}} @_ZN1BaSEOS_( + x = static_cast(y); +} + +// CHECK: define {{.*}} @_ZN1BaSEOS_( +// CHECK: call {{.*}} @_ZN1AaSERKS_( + +// CHECK: define {{.*}} @_ZN1BC2EOS_( +// CHECK: call {{.*}} @_ZN1AC1ERKS_( diff --git a/test/CodeGenCXX/debug-info-artificial-arg.cpp b/test/CodeGenCXX/debug-info-artificial-arg.cpp index 35e5f27..ee93849 100644 --- a/test/CodeGenCXX/debug-info-artificial-arg.cpp +++ b/test/CodeGenCXX/debug-info-artificial-arg.cpp @@ -23,7 +23,7 @@ int main(int argc, char **argv) { } // FIXME: The numbers are truly awful. 
-// CHECK: !16 = metadata !{i32 {{.*}}, i32 0, metadata !"", i32 0, i32 0, i64 64, i64 64, i64 0, i32 64, metadata !17} ; [ DW_TAG_pointer_type ] +// CHECK: !16 = metadata !{i32 786447, i32 0, metadata !"", i32 0, i32 0, i64 64, i64 64, i64 0, i32 1088, metadata !17} ; [ DW_TAG_pointer_type ] [line 0, size 64, align 64, offset 0] [from A] // CHECK: !17 = metadata !{i32 {{.*}}, null, metadata !"A", metadata !6, i32 8, i64 128, i64 64, i32 0, i32 0, null, metadata !18, i32 0, metadata !17, null} ; [ DW_TAG_class_type ] // CHECK: metadata !17, metadata !"A", metadata !"A", metadata !"", metadata !6, i32 12, metadata !43, i1 false, i1 false, i32 0, i32 0, null, i32 256, i1 false, null, null, i32 0, metadata !45, i32 12} ; [ DW_TAG_subprogram ] // CHECK: metadata !"", i32 0, i32 0, i64 0, i64 0, i64 0, i32 0, null, metadata !44, i32 0, i32 0} ; [ DW_TAG_subroutine_type ] diff --git a/test/CodeGenCXX/debug-info-blocks.cpp b/test/CodeGenCXX/debug-info-blocks.cpp new file mode 100644 index 0000000..5b20db5 --- /dev/null +++ b/test/CodeGenCXX/debug-info-blocks.cpp @@ -0,0 +1,14 @@ +// RUN: %clang_cc1 %s -gline-tables-only -fblocks -S -emit-llvm -o - | FileCheck %s + +struct A { + A(); + A(const A &); + ~A(); +}; + +void test() { + __block A a; +} + +// CHECK: [ DW_TAG_subprogram ] [line 10] [local] [def] [__Block_byref_object_copy_] +// CHECK: [ DW_TAG_subprogram ] [line 10] [local] [def] [__Block_byref_object_dispose_] diff --git a/test/CodeGenCXX/debug-info-class.cpp b/test/CodeGenCXX/debug-info-class.cpp index 151c5f9..062227a 100644 --- a/test/CodeGenCXX/debug-info-class.cpp +++ b/test/CodeGenCXX/debug-info-class.cpp @@ -1,12 +1,24 @@ -// RUN: %clang -emit-llvm -g -S %s -o - | grep HdrSize -struct A { +// RUN: %clang -emit-llvm -g -S %s -o - | FileCheck %s +struct foo; +void func(foo *f) { // CHECK: DW_TAG_structure_type +} +class bar; +void func(bar *f) { // CHECK: DW_TAG_class_type +} +union baz; +void func(baz *f) { // CHECK: DW_TAG_union_type +} +struct A { // CHECK: DW_TAG_structure_type int one; - static const int HdrSize = 52; + static const int HdrSize = 52; // CHECK: HdrSize int two; A() { int x = 1; } }; +class B { // CHECK: DW_TAG_class_type +}; int main() { A a; + B b; } diff --git a/test/CodeGenCXX/debug-info-enum-class.cpp b/test/CodeGenCXX/debug-info-enum-class.cpp index 6f88439..fd243ab 100644 --- a/test/CodeGenCXX/debug-info-enum-class.cpp +++ b/test/CodeGenCXX/debug-info-enum-class.cpp @@ -12,4 +12,18 @@ D d; // CHECK: metadata !{i32 {{.*}}, null, metadata !"A", metadata !4, i32 3, i64 32, i64 32, i32 0, i32 0, metadata !5, metadata !6, i32 0, i32 0} ; [ DW_TAG_enumeration_type ] // CHECK: metadata !{i32 {{.*}}, null, metadata !"B", metadata !4, i32 4, i64 64, i64 64, i32 0, i32 0, metadata !9, metadata !10, i32 0, i32 0} ; [ DW_TAG_enumeration_type ] // CHECK: metadata !{i32 {{.*}}, null, metadata !"C", metadata !4, i32 5, i64 32, i64 32, i32 0, i32 0, null, metadata !13, i32 0, i32 0} ; [ DW_TAG_enumeration_type ] -// CHECK: metadata !{i32 {{.*}}, null, metadata !"D", metadata !4, i32 6, i64 16, i64 16, i32 0, i32 4, null, metadata !16, i32 0, i32 0} ; [ DW_TAG_enumeration_type ] +// CHECK: metadata !{i32 {{.*}}, null, metadata !"D", metadata !4, i32 6, i64 16, i64 16, i32 0, i32 4, null, null, i32 0} ; [ DW_TAG_enumeration_type ] + +namespace PR14029 { + // Make sure this doesn't crash/assert. 
+ template struct Test { + enum class Tag { + test = 0 + }; + Test() { + auto t = Tag::test; + } + Tag tag() const { return static_cast(1); } + }; + Test t; +} diff --git a/test/CodeGenCXX/debug-info-fwd-ref.cpp b/test/CodeGenCXX/debug-info-fwd-ref.cpp index e7f2ed1..9135032 100644 --- a/test/CodeGenCXX/debug-info-fwd-ref.cpp +++ b/test/CodeGenCXX/debug-info-fwd-ref.cpp @@ -16,11 +16,10 @@ int main(int argc, char** argv) { return 0; } -// Make sure we have two DW_TAG_class_types for baz and bar and no forward +// Make sure we have two DW_TAG_structure_types for baz and bar and no forward // references. -// FIXME: These should be struct types to match the declaration. -// CHECK: metadata !{i32 {{.*}}, null, metadata !"bar", metadata !6, i32 8, i64 128, i64 64, i32 0, i32 0, null, metadata !18, i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK: metadata !{i32 {{.*}}, null, metadata !"baz", metadata !6, i32 3, i64 32, i64 32, i32 0, i32 0, null, metadata !21, i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK-NOT: metadata !{i32 {{.*}}, null, metadata !"bar", metadata !6, i32 8, i64 0, i64 0, i32 0, i32 4, i32 0, null, i32 0, i32 0} ; [ DW_TAG_class_type ] -// CHECK-NOT: metadata !{i32 {{.*}}, null, metadata !"baz", metadata !6, i32 3, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null} ; [ DW_TAG_class_type ] +// CHECK: metadata !{i32 {{.*}}, null, metadata !"bar", metadata !6, i32 8, i64 128, i64 64, i32 0, i32 0, null, metadata !18, i32 0, null, null} ; [ DW_TAG_structure_type ] +// CHECK: metadata !{i32 {{.*}}, null, metadata !"baz", metadata !6, i32 3, i64 32, i64 32, i32 0, i32 0, null, metadata !21, i32 0, null, null} ; [ DW_TAG_structure_type ] +// CHECK-NOT: metadata !{i32 {{.*}}, null, metadata !"bar", metadata !6, i32 8, i64 0, i64 0, i32 0, i32 4, i32 0, null, i32 0, i32 0} ; [ DW_TAG_structure_type ] +// CHECK-NOT: metadata !{i32 {{.*}}, null, metadata !"baz", metadata !6, i32 3, i64 0, i64 0, i32 0, i32 4, null, null, i32 0, null, null} ; [ DW_TAG_structure_type ] diff --git a/test/CodeGenCXX/debug-info-global-ctor-dtor.cpp b/test/CodeGenCXX/debug-info-global-ctor-dtor.cpp new file mode 100644 index 0000000..bdaf58c --- /dev/null +++ b/test/CodeGenCXX/debug-info-global-ctor-dtor.cpp @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 %s -g -fno-use-cxa-atexit -S -emit-llvm -o - \ +// RUN: | FileCheck %s --check-prefix=CHECK-NOKEXT +// RUN: %clang_cc1 %s -g -fno-use-cxa-atexit -fapple-kext -S -emit-llvm -o - \ +// RUN: | FileCheck %s --check-prefix=CHECK-KEXT + +class A { + public: + A() {} + virtual ~A() {} +}; + +A glob; +A array[2]; + +void foo() { + static A stat; +} + +// CHECK-NOKEXT: [ DW_TAG_subprogram ] [line 12] [local] [def] [__cxx_global_var_init] +// CHECK-NOKEXT: [ DW_TAG_subprogram ] [line 12] [local] [def] [__dtor_glob] +// CHECK-NOKEXT: [ DW_TAG_subprogram ] [line 13] [local] [def] [__cxx_global_var_init1] +// CHECK-NOKEXT: [ DW_TAG_subprogram ] [line 13] [local] [def] [__cxx_global_array_dtor] +// CHECK-NOKEXT: [ DW_TAG_subprogram ] [line 13] [local] [def] [__dtor_] +// CHECK-NOKEXT: [ DW_TAG_subprogram ] [line 16] [local] [def] [__dtor__ZZ3foovE4stat] +// CHECK-NOKEXT: [ DW_TAG_subprogram ] [line {{.*}}] [local] [def] [_GLOBAL__I_a] + +// CHECK-KEXT: [ DW_TAG_subprogram ] [line {{.*}}] [local] [def] [_GLOBAL__D_a] diff --git a/test/CodeGenCXX/debug-info-globalinit.cpp b/test/CodeGenCXX/debug-info-globalinit.cpp index ff50fac..b3891c1 100644 --- a/test/CodeGenCXX/debug-info-globalinit.cpp +++ b/test/CodeGenCXX/debug-info-globalinit.cpp @@ -27,4 +27,4 @@ int 
main(void) {} // CHECK-NOT: dbg // CHECK: store i32 %[[C1]], i32* @_ZL1j, align 4 // -// CHECK: ![[LINE]] = metadata !{i32 13, i32 16 +// CHECK: ![[LINE]] = metadata !{i32 13, i32 diff --git a/test/CodeGenCXX/debug-info-pubtypes.cpp b/test/CodeGenCXX/debug-info-pubtypes.cpp index a7abade..612b6b5 100644 --- a/test/CodeGenCXX/debug-info-pubtypes.cpp +++ b/test/CodeGenCXX/debug-info-pubtypes.cpp @@ -1,5 +1,5 @@ // REQUIRES: x86-64-registered-target -// RUN: %clang -cc1 -triple x86_64-apple-darwin10 -g -fno-limit-debug-info -S %s -o %t +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -g -fno-limit-debug-info -S %s -o %t // RUN: FileCheck %s < %t // FIXME: This testcase shouldn't rely on assembly emission. diff --git a/test/CodeGenCXX/debug-info-template-member.cpp b/test/CodeGenCXX/debug-info-template-member.cpp index f21718d..6208c80 100644 --- a/test/CodeGenCXX/debug-info-template-member.cpp +++ b/test/CodeGenCXX/debug-info-template-member.cpp @@ -17,5 +17,5 @@ private: MyClass m; // CHECK: metadata !{i32 {{.*}}, null, metadata !"MyClass", metadata {{.*}}, i32 {{.*}}, i64 8, i64 8, i32 0, i32 0, null, metadata [[C_MEM:.*]], i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK: [[C_MEM]] = metadata !{metadata {{.*}}, metadata [[C_TEMP:.*]]} +// CHECK: [[C_MEM]] = metadata !{metadata {{.*}}, metadata [[C_TEMP:.*]], metadata {{.*}}} // CHECK: [[C_TEMP]] = metadata !{i32 {{.*}}, i32 0, metadata {{.*}}, metadata !"add<2>", metadata !"add<2>", metadata !"_ZN7MyClass3addILi2EEEii", metadata {{.*}} diff --git a/test/CodeGenCXX/debug-info-template-quals.cpp b/test/CodeGenCXX/debug-info-template-quals.cpp index e5a9082..ffb1ca3 100644 --- a/test/CodeGenCXX/debug-info-template-quals.cpp +++ b/test/CodeGenCXX/debug-info-template-quals.cpp @@ -20,4 +20,4 @@ void foo (const char *c) { // CHECK: [[CH]] = metadata !{i32 {{.*}}, metadata !"char", {{.*}}} ; [ DW_TAG_base_type ] [char] [line 0, size 8, align 8, offset 0, enc DW_ATE_signed_char] // CHECK: metadata !{i32 {{.*}}, metadata !"_ZN12basic_stringIcE6assignEPKc", metadata !6, i32 7, metadata [[TYPE:.*]], i1 false, i1 true, i32 0, i32 0, null, i32 256, i1 false, %struct.basic_string* (%struct.basic_string*, i8*)* @_ZN12basic_stringIcE6assignEPKc, null, metadata !18, metadata !1, i32 8} ; [ DW_TAG_subprogram ] [line 7] [def] [scope 8] [assign] // CHECK: [[TYPE]] = metadata !{i32 {{.*}}, null, metadata [[ARGS:.*]], i32 0, i32 0} -// CHECK: [[ARGS]] = metadata !{metadata !15, metadata !23, metadata [[P]]} +// CHECK: [[ARGS]] = metadata !{metadata !15, metadata !24, metadata [[P]]} diff --git a/test/CodeGenCXX/debug-info-thunk.cpp b/test/CodeGenCXX/debug-info-thunk.cpp new file mode 100644 index 0000000..394ebd8 --- /dev/null +++ b/test/CodeGenCXX/debug-info-thunk.cpp @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 %s -g -S -emit-llvm -o - | FileCheck %s + +struct A { + virtual void f(); +}; + +struct B { + virtual void f(); +}; + +struct C : A, B { + virtual void f(); +}; + +void C::f() { } + +// CHECK: [ DW_TAG_subprogram ] [line 15] [def] [_ZThn{{4|8}}_N1C1fEv] diff --git a/test/CodeGenCXX/debug-info-user-def.cpp b/test/CodeGenCXX/debug-info-user-def.cpp deleted file mode 100644 index ecd3878..0000000 --- a/test/CodeGenCXX/debug-info-user-def.cpp +++ /dev/null @@ -1,14 +0,0 @@ -// RUN: %clang_cc1 -emit-llvm -g -triple x86_64-apple-darwin -std=c++11 %s -o - | FileCheck %s - -class A { -}; - -template class B { - T t; -}; - -A a; -B b; - -// Check that no subprograms are emitted into debug info. 
-// CHECK-NOT: [ DW_TAG_subprogram ] diff --git a/test/CodeGenCXX/debug-lambda-expressions.cpp b/test/CodeGenCXX/debug-lambda-expressions.cpp index 1c82399..430371f 100644 --- a/test/CodeGenCXX/debug-lambda-expressions.cpp +++ b/test/CodeGenCXX/debug-lambda-expressions.cpp @@ -31,39 +31,41 @@ int d(int x) { D y[10]; [x,y] { return y[x].x; }(); } // Back to D. -- 24 // CHECK: [[LAM_D:.*]] = metadata !{i32 {{.*}}, metadata [[D_FUNC]], metadata !"", metadata [[FILE]], i32 [[D_LINE]], i64 352, i64 32, i32 0, i32 0, null, metadata [[LAM_D_ARGS:.*]], i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK: [[LAM_D_ARGS]] = metadata !{metadata [[CAP_D_X:.*]], metadata [[CAP_D_Y:.*]], metadata [[CON_LAM_D:.*]]} +// CHECK: [[LAM_D_ARGS]] = metadata !{metadata [[CAP_D_X:.*]], metadata [[CAP_D_Y:.*]], metadata [[CON_LAM_D:.*]], metadata [[DES_LAM_D:.*]]} // CHECK: [[CAP_D_X]] = metadata !{i32 {{.*}}, metadata [[LAM_D]], metadata !"x", metadata [[FILE]], i32 [[D_LINE]], i64 32, i64 32, i64 0, i32 1, metadata {{.*}} ; [ DW_TAG_member ] // CHECK: [[CAP_D_Y]] = metadata !{i32 {{.*}}, metadata [[LAM_D]], metadata !"y", metadata [[FILE]], i32 [[D_LINE]], i64 320, i64 32, i64 32, i32 1, metadata {{.*}} ; [ DW_TAG_member ] // CHECK: [[CON_LAM_D]] = metadata {{.*}}[[LAM_D]], metadata !"operator()", metadata !"operator()"{{.*}}[ DW_TAG_subprogram ] +// CHECK: [[DES_LAM_D]] = metadata {{.*}}[[LAM_D]], metadata !"~", metadata !"~"{{.*}}[ DW_TAG_subprogram ] // Back to C. -- 55 // CHECK: [[LAM_C:.*]] = metadata !{i32 {{.*}}, metadata [[C_FUNC]], metadata !"", metadata [[FILE]], i32 [[C_LINE]], i64 64, i64 64, i32 0, i32 0, null, metadata [[LAM_C_ARGS:.*]], i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK: [[LAM_C_ARGS]] = metadata !{metadata [[CAP_C:.*]], metadata [[CON_LAM_C:.*]]} +// CHECK: [[LAM_C_ARGS]] = metadata !{metadata [[CAP_C:.*]], metadata [[CON_LAM_C:.*]], metadata [[DES_LAM_C:.*]]} // Ignoring the member type for now. // CHECK: [[CAP_C]] = metadata !{i32 {{.*}}, metadata [[LAM_C]], metadata !"x", metadata [[FILE]], i32 [[C_LINE]], i64 64, i64 64, i64 0, i32 1, metadata {{.*}}} ; [ DW_TAG_member ] // CHECK: [[CON_LAM_C]] = metadata {{.*}}[[LAM_C]], metadata !"operator()", metadata !"operator()"{{.*}}[ DW_TAG_subprogram ] +// CHECK: [[DES_LAM_C]] = metadata {{.*}}[[LAM_C]], metadata !"~", metadata !"~"{{.*}}[ DW_TAG_subprogram ] // Back to B. -- 67 // CHECK: [[LAM_B:.*]] = metadata !{i32 {{.*}}, metadata [[B_FUNC]], metadata !"", metadata [[FILE]], i32 [[B_LINE]], i64 32, i64 32, i32 0, i32 0, null, metadata [[LAM_B_ARGS:.*]], i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK: [[LAM_B_ARGS]] = metadata !{metadata [[CAP_B:.*]], metadata [[CON_LAM_B:.*]]} +// CHECK: [[LAM_B_ARGS]] = metadata !{metadata [[CAP_B:.*]], metadata [[CON_LAM_B:.*]], metadata [[DES_LAM_B:.*]]} // CHECK: [[CAP_B]] = metadata !{i32 {{.*}}, metadata [[LAM_B]], metadata !"x", metadata [[FILE]], i32 [[B_LINE]], i64 32, i64 32, i64 0, i32 1, metadata {{.*}}} ; [ DW_TAG_member ] // CHECK: [[CON_LAM_B]] = metadata {{.*}}[[LAM_B]], metadata !"operator()", metadata !"operator()"{{.*}}[ DW_TAG_subprogram ] - +// CHECK: [[DES_LAM_B]] = metadata {{.*}}[[LAM_B]], metadata !"~", metadata !"~"{{.*}}[ DW_TAG_subprogram ] // Back to A. 
-- 78 // CHECK: [[LAM_A:.*]] = metadata !{i32 {{.*}}, metadata [[A_FUNC]], metadata !"", metadata [[FILE]], i32 [[A_LINE]], i64 8, i64 8, i32 0, i32 0, null, metadata [[LAM_A_ARGS:.*]], i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK: [[LAM_A_ARGS]] = metadata !{metadata [[CON_LAM_A:.*]]} +// CHECK: [[LAM_A_ARGS]] = metadata !{metadata [[CON_LAM_A:.*]], metadata [[DES_LAM_A:.*]]} // CHECK: [[CON_LAM_A]] = metadata {{.*}}[[LAM_A]], metadata !"operator()", metadata !"operator()"{{.*}}[ DW_TAG_subprogram ] - +// CHECK: [[DES_LAM_A]] = metadata {{.*}}[[LAM_A]], metadata !"~", metadata !"~"{{.*}}[ DW_TAG_subprogram ] // CVAR: // CHECK: metadata !{i32 {{.*}}, i32 0, null, metadata !"cvar", metadata !"cvar", metadata !"", metadata [[FILE]], i32 [[CVAR_LINE:.*]], metadata ![[CVAR_T:.*]], i32 0, i32 1, %class.anon.0* @cvar} ; [ DW_TAG_variable ] // CHECK: [[CVAR_T]] = metadata !{i32 {{.*}}, null, metadata !"", metadata [[FILE]], i32 [[CVAR_LINE]], i64 8, i64 8, i32 0, i32 0, null, metadata ![[CVAR_ARGS:.*]], i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK: [[CVAR_ARGS]] = metadata !{metadata !{{.*}}} +// CHECK: [[CVAR_ARGS]] = metadata !{metadata !{{.*}}, metadata !{{.*}}, metadata !{{.*}}} // VAR: // CHECK: metadata !{i32 {{.*}}, i32 0, null, metadata !"var", metadata !"var", metadata !"", metadata [[FILE]], i32 [[VAR_LINE:.*]], metadata ![[VAR_T:.*]], i32 1, i32 1, %class.anon* @var} ; [ DW_TAG_variable ] // CHECK: [[VAR_T]] = metadata !{i32 {{.*}}, null, metadata !"", metadata [[FILE]], i32 [[VAR_LINE]], i64 8, i64 8, i32 0, i32 0, null, metadata ![[VAR_ARGS:.*]], i32 0, null, null} ; [ DW_TAG_class_type ] -// CHECK: [[VAR_ARGS]] = metadata !{metadata !{{.*}}} +// CHECK: [[VAR_ARGS]] = metadata !{metadata !{{.*}}, metadata !{{.*}}, metadata !{{.*}}} diff --git a/test/CodeGenCXX/debug-lambda-this.cpp b/test/CodeGenCXX/debug-lambda-this.cpp new file mode 100644 index 0000000..7c37fbe --- /dev/null +++ b/test/CodeGenCXX/debug-lambda-this.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin10.0.0 -emit-llvm -o - %s -fexceptions -std=c++11 -g | FileCheck %s + +struct D { + D(); + D(const D&); + int x; + int d(int x); +}; +int D::d(int x) { + [=] { + return this->x; + }(); +} + +// CHECK: metadata !{i32 {{.*}}, metadata !"this", metadata !6, i32 11, i64 64, i64 64, i64 0, i32 1, metadata !37} ; [ DW_TAG_member ] [this] [line 11, size 64, align 64, offset 0] [private] [from ] diff --git a/test/CodeGenCXX/delete.cpp b/test/CodeGenCXX/delete.cpp index 5a88f9f..7a91ca8 100644 --- a/test/CodeGenCXX/delete.cpp +++ b/test/CodeGenCXX/delete.cpp @@ -113,12 +113,23 @@ namespace test4 { // CHECK: define void @_ZN5test421global_delete_virtualEPNS_1XE void global_delete_virtual(X *xp) { - // CHECK: [[VTABLE:%.*]] = load void ([[X:%.*]])*** - // CHECK-NEXT: [[VFN:%.*]] = getelementptr inbounds void ([[X]])** [[VTABLE]], i64 0 - // CHECK-NEXT: [[VFNPTR:%.*]] = load void ([[X]])** [[VFN]] - // CHECK-NEXT: call void [[VFNPTR]]([[X]] [[OBJ:%.*]]) - // CHECK-NEXT: [[OBJVOID:%.*]] = bitcast [[X]] [[OBJ]] to i8* - // CHECK-NEXT: call void @_ZdlPv(i8* [[OBJVOID]]) nounwind + // Load the offset-to-top from the vtable and apply it. + // This has to be done first because the dtor can mess it up. 
+ // CHECK: [[T0:%.*]] = bitcast [[X:%.*]]* [[XP:%.*]] to i64** + // CHECK-NEXT: [[VTABLE:%.*]] = load i64** [[T0]] + // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds i64* [[VTABLE]], i64 -2 + // CHECK-NEXT: [[OFFSET:%.*]] = load i64* [[T0]], align 8 + // CHECK-NEXT: [[T0:%.*]] = bitcast [[X]]* [[XP]] to i8* + // CHECK-NEXT: [[ALLOCATED:%.*]] = getelementptr inbounds i8* [[T0]], i64 [[OFFSET]] + // Load the complete-object destructor (not the deleting destructor) + // and call it. + // CHECK-NEXT: [[T0:%.*]] = bitcast [[X:%.*]]* [[XP:%.*]] to void ([[X]]*)*** + // CHECK-NEXT: [[VTABLE:%.*]] = load void ([[X]]*)*** [[T0]] + // CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds void ([[X]]*)** [[VTABLE]], i64 0 + // CHECK-NEXT: [[DTOR:%.*]] = load void ([[X]]*)** [[T0]] + // CHECK-NEXT: call void [[DTOR]]([[X]]* [[OBJ:%.*]]) + // Call the global operator delete. + // CHECK-NEXT: call void @_ZdlPv(i8* [[ALLOCATED]]) nounwind ::delete xp; } } diff --git a/test/CodeGenCXX/dependent-type-member-pointer.cpp b/test/CodeGenCXX/dependent-type-member-pointer.cpp index 41bb5e2..99b8ecd 100644 --- a/test/CodeGenCXX/dependent-type-member-pointer.cpp +++ b/test/CodeGenCXX/dependent-type-member-pointer.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only -verify %s +// expected-no-diagnostics // PR7736 template int InitMember(scriptmemberptr); diff --git a/test/CodeGenCXX/destructor-debug-info.cpp b/test/CodeGenCXX/destructor-debug-info.cpp index 385c86d..f2e2a39 100644 --- a/test/CodeGenCXX/destructor-debug-info.cpp +++ b/test/CodeGenCXX/destructor-debug-info.cpp @@ -19,4 +19,4 @@ void foo() { } } // Check there is a line number entry for line 19 where b1 is destructed. -// CHECK: i32 19, i32 3, metadata +// CHECK: i32 19, i32 0, metadata diff --git a/test/CodeGenCXX/devirtualize-virtual-function-calls-final.cpp b/test/CodeGenCXX/devirtualize-virtual-function-calls-final.cpp index 634bf84..40f3cad 100644 --- a/test/CodeGenCXX/devirtualize-virtual-function-calls-final.cpp +++ b/test/CodeGenCXX/devirtualize-virtual-function-calls-final.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -std=c++11 %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -std=c++11 %s -emit-llvm -o - | FileCheck %s namespace Test1 { struct A { @@ -131,8 +131,7 @@ namespace Test7 { // CHECK: alloca // CHECK-NEXT: store // CHECK-NEXT: load - // CHECK-NEXT: bitcast - // CHECK-NEXT: call {{.*}} @_ZN5Test73zed1fEv + // CHECK-NEXT: call i32 @_ZN5Test73zed1fEv // CHECK-NEXT: ret return static_cast(z)->f(); } diff --git a/test/CodeGenCXX/devirtualize-virtual-function-calls.cpp b/test/CodeGenCXX/devirtualize-virtual-function-calls.cpp index 7ef4864..52f1cd3 100644 --- a/test/CodeGenCXX/devirtualize-virtual-function-calls.cpp +++ b/test/CodeGenCXX/devirtualize-virtual-function-calls.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple armv7-none-eabi -emit-llvm -o - | FileCheck %s struct A { virtual void f(); @@ -66,7 +66,7 @@ namespace test2 { void f(bar *b) { // CHECK: call void @_ZN5test23foo1fEv - // CHECK: call void @_ZN5test23fooD1Ev + // CHECK: call %"struct.test2::foo"* @_ZN5test23fooD1Ev b->foo::f(); b->foo::~foo(); } diff --git a/test/CodeGenCXX/enum.cpp b/test/CodeGenCXX/enum.cpp index cfcd264..3985e96 100644 --- a/test/CodeGenCXX/enum.cpp +++ b/test/CodeGenCXX/enum.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only -verify %s +// expected-no-diagnostics enum A { a } __attribute((packed)); int func(A x) { return x==a; } diff --git 
a/test/CodeGenCXX/exceptions.cpp b/test/CodeGenCXX/exceptions.cpp index 8c20c9e..723e8d1 100644 --- a/test/CodeGenCXX/exceptions.cpp +++ b/test/CodeGenCXX/exceptions.cpp @@ -365,6 +365,7 @@ namespace test7 { // CHECK-NEXT: invoke void @_ZN5test71BC1ERKNS_1AEPS0_( // CHECK: store i1 false, i1* [[OUTER_NEW]] // CHECK: phi + // CHECK-NEXT: store [[B]]* // Destroy the inner A object. // CHECK-NEXT: load i1* [[INNER_A]] diff --git a/test/CodeGenCXX/fastcall.cpp b/test/CodeGenCXX/fastcall.cpp new file mode 100644 index 0000000..c0a9106 --- /dev/null +++ b/test/CodeGenCXX/fastcall.cpp @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck %s + +void __attribute__((fastcall)) foo1(int &y); +void bar1(int &y) { + // CHECK: define void @_Z4bar1Ri + // CHECK: call x86_fastcallcc void @_Z4foo1Ri(i32* inreg % + foo1(y); +} + +struct S1 { + int x; + S1(const S1 &y); +}; + +void __attribute__((fastcall)) foo2(S1 a, int b); +void bar2(S1 a, int b) { + // CHECK: define void @_Z4bar22S1i + // CHECK: call x86_fastcallcc void @_Z4foo22S1i(%struct.S1* inreg %{{.*}}, i32 inreg % + foo2(a, b); +} diff --git a/test/CodeGenCXX/for-range.cpp b/test/CodeGenCXX/for-range.cpp index 0f35dda..926fe44 100644 --- a/test/CodeGenCXX/for-range.cpp +++ b/test/CodeGenCXX/for-range.cpp @@ -27,10 +27,8 @@ struct D { B *end(); }; -namespace std { - B *begin(C&); - B *end(C&); -} +B *begin(C&); +B *end(C&); extern B array[5]; @@ -42,7 +40,7 @@ void for_array() { // CHECK-NOT: 5begin // CHECK-NOT: 3end // CHECK: getelementptr {{.*}}, i32 0 - // CHECK: getelementptr {{.*}}, i64 5 + // CHECK: getelementptr {{.*}}, i64 1, i64 0 // CHECK: br label %[[COND:.*]] // CHECK: [[COND]]: @@ -69,8 +67,8 @@ void for_range() { A a; for (B b : C()) { // CHECK: call void @_ZN1CC1Ev( - // CHECK: = call %struct.B* @_ZSt5beginR1C( - // CHECK: = call %struct.B* @_ZSt3endR1C( + // CHECK: = call %struct.B* @_Z5beginR1C( + // CHECK: = call %struct.B* @_Z3endR1C( // CHECK: br label %[[COND:.*]] // CHECK: [[COND]]: diff --git a/test/CodeGenCXX/implicit-copy-constructor.cpp b/test/CodeGenCXX/implicit-copy-constructor.cpp index 8bc84a5..8a3a422 100644 --- a/test/CodeGenCXX/implicit-copy-constructor.cpp +++ b/test/CodeGenCXX/implicit-copy-constructor.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -o - %s -std=c++11 | FileCheck %s struct A { A(); @@ -80,3 +80,29 @@ namespace test3 { y = x; } } + +namespace test4 { + // When determining whether to implement an array copy as a memcpy, look at + // whether the *selected* constructor is trivial. 
+ struct S { + int arr[5][5]; + S(S &); + S(const S &) = default; + }; + // CHECK: @_ZN5test42f1 + void f1(S a) { + // CHECK-NOT: memcpy + // CHECK: call void @_ZN5test41SC1ERS0_ + // CHECK-NOT: memcpy + S b(a); + // CHECK: } + } + // CHECK: @_ZN5test42f2 + void f2(const S a) { + // CHECK-NOT: call void @_ZN5test41SC1ERS0_ + // CHECK: memcpy + // CHECK-NOT: call void @_ZN5test41SC1ERS0_ + S b(a); + // CHECK: } + } +} diff --git a/test/CodeGenCXX/incomplete-types.cpp b/test/CodeGenCXX/incomplete-types.cpp index 1d4f430..802ed46 100644 --- a/test/CodeGenCXX/incomplete-types.cpp +++ b/test/CodeGenCXX/incomplete-types.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm-only -verify +// expected-no-diagnostics // PR5489 template diff --git a/test/CodeGenCXX/init-priority-attr.cpp b/test/CodeGenCXX/init-priority-attr.cpp new file mode 100644 index 0000000..ef9343c --- /dev/null +++ b/test/CodeGenCXX/init-priority-attr.cpp @@ -0,0 +1,46 @@ +// RUN: %clang_cc1 %s -triple x86_64-apple-darwin10 -emit-llvm -o - | FileCheck %s +// PR + +void foo(int); + +class A { +public: + A() { foo(1); } +}; + +class A1 { +public: + A1() { foo(2); } +}; + +class B { +public: + B() { foo(3); } +}; + +class C { +public: + static A a; + C() { foo(4); } +}; + + +A C::a = A(); + +// CHECK: @llvm.global_ctors = appending global [3 x { i32, void ()* }] [{ i32, void ()* } { i32 200, void ()* @_GLOBAL__I_000200 }, { i32, void ()* } { i32 300, void ()* @_GLOBAL__I_000300 }, { i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }] + +// CHECK: _GLOBAL__I_000200() +// CHECK_NEXT: _Z3fooi(i32 3) + +// CHECK: _GLOBAL__I_000300() +// CHECK_NEXT: _Z3fooi(i32 2) +// CHECK_NEXT: _Z3fooi(i32 1) + +// CHECK: _GLOBAL__I_a() +// CHECK_NEXT: _Z3fooi(i32 1) +// CHECK_NEXT: _Z3fooi(i32 4) + +C c; +A1 a1 __attribute__((init_priority (300))); +A a __attribute__((init_priority (300))); +B b __attribute__((init_priority (200))); diff --git a/test/CodeGenCXX/instantiate-init-list.cpp b/test/CodeGenCXX/instantiate-init-list.cpp index 49c6f51..e498d24 100644 --- a/test/CodeGenCXX/instantiate-init-list.cpp +++ b/test/CodeGenCXX/instantiate-init-list.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm-only -verify +// expected-no-diagnostics struct F { void (*x)(); diff --git a/test/CodeGenCXX/lambda-expressions.cpp b/test/CodeGenCXX/lambda-expressions.cpp index e872cc4..cee4f17 100644 --- a/test/CodeGenCXX/lambda-expressions.cpp +++ b/test/CodeGenCXX/lambda-expressions.cpp @@ -71,6 +71,15 @@ void f() { int (*fp)(int, int) = [](int x, int y){ return x + y; }; } +static int k; +int g() { + int &r = k; + // CHECK: define internal i32 @"_ZZ1gvENK3$_6clEv"( + // CHECK-NOT: } + // CHECK: load i32* @_ZL1k, + return [] { return r; } (); +}; + // CHECK: define internal i32 @"_ZZ1fvEN3$_58__invokeEii" // CHECK: store i32 // CHECK-NEXT: store i32 diff --git a/test/CodeGenCXX/mangle-exprs.cpp b/test/CodeGenCXX/mangle-exprs.cpp index 30da4fb..2d7a883 100644 --- a/test/CodeGenCXX/mangle-exprs.cpp +++ b/test/CodeGenCXX/mangle-exprs.cpp @@ -190,4 +190,11 @@ namespace test4 { // CHECK: void @_ZN5test43tf3INS_1XEEEvDTnw_T_ilLi1EEE template void tf3(X*); + +} + +namespace test5 { + template void a(decltype(noexcept(T()))) {} + template void a(decltype(noexcept(int()))); + // CHECK: void @_ZN5test51aIiEEvDTnxcvT__EE( } diff --git a/test/CodeGenCXX/mangle-extern-local.cpp b/test/CodeGenCXX/mangle-extern-local.cpp index ed91da4..1394c8f 100644 --- a/test/CodeGenCXX/mangle-extern-local.cpp +++ b/test/CodeGenCXX/mangle-extern-local.cpp @@ -1,4 +1,4 @@ -// RUN: 
%clang_cc1 %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple i386-unknown-unknown -emit-llvm -o - | FileCheck %s // CHECK: @var1 = external global i32 // CHECK: @_ZN1N4var2E = external global i32 diff --git a/test/CodeGenCXX/mangle-lambdas.cpp b/test/CodeGenCXX/mangle-lambdas.cpp index 16ddf48..0bd5ad2 100644 --- a/test/CodeGenCXX/mangle-lambdas.cpp +++ b/test/CodeGenCXX/mangle-lambdas.cpp @@ -172,6 +172,33 @@ template int PR12917::n[3] = { PR12917 pr12917; int *pr12917_p = PR12917::n; +namespace std { + struct type_info; +} +namespace PR12123 { + struct A { virtual ~A(); } g; + struct B { + void f(const std::type_info& x = typeid([]()->A& { return g; }())); + void h(); + }; + void B::h() { f(); } +} +// CHECK: define linkonce_odr %"struct.PR12123::A"* @_ZZN7PR121231B1fERKSt9type_infoEd_NKUlvE_clEv + +namespace PR12808 { + template struct B { + int a; + template constexpr B(L&& x) : a(x()) { } + }; + template void b(int) { + [&]{ (void)B([&]{ return 1; }); }(); + } + void f() { + b(1); + } + // CHECK: define linkonce_odr void @_ZZN7PR128081bIiEEviENKS0_IiEUlvE_clEv + // CHECK: define linkonce_odr i32 @_ZZZN7PR128081bIiEEviENKS0_IiEUlvE_clEvENKUlvE_clEv +} // CHECK: define linkonce_odr void @_Z1fIZZNK23TestNestedInstantiationclEvENKUlvE_clEvEUlvE_EvT_ diff --git a/test/CodeGenCXX/mangle-ms-arg-qualifiers.cpp b/test/CodeGenCXX/mangle-ms-arg-qualifiers.cpp new file mode 100644 index 0000000..0ac9b3f --- /dev/null +++ b/test/CodeGenCXX/mangle-ms-arg-qualifiers.cpp @@ -0,0 +1,78 @@ +// RUN: %clang_cc1 -emit-llvm %s -o - -cxx-abi microsoft -triple=i386-pc-win32 | FileCheck %s + +void foo(const unsigned int) {} +// CHECK: "\01?foo@@YAXI@Z" + +void foo(const double) {} +// CHECK: "\01?foo@@YAXN@Z" + +void bar(const volatile double) {} +// CHECK: "\01?bar@@YAXN@Z" + +void foo_pad(char * x) {} +// CHECK: "\01?foo_pad@@YAXPAD@Z" + +void foo_pbd(const char * x) {} +// CHECK: "\01?foo_pbd@@YAXPBD@Z" + +void foo_pcd(volatile char * x) {} +// CHECK: "\01?foo_pcd@@YAXPCD@Z" + +void foo_qad(char * const x) {} +// CHECK: "\01?foo_qad@@YAXQAD@Z" + +void foo_rad(char * volatile x) {} +// CHECK: "\01?foo_rad@@YAXRAD@Z" + +void foo_sad(char * const volatile x) {} +// CHECK: "\01?foo_sad@@YAXSAD@Z" + +void foo_papad(char ** x) {} +// CHECK: "\01?foo_papad@@YAXPAPAD@Z" + +void foo_papbd(char const ** x) {} +// CHECK: "\01?foo_papbd@@YAXPAPBD@Z" + +void foo_papcd(char volatile ** x) {} +// CHECK: "\01?foo_papcd@@YAXPAPCD@Z" + +void foo_pbqad(char * const* x) {} +// CHECK: "\01?foo_pbqad@@YAXPBQAD@Z" + +void foo_pcrad(char * volatile* x) {} +// CHECK: "\01?foo_pcrad@@YAXPCRAD@Z" + +void foo_qapad(char ** const x) {} +// CHECK: "\01?foo_qapad@@YAXQAPAD@Z" + +void foo_rapad(char ** volatile x) {} +// CHECK: "\01?foo_rapad@@YAXRAPAD@Z" + +void foo_pbqbd(const char * const* x) {} +// CHECK: "\01?foo_pbqbd@@YAXPBQBD@Z" + +void foo_pbqcd(volatile char * const* x) {} +// CHECK: "\01?foo_pbqcd@@YAXPBQCD@Z" + +void foo_pcrbd(const char * volatile* x) {} +// CHECK: "\01?foo_pcrbd@@YAXPCRBD@Z" + +void foo_pcrcd(volatile char * volatile* x) {} +// CHECK: "\01?foo_pcrcd@@YAXPCRCD@Z" + +typedef double Vector[3]; + +void foo(Vector*) {} +// CHECK: "\01?foo@@YAXPAY02N@Z" + +void foo(Vector) {} +// CHECK: "\01?foo@@YAXQAN@Z" + +void foo_const(const Vector) {} +// CHECK: "\01?foo_const@@YAXQBN@Z" + +void foo_volatile(volatile Vector) {} +// CHECK: "\01?foo_volatile@@YAXQCN@Z" + +void foo(Vector*, const Vector, const double) {} +// CHECK: "\01?foo@@YAXPAY02NQBNN@Z" diff --git 
a/test/CodeGenCXX/mangle-ms-return-qualifiers.cpp b/test/CodeGenCXX/mangle-ms-return-qualifiers.cpp index a5d03b3..63bc4a9 100644 --- a/test/CodeGenCXX/mangle-ms-return-qualifiers.cpp +++ b/test/CodeGenCXX/mangle-ms-return-qualifiers.cpp @@ -167,7 +167,4 @@ function_pointer* g3() { return 0; } // CHECK: "\01?g3@@YAPAP6AHH@ZXZ" const function_pointer* g4() { return 0; } -// The mangling of g4 is currently "\01?g4@@YAPQ6AHH@ZXZ" which is wrong. -// This looks related to http://llvm.org/PR13444 -// FIXME: replace CHECK-NOT with CHECK once it is fixed. -// CHECK-NOT: "\01?g4@@YAPBQ6AHH@ZXZ" +// CHECK: "\01?g4@@YAPBQ6AHH@ZXZ" diff --git a/test/CodeGenCXX/mangle-ms-template-callback.cpp b/test/CodeGenCXX/mangle-ms-template-callback.cpp new file mode 100644 index 0000000..6878148 --- /dev/null +++ b/test/CodeGenCXX/mangle-ms-template-callback.cpp @@ -0,0 +1,72 @@ +// RUN: %clang_cc1 -fblocks -emit-llvm %s -o - -cxx-abi microsoft -triple=i386-pc-win32 | FileCheck %s + +template +class C; + +template +class C {}; +typedef C C0; + +template +class C {}; + +template +class C {}; + +C0 callback_void; +// CHECK: "\01?callback_void@@3V?$C@$$A6AXXZ@@A" + +volatile C0 callback_void_volatile; +// CHECK: "\01?callback_void_volatile@@3V?$C@$$A6AXXZ@@C" + +class Type {}; + +C callback_int; +// CHECK: "\01?callback_int@@3V?$C@$$A6AHXZ@@A" +C callback_Type; +// CHECK: "\01?callback_Type@@3V?$C@$$A6A?AVType@@XZ@@A" + +C callback_void_int; +// CHECK: "\01?callback_void_int@@3V?$C@$$A6AXH@Z@@A" +C callback_int_int; +// CHECK: "\01?callback_int_int@@3V?$C@$$A6AHH@Z@@A" +C callback_void_Type; +// CHECK: "\01?callback_void_Type@@3V?$C@$$A6AXVType@@@Z@@A" + +void foo(C0 c) {} +// CHECK: "\01?foo@@YAXV?$C@$$A6AXXZ@@@Z" + +// Here be dragons! +// Let's face the magic of template partial specialization... + +void function(C) {} +// CHECK: "\01?function@@YAXV?$C@$$A6AXXZ@@@Z" + +template class C {}; +void function_pointer(C) {} +// CHECK: "\01?function_pointer@@YAXV?$C@P6AXXZ@@@Z" + +// Block equivalent to the previous definitions. +template class C {}; +void block(C) {} +// CHECK: "\01?block@@YAXV?$C@P_EAXXZ@@@Z" +// FYI blocks are not present in MSVS, so we're free to choose the spec. + +template class C {}; +class Z { + public: + void method() {} +}; +void member_pointer(C) {} +// CHECK: "\01?member_pointer@@YAXV?$C@P8Z@@AEXXZ@@@Z" + +template void bar(T) {} + +void call_bar() { + bar(0); +// CHECK: "\01??$bar@P6AHH@Z@@YAXP6AHH@Z@Z" + + bar(0); +// CHECK: "\01??$bar@P_EAHH@Z@@YAXP_EAHH@Z@Z" +// FYI blocks are not present in MSVS, so we're free to choose the spec. 
+} diff --git a/test/CodeGenCXX/mangle-ms-templates.cpp b/test/CodeGenCXX/mangle-ms-templates.cpp index 977ef35..e16fe93 100644 --- a/test/CodeGenCXX/mangle-ms-templates.cpp +++ b/test/CodeGenCXX/mangle-ms-templates.cpp @@ -48,12 +48,24 @@ void template_mangling() { // CHECK: call {{.*}} @"\01??0?$BoolTemplate@$00@@QAE@XZ" // CHECK: call {{.*}} @"\01??$Foo@H@?$BoolTemplate@$00@@QAEXH@Z" + IntTemplate<0> zero; +// CHECK: call {{.*}} @"\01??0?$IntTemplate@$0A@@@QAE@XZ" + IntTemplate<5> five; // CHECK: call {{.*}} @"\01??0?$IntTemplate@$04@@QAE@XZ" IntTemplate<11> eleven; // CHECK: call {{.*}} @"\01??0?$IntTemplate@$0L@@@QAE@XZ" + IntTemplate<256> _256; +// CHECK: call {{.*}} @"\01??0?$IntTemplate@$0BAA@@@QAE@XZ" + + IntTemplate<513> _513; +// CHECK: call {{.*}} @"\01??0?$IntTemplate@$0CAB@@@QAE@XZ" + + IntTemplate<1026> _1026; +// CHECK: call {{.*}} @"\01??0?$IntTemplate@$0EAC@@@QAE@XZ" + IntTemplate<65535> ffff; // CHECK: call {{.*}} @"\01??0?$IntTemplate@$0PPPP@@@QAE@XZ" } diff --git a/test/CodeGenCXX/mangle-ms.cpp b/test/CodeGenCXX/mangle-ms.cpp index f392c17..0edb4b4 100644 --- a/test/CodeGenCXX/mangle-ms.cpp +++ b/test/CodeGenCXX/mangle-ms.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fms-extensions -fblocks -emit-llvm %s -o - -cxx-abi microsoft -triple=i386-pc-win32 | FileCheck %s +// RUN: %clang_cc1 -fms-compatibility -fblocks -emit-llvm %s -o - -cxx-abi microsoft -triple=x86_64-pc-win32 | FileCheck -check-prefix X64 %s // CHECK: @"\01?a@@3HA" // CHECK: @"\01?b@N@@3HA" @@ -7,16 +8,17 @@ // CHECK: @"\01?e@foo@@1JC" // CHECK: @"\01?f@foo@@2DD" // CHECK: @"\01?g@bar@@2HA" -// CHECK: @"\01?h@@3QAHA" +// CHECK: @"\01?h1@@3QAHA" +// CHECK: @"\01?h2@@3QBHB" // CHECK: @"\01?i@@3PAY0BE@HA" // CHECK: @"\01?j@@3P6GHCE@ZA" // CHECK: @"\01?k@@3PTfoo@@DA" // CHECK: @"\01?l@@3P8foo@@AEHH@ZA" // CHECK: @"\01?color1@@3PANA" +// CHECK: @"\01?color2@@3QBNB" -// FIXME: The following three tests currently fail, see PR13182. +// FIXME: The following three tests currently fail, see http://llvm.org/PR13182 // Replace "CHECK-NOT" with "CHECK" when it is fixed. -// CHECK-NOT: @"\01?color2@@3QBNB" // CHECK-NOT: @"\01?color3@@3QAY02$$CBNA" // CHECK-NOT: @"\01?color4@@3QAY02$$CBNA" @@ -87,7 +89,8 @@ const volatile char foo::f = 'C'; int bar::g; -extern int * const h = &a; +extern int * const h1 = &a; +extern const int * const h2 = &a; int i[10][20]; @@ -107,6 +110,7 @@ bool __fastcall beta(long long a, wchar_t b) throw(signed char, unsigned char) { } // CHECK: @"\01?alpha@@YGXMN@Z" +// X64: @"\01?alpha@@YAXMN@Z" // Make sure tag-type mangling works. 
void gamma(class foo, struct bar, union baz, enum quux) {} @@ -128,6 +132,10 @@ void zeta(int (*)(int, int)) {} void eta(int (^)(int, int)) {} // CHECK: @"\01?eta@@YAXP_EAHHH@Z@Z" +typedef int theta_arg(int,int); +void theta(theta_arg^ block) {} +// CHECK: @"\01?theta@@YAXP_EAHHH@Z@Z" + void operator_new_delete() { char *ptr = new char; // CHECK: @"\01??2@YAPAXI@Z" @@ -147,7 +155,7 @@ void (redundant_parens)(); void redundant_parens_use() { redundant_parens(); } // CHECK: @"\01?redundant_parens@@YAXXZ" -// PR13182, PR13047 +// PR13047 typedef double RGB[3]; RGB color1; extern const RGB color2 = {}; @@ -162,3 +170,24 @@ E fooE() { return E(); } class X {}; // CHECK: "\01?fooX@@YA?AVX@@XZ" X fooX() { return X(); } + +namespace PR13182 { + extern char s0[]; + // CHECK: @"\01?s0@PR13182@@3PADA" + extern char s1[42]; + // CHECK: @"\01?s1@PR13182@@3PADA" + extern const char s2[]; + // CHECK: @"\01?s2@PR13182@@3QBDB" + extern const char s3[42]; + // CHECK: @"\01?s3@PR13182@@3QBDB" + extern volatile char s4[]; + // CHECK: @"\01?s4@PR13182@@3RCDC" + extern const volatile char s5[]; + // CHECK: @"\01?s5@PR13182@@3SDDD" + extern const char* const* s6; + // CHECK: @"\01?s6@PR13182@@3PBQBDB" + + char foo() { + return s0[0] + s1[0] + s2[0] + s3[0] + s4[0] + s5[0] + s6[0][0]; + } +} diff --git a/test/CodeGenCXX/mangle-nullptr-arg.cpp b/test/CodeGenCXX/mangle-nullptr-arg.cpp index 393de0b..07bf52f 100644 --- a/test/CodeGenCXX/mangle-nullptr-arg.cpp +++ b/test/CodeGenCXX/mangle-nullptr-arg.cpp @@ -11,3 +11,6 @@ template struct PM {}; // CHECK: define void @_Z5test22PMILM1Xi0EE void test2(PM) { } +// CHECK: define void @_Z5test316DependentTypePtrIPiLS0_0EE +template struct DependentTypePtr {}; +void test3(DependentTypePtr) { } diff --git a/test/CodeGenCXX/mangle-template.cpp b/test/CodeGenCXX/mangle-template.cpp index 05c3a58..15a85c7 100644 --- a/test/CodeGenCXX/mangle-template.cpp +++ b/test/CodeGenCXX/mangle-template.cpp @@ -162,11 +162,23 @@ namespace test12 { void use() { // CHECK: define internal void @_ZN6test124testIFivEXadL_ZNS_L1fEvEEEEvv( test(); - // CHECK: define internal void @_ZN6test124testIRFivEXadL_ZNS_L1fEvEEEEvv( + // CHECK: define internal void @_ZN6test124testIRFivELZNS_L1fEvEEEvv( test(); // CHECK: define internal void @_ZN6test124testIPKiXadL_ZNS_L1nEEEEEvv( test(); - // CHECK: define internal void @_ZN6test124testIRKiXadL_ZNS_L1nEEEEEvv( + // CHECK: define internal void @_ZN6test124testIRKiLZNS_L1nEEEEvv( test(); } } + +// rdar://problem/12072531 +// Test the boundary condition of minimal signed integers. 
+namespace test13 { + template char returnChar() { return c; } + template char returnChar<-128>(); + // CHECK: @_ZN6test1310returnCharILcn128EEEcv() + + template short returnShort() { return s; } + template short returnShort<-32768>(); + // CHECK: @_ZN6test1311returnShortILsn32768EEEsv() +} diff --git a/test/CodeGenCXX/mangle-valist.cpp b/test/CodeGenCXX/mangle-valist.cpp new file mode 100644 index 0000000..73fd58e --- /dev/null +++ b/test/CodeGenCXX/mangle-valist.cpp @@ -0,0 +1,44 @@ +#include "stdarg.h" + +namespace test1 { + void test1(const char *fmt, va_list ap) { + } +} + +class Test2 { +public: + void test2(const char *fmt, va_list ap); +}; + +void Test2::test2(const char *fmt, va_list ap) { +} + +// RUN: %clang_cc1 %s -emit-llvm -o - \ +// RUN: -triple armv7-unknown-linux \ +// RUN: | FileCheck -check-prefix=MANGLE-ARM-AAPCS %s +// CHECK-MANGLE-ARM-AAPCS: @_ZN5test15test1EPKcSt9__va_list +// CHECK-MANGLE-ARM-AAPCS: @_ZN5Test25test2EPKcSt9__va_list + +// RUN: %clang_cc1 %s -emit-llvm -o - \ +// RUN: -triple armv7-unknown-linux -target-abi apcs-gnu \ +// RUN: | FileCheck -check-prefix=MANGLE-ARM-APCS %s +// CHECK-MANGLE-ARM-APCS: @_ZN5test15test1EPKcPv +// CHECK-MANGLE-ARM-APCS: @_ZN5Test25test2EPKcPv + +// RUN: %clang_cc1 %s -emit-llvm -o - \ +// RUN: -triple mipsel-unknown-linux \ +// RUN: | FileCheck -check-prefix=MANGLE-MIPSEL %s +// CHECK-MANGLE-MIPSEL: @_ZN5test15test1EPKcPv +// CHECK-MANGLE-MIPSEL: @_ZN5Test25test2EPKcPv + +// RUN: %clang_cc1 %s -emit-llvm -o - \ +// RUN: -triple i686-unknown-linux \ +// RUN: | FileCheck -check-prefix=MANGLE-X86 %s +// CHECK-MANGLE-X86: @_ZN5test15test1EPKcPc +// CHECK-MANGLE-X86: @_ZN5Test25test2EPKcPc + +// RUN: %clang_cc1 %s -emit-llvm -o - \ +// RUN: -triple x86_64-unknown-linux \ +// RUN: | FileCheck -check-prefix=MANGLE-X86-64 %s +// CHECK-MANGLE-X86-64: @_ZN5test15test1EPKcP13__va_list_tag +// CHECK-MANGLE-X86-64: @_ZN5Test25test2EPKcP13__va_list_tag diff --git a/test/CodeGenCXX/member-alignment.cpp b/test/CodeGenCXX/member-alignment.cpp index 8e120f7..78026d4 100644 --- a/test/CodeGenCXX/member-alignment.cpp +++ b/test/CodeGenCXX/member-alignment.cpp @@ -1,5 +1,4 @@ // RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s -// XFAIL: arm,powerpc // rdar://7268289 diff --git a/test/CodeGenCXX/member-call-parens.cpp b/test/CodeGenCXX/member-call-parens.cpp index 2054137..3def43e 100644 --- a/test/CodeGenCXX/member-call-parens.cpp +++ b/test/CodeGenCXX/member-call-parens.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only -verify %s +// expected-no-diagnostics struct A { int a(); }; typedef int B; diff --git a/test/CodeGenCXX/member-functions.cpp b/test/CodeGenCXX/member-functions.cpp index b95763c..1310eb0 100644 --- a/test/CodeGenCXX/member-functions.cpp +++ b/test/CodeGenCXX/member-functions.cpp @@ -35,6 +35,9 @@ struct S { static void g() { } static void f(); + + // RUN: grep "define linkonce_odr void @_ZN1S1vEv.*unnamed_addr" %t + virtual void v() {} }; // RUN: grep "define void @_ZN1S1fEv" %t diff --git a/test/CodeGenCXX/member-init-assignment.cpp b/test/CodeGenCXX/member-init-assignment.cpp index 84c4a36..acc7084 100644 --- a/test/CodeGenCXX/member-init-assignment.cpp +++ b/test/CodeGenCXX/member-init-assignment.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple i386-unknown-unknown -emit-llvm -o - | FileCheck %s // PR7291 struct Foo { diff --git a/test/CodeGenCXX/member-init-struct.cpp b/test/CodeGenCXX/member-init-struct.cpp index 688d92d..d509b0e 100644 --- 
a/test/CodeGenCXX/member-init-struct.cpp +++ b/test/CodeGenCXX/member-init-struct.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm-only -verify +// expected-no-diagnostics struct A {int a;}; struct B {float a;}; diff --git a/test/CodeGenCXX/member-init-union.cpp b/test/CodeGenCXX/member-init-union.cpp index 2c50e18..be171a3 100644 --- a/test/CodeGenCXX/member-init-union.cpp +++ b/test/CodeGenCXX/member-init-union.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm-only -verify +// expected-no-diagnostics union x { int a; diff --git a/test/CodeGenCXX/microsoft-abi-constructors.cpp b/test/CodeGenCXX/microsoft-abi-constructors.cpp index ac27f13..89731ff 100644 --- a/test/CodeGenCXX/microsoft-abi-constructors.cpp +++ b/test/CodeGenCXX/microsoft-abi-constructors.cpp @@ -9,13 +9,16 @@ class A { void no_contstructor_destructor_infinite_recursion() { A a; -// Make sure that the constructor doesn't call itself: -// CHECK: define {{.*}} @"\01??0A@@QAE@XZ" -// CHECK-NOT: call void @"\01??0A@@QAE@XZ" -// CHECK: ret +// CHECK: define linkonce_odr x86_thiscallcc %class.A* @"\01??0A@@QAE@XZ"(%class.A* %this) +// CHECK: [[THIS_ADDR:%[.0-9A-Z_a-z]+]] = alloca %class.A*, align 4 +// CHECK-NEXT: store %class.A* %this, %class.A** [[THIS_ADDR]], align 4 +// CHECK-NEXT: [[T1:%[.0-9A-Z_a-z]+]] = load %class.A** [[THIS_ADDR]] +// CHECK-NEXT: ret %class.A* [[T1]] +// CHECK-NEXT: } // Make sure that the destructor doesn't call itself: // CHECK: define {{.*}} @"\01??1A@@QAE@XZ" // CHECK-NOT: call void @"\01??1A@@QAE@XZ" // CHECK: ret } + diff --git a/test/CodeGenCXX/microsoft-abi-default-cc.cpp b/test/CodeGenCXX/microsoft-abi-default-cc.cpp new file mode 100644 index 0000000..d0d25ce --- /dev/null +++ b/test/CodeGenCXX/microsoft-abi-default-cc.cpp @@ -0,0 +1,47 @@ +// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck -check-prefix GCABI %s +// RUN: %clang_cc1 -emit-llvm %s -o - -DMS_ABI -cxx-abi microsoft -triple=i386-pc-win32 | FileCheck -check-prefix MSABI %s + +#ifdef MS_ABI +# define METHOD_CC __thiscall +#else +# define METHOD_CC __attribute__ ((cdecl)) +#endif + +// Test that it's OK to have multiple function declarations with the default CC +// both mentioned explicitly and implied. +void foo(); +void __cdecl foo(); +void __cdecl foo() {} +// GCABI: define void @_Z3foov() +// MSABI: define void @"\01?foo@@YAXXZ" + +void __cdecl bar(); +void bar(); +void bar() {} +// GCABI: define void @_Z3barv() +// MSABI: define void @"\01?bar@@YAXXZ" + +// Test that it's OK to mark either the method declaration or method definition +// with a default CC explicitly. 
+class A { +public: + void baz(); + void METHOD_CC qux(); + + void static_baz(); + void __cdecl static_qux(); +}; + +void METHOD_CC A::baz() {} +// GCABI: define void @_ZN1A3bazEv +// MSABI: define x86_thiscallcc void @"\01?baz@A@@QAEXXZ" +void A::qux() {} +// GCABI: define void @_ZN1A3quxEv +// MSABI: define x86_thiscallcc void @"\01?qux@A@@QAEXXZ" + +void __cdecl static_baz() {} +// GCABI: define void @_Z10static_bazv +// MSABI: define void @"\01?static_baz@@YAXXZ" +void static_qux() {} +// GCABI: define void @_Z10static_quxv +// MSABI: define void @"\01?static_qux@@YAXXZ" diff --git a/test/CodeGenCXX/microsoft-abi-methods.cpp b/test/CodeGenCXX/microsoft-abi-methods.cpp index 6b7f004..c996ba5 100644 --- a/test/CodeGenCXX/microsoft-abi-methods.cpp +++ b/test/CodeGenCXX/microsoft-abi-methods.cpp @@ -71,8 +71,8 @@ void constructors() { Child c; // Make sure that the Base constructor call in the Child constructor uses // the right calling convention: -// CHECK: define linkonce_odr x86_thiscallcc void @"\01??0Child@@QAE@XZ" -// CHECK: call x86_thiscallcc void @"\01??0Base@@QAE@XZ" +// CHECK: define linkonce_odr x86_thiscallcc %class.Child* @"\01??0Child@@QAE@XZ" +// CHECK: %{{[.0-9A-Z_a-z]+}} = call x86_thiscallcc %class.Base* @"\01??0Base@@QAE@XZ" // CHECK: ret // Make sure that the Base destructor call in the Child denstructor uses @@ -85,5 +85,5 @@ void constructors() { // CHECK: define linkonce_odr x86_thiscallcc void @"\01??1Base@@QAE@XZ" // Make sure that the Base constructor definition uses the right CC: -// CHECK: define linkonce_odr x86_thiscallcc void @"\01??0Base@@QAE@XZ" +// CHECK: define linkonce_odr x86_thiscallcc %class.Base* @"\01??0Base@@QAE@XZ" } diff --git a/test/CodeGenCXX/microsoft-abi-static-initializers.cpp b/test/CodeGenCXX/microsoft-abi-static-initializers.cpp index d8b7899..448f1ee 100644 --- a/test/CodeGenCXX/microsoft-abi-static-initializers.cpp +++ b/test/CodeGenCXX/microsoft-abi-static-initializers.cpp @@ -6,7 +6,7 @@ struct S { } s; // CHECK: define internal void [[INIT_s:@.*global_var.*]] nounwind -// CHECK: call x86_thiscallcc void @"\01??0S@@QAE@XZ" +// CHECK: %{{[.0-9A-Z_a-z]+}} = call x86_thiscallcc %struct.S* @"\01??0S@@QAE@XZ" // CHECK: call i32 @atexit(void ()* @"__dtor_\01?s@@3US@@A") // CHECK: ret void @@ -34,11 +34,11 @@ void force_usage() { } // CHECK: define internal void [[INIT_foo:@.*global_var.*]] nounwind -// CHECK: call x86_thiscallcc void @"\01??0A@@QAE@XZ" +// CHECK: %{{[.0-9A-Z_a-z]+}} = call x86_thiscallcc %class.A* @"\01??0A@@QAE@XZ" // CHECK: call i32 @atexit(void ()* [[FOO_DTOR:@"__dtor_.*foo@.*]]) // CHECK: ret void -// CHECK: define linkonce_odr x86_thiscallcc void @"\01??0A@@QAE@XZ" +// CHECK: define linkonce_odr x86_thiscallcc %class.A* @"\01??0A@@QAE@XZ" // CHECK: define linkonce_odr x86_thiscallcc void @"\01??1A@@QAE@XZ" diff --git a/test/CodeGenCXX/microsoft-interface.cpp b/test/CodeGenCXX/microsoft-interface.cpp new file mode 100644 index 0000000..0b44bab --- /dev/null +++ b/test/CodeGenCXX/microsoft-interface.cpp @@ -0,0 +1,43 @@ +// RUN: %clang_cc1 -std=c++11 -fms-extensions -Wno-microsoft -triple=i386-pc-win32 -emit-llvm %s -o - | FileCheck %s + +__interface I { + int test() { + return 1; + } +}; + +struct S : I { + virtual int test() override { + return I::test(); + } +}; + +int fn() { + S s; + return s.test(); +} + +// CHECK: @_ZTV1S = linkonce_odr unnamed_addr constant [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI1S to i8*), i8* bitcast (i32 (%struct.S*)* @_ZN1S4testEv to i8*)] + +// CHECK: define i32 @_Z2fnv() 
+// CHECK: call void @_ZN1SC1Ev(%struct.S* %s) +// CHECK: %{{[.0-9A-Z_a-z]+}} = call i32 @_ZN1S4testEv(%struct.S* %s) + +// CHECK: define linkonce_odr void @_ZN1SC1Ev(%struct.S* %this) +// CHECK: call void @_ZN1SC2Ev(%struct.S* %{{[.0-9A-Z_a-z]+}}) + +// CHECK: define linkonce_odr i32 @_ZN1S4testEv(%struct.S* %this) +// CHECK: %{{[.0-9A-Z_a-z]+}} = call i32 @_ZN1I4testEv(%__interface.I* %{{[.0-9A-Z_a-z]+}}) + +// CHECK: define linkonce_odr i32 @_ZN1I4testEv(%__interface.I* %this) +// CHECK: ret i32 1 + +// CHECK: define linkonce_odr void @_ZN1SC2Ev(%struct.S* %this) +// CHECK: call void @_ZN1IC2Ev(%__interface.I* %{{[.0-9A-Z_a-z]+}}) +// CHECK: store i8** getelementptr inbounds ([3 x i8*]* @_ZTV1S, i64 0, i64 2), i8*** %{{[.0-9A-Z_a-z]+}} + +// CHECK: define linkonce_odr void @_ZN1IC2Ev(%__interface.I* %this) +// CHECK: store i8** getelementptr inbounds ([3 x i8*]* @_ZTV1I, i64 0, i64 2), i8*** %{{[.0-9A-Z_a-z]+}} + +// CHECK-NOT: define linkonce_odr %__interface.I* @_ZN1IaSERKS_(%__interface.I* %this, %__interface.I*) +// CHECK-NOT: define linkonce_odr %__interface.I* @_ZN1IaSEOS_(%__interface.I* %this, %__interface.I*) diff --git a/test/CodeGenCXX/microsoft-uuidof-unsupported-target.cpp b/test/CodeGenCXX/microsoft-uuidof-unsupported-target.cpp new file mode 100644 index 0000000..4f68aa3 --- /dev/null +++ b/test/CodeGenCXX/microsoft-uuidof-unsupported-target.cpp @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -emit-llvm %s -o - -triple=x86_64-apple-macosx10.8.0 -fms-extensions -verify + +typedef struct _GUID +{ + unsigned long Data1; + unsigned short Data2; + unsigned short Data3; + unsigned char Data4[8]; +} GUID; + +struct __declspec(uuid("87654321-4321-4321-4321-ba0987654321")) S { }; + +GUID g = __uuidof(S); // expected-error {{__uuidof codegen is not supported on this architecture}} diff --git a/test/CodeGenCXX/microsoft-uuidof.cpp b/test/CodeGenCXX/microsoft-uuidof.cpp new file mode 100644 index 0000000..8eeb449 --- /dev/null +++ b/test/CodeGenCXX/microsoft-uuidof.cpp @@ -0,0 +1,72 @@ +// RUN: %clang_cc1 -emit-llvm %s -o - -triple=i386-pc-win32 -fms-extensions | FileCheck %s + +typedef struct _GUID +{ + unsigned long Data1; + unsigned short Data2; + unsigned short Data3; + unsigned char Data4[8]; +} GUID; + +struct __declspec(uuid("12345678-1234-1234-1234-1234567890ab")) S1 { } s1; +struct __declspec(uuid("87654321-4321-4321-4321-ba0987654321")) S2 { }; + +// This gets initialized in a static initializer. +// CHECK: @g = global %struct._GUID zeroinitializer, align 4 +GUID g = __uuidof(S1); + +// First global use of __uuidof(S1) forces the creation of the global. +// CHECK: @__uuid_12345678-1234-1234-1234-1234567890ab = private unnamed_addr constant %struct._GUID { i32 305419896, i16 4660, i16 4660, [8 x i8] c"\124\124Vx\90\AB" } +// CHECK: @gr = constant %struct._GUID* @__uuid_12345678-1234-1234-1234-1234567890ab, align 4 +const GUID& gr = __uuidof(S1); + +// CHECK: @gp = global %struct._GUID* @__uuid_12345678-1234-1234-1234-1234567890ab, align 4 +const GUID* gp = &__uuidof(S1); + +// Special case: _uuidof(0) +// CHECK: @zeroiid = constant %struct._GUID* @__uuid_00000000-0000-0000-0000-000000000000, align 4 +const GUID& zeroiid = __uuidof(0); + +// __uuidof(S2) hasn't been used globally yet, so it's emitted when it's used +// in a function and is emitted at the end of the globals section. +// CHECK: @__uuid_87654321-4321-4321-4321-ba0987654321 = private unnamed_addr constant %struct._GUID { i32 -2023406815, i16 17185, i16 17185, [8 x i8] c"C!\BA\09\87eC!" } + +// The static initializer for g. 
+// CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast (%struct._GUID* @g to i8*), i8* bitcast (%struct._GUID* @__uuid_12345678-1234-1234-1234-1234567890ab to i8*), i32 16, i32 4, i1 false) + +void fun() { + // CHECK: %s1_1 = alloca %struct._GUID, align 4 + // CHECK: %s1_2 = alloca %struct._GUID, align 4 + // CHECK: %s1_3 = alloca %struct._GUID, align 4 + + // CHECK: [[U1:%.+]] = bitcast %struct._GUID* %s1_1 to i8* + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[U1]], i8* bitcast (%struct._GUID* @__uuid_12345678-1234-1234-1234-1234567890ab to i8*), i32 16, i32 4, i1 false) + GUID s1_1 = __uuidof(S1); + + // CHECK: [[U2:%.+]] = bitcast %struct._GUID* %s1_2 to i8* + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[U2]], i8* bitcast (%struct._GUID* @__uuid_12345678-1234-1234-1234-1234567890ab to i8*), i32 16, i32 4, i1 false) + GUID s1_2 = __uuidof(S1); + + // CHECK: [[U3:%.+]] = bitcast %struct._GUID* %s1_3 to i8* + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[U3]], i8* bitcast (%struct._GUID* @__uuid_12345678-1234-1234-1234-1234567890ab to i8*), i32 16, i32 4, i1 false) + GUID s1_3 = __uuidof(s1); +} + +void gun() { + // CHECK: %s2_1 = alloca %struct._GUID, align 4 + // CHECK: %s2_2 = alloca %struct._GUID, align 4 + // CHECK: %r = alloca %struct._GUID*, align 4 + // CHECK: %p = alloca %struct._GUID*, align 4 + // CHECK: %zeroiid = alloca %struct._GUID*, align 4 + GUID s2_1 = __uuidof(S2); + GUID s2_2 = __uuidof(S2); + + // CHECK: store %struct._GUID* @__uuid_87654321-4321-4321-4321-ba0987654321, %struct._GUID** %r, align 4 + const GUID& r = __uuidof(S2); + // CHECK: store %struct._GUID* @__uuid_87654321-4321-4321-4321-ba0987654321, %struct._GUID** %p, align 4 + const GUID* p = &__uuidof(S2); + + // Special case _uuidof(0), local scope version. + // CHECK: store %struct._GUID* @__uuid_00000000-0000-0000-0000-000000000000, %struct._GUID** %zeroiid, align 4 + const GUID& zeroiid = __uuidof(0); +} diff --git a/test/CodeGenCXX/new-operator-phi.cpp b/test/CodeGenCXX/new-operator-phi.cpp index 49859ac..641734d 100644 --- a/test/CodeGenCXX/new-operator-phi.cpp +++ b/test/CodeGenCXX/new-operator-phi.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only -verify %s +// expected-no-diagnostics // PR5454 #include diff --git a/test/CodeGenCXX/new.cpp b/test/CodeGenCXX/new.cpp index 8d9f641..e052390 100644 --- a/test/CodeGenCXX/new.cpp +++ b/test/CodeGenCXX/new.cpp @@ -250,3 +250,13 @@ namespace PR11757 { // CHECK-NEXT: call void @_ZN7PR117571XC1Ev({{.*}}* [[CASTED]]) // CHECK-NEXT: ret {{.*}} [[CASTED]] } + +namespace PR13380 { + struct A { A() {} }; + struct B : public A { int x; }; + // CHECK: define i8* @_ZN7PR133801fEv + // CHECK: call noalias i8* @_Znam( + // CHECK: call void @llvm.memset.p0i8 + // CHECK-NEXT: call void @_ZN7PR133801BC1Ev + void* f() { return new B[2](); } +} diff --git a/test/CodeGenCXX/nrvo.cpp b/test/CodeGenCXX/nrvo.cpp index 2feaf68..8ff7dd7 100644 --- a/test/CodeGenCXX/nrvo.cpp +++ b/test/CodeGenCXX/nrvo.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -emit-llvm -O1 -o - %s | FileCheck %s -// RUN: %clang_cc1 -emit-llvm -O1 -fcxx-exceptions -fexceptions -o - %s | FileCheck --check-prefix=CHECK-EH %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -O1 -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -O1 -fcxx-exceptions -fexceptions -o - %s | FileCheck --check-prefix=CHECK-EH %s // Test code generation for the named return value optimization. 
class X { diff --git a/test/CodeGenCXX/override-layout.cpp b/test/CodeGenCXX/override-layout.cpp index d432885..aba4c91 100644 --- a/test/CodeGenCXX/override-layout.cpp +++ b/test/CodeGenCXX/override-layout.cpp @@ -1,7 +1,7 @@ // RUN: %clang_cc1 -fdump-record-layouts-simple %s 2> %t.layouts // RUN: %clang_cc1 -fdump-record-layouts-simple %s > %t.before 2>&1 // RUN: %clang_cc1 -DPACKED= -DALIGNED16= -fdump-record-layouts-simple -foverride-record-layout=%t.layouts %s > %t.after 2>&1 -// RUN: diff %t.before %t.after +// RUN: diff -u %t.before %t.after // RUN: FileCheck %s < %t.after // If not explicitly disabled, set PACKED to the packed attribute. @@ -54,11 +54,24 @@ struct PACKED X4 { X4(); }; +// CHECK: Type: struct X5 +struct PACKED X5 { + union { + long a; + long b; + }; + short l; + short r; +}; + void use_structs() { X0 x0s[sizeof(X0)]; X1 x1s[sizeof(X1)]; X2 x2s[sizeof(X2)]; X3 x3s[sizeof(X3)]; X4 x4s[sizeof(X4)]; + X5 x5s[sizeof(X5)]; x4s[1].a = 1; + x5s[1].a = 17; } + diff --git a/test/CodeGenCXX/pr12251.cpp b/test/CodeGenCXX/pr12251.cpp index a9920c0..f3f2ec4 100644 --- a/test/CodeGenCXX/pr12251.cpp +++ b/test/CodeGenCXX/pr12251.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 %s -emit-llvm -O1 -relaxed-aliasing -fstrict-enums -std=c++11 -o - | FileCheck %s -// RUN: %clang_cc1 %s -emit-llvm -O1 -relaxed-aliasing -std=c++11 -o - | FileCheck --check-prefix=NO-STRICT-ENUMS %s +// RUN: %clang_cc1 %s -triple i386-unknown-unknown -emit-llvm -O1 -relaxed-aliasing -fstrict-enums -std=c++11 -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple i386-unknown-unknown -emit-llvm -O1 -relaxed-aliasing -std=c++11 -o - | FileCheck --check-prefix=NO-STRICT-ENUMS %s bool f(bool *x) { return *x; diff --git a/test/CodeGenCXX/pragma-visibility.cpp b/test/CodeGenCXX/pragma-visibility.cpp index 11a38c1..0640d42 100644 --- a/test/CodeGenCXX/pragma-visibility.cpp +++ b/test/CodeGenCXX/pragma-visibility.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck %s #pragma GCC visibility push(hidden) struct x { diff --git a/test/CodeGenCXX/reference-bind-default-argument.cpp b/test/CodeGenCXX/reference-bind-default-argument.cpp index acce962..5cf279f 100644 --- a/test/CodeGenCXX/reference-bind-default-argument.cpp +++ b/test/CodeGenCXX/reference-bind-default-argument.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm-only -verify +// expected-no-diagnostics struct A {}; struct B : A {}; diff --git a/test/CodeGenCXX/reference-init.cpp b/test/CodeGenCXX/reference-init.cpp index 9469c84..d47b1f3 100644 --- a/test/CodeGenCXX/reference-init.cpp +++ b/test/CodeGenCXX/reference-init.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only -verify %s +// expected-no-diagnostics struct XPTParamDescriptor {}; struct nsXPTParamInfo { diff --git a/test/CodeGenCXX/regparm.cpp b/test/CodeGenCXX/regparm.cpp index f0ebd2b..2196c79 100644 --- a/test/CodeGenCXX/regparm.cpp +++ b/test/CodeGenCXX/regparm.cpp @@ -4,3 +4,35 @@ // CHECK: _Z3fooRi(i32* inreg void __attribute__ ((regparm (1))) foo(int &a) { } + +struct S1 { + int x; + S1(const S1 &y); +}; + +void __attribute__((regparm(3))) foo2(S1 a, int b); +// CHECK: declare void @_Z4foo22S1i(%struct.S1* inreg, i32 inreg) +void bar2(S1 a, int b) { + foo2(a, b); +} + +struct S2 { + int x; +}; + +void __attribute__((regparm(3))) foo3(struct S2 a, int b); +// CHECK: declare void @_Z4foo32S2i(i32 inreg, i32 inreg) +void bar3(struct S2 a, int b) { + foo3(a, b); +} + +struct S3 { + struct { + struct {} 
b[0]; + } a; +}; +__attribute((regparm(2))) void foo4(S3 a, int b); +// CHECK: declare void @_Z4foo42S3i(%struct.S3* byval align 4, i32 inreg) +void bar3(S3 a, int b) { + foo4(a, b); +} diff --git a/test/CodeGenCXX/reinterpret-cast.cpp b/test/CodeGenCXX/reinterpret-cast.cpp index dafa675..63c5a2a 100644 --- a/test/CodeGenCXX/reinterpret-cast.cpp +++ b/test/CodeGenCXX/reinterpret-cast.cpp @@ -1,4 +1,6 @@ // RUN: %clang_cc1 -emit-llvm -o - %s -std=c++11 +// REQUIRES: LP64 + void *f1(unsigned long l) { return reinterpret_cast(l); } diff --git a/test/CodeGenCXX/return.cpp b/test/CodeGenCXX/return.cpp new file mode 100644 index 0000000..43d40ae --- /dev/null +++ b/test/CodeGenCXX/return.cpp @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -emit-llvm -O0 -o - %s | FileCheck %s +// RUN: %clang_cc1 -emit-llvm -O -o - %s | FileCheck %s --check-prefix=CHECK-OPT + +// CHECK: @_Z9no_return +// CHECK-OPT: @_Z9no_return +int no_return() { + // CHECK: call void @llvm.trap + // CHECK-NEXT: unreachable + + // CHECK-OPT-NOT: call void @llvm.trap + // CHECK-OPT: unreachable +} diff --git a/test/CodeGenCXX/static-assert.cpp b/test/CodeGenCXX/static-assert.cpp index 53dc9a7..ff82f6d 100644 --- a/test/CodeGenCXX/static-assert.cpp +++ b/test/CodeGenCXX/static-assert.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm -o - -std=c++11 -verify +// expected-no-diagnostics static_assert(true, ""); diff --git a/test/CodeGenCXX/static-init-2.cpp b/test/CodeGenCXX/static-init-2.cpp index 768e6de..354fcd4 100644 --- a/test/CodeGenCXX/static-init-2.cpp +++ b/test/CodeGenCXX/static-init-2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm-only -verify %s +// expected-no-diagnostics // Make sure we don't crash generating y; its value is constant, but the // initializer has side effects, so EmitConstantExpr should fail. diff --git a/test/CodeGenCXX/switch-case-folding-2.cpp b/test/CodeGenCXX/switch-case-folding-2.cpp index 7c0283f..930bfeb 100644 --- a/test/CodeGenCXX/switch-case-folding-2.cpp +++ b/test/CodeGenCXX/switch-case-folding-2.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm %s -o - | FileCheck %s // CHECK that we don't crash. 
extern int printf(const char*, ...); diff --git a/test/CodeGenCXX/throw-expression-cleanup.cpp b/test/CodeGenCXX/throw-expression-cleanup.cpp index 0c41bc6..e1ecd38 100644 --- a/test/CodeGenCXX/throw-expression-cleanup.cpp +++ b/test/CodeGenCXX/throw-expression-cleanup.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -emit-llvm -fcxx-exceptions -fexceptions -std=c++11 -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple x86_64-none-linux-gnu -emit-llvm -fcxx-exceptions -fexceptions -std=c++11 -o - | FileCheck %s // PR13359 struct X { diff --git a/test/CodeGenCXX/throw-expression-dtor.cpp b/test/CodeGenCXX/throw-expression-dtor.cpp index 0de6683..cb4a6c6 100644 --- a/test/CodeGenCXX/throw-expression-dtor.cpp +++ b/test/CodeGenCXX/throw-expression-dtor.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -emit-llvm-only -verify -fcxx-exceptions -fexceptions +// expected-no-diagnostics // PR7281 class A { diff --git a/test/CodeGenCXX/throw-expressions.cpp b/test/CodeGenCXX/throw-expressions.cpp index 2515acb..f04185b 100644 --- a/test/CodeGenCXX/throw-expressions.cpp +++ b/test/CodeGenCXX/throw-expressions.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fcxx-exceptions -fexceptions -emit-llvm-only -verify %s -Wno-unreachable-code +// expected-no-diagnostics int val = 42; int& test1() { diff --git a/test/CodeGenCXX/thunk-linkonce-odr.cpp b/test/CodeGenCXX/thunk-linkonce-odr.cpp index 4f4d61d..82f2148 100644 --- a/test/CodeGenCXX/thunk-linkonce-odr.cpp +++ b/test/CodeGenCXX/thunk-linkonce-odr.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple i386-unknown-unknown -emit-llvm -o - | FileCheck %s // & struct A { diff --git a/test/CodeGenCXX/thunks.cpp b/test/CodeGenCXX/thunks.cpp index 04d0820..0659259 100644 --- a/test/CodeGenCXX/thunks.cpp +++ b/test/CodeGenCXX/thunks.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 %s -triple=x86_64-pc-linux-gnu -emit-llvm -o - | FileCheck %s -// RUN: %clang_cc1 %s -triple=x86_64-pc-linux-gnu -fhidden-weak-vtables -emit-llvm -o - | FileCheck -check-prefix=HIDDEN %s +// RUN: %clang_cc1 %s -triple=x86_64-pc-linux-gnu -munwind-tables -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple=x86_64-pc-linux-gnu -munwind-tables -fhidden-weak-vtables -emit-llvm -o - | FileCheck -check-prefix=HIDDEN %s namespace Test1 { @@ -301,6 +301,47 @@ namespace Test12 { // CHECK: getelementptr inbounds i8* {{.*}}, i64 8 } +// PR13832 +namespace Test13 { + struct B1 { + virtual B1 &foo1(); + }; + struct Pad1 { + virtual ~Pad1(); + }; + struct Proxy1 : Pad1, B1 { + virtual ~Proxy1(); + }; + struct D : virtual Proxy1 { + virtual ~D(); + virtual D &foo1(); + }; + D& D::foo1() { + return *this; + } + // CHECK: define {{.*}} @_ZTcvn8_n32_v8_n24_N6Test131D4foo1Ev + // CHECK: getelementptr inbounds i8* {{.*}}, i64 -8 + // CHECK: getelementptr inbounds i8* {{.*}}, i64 -32 + // CHECK: getelementptr inbounds i8* {{.*}}, i64 -24 + // CHECK: getelementptr inbounds i8* {{.*}}, i64 8 + // CHECK: ret %"struct.Test13::D"* +} + +namespace Test14 { + class A { + virtual void f(); + }; + class B { + virtual void f(); + }; + class C : public A, public B { + virtual void f(); + }; + void C::f() { + } + // CHECK: define void @_ZThn8_N6Test141C1fEv({{.*}}) {{.*}} uwtable +} + /**** The following has to go at the end of the file ****/ // This is from Test5: diff --git a/test/CodeGenCXX/typeid-cxx11.cpp b/test/CodeGenCXX/typeid-cxx11.cpp index 940274e..4e32d2d 100644 --- a/test/CodeGenCXX/typeid-cxx11.cpp +++ b/test/CodeGenCXX/typeid-cxx11.cpp @@ -18,13 +18,15 @@ struct A { virtual 
~A(); }; struct B : virtual A {}; struct C { int n; }; -// FIXME: check we produce a constant array for this, once we support IRGen of -// folded structs and arrays. +// CHECK: @_ZN5Test1L5itemsE = internal constant [4 x {{.*}}] [{{.*}} @_ZTIN5Test11AE {{.*}}, {{.*}}, {{.*}} @_ZN5Test19make_implINS_1AEEEPvv }, {{.*}} @_ZTIN5Test11BE {{.*}} @_ZN5Test19make_implINS_1BEEEPvv {{.*}} @_ZTIN5Test11CE {{.*}} @_ZN5Test19make_implINS_1CEEEPvv {{.*}} @_ZTIi {{.*}} @_ZN5Test19make_implIiEEPvv }] constexpr Item items[] = { item("A"), item("B"), item("C"), item("int") }; -// CHECK: @_ZN5Test11xE = constant %"class.std::type_info"* bitcast (i8** @_ZTIN5Test11AE to %"class.std::type_info"*), align 8 +// CHECK: @_ZN5Test11xE = constant %"class.std::type_info"* bitcast ({{.*}}* @_ZTIN5Test11AE to %"class.std::type_info"*), align 8 constexpr auto &x = items[0].ti; +// CHECK: @_ZN5Test11yE = constant %"class.std::type_info"* bitcast ({{.*}}* @_ZTIN5Test11BE to %"class.std::type_info"*), align 8 +constexpr auto &y = typeid(B{}); + } diff --git a/test/CodeGenCXX/unary-type-trait.cpp b/test/CodeGenCXX/unary-type-trait.cpp index a11c67e..3c6f9c0 100644 --- a/test/CodeGenCXX/unary-type-trait.cpp +++ b/test/CodeGenCXX/unary-type-trait.cpp @@ -1,3 +1,4 @@ // RUN: %clang_cc1 -emit-llvm-only -verify %s +// expected-no-diagnostics bool a() { return __is_pod(int); } diff --git a/test/CodeGenCXX/virtual-operator-call.cpp b/test/CodeGenCXX/virtual-operator-call.cpp index 42d38e5..72d49c2 100644 --- a/test/CodeGenCXX/virtual-operator-call.cpp +++ b/test/CodeGenCXX/virtual-operator-call.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple i386-unknown-unknown -emit-llvm -o - | FileCheck %s struct A { virtual int operator-() = 0; diff --git a/test/CodeGenCXX/visibility-inlines-hidden.cpp b/test/CodeGenCXX/visibility-inlines-hidden.cpp index 2bb1348..8519c8c 100644 --- a/test/CodeGenCXX/visibility-inlines-hidden.cpp +++ b/test/CodeGenCXX/visibility-inlines-hidden.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fvisibility-inlines-hidden -emit-llvm -o - %s -O2 -disable-llvm-optzns | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -fvisibility-inlines-hidden -emit-llvm -o - %s -O2 -disable-llvm-optzns | FileCheck %s // The trickery with optimization in the run line is to get IR // generation to emit available_externally function bodies, but not @@ -126,3 +126,12 @@ namespace test3 { // CHECK: define linkonce_odr hidden void @_ZN5test33fooEv // CHECK: define linkonce_odr hidden void @_ZN5test33zedIiEEvv } + +namespace test4 { + extern inline __attribute__ ((__gnu_inline__)) + void foo() {} + void bar() { + foo(); + } + // CHECK: define available_externally void @_ZN5test43fooE +} diff --git a/test/CodeGenCXX/vtable-layout.cpp b/test/CodeGenCXX/vtable-layout.cpp index d7644b9..1e831d2 100644 --- a/test/CodeGenCXX/vtable-layout.cpp +++ b/test/CodeGenCXX/vtable-layout.cpp @@ -43,6 +43,7 @@ // RUN: FileCheck --check-prefix=CHECK-42 %s < %t // RUN: FileCheck --check-prefix=CHECK-43 %s < %t // RUN: FileCheck --check-prefix=CHECK-44 %s < %t +// RUN: FileCheck --check-prefix=CHECK-45 %s < %t // For now, just verify this doesn't crash. namespace test0 { @@ -1727,3 +1728,23 @@ namespace Test38 { void *B::foo() { return 0; } } + +namespace Test39 { + struct A { + virtual void foo() = delete; + }; + + // CHECK-45: Vtable for 'Test39::B' (4 entries). 
+ // CHECK-45-NEXT: 0 | offset_to_top (0) + // CHECK-45-NEXT: 1 | Test39::B RTTI + // CHECK-45-NEXT: -- (Test39::A, 0) vtable address -- + // CHECK-45-NEXT: -- (Test39::B, 0) vtable address -- + // CHECK-45-NEXT: 2 | void Test39::A::foo() [deleted] + // CHECK-45-NEXT: 3 | void Test39::B::foo2() + struct B: A { + virtual void foo2(); + }; + + void B::foo2() { + } +} diff --git a/test/CodeGenCXX/vtt-layout.cpp b/test/CodeGenCXX/vtt-layout.cpp index ace7b74..abc2477 100644 --- a/test/CodeGenCXX/vtt-layout.cpp +++ b/test/CodeGenCXX/vtt-layout.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin10 -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin10 -std=c++11 -emit-llvm -o - | FileCheck %s // Test1::B should just have a single entry in its VTT, which points to the vtable. namespace Test1 { @@ -58,7 +58,29 @@ namespace Test4 { D d; } +namespace Test5 { + struct A { + virtual void f() = 0; + virtual void anchor(); + }; + + void A::anchor() { + } +} + +namespace Test6 { + struct A { + virtual void f() = delete; + virtual void anchor(); + }; + + void A::anchor() { + } +} + // CHECK: @_ZTTN5Test11BE = unnamed_addr constant [1 x i8*] [i8* bitcast (i8** getelementptr inbounds ([4 x i8*]* @_ZTVN5Test11BE, i64 0, i64 3) to i8*)] +// CHECK: @_ZTVN5Test51AE = unnamed_addr constant [4 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTIN5Test51AE to i8*), i8* bitcast (void ()* @__cxa_pure_virtual to i8*), i8* bitcast (void (%"struct.Test5::A"*)* @_ZN5Test51A6anchorEv to i8*)] +// CHECK: @_ZTVN5Test61AE = unnamed_addr constant [4 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTIN5Test61AE to i8*), i8* bitcast (void ()* @__cxa_deleted_virtual to i8*), i8* bitcast (void (%"struct.Test6::A"*)* @_ZN5Test61A6anchorEv to i8*)] // CHECK: @_ZTTN5Test41DE = linkonce_odr unnamed_addr constant [19 x i8*] [i8* bitcast (i8** getelementptr inbounds ([25 x i8*]* @_ZTVN5Test41DE, i64 0, i64 6) to i8*), i8* bitcast (i8** getelementptr inbounds ([11 x i8*]* @_ZTCN5Test41DE0_NS_2C1E, i64 0, i64 4) to i8*), i8* bitcast (i8** getelementptr inbounds ([11 x i8*]* @_ZTCN5Test41DE0_NS_2C1E, i64 0, i64 7) to i8*), i8* bitcast (i8** getelementptr inbounds ([11 x i8*]* @_ZTCN5Test41DE0_NS_2C1E, i64 0, i64 10) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTCN5Test41DE16_NS_2C2E, i64 0, i64 7) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTCN5Test41DE16_NS_2C2E, i64 0, i64 7) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTCN5Test41DE16_NS_2C2E, i64 0, i64 12) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTCN5Test41DE16_NS_2C2E, i64 0, i64 15) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTCN5Test41DE16_NS_2C2E, i64 0, i64 18) to i8*), i8* bitcast (i8** getelementptr inbounds ([25 x i8*]* @_ZTVN5Test41DE, i64 0, i64 17) to i8*), i8* bitcast (i8** getelementptr inbounds ([25 x i8*]* @_ZTVN5Test41DE, i64 0, i64 20) to i8*), i8* bitcast (i8** getelementptr inbounds ([25 x i8*]* @_ZTVN5Test41DE, i64 0, i64 13) to i8*), i8* bitcast (i8** getelementptr inbounds ([25 x i8*]* @_ZTVN5Test41DE, i64 0, i64 13) to i8*), i8* bitcast (i8** getelementptr inbounds ([25 x i8*]* @_ZTVN5Test41DE, i64 1, i64 0) to i8*), i8* bitcast (i8** getelementptr inbounds ([7 x i8*]* @_ZTCN5Test41DE40_NS_2V1E, i64 0, i64 3) to i8*), i8* bitcast (i8** getelementptr inbounds ([7 x i8*]* @_ZTCN5Test41DE40_NS_2V1E, i64 0, i64 6) to i8*), i8* bitcast (i8** getelementptr inbounds ([11 x i8*]* @_ZTCN5Test41DE72_NS_2V2E, i64 0, 
i64 4) to i8*), i8* bitcast (i8** getelementptr inbounds ([11 x i8*]* @_ZTCN5Test41DE72_NS_2V2E, i64 0, i64 7) to i8*), i8* bitcast (i8** getelementptr inbounds ([11 x i8*]* @_ZTCN5Test41DE72_NS_2V2E, i64 0, i64 10) to i8*)] // CHECK: @_ZTTN5Test31DE = linkonce_odr unnamed_addr constant [13 x i8*] [i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTVN5Test31DE, i64 0, i64 5) to i8*), i8* bitcast (i8** getelementptr inbounds ([7 x i8*]* @_ZTCN5Test31DE0_NS_2C1E, i64 0, i64 3) to i8*), i8* bitcast (i8** getelementptr inbounds ([7 x i8*]* @_ZTCN5Test31DE0_NS_2C1E, i64 0, i64 6) to i8*), i8* bitcast (i8** getelementptr inbounds ([14 x i8*]* @_ZTCN5Test31DE16_NS_2C2E, i64 0, i64 6) to i8*), i8* bitcast (i8** getelementptr inbounds ([14 x i8*]* @_ZTCN5Test31DE16_NS_2C2E, i64 0, i64 6) to i8*), i8* bitcast (i8** getelementptr inbounds ([14 x i8*]* @_ZTCN5Test31DE16_NS_2C2E, i64 0, i64 10) to i8*), i8* bitcast (i8** getelementptr inbounds ([14 x i8*]* @_ZTCN5Test31DE16_NS_2C2E, i64 0, i64 13) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTVN5Test31DE, i64 0, i64 15) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTVN5Test31DE, i64 0, i64 11) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTVN5Test31DE, i64 0, i64 11) to i8*), i8* bitcast (i8** getelementptr inbounds ([19 x i8*]* @_ZTVN5Test31DE, i64 1, i64 0) to i8*), i8* bitcast (i8** getelementptr inbounds ([7 x i8*]* @_ZTCN5Test31DE64_NS_2V2E, i64 0, i64 3) to i8*), i8* bitcast (i8** getelementptr inbounds ([7 x i8*]* @_ZTCN5Test31DE64_NS_2V2E, i64 0, i64 6) to i8*)] // CHECK: @_ZTTN5Test21CE = linkonce_odr unnamed_addr constant [2 x i8*] [i8* bitcast (i8** getelementptr inbounds ([5 x i8*]* @_ZTVN5Test21CE, i64 0, i64 4) to i8*), i8* bitcast (i8** getelementptr inbounds ([5 x i8*]* @_ZTVN5Test21CE, i64 0, i64 4) to i8*)] diff --git a/test/CodeGenObjC/2008-10-3-EhValue.m b/test/CodeGenObjC/2008-10-3-EhValue.m index 0ed0d89..bc4dfdb 100644 --- a/test/CodeGenObjC/2008-10-3-EhValue.m +++ b/test/CodeGenObjC/2008-10-3-EhValue.m @@ -1,4 +1,4 @@ -// RUN: %clang -fexceptions -S -emit-llvm %s -o /dev/null +// RUN: %clang -fexceptions -fobjc-exceptions -S -emit-llvm %s -o /dev/null @interface Object { @public diff --git a/test/CodeGenObjC/arc-arm.m b/test/CodeGenObjC/arc-arm.m index 23da3be..2ab8cb6 100644 --- a/test/CodeGenObjC/arc-arm.m +++ b/test/CodeGenObjC/arc-arm.m @@ -13,8 +13,24 @@ void test1(void) { // CHECK-NEXT: call void asm sideeffect "mov\09r7, r7 // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], - // CHECK-NEXT: load - // CHECK-NEXT: call void @objc_release + // CHECK-NEXT: call void @objc_storeStrong( // CHECK-NEXT: ret void id x = test1_helper(); } + +// rdar://problem/12133032 +@class A; +A *test2(void) { + extern A *test2_helper(void); + // CHECK: [[T0:%.*]] = call arm_aapcscc [[A:%.*]]* @test2_helper() + // CHECK-NEXT: ret [[A]]* [[T0]] + return test2_helper(); +} + +id test3(void) { + extern A *test3_helper(void); + // CHECK: [[T0:%.*]] = call arm_aapcscc [[A:%.*]]* @test3_helper() + // CHECK-NEXT: [[T1:%.*]] = bitcast [[A]]* [[T0]] to i8* + // CHECK-NEXT: ret i8* [[T1]] + return test3_helper(); +} diff --git a/test/CodeGenObjC/arc-block-ivar-layout.m b/test/CodeGenObjC/arc-block-ivar-layout.m deleted file mode 100644 index 6c82f29..0000000 --- a/test/CodeGenObjC/arc-block-ivar-layout.m +++ /dev/null @@ -1,60 +0,0 @@ -// RUN: %clang_cc1 -fblocks -fobjc-arc -fobjc-runtime-has-weak -triple 
x86_64-apple-darwin -O0 -emit-llvm %s -o %t-64.s -// RUN: FileCheck -check-prefix LP64 --input-file=%t-64.s %s -// rdar://8991729 - -__weak id wid; -void x(id y) {} -void y(int a) {} - -extern id opaque_id(); - -void f() { - __block int byref_int = 0; - char ch = 'a'; - char ch1 = 'b'; - char ch2 = 'c'; - short sh = 2; - const id bar = (id) opaque_id(); - id baz = 0; - __strong id strong_void_sta; - __block id byref_bab = (id)0; - __block id bl_var1; - int i; double dob; - -// The patterns here are a sequence of bytes, each saying first how -// many sizeof(void*) chunks to skip (high nibble) and then how many -// to scan (low nibble). A zero byte says that we've reached the end -// of the pattern. -// -// All of these patterns start with 01 3x because the block header on -// LP64 consists of an isa pointer (which we're supposed to scan for -// some reason) followed by three words (2 ints, a function pointer, -// and a descriptor pointer). - -// Test 1 -// byref int, short, char, char, char, id, id, strong id, byref id -// 01 35 10 00 -// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [4 x i8] c"\015\10\00" - void (^b)() = ^{ - byref_int = sh + ch+ch1+ch2 ; - x(bar); - x(baz); - x((id)strong_void_sta); - x(byref_bab); - }; - b(); - -// Test 2 -// byref int, short, char, char, char, id, id, strong id, byref void*, byref id -// 01 36 10 00 -// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [4 x i8] c"\016\10\00" - void (^c)() = ^{ - byref_int = sh + ch+ch1+ch2 ; - x(bar); - x(baz); - x((id)strong_void_sta); - x(wid); - bl_var1 = 0; - x(byref_bab); - }; -} diff --git a/test/CodeGenObjC/arc-blocks.m b/test/CodeGenObjC/arc-blocks.m index 2326bce..e776517 100644 --- a/test/CodeGenObjC/arc-blocks.m +++ b/test/CodeGenObjC/arc-blocks.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -fblocks -fobjc-arc -fobjc-runtime-has-weak -O2 -disable-llvm-optzns -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -fblocks -fobjc-arc -fobjc-runtime-has-weak -o - %s | FileCheck -check-prefix=CHECK-UNOPT %s // This shouldn't crash. 
void test0(id (^maker)(void)) { @@ -41,6 +42,24 @@ void test2(id x) { // CHECK-NEXT: ret void extern void test2_helper(id (^)(void)); test2_helper(^{ return x; }); + +// CHECK: define internal void @__copy_helper_block_ +// CHECK: [[T0:%.*]] = load i8** +// CHECK-NEXT: [[SRC:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* +// CHECK-NEXT: [[T0:%.*]] = load i8** +// CHECK-NEXT: [[DST:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* +// CHECK-NEXT: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[SRC]], i32 0, i32 5 +// CHECK-NEXT: [[T1:%.*]] = load i8** [[T0]] +// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) nounwind +// CHECK-NEXT: ret void + +// CHECK: define internal void @__destroy_helper_block_ +// CHECK: [[T0:%.*]] = load i8** +// CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* +// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[T1]], i32 0, i32 5 +// CHECK-NEXT: [[T3:%.*]] = load i8** [[T2]] +// CHECK-NEXT: call void @objc_release(i8* [[T3]]) +// CHECK-NEXT: ret void } void test3(void (^sink)(id*)) { @@ -99,8 +118,8 @@ void test4(void) { // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[SLOT]] // CHECK-NEXT: [[SLOT:%.*]] = getelementptr inbounds [[BYREF_T]]* [[VAR]], i32 0, i32 6 - // 0x42000000 - has signature, copy/dispose helpers - // CHECK: store i32 1107296256, + // 0x42800000 - has signature, copy/dispose helpers, as well as BLOCK_HAS_EXTENDED_LAYOUT + // CHECK: store i32 -1040187392, // CHECK: [[T0:%.*]] = bitcast [[BYREF_T]]* [[VAR]] to i8* // CHECK-NEXT: store i8* [[T0]], i8** // CHECK: call void @test4_helper( @@ -151,8 +170,8 @@ void test5(void) { // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) // CHECK-NEXT: store i8* [[T1]], i8** [[VAR]], // CHECK-NEXT: call void @objc_release(i8* [[T1]]) - // 0x40000000 - has signature but no copy/dispose - // CHECK: store i32 1073741824, i32* + // 0x40800000 - has signature but no copy/dispose, as well as BLOCK_HAS_EXTENDED_LAYOUT + // CHECK: store i32 -1073741824, i32* // CHECK: [[CAPTURE:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: [[T0:%.*]] = load i8** [[VAR]] // CHECK-NEXT: store i8* [[T0]], i8** [[CAPTURE]] @@ -179,8 +198,8 @@ void test6(void) { // CHECK-NEXT: call i8* @objc_initWeak(i8** [[SLOT]], i8* [[T1]]) // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // CHECK-NEXT: [[SLOT:%.*]] = getelementptr inbounds [[BYREF_T]]* [[VAR]], i32 0, i32 6 - // 0x42000000 - has signature, copy/dispose helpers - // CHECK: store i32 1107296256, + // 0x42800000 - has signature, copy/dispose helpers, as well as BLOCK_HAS_EXTENDED_LAYOUT + // CHECK: store i32 -1040187392, // CHECK: [[T0:%.*]] = bitcast [[BYREF_T]]* [[VAR]] to i8* // CHECK-NEXT: store i8* [[T0]], i8** // CHECK: call void @test6_helper( @@ -228,8 +247,8 @@ void test7(void) { // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) // CHECK-NEXT: call i8* @objc_initWeak(i8** [[VAR]], i8* [[T1]]) // CHECK-NEXT: call void @objc_release(i8* [[T1]]) - // 0x42000000 - has signature, copy/dispose helpers - // CHECK: store i32 1107296256, + // 0x42800000 - has signature, copy/dispose helpers, as well as BLOCK_HAS_EXTENDED_LAYOUT + // CHECK: store i32 -1040187392, // CHECK: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: [[T0:%.*]] = call i8* @objc_loadWeak(i8** [[VAR]]) // CHECK-NEXT: call i8* @objc_initWeak(i8** [[SLOT]], i8* [[T0]]) @@ 
-359,7 +378,7 @@ void test10a(void) { // CHECK: [[T0:%.*]] = load i8** {{%.*}} // CHECK-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[BYREF_T]]* // CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds [[BYREF_T]]* [[T1]], i32 0, i32 6 -// CHECK-NEXT: [[T3:%.*]] = load void ()** [[T2]], align 8 +// CHECK-NEXT: [[T3:%.*]] = load void ()** [[T2]] // CHECK-NEXT: [[T4:%.*]] = bitcast void ()* [[T3]] to i8* // CHECK-NEXT: call void @objc_release(i8* [[T4]]) // CHECK-NEXT: ret void @@ -532,3 +551,99 @@ void test16() { // CHECK-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 // CHECK-NEXT: store void ()* null, void ()** [[BLKVAR]], align 8 } + +// rdar://12151005 +// +// This is an intentional exception to our conservative jump-scope +// checking for full-expressions containing block literals with +// non-trivial cleanups: if the block literal appears in the operand +// of a return statement, there's no need to extend its lifetime. +id (^test17(id self, int which))(void) { + switch (which) { + case 1: return ^{ return self; }; + case 0: return ^{ return self; }; + } + return (void*) 0; +} +// CHECK: define i8* ()* @test17( +// CHECK: [[RET:%.*]] = alloca i8* ()*, align +// CHECK-NEXT: [[SELF:%.*]] = alloca i8*, +// CHECK: [[B0:%.*]] = alloca [[BLOCK:<.*>]], align +// CHECK: [[B1:%.*]] = alloca [[BLOCK]], align +// CHECK: [[T0:%.*]] = call i8* @objc_retain(i8* +// CHECK-NEXT: store i8* [[T0]], i8** [[SELF]], align +// CHECK-NOT: objc_retain +// CHECK-NOT: objc_release +// CHECK: [[DESTROY:%.*]] = getelementptr inbounds [[BLOCK]]* [[B0]], i32 0, i32 5 +// CHECK-NOT: objc_retain +// CHECK-NOT: objc_release +// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]]* [[B0]], i32 0, i32 5 +// CHECK-NEXT: [[T1:%.*]] = load i8** [[SELF]], align +// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) +// CHECK-NEXT: store i8* [[T2]], i8** [[T0]], +// CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B0]] to i8* ()* +// CHECK-NEXT: [[T1:%.*]] = bitcast i8* ()* [[T0]] to i8* +// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainBlock(i8* [[T1]]) +// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()* +// CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]] +// CHECK-NEXT: [[T0:%.*]] = load i8** [[DESTROY]] +// CHECK-NEXT: call void @objc_release(i8* [[T0]]) +// CHECK-NEXT: store i32 +// CHECK-NEXT: br label +// CHECK-NOT: objc_retain +// CHECK-NOT: objc_release +// CHECK: [[DESTROY:%.*]] = getelementptr inbounds [[BLOCK]]* [[B1]], i32 0, i32 5 +// CHECK-NOT: objc_retain +// CHECK-NOT: objc_release +// CHECK: [[T0:%.*]] = getelementptr inbounds [[BLOCK]]* [[B1]], i32 0, i32 5 +// CHECK-NEXT: [[T1:%.*]] = load i8** [[SELF]], align +// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retain(i8* [[T1]]) +// CHECK-NEXT: store i8* [[T2]], i8** [[T0]], +// CHECK-NEXT: [[T0:%.*]] = bitcast [[BLOCK]]* [[B1]] to i8* ()* +// CHECK-NEXT: [[T1:%.*]] = bitcast i8* ()* [[T0]] to i8* +// CHECK-NEXT: [[T2:%.*]] = call i8* @objc_retainBlock(i8* [[T1]]) +// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8* ()* +// CHECK-NEXT: store i8* ()* [[T3]], i8* ()** [[RET]] +// CHECK-NEXT: [[T0:%.*]] = load i8** [[DESTROY]] +// CHECK-NEXT: call void @objc_release(i8* [[T0]]) +// CHECK-NEXT: store i32 +// CHECK-NEXT: br label + +void test18(id x) { +// CHECK-UNOPT: define void @test18( +// CHECK-UNOPT: [[X:%.*]] = alloca i8*, +// CHECK-UNOPT-NEXT: [[BLOCK:%.*]] = alloca [[BLOCK_T:<{.*}>]], +// CHECK-UNOPT-NEXT: [[PARM:%.*]] = call i8* @objc_retain(i8* {{%.*}}) +// CHECK-UNOPT-NEXT: store i8* [[PARM]], i8** [[X]] +// 
CHECK-UNOPT-NEXT: [[SLOTREL:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 +// CHECK-UNOPT: [[SLOT:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[BLOCK]], i32 0, i32 5 +// CHECK-UNOPT-NEXT: [[T0:%.*]] = load i8** [[X]], +// CHECK-UNOPT-NEXT: [[T1:%.*]] = call i8* @objc_retain(i8* [[T0]]) +// CHECK-UNOPT-NEXT: store i8* [[T1]], i8** [[SLOT]], +// CHECK-UNOPT-NEXT: bitcast +// CHECK-UNOPT-NEXT: call void @test18_helper( +// CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[SLOTREL]], i8* null) nounwind +// CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[X]], i8* null) nounwind +// CHECK-UNOPT-NEXT: ret void + extern void test18_helper(id (^)(void)); + test18_helper(^{ return x; }); + +// CHECK-UNOPT: define internal void @__copy_helper_block_ +// CHECK-UNOPT: [[T0:%.*]] = load i8** +// CHECK-UNOPT-NEXT: [[SRC:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* +// CHECK-UNOPT-NEXT: [[T0:%.*]] = load i8** +// CHECK-UNOPT-NEXT: [[DST:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* +// CHECK-UNOPT-NEXT: [[T0:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[SRC]], i32 0, i32 5 +// CHECK-UNOPT-NEXT: [[T1:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[DST]], i32 0, i32 5 +// CHECK-UNOPT-NEXT: [[T2:%.*]] = load i8** [[T0]] +// CHECK-UNOPT-NEXT: store i8* null, i8** [[T1]] +// CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[T1]], i8* [[T2]]) nounwind +// CHECK-UNOPT-NEXT: ret void + +// CHECK-UNOPT: define internal void @__destroy_helper_block_ +// CHECK-UNOPT: [[T0:%.*]] = load i8** +// CHECK-UNOPT-NEXT: [[T1:%.*]] = bitcast i8* [[T0]] to [[BLOCK_T]]* +// CHECK-UNOPT-NEXT: [[T2:%.*]] = getelementptr inbounds [[BLOCK_T]]* [[T1]], i32 0, i32 5 +// CHECK-UNOPT-NEXT: call void @objc_storeStrong(i8** [[T2]], i8* null) +// CHECK-UNOPT-NEXT: ret void +} diff --git a/test/CodeGenObjC/arc-captured-32bit-block-var-layout.m b/test/CodeGenObjC/arc-captured-32bit-block-var-layout.m new file mode 100644 index 0000000..6c72138 --- /dev/null +++ b/test/CodeGenObjC/arc-captured-32bit-block-var-layout.m @@ -0,0 +1,425 @@ +// RUN: %clang_cc1 -fblocks -fobjc-arc -fobjc-runtime-has-weak -triple i386-apple-darwin -O0 -emit-llvm %s -o %t-64.s +// RUN: FileCheck --input-file=%t-64.s %s +// rdar://12184410 + +void x(id y) {} +void y(int a) {} + +extern id opaque_id(); + +void f() { + __weak id wid; + __block int byref_int = 0; + char ch = 'a'; + char ch1 = 'b'; + char ch2 = 'c'; + short sh = 2; + const id bar = (id) opaque_id(); + id baz = 0; + __strong id strong_void_sta; + __block id byref_bab = (id)0; + __block id bl_var1; + int i; double dob; + +// The patterns here are a sequence of bytes, each saying first how +// many sizeof(void*) chunks to skip (high nibble) and then how many +// to scan (low nibble). A zero byte says that we've reached the end +// of the pattern. +// +// All of these patterns start with 01 3x because the block header on +// LP64 consists of an isa pointer (which we're supposed to scan for +// some reason) followed by three words (2 ints, a function pointer, +// and a descriptor pointer). 
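A note on the layout strings checked below: unlike the skip/scan nibble pattern described in the comment above, the CHECK lines that follow expect a run-length encoding in which each byte appears to pack an opcode in its high nibble and a count minus one in its low nibble, terminated by a 0x00 (BL_OPERATOR:0) byte. The following is a minimal sketch of that reading; the opcode numbering used here (STRONG = 3, BYREF = 4, WEAK = 5, and so on) is inferred from the expected strings themselves, not taken from the compiler sources. For example, c"@2@\00" is the byte sequence 0x40 0x32 0x40 0x00, which decodes to BL_BYREF:1, BL_STRONG:3, BL_BYREF:1.

    /* Illustrative sketch only, not part of the imported tests.
     * Decode a run-length block-variable-layout string, assuming:
     * high nibble = opcode, low nibble = count - 1, 0x00 terminates. */
    #include <stdio.h>

    static void decode_block_layout(const unsigned char *p) {
      static const char *const kOp[] = {
        "OPERATOR", "NON_OBJECT_BYTES", "NON_OBJECT_WORD",  /* assumed numbering */
        "STRONG", "BYREF", "WEAK", "UNRETAINED"
      };
      for (; *p; ++p)
        printf("BL_%s:%u ", kOp[*p >> 4], (*p & 0xFu) + 1u);
      printf("BL_OPERATOR:0\n");
    }

    int main(void) {
      decode_block_layout((const unsigned char *)"@2@");  /* Test 1 string below */
      decode_block_layout((const unsigned char *)"@2PA"); /* Test 2 string below */
      return 0;
    }

Run on "@2PA", the sketch prints BL_BYREF:1 BL_STRONG:3 BL_WEAK:1 BL_BYREF:2 BL_OPERATOR:0, which matches the Test 2 comment below.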
+ +// Test 1 +// block variable layout: BL_BYREF:1, BL_STRONG:3, BL_BYREF:1, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [4 x i8] c"@2@\00" + void (^b)() = ^{ + byref_int = sh + ch+ch1+ch2 ; + x(bar); + x(baz); + x((id)strong_void_sta); + x(byref_bab); + }; + b(); + +// Test 2 +// block variable layout: BL_BYREF:1, BL_STRONG:3, BL_WEAK:1, BL_BYREF:2, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] c"@2PA\00" + void (^c)() = ^{ + byref_int = sh + ch+ch1+ch2 ; + x(bar); + x(baz); + x((id)strong_void_sta); + x(wid); + bl_var1 = 0; + x(byref_bab); + }; +} + +@class NSString, NSNumber; +void g() { + NSString *foo; + NSNumber *bar; + unsigned int bletch; + __weak id weak_delegate; + unsigned int i; + NSString *y; + NSString *z; +// block variable layout: BL_STRONG:2, BL_WEAK:1, BL_STRONG:2, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] c"!1P1\00" + void (^c)() = ^{ + int j = i + bletch; + x(foo); + x(bar); + x(weak_delegate); + x(y); + x(z); + }; + c(); +} + +// Test 5 (unions/structs and their nesting): +void h() { + struct S5 { + int i1; + __unsafe_unretained id o1; + struct V { + int i2; + __unsafe_unretained id o2; + } v1; + int i3; + union UI { + void * i1; + __unsafe_unretained id o1; + int i3; + __unsafe_unretained id o3; + }ui; + }; + + union U { + void * i1; + __unsafe_unretained id o1; + int i3; + __unsafe_unretained id o3; + }ui; + + struct S5 s2; + union U u2; + __block id block_id; + +/** +block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, BL_NON_OBJECT_WORD:1, + BL_UNRETAINE:1, BL_NON_OBJECT_WORD:3, BL_BYREF:1, BL_OPERATOR:0 +*/ +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [7 x i8] c" ` `\22@\00" + void (^c)() = ^{ + x(s2.ui.o1); + x(u2.o1); + block_id = 0; + }; + c(); +} + +// Test for array of stuff. +void arr1() { + struct S { + __unsafe_unretained id unsafe_unretained_var[4]; + } imported_s; + +// block variable layout: BL_UNRETAINE:4, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [2 x i8] c"c\00" + void (^c)() = ^{ + x(imported_s.unsafe_unretained_var[2]); + }; + + c(); +} + +// Test2 for array of stuff. +void arr2() { + struct S { + int a; + __unsafe_unretained id unsafe_unretained_var[4]; + } imported_s; + +// block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINE:4, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c" c\00" + void (^c)() = ^{ + x(imported_s.unsafe_unretained_var[2]); + }; + + c(); +} + +// Test3 for array of stuff. +void arr3() { + struct S { + int a; + __unsafe_unretained id unsafe_unretained_var[0]; + } imported_s; + +// block variable layout: BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + void (^c)() = ^{ + int i = imported_s.a; + }; + + c(); +} + + +// Test4 for array of stuff. +@class B; +void arr4() { + struct S { + struct s0 { + __unsafe_unretained id s_f0; + __unsafe_unretained id s_f1; + } f0; + + __unsafe_unretained id f1; + + struct s1 { + int *f0; + __unsafe_unretained B *f1; + } f4[2][2]; + } captured_s; + +/** +block variable layout: BL_UNRETAINE:3, + BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, + BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, + BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, + BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, + BL_OPERATOR:0 +*/ +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [10 x i8] + void (^c)() = ^{ + id i = captured_s.f0.s_f1; + }; + + c(); +} + +// Test1 bitfield in cpatured aggregate. 
+void bf1() { + struct S { + int flag : 25; + int flag1: 7; + int flag2 :1; + int flag3: 7; + int flag4: 24; + } s; + +// block variable layout: BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + int (^c)() = ^{ + return s.flag; + }; + c(); +} + +// Test2 bitfield in cpatured aggregate. +void bf2() { + struct S { + int flag : 1; + } s; + +// block variable layout: BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + int (^c)() = ^{ + return s.flag; + }; + c(); +} + +// Test3 bitfield in cpatured aggregate. +void bf3() { + + struct { + unsigned short _reserved : 16; + + unsigned char _draggedNodesAreDeletable: 1; + unsigned char _draggedOutsideOutlineView : 1; + unsigned char _adapterRespondsTo_addRootPaths : 1; + unsigned char _adapterRespondsTo_moveDataNodes : 1; + unsigned char _adapterRespondsTo_removeRootDataNode : 1; + unsigned char _adapterRespondsTo_doubleClickDataNode : 1; + unsigned char _adapterRespondsTo_selectDataNode : 1; + unsigned char _adapterRespondsTo_textDidEndEditing : 1; + unsigned char _adapterRespondsTo_updateAndSaveRoots : 1; + unsigned char _adapterRespondsTo_askToDeleteRootNodes : 1; + unsigned char _adapterRespondsTo_contextMenuForSelectedNodes : 1; + unsigned char _adapterRespondsTo_pasteboardFilenamesForNodes : 1; + unsigned char _adapterRespondsTo_writeItemsToPasteboard : 1; + unsigned char _adapterRespondsTo_writeItemsToPasteboardXXXX : 1; + + unsigned int _filler : 32; + } _flags; + +// block variable layout: BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + unsigned char (^c)() = ^{ + return _flags._draggedNodesAreDeletable; + }; + + c(); +} + +// Test4 unnamed bitfield +void bf4() { + + struct { + unsigned short _reserved : 16; + + unsigned char _draggedNodesAreDeletable: 1; + unsigned char _draggedOutsideOutlineView : 1; + unsigned char _adapterRespondsTo_addRootPaths : 1; + unsigned char _adapterRespondsTo_moveDataNodes : 1; + unsigned char _adapterRespondsTo_removeRootDataNode : 1; + unsigned char _adapterRespondsTo_doubleClickDataNode : 1; + unsigned char _adapterRespondsTo_selectDataNode : 1; + unsigned char _adapterRespondsTo_textDidEndEditing : 1; + + unsigned long long : 64; + + unsigned char _adapterRespondsTo_updateAndSaveRoots : 1; + unsigned char _adapterRespondsTo_askToDeleteRootNodes : 1; + unsigned char _adapterRespondsTo_contextMenuForSelectedNodes : 1; + unsigned char _adapterRespondsTo_pasteboardFilenamesForNodes : 1; + unsigned char _adapterRespondsTo_writeItemsToPasteboard : 1; + unsigned char _adapterRespondsTo_writeItemsToPasteboardXXXX : 1; + + unsigned int _filler : 32; + } _flags; + +// block variable layout: BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + unsigned char (^c)() = ^{ + return _flags._draggedNodesAreDeletable; + }; + + c(); +} + + + +// Test5 unnamed bitfield. +void bf5() { + struct { + unsigned char flag : 1; + unsigned int : 32; + unsigned char flag1 : 1; + } _flags; + +// block variable layout: BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + unsigned char (^c)() = ^{ + return _flags.flag; + }; + + c(); +} + + +// Test6 0 length bitfield. 
+void bf6() { + struct { + unsigned char flag : 1; + unsigned int : 0; + unsigned char flag1 : 1; + } _flags; + +// block variable layout: BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + unsigned char (^c)() = ^{ + return _flags.flag; + }; + + c(); +} + +// Test7 large number of captured variables. +void Test7() { + __weak id wid; + __weak id wid1, wid2, wid3, wid4; + __weak id wid5, wid6, wid7, wid8; + __weak id wid9, wid10, wid11, wid12; + __weak id wid13, wid14, wid15, wid16; + const id bar = (id) opaque_id(); +//block variable layout: BL_STRONG:1, BL_WEAK:16, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c"0_\00" + void (^b)() = ^{ + x(bar); + x(wid1); + x(wid2); + x(wid3); + x(wid4); + x(wid5); + x(wid6); + x(wid7); + x(wid8); + x(wid9); + x(wid10); + x(wid11); + x(wid12); + x(wid13); + x(wid14); + x(wid15); + x(wid16); + }; +} + + +// Test 8 very large number of captured variables. +void Test8() { +__weak id wid; + __weak id wid1, wid2, wid3, wid4; + __weak id wid5, wid6, wid7, wid8; + __weak id wid9, wid10, wid11, wid12; + __weak id wid13, wid14, wid15, wid16; + __weak id w1, w2, w3, w4; + __weak id w5, w6, w7, w8; + __weak id w9, w10, w11, w12; + __weak id w13, w14, w15, w16; + const id bar = (id) opaque_id(); +// block variable layout: BL_STRONG:1, BL_WEAK:16, BL_WEAK:16, BL_WEAK:1, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] + void (^b)() = ^{ + x(bar); + x(wid1); + x(wid2); + x(wid3); + x(wid4); + x(wid5); + x(wid6); + x(wid7); + x(wid8); + x(wid9); + x(wid10); + x(wid11); + x(wid12); + x(wid13); + x(wid14); + x(wid15); + x(wid16); + x(w1); + x(w2); + x(w3); + x(w4); + x(w5); + x(w6); + x(w7); + x(w8); + x(w9); + x(w10); + x(w11); + x(w12); + x(w13); + x(w14); + x(w15); + x(w16); + x(wid); + }; +} diff --git a/test/CodeGenObjC/arc-captured-block-var-inlined-layout.m b/test/CodeGenObjC/arc-captured-block-var-inlined-layout.m new file mode 100644 index 0000000..b930737 --- /dev/null +++ b/test/CodeGenObjC/arc-captured-block-var-inlined-layout.m @@ -0,0 +1,112 @@ +// RUN: %clang_cc1 -fblocks -fobjc-arc -fobjc-runtime-has-weak -triple x86_64-apple-darwin -O0 -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -fblocks -fobjc-arc -fobjc-runtime-has-weak -triple i386-apple-darwin -O0 -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-i386 %s +// rdar://12184410 + +void x(id y) {} +void y(int a) {} + +extern id opaque_id(); + +void f() { + __block int byref_int = 0; + const id bar = (id) opaque_id(); + id baz = 0; + __strong id strong_void_sta; + __block id byref_bab = (id)0; + __block id bl_var1; + +// Inline instruction for block variable layout: 0x0100 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 256 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 256 } + void (^b)() = ^{ + x(bar); + }; + +// Inline instruction for block variable layout: 0x0210 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 528 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 528 } + void (^c)() = ^{ + x(bar); + x(baz); + byref_int = 1; + }; + +// Inline instruction for block variable layout: 0x0230 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 560 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 560 } + void (^d)() = ^{ + x(bar); + x(baz); + byref_int = 1; + bl_var1 = 0; + byref_bab = 0; + 
}; + +// Inline instruction for block variable layout: 0x0231 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 561 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 561 } + __weak id wid; + id (^e)() = ^{ + x(bar); + x(baz); + byref_int = 1; + bl_var1 = 0; + byref_bab = 0; + return wid; + }; + +// Inline instruction for block variable layout: 0x0235 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 565 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 565 } + __weak id wid1, wid2, wid3, wid4; + id (^f)() = ^{ + x(bar); + x(baz); + byref_int = 1; + bl_var1 = 0; + byref_bab = 0; + x(wid1); + x(wid2); + x(wid3); + x(wid4); + return wid; + }; + +// Inline instruction for block variable layout: 0x035 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 53 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 53 } + id (^g)() = ^{ + byref_int = 1; + bl_var1 = 0; + byref_bab = 0; + x(wid1); + x(wid2); + x(wid3); + x(wid4); + return wid; + }; + +// Inline instruction for block variable layout: 0x01 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 1 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 1 } + id (^h)() = ^{ + return wid; + }; + +// Inline instruction for block variable layout: 0x020 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 32 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 32 } + void (^ii)() = ^{ + byref_int = 1; + byref_bab = 0; + }; + +// Inline instruction for block variable layout: 0x0102 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 258 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i32 258 } + void (^jj)() = ^{ + x(bar); + x(wid1); + x(wid2); + }; +} diff --git a/test/CodeGenObjC/arc-captured-block-var-layout.m b/test/CodeGenObjC/arc-captured-block-var-layout.m new file mode 100644 index 0000000..77f042e --- /dev/null +++ b/test/CodeGenObjC/arc-captured-block-var-layout.m @@ -0,0 +1,425 @@ +// RUN: %clang_cc1 -fblocks -fobjc-arc -fobjc-runtime-has-weak -triple x86_64-apple-darwin -O0 -emit-llvm %s -o %t-64.s +// RUN: FileCheck -check-prefix LP64 --input-file=%t-64.s %s +// rdar://12184410 + +void x(id y) {} +void y(int a) {} + +extern id opaque_id(); + +void f() { + __weak id wid; + __block int byref_int = 0; + char ch = 'a'; + char ch1 = 'b'; + char ch2 = 'c'; + short sh = 2; + const id bar = (id) opaque_id(); + id baz = 0; + __strong id strong_void_sta; + __block id byref_bab = (id)0; + __block id bl_var1; + int i; double dob; + +// The patterns here are a sequence of bytes, each saying first how +// many sizeof(void*) chunks to skip (high nibble) and then how many +// to scan (low nibble). A zero byte says that we've reached the end +// of the pattern. +// +// All of these patterns start with 01 3x because the block header on +// LP64 consists of an isa pointer (which we're supposed to scan for +// some reason) followed by three words (2 ints, a function pointer, +// and a descriptor pointer). 
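For the older skip/scan form that the comment above describes (the form the removed arc-block-ivar-layout.m test earlier in this patch checked as the byte pattern 01 35 10 00), a worked decode may help: each byte first skips as many pointer-sized words as its high nibble says, then scans as many as its low nibble says, and 0x00 ends the pattern. So 0x01 scans the isa word, 0x35 skips the three header words and scans the next five captured pointers, 0x10 skips one further word, and 0x00 terminates. A small sketch of that walk, purely illustrative:

    /* Illustrative sketch only: walk a GC-style skip/scan layout pattern
     * (high nibble = words to skip, low nibble = words to scan, 0x00 ends). */
    #include <stdio.h>

    static void decode_skip_scan(const unsigned char *p) {
      unsigned word = 0;
      for (; *p; ++p) {
        word += *p >> 4;                      /* skip */
        for (unsigned n = *p & 0xFu; n; --n, ++word)
          printf("scan word %u\n", word);     /* scan */
      }
    }

    int main(void) {
      const unsigned char pattern[] = { 0x01, 0x35, 0x10, 0x00 };
      decode_skip_scan(pattern);  /* scans word 0 (isa), then words 4..8 */
      return 0;
    }

The CHECK-LP64 strings in the tests that follow use the newer run-length encoding sketched earlier, not this skip/scan form.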
+ +// Test 1 +// block variable layout: BL_BYREF:1, BL_STRONG:3, BL_BYREF:1, BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [4 x i8] c"@2@\00" + void (^b)() = ^{ + byref_int = sh + ch+ch1+ch2 ; + x(bar); + x(baz); + x((id)strong_void_sta); + x(byref_bab); + }; + b(); + +// Test 2 +// block variable layout: BL_BYREF:1, BL_STRONG:3, BL_WEAK:1, BL_BYREF:2, BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] c"@2PA\00" + void (^c)() = ^{ + byref_int = sh + ch+ch1+ch2 ; + x(bar); + x(baz); + x((id)strong_void_sta); + x(wid); + bl_var1 = 0; + x(byref_bab); + }; +} + +@class NSString, NSNumber; +void g() { + NSString *foo; + NSNumber *bar; + unsigned int bletch; + __weak id weak_delegate; + unsigned int i; + NSString *y; + NSString *z; +// block variable layout: BL_STRONG:2, BL_WEAK:1, BL_STRONG:2, BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [4 x i8] c"1P1\00" + void (^c)() = ^{ + int j = i + bletch; + x(foo); + x(bar); + x(weak_delegate); + x(y); + x(z); + }; + c(); +} + +// Test 5 (unions/structs and their nesting): +void h() { + struct S5 { + int i1; + __unsafe_unretained id o1; + struct V { + int i2; + __unsafe_unretained id o2; + } v1; + int i3; + union UI { + void * i1; + __unsafe_unretained id o1; + int i3; + __unsafe_unretained id o3; + }ui; + }; + + union U { + void * i1; + __unsafe_unretained id o1; + int i3; + __unsafe_unretained id o3; + }ui; + + struct S5 s2; + union U u2; + __block id block_id; + +/** +block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, BL_NON_OBJECT_WORD:1, + BL_UNRETAINE:1, BL_NON_OBJECT_WORD:3, BL_BYREF:1, BL_OPERATOR:0 +*/ +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [7 x i8] c" ` `\22@\00" + void (^c)() = ^{ + x(s2.ui.o1); + x(u2.o1); + block_id = 0; + }; + c(); +} + +// Test for array of stuff. +void arr1() { + struct S { + __unsafe_unretained id unsafe_unretained_var[4]; + } imported_s; + +// block variable layout: BL_UNRETAINE:4, BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [2 x i8] c"c\00" + void (^c)() = ^{ + x(imported_s.unsafe_unretained_var[2]); + }; + + c(); +} + +// Test2 for array of stuff. +void arr2() { + struct S { + int a; + __unsafe_unretained id unsafe_unretained_var[4]; + } imported_s; + +// block variable layout: BL_NON_OBJECT_WORD:1, BL_UNRETAINE:4, BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c" c\00" + void (^c)() = ^{ + x(imported_s.unsafe_unretained_var[2]); + }; + + c(); +} + +// Test3 for array of stuff. +void arr3() { + struct S { + int a; + __unsafe_unretained id unsafe_unretained_var[0]; + } imported_s; + +// block variable layout: BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + void (^c)() = ^{ + int i = imported_s.a; + }; + + c(); +} + + +// Test4 for array of stuff. 
+@class B; +void arr4() { + struct S { + struct s0 { + __unsafe_unretained id s_f0; + __unsafe_unretained id s_f1; + } f0; + + __unsafe_unretained id f1; + + struct s1 { + int *f0; + __unsafe_unretained B *f1; + } f4[2][2]; + } captured_s; + +/** +block variable layout: BL_UNRETAINE:3, + BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, + BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, + BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, + BL_NON_OBJECT_WORD:1, BL_UNRETAINE:1, + BL_OPERATOR:0 +*/ +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [10 x i8] + void (^c)() = ^{ + id i = captured_s.f0.s_f1; + }; + + c(); +} + +// Test1 bitfield in cpatured aggregate. +void bf1() { + struct S { + int flag : 25; + int flag1: 7; + int flag2 :1; + int flag3: 7; + int flag4: 24; + } s; + +// block variable layout: BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + int (^c)() = ^{ + return s.flag; + }; + c(); +} + +// Test2 bitfield in cpatured aggregate. +void bf2() { + struct S { + int flag : 1; + } s; + +// block variable layout: BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + int (^c)() = ^{ + return s.flag; + }; + c(); +} + +// Test3 bitfield in cpatured aggregate. +void bf3() { + + struct { + unsigned short _reserved : 16; + + unsigned char _draggedNodesAreDeletable: 1; + unsigned char _draggedOutsideOutlineView : 1; + unsigned char _adapterRespondsTo_addRootPaths : 1; + unsigned char _adapterRespondsTo_moveDataNodes : 1; + unsigned char _adapterRespondsTo_removeRootDataNode : 1; + unsigned char _adapterRespondsTo_doubleClickDataNode : 1; + unsigned char _adapterRespondsTo_selectDataNode : 1; + unsigned char _adapterRespondsTo_textDidEndEditing : 1; + unsigned char _adapterRespondsTo_updateAndSaveRoots : 1; + unsigned char _adapterRespondsTo_askToDeleteRootNodes : 1; + unsigned char _adapterRespondsTo_contextMenuForSelectedNodes : 1; + unsigned char _adapterRespondsTo_pasteboardFilenamesForNodes : 1; + unsigned char _adapterRespondsTo_writeItemsToPasteboard : 1; + unsigned char _adapterRespondsTo_writeItemsToPasteboardXXXX : 1; + + unsigned int _filler : 32; + } _flags; + +// block variable layout: BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + unsigned char (^c)() = ^{ + return _flags._draggedNodesAreDeletable; + }; + + c(); +} + +// Test4 unnamed bitfield +void bf4() { + + struct { + unsigned short _reserved : 16; + + unsigned char _draggedNodesAreDeletable: 1; + unsigned char _draggedOutsideOutlineView : 1; + unsigned char _adapterRespondsTo_addRootPaths : 1; + unsigned char _adapterRespondsTo_moveDataNodes : 1; + unsigned char _adapterRespondsTo_removeRootDataNode : 1; + unsigned char _adapterRespondsTo_doubleClickDataNode : 1; + unsigned char _adapterRespondsTo_selectDataNode : 1; + unsigned char _adapterRespondsTo_textDidEndEditing : 1; + + unsigned long long : 64; + + unsigned char _adapterRespondsTo_updateAndSaveRoots : 1; + unsigned char _adapterRespondsTo_askToDeleteRootNodes : 1; + unsigned char _adapterRespondsTo_contextMenuForSelectedNodes : 1; + unsigned char _adapterRespondsTo_pasteboardFilenamesForNodes : 1; + unsigned char _adapterRespondsTo_writeItemsToPasteboard : 1; + unsigned char _adapterRespondsTo_writeItemsToPasteboardXXXX : 1; + + unsigned int _filler : 32; + } _flags; + +// block variable layout: BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + unsigned char 
(^c)() = ^{ + return _flags._draggedNodesAreDeletable; + }; + + c(); +} + + + +// Test5 unnamed bitfield. +void bf5() { + struct { + unsigned char flag : 1; + unsigned int : 32; + unsigned char flag1 : 1; + } _flags; + +// block variable layout: BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + unsigned char (^c)() = ^{ + return _flags.flag; + }; + + c(); +} + + +// Test6 0 length bitfield. +void bf6() { + struct { + unsigned char flag : 1; + unsigned int : 0; + unsigned char flag1 : 1; + } _flags; + +// block variable layout: BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [1 x i8] zeroinitializer + unsigned char (^c)() = ^{ + return _flags.flag; + }; + + c(); +} + +// Test7 large number of captured variables. +void Test7() { + __weak id wid; + __weak id wid1, wid2, wid3, wid4; + __weak id wid5, wid6, wid7, wid8; + __weak id wid9, wid10, wid11, wid12; + __weak id wid13, wid14, wid15, wid16; + const id bar = (id) opaque_id(); +//block variable layout: BL_STRONG:1, BL_WEAK:16, BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c"0_\00" + void (^b)() = ^{ + x(bar); + x(wid1); + x(wid2); + x(wid3); + x(wid4); + x(wid5); + x(wid6); + x(wid7); + x(wid8); + x(wid9); + x(wid10); + x(wid11); + x(wid12); + x(wid13); + x(wid14); + x(wid15); + x(wid16); + }; +} + + +// Test 8 very large number of captured variables. +void Test8() { +__weak id wid; + __weak id wid1, wid2, wid3, wid4; + __weak id wid5, wid6, wid7, wid8; + __weak id wid9, wid10, wid11, wid12; + __weak id wid13, wid14, wid15, wid16; + __weak id w1, w2, w3, w4; + __weak id w5, w6, w7, w8; + __weak id w9, w10, w11, w12; + __weak id w13, w14, w15, w16; + const id bar = (id) opaque_id(); +// block variable layout: BL_STRONG:1, BL_WEAK:16, BL_WEAK:16, BL_WEAK:1, BL_OPERATOR:0 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] + void (^b)() = ^{ + x(bar); + x(wid1); + x(wid2); + x(wid3); + x(wid4); + x(wid5); + x(wid6); + x(wid7); + x(wid8); + x(wid9); + x(wid10); + x(wid11); + x(wid12); + x(wid13); + x(wid14); + x(wid15); + x(wid16); + x(w1); + x(w2); + x(w3); + x(w4); + x(w5); + x(w6); + x(w7); + x(w8); + x(w9); + x(w10); + x(w11); + x(w12); + x(w13); + x(w14); + x(w15); + x(w16); + x(wid); + }; +} diff --git a/test/CodeGenObjC/arc-exceptions.m b/test/CodeGenObjC/arc-exceptions.m index 5ef5aba..63945e3 100644 --- a/test/CodeGenObjC/arc-exceptions.m +++ b/test/CodeGenObjC/arc-exceptions.m @@ -20,9 +20,8 @@ void test0(void) { // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_retain(i8* [[T2]]) nounwind // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to [[ETY]]* // CHECK-NEXT: store [[ETY]]* [[T4]], [[ETY]]** [[E]] -// CHECK-NEXT: [[T0:%.*]] = load [[ETY]]** [[E]] -// CHECK-NEXT: [[T1:%.*]] = bitcast [[ETY]]* [[T0]] to i8* -// CHECK-NEXT: call void @objc_release(i8* [[T1]]) nounwind +// CHECK-NEXT: [[T0:%.*]] = bitcast [[ETY]]** [[E]] to i8** +// CHECK-NEXT: call void @objc_storeStrong(i8** [[T0]], i8* null) nounwind // CHECK-NEXT: call void @objc_end_catch() nounwind void test1_helper(void); diff --git a/test/CodeGenObjC/arc-foreach.m b/test/CodeGenObjC/arc-foreach.m index 67fad4d..b8d2d30 100644 --- a/test/CodeGenObjC/arc-foreach.m +++ b/test/CodeGenObjC/arc-foreach.m @@ -67,8 +67,7 @@ void test0(NSArray *array) { // CHECK-LP64-NEXT: store i8* [[T2]], i8** [[T0]] // CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[BLOCK_T]]* [[BLOCK]] // CHECK-LP64: call void @use_block( -// CHECK-LP64-NEXT: [[T1:%.*]] = load i8** 
[[D0]] -// CHECK-LP64-NEXT: call void @objc_release(i8* [[T1]]) +// CHECK-LP64-NEXT: call void @objc_storeStrong(i8** [[D0]], i8* null) // CHECK-LP64: [[T0:%.*]] = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_ // CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[ARRAY_T]]* [[SAVED_ARRAY]] to i8* @@ -79,9 +78,8 @@ void test0(NSArray *array) { // CHECK-LP64-NEXT: call void @objc_release(i8* [[T0]]) // Destroy 'array'. -// CHECK-LP64: [[T0:%.*]] = load [[ARRAY_T]]** [[ARRAY]] -// CHECK-LP64-NEXT: [[T1:%.*]] = bitcast [[ARRAY_T]]* [[T0]] to i8* -// CHECK-LP64-NEXT: call void @objc_release(i8* [[T1]]) +// CHECK-LP64: [[T0:%.*]] = bitcast [[ARRAY_T]]** [[ARRAY]] to i8** +// CHECK-LP64-NEXT: call void @objc_storeStrong(i8** [[T0]], i8* null) // CHECK-LP64-NEXT: ret void // CHECK-LP64: define internal void @__test0_block_invoke diff --git a/test/CodeGenObjC/arc-ivar-layout.m b/test/CodeGenObjC/arc-ivar-layout.m index 7f58a0c..9068314 100644 --- a/test/CodeGenObjC/arc-ivar-layout.m +++ b/test/CodeGenObjC/arc-ivar-layout.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fobjc-arc -fobjc-runtime-has-weak -triple x86_64-apple-darwin -O0 -S %s -o %t-64.s // RUN: FileCheck -check-prefix LP64 --input-file=%t-64.s %s +// REQUIRES: x86-64-registered-target // rdar://8991729 @interface NSObject { diff --git a/test/CodeGenObjC/arc-no-runtime.m b/test/CodeGenObjC/arc-no-runtime.m index 3c85e87..f5f5b90 100644 --- a/test/CodeGenObjC/arc-no-runtime.m +++ b/test/CodeGenObjC/arc-no-runtime.m @@ -1,9 +1,13 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fobjc-arc -emit-llvm %s -o - | FileCheck %s // rdar://problem/9224855 +id make(void) __attribute__((ns_returns_retained)); void test0() { + make(); id x = 0; // CHECK: call void @objc_release( + // CHECK: call void @objc_storeStrong( } // CHECK: declare extern_weak void @objc_release( +// CHECK: declare extern_weak void @objc_storeStrong( diff --git a/test/CodeGenObjC/arc-property.m b/test/CodeGenObjC/arc-property.m index 6c5180b..db00e36 100644 --- a/test/CodeGenObjC/arc-property.m +++ b/test/CodeGenObjC/arc-property.m @@ -11,5 +11,77 @@ void test0(Test0 *t0, id value) { // CHECK: call i8* @objc_retain( // CHECK: call i8* @objc_retain( // CHECK: @objc_msgSend -// CHECK: call void @objc_release( -// CHECK: call void @objc_release( +// CHECK: call void @objc_storeStrong( +// CHECK: call void @objc_storeStrong( + +struct S1 { Class isa; }; +@interface Test1 +@property (nonatomic, strong) __attribute__((NSObject)) struct S1 *pointer; +@end +@implementation Test1 +@synthesize pointer; +@end +// The getter should be a simple load. +// CHECK: define internal [[S1:%.*]]* @"\01-[Test1 pointer]"( +// CHECK: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test1.pointer" +// CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST1:%.*]]* {{%.*}} to i8* +// CHECK-NEXT: [[T1:%.*]] = getelementptr inbounds i8* [[T0]], i64 [[OFFSET]] +// CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[S1]]** +// CHECK-NEXT: [[T3:%.*]] = load [[S1]]** [[T2]], align 8 +// CHECK-NEXT: ret [[S1]]* [[T3]] + +// The setter should be using objc_setProperty. 
+// CHECK: define internal void @"\01-[Test1 setPointer:]"( +// CHECK: [[T0:%.*]] = bitcast [[TEST1]]* {{%.*}} to i8* +// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test1.pointer" +// CHECK-NEXT: [[T1:%.*]] = load [[S1]]** {{%.*}} +// CHECK-NEXT: [[T2:%.*]] = bitcast [[S1]]* [[T1]] to i8* +// CHECK-NEXT: call void @objc_setProperty(i8* [[T0]], i8* {{%.*}}, i64 [[OFFSET]], i8* [[T2]], i1 zeroext false, i1 zeroext false) +// CHECK-NEXT: ret void + + +// rdar://problem/12039404 +@interface Test2 { +@private + Class _theClass; +} +@property (copy) Class theClass; +@end + +static Class theGlobalClass; +@implementation Test2 +@synthesize theClass = _theClass; +- (void) test { + _theClass = theGlobalClass; +} +@end +// CHECK: define internal void @"\01-[Test2 test]"( +// CHECK: [[T0:%.*]] = load i8** @theGlobalClass, align 8 +// CHECK-NEXT: [[T1:%.*]] = load [[TEST2:%.*]]** +// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2._theClass" +// CHECK-NEXT: [[T2:%.*]] = bitcast [[TEST2]]* [[T1]] to i8* +// CHECK-NEXT: [[T3:%.*]] = getelementptr inbounds i8* [[T2]], i64 [[OFFSET]] +// CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to i8** +// CHECK-NEXT: call void @objc_storeStrong(i8** [[T4]], i8* [[T0]]) nounwind +// CHECK-NEXT: ret void + +// CHECK: define internal i8* @"\01-[Test2 theClass]"( +// CHECK: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2._theClass" +// CHECK-NEXT: [[T0:%.*]] = call i8* @objc_getProperty(i8* {{.*}}, i8* {{.*}}, i64 [[OFFSET]], i1 zeroext true) +// CHECK-NEXT: ret i8* [[T0]] + +// CHECK: define internal void @"\01-[Test2 setTheClass:]"( +// CHECK: [[T0:%.*]] = bitcast [[TEST2]]* {{%.*}} to i8* +// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2._theClass" +// CHECK-NEXT: [[T1:%.*]] = load i8** {{%.*}} +// CHECK-NEXT: call void @objc_setProperty(i8* [[T0]], i8* {{%.*}}, i64 [[OFFSET]], i8* [[T1]], i1 zeroext true, i1 zeroext true) +// CHECK-NEXT: ret void + +// CHECK: define internal void @"\01-[Test2 .cxx_destruct]"( +// CHECK: [[T0:%.*]] = load [[TEST2]]** +// CHECK-NEXT: [[OFFSET:%.*]] = load i64* @"OBJC_IVAR_$_Test2._theClass" +// CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST2]]* [[T0]] to i8* +// CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds i8* [[T1]], i64 [[OFFSET]] +// CHECK-NEXT: [[T3:%.*]] = bitcast i8* [[T2]] to i8** +// CHECK-NEXT: call void @objc_storeStrong(i8** [[T3]], i8* null) nounwind +// CHECK-NEXT: ret void diff --git a/test/CodeGenObjC/arc-related-result-type.m b/test/CodeGenObjC/arc-related-result-type.m index f73aa50..ee0a41d 100644 --- a/test/CodeGenObjC/arc-related-result-type.m +++ b/test/CodeGenObjC/arc-related-result-type.m @@ -20,11 +20,9 @@ void test0(Test0 *val) { // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) // CHECK-NEXT: [[T2:%.*]] = bitcast i8* [[T1]] to [[TEST0]]* // CHECK-NEXT: store [[TEST0]]* [[T2]], [[TEST0]]** [[X]] -// CHECK-NEXT: [[T0:%.*]] = load [[TEST0]]** [[X]] -// CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST0]]* [[T0]] to i8* -// CHECK-NEXT: call void @objc_release(i8* [[T1]]) -// CHECK-NEXT: [[T0:%.*]] = load [[TEST0]]** [[VAL]] -// CHECK-NEXT: [[T1:%.*]] = bitcast [[TEST0]]* [[T0]] to i8* -// CHECK-NEXT: call void @objc_release(i8* [[T1]]) +// CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST0]]** [[X]] to i8** +// CHECK-NEXT: call void @objc_storeStrong(i8** [[T0]], i8* null) +// CHECK-NEXT: [[T0:%.*]] = bitcast [[TEST0]]** [[VAL]] to i8** +// CHECK-NEXT: call void @objc_storeStrong(i8** [[T0]], i8* null) // CHECK-NEXT: ret void } diff --git a/test/CodeGenObjC/arc.m 
b/test/CodeGenObjC/arc.m index 66a6a2f..8e38019 100644 --- a/test/CodeGenObjC/arc.m +++ b/test/CodeGenObjC/arc.m @@ -602,7 +602,10 @@ void test22(_Bool cond) { // rdar://problem/8922540 // Note that we no longer emit .release_ivars flags. -// CHECK-GLOBALS: @"\01l_OBJC_CLASS_RO_$_Test23" = internal global [[RO_T:%.*]] { i32 134, +// rdar://problem/12492434 +// Note that we set the flag saying that we need destruction *and* +// the flag saying that we don't also need construction. +// CHECK-GLOBALS: @"\01l_OBJC_CLASS_RO_$_Test23" = internal global [[RO_T:%.*]] { i32 390, @interface Test23 { id x; } @end @implementation Test23 @end @@ -1358,9 +1361,9 @@ void test59(void) { // CHECK: define void @test59() // CHECK: [[T0:%.*]] = call i8* @test59_getlock() // CHECK-NEXT: [[T1:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[T0]]) - // CHECK-NEXT: call void @objc_sync_enter(i8* [[T1]]) + // CHECK-NEXT: call i32 @objc_sync_enter(i8* [[T1]]) // CHECK-NEXT: call void @test59_body() - // CHECK-NEXT: call void @objc_sync_exit(i8* [[T1]]) + // CHECK-NEXT: call i32 @objc_sync_exit(i8* [[T1]]) // CHECK-NEXT: call void @objc_release(i8* [[T1]]) // CHECK-NEXT: ret void } diff --git a/test/CodeGenObjC/atomic-aggregate-property.m b/test/CodeGenObjC/atomic-aggregate-property.m index 978299b..878255b 100644 --- a/test/CodeGenObjC/atomic-aggregate-property.m +++ b/test/CodeGenObjC/atomic-aggregate-property.m @@ -1,6 +1,7 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fobjc-gc -emit-llvm -o - %s | FileCheck -check-prefix LP64 %s // RUN: %clang_cc1 -x objective-c++ -triple x86_64-apple-darwin10 -fobjc-gc -emit-llvm -o - %s | FileCheck -check-prefix LP64 %s // rdar: // 7849824 +// struct s { double a, b, c, d; @@ -12,16 +13,20 @@ struct s1 { id k; }; +struct s2 {}; + @interface A @property (readwrite) double x; @property (readwrite) struct s y; @property (nonatomic, readwrite) struct s1 z; +@property (readwrite) struct s2 a; @end @implementation A @synthesize x; @synthesize y; @synthesize z; +@synthesize a; @end // CHECK-LP64: define internal double @"\01-[A x]"( // CHECK-LP64: load atomic i64* {{%.*}} unordered, align 8 @@ -40,3 +45,9 @@ struct s1 { // CHECK-LP64: define internal void @"\01-[A setZ:]"( // CHECK-LP64: call i8* @objc_memmove_collectable( + +// CHECK-LP64: define internal void @"\01-[A a]"( +// (do nothing) + +// CHECK-LP64: define internal void @"\01-[A setA:]"( +// (do nothing) diff --git a/test/CodeGenObjC/attr-minsize.m b/test/CodeGenObjC/attr-minsize.m new file mode 100644 index 0000000..f46107e --- /dev/null +++ b/test/CodeGenObjC/attr-minsize.m @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s + +@interface Test +- (void)test; +@end + +@implementation Test +- (void)test __attribute__((minsize)) { + // CHECK: define{{.*}}Test test + // CHECK: minsize +} +@end diff --git a/test/CodeGenObjC/block-var-layout.m b/test/CodeGenObjC/block-var-layout.m index c8065be..71b14da 100644 --- a/test/CodeGenObjC/block-var-layout.m +++ b/test/CodeGenObjC/block-var-layout.m @@ -90,7 +90,7 @@ void f() { // Test 4 // struct S (int, id, int, id, int, id) -// 01 41 11 11 +// 01 41 11 11 00 // CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] c"\01A\11\11\00" struct S s2; void (^e)() = ^{ @@ -128,8 +128,8 @@ void Test5() { union U u2; // struct s2 (int, id, int, id, int, id?), union u2 (id?) 
-// 01 41 11 12 70 00 -// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [6 x i8] c"\01A\11\12p\00" +// 01 41 11 12 00 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] c"\01A\11\12\00" void (^c)() = ^{ x(s2.ui.o1); x(u2.o1); diff --git a/test/CodeGenObjC/builtin-memfns.m b/test/CodeGenObjC/builtin-memfns.m new file mode 100644 index 0000000..bb425c0 --- /dev/null +++ b/test/CodeGenObjC/builtin-memfns.m @@ -0,0 +1,10 @@ +// RUN: %clang_cc1 -triple x86_64-apple-macosx10.8.0 -emit-llvm -o - %s | FileCheck %s + +void *memcpy(void *restrict s1, const void *restrict s2, unsigned long n); + +// PR13697 +void test1(int *a, id b) { + // CHECK: @test1 + // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 8, i32 1, i1 false) + memcpy(a, b, 8); +} diff --git a/test/CodeGenObjC/category-super-class-meth.m b/test/CodeGenObjC/category-super-class-meth.m index 6f02aff..5ba7adf 100644 --- a/test/CodeGenObjC/category-super-class-meth.m +++ b/test/CodeGenObjC/category-super-class-meth.m @@ -1,19 +1,29 @@ -// RUN: %clang_cc1 -emit-llvm -o %t %s - -@interface BASE -+ (int) BaseMeth; +// RUN: %clang_cc1 %s -emit-llvm -triple x86_64-apple-darwin -o - | FileCheck %s +// rdar://12459358 +@interface NSObject +-(id)copy; ++(id)copy; @end -@interface Child: BASE -@end +@interface Sub1 : NSObject @end -@interface Child (Categ) -+ (int) flushCache2; +@implementation Sub1 +-(id)copy { return [super copy]; } // ok: instance method in class ++(id)copy { return [super copy]; } // ok: class method in class @end -@implementation Child @end +@interface Sub2 : NSObject @end + +@interface Sub2 (Category) @end -@implementation Child (Categ) -+ (int) flushCache2 { [super BaseMeth]; } +@implementation Sub2 (Category) +-(id)copy { return [super copy]; } // ok: instance method in category ++(id)copy { return [super copy]; } // BAD: class method in category @end +// CHECK: define internal i8* @"\01+[Sub2(Category) copy] +// CHECK: [[ONE:%.*]] = load %struct._class_t** @"\01L_OBJC_CLASSLIST_SUP_REFS_$_3" +// CHECK: [[TWO:%.*]] = bitcast %struct._class_t* [[ONE]] to i8* +// CHECK: [[THREE:%.*]] = getelementptr inbounds %struct._objc_super* [[OBJC_SUPER:%.*]], i32 0, i32 1 +// CHECK: store i8* [[TWO]], i8** [[THREE]] +// CHECK: [[FOUR:%.*]] = load i8** @"\01L_OBJC_SELECTOR_REFERENCES_" diff --git a/test/CodeGenObjC/debug-info-crash-2.m b/test/CodeGenObjC/debug-info-crash-2.m index a2acd9d..7d05f53 100644 --- a/test/CodeGenObjC/debug-info-crash-2.m +++ b/test/CodeGenObjC/debug-info-crash-2.m @@ -1,4 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin11 -g -S %s -o - +// REQUIRES: x86-64-registered-target + @class Bar; @interface Foo @property (strong, nonatomic) Bar *window; diff --git a/test/CodeGenObjC/debug-info-fwddecl.m b/test/CodeGenObjC/debug-info-fwddecl.m index ebdc5f2..8f2860c 100644 --- a/test/CodeGenObjC/debug-info-fwddecl.m +++ b/test/CodeGenObjC/debug-info-fwddecl.m @@ -2,4 +2,4 @@ @class ForwardObjcClass; ForwardObjcClass *ptr = 0; -// CHECK: metadata !{i32 {{.*}}, null, metadata !"ForwardObjcClass", metadata !{{.*}}, i32 2, i32 0, i32 0, i32 0, i32 4, null, null, i32 16} ; [ DW_TAG_structure_type ] +// CHECK: metadata !{i32 {{.*}}, null, metadata !"ForwardObjcClass", metadata !{{.*}}, i32 2, i64 0, i64 0, i32 0, i32 4, null, null, i32 16} ; [ DW_TAG_structure_type ] diff --git a/test/CodeGenObjC/debug-info-impl.m b/test/CodeGenObjC/debug-info-impl.m index 8ad5903..a8450dd 100644 --- a/test/CodeGenObjC/debug-info-impl.m +++ 
b/test/CodeGenObjC/debug-info-impl.m @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -g -S -emit-llvm %s -o - | FileCheck %s -// CHECK: metadata !{i32 {{.*}}, metadata {{.*}}, metadata !"Circle", metadata {{.*}}, i32 11, i64 64, i64 64, i32 0, i32 512, null, metadata {{.*}}, i32 16, i32 0} ; [ DW_TAG_structure_type ] +// CHECK: metadata !{i32 {{.*}}, metadata {{.*}}, metadata !"Circle", metadata {{.*}}, i32 11, i64 64, i64 64, i32 0, i32 512, null, metadata {{.*}}, i32 16, i32 0, i32 0} ; [ DW_TAG_structure_type ] @interface NSObject { struct objc_object *isa; } diff --git a/test/CodeGenObjC/debug-info-ivars.m b/test/CodeGenObjC/debug-info-ivars.m new file mode 100644 index 0000000..24705e1a --- /dev/null +++ b/test/CodeGenObjC/debug-info-ivars.m @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -g %s -o - | FileCheck %s + +__attribute((objc_root_class)) @interface NSObject { + id isa; +} +@end + +@interface BaseClass : NSObject +{ + int i; + unsigned flag_1 : 9; + unsigned flag_2 : 9; + unsigned : 1; + unsigned flag_3 : 9; +} +@end + +@implementation BaseClass +@end + +// CHECK: metadata !{i32 786445, metadata !{{[0-9]*}}, metadata !"i", metadata !{{[0-9]*}}, i32 10, i64 32, i64 32, i64 0, i32 2, metadata !{{[0-9]*}}, null} ; [ DW_TAG_member ] [i] [line 10, size 32, align 32, offset 0] [protected] [from int] +// CHECK: metadata !{i32 786445, metadata !{{[0-9]*}}, metadata !"flag_1", metadata !{{[0-9]*}}, i32 11, i64 9, i64 32, i64 0, i32 2, metadata !{{[0-9]*}}, null} ; [ DW_TAG_member ] [flag_1] [line 11, size 9, align 32, offset 0] [protected] [from unsigned int] +// CHECK: metadata !{i32 786445, metadata !{{[0-9]*}}, metadata !"flag_2", metadata !{{[0-9]*}}, i32 12, i64 9, i64 32, i64 1, i32 2, metadata !{{[0-9]*}}, null} ; [ DW_TAG_member ] [flag_2] [line 12, size 9, align 32, offset 1] [protected] [from unsigned int] +// CHECK: metadata !{i32 786445, metadata !{{[0-9]*}}, metadata !"flag_3", metadata !{{[0-9]*}}, i32 14, i64 9, i64 32, i64 3, i32 2, metadata !{{[0-9]*}}, null} ; [ DW_TAG_member ] [flag_3] [line 14, size 9, align 32, offset 3] [protected] [from unsigned int] \ No newline at end of file diff --git a/test/CodeGenObjC/debug-info-pubtypes.m b/test/CodeGenObjC/debug-info-pubtypes.m index 658430d..91d9cd1 100644 --- a/test/CodeGenObjC/debug-info-pubtypes.m +++ b/test/CodeGenObjC/debug-info-pubtypes.m @@ -1,7 +1,7 @@ // REQUIRES: x86-64-registered-target -// RUN: %clang -cc1 -triple x86_64-apple-darwin10 -g -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -g -emit-llvm %s -o - | FileCheck %s -// CHECK: !5 = metadata !{i32 {{.*}}, metadata !6, metadata !"H", metadata !6, i32 6, i64 0, i64 8, i32 0, i32 512, null, metadata !2, i32 16, i32 0} ; [ DW_TAG_structure_type ] +// CHECK: !5 = metadata !{i32 {{.*}}, metadata !6, metadata !"H", metadata !6, i32 6, i64 0, i64 8, i32 0, i32 512, null, metadata !2, i32 16, i32 0, i32 0} ; [ DW_TAG_structure_type ] @interface H -(void) foo; diff --git a/test/CodeGenObjC/debug-info-self.m b/test/CodeGenObjC/debug-info-self.m index 9146ab3..9f23435 100644 --- a/test/CodeGenObjC/debug-info-self.m +++ b/test/CodeGenObjC/debug-info-self.m @@ -1,8 +1,12 @@ -// RUN: %clang -fverbose-asm -g -S %s -o - | grep DW_AT_artificial | count 3 +// RUN: %clang_cc1 -emit-llvm -g %s -o - | FileCheck %s // self and _cmd are marked as DW_AT_artificial. -// abbrev code emits another DT_artificial comment. // myarg is not marked as DW_AT_artificial. 
+// CHECK: metadata !{i32 {{.*}}, metadata !9, metadata !"self", metadata !15, i32 16777232, metadata !30, i32 1088, i32 0} ; [ DW_TAG_arg_variable ] [self] [line 16] +// CHECK: metadata !{i32 {{.*}}, metadata !9, metadata !"_cmd", metadata !15, i32 33554448, metadata !33, i32 64, i32 0} ; [ DW_TAG_arg_variable ] [_cmd] [line 16] +// CHECK: metadata !{i32 {{.*}}, metadata !9, metadata !"myarg", metadata !6, i32 50331664, metadata !24, i32 0, i32 0} ; [ DW_TAG_arg_variable ] [myarg] [line 16] + + @interface MyClass { } - (id)init:(int) myarg; diff --git a/test/CodeGenObjC/exceptions.m b/test/CodeGenObjC/exceptions.m index 25780fd..551e67c 100644 --- a/test/CodeGenObjC/exceptions.m +++ b/test/CodeGenObjC/exceptions.m @@ -149,8 +149,8 @@ void f4() { // finally.call-exit: Predecessors are the @try and @catch fallthroughs // as well as the no-match case in the catch mechanism. The i1 is whether // to rethrow and should be true only in the last case. - // CHECK: phi i1 - // CHECK-NEXT: phi i8* + // CHECK: phi i8* + // CHECK-NEXT: phi i1 // CHECK-NEXT: call void @objc_exception_try_exit([[EXNDATA_T]]* [[EXNDATA]]) // CHECK-NEXT: call void @f4_help(i32 2) // CHECK-NEXT: br i1 diff --git a/test/CodeGenObjC/gnu-exceptions.m b/test/CodeGenObjC/gnu-exceptions.m index 141747e..b7d0adb 100644 --- a/test/CodeGenObjC/gnu-exceptions.m +++ b/test/CodeGenObjC/gnu-exceptions.m @@ -20,7 +20,7 @@ void test0() { // CHECK: call void @log(i32 0) - // CHECK: call void @objc_exception_throw + // CHECK: resume log(0); } diff --git a/test/CodeGenObjC/ivar-layout-64.m b/test/CodeGenObjC/ivar-layout-64.m index ea4cdce..91a8375 100644 --- a/test/CodeGenObjC/ivar-layout-64.m +++ b/test/CodeGenObjC/ivar-layout-64.m @@ -1,15 +1,5 @@ -// RUNX: llvm-gcc -m64 -fobjc-gc -emit-llvm -S -o %t %s && -// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fobjc-gc -emit-llvm -o %t %s -// RUN: grep '@"\\01L_OBJC_CLASS_NAME_.*" = internal global .* c"A\\00"' %t -// RUN: grep '@"\\01L_OBJC_CLASS_NAME_.*" = internal global .* c"\\11q\\10\\00"' %t -// RUN: grep '@"\\01L_OBJC_CLASS_NAME_.*" = internal global .* c"!q\\00"' %t -// RUN: grep '@"\\01L_OBJC_CLASS_NAME_.*" = internal global .* c"\\01\\14\\00"' %t -// RUNX: llvm-gcc -ObjC++ -m64 -fobjc-gc -emit-llvm -S -o %t %s && -// RUN: %clang_cc1 -x objective-c++ -triple x86_64-apple-darwin10 -fobjc-gc -emit-llvm -o %t %s -// RUN: grep '@"\\01L_OBJC_CLASS_NAME_.*" = internal global .* c"A\\00"' %t -// RUN: grep '@"\\01L_OBJC_CLASS_NAME_.*" = internal global .* c"\\11q\\10\\00"' %t -// RUN: grep '@"\\01L_OBJC_CLASS_NAME_.*" = internal global .* c"!q\\00"' %t -// RUN: grep '@"\\01L_OBJC_CLASS_NAME_.*" = internal global .* c"\\01\\14\\00"' %t +// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fobjc-gc -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -x objective-c++ -triple x86_64-apple-darwin10 -fobjc-gc -emit-llvm -o - %s | FileCheck %s /* @@ -43,6 +33,11 @@ __weak B *f2; @property int p3; @end +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"C\00" +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"\11p\00" +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"!`\00" + + @implementation C @synthesize p3 = _p3; @end @@ -53,8 +48,10 @@ __weak B *f2; @property (assign) __weak id p2; @end -// FIXME: Check layout for this class, once it is clear what the right -// answer is. 
+// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"A\00" +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"\11q\10\00" +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"!q\00" + @implementation A @synthesize p0 = _p0; @synthesize p1 = _p1; @@ -65,8 +62,10 @@ __weak B *f2; @property int p3; @end -// FIXME: Check layout for this class, once it is clear what the right -// answer is. +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"D\00" +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"\11p\00" +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"!`\00" + @implementation D @synthesize p3 = _p3; @end @@ -90,5 +89,26 @@ typedef unsigned int FSCatalogInfoBitmap; } @end +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"NSFileLocationComponent\00" +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"\01\14\00" + @implementation NSFileLocationComponent @end +@interface NSObject { + id isa; +} +@end + +@interface Foo : NSObject { + id ivar; + + unsigned long bitfield :31; + unsigned long bitfield2 :1; + unsigned long bitfield3 :32; +} +@end + +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"Foo\00" +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global {{.*}} c"\02\10\00" + +@implementation Foo @end diff --git a/test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m b/test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m new file mode 100644 index 0000000..f1e02dd --- /dev/null +++ b/test/CodeGenObjC/mrr-captured-block-var-inlined-layout.m @@ -0,0 +1,65 @@ +// RUN: %clang_cc1 -fblocks -triple x86_64-apple-darwin -O0 -emit-llvm %s -o - | FileCheck %s +// RUN: %clang_cc1 -fblocks -triple i386-apple-darwin -O0 -emit-llvm %s -o - | FileCheck -check-prefix=CHECK-i386 %s +// rdar://12184410 + +void x(id y) {} +void y(int a) {} + +extern id opaque_id(); +__weak id wid; + +void f() { + __block int byref_int = 0; + const id bar = (id) opaque_id(); + id baz = 0; + __strong id strong_void_sta; + __block id byref_bab = (id)0; + __block id bl_var1; + +// block variable layout: BL_UNRETAINED:1, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [2 x i8] c"`\00" +// CHECK-i386: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [2 x i8] c"`\00" + void (^b)() = ^{ + x(bar); + }; + +// block variable layout: BL_UNRETAINED:2, BL_BYREF:1, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c"a@\00" +// CHECK-i386: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c"a@\00" + void (^c)() = ^{ + x(bar); + x(baz); + byref_int = 1; + }; + +// block variable layout: BL_UNRETAINED:2, BL_BYREF:3, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c"aB\00 +// CHECK-i386: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c"aB\00 + void (^d)() = ^{ + x(bar); + x(baz); + byref_int = 1; + bl_var1 = 0; + byref_bab = 0; + }; + +// block variable layout: BL_UNRETAINED:2, BL_BYREF:3, BL_OPERATOR:0 +// CHECK: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c"aB\00" +// CHECK-i386: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [3 x i8] c"aB\00" + id (^e)() = ^{ + x(bar); + x(baz); + byref_int = 1; + bl_var1 = 0; + byref_bab = 0; + return wid; + }; + +// Inline instruction for block variable layout: 0x020 +// CHECK: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, i32 0), i64 32 } +// CHECK-i386: i8* getelementptr inbounds ([6 x i8]* {{@.*}}, i32 0, 
i32 0), i32 32 } + void (^ii)() = ^{ + byref_int = 1; + byref_bab = 0; + }; +} diff --git a/test/CodeGenObjC/newproperty-nested-synthesis-1.m b/test/CodeGenObjC/newproperty-nested-synthesis-1.m index 4831c22..aa0c8c9 100644 --- a/test/CodeGenObjC/newproperty-nested-synthesis-1.m +++ b/test/CodeGenObjC/newproperty-nested-synthesis-1.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -emit-llvm -o %t %s +// REQUIRES: LP64 @interface Object - (id) new; diff --git a/test/CodeGenObjC/objc-arc-container-subscripting.m b/test/CodeGenObjC/objc-arc-container-subscripting.m index 8924916..71339c7 100644 --- a/test/CodeGenObjC/objc-arc-container-subscripting.m +++ b/test/CodeGenObjC/objc-arc-container-subscripting.m @@ -13,9 +13,8 @@ id func() { // CHECK: [[call:%.*]] = call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend // CHECK: [[SIX:%.*]] = call i8* @objc_retainAutoreleasedReturnValue(i8* [[call]]) nounwind -// CHECK: [[ARRAY:%.*]] = load %0** -// CHECK: [[ARRAY_CASTED:%.*]] = bitcast{{.*}}[[ARRAY]] to i8* -// CHECK: call void @objc_release(i8* [[ARRAY_CASTED]]) +// CHECK: [[ARRAY_CASTED:%.*]] = bitcast %0** {{%.*}} to i8** +// CHECK: call void @objc_storeStrong(i8** [[ARRAY_CASTED]], i8* null) // CHECK: [[EIGHT:%.*]] = call i8* @objc_autoreleaseReturnValue(i8* [[SIX]]) nounwind // CHECK: ret i8* [[EIGHT]] diff --git a/test/CodeGenObjC/objc2-ivar-assign.m b/test/CodeGenObjC/objc2-ivar-assign.m index af76800..05a7b35 100644 --- a/test/CodeGenObjC/objc2-ivar-assign.m +++ b/test/CodeGenObjC/objc2-ivar-assign.m @@ -1,6 +1,9 @@ // RUN: %clang_cc1 -fobjc-gc -emit-llvm -o %t %s // RUN: grep objc_assign_ivar %t | count 6 +// PR13820 +// REQUIRES: LP64 + @interface I @end typedef I TI; diff --git a/test/CodeGenObjC/optimized-setter-ios-device.m b/test/CodeGenObjC/optimized-setter-ios-device.m new file mode 100644 index 0000000..6fa322a --- /dev/null +++ b/test/CodeGenObjC/optimized-setter-ios-device.m @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 %s -emit-llvm -fobjc-runtime=ios-6.0.0 -triple thumbv7-apple-ios6.0.0 -o - | FileCheck %s +// rdar://11915017 + +@interface I +// void objc_setProperty_nonatomic(id self, SEL _cmd, id newValue, ptrdiff_t offset); +// objc_setProperty(..., NO, NO) +@property (nonatomic, retain) id nonatomicProperty; + +// void objc_setProperty_nonatomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset); +// objc_setProperty(..., NO, YES) +@property (nonatomic, copy) id nonatomicPropertyCopy; + +// void objc_setProperty_atomic(id self, SEL _cmd, id newValue, ptrdiff_t offset); +// objc_setProperty(..., YES, NO) +@property (retain) id atomicProperty; + +// void objc_setProperty_atomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset); +// objc_setProperty(..., YES, YES) +@property (copy) id atomicPropertyCopy; +@end + +@implementation I +@synthesize nonatomicProperty; +@synthesize nonatomicPropertyCopy; +@synthesize atomicProperty; +@synthesize atomicPropertyCopy; +@end + +// CHECK: call arm_aapcscc void @objc_setProperty_nonatomic +// CHECK: call arm_aapcscc void @objc_setProperty_nonatomic_copy +// CHECK: call arm_aapcscc void @objc_setProperty_atomic +// CHECK: call arm_aapcscc void @objc_setProperty_atomic_copy + diff --git a/test/CodeGenObjC/optimized-setter.m b/test/CodeGenObjC/optimized-setter.m index 0e1b388..6f5cfb1 100644 --- a/test/CodeGenObjC/optimized-setter.m +++ b/test/CodeGenObjC/optimized-setter.m @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 %s -emit-llvm -triple x86_64-apple-macosx10.8.0 -o - | FileCheck %s +// RUN: %clang_cc1 %s -emit-llvm -fobjc-runtime=macosx-10.8 -triple 
x86_64-apple-macosx10.8.0 -o - | FileCheck %s +// RUN: %clang_cc1 %s -emit-llvm -fobjc-runtime=ios-6.0.0 -triple x86_64-apple-ios6.0.0 -o - | FileCheck %s // rdar://10179974 @interface I diff --git a/test/CodeGenObjC/prop-metadata-gnu.m b/test/CodeGenObjC/prop-metadata-gnu.m new file mode 100644 index 0000000..c15d978 --- /dev/null +++ b/test/CodeGenObjC/prop-metadata-gnu.m @@ -0,0 +1,15 @@ +// RUN: %clang -S -emit-llvm %s -o - -x objective-c -fobjc-runtime=gcc | FileCheck --check-prefix=GCC %s +// RUN: %clang -S -emit-llvm %s -o - -x objective-c -fobjc-runtime=gnustep-1.5 | FileCheck --check-prefix=GCC %s +// RUN: %clang -S -emit-llvm %s -o - -x objective-c -fobjc-runtime=gnustep-1.6 | FileCheck --check-prefix=GNUSTEP %s +// +@interface helloclass { +@private int varName; +} +@property (readwrite,assign) int propName; +@end + +@implementation helloclass +@synthesize propName = varName; +@end +// GCC-NOT: Ti,VvarName +// GNUSTEP: Ti,VvarName diff --git a/test/CodeGenObjC/property.m b/test/CodeGenObjC/property.m index 16881d6..aab7c73 100644 --- a/test/CodeGenObjC/property.m +++ b/test/CodeGenObjC/property.m @@ -1,4 +1,7 @@ -// RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple i386-unknown-unknown -emit-llvm -o - %s | FileCheck %s + +// PR13820 +// REQUIRES: LP64 // TODO: actually test most of this instead of just emitting it diff --git a/test/CodeGenObjC/synchronized.m b/test/CodeGenObjC/synchronized.m index 1f01282..e927882 100644 --- a/test/CodeGenObjC/synchronized.m +++ b/test/CodeGenObjC/synchronized.m @@ -11,7 +11,7 @@ // CHECK: define internal void @"\01-[MyClass method]" - (void)method { - // CHECK: call void @objc_sync_enter + // CHECK: call i32 @objc_sync_enter // CHECK: call void @objc_exception_try_enter // CHECK: call i32 @_setjmp @synchronized(self) { @@ -26,21 +26,21 @@ void foo(id a) { // CHECK: [[SYNC:%.*]] = alloca i8* // CHECK: store i8* [[AVAL:%.*]], i8** [[A]] - // CHECK-NEXT: call void @objc_sync_enter(i8* [[AVAL]]) + // CHECK-NEXT: call i32 @objc_sync_enter(i8* [[AVAL]]) // CHECK-NEXT: store i8* [[AVAL]], i8** [[SYNC]] // CHECK-NEXT: call void @objc_exception_try_enter // CHECK: call i32 @_setjmp @synchronized(a) { // This is unreachable, but the optimizers can't know that. 
// CHECK: call void asm sideeffect "", "=*m,=*m,=*m"(i8** [[A]], i8** [[SYNC]] - // CHECK: call void @objc_sync_exit + // CHECK: call i32 @objc_sync_exit // CHECK: call i8* @objc_exception_extract // CHECK: call void @objc_exception_throw // CHECK: unreachable // CHECK: call void @objc_exception_try_exit // CHECK: [[T:%.*]] = load i8** [[SYNC]] - // CHECK-NEXT: call void @objc_sync_exit + // CHECK-NEXT: call i32 @objc_sync_exit // CHECK: ret void return; } diff --git a/test/CodeGenObjC/synthesize_ivar-cont-class.m b/test/CodeGenObjC/synthesize_ivar-cont-class.m index 6bc7ac8..9822702 100644 --- a/test/CodeGenObjC/synthesize_ivar-cont-class.m +++ b/test/CodeGenObjC/synthesize_ivar-cont-class.m @@ -1,6 +1,9 @@ // RUN: %clang_cc1 -emit-llvm -o %t %s // RUN: grep '@"OBJC_IVAR_$_XCOrganizerDeviceNodeInfo.viewController"' %t +// PR13820 +// REQUIRES: LP64 + @interface XCOrganizerNodeInfo @property (readonly, retain) id viewController; @end diff --git a/test/CodeGenObjC/synthesize_ivar.m b/test/CodeGenObjC/synthesize_ivar.m index e4fbe10..92f6096 100644 --- a/test/CodeGenObjC/synthesize_ivar.m +++ b/test/CodeGenObjC/synthesize_ivar.m @@ -1,5 +1,8 @@ // RUN: %clang_cc1 -emit-llvm -o %t %s +// PR13820 +// REQUIRES: LP64 + @interface I @property int IP; @end diff --git a/test/CodeGenObjC/undefined-protocol.m b/test/CodeGenObjC/undefined-protocol.m index e5a72ab..78cf8da 100644 --- a/test/CodeGenObjC/undefined-protocol.m +++ b/test/CodeGenObjC/undefined-protocol.m @@ -1,5 +1,8 @@ // RUN: %clang_cc1 -emit-llvm-only -fobjc-runtime=gcc %s +// PR13820 +// REQUIRES: LP64 + @protocol MadeUpProtocol; @interface Object @end diff --git a/test/CodeGenObjC/unoptimized-setter.m b/test/CodeGenObjC/unoptimized-setter.m new file mode 100644 index 0000000..adcf087 --- /dev/null +++ b/test/CodeGenObjC/unoptimized-setter.m @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 %s -emit-llvm -fobjc-runtime=macosx-10.6.0 -triple x86_64-apple-macosx10.6.0 -o - | FileCheck %s +// rdar://11858187 + +@interface I +// void objc_setProperty_nonatomic(id self, SEL _cmd, id newValue, ptrdiff_t offset); +// objc_setProperty(..., NO, NO) +@property (nonatomic, retain) id nonatomicProperty; + +// void objc_setProperty_nonatomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset); +// objc_setProperty(..., NO, YES) +@property (nonatomic, copy) id nonatomicPropertyCopy; + +// void objc_setProperty_atomic(id self, SEL _cmd, id newValue, ptrdiff_t offset); +// objc_setProperty(..., YES, NO) +@property (retain) id atomicProperty; + +// void objc_setProperty_atomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset); +// objc_setProperty(..., YES, YES) +@property (copy) id atomicPropertyCopy; +@end + +@implementation I +@synthesize nonatomicProperty; +@synthesize nonatomicPropertyCopy; +@synthesize atomicProperty; +@synthesize atomicPropertyCopy; +@end + +// CHECK-NOT: call void @objc_setProperty_nonatomic +// CHECK-NOT: call void @objc_setProperty_nonatomic_copy +// CHECK-NOT: call void @objc_setProperty_atomic +// CHECK-NOT: call void @objc_setProperty_atomic_copy diff --git a/test/CodeGenObjCXX/address-safety-attr.mm b/test/CodeGenObjCXX/address-safety-attr.mm index a54ca99..a3824b9 100644 --- a/test/CodeGenObjCXX/address-safety-attr.mm +++ b/test/CodeGenObjCXX/address-safety-attr.mm @@ -1,5 +1,5 @@ // RUN: %clang_cc1 -emit-llvm -o - %s | FileCheck %s -// RUN: %clang_cc1 -emit-llvm -o - %s -faddress-sanitizer | FileCheck -check-prefix ASAN %s +// RUN: %clang_cc1 -emit-llvm -o - %s -fsanitize=address | FileCheck -check-prefix ASAN %s @interface 
MyClass + (int) addressSafety:(int*)a; diff --git a/test/CodeGenObjCXX/arc-exceptions.mm b/test/CodeGenObjCXX/arc-exceptions.mm index b1fa8ca..fb5300d 100644 --- a/test/CodeGenObjCXX/arc-exceptions.mm +++ b/test/CodeGenObjCXX/arc-exceptions.mm @@ -20,9 +20,8 @@ void test0(void) { // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_retain(i8* [[T2]]) nounwind // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to [[ETY]]* // CHECK-NEXT: store [[ETY]]* [[T4]], [[ETY]]** [[E]] -// CHECK-NEXT: [[T0:%.*]] = load [[ETY]]** [[E]] -// CHECK-NEXT: [[T1:%.*]] = bitcast [[ETY]]* [[T0]] to i8* -// CHECK-NEXT: call void @objc_release(i8* [[T1]]) nounwind +// CHECK-NEXT: [[T0:%.*]] = bitcast [[ETY]]** [[E]] to i8** +// CHECK-NEXT: call void @objc_storeStrong(i8** [[T0]], i8* null) nounwind // CHECK-NEXT: call void @objc_end_catch() nounwind void test1_helper(void); @@ -60,9 +59,8 @@ void test2(void) { // CHECK-NEXT: [[T3:%.*]] = call i8* @objc_retain(i8* [[T2]]) nounwind // CHECK-NEXT: [[T4:%.*]] = bitcast i8* [[T3]] to [[ETY]]* // CHECK-NEXT: store [[ETY]]* [[T4]], [[ETY]]** [[E]] -// CHECK-NEXT: [[T0:%.*]] = load [[ETY]]** [[E]] -// CHECK-NEXT: [[T1:%.*]] = bitcast [[ETY]]* [[T0]] to i8* -// CHECK-NEXT: call void @objc_release(i8* [[T1]]) nounwind +// CHECK-NEXT: [[T0:%.*]] = bitcast [[ETY]]** [[E]] to i8** +// CHECK-NEXT: call void @objc_storeStrong(i8** [[T0]], i8* null) nounwind // CHECK-NEXT: call void @__cxa_end_catch() nounwind void test3_helper(void); diff --git a/test/CodeGenObjCXX/arc-new-delete.mm b/test/CodeGenObjCXX/arc-new-delete.mm index a778bca..ce7eb3d 100644 --- a/test/CodeGenObjCXX/arc-new-delete.mm +++ b/test/CodeGenObjCXX/arc-new-delete.mm @@ -35,7 +35,7 @@ void test_new(id invalue) { // CHECK: call i8* @objc_initWeak new __weak id(invalue); - // CHECK: call void @objc_release + // CHECK: call void @objc_storeStrong // CHECK: ret void } @@ -76,8 +76,7 @@ void test_array_delete(__strong id *sptr, __weak id *wptr) { // CHECK-NEXT: icmp eq i8** [[BEGIN]], [[END]] // CHECK: [[PAST:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]], // CHECK-NEXT: [[CUR]] = getelementptr inbounds i8** [[PAST]], i64 -1 - // CHECK-NEXT: [[T0:%.*]] = load i8** [[CUR]] - // CHECK-NEXT: call void @objc_release(i8* [[T0]]) + // CHECK-NEXT: call void @objc_storeStrong(i8** [[CUR]], i8* null) // CHECK-NEXT: icmp eq i8** [[CUR]], [[BEGIN]] // CHECK: call void @_ZdaPv delete [] sptr; diff --git a/test/CodeGenObjCXX/arc-references.mm b/test/CodeGenObjCXX/arc-references.mm index 954c02a..1210774 100644 --- a/test/CodeGenObjCXX/arc-references.mm +++ b/test/CodeGenObjCXX/arc-references.mm @@ -10,7 +10,7 @@ void callee(); // CHECK: define void @_Z5test0v() void test0() { // CHECK: call i8* @_Z9getObjectv - // CHECK-NEXT:: call i8* @objc_retainAutoreleasedReturnValue + // CHECK-NEXT: call i8* @objc_retainAutoreleasedReturnValue const __strong id &ref1 = getObject(); // CHECK: call void @_Z6calleev callee(); diff --git a/test/CodeGenObjCXX/arc-special-member-functions.mm b/test/CodeGenObjCXX/arc-special-member-functions.mm index 421a9fe..0b34538 100644 --- a/test/CodeGenObjCXX/arc-special-member-functions.mm +++ b/test/CodeGenObjCXX/arc-special-member-functions.mm @@ -111,7 +111,7 @@ void test_ObjCBlockMember_copy_assign(ObjCBlockMember m1, ObjCBlockMember m2) { // Implicitly-generated destructor for ObjCBlockMember // CHECK: define linkonce_odr void @_ZN15ObjCBlockMemberD2Ev -// CHECK: call void @objc_release(i8* +// CHECK: call void @objc_storeStrong(i8* // CHECK: ret // Implicitly-generated default constructor for 
ObjCBlockMember @@ -134,8 +134,7 @@ void test_ObjCBlockMember_copy_assign(ObjCBlockMember m1, ObjCBlockMember m2) { // CHECK-NEXT: br label // CHECK: [[PAST:%.*]] = phi i8** [ [[END]], {{%.*}} ], [ [[CUR:%.*]], {{%.*}} ] // CHECK-NEXT: [[CUR]] = getelementptr inbounds i8** [[PAST]], i64 -1 -// CHECK-NEXT: [[T0:%.*]] = load i8** [[CUR]] -// CHECK-NEXT: call void @objc_release(i8* [[T0]]) +// CHECK-NEXT: call void @objc_storeStrong(i8** [[CUR]], i8* null) // CHECK-NEXT: [[T1:%.*]] = icmp eq i8** [[CUR]], [[BEGIN]] // CHECK-NEXT: br i1 [[T1]], // CHECK: ret void @@ -154,7 +153,7 @@ void test_ObjCBlockMember_copy_assign(ObjCBlockMember m1, ObjCBlockMember m2) { // Implicitly-generated destructor for ObjCMember // CHECK: define linkonce_odr void @_ZN10ObjCMemberD2Ev -// CHECK: call void @objc_release +// CHECK: call void @objc_storeStrong // CHECK: ret void // Implicitly-generated default constructor for ObjCMember diff --git a/test/CodeGenObjCXX/block-var-layout.mm b/test/CodeGenObjCXX/block-var-layout.mm index 00dd2c0..f8b6b9c 100644 --- a/test/CodeGenObjCXX/block-var-layout.mm +++ b/test/CodeGenObjCXX/block-var-layout.mm @@ -80,7 +80,7 @@ void (^d)() = ^{ // Test4 // struct S (int, id, int, id, int, id) -// 01 41 11 11 +// 01 41 11 11 00 // CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] c"\01A\11\11\00" struct S s2; void (^e)() = ^{ @@ -118,8 +118,8 @@ void Test5() { union U u2; // struct s2 (int, id, int, id, int, id?), union u2 (id?) -// 01 41 11 12 70 00 -// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [6 x i8] c"\01A\11\12p\00" +// 01 41 11 12 00 +// CHECK-LP64: @"\01L_OBJC_CLASS_NAME_{{.*}}" = internal global [5 x i8] c"\01A\11\12\00" void (^c)() = ^{ x(s2.ui.o1); x(u2.o1); diff --git a/test/CodeGenObjCXX/implementation-in-extern-c.mm b/test/CodeGenObjCXX/implementation-in-extern-c.mm new file mode 100644 index 0000000..4c1ee25 --- /dev/null +++ b/test/CodeGenObjCXX/implementation-in-extern-c.mm @@ -0,0 +1,17 @@ +// RUN: %clang_cc1 -emit-llvm %s -o /dev/null +// rdar://12581683 + +extern "C" { +@interface RetainBucket ++ (id) sharedRetainBucket; +@end + +@implementation RetainBucket ++ (id) sharedRetainBucket +{ + static id sharedBucket = (id)0; + return sharedBucket; +} +@end +} + diff --git a/test/CodeGenObjCXX/implicit-copy-assign-operator.mm b/test/CodeGenObjCXX/implicit-copy-assign-operator.mm index 29ec9ac..a5ce789 100644 --- a/test/CodeGenObjCXX/implicit-copy-assign-operator.mm +++ b/test/CodeGenObjCXX/implicit-copy-assign-operator.mm @@ -1,4 +1,6 @@ -// RUN: %clang_cc1 -fobjc-gc -emit-llvm -triple x86_64-apple-darwin10.0.0 -fobjc-runtime=macosx-fragile-10.5 -o - %s | FileCheck %s +// RUN: %clang_cc1 -fobjc-gc -emit-llvm -triple x86_64-apple-darwin10.0.0 -fobjc-runtime=macosx-fragile-10.5 -o - %s | FileCheck %s -check-prefix=CHECK-OBJ +// RUN: %clang_cc1 -x c++ -emit-llvm -triple x86_64-apple-darwin10.0.0 -o - %s | FileCheck %s -check-prefix=CHECK-CPP +#ifdef __OBJC__ struct A { A &operator=(const A&); A &operator=(A&); @@ -41,17 +43,82 @@ void test_D(D d1, D d2) { d1 = d2; } -// CHECK: define linkonce_odr %struct.D* @_ZN1DaSERS_ -// CHECK: {{call.*_ZN1AaSERS_}} -// CHECK: {{call.*_ZN1BaSERS_}} -// CHECK: {{call.*_ZN1CaSERKS_}} -// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 24}} -// CHECK: {{call.*_ZN1BaSERS_}} -// CHECK: br -// CHECK: {{call.*_ZN1CaSERKS_}} -// CHECK: {{call.*@objc_memmove_collectable}} -// CHECK: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 12}} -// CHECK: call void @_ZN11CopyByValueC1ERKS_ -// CHECK: 
{{call.*_ZN11CopyByValueaSES_}} -// CHECK: ret +// CHECK-OBJ: define linkonce_odr %struct.D* @_ZN1DaSERS_ +// CHECK-OBJ: {{call.*_ZN1AaSERS_}} +// CHECK-OBJ: {{call.*_ZN1BaSERS_}} +// CHECK-OBJ: {{call.*_ZN1CaSERKS_}} +// CHECK-OBJ: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 24}} +// CHECK-OBJ: {{call.*_ZN1BaSERS_}} +// CHECK-OBJ: br +// CHECK-OBJ: {{call.*_ZN1CaSERKS_}} +// CHECK-OBJ: {{call.*@objc_memmove_collectable}} +// CHECK-OBJ: {{call void @llvm.memcpy.p0i8.p0i8.i64.*i64 12}} +// CHECK-OBJ: call void @_ZN11CopyByValueC1ERKS_ +// CHECK-OBJ: {{call.*_ZN11CopyByValueaSES_}} +// CHECK-OBJ: ret +#endif +namespace PR13329 { +#ifndef __OBJC__ + typedef void* id; +#endif + struct POD { + id i; + short s; + }; + + struct NonPOD { + id i; + short s; + + NonPOD(); + }; + + struct DerivedNonPOD: NonPOD { + char c; + }; + + struct DerivedPOD: POD { + char c; + }; + + void testPOD() { + POD a; + POD b; + // CHECK-OBJ: @objc_memmove_collectable{{.*}}i64 16 + // CHECK-CPP: @llvm.memcpy{{.*}}i64 16 + b = a; + } + + void testNonPOD() { + NonPOD a; + NonPOD b; + // CHECK-OBJ: @objc_memmove_collectable{{.*}}i64 10 + // CHECK-CPP: @llvm.memcpy{{.*}}i64 10 + b = a; + } + + void testDerivedNonPOD() { + DerivedNonPOD a; + NonPOD b; + DerivedNonPOD c; + // CHECK-OBJ: @objc_memmove_collectable{{.*}}i64 10 + // CHECK-CPP: @llvm.memcpy{{.*}}i64 10 + (NonPOD&) a = b; + // CHECK-OBJ: @objc_memmove_collectable{{.*}}i64 11 + // CHECK-CPP: @llvm.memcpy{{.*}}i64 11 + a = c; + }; + + void testDerivedPOD() { + DerivedPOD a; + POD b; + DerivedPOD c; + // CHECK-OBJ: @objc_memmove_collectable{{.*}}i64 16 + // CHECK-CPP: @llvm.memcpy{{.*}}i64 16 + (POD&) a = b; + // CHECK-OBJ: @objc_memmove_collectable{{.*}}i64 17 + // CHECK-CPP: @llvm.memcpy{{.*}}i64 17 + a = c; + }; +} diff --git a/test/CodeGenObjCXX/property-objects.mm b/test/CodeGenObjCXX/property-objects.mm index 6dfcc27..a3c2ed3 100644 --- a/test/CodeGenObjCXX/property-objects.mm +++ b/test/CodeGenObjCXX/property-objects.mm @@ -1,8 +1,4 @@ // RUN: %clang_cc1 %s -triple=x86_64-apple-darwin10 -emit-llvm -o - | FileCheck %s -// CHECK-NOT: callq _objc_msgSend_stret -// CHECK: call void @_ZN1SC1ERKS_ -// CHECK: call %class.S* @_ZN1SaSERKS_ -// CHECK: call %struct.CGRect* @_ZN6CGRectaSERKS_ class S { public: @@ -19,13 +15,14 @@ struct CGRect { S position; CGRect bounds; } + @property(assign, nonatomic) S position; @property CGRect bounds; @property CGRect frame; - (void)setFrame:(CGRect)frameRect; - (CGRect)frame; - (void) initWithOwner; -- (struct CGRect)extent; +- (CGRect)extent; - (void)dealloc; @end @@ -33,6 +30,11 @@ struct CGRect { @synthesize position; @synthesize bounds; @synthesize frame; + +// CHECK: define internal void @"\01-[I setPosition:]" +// CHECK: call %class.S* @_ZN1SaSERKS_ +// CHECK-NEXT: ret void + - (void)setFrame:(CGRect)frameRect {} - (CGRect)frame {return bounds;} @@ -42,14 +44,20 @@ struct CGRect { labelLayerFrame = self.bounds; _labelLayer.frame = labelLayerFrame; } + // rdar://8366604 - (void)dealloc { CGRect cgrect = self.extent; } - (struct CGRect)extent {return bounds;} + @end +// CHECK: define i32 @main +// CHECK: call void @_ZN1SC1ERKS_(%class.S* [[AGGTMP:%[a-zA-Z0-9\.]+]], %class.S* {{%[a-zA-Z0-9\.]+}}) +// CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %class.S*)*)(i8* {{%[a-zA-Z0-9\.]+}}, i8* {{%[a-zA-Z0-9\.]+}}, %class.S* [[AGGTMP]]) +// CHECK-NEXT: ret i32 0 int main() { I *i; S s1; @@ -59,7 +67,9 @@ int main() { // rdar://8379892 // CHECK: define void @_Z1fP1A -// CHECK: @objc_msgSend to void +// 
CHECK: call void @_ZN1XC1Ev(%struct.X* [[LVTEMP:%[a-zA-Z0-9\.]+]]) +// CHECK: call void @_ZN1XC1ERKS_(%struct.X* [[AGGTMP:%[a-zA-Z0-9\.]+]], %struct.X* [[LVTEMP]]) +// CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void (i8*, i8*, %struct.X*)*)({{.*}} %struct.X* [[AGGTMP]]) struct X { X(); X(const X&); diff --git a/test/CodeGenOpenCL/single-precision-constant.cl b/test/CodeGenOpenCL/single-precision-constant.cl index 62b37c1..6ff7bd1 100644 --- a/test/CodeGenOpenCL/single-precision-constant.cl +++ b/test/CodeGenOpenCL/single-precision-constant.cl @@ -1,7 +1,6 @@ // RUN: %clang_cc1 %s -cl-single-precision-constant -emit-llvm -o - | FileCheck %s float fn(float f) { - // CHECK: fmul float - // CHECK: fadd float + // CHECK: tail call float @llvm.fmuladd.f32(float %f, float 2.000000e+00, float 1.000000e+00) return f*2. + 1.; } diff --git a/test/Coverage/targets.c b/test/Coverage/targets.c index 7c05122..14b895e 100644 --- a/test/Coverage/targets.c +++ b/test/Coverage/targets.c @@ -17,4 +17,3 @@ // clang 1.0 fails to compile Python 2.6 // RUN: %clang -target x86_64-apple-darwin9 -### -S %s -mmacosx-version-min=10.4 - diff --git a/test/Driver/B-opt.c b/test/Driver/B-opt.c new file mode 100644 index 0000000..a0b9a11 --- /dev/null +++ b/test/Driver/B-opt.c @@ -0,0 +1,22 @@ +// Check -B driver option. +// +// RUN: %clang %s -### -o %t.o -target i386-unknown-linux \ +// RUN: -B %S/Inputs/B_opt_tree/dir1 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-B-OPT-TRIPLE %s +// CHECK-B-OPT-TRIPLE: "{{.*}}/Inputs/B_opt_tree/dir1/i386-unknown-linux-ld" +// +// RUN: %clang %s -### -o %t.o -target i386-unknown-linux \ +// RUN: -B %S/Inputs/B_opt_tree/dir2 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-B-OPT-DIR %s +// CHECK-B-OPT-DIR: "{{.*}}/Inputs/B_opt_tree/dir2/ld" +// +// RUN: %clang %s -### -o %t.o -target i386-unknown-linux \ +// RUN: -B %S/Inputs/B_opt_tree/dir3/prefix- 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-B-OPT-PREFIX %s +// CHECK-B-OPT-PREFIX: "{{.*}}/Inputs/B_opt_tree/dir3/prefix-ld" +// +// RUN: %clang %s -### -o %t.o -target i386-unknown-linux \ +// RUN: -B %S/Inputs/B_opt_tree/dir3/prefix- \ +// RUN: -B %S/Inputs/B_opt_tree/dir2 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-B-OPT-MULT %s +// CHECK-B-OPT-MULT: "{{.*}}/Inputs/B_opt_tree/dir3/prefix-ld" diff --git a/test/Driver/Inputs/B_opt_tree/dir1/i386-unknown-linux-ld b/test/Driver/Inputs/B_opt_tree/dir1/i386-unknown-linux-ld new file mode 100755 index 0000000..e69de29 diff --git a/test/Driver/Inputs/B_opt_tree/dir1/ld b/test/Driver/Inputs/B_opt_tree/dir1/ld new file mode 100755 index 0000000..e69de29 diff --git a/test/Driver/Inputs/B_opt_tree/dir2/ld b/test/Driver/Inputs/B_opt_tree/dir2/ld new file mode 100755 index 0000000..e69de29 diff --git a/test/Driver/Inputs/B_opt_tree/dir3/prefix-ld b/test/Driver/Inputs/B_opt_tree/dir3/prefix-ld new file mode 100755 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/bin/.keep b/test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/bin/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/include/c++/4.4.3/.keep b/test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/include/c++/4.4.3/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/lib/.keep b/test/Driver/Inputs/basic_android_tree/arm-linux-androideabi/lib/.keep new file mode 100644 index 0000000..e69de29 diff --git 
a/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbegin.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbegin.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbeginS.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbeginS.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbeginT.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtbeginT.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtend.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtend.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtendS.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/arm-linux-androideabi/4.4.3/crtendS.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbegin.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbegin.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbeginS.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbeginS.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbeginT.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtbeginT.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtend.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtend.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtendS.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/crtendS.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbegin.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbegin.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbeginS.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbeginS.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbeginT.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtbeginT.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtend.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtend.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtendS.o b/test/Driver/Inputs/basic_android_tree/lib/gcc/mipsel-linux-android/4.4.3/mips-r2/crtendS.o new file mode 100644 index 0000000..e69de29 diff --git 
a/test/Driver/Inputs/basic_android_tree/mipsel-linux-android/bin/.keep b/test/Driver/Inputs/basic_android_tree/mipsel-linux-android/bin/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/mipsel-linux-android/include/c++/4.4.3/.keep b/test/Driver/Inputs/basic_android_tree/mipsel-linux-android/include/c++/4.4.3/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/mipsel-linux-android/lib/.keep b/test/Driver/Inputs/basic_android_tree/mipsel-linux-android/lib/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_dynamic.o b/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_dynamic.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_so.o b/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_so.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_static.o b/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtbegin_static.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtend_android.o b/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtend_android.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtend_so.o b/test/Driver/Inputs/basic_android_tree/sysroot/usr/lib/crtend_so.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_dynamic.o b/test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_dynamic.o deleted file mode 100644 index e69de29..0000000 diff --git a/test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_so.o b/test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_so.o deleted file mode 100644 index e69de29..0000000 diff --git a/test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_static.o b/test/Driver/Inputs/basic_android_tree/usr/lib/crtbegin_static.o deleted file mode 100644 index e69de29..0000000 diff --git a/test/Driver/Inputs/basic_android_tree/usr/lib/crtend_android.o b/test/Driver/Inputs/basic_android_tree/usr/lib/crtend_android.o deleted file mode 100644 index e69de29..0000000 diff --git a/test/Driver/Inputs/basic_android_tree/usr/lib/crtend_so.o b/test/Driver/Inputs/basic_android_tree/usr/lib/crtend_so.o deleted file mode 100644 index e69de29..0000000 diff --git a/test/Driver/Inputs/basic_linux_tree/usr/lib/gcc/x86_64-unknown-linux/4.6.0/crtfastmath.o b/test/Driver/Inputs/basic_linux_tree/usr/lib/gcc/x86_64-unknown-linux/4.6.0/crtfastmath.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/lib/.keep b/test/Driver/Inputs/debian_6_mips_tree/lib/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/lib32/.keep b/test/Driver/Inputs/debian_6_mips_tree/lib32/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/lib64/.keep b/test/Driver/Inputs/debian_6_mips_tree/lib64/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib/crt1.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib/crt1.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib/crti.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib/crti.o new file mode 100644 
index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/64/crtbegin.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/64/crtbegin.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/crtbegin.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/crtbegin.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/n32/crtbegin.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib/gcc/mipsel-linux-gnu/4.4/n32/crtbegin.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib32/crt1.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib32/crt1.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib32/crti.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib32/crti.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib64/crt1.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib64/crt1.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/debian_6_mips_tree/usr/lib64/crti.o b/test/Driver/Inputs/debian_6_mips_tree/usr/lib64/crti.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc64_tree/lib64/.keep b/test/Driver/Inputs/freescale_ppc64_tree/lib64/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crt1.o b/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crt1.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crti.o b/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crti.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crtn.o b/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/crtn.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/powerpc64-fsl-linux/4.6.2/crtbegin.o b/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/powerpc64-fsl-linux/4.6.2/crtbegin.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/powerpc64-fsl-linux/4.6.2/crtend.o b/test/Driver/Inputs/freescale_ppc64_tree/usr/lib64/powerpc64-fsl-linux/4.6.2/crtend.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc_tree/lib/.keep b/test/Driver/Inputs/freescale_ppc_tree/lib/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc_tree/usr/lib/crt1.o b/test/Driver/Inputs/freescale_ppc_tree/usr/lib/crt1.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc_tree/usr/lib/crti.o b/test/Driver/Inputs/freescale_ppc_tree/usr/lib/crti.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc_tree/usr/lib/crtn.o b/test/Driver/Inputs/freescale_ppc_tree/usr/lib/crtn.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc_tree/usr/lib/powerpc-fsl-linux/4.6.2/crtbegin.o b/test/Driver/Inputs/freescale_ppc_tree/usr/lib/powerpc-fsl-linux/4.6.2/crtbegin.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Inputs/freescale_ppc_tree/usr/lib/powerpc-fsl-linux/4.6.2/crtend.o 
b/test/Driver/Inputs/freescale_ppc_tree/usr/lib/powerpc-fsl-linux/4.6.2/crtend.o new file mode 100644 index 0000000..e69de29 diff --git a/test/Driver/Wp-args.c b/test/Driver/Wp-args.c index 0ab85b4..1d1af24 100644 --- a/test/Driver/Wp-args.c +++ b/test/Driver/Wp-args.c @@ -11,3 +11,11 @@ // CHECK: "-MT" // // PR4062 + +// RUN: %clang --target i386-pc-linux-gnu -### \ +// RUN: -Wp,-MMD -fsyntax-only %s 2> %t +// RUN: FileCheck -check-prefix MMD < %t %s + +// MMD: "-cc1" +// MMD-NOT: -MMD +// MMD: "-dependency-file" "Wp-args.d" diff --git a/test/Driver/altivec.cpp b/test/Driver/altivec.cpp index a893636..4e6fbe5 100644 --- a/test/Driver/altivec.cpp +++ b/test/Driver/altivec.cpp @@ -1,15 +1,15 @@ // Check that we error when -faltivec is specified on non-ppc platforms. -// RUN: %clang -ccc-clang-archs powerpc -target powerpc-unk-unk -faltivec -fsyntax-only %s -// RUN: %clang -ccc-clang-archs powerpc64 -target powerpc64-linux-gnu -faltivec -fsyntax-only %s -// RUN: %clang -ccc-clang-archs powerpc64 -target powerpc64-linux-gnu -maltivec -fsyntax-only %s +// RUN: %clang -target powerpc-unk-unk -faltivec -fsyntax-only %s +// RUN: %clang -target powerpc64-linux-gnu -faltivec -fsyntax-only %s +// RUN: %clang -target powerpc64-linux-gnu -maltivec -fsyntax-only %s // RUN: %clang -target i386-pc-win32 -faltivec -fsyntax-only %s 2>&1 | FileCheck %s // RUN: %clang -target x86_64-unknown-freebsd -faltivec -fsyntax-only %s 2>&1 | FileCheck %s // RUN: %clang -target armv6-apple-darwin -faltivec -fsyntax-only %s 2>&1 | FileCheck %s // RUN: %clang -target armv7-apple-darwin -faltivec -fsyntax-only %s 2>&1 | FileCheck %s -// RUN: %clang -ccc-clang-archs mips -target mips-linux-gnu -faltivec -fsyntax-only %s 2>&1 | FileCheck %s -// RUN: %clang -ccc-clang-archs mips64 -target mips64-linux-gnu -faltivec -fsyntax-only %s 2>&1 | FileCheck %s -// RUN: %clang -ccc-clang-archs sparc -target sparc-unknown-solaris -faltivec -fsyntax-only %s 2>&1 | FileCheck %s +// RUN: %clang -target mips-linux-gnu -faltivec -fsyntax-only %s 2>&1 | FileCheck %s +// RUN: %clang -target mips64-linux-gnu -faltivec -fsyntax-only %s 2>&1 | FileCheck %s +// RUN: %clang -target sparc-unknown-solaris -faltivec -fsyntax-only %s 2>&1 | FileCheck %s // CHECK: invalid argument '-faltivec' only allowed with 'ppc/ppc64' diff --git a/test/Driver/android-standalone.cpp b/test/Driver/android-standalone.cpp new file mode 100644 index 0000000..dc41ed7 --- /dev/null +++ b/test/Driver/android-standalone.cpp @@ -0,0 +1,65 @@ +// Test header and library paths when Clang is used with Android standalone +// toolchain. 
+// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target arm-linux-androideabi \ +// RUN: -B%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck %s +// CHECK: {{.*}}clang{{.*}}" "-cc1" +// CHECK: "-internal-isystem" "{{.*}}/arm-linux-androideabi/include/c++/4.4.3" +// CHECK: "-internal-isystem" "{{.*}}/arm-linux-androideabi/include/c++/4.4.3/arm-linux-androideabi" +// CHECK: "-internal-externc-isystem" "{{.*}}/sysroot/include" +// CHECK: "-internal-externc-isystem" "{{.*}}/sysroot/usr/include" +// CHECK: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK: "-L{{.*}}/lib/gcc/arm-linux-androideabi/4.4.3" +// CHECK: "-L{{.*}}/lib/gcc/arm-linux-androideabi/4.4.3/../../../../arm-linux-androideabi/lib" +// CHECK: "-L{{.*}}/sysroot/usr/lib" +// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-linux-android \ +// RUN: -mips32 \ +// RUN: -B%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck --check-prefix=CHECK-MIPS %s +// CHECK-MIPS: {{.*}}clang{{.*}}" "-cc1" +// CHECK-MIPS: "-internal-isystem" "{{.*}}/mipsel-linux-android/include/c++/4.4.3" +// CHECK-MIPS: "-internal-isystem" "{{.*}}/mipsel-linux-android/include/c++/4.4.3/mipsel-linux-android" +// CHECK-MIPS: "-internal-externc-isystem" "{{.*}}/sysroot/include" +// CHECK-MIPS: "-internal-externc-isystem" "{{.*}}/sysroot/usr/include" +// CHECK-MIPS: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-MIPS: "-L{{.*}}/lib/gcc/mipsel-linux-android/4.4.3" +// CHECK-MIPS: "-L{{.*}}/lib/gcc/mipsel-linux-android/4.4.3/../../../../mipsel-linux-android/lib" +// CHECK-MIPS: "-L{{.*}}/sysroot/usr/lib" +// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-linux-android \ +// RUN: -march=mips32 -mips32r2 \ +// RUN: -B%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck --check-prefix=CHECK-MIPSR2 %s +// CHECK-MIPSR2: {{.*}}clang{{.*}}" "-cc1" +// CHECK-MIPSR2: "-internal-isystem" "{{.*}}/mipsel-linux-android/include/c++/4.4.3" +// CHECK-MIPSR2: "-internal-isystem" "{{.*}}/mipsel-linux-android/include/c++/4.4.3/mipsel-linux-android" +// CHECK-MIPSR2: "-internal-externc-isystem" "{{.*}}/sysroot/include" +// CHECK-MIPSR2: "-internal-externc-isystem" "{{.*}}/sysroot/usr/include" +// CHECK-MIPSR2: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-MIPSR2: "-L{{.*}}/lib/gcc/mipsel-linux-android/4.4.3/mips-r2" +// CHECK-MIPSR2: "-L{{.*}}/lib/gcc/mipsel-linux-android/4.4.3/../../../../mipsel-linux-android/lib" +// CHECK-MIPSR2: "-L{{.*}}/sysroot/usr/lib" +// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-linux-android \ +// RUN: -mips32 -march=mips32r2 \ +// RUN: -B%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck --check-prefix=CHECK-MIPSR2-A %s +// CHECK-MIPSR2-A: {{.*}}clang{{.*}}" "-cc1" +// CHECK-MIPSR2-A: "-internal-isystem" "{{.*}}/mipsel-linux-android/include/c++/4.4.3" +// CHECK-MIPSR2-A: "-internal-isystem" "{{.*}}/mipsel-linux-android/include/c++/4.4.3/mipsel-linux-android" +// CHECK-MIPSR2-A: "-internal-externc-isystem" "{{.*}}/sysroot/include" +// CHECK-MIPSR2-A: "-internal-externc-isystem" "{{.*}}/sysroot/usr/include" +// CHECK-MIPSR2-A: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-MIPSR2-A: "-L{{.*}}/lib/gcc/mipsel-linux-android/4.4.3/mips-r2" 
+// CHECK-MIPSR2-A: "-L{{.*}}/lib/gcc/mipsel-linux-android/4.4.3/../../../../mipsel-linux-android/lib" +// CHECK-MIPSR2-A: "-L{{.*}}/sysroot/usr/lib" diff --git a/test/Driver/apple-kext-i386.cpp b/test/Driver/apple-kext-i386.cpp deleted file mode 100644 index eb9f957..0000000 --- a/test/Driver/apple-kext-i386.cpp +++ /dev/null @@ -1,55 +0,0 @@ -// Check that we transparently fallback to llvm-gcc for i386 kexts, we don't -// support the ABI they use (yet). - -// RUN: %clang -target i386-apple-darwin10 \ -// RUN: -fapple-kext -### -fsyntax-only %s 2> %t -// RUN: FileCheck --check-prefix=CHECK < %t %s - -// CHECK: cc1plus" -// CHECK: "-fapple-kext" - -// RUN: %clang -target i386-apple-darwin10 \ -// RUN: -mkernel -### -fsyntax-only %s 2> %t -// RUN: FileCheck --check-prefix=CHECK-MKERNEL < %t %s - -// CHECK-MKERNEL: cc1plus" -// CHECK-MKERNEL: "-mkernel" - -// RUN: %clang -target i386-apple-darwin10 \ -// RUN: -Wno-self-assign -Wc++11-extensions -Wno-microsoft -Wmicrosoft -Wvla \ -// RUN: -faltivec -mthumb -mcpu=G4 -mlongcall -mno-longcall -msoft-float \ -// RUN: -fapple-kext -### -fsyntax-only %s 2> %t -// RUN: FileCheck --check-prefix=CHECK-UNSUPPORTED < %t %s - -// CHECK-UNSUPPORTED: cc1plus" -// CHECK-UNSUPPORTED-NOT: "-Wno-self-assign" -// CHECK-UNSUPPORTED-NOT: "-Wc++11-extensions" -// CHECK-UNSUPPORTED-NOT: "-Wno-microsoft" -// CHECK-UNSUPPORTED-NOT: "-Wmicrosoft" -// CHECK-UNSUPPORTED-NOT: "-Wvla" -// CHECK-UNSUPPORTED-NOT: "-faltivec" -// CHECK-UNSUPPORTED-NOT: "-mthumb" -// CHECK-UNSUPPORTED-NOT: "-mlongcall" -// CHECK-UNSUPPORTED: "-mno-longcall" -// CHECK-UNSUPPORTED: "-msoft-float" - -// RUN: %clang -target i386-apple-darwin10 \ -// RUN: -Wconstant-logical-operand -save-temps \ -// RUN: -fapple-kext -### -fsyntax-only %s 2> %t -// RUN: FileCheck --check-prefix=CHECK-UNSUPPORTED2 < %t %s - -// CHECK-UNSUPPORTED2: cc1plus" -// CHECK-UNSUPPORTED2-NOT: "-Wconstant-logical-operand" - -// Check that -serialize-diagnostics does not cause an "argument unused" error. -// RUN: %clang -target i386-apple-darwin10 \ -// RUN: -Wall -fapple-kext -### -serialize-diagnostics %t.dia -c %s 2>&1 | \ -// RUN: FileCheck --check-prefix=CHECK-UNUSED %s - -// Check that --serialize-diagnostics does not cause an "argument unused" error. -// RUN: %clang -target i386-apple-darwin10 \ -// RUN: -Wall -fapple-kext -### --serialize-diagnostics %t.dia -c %s 2>&1 | \ -// RUN: FileCheck --check-prefix=CHECK-UNUSED %s - -// CHECK-UNUSED-NOT: argument unused -// CHECK-UNUSED: cc1plus diff --git a/test/Driver/arc.c b/test/Driver/arc.c index 5a5720c..4c99e57 100644 --- a/test/Driver/arc.c +++ b/test/Driver/arc.c @@ -8,10 +8,10 @@ // Just to test clang is working. 
# foo -// CHECK: error: -fobjc-arc is not supported with legacy abi +// CHECK: error: -fobjc-arc is not supported on platforms using the legacy runtime // CHECK-NOT: invalid preprocessing directive -// NOTOBJC-NOT: error: -fobjc-arc is not supported with legacy abi +// NOTOBJC-NOT: error: -fobjc-arc is not supported on platforms using the legacy runtime // NOTOBJC: invalid preprocessing directive -// UNSUPPORTED: error: -fobjc-arc is not supported on current deployment target +// UNSUPPORTED: error: -fobjc-arc is not supported on versions of OS X prior to 10.6 diff --git a/test/Driver/arm-darwin-builtin.c b/test/Driver/arm-darwin-builtin.c index 41f13f3..be50b68 100644 --- a/test/Driver/arm-darwin-builtin.c +++ b/test/Driver/arm-darwin-builtin.c @@ -8,7 +8,7 @@ // RUX: not grep -- "-fno-builtin-strcat" %t && // RUX: not grep -- "-fno-builtin-strcpy" %t && -// RUN: %clang -ccc-no-clang -target x86_64-apple-darwin9 -arch arm -### -fsyntax-only %s -fbuiltin-strcat -fbuiltin-strcpy 2> %t +// RUN: %clang -target x86_64-apple-darwin9 -arch arm -### -fsyntax-only %s -fbuiltin-strcat -fbuiltin-strcpy 2> %t // RUN: not grep -- "-fno-builtin-strcat" %t // RUN: not grep -- "-fno-builtin-strcpy" %t diff --git a/test/Driver/asan-ld.c b/test/Driver/asan-ld.c index daf046b..59dbda1 100644 --- a/test/Driver/asan-ld.c +++ b/test/Driver/asan-ld.c @@ -4,6 +4,12 @@ // RUN: -target i386-unknown-linux -faddress-sanitizer \ // RUN: --sysroot=%S/Inputs/basic_linux_tree \ // RUN: | FileCheck --check-prefix=CHECK-LINUX %s +// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target i386-unknown-linux -fsanitize=address \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree \ +// RUN: | FileCheck --check-prefix=CHECK-LINUX %s +// // CHECK-LINUX: "{{.*}}ld{{(.exe)?}}" // CHECK-LINUX-NOT: "-lc" // CHECK-LINUX: libclang_rt.asan-i386.a" @@ -13,19 +19,32 @@ // RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ // RUN: -target arm-linux-androideabi -faddress-sanitizer \ -// RUN: --sysroot=%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID %s +// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target arm-linux-androideabi -fsanitize=address \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ // RUN: | FileCheck --check-prefix=CHECK-ANDROID %s +// // CHECK-ANDROID: "{{.*}}ld{{(.exe)?}}" // CHECK-ANDROID-NOT: "-lc" -// CHECK-ANDROID: "-u" "__asan_preinit" "-lasan" -// CHECK-ANDROID: "-lasan_preload" "-ldl" +// CHECK-ANDROID: libclang_rt.asan-arm-android.so" +// CHECK-ANDROID-NOT: "-lpthread" // RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ // RUN: -target arm-linux-androideabi -faddress-sanitizer \ -// RUN: --sysroot=%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -shared \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-SHARED %s +// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target arm-linux-androideabi -fsanitize=address \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ // RUN: -shared \ // RUN: | FileCheck --check-prefix=CHECK-ANDROID-SHARED %s +// // CHECK-ANDROID-SHARED: "{{.*}}ld{{(.exe)?}}" // CHECK-ANDROID-SHARED-NOT: "-lc" -// CHECK-ANDROID-SHARED-NOT: "-lasan" -// CHECK-ANDROID-SHARED: "-lasan_preload" "-ldl" +// CHECK-ANDROID-SHARED: libclang_rt.asan-arm-android.so" +// CHECK-ANDROID-SHARED-NOT: "-lpthread" diff --git a/test/Driver/asan.c b/test/Driver/asan.c index 
4c9a1b6..c9b3796 100644 --- a/test/Driver/asan.c +++ b/test/Driver/asan.c @@ -2,6 +2,7 @@ // RUN: %clang -O1 -target i386-unknown-unknown -faddress-sanitizer %s -S -emit-llvm -o - | FileCheck %s // RUN: %clang -O2 -target i386-unknown-unknown -faddress-sanitizer %s -S -emit-llvm -o - | FileCheck %s // RUN: %clang -O3 -target i386-unknown-unknown -faddress-sanitizer %s -S -emit-llvm -o - | FileCheck %s +// RUN: %clang -target i386-unknown-unknown -fsanitize=address %s -S -emit-llvm -o - | FileCheck %s // Verify that -faddress-sanitizer invokes asan instrumentation. int foo(int *a) { return *a; } diff --git a/test/Driver/bindings.c b/test/Driver/bindings.c index a7cda19..d25fc8f 100644 --- a/test/Driver/bindings.c +++ b/test/Driver/bindings.c @@ -1,49 +1,25 @@ // Basic binding. -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings %s 2> %t -// RUN: grep '"clang", inputs: \[".*bindings.c"\], output: ".*\.s"' %t -// RUN: grep '"gcc::Assemble", inputs: \[".*\.s"\], output: ".*\.o"' %t -// RUN: grep '"gcc::Link", inputs: \[".*\.o"\], output: "a.out"' %t +// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings %s 2>&1 | FileCheck %s --check-prefix=CHECK01 +// CHECK01: "clang", inputs: ["{{.*}}bindings.c"], output: "{{.*}}.s" +// CHECK01: "gcc::Assemble", inputs: ["{{.*}}.s"], output: "{{.*}}.o" +// CHECK01: "gcc::Link", inputs: ["{{.*}}.o"], output: "a.out" -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -ccc-no-clang %s 2> %t -// RUN: grep '"gcc::Compile", inputs: \[".*bindings.c"\], output: ".*\.s"' %t -// RUN: grep '"gcc::Assemble", inputs: \[".*\.s"\], output: ".*\.o"' %t -// RUN: grep '"gcc::Link", inputs: \[".*\.o"\], output: "a.out"' %t - -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -ccc-no-clang -no-integrated-cpp %s 2> %t -// RUN: grep '"gcc::Preprocess", inputs: \[".*bindings.c"\], output: ".*\.i"' %t -// RUN: grep '"gcc::Compile", inputs: \[".*\.i"\], output: ".*\.s"' %t -// RUN: grep '"gcc::Assemble", inputs: \[".*\.s"\], output: ".*\.o"' %t -// RUN: grep '"gcc::Link", inputs: \[".*\.o"\], output: "a.out"' %t +// Clang control options -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -ccc-no-clang -x c-header %s 2> %t -// RUN: grep '"gcc::Precompile", inputs: \[".*bindings.c"\], output: ".*bindings.c.gch' %t +// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix=CHECK05 +// CHECK05: "clang", inputs: ["{{.*}}bindings.c"], output: (nothing) -// Clang control options +// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -fsyntax-only -x c++ %s 2>&1 | FileCheck %s --check-prefix=CHECK08 +// CHECK08: "clang", inputs: ["{{.*}}bindings.c"], output: (nothing) -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -fsyntax-only %s 2> %t -// RUN: grep '"clang", inputs: \[".*bindings.c"\], output: (nothing)' %t -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -ccc-no-clang -fsyntax-only %s 2> %t -// RUN: grep '"gcc::Compile", inputs: \[".*bindings.c"\], output: (nothing)' %t -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -ccc-no-clang-cxx -fsyntax-only -x c++ %s 2> %t -// RUN: grep '"gcc::Compile", inputs: \[".*bindings.c"\], output: (nothing)' %t -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -ccc-clang-cxx -fsyntax-only -x c++ %s 2> %t -// RUN: grep '"clang", inputs: \[".*bindings.c"\], output: (nothing)' %t -// RUN: %clang -target i386-unknown-unknown -ccc-print-bindings -ccc-no-clang-cpp 
-fsyntax-only -no-integrated-cpp %s 2> %t -// RUN: grep '"gcc::Preprocess", inputs: \[".*bindings.c"\], output: ".*\.i"' %t -// RUN: grep '"clang", inputs: \[".*\.i"\], output: (nothing)' %t -// RUN: %clang -target i386-apple-darwin9 -ccc-print-bindings -ccc-clang-archs i386 %s -S -arch ppc 2> %t -// RUN: grep '"gcc::Compile", inputs: \[".*bindings.c"\], output: "bindings.s"' %t -// RUN: %clang -target i386-apple-darwin9 -ccc-print-bindings -ccc-clang-archs powerpc %s -S -arch ppc 2> %t -// RUN: grep '"clang", inputs: \[".*bindings.c"\], output: "bindings.s"' %t +// RUN: %clang -target i386-apple-darwin9 -ccc-print-bindings %s -S -arch ppc 2>&1 | FileCheck %s --check-prefix=CHECK11 +// CHECK11: "clang", inputs: ["{{.*}}bindings.c"], output: "bindings.s" -// RUN: %clang -target powerpc-unknown-unknown -ccc-print-bindings -ccc-clang-archs "" %s -S 2> %t -// RUN: grep '"clang", inputs: \[".*bindings.c"\], output: "bindings.s"' %t -// RUN: %clang -target powerpc-unknown-unknown -ccc-print-bindings -ccc-clang-archs "i386" %s -S 2> %t -// RUN: grep '"gcc::Compile", inputs: \[".*bindings.c"\], output: "bindings.s"' %t +// RUN: %clang -target powerpc-unknown-unknown -ccc-print-bindings %s -S 2>&1 | FileCheck %s --check-prefix=CHECK12 +// CHECK12: "clang", inputs: ["{{.*}}bindings.c"], output: "bindings.s" // Darwin bindings -// RUN: %clang -target i386-apple-darwin9 -no-integrated-as -ccc-print-bindings %s 2> %t -// RUN: grep '"clang", inputs: \[".*bindings.c"\], output: ".*\.s"' %t -// RUN: grep '"darwin::Assemble", inputs: \[".*\.s"\], output: ".*\.o"' %t -// RUN: grep '"darwin::Link", inputs: \[".*\.o"\], output: "a.out"' %t - +// RUN: %clang -target i386-apple-darwin9 -no-integrated-as -ccc-print-bindings %s 2>&1 | FileCheck %s --check-prefix=CHECK14 +// CHECK14: "clang", inputs: ["{{.*}}bindings.c"], output: "{{.*}}.s" +// CHECK14: "darwin::Assemble", inputs: ["{{.*}}.s"], output: "{{.*}}.o" +// CHECK14: "darwin::Link", inputs: ["{{.*}}.o"], output: "a.out" diff --git a/test/Driver/bitrig.c b/test/Driver/bitrig.c new file mode 100644 index 0000000..876a9cd --- /dev/null +++ b/test/Driver/bitrig.c @@ -0,0 +1,29 @@ +// RUN: %clang -no-canonical-prefixes -target amd64-pc-bitrig %s -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-LD-C %s +// CHECK-LD-C: clang{{.*}}" "-cc1" "-triple" "amd64-pc-bitrig" +// CHECK-LD-C: ld{{.*}}" {{.*}} "-lc" "-lclang_rt.amd64" + +// RUN: %clangxx -no-canonical-prefixes -target amd64-pc-bitrig %s -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-LD-CXX %s +// CHECK-LD-CXX: clang{{.*}}" "-cc1" "-triple" "amd64-pc-bitrig" +// CHECK-LD-CXX: ld{{.*}}" {{.*}} "-lstdc++" "-lm" "-lc" "-lclang_rt.amd64" + +// RUN: %clangxx -stdlib=libc++ -no-canonical-prefixes -target amd64-pc-bitrig %s -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-LD-CXX-STDLIB %s +// CHECK-LD-CXX-STDLIB: clang{{.*}}" "-cc1" "-triple" "amd64-pc-bitrig" +// CHECK-LD-CXX-STDLIB: ld{{.*}}" {{.*}} "-lc++" "-lcxxrt" "-lgcc" "-lm" "-lc" "-lclang_rt.amd64" + +// RUN: %clang -no-canonical-prefixes -target amd64-pc-bitrig -pthread %s -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-PTHREAD %s +// CHECK-PTHREAD: clang{{.*}}" "-cc1" "-triple" "amd64-pc-bitrig" +// CHECK-PTHREAD: ld{{.*}}" {{.*}} "{{.*}}crtbegin.o" {{.*}}.o" "-lpthread" "-lc" "-lclang_rt.amd64" "{{.*}}crtend.o" + +// RUN: %clang -no-canonical-prefixes -target amd64-pc-bitrig -pg -pthread %s -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-PG-PTHREAD %s +// CHECK-PG-PTHREAD: clang{{.*}}" "-cc1" "-triple" "amd64-pc-bitrig" +// 
CHECK-PG-PTHREAD: ld{{.*}}" {{.*}} "{{.*}}crtbegin.o" {{.*}}.o" "-lpthread_p" "-lc_p" "-lclang_rt.amd64" "{{.*}}crtend.o" + +// RUN: %clang -no-canonical-prefixes -target amd64-pc-bitrig -shared -pg -pthread %s -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-PG-PTHREAD-SHARED %s +// CHECK-PG-PTHREAD-SHARED: clang{{.*}}" "-cc1" "-triple" "amd64-pc-bitrig" +// CHECK-PG-PTHREAD-SHARED: ld{{.*}}" {{.*}} "{{.*}}crtbeginS.o" {{.*}}.o" "-lpthread" "-lclang_rt.amd64" "{{.*}}crtendS.o" diff --git a/test/Driver/clang-translation.c b/test/Driver/clang-translation.c index 76196da..3ddb189e 100644 --- a/test/Driver/clang-translation.c +++ b/test/Driver/clang-translation.c @@ -1,22 +1,27 @@ -// RUN: %clang -target i386-unknown-unknown -### -S -O0 -Os %s -o %t.s -fverbose-asm -funwind-tables -fvisibility=hidden 2> %t.log -// RUN: grep '"-triple" "i386-unknown-unknown"' %t.log -// RUN: grep '"-S"' %t.log -// RUN: grep '"-disable-free"' %t.log -// RUN: grep '"-mrelocation-model" "static"' %t.log -// RUN: grep '"-mdisable-fp-elim"' %t.log -// RUN: grep '"-munwind-tables"' %t.log -// RUN: grep '"-Os"' %t.log -// RUN: grep '"-o" .*clang-translation.*' %t.log -// RUN: grep '"-masm-verbose"' %t.log -// RUN: grep '"-fvisibility" "hidden"' %t.log -// RUN: %clang -target i386-apple-darwin9 -### -S %s -o %t.s 2> %t.log -// RUN: grep '"-target-cpu" "yonah"' %t.log -// RUN: %clang -target x86_64-apple-darwin9 -### -S %s -o %t.s 2> %t.log -// RUN: grep '"-target-cpu" "core2"' %t.log +// RUN: %clang -target i386-unknown-unknown -### -S -O0 -Os %s -o %t.s -fverbose-asm -funwind-tables -fvisibility=hidden 2>&1 | FileCheck -check-prefix=I386 %s +// I386: "-triple" "i386-unknown-unknown" +// I386: "-S" +// I386: "-disable-free" +// I386: "-mrelocation-model" "static" +// I386: "-mdisable-fp-elim" +// I386: "-masm-verbose" +// I386: "-munwind-tables" +// I386: "-Os" +// I386: "-fvisibility" +// I386: "hidden" +// I386: "-o" +// I386: clang-translation +// RUN: %clang -target i386-apple-darwin9 -### -S %s -o %t.s 2>&1 | \ +// RUN: FileCheck -check-prefix=YONAH %s +// YONAH: "-target-cpu" +// YONAH: "yonah" +// RUN: %clang -target x86_64-apple-darwin9 -### -S %s -o %t.s 2>&1 | \ +// RUN: FileCheck -check-prefix=CORE2 %s +// CORE2: "-target-cpu" +// CORE2: "core2" -// RUN: %clang -target x86_64-apple-darwin10 -### -S %s 2> %t.log \ -// RUN: -arch armv7 -// RUN: FileCheck -check-prefix=ARMV7_DEFAULT %s < %t.log +// RUN: %clang -target x86_64-apple-darwin10 -### -S %s -arch armv7 2>&1 | \ +// RUN: FileCheck -check-prefix=ARMV7_DEFAULT %s // ARMV7_DEFAULT: clang // ARMV7_DEFAULT: "-cc1" // ARMV7_DEFAULT-NOT: "-msoft-float" @@ -24,9 +29,8 @@ // ARMV7_DEFAULT-NOT: "-msoft-float" // ARMV7_DEFAULT: "-x" "c" -// RUN: %clang -target x86_64-apple-darwin10 -### -S %s 2> %t.log \ -// RUN: -arch armv7 -msoft-float -// RUN: FileCheck -check-prefix=ARMV7_SOFTFLOAT %s < %t.log +// RUN: %clang -target x86_64-apple-darwin10 -### -S %s -arch armv7 \ +// RUN: -msoft-float 2>&1 | FileCheck -check-prefix=ARMV7_SOFTFLOAT %s // ARMV7_SOFTFLOAT: clang // ARMV7_SOFTFLOAT: "-cc1" // ARMV7_SOFTFLOAT: "-msoft-float" @@ -35,9 +39,8 @@ // ARMV7_SOFTFLOAT: "-neon" // ARMV7_SOFTFLOAT: "-x" "c" -// RUN: %clang -target x86_64-apple-darwin10 -### -S %s 2> %t.log \ -// RUN: -arch armv7 -mhard-float -// RUN: FileCheck -check-prefix=ARMV7_HARDFLOAT %s < %t.log +// RUN: %clang -target x86_64-apple-darwin10 -### -S %s -arch armv7 \ +// RUN: -mhard-float 2>&1 | FileCheck -check-prefix=ARMV7_HARDFLOAT %s // ARMV7_HARDFLOAT: clang // ARMV7_HARDFLOAT: "-cc1" // 
ARMV7_HARDFLOAT-NOT: "-msoft-float" @@ -45,32 +48,60 @@ // ARMV7_HARDFLOAT-NOT: "-msoft-float" // ARMV7_HARDFLOAT: "-x" "c" -// RUN: %clang -target arm-linux -### -S %s 2> %t.log \ -// RUN: -march=armv5e -// RUN: FileCheck -check-prefix=ARMV5E %s < %t.log +// RUN: %clang -target arm-linux -### -S %s -march=armv5e 2>&1 | \ +// RUN: FileCheck -check-prefix=ARMV5E %s // ARMV5E: clang // ARMV5E: "-cc1" // ARMV5E: "-target-cpu" "arm1022e" -// RUN: %clang -ccc-clang-archs powerpc64 \ -// RUN: -target powerpc64-unknown-linux-gnu -### -S %s 2> %t.log \ -// RUN: -mcpu=G5 -// RUN: FileCheck -check-prefix=PPCG5 %s < %t.log +// RUN: %clang -target powerpc64-unknown-linux-gnu \ +// RUN: -### -S %s -mcpu=G5 2>&1 | FileCheck -check-prefix=PPCG5 %s // PPCG5: clang // PPCG5: "-cc1" // PPCG5: "-target-cpu" "g5" -// RUN: %clang -ccc-clang-archs powerpc64 \ -// RUN: -target powerpc64-unknown-linux-gnu -### -S %s 2> %t.log \ -// RUN: -mcpu=power7 -// RUN: FileCheck -check-prefix=PPCPWR7 %s < %t.log +// RUN: %clang -target powerpc64-unknown-linux-gnu \ +// RUN: -### -S %s -mcpu=power7 2>&1 | FileCheck -check-prefix=PPCPWR7 %s // PPCPWR7: clang // PPCPWR7: "-cc1" // PPCPWR7: "-target-cpu" "pwr7" -// RUN: %clang -ccc-clang-archs powerpc64 \ -// RUN: -target powerpc64-unknown-linux-gnu -### -S %s 2> %t.log -// RUN: FileCheck -check-prefix=PPC64NS %s < %t.log +// RUN: %clang -target powerpc64-unknown-linux-gnu \ +// RUN: -### -S %s 2>&1 | FileCheck -check-prefix=PPC64NS %s // PPC64NS: clang // PPC64NS: "-cc1" // PPC64NS: "-target-cpu" "ppc64" + +// RUN: %clang -target powerpc-fsl-linux -### -S %s \ +// RUN: -mcpu=e500mc 2>&1 | FileCheck -check-prefix=PPCE500MC %s +// PPCE500MC: clang +// PPCE500MC: "-cc1" +// PPCE500MC: "-target-cpu" "e500mc" + +// RUN: %clang -target powerpc64-fsl-linux -### -S \ +// RUN: %s -mcpu=e5500 2>&1 | FileCheck -check-prefix=PPCE5500 %s +// PPCE5500: clang +// PPCE5500: "-cc1" +// PPCE5500: "-target-cpu" "e5500" + +// RUN: %clang -target amd64-unknown-openbsd5.2 -### -S %s 2>&1 | \ +// RUN: FileCheck -check-prefix=AMD64 %s +// AMD64: clang +// AMD64: "-cc1" +// AMD64: "-triple" +// AMD64: "amd64-unknown-openbsd5.2" +// AMD64: "-munwind-tables" + +// RUN: %clang -target amd64--mingw32 -### -S %s 2>&1 | \ +// RUN: FileCheck -check-prefix=AMD64-MINGW %s +// AMD64-MINGW: clang +// AMD64-MINGW: "-cc1" +// AMD64-MINGW: "-triple" +// AMD64-MINGW: "amd64--mingw32" +// AMD64-MINGW: "-munwind-tables" + +// RUN: %clang -target i386-linux-android -### -S %s 2>&1 \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck --check-prefix=ANDROID-X86 %s +// ANDROID-X86: clang +// ANDROID-X86: "-target-cpu" "core2" diff --git a/test/Driver/cpath.c b/test/Driver/cpath.c index bd7c8d0..ea6ba49 100644 --- a/test/Driver/cpath.c +++ b/test/Driver/cpath.c @@ -1,8 +1,8 @@ // RUN: mkdir -p %T/test1 %T/test2 %T/test3 // RUN: env "CPATH=%T/test1%{pathsep}%T/test2" %clang -x c -E -v %s 2>&1 | FileCheck %s -check-prefix=CPATH -// CPATH: -I {{.*}}/test1 -// CPATH: -I {{.*}}/test2 +// CPATH: -I{{.*}}/test1 +// CPATH: -I{{.*}}/test2 // CPATH: search starts here // CPATH: test1 // CPATH: test2 diff --git a/test/Driver/crash-report.c b/test/Driver/crash-report.c index 7adaf42..bfcd573 100644 --- a/test/Driver/crash-report.c +++ b/test/Driver/crash-report.c @@ -1,6 +1,6 @@ // RUN: rm -rf %t // RUN: mkdir %t -// RUN: env TMPDIR=%t TEMP=%t TMP=%t %clang -fsyntax-only %s \ +// RUN: env TMPDIR=%t TEMP=%t TMP=%t RC_DEBUG_OPTIONS=1 %clang -fsyntax-only %s \ // RUN: -F/tmp/ -I /tmp/ -idirafter /tmp/ -iquote 
/tmp/ -isystem /tmp/ \ // RUN: -iprefix /the/prefix -iwithprefix /tmp -iwithprefixbefore /tmp/ \ // RUN: -internal-isystem /tmp/ -internal-externc-isystem /tmp/ \ @@ -25,3 +25,4 @@ FOO // CHECKSH-NOT: -iwithprefixbefore /tmp/ // CHECKSH-NOT: -internal-isystem /tmp/ // CHECKSH-NOT: -internal-externc-isystem /tmp/ +// CHECKSH-NOT: -dwarf-debug-flags diff --git a/test/Driver/darwin-arch-default.c b/test/Driver/darwin-arch-default.c new file mode 100644 index 0000000..60bf61d --- /dev/null +++ b/test/Driver/darwin-arch-default.c @@ -0,0 +1,7 @@ +// Check that the name of the arch we bind is "ppc" not "powerpc". +// +// RUN: %clang -target powerpc-apple-darwin8 -### \ +// RUN: -ccc-print-phases %s 2> %t +// RUN: FileCheck --check-prefix=CHECK-POWERPC < %t %s +// +// CHECK-POWERPC: bind-arch, "ppc" diff --git a/test/Driver/darwin-asan-nofortify.c b/test/Driver/darwin-asan-nofortify.c new file mode 100644 index 0000000..7f325e0 --- /dev/null +++ b/test/Driver/darwin-asan-nofortify.c @@ -0,0 +1,6 @@ +// Make sure AddressSanitizer disables _FORTIFY_SOURCE on Darwin. + +// RUN: %clang -faddress-sanitizer %s -E -dM -target x86_64-darwin - | FileCheck %s +// RUN: %clang -fsanitize=address %s -E -dM -target x86_64-darwin - | FileCheck %s + +// CHECK: #define _FORTIFY_SOURCE 0 diff --git a/test/Driver/darwin-cc.c b/test/Driver/darwin-cc.c deleted file mode 100644 index 85cdf12..0000000 --- a/test/Driver/darwin-cc.c +++ /dev/null @@ -1,4 +0,0 @@ -// RUN: %clang -ccc-no-clang -target i386-apple-darwin10 -m32 -### -MD -g -fast -Q -dA -mkernel -ansi -aFOO -S -o /tmp/OUTPUTNAME -g0 -gfull -O2 -Werror -pedantic -Wmost -w -std=c99 -trigraphs -v -pg -fFOO -undef -Qn --param a=b -fmudflap -coverage -save-temps -nostdinc -I ARG0 -F ARG1 -I ARG2 -P -MF ARG3 -MG -MP -remap -g3 -H -D ARG4 -U ARG5 -A ARG6 -D ARG7 -U ARG8 -A ARG9 -include ARG10 -pthread %s 2> %t.log -// RUN: FileCheck %s < %t.log -// CHECK: {{ ".*cc1.*" "-E" "-nostdinc" "-v" "-I" "ARG0" "-FARG1" "-I" "ARG2" "-P" "-MD" "[^"]*/OUTPUTNAME.d" "-MF" "ARG3" "-MG" "-MP" "-MQ" "[^"]*/OUTPUTNAME" "-remap" "-dD" "-H" "-D__STATIC__" "-D_REENTRANT" "-D" "ARG4" "-U" "ARG5" "-A" "ARG6" "-D" "ARG7" "-U" "ARG8" "-A" "ARG9" "-include" "ARG10" ".*darwin-cc.c" "-D_MUDFLAP" "-include" "mf-runtime.h" "-m32" "-mkernel" "-mtune=core2" "-mmacosx-version-min=10.6.0" "-ansi" "-std=c99" "-trigraphs" "-Werror" "-pedantic" "-Wmost" "-w" "-fast" "-fno-eliminate-unused-debug-symbols" "-fFOO" "-fmudflap" "-O2" "-undef" "-fpch-preprocess" "-o" ".*darwin-cc.i"}} -// CHECK: {{ ".*cc1.*" "-fpreprocessed" ".*darwin-cc.i" "-O3" "-dumpbase" ".*darwin-cc.c" "-dA" "-m32" "-mkernel" "-mtune=core2" "-mmacosx-version-min=10.6.0" "-ansi" "-aFOO" "-auxbase-strip" "[^"]*/OUTPUTNAME" "-g" "-g0" "-g" "-g3" "-O2" "-Werror" "-pedantic" "-Wmost" "-w" "-ansi" "-std=c99" "-trigraphs" "-version" "-p" "-fast" "-fno-eliminate-unused-debug-symbols" "-fFOO" "-fmudflap" "-undef" "-fno-ident" "-o" "[^"]*/OUTPUTNAME" "--param" "a=b" "-fno-builtin" "-fno-merge-constants" "-fprofile-arcs" "-ftest-coverage"}} diff --git a/test/Driver/darwin-ld.c b/test/Driver/darwin-ld.c index 4cda37f..cd511e0 100644 --- a/test/Driver/darwin-ld.c +++ b/test/Driver/darwin-ld.c @@ -80,7 +80,7 @@ // LINK_OLDER_NODEMANGLE-NOT: "-demangle" // LINK_OLDER_NODEMANGLE: "-lSystem" -// RUN: %clang -target x86_64-apple-darwin10 -### %t.o \ +// RUN: %clang -target x86_64-apple-darwin10 -### %s \ // RUN: -mlinker-version=117 -flto 2> %t.log // RUN: cat %t.log // RUN: FileCheck -check-prefix=LINK_OBJECT_LTO_PATH %s < %t.log @@ -122,6 +122,10 @@ 
// RUN: FileCheck -check-prefix=LINK_NO_CRT1 %s < %t.log // LINK_NO_CRT1-NOT: crt +// RUN: %clang -target armv7-apple-ios6.0 -miphoneos-version-min=6.0 -### %t.o 2> %t.log +// RUN: FileCheck -check-prefix=LINK_NO_IOS_CRT1 %s < %t.log +// LINK_NO_IOS_CRT1-NOT: crt + // RUN: %clang -target i386-apple-darwin12 -pg -### %t.o 2> %t.log // RUN: FileCheck -check-prefix=LINK_PG %s < %t.log // LINK_PG: -lgcrt1.o diff --git a/test/Driver/darwin-sdkroot.c b/test/Driver/darwin-sdkroot.c new file mode 100644 index 0000000..5abf081 --- /dev/null +++ b/test/Driver/darwin-sdkroot.c @@ -0,0 +1,22 @@ +// Check that SDKROOT is used to define the default for -isysroot on Darwin. +// +// RUN: rm -rf %t.tmpdir +// RUN: mkdir -p %t.tmpdir +// RUN: env SDKROOT=%t.tmpdir %clang -target x86_64-apple-darwin10 \ +// RUN: -c %s -### 2> %t.log +// RUN: FileCheck --check-prefix=CHECK-BASIC < %t.log %s +// +// CHECK-BASIC: clang +// CHECK-BASIC: "-cc1" +// CHECK-BASIC: "-isysroot" "{{.*tmpdir}}" + +// Check that we don't use SDKROOT as the default if it is not a valid path. + +// RUN: rm -rf %t.nonpath +// RUN: env SDKROOT=%t.nonpath %clang -target x86_64-apple-darwin10 \ +// RUN: -c %s -### 2> %t.log +// RUN: FileCheck --check-prefix=CHECK-NONPATH < %t.log %s +// +// CHECK-NONPATH: clang +// CHECK-NONPATH: "-cc1" +// CHECK-NONPATH-NOT: "-isysroot" diff --git a/test/Driver/fast-math.c b/test/Driver/fast-math.c index 8426f09..17bf6ed 100644 --- a/test/Driver/fast-math.c +++ b/test/Driver/fast-math.c @@ -12,16 +12,46 @@ // CHECK-NO-INFS: "-cc1" // CHECK-NO-INFS: "-menable-no-infs" // +// RUN: %clang -### -fno-fast-math -fno-honor-infinities -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NO-FAST-MATH-NO-INFS %s +// CHECK-NO-FAST-MATH-NO-INFS: "-cc1" +// CHECK-NO-FAST-MATH-NO-INFS: "-menable-no-infs" +// +// RUN: %clang -### -fno-honor-infinities -fno-fast-math -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NO-INFS-NO-FAST-MATH %s +// CHECK-NO-INFS-NO-FAST-MATH: "-cc1" +// CHECK-NO-INFS-NO-FAST-MATH-NOT: "-menable-no-infs" +// // RUN: %clang -### -fno-honor-nans -c %s 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-NO-NANS %s // CHECK-NO-NANS: "-cc1" // CHECK-NO-NANS: "-menable-no-nans" // +// RUN: %clang -### -fno-fast-math -fno-honor-nans -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NO-FAST-MATH-NO-NANS %s +// CHECK-NO-FAST-MATH-NO-NANS: "-cc1" +// CHECK-NO-FAST-MATH-NO-NANS: "-menable-no-nans" +// +// RUN: %clang -### -fno-honor-nans -fno-fast-math -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NO-NANS-NO-FAST-MATH %s +// CHECK-NO-NANS-NO-FAST-MATH: "-cc1" +// CHECK-NO-NANS-NO-FAST-MATH-NOT: "-menable-no-nans" +// // RUN: %clang -### -fmath-errno -c %s 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-MATH-ERRNO %s // CHECK-MATH-ERRNO: "-cc1" // CHECK-MATH-ERRNO: "-fmath-errno" // +// RUN: %clang -### -fno-fast-math -fmath-errno -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NO-FAST-MATH-MATH-ERRNO %s +// CHECK-NO-FAST-MATH-MATH-ERRNO: "-cc1" +// CHECK-NO-FAST-MATH-MATH-ERRNO: "-fmath-errno" +// +// RUN: %clang -### -fmath-errno -fno-fast-math -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-MATH-ERRNO-NO-FAST-MATH %s +// CHECK-MATH-ERRNO-NO-FAST-MATH: "-cc1" +// CHECK-MATH-ERRNO-NO-FAST-MATH-NOT: "-fmath-errno" +// // RUN: %clang -### -fmath-errno -fno-math-errno -c %s 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-NO-MATH-ERRNO %s // RUN: %clang -### -target i686-apple-darwin -c %s 2>&1 \ @@ -43,6 +73,18 @@ // CHECK-UNSAFE-MATH: "-cc1" // CHECK-UNSAFE-MATH: 
"-menable-unsafe-fp-math" // +// RUN: %clang -### -fno-fast-math -fno-math-errno -fassociative-math -freciprocal-math \ +// RUN: -fno-signed-zeros -fno-trapping-math -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NO-FAST-MATH-UNSAFE-MATH %s +// CHECK-NO-FAST-MATH-UNSAFE-MATH: "-cc1" +// CHECK-NO-FAST-MATH-UNSAFE-MATH: "-menable-unsafe-fp-math" +// +// RUN: %clang -### -fno-fast-math -fno-math-errno -fassociative-math -freciprocal-math \ +// RUN: -fno-fast-math -fno-signed-zeros -fno-trapping-math -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-UNSAFE-MATH-NO-FAST-MATH %s +// CHECK-UNSAFE-MATH-NO-FAST-MATH: "-cc1" +// CHECK-UNSAFE-MATH-NO-FAST-MATH-NOT: "-menable-unsafe-fp-math" +// // Check that various umbrella flags also enable these frontend options. // RUN: %clang -### -ffast-math -c %s 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-NO-INFS %s @@ -63,12 +105,19 @@ // impact remains even if every optimization is disabled. // RUN: %clang -### -ffast-math -c %s 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-FAST-MATH %s +// RUN: %clang -### -fno-fast-math -ffast-math -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-FAST-MATH %s // RUN: %clang -### -ffast-math -fno-finite-math-only \ // RUN: -fno-unsafe-math-optimizations -fmath-errno -c %s 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-FAST-MATH %s // CHECK-FAST-MATH: "-cc1" // CHECK-FAST-MATH: "-ffast-math" // +// RUN: %clang -### -ffast-math -fno-fast-math -c %s 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NO-FAST-MATH %s +// CHECK-NO-FAST-MATH: "-cc1" +// CHECK-NO-FAST-MATH-NOT: "-ffast-math" +// // Check various means of disabling these flags, including disabling them after // they've been enabled via an umbrella flag. // RUN: %clang -### -fno-honor-infinities -fhonor-infinities -c %s 2>&1 \ diff --git a/test/Driver/freebsd-mips-as.c b/test/Driver/freebsd-mips-as.c new file mode 100644 index 0000000..54ff187 --- /dev/null +++ b/test/Driver/freebsd-mips-as.c @@ -0,0 +1,81 @@ +// Check passing options to the assembler for MIPS targets. 
+// +// RUN: %clang -target mips-unknown-freebsd -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS32-EB-AS %s +// MIPS32-EB-AS: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EB" +// MIPS32-EB-AS-NOT: "-KPIC" +// +// RUN: %clang -target mips-unknown-freebsd -### \ +// RUN: -no-integrated-as -fPIC -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS32-EB-PIC %s +// MIPS32-EB-PIC: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EB" +// MIPS32-EB-PIC: "-KPIC" +// +// RUN: %clang -target mips-unknown-freebsd -### \ +// RUN: -no-integrated-as -fpic -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS32-EB-PIC-SMALL %s +// MIPS32-EB-PIC-SMALL: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EB" +// MIPS32-EB-PIC-SMALL: "-KPIC" +// +// RUN: %clang -target mips-unknown-freebsd -### \ +// RUN: -no-integrated-as -fPIE -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS32-EB-PIE %s +// MIPS32-EB-PIE: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EB" +// MIPS32-EB-PIE: "-KPIC" +// +// RUN: %clang -target mips-unknown-freebsd -### \ +// RUN: -no-integrated-as -fpie -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS32-EB-PIE-SMALL %s +// MIPS32-EB-PIE-SMALL: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EB" +// MIPS32-EB-PIE-SMALL: "-KPIC" +// +// RUN: %clang -target mipsel-unknown-freebsd -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS32-EL-AS %s +// MIPS32-EL-AS: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EL" +// +// RUN: %clang -target mips64-unknown-freebsd -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS64-EB-AS %s +// MIPS64-EB-AS: as{{(.exe)?}}" "-march" "mips64" "-mabi" "64" "-EB" +// +// RUN: %clang -target mips64el-unknown-freebsd -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS64-EL-AS %s +// MIPS64-EL-AS: as{{(.exe)?}}" "-march" "mips64" "-mabi" "64" "-EL" +// +// RUN: %clang -target mips-unknown-freebsd -mabi=eabi -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-EABI %s +// MIPS-EABI: as{{(.exe)?}}" "-march" "mips32" "-mabi" "eabi" "-EB" +// +// RUN: %clang -target mips64-unknown-freebsd -mabi=n32 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-N32 %s +// MIPS-N32: as{{(.exe)?}}" "-march" "mips64" "-mabi" "n32" "-EB" +// +// RUN: %clang -target mips-linux-freebsd -march=mips32r2 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-32R2 %s +// MIPS-32R2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-EB" +// +// RUN: %clang -target mips-unknown-freebsd -mips32 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32 %s +// MIPS-ALIAS-32: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EB" +// +// RUN: %clang -target mips-unknown-freebsd -mips32r2 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32R2 %s +// MIPS-ALIAS-32R2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-EB" +// +// RUN: %clang -target mips-unknown-freebsd -mips64 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64 %s +// MIPS-ALIAS-64: as{{(.exe)?}}" "-march" "mips64" "-mabi" "64" "-EB" +// +// RUN: %clang -target mips-unknown-freebsd -mips64r2 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R2 %s +// MIPS-ALIAS-64R2: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-EB" diff 
--git a/test/Driver/freebsd.c b/test/Driver/freebsd.c index 642c60c..db53d4d 100644 --- a/test/Driver/freebsd.c +++ b/test/Driver/freebsd.c @@ -1,5 +1,5 @@ -// REQUIRES: ppc32-registered-target,ppc64-registered-target -// RUN: %clang -ccc-clang-archs powerpc -no-canonical-prefixes \ +// REQUIRES: ppc32-registered-target,ppc64-registered-target,mips-registered-target +// RUN: %clang -no-canonical-prefixes \ // RUN: -target powerpc-pc-freebsd8 %s \ // RUN: --sysroot=%S/Inputs/basic_freebsd_tree -### 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-PPC %s @@ -7,7 +7,7 @@ // CHECK-PPC: ld{{.*}}" "--sysroot=[[SYSROOT:[^"]+]]" // CHECK-PPC: "--eh-frame-hdr" "-dynamic-linker" "{{.*}}ld-elf{{.*}}" "-o" "a.out" "{{.*}}crt1.o" "{{.*}}crti.o" "{{.*}}crtbegin.o" "-L[[SYSROOT]]/usr/lib" "{{.*}}.o" "-lgcc" "--as-needed" "-lgcc_s" "--no-as-needed" "-lc" "-lgcc" "--as-needed" "-lgcc_s" "--no-as-needed" "{{.*}}crtend.o" "{{.*}}crtn.o" // -// RUN: %clang -ccc-clang-archs powerpc64 -no-canonical-prefixes \ +// RUN: %clang -no-canonical-prefixes \ // RUN: -target powerpc64-pc-freebsd8 %s \ // RUN: --sysroot=%S/Inputs/basic_freebsd64_tree -### 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-PPC64 %s @@ -43,4 +43,56 @@ // CHECK-LDFLAGS8: --enable-new-dtags // CHECK-LDFLAGS9: --hash-style=both // CHECK-LDFLAGS9: --enable-new-dtags +// +// Check that we do not pass --hash-style=gnu and --hash-style=both to linker +// and provide correct path to the dynamic linker for MIPS platforms. +// Also verify that we tell the assembler to target the right ISA and ABI. +// RUN: %clang %s -### -o %t.o 2>&1 \ +// RUN: -target mips-unknown-freebsd10.0 \ +// RUN: | FileCheck --check-prefix=CHECK-MIPS %s +// CHECK-MIPS: "{{[^" ]*}}ld{{[^" ]*}}" +// CHECK-MIPS: "-dynamic-linker" "{{.*}}/libexec/ld-elf.so.1" +// CHECK-MIPS-NOT: "--hash-style={{gnu|both}}" +// RUN: %clang %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-unknown-freebsd10.0 \ +// RUN: | FileCheck --check-prefix=CHECK-MIPSEL %s +// CHECK-MIPSEL: "{{[^" ]*}}ld{{[^" ]*}}" +// CHECK-MIPSEL: "-dynamic-linker" "{{.*}}/libexec/ld-elf.so.1" +// CHECK-MIPSEL-NOT: "--hash-style={{gnu|both}}" +// RUN: %clang %s -### -o %t.o 2>&1 \ +// RUN: -target mips64-unknown-freebsd10.0 \ +// RUN: | FileCheck --check-prefix=CHECK-MIPS64 %s +// CHECK-MIPS64: "{{[^" ]*}}ld{{[^" ]*}}" +// CHECK-MIPS64: "-dynamic-linker" "{{.*}}/libexec/ld-elf.so.1" +// CHECK-MIPS64-NOT: "--hash-style={{gnu|both}}" +// RUN: %clang %s -### -o %t.o 2>&1 \ +// RUN: -target mips64el-unknown-freebsd10.0 \ +// RUN: | FileCheck --check-prefix=CHECK-MIPS64EL %s +// CHECK-MIPS64EL: "{{[^" ]*}}ld{{[^" ]*}}" +// CHECK-MIPS64EL: "-dynamic-linker" "{{.*}}/libexec/ld-elf.so.1" +// CHECK-MIPS64EL-NOT: "--hash-style={{gnu|both}}" + +// RUN: %clang -no-canonical-prefixes -target x86_64-pc-freebsd8 -static %s \ +// RUN: --sysroot=%S/Inputs/multiarch_freebsd64_tree -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-STATIC %s +// CHECK-STATIC: crt1.o +// CHECK-STATIC: crtbeginT.o + +// RUN: %clang -no-canonical-prefixes -target x86_64-pc-freebsd8 -shared %s \ +// RUN: --sysroot=%S/Inputs/multiarch_freebsd64_tree -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-SHARED %s +// CHECK-SHARED: crti.o +// CHECK-SHARED: crtbeginS.o + +// RUN: %clang -no-canonical-prefixes -target x86_64-pc-freebsd8 -pie %s \ +// RUN: --sysroot=%S/Inputs/multiarch_freebsd64_tree -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-PIE %s +// CHECK-PIE: pie +// CHECK-PIE: Scrt1.o +// CHECK-PIE: crtbeginS.o +// RUN: %clang -no-canonical-prefixes 
-target x86_64-pc-freebsd8 %s \ +// RUN: --sysroot=%S/Inputs/multiarch_freebsd64_tree -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NORMAL %s +// CHECK-NORMAL: crt1.o +// CHECK-NORMAL: crtbegin.o diff --git a/test/Driver/fsanitize.c b/test/Driver/fsanitize.c new file mode 100644 index 0000000..9f7cd46 --- /dev/null +++ b/test/Driver/fsanitize.c @@ -0,0 +1,23 @@ +// RUN: %clang -target x86_64-linux-gnu -fcatch-undefined-behavior %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED +// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-UNDEFINED +// CHECK-UNDEFINED: "-fsanitize={{((signed-integer-overflow|divide-by-zero|shift|unreachable|return|vla-bound|alignment|null|vptr|object-size|float-cast-overflow),?){11}"}} + +// RUN: %clang -target x86_64-linux-gnu -fsanitize=thread,undefined -fno-thread-sanitizer -fno-sanitize=float-cast-overflow,vptr %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-PARTIAL-UNDEFINED +// CHECK-PARTIAL-UNDEFINED: "-fsanitize={{((signed-integer-overflow|divide-by-zero|shift|unreachable|return|vla-bound|alignment|null|object-size),?){9}"}} + +// RUN: %clang -target x86_64-linux-gnu -fsanitize=vptr -fno-rtti %s -c -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-VPTR-NO-RTTI +// RUN: %clang -target x86_64-linux-gnu -fsanitize=undefined -fno-rtti %s -c -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-VPTR-NO-RTTI +// CHECK-VPTR-NO-RTTI: '-fsanitize=vptr' not allowed with '-fno-rtti' + +// RUN: %clang -target x86_64-linux-gnu -fsanitize=address,thread -fno-rtti %s -c -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-SANA-SANT +// CHECK-SANA-SANT: '-fsanitize=address' not allowed with '-fsanitize=thread' + +// RUN: %clang -target x86_64-linux-gnu -faddress-sanitizer -fthread-sanitizer -fno-rtti %s -c -o /dev/null 2>&1 | FileCheck %s --check-prefix=CHECK-ASAN-TSAN +// CHECK-ASAN-TSAN: '-faddress-sanitizer' not allowed with '-fthread-sanitizer' + +// RUN: %clang -target x86_64-linux-gnu -fcatch-undefined-behavior -fthread-sanitizer -fno-thread-sanitizer -faddress-sanitizer -fno-address-sanitizer -c -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=CHECK-DEPRECATED +// CHECK-DEPRECATED: argument '-fcatch-undefined-behavior' is deprecated, use '-fsanitize=undefined' instead +// CHECK-DEPRECATED: argument '-fthread-sanitizer' is deprecated, use '-fsanitize=thread' instead +// CHECK-DEPRECATED: argument '-fno-thread-sanitizer' is deprecated, use '-fno-sanitize=thread' instead +// CHECK-DEPRECATED: argument '-faddress-sanitizer' is deprecated, use '-fsanitize=address' instead +// CHECK-DEPRECATED: argument '-fno-address-sanitizer' is deprecated, use '-fno-sanitize=address' instead diff --git a/test/Driver/gcc_forward.c b/test/Driver/gcc_forward.c index 77f401b..8eead21 100644 --- a/test/Driver/gcc_forward.c +++ b/test/Driver/gcc_forward.c @@ -1,7 +1,7 @@ // Check that we don't try to forward -Xclang or -mlinker-version to GCC. // // RUN: %clang -target powerpc-unknown-unknown \ -// RUN: -ccc-clang-archs i386 -c %s \ +// RUN: -c %s \ // RUN: -Xclang foo-bar \ // RUN: -mlinker-version=10 -### 2> %t // RUN: FileCheck < %t %s diff --git a/test/Driver/hello.c b/test/Driver/hello.c deleted file mode 100644 index c2260e5..0000000 --- a/test/Driver/hello.c +++ /dev/null @@ -1,18 +0,0 @@ -// RUN: %clang -ccc-echo -o %t.exe %s 2> %t.log - -// Make sure we used clang. -// RUN: grep 'clang\(-[0-9.]\+\)\?\(\.[Ee][Xx][Ee]\)\?" 
-cc1 .*hello.c' %t.log - -// RUN: %t.exe > %t.out -// RUN: grep "I'm a little driver, short and stout." %t.out - -// FIXME: We don't have a usable assembler on Windows, so we can't build real -// apps yet. -// XFAIL: win32 - -#include - -int main() { - printf("I'm a little driver, short and stout."); - return 0; -} diff --git a/test/Driver/immediate-options.c b/test/Driver/immediate-options.c index 5a3ec87..2b54ecf7 100644 --- a/test/Driver/immediate-options.c +++ b/test/Driver/immediate-options.c @@ -1,4 +1,6 @@ -// RUN: %clang --help -// RUN: %clang --help-hidden +// RUN: %clang --help | grep isystem +// RUN: %clang --help | not grep ast-dump +// RUN: %clang --help | not grep ccc-cxx +// RUN: %clang --help-hidden | grep ccc-cxx // RUN: %clang -dumpversion // RUN: %clang -print-search-dirs diff --git a/test/Driver/ios-simulator-arcruntime.c b/test/Driver/ios-simulator-arcruntime.c index 33d3492..605df93 100644 --- a/test/Driver/ios-simulator-arcruntime.c +++ b/test/Driver/ios-simulator-arcruntime.c @@ -1,8 +1,8 @@ -// RUN: %clang -### -x objective-c -target i386-apple-darwin10 -arch i386 -mmacosx-version-min=10.6 -D__IPHONE_OS_VERSION_MIN_REQUIRED=40201 -fobjc-arc -fsyntax-only %s 2>&1 | FileCheck -check-prefix=CHECK-OPTIONS1 %s -// RUN: %clang -### -x objective-c -target i386-apple-darwin10 -arch i386 -D__IPHONE_OS_VERSION_MIN_REQUIRED=50000 -fobjc-arc -fsyntax-only %s 2>&1 | FileCheck -check-prefix=CHECK-OPTIONS2 %s +// RUN: %clang -### -x objective-c -target i386-apple-darwin10 -arch i386 -mios-simulator-version-min=4.2.1 -fobjc-arc -fsyntax-only %s 2>&1 | FileCheck -check-prefix=CHECK-OPTIONS1 %s +// RUN: %clang -### -x objective-c -target i386-apple-darwin10 -arch i386 -mios-simulator-version-min=5.0.0 -fobjc-arc -fsyntax-only %s 2>&1 | FileCheck -check-prefix=CHECK-OPTIONS2 %s // -// CHECK-OPTIONS1: i386-apple-macosx10.6.0 +// CHECK-OPTIONS1: i386-apple-ios4.2.1 // CHECK-OPTIONS1: -fobjc-runtime=ios-4.2.1 -// CHECK-OPTIONS2: i386-apple-macosx10.6.0 +// CHECK-OPTIONS2: i386-apple-ios5.0.0 // CHECK-OPTIONS2: -fobjc-runtime=ios-5.0.0 diff --git a/test/Driver/le32-unknown-nacl.cpp b/test/Driver/le32-unknown-nacl.cpp index f68b220..61388c5 100644 --- a/test/Driver/le32-unknown-nacl.cpp +++ b/test/Driver/le32-unknown-nacl.cpp @@ -1,6 +1,6 @@ -// RUN: %clang -target le32-unknown-nacl -ccc-clang-archs le32 -ccc-echo %s -emit-llvm-only -c 2>&1 | FileCheck %s -check-prefix=ECHO -// RUN: %clang -target le32-unknown-nacl -ccc-clang-archs le32 %s -emit-llvm -S -c -o - | FileCheck %s -// RUN: %clang -target le32-unknown-nacl -ccc-clang-archs le32 %s -emit-llvm -S -c -pthread -o - | FileCheck %s -check-prefix=THREADS +// RUN: %clang -target le32-unknown-nacl -ccc-echo %s -emit-llvm-only -c 2>&1 | FileCheck %s -check-prefix=ECHO +// RUN: %clang -target le32-unknown-nacl %s -emit-llvm -S -c -o - | FileCheck %s +// RUN: %clang -target le32-unknown-nacl %s -emit-llvm -S -c -pthread -o - | FileCheck %s -check-prefix=THREADS // ECHO: {{.*}} -cc1 {{.*}}le32-unknown-nacl.c diff --git a/test/Driver/linker-opts.c b/test/Driver/linker-opts.c index 85e180c..2a96a17 100644 --- a/test/Driver/linker-opts.c +++ b/test/Driver/linker-opts.c @@ -1,5 +1,5 @@ // RUN: env LIBRARY_PATH=%T/test1 %clang -x c %s -### 2>&1 | FileCheck %s -// CHECK: "-L" "{{.*}}/test1" +// CHECK: "-L{{.*}}/test1" // GCC driver is used as linker on cygming. It should be aware of LIBRARY_PATH. 
// XFAIL: cygwin,mingw32,win32 diff --git a/test/Driver/linux-header-search.cpp b/test/Driver/linux-header-search.cpp index ea82660..065bd34 100644 --- a/test/Driver/linux-header-search.cpp +++ b/test/Driver/linux-header-search.cpp @@ -45,7 +45,7 @@ // CHECK-DEBIAN-X86-64: "-internal-externc-isystem" "[[SYSROOT]]/usr/include/x86_64-linux-gnu" // CHECK-DEBIAN-X86-64: "-internal-externc-isystem" "[[SYSROOT]]/include" // CHECK-DEBIAN-X86-64: "-internal-externc-isystem" "[[SYSROOT]]/usr/include" -// RUN: %clang -ccc-clang-archs powerpc -no-canonical-prefixes %s -### -fsyntax-only 2>&1 \ +// RUN: %clang -no-canonical-prefixes %s -### -fsyntax-only 2>&1 \ // RUN: -target powerpc-linux-gnu \ // RUN: --sysroot=%S/Inputs/debian_multiarch_tree \ // RUN: | FileCheck --check-prefix=CHECK-DEBIAN-PPC %s @@ -59,7 +59,7 @@ // CHECK-DEBIAN-PPC: "-internal-externc-isystem" "[[SYSROOT]]/usr/include/powerpc-linux-gnu" // CHECK-DEBIAN-PPC: "-internal-externc-isystem" "[[SYSROOT]]/include" // CHECK-DEBIAN-PPC: "-internal-externc-isystem" "[[SYSROOT]]/usr/include" -// RUN: %clang -ccc-clang-archs powerpc64 -no-canonical-prefixes %s -### -fsyntax-only 2>&1 \ +// RUN: %clang -no-canonical-prefixes %s -### -fsyntax-only 2>&1 \ // RUN: -target powerpc64-linux-gnu \ // RUN: --sysroot=%S/Inputs/debian_multiarch_tree \ // RUN: | FileCheck --check-prefix=CHECK-DEBIAN-PPC64 %s diff --git a/test/Driver/linux-ld.c b/test/Driver/linux-ld.c index a6831b6..7237029 100644 --- a/test/Driver/linux-ld.c +++ b/test/Driver/linux-ld.c @@ -238,33 +238,47 @@ // and provide correct path to the dynamic linker and emulation mode when build // for MIPS platforms. // RUN: %clang %s -### -o %t.o 2>&1 \ -// RUN: -target mips-linux-gnu -ccc-clang-archs mips \ +// RUN: -target mips-linux-gnu \ // RUN: | FileCheck --check-prefix=CHECK-MIPS %s // CHECK-MIPS: "{{.*}}ld{{(.exe)?}}" // CHECK-MIPS: "-m" "elf32btsmip" // CHECK-MIPS: "-dynamic-linker" "{{.*}}/lib/ld.so.1" // CHECK-MIPS-NOT: "--hash-style={{gnu|both}}" // RUN: %clang %s -### -o %t.o 2>&1 \ -// RUN: -target mipsel-linux-gnu -ccc-clang-archs mipsel \ +// RUN: -target mipsel-linux-gnu \ // RUN: | FileCheck --check-prefix=CHECK-MIPSEL %s // CHECK-MIPSEL: "{{.*}}ld{{(.exe)?}}" // CHECK-MIPSEL: "-m" "elf32ltsmip" // CHECK-MIPSEL: "-dynamic-linker" "{{.*}}/lib/ld.so.1" // CHECK-MIPSEL-NOT: "--hash-style={{gnu|both}}" // RUN: %clang %s -### -o %t.o 2>&1 \ -// RUN: -target mips64-linux-gnu -ccc-clang-archs mips64 \ +// RUN: -target mips64-linux-gnu \ // RUN: | FileCheck --check-prefix=CHECK-MIPS64 %s // CHECK-MIPS64: "{{.*}}ld{{(.exe)?}}" // CHECK-MIPS64: "-m" "elf64btsmip" // CHECK-MIPS64: "-dynamic-linker" "{{.*}}/lib64/ld.so.1" // CHECK-MIPS64-NOT: "--hash-style={{gnu|both}}" // RUN: %clang %s -### -o %t.o 2>&1 \ -// RUN: -target mips64el-linux-gnu -ccc-clang-archs mips64el \ +// RUN: -target mips64el-linux-gnu \ // RUN: | FileCheck --check-prefix=CHECK-MIPS64EL %s // CHECK-MIPS64EL: "{{.*}}ld{{(.exe)?}}" // CHECK-MIPS64EL: "-m" "elf64ltsmip" // CHECK-MIPS64EL: "-dynamic-linker" "{{.*}}/lib64/ld.so.1" // CHECK-MIPS64EL-NOT: "--hash-style={{gnu|both}}" +// RUN: %clang %s -### -o %t.o 2>&1 \ +// RUN: -target mips64-linux-gnu -mabi=n32 \ +// RUN: | FileCheck --check-prefix=CHECK-MIPS64-N32 %s +// CHECK-MIPS64-N32: "{{.*}}ld{{(.exe)?}}" +// CHECK-MIPS64-N32: "-m" "elf32btsmipn32" +// CHECK-MIPS64-N32: "-dynamic-linker" "{{.*}}/lib32/ld.so.1" +// CHECK-MIPS64-N32-NOT: "--hash-style={{gnu|both}}" +// RUN: %clang %s -### -o %t.o 2>&1 \ +// RUN: -target mips64el-linux-gnu -mabi=n32 \ +// RUN: | 
FileCheck --check-prefix=CHECK-MIPS64EL-N32 %s +// CHECK-MIPS64EL-N32: "{{.*}}ld{{(.exe)?}}" +// CHECK-MIPS64EL-N32: "-m" "elf32ltsmipn32" +// CHECK-MIPS64EL-N32: "-dynamic-linker" "{{.*}}/lib32/ld.so.1" +// CHECK-MIPS64EL-N32-NOT: "--hash-style={{gnu|both}}" // // Thoroughly exercise the Debian multiarch environment. // RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ @@ -361,11 +375,45 @@ // CHECK-DEBIAN-MIPS64EL: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.5/../../.." // CHECK-DEBIAN-MIPS64EL: "-L[[SYSROOT]]/lib" // CHECK-DEBIAN-MIPS64EL: "-L[[SYSROOT]]/usr/lib" +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mips64-linux-gnu -mabi=n32 \ +// RUN: --sysroot=%S/Inputs/debian_multiarch_tree \ +// RUN: | FileCheck --check-prefix=CHECK-DEBIAN-MIPS64-N32 %s +// CHECK-DEBIAN-MIPS64-N32: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-DEBIAN-MIPS64-N32: "{{.*}}/usr/lib/gcc/mips-linux-gnu/4.5/n32/crtbegin.o" +// CHECK-DEBIAN-MIPS64-N32: "-L[[SYSROOT]]/usr/lib/gcc/mips-linux-gnu/4.5/n32" +// CHECK-DEBIAN-MIPS64-N32: "-L[[SYSROOT]]/usr/lib/gcc/mips-linux-gnu/4.5" +// CHECK-DEBIAN-MIPS64-N32: "-L[[SYSROOT]]/usr/lib/gcc/mips-linux-gnu/4.5/../../.." +// CHECK-DEBIAN-MIPS64-N32: "-L[[SYSROOT]]/lib" +// CHECK-DEBIAN-MIPS64-N32: "-L[[SYSROOT]]/usr/lib" +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mips64el-linux-gnu -mabi=n32 \ +// RUN: --sysroot=%S/Inputs/debian_multiarch_tree \ +// RUN: | FileCheck --check-prefix=CHECK-DEBIAN-MIPS64EL-N32 %s +// CHECK-DEBIAN-MIPS64EL-N32: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-DEBIAN-MIPS64EL-N32: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.5/n32/crtbegin.o" +// CHECK-DEBIAN-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.5/n32" +// CHECK-DEBIAN-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.5" +// CHECK-DEBIAN-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.5/../../.." +// CHECK-DEBIAN-MIPS64EL-N32: "-L[[SYSROOT]]/lib" +// CHECK-DEBIAN-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib" // // Test linker invocation on Android. 
// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ // RUN: -target arm-linux-androideabi \ -// RUN: --sysroot=%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target arm-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target i386-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ // RUN: | FileCheck --check-prefix=CHECK-ANDROID %s // CHECK-ANDROID: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" // CHECK-ANDROID: "{{.*}}/crtbegin_dynamic.o" @@ -376,10 +424,26 @@ // CHECK-ANDROID: "{{.*}}/crtend_android.o" // RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ // RUN: -target arm-linux-androideabi \ -// RUN: --sysroot=%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -shared \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-SO %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target arm-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -shared \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-SO %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -shared \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-SO %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target i386-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ // RUN: -shared \ // RUN: | FileCheck --check-prefix=CHECK-ANDROID-SO %s // CHECK-ANDROID-SO: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-ANDROID-SO: "-Bsymbolic" // CHECK-ANDROID-SO: "{{.*}}/crtbegin_so.o" // CHECK-ANDROID-SO: "-L[[SYSROOT]]/usr/lib" // CHECK-ANDROID-SO-NOT: "gcc_s" @@ -388,7 +452,22 @@ // CHECK-ANDROID-SO: "{{.*}}/crtend_so.o" // RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ // RUN: -target arm-linux-androideabi \ -// RUN: --sysroot=%S/Inputs/basic_android_tree \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -static \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-STATIC %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target arm-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -static \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-STATIC %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -static \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-STATIC %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target i386-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ // RUN: -static \ // RUN: | FileCheck --check-prefix=CHECK-ANDROID-STATIC %s // CHECK-ANDROID-STATIC: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" @@ -398,3 +477,120 @@ // CHECK-ANDROID-STATIC: "-lgcc" // CHECK-ANDROID-STATIC-NOT: "gcc_s" // CHECK-ANDROID-STATIC: 
"{{.*}}/crtend_android.o" +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target arm-linux-androideabi \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -pie \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-PIE %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target arm-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -pie \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-PIE %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -pie \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-PIE %s +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target i386-linux-android \ +// RUN: --sysroot=%S/Inputs/basic_android_tree/sysroot \ +// RUN: -pie \ +// RUN: | FileCheck --check-prefix=CHECK-ANDROID-PIE %s +// CHECK-ANDROID-PIE: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-ANDROID-PIE: "{{.*}}/crtbegin_dynamic.o" +// CHECK-ANDROID-PIE: "-L[[SYSROOT]]/usr/lib" +// CHECK-ANDROID-PIE-NOT: "gcc_s" +// CHECK-ANDROID-PIE: "-lgcc" +// CHECK-ANDROID-PIE-NOT: "gcc_s" +// CHECK-ANDROID-PIE: "{{.*}}/crtend_android.o" +// +// Check linker invocation on Debian 6 MIPS 32/64-bit. +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mipsel-linux-gnu \ +// RUN: --sysroot=%S/Inputs/debian_6_mips_tree \ +// RUN: | FileCheck --check-prefix=CHECK-DEBIAN-ML-MIPSEL %s +// CHECK-DEBIAN-ML-MIPSEL: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-DEBIAN-ML-MIPSEL: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib/crt1.o" +// CHECK-DEBIAN-ML-MIPSEL: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib/crti.o" +// CHECK-DEBIAN-ML-MIPSEL: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/crtbegin.o" +// CHECK-DEBIAN-ML-MIPSEL: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4" +// CHECK-DEBIAN-ML-MIPSEL: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib" +// CHECK-DEBIAN-ML-MIPSEL: "-L[[SYSROOT]]/lib/../lib" +// CHECK-DEBIAN-ML-MIPSEL: "-L[[SYSROOT]]/usr/lib/../lib" +// CHECK-DEBIAN-ML-MIPSEL: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4/../../.." +// CHECK-DEBIAN-ML-MIPSEL: "-L[[SYSROOT]]/lib" +// CHECK-DEBIAN-ML-MIPSEL: "-L[[SYSROOT]]/usr/lib" +// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mips64el-linux-gnu \ +// RUN: --sysroot=%S/Inputs/debian_6_mips_tree \ +// RUN: | FileCheck --check-prefix=CHECK-DEBIAN-ML-MIPS64EL %s +// CHECK-DEBIAN-ML-MIPS64EL: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-DEBIAN-ML-MIPS64EL: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib64/crt1.o" +// CHECK-DEBIAN-ML-MIPS64EL: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib64/crti.o" +// CHECK-DEBIAN-ML-MIPS64EL: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/64/crtbegin.o" +// CHECK-DEBIAN-ML-MIPS64EL: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4/64" +// CHECK-DEBIAN-ML-MIPS64EL: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib64" +// CHECK-DEBIAN-ML-MIPS64EL: "-L[[SYSROOT]]/lib/../lib64" +// CHECK-DEBIAN-ML-MIPS64EL: "-L[[SYSROOT]]/usr/lib/../lib64" +// CHECK-DEBIAN-ML-MIPS64EL: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4/../../.." 
+// CHECK-DEBIAN-ML-MIPS64EL: "-L[[SYSROOT]]/lib" +// CHECK-DEBIAN-ML-MIPS64EL: "-L[[SYSROOT]]/usr/lib" +// +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target mips64el-linux-gnu -mabi=n32 \ +// RUN: --sysroot=%S/Inputs/debian_6_mips_tree \ +// RUN: | FileCheck --check-prefix=CHECK-DEBIAN-ML-MIPS64EL-N32 %s +// CHECK-DEBIAN-ML-MIPS64EL-N32: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib32/crt1.o" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib32/crti.o" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "{{.*}}/usr/lib/gcc/mipsel-linux-gnu/4.4/n32/crtbegin.o" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4/n32" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4/../../../../lib32" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "-L[[SYSROOT]]/lib/../lib32" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib/../lib32" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib/gcc/mipsel-linux-gnu/4.4/../../.." +// CHECK-DEBIAN-ML-MIPS64EL-N32: "-L[[SYSROOT]]/lib" +// CHECK-DEBIAN-ML-MIPS64EL-N32: "-L[[SYSROOT]]/usr/lib" +// +// Test linker invocation for Freescale SDK (OpenEmbedded). +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target powerpc-fsl-linux \ +// RUN: --sysroot=%S/Inputs/freescale_ppc_tree \ +// RUN: | FileCheck --check-prefix=CHECK-FSL-PPC %s +// CHECK-FSL-PPC: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-FSL-PPC: "-m" "elf32ppclinux" +// CHECK-FSL-PPC: "{{.*}}/crt1.o" +// CHECK-FSL-PPC: "{{.*}}/crtbegin.o" +// CHECK-FSL-PPC: "-L[[SYSROOT]]/usr/lib" +// RUN: %clang -no-canonical-prefixes %s -### -o %t.o 2>&1 \ +// RUN: -target powerpc64-fsl-linux \ +// RUN: --sysroot=%S/Inputs/freescale_ppc64_tree \ +// RUN: | FileCheck --check-prefix=CHECK-FSL-PPC64 %s +// CHECK-FSL-PPC64: "{{.*}}ld{{(.exe)?}}" "--sysroot=[[SYSROOT:[^"]+]]" +// CHECK-FSL-PPC64: "-m" "elf64ppc" +// CHECK-FSL-PPC64: "{{.*}}/crt1.o" +// CHECK-FSL-PPC64: "{{.*}}/crtbegin.o" +// CHECK-FSL-PPC64: "-L[[SYSROOT]]/usr/lib64/powerpc64-fsl-linux/4.6.2/../.." +// +// Check that crtfastmath.o is linked with -ffast-math. +// RUN: %clang -target x86_64-unknown-linux -### %s \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NOCRTFASTMATH %s +// RUN: %clang -target x86_64-unknown-linux -### %s -ffast-math \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-CRTFASTMATH %s +// RUN: %clang -target x86_64-unknown-linux -### %s -funsafe-math-optimizations\ +// RUN: --sysroot=%S/Inputs/basic_linux_tree 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-CRTFASTMATH %s +// RUN: %clang -target x86_64-unknown-linux -### %s -ffast-math -fno-fast-math \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NOCRTFASTMATH %s +// We don't have crtfastmath.o in the i386 tree, use it to check that file +// detection works. 
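//
// Why crtfastmath.o matters here (illustrative sketch only, not the actual
// crtfastmath.o source): the object contributes a startup constructor that
// flips the x86 MXCSR flush-to-zero (bit 15) and denormals-are-zero (bit 6)
// bits, which is the runtime behaviour -ffast-math is allowed to assume.
// The helper name below is hypothetical and only sketches that effect.
#include <xmmintrin.h>
__attribute__((constructor)) static void enable_fast_math_fpu(void) {
  /* FTZ = 0x8000, DAZ = 0x0040; set both, roughly what crtfastmath.o's
     constructor does on x86. */
  _mm_setcsr(_mm_getcsr() | 0x8040);
}
//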
+// RUN: %clang -target i386-unknown-linux -### %s -ffast-math \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-NOCRTFASTMATH %s +// CHECK-CRTFASTMATH: usr/lib/gcc/x86_64-unknown-linux/4.6.0/crtfastmath.o +// CHECK-NOCRTFASTMATH-NOT: crtfastmath.o diff --git a/test/Driver/mips-as.c b/test/Driver/mips-as.c index 0ace4dd..fbaf62f 100644 --- a/test/Driver/mips-as.c +++ b/test/Driver/mips-as.c @@ -1,3 +1,5 @@ +// REQUIRES: mips-registered-target +// // Check passing options to the assembler for MIPS targets. // // RUN: %clang -target mips-linux-gnu -### \ @@ -36,3 +38,28 @@ // RUN: -no-integrated-as -c %s 2>&1 \ // RUN: | FileCheck -check-prefix=MIPS-N32 %s // MIPS-N32: as{{(.exe)?}}" "-march" "mips64" "-mabi" "n32" "-EB" +// +// RUN: %clang -target mips-linux-gnu -march=mips32r2 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-32R2 %s +// MIPS-32R2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-EB" +// +// RUN: %clang -target mips-linux-gnu -mips32 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32 %s +// MIPS-ALIAS-32: as{{(.exe)?}}" "-march" "mips32" "-mabi" "32" "-EB" +// +// RUN: %clang -target mips-linux-gnu -mips32r2 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-ALIAS-32R2 %s +// MIPS-ALIAS-32R2: as{{(.exe)?}}" "-march" "mips32r2" "-mabi" "32" "-EB" +// +// RUN: %clang -target mips-linux-gnu -mips64 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64 %s +// MIPS-ALIAS-64: as{{(.exe)?}}" "-march" "mips64" "-mabi" "64" "-EB" +// +// RUN: %clang -target mips-linux-gnu -mips64r2 -### \ +// RUN: -no-integrated-as -c %s 2>&1 \ +// RUN: | FileCheck -check-prefix=MIPS-ALIAS-64R2 %s +// MIPS-ALIAS-64R2: as{{(.exe)?}}" "-march" "mips64r2" "-mabi" "64" "-EB" diff --git a/test/Driver/mips-features.c b/test/Driver/mips-features.c index 5be2683..28048e7 100644 --- a/test/Driver/mips-features.c +++ b/test/Driver/mips-features.c @@ -37,3 +37,9 @@ // RUN: -mdspr2 -mno-dspr2 2>&1 \ // RUN: | FileCheck --check-prefix=CHECK-NOMDSPR2 %s // CHECK-NOMDSPR2: "-target-feature" "-dspr2" +// +// -G +// RUN: %clang -target mips-linux-gnu -### -c %s \ +// RUN: -G 16 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-MIPS-G %s +// CHECK-MIPS-G: "-mllvm" "-mips-ssection-threshold=16" diff --git a/test/Driver/mips-float.c b/test/Driver/mips-float.c index 95eb002..886c335 100644 --- a/test/Driver/mips-float.c +++ b/test/Driver/mips-float.c @@ -3,19 +3,19 @@ // when build for MIPS platforms. 
// // Default -// RUN: %clang -ccc-clang-archs mips -c %s -### -o %t.o 2>&1 \ +// RUN: %clang -c %s -### -o %t.o 2>&1 \ // RUN: -target mips-linux-gnu \ // RUN: | FileCheck --check-prefix=CHECK-DEF %s // CHECK-DEF: "-mfloat-abi" "hard" // // -mhard-float -// RUN: %clang -ccc-clang-archs mips -c %s -### -o %t.o 2>&1 \ +// RUN: %clang -c %s -### -o %t.o 2>&1 \ // RUN: -target mips-linux-gnu -mhard-float \ // RUN: | FileCheck --check-prefix=CHECK-HARD %s // CHECK-HARD: "-mfloat-abi" "hard" // // -msoft-float -// RUN: %clang -ccc-clang-archs mips -c %s -### -o %t.o 2>&1 \ +// RUN: %clang -c %s -### -o %t.o 2>&1 \ // RUN: -target mips-linux-gnu -msoft-float \ // RUN: | FileCheck --check-prefix=CHECK-SOFT %s // CHECK-SOFT: "-msoft-float" @@ -23,13 +23,13 @@ // CHECK-SOFT: "-target-feature" "+soft-float" // // -mfloat-abi=hard -// RUN: %clang -ccc-clang-archs mips -c %s -### -o %t.o 2>&1 \ +// RUN: %clang -c %s -### -o %t.o 2>&1 \ // RUN: -target mips-linux-gnu -mfloat-abi=hard \ // RUN: | FileCheck --check-prefix=CHECK-ABI-HARD %s // CHECK-ABI-HARD: "-mfloat-abi" "hard" // // -mfloat-abi=soft -// RUN: %clang -ccc-clang-archs mips -c %s -### -o %t.o 2>&1 \ +// RUN: %clang -c %s -### -o %t.o 2>&1 \ // RUN: -target mips-linux-gnu -mfloat-abi=soft \ // RUN: | FileCheck --check-prefix=CHECK-ABI-SOFT %s // CHECK-ABI-SOFT: "-msoft-float" @@ -37,7 +37,7 @@ // CHECK-ABI-SOFT: "-target-feature" "+soft-float" // // -mfloat-abi=single -// RUN: %clang -ccc-clang-archs mips -c %s -### -o %t.o 2>&1 \ +// RUN: %clang -c %s -### -o %t.o 2>&1 \ // RUN: -target mips-linux-gnu -mfloat-abi=single \ // RUN: | FileCheck --check-prefix=CHECK-ABI-SINGLE %s // CHECK-ABI-SINGLE: "-target-feature" "+single-float" diff --git a/test/Driver/no-objc-arr.m b/test/Driver/no-objc-arr.m index e449393..21246a3 100644 --- a/test/Driver/no-objc-arr.m +++ b/test/Driver/no-objc-arr.m @@ -1,4 +1,5 @@ // RUN: %clang -Werror -fobjc-arc -fsyntax-only -fno-objc-arc -Xclang -verify %s +// expected-no-diagnostics // rdar://8949617 void * FOO() { diff --git a/test/Driver/objc++-cpp-output.mm b/test/Driver/objc++-cpp-output.mm index bb88144..9c4d553 100644 --- a/test/Driver/objc++-cpp-output.mm +++ b/test/Driver/objc++-cpp-output.mm @@ -1,5 +1,8 @@ // RUN: %clang -x objc++-cpp-output -c %s -o /dev/null +// PR13820 +// REQUIRES: LP64 + // Should compile without errors @protocol P - (void)m; diff --git a/test/Driver/objc-cpp-output.m b/test/Driver/objc-cpp-output.m index 6d97483..8c174f7 100644 --- a/test/Driver/objc-cpp-output.m +++ b/test/Driver/objc-cpp-output.m @@ -1,5 +1,8 @@ // RUN: %clang -x objc-cpp-output -c %s -o /dev/null +// PR13820 +// REQUIRES: LP64 + // Should compile without errors @protocol P - (void)m; diff --git a/test/Driver/openbsd.c b/test/Driver/openbsd.c index 911c452c..afd8b5a 100644 --- a/test/Driver/openbsd.c +++ b/test/Driver/openbsd.c @@ -1,5 +1,9 @@ -// RUN: %clang -no-canonical-prefixes -ccc-clang-archs "" -target i686-pc-openbsd %s -### 2> %t.log -// RUN: FileCheck -input-file %t.log %s +// RUN: %clang -no-canonical-prefixes -target i686-pc-openbsd %s -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-LD %s +// CHECK-LD: clang{{.*}}" "-cc1" "-triple" "i686-pc-openbsd" +// CHECK-LD: ld{{.*}}" "-e" "__start" "--eh-frame-hdr" "-Bdynamic" "-dynamic-linker" "{{.*}}ld.so" "-o" "a.out" "{{.*}}crt0.o" "{{.*}}crtbegin.o" "{{.*}}.o" "-lgcc" "-lc" "-lgcc" "{{.*}}crtend.o" -// CHECK: clang{{.*}}" "-cc1" "-triple" "i686-pc-openbsd" -// CHECK: ld{{.*}}" "-e" "__start" "--eh-frame-hdr" "-Bdynamic" "-dynamic-linker" 
"{{.*}}ld.so" "-o" "a.out" "{{.*}}crt0.o" "{{.*}}crtbegin.o" "{{.*}}.o" "-lgcc" "-lc" "-lgcc" "{{.*}}crtend.o" +// RUN: %clang -no-canonical-prefixes -target i686-pc-openbsd -pg -pthread %s -### 2>&1 \ +// RUN: | FileCheck --check-prefix=CHECK-PG %s +// CHECK-PG: clang{{.*}}" "-cc1" "-triple" "i686-pc-openbsd" +// CHECK-PG: ld{{.*}}" "-e" "__start" "--eh-frame-hdr" "-Bdynamic" "-dynamic-linker" "{{.*}}ld.so" "-o" "a.out" "{{.*}}crt0.o" "{{.*}}crtbegin.o" "{{.*}}.o" "-lgcc" "-lpthread_p" "-lc_p" "-lgcc" "{{.*}}crtend.o" diff --git a/test/Driver/pic.c b/test/Driver/pic.c index 3952f85..54e5982 100644 --- a/test/Driver/pic.c +++ b/test/Driver/pic.c @@ -5,24 +5,35 @@ // CHECK-NO-PIC-NOT: "-pic-level" // CHECK-NO-PIC-NOT: "-pie-level" // -// CHECK-DYNAMIC-NO-PIC1: "-mrelocation-model" "dynamic-no-pic" -// CHECK-DYNAMIC-NO-PIC1: "-pic-level" "1" -// -// CHECK-DYNAMIC-NO-PIC2: "-mrelocation-model" "dynamic-no-pic" -// CHECK-DYNAMIC-NO-PIC2: "-pic-level" "2" -// -// CHECK-PIC1-NOT: "-mrelocation-model" +// CHECK-PIC1: "-mrelocation-model" "pic" // CHECK-PIC1: "-pic-level" "1" // -// CHECK-PIC2-NOT: "-mrelocation-model" +// CHECK-PIC2: "-mrelocation-model" "pic" // CHECK-PIC2: "-pic-level" "2" // -// CHECK-PIE1-NOT: "-mrelocation-model" +// CHECK-PIE1: "-mrelocation-model" "pic" +// CHECK-PIE1: "-pic-level" "1" // CHECK-PIE1: "-pie-level" "1" // -// CHECK-PIE2-NOT: "-mrelocation-model" +// CHECK-PIE2: "-mrelocation-model" "pic" +// CHECK-PIE2: "-pic-level" "2" // CHECK-PIE2: "-pie-level" "2" // +// CHECK-PIE-LD: "{{.*}}ld{{(.exe)?}}" +// CHECK-PIE-LD: "-pie" +// CHECK-PIE-LD: "Scrt1.o" "crti.o" "crtbeginS.o" +// CHECK-PIE-LD: "crtendS.o" "crtn.o" +// +// CHECK-DYNAMIC-NO-PIC-32: "-mrelocation-model" "dynamic-no-pic" +// CHECK-DYNAMIC-NO-PIC-32-NOT: "-pic-level" +// CHECK-DYNAMIC-NO-PIC-32-NOT: "-pie-level" +// +// CHECK-DYNAMIC-NO-PIC-64: "-mrelocation-model" "dynamic-no-pic" +// CHECK-DYNAMIC-NO-PIC-64: "-pic-level" "2" +// CHECK-DYNAMIC-NO-PIC-64-NOT: "-pie-level" +// +// CHECK-NON-DARWIN-DYNAMIC-NO-PIC: error: unsupported option '-mdynamic-no-pic' for target 'i386-unknown-unknown' +// // RUN: %clang -c %s -target i386-unknown-unknown -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC // RUN: %clang -c %s -target i386-unknown-unknown -fpic -### 2>&1 \ @@ -33,26 +44,62 @@ // RUN: | FileCheck %s --check-prefix=CHECK-PIE1 // RUN: %clang -c %s -target i386-unknown-unknown -fPIE -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-PIE2 +// +// Check that PIC and PIE flags obey last-match-wins. If the last flag is +// a no-* variant, regardless of which variant or which flags precede it, we +// get no PIC. 
// RUN: %clang -c %s -target i386-unknown-unknown -fpic -fno-pic -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fno-PIC -### 2>&1 \ +// RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fno-pic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fno-pic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-unknown-unknown -fPIE -fno-pic -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC // RUN: %clang -c %s -target i386-unknown-unknown -fpic -fno-PIC -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fno-pic -### 2>&1 \ +// RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fno-PIC -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fno-pie -### 2>&1 \ +// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fno-PIC -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -fPIE -fno-PIE -### 2>&1 \ +// RUN: %clang -c %s -target i386-unknown-unknown -fPIE -fno-PIC -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fno-PIE -### 2>&1 \ +// RUN: %clang -c %s -target i386-unknown-unknown -fpic -fno-pie -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fno-pie -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fno-pie -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC // RUN: %clang -c %s -target i386-unknown-unknown -fPIE -fno-pie -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fno-pic -### 2>&1 \ +// RUN: %clang -c %s -target i386-unknown-unknown -fpic -fno-PIE -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -fpic -fno-pie -### 2>&1 \ +// RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fno-PIE -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fno-PIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-unknown-unknown -fPIE -fno-PIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// +// Last-match-wins where both pic and pie are specified. 
+// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fpic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC1 +// RUN: %clang -c %s -target i386-unknown-unknown -fPIE -fpic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC1 +// RUN: %clang -c %s -target i386-unknown-unknown -fpie -fPIC -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target i386-unknown-unknown -fPIE -fPIC -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target i386-unknown-unknown -fpic -fpie -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIE1 +// RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fpie -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIE1 +// RUN: %clang -c %s -target i386-unknown-unknown -fpic -fPIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIE2 +// RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fPIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIE2 +// +// Last-match-wins when selecting level 1 vs. level 2. // RUN: %clang -c %s -target i386-unknown-unknown -fpic -fPIC -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-PIC2 // RUN: %clang -c %s -target i386-unknown-unknown -fPIC -fpic -### 2>&1 \ @@ -62,20 +109,81 @@ // RUN: %clang -c %s -target i386-unknown-unknown -fpie -fPIC -fPIE -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-PIE2 // -// Defaults change for Darwin. +// Make sure -pie is passed to along to ld and that the right *crt* files +// are linked in. +// RUN: %clang %s -target i386-unknown-freebsd -fPIE -pie -### \ +// RUN: --sysroot=%S/Inputs/basic_freebsd_tree 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIE-LD +// RUN: %clang %s -target i386-linux-gnu -fPIE -pie -### \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIE-LD +// RUN: %clang %s -target i386-linux-gnu -fPIC -pie -### \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIE-LD +// +// Disregard any of the PIC-specific flags if we have a trump-card flag. +// RUN: %clang -c %s -target i386-unknown-unknown -mkernel -fPIC -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-unknown-unknown -static -fPIC -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// +// Darwin is a beautiful and unique snowflake when it comes to these flags. +// When targetting a 32-bit darwin system, the -fno-* flag variants work and +// disable PIC, but any other flag enables PIC (*not* PIE) even if the flag +// specifies PIE. On 64-bit targets, there is simply nothing you can do, there +// is no PIE, there is only PIC when it comes to compilation. 
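//
// The Darwin cases below can be summarized with a similar sketch (illustrative
// only; the helper name is hypothetical): on 64-bit Darwin the answer is
// always PIC level 2, while on 32-bit Darwin the last PIC-related flag decides
// between "no PIC" (a -fno-* spelling) and PIC level 2 (any other spelling,
// PIE included).
static int darwin_pic_level(int is_64bit, int last_flag_is_no_variant) {
  if (is_64bit)
    return 2;                          /* -fno-PIC, -fPIE, etc. are ignored  */
  return last_flag_is_no_variant ? 0   /* e.g. a trailing -fno-PIC           */
                                 : 2;  /* default, -fpic, -fPIE, -fno-PIC -fpic */
}
//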
// RUN: %clang -c %s -target i386-apple-darwin -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-PIC2 -// RUN: %clang -c %s -target i386-apple-darwin -fno-pic -### 2>&1 \ -// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-apple-darwin -fpic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target i386-apple-darwin -fPIC -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target i386-apple-darwin -fpie -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target i386-apple-darwin -fPIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 // RUN: %clang -c %s -target i386-apple-darwin -fno-PIC -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-apple-darwin -fno-PIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC +// RUN: %clang -c %s -target i386-apple-darwin -fno-PIC -fpic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target i386-apple-darwin -fno-PIC -fPIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target x86_64-apple-darwin -fno-PIC -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target x86_64-apple-darwin -fno-PIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target x86_64-apple-darwin -fpic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target x86_64-apple-darwin -fPIE -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 // -// Disregard any of the PIC-specific flags if we have a trump-card flag. -// RUN: %clang -c %s -target i386-unknown-unknown -mkernel -fPIC -### 2>&1 \ +// Darwin gets even more special with '-mdynamic-no-pic'. This flag is only +// valid on Darwin, and it's behavior is very strange but needs to remain +// consistent for compatibility. +// RUN: %clang -c %s -target i386-unknown-unknown -mdynamic-no-pic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-NON-DARWIN-DYNAMIC-NO-PIC +// RUN: %clang -c %s -target i386-apple-darwin -mdynamic-no-pic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-DYNAMIC-NO-PIC-32 +// RUN: %clang -c %s -target i386-apple-darwin -mdynamic-no-pic -fno-pic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-DYNAMIC-NO-PIC-32 +// RUN: %clang -c %s -target i386-apple-darwin -mdynamic-no-pic -fpie -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-DYNAMIC-NO-PIC-32 +// RUN: %clang -c %s -target x86_64-apple-darwin -mdynamic-no-pic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-DYNAMIC-NO-PIC-64 +// RUN: %clang -c %s -target x86_64-apple-darwin -mdynamic-no-pic -fno-pic -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-DYNAMIC-NO-PIC-64 +// RUN: %clang -c %s -target x86_64-apple-darwin -mdynamic-no-pic -fpie -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-DYNAMIC-NO-PIC-64 +// +// Checks for ARM+Apple+IOS including -fapple-kext, -mkernel, and iphoneos +// version boundaries. 
+// RUN: %clang -c %s -target armv7-apple-ios -fapple-kext -miphoneos-version-min=6.0.0 -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target armv7-apple-ios -mkernel -miphoneos-version-min=6.0.0 -### 2>&1 \ +// RUN: | FileCheck %s --check-prefix=CHECK-PIC2 +// RUN: %clang -c %s -target armv7-apple-ios -fapple-kext -miphoneos-version-min=5.0.0 -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -static -fPIC -### 2>&1 \ +// RUN: %clang -c %s -target armv7-apple-ios -fapple-kext -miphoneos-version-min=6.0.0 -static -### 2>&1 \ // RUN: | FileCheck %s --check-prefix=CHECK-NO-PIC -// RUN: %clang -c %s -target i386-unknown-unknown -mdynamic-no-pic -fPIC -### 2>&1 \ -// RUN: | FileCheck %s --check-prefix=CHECK-DYNAMIC-NO-PIC1 -// RUN: %clang -c %s -target i386-apple-darwin -mdynamic-no-pic -fPIC -### 2>&1 \ -// RUN: | FileCheck %s --check-prefix=CHECK-DYNAMIC-NO-PIC2 diff --git a/test/Driver/retain-comments-from-system-headers.c b/test/Driver/retain-comments-from-system-headers.c new file mode 100644 index 0000000..4d3e2ff --- /dev/null +++ b/test/Driver/retain-comments-from-system-headers.c @@ -0,0 +1,9 @@ +// Check that we pass -fretain-comments-from-system-headers to frontend. +// +// RUN: %clang -c %s -### 2>&1 | FileCheck %s --check-prefix=CHECK-NO-RETAIN +// RUN: %clang -c %s -fretain-comments-from-system-headers -### 2>&1 | FileCheck %s --check-prefix=CHECK-RETAIN +// +// CHECK-RETAIN: -fretain-comments-from-system-headers +// +// CHECK-NO-RETAIN-NOT: -fretain-comments-from-system-headers + diff --git a/test/Driver/rewrite-legacy-objc.m b/test/Driver/rewrite-legacy-objc.m index d243c7a..2e3f421 100644 --- a/test/Driver/rewrite-legacy-objc.m +++ b/test/Driver/rewrite-legacy-objc.m @@ -3,13 +3,5 @@ // TEST0: clang{{.*}}" "-cc1" // TEST0: "-rewrite-objc" // FIXME: CHECK-NOT is broken somehow, it doesn't work here. Check adjacency instead. -// TEST0: "-fmessage-length" "0" "-stack-protector" "1" "-mstackrealign" "-fblocks" "-fobjc-runtime=macosx-fragile" "-fobjc-default-synthesize-properties" "-fno-objc-infer-related-result-type" "-fobjc-exceptions" "-fexceptions" "-fdiagnostics-show-option" +// TEST0: "-fmessage-length" "0" "-stack-protector" "1" "-mstackrealign" "-fblocks" "-fobjc-runtime=macosx-fragile" "-fobjc-default-synthesize-properties" "-fencode-extended-block-signature" "-fno-objc-infer-related-result-type" "-fobjc-exceptions" "-fexceptions" "-fdiagnostics-show-option" // TEST0: rewrite-legacy-objc.m" - -// RUN: not %clang -ccc-no-clang -target unknown -rewrite-legacy-objc %s -o - -### 2>&1 | \ -// RUN: FileCheck -check-prefix=TEST1 %s -// TEST1: invalid output type 'rewritten-legacy-objc' for use with gcc - -// RUN: not %clang -ccc-no-clang -target i386-apple-darwin10 -rewrite-legacy-objc %s -o - -### 2>&1 | \ -// RUN: FileCheck -check-prefix=TEST2 %s -// TEST2: invalid output type 'rewritten-legacy-objc' for use with gcc diff --git a/test/Driver/rewrite-objc.m b/test/Driver/rewrite-objc.m index 6696797..fa159a6 100644 --- a/test/Driver/rewrite-objc.m +++ b/test/Driver/rewrite-objc.m @@ -3,13 +3,4 @@ // TEST0: clang{{.*}}" "-cc1" // TEST0: "-rewrite-objc" // FIXME: CHECK-NOT is broken somehow, it doesn't work here. Check adjacency instead. 
-// TEST0: "-fmessage-length" "0" "-stack-protector" "1" "-mstackrealign" "-fblocks" "-fobjc-runtime=macosx" "-fobjc-dispatch-method=mixed" "-fobjc-default-synthesize-properties" "-fno-objc-infer-related-result-type" "-fobjc-exceptions" "-fexceptions" "-fdiagnostics-show-option" -// TEST0: rewrite-objc.m" - -// RUN: not %clang -ccc-no-clang -target unknown -rewrite-objc %s -o - -### 2>&1 | \ -// RUN: FileCheck -check-prefix=TEST1 %s -// TEST1: invalid output type 'rewritten-objc' for use with gcc - -// RUN: not %clang -ccc-no-clang -target i386-apple-darwin10 -rewrite-objc %s -o - -### 2>&1 | \ -// RUN: FileCheck -check-prefix=TEST2 %s -// TEST2: invalid output type 'rewritten-objc' for use with gcc +// TEST0: "-fmessage-length" "0" "-stack-protector" "1" "-mstackrealign" "-fblocks" "-fobjc-runtime=macosx" "-fobjc-dispatch-method=mixed" "-fobjc-default-synthesize-properties" "-fencode-extended-block-signature" "-fno-objc-infer-related-result-type" "-fobjc-exceptions" "-fexceptions" "-fdiagnostics-show-option" diff --git a/test/Driver/stack-protector.c b/test/Driver/stack-protector.c new file mode 100644 index 0000000..5f3d679 --- /dev/null +++ b/test/Driver/stack-protector.c @@ -0,0 +1,11 @@ +// RUN: %clang -fno-stack-protector -### %s 2>&1 | FileCheck %s -check-prefix=NOSSP +// NOSSP-NOT: "-stack-protector" "1" +// NOSSP-NOT: "-stack-protector-buffer-size" + +// RUN: %clang -fstack-protector -### %s 2>&1 | FileCheck %s -check-prefix=SSP +// SSP: "-stack-protector" "1" +// SSP-NOT: "-stack-protector-buffer-size" + +// RUN: %clang -fstack-protector --param ssp-buffer-size=16 -### %s 2>&1 | FileCheck %s -check-prefix=SSP-BUF +// SSP-BUF: "-stack-protector" "1" +// SSP-BUF: "-stack-protector-buffer-size" "16" diff --git a/test/Driver/std.cpp b/test/Driver/std.cpp index 7704c8d..822658c 100644 --- a/test/Driver/std.cpp +++ b/test/Driver/std.cpp @@ -5,6 +5,8 @@ // RUN: %clang -std=gnu++0x %s -fsyntax-only 2>&1 | FileCheck -check-prefix=GNUXX11 %s // RUN: %clang -std=c++11 %s -fsyntax-only 2>&1 | FileCheck -check-prefix=CXX11 %s // RUN: %clang -std=gnu++11 %s -fsyntax-only 2>&1 | FileCheck -check-prefix=GNUXX11 %s +// RUN: %clang -std=c++1y %s -fsyntax-only 2>&1 | FileCheck -check-prefix=CXX1Y %s +// RUN: %clang -std=gnu++1y %s -fsyntax-only 2>&1 | FileCheck -check-prefix=GNUXX1Y %s void f(int n) { typeof(n)(); @@ -22,3 +24,9 @@ void f(int n) { // GNUXX11-NOT: undeclared identifier 'typeof' // GNUXX11-NOT: undeclared identifier 'decltype' + +// CXX1Y: undeclared identifier 'typeof' +// CXX1Y-NOT: undeclared identifier 'decltype' + +// GNUXX1Y-NOT: undeclared identifier 'typeof' +// GNUXX1Y-NOT: undeclared identifier 'decltype' diff --git a/test/Driver/tsan.c b/test/Driver/tsan.c index 1dadb8e..18b027f 100644 --- a/test/Driver/tsan.c +++ b/test/Driver/tsan.c @@ -1,8 +1,9 @@ -// RUN: %clang -target i386-unknown-unknown -fthread-sanitizer %s -S -emit-llvm -o - | FileCheck %s -// RUN: %clang -O1 -target i386-unknown-unknown -fthread-sanitizer %s -S -emit-llvm -o - | FileCheck %s -// RUN: %clang -O2 -target i386-unknown-unknown -fthread-sanitizer %s -S -emit-llvm -o - | FileCheck %s -// RUN: %clang -O3 -target i386-unknown-unknown -fthread-sanitizer %s -S -emit-llvm -o - | FileCheck %s -// Verify that -fthread-sanitizer invokes tsan instrumentation. 
+// RUN: %clang -target i386-unknown-unknown -fsanitize=thread %s -S -emit-llvm -o - | FileCheck %s +// RUN: %clang -O1 -target i386-unknown-unknown -fsanitize=thread %s -S -emit-llvm -o - | FileCheck %s +// RUN: %clang -O2 -target i386-unknown-unknown -fsanitize=thread %s -S -emit-llvm -o - | FileCheck %s +// RUN: %clang -O3 -target i386-unknown-unknown -fsanitize=thread %s -S -emit-llvm -o - | FileCheck %s +// RUN: %clang -target i386-unknown-unknown -fsanitize=thread %s -S -emit-llvm -o - | FileCheck %s +// Verify that -fsanitize=thread invokes tsan instrumentation. int foo(int *a) { return *a; } // CHECK: __tsan_init diff --git a/test/Driver/ubsan-ld.c b/test/Driver/ubsan-ld.c new file mode 100644 index 0000000..775e669 --- /dev/null +++ b/test/Driver/ubsan-ld.c @@ -0,0 +1,10 @@ +// Test UndefinedBehaviorSanitizer ld flags. + +// RUN: %clang -fcatch-undefined-behavior %s -### -o %t.o 2>&1 \ +// RUN: -target i386-unknown-linux \ +// RUN: --sysroot=%S/Inputs/basic_linux_tree \ +// RUN: | FileCheck --check-prefix=CHECK-LINUX %s +// CHECK-LINUX: "{{.*}}ld{{(.exe)?}}" +// CHECK-LINUX-NOT: "-lc" +// CHECK-LINUX: libclang_rt.ubsan-i386.a" +// CHECK-LINUX: "-lpthread" diff --git a/test/Driver/warning-options.cpp b/test/Driver/warning-options.cpp index ab0da42..cce88e6 100644 --- a/test/Driver/warning-options.cpp +++ b/test/Driver/warning-options.cpp @@ -8,3 +8,8 @@ // CHECK: unknown warning option '-Wmonkey' // CHECK: unknown warning option '-Wno-monkey' // CHECK: unknown warning option '-Wno-unused-command-line-arguments'; did you mean '-Wno-unused-command-line-argument'? + +// FIXME: Remove this together with -Warc-abi once an Xcode is released that doesn't pass this flag. +// RUN: %clang -### -Warc-abi -Wno-arc-abi %s 2>&1 | FileCheck -check-prefix=ARCABI %s +// ARCABI-NOT: unknown warning option '-Warc-abi' +// ARCABI-NOT: unknown warning option '-Wno-arc-abi' diff --git a/test/Driver/working-directory-and-abs.c b/test/Driver/working-directory-and-abs.c new file mode 100644 index 0000000..6738c77 --- /dev/null +++ b/test/Driver/working-directory-and-abs.c @@ -0,0 +1 @@ +// RUN: %clang -working-directory=%S %S/working-directory-and-abs.c -fsyntax-only diff --git a/test/Driver/x86_64-nacl-defines.cpp b/test/Driver/x86_64-nacl-defines.cpp new file mode 100644 index 0000000..caa9a74 --- /dev/null +++ b/test/Driver/x86_64-nacl-defines.cpp @@ -0,0 +1,45 @@ +// RUN: %clang -target x86_64-unknown-nacl -ccc-echo %s -emit-llvm-only -c 2>&1 | FileCheck %s -check-prefix=ECHO +// RUN: %clang -target x86_64-unknown-nacl %s -emit-llvm -S -c -o - | FileCheck %s +// RUN: %clang -target x86_64-unknown-nacl %s -emit-llvm -S -c -pthread -o - | FileCheck %s -check-prefix=THREADS + +// ECHO: {{.*}} -cc1 {{.*}}x86_64-nacl-defines.c + +// Check platform defines + +// CHECK: __LITTLE_ENDIAN__defined +#ifdef __LITTLE_ENDIAN__ +void __LITTLE_ENDIAN__defined() {} +#endif + +// CHECK: __native_client__defined +#ifdef __native_client__ +void __native_client__defined() {} +#endif + +// CHECK: __x86_64__defined +#ifdef __x86_64__ +void __x86_64__defined() {} +#endif + +// CHECK: unixdefined +#ifdef unix +void unixdefined() {} +#endif + +// CHECK: __ELF__defined +#ifdef __ELF__ +void __ELF__defined() {} +#endif + +// CHECK: _GNU_SOURCEdefined +#ifdef _GNU_SOURCE +void _GNU_SOURCEdefined() {} +#endif + +// THREADS: _REENTRANTdefined +// CHECK: _REENTRANTundefined +#ifdef _REENTRANT +void _REENTRANTdefined() {} +#else +void _REENTRANTundefined() {} +#endif diff --git a/test/Driver/x86_64-nacl-types.cpp 
b/test/Driver/x86_64-nacl-types.cpp new file mode 100644 index 0000000..a994cb7 --- /dev/null +++ b/test/Driver/x86_64-nacl-types.cpp @@ -0,0 +1,37 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-nacl -std=c++11 -verify %s +// expected-no-diagnostics + +#include +#include + +static_assert(alignof(char) == 1, "alignof char is wrong"); + +static_assert(alignof(short) == 2, "sizeof short is wrong"); +static_assert(alignof(short) == 2, "alignof short is wrong"); + +static_assert(alignof(int) == 4, "sizeof int is wrong"); +static_assert(alignof(int) == 4, "alignof int is wrong"); + +static_assert(sizeof(long) == 4, "sizeof long is wrong"); +static_assert(sizeof(long) == 4, "alignof long is wrong"); + +static_assert(sizeof(long long) == 8, "sizeof long long is wrong wrong"); +static_assert(alignof(long long) == 8, "alignof long long is wrong wrong"); + +static_assert(sizeof(void*) == 4, "sizeof void * is wrong"); +static_assert(alignof(void*) == 4, "alignof void * is wrong"); + +static_assert(sizeof(float) == 4, "sizeof float is wrong"); +static_assert(alignof(float) == 4, "alignof float is wrong"); + +static_assert(sizeof(double) == 8, "sizeof double is wrong"); +static_assert(alignof(double) == 8, "alignof double is wrong"); + +static_assert(sizeof(long double) == 8, "sizeof long double is wrong"); +static_assert(alignof(long double) == 8, "alignof long double is wrong"); + +static_assert(sizeof(va_list) == 16, "sizeof va_list is wrong"); +static_assert(alignof(va_list) == 4, "alignof va_list is wrong"); + +static_assert(sizeof(size_t) == 4, "sizeof size_t is wrong"); +static_assert(alignof(size_t) == 4, "alignof size_t is wrong"); diff --git a/test/FixIt/fixit-cxx0x.cpp b/test/FixIt/fixit-cxx0x.cpp index 0c837b4..a173ce4 100644 --- a/test/FixIt/fixit-cxx0x.cpp +++ b/test/FixIt/fixit-cxx0x.cpp @@ -66,13 +66,11 @@ const char *p = "foo"bar; // expected-error {{requires a space between}} #define ord - '0' int k = '4'ord; // expected-error {{requires a space between}} -void operator""_x(char); // expected-error {{requires a space}} void operator"x" _y(char); // expected-error {{must be '""'}} void operator L"" _z(char); // expected-error {{encoding prefix}} void operator "x" "y" U"z" ""_whoops "z" "y"(char); // expected-error {{must be '""'}} void f() { - 'a'_x; 'b'_y; 'c'_z; 'd'_whoops; diff --git a/test/FixIt/fixit-include.c b/test/FixIt/fixit-include.c index 51bd9b0..383c513 100644 --- a/test/FixIt/fixit-include.c +++ b/test/FixIt/fixit-include.c @@ -3,8 +3,10 @@ // RUN: cp %S/fixit-include.h %T // RUN: not %clang_cc1 -fsyntax-only -fixit %t // RUN: %clang_cc1 -Wall -pedantic %t +// RUN: %clang_cc1 -fsyntax-only -fdiagnostics-parseable-fixits %s 2>&1 | FileCheck %s #include // expected-error {{'fixit-include.h' file not found with include; use "quotes" instead}} +// CHECK: fix-it:{{.*}}:{8:10-8:27} #pragma does_not_exist // expected-warning {{unknown pragma ignored}} diff --git a/test/FixIt/fixit-missing-self-in-block.m b/test/FixIt/fixit-missing-self-in-block.m new file mode 100644 index 0000000..8fd9564 --- /dev/null +++ b/test/FixIt/fixit-missing-self-in-block.m @@ -0,0 +1,20 @@ +// RUN: cp %s %t +// RUN: %clang_cc1 -x objective-c -fobjc-arc -fblocks -fixit %t +// RUN: %clang_cc1 -x objective-c -fobjc-arc -fblocks -Werror %t +// rdar://11194874 + +@interface Root @end + +@interface I : Root +{ + int _bar; +} +@end + +@implementation I + - (void)foo{ + ^{ + _bar = 3; + }(); + } +@end diff --git a/test/FixIt/fixit-objc.m b/test/FixIt/fixit-objc.m index df591c7..77099fc 100644 --- 
a/test/FixIt/fixit-objc.m +++ b/test/FixIt/fixit-objc.m @@ -49,7 +49,7 @@ void f(Test *t) { @property (assign) int y; @end -int f0(Radar7861841 *a) { return a.x; } // expected-error {{property 'x' not found on object of type 'Radar7861841 *'; did you mean to access ivar 'x'}} +int f0(Radar7861841 *a) { return a.x; } // expected-error {{property 'x' not found on object of type 'Radar7861841 *'; did you mean to access instance variable 'x'}} int f1(Radar7861841 *a) { return a->y; } // expected-error {{property 'y' found on object of type 'Radar7861841 *'; did you mean to access it with the "." operator?}} diff --git a/test/FixIt/fixit.c b/test/FixIt/fixit.c index ce6f109..00adb19 100644 --- a/test/FixIt/fixit.c +++ b/test/FixIt/fixit.c @@ -81,6 +81,13 @@ void oopsMoreCommas() { &a == &b ? oopsMoreCommas() : removeUnusedLabels(a[0]); } +int commaAtEndOfStatement() { + int a = 1; + a = 5, // expected-error {{';'}} + int m = 5, // expected-error {{';'}} + return 0, // expected-error {{';'}} +} + int noSemiAfterLabel(int n) { switch (n) { default: diff --git a/test/FixIt/fixit.cpp b/test/FixIt/fixit.cpp index 3eac434..253abd0 100644 --- a/test/FixIt/fixit.cpp +++ b/test/FixIt/fixit.cpp @@ -64,7 +64,7 @@ namespace rdar7796492 { // extra qualification on member class C { - int C::foo(); // expected-warning {{extra qualification}} + int C::foo(); // expected-error {{extra qualification}} }; namespace rdar8488464 { @@ -292,3 +292,10 @@ namespace greatergreater { //(void)(&t>==p); } } + +class foo { + static void test() { + (void)&i; // expected-error{{must explicitly qualify name of member function when taking its address}} + } + int i(); +}; diff --git a/test/FixIt/format-darwin.m b/test/FixIt/format-darwin.m new file mode 100644 index 0000000..1bfe272 --- /dev/null +++ b/test/FixIt/format-darwin.m @@ -0,0 +1,198 @@ +// RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -fblocks -Wformat-non-iso -verify %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -fsyntax-only -fblocks -Wformat-non-iso -verify %s + +// RUN: %clang_cc1 -triple i386-apple-darwin9 -fdiagnostics-parseable-fixits -fblocks -Wformat-non-iso %s 2>&1 | FileCheck %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -fdiagnostics-parseable-fixits -fblocks -Wformat-non-iso %s 2>&1 | FileCheck %s + +// RUN: %clang_cc1 -triple i386-apple-darwin9 -fdiagnostics-parseable-fixits -fblocks -Wformat-non-iso %s 2>&1 | FileCheck -check-prefix=CHECK-32 %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin9 -fdiagnostics-parseable-fixits -fblocks -Wformat-non-iso %s 2>&1 | FileCheck -check-prefix=CHECK-64 %s + +int printf(const char * restrict, ...); + +#if __LP64__ +typedef long NSInteger; +typedef unsigned long NSUInteger; +typedef int SInt32; +typedef unsigned int UInt32; + +#else + +typedef int NSInteger; +typedef unsigned int NSUInteger; +typedef long SInt32; +typedef unsigned long UInt32; +#endif + +NSInteger getNSInteger(); +NSUInteger getNSUInteger(); +SInt32 getSInt32(); +UInt32 getUInt32(); + +void testCorrectionInAllCases() { + printf("%s", getNSInteger()); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", getNSUInteger()); // expected-warning{{values of type 'NSUInteger' should not be used as format arguments; add an explicit cast to 'unsigned long' instead}} + printf("%s", getSInt32()); // expected-warning{{values of type 'SInt32' should not be used as format arguments; add an explicit cast to 'int' instead}} + printf("%s", getUInt32()); // 
expected-warning{{values of type 'UInt32' should not be used as format arguments; add an explicit cast to 'unsigned int' instead}} + + // CHECK: fix-it:"{{.*}}":{32:11-32:13}:"%ld" + // CHECK: fix-it:"{{.*}}":{32:16-32:16}:"(long)" + + // CHECK: fix-it:"{{.*}}":{33:11-33:13}:"%lu" + // CHECK: fix-it:"{{.*}}":{33:16-33:16}:"(unsigned long)" + + // CHECK: fix-it:"{{.*}}":{34:11-34:13}:"%d" + // CHECK: fix-it:"{{.*}}":{34:16-34:16}:"(int)" + + // CHECK: fix-it:"{{.*}}":{35:11-35:13}:"%u" + // CHECK: fix-it:"{{.*}}":{35:16-35:16}:"(unsigned int)" +} + +@interface Foo { +@public + NSInteger _value; +} +- (NSInteger)getInteger; + +@property NSInteger value; +@end + +struct Bar { + NSInteger value; +}; + + +void testParens(Foo *obj, struct Bar *record) { + NSInteger arr[4] = {0}; + NSInteger i = 0; + + // These cases match the cases in CheckPrintfHandler::checkFormatExpr. + printf("%s", arr[0]); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", getNSInteger()); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", i); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", obj->_value); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", [obj getInteger]); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", obj.value); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", record->value); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", (i ? i : i)); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", *arr); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + + // CHECK-NOT: fix-it:{{.*}}:")" + + printf("%s", i ? 
i : i); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + + // CHECK: fix-it:"{{.*}}":{81:11-81:13}:"%ld" + // CHECK: fix-it:"{{.*}}":{81:16-81:16}:"(long)(" + // CHECK: fix-it:"{{.*}}":{81:25-81:25}:")" +} + + +#if __LP64__ + +void testWarn() { + printf("%d", getNSInteger()); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%u", getNSUInteger()); // expected-warning{{values of type 'NSUInteger' should not be used as format arguments; add an explicit cast to 'unsigned long' instead}} + printf("%ld", getSInt32()); // expected-warning{{values of type 'SInt32' should not be used as format arguments; add an explicit cast to 'int' instead}} + printf("%lu", getUInt32()); // expected-warning{{values of type 'UInt32' should not be used as format arguments; add an explicit cast to 'unsigned int' instead}} + + // CHECK-64: fix-it:"{{.*}}":{92:11-92:13}:"%ld" + // CHECK-64: fix-it:"{{.*}}":{92:16-92:16}:"(long)" + + // CHECK-64: fix-it:"{{.*}}":{93:11-93:13}:"%lu" + // CHECK-64: fix-it:"{{.*}}":{93:16-93:16}:"(unsigned long)" + + // CHECK-64: fix-it:"{{.*}}":{94:11-94:14}:"%d" + // CHECK-64: fix-it:"{{.*}}":{94:17-94:17}:"(int)" + + // CHECK-64: fix-it:"{{.*}}":{95:11-95:14}:"%u" + // CHECK-64: fix-it:"{{.*}}":{95:17-95:17}:"(unsigned int)" +} + +void testPreserveHex() { + printf("%x", getNSInteger()); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%x", getNSUInteger()); // expected-warning{{values of type 'NSUInteger' should not be used as format arguments; add an explicit cast to 'unsigned long' instead}} + + // CHECK-64: fix-it:"{{.*}}":{111:11-111:13}:"%lx" + // CHECK-64: fix-it:"{{.*}}":{111:16-111:16}:"(long)" + + // CHECK-64: fix-it:"{{.*}}":{112:11-112:13}:"%lx" + // CHECK-64: fix-it:"{{.*}}":{112:16-112:16}:"(unsigned long)" +} + +void testNoWarn() { + printf("%ld", getNSInteger()); // no-warning + printf("%lu", getNSUInteger()); // no-warning + printf("%d", getSInt32()); // no-warning + printf("%u", getUInt32()); // no-warning +} + +#else + +void testWarn() { + printf("%ld", getNSInteger()); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%lu", getNSUInteger()); // expected-warning{{values of type 'NSUInteger' should not be used as format arguments; add an explicit cast to 'unsigned long' instead}} + printf("%d", getSInt32()); // expected-warning{{values of type 'SInt32' should not be used as format arguments; add an explicit cast to 'int' instead}} + printf("%u", getUInt32()); // expected-warning{{values of type 'UInt32' should not be used as format arguments; add an explicit cast to 'unsigned int' instead}} + + // CHECK-32: fix-it:"{{.*}}":{131:17-131:17}:"(long)" + + // CHECK-32: fix-it:"{{.*}}":{132:17-132:17}:"(unsigned long)" + + // CHECK-32: fix-it:"{{.*}}":{133:16-133:16}:"(int)" + + // CHECK-32: fix-it:"{{.*}}":{134:16-134:16}:"(unsigned int)" +} + +void testPreserveHex() { + printf("%lx", getNSInteger()); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%lx", getNSUInteger()); // expected-warning{{values of type 'NSUInteger' should not be used as format arguments; add an explicit cast to 'unsigned long' instead}} + + // CHECK-32: 
fix-it:"{{.*}}":{146:17-146:17}:"(long)" + + // CHECK-32: fix-it:"{{.*}}":{147:17-147:17}:"(unsigned long)" +} + +void testNoWarn() { + printf("%d", getNSInteger()); // no-warning + printf("%u", getNSUInteger()); // no-warning + printf("%ld", getSInt32()); // no-warning + printf("%lu", getUInt32()); // no-warning +} + +#endif + + +void testCasts() { + printf("%s", (NSInteger)0); // expected-warning{{values of type 'NSInteger' should not be used as format arguments; add an explicit cast to 'long' instead}} + printf("%s", (NSUInteger)0); // expected-warning{{values of type 'NSUInteger' should not be used as format arguments; add an explicit cast to 'unsigned long' instead}} + printf("%s", (SInt32)0); // expected-warning{{values of type 'SInt32' should not be used as format arguments; add an explicit cast to 'int' instead}} + printf("%s", (UInt32)0); // expected-warning{{values of type 'UInt32' should not be used as format arguments; add an explicit cast to 'unsigned int' instead}} + + // CHECK: fix-it:"{{.*}}":{165:11-165:13}:"%ld" + // CHECK: fix-it:"{{.*}}":{165:16-165:27}:"(long)" + + // CHECK: fix-it:"{{.*}}":{166:11-166:13}:"%lu" + // CHECK: fix-it:"{{.*}}":{166:16-166:28}:"(unsigned long)" + + // CHECK: fix-it:"{{.*}}":{167:11-167:13}:"%d" + // CHECK: fix-it:"{{.*}}":{167:16-167:24}:"(int)" + + // CHECK: fix-it:"{{.*}}":{168:11-168:13}:"%u" + // CHECK: fix-it:"{{.*}}":{168:16-168:24}:"(unsigned int)" +} + +void testCapitals() { + printf("%D", 1); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'd'?}} + printf("%U", 1); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'u'?}} + printf("%O", 1); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'o'?}} + + // CHECK: fix-it:"{{.*}}":{184:12-184:13}:"d" + // CHECK: fix-it:"{{.*}}":{185:12-185:13}:"u" + // CHECK: fix-it:"{{.*}}":{186:12-186:13}:"o" + + + printf("%lD", 1); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'd'?}} expected-warning{{format specifies type 'long' but the argument has type 'int'}} + + // FIXME: offering two somewhat-conflicting fixits is less than ideal. 
+ // CHECK: fix-it:"{{.*}}":{193:13-193:14}:"d" + // CHECK: fix-it:"{{.*}}":{193:11-193:14}:"%D" +} diff --git a/test/FixIt/no-fixit.cpp b/test/FixIt/no-fixit.cpp index c95c867..9da2922 100644 --- a/test/FixIt/no-fixit.cpp +++ b/test/FixIt/no-fixit.cpp @@ -5,3 +5,9 @@ // CHECK-NOT: fix-it: template +> void func(); + +struct { + void i() { + (void)&i; + } +} x; diff --git a/test/FixIt/typo.cpp b/test/FixIt/typo.cpp index 3d40da8..b3568a5 100644 --- a/test/FixIt/typo.cpp +++ b/test/FixIt/typo.cpp @@ -5,7 +5,8 @@ // RUN: grep test_string %t namespace std { - template class basic_string { // expected-note 2{{'basic_string' declared here}} + template class basic_string { // expected-note 2{{'basic_string' declared here}} \ + // expected-note {{'otherstd::basic_string' declared here}} public: int find(const char *substr); // expected-note{{'find' declared here}} static const int npos = -1; // expected-note{{'npos' declared here}} @@ -76,13 +77,50 @@ int foo() { } namespace nonstd { - typedef std::basic_string yarn; // expected-note{{'nonstd::yarn' declared here}} + typedef std::basic_string yarn; // expected-note 2 {{'nonstd::yarn' declared here}} + int narf; // expected-note{{'nonstd::narf' declared here}} } yarn str4; // expected-error{{unknown type name 'yarn'; did you mean 'nonstd::yarn'?}} +wibble::yarn str5; // expected-error{{no type named 'yarn' in namespace 'otherstd'; did you mean 'nonstd::yarn'?}} + +int poit() { + nonstd::basic_string str; // expected-error{{no template named 'basic_string' in namespace 'nonstd'; did you mean 'otherstd::basic_string'?}} + return wibble::narf; // expected-error{{no member named 'narf' in namespace 'otherstd'; did you mean 'nonstd::narf'?}} +} namespace check_bool { void f() { Bool b; // expected-error{{use of undeclared identifier 'Bool'; did you mean 'bool'?}} } } + +namespace outr { +} +namespace outer { + namespace inner { // expected-note{{'outer::inner' declared here}} \ + // expected-note{{namespace 'outer::inner' defined here}} \ + // expected-note{{'inner' declared here}} + int i; + } +} + +using namespace outr::inner; // expected-error{{no namespace named 'inner' in namespace 'outr'; did you mean 'outer::inner'?}} + +void func() { + outr::inner::i = 3; // expected-error{{no member named 'inner' in namespace 'outr'; did you mean 'outer::inner'?}} + outer::innr::i = 4; // expected-error{{no member named 'innr' in namespace 'outer'; did you mean 'inner'?}} +} + +struct base { +}; +struct derived : base { + int i; +}; + +void func2() { + derived d; + // FIXME: we should offer a fix here. We do if the 'i' is misspelled, but we don't do name qualification changes + // to replace base::i with derived::i as we would for other qualified name misspellings. 
+ // d.base::i = 3; +} diff --git a/test/Frontend/ast-codegen.c b/test/Frontend/ast-codegen.c index b5b2157..b85c5dc 100644 --- a/test/Frontend/ast-codegen.c +++ b/test/Frontend/ast-codegen.c @@ -1,5 +1,5 @@ -// RUN: %clang -emit-ast -o %t.ast %s -// RUN: %clang -emit-llvm -S -o - %t.ast | FileCheck %s +// RUN: %clang -target i386-unknown-unknown -emit-ast -o %t.ast %s +// RUN: %clang -target i386-unknown-unknown -emit-llvm -S -o - %t.ast | FileCheck %s // CHECK: module asm "foo" __asm__("foo"); diff --git a/test/Frontend/iframework.c b/test/Frontend/iframework.c index 0c241fd..6f801f2 100644 --- a/test/Frontend/iframework.c +++ b/test/Frontend/iframework.c @@ -1,3 +1,4 @@ -// RUN: %clang -fsyntax-only -iframework%S/Inputs %s -Xclang -verify +// RUN: %clang -fsyntax-only -iframework %S/Inputs %s -Xclang -verify +// expected-no-diagnostics #include diff --git a/test/Frontend/macros.c b/test/Frontend/macros.c index 3170797..68f2203 100644 --- a/test/Frontend/macros.c +++ b/test/Frontend/macros.c @@ -1,4 +1,13 @@ // RUN: %clang_cc1 -DA= -DB=1 -verify -fsyntax-only %s +// expected-no-diagnostics int a[(B A) == 1 ? 1 : -1]; + +// PR13747 - Don't warn about unused results with statement exprs in macros. +void stuff(int,int,int); +#define memset(x,y,z) ({ stuff(x,y,z); x; }) + +void foo(int a, int b, int c) { + memset(a,b,c); // No warning! +} diff --git a/test/Frontend/unknown-pragmas.c b/test/Frontend/unknown-pragmas.c index 53a5a45..eea025c 100644 --- a/test/Frontend/unknown-pragmas.c +++ b/test/Frontend/unknown-pragmas.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -Eonly -Wall -verify %s // RUN: %clang_cc1 -E -dM -Wall -verify %s +// expected-no-diagnostics #pragma adgohweopihweotnwet diff --git a/test/Frontend/verify.c b/test/Frontend/verify.c index f8d0f42..062e6bd 100644 --- a/test/Frontend/verify.c +++ b/test/Frontend/verify.c @@ -22,7 +22,7 @@ #if 0 // expected-error {{should be ignored}} #endif - +// eexpected-error {{should also be ignored: unrecognised directive}} #error should not be ignored // expected-error@-1 1+ {{should not be ignored}} @@ -111,9 +111,10 @@ unexpected b; // expected-error@33 1-1 {{unknown type}} #if 0 // RUN: %clang_cc1 -verify %t.invalid 2>&1 | FileCheck -check-prefix=CHECK6 %s -// CHECK6: error: 'error' diagnostics seen but not expected: +// CHECK6: error: no expected directives found: consider use of 'expected-no-diagnostics' +// CHECK6-NEXT: error: 'error' diagnostics seen but not expected: // CHECK6-NEXT: (frontend): error reading '{{.*}}verify.c.tmp.invalid' -// CHECK6-NEXT: 1 error generated. +// CHECK6-NEXT: 2 errors generated. // RUN: echo -e '//expected-error@2{{1}}\n#error 2' | %clang_cc1 -verify 2>&1 | FileCheck -check-prefix=CHECK7 %s diff --git a/test/Frontend/verify2.c b/test/Frontend/verify2.c index a1c7975..04f80ad 100644 --- a/test/Frontend/verify2.c +++ b/test/Frontend/verify2.c @@ -3,7 +3,7 @@ // Please note that all comments are inside "#if 0" blocks so that // VerifyDiagnosticConsumer sees no comments while processing this -// test-case. +// test-case (and hence no expected-* directives). #endif #include "verify2.h" @@ -12,8 +12,9 @@ #if 0 // expected-error {{should be ignored}} -// CHECK: error: 'error' diagnostics seen but not expected: +// CHECK: error: no expected directives found: consider use of 'expected-no-diagnostics' +// CHECK-NEXT: error: 'error' diagnostics seen but not expected: // CHECK-NEXT: Line 1: header // CHECK-NEXT: Line 10: source -// CHECK-NEXT: 2 errors generated. +// CHECK-NEXT: 3 errors generated. 
#endif diff --git a/test/Frontend/verify3.c b/test/Frontend/verify3.c new file mode 100644 index 0000000..0705b4b --- /dev/null +++ b/test/Frontend/verify3.c @@ -0,0 +1,41 @@ +// This test-case runs several sub-tests on -verify to ensure that correct +// diagnostics are generated in relation to the mis-use and non-use of the +// 'expected-no-diagnostics' directive. + +// RUN: %clang_cc1 -DTEST1 -verify %s 2>&1 | FileCheck -check-prefix=CHECK1 %s +#ifdef TEST1 +// expected-no-diagnostics +// expected-note {{}} + +// CHECK1: error: 'error' diagnostics seen but not expected: +// CHECK1-NEXT: Line 8: expected directive cannot follow 'expected-no-diagnostics' directive +// CHECK1-NEXT: 1 error generated. +#endif + +// RUN: %clang_cc1 -DTEST2 -verify %s 2>&1 | FileCheck -check-prefix=CHECK2 %s +#ifdef TEST2 +#warning X +// expected-warning@-1 {{X}} +// expected-no-diagnostics + +// CHECK2: error: 'error' diagnostics seen but not expected: +// CHECK2-NEXT: Line 19: 'expected-no-diagnostics' directive cannot follow other expected directives +// CHECK2-NEXT: 1 error generated. +#endif + +// RUN: %clang_cc1 -DTEST3 -verify %s 2>&1 | FileCheck -check-prefix=CHECK3 %s +// RUN: %clang_cc1 -verify 2>&1 | FileCheck -check-prefix=CHECK3 %s +#ifdef TEST3 +// no directives + +// CHECK3: error: no expected directives found: consider use of 'expected-no-diagnostics' +// CHECK3-NEXT: 1 error generated. +#endif + +// RUN: %clang_cc1 -E -DTEST4 -verify %s 2>&1 | FileCheck -check-prefix=CHECK4 %s +#ifdef TEST4 +#warning X +// expected-warning@-1 {{X}} + +// CHECK4-NOT: error: no expected directives found: consider use of 'expected-no-diagnostics' +#endif diff --git a/test/Frontend/warning-mapping-1.c b/test/Frontend/warning-mapping-1.c index 883dafb..623e5e3 100644 --- a/test/Frontend/warning-mapping-1.c +++ b/test/Frontend/warning-mapping-1.c @@ -1,5 +1,6 @@ // Check that -w has higher priority than -Werror. 
// RUN: %clang_cc1 -verify -Wsign-compare -Werror -w %s +// expected-no-diagnostics int f0(int x, unsigned y) { return x < y; diff --git a/test/Frontend/warning-mapping-4.c b/test/Frontend/warning-mapping-4.c index d8d2769..6644042 100644 --- a/test/Frontend/warning-mapping-4.c +++ b/test/Frontend/warning-mapping-4.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -verify -Wno-error=sign-compare %s // RUN: %clang_cc1 -verify -Wsign-compare -w -Wno-error=sign-compare %s +// expected-no-diagnostics int f0(int x, unsigned y) { return x < y; diff --git a/test/Headers/Inputs/include/stdint.h b/test/Headers/Inputs/include/stdint.h new file mode 100644 index 0000000..7a1fdde --- /dev/null +++ b/test/Headers/Inputs/include/stdint.h @@ -0,0 +1,18 @@ +#ifndef STDINT_H +#define STDINT_H + +#ifdef __INT32_TYPE__ +typedef unsigned __INT32_TYPE__ uint32_t; +#endif + +#ifdef __INT64_TYPE__ +typedef unsigned __INT64_TYPE__ uint64_t; +#endif + +#ifdef __INTPTR_TYPE__ +typedef unsigned __INTPTR_TYPE__ uintptr_t; +#else +#error Every target should have __INTPTR_TYPE__ +#endif + +#endif /* STDINT_H */ diff --git a/test/Headers/altivec-header.c b/test/Headers/altivec-header.c new file mode 100644 index 0000000..085e799 --- /dev/null +++ b/test/Headers/altivec-header.c @@ -0,0 +1,12 @@ +// REQUIRES: ppc64-registered-target +// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -maltivec -ffreestanding -S -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -maltivec -ffreestanding -fno-lax-vector-conversions -S -o - %s | FileCheck %s +// RUN: %clang_cc1 -triple powerpc64-unknown-unknown -maltivec -ffreestanding -x c++ -S -o - %s | FileCheck %s + +#include + +// Verify that simply including does not generate any code +// (i.e. all inline routines in the header are marked "static") + +// CHECK-NOT: .text + diff --git a/test/Headers/c89.c b/test/Headers/c89.c index eea8d44..acf01b4 100644 --- a/test/Headers/c89.c +++ b/test/Headers/c89.c @@ -1,4 +1,5 @@ // RUN: %clang -target i386-apple-darwin10 -fsyntax-only -Xclang -verify -std=c89 %s +// expected-no-diagnostics // FIXME: Disable inclusion of mm_malloc.h, our current implementation is broken // on win32 since we don't generally know how to find errno.h. 
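(Aside on the new test/Headers/Inputs/include/stdint.h stub added above: it builds fixed-width unsigned typedefs directly from the compiler's predefined __INT32_TYPE__, __INT64_TYPE__ and __INTPTR_TYPE__ macros, so freestanding header tests can name uint32_t, uint64_t and uintptr_t without a host libc. Below is a minimal illustrative sketch of the same technique; it is not taken from the patch and assumes a C11 compiler with 8-bit chars on a target that predefines these macros.)

/* Derive unsigned fixed-width types from the compiler's builtin type macros,
 * as the stub header above does, then sanity-check their sizes at compile
 * time.  Assumes CHAR_BIT == 8 and that the target predefines the macros. */
#ifdef __INT32_TYPE__
typedef unsigned __INT32_TYPE__ u32;
#endif

#ifdef __INT64_TYPE__
typedef unsigned __INT64_TYPE__ u64;
#endif

#ifdef __INTPTR_TYPE__
typedef unsigned __INTPTR_TYPE__ uptr;
#else
#error every supported target should predefine __INTPTR_TYPE__
#endif

_Static_assert(sizeof(u32) == 4, "u32 should be 32 bits wide");
_Static_assert(sizeof(u64) == 8, "u64 should be 64 bits wide");
_Static_assert(sizeof(uptr) == sizeof(void *), "uptr should be pointer-sized");

int main(void) { return 0; }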
diff --git a/test/Headers/int64-type.c b/test/Headers/int64-type.c index 16b42d2..442f8a0 100644 --- a/test/Headers/int64-type.c +++ b/test/Headers/int64-type.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -verify %s -ffreestanding +// expected-no-diagnostics #include typedef unsigned long long uint64_t; diff --git a/test/Headers/typedef_guards.c b/test/Headers/typedef_guards.c index 646b2ca0..968b26d 100644 --- a/test/Headers/typedef_guards.c +++ b/test/Headers/typedef_guards.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // NULL is rdefined in stddef.h #define NULL ((void*) 0) diff --git a/test/Headers/unwind.c b/test/Headers/unwind.c new file mode 100644 index 0000000..68100a8 --- /dev/null +++ b/test/Headers/unwind.c @@ -0,0 +1,19 @@ +// RUN: %clang_cc1 -triple arm-unknown-linux-gnueabi \ +// RUN: -isystem %S/Inputs/include -ffreestanding -fsyntax-only %s +// RUN: %clang_cc1 -triple mips-unknown-linux \ +// RUN: -isystem %S/Inputs/include -ffreestanding -fsyntax-only %s +// RUN: %clang_cc1 -triple i686-unknown-linux \ +// RUN: -isystem %S/Inputs/include -ffreestanding -fsyntax-only %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux \ +// RUN: -isystem %S/Inputs/include -ffreestanding -fsyntax-only %s + +// RUN: %clang_cc1 -triple arm-unknown-linux-gnueabi \ +// RUN: -isystem %S/Inputs/include -ffreestanding -fsyntax-only -x c++ %s +// RUN: %clang_cc1 -triple mips-unknown-linux \ +// RUN: -isystem %S/Inputs/include -ffreestanding -fsyntax-only -x c++ %s +// RUN: %clang_cc1 -triple i686-unknown-linux \ +// RUN: -isystem %S/Inputs/include -ffreestanding -fsyntax-only -x c++ %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux \ +// RUN: -isystem %S/Inputs/include -ffreestanding -fsyntax-only -x c++ %s + +#include "unwind.h" diff --git a/test/Headers/wchar_limits.cpp b/test/Headers/wchar_limits.cpp index 93a99ad..35ae7af 100644 --- a/test/Headers/wchar_limits.cpp +++ b/test/Headers/wchar_limits.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -ffreestanding -fsyntax-only -verify %s // RUN: %clang_cc1 -ffreestanding -fsyntax-only -verify -fshort-wchar %s +// expected-no-diagnostics #include diff --git a/test/Headers/wmmintrin.c b/test/Headers/wmmintrin.c index 6aa8be4..8cabbf0 100644 --- a/test/Headers/wmmintrin.c +++ b/test/Headers/wmmintrin.c @@ -1,4 +1,5 @@ // Check that wmmintrin.h is includable with just -maes. // RUN: %clang_cc1 -triple x86_64-unknown-unknown \ // RUN: -verify %s -ffreestanding -target-feature +aes +// expected-no-diagnostics #include diff --git a/test/Index/Inputs/CommentXML/valid-availability-attr-01.xml b/test/Index/Inputs/CommentXML/valid-availability-attr-01.xml new file mode 100644 index 0000000..20ce012 --- /dev/null +++ b/test/Index/Inputs/CommentXML/valid-availability-attr-01.xml @@ -0,0 +1,11 @@ + + +aaa +Aaa. + + 8.0 + 9.0 + 10.0 + use availability_test + + diff --git a/test/Index/Inputs/CommentXML/valid-availability-attr-02.xml b/test/Index/Inputs/CommentXML/valid-availability-attr-02.xml new file mode 100644 index 0000000..589262e --- /dev/null +++ b/test/Index/Inputs/CommentXML/valid-availability-attr-02.xml @@ -0,0 +1,11 @@ + + +aaa +Aaa. + + 8.0.1 + 9.0.1 + 10.0.1 + use availability_test + + diff --git a/test/Index/Inputs/CommentXML/valid-deprecated-attr.xml b/test/Index/Inputs/CommentXML/valid-deprecated-attr.xml new file mode 100644 index 0000000..194720b --- /dev/null +++ b/test/Index/Inputs/CommentXML/valid-deprecated-attr.xml @@ -0,0 +1,6 @@ + + +aaa +Aaa. 
+since OS X 10.6 + diff --git a/test/Index/Inputs/CommentXML/valid-unavailable-attr.xml b/test/Index/Inputs/CommentXML/valid-unavailable-attr.xml new file mode 100644 index 0000000..5811c34 --- /dev/null +++ b/test/Index/Inputs/CommentXML/valid-unavailable-attr.xml @@ -0,0 +1,7 @@ + + +aaa +Aaa. +Since iOS 4.0 + + diff --git a/test/Index/Inputs/Frameworks/module.map b/test/Index/Inputs/Frameworks/module.map new file mode 100644 index 0000000..77879fe --- /dev/null +++ b/test/Index/Inputs/Frameworks/module.map @@ -0,0 +1 @@ +framework module * { } diff --git a/test/Index/Inputs/retain-comments-from-system-headers.h b/test/Index/Inputs/retain-comments-from-system-headers.h new file mode 100644 index 0000000..28dd94f --- /dev/null +++ b/test/Index/Inputs/retain-comments-from-system-headers.h @@ -0,0 +1,8 @@ +#pragma clang system_header + +/** + * system_function + * \param a Aaa. + */ +int system_function(int a); + diff --git a/test/Index/annotate-comments-availability-attrs.cpp b/test/Index/annotate-comments-availability-attrs.cpp new file mode 100644 index 0000000..777881d --- /dev/null +++ b/test/Index/annotate-comments-availability-attrs.cpp @@ -0,0 +1,44 @@ +// rdar://12378879 + +// RUN: rm -rf %t +// RUN: mkdir %t +// RUN: c-index-test -test-load-source all -comments-xml-schema=%S/../../bindings/xml/comment-xml-schema.rng %s > %t/out +// RUN: FileCheck %s < %t/out + +// Ensure that XML we generate is not invalid. +// RUN: FileCheck %s -check-prefix=WRONG < %t/out +// WRONG-NOT: CommentXMLInvalid + +/// Aaa. +void attr_availability_1() __attribute__((availability(macosx,obsoleted=10.0,introduced=8.0,deprecated=9.0, message="use availability_test in "))) + __attribute__((availability(ios,unavailable, message="not for iOS"))); + +/// Aaa. +void attr_availability_2() __attribute__((availability(macosx,obsoleted=10.0.1,introduced=8.0.1,deprecated=9.0.1))); + +/// Aaa. +void attr_deprecated_1() __attribute__((deprecated)); + +/// Aaa. +void attr_deprecated_2() __attribute__((deprecated("message 1 "))); + +/// Aaa. +void attr_unavailable_1() __attribute__((unavailable)); + +/// Aaa. +void attr_unavailable_2() __attribute__((unavailable("message 2 "))); + +// CHECK: FullCommentAsXML=[attr_availability_1c:@F@attr_availability_1#void attr_availability_1() Aaa.not for iOS8.09.010.0use availability_test in <foo.h>] + + +// CHECK: FullCommentAsXML=[attr_availability_2c:@F@attr_availability_2#void attr_availability_2() Aaa.8.0.19.0.110.0.1] + +// CHECK: FullCommentAsXML=[attr_deprecated_1c:@F@attr_deprecated_1#void attr_deprecated_1() Aaa.] + +// CHECK: FullCommentAsXML=[attr_deprecated_2c:@F@attr_deprecated_2#void attr_deprecated_2() Aaa.message 1 <foo.h>] + + +// CHECK: FullCommentAsXML=[attr_unavailable_1c:@F@attr_unavailable_1#void attr_unavailable_1() Aaa.] + + +// CHECK: FullCommentAsXML=[attr_unavailable_2c:@F@attr_unavailable_2#void attr_unavailable_2() Aaa.message 2 <foo.h>] diff --git a/test/Index/annotate-comments.cpp b/test/Index/annotate-comments.cpp index 22724d0..b8b8e6c 100644 --- a/test/Index/annotate-comments.cpp +++ b/test/Index/annotate-comments.cpp @@ -66,16 +66,16 @@ void isdoxy15(void); /// Doxygen comment. IS_DOXYGEN_END void isdoxy16(void); -/// isdoxy17 IS_DOXYGEN_START -// Not a Doxygen comment, but still picked up. -/// IS_DOXYGEN_END +/// NOT_DOXYGEN +// NOT_DOXYGEN +/// isdoxy17 IS_DOXYGEN_START IS_DOXYGEN_END void isdoxy17(void); unsigned // NOT_DOXYGEN -/// isdoxy18 IS_DOXYGEN_START -// Not a Doxygen comment, but still picked up. 
-/// IS_DOXYGEN_END +/// NOT_DOXYGEN +// NOT_DOXYGEN +/// isdoxy18 IS_DOXYGEN_START IS_DOXYGEN_END // NOT_DOXYGEN int isdoxy18(void); @@ -168,7 +168,7 @@ class test42 { ///\brief /// /// Some malformed command. -/* \*/ +/** \*/ /** * \brief Aaa aaaaaaa aaaa. * IS_DOXYGEN_END @@ -221,6 +221,32 @@ void isdoxy49(void); /// \returns ddd IS_DOXYGEN_END void isdoxy50(int); +// One of the following lines has trailing whitespace. It is intended, don't +// fix it. +/** + * Aaa. IS_DOXYGEN_START + * + * Bbb. IS_DOXYGEN_END + */ +void isdoxy51(int); + +// One of the following lines has trailing whitespace. It is intended, don't +// fix it. +/** + * Aaa. IS_DOXYGEN_START + * Bbb. + * + * Ccc. IS_DOXYGEN_END + */ +void isdoxy52(int); + +/** + * \fn isdoxy53 + * + * Aaa. IS_DOXYGEN_START IS_DOXYGEN_END + */ +void isdoxy53(int); + /// Aaa. void comment_to_html_conversion_1(); @@ -344,27 +370,30 @@ void comment_to_html_conversion_24(); /// Blah blah. void comment_to_html_conversion_25(); -/// \b Aaa +/// \unknown void comment_to_html_conversion_26(); -/// \c Aaa \p Bbb +/// \b Aaa void comment_to_html_conversion_27(); -/// \a Aaa \e Bbb \em Ccc +/// \c Aaa \p Bbb void comment_to_html_conversion_28(); -/// \a 1<2 \e 3<4 \em 5<6 \param 7<8 aaa \tparam 9<10 bbb +/// \a Aaa \e Bbb \em Ccc void comment_to_html_conversion_29(); -/// \\ \@ \& \$ \# \< \> \% \" \. \:: +/// \a 1<2 \e 3<4 \em 5<6 \param 7<8 aaa \tparam 9<10 bbb void comment_to_html_conversion_30(); -/// & < > " +/// \\ \@ \& \$ \# \< \> \% \" \. \:: void comment_to_html_conversion_31(); -/// 0<i +/// & < > " void comment_to_html_conversion_32(); +/// 0<i +void comment_to_html_conversion_33(); + /// Aaa. class comment_to_xml_conversion_01 { /// \param aaa Blah blah. @@ -518,13 +547,15 @@ enum class comment_to_xml_conversion_17 { // CHECK: annotate-comments.cpp:214:6: FunctionDecl=isdoxy48:{{.*}} BriefComment=[IS_DOXYGEN_START Aaa bbb] // CHECK: annotate-comments.cpp:218:6: FunctionDecl=isdoxy49:{{.*}} BriefComment=[IS_DOXYGEN_START Aaa] // CHECK: annotate-comments.cpp:222:6: FunctionDecl=isdoxy50:{{.*}} BriefComment=[Returns ddd IS_DOXYGEN_END] +// CHECK: annotate-comments.cpp:231:6: FunctionDecl=isdoxy51:{{.*}} BriefComment=[Aaa. IS_DOXYGEN_START] +// CHECK: annotate-comments.cpp:241:6: FunctionDecl=isdoxy52:{{.*}} BriefComment=[Aaa. IS_DOXYGEN_START Bbb.] -// CHECK: annotate-comments.cpp:225:6: FunctionDecl=comment_to_html_conversion_1:{{.*}} FullCommentAsHTML=[

    Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_1c:@F@comment_to_html_conversion_1# Aaa.] +// CHECK: annotate-comments.cpp:251:6: FunctionDecl=comment_to_html_conversion_1:{{.*}} FullCommentAsHTML=[

    Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_1c:@F@comment_to_html_conversion_1#void comment_to_html_conversion_1() Aaa.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa.])))] -// CHECK: annotate-comments.cpp:228:6: FunctionDecl=comment_to_html_conversion_2:{{.*}} FullCommentAsHTML=[

    Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_2c:@F@comment_to_html_conversion_2# Aaa.] +// CHECK: annotate-comments.cpp:254:6: FunctionDecl=comment_to_html_conversion_2:{{.*}} FullCommentAsHTML=[

    Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_2c:@F@comment_to_html_conversion_2#void comment_to_html_conversion_2() Aaa.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -532,7 +563,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[brief] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa.]))))] -// CHECK: annotate-comments.cpp:231:6: FunctionDecl=comment_to_html_conversion_3:{{.*}} FullCommentAsHTML=[

    Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_3c:@F@comment_to_html_conversion_3# Aaa.] +// CHECK: annotate-comments.cpp:257:6: FunctionDecl=comment_to_html_conversion_3:{{.*}} FullCommentAsHTML=[

    Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_3c:@F@comment_to_html_conversion_3#void comment_to_html_conversion_3() Aaa.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -540,7 +571,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[short] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa.]))))] -// CHECK: annotate-comments.cpp:236:6: FunctionDecl=comment_to_html_conversion_4:{{.*}} FullCommentAsHTML=[

    Bbb.

    Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_4c:@F@comment_to_html_conversion_4# Bbb. Aaa.] +// CHECK: annotate-comments.cpp:262:6: FunctionDecl=comment_to_html_conversion_4:{{.*}} FullCommentAsHTML=[

    Bbb.

    Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_4c:@F@comment_to_html_conversion_4#void comment_to_html_conversion_4() Bbb. Aaa.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -550,7 +581,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[brief] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Bbb.]))))] -// CHECK: annotate-comments.cpp:243:6: FunctionDecl=comment_to_html_conversion_5:{{.*}} FullCommentAsHTML=[

    Bbb.

    Aaa.

    Ccc.

    ] FullCommentAsXML=[comment_to_html_conversion_5c:@F@comment_to_html_conversion_5# Bbb. Aaa. Ccc.] +// CHECK: annotate-comments.cpp:269:6: FunctionDecl=comment_to_html_conversion_5:{{.*}} FullCommentAsHTML=[

    Bbb.

    Aaa.

    Ccc.

    ] FullCommentAsXML=[comment_to_html_conversion_5c:@F@comment_to_html_conversion_5#void comment_to_html_conversion_5() Bbb. Aaa. Ccc.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -562,7 +593,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_Text Text=[ Bbb.]))) // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Ccc.])))] -// CHECK: annotate-comments.cpp:247:6: FunctionDecl=comment_to_html_conversion_6:{{.*}} FullCommentAsHTML=[

    Aaa.

    Bbb.

    ] FullCommentAsXML=[comment_to_html_conversion_6c:@F@comment_to_html_conversion_6# Aaa. Bbb.] +// CHECK: annotate-comments.cpp:273:6: FunctionDecl=comment_to_html_conversion_6:{{.*}} FullCommentAsHTML=[

    Aaa.

    Bbb.

    ] FullCommentAsXML=[comment_to_html_conversion_6c:@F@comment_to_html_conversion_6#void comment_to_html_conversion_6() Aaa. Bbb.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -574,7 +605,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[brief] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Bbb.]))))] -// CHECK: annotate-comments.cpp:252:6: FunctionDecl=comment_to_html_conversion_7:{{.*}} FullCommentAsHTML=[

    Aaa.

    Returns Bbb.

    ] FullCommentAsXML=[comment_to_html_conversion_7c:@F@comment_to_html_conversion_7# Aaa. Bbb.] +// CHECK: annotate-comments.cpp:278:6: FunctionDecl=comment_to_html_conversion_7:{{.*}} FullCommentAsHTML=[

    Aaa.

    Returns Bbb.

    ] FullCommentAsXML=[comment_to_html_conversion_7c:@F@comment_to_html_conversion_7#void comment_to_html_conversion_7() Aaa. Bbb.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -584,7 +615,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[return] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Bbb.]))))] -// CHECK: annotate-comments.cpp:257:6: FunctionDecl=comment_to_html_conversion_8:{{.*}} FullCommentAsHTML=[

    Aaa.

    Returns Bbb.

    ] FullCommentAsXML=[comment_to_html_conversion_8c:@F@comment_to_html_conversion_8# Aaa. Bbb.] +// CHECK: annotate-comments.cpp:283:6: FunctionDecl=comment_to_html_conversion_8:{{.*}} FullCommentAsHTML=[

    Aaa.

    Returns Bbb.

    ] FullCommentAsXML=[comment_to_html_conversion_8c:@F@comment_to_html_conversion_8#void comment_to_html_conversion_8() Aaa. Bbb.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -594,7 +625,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[returns] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Bbb.]))))] -// CHECK: annotate-comments.cpp:262:6: FunctionDecl=comment_to_html_conversion_9:{{.*}} FullCommentAsHTML=[

    Aaa.

    Returns Bbb.

    ] FullCommentAsXML=[comment_to_html_conversion_9c:@F@comment_to_html_conversion_9# Aaa. Bbb.] +// CHECK: annotate-comments.cpp:288:6: FunctionDecl=comment_to_html_conversion_9:{{.*}} FullCommentAsHTML=[

    Aaa.

    Returns Bbb.

    ] FullCommentAsXML=[comment_to_html_conversion_9c:@F@comment_to_html_conversion_9#void comment_to_html_conversion_9() Aaa. Bbb.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -604,7 +635,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[result] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Bbb.]))))] -// CHECK: annotate-comments.cpp:266:6: FunctionDecl=comment_to_html_conversion_10:{{.*}} FullCommentAsHTML=[

    Returns Bbb.

    Returns Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_10c:@F@comment_to_html_conversion_10# Aaa. Bbb.] +// CHECK: annotate-comments.cpp:292:6: FunctionDecl=comment_to_html_conversion_10:{{.*}} FullCommentAsHTML=[

    Returns Bbb.

    Returns Aaa.

    ] FullCommentAsXML=[comment_to_html_conversion_10c:@F@comment_to_html_conversion_10#void comment_to_html_conversion_10() Aaa. Bbb.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -616,7 +647,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[returns] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Bbb.]))))] -// CHECK: annotate-comments.cpp:273:6: FunctionDecl=comment_to_html_conversion_11:{{.*}} FullCommentAsHTML=[

    Aaa.

    Bbb.

    Returns Ccc.

    ] FullCommentAsXML=[comment_to_html_conversion_11c:@F@comment_to_html_conversion_11# Aaa. Ccc. Bbb.] +// CHECK: annotate-comments.cpp:299:6: FunctionDecl=comment_to_html_conversion_11:{{.*}} FullCommentAsHTML=[

    Aaa.

    Bbb.

    Returns Ccc.

    ] FullCommentAsXML=[comment_to_html_conversion_11c:@F@comment_to_html_conversion_11#void comment_to_html_conversion_11() Aaa. Ccc. Bbb.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -628,14 +659,14 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[returns] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Ccc.]))))] -// CHECK: annotate-comments.cpp:276:6: FunctionDecl=comment_to_html_conversion_12:{{.*}} FullCommentAsHTML=[] FullCommentAsXML=[comment_to_html_conversion_12c:@F@comment_to_html_conversion_12#I#] +// CHECK: annotate-comments.cpp:302:6: FunctionDecl=comment_to_html_conversion_12:{{.*}} FullCommentAsHTML=[] FullCommentAsXML=[comment_to_html_conversion_12c:@F@comment_to_html_conversion_12#I#void comment_to_html_conversion_12(int x1)] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace // CHECK-NEXT: (CXComment_Text Text=[ ] IsWhitespace)) // CHECK-NEXT: (CXComment_ParamCommand in implicitly ParamName=[] ParamIndex=Invalid // CHECK-NEXT: (CXComment_Paragraph IsWhitespace)))] -// CHECK: annotate-comments.cpp:279:6: FunctionDecl=comment_to_html_conversion_13:{{.*}} FullCommentAsHTML=[
    x1
    Aaa.
    ] FullCommentAsXML=[comment_to_html_conversion_13c:@F@comment_to_html_conversion_13#I#x10in Aaa.] +// CHECK: annotate-comments.cpp:305:6: FunctionDecl=comment_to_html_conversion_13:{{.*}} FullCommentAsHTML=[
    x1
    Aaa.
    ] FullCommentAsXML=[comment_to_html_conversion_13c:@F@comment_to_html_conversion_13#I#void comment_to_html_conversion_13(int x1)x10in Aaa.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -643,7 +674,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_ParamCommand in implicitly ParamName=[x1] ParamIndex=0 // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa.]))))] -// CHECK: annotate-comments.cpp:282:6: FunctionDecl=comment_to_html_conversion_14:{{.*}} FullCommentAsHTML=[
    zzz
    Aaa.
    ] FullCommentAsXML=[comment_to_html_conversion_14c:@F@comment_to_html_conversion_14#I#zzzin Aaa.] +// CHECK: annotate-comments.cpp:308:6: FunctionDecl=comment_to_html_conversion_14:{{.*}} FullCommentAsHTML=[
    zzz
    Aaa.
    ] FullCommentAsXML=[comment_to_html_conversion_14c:@F@comment_to_html_conversion_14#I#void comment_to_html_conversion_14(int x1)zzzin Aaa.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -651,7 +682,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_ParamCommand in implicitly ParamName=[zzz] ParamIndex=Invalid // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa.]))))] -// CHECK: annotate-comments.cpp:286:6: FunctionDecl=comment_to_html_conversion_15:{{.*}} FullCommentAsHTML=[
    x1
    Aaa.
    x2
    Bbb.
    ] FullCommentAsXML=[comment_to_html_conversion_15c:@F@comment_to_html_conversion_15#I#I#x10in Aaa.x21in Bbb. ] +// CHECK: annotate-comments.cpp:312:6: FunctionDecl=comment_to_html_conversion_15:{{.*}} FullCommentAsHTML=[
    x1
    Aaa.
    x2
    Bbb.
    ] FullCommentAsXML=[comment_to_html_conversion_15c:@F@comment_to_html_conversion_15#I#I#void comment_to_html_conversion_15(int x1, int x2)x10in Aaa.x21in Bbb. ] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -663,7 +694,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_ParamCommand in implicitly ParamName=[x1] ParamIndex=0 // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa.]))))] -// CHECK: annotate-comments.cpp:291:6: FunctionDecl=comment_to_html_conversion_16:{{.*}} FullCommentAsHTML=[
    x1
    Aaa.
    x2
    Bbb.
    zzz
    Aaa.
    ] FullCommentAsXML=[comment_to_html_conversion_16c:@F@comment_to_html_conversion_16#I#I#x10in Aaa.x21in Bbb. zzzin Aaa. ] +// CHECK: annotate-comments.cpp:317:6: FunctionDecl=comment_to_html_conversion_16:{{.*}} FullCommentAsHTML=[
    x1
    Aaa.
    x2
    Bbb.
    zzz
    Aaa.
    ] FullCommentAsXML=[comment_to_html_conversion_16c:@F@comment_to_html_conversion_16#I#I#void comment_to_html_conversion_16(int x1, int x2)x10in Aaa.x21in Bbb. zzzin Aaa. ] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -679,7 +710,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_ParamCommand in implicitly ParamName=[x1] ParamIndex=0 // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa.]))))] -// CHECK: annotate-comments.cpp:296:6: FunctionTemplate=comment_to_html_conversion_17:{{.*}} FullCommentAsHTML=[
    aaa
    Blah blah
    ] FullCommentAsXML=[comment_to_html_conversion_17c:@FT@>1#Tcomment_to_html_conversion_17#t0.0#aaa0in Blah blah] +// CHECK: annotate-comments.cpp:322:6: FunctionTemplate=comment_to_html_conversion_17:{{.*}} FullCommentAsHTML=[
    aaa
    Blah blah
    ] FullCommentAsXML=[comment_to_html_conversion_17c:@FT@>1#Tcomment_to_html_conversion_17#t0.0#template <typename T> void comment_to_html_conversion_17(T aaa)aaa0in Blah blah] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -690,7 +721,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_ParamCommand in implicitly ParamName=[aaa] ParamIndex=0 // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Blah blah]))))] -// CHECK: annotate-comments.cpp:301:6: FunctionTemplate=comment_to_html_conversion_18:{{.*}} FullCommentAsHTML=[
    aaa
    Blah blah
    ] FullCommentAsXML=[comment_to_html_conversion_18c:@FT@>1#Tcomment_to_html_conversion_18#t0.0#aaa0in Blah blah] +// CHECK: annotate-comments.cpp:327:6: FunctionTemplate=comment_to_html_conversion_18:{{.*}} FullCommentAsHTML=[
    aaa
    Blah blah
    ] FullCommentAsXML=[comment_to_html_conversion_18c:@FT@>1#Tcomment_to_html_conversion_18#t0.0#template <typename T> void comment_to_html_conversion_18(T aaa)aaa0in Blah blah] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -701,7 +732,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_ParamCommand in implicitly ParamName=[aaa] ParamIndex=0 // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Blah blah]))))] -// CHECK: annotate-comments.cpp:306:6: FunctionTemplate=comment_to_html_conversion_19:{{.*}} FullCommentAsHTML=[
    T1
    Aaa
    T2
    Bbb
    ] FullCommentAsXML=[comment_to_html_conversion_19c:@FT@>2#T#Tcomment_to_html_conversion_19#t0.0#t0.1#T10 AaaT21 Bbb ] +// CHECK: annotate-comments.cpp:332:6: FunctionTemplate=comment_to_html_conversion_19:{{.*}} FullCommentAsHTML=[
    T1
    Aaa
    T2
    Bbb
    ] FullCommentAsXML=[comment_to_html_conversion_19c:@FT@>2#T#Tcomment_to_html_conversion_19#t0.0#t0.1#template <typename T1, typename T2> void comment_to_html_conversion_19(T1 aaa, T2 bbb)T10 AaaT21 Bbb ] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -713,7 +744,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_TParamCommand ParamName=[T1] ParamPosition={0} // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa]))))] -// CHECK: annotate-comments.cpp:313:6: FunctionTemplate=comment_to_html_conversion_20:{{.*}} FullCommentAsHTML=[
    T1
    Aaa
    T2
    Bbb
    V
    Ccc
    U
    Zzz
    ] FullCommentAsXML=[comment_to_html_conversion_20c:@FT@>3#T#T#NIcomment_to_html_conversion_20#t0.0#t0.1#T10 AaaT21 Bbb V2 Ccc U Zzz ] +// CHECK: annotate-comments.cpp:339:6: FunctionTemplate=comment_to_html_conversion_20:{{.*}} FullCommentAsHTML=[
    T1
    Aaa
    T2
    Bbb
    V
    Ccc
    U
    Zzz
    ] FullCommentAsXML=[comment_to_html_conversion_20c:@FT@>3#T#T#NIcomment_to_html_conversion_20#t0.0#t0.1#template <typename T1, typename T2, int V> void comment_to_html_conversion_20(T1 aaa, T2 bbb)T10 AaaT21 Bbb V2 Ccc U Zzz ] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -733,7 +764,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_TParamCommand ParamName=[T1] ParamPosition={0} // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Aaa]))))] -// CHECK: annotate-comments.cpp:320:6: FunctionTemplate=comment_to_html_conversion_21:{{.*}} FullCommentAsHTML=[
    TTT
    Ddd
    C
    Ccc
    T
    Aaa
    TT
    Bbb
    ] FullCommentAsXML=[comment_to_html_conversion_21c:@FT@>1#t>2#t>1#T#Tcomment_to_html_conversion_21#TTT0 Ddd C Ccc T Aaa TT Bbb] +// CHECK: annotate-comments.cpp:346:6: FunctionTemplate=comment_to_html_conversion_21:{{.*}} FullCommentAsHTML=[
    TTT
    Ddd
    C
    Ccc
    T
    Aaa
    TT
    Bbb
    ] FullCommentAsXML=[comment_to_html_conversion_21c:@FT@>1#t>2#t>1#T#Tcomment_to_html_conversion_21#template <template <template <typename T> class TT, class C> class TTT> void comment_to_html_conversion_21()TTT0 Ddd C Ccc T Aaa TT Bbb] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -753,7 +784,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_TParamCommand ParamName=[TT] ParamPosition={0, 0} // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Bbb]))))] -// CHECK: annotate-comments.cpp:329:6: FunctionDecl=comment_to_html_conversion_22:{{.*}} FullCommentAsHTML=[

    Aaa.

    Bbb.

    x1
    Ccc.
    x2
    Ddd.

    Returns Eee.

    ] FullCommentAsXML=[comment_to_html_conversion_22c:@F@comment_to_html_conversion_22#I#I# Aaa.x10in Ccc. x21in Ddd. Eee. Bbb.] +// CHECK: annotate-comments.cpp:355:6: FunctionDecl=comment_to_html_conversion_22:{{.*}} FullCommentAsHTML=[

    Aaa.

    Bbb.

    x1
    Ccc.
    x2
    Ddd.

    Returns Eee.

    ] FullCommentAsXML=[comment_to_html_conversion_22c:@F@comment_to_html_conversion_22#I#I#void comment_to_html_conversion_22(int x1, int x2) Aaa.x10in Ccc. x21in Ddd. Eee. Bbb.] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -776,7 +807,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_BlockCommand CommandName=[returns] // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ Eee.]))))] -// CHECK: annotate-comments.cpp:332:6: FunctionDecl=comment_to_html_conversion_23:{{.*}} FullCommentAsHTML=[


    Aaa

    ] FullCommentAsXML=[comment_to_html_conversion_23c:@F@comment_to_html_conversion_23# ]]>]]>Aaa</a>] +// CHECK: annotate-comments.cpp:358:6: FunctionDecl=comment_to_html_conversion_23:{{.*}} FullCommentAsHTML=[


    Aaa

    ] FullCommentAsXML=[comment_to_html_conversion_23c:@F@comment_to_html_conversion_23#void comment_to_html_conversion_23() ]]>]]>Aaa</a>] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -785,7 +816,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_HTMLStartTag Name=[a] Attrs: href=http://example.com/) // CHECK-NEXT: (CXComment_Text Text=[Aaa]) // CHECK-NEXT: (CXComment_HTMLEndTag Name=[a])))] -// CHECK: annotate-comments.cpp:338:6: FunctionDecl=comment_to_html_conversion_24:{{.*}} FullCommentAsHTML=[
     <a href="http://example.com/">Aaa</a>\n <a href='http://example.com/'>Aaa</a>
    ] FullCommentAsXML=[comment_to_html_conversion_24c:@F@comment_to_html_conversion_24# <a href="http://example.com/">Aaa</a>\n <a href='http://example.com/'>Aaa</a>] +// CHECK: annotate-comments.cpp:364:6: FunctionDecl=comment_to_html_conversion_24:{{.*}} FullCommentAsHTML=[
     <a href="http://example.com/">Aaa</a>\n <a href='http://example.com/'>Aaa</a>
    ] FullCommentAsXML=[comment_to_html_conversion_24c:@F@comment_to_html_conversion_24#void comment_to_html_conversion_24() <a href="http://example.com/">Aaa</a>\n <a href='http://example.com/'>Aaa</a>] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph IsWhitespace @@ -793,7 +824,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_VerbatimBlockCommand CommandName=[verbatim] // CHECK-NEXT: (CXComment_VerbatimBlockLine Text=[ Aaa]) // CHECK-NEXT: (CXComment_VerbatimBlockLine Text=[ Aaa])))] -// CHECK: annotate-comments.cpp:345:6: FunctionDecl=comment_to_html_conversion_25:{{.*}} FullCommentAsHTML=[

    Blah blah.

    ] FullCommentAsXML=[comment_to_html_conversion_25c:@F@comment_to_html_conversion_25# Blah blah.] +// CHECK: annotate-comments.cpp:371:6: FunctionDecl=comment_to_html_conversion_25:{{.*}} FullCommentAsHTML=[

    Blah blah.

    ] FullCommentAsXML=[comment_to_html_conversion_25c:@F@comment_to_html_conversion_25#void comment_to_html_conversion_25() Blah blah.] // CHECK: CommentAST=[ // CHECK: (CXComment_FullComment // CHECK: (CXComment_Paragraph IsWhitespace @@ -810,13 +841,19 @@ enum class comment_to_xml_conversion_17 { // CHECK: (CXComment_VerbatimLine Text=[ foo]) // CHECK: (CXComment_Paragraph // CHECK: (CXComment_Text Text=[ Blah blah.])))] -// CHECK: annotate-comments.cpp:348:6: FunctionDecl=comment_to_html_conversion_26:{{.*}} FullCommentAsHTML=[

    Aaa

    ] FullCommentAsXML=[comment_to_html_conversion_26c:@F@comment_to_html_conversion_26# Aaa] +// CHECK: annotate-comments.cpp:374:6: FunctionDecl=comment_to_html_conversion_26:{{.*}} FullCommentAsHTML=[

    ] FullCommentAsXML=[comment_to_html_conversion_26c:@F@comment_to_html_conversion_26#void comment_to_html_conversion_26() ] +// CHECK: CommentAST=[ +// CHECK: (CXComment_FullComment +// CHECK: (CXComment_Paragraph +// CHECK: (CXComment_Text Text=[ ] IsWhitespace) +// CHECK: (CXComment_InlineCommand CommandName=[unknown] RenderNormal)))] +// CHECK: annotate-comments.cpp:377:6: FunctionDecl=comment_to_html_conversion_27:{{.*}} FullCommentAsHTML=[

    Aaa

    ] FullCommentAsXML=[comment_to_html_conversion_27c:@F@comment_to_html_conversion_27#void comment_to_html_conversion_27() Aaa] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ ] IsWhitespace) // CHECK-NEXT: (CXComment_InlineCommand CommandName=[b] RenderBold Arg[0]=Aaa)))] -// CHECK: annotate-comments.cpp:351:6: FunctionDecl=comment_to_html_conversion_27:{{.*}} FullCommentAsHTML=[

    Aaa Bbb

    ] FullCommentAsXML=[comment_to_html_conversion_27c:@F@comment_to_html_conversion_27# Aaa Bbb] +// CHECK: annotate-comments.cpp:380:6: FunctionDecl=comment_to_html_conversion_28:{{.*}} FullCommentAsHTML=[

    Aaa Bbb

    ] FullCommentAsXML=[comment_to_html_conversion_28c:@F@comment_to_html_conversion_28#void comment_to_html_conversion_28() Aaa Bbb] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -824,7 +861,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_InlineCommand CommandName=[c] RenderMonospaced Arg[0]=Aaa) // CHECK-NEXT: (CXComment_Text Text=[ ] IsWhitespace) // CHECK-NEXT: (CXComment_InlineCommand CommandName=[p] RenderMonospaced Arg[0]=Bbb)))] -// CHECK: annotate-comments.cpp:354:6: FunctionDecl=comment_to_html_conversion_28:{{.*}} FullCommentAsHTML=[

    Aaa Bbb Ccc

    ] FullCommentAsXML=[comment_to_html_conversion_28c:@F@comment_to_html_conversion_28# Aaa Bbb Ccc] +// CHECK: annotate-comments.cpp:383:6: FunctionDecl=comment_to_html_conversion_29:{{.*}} FullCommentAsHTML=[

    Aaa Bbb Ccc

    ] FullCommentAsXML=[comment_to_html_conversion_29c:@F@comment_to_html_conversion_29#void comment_to_html_conversion_29() Aaa Bbb Ccc] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -834,7 +871,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_InlineCommand CommandName=[e] RenderEmphasized Arg[0]=Bbb) // CHECK-NEXT: (CXComment_Text Text=[ ] IsWhitespace) // CHECK-NEXT: (CXComment_InlineCommand CommandName=[em] RenderEmphasized Arg[0]=Ccc)))] -// CHECK: annotate-comments.cpp:357:6: FunctionDecl=comment_to_html_conversion_29:{{.*}} FullCommentAsHTML=[

    1<2 3<4 5<6

    9<10
    bbb
    7<8
    aaa
    ] FullCommentAsXML=[comment_to_html_conversion_29c:@F@comment_to_html_conversion_29# 1<2 3<4 5<6 9<10 bbb7<8in aaa ] +// CHECK: annotate-comments.cpp:386:6: FunctionDecl=comment_to_html_conversion_30:{{.*}} FullCommentAsHTML=[

    1<2 3<4 5<6

    9<10
    bbb
    7<8
    aaa
    ] FullCommentAsXML=[comment_to_html_conversion_30c:@F@comment_to_html_conversion_30#void comment_to_html_conversion_30() 1<2 3<4 5<6 9<10 bbb7<8in aaa ] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -851,7 +888,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_TParamCommand ParamName=[9<10] ParamPosition=Invalid // CHECK-NEXT: (CXComment_Paragraph // CHECK-NEXT: (CXComment_Text Text=[ bbb]))))] -// CHECK: annotate-comments.cpp:360:6: FunctionDecl=comment_to_html_conversion_30:{{.*}} FullCommentAsHTML=[

    \ @ & $ # < > % " . ::

    ] FullCommentAsXML=[comment_to_html_conversion_30c:@F@comment_to_html_conversion_30# \ @ & $ # < > % " . ::] +// CHECK: annotate-comments.cpp:389:6: FunctionDecl=comment_to_html_conversion_31:{{.*}} FullCommentAsHTML=[

    \ @ & $ # < > % " . ::

    ] FullCommentAsXML=[comment_to_html_conversion_31c:@F@comment_to_html_conversion_31#void comment_to_html_conversion_31() \ @ & $ # < > % " . ::] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -877,7 +914,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_Text Text=[.]) // CHECK-NEXT: (CXComment_Text Text=[ ] IsWhitespace) // CHECK-NEXT: (CXComment_Text Text=[::])))] -// CHECK: annotate-comments.cpp:363:6: FunctionDecl=comment_to_html_conversion_31:{{.*}} FullCommentAsHTML=[

    & < > "

    ] FullCommentAsXML=[comment_to_html_conversion_31c:@F@comment_to_html_conversion_31# & < > "] +// CHECK: annotate-comments.cpp:392:6: FunctionDecl=comment_to_html_conversion_32:{{.*}} FullCommentAsHTML=[

    & < > "

    ] FullCommentAsXML=[comment_to_html_conversion_32c:@F@comment_to_html_conversion_32#void comment_to_html_conversion_32() & < > "] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -889,7 +926,7 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_Text Text=[>]) // CHECK-NEXT: (CXComment_Text Text=[ ] IsWhitespace) // CHECK-NEXT: (CXComment_Text Text=["])))] -// CHECK: annotate-comments.cpp:366:6: FunctionDecl=comment_to_html_conversion_32:{{.*}} FullCommentAsHTML=[

    0<i

    ] FullCommentAsXML=[comment_to_html_conversion_32c:@F@comment_to_html_conversion_32# ]]>0<i</em>] +// CHECK: annotate-comments.cpp:395:6: FunctionDecl=comment_to_html_conversion_33:{{.*}} FullCommentAsHTML=[

    0<i

    ] FullCommentAsXML=[comment_to_html_conversion_33c:@F@comment_to_html_conversion_33#void comment_to_html_conversion_33() ]]>0<i</em>] // CHECK-NEXT: CommentAST=[ // CHECK-NEXT: (CXComment_FullComment // CHECK-NEXT: (CXComment_Paragraph @@ -900,27 +937,27 @@ enum class comment_to_xml_conversion_17 { // CHECK-NEXT: (CXComment_Text Text=[i]) // CHECK-NEXT: (CXComment_HTMLEndTag Name=[em])))] -// CHECK: annotate-comments.cpp:369:7: ClassDecl=comment_to_xml_conversion_01:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_01c:@C@comment_to_xml_conversion_01 Aaa.] -// CHECK: annotate-comments.cpp:371:3: CXXConstructor=comment_to_xml_conversion_01:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_01c:@C@comment_to_xml_conversion_01@F@comment_to_xml_conversion_01#I#aaa0in Blah blah.] -// CHECK: annotate-comments.cpp:374:3: CXXDestructor=~comment_to_xml_conversion_01:{{.*}} FullCommentAsXML=[~comment_to_xml_conversion_01c:@C@comment_to_xml_conversion_01@F@~comment_to_xml_conversion_01# Aaa.] -// CHECK: annotate-comments.cpp:377:7: CXXMethod=comment_to_xml_conversion_02:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_02c:@C@comment_to_xml_conversion_01@F@comment_to_xml_conversion_02#I#aaa0in Blah blah.] -// CHECK: annotate-comments.cpp:380:14: CXXMethod=comment_to_xml_conversion_03:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_03c:@C@comment_to_xml_conversion_01@F@comment_to_xml_conversion_03#I#Saaa0in Blah blah.] -// CHECK: annotate-comments.cpp:383:7: FieldDecl=comment_to_xml_conversion_04:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_04c:@C@comment_to_xml_conversion_01@FI@comment_to_xml_conversion_04 Aaa.] -// CHECK: annotate-comments.cpp:386:14: VarDecl=comment_to_xml_conversion_05:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_05c:@C@comment_to_xml_conversion_01@comment_to_xml_conversion_05 Aaa.] -// CHECK: annotate-comments.cpp:389:8: CXXMethod=operator():{{.*}} FullCommentAsXML=[operator()c:@C@comment_to_xml_conversion_01@F@operator()#I#aaa0in Blah blah.] -// CHECK: annotate-comments.cpp:392:3: CXXConversion=operator _Bool:{{.*}} FullCommentAsXML=[operator _Boolc:@C@comment_to_xml_conversion_01@F@operator _Bool# Aaa.] -// CHECK: annotate-comments.cpp:395:15: TypedefDecl=comment_to_xml_conversion_06:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_06c:annotate-comments.cpp@8055@C@comment_to_xml_conversion_01@T@comment_to_xml_conversion_06 Aaa.] -// CHECK: annotate-comments.cpp:398:9: TypeAliasDecl=comment_to_xml_conversion_07:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_07c:@C@comment_to_xml_conversion_01@comment_to_xml_conversion_07 Aaa.] -// CHECK: annotate-comments.cpp:405:3: UnexposedDecl=comment_to_xml_conversion_09:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_09c:@C@comment_to_xml_conversion_01@comment_to_xml_conversion_09 Aaa.] -// CHECK: annotate-comments.cpp:410:6: FunctionTemplate=comment_to_xml_conversion_10:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_10c:@FT@>2#T#Tcomment_to_xml_conversion_10#t0.0#t0.1# Aaa.] -// CHECK: annotate-comments.cpp:414:6: FunctionDecl=comment_to_xml_conversion_10:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_10c:@F@comment_to_xml_conversion_10<#I#I>#I#I# Aaa.] -// CHECK: annotate-comments.cpp:418:7: ClassTemplate=comment_to_xml_conversion_11:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_11c:@CT>2#T#T@comment_to_xml_conversion_11 Aaa.] 
-// CHECK: annotate-comments.cpp:422:7: ClassTemplatePartialSpecialization=comment_to_xml_conversion_11:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_11c:@CP>1#T@comment_to_xml_conversion_11>#t0.0#I Aaa.] -// CHECK: annotate-comments.cpp:426:7: ClassDecl=comment_to_xml_conversion_11:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_11c:@C@comment_to_xml_conversion_11>#I#I Aaa.] -// CHECK: annotate-comments.cpp:429:5: VarDecl=comment_to_xml_conversion_12:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_12c:@comment_to_xml_conversion_12 Aaa.] -// CHECK: annotate-comments.cpp:432:11: Namespace=comment_to_xml_conversion_13:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_13c:@N@comment_to_xml_conversion_13 Aaa.] -// CHECK: annotate-comments.cpp:434:13: Namespace=comment_to_xml_conversion_14:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_14c:@N@comment_to_xml_conversion_13@N@comment_to_xml_conversion_14 Aaa.] -// CHECK: annotate-comments.cpp:439:6: EnumDecl=comment_to_xml_conversion_15:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_15c:@E@comment_to_xml_conversion_15 Aaa.] -// CHECK: annotate-comments.cpp:441:3: EnumConstantDecl=comment_to_xml_conversion_16:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_16c:@E@comment_to_xml_conversion_15@comment_to_xml_conversion_16 Aaa.] -// CHECK: annotate-comments.cpp:445:12: EnumDecl=comment_to_xml_conversion_17:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_17c:@E@comment_to_xml_conversion_17 Aaa.] -// CHECK: annotate-comments.cpp:447:3: EnumConstantDecl=comment_to_xml_conversion_18:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_18c:@E@comment_to_xml_conversion_17@comment_to_xml_conversion_18 Aaa.] +// CHECK: annotate-comments.cpp:398:7: ClassDecl=comment_to_xml_conversion_01:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_01c:@C@comment_to_xml_conversion_01class comment_to_xml_conversion_01 {\n} Aaa.] +// CHECK: annotate-comments.cpp:400:3: CXXConstructor=comment_to_xml_conversion_01:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_01c:@C@comment_to_xml_conversion_01@F@comment_to_xml_conversion_01#I#aaa0in Blah blah.] +// CHECK: annotate-comments.cpp:403:3: CXXDestructor=~comment_to_xml_conversion_01:{{.*}} FullCommentAsXML=[~comment_to_xml_conversion_01c:@C@comment_to_xml_conversion_01@F@~comment_to_xml_conversion_01#void ~comment_to_xml_conversion_01() Aaa.] +// CHECK: annotate-comments.cpp:406:7: CXXMethod=comment_to_xml_conversion_02:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_02c:@C@comment_to_xml_conversion_01@F@comment_to_xml_conversion_02#I#int comment_to_xml_conversion_02(int aaa)aaa0in Blah blah.] +// CHECK: annotate-comments.cpp:409:14: CXXMethod=comment_to_xml_conversion_03:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_03c:@C@comment_to_xml_conversion_01@F@comment_to_xml_conversion_03#I#Sstatic int comment_to_xml_conversion_03(int aaa)aaa0in Blah blah.] +// CHECK: annotate-comments.cpp:412:7: FieldDecl=comment_to_xml_conversion_04:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_04c:@C@comment_to_xml_conversion_01@FI@comment_to_xml_conversion_04int comment_to_xml_conversion_04 Aaa.] +// CHECK: annotate-comments.cpp:415:14: VarDecl=comment_to_xml_conversion_05:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_05c:@C@comment_to_xml_conversion_01@comment_to_xml_conversion_05static int comment_to_xml_conversion_05 Aaa.] 
+// CHECK: annotate-comments.cpp:418:8: CXXMethod=operator():{{.*}} FullCommentAsXML=[operator()c:@C@comment_to_xml_conversion_01@F@operator()#I#void operator()(int aaa)aaa0in Blah blah.] +// CHECK: annotate-comments.cpp:421:3: CXXConversion=operator _Bool:{{.*}} FullCommentAsXML=[operator _Boolc:@C@comment_to_xml_conversion_01@F@operator _Bool#bool operator _Bool() Aaa.] +// CHECK: annotate-comments.cpp:424:15: TypedefDecl=comment_to_xml_conversion_06:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_06c:annotate-comments.cpp@8505@C@comment_to_xml_conversion_01@T@comment_to_xml_conversion_06typedef int comment_to_xml_conversion_06 Aaa.] +// CHECK: annotate-comments.cpp:427:9: TypeAliasDecl=comment_to_xml_conversion_07:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_07c:@C@comment_to_xml_conversion_01@comment_to_xml_conversion_07using comment_to_xml_conversion_07 = int Aaa.] +// CHECK: annotate-comments.cpp:434:3: UnexposedDecl=comment_to_xml_conversion_09:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_09c:@C@comment_to_xml_conversion_01@comment_to_xml_conversion_09template <typename T> using comment_to_xml_conversion_09 = comment_to_xml_conversion_08<T, int> Aaa.] +// CHECK: annotate-comments.cpp:439:6: FunctionTemplate=comment_to_xml_conversion_10:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_10c:@FT@>2#T#Tcomment_to_xml_conversion_10#t0.0#t0.1#template <typename T = int, typename U = int> void comment_to_xml_conversion_10(int aaa, int bbb)template <typename T, typename U> void comment_to_xml_conversion_10(T aaa, U bbb) Aaa.] +// CHECK: annotate-comments.cpp:443:6: FunctionDecl=comment_to_xml_conversion_10:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_10c:@F@comment_to_xml_conversion_10<#I#I>#I#I#void comment_to_xml_conversion_10(int aaa, int bbb) Aaa.] +// CHECK: annotate-comments.cpp:447:7: ClassTemplate=comment_to_xml_conversion_11:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_11c:@CT>2#T#T@comment_to_xml_conversion_11template <typename T = int, typename U = int> class comment_to_xml_conversion_11 {\n}\ntemplate <typename T, typename U> class comment_to_xml_conversion_11 {\n} Aaa.] +// CHECK: annotate-comments.cpp:451:7: ClassTemplatePartialSpecialization=comment_to_xml_conversion_11:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_11c:@CP>1#T@comment_to_xml_conversion_11>#t0.0#Iclass comment_to_xml_conversion_11 {\n} Aaa.] +// CHECK: annotate-comments.cpp:455:7: ClassDecl=comment_to_xml_conversion_11:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_11c:@C@comment_to_xml_conversion_11>#I#Iclass comment_to_xml_conversion_11 {\n} Aaa.] +// CHECK: annotate-comments.cpp:458:5: VarDecl=comment_to_xml_conversion_12:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_12c:@comment_to_xml_conversion_12int comment_to_xml_conversion_12 Aaa.] +// CHECK: annotate-comments.cpp:461:11: Namespace=comment_to_xml_conversion_13:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_13c:@N@comment_to_xml_conversion_13namespace comment_to_xml_conversion_13 {\n} Aaa.] +// CHECK: annotate-comments.cpp:463:13: Namespace=comment_to_xml_conversion_14:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_14c:@N@comment_to_xml_conversion_13@N@comment_to_xml_conversion_14namespace comment_to_xml_conversion_14 {\n} Aaa.] +// CHECK: annotate-comments.cpp:468:6: EnumDecl=comment_to_xml_conversion_15:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_15c:@E@comment_to_xml_conversion_15enum comment_to_xml_conversion_15{{( : int)?}} {\n} Aaa.] 
+// CHECK: annotate-comments.cpp:470:3: EnumConstantDecl=comment_to_xml_conversion_16:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_16c:@E@comment_to_xml_conversion_15@comment_to_xml_conversion_16comment_to_xml_conversion_16 Aaa.] +// CHECK: annotate-comments.cpp:474:12: EnumDecl=comment_to_xml_conversion_17:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_17c:@E@comment_to_xml_conversion_17enum class comment_to_xml_conversion_17 : int {\n} Aaa.] +// CHECK: annotate-comments.cpp:476:3: EnumConstantDecl=comment_to_xml_conversion_18:{{.*}} FullCommentAsXML=[comment_to_xml_conversion_18c:@E@comment_to_xml_conversion_17@comment_to_xml_conversion_18comment_to_xml_conversion_18 Aaa.] diff --git a/test/Index/annotate-deep-statements.cpp b/test/Index/annotate-deep-statements.cpp new file mode 100644 index 0000000..32a48b7 --- /dev/null +++ b/test/Index/annotate-deep-statements.cpp @@ -0,0 +1,95 @@ +// RUN: c-index-test -test-annotate-tokens=%s:1:1:1000:1 %s | FileCheck %s + +// rdar://11979525 +// Check that we don't get stack overflow trying to annotate an extremely deep AST. + +struct S { + S &operator()(); +}; + +// CHECK: Identifier: "foo" [11:6 - 11:9] FunctionDecl=foo:11:6 (Definition) +void foo() { + S s; + s()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + 
()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + 
()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + 
()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()()() + ; +} diff --git a/test/Index/annotate-macro-args.m b/test/Index/annotate-macro-args.m index b92c0b2..7d56523 100644 --- a/test/Index/annotate-macro-args.m +++ b/test/Index/annotate-macro-args.m @@ -1,6 +1,6 @@ // Test without PCH -// RUN: c-index-test -test-annotate-tokens=%S/annotate-macro-args.h:9:1:10:1 %s -include annotate-macro-args.h | FileCheck -check-prefix=CHECK1 %s -// RUN: c-index-test -test-annotate-tokens=%S/annotate-macro-args.h:15:1:16:1 %s -include annotate-macro-args.h | FileCheck -check-prefix=CHECK2 %s +// RUN: c-index-test -test-annotate-tokens=%S/annotate-macro-args.h:9:1:10:1 %s -include %S/annotate-macro-args.h | FileCheck -check-prefix=CHECK1 %s +// RUN: c-index-test -test-annotate-tokens=%S/annotate-macro-args.h:15:1:16:1 %s -include %S/annotate-macro-args.h | FileCheck -check-prefix=CHECK2 %s // Test with PCH // RUN: c-index-test -write-pch %t.pch -x objective-c-header %S/annotate-macro-args.h -Xclang -detailed-preprocessing-record diff --git a/test/Index/annotate-module.m b/test/Index/annotate-module.m new file mode 100644 index 0000000..3423f2b --- /dev/null +++ b/test/Index/annotate-module.m @@ -0,0 +1,42 @@ + +#include +@__experimental_modules_import DependsOnModule; +int glob; + +// RUN: rm -rf %t.cache +// RUN: c-index-test -test-annotate-tokens=%s:2:1:5:1 %s -fmodule-cache-path %t.cache -fmodules -F %S/../Modules/Inputs \ +// RUN: | FileCheck %s + +// CHECK: Punctuation: "#" [2:1 - 2:2] inclusion directive=[[INC_DIR:DependsOnModule[/\\]DependsOnModule\.h \(.*/Modules/Inputs/DependsOnModule\.framework[/\\]Headers[/\\]DependsOnModule.h\)]] +// CHECK-NEXT: Identifier: "include" [2:2 - 2:9] inclusion directive=[[INC_DIR]] +// CHECK-NEXT: Punctuation: "<" [2:10 - 2:11] inclusion directive=[[INC_DIR]] +// CHECK-NEXT: Identifier: "DependsOnModule" [2:11 - 2:26] inclusion directive=[[INC_DIR]] +// CHECK-NEXT: Punctuation: "/" [2:26 - 2:27] inclusion directive=[[INC_DIR]] +// CHECK-NEXT: Identifier: "DependsOnModule" [2:27 - 2:42] inclusion directive=[[INC_DIR]] +// CHECK-NEXT: Punctuation: "." 
[2:42 - 2:43] inclusion directive=[[INC_DIR]] +// CHECK-NEXT: Identifier: "h" [2:43 - 2:44] inclusion directive=[[INC_DIR]] +// CHECK-NEXT: Punctuation: ">" [2:44 - 2:45] inclusion directive=[[INC_DIR]] +// CHECK-NEXT: Punctuation: "@" [3:1 - 3:2] ModuleImport=DependsOnModule:3:1 +// CHECK-NEXT: Keyword: "__experimental_modules_import" [3:2 - 3:31] ModuleImport=DependsOnModule:3:1 +// CHECK-NEXT: Identifier: "DependsOnModule" [3:32 - 3:47] ModuleImport=DependsOnModule:3:1 +// CHECK-NEXT: Punctuation: ";" [3:47 - 3:48] +// CHECK-NEXT: Keyword: "int" [4:1 - 4:4] VarDecl=glob:4:5 +// CHECK-NEXT: Identifier: "glob" [4:5 - 4:9] VarDecl=glob:4:5 +// CHECK-NEXT: Punctuation: ";" [4:9 - 4:10] + +// RUN: c-index-test -test-annotate-tokens=%S/../Modules/Inputs/Module.framework/Headers/Sub.h:1:1:3:1 %s -fmodule-cache-path %t.cache -fmodules -F %S/../Modules/Inputs \ +// RUN: | FileCheck %s -check-prefix=CHECK-MOD + +// CHECK-MOD: Punctuation: "#" [1:1 - 1:2] inclusion directive=[[INC_DIR:Module[/\\]Sub2\.h \(.*/Modules/Inputs/Module\.framework[/\\]Headers[/\\]Sub2.h\)]] +// CHECK-MOD-NEXT: Identifier: "include" [1:2 - 1:9] inclusion directive=[[INC_DIR]] +// CHECK-MOD-NEXT: Punctuation: "<" [1:10 - 1:11] inclusion directive=[[INC_DIR]] +// CHECK-MOD-NEXT: Identifier: "Module" [1:11 - 1:17] inclusion directive=[[INC_DIR]] +// CHECK-MOD-NEXT: Punctuation: "/" [1:17 - 1:18] inclusion directive=[[INC_DIR]] +// CHECK-MOD-NEXT: Identifier: "Sub2" [1:18 - 1:22] inclusion directive=[[INC_DIR]] +// CHECK-MOD-NEXT: Punctuation: "." [1:22 - 1:23] inclusion directive=[[INC_DIR]] +// CHECK-MOD-NEXT: Identifier: "h" [1:23 - 1:24] inclusion directive=[[INC_DIR]] +// CHECK-MOD-NEXT: Punctuation: ">" [1:24 - 1:25] inclusion directive=[[INC_DIR]] +// CHECK-MOD-NEXT: Keyword: "int" [2:1 - 2:4] VarDecl=Module_Sub:2:6 +// CHECK-MOD-NEXT: Punctuation: "*" [2:5 - 2:6] VarDecl=Module_Sub:2:6 +// CHECK-MOD-NEXT: Identifier: "Module_Sub" [2:6 - 2:16] VarDecl=Module_Sub:2:6 +// CHECK-MOD-NEXT: Punctuation: ";" [2:16 - 2:17] diff --git a/test/Index/arc-annotate.m b/test/Index/arc-annotate.m index b836bc8..9534093 100644 --- a/test/Index/arc-annotate.m +++ b/test/Index/arc-annotate.m @@ -4,7 +4,12 @@ @property (unsafe_unretained, nonatomic) id third_property; @end -// RUN: c-index-test -test-annotate-tokens=%s:1:1:5:1 %s -fobjc-arc -fobjc-nonfragile-abi | FileCheck %s +void foo() { + A *avar; + avar = 0; +} + +// RUN: c-index-test -test-annotate-tokens=%s:1:1:11:1 %s -fobjc-arc -fobjc-nonfragile-abi | FileCheck %s // CHECK: Punctuation: "@" [1:1 - 1:2] ObjCInterfaceDecl=A:1:12 // CHECK: Keyword: "interface" [1:2 - 1:11] ObjCInterfaceDecl=A:1:12 // CHECK: Identifier: "A" [1:12 - 1:13] ObjCInterfaceDecl=A:1:12 @@ -36,3 +41,17 @@ // CHECK: Punctuation: ")" [4:40 - 4:41] ObjCPropertyDecl=third_property:4:45 // CHECK: Identifier: "id" [4:42 - 4:44] TypeRef=id:0:0 // CHECK: Identifier: "third_property" [4:45 - 4:59] ObjCPropertyDecl=third_property:4:45 + +// CHECK: Identifier: "A" [8:3 - 8:4] ObjCClassRef=A:1:12 +// CHECK: Punctuation: "*" [8:5 - 8:6] VarDecl=avar:8:6 (Definition) +// CHECK: Identifier: "avar" [8:6 - 8:10] VarDecl=avar:8:6 (Definition) +// CHECK: Punctuation: ";" [8:10 - 8:11] DeclStmt= +// CHECK: Identifier: "avar" [9:3 - 9:7] DeclRefExpr=avar:8:6 +// CHECK: Punctuation: "=" [9:8 - 9:9] BinaryOperator= +// CHECK: Literal: "0" [9:10 - 9:11] IntegerLiteral= +// CHECK: Punctuation: ";" [9:11 - 9:12] CompoundStmt= + +// RUN: c-index-test -file-refs-at=%s:8:8 %s -fobjc-arc -fobjc-nonfragile-abi | FileCheck %s 
-check-prefix=CHECK-REFS +// CHECK-REFS: VarDecl=avar:8:6 (Definition) +// CHECK-REFS: VarDecl=avar:8:6 (Definition) =[8:6 - 8:10] +// CHECK-REFS: DeclRefExpr=avar:8:6 =[9:3 - 9:7] diff --git a/test/Index/c-index-getCursor-pp.c b/test/Index/c-index-getCursor-pp.c index 0a10450..01b0a69 100644 --- a/test/Index/c-index-getCursor-pp.c +++ b/test/Index/c-index-getCursor-pp.c @@ -15,6 +15,8 @@ B(int x); const char *fname = __FILE__; +#include + // RUN: c-index-test -cursor-at=%s:1:11 -I%S/Inputs %s | FileCheck -check-prefix=CHECK-1 %s // CHECK-1: macro definition=OBSCURE // RUN: c-index-test -cursor-at=%s:2:14 -I%S/Inputs %s | FileCheck -check-prefix=CHECK-2 %s @@ -31,6 +33,8 @@ const char *fname = __FILE__; // CHECK-7: macro expansion=B:12:9 // RUN: c-index-test -cursor-at=%s:16:25 -I%S/Inputs %s | FileCheck -check-prefix=CHECK-8 %s // CHECK-8: macro expansion=__FILE__ +// RUN: c-index-test -cursor-at=%s:18:12 -I%S/Inputs %s | FileCheck -check-prefix=CHECK-9 %s +// CHECK-9: inclusion directive=a.h // Same tests, but with "editing" optimizations // RUN: env CINDEXTEST_EDITING=1 c-index-test -cursor-at=%s:1:11 -I%S/Inputs %s | FileCheck -check-prefix=CHECK-1 %s diff --git a/test/Index/code-completion-skip-bodies.cpp b/test/Index/code-completion-skip-bodies.cpp new file mode 100644 index 0000000..67b2196 --- /dev/null +++ b/test/Index/code-completion-skip-bodies.cpp @@ -0,0 +1,20 @@ + +// This is to make sure we skip function bodies. +void func_to_skip() { + undeclared1 = 0; +} + +struct S { int x; }; + +void func(S *s) { + undeclared2 = 0; + s->x = 0; +} + +// RUN: c-index-test -code-completion-at=%s:11:6 %s 2>&1 | FileCheck %s +// CHECK-NOT: error: use of undeclared identifier 'undeclared1' +// CHECK: error: use of undeclared identifier 'undeclared2' +// CHECK: FieldDecl:{ResultType int}{TypedText x} + +// FIXME: Investigating +// XFAIL: cygwin,mingw32,win32 diff --git a/test/Index/comment-xml-schema.c b/test/Index/comment-xml-schema.c index 1166e78..91ea7b2 100644 --- a/test/Index/comment-xml-schema.c +++ b/test/Index/comment-xml-schema.c @@ -12,6 +12,11 @@ // RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-function-08.xml // RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-function-09.xml // +// RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-availability-attr-01.xml +// RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-availability-attr-02.xml +// RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-deprecated-attr.xml +// RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-unavailable-attr.xml +// // RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-class-01.xml // RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-class-02.xml // RUN: xmllint --noout --relaxng %S/../../bindings/xml/comment-xml-schema.rng %S/Inputs/CommentXML/valid-class-03.xml diff --git a/test/Index/complete-cxx-inline-methods.cpp b/test/Index/complete-cxx-inline-methods.cpp index d441972..ee359f7 100644 --- a/test/Index/complete-cxx-inline-methods.cpp +++ b/test/Index/complete-cxx-inline-methods.cpp @@ -23,8 +23,8 @@ private: }; } -// RUN: c-index-test -code-completion-at=%s:4:9 %s | FileCheck %s -// RUN: 
c-index-test -code-completion-at=%s:13:7 %s | FileCheck %s +// RUN: c-index-test -code-completion-at=%s:4:9 -std=c++98 %s | FileCheck %s +// RUN: c-index-test -code-completion-at=%s:13:7 -std=c++98 %s | FileCheck %s // CHECK: CXXMethod:{ResultType MyCls::Vec &}{TypedText operator=}{LeftParen (}{Placeholder const MyCls::Vec &}{RightParen )} (34) // CHECK-NEXT: StructDecl:{TypedText Vec}{Text ::} (75) // CHECK-NEXT: FieldDecl:{ResultType int}{TypedText x} (35) diff --git a/test/Index/complete-documentation-templates.cpp b/test/Index/complete-documentation-templates.cpp new file mode 100644 index 0000000..4cb826e --- /dev/null +++ b/test/Index/complete-documentation-templates.cpp @@ -0,0 +1,151 @@ +// Note: the run lines follow their respective tests, since line/column numbers +// matter in this test. + +/// This is T1. +template +void T1(T t) { } + +/// This is T2. +template +void T2(T t) { } + +/// This is T2. +template<> +void T2(int t) { } + +void test_CC1() { + +} + +// Check that implicit instantiations of class templates and members pick up +// comments from class templates and specializations. + +/// This is T3. +template +class T3 { +public: + /// This is T4. + static void T4(); + + /// This is T5. + static int T5; + + /// This is T6. + void T6(); + + /// This is T7. + int T7; + + /// This is T8. + class T8 {}; + + /// This is T9. + enum T9 { + /// This is T10. + T10 + }; + + /// This is T11. + template + void T11(U t) {} + + typedef T3 T12; +}; + +void test_CC2_CC3_CC4() { + T3::T4(); + T3 t3; + t3.T6(); + T3::T8 t8; +} + +/// This is T100. +template +class T100 { +}; + +/// This is T100. +template +class T100 { +public: + /// This is T101. + static void T101(); + + /// This is T102. + static int T102; + + /// This is T103. + void T103(); + + /// This is T104. + int T104; + + /// This is T105. + class T105 {}; + + /// This is T106. + enum T106 { + /// This is T107. + T107 + }; + + /// This is T108. + template + void T108(U t) {} + + typedef T100 T109; + + typedef T100 T110; +}; + +void test_CC5_CC6_CC7() { + T100::T101(); + T100 t100; + t100.T103(); + T100::T105 t105; +} + +// RUN: env CINDEXTEST_COMPLETION_BRIEF_COMMENTS=1 c-index-test -code-completion-at=%s:17:1 %s | FileCheck -check-prefix=CC1 %s +// CHECK-CC1: FunctionTemplate:{ResultType void}{TypedText T1}{{.*}}(brief comment: This is T1.) +// CHECK-CC1: FunctionTemplate:{ResultType void}{TypedText T2}{{.*}}(brief comment: This is T2.) + +// RUN: env CINDEXTEST_COMPLETION_BRIEF_COMMENTS=1 c-index-test -code-completion-at=%s:56:12 %s | FileCheck -check-prefix=CC2 %s +// CHECK-CC2: CXXMethod:{ResultType void}{TypedText T4}{{.*}}(brief comment: This is T4.) +// CHECK-CC2: VarDecl:{ResultType int}{TypedText T5}{{.*}}(brief comment: This is T5.) + +// RUN: env CINDEXTEST_COMPLETION_BRIEF_COMMENTS=1 c-index-test -code-completion-at=%s:58:6 %s | FileCheck -check-prefix=CC3 %s +// CHECK-CC3: FunctionTemplate:{ResultType void}{TypedText T11}{{.*}}(brief comment: This is T11.) +// CHECK-CC3: CXXMethod:{ResultType void}{TypedText T6}{{.*}}(brief comment: This is T6.) +// CHECK-CC3: FieldDecl:{ResultType int}{TypedText T7}{{.*}}(brief comment: This is T7.) + +// RUN: env CINDEXTEST_COMPLETION_BRIEF_COMMENTS=1 c-index-test -code-completion-at=%s:59:12 %s | FileCheck -check-prefix=CC4 %s +// CHECK-CC4: EnumConstantDecl:{ResultType T3::T9}{TypedText T10}{{.*}}(brief comment: This is T10.) 
+// FIXME: after we implement propagating comments through typedefs, this +// typedef for implicit instantiation should pick up the documentation +// comment from class template. +// CHECK-CC4: TypedefDecl:{TypedText T12} +// CHECK-CC4-SHOULD-BE: TypedefDecl:{TypedText T12}{{.*}}(brief comment: This is T3.) +// CHECK-CC4: ClassDecl:{TypedText T8}{{.*}}(brief comment: This is T8.) +// CHECK-CC4: EnumDecl:{TypedText T9}{{.*}}(brief comment: This is T9.) + +// RUN: env CINDEXTEST_COMPLETION_BRIEF_COMMENTS=1 c-index-test -code-completion-at=%s:102:20 %s | FileCheck -check-prefix=CC5 %s +// CHECK-CC5: CXXMethod:{ResultType void}{TypedText T101}{{.*}}(brief comment: This is T101.) +// CHECK-CC5: VarDecl:{ResultType int}{TypedText T102}{{.*}}(brief comment: This is T102.) + +// RUN: env CINDEXTEST_COMPLETION_BRIEF_COMMENTS=1 c-index-test -code-completion-at=%s:104:8 %s | FileCheck -check-prefix=CC6 %s +// CHECK-CC6: CXXMethod:{ResultType void}{TypedText T103}{{.*}}(brief comment: This is T103.) +// CHECK-CC6: FieldDecl:{ResultType int}{TypedText T104}{{.*}}(brief comment: This is T104.) +// CHECK-CC6: FunctionTemplate:{ResultType void}{TypedText T108}{{.*}}(brief comment: This is T108.) + +// RUN: env CINDEXTEST_COMPLETION_BRIEF_COMMENTS=1 c-index-test -code-completion-at=%s:105:20 %s | FileCheck -check-prefix=CC7 %s +// CHECK-CC7: ClassDecl:{TypedText T105}{{.*}}(brief comment: This is T105.) +// CHECK-CC7: EnumDecl:{TypedText T106}{{.*}}(brief comment: This is T106.) +// CHECK-CC7: EnumConstantDecl:{ResultType T100::T106}{TypedText T107}{{.*}}(brief comment: This is T107.) +// FIXME: after we implement propagating comments through typedefs, these two +// typedefs for implicit instantiations should pick up the documentation +// comment from class template. +// CHECK-CC7: TypedefDecl:{TypedText T109} +// CHECK-CC7: TypedefDecl:{TypedText T110} +// CHECK-CC7-SHOULD-BE: TypedefDecl:{TypedText T109}{{.*}}(brief comment: This is T100.) +// CHECK-CC7-SHOULD-BE: TypedefDecl:{TypedText T110}{{.*}}(brief comment: This is T100.) + diff --git a/test/Index/complete-documentation.cpp b/test/Index/complete-documentation.cpp index a64e62f..49b61f0 100644 --- a/test/Index/complete-documentation.cpp +++ b/test/Index/complete-documentation.cpp @@ -47,5 +47,5 @@ void test1() { // CHECK-CC2: FieldDecl:{ResultType int}{TypedText T4}{{.*}}(brief comment: Ddd.) // RUN: env CINDEXTEST_COMPLETION_BRIEF_COMMENTS=1 c-index-test -code-completion-at=%s:37:6 %s | FileCheck -check-prefix=CC3 %s -// CHECK-CC3: CXXMethod:{ResultType void}{TypedText T7}{LeftParen (}{RightParen )} (34) (parent: StructDecl 'T6')(brief comment: Fff.) -// CHECK-CC3: CXXMethod:{ResultType void}{TypedText T8}{LeftParen (}{RightParen )} (34) (parent: StructDecl 'T6')(brief comment: Ggg.) +// CHECK-CC3: CXXMethod:{ResultType void}{TypedText T7}{LeftParen (}{RightParen )} (34)(brief comment: Fff.) +// CHECK-CC3: CXXMethod:{ResultType void}{TypedText T8}{LeftParen (}{RightParen )} (34)(brief comment: Ggg.) 
diff --git a/test/Index/complete-exprs.cpp b/test/Index/complete-exprs.cpp index de3aac5..3f3bd5c 100644 --- a/test/Index/complete-exprs.cpp +++ b/test/Index/complete-exprs.cpp @@ -78,7 +78,7 @@ namespace N { // CHECK-CC4: NotImplemented:{ResultType const X *}{TypedText this} (40) // RUN: c-index-test -code-completion-at=%s:43:14 %s | FileCheck -check-prefix=CHECK-CC5 %s -// CHECK-CC5: FieldDecl:{ResultType int}{TypedText member} (8) (parent: ClassDecl 'N::C') +// CHECK-CC5: FieldDecl:{ResultType int}{TypedText member} (8) // CHECK-CC5: ParmDecl:{ResultType int}{TypedText param} (8) -// CHECK-CC5: StructDecl:{TypedText X} (50) (parent: TranslationUnit '(null)') -// CHECK-CC5: VarDecl:{ResultType int}{TypedText x} (12) (parent: Namespace 'N') +// CHECK-CC5: StructDecl:{TypedText X} (50) +// CHECK-CC5: VarDecl:{ResultType int}{TypedText x} (12) diff --git a/test/Index/complete-lambdas.mm b/test/Index/complete-lambdas.mm index 3f77dd2..68f2b6b 100644 --- a/test/Index/complete-lambdas.mm +++ b/test/Index/complete-lambdas.mm @@ -23,16 +23,16 @@ @end // RUN: c-index-test -code-completion-at=%s:14:6 -std=c++11 %s | FileCheck -check-prefix=CHECK-CC1 %s -// CHECK-CC1: ObjCInstanceMethodDecl:{ResultType id}{TypedText instanceMethod:}{Placeholder (int)}{HorizontalSpace }{TypedText withOther:}{Placeholder (int)} (35) (parent: ObjCInterfaceDecl 'A') +// CHECK-CC1: ObjCInstanceMethodDecl:{ResultType id}{TypedText instanceMethod:}{Placeholder (int)}{HorizontalSpace }{TypedText withOther:}{Placeholder (int)} (35) // RUN: c-index-test -code-completion-at=%s:15:6 -std=c++11 %s | FileCheck -check-prefix=CHECK-CC2 %s -// CHECK-CC2: ObjCClassMethodDecl:{ResultType id}{TypedText classMethod} (35) (parent: ObjCInterfaceDecl 'A') +// CHECK-CC2: ObjCClassMethodDecl:{ResultType id}{TypedText classMethod} (35) // RUN: c-index-test -code-completion-at=%s:16:4 -std=c++11 %s | FileCheck -check-prefix=CHECK-CC3 %s -// CHECK-CC3: ObjCInterfaceDecl:{TypedText A} (50) (parent: TranslationUnit '(null)') +// CHECK-CC3: ObjCInterfaceDecl:{TypedText A} (50) // CHECK-CC3: ParmDecl:{ResultType A *}{TypedText a} (34) -// CHECK-CC3: ObjCInterfaceDecl:{TypedText B} (50) (parent: TranslationUnit '(null)') -// CHECK-CC3: TypedefDecl:{TypedText Class} (50) (parent: TranslationUnit '(null)') +// CHECK-CC3: ObjCInterfaceDecl:{TypedText B} (50) +// CHECK-CC3: TypedefDecl:{TypedText Class} (50) // RUN: c-index-test -code-completion-at=%s:16:21 -x objective-c++ -std=c++11 %s | FileCheck -check-prefix=CHECK-CC4 %s @@ -46,6 +46,6 @@ // CHECK-CC5-NEXT: NotImplemented:{ResultType B *}{TypedText self} (34) // RUN: c-index-test -code-completion-at=%s:20:11 -x objective-c++ -std=c++11 %s | FileCheck -check-prefix=CHECK-CC6 %s -// CHECK-CC6: ObjCInstanceMethodDecl:{ResultType id}{TypedText instanceMethod:}{Placeholder (int)}{HorizontalSpace }{TypedText withOther:}{Placeholder (int)} (37) (parent: ObjCInterfaceDecl 'A') -// CHECK-CC6-NEXT: ObjCInstanceMethodDecl:{ResultType id}{TypedText someMethod:}{Placeholder (A *)} (32) (parent: ObjCImplementationDecl 'B') +// CHECK-CC6: ObjCInstanceMethodDecl:{ResultType id}{TypedText instanceMethod:}{Placeholder (int)}{HorizontalSpace }{TypedText withOther:}{Placeholder (int)} (37) +// CHECK-CC6-NEXT: ObjCInstanceMethodDecl:{ResultType id}{TypedText someMethod:}{Placeholder (A *)} (32) diff --git a/test/Index/complete-macros.c b/test/Index/complete-macros.c index df798a8..6d27b44 100644 --- a/test/Index/complete-macros.c +++ b/test/Index/complete-macros.c @@ -1,8 +1,8 @@ // Note: the run lines follow their 
respective tests, since line/column // matter in this test. - #define FOO(Arg1,Arg2) foobar #define nil 0 +#undef FOO void f() { } @@ -25,7 +25,8 @@ void test_variadic() { } -// RUN: c-index-test -code-completion-at=%s:7:1 %s | FileCheck -check-prefix=CHECK-CC1 %s +// RUN: c-index-test -code-completion-at=%s:7:1 %s | FileCheck -check-prefix=CHECK-CC0 %s +// CHECK-CC0-NOT: FOO // RUN: env CINDEXTEST_EDITING=1 CINDEXTEST_COMPLETION_CACHING=1 c-index-test -code-completion-at=%s:7:1 %s | FileCheck -check-prefix=CHECK-CC1 %s // CHECK-CC1: macro definition:{TypedText FOO}{LeftParen (}{Placeholder Arg1}{Comma , }{Placeholder Arg2}{RightParen )} // RUN: c-index-test -code-completion-at=%s:13:13 %s | FileCheck -check-prefix=CHECK-CC2 %s diff --git a/test/Index/complete-method-decls.m b/test/Index/complete-method-decls.m index ce48246..9e52d93 100644 --- a/test/Index/complete-method-decls.m +++ b/test/Index/complete-method-decls.m @@ -73,11 +73,11 @@ @end // RUN: c-index-test -code-completion-at=%s:17:3 %s | FileCheck -check-prefix=CHECK-CC1 %s -// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText abc} (40) (parent: ObjCProtocolDecl 'P1') -// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text int}{RightParen )}{TypedText getInt} (40) (parent: ObjCProtocolDecl 'P1') -// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText getSelf} (40) (parent: ObjCProtocolDecl 'P1') -// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText initWithInt}{TypedText :}{LeftParen (}{Text int}{RightParen )}{Text x} (40) (parent: ObjCProtocolDecl 'P1') -// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText initWithTwoInts}{TypedText :}{LeftParen (}{Text inout }{Text int}{RightParen )}{Text x}{HorizontalSpace }{TypedText second:}{LeftParen (}{Text int}{RightParen )}{Text y} (40) (parent: ObjCProtocolDecl 'P1') +// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText abc} (40) +// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text int}{RightParen )}{TypedText getInt} (40) +// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText getSelf} (40) +// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText initWithInt}{TypedText :}{LeftParen (}{Text int}{RightParen )}{Text x} (40) +// CHECK-CC1: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText initWithTwoInts}{TypedText :}{LeftParen (}{Text inout }{Text int}{RightParen )}{Text x}{HorizontalSpace }{TypedText second:}{LeftParen (}{Text int}{RightParen )}{Text y} (40) // RUN: c-index-test -code-completion-at=%s:17:7 %s | FileCheck -check-prefix=CHECK-CC2 %s // CHECK-CC2: ObjCInstanceMethodDecl:{TypedText abc} // CHECK-CC2-NEXT: ObjCInstanceMethodDecl:{TypedText getSelf} @@ -98,8 +98,8 @@ // CHECK-CC4: ObjCInstanceMethodDecl:{LeftParen (}{Text id}{RightParen )}{TypedText initWithTwoInts}{TypedText :}{LeftParen (}{Text inout }{Text int}{RightParen )}{Text x}{HorizontalSpace }{TypedText second:}{LeftParen (}{Text int}{RightParen )}{Text y}{HorizontalSpace }{LeftBrace {}{VerticalSpace // CHECK-CC4: ObjCInstanceMethodDecl:{LeftParen (}{Text int}{RightParen )}{TypedText setValue}{TypedText :}{LeftParen (}{Text int}{RightParen )}{Text x}{HorizontalSpace }{LeftBrace {}{VerticalSpace // RUN: env CINDEXTEST_CODE_COMPLETE_PATTERNS=1 c-index-test -code-completion-at=%s:33:8 %s | FileCheck -check-prefix=CHECK-CC5 %s -// CHECK-CC5: ObjCInstanceMethodDecl:{TypedText 
getInt}{HorizontalSpace }{LeftBrace {}{VerticalSpace }{Text return}{HorizontalSpace }{Placeholder expression}{SemiColon ;}{VerticalSpace }{RightBrace }} (42) (parent: ObjCProtocolDecl 'P1') -// CHECK-CC5: ObjCInstanceMethodDecl:{TypedText getSecondValue}{HorizontalSpace }{LeftBrace {}{VerticalSpace }{Text return}{HorizontalSpace }{Placeholder expression}{SemiColon ;}{VerticalSpace }{RightBrace }} (40) (parent: ObjCInterfaceDecl 'B') +// CHECK-CC5: ObjCInstanceMethodDecl:{TypedText getInt}{HorizontalSpace }{LeftBrace {}{VerticalSpace }{Text return}{HorizontalSpace }{Placeholder expression}{SemiColon ;}{VerticalSpace }{RightBrace }} (42) +// CHECK-CC5: ObjCInstanceMethodDecl:{TypedText getSecondValue}{HorizontalSpace }{LeftBrace {}{VerticalSpace }{Text return}{HorizontalSpace }{Placeholder expression}{SemiColon ;}{VerticalSpace }{RightBrace }} (40) // CHECK-CC5-NOT: {TypedText getSelf}{HorizontalSpace }{LeftBrace {}{VerticalSpace // CHECK-CC5: ObjCInstanceMethodDecl:{TypedText setValue}{TypedText :}{LeftParen (}{Text int}{RightParen )}{Text x}{HorizontalSpace }{LeftBrace {}{VerticalSpace // RUN: env CINDEXTEST_CODE_COMPLETE_PATTERNS=1 c-index-test -code-completion-at=%s:37:7 %s | FileCheck -check-prefix=CHECK-CC6 %s @@ -181,4 +181,4 @@ // RUN: c-index-test -code-completion-at=%s:72:2 %s | FileCheck -check-prefix=CHECK-ONEWAY %s -// CHECK-ONEWAY: ObjCInstanceMethodDecl:{LeftParen (}{Text oneway }{Text void}{RightParen )}{TypedText method}{TypedText :}{LeftParen (}{Text in }{Text id}{RightParen )}{Text x} (40) (parent: ObjCInterfaceDecl 'Passing') +// CHECK-ONEWAY: ObjCInstanceMethodDecl:{LeftParen (}{Text oneway }{Text void}{RightParen )}{TypedText method}{TypedText :}{LeftParen (}{Text in }{Text id}{RightParen )}{Text x} (40) diff --git a/test/Index/complete-objc-message.m b/test/Index/complete-objc-message.m index 1835c3e..aa10ea2 100644 --- a/test/Index/complete-objc-message.m +++ b/test/Index/complete-objc-message.m @@ -190,11 +190,11 @@ void test_DO(DO *d, A* a) { } // RUN: c-index-test -code-completion-at=%s:23:19 %s | FileCheck -check-prefix=CHECK-CC1 %s -// CHECK-CC1: {TypedText categoryClassMethod} (35) (parent: ObjCCategoryDecl 'Foo(FooTestCategory)') -// CHECK-CC1: {TypedText classMethod1:}{Placeholder (id)}{HorizontalSpace }{TypedText withKeyword:}{Placeholder (int)} (35) (parent: ObjCInterfaceDecl 'Foo') -// CHECK-CC1: {TypedText classMethod2} (35) (parent: ObjCInterfaceDecl 'Foo') -// CHECK-CC1: {TypedText new} (35) (parent: ObjCInterfaceDecl 'Foo') -// CHECK-CC1: {TypedText protocolClassMethod} (37) (parent: ObjCProtocolDecl 'FooTestProtocol') +// CHECK-CC1: {TypedText categoryClassMethod} (35) +// CHECK-CC1: {TypedText classMethod1:}{Placeholder (id)}{HorizontalSpace }{TypedText withKeyword:}{Placeholder (int)} (35) +// CHECK-CC1: {TypedText classMethod2} (35) +// CHECK-CC1: {TypedText new} (35) +// CHECK-CC1: {TypedText protocolClassMethod} (37) // CHECK-CC1: Completion contexts: // CHECK-CC1-NEXT: Objective-C class method // CHECK-CC1-NEXT: Container Kind: ObjCInterfaceDecl @@ -309,7 +309,7 @@ void test_DO(DO *d, A* a) { // RUN: c-index-test -code-completion-at=%s:170:16 %s | FileCheck -check-prefix=CHECK-CLASS-RESULT %s // CHECK-CLASS-RESULT: ObjCClassMethodDecl:{ResultType void}{TypedText class_method3} (35) -// CHECK-CLASS-RESULT: ObjCClassMethodDecl:{ResultType void}{TypedText class_method4} (35) (parent: ObjCCategoryDecl 'A(Cat)') +// CHECK-CLASS-RESULT: ObjCClassMethodDecl:{ResultType void}{TypedText class_method4} (35) // RUN: c-index-test -code-completion-at=%s:181:4 
%s | FileCheck -check-prefix=CHECK-BLOCK-RECEIVER %s // CHECK-BLOCK-RECEIVER: ObjCInterfaceDecl:{TypedText A} (50) diff --git a/test/Index/complete-preamble.cpp b/test/Index/complete-preamble.cpp index 8f48105..61fb90a 100644 --- a/test/Index/complete-preamble.cpp +++ b/test/Index/complete-preamble.cpp @@ -4,5 +4,5 @@ void f() { } // RUN: env CINDEXTEST_EDITING=1 c-index-test -code-completion-at=%s:3:8 %s -o - | FileCheck -check-prefix=CC1 %s -// CHECK-CC1: {ResultType void}{TypedText wibble}{LeftParen (}{RightParen )} (50) (parent: Namespace 'std') +// CHECK-CC1: {ResultType void}{TypedText wibble}{LeftParen (}{RightParen )} (50) diff --git a/test/Index/complete-preprocessor.m b/test/Index/complete-preprocessor.m index bea9d32..baeb4e9 100644 --- a/test/Index/complete-preprocessor.m +++ b/test/Index/complete-preprocessor.m @@ -53,8 +53,8 @@ FOO(in,t) value; // CHECK-CC2-NEXT: NotImplemented:{TypedText undef}{HorizontalSpace }{Placeholder macro} (40) // CHECK-CC2-NEXT: NotImplemented:{TypedText warning}{HorizontalSpace }{Placeholder message} (40) // RUN: c-index-test -code-completion-at=%s:9:8 %s | FileCheck -check-prefix=CHECK-CC3 %s -// CHECK-CC3: NotImplemented:{TypedText BAR} (40) -// CHECK-CC3: NotImplemented:{TypedText FOO} (40) +// CHECK-CC3: macro definition:{TypedText BAR} (40) +// CHECK-CC3: macro definition:{TypedText FOO} (40) // RUN: c-index-test -code-completion-at=%s:11:12 %s | FileCheck -check-prefix=CHECK-CC3 %s // RUN: c-index-test -code-completion-at=%s:11:13 %s | FileCheck -check-prefix=CHECK-CC3 %s // RUN: c-index-test -code-completion-at=%s:11:5 %s | FileCheck -check-prefix=CHECK-CC4 %s diff --git a/test/Index/complete-property-flags.m b/test/Index/complete-property-flags.m index f2e08c3..86ee8e2 100644 --- a/test/Index/complete-property-flags.m +++ b/test/Index/complete-property-flags.m @@ -6,7 +6,8 @@ } @property(copy) Foo *myprop; @property(retain, nonatomic) id xx; -// RUN: c-index-test -code-completion-at=%s:7:11 %s | FileCheck -check-prefix=CHECK-CC1 %s + +// RUN: c-index-test -code-completion-at=%s:7:11 %s -fno-objc-arc | FileCheck -check-prefix=CHECK-CC1 %s // CHECK-CC1: {TypedText assign} // CHECK-CC1-NEXT: {TypedText atomic} // CHECK-CC1-NEXT: {TypedText copy} @@ -18,9 +19,26 @@ // CHECK-CC1-NEXT: {TypedText setter}{Text = }{Placeholder method} // CHECK-CC1-NEXT: {TypedText strong} // CHECK-CC1-NEXT: {TypedText unsafe_unretained} +// CHECK-CC1-NOT: {TypedText weak} + +// RUN: c-index-test -code-completion-at=%s:7:11 %s -fobjc-arc -fobjc-runtime=macosx-10.7 | FileCheck -check-prefix=CHECK-CC1-ARC %s +// CHECK-CC1-ARC: {TypedText assign} +// CHECK-CC1-ARC-NEXT: {TypedText atomic} +// CHECK-CC1-ARC-NEXT: {TypedText copy} +// CHECK-CC1-ARC-NEXT: {TypedText getter}{Text = }{Placeholder method} +// CHECK-CC1-ARC-NEXT: {TypedText nonatomic} +// CHECK-CC1-ARC-NEXT: {TypedText readonly} +// CHECK-CC1-ARC-NEXT: {TypedText readwrite} +// CHECK-CC1-ARC-NEXT: {TypedText retain} +// CHECK-CC1-ARC-NEXT: {TypedText setter}{Text = }{Placeholder method} +// CHECK-CC1-ARC-NEXT: {TypedText strong} +// CHECK-CC1-ARC-NEXT: {TypedText unsafe_unretained} +// CHECK-CC1-ARC-NEXT: {TypedText weak} + // RUN: c-index-test -code-completion-at=%s:8:18 %s | FileCheck -check-prefix=CHECK-CC2 %s // CHECK-CC2: {TypedText getter}{Text = }{Placeholder method} // CHECK-CC2-NEXT: {TypedText nonatomic} +// CHECK-CC2-NEXT: {TypedText readonly} // CHECK-CC2-NEXT: {TypedText readwrite} // CHECK-CC2-NEXT: {TypedText setter}{Text = }{Placeholder method} @end diff --git 
a/test/Index/complete-qualified.cpp b/test/Index/complete-qualified.cpp index f5c032c..38a678a 100644 --- a/test/Index/complete-qualified.cpp +++ b/test/Index/complete-qualified.cpp @@ -14,7 +14,7 @@ void foo() Foo:: // RUN: c-index-test -code-completion-at=%s:14:8 %s -o - | FileCheck -check-prefix=CC1 %s -// CHECK-CC1: FieldDecl:{ResultType C}{TypedText c} (35) (parent: ClassDecl 'Foo') -// CHECK-CC1: ClassDecl:{TypedText Foo} (35) (parent: ClassDecl 'Foo') -// CHECK-CC1: CXXMethod:{ResultType Foo &}{TypedText operator=}{LeftParen (}{Placeholder const Foo &}{RightParen )} (35) (parent: ClassDecl 'Foo') -// CHECK-CC1: CXXDestructor:{ResultType void}{TypedText ~Foo}{LeftParen (}{RightParen )} (35) (parent: ClassDecl 'Foo') +// CHECK-CC1: FieldDecl:{ResultType C}{TypedText c} (35) +// CHECK-CC1: ClassDecl:{TypedText Foo} (35) +// CHECK-CC1: CXXMethod:{ResultType Foo &}{TypedText operator=}{LeftParen (}{Placeholder const Foo &}{RightParen )} +// CHECK-CC1: CXXDestructor:{ResultType void}{TypedText ~Foo}{LeftParen (}{RightParen )} (35) diff --git a/test/Index/cursor-dynamic-call.mm b/test/Index/cursor-dynamic-call.mm index f9d6a87..ac9e6d3 100644 --- a/test/Index/cursor-dynamic-call.mm +++ b/test/Index/cursor-dynamic-call.mm @@ -50,10 +50,12 @@ void foo(SS *ss, IS* is, Class cls) { // CHECK: 8:11 MemberRefExpr=meth:3:16 {{.*}} Dynamic-call // CHECK-NOT: 9:9 {{.*}} Dynamic-call -// CHECK: 25:3 ObjCMessageExpr=meth:14:8 {{.*}} Dynamic-call +// CHECK: 25:3 ObjCMessageExpr=meth:14:8 {{.*}} Dynamic-call Receiver-type=ObjCObjectPointer // CHECK-NOT: 26:3 {{.*}} Dynamic-call // CHECK-NOT: 29:3 {{.*}} Dynamic-call +// CHECK: 29:3 {{.*}} Receiver-type=ObjCInterface // CHECK: 34:7 MemberRefExpr=meth:3:16 {{.*}} Dynamic-call -// CHECK: 35:3 ObjCMessageExpr=meth:14:8 {{.*}} Dynamic-call +// CHECK: 35:3 ObjCMessageExpr=meth:14:8 {{.*}} Dynamic-call Receiver-type=ObjCObjectPointer // CHECK-NOT: 36:3 {{.*}} Dynamic-call -// CHECK: 37:3 ObjCMessageExpr=ClsMeth:15:8 {{.*}} Dynamic-call +// CHECK: 36:3 {{.*}} Receiver-type=ObjCInterface +// CHECK: 37:3 ObjCMessageExpr=ClsMeth:15:8 {{.*}} Dynamic-call Receiver-type=ObjCClass diff --git a/test/Index/get-cursor-macro-args.h b/test/Index/get-cursor-macro-args.h index 40ec8dc..70d0dc7 100644 --- a/test/Index/get-cursor-macro-args.h +++ b/test/Index/get-cursor-macro-args.h @@ -2,8 +2,8 @@ +(void)meth; @end -#define MACRO2(x) x -#define MACRO(x) MACRO2(x) +#define MACRO2(x) (x) +#define MACRO(x) MACRO2((x)) void test() { MACRO([MyClass meth]); diff --git a/test/Index/get-cursor-macro-args.m b/test/Index/get-cursor-macro-args.m index 4945fd3..d5ab728 100644 --- a/test/Index/get-cursor-macro-args.m +++ b/test/Index/get-cursor-macro-args.m @@ -1,14 +1,18 @@ // Test without PCH // RUN: c-index-test -cursor-at=%S/get-cursor-macro-args.h:9:12 \ // RUN: -cursor-at=%S/get-cursor-macro-args.h:9:21 \ +// RUN: -cursor-at=%S/get-cursor-macro-args.h:9:9 \ +// RUN: -cursor-at=%S/get-cursor-macro-args.h:9:22 \ // RUN: -cursor-at=%S/get-cursor-macro-args.h:15:12 \ // RUN: -cursor-at=%S/get-cursor-macro-args.h:15:20 \ -// RUN: %s -include get-cursor-macro-args.h | FileCheck %s +// RUN: %s -include %S/get-cursor-macro-args.h | FileCheck %s // Test with PCH // RUN: c-index-test -write-pch %t.pch -x objective-c-header %S/get-cursor-macro-args.h // RUN: c-index-test -cursor-at=%S/get-cursor-macro-args.h:9:12 \ // RUN: -cursor-at=%S/get-cursor-macro-args.h:9:21 \ +// RUN: -cursor-at=%S/get-cursor-macro-args.h:9:9 \ +// RUN: -cursor-at=%S/get-cursor-macro-args.h:9:22 \ // RUN: 
-cursor-at=%S/get-cursor-macro-args.h:15:12 \ // RUN: -cursor-at=%S/get-cursor-macro-args.h:15:20 \ // RUN: %s -include-pch %t.pch | FileCheck %s @@ -16,4 +20,6 @@ // CHECK: ObjCClassRef=MyClass:1:12 // CHECK-NEXT: ObjCMessageExpr=meth:2:8 // CHECK-NEXT: ObjCMessageExpr=meth:2:8 +// CHECK-NEXT: ObjCMessageExpr=meth:2:8 +// CHECK-NEXT: ObjCMessageExpr=meth:2:8 // CHECK-NEXT: ObjCClassRef=MyClass:1:12 diff --git a/test/Index/get-cursor.m b/test/Index/get-cursor.m index d3da9ec..7eaa3a0 100644 --- a/test/Index/get-cursor.m +++ b/test/Index/get-cursor.m @@ -91,6 +91,14 @@ void foo3(Test3 *test3) { } @end +@interface Test6 +@property (assign) id prop1; +@end + +@implementation Test6 +@synthesize prop1 = _prop1; +@end + // RUN: c-index-test -cursor-at=%s:4:28 -cursor-at=%s:5:28 %s | FileCheck -check-prefix=CHECK-PROP %s // CHECK-PROP: ObjCPropertyDecl=foo1:4:26 // CHECK-PROP: ObjCPropertyDecl=foo2:5:27 @@ -126,11 +134,14 @@ void foo3(Test3 *test3) { // CHECK-SPELLRANGE: 70:22 ObjCClassRef=Forw3:70:22 Extent=[70:22 - 70:27] Spelling=Forw3 ([70:22 - 70:27]) // RUN: c-index-test -cursor-at=%s:83:15 -cursor-at=%s:83:21 \ -// RUN: -cursor-at=%s:84:12 -cursor-at=%s:84:20 %s | FileCheck -check-prefix=CHECK-MULTISYNTH %s +// RUN: -cursor-at=%s:84:12 -cursor-at=%s:84:20 \ +// RUN: -cursor-at=%s:99:14 -cursor-at=%s:99:23 %s | FileCheck -check-prefix=CHECK-MULTISYNTH %s // CHECK-MULTISYNTH: 83:13 ObjCSynthesizeDecl=prop1:76:23 (Definition) Extent=[83:1 - 83:18] Spelling=prop1 ([83:13 - 83:18]) // CHECK-MULTISYNTH: 83:20 ObjCSynthesizeDecl=prop2:77:23 (Definition) Extent=[83:1 - 83:25] Spelling=prop2 ([83:20 - 83:25]) // CHECK-MULTISYNTH: 84:10 ObjCDynamicDecl=prop3:78:23 (Definition) Extent=[84:1 - 84:15] Spelling=prop3 ([84:10 - 84:15]) // CHECK-MULTISYNTH: 84:17 ObjCDynamicDecl=prop4:79:23 (Definition) Extent=[84:1 - 84:22] Spelling=prop4 ([84:17 - 84:22]) +// CHECK-MULTISYNTH: 99:13 ObjCSynthesizeDecl=prop1:95:23 (Definition) Extent=[99:1 - 99:27] Spelling=prop1 ([99:13 - 99:18]) +// CHECK-MULTISYNTH: 99:21 MemberRef=_prop1:99:21 Extent=[99:21 - 99:27] Spelling=_prop1 ([99:21 - 99:27]) // RUN: c-index-test -cursor-at=%s:86:7 -cursor-at=%s:89:7 %s | FileCheck -check-prefix=CHECK-SELECTORLOC %s // CHECK-SELECTORLOC: 86:6 ObjCInstanceMethodDecl=meth1:86:6 (Definition) Extent=[86:1 - 88:2] Spelling=meth1 ([86:6 - 86:11]) Selector index=0 diff --git a/test/Index/index-decls.m b/test/Index/index-decls.m index 46d37c4..c6b14bb 100644 --- a/test/Index/index-decls.m +++ b/test/Index/index-decls.m @@ -26,6 +26,13 @@ __attribute__((something)) @interface I2 @end } @end +int test1() { + extern int extvar; + extvar = 2; + extern int extfn(); + return extfn(); +} + // RUN: c-index-test -index-file %s -target x86_64-apple-macosx10.7 > %t // RUN: FileCheck %s -input-file=%t // CHECK: [indexDeclaration]: kind: objc-class | name: I | {{.*}} | loc: 1:12 @@ -41,3 +48,9 @@ __attribute__((something)) @interface I2 @end // CHECK: [indexDeclaration]: kind: objc-ivar | name: _auto_prop | {{.*}} | loc: 20:33 // CHECK: [indexEntityReference]: kind: objc-ivar | name: _auto_prop | {{.*}} | loc: 25:3 + +// CHECK: [indexDeclaration]: kind: function | name: test1 | {{.*}} | loc: 29:5 +// CHECK: [indexDeclaration]: kind: variable | name: extvar | {{.*}} | loc: 30:14 +// CHECK: [indexEntityReference]: kind: variable | name: extvar | {{.*}} | loc: 31:3 +// CHECK: [indexDeclaration]: kind: function | name: extfn | {{.*}} | loc: 32:14 +// CHECK: [indexEntityReference]: kind: function | name: extfn | {{.*}} | loc: 33:10 diff --git 
a/test/Index/index-file.cpp b/test/Index/index-file.cpp new file mode 100644 index 0000000..bf2d62c --- /dev/null +++ b/test/Index/index-file.cpp @@ -0,0 +1,6 @@ +using MyTypeAlias = int; + +// RUN: c-index-test -index-file %s > %t +// RUN: FileCheck %s -input-file=%t + +// CHECK: [indexDeclaration]: kind: type-alias | name: MyTypeAlias | {{.*}} | loc: 1:7 diff --git a/test/Index/index-module.m b/test/Index/index-module.m new file mode 100644 index 0000000..0af4e37 --- /dev/null +++ b/test/Index/index-module.m @@ -0,0 +1,54 @@ + +#include +@__experimental_modules_import DependsOnModule; +int glob; + +// RUN: rm -rf %t.cache +// RUN: c-index-test -index-file %s -fmodule-cache-path %t.cache -fmodules -F %S/../Modules/Inputs \ +// RUN: -Xclang -fdisable-module-hash | FileCheck %s + +// CHECK-NOT: [indexDeclaration] +// CHECK: [importedASTFile]: [[PCM:.*[/\\]DependsOnModule\.pcm]] | loc: 2:2 | name: "DependsOnModule" | isImplicit: 1 +// CHECK-NEXT: [ppIncludedFile]: {{.*}}/Modules/Inputs/DependsOnModule.framework{{[/\\]}}Headers{{[/\\]}}DependsOnModule.h | name: "DependsOnModule/DependsOnModule.h" | hash loc: 2:1 | isImport: 0 | isAngled: 1 | isModule: 1 +// CHECK-NOT: [indexDeclaration] +// CHECK: [importedASTFile]: [[PCM]] | loc: 3:1 | name: "DependsOnModule" | isImplicit: 0 +// CHECK-NEXT: [indexDeclaration]: kind: variable | name: glob | {{.*}} | loc: 4:5 +// CHECK-NOT: [indexDeclaration] + +// RUN: c-index-test -index-tu %t.cache/DependsOnModule.pcm | FileCheck %s -check-prefix=CHECK-DMOD + +// CHECK-DMOD: [startedTranslationUnit] +// CHECK-DMOD-NEXT: [ppIncludedFile]: [[DMOD_MODULE_H:.*/Modules/Inputs/DependsOnModule\.framework[/\\]Headers[/\\]DependsOnModule\.h]] | {{.*}} | hash loc: +// CHECK-DMOD-NEXT: [ppIncludedFile]: {{.*}}/Modules/Inputs/Module.framework{{[/\\]}}Headers{{[/\\]}}Module.h | name: "Module/Module.h" | hash loc: {{.*}}/Modules/Inputs/DependsOnModule.framework{{[/\\]}}Headers{{[/\\]}}DependsOnModule.h:1:1 | isImport: 0 | isAngled: 1 | isModule: 1 +// CHECK-DMOD-NEXT: [ppIncludedFile]: [[DMOD_OTHER_H:.*/Modules/Inputs/DependsOnModule\.framework[/\\]Headers[/\\]other\.h]] | {{.*}} | hash loc: +// CHECK-DMOD-NEXT: [ppIncludedFile]: [[DMOD_SUB_H:.*/Modules/Inputs/DependsOnModule\.framework[/\\]Frameworks[/\\]SubFramework\.framework[/\\]Headers[/\\]SubFramework\.h]] | {{.*}} | hash loc: +// CHECK-DMOD-NEXT: [ppIncludedFile]: [[DMOD_SUB_OTHER_H:.*/Modules/Inputs/DependsOnModule.framework[/\\]Frameworks/SubFramework\.framework/Headers/Other\.h]] | name: "SubFramework/Other.h" | hash loc: [[DMOD_SUB_H]]:1:1 | isImport: 0 | isAngled: 0 +// CHECK-DMOD-NEXT: [ppIncludedFile]: [[DMOD_PRIVATE_H:.*/Modules/Inputs/DependsOnModule.framework[/\\]PrivateHeaders[/\\]DependsOnModulePrivate.h]] | {{.*}} | hash loc: +// CHECK-DMOD-NEXT: [importedASTFile]: {{.*}}.cache{{[/\\]}}Module.pcm | loc: [[DMOD_MODULE_H]]:1:2 | name: "Module" | isImplicit: 1 +// CHECK-DMOD-NEXT: [indexDeclaration]: kind: variable | name: depends_on_module_other | {{.*}} | loc: [[DMOD_OTHER_H]]:1:5 +// CHECK-DMOD-NEXT: [indexDeclaration]: kind: variable | name: sub_framework | {{.*}} | loc: [[DMOD_SUB_H]]:2:8 +// CHECK-DMOD-NEXT: [indexDeclaration]: kind: variable | name: sub_framework_other | {{.*}} | loc: [[DMOD_SUB_OTHER_H]]:1:9 +// CHECK-DMOD-NEXT: [indexDeclaration]: kind: variable | name: depends_on_module_private | {{.*}} | loc: [[DMOD_PRIVATE_H]]:1:5 +// CHECK-DMOD-NOT: [indexDeclaration] + +// RUN: c-index-test -index-tu %t.cache/Module.pcm | FileCheck %s -check-prefix=CHECK-TMOD + +// CHECK-TMOD: 
[startedTranslationUnit] +// CHECK-TMOD-NEXT: [ppIncludedFile]: [[TMOD_MODULE_H:.*/Modules/Inputs/Module\.framework[/\\]Headers[/\\]Module\.h]] | {{.*}} | hash loc: +// CHECK-TMOD-NEXT: [ppIncludedFile]: [[TMODHDR:.*/Modules/Inputs/Module.framework[/\\]Headers.]]Sub.h | name: "Module/Sub.h" | hash loc: [[TMOD_MODULE_H]]:23:1 | isImport: 0 | isAngled: 1 +// CHECK-TMOD-NEXT: [ppIncludedFile]: [[TMODHDR]]Sub2.h | name: "Module/Sub2.h" | hash loc: [[TMODHDR]]Sub.h:1:1 | isImport: 0 | isAngled: 1 +// CHECK-TMOD-NEXT: [ppIncludedFile]: [[TMODHDR]]Buried/Treasure.h | name: "Module/Buried/Treasure.h" | hash loc: [[TMOD_MODULE_H]]:24:1 | isImport: 0 | isAngled: 1 +// CHECK-TMOD-NEXT: [ppIncludedFile]: [[TMOD_SUB_H:.*[/\\]Modules[/\\]Inputs[/\\]Module\.framework[/\\]Frameworks[/\\]SubFramework\.framework[/\\]Headers[/\\]SubFramework\.h]] | {{.*}} | hash loc: +// CHECK-TMOD-NEXT: [indexDeclaration]: kind: function | name: getModuleVersion | {{.*}} | loc: [[TMOD_MODULE_H]]:9:13 +// CHECK-TMOD-NEXT: [indexDeclaration]: kind: objc-class | name: Module | {{.*}} | loc: [[TMOD_MODULE_H]]:15:12 +// CHECK-TMOD-NEXT: : kind: interface +// CHECK-TMOD-NEXT: [indexDeclaration]: kind: objc-class-method | name: version | {{.*}} | loc: [[TMOD_MODULE_H]]:16:1 +// CHECK-TMOD-NEXT: [indexDeclaration]: kind: objc-class-method | name: alloc | {{.*}} | loc: [[TMOD_MODULE_H]]:17:1 +// CHECK-TMOD-NEXT: [importedASTFile]: [[PCM:.*\.cache/Module\.pcm]] | loc: [[TMOD_MODULE_H]]:23:2 | name: "Module.Sub" | isImplicit: 1 +// CHECK-TMOD-NEXT: [importedASTFile]: [[PCM]] | loc: [[TMOD_MODULE_H]]:24:2 | name: "Module.Buried.Treasure" | isImplicit: 1 +// CHECK-TMOD-NEXT: [importedASTFile]: [[PCM]] | loc: [[TMODHDR]]Sub.h:1:2 | name: "Module.Sub2" | isImplicit: 1 +// CHECK-TMOD-NEXT: [indexDeclaration]: kind: variable | name: Module_Sub | {{.*}} | loc: [[TMODHDR]]Sub.h:2:6 +// CHECK-TMOD-NEXT: [indexDeclaration]: kind: variable | name: Module_Sub2 | USR: c:@Module_Sub2 | {{.*}} | loc: [[TMODHDR]]Sub2.h:1:6 +// CHECK-TMOD-NEXT: [indexDeclaration]: kind: variable | name: Buried_Treasure | {{.*}} | loc: [[TMODHDR]]Buried/Treasure.h:1:11 +// CHECK-TMOD-NEXT: [indexDeclaration]: kind: variable | name: module_subframework | {{.*}} | loc: [[TMOD_SUB_H]]:4:7 +// CHECK-TMOD-NOT: [indexDeclaration] diff --git a/test/Index/index-pch-with-module.m b/test/Index/index-pch-with-module.m new file mode 100644 index 0000000..ebab648 --- /dev/null +++ b/test/Index/index-pch-with-module.m @@ -0,0 +1,31 @@ + +#ifndef PCH_HEADER +#define PCH_HEADER + +#include +extern int pch_glob; + +#else + +int glob; + +#endif + +// RUN: rm -rf %t.cache +// RUN: c-index-test -write-pch %t.h.pch %s -fmodule-cache-path %t.cache -fmodules -F %S/../Modules/Inputs -Xclang -fdisable-module-hash +// RUN: c-index-test -index-file %s -include %t.h -fmodule-cache-path %t.cache -fmodules -F %S/../Modules/Inputs \ +// RUN: -Xclang -fdisable-module-hash | FileCheck %s + +// CHECK-NOT: [indexDeclaration] +// CHECK: [importedASTFile]: {{.*}}.h.pch +// CHECK-NEXT: [enteredMainFile]: {{.*[/\\]}}index-pch-with-module.m +// CHECK-NEXT: [startedTranslationUnit] +// CHECK-NEXT: [indexDeclaration]: kind: variable | name: glob | {{.*}} | loc: 10:5 +// CHECK-NOT: [indexDeclaration] + +// RUN: c-index-test -index-tu %t.h.pch | FileCheck %s -check-prefix=CHECK-PCH + +// CHECK-PCH: [enteredMainFile]: {{.*[/\\]}}index-pch-with-module.m +// CHECK-PCH: [startedTranslationUnit] +// CHECK-PCH: [importedASTFile]: {{.*}}.cache{{[/\\]}}DependsOnModule.pcm | loc: 5:2 | name: "DependsOnModule" | 
isImplicit: 1 +// CHECK-PCH: [indexDeclaration]: kind: variable | name: pch_glob | {{.*}} | loc: 6:12 diff --git a/test/Index/index-pch.cpp b/test/Index/index-pch.cpp new file mode 100644 index 0000000..c8da7b2 --- /dev/null +++ b/test/Index/index-pch.cpp @@ -0,0 +1,6 @@ +// RUN: c-index-test -write-pch %t.pch -fshort-wchar %s +// RUN: c-index-test -index-tu %t.pch | FileCheck %s + +const wchar_t *wideStr = L"123"; + +// CHECK: [indexDeclaration]: kind: variable | name: wideStr diff --git a/test/Index/index-with-working-dir.c b/test/Index/index-with-working-dir.c new file mode 100644 index 0000000..de1f1eb --- /dev/null +++ b/test/Index/index-with-working-dir.c @@ -0,0 +1,5 @@ + +void foo(); + +// RUN: c-index-test -index-file -working-directory=%S %s | FileCheck %s +// CHECK: [indexDeclaration]: kind: function | name: foo diff --git a/test/Index/overrides.cpp b/test/Index/overrides.cpp index 698b256..a711d82 100644 --- a/test/Index/overrides.cpp +++ b/test/Index/overrides.cpp @@ -15,6 +15,9 @@ struct D : C { virtual void f(int); }; +void C::g() {} + // RUN: c-index-test -test-load-source local %s | FileCheck %s // CHECK: overrides.cpp:11:16: CXXMethod=g:11:16 (virtual) [Overrides @7:16] Extent=[11:3 - 11:19] // CHECK: overrides.cpp:15:16: CXXMethod=f:15:16 (virtual) [Overrides @2:16, @6:16] Extent=[15:3 - 15:22] +// CHECK: overrides.cpp:18:9: CXXMethod=g:18:9 (Definition) (virtual) [Overrides @7:16] Extent=[18:1 - 18:15] diff --git a/test/Index/overrides.m b/test/Index/overrides.m index d0447b7..d2f1506 100644 --- a/test/Index/overrides.m +++ b/test/Index/overrides.m @@ -99,11 +99,11 @@ // RUN: c-index-test -test-load-source local %s | FileCheck %s // CHECK: overrides.m:12:9: ObjCInstanceMethodDecl=protoMethod:12:9 [Overrides @3:9] // CHECK: overrides.m:22:9: ObjCInstanceMethodDecl=method:22:9 [Overrides @16:9] -// CHECK: overrides.m:23:9: ObjCInstanceMethodDecl=protoMethod:23:9 [Overrides @12:9, @8:9, @32:9, @17:9] +// CHECK: overrides.m:23:9: ObjCInstanceMethodDecl=protoMethod:23:9 [Overrides @8:9, @12:9, @17:9, @32:9] // CHECK: overrides.m:27:9: ObjCInstanceMethodDecl=method:27:9 (Definition) [Overrides @16:9] // CHECK: overrides.m:28:9: ObjCClassMethodDecl=methodWithParam::28:9 (Definition) [Overrides @18:9] // CHECK: overrides.m:32:9: ObjCInstanceMethodDecl=protoMethod:32:9 [Overrides @8:9] -// CHECK: overrides.m:36:9: ObjCInstanceMethodDecl=protoMethod:36:9 [Overrides @12:9, @8:9, @32:9, @17:9] +// CHECK: overrides.m:36:9: ObjCInstanceMethodDecl=protoMethod:36:9 [Overrides @8:9, @12:9, @17:9, @32:9] // CHECK: overrides.m:50:8: ObjCInstanceMethodDecl=meth:50:8 (Definition) [Overrides @43:8] // CHECK: overrides.m:55:8: ObjCInstanceMethodDecl=kol:55:8 Extent=[55:1 - 55:12] // CHECK: overrides.m:65:26: ObjCInstanceMethodDecl=prop1:65:26 [Overrides @59:25] Extent=[65:26 - 65:31] diff --git a/test/Index/overriding-ftemplate-comments.cpp b/test/Index/overriding-ftemplate-comments.cpp new file mode 100644 index 0000000..2c5f539 --- /dev/null +++ b/test/Index/overriding-ftemplate-comments.cpp @@ -0,0 +1,79 @@ +// RUN: rm -rf %t +// RUN: mkdir %t +// RUN: c-index-test -test-load-source all -comments-xml-schema=%S/../../bindings/xml/comment-xml-schema.rng %s > %t/out +// RUN: FileCheck %s < %t/out +// Test to search overridden methods for documentation when overriding method has none. rdar://12378793 + +// Ensure that XML we generate is not invalid. 
+// RUN: FileCheck %s -check-prefix=WRONG < %t/out +// WRONG-NOT: CommentXMLInvalid + +/// \tparam +/// \param AAA Blah blah +template +void comment_to_html_conversion_17(T AAA); + +template +void comment_to_html_conversion_17(T PPP); + +/// \tparam BBB Bbb +/// \tparam AAA Aaa +template +void comment_to_html_conversion_19(AAA aaa, BBB bbb); + +template +void comment_to_html_conversion_19(PPP aaa, QQQ bbb); + +/// \tparam BBB Bbb +/// \tparam UUU Zzz +/// \tparam CCC Ccc +/// \tparam AAA Aaa +template +void comment_to_html_conversion_20(AAA aaa, BBB bbb); + +template +void comment_to_html_conversion_20(PPP aaa, QQQ bbb); + +/// \tparam AAA Aaa +/// \tparam BBB Bbb +/// \tparam CCC Ccc +/// \tparam DDD Ddd +template class DDD, class BBB> class AAA> +void comment_to_html_conversion_21(); + +template class SSS, class QQQ> class PPP> +void comment_to_html_conversion_21(); + +/// \tparam C1 Ccc 1 +/// \tparam AAA Zzz +/// \tparam C2 Ccc 2 +/// \tparam C3 Ccc 3 +/// \tparam C4 Ccc 4 +/// \tparam BBB Bbb +template class BBB> class AAA> +void comment_to_html_conversion_22(); + + +template class QQQ> class PPP> +void comment_to_html_conversion_22(); + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_17c:@FT@>1#Tcomment_to_html_conversion_17#t0.0#template <typename T> void comment_to_html_conversion_17(T AAA)AAA0in Blah blah] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_17c:@FT@>1#Tcomment_to_html_conversion_17#t0.0#template <typename T> void comment_to_html_conversion_17(T PPP)PPP0in Blah blah] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_19c:@FT@>2#T#Tcomment_to_html_conversion_19#t0.0#t0.1#template <typename AAA, typename BBB> void comment_to_html_conversion_19(AAA aaa, BBB bbb)AAA0 AaaBBB1 Bbb ] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_19c:@FT@>2#T#Tcomment_to_html_conversion_19#t0.0#t0.1#template <typename PPP, typename QQQ> void comment_to_html_conversion_19(PPP aaa, QQQ bbb)PPP0 AaaQQQ1 Bbb ] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_20c:@FT@>3#T#T#NIcomment_to_html_conversion_20#t0.0#t0.1#template <typename AAA, typename BBB, int CCC> void comment_to_html_conversion_20(AAA aaa, BBB bbb)AAA0 AaaBBB1 Bbb CCC2 Ccc UUU Zzz ] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_20c:@FT@>3#T#T#NIcomment_to_html_conversion_20#t0.0#t0.1#template <typename PPP, typename QQQ, int RRR> void comment_to_html_conversion_20(PPP aaa, QQQ bbb)PPP0 AaaQQQ1 Bbb RRR2 Ccc UUU Zzz ] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_21c:@FT@>1#t>2#t>1#T#Tcomment_to_html_conversion_21#template <template <template <typename CCC> class DDD, class BBB> class AAA> void comment_to_html_conversion_21()AAA0 Aaa BBB Bbb CCC Ccc DDD Ddd] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_21c:@FT@>1#t>2#t>1#T#Tcomment_to_html_conversion_21#template <template <template <typename RRR> class SSS, class QQQ> class PPP> void comment_to_html_conversion_21()PPP0 Aaa QQQ Bbb RRR Ccc SSS Ddd] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_22c:@FT@>2#T#t>2#T#t>2#T#Tcomment_to_html_conversion_22#template <class C1, template <class C2, template <class C3, class C4> class BBB> class AAA> void comment_to_html_conversion_22()C10 Ccc 1 AAA1 Zzz C2 Ccc 2 C3 Ccc 3 C4 Ccc 4 BBB Bbb] + +// CHECK: FullCommentAsXML=[comment_to_html_conversion_22c:@FT@>2#T#t>2#T#t>2#T#Tcomment_to_html_conversion_22#template <class CCC1, template <class CCC2, template <class CCC3, class CCC4> class QQQ> class PPP> void comment_to_html_conversion_22()CCC10 Ccc 1 PPP1 Zzz 
CCC2 Ccc 2 CCC3 Ccc 3 CCC4 Ccc 4 QQQ Bbb] + diff --git a/test/Index/overriding-method-comments.mm b/test/Index/overriding-method-comments.mm new file mode 100644 index 0000000..e718138 --- /dev/null +++ b/test/Index/overriding-method-comments.mm @@ -0,0 +1,124 @@ +// RUN: rm -rf %t +// RUN: mkdir %t +// RUN: c-index-test -test-load-source all -comments-xml-schema=%S/../../bindings/xml/comment-xml-schema.rng %s > %t/out +// RUN: FileCheck %s < %t/out +// Test to search overridden methods for documentation when overriding method has none. rdar://12378793 + +// Ensure that XML we generate is not invalid. +// RUN: FileCheck %s -check-prefix=WRONG < %t/out +// WRONG-NOT: CommentXMLInvalid + +@protocol P +- (void)METH:(id)PPP; +@end + +@interface Root
<P>
    +/** + * \param[in] AAA ZZZ + */ +- (void)METH:(id)AAA; +@end + +@interface Sub : Root +@end + +@interface Sub (CAT) +- (void)METH:(id)BBB; +@end + +@implementation Sub(CAT) +- (void)METH:(id)III {} +@end + +@interface Redec : Root +@end + +@interface Redec() +/** + * \param[in] AAA input value + * \param[out] CCC output value is int + * \param[in] BBB 2nd input value is double + */ +- (void)EXT_METH:(id)AAA : (double)BBB : (int)CCC; +@end + +@implementation Redec +- (void)EXT_METH:(id)PPP : (double)QQQ : (int)RRR {} +@end + +struct Base { + /// \brief Does something. + /// \param AAA argument to foo_pure. + virtual void foo_pure(int AAA) = 0; + + /// \brief Does something. + /// \param BBB argument to defined virtual. + virtual void foo_inline(int BBB) {} + + /// \brief Does something. + /// \param CCC argument to undefined virtual. + virtual void foo_outofline(int CCC); +}; + +void Base::foo_outofline(int RRR) {} + +struct Derived : public Base { + virtual void foo_pure(int PPP); + + virtual void foo_inline(int QQQ) {} +}; + +/// \brief Does something. +/// \param DDD a value. +void foo(int DDD); + +void foo(int SSS) {} + +/// \brief Does something. +/// \param EEE argument to function decl. +void foo1(int EEE); + +void foo1(int TTT); + +/// \brief Documentation +/// \tparam BBB The type, silly. +/// \tparam AAA The type, silly as well. +template +void foo(AAA, BBB); + +template +void foo(PPP, QQQ); + +// CHECK: FullCommentAsXML=[METH:c:objc(cs)Root(im)METH:- (void) METH:(id)AAAAAA0in ZZZ ] + +// CHECK: FullCommentAsXML=[METH:c:objc(cs)Root(im)METH:- (void) METH:(id)BBBBBB0in ZZZ ] + +// CHECK: FullCommentAsXML=[METH:c:objc(cs)Root(im)METH:- (void) METH:(id)IIIIII0in ZZZ ] + +// CHECK: FullCommentAsXML=[EXT_METH:::c:objc(cs)Redec(im)EXT_METH:::- (void) EXT_METH:(id)AAA :(double)BBB :(int)CCCAAA0in input value BBB1in 2nd input value is double CCC2out output value is int ] + +// CHECK: FullCommentAsXML=[EXT_METH:::c:objc(cs)Redec(im)EXT_METH:::- (void) EXT_METH:(id)PPP :(double)QQQ :(int)RRRPPP0in input value QQQ1in 2nd input value is double RRR2out output value is int ] + +// CHECK: FullCommentAsXML=[foo_purec:@S@Base@F@foo_pure#I#virtual void foo_pure(int AAA) = 0 Does something. AAA0in argument to foo_pure.] + +// CHECK: FullCommentAsXML=[foo_inlinec:@S@Base@F@foo_inline#I#virtual void foo_inline(int BBB) Does something. BBB0in argument to defined virtual.] + +// CHECK: FullCommentAsXML=[foo_outoflinec:@S@Base@F@foo_outofline#I#virtual void foo_outofline(int CCC) Does something. CCC0in argument to undefined virtual.] + +// CHECK: FullCommentAsXML=[foo_outoflinec:@S@Base@F@foo_outofline#I#void foo_outofline(int RRR) Does something. RRR0in argument to undefined virtual.] + +// CHECK: FullCommentAsXML=[foo_purec:@S@Base@F@foo_pure#I#virtual void foo_pure(int PPP) Does something. PPP0in argument to foo_pure.] + +// CHECK: FullCommentAsXML=[foo_inlinec:@S@Base@F@foo_inline#I#virtual void foo_inline(int QQQ) Does something. QQQ0in argument to defined virtual.] + +// CHECK: FullCommentAsXML=[fooc:@F@foo#I#void foo(int DDD) Does something. DDD0in a value.] + +// CHECK: FullCommentAsXML=[fooc:@F@foo#I#void foo(int SSS) Does something. SSS0in a value.] + +// CHECK: FullCommentAsXML=[foo1c:@F@foo1#I#void foo1(int EEE) Does something. EEE0in argument to function decl. ] + +// CHECK: FullCommentAsXML=[foo1c:@F@foo1#I#void foo1(int TTT) Does something. TTT0in argument to function decl. 
] + +// CHECK: FullCommentAsXML=[fooc:@FT@>2#T#Tfoo#t0.0#t0.1#template <typename AAA, typename BBB> void foo(AAA, BBB) Documentation AAA0 The type, silly as well.BBB1 The type, silly. ] + +// CHECK: FullCommentAsXML=[fooc:@FT@>2#T#Tfoo#t0.0#t0.1#template <typename PPP, typename QQQ> void foo(PPP, QQQ) Documentation PPP0 The type, silly as well.QQQ1 The type, silly. ] diff --git a/test/Index/preamble-reparse-with-BOM.m b/test/Index/preamble-reparse-with-BOM.m new file mode 100644 index 0000000..a2a89c8 --- /dev/null +++ b/test/Index/preamble-reparse-with-BOM.m @@ -0,0 +1,6 @@ + +@interface I2 +@end + +// RUN: env CINDEXTEST_EDITING=1 CINDEXTEST_FAILONERROR=1 \ +// RUN: c-index-test -test-load-source-reparse 1 local %s diff --git a/test/Index/rdar12316296-codecompletion.m b/test/Index/rdar12316296-codecompletion.m new file mode 100644 index 0000000..f588a99 --- /dev/null +++ b/test/Index/rdar12316296-codecompletion.m @@ -0,0 +1,23 @@ +// RUN: c-index-test -write-pch %t.h.pch %s +// RUN: c-index-test -code-completion-at=%s:19:1 %s -include %t.h | FileCheck %s + +// clang Code Completion returns nothing but preprocessor macros + +#ifndef HEADER +#define HEADER + +@interface I +@end + +// CHECK: FunctionDecl:{ResultType void}{TypedText foo} +void foo(); + +#else + +@implementation I +-(void)meth { + +} +@end + +#endif diff --git a/test/Index/recursive-cxx-member-calls.cpp b/test/Index/recursive-cxx-member-calls.cpp index 9b93872..501dc29 100644 --- a/test/Index/recursive-cxx-member-calls.cpp +++ b/test/Index/recursive-cxx-member-calls.cpp @@ -1523,7 +1523,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo * Name) { // CHECK-tokens: Punctuation: ";" [185:31 - 185:32] CompoundStmt= // CHECK-tokens: Punctuation: "}" [186:1 - 186:2] CompoundStmt= -// RUN: c-index-test -test-load-source all %s 2>&1 | FileCheck %s +// RUN: c-index-test -test-load-source all %s -std=c++98 2>&1 | FileCheck %s // CHECK: 1:27: TypedefDecl=__darwin_size_t:1:27 (Definition) Extent=[1:1 - 1:42] // CHECK: 2:25: TypedefDecl=size_t:2:25 (Definition) Extent=[2:1 - 2:31] // CHECK: 2:9: TypeRef=__darwin_size_t:1:27 Extent=[2:9 - 2:24] diff --git a/test/Index/retain-comments-from-system-headers.c b/test/Index/retain-comments-from-system-headers.c new file mode 100644 index 0000000..a9b2e2f --- /dev/null +++ b/test/Index/retain-comments-from-system-headers.c @@ -0,0 +1,19 @@ +// Run lines are sensitive to line numbers and come below the code. + +#include "retain-comments-from-system-headers.h" + +/** + * user_function + * \param a Aaa. 
+ */ +int user_function(int a); + +// RUN: c-index-test -test-load-source all %s -I %S/Inputs | FileCheck %s +// RUN: c-index-test -test-load-source all %s -fretain-comments-from-system-headers -I %S/Inputs | FileCheck %s -check-prefix=RETAIN + +// CHECK: retain-comments-from-system-headers.h:7:5: FunctionDecl=system_function:7:5 Extent=[7:1 - 7:27] +// CHECK: retain-comments-from-system-headers.c:9:5: FunctionDecl=user_function:9:5 RawComment=[/**\n * user_function\n * \param a Aaa.\n */] RawCommentRange=[5:1 - 8:4] BriefComment=[user_function] + +// CHECK-RETAIN: retain-comments-from-system-headers.h:7:5: FunctionDecl=system_function:7:5 RawComment=[/**\n * system_function\n * \param a Aaa.\n */] RawCommentRange=[3:1 - 6:4] BriefComment=[system_function] +// CHECK-RETAIN: retain-comments-from-system-headers.c:9:5: FunctionDecl=user_function:9:5 RawComment=[/**\n * user_function\n * \param a Aaa.\n */] RawCommentRange=[5:1 - 8:4] BriefComment=[user_function] + diff --git a/test/Index/usrs.m b/test/Index/usrs.m index 95a5388..be0e323 100644 --- a/test/Index/usrs.m +++ b/test/Index/usrs.m @@ -120,7 +120,6 @@ int test_multi_declaration(void) { // CHECK: usrs.m c:objc(cs)Foo(im)godzilla@z Extent=[37:3 - 37:15] // CHECK: usrs.m c:objc(cs)Foo(cm)kingkong Extent=[40:1 - 43:2] // CHECK: usrs.m c:usrs.m@470objc(cs)Foo(cm)kingkong@local_var Extent=[41:3 - 41:16] -// CHECK: usrs.m c:objc(cs)Foo@d1 Extent=[44:13 - 44:15] // CHECK: usrs.m c:objc(cs)Foo(py)d1 Extent=[44:1 - 44:15] // CHECK: usrs.m c:@z Extent=[47:1 - 47:6] // CHECK: usrs.m c:usrs.m@529@F@local_func Extent=[49:1 - 49:43] @@ -208,7 +207,6 @@ int test_multi_declaration(void) { // CHECK-source: usrs.m:42:3: ReturnStmt= Extent=[42:3 - 42:11] // CHECK-source: usrs.m:42:10: UnexposedExpr= Extent=[42:10 - 42:11] // CHECK-source: usrs.m:42:10: IntegerLiteral= Extent=[42:10 - 42:11] -// CHECK-source: usrs.m:44:13: ObjCIvarDecl=d1:44:13 (Definition) Extent=[44:13 - 44:15] // CHECK-source: usrs.m:44:13: ObjCSynthesizeDecl=d1:31:15 (Definition) Extent=[44:1 - 44:15] // CHECK-source: usrs.m:47:5: VarDecl=z:47:5 Extent=[47:1 - 47:6] // CHECK-source: usrs.m:49:12: FunctionDecl=local_func:49:12 (Definition) Extent=[49:1 - 49:43] diff --git a/test/Lexer/clang-keywords.cpp b/test/Lexer/clang-keywords.cpp index a349b44..3a24dce 100644 --- a/test/Lexer/clang-keywords.cpp +++ b/test/Lexer/clang-keywords.cpp @@ -1,3 +1,4 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics __char16_t c16; void f(__char32_t) { } diff --git a/test/Lexer/digraph.c b/test/Lexer/digraph.c index cf6e478..e940caf 100644 --- a/test/Lexer/digraph.c +++ b/test/Lexer/digraph.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -ffreestanding %s +// expected-no-diagnostics %:include diff --git a/test/Lexer/dollar-idents.c b/test/Lexer/dollar-idents.c index cbf25b0..e57ee86 100644 --- a/test/Lexer/dollar-idents.c +++ b/test/Lexer/dollar-idents.c @@ -1,7 +1,7 @@ -// RUN: %clang_cc1 -dump-tokens %s 2> %t -// RUN: grep "identifier '\$A'" %t -// RUN: %clang_cc1 -dump-tokens -x assembler-with-cpp %s 2> %t -// RUN: grep "identifier 'A'" %t +// RUN: %clang_cc1 -dump-tokens %s 2>&1 | FileCheck %s +// RUN: %clang_cc1 -dump-tokens -x assembler-with-cpp %s 2>&1 | FileCheck %s --check-prefix=ASM // PR3808 +// CHECK: identifier '$A' +// CHECK-ASM: identifier 'A' $A diff --git a/test/Lexer/eof-char.c b/test/Lexer/eof-char.c new file mode 100644 index 0000000..42ee6ea --- /dev/null +++ b/test/Lexer/eof-char.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 %s -verify -fsyntax-only +// 
vim: set binary noeol: + +// This file intentionally ends without a \n on the last line. Make sure your +// editor doesn't add one. + +// expected-warning@+1{{missing terminating ' character}} expected-error@+1{{expected expression}} expected-error@+1{{expected ';'}} +char c = '\ \ No newline at end of file diff --git a/test/Lexer/eof-file.c b/test/Lexer/eof-file.c new file mode 100644 index 0000000..43c300c --- /dev/null +++ b/test/Lexer/eof-file.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 %s -verify -fsyntax-only +// vim: set binary noeol: + +// This file intentionally ends without a \n on the last line. Make sure your +// editor doesn't add one. + +// expected-error@+1{{expected expression}} expected-error@+1{{expected ';'}} +char c = \ \ No newline at end of file diff --git a/test/Lexer/eof-string.c b/test/Lexer/eof-string.c new file mode 100644 index 0000000..e1a60a4 --- /dev/null +++ b/test/Lexer/eof-string.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 %s -verify -fsyntax-only +// vim: set binary noeol: + +// This file intentionally ends without a \n on the last line. Make sure your +// editor doesn't add one. + +// expected-warning@+1{{missing terminating '"' character}} expected-error@+1{{expected expression}} expected-error@+1{{expected ';'}} +char c = "\ \ No newline at end of file diff --git a/test/Lexer/gnu_keywords.c b/test/Lexer/gnu_keywords.c index c4bd9b3..10a7d31 100644 --- a/test/Lexer/gnu_keywords.c +++ b/test/Lexer/gnu_keywords.c @@ -2,6 +2,7 @@ // RUN: %clang_cc1 -DGNU_KEYWORDS -std=c99 -fgnu-keywords -fsyntax-only -verify %s // RUN: %clang_cc1 -std=c99 -fsyntax-only -verify %s // RUN: %clang_cc1 -std=gnu89 -fno-gnu-keywords -fsyntax-only -verify %s +// expected-no-diagnostics void f() { #ifdef GNU_KEYWORDS diff --git a/test/Lexer/has_feature_address_sanitizer.cpp b/test/Lexer/has_feature_address_sanitizer.cpp index 69acc39..5c98116 100644 --- a/test/Lexer/has_feature_address_sanitizer.cpp +++ b/test/Lexer/has_feature_address_sanitizer.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -E -faddress-sanitizer %s -o - | FileCheck --check-prefix=CHECK-ASAN %s +// RUN: %clang_cc1 -E -fsanitize=address %s -o - | FileCheck --check-prefix=CHECK-ASAN %s // RUN: %clang_cc1 -E %s -o - | FileCheck --check-prefix=CHECK-NO-ASAN %s #if __has_feature(address_sanitizer) diff --git a/test/Lexer/long-long.cpp b/test/Lexer/long-long.cpp new file mode 100644 index 0000000..1a0f37b --- /dev/null +++ b/test/Lexer/long-long.cpp @@ -0,0 +1,24 @@ +/* RUN: %clang_cc1 -x c -std=c89 -fsyntax-only -verify -pedantic-errors -Wno-empty-translation-unit %s + * RUN: %clang_cc1 -x c -std=c99 -fsyntax-only -verify -pedantic-errors -Wno-empty-translation-unit %s + * RUN: %clang_cc1 -x c++ -std=c++98 -fsyntax-only -verify -pedantic-errors -Wno-empty-translation-unit %s + * RUN: %clang_cc1 -x c++ -std=c++11 -fsyntax-only -verify -Wc++98-compat-pedantic -Wno-empty-translation-unit %s + */ + +#if !defined(__cplusplus) +# if __STDC_VERSION__ < 199901L +/* expected-error@21 {{'long long' is an extension when C99 mode is not enabled}} */ +# else +/* expected-no-diagnostics */ +# endif +#else +# if __cplusplus < 201103L +/* expected-error@21 {{'long long' is a C++11 extension}} */ +# else +/* expected-warning@21 {{'long long' is incompatible with C++98}} */ +# endif +#endif + +#if 1 > 2LL +# error should not happen +#endif + diff --git a/test/Lexer/msdos-cpm-eof.c b/test/Lexer/msdos-cpm-eof.c index 9ef6e32..3469b59 100644 --- a/test/Lexer/msdos-cpm-eof.c +++ b/test/Lexer/msdos-cpm-eof.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only 
-verify -fms-extensions %s +// expected-no-diagnostics int x; diff --git a/test/Lexer/newline-eof-c++11.cpp b/test/Lexer/newline-eof-c++11.cpp index 3c45f28..eeabe8b 100644 --- a/test/Lexer/newline-eof-c++11.cpp +++ b/test/Lexer/newline-eof-c++11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -std=c++11 -Wnewline-eof -verify %s +// expected-no-diagnostics // The following line isn't terminated, don't fix it. void foo() {} \ No newline at end of file diff --git a/test/Lexer/numeric-literal-trash.c b/test/Lexer/numeric-literal-trash.c index 5407ba9..60981fe 100644 --- a/test/Lexer/numeric-literal-trash.c +++ b/test/Lexer/numeric-literal-trash.c @@ -1,5 +1,5 @@ /* RUN: %clang_cc1 -fsyntax-only -verify %s - */ + * expected-no-diagnostics */ # define XRECORD(x, c_name) e##c (x, __LINE__) diff --git a/test/Lexer/pragma-mark.c b/test/Lexer/pragma-mark.c index 96e8485..e58b226 100644 --- a/test/Lexer/pragma-mark.c +++ b/test/Lexer/pragma-mark.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // Lexer diagnostics shouldn't be included in #pragma mark. #pragma mark Mike's world diff --git a/test/Lexer/rdr-6096838.c b/test/Lexer/rdr-6096838.c index d1426cc..b77b95e 100644 --- a/test/Lexer/rdr-6096838.c +++ b/test/Lexer/rdr-6096838.c @@ -2,5 +2,6 @@ * RUN: %clang_cc1 -triple x86_64-unknown-unknown -std=gnu89 -fsyntax-only -verify %s rdar://6096838 */ +// expected-no-diagnostics long double d = 0x0.0000003ffffffff00000p-16357L; diff --git a/test/Lexer/string-literal-errors.cpp b/test/Lexer/string-literal-errors.cpp new file mode 100644 index 0000000..be574c2 --- /dev/null +++ b/test/Lexer/string-literal-errors.cpp @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -fsyntax-only %s 2>&1 | FileCheck -strict-whitespace %s + +void foo() { + (void)"\q \u123z \x \U \U123 \U12345 \u123 \xyzzy \777 \U" + // CHECK: {{^ \(void\)"\\q \\u123z \\x \\U \\U123 \\U12345 \\u123 \\xyzzy \\777 \\U"$}} + // + // (void)"\q \u123z \x \U \U123 \U12345 \u123 \xyzzy \777 \U" + // CHECK: {{^ \^~$}} + // CHECK: {{^ \^~~~~$}} + // CHECK: {{^ \^~$}} + // CHECK: {{^ \^~$}} + // CHECK: {{^ \^~~~~$}} + // CHECK: {{^ \^~~~~~~$}} + // CHECK: {{^ \^~~~~$}} + // CHECK: {{^ \^~$}} + // CHECK: {{^ \^~~~$}} + // CHECK: {{^ \^~$}} + + "123 \x \z"; + // CHECK: {{^ "123 \\x \\z";$}} + // + // "123 \x \z"; + // CHECK: {{^ \^~$}} + // CHECK: {{^ \^~$}} +} diff --git a/test/Misc/ast-dump-stmt.c b/test/Misc/ast-dump-stmt.c new file mode 100644 index 0000000..d7fdce8 --- /dev/null +++ b/test/Misc/ast-dump-stmt.c @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -ast-dump -ast-dump-filter Test %s | FileCheck -strict-whitespace %s + +int TestLocation = 0; +// CHECK: Dumping TestLocation +// CHECK-NEXT: IntegerLiteral 0x{{[^ ]*}} <{{.*}}:3:20> 'int' 0 + +int TestIndent = 1 + (1); +// CHECK: Dumping TestIndent +// CHECK-NEXT: {{\(BinaryOperator[^()]*$}} +// CHECK-NEXT: {{^ \(IntegerLiteral.*0[^()]*\)$}} +// CHECK-NEXT: {{^ \(ParenExpr.*0[^()]*$}} +// CHECK-NEXT: {{^ \(IntegerLiteral.*0[^()]*\)\)\)$}} + +void TestDeclStmt() { + int x = 0; + int y, z; +} +// CHECK: Dumping TestDeclStmt +// CHECK-NEXT: CompoundStmt +// CHECK-NEXT: DeclStmt +// CHECK-NEXT: int x = +// CHECK-NEXT: IntegerLiteral +// CHECK-NEXT: DeclStmt +// CHECK-NEXT: int y +// CHECK-NEXT: int z + +int TestOpaqueValueExpr = 0 ?: 1; +// CHECK: Dumping TestOpaqueValueExpr +// CHECK-NEXT: BinaryConditionalOperator +// CHECK-NEXT: IntegerLiteral +// CHECK-NEXT: OpaqueValueExpr +// CHECK-NEXT: IntegerLiteral +// CHECK-NEXT: OpaqueValueExpr +// CHECK-NEXT: IntegerLiteral +// 
CHECK-NEXT: IntegerLiteral diff --git a/test/Misc/ast-dump-stmt.m b/test/Misc/ast-dump-stmt.m new file mode 100644 index 0000000..8dfee74 --- /dev/null +++ b/test/Misc/ast-dump-stmt.m @@ -0,0 +1,36 @@ +// RUN: %clang_cc1 -Wno-unused -fblocks -fobjc-exceptions -ast-dump -ast-dump-filter Test %s | FileCheck -strict-whitespace %s + +void TestBlockExpr(int x) { + ^{ x; }; +} +// CHECK: Dumping TestBlockExpr +// CHECK: BlockExpr{{.*}} decl= +// CHECK-NEXT: capture ParmVar +// CHECK-NEXT: CompoundStmt + +void TestExprWithCleanup(int x) { + ^{ x; }; +} +// CHECK: Dumping TestExprWithCleanup +// CHECK: ExprWithCleanups +// CHECK-NEXT: cleanup Block +// CHECK-NEXT: BlockExpr + +@interface A +@end + +void TestObjCAtCatchStmt() { + @try { + } @catch(A *a) { + } @catch(...) { + } @finally { + } +} +// CHECK: Dumping TestObjCAtCatchStmt +// CHECK: ObjCAtTryStmt +// CHECK-NEXT: CompoundStmt +// CHECK-NEXT: ObjCAtCatchStmt{{.*}} catch parm = "A *a" +// CHECK-NEXT: CompoundStmt +// CHECK-NEXT: ObjCAtCatchStmt{{.*}} catch all +// CHECK-NEXT: CompoundStmt +// CHECK-NEXT: ObjCAtFinallyStmt diff --git a/test/Misc/caret-diags-macros.c b/test/Misc/caret-diags-macros.c index de1ee76..5faddb6 100644 --- a/test/Misc/caret-diags-macros.c +++ b/test/Misc/caret-diags-macros.c @@ -1,13 +1,13 @@ -// RUN: %clang_cc1 -fsyntax-only %s 2>&1 | FileCheck %s +// RUN: %clang_cc1 -fsyntax-only %s 2>&1 | FileCheck %s -strict-whitespace #define M1(x) x #define M2 1; void foo() { M1( M2); - // CHECK: :7:{{[0-9]+}}: warning: expression result unused - // CHECK: :4:{{[0-9]+}}: note: expanded from macro 'M2' - // CHECK: :3:{{[0-9]+}}: note: expanded from macro 'M1' + // CHECK: {{.*}}:7:{{[0-9]+}}: warning: expression result unused + // CHECK: {{.*}}:4:{{[0-9]+}}: note: expanded from macro 'M2' + // CHECK: {{.*}}:3:{{[0-9]+}}: note: expanded from macro 'M1' } #define A 1 @@ -15,10 +15,10 @@ void foo() { #define C B void bar() { C; - // CHECK: :17:3: warning: expression result unused - // CHECK: :15:11: note: expanded from macro 'C' - // CHECK: :14:11: note: expanded from macro 'B' - // CHECK: :13:11: note: expanded from macro 'A' + // CHECK: {{.*}}:17:3: warning: expression result unused + // CHECK: {{.*}}:15:11: note: expanded from macro 'C' + // CHECK: {{.*}}:14:11: note: expanded from macro 'B' + // CHECK: {{.*}}:13:11: note: expanded from macro 'A' } // rdar://7597492 @@ -40,12 +40,12 @@ void baz(char *Msg) { #define macro_many_args3(x, y, z) macro_many_args2(x, y, z) void test() { - macro_args3(1); + macro_args3(11); // CHECK: {{.*}}:43:15: warning: expression result unused // Also check that the 'caret' printing agrees with the location here where // its easy to FileCheck. - // CHECK-NEXT: macro_args3(1); - // CHECK-NEXT: ~~~~~~~~~~~~^~ + // CHECK-NEXT: macro_args3(11); + // CHECK-NEXT: {{^ \^~}} // CHECK: {{.*}}:36:36: note: expanded from macro 'macro_args3' // CHECK: {{.*}}:35:36: note: expanded from macro 'macro_args2' // CHECK: {{.*}}:34:24: note: expanded from macro 'macro_args1' @@ -71,13 +71,13 @@ void test() { macro_many_args3( 1, - macro_args2(2), + macro_args2(22), 3); // CHECK: {{.*}}:74:17: warning: expression result unused // This caret location needs to be printed *inside* a different macro's // arguments. 
- // CHECK-NEXT: macro_args2(2), - // CHECK-NEXT: ~~~~~~~~~~~~^~~ + // CHECK-NEXT: macro_args2(22), + // CHECK-NEXT: {{^ \^~}} // CHECK: {{.*}}:35:36: note: expanded from macro 'macro_args2' // CHECK: {{.*}}:34:24: note: expanded from macro 'macro_args1' // CHECK: {{.*}}:40:55: note: expanded from macro 'macro_many_args3' @@ -90,10 +90,10 @@ void test() { #define variadic_args3(x, y, ...) variadic_args2(x, y, __VA_ARGS__) void test2() { - variadic_args3(1, 2, 3, 4); + variadic_args3(1, 22, 3, 4); // CHECK: {{.*}}:93:21: warning: expression result unused - // CHECK-NEXT: variadic_args3(1, 2, 3, 4); - // CHECK-NEXT: ~~~~~~~~~~~~~~~~~~^~~~~~~~ + // CHECK-NEXT: variadic_args3(1, 22, 3, 4); + // CHECK-NEXT: {{^ \^~}} // CHECK: {{.*}}:90:53: note: expanded from macro 'variadic_args3' // CHECK: {{.*}}:89:50: note: expanded from macro 'variadic_args2' // CHECK: {{.*}}:88:35: note: expanded from macro 'variadic_args1' @@ -118,3 +118,48 @@ void test3() { // CHECK: {{.*}}:104:70: note: expanded from macro 'variadic_pasting_args2a' // CHECK: {{.*}}:102:41: note: expanded from macro 'variadic_pasting_args1' } + +#define BAD_CONDITIONAL_OPERATOR (2<3)?2:3 +int test4 = BAD_CONDITIONAL_OPERATOR+BAD_CONDITIONAL_OPERATOR; +// CHECK: {{.*}}:122:39: note: expanded from macro 'BAD_CONDITIONAL_OPERATOR' +// CHECK-NEXT: #define BAD_CONDITIONAL_OPERATOR (2<3)?2:3 +// CHECK-NEXT: {{^ \^}} +// CHECK: {{.*}}:122:39: note: expanded from macro 'BAD_CONDITIONAL_OPERATOR' +// CHECK-NEXT: #define BAD_CONDITIONAL_OPERATOR (2<3)?2:3 +// CHECK-NEXT: {{^ \^}} +// CHECK: {{.*}}:122:39: note: expanded from macro 'BAD_CONDITIONAL_OPERATOR' +// CHECK-NEXT: #define BAD_CONDITIONAL_OPERATOR (2<3)?2:3 +// CHECK-NEXT: {{^ ~~~~~\^~~~}} + +#define QMARK ? +#define TWOL (2< +#define X 1+TWOL 3) QMARK 4:5 +int x = X; +// CHECK: {{.*}}:137:9: note: place parentheses around the '+' expression to silence this warning +// CHECK-NEXT: int x = X; +// CHECK-NEXT: {{^ \^}} +// CHECK-NEXT: {{.*}}:136:21: note: expanded from macro 'X' +// CHECK-NEXT: #define X 1+TWOL 3) QMARK 4:5 +// CHECK-NEXT: {{^ ~~~~~~~~~ \^}} +// CHECK-NEXT: {{.*}}:134:15: note: expanded from macro 'QMARK' +// CHECK-NEXT: #define QMARK ? +// CHECK-NEXT: {{^ \^}} +// CHECK-NEXT: {{.*}}:137:9: note: place parentheses around the '?:' expression to evaluate it first +// CHECK-NEXT: int x = X; +// CHECK-NEXT: {{^ \^}} +// CHECK-NEXT: {{.*}}:136:21: note: expanded from macro 'X' +// CHECK-NEXT: #define X 1+TWOL 3) QMARK 4:5 +// CHECK-NEXT: {{^ ~~~~~~~~\^~~~~~~~~}} + +#define ONEPLUS 1+ +#define Y ONEPLUS (2<3) QMARK 4:5 +int y = Y; +// CHECK: {{.*}}:156:9: warning: operator '?:' has lower precedence than '+'; '+' will be evaluated first +// CHECK-NEXT: int y = Y; +// CHECK-NEXT: {{^ \^}} +// CHECK-NEXT: {{.*}}:155:25: note: expanded from macro 'Y' +// CHECK-NEXT: #define Y ONEPLUS (2<3) QMARK 4:5 +// CHECK-NEXT: {{^ ~~~~~~~~~~~~~ \^}} +// CHECK-NEXT: {{.*}}:134:15: note: expanded from macro 'QMARK' +// CHECK-NEXT: #define QMARK ? 
+// CHECK-NEXT: {{^ \^}} diff --git a/test/Misc/diag-template-diffing-color.cpp b/test/Misc/diag-template-diffing-color.cpp index 6903e84..cfa1a68 100644 --- a/test/Misc/diag-template-diffing-color.cpp +++ b/test/Misc/diag-template-diffing-color.cpp @@ -17,3 +17,56 @@ foo &B = A; // TREE: non-const lvalue reference cannot bind to a value of unrelated type // TREE: foo< // TREE: [{{.}}[0;1;36mdouble{{.}}[0m{{.}}[1m != {{.}}[0;1;36mint{{.}}[0m{{.}}[1m]>{{.}}[0m + +template class vector {}; + +void set15(vector >) {} +void test15() { + set15(vector >()); +} +// CHECK: {{.*}}candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// TREE: {{.*}}candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// TREE: vector< +// TREE: const vector< +// TREE: [{{.}}[0;1;36mconst{{ ?.}}[0m{{ ?}}!= {{.}}[0;1;36m(no qualifiers){{.}}[0m] int>> + +void set16(vector >) {} +void test16() { + set16(vector >()); +} +// CHECK: {{.*}}candidate function not viable: no known conversion from 'vector<{{.}}[0;1;36mconst{{ ?.}}[0m{{ ?}}vector<[...]>>' to 'vector>' for 1st argument +// TREE: {{.*}}candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// TREE: vector< +// TREE: [{{.}}[0;1;36mconst{{ ?.}}[0m{{ ?}}!= {{.}}[0;1;36m(no qualifiers){{ ?.}}[0m]{{ ?}}vector< +// TREE: [...]>> + +void set17(vector >) {} +void test17() { + set17(vector >()); +} +// CHECK: candidate function not viable: no known conversion from 'vector>' to 'vector<{{.}}[0;1;36mconst{{ ?.}}[0m{{ ?}}vector<[...]>>' for 1st argument +// TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// TREE: vector< +// TREE: [{{.}}[0;1;36m(no qualifiers){{ ?.}}[0m{{ ?}}!= {{.}}[0;1;36mconst{{.}}[0m] vector< +// TREE: [...]>> + +void set18(vector >) {} +void test18() { + set18(vector >()); +} +// CHECK: candidate function not viable: no known conversion from 'vector<{{.}}[0;1;36mconst{{ ?.}}[0m{{ ?}}vector<[...]>>' to 'vector<{{.}}[0;1;36mvolatile{{ ?.}}[0m{{ ?}}vector<[...]>>' for 1st argument +// TREE: no matching function for call to 'set18' +// TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// TREE: vector< +// TREE: [{{.}}[0;1;36mconst{{ ?.}}[0m{{ ?}}!= {{.}}[0;1;36mvolatile{{.}}[0m] vector< +// TREE: [...]>> + +void set19(vector >) {} +void test19() { + set19(vector >()); +} +// CHECK: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// TREE: vector< +// TREE: [const != const {{.}}[0;1;36mvolatile{{.}}[0m] vector< +// TREE: [...]>> diff --git a/test/Misc/diag-template-diffing.cpp b/test/Misc/diag-template-diffing.cpp index 9addcc5..2c044f8 100644 --- a/test/Misc/diag-template-diffing.cpp +++ b/test/Misc/diag-template-diffing.cpp @@ -426,6 +426,372 @@ void test13() { // CHECK-NOELIDE-TREE: &b13, // CHECK-NOELIDE-TREE: [&d13 != (no argument)]> +template struct s14 {}; +template using a14 = s14; +typedef a14 b14; +template using c14 = b14; +int f14(c14); +int k14 = f14(a14()); +// CHECK-ELIDE-NOTREE: no matching function for call to 'f14' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'a14' to 'a14' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'f14' +// CHECK-NOELIDE-NOTREE: 
candidate function not viable: no known conversion from 'a14' to 'a14' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'f14' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: a14< +// CHECK-ELIDE-TREE: [char != int]> +// CHECK-NOELIDE-TREE: no matching function for call to 'f14' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: a14< +// CHECK-NOELIDE-TREE: [char != int]> + +void set15(vector>) {} +void test15() { + set15(vector>()); +} +// CHECK-ELIDE-NOTREE-NOT: set15 +// CHECK-NOELIDE-NOTREE-NOT: set15 +// CHECK-ELIDE-TREE-NOT: set15 +// CHECK-NOELIDE-TREE-NOT: set15 +// no error here + +void set16(vector>) {} +void test16() { + set16(vector>()); +} +// CHECK-ELIDE-NOTREE: no matching function for call to 'set16' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'set16' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'set16' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: vector< +// CHECK-ELIDE-TREE: const vector< +// CHECK-ELIDE-TREE: [const != (no qualifiers)] int>> +// CHECK-NOELIDE-TREE: no matching function for call to 'set16' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: vector< +// CHECK-NOELIDE-TREE: const vector< +// CHECK-NOELIDE-TREE: [const != (no qualifiers)] int>> + +void set17(vector>) {} +void test17() { + set17(vector>()); +} +// CHECK-ELIDE-NOTREE: no matching function for call to 'set17' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'set17' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'set17' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: vector< +// CHECK-ELIDE-TREE: [const != (no qualifiers)] vector< +// CHECK-ELIDE-TREE: [...]>> +// CHECK-NOELIDE-TREE: no matching function for call to 'set17' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: vector< +// CHECK-NOELIDE-TREE: [const != (no qualifiers)] vector< +// CHECK-NOELIDE-TREE: int>> + +void set18(vector>) {} +void test18() { + set18(vector>()); +} +// CHECK-ELIDE-NOTREE: no matching function for call to 'set18' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'set18' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'set18' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to 
parameter type for 1st argument +// CHECK-ELIDE-TREE: vector< +// CHECK-ELIDE-TREE: [(no qualifiers) != const] vector< +// CHECK-ELIDE-TREE: [...]>> +// CHECK-NOELIDE-TREE: no matching function for call to 'set18' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: vector< +// CHECK-NOELIDE-TREE: [(no qualifiers) != const] vector< +// CHECK-NOELIDE-TREE: int>> + +void set19(vector>) {} +void test19() { + set19(vector>()); +} +// CHECK-ELIDE-NOTREE: no matching function for call to 'set19' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'set19' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'set19' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: vector< +// CHECK-ELIDE-TREE: [const != volatile] vector< +// CHECK-ELIDE-TREE: [...]>> +// CHECK-NOELIDE-TREE: no matching function for call to 'set19' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: vector< +// CHECK-NOELIDE-TREE: [const != volatile] vector< +// CHECK-NOELIDE-TREE: int>> + +void set20(vector>) {} +void test20() { + set20(vector>()); +} +// CHECK-ELIDE-NOTREE: no matching function for call to 'set20' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'set20' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'set20' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: vector< +// CHECK-ELIDE-TREE: [const != const volatile] vector< +// CHECK-ELIDE-TREE: [...]>> +// CHECK-NOELIDE-TREE: no matching function for call to 'set20' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: vector< +// CHECK-NOELIDE-TREE: [const != const volatile] vector< +// CHECK-NOELIDE-TREE: int>> + + +// Checks that volatile does not show up in diagnostics. 
+template struct S21 {}; +template using U21 = volatile S21; +int f21(vector>); +int k21 = f21(vector>()); +// CHECK-ELIDE-NOTREE: no matching function for call to 'f21' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'f21' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'f21' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: vector< +// CHECK-ELIDE-TREE: [(no qualifiers) != const] U21< +// CHECK-ELIDE-TREE: [...]>> +// CHECK-NOELIDE-TREE: no matching function for call to 'f21' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: vector< +// CHECK-NOELIDE-TREE: [(no qualifiers) != const] U21< +// CHECK-NOELIDE-TREE: int>> + +// Checks that volatile does not show up in diagnostics. +template struct S22 {}; +template using U22 = volatile S22; +int f22(vector>); +int k22 = f22(vector>()); +// CHECK-ELIDE-NOTREE: no matching function for call to 'f22' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'f22' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'vector>' to 'vector>' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'f22' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: vector< +// CHECK-ELIDE-TREE: [(no qualifiers) != const] U22< +// CHECK-ELIDE-TREE: [...]>> +// CHECK-NOELIDE-TREE: no matching function for call to 'f22' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: vector< +// CHECK-NOELIDE-TREE: [(no qualifiers) != const] U22< +// CHECK-NOELIDE-TREE: int>> + +// Testing qualifiers and typedefs. 
+template struct D23{}; +template using C23 = D23; +typedef const C23 B23; +template using A23 = B23; + +void foo23(D23> b) {} +void test23() { + foo23(D23>()); + foo23(C23()); +} + +// CHECK-ELIDE-NOTREE: no matching function for call to 'foo23' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'D23>' to 'D23>' for 1st argument +// CHECK-ELIDE-NOTREE: no matching function for call to 'foo23' +// CHECK-ELIDE-NOTREE: candidate function not viable: no known conversion from 'D23' to 'D23>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'foo23' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'D23>' to 'D23>' for 1st argument +// CHECK-NOELIDE-NOTREE: no matching function for call to 'foo23' +// CHECK-NOELIDE-NOTREE: candidate function not viable: no known conversion from 'D23' to 'D23>' for 1st argument +// CHECK-ELIDE-TREE: no matching function for call to 'foo23' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: D23< +// CHECK-ELIDE-TREE: [(no qualifiers) != const] D23< +// CHECK-ELIDE-TREE: [char != int]>> +// CHECK-ELIDE-TREE: no matching function for call to 'foo23' +// CHECK-ELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: D23< +// CHECK-ELIDE-TREE: [char != A23<>]> +// CHECK-NOELIDE-TREE: no matching function for call to 'foo23' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: D23< +// CHECK-NOELIDE-TREE: [(no qualifiers) != const] D23< +// CHECK-NOELIDE-TREE: [char != int]>> +// CHECK-NOELIDE-TREE: no matching function for call to 'foo23' +// CHECK-NOELIDE-TREE: candidate function not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: D23< +// CHECK-NOELIDE-TREE: [char != A23<>]> + +namespace PR14015 { +template class Foo1 {}; +template class Foo2 {}; +template class Foo3 {}; + +void Play1() { + Foo1<1> F1; + Foo1<2> F2, F3; + F2 = F1; + F1 = F2; + F2 = F3; + F3 = F2; +} + +// CHECK-ELIDE-NOTREE: no viable overloaded '=' +// CHECK-ELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo1<1>' to 'Foo1<2>' for 1st argument +// CHECK-ELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo1<1>' to 'Foo1<2>' for 1st argument +// CHECK-ELIDE-NOTREE: no viable overloaded '=' +// CHECK-ELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo1<2>' to 'Foo1<1>' for 1st argument +// CHECK-ELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo1<2>' to 'Foo1<1>' for 1st argument +// CHECK-NOELIDE-NOTREE: no viable overloaded '=' +// CHECK-NOELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo1<1>' to 'Foo1<2>' for 1st argument +// CHECK-NOELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo1<1>' to 'Foo1<2>' for 1st argument +// CHECK-NOELIDE-NOTREE: no viable overloaded '=' +// CHECK-NOELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo1<2>' to 
'Foo1<1>' for 1st argument +// CHECK-NOELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo1<2>' to 'Foo1<1>' for 1st argument +// CHECK-ELIDE-TREE: no viable overloaded '=' +// CHECK-ELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo1< +// CHECK-ELIDE-TREE: [1 != 2]> +// CHECK-ELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo1< +// CHECK-ELIDE-TREE: [1 != 2]> +// CHECK-ELIDE-TREE: no viable overloaded '=' +// CHECK-ELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo1< +// CHECK-ELIDE-TREE: [2 != 1]> +// CHECK-ELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo1< +// CHECK-ELIDE-TREE: [2 != 1]> +// CHECK-NOELIDE-TREE: no viable overloaded '=' +// CHECK-NOELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo1< +// CHECK-NOELIDE-TREE: [1 != 2]> +// CHECK-NOELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo1< +// CHECK-NOELIDE-TREE: [1 != 2]> +// CHECK-NOELIDE-TREE: no viable overloaded '=' +// CHECK-NOELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo1< +// CHECK-NOELIDE-TREE: [2 != 1]> +// CHECK-NOELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo1< +// CHECK-NOELIDE-TREE: [2 != 1]> + +void Play2() { + Foo2<1> F1; + Foo2<> F2, F3; + F2 = F1; + F1 = F2; + F2 = F3; + F3 = F2; +} +// CHECK-ELIDE-NOTREE: no viable overloaded '=' +// CHECK-ELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo2<1>' to 'Foo2<2>' for 1st argument +// CHECK-ELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo2<1>' to 'Foo2<2>' for 1st argument +// CHECK-ELIDE-NOTREE: no viable overloaded '=' +// CHECK-ELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo2<(default) 2>' to 'Foo2<1>' for 1st argument +// CHECK-ELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo2<(default) 2>' to 'Foo2<1>' for 1st argument +// CHECK-NOELIDE-NOTREE: no viable overloaded '=' +// CHECK-NOELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo2<1>' to 'Foo2<2>' for 1st argument +// CHECK-NOELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo2<1>' to 'Foo2<2>' for 1st argument +// CHECK-NOELIDE-NOTREE: no viable overloaded '=' +// CHECK-NOELIDE-NOTREE: candidate function (the 
implicit copy assignment operator) not viable: no known conversion from 'Foo2<(default) 2>' to 'Foo2<1>' for 1st argument +// CHECK-NOELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo2<(default) 2>' to 'Foo2<1>' for 1st argument +// CHECK-ELIDE-TREE: no viable overloaded '=' +// CHECK-ELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo2< +// CHECK-ELIDE-TREE: [1 != 2]> +// CHECK-ELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo2< +// CHECK-ELIDE-TREE: [1 != 2]> +// CHECK-ELIDE-TREE: no viable overloaded '=' +// CHECK-ELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo2< +// CHECK-ELIDE-TREE: [(default) 2 != 1]> +// CHECK-ELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo2< +// CHECK-ELIDE-TREE: [(default) 2 != 1]> +// CHECK-NOELIDE-TREE: no viable overloaded '=' +// CHECK-NOELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo2< +// CHECK-NOELIDE-TREE: [1 != 2]> +// CHECK-NOELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo2< +// CHECK-NOELIDE-TREE: [1 != 2]> +// CHECK-NOELIDE-TREE: no viable overloaded '=' +// CHECK-NOELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo2< +// CHECK-NOELIDE-TREE: [(default) 2 != 1]> +// CHECK-NOELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo2< +// CHECK-NOELIDE-TREE: [(default) 2 != 1]> + +void Play3() { + Foo3<1> F1; + Foo3<2, 1> F2, F3; + F2 = F1; + F1 = F2; + F2 = F3; + F3 = F2; +} +// CHECK-ELIDE-NOTREE: no viable overloaded '=' +// CHECK-ELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo3<1, (no argument)>' to 'Foo3<2, 1>' for 1st argument +// CHECK-ELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo3<1, (no argument)>' to 'Foo3<2, 1>' for 1st argument +// CHECK-ELIDE-NOTREE: no viable overloaded '=' +// CHECK-ELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo3<2, 1>' to 'Foo3<1, (no argument)>' for 1st argument +// CHECK-ELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo3<2, 1>' to 'Foo3<1, (no argument)>' for 1st argument +// CHECK-NOELIDE-NOTREE: no viable overloaded '=' +// CHECK-NOELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo3<1, (no argument)>' to 'Foo3<2, 1>' for 1st argument +// CHECK-NOELIDE-NOTREE: candidate function 
(the implicit move assignment operator) not viable: no known conversion from 'Foo3<1, (no argument)>' to 'Foo3<2, 1>' for 1st argument +// CHECK-NOELIDE-NOTREE: no viable overloaded '=' +// CHECK-NOELIDE-NOTREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from 'Foo3<2, 1>' to 'Foo3<1, (no argument)>' for 1st argument +// CHECK-NOELIDE-NOTREE: candidate function (the implicit move assignment operator) not viable: no known conversion from 'Foo3<2, 1>' to 'Foo3<1, (no argument)>' for 1st argument +// CHECK-ELIDE-TREE: no viable overloaded '=' +// CHECK-ELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo3< +// CHECK-ELIDE-TREE: [1 != 2], +// CHECK-ELIDE-TREE: [(no argument) != 1]> +// CHECK-ELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo3< +// CHECK-ELIDE-TREE: [1 != 2], +// CHECK-ELIDE-TREE: [(no argument) != 1]> +// CHECK-ELIDE-TREE: no viable overloaded '=' +// CHECK-ELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo3< +// CHECK-ELIDE-TREE: [2 != 1], +// CHECK-ELIDE-TREE: [1 != (no argument)]> +// CHECK-ELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-ELIDE-TREE: Foo3< +// CHECK-ELIDE-TREE: [2 != 1], +// CHECK-ELIDE-TREE: [1 != (no argument)]> +// CHECK-NOELIDE-TREE: no viable overloaded '=' +// CHECK-NOELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo3< +// CHECK-NOELIDE-TREE: [1 != 2], +// CHECK-NOELIDE-TREE: [(no argument) != 1]> +// CHECK-NOELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo3< +// CHECK-NOELIDE-TREE: [1 != 2], +// CHECK-NOELIDE-TREE: [(no argument) != 1]> +// CHECK-NOELIDE-TREE: no viable overloaded '=' +// CHECK-NOELIDE-TREE: candidate function (the implicit copy assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo3< +// CHECK-NOELIDE-TREE: [2 != 1], +// CHECK-NOELIDE-TREE: [1 != (no argument)]> +// CHECK-NOELIDE-TREE: candidate function (the implicit move assignment operator) not viable: no known conversion from argument type to parameter type for 1st argument +// CHECK-NOELIDE-TREE: Foo3< +// CHECK-NOELIDE-TREE: [2 != 1], +// CHECK-NOELIDE-TREE: [1 != (no argument)]> +} + // CHECK-ELIDE-NOTREE: {{[0-9]*}} errors generated. // CHECK-NOELIDE-NOTREE: {{[0-9]*}} errors generated. 
diff --git a/test/Misc/predefines.c b/test/Misc/predefines.c index 87f676e..63944b0 100644 --- a/test/Misc/predefines.c +++ b/test/Misc/predefines.c @@ -1,4 +1,5 @@ /* RUN: %clang_cc1 -fsyntax-only -verify -std=c89 -ffreestanding -pedantic-errors %s + * expected-no-diagnostics * rdar://6814950 */ #include diff --git a/test/Misc/unnecessary-elipses.cpp b/test/Misc/unnecessary-elipses.cpp new file mode 100644 index 0000000..2ee7258 --- /dev/null +++ b/test/Misc/unnecessary-elipses.cpp @@ -0,0 +1,15 @@ +// RUN: %clang_cc1 -fsyntax-only -fmessage-length 80 %s 2>&1 | FileCheck -strict-whitespace %s + +int main() { + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"; +// CHECK: {{^ "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";}} + + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"; +// CHECK: {{^ ..."xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";}} + +"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ; +// CHECK: {{^"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"...}} + + "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" ; +// CHECK: {{^ ..."xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"...}} +} \ No newline at end of file diff --git a/test/Misc/unprintable.c b/test/Misc/unprintable.c index 860503e..cd97131 100644 --- a/test/Misc/unprintable.c +++ b/test/Misc/unprintable.c @@ -1,16 +1,39 @@ -// RUN: %clang_cc1 %s 2>&1 | FileCheck -strict-whitespace %s +// RUN: %clang_cc1 %s -fmessage-length 40 2>&1 | FileCheck -strict-whitespace %s int main() { int i; - if((i==/*👿*/1)); + if((i==/*￾*/1)); -// CHECK: {{^ if\(\(i==/\*\*/1\)\);}} +// CHECK: {{^ if\(\(i==/\*\*/1\)\);}} -// CHECK: {{^ ~\^~~~~~~~~~~~~~~~}} -// CHECK: {{^ ~ \^ ~}} +// CHECK: {{^ ~\^~~~~~~~~~~~~~~}} +// CHECK: {{^ ~ \^ ~}} - /* 👿 */ "👿berhund"; + (void)"Ê￾ô"; -// CHECK: {{^ /\* \*/ "berhund";}} -// CHECK: {{^ \^~~~~~~~~~~~~~~~~~}} -} \ No newline at end of file +// CHECK: {{^ \(void\)"";}} +// CHECK: {{^ \^~~~}} + +  int n = 0; + +// CHECK: {{ int n = 0;}} +// CHECK: {{^\^}} + + "￾ \z"; + +// CHECK: {{^ \.\.\.\\z";}} +// CHECK: {{^ \^~}} + + + /* ￾ */ "￾berhund"; + +// CHECK: {{^ /\* \*/ "berhund";}} +// CHECK: {{^ \^~~~~~~~~~~~~~~~~}} + + +// PR14292 + "x°xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +// CHECK: {{^ "x}} +// CHECK: {{^ \^}} + +} diff --git a/test/Misc/warning-flags-enabled.c b/test/Misc/warning-flags-enabled.c index 7ef5c94..ba29e7ac 100644 --- a/test/Misc/warning-flags-enabled.c +++ b/test/Misc/warning-flags-enabled.c @@ -25,3 +25,19 @@ // CHECK-NO-LEVELS-NOT: E // CHECK-NO-LEVELS-NOT: F // CHECK-NO-LEVELS: warn_objc_root_class_missing [-Wobjc-root-class] + +// Test if EnumConversion is a subgroup of -Wconversion. 
+// RUN: diagtool show-enabled --no-levels -Wno-conversion -Wenum-conversion %s | FileCheck --check-prefix CHECK-ENUM-CONVERSION %s +// RUN: diagtool show-enabled --no-levels %s | FileCheck --check-prefix CHECK-ENUM-CONVERSION %s +// RUN: diagtool show-enabled --no-levels -Wno-conversion %s | FileCheck --check-prefix CHECK-NO-ENUM-CONVERSION %s +// +// CHECK-ENUM-CONVERSION: -Wenum-conversion +// CHECK-NO-ENUM-CONVERSION-NOT: -Wenum-conversion + +// Test if -Wshift-op-parentheses is a subgroup of -Wparentheses +// RUN: diagtool show-enabled --no-levels -Wno-parentheses -Wshift-op-parentheses %s | FileCheck --check-prefix CHECK-SHIFT-OP-PARENTHESES %s +// RUN: diagtool show-enabled --no-levels %s | FileCheck --check-prefix CHECK-SHIFT-OP-PARENTHESES %s +// RUN: diagtool show-enabled --no-levels -Wno-parentheses %s | FileCheck --check-prefix CHECK-NO-SHIFT-OP-PARENTHESES %s +// +// CHECK-SHIFT-OP-PARENTHESES: -Wshift-op-parentheses +// CHECK-NO-SHIFT-OP-PARENTHESES-NOT: -Wshift-op-parentheses diff --git a/test/Misc/warning-flags.c b/test/Misc/warning-flags.c index 06c70eb..c3f14bc 100644 --- a/test/Misc/warning-flags.c +++ b/test/Misc/warning-flags.c @@ -18,7 +18,7 @@ This test serves two purposes: The list of warnings below should NEVER grow. It should gradually shrink to 0. -CHECK: Warnings without flags (159): +CHECK: Warnings without flags (148): CHECK-NEXT: ext_delete_void_ptr_operand CHECK-NEXT: ext_enum_friend CHECK-NEXT: ext_expected_semi_decl_list @@ -30,8 +30,6 @@ CHECK-NEXT: ext_new_paren_array_nonconst CHECK-NEXT: ext_plain_complex CHECK-NEXT: ext_pp_macro_redef CHECK-NEXT: ext_template_arg_extra_parens -CHECK-NEXT: ext_typecheck_comparison_of_distinct_pointers -CHECK-NEXT: ext_typecheck_comparison_of_distinct_pointers_nonstandard CHECK-NEXT: ext_typecheck_comparison_of_pointer_integer CHECK-NEXT: ext_typecheck_cond_incompatible_operands CHECK-NEXT: ext_typecheck_cond_incompatible_operands_nonstandard @@ -59,10 +57,7 @@ CHECK-NEXT: warn_call_to_pure_virtual_member_function_from_ctor_dtor CHECK-NEXT: warn_call_wrong_number_of_arguments CHECK-NEXT: warn_case_empty_range CHECK-NEXT: warn_char_constant_too_large -CHECK-NEXT: warn_cmdline_missing_macro_defs CHECK-NEXT: warn_collection_expr_type -CHECK-NEXT: warn_conflicting_param_types -CHECK-NEXT: warn_conflicting_ret_types CHECK-NEXT: warn_conflicting_variadic CHECK-NEXT: warn_conv_to_base_not_used CHECK-NEXT: warn_conv_to_self_not_used @@ -71,9 +66,6 @@ CHECK-NEXT: warn_delete_array_type CHECK-NEXT: warn_double_const_requires_fp64 CHECK-NEXT: warn_drv_assuming_mfloat_abi_is CHECK-NEXT: warn_drv_clang_unsupported -CHECK-NEXT: warn_drv_not_using_clang_arch -CHECK-NEXT: warn_drv_not_using_clang_cpp -CHECK-NEXT: warn_drv_not_using_clang_cxx CHECK-NEXT: warn_drv_objc_gc_unsupported CHECK-NEXT: warn_drv_pch_not_first_include CHECK-NEXT: warn_dup_category_def @@ -101,7 +93,6 @@ CHECK-NEXT: warn_integer_too_large_for_signed CHECK-NEXT: warn_invalid_asm_cast_lvalue CHECK-NEXT: warn_many_braces_around_scalar_init CHECK-NEXT: warn_maynot_respond -CHECK-NEXT: warn_member_extra_qualification CHECK-NEXT: warn_method_param_redefinition CHECK-NEXT: warn_mismatched_exception_spec CHECK-NEXT: warn_missing_case_for_condition @@ -134,7 +125,6 @@ CHECK-NEXT: warn_pragma_expected_rparen CHECK-NEXT: warn_pragma_extra_tokens_at_eol CHECK-NEXT: warn_pragma_ms_struct CHECK-NEXT: warn_pragma_options_align_reset_failed -CHECK-NEXT: warn_pragma_options_align_unsupported_option CHECK-NEXT: warn_pragma_options_expected_align CHECK-NEXT: 
warn_pragma_pack_invalid_action CHECK-NEXT: warn_pragma_pack_invalid_alignment @@ -149,7 +139,6 @@ CHECK-NEXT: warn_pragma_unused_expected_var CHECK-NEXT: warn_pragma_unused_expected_var_arg CHECK-NEXT: warn_pragma_unused_undeclared_var CHECK-NEXT: warn_previous_alias_decl -CHECK-NEXT: warn_printf_asterisk_missing_arg CHECK-NEXT: warn_property_attr_mismatch CHECK-NEXT: warn_property_attribute CHECK-NEXT: warn_property_getter_owning_mismatch @@ -181,4 +170,4 @@ CHECK-NEXT: warn_weak_import The list of warnings in -Wpedantic should NEVER grow. -CHECK: Number in -Wpedantic (not covered by other -W flags): 39 +CHECK: Number in -Wpedantic (not covered by other -W flags): 29 diff --git a/test/Misc/wrong-encoding.c b/test/Misc/wrong-encoding.c index bd1cf3d..c48402d 100644 --- a/test/Misc/wrong-encoding.c +++ b/test/Misc/wrong-encoding.c @@ -1,16 +1,39 @@ -// RUN: %clang_cc1 -fsyntax-only %s 2>&1 | FileCheck -strict-whitespace %s +// RUN: %clang_cc1 -fsyntax-only -Wno-unused-value %s 2>&1 | FileCheck -strict-whitespace %s +// REQUIRES: asserts void foo() { "§Ã"; // ø // CHECK: {{^ ""; // }} -// CHECK: {{^ \^}} +// CHECK: {{^ \^~~~~~~}} /* þ« */ const char *d = "¥"; // CHECK: {{^ /\* \*/ const char \*d = "";}} -// CHECK: {{^ \^}} +// CHECK: {{^ \^~~~}} -// CHECK: {{^ ""; // }} -// CHECK: {{^ \^~~~~~~~~~}} + "xxé¿¿¿d"; +// CHECK: {{^ "xxd";}} +// CHECK: {{^ \^~~~}} + + "xxé¿bcd"; +// CHECK: {{^ "xxbcd";}} +// CHECK: {{^ \^~~~~~~~}} + + "xxéabcd"; +// CHECK: {{^ "xxabcd";}} +// CHECK: {{^ \^~~~}} + + "xxé¿é¿d"; +// CHECK: {{^ "xxd";}} +// CHECK: {{^ \^~~~~~~~~~~~~~~}} + + "xxé¿xxxxxxxxxxxxxxxxxxxxxé¿xx"; +// CHECK: {{^ "xxxxxxxxxxxxxxxxxxxxxxxxx";}} +// CHECK: {{^ \^~~~~~~~ ~~~~~~~~}} + + "?kÍ›S¥ÇØg7†, 2,Díu„†*É,pûäÚ&”‰(K§:Ñ'1á‹ÎjOÅ°<:"; + + "xé¿xé¿xé¿xé¿xé¿xé¿xé¿xé¿xé¿xé¿xé¿xé¿x"; } +// CHECK-NOT:Assertion diff --git a/test/Misc/wrong-encoding2.c b/test/Misc/wrong-encoding2.c new file mode 100644 index 0000000..43a0f4e --- /dev/null +++ b/test/Misc/wrong-encoding2.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -fsyntax-only -fmessage-length 100 %s 2>&1 | FileCheck -strict-whitespace %s +// REQUIRES: asserts + +int main() { + "É#x#p )6Ò)ѽŠ$ûž>U êhÑüÃö|Ÿ থϻgŸY|`?ò;;Æ¿VjÇ\\ù€‡ûݪW9úТ:ÌŠO EøÛy?SKªy¦¹‡Øài&n"; +} + +// CHECK-NOT:Assertion diff --git a/test/Modules/Inputs/Modified/A.h b/test/Modules/Inputs/Modified/A.h new file mode 100644 index 0000000..ff833c7 --- /dev/null +++ b/test/Modules/Inputs/Modified/A.h @@ -0,0 +1 @@ +int getA(); diff --git a/test/Modules/Inputs/Modified/B.h b/test/Modules/Inputs/Modified/B.h new file mode 100644 index 0000000..d1c8bb5 --- /dev/null +++ b/test/Modules/Inputs/Modified/B.h @@ -0,0 +1,2 @@ +#include "A.h" +int getB(); diff --git a/test/Modules/Inputs/Modified/module.map b/test/Modules/Inputs/Modified/module.map new file mode 100644 index 0000000..d9aed01 --- /dev/null +++ b/test/Modules/Inputs/Modified/module.map @@ -0,0 +1,2 @@ +module A { header "A.h" } +module B { header "B.h" } diff --git a/test/Modules/Inputs/Module.framework/Headers/Module.h b/test/Modules/Inputs/Module.framework/Headers/Module.h index f894984..3d2476b 100644 --- a/test/Modules/Inputs/Module.framework/Headers/Module.h +++ b/test/Modules/Inputs/Module.framework/Headers/Module.h @@ -23,4 +23,6 @@ const char *getModuleVersion(void); #include #include +__asm("foo"); + #endif // MODULE_H diff --git a/test/Modules/Inputs/NoUmbrella.framework/module.map b/test/Modules/Inputs/NoUmbrella.framework/module.map index 4a4d970..03a8a17 100644 --- a/test/Modules/Inputs/NoUmbrella.framework/module.map +++ 
b/test/Modules/Inputs/NoUmbrella.framework/module.map @@ -2,8 +2,5 @@ framework module NoUmbrella [system] { umbrella "Headers" module * { } - module unavailable { - requires unavailable - header "Boom.h" - } + exclude header "Boom.h" } diff --git a/test/Modules/Inputs/NotAModule.framework/Headers/NotAModule.h b/test/Modules/Inputs/NotAModule.framework/Headers/NotAModule.h new file mode 100644 index 0000000..2639792 --- /dev/null +++ b/test/Modules/Inputs/NotAModule.framework/Headers/NotAModule.h @@ -0,0 +1,2 @@ +extern int not_a_module; + diff --git a/test/Modules/Inputs/lookup_right.hpp b/test/Modules/Inputs/lookup_right.hpp index 8845347..b2611a1 100644 --- a/test/Modules/Inputs/lookup_right.hpp +++ b/test/Modules/Inputs/lookup_right.hpp @@ -1 +1,2 @@ float *f0(float*); +// expected-no-diagnostics diff --git a/test/Modules/Inputs/macros.h b/test/Modules/Inputs/macros.h index 4f53556..27f43c0 100644 --- a/test/Modules/Inputs/macros.h +++ b/test/Modules/Inputs/macros.h @@ -8,3 +8,12 @@ #__private_macro MODULE int (INTEGER); + +#if !__building_module(macros) +# error Can't include this header without building the 'macros' module. +#endif + +#ifdef __MODULE__ +extern int __MODULE__; +#endif + diff --git a/test/Modules/Inputs/macros_left.h b/test/Modules/Inputs/macros_left.h new file mode 100644 index 0000000..cd05693 --- /dev/null +++ b/test/Modules/Inputs/macros_left.h @@ -0,0 +1,14 @@ +@__experimental_modules_import macros_top; +#define LEFT unsigned long + +#undef TOP_LEFT_UNDEF + + + + +#define LEFT_RIGHT_IDENTICAL int + +#define LEFT_RIGHT_DIFFERENT2 float +#define LEFT_RIGHT_DIFFERENT3 float + +#define LEFT_RIGHT_DIFFERENT float diff --git a/test/Modules/Inputs/macros_other.h b/test/Modules/Inputs/macros_other.h new file mode 100644 index 0000000..ea686bf --- /dev/null +++ b/test/Modules/Inputs/macros_other.h @@ -0,0 +1 @@ +#define OTHER_INTEGER int diff --git a/test/Modules/Inputs/macros_right.h b/test/Modules/Inputs/macros_right.h new file mode 100644 index 0000000..e16a64b --- /dev/null +++ b/test/Modules/Inputs/macros_right.h @@ -0,0 +1,17 @@ +@__experimental_modules_import macros_top; +#define RIGHT unsigned short + + + + + + + + +#define LEFT_RIGHT_IDENTICAL int +#define LEFT_RIGHT_DIFFERENT int +#define LEFT_RIGHT_DIFFERENT2 int +#define LEFT_RIGHT_DIFFERENT3 int + +#undef TOP_RIGHT_REDEF +#define TOP_RIGHT_REDEF float diff --git a/test/Modules/Inputs/macros_right_undef.h b/test/Modules/Inputs/macros_right_undef.h new file mode 100644 index 0000000..49473e3 --- /dev/null +++ b/test/Modules/Inputs/macros_right_undef.h @@ -0,0 +1 @@ +#undef TOP_RIGHT_UNDEF diff --git a/test/Modules/Inputs/macros_top.h b/test/Modules/Inputs/macros_top.h new file mode 100644 index 0000000..9c3f3c0 --- /dev/null +++ b/test/Modules/Inputs/macros_top.h @@ -0,0 +1,16 @@ +#define TOP unsigned int + +#define TOP_LEFT_UNDEF 1 + + + + + + + + + +#define TOP_RIGHT_REDEF int + +#define TOP_RIGHT_UNDEF int + diff --git a/test/Modules/Inputs/module.map b/test/Modules/Inputs/module.map index 79056cb..032241d 100644 --- a/test/Modules/Inputs/module.map +++ b/test/Modules/Inputs/module.map @@ -18,6 +18,20 @@ module lookup_left_cxx { header "lookup_left.hpp" } module lookup_right_cxx { header "lookup_right.hpp" } module module_private_left { header "module_private_left.h" } module module_private_right { header "module_private_right.h" } +module macros_top { + header "macros_top.h" +} +module macros_left { + header "macros_left.h" + export * +} +module macros_right { + header "macros_right.h" + export * + explicit 
module undef { + header "macros_right_undef.h" + } +} module macros { header "macros.h" } module category_top { header "category_top.h" } module category_left { @@ -78,6 +92,18 @@ module namespaces_right { header "namespaces-right.h" export * } +module templates_top { + header "templates-top.h" + export * +} +module templates_left { + header "templates-left.h" + export * +} +module templates_right { + header "templates-right.h" + export * +} module MethodPoolA { header "MethodPoolA.h" } @@ -87,3 +113,7 @@ module MethodPoolB { module import_decl { header "import-decl.h" } + +framework module * { + exclude NotAModule +} diff --git a/test/Modules/Inputs/normal-module-map/nested_umbrella/1.h b/test/Modules/Inputs/normal-module-map/nested_umbrella/1.h new file mode 100644 index 0000000..e8a3e63 --- /dev/null +++ b/test/Modules/Inputs/normal-module-map/nested_umbrella/1.h @@ -0,0 +1 @@ +int one; diff --git a/test/Modules/Inputs/normal-module-map/nested_umbrella/a-extras.h b/test/Modules/Inputs/normal-module-map/nested_umbrella/a-extras.h new file mode 100644 index 0000000..91522aa --- /dev/null +++ b/test/Modules/Inputs/normal-module-map/nested_umbrella/a-extras.h @@ -0,0 +1 @@ +int extra_a; diff --git a/test/Modules/Inputs/normal-module-map/nested_umbrella/decltype.h b/test/Modules/Inputs/normal-module-map/nested_umbrella/decltype.h new file mode 100644 index 0000000..506a217 --- /dev/null +++ b/test/Modules/Inputs/normal-module-map/nested_umbrella/decltype.h @@ -0,0 +1,2 @@ +int decltype_val; + diff --git a/test/Modules/Inputs/redecl-merge-bottom.h b/test/Modules/Inputs/redecl-merge-bottom.h index 40a9404..cfea7dc 100644 --- a/test/Modules/Inputs/redecl-merge-bottom.h +++ b/test/Modules/Inputs/redecl-merge-bottom.h @@ -18,11 +18,3 @@ struct S3; void refers_to_C4(C4*); -#ifdef __cplusplus -template class Vector; - -template class Vector; - -template class Vector; -#endif - diff --git a/test/Modules/Inputs/redecl-merge-left.h b/test/Modules/Inputs/redecl-merge-left.h index b3a7ba8..5e6d2e5 100644 --- a/test/Modules/Inputs/redecl-merge-left.h +++ b/test/Modules/Inputs/redecl-merge-left.h @@ -60,7 +60,7 @@ typedef int T1; typedef float T2; int func0(int); -int func1(int); +int func1(int x) { return x; } int func2(int); @@ -78,12 +78,6 @@ extern float var2; extern double var3; -#ifdef __cplusplus -template class Vector; - -template class Vector; -#endif - // Make sure this doesn't introduce an ambiguity-creating 'id' at the // top level. 
typedef void funcptr_with_id(int id); diff --git a/test/Modules/Inputs/redecl-merge-right.h b/test/Modules/Inputs/redecl-merge-right.h index de7aa08..2022308 100644 --- a/test/Modules/Inputs/redecl-merge-right.h +++ b/test/Modules/Inputs/redecl-merge-right.h @@ -65,7 +65,7 @@ typedef double T2; int func0(int); int func1(int); int func1(int); -int func1(int); +int func1(int x) { return x; } int func1(int); static int func2(int); @@ -78,13 +78,6 @@ extern int var2; static double var3; -#ifdef __cplusplus -template<typename T> class Vector { -public: - void push_back(const T&); -}; -#endif - int ONE; @__experimental_modules_import redecl_merge_top.Explicit; const int one = ONE; diff --git a/test/Modules/Inputs/redecl-merge-top.h b/test/Modules/Inputs/redecl-merge-top.h index 519254c..690e6df 100644 --- a/test/Modules/Inputs/redecl-merge-top.h +++ b/test/Modules/Inputs/redecl-merge-top.h @@ -15,6 +15,4 @@ struct S1; struct S2; struct S2; -#ifdef __cplusplus -template<typename T> class Vector; -#endif +int func1(int); diff --git a/test/Modules/Inputs/templates-left.h b/test/Modules/Inputs/templates-left.h new file mode 100644 index 0000000..57a8c85 --- /dev/null +++ b/test/Modules/Inputs/templates-left.h @@ -0,0 +1,29 @@ +@__experimental_modules_import templates_top; + +template<typename T> class Vector; + +template<typename T> class Vector; + +template<typename T> class List; +template<> class List<bool> { +public: + void push_back(int); +}; +namespace N { + template<typename T> class Set; +} +namespace N { + template<typename T> class Set { + public: + void insert(T); + }; +} + +template<typename T> +void pendingInstantiationEmit(T) {} +void triggerPendingInstantiation() { + pendingInstantiationEmit(12); + pendingInstantiationEmit(42.); +} + +void redeclDefinitionEmit(){} diff --git a/test/Modules/Inputs/templates-right.h b/test/Modules/Inputs/templates-right.h new file mode 100644 index 0000000..4ef4a32 --- /dev/null +++ b/test/Modules/Inputs/templates-right.h @@ -0,0 +1,27 @@ +@__experimental_modules_import templates_top; + +template<typename T> class Vector { +public: + void push_back(const T&); +}; + +template<typename T> class List; +template<> class List<bool> { +public: + void push_back(int); +}; + +namespace N { + template<typename T> class Set { + public: + void insert(T); + }; +} + +template<typename T> +void pendingInstantiationEmit(T) {} +void triggerPendingInstantiationToo() { + pendingInstantiationEmit(12); +} + +void redeclDefinitionEmit(){} diff --git a/test/Modules/Inputs/templates-top.h b/test/Modules/Inputs/templates-top.h new file mode 100644 index 0000000..5985ee8 --- /dev/null +++ b/test/Modules/Inputs/templates-top.h @@ -0,0 +1,17 @@ +template<typename T> class Vector; + +template<typename T> class List { +public: + void push_back(T); +}; + +namespace A { + class Y { + template<typename T> friend class WhereAmI; + }; +} + +template<typename T> class A::WhereAmI { +public: + static void func() {} +}; diff --git a/test/Modules/compiler_builtins.m b/test/Modules/compiler_builtins.m index de6f57b..dfa46c8 100644 --- a/test/Modules/compiler_builtins.m +++ b/test/Modules/compiler_builtins.m @@ -1,5 +1,7 @@ // RUN: rm -rf %t // RUN: %clang -fsyntax-only -fmodules -fmodule-cache-path %t -D__need_wint_t %s -Xclang -verify +// RUN: %clang -fsyntax-only -std=c99 -fmodules -fmodule-cache-path %t -D__need_wint_t %s -Xclang -verify +// expected-no-diagnostics #ifdef __SSE__ @__experimental_modules_import _Builtin_intrinsics.intel.sse; diff --git a/test/Modules/direct-module-import.m b/test/Modules/direct-module-import.m new file mode 100644 index 0000000..317d7ae --- /dev/null +++ b/test/Modules/direct-module-import.m @@ -0,0 +1,7 @@ +// RUN: rm -rf %t +// RUN: %clang_cc1
-fmodule-cache-path %t -fmodules -F %S/Inputs -include Module/Module.h %s -emit-llvm -o - | FileCheck %s + +// CHECK: call i8* @getModuleVersion +const char* getVer(void) { + return getModuleVersion(); +} diff --git a/test/Modules/header-import.m b/test/Modules/header-import.m index 5444854..49549d0 100644 --- a/test/Modules/header-import.m +++ b/test/Modules/header-import.m @@ -1,5 +1,6 @@ // RUN: rm -rf %t // RUN: %clang_cc1 -fmodules -fmodule-cache-path %t -F %S/Inputs -I %S/Inputs -verify %s +// expected-no-diagnostics #import "point.h" @__experimental_modules_import Module; diff --git a/test/Modules/import-decl.cpp b/test/Modules/import-decl.cpp index 7696693..0f05f92 100644 --- a/test/Modules/import-decl.cpp +++ b/test/Modules/import-decl.cpp @@ -1,6 +1,6 @@ // RUN: rm -rf %t // RUN: %clang -fmodule-cache-path %t -fmodules -x objective-c -I %S/Inputs -emit-ast -o %t.ast %s -// RUN: %clang -cc1 -ast-print -x ast - < %t.ast | FileCheck %s +// RUN: %clang_cc1 -ast-print -x ast - < %t.ast | FileCheck %s @__experimental_modules_import import_decl; // CHECK: struct T diff --git a/test/Modules/inferred-frameworks.m b/test/Modules/inferred-frameworks.m new file mode 100644 index 0000000..916c900 --- /dev/null +++ b/test/Modules/inferred-frameworks.m @@ -0,0 +1,8 @@ +// RUN: rm -rf %t +// RUN: %clang_cc1 -x objective-c -Wauto-import -fmodule-cache-path %t -fmodules -F %S/Inputs %s -verify + +#include + +@__experimental_modules_import NotAModule; // expected-error{{module 'NotAModule' not found}} + + diff --git a/test/Modules/inferred-submodules.m b/test/Modules/inferred-submodules.m index bee1cec..8c61bc0 100644 --- a/test/Modules/inferred-submodules.m +++ b/test/Modules/inferred-submodules.m @@ -1,5 +1,6 @@ // RUN: rm -rf %t // RUN: %clang_cc1 -x objective-c -Wauto-import -fmodule-cache-path %t -fmodules -F %S/Inputs %s -verify +// expected-no-diagnostics @__experimental_modules_import Module.Sub; diff --git a/test/Modules/macros.c b/test/Modules/macros.c index 83e1c66..8db3915 100644 --- a/test/Modules/macros.c +++ b/test/Modules/macros.c @@ -1,8 +1,20 @@ // RUN: rm -rf %t +// RUN: %clang_cc1 -fmodules -x objective-c -emit-module -fmodule-cache-path %t -fmodule-name=macros_top %S/Inputs/module.map +// RUN: %clang_cc1 -fmodules -x objective-c -emit-module -fmodule-cache-path %t -fmodule-name=macros_left %S/Inputs/module.map +// RUN: %clang_cc1 -fmodules -x objective-c -emit-module -fmodule-cache-path %t -fmodule-name=macros_right %S/Inputs/module.map // RUN: %clang_cc1 -fmodules -x objective-c -emit-module -fmodule-cache-path %t -fmodule-name=macros %S/Inputs/module.map // RUN: %clang_cc1 -fmodules -x objective-c -verify -fmodule-cache-path %t %s // RUN: %clang_cc1 -E -fmodules -x objective-c -fmodule-cache-path %t %s | FileCheck -check-prefix CHECK-PREPROCESSED %s // FIXME: When we have a syntax for modules in C, use that. +// These notes come from headers in modules, and are bogus. + +// FIXME: expected-note{{previous definition is here}} +// expected-note{{other definition of 'LEFT_RIGHT_DIFFERENT'}} +// expected-note{{expanding this definition of 'TOP_RIGHT_REDEF'}} +// FIXME: expected-note{{previous definition is here}} \ +// expected-note{{expanding this definition of 'LEFT_RIGHT_DIFFERENT'}} + +// expected-note{{other definition of 'TOP_RIGHT_REDEF'}} @__experimental_modules_import macros; @@ -27,4 +39,99 @@ DOUBLE *dp = &d; void f() { // CHECK-PREPROCESSED: int i = INTEGER; int i = INTEGER; // the value was exported, the macro was not. 
+ i += macros; // expanded from __MODULE__ within the 'macros' module. +} + +#ifdef __MODULE__ +# error Not building a module! +#endif + +#if __building_module(macros) +# error Not building a module +#endif + +// None of the modules we depend on have been imported, and therefore +// their macros should not be visible. +#ifdef LEFT +# error LEFT should not be visible +#endif + +#ifdef RIGHT +# error RIGHT should not be visible +#endif + +#ifdef TOP +# error TOP should not be visible +#endif + +// Import left module (which also imports top) +@__experimental_modules_import macros_left; + +#ifndef LEFT +# error LEFT should be visible +#endif + +#ifdef RIGHT +# error RIGHT should not be visible +#endif + +#ifndef TOP +# error TOP should be visible +#endif + +#ifdef TOP_LEFT_UNDEF +# error TOP_LEFT_UNDEF should not be visible +#endif + +void test1() { + int i; + TOP_RIGHT_REDEF *ip = &i; } + +#define LEFT_RIGHT_DIFFERENT2 double // FIXME: expected-warning{{'LEFT_RIGHT_DIFFERENT2' macro redefined}} + +// Import right module (which also imports top) +@__experimental_modules_import macros_right; + +#undef LEFT_RIGHT_DIFFERENT3 + +#ifndef LEFT +# error LEFT should be visible +#endif + +#ifndef RIGHT +# error RIGHT should be visible +#endif + +#ifndef TOP +# error TOP should be visible +#endif + +void test2() { + int i; + float f; + double d; + TOP_RIGHT_REDEF *ip = &i; // expected-warning{{ambiguous expansion of macro 'TOP_RIGHT_REDEF'}} + + LEFT_RIGHT_IDENTICAL *ip2 = &i; + LEFT_RIGHT_DIFFERENT *fp = &f; // expected-warning{{ambiguous expansion of macro 'LEFT_RIGHT_DIFFERENT'}} + LEFT_RIGHT_DIFFERENT2 *dp = &d; + int LEFT_RIGHT_DIFFERENT3; +} + +#define LEFT_RIGHT_DIFFERENT double // FIXME: expected-warning{{'LEFT_RIGHT_DIFFERENT' macro redefined}} + +void test3() { + double d; + LEFT_RIGHT_DIFFERENT *dp = &d; // okay +} + +#ifndef TOP_RIGHT_UNDEF +# error TOP_RIGHT_UNDEF should still be defined +#endif + +@__experimental_modules_import macros_right.undef; + +#ifdef TOP_RIGHT_UNDEF +# error TOP_RIGHT_UNDEF should not be defined +#endif diff --git a/test/Modules/modify-module.m b/test/Modules/modify-module.m new file mode 100644 index 0000000..b630ac1 --- /dev/null +++ b/test/Modules/modify-module.m @@ -0,0 +1,23 @@ +// Test that if we modify one of the input files used to form a +// header, that module and dependent modules get rebuilt. 
+ +// RUN: rm -rf %t +// RUN: mkdir -p %t/include +// RUN: cp %S/Inputs/Modified/A.h %t/include +// RUN: cp %S/Inputs/Modified/B.h %t/include +// RUN: cp %S/Inputs/Modified/module.map %t/include +// RUN: %clang_cc1 -fmodule-cache-path %t/cache -fmodules -I %t/include %s -verify +// expected-no-diagnostics +// RUN: touch %t/include/B.h +// RUN: %clang_cc1 -fmodule-cache-path %t/cache -fmodules -I %t/include %s -verify +// RUN: echo 'int getA(); int getA2();' > %t/include/A.h +// RUN: %clang_cc1 -fmodule-cache-path %t/cache -fmodules -I %t/include %s -verify + +@__experimental_modules_import B; + +int getValue() { return getA() + getB(); } + + + + + diff --git a/test/Modules/module-private.cpp b/test/Modules/module-private.cpp index 246dcaf..31a3410 100644 --- a/test/Modules/module-private.cpp +++ b/test/Modules/module-private.cpp @@ -12,9 +12,12 @@ void test() { } int test_broken() { - HiddenStruct hidden; // expected-error{{use of undeclared identifier 'HiddenStruct'}} + HiddenStruct hidden; // \ + // expected-error{{must use 'struct' tag to refer to type 'HiddenStruct' in this scope}} \ + // expected-error{{definition of 'struct HiddenStruct' must be imported}} + // expected-note@3 {{previous definition is here}} - Integer i; // expected-error{{use of undeclared identifier 'Integer'}} + Integer i; // expected-error{{unknown type name 'Integer'}} int *ip = 0; f1(ip); // expected-error{{use of undeclared identifier 'f1'}} diff --git a/test/Modules/normal-module-map.cpp b/test/Modules/normal-module-map.cpp index 7cd4482..07ca5ed 100644 --- a/test/Modules/normal-module-map.cpp +++ b/test/Modules/normal-module-map.cpp @@ -33,3 +33,13 @@ int testNestedUmbrellaBFail() { int testNestedUmbrellaB() { return nested_umbrella_b; } + +@__experimental_modules_import nested_umbrella.a_extras; + +@__experimental_modules_import nested_umbrella._1; + +@__experimental_modules_import nested_umbrella.decltype_; + +int testSanitizedName() { + return extra_a + one + decltype_val; +} diff --git a/test/Modules/on-demand-macros.m b/test/Modules/on-demand-macros.m index 2b8c545..8b50529 100644 --- a/test/Modules/on-demand-macros.m +++ b/test/Modules/on-demand-macros.m @@ -1,6 +1,7 @@ // RUN: rm -rf %t // RUN: %clang_cc1 -fmodules -fmodule-cache-path %t -F %S/Inputs -DFOO_RETURNS_INT_PTR -verify %s // RUN: %clang_cc1 -fmodules -fmodule-cache-path %t -F %S/Inputs -verify %s +// expected-no-diagnostics @__experimental_modules_import CmdLine; diff --git a/test/Modules/redecl-merge.m b/test/Modules/redecl-merge.m index d7930ac..d722414 100644 --- a/test/Modules/redecl-merge.m +++ b/test/Modules/redecl-merge.m @@ -1,6 +1,5 @@ // RUN: rm -rf %t // RUN: %clang_cc1 -fmodules -fmodule-cache-path %t -I %S/Inputs %s -verify -Wno-objc-root-class -// RUN: %clang_cc1 -x objective-c++ -fmodules -fmodule-cache-path %t -I %S/Inputs %s -verify -Wno-objc-root-class @class C2; @class C3; @class C3; @@ -57,26 +56,26 @@ void testTagMerge() { void testTypedefMerge(int i, double d) { T1 *ip = &i; - // in other file: expected-note{{candidate found by name lookup is 'T2'}} // FIXME: Typedefs aren't actually merged in the sense of other merges, because // we should only merge them when the types are identical. 
- // in other file: expected-note{{candidate found by name lookup is 'T2'}} - // in other file: expected-note{{candidate function}} + // in other file: expected-note@60{{candidate found by name lookup is 'T2'}} + // in other file: expected-note@63{{candidate found by name lookup is 'T2'}} T2 *dp = &d; // expected-error{{reference to 'T2' is ambiguous}} } void testFuncMerge(int i) { func0(i); - // in other file: expected-note{{candidate function}} func1(i); + // in other file: expected-note@64{{candidate function}} + // in other file: expected-note@70{{candidate function}} func2(i); // expected-error{{call to 'func2' is ambiguous}} } void testVarMerge(int i) { var1 = i; - // in other files: expected-note 2{{candidate found by name lookup is 'var2'}} + // in other files: expected-note@77 2{{candidate found by name lookup is 'var2'}} var2 = i; // expected-error{{reference to 'var2' is ambiguous}} - // in other files: expected-note 2{{candidate found by name lookup is 'var3'}} + // in other files: expected-note@79 2{{candidate found by name lookup is 'var3'}} var3 = i; // expected-error{{reference to 'var3' is ambiguous}} } @@ -146,13 +145,6 @@ void g(A *a) { id p4; id p3; -#ifdef __cplusplus -void testVector() { - Vector vec_int; - vec_int.push_back(0); -} -#endif - // Make sure we don't get conflicts with 'id'. funcptr_with_id fid; id id_global; diff --git a/test/Modules/redeclarations.m b/test/Modules/redeclarations.m index 3f3e695..221e154 100644 --- a/test/Modules/redeclarations.m +++ b/test/Modules/redeclarations.m @@ -8,4 +8,5 @@ // RUN: %clang_cc1 -fmodules -x objective-c -fmodule-cache-path %t -emit-module -fmodule-name=redeclarations_left %S/Inputs/module.map // RUN: %clang_cc1 -fmodules -x objective-c -fmodule-cache-path %t -emit-module -fmodule-name=redeclarations_right %S/Inputs/module.map // RUN: %clang_cc1 -fmodules -fmodule-cache-path %t %s -verify +// expected-no-diagnostics diff --git a/test/Modules/submodules.m b/test/Modules/submodules.m index e014bea..a758abc 100644 --- a/test/Modules/submodules.m +++ b/test/Modules/submodules.m @@ -1,6 +1,7 @@ // RUN: rm -rf %t // RUN: %clang_cc1 -Wauto-import -fmodule-cache-path %t -fmodules -F %S/Inputs %s -verify +// expected-no-diagnostics // Note: transitively imports Module.Sub2. 
@__experimental_modules_import Module.Sub; diff --git a/test/Modules/templates.mm b/test/Modules/templates.mm new file mode 100644 index 0000000..4541740 --- /dev/null +++ b/test/Modules/templates.mm @@ -0,0 +1,36 @@ +// RUN: rm -rf %t +// RUN: %clang_cc1 -x objective-c++ -fmodules -fmodule-cache-path %t -I %S/Inputs -verify %s -Wno-objc-root-class +// RUN: %clang_cc1 -x objective-c++ -fmodules -fmodule-cache-path %t -I %S/Inputs -emit-llvm %s -o - -Wno-objc-root-class | grep Emit | FileCheck %s +// expected-no-diagnostics + +@__experimental_modules_import templates_left; +@__experimental_modules_import templates_right; + + +void testTemplateClasses() { + Vector<int> vec_int; + vec_int.push_back(0); + + List<bool> list_bool; + list_bool.push_back(false); + + N::Set<char> set_char; + set_char.insert('A'); +} + +void testPendingInstantiations() { + // CHECK: call {{.*pendingInstantiationEmit}} + // CHECK: call {{.*pendingInstantiationEmit}} + // CHECK: define {{.*pendingInstantiationEmit.*[(]i}} + // CHECK: define {{.*pendingInstantiationEmit.*[(]double}} + triggerPendingInstantiation(); + triggerPendingInstantiationToo(); +} + +void testRedeclDefinition() { + // CHECK: define {{.*redeclDefinitionEmit}} + redeclDefinitionEmit(); +} + +// CHECK: call {{.*pendingInstantiation}} +// CHECK: call {{.*redeclDefinitionEmit}} diff --git a/test/PCH/Inputs/badpch-dir.h.gch/.keep b/test/PCH/Inputs/badpch-dir.h.gch/.keep new file mode 100644 index 0000000..e69de29 diff --git a/test/PCH/Inputs/badpch-empty.h.gch b/test/PCH/Inputs/badpch-empty.h.gch new file mode 100644 index 0000000..e69de29 diff --git a/test/PCH/Inputs/case-insensitive-include.h b/test/PCH/Inputs/case-insensitive-include.h new file mode 100644 index 0000000..60bdf36 --- /dev/null +++ b/test/PCH/Inputs/case-insensitive-include.h @@ -0,0 +1,5 @@ +#pragma once + +struct S { + int x; +}; diff --git a/test/PCH/Inputs/chain-macro-override1.h b/test/PCH/Inputs/chain-macro-override1.h index d956396..7532d7e 100644 --- a/test/PCH/Inputs/chain-macro-override1.h +++ b/test/PCH/Inputs/chain-macro-override1.h @@ -3,3 +3,7 @@ void g(); #define g() f() #define h() f() #define x x +#define h2() f() + +#define h3() +#undef h3 diff --git a/test/PCH/Inputs/chain-macro-override2.h b/test/PCH/Inputs/chain-macro-override2.h index e4bff77..1e0e3b9 100644 --- a/test/PCH/Inputs/chain-macro-override2.h +++ b/test/PCH/Inputs/chain-macro-override2.h @@ -3,3 +3,6 @@ #undef h #define h() g() int x; +#undef h2 + +int h3(); diff --git a/test/PCH/Inputs/cxx11-statement-attributes.h b/test/PCH/Inputs/cxx11-statement-attributes.h new file mode 100644 index 0000000..f4d0619 --- /dev/null +++ b/test/PCH/Inputs/cxx11-statement-attributes.h @@ -0,0 +1,14 @@ +// To be used with cxx11-statement-attributes.cpp. +template <int N> +int f(int n) { + switch (n * N) { + case 0: + n += 15; + [[clang::fallthrough]]; // This shouldn't generate a warning. + case 1: + n += 20; + [[clang::fallthrough]]; // This should generate a warning: "fallthrough annotation does not directly precede switch label".
+ break; + } + return n; +} diff --git a/test/PCH/__va_list_tag.c b/test/PCH/__va_list_tag.c index 23c54ea..efe5c1b 100644 --- a/test/PCH/__va_list_tag.c +++ b/test/PCH/__va_list_tag.c @@ -7,6 +7,8 @@ // RUN: %clang_cc1 -triple=x86_64-unknown-freebsd7.0 -emit-pch -x c-header -o %t %S/Inputs/__va_list_tag.h // RUN: %clang_cc1 -triple=x86_64-unknown-freebsd7.0 -include-pch %t %s -verify +// expected-no-diagnostics + int myvprintf(const char *fmt, va_list args) { return myvfprintf(fmt, args); } diff --git a/test/PCH/asm.c b/test/PCH/asm.c index 99bc14d..160829b 100644 --- a/test/PCH/asm.c +++ b/test/PCH/asm.c @@ -5,6 +5,7 @@ // RUN: %clang_cc1 -triple i386-unknown-unknown -emit-pch -o %t %S/asm.h // RUN: %clang_cc1 -triple i386-unknown-unknown -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics void call_f(void) { f(); } diff --git a/test/PCH/badpch-dir.h.gch/.keep b/test/PCH/badpch-dir.h.gch/.keep deleted file mode 100644 index e69de29..0000000 diff --git a/test/PCH/badpch-empty.h.gch b/test/PCH/badpch-empty.h.gch deleted file mode 100644 index e69de29..0000000 diff --git a/test/PCH/badpch.c b/test/PCH/badpch.c index e687ef3..f34e3d6 100644 --- a/test/PCH/badpch.c +++ b/test/PCH/badpch.c @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -fsyntax-only -include-pch %S/badpch-empty.h.gch %s 2>&1 | FileCheck -check-prefix=CHECK-EMPTY %s -// RUN: %clang_cc1 -fsyntax-only -include-pch %S/badpch-dir.h.gch %s 2>&1 | FileCheck -check-prefix=CHECK-DIR %s +// RUN: %clang_cc1 -fsyntax-only -include-pch %S/Inputs/badpch-empty.h.gch %s 2>&1 | FileCheck -check-prefix=CHECK-EMPTY %s +// RUN: %clang_cc1 -fsyntax-only -include-pch %S/Inputs/badpch-dir.h.gch %s 2>&1 | FileCheck -check-prefix=CHECK-DIR %s // The purpose of this test is to verify that various invalid PCH files are // reported as such. @@ -10,4 +10,4 @@ // submitted on 2012-02-06 introduced a segfault in the case where the PCH is // an empty file and clang was built with assertions. // CHECK-EMPTY: error: input is not a PCH file: '{{.*[/\\]}}badpch-empty.h.gch' -// CHECK-DIR: error: unable to read PCH file {{.*[/\\]}}badpch-dir.h.gch: +// CHECK-DIR:error: no suitable precompiled header file found in directory '{{.*[/\\]}}badpch-dir.h.gch diff --git a/test/PCH/builtins.c b/test/PCH/builtins.c index eed2224..da9eef7 100644 --- a/test/PCH/builtins.c +++ b/test/PCH/builtins.c @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -emit-pch -o %t %S/builtins.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + void hello() { printf("Hello, World!"); } diff --git a/test/PCH/case-insensitive-include.c b/test/PCH/case-insensitive-include.c new file mode 100644 index 0000000..707de70 --- /dev/null +++ b/test/PCH/case-insensitive-include.c @@ -0,0 +1,29 @@ +// REQUIRES: case-insensitive-filesystem + +// Test this without pch. +// RUN: cp %S/Inputs/case-insensitive-include.h %T +// RUN: %clang_cc1 -fsyntax-only %s -include %s -I %T -verify + +// Test with pch. +// RUN: %clang_cc1 -emit-pch -o %t.pch %s -I %T + +// Modify inode of the header. 
+// RUN: cp %T/case-insensitive-include.h %t.copy +// RUN: touch -r %T/case-insensitive-include.h %t.copy +// RUN: mv %t.copy %T/case-insensitive-include.h + +// RUN: %clang_cc1 -fsyntax-only %s -include-pch %t.pch -I %T -verify + +// expected-no-diagnostics + +#ifndef HEADER +#define HEADER + +#include "case-insensitive-include.h" +#include "Case-Insensitive-Include.h" + +#else + +#include "Case-Insensitive-Include.h" + +#endif diff --git a/test/PCH/chain-categories.m b/test/PCH/chain-categories.m index 1b91c73..7836e09 100644 --- a/test/PCH/chain-categories.m +++ b/test/PCH/chain-categories.m @@ -4,6 +4,8 @@ // With PCH // RUN: %clang_cc1 -fsyntax-only -verify %s -chain-include %s -chain-include %s +// expected-no-diagnostics + #ifndef HEADER1 #define HEADER1 //===----------------------------------------------------------------------===// diff --git a/test/PCH/chain-class-extension.m b/test/PCH/chain-class-extension.m index c99d6d4..03fdee7 100644 --- a/test/PCH/chain-class-extension.m +++ b/test/PCH/chain-class-extension.m @@ -4,6 +4,8 @@ // With PCH // RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-apple-darwin10 -fobjc-arc %s -chain-include %s -chain-include %s +// expected-no-diagnostics + #ifndef HEADER1 #define HEADER1 //===----------------------------------------------------------------------===// diff --git a/test/PCH/chain-cxx.cpp b/test/PCH/chain-cxx.cpp index 0d50e61..4b64f51 100644 --- a/test/PCH/chain-cxx.cpp +++ b/test/PCH/chain-cxx.cpp @@ -6,6 +6,8 @@ // With PCH // RUN: %clang_cc1 -fsyntax-only -verify %s -chain-include %s -chain-include %s +// expected-no-diagnostics + #ifndef HEADER1 #define HEADER1 //===----------------------------------------------------------------------===// diff --git a/test/PCH/chain-decls.c b/test/PCH/chain-decls.c index f5724c4..bffa092 100644 --- a/test/PCH/chain-decls.c +++ b/test/PCH/chain-decls.c @@ -7,6 +7,8 @@ // RUN: %clang_cc1 -include-pch %t2 -fsyntax-only -verify %s // RUN: %clang_cc1 -ast-print -include-pch %t2 %s | FileCheck %s +// expected-no-diagnostics + // CHECK: void f(); // CHECK: void g(); diff --git a/test/PCH/chain-macro-override.c b/test/PCH/chain-macro-override.c index 2713e70..24a7b3e 100644 --- a/test/PCH/chain-macro-override.c +++ b/test/PCH/chain-macro-override.c @@ -10,5 +10,7 @@ int foo() { f(); g(); h(); + h2(); // expected-warning{{implicit declaration of function 'h2' is invalid in C99}} + h3(); return x; } diff --git a/test/PCH/chain-macro.c b/test/PCH/chain-macro.c index 18356f7..b0fd63d 100644 --- a/test/PCH/chain-macro.c +++ b/test/PCH/chain-macro.c @@ -2,6 +2,7 @@ // RUN: %clang_cc1 -emit-pch -o %t2 -detailed-preprocessing-record %S/Inputs/chain-macro2.h -include-pch %t1 // RUN: %clang_cc1 -fsyntax-only -verify -include-pch %t2 %s // RUN: %clang_cc1 -ast-print -include-pch %t2 %s | FileCheck %s +// expected-no-diagnostics // CHECK: void f(); FOOBAR diff --git a/test/PCH/chain-remap-types.m b/test/PCH/chain-remap-types.m index 585da44..13f2e39 100644 --- a/test/PCH/chain-remap-types.m +++ b/test/PCH/chain-remap-types.m @@ -2,6 +2,7 @@ // RUN: %clang_cc1 -emit-pch -x objective-c-header -o %t2 %S/Inputs/chain-remap-types2.h -include-pch %t1 // RUN: %clang_cc1 -include-pch %t2 -fsyntax-only -verify %s // RUN: %clang_cc1 -ast-print -include-pch %t2 %s | FileCheck %s +// expected-no-diagnostics // CHECK: @class X; // CHECK: struct Y diff --git a/test/PCH/cmdline-include.c b/test/PCH/cmdline-include.c index ad45192..556c28ea 100644 --- a/test/PCH/cmdline-include.c +++ b/test/PCH/cmdline-include.c @@ -2,5 +2,6 
@@ // RUN: %clang_cc1 %s -include-pch %t -fsyntax-only -verify // RUN: %clang_cc1 -x c-header %S/cmdline-include1.h -emit-pch -o %t // RUN: %clang_cc1 %s -include-pch %t -include %S/cmdline-include2.h -fsyntax-only -verify +// expected-no-diagnostics int g = x1 + x2; diff --git a/test/PCH/cxx-exprs.cpp b/test/PCH/cxx-exprs.cpp index 9cd3194..b7707e0 100644 --- a/test/PCH/cxx-exprs.cpp +++ b/test/PCH/cxx-exprs.cpp @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -std=c++11 -emit-pch -o %t %s // RUN: %clang_cc1 -include-pch %t -verify -std=c++11 %s +// expected-no-diagnostics + #ifndef HEADER #define HEADER diff --git a/test/PCH/cxx-for-range.h b/test/PCH/cxx-for-range.h index f15c7e7..8f50f2f 100644 --- a/test/PCH/cxx-for-range.h +++ b/test/PCH/cxx-for-range.h @@ -9,11 +9,12 @@ struct T { }; char *begin(T); char *end(T); -struct U { }; -namespace std { +namespace NS { + struct U { }; char *begin(U); char *end(U); } +using NS::U; void f() { char a[3] = { 0, 1, 2 }; diff --git a/test/PCH/cxx-friends.cpp b/test/PCH/cxx-friends.cpp index bdba42b..f7d45ce 100644 --- a/test/PCH/cxx-friends.cpp +++ b/test/PCH/cxx-friends.cpp @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -x c++-header -emit-pch -o %t %S/cxx-friends.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + class F { void m() { A* a; diff --git a/test/PCH/cxx-functions.cpp b/test/PCH/cxx-functions.cpp index 74df01a..3b4fe77 100644 --- a/test/PCH/cxx-functions.cpp +++ b/test/PCH/cxx-functions.cpp @@ -4,6 +4,8 @@ // RUN: %clang_cc1 -x c++-header -emit-pch -o %t %S/cxx-functions.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + void test_foo() { foo(); diff --git a/test/PCH/cxx-implicit-moves.cpp b/test/PCH/cxx-implicit-moves.cpp index ccdc874..ff6322d 100644 --- a/test/PCH/cxx-implicit-moves.cpp +++ b/test/PCH/cxx-implicit-moves.cpp @@ -1,6 +1,7 @@ // Test with PCH // RUN: %clang_cc1 -std=c++11 -x c++-header -emit-pch -o %t %s // RUN: %clang_cc1 -std=c++11 -include-pch %t -verify %s +// expected-no-diagnostics // PR10847 #ifndef HEADER diff --git a/test/PCH/cxx-method.cpp b/test/PCH/cxx-method.cpp index 6ec65b2..40490ea 100644 --- a/test/PCH/cxx-method.cpp +++ b/test/PCH/cxx-method.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -x c++ -emit-pch %S/Inputs/cxx-method.h -o %t // RUN: %clang_cc1 -include-pch %t -verify %s +// expected-no-diagnostics void S::m(int x) { } diff --git a/test/PCH/cxx-ms-function-specialization-class-scope.cpp b/test/PCH/cxx-ms-function-specialization-class-scope.cpp index 1803a11..afbb80b 100644 --- a/test/PCH/cxx-ms-function-specialization-class-scope.cpp +++ b/test/PCH/cxx-ms-function-specialization-class-scope.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fms-extensions -triple i386-unknown-unknown -x c++-header -emit-pch -o %t %S/cxx-ms-function-specialization-class-scope.h // RUN: %clang_cc1 -fms-extensions -triple i386-unknown-unknown -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics void test2() diff --git a/test/PCH/cxx-namespaces.cpp b/test/PCH/cxx-namespaces.cpp index 0fd3de7..e0ff27c 100644 --- a/test/PCH/cxx-namespaces.cpp +++ b/test/PCH/cxx-namespaces.cpp @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -x c++-header -emit-pch -o %t %S/cxx-namespaces.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + void m() { N::x = 0; } diff --git a/test/PCH/cxx-templates.cpp b/test/PCH/cxx-templates.cpp index 7ce2477..d27e9ca 100644 --- a/test/PCH/cxx-templates.cpp +++ b/test/PCH/cxx-templates.cpp @@ -1,11 +1,13 @@ // Test this 
without pch. -// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -include %S/cxx-templates.h -verify %s -ast-dump -o - -// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -include %S/cxx-templates.h %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -std=c++11 -fcxx-exceptions -fexceptions -include %S/cxx-templates.h -verify %s -ast-dump -o - +// RUN: %clang_cc1 -std=c++11 -fcxx-exceptions -fexceptions -include %S/cxx-templates.h %s -emit-llvm -o - | FileCheck %s // Test with pch. -// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -x c++-header -emit-pch -o %t %S/cxx-templates.h -// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -include-pch %t -verify %s -ast-dump -o - -// RUN: %clang_cc1 -fcxx-exceptions -fexceptions -include-pch %t %s -emit-llvm -o - | FileCheck %s +// RUN: %clang_cc1 -std=c++11 -fcxx-exceptions -fexceptions -x c++-header -emit-pch -o %t %S/cxx-templates.h +// RUN: %clang_cc1 -std=c++11 -fcxx-exceptions -fexceptions -include-pch %t -verify %s -ast-dump -o - +// RUN: %clang_cc1 -std=c++11 -fcxx-exceptions -fexceptions -include-pch %t %s -emit-llvm -o - | FileCheck %s + +// expected-no-diagnostics // CHECK: define weak_odr void @_ZN2S4IiE1mEv // CHECK: define linkonce_odr void @_ZN2S3IiE1mEv @@ -68,3 +70,12 @@ Foo< D >& Foo< D >::operator=( const Foo& other ) { return *this; } + +namespace TestNestedExpansion { + struct Int { + Int(int); + friend Int operator+(Int, Int); + }; + Int &g(Int, int, double); + Int &test = NestedExpansion<char, char, char>().f(0, 1, 2, Int(3), 4, 5.0); +} diff --git a/test/PCH/cxx-templates.h b/test/PCH/cxx-templates.h index 152e8ce..756f208 100644 --- a/test/PCH/cxx-templates.h +++ b/test/PCH/cxx-templates.h @@ -215,3 +215,8 @@ class Foo : protected T public: Foo& operator=( const Foo& other ); }; + +template<typename ...A> struct NestedExpansion { + template<typename ...B> auto f(A...a, B...b) -> decltype(g(a + b...)); +}; +template struct NestedExpansion<char, char, char>; diff --git a/test/PCH/cxx-traits.cpp b/test/PCH/cxx-traits.cpp index 3df3479..938f36f 100644 --- a/test/PCH/cxx-traits.cpp +++ b/test/PCH/cxx-traits.cpp @@ -4,6 +4,8 @@ // RUN: %clang_cc1 -x c++-header -std=c++11 -emit-pch -o %t %S/cxx-traits.h // RUN: %clang_cc1 -std=c++11 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + bool _Is_pod_comparator = __is_pod<int>::__value; bool _Is_empty_check = __is_empty<int>::__value; diff --git a/test/PCH/cxx-typeid.cpp b/test/PCH/cxx-typeid.cpp index 41dd544..d1e0f9d 100644 --- a/test/PCH/cxx-typeid.cpp +++ b/test/PCH/cxx-typeid.cpp @@ -4,6 +4,8 @@ // RUN: %clang -ccc-pch-is-pch -x c++-header -o %t.gch %S/cxx-typeid.h // RUN: %clang -ccc-pch-is-pch -include %t -fsyntax-only -Xclang -verify %s +// expected-no-diagnostics + void f() { (void)typeid(int); } diff --git a/test/PCH/cxx-variadic-templates.cpp b/test/PCH/cxx-variadic-templates.cpp index 5b58693..dc00758 100644 --- a/test/PCH/cxx-variadic-templates.cpp +++ b/test/PCH/cxx-variadic-templates.cpp @@ -7,5 +7,11 @@ // RUN: %clang_cc1 -std=c++11 -include-pch %t -verify %s -ast-dump -o - // RUN: %clang_cc1 -std=c++11 -include-pch %t %s -emit-llvm -o - | FileCheck %s +// expected-no-diagnostics + // CHECK: allocate_shared shared_ptr<int> spi = shared_ptr<int>::allocate_shared(1, 2); + +template struct A {}; +template struct B {}; +outer::inner<1, 2, A, B> i(A<1>{}, B<2>{}); diff --git a/test/PCH/cxx-variadic-templates.h b/test/PCH/cxx-variadic-templates.h index f6ee787..50596cd 100644 --- a/test/PCH/cxx-variadic-templates.h +++ b/test/PCH/cxx-variadic-templates.h @@ -16,3 +16,10 @@ shared_ptr<_Tp>::allocate_shared(const _Alloc& __a, _Args&& ...__args)
shared_ptr<_Tp> __r; return __r; } + +template struct outer { + template class ...Cs> struct inner { + inner(Cs...); + }; +}; +template struct outer; diff --git a/test/PCH/cxx11-constexpr.cpp b/test/PCH/cxx11-constexpr.cpp index ce43206..8b722ce 100644 --- a/test/PCH/cxx11-constexpr.cpp +++ b/test/PCH/cxx11-constexpr.cpp @@ -20,6 +20,13 @@ struct D : B { int k; }; +constexpr int value = 7; + +template +constexpr T plus_seven(T other) { + return value + other; +} + #else static_assert(D(4).k == 9, ""); @@ -28,4 +35,6 @@ constexpr int f(C c) { return 0; } // expected-error {{not a literal type}} constexpr B b; // expected-error {{constant expression}} expected-note {{non-constexpr}} // expected-note@9 {{here}} +static_assert(plus_seven(3) == 10, ""); + #endif diff --git a/test/PCH/cxx11-exception-spec.cpp b/test/PCH/cxx11-exception-spec.cpp index 3fca4e4..446619e 100644 --- a/test/PCH/cxx11-exception-spec.cpp +++ b/test/PCH/cxx11-exception-spec.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -pedantic-errors -std=c++11 -emit-pch %s -o %t // RUN: %clang_cc1 -pedantic-errors -std=c++11 -include-pch %t -verify %s +// expected-no-diagnostics #ifndef HEADER_INCLUDED diff --git a/test/PCH/cxx11-statement-attributes.cpp b/test/PCH/cxx11-statement-attributes.cpp new file mode 100644 index 0000000..3bb7b40 --- /dev/null +++ b/test/PCH/cxx11-statement-attributes.cpp @@ -0,0 +1,12 @@ +// Sanity check. +// RUN: %clang_cc1 -include %S/Inputs/cxx11-statement-attributes.h -std=c++11 -Wimplicit-fallthrough -fsyntax-only %s -o - -verify +// Run the same tests, this time with the attributes loaded from the PCH file. +// RUN: %clang_cc1 -x c++-header -emit-pch -std=c++11 -o %t %S/Inputs/cxx11-statement-attributes.h +// RUN: %clang_cc1 -include-pch %t -std=c++11 -Wimplicit-fallthrough -fsyntax-only %s -o - -verify + +// Warning from Inputs/cxx11-statement-attributes.h: +// expected-warning@10 {{fallthrough annotation does not directly precede switch label}} + +void g(int n) { + f<1>(n); // expected-note {{in instantiation of function template specialization 'f<1>' requested here}} +} diff --git a/test/PCH/cxx_exprs.cpp b/test/PCH/cxx_exprs.cpp index 4cd9bae..0fb7590 100644 --- a/test/PCH/cxx_exprs.cpp +++ b/test/PCH/cxx_exprs.cpp @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -fcxx-exceptions -fexceptions -x c++-header -std=c++11 -emit-pch -o %t %S/cxx_exprs.h // RUN: %clang_cc1 -fcxx-exceptions -fexceptions -std=c++11 -include-pch %t -fsyntax-only -verify %s -ast-dump +// expected-no-diagnostics + int integer; double floating; char character; @@ -37,3 +39,9 @@ cxx_null_ptr_result null_ptr = nullptr; // CXXTypeidExpr typeid_result1 typeid_1 = 0; typeid_result2 typeid_2 = 0; + +// CharacterLiteral variants +static_assert(char_value == 97, "char_value is correct"); +static_assert(wchar_t_value == 305, "wchar_t_value is correct"); +static_assert(char16_t_value == 231, "char16_t_value is correct"); +static_assert(char32_t_value == 8706, "char32_t_value is correct"); diff --git a/test/PCH/cxx_exprs.h b/test/PCH/cxx_exprs.h index 67ab4a6..35db82e 100644 --- a/test/PCH/cxx_exprs.h +++ b/test/PCH/cxx_exprs.h @@ -81,3 +81,8 @@ CtorStruct create_CtorStruct() { return CtorStruct(1, 3.14f); // CXXTemporaryObjectExpr }; +// CharacterLiteral variants +const char char_value = 'a'; +const wchar_t wchar_t_value = L'ı'; +const char16_t char16_t_value = u'ç'; +const char32_t char32_t_value = U'∂'; diff --git a/test/PCH/empty-with-headers.c b/test/PCH/empty-with-headers.c index 751be1c..b51f0ce 100644 --- a/test/PCH/empty-with-headers.c +++ 
b/test/PCH/empty-with-headers.c @@ -24,4 +24,4 @@ typedef int my_int; // This should only fire if the header is not included, // either explicitly or as a prefix header. -// expected-error{{ISO C requires a translation unit to contain at least one declaration.}} +// expected-error{{ISO C requires a translation unit to contain at least one declaration}} diff --git a/test/PCH/enum.c b/test/PCH/enum.c index 10ceb7c..81dbd90 100644 --- a/test/PCH/enum.c +++ b/test/PCH/enum.c @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -emit-pch -o %t %S/enum.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + int i = Red; int return_enum_constant() { diff --git a/test/PCH/exprs.c b/test/PCH/exprs.c index 5928abd..c0b279f 100644 --- a/test/PCH/exprs.c +++ b/test/PCH/exprs.c @@ -3,7 +3,11 @@ // Test with pch. // RUN: %clang_cc1 -emit-pch -fblocks -o %t %S/exprs.h -// RUN: %clang_cc1 -fblocks -include-pch %t -fsyntax-only -verify %s +// RUN: %clang_cc1 -fblocks -include-pch %t -fsyntax-only -verify %s -DWITH_PCH + +#ifdef WITH_PCH +// expected-no-diagnostics +#endif __SIZE_TYPE__ size_type_value; int integer; diff --git a/test/PCH/field-designator.c b/test/PCH/field-designator.c new file mode 100644 index 0000000..763cfda --- /dev/null +++ b/test/PCH/field-designator.c @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -cc1 %s -include %s +// RUN: %clang_cc1 -cc1 %s -emit-pch -o %t.pch +// RUN: %clang_cc1 -cc1 %s -include-pch %t.pch + +// rdar://12239321 Make sure we don't emit a bogus +// error: field designator 'e' does not refer to a non-static data member + +#ifndef HEADER +#define HEADER +//===----------------------------------------------------------------------===// + +struct U { + union { + struct { + int e; + int f; + }; + + int a; + }; +}; + +//===----------------------------------------------------------------------===// +#else +#if !defined(HEADER) +# error Header inclusion order messed up +#endif +//===----------------------------------------------------------------------===// + +void bar() { + static const struct U plan = { .e = 1 }; +} + +//===----------------------------------------------------------------------===// +#endif diff --git a/test/PCH/friend-template.cpp b/test/PCH/friend-template.cpp new file mode 100644 index 0000000..989819b --- /dev/null +++ b/test/PCH/friend-template.cpp @@ -0,0 +1,46 @@ +// Test this without pch. +// RUN: %clang_cc1 -include %s -fsyntax-only -verify %s + +// Test with pch. +// RUN: %clang_cc1 -emit-pch -o %t %s +// RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s + +// expected-no-diagnostics + +#ifndef HEADER +#define HEADER + +// rdar://12627738 +namespace rdar12627738 { + +class RecyclerTag { + template friend class Recycler; +}; + +} + +#else + +namespace rdar12627738 { + +template +class CRN { + template friend class Recycler; +}; + + +template +class Recycler { +public: + Recycler (); +}; + + +template +Recycler::Recycler () +{ +} + +} + +#endif diff --git a/test/PCH/fuzzy-pch.c b/test/PCH/fuzzy-pch.c index 5675753..7296d1d 100644 --- a/test/PCH/fuzzy-pch.c +++ b/test/PCH/fuzzy-pch.c @@ -1,8 +1,14 @@ // Test with pch. 
// RUN: %clang_cc1 -emit-pch -DFOO -o %t %S/variables.h // RUN: %clang_cc1 -DBAR=int -include-pch %t -fsyntax-only -pedantic %s -// RUN: %clang_cc1 -DFOO -DBAR=int -include-pch %t -Werror %s -// RUN: not %clang_cc1 -DFOO -DBAR=int -DX=5 -include-pch %t -Werror %s +// RUN: %clang_cc1 -DFOO -DBAR=int -include-pch %t %s +// RUN: not %clang_cc1 -DFOO=blah -DBAR=int -include-pch %t %s 2> %t.err +// RUN: FileCheck -check-prefix=CHECK-FOO %s < %t.err +// RUN: not %clang_cc1 -UFOO -include-pch %t %s 2> %t.err +// RUN: FileCheck -check-prefix=CHECK-NOFOO %s < %t.err + +// RUN: not %clang_cc1 -DFOO -undef -include-pch %t %s 2> %t.err +// RUN: FileCheck -check-prefix=CHECK-UNDEF %s < %t.err BAR bar = 17; @@ -17,3 +23,9 @@ BAR bar = 17; #ifndef BAR # error BAR was not defined #endif + +// CHECK-FOO: definition of macro 'FOO' differs between the precompiled header ('1') and the command line ('blah') +// CHECK-NOFOO: macro 'FOO' was defined in the precompiled header but undef'd on the command line + +// CHECK-UNDEF: command line contains '-undef' but precompiled header was not built with it + diff --git a/test/PCH/missing-file.cpp b/test/PCH/missing-file.cpp index 7d5cd11..7dd11d4 100644 --- a/test/PCH/missing-file.cpp +++ b/test/PCH/missing-file.cpp @@ -4,19 +4,14 @@ // RUN: echo 'struct S{char c; int i; }; void foo() {}' > %t.h // RUN: echo 'template void tf() { T::foo(); }' >> %t.h // RUN: %clang_cc1 -x c++ -emit-pch -o %t.h.pch %t.h -// RUN: rm %t.h -// Check diagnostic with location in original source: -// RUN: %clang_cc1 -include-pch %t.h.pch -Wpadded -emit-obj -o %t.o %s 2> %t.stderr -// RUN: grep 'bytes to align' %t.stderr - -// Check diagnostic with 2nd location in original source: -// RUN: not %clang_cc1 -DREDECL -include-pch %t.h.pch -emit-obj -o %t.o %s 2> %t.stderr -// RUN: grep 'previous definition is here' %t.stderr +// %t.h might be touched by scanners as a hot file on Windows, +// to fail to remove %.h with single run. +// RUN: rm %t.h || rm %t.h || rm %t.h -// Check diagnostic with instantiation location in original source: -// RUN: not %clang_cc1 -DINSTANTIATION -include-pch %t.h.pch -emit-obj -o %t.o %s 2> %t.stderr -// RUN: grep 'cannot be used prior to' %t.stderr +// Check diagnostic with location in original source: +// RUN: not %clang_cc1 -include-pch %t.h.pch -emit-obj -o %t.o %s 2> %t.stderr +// RUN: grep 'could not find file' %t.stderr void qq(S*) {} diff --git a/test/PCH/objc_container.m b/test/PCH/objc_container.m index 1e59054..07371ca 100644 --- a/test/PCH/objc_container.m +++ b/test/PCH/objc_container.m @@ -4,8 +4,10 @@ // Test with pch. 
// RUN: %clang_cc1 -x objective-c -emit-pch -o %t %S/objc_container.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s -// RUN: %clang -cc1 -include-pch %t -ast-print %s | FileCheck -check-prefix=PRINT %s -// RUN: %clang -cc1 -include-pch %t -emit-llvm -o - %s | FileCheck -check-prefix=IR %s +// RUN: %clang_cc1 -include-pch %t -ast-print %s | FileCheck -check-prefix=PRINT %s +// RUN: %clang_cc1 -include-pch %t -emit-llvm -o - %s | FileCheck -check-prefix=IR %s + +// expected-no-diagnostics // CHECK-PRINT: id oldObject = array[10]; // CHECK-PRINT: array[10] = oldObject; diff --git a/test/PCH/objc_import.m b/test/PCH/objc_import.m index 277c6dd..c7dd805 100644 --- a/test/PCH/objc_import.m +++ b/test/PCH/objc_import.m @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -x objective-c -emit-pch -o %t %S/objc_import.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + #import "objc_import.h" void func() { diff --git a/test/PCH/objc_literals.m b/test/PCH/objc_literals.m index cce3173..b73c3be 100644 --- a/test/PCH/objc_literals.m +++ b/test/PCH/objc_literals.m @@ -1,7 +1,9 @@ -// RUN: %clang -cc1 -emit-pch -o %t %s -// RUN: %clang -cc1 -include-pch %t -verify %s -// RUN: %clang -cc1 -include-pch %t -ast-print %s | FileCheck -check-prefix=PRINT %s -// RUN: %clang -cc1 -include-pch %t -emit-llvm -o - %s | FileCheck -check-prefix=IR %s +// RUN: %clang_cc1 -emit-pch -o %t %s +// RUN: %clang_cc1 -include-pch %t -verify %s +// RUN: %clang_cc1 -include-pch %t -ast-print %s | FileCheck -check-prefix=PRINT %s +// RUN: %clang_cc1 -include-pch %t -emit-llvm -o - %s | FileCheck -check-prefix=IR %s + +// expected-no-diagnostics #ifndef HEADER #define HEADER diff --git a/test/PCH/objc_literals.mm b/test/PCH/objc_literals.mm index 8ef3351..ef95294 100644 --- a/test/PCH/objc_literals.mm +++ b/test/PCH/objc_literals.mm @@ -1,7 +1,9 @@ -// RUN: %clang -cc1 -emit-pch -x objective-c++ -std=c++0x -o %t %s -// RUN: %clang -cc1 -include-pch %t -x objective-c++ -std=c++0x -verify %s -// RUN: %clang -cc1 -include-pch %t -x objective-c++ -std=c++0x -ast-print %s | FileCheck -check-prefix=PRINT %s -// RUN: %clang -cc1 -include-pch %t -x objective-c++ -std=c++0x -emit-llvm -o - %s | FileCheck -check-prefix=IR %s +// RUN: %clang_cc1 -emit-pch -x objective-c++ -std=c++0x -o %t %s +// RUN: %clang_cc1 -include-pch %t -x objective-c++ -std=c++0x -verify %s +// RUN: %clang_cc1 -include-pch %t -x objective-c++ -std=c++0x -ast-print %s | FileCheck -check-prefix=PRINT %s +// RUN: %clang_cc1 -include-pch %t -x objective-c++ -std=c++0x -emit-llvm -o - %s | FileCheck -check-prefix=IR %s + +// expected-no-diagnostics #ifndef HEADER #define HEADER diff --git a/test/PCH/objc_methods.m b/test/PCH/objc_methods.m index e8aab84..ea40460 100644 --- a/test/PCH/objc_methods.m +++ b/test/PCH/objc_methods.m @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -x objective-c -emit-pch -o %t %S/objc_methods.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + void func() { TestPCH *xx; TestForwardClassDecl *yy; diff --git a/test/PCH/objc_property.m b/test/PCH/objc_property.m index b51cd90..88a0919 100644 --- a/test/PCH/objc_property.m +++ b/test/PCH/objc_property.m @@ -5,6 +5,8 @@ // RUN: %clang_cc1 -x objective-c -emit-pch -o %t %S/objc_property.h // RUN: %clang_cc1 -include-pch %t -fsyntax-only -verify %s +// expected-no-diagnostics + void func() { TestProperties *xx = [TestProperties alloc]; xx.value = 5; diff --git a/test/PCH/pch-dir.c b/test/PCH/pch-dir.c new file mode 100644 index 
0000000..ae841ce --- /dev/null +++ b/test/PCH/pch-dir.c @@ -0,0 +1,28 @@ +// RUN: mkdir -p %t.h.gch +// RUN: %clang -x c-header %S/pch-dir.h -DFOO=foo -o %t.h.gch/c.gch +// RUN: %clang -x c-header %S/pch-dir.h -DFOO=bar -o %t.h.gch/cbar.gch +// RUN: %clang -x c++-header -std=c++98 %S/pch-dir.h -o %t.h.gch/cpp.gch +// RUN: %clang -include %t.h -DFOO=foo -fsyntax-only %s -Xclang -print-stats 2> %t.clog +// RUN: FileCheck -check-prefix=C %s < %t.clog +// RUN: %clang -include %t.h -DFOO=bar -DBAR=bar -fsyntax-only %s -Xclang -ast-print > %t.cbarlog +// RUN: FileCheck -check-prefix=CBAR %s < %t.cbarlog +// RUN: %clang -x c++ -include %t.h -std=c++98 -fsyntax-only %s -Xclang -print-stats 2> %t.cpplog +// RUN: FileCheck -check-prefix=CPP %s < %t.cpplog + +// RUN: not %clang -x c++ -std=c++11 -include %t.h -fsyntax-only %s 2> %t.cpp11log +// RUN: FileCheck -check-prefix=CPP11 %s < %t.cpp11log + +// CHECK-CBAR: int bar +int FOO; + +int get() { +#ifdef __cplusplus + // CHECK-CPP: .h.gch{{[/\\]}}cpp.gch + return i; +#else + // CHECK-C: .h.gch{{[/\\]}}c.gch + return j; +#endif +} + +// CHECK-CPP11: no suitable precompiled header file found in directory diff --git a/test/PCH/pch-dir.h b/test/PCH/pch-dir.h new file mode 100644 index 0000000..e94a8c7 --- /dev/null +++ b/test/PCH/pch-dir.h @@ -0,0 +1,5 @@ +#ifdef __cplusplus +extern int i; +#else +extern int j; +#endif diff --git a/test/PCH/pending-ids.m b/test/PCH/pending-ids.m index b612d89..2ca0e6e 100644 --- a/test/PCH/pending-ids.m +++ b/test/PCH/pending-ids.m @@ -7,6 +7,8 @@ // RUN: %clang_cc1 %s -emit-pch -o %t // RUN: %clang_cc1 -emit-llvm-only -verify %s -include-pch %t -g +// expected-no-diagnostics + #ifndef HEADER #define HEADER //===----------------------------------------------------------------------===// diff --git a/test/PCH/pragma-diag-section.cpp b/test/PCH/pragma-diag-section.cpp index 5b996bb..627156f 100644 --- a/test/PCH/pragma-diag-section.cpp +++ b/test/PCH/pragma-diag-section.cpp @@ -11,7 +11,7 @@ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wtautological-compare" template <typename T> -struct TS { +struct TS1 { void m() { T a = 0; T b = a==a; @@ -21,9 +21,22 @@ struct TS { #else + +template <typename T> +struct TS2 { + void m() { + T a = 0; + T b = a==a; // expected-warning {{self-comparison always evaluates to true}} expected-note@39 {{in instantiation of member function}} + } +}; + void f() { - TS<int> ts; - ts.m(); + TS1<int> ts1; + ts1.m(); + + + TS2<int> ts2; + ts2.m(); } #endif diff --git a/test/PCH/pragma-diag.c b/test/PCH/pragma-diag.c index b304c4b..601c940 100644 --- a/test/PCH/pragma-diag.c +++ b/test/PCH/pragma-diag.c @@ -5,6 +5,8 @@ // RUN: %clang_cc1 %s -emit-pch -o %t // RUN: %clang_cc1 %s -include-pch %t -verify -fsyntax-only +// expected-no-diagnostics + #ifndef HEADER #define HEADER diff --git a/test/PCH/rdar8852495.c b/test/PCH/rdar8852495.c index fb465a3..7639f1f 100644 --- a/test/PCH/rdar8852495.c +++ b/test/PCH/rdar8852495.c @@ -5,6 +5,8 @@ // RUN: %clang_cc1 %s -emit-pch -o %t -Wsign-compare -Wtautological-compare // RUN: %clang_cc1 %s -include-pch %t -verify -fsyntax-only -Wno-sign-compare -Wtautological-compare +// expected-no-diagnostics + // This tests that diagnostic mappings from PCH are propagated for #pragma // diagnostics but not for command-line flags.
diff --git a/test/PCH/reinclude.cpp b/test/PCH/reinclude.cpp index 97e22cf..4bec050 100644 --- a/test/PCH/reinclude.cpp +++ b/test/PCH/reinclude.cpp @@ -7,4 +7,6 @@ // RUN: %clang_cc1 -x c++-header %S/reinclude2.h -include-pch %t1 -emit-pch -o %t2 // RUN: %clang_cc1 %s -include-pch %t2 -fsyntax-only -verify +// expected-no-diagnostics + int q2 = A::y; diff --git a/test/PCH/single-token-macro.c b/test/PCH/single-token-macro.c index 29edb75..52873bf 100644 --- a/test/PCH/single-token-macro.c +++ b/test/PCH/single-token-macro.c @@ -7,6 +7,8 @@ // RUN: %clang_cc1 %s -emit-pch -o %t // RUN: %clang_cc1 %s -include-pch %t -verify -fsyntax-only +// expected-no-diagnostics + #ifndef HEADER #define HEADER diff --git a/test/PCH/target-options.c b/test/PCH/target-options.c new file mode 100644 index 0000000..2b85efe --- /dev/null +++ b/test/PCH/target-options.c @@ -0,0 +1,5 @@ +// RUN: %clang_cc1 -triple=x86_64-apple-darwin9 -emit-pch -o %t.pch %S/target-options.h +// RUN: not %clang_cc1 -triple=x86_64-unknown-freebsd7.0 -include-pch %t.pch %s -emit-llvm -o - > %t.err 2>&1 +// RUN: FileCheck %s < %t.err + +// CHECK: for the target diff --git a/test/PCH/target-options.h b/test/PCH/target-options.h new file mode 100644 index 0000000..2c3edf6 --- /dev/null +++ b/test/PCH/target-options.h @@ -0,0 +1,2 @@ +enum { apple_cc = __APPLE_CC__ }; + diff --git a/test/Parser/MicrosoftExtensions.cpp b/test/Parser/MicrosoftExtensions.cpp index 6219e29..fd38dca 100644 --- a/test/Parser/MicrosoftExtensions.cpp +++ b/test/Parser/MicrosoftExtensions.cpp @@ -174,13 +174,21 @@ void redundant_typename() { __interface MicrosoftInterface; __interface MicrosoftInterface { - virtual void foo1() = 0; + void foo1() = 0; virtual void foo2() = 0; }; +__interface MicrosoftDerivedInterface : public MicrosoftInterface { + void foo1(); + void foo2() override; + void foo3(); +}; + void interface_test() { MicrosoftInterface* a; a->foo1(); + MicrosoftDerivedInterface* b; + b->foo2(); } __int64 x7 = __int64(0); @@ -230,29 +238,29 @@ __if_not_exists(IF_EXISTS::Type_not) { int __if_exists_init_list() { - int array1[] = { - 0, - __if_exists(IF_EXISTS::Type) {2, } - 3 - }; - - int array2[] = { - 0, - __if_exists(IF_EXISTS::Type_not) { this wont compile } - 3 - }; - - int array3[] = { - 0, - __if_not_exists(IF_EXISTS::Type_not) {2, } - 3 - }; - - int array4[] = { - 0, - __if_not_exists(IF_EXISTS::Type) { this wont compile } - 3 - }; + int array1[] = { + 0, + __if_exists(IF_EXISTS::Type) {2, } + 3 + }; + + int array2[] = { + 0, + __if_exists(IF_EXISTS::Type_not) { this wont compile } + 3 + }; + + int array3[] = { + 0, + __if_not_exists(IF_EXISTS::Type_not) {2, } + 3 + }; + + int array4[] = { + 0, + __if_not_exists(IF_EXISTS::Type) { this wont compile } + 3 + }; } diff --git a/test/Parser/block-block-storageclass.c b/test/Parser/block-block-storageclass.c index 97ba113..53cd997 100644 --- a/test/Parser/block-block-storageclass.c +++ b/test/Parser/block-block-storageclass.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fblocks -verify %s +// expected-no-diagnostics int printf(const char *, ...); void _Block_byref_release(void*src){} diff --git a/test/Parser/block-pointer-decl.c b/test/Parser/block-pointer-decl.c index a8cc258..d88daf3 100644 --- a/test/Parser/block-pointer-decl.c +++ b/test/Parser/block-pointer-decl.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -fblocks %s +// expected-no-diagnostics int printf(char const *, ...); diff --git a/test/Parser/builtin_classify_type.c b/test/Parser/builtin_classify_type.c index 
a7c0855..ff483b2 100644 --- a/test/Parser/builtin_classify_type.c +++ b/test/Parser/builtin_classify_type.c @@ -10,7 +10,7 @@ int main() { static int ary[__builtin_classify_type(a)]; static int ary2[(__builtin_classify_type)(a)]; // expected-error{{variable length array declaration can not have 'static' storage duration}} - static int ary3[(*__builtin_classify_type)(a)]; // expected-error{{variable length array declaration can not have 'static' storage duration}} + static int ary3[(*__builtin_classify_type)(a)]; // expected-error{{builtin functions must be directly called}} int result; diff --git a/test/Parser/check-objc2-syntax-1.m b/test/Parser/check-objc2-syntax-1.m index 3cdf2b0..9aff963 100644 --- a/test/Parser/check-objc2-syntax-1.m +++ b/test/Parser/check-objc2-syntax-1.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface Subclass + (int)magicNumber; diff --git a/test/Parser/colon-colon-parentheses.cpp b/test/Parser/colon-colon-parentheses.cpp new file mode 100644 index 0000000..55948fd --- /dev/null +++ b/test/Parser/colon-colon-parentheses.cpp @@ -0,0 +1,22 @@ +// RUN: %clang_cc1 %s -fsyntax-only -verify +// RUN: cp %s %t +// RUN: not %clang_cc1 -x c++ -fixit %t +// RUN: %clang_cc1 -x c++ %t + +struct S { static int a,b,c;}; +int S::(a); // expected-error{{unexpected parenthesis after '::'}} +int S::(b; // expected-error{{unexpected parenthesis after '::'}} +int S::c; +int S::(*d); // expected-error{{unexpected parenthesis after '::'}} +int S::(*e; // expected-error{{unexpected parenthesis after '::'}} +int S::*f; +int g = S::(a); // expected-error{{unexpected parenthesis after '::'}} +int h = S::(b; // expected-error{{unexpected parenthesis after '::'}} +int i = S::c; + +void foo() { + int a; + a = ::(g); // expected-error{{unexpected parenthesis after '::'}} + a = ::(h; // expected-error{{unexpected parenthesis after '::'}} + a = ::i; +} diff --git a/test/Parser/compound_literal.c b/test/Parser/compound_literal.c index 4f3609d..9a0e4a6 100644 --- a/test/Parser/compound_literal.c +++ b/test/Parser/compound_literal.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics int main() { char *s; s = (char []){"whatever"}; diff --git a/test/Parser/cxx-ambig-decl-expr.cpp b/test/Parser/cxx-ambig-decl-expr.cpp index b5ff728..feb185f 100644 --- a/test/Parser/cxx-ambig-decl-expr.cpp +++ b/test/Parser/cxx-ambig-decl-expr.cpp @@ -7,4 +7,7 @@ struct X { void f() { void (*ptr)(int, int) = &X::f; + + unknown *p = 0; // expected-error {{unknown type name 'unknown'}} + unknown * p + 0; // expected-error {{undeclared identifier 'unknown'}} } diff --git a/test/Parser/cxx-attributes.cpp b/test/Parser/cxx-attributes.cpp index 8603b30..5ea0ce2 100644 --- a/test/Parser/cxx-attributes.cpp +++ b/test/Parser/cxx-attributes.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics class c { virtual void f1(const char* a, ...) 
diff --git a/test/Parser/cxx-casting.cpp b/test/Parser/cxx-casting.cpp index 42ad12e..01980d3 100644 --- a/test/Parser/cxx-casting.cpp +++ b/test/Parser/cxx-casting.cpp @@ -58,9 +58,9 @@ void test2(char x, struct B * b) { expected-error {{expected ']'}} #define LC <: #define C : - test1::A LC:B> c; // expected-error {{cannot refer to class template 'A' without a template argument list}} expected-error 2{{}} expected-note{{}} + test1::A LC:B> c; // expected-error {{class template test1::A requires template arguments}} expected-error 2{{}} (void)static_cast LC:c>(&x); // expected-error {{expected '<' after 'static_cast'}} expected-error 2{{}} expected-note{{}} - test1::A<:C B> d; // expected-error {{cannot refer to class template 'A' without a template argument list}} expected-error 2{{}} expected-note{{}} + test1::A<:C B> d; // expected-error {{class template test1::A requires template arguments}} expected-error 2{{}} (void)static_cast<:C c>(&x); // expected-error {{expected '<' after 'static_cast'}} expected-error 2{{}} expected-note{{}} #define LCC <:: @@ -85,8 +85,10 @@ void test3() { E< ::F>(); // Make sure that parser doesn't expand '[:' to '< ::' - ::D[:F> A5; // expected-error {{cannot refer to class template 'D' without a template argument list}} \ + ::D[:F> A5; // expected-error {{class template ::D requires template arguments}} \ // expected-error {{expected expression}} \ - // expected-error {{expected ']'}} \ - // expected-note {{to match this '['}} + // expected-error {{expected unqualified-id}} } + +// PR13619. Must be at end of file. +int n = reinterpret_cast // expected-error {{expected '<'}} expected-error {{expected ';'}} diff --git a/test/Parser/cxx-class.cpp b/test/Parser/cxx-class.cpp index feccba8..8ed5882 100644 --- a/test/Parser/cxx-class.cpp +++ b/test/Parser/cxx-class.cpp @@ -88,6 +88,20 @@ namespace ctor_error { // expected-error{{unknown type name 'UnknownType'}} } +// PR13775: Don't assert here. 
+namespace PR13775 { + class bar + { + public: + void foo (); + void baz (); + }; + void bar::foo () + { + baz x(); // expected-error 3{{}} + } +} + // PR11109 must appear at the end of the source file class pr11109r3 { // expected-note{{to match this '{'}} public // expected-error{{expected ':'}} expected-error{{expected '}'}} expected-error{{expected ';' after class}} diff --git a/test/Parser/cxx-decl.cpp b/test/Parser/cxx-decl.cpp index 951cd3d..290b947 100644 --- a/test/Parser/cxx-decl.cpp +++ b/test/Parser/cxx-decl.cpp @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -verify -fsyntax-only -triple i386-linux -pedantic %s +const char const *x10; // expected-warning {{duplicate 'const' declaration specifier}} + int x(*g); // expected-error {{use of undeclared identifier 'g'}} struct Type { @@ -119,6 +121,9 @@ void CodeCompleteConsumer::() { // expected-error {{xpected unqualified-id}} ; +// PR4111 +void f(sqrgl); // expected-error {{unknown type name 'sqrgl'}} + // PR8380 extern "" // expected-error {{unknown linkage language}} test6a { ;// expected-error {{C++ requires a type specifier for all declarations}} \ diff --git a/test/Parser/cxx-extern-c-array.cpp b/test/Parser/cxx-extern-c-array.cpp index 14912fd..11092ad 100644 --- a/test/Parser/cxx-extern-c-array.cpp +++ b/test/Parser/cxx-extern-c-array.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics extern "C" int myarray[]; int myarray[12] = {0}; diff --git a/test/Parser/cxx-stmt.cpp b/test/Parser/cxx-stmt.cpp index 7677ca8..c2fa0a4 100644 --- a/test/Parser/cxx-stmt.cpp +++ b/test/Parser/cxx-stmt.cpp @@ -58,3 +58,11 @@ void f5() { asm volatile ("":: :"memory"); asm volatile ("": ::"memory"); } + +int f6() { + int k, // expected-note {{change this ',' to a ';' to call 'f6'}} + f6(), // expected-error {{expected ';'}} expected-warning {{interpreted as a function declaration}} expected-note {{replace paren}} + int n = 0, // expected-error {{expected ';'}} + return f5(), // ok + int(n); +} diff --git a/test/Parser/cxx-template-argument.cpp b/test/Parser/cxx-template-argument.cpp index 5479961..afe318d 100644 --- a/test/Parser/cxx-template-argument.cpp +++ b/test/Parser/cxx-template-argument.cpp @@ -25,3 +25,20 @@ namespace greatergreater { (void)(&t>==p); // expected-error {{use '> >'}} expected-error {{use '> ='}} } } + +namespace PR5925 { + template + class foo { // expected-note {{here}} + }; + void bar(foo *X) { // expected-error {{requires template arguments}} + } +} + +namespace PR13210 { + template + class C {}; // expected-note {{here}} + + void f() { + new C(); // expected-error {{requires template arguments}} + } +} diff --git a/test/Parser/cxx0x-attributes.cpp b/test/Parser/cxx0x-attributes.cpp index a0b8467..58e42bf 100644 --- a/test/Parser/cxx0x-attributes.cpp +++ b/test/Parser/cxx0x-attributes.cpp @@ -44,10 +44,15 @@ int & [[]] ref_attr = after_attr; int && [[]] rref_attr = 0; int array_attr [1] [[]]; alignas(8) int aligned_attr; -[[test::valid(for 42 [very] **** '+' symbols went on a trip and had a "good"_time; the end.)]] - int garbage_attr; -[[,,,static, class, namespace,, inline, constexpr, mutable,, bi\ -tand, bitor::compl(!.*_ Cx.!U^*R),,,]] int more_garbage_attr; +[[test::valid(for 42 [very] **** '+' symbols went on a trip and had a "good"_time; the end.)]] int garbage_attr; // expected-warning {{unknown attribute 'valid' ignored}} +[[,,,static, class, namespace,, inline, constexpr, mutable,, bitand, bitor::compl(!.*_ Cx.!U^*R),,,]] int more_garbage_attr; // expected-warning {{unknown 
attribute 'static' ignored}} \ + // expected-warning {{unknown attribute 'class' ignored}} \ + // expected-warning {{unknown attribute 'namespace' ignored}} \ + // expected-warning {{unknown attribute 'inline' ignored}} \ + // expected-warning {{unknown attribute 'constexpr' ignored}} \ + // expected-warning {{unknown attribute 'mutable' ignored}} \ + // expected-warning {{unknown attribute 'bitand' ignored}} \ + // expected-warning {{unknown attribute 'compl' ignored}} [[u8"invalid!"]] int invalid_string_attr; // expected-error {{expected ']'}} void fn_attr () [[]]; void noexcept_fn_attr () noexcept [[]]; @@ -212,3 +217,16 @@ enum class __attribute__((visibility("hidden"))) SecretKeepers { one, /* rest are deprecated */ two, three }; enum class [[]] EvenMoreSecrets {}; + +namespace arguments { + // FIXME: remove the sema warnings after migrating existing gnu attributes to c++11 syntax. + void f(const char*, ...) [[gnu::format(printf, 1, 2)]]; // expected-warning {{unknown attribute 'format' ignored}} + void g() [[unknown::foo(currently arguments of attributes from unknown namespace other than 'gnu' namespace are ignored... blah...)]]; // expected-warning {{unknown attribute 'foo' ignored}} +} + +// forbid attributes on decl specifiers +unsigned [[gnu::used]] static int [[gnu::unused]] v1; // expected-warning {{attribute 'unused' ignored, because it is not attached to a declaration}} \ + expected-error {{an attribute list cannot appear here}} +typedef [[gnu::used]] unsigned long [[gnu::unused]] v2; // expected-warning {{attribute 'unused' ignored, because it is not attached to a declaration}} \ + expected-error {{an attribute list cannot appear here}} +int [[carries_dependency]] foo(int [[carries_dependency]] x); // expected-warning 2{{attribute 'carries_dependency' ignored, because it is not attached to a declaration}} diff --git a/test/Parser/cxx0x-condition.cpp b/test/Parser/cxx0x-condition.cpp index e45cd86..8b64bcf 100644 --- a/test/Parser/cxx0x-condition.cpp +++ b/test/Parser/cxx0x-condition.cpp @@ -27,10 +27,11 @@ void f() { if (S b(n) = 0) {} // expected-error {{a function type is not allowed here}} if (S b(n) == 0) {} // expected-error {{a function type is not allowed here}} expected-error {{did you mean '='?}} - if (S{a}) {} // ok - if (S a{a}) {} // ok - if (S a = {a}) {} // ok - if (S a == {a}) {} // expected-error {{did you mean '='?}} + S s(a); + if (S{s}) {} // ok + if (S a{s}) {} // ok + if (S a = {s}) {} // ok + if (S a == {s}) {} // expected-error {{did you mean '='?}} if (S(b){a}) {} // ok if (S(b) = {a}) {} // ok diff --git a/test/Parser/cxx0x-decl.cpp b/test/Parser/cxx0x-decl.cpp index a6fc49c..3af73f9 100644 --- a/test/Parser/cxx0x-decl.cpp +++ b/test/Parser/cxx0x-decl.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -verify -fsyntax-only -std=c++11 -pedantic %s +// RUN: %clang_cc1 -verify -fsyntax-only -std=c++11 -pedantic-errors %s // Make sure we know these are legitimate commas and not typos for ';'. 
namespace Commas { @@ -23,8 +23,14 @@ class ExtraSemiAfterMemFn { void f() = delete // expected-error {{expected ';' after delete}} void g() = delete; // ok void h() = delete;; // ok - void i() = delete;;; // expected-warning {{extra ';' after member function definition}} + void i() = delete;;; // expected-error {{extra ';' after member function definition}} }; -int *const const p = 0; // ok -const const int *q = 0; // expected-warning {{duplicate 'const' declaration specifier}} +int *const const p = 0; // expected-error {{duplicate 'const' declaration specifier}} +const const int *q = 0; // expected-error {{duplicate 'const' declaration specifier}} + +struct MultiCV { + void f() const const; // expected-error {{duplicate 'const' declaration specifier}} +}; + +static_assert(something, ""); // expected-error {{undeclared identifier}} diff --git a/test/Parser/cxx0x-lambda-expressions.cpp b/test/Parser/cxx0x-lambda-expressions.cpp index 7e9d475..642c69a 100644 --- a/test/Parser/cxx0x-lambda-expressions.cpp +++ b/test/Parser/cxx0x-lambda-expressions.cpp @@ -26,6 +26,7 @@ class C { [] -> int { return 0; }; // expected-error{{lambda requires '()' before return type}} [] mutable -> int { return 0; }; // expected-error{{lambda requires '()' before 'mutable'}} + [](int) -> {}; // PR13652 expected-error {{expected a type}} return 1; } @@ -33,7 +34,7 @@ class C { typedef int T; const int b = 0; const int c = 1; - int a1[1] = {[b] (T()) {}}; // expected-error{{no viable conversion from 'C:: void f(Ts ...x) { - [[test::foo(bar, baz)...]]; - [[used(x)...]]; + [[test::foo(bar, baz)...]]; // expected-error {{attribute 'foo' cannot be used as an attribute pack}} \ + // expected-warning {{unknown attribute 'foo' ignored}} + + [[used(x)...]]; // expected-error {{attribute 'used' cannot be used as an attribute pack}} \ + // expected-warning {{unknown attribute 'used' ignored}} + [[x...] 
{ return [ X alloc ]; }() init]; } diff --git a/test/Parser/opencl-kernel.cl b/test/Parser/opencl-kernel.cl index 3abb62b..01c7ed6 100644 --- a/test/Parser/opencl-kernel.cl +++ b/test/Parser/opencl-kernel.cl @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -pedantic -fsyntax-only +// expected-no-diagnostics __kernel void test() { diff --git a/test/Parser/parmvardecl_conversion.c b/test/Parser/parmvardecl_conversion.c index 9fa8a68..f6afd12 100644 --- a/test/Parser/parmvardecl_conversion.c +++ b/test/Parser/parmvardecl_conversion.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics void f (int p[]) { p++; } diff --git a/test/Parser/pragma-fp-contract.c b/test/Parser/pragma-fp-contract.c new file mode 100644 index 0000000..568c0a0 --- /dev/null +++ b/test/Parser/pragma-fp-contract.c @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -fsyntax-only -verify %s + +void f1(void) { + int x = 0; +/* expected-error@+1 {{'#pragma fp_contract' should only appear at file scope or at the start of a compound expression}} */ +#pragma STDC FP_CONTRACT ON +} + +void f2(void) { + #pragma STDC FP_CONTRACT OFF + #pragma STDC FP_CONTRACT ON +} diff --git a/test/Parser/pragma-options.cpp b/test/Parser/pragma-options.cpp new file mode 100644 index 0000000..84cd38d --- /dev/null +++ b/test/Parser/pragma-options.cpp @@ -0,0 +1,6 @@ +// RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -verify %s +// expected-no-diagnostics + +class C { +#pragma options align=natural +}; diff --git a/test/Parser/recovery.cpp b/test/Parser/recovery.cpp index ff68758..732b9ae 100644 --- a/test/Parser/recovery.cpp +++ b/test/Parser/recovery.cpp @@ -1,4 +1,4 @@ -// RUN: %clang -cc1 -verify -std=c++11 %s +// RUN: %clang_cc1 -verify -std=c++11 %s 8gi///===--- recovery.cpp ---===// // expected-error {{unqualified-id}} namespace Std { // expected-note {{here}} @@ -23,12 +23,15 @@ void g() { struct S { int a, b, c; S(); + int x // expected-error {{expected ';'}} + friend void f() }; 8S::S() : a{ 5 }, b{ 6 }, c{ 2 } { // expected-error {{unqualified-id}} return; } int k; -int l = k; +int l = k // expected-error {{expected ';'}} +constexpr int foo(); 5int m = { l }, n = m; // expected-error {{unqualified-id}} diff --git a/test/Parser/recursion-limits.cpp b/test/Parser/recursion-limits.cpp index ea25dea..bb7354f 100644 --- a/test/Parser/recursion-limits.cpp +++ b/test/Parser/recursion-limits.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only %s -verify +// expected-no-diagnostics class outer { class inner1 { inner1(); }; class inner2 { inner2(); }; diff --git a/test/Parser/selector-1.m b/test/Parser/selector-1.m index 5ba2da9..3e2a86d 100644 --- a/test/Parser/selector-1.m +++ b/test/Parser/selector-1.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s // RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify %s +// expected-no-diagnostics // rdar://8366474 int main() { diff --git a/test/Parser/statements.c b/test/Parser/statements.c index 3a123d6..8215f4c 100644 --- a/test/Parser/statements.c +++ b/test/Parser/statements.c @@ -62,3 +62,18 @@ void test8() { // Should not skip '}' and produce a "expected '}'" error. 
undecl // expected-error {{use of undeclared identifier 'undecl'}} } + +int test9() { + int T[] = {1, 2, }; + + int X; + X = 0, // expected-error {{expected ';' after expression}} + { + } + + X = 0, // expected-error {{expected ';' after expression}} + if (0) + ; + + return 4, // expected-error {{expected ';' after return statement}} +} diff --git a/test/Parser/top-level-semi-cxx0x.cpp b/test/Parser/top-level-semi-cxx0x.cpp index be342a2..472686e 100644 --- a/test/Parser/top-level-semi-cxx0x.cpp +++ b/test/Parser/top-level-semi-cxx0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -pedantic -std=c++11 -verify %s +// expected-no-diagnostics void foo(); diff --git a/test/Parser/types.c b/test/Parser/types.c index 53b9dd5..db8c083 100644 --- a/test/Parser/types.c +++ b/test/Parser/types.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // Test the X can be overloaded inside the struct. typedef int X; diff --git a/test/Preprocessor/comment_save_if.c b/test/Preprocessor/comment_save_if.c index 4946122..b972d91 100644 --- a/test/Preprocessor/comment_save_if.c +++ b/test/Preprocessor/comment_save_if.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -E -CC -pedantic -verify +// expected-no-diagnostics #if 1 /*bar */ diff --git a/test/Preprocessor/cxx_true.cpp b/test/Preprocessor/cxx_true.cpp index b123e0c..39cb349 100644 --- a/test/Preprocessor/cxx_true.cpp +++ b/test/Preprocessor/cxx_true.cpp @@ -1,7 +1,9 @@ /* RUN: %clang_cc1 -E %s -x c++ | grep block_1 RUN: %clang_cc1 -E %s -x c++ | not grep block_2 RUN: %clang_cc1 -E %s -x c | not grep block + RUN: %clang_cc1 -E %s -x c++ -verify -Wundef */ +// expected-no-diagnostics #if true block_1 diff --git a/test/Preprocessor/expr_define_expansion.c b/test/Preprocessor/expr_define_expansion.c index 38c0384..3e5a2c4 100644 --- a/test/Preprocessor/expr_define_expansion.c +++ b/test/Preprocessor/expr_define_expansion.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -E -CC -pedantic -verify +// expected-no-diagnostics #define FOO && 1 #if defined FOO FOO diff --git a/test/Preprocessor/expr_multichar.c b/test/Preprocessor/expr_multichar.c index 8ab12d9..39155e4 100644 --- a/test/Preprocessor/expr_multichar.c +++ b/test/Preprocessor/expr_multichar.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 < %s -E -verify -triple i686-pc-linux-gnu +// expected-no-diagnostics #if (('1234' >> 24) != '1') #error Bad multichar constant calculation! diff --git a/test/Preprocessor/has_include.c b/test/Preprocessor/has_include.c index 13c8d56..10f7795 100644 --- a/test/Preprocessor/has_include.c +++ b/test/Preprocessor/has_include.c @@ -67,7 +67,7 @@ // Try badly formed expressions. // FIXME: We can recover better in almost all of these cases. 
(PR13335) -// expected-error@+1 {{missing '(' after '__has_include'}} expected-error@+1 {{expected end of line}} +// expected-error@+1 {{missing '(' after '__has_include'}} #if __has_include "stdint.h") #endif @@ -83,11 +83,11 @@ #if __has_include) #endif -// expected-error@+1 {{missing '(' after '__has_include'}} expected-error@+1 {{token is not a valid binary operator in a preprocessor subexpression}} +// expected-error@+1 {{missing '(' after '__has_include'}} #if __has_include) #endif -// expected-error@+1 {{expected "FILENAME" or }} expected-warning@+1 {{missing terminating '"' character}} +// expected-error@+1 {{expected "FILENAME" or }} expected-warning@+1 {{missing terminating '"' character}} expected-error@+1 {{invalid token at start of a preprocessor expression}} #if __has_include("stdint.h) #endif @@ -99,11 +99,25 @@ #if __has_include(stdint.h>) #endif +// expected-error@+1 {{missing '(' after '__has_include'}} +__has_include + +// expected-error@+1 {{missing ')' after '__has_include'}} // expected-error@+1 {{expected value in expression}} // expected-note@+1 {{to match this '('}} +#if __has_include("stdint.h" +#endif -// FIXME: These test cases cause the compiler to crash. (PR13334) -//#if __has_include("stdint.h" -//#if __has_include( -//#if __has_include -//#if __has_include( -//#if __has_include(}} // expected-error@+1 {{expected value in expression}} +#if __has_include( +#endif +// expected-error@+1 {{missing '(' after '__has_include'}} // expected-error@+1 {{expected value in expression}} +#if __has_include +#endif + +// expected-error@+1 {{missing ')' after '__has_include'}} // expected-error@+1 {{expected value in expression}} // expected-note@+1 {{to match this '('}} +#if __has_include( +#endif + +// expected-error@+1 {{expected "FILENAME" or }} // expected-error@+1 {{expected value in expression}} +#if __has_include( diff --git a/test/Preprocessor/init.c b/test/Preprocessor/init.c index e7321e0..33a21a3 100644 --- a/test/Preprocessor/init.c +++ b/test/Preprocessor/init.c @@ -212,19 +212,19 @@ // ARM:#define __INTPTR_TYPE__ long int // ARM:#define __INTPTR_WIDTH__ 32 // ARM:#define __INT_MAX__ 2147483647 -// ARM:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324 +// ARM:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L // ARM:#define __LDBL_DIG__ 15 -// ARM:#define __LDBL_EPSILON__ 2.2204460492503131e-16 +// ARM:#define __LDBL_EPSILON__ 2.2204460492503131e-16L // ARM:#define __LDBL_HAS_DENORM__ 1 // ARM:#define __LDBL_HAS_INFINITY__ 1 // ARM:#define __LDBL_HAS_QUIET_NAN__ 1 // ARM:#define __LDBL_MANT_DIG__ 53 // ARM:#define __LDBL_MAX_10_EXP__ 308 // ARM:#define __LDBL_MAX_EXP__ 1024 -// ARM:#define __LDBL_MAX__ 1.7976931348623157e+308 +// ARM:#define __LDBL_MAX__ 1.7976931348623157e+308L // ARM:#define __LDBL_MIN_10_EXP__ (-307) // ARM:#define __LDBL_MIN_EXP__ (-1021) -// ARM:#define __LDBL_MIN__ 2.2250738585072014e-308 +// ARM:#define __LDBL_MIN__ 2.2250738585072014e-308L // ARM:#define __LITTLE_ENDIAN__ 1 // ARM:#define __LONG_LONG_MAX__ 9223372036854775807LL // ARM:#define __LONG_MAX__ 2147483647L @@ -260,6 +260,215 @@ // ARM:#define __WINT_WIDTH__ 32 // ARM:#define __arm 1 // ARM:#define __arm__ 1 + +// RUN: %clang_cc1 -E -dM -ffreestanding -triple=arm-none-linux-gnueabi -target-feature +soft-float -target-feature +soft-float-abi < /dev/null | FileCheck -check-prefix ARMEABISOFTFP %s +// +// ARM-NOT:#define _LP64 +// ARMEABISOFTFP:#define __APCS_32__ 1 +// ARMEABISOFTFP:#define __ARMEL__ 1 +// ARMEABISOFTFP:#define __ARM_ARCH 6 +// ARMEABISOFTFP:#define 
__ARM_ARCH_6J__ 1 +// ARMEABISOFTFP:#define __ARM_EABI__ 1 +// ARMEABISOFTFP:#define __ARM_PCS 1 +// ARMEABISOFTFP-NOT:#define __ARM_PCS_VFP 1 +// ARMEABISOFTFP:#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ +// ARMEABISOFTFP:#define __CHAR16_TYPE__ unsigned short +// ARMEABISOFTFP:#define __CHAR32_TYPE__ unsigned int +// ARMEABISOFTFP:#define __CHAR_BIT__ 8 +// ARMEABISOFTFP:#define __DBL_DENORM_MIN__ 4.9406564584124654e-324 +// ARMEABISOFTFP:#define __DBL_DIG__ 15 +// ARMEABISOFTFP:#define __DBL_EPSILON__ 2.2204460492503131e-16 +// ARMEABISOFTFP:#define __DBL_HAS_DENORM__ 1 +// ARMEABISOFTFP:#define __DBL_HAS_INFINITY__ 1 +// ARMEABISOFTFP:#define __DBL_HAS_QUIET_NAN__ 1 +// ARMEABISOFTFP:#define __DBL_MANT_DIG__ 53 +// ARMEABISOFTFP:#define __DBL_MAX_10_EXP__ 308 +// ARMEABISOFTFP:#define __DBL_MAX_EXP__ 1024 +// ARMEABISOFTFP:#define __DBL_MAX__ 1.7976931348623157e+308 +// ARMEABISOFTFP:#define __DBL_MIN_10_EXP__ (-307) +// ARMEABISOFTFP:#define __DBL_MIN_EXP__ (-1021) +// ARMEABISOFTFP:#define __DBL_MIN__ 2.2250738585072014e-308 +// ARMEABISOFTFP:#define __DECIMAL_DIG__ 17 +// ARMEABISOFTFP:#define __FLT_DENORM_MIN__ 1.40129846e-45F +// ARMEABISOFTFP:#define __FLT_DIG__ 6 +// ARMEABISOFTFP:#define __FLT_EPSILON__ 1.19209290e-7F +// ARMEABISOFTFP:#define __FLT_EVAL_METHOD__ 0 +// ARMEABISOFTFP:#define __FLT_HAS_DENORM__ 1 +// ARMEABISOFTFP:#define __FLT_HAS_INFINITY__ 1 +// ARMEABISOFTFP:#define __FLT_HAS_QUIET_NAN__ 1 +// ARMEABISOFTFP:#define __FLT_MANT_DIG__ 24 +// ARMEABISOFTFP:#define __FLT_MAX_10_EXP__ 38 +// ARMEABISOFTFP:#define __FLT_MAX_EXP__ 128 +// ARMEABISOFTFP:#define __FLT_MAX__ 3.40282347e+38F +// ARMEABISOFTFP:#define __FLT_MIN_10_EXP__ (-37) +// ARMEABISOFTFP:#define __FLT_MIN_EXP__ (-125) +// ARMEABISOFTFP:#define __FLT_MIN__ 1.17549435e-38F +// ARMEABISOFTFP:#define __FLT_RADIX__ 2 +// ARMEABISOFTFP:#define __INT16_TYPE__ short +// ARMEABISOFTFP:#define __INT32_TYPE__ int +// ARMEABISOFTFP:#define __INT64_C_SUFFIX__ LL +// ARMEABISOFTFP:#define __INT64_TYPE__ long long int +// ARMEABISOFTFP:#define __INT8_TYPE__ char +// ARMEABISOFTFP:#define __INTMAX_MAX__ 9223372036854775807LL +// ARMEABISOFTFP:#define __INTMAX_TYPE__ long long int +// ARMEABISOFTFP:#define __INTMAX_WIDTH__ 64 +// ARMEABISOFTFP:#define __INTPTR_TYPE__ long int +// ARMEABISOFTFP:#define __INTPTR_WIDTH__ 32 +// ARMEABISOFTFP:#define __INT_MAX__ 2147483647 +// ARMEABISOFTFP:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L +// ARMEABISOFTFP:#define __LDBL_DIG__ 15 +// ARMEABISOFTFP:#define __LDBL_EPSILON__ 2.2204460492503131e-16L +// ARMEABISOFTFP:#define __LDBL_HAS_DENORM__ 1 +// ARMEABISOFTFP:#define __LDBL_HAS_INFINITY__ 1 +// ARMEABISOFTFP:#define __LDBL_HAS_QUIET_NAN__ 1 +// ARMEABISOFTFP:#define __LDBL_MANT_DIG__ 53 +// ARMEABISOFTFP:#define __LDBL_MAX_10_EXP__ 308 +// ARMEABISOFTFP:#define __LDBL_MAX_EXP__ 1024 +// ARMEABISOFTFP:#define __LDBL_MAX__ 1.7976931348623157e+308L +// ARMEABISOFTFP:#define __LDBL_MIN_10_EXP__ (-307) +// ARMEABISOFTFP:#define __LDBL_MIN_EXP__ (-1021) +// ARMEABISOFTFP:#define __LDBL_MIN__ 2.2250738585072014e-308L +// ARMEABISOFTFP:#define __LITTLE_ENDIAN__ 1 +// ARMEABISOFTFP:#define __LONG_LONG_MAX__ 9223372036854775807LL +// ARMEABISOFTFP:#define __LONG_MAX__ 2147483647L +// ARMEABISOFTFP-NOT:#define __LP64__ +// ARMEABISOFTFP:#define __POINTER_WIDTH__ 32 +// ARMEABISOFTFP:#define __PTRDIFF_TYPE__ int +// ARMEABISOFTFP:#define __PTRDIFF_WIDTH__ 32 +// ARMEABISOFTFP:#define __REGISTER_PREFIX__ +// ARMEABISOFTFP:#define __SCHAR_MAX__ 127 +// ARMEABISOFTFP:#define 
__SHRT_MAX__ 32767 +// ARMEABISOFTFP:#define __SIG_ATOMIC_WIDTH__ 32 +// ARMEABISOFTFP:#define __SIZEOF_DOUBLE__ 8 +// ARMEABISOFTFP:#define __SIZEOF_FLOAT__ 4 +// ARMEABISOFTFP:#define __SIZEOF_INT__ 4 +// ARMEABISOFTFP:#define __SIZEOF_LONG_DOUBLE__ 8 +// ARMEABISOFTFP:#define __SIZEOF_LONG_LONG__ 8 +// ARMEABISOFTFP:#define __SIZEOF_LONG__ 4 +// ARMEABISOFTFP:#define __SIZEOF_POINTER__ 4 +// ARMEABISOFTFP:#define __SIZEOF_PTRDIFF_T__ 4 +// ARMEABISOFTFP:#define __SIZEOF_SHORT__ 2 +// ARMEABISOFTFP:#define __SIZEOF_SIZE_T__ 4 +// ARMEABISOFTFP:#define __SIZEOF_WCHAR_T__ 4 +// ARMEABISOFTFP:#define __SIZEOF_WINT_T__ 4 +// ARMEABISOFTFP:#define __SIZE_TYPE__ unsigned int +// ARMEABISOFTFP:#define __SIZE_WIDTH__ 32 +// ARMEABISOFTFP:#define __SOFTFP__ 1 +// ARMEABISOFTFP:#define __THUMB_INTERWORK__ 1 +// ARMEABISOFTFP:#define __UINTMAX_TYPE__ long long unsigned int +// ARMEABISOFTFP:#define __USER_LABEL_PREFIX__ +// ARMEABISOFTFP:#define __WCHAR_MAX__ 4294967295U +// ARMEABISOFTFP:#define __WCHAR_TYPE__ unsigned int +// ARMEABISOFTFP:#define __WCHAR_WIDTH__ 32 +// ARMEABISOFTFP:#define __WINT_TYPE__ unsigned int +// ARMEABISOFTFP:#define __WINT_WIDTH__ 32 +// ARMEABISOFTFP:#define __arm 1 +// ARMEABISOFTFP:#define __arm__ 1 + +// RUN: %clang_cc1 -E -dM -ffreestanding -triple=arm-none-linux-gnueabi < /dev/null | FileCheck -check-prefix ARMEABIHARDFP %s +// +// ARM-NOT:#define _LP64 +// ARMEABIHARDFP:#define __APCS_32__ 1 +// ARMEABIHARDFP:#define __ARMEL__ 1 +// ARMEABIHARDFP:#define __ARM_ARCH 6 +// ARMEABIHARDFP:#define __ARM_ARCH_6J__ 1 +// ARMEABIHARDFP:#define __ARM_EABI__ 1 +// ARMEABIHARDFP:#define __ARM_PCS 1 +// ARMEABIHARDFP:#define __ARM_PCS_VFP 1 +// ARMEABIHARDFP:#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__ +// ARMEABIHARDFP:#define __CHAR16_TYPE__ unsigned short +// ARMEABIHARDFP:#define __CHAR32_TYPE__ unsigned int +// ARMEABIHARDFP:#define __CHAR_BIT__ 8 +// ARMEABIHARDFP:#define __DBL_DENORM_MIN__ 4.9406564584124654e-324 +// ARMEABIHARDFP:#define __DBL_DIG__ 15 +// ARMEABIHARDFP:#define __DBL_EPSILON__ 2.2204460492503131e-16 +// ARMEABIHARDFP:#define __DBL_HAS_DENORM__ 1 +// ARMEABIHARDFP:#define __DBL_HAS_INFINITY__ 1 +// ARMEABIHARDFP:#define __DBL_HAS_QUIET_NAN__ 1 +// ARMEABIHARDFP:#define __DBL_MANT_DIG__ 53 +// ARMEABIHARDFP:#define __DBL_MAX_10_EXP__ 308 +// ARMEABIHARDFP:#define __DBL_MAX_EXP__ 1024 +// ARMEABIHARDFP:#define __DBL_MAX__ 1.7976931348623157e+308 +// ARMEABIHARDFP:#define __DBL_MIN_10_EXP__ (-307) +// ARMEABIHARDFP:#define __DBL_MIN_EXP__ (-1021) +// ARMEABIHARDFP:#define __DBL_MIN__ 2.2250738585072014e-308 +// ARMEABIHARDFP:#define __DECIMAL_DIG__ 17 +// ARMEABIHARDFP:#define __FLT_DENORM_MIN__ 1.40129846e-45F +// ARMEABIHARDFP:#define __FLT_DIG__ 6 +// ARMEABIHARDFP:#define __FLT_EPSILON__ 1.19209290e-7F +// ARMEABIHARDFP:#define __FLT_EVAL_METHOD__ 0 +// ARMEABIHARDFP:#define __FLT_HAS_DENORM__ 1 +// ARMEABIHARDFP:#define __FLT_HAS_INFINITY__ 1 +// ARMEABIHARDFP:#define __FLT_HAS_QUIET_NAN__ 1 +// ARMEABIHARDFP:#define __FLT_MANT_DIG__ 24 +// ARMEABIHARDFP:#define __FLT_MAX_10_EXP__ 38 +// ARMEABIHARDFP:#define __FLT_MAX_EXP__ 128 +// ARMEABIHARDFP:#define __FLT_MAX__ 3.40282347e+38F +// ARMEABIHARDFP:#define __FLT_MIN_10_EXP__ (-37) +// ARMEABIHARDFP:#define __FLT_MIN_EXP__ (-125) +// ARMEABIHARDFP:#define __FLT_MIN__ 1.17549435e-38F +// ARMEABIHARDFP:#define __FLT_RADIX__ 2 +// ARMEABIHARDFP:#define __INT16_TYPE__ short +// ARMEABIHARDFP:#define __INT32_TYPE__ int +// ARMEABIHARDFP:#define __INT64_C_SUFFIX__ LL +// ARMEABIHARDFP:#define 
__INT64_TYPE__ long long int +// ARMEABIHARDFP:#define __INT8_TYPE__ char +// ARMEABIHARDFP:#define __INTMAX_MAX__ 9223372036854775807LL +// ARMEABIHARDFP:#define __INTMAX_TYPE__ long long int +// ARMEABIHARDFP:#define __INTMAX_WIDTH__ 64 +// ARMEABIHARDFP:#define __INTPTR_TYPE__ long int +// ARMEABIHARDFP:#define __INTPTR_WIDTH__ 32 +// ARMEABIHARDFP:#define __INT_MAX__ 2147483647 +// ARMEABIHARDFP:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L +// ARMEABIHARDFP:#define __LDBL_DIG__ 15 +// ARMEABIHARDFP:#define __LDBL_EPSILON__ 2.2204460492503131e-16L +// ARMEABIHARDFP:#define __LDBL_HAS_DENORM__ 1 +// ARMEABIHARDFP:#define __LDBL_HAS_INFINITY__ 1 +// ARMEABIHARDFP:#define __LDBL_HAS_QUIET_NAN__ 1 +// ARMEABIHARDFP:#define __LDBL_MANT_DIG__ 53 +// ARMEABIHARDFP:#define __LDBL_MAX_10_EXP__ 308 +// ARMEABIHARDFP:#define __LDBL_MAX_EXP__ 1024 +// ARMEABIHARDFP:#define __LDBL_MAX__ 1.7976931348623157e+308L +// ARMEABIHARDFP:#define __LDBL_MIN_10_EXP__ (-307) +// ARMEABIHARDFP:#define __LDBL_MIN_EXP__ (-1021) +// ARMEABIHARDFP:#define __LDBL_MIN__ 2.2250738585072014e-308L +// ARMEABIHARDFP:#define __LITTLE_ENDIAN__ 1 +// ARMEABIHARDFP:#define __LONG_LONG_MAX__ 9223372036854775807LL +// ARMEABIHARDFP:#define __LONG_MAX__ 2147483647L +// ARMEABIHARDFP-NOT:#define __LP64__ +// ARMEABIHARDFP:#define __POINTER_WIDTH__ 32 +// ARMEABIHARDFP:#define __PTRDIFF_TYPE__ int +// ARMEABIHARDFP:#define __PTRDIFF_WIDTH__ 32 +// ARMEABIHARDFP:#define __REGISTER_PREFIX__ +// ARMEABIHARDFP:#define __SCHAR_MAX__ 127 +// ARMEABIHARDFP:#define __SHRT_MAX__ 32767 +// ARMEABIHARDFP:#define __SIG_ATOMIC_WIDTH__ 32 +// ARMEABIHARDFP:#define __SIZEOF_DOUBLE__ 8 +// ARMEABIHARDFP:#define __SIZEOF_FLOAT__ 4 +// ARMEABIHARDFP:#define __SIZEOF_INT__ 4 +// ARMEABIHARDFP:#define __SIZEOF_LONG_DOUBLE__ 8 +// ARMEABIHARDFP:#define __SIZEOF_LONG_LONG__ 8 +// ARMEABIHARDFP:#define __SIZEOF_LONG__ 4 +// ARMEABIHARDFP:#define __SIZEOF_POINTER__ 4 +// ARMEABIHARDFP:#define __SIZEOF_PTRDIFF_T__ 4 +// ARMEABIHARDFP:#define __SIZEOF_SHORT__ 2 +// ARMEABIHARDFP:#define __SIZEOF_SIZE_T__ 4 +// ARMEABIHARDFP:#define __SIZEOF_WCHAR_T__ 4 +// ARMEABIHARDFP:#define __SIZEOF_WINT_T__ 4 +// ARMEABIHARDFP:#define __SIZE_TYPE__ unsigned int +// ARMEABIHARDFP:#define __SIZE_WIDTH__ 32 +// ARMEABIHARDFP-NOT:#define __SOFTFP__ 1 +// ARMEABIHARDFP:#define __THUMB_INTERWORK__ 1 +// ARMEABIHARDFP:#define __UINTMAX_TYPE__ long long unsigned int +// ARMEABIHARDFP:#define __USER_LABEL_PREFIX__ +// ARMEABIHARDFP:#define __WCHAR_MAX__ 4294967295U +// ARMEABIHARDFP:#define __WCHAR_TYPE__ unsigned int +// ARMEABIHARDFP:#define __WCHAR_WIDTH__ 32 +// ARMEABIHARDFP:#define __WINT_TYPE__ unsigned int +// ARMEABIHARDFP:#define __WINT_WIDTH__ 32 +// ARMEABIHARDFP:#define __arm 1 +// ARMEABIHARDFP:#define __arm__ 1 + // // RUN: %clang_cc1 -E -dM -ffreestanding -triple=i386-none-none < /dev/null | FileCheck -check-prefix I386 %s // @@ -461,6 +670,8 @@ // MIPS32BE:#define _ABIO32 1 // MIPS32BE-NOT:#define _LP64 // MIPS32BE:#define _MIPSEB 1 +// MIPS32BE:#define _MIPS_ARCH "mips32" +// MIPS32BE:#define _MIPS_ARCH_MIPS32 1 // MIPS32BE:#define _MIPS_SIM _ABIO32 // MIPS32BE:#define _MIPS_SZINT 32 // MIPS32BE:#define _MIPS_SZLONG 32 @@ -510,19 +721,19 @@ // MIPS32BE:#define __INTPTR_TYPE__ long int // MIPS32BE:#define __INTPTR_WIDTH__ 32 // MIPS32BE:#define __INT_MAX__ 2147483647 -// MIPS32BE:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324 +// MIPS32BE:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L // MIPS32BE:#define __LDBL_DIG__ 15 -// MIPS32BE:#define 
__LDBL_EPSILON__ 2.2204460492503131e-16 +// MIPS32BE:#define __LDBL_EPSILON__ 2.2204460492503131e-16L // MIPS32BE:#define __LDBL_HAS_DENORM__ 1 // MIPS32BE:#define __LDBL_HAS_INFINITY__ 1 // MIPS32BE:#define __LDBL_HAS_QUIET_NAN__ 1 // MIPS32BE:#define __LDBL_MANT_DIG__ 53 // MIPS32BE:#define __LDBL_MAX_10_EXP__ 308 // MIPS32BE:#define __LDBL_MAX_EXP__ 1024 -// MIPS32BE:#define __LDBL_MAX__ 1.7976931348623157e+308 +// MIPS32BE:#define __LDBL_MAX__ 1.7976931348623157e+308L // MIPS32BE:#define __LDBL_MIN_10_EXP__ (-307) // MIPS32BE:#define __LDBL_MIN_EXP__ (-1021) -// MIPS32BE:#define __LDBL_MIN__ 2.2250738585072014e-308 +// MIPS32BE:#define __LDBL_MIN__ 2.2250738585072014e-308L // MIPS32BE:#define __LONG_LONG_MAX__ 9223372036854775807LL // MIPS32BE:#define __LONG_MAX__ 2147483647L // MIPS32BE-NOT:#define __LP64__ @@ -575,6 +786,8 @@ // MIPS32EL:#define _ABIO32 1 // MIPS32EL-NOT:#define _LP64 // MIPS32EL:#define _MIPSEL 1 +// MIPS32EL:#define _MIPS_ARCH "mips32" +// MIPS32EL:#define _MIPS_ARCH_MIPS32 1 // MIPS32EL:#define _MIPS_SIM _ABIO32 // MIPS32EL:#define _MIPS_SZINT 32 // MIPS32EL:#define _MIPS_SZLONG 32 @@ -624,19 +837,19 @@ // MIPS32EL:#define __INTPTR_TYPE__ long int // MIPS32EL:#define __INTPTR_WIDTH__ 32 // MIPS32EL:#define __INT_MAX__ 2147483647 -// MIPS32EL:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324 +// MIPS32EL:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L // MIPS32EL:#define __LDBL_DIG__ 15 -// MIPS32EL:#define __LDBL_EPSILON__ 2.2204460492503131e-16 +// MIPS32EL:#define __LDBL_EPSILON__ 2.2204460492503131e-16L // MIPS32EL:#define __LDBL_HAS_DENORM__ 1 // MIPS32EL:#define __LDBL_HAS_INFINITY__ 1 // MIPS32EL:#define __LDBL_HAS_QUIET_NAN__ 1 // MIPS32EL:#define __LDBL_MANT_DIG__ 53 // MIPS32EL:#define __LDBL_MAX_10_EXP__ 308 // MIPS32EL:#define __LDBL_MAX_EXP__ 1024 -// MIPS32EL:#define __LDBL_MAX__ 1.7976931348623157e+308 +// MIPS32EL:#define __LDBL_MAX__ 1.7976931348623157e+308L // MIPS32EL:#define __LDBL_MIN_10_EXP__ (-307) // MIPS32EL:#define __LDBL_MIN_EXP__ (-1021) -// MIPS32EL:#define __LDBL_MIN__ 2.2250738585072014e-308 +// MIPS32EL:#define __LDBL_MIN__ 2.2250738585072014e-308L // MIPS32EL:#define __LONG_LONG_MAX__ 9223372036854775807LL // MIPS32EL:#define __LONG_MAX__ 2147483647L // MIPS32EL-NOT:#define __LP64__ @@ -686,6 +899,8 @@ // MIPS64BE:#define _ABI64 3 // MIPS64BE:#define _LP64 1 // MIPS64BE:#define _MIPSEB 1 +// MIPS64BE:#define _MIPS_ARCH "mips64" +// MIPS64BE:#define _MIPS_ARCH_MIPS64 1 // MIPS64BE:#define _MIPS_SIM _ABI64 // MIPS64BE:#define _MIPS_SZINT 32 // MIPS64BE:#define _MIPS_SZLONG 64 @@ -785,6 +1000,8 @@ // MIPS64BE:#define __clang__ 1 // MIPS64BE:#define __llvm__ 1 // MIPS64BE:#define __mips 1 +// MIPS64BE:#define __mips64 1 +// MIPS64BE:#define __mips64__ 1 // MIPS64BE:#define __mips__ 1 // MIPS64BE:#define __mips_hard_float 1 // MIPS64BE:#define __mips_n64 1 @@ -797,6 +1014,8 @@ // MIPS64EL:#define _ABI64 3 // MIPS64EL:#define _LP64 1 // MIPS64EL:#define _MIPSEL 1 +// MIPS64EL:#define _MIPS_ARCH "mips64" +// MIPS64EL:#define _MIPS_ARCH_MIPS64 1 // MIPS64EL:#define _MIPS_SIM _ABI64 // MIPS64EL:#define _MIPS_SZINT 32 // MIPS64EL:#define _MIPS_SZLONG 64 @@ -896,6 +1115,8 @@ // MIPS64EL:#define __clang__ 1 // MIPS64EL:#define __llvm__ 1 // MIPS64EL:#define __mips 1 +// MIPS64EL:#define __mips64 1 +// MIPS64EL:#define __mips64__ 1 // MIPS64EL:#define __mips__ 1 // MIPS64EL:#define __mips_hard_float 1 // MIPS64EL:#define __mips_n64 1 @@ -993,19 +1214,19 @@ // MSP430:#define __INTPTR_TYPE__ short // MSP430:#define __INTPTR_WIDTH__ 16 // 
MSP430:#define __INT_MAX__ 32767 -// MSP430:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324 +// MSP430:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L // MSP430:#define __LDBL_DIG__ 15 -// MSP430:#define __LDBL_EPSILON__ 2.2204460492503131e-16 +// MSP430:#define __LDBL_EPSILON__ 2.2204460492503131e-16L // MSP430:#define __LDBL_HAS_DENORM__ 1 // MSP430:#define __LDBL_HAS_INFINITY__ 1 // MSP430:#define __LDBL_HAS_QUIET_NAN__ 1 // MSP430:#define __LDBL_MANT_DIG__ 53 // MSP430:#define __LDBL_MAX_10_EXP__ 308 // MSP430:#define __LDBL_MAX_EXP__ 1024 -// MSP430:#define __LDBL_MAX__ 1.7976931348623157e+308 +// MSP430:#define __LDBL_MAX__ 1.7976931348623157e+308L // MSP430:#define __LDBL_MIN_10_EXP__ (-307) // MSP430:#define __LDBL_MIN_EXP__ (-1021) -// MSP430:#define __LDBL_MIN__ 2.2250738585072014e-308 +// MSP430:#define __LDBL_MIN__ 2.2250738585072014e-308L // MSP430:#define __LONG_LONG_MAX__ 9223372036854775807LL // MSP430:#define __LONG_MAX__ 2147483647L // MSP430-NOT:#define __LP64__ @@ -1088,19 +1309,19 @@ // NVPTX32:#define __INTPTR_TYPE__ unsigned int // NVPTX32:#define __INTPTR_WIDTH__ 32 // NVPTX32:#define __INT_MAX__ 2147483647 -// NVPTX32:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324 +// NVPTX32:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L // NVPTX32:#define __LDBL_DIG__ 15 -// NVPTX32:#define __LDBL_EPSILON__ 2.2204460492503131e-16 +// NVPTX32:#define __LDBL_EPSILON__ 2.2204460492503131e-16L // NVPTX32:#define __LDBL_HAS_DENORM__ 1 // NVPTX32:#define __LDBL_HAS_INFINITY__ 1 // NVPTX32:#define __LDBL_HAS_QUIET_NAN__ 1 // NVPTX32:#define __LDBL_MANT_DIG__ 53 // NVPTX32:#define __LDBL_MAX_10_EXP__ 308 // NVPTX32:#define __LDBL_MAX_EXP__ 1024 -// NVPTX32:#define __LDBL_MAX__ 1.7976931348623157e+308 +// NVPTX32:#define __LDBL_MAX__ 1.7976931348623157e+308L // NVPTX32:#define __LDBL_MIN_10_EXP__ (-307) // NVPTX32:#define __LDBL_MIN_EXP__ (-1021) -// NVPTX32:#define __LDBL_MIN__ 2.2250738585072014e-308 +// NVPTX32:#define __LDBL_MIN__ 2.2250738585072014e-308L // NVPTX32:#define __LONG_LONG_MAX__ 9223372036854775807LL // NVPTX32:#define __LONG_MAX__ 9223372036854775807L // NVPTX32-NOT:#define __LP64__ @@ -1184,19 +1405,19 @@ // NVPTX64:#define __INTPTR_TYPE__ long long unsigned int // NVPTX64:#define __INTPTR_WIDTH__ 64 // NVPTX64:#define __INT_MAX__ 2147483647 -// NVPTX64:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324 +// NVPTX64:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324L // NVPTX64:#define __LDBL_DIG__ 15 -// NVPTX64:#define __LDBL_EPSILON__ 2.2204460492503131e-16 +// NVPTX64:#define __LDBL_EPSILON__ 2.2204460492503131e-16L // NVPTX64:#define __LDBL_HAS_DENORM__ 1 // NVPTX64:#define __LDBL_HAS_INFINITY__ 1 // NVPTX64:#define __LDBL_HAS_QUIET_NAN__ 1 // NVPTX64:#define __LDBL_MANT_DIG__ 53 // NVPTX64:#define __LDBL_MAX_10_EXP__ 308 // NVPTX64:#define __LDBL_MAX_EXP__ 1024 -// NVPTX64:#define __LDBL_MAX__ 1.7976931348623157e+308 +// NVPTX64:#define __LDBL_MAX__ 1.7976931348623157e+308L // NVPTX64:#define __LDBL_MIN_10_EXP__ (-307) // NVPTX64:#define __LDBL_MIN_EXP__ (-1021) -// NVPTX64:#define __LDBL_MIN__ 2.2250738585072014e-308 +// NVPTX64:#define __LDBL_MIN__ 2.2250738585072014e-308L // NVPTX64:#define __LONG_LONG_MAX__ 9223372036854775807LL // NVPTX64:#define __LONG_MAX__ 9223372036854775807L // NVPTX64:#define __LP64__ 1 @@ -1796,19 +2017,19 @@ // SPARC:#define __INTPTR_TYPE__ long int // SPARC:#define __INTPTR_WIDTH__ 32 // SPARC:#define __INT_MAX__ 2147483647 -// SPARC:#define __LDBL_DENORM_MIN__ 4.9406564584124654e-324 +// SPARC:#define 
__LDBL_DENORM_MIN__ 4.9406564584124654e-324L // SPARC:#define __LDBL_DIG__ 15 -// SPARC:#define __LDBL_EPSILON__ 2.2204460492503131e-16 +// SPARC:#define __LDBL_EPSILON__ 2.2204460492503131e-16L // SPARC:#define __LDBL_HAS_DENORM__ 1 // SPARC:#define __LDBL_HAS_INFINITY__ 1 // SPARC:#define __LDBL_HAS_QUIET_NAN__ 1 // SPARC:#define __LDBL_MANT_DIG__ 53 // SPARC:#define __LDBL_MAX_10_EXP__ 308 // SPARC:#define __LDBL_MAX_EXP__ 1024 -// SPARC:#define __LDBL_MAX__ 1.7976931348623157e+308 +// SPARC:#define __LDBL_MAX__ 1.7976931348623157e+308L // SPARC:#define __LDBL_MIN_10_EXP__ (-307) // SPARC:#define __LDBL_MIN_EXP__ (-1021) -// SPARC:#define __LDBL_MIN__ 2.2250738585072014e-308 +// SPARC:#define __LDBL_MIN__ 2.2250738585072014e-308L // SPARC:#define __LONG_LONG_MAX__ 9223372036854775807LL // SPARC:#define __LONG_MAX__ 2147483647L // SPARC-NOT:#define __LP64__ @@ -1853,19 +2074,19 @@ // TCE:#define __CHAR16_TYPE__ unsigned short // TCE:#define __CHAR32_TYPE__ unsigned int // TCE:#define __CHAR_BIT__ 8 -// TCE:#define __DBL_DENORM_MIN__ 1.40129846e-45F +// TCE:#define __DBL_DENORM_MIN__ 1.40129846e-45 // TCE:#define __DBL_DIG__ 6 -// TCE:#define __DBL_EPSILON__ 1.19209290e-7F +// TCE:#define __DBL_EPSILON__ 1.19209290e-7 // TCE:#define __DBL_HAS_DENORM__ 1 // TCE:#define __DBL_HAS_INFINITY__ 1 // TCE:#define __DBL_HAS_QUIET_NAN__ 1 // TCE:#define __DBL_MANT_DIG__ 24 // TCE:#define __DBL_MAX_10_EXP__ 38 // TCE:#define __DBL_MAX_EXP__ 128 -// TCE:#define __DBL_MAX__ 3.40282347e+38F +// TCE:#define __DBL_MAX__ 3.40282347e+38 // TCE:#define __DBL_MIN_10_EXP__ (-37) // TCE:#define __DBL_MIN_EXP__ (-125) -// TCE:#define __DBL_MIN__ 1.17549435e-38F +// TCE:#define __DBL_MIN__ 1.17549435e-38 // TCE:#define __DECIMAL_DIG__ -1 // TCE:#define __FLT_DENORM_MIN__ 1.40129846e-45F // TCE:#define __FLT_DIG__ 6 @@ -1891,19 +2112,19 @@ // TCE:#define __INTPTR_TYPE__ int // TCE:#define __INTPTR_WIDTH__ 32 // TCE:#define __INT_MAX__ 2147483647 -// TCE:#define __LDBL_DENORM_MIN__ 1.40129846e-45F +// TCE:#define __LDBL_DENORM_MIN__ 1.40129846e-45L // TCE:#define __LDBL_DIG__ 6 -// TCE:#define __LDBL_EPSILON__ 1.19209290e-7F +// TCE:#define __LDBL_EPSILON__ 1.19209290e-7L // TCE:#define __LDBL_HAS_DENORM__ 1 // TCE:#define __LDBL_HAS_INFINITY__ 1 // TCE:#define __LDBL_HAS_QUIET_NAN__ 1 // TCE:#define __LDBL_MANT_DIG__ 24 // TCE:#define __LDBL_MAX_10_EXP__ 38 // TCE:#define __LDBL_MAX_EXP__ 128 -// TCE:#define __LDBL_MAX__ 3.40282347e+38F +// TCE:#define __LDBL_MAX__ 3.40282347e+38L // TCE:#define __LDBL_MIN_10_EXP__ (-37) // TCE:#define __LDBL_MIN_EXP__ (-125) -// TCE:#define __LDBL_MIN__ 1.17549435e-38F +// TCE:#define __LDBL_MIN__ 1.17549435e-38L // TCE:#define __LONG_LONG_MAX__ 2147483647LL // TCE:#define __LONG_MAX__ 2147483647L // TCE-NOT:#define __LP64__ diff --git a/test/Preprocessor/macro_arg_directive.c b/test/Preprocessor/macro_arg_directive.c index 5c9943d..5bc2236 100644 --- a/test/Preprocessor/macro_arg_directive.c +++ b/test/Preprocessor/macro_arg_directive.c @@ -1,5 +1,12 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +#define a(x) enum { x } +a(n = +#undef a +#define a 5 + a); +_Static_assert(n == 5, ""); + // header1.h void fail(const char *); #define MUNCH(...) \ diff --git a/test/Preprocessor/macro_fn_comma_swallow2.c b/test/Preprocessor/macro_fn_comma_swallow2.c new file mode 100644 index 0000000..93ab2b8 --- /dev/null +++ b/test/Preprocessor/macro_fn_comma_swallow2.c @@ -0,0 +1,64 @@ +// Test the __VA_ARGS__ comma swallowing extensions of various compiler dialects. 
+ +// RUN: %clang_cc1 -E %s | FileCheck -check-prefix=GCC -strict-whitespace %s +// RUN: %clang_cc1 -E -std=c99 %s | FileCheck -check-prefix=C99 -strict-whitespace %s +// RUN: %clang_cc1 -E -std=c11 %s | FileCheck -check-prefix=C99 -strict-whitespace %s +// RUN: %clang_cc1 -E -x c++ %s | FileCheck -check-prefix=GCC -strict-whitespace %s +// RUN: %clang_cc1 -E -std=gnu99 %s | FileCheck -check-prefix=GCC -strict-whitespace %s +// RUN: %clang_cc1 -E -fms-compatibility %s | FileCheck -check-prefix=MS -strict-whitespace %s +// RUN: %clang_cc1 -E -DNAMED %s | FileCheck -check-prefix=GCC -strict-whitespace %s +// RUN: %clang_cc1 -E -std=c99 -DNAMED %s | FileCheck -check-prefix=C99 -strict-whitespace %s + + +#ifndef NAMED +# define A(...) [ __VA_ARGS__ ] +# define B(...) [ , __VA_ARGS__ ] +# define C(...) [ , ## __VA_ARGS__ ] +# define D(A,...) [ A , ## __VA_ARGS__ ] +# define E(A,...) [ __VA_ARGS__ ## A ] +#else +// These are the GCC named argument versions of the C99-style variadic macros. +// Note that __VA_ARGS__ *may* be used as the name, this is not prohibited! +# define A(__VA_ARGS__...) [ __VA_ARGS__ ] +# define B(__VA_ARGS__...) [ , __VA_ARGS__ ] +# define C(__VA_ARGS__...) [ , ## __VA_ARGS__ ] +# define D(A,__VA_ARGS__...) [ A , ## __VA_ARGS__ ] +# define E(A,__VA_ARGS__...) [ __VA_ARGS__ ## A ] +#endif + + +1: A() B() C() D() E() +2: A(a) B(a) C(a) D(a) E(a) +3: A(,) B(,) C(,) D(,) E(,) +4: A(a,b,c) B(a,b,c) C(a,b,c) D(a,b,c) E(a,b,c) +5: A(a,b,) B(a,b,) C(a,b,) D(a,b,) + +// The GCC ", ## __VA_ARGS__" extension swallows the comma when followed by +// empty __VA_ARGS__. This extension does not apply in -std=c99 mode, but +// does apply in C++. +// +// GCC: 1: [ ] [ , ] [ ] [ ] [ ] +// GCC: 2: [ a ] [ , a ] [ ,a ] [ a ] [ a ] +// GCC: 3: [ , ] [ , , ] [ ,, ] [ , ] [ ] +// GCC: 4: [ a,b,c ] [ , a,b,c ] [ ,a,b,c ] [ a ,b,c ] [ b,ca ] +// GCC: 5: [ a,b, ] [ , a,b, ] [ ,a,b, ] [ a ,b, ] + +// Under C99 standard mode, the GCC ", ## __VA_ARGS__" extension *does not* +// swallow the comma when followed by empty __VA_ARGS__. +// +// C99: 1: [ ] [ , ] [ , ] [ ] [ ] +// C99: 2: [ a ] [ , a ] [ ,a ] [ a ] [ a ] +// C99: 3: [ , ] [ , , ] [ ,, ] [ , ] [ ] +// C99: 4: [ a,b,c ] [ , a,b,c ] [ ,a,b,c ] [ a ,b,c ] [ b,ca ] +// C99: 5: [ a,b, ] [ , a,b, ] [ ,a,b, ] [ a ,b, ] + +// Microsoft's extension is on ", __VA_ARGS__" (without explicit ##) where +// the comma is swallowed when followed by empty __VA_ARGS__. 
+// +// MS: 1: [ ] [ ] [ ] [ ] [ ] +// MS: 2: [ a ] [ , a ] [ ,a ] [ a ] [ a ] +// MS: 3: [ , ] [ , , ] [ ,, ] [ , ] [ ] +// MS: 4: [ a,b,c ] [ , a,b,c ] [ ,a,b,c ] [ a ,b,c ] [ b,ca ] +// MS: 5: [ a,b, ] [ , a,b, ] [ ,a,b, ] [ a ,b, ] + +// FIXME: Item 3(d) in MS output should be [ ] not [ , ] diff --git a/test/Preprocessor/macro_paste_identifier_error.c b/test/Preprocessor/macro_paste_identifier_error.c index 457e6f7..bba3172 100644 --- a/test/Preprocessor/macro_paste_identifier_error.c +++ b/test/Preprocessor/macro_paste_identifier_error.c @@ -1,6 +1,7 @@ // RUN: %clang_cc1 -fms-extensions -Wno-invalid-token-paste %s -verify // RUN: %clang_cc1 -E -fms-extensions -Wno-invalid-token-paste %s | FileCheck %s // RUN: %clang_cc1 -E -fms-extensions -Wno-invalid-token-paste -x assembler-with-cpp %s | FileCheck %s +// expected-no-diagnostics #define foo a ## b ## = 0 int foo; diff --git a/test/Preprocessor/microsoft-ext.c b/test/Preprocessor/microsoft-ext.c new file mode 100644 index 0000000..ec10374 --- /dev/null +++ b/test/Preprocessor/microsoft-ext.c @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -E -fms-compatibility %s -o %t +// RUN: FileCheck %s < %t + +# define M2(x, y) x + y +# define P(x, y) {x, y} +# define M(x, y) M2(x, P(x, y)) +M(a, b) // CHECK: a + {a, b} + +// Regression test for PR13924 +#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar) +#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar + +#define GMOCK_INTERNAL_COUNT_AND_2_VALUE_PARAMS(p0, p1) P2 + +#define GMOCK_ACTION_CLASS_(name, value_params)\ + GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params) + +#define ACTION_TEMPLATE(name, template_params, value_params)\ +class GMOCK_ACTION_CLASS_(name, value_params) {\ +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_2_VALUE_PARAMS(p0, p1)); diff --git a/test/Preprocessor/objc-pp.m b/test/Preprocessor/objc-pp.m index 0ec288c..3522f73 100644 --- a/test/Preprocessor/objc-pp.m +++ b/test/Preprocessor/objc-pp.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -pedantic -ffreestanding +// expected-no-diagnostics #import // no warning on #import in objc mode. diff --git a/test/Preprocessor/optimize.c b/test/Preprocessor/optimize.c index 97f841a..0167e70 100644 --- a/test/Preprocessor/optimize.c +++ b/test/Preprocessor/optimize.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -Eonly %s -DOPT_O2 -O2 -verify #ifdef OPT_O2 + // expected-no-diagnostics #ifndef __OPTIMIZE__ #error "__OPTIMIZE__ not defined" #endif @@ -10,6 +11,7 @@ // RUN: %clang_cc1 -Eonly %s -DOPT_O0 -O0 -verify #ifdef OPT_O0 + // expected-no-diagnostics #ifdef __OPTIMIZE__ #error "__OPTIMIZE__ defined" #endif @@ -20,6 +22,7 @@ // RUN: %clang_cc1 -Eonly %s -DOPT_OS -Os -verify #ifdef OPT_OS + // expected-no-diagnostics #ifndef __OPTIMIZE__ #error "__OPTIMIZE__ not defined" #endif diff --git a/test/Preprocessor/pr13851.c b/test/Preprocessor/pr13851.c new file mode 100644 index 0000000..537594d --- /dev/null +++ b/test/Preprocessor/pr13851.c @@ -0,0 +1,11 @@ +// Check that -E -M -MF does not cause an "argument unused" error, by adding +// -Werror to the clang invocation. Also check the dependency output, if any. +// RUN: %clang -Werror -E -M -MF %t-M.d %s +// RUN: FileCheck --input-file=%t-M.d %s +// CHECK: pr13851.o: +// CHECK: pr13851.c + +// Check that -E -MM -MF does not cause an "argument unused" error, by adding +// -Werror to the clang invocation. Also check the dependency output, if any. 
+// RUN: %clang -Werror -E -MM -MF %t-MM.d %s +// RUN: FileCheck --input-file=%t-MM.d %s diff --git a/test/Preprocessor/pragma-pushpop-macro.c b/test/Preprocessor/pragma-pushpop-macro.c index 08a6570..0aee074 100644 --- a/test/Preprocessor/pragma-pushpop-macro.c +++ b/test/Preprocessor/pragma-pushpop-macro.c @@ -31,6 +31,22 @@ int pmy1 = Y; #define Y 4 int pmy2 = Y; +// The sequence push, define/undef, pop caused problems if macro was not +// previously defined. +#pragma push_macro("PREVIOUSLY_UNDEFINED1") +#undef PREVIOUSLY_UNDEFINED1 +#pragma pop_macro("PREVIOUSLY_UNDEFINED1") +#ifndef PREVIOUSLY_UNDEFINED1 +int Q; +#endif + +#pragma push_macro("PREVIOUSLY_UNDEFINED2") +#define PREVIOUSLY_UNDEFINED2 +#pragma pop_macro("PREVIOUSLY_UNDEFINED2") +#ifndef PREVIOUSLY_UNDEFINED2 +int P; +#endif + // CHECK: int pmx0 = 1 // CHECK: int pmy0 = 2 // CHECK: int pmx1 = 1 @@ -38,4 +54,5 @@ int pmy2 = Y; // CHECK: int pmx3 = 1 // CHECK: int pmy1 = 3 // CHECK: int pmy2 = 4 - +// CHECK: int Q; +// CHECK: int P; diff --git a/test/Preprocessor/pragma_sysheader.c b/test/Preprocessor/pragma_sysheader.c index 17080fe..075c980 100644 --- a/test/Preprocessor/pragma_sysheader.c +++ b/test/Preprocessor/pragma_sysheader.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -verify -pedantic %s -fsyntax-only // RUN: %clang_cc1 -E %s | FileCheck %s +// expected-no-diagnostics // rdar://6899937 #include "pragma_sysheader.h" @@ -9,4 +10,4 @@ // CHECK-NEXT: # 1 "{{.*}}pragma_sysheader.h" 3 // CHECK-NEXT: typedef int x; // CHECK-NEXT: typedef int x; -// CHECK-NEXT: # 5 "{{.*}}pragma_sysheader.c" 2 +// CHECK-NEXT: # 6 "{{.*}}pragma_sysheader.c" 2 diff --git a/test/Preprocessor/predefined-arch-macros.c b/test/Preprocessor/predefined-arch-macros.c index 2361abe..719f945 100644 --- a/test/Preprocessor/predefined-arch-macros.c +++ b/test/Preprocessor/predefined-arch-macros.c @@ -516,6 +516,7 @@ // CHECK_CORE_AVX2_M32: #define __PCLMUL__ 1 // CHECK_CORE_AVX2_M32: #define __POPCNT__ 1 // CHECK_CORE_AVX2_M32: #define __RDRND__ 1 +// CHECK_CORE_AVX2_M32: #define __RTM__ 1 // CHECK_CORE_AVX2_M32: #define __SSE2__ 1 // CHECK_CORE_AVX2_M32: #define __SSE3__ 1 // CHECK_CORE_AVX2_M32: #define __SSE4_1__ 1 @@ -541,6 +542,7 @@ // CHECK_CORE_AVX2_M64: #define __PCLMUL__ 1 // CHECK_CORE_AVX2_M64: #define __POPCNT__ 1 // CHECK_CORE_AVX2_M64: #define __RDRND__ 1 +// CHECK_CORE_AVX2_M64: #define __RTM__ 1 // CHECK_CORE_AVX2_M64: #define __SSE2_MATH__ 1 // CHECK_CORE_AVX2_M64: #define __SSE2__ 1 // CHECK_CORE_AVX2_M64: #define __SSE3__ 1 diff --git a/test/Preprocessor/user_defined_system_framework.c b/test/Preprocessor/user_defined_system_framework.c index 8e3db56..2ab2a29 100644 --- a/test/Preprocessor/user_defined_system_framework.c +++ b/test/Preprocessor/user_defined_system_framework.c @@ -1,4 +1,5 @@ -// RUN: %clang -cc1 -fsyntax-only -F %S/Inputs -Wsign-conversion -verify %s +// RUN: %clang_cc1 -fsyntax-only -F %S/Inputs -Wsign-conversion -verify %s +// expected-no-diagnostics // Check that TestFramework is treated as a system header. 
#include diff --git a/test/Rewriter/no-integrated-preprocessing-64bit.m b/test/Rewriter/no-integrated-preprocessing-64bit.m new file mode 100644 index 0000000..6318449 --- /dev/null +++ b/test/Rewriter/no-integrated-preprocessing-64bit.m @@ -0,0 +1,26 @@ +// RUN: %clang -target x86_64-unkown-unknown -fms-extensions -rewrite-objc %s -o - | FileCheck %s +// rdar://12189793 + +#ifdef __cplusplus + +void *sel_registerName(const char *); + +@interface Root @end + +@interface MYINTF : Root +@end + +#endif + +@implementation MYINTF +- (id) MYMETH { return [self MYMETH]; } +@end + +int main() { +} + +// CHECK: static struct _class_ro_t _OBJC_CLASS_RO_$_MYINTF +// CHECK-NEXT: 0, 0, 0, +// CHECK-NEXT: (unsigned int)0, +// CHECK-NEXT: 0, +// CHECK-NEXT: "MYINTF", diff --git a/test/Rewriter/no-integrated-preprocessing.m b/test/Rewriter/no-integrated-preprocessing.m new file mode 100644 index 0000000..e4cc301 --- /dev/null +++ b/test/Rewriter/no-integrated-preprocessing.m @@ -0,0 +1,26 @@ +// RUN: %clang -arch i386 -fms-extensions -rewrite-objc %s -o %t-rw.cpp +// RUN: FileCheck %s < %t-rw.cpp +// rdar://12189793 + +#ifdef __cplusplus + +void *sel_registerName(const char *); + +@interface Root @end + +@interface MYINTF : Root +@end + +#endif + +@implementation MYINTF +- (id) MYMETH { return [self MYMETH]; } +@end + +int main() { +} + +// CHECK: static struct _class_ro_t _OBJC_CLASS_RO_$_MYINTF +// CHECK-NEXT: 0, 0, 0, +// CHECK-NEXT: 0, +// CHECK-NEST: "MYINTF", diff --git a/test/Rewriter/objc-modern-StretAPI-2.mm b/test/Rewriter/objc-modern-StretAPI-2.mm new file mode 100644 index 0000000..961fc16 --- /dev/null +++ b/test/Rewriter/objc-modern-StretAPI-2.mm @@ -0,0 +1,30 @@ +// RUN: %clang_cc1 -x objective-c++ -fblocks -fms-extensions -rewrite-objc %s -o %t-rw.cpp +// RUN: %clang_cc1 -fsyntax-only -fblocks -Wno-address-of-temporary -D"Class=void*" -D"id=void*" -D"SEL=void*" -D"__declspec(X)=" %t-rw.cpp +// rdar://12142241 + +extern "C" void *sel_registerName(const char *); +typedef unsigned long size_t; + +typedef unsigned long NSUInteger; +typedef struct _NSRange { + NSUInteger location; + NSUInteger length; +} NSRange; + + +@interface NSIndexSet +- (NSRange)rangeAtIndex:(NSUInteger)rangeIndex; +@end + +@interface NSArray +@end + +@implementation NSArray +- (NSArray *)objectsAtIndexes:(NSIndexSet *)iset { + + NSUInteger ridx = 0; + NSRange range = [iset rangeAtIndex:ridx]; + return 0; +} +@end + diff --git a/test/Rewriter/objc-modern-boxing.mm b/test/Rewriter/objc-modern-boxing.mm index 8f8ed75..4997c24 100644 --- a/test/Rewriter/objc-modern-boxing.mm +++ b/test/Rewriter/objc-modern-boxing.mm @@ -66,7 +66,7 @@ int main(int argc, const char *argv[]) { // CHECK: NSNumber *fortyTwoUnsigned = ((NSNumber *(*)(id, SEL, unsigned int))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithUnsignedInt:"), (42U)); // CHECK: NSNumber *fortyTwoLong = ((NSNumber *(*)(id, SEL, long))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithLong:"), (42L)); // CHECK: NSNumber *fortyTwoLongLong = ((NSNumber *(*)(id, SEL, long long))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithLongLong:"), (42LL)); -// CHECK: NSNumber *piFloat = ((NSNumber *(*)(id, SEL, float))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithFloat:"), (3.1415927)); +// CHECK: NSNumber *piFloat = ((NSNumber *(*)(id, SEL, float))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithFloat:"), (3.1415927F)); // CHECK: NSNumber 
*piDouble = ((NSNumber *(*)(id, SEL, double))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithDouble:"), (3.1415926535)); // CHECK: NSNumber *nsb = ((NSNumber *(*)(id, SEL, BOOL))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithBool:"), (BOOL)(b)); // CHECK: NSString *duplicateString = ((NSString *(*)(id, SEL, const char *))(void *)objc_msgSend)(objc_getClass("NSString"), sel_registerName("stringWithUTF8String:"), (const char *)(strdup("Hello"))); diff --git a/test/Rewriter/objc-modern-numeric-literal.mm b/test/Rewriter/objc-modern-numeric-literal.mm index 5f0b1fc..5f63d8c 100644 --- a/test/Rewriter/objc-modern-numeric-literal.mm +++ b/test/Rewriter/objc-modern-numeric-literal.mm @@ -61,7 +61,7 @@ int main(int argc, const char *argv[]) { // CHECK: NSNumber *fortyTwoUnsigned = ((NSNumber *(*)(id, SEL, unsigned int))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithUnsignedInt:"), 42U); // CHECK: NSNumber *fortyTwoLong = ((NSNumber *(*)(id, SEL, long))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithLong:"), 42L); // CHECK: NSNumber *fortyTwoLongLong = ((NSNumber *(*)(id, SEL, long long))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithLongLong:"), 42LL); -// CHECK: NSNumber *piFloat = ((NSNumber *(*)(id, SEL, float))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithFloat:"), 3.1415927); +// CHECK: NSNumber *piFloat = ((NSNumber *(*)(id, SEL, float))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithFloat:"), 3.1415927F); // CHECK: NSNumber *piDouble = ((NSNumber *(*)(id, SEL, double))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithDouble:"), 3.1415926535); // CHECK: NSNumber *yesNumber = ((NSNumber *(*)(id, SEL, BOOL))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithBool:"), true); // CHECK: NSNumber *noNumber = ((NSNumber *(*)(id, SEL, BOOL))(void *)objc_msgSend)(objc_getClass("NSNumber"), sel_registerName("numberWithBool:"), false); diff --git a/test/Sema/MicrosoftCompatibility-x64.c b/test/Sema/MicrosoftCompatibility-x64.c new file mode 100644 index 0000000..bf595af --- /dev/null +++ b/test/Sema/MicrosoftCompatibility-x64.c @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 %s -fsyntax-only -Wno-unused-value -Wmicrosoft -verify -fms-compatibility -triple x86_64-pc-win32 +int __stdcall f(void); /* expected-warning {{calling convention '__stdcall' ignored for this target}} */ + +/* This should compile without warning because __stdcall is treated +as __cdecl in MS compatibility mode for x64 compiles*/ +int __cdecl f(void) { + return 0; +} diff --git a/test/Sema/MicrosoftCompatibility-x86.c b/test/Sema/MicrosoftCompatibility-x86.c new file mode 100644 index 0000000..1e3762b --- /dev/null +++ b/test/Sema/MicrosoftCompatibility-x86.c @@ -0,0 +1,6 @@ +// RUN: %clang_cc1 %s -fsyntax-only -Wno-unused-value -Wmicrosoft -verify -fms-compatibility -triple i386-pc-win32 +int __stdcall f(void); /* expected-note {{previous declaration is here}} */ + +int __cdecl f(void) { /* expected-error {{function declared 'cdecl' here was previously declared 'stdcall'}} */ + return 0; +} diff --git a/test/Sema/MicrosoftCompatibility.c b/test/Sema/MicrosoftCompatibility.c index 6b137a6..6330c15 100644 --- a/test/Sema/MicrosoftCompatibility.c +++ b/test/Sema/MicrosoftCompatibility.c @@ -18,4 +18,4 @@ __declspec(noreturn) void f6( void ) { __declspec(align(32768)) struct S1 { int a; } s; 
/* expected-error {{requested alignment must be 8192 bytes or smaller}} */ struct __declspec(aligned) S2 {}; /* expected-warning {{unknown __declspec attribute 'aligned' ignored}} */ -struct __declspec(appdomain) S3 {}; /* expected-warning {{__declspec attribute 'appdomain' is not supported}} */ \ No newline at end of file +struct __declspec(appdomain) S3 {}; /* expected-warning {{__declspec attribute 'appdomain' is not supported}} */ diff --git a/test/Sema/PR2727.c b/test/Sema/PR2727.c index 332b0df..11282fd 100644 --- a/test/Sema/PR2727.c +++ b/test/Sema/PR2727.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -verify -fsyntax-only -std=c90 %s // RUN: %clang_cc1 -verify -fsyntax-only -std=c99 %s +// expected-no-diagnostics int f (int x) { diff --git a/test/Sema/PR2728.c b/test/Sema/PR2728.c index e9f1dea..0c99405 100644 --- a/test/Sema/PR2728.c +++ b/test/Sema/PR2728.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -verify -fsyntax-only -std=c90 %s // RUN: %clang_cc1 -verify -fsyntax-only -std=c99 %s +// expected-no-diagnostics struct s { diff --git a/test/Sema/PR2923.c b/test/Sema/PR2923.c index f22e70d..5741de8 100644 --- a/test/Sema/PR2923.c +++ b/test/Sema/PR2923.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // Test for absence of crash reported in PR 2923: // diff --git a/test/Sema/address-constant.c b/test/Sema/address-constant.c index e842a73..c13485b 100644 --- a/test/Sema/address-constant.c +++ b/test/Sema/address-constant.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics int i; int a[] = {0}; diff --git a/test/Sema/align-arm-apcs.c b/test/Sema/align-arm-apcs.c index 0a5d3fe..544f539 100644 --- a/test/Sema/align-arm-apcs.c +++ b/test/Sema/align-arm-apcs.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple arm-unknown-unknown -target-abi apcs-gnu -fsyntax-only -verify %s +// expected-no-diagnostics struct s0 { double f0; int f1; }; char chk0[__alignof__(struct s0) == 4 ? 1 : -1]; diff --git a/test/Sema/align-x86-64.c b/test/Sema/align-x86-64.c index edea5d8..09bf633 100644 --- a/test/Sema/align-x86-64.c +++ b/test/Sema/align-x86-64.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -fsyntax-only -verify %s +// expected-no-diagnostics // PR5599 diff --git a/test/Sema/align-x86.c b/test/Sema/align-x86.c index c6cd754..6b93a48 100644 --- a/test/Sema/align-x86.c +++ b/test/Sema/align-x86.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -verify %s +// expected-no-diagnostics // PR3433 double g1; diff --git a/test/Sema/arg-scope-c99.c b/test/Sema/arg-scope-c99.c index 912776a..9b2811c 100644 --- a/test/Sema/arg-scope-c99.c +++ b/test/Sema/arg-scope-c99.c @@ -1,2 +1,3 @@ // RUN: %clang_cc1 -fsyntax-only -std=c99 -verify %s +// expected-no-diagnostics void bb(int sz, int ar[sz][sz]) { } diff --git a/test/Sema/arg-scope.c b/test/Sema/arg-scope.c index ed92619..3de672b 100644 --- a/test/Sema/arg-scope.c +++ b/test/Sema/arg-scope.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics void aa(int b, int x[sizeof b]) {} void foo(int i, int A[i]) {} diff --git a/test/Sema/arm-layout.c b/test/Sema/arm-layout.c index d017fdb..4b76515 100644 --- a/test/Sema/arm-layout.c +++ b/test/Sema/arm-layout.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple armv7-unknown-unknown -target-abi apcs-gnu %s -verify // RUN: %clang_cc1 -triple armv7-unknown-unknown -target-abi aapcs %s -verify +// expected-no-diagnostics #define check(name, cond) int _##name##_check[(cond) ? 
1 : -1] diff --git a/test/Sema/array-init.c b/test/Sema/array-init.c index cfdf8e2..b3cae60 100644 --- a/test/Sema/array-init.c +++ b/test/Sema/array-init.c @@ -1,4 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -pedantic -verify %s +// RUN: %clang_cc1 -fsyntax-only -Wgnu -Wc11-extensions -verify %s +// REQUIRES: LP64 extern int foof() = 1; // expected-error{{illegal initializer (only variables can be initialized)}} diff --git a/test/Sema/asm.c b/test/Sema/asm.c index 44d83e9..155d736 100644 --- a/test/Sema/asm.c +++ b/test/Sema/asm.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -triple i386-pc-linux-gnu -verify -fsyntax-only +// RUN: %clang_cc1 %s -Wno-private-extern -triple i386-pc-linux-gnu -verify -fsyntax-only void f() { int i; diff --git a/test/Sema/assign-null.c b/test/Sema/assign-null.c index 7f172b1..ac90850 100644 --- a/test/Sema/assign-null.c +++ b/test/Sema/assign-null.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics #include diff --git a/test/Sema/atomic-ops.c b/test/Sema/atomic-ops.c index f769271..2a93591 100644 --- a/test/Sema/atomic-ops.c +++ b/test/Sema/atomic-ops.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -verify -fsyntax-only -triple=i686-linux-gnu +// RUN: %clang_cc1 %s -verify -fsyntax-only -triple=i686-linux-gnu -std=c11 // Basic parsing/Sema tests for __c11_atomic_* @@ -162,4 +162,9 @@ void f(_Atomic(int) *i, _Atomic(int*) *p, _Atomic(float) *d, __atomic_clear(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}} __atomic_clear(&flag, memory_order_seq_cst); (int)__atomic_clear(&flag, memory_order_seq_cst); // expected-error {{operand of type 'void'}} + + const _Atomic(int) const_atomic; + __c11_atomic_init(&const_atomic, 0); // expected-error {{first argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}} + __c11_atomic_store(&const_atomic, 0, memory_order_release); // expected-error {{first argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}} + __c11_atomic_load(&const_atomic, memory_order_acquire); // expected-error {{first argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}} } diff --git a/test/Sema/attr-availability-macosx.c b/test/Sema/attr-availability-macosx.c index 781523a..468e930 100644 --- a/test/Sema/attr-availability-macosx.c +++ b/test/Sema/attr-availability-macosx.c @@ -10,10 +10,10 @@ void f5(int) __attribute__((availability(ios,introduced=3.2), availability(macos void test() { f0(0); f1(0); - f2(0); // expected-warning{{'f2' is deprecated: first deprecated in Mac OS X 10.5}} + f2(0); // expected-warning{{'f2' is deprecated: first deprecated in OS X 10.5}} f3(0); - f4(0); // expected-error{{f4' is unavailable: obsoleted in Mac OS X 10.5}} - f5(0); // expected-error{{'f5' is unavailable: not available on Mac OS X}} + f4(0); // expected-error{{f4' is unavailable: obsoleted in OS X 10.5}} + f5(0); // expected-error{{'f5' is unavailable: not available on OS X}} } // rdar://10535640 diff --git a/test/Sema/attr-availability.c b/test/Sema/attr-availability.c index b4a6f96..e0c541e 100644 --- a/test/Sema/attr-availability.c +++ b/test/Sema/attr-availability.c @@ -1,6 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin9 -fsyntax-only -verify %s -void f0() __attribute__((availability(macosx,introduced=10.4,deprecated=10.2))); // expected-warning{{feature cannot be deprecated in Mac OS X version 10.2 
before it was introduced in version 10.4; attribute ignored}} +void f0() __attribute__((availability(macosx,introduced=10.4,deprecated=10.2))); // expected-warning{{feature cannot be deprecated in OS X version 10.2 before it was introduced in version 10.4; attribute ignored}} void f1() __attribute__((availability(ios,obsoleted=2.1,deprecated=3.0))); // expected-warning{{feature cannot be obsoleted in iOS version 2.1 before it was deprecated in version 3.0; attribute ignored}} void f2() __attribute__((availability(ios,introduced=2.1,deprecated=2.1))); @@ -14,8 +14,8 @@ extern void ATSFontGetPostScriptName(int flags) __attribute__((availability(macosx,introduced=8.0,obsoleted=9.0, message="use ATSFontGetFullPostScriptName"))); // expected-note {{function has been explicitly marked unavailable here}} void test_10095131() { - ATSFontGetName("Hello"); // expected-warning {{'ATSFontGetName' is deprecated: first deprecated in Mac OS X 9.0 - use CTFontCopyFullName}} - ATSFontGetPostScriptName(100); // expected-error {{'ATSFontGetPostScriptName' is unavailable: obsoleted in Mac OS X 9.0 - use ATSFontGetFullPostScriptName}} + ATSFontGetName("Hello"); // expected-warning {{'ATSFontGetName' is deprecated: first deprecated in OS X 9.0 - use CTFontCopyFullName}} + ATSFontGetPostScriptName(100); // expected-error {{'ATSFontGetPostScriptName' is unavailable: obsoleted in OS X 9.0 - use ATSFontGetFullPostScriptName}} } // rdar://10711037 diff --git a/test/Sema/attr-minsize.c b/test/Sema/attr-minsize.c new file mode 100644 index 0000000..7b1c6ae --- /dev/null +++ b/test/Sema/attr-minsize.c @@ -0,0 +1,5 @@ +// RUN: %clang_cc1 -fsyntax-only -verify %s + +int foo() __attribute__((__minsize__)); + +int var1 __attribute__((__minsize__)); // expected-error{{'__minsize__' attribute only applies to functions and methods}} diff --git a/test/Sema/attr-used.c b/test/Sema/attr-used.c index 0838816..e2dfab1 100644 --- a/test/Sema/attr-used.c +++ b/test/Sema/attr-used.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -verify -fsyntax-only %s +// RUN: %clang_cc1 -verify -fsyntax-only -Wno-private-extern %s extern int l0 __attribute__((used)); // expected-warning {{used attribute ignored}} __private_extern__ int l1 __attribute__((used)); // expected-warning {{used attribute ignored}} diff --git a/test/Sema/bitfield-layout.c b/test/Sema/bitfield-layout.c index 2655fc7..d226391 100644 --- a/test/Sema/bitfield-layout.c +++ b/test/Sema/bitfield-layout.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -triple=i686-apple-darwin9 +// expected-no-diagnostics #define CHECK_SIZE(kind, name, size) extern int name##1[sizeof(kind name) == size ? 1 : -1]; #define CHECK_ALIGN(kind, name, size) extern int name##2[__alignof(kind name) == size ? 
1 : -1]; diff --git a/test/Sema/bitfield-promote.c b/test/Sema/bitfield-promote.c index 4d14ad1..3189cd5 100644 --- a/test/Sema/bitfield-promote.c +++ b/test/Sema/bitfield-promote.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct {unsigned x : 2;} x; __typeof__((x.x+=1)+1) y; __typeof__(x.x<<1) y; diff --git a/test/Sema/block-return.c b/test/Sema/block-return.c index 6967955..2ea4d81 100644 --- a/test/Sema/block-return.c +++ b/test/Sema/block-return.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -pedantic -fsyntax-only %s -verify -fblocks +// RUN: %clang_cc1 -Wno-int-to-pointer-cast -pedantic -fsyntax-only %s -verify -fblocks typedef void (^CL)(void); diff --git a/test/Sema/block-storageclass.c b/test/Sema/block-storageclass.c index 9bfbfbd..74f1b0e 100644 --- a/test/Sema/block-storageclass.c +++ b/test/Sema/block-storageclass.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -fblocks +// expected-no-diagnostics int printf(const char *, ...); void _Block_byref_release(void*src){} diff --git a/test/Sema/builtin_objc_msgSend.c b/test/Sema/builtin_objc_msgSend.c index 357a5bc..419e92d 100644 --- a/test/Sema/builtin_objc_msgSend.c +++ b/test/Sema/builtin_objc_msgSend.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // rdar://8632525 typedef struct objc_class *Class; diff --git a/test/Sema/builtins-arm.c b/test/Sema/builtins-arm.c index 4077240..7b48af1 100644 --- a/test/Sema/builtins-arm.c +++ b/test/Sema/builtins-arm.c @@ -1,5 +1,7 @@ // RUN: %clang_cc1 -triple armv7 -fsyntax-only -verify -DTEST0 %s // RUN: %clang_cc1 -triple armv7 -fsyntax-only -verify -DTEST1 %s +// RUN: %clang_cc1 -triple armv7 -target-abi apcs-gnu \ +// RUN: -fsyntax-only -verify -DTEST1 %s #ifdef TEST0 void __clear_cache(char*, char*); @@ -9,8 +11,24 @@ void __clear_cache(char*, char*); void __clear_cache(void*, void*); #endif -// va_list on ARM is void*. +#if defined(__ARM_PCS) || defined(__ARM_EABI__) +// va_list on ARM AAPCS is struct { void* __ap }. +void test1() { + __builtin_va_list ptr; + ptr.__ap = "x"; + *(ptr.__ap) = '0'; // expected-error {{incomplete type 'void' is not assignable}} +} +#else +// va_list on ARM apcs-gnu is void*. +void test1() { + __builtin_va_list ptr; + ptr.__ap = "x"; // expected-error {{member reference base type '__builtin_va_list' is not a structure or union}} + *(ptr.__ap) = '0';// expected-error {{member reference base type '__builtin_va_list' is not a structure or union}} +} + void test2() { __builtin_va_list ptr = "x"; *ptr = '0'; // expected-error {{incomplete type 'void' is not assignable}} } + +#endif diff --git a/test/Sema/builtins-decl.c b/test/Sema/builtins-decl.c index d6b004a..729dc45 100644 --- a/test/Sema/builtins-decl.c +++ b/test/Sema/builtins-decl.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -triple=i686-mingw32 // RUN: %clang_cc1 %s -fsyntax-only -verify -triple=x86_64-mingw32 +// expected-no-diagnostics // mingw-w64's intrin.h has decls below. // we should accept them. 
diff --git a/test/Sema/builtins.c b/test/Sema/builtins.c index b8b0367..e3b3b7e 100644 --- a/test/Sema/builtins.c +++ b/test/Sema/builtins.c @@ -40,7 +40,7 @@ void test9(short v) { old = __sync_fetch_and_add(); // expected-error {{too few arguments to function call}} old = __sync_fetch_and_add(&old); // expected-error {{too few arguments to function call}} - old = __sync_fetch_and_add((unsigned*)0, 42i); // expected-warning {{imaginary constants are an extension}} + old = __sync_fetch_and_add((unsigned*)0, 42i); // expected-warning {{imaginary constants are a GNU extension}} // PR7600: Pointers are implicitly casted to integers and back. void *old_ptr = __sync_val_compare_and_swap((void**)0, 0, 0); @@ -51,6 +51,20 @@ void test9(short v) { } } +// overloaded atomics should be declared only once. +void test9_1(volatile int* ptr, int val) { + __sync_fetch_and_add_4(ptr, val); +} +void test9_2(volatile int* ptr, int val) { + __sync_fetch_and_add(ptr, val); +} +void test9_3(volatile int* ptr, int val) { + __sync_fetch_and_add_4(ptr, val); + __sync_fetch_and_add(ptr, val); + __sync_fetch_and_add(ptr, val); + __sync_fetch_and_add_4(ptr, val); + __sync_fetch_and_add_4(ptr, val); +} // rdar://7236819 void test10(void) __attribute__((noreturn)); diff --git a/test/Sema/c89-2.c b/test/Sema/c89-2.c deleted file mode 100644 index 14b955a..0000000 --- a/test/Sema/c89-2.c +++ /dev/null @@ -1,5 +0,0 @@ -/* RUN: %clang_cc1 %s -std=c89 -pedantic-errors -Wno-empty-translation-unit -verify - */ - -#if 1LL /* expected-error {{long long}} */ -#endif diff --git a/test/Sema/c89.c b/test/Sema/c89.c index 110d7e1..a410a62 100644 --- a/test/Sema/c89.c +++ b/test/Sema/c89.c @@ -110,3 +110,9 @@ typedef CI *array_of_pointer_to_CI[5]; const array_of_pointer_to_CI mine3; void main() {} /* expected-error {{'main' must return 'int'}} */ + +long long ll1 = /* expected-warning {{'long long' is an extension when C99 mode is not enabled}} */ + -42LL; /* expected-warning {{'long long' is an extension when C99 mode is not enabled}} */ +unsigned long long ull1 = /* expected-warning {{'long long' is an extension when C99 mode is not enabled}} */ + 42ULL; /* expected-warning {{'long long' is an extension when C99 mode is not enabled}} */ + diff --git a/test/Sema/callingconv.c b/test/Sema/callingconv.c index 6c844a3..266242d 100644 --- a/test/Sema/callingconv.c +++ b/test/Sema/callingconv.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -fsyntax-only -verify +// RUN: %clang_cc1 %s -fsyntax-only -triple i386-unknown-unknown -verify void __attribute__((fastcall)) foo(float *a) { } @@ -40,8 +40,9 @@ int __attribute__((pcs("aapcs", "aapcs"))) pcs1(void); // expected-error {{attri int __attribute__((pcs())) pcs2(void); // expected-error {{attribute takes one argument}} int __attribute__((pcs(pcs1))) pcs3(void); // expected-error {{attribute takes one argument}} int __attribute__((pcs(0))) pcs4(void); // expected-error {{'pcs' attribute requires parameter 1 to be a string}} -int __attribute__((pcs("aapcs"))) pcs5(void); // no-error -int __attribute__((pcs("aapcs-vfp"))) pcs6(void); // no-error +/* These are ignored because the target is i386 and not ARM */ +int __attribute__((pcs("aapcs"))) pcs5(void); // expected-warning {{calling convention 'pcs' ignored for this target}} +int __attribute__((pcs("aapcs-vfp"))) pcs6(void); // expected-warning {{calling convention 'pcs' ignored for this target}} int __attribute__((pcs("foo"))) pcs7(void); // expected-error {{Invalid PCS type}} // PR6361 @@ -52,3 +53,4 @@ void __attribute__((cdecl)) ctest3() {} 
typedef __attribute__((stdcall)) void (*PROC)(); PROC __attribute__((cdecl)) ctest4(const char *x) {} +void __attribute__((pnaclcall)) pnaclfunc(float *a) {} // expected-warning {{calling convention 'pnaclcall' ignored for this target}} diff --git a/test/Sema/cast-to-union.c b/test/Sema/cast-to-union.c index c32964d..7b995e3 100644 --- a/test/Sema/cast-to-union.c +++ b/test/Sema/cast-to-union.c @@ -4,16 +4,16 @@ union u { int i; unsigned : 3; }; void f(union u); void test(int x) { - f((union u)x); // expected-warning {{C99 forbids casts to union type}} + f((union u)x); // expected-warning {{cast to union type is a GNU extension}} f((union u)&x); // expected-error {{cast to union type from type 'int *' not present in union}} f((union u)2U); // expected-error {{cast to union type from type 'unsigned int' not present in union}} } -union u w = (union u)2; // expected-warning {{C99 forbids casts to union type}} +union u w = (union u)2; // expected-warning {{cast to union type is a GNU extension}} union u ww = (union u)1.0; // expected-error{{cast to union type from type 'double' not present in union}} union u x = 7; // expected-error{{initializing 'union u' with an expression of incompatible type 'int'}} int i; -union u zz = (union u)i; // expected-error{{initializer element is not a compile-time constant}} expected-warning {{C99 forbids casts to union type}} +union u zz = (union u)i; // expected-error{{initializer element is not a compile-time constant}} expected-warning {{cast to union type is a GNU extension}} struct s {int a, b;}; struct s y = { 1, 5 }; diff --git a/test/Sema/cast.c b/test/Sema/cast.c index 71c44b4..25ef1d0 100644 --- a/test/Sema/cast.c +++ b/test/Sema/cast.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only %s -verify +// RUN: %clang_cc1 -fsyntax-only -triple x86_64-unknown-unknown %s -verify typedef struct { unsigned long bits[(((1) + (64) - 1) / (64))]; } cpumask_t; cpumask_t x; @@ -52,8 +52,8 @@ void testInt(Int v) { (void) (CLong) v; (void) (CFloat) v; (void) (CDouble) v; - (void) (VoidPtr) v; - (void) (CharPtr) v; + (void) (VoidPtr) v; // expected-warning{{cast to 'VoidPtr' (aka 'void *') from smaller integer type 'Int' (aka 'int')}} + (void) (CharPtr) v; // expected-warning{{cast to 'CharPtr' (aka 'char *') from smaller integer type 'Int' (aka 'int')}} } void testLong(Long v) { @@ -157,3 +157,12 @@ void testCharPtr(CharPtr v) { (void) (VoidPtr) v; (void) (CharPtr) v; } + +typedef enum { x_a, x_b } X; +void *intToPointerCast2(X x) { + return (void*)x; +} + +void *intToPointerCast3() { + return (void*)(1 + 3); +} diff --git a/test/Sema/check-increment.c b/test/Sema/check-increment.c index 070ea74..ae33c20 100644 --- a/test/Sema/check-increment.c +++ b/test/Sema/check-increment.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics int printf(const char *, ...); typedef int *pint; diff --git a/test/Sema/compare.c b/test/Sema/compare.c index 406ade8..b5d4ef5 100644 --- a/test/Sema/compare.c +++ b/test/Sema/compare.c @@ -93,8 +93,8 @@ int ints(long a, unsigned long b) { // (C,b) (C == (unsigned long) b) + (C == (unsigned int) b) + - (C == (unsigned short) b) + - (C == (unsigned char) b) + + (C == (unsigned short) b) + // expected-warning {{comparison of constant 65536 with expression of type 'unsigned short' is always false}} + (C == (unsigned char) b) + // expected-warning {{comparison of constant 65536 with expression of type 'unsigned char' is always false}} ((long) C == b) + ((int) C == b) + ((short) C == b) + @@ -105,8 +105,8 @@ int 
ints(long a, unsigned long b) { ((signed char) C == (unsigned char) b) + (C < (unsigned long) b) + (C < (unsigned int) b) + - (C < (unsigned short) b) + - (C < (unsigned char) b) + + (C < (unsigned short) b) + // expected-warning {{comparison of constant 65536 with expression of type 'unsigned short' is always false}} + (C < (unsigned char) b) + // expected-warning {{comparison of constant 65536 with expression of type 'unsigned char' is always false}} ((long) C < b) + ((int) C < b) + ((short) C < b) + @@ -123,8 +123,8 @@ int ints(long a, unsigned long b) { (a == (unsigned char) C) + ((long) a == C) + ((int) a == C) + - ((short) a == C) + - ((signed char) a == C) + + ((short) a == C) + // expected-warning {{comparison of constant 65536 with expression of type 'short' is always false}} + ((signed char) a == C) + // expected-warning {{comparison of constant 65536 with expression of type 'signed char' is always false}} ((long) a == (unsigned long) C) + ((int) a == (unsigned int) C) + ((short) a == (unsigned short) C) + @@ -135,8 +135,8 @@ int ints(long a, unsigned long b) { (a < (unsigned char) C) + ((long) a < C) + ((int) a < C) + - ((short) a < C) + - ((signed char) a < C) + + ((short) a < C) + // expected-warning {{comparison of constant 65536 with expression of type 'short' is always true}} + ((signed char) a < C) + // expected-warning {{comparison of constant 65536 with expression of type 'signed char' is always true}} ((long) a < (unsigned long) C) + // expected-warning {{comparison of integers of different signs}} ((int) a < (unsigned int) C) + // expected-warning {{comparison of integers of different signs}} ((short) a < (unsigned short) C) + @@ -145,8 +145,8 @@ int ints(long a, unsigned long b) { // (0x80000,b) (0x80000 == (unsigned long) b) + (0x80000 == (unsigned int) b) + - (0x80000 == (unsigned short) b) + - (0x80000 == (unsigned char) b) + + (0x80000 == (unsigned short) b) + // expected-warning {{comparison of constant 524288 with expression of type 'unsigned short' is always false}} + (0x80000 == (unsigned char) b) + // expected-warning {{comparison of constant 524288 with expression of type 'unsigned char' is always false}} ((long) 0x80000 == b) + ((int) 0x80000 == b) + ((short) 0x80000 == b) + @@ -157,8 +157,8 @@ int ints(long a, unsigned long b) { ((signed char) 0x80000 == (unsigned char) b) + (0x80000 < (unsigned long) b) + (0x80000 < (unsigned int) b) + - (0x80000 < (unsigned short) b) + - (0x80000 < (unsigned char) b) + + (0x80000 < (unsigned short) b) + // expected-warning {{comparison of constant 524288 with expression of type 'unsigned short' is always false}} + (0x80000 < (unsigned char) b) + // expected-warning {{comparison of constant 524288 with expression of type 'unsigned char' is always false}} ((long) 0x80000 < b) + ((int) 0x80000 < b) + ((short) 0x80000 < b) + @@ -175,8 +175,8 @@ int ints(long a, unsigned long b) { (a == (unsigned char) 0x80000) + ((long) a == 0x80000) + ((int) a == 0x80000) + - ((short) a == 0x80000) + - ((signed char) a == 0x80000) + + ((short) a == 0x80000) + // expected-warning {{comparison of constant 524288 with expression of type 'short' is always false}} + ((signed char) a == 0x80000) + // expected-warning {{comparison of constant 524288 with expression of type 'signed char' is always false}} ((long) a == (unsigned long) 0x80000) + ((int) a == (unsigned int) 0x80000) + ((short) a == (unsigned short) 0x80000) + @@ -187,8 +187,8 @@ int ints(long a, unsigned long b) { (a < (unsigned char) 0x80000) + ((long) a < 0x80000) + ((int) a < 
0x80000) + - ((short) a < 0x80000) + - ((signed char) a < 0x80000) + + ((short) a < 0x80000) + // expected-warning {{comparison of constant 524288 with expression of type 'short' is always true}} + ((signed char) a < 0x80000) + // expected-warning {{comparison of constant 524288 with expression of type 'signed char' is always true}} ((long) a < (unsigned long) 0x80000) + // expected-warning {{comparison of integers of different signs}} ((int) a < (unsigned int) 0x80000) + // expected-warning {{comparison of integers of different signs}} ((short) a < (unsigned short) 0x80000) + diff --git a/test/Sema/complex-promotion.c b/test/Sema/complex-promotion.c index 23c3b68..a59bf71 100644 --- a/test/Sema/complex-promotion.c +++ b/test/Sema/complex-promotion.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -fsyntax-only +// expected-no-diagnostics float a; diff --git a/test/Sema/compound-literal.c b/test/Sema/compound-literal.c index beec6ca..c5c0c17 100644 --- a/test/Sema/compound-literal.c +++ b/test/Sema/compound-literal.c @@ -1,19 +1,20 @@ // RUN: %clang_cc1 -fsyntax-only -verify -pedantic %s +// REQUIRES: LP64 struct foo { int a, b; }; static struct foo t = (struct foo){0,0}; static struct foo t1 = __builtin_choose_expr(0, (struct foo){0,0}, (struct foo){0,0}); static struct foo t2 = {0,0}; -static struct foo t3 = t2; // -expected-error {{initializer element is not a compile-time constant}} +static struct foo t3 = t2; // expected-error {{initializer element is not a compile-time constant}} static int *p = (int []){2,4}; static int x = (int){1}; -static int *p2 = (int []){2,x}; // -expected-error {{initializer element is not a compile-time constant}} -static long *p3 = (long []){2,"x"}; // -expected-warning {{incompatible pointer to integer conversion initializing 'long' with an expression of type 'char [2]'}} +static int *p2 = (int []){2,x}; // expected-error {{initializer element is not a compile-time constant}} +static long *p3 = (long []){2,"x"}; // expected-warning {{incompatible pointer to integer conversion initializing 'long' with an expression of type 'char [2]'}} -typedef struct { } cache_t; // -expected-warning{{empty struct is a GNU extension}} -static cache_t clo_I1_cache = ((cache_t) { } ); // -expected-warning{{use of GNU empty initializer extension}} +typedef struct { } cache_t; // expected-warning{{empty struct is a GNU extension}} +static cache_t clo_I1_cache = ((cache_t) { } ); // expected-warning{{use of GNU empty initializer extension}} typedef struct Test {int a;int b;} Test; static Test* ll = &(Test) {0,0}; @@ -26,11 +27,11 @@ int main(int argc, char **argv) { } struct Incomplete; // expected-note{{forward declaration of 'struct Incomplete'}} -struct Incomplete* I1 = &(struct Incomplete){1, 2, 3}; // -expected-error {{variable has incomplete type}} +struct Incomplete* I1 = &(struct Incomplete){1, 2, 3}; // expected-error {{variable has incomplete type}} void IncompleteFunc(unsigned x) { - struct Incomplete* I2 = (struct foo[x]){1, 2, 3}; // -expected-error {{variable-sized object may not be initialized}} - (void){1,2,3}; // -expected-error {{variable has incomplete type}} - (void(void)) { 0 }; // -expected-error{{illegal initializer type 'void (void)'}} + struct Incomplete* I2 = (struct foo[x]){1, 2, 3}; // expected-error {{variable-sized object may not be initialized}} + (void){1,2,3}; // expected-error {{variable has incomplete type}} + (void(void)) { 0 }; // expected-error{{illegal initializer type 'void (void)'}} } // PR6080 diff --git a/test/Sema/const-eval-64.c 
b/test/Sema/const-eval-64.c index 5727a93..1290bf4 100644 --- a/test/Sema/const-eval-64.c +++ b/test/Sema/const-eval-64.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-linux %s +// expected-no-diagnostics #define EVAL_EXPR(testno, expr) int test##testno = sizeof(struct{char qq[expr];}); diff --git a/test/Sema/const-ptr-int-ptr-cast.c b/test/Sema/const-ptr-int-ptr-cast.c index 8beaf9d..73b4a8a 100644 --- a/test/Sema/const-ptr-int-ptr-cast.c +++ b/test/Sema/const-ptr-int-ptr-cast.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -ffreestanding %s +// expected-no-diagnostics #include diff --git a/test/Sema/constant-builtins-2.c b/test/Sema/constant-builtins-2.c index 68b46bf..13d81fe 100644 --- a/test/Sema/constant-builtins-2.c +++ b/test/Sema/constant-builtins-2.c @@ -48,6 +48,9 @@ extern int f(); int h0 = __builtin_types_compatible_p(int, float); //int h1 = __builtin_choose_expr(1, 10, f()); //int h2 = __builtin_expect(0, 0); +int h3 = __builtin_bswap16(0x1234) == 0x3412 ? 1 : f(); +int h4 = __builtin_bswap32(0x1234) == 0x34120000 ? 1 : f(); +int h5 = __builtin_bswap64(0x1234) == 0x3412000000000000 ? 1 : f(); extern long int bi0; extern __typeof__(__builtin_expect(0, 0)) bi0; diff --git a/test/Sema/constant-builtins.c b/test/Sema/constant-builtins.c index 5d67fc7..c98f62d 100644 --- a/test/Sema/constant-builtins.c +++ b/test/Sema/constant-builtins.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only %s -verify -pedantic +// expected-no-diagnostics // Math stuff @@ -16,6 +17,9 @@ extern int f(); int h0 = __builtin_types_compatible_p(int,float); //int h1 = __builtin_choose_expr(1, 10, f()); //int h2 = __builtin_expect(0, 0); +int h3 = __builtin_bswap16(0x1234) == 0x3412 ? 1 : f(); +int h4 = __builtin_bswap32(0x1234) == 0x34120000 ? 1 : f(); +int h5 = __builtin_bswap64(0x1234) == 0x3412000000000000 ? 
1 : f(); short somefunc(); diff --git a/test/Sema/darwin-align-cast.c b/test/Sema/darwin-align-cast.c index 2080974..bd357ed 100644 --- a/test/Sema/darwin-align-cast.c +++ b/test/Sema/darwin-align-cast.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef long unsigned int __darwin_size_t; typedef long __darwin_ssize_t; typedef __darwin_size_t size_t; diff --git a/test/Sema/enum-packed.c b/test/Sema/enum-packed.c index 0eb6c14..b6ba972 100644 --- a/test/Sema/enum-packed.c +++ b/test/Sema/enum-packed.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // PR7477 enum __attribute__((packed)) E { diff --git a/test/Sema/expr-comma-c99.c b/test/Sema/expr-comma-c99.c index d0883ba..6e97a4f 100644 --- a/test/Sema/expr-comma-c99.c +++ b/test/Sema/expr-comma-c99.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -std=c99 +// expected-no-diagnostics // rdar://6095180 struct s { char c[17]; }; diff --git a/test/Sema/expr-comma.c b/test/Sema/expr-comma.c index d3e4020..7902715 100644 --- a/test/Sema/expr-comma.c +++ b/test/Sema/expr-comma.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -std=c89 +// expected-no-diagnostics // rdar://6095180 struct s { char c[17]; }; diff --git a/test/Sema/exprs.c b/test/Sema/exprs.c index a93e12e..df3e258 100644 --- a/test/Sema/exprs.c +++ b/test/Sema/exprs.c @@ -40,7 +40,7 @@ _Complex double test1() { } _Complex double test2() { - return 1.0if; // expected-warning {{imaginary constants are an extension}} + return 1.0if; // expected-warning {{imaginary constants are a GNU extension}} } // rdar://6097308 diff --git a/test/Sema/format-string-percentm.c b/test/Sema/format-string-percentm.c index 1ffc439..02fea46 100644 --- a/test/Sema/format-string-percentm.c +++ b/test/Sema/format-string-percentm.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -triple i686-pc-linux-gnu +// expected-no-diagnostics // PR 4142 - support glibc extension to printf: '%m' (which prints strerror(errno)). int printf(char const*,...); diff --git a/test/Sema/format-strings-darwin.c b/test/Sema/format-strings-darwin.c new file mode 100644 index 0000000..5daf3e5 --- /dev/null +++ b/test/Sema/format-strings-darwin.c @@ -0,0 +1,64 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -triple i386-apple-darwin9 -pedantic -DALLOWED %s +// RUN: %clang_cc1 -fsyntax-only -verify -triple thumbv6-apple-ios4.0 -pedantic -DALLOWED %s + +// RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-mingw32 -pedantic %s +// RUN: %clang_cc1 -fsyntax-only -verify -triple i686-pc-win32 -pedantic %s + +// RUN: %clang_cc1 -fsyntax-only -verify -triple i686-linux-gnu -pedantic %s +// RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-unknown-freebsd -pedantic %s + +int printf(const char *restrict, ...); +int scanf(const char * restrict, ...) 
; + +void test() { + int justRight = 1; + long tooLong = 2; + + printf("%D", justRight); + printf("%D", tooLong); + printf("%U", justRight); + printf("%U", tooLong); + printf("%O", justRight); + printf("%O", tooLong); + +#ifdef ALLOWED + // expected-warning@-8 {{'D' conversion specifier is not supported by ISO C}} expected-note@-8 {{did you mean to use 'd'?}} + // expected-warning@-8 {{'D' conversion specifier is not supported by ISO C}} expected-note@-8 {{did you mean to use 'd'?}} expected-warning@-8 {{format specifies type 'int' but the argument has type 'long'}} + // expected-warning@-8 {{'U' conversion specifier is not supported by ISO C}} expected-note@-8 {{did you mean to use 'u'?}} + // expected-warning@-8 {{'U' conversion specifier is not supported by ISO C}} expected-note@-8 {{did you mean to use 'u'?}} expected-warning@-8 {{format specifies type 'unsigned int' but the argument has type 'long'}} + // expected-warning@-8 {{'O' conversion specifier is not supported by ISO C}} expected-note@-8 {{did you mean to use 'o'?}} + // expected-warning@-8 {{'O' conversion specifier is not supported by ISO C}} expected-note@-8 {{did you mean to use 'o'?}} expected-warning@-8 {{format specifies type 'unsigned int' but the argument has type 'long'}} +#else + // expected-warning@-15 {{invalid conversion specifier 'D'}} + // expected-warning@-15 {{invalid conversion specifier 'D'}} + // expected-warning@-15 {{invalid conversion specifier 'U'}} + // expected-warning@-15 {{invalid conversion specifier 'U'}} + // expected-warning@-15 {{invalid conversion specifier 'O'}} + // expected-warning@-15 {{invalid conversion specifier 'O'}} +#endif +} + +#ifdef ALLOWED +void testPrintf(short x, long y) { + printf("%hD", x); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'd'?}} + printf("%lD", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'd'?}} + printf("%hU", x); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'u'?}} + printf("%lU", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'u'?}} + printf("%hO", x); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'o'?}} + printf("%lO", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'o'?}} + + printf("%+'0.5lD", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'd'?}} + printf("% '0.5lD", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'd'?}} + printf("%#0.5lO", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'o'?}} + printf("%'0.5lU", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'u'?}} +} + +void testScanf(short *x, long *y) { + scanf("%hD", x); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'd'?}} + scanf("%lD", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'd'?}} + scanf("%hU", x); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'u'?}} + scanf("%lU", y); // expected-warning{{conversion 
specifier is not supported by ISO C}} expected-note {{did you mean to use 'u'?}} + scanf("%hO", x); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'o'?}} + scanf("%lO", y); // expected-warning{{conversion specifier is not supported by ISO C}} expected-note {{did you mean to use 'o'?}} +} +#endif diff --git a/test/Sema/format-strings-gnu.c b/test/Sema/format-strings-gnu.c new file mode 100644 index 0000000..f491085 --- /dev/null +++ b/test/Sema/format-strings-gnu.c @@ -0,0 +1,55 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -triple i386-apple-darwin9 %s +// RUN: %clang_cc1 -fsyntax-only -verify -triple thumbv6-apple-ios4.0 %s +// RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-mingw32 %s +// RUN: %clang_cc1 -fsyntax-only -verify -triple i686-pc-win32 %s + +// RUN: %clang_cc1 -fsyntax-only -verify -triple i686-linux-gnu -DALLOWED %s +// RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-unknown-freebsd -DALLOWED %s + +int printf(const char *restrict, ...); +int scanf(const char * restrict, ...) ; + +void test() { + long notLongEnough = 1; + long long quiteLong = 2; + + printf("%Ld", notLongEnough); // expected-warning {{format specifies type 'long long' but the argument has type 'long'}} + printf("%Ld", quiteLong); + +#ifndef ALLOWED + // expected-warning@-4 {{length modifier 'L' results in undefined behavior or no effect with 'd' conversion specifier}} + // expected-note@-5 {{did you mean to use 'll'?}} + + // expected-warning@-6 {{length modifier 'L' results in undefined behavior or no effect with 'd' conversion specifier}} + // expected-note@-7 {{did you mean to use 'll'?}} +#endif +} + +void testAlwaysInvalid() { + // We should not suggest 'll' here! + printf("%Lc", 'a'); // expected-warning {{length modifier 'L' results in undefined behavior or no effect with 'c' conversion specifier}} + printf("%Ls", "a"); // expected-warning {{length modifier 'L' results in undefined behavior or no effect with 's' conversion specifier}} +} + +#ifdef ALLOWED +// PR 9466: clang: doesn't know about %Lu, %Ld, and %Lx +void printf_longlong(long long x, unsigned long long y) { + printf("%Ld", y); // no-warning + printf("%Lu", y); // no-warning + printf("%Lx", y); // no-warning + printf("%Ld", x); // no-warning + printf("%Lu", x); // no-warning + printf("%Lx", x); // no-warning + printf("%Ls", "hello"); // expected-warning {{length modifier 'L' results in undefined behavior or no effect with 's' conversion specifier}} +} + +void scanf_longlong(long long *x, unsigned long long *y) { + scanf("%Ld", y); // no-warning + scanf("%Lu", y); // no-warning + scanf("%Lx", y); // no-warning + scanf("%Ld", x); // no-warning + scanf("%Lu", x); // no-warning + scanf("%Lx", x); // no-warning + scanf("%Ls", "hello"); // expected-warning {{length modifier 'L' results in undefined behavior or no effect with 's' conversion specifier}} +} +#endif diff --git a/test/Sema/format-strings-non-iso.c b/test/Sema/format-strings-non-iso.c index ed8095f..ee45946 100644 --- a/test/Sema/format-strings-non-iso.c +++ b/test/Sema/format-strings-non-iso.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -std=c99 -pedantic %s +// RUN: %clang_cc1 -triple i686-linux-gnu -fsyntax-only -verify -std=c99 -pedantic %s int printf(const char *restrict, ...); int scanf(const char * restrict, ...); @@ -7,8 +7,8 @@ void f(void) { char *cp; // The 'q' length modifier. 
- printf("%qd", (long long)42); // expected-warning{{'q' length modifier is not supported by ISO C}} - scanf("%qd", (long long *)0); // expected-warning{{'q' length modifier is not supported by ISO C}} + printf("%qd", (long long)42); // expected-warning{{'q' length modifier is not supported by ISO C}} expected-note{{did you mean to use 'll'?}} + scanf("%qd", (long long *)0); // expected-warning{{'q' length modifier is not supported by ISO C}} expected-note{{did you mean to use 'll'?}} // The 'm' length modifier. scanf("%ms", &cp); // expected-warning{{'m' length modifier is not supported by ISO C}} @@ -18,11 +18,11 @@ void f(void) { printf("%C", L'x'); // expected-warning{{'C' conversion specifier is not supported by ISO C}} // Combining 'L' with an integer conversion specifier. - printf("%Li", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'i' is not supported by ISO C}} - printf("%Lo", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'o' is not supported by ISO C}} - printf("%Lu", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'u' is not supported by ISO C}} - printf("%Lx", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'x' is not supported by ISO C}} - printf("%LX", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'X' is not supported by ISO C}} + printf("%Li", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'i' is not supported by ISO C}} expected-note{{did you mean to use 'll'?}} + printf("%Lo", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'o' is not supported by ISO C}} expected-note{{did you mean to use 'll'?}} + printf("%Lu", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'u' is not supported by ISO C}} expected-note{{did you mean to use 'll'?}} + printf("%Lx", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'x' is not supported by ISO C}} expected-note{{did you mean to use 'll'?}} + printf("%LX", (long long)42); // expected-warning{{using length modifier 'L' with conversion specifier 'X' is not supported by ISO C}} expected-note{{did you mean to use 'll'?}} // Positional arguments. printf("%1$d", 42); // expected-warning{{positional arguments are not supported by ISO C}} diff --git a/test/Sema/format-strings-scanf.c b/test/Sema/format-strings-scanf.c index 235ac11..d66bed5 100644 --- a/test/Sema/format-strings-scanf.c +++ b/test/Sema/format-strings-scanf.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -triple i386-apple-darwin9 -Wformat-nonliteral %s +// RUN: %clang_cc1 -fsyntax-only -verify -Wformat-nonliteral %s // Test that -Wformat=0 works: // RUN: %clang_cc1 -fsyntax-only -Werror -Wformat=0 %s @@ -107,22 +107,12 @@ void test_alloc_extension(char **sp, wchar_t **lsp, float *fp) { // Test argument type check for the 'm' length modifier. 
scanf("%ms", fp); // expected-warning{{format specifies type 'char **' but the argument has type 'float *'}} - scanf("%mS", fp); // expected-warning{{format specifies type 'wchar_t **' (aka 'int **') but the argument has type 'float *'}} + scanf("%mS", fp); // expected-warning-re{{format specifies type 'wchar_t \*\*' \(aka '[^']+'\) but the argument has type 'float \*'}} scanf("%mc", fp); // expected-warning{{format specifies type 'char **' but the argument has type 'float *'}} - scanf("%mC", fp); // expected-warning{{format specifies type 'wchar_t **' (aka 'int **') but the argument has type 'float *'}} + scanf("%mC", fp); // expected-warning-re{{format specifies type 'wchar_t \*\*' \(aka '[^']+'\) but the argument has type 'float \*'}} scanf("%m[abc]", fp); // expected-warning{{format specifies type 'char **' but the argument has type 'float *'}} } -void test_longlong(long long *x, unsigned long long *y) { - scanf("%Ld", y); // no-warning - scanf("%Lu", y); // no-warning - scanf("%Lx", y); // no-warning - scanf("%Ld", x); // no-warning - scanf("%Lu", x); // no-warning - scanf("%Lx", x); // no-warning - scanf("%Ls", "hello"); // expected-warning {{length modifier 'L' results in undefined behavior or no effect with 's' conversion specifier}} -} - void test_quad(int *x, long long *llx) { scanf("%qd", x); // expected-warning{{format specifies type 'long long *' but the argument has type 'int *'}} scanf("%qd", llx); // no-warning diff --git a/test/Sema/format-strings.c b/test/Sema/format-strings.c index 86b9296..8fb1218 100644 --- a/test/Sema/format-strings.c +++ b/test/Sema/format-strings.c @@ -117,6 +117,7 @@ void check_writeback_specifier() printf("%qn", (int*)0); // expected-warning{{format specifies type 'long long *' but the argument has type 'int *'}} printf("%Ln", 0); // expected-warning{{length modifier 'L' results in undefined behavior or no effect with 'n' conversion specifier}} + // expected-note@-1{{did you mean to use 'll'?}} } void check_invalid_specifier(FILE* fp, char *buf) @@ -531,17 +532,6 @@ void pr9751() { 0.0); // expected-warning{{format specifies}} } -// PR 9466: clang: doesn't know about %Lu, %Ld, and %Lx -void printf_longlong(long long x, unsigned long long y) { - printf("%Ld", y); // no-warning - printf("%Lu", y); // no-warning - printf("%Lx", y); // no-warning - printf("%Ld", x); // no-warning - printf("%Lu", x); // no-warning - printf("%Lx", x); // no-warning - printf("%Ls", "hello"); // expected-warning {{length modifier 'L' results in undefined behavior or no effect with 's' conversion specifier}} -} - void __attribute__((format(strfmon,1,2))) monformat(const char *fmt, ...); void __attribute__((format(strftime,1,0))) dateformat(const char *fmt); diff --git a/test/Sema/function-redecl.c b/test/Sema/function-redecl.c index 7076bdf..ff8e003 100644 --- a/test/Sema/function-redecl.c +++ b/test/Sema/function-redecl.c @@ -129,3 +129,7 @@ void test_x() { enum e0 {one}; void f3(); void f3(enum e0 x) {} + +enum incomplete_enum; +void f4(); // expected-note {{previous declaration is here}} +void f4(enum incomplete_enum); // expected-error {{conflicting types for 'f4'}} diff --git a/test/Sema/heinous-extensions-on.c b/test/Sema/heinous-extensions-on.c index 176f472..5b00407 100644 --- a/test/Sema/heinous-extensions-on.c +++ b/test/Sema/heinous-extensions-on.c @@ -3,7 +3,7 @@ void foo() { int a; // PR3788 - asm("nop" : : "m"((int)(a))); // expected-warning {{cast in a inline asm context requiring an l-value}} + asm("nop" : : "m"((int)(a))); // expected-warning {{cast in an 
inline asm context requiring an l-value}} // PR3794 - asm("nop" : "=r"((unsigned)a)); // expected-warning {{cast in a inline asm context requiring an l-value}} + asm("nop" : "=r"((unsigned)a)); // expected-warning {{cast in an inline asm context requiring an l-value}} } diff --git a/test/Sema/i-c-e.c b/test/Sema/i-c-e.c index ee61ac3..e7b42c4 100644 --- a/test/Sema/i-c-e.c +++ b/test/Sema/i-c-e.c @@ -1,4 +1,4 @@ -// RUN: %clang %s -ffreestanding -fsyntax-only -Xclang -verify -pedantic -fpascal-strings -std=c99 +// RUN: %clang %s -ffreestanding -Wno-int-to-pointer-cast -fsyntax-only -Xclang -verify -pedantic -fpascal-strings -std=c99 #include #include diff --git a/test/Sema/implicit-builtin-freestanding.c b/test/Sema/implicit-builtin-freestanding.c index 505e522..385cf1f 100644 --- a/test/Sema/implicit-builtin-freestanding.c +++ b/test/Sema/implicit-builtin-freestanding.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -ffreestanding %s +// expected-no-diagnostics int malloc(int a) { return a; } diff --git a/test/Sema/init-struct-qualified.c b/test/Sema/init-struct-qualified.c index 49ec7cc..9d18e22 100644 --- a/test/Sema/init-struct-qualified.c +++ b/test/Sema/init-struct-qualified.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify < %s +// expected-no-diagnostics typedef float CGFloat; typedef struct _NSPoint { CGFloat x; CGFloat y; } NSPoint; typedef struct _NSSize { CGFloat width; CGFloat height; } NSSize; diff --git a/test/Sema/init-vector.c b/test/Sema/init-vector.c index f0cf32b..a95e789 100644 --- a/test/Sema/init-vector.c +++ b/test/Sema/init-vector.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef float __attribute__((vector_size (16))) v4f_t; diff --git a/test/Sema/int-arith-convert.c b/test/Sema/int-arith-convert.c index c56ab3b..0f425bd 100644 --- a/test/Sema/int-arith-convert.c +++ b/test/Sema/int-arith-convert.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple=i686-linux-gnu -fsyntax-only -verify %s +// expected-no-diagnostics // Check types are the same through redeclaration unsigned long x; diff --git a/test/Sema/invalid-decl.c b/test/Sema/invalid-decl.c index 2699b25..b2c2aaf 100644 --- a/test/Sema/invalid-decl.c +++ b/test/Sema/invalid-decl.c @@ -29,3 +29,12 @@ typedef struct { void f(StructType *buf) { buf->fun = 0; } + +// rdar://11743706 +static void bar(hid_t, char); // expected-error {{expected identifier}} + +static void bar(hid_t p, char); // expected-error {{unknown type name 'hid_t'}} + +void foo() { + (void)bar; +} diff --git a/test/Sema/knr-variadic-def.c b/test/Sema/knr-variadic-def.c index 6d5d632..934f32f 100644 --- a/test/Sema/knr-variadic-def.c +++ b/test/Sema/knr-variadic-def.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -pedantic %s +// expected-no-diagnostics // PR4287 #include diff --git a/test/Sema/many-logical-ops.c b/test/Sema/many-logical-ops.c index 09a7684..ec3bbda 100644 --- a/test/Sema/many-logical-ops.c +++ b/test/Sema/many-logical-ops.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -Wconstant-conversion -verify %s +// expected-no-diagnostics // rdar://10913206&10941790 // Check that we don't get stack overflow trying to evaluate a huge number of diff --git a/test/Sema/member-reference.c b/test/Sema/member-reference.c index 7bda143..edbbea5 100644 --- a/test/Sema/member-reference.c +++ b/test/Sema/member-reference.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -fsyntax-only +// expected-no-diagnostics struct simple { int i; }; diff --git a/test/Sema/mms-bitfields.c 
b/test/Sema/mms-bitfields.c new file mode 100644 index 0000000..d238a7a --- /dev/null +++ b/test/Sema/mms-bitfields.c @@ -0,0 +1,13 @@ +// RUN: %clang_cc1 -mms-bitfields -fsyntax-only -verify -triple x86_64-apple-darwin9 %s +// expected-no-diagnostics + +// The -mms-bitfields commandline parameter should behave the same +// as the ms_struct attribute. +struct +{ + int a : 1; + short b : 1; +} t; + +// MS pads out bitfields between different types. +static int arr[(sizeof(t) == 8) ? 1 : -1]; diff --git a/test/Sema/ms-inline-asm.c b/test/Sema/ms-inline-asm.c new file mode 100644 index 0000000..f6a0fdc --- /dev/null +++ b/test/Sema/ms-inline-asm.c @@ -0,0 +1,35 @@ +// REQUIRES: x86-64-registered-target +// RUN: %clang_cc1 %s -triple x86_64-apple-darwin10 -fms-extensions -fenable-experimental-ms-inline-asm -Wno-microsoft -verify -fsyntax-only + +void t1(void) { + __asm __asm // expected-error {{__asm used with no assembly instructions}} +} + +void f() { + int foo; + __asm { + mov eax, eax + .unknowndirective // expected-error {{unknown directive}} + } + f(); + __asm { + mov eax, 1+=2 // expected-error 2 {{unknown token in expression}} + } + f(); + __asm { + mov eax, 1+++ // expected-error 2 {{unknown token in expression}} + } + f(); + __asm { + mov eax, TYPE cat // expected-error {{Unable to lookup TYPE of expr!}} + } + f(); + __asm { + mov eax, SIZE foo // expected-error {{Unsupported directive!}} + } + f(); + __asm { + mov eax, LENGTH foo // expected-error {{Unsupported directive!}} + } + +} diff --git a/test/Sema/ms_wide_predefined_expr.cpp b/test/Sema/ms_wide_predefined_expr.cpp index 8e816e0..d56d157 100644 --- a/test/Sema/ms_wide_predefined_expr.cpp +++ b/test/Sema/ms_wide_predefined_expr.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -Wno-unused-value -Wmicrosoft -verify -fms-extensions +// expected-no-diagnostics // Wide character predefined identifiers #define _STR2WSTR(str) L##str diff --git a/test/Sema/no-format-y2k-turnsoff-format.c b/test/Sema/no-format-y2k-turnsoff-format.c index b206ecd..a26a0ce 100644 --- a/test/Sema/no-format-y2k-turnsoff-format.c +++ b/test/Sema/no-format-y2k-turnsoff-format.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -verify -fsyntax-only -Wformat -Wno-format-y2k +// RUN: %clang_cc1 -verify -fsyntax-only -Wformat -Wno-format-y2k %s // rdar://9504680 void foo(const char *, ...) 
__attribute__((__format__ (__printf__, 1, 2))); diff --git a/test/Sema/outof-range-constant-compare.c b/test/Sema/outof-range-constant-compare.c new file mode 100644 index 0000000..4b1637c --- /dev/null +++ b/test/Sema/outof-range-constant-compare.c @@ -0,0 +1,149 @@ +// RUN: %clang_cc1 -triple x86_64-apple-darwin -fsyntax-only -Wtautological-constant-out-of-range-compare -verify %s +// rdar://12202422 + +int value(void); + +int main() +{ + int a = value(); + if (a == 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (a != 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always true}} + return 0; + if (a < 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always true}} + return 0; + if (a <= 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always true}} + return 0; + if (a > 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (a >= 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always false}} + return 0; + + if (0x1234567812345678L == a) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (0x1234567812345678L != a) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always true}} + return 0; + if (0x1234567812345678L < a) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (0x1234567812345678L <= a) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (0x1234567812345678L > a) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always true}} + return 0; + if (0x1234567812345678L >= a) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always true}} + return 0; + if (a == 0x1234567812345678LL) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (a == -0x1234567812345678L) // expected-warning {{comparison of constant -1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (a < -0x1234567812345678L) // expected-warning {{comparison of constant -1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (a > -0x1234567812345678L) // expected-warning {{comparison of constant -1311768465173141112 with expression of type 'int' is always true}} + return 0; + if (a <= -0x1234567812345678L) // expected-warning {{comparison of constant -1311768465173141112 with expression of type 'int' is always false}} + return 0; + if (a >= -0x1234567812345678L) // expected-warning {{comparison of constant -1311768465173141112 with expression of type 'int' is always true}} + return 0; + + + if (a == 0x12345678L) // no warning + return 1; + + short s = value(); + if (s == 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always false}} + return 0; + if (s != 0x1234567812345678L) 
// expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always true}} + return 0; + if (s < 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always true}} + return 0; + if (s <= 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always true}} + return 0; + if (s > 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always false}} + return 0; + if (s >= 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always false}} + return 0; + + if (0x1234567812345678L == s) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always false}} + return 0; + if (0x1234567812345678L != s) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always true}} + return 0; + if (0x1234567812345678L < s) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always false}} + return 0; + if (0x1234567812345678L <= s) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always false}} + return 0; + if (0x1234567812345678L > s) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always true}} + return 0; + if (0x1234567812345678L >= s) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'short' is always true}} + return 0; + long l = value(); + if (l == 0x1234567812345678L) + return 0; + if (l != 0x1234567812345678L) + return 0; + if (l < 0x1234567812345678L) + return 0; + if (l <= 0x1234567812345678L) + return 0; + if (l > 0x1234567812345678L) + return 0; + if (l >= 0x1234567812345678L) + return 0; + + if (0x1234567812345678L == l) + return 0; + if (0x1234567812345678L != l) + return 0; + if (0x1234567812345678L < l) + return 0; + if (0x1234567812345678L <= l) + return 0; + if (0x1234567812345678L > l) + return 0; + if (0x1234567812345678L >= l) + return 0; + + unsigned un = 0; + if (un == 0x0000000000000000L) + return 0; + if (un != 0x0000000000000000L) + return 0; + if (un < 0x0000000000000000L) + return 0; + if (un <= 0x0000000000000000L) + return 0; + if (un > 0x0000000000000000L) + return 0; + if (un >= 0x0000000000000000L) + return 0; + + if (0x0000000000000000L == un) + return 0; + if (0x0000000000000000L != un) + return 0; + if (0x0000000000000000L < un) + return 0; + if (0x0000000000000000L <= un) + return 0; + if (0x0000000000000000L > un) + return 0; + if (0x0000000000000000L >= un) + return 0; + float fl = 0; + if (fl == 0x0000000000000000L) // no warning + return 0; + + float dl = 0; + if (dl == 0x0000000000000000L) // no warning + return 0; + + enum E { + yes, + no, + maybe + }; + enum E e; + + if (e == 0x1234567812345678L) // expected-warning {{comparison of constant 1311768465173141112 with expression of type 'enum E' is always false}} + return 0; + + return 1; +} diff --git a/test/Sema/overloaded-func-transparent-union.c b/test/Sema/overloaded-func-transparent-union.c index fa0314e..acdc589 100644 --- a/test/Sema/overloaded-func-transparent-union.c +++ b/test/Sema/overloaded-func-transparent-union.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // rdar:// 9129552 // PR9406 
diff --git a/test/Sema/parentheses.c b/test/Sema/parentheses.c index 9751336..c3b3aa7 100644 --- a/test/Sema/parentheses.c +++ b/test/Sema/parentheses.c @@ -13,13 +13,13 @@ void if_assign(void) { void bitwise_rel(unsigned i) { (void)(i & 0x2 == 0); // expected-warning {{& has lower precedence than ==}} \ // expected-note{{place parentheses around the & expression to evaluate it first}} \ - // expected-note{{place parentheses around the == expression to silence this warning}} + // expected-note{{place parentheses around the '==' expression to silence this warning}} (void)(0 == i & 0x2); // expected-warning {{& has lower precedence than ==}} \ // expected-note{{place parentheses around the & expression to evaluate it first}} \ - // expected-note{{place parentheses around the == expression to silence this warning}} + // expected-note{{place parentheses around the '==' expression to silence this warning}} (void)(i & 0xff < 30); // expected-warning {{& has lower precedence than <}} \ // expected-note{{place parentheses around the & expression to evaluate it first}} \ - // expected-note{{place parentheses around the < expression to silence this warning}} + // expected-note{{place parentheses around the '<' expression to silence this warning}} (void)((i & 0x2) == 0); (void)(i & (0x2 == 0)); // Eager logical op diff --git a/test/Sema/parentheses.cpp b/test/Sema/parentheses.cpp index 7674166..8f5f246 100644 --- a/test/Sema/parentheses.cpp +++ b/test/Sema/parentheses.cpp @@ -22,6 +22,8 @@ public: operator int(); Stream &operator<<(int); Stream &operator<<(const char*); + Stream &operator>>(int); + Stream &operator>>(const char*); }; void f(Stream& s, bool b) { @@ -45,3 +47,13 @@ void test(S *s, bool (S::*m_ptr)()) { // Don't crash on unusual member call expressions. (void)((s->*m_ptr)() ? "foo" : "bar"); } + +void test(int a, int b, int c) { + (void)(a >> b + c); // expected-warning {{operator '>>' has lower precedence than '+'; '+' will be evaluated first}} \ + expected-note {{place parentheses around the '+' expression to silence this warning}} + (void)(a - b << c); // expected-warning {{operator '<<' has lower precedence than '-'; '-' will be evaluated first}} \ + expected-note {{place parentheses around the '-' expression to silence this warning}} + Stream() << b + c; + Stream() >> b + c; // expected-warning {{operator '>>' has lower precedence than '+'; '+' will be evaluated first}} \ + expected-note {{place parentheses around the '+' expression to silence this warning}} +} diff --git a/test/Sema/pragma-align-mac68k.c b/test/Sema/pragma-align-mac68k.c index fb8da51..fd93fcb 100644 --- a/test/Sema/pragma-align-mac68k.c +++ b/test/Sema/pragma-align-mac68k.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -verify %s +// expected-no-diagnostics #include @@ -96,3 +97,15 @@ extern int a11_0[offsetof(struct s11, f0) == 0 ? 1 : -1]; extern int a11_1[offsetof(struct s11, f1) == 2 ? 1 : -1]; extern int a11_2[sizeof(struct s11) == 10 ? 1 : -1]; extern int a11_3[__alignof(struct s11) == 2 ? 1 : -1]; + +#pragma options align=reset + +void f12(void) { + #pragma options align=mac68k + struct s12 { + char f0; + int f1; + }; + #pragma options align=reset + extern int a12[sizeof(struct s12) == 6 ? 
1 : -1]; +} diff --git a/test/Sema/pragma-align-packed.c b/test/Sema/pragma-align-packed.c index 74fbd13..b8daf40 100644 --- a/test/Sema/pragma-align-packed.c +++ b/test/Sema/pragma-align-packed.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -verify %s +// expected-no-diagnostics #pragma pack(push, 1) struct s0 { diff --git a/test/Sema/pragma-ms_struct.c b/test/Sema/pragma-ms_struct.c index d76ee8b..6533320 100644 --- a/test/Sema/pragma-ms_struct.c +++ b/test/Sema/pragma-ms_struct.c @@ -31,6 +31,12 @@ struct S { unsigned long bf_2 : 12; } __attribute__((ms_struct)) t2; +enum +{ + A = 0, + B, + C +} __attribute__((ms_struct)) e1; // expected-warning {{'ms_struct' attribute ignored}} // rdar://10513599 #pragma ms_struct on diff --git a/test/Sema/pragma-pack-2.c b/test/Sema/pragma-pack-2.c index 4a4c202..3696a22 100644 --- a/test/Sema/pragma-pack-2.c +++ b/test/Sema/pragma-pack-2.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i686-apple-darwin9 %s -fsyntax-only -verify +// expected-no-diagnostics #include diff --git a/test/Sema/pragma-pack-3.c b/test/Sema/pragma-pack-3.c index d97359e..e7a6cee 100644 --- a/test/Sema/pragma-pack-3.c +++ b/test/Sema/pragma-pack-3.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i686-apple-darwin9 %s -fsyntax-only -verify +// expected-no-diagnostics // Stack: [], Alignment: 8 diff --git a/test/Sema/pragma-pack-4.c b/test/Sema/pragma-pack-4.c index b06fc0e..6a09e14 100644 --- a/test/Sema/pragma-pack-4.c +++ b/test/Sema/pragma-pack-4.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple i686-apple-darwin9 %s -fsyntax-only -verify // RUN: %clang_cc1 -triple x86_64-apple-darwin9 %s -fsyntax-only -verify +// expected-no-diagnostics // rdar://problem/7095436 #pragma pack(4) diff --git a/test/Sema/pragma-pack-5.c b/test/Sema/pragma-pack-5.c index 95bbe1f..24bd4cd 100644 --- a/test/Sema/pragma-pack-5.c +++ b/test/Sema/pragma-pack-5.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin9 %s -fsyntax-only -verify -ffreestanding +// expected-no-diagnostics // and PR9560 // Check #pragma pack handling with bitfields. diff --git a/test/Sema/pragma-pack-6.c b/test/Sema/pragma-pack-6.c index 40659c2..c87c99d 100644 --- a/test/Sema/pragma-pack-6.c +++ b/test/Sema/pragma-pack-6.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i686-apple-darwin9 %s -fsyntax-only -verify +// expected-no-diagnostics // Pragma pack handling with tag declarations diff --git a/test/Sema/pragma-pack-and-options-align.c b/test/Sema/pragma-pack-and-options-align.c index ebf1ade..5bc7c83 100644 --- a/test/Sema/pragma-pack-and-options-align.c +++ b/test/Sema/pragma-pack-and-options-align.c @@ -38,5 +38,16 @@ struct s3 { }; extern int a[sizeof(struct s3) == 8 ? 1 : -1]; +#pragma pack(push,2) +#pragma options align=power +struct s4 { + char c; + int x; +}; +#pragma pack(pop) +#pragma options align=reset +extern int a[sizeof(struct s4) == 8 ? 1 : -1]; + /* expected-warning {{#pragma options align=reset failed: stack empty}} */ #pragma options align=reset /* expected-warning {{#pragma pack(pop, ...) 
failed: stack empty}} */ #pragma pack(pop) + diff --git a/test/Sema/private-extern.c b/test/Sema/private-extern.c index 25591dc..e480f3f 100644 --- a/test/Sema/private-extern.c +++ b/test/Sema/private-extern.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -verify -fsyntax-only %s +// RUN: %clang_cc1 -verify -fsyntax-only -Wno-private-extern %s static int g0; // expected-note{{previous definition}} int g0; // expected-error{{non-static declaration of 'g0' follows static declaration}} diff --git a/test/Sema/return-silent.c b/test/Sema/return-silent.c index eb9641b..83c3a0a 100644 --- a/test/Sema/return-silent.c +++ b/test/Sema/return-silent.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -Wno-return-type -fsyntax-only -verify +// expected-no-diagnostics int t14() { return; diff --git a/test/Sema/short-enums.c b/test/Sema/short-enums.c index 6605c4e..9bf0064 100644 --- a/test/Sema/short-enums.c +++ b/test/Sema/short-enums.c @@ -1,5 +1,6 @@ // RUN: not %clang_cc1 -fsyntax-only %s -verify // RUN: %clang_cc1 -fshort-enums -fsyntax-only %s -verify +// expected-no-diagnostics enum x { A }; int t0[sizeof(enum x) == 1 ? 1 : -1]; diff --git a/test/Sema/stdcall-fastcall-x64.c b/test/Sema/stdcall-fastcall-x64.c new file mode 100644 index 0000000..d2a475e --- /dev/null +++ b/test/Sema/stdcall-fastcall-x64.c @@ -0,0 +1,20 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-pc-linux-gnu %s + +// CC qualifier can be applied only to functions +int __attribute__((stdcall)) var1; // expected-warning{{'stdcall' only applies to function types; type here is 'int'}} +int __attribute__((fastcall)) var2; // expected-warning{{'fastcall' only applies to function types; type here is 'int'}} + +// Different CC qualifiers are not compatible +void __attribute__((stdcall, fastcall)) foo3(void); // expected-warning{{calling convention 'stdcall' ignored for this target}} expected-warning {{calling convention 'fastcall' ignored for this target}} +void __attribute__((stdcall)) foo4(); // expected-warning{{calling convention 'stdcall' ignored for this target}} +void __attribute__((fastcall)) foo4(void); // expected-warning {{calling convention 'fastcall' ignored for this target}} + +// rdar://8876096 +void rdar8876096foo1(int i, int j) __attribute__((fastcall, cdecl)); // expected-warning{{calling convention 'fastcall' ignored for this target}} +void rdar8876096foo2(int i, int j) __attribute__((fastcall, stdcall)); // expected-warning{{calling convention 'stdcall' ignored for this target}} expected-warning {{calling convention 'fastcall' ignored for this target}} +void rdar8876096foo3(int i, int j) __attribute__((fastcall, regparm(2))); // expected-warning {{calling convention 'fastcall' ignored for this target}} +void rdar8876096foo4(int i, int j) __attribute__((stdcall, cdecl)); // expected-warning{{calling convention 'stdcall' ignored for this target}} +void rdar8876096foo5(int i, int j) __attribute__((stdcall, fastcall)); // expected-warning{{calling convention 'stdcall' ignored for this target}} expected-warning {{calling convention 'fastcall' ignored for this target}} +void rdar8876096foo6(int i, int j) __attribute__((cdecl, fastcall)); // expected-warning {{calling convention 'fastcall' ignored for this target}} +void rdar8876096foo7(int i, int j) __attribute__((cdecl, stdcall)); // expected-warning{{calling convention 'stdcall' ignored for this target}} +void rdar8876096foo8(int i, int j) __attribute__((regparm(2), fastcall)); // expected-warning {{calling convention 'fastcall' ignored for this target}} diff --git 
a/test/Sema/stdcall-fastcall.c b/test/Sema/stdcall-fastcall.c index eeacf94..dea1fc5 100644 --- a/test/Sema/stdcall-fastcall.c +++ b/test/Sema/stdcall-fastcall.c @@ -1,4 +1,3 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-pc-linux-gnu %s // RUN: %clang_cc1 -fsyntax-only -verify -triple i686-apple-darwin10 %s // CC qualifier can be applied only to functions diff --git a/test/Sema/struct-cast.c b/test/Sema/struct-cast.c index 30ef892..8aa7ca9 100644 --- a/test/Sema/struct-cast.c +++ b/test/Sema/struct-cast.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only %s -verify +// expected-no-diagnostics struct S { int one; diff --git a/test/Sema/struct-packed-align.c b/test/Sema/struct-packed-align.c index 6ca6a60..166d5eb 100644 --- a/test/Sema/struct-packed-align.c +++ b/test/Sema/struct-packed-align.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // Packed structs. struct s { diff --git a/test/Sema/surpress-deprecated.c b/test/Sema/surpress-deprecated.c index dd673b9..db9ab3f 100644 --- a/test/Sema/surpress-deprecated.c +++ b/test/Sema/surpress-deprecated.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -Wno-deprecated-declarations -verify %s +// expected-no-diagnostics extern void OldFunction() __attribute__((deprecated)); int main (int argc, const char * argv[]) { diff --git a/test/Sema/template-specialization.cpp b/test/Sema/template-specialization.cpp new file mode 100644 index 0000000..ae7bc33 --- /dev/null +++ b/test/Sema/template-specialization.cpp @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -verify -fsyntax-only %s +// Verify the absence of assertion failures when solving calls to unresolved +// template member functions. + +struct A { + template + static void bar(int) { } // expected-note {{candidate template ignored: couldn't infer template argument 'T'}} +}; + +struct B { + template + static void foo() { + int array[i]; + A::template bar(array[0]); // expected-error {{no matching function for call to 'bar'}} + } +}; + +int main() { + B::foo<4>(); // expected-note {{in instantiation of function template specialization 'B::foo<4>'}} + return 0; +} diff --git a/test/Sema/tentative-decls.c b/test/Sema/tentative-decls.c index e14540b..bf2b1e4 100644 --- a/test/Sema/tentative-decls.c +++ b/test/Sema/tentative-decls.c @@ -33,7 +33,8 @@ static int i3 = 5; extern int i3; // rdar://7703982 -__private_extern__ int pExtern; // expected-warning {{Use of __private_extern__ on tentative definition has unexpected behaviour}} +__private_extern__ int pExtern; // expected-warning {{use of __private_extern__ on a declaration may not produce external symbol private to the linkage unit and is deprecated}} \ +// expected-note {{use __attribute__((visibility("hidden"))) attribute instead}} int pExtern = 0; int i4; diff --git a/test/Sema/thread-specifier.c b/test/Sema/thread-specifier.c index 0d439b1..8c40fcd 100644 --- a/test/Sema/thread-specifier.c +++ b/test/Sema/thread-specifier.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -triple i686-pc-linux-gnu -fsyntax-only -verify -pedantic %s +// RUN: %clang_cc1 -triple i686-pc-linux-gnu -fsyntax-only -Wno-private-extern -verify -pedantic %s __thread int t1; __thread extern int t2; // expected-warning {{'__thread' before 'extern'}} @@ -21,3 +21,10 @@ __thread int t15; // expected-note {{previous definition is here}} int t15; // expected-error {{non-thread-local declaration of 't15' follows thread-local declaration}} int t16; // expected-note {{previous definition is here}} __thread int t16; // expected-error {{thread-local 
declaration of 't16' follows non-thread-local declaration}} + +// PR13720 +__thread int thread_int; +int *thread_int_ptr = &thread_int; // expected-error{{initializer element is not a compile-time constant}} +void g() { + int *p = &thread_int; // This is perfectly fine, though. +} diff --git a/test/Sema/tls.c b/test/Sema/tls.c index 3b2a441..4e5cfef 100644 --- a/test/Sema/tls.c +++ b/test/Sema/tls.c @@ -17,4 +17,7 @@ // RUN: not %clang_cc1 -triple x86_64-pc-openbsd -fsyntax-only %s // RUN: not %clang_cc1 -triple i386-pc-openbsd -fsyntax-only %s +// Haiku does not suppport TLS. +// RUN: not %clang_cc1 -triple i586-pc-haiku -fsyntax-only %s + __thread int x; diff --git a/test/Sema/transparent-union-pointer.c b/test/Sema/transparent-union-pointer.c index 31c9391..bf1fb17 100644 --- a/test/Sema/transparent-union-pointer.c +++ b/test/Sema/transparent-union-pointer.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics typedef union { union wait *__uptr; diff --git a/test/Sema/typedef-prototype.c b/test/Sema/typedef-prototype.c index 8372154..98b1ab8 100644 --- a/test/Sema/typedef-prototype.c +++ b/test/Sema/typedef-prototype.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef int unary_int_func(int arg); unary_int_func add_one; diff --git a/test/Sema/types.c b/test/Sema/types.c index 3bec83e..6ae1a92 100644 --- a/test/Sema/types.c +++ b/test/Sema/types.c @@ -43,7 +43,7 @@ int i[(short)1]; enum e { e_1 }; extern int j[sizeof(enum e)]; // expected-note {{previous definition}} -int j[42]; // expected-error {{redefinition of 'j' with a different type}} +int j[42]; // expected-error {{redefinition of 'j' with a different type: 'int [42]' vs 'int [4]'}} // rdar://6880104 _Decimal32 x; // expected-error {{GNU decimal type extension not supported}} diff --git a/test/Sema/uninit-variables.c b/test/Sema/uninit-variables.c index 634ae0d..9e3dd9d 100644 --- a/test/Sema/uninit-variables.c +++ b/test/Sema/uninit-variables.c @@ -508,3 +508,26 @@ int self_init_in_cond(int *p) { int n = ((p && (0 || 1)) && (n = *p)) ? n : -1; // ok return n; } + +void test_analyzer_noreturn_aux() __attribute__((analyzer_noreturn)); + +void test_analyzer_noreturn(int y) { + int x; // expected-note {{initialize the variable 'x' to silence this warning}} + if (y) { + test_analyzer_noreturn_aux(); + ++x; // no-warning + } + else { + ++x; // expected-warning {{variable 'x' is uninitialized when used here}} + } +} +void test_analyzer_noreturn_2(int y) { + int x; + if (y) { + test_analyzer_noreturn_aux(); + } + else { + x = 1; + } + ++x; // no-warning +} diff --git a/test/Sema/unnamed-bitfield-init.c b/test/Sema/unnamed-bitfield-init.c index f3cc49c..6fa1801 100644 --- a/test/Sema/unnamed-bitfield-init.c +++ b/test/Sema/unnamed-bitfield-init.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef struct { int a; int : 24; char b; } S; diff --git a/test/Sema/unused-expr.c b/test/Sema/unused-expr.c index 056d09a..aa81febd 100644 --- a/test/Sema/unused-expr.c +++ b/test/Sema/unused-expr.c @@ -122,3 +122,14 @@ void f(int i, ...) { // PR8371 int fn5() __attribute__ ((__const)); + +// OpenSSL has some macros like this; we shouldn't warn on the cast. +#define M1(a, b) (long)foo((a), (b)) +// But, we should still warn on other subexpressions of casts in macros. 
+#define M2 (long)0; +void t11(int i, int j) { + M1(i, j); // no warning + M2; // expected-warning {{expression result unused}} +} +#undef M1 +#undef M2 diff --git a/test/Sema/va_arg_x86_64.c b/test/Sema/va_arg_x86_64.c index 9f514c1..2659a1b 100644 --- a/test/Sema/va_arg_x86_64.c +++ b/test/Sema/va_arg_x86_64.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -triple=x86_64-unknown-freebsd7.0 %s +// expected-no-diagnostics // PR2631 char* foo(char *fmt, __builtin_va_list ap) diff --git a/test/Sema/variadic-block.c b/test/Sema/variadic-block.c index ba4bb71..4f23cb8 100644 --- a/test/Sema/variadic-block.c +++ b/test/Sema/variadic-block.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -fsyntax-only -fblocks +// expected-no-diagnostics #include diff --git a/test/Sema/vector-cast.c b/test/Sema/vector-cast.c index f1cf013..7fa6e86 100644 --- a/test/Sema/vector-cast.c +++ b/test/Sema/vector-cast.c @@ -10,22 +10,22 @@ void f() t2 v2; t3 v3; - v2 = (t2)v1; // -expected-error {{invalid conversion between vector type \ + v2 = (t2)v1; // expected-error {{invalid conversion between vector type \ 't2' and 't1' of different size}} - v1 = (t1)v2; // -expected-error {{invalid conversion between vector type \ + v1 = (t1)v2; // expected-error {{invalid conversion between vector type \ 't1' and 't2' of different size}} v3 = (t3)v2; - v1 = (t1)(char *)10; // -expected-error {{invalid conversion between vector \ + v1 = (t1)(char *)10; // expected-error {{invalid conversion between vector \ type 't1' and scalar type 'char *'}} v1 = (t1)(long long)10; - v1 = (t1)(short)10; // -expected-error {{invalid conversion between vector \ + v1 = (t1)(short)10; // expected-error {{invalid conversion between vector \ type 't1' and integer type 'short' of different size}} long long r1 = (long long)v1; - short r2 = (short)v1; // -expected-error {{invalid conversion between vector \ + short r2 = (short)v1; // expected-error {{invalid conversion between vector \ type 't1' and integer type 'short' of different size}} - char *r3 = (char *)v1; // -expected-error {{invalid conversion between vector\ + char *r3 = (char *)v1; // expected-error {{invalid conversion between vector\ type 't1' and scalar type 'char *'}} } diff --git a/test/Sema/vfprintf-valid-redecl.c b/test/Sema/vfprintf-valid-redecl.c index 14fbbc4..5c5ce8d 100644 --- a/test/Sema/vfprintf-valid-redecl.c +++ b/test/Sema/vfprintf-valid-redecl.c @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -pedantic -verify +// expected-no-diagnostics // PR4290 // The following declaration is compatible with vfprintf, so we shouldn't diff --git a/test/Sema/warn-bad-function-cast.c b/test/Sema/warn-bad-function-cast.c new file mode 100644 index 0000000..41a3f78 --- /dev/null +++ b/test/Sema/warn-bad-function-cast.c @@ -0,0 +1,47 @@ +// RUN: %clang_cc1 %s -fsyntax-only -Wno-unused-value -Wbad-function-cast -triple x86_64-unknown-unknown -verify +// rdar://9103192 + +void vf(void); +int if1(void); +char if2(void); +long if3(void); +float rf1(void); +double rf2(void); +_Complex double cf(void); +enum e { E1 } ef(void); +_Bool bf(void); +char *pf1(void); +int *pf2(void); + +void +foo(void) +{ + /* Casts to void types are always OK. */ + (void)vf(); + (void)if1(); + (void)cf(); + (const void)bf(); + /* Casts to the same type or similar types are OK. 
*/ + (int)if1(); + (long)if2(); + (char)if3(); + (float)rf1(); + (long double)rf2(); + (_Complex float)cf(); + (enum f { F1 })ef(); + (_Bool)bf(); + (void *)pf1(); + (char *)pf2(); + /* All following casts issue warning */ + (float)if1(); /* expected-warning {{cast from function call of type 'int' to non-matching type 'float'}} */ + (double)if2(); /* expected-warning {{cast from function call of type 'char' to non-matching type 'double'}} */ + (_Bool)if3(); /* expected-warning {{cast from function call of type 'long' to non-matching type '_Bool'}} */ + (int)rf1(); /* expected-warning {{cast from function call of type 'float' to non-matching type 'int'}} */ + (long)rf2(); /* expected-warning {{cast from function call of type 'double' to non-matching type 'long'}} */ + (double)cf(); /* expected-warning {{cast from function call of type '_Complex double' to non-matching type 'double'}} */ + (int)ef(); /* expected-warning {{cast from function call of type 'enum e' to non-matching type 'int'}} */ + (int)bf(); /* expected-warning {{cast from function call of type '_Bool' to non-matching type 'int'}} */ + (__SIZE_TYPE__)pf1(); /* expected-warning {{cast from function call of type 'char *' to non-matching type 'unsigned long'}} */ + (__PTRDIFF_TYPE__)pf2(); /* expected-warning {{cast from function call of type 'int *' to non-matching type 'long'}} */ +} + diff --git a/test/Sema/warn-documentation-fixits.cpp b/test/Sema/warn-documentation-fixits.cpp index 732b44d..a47b776 100644 --- a/test/Sema/warn-documentation-fixits.cpp +++ b/test/Sema/warn-documentation-fixits.cpp @@ -20,8 +20,52 @@ void test3(T aaa); template void test4(SomeTy aaa, OtherTy bbb); +// expected-warning@+1 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+2 {{add a deprecation attribute to the declaration to silence this warning}} +/// \deprecated +void test_deprecated_1(); + +// expected-warning@+1 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+2 {{add a deprecation attribute to the declaration to silence this warning}} +/// \deprecated +void test_deprecated_2(int a); + +struct test_deprecated_3 { + // expected-warning@+1 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+2 {{add a deprecation attribute to the declaration to silence this warning}} + /// \deprecated + void test_deprecated_4(); + + // expected-warning@+1 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+2 {{add a deprecation attribute to the declaration to silence this warning}} + /// \deprecated + void test_deprecated_5() { + } +}; + +template +struct test_deprecated_6 { + // expected-warning@+1 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+2 {{add a deprecation attribute to the declaration to silence this warning}} + /// \deprecated + void test_deprecated_7(); + + // expected-warning@+1 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+2 {{add a deprecation attribute to the declaration to silence this warning}} + /// \deprecated + void test_deprecated_8() { + } +}; + +#define MY_ATTR_DEPRECATED __attribute__((deprecated)) + +// expected-warning@+1 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+2 {{add a deprecation attribute to the 
declaration to silence this warning}} +/// \deprecated +void test_deprecated_9(int a); + // CHECK: fix-it:"{{.*}}":{5:12-5:22}:"a" // CHECK: fix-it:"{{.*}}":{9:12-9:15}:"aaa" // CHECK: fix-it:"{{.*}}":{13:13-13:23}:"T" // CHECK: fix-it:"{{.*}}":{18:13-18:18}:"SomeTy" +// CHECK: fix-it:"{{.*}}":{25:25-25:25}:" __attribute__((deprecated))" +// CHECK: fix-it:"{{.*}}":{29:30-29:30}:" __attribute__((deprecated))" +// CHECK: fix-it:"{{.*}}":{34:27-34:27}:" __attribute__((deprecated))" +// CHECK: fix-it:"{{.*}}":{38:27-38:27}:" __attribute__((deprecated))" +// CHECK: fix-it:"{{.*}}":{46:27-46:27}:" __attribute__((deprecated))" +// CHECK: fix-it:"{{.*}}":{50:27-50:27}:" __attribute__((deprecated))" +// CHECK: fix-it:"{{.*}}":{58:30-58:30}:" MY_ATTR_DEPRECATED" diff --git a/test/Sema/warn-documentation.cpp b/test/Sema/warn-documentation.cpp index d99520b..5678fd9 100644 --- a/test/Sema/warn-documentation.cpp +++ b/test/Sema/warn-documentation.cpp @@ -38,13 +38,13 @@ int test_html7(int); int test_html8(int); // expected-warning@+2 {{HTML start tag prematurely ended, expected attribute name or '>'}} expected-note@+1 {{HTML tag started here}} -/** Aaa bbb'}} -/** Aaa bbb -void test_param16(int bbb, int ccc); +void test_param20(int bbb, int ccc); // expected-warning@+3 {{parameter 'a' is already documented}} // expected-note@+1 {{previous documentation}} /// \param a Aaa. /// \param a Aaa. -int test_param17(int a); +int test_param21(int a); // expected-warning@+4 {{parameter 'x2' is already documented}} // expected-note@+2 {{previous documentation}} /// \param x1 Aaa. /// \param x2 Bbb. /// \param x2 Ccc. -int test_param18(int x1, int x2, int x3); +int test_param22(int x1, int x2, int x3); + +// expected-warning@+2 {{parameter 'bbb' not found in the function declaration}} expected-note@+2 {{did you mean 'ccc'?}} +/// \param aaa Meow. +/// \param bbb Bbb. +/// \returns aaa. +typedef int test_param23(int aaa, int ccc); + +// expected-warning@+2 {{parameter 'bbb' not found in the function declaration}} expected-note@+2 {{did you mean 'ccc'?}} +/// \param aaa Meow. +/// \param bbb Bbb. +/// \returns aaa. +typedef int (*test_param24)(int aaa, int ccc); + +// expected-warning@+2 {{parameter 'bbb' not found in the function declaration}} expected-note@+2 {{did you mean 'ccc'?}} +/// \param aaa Meow. +/// \param bbb Bbb. +/// \returns aaa. +typedef int (* const test_param25)(int aaa, int ccc); + +// expected-warning@+2 {{parameter 'bbb' not found in the function declaration}} expected-note@+2 {{did you mean 'ccc'?}} +/// \param aaa Meow. +/// \param bbb Bbb. +/// \returns aaa. +typedef int (C::*test_param26)(int aaa, int ccc); + +typedef int (*test_param27)(int aaa); + +// expected-warning@+1 {{'\param' command used in a comment that is not attached to a function declaration}} +/// \param aaa Meow. +typedef test_param27 test_param28; // expected-warning@+1 {{'\tparam' command used in a comment that is not attached to a template declaration}} @@ -324,6 +377,52 @@ using test_tparam14 = test_tparam13; template using test_tparam15 = test_tparam13; + +/// Aaa +/// \deprecated Bbb +void test_deprecated_1(int a) __attribute__((deprecated)); + +// We don't want \deprecated to warn about empty paragraph. It is fine to use +// \deprecated by itself without explanations. 
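An illustrative sketch, not part of the imported patch: the fix-its exercised in warn-documentation-fixits.cpp above pair a '\deprecated' doc-comment command with an actual deprecation attribute. The function names below are hypothetical; the before/after shape follows those tests.

// Before: the comment says deprecated, but callers get no compiler warning.
/// \deprecated Use new_api() instead.
void old_api();   // documentation checker warns and offers a fix-it

// After applying the fix-it: comment and attribute stay in sync.
/// \deprecated Use new_api() instead.
void old_api_fixed() __attribute__((deprecated));

// An availability or unavailable attribute also satisfies the check.
/// \deprecated Removed in the next release.
void old_api_gone() __attribute__((unavailable));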
+ +/// Aaa +/// \deprecated +void test_deprecated_2(int a) __attribute__((deprecated)); + +/// Aaa +/// \deprecated +void test_deprecated_3(int a) __attribute__((availability(macosx,introduced=10.4))); + +/// Aaa +/// \deprecated +void test_deprecated_4(int a) __attribute__((unavailable)); + +// expected-warning@+2 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+3 {{add a deprecation attribute to the declaration to silence this warning}} +/// Aaa +/// \deprecated +void test_deprecated_5(int a); + +// expected-warning@+2 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} expected-note@+3 {{add a deprecation attribute to the declaration to silence this warning}} +/// Aaa +/// \deprecated +void test_deprecated_6(int a) { +} + +// expected-warning@+2 {{declaration is marked with '\deprecated' command but does not have a deprecation attribute}} +/// Aaa +/// \deprecated +template +void test_deprecated_7(T aaa); + + +/// \invariant aaa +void test_invariant_1(int a); + +// expected-warning@+1 {{empty paragraph passed to '\invariant' command}} +/// \invariant +void test_invariant_2(int a); + + // no-warning /// \returns Aaa int test_returns_right_decl_1(int); @@ -403,6 +502,24 @@ enum test_returns_wrong_decl_8 { namespace test_returns_wrong_decl_10 { }; +// expected-warning@+1 {{'\endverbatim' command does not terminate a verbatim text block}} +/// \endverbatim +int test_verbatim_1(); + +// expected-warning@+1 {{'\endcode' command does not terminate a verbatim text block}} +/// \endcode +int test_verbatim_2(); + +// FIXME: we give a bad diagnostic here because we throw away non-documentation +// comments early. +// +// expected-warning@+3 {{'\endcode' command does not terminate a verbatim text block}} +/// \code +// foo +/// \endcode +int test_verbatim_3(); + + // expected-warning@+1 {{empty paragraph passed to '\brief' command}} int test1; ///< \brief\author Aaa @@ -714,3 +831,8 @@ inline void test_nocrash6() */ typedef const struct test_nocrash7 * test_nocrash8; +// We used to crash on this. + +/// aaa \unknown aaa \unknown aaa +int test_nocrash9; + diff --git a/test/Sema/warn-documentation.m b/test/Sema/warn-documentation.m index d6af6ed..8a894dc 100644 --- a/test/Sema/warn-documentation.m +++ b/test/Sema/warn-documentation.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only -Wno-objc-root-class -Wdocumentation -Wdocumentation-pedantic -verify %s +// RUN: %clang_cc1 -fsyntax-only -fblocks -Wno-objc-root-class -Wdocumentation -Wdocumentation-pedantic -verify %s @class NSString; @@ -91,3 +91,9 @@ int b; - (void)test2:(NSString *)aaa; @end +// expected-warning@+2 {{parameter 'bbb' not found in the function declaration}} expected-note@+2 {{did you mean 'ccc'?}} +/// \param aaa Meow. +/// \param bbb Bbb. +/// \returns aaa. +typedef int (^test_param1)(int aaa, int ccc); + diff --git a/test/Sema/warn-gnu-designators.c b/test/Sema/warn-gnu-designators.c index bdb3374..e55cfba 100644 --- a/test/Sema/warn-gnu-designators.c +++ b/test/Sema/warn-gnu-designators.c @@ -1,2 +1,3 @@ // RUN: %clang_cc1 -Wno-gnu-designator -verify %s +// expected-no-diagnostics struct { int x, y, z[12]; } value = { x:17, .z [3 ... 
5] = 7 }; diff --git a/test/Sema/warn-missing-variable-declarations.c b/test/Sema/warn-missing-variable-declarations.c new file mode 100644 index 0000000..631d3d2 --- /dev/null +++ b/test/Sema/warn-missing-variable-declarations.c @@ -0,0 +1,18 @@ +// RUN: %clang -Wmissing-variable-declarations -fsyntax-only -Xclang -verify %s + +int vbad1; // expected-warning{{no previous extern declaration for non-static variable 'vbad1'}} + +int vbad2; +int vbad2 = 10; // expected-warning{{no previous extern declaration for non-static variable 'vbad2'}} + +struct { + int mgood1; +} vbad3; // expected-warning{{no previous extern declaration for non-static variable 'vbad3'}} + +int vbad4; +int vbad4 = 10; // expected-warning{{no previous extern declaration for non-static variable 'vbad4'}} +extern int vbad4; + +extern int vgood1; +int vgood1; +int vgood1 = 10; diff --git a/test/Sema/warn-type-safety-mpi-hdf5.c b/test/Sema/warn-type-safety-mpi-hdf5.c index 9c2ee96..8c50cb2 100644 --- a/test/Sema/warn-type-safety-mpi-hdf5.c +++ b/test/Sema/warn-type-safety-mpi-hdf5.c @@ -147,6 +147,10 @@ void test_mpi_predefined_types( // Layout-compatible scalar types. MPI_Send(int_buf, 1, MPI_INT); // no-warning + // Null pointer constant. + MPI_Send(0, 0, MPI_INT); // no-warning + MPI_Send(NULL, 0, MPI_INT); // no-warning + // Layout-compatible class types. MPI_Send(pfi, 1, MPI_FLOAT_INT); // no-warning MPI_Send(pii, 1, MPI_2INT); // no-warning diff --git a/test/Sema/warn-type-safety.c b/test/Sema/warn-type-safety.c index 6f548aa..4ac453d 100644 --- a/test/Sema/warn-type-safety.c +++ b/test/Sema/warn-type-safety.c @@ -80,6 +80,14 @@ void test_tag_mismatch(int *ptr) C_func(ptr, 20); // should warn, but may cause false positives } +void test_null_pointer() +{ + C_func(0, C_tag); // no-warning + C_func((void *) 0, C_tag); // no-warning + C_func((int *) 0, C_tag); // no-warning + C_func((long *) 0, C_tag); // expected-warning {{argument type 'long *' doesn't match specified 'c' type tag that requires 'int *'}} +} + // Check that we look through typedefs in the special case of allowing 'char' // to be matched with 'signed char' or 'unsigned char'. void E_func(void *ptr, int tag) __attribute__(( pointer_with_type_tag(e,1,2) )); diff --git a/test/Sema/warn-type-safety.cpp b/test/Sema/warn-type-safety.cpp index d053fba..a73a9d9 100644 --- a/test/Sema/warn-type-safety.cpp +++ b/test/Sema/warn-type-safety.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only -verify %s +// RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s typedef struct ompi_datatype_t *MPI_Datatype; @@ -52,6 +52,8 @@ void test1(C *c, int *int_buf) { c->MPI_Send(int_buf, 1, MPI_INT); // no-warning c->MPI_Send(int_buf, 1, MPI_FLOAT); // expected-warning {{argument type 'int *' doesn't match specified 'mpi' type tag that requires 'float *'}} + c->MPI_Send(0, 0, MPI_INT); // no-warning + c->MPI_Send(nullptr, 0, MPI_INT); // no-warning OperatorIntStar i; c->MPI_Send(i, 1, MPI_INT); // no-warning diff --git a/test/Sema/warn-unreachable.c b/test/Sema/warn-unreachable.c index 636513f..2fbe1c7 100644 --- a/test/Sema/warn-unreachable.c +++ b/test/Sema/warn-unreachable.c @@ -132,3 +132,12 @@ void PR9774(int *s) { s[i] = 0; } +// Test case for . We should treat code guarded +// by 'x & 0' and 'x * 0' as unreachable. 
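A minimal sketch, not part of the imported patch, relating to the warn-type-safety tests touched above: Clang's type-tag attributes let a library pair a buffer argument with a tag argument describing the pointee type. All names here (my_send, MY_INT and friends) are hypothetical stand-ins for the MPI-style declarations those tests use.

typedef struct my_datatype_t *my_Datatype;   // opaque MPI-style handle

// Argument 1 is the buffer, argument 3 is the tag describing its pointee type.
int my_send(void *buf, int count, my_Datatype type)
    __attribute__((pointer_with_type_tag(my, 1, 3)));

// Each predefined tag constant records the C type it stands for.
extern struct my_datatype_t my_int_obj
    __attribute__((type_tag_for_datatype(my, int)));
#define MY_INT ((my_Datatype)&my_int_obj)

void demo(int *ibuf, float *fbuf) {
  my_send(ibuf, 1, MY_INT);  // ok: 'int *' matches the 'int' tag
  my_send(fbuf, 1, MY_INT);  // warning: 'float *' does not match the 'int' tag
  my_send(0, 0, MY_INT);     // ok: null pointer constants are exempt (tested above)
}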
+void calledFun(); +void test_mul_and_zero(int x) { + if (x & 0) calledFun(); // expected-warning {{will never be executed}} + if (0 & x) calledFun(); // expected-warning {{will never be executed}} + if (x * 0) calledFun(); // expected-warning {{will never be executed}} + if (0 * x) calledFun(); // expected-warning {{will never be executed}} +} diff --git a/test/Sema/warn-unused-function.c b/test/Sema/warn-unused-function.c index 8f4cdcb..a334e71 100644 --- a/test/Sema/warn-unused-function.c +++ b/test/Sema/warn-unused-function.c @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only -Wunused-function -Wunneeded-internal-declaration -verify %s +// RUN: %clang_cc1 -fsyntax-only -Wused-but-marked-unused -Wunused-function -Wunneeded-internal-declaration -verify %s // RUN: %clang_cc1 -fsyntax-only -verify -Wunused %s // RUN: %clang_cc1 -fsyntax-only -verify -Wall %s @@ -54,3 +54,15 @@ void f13(void) { char * const __attribute__((cleanup(cleanupMalloc))) a; (void)a; } + +// rdar://12233989 +extern void a(void) __attribute__((unused)); +extern void b(void) __attribute__((unused)); + +void b(void) +{ +} +void a(void) +{ + b(); +} diff --git a/test/Sema/wchar.c b/test/Sema/wchar.c index 28ec2f1..8708aa0 100644 --- a/test/Sema/wchar.c +++ b/test/Sema/wchar.c @@ -6,6 +6,8 @@ typedef __WCHAR_TYPE__ wchar_t; #if defined(_WIN32) || defined(_M_IX86) || defined(__CYGWIN__) \ || defined(_M_X64) || defined(SHORT_WCHAR) #define WCHAR_T_TYPE unsigned short +#elif defined(__arm) + #define WCHAR_T_TYPE unsigned int #elif defined(__sun) || defined(__AuroraUX__) #define WCHAR_T_TYPE long #else /* Solaris or AuroraUX. */ diff --git a/test/Sema/weak-import-on-enum.c b/test/Sema/weak-import-on-enum.c index 3a2c0e5..ad43769 100644 --- a/test/Sema/weak-import-on-enum.c +++ b/test/Sema/weak-import-on-enum.c @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -verify -triple x86_64-apple-darwin %s // RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -verify %s +// expected-no-diagnostics // rdar://10277579 enum __attribute__((deprecated)) __attribute__((weak_import)) A { diff --git a/test/SemaCXX/2008-01-11-BadWarning.cpp b/test/SemaCXX/2008-01-11-BadWarning.cpp index b84e7c1..e27c084 100644 --- a/test/SemaCXX/2008-01-11-BadWarning.cpp +++ b/test/SemaCXX/2008-01-11-BadWarning.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wall %s +// expected-no-diagnostics // rdar://5683899 void** f(void **Buckets, unsigned NumBuckets) { return Buckets + NumBuckets; diff --git a/test/SemaCXX/MicrosoftCompatibilityNoExceptions.cpp b/test/SemaCXX/MicrosoftCompatibilityNoExceptions.cpp index d932b5d..14e5160 100644 --- a/test/SemaCXX/MicrosoftCompatibilityNoExceptions.cpp +++ b/test/SemaCXX/MicrosoftCompatibilityNoExceptions.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -fms-compatibility +// expected-no-diagnostics // PR13153 namespace std {} diff --git a/test/SemaCXX/MicrosoftExtensions.cpp b/test/SemaCXX/MicrosoftExtensions.cpp index 0b72cd3..6b43ea2 100644 --- a/test/SemaCXX/MicrosoftExtensions.cpp +++ b/test/SemaCXX/MicrosoftExtensions.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 %s -triple i686-pc-win32 -fsyntax-only -Wmicrosoft -verify -fms-extensions -fexceptions -fcxx-exceptions +// RUN: %clang_cc1 %s -triple i686-pc-win32 -fsyntax-only -Wmicrosoft -Wc++11-extensions -Wno-long-long -verify -fms-extensions -fexceptions -fcxx-exceptions // ::type_info is predeclared with forward class declartion @@ -112,11 +112,11 @@ const int seventeen = 17; typedef int Int; struct X0 { - enum E1 : Int { SomeOtherValue 
} field; // expected-warning{{enumeration types with a fixed underlying type are a Microsoft extension}} + enum E1 : Int { SomeOtherValue } field; // expected-warning{{enumeration types with a fixed underlying type are a C++11 extension}} enum E1 : seventeen; }; -enum : long long { // expected-warning{{enumeration types with a fixed underlying type are a Microsoft extension}} +enum : long long { // expected-warning{{enumeration types with a fixed underlying type are a C++11 extension}} SomeValue = 0x100000000 }; @@ -203,3 +203,4 @@ struct PR11150 { void f() { int __except = 0; } +void ::f(); // expected-warning{{extra qualification on member 'f'}} diff --git a/test/SemaCXX/PR10447.cpp b/test/SemaCXX/PR10447.cpp index 08644ad..5ba74aa 100644 --- a/test/SemaCXX/PR10447.cpp +++ b/test/SemaCXX/PR10447.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify %s +// expected-no-diagnostics // PR12223 namespace test1 { diff --git a/test/SemaCXX/PR5086-ambig-resolution-enum.cpp b/test/SemaCXX/PR5086-ambig-resolution-enum.cpp index b5aac5f..eeb73f6 100644 --- a/test/SemaCXX/PR5086-ambig-resolution-enum.cpp +++ b/test/SemaCXX/PR5086-ambig-resolution-enum.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics class C { public: diff --git a/test/SemaCXX/PR6562.cpp b/test/SemaCXX/PR6562.cpp index 854d9b0..144fde6 100644 --- a/test/SemaCXX/PR6562.cpp +++ b/test/SemaCXX/PR6562.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct X { ~X(); }; template diff --git a/test/SemaCXX/PR9884.cpp b/test/SemaCXX/PR9884.cpp index ab883c4..bb8bd6a 100644 --- a/test/SemaCXX/PR9884.cpp +++ b/test/SemaCXX/PR9884.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics class Base { protected: Base(int val); diff --git a/test/SemaCXX/PR9902.cpp b/test/SemaCXX/PR9902.cpp index 80086e4..a34f99c 100644 --- a/test/SemaCXX/PR9902.cpp +++ b/test/SemaCXX/PR9902.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics template struct __allocator_traits_rebind diff --git a/test/SemaCXX/PR9908.cpp b/test/SemaCXX/PR9908.cpp index fc090cc..a15b637 100644 --- a/test/SemaCXX/PR9908.cpp +++ b/test/SemaCXX/PR9908.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics template struct __allocator_traits_rebind diff --git a/test/SemaCXX/__try.cpp b/test/SemaCXX/__try.cpp index cb5d38a..a0f503a 100644 --- a/test/SemaCXX/__try.cpp +++ b/test/SemaCXX/__try.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -fborland-extensions -fcxx-exceptions %s +// expected-no-diagnostics // This test is from http://docwiki.embarcadero.com/RADStudio/en/Try diff --git a/test/SemaCXX/ambiguous-conversion-show-overload.cpp b/test/SemaCXX/ambiguous-conversion-show-overload.cpp new file mode 100644 index 0000000..6429651 --- /dev/null +++ b/test/SemaCXX/ambiguous-conversion-show-overload.cpp @@ -0,0 +1,21 @@ +// RUN: %clang_cc1 -fsyntax-only -fshow-overloads=best -fno-caret-diagnostics %s 2>&1 | FileCheck %s +struct S { + S(void*); + S(char*); + S(unsigned char*); + S(signed char*); + S(unsigned short*); + S(signed short*); + S(unsigned int*); + S(signed int*); +}; +void f(const S& s); +void g() { + f(0); +} +// CHECK: {{conversion from 'int' to 'const S' is ambiguous}} +// CHECK-NEXT: {{candidate constructor}} +// CHECK-NEXT: {{candidate constructor}} +// CHECK-NEXT: {{candidate constructor}} +// CHECK-NEXT: {{candidate constructor}} +// 
CHECK-NEXT: {{remaining 4 candidates omitted; pass -fshow-overloads=all to show them}} diff --git a/test/SemaCXX/anonymous-union-cxx11.cpp b/test/SemaCXX/anonymous-union-cxx11.cpp index 8e682eb..9f987a9 100644 --- a/test/SemaCXX/anonymous-union-cxx11.cpp +++ b/test/SemaCXX/anonymous-union-cxx11.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify -pedantic %s +// expected-no-diagnostics namespace PR12866 { struct bar { diff --git a/test/SemaCXX/ast-print.cpp b/test/SemaCXX/ast-print.cpp new file mode 100644 index 0000000..aeb4039 --- /dev/null +++ b/test/SemaCXX/ast-print.cpp @@ -0,0 +1,83 @@ +// RUN: %clang_cc1 -ast-print %s | FileCheck %s + +// CHECK: r; +// CHECK-NEXT: (r->method()); +struct MyClass +{ + void method() {} +}; + +struct Reference +{ + MyClass* object; + MyClass* operator ->() { return object; } +}; + +void test1() { + Reference r; + (r->method()); +} + +// CHECK: if (int a = 1) +// CHECK: while (int a = 1) +// CHECK: switch (int a = 1) + +void test2() +{ + if (int a = 1) { } + while (int a = 1) { } + switch (int a = 1) { } +} + +// CHECK: new (1) int; +void *operator new (typeof(sizeof(1)), int, int = 2); +void test3() { + new (1) int; +} + +// CHECK: new X; +struct X { + void *operator new (typeof(sizeof(1)), int = 2); +}; +void test4() { new X; } + +// CHECK: for (int i = 2097, j = 42; false;) +void test5() { + for (int i = 2097, j = 42; false;) {} +} + +// CHECK: test6fn((int &)y); +void test6fn(int& x); +void test6() { + unsigned int y = 0; + test6fn((int&)y); +} + +// CHECK: S s( 1, 2 ); + +template void test7() +{ + S s( 1,2 ); +} + + +// CHECK: t.~T(); + +template void test8(T t) { t.~T(); } + + +// CHECK: enum E { +// CHECK-NEXT: A, +// CHECK-NEXT: B, +// CHECK-NEXT: C +// CHECK-NEXT: }; +// CHECK-NEXT: {{^[ ]+}}E a = A; + +struct test9 +{ + void f() + { + enum E { A, B, C }; + E a = A; + } +}; diff --git a/test/SemaCXX/attr-format.cpp b/test/SemaCXX/attr-format.cpp index da134a1..3d5c339 100644 --- a/test/SemaCXX/attr-format.cpp +++ b/test/SemaCXX/attr-format.cpp @@ -14,6 +14,8 @@ struct S { expected-error{{out of bounds}} const char* h3(const char*) __attribute__((format_arg(1))); // \ expected-error{{invalid for the implicit this argument}} + + void operator() (const char*, ...) __attribute__((format(printf, 2, 3))); }; // PR5521 @@ -33,3 +35,9 @@ namespace PR8625 { s.f(str, "%s", str); } } + +// Make sure we interpret member operator calls as having an implicit +// this argument. +void test_operator_call(S s, const char* str) { + s("%s", str); +} diff --git a/test/SemaCXX/attr-nodebug.cpp b/test/SemaCXX/attr-nodebug.cpp new file mode 100644 index 0000000..b441da21 --- /dev/null +++ b/test/SemaCXX/attr-nodebug.cpp @@ -0,0 +1,7 @@ +// RUN: %clang_cc1 %s -verify -fsyntax-only +// Note: most of the 'nodebug' tests are in attr-nodebug.c. 
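A short sketch, not part of the imported patch, of why the attr-format.cpp change above uses indices 2 and 3: for a non-static member function the implicit this pointer counts as argument 1, so the format string is argument 2 and the checked variadic arguments start at 3. The Logger type is hypothetical.

struct Logger {
  // 'this' occupies slot 1, the format string is slot 2, variadic args start at 3.
  void operator()(const char *fmt, ...) __attribute__((format(printf, 2, 3)));
};

void log_demo(Logger log, const char *name) {
  log("%s", name);  // checked like printf: ok
  log("%d", name);  // -Wformat: '%d' expects int, argument is 'const char *'
}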
+ +// expected-no-diagnostics +class c { + void t3() __attribute__((nodebug)); +}; diff --git a/test/SemaCXX/attr-noreturn.cpp b/test/SemaCXX/attr-noreturn.cpp index eaf0d0c..f3d548b 100644 --- a/test/SemaCXX/attr-noreturn.cpp +++ b/test/SemaCXX/attr-noreturn.cpp @@ -54,3 +54,29 @@ class xpto { int xpto::blah() { return 3; // expected-warning {{function 'blah' declared 'noreturn' should not return}} } + +// PR12948 + +namespace PR12948 { + template + void foo() __attribute__((__noreturn__)); + + template + void foo() { + while (1) continue; + } + + void bar() __attribute__((__noreturn__)); + + void bar() { + foo<0>(); + } + + + void baz() __attribute__((__noreturn__)); + typedef void voidfn(); + voidfn baz; + + template void wibble() __attribute__((__noreturn__)); + template voidfn wibble; +} diff --git a/test/SemaCXX/attr-unused.cpp b/test/SemaCXX/attr-unused.cpp new file mode 100644 index 0000000..b74bc91 --- /dev/null +++ b/test/SemaCXX/attr-unused.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -verify -Wunused -Wused-but-marked-unused -fsyntax-only %s + +namespace ns_unused { typedef int Int_unused __attribute__((unused)); } +namespace ns_not_unused { typedef int Int_not_unused; } + +void f() { + ns_not_unused::Int_not_unused i1; // expected-warning {{unused variable}} + ns_unused::Int_unused i0; // expected-warning {{'Int_unused' was marked unused but was used}} +} diff --git a/test/SemaCXX/blocks-1.cpp b/test/SemaCXX/blocks-1.cpp index 1b15094..02e9cac 100644 --- a/test/SemaCXX/blocks-1.cpp +++ b/test/SemaCXX/blocks-1.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -fblocks -std=c++11 +// expected-no-diagnostics extern "C" int exit(int); diff --git a/test/SemaCXX/blocks.cpp b/test/SemaCXX/blocks.cpp index adbff55..3f81c27 100644 --- a/test/SemaCXX/blocks.cpp +++ b/test/SemaCXX/blocks.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -fblocks +// expected-no-diagnostics void tovoid(void*); diff --git a/test/SemaCXX/borland-extensions.cpp b/test/SemaCXX/borland-extensions.cpp index 4831530..1e4bd45 100644 --- a/test/SemaCXX/borland-extensions.cpp +++ b/test/SemaCXX/borland-extensions.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -fborland-extensions +// expected-no-diagnostics // Borland extensions diff --git a/test/SemaCXX/builtin-exception-spec.cpp b/test/SemaCXX/builtin-exception-spec.cpp index 324d20e..590cd3c 100644 --- a/test/SemaCXX/builtin-exception-spec.cpp +++ b/test/SemaCXX/builtin-exception-spec.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -isystem %S/Inputs -fsyntax-only -verify %s +// expected-no-diagnostics #include extern "C" { diff --git a/test/SemaCXX/builtin-ptrtomember-overload.cpp b/test/SemaCXX/builtin-ptrtomember-overload.cpp index c7b5173..c27d642 100644 --- a/test/SemaCXX/builtin-ptrtomember-overload.cpp +++ b/test/SemaCXX/builtin-ptrtomember-overload.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics struct A {}; diff --git a/test/SemaCXX/builtin_objc_msgSend.cpp b/test/SemaCXX/builtin_objc_msgSend.cpp index 0e90d54..082fb28 100644 --- a/test/SemaCXX/builtin_objc_msgSend.cpp +++ b/test/SemaCXX/builtin_objc_msgSend.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // rdar://8686888 typedef struct objc_selector *SEL; diff --git a/test/SemaCXX/builtins-arm.cpp b/test/SemaCXX/builtins-arm.cpp new file mode 100644 index 0000000..8a0cf81 --- /dev/null +++ b/test/SemaCXX/builtins-arm.cpp @@ -0,0 +1,6 @@ +// RUN: %clang_cc1 -triple armv7 
-fsyntax-only -verify %s + +// va_list on ARM AAPCS is struct { void* __ap }. +int test1(const __builtin_va_list &ap) { + return __builtin_va_arg(ap, int); // expected-error {{binding of reference to type '__builtin_va_list' to a value of type 'const __builtin_va_list' drops qualifiers}} +} diff --git a/test/SemaCXX/builtins-va_arg.cpp b/test/SemaCXX/builtins-va_arg.cpp new file mode 100644 index 0000000..4f549c8 --- /dev/null +++ b/test/SemaCXX/builtins-va_arg.cpp @@ -0,0 +1,52 @@ +// RUN: %clang_cc1 %s -ffreestanding +// RUN: %clang_cc1 %s -ffreestanding -triple i686-unknown-linux +// RUN: %clang_cc1 %s -ffreestanding -triple x86_64-unknown-linux +// RUN: %clang_cc1 %s -ffreestanding -triple mips-unknown-linux +// RUN: %clang_cc1 %s -ffreestanding -triple mipsel-unknown-linux +// RUN: %clang_cc1 %s -ffreestanding -triple armv7-unknown-linux-gnueabi +// RUN: %clang_cc1 %s -ffreestanding -triple thumbv7-unknown-linux-gnueabi + +#include "stdarg.h" + +int int_accumulator = 0; +double double_accumulator = 0; + +int test_vprintf(const char *fmt, va_list ap) { + char ch; + int result = 0; + while (*fmt != '\0') { + ch = *fmt++; + if (ch != '%') { + continue; + } + + ch = *fmt++; + switch (ch) { + case 'd': + int_accumulator += va_arg(ap, int); + result++; + break; + + case 'f': + double_accumulator += va_arg(ap, double); + result++; + break; + + default: + break; + } + + if (ch == '0') { + break; + } + } + return result; +} + +int test_printf(const char *fmt, ...) { + va_list ap; + va_start(ap, fmt); + int result = test_vprintf(fmt, ap); + va_end(ap); + return result; +} diff --git a/test/SemaCXX/builtins.cpp b/test/SemaCXX/builtins.cpp index 568ba5d..6b055cf 100644 --- a/test/SemaCXX/builtins.cpp +++ b/test/SemaCXX/builtins.cpp @@ -7,3 +7,16 @@ void f() { } void a() { __builtin_va_list x, y; ::__builtin_va_copy(x, y); } + +// +template +int equal(const char *s1, const char *s2) { + return Compare(s1, s2) == 0; +} +// FIXME: Our error recovery here sucks +template int equal<&__builtin_strcmp>(const char*, const char*); // expected-error {{builtin functions must be directly called}} expected-error {{expected unqualified-id}} expected-error {{expected ')'}} expected-note {{to match this '('}} + +// PR13195 +void f2() { + __builtin_isnan; // expected-error {{builtin functions must be directly called}} +} diff --git a/test/SemaCXX/cast-conversion.cpp b/test/SemaCXX/cast-conversion.cpp index dd2bc98..270f968 100644 --- a/test/SemaCXX/cast-conversion.cpp +++ b/test/SemaCXX/cast-conversion.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// RUN: %clang_cc1 -fsyntax-only -triple x86_64-unknown-unknown -verify %s -std=c++11 struct R { R(int); @@ -45,3 +45,23 @@ protected: static_cast(f0<0>()); // expected-error{{ambiguous}} } }; + +void *intToPointer1(short s) { + return (void*)s; // expected-warning{{cast to 'void *' from smaller integer type 'short'}} +} + +void *intToPointer2(short s) { + return reinterpret_cast(s); +} + +void *intToPointer3(bool b) { + return (void*)b; +} + +void *intToPointer4() { + return (void*)(3 + 7); +} + +void *intToPointer5(long l) { + return (void*)l; +} diff --git a/test/SemaCXX/cast-explicit-ctor.cpp b/test/SemaCXX/cast-explicit-ctor.cpp index 0052856..41d2fa2 100644 --- a/test/SemaCXX/cast-explicit-ctor.cpp +++ b/test/SemaCXX/cast-explicit-ctor.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct B { explicit B(bool); }; void f() { (void)(B)true; diff --git a/test/SemaCXX/class-layout.cpp 
b/test/SemaCXX/class-layout.cpp index d81944a..f2ff9fc 100644 --- a/test/SemaCXX/class-layout.cpp +++ b/test/SemaCXX/class-layout.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -fsyntax-only -verify +// expected-no-diagnostics #define SA(n, p) int a##n[(p) ? 1 : -1] diff --git a/test/SemaCXX/class.cpp b/test/SemaCXX/class.cpp index 4dffc8d..972a79b 100644 --- a/test/SemaCXX/class.cpp +++ b/test/SemaCXX/class.cpp @@ -120,7 +120,7 @@ struct C4 { struct S { void f(); // expected-note 1 {{previous declaration}} - void S::f() {} // expected-warning {{extra qualification on member}} expected-error {{class member cannot be redeclared}} expected-note {{previous declaration}} expected-note {{previous definition}} + void S::f() {} // expected-error {{extra qualification on member}} expected-error {{class member cannot be redeclared}} expected-note {{previous declaration}} expected-note {{previous definition}} void f() {} // expected-error {{class member cannot be redeclared}} expected-error {{redefinition}} }; diff --git a/test/SemaCXX/comma.cpp b/test/SemaCXX/comma.cpp index 79ff7d1..3a6162b 100644 --- a/test/SemaCXX/comma.cpp +++ b/test/SemaCXX/comma.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // PR6076 void f(); diff --git a/test/SemaCXX/compare.cpp b/test/SemaCXX/compare.cpp index 28e2dd0..432069f 100644 --- a/test/SemaCXX/compare.cpp +++ b/test/SemaCXX/compare.cpp @@ -89,8 +89,8 @@ int test0(long a, unsigned long b) { // (C,b) (C == (unsigned long) b) + (C == (unsigned int) b) + - (C == (unsigned short) b) + - (C == (unsigned char) b) + + (C == (unsigned short) b) + // expected-warning {{comparison of constant 65536 with expression of type 'unsigned short' is always false}} + (C == (unsigned char) b) + // expected-warning {{comparison of constant 65536 with expression of type 'unsigned char' is always false}} ((long) C == b) + ((int) C == b) + ((short) C == b) + @@ -101,8 +101,8 @@ int test0(long a, unsigned long b) { ((signed char) C == (unsigned char) b) + (C < (unsigned long) b) + (C < (unsigned int) b) + - (C < (unsigned short) b) + - (C < (unsigned char) b) + + (C < (unsigned short) b) + // expected-warning {{comparison of constant 65536 with expression of type 'unsigned short' is always false}} + (C < (unsigned char) b) + // expected-warning {{comparison of constant 65536 with expression of type 'unsigned char' is always false}} ((long) C < b) + ((int) C < b) + ((short) C < b) + @@ -119,8 +119,8 @@ int test0(long a, unsigned long b) { (a == (unsigned char) C) + ((long) a == C) + ((int) a == C) + - ((short) a == C) + - ((signed char) a == C) + + ((short) a == C) + // expected-warning {{comparison of constant 65536 with expression of type 'short' is always false}} + ((signed char) a == C) + // expected-warning {{comparison of constant 65536 with expression of type 'signed char' is always false}} ((long) a == (unsigned long) C) + ((int) a == (unsigned int) C) + ((short) a == (unsigned short) C) + @@ -131,8 +131,8 @@ int test0(long a, unsigned long b) { (a < (unsigned char) C) + ((long) a < C) + ((int) a < C) + - ((short) a < C) + - ((signed char) a < C) + + ((short) a < C) + // expected-warning {{comparison of constant 65536 with expression of type 'short' is always true}} + ((signed char) a < C) + // expected-warning {{comparison of constant 65536 with expression of type 'signed char' is always true}} ((long) a < (unsigned long) C) + // expected-warning {{comparison of integers of different signs}} ((int) a < 
(unsigned int) C) + // expected-warning {{comparison of integers of different signs}} ((short) a < (unsigned short) C) + @@ -141,8 +141,8 @@ int test0(long a, unsigned long b) { // (0x80000,b) (0x80000 == (unsigned long) b) + (0x80000 == (unsigned int) b) + - (0x80000 == (unsigned short) b) + - (0x80000 == (unsigned char) b) + + (0x80000 == (unsigned short) b) + // expected-warning {{comparison of constant 524288 with expression of type 'unsigned short' is always false}} + (0x80000 == (unsigned char) b) + // expected-warning {{comparison of constant 524288 with expression of type 'unsigned char' is always false}} ((long) 0x80000 == b) + ((int) 0x80000 == b) + ((short) 0x80000 == b) + @@ -153,8 +153,8 @@ int test0(long a, unsigned long b) { ((signed char) 0x80000 == (unsigned char) b) + (0x80000 < (unsigned long) b) + (0x80000 < (unsigned int) b) + - (0x80000 < (unsigned short) b) + - (0x80000 < (unsigned char) b) + + (0x80000 < (unsigned short) b) + // expected-warning {{comparison of constant 524288 with expression of type 'unsigned short' is always false}} + (0x80000 < (unsigned char) b) + // expected-warning {{comparison of constant 524288 with expression of type 'unsigned char' is always false}} ((long) 0x80000 < b) + ((int) 0x80000 < b) + ((short) 0x80000 < b) + @@ -171,8 +171,8 @@ int test0(long a, unsigned long b) { (a == (unsigned char) 0x80000) + ((long) a == 0x80000) + ((int) a == 0x80000) + - ((short) a == 0x80000) + - ((signed char) a == 0x80000) + + ((short) a == 0x80000) + // expected-warning {{comparison of constant 524288 with expression of type 'short' is always false}} + ((signed char) a == 0x80000) + // expected-warning {{comparison of constant 524288 with expression of type 'signed char' is always false}} ((long) a == (unsigned long) 0x80000) + ((int) a == (unsigned int) 0x80000) + ((short) a == (unsigned short) 0x80000) + @@ -183,8 +183,8 @@ int test0(long a, unsigned long b) { (a < (unsigned char) 0x80000) + ((long) a < 0x80000) + ((int) a < 0x80000) + - ((short) a < 0x80000) + - ((signed char) a < 0x80000) + + ((short) a < 0x80000) + // expected-warning {{comparison of constant 524288 with expression of type 'short' is always true}} + ((signed char) a < 0x80000) + // expected-warning {{comparison of constant 524288 with expression of type 'signed char' is always true}} ((long) a < (unsigned long) 0x80000) + // expected-warning {{comparison of integers of different signs}} ((int) a < (unsigned int) 0x80000) + // expected-warning {{comparison of integers of different signs}} ((short) a < (unsigned short) 0x80000) + diff --git a/test/SemaCXX/complex-init-list.cpp b/test/SemaCXX/complex-init-list.cpp index e75833a..f70f9df 100644 --- a/test/SemaCXX/complex-init-list.cpp +++ b/test/SemaCXX/complex-init-list.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -fsyntax-only -pedantic +// expected-no-diagnostics // This file tests the clang extension which allows initializing the components // of a complex number individually using an initialization list. 
Basically, diff --git a/test/SemaCXX/conditional-expr.cpp b/test/SemaCXX/conditional-expr.cpp index a80eda4..7595f1d 100644 --- a/test/SemaCXX/conditional-expr.cpp +++ b/test/SemaCXX/conditional-expr.cpp @@ -57,6 +57,16 @@ struct Ambig { operator signed char(); // expected-note 2 {{candidate function}} }; +struct Abstract { + virtual ~Abstract() = 0; // expected-note {{unimplemented pure virtual method '~Abstract' in 'Abstract'}} +}; + +struct Derived1: Abstract { +}; + +struct Derived2: Abstract { +}; + void test() { // This function tests C++0x 5.16 @@ -206,6 +216,9 @@ void test() // Note the thing that this does not test: since DR446, various situations // *must* create a separate temporary copy of class objects. This can only // be properly tested at runtime, though. + + const Abstract &a = true ? static_cast(Derived1()) : Derived2(); // expected-error {{allocating an object of abstract class type 'const Abstract'}} + true ? static_cast(Derived1()) : throw 3; // expected-error {{allocating an object of abstract class type 'const Abstract'}} } namespace PR6595 { diff --git a/test/SemaCXX/constant-expression-cxx11.cpp b/test/SemaCXX/constant-expression-cxx11.cpp index a3ead79..f504eb6 100644 --- a/test/SemaCXX/constant-expression-cxx11.cpp +++ b/test/SemaCXX/constant-expression-cxx11.cpp @@ -521,12 +521,18 @@ namespace DependentValues { struct I { int n; typedef I V[10]; }; I::V x, y; -template struct S { +int g(); +template struct S : T { int k; void f() { I::V &cells = B ? x : y; I &i = cells[k]; switch (i.n) {} + + // FIXME: We should be able to diagnose this. + constexpr int n = g(); + + constexpr int m = this->g(); // ok, could be constexpr } }; @@ -741,6 +747,15 @@ constexpr bool check(T a, T b) { return a == b.k; } static_assert(S(5) == 11, ""); static_assert(check(S(5), 11), ""); +namespace PR14171 { + +struct X { + constexpr (operator int)() { return 0; } +}; +static_assert(X() == 0, ""); + +} + } } @@ -1374,3 +1389,69 @@ namespace ConditionalLValToRVal { constexpr A a(4); static_assert(f(a).v == 4, ""); } + +namespace TLS { + __thread int n; + int m; + + constexpr bool b = &n == &n; + + constexpr int *p = &n; // expected-error{{constexpr variable 'p' must be initialized by a constant expression}} + + constexpr int *f() { return &n; } + constexpr int *q = f(); // expected-error{{constexpr variable 'q' must be initialized by a constant expression}} + constexpr bool c = f() == f(); + + constexpr int *g() { return &m; } + constexpr int *r = g(); +} + +namespace Void { + constexpr void f() { return; } // expected-error{{constexpr function's return type 'void' is not a literal type}} + + void assert_failed(const char *msg, const char *file, int line); // expected-note {{declared here}} +#define ASSERT(expr) ((expr) ? 
static_cast(0) : assert_failed(#expr, __FILE__, __LINE__)) + template + constexpr T get(T (&a)[S], size_t k) { + return ASSERT(k > 0 && k < S), a[k]; // expected-note{{non-constexpr function 'assert_failed'}} + } +#undef ASSERT + template int get(int (&a)[4], size_t); + constexpr int arr[] = { 4, 1, 2, 3, 4 }; + static_assert(get(arr, 1) == 1, ""); + static_assert(get(arr, 4) == 4, ""); + static_assert(get(arr, 0) == 4, ""); // expected-error{{not an integral constant expression}} \ + // expected-note{{in call to 'get(arr, 0)'}} +} + +namespace std { struct type_info; } + +namespace TypeId { + struct A { virtual ~A(); }; + A f(); + A &g(); + constexpr auto &x = typeid(f()); + constexpr auto &y = typeid(g()); // expected-error{{constant expression}} \ + // expected-note{{typeid applied to expression of polymorphic type 'TypeId::A' is not allowed in a constant expression}} +} + +namespace PR14203 { + struct duration { + constexpr duration() {} + constexpr operator int() const { return 0; } + }; + template void f() { + // If we want to evaluate this at the point of the template definition, we + // need to trigger the implicit definition of the move constructor at that + // point. + // FIXME: C++ does not permit us to implicitly define it at the appropriate + // times, since it is only allowed to be implicitly defined when it is + // odr-used. + constexpr duration d = duration(); + } + // FIXME: It's unclear whether this is valid. On the one hand, we're not + // allowed to generate a move constructor. On the other hand, if we did, + // this would be a constant expression. For now, we generate a move + // constructor here. + int n = sizeof(short{duration(duration())}); +} diff --git a/test/SemaCXX/constant-expression.cpp b/test/SemaCXX/constant-expression.cpp index ec50cb7..942bf41 100644 --- a/test/SemaCXX/constant-expression.cpp +++ b/test/SemaCXX/constant-expression.cpp @@ -115,7 +115,7 @@ int array2[recurse2]; // expected-warning {{variable length array}} expected-war namespace FloatConvert { typedef int a[(int)42.3]; typedef int a[(int)42.997]; - typedef int b[(long long)4e20]; // expected-warning {{variable length}} expected-error {{variable length}} expected-warning {{'long long' is an extension}} + typedef int b[(long long)4e20]; // expected-warning {{variable length}} expected-error {{variable length}} expected-warning {{'long long' is a C++11 extension}} } // PR12626 diff --git a/test/SemaCXX/constexpr-turing.cpp b/test/SemaCXX/constexpr-turing.cpp index c515378..07c04ef 100644 --- a/test/SemaCXX/constexpr-turing.cpp +++ b/test/SemaCXX/constexpr-turing.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify -std=c++11 %s +// expected-no-diagnostics // A direct proof that constexpr is Turing-complete, once DR1454 is implemented. 
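A minimal sketch, not part of the imported patch: the TLS cases added to constant-expression-cxx11.cpp above hinge on one rule: the address of a thread-local variable is not an address constant, so it cannot initialize a constexpr pointer, while the address of an ordinary static-storage object can. The variable names are made up.

__thread int tls_n;   // one object per thread
int plain_n;

constexpr int *p_bad  = &tls_n;    // error: not a constant expression
constexpr int *p_good = &plain_n;  // ok: address of a non-thread-local object

int *current_tls() { return &tls_n; }  // fine: evaluated at run time, per thread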
diff --git a/test/SemaCXX/constructor-initializer.cpp b/test/SemaCXX/constructor-initializer.cpp index f503d01..ecbe7bf 100644 --- a/test/SemaCXX/constructor-initializer.cpp +++ b/test/SemaCXX/constructor-initializer.cpp @@ -135,12 +135,12 @@ class InitializeUsingSelfTest { TwoInOne D; int E; InitializeUsingSelfTest(int F) - : A(A), // expected-warning {{field is uninitialized when used here}} - B((((B)))), // expected-warning {{field is uninitialized when used here}} - C(A && InitializeUsingSelfTest::C), // expected-warning {{field is uninitialized when used here}} - D(D, // expected-warning {{field is uninitialized when used here}} - D), // expected-warning {{field is uninitialized when used here}} - E(IntParam(E)) {} // expected-warning {{field is uninitialized when used here}} + : A(A), // expected-warning {{field 'A' is uninitialized when used here}} + B((((B)))), // expected-warning {{field 'B' is uninitialized when used here}} + C(A && InitializeUsingSelfTest::C), // expected-warning {{field 'C' is uninitialized when used here}} + D(D, // expected-warning {{field 'D' is uninitialized when used here}} + D), // expected-warning {{field 'D' is uninitialized when used here}} + E(IntParam(E)) {} // expected-warning {{field 'E' is uninitialized when used here}} }; int IntWrapper(int &i) { return 0; }; @@ -160,8 +160,8 @@ class CopyConstructorTest { bool A, B, C; CopyConstructorTest(const CopyConstructorTest& rhs) : A(rhs.A), - B(B), // expected-warning {{field is uninitialized when used here}} - C(rhs.C || C) { } // expected-warning {{field is uninitialized when used here}} + B(B), // expected-warning {{field 'B' is uninitialized when used here}} + C(rhs.C || C) { } // expected-warning {{field 'C' is uninitialized when used here}} }; // Make sure we aren't marking default constructors when we shouldn't be. 
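A small sketch, not part of the imported patch: the constructor-initializer.cpp churn above only adds the field name to an existing -Wuninitialized diagnostic. What it catches is a mem-initializer reading a member that has not been initialized yet; members are initialized in declaration order, not in the order the initializer list spells them. The Pair class is hypothetical.

struct Pair {
  int first;
  int second;
  explicit Pair(int v)
      : first(second),  // warning: field 'second' is uninitialized when used here
        second(v) {}    // 'second' is declared after 'first', so it is set too late
  // Fix: initialize 'first' from the parameter instead, e.g. first(v), second(v).
};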
diff --git a/test/SemaCXX/crashes.cpp b/test/SemaCXX/crashes.cpp index d02704c..f5682bd 100644 --- a/test/SemaCXX/crashes.cpp +++ b/test/SemaCXX/crashes.cpp @@ -136,3 +136,38 @@ cc_YCbCr cc_hsl::YCbCr() } } + +namespace test1 { + int getString(const int*); + template class ELFObjectFile { + const int* sh; + ELFObjectFile() { + switch (*sh) { + } + int SectionName(getString(sh)); + } + }; +} + +namespace test2 { + struct fltSemantics ; + const fltSemantics &foobar(); + void VisitCastExpr(int x) { + switch (x) { + case 42: + const fltSemantics &Sem = foobar(); + } + } +} + +namespace test3 { + struct nsCSSRect { + }; + static int nsCSSRect::* sides; + nsCSSRect dimenX; + void ParseBoxCornerRadii(int y) { + switch (y) { + } + int& x = dimenX.*sides; + } +} diff --git a/test/SemaCXX/cstyle-cast.cpp b/test/SemaCXX/cstyle-cast.cpp index 12495ec..468c8ec 100644 --- a/test/SemaCXX/cstyle-cast.cpp +++ b/test/SemaCXX/cstyle-cast.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// REQUIRES: LP64 struct A {}; diff --git a/test/SemaCXX/cxx0x-delegating-ctors.cpp b/test/SemaCXX/cxx0x-delegating-ctors.cpp index 2d49f0f..a34ee4f 100644 --- a/test/SemaCXX/cxx0x-delegating-ctors.cpp +++ b/test/SemaCXX/cxx0x-delegating-ctors.cpp @@ -33,7 +33,9 @@ foo::foo (const float &f) : foo(&f) { //expected-error{{creates a delegation cyc //expected-note{{which delegates to}} } -foo::foo (char) : i(3), foo(3) { // expected-error{{must appear alone}} +foo::foo (char) : + i(3), + foo(3) { // expected-error{{must appear alone}} } // This should not cause an infinite loop diff --git a/test/SemaCXX/cxx0x-initializer-constructor.cpp b/test/SemaCXX/cxx0x-initializer-constructor.cpp index 223e140..a657ec8 100644 --- a/test/SemaCXX/cxx0x-initializer-constructor.cpp +++ b/test/SemaCXX/cxx0x-initializer-constructor.cpp @@ -304,3 +304,19 @@ namespace init_list_default { }; B b {}; // calls default constructor } + + +// +namespace rdar11974632 { + struct X { + X(const X&) = delete; + X(int); + }; + + template + struct Y { + X x{1}; + }; + + Y yi; +} diff --git a/test/SemaCXX/cxx0x-initializer-stdinitializerlist.cpp b/test/SemaCXX/cxx0x-initializer-stdinitializerlist.cpp index f11e19a..0962253 100644 --- a/test/SemaCXX/cxx0x-initializer-stdinitializerlist.cpp +++ b/test/SemaCXX/cxx0x-initializer-stdinitializerlist.cpp @@ -187,3 +187,7 @@ namespace rdar11948732 { XCtorInit xc = { xi, xi }; } } + +namespace PR14272 { + auto x { { 0, 0 } }; // expected-error {{cannot deduce actual type for variable 'x' with type 'auto' from initializer list}} +} diff --git a/test/SemaCXX/cxx11-crashes.cpp b/test/SemaCXX/cxx11-crashes.cpp new file mode 100644 index 0000000..bd51af1 --- /dev/null +++ b/test/SemaCXX/cxx11-crashes.cpp @@ -0,0 +1,76 @@ +// RUN: %clang_cc1 -std=c++11 -verify %s + +// rdar://12240916 stack overflow. +namespace rdar12240916 { + +struct S2 { + S2(const S2&); + S2(); +}; + +struct S { // expected-note {{not complete}} + S x; // expected-error {{incomplete type}} + S2 y; +}; + +S foo() { + S s; + return s; +} + +struct S3; // expected-note {{forward declaration}} + +struct S4 { + S3 x; // expected-error {{incomplete type}} + S2 y; +}; + +struct S3 { + S4 x; + S2 y; +}; + +S4 foo2() { + S4 s; + return s; +} + +} + +// rdar://12542261 stack overflow. 
+namespace rdar12542261 { + +template +struct check_complete +{ + static_assert(sizeof(_Tp) > 0, "Type must be complete."); +}; + + +template +class function // expected-note 2 {{candidate}} +{ +public: + template + function(_Fp, typename check_complete<_Fp>::type* = 0); // expected-note {{candidate}} +}; + +void foobar() +{ + auto LeftCanvas = new Canvas(); // expected-error {{unknown type name}} + function m_OnChange = [&, LeftCanvas]() { }; // expected-error {{no viable conversion}} +} + +} + +namespace b6981007 { + struct S {}; // expected-note 3{{candidate}} + void f() { + S s(1, 2, 3); // expected-error {{no matching}} + for (auto x : s) { + // We used to attempt to evaluate the initializer of this variable, + // and crash because it has an undeduced type. + const int &n(x); + } + } +} diff --git a/test/SemaCXX/cxx98-compat-pedantic.cpp b/test/SemaCXX/cxx98-compat-pedantic.cpp index c07f64e..18fd152 100644 --- a/test/SemaCXX/cxx98-compat-pedantic.cpp +++ b/test/SemaCXX/cxx98-compat-pedantic.cpp @@ -32,3 +32,9 @@ int *ArraySizeConversion = new int[ConvertToInt()]; // expected-warning {{implic template class ExternTemplate {}; extern template class ExternTemplate; // expected-warning {{extern templates are incompatible with C++98}} + +long long ll1 = // expected-warning {{'long long' is incompatible with C++98}} + -42LL; // expected-warning {{'long long' is incompatible with C++98}} +unsigned long long ull1 = // expected-warning {{'long long' is incompatible with C++98}} + 42ULL; // expected-warning {{'long long' is incompatible with C++98}} + diff --git a/test/SemaCXX/cxx98-compat.cpp b/test/SemaCXX/cxx98-compat.cpp index 37341f8..d497d45 100644 --- a/test/SemaCXX/cxx98-compat.cpp +++ b/test/SemaCXX/cxx98-compat.cpp @@ -20,7 +20,7 @@ class Variadic2 {}; template // expected-warning {{variadic templates are incompatible with C++98}} class Variadic3 {}; -int alignas(8) with_alignas; // expected-warning {{'alignas' is incompatible with C++98}} +alignas(8) int with_alignas; // expected-warning {{'alignas' is incompatible with C++98}} int with_attribute [[ ]]; // expected-warning {{attributes are incompatible with C++98}} void Literals() { @@ -362,3 +362,14 @@ namespace AssignOpUnion { b y; // expected-warning {{union member 'y' with a non-trivial copy assignment operator is incompatible with C++98}} }; } + +namespace rdar11736429 { + struct X { + X(const X&) = delete; // expected-warning{{deleted function definitions are incompatible with C++98}} \ + // expected-note{{because type 'rdar11736429::X' has a user-declared constructor}} + }; + + union S { + X x; // expected-warning{{union member 'x' with a non-trivial constructor is incompatible with C++98}} + }; +} diff --git a/test/SemaCXX/dcl_ambig_res.cpp b/test/SemaCXX/dcl_ambig_res.cpp index 08867c0..97780e4 100644 --- a/test/SemaCXX/dcl_ambig_res.cpp +++ b/test/SemaCXX/dcl_ambig_res.cpp @@ -1,5 +1,8 @@ // RUN: %clang_cc1 -fsyntax-only -pedantic -verify %s +// PR13819 +// REQUIRES: LP64 + // [dcl.ambig.res]p1: struct S { S(int); diff --git a/test/SemaCXX/decl-expr-ambiguity.cpp b/test/SemaCXX/decl-expr-ambiguity.cpp index 0980c40..87fd2da 100644 --- a/test/SemaCXX/decl-expr-ambiguity.cpp +++ b/test/SemaCXX/decl-expr-ambiguity.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -pedantic-errors %s +// RUN: %clang_cc1 -Wno-int-to-pointer-cast -fsyntax-only -verify -pedantic-errors %s void f() { int a; diff --git a/test/SemaCXX/decltype-98.cpp b/test/SemaCXX/decltype-98.cpp index db52565..3202dfe 100644 --- 
a/test/SemaCXX/decltype-98.cpp +++ b/test/SemaCXX/decltype-98.cpp @@ -1,3 +1,4 @@ // RUN: %clang_cc1 -std=c++98 -fsyntax-only -verify %s +// expected-no-diagnostics extern int x; __decltype(1) x = 3; diff --git a/test/SemaCXX/decltype-overloaded-functions.cpp b/test/SemaCXX/decltype-overloaded-functions.cpp index b0a43a9..c1d01fc 100644 --- a/test/SemaCXX/decltype-overloaded-functions.cpp +++ b/test/SemaCXX/decltype-overloaded-functions.cpp @@ -1,15 +1,30 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 -void f(); // expected-note{{possible target for call}} -void f(int); // expected-note{{possible target for call}} +void f(); // expected-note{{possible target for call}} +void f(int); // expected-note{{possible target for call}} decltype(f) a; // expected-error{{reference to overloaded function could not be resolved; did you mean to call it with no arguments?}} expected-error {{variable has incomplete type 'decltype(f())' (aka 'void')}} template struct S { - decltype(T::f) * f; // expected-error{{reference to overloaded function could not be resolved; did you mean to call it with no arguments?}} expected-error {{call to non-static member function without an object argument}} + decltype(T::f) * f; // expected-error {{call to non-static member function without an object argument}} }; struct K { - void f(); // expected-note{{possible target for call}} - void f(int); // expected-note{{possible target for call}} + void f(); + void f(int); }; S b; // expected-note{{in instantiation of template class 'S' requested here}} + +namespace PR13978 { + template struct S { decltype(1) f(); }; + template decltype(1) S::f() { return 1; } + + // This case is ill-formed (no diagnostic required) because the decltype + // expressions are functionally equivalent but not equivalent. It would + // be acceptable for us to reject this case. + template struct U { struct A {}; decltype(A{}) f(); }; + template decltype(typename U::A{}) U::f() {} + + // This case is valid. 
+ template struct V { struct A {}; decltype(typename V::A{}) f(); }; + template decltype(typename V::A{}) V::f() {} +} diff --git a/test/SemaCXX/decltype-pr4444.cpp b/test/SemaCXX/decltype-pr4444.cpp index 2f95075..a5ac54b 100644 --- a/test/SemaCXX/decltype-pr4444.cpp +++ b/test/SemaCXX/decltype-pr4444.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics template struct TestStruct { diff --git a/test/SemaCXX/decltype-pr4448.cpp b/test/SemaCXX/decltype-pr4448.cpp index 9d33ce7..b781b89 100644 --- a/test/SemaCXX/decltype-pr4448.cpp +++ b/test/SemaCXX/decltype-pr4448.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics template< typename T, T t, decltype(t+2) v > struct Convoluted {}; diff --git a/test/SemaCXX/decltype-this.cpp b/test/SemaCXX/decltype-this.cpp index a13416f..21b4b60 100644 --- a/test/SemaCXX/decltype-this.cpp +++ b/test/SemaCXX/decltype-this.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics template struct is_same { static const bool value = false; diff --git a/test/SemaCXX/decltype.cpp b/test/SemaCXX/decltype.cpp index a1200e0..ccde3dc 100644 --- a/test/SemaCXX/decltype.cpp +++ b/test/SemaCXX/decltype.cpp @@ -28,3 +28,18 @@ template auto f(T t) -> decltype(S(t)) { using U = S; return S(t); } + +struct B { + B(decltype(undeclared)); // expected-error {{undeclared identifier}} +}; +struct C { + C(decltype(undeclared; // expected-error {{undeclared identifier}} \ + // expected-error {{expected ')'}} expected-note {{to match this '('}} +}; + +template +class conditional { +}; + +void foo(conditional) { // expected-note 2 {{to match this '('}} expected-error {{expected ')'}} +} // expected-error {{expected function body after function declarator}} expected-error 2 {{expected '>'}} expected-error {{expected ')'}} diff --git a/test/SemaCXX/default-argument-temporaries.cpp b/test/SemaCXX/default-argument-temporaries.cpp index 3ab7bf4..c0880d5 100644 --- a/test/SemaCXX/default-argument-temporaries.cpp +++ b/test/SemaCXX/default-argument-temporaries.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics struct B { B(void* = 0); }; struct A { diff --git a/test/SemaCXX/defaulted-ctor-loop.cpp b/test/SemaCXX/defaulted-ctor-loop.cpp index 6416336..bc8dfda 100644 --- a/test/SemaCXX/defaulted-ctor-loop.cpp +++ b/test/SemaCXX/defaulted-ctor-loop.cpp @@ -9,6 +9,6 @@ struct bar { struct foo { bar b; foo() - : b(b) // expected-warning{{field is uninitialized}} + : b(b) // expected-warning{{field 'b' is uninitialized}} {} }; diff --git a/test/SemaCXX/do-while-scope.cpp b/test/SemaCXX/do-while-scope.cpp index 2602ae1..67534db 100644 --- a/test/SemaCXX/do-while-scope.cpp +++ b/test/SemaCXX/do-while-scope.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics void test() { int x; diff --git a/test/SemaCXX/empty-class-layout.cpp b/test/SemaCXX/empty-class-layout.cpp index c68f2bb..951f16c 100644 --- a/test/SemaCXX/empty-class-layout.cpp +++ b/test/SemaCXX/empty-class-layout.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -fsyntax-only -verify +// expected-no-diagnostics #define SA(n, p) int a##n[(p) ? 
1 : -1] diff --git a/test/SemaCXX/exception-spec-no-exceptions.cpp b/test/SemaCXX/exception-spec-no-exceptions.cpp index 2e18070..e26e864 100644 --- a/test/SemaCXX/exception-spec-no-exceptions.cpp +++ b/test/SemaCXX/exception-spec-no-exceptions.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -fexceptions -fobjc-exceptions %s +// expected-no-diagnostics // Note that we're specifically excluding -fcxx-exceptions in the command line above. diff --git a/test/SemaCXX/explicit.cpp b/test/SemaCXX/explicit.cpp index 4774637..5ce2cf1 100644 --- a/test/SemaCXX/explicit.cpp +++ b/test/SemaCXX/explicit.cpp @@ -40,10 +40,10 @@ namespace Conversion { void testExplicit() { // Taken from 12.3.2p2 - class Y { }; // expected-note {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'Conversion::Z' to 'const Conversion::Y &' for 1st argument}} \ - expected-note {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'Conversion::Z' to 'Conversion::Y &&' for 1st argument}} \ - expected-note {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'Conversion::Z' to 'const Conversion::Y &' for 1st argument}} \ - expected-note {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'Conversion::Z' to 'Conversion::Y &&' for 1st argument}} + class Y { }; // expected-note {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'Z' to 'const Y &' for 1st argument}} \ + expected-note {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'Z' to 'Y &&' for 1st argument}} \ + expected-note {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'Z' to 'const Y &' for 1st argument}} \ + expected-note {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'Z' to 'Y &&' for 1st argument}} struct Z { explicit operator Y() const; @@ -52,7 +52,7 @@ namespace Conversion { Z z; // 13.3.1.4p1 & 8.5p16: - Y y2 = z; // expected-error {{no viable conversion from 'Conversion::Z' to 'Conversion::Y'}} + Y y2 = z; // expected-error {{no viable conversion from 'Z' to 'Y'}} Y y3 = (Y)z; Y y4 = Y(z); Y y5 = static_cast(z); @@ -62,7 +62,7 @@ namespace Conversion { int i3 = static_cast(z); int i4(z); // 13.3.1.6p1 & 8.5.3p5: - const Y& y6 = z; // expected-error {{no viable conversion from 'Conversion::Z' to 'const Conversion::Y'}} + const Y& y6 = z; // expected-error {{no viable conversion from 'Z' to 'const Y'}} const int& y7(z); } @@ -78,7 +78,7 @@ namespace Conversion { NotBool n; (void) (1 + b); - (void) (1 + n); // expected-error {{invalid operands to binary expression ('int' and 'Conversion::NotBool')}} + (void) (1 + n); // expected-error {{invalid operands to binary expression ('int' and 'NotBool')}} // 5.3.1p9: (void) (!b); @@ -105,7 +105,7 @@ namespace Conversion { // 6.4.2p2: switch (b) {} // expected-warning {{switch condition has boolean value}} - switch (n) {} // expected-error {{switch condition type 'Conversion::NotBool' requires explicit conversion to 'bool'}} \ + switch (n) {} // expected-error {{switch condition type 'NotBool' requires explicit conversion to 'bool'}} \ expected-warning {{switch condition has boolean value}} // 6.5.1: @@ -135,7 +135,7 @@ namespace Conversion { NotInt ni; new int[i]; - new int[ni]; // expected-error {{array size expression of type 'Conversion::NotInt' requires explicit conversion to 
type 'int'}} + new int[ni]; // expected-error {{array size expression of type 'NotInt' requires explicit conversion to type 'int'}} } void testDelete() @@ -152,7 +152,7 @@ namespace Conversion { NotPtr np; delete p; - delete np; // expected-error {{cannot delete expression of type 'Conversion::NotPtr'}} + delete np; // expected-error {{cannot delete expression of type 'NotPtr'}} } void testFunctionPointer() @@ -170,6 +170,6 @@ namespace Conversion { FP fp; NotFP nfp; fp(1); - nfp(1); // expected-error {{type 'Conversion::NotFP' does not provide a call operator}} + nfp(1); // expected-error {{type 'NotFP' does not provide a call operator}} } } diff --git a/test/SemaCXX/for-range-dereference.cpp b/test/SemaCXX/for-range-dereference.cpp new file mode 100644 index 0000000..bf3187d --- /dev/null +++ b/test/SemaCXX/for-range-dereference.cpp @@ -0,0 +1,89 @@ +// RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +struct Data { }; +struct T { + Data *begin(); + Data *end(); +}; + +struct NoBegin { + Data *end(); +}; + +struct DeletedEnd : public T { + Data *begin(); + Data *end() = delete; //expected-note {{function has been explicitly marked deleted here}} +}; + +struct DeletedADLBegin { }; + +int* begin(DeletedADLBegin) = delete; //expected-note {{candidate function has been explicitly deleted}} \ + expected-note 5 {{candidate function not viable: no known conversion}} + +struct PrivateEnd { + Data *begin(); + + private: + Data *end(); // expected-note 2 {{declared private here}} +}; + +struct ADLNoEnd { }; +Data * begin(ADLNoEnd); // expected-note 6 {{candidate function not viable: no known conversion}} + +struct OverloadedStar { + T operator*(); +}; + +void f() { + T t; + for (auto i : t) { } + T *pt; + for (auto i : pt) { } // expected-error{{invalid range expression of type 'T *'; did you mean to dereference it with '*'?}} + + int arr[10]; + for (auto i : arr) { } + int (*parr)[10]; + for (auto i : parr) { }// expected-error{{invalid range expression of type 'int (*)[10]'; did you mean to dereference it with '*'?}} + + NoBegin NB; + for (auto i : NB) { }// expected-error{{range type 'NoBegin' has 'end' member but no 'begin' member}} + NoBegin *pNB; + for (auto i : pNB) { }// expected-error{{invalid range expression of type 'NoBegin *'; no viable 'begin' function available}} + NoBegin **ppNB; + for (auto i : ppNB) { }// expected-error{{invalid range expression of type 'NoBegin **'; no viable 'begin' function available}} + NoBegin *****pppppNB; + for (auto i : pppppNB) { }// expected-error{{invalid range expression of type 'NoBegin *****'; no viable 'begin' function available}} + + ADLNoEnd ANE; + for (auto i : ANE) { } // expected-error{{invalid range expression of type 'ADLNoEnd'; no viable 'end' function available}} + ADLNoEnd *pANE; + for (auto i : pANE) { } // expected-error{{invalid range expression of type 'ADLNoEnd *'; no viable 'begin' function available}} + + DeletedEnd DE; + for (auto i : DE) { } // expected-error{{attempt to use a deleted function}} \ +expected-note {{when looking up 'end' function for range expression of type 'DeletedEnd'}} + DeletedEnd *pDE; + + for (auto i : pDE) { } // expected-error {{invalid range expression of type 'DeletedEnd *'; no viable 'begin' function available}} + + PrivateEnd PE; + // FIXME: This diagnostic should be improved, as it does not specify that + // the range is invalid. 
+ for (auto i : PE) { } // expected-error{{'end' is a private member of 'PrivateEnd'}} + + PrivateEnd *pPE; + for (auto i : pPE) { }// expected-error {{invalid range expression of type 'PrivateEnd *'}} + // expected-error@-1 {{'end' is a private member of 'PrivateEnd'}} + + DeletedADLBegin DAB; + for (auto i : DAB) { } // expected-error {{call to deleted function 'begin'}}\ + expected-note {{when looking up 'begin' function for range expression of type 'DeletedADLBegin'}} + + OverloadedStar OS; + for (auto i : *OS) { } + + for (auto i : OS) { } // expected-error {{invalid range expression of type 'OverloadedStar'; did you mean to dereference it with '*'?}} + + for (Data *p : pt) { } // expected-error {{invalid range expression of type 'T *'; did you mean to dereference it with '*'?}} + // expected-error@-1 {{no viable conversion from 'Data' to 'Data *'}} + // expected-note@4 {{selected 'begin' function with iterator type 'Data *'}} +} diff --git a/test/SemaCXX/for-range-examples.cpp b/test/SemaCXX/for-range-examples.cpp index 8bda510..953c98b 100644 --- a/test/SemaCXX/for-range-examples.cpp +++ b/test/SemaCXX/for-range-examples.cpp @@ -115,7 +115,7 @@ namespace map_range { } } -#define assert(b) if (!b) { return 1; } +#define assert(b) if (!(b)) { return 1; } int main() { int total = 0; diff --git a/test/SemaCXX/for-range-no-std.cpp b/test/SemaCXX/for-range-no-std.cpp index fa42ca4..66b445e 100644 --- a/test/SemaCXX/for-range-no-std.cpp +++ b/test/SemaCXX/for-range-no-std.cpp @@ -31,10 +31,10 @@ NS::iter end(NS::NoADL); void f() { int a[] = {1, 2, 3}; for (auto b : S()) {} // ok - for (auto b : T()) {} // expected-error {{no matching function for call to 'begin'}} expected-note {{range has type}} + for (auto b : T()) {} // expected-error {{invalid range expression of type 'T'}} for (auto b : a) {} // ok for (int b : NS::ADL()) {} // ok - for (int b : NS::NoADL()) {} // expected-error {{no matching function for call to 'begin'}} expected-note {{range has type}} + for (int b : NS::NoADL()) {} // expected-error {{invalid range expression of type 'NS::NoADL'}} } void PR11601() { diff --git a/test/SemaCXX/format-strings.cpp b/test/SemaCXX/format-strings.cpp index 6b0df29..299aa81 100644 --- a/test/SemaCXX/format-strings.cpp +++ b/test/SemaCXX/format-strings.cpp @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -Wformat-nonliteral -pedantic %s +// RUN: %clang_cc1 -fsyntax-only -verify -Wformat-nonliteral -pedantic -fblocks %s #include @@ -75,3 +75,61 @@ int Foo::printf2(const char *fmt, ...) { return 0; } + + +namespace Templates { + template + void my_uninstantiated_print(const T &arg) { + printf("%d", arg); // no-warning + } + + template + void my_print(const T &arg) { + printf("%d", arg); // expected-warning {{format specifies type 'int' but the argument has type 'const char *'}} + } + + void use_my_print() { + my_print("abc"); // expected-note {{requested here}} + } + + + template + class UninstantiatedPrinter { + public: + static void print(const T &arg) { + printf("%d", arg); // no-warning + } + }; + + template + class Printer { + void format(const char *fmt, ...) __attribute__((format(printf,2,3))); + public: + + void print(const T &arg) { + format("%d", arg); // expected-warning {{format specifies type 'int' but the argument has type 'const char *'}} + } + }; + + void use_class(Printer &p) { + p.print("abc"); // expected-note {{requested here}} + } + + + extern void (^block_print)(const char * format, ...) 
__attribute__((format(printf, 1, 2))); + + template + void uninstantiated_call_block_print(const T &arg) { + block_print("%d", arg); // no-warning + } + + template + void call_block_print(const T &arg) { + block_print("%d", arg); // expected-warning {{format specifies type 'int' but the argument has type 'const char *'}} + } + + void use_block_print() { + call_block_print("abc"); // expected-note {{requested here}} + } +} + diff --git a/test/SemaCXX/friend-out-of-line.cpp b/test/SemaCXX/friend-out-of-line.cpp index 56b2daa..ab75a4f 100644 --- a/test/SemaCXX/friend-out-of-line.cpp +++ b/test/SemaCXX/friend-out-of-line.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // namespace N { diff --git a/test/SemaCXX/function-type-qual.cpp b/test/SemaCXX/function-type-qual.cpp index 73613ae..ccb5747 100644 --- a/test/SemaCXX/function-type-qual.cpp +++ b/test/SemaCXX/function-type-qual.cpp @@ -26,3 +26,6 @@ class C { void (C::*mpf)() const; cfn C::*mpg; + +// Don't crash! +void (PR14171)() const; // expected-error {{non-member function cannot have 'const' qualifier}} diff --git a/test/SemaCXX/functional-cast.cpp b/test/SemaCXX/functional-cast.cpp index 61e4da3..f8e0c46 100644 --- a/test/SemaCXX/functional-cast.cpp +++ b/test/SemaCXX/functional-cast.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// REQUIRES: LP64 // ------------ not interpreted as C-style cast ------------ diff --git a/test/SemaCXX/gnu-case-ranges.cpp b/test/SemaCXX/gnu-case-ranges.cpp index b082e3a..c613cec 100644 --- a/test/SemaCXX/gnu-case-ranges.cpp +++ b/test/SemaCXX/gnu-case-ranges.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify -Wno-covered-switch-default %s +// expected-no-diagnostics enum E { one, diff --git a/test/SemaCXX/goto2.cpp b/test/SemaCXX/goto2.cpp index 01ea031..b42a611 100644 --- a/test/SemaCXX/goto2.cpp +++ b/test/SemaCXX/goto2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics //PR9463 int subfun(const char *text) { diff --git a/test/SemaCXX/implicit-exception-spec.cpp b/test/SemaCXX/implicit-exception-spec.cpp index b29cff5..e26f985 100644 --- a/test/SemaCXX/implicit-exception-spec.cpp +++ b/test/SemaCXX/implicit-exception-spec.cpp @@ -30,20 +30,17 @@ namespace InClassInitializers { bool x = noexcept(TemplateArg()); // And within a nested class. - // FIXME: The diagnostic location is terrible here. - struct Nested { + struct Nested { // expected-error {{cannot be used by non-static data member initializer}} struct Inner { - int n = ExceptionIf::f(); - } inner; // expected-error {{cannot be used by non-static data member initializer}} + int n = ExceptionIf::f(); // expected-note {{implicit default constructor for 'InClassInitializers::Nested' first required here}} + } inner; }; - bool y = noexcept(Nested()); - bool z = noexcept(Nested::Inner()); struct Nested2 { struct Inner; int n = Inner().n; // expected-error {{cannot be used by non-static data member initializer}} struct Inner { - int n = ExceptionIf::f(); + int n = ExceptionIf::f(); } inner; }; } diff --git a/test/SemaCXX/indirect-goto.cpp b/test/SemaCXX/indirect-goto.cpp index 5b3fac4..cb2213d 100644 --- a/test/SemaCXX/indirect-goto.cpp +++ b/test/SemaCXX/indirect-goto.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace test1 { // Make sure this doesn't crash. 
diff --git a/test/SemaCXX/issue547.cpp b/test/SemaCXX/issue547.cpp index ab03a15..bfec6e0 100644 --- a/test/SemaCXX/issue547.cpp +++ b/test/SemaCXX/issue547.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s +// expected-no-diagnostics template struct classify_function { diff --git a/test/SemaCXX/lambda-expressions.cpp b/test/SemaCXX/lambda-expressions.cpp index 0fd6345..6f92373 100644 --- a/test/SemaCXX/lambda-expressions.cpp +++ b/test/SemaCXX/lambda-expressions.cpp @@ -77,18 +77,21 @@ namespace ImplicitCapture { struct G { G(); G(G&); int a; }; // expected-note 6 {{not viable}} G g; [=]() { const G* gg = &g; return gg->a; }; - [=]() { return [=]{ const G* gg = &g; return gg->a; }(); }; // expected-error {{no matching constructor for initialization of 'ImplicitCapture::G'}} - (void)^{ return [=]{ const G* gg = &g; return gg->a; }(); }; // expected-error 2 {{no matching constructor for initialization of 'const ImplicitCapture::G'}} + [=]() { return [=]{ const G* gg = &g; return gg->a; }(); }; // expected-error {{no matching constructor for initialization of 'G'}} + (void)^{ return [=]{ const G* gg = &g; return gg->a; }(); }; // expected-error 2 {{no matching constructor for initialization of 'const G'}} const int h = a; // expected-note {{declared}} []() { return h; }; // expected-error {{variable 'h' cannot be implicitly captured in a lambda with no capture-default specified}} expected-note {{lambda expression begins here}} - // The exemption for variables which can appear in constant expressions - // applies only to objects (and not to references). - // FIXME: This might be a bug in the standard. - static int i; - constexpr int &ref_i = i; // expected-note {{declared}} + // References can appear in constant expressions if they are initialized by + // reference constant expressions. + int i; + int &ref_i = i; // expected-note {{declared}} [] { return ref_i; }; // expected-error {{variable 'ref_i' cannot be implicitly captured in a lambda with no capture-default specified}} expected-note {{lambda expression begins here}} + + static int j; + int &ref_j = j; + [] { return ref_j; }; // ok } } @@ -221,3 +224,15 @@ namespace VariadicPackExpansion { template void nested2(int); // ok template void nested2(int, int); // expected-note {{in instantiation of}} } + +namespace PR13860 { + void foo() { + auto x = PR13860UndeclaredIdentifier(); // expected-error {{use of undeclared identifier 'PR13860UndeclaredIdentifier'}} + auto y = [x]() { }; + static_assert(sizeof(y), ""); + } +} + +namespace PR13854 { + auto l = [](void){}; +} diff --git a/test/SemaCXX/libstdcxx_atomic_ns_hack.cpp b/test/SemaCXX/libstdcxx_atomic_ns_hack.cpp new file mode 100644 index 0000000..4e4523f --- /dev/null +++ b/test/SemaCXX/libstdcxx_atomic_ns_hack.cpp @@ -0,0 +1,35 @@ +// RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify %s + +// libstdc++ 4.6.x contains a bug where it defines std::__atomic[0,1,2] as a +// non-inline namespace, then selects one of those namespaces and reopens it +// as inline, as a strange way of providing something like a using-directive. +// Clang has an egregious hack to work around the problem, by allowing a +// namespace to be converted from non-inline to inline in this one specific +// case. 
+ +#ifdef BE_THE_HEADER + +#pragma clang system_header + +namespace std { + namespace __atomic0 { + typedef int foobar; + } + namespace __atomic1 { + typedef void foobar; + } + + inline namespace __atomic0 {} +} + +#else + +#define BE_THE_HEADER +#include "libstdcxx_atomic_ns_hack.cpp" + +std::foobar fb; + +using T = void; // expected-note {{here}} +using T = std::foobar; // expected-error {{different types ('std::foobar' (aka 'int') vs 'void')}} + +#endif diff --git a/test/SemaCXX/libstdcxx_common_type_hack.cpp b/test/SemaCXX/libstdcxx_common_type_hack.cpp new file mode 100644 index 0000000..e9cb22f --- /dev/null +++ b/test/SemaCXX/libstdcxx_common_type_hack.cpp @@ -0,0 +1,33 @@ +// RUN: %clang_cc1 -fsyntax-only %s -std=c++11 -verify + +// This is a test for an egregious hack in Clang that works around +// an issue with GCC's implementation. std::common_type +// relies on pre-standard rules for decltype(), in which it doesn't +// produce reference types so frequently. + +#ifdef BE_THE_HEADER + +#pragma GCC system_header +namespace std { + template T &&declval(); + + template struct common_type {}; + template struct common_type { + // Under the rules in the standard, this always produces a + // reference type. + typedef decltype(true ? declval() : declval()) type; + }; +} + +#else + +#define BE_THE_HEADER +#include "libstdcxx_common_type_hack.cpp" + +using T = int; +using T = std::common_type::type; + +using U = int; // expected-note {{here}} +using U = decltype(true ? std::declval() : std::declval()); // expected-error {{different types}} + +#endif diff --git a/test/SemaCXX/libstdcxx_is_pod_hack.cpp b/test/SemaCXX/libstdcxx_is_pod_hack.cpp index 3ac2336..1ba3721 100644 --- a/test/SemaCXX/libstdcxx_is_pod_hack.cpp +++ b/test/SemaCXX/libstdcxx_is_pod_hack.cpp @@ -8,6 +8,7 @@ template struct __is_pod { + __is_pod() {} }; __is_pod ipi; @@ -28,6 +29,13 @@ struct test_is_signed { bool check_signed = test_is_signed::__is_signed; -#if __has_feature(is_pod) -# error __is_pod won't work now anyway +template struct must_be_true {}; +template<> struct must_be_true; + +void foo() { + bool b = __is_pod(int); + must_be_true<__is_pod(int)> mbt; +} +#if !__has_feature(is_pod) +# error __is_pod should still be available. 
#endif diff --git a/test/SemaCXX/local-classes.cpp b/test/SemaCXX/local-classes.cpp index 500b219..f4ca791 100644 --- a/test/SemaCXX/local-classes.cpp +++ b/test/SemaCXX/local-classes.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace PR6382 { int foo() diff --git a/test/SemaCXX/lookup-member.cpp b/test/SemaCXX/lookup-member.cpp index c75b185..39f5a15 100644 --- a/test/SemaCXX/lookup-member.cpp +++ b/test/SemaCXX/lookup-member.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace A { class String; diff --git a/test/SemaCXX/member-expr-anonymous-union.cpp b/test/SemaCXX/member-expr-anonymous-union.cpp index 6e35eb2..246afee 100644 --- a/test/SemaCXX/member-expr-anonymous-union.cpp +++ b/test/SemaCXX/member-expr-anonymous-union.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // PR5543 struct A { int x; union { int* y; float* z; }; }; struct B : A {int a;}; diff --git a/test/SemaCXX/member-expr-static.cpp b/test/SemaCXX/member-expr-static.cpp index 7ed60f7..d4c6c0b 100644 --- a/test/SemaCXX/member-expr-static.cpp +++ b/test/SemaCXX/member-expr-static.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef void (*thread_continue_t)(); extern "C" { diff --git a/test/SemaCXX/member-pointer-size.cpp b/test/SemaCXX/member-pointer-size.cpp index 3aa1eaf..8b59523 100644 --- a/test/SemaCXX/member-pointer-size.cpp +++ b/test/SemaCXX/member-pointer-size.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-unknown-unknown %s -fsyntax-only -verify // RUN: %clang_cc1 -triple i686-unknown-unknown %s -fsyntax-only -verify +// expected-no-diagnostics #include struct A; diff --git a/test/SemaCXX/missing-header.cpp b/test/SemaCXX/missing-header.cpp index 5b3915b..a1048fe 100644 --- a/test/SemaCXX/missing-header.cpp +++ b/test/SemaCXX/missing-header.cpp @@ -4,6 +4,6 @@ class AnalysisDeclContext {}; static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) { - if (const AsmStmt *AS = dyn_cast(S)) {} + if (const GCCAsmStmt *AS = dyn_cast(S)) {} bool NoReturnEdge = false; } diff --git a/test/SemaCXX/ms-exception-spec.cpp b/test/SemaCXX/ms-exception-spec.cpp index bda56f5..1be8ec2 100644 --- a/test/SemaCXX/ms-exception-spec.cpp +++ b/test/SemaCXX/ms-exception-spec.cpp @@ -1,3 +1,4 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -fms-extensions +// expected-no-diagnostics void f() throw(...) { } diff --git a/test/SemaCXX/ms-interface.cpp b/test/SemaCXX/ms-interface.cpp new file mode 100644 index 0000000..3625f70 --- /dev/null +++ b/test/SemaCXX/ms-interface.cpp @@ -0,0 +1,77 @@ +// RUN: %clang_cc1 %s -fsyntax-only -verify -fms-extensions -Wno-microsoft -std=c++11 + +__interface I1 { + // expected-error@+1 {{user-declared constructor is not permitted within an interface type}} + I1(); + // expected-error@+1 {{user-declared destructor is not permitted within an interface type}} + ~I1(); + virtual void fn1() const; + // expected-error@+1 {{operator 'operator!' 
is not permitted within an interface type}} + bool operator!(); + // expected-error@+1 {{operator 'operator int' is not permitted within an interface type}} + operator int(); + // expected-error@+1 {{nested class I1:: is not permitted within an interface type}} + struct { int a; }; + void fn2() { + struct A { }; // should be ignored: not a nested class + } +protected: // expected-error {{interface types cannot specify 'protected' access}} + typedef void void_t; + using int_t = int; +private: // expected-error {{interface types cannot specify 'private' access}} + static_assert(true, "oops"); +}; + +__interface I2 { + // expected-error@+1 {{data member 'i' is not permitted within an interface type}} + int i; + // expected-error@+1 {{static member function 'fn1' is not permitted within an interface type}} + static int fn1(); +private: // expected-error {{interface types cannot specify 'private' access}} + // expected-error@+1 {{non-public member function 'fn2' is not permitted within an interface type}} + void fn2(); +protected: // expected-error {{interface types cannot specify 'protected' access}} + // expected-error@+1 {{non-public member function 'fn3' is not permitted within an interface type}} + void fn3(); +public: + void fn4(); +}; + +// expected-error@+1 {{'final' keyword not permitted with interface types}} +__interface I3 final { +}; + +__interface I4 : I1, I2 { + void fn1() const override; + // expected-error@+1 {{'final' keyword not permitted with interface types}} + void fn2() final; +}; + +// expected-error@+1 {{interface type cannot inherit from non-public 'interface I1'}} +__interface I5 : private I1 { +}; + +template +__interface I6 : X { +}; + +struct S { }; +class C { }; +__interface I { }; + +static_assert(!__is_interface_class(S), "oops"); +static_assert(!__is_interface_class(C), "oops"); +static_assert(__is_interface_class(I), "oops"); + +// expected-error@55 {{interface type cannot inherit from 'struct S'}} +// expected-note@+1 {{in instantiation of template class 'I6' requested here}} +struct S1 : I6 { +}; + +// expected-error@55 {{interface type cannot inherit from 'class C'}} +// expected-note@+1 {{in instantiation of template class 'I6' requested here}} +class C1 : I6 { +}; + +class C2 : I6 { +}; diff --git a/test/SemaCXX/nested-name-spec.cpp b/test/SemaCXX/nested-name-spec.cpp index 4e1abc5..7239646 100644 --- a/test/SemaCXX/nested-name-spec.cpp +++ b/test/SemaCXX/nested-name-spec.cpp @@ -93,8 +93,7 @@ void f3() { } // make sure the following doesn't hit any asserts -void f4(undef::C); // expected-error {{use of undeclared identifier 'undef'}} \ - expected-error {{variable has incomplete type 'void'}} +void f4(undef::C); // expected-error {{use of undeclared identifier 'undef'}} typedef void C2::f5(int); // expected-error{{typedef declarator cannot be qualified}} @@ -160,7 +159,7 @@ namespace N { void f(); // FIXME: if we move this to a separate definition of N, things break! 
} -void ::global_func2(int) { } // expected-warning{{extra qualification on member 'global_func2'}} +void ::global_func2(int) { } // expected-error{{extra qualification on member 'global_func2'}} void N::f() { } // okay @@ -246,15 +245,15 @@ namespace PR7133 { } class CLASS { - void CLASS::foo2(); // expected-warning {{extra qualification on member 'foo2'}} + void CLASS::foo2(); // expected-error {{extra qualification on member 'foo2'}} }; namespace PR8159 { class B { }; class A { - int A::a; // expected-warning{{extra qualification on member 'a'}} - static int A::b; // expected-warning{{extra qualification on member 'b'}} + int A::a; // expected-error{{extra qualification on member 'a'}} + static int A::b; // expected-error{{extra qualification on member 'b'}} int ::c; // expected-error{{non-friend class member 'c' cannot have a qualified name}} }; } diff --git a/test/SemaCXX/new-delete-0x.cpp b/test/SemaCXX/new-delete-0x.cpp index dcc2e9b..9e3b492 100644 --- a/test/SemaCXX/new-delete-0x.cpp +++ b/test/SemaCXX/new-delete-0x.cpp @@ -27,6 +27,11 @@ void bad_news(int *ip) void good_deletes() { delete [&]{ return (int*)0; }(); - // FIXME: This appears to be legal. - delete []{ return (int*)0; }(); // unexpected-error {{expected expression}} +} + +void bad_deletes() +{ + // 'delete []' is always array delete, per [expr.delete]p1. + // FIXME: Give a better diagnostic. + delete []{ return (int*)0; }(); // expected-error {{expected expression}} } diff --git a/test/SemaCXX/new-delete-predefined-decl-2.cpp b/test/SemaCXX/new-delete-predefined-decl-2.cpp index 981476d..c2dfc77 100644 --- a/test/SemaCXX/new-delete-predefined-decl-2.cpp +++ b/test/SemaCXX/new-delete-predefined-decl-2.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s // RUN: %clang_cc1 -DQUALIFIED -fsyntax-only -verify %s +// expected-no-diagnostics // PR5904 void f0(int *ptr) { diff --git a/test/SemaCXX/new-delete-predefined-decl.cpp b/test/SemaCXX/new-delete-predefined-decl.cpp index 20b15b7..ae10065 100644 --- a/test/SemaCXX/new-delete-predefined-decl.cpp +++ b/test/SemaCXX/new-delete-predefined-decl.cpp @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -DTEMPLATE_OVERLOAD -fsyntax-only -verify %s // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics #include diff --git a/test/SemaCXX/no-warn-composite-pointer-type.cpp b/test/SemaCXX/no-warn-composite-pointer-type.cpp new file mode 100644 index 0000000..f33f60d --- /dev/null +++ b/test/SemaCXX/no-warn-composite-pointer-type.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -fsyntax-only -Wno-compare-distinct-pointer-types -verify %s +// expected-no-diagnostics +// rdar://12501960 + +void Foo(int **thing, const int **thingMax) +{ + if ((thing + 3) > thingMax) + return; +} diff --git a/test/SemaCXX/no-wchar.cpp b/test/SemaCXX/no-wchar.cpp new file mode 100644 index 0000000..291b657 --- /dev/null +++ b/test/SemaCXX/no-wchar.cpp @@ -0,0 +1,9 @@ +// RUN: %clang_cc1 -triple i386-pc-win32 -fsyntax-only -fno-wchar -verify %s +wchar_t x; // expected-error {{unknown type name 'wchar_t'}} + +typedef unsigned short wchar_t; +void foo(const wchar_t* x); + +void bar() { + foo(L"wide string literal"); +} diff --git a/test/SemaCXX/null_in_arithmetic_ops.cpp b/test/SemaCXX/null_in_arithmetic_ops.cpp index a6c0dbf..a919213 100644 --- a/test/SemaCXX/null_in_arithmetic_ops.cpp +++ b/test/SemaCXX/null_in_arithmetic_ops.cpp @@ -90,4 +90,6 @@ void f() { b = e == NULL || NULL == e || e != NULL || NULL != e; b = f == NULL || NULL == f || f != NULL || NULL != f; b = "f" == NULL || NULL == "f" || "f" 
!= NULL || NULL != "f"; + + return NULL; // expected-error{{void function 'f' should not return a value}} } diff --git a/test/SemaCXX/nullptr-98.cpp b/test/SemaCXX/nullptr-98.cpp index 0d624c2..306b203 100644 --- a/test/SemaCXX/nullptr-98.cpp +++ b/test/SemaCXX/nullptr-98.cpp @@ -1,3 +1,4 @@ // RUN: %clang_cc1 -std=c++98 -fsyntax-only -verify %s +// expected-no-diagnostics void f(void *); void g() { f(__nullptr); } diff --git a/test/SemaCXX/overload-value-dep-arg.cpp b/test/SemaCXX/overload-value-dep-arg.cpp index c1834a7..763daad 100644 --- a/test/SemaCXX/overload-value-dep-arg.cpp +++ b/test/SemaCXX/overload-value-dep-arg.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics class C { C(void*); diff --git a/test/SemaCXX/overloaded-builtin-operators-0x.cpp b/test/SemaCXX/overloaded-builtin-operators-0x.cpp index 6a5a162..bf54389 100644 --- a/test/SemaCXX/overloaded-builtin-operators-0x.cpp +++ b/test/SemaCXX/overloaded-builtin-operators-0x.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fshow-overloads=best -std=c++11 -verify %s +// expected-no-diagnostics template struct X diff --git a/test/SemaCXX/overloaded-builtin-operators.cpp b/test/SemaCXX/overloaded-builtin-operators.cpp index ac110a3..19dc338 100644 --- a/test/SemaCXX/overloaded-builtin-operators.cpp +++ b/test/SemaCXX/overloaded-builtin-operators.cpp @@ -1,4 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -fshow-overloads=best -verify %s +// REQUIRES: LP64 + struct yes; struct no; diff --git a/test/SemaCXX/overloaded-operator-decl.cpp b/test/SemaCXX/overloaded-operator-decl.cpp index 4519a2d..972e2de 100644 --- a/test/SemaCXX/overloaded-operator-decl.cpp +++ b/test/SemaCXX/overloaded-operator-decl.cpp @@ -48,3 +48,13 @@ struct PR10839 { operator int; // expected-error{{'operator int' cannot be the name of a variable or data member}} int operator+; // expected-error{{'operator+' cannot be the name of a variable or data member}} }; + +namespace PR14120 { + struct A { + static void operator()(int& i) { ++i; } // expected-error{{overloaded 'operator()' cannot be a static member function}} + }; + void f() { + int i = 0; + A()(i); + } +} diff --git a/test/SemaCXX/pragma-pack.cpp b/test/SemaCXX/pragma-pack.cpp index 5c1d5c6..e468cce 100644 --- a/test/SemaCXX/pragma-pack.cpp +++ b/test/SemaCXX/pragma-pack.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i686-apple-darwin9 -fsyntax-only -verify %s +// expected-no-diagnostics namespace rdar8745206 { diff --git a/test/SemaCXX/pragma-unused.cpp b/test/SemaCXX/pragma-unused.cpp index c9ddffa..c9eaab6 100644 --- a/test/SemaCXX/pragma-unused.cpp +++ b/test/SemaCXX/pragma-unused.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -Wunused-parameter -Wunused -verify %s +// expected-no-diagnostics struct S { void m(int x, int y) { diff --git a/test/SemaCXX/pragma-visibility.cpp b/test/SemaCXX/pragma-visibility.cpp index e3ef97a..18c59c8 100644 --- a/test/SemaCXX/pragma-visibility.cpp +++ b/test/SemaCXX/pragma-visibility.cpp @@ -21,3 +21,10 @@ void f() { #pragma GCC visibility push(protected) #pragma GCC visibility pop } + +namespace pr13662 { +#pragma GCC visibility push(hidden) + template class __attribute__((__visibility__("default"))) foo; + class bar { template friend class foo; }; +#pragma GCC visibility pop +} diff --git a/test/SemaCXX/prefetch-enum.cpp b/test/SemaCXX/prefetch-enum.cpp index 3c77dae..5457bbe 100644 --- a/test/SemaCXX/prefetch-enum.cpp +++ b/test/SemaCXX/prefetch-enum.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only %s -verify +// 
expected-no-diagnostics // PR5679 enum X { A = 3 }; diff --git a/test/SemaCXX/primary-base.cpp b/test/SemaCXX/primary-base.cpp index a6cbbad..0b6aaef 100644 --- a/test/SemaCXX/primary-base.cpp +++ b/test/SemaCXX/primary-base.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics class A { virtual void f(); }; class B : virtual A { }; diff --git a/test/SemaCXX/ptrtomember-overload-resolution.cpp b/test/SemaCXX/ptrtomember-overload-resolution.cpp index 787e330..85ed0aa 100644 --- a/test/SemaCXX/ptrtomember-overload-resolution.cpp +++ b/test/SemaCXX/ptrtomember-overload-resolution.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics // 13.3.3.2 Ranking implicit conversion sequences // conversion of A::* to B::* is better than conversion of A::* to C::*, diff --git a/test/SemaCXX/qualified-member-enum.cpp b/test/SemaCXX/qualified-member-enum.cpp index 83b0a591..750821b 100644 --- a/test/SemaCXX/qualified-member-enum.cpp +++ b/test/SemaCXX/qualified-member-enum.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // Check that this doesn't crash. struct A { diff --git a/test/SemaCXX/references.cpp b/test/SemaCXX/references.cpp index 028c690..4f3dab0 100644 --- a/test/SemaCXX/references.cpp +++ b/test/SemaCXX/references.cpp @@ -136,4 +136,4 @@ namespace PR8608 { } // The following crashed trying to recursively evaluate the LValue. -const int &do_not_crash = do_not_crash; // expected-warning{{variable 'do_not_crash' is uninitialized when used within its own initialization}} +const int &do_not_crash = do_not_crash; // expected-warning{{reference 'do_not_crash' is not yet bound to a value when used within its own initialization}} diff --git a/test/SemaCXX/scope-check.cpp b/test/SemaCXX/scope-check.cpp index b659de0..8fd23f4 100644 --- a/test/SemaCXX/scope-check.cpp +++ b/test/SemaCXX/scope-check.cpp @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -fblocks %s -Wno-unreachable-code -// RUN: %clang_cc1 -fsyntax-only -verify -fblocks -std=gnu++11 %s -Wno-unreachable-code +// RUN: %clang_cc1 -fsyntax-only -verify -fblocks -fcxx-exceptions %s -Wno-unreachable-code +// RUN: %clang_cc1 -fsyntax-only -verify -fblocks -fcxx-exceptions -std=gnu++11 %s -Wno-unreachable-code namespace test0 { struct D { ~D(); }; @@ -174,14 +174,14 @@ namespace test9 { // http://llvm.org/PR10462 namespace PR10462 { -enum MyEnum { - something_valid, - something_invalid -}; - -bool recurse() { - MyEnum K; - switch (K) { // expected-warning {{enumeration value 'something_invalid' not handled in switch}} + enum MyEnum { + something_valid, + something_invalid + }; + + bool recurse() { + MyEnum K; + switch (K) { // expected-warning {{enumeration value 'something_invalid' not handled in switch}} case something_valid: case what_am_i_thinking: // expected-error {{use of undeclared identifier}} int *X = 0; @@ -189,21 +189,88 @@ bool recurse() { } break; + } } } - namespace test10 { - -int test() { - static void *ps[] = { &&a0 }; - goto *&&a0; // expected-error {{goto into protected scope}} - int a = 3; // expected-note {{jump bypasses variable initialization}} - a0: - return 0; + int test() { + static void *ps[] = { &&a0 }; + goto *&&a0; // expected-error {{goto into protected scope}} + int a = 3; // expected-note {{jump bypasses variable initialization}} + a0: + return 0; + } +} + +// pr13812 +namespace test11 { + struct C { + C(int x); + ~C(); + }; + void f(void **ip) { + static void *ips[] = { &&l0 }; 
+ l0: // expected-note {{possible target of indirect goto}} + C c0 = 42; // expected-note {{jump exits scope of variable with non-trivial destructor}} + goto *ip; // expected-error {{indirect goto might cross protected scopes}} + } +} + +namespace test12 { + struct C { + C(int x); + ~C(); + }; + void f(void **ip) { + static void *ips[] = { &&l0 }; + const C c0 = 17; + l0: // expected-note {{possible target of indirect goto}} + const C &c1 = 42; // expected-note {{jump exits scope of variable with non-trivial destructor}} + const C &c2 = c0; + goto *ip; // expected-error {{indirect goto might cross protected scopes}} + } } +namespace test13 { + struct C { + C(int x); + ~C(); + int i; + }; + void f(void **ip) { + static void *ips[] = { &&l0 }; + l0: // expected-note {{possible target of indirect goto}} + const int &c1 = C(1).i; // expected-note {{jump exits scope of variable with non-trivial destructor}} + goto *ip; // expected-error {{indirect goto might cross protected scopes}} + } } +namespace test14 { + struct C { + C(int x); + ~C(); + operator int&() const; + }; + void f(void **ip) { + static void *ips[] = { &&l0 }; + l0: + // no warning since the C temporary is destructed before the goto. + const int &c1 = C(1); + goto *ip; + } } +// PR14225 +namespace test15 { + void f1() try { + goto x; // expected-error {{goto into protected scope}} + } catch(...) { // expected-note {{jump bypasses initialization of catch block}} + x: ; + } + void f2() try { // expected-note {{jump bypasses initialization of try block}} + x: ; + } catch(...) { + goto x; // expected-error {{goto into protected scope}} + } +} diff --git a/test/SemaCXX/short-wchar-sign.cpp b/test/SemaCXX/short-wchar-sign.cpp index 9a177c0..7ce21c5 100644 --- a/test/SemaCXX/short-wchar-sign.cpp +++ b/test/SemaCXX/short-wchar-sign.cpp @@ -1,6 +1,7 @@ // RUN: %clang_cc1 -triple i386-mingw32 -fsyntax-only -pedantic -verify %s // RUN: %clang_cc1 -fshort-wchar -fsyntax-only -pedantic -verify %s // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fsyntax-only -pedantic -verify %s +// expected-no-diagnostics // Check that short wchar_t is unsigned, and that regular wchar_t is not. int test[(wchar_t(-1) +int fallthrough_compatibility_macro_history_template(int n) { + switch (N * n) { + case 0: + n = n * 20; +#define MACRO_WITH_HISTORY2 [[clang::fallthrough]] + case 1: // expected-warning{{unannotated fall-through between switch labels}} expected-note{{insert 'MACRO_WITH_HISTORY2;' to silence this warning}} expected-note{{insert 'break;' to avoid fall-through}} + ; +#undef MACRO_WITH_HISTORY2 +#define MACRO_WITH_HISTORY2 3333333 + } + return n; +} + +#undef MACRO_WITH_HISTORY2 +#define MACRO_WITH_HISTORY2 4444444 +#undef MACRO_WITH_HISTORY2 +#define MACRO_WITH_HISTORY2 5555555 + +void f() { + fallthrough_compatibility_macro_history_template<1>(0); // expected-note{{in instantiation of function template specialization 'fallthrough_compatibility_macro_history_template<1>' requested here}} +} diff --git a/test/SemaCXX/switch-implicit-fallthrough-per-method.cpp b/test/SemaCXX/switch-implicit-fallthrough-per-method.cpp index 7c52e51..009c818 100644 --- a/test/SemaCXX/switch-implicit-fallthrough-per-method.cpp +++ b/test/SemaCXX/switch-implicit-fallthrough-per-method.cpp @@ -42,7 +42,7 @@ void unscoped(int n) { switch (n % 2) { case 0: // FIXME: This should be typo-corrected, probably. 
- [[fallthrough]]; + [[fallthrough]]; // expected-warning{{unknown attribute 'fallthrough' ignored}} case 2: // expected-warning{{unannotated fall-through}} expected-note{{clang::fallthrough}} expected-note{{break;}} [[clang::fallthrough]]; case 1: diff --git a/test/SemaCXX/tag-ambig.cpp b/test/SemaCXX/tag-ambig.cpp index 6403cf3..bbd17e7 100644 --- a/test/SemaCXX/tag-ambig.cpp +++ b/test/SemaCXX/tag-ambig.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // typedef struct Point Point; diff --git a/test/SemaCXX/trailing-return-0x.cpp b/test/SemaCXX/trailing-return-0x.cpp index c219b77..462b4fa 100644 --- a/test/SemaCXX/trailing-return-0x.cpp +++ b/test/SemaCXX/trailing-return-0x.cpp @@ -69,3 +69,19 @@ X xx; only p2 = xx.f(0L); only p3 = xx.g(0L, 1.0); only p4 = xx.get_nested().h(0L, 1.0, 3.14f); + +namespace PR12053 { + template + auto f1(T t) -> decltype(f1(t)) {} // expected-note{{candidate template ignored}} + + void test_f1() { + f1(0); // expected-error{{no matching function for call to 'f1'}} + } + + template + auto f2(T t) -> decltype(f2(&t)) {} // expected-note{{candidate template ignored}} + + void test_f2() { + f2(0); // expected-error{{no matching function for call to 'f2'}} + } +} diff --git a/test/SemaCXX/trivial-constructor.cpp b/test/SemaCXX/trivial-constructor.cpp index bda206b..ed5b526 100644 --- a/test/SemaCXX/trivial-constructor.cpp +++ b/test/SemaCXX/trivial-constructor.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics struct T1 { }; static_assert(__has_trivial_constructor(T1), "T1 has trivial constructor!"); diff --git a/test/SemaCXX/trivial-destructor.cpp b/test/SemaCXX/trivial-destructor.cpp index db415cf..d3acec6 100644 --- a/test/SemaCXX/trivial-destructor.cpp +++ b/test/SemaCXX/trivial-destructor.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -std=c++11 +// expected-no-diagnostics struct T1 { }; static_assert(__has_trivial_destructor(T1), "T1 has trivial destructor!"); diff --git a/test/SemaCXX/typo-correction.cpp b/test/SemaCXX/typo-correction.cpp index 893f08a..c21ef51 100644 --- a/test/SemaCXX/typo-correction.cpp +++ b/test/SemaCXX/typo-correction.cpp @@ -116,14 +116,14 @@ void TestRedecl::add_in(int i) {} // expected-error{{out-of-line definition of ' // Test the improved typo correction for the Parser::ParseCastExpr => // Sema::ActOnIdExpression => Sema::DiagnoseEmptyLookup call path. -class SomeNetMessage; +class SomeNetMessage; // expected-note 2{{'SomeNetMessage'}} class Message {}; void foo(Message&); void foo(SomeNetMessage&); void doit(void *data) { Message somenetmsg; // expected-note{{'somenetmsg' declared here}} foo(somenetmessage); // expected-error{{use of undeclared identifier 'somenetmessage'; did you mean 'somenetmsg'?}} - foo((somenetmessage)data); // expected-error{{use of undeclared identifier 'somenetmessage'; did you mean 'SomeNetMessage'?}} + foo((somenetmessage)data); // expected-error{{unknown type name 'somenetmessage'; did you mean 'SomeNetMessage'?}} expected-error{{incomplete type}} } // Test the typo-correction callback in BuildRecoveryCallExpr. 
@@ -155,7 +155,7 @@ void Test3() { struct R {}; bool begun(R); void RangeTest() { - for (auto b : R()) {} // expected-error {{use of undeclared identifier 'begin'}} expected-note {{range has type}} + for (auto b : R()) {} // expected-error {{invalid range expression of type 'R'}} } // PR 12019 - Avoid infinite mutual recursion in DiagnoseInvalidRedeclaration @@ -172,7 +172,7 @@ void Child::add_types(int value) {} // expected-error{{out-of-line definition of // Sema::ActOnIdExpression by Parser::ParseCastExpression to allow type names as // potential corrections for template arguments. namespace clash { -class ConstructExpr {}; // expected-note{{'clash::ConstructExpr' declared here}} +class ConstructExpr {}; // expected-note 2{{'clash::ConstructExpr' declared here}} } class ClashTool { bool HaveConstructExpr(); @@ -180,7 +180,7 @@ class ClashTool { void test() { ConstructExpr *expr = // expected-error{{unknown type name 'ConstructExpr'; did you mean 'clash::ConstructExpr'?}} - getExprAs(); // expected-error{{use of undeclared identifier 'ConstructExpr'; did you mean 'clash::ConstructExpr'?}} + getExprAs(); // expected-error{{unknown type name 'ConstructExpr'; did you mean 'clash::ConstructExpr'?}} } }; @@ -220,6 +220,8 @@ namespace PR13051 { } } +inf f(doulbe); // expected-error{{'int'}} expected-error{{'double'}} + namespace PR6325 { class foo { }; // expected-note{{'foo' declared here}} // Note that for this example (pulled from the PR), if keywords are not excluded diff --git a/test/SemaCXX/uninitialized.cpp b/test/SemaCXX/uninitialized.cpp index 385548b..f55f10f 100644 --- a/test/SemaCXX/uninitialized.cpp +++ b/test/SemaCXX/uninitialized.cpp @@ -114,6 +114,19 @@ void setupA(bool x) { A a17(a17.get2()); // expected-warning {{variable 'a17' is uninitialized when used within its own initialization}} A a18 = x ? a18 : a17; // expected-warning {{variable 'a18' is uninitialized when used within its own initialization}} A a19 = getA(x ? a19 : a17); // expected-warning {{variable 'a19' is uninitialized when used within its own initialization}} + A a20{a20}; // expected-warning {{variable 'a20' is uninitialized when used within its own initialization}} + A a21 = {a21}; // expected-warning {{variable 'a21' is uninitialized when used within its own initialization}} + + // FIXME: Make the local uninitialized warning consistant with the global + // uninitialized checking. + A *a22 = new A(a22->count); // expected-warning {{variable 'a22' is uninitialized when used within its own initialization}} + A *a23 = new A(a23->ONE); // expected-warning {{variable 'a23' is uninitialized when used within its own initialization}} + A *a24 = new A(a24->TWO); // expected-warning {{variable 'a24' is uninitialized when used within its own initialization}} + A *a25 = new A(a25->zero()); // expected-warning {{variable 'a25' is uninitialized when used within its own initialization}} + + A *a26 = new A(a26->get()); // expected-warning {{variable 'a26' is uninitialized when used within its own initialization}} + A *a27 = new A(a27->get2()); // expected-warning {{variable 'a27' is uninitialized when used within its own initialization}} + A *a28 = new A(a28->num); // expected-warning {{variable 'a28' is uninitialized when used within its own initialization}} } bool x; @@ -138,6 +151,17 @@ A a16(&a16.num); // expected-warning {{variable 'a16' is uninitialized when use A a17(a17.get2()); // expected-warning {{variable 'a17' is uninitialized when used within its own initialization}} A a18 = x ? 
a18 : a17; // expected-warning {{variable 'a18' is uninitialized when used within its own initialization}} A a19 = getA(x ? a19 : a17); // expected-warning {{variable 'a19' is uninitialized when used within its own initialization}} +A a20{a20}; // expected-warning {{variable 'a20' is uninitialized when used within its own initialization}} +A a21 = {a21}; // expected-warning {{variable 'a21' is uninitialized when used within its own initialization}} + +A *a22 = new A(a22->count); +A *a23 = new A(a23->ONE); +A *a24 = new A(a24->TWO); +A *a25 = new A(a25->zero()); + +A *a26 = new A(a26->get()); // expected-warning {{variable 'a26' is uninitialized when used within its own initialization}} +A *a27 = new A(a27->get2()); // expected-warning {{variable 'a27' is uninitialized when used within its own initialization}} +A *a28 = new A(a28->num); // expected-warning {{variable 'a28' is uninitialized when used within its own initialization}} struct B { // POD struct. @@ -150,6 +174,11 @@ B getB(int x) { return B(); }; B getB(int *x) { return B(); }; B getB(B *b) { return B(); }; +B* getPtrB() { return 0; }; +B* getPtrB(int x) { return 0; }; +B* getPtrB(int *x) { return 0; }; +B* getPtrB(B **b) { return 0; }; + void setupB() { B b1; B b2(b1); @@ -166,18 +195,56 @@ void setupB() { B b8 = getB(b8.x); // expected-warning {{variable 'b8' is uninitialized when used within its own initialization}} B b9 = getB(b9.y); // expected-warning {{variable 'b9' is uninitialized when used within its own initialization}} B b10 = getB(-b10.x); // expected-warning {{variable 'b10' is uninitialized when used within its own initialization}} + + B* b11 = 0; + B* b12(b11); + B* b13 = getPtrB(); + B* b14 = getPtrB(&b14); + + (void) b12; + (void) b13; + + B* b15 = getPtrB(b15->x); // expected-warning {{variable 'b15' is uninitialized when used within its own initialization}} + B* b16 = getPtrB(b16->y); // expected-warning {{variable 'b16' is uninitialized when used within its own initialization}} + + B b17 = { b17.x = 5, b17.y = 0 }; + B b18 = { b18.x + 1, b18.y }; // expected-warning 2{{variable 'b18' is uninitialized when used within its own initialization}} } +B b1; +B b2(b1); +B b3 = { 5, &b3.x }; +B b4 = getB(); +B b5 = getB(&b5); +B b6 = getB(&b6.x); + +B b7(b7); // expected-warning {{variable 'b7' is uninitialized when used within its own initialization}} +B b8 = getB(b8.x); // expected-warning {{variable 'b8' is uninitialized when used within its own initialization}} +B b9 = getB(b9.y); // expected-warning {{variable 'b9' is uninitialized when used within its own initialization}} +B b10 = getB(-b10.x); // expected-warning {{variable 'b10' is uninitialized when used within its own initialization}} + +B* b11 = 0; +B* b12(b11); +B* b13 = getPtrB(); +B* b14 = getPtrB(&b14); + +B* b15 = getPtrB(b15->x); // expected-warning {{variable 'b15' is uninitialized when used within its own initialization}} +B* b16 = getPtrB(b16->y); // expected-warning {{variable 'b16' is uninitialized when used within its own initialization}} + +B b17 = { b17.x = 5, b17.y = 0 }; +B b18 = { b18.x + 1, b18.y }; // expected-warning 2{{variable 'b18' is uninitialized when used within its own initialization}} + + // Also test similar constructs in a field's initializer. 
struct S { int x; void *ptr; - S(bool (*)[1]) : x(x) {} // expected-warning {{field is uninitialized when used here}} - S(bool (*)[2]) : x(x + 1) {} // expected-warning {{field is uninitialized when used here}} - S(bool (*)[3]) : x(x + x) {} // expected-warning 2{{field is uninitialized when used here}} - S(bool (*)[4]) : x(static_cast(x) + 1) {} // expected-warning {{field is uninitialized when used here}} - S(bool (*)[5]) : x(foo(x)) {} // expected-warning {{field is uninitialized when used here}} + S(bool (*)[1]) : x(x) {} // expected-warning {{field 'x' is uninitialized when used here}} + S(bool (*)[2]) : x(x + 1) {} // expected-warning {{field 'x' is uninitialized when used here}} + S(bool (*)[3]) : x(x + x) {} // expected-warning 2{{field 'x' is uninitialized when used here}} + S(bool (*)[4]) : x(static_cast(x) + 1) {} // expected-warning {{field 'x' is uninitialized when used here}} + S(bool (*)[5]) : x(foo(x)) {} // expected-warning {{field 'x' is uninitialized when used here}} // These don't actually require the value of x and so shouldn't warn. S(char (*)[1]) : x(sizeof(x)) {} // rdar://8610363 @@ -262,8 +329,8 @@ namespace { C c; D(char (*)[1]) : c(c.b.a.A1) {} D(char (*)[2]) : c(c.b.a.A2()) {} - D(char (*)[3]) : c(c.b.a.A3) {} // expected-warning {{field is uninitialized when used here}} - D(char (*)[4]) : c(c.b.a.A4()) {} // expected-warning {{field is uninitialized when used here}} + D(char (*)[3]) : c(c.b.a.A3) {} // expected-warning {{field 'c' is uninitialized when used here}} + D(char (*)[4]) : c(c.b.a.A4()) {} // expected-warning {{field 'c' is uninitialized when used here}} // c::a is static, so it is already initialized D(char (*)[5]) : c(c.a.A1) {} @@ -274,21 +341,21 @@ namespace { struct E { int a, b, c; - E(char (*)[1]) : a(a ? b : c) {} // expected-warning {{field is uninitialized when used here}} - E(char (*)[2]) : a(b ? a : a) {} // expected-warning 2{{field is uninitialized when used here}} - E(char (*)[3]) : a(b ? (a) : c) {} // expected-warning {{field is uninitialized when used here}} - E(char (*)[4]) : a(b ? c : (a+c)) {} // expected-warning {{field is uninitialized when used here}} + E(char (*)[1]) : a(a ? b : c) {} // expected-warning {{field 'a' is uninitialized when used here}} + E(char (*)[2]) : a(b ? a : a) {} // expected-warning 2{{field 'a' is uninitialized when used here}} + E(char (*)[3]) : a(b ? (a) : c) {} // expected-warning {{field 'a' is uninitialized when used here}} + E(char (*)[4]) : a(b ? c : (a+c)) {} // expected-warning {{field 'a' is uninitialized when used here}} E(char (*)[5]) : a(b ? 
c : b) {} - E(char (*)[6]) : a(a ?: a) {} // expected-warning 2{{field is uninitialized when used here}} - E(char (*)[7]) : a(b ?: a) {} // expected-warning {{field is uninitialized when used here}} - E(char (*)[8]) : a(a ?: c) {} // expected-warning {{field is uninitialized when used here}} + E(char (*)[6]) : a(a ?: a) {} // expected-warning 2{{field 'a' is uninitialized when used here}} + E(char (*)[7]) : a(b ?: a) {} // expected-warning {{field 'a' is uninitialized when used here}} + E(char (*)[8]) : a(a ?: c) {} // expected-warning {{field 'a' is uninitialized when used here}} E(char (*)[9]) : a(b ?: c) {} E(char (*)[10]) : a((a, a, b)) {} - E(char (*)[11]) : a((c + a, a + 1, b)) {} // expected-warning 2{{field is uninitialized when used here}} - E(char (*)[12]) : a((b + c, c, a)) {} // expected-warning {{field is uninitialized when used here}} - E(char (*)[13]) : a((a, a, a, a)) {} // expected-warning {{field is uninitialized when used here}} + E(char (*)[11]) : a((c + a, a + 1, b)) {} // expected-warning 2{{field 'a' is uninitialized when used here}} + E(char (*)[12]) : a((b + c, c, a)) {} // expected-warning {{field 'a' is uninitialized when used here}} + E(char (*)[13]) : a((a, a, a, a)) {} // expected-warning {{field 'a' is uninitialized when used here}} E(char (*)[14]) : a((b, c, c)) {} }; @@ -304,16 +371,16 @@ namespace { struct G { F f1, f2; F *f3, *f4; - G(char (*)[1]) : f1(f1) {} // expected-warning {{field is uninitialized when used here}} + G(char (*)[1]) : f1(f1) {} // expected-warning {{field 'f1' is uninitialized when used here}} G(char (*)[2]) : f2(f1) {} G(char (*)[3]) : f2(F()) {} - G(char (*)[4]) : f1(f1.*ptr) {} // expected-warning {{field is uninitialized when used here}} + G(char (*)[4]) : f1(f1.*ptr) {} // expected-warning {{field 'f1' is uninitialized when used here}} G(char (*)[5]) : f2(f1.*ptr) {} - G(char (*)[6]) : f3(f3) {} // expected-warning {{field is uninitialized when used here}} - G(char (*)[7]) : f3(f3->*f_ptr) {} // expected-warning {{field is uninitialized when used here}} - G(char (*)[8]) : f3(new F(f3->*ptr)) {} // expected-warning {{field is uninitialized when used here}} + G(char (*)[6]) : f3(f3) {} // expected-warning {{field 'f3' is uninitialized when used here}} + G(char (*)[7]) : f3(f3->*f_ptr) {} // expected-warning {{field 'f3' is uninitialized when used here}} + G(char (*)[8]) : f3(new F(f3->*ptr)) {} // expected-warning {{field 'f3' is uninitialized when used here}} }; } @@ -379,21 +446,53 @@ namespace statics { } } +namespace in_class_initializers { + struct S { + S() : a(a + 1) {} // expected-warning{{field 'a' is uninitialized when used here}} + int a = 42; // Note: because a is in a member initializer list, this initialization is ignored. + }; + + struct T { + T() : b(a + 1) {} // No-warning. + int a = 42; + int b; + }; + + struct U { + U() : a(b + 1), b(a + 1) {} // FIXME: Warn here. + int a = 42; // Note: because a and b are in the member initializer list, these initializers are ignored. + int b = 1; + }; +} + namespace references { - int &a = a; // expected-warning{{variable 'a' is uninitialized when used within its own initialization}} + int &a = a; // expected-warning{{reference 'a' is not yet bound to a value when used within its own initialization}} + int &b(b); // expected-warning{{reference 'b' is not yet bound to a value when used within its own initialization}} + int &c = a ? 
b : c; // expected-warning{{reference 'c' is not yet bound to a value when used within its own initialization}} + int &d{d}; // expected-warning{{reference 'd' is not yet bound to a value when used within its own initialization}} struct S { - S() : a(a) {} // expected-warning{{field is uninitialized when used here}} + S() : a(a) {} // expected-warning{{reference 'a' is not yet bound to a value when used here}} int &a; }; void f() { - int &a = a; // expected-warning{{variable 'a' is uninitialized when used within its own initialization}} + int &a = a; // expected-warning{{reference 'a' is not yet bound to a value when used within its own initialization}} + int &b(b); // expected-warning{{reference 'b' is not yet bound to a value when used within its own initialization}} + int &c = a ? b : c; // expected-warning{{reference 'c' is not yet bound to a value when used within its own initialization}} + int &d{d}; // expected-warning{{reference 'd' is not yet bound to a value when used within its own initialization}} } struct T { T() : a(b), b(a) {} // FIXME: Warn here. int &a, &b; - int &c = c; // FIXME: Warn here. + int &c = c; // expected-warning{{reference 'c' is not yet bound to a value when used here}} + }; + + int x; + struct U { + U() : b(a) {} // No-warning. + int &a = x; + int &b; }; } diff --git a/test/SemaCXX/unknown-type-name.cpp b/test/SemaCXX/unknown-type-name.cpp index 893e0cc..ce5972b 100644 --- a/test/SemaCXX/unknown-type-name.cpp +++ b/test/SemaCXX/unknown-type-name.cpp @@ -6,6 +6,8 @@ namespace N { }; typedef Wibble foo; + + int zeppelin; // expected-note{{declared here}} } using namespace N; @@ -15,6 +17,13 @@ void f() { foo::bar = 4; // expected-error{{no member named 'bar' in 'N::Wibble'}} } +int f(foo::bar); // expected-error{{no type named 'bar' in 'N::Wibble'}} + +int f(doulbe); // expected-error{{did you mean 'double'?}} + +int fun(zapotron); // expected-error{{unknown type name 'zapotron'}} +int var(zepelin); // expected-error{{did you mean 'zeppelin'?}} + template struct A { typedef T type; @@ -59,6 +68,20 @@ void f(int, T::type, int) { } // expected-error{{missing 'typename'}} template void f(int, T::type x, char) { } // expected-error{{missing 'typename'}} +int *p; + +// FIXME: We should assume that 'undeclared' is a type, not a parameter name +// here, and produce an 'unknown type name' diagnostic instead. +int f1(undeclared, int); // expected-error{{requires a type specifier}} + +int f2(undeclared, 0); // expected-error{{undeclared identifier}} + +int f3(undeclared *p, int); // expected-error{{unknown type name 'undeclared'}} + +int f4(undeclared *p, 0); // expected-error{{undeclared identifier}} + +int *test(UnknownType *fool) { return 0; } // expected-error{{unknown type name 'UnknownType'}} + template int A::n(T::value); // ok template A::type // expected-error{{missing 'typename'}} diff --git a/test/SemaCXX/unused-functions.cpp b/test/SemaCXX/unused-functions.cpp index 3598082..d05ff4d 100644 --- a/test/SemaCXX/unused-functions.cpp +++ b/test/SemaCXX/unused-functions.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fsyntax-only -Wunused -verify %s +// expected-no-diagnostics static int foo(int x) { return x; } diff --git a/test/SemaCXX/unused.cpp b/test/SemaCXX/unused.cpp index 54898c8..fbaf8c8 100644 --- a/test/SemaCXX/unused.cpp +++ b/test/SemaCXX/unused.cpp @@ -34,3 +34,30 @@ namespace derefvolatile { (void)y; // don't warn here, because it's a common pattern. 
} } + +// +namespace AnonObject { + struct Foo { + Foo(const char* const message); + ~Foo(); + }; + void f() { + Foo("Hello World!"); // don't warn + int(1); // expected-warning {{expression result unused}} + } +} + +// Test that constructing an object (which may have side effects) with +// constructor arguments which are dependent doesn't produce an unused value +// warning. +namespace UnresolvedLookup { + struct Foo { + Foo(int i, int j); + }; + template + struct Bar { + void f(T t) { + Foo(t, 0); // no warning + } + }; +} diff --git a/test/SemaCXX/using-decl-pr4441.cpp b/test/SemaCXX/using-decl-pr4441.cpp index 39a446f..da21db3 100644 --- a/test/SemaCXX/using-decl-pr4441.cpp +++ b/test/SemaCXX/using-decl-pr4441.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace A { struct B { }; diff --git a/test/SemaCXX/using-decl-pr4450.cpp b/test/SemaCXX/using-decl-pr4450.cpp index 4f929ad..ba81e93 100644 --- a/test/SemaCXX/using-decl-pr4450.cpp +++ b/test/SemaCXX/using-decl-pr4450.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics namespace A { void g(); diff --git a/test/SemaCXX/value-dependent-exprs.cpp b/test/SemaCXX/value-dependent-exprs.cpp index 2017ffa..b26ca25 100644 --- a/test/SemaCXX/value-dependent-exprs.cpp +++ b/test/SemaCXX/value-dependent-exprs.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify %s +// expected-no-diagnostics template class C0 { diff --git a/test/SemaCXX/vararg-default-arg.cpp b/test/SemaCXX/vararg-default-arg.cpp index 3c8e41c..27c2bbb 100644 --- a/test/SemaCXX/vararg-default-arg.cpp +++ b/test/SemaCXX/vararg-default-arg.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -fsyntax-only +// expected-no-diagnostics // PR5462 void f1(void); diff --git a/test/SemaCXX/vararg-non-pod.cpp b/test/SemaCXX/vararg-non-pod.cpp index 86b560e..da06d95 100644 --- a/test/SemaCXX/vararg-non-pod.cpp +++ b/test/SemaCXX/vararg-non-pod.cpp @@ -123,3 +123,21 @@ int t9(int n) { // Make sure the error works in potentially-evaluated sizeof return (int)sizeof(*(Helper(Foo()), (int (*)[n])0)); // expected-warning{{cannot pass object of non-POD type}} } + +// PR14057 +namespace t10 { + struct F { + F(); + }; + + struct S { + void operator()(F, ...); + }; + + void foo() { + S s; + F f; + s.operator()(f); + s(f); + } +} diff --git a/test/SemaCXX/vector.cpp b/test/SemaCXX/vector.cpp index 82245ac..4d2d064 100644 --- a/test/SemaCXX/vector.cpp +++ b/test/SemaCXX/vector.cpp @@ -267,3 +267,14 @@ void test_mixed_vector_types(fltx4 f, intx4 n, flte4 g, flte4 m) { (void)(n *= m); (void)(n /= m); } + +template void test_pseudo_dtor_tmpl(T *ptr) { + ptr->~T(); + (*ptr).~T(); +} + +void test_pseudo_dtor(fltx4 *f) { + f->~fltx4(); + (*f).~fltx4(); + test_pseudo_dtor_tmpl(f); +} diff --git a/test/SemaCXX/warn-assignment-condition.cpp b/test/SemaCXX/warn-assignment-condition.cpp index 04f2e79..09084e3 100644 --- a/test/SemaCXX/warn-assignment-condition.cpp +++ b/test/SemaCXX/warn-assignment-condition.cpp @@ -133,14 +133,14 @@ void test2() { namespace rdar9027658 { template -void f() { - if ((T::g == 3)) { } // expected-warning {{equality comparison with extraneous parentheses}} \ +void f(T t) { + if ((t.g == 3)) { } // expected-warning {{equality comparison with extraneous parentheses}} \ // expected-note {{use '=' to turn this equality comparison into an assignment}} \ // expected-note {{remove extraneous parentheses around the comparison to silence this warning}} } struct S { int g; }; void test() { - f(); // expected-note 
{{in instantiation}} + f(S()); // expected-note {{in instantiation}} } } diff --git a/test/SemaCXX/warn-c++11-extensions.cpp b/test/SemaCXX/warn-c++11-extensions.cpp new file mode 100644 index 0000000..8f35171 --- /dev/null +++ b/test/SemaCXX/warn-c++11-extensions.cpp @@ -0,0 +1,7 @@ +// RUN: %clang_cc1 -fsyntax-only -std=c++98 -Wc++11-extensions -verify %s + +long long ll1 = // expected-warning {{'long long' is a C++11 extension}} + -42LL; // expected-warning {{'long long' is a C++11 extension}} +unsigned long long ull1 = // expected-warning {{'long long' is a C++11 extension}} + 42ULL; // expected-warning {{'long long' is a C++11 extension}} + diff --git a/test/SemaCXX/warn-enum-compare.cpp b/test/SemaCXX/warn-enum-compare.cpp index 52639e7..c68275e 100644 --- a/test/SemaCXX/warn-enum-compare.cpp +++ b/test/SemaCXX/warn-enum-compare.cpp @@ -39,8 +39,8 @@ void test () { while (b == c); while (B1 == name1::B2); while (B2 == name2::B1); - while (x == AnonAA); - while (AnonBB == y); + while (x == AnonAA); // expected-warning {{comparison of constant 42 with expression of type 'Foo' is always false}} + while (AnonBB == y); // expected-warning {{comparison of constant 45 with expression of type 'Bar' is always false}} while (AnonAA == AnonAB); while (AnonAB == AnonBA); while (AnonBB == AnonAA); diff --git a/test/SemaCXX/warn-implicit-conversion-floating-point-to-bool.cpp b/test/SemaCXX/warn-implicit-conversion-floating-point-to-bool.cpp new file mode 100644 index 0000000..1d8037a --- /dev/null +++ b/test/SemaCXX/warn-implicit-conversion-floating-point-to-bool.cpp @@ -0,0 +1,24 @@ +// RUN: %clang_cc1 -verify -fsyntax-only %s + +float foof(float x); +double food(double x); +void foo(bool b, float f); + +void bar() { + + float c = 1.7; + bool b = c; + + double e = 1.7; + b = e; + + b = foof(4.0); + + b = foof(c < 1); // expected-warning {{implicit conversion turns floating-point number into bool: 'float' to 'bool'}} + + b = food(e < 2); // expected-warning {{implicit conversion turns floating-point number into bool: 'double' to 'bool'}} + + foo(c, b); // expected-warning {{implicit conversion turns floating-point number into bool: 'float' to 'bool'}} + foo(c, c); + +} diff --git a/test/SemaCXX/warn-missing-variable-declarations.cpp b/test/SemaCXX/warn-missing-variable-declarations.cpp new file mode 100644 index 0000000..12af973 --- /dev/null +++ b/test/SemaCXX/warn-missing-variable-declarations.cpp @@ -0,0 +1,43 @@ +// RUN: %clang -Wmissing-variable-declarations -fsyntax-only -Xclang -verify %s + +// Variable declarations that should trigger a warning. +int vbad1; // expected-warning{{no previous extern declaration for non-static variable 'vbad1'}} +int vbad2 = 10; // expected-warning{{no previous extern declaration for non-static variable 'vbad2'}} + +namespace x { + int vbad3; // expected-warning{{no previous extern declaration for non-static variable 'vbad3'}} +} + +// Variable declarations that should not trigger a warning. +static int vgood1; +extern int vgood2; +int vgood2; +static struct { + int mgood1; +} vgood3; + +// Functions should never trigger a warning. +void fgood1(void); +void fgood2(void) { + int lgood1; + static int lgood2; +} +static void fgood3(void) { + int lgood3; + static int lgood4; +} + +// Structures, namespaces and classes should be unaffected. 
+struct sgood1 { + int mgood2; +}; +struct { + int mgood3; +} sgood2; +class CGood1 { + static int MGood1; +}; +int CGood1::MGood1; +namespace { + int mgood4; +} diff --git a/test/SemaCXX/warn-new-overaligned-2.cpp b/test/SemaCXX/warn-new-overaligned-2.cpp index 5505009..e643015 100644 --- a/test/SemaCXX/warn-new-overaligned-2.cpp +++ b/test/SemaCXX/warn-new-overaligned-2.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple=x86_64-pc-linux-gnu -Wover-aligned -verify %s +// expected-no-diagnostics // This test verifies that we don't warn when the global operator new is // overridden. That's why we can't merge this with the other test file. diff --git a/test/SemaCXX/warn-overloaded-virtual.cpp b/test/SemaCXX/warn-overloaded-virtual.cpp index 8e2b671..9b0f5aa 100644 --- a/test/SemaCXX/warn-overloaded-virtual.cpp +++ b/test/SemaCXX/warn-overloaded-virtual.cpp @@ -64,3 +64,59 @@ public: static void f() {} }; } + +namespace ThreeLayer { +struct A { + virtual void f(); +}; + +struct B: A { + void f(); + void f(int); +}; + +struct C: B { + void f(int); + using A::f; +}; +} + +namespace UnbalancedVirtual { +struct Base { + virtual void func(); +}; + +struct Derived1: virtual Base { + virtual void func(); +}; + +struct Derived2: virtual Base { +}; + +struct MostDerived: Derived1, Derived2 { + void func(int); + void func(); +}; +} + +namespace UnbalancedVirtual2 { +struct Base { + virtual void func(); +}; + +struct Derived1: virtual Base { + virtual void func(); +}; + +struct Derived2: virtual Base { +}; + +struct Derived3: Derived1 { + virtual void func(); +}; + +struct MostDerived: Derived3, Derived2 { + void func(int); + void func(); +}; +} diff --git a/test/SemaCXX/warn-self-comparisons.cpp b/test/SemaCXX/warn-self-comparisons.cpp index 620be19..2e8d130 100644 --- a/test/SemaCXX/warn-self-comparisons.cpp +++ b/test/SemaCXX/warn-self-comparisons.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics void f(int (&array1)[2], int (&array2)[2]) { if (array1 == array2) { } // no warning diff --git a/test/SemaCXX/warn-thread-safety-analysis.cpp b/test/SemaCXX/warn-thread-safety-analysis.cpp index 17a1931..bd555ac 100644 --- a/test/SemaCXX/warn-thread-safety-analysis.cpp +++ b/test/SemaCXX/warn-thread-safety-analysis.cpp @@ -24,10 +24,6 @@ __attribute__ ((shared_locks_required(__VA_ARGS__))) #define NO_THREAD_SAFETY_ANALYSIS __attribute__ ((no_thread_safety_analysis)) -//-----------------------------------------// -// Helper fields -//-----------------------------------------// - class __attribute__((lockable)) Mutex { public: @@ -60,6 +56,15 @@ class SCOPED_LOCKABLE ReleasableMutexLock { }; +// The universal lock, written "*", allows checking to be selectively turned +// off for a particular piece of code. +void beginNoWarnOnReads() SHARED_LOCK_FUNCTION("*"); +void endNoWarnOnReads() UNLOCK_FUNCTION("*"); +void beginNoWarnOnWrites() EXCLUSIVE_LOCK_FUNCTION("*"); +void endNoWarnOnWrites() UNLOCK_FUNCTION("*"); + + +// For testing handling of smart pointers. template class SmartPtr { public: @@ -76,6 +81,15 @@ private: }; +// For testing destructor calls and cleanup. 
+class MyString { +public: + MyString(const char* s); + ~MyString(); +}; + + + Mutex sls_mu; Mutex sls_mu2 __attribute__((acquired_after(sls_mu))); @@ -529,7 +543,8 @@ void late_bad_0() { LateFoo fooB; fooA.mu.Lock(); fooB.a = 5; // \ - // expected-warning{{writing variable 'a' requires locking 'fooB.mu' exclusively}} + // expected-warning{{writing variable 'a' requires locking 'fooB.mu' exclusively}} \ + // expected-note{{found near match 'fooA.mu'}} fooA.mu.Unlock(); } @@ -549,7 +564,8 @@ void late_bad_2() { LateBar BarA; BarA.FooPointer->mu.Lock(); BarA.Foo.a = 2; // \ - // expected-warning{{writing variable 'a' requires locking 'BarA.Foo.mu' exclusively}} + // expected-warning{{writing variable 'a' requires locking 'BarA.Foo.mu' exclusively}} \ + // expected-note{{found near match 'BarA.FooPointer->mu'}} BarA.FooPointer->mu.Unlock(); } @@ -557,7 +573,8 @@ void late_bad_3() { LateBar BarA; BarA.Foo.mu.Lock(); BarA.FooPointer->a = 2; // \ - // expected-warning{{writing variable 'a' requires locking 'BarA.FooPointer->mu' exclusively}} + // expected-warning{{writing variable 'a' requires locking 'BarA.FooPointer->mu' exclusively}} \ + // expected-note{{found near match 'BarA.Foo.mu'}} BarA.Foo.mu.Unlock(); } @@ -565,7 +582,8 @@ void late_bad_4() { LateBar BarA; BarA.Foo.mu.Lock(); BarA.Foo2.a = 2; // \ - // expected-warning{{writing variable 'a' requires locking 'BarA.Foo2.mu' exclusively}} + // expected-warning{{writing variable 'a' requires locking 'BarA.Foo2.mu' exclusively}} \ + // expected-note{{found near match 'BarA.Foo.mu'}} BarA.Foo.mu.Unlock(); } @@ -1233,7 +1251,9 @@ void func() { b1->MyLock(); b1->a_ = 5; - b2->a_ = 3; // expected-warning {{writing variable 'a_' requires locking 'b2->mu1_' exclusively}} + b2->a_ = 3; // \ + // expected-warning {{writing variable 'a_' requires locking 'b2->mu1_' exclusively}} \ + // expected-note {{found near match 'b1->mu1_'}} b2->MyLock(); b2->MyUnlock(); b1->MyUnlock(); @@ -1263,11 +1283,13 @@ int func(int i) int x; b3->mu1_.Lock(); res = b1.a_ + b3->b_; // expected-warning {{reading variable 'a_' requires locking 'b1.mu1_'}} \ - // expected-warning {{writing variable 'res' requires locking 'mu' exclusively}} + // expected-warning {{writing variable 'res' requires locking 'mu' exclusively}} \ + // expected-note {{found near match 'b3->mu1_'}} *p = i; // expected-warning {{reading variable 'p' requires locking 'mu'}} \ // expected-warning {{writing the value pointed to by 'p' requires locking 'mu' exclusively}} b1.a_ = res + b3->b_; // expected-warning {{reading variable 'res' requires locking 'mu'}} \ - // expected-warning {{writing variable 'a_' requires locking 'b1.mu1_' exclusively}} + // expected-warning {{writing variable 'a_' requires locking 'b1.mu1_' exclusively}} \ + // expected-note {{found near match 'b3->mu1_'}} b3->b_ = *b1.q; // expected-warning {{reading the value pointed to by 'q' requires locking 'mu'}} b3->mu1_.Unlock(); b1.b_ = res; // expected-warning {{reading variable 'res' requires locking 'mu'}} @@ -1292,8 +1314,12 @@ class Foo { child->Func(new_foo); // There shouldn't be any warning here as the // acquired lock is not in child. 
- child->bar(7); // expected-warning {{calling function 'bar' requires exclusive lock on 'child->lock_'}} - child->a_ = 5; // expected-warning {{writing variable 'a_' requires locking 'child->lock_' exclusively}} + child->bar(7); // \ + // expected-warning {{calling function 'bar' requires exclusive lock on 'child->lock_'}} \ + // expected-note {{found near match 'lock_'}} + child->a_ = 5; // \ + // expected-warning {{writing variable 'a_' requires locking 'child->lock_' exclusively}} \ + // expected-note {{found near match 'lock_'}} lock_.Unlock(); } @@ -1491,7 +1517,8 @@ namespace substitution_test { DataLocker dlr; dlr.lockData(d1); foo(d2); // \ - // expected-warning {{calling function 'foo' requires exclusive lock on 'd2->mu'}} + // expected-warning {{calling function 'foo' requires exclusive lock on 'd2->mu'}} \ + // expected-note {{found near match 'd1->mu'}} dlr.unlockData(d1); } }; @@ -1516,22 +1543,6 @@ namespace constructor_destructor_tests { } -namespace invalid_lock_expression_test { - -class LOCKABLE MyLockable { -public: - MyLockable() __attribute__((exclusive_lock_function)) { } - ~MyLockable() { } -}; - -// create an empty lock expression -void foo() { - MyLockable lock; // \ - // expected-warning {{cannot resolve lock expression}} -} - -} // end namespace invalid_lock_expression_test - namespace template_member_test { struct S { int n; }; @@ -1638,6 +1649,8 @@ void bar() { }; // end namespace FunctionAttrTest +namespace TryLockTest { + struct TestTryLock { Mutex mu; int a GUARDED_BY(mu); @@ -1734,8 +1747,36 @@ struct TestTryLock { b = !b; } } + + // Test merge of exclusive trylock + void foo11() { + if (cond) { + if (!mu.TryLock()) + return; + } + else { + mu.Lock(); + } + a = 10; + mu.Unlock(); + } + + // Test merge of shared trylock + void foo12() { + if (cond) { + if (!mu.ReaderTryLock()) + return; + } + else { + mu.ReaderLock(); + } + int i = a; + mu.Unlock(); + } }; // end TestTrylock +} // end namespace TrylockTest + namespace TestTemplateAttributeInstantiation { @@ -1829,7 +1870,8 @@ void test() { f1.mu_.Unlock(); bt.barTD(&f1); // \ - // expected-warning {{calling function 'barTD' requires exclusive lock on 'f1.mu_'}} + // expected-warning {{calling function 'barTD' requires exclusive lock on 'f1.mu_'}} \ + // expected-note {{found near match 'bt.fooBase.mu_'}} bt.fooBase.mu_.Unlock(); bt.fooBaseT.mu_.Unlock(); @@ -2235,27 +2277,32 @@ void test() { bar.getFoo().mu_.Lock(); bar.getFooey().a = 0; // \ - // expected-warning {{writing variable 'a' requires locking 'bar.getFooey().mu_' exclusively}} + // expected-warning {{writing variable 'a' requires locking 'bar.getFooey().mu_' exclusively}} \ + // expected-note {{found near match 'bar.getFoo().mu_'}} bar.getFoo().mu_.Unlock(); bar.getFoo2(a).mu_.Lock(); bar.getFoo2(b).a = 0; // \ - // expected-warning {{writing variable 'a' requires locking 'bar.getFoo2(b).mu_' exclusively}} + // expected-warning {{writing variable 'a' requires locking 'bar.getFoo2(b).mu_' exclusively}} \ + // expected-note {{found near match 'bar.getFoo2(a).mu_'}} bar.getFoo2(a).mu_.Unlock(); bar.getFoo3(a, b).mu_.Lock(); bar.getFoo3(a, c).a = 0; // \ - // expected-warning {{writing variable 'a' requires locking 'bar.getFoo3(a,c).mu_' exclusively}} + // expected-warning {{writing variable 'a' requires locking 'bar.getFoo3(a,c).mu_' exclusively}} \ + // expected-note {{'bar.getFoo3(a,b).mu_'}} bar.getFoo3(a, b).mu_.Unlock(); getBarFoo(bar, a).mu_.Lock(); getBarFoo(bar, b).a = 0; // \ - // expected-warning {{writing variable 'a' requires locking 
'getBarFoo(bar,b).mu_' exclusively}} + // expected-warning {{writing variable 'a' requires locking 'getBarFoo(bar,b).mu_' exclusively}} \ + // expected-note {{'getBarFoo(bar,a).mu_'}} getBarFoo(bar, a).mu_.Unlock(); (a > 0 ? fooArray[1] : fooArray[b]).mu_.Lock(); (a > 0 ? fooArray[b] : fooArray[c]).a = 0; // \ - // expected-warning {{writing variable 'a' requires locking '((a#_)#_#fooArray[b]).mu_' exclusively}} + // expected-warning {{writing variable 'a' requires locking '((a#_)#_#fooArray[b]).mu_' exclusively}} \ + // expected-note {{'((a#_)#_#fooArray[_]).mu_'}} (a > 0 ? fooArray[1] : fooArray[b]).mu_.Unlock(); } @@ -2314,7 +2361,9 @@ void test1(Foo* f1, Foo* f2) { f1->a = 0; f1->foo(); - f1->foo2(f2); // expected-warning {{calling function 'foo2' requires exclusive lock on 'f2->mu_'}} + f1->foo2(f2); // \ + // expected-warning {{calling function 'foo2' requires exclusive lock on 'f2->mu_'}} \ + // expected-note {{found near match 'f1->mu_'}} Foo::getMu(f2)->Lock(); f1->foo2(f2); @@ -2354,7 +2403,9 @@ void test2(Bar* b1, Bar* b2) { b1->b = 0; b1->bar(); - b1->bar2(b2); // expected-warning {{calling function 'bar2' requires exclusive lock on 'b2->mu_'}} + b1->bar2(b2); // \ + // expected-warning {{calling function 'bar2' requires exclusive lock on 'b2->mu_'}} \ + // // expected-note {{found near match 'b1->mu_'}} b2->getMu()->Lock(); b1->bar2(b2); @@ -3119,3 +3170,545 @@ void test() { } // end namespace ExistentialPatternMatching + +namespace StringIgnoreTest { + +class Foo { +public: + Mutex mu_; + void lock() EXCLUSIVE_LOCK_FUNCTION(""); + void unlock() UNLOCK_FUNCTION(""); + void goober() EXCLUSIVE_LOCKS_REQUIRED(""); + void roober() SHARED_LOCKS_REQUIRED(""); +}; + + +class Bar : public Foo { +public: + void bar(Foo* f) { + f->unlock(); + f->goober(); + f->roober(); + f->lock(); + }; +}; + +} // end namespace StringIgnoreTest + + +namespace LockReturnedScopeFix { + +class Base { +protected: + struct Inner; + bool c; + + const Mutex& getLock(const Inner* i); + + void lockInner (Inner* i) EXCLUSIVE_LOCK_FUNCTION(getLock(i)); + void unlockInner(Inner* i) UNLOCK_FUNCTION(getLock(i)); + void foo(Inner* i) EXCLUSIVE_LOCKS_REQUIRED(getLock(i)); + + void bar(Inner* i); +}; + + +struct Base::Inner { + Mutex lock_; + void doSomething() EXCLUSIVE_LOCKS_REQUIRED(lock_); +}; + + +const Mutex& Base::getLock(const Inner* i) LOCK_RETURNED(i->lock_) { + return i->lock_; +} + + +void Base::foo(Inner* i) { + i->doSomething(); +} + +void Base::bar(Inner* i) { + if (c) { + i->lock_.Lock(); + unlockInner(i); + } + else { + lockInner(i); + i->lock_.Unlock(); + } +} + +} // end namespace LockReturnedScopeFix + + +namespace TrylockWithCleanups { + +struct Foo { + Mutex mu_; + int a GUARDED_BY(mu_); +}; + +Foo* GetAndLockFoo(const MyString& s) + EXCLUSIVE_TRYLOCK_FUNCTION(true, &Foo::mu_); + +static void test() { + Foo* lt = GetAndLockFoo("foo"); + if (!lt) return; + int a = lt->a; + lt->mu_.Unlock(); +} + +} // end namespace TrylockWithCleanups + + +namespace UniversalLock { + +class Foo { + Mutex mu_; + bool c; + + int a GUARDED_BY(mu_); + void r_foo() SHARED_LOCKS_REQUIRED(mu_); + void w_foo() EXCLUSIVE_LOCKS_REQUIRED(mu_); + + void test1() { + int b; + + beginNoWarnOnReads(); + b = a; + r_foo(); + endNoWarnOnReads(); + + beginNoWarnOnWrites(); + a = 0; + w_foo(); + endNoWarnOnWrites(); + } + + // don't warn on joins with universal lock + void test2() { + if (c) { + beginNoWarnOnWrites(); + } + a = 0; // \ + // expected-warning {{writing variable 'a' requires locking 'mu_' exclusively}} + 
endNoWarnOnWrites(); // \ + // expected-warning {{unlocking '*' that was not locked}} + } + + + // make sure the universal lock joins properly + void test3() { + if (c) { + mu_.Lock(); + beginNoWarnOnWrites(); + } + else { + beginNoWarnOnWrites(); + mu_.Lock(); + } + a = 0; + endNoWarnOnWrites(); + mu_.Unlock(); + } + + + // combine universal lock with other locks + void test4() { + beginNoWarnOnWrites(); + mu_.Lock(); + mu_.Unlock(); + endNoWarnOnWrites(); + + mu_.Lock(); + beginNoWarnOnWrites(); + endNoWarnOnWrites(); + mu_.Unlock(); + + mu_.Lock(); + beginNoWarnOnWrites(); + mu_.Unlock(); + endNoWarnOnWrites(); + } +}; + +} // end namespace UniversalLock + + +namespace TemplateLockReturned { + +template +class BaseT { +public: + virtual void baseMethod() = 0; + Mutex* get_mutex() LOCK_RETURNED(mutex_) { return &mutex_; } + + Mutex mutex_; + int a GUARDED_BY(mutex_); +}; + + +class Derived : public BaseT { +public: + void baseMethod() EXCLUSIVE_LOCKS_REQUIRED(get_mutex()) { + a = 0; + } +}; + +} // end namespace TemplateLockReturned + + +namespace ExprMatchingBugFix { + +class Foo { +public: + Mutex mu_; +}; + + +class Bar { +public: + bool c; + Foo* foo; + Bar(Foo* f) : foo(f) { } + + struct Nested { + Foo* foo; + Nested(Foo* f) : foo(f) { } + + void unlockFoo() UNLOCK_FUNCTION(&Foo::mu_); + }; + + void test(); +}; + + +void Bar::test() { + foo->mu_.Lock(); + if (c) { + Nested *n = new Nested(foo); + n->unlockFoo(); + } + else { + foo->mu_.Unlock(); + } +} + +}; // end namespace ExprMatchingBugfix + + +namespace ComplexNameTest { + +class Foo { +public: + static Mutex mu_; + + Foo() EXCLUSIVE_LOCKS_REQUIRED(mu_) { } + ~Foo() EXCLUSIVE_LOCKS_REQUIRED(mu_) { } + + int operator[](int i) EXCLUSIVE_LOCKS_REQUIRED(mu_) { return 0; } +}; + +class Bar { +public: + static Mutex mu_; + + Bar() LOCKS_EXCLUDED(mu_) { } + ~Bar() LOCKS_EXCLUDED(mu_) { } + + int operator[](int i) LOCKS_EXCLUDED(mu_) { return 0; } +}; + + +void test1() { + Foo f; // expected-warning {{calling function 'Foo' requires exclusive lock on 'mu_'}} + int a = f[0]; // expected-warning {{calling function 'operator[]' requires exclusive lock on 'mu_'}} +} // expected-warning {{calling function '~Foo' requires exclusive lock on 'mu_'}} + + +void test2() { + Bar::mu_.Lock(); + { + Bar b; // expected-warning {{cannot call function 'Bar' while mutex 'mu_' is locked}} + int a = b[0]; // expected-warning {{cannot call function 'operator[]' while mutex 'mu_' is locked}} + } // expected-warning {{cannot call function '~Bar' while mutex 'mu_' is locked}} + Bar::mu_.Unlock(); +} + +}; // end namespace ComplexNameTest + + +namespace UnreachableExitTest { + +class FemmeFatale { +public: + FemmeFatale(); + ~FemmeFatale() __attribute__((noreturn)); +}; + +void exitNow() __attribute__((noreturn)); +void exitDestruct(const MyString& ms) __attribute__((noreturn)); + +Mutex fatalmu_; + +void test1() EXCLUSIVE_LOCKS_REQUIRED(fatalmu_) { + exitNow(); +} + +void test2() EXCLUSIVE_LOCKS_REQUIRED(fatalmu_) { + FemmeFatale femme; +} + +bool c; + +void test3() EXCLUSIVE_LOCKS_REQUIRED(fatalmu_) { + if (c) { + exitNow(); + } + else { + FemmeFatale femme; + } +} + +void test4() EXCLUSIVE_LOCKS_REQUIRED(fatalmu_) { + exitDestruct("foo"); +} + +} // end namespace UnreachableExitTest + + +namespace VirtualMethodCanonicalizationTest { + +class Base { +public: + virtual Mutex* getMutex() = 0; +}; + +class Base2 : public Base { +public: + Mutex* getMutex(); +}; + +class Base3 : public Base2 { +public: + Mutex* getMutex(); +}; + +class Derived : public Base3 { 
+public: + Mutex* getMutex(); // overrides Base::getMutex() +}; + +void baseFun(Base *b) EXCLUSIVE_LOCKS_REQUIRED(b->getMutex()) { } + +void derivedFun(Derived *d) EXCLUSIVE_LOCKS_REQUIRED(d->getMutex()) { + baseFun(d); +} + +} // end namespace VirtualMethodCanonicalizationTest + + +namespace TemplateFunctionParamRemapTest { + +template +struct Cell { + T dummy_; + Mutex* mu_; +}; + +class Foo { +public: + template + void elr(Cell* c) __attribute__((exclusive_locks_required(c->mu_))); + + void test(); +}; + +template +void Foo::elr(Cell* c1) { } + +void Foo::test() { + Cell cell; + elr(&cell); // \ + // expected-warning {{calling function 'elr' requires exclusive lock on 'cell.mu_'}} +} + + +template +void globalELR(Cell* c) __attribute__((exclusive_locks_required(c->mu_))); + +template +void globalELR(Cell* c1) { } + +void globalTest() { + Cell cell; + globalELR(&cell); // \ + // expected-warning {{calling function 'globalELR' requires exclusive lock on 'cell.mu_'}} +} + + +template +void globalELR2(Cell* c) __attribute__((exclusive_locks_required(c->mu_))); + +// second declaration +template +void globalELR2(Cell* c2); + +template +void globalELR2(Cell* c3) { } + +// re-declaration after definition +template +void globalELR2(Cell* c4); + +void globalTest2() { + Cell cell; + globalELR2(&cell); // \ + // expected-warning {{calling function 'globalELR2' requires exclusive lock on 'cell.mu_'}} +} + + +template +class FooT { +public: + void elr(Cell* c) __attribute__((exclusive_locks_required(c->mu_))); +}; + +template +void FooT::elr(Cell* c1) { } + +void testFooT() { + Cell cell; + FooT foo; + foo.elr(&cell); // \ + // expected-warning {{calling function 'elr' requires exclusive lock on 'cell.mu_'}} +} + +} // end namespace TemplateFunctionParamRemapTest + + +namespace SelfConstructorTest { + +class SelfLock { +public: + SelfLock() EXCLUSIVE_LOCK_FUNCTION(mu_); + ~SelfLock() UNLOCK_FUNCTION(mu_); + + void foo() EXCLUSIVE_LOCKS_REQUIRED(mu_); + + Mutex mu_; +}; + +class LOCKABLE SelfLock2 { +public: + SelfLock2() EXCLUSIVE_LOCK_FUNCTION(); + ~SelfLock2() UNLOCK_FUNCTION(); + + void foo() EXCLUSIVE_LOCKS_REQUIRED(this); +}; + + +void test() { + SelfLock s; + s.foo(); +} + +void test2() { + SelfLock2 s2; + s2.foo(); +} + +} // end namespace SelfConstructorTest + + +namespace MultipleAttributeTest { + +class Foo { + Mutex mu1_; + Mutex mu2_; + int a GUARDED_BY(mu1_); + int b GUARDED_BY(mu2_); + int c GUARDED_BY(mu1_) GUARDED_BY(mu2_); + int* d PT_GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_); + + void foo1() EXCLUSIVE_LOCKS_REQUIRED(mu1_) + EXCLUSIVE_LOCKS_REQUIRED(mu2_); + void foo2() SHARED_LOCKS_REQUIRED(mu1_) + SHARED_LOCKS_REQUIRED(mu2_); + void foo3() LOCKS_EXCLUDED(mu1_) + LOCKS_EXCLUDED(mu2_); + void lock() EXCLUSIVE_LOCK_FUNCTION(mu1_) + EXCLUSIVE_LOCK_FUNCTION(mu2_); + void readerlock() EXCLUSIVE_LOCK_FUNCTION(mu1_) + EXCLUSIVE_LOCK_FUNCTION(mu2_); + void unlock() UNLOCK_FUNCTION(mu1_) + UNLOCK_FUNCTION(mu2_); + bool trylock() EXCLUSIVE_TRYLOCK_FUNCTION(true, mu1_) + EXCLUSIVE_TRYLOCK_FUNCTION(true, mu2_); + bool readertrylock() SHARED_TRYLOCK_FUNCTION(true, mu1_) + SHARED_TRYLOCK_FUNCTION(true, mu2_); + + void test(); +}; + + +void Foo::foo1() { + a = 1; + b = 2; +} + +void Foo::foo2() { + int result = a + b; +} + +void Foo::foo3() { } +void Foo::lock() { } +void Foo::readerlock() { } +void Foo::unlock() { } +bool Foo::trylock() { return true; } +bool Foo::readertrylock() { return true; } + + +void Foo::test() { + mu1_.Lock(); + foo1(); // expected-warning {{}} + c = 0; // expected-warning 
{{}} + *d = 0; // expected-warning {{}} + mu1_.Unlock(); + + mu1_.ReaderLock(); + foo2(); // expected-warning {{}} + int x = c; // expected-warning {{}} + int y = *d; // expected-warning {{}} + mu1_.Unlock(); + + mu2_.Lock(); + foo3(); // expected-warning {{}} + mu2_.Unlock(); + + lock(); + a = 0; + b = 0; + unlock(); + + readerlock(); + int z = a + b; + unlock(); + + if (trylock()) { + a = 0; + b = 0; + unlock(); + } + + if (readertrylock()) { + int zz = a + b; + unlock(); + } +} + + +} // end namespace MultipleAttributeTest + + diff --git a/test/SemaCXX/warn-thread-safety-parsing.cpp b/test/SemaCXX/warn-thread-safety-parsing.cpp index 8aa6a91..df9415c 100644 --- a/test/SemaCXX/warn-thread-safety-parsing.cpp +++ b/test/SemaCXX/warn-thread-safety-parsing.cpp @@ -1255,7 +1255,7 @@ public: void foo4(FooLate *f) EXCLUSIVE_LOCKS_REQUIRED(f->mu); static void foo5() EXCLUSIVE_LOCKS_REQUIRED(mu); // \ - // expected-error {{'this' cannot be implicitly used in a static member function declaration}} + // expected-error {{invalid use of member 'mu' in static member function}} template void foo6() EXCLUSIVE_LOCKS_REQUIRED(T::statmu) { } @@ -1440,3 +1440,50 @@ void Foo::bar(Mutex* mu) LOCKS_EXCLUDED(mu) { } // \ } // end namespace InvalidDeclTest + +namespace StaticScopeTest { + +class FooStream; + +class Foo { + mutable Mutex mu; + int a GUARDED_BY(mu); + + static int si GUARDED_BY(mu); // \ + // expected-error {{invalid use of non-static data member 'mu'}} + + static void foo() EXCLUSIVE_LOCKS_REQUIRED(mu); // \ + // expected-error {{invalid use of member 'mu' in static member function}} + + friend FooStream& operator<<(FooStream& s, const Foo& f) + EXCLUSIVE_LOCKS_REQUIRED(mu); // \ + // expected-error {{invalid use of non-static data member 'mu'}} +}; + + +} // end namespace StaticScopeTest + + +namespace FunctionAttributesInsideClass_ICE_Test { + +class Foo { +public: + /* Originally found when parsing foo() as an ordinary method after the + * the following: + + template + void syntaxErrorMethod(int i) { + if (i) { + foo( + } + } + */ + + void method() { + void foo() EXCLUSIVE_LOCKS_REQUIRED(mu); // \ + // expected-error {{use of undeclared identifier 'mu'}} + } +}; + +} // end namespace FunctionAttributesInsideClass_ICE_Test + diff --git a/test/SemaCXX/warn-unique-enum.cpp b/test/SemaCXX/warn-unique-enum.cpp deleted file mode 100644 index 59a1278..0000000 --- a/test/SemaCXX/warn-unique-enum.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// RUN: %clang_cc1 %s -fsyntax-only -verify -Wunique-enum -enum A { A1 = 1, A2 = 1, A3 = 1 }; // expected-warning {{all elements of 'A' are initialized with literals to value 1}} \ -// expected-note {{initialize the last element with the previous element to silence this warning}} -enum { B1 = 1, B2 = 1, B3 = 1 }; // no warning -enum C { // expected-warning {{all elements of 'C' are initialized with literals to value 1}} - C1 = true, - C2 = true // expected-note {{initialize the last element with the previous element to silence this warning}} -}; -enum D { D1 = 5, D2 = 5L, D3 = 5UL, D4 = 5LL, D5 = 5ULL }; // expected-warning {{all elements of 'D' are initialized with literals to value 5}} \ -// expected-note {{initialize the last element with the previous element to silence this warning}} - -// Don't warn on enums with less than 2 elements. -enum E { E1 = 4 }; -enum F { F1 }; -enum G {}; - -// Don't warn when integer literals do not initialize the elements. 
-enum H { H1 = 4, H_MAX = H1, H_MIN = H1 }; -enum I { I1 = H1, I2 = 4 }; -enum J { J1 = 4, J2 = I2 }; -enum K { K1, K2, K3, K4 }; - -// Don't crash or warn on this one. -// rdar://11875995 -enum L { - L1 = 0x8000000000000000ULL, L2 = 0x0000000000000001ULL -}; diff --git a/test/SemaCXX/warn-unused-filescoped.cpp b/test/SemaCXX/warn-unused-filescoped.cpp index dbff4b0..ad896b5 100644 --- a/test/SemaCXX/warn-unused-filescoped.cpp +++ b/test/SemaCXX/warn-unused-filescoped.cpp @@ -1,4 +1,5 @@ -// RUN: %clang_cc1 -fsyntax-only -verify -Wunused -Wunused-member-function %s +// RUN: %clang_cc1 -fsyntax-only -verify -Wunused -Wunused-member-function -std=c++98 %s +// RUN: %clang_cc1 -fsyntax-only -verify -Wunused -Wunused-member-function -std=c++11 %s static void f1(); // expected-warning{{unused}} @@ -87,3 +88,16 @@ namespace rdar8733476 { foo(); } } + +namespace test5 { + static int n = 0; + static int &r = n; + int f(int &); + int k = f(r); + + // FIXME: We should produce warnings for both of these. + static const int m = n; + int x = sizeof(m); + static const double d = 0.0; + int y = sizeof(d); +} diff --git a/test/SemaCXX/warn-unused-variables.cpp b/test/SemaCXX/warn-unused-variables.cpp index 5827019..4e8d51d 100644 --- a/test/SemaCXX/warn-unused-variables.cpp +++ b/test/SemaCXX/warn-unused-variables.cpp @@ -42,10 +42,11 @@ void test_dependent_init(T *p) { } namespace PR6948 { - template class X; + template class X; // expected-note{{template is declared here}} void f() { - X str (read_from_file()); // expected-error{{use of undeclared identifier 'read_from_file'}} + X str (read_from_file()); // expected-error{{use of undeclared identifier 'read_from_file'}} \ + expected-error{{implicit instantiation of undefined template 'PR6948::X'}} } } @@ -122,3 +123,15 @@ namespace PR11550 { S3 z = a; // expected-warning {{unused variable 'z'}} } } + +namespace ctor_with_cleanups { + struct S1 { + ~S1(); + }; + struct S2 { + S2(const S1&); + }; + void func() { + S2 s((S1())); + } +} diff --git a/test/SemaCXX/warn-using-namespace-in-header.cpp b/test/SemaCXX/warn-using-namespace-in-header.cpp index 72c2552..f68b998 100644 --- a/test/SemaCXX/warn-using-namespace-in-header.cpp +++ b/test/SemaCXX/warn-using-namespace-in-header.cpp @@ -1,54 +1,60 @@ // RUN: %clang_cc1 -fsyntax-only -Wheader-hygiene -verify %s -#include "warn-using-namespace-in-header.h" +#ifdef BE_THE_HEADER +namespace warn_in_header_in_global_context {} +using namespace warn_in_header_in_global_context; // expected-warning {{using namespace directive in global context in header}} + +// While we want to error on the previous using directive, we don't when we are +// inside a namespace +namespace dont_warn_here { +using namespace warn_in_header_in_global_context; +} + +// We should warn in toplevel extern contexts. +namespace warn_inside_linkage {} +extern "C++" { +using namespace warn_inside_linkage; // expected-warning {{using namespace directive in global context in header}} +} + +// This is really silly, but we should warn on it: +extern "C++" { +extern "C" { +extern "C++" { +using namespace warn_inside_linkage; // expected-warning {{using namespace directive in global context in header}} +} +} +} + +// But we shouldn't warn in extern contexts inside namespaces. +namespace dont_warn_here { +extern "C++" { +using namespace warn_in_header_in_global_context; +} +} + +// We also shouldn't warn in case of functions. 
+inline void foo() { + using namespace warn_in_header_in_global_context; +} + + +namespace macronamespace {} +#define USING_MACRO using namespace macronamespace; + +// |using namespace| through a macro should warn if the instantiation is in a +// header. +USING_MACRO // expected-warning {{using namespace directive in global context in header}} + +#else + +#define BE_THE_HEADER +#include __FILE__ namespace dont_warn {} using namespace dont_warn; -// Warning is actually in the header but only the cpp file gets scanned. -// expected-warning {{using namespace directive in global context in header}} - - - - - - - - - -// Warn inside linkage specs too. -// expected-warning {{using namespace directive in global context in header}} - - - - - - -// expected-warning {{using namespace directive in global context in header}} - - - - - - - - - - - - - - - - - - - - - - -// expected-warning {{using namespace directive in global context in header}} - // |using namespace| through a macro shouldn't warn if the instantiation is in a // cc file. USING_MACRO + +#endif diff --git a/test/SemaCXX/warn-using-namespace-in-header.h b/test/SemaCXX/warn-using-namespace-in-header.h deleted file mode 100644 index b544c54..0000000 --- a/test/SemaCXX/warn-using-namespace-in-header.h +++ /dev/null @@ -1,50 +0,0 @@ - - - - - -// Lots of vertical space to make the error line match up with the line of the -// expected line in the source file. -namespace warn_in_header_in_global_context {} -using namespace warn_in_header_in_global_context; - -// While we want to error on the previous using directive, we don't when we are -// inside a namespace -namespace dont_warn_here { -using namespace warn_in_header_in_global_context; -} - -// We should warn in toplevel extern contexts. -namespace warn_inside_linkage {} -extern "C++" { -using namespace warn_inside_linkage; -} - -// This is really silly, but we should warn on it: -extern "C++" { -extern "C" { -extern "C++" { -using namespace warn_inside_linkage; -} -} -} - -// But we shouldn't warn in extern contexts inside namespaces. -namespace dont_warn_here { -extern "C++" { -using namespace warn_in_header_in_global_context; -} -} - -// We also shouldn't warn in case of functions. -inline void foo() { - using namespace warn_in_header_in_global_context; -} - - -namespace macronamespace {} -#define USING_MACRO using namespace macronamespace; - -// |using namespace| through a macro should warn if the instantiation is in a -// header. 
-USING_MACRO diff --git a/test/SemaCXX/zero-length-arrays.cpp b/test/SemaCXX/zero-length-arrays.cpp index 05ded4a..d86ab86 100644 --- a/test/SemaCXX/zero-length-arrays.cpp +++ b/test/SemaCXX/zero-length-arrays.cpp @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // class Foo { diff --git a/test/SemaObjC/ClassPropertyNotObject.m b/test/SemaObjC/ClassPropertyNotObject.m index 02ed40a..67d76b8 100644 --- a/test/SemaObjC/ClassPropertyNotObject.m +++ b/test/SemaObjC/ClassPropertyNotObject.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s // RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://10565506 @protocol P @end diff --git a/test/SemaObjC/ContClassPropertyLookup.m b/test/SemaObjC/ContClassPropertyLookup.m index 06a0ffa..bf4f643 100644 --- a/test/SemaObjC/ContClassPropertyLookup.m +++ b/test/SemaObjC/ContClassPropertyLookup.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface MyObject { int _foo; diff --git a/test/SemaObjC/arc-decls.m b/test/SemaObjC/arc-decls.m index 8d5cca2..a53b52a 100644 --- a/test/SemaObjC/arc-decls.m +++ b/test/SemaObjC/arc-decls.m @@ -85,6 +85,8 @@ void func() - (id)ns_non __attribute((ns_returns_not_retained)); // expected-error {{overriding method has mismatched ns_returns_not_retained attributes}} - (id)not_ret:(id) b __attribute((ns_returns_retained)); // expected-error {{overriding method has mismatched ns_returns_retained attributes}} - (id)both__returns_not_retained:(id) b __attribute((ns_returns_not_retained)); +// rdar://12173491 +@property (copy, nonatomic) __attribute__((ns_returns_retained)) id (^fblock)(void); @end // Test that we give a good diagnostic here that mentions the missing diff --git a/test/SemaObjC/arc-objc-lifetime.m b/test/SemaObjC/arc-objc-lifetime.m index 03260e8..08d2dbe 100644 --- a/test/SemaObjC/arc-objc-lifetime.m +++ b/test/SemaObjC/arc-objc-lifetime.m @@ -1,5 +1,5 @@ -// RUN: %clang_cc1 -triple x86_64-apple-darwin11 -fsyntax-only -fobjc-arc -verify -Wno-objc-root-class %s -// RUN: %clang_cc1 -x objective-c++ -triple x86_64-apple-darwin11 -fsyntax-only -fobjc-arc -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -triple x86_64-apple-darwin11 -fsyntax-only -fobjc-arc -fblocks -Wexplicit-ownership-type -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -x objective-c++ -triple x86_64-apple-darwin11 -fsyntax-only -fobjc-arc -fblocks -Wexplicit-ownership-type -verify -Wno-objc-root-class %s // rdar://10244607 typedef const struct __CFString * CFStringRef; @@ -40,3 +40,30 @@ __strong I *(__strong (test3)); // expected-error {{the type 'I *__strong' is al __unsafe_unretained __typeof__(test3) test4; typedef __strong I *strong_I; __unsafe_unretained strong_I test5; + +// rdar://10907090 +typedef void (^T) (); +@interface NSObject @end +@protocol P; +@interface Radar10907090 @end + +@implementation Radar10907090 +- (void) MMM : (NSObject*) arg0 : (NSObject
**)arg : (id) arg1 : (id*) arg2 {} // expected-warning {{method parameter of type 'NSObject *__autoreleasing *' with no explicit ownership}} \ + // expected-warning {{method parameter of type '__autoreleasing id *' with no explicit ownership}} +- (void) MM : (NSObject*) arg0 : (__strong NSObject**)arg : (id) arg1 : (__strong id*) arg2 {} +- (void) M : (NSObject**)arg0 : (id*)arg {} // expected-warning {{method parameter of type 'NSObject *__autoreleasing *' with no explicit ownership}} \ + // expected-warning {{method parameter of type '__autoreleasing id *' with no explicit ownership}} +- (void) N : (__strong NSObject***) arg0 : (__strong NSObject***)arg : (float**) arg1 : (double) arg2 {} +- (void) BLOCK : (T*) arg0 : (T)arg : (__strong T*) arg1 {} // expected-warning {{method parameter of type '__autoreleasing T *' (aka 'void (^__autoreleasing *)()') with no explicit ownership}} +@end + +// rdar://12280826 +@class NSMutableDictionary, NSError; +@interface Radar12280826 +- (void)createInferiorTransportAndSetEnvironment:(NSMutableDictionary*)environment error:(__autoreleasing NSError**)error; +@end + +@implementation Radar12280826 +- (void)createInferiorTransportAndSetEnvironment:(NSMutableDictionary*)environment error:(__autoreleasing NSError**)error {} +@end +
diff --git a/test/SemaObjC/arc-property-decl-attrs.m b/test/SemaObjC/arc-property-decl-attrs.m index 1386241..283772c 100644 --- a/test/SemaObjC/arc-property-decl-attrs.m +++ b/test/SemaObjC/arc-property-decl-attrs.m @@ -5,7 +5,7 @@ @public id __unsafe_unretained x; id __weak y; - id __autoreleasing z; // expected-error {{ivars cannot have __autoreleasing ownership}} + id __autoreleasing z; // expected-error {{instance variables cannot have __autoreleasing ownership}} } @property(strong) id x; @property(strong) id y; @@ -16,7 +16,7 @@ @public id __unsafe_unretained x; id __weak y; - id __autoreleasing z; // expected-error {{ivars cannot have __autoreleasing ownership}} + id __autoreleasing z; // expected-error {{instance variables cannot have __autoreleasing ownership}} } @property(retain) id x; @property(retain) id y; @@ -27,7 +27,7 @@ @public id __unsafe_unretained x; id __weak y; - id __autoreleasing z; // expected-error {{ivars cannot have __autoreleasing ownership}} + id __autoreleasing z; // expected-error {{instance variables cannot have __autoreleasing ownership}} } @property(copy) id x; @property(copy) id y;
diff --git a/test/SemaObjC/arc-property-lifetime.m b/test/SemaObjC/arc-property-lifetime.m index bd393e0..1957081 100644 --- a/test/SemaObjC/arc-property-lifetime.m +++ b/test/SemaObjC/arc-property-lifetime.m @@ -5,7 +5,7 @@ @public id __unsafe_unretained x; id __weak y; - id __autoreleasing z; // expected-error {{ivars cannot have __autoreleasing ownership}} + id __autoreleasing z; // expected-error {{instance variables cannot have __autoreleasing ownership}} } @property(strong) id x; // expected-note {{property declared here}} @property(strong) id y; // expected-note {{property declared here}} @@ -13,8 +13,8 @@ @end @implementation Foo -@synthesize x; // expected-error {{existing ivar 'x' for strong property 'x' may not be __unsafe_unretained}} -@synthesize y; // expected-error {{existing ivar 'y' for strong property 'y' may not be __weak}} +@synthesize x; // expected-error {{existing instance variable 'x' for strong property 'x' may not be __unsafe_unretained}} +@synthesize y; // expected-error {{existing instance variable 'y' for strong property 'y' may not be __weak}} @synthesize z; // suppressed @end @@ -22,7 +22,7 @@ @public id __unsafe_unretained x; id __weak y; - id __autoreleasing z; // expected-error {{ivars cannot have __autoreleasing ownership}} + id __autoreleasing z; // expected-error {{instance variables cannot have __autoreleasing ownership}} } @property(retain) id x; // expected-note {{property declared here}} @property(retain) id y; // expected-note {{property declared here}} @@ -30,8 +30,8 @@ @end @implementation Bar -@synthesize x; // expected-error {{existing ivar 'x' for strong property 'x' may not be __unsafe_unretained}} -@synthesize y; // expected-error {{existing ivar 'y' for strong property 'y' may not be __weak}} +@synthesize x;
// expected-error {{existing instance variable 'x' for strong property 'x' may not be __unsafe_unretained}} +@synthesize y; // expected-error {{existing instance variable 'y' for strong property 'y' may not be __weak}} @synthesize z; // suppressed @end @@ -39,7 +39,7 @@ @public id __unsafe_unretained x; id __weak y; - id __autoreleasing z; // expected-error {{ivars cannot have __autoreleasing ownership}} + id __autoreleasing z; // expected-error {{instance variables cannot have __autoreleasing ownership}} } @property(copy) id x; // expected-note {{property declared here}} @property(copy) id y; // expected-note {{property declared here}} @@ -47,8 +47,8 @@ @end @implementation Bas -@synthesize x; // expected-error {{existing ivar 'x' for strong property 'x' may not be __unsafe_unretained}} -@synthesize y; // expected-error {{existing ivar 'y' for strong property 'y' may not be __weak}} +@synthesize x; // expected-error {{existing instance variable 'x' for strong property 'x' may not be __unsafe_unretained}} +@synthesize y; // expected-error {{existing instance variable 'y' for strong property 'y' may not be __weak}} @synthesize z; // suppressed @end @@ -79,7 +79,7 @@ @implementation Gorf @synthesize x; -@synthesize y; // expected-error {{existing ivar 'y' for property 'y' with assign attribute must be __unsafe_unretained}} +@synthesize y; // expected-error {{existing instance variable 'y' for property 'y' with assign attribute must be __unsafe_unretained}} @synthesize z; @end @@ -94,7 +94,7 @@ @implementation Gorf2 @synthesize x; -@synthesize y; // expected-error {{existing ivar 'y' for property 'y' with unsafe_unretained attribute must be __unsafe_unretained}} +@synthesize y; // expected-error {{existing instance variable 'y' for property 'y' with unsafe_unretained attribute must be __unsafe_unretained}} @synthesize z; @end diff --git a/test/SemaObjC/arc-property.m b/test/SemaObjC/arc-property.m index 41d8e87..2925459 100644 --- a/test/SemaObjC/arc-property.m +++ b/test/SemaObjC/arc-property.m @@ -18,13 +18,13 @@ @end @implementation MyClass -@synthesize myString; // expected-error {{existing ivar 'myString' for strong property 'myString' may not be __weak}} +@synthesize myString; // expected-error {{existing instance variable 'myString' for strong property 'myString' may not be __weak}} @synthesize myString1 = StrongIvar; // OK -@synthesize myString2 = myString2; // expected-error {{existing ivar 'myString2' for strong property 'myString2' may not be __weak}} +@synthesize myString2 = myString2; // expected-error {{existing instance variable 'myString2' for strong property 'myString2' may not be __weak}} // @synthesize myString3; // OK @synthesize myString4; // OK -@synthesize myString5 = StrongIvar5; // expected-error {{existing ivar 'StrongIvar5' for __weak property 'myString5' must be __weak}} +@synthesize myString5 = StrongIvar5; // expected-error {{existing instance variable 'StrongIvar5' for __weak property 'myString5' must be __weak}} @end @@ -33,7 +33,7 @@ @public id __unsafe_unretained x; // should be __weak id __strong y; - id __autoreleasing z; // expected-error {{ivars cannot have __autoreleasing ownership}} + id __autoreleasing z; // expected-error {{instance variables cannot have __autoreleasing ownership}} } @property(weak) id x; // expected-note {{property declared here}} @property(weak) id y; // expected-note {{property declared here}} @@ -41,8 +41,8 @@ @end @implementation Foo -@synthesize x; // expected-error {{existing ivar 'x' for __weak property 'x' must be __weak}} 
-@synthesize y; // expected-error {{existing ivar 'y' for __weak property 'y' must be __weak}} +@synthesize x; // expected-error {{existing instance variable 'x' for __weak property 'x' must be __weak}} +@synthesize y; // expected-error {{existing instance variable 'y' for __weak property 'y' must be __weak}} @synthesize z; // suppressed @end diff --git a/test/SemaObjC/arc-readonly-property-ivar-1.m b/test/SemaObjC/arc-readonly-property-ivar-1.m index c773f26..418f90d 100644 --- a/test/SemaObjC/arc-readonly-property-ivar-1.m +++ b/test/SemaObjC/arc-readonly-property-ivar-1.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fobjc-default-synthesize-properties -triple x86_64-apple-darwin11 -fobjc-runtime-has-weak -fobjc-arc -fsyntax-only -verify -Wno-objc-root-class %s // RUN: %clang_cc1 -x objective-c++ -fobjc-default-synthesize-properties -triple x86_64-apple-darwin11 -fobjc-runtime-has-weak -fobjc-arc -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar:// 10558871 @interface PP diff --git a/test/SemaObjC/arc-readonly-property-ivar.m b/test/SemaObjC/arc-readonly-property-ivar.m index 635b9fe..bcc1f4b 100644 --- a/test/SemaObjC/arc-readonly-property-ivar.m +++ b/test/SemaObjC/arc-readonly-property-ivar.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin11 -fobjc-runtime-has-weak -fobjc-arc -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar:// 10558871 @interface PP diff --git a/test/SemaObjC/arc-repeated-weak.mm b/test/SemaObjC/arc-repeated-weak.mm new file mode 100644 index 0000000..e652bee --- /dev/null +++ b/test/SemaObjC/arc-repeated-weak.mm @@ -0,0 +1,386 @@ +// RUN: %clang_cc1 -fsyntax-only -fobjc-runtime-has-weak -fobjc-arc -fblocks -Wno-objc-root-class -std=c++11 -Warc-repeated-use-of-weak -verify %s + +@interface Test { +@public + Test *ivar; + __weak id weakIvar; +} +@property(weak) Test *weakProp; +@property(strong) Test *strongProp; + +- (__weak id)implicitProp; + ++ (__weak id)weakProp; +@end + +extern void use(id); +extern id get(); +extern bool condition(); +#define nil ((id)0) + +void sanity(Test *a) { + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times in this function but may be unpredictably set to nil; assign to a strong variable to keep the object alive}} + use(a.weakProp); // expected-note{{also accessed here}} + + use(a.strongProp); + use(a.strongProp); // no-warning + + use(a.weakProp); // expected-note{{also accessed here}} +} + +void singleUse(Test *a) { + use(a.weakProp); // no-warning + use(a.strongProp); // no-warning +} + +void assignsOnly(Test *a) { + a.weakProp = get(); // no-warning + + id next = get(); + if (next) + a.weakProp = next; // no-warning + + a->weakIvar = get(); // no-warning + next = get(); + if (next) + a->weakIvar = next; // no-warning + + extern __weak id x; + x = get(); // no-warning + next = get(); + if (next) + x = next; // no-warning +} + +void assignThenRead(Test *a) { + a.weakProp = get(); // expected-note{{also accessed here}} + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times}} +} + +void twoVariables(Test *a, Test *b) { + use(a.weakProp); // no-warning + use(b.weakProp); // no-warning +} + +void doubleLevelAccess(Test *a) { + use(a.strongProp.weakProp); // expected-warning{{weak property 'weakProp' may be accessed multiple times in this function and may be unpredictably set to nil; assign to a strong variable to keep the object alive}} + use(a.strongProp.weakProp); // expected-note{{also 
accessed here}} +} + +void doubleLevelAccessIvar(Test *a) { + use(a.strongProp.weakProp); // expected-warning{{weak property 'weakProp' may be accessed multiple times}} + use(a.strongProp.weakProp); // expected-note{{also accessed here}} +} + +void implicitProperties(Test *a) { + use(a.implicitProp); // expected-warning{{weak implicit property 'implicitProp' is accessed multiple times}} + use(a.implicitProp); // expected-note{{also accessed here}} +} + +void classProperties() { + use(Test.weakProp); // expected-warning{{weak implicit property 'weakProp' is accessed multiple times}} + use(Test.weakProp); // expected-note{{also accessed here}} +} + +void classPropertiesAreDifferent(Test *a) { + use(Test.weakProp); // no-warning + use(a.weakProp); // no-warning + use(a.strongProp.weakProp); // no-warning +} + +void ivars(Test *a) { + use(a->weakIvar); // expected-warning{{weak instance variable 'weakIvar' is accessed multiple times}} + use(a->weakIvar); // expected-note{{also accessed here}} +} + +void globals() { + extern __weak id a; + use(a); // expected-warning{{weak variable 'a' is accessed multiple times}} + use(a); // expected-note{{also accessed here}} +} + +void messageGetter(Test *a) { + use([a weakProp]); // expected-warning{{weak property 'weakProp' is accessed multiple times}} + use([a weakProp]); // expected-note{{also accessed here}} +} + +void messageSetter(Test *a) { + [a setWeakProp:get()]; // no-warning + [a setWeakProp:get()]; // no-warning +} + +void messageSetterAndGetter(Test *a) { + [a setWeakProp:get()]; // expected-note{{also accessed here}} + use([a weakProp]); // expected-warning{{weak property 'weakProp' is accessed multiple times}} +} + +void mixDotAndMessageSend(Test *a, Test *b) { + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times}} + use([a weakProp]); // expected-note{{also accessed here}} + + use([b weakProp]); // expected-warning{{weak property 'weakProp' is accessed multiple times}} + use(b.weakProp); // expected-note{{also accessed here}} +} + + +void assignToStrongWrongInit(Test *a) { + id val = a.weakProp; // expected-note{{also accessed here}} + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times}} +} + +void assignToStrongWrong(Test *a) { + id val; + val = a.weakProp; // expected-note{{also accessed here}} + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times}} +} + +void assignToIvarWrong(Test *a) { + a->weakIvar = get(); // expected-note{{also accessed here}} + use(a->weakIvar); // expected-warning{{weak instance variable 'weakIvar' is accessed multiple times}} +} + +void assignToGlobalWrong() { + extern __weak id a; + a = get(); // expected-note{{also accessed here}} + use(a); // expected-warning{{weak variable 'a' is accessed multiple times}} +} + +void assignToStrongOK(Test *a) { + if (condition()) { + id val = a.weakProp; // no-warning + (void)val; + } else { + id val; + val = a.weakProp; // no-warning + (void)val; + } +} + +void assignToStrongConditional(Test *a) { + id val = (condition() ? 
a.weakProp : a.weakProp); // no-warning + id val2 = a.implicitProp ?: a.implicitProp; // no-warning +} + +void testBlock(Test *a) { + use(a.weakProp); // no-warning + + use(^{ + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times in this block}} + use(a.weakProp); // expected-note{{also accessed here}} + }); +} + +void assignToStrongWithCasts(Test *a) { + if (condition()) { + Test *val = (Test *)a.weakProp; // no-warning + (void)val; + } else { + id val; + val = (Test *)a.weakProp; // no-warning + (void)val; + } +} + +void assignToStrongWithMessages(Test *a) { + if (condition()) { + id val = [a weakProp]; // no-warning + (void)val; + } else { + id val; + val = [a weakProp]; // no-warning + (void)val; + } +} + + +void assignAfterRead(Test *a) { + // Special exception for a single read before any writes. + if (!a.weakProp) // no-warning + a.weakProp = get(); // no-warning +} + +void readOnceWriteMany(Test *a) { + if (!a.weakProp) { // no-warning + a.weakProp = get(); // no-warning + a.weakProp = get(); // no-warning + } +} + +void readOnceAfterWrite(Test *a) { + a.weakProp = get(); // expected-note{{also accessed here}} + if (!a.weakProp) { // expected-warning{{weak property 'weakProp' is accessed multiple times in this function}} + a.weakProp = get(); // expected-note{{also accessed here}} + } +} + +void readOnceWriteManyLoops(Test *a, Test *b, Test *c, Test *d, Test *e) { + while (condition()) { + if (!a.weakProp) { // expected-warning{{weak property 'weakProp' is accessed multiple times in this function}} + a.weakProp = get(); // expected-note{{also accessed here}} + a.weakProp = get(); // expected-note{{also accessed here}} + } + } + + do { + if (!b.weakProp) { // expected-warning{{weak property 'weakProp' is accessed multiple times in this function}} + b.weakProp = get(); // expected-note{{also accessed here}} + b.weakProp = get(); // expected-note{{also accessed here}} + } + } while (condition()); + + for (id x = get(); x; x = get()) { + if (!c.weakProp) { // expected-warning{{weak property 'weakProp' is accessed multiple times in this function}} + c.weakProp = get(); // expected-note{{also accessed here}} + c.weakProp = get(); // expected-note{{also accessed here}} + } + } + + for (id x in get()) { + if (!d.weakProp) { // expected-warning{{weak property 'weakProp' is accessed multiple times in this function}} + d.weakProp = get(); // expected-note{{also accessed here}} + d.weakProp = get(); // expected-note{{also accessed here}} + } + } + + int array[] = { 1, 2, 3 }; + for (int i : array) { + if (!e.weakProp) { // expected-warning{{weak property 'weakProp' is accessed multiple times in this function}} + e.weakProp = get(); // expected-note{{also accessed here}} + e.weakProp = get(); // expected-note{{also accessed here}} + } + } +} + +void readOnlyLoop(Test *a) { + while (condition()) { + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times in this function}} + } +} + +void readInIterationLoop() { + for (Test *a in get()) + use(a.weakProp); // no-warning +} + +void readDoubleLevelAccessInLoop() { + for (Test *a in get()) { + use(a.strongProp.weakProp); // no-warning + } +} + +void readParameterInLoop(Test *a) { + for (id unused in get()) { + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times in this function}} + (void)unused; + } +} + +void readGlobalInLoop() { + static __weak id a; + for (id unused in get()) { + use(a); // expected-warning{{weak variable 'a' is accessed 
multiple times in this function}} + (void)unused; + } +} + +void doWhileLoop(Test *a) { + do { + use(a.weakProp); // no-warning + } while(0); +} + + +@interface Test (Methods) +@end + +@implementation Test (Methods) +- (void)sanity { + use(self.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times in this method but may be unpredictably set to nil; assign to a strong variable to keep the object alive}} + use(self.weakProp); // expected-note{{also accessed here}} +} + +- (void)ivars { + use(weakIvar); // expected-warning{{weak instance variable 'weakIvar' is accessed multiple times in this method but may be unpredictably set to nil; assign to a strong variable to keep the object alive}} + use(weakIvar); // expected-note{{also accessed here}} +} + +- (void)doubleLevelAccessForSelf { + use(self.strongProp.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times}} + use(self.strongProp.weakProp); // expected-note{{also accessed here}} + + use(self->ivar.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times}} + use(self->ivar.weakProp); // expected-note{{also accessed here}} + + use(self->ivar->weakIvar); // expected-warning{{weak instance variable 'weakIvar' is accessed multiple times}} + use(self->ivar->weakIvar); // expected-note{{also accessed here}} +} + +- (void)distinctFromOther:(Test *)other { + use(self.strongProp.weakProp); // no-warning + use(other.strongProp.weakProp); // no-warning + + use(self->ivar.weakProp); // no-warning + use(other->ivar.weakProp); // no-warning + + use(self.strongProp->weakIvar); // no-warning + use(other.strongProp->weakIvar); // no-warning +} +@end + + +class Wrapper { + Test *a; + +public: + void fields() { + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times in this function but may be unpredictably set to nil; assign to a strong variable to keep the object alive}} + use(a.weakProp); // expected-note{{also accessed here}} + } + + void distinctFromOther(Test *b, const Wrapper &w) { + use(a.weakProp); // no-warning + use(b.weakProp); // no-warning + use(w.a.weakProp); // no-warning + } + + static void doubleLevelAccessField(const Wrapper &x, const Wrapper &y) { + use(x.a.weakProp); // expected-warning{{weak property 'weakProp' may be accessed multiple times}} + use(y.a.weakProp); // expected-note{{also accessed here}} + } +}; + + +// ----------------------- +// False positives +// ----------------------- + +// Most of these would require flow-sensitive analysis to silence correctly. 
+ +void assignNil(Test *a) { + if (condition()) + a.weakProp = nil; // expected-note{{also accessed here}} + + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times}} +} + +void branch(Test *a) { + if (condition()) + use(a.weakProp); // expected-warning{{weak property 'weakProp' is accessed multiple times}} + else + use(a.weakProp); // expected-note{{also accessed here}} +} + +void doubleLevelAccess(Test *a, Test *b) { + use(a.strongProp.weakProp); // expected-warning{{weak property 'weakProp' may be accessed multiple times}} + use(b.strongProp.weakProp); // expected-note{{also accessed here}} + + use(a.weakProp.weakProp); // no-warning +} + +void doubleLevelAccessIvar(Test *a, Test *b) { + use(a->ivar.weakProp); // expected-warning{{weak property 'weakProp' may be accessed multiple times}} + use(b->ivar.weakProp); // expected-note{{also accessed here}} + + use(a.strongProp.weakProp); // no-warning +} + diff --git a/test/SemaObjC/arc-setter-property-match.m b/test/SemaObjC/arc-setter-property-match.m index 9158b09..83a07e9 100644 --- a/test/SemaObjC/arc-setter-property-match.m +++ b/test/SemaObjC/arc-setter-property-match.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin11 -fsyntax-only -fobjc-arc -fblocks -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://10156674 @class NSArray; diff --git a/test/SemaObjC/arc-system-header.m b/test/SemaObjC/arc-system-header.m index 1a7c39d..3443bda 100644 --- a/test/SemaObjC/arc-system-header.m +++ b/test/SemaObjC/arc-system-header.m @@ -38,7 +38,7 @@ id test6() { x = (id) (test6_helper(), kMagicConstant); } -// workaround expected-note 4 {{marked unavailable here}} +// workaround expected-note 4 {{marked unavailable here}} expected-note 2 {{property 'prop' is declared unavailable here}} void test7(Test7 *p) { *p.prop = 0; // expected-error {{'prop' is unavailable: this system declaration uses an unsupported type}} p.prop = 0; // expected-error {{'prop' is unavailable: this system declaration uses an unsupported type}} diff --git a/test/SemaObjC/arc-unavailable-for-weakref.m b/test/SemaObjC/arc-unavailable-for-weakref.m index 8498de6..b140c64 100644 --- a/test/SemaObjC/arc-unavailable-for-weakref.m +++ b/test/SemaObjC/arc-unavailable-for-weakref.m @@ -60,5 +60,5 @@ __attribute__((objc_arc_weak_reference_unavailable)) @end @implementation I -@synthesize font = _font; // expected-error {{synthesis of a weak-unavailable property is disallowed because it requires synthesis of an ivar of the __weak object}} +@synthesize font = _font; // expected-error {{synthesis of a weak-unavailable property is disallowed because it requires synthesis of an instance variable of the __weak object}} @end diff --git a/test/SemaObjC/arc-unsafe_unretained.m b/test/SemaObjC/arc-unsafe_unretained.m index a6c5f98..99e870f 100644 --- a/test/SemaObjC/arc-unsafe_unretained.m +++ b/test/SemaObjC/arc-unsafe_unretained.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fsyntax-only -verify -fblocks %s // RUN: %clang_cc1 -fsyntax-only -verify -fblocks -fobjc-arc %s +// expected-no-diagnostics struct X { __unsafe_unretained id object; diff --git a/test/SemaObjC/attr-availability.m b/test/SemaObjC/attr-availability.m index 7c9ff0f..ed6b760 100644 --- a/test/SemaObjC/attr-availability.m +++ b/test/SemaObjC/attr-availability.m @@ -14,8 +14,8 @@ @end void f(A *a, B *b) { - [a method]; // expected-warning{{'method' is deprecated: first deprecated in Mac OS X 10.2}} - [b method]; // expected-warning 
{{'method' is deprecated: first deprecated in Mac OS X 10.2}} - [a proto_method]; // expected-warning{{'proto_method' is deprecated: first deprecated in Mac OS X 10.2}} - [b proto_method]; // expected-warning{{'proto_method' is deprecated: first deprecated in Mac OS X 10.2}} + [a method]; // expected-warning{{'method' is deprecated: first deprecated in OS X 10.2}} + [b method]; // expected-warning {{'method' is deprecated: first deprecated in OS X 10.2}} + [a proto_method]; // expected-warning{{'proto_method' is deprecated: first deprecated in OS X 10.2}} + [b proto_method]; // expected-warning{{'proto_method' is deprecated: first deprecated in OS X 10.2}} } diff --git a/test/SemaObjC/attr-cleanup.m b/test/SemaObjC/attr-cleanup.m index 8415c69..978498c 100644 --- a/test/SemaObjC/attr-cleanup.m +++ b/test/SemaObjC/attr-cleanup.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -fsyntax-only +// expected-no-diagnostics @class NSString; diff --git a/test/SemaObjC/attr-deprecated.m b/test/SemaObjC/attr-deprecated.m index 260462a..c0aa9fc 100644 --- a/test/SemaObjC/attr-deprecated.m +++ b/test/SemaObjC/attr-deprecated.m @@ -107,7 +107,8 @@ __attribute ((deprecated)) @interface Test2 -@property int test2 __attribute__((deprecated)); // expected-note 4 {{declared here}} +@property int test2 __attribute__((deprecated)); // expected-note 4 {{declared here}} \ + // expected-note 2 {{property 'test2' is declared deprecated here}} @end void test(Test2 *foo) { diff --git a/test/SemaObjC/block-as-object.m b/test/SemaObjC/block-as-object.m index a85b606..945d6f6 100644 --- a/test/SemaObjC/block-as-object.m +++ b/test/SemaObjC/block-as-object.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -fblocks +// expected-no-diagnostics @interface Whatever - copy; diff --git a/test/SemaObjC/block-ivar.m b/test/SemaObjC/block-ivar.m index c7ea1d9..5864b63 100644 --- a/test/SemaObjC/block-ivar.m +++ b/test/SemaObjC/block-ivar.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -fblocks +// expected-no-diagnostics @interface NSObject { struct objc_object *isa; diff --git a/test/SemaObjC/block-return.m b/test/SemaObjC/block-return.m index 15c3fb6..e0bac99 100644 --- a/test/SemaObjC/block-return.m +++ b/test/SemaObjC/block-return.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fsyntax-only -verify -fblocks -fobjc-gc-only %s +// expected-no-diagnostics // rdar://8979379 @interface NSString diff --git a/test/SemaObjC/builtin_objc_assign_ivar.m b/test/SemaObjC/builtin_objc_assign_ivar.m index 5839bf4..6c28178 100644 --- a/test/SemaObjC/builtin_objc_assign_ivar.m +++ b/test/SemaObjC/builtin_objc_assign_ivar.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -x objective-c %s -fsyntax-only -verify +// expected-no-diagnostics // rdar://9362887 typedef __typeof__(((int*)0)-((int*)0)) ptrdiff_t; diff --git a/test/SemaObjC/builtin_objc_msgSend.m b/test/SemaObjC/builtin_objc_msgSend.m index bf17225..bfa09d9 100644 --- a/test/SemaObjC/builtin_objc_msgSend.m +++ b/test/SemaObjC/builtin_objc_msgSend.m @@ -1,3 +1,4 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // rdar://8632525 extern id objc_msgSend(id self, SEL op, ...); diff --git a/test/SemaObjC/category-method-lookup-2.m b/test/SemaObjC/category-method-lookup-2.m index a31d824..ed347c7 100644 --- a/test/SemaObjC/category-method-lookup-2.m +++ b/test/SemaObjC/category-method-lookup-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef struct objc_class *Class; @interface NSObject 
diff --git a/test/SemaObjC/category-method-lookup.m b/test/SemaObjC/category-method-lookup.m index 4223a74..6239e94 100644 --- a/test/SemaObjC/category-method-lookup.m +++ b/test/SemaObjC/category-method-lookup.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface Foo @end diff --git a/test/SemaObjC/class-getter-using-dotsyntax.m b/test/SemaObjC/class-getter-using-dotsyntax.m index 4ff9428..dd384b5 100644 --- a/test/SemaObjC/class-getter-using-dotsyntax.m +++ b/test/SemaObjC/class-getter-using-dotsyntax.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics typedef struct objc_class *Class; diff --git a/test/SemaObjC/class-property-access.m b/test/SemaObjC/class-property-access.m index c46d3fb..735b51a 100644 --- a/test/SemaObjC/class-property-access.m +++ b/test/SemaObjC/class-property-access.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface Test {} + (Test*)one; diff --git a/test/SemaObjC/class-protocol.m b/test/SemaObjC/class-protocol.m index 91cd138..021047e 100644 --- a/test/SemaObjC/class-protocol.m +++ b/test/SemaObjC/class-protocol.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // pr5552 @interface Protocol diff --git a/test/SemaObjC/comptypes-2.m b/test/SemaObjC/comptypes-2.m index 74e42c9..8e90455 100644 --- a/test/SemaObjC/comptypes-2.m +++ b/test/SemaObjC/comptypes-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics #define nil (void *)0; #define Nil (void *)0; diff --git a/test/SemaObjC/comptypes-8.m b/test/SemaObjC/comptypes-8.m index 750b0a6..e651030 100644 --- a/test/SemaObjC/comptypes-8.m +++ b/test/SemaObjC/comptypes-8.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @protocol MyProtocol @end diff --git a/test/SemaObjC/conditional-expr-5.m b/test/SemaObjC/conditional-expr-5.m index 47aed3e..b1f7e59 100644 --- a/test/SemaObjC/conditional-expr-5.m +++ b/test/SemaObjC/conditional-expr-5.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface PBXBuildSettingsDictionary { diff --git a/test/SemaObjC/conditional-expr-6.m b/test/SemaObjC/conditional-expr-6.m index 098688a..e944e54 100644 --- a/test/SemaObjC/conditional-expr-6.m +++ b/test/SemaObjC/conditional-expr-6.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @protocol MyProtocol @end diff --git a/test/SemaObjC/conditional-expr-7.m b/test/SemaObjC/conditional-expr-7.m index 3ddf3d7..5b4a863 100644 --- a/test/SemaObjC/conditional-expr-7.m +++ b/test/SemaObjC/conditional-expr-7.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // radar 7682116 @interface Super @end diff --git a/test/SemaObjC/conditional-expr-8.m b/test/SemaObjC/conditional-expr-8.m index 6799983..beddd20 100644 --- a/test/SemaObjC/conditional-expr-8.m +++ b/test/SemaObjC/conditional-expr-8.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // rdar://9296866 @interface NSResponder diff --git a/test/SemaObjC/conflict-nonfragile-abi2.m b/test/SemaObjC/conflict-nonfragile-abi2.m index 8197327..d0d6be8 100644 --- a/test/SemaObjC/conflict-nonfragile-abi2.m +++ b/test/SemaObjC/conflict-nonfragile-abi2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify -fsyntax-only -Wno-objc-root-class %s +// 
expected-no-diagnostics // rdar://8225011 int glob; diff --git a/test/SemaObjC/continuation-class-err.m b/test/SemaObjC/continuation-class-err.m index ceb8ee9..8378c3f 100644 --- a/test/SemaObjC/continuation-class-err.m +++ b/test/SemaObjC/continuation-class-err.m @@ -5,13 +5,13 @@ id _object; id _object1; } -@property(readonly) id object; // expected-note {{property declared here}} +@property(readonly) id object; @property(readwrite, assign) id object1; // expected-note {{property declared here}} @property (readonly) int indentLevel; @end @interface ReadOnly () -@property(readwrite, copy) id object; // expected-warning {{property attribute in class extension does not match the primary class}} +@property(readwrite, copy) id object; // Ok. declaring memory model in class extension - primary has none. @property(readonly) id object1; // expected-error {{illegal redeclaration of property in class extension 'ReadOnly' (attribute must be 'readwrite', while its primary must be 'readonly')}} @property (readwrite, assign) int indentLevel; // OK. assign the default in any case. @end diff --git a/test/SemaObjC/crash-on-objc-bool-literal.m b/test/SemaObjC/crash-on-objc-bool-literal.m new file mode 100644 index 0000000..2c003a5 --- /dev/null +++ b/test/SemaObjC/crash-on-objc-bool-literal.m @@ -0,0 +1,12 @@ +// RUN: %clang_cc1 -fsyntax-only -verify %s +// RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify %s +// rdar://12456743 + +typedef signed char BOOL; // expected-note 2 {{candidate found by name lookup is 'BOOL'}} + +EXPORT BOOL FUNC(BOOL enabled); // expected-error {{unknown type name 'EXPORT'}} // expected-error {{expected ';' after top level declarator}} \ + // expected-note 2 {{candidate found by name lookup is 'BOOL'}} + +static inline BOOL MFIsPrivateVersion(void) { // expected-error {{reference to 'BOOL' is ambiguous}} + return __objc_yes; // expected-error {{reference to 'BOOL' is ambiguous}} +} diff --git a/test/SemaObjC/default-synthesize-2.m b/test/SemaObjC/default-synthesize-2.m index 3756413..20c045e 100644 --- a/test/SemaObjC/default-synthesize-2.m +++ b/test/SemaObjC/default-synthesize-2.m @@ -41,7 +41,7 @@ // Test3 @interface Test3 { - id uid; // expected-note {{ivar is declared here}} + id uid; // expected-note {{instance variable is declared here}} } @property (readwrite, assign) id uid; // expected-note {{property declared here}} @end @@ -119,7 +119,7 @@ int* _object; @interface Test8 { id _y; - id y; // expected-note {{ivar is declared here}} + id y; // expected-note {{instance variable is declared here}} } @property(copy) id y; // expected-note {{property declared here}} @end diff --git a/test/SemaObjC/delay-parsing-cfunctions.m b/test/SemaObjC/delay-parsing-cfunctions.m index a6f66fe..c74b054 100644 --- a/test/SemaObjC/delay-parsing-cfunctions.m +++ b/test/SemaObjC/delay-parsing-cfunctions.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -Werror -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://10387088 @interface MyClass diff --git a/test/SemaObjC/direct-synthesized-ivar-access.m b/test/SemaObjC/direct-synthesized-ivar-access.m index dc14911..a276a64 100644 --- a/test/SemaObjC/direct-synthesized-ivar-access.m +++ b/test/SemaObjC/direct-synthesized-ivar-access.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fobjc-default-synthesize-properties -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://8673791 // rdar://9943851 diff --git a/test/SemaObjC/enhanced-proto-2.m b/test/SemaObjC/enhanced-proto-2.m index 28b03d9..352f291 100644 
--- a/test/SemaObjC/enhanced-proto-2.m +++ b/test/SemaObjC/enhanced-proto-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify -Wno-objc-root-class %s +// expected-no-diagnostics @protocol MyProto1 @optional diff --git a/test/SemaObjC/enum-fixed-type.m b/test/SemaObjC/enum-fixed-type.m index 95153be..4fe643f 100644 --- a/test/SemaObjC/enum-fixed-type.m +++ b/test/SemaObjC/enum-fixed-type.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics #if !__has_feature(objc_fixed_enum) # error Enumerations with a fixed underlying type are not supported diff --git a/test/SemaObjC/error-property-gc-attr.m b/test/SemaObjC/error-property-gc-attr.m index 5680296..dfac0d4 100644 --- a/test/SemaObjC/error-property-gc-attr.m +++ b/test/SemaObjC/error-property-gc-attr.m @@ -3,7 +3,7 @@ @interface INTF { - id IVAR; // expected-note {{ivar is declared here}} + id IVAR; // expected-note {{instance variable is declared here}} __weak id II; __weak id WID; id ID; @@ -19,10 +19,10 @@ @end @implementation INTF -@synthesize pweak=IVAR; // expected-error {{existing ivar 'IVAR' for __weak property 'pweak' must be __weak}} -@synthesize NOT=II; // expected-error {{existing ivar 'II' for strong property 'NOT' may not be __weak}} +@synthesize pweak=IVAR; // expected-error {{existing instance variable 'IVAR' for __weak property 'pweak' must be __weak}} +@synthesize NOT=II; // expected-error {{existing instance variable 'II' for strong property 'NOT' may not be __weak}} @synthesize WID; @synthesize ID; -@synthesize AWEAK; // expected-error {{existing ivar 'AWEAK' for strong property 'AWEAK' may not be __weak}} +@synthesize AWEAK; // expected-error {{existing instance variable 'AWEAK' for strong property 'AWEAK' may not be __weak}} @synthesize WI; @end diff --git a/test/SemaObjC/getter-setter-defined-in-category-of-parent.m b/test/SemaObjC/getter-setter-defined-in-category-of-parent.m index 71c3237..ff5c174 100644 --- a/test/SemaObjC/getter-setter-defined-in-category-of-parent.m +++ b/test/SemaObjC/getter-setter-defined-in-category-of-parent.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface MyParent { int X; diff --git a/test/SemaObjC/iboutletcollection-attr.m b/test/SemaObjC/iboutletcollection-attr.m index 22c21a7..82cb96f 100644 --- a/test/SemaObjC/iboutletcollection-attr.m +++ b/test/SemaObjC/iboutletcollection-attr.m @@ -21,9 +21,9 @@ typedef void *PV; __attribute__((iboutletcollection(I, 1))) id ivar1; // expected-error {{attribute takes one argument}} __attribute__((iboutletcollection(B))) id ivar2; // expected-error {{invalid type 'B' as argument of iboutletcollection attribute}} __attribute__((iboutletcollection(PV))) id ivar3; // expected-error {{invalid type 'PV' as argument of iboutletcollection attribute}} - __attribute__((iboutletcollection(PV))) void *ivar4; // expected-warning {{ivar with 'iboutletcollection' attribute must be an object type (invalid 'void *')}} + __attribute__((iboutletcollection(PV))) void *ivar4; // expected-warning {{instance variable with 'iboutletcollection' attribute must be an object type (invalid 'void *')}} __attribute__((iboutletcollection(int))) id ivar5; // expected-error {{type argument of iboutletcollection attribute cannot be a builtin type}} - __attribute__((iboutlet)) int ivar6; // expected-warning {{ivar with 'iboutlet' attribute must be an object type}} + __attribute__((iboutlet)) int ivar6; // expected-warning {{instance variable with 'iboutlet' attribute must be an object 
type}} } @property (nonatomic, retain) __attribute__((iboutletcollection(I,2,3))) id prop1; // expected-error {{attribute takes one argument}} @property (nonatomic, retain) __attribute__((iboutletcollection(B))) id prop2; // expected-error {{invalid type 'B' as argument of iboutletcollection attribute}} diff --git a/test/SemaObjC/id_builtin.m b/test/SemaObjC/id_builtin.m index a1431d6..be42e7d 100644 --- a/test/SemaObjC/id_builtin.m +++ b/test/SemaObjC/id_builtin.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // id is now builtin. There should be no errors. id obj; diff --git a/test/SemaObjC/idiomatic-parentheses.m b/test/SemaObjC/idiomatic-parentheses.m index 417b948..801db59 100644 --- a/test/SemaObjC/idiomatic-parentheses.m +++ b/test/SemaObjC/idiomatic-parentheses.m @@ -4,13 +4,18 @@ // @interface Object +{ + unsigned uid; +} - (id) init; - (id) initWithInt: (int) i; - (void) iterate: (id) coll; - (id) nextObject; +@property unsigned uid; @end @implementation Object +@synthesize uid; - (id) init { if (self = [self init]) { } @@ -20,6 +25,12 @@ - (id) initWithInt: (int) i { if (self = [self initWithInt: i]) { } + // rdar://11066598 + if (self.uid = 100) { // expected-warning {{using the result of an assignment as a condition without parentheses}} \ + // expected-note {{place parentheses around the assignment to silence this warning}} \ + // expected-note {{use '==' to turn this assignment into an equality comparison}} + // ... + } return self; } diff --git a/test/SemaObjC/ignore-qualifier-on-qualified-id.m b/test/SemaObjC/ignore-qualifier-on-qualified-id.m index 36a2c1a..996664f 100644 --- a/test/SemaObjC/ignore-qualifier-on-qualified-id.m +++ b/test/SemaObjC/ignore-qualifier-on-qualified-id.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s // RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify %s +// expected-no-diagnostics // rdar://10667659 @protocol NSCopying @end diff --git a/test/SemaObjC/ignore-weakimport-method.m b/test/SemaObjC/ignore-weakimport-method.m index d71cebf..c68c578 100644 --- a/test/SemaObjC/ignore-weakimport-method.m +++ b/test/SemaObjC/ignore-weakimport-method.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface foo + (void) cx __attribute__((weak_import)); - (void) x __attribute__((weak_import)); diff --git a/test/SemaObjC/interface-layout-2.m b/test/SemaObjC/interface-layout-2.m index 02b1403..17e34d4 100644 --- a/test/SemaObjC/interface-layout-2.m +++ b/test/SemaObjC/interface-layout-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics @interface A { int ivar; diff --git a/test/SemaObjC/interface-layout.m b/test/SemaObjC/interface-layout.m index 336605a..9b083b0 100644 --- a/test/SemaObjC/interface-layout.m +++ b/test/SemaObjC/interface-layout.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify -triple i386-apple-darwin9 -fobjc-runtime=macosx-fragile-10.5 +// expected-no-diagnostics typedef struct objc_object {} *id; typedef signed char BOOL; typedef unsigned int NSUInteger; diff --git a/test/SemaObjC/interface-scope-2.m b/test/SemaObjC/interface-scope-2.m index 60fd900..ffd740f 100644 --- a/test/SemaObjC/interface-scope-2.m +++ b/test/SemaObjC/interface-scope-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -triple i686-apple-darwin9 -Wno-objc-root-class %s +// expected-no-diagnostics // FIXME: must also compile as Objective-C++ // diff --git a/test/SemaObjC/interface-scope.m 
b/test/SemaObjC/interface-scope.m index 0671dae..9875eca 100644 --- a/test/SemaObjC/interface-scope.m +++ b/test/SemaObjC/interface-scope.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface I1 { @private diff --git a/test/SemaObjC/ivar-access-package.m b/test/SemaObjC/ivar-access-package.m index abc3420..ff5ff4e 100644 --- a/test/SemaObjC/ivar-access-package.m +++ b/test/SemaObjC/ivar-access-package.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef unsigned char BOOL; diff --git a/test/SemaObjC/ivar-in-class-extension-error.m b/test/SemaObjC/ivar-in-class-extension-error.m index b22b798..c90e478 100644 --- a/test/SemaObjC/ivar-in-class-extension-error.m +++ b/test/SemaObjC/ivar-in-class-extension-error.m @@ -4,12 +4,12 @@ @interface A @end @interface A () { - int _p0; // expected-error {{ivars may not be placed in class extension}} + int _p0; // expected-error {{instance variables may not be placed in class extension}} } @property int p0; @end @interface A(CAT) { - int _p1; // expected-error {{ivars may not be placed in categories}} + int _p1; // expected-error {{instance variables may not be placed in categories}} } @end diff --git a/test/SemaObjC/ivar-in-class-extension.m b/test/SemaObjC/ivar-in-class-extension.m index cf02d26..dc5cf6a 100644 --- a/test/SemaObjC/ivar-in-class-extension.m +++ b/test/SemaObjC/ivar-in-class-extension.m @@ -32,7 +32,7 @@ int fn3(SomeClass *obj) { @interface SomeClass (Category) { - int categoryIvar; // expected-error {{ivars may not be placed in categories}} + int categoryIvar; // expected-error {{instance variables may not be placed in categories}} } @end diff --git a/test/SemaObjC/ivar-sem-check-2.m b/test/SemaObjC/ivar-sem-check-2.m index bf884b3..b1e1f2c 100644 --- a/test/SemaObjC/ivar-sem-check-2.m +++ b/test/SemaObjC/ivar-sem-check-2.m @@ -16,8 +16,8 @@ @implementation Sub @synthesize value; // expected-note {{previous use is here}} -@synthesize value1=value; // expected-error {{synthesized properties 'value1' and 'value' both claim ivar 'value'}} -@synthesize prop=value2; // expected-error {{property 'prop' attempting to use ivar 'value2' declared in super class 'Super'}} +@synthesize value1=value; // expected-error {{synthesized properties 'value1' and 'value' both claim instance variable 'value'}} +@synthesize prop=value2; // expected-error {{property 'prop' attempting to use instance variable 'value2' declared in super class 'Super'}} @end diff --git a/test/SemaObjC/method-attributes.m b/test/SemaObjC/method-attributes.m index f7252af..b402d52 100644 --- a/test/SemaObjC/method-attributes.m +++ b/test/SemaObjC/method-attributes.m @@ -55,3 +55,32 @@ - (IBAction)doSomething2:(id)sender {} // expected-warning {{attributes on method implementation and its declaration must match}} - (IBAction)doSomething3:(id)sender {} @end + +// rdar://11593375 +@interface NSObject @end + +@interface Test : NSObject +-(id)method __attribute__((deprecated)); +-(id)method1; +-(id)method2 __attribute__((aligned(16))); +- (id) method3: (int)arg1 __attribute__((aligned(16))) __attribute__((deprecated)) __attribute__((unavailable)); // expected-note {{method 'method3:' declared here}} +- (id) method4: (int)arg1 __attribute__((aligned(16))) __attribute__((deprecated)) __attribute__((unavailable)); +@end + +@implementation Test +-(id)method __attribute__((aligned(16))) __attribute__((aligned(16))) __attribute__((deprecated)) { + return self; +} +-(id)method1 
__attribute__((aligned(16))) { + return self; +} +-(id)method2 { + return self; +} +- (id) method3: (int)arg1 __attribute__((deprecated)) __attribute__((unavailable)) { // expected-warning {{attributes on method implementation and its declaration must match}} + return self; +} +- (id) method4: (int)arg1 __attribute__((aligned(16))) __attribute__((deprecated)) __attribute__((unavailable)) { + return self; +} +@end diff --git a/test/SemaObjC/method-conflict-1.m b/test/SemaObjC/method-conflict-1.m index ca91ebd..654cd01 100644 --- a/test/SemaObjC/method-conflict-1.m +++ b/test/SemaObjC/method-conflict-1.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // This test case tests the default behavior. diff --git a/test/SemaObjC/method-in-class-extension-impl.m b/test/SemaObjC/method-in-class-extension-impl.m index c205322..d74ae8f 100644 --- a/test/SemaObjC/method-in-class-extension-impl.m +++ b/test/SemaObjC/method-in-class-extension-impl.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // rdar://8530080 @protocol ViewDelegate @end diff --git a/test/SemaObjC/method-lookup-2.m b/test/SemaObjC/method-lookup-2.m index 53cae83..2596304 100644 --- a/test/SemaObjC/method-lookup-2.m +++ b/test/SemaObjC/method-lookup-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef signed char BOOL; @protocol NSObject diff --git a/test/SemaObjC/method-lookup-4.m b/test/SemaObjC/method-lookup-4.m index 700565e..807d4da 100644 --- a/test/SemaObjC/method-lookup-4.m +++ b/test/SemaObjC/method-lookup-4.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface NSObject {} diff --git a/test/SemaObjC/method-typecheck-1.m b/test/SemaObjC/method-typecheck-1.m index ee068d0..2d4e868 100644 --- a/test/SemaObjC/method-typecheck-1.m +++ b/test/SemaObjC/method-typecheck-1.m @@ -34,3 +34,18 @@ (float) x { return 0; } // expected-warning {{conflicting parameter types in implementation of 'setCat:': 'int' vs 'float'}} + (int) cCat: (int) x { return 0; } // expected-warning {{conflicting return type in implementation of 'cCat:': 'void' vs 'int'}} @end + +// rdar://12519216 +// test that when implementation implements method in a category, types match. 
+@interface testObject {} +@end + +@interface testObject(Category) +- (float)returnCGFloat; // expected-note {{previous definition is here}} +@end + +@implementation testObject +- (double)returnCGFloat { // expected-warning {{conflicting return type in implementation of 'returnCGFloat': 'float' vs 'double'}} + return 0.0; +} +@end diff --git a/test/SemaObjC/nested-typedef-decl.m b/test/SemaObjC/nested-typedef-decl.m index bb01ead..7051ac6 100644 --- a/test/SemaObjC/nested-typedef-decl.m +++ b/test/SemaObjC/nested-typedef-decl.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -x objective-c -fsyntax-only -verify -Wno-objc-root-class %s // RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://10041908 @interface Bar { diff --git a/test/SemaObjC/no-gc-weak-test.m b/test/SemaObjC/no-gc-weak-test.m index dd9b73c..6539a9b 100644 --- a/test/SemaObjC/no-gc-weak-test.m +++ b/test/SemaObjC/no-gc-weak-test.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface Subtask { diff --git a/test/SemaObjC/no-ivar-access-control.m b/test/SemaObjC/no-ivar-access-control.m index 6f00b1a..9bbff24 100644 --- a/test/SemaObjC/no-ivar-access-control.m +++ b/test/SemaObjC/no-ivar-access-control.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -fdebugger-support -verify -Wno-objc-root-class %s // RUN: %clang_cc1 -x objective-c++ -fdebugger-support -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://10997647 @interface I diff --git a/test/SemaObjC/no-ivar-in-interface-block.m b/test/SemaObjC/no-ivar-in-interface-block.m index 215db61..af4797f 100644 --- a/test/SemaObjC/no-ivar-in-interface-block.m +++ b/test/SemaObjC/no-ivar-in-interface-block.m @@ -3,11 +3,11 @@ @interface I { - @protected int P_IVAR; // expected-warning {{declaration of ivars in the interface is deprecated}} + @protected int P_IVAR; // expected-warning {{declaration of instance variables in the interface is deprecated}} - @public int PU_IVAR; // expected-warning {{declaration of ivars in the interface is deprecated}} + @public int PU_IVAR; // expected-warning {{declaration of instance variables in the interface is deprecated}} - @private int PRV_IVAR; // expected-warning {{declaration of ivars in the interface is deprecated}} + @private int PRV_IVAR; // expected-warning {{declaration of instance variables in the interface is deprecated}} } @end diff --git a/test/SemaObjC/no-warn-qual-mismatch.m b/test/SemaObjC/no-warn-qual-mismatch.m index 1e3c186..9638da4 100644 --- a/test/SemaObjC/no-warn-qual-mismatch.m +++ b/test/SemaObjC/no-warn-qual-mismatch.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // radar 7211563 @interface X diff --git a/test/SemaObjC/no-warn-synth-protocol-meth.m b/test/SemaObjC/no-warn-synth-protocol-meth.m index 103f6bb..cdb855e 100644 --- a/test/SemaObjC/no-warn-synth-protocol-meth.m +++ b/test/SemaObjC/no-warn-synth-protocol-meth.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @protocol CYCdef - (int)name; diff --git a/test/SemaObjC/no-warn-unimpl-method.m b/test/SemaObjC/no-warn-unimpl-method.m index dd6e3ad..174f70a 100644 --- a/test/SemaObjC/no-warn-unimpl-method.m +++ b/test/SemaObjC/no-warn-unimpl-method.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple x86_64-apple-darwin10 -fsyntax-only -verify %s +// expected-no-diagnostics // 
This program tests that if class implements the forwardInvocation method, then // every method possible is implemented in the class and should not issue // warning of the "Method definition not found" kind. */ diff --git a/test/SemaObjC/no-warning-unavail-unimp.m b/test/SemaObjC/no-warning-unavail-unimp.m index 88d519d..d5a4eac 100644 --- a/test/SemaObjC/no-warning-unavail-unimp.m +++ b/test/SemaObjC/no-warning-unavail-unimp.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://9651605 @interface Foo diff --git a/test/SemaObjC/nonarc-weak.m b/test/SemaObjC/nonarc-weak.m new file mode 100644 index 0000000..ab51875 --- /dev/null +++ b/test/SemaObjC/nonarc-weak.m @@ -0,0 +1,16 @@ +// RUN: %clang_cc1 -triple x86_64-apple-macosx10.8.0 -fobjc-runtime=macosx-10.8.0 -fsyntax-only -Wunused-function %s > %t.nonarc 2>&1 +// RUN: %clang_cc1 -triple x86_64-apple-macosx10.8.0 -fobjc-runtime=macosx-10.8.0 -fsyntax-only -Wunused-function -fobjc-arc %s > %t.arc 2>&1 +// RUN: FileCheck -input-file=%t.nonarc %s +// RUN: FileCheck -input-file=%t.arc -check-prefix=ARC %s + +static void bar() {} // Intentionally unused. + +void foo(id self) { + __weak id weakSelf = self; +} + +// CHECK: 9:13: warning: __weak attribute cannot be specified on an automatic variable when ARC is not enabled +// CHECK: 6:13: warning: unused function 'bar' +// CHECK: 2 warnings generated +// ARC: 6:13: warning: unused function 'bar' +// ARC: 1 warning generated diff --git a/test/SemaObjC/nonnull.m b/test/SemaObjC/nonnull.m index a38c0ac..902105b 100644 --- a/test/SemaObjC/nonnull.m +++ b/test/SemaObjC/nonnull.m @@ -1,6 +1,7 @@ #include "nonnull.h" // RUN: %clang_cc1 -fblocks -fsyntax-only -verify -Wno-objc-root-class %s +// REQUIRES: LP64 @class NSObject; diff --git a/test/SemaObjC/nowarn-superclass-method-mismatch.m b/test/SemaObjC/nowarn-superclass-method-mismatch.m index b211cde..d522e89 100644 --- a/test/SemaObjC/nowarn-superclass-method-mismatch.m +++ b/test/SemaObjC/nowarn-superclass-method-mismatch.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fobjc-arc -fobjc-runtime-has-weak -Wsuper-class-method-mismatch -verify %s +// expected-no-diagnostics // rdar://11793793 @class NSString; diff --git a/test/SemaObjC/nsobject-attribute-1.m b/test/SemaObjC/nsobject-attribute-1.m index 72d8fa6..4a75f5c 100644 --- a/test/SemaObjC/nsobject-attribute-1.m +++ b/test/SemaObjC/nsobject-attribute-1.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fblocks -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface NSObject - (id)self; diff --git a/test/SemaObjC/nsobject-attribute.m b/test/SemaObjC/nsobject-attribute.m index e3f2874..b794eaf 100644 --- a/test/SemaObjC/nsobject-attribute.m +++ b/test/SemaObjC/nsobject-attribute.m @@ -5,8 +5,8 @@ static int count; static CGColorRef tmp = 0; typedef struct S1 __attribute__ ((NSObject)) CGColorRef1; // expected-error {{__attribute ((NSObject)) is for pointer types only}} -typedef void * __attribute__ ((NSObject)) CGColorRef2; // expected-error {{__attribute ((NSObject)) is for pointer types only}} - +typedef void * __attribute__ ((NSObject)) CGColorRef2; // no-warning +typedef void * CFTypeRef; @interface HandTested { @public @@ -14,9 +14,11 @@ typedef void * __attribute__ ((NSObject)) CGColorRef2; // expected-error {{__at } @property(copy) CGColorRef x; -// rdar: // 7809460 -typedef struct CGColor * __attribute__((NSObject)) CGColorRefNoNSObject; +// rdar://problem/7809460 +typedef struct CGColor * 
__attribute__((NSObject)) CGColorRefNoNSObject; // no-warning @property (nonatomic, retain) CGColorRefNoNSObject color; +// rdar://problem/12197822 +@property (strong) __attribute__((NSObject)) CFTypeRef myObj; // no-warning @end void setProperty(id self, id value) { @@ -29,6 +31,7 @@ id getProperty(id self) { @implementation HandTested @synthesize x=x; +@synthesize myObj; @dynamic color; @end diff --git a/test/SemaObjC/objc-buffered-methods.m b/test/SemaObjC/objc-buffered-methods.m index a4b83be..55e4897 100644 --- a/test/SemaObjC/objc-buffered-methods.m +++ b/test/SemaObjC/objc-buffered-methods.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://8843851 int* global; diff --git a/test/SemaObjC/objc-literal-comparison.m b/test/SemaObjC/objc-literal-comparison.m index f1aa8ec..0a10582 100644 --- a/test/SemaObjC/objc-literal-comparison.m +++ b/test/SemaObjC/objc-literal-comparison.m @@ -2,6 +2,10 @@ // RUN: %clang_cc1 -fsyntax-only -Wno-everything -Wobjc-literal-compare "-Dnil=(id)0" -verify %s // RUN: %clang_cc1 -fsyntax-only -Wno-everything -Wobjc-literal-compare "-Dnil=0" -verify %s +// RUN: %clang_cc1 -fsyntax-only -Wno-everything -Wobjc-literal-compare -fobjc-arc "-Dnil=((id)0)" -verify %s +// RUN: %clang_cc1 -fsyntax-only -Wno-everything -Wobjc-literal-compare -fobjc-arc "-Dnil=(id)0" -verify %s +// RUN: %clang_cc1 -fsyntax-only -Wno-everything -Wobjc-literal-compare -fobjc-arc "-Dnil=0" -verify %s + // (test the warning flag as well) typedef signed char BOOL; diff --git a/test/SemaObjC/objc-qualified-property-lookup.m b/test/SemaObjC/objc-qualified-property-lookup.m index 48b28cb..b5cadbd 100644 --- a/test/SemaObjC/objc-qualified-property-lookup.m +++ b/test/SemaObjC/objc-qualified-property-lookup.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://9078584 @interface NSObject @end diff --git a/test/SemaObjC/overriding-property-in-class-extension.m b/test/SemaObjC/overriding-property-in-class-extension.m new file mode 100644 index 0000000..77efd55 --- /dev/null +++ b/test/SemaObjC/overriding-property-in-class-extension.m @@ -0,0 +1,27 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -Weverything %s +// expected-no-diagnostics +// rdar://12103434 + +@class NSString; + +@interface NSObject @end + +@interface MyClass : NSObject + +@property (nonatomic, copy, readonly) NSString* name; + +@end + +@interface MyClass () { + NSString* _name; +} + +@property (nonatomic, copy) NSString* name; + +@end + +@implementation MyClass + +@synthesize name = _name; + +@end diff --git a/test/SemaObjC/pedantic-dynamic-test.m b/test/SemaObjC/pedantic-dynamic-test.m index 61f36b3..1fc5ef6 100644 --- a/test/SemaObjC/pedantic-dynamic-test.m +++ b/test/SemaObjC/pedantic-dynamic-test.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -pedantic -Wno-objc-root-class %s +// expected-no-diagnostics // rdar: // 7860960 @interface I diff --git a/test/SemaObjC/pragma-pack.m b/test/SemaObjC/pragma-pack.m index ba39257..6869bca 100644 --- a/test/SemaObjC/pragma-pack.m +++ b/test/SemaObjC/pragma-pack.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i686-apple-darwin9 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // Make sure pragma pack works inside ObjC methods. 
@interface X diff --git a/test/SemaObjC/property-11.m b/test/SemaObjC/property-11.m index 2976115..e41a840 100644 --- a/test/SemaObjC/property-11.m +++ b/test/SemaObjC/property-11.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface NSSound @end diff --git a/test/SemaObjC/property-13.m b/test/SemaObjC/property-13.m index 2ca3416..362d6d3 100644 --- a/test/SemaObjC/property-13.m +++ b/test/SemaObjC/property-13.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s -Wno-unreachable-code +// expected-no-diagnostics @interface NSObject + alloc; diff --git a/test/SemaObjC/property-2.m b/test/SemaObjC/property-2.m index f95af59..3298ee5 100644 --- a/test/SemaObjC/property-2.m +++ b/test/SemaObjC/property-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface Tester @property char PropertyAtomic_char; diff --git a/test/SemaObjC/property-6.m b/test/SemaObjC/property-6.m index 933a4f0..f2a293e 100644 --- a/test/SemaObjC/property-6.m +++ b/test/SemaObjC/property-6.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -fobjc-exceptions %s +// expected-no-diagnostics # 1 "" # 1 "/System/Library/Frameworks/Foundation.framework/Headers/Foundation.h" 1 3 typedef signed char BOOL; diff --git a/test/SemaObjC/property-7.m b/test/SemaObjC/property-7.m index e6cba50..3d03b8f 100644 --- a/test/SemaObjC/property-7.m +++ b/test/SemaObjC/property-7.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef signed char BOOL; typedef struct _NSZone NSZone; diff --git a/test/SemaObjC/property-8.m b/test/SemaObjC/property-8.m index 8647aba..da97ffc 100644 --- a/test/SemaObjC/property-8.m +++ b/test/SemaObjC/property-8.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef signed char BOOL; typedef unsigned int NSUInteger; typedef struct _NSZone NSZone; diff --git a/test/SemaObjC/property-9-impl-method.m b/test/SemaObjC/property-9-impl-method.m index 84eb363..d6220f6 100644 --- a/test/SemaObjC/property-9-impl-method.m +++ b/test/SemaObjC/property-9-impl-method.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -fsyntax-only -verify +// expected-no-diagnostics // rdar://5967199 typedef signed char BOOL; diff --git a/test/SemaObjC/property-and-class-extension.m b/test/SemaObjC/property-and-class-extension.m index 7040078..80cedc3 100644 --- a/test/SemaObjC/property-and-class-extension.m +++ b/test/SemaObjC/property-and-class-extension.m @@ -31,6 +31,6 @@ extension but ignore any ivars in superclass class extensions. @end @implementation SomeClass -@synthesize Property; // expected-error {{property 'Property' attempting to use ivar 'Property' declared in super class 'Super'}} +@synthesize Property; // expected-error {{property 'Property' attempting to use instance variable 'Property' declared in super class 'Super'}} @synthesize Property1; // OK @end diff --git a/test/SemaObjC/property-and-ivar-use.m b/test/SemaObjC/property-and-ivar-use.m index 5b40d85..a997494 100644 --- a/test/SemaObjC/property-and-ivar-use.m +++ b/test/SemaObjC/property-and-ivar-use.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // Do not issue error if 'ivar' used previously belongs to the inherited class // and has same name as @dynalic property in current class. 
diff --git a/test/SemaObjC/property-deprecated-warning.m b/test/SemaObjC/property-deprecated-warning.m new file mode 100644 index 0000000..aa7b764 --- /dev/null +++ b/test/SemaObjC/property-deprecated-warning.m @@ -0,0 +1,64 @@ +// RUN: %clang_cc1 -fsyntax-only -triple thumbv6-apple-ios3.0 -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -x objective-c++ -fsyntax-only -triple thumbv6-apple-ios3.0 -verify -Wno-objc-root-class %s +// rdar://12324295 + +typedef signed char BOOL; + +@protocol P +@property(nonatomic,assign) id ptarget __attribute__((availability(ios,introduced=2.0,deprecated=3.0))); // expected-note 2 {{property 'ptarget' is declared deprecated here}} +@end + +@protocol P1
    +- (void)setPtarget:(id)arg; // expected-note {{method 'setPtarget:' declared here}} +@end + + +@interface UITableViewCell +@property(nonatomic,assign) id target __attribute__((availability(ios,introduced=2.0,deprecated=3.0))); // expected-note {{property 'target' is declared deprecated here}} +@end + +@interface PSTableCell : UITableViewCell + - (void)setTarget:(id)target; // expected-note {{method 'setTarget:' declared here}} +@end + +@interface UITableViewCell(UIDeprecated) +@property(nonatomic,assign) id dep_target __attribute__((availability(ios,introduced=2.0,deprecated=3.0))); // expected-note {{method 'dep_target' declared here}} \ + // expected-note 2 {{property 'dep_target' is declared deprecated here}} \ + // expected-note {{method 'setDep_target:' declared here}} +@end + +@implementation PSTableCell +- (void)setTarget:(id)target {}; +- (void)setPtarget:(id)val {}; +- (void) Meth { + [self setTarget: (id)0]; // expected-warning {{'setTarget:' is deprecated: first deprecated in iOS 3.0}} + [self setDep_target: [self dep_target]]; // expected-warning {{'dep_target' is deprecated: first deprecated in iOS 3.0}} \ + // expected-warning {{'setDep_target:' is deprecated: first deprecated in iOS 3.0}} + + [self setPtarget: (id)0]; // expected-warning {{setPtarget:' is deprecated: first deprecated in iOS 3.0}} +} +@end + + +@interface CustomAccessorNames +@property(getter=isEnabled,assign) BOOL enabled __attribute__((availability(ios,introduced=2.0,deprecated=3.0))); // expected-note {{method 'isEnabled' declared here}} expected-note {{property 'enabled' is declared deprecated here}} + +@property(setter=setNewDelegate:,assign) id delegate __attribute__((availability(ios,introduced=2.0,deprecated=3.0))); // expected-note {{method 'setNewDelegate:' declared here}} expected-note {{property 'delegate' is declared deprecated here}} +@end + +void testCustomAccessorNames(CustomAccessorNames *obj) { + if ([obj isEnabled]) // expected-warning {{'isEnabled' is deprecated: first deprecated in iOS 3.0}} + [obj setNewDelegate:0]; // expected-warning {{'setNewDelegate:' is deprecated: first deprecated in iOS 3.0}} +} + + +@interface ProtocolInCategory +@end + +@interface ProtocolInCategory (TheCategory) +- (id)ptarget; // expected-note {{method 'ptarget' declared here}} +@end + +id useDeprecatedProperty(ProtocolInCategory *obj) { + return [obj ptarget]; // expected-warning {{'ptarget' is deprecated: first deprecated in iOS 3.0}} +} diff --git a/test/SemaObjC/property-dot-receiver.m b/test/SemaObjC/property-dot-receiver.m index c5a928b..4a5f195 100644 --- a/test/SemaObjC/property-dot-receiver.m +++ b/test/SemaObjC/property-dot-receiver.m @@ -1,5 +1,6 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s // RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://8962253 @interface Singleton { diff --git a/test/SemaObjC/property-impl-misuse.m b/test/SemaObjC/property-impl-misuse.m index 939909e..c49916e 100644 --- a/test/SemaObjC/property-impl-misuse.m +++ b/test/SemaObjC/property-impl-misuse.m @@ -12,7 +12,7 @@ @dynamic X; // expected-note {{previous declaration is here}} @dynamic X; // expected-error {{property 'X' is already implemented}} @synthesize Y; // expected-note {{previous use is here}} -@synthesize Z=Y; // expected-error {{synthesized properties 'Z' and 'Y' both claim ivar 'Y'}} +@synthesize Z=Y; // expected-error {{synthesized properties 'Z' and 'Y' both claim instance variable 'Y'}} @end // rdar://8703553 diff 
--git a/test/SemaObjC/property-in-class-extension-1.m b/test/SemaObjC/property-in-class-extension-1.m new file mode 100644 index 0000000..ab461ef --- /dev/null +++ b/test/SemaObjC/property-in-class-extension-1.m @@ -0,0 +1,59 @@ +// RUN: %clang_cc1 -fsyntax-only -triple x86_64-apple-darwin11 -fobjc-runtime-has-weak -verify -Weverything %s +// RUN: %clang_cc1 -x objective-c++ -triple x86_64-apple-darwin11 -fobjc-runtime-has-weak -fsyntax-only -verify -Weverything %s +// rdar://12103400 + +@class NSString; + +@interface MyClass + +@property (nonatomic, readonly) NSString* addingMemoryModel; + +@property (nonatomic, copy, readonly) NSString* matchingMemoryModel; + +@property (nonatomic, retain, readonly) NSString* addingNoNewMemoryModel; + +@property (readonly) NSString* none; +@property (readonly) NSString* none1; + +@property (assign, readonly) NSString* changeMemoryModel; // expected-note {{property declared here}} + +@property (readonly) __weak id weak_prop; +@property (readonly) __weak id weak_prop1; + +@property (assign, readonly) NSString* assignProperty; + +@property (readonly) NSString* readonlyProp; + + + +@end + +@interface MyClass () +{ + NSString* _name; +} + +@property (nonatomic, copy) NSString* addingMemoryModel; +@property (nonatomic, copy) NSString* matchingMemoryModel; +@property () NSString* addingNoNewMemoryModel; +@property () NSString* none; +@property (readwrite, retain) NSString* none1; + +@property (retain) NSString* changeMemoryModel; // expected-warning {{property attribute in class extension does not match the primary class}} +@property () __weak id weak_prop; +@property (readwrite) __weak id weak_prop1; + +@property (assign, readwrite) NSString* assignProperty; +@property (assign) NSString* readonlyProp; +@end + +// rdar://12214070 +@interface radar12214070 +@property (nonatomic, atomic, readonly) float propertyName; // expected-error {{property attributes 'atomic' and 'nonatomic' are mutually exclusive}} +@end + +@interface radar12214070 () +@property (atomic, nonatomic, readonly, readwrite) float propertyName; // expected-error {{property attributes 'readonly' and 'readwrite' are mutually exclusive}} \ + // expected-error {{property attributes 'atomic' and 'nonatomic' are mutually exclusive}} +@end + diff --git a/test/SemaObjC/property-ivar-mismatch.m b/test/SemaObjC/property-ivar-mismatch.m index a0d1c9d..148fa8e 100644 --- a/test/SemaObjC/property-ivar-mismatch.m +++ b/test/SemaObjC/property-ivar-mismatch.m @@ -3,24 +3,24 @@ @interface Test4 { - char ivar; // expected-note{{ivar is declared here}} + char ivar; // expected-note{{instance variable is declared here}} } @property int prop; @end @implementation Test4 -@synthesize prop = ivar; // expected-error {{type of property 'prop' ('int') does not match type of ivar 'ivar' ('char')}} +@synthesize prop = ivar; // expected-error {{type of property 'prop' ('int') does not match type of instance variable 'ivar' ('char')}} @end @interface Test5 { - void * _P; // expected-note {{ivar is declared here}} + void * _P; // expected-note {{instance variable is declared here}} } @property int P; @end @implementation Test5 -@synthesize P=_P; // expected-error {{ype of property 'P' ('int') does not match type of ivar '_P' ('void *')}} +@synthesize P=_P; // expected-error {{ype of property 'P' ('int') does not match type of instance variable '_P' ('void *')}} @end diff --git a/test/SemaObjC/property-method-lookup-impl.m b/test/SemaObjC/property-method-lookup-impl.m index 19d4e68..dc490ed 100644 --- 
a/test/SemaObjC/property-method-lookup-impl.m +++ b/test/SemaObjC/property-method-lookup-impl.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface SSyncCEList { diff --git a/test/SemaObjC/property-nonfragile-abi.m b/test/SemaObjC/property-nonfragile-abi.m index 55bf91f..3684cb0 100644 --- a/test/SemaObjC/property-nonfragile-abi.m +++ b/test/SemaObjC/property-nonfragile-abi.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef signed char BOOL; diff --git a/test/SemaObjC/property-noprotocol-warning.m b/test/SemaObjC/property-noprotocol-warning.m index 71bb86a..e4752c5 100644 --- a/test/SemaObjC/property-noprotocol-warning.m +++ b/test/SemaObjC/property-noprotocol-warning.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface Object diff --git a/test/SemaObjC/property-redundant-decl-accessor.m b/test/SemaObjC/property-redundant-decl-accessor.m index 3b0e825..6ff2cea 100644 --- a/test/SemaObjC/property-redundant-decl-accessor.m +++ b/test/SemaObjC/property-redundant-decl-accessor.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -Werror -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface MyClass { const char *_myName; diff --git a/test/SemaObjC/property-weak.m b/test/SemaObjC/property-weak.m index a4397a6..d57774b 100644 --- a/test/SemaObjC/property-weak.m +++ b/test/SemaObjC/property-weak.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -triple i386-apple-darwin9 -fsyntax-only -verify %s +// expected-no-diagnostics @interface foo @property(nonatomic) int foo __attribute__((weak_import)); diff --git a/test/SemaObjC/property.m b/test/SemaObjC/property.m index d6495b4..76fdf5b 100644 --- a/test/SemaObjC/property.m +++ b/test/SemaObjC/property.m @@ -2,7 +2,7 @@ @interface I { - int IVAR; // expected-note{{ivar is declared here}} + int IVAR; // expected-note{{instance variable is declared here}} int name; } @property int d1; @@ -18,7 +18,7 @@ @synthesize d1; // expected-error {{synthesized property 'd1' must either be named the same as}} @dynamic bad; // expected-error {{property implementation must have its declaration in interface 'I'}} @synthesize prop_id; // expected-error {{synthesized property 'prop_id' must either be named the same}} // expected-note {{previous declaration is here}} -@synthesize prop_id = IVAR; // expected-error {{type of property 'prop_id' ('id') does not match type of ivar 'IVAR' ('int')}} // expected-error {{property 'prop_id' is already implemented}} +@synthesize prop_id = IVAR; // expected-error {{type of property 'prop_id' ('id') does not match type of instance variable 'IVAR' ('int')}} // expected-error {{property 'prop_id' is already implemented}} @synthesize name; // OK! 
property with same name as an accessible ivar of same name @end diff --git a/test/SemaObjC/props-on-prots.m b/test/SemaObjC/props-on-prots.m index c01e833..6962d6f 100644 --- a/test/SemaObjC/props-on-prots.m +++ b/test/SemaObjC/props-on-prots.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef signed char BOOL; @class NSInvocation, NSMethodSignature, NSCoder, NSString, NSEnumerator; diff --git a/test/SemaObjC/protocol-expr-1.m b/test/SemaObjC/protocol-expr-1.m index fe01d1d..94a0d9e 100644 --- a/test/SemaObjC/protocol-expr-1.m +++ b/test/SemaObjC/protocol-expr-1.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @protocol fproto; diff --git a/test/SemaObjC/protocol-implementation-inherited.m b/test/SemaObjC/protocol-implementation-inherited.m index c333bb5..45010d5 100644 --- a/test/SemaObjC/protocol-implementation-inherited.m +++ b/test/SemaObjC/protocol-implementation-inherited.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @protocol P0 -bar; diff --git a/test/SemaObjC/protocol-lookup-2.m b/test/SemaObjC/protocol-lookup-2.m index bf07523..9e8ed8a 100644 --- a/test/SemaObjC/protocol-lookup-2.m +++ b/test/SemaObjC/protocol-lookup-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface NSObject @end @protocol ProtocolA diff --git a/test/SemaObjC/protocol-lookup.m b/test/SemaObjC/protocol-lookup.m index ed3fbe0..26718ae 100644 --- a/test/SemaObjC/protocol-lookup.m +++ b/test/SemaObjC/protocol-lookup.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @protocol NSObject - retain; - release; diff --git a/test/SemaObjC/protocol-qualified-class-unsupported.m b/test/SemaObjC/protocol-qualified-class-unsupported.m index 4bf6b28..777084d 100644 --- a/test/SemaObjC/protocol-qualified-class-unsupported.m +++ b/test/SemaObjC/protocol-qualified-class-unsupported.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics #include diff --git a/test/SemaObjC/rdar6248119.m b/test/SemaObjC/rdar6248119.m index 046992c..a495978 100644 --- a/test/SemaObjC/rdar6248119.m +++ b/test/SemaObjC/rdar6248119.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only %s -verify -fobjc-exceptions +// expected-no-diagnostics // Test case for: // @finally doesn't introduce a new scope diff --git a/test/SemaObjC/restrict-id-type.m b/test/SemaObjC/restrict-id-type.m index b24fcb0..24f74c9 100644 --- a/test/SemaObjC/restrict-id-type.m +++ b/test/SemaObjC/restrict-id-type.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=gnu99 -fsyntax-only -verify %s +// expected-no-diagnostics void f0(restrict id a0) {} diff --git a/test/SemaObjC/selector-1.m b/test/SemaObjC/selector-1.m index 16d44cb..186e19f 100644 --- a/test/SemaObjC/selector-1.m +++ b/test/SemaObjC/selector-1.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -verify %s +// expected-no-diagnostics @interface I - (id) compare: (char) arg1; diff --git a/test/SemaObjC/selector-2.m b/test/SemaObjC/selector-2.m index fb75369..17d1872 100644 --- a/test/SemaObjC/selector-2.m +++ b/test/SemaObjC/selector-2.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -Wselector -verify %s +// expected-no-diagnostics // rdar://8851684 @interface I - length; diff --git a/test/SemaObjC/self-declared-in-block.m b/test/SemaObjC/self-declared-in-block.m index 40a0331..36a9ef5 100644 --- a/test/SemaObjC/self-declared-in-block.m +++ b/test/SemaObjC/self-declared-in-block.m @@ -1,5 +1,6 
@@ // RUN: %clang_cc1 -fsyntax-only -triple x86_64-apple-darwin10 -fblocks -verify -Wno-objc-root-class %s // RUN: %clang_cc1 -x objective-c++ -fsyntax-only -triple x86_64-apple-darwin10 -fblocks -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://9154582 @interface Blocky @end diff --git a/test/SemaObjC/self-in-function.m b/test/SemaObjC/self-in-function.m index 9027a94..a14ad90 100644 --- a/test/SemaObjC/self-in-function.m +++ b/test/SemaObjC/self-in-function.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fblocks -verify %s +// expected-no-diagnostics // rdar://9181463 typedef struct objc_class *Class; diff --git a/test/SemaObjC/setter-dotsyntax.m b/test/SemaObjC/setter-dotsyntax.m index e0b51e8..ec47ee2 100644 --- a/test/SemaObjC/setter-dotsyntax.m +++ b/test/SemaObjC/setter-dotsyntax.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // rdar://8528170 @interface NSObject @end diff --git a/test/SemaObjC/super-cat-prot.m b/test/SemaObjC/super-cat-prot.m index 3e28986..fd93994 100644 --- a/test/SemaObjC/super-cat-prot.m +++ b/test/SemaObjC/super-cat-prot.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics typedef signed char BOOL; typedef unsigned int NSUInteger; @class NSInvocation, NSMethodSignature, NSCoder, NSString, NSEnumerator; diff --git a/test/SemaObjC/super-dealloc-attribute.m b/test/SemaObjC/super-dealloc-attribute.m new file mode 100644 index 0000000..35f6dac --- /dev/null +++ b/test/SemaObjC/super-dealloc-attribute.m @@ -0,0 +1,87 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -fsyntax-only -fobjc-arc -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -x objective-c++ -fobjc-arc -fsyntax-only -verify -Wno-objc-root-class %s +// rdar://6386358 + +#if __has_attribute(objc_requires_super) +#define NS_REQUIRES_SUPER __attribute((objc_requires_super)) +#endif + +@protocol NSObject // expected-note {{protocol is declared here}} +- MyDealloc NS_REQUIRES_SUPER; // expected-warning {{'objc_requires_super' attribute cannot be applied to methods in protocols}} +@end + +@interface Root +- MyDealloc __attribute((objc_requires_super)); +- (void)XXX __attribute((objc_requires_super)); +- (void) dealloc __attribute((objc_requires_super)); // expected-warning {{'objc_requires_super' attribute cannot be applied to dealloc}} +- (void) MyDeallocMeth; // Method in root is not annotated. +- (void) AnnotMyDeallocMeth __attribute((objc_requires_super)); +- (void) AnnotMyDeallocMethCAT NS_REQUIRES_SUPER; + ++ (void)registerClass:(id)name __attribute((objc_requires_super)); +@end + +@interface Baz : Root +- MyDealloc; +- (void) MyDeallocMeth __attribute((objc_requires_super)); // 'Baz' author has annotated method +- (void) AnnotMyDeallocMeth; // Annotated in root but not here. Annotation is inherited though +- (void) AnnotMeth __attribute((objc_requires_super)); // 'Baz' author has annotated method +@end + +@implementation Baz +- MyDealloc { + [super MyDealloc]; + return 0; +} + +- (void)XXX { + [super MyDealloc]; +} // expected-warning {{method possibly missing a [super XXX] call}} + +- (void) MyDeallocMeth {} // No warning here. +- (void) AnnotMyDeallocMeth{} // expected-warning {{method possibly missing a [super AnnotMyDeallocMeth] call}} +- (void) AnnotMeth{}; // No warning here. Annotation is in its class. 
+ ++ (void)registerClass:(id)name {} // expected-warning {{method possibly missing a [super registerClass:] call}} +@end + +@interface Bar : Baz +@end + +@implementation Bar +- (void) MyDeallocMeth {} // expected-warning {{method possibly missing a [super MyDeallocMeth] call}} +- (void) AnnotMyDeallocMeth{} // expected-warning {{method possibly missing a [super AnnotMyDeallocMeth] call}} +- (void) AnnotMeth{}; // expected-warning {{method possibly missing a [super AnnotMeth] call}} +@end + +@interface Bar(CAT) +- (void) AnnotMyDeallocMethCAT; // Annotated in root but not here. Annotation is inherited though +- (void) AnnotMethCAT __attribute((objc_requires_super)); +@end + +@implementation Bar(CAT) +- (void) MyDeallocMeth {} // expected-warning {{method possibly missing a [super MyDeallocMeth] call}} +- (void) AnnotMyDeallocMeth{} // expected-warning {{method possibly missing a [super AnnotMyDeallocMeth] call}} +- (void) AnnotMeth{}; // expected-warning {{method possibly missing a [super AnnotMeth] call}} +- (void) AnnotMyDeallocMethCAT{}; // expected-warning {{method possibly missing a [super AnnotMyDeallocMethCAT] call}} +- (void) AnnotMethCAT {}; +@end + + +@interface Valid : Baz +@end + +@implementation Valid + +- (void)MyDeallocMeth { + [super MyDeallocMeth]; // no-warning +} + + ++ (void)registerClass:(id)name { + [super registerClass:name]; // no-warning +} + +@end diff --git a/test/SemaObjC/super-property-message-expr.m b/test/SemaObjC/super-property-message-expr.m index c25164e..81b8f8fa 100644 --- a/test/SemaObjC/super-property-message-expr.m +++ b/test/SemaObjC/super-property-message-expr.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface SStoreNodeInfo diff --git a/test/SemaObjC/super-property-notation.m b/test/SemaObjC/super-property-notation.m index 7d3f7c7..0c17bb9 100644 --- a/test/SemaObjC/super-property-notation.m +++ b/test/SemaObjC/super-property-notation.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface B +(int) classGetter; diff --git a/test/SemaObjC/synth-provisional-ivars-1.m b/test/SemaObjC/synth-provisional-ivars-1.m index 0e155f4..92a9d71 100644 --- a/test/SemaObjC/synth-provisional-ivars-1.m +++ b/test/SemaObjC/synth-provisional-ivars-1.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fobjc-default-synthesize-properties -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://8913053 typedef unsigned char BOOL; diff --git a/test/SemaObjC/synthesize-setter-contclass.m b/test/SemaObjC/synthesize-setter-contclass.m index d754415..df954db 100644 --- a/test/SemaObjC/synthesize-setter-contclass.m +++ b/test/SemaObjC/synthesize-setter-contclass.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface TestClass { diff --git a/test/SemaObjC/transparent-union.m b/test/SemaObjC/transparent-union.m index 6f2dbf9..bda0a54 100644 --- a/test/SemaObjC/transparent-union.m +++ b/test/SemaObjC/transparent-union.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics typedef union { struct xx_object_s *_do; diff --git a/test/SemaObjC/ucn-objc-string.m b/test/SemaObjC/ucn-objc-string.m index 6070278..f80d1ff 100644 --- a/test/SemaObjC/ucn-objc-string.m +++ b/test/SemaObjC/ucn-objc-string.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -fsyntax-only +// expected-no-diagnostics @class NSString; extern void NSLog(NSString *format, ...) 
__attribute__((format(__NSString__, 1, 2))); diff --git a/test/SemaObjC/uninit-variables.m b/test/SemaObjC/uninit-variables.m index cad0f54..a331226 100644 --- a/test/SemaObjC/uninit-variables.m +++ b/test/SemaObjC/uninit-variables.m @@ -1,5 +1,16 @@ // RUN: %clang_cc1 -fsyntax-only -Wuninitialized -fsyntax-only -fblocks %s -verify +#include + +@interface NSObject {} @end +@class NSString; + +@interface NSException ++ (void)raise:(NSString *)name format:(NSString *)format, ...; ++ (void)raise:(NSString *)name format:(NSString *)format arguments:(va_list)argList; +- (void)raise; +@end + // Duplicated from uninit-variables.c. // Test just to ensure the analysis is working. int test1() { @@ -25,3 +36,21 @@ void test3() { } } +int test_abort_on_exceptions(int y, NSException *e, NSString *s, int *z, ...) { + int x; // expected-note {{initialize the variable 'x' to silence this warning}} + if (y == 1) { + va_list alist; + va_start(alist, z); + [NSException raise:@"Blah" format:@"Blah %@" arguments:alist]; + return x; + } + else if (y == 2) { + [NSException raise:@"Blah" format:s]; + return x; + } + else if (y == 3) { + [e raise]; + return x; + } + return x; // expected-warning {{variable 'x' is uninitialized when used here}} +} diff --git a/test/SemaObjC/unused.m b/test/SemaObjC/unused.m index 5c7542b..3fd1cf0 100644 --- a/test/SemaObjC/unused.m +++ b/test/SemaObjC/unused.m @@ -1,4 +1,4 @@ -// RUN: %clang_cc1 -verify -Wunused -Wunused-parameter -fsyntax-only -Wno-objc-root-class %s +// RUN: %clang_cc1 -verify -Wused-but-marked-unused -Wno-objc-protocol-method-implementation -Wunused -Wunused-parameter -fsyntax-only -Wno-objc-root-class %s int printf(const char *, ...); @@ -25,12 +25,17 @@ void test2() { } @interface foo -- (int)meth: (int)x: (int)y: (int)z ; +- (int)meth: (int)x : (int)y : (int)z ; @end @implementation foo -- (int) meth: (int)x: -(int)y: // expected-warning{{unused}} +- (int) meth: (int)x: // expected-warning {{'x' used as the name of the previous parameter rather than as part of the selector}} \ + // expected-note {{introduce a parameter name to make 'x' part of the selector}} \ + // expected-note {{or insert whitespace before ':' to use 'x' as parameter name and have an empty entry in the selector}} + +(int)y: // expected-warning {{unused}} expected-warning {{'y' used as the name of the previous parameter rather than as part of the selector}} \ + // expected-note {{introduce a parameter name to make 'y' part of the selector}} \ + // expected-note {{or insert whitespace before ':' to use 'y' as parameter name and have an empty entry in the selector}} (int) __attribute__((unused))z { return x; } @end @@ -53,3 +58,17 @@ void test2() { // rdar://10777111 static NSString *x = @"hi"; // expected-warning {{unused variable 'x'}} + +// rdar://12233989 +@interface TestTransitiveUnused +- (void) a __attribute__((unused)); +- (void) b __attribute__((unused)); +@end + +@interface TestTransitiveUnused(CAT) +@end + +@implementation TestTransitiveUnused(CAT) +- (void) b {} +- (void) a { [self b]; } +@end diff --git a/test/SemaObjC/va-method-1.m b/test/SemaObjC/va-method-1.m index fe7ccd7..4959df3 100644 --- a/test/SemaObjC/va-method-1.m +++ b/test/SemaObjC/va-method-1.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics #include diff --git a/test/SemaObjC/warn-direct-ivar-access.m b/test/SemaObjC/warn-direct-ivar-access.m index d2295f4..088fe0f 100644 --- a/test/SemaObjC/warn-direct-ivar-access.m +++ b/test/SemaObjC/warn-direct-ivar-access.m @@ -15,7 +15,7 
@@ __attribute__((objc_root_class)) @interface MyObject { @implementation MyObject @synthesize myMaster = _myMaster; -@synthesize isTickledPink = _isTickledPink; // expected-error {{existing ivar '_isTickledPink' for property 'isTickledPink'}} +@synthesize isTickledPink = _isTickledPink; // expected-error {{existing instance variable '_isTickledPink' for property 'isTickledPink'}} @synthesize myIntProp = _myIntProp; - (void) doSomething { diff --git a/test/SemaObjC/warn-implicit-self-in-block.m b/test/SemaObjC/warn-implicit-self-in-block.m new file mode 100644 index 0000000..a7ee16e --- /dev/null +++ b/test/SemaObjC/warn-implicit-self-in-block.m @@ -0,0 +1,18 @@ +// RUN: %clang_cc1 -x objective-c -fobjc-arc -fblocks -Wimplicit-retain-self -verify %s +// rdar://11194874 + +@interface Root @end + +@interface I : Root +{ + int _bar; +} +@end + +@implementation I + - (void)foo{ + ^{ + _bar = 3; // expected-warning {{block implicitly retains 'self'; explicitly mention 'self' to indicate this is intended behavior}} + }(); + } +@end diff --git a/test/SemaObjC/warn-isa-ref.m b/test/SemaObjC/warn-isa-ref.m index 1932a02..9d7abd4 100644 --- a/test/SemaObjC/warn-isa-ref.m +++ b/test/SemaObjC/warn-isa-ref.m @@ -41,7 +41,7 @@ static void func() { @interface BaseClass { @public - Class isa; // expected-note 3 {{ivar is declared here}} + Class isa; // expected-note 3 {{instance variable is declared here}} } @end diff --git a/test/SemaObjC/warn-retain-cycle.m b/test/SemaObjC/warn-retain-cycle.m index 00fd234..eb4e966 100644 --- a/test/SemaObjC/warn-retain-cycle.m +++ b/test/SemaObjC/warn-retain-cycle.m @@ -1,4 +1,6 @@ -// RUN: %clang_cc1 -fsyntax-only -fobjc-runtime-has-weak -fobjc-arc -fblocks -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -fsyntax-only -fobjc-runtime-has-weak -fobjc-arc -fblocks -verify -Wno-objc-root-class -Wno-implicit-retain-self %s + +void *_Block_copy(const void *block); @interface Test0 - (void) setBlock: (void(^)(void)) block; @@ -24,6 +26,10 @@ void test0(Test0 *x) { [weakx addBlock: ^{ [x actNow]; }]; [weakx setBlock: ^{ [x actNow]; }]; weakx.block = ^{ [x actNow]; }; + + // rdar://11702054 + x.block = ^{ (void)x.actNow; }; // expected-warning {{capturing 'x' strongly in this block is likely to lead to a retain cycle}} \ + // expected-note {{block will be retained by the captured object}} } @interface BlockOwner @@ -123,3 +129,58 @@ void doSomething(unsigned v); } @end + +void testBlockVariable() { + typedef void (^block_t)(void); + + // This case will be caught by -Wuninitialized, and does not create a + // retain cycle. + block_t a1 = ^{ + a1(); // no-warning + }; + + // This case will also be caught by -Wuninitialized. 
+ block_t a2; + a2 = ^{ + a2(); // no-warning + }; + + __block block_t b1 = ^{ // expected-note{{block will be retained by the captured object}} + b1(); // expected-warning{{capturing 'b1' strongly in this block is likely to lead to a retain cycle}} + }; + + __block block_t b2; + b2 = ^{ // expected-note{{block will be retained by the captured object}} + b2(); // expected-warning{{capturing 'b2' strongly in this block is likely to lead to a retain cycle}} + }; +} + + +@interface NSObject +- (id)copy; + +- (void (^)(void))someRandomMethodReturningABlock; +@end + + +void testCopying(Test0 *obj) { + typedef void (^block_t)(void); + + [obj setBlock:[^{ // expected-note{{block will be retained by the captured object}} + [obj actNow]; // expected-warning{{capturing 'obj' strongly in this block is likely to lead to a retain cycle}} + } copy]]; + + [obj addBlock:(__bridge_transfer block_t)_Block_copy((__bridge void *)^{ // expected-note{{block will be retained by the captured object}} + [obj actNow]; // expected-warning{{capturing 'obj' strongly in this block is likely to lead to a retain cycle}} + })]; + + [obj addBlock:[^{ + [obj actNow]; // no-warning + } someRandomMethodReturningABlock]]; + + extern block_t someRandomFunctionReturningABlock(block_t); + [obj setBlock:someRandomFunctionReturningABlock(^{ + [obj actNow]; // no-warning + })]; +} + diff --git a/test/SemaObjC/warning-missing-selector-name.m b/test/SemaObjC/warning-missing-selector-name.m new file mode 100644 index 0000000..d43031e --- /dev/null +++ b/test/SemaObjC/warning-missing-selector-name.m @@ -0,0 +1,32 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// RUN: %clang_cc1 -x objective-c++ -fsyntax-only -verify -Wno-objc-root-class -Wmissing-selector-name %s +// rdar://12263549 + +@interface Super @end +@interface INTF : Super +-(void) Name1:(id)Arg1 Name2:(id)Arg2; // Name1:Name2: +-(void) Name1:(id) Name2:(id)Arg2; // expected-warning {{'Name2' used as the name of the previous parameter rather than as part of the selector}} \ + // expected-note {{introduce a parameter name to make 'Name2' part of the selector}} \ + // expected-note {{or insert whitespace before ':' to use 'Name2' as parameter name and have an empty entry in the selector}} +-(void) Name1:(id)Arg1 Name2:(id)Arg2 Name3:(id)Arg3; // Name1:Name2:Name3: +-(void) Name1:(id)Arg1 Name2:(id) Name3:(id)Arg3; // expected-warning {{'Name3' used as the name of the previous parameter rather than as part of the selector}} \ + // expected-note {{introduce a parameter name to make 'Name3' part of the selector}} \ + // expected-note {{or insert whitespace before ':' to use 'Name3' as parameter name and have an empty entry in the selector}} +- method:(id) second:(id)second; // expected-warning {{'second' used as the name of the previous parameter rather than as part of the selector}} \ + // expected-note {{introduce a parameter name to make 'second' part of the selector}} \ + // expected-note {{or insert whitespace before ':' to use 'second' as parameter name and have an empty entry in the selector}} \ + // expected-note {{method definition for 'method::' not found}} + +@end + +@implementation INTF // expected-warning {{incomplete implementation}} +-(void) Name1:(id)Arg1 Name2:(id)Arg2{} +-(void) Name1:(id) Name2:(id)Arg2 {} // expected-warning {{'Name2' used as the name of the previous parameter rather than as part of the selector}} \ + // expected-note {{introduce a parameter name to make 'Name2' part of the selector}} \ + // expected-note {{or insert 
whitespace before ':' to use 'Name2' as parameter name and have an empty entry in the selector}} +-(void) Name1:(id)Arg1 Name2:(id)Arg2 Name3:(id)Arg3 {} +-(void) Name1:(id)Arg1 Name2:(id) Name3:(id)Arg3 {} // expected-warning {{'Name3' used as the name of the previous parameter rather than as part of the selector}} \ + // expected-note {{introduce a parameter name to make 'Name3' part of the selector}} \ + // expected-note {{or insert whitespace before ':' to use 'Name3' as parameter name and have an empty entry in the selector}} +- method:(id)first second:(id)second {return 0; } +@end diff --git a/test/SemaObjC/weak-property.m b/test/SemaObjC/weak-property.m index 8a2adf9..141c35b 100644 --- a/test/SemaObjC/weak-property.m +++ b/test/SemaObjC/weak-property.m @@ -19,6 +19,6 @@ @end @implementation WeakPropertyTest -@synthesize x; // expected-error {{existing ivar 'x' for __weak property 'x' must be __weak}} +@synthesize x; // expected-error {{existing instance variable 'x' for __weak property 'x' must be __weak}} @dynamic value1, value, value2, v1,v2,v3,v4; @end diff --git a/test/SemaObjC/weak-receiver-warn.m b/test/SemaObjC/weak-receiver-warn.m index 547f008..88b867e 100644 --- a/test/SemaObjC/weak-receiver-warn.m +++ b/test/SemaObjC/weak-receiver-warn.m @@ -9,13 +9,13 @@ void test0(Test0 *x) { __weak Test0 *weakx = x; - [x addBlock: ^{ [weakx actNow]; }]; // expected-warning {{weak receiver may be unpredictably null in ARC mode}} - [x setBlock: ^{ [weakx actNow]; }]; // expected-warning {{weak receiver may be unpredictably null in ARC mode}} - x.block = ^{ [weakx actNow]; }; // expected-warning {{weak receiver may be unpredictably null in ARC mode}} + [x addBlock: ^{ [weakx actNow]; }]; // expected-warning {{weak receiver may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} + [x setBlock: ^{ [weakx actNow]; }]; // expected-warning {{weak receiver may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} + x.block = ^{ [weakx actNow]; }; // expected-warning {{weak receiver may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} - [weakx addBlock: ^{ [x actNow]; }]; // expected-warning {{weak receiver may be unpredictably null in ARC mode}} - [weakx setBlock: ^{ [x actNow]; }]; // expected-warning {{weak receiver may be unpredictably null in ARC mode}} - weakx.block = ^{ [x actNow]; }; // expected-warning {{weak receiver may be unpredictably null in ARC mode}} + [weakx addBlock: ^{ [x actNow]; }]; // expected-warning {{weak receiver may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} + [weakx setBlock: ^{ [x actNow]; }]; // expected-warning {{weak receiver may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} + weakx.block = ^{ [x actNow]; }; // expected-warning {{weak receiver may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} } @interface Test @@ -36,12 +36,12 @@ void test0(Test0 *x) { if (self.weak_atomic_prop) { self.weak_atomic_prop = 0; } - [self.weak_prop Meth]; // expected-warning {{weak property may be unpredictably null in ARC mode}} + [self.weak_prop Meth]; // expected-warning {{weak property may be unpredictably set to nil}} expected-note {{assign the 
value to a strong variable to keep the object alive during use}} id pi = self.P; - [self.weak_atomic_prop Meth]; // expected-warning {{weak property may be unpredictably null in ARC mode}} + [self.weak_atomic_prop Meth]; // expected-warning {{weak property may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} - [self.P Meth]; // expected-warning {{weak implicit property may be unpredictably null in ARC mode}} + [self.P Meth]; // expected-warning {{weak implicit property may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} } - (__weak id) P { return 0; } @@ -52,7 +52,7 @@ void test0(Test0 *x) { @interface MyClass { __weak MyClass *_parent; } -@property (weak) MyClass *parent; // expected-note 2 {{property declared here}} +@property (weak) MyClass *parent; // expected-note 4 {{property declared here}} @end @implementation MyClass @@ -60,9 +60,9 @@ void test0(Test0 *x) { - (void)doSomething { - [[self parent] doSomething]; // expected-warning {{weak property may be unpredictably null in ARC mode}} + [[self parent] doSomething]; // expected-warning {{weak property may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} - (void)self.parent.doSomething; // expected-warning {{weak property may be unpredictably null in ARC mode}} + (void)self.parent.doSomething; // expected-warning {{weak property may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} } @end @@ -74,7 +74,27 @@ void test0(Test0 *x) { @end void testProtocol(id input) { - [[input object] Meth]; // expected-warning {{weak property may be unpredictably null in ARC mode}} - [input.object Meth]; // expected-warning {{weak property may be unpredictably null in ARC mode}} + [[input object] Meth]; // expected-warning {{weak property may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} + [input.object Meth]; // expected-warning {{weak property may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} } + +@interface Subclass : MyClass +// Unnecessarily redeclare -parent. 
+- (id)parent; +@end + +@implementation Subclass + +- (id)parent { + return [super parent]; +} + +- (void)doSomethingElse { + [[self parent] doSomething]; // expected-warning {{weak property may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} + + (void)self.parent.doSomething; // expected-warning {{weak property may be unpredictably set to nil}} expected-note {{assign the value to a strong variable to keep the object alive during use}} +} + +@end + diff --git a/test/SemaObjC/writable-property-in-superclass.m b/test/SemaObjC/writable-property-in-superclass.m index bbd1f16..99be541 100644 --- a/test/SemaObjC/writable-property-in-superclass.m +++ b/test/SemaObjC/writable-property-in-superclass.m @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface MessageStore @property (assign, readonly) int P; diff --git a/test/SemaObjCXX/abstract-class-type-ivar.mm b/test/SemaObjCXX/abstract-class-type-ivar.mm index 823e9c1..990ba1c2 100644 --- a/test/SemaObjCXX/abstract-class-type-ivar.mm +++ b/test/SemaObjCXX/abstract-class-type-ivar.mm @@ -13,7 +13,7 @@ class CppConcreteSub : public CppAbstractBase { }; @interface Objc { - CppConcreteSub _concrete; // expected-error{{ivar type 'CppConcreteSub' is an abstract class}} + CppConcreteSub _concrete; // expected-error{{instance variable type 'CppConcreteSub' is an abstract class}} } - (CppAbstractBase*)abstract; @end diff --git a/test/SemaObjCXX/arc-0x.mm b/test/SemaObjCXX/arc-0x.mm index e24b960..43f6671 100644 --- a/test/SemaObjCXX/arc-0x.mm +++ b/test/SemaObjCXX/arc-0x.mm @@ -80,4 +80,16 @@ void testAutoIdTemplate(id obj) { autoTemplateFunction(obj, obj, [Array new]); // no-warning } +// rdar://12229679 +@interface NSObject @end +typedef __builtin_va_list va_list; +@interface MyClass : NSObject +@end + +@implementation MyClass ++ (void)fooMethod:(id)firstArg, ... { + va_list args; + __builtin_va_arg(args, id); +} +@end diff --git a/test/SemaObjCXX/arc-bool-conversion.mm b/test/SemaObjCXX/arc-bool-conversion.mm index d8f840e..12a3be3 100644 --- a/test/SemaObjCXX/arc-bool-conversion.mm +++ b/test/SemaObjCXX/arc-bool-conversion.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fobjc-arc -verify -fblocks -triple x86_64-apple-darwin10.0.0 %s +// expected-no-diagnostics // rdar://9310049 bool fn(id obj) { diff --git a/test/SemaObjCXX/arc-libstdcxx.mm b/test/SemaObjCXX/arc-libstdcxx.mm index 71771b4..537e6b4 100644 --- a/test/SemaObjCXX/arc-libstdcxx.mm +++ b/test/SemaObjCXX/arc-libstdcxx.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fobjc-arc -fobjc-arc-cxxlib=libstdc++ -fobjc-runtime-has-weak -verify %s +// expected-no-diagnostics @interface A @end diff --git a/test/SemaObjCXX/arc-memfunc.mm b/test/SemaObjCXX/arc-memfunc.mm index 274f873..09556e3 100644 --- a/test/SemaObjCXX/arc-memfunc.mm +++ b/test/SemaObjCXX/arc-memfunc.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fobjc-arc -verify -fblocks %s +// expected-no-diagnostics struct X0 { static id makeObject1() __attribute__((ns_returns_retained)); diff --git a/test/SemaObjCXX/arc-non-pod.mm b/test/SemaObjCXX/arc-non-pod.mm deleted file mode 100644 index 1c5cf7a..0000000 --- a/test/SemaObjCXX/arc-non-pod.mm +++ /dev/null @@ -1,116 +0,0 @@ -// RUN: %clang_cc1 -fsyntax-only -fobjc-arc -Warc-abi -verify -fblocks -triple x86_64-apple-darwin10.0.0 %s - -// Classes that have an Objective-C object pointer. 
-struct HasObjectMember0 { // expected-warning{{'HasObjectMember0' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} - id x; -}; - -struct HasObjectMember1 { // expected-warning{{'HasObjectMember1' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} - id x[3]; -}; - -struct HasObjectMember2 { // expected-warning{{'HasObjectMember2' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} - id x[3][2]; -}; - -// Don't complain if the type has non-external linkage -namespace { - struct HasObjectMember3 { - id x[3][2]; - }; -} - -// Don't complain if the Objective-C pointer type was explicitly given -// no ownership. -struct HasObjectMember3 { - __unsafe_unretained id x[3][2]; -}; - -struct HasBlockPointerMember0 { // expected-warning{{'HasBlockPointerMember0' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} - int (^bp)(int); -}; - -struct HasBlockPointerMember1 { // expected-warning{{'HasBlockPointerMember1' cannot be shared between ARC and non-ARC code; add a copy constructor, a copy assignment operator, and a destructor to make it ABI-compatible}} - int (^bp[2][3])(int); -}; - -struct NonPOD { - NonPOD(const NonPOD&); -}; - -struct HasObjectMemberAndNonPOD0 { // expected-warning{{'HasObjectMemberAndNonPOD0' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ - // expected-warning{{'HasObjectMemberAndNonPOD0' cannot be shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} - id x; - NonPOD np; -}; - -struct HasObjectMemberAndNonPOD1 { // expected-warning{{'HasObjectMemberAndNonPOD1' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ - // expected-warning{{'HasObjectMemberAndNonPOD1' cannot be shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} - NonPOD np; - id x[3]; -}; - -struct HasObjectMemberAndNonPOD2 { // expected-warning{{'HasObjectMemberAndNonPOD2' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ - // expected-warning{{'HasObjectMemberAndNonPOD2' cannot be shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} - NonPOD np; - id x[3][2]; -}; - -struct HasObjectMemberAndNonPOD3 { - HasObjectMemberAndNonPOD3 &operator=(const HasObjectMemberAndNonPOD3&); - ~HasObjectMemberAndNonPOD3(); - NonPOD np; - id x[3][2]; -}; - -struct HasBlockPointerMemberAndNonPOD0 { // expected-warning{{'HasBlockPointerMemberAndNonPOD0' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ -// expected-warning{{'HasBlockPointerMemberAndNonPOD0' cannot be shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} - NonPOD np; - int (^bp)(int); -}; - -struct HasBlockPointerMemberAndNonPOD1 { // expected-warning{{'HasBlockPointerMemberAndNonPOD1' cannot be shared between ARC and non-ARC code; add a non-trivial copy assignment operator to make it ABI-compatible}} \ -// expected-warning{{'HasBlockPointerMemberAndNonPOD1' cannot be 
shared between ARC and non-ARC code; add a non-trivial destructor to make it ABI-compatible}} - NonPOD np; - int (^bp[2][3])(int); -}; - -int check_non_pod_objc_pointer0[__is_pod(id)? 1 : -1]; -int check_non_pod_objc_pointer1[__is_pod(__strong id)? -1 : 1]; -int check_non_pod_objc_pointer2[__is_pod(__unsafe_unretained id)? 1 : -1]; -int check_non_pod_objc_pointer3[__is_pod(id[2][3])? 1 : -1]; -int check_non_pod_objc_pointer4[__is_pod(__unsafe_unretained id[2][3])? 1 : -1]; -int check_non_pod_block0[__is_pod(int (^)(int))? 1 : -1]; -int check_non_pod_block1[__is_pod(int (^ __unsafe_unretained)(int))? 1 : -1]; -int check_non_pod_block2[__is_pod(int (^ __strong)(int))? -1 : 1]; - -struct FlexibleArrayMember0 { - int length; - id array[]; // expected-error{{flexible array member 'array' of non-POD element type 'id __strong[]'}} -}; - -struct FlexibleArrayMember1 { - int length; - __unsafe_unretained id array[]; -}; - -// It's okay to pass a retainable type through an ellipsis. -void variadic(...); -void test_variadic() { - variadic(1, 17, @"Foo"); -} - -// It's okay to create a VLA of retainable types. -void vla(int n) { - id vla[n]; -} - -@interface Crufty { - union { - struct { - id object; // expected-note{{has __strong ownership}} - } an_object; // expected-error{{union member 'an_object' has a non-trivial copy constructor}} - void *ptr; - } storage; -} -@end diff --git a/test/SemaObjCXX/arc-objc-lifetime.mm b/test/SemaObjCXX/arc-objc-lifetime.mm new file mode 100644 index 0000000..1e4df74 --- /dev/null +++ b/test/SemaObjCXX/arc-objc-lifetime.mm @@ -0,0 +1,68 @@ +// RUN: %clang_cc1 -x objective-c++ -triple x86_64-apple-darwin11 -fsyntax-only -fobjc-arc -fblocks -Wexplicit-ownership-type -verify -Wno-objc-root-class %s +// rdar://10244607 + +typedef const struct __CFString * CFStringRef; +@class NSString; + +NSString *CFBridgingRelease(); + +typedef NSString * PNSString; + +typedef __autoreleasing NSString * AUTORELEASEPNSString; + +@interface I @end + +@implementation I +- (CFStringRef)myString +{ + CFStringRef myString = + (__bridge CFStringRef) (__strong NSString *)CFBridgingRelease(); // expected-error {{explicit ownership qualifier on cast result has no effect}} + + myString = + (__bridge CFStringRef) (__autoreleasing PNSString) CFBridgingRelease(); // expected-error {{explicit ownership qualifier on cast result has no effect}} + myString = + (__bridge CFStringRef) (AUTORELEASEPNSString) CFBridgingRelease(); // OK + myString = + (__bridge CFStringRef) (typeof(__strong NSString *)) CFBridgingRelease(); // expected-error {{explicit ownership qualifier on cast result has no effect}} + return myString; +} + +- (void)decodeValueOfObjCType:(const char *)type at:(void *)addr { + __autoreleasing id *stuff = (__autoreleasing id *)addr; +} +@end + +// rdar://problem/10711456 +__strong I *__strong test1; // expected-error {{the type 'I *__strong' is already explicitly ownership-qualified}} +__strong I *(__strong test2); // expected-error {{the type 'I *__strong' is already explicitly ownership-qualified}} +__strong I *(__strong (test3)); // expected-error {{the type 'I *__strong' is already explicitly ownership-qualified}} +__unsafe_unretained __typeof__(test3) test4; +typedef __strong I *strong_I; +__unsafe_unretained strong_I test5; + +// rdar://10907090 +typedef void (^T) (); +@interface NSObject @end +@protocol P; +@interface Radar10907090 @end + +@implementation Radar10907090 +- (void) MMM : (NSObject*) arg0 : (NSObject
*&)arg : (id) arg1 : (id &) arg2 {} // expected-warning {{method parameter of type 'NSObject *__autoreleasing &' with no explicit ownership}} \ + // expected-warning {{method parameter of type '__autoreleasing id &' with no explicit ownership}} +- (void) MM : (NSObject*) arg0 : (__strong NSObject**)arg : (id) arg1 : (__strong id*) arg2 {} +- (void) M : (NSObject**)arg0 : (id*)arg {} // expected-warning {{method parameter of type 'NSObject *__autoreleasing *' with no explicit ownership}} \ + // expected-warning {{method parameter of type '__autoreleasing id *' with no explicit ownership}} +- (void) N : (__strong NSObject***) arg0 : (__strong NSObject
    ***)arg : (float**) arg1 : (double) arg2 {} +- (void) BLOCK : (T&) arg0 : (T)arg : (__strong T*) arg1 {} // expected-warning {{method parameter of type '__autoreleasing T &' (aka 'void (^__autoreleasing &)()') with no explicit ownership}} +@end + +// rdar://12280826 +@class NSMutableDictionary, NSError; +@interface Radar12280826 +- (void)createInferiorTransportAndSetEnvironment:(NSMutableDictionary*)environment error:(__autoreleasing NSError*&)error; +@end + +@implementation Radar12280826 +- (void)createInferiorTransportAndSetEnvironment:(NSMutableDictionary*)environment error:(__autoreleasing NSError*&)error {} +@end + diff --git a/test/SemaObjCXX/arc-object-init-destroy.mm b/test/SemaObjCXX/arc-object-init-destroy.mm deleted file mode 100644 index e10e3ea..0000000 --- a/test/SemaObjCXX/arc-object-init-destroy.mm +++ /dev/null @@ -1,52 +0,0 @@ -// RUN: %clang_cc1 -fobjc-runtime-has-weak -fsyntax-only -fobjc-arc -verify -Warc-abi -fblocks -triple x86_64-apple-darwin10.0.0 %s - -typedef __strong id strong_id; -typedef __weak id weak_id; -void test_pseudo_destructors(__strong id *sptr, __weak id *wptr) { - sptr->~id(); // okay - wptr->~id(); // okay - sptr->~strong_id(); // okay - wptr->~weak_id(); - sptr->~weak_id(); // expected-error{{pseudo-destructor destroys object of type '__strong id' with inconsistently-qualified type 'weak_id' (aka '__weak id')}} - wptr->strong_id::~strong_id(); // expected-error{{pseudo-destructor destroys object of type '__weak id' with inconsistently-qualified type 'strong_id' (aka '__strong id')}} - - sptr->id::~id(); // okay - wptr->id::~id(); // okay -} - -void test_delete(__strong id *sptr, __weak id *wptr) { - delete sptr; - delete wptr; - delete [] sptr; // expected-warning{{destroying an array of '__strong id'; this array must not have been allocated from non-ARC code}} - delete [] wptr; // expected-warning{{destroying an array of '__weak id'; this array must not have been allocated from non-ARC code}} -} - -void test_new(int n) { - (void)new strong_id; - (void)new weak_id; - (void)new strong_id [n]; // expected-warning{{allocating an array of 'strong_id' (aka '__strong id'); this array must not be deleted in non-ARC code}} - (void)new weak_id [n]; // expected-warning{{allocating an array of 'weak_id' (aka '__weak id'); this array must not be deleted in non-ARC code}} - - (void)new __strong id; - (void)new __weak id; - (void)new __strong id [n]; // expected-warning{{allocating an array of '__strong id'; this array must not be deleted in non-ARC code}} - - // Infer '__strong'. - __strong id *idptr = new id; - __strong id *idptr2 = new id [n]; // expected-warning{{allocating an array of '__strong id'; this array must not be deleted in non-ARC code}} - - // ... but not for arrays. 
- typedef id id_array[2][3]; - (void)new id_array; // expected-error{{'new' cannot allocate an array of 'id' with no explicit ownership}} - - typedef __strong id strong_id_array[2][3]; - typedef __strong id strong_id_3[3]; - strong_id_3 *idptr3 = new strong_id_array; // expected-warning{{allocating an array of '__strong id'; this array must not be deleted in non-ARC code}} -} - -void test_jump_scope() { - goto done; // expected-error{{goto into protected scope}} - __strong id x; // expected-note{{jump bypasses initialization of retaining variable}} - done: - return; -} diff --git a/test/SemaObjCXX/arc-type-traits.mm b/test/SemaObjCXX/arc-type-traits.mm index 67bab00..12993a9 100644 --- a/test/SemaObjCXX/arc-type-traits.mm +++ b/test/SemaObjCXX/arc-type-traits.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -fobjc-arc -fobjc-runtime-has-weak -verify -std=c++11 %s +// expected-no-diagnostics // Check the results of the various type-trait query functions on // lifetime-qualified types in ARC. diff --git a/test/SemaObjCXX/argument-dependent-lookup.mm b/test/SemaObjCXX/argument-dependent-lookup.mm index a25cc68..244c3f7 100644 --- a/test/SemaObjCXX/argument-dependent-lookup.mm +++ b/test/SemaObjCXX/argument-dependent-lookup.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // : For the purposes of Argument-Dependent // Lookup, Objective-C classes are considered to be in the global diff --git a/test/SemaObjCXX/category-lookup.mm b/test/SemaObjCXX/category-lookup.mm index 0e87025..7ac4d1c 100644 --- a/test/SemaObjCXX/category-lookup.mm +++ b/test/SemaObjCXX/category-lookup.mm @@ -6,5 +6,5 @@ @end void f() { - NSScriptClassDescription *f; // expected-error {{use of undeclared identifier 'NSScriptClassDescription'}} + NSScriptClassDescription *f; // expected-error {{unknown type name 'NSScriptClassDescription'}} } diff --git a/test/SemaObjCXX/composite-objc-pointertype.mm b/test/SemaObjCXX/composite-objc-pointertype.mm index 684f633..35739a8 100644 --- a/test/SemaObjCXX/composite-objc-pointertype.mm +++ b/test/SemaObjCXX/composite-objc-pointertype.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface Foo @end diff --git a/test/SemaObjCXX/conversion-ranking.mm b/test/SemaObjCXX/conversion-ranking.mm index 6c1408b..b34c9a2 100644 --- a/test/SemaObjCXX/conversion-ranking.mm +++ b/test/SemaObjCXX/conversion-ranking.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @protocol P1 @end diff --git a/test/SemaObjCXX/conversion-to-objc-pointer-2.mm b/test/SemaObjCXX/conversion-to-objc-pointer-2.mm index b03d4d8..063ce32 100644 --- a/test/SemaObjCXX/conversion-to-objc-pointer-2.mm +++ b/test/SemaObjCXX/conversion-to-objc-pointer-2.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // rdar: // 7963410 @protocol NSObject @end diff --git a/test/SemaObjCXX/conversion-to-objc-pointer.mm b/test/SemaObjCXX/conversion-to-objc-pointer.mm index 235aaac..41bb4ff 100644 --- a/test/SemaObjCXX/conversion-to-objc-pointer.mm +++ b/test/SemaObjCXX/conversion-to-objc-pointer.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // rdar: // 7963410 template diff --git a/test/SemaObjCXX/debugger-support.mm b/test/SemaObjCXX/debugger-support.mm new file mode 100644 index 0000000..e8e382a --- /dev/null +++ b/test/SemaObjCXX/debugger-support.mm @@ -0,0 +1,8 @@ +// RUN: %clang_cc1 -fdebugger-support 
-fsyntax-only -verify %s +// expected-no-diagnostics + +@class NSString; +void testCompareAgainstPtr(int *ptr, NSString *ns) { + if (ptr == 17) {} + if (ns != 42) {} +} diff --git a/test/SemaObjCXX/decltype.mm b/test/SemaObjCXX/decltype.mm new file mode 100644 index 0000000..9c4ac16 --- /dev/null +++ b/test/SemaObjCXX/decltype.mm @@ -0,0 +1,25 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s +// expected-no-diagnostics +struct HasValueType { + typedef int value_type; +}; + +__attribute__((objc_root_class)) +@interface Foo +{ +@protected + HasValueType foo; +} + +@property (nonatomic) HasValueType bar; +@end + +@implementation Foo +@synthesize bar; + +- (void)test { + decltype(foo)::value_type vt1; + decltype(self->foo)::value_type vt2; + decltype(self.bar)::value_type vt3; +} +@end diff --git a/test/SemaObjCXX/delay-parsing-cfunctions.mm b/test/SemaObjCXX/delay-parsing-cfunctions.mm index fa65dbe..4035d00 100644 --- a/test/SemaObjCXX/delay-parsing-cfunctions.mm +++ b/test/SemaObjCXX/delay-parsing-cfunctions.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -x objective-c++ -std=c++11 -fsyntax-only -Werror -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://10387088 struct X { diff --git a/test/SemaObjCXX/delay-parsing-cplusfuncs.mm b/test/SemaObjCXX/delay-parsing-cplusfuncs.mm index b022709..d0d7922 100644 --- a/test/SemaObjCXX/delay-parsing-cplusfuncs.mm +++ b/test/SemaObjCXX/delay-parsing-cplusfuncs.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -x objective-c++ -fsyntax-only -Werror -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://10387088 @interface MyClass diff --git a/test/SemaObjCXX/delay-parsing-func-tryblock.mm b/test/SemaObjCXX/delay-parsing-func-tryblock.mm index 8cf615e..ecee7be 100644 --- a/test/SemaObjCXX/delay-parsing-func-tryblock.mm +++ b/test/SemaObjCXX/delay-parsing-func-tryblock.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -x objective-c++ -fcxx-exceptions -fsyntax-only -Werror -verify -Wno-objc-root-class %s +// expected-no-diagnostics // rdar://10387088 @interface MyClass diff --git a/test/SemaObjCXX/expr-objcxx.mm b/test/SemaObjCXX/expr-objcxx.mm index e70a001..8ea4dab 100644 --- a/test/SemaObjCXX/expr-objcxx.mm +++ b/test/SemaObjCXX/expr-objcxx.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 %s -verify -pedantic -fsyntax-only +// expected-no-diagnostics // rdar://8366474 void *P = @selector(foo::bar::); diff --git a/test/SemaObjCXX/format-strings.mm b/test/SemaObjCXX/format-strings.mm new file mode 100644 index 0000000..2fb92e2 --- /dev/null +++ b/test/SemaObjCXX/format-strings.mm @@ -0,0 +1,81 @@ +// RUN: %clang_cc1 -fsyntax-only -verify -Wformat-nonliteral -pedantic %s + +#include + +extern "C" { +extern int scanf(const char *restrict, ...); +extern int printf(const char *restrict, ...); +extern int vprintf(const char *restrict, va_list); +} + +@class NSString; + +@interface Format ++ (void)print:(NSString *)format, ... 
__attribute__((format(NSString, 1, 2))); +@end + + +namespace Templates { + template + void my_uninstantiated_print(const T &arg) { + [Format print:@"%d", arg]; + } + + template + void my_print(const T &arg) { + [Format print:@"%d", arg]; // expected-warning {{format specifies type 'int' but the argument has type 'const char *'}} + } + + void use_my_print() { + my_print("abc"); // expected-note {{requested here}} + } + + + template + class UninstantiatedPrinter { + public: + static void print(const T &arg) { + [Format print:@"%d", arg]; // no-warning + } + }; + + template + class Printer { + public: + void print(const T &arg) { + [Format print:@"%d", arg]; // expected-warning {{format specifies type 'int' but the argument has type 'const char *'}} + } + }; + + void use_class(Printer &p) { + p.print("abc"); // expected-note {{requested here}} + } + + + template + class UninstantiatedWrapper { + public: + class Printer { + public: + void print(const T &arg) { + [Format print:@"%d", arg]; // no-warning + } + }; + }; + + template + class Wrapper { + public: + class Printer { + public: + void print(const T &arg) { + [Format print:@"%d", arg]; // expected-warning {{format specifies type 'int' but the argument has type 'const char *'}} + } + }; + }; + + void use_class(Wrapper::Printer &p) { + p.print("abc"); // expected-note {{requested here}} + } +} + diff --git a/test/SemaObjCXX/function-pointer-void-star.mm b/test/SemaObjCXX/function-pointer-void-star.mm index 8d3d625..7c83215 100644 --- a/test/SemaObjCXX/function-pointer-void-star.mm +++ b/test/SemaObjCXX/function-pointer-void-star.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics extern "C" id (*_dealloc)(id) ; diff --git a/test/SemaObjCXX/instantiate-method-return.mm b/test/SemaObjCXX/instantiate-method-return.mm index 2a3ae32..9fad82f 100644 --- a/test/SemaObjCXX/instantiate-method-return.mm +++ b/test/SemaObjCXX/instantiate-method-return.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics // PR7386 @class NSObject; diff --git a/test/SemaObjCXX/ivar-lookup.mm b/test/SemaObjCXX/ivar-lookup.mm index fc99c15..d99e617 100644 --- a/test/SemaObjCXX/ivar-lookup.mm +++ b/test/SemaObjCXX/ivar-lookup.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wno-objc-root-class %s +// expected-no-diagnostics @interface Ivar - (float*)method; @end diff --git a/test/SemaObjCXX/ivar-struct.mm b/test/SemaObjCXX/ivar-struct.mm index 3f9c7eb..c8c9ca9 100644 --- a/test/SemaObjCXX/ivar-struct.mm +++ b/test/SemaObjCXX/ivar-struct.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics @interface A { struct X { int x, y; diff --git a/test/SemaObjCXX/linkage-spec.mm b/test/SemaObjCXX/linkage-spec.mm index 584571d..25b57a9 100644 --- a/test/SemaObjCXX/linkage-spec.mm +++ b/test/SemaObjCXX/linkage-spec.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics extern "C" { @class Protocol; } diff --git a/test/SemaObjCXX/namespace-lookup.mm b/test/SemaObjCXX/namespace-lookup.mm index 205b443..c5521c1 100644 --- a/test/SemaObjCXX/namespace-lookup.mm +++ b/test/SemaObjCXX/namespace-lookup.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics // @interface A diff --git a/test/SemaObjCXX/null_objc_pointer.mm b/test/SemaObjCXX/null_objc_pointer.mm index 0da9e50..e0232bf 100644 --- a/test/SemaObjCXX/null_objc_pointer.mm +++ b/test/SemaObjCXX/null_objc_pointer.mm @@ 
-1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -verify -Wnull-arithmetic %s +// expected-no-diagnostics #define NULL __null @interface X diff --git a/test/SemaObjCXX/nullptr.mm b/test/SemaObjCXX/nullptr.mm index 2b29b04..73a921e 100644 --- a/test/SemaObjCXX/nullptr.mm +++ b/test/SemaObjCXX/nullptr.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -std=c++11 -fblocks -fsyntax-only -verify %s +// expected-no-diagnostics @interface A @end diff --git a/test/SemaObjCXX/overload-gc.mm b/test/SemaObjCXX/overload-gc.mm index 5488ea5..ffb8680 100644 --- a/test/SemaObjCXX/overload-gc.mm +++ b/test/SemaObjCXX/overload-gc.mm @@ -1,4 +1,5 @@ // RUN: %clang_cc1 -fsyntax-only -triple i386-apple-darwin9 -fobjc-gc -verify %s +// expected-no-diagnostics void f0(__weak id *); diff --git a/test/SemaObjCXX/pointer-to-objc-pointer-conv.mm b/test/SemaObjCXX/pointer-to-objc-pointer-conv.mm index d0f8404..7ada2f4 100644 --- a/test/SemaObjCXX/pointer-to-objc-pointer-conv.mm +++ b/test/SemaObjCXX/pointer-to-objc-pointer-conv.mm @@ -1,4 +1,7 @@ // RUN: %clang_cc1 -fsyntax-only -verify %s +// expected-no-diagnostics + +// REQUIRES: LP64 @interface G @end diff --git a/test/SemaObjCXX/propert-dot-error.mm b/test/SemaObjCXX/propert-dot-error.mm index 2237411..2a462e4 100644 --- a/test/SemaObjCXX/propert-dot-error.mm +++ b/test/SemaObjCXX/propert-dot-error.mm @@ -63,7 +63,7 @@ class Forward; void testD(D *d) { d.Forward::property = 17; // expected-error{{property access cannot be qualified with 'Forward::'}} - d->Forward::ivar = 12; // expected-error{{ivar access cannot be qualified with 'Forward::'}} + d->Forward::ivar = 12; // expected-error{{instance variable access cannot be qualified with 'Forward::'}} d.D::property = 17; // expected-error{{expected a class or namespace}} d->D::ivar = 12; // expected-error{{expected a class or namespace}} } diff --git a/test/SemaObjCXX/properties.mm b/test/SemaObjCXX/properties.mm index 3c6b138..0783eeb 100644 --- a/test/SemaObjCXX/properties.mm +++ b/test/SemaObjCXX/properties.mm @@ -106,3 +106,26 @@ void test7(Test7 *ptr) { delete ptr.implicit_struct_property; delete ptr.explicit_struct_property; } + +// Make sure the returned value from property assignment is void, +// because there isn't any other viable way to handle it for +// non-trivial classes. +class NonTrivial1 { +public: + ~NonTrivial1(); +}; +class NonTrivial2 { +public: + NonTrivial2(); + NonTrivial2(const NonTrivial2&); +}; +@interface TestNonTrivial +@property(assign, nonatomic) NonTrivial1 p1; +@property(assign, nonatomic) NonTrivial2 p2; +@end +TestNonTrivial *TestNonTrivialObj; + +extern void* VoidType; +extern decltype(TestNonTrivialObj.p1 = NonTrivial1())* VoidType; +extern decltype(TestNonTrivialObj.p2 = NonTrivial2())* VoidType; + diff --git a/test/SemaObjCXX/property-synthesis-error.mm b/test/SemaObjCXX/property-synthesis-error.mm index 4f64a3a..b6ab85c 100644 --- a/test/SemaObjCXX/property-synthesis-error.mm +++ b/test/SemaObjCXX/property-synthesis-error.mm @@ -16,7 +16,7 @@ @interface MyClass () -@property (readwrite) NSMutableArray * array; +@property (readwrite, retain) NSMutableArray * array; @end @@ -83,3 +83,24 @@ struct ConvertToIncomplete { operator IncompleteStruct&(); }; @implementation SynthIncompleteRef // expected-error {{cannot synthesize property 'x' with incomplete type 'IncompleteStruct'}} @synthesize y; // expected-error {{cannot synthesize property 'y' with incomplete type 'IncompleteStruct'}} @end + + +// Check error handling for instantiation during property synthesis. 
+template class TemplateClass1 {
+ T *x; // expected-error {{'x' declared as a pointer to a reference of type 'int &'}}
+};
+template class TemplateClass2 {
+ TemplateClass2& operator=(TemplateClass1);
+ TemplateClass2& operator=(TemplateClass2) { T(); } // expected-error {{reference to type 'int' requires an initializer}} \
+ // expected-note 2 {{implicitly declared private here}} \
+ // expected-note {{'operator=' declared here}}
+};
+__attribute__((objc_root_class)) @interface InterfaceWithTemplateProperties
+@property TemplateClass2 intprop;
+@property TemplateClass2 &floatprop;
+@end
+@implementation InterfaceWithTemplateProperties // expected-error 2 {{'operator=' is a private member of 'TemplateClass2'}} \
+ // expected-error {{atomic property of reference type 'TemplateClass2 &' cannot have non-trivial assignment operator}} \
+ // expected-note {{in instantiation of template class}} \
+ // expected-note {{in instantiation of member function}}
+@end
diff --git a/test/SemaObjCXX/property-type-mismatch.mm b/test/SemaObjCXX/property-type-mismatch.mm
index 059793c..2b267ad 100644
--- a/test/SemaObjCXX/property-type-mismatch.mm
+++ b/test/SemaObjCXX/property-type-mismatch.mm
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 // rdar://9740328
 @protocol P1;
diff --git a/test/SemaObjCXX/references.mm b/test/SemaObjCXX/references.mm
index 3a52200..f63e17d 100644
--- a/test/SemaObjCXX/references.mm
+++ b/test/SemaObjCXX/references.mm
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -verify -emit-llvm -o - %s
+// expected-no-diagnostics
 // Test reference binding.
diff --git a/test/SemaObjCXX/reinterpret-cast-objc-pointertype.mm b/test/SemaObjCXX/reinterpret-cast-objc-pointertype.mm
index fcabade..4d7c049 100644
--- a/test/SemaObjCXX/reinterpret-cast-objc-pointertype.mm
+++ b/test/SemaObjCXX/reinterpret-cast-objc-pointertype.mm
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 @interface NSString
 @end
diff --git a/test/SemaObjCXX/reserved-keyword-methods.mm b/test/SemaObjCXX/reserved-keyword-methods.mm
index 1302128..12608de 100644
--- a/test/SemaObjCXX/reserved-keyword-methods.mm
+++ b/test/SemaObjCXX/reserved-keyword-methods.mm
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 #define FOR_EACH_KEYWORD(macro) \
 macro(asm) \
diff --git a/test/SemaObjCXX/standard-conversion-to-bool.mm b/test/SemaObjCXX/standard-conversion-to-bool.mm
index 2e69848..c36b63b 100644
--- a/test/SemaObjCXX/standard-conversion-to-bool.mm
+++ b/test/SemaObjCXX/standard-conversion-to-bool.mm
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 @class NSString;
 id a;
diff --git a/test/SemaObjCXX/static-cast.mm b/test/SemaObjCXX/static-cast.mm
index e282702..494ee25 100644
--- a/test/SemaObjCXX/static-cast.mm
+++ b/test/SemaObjCXX/static-cast.mm
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 @protocol NSTextViewDelegate;
diff --git a/test/SemaObjCXX/vla.mm b/test/SemaObjCXX/vla.mm
index d6da1c0..e1d556e 100644
--- a/test/SemaObjCXX/vla.mm
+++ b/test/SemaObjCXX/vla.mm
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 @interface Data
 - (unsigned)length;
diff --git a/test/SemaOpenCL/cond.cl b/test/SemaOpenCL/cond.cl
index 79dc82d..802ad9b 100644
--- a/test/SemaOpenCL/cond.cl
+++ b/test/SemaOpenCL/cond.cl
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 %s -verify -pedantic -fsyntax-only
+// expected-no-diagnostics
 typedef __attribute__((ext_vector_type(4))) float float4;
diff --git a/test/SemaOpenCL/init.cl b/test/SemaOpenCL/init.cl
index b3ecfec..a156921 100644
--- a/test/SemaOpenCL/init.cl
+++ b/test/SemaOpenCL/init.cl
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 %s -verify -pedantic -fsyntax-only
+// expected-no-diagnostics
 typedef float float8 __attribute((ext_vector_type(8)));
diff --git a/test/SemaOpenCL/vec_compare.cl b/test/SemaOpenCL/vec_compare.cl
index dd91aa5..567629c 100644
--- a/test/SemaOpenCL/vec_compare.cl
+++ b/test/SemaOpenCL/vec_compare.cl
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 %s -verify -pedantic -fsyntax-only
+// expected-no-diagnostics
 typedef __attribute__((ext_vector_type(2))) unsigned int uint2;
 typedef __attribute__((ext_vector_type(2))) int int2;
diff --git a/test/SemaOpenCL/vector_literals_const.cl b/test/SemaOpenCL/vector_literals_const.cl
index e761816..ee5ae20 100644
--- a/test/SemaOpenCL/vector_literals_const.cl
+++ b/test/SemaOpenCL/vector_literals_const.cl
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 %s -verify -pedantic -fsyntax-only
+// expected-no-diagnostics
 typedef int int2 __attribute((ext_vector_type(2)));
 typedef int int3 __attribute((ext_vector_type(3)));
diff --git a/test/SemaTemplate/ackermann.cpp b/test/SemaTemplate/ackermann.cpp
index 9525bfc..fc523e3 100644
--- a/test/SemaTemplate/ackermann.cpp
+++ b/test/SemaTemplate/ackermann.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 // template
 // struct Ackermann {
diff --git a/test/SemaTemplate/alias-church-numerals.cpp b/test/SemaTemplate/alias-church-numerals.cpp
index 69d7716..a1613230 100644
--- a/test/SemaTemplate/alias-church-numerals.cpp
+++ b/test/SemaTemplate/alias-church-numerals.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s
+// expected-no-diagnostics
 template class, typename> class T, template class V> struct PartialApply {
 template using R = T;
diff --git a/test/SemaTemplate/alias-template-template-param.cpp b/test/SemaTemplate/alias-template-template-param.cpp
index c22fccb..0b17d10 100644
--- a/test/SemaTemplate/alias-template-template-param.cpp
+++ b/test/SemaTemplate/alias-template-template-param.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s
+// expected-no-diagnostics
 template class D> using C = D;
diff --git a/test/SemaTemplate/array-to-pointer-decay.cpp b/test/SemaTemplate/array-to-pointer-decay.cpp
index 072c0e5..26d277d 100644
--- a/test/SemaTemplate/array-to-pointer-decay.cpp
+++ b/test/SemaTemplate/array-to-pointer-decay.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 struct mystruct {
 int member;
diff --git a/test/SemaTemplate/atomics.cpp b/test/SemaTemplate/atomics.cpp
index e9fdc9d..19b607f 100644
--- a/test/SemaTemplate/atomics.cpp
+++ b/test/SemaTemplate/atomics.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -verify %s
+// expected-no-diagnostics
 // PR8345
 template T f(T* value) {
diff --git a/test/SemaTemplate/class-template-ctor-initializer.cpp b/test/SemaTemplate/class-template-ctor-initializer.cpp
index 44bb4bd..6043327 100644
--- a/test/SemaTemplate/class-template-ctor-initializer.cpp
+++ b/test/SemaTemplate/class-template-ctor-initializer.cpp
@@ -53,3 +53,20 @@ namespace PR7259 {
 return 0;
 }
 }
+
+namespace NonDependentError {
+ struct Base { Base(int); }; // expected-note 2{{candidate}}
+
+ template
+ struct Derived1 : Base {
+ Derived1() : Base(1, 2) {} // expected-error {{no matching constructor}}
+ };
+
+ template
+ struct Derived2 : Base {
+ Derived2() : BaseClass(1) {} // expected-error {{does not name a non-static data member or base}}
+ };
+
+ Derived1 d1;
+ Derived2 d2;
+}
diff --git a/test/SemaTemplate/class-template-id.cpp b/test/SemaTemplate/class-template-id.cpp
index 3b02778..b674537 100644
--- a/test/SemaTemplate/class-template-id.cpp
+++ b/test/SemaTemplate/class-template-id.cpp
@@ -40,7 +40,7 @@ typedef N::C c2;
 // PR5655
 template struct Foo { }; // expected-note{{template is declared here}}
-void f(void) { Foo bar; } // expected-error{{without a template argument list}}
+void f(void) { Foo bar; } // expected-error{{use of class template Foo requires template arguments}}
 // rdar://problem/8254267
 template class Party;
diff --git a/test/SemaTemplate/constexpr-instantiate.cpp b/test/SemaTemplate/constexpr-instantiate.cpp
index 2f9fe0e..80c4aaf 100644
--- a/test/SemaTemplate/constexpr-instantiate.cpp
+++ b/test/SemaTemplate/constexpr-instantiate.cpp
@@ -75,3 +75,136 @@ namespace Reference {
 constexpr int n = const_cast(S::r);
 static_assert(n == 5, "");
 }
+
+namespace Unevaluated {
+ // We follow g++ in treating any reference to a constexpr function template
+ // specialization as requiring an instantiation, even if it occurs in an
+ // unevaluated context.
+ //
+ // We go slightly further than g++, and also trigger the implicit definition
+ // of a defaulted special member in the same circumstances. This seems scary,
+ // since a lot of classes have constexpr special members in C++11, but the
+ // only observable impact should be the implicit instantiation of constexpr
+ // special member templates (defaulted special members should only be
+ // generated if they are well-formed, and non-constexpr special members in a
+ // base or member cause the class's special member to not be constexpr).
+ //
+ // FIXME: None of this is required by the C++ standard. The rules in this
+ // area are poorly specified, so this is subject to change.
+ namespace NotConstexpr {
+ template struct S {
+ S() : n(0) {}
+ S(const S&) : n(T::error) {}
+ int n;
+ };
+ struct U : S {};
+ decltype(U(U())) u; // ok, don't instantiate S::S() because it wasn't declared constexpr
+ }
+ namespace Constexpr {
+ template struct S {
+ constexpr S() : n(0) {}
+ constexpr S(const S&) : n(T::error) {} // expected-error {{has no members}}
+ int n;
+ };
+ struct U : S {}; // expected-note {{instantiation}}
+ decltype(U(U())) u; // expected-note {{here}}
+ }
+
+ namespace PR11851_Comment0 {
+ template constexpr int f() { return x; }
+ template void ovf(int (&x)[f()]);
+ void f() { int x[10]; ovf<10>(x); }
+ }
+ namespace PR11851_Comment1 {
+ template
+ constexpr bool Integral() {
+ return true;
+ }
+ template()>
+ struct safe_make_unsigned {
+ typedef T type;
+ };
+ template
+ using Make_unsigned = typename safe_make_unsigned::type;
+ template
+ struct get_distance_type {
+ using type = int;
+ };
+ template
+ auto size(R) -> Make_unsigned::type>;
+ auto check() -> decltype(size(0));
+ }
+
+ namespace PR11851_Comment6 {
+ template struct foo {};
+ template constexpr int bar() { return 0; }
+ template foo()> foobar();
+ auto foobar_ = foobar();
+ }
+
+ namespace PR11851_Comment9 {
+ struct S1 {
+ constexpr S1() {}
+ constexpr operator int() const { return 0; }
+ };
+ int k1 = sizeof(short{S1(S1())});
+
+ struct S2 {
+ constexpr S2() {}
+ constexpr operator int() const { return 123456; }
+ };
+ int k2 = sizeof(short{S2(S2())}); // expected-error {{cannot be narrowed}} expected-note {{override}}
+ }
+
+ namespace PR12288 {
+ template constexpr bool foo() { return true; }
+ template struct bar {};
+ template bar()> baz() { return bar()>(); }
+ int main() { baz(); }
+ }
+
+ namespace PR13423 {
+ template struct enable_if {};
+ template struct enable_if { using type = T; };
+
+ template struct F {
+ template
+ static constexpr bool f() { return sizeof(T) < U::size; }
+
+ template
+ static typename enable_if(), void>::type g() {} // expected-note {{disabled by 'enable_if'}}
+ };
+
+ struct U { static constexpr int size = 2; };
+
+ void h() { F::g(); }
+ void i() { F::g(); } // expected-error {{no matching function}}
+ }
+
+ namespace PR14203 {
+ struct duration { constexpr duration() {} };
+
+ template
+ void sleep_for() {
+ constexpr duration max = duration();
+ }
+ }
+}
+
+namespace NoInstantiationWhenSelectingOverload {
+ // Check that we don't instantiate conversion functions when we're checking
+ // for the existence of an implicit conversion sequence, only when a function
+ // is actually chosen by overload resolution.
+ struct S {
+ template constexpr S(T) : n(T::error) {} // expected-error {{no members}}
+ int n;
+ };
+
+ void f(S);
+ void f(int);
+
+ void g() { f(0); }
+ void h() { (void)sizeof(f(0)); }
+ void i() { (void)sizeof(f("oops")); } // expected-note {{instantiation of}}
+}
diff --git a/test/SemaTemplate/current-instantiation.cpp b/test/SemaTemplate/current-instantiation.cpp
index ccef811..f47c071 100644
--- a/test/SemaTemplate/current-instantiation.cpp
+++ b/test/SemaTemplate/current-instantiation.cpp
@@ -2,7 +2,7 @@
 // This test concerns the identity of dependent types within the
 // canonical type system, specifically focusing on the difference
-// between members of the current instantiation and membmers of an
+// between members of the current instantiation and members of an
 // unknown specialization. This considers C++ [temp.type], which
 // specifies type equivalence within a template, and C++0x
 // [temp.dep.type], which defines what it means to be a member of the
@@ -235,3 +235,15 @@ namespace rdar10194295 {
 template::Enum> class X::Inner { };
 }
+
+namespace RebuildDependentScopeDeclRefExpr {
+ template struct N {};
+ template struct X {
+ static const int thing = 0;
+ N data();
+ N foo();
+ };
+ template N::thing> X::data() {}
+ // FIXME: We should issue a typo-correction here.
+ template N::think> X::foo() {} // expected-error {{no member named 'think' in 'X'}}
+}
diff --git a/test/SemaTemplate/deduction-crash.cpp b/test/SemaTemplate/deduction-crash.cpp
index cf3899f..0714c5e 100644
--- a/test/SemaTemplate/deduction-crash.cpp
+++ b/test/SemaTemplate/deduction-crash.cpp
@@ -2,7 +2,7 @@
 // Note that the error count below doesn't matter. We just want to
 // make sure that the parser doesn't crash.
-// CHECK: 13 errors
+// CHECK: 15 errors
 // PR7511
 template
@@ -87,3 +87,26 @@ void register_object_imp ( )
 {
 cout << endl<1>;
 }
+
+// PR12933
+namespacae PR12933 {
+ template
+ template
+ void function(S a, T b) {}
+
+ int main() {
+ function(0, 1);
+ return 0;
+ }
+}
+
+// A buildbot failure from libcxx
+namespace libcxx_test {
+ template struct __pointer_traits_element_type;
+ template struct __pointer_traits_element_type<_Ptr, true>;
+ template